text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
import torch.nn as nn
from torch.distributions import MultivariateNormal, constraints
import pyro.distributions as dist
from pyro.contrib.timeseries.base import TimeSeriesModel
from pyro.nn import PyroParam, pyro_method
from pyro.ops.ssm_gp import MaternKernel
from pyro.ops.tensor_utils import block_diag_embed, repeated_matmul
class GenericLGSSMWithGPNoiseModel(TimeSeriesModel):
    """
    A generic Linear Gaussian State Space Model parameterized with arbitrary time invariant
    transition and observation dynamics together with separate Gaussian Process noise models
    for each output dimension. In more detail, the generative process is:

    :math:`y_i(t) = \\sum_j A_{ij} z_j(t) + f_i(t) + \\epsilon_i(t)`

    where the latent variables :math:`{\\bf z}(t)` follow generic time invariant Linear Gaussian dynamics
    and the :math:`f_i(t)` are Gaussian Processes with Matern kernels.

    The targets are (implicitly) assumed to be evenly spaced in time. In particular a timestep of
    :math:`dt=1.0` for the continuous-time GP dynamics corresponds to a single discrete step of
    the :math:`{\\bf z}`-space dynamics. Training and inference are logarithmic in the length of
    the time series T.

    :param int obs_dim: The dimension of the targets at each time step.
    :param int state_dim: The dimension of the :math:`{\\bf z}` latent state at each time step.
    :param float nu: The order of the Matern kernel; one of 0.5, 1.5 or 2.5.
    :param torch.Tensor length_scale_init: optional initial values for the kernel length scale
        given as a ``obs_dim``-dimensional tensor
    :param torch.Tensor kernel_scale_init: optional initial values for the kernel scale
        given as a ``obs_dim``-dimensional tensor
    :param torch.Tensor obs_noise_scale_init: optional initial values for the observation noise scale
        given as a ``obs_dim``-dimensional tensor
    :param bool learnable_observation_loc: whether the mean of the observation model should be learned or not;
        defaults to False.
    """

    def __init__(
        self,
        obs_dim=1,
        state_dim=2,
        nu=1.5,
        obs_noise_scale_init=None,
        length_scale_init=None,
        kernel_scale_init=None,
        learnable_observation_loc=False,
    ):
        self.obs_dim = obs_dim
        self.state_dim = state_dim
        self.nu = nu
        if obs_noise_scale_init is None:
            obs_noise_scale_init = 0.2 * torch.ones(obs_dim)
        assert obs_noise_scale_init.shape == (obs_dim,)
        super().__init__()
        # One independent Matern GP per output dimension.
        self.kernel = MaternKernel(
            nu=nu,
            num_gps=obs_dim,
            length_scale_init=length_scale_init,
            kernel_scale_init=kernel_scale_init,
        )
        # One discrete step of the z-dynamics corresponds to dt=1.0 of GP time.
        self.dt = 1.0
        # Full latent state = (GP states for each output dim) followed by the z-state;
        # the GP block occupies the first full_gp_state_dim coordinates throughout.
        self.full_state_dim = self.kernel.state_dim * obs_dim + state_dim
        self.full_gp_state_dim = self.kernel.state_dim * obs_dim
        self.obs_noise_scale = PyroParam(
            obs_noise_scale_init, constraint=constraints.positive
        )
        self.trans_noise_scale_sq = PyroParam(
            torch.ones(state_dim), constraint=constraints.positive
        )
        # Initialized near the identity so initial dynamics are close to a random walk.
        self.z_trans_matrix = nn.Parameter(
            torch.eye(state_dim) + 0.03 * torch.randn(state_dim, state_dim)
        )
        self.z_obs_matrix = nn.Parameter(0.3 * torch.randn(state_dim, obs_dim))
        self.init_noise_scale_sq = PyroParam(
            torch.ones(state_dim), constraint=constraints.positive
        )
        # Fixed selector that reads out the observed (first) component of each
        # GP's state block; not learned, so registered as a buffer.
        gp_obs_matrix = torch.zeros(self.kernel.state_dim * obs_dim, obs_dim)
        for i in range(obs_dim):
            gp_obs_matrix[self.kernel.state_dim * i, i] = 1.0
        self.register_buffer("gp_obs_matrix", gp_obs_matrix)
        # Indices of the observed GP-state coordinates (used in _forecast).
        self.obs_selector = torch.tensor(
            [self.kernel.state_dim * d for d in range(obs_dim)], dtype=torch.long
        )
        if learnable_observation_loc:
            self.obs_loc = nn.Parameter(torch.zeros(obs_dim))
        else:
            self.register_buffer("obs_loc", torch.zeros(obs_dim))

    def _get_obs_matrix(self):
        # Stack the fixed GP read-out on top of the learned z-space observation
        # matrix: (full_gp_state_dim + state_dim, obs_dim).
        return torch.cat([self.gp_obs_matrix, self.z_obs_matrix], dim=0)

    def _get_init_dist(self):
        """Zero-mean initial distribution: stationary GP covariance for the GP
        block and a learned diagonal covariance for the z block."""
        loc = self.z_trans_matrix.new_zeros(self.full_state_dim)
        covar = self.z_trans_matrix.new_zeros(self.full_state_dim, self.full_state_dim)
        covar[: self.full_gp_state_dim, : self.full_gp_state_dim] = block_diag_embed(
            self.kernel.stationary_covariance()
        )
        covar[
            self.full_gp_state_dim :, self.full_gp_state_dim :
        ] = self.init_noise_scale_sq.diag_embed()
        return MultivariateNormal(loc, covar)

    def _get_obs_dist(self):
        """Independent Gaussian observation noise for each output dimension."""
        return dist.Normal(self.obs_loc, self.obs_noise_scale).to_event(1)

    def get_dist(self, duration=None):
        """
        Get the :class:`~pyro.distributions.GaussianHMM` distribution that corresponds
        to :class:`GenericLGSSMWithGPNoiseModel`.

        :param int duration: Optional size of the time axis ``event_shape[0]``.
            This is required when sampling from homogeneous HMMs whose parameters
            are not expanded along the time axis.
        """
        (
            gp_trans_matrix,
            gp_process_covar,
        ) = self.kernel.transition_matrix_and_covariance(dt=self.dt)
        # Block-diagonal transition covariance: GP process noise on the GP block,
        # learned diagonal noise on the z block.
        trans_covar = self.z_trans_matrix.new_zeros(
            self.full_state_dim, self.full_state_dim
        )
        trans_covar[
            : self.full_gp_state_dim, : self.full_gp_state_dim
        ] = block_diag_embed(gp_process_covar)
        trans_covar[
            self.full_gp_state_dim :, self.full_gp_state_dim :
        ] = self.trans_noise_scale_sq.diag_embed()
        trans_dist = MultivariateNormal(
            trans_covar.new_zeros(self.full_state_dim), trans_covar
        )
        # Block-diagonal transition matrix: GP dynamics and z dynamics do not mix.
        full_trans_mat = trans_covar.new_zeros(self.full_state_dim, self.full_state_dim)
        full_trans_mat[
            : self.full_gp_state_dim, : self.full_gp_state_dim
        ] = block_diag_embed(gp_trans_matrix)
        full_trans_mat[
            self.full_gp_state_dim :, self.full_gp_state_dim :
        ] = self.z_trans_matrix
        return dist.GaussianHMM(
            self._get_init_dist(),
            full_trans_mat,
            trans_dist,
            self._get_obs_matrix(),
            self._get_obs_dist(),
            duration=duration,
        )

    @pyro_method
    def log_prob(self, targets):
        """
        :param torch.Tensor targets: A 2-dimensional tensor of real-valued targets
            of shape ``(T, obs_dim)``, where ``T`` is the length of the time series and ``obs_dim``
            is the dimension of the real-valued ``targets`` at each time step
        :returns torch.Tensor: A (scalar) log probability.
        """
        assert targets.dim() == 2 and targets.size(-1) == self.obs_dim
        return self.get_dist().log_prob(targets)

    @torch.no_grad()
    def _filter(self, targets):
        """
        Return the filtering state for the associated state space model.
        """
        assert targets.dim() == 2 and targets.size(-1) == self.obs_dim
        return self.get_dist().filter(targets)

    @torch.no_grad()
    def _forecast(self, N_timesteps, filtering_state, include_observation_noise=True):
        """
        Internal helper for forecasting.
        """
        # dts = [1, 2, ..., N_timesteps]: forecast horizons in units of self.dt.
        dts = (
            torch.arange(
                N_timesteps,
                dtype=self.z_trans_matrix.dtype,
                device=self.z_trans_matrix.device,
            )
            + 1.0
        )
        dts = dts.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
        (
            gp_trans_matrix,
            gp_process_covar,
        ) = self.kernel.transition_matrix_and_covariance(dt=dts)
        gp_trans_matrix = block_diag_embed(gp_trans_matrix)
        # Only the observed (first) GP-state coordinate contributes to targets.
        gp_process_covar = block_diag_embed(gp_process_covar[..., 0:1, 0:1])
        # Powers A^1 ... A^N of the z transition matrix, mapped to observation space.
        N_trans_matrix = repeated_matmul(self.z_trans_matrix, N_timesteps)
        N_trans_obs = torch.matmul(N_trans_matrix, self.z_obs_matrix)
        # z-state contribution + gp contribution
        predicted_mean1 = torch.matmul(
            filtering_state.loc[-self.state_dim :].unsqueeze(-2), N_trans_obs
        ).squeeze(-2)
        predicted_mean2 = torch.matmul(
            filtering_state.loc[: self.full_gp_state_dim].unsqueeze(-2),
            gp_trans_matrix[..., self.obs_selector],
        ).squeeze(-2)
        predicted_mean = predicted_mean1 + predicted_mean2
        # first compute the contributions from filtering_state.covariance_matrix: z-space and gp
        fs_cov = filtering_state.covariance_matrix
        predicted_covar1z = torch.matmul(
            N_trans_obs.transpose(-1, -2),
            torch.matmul(
                fs_cov[self.full_gp_state_dim :, self.full_gp_state_dim :], N_trans_obs
            ),
        )  # N O O
        gp_trans = gp_trans_matrix[..., self.obs_selector]
        predicted_covar1gp = torch.matmul(
            gp_trans.transpose(-1, -2),
            torch.matmul(
                # NOTE(review): the extra ':' in the first slice below is a
                # harmless no-op (step None); the expression equals
                # fs_cov[:full_gp_state_dim, :full_gp_state_dim].
                fs_cov[: self.full_gp_state_dim :, : self.full_gp_state_dim], gp_trans
            ),
        )
        # next compute the contribution from process noise that is injected at each timestep.
        # (we need to do a cumulative sum to integrate across time for the z-state contribution)
        z_process_covar = self.trans_noise_scale_sq.diag_embed()
        N_trans_obs_shift = torch.cat(
            [self.z_obs_matrix.unsqueeze(0), N_trans_obs[0:-1]]
        )
        predicted_covar2z = torch.matmul(
            N_trans_obs_shift.transpose(-1, -2),
            torch.matmul(z_process_covar, N_trans_obs_shift),
        )  # N O O
        predicted_covar = (
            predicted_covar1z
            + predicted_covar1gp
            + gp_process_covar
            + torch.cumsum(predicted_covar2z, dim=0)
        )
        if include_observation_noise:
            predicted_covar = (
                predicted_covar + self.obs_noise_scale.pow(2.0).diag_embed()
            )
        return predicted_mean, predicted_covar

    @pyro_method
    def forecast(self, targets, N_timesteps):
        """
        :param torch.Tensor targets: A 2-dimensional tensor of real-valued targets
            of shape ``(T, obs_dim)``, where ``T`` is the length of the time series and ``obs_dim``
            is the dimension of the real-valued targets at each time step. These
            represent the training data that are conditioned on for the purpose of making
            forecasts.
        :param int N_timesteps: The number of timesteps to forecast into the future from
            the final target ``targets[-1]``.
        :returns torch.distributions.MultivariateNormal: Returns a predictive MultivariateNormal distribution
            with batch shape ``(N_timesteps,)`` and event shape ``(obs_dim,)``
        """
        filtering_state = self._filter(targets)
        predicted_mean, predicted_covar = self._forecast(N_timesteps, filtering_state)
        return MultivariateNormal(predicted_mean, predicted_covar)
| uber/pyro | pyro/contrib/timeseries/lgssmgp.py | Python | apache-2.0 | 11,250 | [
"Gaussian"
] | 4ff8cb91bbfac93980f0777b68374e2a9dd7c37ae6cd54fd20f0c143adb156e3 |
# -*- coding: utf-8 -*-
'''
Copyright (c) 2015 by Tobias Houska
This file is part of Statistical Parameter Estimation Tool (SPOTPY).
:author: Tobias Houska
Implements a variant of DE-MC_Z. The sampler is a multi-chain sampler that
proposes states based on the differences between random past states.
The sampler does not use the snooker updater but does use the crossover
probability distribution. Convergence assessment is based on a
naive implementation of the Gelman-Rubin convergence statistics.
The basis for this algorithm are the following papers:
Provides the basis for the DE-MC_Z extension (also see second paper).
C.J.F. ter Braak, and J.A. Vrugt, Differential evolution Markov chain with
snooker updater and fewer chains, Statistics and Computing, 18(4),
435-446, doi:10.1007/s11222-008-9104-9, 2008.
Introduces the original DREAM idea:
J.A. Vrugt, C.J.F. ter Braak, C.G.H. Diks, D. Higdon, B.A. Robinson, and
J.M. Hyman, Accelerating Markov chain Monte Carlo simulation by
differential evolution with self-adaptive randomized subspace sampling,
International Journal of Nonlinear Sciences and Numerical
Simulation, 10(3), 273-290, 2009.
This paper uses DREAM in an application
J.A. Vrugt, C.J.F. ter Braak, M.P. Clark, J.M. Hyman, and B.A. Robinson,
Treatment of input uncertainty in hydrologic modeling: Doing hydrology
backward with Markov chain Monte Carlo simulation, Water Resources
Research, 44, W00B09, doi:10.1029/2007WR006720, 2008.
Based on multichain_mcmc 0.3
Copyright (c) 2010 John Salvatier.
All rights reserved.
Redistribution and use in source and binary forms are permitted
provided that the above copyright notice and this paragraph are
duplicated in all such forms and that any documentation,
advertising materials, and other materials related to such
distribution and use acknowledge that the software was developed
by the <organization>. The name of the
<organization> may not be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from . import _algorithm
import numpy as np
import time
class DEMCZError(Exception):
    """Raised for internal failures of the DE-MC(Z) sampler."""
class demcz(_algorithm):
    '''
    Implements the DE-MC_Z algorithm from ter Braak and Vrugt (2008).

    Input
    ----------
    spot_setup: class
        model: function
            Should be callable with a parameter combination of the parameter-function
            and return an list of simulation results (as long as evaluation list)
        parameter: function
            When called, it should return a random parameter combination. Which can
            be e.g. uniform or Gaussian
        objectivefunction: function
            Should return the objectivefunction for a given list of a model simulation and
            observation.
        evaluation: function
            Should return the true values as return by the model.

    dbname: str
        * Name of the database where parameter, objectivefunction value and simulation results will be saved.

    dbformat: str
        * ram: fast suited for short sampling time. no file will be created and results are saved in an array.
        * csv: A csv file will be created, which you can import afterwards.

    parallel: str
        * seq: Sequential sampling (default): Normal iterations on one core of your cpu.
        * mpc: Multi processing: Iterations on all available cores on your cpu (recommended for windows os).
        * mpi: Message Passing Interface: Parallel computing on cluster pcs (recommended for unix os).

    save_sim: boolean
        * True:  Simulation results will be saved
        * False: Simulation results will not be saved

    alt_objfun: str or None, default: 'log_p'
        alternative objectivefunction to be used for algorithm
        * None: the objfun defined in spot_setup.objectivefunction is used
        * any str: if str is found in spotpy.objectivefunctions,
          this objectivefunction is used, else falls back to None
          e.g.: 'log_p', 'rmse', 'bias', 'kge' etc.
    '''

    def __init__(self, *args, **kwargs):
        # Default to the log-probability objective unless the caller overrides it.
        if 'alt_objfun' not in kwargs:
            kwargs['alt_objfun'] = 'log_p'
        super(demcz, self).__init__(*args, **kwargs)

    def check_par_validity(self, par):
        """Clamp each parameter to its [min_bound, max_bound] interval in place."""
        if len(par) == len(self.min_bound) and len(par) == len(self.max_bound):
            for i in range(len(par)):
                if par[i] < self.min_bound[i]:
                    par[i] = self.min_bound[i]
                if par[i] > self.max_bound[i]:
                    par[i] = self.max_bound[i]
        else:
            print('ERROR Bounds have not the same lenghts as Parameterarray')
        return par

    # def simulate(self):

    def sample(self, repetitions, nChains=5, burnIn=100, thin=1,
               convergenceCriteria=.8, variables_of_interest=None,
               DEpairs=2, adaptationRate='auto', eps=5e-2,
               mConvergence=True, mAccept=True):
        """
        Samples from a posterior distribution using DREAM.

        Parameters
        ----------
        repetitions : int
            number of draws from the sample distribution to be returned
        nChains : int
            number of different chains to employ
        burnInSize : int
            number of iterations (meaning draws / nChains) to do before doing actual sampling.
        DEpairs : int
            number of pairs of chains to base movements off of
        eps : float
            used in jittering the chains

        Returns
        -------
        None : None
            sample sets
            self.history which contains the combined draws for all the chains
            self.cur_iter which is the total number of iterations
            self.acceptRatio which is the acceptance ratio
            self.burnIn which is the number of burn in iterations done
            self.R which is the gelman rubin convergence diagnostic for each dimension
        """
        starttime = time.time()
        intervaltime = starttime
        self.min_bound, self.max_bound = self.parameter(
        )['minbound'], self.parameter()['maxbound']
        # From here on 'repetitions' is interpreted as draws PER CHAIN.
        repetitions = int(repetitions / nChains)
        ndraw_max = repetitions * nChains
        maxChainDraws = int(ndraw_max / nChains)
        dimensions = len(self.parameter()['random'])
        # minbound,maxbound=self.find_min_max()
        # select variables if necessary
        if variables_of_interest is not None:
            slices = []
            for var in variables_of_interest:
                slices.append(self.slices[var])
        else:
            slices = [slice(None, None)]
        # make a list of starting chains that at least span the dimension space
        # in this case it will be of size 2*dim
        nSeedIterations = max(int(np.ceil(dimensions * 2 / nChains)), 2)
        # init a simulationhistory instance
        history = _SimulationHistory(maxChainDraws + nSeedIterations,
                                     nChains, dimensions)
        history.add_group('interest', slices)

        ### BURN_IN
        firstcall = True
        # NOTE(review): '[x] * n' repeats references, but each element is
        # overwritten index-by-index below, so chains do not alias in practice.
        burnInpar = [np.zeros((nChains, dimensions))] * nSeedIterations
        for i in range(nSeedIterations):
            self._logPs = []
            simulationlist = []
            old_like = np.empty(nChains)
            # Draw one random start vector per chain.
            param_generator = (
                (rep, self.parameter()['random']) for rep in range(int(nChains)))
            for rep, vector, simulations in self.repeat(param_generator):
                burnInpar[i][rep] = vector
                likelist = self.objectivefunction(
                    evaluation=self.evaluation, simulation=simulations)
                if firstcall == True:
                    self.initialize_database(list(vector), self.parameter()['name'], simulations, likelist)
                    firstcall = False
                simulationlist.append(simulations)
                self._logPs.append(likelist)
                old_like[rep] = likelist
                self.status(rep, likelist, vector)
                burnInpar[i][rep] = vector
                # Save everything in the database
                self.datawriter.save(likelist, list(vector), simulations=simulations)
            history.record(burnInpar[i], self._logPs, 1)

        gamma = None
        self.accepts_ratio = 0

        # initialize the convergence diagnostic objects
        grConvergence = _GRConvergence()
        covConvergence = _CovarianceConvergence()

        # get the starting log objectivefunction and position for each of the
        # chains
        currentVectors = burnInpar[-1]
        currentLogPs = self._logPs[-1]

        # 2)now loop through and sample
        cur_iter = 0
        accepts_ratio_weighting = 1 - np.exp(-1.0 / 30)
        lastRecalculation = 0
        # continue sampling if:
        # 1) we have not drawn enough samples to satisfy the minimum number of iterations
        # 2) or any of the dimensions have not converged
        # 3) and we have not done more than the maximum number of iterations
        while cur_iter < maxChainDraws:
            if cur_iter == burnIn:
                history.start_sampling()

            # every 5th iteration (on average) allow a big jump (gamma = 1)
            if np.random.randint(5) == 0.0:
                gamma = np.array([1.0])
            else:
                # standard DE-MC jump rate 2.38 / sqrt(2 * d_eff)
                gamma = np.array([2.38 / np.sqrt(2 * DEpairs * dimensions)])

            if cur_iter >= burnIn:
                # Differential-evolution proposals from the recorded history.
                proposalVectors = _dream_proposals(
                    currentVectors, history, dimensions, nChains, DEpairs, gamma, .05, eps)
                for i in range(len(proposalVectors)):
                    proposalVectors[i] = self.check_par_validity(
                        proposalVectors[i])
                # print proposalVectors
            else:
                # During burn-in: fresh random draws instead of DE proposals.
                proposalVectors = []
                for i in range(nChains):
                    proposalVectors.append(self.parameter()['random'])
                    proposalVectors[i] = self.check_par_validity(
                        proposalVectors[i])

            # if self.bounds_ok(minbound,maxbound,proposalVectors,nChains):
            proposalLogPs = []
            old_simulationlist = simulationlist
            old_likelist = self._logPs[-1]
            new_simulationlist = []
            new_likelist = []

            # Evaluate every chain's proposal.
            param_generator = (
                (rep, list(proposalVectors[rep])) for rep in range(int(nChains)))
            for rep, vector, simulations in self.repeat(param_generator):
                new_simulationlist.append(simulations)
                like = self.objectivefunction(
                    evaluation=self.evaluation, simulation=simulations)
                self._logPs.append(like)
                new_likelist.append(like)
                proposalLogPs.append(like)
                self.status(rep, like, vector)

            # for i in range(nChains):
            #    simulations=self.model(proposalVectors[i])#THIS WILL WORK ONLY FOR MULTIPLE CHAINS
            #    new_simulationlist.append(simulations)
            #    like=self.objectivefunction(self.evaluation, simulations)
            #    new_likelist.append(like)
            #    proposalLogPs.append(like)

            # apply the metrop decision to decide whether to accept or reject
            # each chain proposal
            decisions, acceptance = self._metropolis_hastings(
                currentLogPs, proposalLogPs, nChains)
            try:
                self._update_accepts_ratio(accepts_ratio_weighting, acceptance)
            except DEMCZError:
                pass
            # if mAccept and cur_iter % 20 == 0:
            #    print self.accepts_ratio

            # choose from list of possible choices if 1d_decision is True at
            # specific index, else use default choice
            # np.choose(1d_decision[:,None], (list of possible choices, default
            # choice)
            save_likes=[]
            save_pars=[]
            save_sims=[]
            #print(len(self._logPs))
            for curchain in range(nChains):
                if decisions[curchain]:
                    # Proposal accepted: keep the new like/parameters/simulation.
                    save_likes.append(float(new_likelist[curchain]))
                    old_like[curchain]=float(new_likelist[curchain])
                    save_pars.append(proposalVectors[curchain])
                    save_sims.append(new_simulationlist[curchain])
                else:
                    save_likes.append(old_like[curchain])
                    save_pars.append(currentVectors[curchain])
                    save_sims.append(old_simulationlist[curchain])
            #print(len(save_pars) )
            # np.choose picks index 1 (the proposal) where decisions is True.
            currentVectors = np.choose(
                decisions[:, np.newaxis], (currentVectors, proposalVectors))
            currentLogPs = np.choose(decisions, (currentLogPs, proposalLogPs))
            # NOTE(review): the two selections below index with (new, old) while
            # the np.choose calls above use (current, proposal) — i.e. on an
            # accepted proposal the OLD simulation/like is kept here, which looks
            # inverted relative to currentVectors/currentLogPs. Confirm intent
            # before changing; behavior preserved as-is.
            simulationlist = [[new_simulationlist, old_simulationlist][
                int(x)][ix] for ix, x in enumerate(decisions)]
            likelist = list(
                np.choose(decisions[:, np.newaxis], (new_likelist, old_likelist)))

            # we only want to recalculate convergence criteria when we are past
            # the burn in period
            if cur_iter % thin == 0:

                historyStartMovementRate = adaptationRate
                # try to adapt more when the acceptance rate is low and less
                # when it is high
                if adaptationRate == 'auto':
                    historyStartMovementRate = min(
                        (.234 / self.accepts_ratio) * .5, .95)

                history.record(
                    currentVectors, currentLogPs, historyStartMovementRate, grConvergence=grConvergence.R)
                for chain in range(nChains):
                    # Skip database rows whose simulation contains +/- infinity.
                    if not any([x in simulationlist[chain] for x in [-np.Inf, np.Inf]]):
                        self.datawriter.save(save_likes[chain],
                                             save_pars[chain],
                                             simulations=save_sims[chain],
                                             chains=chain)

            # Recompute convergence diagnostics at geometrically spaced intervals.
            if history.nsamples > 0 and cur_iter > lastRecalculation * 1.1 and history.nsequence_histories > dimensions:
                lastRecalculation = cur_iter
                grConvergence.update(history)
                covConvergence.update(history, 'all')
                covConvergence.update(history, 'interest')
                if all(grConvergence.R < convergenceCriteria):
                    # Force loop exit on convergence.
                    cur_iter = maxChainDraws
                    print(
                        'All chains fullfil the convergence criteria. Sampling stopped.')
            cur_iter += 1

            # else:
            #    print 'A proposal vector was ignored'

            # Progress bar
            acttime = time.time()
            # Refresh progressbar every second
            if acttime - intervaltime >= 2:
                text = str(cur_iter) + ' of ' + str(repetitions)
                print(text)
                intervaltime = time.time()

        # 3) finalize
        # only make the second half of draws available because that's the only
        # part used by the convergence diagnostic
        self.history = history.samples
        self.histo = history
        self.iter = cur_iter
        self.burnIn = burnIn
        self.R = grConvergence.R
        text = 'Gelman Rubin R=' + str(self.R)
        print(text)
        self.repeat.terminate()
        try:
            self.datawriter.finalize()
        except AttributeError:  # Happens if no database was assigned
            pass
        print('End of sampling')
        text = '%i of %i (best like=%g)' % (
            self.status.rep, repetitions, self.status.objectivefunction)
        print(text)
        print('Best parameter set')
        print(self.status.params)
        text = 'Duration:' + str(round((acttime - starttime), 2)) + ' s'
        print(text)

    def _update_accepts_ratio(self, weighting, acceptances):
        """Exponentially weighted moving average of the acceptance rate."""
        self.accepts_ratio = weighting * \
            np.mean(acceptances) + (1 - weighting) * self.accepts_ratio

    def _metropolis_hastings(self, currentLogPs, proposalLogPs, nChains,
                             jumpLogP=0, reverseJumpLogP=0):
        """
        makes a decision about whether the proposed vector should be accepted
        """
        logMetropHastRatio = (np.array(
            proposalLogPs) - np.array(currentLogPs))  # + (reverseJumpLogP - jumpLogP)
        decision = np.log(np.random.uniform(size=nChains)) < logMetropHastRatio
        return decision, np.minimum(1, np.exp(logMetropHastRatio))
class _SimulationHistory(object):
group_indicies = {'all': slice(None, None)}
def __init__(self, maxChainDraws, nChains, dimensions):
self._combined_history = np.zeros(
(nChains * maxChainDraws, dimensions))
self._sequence_histories = np.zeros(
(nChains, dimensions, maxChainDraws))
self._logPSequences = np.zeros((nChains, maxChainDraws))
self._logPHistory = np.zeros(nChains * maxChainDraws)
self.r_hat = [] * dimensions
self._sampling_start = np.Inf
self._nChains = nChains
self._dimensions = dimensions
self.relevantHistoryStart = 0
self.relevantHistoryEnd = 0
def add_group(self, name, slices):
indexes = list(range(self._dimensions))
indicies = []
for s in slices:
indicies.extend(indexes[s])
self.group_indicies[name] = np.array(indicies)
def record(self, vectors, logPs, increment, grConvergence=None):
if len(vectors.shape) < 3:
self._record(vectors, logPs, increment, grConvergence)
else:
for i in range(vectors.shape[2]):
self._record(
vectors[:, :, i], logPs[:, i], increment, grConvergence)
def _record(self, vectors, logPs, increment, grConvergence):
self._sequence_histories[:, :, self.relevantHistoryEnd] = vectors
self._combined_history[(self.relevantHistoryEnd * self._nChains):(
self.relevantHistoryEnd * self._nChains + self._nChains), :] = vectors
self._logPSequences[:, self.relevantHistoryEnd] = logPs
self._logPHistory[(self.relevantHistoryEnd * self._nChains):
(self.relevantHistoryEnd * self._nChains + self._nChains)] = logPs
self.relevantHistoryEnd += 1
if np.isnan(increment):
self.relevantHistoryStart += 0
else:
self.relevantHistoryStart += increment
self.r_hat.append(grConvergence)
def start_sampling(self):
self._sampling_start = self.relevantHistoryEnd
@property
def sequence_histories(self):
return self.group_sequence_histories('all')
def group_sequence_histories(self, name):
return self._sequence_histories[:, self.group_indicies[name], int(np.ceil(self.relevantHistoryStart)):self.relevantHistoryEnd]
@property
def nsequence_histories(self):
return self.sequence_histories.shape[2]
@property
def combined_history(self):
return self.group_combined_history('all')
def group_combined_history(self, name):
# print self._combined_history
# print self.relevantHistoryStart
return self._combined_history[(int(np.ceil(self.relevantHistoryStart)) * self._nChains):(self.relevantHistoryEnd * self._nChains), self.group_indicies[name]]
@property
def ncombined_history(self):
return self.combined_history.shape[0]
@property
def samples(self):
return self.group_samples('all')
def group_samples(self, name):
if self._sampling_start < np.Inf:
start = int(
max(np.ceil(self.relevantHistoryStart), self._sampling_start) * self._nChains)
end = (self.relevantHistoryEnd * self._nChains)
else:
start = 0
end = 0
return self._combined_history[start:end, self.group_indicies[name]]
@property
def nsamples(self):
return self.samples.shape[0]
@property
def combined_history_logps(self):
return self._logPHistory[(np.ceil(self.relevantHistoryStart) * self._nChains):(self.relevantHistoryEnd * self._nChains)]
def _random_no_replace(sampleSize, populationSize, numSamples):
samples = np.zeros((numSamples, sampleSize), dtype=int)
# Use Knuth's variable names
n = sampleSize
N = populationSize
i = 0
t = 0 # total input records dealt with
m = 0 # number of items selected so far
while i < numSamples:
t = 0
m = 0
while m < n:
# call a uniform(0,1) random number generator
u = np.random.uniform()
if (N - t) * u >= n - m:
t += 1
else:
samples[i, m] = t
t += 1
m += 1
i += 1
return samples
class _CovarianceConvergence:
    """
    Compares the covariance structure of the first and second half of a
    history: for a converged chain the two halves should span the same
    (co)variance ellipsoid, so rv() should be close to zero in every direction.
    """

    # Kept for backward compatibility with class-level access; instances get
    # their own dict in __init__ because this class-level attribute used to be
    # shared (and mutated) across every instance.
    relativeVariances = {}

    def __init__(self):
        # Per-instance results keyed by group name.
        self.relativeVariances = {}

    def update(self, history, group):
        """Recompute the relative variances for the given history group."""
        relevantHistory = history.group_combined_history(group)
        self.relativeVariances[group] = self.rv(relevantHistory)

    @staticmethod
    def rv(relevantHistory):
        """Log relative sizes of the second half's covariance basis projected
        onto the first half's basis; ~0 everywhere means the halves agree."""
        end = relevantHistory.shape[0]
        midpoint = int(np.floor(end / 2))

        covariance1 = np.cov(relevantHistory[0:midpoint, :].transpose())
        covariance2 = np.cov(relevantHistory[midpoint:end, :].transpose())

        _eigenvalues1, _eigenvectors1 = _eigen(covariance1)
        basis1 = (np.sqrt(_eigenvalues1)[np.newaxis, :] * _eigenvectors1)

        _eigenvalues2, _eigenvectors2 = _eigen(covariance2)
        basis2 = (np.sqrt(_eigenvalues2)[np.newaxis, :] * _eigenvectors2)

        # project the second basis onto the first basis
        try:
            projection = np.dot(np.linalg.inv(basis1), basis2)
        except np.linalg.LinAlgError:
            # np.linalg.linalg.LinAlgError was a private alias removed in
            # NumPy 2.0; the public exception is np.linalg.LinAlgError.
            projection = (np.array(basis1) * np.nan)
            print('Exception happend!')

        # find the relative size in each of the basis1 directions
        return np.log(np.sum(projection**2, axis=0)**.5)
def _eigen(a, n=-1):
# if we got a 0-dimensional array we have to turn it back into a 2
# dimensional one
if len(a.shape) == 0:
a = a[np.newaxis, np.newaxis]
if n == -1:
n = a.shape[0]
_eigenvalues, _eigenvectors = np.linalg.eigh(a)
indicies = np.argsort(_eigenvalues)[::-1]
return _eigenvalues[indicies[0:n]], _eigenvectors[:, indicies[0:n]]
def _dream_proposals(currentVectors, history, dimensions, nChains, DEpairs, gamma, jitter, eps):
    """
    Build one DE-MC proposal per chain from differences of random past states:
    x' = x + (1 + e) * gamma * (sum of DEpairs differences) + E,
    where e ~ N(0, jitter) and E ~ N(0, eps) per component.
    """
    nHistory = history.ncombined_history
    # Flattened index of each chain's own most recent entry in the history.
    ownIndex = np.arange(nHistory - nChains, nHistory)[:, np.newaxis]
    past = history.combined_history

    # Draw 2*DEpairs distinct past states per chain from a population one
    # smaller than the history, then shift indices at or above the chain's own
    # slot so a chain never uses its own current state in the difference.
    picks = _random_no_replace(DEpairs * 2, nHistory - 1, nChains)
    picks += (picks >= ownIndex)

    positive = np.sum(past[picks[:, 0:DEpairs], :], axis=1)
    negative = np.sum(past[picks[:, DEpairs:(DEpairs * 2)], :], axis=1)
    chainDifferences = positive - negative

    smallJitter = np.random.normal(0, jitter, (nChains, dimensions))
    # could replace eps with 1e-6 here
    bigJitter = np.random.normal(0, eps, (nChains, dimensions))

    return currentVectors + \
        (1 + smallJitter) * gamma[:, np.newaxis] * chainDifferences + bigJitter
def _dream2_proposals(currentVectors, history, dimensions, nChains, DEpairs,
                      gamma, jitter, eps):
    """
    Propose by resampling one random past state per chain (no differences).

    NOT USED ATM.
    """
    nHistory = history.ncombined_history
    # Flattened index of each chain's own most recent entry in the history.
    ownIndex = np.arange(nHistory - nChains, nHistory)[:, np.newaxis]
    past = history.combined_history

    # One distinct past state per chain, skipping the chain's own slot.
    picks = _random_no_replace(1, nHistory - 1, nChains)
    picks += (picks >= ownIndex)

    return past[picks[:, 0], :]
class _GRConvergence:
"""
Gelman Rubin convergence diagnostic calculator class. It currently only calculates the naive
version found in the first paper. It does not check to see whether the variances have been
stabilizing so it may be misleading sometimes.
"""
_R = np.Inf
_V = np.Inf
_VChange = np.Inf
_W = np.Inf
_WChange = np.Inf
def __init__(self):
pass
def _get_R(self):
return self._R
R = property(_get_R)
@property
def VChange(self):
return self._VChange
@property
def WChange(self):
return self._WChange
def update(self, history):
"""
Updates the convergence diagnostic with the current history.
"""
N = history.nsequence_histories
sequences = history.sequence_histories
variances = np.var(sequences, axis=2)
means = np.mean(sequences, axis=2)
withinChainVariances = np.mean(variances, axis=0)
betweenChainVariances = np.var(means, axis=0) * N
varEstimate = (1 - 1.0 / N) * withinChainVariances + \
(1.0 / N) * betweenChainVariances
self._R = np.sqrt(varEstimate / withinChainVariances)
self._WChange = np.abs(np.log(withinChainVariances / self._W)**.5)
self._W = withinChainVariances
self._VChange = np.abs(np.log(varEstimate / self._V)**.5)
self._V = varEstimate
| p-lauer/spotpy | spotpy/algorithms/demcz.py | Python | mit | 26,423 | [
"Gaussian"
] | ea5db8f08c86320a3e44b0a6f9e1f11d58c6eb952bc5cc8a4f8234f8bf6e5a9f |
"""
Test basic molecular features.
"""
import numpy as np
import unittest
from deepchem.feat.basic import MolecularWeight, RDKitDescriptors
class TestMolecularWeight(unittest.TestCase):
    """Unit tests for the MolecularWeight featurizer."""

    def setUp(self):
        """Build an aspirin molecule and the featurizer under test."""
        from rdkit import Chem
        aspirin_smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
        self.mol = Chem.MolFromSmiles(aspirin_smiles)
        self.engine = MolecularWeight()

    def testMW(self):
        """Aspirin's molecular weight should be ~180 Da."""
        computed = self.engine([self.mol])
        assert np.allclose(computed, 180, atol=0.1)
class TestRDKitDescriptors(unittest.TestCase):
    """Unit tests for the RDKitDescriptors featurizer."""

    def setUp(self):
        """Build an aspirin molecule and the featurizer under test."""
        from rdkit import Chem
        aspirin_smiles = 'CC(=O)OC1=CC=CC=C1C(=O)O'
        self.mol = Chem.MolFromSmiles(aspirin_smiles)
        self.engine = RDKitDescriptors()

    def testRDKitDescriptors(self):
        """The ExactMolWt descriptor for aspirin should be ~180 Da."""
        values = self.engine([self.mol])
        column = self.engine.descriptors.index('ExactMolWt')
        assert np.allclose(values[0, column], 180, atol=0.1)
| ktaneishi/deepchem | deepchem/feat/tests/test_basic.py | Python | mit | 1,069 | [
"RDKit"
] | be0de9afe20b68ec85221ed506fa72a01805ac29920612583c593fe0225658cb |
"""
Add UUIDs to workflows
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
from galaxy.model.custom_types import UUIDType, TrimmedString
import logging
log = logging.getLogger( __name__ )
metadata = MetaData()
"""
Because both workflow and job requests can be determined
based the a fixed data structure, their IDs are based on
hashing the data structure
"""
workflow_uuid_column = Column( "uuid", UUIDType, nullable=True )
def display_migration_details():
    # Legacy announcement helper (Python 2 print statement).
    print "This migration script adds a UUID column to workflows"
def upgrade(migrate_engine):
    """Add a nullable 'uuid' column to the workflow table.

    Errors are logged and swallowed so a failed ALTER does not abort the
    whole migration chain (Python 2 / sqlalchemy-migrate style).
    """
    print __doc__
    metadata.bind = migrate_engine
    metadata.reflect()
    # Add the uuid column to the workflow table; the Column object is the
    # module-level `workflow_uuid_column` defined above.
    try:
        workflow_table = Table( "workflow", metadata, autoload=True )
        workflow_uuid_column.create( workflow_table )
        # Sanity check: after create() the module-level column must be the
        # one attached to the reflected table.
        assert workflow_uuid_column is workflow_table.c.uuid
    except Exception, e:
        print str(e)
        log.error( "Adding column 'uuid' to workflow table failed: %s" % str( e ) )
        return
def downgrade(migrate_engine):
    """Remove the 'uuid' column added by upgrade()."""
    metadata.bind = migrate_engine
    metadata.reflect()
    # Drop the workflow table's uuid column.
    try:
        workflow_table = Table( "workflow", metadata, autoload=True )
        workflow_uuid = workflow_table.c.uuid
        workflow_uuid.drop()
    except Exception, e:
        # Best-effort: log at debug level and continue so the downgrade
        # chain keeps running.
        log.debug( "Dropping 'uuid' column from workflow table failed: %s" % ( str( e ) ) )
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/model/migrate/versions/0121_workflow_uuids.py | Python | gpl-3.0 | 1,498 | [
"Galaxy"
] | 7c1ee223c4a3fc00e1334d6697e698a31c67b37e91a2de3b7a7eba4de9784fa9 |
#!/usr/bin/env python
'''VIC testing command line interface'''
from __future__ import print_function
import os
import argparse
from collections import namedtuple
import psutil
import string
import subprocess
from subprocess import check_call
import datetime
import getpass
import socket
import time
import numpy as np
from tonic.models.vic.vic import VIC
# Per-machine launch description: `profile` is a list of kwarg dicts (one per
# scaling run), `template` an optional batch-script template (None = run
# locally), `submit` the queue submission command, `mpiexec` the MPI launcher.
host_config = namedtuple('host_config',
                         ('profile', 'template', 'submit', 'mpiexec'))
def log2_range(m):
    '''
    Return the powers of two 1, 2, 4, ... up to a maximum value of ``m``.

    Parameters
    ----------
    m : int
        Inclusive upper bound for the generated powers of two; must be >= 1.

    Returns
    -------
    numpy.ndarray
        Integer array ``[2**0, 2**1, ..., 2**floor(log2(m))]``.
    '''
    n = int(np.floor(np.log2(m))) + 1
    # Use the builtin ``int``: the ``np.int`` alias was deprecated in
    # NumPy 1.20 and removed in 1.24, where it raises AttributeError.
    return np.exp2(np.arange(n)).astype(int)
table_header = '''----------------- START VIC SCALING PROFILE -----------------
Date : $date
Machine : $hostname
User : $user
VIC Test Git Version : $git_version
VIC Executable : $vic_exe
VIC Global Parameter File : $vic_global
VIC Executable Version Info
---------------------------
$vic_version
Cores | Time (Seconds)
----------------------
'''
# Known execution hosts. Batch-script templates are expanded with
# string.Template.safe_substitute() in run_scaling(), so `$np`, `$vic_exe`,
# `$vic_global`, `$timing_table_file`, `$select`, `$mpiprocs` and `$i` are
# placeholders, while shell constructs like `$(date +%s)` and
# `${BC_MPI_TASKS_ALLOC}` are left untouched.
hosts = {
    # Run locally: profile over 1, 2, 4, ... cores up to the machine's count.
    # (Loop variable renamed from `np` to avoid shadowing the numpy module.)
    'local': host_config(profile=[dict(np=nproc) for nproc in
                                  log2_range(psutil.cpu_count())],
                         submit=None,
                         template=None,
                         mpiexec=os.getenv('MPIEXEC', 'mpiexec')),
    # UW Hydra cluster (SGE scheduler); valid core counts are 1-64.
    'hydra': host_config(profile=[dict(np=nproc) for nproc in log2_range(64)],
                         submit='qsub', mpiexec='mpiexec',
                         template='''#!/bin/bash
#
#$ -N VIC_scaling_test_$np
#$ -cwd
#$ -j y
#$ -S /bin/bash
#$ -m be
#$ -pe orte $np
# Qsub template for UW's Hydra Cluster
# Scheduler: SGE
# Valid values for np 1-64
if [ "$np" -gt "64" ]; then
echo "$np exceeds maximum number of processes on Hydra"
exit 1
fi
START=$(date +%s)
$mpiexec -np $np $vic_exe -g $vic_global
END=$(date +%s)
DIFF=$(echo "$END - $START" | bc)
printf "%5s | %f\n" $np $DIFF >> $timing_table_file'''),
    # ERDC Topaz (PBS scheduler): 36 cores per node, scale by node count.
    'topaz': host_config(profile=[dict(select=1, mpiprocs=1),
                                  dict(select=1, mpiprocs=3),
                                  dict(select=1, mpiprocs=9),
                                  dict(select=1, mpiprocs=18),
                                  dict(select=1, mpiprocs=36),
                                  dict(select=2, mpiprocs=36),
                                  dict(select=3, mpiprocs=36),
                                  dict(select=4, mpiprocs=36),
                                  dict(select=5, mpiprocs=36),
                                  dict(select=6, mpiprocs=36),
                                  dict(select=8, mpiprocs=36),
                                  dict(select=10, mpiprocs=36),
                                  dict(select=12, mpiprocs=36)],
                         submit='qsub', mpiexec='mpiexec_mpt',
                         template='''#!/bin/bash
#PBS -N VIC_scaling_test_$i
#PBS -q standard
#PBS -A NPSCA07935242
#PBS -l application=VIC
#PBS -l select=$select:ncpus=36:mpiprocs=$mpiprocs
#PBS -l walltime=06:00:00
#PBS -j oe
# Qsub template for ERDC TOPAZ
# Scheduler: PBS
module load usp-netcdf/intel-15.0.3/4.3.3.1
START=$(date +%s)
mpiexec_mpt -np ${BC_MPI_TASKS_ALLOC} $vic_exe -g $vic_global
END=$(date +%s)
DIFF=$(echo "$END - $START" | bc)
printf "%5s | %f\n" ${BC_MPI_TASKS_ALLOC} $DIFF >> $timing_table_file''')}
OUT_WIDTH = 100
description = '''
VIC Test Suite
-------------------------------------------------------------------------------
This is the VIC Profiling Test Suite. There are 2 main test types:
1. Gprof Profiling: This test will generate a profiling call graph using
gprof. This test requires building your VIC executable with the
flags `-pg`.
2. Scaling: This test will generate a MPI scaling timing table.
-------------------------------------------------------------------------------
'''
epilog = '''
-------------------------------------------------------------------------------
For questions about the development or use of VIC or use of this test module,
please email the VIC users list serve at vic_users@u.washington.edu.
-------------------------------------------------------------------------------
'''
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
                      argparse.RawTextHelpFormatter):
    # Combine "show argument defaults" with "preserve raw description/epilog
    # formatting" in --help output; no extra behavior needed.
    pass
def main():
    '''Parse command-line options and dispatch to the requested test kind.'''
    # dates and times: the YYYYMMDD stamp feeds the default timing filename
    starttime = datetime.datetime.now()
    ymd = starttime.strftime('%Y%m%d')

    # parse command line options
    parser = argparse.ArgumentParser(description=description, epilog=epilog,
                                     formatter_class=CustomFormatter)
    parser.add_argument('vic_exe', type=str,
                        help='VIC executable to test')
    parser.add_argument('--kind', type=str,
                        help='Specify which type of test should be run',
                        choices=['scaling', 'profile'],
                        default='scaling')
    parser.add_argument('--host', type=str,
                        help='Host machine to run test on, if not specified, '
                        'test will be run locally',
                        choices=list(hosts.keys()),
                        default='local')
    parser.add_argument('--global_param', '-g', type=str,
                        help='global parameter file to test')
    parser.add_argument('--timing', '-t', type=str,
                        default='vic_timing_{}.txt'.format(ymd),
                        help='path to timing file')
    parser.add_argument('--clean', action='store_true',
                        help='Clean up run files')
    parser.add_argument('--test', action='store_true',
                        help='Test the setup but do not run VIC')
    args = parser.parse_args()

    # --global_param is effectively required; enforced manually here
    if args.global_param is None:
        raise ValueError('Global Parameter option is required')

    # dispatch on test kind
    if args.kind == 'scaling':
        run_scaling(args)
    elif args.kind == 'profile':
        run_profiling(args)
    else:
        raise ValueError('Unknown test kind %s' % args.kind)
def run_profiling(args):
    '''Run the gprof profiling driver script against the given VIC build.'''
    command_template = './profiling/run_gprof.bash -e {vic_exe} -g {vic_global}'
    cmd = command_template.format(vic_exe=args.vic_exe,
                                  vic_global=args.global_param)
    check_call(cmd, shell=True)
def run_scaling(args):
    '''wrapper function for scaling tests

    Writes the timing-table header, then runs VIC once per entry in the
    host's profile list: on a cluster it renders and submits a batch
    script (the script itself appends its timing row); locally it times
    the run directly and appends the row here.
    '''
    config = hosts[args.host]
    vic_exe = VIC(args.vic_exe)

    # write timing file header
    header = string.Template(table_header)
    header_kwargs = get_header_info(args.vic_exe, args.global_param)
    header = header.safe_substitute(**header_kwargs)
    with open(args.timing, 'w') as f:
        f.write(header)

    for i, kwargs in enumerate(config.profile):
        if config.template:
            # run on a cluster of some kind
            # start by printing the template
            print('-'.ljust(OUT_WIDTH, '-'))
            print('{host} template'.format(
                host=args.host).center(OUT_WIDTH))
            print('-'.ljust(OUT_WIDTH, '-'))
            print(config.template)
            print('-'.ljust(OUT_WIDTH, '-'))
            # render the batch script with this profile entry's kwargs
            template = string.Template(config.template)
            run_string = template.safe_substitute(
                vic_exe=args.vic_exe, vic_global=args.global_param,
                timing_table_file=args.timing, i=i, **kwargs)
            run_file = 'vic_{host}_{i}.sh'.format(host=args.host, i=i)
            with open(run_file, 'w') as f:
                f.write(run_string)
            # submit the rendered script (skipped under --test)
            cmd = '{submit} {run_file}'.format(submit=config.submit,
                                               run_file=run_file)
            print(cmd)
            if not args.test:
                check_call(cmd, shell=True)
            if args.clean:
                os.remove(run_file)
        else:
            # run locally, timing each run with wall-clock time
            n = kwargs['np']
            print('Running {} with {} processors'.format(args.vic_exe, n))
            if not args.test:
                start = time.time()
                vic_exe.run(args.global_param, mpi_proc=int(n))
                end = time.time()
                diff = end - start
                with open(args.timing, 'a') as f:
                    f.write('%5s | %.2f\n' % (n, diff))

    print('See %s for scaling table' % args.timing)
def get_header_info(vic_exe, vic_global):
    '''Collect the metadata used to fill the timing-table header template.'''
    git_version = subprocess.check_output(
        ['git', 'describe', '--abbrev=4',
         '--dirty', '--always', '--tags']).decode()
    info = {
        'date': datetime.datetime.now(),
        'hostname': socket.gethostname(),
        'user': getpass.getuser(),
        'git_version': git_version,
        'vic_exe': vic_exe,
        'vic_global': vic_global,
    }
    try:
        info['vic_version'] = subprocess.check_output(
            [vic_exe, '-v']).decode()
    except subprocess.CalledProcessError:
        # The executable may not support '-v'; leave the field unset so the
        # template's $vic_version placeholder survives safe_substitute().
        pass
    return info
if __name__ == '__main__':
main()
| dgergel/VIC | tests/run_profiling.py | Python | gpl-2.0 | 9,136 | [
"NetCDF"
] | a3b9ac16f08ea190bb63b3a937172c3314331e37963e9da1e7b862f7a57ef445 |
#!/usr/bin/python
"""
Python source code - Reads a file containing item stats in base 'identify' format,
splits it into appropriate variables, and stores them in the stat DB
"""
import sys
import psycopg2
import locale
import re
from optparse import OptionParser # replace with argparse in 2.7
from datetime import datetime, timedelta
from subprocess import Popen, PIPE
timestart = datetime.now()
locale.setlocale(locale.LC_ALL, 'en_US')
parser = OptionParser()
parser.add_option("-i", "--identify", default=False, metavar='file',
action='store', type='string',
help='Parse a file and create SQL for DB import.')
parser.add_option("-s", "--short", action='store_true', default=False,
help='Parse item DB for short_stats column.')
parser.add_option("-l", "--long", action='store_true', default=False,
help='Parse item DB for long_stats column.')
parser.add_option("-e", "--legacy", action='store_true', default=False,
help='Parse legacy item DB for import to new items DB.')
(options, args) = parser.parse_args()
conn = psycopg2.connect(database='torildb', user='kalkinine')
def db(query, params):
    # Thin helper around psycopg2: execute `query` with `params`.
    # SELECT statements return all rows; anything else is committed.
    try:
        cur = conn.cursor()
        cur.execute(query, (params))
        if query.startswith("SELECT"):
            return cur.fetchall()
        else:
            conn.commit()
    except psycopg2.DatabaseError, e:
        # NOTE(review): a failed SELECT falls through this branch and the
        # function returns None silently; only non-SELECT errors roll back,
        # print, and exit -- confirm this asymmetry is intended.
        if not query.startswith("SELECT"):
            if conn:
                conn.rollback()
            print 'Error %s' % e
            sys.exit(1)
# import short_stats from torileq website output
# import short_stats from torileq website output
def import_legacy():
    # Read legacy "<name> * <stats>" lines from short_stats.txt (via cat)
    # and copy each whole line into items.short_stats keyed by item name.
    cmd = 'cat'
    file = 'short_stats.txt'
    stats = Popen([cmd, file], stdout=PIPE, stderr=PIPE).communicate()
    if stats[1] != '':
        # anything on stderr aborts the import
        print 'Error: '+stats[1]
    else:
        lines = stats[0].splitlines()
        for line in lines:
            line = line.strip()
            # the item name is everything before the first '*'
            item = line.split('*', 1)
            name = item[0].strip()
            query = "UPDATE items SET short_stats = %s WHERE item_name = %s"
            params = (line, name)
            db(query, params)
def parse_identify():
today = datetime.today()
cmd = 'cat'
file = options.identify
stats = Popen([cmd, file], stdout=PIPE, stderr=PIPE).communicate()
if stats[1] != '':
print 'Error: '+stats[1]
else:
# put all flags/restricts, or effects, on one line
items = re.sub(r'(?<=[A-Z]){2}\n(?=[A-Z]{2})',' ', stats[0])
# put enchant info on one line
items = re.sub(r'\n(?=Duration)',' ', items)
# split into separate items
items = items.split('\n\n')
for item in items:
# instantiate a blank item template with all variables at default
(flags, restrs, slots, effs) = [], [], [], []
(name, keys, type, ench, dice, wtype, wclass) = '', '', '', '', '', '', ''
(wt, val, ac, crit, multi) = 0, 0, 0, 0, 0
(dam_pct, freq_pct, dam_mod, duration) = 0, 0, 0, 0
(attrib1, attrib2, ptype) = '', '', ''
(atval1, atval2, qual, stut, mlvl) = 0, 0, 0, 0, 0
(pgs, lvl, apps, mchg, cchg) = 0, 0, 0, 0, 0
lines = item.splitlines()
for line in lines:
line = line.strip()
if "Name '" in line: # item_name
name = line.replace("Name '",'')
name = name[:len(name)-1]
elif "Keyword '" in line: # keywords, type
# bug: if keywords too long, can't split properly
#regex: re.sub(r"Keyword '<stuff>', Item type: [A-Z_]+",' ',items)
#with a \n anywhere in the above line after Keyword '
kt = line.split(',')
keys = kt[0].replace("Keyword '",'')
keys = keys.strip("' ")
type = kt[1].replace("Item type:",'')
type = type.strip()
elif "Item can be worn" in line: # worn slots
worn = line.replace("Item can be worn on:",'')
worn = worn.strip()
slots = worn.split()
elif "Item will give you" in line: # effects
effs = line.replace("Item will give you following abilities:",'')
effs = effs.split() # NOBITS is equivalent to null
elif "Item is:" in line: # restricts/flags, NOBITSNOBITS null
rfs = line.replace("Item is:",'')
rfs = rfs.split()
for rf in rfs:
if rf.startswith("ANTI-") or rf.startswith("NO-"):
restrs.append(rf)
else:
flags.append(rf)
elif "Weight:" in line: # wt/val
wv = line.split(',')
wt = wv[0].replace("Weight:",'')
wt = int(wt.strip())
val = wv[1].replace("Value:",'')
val = int(val.strip())
elif "AC-apply is" in line: # AC
ac = line.replace("AC-apply is",'')
ac = int(ac.strip())
elif "Damage Dice" in line: # old weapon dice
dice = line.replace("Damage Dice are '",'')
dice = dice.strip("' ")
elif "Class:" in line: # new weapon, type/class
tc = line.replace("Type: ",'')
tc = tc.replace("Class: ",'')
tc = tc.rsplit(None, 1)
wtype = tc[0].strip()
wclass = tc[1].strip()
elif "Crit Range:" in line: # new weapon, dice/crit/multi
dcc = line.split('%')
dc = dcc[0].replace("Damage:",'')
dc = dc.replace("Crit Range:",'')
dc = dc.split()
dice = dc[0]
crit = int(dc[1])
multi = dcc[1].replace("Crit Bonus: ",'')
multi = multi.strip('x ')
multi = int(multi)
elif "Frequency:" in line: # enchantment
enchs = line.replace('Type:','')
enchs = enchs.replace('Damage:','')
enchs = enchs.replace('Frequency:','')
enchs = enchs.replace('Modifier:','')
enchs = enchs.replace('Duration:','')
enchs = enchs.replace('%','')
enchs = enchs.rsplit(None, 4)
ench = enchs[0]
dam_pct = int(enchs[1])
freq_pct = int(enchs[2])
dam_mod = int(enchs[3])
duration = int(enchs[4])
elif "Affects :" in line: # attribs
attrs = line.replace("Affects :",'')
attrs = attrs.replace('by','By')
attrs = attrs.split('By')
if attrib1 == '':
attrib1 = attrs[0]
atval1 = int(attrs[1])
else:
attrib2 = attrs[0]
atval2 = int(attrs[1])
elif "Total Pages:" in line:
# Total Pages: 300
pgs = line.replace('Total Pages:','')
pgs = int(pgs.strip())
elif "capacity, charged" in line:
# Has 700 capacity, charged with 700 points.
psps = line.split('cap')
psp = psps[0].replace('Has','')
psp = int(psp.strip())
elif "Poison affects" in line:
# Poison affects as blindness at level 25.
poiss = line.split('at')
lvl = poiss[1].replace('level','')
lvl = int(lvl.strip('. '))
ptype = poiss[0].replace('Poison affects as','')
ptype = ptype.strip()
elif "Applications remaining" in line:
# Applications remaining: 10
apps = line.replace('Applications remaining:','')
apps = line.strip()
elif "Stutter:" in line:
# Quality: 15, Stutter: 0, Min Level: 40
ins = line.split(',')
qual = ins[0].replace('Quality:','')
qual = int(qual.strip())
stut = ins[1].replace('Stutter:','')
stut = int(stut.strip())
mlvl = ins[2].replace('Min Level:','')
mlvl = int(mlvl.strip())
elif "charges, with" in line: # wand, staff
# Has 5 charges, with 4 charges left.
chgs = line.split(',')
mchg = chgs[0].replace('charges','')
mchg = mchg.replace('Has','')
mchg = int(mchg.strip())
cchg = chgs[1].replace('with','')
cchg = cchg.replace('charges left.','')
cchg = int(cchg.strip())
elif "spells of:" in line: # potion/scroll
# Level 35 spells of: fly on its own line :/
lvl = line.replace('Level','')
lvl = lvl.replace('spells of:','')
lvl = int(lvl.strip())
# input spells manually?
elif "spell of:" in line: # staff/wand, spell on its own line
lvl = line.replace('Level','')
lvl = lvl.replace('spell of:','')
lvl = int(lvl.strip())
# input spell manually?
elif "Special Effects :" in line: # proc, can be multline sigh
pass # input manually?
elif "Special Bonus" in line: # can be plural or singular
pass # manually?
elif "Combat Critical :" in line:
pass # manually?
# back to 'for item in items' iteration
# check if exact name is already in DB
query = "SELECT * FROM items WHERE item_name = %s"
params = (name,)
rows = db(query, params)
if len(rows) > 0:
# if already in DB, check each stat to see if it matches
#print name+' is already in the DB.'
# if it does match, update the date of last_id
if len(rows) > 0:
pass
# if it doesn't match, mark as potential update and compile
# update queries
else:
pass
# if it's not in the DB, compile full insert queries
else:
sql = ('INSERT INTO items (item_name, keywords, weight, '
'c_value, item_type, full_stats, last_id) '
'VALUES(%s, %s, %s, %s, %s, %s, %s) '
'RETURNING item_id;')
params = (name, keys, wt, val, type, item, today)
cur = conn.cursor()
print cur.mogrify(sql, (params))
id = 0 # cur.execute(sql, params) ? conn.commit() ? fetchall() ?
# build item_slots insert
sql = ('INSERT INTO item_slots VALUES (%s, %s)')
for slot in slots:
if slot != 'NOBITS':
params = (id, slot)
print cur.mogrify(sql, (params))
# build item_specials insert
sql = ('INSERT INTO item_specials VALUES (%s, %s)')
if item_type == '':
pass
# build item_attribs insert
# build item_flags insert
# build item_restricts insert
# build item_effects insert
# build item_enchants insert
# build item_resists insert (when it's parseable)
# send all insert/update queries as a .sql file to review
# manual updates: resists (for now) procs, spells for potion/scroll/staff/wand,
# container holds/wtless, zone, quest/used/rare/invasion/store
# generate short_stats from new items tables
def short_stats():
query = "SELECT item_id FROM items"
params = ''
ids = db(query, params)
for id in ids:
query = ("SELECT item_name, "
"INITCAP(item_type), weight, c_value, "
"from_zone, last_id, is_rare, from_store, from_quest, "
"for_quest, from_invasion, out_of_game, no_identify "
"FROM items WHERE item_id = %s")
params = (id[0],)
item = db(query, params)
i = item[0][0]
query = "SELECT INITCAP(slot_abbr) FROM item_slots WHERE item_id = %s"
slots = db(query, params)
if len(slots) > 0: # if item has worn slots
for slot in slots:
i += ' ('+slot[0]+')'
query = ("SELECT UPPER(spec_abbr), spec_value FROM item_specials "
"WHERE item_id = %s AND item_type = 'armor'")
specs = db(query, params)
if len(specs) > 0: # if item has AC because it's type Armor
for spec in specs:
i += ' '+spec[0]+':'+str(spec[1])
# put in attribs
query = ("SELECT INITCAP(attrib_abbr), attrib_value "
"FROM item_attribs WHERE item_id = %s")
attrs = db(query, params)
if len(attrs) > 0:
for att in attrs:
i += ' '+att[0]+':'+str(att[1])
query = ("SELECT INITCAP(resist_abbr), resist_value "
"FROM item_resists WHERE item_id = %s")
resi = db(query, params)
if len(resi) > 0: # if item has resistances
for res in resi:
i += ' '+res[0]+':'+str(res[1])+'%'
query = ("SELECT item_type, INITCAP(spec_abbr), INITCAP(spec_value) "
"FROM item_specials "
"WHERE item_id = %s AND item_type != 'armor'")
specs = db(query, params)
if len(specs) > 0: # if item has specials, like weapon or instrument
special = ' * ('+item[0][1]+')'
if specs[0][0] == 'crystal' or specs[0][0] == 'spellbook' or \
specs[0][0] == 'comp_bag' or specs[0][0] == 'ammo':
for spec in specs:
special += ' '+spec[1]+':'+spec[2]
elif specs[0][0] == 'container':
(holds, wtless) = '', ''
for spec in specs:
if spec[1] == 'Holds':
holds = ' '+spec[1]+':'+spec[2]
elif spec[1] == 'Wtless':
wtless = ' '+spec[1]+':'+spec[2]
special += holds+wtless
elif specs[0][0] == 'poison':
(lvl, type, apps) = '', '', ''
for spec in specs:
if spec[1] == 'Level':
lvl = ' Lvl:'+spec[2]
elif spec[1] == 'Type':
type = ' '+spec[1]+':'+spec[2]
elif spec[1] == 'Apps':
apps = ' '+spec[1]+':'+spec[2]
special += lvl+type+apps
elif specs[0][0] == 'scroll' or specs[0][0] == 'potion':
(lvl, sp1, sp2, sp3) = '', '', '', ''
for spec in specs:
if spec[1] == 'Level':
lvl = ' Lvl:'+spec[2]
elif spec[1] == 'Spell1':
sp1 = ' '+spec[2]
elif spec[1] == 'Spell2' or spec[1] == 'Spell3':
sp2 = ' - '+spec[2]
special += lvl+sp1+sp2+sp3
elif specs[0][0] == 'staff' or specs[0][0] == 'wand':
(lvl, sp, ch) = '', '', ''
for spec in specs:
if spec[1] == 'Level':
lvl = ' Lvl:'+spec[2]
elif spec[1] == 'Spell':
sp = ' '+spec[2]
elif spec[1] == 'Charges':
ch = ' '+spec[1]+':'+spec[2]
special += lvl+sp+ch
elif specs[0][0] == 'instrument':
(qua, stu, min) = '', '', ''
for spec in specs:
if spec[1] == 'Quality':
qua = ' '+spec[1]+':'+spec[2]
elif spec[1] == 'Stutter':
stu = ' '+spec[1]+':'+spec[2]
elif spec[1] == 'Min_Level':
min = ' '+spec[1]+':'+spec[2]
special += qua+stu+min
elif specs[0][0] == 'weapon':
(dice, type, clas, crit, multi) = '', '', '', '', ''
for spec in specs:
if spec[1] == 'Dice':
dice = ' '+spec[1]+':'+spec[2]
elif spec[1] == 'Crit':
crit = ' '+spec[1]+':'+spec[2]+'%'
elif spec[1] == 'Multi':
multi = ' '+spec[1]+':'+spec[2]+'x'
elif spec[1] == 'Class':
clas = ' ('+spec[1]+':'+spec[2]
elif spec[1] == 'Type':
type = ' '+spec[1]+':'+spec[2]+')'
special += dice+crit+multi+clas+type
i += special
query = ("SELECT INITCAP(effect_abbr) "
"FROM item_effects WHERE item_id = %s")
effects = db(query, params)
if len(effects) > 0: # if item has effects like infra
i += ' *'
for eff in effects:
i += ' '+eff[0]
query = "SELECT proc_name FROM item_procs WHERE item_id = %s"
procs = db(query, params)
if len(procs) > 0: # if item has procs
process = ' * Procs:'
for proc in procs:
if process == ' * Procs:':
process += ' '+proc[0]
else:
process += ' - '+proc[0]
i += process
query = ("SELECT INITCAP(ench_name), "
"dam_pct, freq_pct, sv_mod, duration "
"FROM item_enchants WHERE item_id = %s")
enchs = db(query, params)
if len(enchs) > 0: # if item has weapon enchantment
enchant = ' *'
for ench in enchs:
if enchant == ' *':
enchant += ' '
else:
enchant += ' - '
enchant += ench[0]+' '+str(ench[1])+'% '+str(ench[2])\
+'% '+str(ench[3])+' '+str(ench[4])
i += enchant
query = "SELECT INITCAP(flag_abbr) FROM item_flags WHERE item_id = %s"
flags = db(query, params)
if len(flags) > 0: # if item has flags like magic
i += ' *'
for flag in flags:
i += ' '+flag[0]
query = ("SELECT INITCAP(restrict_abbr) "
"FROM item_restricts WHERE item_id = %s")
restr = db(query, params)
if len(restr) > 0: # if item has restrictions
i += ' *'
for res in restr:
i += ' '+res[0]
type = ' *'
if item[0][12]:
type += ' NoID'
if item[0][2] != None:
type += ' Wt:'+str(item[0][2])
if item[0][3] != None:
type += ' Val:'+str(item[0][3])
#type += ' Type:'+item[0][1]
i += type
# add is_rare, from_quest, etc. to zone info
zext = ''
if item[0][6]:
zext += 'R'
if item[0][7]:
zext += 'S'
if item[0][8]:
zext += 'Q'
if item[0][9]:
zext += 'U'
if item[0][10]:
zext += 'I'
if item[0][11]:
zext += 'O'
zone = item[0][4]
if zext != '':
zone += ' ('+zext+')'
i += ' * Zone: '+zone+' * Last ID: '+str(item[0][5])
#print 'Item '+str(id[0])+': '+i
query = "UPDATE items SET short_stats = %s WHERE item_id = %s"
params = (i, id[0])
db(query, params)
def long_stats():
query = "SELECT item_id FROM items"
params = ''
ids = db(query, params)
for id in ids:
query = ("SELECT item_name, "
"item_type, weight, c_value, zone_name, last_id, "
"is_rare, from_store, from_quest, for_quest, from_invasion, "
"out_of_game, no_identify, keywords "
"FROM items i, zones z "
"WHERE i.from_zone = z.zone_abbr AND item_id = %s")
params = (id[0],)
item = db(query, params)
i = item[0][0]
query = ("SELECT i.slot_abbr, slot_display "
"FROM item_slots i, slots s "
"WHERE i.slot_abbr = s.slot_abbr AND item_id = %s")
slots = db(query, params)
if len(slots) > 0:
i += ' *'
for slot in slots:
i += ', '+slot[1]
i += ' *'
query = ("SELECT i.spec_abbr, spec_value, spec_display "
"FROM item_specials i, specials s "
"WHERE i.spec_abbr = s.spec_abbr AND item_id = %s "
"AND i.spec_abbr = 'ac'")
specs = db(query, params)
if len(specs) > 0:
i += ', '+specs[0][2]+': '+str(specs[0][1])
query = ("SELECT i.attrib_abbr, attrib_value, attrib_display "
"FROM item_attribs i, attribs a "
"WHERE i.attrib_abbr = a.attrib_abbr AND item_id = %s")
attrs = db(query, params)
if len(attrs) > 0:
i += ' *'
for att in attrs:
i += ', '+att[2]+': '+str(att[1])
query = ("SELECT i.resist_abbr, resist_value, resist_display "
"FROM item_resists i, resists r "
"WHERE i.resist_abbr = r.resist_abbr AND item_id = %s")
resis = db(query, params)
if len(resis) > 0:
i += ' *'
for res in resis:
i += ', '+res[2]+': '+str(res[1])+'%'
query = ("SELECT i.spec_abbr, spec_value, spec_display "
"FROM item_specials i, specials s "
"WHERE i.spec_abbr = s.spec_abbr AND item_id = %s "
"AND i.spec_abbr != 'ac' "
"GROUP BY i.spec_abbr, spec_value, spec_display")
specs = db(query, params)
# should really switch this out into lots of if/else for proper
# formatting, right now it's bugged
if len(specs) > 0:
i += ' *'
if item[0][1] == 'crystal' or item[0][1] == 'spellbook' or \
item[0][1] == 'comp_bag' or item[0][1] == 'ammo':
for spec in specs:
i += ', '+spec[2]+': '+str(spec[1])
elif item[0][1] == 'container':
pass
elif item[0][1] == 'poison':
pass
elif item[0][1] == 'scroll' or item[0][1] == 'potion':
pass
elif item[0][1] == 'staff' or item[0][1] == 'wand':
pass
elif item[0][1] == 'instrument':
pass
elif item[0][1] == 'weapon':
pass
query = ("SELECT i.effect_abbr, effect_display "
"FROM item_effects i, effects e "
"WHERE i.effect_abbr = e.effect_abbr AND item_id = %s")
effs = db(query, params)
if len(effs) > 0:
i += ' *'
for eff in effs:
i += ', '+eff[1]
query = "SELECT proc_name FROM item_procs WHERE item_id = %s"
procs = db(query, params)
if len(procs) > 0:
i += ' *'
for proc in procs:
i += ', '+proc[0]
query = ("SELECT ench_name, dam_pct, freq_pct, sv_mod, duration "
"FROM item_enchants WHERE item_id = %s")
enchs = db(query, params)
if len(enchs) > 0:
i += ' *'
pass
query = ("SELECT i.flag_abbr, flag_display "
"FROM item_flags i, flags f "
"WHERE i.flag_abbr = f.flag_abbr AND item_id = %s")
flags = db(query, params)
if len(flags) > 0:
i += ' *'
for flag in flags:
i += ', '+flag[1]
query = ("SELECT i.restrict_abbr, restrict_name "
"FROM item_restricts i, restricts r "
"WHERE i.restrict_abbr = r.restrict_abbr AND item_id = %s")
rests = db(query, params)
if len(rests) > 0:
i += ' *'
for rest in rests:
i += ', '+rest[1]
if item[0][13]:
i += ' * Keywords:('+item[0][13]+')'
type = ' *'
if item[0][12]:
type += ', No-Identify'
if item[0][2] != None:
wt = locale.format("%d", item[0][2], grouping=True)
type += ', Weight: '+str(wt)+' lbs'
if item[0][3] != None:
val = locale.format("%d", item[0][3], grouping=True)
type += ', Value: '+str(val)+' copper'
#type += ', Type: '+item[0][1]
i += type
# add is_rare, from_quest, etc. to zone info
zext = ''
if item[0][6]:
zext += ', Is Rare'
if item[0][7]:
zext += ', From Store'
if item[0][8]:
zext += ', From Quest'
if item[0][9]:
zext += ', Used In Quest'
if item[0][10]:
zext += ', From Invasion'
if item[0][11]:
zext += ', Out Of Game'
zone = item[0][4]
if zext != '':
zone += ' ('+zext+')'
zone = zone.replace('(, ', '(')
i += ' * Zone: '+zone+' * Last ID: '+str(item[0][5])
i = i.replace('*, ', '* ')
i = i.replace('* *', '*')
#print 'Item '+str(id[0])+': '+i
query = "UPDATE items SET long_stats = %s WHERE item_id = %s"
params = (i, id[0])
db(query, params)
# Dispatch: each CLI flag maps to one maintenance routine; flags are not
# mutually exclusive, so several routines may run in sequence.
if options.legacy:
    import_legacy()
if options.identify:
    parse_identify()
if options.short:
    short_stats()
if options.long:
    long_stats()
# Report elapsed wall-clock time and release the DB connection.
timediff = datetime.now() - timestart
print 'The script took '+str(timediff)
if conn:
    conn.close()
| grokh/torilbot | parseStats.py | Python | mit | 26,337 | [
"CRYSTAL"
] | a44730cd12150c3afaa78b8991f05678fff7f2e49824fbdc6ba3eb638d5c0440 |
#!/opt/anaconda1anaconda2anaconda3/bin/python
from __future__ import print_function
import os
import sys
import argparse
from argparse import RawTextHelpFormatter
parser = argparse.ArgumentParser(description="Build and Run path advisor for Psi4",
formatter_class=RawTextHelpFormatter)
parser.add_argument('--psi4-compile', action='store_true', help="""\
(Command Default) Generates a minimal CMake command for building Psi4 against
this psi4-dev conda metapackage.
>>> git clone https://github.com/psi4/psi4.git
>>> cd {top-level-psi4-dir}
>>> conda create -n p4dev python={3.6} psi4-dev -c psi4[/label/dev]
>>> conda activate p4dev
>>> psi4-path-advisor
# execute or adapt `cmake` commands above; DepsCache handles python & addons;
# DepsMKLCache handles math; further psi4-path-advisor options handle compilers.
>>> cd objdir && make -j`getconf _NPROCESSORS_ONLN`
>>> make install""")
parser.add_argument('--disable-addons', action='store_true',
help="""Disengage building against the psi4-dev-provided _optional_ link-time Add-Ons like CheMPS2.""")
parser.add_argument('--disable-mkl', action='store_false', dest='mkl',
help="""Disengage building against the psi4-dev-provided MKL libraries (`libmkl_rt`).""")
if sys.platform.startswith('linux'):
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--intel', action='store_true',
help="""Engage self-provided icc/icpc/ifort compilers backed by conda's psi4-dev-provided gcc/g++.""")
group.add_argument('--intel-multiarch', action='store_true',
help="""Engage self-provided icc/icpc/ifort compilers backed by conda's psi4-dev-provided gcc/g++ PLUS compile for multiple architectures (useful for cluster deployments).""")
group.add_argument('--gcc', action='store_true',
help="""Engage conda's psi4-dev-provided gcc/g++/gfortran compilers.""")
elif sys.platform == 'darwin':
parser.add_argument('--clang', action='store_true',
help="""Engage conda's psi4-dev-provided clang/clang++/gfortran compilers. You must have downloaded this file https://github.com/phracker/MacOSX-SDKs/releases/download/10.13/MacOSX10.9.sdk.tar.xz, unpacked it, and saved it at ~/SDKs/MacOSX10.9.sdk . !Change! this arg invoked XCode AppleClang prior to Jul 2018.""")
# help="""Engage system-provided clang/clang++ compilers and psi4-dev-provided gfortran.""")
#parser.add_argument('--gcc', action='store_true',
# help="""Engage psi4-dev-provided gcc/g++/gfortran compilers.""")
# duplicates from `bin/psi4`
psi4 = os.path.abspath(os.path.dirname(__file__)) + os.path.sep + 'psi4'
psi4alongside = os.path.isfile(psi4) and os.access(psi4, os.X_OK)
if psi4alongside:
parser.add_argument("--psiapi-path", action='store_true',
help="""(Duplicate from `psi4`) Generates a bash command to source correct Python for `python -c "import psi4"`""")
parser.add_argument('--plugin-compile', action='store_true', help="""\
(Duplicate from `psi4`) Generates a CMake command for building a plugin against this Psi4 installation.
>>> cd <plugin_directory>
>>> `psi4 --plugin-compile`
>>> make
>>> psi4""")
args = parser.parse_args()

if psi4alongside:
    from subprocess import call

    # Delegate the duplicated convenience flags to the installed `psi4` script.
    if args.psiapi_path:
        call([psi4, '--psiapi-path'])
        sys.exit(0)

    if args.plugin_compile:
        call([psi4, '--plugin-compile'])
        sys.exit(0)

else:
    # `--plugin-compile` is only registered on the parser when `psi4` is
    # installed alongside, so the Namespace has no `plugin_compile` attribute
    # here; use getattr to avoid an unconditional AttributeError.
    if getattr(args, 'plugin_compile', False):
        print("""Install "psi4" via `conda install psi4 -c psi4[/label/dev]`, then reissue command.""")
#advice = {
# 'cmake': '/opt/anaconda1anaconda2anaconda3/bin/cmake \\',
# 'here': ' -H. \\',
# 'deps': ' -C/opt/anaconda1anaconda2anaconda3/share/cmake/psi4/psi4DepsCache.cmake \\',
# 'nooptl': ' -C/opt/anaconda1anaconda2anaconda3/share/cmake/psi4/psi4DepsDisableCache.cmake \\',
# 'Lintel': ' -C/opt/anaconda1anaconda2anaconda3/share/cmake/psi4/psi4DepsIntelCache.cmake \\',
# 'Lgnu': ' -C/opt/anaconda1anaconda2anaconda3/share/cmake/psi4/psi4DepsGNUCache.cmake \\',
# 'Mclang': ' -C/opt/anaconda1anaconda2anaconda3/share/cmake/psi4/psi4DepsAppleClangCache.cmake \\',
# 'Mgnu': ' -C/opt/anaconda1anaconda2anaconda3/share/cmake/psi4/psi4DepsGNUCache.cmake \\',
# 'objdir': ' -Bobjdir',
#}
# Assemble the recommended CMake invocation. The /opt/anaconda1anaconda2anaconda3
# paths are conda-build placeholders replaced with the real prefix at install
# time. Cache files (-C) are applied in order; the base DepsCache always
# comes first and the build directory (-Bobjdir) always last.
recc = ['/opt/anaconda1anaconda2anaconda3/bin/cmake',
        '-H.',
        '-C/opt/anaconda1anaconda2anaconda3/share/cmake/psi4/psi4DepsCache.cmake',
        '-Bobjdir']

# optional caches are inserted just before -Bobjdir
if args.disable_addons:
    recc.insert(-1, '-C/opt/anaconda1anaconda2anaconda3/share/cmake/psi4/psi4DepsDisableCache.cmake')
if args.mkl:
    recc.insert(-1, '-C/opt/anaconda1anaconda2anaconda3/share/cmake/psi4/psi4DepsMKLCache.cmake')

# compiler-selection caches are platform-specific (flags registered above)
if sys.platform.startswith('linux'):
    if args.intel:
        recc.insert(-1, '-C/opt/anaconda1anaconda2anaconda3/share/cmake/psi4/psi4DepsIntelCache.cmake')
    if args.intel_multiarch:
        recc.insert(-1, '-C/opt/anaconda1anaconda2anaconda3/share/cmake/psi4/psi4DepsIntelMultiarchCache.cmake')
    if args.gcc:
        recc.insert(-1, '-C/opt/anaconda1anaconda2anaconda3/share/cmake/psi4/psi4DepsGNUCache.cmake')

if sys.platform == 'darwin':
    if args.clang:
        recc.insert(-1, '-C/opt/anaconda1anaconda2anaconda3/share/cmake/psi4/psi4DepsClangCache.cmake')
#        recc.insert(0, 'CONDA_BUILD_SYSROOT=~/SDKs/MacOSX10.9.sdk')
#        recc.insert(-1, '-C/opt/anaconda1anaconda2anaconda3/share/cmake/psi4/psi4DepsAppleClangCache.cmake')
    #if args.gcc:
    #    recc.insert(-1, '-C/opt/anaconda1anaconda2anaconda3/share/cmake/psi4/psi4DepsGNUCache.cmake')

# print the final one-line command for the user to copy/adapt
srecc = """ """.join(recc)
print(srecc)
| psi4/psi4meta | conda-recipes/psi4-dev/src/psi4-path-advisor.py | Python | gpl-2.0 | 5,808 | [
"Psi4"
] | bb5044f6c84c634369ce46226ceb0bc7ccd0c46175ebdcb4e2074f368245b5d1 |
import contextlib
import gzip
import itertools
import math
import os.path
import pickle
import re
import shutil
import sys
import tempfile
import warnings
from contextlib import ExitStack
from io import BytesIO
from pathlib import Path
from typing import Optional
import numpy as np
import pandas as pd
import pytest
from packaging.version import Version
from pandas.errors import OutOfBoundsDatetime
import xarray as xr
from xarray import (
DataArray,
Dataset,
backends,
load_dataarray,
load_dataset,
open_dataarray,
open_dataset,
open_mfdataset,
save_mfdataset,
)
from xarray.backends.common import robust_getitem
from xarray.backends.h5netcdf_ import H5netcdfBackendEntrypoint
from xarray.backends.netcdf3 import _nc3_dtype_coercions
from xarray.backends.netCDF4_ import (
NetCDF4BackendEntrypoint,
_extract_nc4_variable_encoding,
)
from xarray.backends.pydap_ import PydapDataStore
from xarray.backends.scipy_ import ScipyBackendEntrypoint
from xarray.coding.variables import SerializationWarning
from xarray.conventions import encode_dataset_coordinates
from xarray.core import indexing
from xarray.core.options import set_options
from xarray.core.pycompat import dask_array_type
from xarray.tests import mock
from . import (
arm_xfail,
assert_allclose,
assert_array_equal,
assert_equal,
assert_identical,
assert_no_warnings,
has_dask,
has_h5netcdf_0_12,
has_netCDF4,
has_scipy,
network,
requires_cfgrib,
requires_cftime,
requires_dask,
requires_fsspec,
requires_h5netcdf,
requires_h5netcdf_0_12,
requires_iris,
requires_netCDF4,
requires_pseudonetcdf,
requires_pydap,
requires_pynio,
requires_rasterio,
requires_scipy,
requires_scipy_or_netCDF4,
requires_zarr,
)
from .test_coding_times import (
_ALL_CALENDARS,
_NON_STANDARD_CALENDARS,
_STANDARD_CALENDARS,
)
from .test_dataset import create_append_test_data, create_test_data
try:
import netCDF4 as nc4
except ImportError:
pass
try:
import dask
import dask.array as da
except ImportError:
pass
ON_WINDOWS = sys.platform == "win32"
default_value = object()
def open_example_dataset(name, *args, **kwargs):
    """Open one example file from this test suite's bundled ``data`` directory."""
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    return open_dataset(os.path.join(data_dir, name), *args, **kwargs)
def open_example_mfdataset(names, *args, **kwargs):
    """Open several bundled example files as a single multi-file dataset."""
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    paths = [os.path.join(data_dir, fname) for fname in names]
    return open_mfdataset(paths, *args, **kwargs)
def create_masked_and_scaled_data():
    """Decoded (in-memory) counterpart of ``create_encoded_masked_and_scaled_data``."""
    values = np.array([np.nan, np.nan, 10, 10.1, 10.2], dtype=np.float32)
    encoding = dict(
        _FillValue=-1,
        add_offset=10,
        scale_factor=np.float32(0.1),
        dtype="i2",
    )
    return Dataset({"x": ("t", values, {}, encoding)})
def create_encoded_masked_and_scaled_data():
    """On-disk (encoded) counterpart of ``create_masked_and_scaled_data``."""
    attrs = dict(_FillValue=-1, add_offset=10, scale_factor=np.float32(0.1))
    return Dataset({"x": ("t", np.int16([-1, -1, 0, 1, 2]), attrs)})
def create_unsigned_masked_scaled_data():
    """Decoded data for the valid ``_Unsigned`` mask-and-scale roundtrip test."""
    encoding = dict(
        _FillValue=255,
        _Unsigned="true",
        dtype="i1",
        add_offset=10,
        scale_factor=np.float32(0.1),
    )
    values = np.array([10.0, 10.1, 22.7, 22.8, np.nan], dtype=np.float32)
    return Dataset({"x": ("t", values, {}, encoding)})
def create_encoded_unsigned_masked_scaled_data():
    """On-disk bytes for the valid ``_Unsigned`` roundtrip test.

    The ``_FillValue`` is stored in its signed representation, as written
    to the file.
    """
    attrs = dict(
        _FillValue=-1,
        _Unsigned="true",
        add_offset=10,
        scale_factor=np.float32(0.1),
    )
    # Signed bytes whose unsigned reinterpretation is [0, 1, 127, 128, 255].
    signed_bytes = np.asarray([0, 1, 127, -128, -1], dtype="i1")
    return Dataset({"x": ("t", signed_bytes, attrs)})
def create_bad_unsigned_masked_scaled_data():
    """Like ``create_unsigned_masked_scaled_data`` but with a malformed
    ``_Unsigned`` attribute (boolean ``True`` instead of the string "true")."""
    encoding = dict(
        _FillValue=255,
        _Unsigned=True,
        dtype="i1",
        add_offset=10,
        scale_factor=np.float32(0.1),
    )
    values = np.array([10.0, 10.1, 22.7, 22.8, np.nan], dtype=np.float32)
    return Dataset({"x": ("t", values, {}, encoding)})
def create_bad_encoded_unsigned_masked_scaled_data():
    """Encoded counterpart of ``create_bad_unsigned_masked_scaled_data``.

    The ``_FillValue`` is stored in its signed representation, as written
    to the file.
    """
    attrs = dict(
        _FillValue=-1,
        _Unsigned=True,
        add_offset=10,
        scale_factor=np.float32(0.1),
    )
    # Signed bytes whose unsigned reinterpretation is [0, 1, 127, 128, 255].
    signed_bytes = np.asarray([0, 1, 127, -128, -1], dtype="i1")
    return Dataset({"x": ("t", signed_bytes, attrs)})
def create_signed_masked_scaled_data():
    """Decoded data for the explicit ``_Unsigned: "false"`` roundtrip test."""
    encoding = dict(
        _FillValue=-127,
        _Unsigned="false",
        dtype="i1",
        add_offset=10,
        scale_factor=np.float32(0.1),
    )
    values = np.array([-1.0, 10.1, 22.7, np.nan], dtype=np.float32)
    return Dataset({"x": ("t", values, {}, encoding)})
def create_encoded_signed_masked_scaled_data():
    """On-disk bytes for the explicit ``_Unsigned: "false"`` roundtrip test.

    The ``_FillValue`` is stored in its signed representation, as written
    to the file.
    """
    attrs = dict(
        _FillValue=-127,
        _Unsigned="false",
        add_offset=10,
        scale_factor=np.float32(0.1),
    )
    signed_bytes = np.asarray([-110, 1, 127, -127], dtype="i1")
    return Dataset({"x": ("t", signed_bytes, attrs)})
def create_boolean_data():
    """Boolean data used to check that the bool dtype roundtrips."""
    return Dataset({"x": ("t", [True, False, False, True], {"units": "-"})})
class TestCommon:
    def test_robust_getitem(self):
        """``robust_getitem`` should retry past transient access failures."""

        class FlakyFailure(Exception):
            pass

        class FlakyArray:
            # Wrapper that raises on the first ``failures`` accesses, then
            # delegates to the wrapped array.
            def __init__(self, array, failures=1):
                self.array = array
                self.failures = failures

            def __getitem__(self, key):
                if self.failures > 0:
                    self.failures -= 1
                    raise FlakyFailure
                return self.array[key]

        flaky = FlakyArray([0])
        # First access fails, second succeeds (failure budget exhausted).
        with pytest.raises(FlakyFailure):
            flaky[0]
        assert flaky[0] == 0

        result = robust_getitem(flaky, 0, catch=FlakyFailure, initial_delay=0)
        assert result == 0
class NetCDF3Only:
    # Formats exercised by every NetCDF3-only backend subclass.
    netcdf3_formats = ("NETCDF3_CLASSIC", "NETCDF3_64BIT")

    @requires_scipy
    def test_dtype_coercion_error(self):
        """Failing dtype coercion should lead to an error"""
        for src_dtype, nc3_format in itertools.product(
            _nc3_dtype_coercions, self.netcdf3_formats
        ):
            # bool is upcast to int8, so that coercion can never fail
            if src_dtype == "bool":
                continue
            # Data containing the dtype's largest representable value will
            # no longer compare equal after the coerced downcast.
            biggest = np.iinfo(src_dtype).max
            data = np.array([0, 1, 2, biggest], dtype=src_dtype)
            ds = Dataset({"x": ("t", data, {})})
            with create_tmp_file(allow_cleanup_failure=False) as path:
                with pytest.raises(ValueError, match="could not safely cast"):
                    ds.to_netcdf(path, format=nc3_format)
class DatasetIOBase:
    """Shared roundtrip/lazy-indexing tests run against every backend.

    Subclasses set ``engine`` and ``file_format`` and implement
    ``create_store``; ``save``/``open`` may also be overridden.
    """

    engine: Optional[str] = None
    file_format: Optional[str] = None

    def create_store(self):
        raise NotImplementedError()

    @contextlib.contextmanager
    def roundtrip(
        self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False
    ):
        """Write ``data`` to a temporary file and yield it re-opened."""
        if save_kwargs is None:
            save_kwargs = {}
        if open_kwargs is None:
            open_kwargs = {}
        with create_tmp_file(allow_cleanup_failure=allow_cleanup_failure) as path:
            self.save(data, path, **save_kwargs)
            with self.open(path, **open_kwargs) as ds:
                yield ds

    @contextlib.contextmanager
    def roundtrip_append(
        self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False
    ):
        """Like ``roundtrip``, but write one variable at a time in append mode."""
        if save_kwargs is None:
            save_kwargs = {}
        if open_kwargs is None:
            open_kwargs = {}
        with create_tmp_file(allow_cleanup_failure=allow_cleanup_failure) as path:
            for i, key in enumerate(data.variables):
                mode = "a" if i > 0 else "w"
                self.save(data[[key]], path, mode=mode, **save_kwargs)
            with self.open(path, **open_kwargs) as ds:
                yield ds

    # The save/open methods may be overwritten below
    def save(self, dataset, path, **kwargs):
        return dataset.to_netcdf(
            path, engine=self.engine, format=self.file_format, **kwargs
        )

    @contextlib.contextmanager
    def open(self, path, **kwargs):
        with open_dataset(path, engine=self.engine, **kwargs) as ds:
            yield ds

    def test_zero_dimensional_variable(self):
        expected = create_test_data()
        expected["float_var"] = ([], 1.0e9, {"units": "units of awesome"})
        expected["bytes_var"] = ([], b"foobar")
        expected["string_var"] = ([], "foobar")
        with self.roundtrip(expected) as actual:
            assert_identical(expected, actual)

    def test_write_store(self):
        expected = create_test_data()
        with self.create_store() as store:
            expected.dump_to_store(store)
            # we need to cf decode the store because it has time and
            # non-dimension coordinates
            with xr.decode_cf(store) as actual:
                assert_allclose(expected, actual)

    def check_dtypes_roundtripped(self, expected, actual):
        for k in expected.variables:
            expected_dtype = expected.variables[k].dtype

            # For NetCDF3, the backend should perform dtype coercion
            if (
                isinstance(self, NetCDF3Only)
                and str(expected_dtype) in _nc3_dtype_coercions
            ):
                expected_dtype = np.dtype(_nc3_dtype_coercions[str(expected_dtype)])

            actual_dtype = actual.variables[k].dtype
            # TODO: check expected behavior for string dtypes more carefully
            string_kinds = {"O", "S", "U"}
            assert expected_dtype == actual_dtype or (
                expected_dtype.kind in string_kinds
                and actual_dtype.kind in string_kinds
            )

    def test_roundtrip_test_data(self):
        expected = create_test_data()
        with self.roundtrip(expected) as actual:
            self.check_dtypes_roundtripped(expected, actual)
            assert_identical(expected, actual)

    def test_load(self):
        expected = create_test_data()

        @contextlib.contextmanager
        def assert_loads(vars=None):
            if vars is None:
                vars = expected
            with self.roundtrip(expected) as actual:
                for k, v in actual.variables.items():
                    # IndexVariables are eagerly loaded into memory
                    assert v._in_memory == (k in actual.dims)
                yield actual
                for k, v in actual.variables.items():
                    if k in vars:
                        assert v._in_memory
                assert_identical(expected, actual)

        with pytest.raises(AssertionError):
            # make sure the contextmanager works!
            with assert_loads() as ds:
                pass

        with assert_loads() as ds:
            ds.load()

        with assert_loads(["var1", "dim1", "dim2"]) as ds:
            ds["var1"].load()

        # verify we can read data even after closing the file
        with self.roundtrip(expected) as ds:
            actual = ds.load()
        assert_identical(expected, actual)

    def test_dataset_compute(self):
        expected = create_test_data()

        with self.roundtrip(expected) as actual:
            # Test Dataset.compute()
            for k, v in actual.variables.items():
                # IndexVariables are eagerly cached
                assert v._in_memory == (k in actual.dims)

            computed = actual.compute()

            for k, v in actual.variables.items():
                assert v._in_memory == (k in actual.dims)
            for v in computed.variables.values():
                assert v._in_memory

            assert_identical(expected, actual)
            assert_identical(expected, computed)

    def test_pickle(self):
        if not has_dask:
            pytest.xfail("pickling requires dask for SerializableLock")
        expected = Dataset({"foo": ("x", [42])})
        with self.roundtrip(expected, allow_cleanup_failure=ON_WINDOWS) as roundtripped:
            with roundtripped:
                # Windows doesn't like reopening an already open file
                raw_pickle = pickle.dumps(roundtripped)
            with pickle.loads(raw_pickle) as unpickled_ds:
                assert_identical(expected, unpickled_ds)

    @pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
    def test_pickle_dataarray(self):
        if not has_dask:
            pytest.xfail("pickling requires dask for SerializableLock")
        expected = Dataset({"foo": ("x", [42])})
        with self.roundtrip(expected, allow_cleanup_failure=ON_WINDOWS) as roundtripped:
            with roundtripped:
                raw_pickle = pickle.dumps(roundtripped["foo"])
            # TODO: figure out how to explicitly close the file for the
            # unpickled DataArray?
            unpickled = pickle.loads(raw_pickle)
            assert_identical(expected["foo"], unpickled)

    def test_dataset_caching(self):
        expected = Dataset({"foo": ("x", [5, 6, 7])})
        with self.roundtrip(expected) as actual:
            assert isinstance(actual.foo.variable._data, indexing.MemoryCachedArray)
            assert not actual.foo.variable._in_memory
            actual.foo.values  # cache
            assert actual.foo.variable._in_memory

        with self.roundtrip(expected, open_kwargs={"cache": False}) as actual:
            assert isinstance(actual.foo.variable._data, indexing.CopyOnWriteArray)
            assert not actual.foo.variable._in_memory
            actual.foo.values  # no caching
            assert not actual.foo.variable._in_memory

    @pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
    def test_roundtrip_None_variable(self):
        expected = Dataset({None: (("x", "y"), [[0, 1], [2, 3]])})
        with self.roundtrip(expected) as actual:
            assert_identical(expected, actual)

    def test_roundtrip_object_dtype(self):
        floats = np.array([0.0, 0.0, 1.0, 2.0, 3.0], dtype=object)
        floats_nans = np.array([np.nan, np.nan, 1.0, 2.0, 3.0], dtype=object)
        bytes_ = np.array([b"ab", b"cdef", b"g"], dtype=object)
        bytes_nans = np.array([b"ab", b"cdef", np.nan], dtype=object)
        strings = np.array(["ab", "cdef", "g"], dtype=object)
        strings_nans = np.array(["ab", "cdef", np.nan], dtype=object)
        all_nans = np.array([np.nan, np.nan], dtype=object)
        original = Dataset(
            {
                "floats": ("a", floats),
                "floats_nans": ("a", floats_nans),
                "bytes": ("b", bytes_),
                "bytes_nans": ("b", bytes_nans),
                "strings": ("b", strings),
                "strings_nans": ("b", strings_nans),
                "all_nans": ("c", all_nans),
                "nan": ([], np.nan),
            }
        )
        expected = original.copy(deep=True)
        with self.roundtrip(original) as actual:
            try:
                assert_identical(expected, actual)
            except AssertionError:
                # Most stores use '' for nans in strings, but some don't.
                # First try the ideal case (where the store returns exactly)
                # the original Dataset), then try a more realistic case.
                # This currently includes all netCDF files when encoding is not
                # explicitly set.
                # https://github.com/pydata/xarray/issues/1647
                expected["bytes_nans"][-1] = b""
                expected["strings_nans"][-1] = ""
                assert_identical(expected, actual)

    def test_roundtrip_string_data(self):
        expected = Dataset({"x": ("t", ["ab", "cdef"])})
        with self.roundtrip(expected) as actual:
            assert_identical(expected, actual)

    def test_roundtrip_string_encoded_characters(self):
        expected = Dataset({"x": ("t", ["ab", "cdef"])})
        expected["x"].encoding["dtype"] = "S1"
        with self.roundtrip(expected) as actual:
            assert_identical(expected, actual)
            assert actual["x"].encoding["_Encoding"] == "utf-8"

        expected["x"].encoding["_Encoding"] = "ascii"
        with self.roundtrip(expected) as actual:
            assert_identical(expected, actual)
            assert actual["x"].encoding["_Encoding"] == "ascii"

    @arm_xfail
    def test_roundtrip_numpy_datetime_data(self):
        times = pd.to_datetime(["2000-01-01", "2000-01-02", "NaT"])
        expected = Dataset({"t": ("t", times), "t0": times[0]})
        kwargs = {"encoding": {"t0": {"units": "days since 1950-01-01"}}}
        with self.roundtrip(expected, save_kwargs=kwargs) as actual:
            assert_identical(expected, actual)
            assert actual.t0.encoding["units"] == "days since 1950-01-01"

    @requires_cftime
    def test_roundtrip_cftime_datetime_data(self):
        from .test_coding_times import _all_cftime_date_types

        date_types = _all_cftime_date_types()
        for date_type in date_types.values():
            times = [date_type(1, 1, 1), date_type(1, 1, 2)]
            expected = Dataset({"t": ("t", times), "t0": times[0]})
            kwargs = {"encoding": {"t0": {"units": "days since 0001-01-01"}}}
            expected_decoded_t = np.array(times)
            expected_decoded_t0 = np.array([date_type(1, 1, 1)])
            expected_calendar = times[0].calendar

            with warnings.catch_warnings():
                if expected_calendar in {"proleptic_gregorian", "standard"}:
                    warnings.filterwarnings("ignore", "Unable to decode time axis")

                with self.roundtrip(expected, save_kwargs=kwargs) as actual:
                    abs_diff = abs(actual.t.values - expected_decoded_t)
                    assert (abs_diff <= np.timedelta64(1, "s")).all()
                    assert (
                        actual.t.encoding["units"]
                        == "days since 0001-01-01 00:00:00.000000"
                    )
                    assert actual.t.encoding["calendar"] == expected_calendar

                    abs_diff = abs(actual.t0.values - expected_decoded_t0)
                    assert (abs_diff <= np.timedelta64(1, "s")).all()
                    assert actual.t0.encoding["units"] == "days since 0001-01-01"
                    assert actual.t.encoding["calendar"] == expected_calendar

    def test_roundtrip_timedelta_data(self):
        time_deltas = pd.to_timedelta(["1h", "2h", "NaT"])
        expected = Dataset({"td": ("td", time_deltas), "td0": time_deltas[0]})
        with self.roundtrip(expected) as actual:
            assert_identical(expected, actual)

    def test_roundtrip_float64_data(self):
        expected = Dataset({"x": ("y", np.array([1.0, 2.0, np.pi], dtype="float64"))})
        with self.roundtrip(expected) as actual:
            assert_identical(expected, actual)

    def test_roundtrip_example_1_netcdf(self):
        with open_example_dataset("example_1.nc") as expected:
            with self.roundtrip(expected) as actual:
                # we allow the attributes to differ since that
                # will depend on the encoding used.  For example,
                # without CF encoding 'actual' will end up with
                # a dtype attribute.
                assert_equal(expected, actual)

    def test_roundtrip_coordinates(self):
        original = Dataset(
            {"foo": ("x", [0, 1])}, {"x": [2, 3], "y": ("a", [42]), "z": ("x", [4, 5])}
        )

        with self.roundtrip(original) as actual:
            assert_identical(original, actual)

        original["foo"].encoding["coordinates"] = "y"
        with self.roundtrip(original, open_kwargs={"decode_coords": False}) as expected:
            # check roundtripping when decode_coords=False
            with self.roundtrip(
                expected, open_kwargs={"decode_coords": False}
            ) as actual:
                assert_identical(expected, actual)

    def test_roundtrip_global_coordinates(self):
        original = Dataset(
            {"foo": ("x", [0, 1])}, {"x": [2, 3], "y": ("a", [42]), "z": ("x", [4, 5])}
        )
        with self.roundtrip(original) as actual:
            assert_identical(original, actual)

        # test that global "coordinates" is as expected
        _, attrs = encode_dataset_coordinates(original)
        assert attrs["coordinates"] == "y"

        # test warning when global "coordinates" is already set
        original.attrs["coordinates"] = "foo"
        with pytest.warns(SerializationWarning):
            _, attrs = encode_dataset_coordinates(original)
            assert attrs["coordinates"] == "foo"

    def test_roundtrip_coordinates_with_space(self):
        original = Dataset(coords={"x": 0, "y z": 1})
        expected = Dataset({"y z": 1}, {"x": 0})
        with pytest.warns(SerializationWarning):
            with self.roundtrip(original) as actual:
                assert_identical(expected, actual)

    def test_roundtrip_boolean_dtype(self):
        original = create_boolean_data()
        assert original["x"].dtype == "bool"
        with self.roundtrip(original) as actual:
            assert_identical(original, actual)
            assert actual["x"].dtype == "bool"

    def test_orthogonal_indexing(self):
        in_memory = create_test_data()
        with self.roundtrip(in_memory) as on_disk:
            indexers = {"dim1": [1, 2, 0], "dim2": [3, 2, 0, 3], "dim3": np.arange(5)}
            expected = in_memory.isel(**indexers)
            actual = on_disk.isel(**indexers)
            # make sure the array is not yet loaded into memory
            assert not actual["var1"].variable._in_memory
            assert_identical(expected, actual)
            # do it twice, to make sure we're switched from orthogonal -> numpy
            # when we cached the values
            actual = on_disk.isel(**indexers)
            assert_identical(expected, actual)

    def test_vectorized_indexing(self):
        in_memory = create_test_data()
        with self.roundtrip(in_memory) as on_disk:
            indexers = {
                "dim1": DataArray([0, 2, 0], dims="a"),
                "dim2": DataArray([0, 2, 3], dims="a"),
            }
            expected = in_memory.isel(**indexers)
            actual = on_disk.isel(**indexers)
            # make sure the array is not yet loaded into memory
            assert not actual["var1"].variable._in_memory
            assert_identical(expected, actual.load())
            # do it twice, to make sure we're switched from
            # vectorized -> numpy when we cached the values
            actual = on_disk.isel(**indexers)
            assert_identical(expected, actual)

        def multiple_indexing(indexers):
            # make sure a sequence of lazy indexings certainly works.
            with self.roundtrip(in_memory) as on_disk:
                actual = on_disk["var3"]
                expected = in_memory["var3"]
                for ind in indexers:
                    actual = actual.isel(**ind)
                    expected = expected.isel(**ind)
                    # make sure the array is not yet loaded into memory
                    assert not actual.variable._in_memory
                assert_identical(expected, actual.load())

        # two-staged vectorized-indexing
        indexers = [
            {
                "dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"]),
                "dim3": DataArray([[0, 4], [1, 3], [2, 2]], dims=["a", "b"]),
            },
            {"a": DataArray([0, 1], dims=["c"]), "b": DataArray([0, 1], dims=["c"])},
        ]
        multiple_indexing(indexers)

        # vectorized-slice mixed
        indexers = [
            {
                "dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"]),
                "dim3": slice(None, 10),
            }
        ]
        multiple_indexing(indexers)

        # vectorized-integer mixed
        indexers = [
            {"dim3": 0},
            {"dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"])},
            {"a": slice(None, None, 2)},
        ]
        multiple_indexing(indexers)

        # vectorized-integer mixed
        indexers = [
            {"dim3": 0},
            {"dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"])},
            {"a": 1, "b": 0},
        ]
        multiple_indexing(indexers)

    @pytest.mark.xfail(
        reason="zarr without dask handles negative steps in slices incorrectly",
    )
    def test_vectorized_indexing_negative_step(self):
        # use dask explicitly when present
        if has_dask:
            open_kwargs = {"chunks": {}}
        else:
            open_kwargs = None
        in_memory = create_test_data()

        def multiple_indexing(indexers):
            # make sure a sequence of lazy indexings certainly works.
            with self.roundtrip(in_memory, open_kwargs=open_kwargs) as on_disk:
                actual = on_disk["var3"]
                expected = in_memory["var3"]
                for ind in indexers:
                    actual = actual.isel(**ind)
                    expected = expected.isel(**ind)
                    # make sure the array is not yet loaded into memory
                    assert not actual.variable._in_memory
                assert_identical(expected, actual.load())

        # with negative step slice.
        indexers = [
            {
                "dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"]),
                "dim3": slice(-1, 1, -1),
            }
        ]
        multiple_indexing(indexers)

        # with negative step slice.
        indexers = [
            {
                "dim1": DataArray([[0, 7], [2, 6], [3, 5]], dims=["a", "b"]),
                "dim3": slice(-1, 1, -2),
            }
        ]
        multiple_indexing(indexers)

    def test_isel_dataarray(self):
        # Make sure isel works lazily. GH:issue:1688
        in_memory = create_test_data()
        with self.roundtrip(in_memory) as on_disk:
            expected = in_memory.isel(dim2=in_memory["dim2"] < 3)
            actual = on_disk.isel(dim2=on_disk["dim2"] < 3)
            assert_identical(expected, actual)

    def validate_array_type(self, ds):
        # Make sure that only NumpyIndexingAdapter stores a bare np.ndarray.
        def find_and_validate_array(obj):
            # recursively called function. obj: array or array wrapper.
            if hasattr(obj, "array"):
                if isinstance(obj.array, indexing.ExplicitlyIndexed):
                    find_and_validate_array(obj.array)
                else:
                    if isinstance(obj.array, np.ndarray):
                        assert isinstance(obj, indexing.NumpyIndexingAdapter)
                    elif isinstance(obj.array, dask_array_type):
                        assert isinstance(obj, indexing.DaskIndexingAdapter)
                    elif isinstance(obj.array, pd.Index):
                        assert isinstance(obj, indexing.PandasIndexingAdapter)
                    else:
                        raise TypeError(f"{type(obj.array)} is wrapped by {type(obj)}")

        for k, v in ds.variables.items():
            find_and_validate_array(v._data)

    def test_array_type_after_indexing(self):
        in_memory = create_test_data()
        with self.roundtrip(in_memory) as on_disk:
            self.validate_array_type(on_disk)
            indexers = {"dim1": [1, 2, 0], "dim2": [3, 2, 0, 3], "dim3": np.arange(5)}
            expected = in_memory.isel(**indexers)
            actual = on_disk.isel(**indexers)
            assert_identical(expected, actual)
            self.validate_array_type(actual)
            # do it twice, to make sure we're switched from orthogonal -> numpy
            # when we cached the values
            actual = on_disk.isel(**indexers)
            assert_identical(expected, actual)
            self.validate_array_type(actual)

    def test_dropna(self):
        # regression test for GH:issue:1694
        a = np.random.randn(4, 3)
        # NOTE: use np.nan, not the np.NaN alias, which was removed in NumPy 2.0
        a[1, 1] = np.nan
        in_memory = xr.Dataset(
            {"a": (("y", "x"), a)}, coords={"y": np.arange(4), "x": np.arange(3)}
        )

        assert_identical(
            in_memory.dropna(dim="x"), in_memory.isel(x=slice(None, None, 2))
        )

        with self.roundtrip(in_memory) as on_disk:
            self.validate_array_type(on_disk)
            expected = in_memory.dropna(dim="x")
            actual = on_disk.dropna(dim="x")
            assert_identical(expected, actual)

    def test_ondisk_after_print(self):
        """Make sure print does not load file into memory"""
        in_memory = create_test_data()
        with self.roundtrip(in_memory) as on_disk:
            repr(on_disk)
            assert not on_disk["var1"]._in_memory
class CFEncodedBase(DatasetIOBase):
def test_roundtrip_bytes_with_fill_value(self):
values = np.array([b"ab", b"cdef", np.nan], dtype=object)
encoding = {"_FillValue": b"X", "dtype": "S1"}
original = Dataset({"x": ("t", values, {}, encoding)})
expected = original.copy(deep=True)
with self.roundtrip(original) as actual:
assert_identical(expected, actual)
original = Dataset({"x": ("t", values, {}, {"_FillValue": b""})})
with self.roundtrip(original) as actual:
assert_identical(expected, actual)
def test_roundtrip_string_with_fill_value_nchar(self):
values = np.array(["ab", "cdef", np.nan], dtype=object)
expected = Dataset({"x": ("t", values)})
encoding = {"dtype": "S1", "_FillValue": b"X"}
original = Dataset({"x": ("t", values, {}, encoding)})
# Not supported yet.
with pytest.raises(NotImplementedError):
with self.roundtrip(original) as actual:
assert_identical(expected, actual)
@pytest.mark.parametrize(
"decoded_fn, encoded_fn",
[
(
create_unsigned_masked_scaled_data,
create_encoded_unsigned_masked_scaled_data,
),
pytest.param(
create_bad_unsigned_masked_scaled_data,
create_bad_encoded_unsigned_masked_scaled_data,
marks=pytest.mark.xfail(reason="Bad _Unsigned attribute."),
),
(
create_signed_masked_scaled_data,
create_encoded_signed_masked_scaled_data,
),
(create_masked_and_scaled_data, create_encoded_masked_and_scaled_data),
],
)
def test_roundtrip_mask_and_scale(self, decoded_fn, encoded_fn):
decoded = decoded_fn()
encoded = encoded_fn()
with self.roundtrip(decoded) as actual:
for k in decoded.variables:
assert decoded.variables[k].dtype == actual.variables[k].dtype
assert_allclose(decoded, actual, decode_bytes=False)
with self.roundtrip(decoded, open_kwargs=dict(decode_cf=False)) as actual:
# TODO: this assumes that all roundtrips will first
# encode. Is that something we want to test for?
for k in encoded.variables:
assert encoded.variables[k].dtype == actual.variables[k].dtype
assert_allclose(encoded, actual, decode_bytes=False)
with self.roundtrip(encoded, open_kwargs=dict(decode_cf=False)) as actual:
for k in encoded.variables:
assert encoded.variables[k].dtype == actual.variables[k].dtype
assert_allclose(encoded, actual, decode_bytes=False)
# make sure roundtrip encoding didn't change the
# original dataset.
assert_allclose(encoded, encoded_fn(), decode_bytes=False)
with self.roundtrip(encoded) as actual:
for k in decoded.variables:
assert decoded.variables[k].dtype == actual.variables[k].dtype
assert_allclose(decoded, actual, decode_bytes=False)
@staticmethod
def _create_cf_dataset():
original = Dataset(
dict(
variable=(
("ln_p", "latitude", "longitude"),
np.arange(8, dtype="f4").reshape(2, 2, 2),
{"ancillary_variables": "std_devs det_lim"},
),
std_devs=(
("ln_p", "latitude", "longitude"),
np.arange(0.1, 0.9, 0.1).reshape(2, 2, 2),
{"standard_name": "standard_error"},
),
det_lim=(
(),
0.1,
{"standard_name": "detection_minimum"},
),
),
dict(
latitude=("latitude", [0, 1], {"units": "degrees_north"}),
longitude=("longitude", [0, 1], {"units": "degrees_east"}),
latlon=((), -1, {"grid_mapping_name": "latitude_longitude"}),
latitude_bnds=(("latitude", "bnds2"), [[0, 1], [1, 2]]),
longitude_bnds=(("longitude", "bnds2"), [[0, 1], [1, 2]]),
areas=(
("latitude", "longitude"),
[[1, 1], [1, 1]],
{"units": "degree^2"},
),
ln_p=(
"ln_p",
[1.0, 0.5],
{
"standard_name": "atmosphere_ln_pressure_coordinate",
"computed_standard_name": "air_pressure",
},
),
P0=((), 1013.25, {"units": "hPa"}),
),
)
original["variable"].encoding.update(
{"cell_measures": "area: areas", "grid_mapping": "latlon"},
)
original.coords["latitude"].encoding.update(
dict(grid_mapping="latlon", bounds="latitude_bnds")
)
original.coords["longitude"].encoding.update(
dict(grid_mapping="latlon", bounds="longitude_bnds")
)
original.coords["ln_p"].encoding.update({"formula_terms": "p0: P0 lev : ln_p"})
return original
def test_grid_mapping_and_bounds_are_not_coordinates_in_file(self):
original = self._create_cf_dataset()
with create_tmp_file() as tmp_file:
original.to_netcdf(tmp_file)
with open_dataset(tmp_file, decode_coords=False) as ds:
assert ds.coords["latitude"].attrs["bounds"] == "latitude_bnds"
assert ds.coords["longitude"].attrs["bounds"] == "longitude_bnds"
assert "coordinates" not in ds["variable"].attrs
assert "coordinates" not in ds.attrs
def test_coordinate_variables_after_dataset_roundtrip(self):
original = self._create_cf_dataset()
with self.roundtrip(original, open_kwargs={"decode_coords": "all"}) as actual:
assert_identical(actual, original)
with self.roundtrip(original) as actual:
expected = original.reset_coords(
["latitude_bnds", "longitude_bnds", "areas", "P0", "latlon"]
)
# equal checks that coords and data_vars are equal which
# should be enough
# identical would require resetting a number of attributes
# skip that.
assert_equal(actual, expected)
def test_grid_mapping_and_bounds_are_coordinates_after_dataarray_roundtrip(self):
original = self._create_cf_dataset()
# The DataArray roundtrip should have the same warnings as the
# Dataset, but we already tested for those, so just go for the
# new warnings. It would appear that there is no way to tell
# pytest "This warning and also this warning should both be
# present".
# xarray/tests/test_conventions.py::TestCFEncodedDataStore
# needs the to_dataset. The other backends should be fine
# without it.
with pytest.warns(
UserWarning,
match=(
r"Variable\(s\) referenced in bounds not in variables: "
r"\['l(at|ong)itude_bnds'\]"
),
):
with self.roundtrip(
original["variable"].to_dataset(), open_kwargs={"decode_coords": "all"}
) as actual:
assert_identical(actual, original["variable"].to_dataset())
@requires_iris
def test_coordinate_variables_after_iris_roundtrip(self):
original = self._create_cf_dataset()
iris_cube = original["variable"].to_iris()
actual = DataArray.from_iris(iris_cube)
# Bounds will be missing (xfail)
del original.coords["latitude_bnds"], original.coords["longitude_bnds"]
# Ancillary vars will be missing
# Those are data_vars, and will be dropped when grabbing the variable
assert_identical(actual, original["variable"])
def test_coordinates_encoding(self):
def equals_latlon(obj):
return obj == "lat lon" or obj == "lon lat"
original = Dataset(
{"temp": ("x", [0, 1]), "precip": ("x", [0, -1])},
{"lat": ("x", [2, 3]), "lon": ("x", [4, 5])},
)
with self.roundtrip(original) as actual:
assert_identical(actual, original)
with create_tmp_file() as tmp_file:
original.to_netcdf(tmp_file)
with open_dataset(tmp_file, decode_coords=False) as ds:
assert equals_latlon(ds["temp"].attrs["coordinates"])
assert equals_latlon(ds["precip"].attrs["coordinates"])
assert "coordinates" not in ds.attrs
assert "coordinates" not in ds["lat"].attrs
assert "coordinates" not in ds["lon"].attrs
modified = original.drop_vars(["temp", "precip"])
with self.roundtrip(modified) as actual:
assert_identical(actual, modified)
with create_tmp_file() as tmp_file:
modified.to_netcdf(tmp_file)
with open_dataset(tmp_file, decode_coords=False) as ds:
assert equals_latlon(ds.attrs["coordinates"])
assert "coordinates" not in ds["lat"].attrs
assert "coordinates" not in ds["lon"].attrs
original["temp"].encoding["coordinates"] = "lat"
with self.roundtrip(original) as actual:
assert_identical(actual, original)
original["precip"].encoding["coordinates"] = "lat"
with create_tmp_file() as tmp_file:
original.to_netcdf(tmp_file)
with open_dataset(tmp_file, decode_coords=True) as ds:
assert "lon" not in ds["temp"].encoding["coordinates"]
assert "lon" not in ds["precip"].encoding["coordinates"]
assert "coordinates" not in ds["lat"].encoding
assert "coordinates" not in ds["lon"].encoding
def test_roundtrip_endian(self):
ds = Dataset(
{
"x": np.arange(3, 10, dtype=">i2"),
"y": np.arange(3, 20, dtype="<i4"),
"z": np.arange(3, 30, dtype="=i8"),
"w": ("x", np.arange(3, 10, dtype=float)),
}
)
with self.roundtrip(ds) as actual:
# technically these datasets are slightly different,
# one hold mixed endian data (ds) the other should be
# all big endian (actual). assertDatasetIdentical
# should still pass though.
assert_identical(ds, actual)
if self.engine == "netcdf4":
ds["z"].encoding["endian"] = "big"
with pytest.raises(NotImplementedError):
with self.roundtrip(ds) as actual:
pass
def test_invalid_dataarray_names_raise(self):
    """Saving a dataset whose variable name is not a usable string raises."""
    type_err = (TypeError, "string or None")
    value_err = (ValueError, "string must be length 1 or")
    array = xr.DataArray(np.random.random((2, 2)))
    cases = [
        (0, type_err),
        ((4, 5), type_err),
        (True, type_err),
        ("", value_err),
    ]
    for bad_name, (error, msg) in cases:
        ds = Dataset({bad_name: array})
        with pytest.raises(error) as excinfo:
            with self.roundtrip(ds):
                pass
        # the error message must name both the reason and the offending key
        excinfo.match(msg)
        excinfo.match(repr(bad_name))
def test_encoding_kwarg(self):
    """The ``encoding`` save kwarg is applied on disk but never mutates ``ds``."""
    ds = Dataset({"x": ("y", np.arange(10.0))})
    kwargs = dict(encoding={"x": {"dtype": "f4"}})
    with self.roundtrip(ds, save_kwargs=kwargs) as actual:
        encoded_dtype = actual.x.encoding["dtype"]
        # On OS X, dtype sometimes switches endianness for unclear reasons
        assert encoded_dtype.kind == "f" and encoded_dtype.itemsize == 4
    # the in-memory dataset must be left untouched
    assert ds.x.encoding == {}

    # unknown encoding fields are rejected
    kwargs = dict(encoding={"x": {"foo": "bar"}})
    with pytest.raises(ValueError, match=r"unexpected encoding"):
        with self.roundtrip(ds, save_kwargs=kwargs) as actual:
            pass

    # encoding values must be dict-like
    kwargs = dict(encoding={"x": "foo"})
    with pytest.raises(ValueError, match=r"must be castable"):
        with self.roundtrip(ds, save_kwargs=kwargs) as actual:
            pass

    # encoding for a variable that is not in the dataset is an error
    kwargs = dict(encoding={"invalid": {}})
    with pytest.raises(KeyError):
        with self.roundtrip(ds, save_kwargs=kwargs) as actual:
            pass
def test_encoding_kwarg_dates(self):
    """Datetime units supplied via the ``encoding`` kwarg survive a round trip."""
    units = "days since 1900-01-01"
    dataset = Dataset({"t": pd.date_range("2000-01-01", periods=3)})
    save_kwargs = {"encoding": {"t": {"units": units}}}
    with self.roundtrip(dataset, save_kwargs=save_kwargs) as actual:
        assert actual.t.encoding["units"] == units
        assert_identical(actual, dataset)
def test_encoding_kwarg_fixed_width_string(self):
    """Fixed-width ("S1") dtype encoding works for bytes and str input.

    Regression test for GH2149.
    """
    byte_strings = [b"foo", b"bar", b"baz"]
    text_strings = ["foo", "bar", "baz"]
    for strings in (byte_strings, text_strings):
        ds = Dataset({"x": strings})
        save_kwargs = {"encoding": {"x": {"dtype": "S1"}}}
        with self.roundtrip(ds, save_kwargs=save_kwargs) as actual:
            assert actual["x"].encoding["dtype"] == "S1"
            assert_identical(actual, ds)
def test_default_fill_value(self):
    """Float variables get a NaN ``_FillValue`` by default; ints get none."""
    # Test default encoding for float:
    ds = Dataset({"x": ("y", np.arange(10.0))})
    kwargs = dict(encoding={"x": {"dtype": "f4"}})
    with self.roundtrip(ds, save_kwargs=kwargs) as actual:
        assert math.isnan(actual.x.encoding["_FillValue"])
    assert ds.x.encoding == {}

    # Test default encoding for int:
    ds = Dataset({"x": ("y", np.arange(10.0))})
    kwargs = dict(encoding={"x": {"dtype": "int16"}})
    with warnings.catch_warnings():
        # float->int conversion warning is expected and irrelevant here
        warnings.filterwarnings("ignore", ".*floating point data as an integer")
        with self.roundtrip(ds, save_kwargs=kwargs) as actual:
            assert "_FillValue" not in actual.x.encoding
    assert ds.x.encoding == {}

    # Test default encoding for implicit int:
    ds = Dataset({"x": ("y", np.arange(10, dtype="int16"))})
    with self.roundtrip(ds) as actual:
        assert "_FillValue" not in actual.x.encoding
    assert ds.x.encoding == {}
def test_explicitly_omit_fill_value(self):
    """Setting ``_FillValue = None`` on a variable omits it on disk."""
    dataset = Dataset({"x": ("y", [np.pi, -np.pi])})
    dataset.x.encoding["_FillValue"] = None
    with self.roundtrip(dataset) as actual:
        assert "_FillValue" not in actual.x.encoding
def test_explicitly_omit_fill_value_via_encoding_kwarg(self):
    """``_FillValue: None`` passed via the ``encoding`` kwarg omits it on disk."""
    ds = Dataset({"x": ("y", [np.pi, -np.pi])})
    kwargs = dict(encoding={"x": {"_FillValue": None}})
    with self.roundtrip(ds, save_kwargs=kwargs) as actual:
        assert "_FillValue" not in actual.x.encoding
    # NOTE(review): "y" is a bare dimension here with no coordinate variable
    # (contrast the sibling ..._in_coord_via_encoding_kwarg test, which
    # creates a "y" coord before touching ``ds.y``).  Confirm that ``ds.y``
    # actually resolves, and whether the intent was ``ds.x.encoding == {}``
    # (i.e. "the save kwarg did not mutate the target variable's encoding").
    assert ds.y.encoding == {}
def test_explicitly_omit_fill_value_in_coord(self):
    """``_FillValue = None`` on a coordinate variable omits it on disk."""
    dataset = Dataset(
        {"x": ("y", [np.pi, -np.pi])}, coords={"y": [0.0, 1.0]}
    )
    dataset.y.encoding["_FillValue"] = None
    with self.roundtrip(dataset) as actual:
        assert "_FillValue" not in actual.y.encoding
def test_explicitly_omit_fill_value_in_coord_via_encoding_kwarg(self):
    """``_FillValue: None`` via the ``encoding`` kwarg works for coordinates."""
    dataset = Dataset(
        {"x": ("y", [np.pi, -np.pi])}, coords={"y": [0.0, 1.0]}
    )
    save_kwargs = {"encoding": {"y": {"_FillValue": None}}}
    with self.roundtrip(dataset, save_kwargs=save_kwargs) as actual:
        assert "_FillValue" not in actual.y.encoding
    # the save kwarg must not mutate the in-memory encoding
    assert dataset.y.encoding == {}
def test_encoding_same_dtype(self):
    """Requesting the variable's existing dtype in encoding is a clean no-op."""
    ds = Dataset({"x": ("y", np.arange(10.0, dtype="f4"))})
    kwargs = dict(encoding={"x": {"dtype": "f4"}})
    with self.roundtrip(ds, save_kwargs=kwargs) as actual:
        encoded_dtype = actual.x.encoding["dtype"]
        # On OS X, dtype sometimes switches endianness for unclear reasons
        assert encoded_dtype.kind == "f" and encoded_dtype.itemsize == 4
    # the in-memory dataset must be left untouched
    assert ds.x.encoding == {}
def test_append_write(self):
    """An append-mode round trip of a fresh dataset is lossless.

    Regression test for GH1215.
    """
    expected = create_test_data()
    with self.roundtrip_append(expected) as actual:
        assert_identical(expected, actual)
def test_append_overwrite_values(self):
    """Appending with ``mode="a"`` overwrites values of existing variables.

    Regression test for GH1215.
    """
    data = create_test_data()
    with create_tmp_file(allow_cleanup_failure=False) as tmp_file:
        self.save(data, tmp_file, mode="w")
        # mutate an existing variable and add a new one, then append
        data["var2"][:] = -999
        data["var9"] = data["var2"] * 3
        self.save(data[["var2", "var9"]], tmp_file, mode="a")
        with self.open(tmp_file) as actual:
            assert_identical(data, actual)
def test_append_with_invalid_dim_raises(self):
    """Appending data whose dimension size changed raises a clear ValueError."""
    data = create_test_data()
    with create_tmp_file(allow_cleanup_failure=False) as tmp_file:
        self.save(data, tmp_file, mode="w")
        data["var9"] = data["var2"] * 3
        data = data.isel(dim1=slice(2, 6))  # modify one dimension
        with pytest.raises(
            ValueError, match=r"Unable to update size for existing dimension"
        ):
            self.save(data, tmp_file, mode="a")
def test_multiindex_not_implemented(self):
    """Serializing a MultiIndex is unsupported and raises NotImplementedError."""
    dataset = Dataset(
        coords={"y": ("x", [1, 2]), "z": ("x", ["a", "b"])}
    ).set_index(x=["y", "z"])
    with pytest.raises(NotImplementedError, match=r"MultiIndex"):
        with self.roundtrip(dataset):
            pass
# Module-level counter: gives every temporary file a unique name.
_counter = itertools.count()


@contextlib.contextmanager
def create_tmp_file(suffix=".nc", allow_cleanup_failure=False):
    """Yield a unique temporary file path and remove its directory on exit.

    :param suffix: extension appended to the generated filename.
    :param allow_cleanup_failure: if True, swallow ``OSError`` raised while
        removing the temporary directory (e.g. files still open on Windows).
    """
    temp_dir = tempfile.mkdtemp()
    path = os.path.join(temp_dir, f"temp-{next(_counter)}{suffix}")
    try:
        yield path
    finally:
        try:
            shutil.rmtree(temp_dir)
        except OSError:
            if not allow_cleanup_failure:
                raise
@contextlib.contextmanager
def create_tmp_files(nfiles, suffix=".nc", allow_cleanup_failure=False):
    """Yield a list of ``nfiles`` unique temporary file paths.

    Cleanup of every path is delegated to :func:`create_tmp_file` via an
    ``ExitStack``, so all temporary directories are removed when the block
    exits (or leaks are tolerated per ``allow_cleanup_failure``).
    """
    with ExitStack() as stack:
        # plain range() instead of np.arange(): we only need a loop counter,
        # and the index itself is unused
        files = [
            stack.enter_context(create_tmp_file(suffix, allow_cleanup_failure))
            for _ in range(nfiles)
        ]
        yield files
class NetCDF4Base(CFEncodedBase):
    """Tests for both netCDF4-python and h5netcdf."""

    engine = "netcdf4"

    def test_open_group(self):
        """Datasets stored inside a netCDF group can be opened via ``group=``."""
        # Create a netCDF file with a dataset stored within a group
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, "w") as rootgrp:
                foogrp = rootgrp.createGroup("foo")
                ds = foogrp
                ds.createDimension("time", size=10)
                x = np.arange(10)
                ds.createVariable("x", np.int32, dimensions=("time",))
                ds.variables["x"][:] = x

            expected = Dataset()
            expected["x"] = ("time", x)

            # check equivalent ways to specify group
            for group in "foo", "/foo", "foo/", "/foo/":
                with self.open(tmp_file, group=group) as actual:
                    assert_equal(actual["x"], expected["x"])

            # check that missing group raises appropriate exception
            with pytest.raises(OSError):
                open_dataset(tmp_file, group="bar")
            with pytest.raises(ValueError, match=r"must be a string"):
                open_dataset(tmp_file, group=(1, 2, 3))

    def test_open_subgroup(self):
        """Nested (group-within-group) paths are also accepted by ``group=``."""
        # Create a netCDF file with a dataset stored within a group within a
        # group
        with create_tmp_file() as tmp_file:
            rootgrp = nc4.Dataset(tmp_file, "w")
            foogrp = rootgrp.createGroup("foo")
            bargrp = foogrp.createGroup("bar")
            ds = bargrp
            ds.createDimension("time", size=10)
            x = np.arange(10)
            ds.createVariable("x", np.int32, dimensions=("time",))
            ds.variables["x"][:] = x
            rootgrp.close()

            expected = Dataset()
            expected["x"] = ("time", x)

            # check equivalent ways to specify group
            for group in "foo/bar", "/foo/bar", "foo/bar/", "/foo/bar/":
                with self.open(tmp_file, group=group) as actual:
                    assert_equal(actual["x"], expected["x"])

    def test_write_groups(self):
        """Two datasets written to different groups of one file stay separate."""
        data1 = create_test_data()
        data2 = data1 * 2
        with create_tmp_file() as tmp_file:
            self.save(data1, tmp_file, group="data/1")
            # mode="a" appends the second group instead of clobbering the file
            self.save(data2, tmp_file, group="data/2", mode="a")
            with self.open(tmp_file, group="data/1") as actual1:
                assert_identical(data1, actual1)
            with self.open(tmp_file, group="data/2") as actual2:
                assert_identical(data2, actual2)

    def test_encoding_kwarg_vlen_string(self):
        """``dtype=str`` encoding stores variable-length unicode strings."""
        for input_strings in [[b"foo", b"bar", b"baz"], ["foo", "bar", "baz"]]:
            original = Dataset({"x": input_strings})
            expected = Dataset({"x": ["foo", "bar", "baz"]})
            kwargs = dict(encoding={"x": {"dtype": str}})
            with self.roundtrip(original, save_kwargs=kwargs) as actual:
                assert actual["x"].encoding["dtype"] is str
                assert_identical(actual, expected)

    def test_roundtrip_string_with_fill_value_vlen(self):
        """Explicit fill values on vlen strings are (still) unsupported."""
        values = np.array(["ab", "cdef", np.nan], dtype=object)
        expected = Dataset({"x": ("t", values)})

        # netCDF4-based backends don't support an explicit fillvalue
        # for variable length strings yet.
        # https://github.com/Unidata/netcdf4-python/issues/730
        # https://github.com/h5netcdf/h5netcdf/issues/37
        original = Dataset({"x": ("t", values, {}, {"_FillValue": "XXX"})})
        with pytest.raises(NotImplementedError):
            with self.roundtrip(original) as actual:
                assert_identical(expected, actual)

        original = Dataset({"x": ("t", values, {}, {"_FillValue": ""})})
        with pytest.raises(NotImplementedError):
            with self.roundtrip(original) as actual:
                assert_identical(expected, actual)

    def test_roundtrip_character_array(self):
        """Character arrays on disk are decoded to fixed-width byte strings."""
        with create_tmp_file() as tmp_file:
            values = np.array([["a", "b", "c"], ["d", "e", "f"]], dtype="S")

            with nc4.Dataset(tmp_file, mode="w") as nc:
                nc.createDimension("x", 2)
                nc.createDimension("string3", 3)
                v = nc.createVariable("x", np.dtype("S1"), ("x", "string3"))
                v[:] = values

            values = np.array(["abc", "def"], dtype="S")
            expected = Dataset({"x": ("x", values)})
            with open_dataset(tmp_file) as actual:
                assert_identical(expected, actual)
                # regression test for #157
                with self.roundtrip(actual) as roundtripped:
                    assert_identical(expected, roundtripped)

    def test_default_to_char_arrays(self):
        """Fixed-width byte strings default to on-disk char arrays."""
        data = Dataset({"x": np.array(["foo", "zzzz"], dtype="S")})
        with self.roundtrip(data) as actual:
            assert_identical(data, actual)
            assert actual["x"].dtype == np.dtype("S4")

    def test_open_encodings(self):
        """Time units written by raw netCDF4 appear in the variable encoding."""
        # Create a netCDF file with explicit time units
        # and make sure it makes it into the encodings
        # and survives a round trip
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, "w") as ds:
                ds.createDimension("time", size=10)
                ds.createVariable("time", np.int32, dimensions=("time",))
                units = "days since 1999-01-01"
                ds.variables["time"].setncattr("units", units)
                ds.variables["time"][:] = np.arange(10) + 4

            expected = Dataset()
            time = pd.date_range("1999-01-05", periods=10)
            encoding = {"units": units, "dtype": np.dtype("int32")}
            expected["time"] = ("time", time, {}, encoding)

            with open_dataset(tmp_file) as actual:
                assert_equal(actual["time"], expected["time"])
                # compare only the encoding keys we set explicitly
                actual_encoding = {
                    k: v
                    for k, v in actual["time"].encoding.items()
                    if k in expected["time"].encoding
                }
                assert actual_encoding == expected["time"].encoding

    def test_dump_encodings(self):
        """Compression flags given as encoding survive the write (GH709)."""
        # regression test for #709
        ds = Dataset({"x": ("y", np.arange(10.0))})
        kwargs = dict(encoding={"x": {"zlib": True}})
        with self.roundtrip(ds, save_kwargs=kwargs) as actual:
            assert actual.x.encoding["zlib"]

    def test_dump_and_open_encodings(self):
        """Time-unit encodings survive open -> to_netcdf -> raw re-read."""
        # Create a netCDF file with explicit time units
        # and make sure it makes it into the encodings
        # and survives a round trip
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, "w") as ds:
                ds.createDimension("time", size=10)
                ds.createVariable("time", np.int32, dimensions=("time",))
                units = "days since 1999-01-01"
                ds.variables["time"].setncattr("units", units)
                ds.variables["time"][:] = np.arange(10) + 4

            with open_dataset(tmp_file) as xarray_dataset:
                with create_tmp_file() as tmp_file2:
                    xarray_dataset.to_netcdf(tmp_file2)
                    # verify via the raw netCDF4 API, bypassing xarray decode
                    with nc4.Dataset(tmp_file2, "r") as ds:
                        assert ds.variables["time"].getncattr("units") == units
                        assert_array_equal(ds.variables["time"], np.arange(10) + 4)

    def test_compression_encoding(self):
        """zlib/chunk/fletcher32/shuffle encodings round-trip intact."""
        data = create_test_data()
        data["var2"].encoding.update(
            {
                "zlib": True,
                "chunksizes": (5, 5),
                "fletcher32": True,
                "shuffle": True,
                "original_shape": data.var2.shape,
            }
        )
        with self.roundtrip(data) as actual:
            for k, v in data["var2"].encoding.items():
                assert v == actual["var2"].encoding[k]

        # regression test for #156
        expected = data.isel(dim1=0)
        with self.roundtrip(expected) as actual:
            assert_equal(expected, actual)

    def test_encoding_kwarg_compression(self):
        """Compression options via the ``encoding`` kwarg reach the file."""
        ds = Dataset({"x": np.arange(10.0)})
        encoding = dict(
            dtype="f4",
            zlib=True,
            complevel=9,
            fletcher32=True,
            chunksizes=(5,),
            shuffle=True,
        )
        kwargs = dict(encoding=dict(x=encoding))

        with self.roundtrip(ds, save_kwargs=kwargs) as actual:
            assert_equal(actual, ds)
            assert actual.x.encoding["dtype"] == "f4"
            assert actual.x.encoding["zlib"]
            assert actual.x.encoding["complevel"] == 9
            assert actual.x.encoding["fletcher32"]
            assert actual.x.encoding["chunksizes"] == (5,)
            assert actual.x.encoding["shuffle"]

        # the in-memory dataset must be left untouched
        assert ds.x.encoding == {}

    def test_keep_chunksizes_if_no_original_shape(self):
        """A ``chunksizes`` encoding without ``original_shape`` is preserved."""
        ds = Dataset({"x": [1, 2, 3]})
        chunksizes = (2,)
        ds.variables["x"].encoding = {"chunksizes": chunksizes}

        with self.roundtrip(ds) as actual:
            assert_identical(ds, actual)
            assert_array_equal(
                ds["x"].encoding["chunksizes"], actual["x"].encoding["chunksizes"]
            )

    def test_encoding_chunksizes_unlimited(self):
        """Huge chunksizes with an unlimited-style encoding write cleanly.

        Regression test for GH1225.
        """
        ds = Dataset({"x": [1, 2, 3], "y": ("x", [2, 3, 4])})
        ds.variables["x"].encoding = {
            "zlib": False,
            "shuffle": False,
            "complevel": 0,
            "fletcher32": False,
            "contiguous": False,
            "chunksizes": (2**20,),
            "original_shape": (3,),
        }
        with self.roundtrip(ds) as actual:
            assert_equal(ds, actual)

    def test_mask_and_scale(self):
        """_FillValue masking plus add_offset/scale_factor decoding works."""
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, mode="w") as nc:
                nc.createDimension("t", 5)
                nc.createVariable("x", "int16", ("t",), fill_value=-1)
                v = nc.variables["x"]
                # write raw values; decoding is exercised on read
                v.set_auto_maskandscale(False)
                v.add_offset = 10
                v.scale_factor = 0.1
                v[:] = np.array([-1, -1, 0, 1, 2])

            # first make sure netCDF4 reads the masked and scaled data
            # correctly
            with nc4.Dataset(tmp_file, mode="r") as nc:
                expected = np.ma.array(
                    [-1, -1, 10, 10.1, 10.2], mask=[True, True, False, False, False]
                )
                actual = nc.variables["x"][:]
                assert_array_equal(expected, actual)

            # now check xarray
            with open_dataset(tmp_file) as ds:
                expected = create_masked_and_scaled_data()
                assert_identical(expected, ds)

    def test_0dimensional_variable(self):
        """Scalar (0-d) variables open correctly.

        This fix verifies our work-around to this netCDF4-python bug:
        https://github.com/Unidata/netcdf4-python/pull/220
        """
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, mode="w") as nc:
                v = nc.createVariable("x", "int16")
                v[...] = 123

            with open_dataset(tmp_file) as ds:
                expected = Dataset({"x": ((), 123)})
                assert_identical(expected, ds)

    def test_read_variable_len_strings(self):
        """Variable-length string variables read back as object arrays."""
        with create_tmp_file() as tmp_file:
            values = np.array(["foo", "bar", "baz"], dtype=object)

            with nc4.Dataset(tmp_file, mode="w") as nc:
                nc.createDimension("x", 3)
                v = nc.createVariable("x", str, ("x",))
                v[:] = values

            expected = Dataset({"x": ("x", values)})
            # result should be the same with and without CF decoding
            for kwargs in [{}, {"decode_cf": True}]:
                with open_dataset(tmp_file, **kwargs) as actual:
                    assert_identical(expected, actual)

    def test_encoding_unlimited_dims(self):
        """Unlimited dimensions survive via kwarg or dataset-level encoding."""
        ds = Dataset({"x": ("y", np.arange(10.0))})
        with self.roundtrip(ds, save_kwargs=dict(unlimited_dims=["y"])) as actual:
            assert actual.encoding["unlimited_dims"] == set("y")
            assert_equal(ds, actual)
        ds.encoding = {"unlimited_dims": ["y"]}
        with self.roundtrip(ds) as actual:
            assert actual.encoding["unlimited_dims"] == set("y")
            assert_equal(ds, actual)
@requires_netCDF4
class TestNetCDF4Data(NetCDF4Base):
    """Runs the :class:`NetCDF4Base` suite against the netCDF4-python backend."""

    @contextlib.contextmanager
    def create_store(self):
        # a writable NetCDF4DataStore backed by a fresh temporary file
        with create_tmp_file() as tmp_file:
            with backends.NetCDF4DataStore.open(tmp_file, mode="w") as store:
                yield store

    def test_variable_order(self):
        """Variable insertion order is preserved through a round trip."""
        # doesn't work with scipy or h5py :(
        ds = Dataset()
        ds["a"] = 1
        ds["z"] = 2
        ds["b"] = 3
        ds.coords["c"] = 4
        with self.roundtrip(ds) as actual:
            assert list(ds.variables) == list(actual.variables)

    def test_unsorted_index_raises(self):
        """An unsorted-index read failure carries a helpful error message."""
        # should be fixed in netcdf4 v1.2.1
        random_data = np.random.random(size=(4, 6))
        dim0 = [0, 1, 2, 3]
        dim1 = [0, 2, 1, 3, 5, 4]  # We will sort this in a later step
        da = xr.DataArray(
            data=random_data,
            dims=("dim0", "dim1"),
            coords={"dim0": dim0, "dim1": dim1},
            name="randovar",
        )
        ds = da.to_dataset()
        with self.roundtrip(ds) as ondisk:
            inds = np.argsort(dim1)
            ds2 = ondisk.isel(dim1=inds)
            # Older versions of NetCDF4 raise an exception here, and if so we
            # want to ensure we improve (that is, replace) the error message
            # (nothing is asserted if no exception is raised, by design)
            try:
                ds2.randovar.values
            except IndexError as err:
                assert "first by calling .load" in str(err)

    def test_setncattr_string(self):
        """String and list-of-string attributes round-trip on files and variables."""
        list_of_strings = ["list", "of", "strings"]
        one_element_list_of_strings = ["one element"]
        one_string = "one string"
        attrs = {
            "foo": list_of_strings,
            "bar": one_element_list_of_strings,
            "baz": one_string,
        }
        ds = Dataset({"x": ("y", [1, 2, 3], attrs)}, attrs=attrs)

        with self.roundtrip(ds) as actual:
            for totest in [actual, actual["x"]]:
                assert_array_equal(list_of_strings, totest.attrs["foo"])
                assert_array_equal(one_element_list_of_strings, totest.attrs["bar"])
                assert one_string == totest.attrs["baz"]
@requires_netCDF4
class TestNetCDF4AlreadyOpen:
    """Opening xarray datasets from already-open netCDF4.Dataset handles."""

    def test_base_case(self):
        """A NetCDF4DataStore can wrap an open root-group handle."""
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, mode="w") as nc:
                v = nc.createVariable("x", "int")
                v[...] = 42

            nc = nc4.Dataset(tmp_file, mode="r")
            store = backends.NetCDF4DataStore(nc)
            with open_dataset(store) as ds:
                expected = Dataset({"x": ((), 42)})
                assert_identical(expected, ds)

    def test_group(self):
        """Groups can be selected either by handle or by ``group=`` name."""
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, mode="w") as nc:
                group = nc.createGroup("g")
                v = group.createVariable("x", "int")
                v[...] = 42

            # pass the group handle directly
            nc = nc4.Dataset(tmp_file, mode="r")
            store = backends.NetCDF4DataStore(nc.groups["g"])
            with open_dataset(store) as ds:
                expected = Dataset({"x": ((), 42)})
                assert_identical(expected, ds)

            # or pass the root handle plus a group name
            nc = nc4.Dataset(tmp_file, mode="r")
            store = backends.NetCDF4DataStore(nc, group="g")
            with open_dataset(store) as ds:
                expected = Dataset({"x": ((), 42)})
                assert_identical(expected, ds)

            # but combining a non-root handle with ``group=`` is rejected
            with nc4.Dataset(tmp_file, mode="r") as nc:
                with pytest.raises(ValueError, match="must supply a root"):
                    backends.NetCDF4DataStore(nc.groups["g"], group="g")

    def test_deepcopy(self):
        """Deep-copying a dataset backed by an open handle works.

        Regression test for https://github.com/pydata/xarray/issues/4425
        """
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, mode="w") as nc:
                nc.createDimension("x", 10)
                v = nc.createVariable("y", np.int32, ("x",))
                v[:] = np.arange(10)

            h5 = nc4.Dataset(tmp_file, mode="r")
            store = backends.NetCDF4DataStore(h5)
            with open_dataset(store) as ds:
                copied = ds.copy(deep=True)
                expected = Dataset({"y": ("x", np.arange(10))})
                assert_identical(expected, copied)
@requires_netCDF4
@requires_dask
@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
class TestNetCDF4ViaDaskData(TestNetCDF4Data):
    """Runs the netCDF4 suite with datasets re-opened as dask arrays."""

    @contextlib.contextmanager
    def roundtrip(
        self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False
    ):
        # Force dask-backed variables on re-open by defaulting to
        # single-chunk arrays unless the caller asked for specific chunks.
        if open_kwargs is None:
            open_kwargs = {}
        if save_kwargs is None:
            save_kwargs = {}
        open_kwargs.setdefault("chunks", -1)
        with TestNetCDF4Data.roundtrip(
            self, data, save_kwargs, open_kwargs, allow_cleanup_failure
        ) as ds:
            yield ds

    def test_unsorted_index_raises(self):
        # Skip when using dask because dask rewrites indexers to getitem,
        # dask first pulls items by block.
        pass

    def test_dataset_caching(self):
        # caching behavior differs for dask
        pass

    def test_write_inconsistent_chunks(self):
        """Variables with different dask chunking can share a single file."""
        # Construct two variables with the same dimensions, but different
        # chunk sizes.
        x = da.zeros((100, 100), dtype="f4", chunks=(50, 100))
        x = DataArray(data=x, dims=("lat", "lon"), name="x")
        x.encoding["chunksizes"] = (50, 100)
        x.encoding["original_shape"] = (100, 100)
        y = da.ones((100, 100), dtype="f4", chunks=(100, 50))
        y = DataArray(data=y, dims=("lat", "lon"), name="y")
        y.encoding["chunksizes"] = (100, 50)
        y.encoding["original_shape"] = (100, 100)
        # Put them both into the same dataset
        ds = Dataset({"x": x, "y": y})
        with self.roundtrip(ds) as actual:
            assert actual["x"].encoding["chunksizes"] == (50, 100)
            assert actual["y"].encoding["chunksizes"] == (100, 50)
@requires_zarr
class ZarrBase(CFEncodedBase):
DIMENSION_KEY = "_ARRAY_DIMENSIONS"
def create_zarr_target(self):
    """Return a context manager yielding a zarr store target (subclass hook)."""
    raise NotImplementedError
@contextlib.contextmanager
def create_store(self):
    # yield a writable ZarrStore over a fresh target from the subclass hook
    with self.create_zarr_target() as store_target:
        yield backends.ZarrStore.open_group(store_target, mode="w")
def save(self, dataset, store_target, **kwargs):
    """Write ``dataset`` to ``store_target``; extra kwargs go to ``to_zarr``."""
    return dataset.to_zarr(store=store_target, **kwargs)
@contextlib.contextmanager
def open(self, store_target, **kwargs):
    # open via the public zarr engine so the full backend path is exercised
    with xr.open_dataset(store_target, engine="zarr", **kwargs) as ds:
        yield ds
@contextlib.contextmanager
def roundtrip(
    self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False
):
    """Save ``data`` to a fresh zarr target and yield it re-opened."""
    save_kwargs = {} if save_kwargs is None else save_kwargs
    open_kwargs = {} if open_kwargs is None else open_kwargs
    with self.create_zarr_target() as target:
        self.save(data, target, **save_kwargs)
        with self.open(target, **open_kwargs) as reopened:
            yield reopened
@pytest.mark.parametrize("consolidated", [False, True, None])
def test_roundtrip_consolidated(self, consolidated):
    """Writing with consolidated metadata round-trips losslessly."""
    expected = create_test_data()
    with self.roundtrip(
        expected,
        save_kwargs={"consolidated": True},
        open_kwargs={"backend_kwargs": {"consolidated": True}},
    ) as actual:
        self.check_dtypes_roundtripped(expected, actual)
        assert_identical(expected, actual)
def test_read_non_consolidated_warning(self):
    """open_zarr warns (but succeeds) on stores without consolidated metadata."""
    expected = create_test_data()
    with self.create_zarr_target() as store:
        expected.to_zarr(store, consolidated=False)
        with pytest.warns(
            RuntimeWarning,
            match="Failed to open Zarr store with consolidated",
        ):
            with xr.open_zarr(store) as ds:
                assert_identical(ds, expected)
def test_with_chunkstore(self):
    """A separate ``chunk_store`` can hold the chunk data for a zarr store."""
    expected = create_test_data()
    with self.create_zarr_target() as store_target, self.create_zarr_target() as chunk_store:
        save_kwargs = {"chunk_store": chunk_store}
        self.save(expected, store_target, **save_kwargs)
        open_kwargs = {"backend_kwargs": {"chunk_store": chunk_store}}
        with self.open(store_target, **open_kwargs) as ds:
            assert_equal(ds, expected)
@requires_dask
def test_auto_chunk(self):
    """``chunks=None`` loads eagerly; ``chunks={}`` reuses on-disk chunks."""
    original = create_test_data().chunk()
    with self.roundtrip(original, open_kwargs={"chunks": None}) as actual:
        for k, v in actual.variables.items():
            # only index variables should be in memory
            assert v._in_memory == (k in actual.dims)
            # there should be no chunks
            assert v.chunks is None
    with self.roundtrip(original, open_kwargs={"chunks": {}}) as actual:
        for k, v in actual.variables.items():
            # only index variables should be in memory
            assert v._in_memory == (k in actual.dims)
            # chunk size should be the same as original
            assert v.chunks == original[k].chunks
@requires_dask
@pytest.mark.filterwarnings("ignore:Specified Dask chunks")
def test_manual_chunk(self):
    """Explicit ``chunks=`` on open rechunks the data as requested."""
    original = create_test_data().chunk({"dim1": 3, "dim2": 4, "dim3": 3})

    # Using chunks = None should return non-chunked arrays
    open_kwargs = {"chunks": None}
    with self.roundtrip(original, open_kwargs=open_kwargs) as actual:
        for k, v in actual.variables.items():
            # only index variables should be in memory
            assert v._in_memory == (k in actual.dims)
            # there should be no chunks
            assert v.chunks is None

    # uniform arrays: a single integer chunk size applies to every dim
    for i in range(2, 6):
        rechunked = original.chunk(chunks=i)
        open_kwargs = {"chunks": i}
        with self.roundtrip(original, open_kwargs=open_kwargs) as actual:
            for k, v in actual.variables.items():
                # only index variables should be in memory
                assert v._in_memory == (k in actual.dims)
                # chunk size should be the same as rechunked
                assert v.chunks == rechunked[k].chunks

    # per-dimension chunk mapping, with encoded chunks overwritten
    chunks = {"dim1": 2, "dim2": 3, "dim3": 5}
    rechunked = original.chunk(chunks=chunks)
    open_kwargs = {
        "chunks": chunks,
        "backend_kwargs": {"overwrite_encoded_chunks": True},
    }
    with self.roundtrip(original, open_kwargs=open_kwargs) as actual:
        for k, v in actual.variables.items():
            assert v.chunks == rechunked[k].chunks

        with self.roundtrip(actual) as auto:
            # encoding should have changed
            for k, v in actual.variables.items():
                assert v.chunks == rechunked[k].chunks

            assert_identical(actual, auto)
            assert_identical(actual.load(), auto.load())
@requires_dask
def test_warning_on_bad_chunks(self):
    """Chunks misaligned with on-disk chunks warn; aligned ones do not."""
    original = create_test_data().chunk({"dim1": 4, "dim2": 3, "dim3": 3})

    bad_chunks = (2, {"dim2": (3, 3, 2, 1)})
    for chunks in bad_chunks:
        kwargs = {"chunks": chunks}
        with pytest.warns(UserWarning):
            with self.roundtrip(original, open_kwargs=kwargs) as actual:
                for k, v in actual.variables.items():
                    # only index variables should be in memory
                    assert v._in_memory == (k in actual.dims)

    good_chunks = ({"dim2": 3}, {"dim3": (6, 4)}, {})
    for chunks in good_chunks:
        kwargs = {"chunks": chunks}
        with assert_no_warnings():
            with self.roundtrip(original, open_kwargs=kwargs) as actual:
                for k, v in actual.variables.items():
                    # only index variables should be in memory
                    assert v._in_memory == (k in actual.dims)
@requires_dask
def test_deprecate_auto_chunk(self):
    """The removed ``auto_chunk`` keyword now raises TypeError."""
    original = create_test_data().chunk()
    with pytest.raises(TypeError):
        with self.roundtrip(original, open_kwargs={"auto_chunk": True}) as actual:
            for k, v in actual.variables.items():
                # only index variables should be in memory
                assert v._in_memory == (k in actual.dims)
                # chunk size should be the same as original
                assert v.chunks == original[k].chunks
    with pytest.raises(TypeError):
        with self.roundtrip(original, open_kwargs={"auto_chunk": False}) as actual:
            for k, v in actual.variables.items():
                # only index variables should be in memory
                assert v._in_memory == (k in actual.dims)
                # there should be no chunks
                assert v.chunks is None
@requires_dask
def test_write_uneven_dask_chunks(self):
    """Dask arrays whose final chunk is smaller write cleanly.

    Regression test for GH#2225.
    """
    original = create_test_data().chunk({"dim1": 3, "dim2": 4, "dim3": 3})
    with self.roundtrip(original, open_kwargs={"chunks": {}}) as actual:
        # removed a leftover debug ``print(k)`` from this loop
        for k, v in actual.data_vars.items():
            assert v.chunks == actual[k].chunks
def test_chunk_encoding(self):
    """Chunking specified purely via encoding reaches the zarr store."""
    # These datasets have no dask chunks. All chunking specified in
    # encoding
    data = create_test_data()
    chunks = (5, 5)
    data["var2"].encoding.update({"chunks": chunks})

    with self.roundtrip(data) as actual:
        assert chunks == actual["var2"].encoding["chunks"]

    # expect an error with non-integer chunks
    data["var2"].encoding.update({"chunks": (5, 4.5)})
    with pytest.raises(TypeError):
        with self.roundtrip(data) as actual:
            pass
@requires_dask
def test_chunk_encoding_with_dask(self):
    """Interactions between dask chunking and zarr chunk encoding."""
    # These datasets DO have dask chunks. Need to check for various
    # interactions between dask and zarr chunks
    ds = xr.DataArray((np.arange(12)), dims="x", name="var1").to_dataset()

    # - no encoding specified -
    # zarr automatically gets chunk information from dask chunks
    ds_chunk4 = ds.chunk({"x": 4})
    with self.roundtrip(ds_chunk4) as actual:
        assert (4,) == actual["var1"].encoding["chunks"]

    # should fail if dask_chunks are irregular...
    ds_chunk_irreg = ds.chunk({"x": (5, 4, 3)})
    with pytest.raises(ValueError, match=r"uniform chunk sizes."):
        with self.roundtrip(ds_chunk_irreg) as actual:
            pass

    # should fail if encoding["chunks"] clashes with dask_chunks
    badenc = ds.chunk({"x": 4})
    badenc.var1.encoding["chunks"] = (6,)
    with pytest.raises(NotImplementedError, match=r"named 'var1' would overlap"):
        with self.roundtrip(badenc) as actual:
            pass

    # unless... the caller explicitly opts out of the safety check
    with self.roundtrip(badenc, save_kwargs={"safe_chunks": False}) as actual:
        # don't actually check equality because the data could be corrupted
        pass

    # if dask chunks (4) are an integer multiple of zarr chunks (2) it should not fail...
    goodenc = ds.chunk({"x": 4})
    goodenc.var1.encoding["chunks"] = (2,)
    with self.roundtrip(goodenc) as actual:
        pass

    # if initial dask chunks are aligned, size of last dask chunk doesn't matter
    goodenc = ds.chunk({"x": (3, 3, 6)})
    goodenc.var1.encoding["chunks"] = (3,)
    with self.roundtrip(goodenc) as actual:
        pass

    goodenc = ds.chunk({"x": (3, 6, 3)})
    goodenc.var1.encoding["chunks"] = (3,)
    with self.roundtrip(goodenc) as actual:
        pass

    # ... also if the last chunk is irregular
    ds_chunk_irreg = ds.chunk({"x": (5, 5, 2)})
    with self.roundtrip(ds_chunk_irreg) as actual:
        assert (5,) == actual["var1"].encoding["chunks"]
    # re-save Zarr arrays
    with self.roundtrip(ds_chunk_irreg) as original:
        with self.roundtrip(original) as actual:
            assert_identical(original, actual)

    # but intermediate unaligned chunks are bad
    badenc = ds.chunk({"x": (3, 5, 3, 1)})
    badenc.var1.encoding["chunks"] = (3,)
    with pytest.raises(
        NotImplementedError, match=r"would overlap multiple dask chunks"
    ):
        with self.roundtrip(badenc) as actual:
            pass

    # - encoding specified -
    # specify compatible encodings
    for chunk_enc in 4, (4,):
        ds_chunk4["var1"].encoding.update({"chunks": chunk_enc})
        with self.roundtrip(ds_chunk4) as actual:
            assert (4,) == actual["var1"].encoding["chunks"]

    # TODO: remove this failure once synchronized overlapping writes are
    # supported by xarray
    ds_chunk4["var1"].encoding.update({"chunks": 5})
    with pytest.raises(NotImplementedError, match=r"named 'var1' would overlap"):
        with self.roundtrip(ds_chunk4) as actual:
            pass
    # override option
    with self.roundtrip(ds_chunk4, save_kwargs={"safe_chunks": False}) as actual:
        # don't actually check equality because the data could be corrupted
        pass
def test_hidden_zarr_keys(self):
    """The dimension-names attribute is written, hidden on decode, required."""
    expected = create_test_data()
    with self.create_store() as store:
        expected.dump_to_store(store)
        zarr_group = store.ds

        # check that a variable hidden attribute is present and correct
        # JSON only has a single array type, which maps to list in Python.
        # In contrast, dims in xarray is always a tuple.
        for var in expected.variables.keys():
            dims = zarr_group[var].attrs[self.DIMENSION_KEY]
            assert dims == list(expected[var].dims)

        with xr.decode_cf(store):
            # make sure it is hidden
            for var in expected.variables.keys():
                assert self.DIMENSION_KEY not in expected[var].attrs

        # put it back and try removing from a variable
        del zarr_group.var2.attrs[self.DIMENSION_KEY]
        # decoding must fail when the hidden attribute is missing
        with pytest.raises(KeyError):
            with xr.decode_cf(store):
                pass
@pytest.mark.parametrize("group", [None, "group1"])
def test_write_persistence_modes(self, group):
    """Zarr persistence modes ("w", "w-", "a", append_dim) behave as documented."""
    original = create_test_data()

    # overwrite mode
    with self.roundtrip(
        original,
        save_kwargs={"mode": "w", "group": group},
        open_kwargs={"group": group},
    ) as actual:
        assert_identical(original, actual)

    # don't overwrite mode
    with self.roundtrip(
        original,
        save_kwargs={"mode": "w-", "group": group},
        open_kwargs={"group": group},
    ) as actual:
        assert_identical(original, actual)

    # make sure overwriting works as expected
    with self.create_zarr_target() as store:
        self.save(original, store)
        # should overwrite with no error
        self.save(original, store, mode="w", group=group)
        with self.open(store, group=group) as actual:
            assert_identical(original, actual)
            # "w-" refuses to overwrite an existing store
            with pytest.raises(ValueError):
                self.save(original, store, mode="w-")

    # check append mode for normal write
    with self.roundtrip(
        original,
        save_kwargs={"mode": "a", "group": group},
        open_kwargs={"group": group},
    ) as actual:
        assert_identical(original, actual)

    # check append mode for append write
    ds, ds_to_append, _ = create_append_test_data()
    with self.create_zarr_target() as store_target:
        ds.to_zarr(store_target, mode="w", group=group)
        ds_to_append.to_zarr(store_target, append_dim="time", group=group)
        original = xr.concat([ds, ds_to_append], dim="time")
        # NOTE(review): this dataset is never explicitly closed — confirm
        # whether a ``with`` block should be used here like elsewhere.
        actual = xr.open_dataset(store_target, group=group, engine="zarr")
        assert_identical(original, actual)
def test_compressor_encoding(self):
    """A custom zarr compressor given via encoding is stored and read back."""
    original = create_test_data()
    # specify a custom compressor
    import zarr

    blosc_comp = zarr.Blosc(cname="zstd", clevel=3, shuffle=2)
    save_kwargs = dict(encoding={"var1": {"compressor": blosc_comp}})
    with self.roundtrip(original, save_kwargs=save_kwargs) as ds:
        actual = ds["var1"].encoding["compressor"]
        # get_config returns a dictionary of compressor attributes
        assert actual.get_config() == blosc_comp.get_config()
def test_group(self):
    """Writing to and reading from a nested zarr group path round-trips."""
    expected = create_test_data()
    group_path = "some/random/path"
    group_kwargs = {"group": group_path}
    with self.roundtrip(
        expected, save_kwargs=group_kwargs, open_kwargs=group_kwargs
    ) as actual:
        assert_identical(expected, actual)
def test_encoding_kwarg_fixed_width_string(self):
    # not relevant for zarr, since we don't use EncodedStringCoder
    pass
# TODO: someone who understands caching should figure out whether caching
# makes sense for Zarr backend
@pytest.mark.xfail(reason="Zarr caching not implemented")
def test_dataset_caching(self):
    super().test_dataset_caching()
def test_append_write(self):
    # re-run the base-class append test under the zarr backend
    super().test_append_write()
def test_append_with_mode_rplus_success(self):
original = Dataset({"foo": ("x", [1])})
modified = Dataset({"foo": ("x", [2])})
with self.create_zarr_target() as store:
original.to_zarr(store)
modified.to_zarr(store, mode="r+")
with self.open(store) as actual:
assert_identical(actual, modified)
    def test_append_with_mode_rplus_fails(self):
        """mode="r+" must reject variables that do not already exist in the store."""
        original = Dataset({"foo": ("x", [1])})
        modified = Dataset({"bar": ("x", [2])})
        with self.create_zarr_target() as store:
            original.to_zarr(store)
            with pytest.raises(
                ValueError, match="dataset contains non-pre-existing variables"
            ):
                modified.to_zarr(store, mode="r+")
    def test_append_with_invalid_dim_raises(self):
        """Appending along a dimension absent from the store is an error."""
        ds, ds_to_append, _ = create_append_test_data()
        with self.create_zarr_target() as store_target:
            ds.to_zarr(store_target, mode="w")
            with pytest.raises(
                ValueError, match="does not match any existing dataset dimensions"
            ):
                ds_to_append.to_zarr(store_target, append_dim="notvalid")
    def test_append_with_no_dims_raises(self):
        """Appending data whose dimension names differ from the store is an error."""
        with self.create_zarr_target() as store_target:
            Dataset({"foo": ("x", [1])}).to_zarr(store_target, mode="w")
            with pytest.raises(ValueError, match="different dimension names"):
                Dataset({"foo": ("y", [2])}).to_zarr(store_target, mode="a")
    def test_append_with_append_dim_not_set_raises(self):
        """mode="a" without ``append_dim`` cannot change a dimension's size."""
        ds, ds_to_append, _ = create_append_test_data()
        with self.create_zarr_target() as store_target:
            ds.to_zarr(store_target, mode="w")
            with pytest.raises(ValueError, match="different dimension sizes"):
                ds_to_append.to_zarr(store_target, mode="a")
    def test_append_with_mode_not_a_raises(self):
        """``append_dim`` is only legal together with append mode."""
        ds, ds_to_append, _ = create_append_test_data()
        with self.create_zarr_target() as store_target:
            ds.to_zarr(store_target, mode="w")
            with pytest.raises(ValueError, match="cannot set append_dim unless"):
                ds_to_append.to_zarr(store_target, mode="w", append_dim="time")
    def test_append_with_existing_encoding_raises(self):
        """Supplying ``encoding`` while appending to existing variables is an error."""
        ds, ds_to_append, _ = create_append_test_data()
        with self.create_zarr_target() as store_target:
            ds.to_zarr(store_target, mode="w")
            with pytest.raises(ValueError, match="but encoding was provided"):
                ds_to_append.to_zarr(
                    store_target,
                    append_dim="time",
                    encoding={"da": {"compressor": None}},
                )
    def test_check_encoding_is_consistent_after_append(self):
        """Appending must preserve the compressor chosen at creation time."""
        ds, ds_to_append, _ = create_append_test_data()
        # check encoding consistency
        with self.create_zarr_target() as store_target:
            import zarr
            compressor = zarr.Blosc()
            encoding = {"da": {"compressor": compressor}}
            ds.to_zarr(store_target, mode="w", encoding=encoding)
            ds_to_append.to_zarr(store_target, append_dim="time")
            actual_ds = xr.open_dataset(store_target, engine="zarr")
            actual_encoding = actual_ds["da"].encoding["compressor"]
            # compressor set on the initial write must survive the append
            assert actual_encoding.get_config() == compressor.get_config()
            assert_identical(
                xr.open_dataset(store_target, engine="zarr").compute(),
                xr.concat([ds, ds_to_append], dim="time"),
            )
    def test_append_with_new_variable(self):
        """mode="a" can add a brand-new variable to an existing store."""
        ds, ds_to_append, ds_with_new_var = create_append_test_data()
        # check append mode for new variable
        with self.create_zarr_target() as store_target:
            xr.concat([ds, ds_to_append], dim="time").to_zarr(store_target, mode="w")
            ds_with_new_var.to_zarr(store_target, mode="a")
            combined = xr.concat([ds, ds_to_append], dim="time")
            combined["new_var"] = ds_with_new_var["new_var"]
            assert_identical(combined, xr.open_dataset(store_target, engine="zarr"))
    @requires_dask
    def test_to_zarr_compute_false_roundtrip(self):
        """compute=False defers writing until the returned Delayed is computed."""
        from dask.delayed import Delayed
        original = create_test_data().chunk()
        with self.create_zarr_target() as store:
            delayed_obj = self.save(original, store, compute=False)
            assert isinstance(delayed_obj, Delayed)
            # make sure target store has not been written to yet
            with pytest.raises(AssertionError):
                with self.open(store) as actual:
                    assert_identical(original, actual)
            delayed_obj.compute()
            with self.open(store) as actual:
                assert_identical(original, actual)
    @requires_dask
    def test_to_zarr_append_compute_false_roundtrip(self):
        """Deferred (compute=False) writes also work for append operations."""
        from dask.delayed import Delayed
        ds, ds_to_append, _ = create_append_test_data()
        ds, ds_to_append = ds.chunk(), ds_to_append.chunk()
        with pytest.warns(SerializationWarning):
            with self.create_zarr_target() as store:
                delayed_obj = self.save(ds, store, compute=False, mode="w")
                assert isinstance(delayed_obj, Delayed)
                # nothing should be on disk until compute() is called
                with pytest.raises(AssertionError):
                    with self.open(store) as actual:
                        assert_identical(ds, actual)
                delayed_obj.compute()
                with self.open(store) as actual:
                    assert_identical(ds, actual)
                delayed_obj = self.save(
                    ds_to_append, store, compute=False, append_dim="time"
                )
                assert isinstance(delayed_obj, Delayed)
                # the append is likewise deferred until compute()
                with pytest.raises(AssertionError):
                    with self.open(store) as actual:
                        assert_identical(
                            xr.concat([ds, ds_to_append], dim="time"), actual
                        )
                delayed_obj.compute()
                with self.open(store) as actual:
                    assert_identical(xr.concat([ds, ds_to_append], dim="time"), actual)
    @pytest.mark.parametrize("chunk", [False, True])
    def test_save_emptydim(self, chunk):
        """Datasets containing a zero-length dimension round-trip cleanly."""
        if chunk and not has_dask:
            pytest.skip("requires dask")
        ds = Dataset({"x": (("a", "b"), np.empty((5, 0))), "y": ("a", [1, 2, 5, 8, 9])})
        if chunk:
            ds = ds.chunk({})  # chunk dataset to save dask array
        with self.roundtrip(ds) as ds_reload:
            assert_identical(ds, ds_reload)
    @pytest.mark.parametrize("consolidated", [False, True])
    @pytest.mark.parametrize("compute", [False, True])
    @pytest.mark.parametrize("use_dask", [False, True])
    def test_write_region(self, consolidated, compute, use_dask):
        """Region writes fill in pieces of a pre-allocated zarr array."""
        if (use_dask or not compute) and not has_dask:
            pytest.skip("requires dask")
        zeros = Dataset({"u": (("x",), np.zeros(10))})
        nonzeros = Dataset({"u": (("x",), np.arange(1, 11))})
        if use_dask:
            zeros = zeros.chunk(2)
            nonzeros = nonzeros.chunk(2)
        with self.create_zarr_target() as store:
            # allocate the full array up front (possibly without computing data)
            zeros.to_zarr(
                store,
                consolidated=consolidated,
                compute=compute,
                encoding={"u": dict(chunks=2)},
            )
            if compute:
                with xr.open_zarr(store, consolidated=consolidated) as actual:
                    assert_identical(actual, zeros)
            # overwrite two elements at a time via region writes
            for i in range(0, 10, 2):
                region = {"x": slice(i, i + 2)}
                nonzeros.isel(region).to_zarr(
                    store, region=region, consolidated=consolidated
                )
            with xr.open_zarr(store, consolidated=consolidated) as actual:
                assert_identical(actual, nonzeros)
@pytest.mark.parametrize("mode", [None, "r+", "a"])
def test_write_region_mode(self, mode):
zeros = Dataset({"u": (("x",), np.zeros(10))})
nonzeros = Dataset({"u": (("x",), np.arange(1, 11))})
with self.create_zarr_target() as store:
zeros.to_zarr(store)
for region in [{"x": slice(5)}, {"x": slice(5, 10)}]:
nonzeros.isel(region).to_zarr(store, region=region, mode=mode)
with xr.open_zarr(store) as actual:
assert_identical(actual, nonzeros)
    @requires_dask
    def test_write_preexisting_override_metadata(self):
        """Metadata should be overridden if mode="a" but not in mode="r+"."""
        original = Dataset(
            {"u": (("x",), np.zeros(10), {"variable": "original"})},
            attrs={"global": "original"},
        )
        both_modified = Dataset(
            {"u": (("x",), np.ones(10), {"variable": "modified"})},
            attrs={"global": "modified"},
        )
        global_modified = Dataset(
            {"u": (("x",), np.ones(10), {"variable": "original"})},
            attrs={"global": "modified"},
        )
        only_new_data = Dataset(
            {"u": (("x",), np.ones(10), {"variable": "original"})},
            attrs={"global": "original"},
        )
        # mode="a": global attrs replaced, variable attrs kept
        with self.create_zarr_target() as store:
            original.to_zarr(store, compute=False)
            both_modified.to_zarr(store, mode="a")
            with self.open(store) as actual:
                # NOTE: this arguably incorrect -- we should probably be
                # overriding the variable metadata, too. See the TODO note in
                # ZarrStore.set_variables.
                assert_identical(actual, global_modified)
        # mode="r+": only the data is replaced, all metadata kept
        with self.create_zarr_target() as store:
            original.to_zarr(store, compute=False)
            both_modified.to_zarr(store, mode="r+")
            with self.open(store) as actual:
                assert_identical(actual, only_new_data)
        with self.create_zarr_target() as store:
            original.to_zarr(store, compute=False)
            # with region, the default mode becomes r+
            both_modified.to_zarr(store, region={"x": slice(None)})
            with self.open(store) as actual:
                assert_identical(actual, only_new_data)
    def test_write_region_errors(self):
        """Invalid ``region`` arguments raise clear TypeErrors/ValueErrors."""
        data = Dataset({"u": (("x",), np.arange(5))})
        data2 = Dataset({"u": (("x",), np.array([10, 11]))})
        @contextlib.contextmanager
        def setup_and_verify_store(expected=data):
            # write `data`, hand the store to the test body, then check the
            # store matches `expected` once the body finishes
            with self.create_zarr_target() as store:
                data.to_zarr(store)
                yield store
                with self.open(store) as actual:
                    assert_identical(actual, expected)
        # verify the base case works
        expected = Dataset({"u": (("x",), np.array([10, 11, 2, 3, 4]))})
        with setup_and_verify_store(expected) as store:
            data2.to_zarr(store, region={"x": slice(2)})
        with setup_and_verify_store() as store:
            with pytest.raises(
                ValueError,
                match=re.escape(
                    "cannot set region unless mode='a', mode='r+' or mode=None"
                ),
            ):
                data.to_zarr(store, region={"x": slice(None)}, mode="w")
        with setup_and_verify_store() as store:
            with pytest.raises(TypeError, match=r"must be a dict"):
                data.to_zarr(store, region=slice(None))
        with setup_and_verify_store() as store:
            with pytest.raises(TypeError, match=r"must be slice objects"):
                data2.to_zarr(store, region={"x": [0, 1]})
        with setup_and_verify_store() as store:
            with pytest.raises(ValueError, match=r"step on all slices"):
                data2.to_zarr(store, region={"x": slice(None, None, 2)})
        with setup_and_verify_store() as store:
            with pytest.raises(
                ValueError,
                match=r"all keys in ``region`` are not in Dataset dimensions",
            ):
                data.to_zarr(store, region={"y": slice(None)})
        with setup_and_verify_store() as store:
            with pytest.raises(
                ValueError,
                match=r"all variables in the dataset to write must have at least one dimension in common",
            ):
                data2.assign(v=2).to_zarr(store, region={"x": slice(2)})
        with setup_and_verify_store() as store:
            with pytest.raises(
                ValueError, match=r"cannot list the same dimension in both"
            ):
                data.to_zarr(store, region={"x": slice(None)}, append_dim="x")
        with setup_and_verify_store() as store:
            with pytest.raises(
                ValueError,
                match=r"variable 'u' already exists with different dimension sizes",
            ):
                data2.to_zarr(store, region={"x": slice(3)})
    @requires_dask
    def test_encoding_chunksizes(self):
        # regression test for GH2278
        # see also test_encoding_chunksizes_unlimited
        nx, ny, nt = 4, 4, 5
        original = xr.Dataset(
            {}, coords={"x": np.arange(nx), "y": np.arange(ny), "t": np.arange(nt)}
        )
        original["v"] = xr.Variable(("x", "y", "t"), np.zeros((nx, ny, nt)))
        original = original.chunk({"t": 1, "x": 2, "y": 2})
        with self.roundtrip(original) as ds1:
            assert_equal(ds1, original)
            # selecting a single step along t must still round-trip
            with self.roundtrip(ds1.isel(t=0)) as ds2:
                assert_equal(ds2, original.isel(t=0))
    @requires_dask
    def test_chunk_encoding_with_partial_dask_chunks(self):
        """Encoding chunks that do not evenly divide dask chunks still work."""
        original = xr.Dataset(
            {"x": xr.DataArray(np.random.random(size=(6, 8)), dims=("a", "b"))}
        ).chunk({"a": 3})
        with self.roundtrip(
            original, save_kwargs={"encoding": {"x": {"chunks": [3, 2]}}}
        ) as ds1:
            assert_equal(ds1, original)
    @requires_dask
    def test_chunk_encoding_with_larger_dask_chunks(self):
        """Dask chunks larger than the encoded zarr chunks still round-trip."""
        original = xr.Dataset({"a": ("x", [1, 2, 3, 4])}).chunk({"x": 2})
        with self.roundtrip(
            original, save_kwargs={"encoding": {"a": {"chunks": [1]}}}
        ) as ds1:
            assert_equal(ds1, original)
    @requires_cftime
    def test_open_zarr_use_cftime(self):
        """``use_cftime=True`` decodes times as cftime objects on read."""
        ds = create_test_data()
        with self.create_zarr_target() as store_target:
            ds.to_zarr(store_target)
            ds_a = xr.open_zarr(store_target)
            assert_identical(ds, ds_a)
            ds_b = xr.open_zarr(store_target, use_cftime=True)
            assert xr.coding.times.contains_cftime_datetimes(ds_b.time)
    def test_write_read_select_write(self):
        # Test for https://github.com/pydata/xarray/issues/4084
        ds = create_test_data()
        # NOTE: using self.roundtrip, which uses open_dataset, will not trigger the bug.
        with self.create_zarr_target() as initial_store:
            ds.to_zarr(initial_store, mode="w")
            ds1 = xr.open_zarr(initial_store)
        # Combination of where+squeeze triggers error on write.
        ds_sel = ds1.where(ds1.coords["dim3"] == "a", drop=True).squeeze("dim3")
        with self.create_zarr_target() as final_store:
            ds_sel.to_zarr(final_store, mode="w")
@requires_zarr
class TestZarrDictStore(ZarrBase):
    """Zarr backend tests run against an in-memory dict store."""

    @contextlib.contextmanager
    def create_zarr_target(self):
        # a plain dict is a valid zarr MutableMapping store
        yield {}
@requires_zarr
class TestZarrDirectoryStore(ZarrBase):
    """Zarr backend tests run against an on-disk directory store."""

    @contextlib.contextmanager
    def create_zarr_target(self):
        with create_tmp_file(suffix=".zarr") as tmp:
            yield tmp
@requires_zarr
@requires_fsspec
def test_zarr_storage_options():
    """``storage_options`` are forwarded to fsspec when writing/opening zarr."""
    pytest.importorskip("aiobotocore")
    ds = create_test_data()
    store_target = "memory://test.zarr"
    ds.to_zarr(store_target, storage_options={"test": "zarr_write"})
    ds_a = xr.open_zarr(store_target, storage_options={"test": "zarr_read"})
    assert_identical(ds, ds_a)
@requires_scipy
class TestScipyInMemoryData(CFEncodedBase, NetCDF3Only):
    """CF-encoding tests for the scipy backend writing to in-memory bytes."""

    engine = "scipy"

    @contextlib.contextmanager
    def create_store(self):
        fobj = BytesIO()
        yield backends.ScipyDataStore(fobj, "w")

    def test_to_netcdf_explicit_engine(self):
        # regression test for GH1321
        Dataset({"foo": 42}).to_netcdf(engine="scipy")

    def test_bytes_pickle(self):
        # datasets opened from in-memory bytes must survive pickling
        data = Dataset({"foo": ("x", [1, 2, 3])})
        fobj = data.to_netcdf()
        with self.open(fobj) as ds:
            unpickled = pickle.loads(pickle.dumps(ds))
            assert_identical(unpickled, data)
@requires_scipy
class TestScipyFileObject(CFEncodedBase, NetCDF3Only):
    """CF-encoding tests for the scipy backend using open file objects."""

    engine = "scipy"

    @contextlib.contextmanager
    def create_store(self):
        fobj = BytesIO()
        yield backends.ScipyDataStore(fobj, "w")

    @contextlib.contextmanager
    def roundtrip(
        self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False
    ):
        # write/read through explicitly opened binary file handles
        if save_kwargs is None:
            save_kwargs = {}
        if open_kwargs is None:
            open_kwargs = {}
        with create_tmp_file() as tmp_file:
            with open(tmp_file, "wb") as f:
                self.save(data, f, **save_kwargs)
            with open(tmp_file, "rb") as f:
                with self.open(f, **open_kwargs) as ds:
                    yield ds

    @pytest.mark.skip(reason="cannot pickle file objects")
    def test_pickle(self):
        pass

    @pytest.mark.skip(reason="cannot pickle file objects")
    def test_pickle_dataarray(self):
        pass
@requires_scipy
class TestScipyFilePath(CFEncodedBase, NetCDF3Only):
    """CF-encoding tests for the scipy backend using file paths."""

    engine = "scipy"

    @contextlib.contextmanager
    def create_store(self):
        with create_tmp_file() as tmp_file:
            with backends.ScipyDataStore(tmp_file, mode="w") as store:
                yield store

    def test_array_attrs(self):
        # scipy only supports 1-dimensional attribute values
        ds = Dataset(attrs={"foo": [[1, 2], [3, 4]]})
        with pytest.raises(ValueError, match=r"must be 1-dimensional"):
            with self.roundtrip(ds):
                pass

    def test_roundtrip_example_1_netcdf_gz(self):
        # gzipped netCDF3 files can be opened directly
        with open_example_dataset("example_1.nc.gz") as expected:
            with open_example_dataset("example_1.nc") as actual:
                assert_identical(expected, actual)

    def test_netcdf3_endianness(self):
        # regression test for GH416
        with open_example_dataset("bears.nc", engine="scipy") as expected:
            for var in expected.variables.values():
                assert var.dtype.isnative

    @requires_netCDF4
    def test_nc4_scipy(self):
        # scipy cannot read netCDF4; the error should suggest installing netcdf4
        with create_tmp_file(allow_cleanup_failure=True) as tmp_file:
            with nc4.Dataset(tmp_file, "w", format="NETCDF4") as rootgrp:
                rootgrp.createGroup("foo")
            with pytest.raises(TypeError, match=r"pip install netcdf4"):
                open_dataset(tmp_file, engine="scipy")
@requires_netCDF4
class TestNetCDF3ViaNetCDF4Data(CFEncodedBase, NetCDF3Only):
    """netCDF3 files written through the netCDF4-python library."""

    engine = "netcdf4"
    file_format = "NETCDF3_CLASSIC"

    @contextlib.contextmanager
    def create_store(self):
        with create_tmp_file() as tmp_file:
            with backends.NetCDF4DataStore.open(
                tmp_file, mode="w", format="NETCDF3_CLASSIC"
            ) as store:
                yield store

    def test_encoding_kwarg_vlen_string(self):
        # variable-length strings do not exist in the netCDF3 data model
        original = Dataset({"x": ["foo", "bar", "baz"]})
        kwargs = dict(encoding={"x": {"dtype": str}})
        with pytest.raises(ValueError, match=r"encoding dtype=str for vlen"):
            with self.roundtrip(original, save_kwargs=kwargs):
                pass
@requires_netCDF4
class TestNetCDF4ClassicViaNetCDF4Data(CFEncodedBase, NetCDF3Only):
    """NETCDF4_CLASSIC files written through the netCDF4-python library."""

    engine = "netcdf4"
    file_format = "NETCDF4_CLASSIC"

    @contextlib.contextmanager
    def create_store(self):
        with create_tmp_file() as tmp_file:
            with backends.NetCDF4DataStore.open(
                tmp_file, mode="w", format="NETCDF4_CLASSIC"
            ) as store:
                yield store
@requires_scipy_or_netCDF4
class TestGenericNetCDFData(CFEncodedBase, NetCDF3Only):
    """Tests requiring only *some* netCDF3-capable backend.

    Verifies that netCDF3 files can be read and written as long as either
    scipy or netCDF4-python is installed.
    """

    file_format = "netcdf3_64bit"

    def test_write_store(self):
        # there's no specific store to test here
        pass

    @requires_scipy
    def test_engine(self):
        """Unknown or inapplicable engines raise informative ValueErrors."""
        data = create_test_data()
        with pytest.raises(ValueError, match=r"unrecognized engine"):
            data.to_netcdf("foo.nc", engine="foobar")
        with pytest.raises(ValueError, match=r"invalid engine"):
            # netcdf4 cannot write to an in-memory (bytes) target
            data.to_netcdf(engine="netcdf4")
        with create_tmp_file() as tmp_file:
            data.to_netcdf(tmp_file)
            with pytest.raises(ValueError, match=r"unrecognized engine"):
                open_dataset(tmp_file, engine="foobar")
        netcdf_bytes = data.to_netcdf()
        with pytest.raises(ValueError, match=r"unrecognized engine"):
            open_dataset(BytesIO(netcdf_bytes), engine="foobar")

    def test_cross_engine_read_write_netcdf3(self):
        """Files written by one engine must be readable by every other engine."""
        data = create_test_data()
        valid_engines = set()
        if has_netCDF4:
            valid_engines.add("netcdf4")
        if has_scipy:
            valid_engines.add("scipy")
        # `fmt` (not `format`) to avoid shadowing the builtin
        for write_engine in valid_engines:
            for fmt in self.netcdf3_formats:
                with create_tmp_file() as tmp_file:
                    data.to_netcdf(tmp_file, format=fmt, engine=write_engine)
                    for read_engine in valid_engines:
                        with open_dataset(tmp_file, engine=read_engine) as actual:
                            # hack to allow test to work:
                            # coord comes back as DataArray rather than coord,
                            # and so need to loop through here rather than in
                            # the test function (or we get recursion)
                            # (plain loop: previously a list comprehension was
                            # used purely for its side effects)
                            for k in data.variables:
                                assert_allclose(data[k].variable, actual[k].variable)

    def test_encoding_unlimited_dims(self):
        """``unlimited_dims`` round-trips whether given as a list or a string."""
        ds = Dataset({"x": ("y", np.arange(10.0))})
        with self.roundtrip(ds, save_kwargs=dict(unlimited_dims=["y"])) as actual:
            assert actual.encoding["unlimited_dims"] == {"y"}
            assert_equal(ds, actual)
        # Regression test for https://github.com/pydata/xarray/issues/2134
        with self.roundtrip(ds, save_kwargs=dict(unlimited_dims="y")) as actual:
            assert actual.encoding["unlimited_dims"] == {"y"}
            assert_equal(ds, actual)
        ds.encoding = {"unlimited_dims": ["y"]}
        with self.roundtrip(ds) as actual:
            assert actual.encoding["unlimited_dims"] == {"y"}
            assert_equal(ds, actual)
        # Regression test for https://github.com/pydata/xarray/issues/2134
        ds.encoding = {"unlimited_dims": "y"}
        with self.roundtrip(ds) as actual:
            assert actual.encoding["unlimited_dims"] == {"y"}
            assert_equal(ds, actual)
@requires_h5netcdf
@requires_netCDF4
@pytest.mark.filterwarnings("ignore:use make_scale(name) instead")
class TestH5NetCDFData(NetCDF4Base):
    """NetCDF4 feature tests run against the h5netcdf backend."""

    engine = "h5netcdf"

    @contextlib.contextmanager
    def create_store(self):
        with create_tmp_file() as tmp_file:
            yield backends.H5NetCDFStore.open(tmp_file, "w")

    @pytest.mark.filterwarnings("ignore:complex dtypes are supported by h5py")
    @pytest.mark.parametrize(
        "invalid_netcdf, warntype, num_warns",
        [
            pytest.param(
                None,
                FutureWarning,
                1,
                marks=pytest.mark.skipif(has_h5netcdf_0_12, reason="raises"),
            ),
            pytest.param(
                False,
                FutureWarning,
                1,
                marks=pytest.mark.skipif(has_h5netcdf_0_12, reason="raises"),
            ),
            (True, None, 0),
        ],
    )
    def test_complex(self, invalid_netcdf, warntype, num_warns):
        # complex values round-trip; without invalid_netcdf=True a FutureWarning
        # is expected (only on h5netcdf < 0.12 — newer versions raise instead)
        expected = Dataset({"x": ("y", np.ones(5) + 1j * np.ones(5))})
        save_kwargs = {"invalid_netcdf": invalid_netcdf}
        with pytest.warns(warntype) as record:
            with self.roundtrip(expected, save_kwargs=save_kwargs) as actual:
                assert_equal(expected, actual)
        # count only the warnings about complex dtypes of the expected type
        recorded_num_warns = 0
        if warntype:
            for warning in record:
                if issubclass(warning.category, warntype) and (
                    "complex dtypes" in str(warning.message)
                ):
                    recorded_num_warns += 1
        assert recorded_num_warns == num_warns

    @requires_h5netcdf_0_12
    @pytest.mark.parametrize("invalid_netcdf", [None, False])
    def test_complex_error(self, invalid_netcdf):
        # h5netcdf >= 0.12 raises for complex dtypes instead of warning
        import h5netcdf
        expected = Dataset({"x": ("y", np.ones(5) + 1j * np.ones(5))})
        save_kwargs = {"invalid_netcdf": invalid_netcdf}
        with pytest.raises(
            h5netcdf.CompatibilityError, match="are not a supported NetCDF feature"
        ):
            with self.roundtrip(expected, save_kwargs=save_kwargs) as actual:
                assert_equal(expected, actual)

    def test_numpy_bool_(self):
        # h5netcdf loads booleans as numpy.bool_, this type needs to be supported
        # when writing invalid_netcdf datasets in order to support a roundtrip
        expected = Dataset({"x": ("y", np.ones(5), {"numpy_bool": np.bool_(True)})})
        save_kwargs = {"invalid_netcdf": True}
        with self.roundtrip(expected, save_kwargs=save_kwargs) as actual:
            assert_identical(expected, actual)

    def test_cross_engine_read_write_netcdf4(self):
        # Drop dim3, because its labels include strings. These appear to be
        # not properly read with python-netCDF4, which converts them into
        # unicode instead of leaving them as bytes.
        data = create_test_data().drop_vars("dim3")
        data.attrs["foo"] = "bar"
        valid_engines = ["netcdf4", "h5netcdf"]
        for write_engine in valid_engines:
            with create_tmp_file() as tmp_file:
                data.to_netcdf(tmp_file, engine=write_engine)
                for read_engine in valid_engines:
                    with open_dataset(tmp_file, engine=read_engine) as actual:
                        assert_identical(data, actual)

    def test_read_byte_attrs_as_unicode(self):
        # bytes attributes written by netCDF4 must come back as str
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, "w") as nc:
                nc.foo = b"bar"
            with open_dataset(tmp_file) as actual:
                expected = Dataset(attrs={"foo": "bar"})
                assert_identical(expected, actual)

    def test_encoding_unlimited_dims(self):
        # unlimited_dims round-trips whether given as a list or a string
        ds = Dataset({"x": ("y", np.arange(10.0))})
        with self.roundtrip(ds, save_kwargs=dict(unlimited_dims=["y"])) as actual:
            assert actual.encoding["unlimited_dims"] == set("y")
            assert_equal(ds, actual)
        ds.encoding = {"unlimited_dims": ["y"]}
        with self.roundtrip(ds) as actual:
            assert actual.encoding["unlimited_dims"] == set("y")
            assert_equal(ds, actual)

    def test_compression_encoding_h5py(self):
        """h5py-style compression settings map onto NetCDF4-Python style ones."""
        ENCODINGS = (
            # h5py style compression with gzip codec will be converted to
            # NetCDF4-Python style on round-trip
            (
                {"compression": "gzip", "compression_opts": 9},
                {"zlib": True, "complevel": 9},
            ),
            # What can't be expressed in NetCDF4-Python style is
            # round-tripped unaltered
            (
                {"compression": "lzf", "compression_opts": None},
                {"compression": "lzf", "compression_opts": None},
            ),
            # If both styles are used together, h5py format takes precedence
            (
                {
                    "compression": "lzf",
                    "compression_opts": None,
                    "zlib": True,
                    "complevel": 9,
                },
                {"compression": "lzf", "compression_opts": None},
            ),
        )
        for compr_in, compr_out in ENCODINGS:
            data = create_test_data()
            compr_common = {
                "chunksizes": (5, 5),
                "fletcher32": True,
                "shuffle": True,
                "original_shape": data.var2.shape,
            }
            data["var2"].encoding.update(compr_in)
            data["var2"].encoding.update(compr_common)
            compr_out.update(compr_common)
            data["scalar"] = ("scalar_dim", np.array([2.0]))
            data["scalar"] = data["scalar"][0]
            with self.roundtrip(data) as actual:
                for k, v in compr_out.items():
                    assert v == actual["var2"].encoding[k]

    def test_compression_check_encoding_h5py(self):
        """When mismatched h5py and NetCDF4-Python encodings are expressed
        in to_netcdf(encoding=...), must raise ValueError
        """
        data = Dataset({"x": ("y", np.arange(10.0))})
        # Compatible encodings are graciously supported
        with create_tmp_file() as tmp_file:
            data.to_netcdf(
                tmp_file,
                engine="h5netcdf",
                encoding={
                    "x": {
                        "compression": "gzip",
                        "zlib": True,
                        "compression_opts": 6,
                        "complevel": 6,
                    }
                },
            )
            with open_dataset(tmp_file, engine="h5netcdf") as actual:
                assert actual.x.encoding["zlib"] is True
                assert actual.x.encoding["complevel"] == 6
        # Incompatible encodings cause a crash
        with create_tmp_file() as tmp_file:
            with pytest.raises(
                ValueError, match=r"'zlib' and 'compression' encodings mismatch"
            ):
                data.to_netcdf(
                    tmp_file,
                    engine="h5netcdf",
                    encoding={"x": {"compression": "lzf", "zlib": True}},
                )
        with create_tmp_file() as tmp_file:
            with pytest.raises(
                ValueError,
                match=r"'complevel' and 'compression_opts' encodings mismatch",
            ):
                data.to_netcdf(
                    tmp_file,
                    engine="h5netcdf",
                    encoding={
                        "x": {
                            "compression": "gzip",
                            "compression_opts": 5,
                            "complevel": 6,
                        }
                    },
                )

    def test_dump_encodings_h5py(self):
        # regression test for #709
        ds = Dataset({"x": ("y", np.arange(10.0))})
        kwargs = {"encoding": {"x": {"compression": "gzip", "compression_opts": 9}}}
        with self.roundtrip(ds, save_kwargs=kwargs) as actual:
            assert actual.x.encoding["zlib"]
            assert actual.x.encoding["complevel"] == 9
        kwargs = {"encoding": {"x": {"compression": "lzf", "compression_opts": None}}}
        with self.roundtrip(ds, save_kwargs=kwargs) as actual:
            assert actual.x.encoding["compression"] == "lzf"
            assert actual.x.encoding["compression_opts"] is None
@requires_h5netcdf
@requires_netCDF4
class TestH5NetCDFAlreadyOpen:
    """Tests constructing stores from already-open h5netcdf file handles."""

    def test_open_dataset_group(self):
        # groups can be selected via the h5 object itself or a `group` kwarg
        import h5netcdf
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, mode="w") as nc:
                group = nc.createGroup("g")
                v = group.createVariable("x", "int")
                v[...] = 42
            kwargs = {}
            # newer h5netcdf/h5py need explicit vlen-string decoding
            if Version(h5netcdf.__version__) >= Version("0.10.0") and Version(
                h5netcdf.core.h5py.__version__
            ) >= Version("3.0.0"):
                kwargs = dict(decode_vlen_strings=True)
            h5 = h5netcdf.File(tmp_file, mode="r", **kwargs)
            store = backends.H5NetCDFStore(h5["g"])
            with open_dataset(store) as ds:
                expected = Dataset({"x": ((), 42)})
                assert_identical(expected, ds)
            h5 = h5netcdf.File(tmp_file, mode="r", **kwargs)
            store = backends.H5NetCDFStore(h5, group="g")
            with open_dataset(store) as ds:
                expected = Dataset({"x": ((), 42)})
                assert_identical(expected, ds)

    def test_deepcopy(self):
        # datasets backed by an open h5netcdf handle must support deep copies
        import h5netcdf
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, mode="w") as nc:
                nc.createDimension("x", 10)
                v = nc.createVariable("y", np.int32, ("x",))
                v[:] = np.arange(10)
            kwargs = {}
            if Version(h5netcdf.__version__) >= Version("0.10.0") and Version(
                h5netcdf.core.h5py.__version__
            ) >= Version("3.0.0"):
                kwargs = dict(decode_vlen_strings=True)
            h5 = h5netcdf.File(tmp_file, mode="r", **kwargs)
            store = backends.H5NetCDFStore(h5)
            with open_dataset(store) as ds:
                copied = ds.copy(deep=True)
                expected = Dataset({"y": ("x", np.arange(10))})
                assert_identical(expected, copied)
@requires_h5netcdf
class TestH5NetCDFFileObject(TestH5NetCDFData):
    """h5netcdf backend tests reading from open file objects and raw bytes."""

    engine = "h5netcdf"

    def test_open_badbytes(self):
        # each flavour of invalid byte input gets a targeted error message
        with pytest.raises(ValueError, match=r"HDF5 as bytes"):
            with open_dataset(b"\211HDF\r\n\032\n", engine="h5netcdf"):
                pass
        with pytest.raises(
            ValueError, match=r"match in any of xarray's currently installed IO"
        ):
            with open_dataset(b"garbage"):
                pass
        with pytest.raises(ValueError, match=r"can only read bytes"):
            with open_dataset(b"garbage", engine="netcdf4"):
                pass
        with pytest.raises(
            ValueError, match=r"not the signature of a valid netCDF4 file"
        ):
            with open_dataset(BytesIO(b"garbage"), engine="h5netcdf"):
                pass

    def test_open_twice(self):
        # opening the same file handle twice is rejected
        expected = create_test_data()
        expected.attrs["foo"] = "bar"
        with pytest.raises(ValueError, match=r"read/write pointer not at the start"):
            with create_tmp_file() as tmp_file:
                expected.to_netcdf(tmp_file, engine="h5netcdf")
                with open(tmp_file, "rb") as f:
                    with open_dataset(f, engine="h5netcdf"):
                        with open_dataset(f, engine="h5netcdf"):
                            pass

    @requires_scipy
    def test_open_fileobj(self):
        # open in-memory datasets instead of local file paths
        expected = create_test_data().drop_vars("dim3")
        expected.attrs["foo"] = "bar"
        with create_tmp_file() as tmp_file:
            expected.to_netcdf(tmp_file, engine="h5netcdf")
            with open(tmp_file, "rb") as f:
                with open_dataset(f, engine="h5netcdf") as actual:
                    assert_identical(expected, actual)
                f.seek(0)
                with open_dataset(f) as actual:
                    assert_identical(expected, actual)
                f.seek(0)
                with BytesIO(f.read()) as bio:
                    with open_dataset(bio, engine="h5netcdf") as actual:
                        assert_identical(expected, actual)
                f.seek(0)
                with pytest.raises(TypeError, match="not a valid NetCDF 3"):
                    open_dataset(f, engine="scipy")
            # TODO: this additional open is required since scipy seems to close the file
            # when it fails on the TypeError (though didn't when we used
            # `raises_regex`?). Ref https://github.com/pydata/xarray/pull/5191
            with open(tmp_file, "rb") as f:
                f.seek(8)
                with pytest.raises(
                    ValueError,
                    match="match in any of xarray's currently installed IO",
                ):
                    with pytest.warns(
                        RuntimeWarning,
                        match=re.escape("'h5netcdf' fails while guessing"),
                    ):
                        open_dataset(f)
@requires_h5netcdf
@requires_dask
@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
class TestH5NetCDFViaDaskData(TestH5NetCDFData):
    """Re-runs the h5netcdf tests with datasets opened through dask."""

    @contextlib.contextmanager
    def roundtrip(
        self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False
    ):
        if save_kwargs is None:
            save_kwargs = {}
        if open_kwargs is None:
            open_kwargs = {}
        # force dask-backed variables on read
        open_kwargs.setdefault("chunks", -1)
        with TestH5NetCDFData.roundtrip(
            self, data, save_kwargs, open_kwargs, allow_cleanup_failure
        ) as ds:
            yield ds

    def test_dataset_caching(self):
        # caching behavior differs for dask
        pass

    def test_write_inconsistent_chunks(self):
        # Construct two variables with the same dimensions, but different
        # chunk sizes.
        x = da.zeros((100, 100), dtype="f4", chunks=(50, 100))
        x = DataArray(data=x, dims=("lat", "lon"), name="x")
        x.encoding["chunksizes"] = (50, 100)
        x.encoding["original_shape"] = (100, 100)
        y = da.ones((100, 100), dtype="f4", chunks=(100, 50))
        y = DataArray(data=y, dims=("lat", "lon"), name="y")
        y.encoding["chunksizes"] = (100, 50)
        y.encoding["original_shape"] = (100, 100)
        # Put them both into the same dataset
        ds = Dataset({"x": x, "y": y})
        with self.roundtrip(ds) as actual:
            assert actual["x"].encoding["chunksizes"] == (50, 100)
            assert actual["y"].encoding["chunksizes"] == (100, 50)
@pytest.fixture(params=["scipy", "netcdf4", "h5netcdf", "pynio", "zarr"])
def readengine(request):
    # engine used for reading in test_open_mfdataset_manyfiles
    return request.param
@pytest.fixture(params=[1, 20])
def nfiles(request):
    # number of temporary files used by test_open_mfdataset_manyfiles
    return request.param
@pytest.fixture(params=[5, None])
def file_cache_maxsize(request):
    """Yield the requested cache size, applying it via set_options when given."""
    maxsize = request.param
    if maxsize is None:
        # None means: leave xarray's default file cache size untouched
        yield maxsize
    else:
        with set_options(file_cache_maxsize=maxsize):
            yield maxsize
@pytest.fixture(params=[True, False])
def parallel(request):
    # whether open_mfdataset opens files in parallel
    return request.param
@pytest.fixture(params=[None, 5])
def chunks(request):
    # chunks argument forwarded to open_mfdataset
    return request.param
# using pytest.mark.skipif does not work, so this is a work-around
def skip_if_not_engine(engine):
    """Skip the calling test when *engine*'s backing module is not importable."""
    # engine name -> importable module name, where the two differ
    module_names = {"netcdf4": "netCDF4", "pynio": "Nio"}
    pytest.importorskip(module_names.get(engine, engine))
@requires_dask
@pytest.mark.filterwarnings("ignore:use make_scale(name) instead")
def test_open_mfdataset_manyfiles(
    readengine, nfiles, parallel, chunks, file_cache_maxsize
):
    """Stress open_mfdataset across engines, file counts, and cache sizes."""
    # skip certain combinations
    skip_if_not_engine(readengine)
    if ON_WINDOWS:
        pytest.skip("Skipping on Windows")
    randdata = np.random.randn(nfiles)
    original = Dataset({"foo": ("x", randdata)})
    # test standard open_mfdataset approach with too many files
    with create_tmp_files(nfiles) as tmpfiles:
        # pynio cannot write, so fall back to netcdf4 for the writing side
        writeengine = readengine if readengine != "pynio" else "netcdf4"
        # split into multiple sets of temp files
        for ii in original.x.values:
            subds = original.isel(x=slice(ii, ii + 1))
            if writeengine != "zarr":
                subds.to_netcdf(tmpfiles[ii], engine=writeengine)
            else:  # if writeengine == "zarr":
                subds.to_zarr(store=tmpfiles[ii])
        # check that calculation on opened datasets works properly
        with open_mfdataset(
            tmpfiles,
            combine="nested",
            concat_dim="x",
            engine=readengine,
            parallel=parallel,
            # NOTE(review): this keeps `chunks` only when it is falsy (None);
            # the parametrized value 5 always becomes "auto" — confirm intended
            chunks=chunks if (not chunks and readengine != "zarr") else "auto",
        ) as actual:
            # check that using open_mfdataset returns dask arrays for variables
            assert isinstance(actual["foo"].data, dask_array_type)
            assert_identical(original, actual)
@requires_netCDF4
@requires_dask
def test_open_mfdataset_can_open_path_objects():
    """open_mfdataset accepts pathlib.Path inputs, not only strings."""
    example = Path(__file__).parent / "data" / "example_1.nc"
    with open_mfdataset(example) as actual:
        assert isinstance(actual, Dataset)
@requires_netCDF4
@requires_dask
def test_open_mfdataset_list_attr():
    """
    Case when an attribute of type list differs across the multiple files
    """
    from netCDF4 import Dataset

    with create_tmp_files(2) as nfiles:
        for i in range(2):
            f = Dataset(nfiles[i], "w")
            f.createDimension("x", 3)
            vlvar = f.createVariable("test_var", np.int32, ("x"))
            # here create an attribute as a list
            vlvar.test_attr = [f"string a {i}", f"string b {i}"]
            vlvar[:] = np.arange(3)
            f.close()
        # fix: the per-file datasets were previously opened without ever being
        # closed (leaked file handles); keep them open only while needed
        with open_dataset(nfiles[0]) as ds1, open_dataset(nfiles[1]) as ds2:
            original = xr.concat([ds1, ds2], dim="x")
            with xr.open_mfdataset(
                [nfiles[0], nfiles[1]], combine="nested", concat_dim="x"
            ) as actual:
                assert_identical(actual, original)
@requires_scipy_or_netCDF4
@requires_dask
class TestOpenMFDatasetWithDataVarsAndCoordsKw:
    """Tests for the ``data_vars``/``coords`` keywords of ``open_mfdataset``.

    Two datasets sharing an ``x`` dimension and a common non-dimension
    coordinate, but with consecutive (disjoint) ``t`` ranges, are written
    to temporary netCDF files and re-opened with various combine options.
    """

    # name of the non-dimension coordinate common to both files
    coord_name = "lon"
    # name of the data variable that differs between the two files
    var_name = "v1"

    @contextlib.contextmanager
    def setup_files_and_datasets(self, fuzz=0):
        """Yield ``([path1, path2], [ds1, ds2])`` with both datasets on disk.

        ``fuzz`` shifts ds1's ``x`` coordinate so the files no longer align
        exactly; used to exercise ``join='exact'`` failure paths.
        """
        ds1, ds2 = self.gen_datasets_with_common_coord_and_time()
        # to test join='exact'
        ds1["x"] = ds1.x + fuzz
        with create_tmp_file() as tmpfile1:
            with create_tmp_file() as tmpfile2:
                # save data to the temporary files
                ds1.to_netcdf(tmpfile1)
                ds2.to_netcdf(tmpfile2)
                yield [tmpfile1, tmpfile2], [ds1, ds2]

    def gen_datasets_with_common_coord_and_time(self):
        """Build two datasets with identical x/lon but consecutive t ranges."""
        # create coordinate data
        nx = 10
        nt = 10
        x = np.arange(nx)
        t1 = np.arange(nt)
        t2 = np.arange(nt, 2 * nt, 1)
        v1 = np.random.randn(nt, nx)
        v2 = np.random.randn(nt, nx)
        ds1 = Dataset(
            data_vars={self.var_name: (["t", "x"], v1), self.coord_name: ("x", 2 * x)},
            coords={"t": (["t"], t1), "x": (["x"], x)},
        )
        ds2 = Dataset(
            data_vars={self.var_name: (["t", "x"], v2), self.coord_name: ("x", 2 * x)},
            coords={"t": (["t"], t2), "x": (["x"], x)},
        )
        return ds1, ds2

    @pytest.mark.parametrize(
        "combine, concat_dim", [("nested", "t"), ("by_coords", None)]
    )
    @pytest.mark.parametrize("opt", ["all", "minimal", "different"])
    @pytest.mark.parametrize("join", ["outer", "inner", "left", "right"])
    def test_open_mfdataset_does_same_as_concat(self, combine, concat_dim, opt, join):
        """open_mfdataset must match an explicit ``xr.concat`` of the pieces."""
        with self.setup_files_and_datasets() as (files, [ds1, ds2]):
            if combine == "by_coords":
                files.reverse()
            with open_mfdataset(
                files, data_vars=opt, combine=combine, concat_dim=concat_dim, join=join
            ) as ds:
                ds_expect = xr.concat([ds1, ds2], data_vars=opt, dim="t", join=join)
                assert_identical(ds, ds_expect)

    @pytest.mark.parametrize(
        ["combine_attrs", "attrs", "expected", "expect_error"],
        (
            pytest.param("drop", [{"a": 1}, {"a": 2}], {}, False, id="drop"),
            pytest.param(
                "override", [{"a": 1}, {"a": 2}], {"a": 1}, False, id="override"
            ),
            pytest.param(
                "no_conflicts", [{"a": 1}, {"a": 2}], None, True, id="no_conflicts"
            ),
            pytest.param(
                "identical",
                [{"a": 1, "b": 2}, {"a": 1, "c": 3}],
                None,
                True,
                id="identical",
            ),
            pytest.param(
                "drop_conflicts",
                [{"a": 1, "b": 2}, {"b": -1, "c": 3}],
                {"a": 1, "c": 3},
                False,
                id="drop_conflicts",
            ),
        ),
    )
    def test_open_mfdataset_dataset_combine_attrs(
        self, combine_attrs, attrs, expected, expect_error
    ):
        """Each ``combine_attrs`` policy merges (or refuses to merge) attrs."""
        with self.setup_files_and_datasets() as (files, [ds1, ds2]):
            # Give the files an inconsistent attribute
            for i, f in enumerate(files):
                ds = open_dataset(f).load()
                ds.attrs = attrs[i]
                ds.close()
                ds.to_netcdf(f)
            if expect_error:
                with pytest.raises(xr.MergeError):
                    xr.open_mfdataset(
                        files,
                        combine="nested",
                        concat_dim="t",
                        combine_attrs=combine_attrs,
                    )
            else:
                with xr.open_mfdataset(
                    files,
                    combine="nested",
                    concat_dim="t",
                    combine_attrs=combine_attrs,
                ) as ds:
                    assert ds.attrs == expected

    def test_open_mfdataset_dataset_attr_by_coords(self):
        """
        Case when an attribute differs across the multiple files
        """
        with self.setup_files_and_datasets() as (files, [ds1, ds2]):
            # Give the files an inconsistent attribute
            for i, f in enumerate(files):
                ds = open_dataset(f).load()
                ds.attrs["test_dataset_attr"] = 10 + i
                ds.close()
                ds.to_netcdf(f)
            # by default, the first file's attribute value wins
            with xr.open_mfdataset(files, combine="nested", concat_dim="t") as ds:
                assert ds.test_dataset_attr == 10

    def test_open_mfdataset_dataarray_attr_by_coords(self):
        """
        Case when an attribute of a member DataArray differs across the multiple files
        """
        with self.setup_files_and_datasets() as (files, [ds1, ds2]):
            # Give the files an inconsistent attribute
            for i, f in enumerate(files):
                ds = open_dataset(f).load()
                ds["v1"].attrs["test_dataarray_attr"] = i
                ds.close()
                ds.to_netcdf(f)
            # by default, the first file's variable attribute wins
            with xr.open_mfdataset(files, combine="nested", concat_dim="t") as ds:
                assert ds["v1"].test_dataarray_attr == 0

    @pytest.mark.parametrize(
        "combine, concat_dim", [("nested", "t"), ("by_coords", None)]
    )
    @pytest.mark.parametrize("opt", ["all", "minimal", "different"])
    def test_open_mfdataset_exact_join_raises_error(self, combine, concat_dim, opt):
        """``join='exact'`` must fail once the x indexes have been fuzzed apart."""
        with self.setup_files_and_datasets(fuzz=0.1) as (files, [ds1, ds2]):
            if combine == "by_coords":
                files.reverse()
            with pytest.raises(ValueError, match=r"indexes along dimension"):
                open_mfdataset(
                    files,
                    data_vars=opt,
                    combine=combine,
                    concat_dim=concat_dim,
                    join="exact",
                )

    def test_common_coord_when_datavars_all(self):
        """``data_vars='all'`` concatenates even the common coordinate."""
        opt = "all"
        with self.setup_files_and_datasets() as (files, [ds1, ds2]):
            # open the files with the data_var option
            with open_mfdataset(
                files, data_vars=opt, combine="nested", concat_dim="t"
            ) as ds:
                coord_shape = ds[self.coord_name].shape
                coord_shape1 = ds1[self.coord_name].shape
                coord_shape2 = ds2[self.coord_name].shape
                var_shape = ds[self.var_name].shape
                assert var_shape == coord_shape
                assert coord_shape1 != coord_shape
                assert coord_shape2 != coord_shape

    def test_common_coord_when_datavars_minimal(self):
        """``data_vars='minimal'`` leaves the common coordinate unconcatenated."""
        opt = "minimal"
        with self.setup_files_and_datasets() as (files, [ds1, ds2]):
            # open the files using data_vars option
            with open_mfdataset(
                files, data_vars=opt, combine="nested", concat_dim="t"
            ) as ds:
                coord_shape = ds[self.coord_name].shape
                coord_shape1 = ds1[self.coord_name].shape
                coord_shape2 = ds2[self.coord_name].shape
                var_shape = ds[self.var_name].shape
                assert var_shape != coord_shape
                assert coord_shape1 == coord_shape
                assert coord_shape2 == coord_shape

    def test_invalid_data_vars_value_should_fail(self):
        """Misspelled ``data_vars``/``coords`` values must raise ValueError."""
        with self.setup_files_and_datasets() as (files, _):
            with pytest.raises(ValueError):
                with open_mfdataset(files, data_vars="minimum", combine="by_coords"):
                    pass
            # test invalid coord parameter
            with pytest.raises(ValueError):
                with open_mfdataset(files, coords="minimum", combine="by_coords"):
                    pass
@requires_dask
@requires_scipy
@requires_netCDF4
class TestDask(DatasetIOBase):
    """Round-trip and ``open_mfdataset`` tests with dask-backed datasets.

    ``roundtrip`` only chunks the dataset in memory (no file store), so
    inherited tests that depend on an actual on-disk store are overridden
    below as no-ops.
    """

    @contextlib.contextmanager
    def create_store(self):
        # no real store needed for in-memory dask round trips
        yield Dataset()

    @contextlib.contextmanager
    def roundtrip(
        self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False
    ):
        # "round trip" is simply chunking into dask arrays
        yield data.chunk()

    # Override methods in DatasetIOBase - not applicable to dask
    def test_roundtrip_string_encoded_characters(self):
        pass

    def test_roundtrip_coordinates_with_space(self):
        pass

    def test_roundtrip_numpy_datetime_data(self):
        # Override method in DatasetIOBase - remove not applicable
        # save_kwargs
        times = pd.to_datetime(["2000-01-01", "2000-01-02", "NaT"])
        expected = Dataset({"t": ("t", times), "t0": times[0]})
        with self.roundtrip(expected) as actual:
            assert_identical(expected, actual)

    def test_roundtrip_cftime_datetime_data(self):
        # Override method in DatasetIOBase - remove not applicable
        # save_kwargs
        from .test_coding_times import _all_cftime_date_types

        date_types = _all_cftime_date_types()
        for date_type in date_types.values():
            times = [date_type(1, 1, 1), date_type(1, 1, 2)]
            expected = Dataset({"t": ("t", times), "t0": times[0]})
            expected_decoded_t = np.array(times)
            expected_decoded_t0 = np.array([date_type(1, 1, 1)])
            with self.roundtrip(expected) as actual:
                # compare within 1s tolerance since cftime round trips
                # through an encoded representation
                abs_diff = abs(actual.t.values - expected_decoded_t)
                assert (abs_diff <= np.timedelta64(1, "s")).all()
                abs_diff = abs(actual.t0.values - expected_decoded_t0)
                assert (abs_diff <= np.timedelta64(1, "s")).all()

    def test_write_store(self):
        # Override method in DatasetIOBase - not applicable to dask
        pass

    def test_dataset_caching(self):
        """Dask-backed variables must stay lazy even after access."""
        expected = Dataset({"foo": ("x", [5, 6, 7])})
        with self.roundtrip(expected) as actual:
            assert not actual.foo.variable._in_memory
            actual.foo.values  # no caching
            assert not actual.foo.variable._in_memory

    def test_open_mfdataset(self):
        """Basic 1-D multi-file open: chunking, values and error paths."""
        original = Dataset({"foo": ("x", np.random.randn(10))})
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                original.isel(x=slice(5)).to_netcdf(tmp1)
                original.isel(x=slice(5, 10)).to_netcdf(tmp2)
                with open_mfdataset(
                    [tmp1, tmp2], concat_dim="x", combine="nested"
                ) as actual:
                    assert isinstance(actual.foo.variable.data, da.Array)
                    assert actual.foo.variable.data.chunks == ((5, 5),)
                    assert_identical(original, actual)
                with open_mfdataset(
                    [tmp1, tmp2], concat_dim="x", combine="nested", chunks={"x": 3}
                ) as actual:
                    assert actual.foo.variable.data.chunks == ((3, 2, 3, 2),)
        with pytest.raises(OSError, match=r"no files to open"):
            open_mfdataset("foo-bar-baz-*.nc")
        with pytest.raises(ValueError, match=r"wild-card"):
            open_mfdataset("http://some/remote/uri")

    @requires_fsspec
    def test_open_mfdataset_no_files(self):
        pytest.importorskip("aiobotocore")
        # glob is attempted as of #4823, but finds no files
        with pytest.raises(OSError, match=r"no files"):
            open_mfdataset("http://some/remote/uri", engine="zarr")

    def test_open_mfdataset_2d(self):
        """2-D nested multi-file open over a 2x2 grid of files."""
        original = Dataset({"foo": (["x", "y"], np.random.randn(10, 8))})
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                with create_tmp_file() as tmp3:
                    with create_tmp_file() as tmp4:
                        original.isel(x=slice(5), y=slice(4)).to_netcdf(tmp1)
                        original.isel(x=slice(5, 10), y=slice(4)).to_netcdf(tmp2)
                        original.isel(x=slice(5), y=slice(4, 8)).to_netcdf(tmp3)
                        original.isel(x=slice(5, 10), y=slice(4, 8)).to_netcdf(tmp4)
                        with open_mfdataset(
                            [[tmp1, tmp2], [tmp3, tmp4]],
                            combine="nested",
                            concat_dim=["y", "x"],
                        ) as actual:
                            assert isinstance(actual.foo.variable.data, da.Array)
                            assert actual.foo.variable.data.chunks == ((5, 5), (4, 4))
                            assert_identical(original, actual)
                        with open_mfdataset(
                            [[tmp1, tmp2], [tmp3, tmp4]],
                            combine="nested",
                            concat_dim=["y", "x"],
                            chunks={"x": 3, "y": 2},
                        ) as actual:
                            assert actual.foo.variable.data.chunks == (
                                (3, 2, 3, 2),
                                (2, 2, 2, 2),
                            )

    def test_open_mfdataset_pathlib(self):
        """open_mfdataset accepts pathlib.Path entries."""
        original = Dataset({"foo": ("x", np.random.randn(10))})
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                tmp1 = Path(tmp1)
                tmp2 = Path(tmp2)
                original.isel(x=slice(5)).to_netcdf(tmp1)
                original.isel(x=slice(5, 10)).to_netcdf(tmp2)
                with open_mfdataset(
                    [tmp1, tmp2], concat_dim="x", combine="nested"
                ) as actual:
                    assert_identical(original, actual)

    def test_open_mfdataset_2d_pathlib(self):
        """2-D nested multi-file open with pathlib.Path entries."""
        original = Dataset({"foo": (["x", "y"], np.random.randn(10, 8))})
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                with create_tmp_file() as tmp3:
                    with create_tmp_file() as tmp4:
                        tmp1 = Path(tmp1)
                        tmp2 = Path(tmp2)
                        tmp3 = Path(tmp3)
                        tmp4 = Path(tmp4)
                        original.isel(x=slice(5), y=slice(4)).to_netcdf(tmp1)
                        original.isel(x=slice(5, 10), y=slice(4)).to_netcdf(tmp2)
                        original.isel(x=slice(5), y=slice(4, 8)).to_netcdf(tmp3)
                        original.isel(x=slice(5, 10), y=slice(4, 8)).to_netcdf(tmp4)
                        with open_mfdataset(
                            [[tmp1, tmp2], [tmp3, tmp4]],
                            combine="nested",
                            concat_dim=["y", "x"],
                        ) as actual:
                            assert_identical(original, actual)

    def test_open_mfdataset_2(self):
        original = Dataset({"foo": ("x", np.random.randn(10))})
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                original.isel(x=slice(5)).to_netcdf(tmp1)
                original.isel(x=slice(5, 10)).to_netcdf(tmp2)
                with open_mfdataset(
                    [tmp1, tmp2], concat_dim="x", combine="nested"
                ) as actual:
                    assert_identical(original, actual)

    def test_attrs_mfdataset(self):
        """Only the first file's global attributes are kept."""
        original = Dataset({"foo": ("x", np.random.randn(10))})
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                ds1 = original.isel(x=slice(5))
                ds2 = original.isel(x=slice(5, 10))
                ds1.attrs["test1"] = "foo"
                ds2.attrs["test2"] = "bar"
                ds1.to_netcdf(tmp1)
                ds2.to_netcdf(tmp2)
                with open_mfdataset(
                    [tmp1, tmp2], concat_dim="x", combine="nested"
                ) as actual:
                    # presumes that attributes inherited from
                    # first dataset loaded
                    assert actual.test1 == ds1.test1
                    # attributes from ds2 are not retained, e.g.,
                    with pytest.raises(AttributeError, match=r"no attribute"):
                        actual.test2

    def test_open_mfdataset_attrs_file(self):
        """``attrs_file`` selects which file provides the attributes."""
        original = Dataset({"foo": ("x", np.random.randn(10))})
        with create_tmp_files(2) as (tmp1, tmp2):
            ds1 = original.isel(x=slice(5))
            ds2 = original.isel(x=slice(5, 10))
            ds1.attrs["test1"] = "foo"
            ds2.attrs["test2"] = "bar"
            ds1.to_netcdf(tmp1)
            ds2.to_netcdf(tmp2)
            with open_mfdataset(
                [tmp1, tmp2], concat_dim="x", combine="nested", attrs_file=tmp2
            ) as actual:
                # attributes are inherited from the master file
                assert actual.attrs["test2"] == ds2.attrs["test2"]
                # attributes from ds1 are not retained, e.g.,
                assert "test1" not in actual.attrs

    def test_open_mfdataset_attrs_file_path(self):
        """``attrs_file`` also works when given as a pathlib.Path."""
        original = Dataset({"foo": ("x", np.random.randn(10))})
        with create_tmp_files(2) as (tmp1, tmp2):
            tmp1 = Path(tmp1)
            tmp2 = Path(tmp2)
            ds1 = original.isel(x=slice(5))
            ds2 = original.isel(x=slice(5, 10))
            ds1.attrs["test1"] = "foo"
            ds2.attrs["test2"] = "bar"
            ds1.to_netcdf(tmp1)
            ds2.to_netcdf(tmp2)
            with open_mfdataset(
                [tmp1, tmp2], concat_dim="x", combine="nested", attrs_file=tmp2
            ) as actual:
                # attributes are inherited from the master file
                assert actual.attrs["test2"] == ds2.attrs["test2"]
                # attributes from ds1 are not retained, e.g.,
                assert "test1" not in actual.attrs

    def test_open_mfdataset_auto_combine(self):
        """combine='by_coords' reorders out-of-order files by coordinates."""
        original = Dataset({"foo": ("x", np.random.randn(10)), "x": np.arange(10)})
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                original.isel(x=slice(5)).to_netcdf(tmp1)
                original.isel(x=slice(5, 10)).to_netcdf(tmp2)
                with open_mfdataset([tmp2, tmp1], combine="by_coords") as actual:
                    assert_identical(original, actual)

    def test_open_mfdataset_raise_on_bad_combine_args(self):
        # Regression test for unhelpful error shown in #5230
        original = Dataset({"foo": ("x", np.random.randn(10)), "x": np.arange(10)})
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                original.isel(x=slice(5)).to_netcdf(tmp1)
                original.isel(x=slice(5, 10)).to_netcdf(tmp2)
                with pytest.raises(ValueError, match="`concat_dim` has no effect"):
                    open_mfdataset([tmp1, tmp2], concat_dim="x")

    @pytest.mark.xfail(reason="mfdataset loses encoding currently.")
    def test_encoding_mfdataset(self):
        original = Dataset(
            {
                "foo": ("t", np.random.randn(10)),
                "t": ("t", pd.date_range(start="2010-01-01", periods=10, freq="1D")),
            }
        )
        original.t.encoding["units"] = "days since 2010-01-01"
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                ds1 = original.isel(t=slice(5))
                ds2 = original.isel(t=slice(5, 10))
                ds1.t.encoding["units"] = "days since 2010-01-01"
                ds2.t.encoding["units"] = "days since 2000-01-01"
                ds1.to_netcdf(tmp1)
                ds2.to_netcdf(tmp2)
                with open_mfdataset([tmp1, tmp2], combine="nested") as actual:
                    assert actual.t.encoding["units"] == original.t.encoding["units"]
                    assert actual.t.encoding["units"] == ds1.t.encoding["units"]
                    assert actual.t.encoding["units"] != ds2.t.encoding["units"]

    def test_preprocess_mfdataset(self):
        """``preprocess`` callback is applied to each dataset before combining."""
        original = Dataset({"foo": ("x", np.random.randn(10))})
        with create_tmp_file() as tmp:
            original.to_netcdf(tmp)

            def preprocess(ds):
                return ds.assign_coords(z=0)

            expected = preprocess(original)
            with open_mfdataset(
                tmp, preprocess=preprocess, combine="by_coords"
            ) as actual:
                assert_identical(expected, actual)

    def test_save_mfdataset_roundtrip(self):
        original = Dataset({"foo": ("x", np.random.randn(10))})
        datasets = [original.isel(x=slice(5)), original.isel(x=slice(5, 10))]
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                save_mfdataset(datasets, [tmp1, tmp2])
                with open_mfdataset(
                    [tmp1, tmp2], concat_dim="x", combine="nested"
                ) as actual:
                    assert_identical(actual, original)

    def test_save_mfdataset_invalid(self):
        ds = Dataset()
        with pytest.raises(ValueError, match=r"cannot use mode"):
            save_mfdataset([ds, ds], ["same", "same"])
        with pytest.raises(ValueError, match=r"same length"):
            save_mfdataset([ds, ds], ["only one path"])

    def test_save_mfdataset_invalid_dataarray(self):
        # regression test for GH1555
        da = DataArray([1, 2])
        with pytest.raises(TypeError, match=r"supports writing Dataset"):
            save_mfdataset([da], ["dataarray"])

    def test_save_mfdataset_pathlib_roundtrip(self):
        original = Dataset({"foo": ("x", np.random.randn(10))})
        datasets = [original.isel(x=slice(5)), original.isel(x=slice(5, 10))]
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                tmp1 = Path(tmp1)
                tmp2 = Path(tmp2)
                save_mfdataset(datasets, [tmp1, tmp2])
                with open_mfdataset(
                    [tmp1, tmp2], concat_dim="x", combine="nested"
                ) as actual:
                    assert_identical(actual, original)

    def test_open_and_do_math(self):
        original = Dataset({"foo": ("x", np.random.randn(10))})
        with create_tmp_file() as tmp:
            original.to_netcdf(tmp)
            with open_mfdataset(tmp, combine="by_coords") as ds:
                actual = 1.0 * ds
                assert_allclose(original, actual, decode_bytes=False)

    def test_open_mfdataset_concat_dim_none(self):
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                data = Dataset({"x": 0})
                data.to_netcdf(tmp1)
                Dataset({"x": np.nan}).to_netcdf(tmp2)
                with open_mfdataset(
                    [tmp1, tmp2], concat_dim=None, combine="nested"
                ) as actual:
                    assert_identical(data, actual)

    def test_open_mfdataset_concat_dim_default_none(self):
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                data = Dataset({"x": 0})
                data.to_netcdf(tmp1)
                Dataset({"x": np.nan}).to_netcdf(tmp2)
                with open_mfdataset([tmp1, tmp2], combine="nested") as actual:
                    assert_identical(data, actual)

    def test_open_dataset(self):
        """``chunks`` controls whether variables come back as dask or numpy."""
        original = Dataset({"foo": ("x", np.random.randn(10))})
        with create_tmp_file() as tmp:
            original.to_netcdf(tmp)
            with open_dataset(tmp, chunks={"x": 5}) as actual:
                assert isinstance(actual.foo.variable.data, da.Array)
                assert actual.foo.variable.data.chunks == ((5, 5),)
                assert_identical(original, actual)
            with open_dataset(tmp, chunks=5) as actual:
                assert_identical(original, actual)
            with open_dataset(tmp) as actual:
                assert isinstance(actual.foo.variable.data, np.ndarray)
                assert_identical(original, actual)

    def test_open_single_dataset(self):
        # Test for issue GH #1988. This makes sure that the
        # concat_dim is utilized when specified in open_mfdataset().
        rnddata = np.random.randn(10)
        original = Dataset({"foo": ("x", rnddata)})
        dim = DataArray([100], name="baz", dims="baz")
        expected = Dataset(
            {"foo": (("baz", "x"), rnddata[np.newaxis, :])}, {"baz": [100]}
        )
        with create_tmp_file() as tmp:
            original.to_netcdf(tmp)
            with open_mfdataset([tmp], concat_dim=dim, combine="nested") as actual:
                assert_identical(expected, actual)

    def test_open_multi_dataset(self):
        # Test for issue GH #1988 and #2647. This makes sure that the
        # concat_dim is utilized when specified in open_mfdataset().
        # The additional wrinkle is to ensure that a length greater
        # than one is tested as well due to numpy's implicit casting
        # of 1-length arrays to booleans in tests, which allowed
        # #2647 to still pass the test_open_single_dataset(),
        # which is itself still needed as-is because the original
        # bug caused one-length arrays to not be used correctly
        # in concatenation.
        rnddata = np.random.randn(10)
        original = Dataset({"foo": ("x", rnddata)})
        dim = DataArray([100, 150], name="baz", dims="baz")
        expected = Dataset(
            {"foo": (("baz", "x"), np.tile(rnddata[np.newaxis, :], (2, 1)))},
            {"baz": [100, 150]},
        )
        with create_tmp_file() as tmp1, create_tmp_file() as tmp2:
            original.to_netcdf(tmp1)
            original.to_netcdf(tmp2)
            with open_mfdataset(
                [tmp1, tmp2], concat_dim=dim, combine="nested"
            ) as actual:
                assert_identical(expected, actual)

    def test_dask_roundtrip(self):
        with create_tmp_file() as tmp:
            data = create_test_data()
            data.to_netcdf(tmp)
            chunks = {"dim1": 4, "dim2": 4, "dim3": 4, "time": 10}
            with open_dataset(tmp, chunks=chunks) as dask_ds:
                assert_identical(data, dask_ds)
                with create_tmp_file() as tmp2:
                    dask_ds.to_netcdf(tmp2)
                    with open_dataset(tmp2) as on_disk:
                        assert_identical(data, on_disk)

    def test_deterministic_names(self):
        """Dask task names must be stable across repeated opens of a file."""
        with create_tmp_file() as tmp:
            data = create_test_data()
            data.to_netcdf(tmp)
            with open_mfdataset(tmp, combine="by_coords") as ds:
                original_names = {k: v.data.name for k, v in ds.data_vars.items()}
            with open_mfdataset(tmp, combine="by_coords") as ds:
                repeat_names = {k: v.data.name for k, v in ds.data_vars.items()}
            for var_name, dask_name in original_names.items():
                assert var_name in dask_name
                assert dask_name[:13] == "open_dataset-"
            assert original_names == repeat_names

    def test_dataarray_compute(self):
        # Test DataArray.compute() on dask backend.
        # The test for Dataset.compute() is already in DatasetIOBase;
        # however dask is the only tested backend which supports DataArrays
        actual = DataArray([1, 2]).chunk()
        computed = actual.compute()
        assert not actual._in_memory
        assert computed._in_memory
        assert_allclose(actual, computed, decode_bytes=False)

    @pytest.mark.xfail
    def test_save_mfdataset_compute_false_roundtrip(self):
        from dask.delayed import Delayed

        original = Dataset({"foo": ("x", np.random.randn(10))}).chunk()
        datasets = [original.isel(x=slice(5)), original.isel(x=slice(5, 10))]
        with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp1:
            with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp2:
                delayed_obj = save_mfdataset(
                    datasets, [tmp1, tmp2], engine=self.engine, compute=False
                )
                assert isinstance(delayed_obj, Delayed)
                delayed_obj.compute()
                with open_mfdataset(
                    [tmp1, tmp2], combine="nested", concat_dim="x"
                ) as actual:
                    assert_identical(actual, original)

    def test_load_dataset(self):
        with create_tmp_file() as tmp:
            original = Dataset({"foo": ("x", np.random.randn(10))})
            original.to_netcdf(tmp)
            ds = load_dataset(tmp)
            # this would fail if we used open_dataset instead of load_dataset
            ds.to_netcdf(tmp)

    def test_load_dataarray(self):
        with create_tmp_file() as tmp:
            original = Dataset({"foo": ("x", np.random.randn(10))})
            original.to_netcdf(tmp)
            ds = load_dataarray(tmp)
            # this would fail if we used open_dataarray instead of
            # load_dataarray
            ds.to_netcdf(tmp)
@requires_scipy_or_netCDF4
@requires_pydap
@pytest.mark.filterwarnings("ignore:The binary mode of fromstring is deprecated")
class TestPydap:
    """Tests for the pydap backend against an in-memory pydap dataset."""

    def convert_to_pydap_dataset(self, original):
        """Translate an xarray Dataset into an equivalent pydap DatasetType."""
        from pydap.model import BaseType, DatasetType, GridType

        ds = DatasetType("bears", **original.attrs)
        for key, var in original.data_vars.items():
            v = GridType(key)
            v[key] = BaseType(key, var.values, dimensions=var.dims, **var.attrs)
            for d in var.dims:
                v[d] = BaseType(d, var[d].values)
            ds[key] = v
        # check all dims are stored in ds
        for d in original.coords:
            ds[d] = BaseType(
                d, original[d].values, dimensions=(d,), **original[d].attrs
            )
        return ds

    @contextlib.contextmanager
    def create_datasets(self, **kwargs):
        """Yield (dataset read via pydap store, expected example dataset)."""
        with open_example_dataset("bears.nc") as expected:
            pydap_ds = self.convert_to_pydap_dataset(expected)
            actual = open_dataset(PydapDataStore(pydap_ds))
            # TODO solve this workaround:
            # netcdf converts string to byte not unicode
            expected["bears"] = expected["bears"].astype(str)
            yield actual, expected

    def test_cmp_local_file(self):
        """Pydap-backed data and indexing must match the local netCDF file."""
        with self.create_datasets() as (actual, expected):
            assert_equal(actual, expected)
            # global attributes should be global attributes on the dataset
            assert "NC_GLOBAL" not in actual.attrs
            assert "history" in actual.attrs
            # we don't check attributes exactly with assertDatasetIdentical()
            # because the test DAP server seems to insert some extra
            # attributes not found in the netCDF file.
            assert actual.attrs.keys() == expected.attrs.keys()
        with self.create_datasets() as (actual, expected):
            assert_equal(actual[{"l": 2}], expected[{"l": 2}])
        with self.create_datasets() as (actual, expected):
            assert_equal(actual.isel(i=0, j=-1), expected.isel(i=0, j=-1))
        with self.create_datasets() as (actual, expected):
            assert_equal(actual.isel(j=slice(1, 2)), expected.isel(j=slice(1, 2)))
        with self.create_datasets() as (actual, expected):
            indexers = {"i": [1, 0, 0], "j": [1, 2, 0, 1]}
            assert_equal(actual.isel(**indexers), expected.isel(**indexers))
        with self.create_datasets() as (actual, expected):
            indexers = {
                "i": DataArray([0, 1, 0], dims="a"),
                "j": DataArray([0, 2, 1], dims="a"),
            }
            assert_equal(actual.isel(**indexers), expected.isel(**indexers))

    def test_compatible_to_netcdf(self):
        # make sure it can be saved as a netcdf
        with self.create_datasets() as (actual, expected):
            with create_tmp_file() as tmp_file:
                actual.to_netcdf(tmp_file)
                with open_dataset(tmp_file) as actual2:
                    actual2["bears"] = actual2["bears"].astype(str)
                    assert_equal(actual2, expected)

    @requires_dask
    def test_dask(self):
        with self.create_datasets(chunks={"j": 2}) as (actual, expected):
            assert_equal(actual, expected)
@network
@requires_scipy_or_netCDF4
@requires_pydap
class TestPydapOnline(TestPydap):
    """Variant of TestPydap that talks to the public OPeNDAP test server."""

    @contextlib.contextmanager
    def create_datasets(self, **kwargs):
        """Yield (dataset opened from the remote server, local reference)."""
        url = "http://test.opendap.org/opendap/hyrax/data/nc/bears.nc"
        remote = open_dataset(url, engine="pydap", **kwargs)
        with open_example_dataset("bears.nc") as expected:
            # workaround to restore string which is converted to byte
            expected["bears"] = expected["bears"].astype(str)
            yield remote, expected

    def test_session(self):
        """PydapDataStore.open must forward the auth session to pydap."""
        from pydap.cas.urs import setup_session

        session = setup_session("XarrayTestUser", "Xarray2017")
        with mock.patch("pydap.client.open_url") as mock_func:
            xr.backends.PydapDataStore.open("http://test.url", session=session)
            mock_func.assert_called_with("http://test.url", session=session)
@requires_scipy
@requires_pynio
class TestPyNio(CFEncodedBase, NetCDF3Only):
    """Round-trip tests using the read-only PyNIO engine (writes via scipy)."""

    def test_write_store(self):
        # pynio is read-only for now
        pass

    @contextlib.contextmanager
    def open(self, path, **kwargs):
        """Open *path* with the pynio engine as a context manager."""
        with open_dataset(path, engine="pynio", **kwargs) as ds:
            yield ds

    def test_kwargs(self):
        """Backend kwargs must reach the underlying file manager."""
        nio_kwargs = {"format": "grib"}
        grib_path = os.path.join(os.path.dirname(__file__), "data", "example")
        with backends.NioDataStore(grib_path, **nio_kwargs) as store:
            assert store._manager._kwargs["format"] == "grib"

    def save(self, dataset, path, **kwargs):
        # pynio cannot write, so persist through the scipy engine instead
        return dataset.to_netcdf(path, engine="scipy", **kwargs)

    def test_weakrefs(self):
        """A renamed view must stay valid after its source dataset is dropped."""
        example = Dataset({"foo": ("x", np.arange(5.0))})
        expected = example.rename({"foo": "bar", "x": "y"})
        with create_tmp_file() as tmp_file:
            example.to_netcdf(tmp_file, engine="scipy")
            on_disk = open_dataset(tmp_file, engine="pynio")
            actual = on_disk.rename({"foo": "bar", "x": "y"})
            del on_disk  # trigger garbage collection
            assert_identical(actual, expected)
@requires_cfgrib
class TestCfGrib:
    """Smoke tests for reading GRIB files through the cfgrib engine."""

    def test_read(self):
        """All variables and dimensions of the example GRIB are exposed."""
        expected_dims = {
            "number": 2,
            "time": 3,
            "isobaricInhPa": 2,
            "latitude": 3,
            "longitude": 4,
        }
        with open_example_dataset("example.grib", engine="cfgrib") as ds:
            assert ds.dims == expected_dims
            assert list(ds.data_vars) == ["z", "t"]
            assert ds["z"].min() == 12660.0

    def test_read_filter_by_keys(self):
        """``filter_by_keys`` restricts which GRIB messages become variables."""
        backend_kwargs = {"filter_by_keys": {"shortName": "t"}}
        expected_dims = {
            "number": 2,
            "time": 3,
            "isobaricInhPa": 2,
            "latitude": 3,
            "longitude": 4,
        }
        with open_example_dataset(
            "example.grib", engine="cfgrib", backend_kwargs=backend_kwargs
        ) as ds:
            assert ds.dims == expected_dims
            assert list(ds.data_vars) == ["t"]
            assert ds["t"].min() == 231.0

    def test_read_outer(self):
        """Outer (list-based) indexing works on cfgrib-backed arrays."""
        expected_dims = {
            "number": 2,
            "time": 3,
            "isobaricInhPa": 2,
            "latitude": 2,
            "longitude": 3,
        }
        with open_example_dataset("example.grib", engine="cfgrib") as ds:
            subset = ds.isel(latitude=[0, 2], longitude=[0, 1, 2])
            assert subset.dims == expected_dims
            assert subset["t"].min() == 231.0
@requires_pseudonetcdf
@pytest.mark.filterwarnings("ignore:IOAPI_ISPH is assumed to be 6370000")
class TestPseudoNetCDFFormat:
    """Read/write tests for the PseudoNetCDF backend (ICARTT and CAMx/uamiv)."""

    def open(self, path, **kwargs):
        return open_dataset(path, engine="pseudonetcdf", **kwargs)

    @contextlib.contextmanager
    def roundtrip(
        self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False
    ):
        """Save *data* via :meth:`save` and yield it re-opened via :meth:`open`."""
        if save_kwargs is None:
            save_kwargs = {}
        if open_kwargs is None:
            open_kwargs = {}
        with create_tmp_file(allow_cleanup_failure=allow_cleanup_failure) as path:
            self.save(data, path, **save_kwargs)
            with self.open(path, **open_kwargs) as ds:
                yield ds

    def test_ict_format(self):
        """
        Open an ICARTT (ffi1001) file and check all data variables
        """
        # attributes shared by every variable in an ICARTT file
        stdattr = {
            "fill_value": -9999.0,
            "missing_value": -9999,
            "scale": 1,
            "llod_flag": -8888,
            "llod_value": "N/A",
            "ulod_flag": -7777,
            "ulod_value": "N/A",
        }

        def myatts(**attrs):
            # merge per-variable attrs over the shared ICARTT defaults
            outattr = stdattr.copy()
            outattr.update(attrs)
            return outattr

        input = {
            "coords": {},
            "attrs": {
                "fmt": "1001",
                "n_header_lines": 29,
                "PI_NAME": "Henderson, Barron",
                "ORGANIZATION_NAME": "U.S. EPA",
                "SOURCE_DESCRIPTION": "Example file with artificial data",
                "MISSION_NAME": "JUST_A_TEST",
                "VOLUME_INFO": "1, 1",
                "SDATE": "2018, 04, 27",
                "WDATE": "2018, 04, 27",
                "TIME_INTERVAL": "0",
                "INDEPENDENT_VARIABLE_DEFINITION": "Start_UTC",
                "INDEPENDENT_VARIABLE": "Start_UTC",
                "INDEPENDENT_VARIABLE_UNITS": "Start_UTC",
                "ULOD_FLAG": "-7777",
                "ULOD_VALUE": "N/A",
                "LLOD_FLAG": "-8888",
                "LLOD_VALUE": ("N/A, N/A, N/A, N/A, 0.025"),
                "OTHER_COMMENTS": (
                    "www-air.larc.nasa.gov/missions/etc/" + "IcarttDataFormat.htm"
                ),
                "REVISION": "R0",
                "R0": "No comments for this revision.",
                "TFLAG": "Start_UTC",
            },
            "dims": {"POINTS": 4},
            "data_vars": {
                "Start_UTC": {
                    "data": [43200.0, 46800.0, 50400.0, 50400.0],
                    "dims": ("POINTS",),
                    "attrs": myatts(units="Start_UTC", standard_name="Start_UTC"),
                },
                "lat": {
                    "data": [41.0, 42.0, 42.0, 42.0],
                    "dims": ("POINTS",),
                    "attrs": myatts(units="degrees_north", standard_name="lat"),
                },
                "lon": {
                    "data": [-71.0, -72.0, -73.0, -74.0],
                    "dims": ("POINTS",),
                    "attrs": myatts(units="degrees_east", standard_name="lon"),
                },
                "elev": {
                    "data": [5.0, 15.0, 20.0, 25.0],
                    "dims": ("POINTS",),
                    "attrs": myatts(units="meters", standard_name="elev"),
                },
                "TEST_ppbv": {
                    "data": [1.2345, 2.3456, 3.4567, 4.5678],
                    "dims": ("POINTS",),
                    "attrs": myatts(units="ppbv", standard_name="TEST_ppbv"),
                },
                "TESTM_ppbv": {
                    "data": [2.22, -9999.0, -7777.0, -8888.0],
                    "dims": ("POINTS",),
                    "attrs": myatts(
                        units="ppbv", standard_name="TESTM_ppbv", llod_value=0.025
                    ),
                },
            },
        }
        chkfile = Dataset.from_dict(input)
        with open_example_dataset(
            "example.ict", engine="pseudonetcdf", backend_kwargs={"format": "ffi1001"}
        ) as ictfile:
            assert_identical(ictfile, chkfile)

    def test_ict_format_write(self):
        """An ICARTT file survives a save/open round trip unchanged."""
        fmtkw = {"format": "ffi1001"}
        with open_example_dataset(
            "example.ict", engine="pseudonetcdf", backend_kwargs=fmtkw
        ) as expected:
            with self.roundtrip(
                expected, save_kwargs=fmtkw, open_kwargs={"backend_kwargs": fmtkw}
            ) as actual:
                assert_identical(expected, actual)

    def test_uamiv_format_read(self):
        """
        Open a CAMx file and test data variables
        """
        camxfile = open_example_dataset(
            "example.uamiv", engine="pseudonetcdf", backend_kwargs={"format": "uamiv"}
        )
        data = np.arange(20, dtype="f").reshape(1, 1, 4, 5)
        expected = xr.Variable(
            ("TSTEP", "LAY", "ROW", "COL"),
            data,
            dict(units="ppm", long_name="O3".ljust(16), var_desc="O3".ljust(80)),
        )
        actual = camxfile.variables["O3"]
        assert_allclose(expected, actual)
        data = np.array([[[2002154, 0]]], dtype="i")
        expected = xr.Variable(
            ("TSTEP", "VAR", "DATE-TIME"),
            data,
            dict(
                long_name="TFLAG".ljust(16),
                var_desc="TFLAG".ljust(80),
                units="DATE-TIME".ljust(16),
            ),
        )
        actual = camxfile.variables["TFLAG"]
        assert_allclose(expected, actual)
        camxfile.close()

    @requires_dask
    def test_uamiv_format_mfread(self):
        """
        Open a CAMx file and test data variables
        """
        camxfile = open_example_mfdataset(
            ["example.uamiv", "example.uamiv"],
            engine="pseudonetcdf",
            concat_dim="TSTEP",
            combine="nested",
            backend_kwargs={"format": "uamiv"},
        )
        data1 = np.arange(20, dtype="f").reshape(1, 1, 4, 5)
        data = np.concatenate([data1] * 2, axis=0)
        expected = xr.Variable(
            ("TSTEP", "LAY", "ROW", "COL"),
            data,
            dict(units="ppm", long_name="O3".ljust(16), var_desc="O3".ljust(80)),
        )
        actual = camxfile.variables["O3"]
        assert_allclose(expected, actual)
        data = np.array([[[2002154, 0]]], dtype="i").repeat(2, 0)
        attrs = dict(
            long_name="TFLAG".ljust(16),
            var_desc="TFLAG".ljust(80),
            units="DATE-TIME".ljust(16),
        )
        dims = ("TSTEP", "VAR", "DATE-TIME")
        expected = xr.Variable(dims, data, attrs)
        actual = camxfile.variables["TFLAG"]
        assert_allclose(expected, actual)
        camxfile.close()

    @pytest.mark.xfail(reason="Flaky; see GH3711")
    def test_uamiv_format_write(self):
        fmtkw = {"format": "uamiv"}
        expected = open_example_dataset(
            "example.uamiv", engine="pseudonetcdf", backend_kwargs=fmtkw
        )
        with self.roundtrip(
            expected,
            save_kwargs=fmtkw,
            open_kwargs={"backend_kwargs": fmtkw},
            allow_cleanup_failure=True,
        ) as actual:
            assert_identical(expected, actual)
        expected.close()

    def save(self, dataset, path, **save_kwargs):
        """Write *dataset* via PseudoNetCDF's own writer."""
        import PseudoNetCDF as pnc

        pncf = pnc.PseudoNetCDFFile()
        pncf.dimensions = {
            k: pnc.PseudoNetCDFDimension(pncf, k, v) for k, v in dataset.dims.items()
        }
        pncf.variables = {
            k: pnc.PseudoNetCDFVariable(
                pncf, k, v.dtype.char, v.dims, values=v.data[...], **v.attrs
            )
            for k, v in dataset.variables.items()
        }
        for pk, pv in dataset.attrs.items():
            setattr(pncf, pk, pv)
        pnc.pncwrite(pncf, path, **save_kwargs)
@requires_rasterio
@contextlib.contextmanager
def create_tmp_geotiff(
    nx=4,
    ny=3,
    nz=3,
    transform=None,
    transform_args=default_value,
    crs=default_value,
    open_kwargs=None,
    additional_attrs=None,
):
    """Write a temporary GeoTIFF and yield ``(path, expected DataArray)``.

    ``transform_args`` is ``(west, north, xsize, ysize)`` as accepted by
    ``rasterio.transform.from_origin``.  The ``default_value`` sentinel lets
    callers pass ``None`` explicitly for ``transform_args``/``crs``.
    """
    if transform_args is default_value:
        transform_args = [5000, 80000, 1000, 2000.0]
    if crs is default_value:
        crs = {
            "units": "m",
            "no_defs": True,
            "ellps": "WGS84",
            "proj": "utm",
            "zone": 18,
        }
    # yields a temporary geotiff file and a corresponding expected DataArray
    import rasterio
    from rasterio.transform import from_origin

    if open_kwargs is None:
        open_kwargs = {}

    with create_tmp_file(suffix=".tif", allow_cleanup_failure=ON_WINDOWS) as tmp_file:
        # allow 2d or 3d shapes
        if nz == 1:
            data_shape = ny, nx
            write_kwargs = {"indexes": 1}
        else:
            data_shape = nz, ny, nx
            write_kwargs = {}
        data = np.arange(nz * ny * nx, dtype=rasterio.float32).reshape(*data_shape)
        if transform is None:
            transform = from_origin(*transform_args)
        if additional_attrs is None:
            additional_attrs = {
                "descriptions": tuple(f"d{n + 1}" for n in range(nz)),
                "units": tuple(f"u{n + 1}" for n in range(nz)),
            }
        with rasterio.open(
            tmp_file,
            "w",
            driver="GTiff",
            height=ny,
            width=nx,
            count=nz,
            crs=crs,
            transform=transform,
            dtype=rasterio.float32,
            **open_kwargs,
        ) as s:
            for attr, val in additional_attrs.items():
                setattr(s, attr, val)
            s.write(data, **write_kwargs)
            dx, dy = s.res[0], -s.res[1]

        a, b, c, d = transform_args
        # promote 2-D data to a singleton band so dims are always (band, y, x)
        data = data[np.newaxis, ...] if nz == 1 else data
        # y/x coordinates are pixel centers: offset half a pixel from the
        # transform origin, matching what xr.open_rasterio computes
        expected = DataArray(
            data,
            dims=("band", "y", "x"),
            coords={
                "band": np.arange(nz) + 1,
                "y": -np.arange(ny) * d + b + dy / 2,
                "x": np.arange(nx) * c + a + dx / 2,
            },
        )
        yield tmp_file, expected
@requires_rasterio
class TestRasterio:
    @requires_scipy_or_netCDF4
    def test_serialization(self):
        """An open_rasterio result survives a netCDF round-trip unchanged."""
        with create_tmp_geotiff(additional_attrs={}) as (tmp_file, expected):
            # Write it to a netcdf and read again (roundtrip)
            with pytest.warns(DeprecationWarning), xr.open_rasterio(tmp_file) as rioda:
                with create_tmp_file(suffix=".nc") as tmp_nc_file:
                    rioda.to_netcdf(tmp_nc_file)
                    with xr.open_dataarray(tmp_nc_file) as ncds:
                        assert_identical(rioda, ncds)
    def test_utm(self):
        """Data, GDAL attrs and coordinate parsing for a UTM GeoTIFF."""
        with create_tmp_geotiff() as (tmp_file, expected):
            with pytest.warns(DeprecationWarning), xr.open_rasterio(tmp_file) as rioda:
                assert_allclose(rioda, expected)
                assert rioda.attrs["scales"] == (1.0, 1.0, 1.0)
                assert rioda.attrs["offsets"] == (0.0, 0.0, 0.0)
                assert rioda.attrs["descriptions"] == ("d1", "d2", "d3")
                assert rioda.attrs["units"] == ("u1", "u2", "u3")
                assert isinstance(rioda.attrs["crs"], str)
                assert isinstance(rioda.attrs["res"], tuple)
                assert isinstance(rioda.attrs["is_tiled"], np.uint8)
                assert isinstance(rioda.attrs["transform"], tuple)
                assert len(rioda.attrs["transform"]) == 6
                # No nodata was set at write time, so all bands report NaN.
                np.testing.assert_array_equal(
                    rioda.attrs["nodatavals"], [np.NaN, np.NaN, np.NaN]
                )

            # Check no parse coords
            with pytest.warns(DeprecationWarning), xr.open_rasterio(
                tmp_file, parse_coordinates=False
            ) as rioda:
                assert "x" not in rioda.coords
                assert "y" not in rioda.coords
    def test_non_rectilinear(self):
        """Rotated transforms cannot be expressed as 1D coords, so none are parsed."""
        from rasterio.transform import from_origin

        # Create a geotiff file with 2d coordinates
        with create_tmp_geotiff(
            transform=from_origin(0, 3, 1, 1).rotation(45), crs=None
        ) as (tmp_file, _):
            # Default is to not parse coords
            with pytest.warns(DeprecationWarning), xr.open_rasterio(tmp_file) as rioda:
                assert "x" not in rioda.coords
                assert "y" not in rioda.coords
                assert "crs" not in rioda.attrs
                assert rioda.attrs["scales"] == (1.0, 1.0, 1.0)
                assert rioda.attrs["offsets"] == (0.0, 0.0, 0.0)
                assert rioda.attrs["descriptions"] == ("d1", "d2", "d3")
                assert rioda.attrs["units"] == ("u1", "u2", "u3")
                assert isinstance(rioda.attrs["res"], tuple)
                assert isinstance(rioda.attrs["is_tiled"], np.uint8)
                assert isinstance(rioda.attrs["transform"], tuple)
                assert len(rioda.attrs["transform"]) == 6

            # See if a warning is raised if we force it
            with pytest.warns(Warning, match="transformation isn't rectilinear"):
                with xr.open_rasterio(tmp_file, parse_coordinates=True) as rioda:
                    assert "x" not in rioda.coords
                    assert "y" not in rioda.coords
    def test_platecarree(self):
        """Single-band lat/long GeoTIFF with an explicit nodata value."""
        with create_tmp_geotiff(
            8,
            10,
            1,
            transform_args=[1, 2, 0.5, 2.0],
            crs="+proj=latlong",
            open_kwargs={"nodata": -9765},
        ) as (tmp_file, expected):
            with pytest.warns(DeprecationWarning), xr.open_rasterio(tmp_file) as rioda:
                assert_allclose(rioda, expected)
                # Single band -> one-element attr tuples.
                assert rioda.attrs["scales"] == (1.0,)
                assert rioda.attrs["offsets"] == (0.0,)
                assert isinstance(rioda.attrs["descriptions"], tuple)
                assert isinstance(rioda.attrs["units"], tuple)
                assert isinstance(rioda.attrs["crs"], str)
                assert isinstance(rioda.attrs["res"], tuple)
                assert isinstance(rioda.attrs["is_tiled"], np.uint8)
                assert isinstance(rioda.attrs["transform"], tuple)
                assert len(rioda.attrs["transform"]) == 6
                np.testing.assert_array_equal(rioda.attrs["nodatavals"], [-9765.0])
    # rasterio throws a Warning, which is expected since we test rasterio's defaults
    @pytest.mark.filterwarnings("ignore:Dataset has no geotransform")
    def test_notransform(self):
        """A GeoTIFF without a geotransform still yields pixel-centre coords."""
        # regression test for https://github.com/pydata/xarray/issues/1686
        import rasterio

        # Create a geotiff file
        with create_tmp_file(suffix=".tif") as tmp_file:
            # data
            nx, ny, nz = 4, 3, 3
            data = np.arange(nx * ny * nz, dtype=rasterio.float32).reshape(nz, ny, nx)
            with rasterio.open(
                tmp_file,
                "w",
                driver="GTiff",
                height=ny,
                width=nx,
                count=nz,
                dtype=rasterio.float32,
            ) as s:
                s.descriptions = ("nx", "ny", "nz")
                s.units = ("cm", "m", "km")
                s.write(data)

            # Tests: default identity transform gives 0.5-offset pixel centres.
            expected = DataArray(
                data,
                dims=("band", "y", "x"),
                coords={
                    "band": [1, 2, 3],
                    "y": [0.5, 1.5, 2.5],
                    "x": [0.5, 1.5, 2.5, 3.5],
                },
            )
            with pytest.warns(DeprecationWarning), xr.open_rasterio(tmp_file) as rioda:
                assert_allclose(rioda, expected)
                assert rioda.attrs["scales"] == (1.0, 1.0, 1.0)
                assert rioda.attrs["offsets"] == (0.0, 0.0, 0.0)
                assert rioda.attrs["descriptions"] == ("nx", "ny", "nz")
                assert rioda.attrs["units"] == ("cm", "m", "km")
                assert isinstance(rioda.attrs["res"], tuple)
                assert isinstance(rioda.attrs["is_tiled"], np.uint8)
                assert isinstance(rioda.attrs["transform"], tuple)
                assert len(rioda.attrs["transform"]) == 6
    def test_indexing(self):
        """Lazy (cache=False) indexing: every indexer kind stays out-of-memory."""
        with create_tmp_geotiff(
            8, 10, 3, transform_args=[1, 2, 0.5, 2.0], crs="+proj=latlong"
        ) as (tmp_file, expected):
            with pytest.warns(DeprecationWarning), xr.open_rasterio(
                tmp_file, cache=False
            ) as actual:
                # tests
                # assert_allclose checks all data + coordinates
                assert_allclose(actual, expected)
                assert not actual.variable._in_memory

                # Basic indexer
                ind = {"x": slice(2, 5), "y": slice(5, 7)}
                assert_allclose(expected.isel(**ind), actual.isel(**ind))
                assert not actual.variable._in_memory

                ind = {"band": slice(1, 2), "x": slice(2, 5), "y": slice(5, 7)}
                assert_allclose(expected.isel(**ind), actual.isel(**ind))
                assert not actual.variable._in_memory

                ind = {"band": slice(1, 2), "x": slice(2, 5), "y": 0}
                assert_allclose(expected.isel(**ind), actual.isel(**ind))
                assert not actual.variable._in_memory

                # orthogonal indexer
                ind = {
                    "band": np.array([2, 1, 0]),
                    "x": np.array([1, 0]),
                    "y": np.array([0, 2]),
                }
                assert_allclose(expected.isel(**ind), actual.isel(**ind))
                assert not actual.variable._in_memory

                ind = {"band": np.array([2, 1, 0]), "x": np.array([1, 0]), "y": 0}
                assert_allclose(expected.isel(**ind), actual.isel(**ind))
                assert not actual.variable._in_memory

                ind = {"band": 0, "x": np.array([0, 0]), "y": np.array([1, 1, 1])}
                assert_allclose(expected.isel(**ind), actual.isel(**ind))
                assert not actual.variable._in_memory

                # minus-stepped slice
                ind = {"band": np.array([2, 1, 0]), "x": slice(-1, None, -1), "y": 0}
                assert_allclose(expected.isel(**ind), actual.isel(**ind))
                assert not actual.variable._in_memory

                ind = {"band": np.array([2, 1, 0]), "x": 1, "y": slice(-1, 1, -2)}
                assert_allclose(expected.isel(**ind), actual.isel(**ind))
                assert not actual.variable._in_memory

                # empty selection
                ind = {"band": np.array([2, 1, 0]), "x": 1, "y": slice(2, 2, 1)}
                assert_allclose(expected.isel(**ind), actual.isel(**ind))
                assert not actual.variable._in_memory

                ind = {"band": slice(0, 0), "x": 1, "y": 2}
                assert_allclose(expected.isel(**ind), actual.isel(**ind))
                assert not actual.variable._in_memory

                # vectorized indexer
                ind = {
                    "band": DataArray([2, 1, 0], dims="a"),
                    "x": DataArray([1, 0, 0], dims="a"),
                    "y": np.array([0, 2]),
                }
                assert_allclose(expected.isel(**ind), actual.isel(**ind))
                assert not actual.variable._in_memory

                ind = {
                    "band": DataArray([[2, 1, 0], [1, 0, 2]], dims=["a", "b"]),
                    "x": DataArray([[1, 0, 0], [0, 1, 0]], dims=["a", "b"]),
                    "y": 0,
                }
                assert_allclose(expected.isel(**ind), actual.isel(**ind))
                assert not actual.variable._in_memory

                # Selecting lists of bands is fine
                ex = expected.isel(band=[1, 2])
                ac = actual.isel(band=[1, 2])
                assert_allclose(ac, ex)
                ex = expected.isel(band=[0, 2])
                ac = actual.isel(band=[0, 2])
                assert_allclose(ac, ex)

                # Integer indexing
                ex = expected.isel(band=1)
                ac = actual.isel(band=1)
                assert_allclose(ac, ex)

                ex = expected.isel(x=1, y=2)
                ac = actual.isel(x=1, y=2)
                assert_allclose(ac, ex)

                ex = expected.isel(band=0, x=1, y=2)
                ac = actual.isel(band=0, x=1, y=2)
                assert_allclose(ac, ex)

                # Mixed
                # NOTE(review): this compares actual against actual (not
                # expected) — presumably deliberate slice-vs-list equivalence,
                # but confirm.
                ex = actual.isel(x=slice(2), y=slice(2))
                ac = actual.isel(x=[0, 1], y=[0, 1])
                assert_allclose(ac, ex)

                ex = expected.isel(band=0, x=1, y=slice(5, 7))
                ac = actual.isel(band=0, x=1, y=slice(5, 7))
                assert_allclose(ac, ex)

                ex = expected.isel(band=0, x=slice(2, 5), y=2)
                ac = actual.isel(band=0, x=slice(2, 5), y=2)
                assert_allclose(ac, ex)

                # One-element lists
                ex = expected.isel(band=[0], x=slice(2, 5), y=[2])
                ac = actual.isel(band=[0], x=slice(2, 5), y=[2])
                assert_allclose(ac, ex)
    def test_caching(self):
        """With cache=True (default) data is loaded eagerly, enabling fancy indexing."""
        with create_tmp_geotiff(
            8, 10, 3, transform_args=[1, 2, 0.5, 2.0], crs="+proj=latlong"
        ) as (tmp_file, expected):
            # Cache is the default
            with pytest.warns(DeprecationWarning), xr.open_rasterio(tmp_file) as actual:
                # This should cache everything
                assert_allclose(actual, expected)

                # once cached, non-windowed indexing should become possible
                ac = actual.isel(x=[2, 4])
                ex = expected.isel(x=[2, 4])
                assert_allclose(ac, ex)
    @requires_dask
    def test_chunks(self):
        """chunks= at open time yields a dask-backed array that computes correctly."""
        with create_tmp_geotiff(
            8, 10, 3, transform_args=[1, 2, 0.5, 2.0], crs="+proj=latlong"
        ) as (tmp_file, expected):
            # Chunk at open time
            with pytest.warns(DeprecationWarning), xr.open_rasterio(
                tmp_file, chunks=(1, 2, 2)
            ) as actual:
                import dask.array as da

                assert isinstance(actual.data, da.Array)
                assert "open_rasterio" in actual.data.name

                # do some arithmetic
                ac = actual.mean()
                ex = expected.mean()
                assert_allclose(ac, ex)

                ac = actual.sel(band=1).mean(dim="x")
                ex = expected.sel(band=1).mean(dim="x")
                assert_allclose(ac, ex)
    @pytest.mark.xfail(
        not has_dask, reason="without dask, a non-serializable lock is used"
    )
    def test_pickle_rasterio(self):
        """A rasterio-backed DataArray must survive a pickle round-trip."""
        # regression test for https://github.com/pydata/xarray/issues/2121
        with create_tmp_geotiff() as (tmp_file, expected):
            with pytest.warns(DeprecationWarning), xr.open_rasterio(tmp_file) as rioda:
                temp = pickle.dumps(rioda)
                with pickle.loads(temp) as actual:
                    assert_equal(actual, rioda)
    def test_ENVI_tags(self):
        """ENVI-namespace tags become coords (wavelength/fwhm) and string attrs."""
        rasterio = pytest.importorskip("rasterio", minversion="1.0a")
        from rasterio.transform import from_origin

        # Create an ENVI file with some tags in the ENVI namespace
        # this test uses a custom driver, so we can't use create_tmp_geotiff
        with create_tmp_file(suffix=".dat") as tmp_file:
            # data
            nx, ny, nz = 4, 3, 3
            data = np.arange(nx * ny * nz, dtype=rasterio.float32).reshape(nz, ny, nx)
            transform = from_origin(5000, 80000, 1000, 2000.0)
            with rasterio.open(
                tmp_file,
                "w",
                driver="ENVI",
                height=ny,
                width=nx,
                count=nz,
                crs={
                    "units": "m",
                    "no_defs": True,
                    "ellps": "WGS84",
                    "proj": "utm",
                    "zone": 18,
                },
                transform=transform,
                dtype=rasterio.float32,
            ) as s:
                # ENVI-namespace tags in the driver's brace-delimited format.
                s.update_tags(
                    ns="ENVI",
                    description="{Tagged file}",
                    wavelength="{123.000000, 234.234000, 345.345678}",
                    fwhm="{1.000000, 0.234000, 0.000345}",
                )
                s.write(data)
                dx, dy = s.res[0], -s.res[1]

            # Tests: wavelength/fwhm tags should be parsed into band coords.
            coords = {
                "band": [1, 2, 3],
                "y": -np.arange(ny) * 2000 + 80000 + dy / 2,
                "x": np.arange(nx) * 1000 + 5000 + dx / 2,
                "wavelength": ("band", np.array([123, 234.234, 345.345678])),
                "fwhm": ("band", np.array([1, 0.234, 0.000345])),
            }
            expected = DataArray(data, dims=("band", "y", "x"), coords=coords)

            with pytest.warns(DeprecationWarning), xr.open_rasterio(tmp_file) as rioda:
                assert_allclose(rioda, expected)
                assert isinstance(rioda.attrs["crs"], str)
                assert isinstance(rioda.attrs["res"], tuple)
                assert isinstance(rioda.attrs["is_tiled"], np.uint8)
                assert isinstance(rioda.attrs["transform"], tuple)
                assert len(rioda.attrs["transform"]) == 6
                # from ENVI tags
                assert isinstance(rioda.attrs["description"], str)
                assert isinstance(rioda.attrs["map_info"], str)
                assert isinstance(rioda.attrs["samples"], str)
    def test_geotiff_tags(self):
        """GDAL tags (e.g. AREA_OR_POINT) are exposed as string attrs."""
        # Create a geotiff file with some tags
        with create_tmp_geotiff() as (tmp_file, _):
            with pytest.warns(DeprecationWarning), xr.open_rasterio(tmp_file) as rioda:
                assert isinstance(rioda.attrs["AREA_OR_POINT"], str)
    @requires_dask
    def test_no_mftime(self):
        """Chunked opening must not depend on os.path.getmtime succeeding."""
        # rasterio can accept "filename" arguments that are actually urls,
        # including paths to remote files.
        # In issue #1816, we found that these caused dask to break, because
        # the modification time was used to determine the dask token. This
        # test ensures we can still chunk such files when reading with
        # rasterio.
        with create_tmp_geotiff(
            8, 10, 3, transform_args=[1, 2, 0.5, 2.0], crs="+proj=latlong"
        ) as (tmp_file, expected):
            with mock.patch("os.path.getmtime", side_effect=OSError):
                with pytest.warns(DeprecationWarning), xr.open_rasterio(
                    tmp_file, chunks=(1, 2, 2)
                ) as actual:
                    import dask.array as da

                    assert isinstance(actual.data, da.Array)
                    assert_allclose(actual, expected)
    @network
    def test_http_url(self):
        """open_rasterio accepts http URLs, both eagerly and chunked."""
        # more examples urls here
        # http://download.osgeo.org/geotiff/samples/
        url = "http://download.osgeo.org/geotiff/samples/made_up/ntf_nord.tif"
        with pytest.warns(DeprecationWarning), xr.open_rasterio(url) as actual:
            assert actual.shape == (1, 512, 512)
        # make sure chunking works
        with pytest.warns(DeprecationWarning), xr.open_rasterio(
            url, chunks=(1, 256, 256)
        ) as actual:
            import dask.array as da

            assert isinstance(actual.data, da.Array)
    def test_rasterio_environment(self):
        """rasterio.Env(GDAL_SKIP="GTiff") disables the driver, so opening fails."""
        import rasterio

        with create_tmp_geotiff() as (tmp_file, expected):
            # Should fail with error since suffix not allowed
            with pytest.raises(Exception):
                with rasterio.Env(GDAL_SKIP="GTiff"):
                    with pytest.warns(DeprecationWarning), xr.open_rasterio(
                        tmp_file
                    ) as actual:
                        assert_allclose(actual, expected)
    @pytest.mark.xfail(reason="rasterio 1.1.1 is broken. GH3573")
    def test_rasterio_vrt(self):
        """open_rasterio on a WarpedVRT matches rasterio's own view of it."""
        import rasterio

        # tmp_file default crs is UTM: CRS({'init': 'epsg:32618'}
        with create_tmp_geotiff() as (tmp_file, expected):
            with rasterio.open(tmp_file) as src:
                with rasterio.vrt.WarpedVRT(src, crs="epsg:4326") as vrt:
                    expected_shape = (vrt.width, vrt.height)
                    expected_crs = vrt.crs
                    expected_res = vrt.res
                    # Value of single pixel in center of image
                    lon, lat = vrt.xy(vrt.width // 2, vrt.height // 2)
                    expected_val = next(vrt.sample([(lon, lat)]))
                    with pytest.warns(DeprecationWarning), xr.open_rasterio(vrt) as da:
                        actual_shape = (da.sizes["x"], da.sizes["y"])
                        actual_crs = da.crs
                        actual_res = da.res
                        actual_val = da.sel(dict(x=lon, y=lat), method="nearest").data

                        assert actual_crs == expected_crs
                        assert actual_res == expected_res
                        assert actual_shape == expected_shape
                        # NOTE(review): this compares the two .all() reductions,
                        # not the arrays elementwise — likely intended
                        # (expected_val == actual_val).all(); confirm.
                        assert expected_val.all() == actual_val.all()
    @pytest.mark.filterwarnings(
        "ignore:open_rasterio is Deprecated in favor of rioxarray."
    )
    def test_rasterio_vrt_with_transform_and_size(self):
        """A WarpedVRT built with explicit transform/width/height opens correctly."""
        # Test open_rasterio() support of WarpedVRT with transform, width and
        # height (issue #2864)
        # https://github.com/rasterio/rasterio/1768
        rasterio = pytest.importorskip("rasterio", minversion="1.0.28")
        from affine import Affine
        from rasterio.warp import calculate_default_transform

        with create_tmp_geotiff() as (tmp_file, expected):
            with rasterio.open(tmp_file) as src:
                # Estimate the transform, width and height
                # for a change of resolution
                # tmp_file initial res is (1000,2000) (default values)
                trans, w, h = calculate_default_transform(
                    src.crs, src.crs, src.width, src.height, resolution=500, *src.bounds
                )
                with rasterio.vrt.WarpedVRT(
                    src, transform=trans, width=w, height=h
                ) as vrt:
                    expected_shape = (vrt.width, vrt.height)
                    expected_res = vrt.res
                    expected_transform = vrt.transform
                    with xr.open_rasterio(vrt) as da:
                        actual_shape = (da.sizes["x"], da.sizes["y"])
                        actual_res = da.res
                        actual_transform = Affine(*da.transform)
                        assert actual_res == expected_res
                        assert actual_shape == expected_shape
                        assert actual_transform == expected_transform
    def test_rasterio_vrt_with_src_crs(self):
        """A WarpedVRT with a manually supplied src_crs propagates that CRS."""
        # Test open_rasterio() support of WarpedVRT with specified src_crs
        # https://github.com/rasterio/rasterio/1768
        rasterio = pytest.importorskip("rasterio", minversion="1.0.28")
        # create geotiff with no CRS and specify it manually
        with create_tmp_geotiff(crs=None) as (tmp_file, expected):
            src_crs = rasterio.crs.CRS({"init": "epsg:32618"})
            with rasterio.open(tmp_file) as src:
                assert src.crs is None
                with rasterio.vrt.WarpedVRT(src, src_crs=src_crs) as vrt:
                    with pytest.warns(DeprecationWarning), xr.open_rasterio(vrt) as da:
                        assert da.crs == src_crs
    @network
    def test_rasterio_vrt_network(self):
        """Remote WarpedVRT: xarray agrees with rasterio's direct sampling."""
        # Make sure loading w/ rasterio give same results as xarray
        import rasterio

        # use same url that rasterio package uses in tests
        prefix = "https://landsat-pds.s3.amazonaws.com/L8/139/045/"
        image = "LC81390452014295LGN00/LC81390452014295LGN00_B1.TIF"
        httpstif = prefix + image
        with rasterio.Env(aws_unsigned=True):
            with rasterio.open(httpstif) as src:
                with rasterio.vrt.WarpedVRT(src, crs="epsg:4326") as vrt:
                    expected_shape = vrt.width, vrt.height
                    expected_res = vrt.res
                    # Value of single pixel in center of image
                    lon, lat = vrt.xy(vrt.width // 2, vrt.height // 2)
                    expected_val = next(vrt.sample([(lon, lat)]))
                    with pytest.warns(DeprecationWarning), xr.open_rasterio(vrt) as da:
                        actual_shape = da.sizes["x"], da.sizes["y"]
                        actual_res = da.res
                        actual_val = da.sel(dict(x=lon, y=lat), method="nearest").data

                        assert actual_shape == expected_shape
                        assert actual_res == expected_res
                        assert expected_val == actual_val
class TestEncodingInvalid:
    """Invalid/unsupported variable encodings are rejected or silently dropped."""

    def test_extract_nc4_variable_encoding(self):
        # Unknown encoding keys raise only when raise_on_invalid=True.
        var = xr.Variable(("x",), [1, 2, 3], {}, {"foo": "bar"})
        with pytest.raises(ValueError, match=r"unexpected encoding"):
            _extract_nc4_variable_encoding(var, raise_on_invalid=True)

        # Backend-internal keys like "chunking" are dropped silently.
        var = xr.Variable(("x",), [1, 2, 3], {}, {"chunking": (2, 1)})
        encoding = _extract_nc4_variable_encoding(var)
        assert {} == encoding

        # regression test: valid keys survive even with raise_on_invalid=True.
        var = xr.Variable(("x",), [1, 2, 3], {}, {"shuffle": True})
        encoding = _extract_nc4_variable_encoding(var, raise_on_invalid=True)
        assert {"shuffle": True} == encoding

        # Variables with unlim dims must be chunked on output.
        var = xr.Variable(("x",), [1, 2, 3], {}, {"contiguous": True})
        encoding = _extract_nc4_variable_encoding(var, unlimited_dims=("x",))
        assert {} == encoding

    def test_extract_h5nc_encoding(self):
        # not supported with h5netcdf (yet)
        # NOTE(review): "least_sigificant_digit" is misspelled; since any
        # unknown key triggers the error, the test passes either way —
        # confirm whether the typo is deliberate.
        var = xr.Variable(("x",), [1, 2, 3], {}, {"least_sigificant_digit": 2})
        with pytest.raises(ValueError, match=r"unexpected encoding"):
            _extract_nc4_variable_encoding(var, raise_on_invalid=True)
class MiscObject:
    """Arbitrary object implementing no special protocols.

    Used by the attr-validation tests as an example of an invalid
    attribute name/value.
    """
@requires_netCDF4
class TestValidateAttrs:
    def test_validating_attrs(self):
        """to_netcdf must reject non-string attr names and non-serializable values.

        The same checks are run against dataset-level, variable-level and
        coordinate-level attrs.
        """

        def new_dataset():
            # Fresh dataset per case so attr mutations don't leak between checks.
            return Dataset({"data": ("y", np.arange(10.0))}, {"y": np.arange(10)})

        def new_dataset_and_dataset_attrs():
            ds = new_dataset()
            return ds, ds.attrs

        def new_dataset_and_data_attrs():
            ds = new_dataset()
            return ds, ds.data.attrs

        def new_dataset_and_coord_attrs():
            ds = new_dataset()
            return ds, ds.coords["y"].attrs

        for new_dataset_and_attrs in [
            new_dataset_and_dataset_attrs,
            new_dataset_and_data_attrs,
            new_dataset_and_coord_attrs,
        ]:
            # Invalid attr names: non-string keys and the empty string.
            ds, attrs = new_dataset_and_attrs()
            attrs[123] = "test"
            with pytest.raises(TypeError, match=r"Invalid name for attr: 123"):
                ds.to_netcdf("test.nc")

            ds, attrs = new_dataset_and_attrs()
            attrs[MiscObject()] = "test"
            with pytest.raises(TypeError, match=r"Invalid name for attr: "):
                ds.to_netcdf("test.nc")

            ds, attrs = new_dataset_and_attrs()
            attrs[""] = "test"
            with pytest.raises(ValueError, match=r"Invalid name for attr '':"):
                ds.to_netcdf("test.nc")

            # This one should work
            ds, attrs = new_dataset_and_attrs()
            attrs["test"] = "test"
            with create_tmp_file() as tmp_file:
                ds.to_netcdf(tmp_file)

            # Invalid attr values: dicts and arbitrary objects.
            ds, attrs = new_dataset_and_attrs()
            attrs["test"] = {"a": 5}
            with pytest.raises(TypeError, match=r"Invalid value for attr 'test'"):
                ds.to_netcdf("test.nc")

            ds, attrs = new_dataset_and_attrs()
            attrs["test"] = MiscObject()
            with pytest.raises(TypeError, match=r"Invalid value for attr 'test'"):
                ds.to_netcdf("test.nc")

            # Valid attr values: numbers, sequences, arrays, strings.
            ds, attrs = new_dataset_and_attrs()
            attrs["test"] = 5
            with create_tmp_file() as tmp_file:
                ds.to_netcdf(tmp_file)

            ds, attrs = new_dataset_and_attrs()
            attrs["test"] = 3.14
            with create_tmp_file() as tmp_file:
                ds.to_netcdf(tmp_file)

            ds, attrs = new_dataset_and_attrs()
            attrs["test"] = [1, 2, 3, 4]
            with create_tmp_file() as tmp_file:
                ds.to_netcdf(tmp_file)

            ds, attrs = new_dataset_and_attrs()
            attrs["test"] = (1.9, 2.5)
            with create_tmp_file() as tmp_file:
                ds.to_netcdf(tmp_file)

            ds, attrs = new_dataset_and_attrs()
            attrs["test"] = np.arange(5)
            with create_tmp_file() as tmp_file:
                ds.to_netcdf(tmp_file)

            ds, attrs = new_dataset_and_attrs()
            attrs["test"] = "This is a string"
            with create_tmp_file() as tmp_file:
                ds.to_netcdf(tmp_file)

            ds, attrs = new_dataset_and_attrs()
            attrs["test"] = ""
            with create_tmp_file() as tmp_file:
                ds.to_netcdf(tmp_file)
@requires_scipy_or_netCDF4
class TestDataArrayToNetCDF:
    """Round-trips of DataArray.to_netcdf / open_dataarray."""

    def test_dataarray_to_netcdf_no_name(self):
        # Unnamed DataArrays round-trip via an internal placeholder name.
        original_da = DataArray(np.arange(12).reshape((3, 4)))
        with create_tmp_file() as tmp:
            original_da.to_netcdf(tmp)
            with open_dataarray(tmp) as loaded_da:
                assert_identical(original_da, loaded_da)

    def test_dataarray_to_netcdf_with_name(self):
        original_da = DataArray(np.arange(12).reshape((3, 4)), name="test")
        with create_tmp_file() as tmp:
            original_da.to_netcdf(tmp)
            with open_dataarray(tmp) as loaded_da:
                assert_identical(original_da, loaded_da)

    def test_dataarray_to_netcdf_coord_name_clash(self):
        # Array name colliding with a dimension name must still round-trip.
        original_da = DataArray(
            np.arange(12).reshape((3, 4)), dims=["x", "y"], name="x"
        )
        with create_tmp_file() as tmp:
            original_da.to_netcdf(tmp)
            with open_dataarray(tmp) as loaded_da:
                assert_identical(original_da, loaded_da)

    def test_open_dataarray_options(self):
        # drop_variables is honoured by open_dataarray.
        data = DataArray(np.arange(5), coords={"y": ("x", range(5))}, dims=["x"])
        with create_tmp_file() as tmp:
            data.to_netcdf(tmp)
            expected = data.drop_vars("y")
            with open_dataarray(tmp, drop_variables=["y"]) as loaded:
                assert_identical(expected, loaded)

    @requires_scipy
    def test_dataarray_to_netcdf_return_bytes(self):
        # regression test for GH1410: no path -> in-memory bytes.
        data = xr.DataArray([1, 2, 3])
        output = data.to_netcdf()
        assert isinstance(output, bytes)

    def test_dataarray_to_netcdf_no_name_pathlib(self):
        # pathlib.Path targets are accepted as well as str paths.
        original_da = DataArray(np.arange(12).reshape((3, 4)))
        with create_tmp_file() as tmp:
            tmp = Path(tmp)
            original_da.to_netcdf(tmp)
            with open_dataarray(tmp) as loaded_da:
                assert_identical(original_da, loaded_da)
@requires_scipy_or_netCDF4
def test_no_warning_from_dask_effective_get():
    """Writing an (empty) Dataset must not emit any warnings."""
    with create_tmp_file() as tmpfile:
        with assert_no_warnings():
            ds = Dataset()
            ds.to_netcdf(tmpfile)
@requires_scipy_or_netCDF4
def test_source_encoding_always_present():
    """open_dataset records the source path in ds.encoding["source"]."""
    # Test for GH issue #2550.
    rnddata = np.random.randn(10)
    original = Dataset({"foo": ("x", rnddata)})
    with create_tmp_file() as tmp:
        original.to_netcdf(tmp)
        with open_dataset(tmp) as ds:
            assert ds.encoding["source"] == tmp
def _assert_no_dates_out_of_range_warning(record):
undesired_message = "dates out of range"
for warning in record:
assert undesired_message not in str(warning.message)
@requires_scipy_or_netCDF4
@pytest.mark.parametrize("calendar", _STANDARD_CALENDARS)
def test_use_cftime_standard_calendar_default_in_range(calendar):
    """In-range standard-calendar times decode to np.datetime64 without warnings."""
    x = [0, 1]
    time = [0, 720]
    units_date = "2000-01-01"
    units = "days since 2000-01-01"
    original = DataArray(x, [("time", time)], name="x")
    original = original.to_dataset()
    for v in ["x", "time"]:
        original[v].attrs["units"] = units
        original[v].attrs["calendar"] = calendar

    # Expected values computed independently via numpy datetime arithmetic.
    x_timedeltas = np.array(x).astype("timedelta64[D]")
    time_timedeltas = np.array(time).astype("timedelta64[D]")
    decoded_x = np.datetime64(units_date, "ns") + x_timedeltas
    decoded_time = np.datetime64(units_date, "ns") + time_timedeltas
    expected_x = DataArray(decoded_x, [("time", decoded_time)], name="x")
    expected_time = DataArray(decoded_time, [("time", decoded_time)], name="time")

    with create_tmp_file() as tmp_file:
        original.to_netcdf(tmp_file)
        with warnings.catch_warnings(record=True) as record:
            with open_dataset(tmp_file) as ds:
                assert_identical(expected_x, ds.x)
                assert_identical(expected_time, ds.time)
            _assert_no_dates_out_of_range_warning(record)
@requires_cftime
@requires_scipy_or_netCDF4
@pytest.mark.parametrize("calendar", _STANDARD_CALENDARS)
@pytest.mark.parametrize("units_year", [1500, 2500])
def test_use_cftime_standard_calendar_default_out_of_range(calendar, units_year):
    """Out-of-range times fall back to cftime objects with a SerializationWarning."""
    import cftime

    x = [0, 1]
    time = [0, 720]
    units = f"days since {units_year}-01-01"
    original = DataArray(x, [("time", time)], name="x")
    original = original.to_dataset()
    for v in ["x", "time"]:
        original[v].attrs["units"] = units
        original[v].attrs["calendar"] = calendar

    decoded_x = cftime.num2date(x, units, calendar, only_use_cftime_datetimes=True)
    decoded_time = cftime.num2date(
        time, units, calendar, only_use_cftime_datetimes=True
    )
    expected_x = DataArray(decoded_x, [("time", decoded_time)], name="x")
    expected_time = DataArray(decoded_time, [("time", decoded_time)], name="time")

    with create_tmp_file() as tmp_file:
        original.to_netcdf(tmp_file)
        with pytest.warns(SerializationWarning):
            with open_dataset(tmp_file) as ds:
                assert_identical(expected_x, ds.x)
                assert_identical(expected_time, ds.time)
@requires_cftime
@requires_scipy_or_netCDF4
@pytest.mark.parametrize("calendar", _ALL_CALENDARS)
@pytest.mark.parametrize("units_year", [1500, 2000, 2500])
def test_use_cftime_true(calendar, units_year):
    """use_cftime=True always yields cftime objects, with no range warnings."""
    import cftime

    x = [0, 1]
    time = [0, 720]
    units = f"days since {units_year}-01-01"
    original = DataArray(x, [("time", time)], name="x")
    original = original.to_dataset()
    for v in ["x", "time"]:
        original[v].attrs["units"] = units
        original[v].attrs["calendar"] = calendar

    decoded_x = cftime.num2date(x, units, calendar, only_use_cftime_datetimes=True)
    decoded_time = cftime.num2date(
        time, units, calendar, only_use_cftime_datetimes=True
    )
    expected_x = DataArray(decoded_x, [("time", decoded_time)], name="x")
    expected_time = DataArray(decoded_time, [("time", decoded_time)], name="time")

    with create_tmp_file() as tmp_file:
        original.to_netcdf(tmp_file)
        with warnings.catch_warnings(record=True) as record:
            with open_dataset(tmp_file, use_cftime=True) as ds:
                assert_identical(expected_x, ds.x)
                assert_identical(expected_time, ds.time)
            _assert_no_dates_out_of_range_warning(record)
@requires_scipy_or_netCDF4
@pytest.mark.parametrize("calendar", _STANDARD_CALENDARS)
def test_use_cftime_false_standard_calendar_in_range(calendar):
    """use_cftime=False with in-range standard calendars decodes to np.datetime64."""
    x = [0, 1]
    time = [0, 720]
    units_date = "2000-01-01"
    units = "days since 2000-01-01"
    original = DataArray(x, [("time", time)], name="x")
    original = original.to_dataset()
    for v in ["x", "time"]:
        original[v].attrs["units"] = units
        original[v].attrs["calendar"] = calendar

    x_timedeltas = np.array(x).astype("timedelta64[D]")
    time_timedeltas = np.array(time).astype("timedelta64[D]")
    decoded_x = np.datetime64(units_date, "ns") + x_timedeltas
    decoded_time = np.datetime64(units_date, "ns") + time_timedeltas
    expected_x = DataArray(decoded_x, [("time", decoded_time)], name="x")
    expected_time = DataArray(decoded_time, [("time", decoded_time)], name="time")

    with create_tmp_file() as tmp_file:
        original.to_netcdf(tmp_file)
        with warnings.catch_warnings(record=True) as record:
            with open_dataset(tmp_file, use_cftime=False) as ds:
                assert_identical(expected_x, ds.x)
                assert_identical(expected_time, ds.time)
            _assert_no_dates_out_of_range_warning(record)
@requires_scipy_or_netCDF4
@pytest.mark.parametrize("calendar", _STANDARD_CALENDARS)
@pytest.mark.parametrize("units_year", [1500, 2500])
def test_use_cftime_false_standard_calendar_out_of_range(calendar, units_year):
    """use_cftime=False must fail when dates exceed the np.datetime64[ns] range."""
    x = [0, 1]
    time = [0, 720]
    units = f"days since {units_year}-01-01"
    original = DataArray(x, [("time", time)], name="x")
    original = original.to_dataset()
    for v in ["x", "time"]:
        original[v].attrs["units"] = units
        original[v].attrs["calendar"] = calendar

    with create_tmp_file() as tmp_file:
        original.to_netcdf(tmp_file)
        with pytest.raises((OutOfBoundsDatetime, ValueError)):
            open_dataset(tmp_file, use_cftime=False)
@requires_scipy_or_netCDF4
@pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS)
@pytest.mark.parametrize("units_year", [1500, 2000, 2500])
def test_use_cftime_false_nonstandard_calendar(calendar, units_year):
    """use_cftime=False must fail for non-standard calendars regardless of range."""
    x = [0, 1]
    time = [0, 720]
    # NOTE(review): units string omits "-01-01" unlike the sibling tests —
    # confirm this is intentional.
    units = f"days since {units_year}"
    original = DataArray(x, [("time", time)], name="x")
    original = original.to_dataset()
    for v in ["x", "time"]:
        original[v].attrs["units"] = units
        original[v].attrs["calendar"] = calendar

    with create_tmp_file() as tmp_file:
        original.to_netcdf(tmp_file)
        with pytest.raises((OutOfBoundsDatetime, ValueError)):
            open_dataset(tmp_file, use_cftime=False)
@pytest.mark.parametrize("engine", ["netcdf4", "scipy"])
def test_invalid_netcdf_raises(engine):
    """invalid_netcdf=True is h5netcdf-only; other engines must reject it."""
    data = create_test_data()
    with pytest.raises(ValueError, match=r"unrecognized option 'invalid_netcdf'"):
        data.to_netcdf("foo.nc", engine=engine, invalid_netcdf=True)
@requires_zarr
def test_encode_zarr_attr_value():
    """encode_zarr_attr_value converts numpy values to JSON-friendly builtins."""
    # array -> list
    arr = np.array([1, 2, 3])
    expected = [1, 2, 3]
    actual = backends.zarr.encode_zarr_attr_value(arr)
    assert isinstance(actual, list)
    assert actual == expected

    # scalar array -> scalar
    sarr = np.array(1)[()]
    expected = 1
    actual = backends.zarr.encode_zarr_attr_value(sarr)
    assert isinstance(actual, int)
    assert actual == expected

    # string -> string (no change)
    expected = "foo"
    actual = backends.zarr.encode_zarr_attr_value(expected)
    assert isinstance(actual, str)
    assert actual == expected
@requires_zarr
def test_extract_zarr_variable_encoding():
    """extract_zarr_variable_encoding passes chunks through, gating invalid keys."""
    var = xr.Variable("x", [1, 2])
    actual = backends.zarr.extract_zarr_variable_encoding(var)
    assert "chunks" in actual
    assert actual["chunks"] is None

    var = xr.Variable("x", [1, 2], encoding={"chunks": (1,)})
    actual = backends.zarr.extract_zarr_variable_encoding(var)
    assert actual["chunks"] == (1,)

    # does not raise on invalid
    var = xr.Variable("x", [1, 2], encoding={"foo": (1,)})
    actual = backends.zarr.extract_zarr_variable_encoding(var)

    # raises on invalid
    var = xr.Variable("x", [1, 2], encoding={"foo": (1,)})
    with pytest.raises(ValueError, match=r"unexpected encoding parameters"):
        actual = backends.zarr.extract_zarr_variable_encoding(
            var, raise_on_invalid=True
        )
@requires_zarr
@requires_fsspec
@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
def test_open_fsspec():
    """zarr stores reachable through fsspec URLs open via engine="zarr".

    Covers plain and simplecache:: URLs, for both open_dataset and
    open_mfdataset with a glob pattern.
    """
    import fsspec
    import zarr

    if not hasattr(zarr.storage, "FSStore") or not hasattr(
        zarr.storage.FSStore, "getitems"
    ):
        pytest.skip("zarr too old")

    # Write two stores to an in-memory filesystem, the second shifted by 1 day
    # so a time-concatenation of the pair is well-defined.
    ds = open_dataset(os.path.join(os.path.dirname(__file__), "data", "example_1.nc"))
    m = fsspec.filesystem("memory")
    mm = m.get_mapper("out1.zarr")
    ds.to_zarr(mm)  # old interface
    ds0 = ds.copy()
    ds0["time"] = ds.time + pd.to_timedelta("1 day")
    mm = m.get_mapper("out2.zarr")
    ds0.to_zarr(mm)  # old interface

    # single dataset
    url = "memory://out2.zarr"
    ds2 = open_dataset(url, engine="zarr")
    xr.testing.assert_equal(ds0, ds2)

    # single dataset with caching
    url = "simplecache::memory://out2.zarr"
    ds2 = open_dataset(url, engine="zarr")
    xr.testing.assert_equal(ds0, ds2)

    # multi dataset
    url = "memory://out*.zarr"
    ds2 = open_mfdataset(url, engine="zarr")
    xr.testing.assert_equal(xr.concat([ds, ds0], dim="time"), ds2)

    # multi dataset with caching
    url = "simplecache::memory://out*.zarr"
    ds2 = open_mfdataset(url, engine="zarr")
    xr.testing.assert_equal(xr.concat([ds, ds0], dim="time"), ds2)
@requires_h5netcdf
@requires_netCDF4
def test_load_single_value_h5netcdf(tmp_path):
    """Test that numeric single-element vector attributes are handled fine.

    At present (h5netcdf v0.8.1), the h5netcdf exposes single-valued numeric variable
    attributes as arrays of length 1, as opposed to scalars for the NetCDF4
    backend. This was leading to a ValueError upon loading a single value from
    a file, see #4471. Test that loading causes no failure.
    """
    ds = xr.Dataset(
        {
            "test": xr.DataArray(
                # scale_factor/add_offset trigger the CF-scaling decode path.
                np.array([0]), dims=("x",), attrs={"scale_factor": 1, "add_offset": 0}
            )
        }
    )
    ds.to_netcdf(tmp_path / "test.nc")
    with xr.open_dataset(tmp_path / "test.nc", engine="h5netcdf") as ds2:
        # Loading a single element must not raise (regression check).
        ds2["test"][0].load()
@requires_zarr
@requires_dask
@pytest.mark.parametrize(
    "chunks", ["auto", -1, {}, {"x": "auto"}, {"x": -1}, {"x": "auto", "y": -1}]
)
def test_open_dataset_chunking_zarr(chunks, tmp_path):
    """Opening a zarr store with various ``chunks`` arguments must produce
    the same dask chunking as ``Dataset.chunk`` with the same argument."""
    # The on-disk encoding uses fixed 100-element chunks.
    encoded_chunks = 100
    dask_arr = da.from_array(
        np.ones((500, 500), dtype="float64"), chunks=encoded_chunks
    )
    ds = xr.Dataset(
        {
            "test": xr.DataArray(
                dask_arr,
                dims=("x", "y"),
            )
        }
    )
    ds["test"].encoding["chunks"] = encoded_chunks
    ds.to_zarr(tmp_path / "test.zarr")

    # Pin the dask automatic chunk size so "auto" resolves deterministically.
    with dask.config.set({"array.chunk-size": "1MiB"}):
        expected = ds.chunk(chunks)
        with open_dataset(
            tmp_path / "test.zarr", engine="zarr", chunks=chunks
        ) as actual:
            xr.testing.assert_chunks_equal(actual, expected)
@requires_zarr
@requires_dask
@pytest.mark.parametrize(
    "chunks", ["auto", -1, {}, {"x": "auto"}, {"x": -1}, {"x": "auto", "y": -1}]
)
@pytest.mark.filterwarnings("ignore:Specified Dask chunks")
def test_chunking_consintency(chunks, tmp_path):
    """Zarr and netCDF backends must agree on the resulting chunking for the
    same ``chunks`` argument.

    NOTE(review): the function name has a typo ("consintency" for
    "consistency"); left unchanged to keep the public test id stable.
    """
    # No explicit on-disk chunk encoding this time.
    encoded_chunks = {}
    dask_arr = da.from_array(
        np.ones((500, 500), dtype="float64"), chunks=encoded_chunks
    )
    ds = xr.Dataset(
        {
            "test": xr.DataArray(
                dask_arr,
                dims=("x", "y"),
            )
        }
    )
    ds["test"].encoding["chunks"] = encoded_chunks
    ds.to_zarr(tmp_path / "test.zarr")
    ds.to_netcdf(tmp_path / "test.nc")

    with dask.config.set({"array.chunk-size": "1MiB"}):
        expected = ds.chunk(chunks)
        # Same chunking whether the data comes from zarr...
        with xr.open_dataset(
            tmp_path / "test.zarr", engine="zarr", chunks=chunks
        ) as actual:
            xr.testing.assert_chunks_equal(actual, expected)
        # ...or from netCDF.
        with xr.open_dataset(tmp_path / "test.nc", chunks=chunks) as actual:
            xr.testing.assert_chunks_equal(actual, expected)
def _check_guess_can_open_and_open(entrypoint, obj, engine, expected):
    """Assert that *entrypoint* claims it can open *obj*, and that opening
    *obj* with the given *engine* yields a dataset identical to *expected*."""
    can_open = entrypoint.guess_can_open(obj)
    assert can_open
    opened = open_dataset(obj, engine=engine)
    with opened as actual:
        assert_identical(expected, actual)
@requires_netCDF4
def test_netcdf4_entrypoint(tmp_path):
    """NetCDF4BackendEntrypoint must recognize netCDF3/4 files, common
    netCDF extensions and remote URLs, and reject everything else."""
    entrypoint = NetCDF4BackendEntrypoint()
    ds = create_test_data()

    # Real files: guessing is based on the magic bytes, so both netCDF3 and
    # netCDF4 formats must be accepted, via Path and via str.
    path = tmp_path / "foo"
    ds.to_netcdf(path, format="netcdf3_classic")
    _check_guess_can_open_and_open(entrypoint, path, engine="netcdf4", expected=ds)
    _check_guess_can_open_and_open(entrypoint, str(path), engine="netcdf4", expected=ds)

    path = tmp_path / "bar"
    ds.to_netcdf(path, format="netcdf4_classic")
    _check_guess_can_open_and_open(entrypoint, path, engine="netcdf4", expected=ds)
    _check_guess_can_open_and_open(entrypoint, str(path), engine="netcdf4", expected=ds)

    # Non-existing targets: guessing falls back to URL scheme / extension.
    assert entrypoint.guess_can_open("http://something/remote")
    assert entrypoint.guess_can_open("something-local.nc")
    assert entrypoint.guess_can_open("something-local.nc4")
    assert entrypoint.guess_can_open("something-local.cdf")
    assert not entrypoint.guess_can_open("not-found-and-no-extension")

    # An existing file without netCDF magic bytes must be rejected.
    path = tmp_path / "baz"
    with open(path, "wb") as f:
        f.write(b"not-a-netcdf-file")
    assert not entrypoint.guess_can_open(path)
@requires_scipy
def test_scipy_entrypoint(tmp_path):
    """ScipyBackendEntrypoint must open paths, open file objects, in-memory
    bytes, BytesIO and gzipped files, and guess correctly from extensions."""
    entrypoint = ScipyBackendEntrypoint()
    ds = create_test_data()

    path = tmp_path / "foo"
    ds.to_netcdf(path, engine="scipy")
    _check_guess_can_open_and_open(entrypoint, path, engine="scipy", expected=ds)
    _check_guess_can_open_and_open(entrypoint, str(path), engine="scipy", expected=ds)
    with open(path, "rb") as f:
        _check_guess_can_open_and_open(entrypoint, f, engine="scipy", expected=ds)

    # In-memory bytes and BytesIO round-trips.
    contents = ds.to_netcdf(engine="scipy")
    _check_guess_can_open_and_open(entrypoint, contents, engine="scipy", expected=ds)
    _check_guess_can_open_and_open(
        entrypoint, BytesIO(contents), engine="scipy", expected=ds
    )

    # Gzipped netCDF3 files are supported by the scipy backend.
    path = tmp_path / "foo.nc.gz"
    with gzip.open(path, mode="wb") as f:
        f.write(contents)
    _check_guess_can_open_and_open(entrypoint, path, engine="scipy", expected=ds)
    _check_guess_can_open_and_open(entrypoint, str(path), engine="scipy", expected=ds)

    # Extension-only guesses (files need not exist).
    assert entrypoint.guess_can_open("something-local.nc")
    assert entrypoint.guess_can_open("something-local.nc.gz")
    assert not entrypoint.guess_can_open("not-found-and-no-extension")
    assert not entrypoint.guess_can_open(b"not-a-netcdf-file")
@requires_h5netcdf
def test_h5netcdf_entrypoint(tmp_path):
    """H5netcdfBackendEntrypoint must open netCDF4/HDF5 files via Path, str
    and open file object, and guess correctly from extensions."""
    entrypoint = H5netcdfBackendEntrypoint()
    ds = create_test_data()

    path = tmp_path / "foo"
    ds.to_netcdf(path, engine="h5netcdf")
    _check_guess_can_open_and_open(entrypoint, path, engine="h5netcdf", expected=ds)
    _check_guess_can_open_and_open(
        entrypoint, str(path), engine="h5netcdf", expected=ds
    )
    with open(path, "rb") as f:
        _check_guess_can_open_and_open(entrypoint, f, engine="h5netcdf", expected=ds)

    # Extension-only guesses (files need not exist).
    assert entrypoint.guess_can_open("something-local.nc")
    assert entrypoint.guess_can_open("something-local.nc4")
    assert entrypoint.guess_can_open("something-local.cdf")
    assert not entrypoint.guess_can_open("not-found-and-no-extension")
@requires_netCDF4
@pytest.mark.parametrize("str_type", (str, np.str_))
def test_write_file_from_np_str(str_type, tmpdir) -> None:
    # https://github.com/pydata/xarray/pull/5264
    # Regression test: an index built from numpy str_ labels used to break
    # netCDF serialization; both plain str and np.str_ must round-trip.
    scenarios = [str_type(v) for v in ["scenario_a", "scenario_b", "scenario_c"]]
    years = range(2015, 2100 + 1)
    tdf = pd.DataFrame(
        data=np.random.random((len(scenarios), len(years))),
        columns=years,
        index=scenarios,
    )
    tdf.index.name = "scenario"
    tdf.columns.name = "year"
    # Stack into a (scenario, year) MultiIndex series before converting.
    tdf = tdf.stack()
    tdf.name = "tas"

    txr = tdf.to_xarray()

    # Writing must not raise.
    txr.to_netcdf(tmpdir.join("test.nc"))
| pydata/xarray | xarray/tests/test_backends.py | Python | apache-2.0 | 214,655 | [
"NetCDF"
] | 629a75d3acb00fbbac22b59ef9d0a4d5613054346a64f75eb2ddc841a576c60b |
#!/usr/bin/env python
"""
Support for uframe stream route(s), utilized for stream information.
"""
__author__ = 'Edna Donoughe'
from flask import (jsonify, request, current_app)
from ooiservices.app import db
from ooiservices.app.uframe import uframe as api
from ooiservices.app.main.errors import (internal_server_error, bad_request)
from ooiservices.app.uframe.stream_tools import (get_stream_list, get_streams_for_rd, get_stream_for_stream_model)
from operator import itemgetter
from copy import deepcopy
from ooiservices.app.uframe.common_tools import iso_to_timestamp
from ooiservices.app.models import DisabledStreams
from ooiservices.app.uframe.stream_tools import get_instrument_list
@api.route('/instrument_list')
def get_instruments_list():
    """
    12529. Get list of instruments for the data catalog.

    Query parameter 'refresh=true' forces the cached instrument list to be
    rebuilt. Returns {'instruments': [...]} on success, otherwise a bad
    request response with the error message.
    """
    try:
        # Rebuild the cache only when explicitly requested. (A missing
        # 'refresh' arg yields None, which compares unequal to 'true'.)
        refresh = request.args.get('refresh') == 'true'
        retval = get_instrument_list(refresh=refresh)
        # Bug fix: removed leftover debug print of the list length that
        # wrote to stdout on every request.
        if not retval or retval is None:
            message = 'The instrument list did not return a value.'
            return bad_request(message)
        return jsonify(instruments=retval)
    except Exception as err:
        message = str(err)
        current_app.logger.info(message)
        return bad_request(message)
@api.route('/streams_for/<string:rd>')
def get_streams_for(rd):
    """
    12529. Get all streams for a reference designator.
    Remove depth, water_depth, latitude and longitude once review of proposed changes has been completed.

    Sample request:
        http://localhost:4000/uframe/streams_for/CE01ISSM-MFD35-00-DCLENG000
    Sample response:
    {
        "streams": [
            {
                "array_name": "Coastal Endurance",
                "assembly_name": "Seafloor Multi-Function Node (MFN)",
                "depth": 25.0,
                "display_name": "Data Concentrator Logger (DCL)",
                "end": "2017-05-12T03:33:46.923Z",
                "iris_enabled": false,
                "latitude": 44.65833,
                "long_display_name": "Coastal Endurance Oregon Inshore Surface Mooring - Seafloor Multi-Function Node (MFN) - Data Concentrator Logger (DCL)",
                "longitude": -124.09583,
                "platform_name": "Oregon Inshore Surface Mooring",
                "rds_enabled": false,
                "reference_designator": "CE01ISSM-MFD35-00-DCLENG000",
                "site_name": "Oregon Inshore Surface Mooring",
                "start": "2016-09-30T17:20:18.821Z",
                "stream": "cg_dcl_eng_dcl_dlog_status",
                "stream_dataset": "Engineering",
                "stream_display_name": "Data Logger Status",
                "stream_method": "telemetered",
                "stream_name": "telemetered_cg-dcl-eng-dcl-dlog-status",
                "stream_type": "telemetered",
                "water_depth": 25.0
            },
            ...
        ]
    }
    """
    try:
        # Sample reference designators useful when testing this route:
        """
        rd = 'GS01SUMO-RII11-02-ADCPSN010'
        rd = 'CE01ISSM-MFC31-00-CPMENG000'
        rd = 'CE01ISSM-MFD35-00-DCLENG000'
        """
        retval = get_streams_for_rd(rd)
        return jsonify(streams=retval)
    except Exception as err:
        message = str(err)
        current_app.logger.info(message)
        return bad_request(message)
# Deprecate
@api.route('/stream')
def get_streams_list():
    """ Get streams (list of dictionaries); used in the data catalog.

    List of request.args used in this function:
        'sort', 'order', 'min', 'concepts', 'search', 'startDate',
        'endDate' and 'startAt'

    Processing order: sort, drop disabled streams, optional field
    trimming ('min'), 'concepts' filter, 'search' filter, end-date window
    filter, then pagination ('startAt'/'count').
    """
    retval = get_stream_list()
    if not retval or retval is None:
        message = 'The stream list did not return a value.'
        return internal_server_error(message)

    # Sort; default is by 'end', descending. 'order=reverse' flips it.
    try:
        is_reverse = True
        if request.args.get('sort') and request.args.get('sort') != "":
            sort_by = request.args.get('sort')
            if request.args.get('order') and request.args.get('order') != "":
                order = request.args.get('order')
                if order == 'reverse':
                    is_reverse = False
        else:
            sort_by = 'end'
        retval = sorted(retval, key=itemgetter(sort_by), reverse=is_reverse)
    except (TypeError, KeyError):
        return retval

    # Fetch the disabled streams from the local database as json dicts.
    disabled_streams = DisabledStreams().query.all()
    disabled_streams = [disabled_stream.to_json()
                        for disabled_stream in disabled_streams]

    def _parse_lists(search_terms, data_set):
        """Remove from data_set every stream whose reference designator
        prefix matches a disabled entry in search_terms."""
        included_streams = []

        def _gen(term, subset, length):
            # Keep objects whose reference designator prefix (of the given
            # length) does not occur in the disabled term.
            return [obj for obj in subset
                    if obj['reference_designator'][:length] not in term]

        for stream in search_terms:
            # The disabled refDes length picks the match level:
            # array (2), site (8), node (11) or full instrument (27).
            # (The four original branches were byte-identical; collapsed.)
            match_on = len(stream['refDes'])
            if match_on in (2, 8, 11, 27):
                if len(included_streams) == 0:
                    included_streams = _gen(stream['refDes'], data_set,
                                            match_on)
                else:
                    included_streams = _gen(stream['refDes'],
                                            included_streams, match_on)

        if len(included_streams) == 0:
            return data_set
        return included_streams

    retval = _parse_lists(disabled_streams, retval)

    # If 'min' is provided and enabled, strip the bulky per-stream fields.
    if request.args.get('min') == 'True':
        for obj in retval:
            try:
                for key in ('parameter_id', 'units', 'variable_type',
                            'variable_types', 'download', 'variables',
                            'variables_shape'):
                    if key in obj:
                        del obj[key]
            except KeyError as e:
                print(e)

    # If 'concepts' provided, then filter by reference designator substring.
    if request.args.get('concepts') and request.args.get('concepts') != "":
        return_list = []
        search_term = str(request.args.get('concepts')).split()
        search_set = set(search_term)
        for subset in search_set:
            for item in retval:
                if subset.lower() in str(item['reference_designator']).lower():
                    return_list.append(item)
        retval = return_list

    # If 'search' parameter(s) provided, then filter the data.
    if request.args.get('search') and request.args.get('search') != "":

        def _matches(item, term):
            """Mirror the original elif precedence: once an optional field
            is present, later fields are not consulted."""
            if term in str(item['array_name']).lower():
                return True
            elif term in str(item['site_name']).lower():
                return True
            elif term in str(item['assembly_name']).lower():
                return True
            elif term in str(item['reference_designator']).lower():
                return True
            elif term in str(item['stream_name']).lower():
                return True
            elif 'platform_name' in item:
                if term in str(item['platform_name']).lower():
                    return True
            elif 'parameter_display_name' in item:
                if term in str(item['parameter_display_name']).lower():
                    return True
            elif 'long_display_name' in item:
                if term in str(item['long_display_name']).lower():
                    return True
            return False

        return_list = []
        search_term = str(request.args.get('search')).split()
        search_set = set(search_term)
        for subset in search_set:
            term = subset.lower()
            if len(return_list) > 0:
                # Subsequent terms intersect against the current result.
                ven_subset = []
                ven_set = deepcopy(retval)
                for item in ven_set:
                    if _matches(item, term):
                        ven_subset.append(item)
                retval = ven_subset
            else:
                # First term seeds the result list.
                for item in retval:
                    if _matches(item, term):
                        return_list.append(item)
                retval = return_list

    # If 'startDate' and 'endDate' provided, then use to filter the data.
    # NOTE(review): a truthy 'startDate' with a missing 'endDate' passes this
    # condition and fails on float(None); preserved as-is pending review.
    if request.args.get('startDate') and request.args.get('endDate') != "":
        return_val = []
        # Incoming timestamps are in milliseconds; convert to seconds.
        search_start_date = float(request.args.get('startDate')) / 1000.0
        search_end_date = float(request.args.get('endDate')) / 1000.0

        # ** we are only filtering by END date **
        for obj in retval:
            obj_end_date = iso_to_timestamp(obj['end'])
            if search_start_date <= obj_end_date <= search_end_date:
                return_val.append(obj)
        retval = return_val

    # If 'startAt' provided, then paginate the data.
    if request.args.get('startAt'):
        start_at = int(request.args.get('startAt'))
        count = int(request.args.get('count'))
        total = int(len(retval))
        retval_slice = retval[start_at:(start_at + count)]
        # Bug fix: the Response built by jsonify was previously wrapped in a
        # second jsonify(streams=...) call; return the paginated payload
        # directly (matching the original commented-out intent).
        return jsonify({"count": count,
                        "total": total,
                        "startAt": start_at,
                        "streams": retval_slice})
    else:
        return jsonify(streams=retval)
@api.route('/get_stream_for_model/<string:reference_designator>/<string:stream_method>/<string:stream>', methods=['GET'])
def get_stream_model_data(reference_designator, stream_method, stream):
    """ Get complete stream dictionary with legacy content, including parameters, for UI stream model.

    Returns {'stream_content': {...}} with HTTP 200, or a bad request
    response carrying the error message on failure.
    """
    # Flip to True locally to trace the incoming route arguments.
    debug = False
    try:
        if debug:
            print '\n debug -- reference_designator: ', reference_designator
            print '\n debug -- stream_method: ', stream_method
            print '\n debug -- stream: ', stream
        stream_content = get_stream_for_stream_model(reference_designator, stream_method, stream)
        return jsonify({'stream_content': stream_content}), 200
    except Exception as err:
        message = str(err)
        current_app.logger.info(message)
        return bad_request(message)
@api.route('/stream/parameters/<string:reference_designator>/<string:stream_method>/<string:stream>', methods=['GET'])
def get_stream_parameters(reference_designator, stream_method, stream):
    """ Get stream name and pdid; return dictionary of key/value pairs.
    Used by UI client to return netcdf request arg by selected parameter id values (new parameters argument)
    (sample: &parameters=3795,3796 for netcdf request arguments on netcdf download.)

    Sample request:
    http://localhost:4000/uframe/stream/parameters/GA01SUMO-RII11-02-FLORDG032/telemetered/flord_g_ctdbp_p_dcl_instrument
    Sample response:
    {
        "parameters": {
            "Chlorophyll-A Measurement (V)": 3796,
            "Chlorophyll-a Concentration (ug L-1)": 22,
            "Date and Time String (1)": 93,
            "Driver Timestamp, UTC (seconds since 1900-01-01)": 11,
            "Ingestion Timestamp, UTC (seconds since 1900-01-01)": 863,
            "Internal Timestamp, UTC (seconds since 1900-01-01)": 12,
            "Optical Backscatter Measurement (V)": 3795,
            "Port Timestamp, UTC (seconds since 1900-01-01)": 10,
            "Preferred Timestamp (1)": 16,
            "Time, UTC (seconds since 1900-01-01)": 7,
            "Total Volume Scattering Coefficient (m-1 sr-1)": 24
        }
    }
    """
    try:
        key_value_map = {}
        stream_content = get_stream_for_stream_model(reference_designator, stream_method, stream)
        if stream_content and stream_content is not None:
            keys = stream_content['parameter_display_name']
            values = stream_content['parameter_id']
            # Parameter ids arrive as strings like 'pd3795'; strip the 'pd'
            # prefix and convert to int for the UI.
            int_values = []
            for value in values:
                value = value.replace('pd', '')
                int_value = int(value)
                int_values.append(int_value)
            key_value_map = dict(zip(keys, int_values))
        # An empty dict is returned when no stream content was found.
        return jsonify({'parameters': key_value_map}), 200
    except Exception as err:
        message = str(err)
        current_app.logger.info(message)
        return bad_request(message)
@api.route('/disabled_streams', methods=['GET', 'POST'])
@api.route('/disabled_streams/<int:id>', methods=['DELETE'])
def disabled_streams(id=None):
    """ Process GET, POST and DELETE for disabled streams.

    @method GET:
        Returns the list of all the disabled streams from our database.

    @method POST:
        @params: ID
        Create a new 'disabled streams' in our local database.

    @method DELETE:
        @params: ID
        Delete a disabled streams identifier from our local database.
    """
    # Bug fix: 'json' was referenced below but never imported at module
    # level, so every POST raised NameError; import the stdlib module here.
    import json

    if request.method == 'GET':
        disabled_streams = DisabledStreams.query.all()
        return jsonify({'disabled_streams':
                        [disabled_stream.to_json()
                         for disabled_stream in disabled_streams]})

    elif request.method == 'POST':
        try:
            # Get the json payload.
            payload = json.loads(request.data)

            # Create a new instance of the disabled streams with the data.
            disabled_stream = DisabledStreams.from_json(payload)

            # Add to the database.
            db.session.add(disabled_stream)
            db.session.commit()
            return jsonify({'disabled_streams': 'Stream Disabled!'}), 200
        except Exception as err:
            message = str(err)
            # Roll it back if there is a problem.
            db.session.rollback()
            db.session.commit()
            return bad_request(message)

    elif request.method == 'DELETE':
        try:
            # Get the item to delete.
            disabled_stream = DisabledStreams.query.get_or_404(id)

            # Obliterate it from the db.
            db.session.delete(disabled_stream)
            db.session.commit()
            return jsonify({'message': 'Stream Enabled!'}), 200
        except Exception as err:
            message = str(err)
            # Roll it back if there is a problem.
            db.session.rollback()
            db.session.commit()
            return bad_request('Problem activating stream: %s' % message)
"NetCDF"
] | ffcf238308ca5bd26a470b2ad25834f1803a4c51b335dc8fd3e09a0b113ca51b |
#!/usr/bin/env python
"""
Generate the images and rst files for gallery of SfePy examples.
The following steps need to be made to regenerate the documentation with the
updated example files:
1. Generate the files:
- for sfepy.org deployment::
$ ./script/gen_gallery.py -l ../doc-devel
- for local test build run from ./::
$ ./script/gen_gallery.py -l doc/_build/html/
2. remove doc/examples/::
$ rm -rf doc/examples/
3. copy gallery/examples/ to doc/::
$ cp -a gallery/examples/ doc/
4. regenerate the documentation::
$ python setup.py htmldocs
Additional steps for sfepy.org deployment:
- copy doc/_build/html/ to <sfepy.org>/doc-devel/
- copy gallery/gallery.html and gallery/images/ to <sfepy.org>/
"""
from __future__ import absolute_import
import sys
import six
sys.path.append('.')
import os
import tempfile
import glob
import re
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import matplotlib.image as image
import sfepy
from sfepy.base.base import (get_default, ordered_iteritems,
import_file, output, Struct)
from sfepy.base.ioutils import (ensure_path, locate_files, remove_files,
edit_filename)
from sfepy.postprocess.domain_specific import DomainSpecificPlot
# File basenames excluded from the gallery (broken examples, helpers and
# package files that are not stand-alone examples).
omits = [
    'vibro_acoustic3d_mid.py',
    'its2D_5.py',
    'linear_elastic_probes.py',
    '__init__.py',
]

# Compiled-regex matchers; any example whose path matches one of them is
# skipped (e.g. generated output directories).
omit_dirs = [
    re.compile('.*output.*/').match,
]
custom = {
'acoustics/acoustics3d.py' : {
'_p_1' : {
'view' : (44, 57, 0.24, [-0.004, -0.007, 0.09]),
'roll' : 0,
},
'_p_2' : {
'view' : (-99, 120, 0.4, [0.0, 0.0, 0.07]),
'roll' : 141,
},
},
'acoustics/vibro_acoustic3d.py' : {
'_p1' : {
'view' : (45.0, 54.7, 1.47, [0.0, 0.0, 0.05]),
'roll' : -120,
},
'_p2' : {
'view' : (45.0, 54.7, 1.47, [0.0, 0.0, 0.15]),
'roll' : -120,
},
'_w' : {
'view' : (0.0, 0.0, 0.86, [0.0, 0.0, 0.1]),
'roll' : 0,
},
'_g0' : {
'view' : (0.0, 0.0, 0.86, [0.0, 0.0, 0.1]),
'roll' : 0,
},
},
'diffusion/laplace_1d.py' : {
'' : {
'is_wireframe' : True,
'domain_specific' : {
't' : DomainSpecificPlot('plot_warp_scalar',
['rel_scaling=1']),
},
'view' : (-90, 90, 1.5, [0, 0, 0]),
'roll' : 0,
'opacity' : {'wireframe' : 0.3},
},
},
'diffusion/laplace_coupling_lcbcs.py' : {
'' : {
'is_wireframe' : True,
'domain_specific' : {
'u1' : DomainSpecificPlot('plot_warp_scalar',
['rel_scaling=1']),
'u2' : DomainSpecificPlot('plot_warp_scalar',
['rel_scaling=1']),
},
'view' : (-82, 50, 3.6, [-0.43, -0.55, 0.4]),
'roll' : -23,
'opacity' : {'wireframe' : 0.3},
},
},
'diffusion/poisson_iga.py' : {
'' : {
'is_wireframe' : True,
'domain_specific' : {
't' : DomainSpecificPlot('plot_warp_scalar',
['rel_scaling=1']),
},
'view' : (55, 39, 6.6, [-0.35, -0.29, 0.35]),
'roll' : 15,
'opacity' : {'wireframe' : 0.3},
},
},
'diffusion/sinbc.py' : {
'_t' : {
'is_wireframe' : True,
'domain_specific' : {
't' : DomainSpecificPlot('plot_warp_scalar',
['rel_scaling=1']),
},
'view' : (-170, 30, 4.7, [0.34, 0.23, -0.26]),
'roll' : 71,
'opacity' : {'wireframe' : 0.3},
},
'_grad' : {
'opacity' : {'surface' : 0.3},
'view' : (-170, 30, 4.7, [0.34, 0.23, -0.26]),
'roll' : 71,
},
},
'linear_elasticity/elastic_contact_planes.py' : {
'' : {
'is_wireframe' : True,
'domain_specific' : {
'u' : DomainSpecificPlot('plot_displacements',
['rel_scaling=1']),
},
'view' : (-82, 47, 3.4, [-0.5, -0.24, -0.2]),
'roll' : -8.4,
'opacity' : {'wireframe' : 0.3},
},
},
'linear_elasticity/elastic_contact_sphere.py' : {
'' : {
'is_wireframe' : True,
'domain_specific' : {
'u' : DomainSpecificPlot('plot_displacements',
['rel_scaling=1']),
},
'view' : (-82, 47, 3.4, [-0.5, -0.24, -0.2]),
'roll' : -8.4,
'opacity' : {'wireframe' : 0.3},
},
},
'linear_elasticity/elastic_shifted_periodic.py' : {
'' : {
'is_wireframe' : True,
'only_names' : ['u'],
'domain_specific' : {
'u' : DomainSpecificPlot('plot_displacements',
['rel_scaling=1',
'color_kind="scalars"',
'color_name="von_mises_stress"']),
},
'view' : (142, 39, 16, [-4.7, -2.1, -1.9]),
'roll' : 8.4,
'opacity' : {'wireframe' : 0.3},
},
},
'linear_elasticity/linear_elastic_iga.py' : {
'' : {
'is_wireframe' : True,
'domain_specific' : {
'u' : DomainSpecificPlot('plot_displacements',
['rel_scaling=1']),
},
'view' : (-37, 51, 1.5, [-0.28, -0.29, 0.0]),
'roll' : -51.5,
'opacity' : {'wireframe' : 0.2},
},
},
'linear_elasticity/shell10x_cantilever.py' : {
'' : {
'is_wireframe' : True,
'domain_specific' : {
'u_disp' : DomainSpecificPlot('plot_displacements',
['rel_scaling=1']),
},
'view' : (-45, 81, 0.59, [-0.075, 0.023, 0.093]),
'roll' : -75.0,
'opacity' : {'wireframe' : 0.5},
},
},
'navier_stokes/stokes_slip_bc.py' : {
'' : {
'view' : (-63, 52, 5.2, [-1.5, -0.65, 0.12]),
'roll' : -32,
'resolution' : (800, 600),
'layout' : 'col',
'rel_scaling' : 0.1,
},
},
'navier_stokes/stokes_slip_bc_penalty.py' : {
'' : {
'view' : (-63, 52, 5.2, [-1.5, -0.65, 0.12]),
'roll' : -32,
'resolution' : (800, 600),
'layout' : 'col',
'rel_scaling' : 0.1,
},
},
'multi_physics/thermo_elasticity_ess.py' : {
'' : {
'is_wireframe' : True,
'only_names' : ['u'],
'domain_specific' : {
'u' : DomainSpecificPlot('plot_displacements',
['rel_scaling=1000',
'color_kind="scalars"',
'color_name="T"']),
},
'view' : (-51, 71, 12.9, [-2.3, -2.4, -0.2]),
'roll' : -65,
'opacity' : {'wireframe' : 0.3},
},
}
}
def _omit(filename):
    """Return True when *filename* should be excluded from the gallery,
    either because its basename is black-listed in ``omits`` or because
    its path is matched by one of the ``omit_dirs`` matchers."""
    base = os.path.basename(filename)
    if base in omits:
        return True

    return any(matcher(filename) is not None for matcher in omit_dirs)
def _get_fig_filenames(ebase, images_dir):
    """Yield the figure base name first, then every figure file name for
    the example *ebase* — one per view suffix when the example has an
    entry in ``custom``, otherwise a single suffix-less file name."""
    fig_base = os.path.splitext(ebase)[0].replace(os.path.sep, '-')
    yield fig_base

    # A custom entry defines one figure per view suffix; the default is a
    # single figure with an empty suffix.
    suffixes = sorted(custom[ebase].keys()) if ebase in custom else ['']
    for suffix in suffixes:
        yield os.path.join(images_dir, fig_base + suffix + '.png')
def _get_fig_filename(ebase, images_dir, suffix):
    """Return the figure file name for example *ebase* with *suffix*,
    located in *images_dir* (path separators become dashes)."""
    stem, _ = os.path.splitext(ebase)
    fig_base = stem.replace(os.path.sep, '-')
    return os.path.join(images_dir, '%s%s.png' % (fig_base, suffix))
def _make_sphinx_path(path, relative=False):
    """Convert *path* into a path usable in Sphinx documents: either
    relative (one '..' per directory level below the data dir) or rooted
    at '/..'."""
    if relative:
        aux = path.replace(sfepy.data_dir, '')
        prefix = ('..' + os.path.sep) * aux.count(os.path.sep)
        return prefix[:-1] + aux

    return path.replace(sfepy.data_dir, '/..')
def generate_images(images_dir, examples_dir):
    """
    Generate images from results of running examples found in
    `examples_dir` directory.

    The generated images are stored to `images_dir`,
    """
    from sfepy.applications import solve_pde
    from sfepy.postprocess.viewer import Viewer
    from sfepy.postprocess.utils import mlab
    from sfepy.solvers.ts_solvers import StationarySolver

    prefix = output.prefix

    # Solve each example into a throwaway temporary directory.
    output_dir = tempfile.mkdtemp()
    trunk = os.path.join(output_dir, 'result')
    options = Struct(output_filename_trunk=trunk,
                     output_format='vtk',
                     save_ebc=False,
                     save_ebc_nodes=False,
                     save_regions=False,
                     save_field_meshes=False,
                     save_regions_as_groups=False,
                     solve_not=False)
    # Fallback view settings for examples without a ``custom`` entry.
    default_views = {'' : {}}

    ensure_path(images_dir + os.path.sep)

    view = Viewer('', offscreen=False)

    for ex_filename in locate_files('*.py', examples_dir):
        if _omit(ex_filename): continue

        output.level = 0
        output.prefix = prefix
        ebase = ex_filename.replace(examples_dir, '')[1:]
        output('trying "%s"...' % ebase)

        try:
            problem, state = solve_pde(ex_filename, options=options)
        except KeyboardInterrupt:
            raise
        except:
            # A failed example is skipped; no image will be produced.
            problem = None
            output('***** failed! *****')

        if problem is not None:
            if ebase in custom:
                views = custom[ebase]
            else:
                views = default_views

            # For time-dependent problems, visualize the last time step.
            tsolver = problem.get_solver()
            if isinstance(tsolver, StationarySolver):
                suffix = None
            else:
                suffix = tsolver.ts.suffix % (tsolver.ts.n_step - 1)

            filename = problem.get_output_name(suffix=suffix)

            # Render one image per configured view suffix.
            for suffix, kwargs in six.iteritems(views):
                fig_filename = _get_fig_filename(ebase, images_dir, suffix)

                fname = edit_filename(filename, suffix=suffix)
                output('displaying results from "%s"' % fname)
                disp_name = fig_filename.replace(sfepy.data_dir, '')
                output('to "%s"...' % disp_name.lstrip(os.path.sep))

                view.filename = fname
                view(scene=view.scene, show=False, is_scalar_bar=True,
                     **kwargs)
                view.save_image(fig_filename)
                mlab.clf()

                output('...done')

    remove_files(output_dir)

    output('...done')
def generate_thumbnails(thumbnails_dir, images_dir, scale=0.3):
    """
    Generate thumbnails into `thumbnails_dir` corresponding to images in
    `images_dir`.
    """
    ensure_path(thumbnails_dir + os.path.sep)

    output('generating thumbnails...')
    for fig_filename in glob.glob(os.path.join(images_dir, '*.png')):
        ebase = fig_filename.replace(sfepy.data_dir, '').lstrip(os.path.sep)
        output('"%s"' % ebase)

        # The thumbnail keeps the image's basename, scaled down.
        thumb_filename = os.path.join(thumbnails_dir,
                                      os.path.basename(fig_filename))
        image.thumbnail(fig_filename, thumb_filename, scale=scale)

    output('...done')
_index = """\
.. _%s-gallery-examples-index:
%s
%s
.. toctree::
:maxdepth: 2
"""
_image = '.. image:: %s'
_include = """\
.. _%s:
%s
%s
**Description**
%s
%s
:download:`source code <%s>`
.. literalinclude:: %s
"""
def generate_rst_files(rst_dir, examples_dir, images_dir):
    """
    Generate Sphinx rst files for examples in `examples_dir` with images
    in `images_dir` and put them into `rst_dir`.

    Returns
    -------
    dir_map : dict
        The directory mapping of examples and corresponding rst files.
    """
    ensure_path(rst_dir + os.path.sep)

    output('generating rst files...')

    # Map each example subdirectory to its (source, rst) file pairs.
    dir_map = {}
    for ex_filename in locate_files('*.py', examples_dir):
        if _omit(ex_filename): continue

        ebase = ex_filename.replace(examples_dir, '')[1:]
        base_dir = os.path.dirname(ebase)

        rst_filename = os.path.basename(ex_filename).replace('.py', '.rst')

        dir_map.setdefault(base_dir, []).append((ex_filename, rst_filename))

    for dirname, filenames in six.iteritems(dir_map):
        filenames = sorted(filenames, key=lambda a: a[1])
        dir_map[dirname] = filenames

    # Main index. Fix: files are now opened via context managers so the
    # handles are closed even when an example's import fails mid-loop
    # (the original leaked open file objects on exceptions).
    with open(os.path.join(rst_dir, 'index.rst'), 'w') as mfd:
        mfd.write(_index % ('sfepy', 'Examples', '=' * 8))

        for dirname, filenames in ordered_iteritems(dir_map):
            full_dirname = os.path.join(rst_dir, dirname)
            ensure_path(full_dirname + os.path.sep)

            # Subdirectory index.
            with open(os.path.join(full_dirname, 'index.rst'), 'w') as ifd:
                ifd.write(_index % (dirname, dirname, '=' * len(dirname)))

                for ex_filename, rst_filename in filenames:
                    full_rst_filename = os.path.join(full_dirname,
                                                     rst_filename)
                    output('"%s"'
                           % full_rst_filename.replace(rst_dir, '')[1:])
                    rst_filename_ns = rst_filename.replace('.rst', '')
                    ebase = ex_filename.replace(examples_dir, '')[1:]

                    rst_ex_filename = _make_sphinx_path(ex_filename)
                    # The example's module docstring becomes its
                    # description in the generated page.
                    docstring = get_default(import_file(ex_filename).__doc__,
                                            'missing description!')

                    ifd.write(' %s\n' % rst_filename_ns)

                    # Include only figures that were actually rendered.
                    fig_include = ''
                    fig_base = next(_get_fig_filenames(ebase, images_dir))
                    for fig_filename in _get_fig_filenames(ebase,
                                                           images_dir):
                        rst_fig_filename = _make_sphinx_path(fig_filename)
                        if os.path.exists(fig_filename):
                            fig_include += _image % rst_fig_filename + '\n'

                    # Example rst file.
                    with open(full_rst_filename, 'w') as fd:
                        fd.write(_include % (fig_base, ebase,
                                             '=' * len(ebase),
                                             docstring,
                                             fig_include,
                                             rst_ex_filename,
                                             rst_ex_filename))

            mfd.write(' %s/index\n' % dirname)

    output('...done')

    return dir_map
_gallery_template_file = os.path.join(sfepy.top_dir,
'doc/gallery_template.html')
_link_template = """\
<div class="figure">
<a class="reference external image-reference" href="../%s">
<img alt="%s" src="%s" />
</a>
<p class="caption">
<a class="reference internal" href="../%s"><em>%s</em></a>
</p>
</div>
<div class="toctree-wrapper compound">
</div>
"""
_side_links="<li><a class='reference internal' href='#%s'>%s</a></li>"
_div_line ="""\
<div class="section" id="%s">
<h2>%s<a class="headerlink" href="\#%s" title="Permalink to this headline">
</a></h2>
%s
<div style="clear: both"></div></div>
"""
def generate_gallery_html(examples_dir, output_filename, gallery_dir,
                          rst_dir, thumbnails_dir, dir_map, link_prefix):
    """
    Generate the gallery html file with thumbnail images and links to
    examples.

    Parameters
    ----------
    output_filename : str
        The output html file name.
    gallery_dir : str
        The top level directory of gallery files.
    rst_dir : str
        The full path to rst files of examples within `gallery_dir`.
    thumbnails_dir : str
        The full path to thumbnail images within `gallery_dir`.
    dir_map : dict
        The directory mapping returned by `generate_rst_files()`
    link_prefix : str, optional
        The prefix to prepend to links to individual pages of examples.
    """
    output('generating %s...' % output_filename)

    with open(_gallery_template_file, 'r') as fd:
        gallery_template = fd.read()

    div_lines=[]
    sidebar = []
    for dirname, filenames in ordered_iteritems(dir_map):
        full_dirname = os.path.join(rst_dir, dirname)
        dirnamenew = dirname.replace("_"," ")
        sidebarline = _side_links % (dirname, dirnamenew.title())
        lines = []

        for ex_filename, rst_filename in filenames:
            full_rst_filename = os.path.join(full_dirname, rst_filename)
            ebase = full_rst_filename.replace(rst_dir, '')[1:]
            ebase = edit_filename(ebase, new_ext='.py')

            link_base = full_rst_filename.replace(gallery_dir, '')[1:]
            link = os.path.join(link_prefix,
                                os.path.splitext(link_base)[0] + '.html')

            # NOTE(review): this next() builds a throwaway generator and
            # discards its first value; it looks like leftover code.
            next(_get_fig_filenames(ebase, thumbnails_dir))
            for thumbnail_filename in _get_fig_filenames(ebase,
                                                         thumbnails_dir):
                if not os.path.isfile(thumbnail_filename):
                    # Skip examples with no image (= failed examples).
                    continue

                thumbnail_name = thumbnail_filename.replace(gallery_dir,
                                                            '')[1:]
                path_to_file = os.path.join(examples_dir,ebase)
                # First sentence of the example docstring becomes the
                # caption ('e.g.' is masked so the split on '.' survives).
                docstring = get_default(import_file(path_to_file).__doc__,
                                        'missing description!')
                docstring = docstring.replace('e.g.', 'eg:')
                docstring = docstring.split('.')

                line = _link_template % (link,os.path.splitext(ebase)[0],
                                         thumbnail_name,link,docstring[0]+'.')
                lines.append(line)

        # Only sections that produced at least one thumbnail are emitted.
        if(len(lines)!=0):
            div_lines.append(_div_line % (dirname, dirnamenew.title(),
                                          dirname, '\n'.join(lines)))
            sidebar.append(sidebarline)

    fd = open(output_filename, 'w')
    fd.write(gallery_template % ((link_prefix,) * 7
                                 + ('\n'.join(sidebar), '\n'.join(div_lines))))
    fd.close()

    output('...done')
helps = {
'examples_dir' :
'directory containing examples [default: %(default)s]',
'images_dir' :
'directory where to store gallery images [default: gallery/images]',
'no_images' :
'do not (re)generate images and thumbnails',
'output_filename' :
'output file name [default: %(default)s]',
'link_prefix' :
'prefix to be prepended to links to examples pages in gallery '
'[default: %(default)s]',
}
def main():
    """Command-line entry point for gallery generation.

    Parses CLI options, optionally (re)generates example images and
    thumbnails, builds the per-example rst files, and writes the final
    gallery HTML page.
    """
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('-e', '--examples-dir', metavar='directory',
                        action='store', dest='examples_dir',
                        default='examples', help=helps['examples_dir'])
    parser.add_argument('-i', '--images-dir', metavar='directory',
                        action='store', dest='images_dir',
                        default=None, help=helps['images_dir'])
    parser.add_argument('-n', '--no-images',
                        action='store_true', dest='no_images',
                        default=False, help=helps['no_images'])
    parser.add_argument('-o', '--output', metavar='output_filename',
                        action='store', dest='output_filename',
                        default='gallery/gallery.html',
                        help=helps['output_filename'])
    parser.add_argument('-l', '--link-prefix', metavar='prefix',
                        action='store', dest='link_prefix',
                        default='http://sfepy.org/doc-devel',
                        help=helps['link_prefix'])
    options = parser.parse_args()

    # All paths are resolved relative to the chosen output location.
    examples_dir = os.path.realpath(options.examples_dir)
    output_filename = os.path.realpath(options.output_filename)
    gallery_dir = os.path.dirname(output_filename)
    # Images default to <gallery_dir>/images unless overridden with -i.
    images_dir = get_default(options.images_dir,
                             os.path.join(gallery_dir, 'images'))
    thumbnails_dir = os.path.join(images_dir, 'thumbnails')
    rst_dir = os.path.join(gallery_dir, 'examples')
    if not options.no_images:
        # Image generation is expensive; -n/--no-images skips it.
        generate_images(images_dir, examples_dir)
        generate_thumbnails(thumbnails_dir, images_dir)

    dir_map = generate_rst_files(rst_dir, examples_dir, images_dir)

    generate_gallery_html(examples_dir, output_filename, gallery_dir,
                          rst_dir, thumbnails_dir, dir_map,
                          link_prefix=options.link_prefix)

if __name__ == '__main__':
    main()
| lokik/sfepy | script/gen_gallery.py | Python | bsd-3-clause | 21,255 | [
"VTK"
] | b7209a82b9fca0e8d425bdb8210bab0e1b3160c0b6ce94bd7351bb8dbd62eff2 |
import cgi
import json
import os
import posixpath
import shutil
import tempfile
from urllib.parse import urlparse
from apistar.compat import DownloadedFile
class BaseDecoder:
    """Abstract base for response decoders."""

    # Media-type pattern handled by the decoder; subclasses override this.
    media_type = None

    def decode(self, bytestring, **options):
        """Decode a response payload.  Subclasses must implement this."""
        raise NotImplementedError()
class JSONDecoder(BaseDecoder):
    """Decode JSON response bodies into native Python data."""

    media_type = 'application/json'

    def decode(self, response):
        """
        Return raw JSON data.
        """
        text = response.content.decode('utf-8')
        return json.loads(text)
class TextDecoder(BaseDecoder):
    """Decode any text/* response by returning its decoded text body."""

    media_type = 'text/*'

    def decode(self, response):
        """Return the response body as a string."""
        return response.text
class DownloadDecoder(BaseDecoder):
    """
    A codec to handle raw file downloads, such as images and other media.
    """
    media_type = '*/*'

    def __init__(self, download_dir=None):
        """
        `download_dir` - If `None` then downloaded files will be temporary files
        that are deleted on close. If set to a value, then downloaded files
        will be saved to this directory, and will not be automatically deleted.
        """
        self._delete_on_close = download_dir is None
        self.download_dir = download_dir

    def decode(self, response):
        """
        Stream the response body to disk and return a `DownloadedFile`
        opened for binary reading.

        The output filename is derived from the Content-Disposition header
        if present, falling back to the URL, and is de-duplicated against
        any existing file at the target path.
        """
        base_url = response.url
        content_type = response.headers.get('content-type')
        content_disposition = response.headers.get('content-disposition')

        # Write the download to a temporary .download file.
        fd, temp_path = tempfile.mkstemp(suffix='.download')
        with os.fdopen(fd, 'wb') as file_handle:
            for chunk in response.iter_content(chunk_size=4096):
                file_handle.write(chunk)

        # Determine the output filename.
        output_filename = _get_filename(base_url, content_type, content_disposition)

        # Determine the output directory.
        output_dir = self.download_dir
        if output_dir is None:
            # No configured directory: keep the file next to the temp file.
            output_dir = os.path.dirname(temp_path)

        # Determine the full output path.
        output_path = os.path.join(output_dir, output_filename)

        # Move the temporary download file to the final location.
        if output_path != temp_path:
            # Never overwrite an existing file; pick a "name (1).ext" variant.
            output_path = _unique_output_path(output_path)
            shutil.move(temp_path, output_path)

        # Open the file and return the file object.
        output_file = open(output_path, 'rb')
        downloaded = DownloadedFile(output_file, output_path, delete=self._delete_on_close)
        downloaded.basename = output_filename
        return downloaded
def _guess_extension(content_type):
"""
Python's `mimetypes.guess_extension` is no use because it simply returns
the first of an unordered set. We use the same set of media types here,
but take a reasonable preference on what extension to map to.
"""
return {
'application/javascript': '.js',
'application/msword': '.doc',
'application/octet-stream': '.bin',
'application/oda': '.oda',
'application/pdf': '.pdf',
'application/pkcs7-mime': '.p7c',
'application/postscript': '.ps',
'application/vnd.apple.mpegurl': '.m3u',
'application/vnd.ms-excel': '.xls',
'application/vnd.ms-powerpoint': '.ppt',
'application/x-bcpio': '.bcpio',
'application/x-cpio': '.cpio',
'application/x-csh': '.csh',
'application/x-dvi': '.dvi',
'application/x-gtar': '.gtar',
'application/x-hdf': '.hdf',
'application/x-latex': '.latex',
'application/x-mif': '.mif',
'application/x-netcdf': '.nc',
'application/x-pkcs12': '.p12',
'application/x-pn-realaudio': '.ram',
'application/x-python-code': '.pyc',
'application/x-sh': '.sh',
'application/x-shar': '.shar',
'application/x-shockwave-flash': '.swf',
'application/x-sv4cpio': '.sv4cpio',
'application/x-sv4crc': '.sv4crc',
'application/x-tar': '.tar',
'application/x-tcl': '.tcl',
'application/x-tex': '.tex',
'application/x-texinfo': '.texinfo',
'application/x-troff': '.tr',
'application/x-troff-man': '.man',
'application/x-troff-me': '.me',
'application/x-troff-ms': '.ms',
'application/x-ustar': '.ustar',
'application/x-wais-source': '.src',
'application/xml': '.xml',
'application/zip': '.zip',
'audio/basic': '.au',
'audio/mpeg': '.mp3',
'audio/x-aiff': '.aif',
'audio/x-pn-realaudio': '.ra',
'audio/x-wav': '.wav',
'image/gif': '.gif',
'image/ief': '.ief',
'image/jpeg': '.jpe',
'image/png': '.png',
'image/svg+xml': '.svg',
'image/tiff': '.tiff',
'image/vnd.microsoft.icon': '.ico',
'image/x-cmu-raster': '.ras',
'image/x-ms-bmp': '.bmp',
'image/x-portable-anymap': '.pnm',
'image/x-portable-bitmap': '.pbm',
'image/x-portable-graymap': '.pgm',
'image/x-portable-pixmap': '.ppm',
'image/x-rgb': '.rgb',
'image/x-xbitmap': '.xbm',
'image/x-xpixmap': '.xpm',
'image/x-xwindowdump': '.xwd',
'message/rfc822': '.eml',
'text/css': '.css',
'text/csv': '.csv',
'text/html': '.html',
'text/plain': '.txt',
'text/richtext': '.rtx',
'text/tab-separated-values': '.tsv',
'text/x-python': '.py',
'text/x-setext': '.etx',
'text/x-sgml': '.sgml',
'text/x-vcard': '.vcf',
'text/xml': '.xml',
'video/mp4': '.mp4',
'video/mpeg': '.mpeg',
'video/quicktime': '.mov',
'video/webm': '.webm',
'video/x-msvideo': '.avi',
'video/x-sgi-movie': '.movie'
}.get(content_type, '')
def _unique_output_path(path):
"""
Given a path like '/a/b/c.txt'
Return the first available filename that doesn't already exist,
using an incrementing suffix if needed.
For example: '/a/b/c.txt' or '/a/b/c (1).txt' or '/a/b/c (2).txt'...
"""
basename, ext = os.path.splitext(path)
idx = 0
while os.path.exists(path):
idx += 1
path = "%s (%d)%s" % (basename, idx, ext)
return path
def _safe_filename(filename):
"""
Sanitize output filenames, to remove any potentially unsafe characters.
"""
filename = os.path.basename(filename)
keepcharacters = (' ', '.', '_', '-')
filename = ''.join(
char for char in filename
if char.isalnum() or char in keepcharacters
).strip().strip('.')
return filename
def _get_filename_from_content_disposition(content_disposition):
    """
    Determine an output filename based on the `Content-Disposition` header.

    Returns the sanitized filename, or `None` when the header carries no
    usable `filename` parameter.
    """
    # The original used `cgi.parse_header` via a confusing chained
    # assignment (`params = value, params = ...`); `cgi` is deprecated
    # (PEP 594) and removed in Python 3.13.  `email.message.Message` is the
    # documented replacement for header parameter parsing, and its
    # get_filename() also handles RFC 2231 encoded parameters.
    from email.message import Message

    message = Message()
    message['content-disposition'] = content_disposition
    filename = message.get_filename()
    if filename is None:
        return None
    return _safe_filename(filename)
def _get_filename_from_url(url, content_type=None):
    """
    Determine an output filename based on the download URL.

    Uses the last path component of the URL; if that yields nothing, falls
    back to 'download'.  A media-type-derived extension is appended when
    the name has no extension of its own.
    """
    path = urlparse(url).path
    last_component = posixpath.basename(path.rstrip('/'))
    name = _safe_filename(last_component)
    extension = _guess_extension(content_type or '')
    if not name:
        return 'download' + extension
    if '.' in name:
        return name
    return name + extension
def _get_filename(base_url, content_type=None, content_disposition=None):
    """
    Determine an output filename to use for the download.

    The Content-Disposition header takes precedence; the URL (plus
    media type) is the fallback.
    """
    if content_disposition:
        from_header = _get_filename_from_content_disposition(content_disposition)
        if from_header is not None:
            return from_header
    return _get_filename_from_url(base_url, content_type)
| tomchristie/apistar | apistar/client/decoders.py | Python | bsd-3-clause | 7,825 | [
"NetCDF"
] | ecf5f34da12962a56d62e4e2c0cac03d4dcf677e3c5b93688dd04c10d4b6fb57 |
"""
Support for control of ElkM1 sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.elkm1/
"""
from homeassistant.components.elkm1 import (
DOMAIN as ELK_DOMAIN, create_elk_entities, ElkEntity)
# Home Assistant loads the core elkm1 component before this platform.
DEPENDENCIES = [ELK_DOMAIN]
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Create the Elk-M1 sensor platform."""
    if discovery_info is None:
        return
    elk = hass.data[ELK_DOMAIN]['elk']
    # (element collection, element-type name, entity class) for every
    # sensor flavour this platform exposes.
    element_specs = (
        (elk.counters, 'counter', ElkCounter),
        (elk.keypads, 'keypad', ElkKeypad),
        ([elk.panel], 'panel', ElkPanel),
        (elk.settings, 'setting', ElkSetting),
        (elk.zones, 'zone', ElkZone),
    )
    entities = []
    for elements, element_type, entity_class in element_specs:
        entities = create_elk_entities(
            hass, elements, element_type, entity_class, entities)
    async_add_entities(entities, True)
def temperature_to_state(temperature, undefined_temperature):
    """Convert temperature to a state."""
    if temperature <= undefined_temperature:
        # Readings at or below the sentinel mean "no temperature available".
        return None
    return temperature
class ElkSensor(ElkEntity):
    """Base representation of Elk-M1 sensor."""

    def __init__(self, element, elk, elk_data):
        """Initialize the base of all Elk sensors."""
        super().__init__(element, elk, elk_data)
        # Last known state; refreshed by the subclass _element_changed hooks.
        self._state = None

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state
class ElkCounter(ElkSensor):
    """Representation of an Elk-M1 Counter."""

    @property
    def icon(self):
        """Icon to use in the frontend."""
        return 'mdi:numeric'

    def _element_changed(self, element, changeset):
        # State mirrors the counter's raw value.
        self._state = self._element.value
class ElkKeypad(ElkSensor):
    """Representation of an Elk-M1 Keypad."""

    @property
    def temperature_unit(self):
        """Return the temperature unit."""
        return self._temperature_unit

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return self._temperature_unit

    @property
    def icon(self):
        """Icon to use in the frontend."""
        return 'mdi:thermometer-lines'

    @property
    def device_state_attributes(self):
        """Attributes of the sensor."""
        from elkm1_lib.util import username

        element = self._element
        attrs = self.initial_attrs()
        attrs.update({
            'area': element.area + 1,
            'temperature': element.temperature,
            'last_user_time': element.last_user_time.isoformat(),
            'last_user': element.last_user + 1,
            'code': element.code,
            'last_user_name': username(self._elk, element.last_user),
            'last_keypress': element.last_keypress,
        })
        return attrs

    def _element_changed(self, element, changeset):
        # -40 is the keypad's "temperature unavailable" sentinel value.
        self._state = temperature_to_state(self._element.temperature, -40)

    async def async_added_to_hass(self):
        """Register callback for ElkM1 changes and update entity state."""
        await super().async_added_to_hass()
        keypads = self.hass.data[ELK_DOMAIN]['keypads']
        keypads[self._element.index] = self.entity_id
class ElkPanel(ElkSensor):
    """Representation of an Elk-M1 Panel."""

    @property
    def icon(self):
        """Icon to use in the frontend."""
        return "mdi:home"

    @property
    def device_state_attributes(self):
        """Attributes of the sensor."""
        attrs = self.initial_attrs()
        attrs['system_trouble_status'] = self._element.system_trouble_status
        return attrs

    def _element_changed(self, element, changeset):
        # Disconnected beats everything; a connected panel is either paused
        # (remote programming in progress) or plain connected.
        if not self._elk.is_connected():
            self._state = 'Disconnected'
        elif self._element.remote_programming_status:
            self._state = 'Paused'
        else:
            self._state = 'Connected'
class ElkSetting(ElkSensor):
    """Representation of an Elk-M1 Setting."""

    @property
    def icon(self):
        """Icon to use in the frontend."""
        return 'mdi:numeric'

    def _element_changed(self, element, changeset):
        # State mirrors the setting's raw value.
        self._state = self._element.value

    @property
    def device_state_attributes(self):
        """Attributes of the sensor."""
        # Lazy import — presumably to defer elkm1_lib loading; confirm.
        from elkm1_lib.const import SettingFormat
        attrs = self.initial_attrs()
        # Expose the setting's value format as a lowercase enum name.
        attrs['value_format'] = SettingFormat(
            self._element.value_format).name.lower()
        return attrs
class ElkZone(ElkSensor):
    """Representation of an Elk-M1 Zone."""

    @property
    def icon(self):
        """Icon to use in the frontend."""
        from elkm1_lib.const import ZoneType
        # Material Design icon per zone definition; anything not listed
        # falls back to 'alarm-bell' below.
        zone_icons = {
            ZoneType.FIRE_ALARM.value: 'fire',
            ZoneType.FIRE_VERIFIED.value: 'fire',
            ZoneType.FIRE_SUPERVISORY.value: 'fire',
            ZoneType.KEYFOB.value: 'key',
            ZoneType.NON_ALARM.value: 'alarm-off',
            ZoneType.MEDICAL_ALARM.value: 'medical-bag',
            ZoneType.POLICE_ALARM.value: 'alarm-light',
            ZoneType.POLICE_NO_INDICATION.value: 'alarm-light',
            ZoneType.KEY_MOMENTARY_ARM_DISARM.value: 'power',
            ZoneType.KEY_MOMENTARY_ARM_AWAY.value: 'power',
            ZoneType.KEY_MOMENTARY_ARM_STAY.value: 'power',
            ZoneType.KEY_MOMENTARY_DISARM.value: 'power',
            ZoneType.KEY_ON_OFF.value: 'toggle-switch',
            ZoneType.MUTE_AUDIBLES.value: 'volume-mute',
            ZoneType.POWER_SUPERVISORY.value: 'power-plug',
            ZoneType.TEMPERATURE.value: 'thermometer-lines',
            ZoneType.ANALOG_ZONE.value: 'speedometer',
            ZoneType.PHONE_KEY.value: 'phone-classic',
            ZoneType.INTERCOM_KEY.value: 'deskphone'
        }
        return 'mdi:{}'.format(
            zone_icons.get(self._element.definition, 'alarm-bell'))

    @property
    def device_state_attributes(self):
        """Attributes of the sensor."""
        from elkm1_lib.const import (
            ZoneLogicalStatus, ZonePhysicalStatus, ZoneType)
        attrs = self.initial_attrs()
        # Enum values are exposed as lowercase names for readability.
        attrs['physical_status'] = ZonePhysicalStatus(
            self._element.physical_status).name.lower()
        attrs['logical_status'] = ZoneLogicalStatus(
            self._element.logical_status).name.lower()
        attrs['definition'] = ZoneType(
            self._element.definition).name.lower()
        # Area is exposed 1-based (raw element value is offset by +1 here,
        # matching ElkKeypad).
        attrs['area'] = self._element.area + 1
        attrs['bypassed'] = self._element.bypassed
        attrs['triggered_alarm'] = self._element.triggered_alarm
        return attrs

    @property
    def temperature_unit(self):
        """Return the temperature unit."""
        from elkm1_lib.const import ZoneType
        # Only temperature zones have a unit.
        if self._element.definition == ZoneType.TEMPERATURE.value:
            return self._temperature_unit
        return None

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        from elkm1_lib.const import ZoneType
        if self._element.definition == ZoneType.TEMPERATURE.value:
            return self._temperature_unit
        if self._element.definition == ZoneType.ANALOG_ZONE.value:
            # Analog zones report a voltage.
            return 'V'
        return None

    def _element_changed(self, element, changeset):
        from elkm1_lib.const import ZoneLogicalStatus, ZoneType
        from elkm1_lib.util import pretty_const
        if self._element.definition == ZoneType.TEMPERATURE.value:
            # -60 is the zone "temperature unavailable" sentinel value.
            self._state = temperature_to_state(self._element.temperature, -60)
        elif self._element.definition == ZoneType.ANALOG_ZONE.value:
            self._state = self._element.voltage
        else:
            # Every other zone type reports its logical status as a
            # human-friendly string.
            self._state = pretty_const(ZoneLogicalStatus(
                self._element.logical_status).name)
| tinloaf/home-assistant | homeassistant/components/sensor/elkm1.py | Python | apache-2.0 | 7,872 | [
"Elk"
] | 27365069ee26b8ae48381e10142fde6850e74e35678770939a46c4abb45a5458 |
# noqa: D100
import base64
import collections
import keyword
import re
from pathlib import Path
from urllib.parse import urlparse
# These mimetypes will be encoded in base64 when embedded in requests.
# I'm sure there is a more elegant solution than this... https://pypi.org/project/binaryornot/ ?
BINARY_MIMETYPES = [
    "application/x-zipped-shp",
    "application/vnd.google-earth.kmz",
    "image/tiff; subtype=geotiff",
    "image/tiff; application=geotiff",
    "application/x-netcdf",
    "application/octet-stream",
    "application/zip",
    "application/x-gzip",
    "application/x-gtar",
    "application/x-tgz",
]

# Mimetypes treated as XML payloads.
XML_MIMETYPES = ["application/xml", "application/gml+xml", "text/xml"]

# Text encoding used when the caller does not specify one.
DEFAULT_ENCODING = "utf-8"
def fix_url(url):
    """If url is a local path, add a file:// scheme."""
    parsed = urlparse(url, scheme="file")
    return parsed.geturl()
def is_url(url):
    """Return whether value is a valid URL."""
    if url is None:
        return False
    # A URL is "valid" here iff urlparse finds a scheme.
    return bool(urlparse(url).scheme)
def is_opendap_url(url):
    """
    Check if a provided url is an OpenDAP url.

    The DAP Standard specifies that a specific tag must be included in the
    Content-Description header of every request. This tag is one of:

        "dods-dds" | "dods-das" | "dods-data" | "dods-error"

    so it is enough to check whether the header starts with `dods`.

    Note that this might not work with every DAP server implementation.
    """
    import requests
    from requests.exceptions import ConnectionError, InvalidSchema, MissingSchema

    try:
        headers = requests.head(url, timeout=5).headers
    except (ConnectionError, MissingSchema, InvalidSchema):
        return False

    description = headers.get("Content-Description")
    if not description:
        return False
    return description.lower().startswith("dods")
def is_file(path):
    """Return True if `path` is a valid file."""
    if not path:
        return False
    if isinstance(path, Path):
        candidate = path
    else:
        # Truncate very long strings, which could not be valid paths anyway.
        candidate = Path(path[:255])
    try:
        return candidate.is_file()
    except Exception:
        # Any OS-level failure (bad characters, permissions, ...) counts
        # as "not a file".
        return False
def sanitize(name):
    """Lower-case name and replace all non-ascii chars by `_`.

    If name is a Python keyword (like `return`) then add a trailing `_`.
    """
    # \W replaces non-word characters; the ^(?=\d) lookahead prefixes an
    # underscore when the name starts with a digit.
    cleaned = re.sub(r"\W|^(?=\d)", "_", name.lower())
    return cleaned + "_" if keyword.iskeyword(cleaned) else cleaned
def delist(data):
    """If data is a sequence with a single element, returns this element, otherwise return the sequence."""
    is_nonstring_iterable = (
        isinstance(data, collections.abc.Iterable) and not isinstance(data, str)
    )
    if is_nonstring_iterable and len(data) == 1:
        return data[0]
    return data
def embed(value, mimetype=None, encoding=None):
    """Return the content of the file, either as a string or base64 bytes.

    `value` may be a file-like object, a Path, a path/URL string, or the
    literal content itself.

    Returns
    -------
    str
        encoded content string and actual encoding
    """
    if hasattr(value, "read"):
        # File-like object; may be open in text or binary mode.
        content = value.read()
    else:
        if isinstance(value, Path):
            path = str(value)
        else:
            path = urlparse(value).path
        if is_file(path):
            # Binary media types must be read as bytes for base64 encoding.
            mode = "rb" if mimetype in BINARY_MIMETYPES else "r"
            with open(path, mode) as source:
                content = source.read()
        else:
            # Not a readable file: treat the value itself as the content.
            content = value
    return _encode(content, mimetype, encoding)
def _encode(content, mimetype, encoding):
    """Encode in base64 if mimetype is a binary type."""
    if mimetype in BINARY_MIMETYPES:
        # An error here might be due to a bad file path. Check that the file exists.
        return base64.b64encode(content), "base64"
    actual_encoding = DEFAULT_ENCODING if encoding is None else encoding
    if isinstance(content, bytes):
        return content.decode(actual_encoding), actual_encoding
    # Do we need to escape content that is not HTML safe ?
    # return u'<![CDATA[{}]]>'.format(content)
    return content, actual_encoding
def guess_type(url, supported):
    """Guess the mime type of the file link.

    If the mimetype is not recognized, default to the first supported value.

    Parameters
    ----------
    url : str, Path
        Path or URL to file.
    supported : list, tuple
        Supported mimetypes.

    Returns
    -------
    mimetype, encoding
    """
    import mimetypes

    try:
        mime, enc = mimetypes.guess_type(str(url), strict=False)
    except TypeError:
        mime, enc = None, None

    # Special cases
    # -------------
    # netCDF served through an OPeNDAP endpoint gets the DODS media type.
    if (mime == "application/x-netcdf"
            and "dodsC" in str(url)
            and "application/x-ogc-dods" in supported):
        mime = "application/x-ogc-dods"

    # Two zip flavours: switch to whichever one the server supports.
    zip_types = ["application/zip", "application/x-zipped-shp"]
    if mime not in supported and mime in zip_types:
        zip_overlap = set(zip_types).intersection(supported)
        if zip_overlap:
            mime = zip_overlap.pop()

    # GeoJSON
    if mime == "application/json" and "application/geo+json" in supported:
        mime = "application/geo+json"

    # FIXME: Verify whether this code is needed. Remove if not.
    # # GeoTIFF (workaround since this mimetype isn't correctly understoud)
    # if mime == "image/tiff" and (".tif" in url or ".tiff" in "url"):
    #     mime = "image/tiff; subtype=geotiff"
    #
    # All the various XML schemes
    # TODO

    # If unrecognized, default to the first supported mimetype
    if mime is None:
        mime = supported[0]
    elif mime not in supported:
        raise ValueError(f"mimetype {mime} not in supported mimetypes {supported}.")
    return mime, enc
| bird-house/birdy | birdy/utils.py | Python | apache-2.0 | 5,856 | [
"NetCDF"
] | b5b5e8e58c7a27c6489d856033b13e737a4f03106478e5be471692a1e147a01f |
"""
This module contains functions related to the calculation of powder diffraction
patterns from area detector data.
"""
import numpy.ma as ma
from scipy.interpolate import griddata
import itertools
import numpy as np
from scipy.ndimage.filters import gaussian_filter
import utils
from output import log
import config
# Default standard deviation (in pixels) of the gaussian smoothing kernel
# applied to interpolated backgrounds when the caller gives none.
DEFAULT_SMOOTHING = 0.

# hbar c in eV * Angstrom
HBARC = 1973.
def get_detid_parameters(detid):
    """
    Given a detector ID, extract its detector geometry parameters from
    config.py and return them as the tuple (phi, x0, y0, alpha, r).
    """
    geometry = config.detinfo_map[detid].geometry
    return (geometry['phi'], geometry['x0'], geometry['y0'],
            geometry['alpha'], geometry['r'])
def get_x_y(imarray, phi, x0, y0, alpha, r):
    """
    Given CSPAD geometry parameters and an assembled image data array, return
    two arrays (each of the same shape as the image data) with values replaced
    by column indices (x) and row indices (y).

    Only the shape of `imarray` is used; the geometry parameters are accepted
    for interface compatibility with the other geometry helpers.
    """
    length, width = imarray.shape
    # The original implementation passed generator expressions to
    # np.vstack/np.hstack; NumPy deprecated that in 1.16 and it now raises.
    # meshgrid produces the same index grids directly.
    x, y = np.meshgrid(np.arange(width), np.arange(length))
    # Preserve the original dtype behavior: x integer, y floating point.
    return x, y.astype(float)
def get_beta_rho(imarray, phi, x0, y0, alpha, r):
    """
    Given CSPAD geometry parameters and an assembeled image data array, return
    (1) an array (of the same shape as the image data) with 2theta scattering
    angle values (in degrees) and (2) an array (of the same shape as the image
    data) with rho (distance) values.
    """
    x, y = get_x_y(imarray, phi, x0, y0, alpha, r)
    try:
        # In-plane rotation by phi about (x0, y0) — presumably the beam
        # center in pixel coordinates; confirm against config.py.
        x2 = -np.cos(phi) *(x-x0) + np.sin(phi) * (y-y0)
        y2 = -np.sin(phi) * (x-x0) - np.cos(phi) * (y-y0)
    except AttributeError:
        raise AttributeError("Missing geometry data in config.py")
    # Distance from the scattering origin to each pixel.
    rho = (r**2 + x2**2 + y2**2)**0.5
    # Tilt by alpha (detector inclination) into the beam frame.
    y1 = y2 * np.cos(alpha) + r * np.sin(alpha)
    z1 = - y2 * np.sin(alpha) + r * np.cos(alpha)
    # beta is the twotheta value for a given (x,y)
    beta = np.arctan2((y1**2 + x2**2)**0.5, z1) * 180 / np.pi
    return beta, rho
def get_phi2(imarray, detid):
    """
    Given CSPAD geometry parameters and an assembeled image data array, return
    (1) an array (of the same shape as the image data) with values of phi2,
    the azimuthal angle (in radians) with respect to the x-ray beam.
    """
    (phi, x0, y0, alpha, r) = get_detid_parameters(detid)
    x, y = get_x_y(imarray, phi, x0, y0, alpha, r)
    try:
        # Same in-plane rotation by phi about (x0, y0) as in get_beta_rho.
        x2 = -np.cos(phi) *(x-x0) + np.sin(phi) * (y-y0)
        y2 = -np.sin(phi) * (x-x0) - np.cos(phi) * (y-y0)
    except AttributeError:
        raise AttributeError("Missing geometry data in config.py")
    # Tilt by the detector inclination alpha.
    y1 = y2 * np.cos(alpha) + r * np.sin(alpha)
    z1 = - y2 * np.sin(alpha) + r * np.cos(alpha)
    # Azimuth about the beam axis.
    phi2 = np.arctan2(y1, z1)
    return phi2
def select_phi2(imarray, phi2_0, delta_phi2, detid):
    """
    Mask out (zero) all values outside of the specified phi2 range; the
    input array is not modified.
    """
    phi2 = get_phi2(imarray, detid)
    lower = phi2_0 - delta_phi2/2
    upper = phi2_0 + delta_phi2/2
    keep = np.logical_and(phi2 > lower, phi2 < upper)
    return np.where(keep, imarray, 0.)
# translate(phi, x0, y0, alpha, r)
# Produces I vs theta values for imarray. For older versions, see bojangles_old.py
# Inputs: detector configuration parameters and diffraction image
# Outputs: lists of intensity and 2theta values (data)
def translate(phi, x0, y0, alpha, r, imarray, fiducial_ellipses = None):
    """Map every pixel of imarray to a (2theta, intensity) pair.

    Returns (points, imarray) where points is an (npixels, 2) array of
    (2theta, intensity) rows and imarray is the (possibly modified) image.
    """
    # fiducial ellipse width
    ew = .1
    # beta is the twotheta value for a given (x,y)
    beta, rho = get_beta_rho(imarray, phi, x0, y0, alpha, r)
    if fiducial_ellipses is not None:
        # Burn visible rings into the image at the requested 2theta angles;
        # ring brightness is 1/5 of the image maximum.
        fiducial_value = np.max(np.nan_to_num(imarray))/5.
        for ang in fiducial_ellipses:
            imarray = np.where(np.logical_and(beta > ang - ew, beta < ang + ew), fiducial_value, imarray)
    newpoints = np.vstack((beta.flatten(), imarray.flatten()))
    return newpoints.T, imarray
def binData(mi, ma, stepsize, valenza = True):
    """
    Input: a minimum, a maximum, and a stepsize
    Output: a list of bin-edge angles
    (`valenza` is unused; kept for interface compatibility.)
    """
    log( "creating angle bins")
    # Accumulate edges from mi toward ma; the half-step tolerance keeps
    # float round-off from dropping the final edge.
    edges = [mi]
    current = mi
    while current < ma - (stepsize / 2):
        current += stepsize
        edges.append(current)
    return edges
#@utils.eager_persist_to_file("cache/xrd.process_imarray/")
def process_imarray(detid, imarray, nbins = 1000,
                    fiducial_ellipses = None, bgsub = True, compound_list = [],
                    pre_integration_smoothing = 0,
                    **kwargs):
    """
    Given a detector ID and assembeled CSPAD image data array, compute the
    powder pattern.

    Outputs: data in bins, intensity vs. theta, as lists (NOT numpy arrays),
    plus the (possibly modified) image array.
    """
    # NOTE(review): compound_list = [] is a mutable default; it is only
    # read here, but a None default would be safer.
    if bgsub and not compound_list:
        # Background subtraction needs known compounds to locate peaks.
        bgsub = False
        log( "Overriding bg_sub to False due to empty compound_list")

    @utils.eager_persist_to_file('cache/xrd/process_imarray/expanded_mask')
    def expanded_mask(arr):
        """
        Return a boolean array that masks out zero values
        in and their neighbors in the input array.\
        """
        import numpy.ma as ma
        import maskmaker
        mask = ma.make_mask(np.ones_like(arr))
        mask = np.where(arr == 0, False, True)
        # Grow the zero-mask so smoothing cannot bleed signal into holes.
        return maskmaker.makemask(mask, 2 * pre_integration_smoothing)

    # TODO: make this take dataset as an argument
    (phi, x0, y0, alpha, r) = get_detid_parameters(detid)
    if bgsub:
        imarray = subtract_background_full_frame(imarray, detid, compound_list)
    mask = expanded_mask(imarray)
    imarray = gaussian_filter(imarray, pre_integration_smoothing) * mask
    data, imarray = translate(phi, x0, y0, alpha, r, imarray, fiducial_ellipses = fiducial_ellipses)

    thetas = data[:,0]
    intens = data[:,1]

    # algorithm for binning the data
    # NOTE(review): `ma` here shadows the numpy.ma module imported at file
    # scope (not used again below, but fragile).
    ma = max(thetas)
    mi = min(thetas)
    stepsize = (ma - mi)/(nbins)
    binangles = binData(mi, ma, stepsize)
    numPix = [0] * (nbins+1)
    intenValue = [0] * (nbins+1)

    log( "putting data in bins" )
    # find which bin each theta lies in and add it to count
    for j,theta in enumerate(thetas):
        if intens[j] != 0:
            k = int(np.floor((theta-mi)/stepsize))
            numPix[k]=numPix[k]+1
            intenValue[k]=intenValue[k]+intens[j]
    # form average by dividing total intensity by the number of pixels
    log( "adjusting intensity")
    # Empty bins divide 0/0; nan_to_num maps the resulting NaNs to 0.
    adjInten = np.nan_to_num((np.array(intenValue)/np.array(numPix)))
    # if np.min(adjInten) < 0:
    # log( "WARNING: Negative values have been suppressed in final powder pattern (may indicate background subtraction with an inadequate data mask).")
    # adjInten[adjInten < 0.] = 0.
    return binangles, list( adjInten ), imarray
# From: http://stackoverflow.com/questions/7997152/python-3d-polynomial-surface-fit-order-dependent
def polyfit2d(x, y, z, order=3):
    """
    Least-squares fit of the 2d polynomial
    z = sum_ij m[i, j] * x**i * y**j, 0 <= i, j <= order,
    to scattered data.

    x, y, z are flat arrays of equal length; returns the coefficient
    vector of length (order + 1)**2, ordered to match polyval2d.
    """
    ncols = (order + 1)**2
    G = np.zeros((x.size, ncols))
    ij = itertools.product(range(order+1), range(order+1))
    for k, (i, j) in enumerate(ij):
        G[:, k] = x**i * y**j
    # rcond=None selects the modern machine-precision cutoff; omitting it
    # emits a FutureWarning (and uses a legacy cutoff) on NumPy >= 1.14.
    m, _, _, _ = np.linalg.lstsq(G, z, rcond=None)
    return m
def polyval2d(x, y, m):
    """
    Evaluate the 2d polynomial with coefficient vector `m` (as produced by
    polyfit2d) at the points (x, y).
    """
    # m has (order + 1)**2 entries, one per (i, j) exponent pair.
    order = int(np.sqrt(len(m))) - 1
    exponents = itertools.product(range(order + 1), range(order + 1))
    z = np.zeros_like(x)
    for coeff, (i, j) in zip(m, exponents):
        z += coeff * x**i * y**j
    return z
def trim_array(imarray):
    """
    Trim the input array if it isn't square (the above 2d polynomial fitting
    function requires an nxn matrix). Returns a view of the original array
    (or the array itself when it is already square).
    """
    rows, cols = imarray.shape
    if rows == cols:
        return imarray
    side = min(rows, cols)
    # Slice away the excess rows or columns; slicing yields a view.
    return imarray[:side, :side]
def pad_array(imarray):
    """
    Pad the input array with zeros if it isn't square (the above 2d
    polynomial fitting function requires an nxn matrix). Returns a new
    array when padding occurs, otherwise the input array itself.
    """
    rows, cols = imarray.shape
    excess = cols - rows
    if excess > 0:
        # Wider than tall: append zero rows.
        return np.vstack((imarray, np.zeros((excess, cols))))
    if excess < 0:
        # Taller than wide: append zero columns.
        return np.hstack((imarray, np.zeros((rows, -excess))))
    return imarray
def interp_2d_nearest_neighbor(imarray, detid, smoothing = DEFAULT_SMOOTHING):
    """
    Return a background frame for imarray.

    The background level is computed by treating zero-valued pixels as
    missing and replacing their values by nearest-neighbor interpolation
    from the non-zero pixels of the frame.

    Returns (smoothed, resampled): the gaussian-smoothed interpolation and
    the raw nearest-neighbor resampling.

    Keyword arguments:
        -smoothing: standard deviation of gaussian smoothing kernel to apply
        to the interpolated background.
    """
    # TODO: use a better 2d-interpolation than nearest neighbor
    geometry_params = get_detid_parameters(detid)
    dimx, dimy = np.shape(imarray)
    # Pixel index grids, promoted to float for griddata.
    gridx, gridy = map(lambda arr: 1. * arr, get_x_y(imarray, *geometry_params))
    # flattened values of all pixels
    x, y = gridx.flatten(), gridy.flatten()
    z = imarray.flatten()
    # Zero pixels are "missing" and excluded from the interpolation source.
    z_good = np.where(z != 0)[0]
    if len(z_good) > 0:
        resampled = griddata(np.array([x[z_good], y[z_good]]).T, z[z_good], (gridx, gridy), method = 'nearest')
    else:
        # Nothing to interpolate from; fall through with the input unchanged.
        resampled = imarray
    smoothed = gaussian_filter(resampled, smoothing)
    return smoothed, resampled
@utils.eager_persist_to_file('cache/xrd/CTinterpolation')
def CTinterpolation(imarray, detid, smoothing = 10):
    """
    Do a 2d interpolation to fill in zero values of a 2d ndarray.

    Uses scipy.interpolate import CloughTocher2DInterpolator.

    Arguments:
        imarray : np.ndarray
        detid : string
        smoothing : numeric
            Standard deviation of the gaussian pre-smoothing applied to the
            nearest-neighbor seed interpolation (see note below).
    """
    from scipy.interpolate import CloughTocher2DInterpolator as ct
    geometry_params = get_detid_parameters(detid)
    dimx, dimy = np.shape(imarray)
    # Pixel index grids, promoted to float.
    gridx, gridy = map(lambda arr: 1. * arr, get_x_y(imarray, *geometry_params))

    def interp_2d(imarray):
        # Clough-Tocher interpolation of the nonzero pixels of imarray.
        # flattened values of all pixels
        z = imarray.flatten()
        z_good = np.where(z != 0)[0]
        if len(z_good) == 0:
            # No source data at all: return an all-zero frame.
            return np.zeros_like(imarray)
        else:
            x, y = gridx.flatten(), gridy.flatten()
            xgood, ygood = x[z_good], y[z_good]
            points = np.vstack((xgood, ygood)).T
            values = z[z_good]
            interpolator = ct(points, values)
            return interpolator(x, y).reshape(imarray.shape)

    # Input to the CT interpolation is a smoothed NN interpolation
    # This pre-interpolation step, combined with a sufficiently large value of
    # smoothing, is often necessary to prevent the interpolation from
    # oscillating/overshooting.
    smoothNN, _ = interp_2d_nearest_neighbor(imarray, detid, smoothing = smoothing)
    # Keep originally-missing pixels at zero so interp_2d still sees them
    # as holes to fill.
    smooth_masked = np.where(np.isclose(imarray, 0), 0., smoothNN)
    CTinterpolated = interp_2d(smooth_masked)
    # Fill in NAN values from outside the convex hull of the interpolated points
    combined = np.where(np.isnan(CTinterpolated), smoothNN, CTinterpolated)
    return combined
def get_background_full_frame(imarray, detid, compound_list, smoothing = DEFAULT_SMOOTHING,
                              width = config.peak_width):
    # TODO: reorganize this and the other background-calculation function
    """
    Calculate a background frame from imarray and return the result.

    Raises ValueError when compound_list is empty, since the computed
    "background" would then contain all of the signal.

    Keyword arguments:
        -smoothing: standard deviation of gaussian smoothing kernel to apply
        to the interpolated background.
        -width: angular width of regions (centered on powder peaks) that will
        be excluded from the source array from which the background is
        interpolated.
    """
    # If compound_list is empty the computed background will include all our
    # signal.
    if not compound_list:
        raise ValueError("compounds_list is empty")
    bgfit = imarray.copy()

    # mask based on good pixels
    pixel_mask = utils.combine_masks((bgfit != 0), [], transpose = True)

    # mask based on powder peak locations
    powder_mask = make_powder_ring_mask(detid, bgfit, compound_list, width = width)

    # intersection of the two masks (elementwise AND of the booleans)
    combined_mask = powder_mask & pixel_mask
    # Zero out everything outside the combined mask so the interpolation
    # only sees good, peak-free pixels.
    bgfit[~combined_mask] = 0.

    # TODO: Need a mechanism for getting dataset-specific paths.
    if utils.isroot():
        np.save('powder_with_cutout.npy', bgfit)

    # compute interpolated background
    bg = CTinterpolation(bgfit, detid, smoothing = smoothing)
    # zero out bad/nonexistent pixels
    bg[~pixel_mask] = 0.
    return bg
def subtract_background_full_frame(imarray, detid, compound_list, smoothing = DEFAULT_SMOOTHING,
                                   width = config.peak_width):
    """
    Background-subtract imarray and return the result.

    This function does not mutate imarray.

    Keyword arguments:
        -smoothing: standard deviation of gaussian smoothing kernel to apply
        to the interpolated background.
        -width: angular width of regions (centered on powder peaks) that will
        be excluded from the source array from which the background is
        interpolated.
    """
    # TODO: might be good to log intermediate stages
    background = get_background_full_frame(
        imarray, detid, compound_list, smoothing = smoothing, width = width)
    # Keep an image of the most recent background for inspection.
    utils.save_image('detector_images/last_bg.png', background)
    return imarray - background
def get_powder_angles(compound, peak_threshold = 0.02, filterfunc = lambda x: True):
    """
    Accessor function for powder data in config.py.

    Return a list of Bragg peak angles (in degrees) for the given compound,
    filtered using filterfunc.

    Keyword arguments:
    -peak_threshold: peaks whose simulated intensity is below this fraction
        of the maximum intensity are discarded.
    -filterfunc: predicate applied to each angle; only angles for which it
        returns True are kept.
    """
    if compound in config.powder_angles:
        # Wrap in list(): on Python 3, filter() returns a lazy iterator, and
        # callers such as make_powder_ring_mask test isinstance(..., list).
        # On Python 2 this is a no-op (filter already returns a list).
        return list(filter(filterfunc, config.powder_angles[compound]))
    else:
        energy = config.photon_energy
        fname = utils.resource_path('data/' + compound + '.csv')
        try:
            powder_q, intensities = np.genfromtxt(fname, delimiter = ',').T
        except IOError:
            raise IOError("Simulated diffraction file " + fname + ": not found")
        # Keep only sufficiently intense peaks, then convert q to 2*theta.
        powder_q = powder_q[intensities > np.max(intensities) * peak_threshold]
        powder_angles = 2 * np.arcsin(powder_q * HBARC / (2 * energy))
        # arcsin returns NaN for unreachable q at this photon energy; drop those.
        powder_angles = powder_angles[~np.isnan(powder_angles)]
        return list(filter(filterfunc, list(np.rad2deg(powder_angles))))
def make_powder_ring_mask(detid, imarray, compound_list, width = config.peak_width):
    """
    Given a detector ID, assembled image data array, and list of
    polycrystalline compounds in the target, return a boolean mask that
    excludes pixels located near powder peaks.
    """
    peak_angles = []
    for compound in compound_list:
        try:
            xrd_data = get_powder_angles(compound)
        except KeyError:
            raise KeyError("No XRD reference data found for compound: " + compound)
        if isinstance(xrd_data, list):
            # xrd_data is a list of Bragg angles.
            peak_angles.extend(xrd_data)
        else:
            # xrd_data is a path.
            # TODO: implement this
            raise NotImplementedError("compound_xrd path")
    # Start from an all-True mask, then carve out each peak's angular band.
    mask = ma.make_mask(np.ones(np.shape(imarray)))
    (phi, x0, y0, alpha, r) = get_detid_parameters(detid)
    betas, rho = get_beta_rho(imarray, phi, x0, y0, alpha, r)
    half_width = width / 2.
    for ang in peak_angles:
        in_band = np.logical_and(betas > ang - half_width, betas < ang + half_width)
        mask = np.where(in_band, False, mask)
    return mask
| hoidn/LCLS | dataccess/dataccess/geometry.py | Python | gpl-3.0 | 15,603 | [
"Gaussian"
] | 2e79a715b25e14da09d203476f26b9827cc674caf833fbf8d58e846dcaf9c797 |
"""
Tests for ConvMolFeaturizer.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Han Altae-Tran and Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "GPL"
import unittest
import os
import sys
import numpy as np
import rdkit
from deepchem.feat.mol_graphs import ConvMol
from deepchem.feat.mol_graphs import MultiConvMol
from deepchem.feat.graph_features import ConvMolFeaturizer
class TestConvMolFeaturizer(unittest.TestCase):
  """
  Test that ConvMolFeaturizer featurizes molecules properly.
  """

  def _assert_empty_deg_lists(self, deg_adj_lists, degrees, n_rows=0):
    """Assert the adjacency lists for the given degrees are empty.

    Each listed degree must equal an empty [n_rows, degree] int32 array.
    n_rows is nonzero only for degree 0, where isolated atoms still
    contribute a zero-length row each.
    """
    for degree in degrees:
      assert np.array_equal(deg_adj_lists[degree],
                            np.zeros([n_rows, degree], dtype=np.int32))

  def test_carbon_nitrogen(self):
    """Test on carbon nitrogen molecule"""
    # Note there is a central carbon of degree 4, with 3 carbons and
    # one nitrogen of degree 1 (connected only to central carbon).
    raw_smiles = ['C[N+](C)(C)C']
    mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
    featurizer = ConvMolFeaturizer()
    mols = featurizer.featurize(mols)
    mol = mols[0]

    # 5 atoms in compound
    assert mol.get_num_atoms() == 5

    # Get the adjacency lists grouped by degree; only degrees 1 and 4
    # are populated for this molecule.
    deg_adj_lists = mol.get_deg_adjacency_lists()
    self._assert_empty_deg_lists(deg_adj_lists, [0, 2, 3, 5, 6])
    # The 4 outer atoms connected to central carbon
    assert np.array_equal(deg_adj_lists[1],
                          np.array([[4], [4], [4], [4]], dtype=np.int32))
    # Central carbon connected to everything else.
    assert np.array_equal(deg_adj_lists[4],
                          np.array([[0, 1, 2, 3]], dtype=np.int32))

  def test_single_carbon(self):
    """Test that single carbon atom is featurized properly."""
    raw_smiles = ['C']
    mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
    featurizer = ConvMolFeaturizer()
    mol_list = featurizer.featurize(mols)
    mol = mol_list[0]

    # Only one carbon
    assert mol.get_num_atoms() == 1

    # No bonds, so all degree adjacency lists are empty; the lone atom
    # still contributes one zero-length row at degree 0.
    deg_adj_lists = mol.get_deg_adjacency_lists()
    self._assert_empty_deg_lists(deg_adj_lists, [0], n_rows=1)
    self._assert_empty_deg_lists(deg_adj_lists, [1, 2, 3, 4, 5, 6])

  def test_alkane(self):
    """Test on simple alkane"""
    raw_smiles = ['CCC']
    mols = [rdkit.Chem.MolFromSmiles(s) for s in raw_smiles]
    featurizer = ConvMolFeaturizer()
    mol_list = featurizer.featurize(mols)
    mol = mol_list[0]

    # 3 carbons in alkane
    assert mol.get_num_atoms() == 3

    deg_adj_lists = mol.get_deg_adjacency_lists()
    self._assert_empty_deg_lists(deg_adj_lists, [0, 3, 4, 5, 6])
    # Outer two carbons are connected to central carbon
    assert np.array_equal(deg_adj_lists[1],
                          np.array([[2], [2]], dtype=np.int32))
    # Central carbon connected to outer two
    assert np.array_equal(deg_adj_lists[2],
                          np.array([[0, 1]], dtype=np.int32))
| bowenliu16/deepchem | deepchem/feat/tests/test_graph_features.py | Python | gpl-3.0 | 4,376 | [
"RDKit"
] | d6bc919f372734c659685e59c9b54f4ecaf070a5fe9f4b9f8a59ea37c1bf885f |
#!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All Rights Reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
import sys
sys.path.append('./')
from androguard.core.bytecodes import apk, dvm
from androguard.core.analysis.analysis import uVMAnalysis
from androguard.decompiler.dad.decompile import DvMethod
from androguard.decompiler.dad.instruction import (Constant,
BinaryCompExpression)
class DemoEmulator(object):
    """
    Visitor that concretely interprets DAD's decompiled IR: each visit_*
    callback evaluates one IR construct against the emulated memory held
    in self.mem (a mapping from variable/register names to values).
    """

    def __init__(self, graph):
        self.graph = graph
        self.loop = []  # stack of loop nodes currently being executed
        self.mem = {}   # emulated memory: name -> value

    def init(self, key, value):
        """Seed the emulated memory with an initial key/value binding."""
        self.mem[key] = value

    def visit(self, node):
        # Do not re-enter a loop node that is already being executed.
        if node not in self.loop:
            node.visit(self)

    def visit_ins(self, ins):
        return ins.visit(self)

    def visit_loop_node(self, loop):
        """Execute a (pretest) loop node by re-evaluating its condition."""
        self.loop.append(loop)
        follow = loop.get_loop_follow()
        if loop.looptype.pretest():
            # Normalize so the 'true' branch is the loop body, not the exit.
            if loop.true is follow:
                loop.neg()
                loop.true, loop.false = loop.false, loop.true
            while loop.visit_cond(self):
                loop.true.visit(self)
        self.loop.pop()
        if follow is not None:
            self.visit(follow)

    def visit_cond_node(self, cond):
        """Execute one branch of a conditional, then its follow node."""
        follow = cond.get_if_follow()
        if follow is not None:
            # An 'else' exists only when the follow node is neither branch.
            has_else = not (follow in (cond.true, cond.false))
            cnd = cond.visit_cond(self)
            if cnd:
                cond.true.visit(self)
            elif has_else:
                cond.false.visit(self)
            self.visit(follow)

    def visit_statement_node(self, stmt):
        """Run every instruction of a basic block, then its successor."""
        sucs = self.graph.sucs(stmt)
        for ins in stmt.get_ins():
            self.visit_ins(ins)
        if len(sucs):
            self.visit(sucs[0])

    def visit_return_node(self, ret):
        for ins in ret.get_ins():
            self.visit_ins(ins)

    def visit_constant(self, cst):
        return cst

    def visit_variable(self, var):
        return self.mem[var]

    def visit_param(self, param):
        return param

    def visit_assign(self, lhs, rhs):
        # A None lhs means the rhs is evaluated for its side effects only.
        if lhs is None:
            rhs.visit(self)
        else:
            self.mem[lhs.v] = rhs.visit(self)

    def visit_astore(self, array, index, rhs):
        """Emulate an array store: mem[array][index] = rhs."""
        array = array.visit(self)
        if isinstance(index, Constant):
            # Constants need an explicit type hint ('I' = int) to resolve.
            idx = index.visit(self, 'I')
        else:
            idx = index.visit(self)
        self.mem[array][idx] = rhs.visit(self)

    def visit_return_void(self):
        pass

    def visit_aload(self, array, index):
        """Emulate an array load: return mem[array][index]."""
        arr = array.visit(self)
        idx = index.visit(self)
        return self.mem[arr][idx]

    def visit_alength(self, array):
        return len(self.mem[array.visit(self)])

    def _to_int(self, value):
        # Operand values may be ints or single characters; normalize to int.
        if isinstance(value, int):
            return value
        return ord(value)

    def _apply_binary_op(self, op, arg1, arg2):
        """Evaluate `arg1 <op> arg2` over the emulated memory.

        Shared implementation for visit_binary_expression and
        visit_cond_expression, which were previously duplicated verbatim.
        NOTE(review): eval() is tolerable here only because `op` is an
        operator string produced by the decompiler's own IR, never
        untrusted external input.
        """
        lhs = self._to_int(arg1.visit(self))
        rhs = self._to_int(arg2.visit(self))
        return eval('%s %s %s' % (lhs, op, rhs))

    def visit_binary_expression(self, op, arg1, arg2):
        return self._apply_binary_op(op, arg1, arg2)

    def visit_unary_expression(self, op, arg):
        # NOTE(review): the result is discarded and None is returned;
        # looks suspicious but is preserved as-is from the original.
        arg.visit(self)

    def visit_cast(self, op, arg):
        return arg.visit(self)

    def visit_cond_expression(self, op, arg1, arg2):
        return self._apply_binary_op(op, arg1, arg2)

    def visit_get_static(self, cls, name):
        # Static field reads are resolved by name against emulated memory.
        return self.mem[name]
# --- Demo driver (Python 2): emulate the 'crypt' method of a sample APK. ---
TEST = './apks/pacsec/magicspiral.apk'
vm = dvm.DalvikVMFormat(apk.APK(TEST).get_dex())
vma = uVMAnalysis(vm)
# Decompile the target method into DAD's IR.
method = vm.get_method('crypt')[0]
amethod = vma.get_method(method)
dvmethod = DvMethod(amethod)
dvmethod.process() # build IR Form / control flow...
graph = dvmethod.graph
visitor = DemoEmulator(graph)
# Byte values fed to the method as its first (list) parameter.
l = [94, 42, 93, 88, 3, 2, 95, 2, 13, 85, 11, 2, 19, 1, 125, 19, 0, 102, 30, 24,
     19, 99, 76, 21, 102, 22, 26, 111, 39, 125, 2, 44, 80, 10, 90, 5, 119, 100,
     119, 60, 4, 87, 79, 42, 52]
visitor.init(dvmethod.lparams[0], l)
KEYVALUE = '6^)(9-p35a%3#4S!4S0)$Yt%^&5(j.g^&o(*0)$Yv!#O@6GpG@=+3j.&6^)(0-=1'
# The emulated code reaches the key through two bindings (field name ->
# array name -> contents), so seed both, plus the key length.
visitor.init('KEYVALUE', '[BKEYVALUE')
visitor.init('[BKEYVALUE', KEYVALUE)
visitor.init('keylen', len(KEYVALUE))
method.show()

def show_mem(visitor):
    # Dump emulated memory slot 4 (printed before and after the run so the
    # transformation performed by 'crypt' is visible).
    print 'Memory[4]: %s' % visitor.mem[4]
    print '==> %r' % ''.join(chr(i) for i in visitor.mem[4])

show_mem(visitor)
print '\nStarting visit...',
# Walk the control-flow graph from its entry node, executing each node.
graph.get_entry().visit(visitor)
print ' done !\n'
show_mem(visitor)
| xtiankisutsa/MARA_Framework | tools/androguard/demos/dad_emul.py | Python | lgpl-3.0 | 5,275 | [
"VisIt"
] | 9f45a55c42aef2e26f92f95488902040bc689337162271bc6fcaec290f28b8a1 |
"""
Module to hold a RecipeTable class.
"""
import math
import warnings
import random
import pickle
import scipy.sparse as sparse
from tqdm import tqdm
import myio.myio as myio
import statistics.cluster as cluster
import chef_global.debug as debug
class RecipeTable:
    """
    A class to hold all of the recipes.
    Can be saved and loaded to/from disk.
    """
    def __init__(self, recipes=None):
        # 'recipes=None' avoids the mutable-default-argument pitfall of the
        # previous 'recipes=[]'; the input is copied so later mutation of the
        # caller's list does not affect the table.
        self.__recipes = list(recipes) if recipes is not None else []
        self.__clusters = []

    def __iter__(self):
        """Iterate over the recipes in insertion order."""
        return iter(self.__recipes)

    def __len__(self):
        """Return the number of recipes in the table."""
        return len(self.__recipes)

    def add_recipe(self, recipe):
        """
        Takes a Recipe object and adds it to
        the list of recipes held here.
        @param recipe: The recipe to add.
        @return: void
        """
        self.__recipes.append(recipe)

    def calculate_typical_consecutive_zeros(self):
        """
        Calculates the typical number of consecutive zeros
        in a typical feature vector. Useful for Golomb compression.
        @return: The typical number of consecutive zeros
        """
        # Cached value: recomputing (see _compute_typical_consecutive_zeros)
        # takes ~5 hours on the full data set.
        return 1268.1495099738602

    def _compute_typical_consecutive_zeros(self):
        """Recompute the value cached by calculate_typical_consecutive_zeros.

        Previously this code sat unreachable after the cached 'return'; it is
        kept here so the cache can be regenerated when the data changes.
        """
        print("Calculating the typical number of zeros...")
        num_ingredients = 0
        total_total = 0
        for recipe in tqdm(self):
            for ingredient in recipe:
                num_ingredients += 1
                fv = self.ingredient_to_feature_vector(ingredient)
                last = 0
                num_sequences = 0
                num_consec = 0
                total = 0
                # Count the average length of each run of zeros preceding
                # a run of ones. ('==' replaces the original 'is', which
                # only worked via CPython's small-int cache.)
                for r in fv:
                    if r == 1 and last == 0:
                        num_sequences += 1
                        total += num_consec
                        num_consec = 0
                        last = 1
                    elif r == 1 and last == 1:
                        last = 1
                    else:  # r == 0; zeros extend the current run
                        num_consec += 1
                        last = 0
                avg_consec = total / num_sequences
                total_total += avg_consec
        return total_total / num_ingredients

    def compute_stats(self):
        """
        Computes the average and standard deviation of
        recipe lengths.
        @return: tuple of the form (avg, stdev)
        """
        total_length = sum(len(rec) for rec in self)
        avg = total_length / len(self)
        variance = sum((len(rec) - avg) ** 2 for rec in self) / len(self)
        return avg, math.sqrt(variance)

    def get_all_ingredients(self):
        """
        Gets the set of all unique ingredients found in all
        of the recipes.
        @return: The ingredients
        """
        return {ingredient for recipe in self for ingredient in recipe}

    def get_cluster(self, index):
        """
        Gets the cluster with the given index.
        @param index: The index to use to retrieve the cluster
        @return: A cluster object whose index is index, or None if absent
        """
        if index >= len(self.__clusters):
            return None
        else:
            return self.__clusters[index]

    def get_random_ingredient(self, seed=0):
        """
        Gets a random ingredient from the table.
        @param seed: The random seed.
        @return: random ingredient
        """
        random.seed(seed)
        recipe = []
        # Keep drawing until a non-empty recipe is found. (The original code
        # returned recipe[0] when the drawn recipe was EMPTY, which raised
        # IndexError; the intent was clearly to retry.)
        while len(recipe) == 0:
            recipe_index = random.randint(0, len(self.__recipes) - 1)
            recipe = self.__recipes[recipe_index]
        ingredient_index = random.randint(0, len(recipe) - 1)
        return recipe[ingredient_index]

    def get_random_number(self):
        """
        Gets a random number of ingredients from a Gaussian distribution
        centered around the average number of ingredients in a recipe.
        Can return 0, but no negatives.
        @return: The random number.
        """
        avg, std = self.compute_stats()
        num = random.gauss(avg, std)
        num = 0 if num < 0 else num
        # Round to nearest integer.
        return int(num + 0.5)

    def get_recipes(self):
        """
        Gets all the recipes from the table.
        @return: The list of RecipeObjects.
        """
        return self.__recipes

    def ingredient_to_feature_vector(self, ingredient):
        """
        Generates a vector of the form:
        Recipe 0    Recipe 1     Recipe 2  ...
        0           1            0
        Where a 0 means the ingredient is not present in that recipe and a 1
        means it is.
        This means the return value is a list of the form [0, 1, 0, ...] where
        each index represents a recipe and the value at that index represents
        present or not.
        @param ingredient: The ingredient for which to generate a feature vector
        @return: The feature vector
        """
        return [1 if recipe.has(ingredient) else 0 for recipe in self.__recipes]

    def ingredient_to_feature_vector_sparse(self, ingredient):
        """
        Does exactly the same thing as ingredient_to_feature_vector, but
        in a sparse format, specifically, it returns a csr_matrix.
        """
        list_form = self.ingredient_to_feature_vector(ingredient)
        # Build as lil_matrix (cheap to construct), then convert to csr.
        lil_matrix_form = sparse.lil_matrix(list_form)
        return lil_matrix_form.tocsr()

    def load_in_clusters(self, clusters):
        """
        Loads the clusters into the recipe table.
        @param clusters: list of cluster objects
        @return: void
        """
        max_index = 0
        for c in clusters:
            max_index = c.index if c.index > max_index else max_index
        # Pad the cluster list with placeholders up to the largest index,
        # then slot each cluster into its own position.
        for i in range(max_index + 1):
            self.__clusters.append("empty")
        for c in clusters:
            self.__clusters[c.index] = c
def save_to_disk(obj, path="tmp/recipe_table"):
    """
    Saves the object to the disk in the given path.
    @param obj: The object to save.
    @param path: The path to save at.
    @return: The path it was saved to.
    """
    # Use a context manager so the file handle is always closed; the
    # original 'pickle.dump(obj, open(path, "wb"))' leaked the handle.
    with open(path, 'wb') as f:
        pickle.dump(obj, f)
    return path
def load_from_disk(path="tmp/recipe_table"):
    """
    Loads the pickled object from the path.
    @param path: The path to the pickled object.
    @return: The object.
    """
    # Use a context manager so the file handle is always closed; the
    # original 'pickle.load(open(path, "rb"))' leaked the handle.
    with open(path, 'rb') as f:
        return pickle.load(f)
| MaxStrange/swedish_chef | statistics/recipe_table.py | Python | apache-2.0 | 6,856 | [
"Gaussian"
] | 29ea11c035e590102a6bd8ce7dc381c4580357da90967205069985f5b55bb4dc |
"""
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
Matlab(TM) analogs and similar arguments.
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a handle graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interaction mode off
ion - turn interaction mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a handle graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make a subplot (numrows, numcols, axesnum)
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
title - add a title to the current axes
xcorr - plot the autocorrelation function of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
detrend - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
diff - the n-th difference of an array
eig - the eigenvalues and eigen vectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
fliplr - flip the rows of a matrix up/down
flipud - flip the columns of a matrix left/right
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
rot90 - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
levypdf - The levy probability density function from the char. func.
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
corrcoef - correlation coefficient
cov - covariance matrix
amax - the maximum along dimension m
mean - the mean along dimension m
median - the median along dimension m
amin - the minimum along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
cohere - the coherence using average periodogram
csd - the cross spectral density using average periodogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
psd - the power spectral density using average periodogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - load ASCII data into array
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - save an array to an ASCII file
trapz - trapezoidal integration
__end
"""
import sys, warnings
from cbook import flatten, is_string_like, exception_to_str, popd, \
silent_list, iterable, dedent
import numpy as np
from numpy import ma
from matplotlib import mpl # pulls in most modules
from matplotlib.dates import date2num, num2date,\
datestr2num, strpdate2num, drange,\
epoch2num, num2epoch, mx2num,\
DateFormatter, IndexDateFormatter, DateLocator,\
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\
DayLocator, HourLocator, MinuteLocator, SecondLocator,\
rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\
WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta
import matplotlib.dates
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
from matplotlib.mlab import window_hanning, window_none,\
conv, detrend, detrend_mean, detrend_none, detrend_linear,\
polyfit, polyval, entropy, normpdf, griddata,\
levypdf, find, trapz, prepca, rem, norm, orth, rank,\
sqrtm, prctile, center_matrix, rk4, exp_safe, amap,\
sum_flat, mean_flat, rms_flat, l1norm, l2norm, norm, frange,\
diagonal_matrix, base_repr, binary_repr, log2, ispower2,\
bivariate_normal, load, save
from matplotlib.mlab import stineman_interp, slopes, \
stineman_interp, inside_poly, poly_below, poly_between, \
is_closed_polygon, path_length, distances_along_curve, vector_lengths
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
from matplotlib.mlab import window_hanning, window_none, conv, detrend, demean, \
detrend_mean, detrend_none, detrend_linear, entropy, normpdf, levypdf, \
find, longest_contiguous_ones, longest_ones, prepca, prctile, prctile_rank, \
center_matrix, rk4, bivariate_normal, get_xyz_where, get_sparse_matrix, dist, \
dist_point_to_segment, segments_intersect, fftsurr, liaupunov, movavg, \
save, load, exp_safe, \
amap, rms_flat, l1norm, l2norm, norm_flat, frange, diagonal_matrix, identity, \
base_repr, binary_repr, log2, ispower2, fromfunction_kw, rem, norm, orth, rank, sqrtm,\
mfuncC, approx_real, rec_append_field, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector
from matplotlib.pyplot import *
# provide the recommended module abbrevs in the pylab namespace
import matplotlib.pyplot as plt
import numpy as np
| tkaitchuck/nupic | external/linux64/lib/python2.6/site-packages/matplotlib/pylab.py | Python | gpl-3.0 | 10,245 | [
"Gaussian"
] | 0f447994286e8380c22b7bc288aea50c5f4a8e7d7eadc370eddce12c94a48945 |
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import base64
import imp
import json
import os
import shlex
import zipfile
from io import BytesIO
# from Ansible
from ansible.release import __version__, __author__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.utils.unicode import to_bytes, to_unicode
# Must import strategy and use write_locks from there
# If we import write_locks directly then we end up binding a
# variable to the object and then it never gets updated.
from ansible.plugins import strategy
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
# Placeholder markers substituted when a module is assembled for transfer
# to the remote host. Each marker is replaced by the corresponding payload
# (boilerplate code, version string, serialized arguments, etc.).
REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\""
REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = b"# POWERSHELL_COMMON"
REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>"

# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
ENCODING_STRING = u'# -*- coding: utf-8 -*-'

# we've moved the module_common relative to the snippets, so fix the path
_SNIPPET_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')

# ******************************************************************************
ZIPLOADER_TEMPLATE = u'''%(shebang)s
%(coding)s
ZIPLOADER_WRAPPER = True # For test-module script to tell this is a ZIPLOADER_WRAPPER
# This code is part of Ansible, but is an independent component.
# The code in this particular templatable string, and this templatable string
# only, is BSD licensed. Modules which end up using this snippet, which is
# dynamically combined together by Ansible still belong to the author of the
# module, and they may assign their own license to the complete work.
#
# Copyright (c), James Cammarata, 2016
# Copyright (c), Toshio Kuratomi, 2016
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import base64
import shutil
import zipfile
import tempfile
import subprocess
if sys.version_info < (3,):
bytes = str
PY3 = False
else:
unicode = str
PY3 = True
try:
# Python-2.6+
from io import BytesIO as IOStream
except ImportError:
# Python < 2.6
from StringIO import StringIO as IOStream
ZIPDATA = """%(zipdata)s"""
def invoke_module(module, modlib_path, json_params):
pythonpath = os.environ.get('PYTHONPATH')
if pythonpath:
os.environ['PYTHONPATH'] = ':'.join((modlib_path, pythonpath))
else:
os.environ['PYTHONPATH'] = modlib_path
p = subprocess.Popen([%(interpreter)s, module], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate(json_params)
if not isinstance(stderr, (bytes, unicode)):
stderr = stderr.read()
if not isinstance(stdout, (bytes, unicode)):
stdout = stdout.read()
if PY3:
sys.stderr.buffer.write(stderr)
sys.stdout.buffer.write(stdout)
else:
sys.stderr.write(stderr)
sys.stdout.write(stdout)
return p.returncode
def debug(command, zipped_mod, json_params):
# The code here normally doesn't run. It's only used for debugging on the
# remote machine.
#
# The subcommands in this function make it easier to debug ziploader
# modules. Here's the basic steps:
#
# Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv
# to save the module file remotely::
# $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv
#
# Part of the verbose output will tell you where on the remote machine the
# module was written to::
# [...]
# <host1> SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o
# PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o
# ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
# LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"''
# [...]
#
# Login to the remote machine and run the module file via from the previous
# step with the explode subcommand to extract the module payload into
# source files::
# $ ssh host1
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode
# Module expanded into:
# /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible
#
# You can now edit the source files to instrument the code or experiment with
# different parameter values. When you're ready to run the code you've modified
# (instead of the code from the actual zipped module), use the execute subcommand like this::
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute
# Okay to use __file__ here because we're running from a kept file
basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
args_path = os.path.join(basedir, 'args')
script_path = os.path.join(basedir, 'ansible_module_%(ansible_module)s.py')
if command == 'explode':
# transform the ZIPDATA into an exploded directory of code and then
# print the path to the code. This is an easy way for people to look
# at the code on the remote machine for debugging it in that
# environment
z = zipfile.ZipFile(zipped_mod)
for filename in z.namelist():
if filename.startswith('/'):
raise Exception('Something wrong with this module zip file: should not contain absolute paths')
dest_filename = os.path.join(basedir, filename)
if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
os.makedirs(dest_filename)
else:
directory = os.path.dirname(dest_filename)
if not os.path.exists(directory):
os.makedirs(directory)
f = open(dest_filename, 'w')
f.write(z.read(filename))
f.close()
# write the args file
f = open(args_path, 'w')
f.write(json_params)
f.close()
print('Module expanded into:')
print('%%s' %% basedir)
exitcode = 0
elif command == 'execute':
# Execute the exploded code instead of executing the module from the
# embedded ZIPDATA. This allows people to easily run their modified
# code on the remote machine to see how changes will affect it.
# This differs slightly from default Ansible execution of Python modules
# as it passes the arguments to the module via a file instead of stdin.
# Set pythonpath to the debug dir
pythonpath = os.environ.get('PYTHONPATH')
if pythonpath:
os.environ['PYTHONPATH'] = ':'.join((basedir, pythonpath))
else:
os.environ['PYTHONPATH'] = basedir
p = subprocess.Popen([%(interpreter)s, script_path, args_path], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if not isinstance(stderr, (bytes, unicode)):
stderr = stderr.read()
if not isinstance(stdout, (bytes, unicode)):
stdout = stdout.read()
if PY3:
sys.stderr.buffer.write(stderr)
sys.stdout.buffer.write(stdout)
else:
sys.stderr.write(stderr)
sys.stdout.write(stdout)
return p.returncode
elif command == 'excommunicate':
# This attempts to run the module in-process (by importing a main
# function and then calling it). It is not the way ansible generally
# invokes the module so it won't work in every case. It is here to
# aid certain debuggers which work better when the code doesn't change
# from one process to another but there may be problems that occur
# when using this that are only artifacts of how we're invoking here,
# not actual bugs (as they don't affect the real way that we invoke
# ansible modules)
# stub the args and python path
sys.argv = ['%(ansible_module)s', args_path]
sys.path.insert(0, basedir)
from ansible_module_%(ansible_module)s import main
main()
print('WARNING: Module returned to wrapper instead of exiting')
sys.exit(1)
else:
print('WARNING: Unknown debug command. Doing nothing.')
exitcode = 0
return exitcode
if __name__ == '__main__':
#
# See comments in the debug() method for information on debugging
#
ZIPLOADER_PARAMS = %(params)s
if PY3:
ZIPLOADER_PARAMS = ZIPLOADER_PARAMS.encode('utf-8')
try:
# There's a race condition with the controller removing the
# remote_tmpdir and this module executing under async. So we cannot
# store this in remote_tmpdir (use system tempdir instead)
temp_path = tempfile.mkdtemp(prefix='ansible_')
zipped_mod = os.path.join(temp_path, 'ansible_modlib.zip')
modlib = open(zipped_mod, 'wb')
modlib.write(base64.b64decode(ZIPDATA))
modlib.close()
if len(sys.argv) == 2:
exitcode = debug(sys.argv[1], zipped_mod, ZIPLOADER_PARAMS)
else:
z = zipfile.ZipFile(zipped_mod, mode='r')
module = os.path.join(temp_path, 'ansible_module_%(ansible_module)s.py')
f = open(module, 'wb')
f.write(z.read('ansible_module_%(ansible_module)s.py'))
f.close()
# When installed via setuptools (including python setup.py install),
# ansible may be installed with an easy-install.pth file. That file
# may load the system-wide install of ansible rather than the one in
# the module. sitecustomize is the only way to override that setting.
z = zipfile.ZipFile(zipped_mod, mode='a')
# py3: zipped_mod will be text, py2: it's bytes. Need bytes at the end
sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% zipped_mod
sitecustomize = sitecustomize.encode('utf-8')
z.writestr('sitecustomize.py', sitecustomize)
z.close()
exitcode = invoke_module(module, zipped_mod, ZIPLOADER_PARAMS)
finally:
try:
shutil.rmtree(temp_path)
except OSError:
# tempdir creation probably failed
pass
sys.exit(exitcode)
'''
def _strip_comments(source):
# Strip comments and blank lines from the wrapper
buf = []
for line in source.splitlines():
l = line.strip()
if not l or l.startswith(u'#'):
continue
buf.append(line)
return u'\n'.join(buf)
# Select which wrapper template is shipped to the remote host.  When the
# user keeps remote files, comments are preserved so the debugging
# instructions inside the template remain readable on the target.
if C.DEFAULT_KEEP_REMOTE_FILES:
    # Keep comments when KEEP_REMOTE_FILES is set. That way users will see
    # the comments with some nice usage instructions
    ACTIVE_ZIPLOADER_TEMPLATE = ZIPLOADER_TEMPLATE
else:
    # ZIPLOADER_TEMPLATE stripped of comments for smaller over the wire size
    ACTIVE_ZIPLOADER_TEMPLATE = _strip_comments(ZIPLOADER_TEMPLATE)
class ModuleDepFinder(ast.NodeVisitor):
    """AST visitor that records every ``ansible.module_utils`` import.

    Caveats:
      This code currently does not handle:
      * relative imports from py2.6+ (``from . import urls``).  They are now
        skipped safely (``node.module`` is None for them) instead of crashing,
        but they are still not resolved to module files.
    """

    # Length of the dotted prefix stripped from each matching import path.
    IMPORT_PREFIX_SIZE = len('ansible.module_utils.')

    def __init__(self, *args, **kwargs):
        """
        Walk the ast tree for the python module.
        Save submodule[.submoduleN][.identifier] into self.submodules
        self.submodules will end up with tuples like:
          - ('basic',)
          - ('urls', 'fetch_url')
          - ('database', 'postgres')
          - ('database', 'postgres', 'quote')
        It's up to calling code to determine whether the final element of the
        dotted strings are module names or something else (function, class, or
        variable names)
        """
        super(ModuleDepFinder, self).__init__(*args, **kwargs)
        self.submodules = set()

    def visit_Import(self, node):
        # import ansible.module_utils.MODLIB[.MODLIBn] [as asname]
        for alias in (a for a in node.names if a.name.startswith('ansible.module_utils.')):
            py_mod = alias.name[self.IMPORT_PREFIX_SIZE:]
            py_mod = tuple(py_mod.split('.'))
            self.submodules.add(py_mod)
        self.generic_visit(node)

    def visit_ImportFrom(self, node):
        # BUG FIX: node.module is None for relative imports such as
        # "from . import urls"; the original code raised AttributeError on
        # .startswith() here.  Skip such imports instead.
        if node.module and node.module.startswith('ansible.module_utils'):
            where_from = node.module[self.IMPORT_PREFIX_SIZE:]
            if where_from:
                # from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname]
                # from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname]
                # from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname]
                py_mod = tuple(where_from.split('.'))
                for alias in node.names:
                    self.submodules.add(py_mod + (alias.name,))
            else:
                # from ansible.module_utils import MODLIB [,MODLIB2] [as asname]
                for alias in node.names:
                    self.submodules.add((alias.name,))
        self.generic_visit(node)
def _slurp(path):
if not os.path.exists(path):
raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path))
fd = open(path, 'rb')
data = fd.read()
fd.close()
return data
def _get_shebang(interpreter, task_vars, args=tuple()):
"""
Note not stellar API:
Returns None instead of always returning a shebang line. Doing it this
way allows the caller to decide to use the shebang it read from the
file rather than trust that we reformatted what they already have
correctly.
"""
interpreter_config = u'ansible_%s_interpreter' % os.path.basename(interpreter).strip()
if interpreter_config not in task_vars:
return (None, interpreter)
interpreter = task_vars[interpreter_config].strip()
shebang = u'#!' + interpreter
if args:
shebang = shebang + u' ' + u' '.join(args)
return (shebang, interpreter)
def recursive_finder(name, data, py_module_names, py_module_cache, zf):
    """
    Using ModuleDepFinder, make sure we have all of the module_utils files that
    the module its module_utils files needs.

    :arg name: module name, used only for error messages.
    :arg data: source of the module (bytes/str accepted by ast.parse).
    :arg py_module_names: set of module-name tuples already written to the zip;
        mutated in place as new modules are processed.
    :arg py_module_cache: mapping of module-name tuple -> file contents;
        entries are consumed (deleted) once written.
    :arg zf: open zipfile.ZipFile the module_utils files are written into.
    """
    # Parse the module and find the imports of ansible.module_utils
    tree = ast.parse(data)
    finder = ModuleDepFinder()
    finder.visit(tree)

    #
    # Determine what imports that we've found are modules (vs class, function.
    # variable names) for packages
    #

    normalized_modules = set()
    # Loop through the imports that we've found to normalize them
    # Exclude paths that match with paths we've already processed
    # (Have to exclude them a second time once the paths are processed)
    for py_module_name in finder.submodules.difference(py_module_names):
        module_info = None
        if py_module_name[0] == 'six':
            # Special case the python six library because it messes up the
            # import process in an incompatible way
            module_info = imp.find_module('six', [_SNIPPET_PATH])
            py_module_name = ('six',)
            idx = 0
        else:
            # Check whether either the last or the second to last identifier is
            # a module name
            for idx in (1, 2):
                if len(py_module_name) < idx:
                    break
                try:
                    module_info = imp.find_module(py_module_name[-idx],
                            [os.path.join(_SNIPPET_PATH, *py_module_name[:-idx])])
                    break
                except ImportError:
                    continue

        # Could not find the module. Construct a helpful error message.
        if module_info is None:
            msg = ['Could not find imported module support code for %s. Looked for' % name]
            if idx == 2:
                msg.append('either %s or %s' % (py_module_name[-1], py_module_name[-2]))
            else:
                msg.append(py_module_name[-1])
            raise AnsibleError(' '.join(msg))

        if idx == 2:
            # We've determined that the last portion was an identifier and
            # thus, not part of the module name
            py_module_name = py_module_name[:-1]

        # If not already processed then we've got work to do
        if py_module_name not in py_module_names:
            # If not in the cache, then read the file into the cache
            # We already have a file handle for the module open so it makes
            # sense to read it now
            if py_module_name not in py_module_cache:
                if module_info[2][2] == imp.PKG_DIRECTORY:
                    # Read the __init__.py instead of the module file as this is
                    # a python package
                    py_module_cache[py_module_name + ('__init__',)] = _slurp(os.path.join(os.path.join(_SNIPPET_PATH, *py_module_name), '__init__.py'))
                    normalized_modules.add(py_module_name + ('__init__',))
                else:
                    py_module_cache[py_module_name] = module_info[0].read()
                    module_info[0].close()
                    normalized_modules.add(py_module_name)

            # Make sure that all the packages that this module is a part of
            # are also added
            for i in range(1, len(py_module_name)):
                py_pkg_name = py_module_name[:-i] + ('__init__',)
                if py_pkg_name not in py_module_names:
                    normalized_modules.add(py_pkg_name)
                    py_module_cache[py_pkg_name] = _slurp('%s.py' % os.path.join(_SNIPPET_PATH, *py_pkg_name))

    #
    # iterate through all of the ansible.module_utils* imports that we haven't
    # already checked for new imports
    #

    # set of modules that we haven't added to the zipfile
    unprocessed_py_module_names = normalized_modules.difference(py_module_names)

    for py_module_name in unprocessed_py_module_names:
        py_module_path = os.path.join(*py_module_name)
        py_module_file_name = '%s.py' % py_module_path

        zf.writestr(os.path.join("ansible/module_utils",
                py_module_file_name), py_module_cache[py_module_name])

    # Add the names of the files we're scheduling to examine in the loop to
    # py_module_names so that we don't re-examine them in the next pass
    # through recursive_finder()
    py_module_names.update(unprocessed_py_module_names)

    # Recurse into each newly discovered file so its own module_utils
    # imports are pulled in as well.
    for py_module_file in unprocessed_py_module_names:
        recursive_finder(py_module_file, py_module_cache[py_module_file], py_module_names, py_module_cache, zf)
        # Save memory; the file won't have to be read again for this ansible module.
        del py_module_cache[py_module_file]
def _is_binary(module_data):
textchars = bytearray(set([7, 8, 9, 10, 12, 13, 27]) | set(range(0x20, 0x100)) - set([0x7f]))
start = module_data[:1024]
return bool(start.translate(None, textchars))
def _find_snippet_imports(module_name, module_data, module_path, module_args, task_vars, module_compression):
    """
    Given the source of the module, convert it to a Jinja2 template to insert
    module code and return whether it's a new or old style module.

    :returns: a ``(module_data, module_style, shebang)`` tuple where
        module_style is one of 'old', 'new', 'non_native_want_json' or
        'binary' and shebang is a text shebang line or None.
    """
    module_substyle = module_style = 'old'
    # module_style is something important to calling code (ActionBase). It
    # determines how arguments are formatted (json vs k=v) and whether
    # a separate arguments file needs to be sent over the wire.
    # module_substyle is extra information that's useful internally. It tells
    # us what we have to look to substitute in the module files and whether
    # we're using module replacer or ziploader to format the module itself.
    if _is_binary(module_data):
        module_substyle = module_style = 'binary'
    elif REPLACER in module_data:
        # Do REPLACER before from ansible.module_utils because we need make sure
        # we substitute "from ansible.module_utils basic" for REPLACER
        module_style = 'new'
        module_substyle = 'python'
        module_data = module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
    elif b'from ansible.module_utils.' in module_data:
        module_style = 'new'
        module_substyle = 'python'
    elif REPLACER_WINDOWS in module_data:
        module_style = 'new'
        module_substyle = 'powershell'
    elif REPLACER_JSONARGS in module_data:
        module_style = 'new'
        module_substyle = 'jsonargs'
    elif b'WANT_JSON' in module_data:
        module_substyle = module_style = 'non_native_want_json'

    shebang = None
    # Neither old-style, non_native_want_json nor binary modules should be modified
    # except for the shebang line (Done by modify_module)
    if module_style in ('old', 'non_native_want_json', 'binary'):
        return module_data, module_style, shebang

    output = BytesIO()
    py_module_names = set()

    if module_substyle == 'python':
        params = dict(ANSIBLE_MODULE_ARGS=module_args,)
        python_repred_params = repr(json.dumps(params))

        try:
            compression_method = getattr(zipfile, module_compression)
        except AttributeError:
            display.warning(u'Bad module compression string specified: %s. Using ZIP_STORED (no compression)' % module_compression)
            compression_method = zipfile.ZIP_STORED

        lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ziploader_cache')
        cached_module_filename = os.path.join(lookup_path, "%s-%s" % (module_name, module_compression))

        zipdata = None
        # Optimization -- don't lock if the module has already been cached
        if os.path.exists(cached_module_filename):
            display.debug('ZIPLOADER: using cached module: %s' % cached_module_filename)
            zipdata = open(cached_module_filename, 'rb').read()
        else:
            if module_name in strategy.action_write_locks:
                display.debug('ZIPLOADER: Using lock for %s' % module_name)
                lock = strategy.action_write_locks[module_name]
            else:
                # If the action plugin directly invokes the module (instead of
                # going through a strategy) then we don't have a cross-process
                # Lock specifically for this module. Use the "unexpected
                # module" lock instead
                display.debug('ZIPLOADER: Using generic lock for %s' % module_name)
                lock = strategy.action_write_locks[None]

            display.debug('ZIPLOADER: Acquiring lock')
            with lock:
                display.debug('ZIPLOADER: Lock acquired: %s' % id(lock))
                # Check that no other process has created this while we were
                # waiting for the lock
                if not os.path.exists(cached_module_filename):
                    display.debug('ZIPLOADER: Creating module')
                    # Create the module zip data
                    zipoutput = BytesIO()
                    zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
                    zf.writestr('ansible/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\ntry:\n from ansible.release import __version__,__author__\nexcept ImportError:\n __version__="' + to_bytes(__version__) + b'"\n __author__="' + to_bytes(__author__) + b'"\n')
                    zf.writestr('ansible/module_utils/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n')
                    zf.writestr('ansible_module_%s.py' % module_name, module_data)

                    py_module_cache = { ('__init__',): b'' }
                    recursive_finder(module_name, module_data, py_module_names, py_module_cache, zf)
                    zf.close()
                    zipdata = base64.b64encode(zipoutput.getvalue())

                    # Write the assembled module to a temp file (write to temp
                    # so that no one looking for the file reads a partially
                    # written file)
                    if not os.path.exists(lookup_path):
                        # Note -- if we have a global function to setup, that would
                        # be a better place to run this
                        os.mkdir(lookup_path)
                    display.debug('ZIPLOADER: Writing module')
                    with open(cached_module_filename + '-part', 'wb') as f:
                        f.write(zipdata)

                    # Rename the file into its final position in the cache so
                    # future users of this module can read it off the
                    # filesystem instead of constructing from scratch.
                    display.debug('ZIPLOADER: Renaming module')
                    os.rename(cached_module_filename + '-part', cached_module_filename)
                    display.debug('ZIPLOADER: Done creating module')

                if zipdata is None:
                    display.debug('ZIPLOADER: Reading module after lock')
                    # Another process wrote the file while we were waiting for
                    # the write lock. Go ahead and read the data from disk
                    # instead of re-creating it.
                    try:
                        zipdata = open(cached_module_filename, 'rb').read()
                    except IOError:
                        raise AnsibleError('A different worker process failed to create module file. Look at traceback for that process for debugging information.')
        zipdata = to_unicode(zipdata, errors='strict')

        shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars)
        if shebang is None:
            shebang = u'#!/usr/bin/python'

        executable = interpreter.split(u' ', 1)
        if len(executable) == 2 and executable[0].endswith(u'env'):
            # Handle /usr/bin/env python style interpreter settings
            interpreter = u"'{0}', '{1}'".format(*executable)
        else:
            # Still have to enclose the parts of the interpreter in quotes
            # because we're substituting it into the template as a python
            # string
            interpreter = u"'{0}'".format(interpreter)

        output.write(to_bytes(ACTIVE_ZIPLOADER_TEMPLATE % dict(
            zipdata=zipdata,
            ansible_module=module_name,
            params=python_repred_params,
            shebang=shebang,
            interpreter=interpreter,
            coding=ENCODING_STRING,
        )))
        module_data = output.getvalue()

    elif module_substyle == 'powershell':
        # Module replacer for jsonargs and windows
        lines = module_data.split(b'\n')
        for line in lines:
            if REPLACER_WINDOWS in line:
                ps_data = _slurp(os.path.join(_SNIPPET_PATH, "powershell.ps1"))
                output.write(ps_data)
                py_module_names.add((b'powershell',))
                continue
            output.write(line + b'\n')

        module_data = output.getvalue()

        module_args_json = to_bytes(json.dumps(module_args))
        module_data = module_data.replace(REPLACER_JSONARGS, module_args_json)

        # Powershell/winrm don't actually make use of shebang so we can
        # safely set this here. If we let the fallback code handle this
        # it can fail in the presence of the UTF8 BOM commonly added by
        # Windows text editors
        shebang = u'#!powershell'

        # Sanity check from 1.x days. This is currently useless as we only
        # get here if we are going to substitute powershell.ps1 into the
        # module anyway. Leaving it for when/if we add other powershell
        # module_utils files.
        if (b'powershell',) not in py_module_names:
            raise AnsibleError("missing required import in %s: # POWERSHELL_COMMON" % module_path)

    elif module_substyle == 'jsonargs':
        module_args_json = to_bytes(json.dumps(module_args))

        # these strings could be included in a third-party module but
        # officially they were included in the 'basic' snippet for new-style
        # python modules (which has been replaced with something else in
        # ziploader) If we remove them from jsonargs-style module replacer
        # then we can remove them everywhere.
        python_repred_args = to_bytes(repr(module_args_json))
        module_data = module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
        module_data = module_data.replace(REPLACER_COMPLEX, python_repred_args)
        module_data = module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))

        # The main event -- substitute the JSON args string into the module
        module_data = module_data.replace(REPLACER_JSONARGS, module_args_json)

        facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='strict')
        module_data = module_data.replace(b'syslog.LOG_USER', facility)

    return (module_data, module_style, shebang)
# ******************************************************************************
# NOTE(review): task_vars=dict() is a shared mutable default argument; it is
# never mutated in this function, but callers should still pass their own dict.
def modify_module(module_name, module_path, module_args, task_vars=dict(), module_compression='ZIP_STORED'):
    """
    Used to insert chunks of code into modules before transfer rather than
    doing regular python imports. This allows for more efficient transfer in
    a non-bootstrapping scenario by not moving extra files over the wire and
    also takes care of embedding arguments in the transferred modules.
    This version is done in such a way that local imports can still be
    used in the module code, so IDEs don't have to be aware of what is going on.
    Example:
    from ansible.module_utils.basic import *
    ... will result in the insertion of basic.py into the module
    from the module_utils/ directory in the source tree.
    For powershell, there's equivalent conventions like this:
    # POWERSHELL_COMMON
    which results in the inclusion of the common code from powershell.ps1

    :returns: a ``(module_data, module_style, shebang)`` tuple; shebang is
        returned as text (or passed through unchanged when not a string).
    """
    with open(module_path, 'rb') as f:
        # read in the module source
        module_data = f.read()

    # Delegate the actual substitution/packaging to _find_snippet_imports.
    (module_data, module_style, shebang) = _find_snippet_imports(module_name, module_data, module_path, module_args, task_vars, module_compression)

    if module_style == 'binary':
        return (module_data, module_style, to_unicode(shebang, nonstring='passthru'))
    elif shebang is None:
        # No shebang was computed: reuse (and possibly rewrite) the one found
        # on the module's first line.
        lines = module_data.split(b"\n", 1)
        if lines[0].startswith(b"#!"):
            shebang = lines[0].strip()
            # NOTE(review): shebang is bytes here; under Python 3 str(bytes)
            # yields "b'...'" which would corrupt the split -- confirm this
            # path is only exercised under Python 2 or with text data.
            args = shlex.split(str(shebang[2:]))
            interpreter = args[0]
            interpreter = to_bytes(interpreter)

            new_shebang = to_bytes(_get_shebang(interpreter, task_vars, args[1:])[0], errors='strict', nonstring='passthru')
            if new_shebang:
                lines[0] = shebang = new_shebang

            # Python modules additionally get an explicit coding line.
            if os.path.basename(interpreter).startswith(b'python'):
                lines.insert(1, to_bytes(ENCODING_STRING))
        else:
            # No shebang, assume a binary module?
            pass

        module_data = b"\n".join(lines)
    else:
        shebang = to_bytes(shebang, errors='strict')

    return (module_data, module_style, to_unicode(shebang, nonstring='passthru'))
| Censio/ansible-dev | lib/ansible/executor/module_common.py | Python | gpl-3.0 | 33,971 | [
"VisIt"
] | 52ad4f408280d4bd7805225da8bba0b183a942c36c90d6916d08976135f4eba3 |
#
# @file TestCVTerms_newSetters.py
# @brief CVTerms unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/annotation/test/TestCVTerms_newSetters.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestCVTerms_newSetters(unittest.TestCase):
    """Unit tests for the CVTerm setter API returning libSBML status codes.

    Modernization: ``TestCase.assert_`` is a deprecated alias of
    ``assertTrue`` and was removed in Python 3.12, so all assertions now use
    ``assertTrue``; the checked expressions are unchanged.
    """

    def test_CVTerm_addResource(self):
        # Adding an empty resource must fail and leave the term untouched;
        # a real resource is stored as an rdf:resource attribute.
        term = libsbml.CVTerm(libsbml.MODEL_QUALIFIER)
        resource = "GO6666"
        self.assertTrue( term != None )
        self.assertTrue( term.getQualifierType() == libsbml.MODEL_QUALIFIER )
        i = term.addResource( "")
        self.assertTrue( i == libsbml.LIBSBML_OPERATION_FAILED )
        xa = term.getResources()
        self.assertTrue( xa.getLength() == 0 )
        i = term.addResource(resource)
        self.assertTrue( i == libsbml.LIBSBML_OPERATION_SUCCESS )
        xa = term.getResources()
        self.assertTrue( xa.getLength() == 1 )
        self.assertTrue(( "rdf:resource" == xa.getName(0) ))
        self.assertTrue(( "GO6666" == xa.getValue(0) ))
        _dummyList = [ term ]; _dummyList[:] = []; del _dummyList

    def test_CVTerm_removeResource(self):
        # Removing an unknown resource must fail and keep the list intact;
        # removing the stored resource must empty the list.
        term = libsbml.CVTerm(libsbml.MODEL_QUALIFIER)
        resource = "GO6666"
        self.assertTrue( term != None )
        self.assertTrue( term.getQualifierType() == libsbml.MODEL_QUALIFIER )
        term.addResource(resource)
        xa = term.getResources()
        self.assertTrue( xa.getLength() == 1 )
        i = term.removeResource( "CCC")
        self.assertTrue( i == libsbml.LIBSBML_INVALID_ATTRIBUTE_VALUE )
        xa = term.getResources()
        self.assertTrue( xa.getLength() == 1 )
        i = term.removeResource(resource)
        self.assertTrue( i == libsbml.LIBSBML_OPERATION_SUCCESS )
        xa = term.getResources()
        self.assertTrue( xa.getLength() == 0 )
        _dummyList = [ term ]; _dummyList[:] = []; del _dummyList

    def test_CVTerm_setBiolQualifierType(self):
        # A biological qualifier may only be set while the term's overall
        # qualifier type is BIOLOGICAL_QUALIFIER.
        term = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
        self.assertTrue( term != None )
        self.assertTrue( term.getQualifierType() == libsbml.BIOLOGICAL_QUALIFIER )
        self.assertTrue( term.getModelQualifierType() == libsbml.BQM_UNKNOWN )
        self.assertTrue( term.getBiologicalQualifierType() == libsbml.BQB_UNKNOWN )
        i = term.setBiologicalQualifierType(libsbml.BQB_IS)
        self.assertTrue( i == libsbml.LIBSBML_OPERATION_SUCCESS )
        self.assertTrue( term.getQualifierType() == libsbml.BIOLOGICAL_QUALIFIER )
        self.assertTrue( term.getBiologicalQualifierType() == libsbml.BQB_IS )
        self.assertTrue( term.getModelQualifierType() == libsbml.BQM_UNKNOWN )
        i = term.setQualifierType(libsbml.MODEL_QUALIFIER)
        self.assertTrue( i == libsbml.LIBSBML_OPERATION_SUCCESS )
        self.assertTrue( term.getQualifierType() == libsbml.MODEL_QUALIFIER )
        self.assertTrue( term.getModelQualifierType() == libsbml.BQM_UNKNOWN )
        self.assertTrue( term.getBiologicalQualifierType() == libsbml.BQB_UNKNOWN )
        i = term.setBiologicalQualifierType(libsbml.BQB_IS)
        self.assertTrue( i == libsbml.LIBSBML_INVALID_ATTRIBUTE_VALUE )
        self.assertTrue( term.getQualifierType() == libsbml.MODEL_QUALIFIER )
        self.assertTrue( term.getModelQualifierType() == libsbml.BQM_UNKNOWN )
        self.assertTrue( term.getBiologicalQualifierType() == libsbml.BQB_UNKNOWN )
        _dummyList = [ term ]; _dummyList[:] = []; del _dummyList

    def test_CVTerm_setModelQualifierType(self):
        # Mirror image of the biological-qualifier test: a model qualifier
        # may only be set while the overall type is MODEL_QUALIFIER.
        term = libsbml.CVTerm(libsbml.MODEL_QUALIFIER)
        self.assertTrue( term != None )
        self.assertTrue( term.getQualifierType() == libsbml.MODEL_QUALIFIER )
        self.assertTrue( term.getModelQualifierType() == libsbml.BQM_UNKNOWN )
        self.assertTrue( term.getBiologicalQualifierType() == libsbml.BQB_UNKNOWN )
        i = term.setModelQualifierType(libsbml.BQM_IS)
        self.assertTrue( i == libsbml.LIBSBML_OPERATION_SUCCESS )
        self.assertTrue( term.getQualifierType() == libsbml.MODEL_QUALIFIER )
        self.assertTrue( term.getModelQualifierType() == libsbml.BQM_IS )
        self.assertTrue( term.getBiologicalQualifierType() == libsbml.BQB_UNKNOWN )
        i = term.setQualifierType(libsbml.BIOLOGICAL_QUALIFIER)
        self.assertTrue( i == libsbml.LIBSBML_OPERATION_SUCCESS )
        self.assertTrue( term.getQualifierType() == libsbml.BIOLOGICAL_QUALIFIER )
        self.assertTrue( term.getModelQualifierType() == libsbml.BQM_UNKNOWN )
        self.assertTrue( term.getBiologicalQualifierType() == libsbml.BQB_UNKNOWN )
        i = term.setModelQualifierType(libsbml.BQM_IS)
        self.assertTrue( i == libsbml.LIBSBML_INVALID_ATTRIBUTE_VALUE )
        self.assertTrue( term.getQualifierType() == libsbml.BIOLOGICAL_QUALIFIER )
        self.assertTrue( term.getBiologicalQualifierType() == libsbml.BQB_UNKNOWN )
        self.assertTrue( term.getModelQualifierType() == libsbml.BQM_UNKNOWN )
        _dummyList = [ term ]; _dummyList[:] = []; del _dummyList
def suite():
    """Assemble and return the TestSuite for this module's test case."""
    test_suite = unittest.TestSuite()
    # unittest.makeSuite() is deprecated since Python 3.11 and removed in
    # 3.13; TestLoader.loadTestsFromTestCase is the supported replacement.
    test_suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestCVTerms_newSetters))
    return test_suite
if __name__ == "__main__":
    # Run this module's suite directly and convert the unittest result into a
    # conventional process exit status (0 = all tests passed, 1 = failures).
    if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
        sys.exit(0)
    else:
        sys.exit(1)
| TheCoSMoCompany/biopredyn | Prototype/src/libsbml-5.10.0/src/bindings/python/test/annotation/TestCVTerms_newSetters.py | Python | bsd-3-clause | 6,074 | [
"VisIt"
] | dc1984d8b5366f5ca32322ab42e523dd2027d7d2cb25dad8c6500ea7455d7ef5 |
# Parsing congress.gov/members/
import requests
from lxml import html
from lxml.html import fromstring
# FIRST: Get Senators' and Representatives' profile URLs from the
# congress.gov member pull-down menus.
page = requests.get('https://www.congress.gov/members?pageSize=250&q={"congress":"115"}')
tree = html.fromstring(page.content)

# Senator profile URLs (the first <option> is a placeholder, so drop it).
hrefs = tree.xpath('//select[@id="members-senators"]/option/@value')
del hrefs[0]

# Representative profile URLs (same placeholder convention).
representativehrefs = tree.xpath('//select[@id="members-representatives"]/option/@value')
del representativehrefs[0]

# Combine both chambers into one work list.  (The original hard-coded 441
# representatives; extending with the whole list keeps working if the
# chamber size changes.)
hrefs.extend(representativehrefs)

# SECOND: Visit every member page and extract the data.
# BUG FIX: the original loop was `for href in range(1)` -- a debugging
# leftover that only visited the first member and discarded the results.
# Iterate over all URLs and accumulate one record per member.
members = []
for href in hrefs:
    hpage = requests.get(href)
    htree = html.fromstring(hpage.content)

    # Member name(s) from the page heading.
    names = htree.xpath('//h1[@class="legDetail"]/text()')

    # State, district and time-in-congress repeat in groups of three cells;
    # slicing replaces the original hand-rolled modulo counter.
    statedistrictcon = htree.xpath('//table[@class="standard01 lateral01"]/tbody/tr/td/text()')
    states = statedistrictcon[0::3]
    districts = statedistrictcon[1::3]
    times = statedistrictcon[2::3]

    # Official website link(s).
    websites = htree.xpath('//table[@class="standard01 nomargin"]/tr/td/a/@href')

    # Address, phone and party repeat in groups of five cells; the first two
    # cells of each group are labels that the original code skipped too.
    contacts = htree.xpath('//table[@class="standard01 nomargin"]/tr/td/text()')
    addresses = contacts[2::5]
    phones = contacts[3::5]
    parties = contacts[4::5]

    # Collect everything so a later step (e.g. a database insert) can use it.
    members.append({
        'url': href,
        'names': names,
        'states': states,
        'districts': districts,
        'times': times,
        'websites': websites,
        'addresses': addresses,
        'phones': phones,
        'parties': parties,
    })
| walke469/spartahack-17 | parsingstep2forreal.py | Python | bsd-2-clause | 2,479 | [
"VisIt"
] | f937c757d189d4eaaff47ab296e271ba3b0ef903ae3bcfd24ca1213a2546f2fb |
from pathlib import Path
import json
import numpy as np
import pytest
import psi4
from psi4.driver.procrouting.response.scf_response import tdscf_excitations
from .utils import *
## pytest marks used to select subsets of the TDSCF test matrix
# reference type (restricted/unrestricted SCF and target spin state)
UHF = pytest.mark.unrestricted
RHF_singlet = pytest.mark.restricted_singlet
RHF_triplet = pytest.mark.restricted_triplet
# functional types (rungs of the density-functional ladder exercised)
hf = pytest.mark.hf
lda = pytest.mark.lda
gga = pytest.mark.gga
hyb_gga = pytest.mark.hyb_gga
hyb_gga_lrc = pytest.mark.hyb_gga_lrc
# response type (full RPA vs. the Tamm-Dancoff approximation)
RPA = pytest.mark.RPA
TDA = pytest.mark.TDA
@pytest.fixture
def reference_data():
    """Load the TDSCF reference excitation data (generated with G09.E01)."""
    ref_path = Path(__file__).parent / "tdscf_reference_data.json"
    with ref_path.open() as handle:
        return json.load(handle)
@pytest.fixture
def molecules():
    """Return psi4 Molecule objects for the systems used in the TDSCF tests.

    ``no_reorient``/``no_com`` keep each geometry fixed in space so that
    transition moments are directly comparable with the reference data.
    """
    smols = {
        # Canonical unrestricted system (triplet carbene)
        "CH2":
        """0 3
C 0.000000 0.000000 0.159693
H -0.000000 0.895527 -0.479080
H -0.000000 -0.895527 -0.479080
no_reorient
no_com
""",
        # Canonical restricted system
        "H2O":
        """0 1
O 0.000000 0.000000 0.135446
H -0.000000 0.866812 -0.541782
H -0.000000 -0.866812 -0.541782
no_reorient
no_com
""",
        # Canonical chiral system
        "H2O2":
        """0 1
O 0.000000 0.695000 -0.092486
O -0.000000 -0.695000 -0.092486
H -0.388142 0.895249 0.739888
H 0.388142 -0.895249 0.739888
no_reorient
no_com
""",
        # Slightly larger chiral system
        "METHYLOXIRANE":
        """0 1
C 0.152133 -0.035800 0.485797
C -1.039475 0.615938 -0.061249
C 1.507144 0.097806 -0.148460
O -0.828215 -0.788248 -0.239431
H 0.153725 -0.249258 1.552136
H -1.863178 0.881921 0.593333
H -0.949807 1.214210 -0.962771
H 2.076806 -0.826189 -0.036671
H 2.074465 0.901788 0.325106
H 1.414895 0.315852 -1.212218
no_reorient
no_com
""",
    }
    return {k: psi4.core.Molecule.from_string(v) for k, v in smols.items()}
def _oscillator_strength(e: float, tm: np.ndarray, gauge: str = "L") -> float:
if gauge == "L":
return ((2 * e) / 3) * np.sum(tm**2)
else:
return (2 / (3 * e)) * np.sum(tm**2)
def _rotatory_strength(e: float, etm: np.ndarray, mtm: np.ndarray, gauge: str = "L") -> float:
"""Compute the rotatory strength from the G09 reference values.
Notes
-----
1. Signs are flipped with respect to the definition!
2. The magnetic dipole moment is really the angular momentum, so we scale
it by 1/2 (Bohr magneton) to get the magnetic dipole.
"""
if gauge == "L":
return -np.einsum("i,i", etm, 0.5 * mtm)
else:
return np.einsum("i,i", etm, 0.5 * mtm) / e
@pytest.mark.tdscf
@pytest.mark.parametrize("mol,ref,func,ptype,basis", [
    pytest.param( "CH2", 'UHF', 'SVWN', 'RPA', 'cc-pvdz', marks=[lda, UHF, RPA, pytest.mark.quick]),
    pytest.param( "CH2", 'UHF', 'SVWN', 'TDA', 'cc-pvdz', marks=[lda, UHF, TDA, pytest.mark.quick]),
    pytest.param( "H2O", 'RHF-1', 'SVWN', 'RPA', 'cc-pvdz', marks=[lda, RHF_singlet, RPA, pytest.mark.quick]),
    pytest.param( "H2O", 'RHF-1', 'SVWN', 'TDA', 'cc-pvdz', marks=[lda, RHF_singlet, TDA, pytest.mark.quick]),
    pytest.param( "H2O", 'RHF-3', 'SVWN', 'RPA', 'cc-pvdz', marks=[lda, RHF_triplet, RPA, pytest.mark.quick]),
    pytest.param( "H2O", 'RHF-3', 'SVWN', 'TDA', 'cc-pvdz', marks=[lda, RHF_triplet, TDA, pytest.mark.quick]),
    pytest.param( "H2O2", 'RHF-1', 'SVWN', 'RPA', 'cc-pvdz', marks=[lda, RHF_singlet, RPA, pytest.mark.quick]),
    pytest.param( "H2O2", 'RHF-1', 'SVWN', 'TDA', 'cc-pvdz', marks=[lda, RHF_singlet, TDA, pytest.mark.quick]),
    pytest.param( "H2O2", 'RHF-3', 'SVWN', 'RPA', 'cc-pvdz', marks=[lda, RHF_triplet, RPA, pytest.mark.quick]),
    pytest.param( "H2O2", 'RHF-3', 'SVWN', 'TDA', 'cc-pvdz', marks=[lda, RHF_triplet, TDA, pytest.mark.quick]),
    pytest.param( "METHYLOXIRANE", 'RHF-1', 'SVWN', 'RPA', 'cc-pvdz', marks=[lda, RHF_singlet, RPA, pytest.mark.quick]),
    pytest.param( "METHYLOXIRANE", 'RHF-1', 'SVWN', 'TDA', 'cc-pvdz', marks=[lda, RHF_singlet, TDA, pytest.mark.quick]),
    pytest.param( "METHYLOXIRANE", 'RHF-3', 'SVWN', 'RPA', 'cc-pvdz', marks=[lda, RHF_triplet, RPA, pytest.mark.quick]),
    pytest.param( "METHYLOXIRANE", 'RHF-3', 'SVWN', 'TDA', 'cc-pvdz', marks=[lda, RHF_triplet, TDA, pytest.mark.quick]),
    pytest.param( "CH2", 'UHF', 'HF', 'RPA', 'cc-pvdz', marks=[hf, UHF, RPA, pytest.mark.quick]),
    pytest.param( "CH2", 'UHF', 'HF', 'TDA', 'cc-pvdz', marks=[hf, UHF, TDA, pytest.mark.quick]),
    pytest.param( "H2O", 'RHF-1', 'HF', 'RPA', 'cc-pvdz', marks=[hf, RHF_singlet, RPA, pytest.mark.quick]),
    pytest.param( "H2O", 'RHF-1', 'HF', 'TDA', 'cc-pvdz', marks=[hf, RHF_singlet, TDA, pytest.mark.quick]),
    pytest.param( "H2O", 'RHF-3', 'HF', 'RPA', 'cc-pvdz', marks=[hf, RHF_triplet, RPA, pytest.mark.quick]),
    pytest.param( "H2O", 'RHF-3', 'HF', 'TDA', 'cc-pvdz', marks=[hf, RHF_triplet, TDA, pytest.mark.quick]),
    pytest.param( "H2O2", 'RHF-1', 'HF', 'RPA', 'cc-pvdz', marks=[hf, RHF_singlet, RPA, pytest.mark.quick]),
    pytest.param( "H2O2", 'RHF-1', 'HF', 'TDA', 'cc-pvdz', marks=[hf, RHF_singlet, TDA, pytest.mark.quick]),
    pytest.param( "H2O2", 'RHF-3', 'HF', 'RPA', 'cc-pvdz', marks=[hf, RHF_triplet, RPA, pytest.mark.quick]),
    pytest.param( "H2O2", 'RHF-3', 'HF', 'TDA', 'cc-pvdz', marks=[hf, RHF_triplet, TDA, pytest.mark.quick]),
    pytest.param( "METHYLOXIRANE", 'RHF-1', 'HF', 'RPA', 'cc-pvdz', marks=[hf, RHF_singlet, RPA]),
    pytest.param( "METHYLOXIRANE", 'RHF-1', 'HF', 'TDA', 'cc-pvdz', marks=[hf, RHF_singlet, TDA]),
    pytest.param( "METHYLOXIRANE", 'RHF-3', 'HF', 'RPA', 'cc-pvdz', marks=[hf, RHF_triplet, RPA]),
    pytest.param( "METHYLOXIRANE", 'RHF-3', 'HF', 'TDA', 'cc-pvdz', marks=[hf, RHF_triplet, TDA]),
    pytest.param( "CH2", 'UHF', 'HCTH93', 'RPA', 'cc-pvdz', marks=[gga, UHF, RPA]),
    pytest.param( "CH2", 'UHF', 'HCTH93', 'TDA', 'cc-pvdz', marks=[gga, UHF, TDA]),
    pytest.param( "H2O", 'RHF-1', 'HCTH93', 'RPA', 'cc-pvdz', marks=[gga, RHF_singlet, RPA]),
    pytest.param( "H2O", 'RHF-1', 'HCTH93', 'TDA', 'cc-pvdz', marks=[gga, RHF_singlet, TDA]),
    pytest.param( "H2O", 'RHF-3', 'HCTH93', 'RPA', 'cc-pvdz', marks=[gga, RHF_triplet, RPA]),
    pytest.param( "H2O", 'RHF-3', 'HCTH93', 'TDA', 'cc-pvdz', marks=[gga, RHF_triplet, TDA]),
    pytest.param( "H2O2", 'RHF-1', 'HCTH93', 'RPA', 'cc-pvdz', marks=[gga, RHF_singlet, RPA]),
    pytest.param( "H2O2", 'RHF-1', 'HCTH93', 'TDA', 'cc-pvdz', marks=[gga, RHF_singlet, TDA]),
    pytest.param( "H2O2", 'RHF-3', 'HCTH93', 'RPA', 'cc-pvdz', marks=[gga, RHF_triplet, RPA]),
    pytest.param( "H2O2", 'RHF-3', 'HCTH93', 'TDA', 'cc-pvdz', marks=[gga, RHF_triplet, TDA]),
    pytest.param( "METHYLOXIRANE", 'RHF-1', 'HCTH93', 'RPA', 'cc-pvdz', marks=[gga, RHF_singlet, RPA]),
    pytest.param( "METHYLOXIRANE", 'RHF-1', 'HCTH93', 'TDA', 'cc-pvdz', marks=[gga, RHF_singlet, TDA]),
    pytest.param( "METHYLOXIRANE", 'RHF-3', 'HCTH93', 'RPA', 'cc-pvdz', marks=[gga, RHF_triplet, RPA]),
    pytest.param( "METHYLOXIRANE", 'RHF-3', 'HCTH93', 'TDA', 'cc-pvdz', marks=[gga, RHF_triplet, TDA]),
    pytest.param( "CH2", 'UHF', 'PBE0', 'RPA', 'cc-pvdz', marks=[hyb_gga, UHF, RPA]),
    pytest.param( "CH2", 'UHF', 'PBE0', 'TDA', 'cc-pvdz', marks=[hyb_gga, UHF, TDA]),
    pytest.param( "H2O", 'RHF-1', 'PBE0', 'RPA', 'cc-pvdz', marks=[hyb_gga, RHF_singlet, RPA]),
    pytest.param( "H2O", 'RHF-1', 'PBE0', 'TDA', 'cc-pvdz', marks=[hyb_gga, RHF_singlet, TDA]),
    pytest.param( "H2O", 'RHF-3', 'PBE0', 'RPA', 'cc-pvdz', marks=[hyb_gga, RHF_triplet, RPA]),
    pytest.param( "H2O", 'RHF-3', 'PBE0', 'TDA', 'cc-pvdz', marks=[hyb_gga, RHF_triplet, TDA]),
    pytest.param( "H2O2", 'RHF-1', 'PBE0', 'RPA', 'cc-pvdz', marks=[hyb_gga, RHF_singlet, RPA]),
    pytest.param( "H2O2", 'RHF-1', 'PBE0', 'TDA', 'cc-pvdz', marks=[hyb_gga, RHF_singlet, TDA]),
    pytest.param( "H2O2", 'RHF-3', 'PBE0', 'RPA', 'cc-pvdz', marks=[hyb_gga, RHF_triplet, RPA]),
    pytest.param( "H2O2", 'RHF-3', 'PBE0', 'TDA', 'cc-pvdz', marks=[hyb_gga, RHF_triplet, TDA]),
    pytest.param( "METHYLOXIRANE", 'RHF-1', 'PBE0', 'RPA', 'cc-pvdz', marks=[hyb_gga, RHF_singlet, RPA]),
    pytest.param( "METHYLOXIRANE", 'RHF-1', 'PBE0', 'TDA', 'cc-pvdz', marks=[hyb_gga, RHF_singlet, TDA]),
    pytest.param( "METHYLOXIRANE", 'RHF-3', 'PBE0', 'RPA', 'cc-pvdz', marks=[hyb_gga, RHF_triplet, RPA]),
    pytest.param( "METHYLOXIRANE", 'RHF-3', 'PBE0', 'TDA', 'cc-pvdz', marks=[hyb_gga, RHF_triplet, TDA]),
    pytest.param( "CH2", 'UHF', 'wB97X', 'RPA', 'cc-pvdz', marks=[hyb_gga_lrc, UHF, RPA]),
    pytest.param( "CH2", 'UHF', 'wB97X', 'TDA', 'cc-pvdz', marks=[hyb_gga_lrc, UHF, TDA]),
    pytest.param( "H2O", 'RHF-1', 'wB97X', 'RPA', 'cc-pvdz', marks=[hyb_gga_lrc, RHF_singlet, RPA]),
    pytest.param( "H2O", 'RHF-1', 'wB97X', 'TDA', 'cc-pvdz', marks=[hyb_gga_lrc, RHF_singlet, TDA]),
    pytest.param( "H2O", 'RHF-3', 'wB97X', 'RPA', 'cc-pvdz', marks=[hyb_gga_lrc, RHF_triplet, RPA]),
    pytest.param( "H2O", 'RHF-3', 'wB97X', 'TDA', 'cc-pvdz', marks=[hyb_gga_lrc, RHF_triplet, TDA]),
    pytest.param( "H2O2", 'RHF-1', 'wB97X', 'RPA', 'cc-pvdz', marks=[hyb_gga_lrc, RHF_singlet, RPA]),
    pytest.param( "H2O2", 'RHF-1', 'wB97X', 'TDA', 'cc-pvdz', marks=[hyb_gga_lrc, RHF_singlet, TDA]),
    pytest.param( "H2O2", 'RHF-3', 'wB97X', 'RPA', 'cc-pvdz', marks=[hyb_gga_lrc, RHF_triplet, RPA]),
    pytest.param( "H2O2", 'RHF-3', 'wB97X', 'TDA', 'cc-pvdz', marks=[hyb_gga_lrc, RHF_triplet, TDA]),
    pytest.param( "METHYLOXIRANE", 'RHF-1', 'wB97X', 'RPA', 'cc-pvdz', marks=[hyb_gga_lrc, RHF_singlet, RPA]),
    pytest.param( "METHYLOXIRANE", 'RHF-1', 'wB97X', 'TDA', 'cc-pvdz', marks=[hyb_gga_lrc, RHF_singlet, TDA]),
    pytest.param( "METHYLOXIRANE", 'RHF-3', 'wB97X', 'RPA', 'cc-pvdz', marks=[hyb_gga_lrc, RHF_triplet, RPA]),
    pytest.param( "METHYLOXIRANE", 'RHF-3', 'wB97X', 'TDA', 'cc-pvdz', marks=[hyb_gga_lrc, RHF_triplet, TDA]),
]) # yapf: disable
def test_tdscf(mol, ref, func, ptype, basis, molecules, reference_data):
    """Run one TDSCF calculation and compare excitation energies,
    oscillator strengths and rotatory strengths (length and velocity
    gauge) against the stored Gaussian 09 reference values.
    """
    # expected failures
    if (ref == 'RHF-3') and (func != "HF"):
        pytest.xfail("RKS Vx kernel only Spin Adapted for Singlet")
    elif (ref == 'UHF' and func != 'SVWN'):
        pytest.xfail("UKS Vx kernel bug for non-LDA")
    molecule = molecules[mol]
    psi4.set_options({'scf_type': 'pk', 'e_convergence': 8, 'd_convergence': 8, 'save_jk': True})
    if ref == "UHF":
        psi4.set_options({'reference': 'UHF'})
        # NOTE(review): indentation reconstructed -- symmetry appears to be
        # switched off for the unrestricted runs; confirm against upstream.
        molecule.reset_point_group('c1')
    _, wfn = psi4.energy(f"{func}/{basis}", return_wfn=True, molecule=molecule)
    out = tdscf_excitations(wfn,
                            states=4,
                            maxiter=30,
                            r_convergence=1.0e-6,
                            triplets="ONLY" if ref == "RHF-3" else "NONE",
                            tda=True if ptype == "TDA" else False)
    ref_v = reference_data[f"{mol}_{ref}_{func}_{ptype}"]
    for i, my_v in enumerate(out):
        # compare excitation energies
        ref_e = ref_v[i]["EXCITATION ENERGY"]
        assert compare_values(ref_e,
                              my_v["EXCITATION ENERGY"],
                              f"{mol}_{ref}_{func}_{ptype}-ROOT_{i+1} Excitation energy",
                              atol=2.0e-4)
        ref_edtm_L = np.array(ref_v[i]["LENGTH MU"])
        # compare length-gauge oscillator strength
        ref_f_L = _oscillator_strength(ref_e, ref_edtm_L, "L")
        assert compare_values(ref_f_L,
                              my_v["OSCILLATOR STRENGTH (LEN)"],
                              f"{mol}_{ref}_{func}_{ptype}-ROOT_{i+1} Length-gauge oscillator strength",
                              atol=1.0e-3)
        ref_edtm_V = np.array(ref_v[i]["VELOCITY MU"])
        # compare velocity-gauge oscillator strengths
        ref_f_V = _oscillator_strength(ref_e, ref_edtm_V, "V")
        assert compare_values(ref_f_V,
                              my_v["OSCILLATOR STRENGTH (VEL)"],
                              f"{mol}_{ref}_{func}_{ptype}-ROOT_{i+1} Velocity-gauge oscillator strength",
                              atol=1.0e-2)
        ref_mdtm = np.array(ref_v[i]["M"])
        # compare length-gauge rotatory strengths
        ref_R_L = _rotatory_strength(ref_e, ref_edtm_L, ref_mdtm, "L")
        assert compare_values(ref_R_L,
                              my_v["ROTATORY STRENGTH (LEN)"],
                              f"{mol}_{ref}_{func}_{ptype}-ROOT_{i+1} Length-gauge rotatory strength",
                              atol=2.0e-3)
        # compare velocity-gauge rotatory strengths
        ref_R_V = _rotatory_strength(ref_e, ref_edtm_V, ref_mdtm, "V")
        assert compare_values(ref_R_V,
                              my_v["ROTATORY STRENGTH (VEL)"],
                              f"{mol}_{ref}_{func}_{ptype}-ROOT_{i+1} Velocity-gauge rotatory strength",
                              atol=2.0e-3)
| jgonthier/psi4 | tests/pytests/test_tdscf_excitations.py | Python | lgpl-3.0 | 13,651 | [
"Psi4"
] | 899b434e607ef809674f54bb917ba8a5d72ee9d0b8b673bf2160ac6e6dbaf625 |
"""
Constructive Solid Geometry file formats.
"""
from galaxy.datatypes import data
from galaxy.datatypes.binary import Binary
from galaxy.datatypes.data import get_file_peek
from galaxy.datatypes.data import nice_size
from galaxy.datatypes.metadata import MetadataElement
from galaxy import util
# Maximum number of header lines scanned when sniffing a PLY/VTK header.
MAX_HEADER_LINES = 500
# Maximum number of characters read per line (guards against binary blobs).
MAX_LINE_LEN = 2000
# Color-related keywords; NOTE(review): not referenced in this module's
# visible code -- presumably consumed elsewhere.
COLOR_OPTS = ['COLOR_SCALARS', 'red', 'green', 'blue']
class Ply(object):
    """
    The PLY format describes an object as a collection of vertices,
    faces and other elements, along with properties such as color and
    normal direction that can be attached to these elements. A PLY
    file contains the description of exactly one object.
    """
    # Add metadata elements.
    MetadataElement(name="file_format", default=None, desc="File format",
                    readonly=True, optional=True, visible=True, no_value=None)
    MetadataElement(name="vertex", default=None, desc="Vertex",
                    readonly=True, optional=True, visible=True, no_value=None)
    MetadataElement(name="face", default=None, desc="Face",
                    readonly=True, optional=True, visible=True, no_value=None)
    MetadataElement(name="other_elements", default=[], desc="Other elements",
                    readonly=True, optional=True, visible=True, no_value=[])

    def sniff(self, filename, subtype):
        """
        Return True if the file starts with a valid PLY header of the
        given subtype ('ascii' or 'binary').

        The structure of a typical PLY file:
        Header, Vertex List, Face List, (lists of other elements)
        """
        # Simplified: a previous version had an unreachable
        # ``return False`` after the ``with`` block.
        with open(filename, "r") as fh:
            return self._is_ply_header(fh, subtype)

    def _is_ply_header(self, fh, subtype):
        """
        The header is a series of carriage-return terminated lines of
        text that describe the remainder of the file.
        """
        valid_header_items = ['comment', 'obj_info', 'element', 'property']
        # Line 1: ply
        line = get_next_line(fh)
        if line != 'ply':
            return False
        # Line 2: format ascii 1.0 (or format binary...)
        line = get_next_line(fh)
        if line.find(subtype) < 0:
            return False
        stop_index = 0
        while True:
            line = get_next_line(fh)
            stop_index += 1
            if line == 'end_header':
                return True
            items = line.split()
            # Guard against blank lines, which would previously raise
            # IndexError on items[0]; a blank line is not a valid header.
            if not items or items[0] not in valid_header_items:
                return False
            if stop_index > MAX_HEADER_LINES:
                # If this is a PLY file, there must be an unusually
                # large number of comments.
                break
        return False

    def set_meta(self, dataset, **kwd):
        """Parse the PLY header and populate format/vertex/face metadata."""
        if dataset.has_data():
            with open(dataset.file_name) as fh:
                for line in fh:
                    line = line.strip()
                    if not line:
                        continue
                    if line.startswith('format'):
                        # e.g. "format ascii 1.0"
                        items = line.split()
                        dataset.metadata.file_format = items[1]
                    elif line == 'end_header':
                        # Metadata is complete.
                        break
                    elif line.startswith('element'):
                        # e.g. "element vertex 8" / "element face 6"
                        items = line.split()
                        if items[1] == 'face':
                            dataset.metadata.face = int(items[2])
                        elif items[1] == 'vertex':
                            dataset.metadata.vertex = int(items[2])
                        else:
                            element_tuple = (items[1], int(items[2]))
                            dataset.metadata.other_elements.append(element_tuple)

    def set_peek(self, dataset, is_multi_byte=False):
        """Set the peek text and a face/vertex-count blurb."""
        if not dataset.dataset.purged:
            dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
            dataset.blurb = "Faces: %s, Vertices: %s" % (str(dataset.metadata.face), str(dataset.metadata.vertex))
        else:
            dataset.peek = 'File does not exist'
            dataset.blurb = 'File purged from disc'

    def display_peek(self, dataset):
        """Return the stored peek, or a generic size blurb when unavailable."""
        try:
            return dataset.peek
        except Exception:
            # Narrowed from a bare except: so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            return "Ply file (%s)" % (nice_size(dataset.get_size()))
class PlyAscii(Ply, data.Text):
    """PLY datatype whose body is stored in ASCII form."""
    file_ext = "plyascii"

    def __init__(self, **kwd):
        data.Text.__init__(self, **kwd)

    def sniff(self, filename):
        # Delegate to Ply.sniff, requiring an 'ascii' format header line.
        return super(PlyAscii, self).sniff(filename, subtype='ascii')
class PlyBinary(Ply, Binary):
    """PLY datatype whose body is stored in binary form."""
    file_ext = "plybinary"

    def __init__(self, **kwd):
        Binary.__init__(self, **kwd)

    def sniff(self, filename):
        # Delegate to Ply.sniff, requiring a 'binary' format header line.
        return super(PlyBinary, self).sniff(filename, subtype='binary')

# Register so uploads are auto-detected as plybinary.
Binary.register_sniffable_binary_format("plybinary", "plybinary", PlyBinary)
class Vtk(object):
    """
    The Visualization Toolkit provides a number of source and writer objects to
    read and write popular data file formats. The Visualization Toolkit also
    provides some of its own file formats.

    There are two different styles of file formats available in VTK. The simplest
    are the legacy, serial formats that are easy to read and write either by hand
    or programmatically. However, these formats are less flexible than the XML
    based file formats which support random access, parallel I/O, and portable
    data compression and are preferred to the serial VTK file formats whenever
    possible.

    All keyword phrases are written in ASCII form whether the file is binary or
    ASCII. The binary section of the file (if in binary form) is the data proper;
    i.e., the numbers that define points coordinates, scalars, cell indices, and
    so forth.

    Binary data must be placed into the file immediately after the newline (\n)
    character from the previous ASCII keyword and parameter sequence.

    TODO: only legacy formats are currently supported and support for XML formats
    should be added.
    """
    # Add metadata elements.
    MetadataElement(name="vtk_version", default=None, desc="Vtk version",
                    readonly=True, optional=True, visible=True, no_value=None)
    MetadataElement(name="file_format", default=None, desc="File format",
                    readonly=True, optional=True, visible=True, no_value=None)
    MetadataElement(name="dataset_type", default=None, desc="Dataset type",
                    readonly=True, optional=True, visible=True, no_value=None)
    # STRUCTURED_GRID data_type.
    MetadataElement(name="dimensions", default=[], desc="Dimensions",
                    readonly=True, optional=True, visible=True, no_value=[])
    MetadataElement(name="origin", default=[], desc="Origin",
                    readonly=True, optional=True, visible=True, no_value=[])
    MetadataElement(name="spacing", default=[], desc="Spacing",
                    readonly=True, optional=True, visible=True, no_value=[])
    # POLYDATA data_type (Points element is also a component of UNSTRUCTURED_GRID..
    MetadataElement(name="points", default=None, desc="Points",
                    readonly=True, optional=True, visible=True, no_value=None)
    MetadataElement(name="vertices", default=None, desc="Vertices",
                    readonly=True, optional=True, visible=True, no_value=None)
    MetadataElement(name="lines", default=None, desc="Lines",
                    readonly=True, optional=True, visible=True, no_value=None)
    MetadataElement(name="polygons", default=None, desc="Polygons",
                    readonly=True, optional=True, visible=True, no_value=None)
    MetadataElement(name="triangle_strips", default=None, desc="Triangle strips",
                    readonly=True, optional=True, visible=True, no_value=None)
    # UNSTRUCTURED_GRID data_type.
    MetadataElement(name="cells", default=None, desc="Cells",
                    readonly=True, optional=True, visible=True, no_value=None)
    # Additional elements not categorized by data_type.
    MetadataElement(name="field_names", default=[], desc="Field names",
                    readonly=True, optional=True, visible=True, no_value=[])
    # The keys in the field_components map to the list of field_names in the above element
    # which ensures order for select list options that are built from it.
    MetadataElement(name="field_components", default={}, desc="Field names and components",
                    readonly=True, optional=True, visible=True, no_value={})

    def sniff(self, filename, subtype):
        """
        VTK files can be either ASCII or binary, with two different
        styles of file formats: legacy or XML. We'll assume if the
        file contains a valid VTK header, then it is a valid VTK file.
        """
        with open(filename, "r") as fh:
            if self._is_vtk_header(fh, subtype):
                return True
            return False
        # NOTE(review): unreachable -- the with block always returns.
        return False

    def _is_vtk_header(self, fh, subtype):
        """
        The Header section consists of at least 4, but possibly
        5 lines. This is tricky because sometimes the 4th line
        is blank (in which case the 5th line consists of the
        data_kind) or the 4th line consists of the data_kind (in
        which case the 5th line is blank).
        """
        data_kinds = ['STRUCTURED_GRID', 'POLYDATA', 'UNSTRUCTURED_GRID']

        def check_data_kind(line):
            # True when the line names one of the supported dataset kinds.
            for data_kind in data_kinds:
                if line.find(data_kind) >= 0:
                    return True
            return False
        # Line 1: vtk DataFile Version 3.0
        line = get_next_line(fh)
        if line.find('vtk') < 0:
            return False
        # Line 2: can be anything - skip it
        line = get_next_line(fh)
        # Line 3: ASCII or BINARY
        line = get_next_line(fh)
        if line.find(subtype) < 0:
            return False
        # Line 4: either blank or the dataset kind.
        line = get_next_line(fh)
        if line:
            return check_data_kind(line)
        # line 5: the dataset kind when line 4 was blank.
        line = get_next_line(fh)
        if line:
            return check_data_kind(line)
        return False

    def set_meta(self, dataset, **kwd):
        """Parse a legacy VTK file and populate version, format, structure
        and dataset-attribute (field) metadata."""
        if dataset.has_data():
            dataset.metadata.field_names = []
            dataset.metadata.field_components = {}
            dataset_type = None
            field_components = {}
            dataset_structure_complete = False
            processing_field_section = False
            with open(dataset.file_name) as fh:
                for i, line in enumerate(fh):
                    line = line.strip()
                    if not line:
                        continue
                    if i < 3:
                        # Header lines: version / title / ASCII|BINARY.
                        dataset = self.set_initial_metadata(i, line, dataset)
                    elif dataset.metadata.file_format == 'ASCII' or not util.is_binary(line):
                        if dataset_structure_complete:
                            """
                            The final part of legacy VTK files describes the dataset attributes.
                            This part begins with the keywords POINT_DATA or CELL_DATA, followed
                            by an integer number specifying the number of points or cells,
                            respectively. Other keyword/data combinations then define the actual
                            dataset attribute values (i.e., scalars, vectors, tensors, normals,
                            texture coordinates, or field data). Dataset attributes are supported
                            for both points and cells.
                            Each type of attribute data has a dataName associated with it. This is
                            a character string (without embedded whitespace) used to identify a
                            particular data. The dataName is used by the VTK readers to extract
                            data. As a result, more than one attribute data of the same type can be
                            included in a file. For example, two different scalar fields defined
                            on the dataset points, pressure and temperature, can be contained in
                            the same file. If the appropriate dataName is not specified in the VTK
                            reader, then the first data of that type is extracted from the file.
                            """
                            items = line.split()
                            if items[0] == 'SCALARS':
                                # Example: SCALARS surface_field double 3
                                # Scalar definition includes specification of a lookup table. The
                                # definition of a lookup table is optional. If not specified, the
                                # default VTK table will be used, and tableName should be
                                # "default". Also note that the numComp variable is optional. By
                                # default the number of components is equal to one. The parameter
                                # numComp must range between (1,4) inclusive; in versions of VTK
                                # prior to vtk2.3 this parameter was not supported.
                                field_name = items[1]
                                dataset.metadata.field_names.append(field_name)
                                try:
                                    num_components = int(items[-1])
                                except:
                                    num_components = 1
                                field_component_indexes = [str(i) for i in range(num_components)]
                                field_components[field_name] = field_component_indexes
                            elif items[0] == 'FIELD':
                                # The dataset consists of CELL_DATA.
                                # FIELD FieldData 2
                                processing_field_section = True
                                num_fields = int(items[-1])
                                fields_processed = []
                            elif processing_field_section:
                                if len(fields_processed) == num_fields:
                                    processing_field_section = False
                                else:
                                    try:
                                        float(items[0])
                                        # Don't process the cell data.
                                        # 0.0123457 0.197531
                                    except:
                                        # Line consists of arrayName numComponents numTuples dataType.
                                        # Example: surface_field1 1 12 double
                                        field_name = items[0]
                                        dataset.metadata.field_names.append(field_name)
                                        num_components = int(items[1])
                                        field_component_indexes = [str(i) for i in range(num_components)]
                                        field_components[field_name] = field_component_indexes
                                        fields_processed.append(field_name)
                            elif line.startswith('CELL_DATA'):
                                # CELL_DATA 3188
                                dataset_structure_complete = True
                                dataset.metadata.cells = int(line.split()[1])
                            elif line.startswith('POINT_DATA'):
                                # POINT_DATA 1876
                                dataset_structure_complete = True
                                dataset.metadata.points = int(line.split()[1])
                        else:
                            # Still reading the geometry/topology section.
                            dataset, dataset_type = self.set_structure_metadata(line, dataset, dataset_type)
            if len(field_components) > 0:
                dataset.metadata.field_components = field_components

    def set_initial_metadata(self, i, line, dataset):
        """Extract version (header line 0) and file format (header line 2)."""
        if i == 0:
            # The first part of legacy VTK files is the file version and
            # identifier. This part contains the single line:
            # # vtk DataFile Version X.Y
            dataset.metadata.vtk_version = line.lower().split('version')[1]
            # The second part of legacy VTK files is the header. The header
            # consists of a character string terminated by end-of-line
            # character \n. The header is 256 characters maximum. The header
            # can be used to describe the data and include any other pertinent
            # information. We skip the header line...
        elif i == 2:
            # The third part of legacy VTK files is the file format. The file
            # format describes the type of file, either ASCII or binary. On
            # this line the single word ASCII or BINARY must appear.
            dataset.metadata.file_format = line
        return dataset

    def set_structure_metadata(self, line, dataset, dataset_type):
        """
        The fourth part of legacy VTK files is the dataset structure. The
        geometry part describes the geometry and topology of the dataset.
        This part begins with a line containing the keyword DATASET followed
        by a keyword describing the type of dataset. Then, depending upon
        the type of dataset, other keyword/ data combinations define the
        actual data.
        """
        if dataset_type is None and line.startswith('DATASET'):
            dataset_type = line.split()[1]
            dataset.metadata.dataset_type = dataset_type
        if dataset_type == 'STRUCTURED_GRID':
            # The STRUCTURED_GRID format supports 1D, 2D, and 3D structured
            # grid datasets. The dimensions nx, ny, nz must be greater
            # than or equal to 1. The point coordinates are defined by the
            # data in the POINTS section. This consists of x-y-z data values
            # for each point.
            if line.startswith('DIMENSIONS'):
                # DIMENSIONS 10 5 1
                dataset.metadata.dimensions = [line.split()[1:]]
            elif line.startswith('ORIGIN'):
                # ORIGIN 0 0 0
                dataset.metadata.origin = [line.split()[1:]]
            elif line.startswith('SPACING'):
                # SPACING 1 1 1
                dataset.metadata.spacing = [line.split()[1:]]
        elif dataset_type == 'POLYDATA':
            # The polygonal dataset consists of arbitrary combinations
            # of surface graphics primitives vertices, lines, polygons
            # and triangle strips. Polygonal data is defined by the POINTS,
            # VERTICES, LINES, POLYGONS, or TRIANGLE_STRIPS sections.
            if line.startswith('POINTS'):
                # POINTS 18 float
                dataset.metadata.points = int(line.split()[1])
            elif line.startswith('VERTICES'):
                dataset.metadata.vertices = int(line.split()[1])
            elif line.startswith('LINES'):
                # LINES 5 17
                dataset.metadata.lines = int(line.split()[1])
            elif line.startswith('POLYGONS'):
                # POLYGONS 6 30
                dataset.metadata.polygons = int(line.split()[1])
            elif line.startswith('TRIANGLE_STRIPS'):
                # TRIANGLE_STRIPS 2212 16158
                dataset.metadata.triangle_strips = int(line.split()[1])
        elif dataset_type == 'UNSTRUCTURED_GRID':
            # The unstructured grid dataset consists of arbitrary combinations
            # of any possible cell type. Unstructured grids are defined by points,
            # cells, and cell types.
            if line.startswith('POINTS'):
                # POINTS 18 float
                dataset.metadata.points = int(line.split()[1])
            if line.startswith('CELLS'):
                # CELLS 756 3024
                dataset.metadata.cells = int(line.split()[1])
        return dataset, dataset_type

    def get_blurb(self, dataset):
        """Build a short description from the version and dataset type."""
        blurb = ""
        if dataset.metadata.vtk_version is not None:
            blurb += 'VTK Version %s' % str(dataset.metadata.vtk_version)
        if dataset.metadata.dataset_type is not None:
            if blurb:
                blurb += ' '
            blurb += str(dataset.metadata.dataset_type)
        return blurb or 'VTK data'

    def set_peek(self, dataset, is_multi_byte=False):
        """Set the peek text and blurb."""
        if not dataset.dataset.purged:
            dataset.peek = get_file_peek(dataset.file_name, is_multi_byte=is_multi_byte)
            dataset.blurb = self.get_blurb(dataset)
        else:
            dataset.peek = 'File does not exist'
            dataset.blurb = 'File purged from disc'

    def display_peek(self, dataset):
        """Return the stored peek, or a generic size blurb when unavailable."""
        try:
            return dataset.peek
        except:
            return "Vtk file (%s)" % (nice_size(dataset.get_size()))
class VtkAscii(Vtk, data.Text):
    """Legacy VTK datatype whose data section is ASCII."""
    file_ext = "vtkascii"

    def __init__(self, **kwd):
        data.Text.__init__(self, **kwd)

    def sniff(self, filename):
        # Delegate to Vtk.sniff, requiring an 'ASCII' header line.
        return super(VtkAscii, self).sniff(filename, subtype='ASCII')
class VtkBinary(Vtk, Binary):
    """Legacy VTK datatype whose data section is binary."""
    file_ext = "vtkbinary"

    def __init__(self, **kwd):
        Binary.__init__(self, **kwd)

    def sniff(self, filename):
        # Delegate to Vtk.sniff, requiring a 'BINARY' header line.
        return super(VtkBinary, self).sniff(filename, subtype='BINARY')

# Register so uploads are auto-detected as vtkbinary.
Binary.register_sniffable_binary_format("vtkbinary", "vtkbinary", VtkBinary)
# Utility functions
def get_next_line(fh):
    """Read the next line (at most MAX_LINE_LEN characters) and strip it."""
    return fh.readline(MAX_LINE_LEN).strip()
| icaoberg/cellorganizer-galaxy-tools | datatypes/constructive_solid_geometry.py | Python | gpl-3.0 | 21,604 | [
"Galaxy",
"VTK"
] | 2378c2cf3213ada19e05778e2dd8c2ebb1db84ebda6e62cf174bb14d032dc1cc |
""" Just an utilities collector
"""
import types
from DIRAC import S_OK, S_ERROR
def checkArgumentFormat( path ):
  """ Normalise `path` into an {url: False} dictionary.

      Returns S_OK( {'/this/is/an/lfn.1':False, '/this/is/an/lfn.2':False ...} )
      for a string, list-of-strings or dictionary input; S_ERROR otherwise.
  """
  pathType = type( path )
  if pathType in types.StringTypes:
    return S_OK( { path : False } )
  if pathType == types.ListType:
    # Non-string entries are silently dropped, as before.
    urls = {}
    for url in path:
      if type( url ) in types.StringTypes:
        urls[url] = False
    return S_OK( urls )
  if pathType == types.DictType:
    return S_OK( path )
  return S_ERROR( "Utils.checkArgumentFormat: Supplied path is not of the correct format." )
| sposs/DIRAC | Resources/Utilities/Utils.py | Python | gpl-3.0 | 577 | [
"DIRAC"
] | 02fefa2c3e8d1fcaa1c428ad340595ecd0d9bec23db9526501d5fe0e08a3d9c2 |
"""
View for Courseware Index
"""
# pylint: disable=attribute-defined-outside-init
import logging
import urllib
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.views import redirect_to_login
from django.core.urlresolvers import reverse
from django.http import Http404
from django.template.context_processors import csrf
from django.utils.decorators import method_decorator
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic import View
from opaque_keys.edx.keys import CourseKey
from web_fragments.fragment import Fragment
from edxmako.shortcuts import render_to_response, render_to_string
from lms.djangoapps.courseware.exceptions import CourseAccessRedirect
from lms.djangoapps.experiments.utils import get_experiment_user_metadata_context
from lms.djangoapps.gating.api import get_entrance_exam_score_ratio, get_entrance_exam_usage_key
from lms.djangoapps.grades.course_grade_factory import CourseGradeFactory
from openedx.core.djangoapps.crawlers.models import CrawlersConfig
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.monitoring_utils import set_custom_metrics_for_course_key
from openedx.core.djangoapps.user_api.preferences.api import get_user_preference
from openedx.core.djangoapps.util.user_messages import PageLevelMessages
from openedx.core.djangoapps.waffle_utils import WaffleSwitchNamespace, WaffleFlagNamespace, CourseWaffleFlag
from openedx.core.djangolib.markup import HTML, Text
from openedx.features.course_experience import COURSE_OUTLINE_PAGE_FLAG, default_course_url_name
from openedx.features.course_experience.views.course_sock import CourseSockFragmentView
from openedx.features.enterprise_support.api import data_sharing_consent_required
from shoppingcart.models import CourseRegistrationCode
from student.views import is_course_blocked
from util.views import ensure_valid_course_key
from xmodule.modulestore.django import modulestore
from xmodule.x_module import STUDENT_VIEW
from .views import CourseTabView
from ..access import has_access
from ..access_utils import check_course_open_for_learner
from ..courses import get_course_with_access, get_current_child, get_studio_url
from ..entrance_exams import (
course_has_entrance_exam,
get_entrance_exam_content,
user_can_skip_entrance_exam,
user_has_passed_entrance_exam
)
from ..masquerade import setup_masquerade
from ..model_data import FieldDataCache
from ..module_render import get_module_for_descriptor, toc_for_course
log = logging.getLogger("edx.courseware.views.index")
TEMPLATE_IMPORTS = {'urllib': urllib}
CONTENT_DEPTH = 2
class CoursewareIndex(View):
    """
    View class for the Courseware page.

    The request handler stores per-request state (course key, chapter,
    section, masquerade info, ...) on ``self``; all helper methods read
    and mutate that shared state rather than passing it around.
    """
    @cached_property
    def enable_anonymous_courseware_access(self):
        """
        Whether unauthenticated users may view this course's courseware,
        controlled by the 'seo.enable_anonymous_courseware_access' course
        waffle flag.  Requires ``self.course_key`` to be set first.
        """
        waffle_flag = CourseWaffleFlag(WaffleFlagNamespace(name='seo'), 'enable_anonymous_courseware_access')
        return waffle_flag.is_enabled(self.course_key)
    @method_decorator(ensure_csrf_cookie)
    @method_decorator(cache_control(no_cache=True, no_store=True, must_revalidate=True))
    @method_decorator(ensure_valid_course_key)
    @method_decorator(data_sharing_consent_required)
    def get(self, request, course_id, chapter=None, section=None, position=None):
        """
        Displays courseware accordion and associated content.  If course, chapter,
        and section are all specified, renders the page, or returns an error if they
        are invalid.
        If section is not specified, displays the accordion opened to the right
        chapter.
        If neither chapter or section are specified, displays the user's most
        recent chapter, or the first chapter if this is the user's first visit.
        Arguments:
            request: HTTP request
            course_id (unicode): course id
            chapter (unicode): chapter url_name
            section (unicode): section url_name
            position (unicode): position in module, eg of <sequential> module
        """
        self.course_key = CourseKey.from_string(course_id)
        # Anonymous access is only allowed when the waffle flag is enabled;
        # otherwise unauthenticated users are redirected to login.
        if not (request.user.is_authenticated() or self.enable_anonymous_courseware_access):
            return redirect_to_login(request.get_full_path())
        # Remember what was originally requested so _redirect_if_not_requested_section
        # can detect when the resolved chapter/section differ from the URL.
        self.original_chapter_url_name = chapter
        self.original_section_url_name = section
        self.chapter_url_name = chapter
        self.section_url_name = section
        self.position = position
        self.chapter, self.section = None, None
        self.course = None
        self.url = request.path
        try:
            set_custom_metrics_for_course_key(self.course_key)
            self._clean_position()
            with modulestore().bulk_operations(self.course_key):
                self.course = get_course_with_access(
                    request.user, 'load', self.course_key,
                    depth=CONTENT_DEPTH,
                    check_if_enrolled=not self.enable_anonymous_courseware_access,
                )
                self.is_staff = has_access(request.user, 'staff', self.course)
                self._setup_masquerade_for_effective_user()
                return self.render(request)
        except Exception as exception:  # pylint: disable=broad-except
            # Delegate uniform error handling (404s, redirects, ...) to the tab view.
            return CourseTabView.handle_exceptions(request, self.course, exception)
    def _setup_masquerade_for_effective_user(self):
        """
        Setup the masquerade information to allow the request to
        be processed for the requested effective user.
        """
        self.real_user = self.request.user
        self.masquerade, self.effective_user = setup_masquerade(
            self.request,
            self.course_key,
            self.is_staff,
            reset_masquerade_data=True
        )
        # Set the user in the request to the effective user.
        self.request.user = self.effective_user
    def render(self, request):
        """
        Render the index page.
        """
        self._redirect_if_needed_to_pay_for_course()
        self._prefetch_and_bind_course(request)
        if self.course.has_children_at_depth(CONTENT_DEPTH):
            self._reset_section_to_exam_if_required()
            self.chapter = self._find_chapter()
            self.section = self._find_section()
            if self.chapter and self.section:
                self._redirect_if_not_requested_section()
                self._save_positions()
                self._prefetch_and_bind_section()
        # Anonymous visitors get a banner prompting sign-in/registration with
        # an enrollment redirect baked into the link querystring.
        if not request.user.is_authenticated():
            qs = urllib.urlencode({
                'course_id': self.course_key,
                'enrollment_action': 'enroll',
                'email_opt_in': False,
            })
            PageLevelMessages.register_warning_message(
                request,
                Text(_("You are not signed in. To see additional course content, {sign_in_link} or "
                       "{register_link}, and enroll in this course.")).format(
                    sign_in_link=HTML('<a href="{url}">{sign_in_label}</a>').format(
                        sign_in_label=_('sign in'),
                        url='{}?{}'.format(reverse('signin_user'), qs),
                    ),
                    register_link=HTML('<a href="/{url}">{register_label}</a>').format(
                        register_label=_('register'),
                        url='{}?{}'.format(reverse('register_user'), qs),
                    ),
                )
            )
        return render_to_response('courseware/courseware.html', self._create_courseware_context(request))
    def _redirect_if_not_requested_section(self):
        """
        If the resulting section and chapter are different from what was initially
        requested, redirect back to the index page, but with an updated URL that includes
        the correct section and chapter values. We do this so that our analytics events
        and error logs have the appropriate URLs.
        """
        if (
                self.chapter.url_name != self.original_chapter_url_name or
                (self.original_section_url_name and self.section.url_name != self.original_section_url_name)
        ):
            raise CourseAccessRedirect(
                reverse(
                    'courseware_section',
                    kwargs={
                        'course_id': unicode(self.course_key),
                        'chapter': self.chapter.url_name,
                        'section': self.section.url_name,
                    },
                )
            )
    def _clean_position(self):
        """
        Verify that the given position is an integer. If it is not positive, set it to 1.
        """
        if self.position is not None:
            try:
                self.position = max(int(self.position), 1)
            except ValueError:
                raise Http404(u"Position {} is not an integer!".format(self.position))
    def _redirect_if_needed_to_pay_for_course(self):
        """
        Redirect to dashboard if the course is blocked due to non-payment.
        """
        redeemed_registration_codes = []
        if self.request.user.is_authenticated():
            self.real_user = User.objects.prefetch_related("groups").get(id=self.real_user.id)
            redeemed_registration_codes = CourseRegistrationCode.objects.filter(
                course_id=self.course_key,
                registrationcoderedemption__redeemed_by=self.real_user
            )
        if is_course_blocked(self.request, redeemed_registration_codes, self.course_key):
            # registration codes may be generated via Bulk Purchase Scenario
            # we have to check only for the invoice generated registration codes
            # that their invoice is valid or not
            # TODO Update message to account for the fact that the user is not authenticated.
            log.warning(
                u'User %s cannot access the course %s because payment has not yet been received',
                self.real_user,
                unicode(self.course_key),
            )
            raise CourseAccessRedirect(reverse('dashboard'))
    def _reset_section_to_exam_if_required(self):
        """
        Check to see if an Entrance Exam is required for the user.

        When the user still needs to pass the exam, the requested
        chapter/section are overridden with the exam's first section.
        """
        if not user_can_skip_entrance_exam(self.effective_user, self.course):
            exam_chapter = get_entrance_exam_content(self.effective_user, self.course)
            if exam_chapter and exam_chapter.get_children():
                exam_section = exam_chapter.get_children()[0]
                if exam_section:
                    self.chapter_url_name = exam_chapter.url_name
                    self.section_url_name = exam_section.url_name
    def _get_language_preference(self):
        """
        Returns the preferred language for the actual user making the request.
        """
        language_preference = settings.LANGUAGE_CODE
        if self.request.user.is_authenticated():
            language_preference = get_user_preference(self.real_user, LANGUAGE_KEY)
        return language_preference
    def _is_masquerading_as_student(self):
        """
        Returns whether the current request is masquerading as a student.
        """
        return self.masquerade and self.masquerade.role == 'student'
    def _is_masquerading_as_specific_student(self):
        """
        Returns whether the current request is masquerading as a specific student.
        """
        return self._is_masquerading_as_student() and self.masquerade.user_name
    def _find_block(self, parent, url_name, block_type, min_depth=None):
        """
        Finds the block in the parent with the specified url_name.
        If not found, calls get_current_child on the parent.
        """
        child = None
        if url_name:
            child = parent.get_child_by(lambda m: m.location.name == url_name)
            if not child:
                # User may be trying to access a child that isn't live yet
                if not self._is_masquerading_as_student():
                    raise Http404('No {block_type} found with name {url_name}'.format(
                        block_type=block_type,
                        url_name=url_name,
                    ))
            elif min_depth and not child.has_children_at_depth(min_depth - 1):
                # Found it, but it is too shallow (e.g. a chapter with no
                # sections); fall back to the "current child" below.
                child = None
        if not child:
            child = get_current_child(parent, min_depth=min_depth, requested_child=self.request.GET.get("child"))
        return child
    def _find_chapter(self):
        """
        Finds the requested chapter.
        """
        return self._find_block(self.course, self.chapter_url_name, 'chapter', CONTENT_DEPTH - 1)
    def _find_section(self):
        """
        Finds the requested section.
        """
        if self.chapter:
            return self._find_block(self.chapter, self.section_url_name, 'section')
    def _prefetch_and_bind_course(self, request):
        """
        Prefetches all descendant data for the requested course and
        sets up the runtime, which binds the request user to the course.
        """
        self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            self.course_key,
            self.effective_user,
            self.course,
            depth=CONTENT_DEPTH,
            read_only=CrawlersConfig.is_crawler(request),
        )
        self.course = get_module_for_descriptor(
            self.effective_user,
            self.request,
            self.course,
            self.field_data_cache,
            self.course_key,
            course=self.course,
        )
    def _prefetch_and_bind_section(self):
        """
        Prefetches all descendant data for the requested section and
        sets up the runtime, which binds the request user to the section.
        """
        # Pre-fetch all descendant data
        self.section = modulestore().get_item(self.section.location, depth=None, lazy=False)
        self.field_data_cache.add_descriptor_descendents(self.section, depth=None)
        # Bind section to user
        self.section = get_module_for_descriptor(
            self.effective_user,
            self.request,
            self.section,
            self.field_data_cache,
            self.course_key,
            self.position,
            course=self.course,
        )
    def _save_positions(self):
        """
        Save where we are in the course and chapter.
        """
        save_child_position(self.course, self.chapter_url_name)
        save_child_position(self.chapter, self.section_url_name)
    def _create_courseware_context(self, request):
        """
        Returns and creates the rendering context for the courseware.
        Also returns the table of contents for the courseware.
        """
        course_url_name = default_course_url_name(self.course.id)
        course_url = reverse(course_url_name, kwargs={'course_id': unicode(self.course.id)})
        courseware_context = {
            'csrf': csrf(self.request)['csrf_token'],
            'course': self.course,
            'course_url': course_url,
            'chapter': self.chapter,
            'section': self.section,
            'init': '',
            'fragment': Fragment(),
            'staff_access': self.is_staff,
            'masquerade': self.masquerade,
            'supports_preview_menu': True,
            'studio_url': get_studio_url(self.course, 'course'),
            'xqa_server': settings.FEATURES.get('XQA_SERVER', "http://your_xqa_server.com"),
            'bookmarks_api_url': reverse('bookmarks'),
            'language_preference': self._get_language_preference(),
            'disable_optimizely': not WaffleSwitchNamespace('RET').is_enabled('enable_optimizely_in_courseware'),
            'section_title': None,
            'sequence_title': None,
            'disable_accordion': COURSE_OUTLINE_PAGE_FLAG.is_enabled(self.course.id),
        }
        courseware_context.update(
            get_experiment_user_metadata_context(
                self.course,
                self.effective_user,
            )
        )
        table_of_contents = toc_for_course(
            self.effective_user,
            self.request,
            self.course,
            self.chapter_url_name,
            self.section_url_name,
            self.field_data_cache,
        )
        courseware_context['accordion'] = render_accordion(
            self.request,
            self.course,
            table_of_contents['chapters'],
        )
        courseware_context['course_sock_fragment'] = CourseSockFragmentView().render_to_fragment(
            request, course=self.course)
        # entrance exam data
        self._add_entrance_exam_to_context(courseware_context)
        # staff masquerading data
        if not check_course_open_for_learner(self.effective_user, self.course):
            # Disable student view button if user is staff and
            # course is not yet visible to students.
            courseware_context['disable_student_access'] = True
            courseware_context['supports_preview_menu'] = False
        if self.section:
            # chromeless data: the section can opt out of the accordion/tabs chrome
            if self.section.chrome:
                chrome = [s.strip() for s in self.section.chrome.lower().split(",")]
                if 'accordion' not in chrome:
                    courseware_context['disable_accordion'] = True
                if 'tabs' not in chrome:
                    courseware_context['disable_tabs'] = True
            # default tab
            if self.section.default_tab:
                courseware_context['default_tab'] = self.section.default_tab
            # section data
            courseware_context['section_title'] = self.section.display_name_with_default
            section_context = self._create_section_context(
                table_of_contents['previous_of_active_section'],
                table_of_contents['next_of_active_section'],
            )
            courseware_context['fragment'] = self.section.render(STUDENT_VIEW, section_context)
            if self.section.position and self.section.has_children:
                display_items = self.section.get_display_items()
                if display_items:
                    try:
                        # position is 1-indexed; an out-of-range saved position is logged and re-raised
                        courseware_context['sequence_title'] = display_items[self.section.position - 1] \
                            .display_name_with_default
                    except IndexError:
                        log.exception(
                            "IndexError loading courseware for user %s, course %s, section %s, position %d. Total items: %d. URL: %s",
                            self.real_user.username,
                            self.course.id,
                            self.section.display_name_with_default,
                            self.section.position,
                            len(display_items),
                            self.url,
                        )
                        raise
        return courseware_context
    def _add_entrance_exam_to_context(self, courseware_context):
        """
        Adds entrance exam related information to the given context.
        """
        if course_has_entrance_exam(self.course) and getattr(self.chapter, 'is_entrance_exam', False):
            courseware_context['entrance_exam_passed'] = user_has_passed_entrance_exam(self.effective_user, self.course)
            courseware_context['entrance_exam_current_score'] = get_entrance_exam_score_ratio(
                CourseGradeFactory().read(self.effective_user, self.course),
                get_entrance_exam_usage_key(self.course),
            )
    def _create_section_context(self, previous_of_active_section, next_of_active_section):
        """
        Returns and creates the rendering context for the section.
        """
        def _compute_section_url(section_info, requested_child):
            """
            Returns the section URL for the given section_info with the given child parameter.
            """
            return "{url}?child={requested_child}".format(
                url=reverse(
                    'courseware_section',
                    args=[unicode(self.course_key), section_info['chapter_url_name'], section_info['url_name']],
                ),
                requested_child=requested_child,
            )
        # NOTE (CCB): Pull the position from the URL for un-authenticated users. Otherwise, pull the saved
        # state from the data store.
        position = None if self.request.user.is_authenticated() else self.position
        section_context = {
            'activate_block_id': self.request.GET.get('activate_block_id'),
            'requested_child': self.request.GET.get("child"),
            'progress_url': reverse('progress', kwargs={'course_id': unicode(self.course_key)}),
            'user_authenticated': self.request.user.is_authenticated(),
            'position': position,
        }
        if previous_of_active_section:
            section_context['prev_url'] = _compute_section_url(previous_of_active_section, 'last')
        if next_of_active_section:
            section_context['next_url'] = _compute_section_url(next_of_active_section, 'first')
        # sections can hide data that masquerading staff should see when debugging issues with specific students
        section_context['specific_masquerade'] = self._is_masquerading_as_specific_student()
        return section_context
def render_accordion(request, course, table_of_contents):
    """
    Returns the HTML that renders the navigation for the given course.
    Expects the table_of_contents to have data on each chapter and section,
    including which ones are active.
    """
    # NOTE: TEMPLATE_IMPORTS.items() is concatenated as a list here — this is
    # Python 2 code (dict.items() returns a list); under Python 3 this would
    # need list(TEMPLATE_IMPORTS.items()).
    context = dict(
        [
            ('toc', table_of_contents),
            ('course_id', unicode(course.id)),
            ('csrf', csrf(request)['csrf_token']),
            ('due_date_display_format', course.due_date_display_format),
        ] + TEMPLATE_IMPORTS.items()
    )
    return render_to_string('courseware/accordion.html', context)
def save_child_position(seq_module, child_name):
    """
    Persist the 1-indexed position of the child named ``child_name`` inside
    ``seq_module``, writing through to the underlying KeyValueStore only when
    the stored position actually changes.
    child_name: url_name of the child
    """
    for index, item in enumerate(seq_module.get_display_items(), start=1):
        if item.location.name != child_name:
            continue
        # Avoid a needless store write when the position is already current.
        if seq_module.position != index:
            seq_module.position = index
            seq_module.save()
def save_positions_recursively_up(user, request, field_data_cache, xmodule, course=None):
    """
    Recurses up the course tree starting from a leaf
    Saving the position property based on the previous node as it goes

    Arguments:
        user: the user for whom positions are saved
        request: current HTTP request (needed to bind modules)
        field_data_cache: cache used when binding each ancestor to the user
        xmodule: the leaf module to start from
        course: optional course descriptor passed through to module binding
    """
    current_module = xmodule
    # Walk up until the root (get_parent_location returns None) is reached.
    while current_module:
        parent_location = modulestore().get_parent_location(current_module.location)
        parent = None
        if parent_location:
            parent_descriptor = modulestore().get_item(parent_location)
            parent = get_module_for_descriptor(
                user,
                request,
                parent_descriptor,
                field_data_cache,
                current_module.location.course_key,
                course=course
            )
        # Only sequence-like parents track a 'position'; others are skipped.
        if parent and hasattr(parent, 'position'):
            save_child_position(parent, current_module.location.name)
        current_module = parent
| lduarte1991/edx-platform | lms/djangoapps/courseware/views/index.py | Python | agpl-3.0 | 23,622 | [
"VisIt"
] | 3a208cc52c8989eb909364497485cc2b38a88b2a8df1e90ea5ccaa707f293ab7 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
#
"""
Distance analysis --- :mod:`MDAnalysis.analysis.distances`
==========================================================
This module provides functions to rapidly compute distances between
atoms or groups of atoms.
:func:`dist` and :func:`between` can take atom groups that do not even
have to be from the same :class:`~MDAnalysis.core.universe.Universe`.
.. SeeAlso:: :mod:`MDAnalysis.lib.distances`
"""
from __future__ import absolute_import
__all__ = ['distance_array', 'self_distance_array',
'contact_matrix', 'dist', 'between']
import numpy as np
from MDAnalysis.lib.distances import distance_array, self_distance_array
from MDAnalysis.lib.c_distances import contact_matrix_no_pbc, contact_matrix_pbc
from MDAnalysis.lib.NeighborSearch import AtomNeighborSearch
import warnings
import logging
logger = logging.getLogger("MDAnalysis.analysis.distances")
# scipy is an optional dependency: sparse-matrix support in contact_matrix()
# degrades gracefully when it is absent.
try:
    from scipy import sparse
except ImportError:
    sparse = None
    msg = "scipy.sparse could not be imported: some functionality will " \
          "not be available in contact_matrix()"
    warnings.warn(msg, category=ImportWarning)
    # Logger.warn() is a deprecated alias; use the canonical Logger.warning().
    logger.warning(msg)
    del msg
def contact_matrix(coord, cutoff=15.0, returntype="numpy", box=None):
    '''Calculates a matrix of contacts.
    There is a fast, high-memory-usage version for small systems
    (*returntype* = 'numpy'), and a slower, low-memory-usage version for
    larger systems (*returntype* = 'sparse').
    If *box* dimensions are passed then periodic boundary conditions
    are applied.
    Parameters
    ----------
    coord : array
        Array of coordinates of shape ``(N, 3)`` and dtype float32.
    cutoff : float, optional, default 15
        Particles within `cutoff` are considered to form a contact.
    returntype : string, optional, default "numpy"
        Select how the contact matrix is returned.
        * ``"numpy"``: return as an ``(N, N)`` :class:`numpy.ndarray`
        * ``"sparse"``: return as a :class:`scipy.sparse.lil_matrix`
    box : array-like or ``None``, optional, default ``None``
        Simulation cell dimensions in the form of
        :attr:`MDAnalysis.trajectory.base.Timestep.dimensions` when
        periodic boundary conditions should be taken into account for
        the calculation of contacts.
    Returns
    -------
    array or sparse matrix
        The contact matrix is returned in a format determined by the `returntype`
        keyword.
    Note
    ----
    :mod:`scipy.sparse` is required for using *sparse* matrices; if it cannot
    be imported then an `ImportError` is raised.
    See Also
    --------
    :mod:`MDAnalysis.analysis.contacts` for native contact analysis
    .. versionchanged:: 0.11.0
       Keyword *suppress_progmet* and *progress_meter_freq* were removed.
    '''
    if returntype == "numpy":
        # Dense boolean adjacency: every pairwise distance is computed.
        adj = (distance_array(coord, coord, box=box) < cutoff)
        return adj
    elif returntype == "sparse":
        if sparse is None:
            # hack: if we are running with minimal dependencies then scipy was
            # not imported and we have to bail here (see scipy import at top)
            raise ImportError("For sparse matrix functionality you need to "
                              "import scipy.")
        # Initialize square List of Lists matrix of dimensions equal to number
        # of coordinates passed
        sparse_contacts = sparse.lil_matrix((len(coord), len(coord)), dtype='bool')
        if box is not None:
            # with PBC
            contact_matrix_pbc(coord, sparse_contacts, box, cutoff)
        else:
            # without PBC
            contact_matrix_no_pbc(coord, sparse_contacts, cutoff)
        return sparse_contacts
def dist(A, B, offset=0):
    """Return distance between atoms in two atom groups.

    Distances are computed atom-wise between ``A`` and ``B``, which must
    contain the same number of atoms.  The residue ids of both groups are
    returned alongside the distances (a common use case is comparing CA
    distances before/after an alignment); `offset` allows shifting those
    resids to, e.g., PDB numbering.

    Arguments
    ---------
    A, B : AtomGroup
        :class:`~MDAnalysis.core.groups.AtomGroup` with the
        same number of atoms
    offset : integer or tuple, optional, default 0
        A single integer is added to the resids of both groups; a
        2-tuple ``(off_A, off_B)`` applies a separate offset to each
        group (arrays of per-atom offsets also work).

    Returns
    -------
    resids_A : array
        residue ids of the `A` group (possibly changed with `offset`)
    resids_B : array
        residue ids of the `B` group (possibly changed with `offset`)
    distances : array
        distances between the atoms
    """
    if A.atoms.n_atoms != B.atoms.n_atoms:
        raise ValueError("AtomGroups A and B do not have the same number of atoms")
    # Accept either a (off_A, off_B) pair or one scalar for both groups.
    try:
        shift_a, shift_b = offset
    except (TypeError, ValueError):
        shift_a = shift_b = int(offset)
    resids_a = np.array(A.resids) + shift_a
    resids_b = np.array(B.resids) + shift_b
    separation = A.positions - B.positions
    distances = np.sqrt((separation * separation).sum(axis=1))
    return np.array([resids_a, resids_b, distances])
def between(group, A, B, distance):
    """Return sub group of `group` that is within `distance` of both `A` and `B`
    This function is not aware of periodic boundary conditions.
    Can be used to find bridging waters or molecules in an interface.
    Similar to "*group* and (AROUND *A* *distance* and AROUND *B* *distance*)".
    Parameters
    ----------
    group : AtomGroup
        Find members of `group` that are between `A` and `B`
    A : AtomGroup
    B : AtomGroup
        `A` and `B` are the groups of atoms between which atoms in
        `group` are searched for.  The function works is more
        efficient if `group` is bigger than either `A` or `B`.
    distance : float
        maximum distance for an atom to be counted as in the vicinity of
        `A` or `B`
    Returns
    -------
    AtomGroup
        :class:`~MDAnalysis.core.groups.AtomGroup` of atoms that
        fulfill the criterion
    .. versionadded: 0.7.5
    """
    # One neighbour-search structure over `group`, queried from both sides.
    ns_group = AtomNeighborSearch(group)
    resA = set(ns_group.search(A, distance))
    resB = set(ns_group.search(B, distance))
    # NOTE(review): summing the sorted atoms presumably relies on Atom
    # addition producing an AtomGroup (per the documented return type) —
    # confirm against MDAnalysis.core.groups.
    return sum(sorted(resB.intersection(resA)))
| kain88-de/mdanalysis | package/MDAnalysis/analysis/distances.py | Python | gpl-2.0 | 7,523 | [
"MDAnalysis"
] | 6bbf768b2b6183ee4c2cfc8956e6f00cea1cfb76e20190ee34d7ce14093b17e1 |
""" The FileCatalogClient is a class representing the client of the DIRAC File Catalog
"""
import os
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.DISET.TransferClient import TransferClient
from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOMSAttributeForGroup, getDNForUsername
from DIRAC.Resources.Catalog.Utilities import checkCatalogArguments
from DIRAC.Resources.Catalog.FileCatalogClientBase import FileCatalogClientBase
__RCSID__ = "$Id$"
class FileCatalogClient(FileCatalogClientBase):
""" Client code to the DIRAC File Catalogue
"""
# The list of methods below is defining the client interface
READ_METHODS = FileCatalogClientBase.READ_METHODS + \
['isFile', 'getFileMetadata',
'getReplicas', 'getReplicaStatus', 'getFileSize', 'isDirectory', 'getDirectoryReplicas',
'listDirectory', 'getDirectoryMetadata', 'getDirectorySize', 'getDirectoryContents',
'getLFNForPFN', 'getLFNForGUID', 'findFilesByMetadata', 'getMetadataFields',
'findDirectoriesByMetadata', 'getReplicasByMetadata', 'findFilesByMetadataDetailed',
'findFilesByMetadataWeb', 'getCompatibleMetadata', 'getMetadataSet', 'getDatasets',
'getFileDescendents', 'getFileAncestors', 'getDirectoryUserMetadata', 'getFileUserMetadata',
'checkDataset', 'getDatasetParameters', 'getDatasetFiles', 'getDatasetAnnotation']
WRITE_METHODS = [
'createLink',
'removeLink',
'addFile',
'addFileAncestors',
'setFileStatus',
'addReplica',
'removeReplica',
'removeFile',
'setReplicaStatus',
'setReplicaHost',
'setReplicaProblematic',
'createDirectory',
'setDirectoryStatus',
'removeDirectory',
'changePathMode',
'changePathOwner',
'changePathGroup',
'addMetadataField',
'deleteMetadataField',
'setMetadata',
'setMetadataBulk',
'removeMetadata',
'addMetadataSet',
'addDataset',
'addDatasetAnnotation',
'removeDataset',
'updateDataset',
'freezeDataset',
'releaseDataset',
'addUser',
'deleteUser',
'addGroup',
'deleteGroup',
'repairCatalog',
'rebuildDirectoryUsage']
NO_LFN_METHODS = [
'findFilesByMetadata',
'addMetadataField',
'deleteMetadataField',
'getMetadataFields',
'setMetadata',
'setMetadataBulk',
'removeMetadata',
'getDirectoryUserMetadata',
'findDirectoriesByMetadata',
'getReplicasByMetadata',
'findFilesByMetadataDetailed',
'findFilesByMetadataWeb',
'getCompatibleMetadata',
'addMetadataSet',
'getMetadataSet',
'getFileUserMetadata',
'getLFNForGUID',
'addUser',
'deleteUser',
'addGroup',
'deleteGroup',
'repairCatalog',
'rebuildDirectoryUsage']
ADMIN_METHODS = ['addUser', 'deleteUser', 'addGroup', 'deleteGroup', 'getUsers', 'getGroups',
'getCatalogCounters', 'repairCatalog', 'rebuildDirectoryUsage']
  def __init__(self, url=None, **kwargs):
    """ Constructor function.

    :param str url: service URL or name; defaults to the standard
                    'DataManagement/FileCatalog' service
    :param kwargs: forwarded to the FileCatalogClientBase constructor
    """
    self.serverURL = 'DataManagement/FileCatalog' if not url else url
    super(FileCatalogClient, self).__init__(self.serverURL, **kwargs)
##################################################################################
#
##################################################################################
  @checkCatalogArguments
  def getReplicas(self, lfns, allStatus=False, timeout=120):
    """ Get the replicas of the given files

    :param lfns: LFN(s) to look up (normalised to a dict by checkCatalogArguments)
    :param bool allStatus: if True, also return replicas in non-active states
    :param int timeout: RPC timeout in seconds
    :return: S_OK with {'Successful': {lfn: {se: pfn}}, 'Failed': ...}
    """
    rpcClient = self._getRPC(timeout=timeout)
    result = rpcClient.getReplicas(lfns, allStatus)
    if not result['OK']:
      return result
    vo = getVOfromProxyGroup().get('Value', None)
    lfnDict = result['Value']
    seDict = result['Value'].get('SEPrefixes', {})
    for lfn in lfnDict['Successful']:
      for se in lfnDict['Successful'][lfn]:
        if not lfnDict['Successful'][lfn][se]:
          # The PFN was not returned, construct it on the fly
          # For some VO's the prefix can be non-standard
          voPrefix = seDict.get("VOPrefix", {}).get(se, {}).get(vo)
          sePrefix = seDict.get(se, '')
          prefix = voPrefix if voPrefix else sePrefix
          lfnDict['Successful'][lfn][se] = prefix + lfn
    return S_OK(lfnDict)
  @checkCatalogArguments
  def setReplicaProblematic(self, lfns, revert=False):
    """
    Set replicas to problematic.

    :param dict lfns: has to be formated this way :
                  { lfn : { se1 : pfn1, se2 : pfn2, ...}, ...}
    :param revert: If True, remove the problematic flag
    :return: { successful : { lfn : [ ses ] } : failed : { lfn : { se : msg } } }
    """
    # This method does a batch treatment because the setReplicaStatus can only take one replica per lfn at once
    #
    # Illustration :
    #
    # lfns {'L2': {'S1': 'P3'}, 'L3': {'S3': 'P5', 'S2': 'P4', 'S4': 'P6'}, 'L1': {'S2': 'P2', 'S1': 'P1'}}
    #
    # loop1: lfnSEs {'L2': ['S1'], 'L3': ['S3', 'S2', 'S4'], 'L1': ['S2', 'S1']}
    # loop1 : batch {'L2': {'Status': 'P', 'SE': 'S1', 'PFN': 'P3'},
    #                'L3': {'Status': 'P', 'SE': 'S4', 'PFN': 'P6'},
    #                'L1': {'Status': 'P', 'SE': 'S1', 'PFN': 'P1'}}
    #
    # loop2: lfnSEs {'L2': [], 'L3': ['S3', 'S2'], 'L1': ['S2']}
    # loop2 : batch {'L3': {'Status': 'P', 'SE': 'S2', 'PFN': 'P4'}, 'L1': {'Status': 'P', 'SE': 'S2', 'PFN': 'P2'}}
    #
    # loop3: lfnSEs {'L3': ['S3'], 'L1': []}
    # loop3 : batch {'L3': {'Status': 'P', 'SE': 'S3', 'PFN': 'P5'}}
    #
    # loop4: lfnSEs {'L3': []}
    # loop4 : batch {}
    successful = {}
    failed = {}
    status = 'AprioriGood' if revert else 'Trash'
    # { lfn : [ se1, se2, ...], ...}
    lfnsSEs = dict((lfn, [se for se in lfns[lfn]]) for lfn in lfns)
    while lfnsSEs:
      # { lfn : { 'SE' : se1, 'PFN' : pfn1, 'Status' : status }, ... }
      batch = {}
      # NOTE: iterating .keys() while deleting entries relies on Python 2
      # dict.keys() returning a list snapshot.
      for lfn in lfnsSEs.keys():
        # If there are still some Replicas (SE) for the given LFN, we put it in the next batch
        # else we remove the entry from the lfnsSEs dict
        if lfnsSEs[lfn]:
          se = lfnsSEs[lfn].pop()
          batch[lfn] = {'SE': se, 'PFN': lfns[lfn][se], 'Status': status}
        else:
          del lfnsSEs[lfn]
      # Happens when there is nothing to treat anymore
      if not batch:
        break
      res = self.setReplicaStatus(batch)
      if not res['OK']:
        # Whole-batch RPC failure: mark every replica in this batch failed.
        for lfn in batch:
          failed.setdefault(lfn, {})[batch[lfn]['SE']] = res['Message']
        continue
      for lfn in res['Value']['Failed']:
        failed.setdefault(lfn, {})[batch[lfn]['SE']] = res['Value']['Failed'][lfn]
      for lfn in res['Value']['Successful']:
        successful.setdefault(lfn, []).append(batch[lfn]['SE'])
    return S_OK({'Successful': successful, 'Failed': failed})
  @checkCatalogArguments
  def listDirectory(self, lfn, verbose=False, timeout=120):
    """ List the given directory's contents

    :param lfn: directory LFN(s)
    :param bool verbose: forwarded to the service for extended output
    :param int timeout: RPC timeout in seconds
    :return: S_OK result whose 'Files'/'SubDirs'/'Links' keys are rewritten
             to full LFNs
    """
    rpcClient = self._getRPC(timeout=timeout)
    result = rpcClient.listDirectory(lfn, verbose)
    if not result['OK']:
      return result
    # Force returned directory entries to be LFNs
    # NOTE: popping while iterating .keys() relies on Python 2 dict.keys()
    # returning a list snapshot.
    for entryType in ['Files', 'SubDirs', 'Links']:
      for path in result['Value']['Successful']:
        entryDict = result['Value']['Successful'][path][entryType]
        for fname in entryDict.keys():
          detailsDict = entryDict.pop(fname)
          lfn = os.path.join(path, os.path.basename(fname))
          entryDict[lfn] = detailsDict
    return result
  @checkCatalogArguments
  def getDirectoryMetadata(self, lfns, timeout=120):
    ''' Get standard directory metadata

    :param lfns: directory LFN(s)
    :param int timeout: RPC timeout in seconds
    :return: S_OK result enriched with 'OwnerDN' (resolved from the owner
             username; empty string on failure) and 'OwnerRole' (VOMS
             attribute of the owner group)
    '''
    rpcClient = self._getRPC(timeout=timeout)
    result = rpcClient.getDirectoryMetadata(lfns)
    if not result['OK']:
      return result
    # Add some useful fields
    for path in result['Value']['Successful']:
      owner = result['Value']['Successful'][path]['Owner']
      group = result['Value']['Successful'][path]['OwnerGroup']
      res = getDNForUsername(owner)
      if res['OK']:
        result['Value']['Successful'][path]['OwnerDN'] = res['Value'][0]
      else:
        result['Value']['Successful'][path]['OwnerDN'] = ''
      result['Value']['Successful'][path]['OwnerRole'] = getVOMSAttributeForGroup(group)
    return result
@checkCatalogArguments
def removeDirectory(self, lfn, recursive=False, timeout=120):
""" Remove the directory from the File Catalog. The recursive keyword is for the ineterface.
"""
rpcClient = self._getRPC(timeout=timeout)
return rpcClient.removeDirectory(lfn)
  @checkCatalogArguments
  def getDirectoryReplicas(self, lfns, allStatus=False, timeout=120):
    """ Find all the given directories' replicas

    :param lfns: directory LFN(s)
    :param bool allStatus: if True, also return replicas in non-active states
    :param int timeout: RPC timeout in seconds
    :return: S_OK result keyed by full file LFNs, with empty PFNs filled in
             from the 'SEPrefixes' returned by the service
    """
    rpcClient = self._getRPC(timeout=timeout)
    result = rpcClient.getDirectoryReplicas(lfns, allStatus)
    if not result['OK']:
      return result
    seDict = result['Value'].get('SEPrefixes', {})
    for path in result['Value']['Successful']:
      pathDict = result['Value']['Successful'][path]
      # NOTE: popping while iterating .keys() relies on Python 2 dict.keys()
      # returning a list snapshot.
      for fname in pathDict.keys():
        detailsDict = pathDict.pop(fname)
        lfn = '%s/%s' % (path, os.path.basename(fname))
        for se in detailsDict:
          if not detailsDict[se] and se in seDict:
            # Empty PFN: reconstruct it from the SE prefix and the LFN.
            detailsDict[se] = seDict[se] + lfn
        pathDict[lfn] = detailsDict
    return result
def findFilesByMetadata(self, metaDict, path='/', timeout=120):
""" Find files given the meta data query and the path
"""
rpcClient = self._getRPC(timeout=timeout)
result = rpcClient.findFilesByMetadata(metaDict, path)
if not result['OK']:
return result
if isinstance(result['Value'], list):
return result
elif isinstance(result['Value'], dict):
# Process into the lfn list
fileList = []
for dir_, fList in result['Value'].items():
for fi in fList:
fileList.append(dir_ + '/' + fi)
result['Value'] = fileList
return result
else:
return S_ERROR('Illegal return value type %s' % type(result['Value']))
  def getFileUserMetadata(self, path, timeout=120):
    """Get the meta data attached to a file, but also to
    the its corresponding directory

    :param str path: file LFN
    :param int timeout: RPC timeout in seconds
    :return: S_OK with the merged metadata dict
    """
    # Parent directory is derived by stripping the last path component.
    directory = "/".join(path.split("/")[:-1])
    rpcClient = self._getRPC(timeout=timeout)
    result = rpcClient.getFileUserMetadata(path)
    if not result['OK']:
      return result
    fmeta = result['Value']
    result = rpcClient.getDirectoryUserMetadata(directory)
    if not result['OK']:
      return result
    # NOTE(review): update() means directory metadata overrides file-level
    # metadata on clashing keys — confirm this precedence is intended.
    fmeta.update(result['Value'])
    return S_OK(fmeta)
  ########################################################################
  # Path operations (not updated)
  #
  # Thin pass-throughs to the FileCatalog service RPC; argument
  # normalization is done by the checkCatalogArguments decorator.
  @checkCatalogArguments
  def changePathOwner(self, lfns, recursive=False, timeout=120):
    """ Change the owner of the given paths
    """
    return self._getRPC(timeout=timeout).changePathOwner(lfns, recursive)
  @checkCatalogArguments
  def changePathGroup(self, lfns, recursive=False, timeout=120):
    """ Change the group of the given paths
    """
    return self._getRPC(timeout=timeout).changePathGroup(lfns, recursive)
  @checkCatalogArguments
  def changePathMode(self, lfns, recursive=False, timeout=120):
    """ Change the mode (permissions) of the given paths
    """
    return self._getRPC(timeout=timeout).changePathMode(lfns, recursive)
  ########################################################################
  # ACL Operations
  #
  @checkCatalogArguments
  def getPathPermissions(self, lfns, timeout=120):
    """ Determine the ACL information for a supplied path
    """
    return self._getRPC(timeout=timeout).getPathPermissions(lfns)
  @checkCatalogArguments
  def hasAccess(self, paths, opType, timeout=120):
    """ Determine if the given op can be performed on the paths
        The OpType is all the operations exported
    """
    return self._getRPC(timeout=timeout).hasAccess(paths, opType)
  ###################################################################
  #
  # User/Group write operations
  #
  def addUser(self, userName, timeout=120):
    """ Add a new user to the File Catalog """
    return self._getRPC(timeout=timeout).addUser(userName)
  def deleteUser(self, userName, timeout=120):
    """ Delete user from the File Catalog """
    return self._getRPC(timeout=timeout).deleteUser(userName)
  def addGroup(self, groupName, timeout=120):
    """ Add a new group to the File Catalog """
    return self._getRPC(timeout=timeout).addGroup(groupName)
  def deleteGroup(self, groupName, timeout=120):
    """ Delete group from the File Catalog """
    return self._getRPC(timeout=timeout).deleteGroup(groupName)
  ###################################################################
  #
  # User/Group read operations
  #
  def getUsers(self, timeout=120):
    """ Get all the users defined in the File Catalog """
    return self._getRPC(timeout=timeout).getUsers()
  def getGroups(self, timeout=120):
    """ Get all the groups defined in the File Catalog """
    return self._getRPC(timeout=timeout).getGroups()
  ########################################################################
  #
  # Path read operations
  #
  @checkCatalogArguments
  def exists(self, lfns, timeout=120):
    """ Check whether the supplied paths exist in the catalog """
    return self._getRPC(timeout=timeout).exists(lfns)
  ########################################################################
  #
  # File write operations
  #
  @checkCatalogArguments
  def addFile(self, lfns, timeout=120):
    """ Register supplied files """
    return self._getRPC(timeout=timeout).addFile(lfns)
  @checkCatalogArguments
  def removeFile(self, lfns, timeout=120):
    """ Remove the supplied lfns """
    return self._getRPC(timeout=timeout).removeFile(lfns)
  @checkCatalogArguments
  def setFileStatus(self, lfns, timeout=120):
    """ Set the status of the supplied lfns """
    return self._getRPC(timeout=timeout).setFileStatus(lfns)
  @checkCatalogArguments
  def addReplica(self, lfns, timeout=120):
    """ Register supplied replicas """
    return self._getRPC(timeout=timeout).addReplica(lfns)
  @checkCatalogArguments
  def removeReplica(self, lfns, timeout=120):
    """ Remove the supplied replicas """
    return self._getRPC(timeout=timeout).removeReplica(lfns)
  @checkCatalogArguments
  def setReplicaStatus(self, lfns, timeout=120):
    """ Set the status for the supplied replicas """
    return self._getRPC(timeout=timeout).setReplicaStatus(lfns)
  @checkCatalogArguments
  def setReplicaHost(self, lfns, timeout=120):
    """ Change the registered SE for the supplied replicas """
    return self._getRPC(timeout=timeout).setReplicaHost(lfns)
  @checkCatalogArguments
  def addFileAncestors(self, lfns, timeout=120):
    """Add file ancestor information for the given dict of LFNs.

    :param dict lfns: {lfn1: {'Ancestor': [ancestorLFNs]}, lfn2: {'Ancestors': ...}}
    """
    return self._getRPC(timeout=timeout).addFileAncestors(lfns)
  ########################################################################
  #
  # File read operations
  #
  @checkCatalogArguments
  def isFile(self, lfns, timeout=120):
    """ Check whether the supplied lfns are files """
    return self._getRPC(timeout=timeout).isFile(lfns)
  @checkCatalogArguments
  def getFileSize(self, lfns, timeout=120):
    """ Get the size associated to supplied lfns """
    return self._getRPC(timeout=timeout).getFileSize(lfns)
  @checkCatalogArguments
  def getFileMetadata(self, lfns, timeout=120):
    """ Get the metadata associated to supplied lfns """
    return self._getRPC(timeout=timeout).getFileMetadata(lfns)
  @checkCatalogArguments
  def getReplicaStatus(self, lfns, timeout=120):
    """ Get the status for the supplied replicas """
    return self._getRPC(timeout=timeout).getReplicaStatus(lfns)
  @checkCatalogArguments
  def getFileAncestors(self, lfns, depths, timeout=120):
    """ Get the ancestors of the supplied lfns up to the given depths """
    return self._getRPC(timeout=timeout).getFileAncestors(lfns, depths)
  @checkCatalogArguments
  def getFileDescendents(self, lfns, depths, timeout=120):
    """ Get the descendents of the supplied lfns up to the given depths """
    return self._getRPC(timeout=timeout).getFileDescendents(lfns, depths)
  def getLFNForGUID(self, guids, timeout=120):
    """Get the matching lfns for given guids"""
    return self._getRPC(timeout=timeout).getLFNForGUID(guids)
  ########################################################################
  #
  # Directory write operations
  #
  @checkCatalogArguments
  def createDirectory(self, lfns, timeout=120):
    """ Create the supplied directories """
    return self._getRPC(timeout=timeout).createDirectory(lfns)
  ########################################################################
  #
  # Directory read operations
  #
  @checkCatalogArguments
  def isDirectory(self, lfns, timeout=120):
    """ Determine whether supplied path is a directory """
    return self._getRPC(timeout=timeout).isDirectory(lfns)
  @checkCatalogArguments
  def getDirectorySize(self, lfns, longOut=False, fromFiles=False, timeout=120):
    """ Get the size of the supplied directory

        :param bool longOut: request the detailed (per-SE) output
        :param bool fromFiles: recompute from the file records instead of the
            precomputed DirectoryUsage tables
    """
    return self._getRPC(timeout=timeout).getDirectorySize(lfns, longOut, fromFiles)
  ########################################################################
  #
  # Administrative database operations
  #
  def getCatalogCounters(self, timeout=120):
    """ Get the number of registered directories, files and replicas in various tables """
    return self._getRPC(timeout=timeout).getCatalogCounters()
  def rebuildDirectoryUsage(self, timeout=120):
    """ Rebuild DirectoryUsage table from scratch """
    return self._getRPC(timeout=timeout).rebuildDirectoryUsage()
  def repairCatalog(self, timeout=120):
    """ Repair the catalog inconsistencies """
    return self._getRPC(timeout=timeout).repairCatalog()
  ########################################################################
  # Metadata Catalog Operations
  #
  def addMetadataField(self, fieldName, fieldType, metaType='-d', timeout=120):
    """ Add a new metadata field of the given type
    """
    return self._getRPC(timeout=timeout).addMetadataField(fieldName, fieldType, metaType)
  def deleteMetadataField(self, fieldName, timeout=120):
    """ Delete the metadata field
    """
    return self._getRPC(timeout=timeout).deleteMetadataField(fieldName)
  def getMetadataFields(self, timeout=120):
    """ Get all the metadata fields
    """
    return self._getRPC(timeout=timeout).getMetadataFields()
  def setMetadata(self, path, metadatadict, timeout=120):
    """ Set metadata parameter for the given path
    """
    return self._getRPC(timeout=timeout).setMetadata(path, metadatadict)
  def setMetadataBulk(self, pathMetadataDict, timeout=120):
    """ Set metadata parameters for multiple paths at once
    """
    return self._getRPC(timeout=timeout).setMetadataBulk(pathMetadataDict)
  def removeMetadata(self, pathMetadataDict, timeout=120):
    """ Remove the specified metadata for the given path
    """
    return self._getRPC(timeout=timeout).removeMetadata(pathMetadataDict)
  def getDirectoryUserMetadata(self, path, timeout=120):
    """ Get all the metadata valid for the given directory path
    """
    return self._getRPC(timeout=timeout).getDirectoryUserMetadata(path)
  def findDirectoriesByMetadata(self, metaDict, path='/', timeout=120):
    """ Find all the directories satisfying the given metadata set
    """
    return self._getRPC(timeout=timeout).findDirectoriesByMetadata(metaDict, path)
  def getReplicasByMetadata(self, metaDict, path='/', allStatus=False, timeout=120):
    """ Find all the replicas of files satisfying the given metadata set
    """
    return self._getRPC(timeout=timeout).getReplicasByMetadata(metaDict, path, allStatus)
  def findFilesByMetadataDetailed(self, metaDict, path='/', timeout=120):
    """ Find all the files satisfying the given metadata set
    """
    return self._getRPC(timeout=timeout).findFilesByMetadataDetailed(metaDict, path)
  def findFilesByMetadataWeb(self, metaDict, path, startItem, maxItems, timeout=120):
    """ Find files satisfying the given metadata set
    """
    return self._getRPC(timeout=timeout).findFilesByMetadataWeb(metaDict, path, startItem, maxItems)
  def getCompatibleMetadata(self, metaDict, path='/', timeout=120):
    """ Get metadata values compatible with the given metadata subset
    """
    return self._getRPC(timeout=timeout).getCompatibleMetadata(metaDict, path)
  def addMetadataSet(self, setName, setDict, timeout=120):
    """ Add a new metadata set
    """
    return self._getRPC(timeout=timeout).addMetadataSet(setName, setDict)
  def getMetadataSet(self, setName, expandFlag, timeout=120):
    """ Get the given metadata set
    """
    return self._getRPC(timeout=timeout).getMetadataSet(setName, expandFlag)
  #########################################################################################
  #
  # Dataset manipulation methods
  #
  @checkCatalogArguments
  def addDataset(self, datasets, timeout=120):
    """ Add a new dynamic dataset defined by its meta query
    """
    return self._getRPC(timeout=timeout).addDataset(datasets)
  @checkCatalogArguments
  def addDatasetAnnotation(self, datasetDict, timeout=120):
    """ Add annotation to an already created dataset
    """
    return self._getRPC(timeout=timeout).addDatasetAnnotation(datasetDict)
  @checkCatalogArguments
  def removeDataset(self, datasets, timeout=120):
    """ Remove the given dynamic datasets
    """
    return self._getRPC(timeout=timeout).removeDataset(datasets)
  @checkCatalogArguments
  def checkDataset(self, datasets, timeout=120):
    """ Check the given dynamic dataset for changes since its definition
    """
    return self._getRPC(timeout=timeout).checkDataset(datasets)
  @checkCatalogArguments
  def updateDataset(self, datasets, timeout=120):
    """ Update the given dynamic dataset for changes since its definition
    """
    return self._getRPC(timeout=timeout).updateDataset(datasets)
  @checkCatalogArguments
  def getDatasets(self, datasets, timeout=120):
    """ Get the given dynamic datasets as they are stored in the database
    """
    return self._getRPC(timeout=timeout).getDatasets(datasets)
  @checkCatalogArguments
  def getDatasetParameters(self, datasets, timeout=120):
    """ Get parameters of the given dynamic dataset as they are stored in the database
    """
    return self._getRPC(timeout=timeout).getDatasetParameters(datasets)
  @checkCatalogArguments
  def getDatasetAnnotation(self, datasets, timeout=120):
    """ Get annotation of the given datasets
    """
    return self._getRPC(timeout=timeout).getDatasetAnnotation(datasets)
  @checkCatalogArguments
  def freezeDataset(self, datasets, timeout=120):
    """ Freeze the contents of the dataset making it effectively static
    """
    return self._getRPC(timeout=timeout).freezeDataset(datasets)
  @checkCatalogArguments
  def releaseDataset(self, datasets, timeout=120):
    """ Release the contents of the frozen dataset allowing changes in its contents
    """
    return self._getRPC(timeout=timeout).releaseDataset(datasets)
  @checkCatalogArguments
  def getDatasetFiles(self, datasets, timeout=120):
    """ Get the lfns contained in the given datasets
    """
    return self._getRPC(timeout=timeout).getDatasetFiles(datasets)
#############################################################################
def getSEDump(self, seName, outputFilename):
"""
Dump the content of an SE in the given file.
The file contains a list of [lfn,checksum,size] dumped as csv,
separated by '|'
:param seName: name of the StorageElement
:param outputFilename: path to the file where to dump it
:returns: result from the TransferClient
"""
dfc = TransferClient(self.serverURL)
return dfc.receiveFile(outputFilename, seName)
| fstagni/DIRAC | Resources/Catalog/FileCatalogClient.py | Python | gpl-3.0 | 24,110 | [
"DIRAC"
] | 7db9c339d5010fcd9ce655d50d8fa2b5be87f8cf5a25a0325013672f6fb97bc9 |
import openbabel as ob
import pybel
import sys
import logging
import os
import subprocess
import time
import sqlite3
import numpy as np
np.set_printoptions(threshold=np.inf, linewidth=150)
from GaussianHelper import *
from collections import deque, defaultdict
from seam_ts_search import *
def printMol(mol, fileFormat="gjf", keywords=None, printOut=False):
    """Serialize an Open Babel molecule to a string.

    :param mol: ob.OBMol to serialize
    :param fileFormat: Open Babel output format id (e.g. 'gjf', 'svg', 'smi')
    :param keywords: Gaussian route line, attached via the 'k' output option
    :param printOut: if True, also log the serialized molecule
    :return: the serialized string. Previously only 'svg' and 'gjf' returned
             a value; every other format fell off the end and returned None,
             which broke callers such as computeQMEnergy that use 'smi'.
    """
    conv = ob.OBConversion()
    conv.SetOutFormat(fileFormat)
    if printOut:
        logging.info("printing the molecule")
        logging.info(conv.WriteString(mol, True))
    if keywords is not None:
        conv.AddOption("k", ob.OBConversion.OUTOPTIONS, keywords)
    if fileFormat == 'svg':
        # Draw without explicit hydrogens, on a copy so the caller's
        # molecule is untouched.
        conv.AddOption("C", ob.OBConversion.OUTOPTIONS)
        tmpMol = ob.OBMol(mol)
        tmpMol.DeleteHydrogens()
        return conv.WriteString(tmpMol, True)
    elif fileFormat == 'gjf':
        conv.AddOption("b", ob.OBConversion.OUTOPTIONS)
        return conv.WriteString(mol, True)
    # Fix: return the serialization for every other format as well.
    return conv.WriteString(mol, True)
def printAtom(atom):
    """Log an atom's index and atomic number at debug level."""
    idx = atom.GetIdx()
    atomicNum = atom.GetAtomicNum()
    logging.debug('atom index {}, atomic number {}'.format(idx, atomicNum))
def printBond(bond):
    """Log a bond's endpoint indices and atomic numbers at debug level."""
    begin = bond.GetBeginAtom()
    end = bond.GetEndAtom()
    logging.debug('bond index {} - {}, atomic number {} - {}'.format(
        begin.GetIdx(), end.GetIdx(), begin.GetAtomicNum(), end.GetAtomicNum()))
def getCanonicalSmiles(mol):
    """Return a canonical SMILES for *mol*, with the '.'-separated fragments
    sorted so the same multi-fragment system always maps to one string."""
    converter = ob.OBConversion()
    converter.SetOutFormat("can")
    smiles = converter.WriteString(mol, True)
    fragments = smiles.split('.')
    fragments.sort()
    return '.'.join(fragments)
def strToMol(type, s):
    """Parse a molecule from a string in the given Open Babel format.

    :param type: Open Babel input format id (e.g. 'smi'); note the parameter
        name shadows the builtin but is kept for interface compatibility
    :param s: the molecule string
    :return: an ob.OBMol
    :raises SmilesError: if *s* is None or cannot be parsed
    """
    if s is None:
        # Fail fast with the module's own exception instead of printing and
        # letting OBConversion.ReadString choke on None.
        logging.error("strToMol received None instead of a {} string".format(type))
        raise SmilesError("Failed to convert None to molecule")
    conv = ob.OBConversion()
    conv.SetInFormat(type)
    mol = ob.OBMol()
    if conv.ReadString(mol, s):
        return mol
    logging.error("converting failure from {} to molecule".format(type))
    raise SmilesError("Failed to convert {} to molecule".format(type))
def smilesToFilename(smiles):
    """Map a SMILES string onto a filesystem-safe name.

    '/' -> 'z', '\\' -> 'x', '#' -> '^'; every other character is kept.
    """
    table = str.maketrans({'/': 'z', '\\': 'x', '#': '^'})
    return smiles.translate(table)
def smilesToSysCall(smiles):
    """Return a shell-safe version of the file name derived from a SMILES.

    First applies the same '/'->'z', '\\'->'x', '#'->'^' mapping used for
    file names, then backslash-escapes '(', ')' and '$'.
    """
    mapped = smiles.translate(str.maketrans({'/': 'z', '\\': 'x', '#': '^'}))
    pieces = []
    for ch in mapped:
        if ch in '()$':
            pieces.append('\\')
        pieces.append(ch)
    return ''.join(pieces)
def numValenceElectron(atomicNumber):
    """Number of valence electrons for elements up to Xe (Z <= 54).

    For unsupported atomic numbers a warning is printed and 0 is returned.
    """
    # (upper bound of the atomic-number range, core electrons to subtract)
    shells = [(2, 0), (10, 2), (18, 10), (30, 18), (36, 28), (48, 36), (54, 46)]
    for bound, core in shells:
        if atomicNumber <= bound:
            return atomicNumber - core
    print('Atomic number not supported in calculating the number of valence electrons. Either it is from the 6th row and below or it is an invalid number')
    return 0
def atomTotalBondOrder(atom):
    """Sum of the orders of all bonds incident on *atom*."""
    return sum(bond.GetBondOrder() for bond in ob.OBAtomBondIter(atom))
def molToMat(mol):
    """Encode a molecule as an (n+3) x (n+1) integer matrix.

    Layout (1-based atom indices i, j):
      mat[i][0] == mat[0][i] : atomic number of atom i
      mat[i][j] (i != j)     : bond order between atoms i and j (symmetric)
      mat[i][i]              : number of non-bonding electrons on atom i
      mat[n+1][i]            : total bond order of atom i
      mat[n+2][i]            : formal charge of atom i
    """
    n = mol.NumAtoms()
    mat = np.zeros((n + 3, n + 1), dtype=int)
    for i in range(1, n + 1):
        mat[i][0] = mol.GetAtom(i).GetAtomicNum()
        mat[0][i] = mat[i][0]
    for atom in ob.OBMolAtomIter(mol):
        i = atom.GetIdx()
        # Reuse the shared helper instead of re-implementing the bond loop.
        nBonds = atomTotalBondOrder(atom)
        nonBondingElecs = numValenceElectron(atom.GetAtomicNum()) - nBonds - atom.GetFormalCharge()
        mat[i][i] = nonBondingElecs
        mat[n + 1][i] = nBonds
        mat[n + 2][i] = atom.GetFormalCharge()
    for bond in ob.OBMolBondIter(mol):
        i = bond.GetBeginAtomIdx()
        j = bond.GetEndAtomIdx()
        mat[i][j] = bond.GetBondOrder()
        mat[j][i] = mat[i][j]
    return mat
def matToMol(mat):
    """Inverse of molToMat: rebuild an OBMol from the matrix encoding.

    Only the atomic numbers (row/column 0), the formal charges (row n+2) and
    the off-diagonal bond-order block are consumed; the lone-pair diagonal
    and the total-bond-order row are derived quantities and not read back.

    NOTE(review): BeginModify() is never paired with EndModify() here --
    presumably deliberate to skip re-perception, but confirm against the
    Open Babel OBMol documentation before relying on derived properties.
    """
    n = len(mat) - 3
    mol = ob.OBMol()
    mol.BeginModify()
    for i in range(1, n+1):
        # NewAtom is given an explicit id so 1-based atom indices line up
        # with the matrix rows.
        mol.NewAtom(i)
        atom = mol.GetAtom(i)
        atom.SetAtomicNum(mat[i][0])
        atom.SetFormalCharge(mat[n+2][i])
    # Lower triangle only: the matrix is symmetric.
    for i in range(1, n+1):
        for j in range(1, i):
            if mat[i][j] != 0:
                mol.AddBond(i, j, mat[i][j])
    return mol
def separateFragments(pymol):
    """Spatially separate disconnected fragments of a pybel molecule.

    Connected components are found by BFS over the bond graph; each fragment
    is then translated along (1, 1, 1) in 5 A increments (centred around the
    original position) so fragments no longer overlap.

    :param pymol: a pybel.Molecule; its coordinates are modified in place
    """
    mol = pymol.OBMol
    nAtom = len(pymol.atoms)
    unvisited = set(range(1, nAtom + 1))
    q = deque([1])
    fragments = [set()]
    while q or unvisited:
        if q:
            curr = q.popleft()
            # Fix: an atom can be queued once per neighbour (e.g. in rings),
            # so a second pop of the same index used to crash with KeyError
            # in unvisited.remove; skip already-visited atoms instead.
            if curr not in unvisited:
                continue
            unvisited.remove(curr)
            fragments[-1].add(curr)
        else:
            # Queue exhausted but atoms remain: start a new fragment.
            curr = unvisited.pop()
            fragments.append(set([curr]))
        atom = mol.GetAtom(curr)
        for nbr in ob.OBAtomAtomIter(atom):
            nbrNum = nbr.GetIdx()
            if nbrNum in unvisited:
                q.append(nbrNum)
    coords = []
    for atom in pymol:
        coords.append(list(atom.coords))
    nFragments = len(fragments)
    # Start shifted negatively so the whole assembly stays roughly centred.
    delta = -(nFragments - 1) * 1.5
    for fragment in fragments:
        for atomIdx in fragment:
            x, y, z = coords[atomIdx - 1]
            coords[atomIdx - 1] = [x + delta, y + delta, z + delta]
        delta += 5.0
    flat = [component for point in coords for component in point]
    mol.SetCoordinates(ob.double_array(flat))
class EnergyReadingError(Exception):
    """Raised when an energy cannot be parsed from a QM output file."""
    def __init__(self, value):
        # Stored on .message (pre-Python-3 idiom kept for compatibility).
        self.message = value
    def __str__(self):
        return repr(self.message)
class SmilesError(Exception):
    """Raised when a string cannot be converted to a molecule."""
    def __init__(self, value):
        # Stored on .message (pre-Python-3 idiom kept for compatibility).
        self.message = value
    def __str__(self):
        return repr(self.message)
class ReactionGraphEdge:
    """A directed edge (elementary reaction step) in the reaction network."""
    def __init__(self, fromNode, node, eSources, eTargets):
        self.fromNode = fromNode        # reactant-side ReactionGraphNode
        self.node = node                # product-side ReactionGraphNode
        # Electron sources/targets: atom index (int) or bond (i, j) tuple,
        # as consumed by ReactionRoute.oxidize/reduce.
        self.eSources = list(eSources)
        self.eTargets = list(eTargets)
        self.ts = None                  # transition-state structure, set later
        self.tsEnergy = 0.0
        self.onPath = False             # lies on a reactant->product path
class ReactionGraphNode:
    """A node (one molecular species) in the reaction network graph."""
    def __init__(self, mol = None, smiles = None, molStringFormat = "smi", depth = None):
        # Either a molecule object or a SMILES string must be supplied;
        # molStringFormat is currently accepted but never used.
        if mol is not None:
            self.mol = ob.OBMol(mol)
            self.smiles = getCanonicalSmiles(mol)
        elif smiles is not None:
            self.mol = strToMol('smi', smiles)
            self.smiles = smiles
        else:
            # NOTE(review): terminates the whole process on bad arguments;
            # raising ValueError would be friendlier -- left unchanged.
            logging.warning("a molecule is needed to create a ReactionGraphNode")
            sys.exit()
        self.neighbors = {}   # outgoing edges keyed by neighbour (see callers)
        self.depths = []      # search depths at which this node was reached
        self.energy = 0.0
        self.onPath = False   # lies on a reactant->product path
        if depth is not None:
            self.depths.append(depth)
class ReactionRoute:
    def __init__(self, reactantString = None, productString = None, inputJson = None):
        """Initialise default search settings; optionally overridden by a
        JSON configuration string (parsed by inputJson)."""
        # An equality holds for all atom: total bond order + # of non-bonding electrons = # of valence electrons + formal charge
        # This gives rise to the following rules for each atom. Atomic number determines the number of valence electrons, formal charge is formal charge, then there is total bond order. Once these three is fixed, the Luis structure is determined.
        # (atomic number, formal charge) -> list of allowed total bond orders
        self._allowedCoordNum = {(1,-1):[],
                                 (1,0):[1],
                                 (1,1):[0],
                                 (3,0):[3],
                                 (3,1):[],
                                 (4,0):[2],
                                 (5,-1):[2],
                                 (5,0):[3],
                                 (5,1):[4],
                                 # (6,-1):[3],
                                 (6,0):[4], # there is a bug in smiles about carbene, so we are not allowing carbene here.
                                 (6,1):[3],
                                 (7,-1):[2],
                                 (7,0):[3],
                                 (7,1):[4],
                                 (8,-1):[1],
                                 (8,0):[2],
                                 (8,1):[3],
                                 (9,-1):[0],
                                 (9,0):[1],
                                 (11,1):[],
                                 (12,0):[2],
                                 (13,0):[3],
                                 (14,0):[4],
                                 (15,0):[3,5],
                                 (15,1):[4],
                                 (16,0):[2,3],
                                 (17,-1):[0],
                                 (17,0):[1],
                                 (17,1):[],
                                 (35,0):[1],
                                 (35,-1):[0],
                                 (35,1):[2]}
        # Lowest formal charge per element among the combinations above that
        # actually allow at least one bond order.
        self._minFc = {}
        for pair, tboList in self._allowedCoordNum.items():
            if tboList != []:
                if pair[0] not in self._minFc:
                    self._minFc[pair[0]] = pair[1]
                else:
                    self._minFc[pair[0]] = min(self._minFc[pair[0]], pair[1])
        self._outputLevel = 2
        self._maxStep = 3               # max number of elementary steps searched
        self._maxExtraStep = 1          # extra steps allowed past the first hit
        self._doCalculation = False     # run QM energy jobs for intermediates
        self._structureScreen = True
        self._energyScreen = True
        self._intermediateThresh = 200.0  # kcal/mol cutoff for intermediates
        self._gaussianKeywords = "# pm6 3-21g opt"
        self._doTsSearch = False
        self._tsThresh = 200.0            # kcal/mol cutoff for transition states
        self._gaussianTsKeywords = '# pm6 3-21g opt=(ts,noeigen,calcfc,maxcyc=100)'
        self._energyBaseLine = 0.0
        self.ignoreList = set()           # atom indices excluded from moves
        self.activeList = set()           # atom indices restricted to moves
        self._invalidStructures = set()
        self._reactantString = reactantString
        self._productString = productString
        self._targetLeastStep = 100
        self._targetFound = False
        self._reactionMap = {}            # smiles -> ReactionGraphNode
        self._energyMap = {}
        self._fragmentEnergyMap = {}      # fragment smiles -> cached energy
        # self._brokenBonds = []
        # self._createdBonds = []
        self._gsub = False                # submit Gaussian jobs via 'gsub' queue
        self._save = True
        self._pathOnly = True
        self._preEnergyScreen = False
        self._matrixForm = True
        self._filterFc = True
        self._noProduct = False
        if inputJson is not None:
            self.inputJson(inputJson)
def inputJson(self, inputJson):
import json
params = json.loads(inputJson)
if 'reactant' in params:
self._reactantString = params['reactant']
if 'product' in params:
self._productString = params['product']
if 'maxStep' in params:
self._maxStep = params['maxStep']
if 'maxExtraStep' in params:
self._maxExtraStep = params['maxExtraStep']
if 'doCalculation' in params:
self._doCalculation = params['doCalculation']
if 'structureScreen' in params:
self._structureScreen = params['structureScreen']
if 'energyScreen' in params:
self._energyScreen = params['energyScreen']
if 'intermediateThresh' in params:
self._intermediateThresh = params['intermediateThresh']
if 'gaussianKeywords' in params:
self._gaussianKeywords = params['gaussianKeywords']
if 'doTsSearch' in params:
self._doTsSearch = params['doTsSearch']
if 'tsThresh' in params:
self._tsThresh = params['tsThresh']
if 'gaussianTsKeywords' in params:
self._gaussianTsKeywords = params['gaussianTsKeywords']
if 'ignoreList' in params:
self.ignoreList = set(params['ignoreList'])
if 'activeList' in params:
self.activeList = set(params['activeList'])
if 'gsub' in params:
self._gsub = params['gsub']
if 'outputLevel' in params:
self._outputLevel = params['outputLevel']
if 'save' in params:
self._save = params['save']
if 'pathOnly' in params:
self._pathOnly = params['pathOnly']
if 'preEnergyScreen' in params:
self._preEnergyScreen = params['preEnergyScreen']
if 'matrixForm' in params:
self._matrixForm = params['matrixForm']
def canBreakOrFormBond(self, atom, breakOrForm, nElec):
# Decide if an atom can break or form bond in a certain way (get or lose certain number of electrons)
formalCharge = atom.GetFormalCharge()
atomicNum = atom.GetAtomicNum()
nBonds = 0
for bond in ob.OBAtomBondIter(atom):
nBonds += bond.GetBondOrder()
if breakOrForm.lower() == "break":
nBondChange = -1
elif breakOrForm.lower() == "form":
nBondChange = 1
try:
if nBonds + nBondChange in self._allowedCoordNum[(atomicNum, formalCharge+nBondChange*(nElec-1))]:
return 1
else:
return 0
except KeyError:
return 0
def checkLuisRule(self, *args, **kwargs):
for arg in args:
if type(arg) is int:
atom = kwargs['mol'].GetAtom(arg)
pair = (atom.GetAtomicNum(), atom.GetFormalCharge())
if pair not in self._allowedCoordNum or atomTotalBondOrder(atom) not in self._allowedCoordNum[pair]:
return False
elif type(arg) is tuple:
if not self.checkLuisRule(*arg, mol=kwargs['mol']):
return False
elif type(arg) is ob.OBMol:
for atom in ob.OBMolAtomIter(arg):
if not self.checkLuisRule(atom, mol=arg):
return False
elif type(arg) is ob.OBBond:
if not self.checkLuisRule(arg.GetBeginAtom()) or not self.checkLuisRule(arg.GetEndAtom()):
return False
elif type(arg) is ob.OBAtom:
pair = (arg.GetAtomicNum(), arg.GetFormalCharge())
if pair not in self._allowedCoordNum or atomTotalBondOrder(atom) not in self._allowedCoordNum[pair]:
return False
return True
def obeyLuisRule(self, atom, nBondChange, nElectronChange):
# Decide if an atom can break or form bond in a certain way (get or lose certain number of electrons)
formalCharge = atom.GetFormalCharge()
atomicNum = atom.GetAtomicNum()
nBonds = atomTotalBondOrder(atom)
if abs(nElectronChange) == 2:
formalChargeChange = -nElectronChange / 2
try:
if nBonds + nBondChange in self._allowedCoordNum[(atomicNum, formalCharge + formalChargeChange)]:
return 1
else:
return 0
except KeyError:
return 0
# def createNewBond(self, mol, atom1, atom2, elecFromAtom1, elecFromAtom2):
# # Create a bond in the searching process. Keeps track of bond order, formal charge, self._createdBonds and self._brokenBonds
# bond = atom1.GetBond(atom2)
# mol.BeginModify()
# if bond == None:
# bondOrder = 0
# bond = mol.NewBond()
# bond.SetBegin(atom1)
# bond.SetEnd(atom2)
# bond.SetBondOrder(1)
# atom1.AddBond(bond)
# atom2.AddBond(bond)
# else:
# bondOrder = bond.GetBondOrder()
# bond.SetBondOrder(bond.GetBondOrder()+1)
# if elecFromAtom1 == 0 and elecFromAtom2 == 2:
# atom1.SetFormalCharge(atom1.GetFormalCharge()-1)
# atom2.SetFormalCharge(atom2.GetFormalCharge()+1)
# elif elecFromAtom1 == 2 and elecFromAtom2 == 0:
# atom1.SetFormalCharge(atom1.GetFormalCharge()+1)
# atom2.SetFormalCharge(atom2.GetFormalCharge()-1)
# mol.EndModify()
# if (atom1.GetIdx(), atom2.GetIdx(), elecFromAtom1, elecFromAtom2, bondOrder+1) not in self._brokenBonds:
# # If the bond has been broken before, this is just restoring it, so the corresponding record in self._brokenBonds will be deleted and no new record is added.
# self._createdBonds.append((atom1.GetIdx(), atom2.GetIdx(), elecFromAtom1, elecFromAtom2, bondOrder))
# logging.debug("adding ({}, {}, {}, {}, {}) to createdBonds".format(atom1.GetIdx(), atom2.GetIdx(), elecFromAtom1, elecFromAtom2, bondOrder))
# else:
# # If the bond has not been broken before, it is a newly changed bond. We add the record to self._createdBonds.
# self._brokenBonds.remove((atom1.GetIdx(), atom2.GetIdx(), elecFromAtom1, elecFromAtom2, bondOrder+1))
# logging.debug("removing ({}, {}, {}, {}, {}) from brokenBonds".format(atom1.GetIdx(), atom2.GetIdx(), elecFromAtom1, elecFromAtom2, bondOrder+1))
# logging.debug("new bond {} - {} is formed".format(atom1.GetIdx(), atom2.GetIdx()))
# return bond
def moveElec(self, mol, atom1Idx, atom2Idx, atom3Idx, nElec):
mol.BeginModify()
atom1 = None if atom1Idx is None else mol.GetAtom(atom1Idx)
atom2 = None if atom2Idx is None else mol.GetAtom(atom2Idx)
atom3 = None if atom3Idx is None else mol.GetAtom(atom3Idx)
if atom1 is None: # lone pair (atom2) to bond (atom2 - atom3)
atom2.SetFormalCharge(atom2.GetFormalCharge()+1)
# ob.OBPairData(atom2.GetData('nLonePair')).SetValue(str(int(ob.OBPairData(atom.GetData('nLonePair')).GetValue())-2))
bond = mol.GetBond(atom2, atom3)
if bond is None:
mol.AddBond(atom2.GetIdx(), atom3.GetIdx(), 1)
else:
bond.SetBondOrder(bond.GetBondOrder()+1)
atom3.SetFormalCharge(atom3.GetFormalCharge()-1)
elif atom3 is None: # bond (atom1 - atom2) to lone pair (atom2)
bond = mol.GetBond(atom1, atom2)
bondOrder = bond.GetBondOrder()
print('bondorder is {}'.format(bondOrder))
if bondOrder == 1:
mol.DeleteBond(bond)
else:
bond.SetBondOrder(bondOrder - 1)
atom1.SetFormalCharge(atom1.GetFormalCharge()+1)
atom2.SetFormalCharge(atom2.GetFormalCharge()-1)
# ob.OBPairData(atom2.GetData('nLonePair')).SetValue(str(int(ob.OBPairData(atom.GetData('nLonePair')).GetValue())+2))
else: # bond1 (atom1 - atom2) to bond2 (atom2 - atom3)
bond1 = mol.GetBond(atom1, atom2)
bond2 = mol.GetBond(atom2, atom3)
atom1.SetFormalCharge(atom2.GetFormalCharge()+1)
atom3.SetFormalCharge(atom3.GetFormalCharge()-1)
bondOrder1 = bond1.GetBondOrder()
if bondOrder1 == 1:
mol.DeleteBond(bond1)
else:
bond1.SetBondOrder(bondOrder1-1)
if bond2 is None:
mol.AddBond(atom2.GetIdx(), atom3.GetIdx(), 1)
else:
bond2.SetBondOrder(bond2.GetBondOrder()+1)
mol.EndModify()
printMol(mol, printOut=True)
for bond in ob.OBMolBondIter(mol):
printBond(bond)
def changeFormalCharge(self, mol, idx, change):
atom = mol.GetAtom(idx)
atom.SetFormalCharge(atom.GetFormalCharge()+change)
def changeBondOrder(self, mol, i, j, change):
bond = mol.GetBond(i, j)
if bond is None:
mol.AddBond(i, j, 1)
else:
bondOrder = bond.GetBondOrder()
if bondOrder == 1 and change == -1:
mol.DeleteBond(bond)
else:
bond.SetBondOrder(bondOrder + change)
def oxidize(self, mol, eSource):
if type(eSource) is int:
self.changeFormalCharge(mol, eSource, +2)
elif type(eSource) is tuple:
i, j = eSource
self.changeBondOrder(mol, i, j, -1)
self.changeFormalCharge(mol, i, +1)
self.changeFormalCharge(mol, j, +1)
def reduce(self, mol, eTarget):
if type(eTarget) is int:
self.changeFormalCharge(mol, eTarget, -2)
elif type(eTarget) is tuple:
i, j = eTarget
self.changeBondOrder(mol, i, j, +1)
self.changeFormalCharge(mol, i, -1)
self.changeFormalCharge(mol, j, -1)
# def breakBond(self, mol, atom1, atom2, elecToAtom1, elecToAtom2):
# # Break a bond in the searching process. Keeps track of bond order, formal charge, self._createdBonds and self._brokenBonds
# bond = atom1.GetBond(atom2)
# if bond != None:
# mol.BeginModify()
# bondOrder = bond.GetBondOrder()
# if bond.GetBondOrder() == 1:
# mol.DeleteBond(bond)
# elif bond.GetBondOrder() >= 2:
# bond.SetBondOrder(bond.GetBondOrder()-1)
# if elecToAtom1 == 0 and elecToAtom2 == 2:
# atom1.SetFormalCharge(atom1.GetFormalCharge()+1)
# atom2.SetFormalCharge(atom2.GetFormalCharge()-1)
# elif elecToAtom1 == 2 and elecToAtom2 == 0:
# atom1.SetFormalCharge(atom1.GetFormalCharge()-1)
# atom2.SetFormalCharge(atom2.GetFormalCharge()+1)
# mol.EndModify()
# logging.debug("bond {} - {} is broken".format(atom1.GetIdx(), atom2.GetIdx()))
# if (atom1.GetIdx(), atom2.GetIdx(), elecToAtom1, elecToAtom2, bondOrder-1) not in self._createdBonds:
# logging.debug("adding ({}, {}, {}, {}, {}) to brokenBonds".format(atom1.GetIdx(), atom2.GetIdx(), elecToAtom1, elecToAtom2, bondOrder))
# self._brokenBonds.append((atom1.GetIdx(), atom2.GetIdx(), elecToAtom1, elecToAtom2, bondOrder))
# else:
# logging.debug("removing ({}, {}, {}, {}, {}) from createdBonds".format(atom1.GetIdx(), atom2.GetIdx(), elecToAtom1, elecToAtom2, bondOrder-1))
# self._createdBonds.remove((atom1.GetIdx(), atom2.GetIdx(), elecToAtom1, elecToAtom2, bondOrder-1))
# return True
# else:
# logging.warning("No bond is found between atom {} and atom {}".format(atom1.GetIdx(), atom2.GetIdx()))
# return False
# def isInvalidStructure(self, mol):
# # A structure will be invalid if two adjacent atoms both are charged.
# for atom1 in ob.OBMolAtomIter(mol):
# if atom1.GetFormalCharge() != 0:
# for atom2 in ob.OBMolAtomIter(mol):
# if atom1.GetIdx() != atom2.GetIdx() and atom2.GetFormalCharge() != 0:
# return True
# nBonds = 0
# for bond in ob.OBAtomBondIter(atom1):
# nBonds += bond.GetBondOrder()
# if atom1.GetAtomicNum() == 35 and nBonds == 2:
# for atom2 in ob.OBAtomAtomIter(atom1):
# if atom2.GetAtomicNum() == 1:
# return True
# return False
    def doGaussian(self, mol, fullFileName, smiles):
        """Compute (or fetch from the cache) the Gaussian energy of *mol*.

        Results are memoized in the 'jobArchive' table of reactionroute.db,
        keyed on (smiles, gaussian keywords). On a cache miss a .gjf input is
        generated under gaussian/ and run either through the 'gsub' batch
        queue (when self._gsub) or directly with 'gdv'; a failed first run is
        retried once with the 'dist' 3D-embedding option. A run that fails
        twice is recorded with the sentinel energy -999999999.0.

        :param mol: ob.OBMol to evaluate (a copy is used; input not modified)
        :param fullFileName: base name (no extension) for the gaussian/ files
        :param smiles: canonical SMILES used as the cache key
        :return: the energy value as produced by logParser
        """
        conn = sqlite3.connect('reactionroute.db')
        cursor = conn.cursor()
        # sqlite accepts '==' as equality, so the query works as intended.
        records = cursor.execute('select energy from jobArchive where smiles == ? and keywords == ?',
                                 (smiles, self._gaussianKeywords))
        logging.debug('database connection established')
        record = records.fetchone()
        if record:
            logging.debug('{} record found in the database'.format(smiles))
            print('{} record found in the database'.format(smiles))
            conn.close()
            return record[0]
        else:
            logging.debug('{} not found in the database, doing calculations...'.format(smiles))
            molCopy = ob.OBMol(mol)
            molCopy.SetTitle("ReactionRoute")
            inputFile = open("gaussian/"+fullFileName+".gjf", 'w')
            op3d = ob.OBOp.FindType("gen3d")
            op3d.Do(molCopy, '3')
            inputFile.write(printMol(molCopy, fileFormat = "gjf", keywords = self._gaussianKeywords))
            inputFile.close()
            # Backslash-escape shell metacharacters before interpolating the
            # file name into shell commands below.
            gaussianCall = ''
            for i, c in enumerate(fullFileName):
                if (c == '(' or c == ')' or c == '$') and i > 0 and fullFileName[i-1] != '\\':
                    gaussianCall += '\\'
                gaussianCall += c
            if self._gsub:
                print("gsub -fastq gaussian/"+gaussianCall+".gjf")
                logging.info("gsub -fastq gaussian/"+gaussianCall+".gjf")
                # NOTE(review): shell=True with an interpolated name; only
                # acceptable because the name derives from our own SMILES.
                output = subprocess.check_output('cd gaussian; gsub -fastq '+gaussianCall+'.gjf; cd ..', shell=True)
                # print output
                jobId = output.split()[7]
                # Poll the batch queue every 10 s until the job disappears.
                # NOTE(review): on Python 3 check_output returns bytes for
                # both calls, so the membership test stays consistent.
                while True:
                    time.sleep(10)
                    outputQstat = subprocess.check_output('qstat', shell=True)
                    if jobId not in outputQstat:
                        break
            else:
                print("gdv gaussian/"+gaussianCall+".gjf")
                logging.info("gdv gaussian/"+gaussianCall+".gjf")
                os.system("gdv gaussian/"+gaussianCall+".gjf")
            molDict = logParser('gaussian/'+fullFileName+'.log')
            if molDict['result'] == 'Normal':
                cursor.execute('insert into jobArchive (smiles, keywords, formula, energy) values (?, ?, ?, ?)',
                               (smiles, self._gaussianKeywords, molDict['formula'], molDict['energy']))
                molCopyEnergy = molDict['energy']
            else:
                # Retry once with distance-geometry embedding.
                logging.error("First gaussian run failed. Trying second time with op3d.do(frag, 'dist')")
                inputFile = open("gaussian/"+fullFileName+".gjf", 'w')
                op3d = ob.OBOp.FindType("gen3d")
                op3d.Do(molCopy, 'dist')
                inputFile.write(printMol(molCopy, fileFormat = "gjf", keywords = self._gaussianKeywords))
                inputFile.close()
                os.system("gdv gaussian/"+gaussianCall+".gjf")
                molDict = logParser('gaussian/'+fullFileName+'.log')
                if molDict['result'] == 'Normal':
                    cursor.execute('insert into jobArchive (smiles, keywords, formula, energy) values (?, ?, ?, ?)',
                                   (smiles, self._gaussianKeywords, molDict['formula'], molDict['energy']))
                    molCopyEnergy = molDict['energy']
                else:
                    # Record the failure with a sentinel so we do not rerun it.
                    cursor.execute('insert into jobArchive (smiles, keywords, formula, energy) values (?, ?, ?, ?)',
                                   (smiles, self._gaussianKeywords, 'error', -999999999.0))
                    logging.error("Second gaussian run failed. ")
                    molCopyEnergy = -999999999.0
            conn.commit()
            conn.close()
            return molCopyEnergy
def computeQMEnergy(self, mol, software, method, fragmentEnergyMap=None):
    """Compute the QM energy of ``mol`` (in kcal/mol) with an external package.

    The molecule is round-tripped through SMILES, protonated, and — when it
    consists of several disconnected fragments — each fragment is computed
    separately (with caching) and the fragment energies are summed.

    :param mol: OpenBabel molecule to evaluate.
    :param software: QM package name; only "gaussian"/"gauss" is handled.
    :param method: keyword string for the QM package.
        NOTE(review): currently unused here; ``doGaussian`` reads
        ``self._gaussianKeywords`` instead — confirm intended.
    :param fragmentEnergyMap: optional cache ``{smiles: energy}``, updated
        in place so repeated fragments are computed only once.
    :returns: energy in kcal/mol, or ``None`` if ``software`` is unsupported.
    """
    # Robustness fix: the original raised TypeError ("in None") when the
    # default fragmentEnergyMap=None met a multi-fragment molecule.
    if fragmentEnergyMap is None:
        fragmentEnergyMap = {}
    if not os.path.isdir(software):
        os.system("mkdir " + software)
    # Work on a clean copy so the caller's molecule is never mutated.
    molCopy = strToMol('smi', printMol(mol, 'smi'))
    molCopy.AddHydrogens()
    smiles = getCanonicalSmiles(molCopy)
    fileName = smilesToFilename(smiles)
    if software.lower() == "gaussian" or software.lower() == "gauss":
        logging.debug('using Gaussian to calculate...')
        logging.debug('the molecule that is about to be separated is {}'.format(smiles))
        # printMol(molCopy, fileFormat = "sdf", printOut = True)
        fragments = molCopy.Separate()
        logging.debug('after separate')
        logging.debug('there are {} fragments'.format(len(fragments)))
        if len(fragments) >= 2:
            # Disconnected species: sum per-fragment energies, using the cache.
            energySum = 0.0
            for i, frag in enumerate(fragments):
                fragSmiles = getCanonicalSmiles(frag)
                logging.debug('fragment{} = {}'.format(i, fragSmiles))
                if fragSmiles in fragmentEnergyMap:
                    frag.SetEnergy(fragmentEnergyMap[fragSmiles])
                    # Logging fix: energies are floats; %d silently truncated them.
                    logging.debug("this fragment's energy has been calculated. It's %f kcal/mol" % (fragmentEnergyMap[fragSmiles]))
                else:
                    fragmentEnergy = self.doGaussian(frag, fileName + str(i), fragSmiles)
                    logging.debug("the energy of this fragment is %f kcal/mol" % (fragmentEnergy))
                    frag.SetEnergy(fragmentEnergy)
                    fragmentEnergyMap[fragSmiles] = fragmentEnergy
                energySum += fragmentEnergyMap[fragSmiles]
            logging.info("The energy of the molecule is %f kcal/mol" % (energySum))
            return energySum
        else:
            return self.doGaussian(molCopy, fileName, smiles)
    # Make the implicit None return for unknown packages explicit and loud.
    logging.error("computeQMEnergy: unsupported software '%s'" % (software,))
    return None
def getGaussianEnergy(self, fileName):
    """Extract the SCF energy from a Gaussian log file, in kcal/mol.

    Gaussian writes a backslash-delimited "archive" section at the end of
    the log; this joins all archive lines and looks for the ``HF=<hartree>``
    field, converting Hartree to kcal/mol (factor 627.5).

    :param fileName: path to the Gaussian ``.log`` output.
    :returns: energy in kcal/mol.
    :raises EnergyReadingError: if no ``HF=`` field is found.
    """
    outputChunk = ''
    # Fix: the original leaked the file handle; 'with' guarantees closure.
    with open(fileName, 'r') as f:
        for line in f:
            # Archive lines are the ones containing backslash separators.
            if '\\' in line:
                outputChunk += line.strip()
    for word in outputChunk.split('\\'):
        # Fix: use startswith — a mid-word "HF=" match would have made
        # word[3:] garbage and crashed float().
        if word.startswith("HF="):
            logging.debug("HF=" + word[3:])
            return 627.5 * float(word[3:])
    raise EnergyReadingError("Can't read energy from gaussian output %s" % (fileName))
# def getOxidations(self, mat, compact=True): # O(n^2) time, O(n) space
# oxidations = set()
# for i in range(1, self.nAtom):
# for j in range(1, i):
# if mat[i][j] >= 1:
# oxidations.add((i, j))
# for i in range(1, self.nAtom):
# if mat[i][i] >= 2:
# oxidations.add((i, i))
#
# return oxidations
#
# def checkMat(self, mat, delta=None):
# if delta is not None:
# for i, j in delta:
# mat[i][j] += delta[(i, j)]
# result = True
# for i in range(1, self.nAtom + 1):
# nBonds = sum(mat[i][1:]) - mat[i][i]
# formalCharge = numValenceElectron(mat[i][0]) - nBonds + mat[i][i]
# if nBonds not in self._allowedCoordNum[(mat[i][0], formalCharge)]:
# result = False
# break
# if delta is not None:
# for i, j in delta:
# mat[i][j] -= delta[(i, j)]
# return result
def checkChangeTable(self, molMat, changeTable, tboChange, fcChange):
    """Return True if every atom touched by a proposed electron move keeps
    a chemically allowed coordination number.

    Matrix layout (established by callers): row 0 holds atomic numbers,
    row ``nAtom+1`` total bond orders, row ``nAtom+2`` formal charges.

    :param molMat: molecule matrix (see layout above).
    :param changeTable: proposed bond-order deltas (unused here; kept for
        signature compatibility with ``applyChanges``).
    :param tboChange: per-atom total-bond-order deltas.
    :param fcChange: per-atom formal-charge deltas.
    """
    # Fix: the original iterated ``tboChange.keys() + fcChange.keys()``,
    # which only works on Python 2 (list concatenation) and checked atoms
    # present in both dicts twice.  A set union works on 2 and 3 and
    # visits each atom once.
    for atom in set(tboChange) | set(fcChange):
        allowed = self._allowedCoordNum.get(
            (molMat[0][atom], molMat[self.nAtom + 2][atom] + fcChange.get(atom, 0)), [])
        if molMat[self.nAtom + 1][atom] + tboChange.get(atom, 0) not in allowed:
            return False
    return True
def applyChanges(self, molMat, changeTable, tboChange, fcChange):
    """Apply an electron-move delta set to ``molMat`` in place.

    ``changeTable`` updates individual bond-order entries, ``tboChange``
    the total-bond-order row (``nAtom+1``) and ``fcChange`` the
    formal-charge row (``nAtom+2``).
    """
    for (row, col), delta in changeTable.items():
        molMat[row][col] += delta
    for atomIdx, delta in tboChange.items():
        molMat[self.nAtom + 1][atomIdx] += delta
    for atomIdx, delta in fcChange.items():
        molMat[self.nAtom + 2][atomIdx] += delta
def isomerSearch(self):
    """Breadth-first search of the redox-isomer network reachable from
    ``self._reactantString``.

    Each BFS generation moves one or two electron pairs: an "eSource"
    (a lone pair ``(i,)`` or a bond ``(i, j)`` that is oxidized) donates
    to an "eTarget" (an atom or atom pair that is reduced).  Every
    chemically allowed product becomes a ``ReactionGraphNode`` in
    ``self._reactionMap``; edges record which electron pairs moved.

    Returns ``(head, targetNode)`` where ``head`` is the reactant node and
    ``targetNode`` is the product node, or ``(head, None)`` when the
    product was never generated.
    """
    reactantMol = strToMol('smi', self._reactantString)
    # Re-canonicalize so later map lookups by SMILES are consistent.
    self._reactantString = getCanonicalSmiles(reactantMol)
    logging.info("reactant = {}".format(self._reactantString))
    reactantMol.AddHydrogens()
    printMol(reactantMol, fileFormat = "gjf", printOut = True)
    if not self._noProduct:
        productMol = strToMol('smi', self._productString)
        self._productString = getCanonicalSmiles(productMol)
        logging.info("product = {}".format(self._productString))
    self.nAtom = reactantMol.NumAtoms()
    # activeList / ignoreList are complementary 1-based atom index sets;
    # whichever one the user supplied defines the other.
    if self.activeList and not self.ignoreList:
        allset = set(range(1, self.nAtom+1))
        self.ignoreList = allset - self.activeList
    elif self.ignoreList and not self.activeList:
        allset = set(range(1, self.nAtom+1))
        self.activeList = allset - self.ignoreList
    elif not self.activeList and not self.ignoreList:
        self.activeList = set(range(1, self.nAtom+1))
    logging.info("ignoreList = {}".format(self.ignoreList))
    q = deque()
    head = ReactionGraphNode(mol=reactantMol)
    q.append(head)
    self._reactionMap[self._reactantString] = head
    self._energyMap = {self._reactantString: 0.0}
    if self._doCalculation:
        # All reported energies are relative to the reactant.
        self._energyBaseLine = self.computeQMEnergy(reactantMol, "gaussian", self._gaussianKeywords, self._fragmentEnergyMap)
    else:
        self._energyBaseLine = 0.0
    head.energy = 0.0
    nStep = 0
    while q: # start Breadth-First-Search
        qSize = len(q)
        nStep += 1
        logging.info("=========================================================")
        logging.info(" nStep = "+str(nStep))
        # Stop at the absolute step cap, or once we are _maxExtraStep
        # generations beyond the shortest route to the target.
        if nStep >= self._maxStep or nStep > self._targetLeastStep + self._maxExtraStep:
            logging.info("step number {}, exceeding maximum step {}".format(nStep, min(self._maxStep, self._targetLeastStep+self._maxExtraStep)))
            break
        for nNode in range(qSize): # process intermediates one generation at a time
            logging.info("***************************************************")
            logging.info(" processing a new molecule")
            currNode = q.popleft()
            # Never expand past the target itself.
            if currNode.smiles == self._productString:
                continue
            currMol = ob.OBMol(currNode.mol)
            # Collect oxidizable sites: atoms with lone pairs, and all bonds.
            oxidations = {'bond': set(), 'atom': set()}
            for atom in ob.OBMolAtomIter(currMol):
                nLonePair = numValenceElectron(atom.GetAtomicNum()) - atomTotalBondOrder(atom) + atom.GetFormalCharge()
                if nLonePair > 0:
                    oxidations['atom'].add(atom)
            for bond in ob.OBMolBondIter(currMol):
                oxidations['bond'].add(bond)
            def addMol(oxidized, reduced, tempMat=None):
                # Register a candidate product (closure over currNode/nStep).
                # ``oxidized``/``reduced`` are the eSource/eTarget lists that
                # produced it; ``tempMat`` is the mutated matrix (matrix mode).
                logging.debug('in addMol')
                logging.debug('oxidized: {}\nreduced: {}'.format(oxidized, reduced))
                if self._matrixForm:
                    # logging.debug('\n'+str(tempMat))
                    newMol = matToMol(tempMat)
                # NOTE(review): in the non-matrix path ``newMol`` is assigned
                # inside this function only in the branch above, which makes
                # it function-local and unbound here (Python scoping) — looks
                # like only matrix mode is exercised; confirm.
                newMolSmiles = getCanonicalSmiles(newMol)
                logging.info("newSmiles = "+newMolSmiles)
                if newMolSmiles == self._productString:
                    logging.info("target found!!!")
                    self._targetLeastStep = nStep
                    self._targetFound = True
                # if self._structureScreen:
                #     if newMolSmiles in self._invalidStructures:
                #         logging.info("This molecule is invalid according to isInvalidStructure, not adding it")
                #         return
                #     elif self.isInvalidStructure(newMol):
                #         logging.info("This molecule is invalid according to isInvalidStructure, not adding it")
                #         self._invalidStructures.add(newMolSmiles)
                #         return
                if newMolSmiles not in self._reactionMap:
                    logging.info("new molecule found! Adding it to the map")
                    if self._doCalculation and self._preEnergyScreen:
                        # Screen each candidate by relative energy before
                        # admitting it to the search frontier.
                        absoluteEnergy = self.computeQMEnergy(newMol, "gaussian", self._gaussianKeywords,self._fragmentEnergyMap)
                        logging.debug("absoluteEnergy is %f kcal/mol"%(absoluteEnergy))
                        logging.debug("energy base line is "+str(self._energyBaseLine))
                        energy = absoluteEnergy - self._energyBaseLine
                        logging.info("relative energy is %f kcal/mol"%(energy))
                        self._energyMap[newMolSmiles] = energy
                        logging.info("Screening energy")
                        if energy - currNode.energy < self._intermediateThresh:
                            logging.info("low energy intermediate found, adding it to the map...")
                            newNode = ReactionGraphNode(mol=newMol, depth=nStep)
                            newNode.energy = energy
                            self._reactionMap[newMolSmiles] = newNode
                            if newMolSmiles not in currNode.neighbors:
                                logging.info('adding the edge')
                                currNode.neighbors[newMolSmiles] = ReactionGraphEdge(currNode, newNode, oxidized, reduced)
                            q.append(newNode)
                        else:
                            logging.info("energy too high, discarded")
                    else:
                        # No pre-screening: admit every new structure.
                        newNode = ReactionGraphNode(mol=newMol, depth=nStep)
                        self._reactionMap[newMolSmiles] = newNode
                        if newMolSmiles not in currNode.neighbors:
                            logging.info('adding the edge')
                            currNode.neighbors[newMolSmiles] = ReactionGraphEdge(currNode, newNode, oxidized, reduced)
                        q.append(newNode)
                else:
                    logging.info("This molecule has been processed")
                    # Known structure reached by a new route: add only the edge.
                    if currNode.smiles != newMolSmiles:
                        # self._reactionMap[newMolSmiles].depths.append(nStep)
                        if newMolSmiles not in currNode.neighbors:
                            logging.debug("adding {} - {}".format(currNode.smiles, newMolSmiles))
                            logging.debug("Although this molecule has been added to reactionMap, it reveals a new route. Adding only the edge...")
                            currNode.neighbors[newMolSmiles] = ReactionGraphEdge(currNode, self._reactionMap[newMolSmiles], oxidized, reduced)
                logging.debug("finish adding this molecule, no matter added or not")
            # ====================the end of addMol====================
            if self._matrixForm:
                if self._filterFc:
                    # Matrix mode with formal-charge filtering: only atoms in
                    # activeList whose formal charge stays above _minFc may
                    # accept electrons.
                    molMat = molToMat(currMol)
                    logging.debug('\n'+str(molMat))
                    eSources = set()
                    for i in self.activeList:
                        if molMat[i][i] > 0:
                            eSources.add((i,))
                        for j in self.activeList:
                            if j < i and molMat[i][j] > 0:
                                eSources.add((i, j))
                    logging.debug('eSources = {}'.format(eSources))
                    def countChanges(atoms, redox): # redox = -1 if oxidation else 1
                        # Accumulate bond-order / formal-charge / total-bond-order
                        # deltas for moving one electron pair (closure over the
                        # changeTable/tboChange/fcChange dicts below).
                        # NOTE(review): ``is 1`` relies on CPython small-int
                        # caching; should be ``== 1``.
                        if len(atoms) is 1:
                            i = atoms[0]
                            changeTable[(i, i)] += 2 * redox
                            fcChange[i] -= 2 * redox
                        else:
                            i, j = atoms
                            changeTable[(i, j)] += 1 * redox
                            changeTable[(j, i)] += 1 * redox
                            fcChange[i] -= 1 * redox
                            fcChange[j] -= 1 * redox
                            tboChange[i] += 1 * redox
                            tboChange[j] += 1 * redox
                    logging.debug('one pair')
                    for eSource1 in eSources:
                        canReduce = set()
                        eTargets = set()
                        # Reducible atoms: formal charge above the element's
                        # floor, plus the atoms of the oxidized source itself.
                        for i in self.activeList:
                            if molMat[self.nAtom+2][i] > self._minFc[molMat[0][i]]:
                                canReduce.add(i)
                        for atom in eSource1:
                            canReduce.add(atom)
                        canReduce = list(canReduce)
                        for i in range(len(canReduce)):
                            eTargets.add((canReduce[i], ))
                            for j in range(i):
                                eTargets.add((canReduce[i], canReduce[j]))
                        logging.debug('eSource1: {}'.format(eSource1))
                        logging.debug('eTargets: {}'.format(eTargets))
                        for eTarget1 in eTargets:
                            if set(eTarget1) == set(eSource1):
                                continue
                            changeTable = defaultdict(int)
                            tboChange = defaultdict(int) # total bond order change
                            fcChange = defaultdict(int) # formal charge change
                            countChanges(eSource1, -1)
                            countChanges(eTarget1, 1)
                            if self.checkChangeTable(molMat, changeTable, tboChange, fcChange):
                                # logging.debug('\n This molecule is qualified. ----------------------- ')
                                tempMat = np.array(molMat)
                                self.applyChanges(tempMat, changeTable, tboChange, fcChange)
                                # logging.debug('\n'+str(tempMat))
                                addMol([eSource1], [eTarget1], tempMat)
                        logging.debug('finishing this eTargets')
                    logging.debug('two pairs')
                    for eSource1 in eSources:
                        for eSource2 in eSources:
                            if set(eSource1) == set(eSource2): # we don't want to oxidize a specie twice. e.g. triple bond -> single bond
                                continue
                            eTargets = set()
                            canReduce = set()
                            for i in self.activeList:
                                if molMat[self.nAtom+2][i] > self._minFc[molMat[0][i]]:
                                    canReduce.add(i)
                            for atom in eSource1:
                                canReduce.add(atom)
                            for atom in eSource2:
                                canReduce.add(atom)
                            canReduce = list(canReduce)
                            for i in range(len(canReduce)):
                                eTargets.add((canReduce[i], ))
                                for j in range(i):
                                    eTargets.add((canReduce[i], canReduce[j]))
                            logging.debug('eSource1 = {}, eSource2 = {}'.format(eSource1, eSource2))
                            logging.debug('eTargets: {}'.format(eTargets))
                            for eTarget1 in eTargets:
                                for eTarget2 in eTargets:
                                    if set(eTarget1) == set(eSource1) or set(eTarget2) == set(eSource2) or \
                                       set(eTarget1) == set(eSource2) or set(eTarget2) == set(eSource1):
                                        continue
                                    changeTable = defaultdict(int)
                                    tboChange = defaultdict(int) # total bond order change
                                    fcChange = defaultdict(int) # formal charge change
                                    countChanges(eSource1, -1)
                                    countChanges(eTarget1, 1)
                                    countChanges(eSource2, -1)
                                    countChanges(eTarget2, 1)
                                    if self.checkChangeTable(molMat, changeTable, tboChange, fcChange):
                                        # logging.debug('\n This molecule is qualified. ----------------------- ')
                                        tempMat = np.array(molMat)
                                        self.applyChanges(tempMat, changeTable, tboChange, fcChange)
                                        # logging.debug('\n'+str(tempMat))
                                        addMol([eSource1, eSource2], [eTarget1, eTarget2], tempMat)
                            logging.debug('finishing this eTargets')
                else: # no filter at ox/red level
                    # Matrix mode without formal-charge filtering: every atom
                    # and bonded pair is both a potential source and target.
                    molMat = molToMat(currMol)
                    logging.debug('\n'+str(molMat))
                    eSources, eTargets = set(), set()
                    for i in range(1, self.nAtom+1):
                        if molMat[i][i] > 0:
                            eSources.add((i,))
                        eTargets.add((i,))
                        for j in range(1, i):
                            if molMat[i][j] > 0:
                                eSources.add((i, j))
                            eTargets.add((i, j))
                    logging.debug(eTargets)
                    def countChanges(atoms, redox): # redox = -1 if oxidation else 1
                        # Same delta accumulation as the filtered variant above.
                        # NOTE(review): ``is 1`` should be ``== 1`` (see above).
                        if len(atoms) is 1:
                            changeTable[(atoms[0], atoms[0])] += 2 * redox
                            fcChange[atoms[0]] -= 2 * redox
                        else:
                            changeTable[atoms[0], atoms[1]] += 1 * redox
                            changeTable[atoms[1], atoms[0]] += 1 * redox
                            fcChange[atoms[0]] -= 1 * redox
                            fcChange[atoms[1]] -= 1 * redox
                            tboChange[atoms[0]] += 1 * redox
                            tboChange[atoms[1]] += 1 * redox
                    # Single electron-pair moves.
                    for eSource1 in eSources:
                        for eTarget1 in eTargets:
                            changeTable = defaultdict(int)
                            tboChange = defaultdict(int) # total bond order change
                            fcChange = defaultdict(int) # formal charge change
                            countChanges(eSource1, -1)
                            countChanges(eTarget1, 1)
                            if self.checkChangeTable(molMat, changeTable, tboChange, fcChange):
                                # logging.debug('\n This molecule is qualified. ----------------------- ')
                                tempMat = np.array(molMat)
                                self.applyChanges(tempMat, changeTable, tboChange, fcChange)
                                # logging.debug('\n'+str(tempMat))
                                addMol([eSource1], [eTarget1], tempMat)
                    # Concerted two electron-pair moves.
                    for eSource1 in eSources:
                        for eSource2 in eSources:
                            for eTarget1 in eTargets:
                                for eTarget2 in eTargets:
                                    changeTable = defaultdict(int)
                                    tboChange = defaultdict(int) # total bond order change
                                    fcChange = defaultdict(int) # formal charge change
                                    countChanges(eSource1, -1)
                                    countChanges(eTarget1, 1)
                                    countChanges(eSource2, -1)
                                    countChanges(eTarget2, 1)
                                    if self.checkChangeTable(molMat, changeTable, tboChange, fcChange):
                                        # logging.debug('\n This molecule is qualified. ----------------------- ')
                                        tempMat = np.array(molMat)
                                        self.applyChanges(tempMat, changeTable, tboChange, fcChange)
                                        # logging.debug('\n'+str(tempMat))
                                        addMol([eSource1, eSource2], [eTarget1, eTarget2], tempMat)
            else:
                # Object mode: mutate OBMol copies directly via
                # oxidize/reduce and validate with checkLuisRule.
                eSources = set()
                for atom in oxidations['atom']:
                    eSources.add(atom.GetIdx())
                for bond in oxidations['bond']:
                    eSources.add((bond.GetBeginAtom().GetIdx(), bond.GetEndAtom().GetIdx()))
                eTargets = set()
                for i in range(1, self.nAtom+1):
                    eTargets.add(i)
                    for j in range(i+1, self.nAtom+1):
                        eTargets.add((i, j))
                for eSource1 in eSources:
                    for eTarget1 in eTargets:
                        newMol = ob.OBMol(currMol)
                        self.oxidize(newMol, eSource1)
                        self.reduce(newMol, eTarget1)
                        if self.checkLuisRule(eSource1, eTarget1, mol=newMol):
                            addMol([eSource1], [eTarget1])
                for eSource1 in eSources:
                    for eSource2 in eSources:
                        for eTarget1 in eTargets:
                            for eTarget2 in eTargets:
                                newMol = ob.OBMol(currMol)
                                self.oxidize(newMol, eSource1)
                                self.oxidize(newMol, eSource2)
                                self.reduce(newMol, eTarget1)
                                self.reduce(newMol, eTarget2)
                                if self.checkLuisRule(eSource1, eSource2, eTarget1, eTarget2, mol=newMol):
                                    addMol([eSource1, eSource2], [eTarget1, eTarget2])
                # NOTE(review): a large block (~180 lines) of commented-out
                # legacy code lived here — an explicit bond-breaking/forming
                # search for non-cyclic and cyclic concerted two-bond
                # reactions (createNewBond/breakBond loops over
                # zeroElec/twoElec givers and takers).  It was superseded by
                # the electron-pair (eSource/eTarget) enumeration above and
                # has been removed; see version-control history for the
                # original experiment.
    if not self._noProduct:
        logging.info("targetSmiles = "+self._productString)
    else:
        logging.info('no target provided')
    logging.info("targetLeastStep = {}".format(self._targetLeastStep))
    logging.info("===============End of the isomer search===============")
    if self._productString in self._reactionMap:
        return head, self._reactionMap[self._productString]
    else:
        logging.info("target not found")
        return head, None
def printTextReactionMap(self, head):
    """Breadth-first walk of the reaction graph from ``head``, printing one
    BFS generation per dashed section to stdout (plain-text debug dump).

    NOTE(review): this method uses Python 2 print syntax (trailing comma to
    suppress the newline, and a bare ``print`` statement at the end) and
    only renders as intended under Python 2 — confirm the target
    interpreter before porting.
    """
    q = deque()
    # Seed with a dummy edge so every queue item is a ReactionGraphEdge.
    q.append(ReactionGraphEdge(None, head, [], []))
    visited = set()
    while len(q) > 0:
        qSize = len(q)
        print("\n------------------------")
        for nLevel in range(qSize):  # one BFS generation per section
            currEdge = q.popleft()
            # currNode, brokenBonds, createdBonds = q.popleft()
            print(currEdge.node.smiles, 'b ', currEdge.eSources, 'c ', currEdge.eTargets),
            if currEdge.node.smiles not in visited:
                visited.add(currEdge.node.smiles)
                for molSmiles, nextEdge in currEdge.node.neighbors.items():
                    q.append(nextEdge)
        print
def printGraphicReactionMap(self, head):
    """Render the whole reaction graph as Graphviz input.

    Writes ``dot/dot.gv`` plus one SVG depiction per molecule under
    ``static/pics/``.  When ``self._pathOnly`` is set, only edges marked
    ``onPath`` are followed; when ``self._doTsSearch`` is set, each edge is
    labeled with its transition-state energy.

    :param head: reactant ``ReactionGraphNode`` to start the BFS from.
    :returns: list of ``(fromSmiles, toSmiles)`` edge tuples written out.
    """
    q = deque()
    q.append(ReactionGraphEdge(None, head, [], []))
    visited = set()
    if not os.path.isdir("dot"):
        os.system("mkdir dot")
    if not os.path.isdir("static/pics"):
        os.system("mkdir static/pics")
    with open("dot/dot.gv","w") as dotFile:
        # 'concentrate' merges parallel edges in Graphviz output.
        dotFile.write("digraph G {\nconcentrate = true\n")
        edges = []
        nNodes = 0
        while len(q) > 0:
            qSize = len(q)
            nNodes += qSize
            for nLevel in range(qSize):
                currEdge = q.popleft()
                if currEdge.node.smiles not in visited:
                    visited.add(currEdge.node.smiles)
                    fileString = smilesToFilename(currEdge.node.smiles)
                    formatString = 'svg'
                    # One picture per molecule, referenced by the dot node.
                    with open("static/pics/"+fileString+'.'+formatString, 'w') as picFile:
                        picFile.write(printMol(strToMol('smi', currEdge.node.smiles), "svg"))
                    if self._doCalculation:
                        dotFile.write(" \""+currEdge.node.smiles+"\" [image = \"../static/pics/"+fileString+'.'+formatString+"\", label = \""+str(currEdge.node.energy)+" kcal/mol\", shape = none, labelloc = b]\n")
                    else:
                        dotFile.write(" \""+currEdge.node.smiles+"\" [image = \"../static/pics/"+fileString+'.'+formatString+"\", label = \"\", shape = none, labelloc = b]\n")
                    for molSmiles, nextEdge in currEdge.node.neighbors.items():
                        # Follow the edge unless path-only mode excludes it.
                        if self._pathOnly and nextEdge.onPath or not self._pathOnly:
                            q.append(nextEdge)
                            edges.append((currEdge.node.smiles, nextEdge.node.smiles))
                            if self._doTsSearch:
                                dotFile.write(' "{}" -> "{}" [ label="{:<8}" ];\n'.format(currEdge.node.smiles, nextEdge.node.smiles, str(nextEdge.tsEnergy)))
                            else:
                                dotFile.write(' "{}" -> "{}";\n'.format(currEdge.node.smiles, nextEdge.node.smiles))
        dotFile.write("}\n")
        # Summary counters, emitted as dot comments.
        dotFile.write('//nNodes = {}\n'.format(nNodes))
        dotFile.write('//nEdges = {}\n'.format(len(edges)))
    return edges
def findDfsPath(self, head, end, paths, targetLeastStep, path=None):
    """Depth-first search collecting every simple path from ``head`` to
    ``end``.

    Paths longer than ``targetLeastStep + self._maxExtraStep`` nodes are
    pruned.  Each complete path (a list of nodes, including both
    endpoints) is appended to ``paths``.

    :param head: current node of the walk.
    :param end: destination node.
    :param paths: output list, mutated in place.
    :param targetLeastStep: length of the shortest known route.
    :param path: running path for the recursion; leave as None at the top
        call.
    """
    # Start (or extend) the running path with the current node.
    if path is None:
        path = []
    path.append(head)
    depthLimit = targetLeastStep + self._maxExtraStep
    if len(path) > depthLimit:
        return  # prune: route already too long
    if head == end:
        paths.append(path)
        return
    # Recurse into each unvisited neighbor with a private copy of the path.
    for edge in head.neighbors.values():
        if edge.node in path:
            continue  # avoid cycles
        self.findDfsPath(edge.node, end, paths, targetLeastStep, path=list(path))
def labelPathItems(self, paths, head):
    """Mark every node and edge lying on one of ``paths`` with
    ``onPath = True`` so that path-only rendering can filter on it.

    When post-hoc energy screening is enabled (``_doCalculation`` and
    ``_energyScreen`` set, ``_preEnergyScreen`` not set), each step of a
    path is screened in order; the first step that rises more than
    ``self._intermediateThresh`` above its predecessor truncates the rest
    of that path.

    :param paths: lists of nodes as produced by ``findDfsPath``.
    :param head: reactant node; always marked on-path.
    """
    head.onPath = True
    for path in paths:
        breakPath = False
        for i, node in enumerate(path):
            if breakPath:
                break  # remainder of this path was screened out
            if i+1 < len(path):
                if self._doCalculation and self._energyScreen and not self._preEnergyScreen:
                    if path[i+1].smiles not in self._energyMap:
                        # First time this species is seen: compute its energy
                        # relative to the reactant base line and cache it.
                        absoluteEnergy = self.computeQMEnergy(path[i+1].mol, "gaussian", self._gaussianKeywords, self._fragmentEnergyMap)
                        logging.debug("absoluteEnergy is %f kcal/mol"%(absoluteEnergy))
                        logging.debug("energy base line is "+str(self._energyBaseLine))
                        energy = absoluteEnergy - self._energyBaseLine
                        logging.info("relative energy is %f kcal/mol"%(energy))
                        logging.info("Screening energy")
                        self._energyMap[path[i+1].smiles] = energy
                    else:
                        logging.debug("energy already calculated")
                        energy = self._energyMap[path[i+1].smiles]
                    if energy - path[i].energy < self._intermediateThresh:
                        logging.info("low energy intermediate found, marking it as onPath")
                        path[i+1].energy = energy
                        node.neighbors[path[i+1].smiles].onPath = True
                        path[i+1].onPath = True
                    else:
                        logging.info("energy too high, discarded")
                        breakPath = True
                else:
                    # No screening: mark the edge and the next node directly.
                    node.neighbors[path[i+1].smiles].onPath = True
                    path[i+1].onPath = True
def printGraphicPathMap(self, paths):
    """Write ``dot/paths.gv``: a Graphviz digraph containing only the nodes
    and edges that occur on ``paths``.

    When ``self._doCalculation`` is set, each node's energy is (re)computed
    before it is written.  Node images are referenced from
    ``static/pics/<filename>.svg`` (produced elsewhere).

    :param paths: lists of nodes as produced by ``findDfsPath``.
    """
    if not os.path.isdir("dot"):
        os.system("mkdir dot")
    if not os.path.isdir("static/pics"):
        os.system("mkdir static/pics")
    visitedNode = set()
    visitedEdge = set()
    # Fix: the original opened the file without ever closing it; 'with'
    # guarantees the handle is flushed and closed even on error.
    with open("dot/paths.gv", 'w') as dotFile:
        dotFile.write("digraph paths {")
        for path in paths:
            for i, node in enumerate(path):
                if node not in visitedNode:
                    visitedNode.add(node)
                    if self._doCalculation:
                        node.energy = self.computeQMEnergy(node.mol, "gaussian", self._gaussianKeywords, self._fragmentEnergyMap)
                    dotFile.write(" \"" + node.smiles + "\" [image = \"../static/pics/" + smilesToFilename(node.smiles) + ".svg\", label = \""+ str(node.energy) + " kcal/mol\", shape = none, labelloc = b]\n")
                if i < len(path)-1:
                    # Deduplicate edges shared between overlapping paths.
                    if (node, path[i+1]) not in visitedEdge:
                        visitedEdge.add((node, path[i+1]))
                        dotFile.write(" \"" + node.smiles + "\" -> \"" + path[i+1].smiles + "\";\n")
        dotFile.write("}\n")
def getTsEstim(self, node, edge):
    """Build a transition-state estimate for the reaction along ``edge``.

    Constructs two 3-D structures from the reactant of ``edge`` — one with
    the product bonds formed (reactant side after bond creation removed
    again) and one with the reactant bonds broken — then hands both to
    ``SeamTsSearch``.

    :param node: ``ReactionGraphNode`` holding the reactant geometry.
    :param edge: ``ReactionGraphEdge`` whose ``eSources``/``eTargets``
        carry ``(atom1, atom2, order?, charge?)`` bond records.
    :returns: the TS estimate molecule, or ``None`` on convergence failure.
    """
    reactantSide = pybel.readstring('sdf', pybel.Molecule(node.mol).write('sdf'))
    reactantSide.make3D('uff')

    def atomOf(mol, idx):
        # eSources/eTargets store 1-based atom indices.
        return mol.atoms[idx - 1].OBAtom

    # Form the product bonds so the optimized geometry brings the reacting
    # atoms close together, then relax with UFF.
    for bd in edge.eTargets:
        self.createNewBond(reactantSide.OBMol, atomOf(reactantSide, bd[0]), atomOf(reactantSide, bd[1]), bd[2], bd[3])
    reactantSide.localopt('uff')
    productSide = pybel.readstring('sdf', reactantSide.write('sdf'))
    # Undo the formed bonds on the reactant side...
    for bd in edge.eTargets:
        self.breakBond(reactantSide.OBMol, atomOf(reactantSide, bd[0]), atomOf(reactantSide, bd[1]), bd[2], bd[3])
    # ...and break the reactant bonds on the product side.
    for bd in edge.eSources:
        self.breakBond(productSide.OBMol, atomOf(productSide, bd[0]), atomOf(productSide, bd[1]), bd[2], bd[3])
    try:
        return SeamTsSearch(reactantSide, productSide, 'uff')
    except TsEstimConvergeError:
        print("TS estimate convergence failure")
        logging.error("TS estimate convergence failure (SeamTsSearch fails)")
        return None
def findTsOnPath(self, head):
    """Locate a transition state for every on-path edge of the reaction
    graph.

    BFS over edges marked ``onPath``: for each edge a TS estimate is built
    (``getTsEstim``), written to ``gaussian/ts/<edge>.com`` and refined
    with Gaussian (``gdv``); the resulting TS energy (relative to the
    reactant base line) is stored on the edge.  A previously computed
    reverse edge re-uses that TS instead of recomputing.

    :param head: reactant ``ReactionGraphNode``.
    """
    preQ = [edge for edge in head.neighbors.values() if edge.onPath]
    q = deque(preQ)
    visitedEdge = set()
    if not os.path.isdir('gaussian'):
        os.system('mkdir gaussian')
    # Start every run with a clean TS scratch directory.
    if os.path.isdir('gaussian/ts'):
        os.system('rm -f gaussian/ts/*')
    else:
        os.system('mkdir gaussian/ts')
    while q:
        currEdge = q.popleft()
        visitedEdge.add((currEdge.fromNode.smiles, currEdge.node.smiles))
        print('\n========finding TS=======')
        print(currEdge.fromNode.smiles, '->', currEdge.node.smiles)
        print(visitedEdge)
        if (currEdge.node.smiles, currEdge.fromNode.smiles) in visitedEdge:
            # Forward and reverse reactions share the same TS.
            print('reversed TS is calculated before')
            print(currEdge.node.neighbors)
            try:
                reverseEdge = currEdge.node.neighbors[currEdge.fromNode.smiles]
            except KeyError:
                # NOTE(review): debugger hook left in for a graph-consistency
                # failure (reverse edge marked visited but absent) — remove
                # before production use.
                import pdb; pdb.set_trace()
            currEdge.ts = reverseEdge.ts
            currEdge.tsEnergy = reverseEdge.tsEnergy
        else:
            print('calculating TS')
            # Pure downhill/uphill steps have no saddle point to search for.
            # NOTE(review): these 'return' statements abandon the whole BFS,
            # not just this edge — 'continue' may have been intended; confirm.
            if len(currEdge.eSources) == 0:
                print('pure bond forming reaction, energy goes downhill only, no TS')
                return
            if len(currEdge.eTargets) == 0:
                print('pure bond breaking reaction, energy goes uphill only, no TS')
                return
            mol = currEdge.fromNode.mol
            currTs = self.getTsEstim(currEdge.fromNode, currEdge)
            if currTs is not None:
                print('TS esitimate:')
                print(currTs.write('mol'))
                currEdge.ts = currTs
                filename = smilesToFilename(currEdge.fromNode.smiles) + '-' + smilesToFilename(currEdge.node.smiles)
                currTs.title = "ReactionRoute.findTsOnPath"
                # 'k' is the pybel gjf option carrying the route keywords.
                opt = {'k': self._gaussianTsKeywords}
                currTs.write('gjf', 'gaussian/ts/'+filename+'.com', overwrite=True, opt=opt)
                currTs.title = ''
                # Escape shell metacharacters in the filename before os.system.
                gaussianCall = ''
                for c in filename:
                    if c == '(' or c == ')' or c == '$':
                        gaussianCall += '\\'
                    gaussianCall += c
                print("gdv gaussian/ts/"+gaussianCall+".com")
                logging.info("gdv gaussian/ts/"+gaussianCall+".com")
                success = os.system('gdv gaussian/ts/'+gaussianCall+'.com')
                try:
                    absoluteTsEnergy = self.getGaussianEnergy('gaussian/ts/'+filename+'.log')
                    currEdge.tsEnergy = absoluteTsEnergy - self._energyBaseLine
                    print('TS successfully calculated. The energy is {}'.format(currEdge.tsEnergy))
                except EnergyReadingError:
                    # Sentinel string marks a Gaussian energy-read failure.
                    currEdge.tsEnergy = 'gauTS E'
            else:
                print('TS calculation failed')
                currEdge.ts = None
                # Sentinel string marks a failed TS estimate.
                currEdge.tsEnergy = 'tsEstim'
        # Continue the BFS along on-path, not-yet-visited edges.
        for molSmiles, nextEdge in currEdge.node.neighbors.items():
            if nextEdge.onPath and (currEdge.node.smiles, molSmiles) not in visitedEdge:
                print('adding {} -> {} to the queue'.format(currEdge.node.smiles, molSmiles))
                q.append(nextEdge)
if __name__ == "__main__":
    # Command-line driver.  Flags (all optional):
    #   -j <file>.json  load a JSON job description
    #   -r <smiles>     reactant SMILES        -p <smiles>  product SMILES
    #   -e              compute QM energies and screen intermediates
    #   -q              submit Gaussian jobs via the gsub queue
    #   -n              explore without a target product
    #   -a              only emit activeatoms.com (atom selection aid), then quit
    logging.basicConfig(filename = "log", level=logging.DEBUG, filemode='w')
    rr = ReactionRoute()
    flags = {}
    inputName = None
    # Minimal flag parser: "-x value" pairs or bare "-x" switches.
    for i, arg in enumerate(sys.argv):
        if arg[0] == '-':
            if i+1 < len(sys.argv) and sys.argv[i+1][0] != '-':
                flags[arg[1:]] = sys.argv[i+1]
            else:
                flags[arg[1:]] = ''
    if 'j' in flags:
        inputName = flags['j'][:-5]  # strip the trailing ".json"
        with open(inputName+'.json') as f:
            rr.inputJson(f.read())
    if 'r' in flags:
        rr._reactantString = flags['r']
    if 'p' in flags:
        rr._productString = flags['p']
    if 'e' in flags:
        rr._doCalculation = True
        rr._energyScreen = True
    if 'q' in flags:
        rr._gsub = True
    if 'n' in flags:
        rr._noProduct = True
    if 'a' in flags:
        # Build a rough 3-D structure so the user can pick active atoms.
        pymol = pybel.readstring('smi', rr._reactantString)
        mol = pymol.OBMol
        builder = ob.OBBuilder()
        builder.Build(mol)
        separateFragments(pymol)
        pymol.title = 'for select active atoms'
        pymol.addh()
        pymol.localopt()
        with open('activeatoms.com', 'w') as f:
            f.write(pymol.write('gjf'))
        exit()
    import cProfile
    # cProfile.run('head, target= rr.isomerSearch()')
    head, target = rr.isomerSearch()
    # rr.printTextReactionMap(head)
    if target is not None and not rr._noProduct:
        paths = []
        rr.findDfsPath(head, target, paths, rr._targetLeastStep)
        rr.labelPathItems(paths, head)
    else:
        # No target reached: render the whole graph, not just paths.
        rr._pathOnly = False
    if rr._doTsSearch:
        rr.findTsOnPath(head)
    edges = rr.printGraphicReactionMap(head)
    print(edges)
    if inputName is not None:
        # Prepend the job description (as // comments) to the final .gv file.
        with open('dot/{}.gv'.format(inputName), 'w') as dotF:
            with open('{}.json'.format(inputName)) as inputF:
                for line in inputF:
                    dotF.write('//{}'.format(line))
            with open('dot/dot.gv') as dotF_origin:
                dotF.write(dotF_origin.read())
        print("dot -Tsvg dot/dot.gv -o dot/{}.svg".format(inputName))
        # Bug fix: each os.system() spawns its own shell, so the original
        # separate 'cd dot' never affected the following 'dot' command and
        # the SVG render silently failed.  Run both in one shell invocation.
        os.system('cd dot && dot -Tsvg dot.gv -o {}.svg'.format(inputName))
"Gaussian",
"PyMOL",
"Pybel"
] | e7f263812c68cfda525a5fb093a19500b292177c37ebeae21ae0a8e53f34c9bf |
# -*- coding: utf-8 -*-
from kivy.lang import Builder
from kivy.uix.modalview import ModalView
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.boxlayout import BoxLayout
from kivymd.button import MDFlatButton, MDIconButton
from kivymd.theming import ThemableBehavior
from kivymd.elevationbehavior import ElevationBehavior
from kivy.properties import ObjectProperty, ListProperty
from kivymd.label import MDLabel
from kivy.metrics import dp
from kivy.utils import get_color_from_hex
from kivymd.color_definitions import colors
Builder.load_string("""
#:import SingleLineTextField kivymd.textfields.SingleLineTextField
#:import MDTabbedPanel kivymd.tabs.MDTabbedPanel
#:import MDTab kivymd.tabs.MDTab
<MDThemePicker>:
size_hint: (None, None)
size: dp(260), dp(120)+dp(290)
pos_hint: {'center_x': .5, 'center_y': .5}
canvas:
Color:
rgb: app.theme_cls.primary_color
Rectangle:
size: dp(260), dp(120)
pos: root.pos[0], root.pos[1] + root.height-dp(120)
Color:
rgb: app.theme_cls.bg_normal
Rectangle:
size: dp(260), dp(290)
pos: root.pos[0], root.pos[1] + root.height-(dp(120)+dp(290))
MDFlatButton:
pos: root.pos[0]+root.size[0]-dp(72), root.pos[1] + dp(10)
text: "Close"
on_release: root.dismiss()
MDLabel:
font_style: "Headline"
text: "Change theme"
size_hint: (None, None)
size: dp(160), dp(50)
pos_hint: {'center_x': 0.5, 'center_y': 0.9}
MDTabbedPanel:
size_hint: (None, None)
size: dp(260), root.height-dp(135)
pos_hint: {'center_x': 0.5, 'center_y': 0.475}
id: tab_panel
tab_display_mode:'text'
MDTab:
name: 'color'
text: "Theme Color"
BoxLayout:
spacing: dp(4)
size_hint: (None, None)
size: dp(270), root.height # -dp(120)
pos_hint: {'center_x': 0.532, 'center_y': 0.89}
orientation: 'vertical'
BoxLayout:
size_hint: (None, None)
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
size: dp(230), dp(40)
pos: self.pos
halign: 'center'
orientation: 'horizontal'
BoxLayout:
MDIconButton:
size: dp(40), dp(40)
pos: self.pos
size_hint: (None, None)
canvas:
Color:
rgba: root.rgb_hex('Red')
Ellipse:
size: self.size
pos: self.pos
on_release: app.theme_cls.primary_palette = 'Red'
BoxLayout:
MDIconButton:
size: dp(40), dp(40)
pos: self.pos
size_hint: (None, None)
canvas:
Color:
rgba: root.rgb_hex('Pink')
Ellipse:
size: self.size
pos: self.pos
on_release: app.theme_cls.primary_palette = 'Pink'
BoxLayout:
MDIconButton:
size: dp(40), dp(40)
pos: self.pos
size_hint: (None, None)
canvas:
Color:
rgba: root.rgb_hex('Purple')
Ellipse:
size: self.size
pos: self.pos
on_release: app.theme_cls.primary_palette = 'Purple'
BoxLayout:
MDIconButton:
size: dp(40), dp(40)
pos: self.pos
size_hint: (None, None)
canvas:
Color:
rgba: root.rgb_hex('DeepPurple')
Ellipse:
size: self.size
pos: self.pos
on_release: app.theme_cls.primary_palette = 'DeepPurple'
BoxLayout:
size_hint: (None, None)
pos_hint: {'center_x': .5, 'center_y': 0.5}
size: dp(230), dp(40)
pos: self.pos
halign: 'center'
orientation: 'horizontal'
BoxLayout:
MDIconButton:
size: dp(40), dp(40)
pos: self.pos
size_hint: (None, None)
canvas:
Color:
rgba: root.rgb_hex('Indigo')
Ellipse:
size: self.size
pos: self.pos
on_release: app.theme_cls.primary_palette = 'Indigo'
BoxLayout:
MDIconButton:
size: dp(40), dp(40)
pos: self.pos
size_hint: (None, None)
canvas:
Color:
rgba: root.rgb_hex('Blue')
Ellipse:
size: self.size
pos: self.pos
on_release: app.theme_cls.primary_palette = 'Blue'
BoxLayout:
MDIconButton:
size: dp(40), dp(40)
pos: self.pos
size_hint: (None, None)
canvas:
Color:
rgba: root.rgb_hex('LightBlue')
Ellipse:
size: self.size
pos: self.pos
on_release: app.theme_cls.primary_palette = 'LightBlue'
BoxLayout:
MDIconButton:
size: dp(40), dp(40)
pos: self.pos
size_hint: (None, None)
canvas:
Color:
rgba: root.rgb_hex('Cyan')
Ellipse:
size: self.size
pos: self.pos
on_release: app.theme_cls.primary_palette = 'Cyan'
BoxLayout:
size_hint: (None, None)
pos_hint: {'center_x': .5, 'center_y': 0.5}
size: dp(230), dp(40)
pos: self.pos
halign: 'center'
orientation: 'horizontal'
padding: 0, 0, 0, dp(1)
BoxLayout:
MDIconButton:
size: dp(40), dp(40)
pos: self.pos
size_hint: (None, None)
canvas:
Color:
rgba: root.rgb_hex('Teal')
Ellipse:
size: self.size
pos: self.pos
on_release: app.theme_cls.primary_palette = 'Teal'
BoxLayout:
MDIconButton:
size: dp(40), dp(40)
pos: self.pos
size_hint: (None, None)
canvas:
Color:
rgba: root.rgb_hex('Green')
Ellipse:
size: self.size
pos: self.pos
on_release: app.theme_cls.primary_palette = 'Green'
BoxLayout:
MDIconButton:
size: dp(40), dp(40)
pos: self.pos
size_hint: (None, None)
canvas:
Color:
rgba: root.rgb_hex('LightGreen')
Ellipse:
size: self.size
pos: self.pos
on_release: app.theme_cls.primary_palette = 'LightGreen'
BoxLayout:
MDIconButton:
size: dp(40), dp(40)
pos: self.pos
size_hint: (None, None)
canvas:
Color:
rgba: root.rgb_hex('Lime')
Ellipse:
size: self.size
pos: self.pos
on_release: app.theme_cls.primary_palette = 'Lime'
BoxLayout:
size_hint: (None, None)
pos_hint: {'center_x': .5, 'center_y': 0.5}
size: dp(230), dp(40)
pos: self.pos
orientation: 'horizontal'
halign: 'center'
padding: 0, 0, 0, dp(1)
BoxLayout:
MDIconButton:
size: dp(40), dp(40)
pos: self.pos
size_hint: (None, None)
canvas:
Color:
rgba: root.rgb_hex('Yellow')
Ellipse:
size: self.size
pos: self.pos
on_release: app.theme_cls.primary_palette = 'Yellow'
BoxLayout:
MDIconButton:
size: dp(40), dp(40)
pos: self.pos
size_hint: (None, None)
canvas:
Color:
rgba: root.rgb_hex('Amber')
Ellipse:
size: self.size
pos: self.pos
on_release: app.theme_cls.primary_palette = 'Amber'
BoxLayout:
MDIconButton:
size: dp(40), dp(40)
pos: self.pos
size_hint: (None, None)
canvas:
Color:
rgba: root.rgb_hex('Orange')
Ellipse:
size: self.size
pos: self.pos
on_release: app.theme_cls.primary_palette = 'Orange'
BoxLayout:
MDIconButton:
size: dp(40), dp(40)
pos: self.pos
size_hint: (None, None)
canvas:
Color:
rgba: root.rgb_hex('DeepOrange')
Ellipse:
size: self.size
pos: self.pos
on_release: app.theme_cls.primary_palette = 'DeepOrange'
BoxLayout:
size_hint: (None, None)
pos_hint: {'center_x': .5, 'center_y': 0.5}
size: dp(230), dp(40)
#pos: self.pos
orientation: 'horizontal'
padding: 0, 0, 0, dp(1)
BoxLayout:
MDIconButton:
size: dp(40), dp(40)
size_hint: (None, None)
canvas:
Color:
rgba: root.rgb_hex('Brown')
Ellipse:
size: self.size
pos: self.pos
on_release: app.theme_cls.primary_palette = 'Brown'
BoxLayout:
MDIconButton:
size: dp(40), dp(40)
pos: self.pos
size_hint: (None, None)
canvas:
Color:
rgba: root.rgb_hex('Grey')
Ellipse:
size: self.size
pos: self.pos
on_release: app.theme_cls.primary_palette = 'Grey'
BoxLayout:
MDIconButton:
size: dp(40), dp(40)
#pos: self.pos
size_hint: (None, None)
canvas:
Color:
rgba: root.rgb_hex('BlueGrey')
Ellipse:
size: self.size
pos: self.pos
on_release: app.theme_cls.primary_palette = 'BlueGrey'
BoxLayout:
MDIconButton:
size: dp(40), dp(40)
size_hint: (None, None)
canvas:
Color:
rgba: app.theme_cls.bg_normal
Ellipse:
size: self.size
pos: self.pos
disabled: True
MDTab:
name: 'style'
text: "Theme Style"
BoxLayout:
size_hint: (None, None)
pos_hint: {'center_x': .3, 'center_y': 0.5}
size: self.size
pos: self.pos
halign: 'center'
spacing: dp(10)
BoxLayout:
halign: 'center'
size_hint: (None, None)
size: dp(100), dp(100)
pos: self.pos
pos_hint: {'center_x': .3, 'center_y': 0.5}
MDIconButton:
size: dp(100), dp(100)
pos: self.pos
size_hint: (None, None)
canvas:
Color:
rgba: 1, 1, 1, 1
Ellipse:
size: self.size
pos: self.pos
Color:
rgba: 0, 0, 0, 1
Line:
width: 1.
circle: (self.center_x, self.center_y, 50)
on_release: app.theme_cls.theme_style = 'Light'
BoxLayout:
halign: 'center'
size_hint: (None, None)
size: dp(100), dp(100)
MDIconButton:
size: dp(100), dp(100)
pos: self.pos
size_hint: (None, None)
canvas:
Color:
rgba: 0, 0, 0, 1
Ellipse:
size: self.size
pos: self.pos
on_release: app.theme_cls.theme_style = 'Dark'
""")
# Modal dialog that lets the user change the app's primary palette and
# theme style (Light/Dark); the actual widgets/behaviour come from the kv
# rule `<MDThemePicker>` loaded above.
class MDThemePicker(ThemableBehavior, FloatLayout, ModalView, ElevationBehavior):
# background_color = ListProperty([0, 0, 0, 0])
time = ObjectProperty()
def __init__(self, **kwargs):
super(MDThemePicker, self).__init__(**kwargs)
# Return the RGBA color (0-1 floats) for palette name `col` at the current
# accent hue; used by the kv rule to fill the color swatches.
def rgb_hex(self, col):
return get_color_from_hex(colors[col][self.theme_cls.accent_hue])
# Demo entry point: a minimal app with one button that opens the picker.
if __name__ == "__main__":
from kivy.app import App
from kivymd.theming import ThemeManager
class ThemePickerApp(App):
theme_cls = ThemeManager()
def build(self):
# The kv payload below is a runtime string; do not edit in place.
main_widget = Builder.load_string("""
#:import MDRaisedButton kivymd.button.MDRaisedButton
#:import MDThemePicker kivymd.theme_picker.MDThemePicker
FloatLayout:
MDRaisedButton:
size_hint: None, None
pos_hint: {'center_x': .5, 'center_y': .5}
size: 3 * dp(48), dp(48)
center_x: self.parent.center_x
text: 'Open theme picker'
on_release: MDThemePicker().open()
opposite_colors: True
""")
return main_widget
ThemePickerApp().run()
| PeterSurda/PyBitmessage | src/kivymd/theme_picker.py | Python | mit | 18,042 | [
"Amber"
] | d309bbdf22d64b0f395dae6da46f9b80daac31d1feb83519ed5694a245de6688 |
# This script contains the functions used for drawing the programs output images. The functions are called from graphic-output.py when they are needed.
import argparse, math
from PIL import Image, ImageDraw, ImageFont, ImageOps
from StringIO import StringIO
# Command-line interface. All flags are optional `store` arguments; which
# ones are required depends on the drawing mode selected via -my_mut.
#Common arguments
parser = argparse.ArgumentParser()
parser.add_argument('-my_mut', action="store", dest = 'my_mut') #snp or lin
parser.add_argument('-m', action="store", dest = 'mode', default = 'P')
parser.add_argument('-pname', action="store", dest='project_name')
parser.add_argument('-cross', action="store", dest='my_cross')
parser.add_argument('-snp_analysis_type', action="store", dest='my_snp_analysis_type')
parser.add_argument('-gff', action="store", dest = 'gff') #Genome feature file
parser.add_argument('-iva', action="store", dest = 'input_va') #Output of varanalyzer
parser.add_argument('-rrl', action="store", dest = 'rrl') #Regulatory region length
parser.add_argument('-f', action="store", dest = 'output_html')
#Arguments for point mutation mapping graphic output
parser.add_argument('-asnp', action="store", dest = 'input_snp')
parser.add_argument('-bsnp', action="store", dest = 'input_f_snp') #Fasta genome input
#Arguments for large insertions mapping graphic output
parser.add_argument('-a', action="store", dest = 'input')
parser.add_argument('-b', action="store", dest = 'input_f') #Fasta genome input
parser.add_argument('-ins_pos', action="store", dest = 'ins_pos')
args = parser.parse_args()
# Project directory prefix used to build every input/output path below.
project = args.project_name
def red(p):
    """Format a base-pair count/position as a short human-readable label.

    Behaviour (unchanged for all previously supported inputs):
      - 1-3 digits : returned unchanged (the original value, not a string)
      - 4-5 digits : truncated to ' kb' (e.g. 12345 -> '12 kb')
      - 6-9 digits : truncated to ' Mb' (e.g. 1234567 -> '1.23 Mb')

    Fix/generalization: the original had no fallback branch, so any value
    with 10+ digits raised UnboundLocalError on `r`; such values are now
    rendered with a ' Gb' suffix following the same truncation pattern.

    :param p: non-negative integer position/length in base pairs.
    :return: `p` itself for <=3 digits, otherwise a label string.
    """
    s = str(p)
    digits = len(s)
    if digits <= 3:
        return p
    if digits == 4:
        return s[:1] + ' kb'
    if digits == 5:
        return s[:2] + ' kb'
    if digits == 6:
        # e.g. 123456 -> '0.12 Mb' (truncated, not rounded — matches Mb branches)
        return '0' + '.' + s[:2] + ' Mb'
    if digits == 7:
        return s[:1] + '.' + s[1:3] + ' Mb'
    if digits == 8:
        return s[:2] + ' Mb'
    if digits == 9:
        return s[:3] + ' Mb'
    # New: giga-base range (>=10 digits); mirrors the Mb formatting.
    if digits == 10:
        return s[:1] + '.' + s[1:3] + ' Gb'
    return s[:digits - 9] + ' Gb'
# NOTE(review): redundant — argv was already parsed into `args` right after
# the parser definition above; this second call re-parses the same argv.
args = parser.parse_args()
#############################################################################################################
# #
# SNP - Alelic frequence VS Chromosome position #
# #
#############################################################################################################
def fa_vs_pos():
# Draw an allele-frequency (AF) vs chromosome-position scatter plot for
# every contig longer than 1 Mb and save each as a PNG under
# <project>/3_workflow_output/. The output filename prefix (mapping_,
# control_, problem_, candidates_) is selected by args.my_mut.
# Inputs: args.input_snp (whitespace-separated variant table) and
# args.input_f_snp (genome fasta); some modes also read intermediate
# files map_info.txt / filler_variants.va from the project directory.
#Input 1
input1 = args.input_snp
f1 = open(input1, 'r')
lines = f1.readlines()
#Input 2
input2 = args.input_f_snp
f2 = open(input2, 'r')
lines_f = f2.readlines()
contig_source = args.input_f_snp
# Function to parse fasta file (based on one of the Biopython IOs)
def read_fasta(fp):
name, seq = None, []
for line in fp:
line = line.rstrip()
if line.startswith('>'):
if name: yield (name, ''.join(seq))
name, seq = line, []
else:
seq.append(line)
if name: yield (name, ''.join(seq))
# Read contig fasta file; fastalist holds [name, length] pairs.
contig_lengths = list()
with open(contig_source) as fp:
fastalist = list()
for name_contig, seq_contig in read_fasta(fp):
innerlist = list()
innerlist.append(name_contig.strip('>'))
innerlist.append(len(seq_contig))
fastalist.append(innerlist)
contig_lengths.append(len(seq_contig))
# Longest contig fixes the shared horizontal scale across all images.
max_contig_len = 0
for i in contig_lengths:
if int(i) > max_contig_len:
max_contig_len = int(i)
#FA vs POS graphs — one image per contig > 1 Mb
for i in fastalist:
if int(i[1]) > 1000000:
wide=int(880*float(i[1])/max_contig_len) + 120 # image width proportional to contig length (SET IMAGE SIZE)
height=500
im = Image.new("RGB", (wide, int(height)), (255,255,255))
draw = ImageDraw.Draw(im)
#get fonts from foler 'fonts'
fnt2 = ImageFont.truetype('fonts/VeraMono.ttf', 14)
r = red(int(i[1]))
if 'Mb' in r:
max_graph_x = int(i[1]) + 10000
elif 'kb' in r:
max_graph_x = int(i[1])
#Scaling factors (data units per pixel; plot area is 880 px wide, 63% of height tall)
scaling_factor_x = (max_graph_x)/(wide - 120) #nts/pixel
scaling_factor_y = (1.001/(63/100.0*height)) #af/pixels
#Candidate region: shade it pink when mapping point mutations
if args.my_mut == 'snp':
binput = open(project + '/1_intermediate_files/map_info.txt', 'r')
#Retrieving candidate region coordinates ('?' lines: chrom, start, end)
for line in binput:
if line.startswith('?'):
sp = line.split()
chromosome = sp[1].strip().lower()
chromosome_candidate = chromosome
if chromosome == i[0].lower():
if int(sp[2]) > 0 :
cr_start = int(sp[2])
else:
cr_start = 0
if int(sp[3]) < int(i[1]) :
cr_end = int(sp[3])
else:
cr_end = int(i[1])
#Drawing candidate region:
if chromosome_candidate == i[0].lower():
cr_start_im = int(cr_start/scaling_factor_x) + 70
cr_end_im = int(cr_end/scaling_factor_x) + 70
draw.rectangle( [cr_start_im, int(15/100.0*height), cr_end_im, int(80/100.0*height)], fill=(249, 222, 252) )
# af_candidates: framing the candidate region
if args.my_mut == 'af_candidates':
binput = open(project + '/1_intermediate_files/map_info.txt', 'r')
#Retrieving candidate region coordinates
for line in binput:
if line.startswith('?'):
sp = line.split()
chromosome = sp[1].strip().lower()
chromosome_candidate = chromosome
if chromosome == i[0].lower():
cr_start_raw = int(sp[2])
cr_end_raw = int(sp[3])
if int(sp[2]) > 0 :
cr_start = int(sp[2])
else:
cr_start = 0
if int(sp[3]) < int(i[1]) :
cr_end = int(sp[3])
else:
cr_end = int(i[1])
if chromosome_candidate == i[0].lower():
#Drawing a frame for the candidate region (AF >= 0.8 band):
cr_start_im = int(cr_start/scaling_factor_x) + 70
cr_end_im = int(cr_end/scaling_factor_x) + 70
fa_img_08 = int(80/100.0*height) - int(0.8/scaling_factor_y) - 1
draw.rectangle( [cr_start_im, int(15/100.0*height)+1, cr_end_im+1, fa_img_08], fill=(249, 222, 252), outline=(112, 112, 112) )
#Drawing a dotted line in the frame at the region midpoint
cr_middle = ((cr_start_raw + cr_end_raw)/2)/scaling_factor_x + 70
h = int(16/100.0*height)
while h in range(int(15/100.0*height), fa_img_08):
draw.line((cr_middle, h) + (cr_middle, h+5), fill=(255, 0, 0, 0), width=1)
h = h + 10
#snps: one dot per variant; columns 5/6 presumably are ref/alt read
#counts (AF = col6 / (col6 + col5)) — TODO confirm against the caller.
r, g, b = 31, 120, 180
if args.my_mut == 'af_control':
r, g, b = 245, 120, 44
for l, line in enumerate(lines):
sp = line.split()
if i[0].lower() == sp[0].lower() :
fa = float(sp[6])/(float(sp[6])+float(sp[5]))
fa_img = int(80/100.0*height) - int(fa/scaling_factor_y) - 1
pos_img = int(int(sp[1])/scaling_factor_x) + 70
draw.ellipse((pos_img-2, fa_img-2, pos_img+2, fa_img+2), fill=(r, g, b))
# f2wt analysis overlays both the F2-mut (cols 7/8) and F2-wt (cols 5/6)
# allele frequencies, first in faded colors for filler variants.
if args.my_snp_analysis_type == 'f2wt' and args.my_mut == 'snp':
#Filler variants
problem_var = open(project + '/1_intermediate_files/filler_variants.va', 'r')
for line in problem_var:
sp = line.split()
if i[0].lower() == sp[0].lower():
#f2 mut
fa = float(sp[8])/(float(sp[8])+float(sp[7]))
fa_img = int(80/100.0*height) - int(fa/scaling_factor_y)
pos_img = int(int(sp[1])/scaling_factor_x) + int(70)
draw.ellipse((pos_img-2, fa_img-2, pos_img+2, fa_img+2), fill=(237, 194, 168))
#f2wt snps
fa = float(sp[6])/(float(sp[6])+float(sp[5]))
fa_img = int(80/100.0*height) - int(fa/scaling_factor_y) - 1
pos_img = int(int(sp[1])/scaling_factor_x) + 70
draw.ellipse((pos_img-2, fa_img-2, pos_img+2, fa_img+2), fill=(167, 190, 206))
'''
# Problem variants
problem_var = open(project + '/1_intermediate_files/F2_filtered.va', 'r')
for line in problem_var:
sp = line.split()
if i[0].lower() == sp[0].lower():
fa = float(sp[6])/(float(sp[6])+float(sp[5]))
fa_img = int(80/100.0*height) - int(fa/scaling_factor_y)
pos_img = int(int(sp[1])/scaling_factor_x) + int(70)
draw.ellipse((pos_img-2, fa_img-2, pos_img+2, fa_img+2), fill=(167, 190, 206))
#Control variants
problem_var = open(project + '/1_intermediate_files/control_filtered.va', 'r')
for line in problem_var:
sp = line.split()
if i[0].lower() == sp[0].lower():
fa = float(sp[6])/(float(sp[6])+float(sp[5]))
fa_img = int(80/100.0*height) - int(fa/scaling_factor_y)
pos_img = int(int(sp[1])/scaling_factor_x) + int(70)
draw.ellipse((pos_img-2, fa_img-2, pos_img+2, fa_img+2), fill=(237, 194, 168))
'''
#Mapping variants (full markers over the faded filler ones)
for l, line in enumerate(lines):
sp = line.split()
if i[0].lower() == sp[0].lower():
#f2 mut
fa = float(sp[8])/(float(sp[8])+float(sp[7]))
fa_img = int(80/100.0*height) - int(fa/scaling_factor_y)
pos_img = int(int(sp[1])/scaling_factor_x) + int(70)
draw.ellipse((pos_img-2, fa_img-2, pos_img+2, fa_img+2), fill=(245, 120, 44))
#f2wt snps
fa = float(sp[6])/(float(sp[6])+float(sp[5]))
fa_img = int(80/100.0*height) - int(fa/scaling_factor_y) - 1
pos_img = int(int(sp[1])/scaling_factor_x) + 70
draw.ellipse((pos_img-2, fa_img-2, pos_img+2, fa_img+2), fill=(31, 120, 180))
my_cross = str(args.my_cross)
#Boost / mm: overlay smoothed mapping curves from map_info.txt
# ('!' line carries the boost maximum, '@' lines carry per-window values).
if args.my_mut == 'snp':
binput = open(project + '/1_intermediate_files/map_info.txt', 'r')
blines = binput.readlines()
#Boost line (red), only for outcrosses; values normalized by boost_max
if my_cross == 'oc' :
for b, bline in enumerate(blines):
sp = bline.split()
if bline.startswith('!'):
boost_max = float(sp[3])
for b, bline in enumerate(blines):
sp = bline.split()
if bline.startswith('@') and sp[4].lower().strip('>') == i[0].lower():
boost_value = float(sp[3].strip())/boost_max
boost_value_img = int(80/100.0*height) - int(boost_value/scaling_factor_y )
window_position = int(sp[1])
window_position_img = int(window_position/scaling_factor_x) + 70
# First iteration has no previous point yet, so the draw fails
# and we just seed the "previous point" variables instead.
try:
draw.line(((window_position_img, boost_value_img) + (window_position_img_2, boost_value_img_2)), fill=(255, 0, 0, 0), width=1)
window_position_img_2 = window_position_img
boost_value_img_2 = boost_value_img
except:
window_position_img_2 = window_position_img
boost_value_img_2 = boost_value_img
window_position_img = None
boost_value_img = None
window_position_img_2 = None
boost_value_img_2 = None
#MM line (green) for outcrosses
if my_cross == 'oc' :
for b, bline in enumerate(blines):
sp = bline.split()
if bline.startswith('@') and sp[4].lower().strip('>') == i[0].lower():
mm_value = float(sp[2].strip())
mm_value_img = int(80/100.0*height) - int(mm_value/scaling_factor_y )
window_position = int(sp[1])
window_position_img = int(window_position/scaling_factor_x) + 70
try:
draw.line(((window_position_img, mm_value_img) + (window_position_img_2, mm_value_img_2)), fill=(46, 255, 0), width=1)
window_position_img_2 = window_position_img
mm_value_img_2 = mm_value_img
except:
window_position_img_2 = window_position_img
mm_value_img_2 = mm_value_img
# Backcross: same curve but column 3 holds the contig name and the
# line color depends on the analysis type (magenta for f2wt).
if my_cross == 'bc' :
r, g, bl = 46, 255, 0
if args.my_snp_analysis_type == 'f2wt':
r, g, bl = 255, 0, 255
for b, bline in enumerate(blines):
sp = bline.split()
if bline.startswith('@') and sp[3].lower().strip('>') == i[0].lower():
mm_value = float(sp[2].strip())
mm_value_img = int(80/100.0*height) - int(mm_value/scaling_factor_y )
window_position = int(sp[1])
window_position_img = int(window_position/scaling_factor_x) + 70
try:
draw.line(((window_position_img, mm_value_img) + (window_position_img_2, mm_value_img_2)), fill=(r, g, bl), width=1)
window_position_img_2 = window_position_img
mm_value_img_2 = mm_value_img
except:
window_position_img_2 = window_position_img
mm_value_img_2 = mm_value_img
window_position_img = None
mm_value_img = None
window_position_img_2 = None
mm_value_img_2 = None
#Axes (plot box spans x: 68..wide-50, y: 15%..80% of height)
draw.line(((wide - 49), int(15/100.0*height)) + ((wide - 49), int(80/100.0*height)), fill=(255, 255, 255, 0), width=2) #cleanup
draw.line((68, int(15/100.0*height)) + (68, int(80/100.0*height)), fill=(0, 0, 0, 0), width=1) #Y axis
draw.line((68, int(80/100.0*height)) + ((wide - 50), int(80/100.0*height)), fill=(0, 0, 0, 0), width=1) #X axis
draw.line(((wide - 50), int(15/100.0*height)) + ((wide - 50), int(80/100.0*height)), fill=(0, 0, 0, 0), width=1) #-Y axis
draw.line((68, int(15/100.0*height)) + ((wide - 50), int(15/100.0*height)), fill=(0, 0, 0, 0), width=1) #-X axis
#Axis rulers_____________________
#X Axis: one tick per Mb (or per 100 kb for short contigs)
if int(i[1]) > 1000000:
mbs = int(0/scaling_factor_x) + 68
x_tag = 0
while mbs in range(68, wide-50):
draw.line((mbs, int(81/100.0*height) ) + (mbs, int(80/100.0*height)), fill=(0, 0, 0, 0), width=1)
if len(str(x_tag)) == 1:
draw.text(((mbs - 4), (int(81.8/100.0*height))), (str(x_tag).strip()), font=fnt2, fill=(0,0,0,255))
elif len(str(x_tag)) == 2:
draw.text(((mbs - 8), (int(81.8/100.0*height))), (str(x_tag).strip()), font=fnt2, fill=(0,0,0,255))
mbs = mbs + 1000000/scaling_factor_x +1
x_tag = x_tag + 1
elif int(i[1]) <= 1000000:
mbs = int(0/scaling_factor_x) + 68
x_tag = 0
while mbs in range(68, wide-50):
draw.line((mbs, int(81/100.0*height) ) + (mbs, int(80/100.0*height)), fill=(0, 0, 0, 0), width=1)
draw.text(((mbs - 4*len(str(x_tag))), (int(81.8/100.0*height))), (str(x_tag).strip()), font=fnt2, fill=(0,0,0,255))
mbs = mbs + 100000/scaling_factor_x +1
x_tag = x_tag + 100000
#Y axis: ticks at AF 0, 0.25, 0.5, 0.75, 1.0
fa_img_0 = int(80/100.0*height) - int(0/scaling_factor_y) - 1
fa_img_1 = int(80/100.0*height) - int(1/scaling_factor_y) - 1
fa_img_05 = int(80/100.0*height) - int(0.5/scaling_factor_y) - 1
fa_img_025 = int(80/100.0*height) - int(0.25/scaling_factor_y) - 1
fa_img_075 = int(80/100.0*height) - int(0.75/scaling_factor_y) - 1
draw.line(( 68 , fa_img_0 +1) + ( 63 , fa_img_0 +1 ), fill=(0, 0, 0, 0), width=1)
draw.line(( 68 , fa_img_1 ) + ( 63 , fa_img_1 ), fill=(0, 0, 0, 0), width=1)
draw.line(( 68 , fa_img_05 ) + ( 63 , fa_img_05 ), fill=(0, 0, 0, 0), width=1)
draw.line(( 68 , fa_img_025 ) + ( 65 , fa_img_025 ), fill=(0, 0, 0, 0), width=1)
draw.line(( 68 , fa_img_075 ) + ( 65 , fa_img_075 ), fill=(0, 0, 0, 0), width=1)
draw.text(((48), fa_img_0-6), ( '0' ), font=fnt2, fill=(0,0,0,255))
draw.text(((32), fa_img_1-8), ( '1.0' ), font=fnt2, fill=(0,0,0,255))
draw.text(((32), fa_img_05-8), ( '0.5' ), font=fnt2, fill=(0,0,0,255))
#Y axis label (rendered rotated 90 degrees, pasted on the left margin)
txt=Image.new('L', (140, 20))
d = ImageDraw.Draw(txt)
d.text( (0, 0), "Allele frequency", font=fnt2, fill=255)
w=txt.rotate(90, expand=1)
im.paste( ImageOps.colorize(w, (0,0,0), (0,0,0)), (2,150), w)
#X axis label
if int(i[1]) > 1000000: x_title = str(i[0]) + ' (Mb)'
if int(i[1]) <= 1000000: x_title = str(i[0]) + ' (bp)'
w, h = draw.textsize(str(x_title))
draw.text((( (wide-120)/2- w/2 +70), (int(87/100.0*height))), (x_title), font=fnt2, fill=(0,0,0,255))
#Crop and save image, specifying the format with the extension
w, h = im.size
if args.my_mut == 'snp':
im.crop((0, 60, w-0, h-40)).save(project + '/3_workflow_output/mapping_' + str(i[0]) + '.png')
if args.my_mut == 'af_control':
im.crop((0, 60, w-0, h-40)).save(project + '/3_workflow_output/control_' + str(i[0]) + '.png')
if args.my_mut == 'af_sample':
im.crop((0, 60, w-0, h-40)).save(project + '/3_workflow_output/problem_' + str(i[0]) + '.png')
if args.my_mut == 'af_candidates':
im.crop((0, 60, w-0, h-40)).save(project + '/3_workflow_output/candidates_' + str(i[0]) + '.png')
#############################################################################################################
# #
# SNP - Alelic frequence VS Chromosome position - Zoom in candidate region #
# #
#############################################################################################################
def candidates_zoom():
# Draw a zoomed-in allele-frequency plot (AF 0.8-1.0 band) of the
# candidate region defined in map_info.txt, with one dot per candidate
# variant and a dotted vertical line at the region midpoint. Saved as
# <project>/3_workflow_output/candidates_<chrom>_zoom.png (snp mode only).
#Input 1
input1 = open(project + '/3_workflow_output/candidate_variants.txt', 'r')
lines = input1.readlines()
#Input 2
input2 = open(project + '/1_intermediate_files/map_info.txt', 'r')
lines_map = input2.readlines()
#Input fasta
contig_source = args.input_f_snp
# Function to parse fasta file (based on one of the Biopython IOs)
def read_fasta(fp):
name, seq = None, []
for line in fp:
line = line.rstrip()
if line.startswith('>'):
if name: yield (name, ''.join(seq))
name, seq = line, []
else:
seq.append(line)
if name: yield (name, ''.join(seq))
# Read contig fasta file into [name, length] pairs
contig_lengths = list()
with open(contig_source) as fp:
fastalist = list()
for name_contig, seq_contig in read_fasta(fp):
innerlist = list()
innerlist.append(name_contig.strip('>'))
innerlist.append(len(seq_contig))
fastalist.append(innerlist)
contig_lengths.append(len(seq_contig))
# '?' line in map_info.txt: chromosome, region start, region end.
# Start/end are clamped to [0, chromosome_length]; the *_real values
# keep the unclamped coordinates for the midpoint line.
for line in lines_map:
if line.startswith('?'):
sp = line.split()
chromosome = str(sp[1]).lower().strip()
for i in fastalist:
if i[0].strip().lower() == chromosome:
chromosome_length = int(i[1])
reg_min = sp[2]
reg_min_real = reg_min
if int(reg_min) < 0 : reg_min = '0'
reg_max = sp[3]
reg_max_real = reg_max
if int(reg_max) > chromosome_length : reg_max = chromosome_length
#Create image
wide=1000
height=500
im = Image.new("RGB", (wide, int(height)), (255,255,255))
draw = ImageDraw.Draw(im)
#get fonts from foler 'fonts'
fnt2 = ImageFont.truetype('fonts/VeraMono.ttf', 14)
max_graph_x = int(int(reg_max) - int(reg_min))
#Scaling factors; y scale spans 0.2 AF units (the 0.8-1.0 band)
scaling_factor_x = (max_graph_x)/(wide - 120) #nts/pixel
scaling_factor_y = float(0.201/(63/100.0*height)) #fa/pixels
scaling_factor_y_1 = float(1.001/(63/100.0*height)) #fa/pixels
#shading
draw.rectangle( [70, int(15/100.0*height), wide-50, int(80/100.0*height)], fill=(249, 222, 252) )
#snps: column 8 presumably holds the allele frequency, column 2 the
#position — TODO confirm against candidate_variants.txt writer.
r, g, b = 31, 120, 180
for l, line in enumerate(lines):
if not line.startswith('@'):
sp = line.split()
fa = float(sp[8]) - 0.8
fa_img = int(80/100.0*height) - int(fa/scaling_factor_y)
pos_img = int((int(sp[2])-int(reg_min))/scaling_factor_x) + 70
draw.ellipse((pos_img-2, fa_img-2, pos_img+2, fa_img+2), fill=(r, g, b))
#Peak line: dotted vertical at the (unclamped) region midpoint
peak = (((int(reg_max_real) + int(reg_min_real))/2) - int(reg_min))/scaling_factor_x + 70
if (((int(reg_max) + int(reg_min_real))/2) - int(reg_min)) < 0: peak = 70
h = int(16/100.0*height)
while h in range(int(15/100.0*height), int(80/100.0*height)):
draw.line((peak, h) + (peak, h+7), fill=(255, 0, 0, 0), width=1)
h = h + 14
#Axes
draw.line(((wide - 49), int(15/100.0*height)) + ((wide - 49), int(80/100.0*height)), fill=(255, 255, 255, 0), width=2) #cleanup
draw.line((68, int(15/100.0*height)) + (68, int(80/100.0*height)), fill=(0, 0, 0, 0), width=1) #Y axis
draw.line((68, int(80/100.0*height)) + ((wide - 50), int(80/100.0*height)), fill=(0, 0, 0, 0), width=1) #X axis
draw.line(((wide - 50), int(15/100.0*height)) + ((wide - 50), int(80/100.0*height)), fill=(0, 0, 0, 0), width=1) #-Y axis
draw.line((68, int(15/100.0*height)) + ((wide - 50), int(15/100.0*height)), fill=(0, 0, 0, 0), width=1) #-X axis
#Axis rulers_____________________
#X Axis: ticks every 500 kb, labelled with absolute bp coordinates
mark = 68
mark_2 = 68 + 500000/scaling_factor_x/5
x_tag = int(reg_min)
while mark in range(68, wide-50):
draw.line((mark, int(81/100.0*height) ) + (mark, int(80/100.0*height)), fill=(0, 0, 0, 0), width=1)
w, h = draw.textsize(str(x_tag))
draw.text(((mark - w/2 -4), (int(81.8/100.0*height))), (str(x_tag).strip()), font=fnt2, fill=(0,0,0,255))
mark = mark + 500000/scaling_factor_x
x_tag = x_tag + 500000
#Y axis: ticks at AF 0.8, 0.85, 0.9, 0.95, 1.0
fa_img_0 = int(80/100.0*height) - int(0/scaling_factor_y) -1
fa_img_09 = int(80/100.0*height) - int(0.1/scaling_factor_y) -1
fa_img_1 = int(80/100.0*height) - int(0.2/scaling_factor_y) -1
fa_img_95 = int(80/100.0*height) - int(0.15/scaling_factor_y) -1
fa_img_85 = int(80/100.0*height) - int(0.05/scaling_factor_y) -1
draw.line(( 68 , fa_img_0 +1) + ( 63 , fa_img_0 +1 ), fill=(0, 0, 0, 0), width=1)
draw.line(( 68 , fa_img_09 +1) + ( 63 , fa_img_09 +1 ), fill=(0, 0, 0, 0), width=1)
draw.line(( 68 , fa_img_1 +1) + ( 63 , fa_img_1 +1 ), fill=(0, 0, 0, 0), width=1)
draw.line(( 68 , fa_img_95 +1) + ( 65 , fa_img_95 +1 ), fill=(0, 0, 0, 0), width=1)
draw.line(( 68 , fa_img_85 +1) + ( 65 , fa_img_85 +1 ), fill=(0, 0, 0, 0), width=1)
draw.text(((32), fa_img_0-8), ( '0.8' ), font=fnt2, fill=(0,0,0,255))
draw.text(((32), fa_img_1-8), ( '1.0' ), font=fnt2, fill=(0,0,0,255))
draw.text(((32), fa_img_09-8), ( '0.9' ), font=fnt2, fill=(0,0,0,255))
#Y axis label (rotated 90 degrees)
txt=Image.new('L', (140, 20))
d = ImageDraw.Draw(txt)
d.text( (0, 0), "Allele frequency", font=fnt2, fill=255)
w=txt.rotate(90, expand=1)
im.paste( ImageOps.colorize(w, (0,0,0), (0,0,0)), (2,150), w)
#X axis label
x_title = chromosome + ' (bp)'
w, h = draw.textsize(str(x_title))
draw.text((( (wide-120)/2- w/2 +70), (int(87/100.0*height))), (x_title), font=fnt2, fill=(0,0,0,255))
#Crop and save image, specifying the format with the extension
w, h = im.size
if args.my_mut == 'snp':
im.crop((0, 60, w-0, h-40)).save(project + '/3_workflow_output/candidates_' + chromosome + '_zoom.png')
#############################################################################################################
# #
# SNP - Alelic frequence VS Chromosome position - LEGEND #
# #
#############################################################################################################
def legend():
# Render a static legend image explaining the plot symbols and save it
# as <project>/3_workflow_output/legend.png. Takes no arguments; uses the
# module-level `project` path and the bundled VeraMono font.
wide = 250
high = 310
im = Image.new("RGB", (wide, high), (255,255,255))
draw = ImageDraw.Draw(im)
fnt2 = ImageFont.truetype('fonts/VeraMono.ttf', 14)
# (w, h) is the top-left inner margin; (width, length) the bottom-right.
w = 10
h = 10
length = high - 10
width = wide - 10
#legend box (border rectangle drawn as four lines)
draw.line((w, h) + (width, h), fill=256, width=1)
draw.line((w, h) + (w, length), fill=256, width=1)
draw.line((width, length) + (width, h), fill=256, width=1)
draw.line((w, length) + (width, length), fill=256, width=1)
#legend items: one swatch + caption per plot element
draw.text((w+20, h+20), 'Legend:', font=fnt2, fill=(0,0,0,255))
draw.ellipse((w+40-2, h+60-2, w+40+2, h+60+2), fill=(31, 120, 180))
draw.text((w+60, h+52), 'F2 problem SNPs', font=fnt2, fill=(0,0,0,255))
draw.ellipse((w+40-2, h+90-2, w+40+2, h+90+2), fill=(245, 120, 44))
draw.text((w+60, h+82), 'Control SNPs', font=fnt2, fill=(0,0,0,255))
draw.line((w+38, h+120) + (w+44, h+120), fill=(46, 255, 0), width=2)
draw.text((w+60, h+112), 'SMA', font=fnt2, fill=(0,0,0,255))
draw.line((w+38, h+150) + (w+44, h+150), fill=(255, 0, 0), width=2)
draw.text((w+60, h+142), 'Boost', font=fnt2, fill=(0,0,0,255))
draw.line((w+38, h+180) + (w+44, h+180), fill=(255, 0, 255), width=2)
draw.text((w+60, h+172), 'AF difference', font=fnt2, fill=(0,0,0,255))
draw.line((w+37, h+210) + (w+45, h+210), fill=(255, 0, 0), width=2)
draw.line((w+40, h+210) + (w+42, h+210), fill=(255, 255, 255), width=2)
draw.text((w+60, h+202), 'Selected position', font=fnt2, fill=(0,0,0,255))
draw.line((w+38, h+240) + (w+44, h+240), fill=(249, 222, 252), width=8)
draw.text((w+60, h+232), 'Candidate region', font=fnt2, fill=(0,0,0,255))
draw.rectangle( [w+38, h+244, w+44, h+236], fill=None, outline=(0,0,0) )
draw.line((w+38, h+270) + (w+44, h+270), fill=(255, 252, 232), width=8)
draw.text((w+60, h+262), 'Selected chromosome', font=fnt2, fill=(0,0,0,255))
draw.rectangle( [w+38, h+274, w+44, h+266], fill=None, outline=(0,0,0) )
im.save(project + '/3_workflow_output/legend.png')
#############################################################################################################
# #
# LIN - GENOME OVERVIEW & HISTOGRAMS #
# #
#############################################################################################################
def insertions_overview_and_histograms():
    """Render the genome-wide insertions overview image and per-insertion read-depth plots.

    Reads three module-level inputs (via the global ``args``):
      * ``args.input``   - tab-separated alignment/read-depth table (lines starting with '@' are headers)
      * ``args.input_f`` - genome contigs in FASTA format
      * ``args.ins_pos`` - tab-separated table of predicted insertion positions

    Writes PNG files under ``project + '/3_workflow_output/'``:
      * ``insertions_overview.png``          - all contigs drawn to scale with numbered insertion markers
      * ``img_1_ins_<n>.png`` (one per insertion) - paired + local read-depth graphs ('pe' mode)
        or local-only graph ('se' mode), with legends and a predicted-position line.

    Relies on module-level globals: args, project, Image, ImageDraw, ImageFont,
    ImageOps, StringIO, math.  No return value; all effects are file writes.
    NOTE(review): StringIO is fed bytes read with 'rb' - this only works on
    Python 2 (Python 3 would need io.BytesIO); confirm target interpreter.
    """
    #Input 1
    input = args.input
    f1 = open(input, 'r')
    lines = f1.readlines()
    #__________________________________________Insertions overview image_____________________________________________________________
    #________________________________________________________________________________________________________________________________
    #Input 2
    finput = args.input_f
    f2 = open(finput, 'r')
    flines = f2.readlines()
    #define a superlist with innerlists, each of them containing all the info of each contig
    # superlist entry layout (built incrementally below):
    #   [name, length, y_coord, xi_coord, xf_coord, [insertion positions]]
    superlist = list()
    #Input 3
    input_pos = args.ins_pos
    f3 = open(input_pos, 'r')
    lines_pos = f3.readlines()
    #Create a list with all the genome contigs
    contigs = []
    length = 0
    n = 0
    #dict_contigs = dict()
    lengthlist = list()
    #read fasta file to determine number of contigs
    for i, line in enumerate(flines):
        if line.startswith('>'): #fasta sequences start with '>'
            sp = line.split(' ') #because some names have whitespaces and extra info that is not written to sam file
            cont = sp[0].strip() #strip() is to remove the '\r\n' hidden chars
            cont = cont[1:] #to remove the first char of string (>)
            if cont not in contigs:
                contigs.append(cont)
                innerlist = list()
                innerlist.append(cont)
                superlist.append(innerlist)
    #Calculate the width of the image according to the number of contigs
    num_contigs = 0
    for c in superlist:
        num_contigs+=1
    contigs_image_length = 65 * num_contigs + 60
    im = Image.new("RGB", (1000, contigs_image_length+120), (255,255,255))
    contig_source = args.input_f
    # Function to parse fasta file (based on one of the Biopython IOs)
    def read_fasta(fp):
        # Yields (header_line, sequence) pairs; header keeps its leading '>'.
        name, seq = None, []
        for line in fp:
            line = line.rstrip()
            if line.startswith('>'):
                if name: yield (name, ''.join(seq))
                name, seq = line, []
            else:
                seq.append(line)
        if name: yield (name, ''.join(seq))
    # Read contig fasta file
    with open(contig_source) as fp:
        fastalist = list()
        for name_contig, seq_contig in read_fasta(fp):
            innerlist = list()
            innerlist.append(name_contig.strip('>'))
            innerlist.append(len(seq_contig))
            fastalist.append(innerlist)
    # Longest contig determines the horizontal scale of the overview image.
    try:
        max_list = list()
        for c in fastalist:
            max_list.append(int(c[1]))
        max_length = max(max_list)
    except:
        # NOTE(review): bare except used as a fallback for an empty/odd list;
        # it will also mask unrelated errors - consider narrowing.
        max_length = fastalist[0][1]
    mb_max = int(math.ceil(float(max_length)/1000000))
    # Attach each contig's length (from the FASTA) to its superlist entry.
    for c in superlist:
        for l in fastalist:
            if c[0] == l[0]:
                c.append(l[1])
    contigs_scaling_factor = mb_max*1000000 / 850.0  # bp per pixel over the 850-px drawing area
    #translate real chromosome lengths into image coordinates
    contig_counter = 1
    contig_xi_coord = 100
    for c in superlist:
        contig_xf_coord = int(contig_xi_coord + c[1]/contigs_scaling_factor)
        contig_y_coord = ((contigs_image_length / num_contigs) * contig_counter)
        contig_counter +=1
        c.append(contig_y_coord)
        c.append(contig_xi_coord)
        c.append(contig_xf_coord)
    #add insertions approximate position to superlist
    for c in superlist:
        positions_list = list()
        for i, line in enumerate(lines_pos):
            if not line.startswith('#'):
                sp = line.split('\t')
                contig = str(sp[1].strip()).lower()
                insertion_pos = int(sp[2])
                if contig == c[0].strip().lower() and insertion_pos not in positions_list:
                    positions_list.append(insertion_pos)
        c.append(positions_list)
    #initialize draw
    draw = ImageDraw.Draw(im)
    #get fonts from folder 'fonts'
    fnt3 = ImageFont.truetype('fonts/VeraMono.ttf', 14)
    tab = 50
    number = 1
    #Drawing the chromosomes:
    for c in superlist:
        previous_pos_x = 0
        previous_chr = 'none'
        draw.line((c[3], c[2]) + (c[4], c[2]), fill=(31, 120, 180), width=13)
        draw.text(((c[3]), (c[2] -55)), c[0], font=fnt3, fill=(0,0,0,255))
        # Numbered triangle marker per insertion: above the bar normally,
        # below it when markers would overlap (closer than 21 px on the same contig).
        for i in c[5]:
            d = abs(i/contigs_scaling_factor+contig_xi_coord - previous_pos_x)
            if d > 21 or previous_chr != c[0]:
                draw.polygon([((i/contigs_scaling_factor+contig_xi_coord), (c[2]-8)), (((i/contigs_scaling_factor+contig_xi_coord)+10), (c[2]-18)), ((i/contigs_scaling_factor + contig_xi_coord)-10, c[2]-18)], fill = (200, 0, 0, 200))
                draw.line((((i/contigs_scaling_factor+contig_xi_coord), (c[2]-8)), ((i/contigs_scaling_factor+contig_xi_coord)+10), (c[2]-18)), fill=256, width=1)
                draw.line((((i/contigs_scaling_factor+contig_xi_coord)+10), (c[2]-18)) + ((i/contigs_scaling_factor + contig_xi_coord)-10, (c[2]-18)), fill=256, width=1)
                draw.line(((i/contigs_scaling_factor+contig_xi_coord), (c[2]-8)) + ((i/contigs_scaling_factor + contig_xi_coord)-10, (c[2]-18)), fill=256, width=1)
                draw.text(((i/contigs_scaling_factor+contig_xi_coord -4), (c[2]-35)), ( str(number)), font=fnt3, fill=(0,0,0,255))
            else:
                draw.polygon([((i/contigs_scaling_factor+contig_xi_coord), (c[2]+8)), (((i/contigs_scaling_factor+contig_xi_coord)-10), (c[2]+18)), ((i/contigs_scaling_factor + contig_xi_coord)+10, c[2]+18)], fill = (200, 0, 0, 200))
                draw.line((((i/contigs_scaling_factor+contig_xi_coord), (c[2]+8)), ((i/contigs_scaling_factor+contig_xi_coord)-10), (c[2]+18)), fill=256, width=1)
                draw.line((((i/contigs_scaling_factor+contig_xi_coord)-10), (c[2]+18)) + ((i/contigs_scaling_factor + contig_xi_coord)+10, (c[2]+18)), fill=256, width=1)
                draw.line(((i/contigs_scaling_factor+contig_xi_coord), (c[2]+8)) + ((i/contigs_scaling_factor + contig_xi_coord)+10, (c[2]+18)), fill=256, width=1)
                draw.text(((i/contigs_scaling_factor+contig_xi_coord -4), (c[2]+20)), ( str(number)), font=fnt3, fill=(0,0,0,255))
            number = number + 1
            previous_pos_x = i/contigs_scaling_factor+contig_xi_coord
            previous_chr = c[0]
        #Chromosome caps
        # NOTE(review): StringIO over bytes only works on Python 2; Python 3 needs io.BytesIO.
        image_file_left = StringIO(open("./fonts/left_cap.png",'rb').read())
        image_file_right = StringIO(open("./fonts/right_cap.png",'rb').read())
        cap_left = Image.open(image_file_left)
        cap_right = Image.open(image_file_right)
        im.paste(cap_left, (c[3], c[2]-6))
        im.paste(cap_right, (c[4], c[2]-6))
    #Axis
    draw.line((100, contigs_image_length + 50) + (950, contigs_image_length + 50), fill=256, width=1)
    mb = 850.0/mb_max  # pixels per megabase for the axis ticks
    m = 100.0
    tag = 'Position (Mb)'
    num = 0
    draw.text((480, contigs_image_length + 88), str(tag), font=fnt3, fill=256)
    # One tick per Mb until the cursor leaves the 100-950 px drawing area.
    while int(m) in range(100, 951):
        draw.line(( int(m), contigs_image_length + 48) + (int(m), contigs_image_length + 52), fill=256, width=1)
        w, h = draw.textsize(str(num))
        draw.text((int(m) - w/2 -2, contigs_image_length + 57), str(num), font=fnt3, fill=256)
        num = num + 1
        m = m + mb
    im.save(project + "/3_workflow_output/insertions_overview.png")
    #_________________________________________________________Local and paired analysis graphs________________________________________________________________
    #_________________________________________________________________________________________________________________________________________________________
    if args.mode == 'pe':
        # Paired-end mode: one image per insertion with two stacked graphs
        # (paired read depth on top, local/clipped read depth below).
        insertions = list()
        for i, line in enumerate(lines):
            if not line.startswith('@'):
                sp = line.split('\t')
                insertion = str(sp[2]).strip()
                if insertion not in insertions and insertion != '-':
                    insertions.append(insertion)
        for e in insertions:
            # Reset region_min from the previous iteration (it is created lazily below).
            try:
                del region_min
            except:
                pass
            region_max = 0
            rd_max_paired = 0
            rd_max_local = 0
            for i, line in enumerate(lines):
                if not line.startswith('@'):
                    sp = line.split('\t')
                    #Max and min for genome region in graphic
                    if sp[2] == e and sp[0] == 'PAIRED' :
                        if int(sp[3]) > region_max:
                            region_max = int(sp[3])
                        else:
                            try:
                                # NOTE(review): sp[3] is a str compared to int region_min.
                                # On Py2 this comparison is always False (str > int), so
                                # region_min never updates; on Py3 it raises and falls into
                                # the except every time. Likely intended: int(sp[3]) < region_min.
                                if sp[3] < region_min:
                                    region_min = int(sp[3])
                            except:
                                region_min = int(sp[3])
                    #Max and min read depth
                    if sp[2] == e and sp[0] == 'PAIRED' and sp[5].strip() != 'TOTAL':
                        if int(sp[4]) > rd_max_paired:
                            rd_max_paired = int(sp[4])
                    if sp[2] == e and sp[0] == 'LOCAL_RD' and sp[5].strip() != 'TOTAL_RD':
                        if int(sp[4]) > rd_max_local:
                            rd_max_local = int(sp[4])
            # Pad the extremes so bars never touch the plot frame.
            rd_max_paired = rd_max_paired + 1
            rd_max_local = rd_max_local + 1
            region_max = region_max + 100
            if region_min > 200:
                region_min = region_min - 100
            #Images, axes and title
            im = Image.new("RGB", (1000, 1000), (255,255,255))
            draw = ImageDraw.Draw(im)
            draw.line((120, 449) + (900, 449), fill=256, width=1) #x axis paired
            draw.line((120, 150) + (120, 449), fill=256, width=1) #y axis paired
            draw.line((120, 754) + (900, 754), fill=256, width=1) #x axis local
            draw.line((120, 455) + (120, 754), fill=256, width=1) #y axis local
            draw.line((120, 150) + (900, 150), fill=256, width=1) #-x axis paired
            draw.line((900, 150) + (900, 449), fill=256, width=1) #-y axis paired
            draw.line((120, 455) + (900, 455), fill=256, width=1) #-x axis local
            draw.line((900, 455) + (900, 754), fill=256, width=1) #-y axis local
            draw.text(((450), (795)), ('Nucleotide'), font=fnt3, fill=(0,0,0,255))
            draw.text(((140), (155)), ('Flanking unpaired alignments'), font=fnt3, fill=(0,0,0,255))
            draw.text(((140), (460)), ('Flanking local alignments'), font=fnt3, fill=(0,0,0,255))
            #Y axis label
            # Render the label on a side canvas, rotate 90 degrees, paste as vertical text.
            txt=Image.new('L', (500, 50))
            d = ImageDraw.Draw(txt)
            d.text( (0, 0), "Read depth (x)", font=fnt3, fill=255)
            w=txt.rotate(90, expand=1)
            im.paste( ImageOps.colorize(w, (0,0,0), (0,0,0)), (35,0), w)
            #Scaling factors
            nucleotides = region_max - region_min
            scaling_factor_x = nucleotides/780.0
            scaling_factor_y_paired = rd_max_paired/280.0
            scaling_factor_y_local = rd_max_local/280.0
            #LOCAL/PAIRED GRAPHICS
            # Top graph: one vertical bar per PAIRED row, colored by orientation (F/R).
            for i, line in enumerate(lines):
                if not line.startswith('@'):
                    sp = line.split('\t')
                    ins_contig = sp[1]
                    if sp[2] == e and sp[0].strip() == 'PAIRED' and sp[5].strip() != 'TOTAL':
                        raw_x_position = int(sp[3])
                        img_x_position = int(raw_x_position/scaling_factor_x)
                        img_relative_x_position = img_x_position - int(region_min/scaling_factor_x) + 121
                        raw_y_position = int(sp[4])
                        img_y_position_p = 450 - int(raw_y_position/scaling_factor_y_paired)
                        #draw
                        if sp[5].strip() == 'R':
                            draw.line((img_relative_x_position, 448) + (img_relative_x_position, img_y_position_p), fill=(64, 159, 65, 100), width=1)
                        elif sp[5].strip() == 'F':
                            draw.line((img_relative_x_position, 448) + (img_relative_x_position, img_y_position_p), fill=(31, 120, 180, 100), width=1)
            # Bottom graph: local (clipped-read) depth; track the tallest left/right
            # bars to derive the candidate insertion position.
            img_relative_y_position_2_r = float('inf')
            img_relative_y_position_2_l = float('inf')
            cand_pos_l = 'none'
            cand_pos_r = 'none'
            for i, line in enumerate(lines):
                if not line.startswith('@'):
                    sp = line.split('\t')
                    ins_contig = sp[1]
                    if sp[2] == e and sp[0].strip() == 'LOCAL_RD' and sp[5].strip() != 'TOTAL_RD':
                        raw_x_position = int(sp[3])
                        img_x_position = int(raw_x_position/scaling_factor_x)
                        img_relative_x_position = img_x_position - int(region_min/scaling_factor_x) + 121
                        raw_y_position = int(sp[4])
                        img_y_position_l = int(raw_y_position/scaling_factor_y_local)
                        img_relative_y_position = 755 - img_y_position_l
                        #draw
                        if sp[5].strip() == 'RIGHT_RD':
                            draw.line((img_relative_x_position, 753) + (img_relative_x_position, img_relative_y_position), fill=(64, 159, 65, 100), width=1)
                            if img_relative_y_position < img_relative_y_position_2_r:
                                cand_pos_r = img_relative_x_position
                                img_relative_y_position_2_r = img_relative_y_position
                        if sp[5].strip() == 'LEFT_RD':
                            draw.line((img_relative_x_position, 753) + (img_relative_x_position, img_relative_y_position), fill=(31, 120, 180, 100), width=1)
                            if img_relative_y_position <= img_relative_y_position_2_l:
                                cand_pos_l = img_relative_x_position
                                img_relative_y_position_2_l = img_relative_y_position
            #Candidate regions
            # '@#' header lines carry "start, end, insertion_id" candidate region bounds.
            for i, line in enumerate(lines):
                if line.startswith('@#'):
                    if 'inf' not in line:
                        sp = line.split(',')
                        if int(sp[2].strip()) == int(e):
                            cr = [int(sp[0].strip('@#')), int(sp[1].strip())]
                            cr_min = min(cr)
                            cr_max = max(cr)
                            draw.line((((120 +int(sp[0].strip('@#'))/scaling_factor_x - int(region_min/scaling_factor_x)) , 448) + ((120 +int(sp[0].strip('@#'))/scaling_factor_x - int(region_min/scaling_factor_x)) , 151)), fill=(147, 147, 147, 0), width=1)
                            draw.line((((120 +int(sp[1].strip())/scaling_factor_x - int(region_min/scaling_factor_x)) , 448) + ((120 +int(sp[1].strip())/scaling_factor_x - int(region_min/scaling_factor_x)) , 151)), fill=(147, 147, 147, 0), width=1)
            #Candidate position:
            # NOTE(review): when both sides are present only cand_pos_r is drawn -
            # confirm whether cand_pos_l should also (or instead) be marked.
            if cand_pos_r != 'none' and cand_pos_l == 'none':
                draw.line((cand_pos_r-1 , 456) + (cand_pos_r-1 , 753), fill=(147, 147, 147, 0), width=1)
            if cand_pos_l != 'none' and cand_pos_r == 'none':
                draw.line((cand_pos_l-1 , 456) + (cand_pos_l-1 , 753), fill=(147, 147, 147, 0), width=1)
            if cand_pos_r != 'none' and cand_pos_l != 'none':
                draw.line((cand_pos_r-1 , 456) + (cand_pos_r-1 , 753), fill=(147, 147, 147, 0), width=1)
            #Axis annotations
            #x Axis
            x_p = 120 + int(100/scaling_factor_x)
            x_p_2 = 120 + int(200/scaling_factor_x)
            ruler = region_min + 100
            while x_p in range(120, 900):
                draw.line((x_p, 755) + (x_p, 762), fill=256, width=1)
                w, h = draw.textsize(str(ruler))
                draw.text((x_p - w/2 - 5, 766), (str(ruler)), font=fnt3, fill=(0,0,0,255))
                ruler = ruler + 200
                x_p = int(x_p + (200/scaling_factor_x)) #Ruler with 200 nts separations
            while x_p_2 in range(120, 900):
                draw.line((x_p_2, 755) + (x_p_2, 758), fill=256, width=1)
                x_p_2 = int(x_p_2 + (200/scaling_factor_x)) #Ruler with 100 nts separations
            #y Axis - paired
            # Tick spacing adapts to the depth range (steps of 10 / 5 / 1).
            if rd_max_paired > 20:
                y_p = 450 - int(5/scaling_factor_y_paired)
                counter = 10
                while y_p in range(150, 451):
                    draw.line((120, y_p) + (115, y_p), fill=256, width=1)
                    draw.text((90, y_p-8), ( str(counter)), font=fnt3, fill=(0,0,0,255))
                    counter = counter + 10
                    y_p = int(y_p - (10/scaling_factor_y_paired))
            if 20 >= rd_max_paired > 10:
                y_p = 450 - int(5/scaling_factor_y_paired)
                counter = 5
                while y_p in range(150, 451):
                    draw.line((120, y_p) + (115, y_p), fill=256, width=1)
                    draw.text((90, y_p-8), ( str(counter)), font=fnt3, fill=(0,0,0,255))
                    counter = counter + 5
                    y_p = int(y_p - (5/scaling_factor_y_paired))
            if rd_max_paired <= 10:
                y_p = 450 - int(1/scaling_factor_y_paired)
                counter = 1
                while y_p in range(150, 451):
                    draw.line((120, y_p) + (115, y_p), fill=256, width=1)
                    draw.text((90, y_p-8), ( str(counter)), font=fnt3, fill=(0,0,0,255))
                    counter = counter + 1
                    y_p = int(y_p - (1/scaling_factor_y_paired))
            #y Axis - local
            if rd_max_local > 20:
                y_p = 755 - int(5/scaling_factor_y_local)
                counter = 5
                while y_p in range(455, 751):
                    draw.line((120, y_p) + (115, y_p), fill=256, width=1)
                    draw.text((90, y_p-8), ( str(counter)), font=fnt3, fill=(0,0,0,255))
                    counter = counter + 5
                    y_p = int(y_p - (5/scaling_factor_y_local))
            if 20 >= rd_max_local > 10:
                y_p = 755 - int(5/scaling_factor_y_local)
                counter = 5
                while y_p in range(455, 751):
                    draw.line((120, y_p) + (115, y_p), fill=256, width=1)
                    draw.text((90, y_p-8), ( str(counter)), font=fnt3, fill=(0,0,0,255))
                    counter = counter + 5
                    y_p = int(y_p - (5/scaling_factor_y_local))
            if rd_max_local <= 10:
                y_p = 755 - int(1/scaling_factor_y_local)
                counter = 1
                while y_p in range(455, 751):
                    draw.line((120, y_p) + (115, y_p), fill=256, width=1)
                    draw.text((90, y_p-8), ( str(counter)), font=fnt3, fill=(0,0,0,255))
                    counter = counter + 1
                    y_p = int(y_p - (1/scaling_factor_y_local))
            #Legend paired________________________________________________________________________________________
            #w and h can be used to re-position the legend in the figure
            w = 690
            h = 160
            #legend box
            draw.polygon([(w,h), (w,h+100), (w+200,h+100), (w+200,h) ], fill = (255, 255, 255, 0))
            draw.line((w, h) + (w+200, h), fill=256, width=1)
            draw.line((w, h) + (w, h+100), fill=256, width=1)
            draw.line((w+200, h) + (w+200, h+100), fill=256, width=1)
            draw.line((w, h+100) + (w+200, h+100), fill=256, width=1)
            draw.text((w+10, h+10), 'Legend:', font=fnt3, fill=(0,0,0,255))
            #legend items
            draw.text((w+45, h+32), 'Forward reads', font=fnt3, fill=(0,0,0,255))
            draw.line((w+10, h+32+7) + (w+35, h+32+7), fill=(31, 120, 180), width=10)
            draw.text((w+45, h+52), 'Reverse reads ', font=fnt3, fill=(0,0,0,255))
            draw.line((w+10, h+52+7) + (w+35, h+52+7), fill=(64, 159, 65), width=10)
            draw.text((w+45, h+72), 'Candidate region', font=fnt3, fill=(0,0,0,255))
            draw.line((w+10, h+72+8) + (w+35, h+72+8), fill=(147, 147, 147), width=1)
            #Legend local________________________________________________________________________________________
            #w and h can be used to re-position the legend in the figure
            w = 690
            h = 465
            #legend box
            draw.polygon([(w,h), (w,h+100), (w+200,h+100), (w+200,h) ], fill = (255, 255, 255, 0))
            draw.line((w, h) + (w+200, h), fill=256, width=1)
            draw.line((w, h) + (w, h+100), fill=256, width=1)
            draw.line((w+200, h) + (w+200, h+100), fill=256, width=1)
            draw.line((w, h+100) + (w+200, h+100), fill=256, width=1)
            draw.text((w+10, h+10), 'Legend:', font=fnt3, fill=(0,0,0,255))
            #legend items
            draw.text((w+45, h+32), """3' clipped reads """, font=fnt3, fill=(0,0,0,255))
            draw.line((w+10, h+32+7) + (w+35, h+32+7), fill=(31, 120, 180), width=10)
            draw.text((w+45, h+52), """5' clipped reads """, font=fnt3, fill=(0,0,0,255))
            draw.line((w+10, h+52+7) + (w+35, h+52+7), fill=(64, 159, 65), width=10)
            draw.text((w+45, h+72), 'Predicted position', font=fnt3, fill=(0,0,0,255))
            draw.line((w+10, h+72+8) + (w+35, h+72+8), fill=(147, 147, 147), width=1)
            #save image, specifying the format with the extension
            # Crop 100 px off top and bottom before saving.
            w, h = im.size
            im.crop((0, 100, w, h-100)).save(project + '/3_workflow_output/img_1_ins_' + str(e) + '.png')
    #_________________________________________________________________________________________________________________________________________________________
    if args.mode == 'se':
        # Single-end mode: one image per insertion with only the local
        # (clipped-read) depth graph; same logic as 'pe' minus the paired panel.
        insertions = list()
        for i, line in enumerate(lines):
            if not line.startswith('@'):
                sp = line.split('\t')
                insertion = str(sp[2]).strip()
                if insertion not in insertions and insertion != '-':
                    insertions.append(insertion)
        for e in insertions:
            # Reset region_min from the previous iteration (created lazily below).
            try:
                del region_min
            except:
                pass
            region_max = 0
            rd_max_local = 0
            for i, line in enumerate(lines):
                if not line.startswith('@'):
                    sp = line.split('\t')
                    #Max and min for genome region in graphic
                    if sp[2] == e:
                        if int(sp[3]) > region_max:
                            region_max = int(sp[3])
                        else:
                            try:
                                # NOTE(review): same str-vs-int comparison issue as the
                                # 'pe' branch above - likely intended int(sp[3]) < region_min.
                                if sp[3] < region_min:
                                    region_min = int(sp[3])
                            except:
                                region_min = int(sp[3])
                    #Max read depth
                    if sp[2] == e and sp[0] == 'LOCAL_RD' and sp[5].strip() != 'TOTAL_RD':
                        if int(sp[4]) > rd_max_local:
                            # NOTE(review): the +1 padding is applied inside the loop here,
                            # unlike the 'pe' branch which pads once after the loop - confirm intent.
                            rd_max_local = int(sp[4]) + 1
            region_max = region_max + 100
            if region_min > 200:
                region_min = region_min - 100
            #Images, axes and title
            im = Image.new("RGB", (1000, 600), (255,255,255))
            draw = ImageDraw.Draw(im)
            draw.line((120, 450) + (900, 450), fill=256, width=1) #x axis
            draw.line((120, 150) + (120, 450), fill=256, width=1) #y axis
            draw.line((120, 150) + (900, 150), fill=256, width=1) #-x axis
            draw.line((900, 150) + (900, 450), fill=256, width=1) #-y axis
            draw.text(((450), (500)), ('Nucleotide'), font=fnt3, fill=(0,0,0,255))
            #Y axis label
            txt=Image.new('L', (150, 30))
            d = ImageDraw.Draw(txt)
            d.text( (0, 0), "Read depth (x)", font=fnt3, fill=255)
            w=txt.rotate(90, expand=1)
            im.paste( ImageOps.colorize(w, (0,0,0), (0,0,0)), (35,200), w)
            #Scaling factors
            nucleotides = region_max - region_min
            scaling_factor_x = nucleotides/780.0
            scaling_factor_y_local = rd_max_local/280.0
            img_relative_y_position_2_r = float('inf')
            img_relative_y_position_2_l = float('inf')
            cand_pos_l = 'none'
            cand_pos_r = 'none'
            #LOCAL GRAPHICS
            # One vertical bar per LOCAL_RD row; the tallest left/right bars
            # give the candidate insertion position.
            for i, line in enumerate(lines):
                if not line.startswith('@'):
                    sp = line.split('\t')
                    ins_contig = sp[1]
                    if sp[2] == e and sp[0].strip() == 'LOCAL_RD' and sp[5].strip() != 'TOTAL_RD':
                        raw_x_position = int(sp[3])
                        img_x_position = int(raw_x_position/scaling_factor_x)
                        img_relative_x_position = img_x_position - int(region_min/scaling_factor_x) + 121
                        raw_y_position = int(sp[4])
                        img_y_position_l = int(raw_y_position/scaling_factor_y_local)
                        img_relative_y_position = 450 - img_y_position_l
                        #draw
                        if sp[5].strip() == 'RIGHT_RD':
                            draw.line((img_relative_x_position, 449) + (img_relative_x_position, img_relative_y_position), fill=(64, 159, 65, 100), width=3)
                            if img_relative_y_position < img_relative_y_position_2_r:
                                cand_pos_r = img_relative_x_position
                                img_relative_y_position_2_r = img_relative_y_position
                        if sp[5].strip() == 'LEFT_RD':
                            draw.line((img_relative_x_position, 449) + (img_relative_x_position, img_relative_y_position), fill=(31, 120, 180, 100), width=3)
                            if img_relative_y_position <= img_relative_y_position_2_l:
                                cand_pos_l = img_relative_x_position
                                img_relative_y_position_2_l = img_relative_y_position
            #Candidate position
            if cand_pos_r != 'none' and cand_pos_l == 'none':
                draw.line((cand_pos_r-1 , 449) + (cand_pos_r-1 , 151), fill=(147, 147, 147, 0), width=1)
            if cand_pos_l != 'none' and cand_pos_r == 'none':
                draw.line((cand_pos_l-1 , 449) + (cand_pos_l-1 , 151), fill=(147, 147, 147, 0), width=1)
            if cand_pos_r != 'none' and cand_pos_l != 'none':
                draw.line((cand_pos_r-1 , 449) + (cand_pos_r-1 , 151), fill=(147, 147, 147, 0), width=1)
            #Axis annotations
            #x Axis
            x_p = 120 + int(25/scaling_factor_x)
            x_p_2 = 120 + int(50/scaling_factor_x)
            ruler = region_min + 25
            while x_p in range(120, 900):
                draw.line((x_p, 450) + (x_p, 457), fill=256, width=1)
                w, h = draw.textsize(str(ruler))
                draw.text((x_p - w/2 - 5, 460), (str(ruler)), font=fnt3, fill=(0,0,0,255))
                ruler = ruler + 50
                x_p = int(x_p + (50/scaling_factor_x)) #Ruler with 50 nts separations
            while x_p_2 in range(120, 900):
                draw.line((x_p_2, 450) + (x_p_2, 455), fill=256, width=1)
                x_p_2 = int(x_p_2 + (50/scaling_factor_x)) #Ruler with 25 nts separations
            #y Axis
            # NOTE(review): rd_max_local == 8 matches neither branch (no ticks drawn) - confirm.
            if rd_max_local > 8:
                y_p = 450 - int(5/scaling_factor_y_local)
                counter = 5
                while y_p in range(150, 451):
                    draw.line((120, y_p) + (115, y_p), fill=256, width=1)
                    draw.text((90, y_p-8), ( str(counter)), font=fnt3, fill=(0,0,0,255))
                    counter = counter + 5
                    y_p = int(y_p - (5/scaling_factor_y_local))
            if rd_max_local < 8:
                y_p = 450 - int(1/scaling_factor_y_local)
                counter = 1
                while y_p in range(150, 451):
                    draw.line((120, y_p) + (115, y_p), fill=256, width=1)
                    draw.text((90, y_p-8), ( str(counter)), font=fnt3, fill=(0,0,0,255))
                    counter = counter + 1
                    y_p = int(y_p - (1/scaling_factor_y_local))
            #Legend local________________________________________________________________________________________ <---------------------------------------------
            #w and h can be used to re-position the legend in the figure
            w = 690
            h = 160
            #legend box
            draw.polygon([(w,h), (w,h+100), (w+200,h+100), (w+200,h) ], fill = (255, 255, 255, 0))
            draw.line((w, h) + (w+200, h), fill=256, width=1)
            draw.line((w, h) + (w, h+100), fill=256, width=1)
            draw.line((w+200, h) + (w+200, h+100), fill=256, width=1)
            draw.line((w, h+100) + (w+200, h+100), fill=256, width=1)
            draw.text((w+10, h+10), 'Legend:', font=fnt3, fill=(0,0,0,255))
            #legend items
            draw.text((w+45, h+32), """3' clipped reads """, font=fnt3, fill=(0,0,0,255))
            draw.line((w+10, h+32+7) + (w+35, h+32+7), fill=(31, 120, 180), width=10)
            draw.text((w+45, h+52), """5' clipped reads """, font=fnt3, fill=(0,0,0,255))
            draw.line((w+10, h+52+7) + (w+35, h+52+7), fill=(64, 159, 65), width=10)
            draw.text((w+45, h+72), 'Predicted position', font=fnt3, fill=(0,0,0,255))
            draw.line((w+10, h+72+8) + (w+35, h+72+8), fill=(147, 147, 147), width=1)
            #save image, specifying the format with the extension
            w, h = im.size
            im.crop((0, 100, w, h-50)).save(project + '/3_workflow_output/img_1_ins_' + str(e) + '.png')
#############################################################################################################
# #
# GENE PLOT #
# #
#############################################################################################################
def gene_plot():
if args.my_mut == 'lin':
#Input 1
input = args.input
f1 = open(input, 'r')
lines = f1.readlines()
#Input varanalyzer
input = args.input_va
f3 = open(input, 'r')
lines_va = f3.readlines()
#Input gff
input = args.gff
f4 = open(input, 'r')
lines_gff = f4.readlines()
# Function to parse fasta file (based on one of the Biopython IOs)
def read_fasta(fp):
name, seq = None, []
for line in fp:
line = line.rstrip()
if line.startswith('>'):
if name: yield (name, ''.join(seq))
name, seq = line, []
else:
seq.append(line)
if name: yield (name, ''.join(seq))
#We create an 'intermediate list', which will contain the information necesary for the gene plot gathered from the output file of the varanalyzer module, the
#sorted_insertions.txt file and the genome feature file. Intermediate list format:
# ['Chr', 'insertion/snp position', 'insertion number / -', 'gene name', [ref-aa, alt-aa, pos-aa, ref-base, alt-base, strand], [list of gene features: 'type', 'start', 'end'], [list of positions(required for calculations)]]
intermediate_list = list()
for i, line in enumerate(lines_va):
if not line.startswith('@'):
sp = line.split('\t')
if sp[5].strip() != 'nh':
temp_list = list()
temp_list.append(sp[1])
temp_list.append(sp[2])
temp_list.append('-')
temp_list.append(sp[9])
refalt = [sp[12].strip(), sp[13].strip(), sp[11].strip(), sp[3].strip(), sp[4].strip(), sp[8].strip()]
temp_list.append(refalt)
intermediate_list.append(temp_list)
if args.my_mut == 'lin':
for p in intermediate_list:
for i, line in enumerate(lines):
sp = line.split()
if p[0].lower().strip() == sp[1].lower().strip() and sp[2] not in p[2]: #and 'TOTAL' in sp[5]
if int(p[1]) == int(sp[3]) or int(p[1]) == (int(sp[3]) - 1):
p[2] = sp[2]
for p in intermediate_list:
features = list()
positions = list()
refalt = list()
for g, line in enumerate(lines_gff):
if not line.startswith('#'):
sp = line.split('\t')
if p[3] in sp[8]:
feature = [sp[2], sp[3], sp[4]]
positions.append(int(sp[3]))
positions.append(int(sp[4]))
features.append(feature)
p.append(features)
p.append(positions)
for p in intermediate_list:
p[5].append(['rr', int(args.rrl)])
if p[4][5].strip() == '+':
p[6].append(min(p[6]) - int(args.rrl))
if p[4][5].strip() == '-':
p[6].append(max(p[6]) + int(args.rrl))
#Drawing the genes:
for p in intermediate_list:
wide=1000 #<-------------------------------------------------------------------------------- SET IMAGE SIZE
height=(35/100.0)*wide
im = Image.new("RGB", (wide, int(height)), (255,255,255))
draw = ImageDraw.Draw(im)
gene_max_raw = max(p[6])
gene_min_raw = min(p[6])
gene_length = gene_max_raw - gene_min_raw
gene_px_length = float((0.7)*wide)
gene_scaling_factor = gene_length/gene_px_length #bp/pixel
#Fonts
fnt2 = ImageFont.truetype('fonts/arial.ttf', int(0.016*wide))
fnt3 = ImageFont.truetype('fonts/arial.ttf', int(0.024*wide))
fnt4 = ImageFont.truetype('fonts/arial.ttf', int(0.02*wide))
#Gene name
draw.text((int(0.05*wide), int(0.03*wide)), (str(p[3])), font=fnt3, fill=(0,0,0,255))
#Gene baseline
if p[4][5] == '+':
draw.line((int(0.15*wide) + int(int(args.rrl)/gene_scaling_factor), int(180/350.0*height)) + (int(0.15*wide) + gene_px_length, int(180/350.0*height)), fill=(14, 54, 119), width=int(0.004*wide))
if p[4][5] == '-':
draw.line((int(0.15*wide), int(180/350.0*height)) + (int(0.15*wide) + gene_px_length - int(int(args.rrl)/gene_scaling_factor), int(180/350.0*height)), fill=(14, 54, 119), width=int(0.004*wide))
#Gene features
atg_list = list()
if p[4][5] == '+':
for e in p[5]:
if e[0].strip() == 'rr':
inicio = int(0.15*wide)
fin = int(0.15*wide) + int(e[1])/gene_scaling_factor
step = int(0.005*wide)
s = inicio
while s in range(inicio, int(fin)):
draw.line((s, int(180/350.0*height)) + (s + step, int(180/350.0*height)), fill=(14, 54, 119), width=int(0.004*wide))
s = s + step*2
if p[4][5] == '-':
for e in p[5]:
if e[0].strip() == 'rr':
fin = int(0.85*wide)
inicio = int(0.85*wide) - int(e[1])/gene_scaling_factor
step = int(0.005*wide)
s = int(inicio)
while s in range(int(inicio), int(fin)):
draw.line((s, int(180/350.0*height)) + (s + step, int(180/350.0*height)), fill=(14, 54, 119), width=int(0.004*wide))
s = s + step*2
for e in p[5]:
if e[0].strip() == 'exon':
inicio = int((int(e[1]) - gene_min_raw)/gene_scaling_factor) + int(0.15*wide)
fin = int((int(e[2]) - gene_min_raw)/gene_scaling_factor) + int(0.15*wide)
draw.line((inicio, int(180/350.0*height)) + (fin, int(180/350.0*height)), fill=(59, 119, 214), width=int(0.02*wide))
draw.line((inicio, int(170/350.0*height)) + (fin, int(170/350.0*height)), fill=(0, 4, 71, 0), width=2)
draw.line((inicio, int(190/350.0*height)) + (fin+1, int(190/350.0*height)), fill=(0, 4, 71, 0), width=2)
draw.line((inicio, int(170/350.0*height)) + (inicio, int(190/350.0*height)), fill=(0, 4, 71, 0), width=2)
draw.line((fin, int(170/350.0*height)) + (fin, int(190/350.0*height)), fill=(0, 4, 71, 0), width=2)
if p[4][5] == '+':
draw.line((inicio, int(170/350.0*height)) + (inicio, int(190/350.0*height)), fill=(0, 4, 71, 0), width=2)
if p[4][5] == '-':
draw.line((fin, int(170/350.0*height)) + (fin, int(190/350.0*height)), fill=(0, 4, 71, 0), width=2)
for e in p[5]:
if 'utr' in (e[0].strip()).lower() and 'five' in (e[0].strip()).lower(): # Backup UTR drawing
inicio = int((int(e[1]) - gene_min_raw)/gene_scaling_factor) + int(0.15*wide)
fin = int((int(e[2]) - gene_min_raw)/gene_scaling_factor) + int(0.15*wide)
draw.line((inicio, int(180/350.0*height)) + (fin, int(180/350.0*height)), fill=(188, 209, 242), width=int(0.02*wide))
draw.line((inicio, int(170/350.0*height)) + (fin, int(170/350.0*height)), fill=(0, 4, 71, 0), width=2)
draw.line((inicio, int(190/350.0*height)) + (fin, int(190/350.0*height)), fill=(0, 4, 71, 0), width=2)
draw.line((inicio, int(170/350.0*height)) + (inicio, int(190/350.0*height)), fill=(0, 4, 71, 0), width=2)
draw.line((fin, int(170/350.0*height)) + (fin, int(190/350.0*height)), fill=(0, 4, 71, 0), width=2)
if 'utr' in (e[0].strip()).lower() and 'three' in (e[0].strip()).lower(): # Backup UTR drawing
inicio = int((int(e[1]) - gene_min_raw)/gene_scaling_factor) + int(0.15*wide)
fin = int((int(e[2]) - gene_min_raw)/gene_scaling_factor) + int(0.15*wide)
draw.line((inicio, int(180/350.0*height)) + (fin, int(180/350.0*height)), fill=(188, 209, 242), width=int(0.02*wide))
draw.line((inicio, int(170/350.0*height)) + (fin, int(170/350.0*height)), fill=(0, 4, 71, 0), width=2)
draw.line((inicio, int(190/350.0*height)) + (fin, int(190/350.0*height)), fill=(0, 4, 71, 0), width=2)
if p[4][5] == '+':
draw.line((inicio, int(170/350.0*height)) + (inicio, int(190/350.0*height)), fill=(0, 4, 71, 0), width=2)
if p[4][5] == '-':
draw.line((fin, int(170/350.0*height)) + (fin, int(190/350.0*height)), fill=(0, 4, 71, 0), width=2)
draw.line((inicio, int(170/350.0*height)) + (inicio, int(190/350.0*height)), fill=(0, 4, 71, 0), width=2)
#Gene direction
if p[4][5] == '+':
draw.polygon([(int(0.84*wide), int(168/350.0*height)), (int(0.851*wide) , int(168/350.0*height)), (int(0.851*wide), int(181/350.0*height))], fill = (255, 255, 255, 0))
draw.polygon([(int(0.84*wide), int(192/350.0*height)), (int(0.851*wide) , int(192/350.0*height)), (int(0.851*wide), int(180/350.0*height))], fill = (255, 255, 255, 0))
draw.line((int(0.841*wide), int(170/350.0*height)) + (int(0.85*wide), int(180/350.0*height)), fill=(0, 4, 71, 0), width=2)
draw.line((int(0.841*wide), int(190/350.0*height)) + (int(0.851*wide), int(180/350.0*height)), fill=(0, 4, 71, 0), width=2)
if p[4][5] == '-':
draw.polygon([(int(0.16*wide), int(168/350.0*height)), (int(0.149*wide) , int(168/350.0*height)), (int(0.149*wide), int(180/350.0*height))], fill = (255, 255, 255, 0))
draw.polygon([(int(0.16*wide), int(192/350.0*height)), (int(0.149*wide) , int(192/350.0*height)), (int(0.149*wide), int(180/350.0*height))], fill = (255, 255, 255, 0))
draw.line((int(0.158*wide), int(170.5/350.0*height)) + (int(0.148*wide), int(180.5/350.0*height)), fill=(0, 4, 71, 0), width=2)
draw.line((int(0.159*wide), int(190/350.0*height)) + (int(0.149*wide), int(180/350.0*height)), fill=(0, 4, 71, 0), width=2)
draw.line((int(0.15*wide), int(169/350.0*height)) + (int(0.16*wide), int(169/350.0*height)), fill=(255, 255, 255, 0), width=1)
#Scale bar
if args.my_mut == 'snp':
scale = 100
scale_tag = '100 bp'
w, h = draw.textsize(str(scale_tag))
px_scale = float(scale/gene_scaling_factor)
draw.line((int(0.91*wide) - int(px_scale) - w/2 + px_scale/2, int(110/350.0*height)) + (int(0.91*wide) - w/2 + px_scale/2, int(110/350.0*height)), fill=(0, 0, 0, 0), width=int(0.002*wide))
draw.text((int(0.87*wide), int(117.8/350.0*height)), (scale_tag), font=fnt2, fill=(0,0,0,255))
if args.my_mut == 'lin':
scale = 100
scale_tag = '100 bp'
w, h = draw.textsize(str(scale_tag))
px_scale = float(scale/gene_scaling_factor)
draw.line((int(0.91*wide) - int(px_scale) - w/2 + px_scale/2, int(250/350.0*height)) + (int(0.91*wide) - w/2 + px_scale/2, int(250/350.0*height)), fill=(0, 0, 0, 0), width=int(0.002*wide))
draw.text((int(0.87*wide), int(257.8/350.0*height)), (scale_tag), font=fnt2, fill=(0,0,0,255))
#Insertion triangle and info
if args.my_mut == 'lin':
ins_pos = int((int(p[1]) - gene_min_raw)/gene_scaling_factor) + int(0.15*wide)
draw.polygon([(ins_pos, int(170/350.0*height)), (ins_pos - int(0.02*wide), int(170/350.0*height) - int(0.025*wide)), (ins_pos + int(0.02*wide), int(170/350.0*height) - int(0.025*wide))], fill = (200, 0, 0, 200))
draw.line((ins_pos, int(170/350.0*height)) + (ins_pos - int(0.02*wide), int(170/350.0*height) - int(0.025*wide)), fill=(0, 0, 0, 0), width=1)
draw.line((ins_pos, int(170/350.0*height)) + (ins_pos + int(0.02*wide), int(170/350.0*height) - int(0.025*wide)), fill=(0, 0, 0, 0), width=1)
draw.line((ins_pos - int(0.02*wide), int(170/350.0*height) - int(0.025*wide)) + (ins_pos + int(0.02*wide), int(170/350.0*height) - int(0.025*wide)), fill=(0, 0, 0, 0), width=1)
draw.text((ins_pos - int(0.04*wide), int(0.115*wide)), ('Insertion ' + str(p[2])), font=fnt4, fill=(0,0,0,255))
#SNP arrow and info
if args.my_mut == 'snp':
snp_pos = int((int(p[1]) - gene_min_raw)/gene_scaling_factor) + int(0.15*wide)
draw.line((snp_pos, int(194/350.0*height)) + (snp_pos , int(194/350.0*height) + int(0.03*wide)), fill=(180, 0, 0, 0), width=int(0.005*wide))
draw.polygon([(snp_pos, int(191/350.0*height)), (snp_pos - int(0.01*wide), int(191/350.0*height) + int(0.01*wide)), (snp_pos + int(0.01*wide), int(191/350.0*height) + int(0.01*wide))], fill = (200, 0, 0, 200))
#Aa change
if p[4][0].strip() != '-' :
aach = 'yes'
draw.text((int(snp_pos - int(0.092*wide)), int(0.75*height)), (
str(p[4][0])+ ' (' + str(p[4][2]) +')' + ' ' +
str(p[4][1])), font=fnt4, fill=(0,0,0,255))
str_len = len(str(p[4][2]))
else:
aach = 'no'
#Base change
draw.text((int(snp_pos - int(0.036*wide)), int(0.67*height)), (
str(p[4][3]) + ' ' +
str(p[4][4])), font=fnt4, fill=(0,0,0,255))
#Arrows
#Base
image_file = StringIO(open("./fonts/arrowright.png",'rb').read())
arrow = Image.open(image_file)
arrow = arrow.resize((47, 47), Image.ANTIALIAS)
arrow = arrow.crop((12, 17, 47, 34))
im.paste(arrow, (int(snp_pos - int(0.013*wide)), int(0.685*height)))
#Aa
if aach == "yes":
image_file = StringIO(open("./fonts/arrowright.png",'rb').read())
arrow = Image.open(image_file)
arrow = arrow.resize((47, 47), Image.ANTIALIAS)
arrow = arrow.crop((12, 17, 47, 34))
#We paste the arrow in a different position depending of the number of characters that the aa position has:
if str_len == 1: im.paste(arrow, (int(snp_pos - int(0.041*wide)), int(0.765*height)))
if str_len == 2: im.paste(arrow, (int(snp_pos - int(0.030*wide)), int(0.765*height)))
if str_len == 3: im.paste(arrow, (int(snp_pos - int(0.019*wide)), int(0.765*height)))
if str_len == 4: im.paste(arrow, (int(snp_pos - int(0.008*wide)), int(0.765*height)))
if str_len == 5: im.paste(arrow, (int(snp_pos + int(0.003*wide)), int(0.765*height)))
#save image, specifying the format with the extension. For SNP images we save them with diferent sizes depending on if theres an aminoacid change or not
w, h = im.size
if args.my_mut == 'lin':
im.crop((70, 100, w-20, h-60)).save(project + '/3_workflow_output/gene_plot_' + str(args.my_mut) + '_' + str(p[2]) + '_gene_' + str(p[3])+ '.png')
if args.my_mut == 'snp' and aach == 'no':
im.crop((70, 100, w-20, h-70)).save(project + '/3_workflow_output/gene_plot_' + str(args.my_mut) + '_' + str(p[1]) + '_gene_' + str(p[3])+ '.png')
if args.my_mut == 'snp' and aach == 'yes':
im.crop((70, 100, w-20, h-40)).save(project + '/3_workflow_output/gene_plot_' + str(args.my_mut) + '_' + str(p[1]) + '_gene_' + str(p[3])+ '.png')
| davidwilson-85/easymap | graphic_output/draw.py | Python | gpl-3.0 | 62,463 | [
"Biopython"
] | aed775cb38b22673eeed5832322cb605b28c49d1e013a8f677c9da02a513ee9c |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#
# Author: Matteo Giantomassi <matteo.giantomassiNOSPAM AT uclouvain.be>
# Date: October 11, 2016
from spack import *
class Abinit(AutotoolsPackage):
    """ABINIT is a package whose main program allows one to find the total
    energy, charge density and electronic structure of systems made of
    electrons and nuclei (molecules and periodic solids) within
    Density Functional Theory (DFT), using pseudopotentials and a planewave
    or wavelet basis.

    ABINIT also includes options to optimize the geometry according to the
    DFT forces and stresses, or to perform molecular dynamics
    simulations using these forces, or to generate dynamical matrices,
    Born effective charges, and dielectric tensors, based on Density-Functional
    Perturbation Theory, and many more properties. Excited states can be
    computed within the Many-Body Perturbation Theory (the GW approximation and
    the Bethe-Salpeter equation), and Time-Dependent Density Functional Theory
    (for molecules). In addition to the main ABINIT code, different utility
    programs are provided.
    """

    homepage = 'https://www.abinit.org/'
    url = 'https://www.abinit.org/sites/default/files/packages/abinit-8.6.3.tar.gz'

    version('9.4.2', sha256='d40886f5c8b138bb4aa1ca05da23388eb70a682790cfe5020ecce4db1b1a76bc')
    version('8.10.3', sha256='ed626424b4472b93256622fbb9c7645fa3ffb693d4b444b07d488771ea7eaa75')
    version('8.10.2', sha256='4ee2e0329497bf16a9b2719fe0536cc50c5d5a07c65e18edaf15ba02251cbb73')
    version('8.8.2', sha256='15216703bd56a799a249a112b336d07d733627d3756487a4b1cb48ebb625c3e7')
    version('8.6.3', sha256='82e8d071088ab8dc1b3a24380e30b68c544685678314df1213180b449c84ca65')
    version('8.2.2', sha256='e43544a178d758b0deff3011c51ef7c957d7f2df2ce8543366d68016af9f3ea1')
    # Versions before 8.0.8b are not supported.
    version('8.0.8b', sha256='37ad5f0f215d2a36e596383cb6e54de3313842a0390ce8d6b48a423d3ee25af2')

    variant('mpi', default=True,
            description='Builds with MPI support. Requires MPI2+')
    variant('openmp', default=False,
            description='Enables OpenMP threads. Use threaded FFTW3')
    variant('scalapack', default=False,
            description='Enables scalapack support. Requires MPI')
    variant('wannier90', default=False,
            description='Enables the Wannier90 library')
    variant('libxml2', default=False,
            description='Enable libxml2 support, used by multibinit')
    variant('optimization-flavor', default='standard', multi=False,
            values=('safe', 'standard', 'aggressive'),
            description='Select the optimization flavor to use.')
    variant('install-tests', default=False,
            description='Install test cases')

    # Add dependencies
    depends_on('atompaw')
    depends_on('blas')
    depends_on('lapack')

    # Require MPI2+
    depends_on('mpi@2:', when='+mpi')

    depends_on('scalapack', when='+scalapack+mpi')

    depends_on('fftw-api')

    depends_on('netcdf-fortran')
    depends_on('netcdf-c+mpi', when='+mpi')
    depends_on('netcdf-c~mpi', when='~mpi')
    depends_on('hdf5+mpi', when='+mpi')
    depends_on('hdf5~mpi', when='~mpi')
    depends_on("wannier90+shared", when='+wannier90+mpi')

    # constrain libxc version
    depends_on('libxc')
    depends_on('libxc@:2', when='@:8')

    # libxml2
    depends_on('libxml2', when='@9:+libxml2')

    # Cannot ask for +scalapack if it does not depend on MPI
    conflicts('+scalapack', when='~mpi')

    # Cannot ask for +wannier90 if it does not depend on MPI
    conflicts('+wannier90', when='~mpi')

    # libxml2 needs version 9 and above
    conflicts('+libxml2', when='@:8')

    conflicts('%gcc@7:', when='@:8.8')
    conflicts('%gcc@9:', when='@:8.10')

    # need openmp threading for abinit+openmp
    # TODO: The logic here can be reversed with the new concretizer. Instead of
    # using `conflicts`, `depends_on` could be used instead.
    mkl_message = 'Need to set dependent variant to threads=openmp'
    conflicts('+openmp',
              when='^fftw~openmp',
              msg='Need to request fftw +openmp')
    conflicts('+openmp',
              when='^intel-mkl threads=none',
              msg=mkl_message)
    conflicts('+openmp',
              when='^intel-mkl threads=tbb',
              msg=mkl_message)
    conflicts('+openmp',
              when='^intel-parallel-studio +mkl threads=none',
              msg=mkl_message)

    patch('rm_march_settings.patch', when='@:8')
    patch('rm_march_settings_v9.patch', when='@9:')

    # Fix detection of Fujitsu compiler
    # Fix configure not to collect the option that causes an error
    # Fix intent(out) and unnecessary rewind to avoid compile error
    patch('fix_for_fujitsu.patch', when='%fj')

    def configure_args(self):
        """Assemble the ./configure arguments.

        The option spelling differs between the 8.x and 9.x configure
        scripts, hence the repeated ``'@:8' in spec`` branching below.
        """
        spec = self.spec

        options = []
        options += self.with_or_without('libxml2')

        oapp = options.append
        oapp('--with-optim-flavor={0}'
             .format(self.spec.variants['optimization-flavor'].value))

        if '+wannier90' in spec:
            if '@:8' in spec:
                oapp('--with-wannier90-libs=-L{0}'
                     .format(spec['wannier90'].prefix.lib + ' -lwannier -lm'))
                oapp('--with-wannier90-incs=-I{0}'
                     .format(spec['wannier90'].prefix.modules))
                oapp('--with-wannier90-bins={0}'
                     .format(spec['wannier90'].prefix.bin))
                oapp('--enable-connectors')
                oapp('--with-dft-flavor=atompaw+libxc+wannier90')
            else:
                options.extend([
                    'WANNIER90_CPPFLAGS=-I{0}'.format(
                        spec['wannier90'].prefix.modules),
                    'WANNIER90_LIBS=-L{0} {1}'.format(
                        spec['wannier90'].prefix.lib, '-lwannier'),
                ])
        else:
            if '@:8' in spec:
                oapp('--with-dft-flavor=atompaw+libxc')
            else:
                # Fix: this was previously the bare expression
                # '--without-wannier90', (an unused one-element tuple), so
                # the flag was silently never passed to configure.
                oapp('--without-wannier90')

        if '+mpi' in spec:
            oapp('CC={0}'.format(spec['mpi'].mpicc))
            oapp('CXX={0}'.format(spec['mpi'].mpicxx))
            oapp('FC={0}'.format(spec['mpi'].mpifc))

            # MPI version:
            # let the configure script auto-detect MPI support from mpi_prefix
            if '@:8' in spec:
                oapp('--enable-mpi=yes')
            else:
                oapp('--with-mpi')
        else:
            if '@:8' in spec:
                oapp('--enable-mpi=no')
            else:
                oapp('--without-mpi')

        # Activate OpenMP in Abinit Fortran code.
        if '+openmp' in spec:
            oapp('--enable-openmp=yes')
        else:
            oapp('--enable-openmp=no')

        # BLAS/LAPACK/SCALAPACK-ELPA
        linalg = spec['lapack'].libs + spec['blas'].libs
        if '^mkl' in spec:
            linalg_flavor = 'mkl'
        elif '@9:' in spec and '^openblas' in spec:
            linalg_flavor = 'openblas'
        else:
            linalg_flavor = 'custom'

        if '+scalapack' in spec:
            linalg = spec['scalapack'].libs + linalg
            if '@:8' in spec:
                linalg_flavor = 'scalapack+{0}'.format(linalg_flavor)

        if '@:8' in spec:
            oapp('--with-linalg-libs={0}'.format(linalg.ld_flags))
        else:
            oapp('LINALG_LIBS={0}'.format(linalg.ld_flags))

        oapp('--with-linalg-flavor={0}'.format(linalg_flavor))

        # NOTE(review): if the fftw-api provider is neither mkl nor fftw
        # (e.g. amdfftw), fftflavor is never assigned and the next line
        # raises NameError -- confirm the provider list before relying on it.
        if '^mkl' in spec:
            fftflavor = 'dfti'
        elif '^fftw' in spec:
            if '+openmp' in spec:
                fftflavor, fftlibs = 'fftw3-threads', '-lfftw3_omp -lfftw3 -lfftw3f'
            else:
                fftflavor, fftlibs = 'fftw3', '-lfftw3 -lfftw3f'

        oapp('--with-fft-flavor={0}'.format(fftflavor))

        if '@:8' in spec:
            if '^mkl' in spec:
                oapp('--with-fft-incs={0}'.format(spec['fftw-api'].headers.cpp_flags))
                oapp('--with-fft-libs={0}'.format(spec['fftw-api'].libs.ld_flags))
            elif '^fftw' in spec:
                options.extend([
                    '--with-fft-incs={0}'.format(spec['fftw'].headers.cpp_flags),
                    '--with-fft-libs=-L{0} {1}'.format(
                        spec['fftw'].prefix.lib, fftlibs),
                ])
        else:
            if '^mkl' in spec:
                options.extend([
                    'FFT_CPPFLAGS={0}'.format(spec['fftw-api'].headers.cpp_flags),
                    # Fix: was 'FFT_LIBs=' -- configure variables are
                    # case-sensitive, so the lowercase 's' meant the
                    # libraries were never picked up.
                    'FFT_LIBS={0}'.format(spec['fftw-api'].libs.ld_flags),
                ])
            elif '^fftw' in spec:
                options.extend([
                    'FFTW3_CPPFLAGS={0}'.format(spec['fftw'].headers.cpp_flags),
                    'FFTW3_LIBS=-L{0} {1}'.format(
                        spec['fftw'].prefix.lib, fftlibs),
                ])

        # LibXC library
        libxc = spec['libxc:fortran']
        if '@:8' in spec:
            options.extend([
                '--with-libxc-incs={0}'.format(libxc.headers.cpp_flags),
                '--with-libxc-libs={0}'.format(libxc.libs.ld_flags + ' -lm')
            ])
        else:
            oapp('--with-libxc={0}'.format(libxc.prefix))

        # Netcdf4/HDF5
        hdf5 = spec['hdf5:hl']
        netcdfc = spec['netcdf-c']
        netcdff = spec['netcdf-fortran:shared']
        if '@:8' in spec:
            oapp('--with-trio-flavor=netcdf')
            # Since version 8, Abinit started to use netcdf4 + hdf5 and we have
            # to link with the high level HDF5 library
            options.extend([
                '--with-netcdf-incs={0}'.format(netcdff.headers.cpp_flags),
                '--with-netcdf-libs={0}'.format(
                    netcdff.libs.ld_flags + ' ' + hdf5.libs.ld_flags
                ),
            ])
        else:
            options.extend([
                '--with-netcdf={0}'.format(netcdfc.prefix),
                '--with-netcdf-fortran={0}'.format(netcdff.prefix),
            ])

        if self.spec.satisfies('%fj'):
            # Fujitsu's compiler uses -M (not the usual -J) for the module dir.
            oapp('FCFLAGS_MODDIR=-M{0}'.format(join_path(
                self.stage.source_path, 'src/mods')))

        return options

    def check(self):
        """This method is called after the build phase if tests have been
        explicitly activated by user.
        """
        make('check')

        # the tests directly execute abinit. thus failing with MPI
        # TODO: run tests in tests/ via the builtin runtests.py
        #       requires Python with numpy, pyyaml, pandas
        if '~mpi' in self.spec:
            make('tests_in')

    def install(self, spec, prefix):
        """Install the build products; optionally ship the test suite too."""
        make('install')
        if '+install-tests' in spec:
            install_tree('tests', spec.prefix.tests)
| LLNL/spack | var/spack/repos/builtin/packages/abinit/package.py | Python | lgpl-2.1 | 10,975 | [
"ABINIT",
"NetCDF",
"Wannier90"
] | eaf29bd1ee9e2f05b9653dfee78d16c359544f0ac2e54dde7f07567cbc771ba1 |
"""
Verify the validity of IR.
"""
from __future__ import print_function, division, absolute_import
import functools
from .types import (Boolean, Integral, Real, Array, Struct, Pointer,
Vector, resolve_typedef)
from .ir import Function, Block, Value, Operation, Constant
from .traversal import visit, combine
from . import ops
from .pattern import match
from .utils import findallops
#===------------------------------------------------------------------===
# Utils
#===------------------------------------------------------------------===
class VerifyError(Exception):
    """Signals that IR verification failed."""
def unique(items):
    """Assert that no item occurs twice in *items*.

    Raises VerifyError naming the first duplicate encountered.
    """
    observed = set()
    for candidate in items:
        if candidate not in observed:
            observed.add(candidate)
        else:
            raise VerifyError("Item not unique", candidate)
#===------------------------------------------------------------------===
# Entry points
#===------------------------------------------------------------------===
@match
def verify(value, env=None):
    """Entry point: dispatch verification on the kind of *value*.

    Functions, blocks and operations get their dedicated checks; anything
    else is only asserted to be a ``Value``.  Returns ``(value, env)`` so the
    function can sit in a pass pipeline.

    NOTE(review): the ``@match`` decorator comes from ``.pattern`` and its
    wrapping semantics are not visible here -- confirm before restructuring
    this body.
    """
    if isinstance(value, Function):
        verify_function(value)
    elif isinstance(value, Block):
        verify_operations(value)
    elif isinstance(value, Operation):
        verify_operation(value)
    else:
        # Fallback: any remaining IR node must at least be a Value.
        assert isinstance(value, Value)
    return value, env
def op_verifier(func):
    """Verifying decorator for functions returning a new Op or list of Ops.

    Each produced Op is syntax-checked via ``verify_op_syntax`` and the
    wrapped function's result is then passed through unchanged.

    Fix: the original loop reused the name ``op`` for both the result and
    the loop variable, so when *func* returned a list the decorator returned
    only the *last* Op instead of the whole list.
    """
    @functools.wraps(func)
    def wrapper(*a, **kw):
        result = func(*a, **kw)
        # Normalize to a list for checking without clobbering the result.
        new_ops = result if isinstance(result, list) else [result]
        for new_op in new_ops:
            verify_op_syntax(new_op)
        return result
    return wrapper
#===------------------------------------------------------------------===
# Internal verification
#===------------------------------------------------------------------===
def verify_module(mod):
    """Verify a pykit module: no name may be both a function and a global,
    and every function must individually verify.

    Fix: ``mod.functions.itervalues()`` is Python-2-only; ``values()``
    behaves identically here and also works on Python 3.
    """
    assert not set.intersection(set(mod.functions), set(mod.globals))
    for function in mod.functions.values():
        verify_function(function)
def verify_function(func):
    """Verify *func*, wrapping any failure in a VerifyError that names it."""
    try:
        _verify_function(func)
    except Exception as exc:
        raise VerifyError(
            "Error verifying function %s: %s" % (func.name, exc))
def _verify_function(func):
    """Run the full battery of checks over a pykit function.

    Checks the argument count, the type of every 'ret' operation, then
    uniqueness, block ordering, per-op syntax, def-use chains and
    semantics -- in that order.
    """
    # Argument list must line up with the declared signature.
    assert len(func.args) == len(func.type.argtypes)

    # Every 'ret' must carry a value of the declared return type, unless the
    # return type is void or opaque.
    restype = func.type.restype
    if not restype.is_void and not restype.is_opaque:
        for ret in findallops(func, 'ret'):
            retval, = ret.args
            assert retval.type == restype, (retval.type, restype)

    verify_uniqueness(func)
    verify_block_order(func)
    verify_operations(func)
    verify_uses(func)
    verify_semantics(func)
def verify_uniqueness(func):
    """Verify that block labels, operations and result names are unique."""
    blocks = func.blocks
    unique(b.name for b in blocks)
    unique(operation for b in blocks for operation in b)
    unique(operation.result for b in blocks for operation in b)
def verify_block_order(func):
    """Verify that every block appears after all of its dominators."""
    from pykit.analysis import cfa

    flow = cfa.cfg(func)
    doms = cfa.compute_dominators(func, flow)

    seen = set()
    for block in func.blocks:
        seen.add(block.name)
        for dom in doms[block.name]:
            if dom not in seen:
                raise VerifyError("Dominator %s does not precede block %s" % (
                    dom, block.name))
def verify_operations(func_or_block):
    """Run per-operation verification over a function or a single block."""
    for operation in func_or_block.ops:
        verify_operation(operation)
def verify_operation(op):
    """Check a single Op: it must be attached to a block, carry a result,
    and be syntactically well-formed."""
    assert op.block is not None, op
    assert op.result is not None, op
    verify_op_syntax(op)
def verify_op_syntax(op):
    """
    Verify the syntactic structure of the Op (arity, List/Value/Const, etc)

    Ops whose opcode has no entry in ``ops.op_syntax`` are accepted as-is.
    """
    if op.opcode not in ops.op_syntax:
        return
    syntax = ops.op_syntax[op.opcode]
    # A trailing Star marks a vararg opcode: only the fixed prefix of the
    # argument list is checked below.
    vararg = syntax and syntax[-1] == ops.Star
    args = op.args
    if vararg:
        syntax = syntax[:-1]
        args = args[:len(syntax)]
    # Fixed part of the argument list must match the declared arity.
    assert len(syntax) == len(args), (op, syntax)
    # Check each argument against its declared meta-syntactic category.
    for arg, expected in zip(args, syntax):
        msg = (op, arg)
        if expected == ops.List:
            assert isinstance(arg, list), msg
        elif expected == ops.Const:
            assert isinstance(arg, Constant), msg
        elif expected == ops.Value:
            if op.opcode == "alloca":
                # alloca may take None as its (optional) size operand.
                assert arg is None or isinstance(arg, Value), msg
            else:
                assert isinstance(arg, Value), msg
        elif expected == ops.Any:
            assert isinstance(arg, (Value, list)), msg
        elif expected == ops.Obj:
            # Obj means "opaque payload": anything goes.
            pass
        else:
            # An unknown category means ops.op_syntax itself is malformed.
            raise ValueError("Invalid meta-syntax?", msg, expected)
def verify_uses(func):
    """Verify the def-use chains: every computed use must be recorded."""
    # NOTE: verify should be importable from any pass!
    from pykit.analysis import defuse

    computed = defuse.defuse(func)
    extra = set.difference(set(computed), set(func.uses))
    assert not extra, extra
    # Deliberately disabled stricter check:
    # assert uses == func.uses, (uses, func.uses)
# ______________________________________________________________________
class Verifier(object):
    """Hook container for semantic verification of all operations."""
def verify_semantics(func, env=None):
    """Visit *func* with the semantic Verifier, merged with any extra
    handlers supplied via ``env["verify.handlers"]``."""
    extra_handlers = env and env.get("verify.handlers")
    verifier = combine(Verifier(), extra_handlers)
    visit(verifier, func)
# ______________________________________________________________________
class LowLevelVerifier(object):
    """Visitor with per-opcode checks for IR lowered to low-level form."""

    def op_unary(self, op):
        # Unary arithmetic must yield a scalar machine type.
        # NOTE: ``type(...) in (...)`` is deliberate -- subclasses of
        # Integral/Real would not be accepted.
        assert type(op.type) in (Integral, Real)

    def op_binary(self, op):
        # Same scalar-type constraint as op_unary.
        assert type(op.type) in (Integral, Real)

    def op_compare(self, op):
        # Comparisons produce a Boolean and take two operands of the same
        # scalar type.
        assert type(op.type) in (Boolean,)
        left, right = op.args
        assert left.type == right.type
        assert type(left.type) in (Boolean, Integral, Real)

    def op_getfield(self, op):
        # Field read: first operand must be a struct value.
        struct, attr = op.args
        assert struct.type.is_struct

    def op_setfield(self, op):
        # Field write: first operand must be a struct value.
        struct, attr, value = op.args
        assert struct.type.is_struct
def verify_lowlevel(func):
    """
    Assert that the function is lowered for code generation.
    """
    for op in func.ops:
        lowered = (Boolean, Array, Integral, Real,
                   Struct, Pointer, Function, Vector)
        assert type(resolve_typedef(op.type)) in lowered, op
| FrancescAlted/blaze | blaze/compute/air/verification.py | Python | bsd-3-clause | 6,458 | [
"VisIt"
] | 7f0cbbe429575b4317976efbd897fd48f72ad3a18eb05474fba801027cba834c |
# Author: Prabhu Ramachandran <prabhu_r at users dot sf dot net>
# Copyright (c) 2006, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance
from tvtk.api import tvtk
# Local imports
from mayavi.filters.filter_base import FilterBase
from mayavi.core.pipeline_info import PipelineInfo
######################################################################
# `Delaunay3D` class.
######################################################################
class Delaunay3D(FilterBase):
    """Performs a 3D Delaunay triangulation using the tvtk.Delaunay3D
    class.
    """

    # The version of this class. Used for persistence.
    __version__ = 0

    # The actual TVTK filter that this class manages.
    filter = Instance(tvtk.Delaunay3D, args=(), allow_none=False, record=True)

    # Pipeline metadata: datasets/attributes this filter accepts as input.
    input_info = PipelineInfo(datasets=['structured_grid', 'poly_data',
                                        'unstructured_grid'],
                              attribute_types=['any'],
                              attributes=['any'])

    # Pipeline metadata: a Delaunay triangulation always produces an
    # unstructured grid.
    output_info = PipelineInfo(datasets=['unstructured_grid'],
                               attribute_types=['any'],
                               attributes=['any'])
| dmsurti/mayavi | mayavi/filters/delaunay3d.py | Python | bsd-3-clause | 1,240 | [
"Mayavi"
] | bfeca808b04246648bafc3846e75b0e180b5eb54d4020d1cee7a72be8326f964 |
"""
Alpha diversity measures (:mod:`skbio.diversity.alpha`)
=======================================================
.. currentmodule:: skbio.diversity.alpha
This package provides implementations of various alpha diversity measures,
including measures of richness, dominance, and evenness. Some functions
generate confidence intervals (CIs). These functions have the suffix ``_ci``.
All alpha diversity measures accept a vector of counts within a single sample,
where each count is, for example, the number of observations of a particular
Operational Taxonomic Unit, or OTU. We use the term "OTU" here very loosely, as
these could be counts of any type of feature/observation (e.g., bacterial
species). We'll refer to this vector as the *counts vector* or simply *counts*
throughout the documentation. Some of these metrics incorporate phylogeny.
These metrics take a tree (``skbio.TreeNode``) and a list of OTU ids mapping
the counts to tips in the tree, in addition to the counts vector.
The counts vector must be one-dimensional and contain integers representing the
number of individuals seen (or *counted*) for a particular OTU. Negative values
are not allowed; the counts vector may only contain integers greater than or
equal to zero.
The counts vector is `array_like`: anything that can be converted into a 1-D
numpy array is acceptable input. For example, you can provide a numpy array or
a native Python list and the results should be identical.
If the input to an alpha diversity measure does not meet the above
requirements, the function will raise either a ``ValueError`` or a
``TypeError``, depending on the condition that is violated.
.. note:: There are different ways that samples are represented in the
ecological literature and in related software. The alpha diversity measures
provided here *always* assume that the input contains abundance data: each
count represents the number of individuals seen for a particular OTU in the
sample. For example, if you have two OTUs, where 3 individuals were observed
from one of the OTUs and only a single individual was observed from the
other, you could represent this data in the following forms (among others):
As a vector of counts. This is the expected type of input for the alpha
diversity measures in this module. There are 3 individuals from the OTU at
index 0, and 1 individual from the OTU at index 1:
>>> counts = [3, 1]
As a vector of indices. The OTU at index 0 is observed 3 times, while the
OTU at index 1 is observed 1 time:
>>> indices = [0, 0, 0, 1]
As a vector of frequencies. We have 1 OTU that is a singleton and 1 OTU that
is a tripleton. We do not have any 0-tons or doubletons:
>>> frequencies = [0, 1, 0, 1]
Always use the first representation (a counts vector) with this module.
Functions
---------
.. autosummary::
:toctree: generated/
ace
berger_parker_d
brillouin_d
chao1
chao1_ci
dominance
doubles
enspie
equitability
esty_ci
faith_pd
fisher_alpha
gini_index
goods_coverage
heip_e
kempton_taylor_q
lladser_ci
lladser_pe
margalef
mcintosh_d
mcintosh_e
menhinick
michaelis_menten_fit
observed_otus
osd
robbins
shannon
simpson
simpson_e
singles
strong
Examples
--------
>>> from skbio.diversity.alpha import observed_otus
>>> import numpy as np
Assume we have the following abundance data for a sample, represented as a
counts vector:
>>> counts = [1, 0, 0, 4, 1, 2, 3, 0]
We can count the number of OTUs:
>>> observed_otus(counts)
5
Note that OTUs with counts of zero are ignored.
In the previous example, we provided a Python list as input. We can also
provide other types of input that are `array_like`:
>>> observed_otus((1, 0, 0, 4, 1, 2, 3, 0)) # tuple
5
>>> observed_otus(np.array([1, 0, 0, 4, 1, 2, 3, 0])) # numpy array
5
All of the alpha diversity measures work in this manner.
Other metrics include ``singles``, which tells us how many OTUs are observed
exactly one time (i.e., are *singleton* OTUs), and ``doubles``, which tells us
how many OTUs are observed exactly two times (i.e., are *doubleton* OTUs).
Let's see how many singletons and doubletons there are in the sample:
>>> from skbio.diversity.alpha import singles, doubles
>>> singles(counts)
2
>>> doubles(counts)
1
Phylogenetic diversity metrics additionally incorporate the relative
relatedness of the OTUs in the calculation, and therefore require a tree and
a mapping of counts to OTU (tip) ids in the tree. Here we'll apply Faith's
Phylogenetic Diversity (PD) metric to the sample:
>>> from skbio import TreeNode
>>> from skbio.diversity.alpha import faith_pd
>>> from io import StringIO
>>> tree = TreeNode.read(StringIO(
... u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,'
... u'(OTU4:0.75,(OTU5:0.5,((OTU6:0.33,OTU7:0.62):0.5,'
... u'OTU8:0.5):0.5):0.5):1.25):0.0)root;'))
>>> otu_ids = ['OTU1', 'OTU2', 'OTU3', 'OTU4', 'OTU5', 'OTU6', 'OTU7', 'OTU8']
>>> faith_pd(counts, otu_ids, tree)
6.95
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from skbio.util import TestRunner
from ._ace import ace
from ._chao1 import chao1, chao1_ci
from ._base import (
berger_parker_d, brillouin_d, dominance, doubles, enspie, equitability,
esty_ci, faith_pd, fisher_alpha, goods_coverage, heip_e, kempton_taylor_q,
margalef, mcintosh_d, mcintosh_e, menhinick, michaelis_menten_fit,
observed_otus, osd, robbins, shannon, simpson, simpson_e, singles, strong)
from ._gini import gini_index
from ._lladser import lladser_pe, lladser_ci
__all__ = ['ace', 'chao1', 'chao1_ci', 'berger_parker_d', 'brillouin_d',
'dominance', 'doubles', 'enspie', 'equitability', 'esty_ci',
'faith_pd', 'fisher_alpha', 'goods_coverage', 'heip_e',
'kempton_taylor_q', 'margalef', 'mcintosh_d', 'mcintosh_e',
'menhinick', 'michaelis_menten_fit', 'observed_otus', 'osd',
'robbins', 'shannon', 'simpson', 'simpson_e', 'singles', 'strong',
'gini_index', 'lladser_pe', 'lladser_ci']
test = TestRunner(__file__).test
| SamStudio8/scikit-bio | skbio/diversity/alpha/__init__.py | Python | bsd-3-clause | 6,544 | [
"scikit-bio"
] | 55dd1c7127d6b27992374f08e66bdaeaa57d03c6e68010dc7906f2ff09ec10a2 |
import _thread as thread
import os
import platform
import tempfile
import time
from logging import getLogger
from subprocess import Popen
from pulsar.managers import status
from pulsar.managers.base.directory import DirectoryBaseManager
from .util import kill_pid
log = getLogger(__name__)
JOB_FILE_SUBMITTED = "submitted"
JOB_FILE_PID = "pid"
try:
    from galaxy.util.commands import new_clean_env
except ImportError:
    # Fallback definition; drop once galaxy-util >= 21.01 is required.
    def new_clean_env():
        """
        Returns a minimal environment to use when invoking a subprocess
        """
        env = {k: os.environ[k]
               for k in ("HOME", "PATH", "TMPDIR")
               if k in os.environ}
        env.setdefault("TMPDIR", os.path.abspath(tempfile.gettempdir()))
        # Set LC_CTYPE environment variable to enforce UTF-8 file encoding.
        # This is needed e.g. for Python < 3.7 where
        # `locale.getpreferredencoding()` (also used by open() to determine
        # the default file encoding) would return `ANSI_X3.4-1968` otherwise.
        env["LC_CTYPE"] = "C.UTF-8"
        return env
class BaseUnqueuedManager(DirectoryBaseManager):
    """Shared machinery for managers that run jobs without a queue.

    Job state is tracked through marker files in the per-job directory:
    JOB_FILE_SUBMITTED while the job is submitted/queued and JOB_FILE_PID
    while it is running.
    """

    def _record_submission(self, job_id):
        # Marker file whose presence means "submitted but not finished".
        self._job_directory(job_id).store_metadata(JOB_FILE_SUBMITTED, 'true')

    def _get_status(self, job_id):
        # Status is derived from marker files; the branch order encodes
        # priority: cancelled > running (pid file) > queued (submitted
        # file) > complete.
        job_directory = self._job_directory(job_id)
        if self._was_cancelled(job_id):
            job_status = status.CANCELLED
        elif job_directory.has_metadata(JOB_FILE_PID):
            job_status = status.RUNNING
        elif job_directory.has_metadata(JOB_FILE_SUBMITTED):
            job_status = status.QUEUED
        else:
            job_status = status.COMPLETE
        return job_status

    def _finish_execution(self, job_id):
        # Dropping the submitted marker flips the derived status toward
        # COMPLETE (see _get_status).
        self._job_directory(job_id).remove_metadata(JOB_FILE_SUBMITTED)

    def _prepare_run(self, job_id, command_line, dependencies_description, env, setup_params=None):
        """Validate, record and rewrite *command_line* for execution.

        Returns the command line to actually invoke (on POSIX, the path of
        a generated job script).
        """
        self._check_execution_with_tool_file(job_id, command_line)
        self._record_submission(job_id)
        if platform.system().lower() == "windows":
            # TODO: Don't ignore requirements and env without warning. Ideally
            # process them or at least warn about them being ignored.
            command_line = self._expand_command_line(command_line, dependencies_description, job_directory=self.job_directory(job_id).job_directory)
        else:
            command_line = self._setup_job_file(
                job_id,
                command_line,
                dependencies_description=dependencies_description,
                env=env,
                setup_params=setup_params
            )
        return command_line

    def _start_monitor(self, *args, **kwd):
        # Monitor in a daemon-ish background thread unless background=False
        # is explicitly requested (used for synchronous execution).
        if kwd.get("background", True):
            thread.start_new_thread(self._monitor_execution, args)
        else:
            self._monitor_execution(*args)


# Job Locks (for status updates). Following methods are locked.
#    _finish_execution(self, job_id)
#    _get_status(self, job_id)
#    _is_cancelled(self, job_id)
#    _record_pid(self, job_id, pid)
#    _get_pid_for_killing_or_cancel(self, job_id)
#
class Manager(BaseUnqueuedManager):
    """
    A simple job manager that just directly runs jobs as given (no
    queueing). Preserved for compatibility with older versions of Pulsar
    client code where Galaxy is used to maintain queue (like Galaxy's
    local job runner).
    """
    manager_type = "unqueued"

    def __init__(self, name, app, **kwds):
        super().__init__(name, app, **kwds)

    def __get_pid(self, job_id):
        # Best-effort read of the recorded pid; any problem (missing file,
        # unparsable contents) yields None rather than an exception.
        pid = None
        try:
            pid = self._job_directory(job_id).load_metadata(JOB_FILE_PID)
            if pid is not None:
                pid = int(pid)
        except Exception:
            pass
        return pid

    def _get_job_lock(self, job_id):
        # Per-job lock guarding status-related metadata updates.
        return self._job_directory(job_id).lock()

    def get_status(self, job_id):
        with self._get_job_lock(job_id):
            return self._get_status(job_id)

    def kill(self, job_id):
        log.info("Attempting to kill job with job_id %s" % job_id)
        job_lock = self._get_job_lock(job_id)
        # Resolve-and-cancel happens under the lock; the actual signal is
        # sent after releasing it.
        with job_lock:
            pid = self._get_pid_for_killing_or_cancel(job_id)
        if pid:
            log.info("Attempting to kill pid %s" % pid)
            kill_pid(pid)

    def _monitor_execution(self, job_id, proc, stdout, stderr):
        # Blocks until the subprocess exits, then records its return code
        # and clears the running/submitted markers.
        try:
            proc.wait()
            stdout.close()
            stderr.close()
            return_code = proc.returncode
            # job_script might have set return code so use that if set, otherwise use this one.
            # Should there be someway to signal failure if this is non-0 in that case?
            self._write_return_code_if_unset(job_id, str(return_code))
        finally:
            with self._get_job_lock(job_id):
                self._finish_execution(job_id)

    # with job lock
    def _finish_execution(self, job_id):
        super()._finish_execution(job_id)
        self._job_directory(job_id).remove_metadata(JOB_FILE_PID)

    # with job lock
    def _get_status(self, job_id):
        return super()._get_status(job_id)

    # with job lock
    def _was_cancelled(self, job_id):
        return super()._was_cancelled(job_id)

    # with job lock
    def _record_pid(self, job_id, pid):
        self._job_directory(job_id).store_metadata(JOB_FILE_PID, str(pid))

    # with job lock
    def _get_pid_for_killing_or_cancel(self, job_id):
        # Only running/queued jobs can be killed or cancelled; marks the
        # job cancelled and returns the pid to signal (None if unknown).
        job_status = self._get_status(job_id)
        if job_status not in [status.RUNNING, status.QUEUED]:
            return

        pid = self.__get_pid(job_id)
        self._record_cancel(job_id)
        if pid is None:
            # No process was ever started; clearing the submitted marker is
            # enough to retire the job.
            self._job_directory(job_id).remove_metadata(JOB_FILE_SUBMITTED)
        return pid

    def _run(self, job_id, command_line, background=True):
        # Check for a cancel that raced in before launch, spawn, record the
        # pid, then hand off to the monitor.
        with self._get_job_lock(job_id):
            if self._was_cancelled(job_id):
                return

        proc, stdout, stderr = self._proc_for_job_id(job_id, command_line)
        with self._get_job_lock(job_id):
            self._record_pid(job_id, proc.pid)
        self._start_monitor(job_id, proc, stdout, stderr, background=background)

    def _proc_for_job_id(self, job_id, command_line):
        # Spawn the subprocess with stdout/stderr captured into the job
        # directory; caller owns closing the returned file objects.
        job_directory = self.job_directory(job_id)
        working_directory = job_directory.working_directory()
        stdout = self._open_standard_output(job_id)
        stderr = self._open_standard_error(job_id)
        proc = execute(command_line=command_line,
                       working_directory=working_directory,
                       stdout=stdout,
                       stderr=stderr)
        return proc, stdout, stderr

    def launch(self, job_id, command_line, submit_params={}, dependencies_description=None, env=[], setup_params=None):
        command_line = self._prepare_run(job_id, command_line, dependencies_description=dependencies_description, env=env, setup_params=setup_params)
        self._run(job_id, command_line)
class CoexecutionManager(BaseUnqueuedManager):
    """Manager that manages one job in a pod-like environment.

    Assume some process in another container will execute the command.
    """
    manager_type = "coexecution"

    def __init__(self, name, app, **kwds):
        super().__init__(name, app, **kwds)

    def get_status(self, job_id):
        return self._get_status(job_id)

    def kill(self, job_id):
        log.info("Attempting to kill job with job_id %s - unimplemented in CoexecutionManager..." % job_id)

    def _monitor_execution(self, job_id):
        """Poll until the co-executing container writes the return code file.

        Fix: the polling loop previously emitted ``print`` statements on
        every 0.1s iteration (leftover debug output flooding stdout); these
        are now proper debug-level log messages outside the loop body.
        """
        return_code_path = self._return_code_path(job_id)
        # Write dummy JOB_FILE_PID so get_status thinks this job is running.
        self._job_directory(job_id).store_metadata(JOB_FILE_PID, "1")
        try:
            log.debug("Watching for return code file %s", return_code_path)
            while not os.path.exists(return_code_path):
                time.sleep(0.1)
            log.debug("Found return code file %s", return_code_path)
            self._job_directory(job_id).remove_metadata(JOB_FILE_PID)
            # Grace period for the co-executing side to flush outputs.
            time.sleep(1)
        finally:
            self._finish_execution(job_id)

    def launch(self, job_id, command_line, submit_params={}, dependencies_description=None, env=[], setup_params=None):
        """Stage the command for the co-executing container and start the
        monitor; the command itself is run by the other container."""
        command_line = self._prepare_run(job_id, command_line, dependencies_description=dependencies_description, env=env, setup_params=setup_params)
        job_directory = self.job_directory(job_id)
        working_directory = job_directory.working_directory()
        # Capture stdout/stderr into the job directory and run the job
        # script from within the working directory.
        command_line += " > '{}' 2> '{}'".format(
            self._stdout_path(job_id),
            self._stderr_path(job_id),
        )
        command_line = "cd '{}'; sh {}".format(working_directory, command_line)
        self._write_command_line(job_id, command_line)
        self._start_monitor(job_id)
def execute(command_line, working_directory, stdout, stderr):
    """Spawn ``command_line`` via the shell with a sanitised environment.

    On POSIX the child is placed in its own process group (``os.setpgrp``)
    so signals sent to the manager do not propagate to the job.
    """
    setpgrp = None if platform.system() == 'Windows' else os.setpgrp
    return Popen(
        args=command_line,
        shell=True,
        cwd=working_directory,
        stdout=stdout,
        stderr=stderr,
        preexec_fn=setpgrp,
        env=new_clean_env(),
    )
__all__ = ['Manager']
| galaxyproject/pulsar | pulsar/managers/unqueued.py | Python | apache-2.0 | 9,371 | [
"Galaxy"
] | c0f01d847ea8af552fa1f4177a1e22338e5766d927fd61b6cc8f7725107869b5 |
"""In-memory representation of interfaces and other data structures.
The objects in this module are used to build a representation of an XML interface
file in memory.
@see: L{reader} constructs these data-structures
@see: U{http://0install.net/interface-spec.html} description of the domain model
@var defaults: Default values for the 'default' attribute for <environment> bindings of
well-known variables.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _, logger
import os, re, locale, sys
from zeroinstall import SafeException, version
from zeroinstall.injector.namespaces import XMLNS_IFACE
from zeroinstall.injector import qdom
from zeroinstall import support, zerostore
# Element names for bindings in feed files
binding_names = frozenset(['environment', 'overlay', 'executable-in-path', 'executable-in-var'])
# Element names that declare a dependency on / restriction of another interface
_dependency_names = frozenset(['requires', 'restricts'])

# Network-use policy levels, from most to least restrictive
network_offline = 'off-line'
network_minimal = 'minimal'
network_full = 'full'
network_levels = (network_offline, network_minimal, network_full)

stability_levels = {} # Name -> Stability

# Fallback values for <environment> bindings of well-known variables, used
# when the variable is unset and the binding provides no 'default' attribute.
defaults = {
    'PATH': '/bin:/usr/bin',
    'XDG_CONFIG_DIRS': '/etc/xdg',
    'XDG_DATA_DIRS': '/usr/local/share:/usr/share',
}
class InvalidInterface(SafeException):
    """Raised when parsing an invalid feed.
    @ivar feed_url: the URL of the feed being parsed, if known
    """
    feed_url = None

    def __init__(self, message, ex = None):
        """@param message: description of the problem
        @param ex: underlying exception, if any; its text is appended to message"""
        if ex:
            try:
                message += "\n\n(exact error: %s)" % ex
            except UnicodeError:
                # Narrowed from a bare 'except:' - the documented failure mode
                # is a Unicode decode/encode error, and a bare except also
                # swallowed KeyboardInterrupt/SystemExit.
                #
                # Some Python messages have type str but contain UTF-8 sequences.
                # (e.g. IOException). Adding these to a Unicode 'message' (e.g.
                # after gettext translation) will cause an error.
                import codecs
                decoder = codecs.lookup('utf-8')
                decex = decoder.decode(str(ex), errors = 'replace')[0]
                message += "\n\n(exact error: %s)" % decex
        SafeException.__init__(self, message)

    def __unicode__(self):
        if hasattr(SafeException, '__unicode__'):
            # Python >= 2.6
            if self.feed_url:
                return _('%s [%s]') % (SafeException.__unicode__(self), self.feed_url)
            return SafeException.__unicode__(self)
        else:
            return support.unicode(SafeException.__str__(self))
def _split_arch(arch):
"""Split an arch into an (os, machine) tuple. Either or both parts may be None."""
if not arch:
return None, None
elif '-' not in arch:
raise SafeException(_("Malformed arch '%s'") % arch)
else:
osys, machine = arch.split('-', 1)
if osys == '*': osys = None
if machine == '*': machine = None
return osys, machine
def _join_arch(osys, machine):
if osys == machine == None: return None
return "%s-%s" % (osys or '*', machine or '*')
def _best_language_match(options):
(language, encoding) = locale.getlocale()
if language:
# xml:lang uses '-', while LANG uses '_'
language = language.replace('_', '-')
else:
language = 'en-US'
return (options.get(language, None) or # Exact match (language+region)
options.get(language.split('-', 1)[0], None) or # Matching language
options.get('en', None)) # English
class Stability(object):
    """A stability rating. Each implementation has an upstream stability rating and,
    optionally, a user-set rating.
    @ivar level: numeric rank used for ordering (higher sorts as more stable)
    @ivar name: canonical name; registered into the module's stability_levels map
    @ivar description: human-readable explanation"""
    __slots__ = ['level', 'name', 'description']

    def __init__(self, level, name, description):
        self.level = level
        self.name = name
        self.description = description
        assert name not in stability_levels
        stability_levels[name] = self

    def __cmp__(self, other):
        # Python 2 ordering; __lt__/__eq__ below cover Python 3.
        return cmp(self.level, other.level)

    def __lt__(self, other):
        if isinstance(other, Stability):
            return self.level < other.level
        else:
            return NotImplemented

    def __eq__(self, other):
        if isinstance(other, Stability):
            return self.level == other.level
        else:
            return NotImplemented

    def __hash__(self):
        # Fix: defining __eq__ without __hash__ makes instances unhashable on
        # Python 3; hash consistently with __eq__ (equal level => equal hash).
        return hash(self.level)

    def __str__(self):
        return self.name

    def __repr__(self):
        return _("<Stability: %s>") % self.description
def process_binding(e):
    """Internal: convert a binding element into the corresponding Binding object.
    @type e: L{qdom.Element}
    @raise InvalidInterface: on missing or contradictory attributes"""
    if e.name == 'environment':
        # Unknown mode values raise KeyError, matching the original contract.
        mode = {
            None: EnvironmentBinding.PREPEND,
            'prepend': EnvironmentBinding.PREPEND,
            'append': EnvironmentBinding.APPEND,
            'replace': EnvironmentBinding.REPLACE,
        }[e.getAttribute('mode')]
        binding = EnvironmentBinding(e.getAttribute('name'),
                        insert = e.getAttribute('insert'),
                        default = e.getAttribute('default'),
                        value = e.getAttribute('value'),
                        mode = mode,
                        separator = e.getAttribute('separator'))
        if not binding.name:
            raise InvalidInterface(_("Missing 'name' in binding"))
        if binding.insert is None and binding.value is None:
            raise InvalidInterface(_("Missing 'insert' or 'value' in binding"))
        if binding.insert is not None and binding.value is not None:
            raise InvalidInterface(_("Binding contains both 'insert' and 'value'"))
        return binding
    if e.name == 'executable-in-path':
        return ExecutableBinding(e, in_path = True)
    if e.name == 'executable-in-var':
        return ExecutableBinding(e, in_path = False)
    if e.name == 'overlay':
        return OverlayBinding(e.getAttribute('src'), e.getAttribute('mount-point'))
    raise Exception(_("Unknown binding type '%s'") % e.name)
def process_depends(item, local_feed_dir):
    """Internal: build a dependency object from a <requires>/<restricts>/<runner> element.
    (also called from selections)
    @param local_feed_dir: directory for resolving relative interface paths, or None"""
    attrs = item.attrs
    dep_iface = item.getAttribute('interface')
    if not dep_iface:
        raise InvalidInterface(_("Missing 'interface' on <%s>") % item.name)
    if dep_iface.startswith('.'):
        if not local_feed_dir:
            raise InvalidInterface(_('Relative interface URI "%s" in non-local feed') % dep_iface)
        dep_iface = os.path.abspath(os.path.join(local_feed_dir, dep_iface))
        # (updates the element too, in case we write it out again)
        attrs['interface'] = dep_iface

    if item.name == 'restricts':
        dependency = InterfaceRestriction(dep_iface, element = item)
    else:
        dependency = InterfaceDependency(dep_iface, element = item)

    for child in item.childNodes:
        if child.uri != XMLNS_IFACE:
            continue
        if child.name in binding_names:
            dependency.bindings.append(process_binding(child))
        elif child.name == 'version':
            dependency.restrictions.append(
                VersionRangeRestriction(not_before = parse_version(child.getAttribute('not-before')),
                            before = parse_version(child.getAttribute('before'))))
    return dependency
def N_(message): return message  # mark for translation without translating here

# The graded stability levels, in increasing order of preference.
insecure = Stability(0, N_('insecure'), _('This is a security risk'))
buggy = Stability(5, N_('buggy'), _('Known to have serious bugs'))
developer = Stability(10, N_('developer'), _('Work-in-progress - bugs likely'))
testing = Stability(20, N_('testing'), _('Stability unknown - please test!'))
stable = Stability(30, N_('stable'), _('Tested - no serious problems found'))
packaged = Stability(35, N_('packaged'), _('Supplied by the local package manager'))
preferred = Stability(40, N_('preferred'), _('Best of all - must be set manually'))
del N_
class Restriction(object):
    """Abstract base: a Restriction limits the allowed implementations of an Interface."""
    __slots__ = []

    def meets_restriction(self, impl):
        """Called by the L{solver.Solver} to check whether a particular implementation is acceptable.
        @param impl: the candidate implementation
        @return: False if this implementation is not a possibility
        @rtype: bool
        """
        raise NotImplementedError(_("Abstract"))
class VersionRestriction(Restriction):
    """Accept only implementations with one exact version number.
    @since: 0.40"""

    def __init__(self, version):
        """@param version: the required version number
        @see: L{parse_version}; use this to pre-process the version number
        """
        self.version = version

    def meets_restriction(self, impl):
        return impl.version == self.version

    def __str__(self):
        return _("(restriction: version = %s)") % format_version(self.version)
class VersionRangeRestriction(Restriction):
    """Only versions within the given range are acceptable"""
    __slots__ = ['before', 'not_before']

    def __init__(self, before, not_before):
        """@param before: chosen versions must be earlier than this
        @param not_before: versions must be at least this high
        @see: L{parse_version}; use this to pre-process the versions
        """
        self.before = before
        self.not_before = not_before

    def meets_restriction(self, impl):
        # Lower bound is inclusive, upper bound exclusive; a None/falsy bound
        # means "unbounded" on that side.
        if self.not_before and impl.version < self.not_before:
            return False
        return not (self.before and impl.version >= self.before)

    def __str__(self):
        if self.not_before is None and self.before is None:
            range = 'none'
        else:
            range = ''
            if self.not_before is not None:
                range += format_version(self.not_before) + ' <= '
            range += 'version'
            if self.before is not None:
                range += ' < ' + format_version(self.before)
        return _("(restriction: %s)") % range
class Binding(object):
    """Information about how the choice of a Dependency is made known
    to the application being run."""

    @property
    def command(self):
        """Name of the specific command needed by this binding, if any (None otherwise).
        @since: 1.2"""
        return None
class EnvironmentBinding(Binding):
    """Indicate the chosen implementation using an environment variable."""
    # NOTE: 'separator' is intentionally absent from __slots__; Binding has no
    # __slots__, so instances carry a __dict__ that holds it (as before).
    __slots__ = ['name', 'insert', 'default', 'mode', 'value']

    PREPEND = 'prepend'
    APPEND = 'append'
    REPLACE = 'replace'

    def __init__(self, name, insert, default = None, mode = PREPEND, value=None, separator=None):
        """
        mode argument added in version 0.28
        value argument added in version 0.52
        """
        self.name = name
        self.insert = insert
        self.default = default
        self.mode = mode
        self.value = value
        self.separator = os.pathsep if separator is None else separator

    def __str__(self):
        return _("<environ %(name)s %(mode)s %(insert)s %(value)s>") % \
            {'name': self.name, 'mode': self.mode, 'insert': self.insert, 'value': self.value}

    __repr__ = __str__

    def get_value(self, path, old_value):
        """Calculate the new value of the environment variable after applying this binding.
        @param path: the path to the selected implementation
        @param old_value: the current value of the environment variable
        @return: the new value for the environment variable"""
        if self.insert is not None:
            extra = os.path.join(path, self.insert)
        else:
            assert self.value is not None
            extra = self.value

        if self.mode == EnvironmentBinding.REPLACE:
            return extra

        if old_value is None:
            # Fall back to the binding's default, then the well-known defaults.
            old_value = self.default or defaults.get(self.name, None)
            if old_value is None:
                return extra
        if self.mode == EnvironmentBinding.PREPEND:
            return extra + self.separator + old_value
        return old_value + self.separator + extra

    def _toxml(self, doc, prefixes):
        """Create a DOM element for this binding.
        @param doc: document to use to create the element
        @return: the new element
        """
        elem = doc.createElementNS(XMLNS_IFACE, 'environment')
        elem.setAttributeNS(None, 'name', self.name)
        if self.mode is not None:
            elem.setAttributeNS(None, 'mode', self.mode)
        if self.insert is not None:
            elem.setAttributeNS(None, 'insert', self.insert)
        else:
            elem.setAttributeNS(None, 'value', self.value)
        if self.default:
            elem.setAttributeNS(None, 'default', self.default)
        if self.separator:
            elem.setAttributeNS(None, 'separator', self.separator)
        return elem
class ExecutableBinding(Binding):
    """Make the chosen command available in $PATH.
    @ivar in_path: True to add the named command to $PATH, False to store in named variable
    @type in_path: bool
    """
    # 'in_path' lives in the instance __dict__ supplied by the Binding base.
    __slots__ = ['qdom']

    def __init__(self, qdom, in_path):
        self.qdom = qdom
        self.in_path = in_path

    def __str__(self):
        return str(self.qdom)

    __repr__ = __str__

    def _toxml(self, doc, prefixes):
        # Serialize by echoing the stored element back out.
        return self.qdom.toDOM(doc, prefixes)

    @property
    def name(self):
        """The command / variable name, from the element's 'name' attribute."""
        return self.qdom.getAttribute('name')

    @property
    def command(self):
        """The command this binding needs; 'run' when unspecified."""
        return self.qdom.getAttribute("command") or 'run'
class OverlayBinding(Binding):
    """Make the chosen implementation available by overlaying it onto another part of the file-system.
    This is to support legacy programs which use hard-coded paths."""
    __slots__ = ['src', 'mount_point']

    def __init__(self, src, mount_point):
        self.src = src
        self.mount_point = mount_point

    def __str__(self):
        return _("<overlay %(src)s on %(mount_point)s>") % {'src': self.src or '.', 'mount_point': self.mount_point or '/'}

    __repr__ = __str__

    def _toxml(self, doc, prefixes):
        """Create a DOM element for this binding.
        @param doc: document to use to create the element
        @return: the new element
        """
        root = doc.createElementNS(XMLNS_IFACE, 'overlay')
        # Emit only the attributes that were actually given.
        for attr, val in (('src', self.src), ('mount-point', self.mount_point)):
            if val is not None:
                root.setAttributeNS(None, attr, val)
        return root
class Feed(object):
    """An interface's feeds are other interfaces whose implementations can also be
    used as implementations of this interface."""
    __slots__ = ['uri', 'os', 'machine', 'user_override', 'langs', 'site_package']

    def __init__(self, uri, arch, user_override, langs = None, site_package = False):
        self.uri = uri
        self.langs = langs
        self.site_package = site_package
        # This indicates whether the feed comes from the user's overrides
        # file. If true, writer.py will write it when saving.
        self.user_override = user_override
        self.os, self.machine = _split_arch(arch)

    def __str__(self):
        return "<Feed from %s>" % self.uri

    __repr__ = __str__

    # Recombined "os-machine" form of the architecture.
    arch = property(lambda self: _join_arch(self.os, self.machine))
class Dependency(object):
    """A Dependency indicates that an Implementation requires some additional
    code to function. This is an abstract base class.
    @ivar qdom: the XML element for this Dependency (since 0launch 0.51)
    @type qdom: L{qdom.Element}
    @ivar metadata: any extra attributes from the XML element
    @type metadata: {str: str}
    """
    __slots__ = ['qdom']

    # Importance levels used by subclasses:
    Essential = "essential" # Must select a version of the dependency
    Recommended = "recommended" # Prefer to select a version
    Restricts = "restricts" # Just adds restrictions without expressing any opinion

    def __init__(self, element):
        assert isinstance(element, qdom.Element), type(element) # Use InterfaceDependency instead!
        self.qdom = element

    @property
    def metadata(self):
        """All attributes of the underlying XML element."""
        return self.qdom.attrs

    def get_required_commands(self):
        """Return a list of command names needed by this dependency"""
        return []
class InterfaceRestriction(Dependency):
    """A Dependency that restricts the possible choices of a Zero Install interface.
    @ivar interface: the interface required by this dependency
    @type interface: str
    @ivar restrictions: a list of constraints on acceptable implementations
    @type restrictions: [L{Restriction}]
    @since: 1.10
    """
    __slots__ = ['interface', 'restrictions']

    def __init__(self, interface, restrictions = None, element = None):
        Dependency.__init__(self, element)
        assert isinstance(interface, (str, support.unicode))
        assert interface
        self.interface = interface
        self.restrictions = [] if restrictions is None else restrictions

    # A pure restriction never requires the dependency to be selected and
    # carries no bindings.
    importance = Dependency.Restricts
    bindings = ()

    def __str__(self):
        return _("<Restriction on %(interface)s; %(restrictions)s>") % {'interface': self.interface, 'restrictions': self.restrictions}
class InterfaceDependency(InterfaceRestriction):
    """A Dependency on a Zero Install interface.
    @ivar interface: the interface required by this dependency
    @type interface: str
    @ivar restrictions: a list of constraints on acceptable implementations
    @type restrictions: [L{Restriction}]
    @ivar bindings: how to make the choice of implementation known
    @type bindings: [L{Binding}]
    @since: 0.28
    """
    __slots__ = ['bindings']

    def __init__(self, interface, restrictions = None, element = None):
        InterfaceRestriction.__init__(self, interface, restrictions, element)
        self.bindings = []

    def __str__(self):
        return _("<Dependency on %(interface)s; bindings: %(bindings)s%(restrictions)s>") % {'interface': self.interface, 'bindings': self.bindings, 'restrictions': self.restrictions}

    @property
    def importance(self):
        return self.qdom.getAttribute("importance") or Dependency.Essential

    def get_required_commands(self):
        """Return a list of command names needed by this dependency"""
        if self.qdom.name == 'runner':
            commands = [self.qdom.getAttribute('command') or 'run']
        else:
            commands = []
        commands.extend(c for c in (b.command for b in self.bindings) if c is not None)
        return commands

    @property
    def command(self):
        if self.qdom.name != 'runner':
            return None
        return self.qdom.getAttribute('command') or 'run'
class RetrievalMethod(object):
    """Abstract base: a RetrievalMethod provides a way to fetch an implementation."""
    __slots__ = []
class DownloadSource(RetrievalMethod):
    """A DownloadSource provides a way to fetch an implementation."""
    __slots__ = ['implementation', 'url', 'size', 'extract', 'start_offset', 'type']

    def __init__(self, implementation, url, size, extract, start_offset = 0, type = None):
        self.implementation = implementation
        self.url = url
        self.size = size
        self.extract = extract
        self.start_offset = start_offset
        self.type = type # MIME type - see unpack.py
class RenameStep(RetrievalMethod):
    """A Rename provides a way to rename / move a file within an implementation."""
    __slots__ = ['source', 'dest']

    def __init__(self, source, dest):
        self.source = source
        self.dest = dest
class Recipe(RetrievalMethod):
    """Get an implementation by following a series of steps.
    @ivar size: the combined download sizes from all the steps
    @type size: int
    @ivar steps: the sequence of steps which must be performed
    @type steps: [L{RetrievalMethod}]"""
    __slots__ = ['steps']

    def __init__(self):
        self.steps = []

    @property
    def size(self):
        # Only DownloadSource steps contribute to the total; sum() of an
        # empty sequence is 0, as before.
        return sum(x.size for x in self.steps if isinstance(x, DownloadSource))
class DistributionSource(RetrievalMethod):
    """A package that is installed using the distribution's tools (including PackageKit).
    @ivar install: a function to call to install this package
    @type install: (L{handler.Handler}) -> L{tasks.Blocker}
    @ivar package_id: the package name, in a form recognised by the distribution's tools
    @type package_id: str
    @ivar size: the download size in bytes
    @type size: int
    @ivar needs_confirmation: whether the user should be asked to confirm before calling install()
    @type needs_confirmation: bool"""
    __slots__ = ['package_id', 'size', 'install', 'needs_confirmation']

    def __init__(self, package_id, size, install, needs_confirmation = True):
        RetrievalMethod.__init__(self)
        self.package_id = package_id
        self.size = size
        self.install = install
        self.needs_confirmation = needs_confirmation
class Command(object):
    """A Command is a way of running an Implementation as a program."""
    __slots__ = ['qdom', '_depends', '_local_dir', '_runner', '_bindings']

    def __init__(self, qdom, local_dir):
        """@param qdom: the <command> element
        @param local_dir: the directory containing the feed (for relative dependencies), or None if not local
        """
        assert qdom.name == 'command', 'not <command>: %s' % qdom
        self.qdom = qdom
        self._local_dir = local_dir
        # Lazily-computed caches, filled in by the 'requires' / 'bindings'
        # properties on first access.
        self._depends = None
        self._bindings = None

    # The 'path' attribute of the <command> element, or None if absent.
    path = property(lambda self: self.qdom.attrs.get("path", None))

    def _toxml(self, doc, prefixes):
        # Serialize by echoing the original element back out unchanged.
        return self.qdom.toDOM(doc, prefixes)

    @property
    def requires(self):
        # Parses <requires>/<restricts>/<runner> children on first access and
        # caches the result.  Side effect: records the (single) <runner>
        # dependency in self._runner, which get_runner() relies on.
        if self._depends is None:
            self._runner = None
            depends = []
            for child in self.qdom.childNodes:
                if child.name in _dependency_names:
                    dep = process_depends(child, self._local_dir)
                    depends.append(dep)
                elif child.name == 'runner':
                    if self._runner:
                        raise InvalidInterface(_("Multiple <runner>s in <command>!"))
                    dep = process_depends(child, self._local_dir)
                    depends.append(dep)
                    self._runner = dep
            self._depends = depends
        return self._depends

    def get_runner(self):
        # Touching 'requires' populates _runner as a side effect.
        self.requires # (sets _runner)
        return self._runner

    def __str__(self):
        return str(self.qdom)

    @property
    def bindings(self):
        """@since: 1.3"""
        # Parses binding children (in the Zero Install namespace only) on
        # first access and caches the list.
        if self._bindings is None:
            bindings = []
            for e in self.qdom.childNodes:
                if e.uri != XMLNS_IFACE: continue
                if e.name in binding_names:
                    bindings.append(process_binding(e))
            self._bindings = bindings
        return self._bindings
class Implementation(object):
    """An Implementation is a package which implements an Interface.
    @ivar download_sources: list of methods of getting this implementation
    @type download_sources: [L{RetrievalMethod}]
    @ivar feed: the feed owning this implementation (since 0.32)
    @type feed: [L{ZeroInstallFeed}]
    @ivar bindings: how to tell this component where it itself is located (since 0.31)
    @type bindings: [Binding]
    @ivar upstream_stability: the stability reported by the packager
    @type upstream_stability: [insecure | buggy | developer | testing | stable | packaged]
    @ivar user_stability: the stability as set by the user
    @type user_stability: [insecure | buggy | developer | testing | stable | packaged | preferred]
    @ivar langs: natural languages supported by this package
    @type langs: str
    @ivar requires: interfaces this package depends on
    @type requires: [L{Dependency}]
    @ivar commands: ways to execute as a program
    @type commands: {str: Command}
    @ivar metadata: extra metadata from the feed
    @type metadata: {"[URI ]localName": str}
    @ivar id: a unique identifier for this Implementation
    @ivar version: a parsed version number
    @ivar released: release date
    @ivar local_path: the directory containing this local implementation, or None if it isn't local (id isn't a path)
    @type local_path: str | None
    @ivar requires_root_install: whether the user will need admin rights to use this
    @type requires_root_install: bool
    """
    # Note: user_stability shouldn't really be here
    __slots__ = ['upstream_stability', 'user_stability', 'langs',
            'requires', 'metadata', 'download_sources', 'commands',
            'id', 'feed', 'version', 'released', 'bindings', 'machine']

    def __init__(self, feed, id):
        assert id
        self.feed = feed
        self.id = id
        self.user_stability = None
        self.upstream_stability = None
        self.metadata = {} # [URI + " "] + localName -> value
        self.requires = []
        self.version = None
        self.released = None
        self.download_sources = []
        self.langs = ""
        self.machine = None
        self.bindings = []
        self.commands = {}

    def get_stability(self):
        # User override wins; then the feed's rating; then 'testing'.
        return self.user_stability or self.upstream_stability or testing

    def __str__(self):
        return self.id

    def __repr__(self):
        return "v%s (%s)" % (self.get_version(), self.id)

    def __cmp__(self, other):
        """Newer versions come first"""
        # Python 2 only ('cmp' builtin); __le__ below covers Python 3.
        # Arguments are reversed so that higher versions sort earlier.
        d = cmp(other.version, self.version)
        if d: return d
        # If the version number is the same, just give a stable sort order, and
        # ensure that two different implementations don't compare equal.
        d = cmp(other.feed.url, self.feed.url)
        if d: return d
        return cmp(other.id, self.id)

    def __hash__(self):
        return self.id.__hash__()

    def __eq__(self, other):
        # Identity semantics: distinct objects are never equal, even if their
        # fields match.
        return self is other

    def __le__(self, other):
        if isinstance(other, Implementation):
            # Same reversed ("newest first") ordering as __cmp__ above.
            if other.version < self.version: return True
            elif other.version > self.version: return False

            if other.feed.url < self.feed.url: return True
            elif other.feed.url > self.feed.url: return False

            return other.id <= self.id
        else:
            return NotImplemented

    def get_version(self):
        """Return the version as a string.
        @see: L{format_version}
        """
        return format_version(self.version)

    arch = property(lambda self: _join_arch(self.os, self.machine))

    # Class-level defaults; subclasses override some of these.
    os = None
    local_path = None
    digests = None
    requires_root_install = False

    def _get_main(self):
        """@deprecated: use commands["run"] instead"""
        main = self.commands.get("run", None)
        if main is not None:
            return main.path
        return None
    def _set_main(self, path):
        """@deprecated: use commands["run"] instead"""
        if path is None:
            if "run" in self.commands:
                del self.commands["run"]
        else:
            self.commands["run"] = Command(qdom.Element(XMLNS_IFACE, 'command', {'path': path, 'name': 'run'}), None)
    main = property(_get_main, _set_main)

    def is_available(self, stores):
        """Is this Implementation available locally?
        (a local implementation, an installed distribution package, or a cached ZeroInstallImplementation)
        @rtype: bool
        @since: 0.53
        """
        raise NotImplementedError("abstract")
class DistributionImplementation(Implementation):
    """An implementation provided by the distribution. Information such as the version
    comes from the package manager.
    @ivar package_implementation: the <package-implementation> element that generated this impl (since 1.7)
    @type package_implementation: L{qdom.Element}
    @since: 0.28"""
    __slots__ = ['distro', 'installed', 'package_implementation']

    def __init__(self, feed, id, distro, package_implementation = None):
        assert id.startswith('package:')
        Implementation.__init__(self, feed, id)
        self.distro = distro
        self.installed = False
        self.package_implementation = package_implementation

        if package_implementation:
            # Pick up any <command> children declared on the element.
            for child in package_implementation.childNodes:
                if child.uri != XMLNS_IFACE:
                    continue
                if child.name != 'command':
                    continue
                command_name = child.attrs.get('name', None)
                if not command_name:
                    raise InvalidInterface('Missing name for <command>')
                self.commands[command_name] = Command(child, local_dir = None)

    @property
    def requires_root_install(self):
        # Installing a missing distribution package needs admin rights.
        return not self.installed

    def is_available(self, stores):
        return self.installed
class ZeroInstallImplementation(Implementation):
    """An implementation where all the information comes from Zero Install.
    @ivar digests: a list of "algorithm=value" or "algorithm_value" strings (since 0.45)
    @type digests: [str]
    @since: 0.28"""
    __slots__ = ['os', 'size', 'digests', 'local_path']

    def __init__(self, feed, id, local_path):
        """id can be a local path (string starting with /) or a manifest hash (eg "sha1=XXX")"""
        assert not id.startswith('package:'), id
        Implementation.__init__(self, feed, id)
        self.size = None
        self.os = None
        self.digests = []
        self.local_path = local_path

    # Deprecated
    dependencies = property(lambda self: dict([(x.interface, x) for x in self.requires
                if isinstance(x, InterfaceRestriction)]))

    def add_download_source(self, url, size, extract, start_offset = 0, type = None):
        """Add a download source."""
        self.download_sources.append(DownloadSource(self, url, size, extract, start_offset, type))

    def set_arch(self, arch):
        self.os, self.machine = _split_arch(arch)
    arch = property(lambda self: _join_arch(self.os, self.machine), set_arch)

    def is_available(self, stores):
        # Local implementations are available if the directory exists;
        # cached ones if any digest is found in the stores.
        if self.local_path is not None:
            return os.path.exists(self.local_path)
        if self.digests:
            return stores.lookup_maybe(self.digests) is not None
        return False # (0compile creates fake entries with no digests)
class Interface(object):
    """An Interface represents some contract of behaviour.
    @ivar uri: the URI for this interface.
    @ivar stability_policy: user's configured policy.
    Implementations at this level or higher are preferred.
    Lower levels are used only if there is no other choice.
    """
    __slots__ = ['uri', 'stability_policy', 'extra_feeds']

    # Delegating properties: this data actually lives on the feed object
    # returned by the (deprecated) _main_feed property below.
    implementations = property(lambda self: self._main_feed.implementations)
    name = property(lambda self: self._main_feed.name)
    description = property(lambda self: self._main_feed.description)
    summary = property(lambda self: self._main_feed.summary)
    last_modified = property(lambda self: self._main_feed.last_modified)
    feeds = property(lambda self: self.extra_feeds + self._main_feed.feeds)
    metadata = property(lambda self: self._main_feed.metadata)
    last_checked = property(lambda self: self._main_feed.last_checked)

    def __init__(self, uri):
        """@param uri: an http(s) URL or an absolute local path
        @raise SafeException: for any other form of name"""
        assert uri
        if uri.startswith('http:') or uri.startswith('https:') or os.path.isabs(uri):
            self.uri = uri
        else:
            raise SafeException(_("Interface name '%s' doesn't start "
                        "with 'http:' or 'https:'") % uri)
        self.reset()

    def _get_feed_for(self):
        # Present the feed_for set as the {uri: True} mapping old callers expect.
        retval = {}
        for key in self._main_feed.feed_for:
            retval[key] = True
        return retval
    feed_for = property(_get_feed_for) # Deprecated (used by 0publish)

    def reset(self):
        # Clear user-level state; called from __init__ too.
        self.extra_feeds = []
        self.stability_policy = None

    def get_name(self):
        # Imported here to avoid a circular import at module load time.
        from zeroinstall.injector.iface_cache import iface_cache
        feed = iface_cache.get_feed(self.uri)
        if feed:
            return feed.get_name()
        return '(' + os.path.basename(self.uri) + ')'

    def __repr__(self):
        return _("<Interface %s>") % self.uri

    def set_stability_policy(self, new):
        assert new is None or isinstance(new, Stability)
        self.stability_policy = new

    def get_feed(self, url):
        #import warnings
        #warnings.warn("use iface_cache.get_feed instead", DeprecationWarning, 2)
        # Only user-registered feeds are searched here.
        for x in self.extra_feeds:
            if x.uri == url:
                return x
        #return self._main_feed.get_feed(url)
        return None

    def get_metadata(self, uri, name):
        return self._main_feed.get_metadata(uri, name)

    @property
    def _main_feed(self):
        # Deprecated access path kept for compatibility; warns on every use.
        import warnings
        warnings.warn("use the feed instead", DeprecationWarning, 3)
        from zeroinstall.injector import policy
        iface_cache = policy.get_deprecated_singleton_config().iface_cache
        feed = iface_cache.get_feed(self.uri)
        if feed is None:
            return _dummy_feed
        return feed
def _merge_attrs(attrs, item):
"""Add each attribute of item to a copy of attrs and return the copy.
@type attrs: {str: str}
@type item: L{qdom.Element}
@rtype: {str: str}
"""
new = attrs.copy()
for a in item.attrs:
new[str(a)] = item.attrs[a]
return new
def _get_long(elem, attr_name):
val = elem.getAttribute(attr_name)
if val is not None:
try:
val = int(val)
except ValueError:
raise SafeException(_("Invalid value for integer attribute '%(attribute_name)s': %(value)s") % {'attribute_name': attr_name, 'value': val})
return val
class ZeroInstallFeed(object):
"""A feed lists available implementations of an interface.
@ivar url: the URL for this feed
@ivar implementations: Implementations in this feed, indexed by ID
@type implementations: {str: L{Implementation}}
@ivar name: human-friendly name
@ivar summaries: short textual description (in various languages, since 0.49)
@type summaries: {str: str}
@ivar descriptions: long textual description (in various languages, since 0.49)
@type descriptions: {str: str}
@ivar last_modified: timestamp on signature
@ivar last_checked: time feed was last successfully downloaded and updated
@ivar local_path: the path of this local feed, or None if remote (since 1.7)
@type local_path: str | None
@ivar feeds: list of <feed> elements in this feed
@type feeds: [L{Feed}]
@ivar feed_for: interfaces for which this could be a feed
@type feed_for: set(str)
@ivar metadata: extra elements we didn't understand
"""
# _main is deprecated
__slots__ = ['url', 'implementations', 'name', 'descriptions', 'first_description', 'summaries', 'first_summary', '_package_implementations',
'last_checked', 'last_modified', 'feeds', 'feed_for', 'metadata', 'local_path']
def __init__(self, feed_element, local_path = None, distro = None):
    """Create a feed object from a DOM.
    @param feed_element: the root element of a feed file
    @type feed_element: L{qdom.Element}
    @param local_path: the pathname of this local feed, or None for remote feeds
    @param distro: deprecated and ignored (a DeprecationWarning is emitted if passed)"""
    self.local_path = local_path
    self.implementations = {}
    self.name = None
    self.summaries = {}        # { lang: str }
    self.first_summary = None
    self.descriptions = {}     # { lang: str }
    self.first_description = None
    self.last_modified = None
    self.feeds = []
    self.feed_for = set()
    self.metadata = []
    self.last_checked = None
    self._package_implementations = []

    if distro is not None:
        import warnings
        warnings.warn("distro argument is now ignored", DeprecationWarning, 2)

    if feed_element is None:
        return  # XXX subclass?

    assert feed_element.name in ('interface', 'feed'), "Root element should be <interface>, not %s" % feed_element
    assert feed_element.uri == XMLNS_IFACE, "Wrong namespace on root element: %s" % feed_element.uri

    main = feed_element.getAttribute('main')
    #if main: warn("Setting 'main' on the root element is deprecated. Put it on a <group> instead")

    if local_path:
        self.url = local_path
        # Base directory used to resolve relative <feed> / implementation paths.
        local_dir = os.path.dirname(local_path)
    else:
        assert local_path is None
        self.url = feed_element.getAttribute('uri')
        if not self.url:
            raise InvalidInterface(_("<interface> uri attribute missing"))
        local_dir = None  # Can't have relative paths

    min_injector_version = feed_element.getAttribute('min-injector-version')
    if min_injector_version:
        if parse_version(min_injector_version) > parse_version(version):
            raise InvalidInterface(_("This feed requires version %(min_version)s or later of "
                                     "Zero Install, but I am only version %(version)s. "
                                     "You can get a newer version from http://0install.net") %
                                   {'min_version': min_injector_version, 'version': version})

    # First pass over the root's children: collect name/summary/description,
    # <feed> and <feed-for> links; anything else (or foreign-namespace
    # elements) is stashed in self.metadata.
    for x in feed_element.childNodes:
        if x.uri != XMLNS_IFACE:
            self.metadata.append(x)
            continue
        if x.name == 'name':
            self.name = x.content
        elif x.name == 'description':
            if self.first_description == None:
                self.first_description = x.content
            self.descriptions[x.attrs.get("http://www.w3.org/XML/1998/namespace lang", 'en')] = x.content
        elif x.name == 'summary':
            if self.first_summary == None:
                self.first_summary = x.content
            self.summaries[x.attrs.get("http://www.w3.org/XML/1998/namespace lang", 'en')] = x.content
        elif x.name == 'feed-for':
            feed_iface = x.getAttribute('interface')
            if not feed_iface:
                raise InvalidInterface(_('Missing "interface" attribute in <feed-for>'))
            self.feed_for.add(feed_iface)
            # Bug report from a Debian/stable user that --feed gets the wrong value.
            # Can't reproduce (even in a Debian/stable chroot), but add some logging here
            # in case it happens again.
            logger.debug(_("Is feed-for %s"), feed_iface)
        elif x.name == 'feed':
            feed_src = x.getAttribute('src')
            if not feed_src:
                raise InvalidInterface(_('Missing "src" attribute in <feed>'))
            # Remote feeds may only reference http(s) URLs; local feeds may
            # also use relative paths (resolved against local_dir).
            if feed_src.startswith('http:') or feed_src.startswith('https:') or local_path:
                if feed_src.startswith('.'):
                    feed_src = os.path.abspath(os.path.join(local_dir, feed_src))
                langs = x.getAttribute('langs')
                if langs: langs = langs.replace('_', '-')
                self.feeds.append(Feed(feed_src, x.getAttribute('arch'), False, langs = langs))
            else:
                raise InvalidInterface(_("Invalid feed URL '%s'") % feed_src)
        else:
            self.metadata.append(x)

    if not self.name:
        raise InvalidInterface(_("Missing <name> in feed"))
    if not self.summary:
        raise InvalidInterface(_("Missing <summary> in feed"))

    # Recursively walk <group>/<implementation>/<package-implementation>,
    # accumulating inherited depends/bindings/commands down the tree.
    def process_group(group, group_attrs, base_depends, base_bindings, base_commands):
        for item in group.childNodes:
            if item.uri != XMLNS_IFACE: continue

            if item.name not in ('group', 'implementation', 'package-implementation'):
                continue

            # We've found a group or implementation. Scan for dependencies,
            # bindings and commands. Doing this here means that:
            # - We can share the code for groups and implementations here.
            # - The order doesn't matter, because these get processed first.
            # A side-effect is that the document root cannot contain
            # these.

            depends = base_depends[:]
            bindings = base_bindings[:]
            commands = base_commands.copy()

            # Legacy 'main'/'self-test' attributes become run/test commands.
            for attr, command in [('main', 'run'),
                                  ('self-test', 'test')]:
                value = item.attrs.get(attr, None)
                if value is not None:
                    commands[command] = Command(qdom.Element(XMLNS_IFACE, 'command', {'name': command, 'path': value}), None)

            for child in item.childNodes:
                if child.uri != XMLNS_IFACE: continue
                if child.name in _dependency_names:
                    dep = process_depends(child, local_dir)
                    depends.append(dep)
                elif child.name == 'command':
                    command_name = child.attrs.get('name', None)
                    if not command_name:
                        raise InvalidInterface('Missing name for <command>')
                    commands[command_name] = Command(child, local_dir)
                elif child.name in binding_names:
                    bindings.append(process_binding(child))

            # 0compile extension attribute becomes a 'compile' command.
            compile_command = item.attrs.get('http://zero-install.sourceforge.net/2006/namespaces/0compile command')
            if compile_command is not None:
                commands['compile'] = Command(qdom.Element(XMLNS_IFACE, 'command', {'name': 'compile', 'shell-command': compile_command}), None)

            item_attrs = _merge_attrs(group_attrs, item)

            if item.name == 'group':
                process_group(item, item_attrs, depends, bindings, commands)
            elif item.name == 'implementation':
                process_impl(item, item_attrs, depends, bindings, commands)
            elif item.name == 'package-implementation':
                self._package_implementations.append((item, item_attrs, depends))
            else:
                assert 0

    # Build one ZeroInstallImplementation from an <implementation> element.
    def process_impl(item, item_attrs, depends, bindings, commands):
        id = item.getAttribute('id')
        if id is None:
            raise InvalidInterface(_("Missing 'id' attribute on %s") % item)
        local_path = item_attrs.get('local-path')
        if local_dir and local_path:
            abs_local_path = os.path.abspath(os.path.join(local_dir, local_path))
            impl = ZeroInstallImplementation(self, id, abs_local_path)
        elif local_dir and (id.startswith('/') or id.startswith('.')):
            # For old feeds
            id = os.path.abspath(os.path.join(local_dir, id))
            impl = ZeroInstallImplementation(self, id, id)
        else:
            impl = ZeroInstallImplementation(self, id, None)
            if '=' in id:
                # In older feeds, the ID was the (single) digest
                impl.digests.append(id)
        if id in self.implementations:
            logger.warn(_("Duplicate ID '%(id)s' in feed '%(feed)s'"), {'id': id, 'feed': self})
        self.implementations[id] = impl

        impl.metadata = item_attrs
        try:
            version_mod = item_attrs.get('version-modifier', None)
            if version_mod:
                item_attrs['version'] += version_mod
                del item_attrs['version-modifier']
            version = item_attrs['version']
        except KeyError:
            raise InvalidInterface(_("Missing version attribute"))
        impl.version = parse_version(version)

        impl.commands = commands

        impl.released = item_attrs.get('released', None)
        impl.langs = item_attrs.get('langs', '').replace('_', '-')

        size = item.getAttribute('size')
        if size:
            impl.size = int(size)
        impl.arch = item_attrs.get('arch', None)
        try:
            stability = stability_levels[str(item_attrs['stability'])]
        except KeyError:
            # NOTE(review): item_attrs.stability below looks like a latent
            # bug (item_attrs is a mapping, not an object) -- confirm.
            stab = str(item_attrs['stability'])
            if stab != stab.lower():
                raise InvalidInterface(_('Stability "%s" invalid - use lower case!') % item_attrs.stability)
            raise InvalidInterface(_('Stability "%s" invalid') % item_attrs['stability'])
        if stability >= preferred:
            raise InvalidInterface(_("Upstream can't set stability to preferred!"))
        impl.upstream_stability = stability

        impl.bindings = bindings
        impl.requires = depends

        # Download sources: <archive>, <manifest-digest> and <recipe>.
        for elem in item.childNodes:
            if elem.uri != XMLNS_IFACE: continue
            if elem.name == 'archive':
                url = elem.getAttribute('href')
                if not url:
                    raise InvalidInterface(_("Missing href attribute on <archive>"))
                size = elem.getAttribute('size')
                if not size:
                    raise InvalidInterface(_("Missing size attribute on <archive>"))
                impl.add_download_source(url = url, size = int(size),
                                         extract = elem.getAttribute('extract'),
                                         start_offset = _get_long(elem, 'start-offset'),
                                         type = elem.getAttribute('type'))
            elif elem.name == 'manifest-digest':
                for aname, avalue in elem.attrs.items():
                    if ' ' not in aname:
                        impl.digests.append(zerostore.format_algorithm_digest_pair(aname, avalue))
            elif elem.name == 'recipe':
                recipe = Recipe()
                for recipe_step in elem.childNodes:
                    if recipe_step.uri == XMLNS_IFACE and recipe_step.name == 'archive':
                        url = recipe_step.getAttribute('href')
                        if not url:
                            raise InvalidInterface(_("Missing href attribute on <archive>"))
                        size = recipe_step.getAttribute('size')
                        if not size:
                            raise InvalidInterface(_("Missing size attribute on <archive>"))
                        recipe.steps.append(DownloadSource(None, url = url, size = int(size),
                                                           extract = recipe_step.getAttribute('extract'),
                                                           start_offset = _get_long(recipe_step, 'start-offset'),
                                                           type = recipe_step.getAttribute('type')))
                    elif recipe_step.uri == XMLNS_IFACE and recipe_step.name == 'rename':
                        source = recipe_step.getAttribute('source')
                        if not source:
                            raise InvalidInterface(_("Missing source attribute on <rename>"))
                        dest = recipe_step.getAttribute('dest')
                        if not dest:
                            raise InvalidInterface(_("Missing dest attribute on <rename>"))
                        recipe.steps.append(RenameStep(source=source, dest=dest))
                    else:
                        logger.info(_("Unknown step '%s' in recipe; skipping recipe"), recipe_step.name)
                        break
                else:
                    # Only keep the recipe if every step was understood
                    # (the for-loop completed without break).
                    impl.download_sources.append(recipe)

    root_attrs = {'stability': 'testing'}
    root_commands = {}
    if main:
        logger.info("Note: @main on document element is deprecated in %s", self)
        root_commands['run'] = Command(qdom.Element(XMLNS_IFACE, 'command', {'path': main, 'name': 'run'}), None)
    process_group(feed_element, root_attrs, [], [], root_commands)
def get_distro_feed(self):
    """Does this feed contain any <package-implementation> elements?
    i.e. is it worth asking the package manager for more information?
    @return: the URL of the virtual feed, or None
    @since: 0.49"""
    if not self._package_implementations:
        return None
    return "distribution:" + self.url
def get_package_impls(self, distro):
    """Find the best <package-implementation> element(s) for the given distribution.
    @param distro: the distribution to use to rate them
    @type distro: L{distro.Distribution}
    @return: a list of tuples for the best ranked elements
    @rtype: [str]
    @since: 0.49"""
    best_score = 0
    best_impls = []
    for item, item_attrs, depends in self._package_implementations:
        names = item_attrs.get('distributions', '')
        # An element with no 'distributions' attribute gets a neutral 0.5.
        score_this_item = max(
            distro.get_score(name) if name else 0.5
            for name in names.split(' '))
        if score_this_item > best_score:
            best_score = score_this_item
            best_impls = []
        if score_this_item == best_score:
            best_impls.append((item, item_attrs, depends))
    return best_impls
def get_name(self):
    """Return the feed's declared name, or a bracketed placeholder built
    from the last component of its URL when no <name> was given."""
    if self.name:
        return self.name
    return '(' + os.path.basename(self.url) + ')'
def __repr__(self):
    """Return a short, localised debugging representation of this feed."""
    return _("<Feed %s>") % self.url
def set_stability_policy(self, new):
    """Record the stability policy for this feed (None clears it).
    @type new: L{Stability} | None"""
    assert new is None or isinstance(new, Stability)
    self.stability_policy = new
def get_feed(self, url):
    """Return the first <feed> entry whose URI equals url, or None."""
    for entry in self.feeds:
        if entry.uri == url:
            return entry
    return None
def add_metadata(self, elem):
    """Append an extra (unrecognised) element for later lookup via get_metadata()."""
    self.metadata.append(elem)
def get_metadata(self, uri, name):
    """Return a list of interface metadata elements with this name and namespace URI."""
    matches = []
    for elem in self.metadata:
        if elem.name == name and elem.uri == uri:
            matches.append(elem)
    return matches
@property
def summary(self):
    """Short description in the best-matching language, falling back to
    the first <summary> seen while parsing."""
    localized = _best_language_match(self.summaries)
    return localized or self.first_summary
@property
def description(self):
    """Long description in the best-matching language, falling back to
    the first <description> seen while parsing."""
    localized = _best_language_match(self.descriptions)
    return localized or self.first_description
def get_replaced_by(self):
    """Return the URI of the interface that replaced the one with the URI of this feed's URL.
    This is the value of the feed's <replaced-by interface'...'/> element.
    @return: the new URI, or None if it hasn't been replaced
    @since: 1.7"""
    for elem in self.metadata:
        if elem.uri != XMLNS_IFACE or elem.name != 'replaced-by':
            continue
        new_uri = elem.getAttribute('interface')
        if not new_uri:
            continue
        # Remote replacements must be http(s); local feeds may point anywhere.
        if new_uri.startswith('http:') or new_uri.startswith('https:') or self.local_path:
            return new_uri
    return None
class DummyFeed(object):
"""Temporary class used during API transition."""
last_modified = None
name = '-'
last_checked = property(lambda self: None)
implementations = property(lambda self: {})
feeds = property(lambda self: [])
summary = property(lambda self: '-')
description = property(lambda self: '')
def get_name(self): return self.name
def get_feed(self, url): return None
def get_metadata(self, uri, name): return []
_dummy_feed = DummyFeed()
# Version-specific definitions of the URI <-> filename escaping helpers.
# Both branches produce the same observable behaviour: lower-case %xx
# escapes, which is why urllib's quote (upper-case) cannot be used.
if sys.version_info[0] > 2:
    # Python 3
    from functools import total_ordering
    # (note: delete these two lines when generating epydoc)
    Stability = total_ordering(Stability)
    Implementation = total_ordering(Implementation)

    # These could be replaced by urllib.parse.quote, except that
    # it uses upper-case escapes and we use lower-case ones...
    def unescape(uri):
        """Convert each %20 to a space, etc.
        @rtype: str"""
        uri = uri.replace('#', '/')
        if '%' not in uri: return uri
        # Substitute on ASCII bytes so each %xx pair becomes one byte,
        # then decode the whole result as UTF-8.
        return re.sub(b'%[0-9a-fA-F][0-9a-fA-F]',
                      lambda match: bytes([int(match.group(0)[1:], 16)]),
                      uri.encode('ascii')).decode('utf-8')

    def escape(uri):
        """Convert each space to %20, etc
        @rtype: str"""
        # Escape every byte of the UTF-8 encoding outside [-_.a-zA-Z0-9].
        return re.sub(b'[^-_.a-zA-Z0-9]',
                      lambda match: ('%%%02x' % ord(match.group(0))).encode('ascii'),
                      uri.encode('utf-8')).decode('ascii')

    def _pretty_escape(uri):
        """Convert each space to %20, etc
        : is preserved and / becomes #. This makes for nicer strings,
        and may replace L{escape} everywhere in future.
        @rtype: str"""
        if os.name == "posix":
            # Only preserve : on Posix systems
            preserveRegex = b'[^-_.a-zA-Z0-9:/]'
        else:
            # Other OSes may not allow the : character in file names
            preserveRegex = b'[^-_.a-zA-Z0-9/]'
        return re.sub(preserveRegex,
                      lambda match: ('%%%02x' % ord(match.group(0))).encode('ascii'),
                      uri.encode('utf-8')).decode('ascii').replace('/', '#')
else:
    # Python 2 (byte strings throughout; same escaping rules as above)
    def unescape(uri):
        """Convert each %20 to a space, etc.
        @rtype: str"""
        uri = uri.replace('#', '/')
        if '%' not in uri: return uri
        return re.sub('%[0-9a-fA-F][0-9a-fA-F]',
                      lambda match: chr(int(match.group(0)[1:], 16)),
                      uri).decode('utf-8')

    def escape(uri):
        """Convert each space to %20, etc
        @rtype: str"""
        return re.sub('[^-_.a-zA-Z0-9]',
                      lambda match: '%%%02x' % ord(match.group(0)),
                      uri.encode('utf-8'))

    def _pretty_escape(uri):
        """Convert each space to %20, etc
        : is preserved and / becomes #. This makes for nicer strings,
        and may replace L{escape} everywhere in future.
        @rtype: str"""
        if os.name == "posix":
            # Only preserve : on Posix systems
            preserveRegex = '[^-_.a-zA-Z0-9:/]'
        else:
            # Other OSes may not allow the : character in file names
            preserveRegex = '[^-_.a-zA-Z0-9/]'
        return re.sub(preserveRegex,
                      lambda match: '%%%02x' % ord(match.group(0)),
                      uri.encode('utf-8')).replace('/', '#')
def canonical_iface_uri(uri):
    """If uri is a relative path, convert to an absolute one.
    A "file:///foo" URI is converted to "/foo".
    An "alias:prog" URI expands to the URI in the 0alias script
    Otherwise, return it unmodified.
    @rtype: str
    @raise SafeException: if uri isn't valid
    """
    if uri.startswith('http://') or uri.startswith('https://'):
        # Remote feed: require at least scheme://host/path.
        if uri.count("/") < 3:
            raise SafeException(_("Missing / after hostname in URI '%s'") % uri)
        return uri
    elif uri.startswith('file:///'):
        path = uri[7:]  # keep the leading '/' of the absolute path
    elif uri.startswith('file:'):
        # Use a slice rather than uri[5]: a bare 'file:' URI (length 5)
        # previously crashed with IndexError before the error below.
        if uri[5:6] == '/':
            raise SafeException(_('Use file:///path for absolute paths, not {uri}').format(uri = uri))
        path = os.path.abspath(uri[5:])
    elif uri.startswith('alias:'):
        from zeroinstall import alias
        alias_prog = uri[6:]
        if not os.path.isabs(alias_prog):
            full_path = support.find_in_path(alias_prog)
            if not full_path:
                raise alias.NotAnAliasScript("Not found in $PATH: " + alias_prog)
        else:
            full_path = alias_prog
        return alias.parse_script(full_path).uri
    else:
        path = os.path.realpath(uri)

    if os.path.isfile(path):
        return path
    raise SafeException(_("Bad interface name '%(uri)s'.\n"
                          "(doesn't start with 'http:', and "
                          "doesn't exist as a local file '%(interface_uri)s' either)") %
                        {'uri': uri, 'interface_uri': path})
_version_mod_to_value = {
'pre': -2,
'rc': -1,
'': 0,
'post': 1,
}
# Reverse mapping
_version_value_to_mod = {}
for x in _version_mod_to_value: _version_value_to_mod[_version_mod_to_value[x]] = x
del x
_version_re = re.compile('-([a-z]*)')
def parse_version(version_string):
"""Convert a version string to an internal representation.
The parsed format can be compared quickly using the standard Python functions.
- Version := DottedList ("-" Mod DottedList?)*
- DottedList := (Integer ("." Integer)*)
@rtype: tuple (opaque)
@raise SafeException: if the string isn't a valid version
@since: 0.24 (moved from L{reader}, from where it is still available):"""
if version_string is None: return None
parts = _version_re.split(version_string)
if parts[-1] == '':
del parts[-1] # Ends with a modifier
else:
parts.append('')
if not parts:
raise SafeException(_("Empty version string!"))
l = len(parts)
try:
for x in range(0, l, 2):
part = parts[x]
if part:
parts[x] = list(map(int, parts[x].split('.')))
else:
parts[x] = [] # (because ''.split('.') == [''], not [])
for x in range(1, l, 2):
parts[x] = _version_mod_to_value[parts[x]]
return parts
except ValueError as ex:
raise SafeException(_("Invalid version format in '%(version_string)s': %(exception)s") % {'version_string': version_string, 'exception': ex})
except KeyError as ex:
raise SafeException(_("Invalid version modifier in '%(version_string)s': %(exception)s") % {'version_string': version_string, 'exception': ex})
def format_version(version):
"""Format a parsed version for display. Undoes the effect of L{parse_version}.
@see: L{Implementation.get_version}
@rtype: str
@since: 0.24"""
version = version[:]
l = len(version)
for x in range(0, l, 2):
version[x] = '.'.join(map(str, version[x]))
for x in range(1, l, 2):
version[x] = '-' + _version_value_to_mod[version[x]]
if version[-1] == '-': del version[-1]
return ''.join(version)
| timdiels/0install | zeroinstall/injector/model.py | Python | lgpl-2.1 | 49,858 | [
"VisIt"
] | 3ffe0e293d45d612d1ff0782eebf7b917fecf4e39448a0a25037f16ff6566191 |
'''Usage:
python monthly_visits.py <project_label>

This is sort of a contrived example meant to display different features.
NOTE: written for Python 2 (print statements).
'''
from scitran_client import ScitranClient, query, Projects
import sys
from collections import Counter
from fnmatch import fnmatch

client = ScitranClient()

# Search for the project via label
project = client.search(query(Projects).filter(Projects.label.match(sys.argv[1])))[0]

# fetch the sessions related to this project
sessions = client.request('projects/{}/sessions'.format(project['_id'])).json()

# count session by month by taking first 7 characters of date string:
# example: 2016-01-01T00:00:00, so first 7 are 2016-01
ct = Counter(
    s['timestamp'][:7]
    for s in sessions
)

# logging the months and visit counts
print 'month | number of visits'
for month, count in sorted(ct.items(), reverse=True):
    print month, '|', count

# Let's find an image in our project to download:
# the first acquisition file across all sessions whose name matches *.png.
acquisition, f = next(
    (a, f)
    for s in sessions
    for a in client.request('sessions/{}/acquisitions'.format(s['_id'])).json()
    for f in a['files']
    if fnmatch(f['name'], '*.png')
)
client.download_file('acquisitions', acquisition['_id'], f['name'], f['hash'], dest_dir='.')
| scitran/python-client | examples/monthly_visits.py | Python | gpl-3.0 | 1,214 | [
"VisIt"
] | c3c85d8996a9d353674e8c2cf08b51851017e25a1e75e65162659db8d13508c7 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
####################################################################################################
#
# Author: WishInLife
# QQ: 57956720
# QQ Group: 59160264
# E-Mail: wishinlife@qq.com
# Web Home: http://www.syncy.cn
# Update date: 2015-09-12
# VERSION: 2.5.3
# Required packages: kmod-nls-utf8, libopenssl, libcurl, python, python-curl, python-crypto
# If import python-crypto package, SyncY can support ARC4、Blowfish and AES encryption.
#
####################################################################################################
import sys
import os
import stat
import time
import re
import struct
import hashlib
from urllib import urlencode
import threading
import traceback
import json
import random
# import fcntl
# if '/usr/lib/python2.7/site-packages' not in sys.path:
# sys.path.append('/usr/lib/python2.7/site-packages')
import pycurl
import binascii
# import zlib
# import fileinput
try: # require python-crypto
from Crypto.Cipher import ARC4
from Crypto.Cipher import Blowfish
from Crypto.Cipher import AES
except ImportError, ex:
ARC4 = Blowfish = AES = None
# set config_file and pidfile for your config storage path.
if os.name == 'nt':
    # Windows: keep state next to the script, use %TMP% for scratch files.
    __CONFIG_FILE__ = './syncy'
    __PIDFILE__ = './syncy.pid'
    __CHARSET__ = 'GBK'  # windows charset
    __TMP_DIR__ = os.environ['TMP'].replace('\\', '/')
else:
    __CONFIG_FILE__ = '/etc/config/syncy'
    __PIDFILE__ = '/var/run/syncy.pid'
    __CHARSET__ = 'UTF-8'  # linux charset
    __TMP_DIR__ = '/tmp'
# Re-enable sys.setdefaultencoding (hidden by site.py) so implicit
# str/unicode conversions use the platform charset. Python 2 only.
if sys.getdefaultencoding() != __CHARSET__:
    reload(sys)
    sys.setdefaultencoding(__CHARSET__)
# Don't modify the following.
__VERSION__ = '2.5.3'
__DEBUG__ = False
__author__ = "WishInLife <wishinlife@qq.com>"
# File-locking shim: expose the POSIX-style flock()/lockf() API on both
# platforms. On Windows the pywin32 LockFile(Ex) calls are adapted; on
# other systems fcntl's implementations are re-exported unchanged.
if os.name == 'nt':
    import win32con
    import win32file
    import pywintypes
    LOCK_SH = 0
    LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
    LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
    LOCK_UN = 8  # no Win32 equivalent; handled explicitly below
    __overlapped = pywintypes.OVERLAPPED()

    def flock(fd, op):
        # Whole-file lock/unlock on the underlying OS handle.
        fh = win32file._get_osfhandle(fd.fileno())
        if op == LOCK_UN:
            return win32file.UnlockFileEx(fh, 0, 0x0fff0000, __overlapped)
        else:
            return win32file.LockFileEx(fh, op, 0, 0x0fff0000, __overlapped)

    def lockf(fd, op, length=0, start=0, whence=0):
        # Byte-range lock; whence mirrors lseek (0=begin, 1=current, 2=end),
        # length 0 means "to end of file".
        fh = win32file._get_osfhandle(fd.fileno())
        fsize = win32file.GetFileSize(fh)
        if whence == 1:
            start += fd.tell()
        elif whence == 2:
            start += fsize
        if length == 0:
            length = fsize
        # Split 64-bit offsets into low/high 32-bit halves for the Win32 API.
        # NOTE: relies on Python 2 integer division semantics of '/'.
        int32 = 2 ** 32
        if op == LOCK_UN:
            return win32file.UnlockFile(fh, int(start % int32), int(start / int32), int(length % int32), int(length / int32))
        else:
            return win32file.LockFile(fh, int(start % int32), int(start / int32), int(length % int32), int(length / int32))
else:
    from fcntl import LOCK_EX, LOCK_SH, LOCK_NB, LOCK_UN, flock, lockf
# Serialises log output from concurrent worker threads.
LogLock = threading.Lock()

def printlog(message):
    """Print one log line to stdout while holding the shared log lock."""
    LogLock.acquire()
    print(message)
    LogLock.release()
def rename(src, dst):
    """Move src to dst. On Windows an existing dst is removed first,
    because os.rename there refuses to overwrite."""
    must_clear_target = os.name == 'nt' and os.path.exists(dst)
    if must_clear_target:
        os.remove(dst)
    os.rename(src, dst)
class SyncY:
    # Shared counters across worker threads (guarded by EXLock; see the
    # *_increase/reset_counter static methods).
    synccount = 0
    errorcount = 0
    failcount = 0
    EXLock = threading.Lock()
    TaskSemaphore = None  # bounds concurrent sync tasks; created in __init__
    oldSTDERR = None      # saved streams while output is redirected to syncylog
    oldSTDOUT = None
    syncydb = None        # path of the local sync-state database -- TODO confirm
    sydb = None
    sydblen = None
    syncData = None       # cache of syncydb records (16-byte key -> 48-byte value)
    basedirlen = None
    syncpath = {}         # syncpath config sections, keyed by numeric index string
    extraslice = None
    encryption = None
    encryptkey = ''
    stop = False
    # Defaults for the [syncy] config section; overridden by the config file.
    config = {
        'syncylog'        : '',
        'blocksize'       : 10,
        'ondup'           : 'rename',
        'datacache'       : 'on',
        'excludefiles'    : '',
        'listnumber'      : 100,
        'retrytimes'      : 3,
        'retrydelay'      : 3,
        'maxsendspeed'    : 0,
        'maxrecvspeed'    : 0,
        'speedlimitperiod': '0-0',
        'syncperiod'      : '0-24',
        'syncinterval'    : 3600,
        'tasknumber'      : 2,
        'threadnumber'    : 2}
    # Precompiled regexes: 'newname' splits "name.ext" into stem/extension;
    # 'pcspath' presumably matches names invalid on the remote side -- confirm.
    syre = {
        'newname': re.compile(r'^(.*)(\.[^.]+)$'),
        'pcspath': re.compile(r'^[\s\.\n].*|.*[/<>\\|\*\?:\"].*|.*[\s\.\n]$')}
    # OAuth token state persisted back into the config file.
    syncytoken = {'synctotal': 0}
    pcsroot = '/apps/SyncY'
    synctask = {}
def __init__(self, argv=sys.argv[1:]):
    """Load the UCI-style config file, obtain/refresh the device token and
    initialise runtime state (log redirection, option validation, exclusion
    patterns, task semaphore and worker-thread stack size).

    @param argv: command-line arguments after the program name
    NOTE: fatal configuration errors call sys.exit() directly.
    """
    self.__argv = argv
    # Single-instance guard (only for full runs and maintenance commands):
    # refuse to start while the pid recorded in the pidfile is still alive.
    if len(self.__argv) == 0 or self.__argv[0] in ['compress', 'convert', 'rebuild']:
        if os.path.exists(__PIDFILE__):
            with open(__PIDFILE__, 'r') as pidh:
                mypid = pidh.read()
            try:
                os.kill(int(mypid), 0)  # signal 0 only probes existence
            except os.error:
                pass  # stale pidfile -- take over below
            else:
                print("SyncY is running!")
                sys.exit(0)
        with open(__PIDFILE__, 'w') as pidh:
            pidh.write(str(os.getpid()))
    if not (os.path.isfile(__CONFIG_FILE__)):
        sys.stderr.write('%s ERROR: Config file "%s" does not exist.\n' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), __CONFIG_FILE__))
        sys.exit(2)
    # Parse the UCI-style config: "config <section>" headers followed by
    # "option <name> '<value>'" lines; values land in the class-level
    # SyncY.config / SyncY.syncytoken / SyncY.syncpath structures.
    with open(__CONFIG_FILE__, 'r') as sycfg:
        line = sycfg.readline()
        section = ''
        while line:
            if re.findall(r'^\s*#', line) or re.findall(r'^\s*$', line):
                line = sycfg.readline()
                continue
            line = re.sub(r'#[^\']*$', '', line)  # strip trailing comments
            m = re.findall(r'\s*config\s+([^\s]+).*', line)
            if m:
                section = m[0].strip('\'')
                if section == 'syncpath':
                    # Each syncpath section occupies a new numeric slot.
                    SyncY.syncpath[str(len(SyncY.syncpath))] = {}
                line = sycfg.readline()
                continue
            m = re.findall(r'\s*option\s+([^\s]+)\s+\'([^\']*)\'', line)
            if m:
                if section == 'syncy':
                    if m[0][0].strip('\'') in ['blocksize', 'listnumber', 'syncinterval', 'threadnumber', 'tasknumber', 'retrytimes', 'retrydelay']:
                        SyncY.config[m[0][0].strip('\'')] = int(m[0][1])
                    elif m[0][0].strip('\'') in ['maxsendspeed', 'maxrecvspeed']:
                        # Speed options accept an optional K/M suffix.
                        if m[0][1].upper().find('K') > -1:
                            idx = m[0][1].upper().find('K')
                            SyncY.config[m[0][0].strip('\'')] = int(m[0][1][0:idx]) * 1024
                        elif m[0][1].upper().find('M') > -1:
                            idx = m[0][1].upper().find('M')
                            SyncY.config[m[0][0].strip('\'')] = int(m[0][1][0:idx]) * 1024 * 1024
                        else:
                            SyncY.config[m[0][0].strip('\'')] = int(m[0][1])
                    else:
                        SyncY.config[m[0][0].strip('\'')] = m[0][1]
                elif section == 'syncytoken':
                    if m[0][0].strip('\'') in ['expires_in', 'refresh_date', 'compress_date', 'synctotal']:
                        SyncY.syncytoken[m[0][0].strip('\'')] = int(m[0][1])
                    else:
                        SyncY.syncytoken[m[0][0].strip('\'')] = m[0][1]
                elif section == 'syncpath':
                    if m[0][0].strip('\'') == 'remotepath':
                        # Normalise legacy app-root prefixes down to a path
                        # relative to the PCS application folder.
                        if m[0][1].lower().startswith('/apps/syncy'):
                            SyncY.syncpath[str(len(SyncY.syncpath) - 1)]['remotepath'] = m[0][1][11:]
                        elif m[0][1].lower().startswith('/我的应用程序/syncy'):
                            SyncY.syncpath[str(len(SyncY.syncpath) - 1)]['remotepath'] = m[0][1][len('/我的应用程序/syncy'):]
                        else:
                            SyncY.syncpath[str(len(SyncY.syncpath) - 1)]['remotepath'] = m[0][1]
                    else:
                        SyncY.syncpath[str(len(SyncY.syncpath) - 1)][m[0][0].strip('\'')] = m[0][1].replace('\\', '/')
            line = sycfg.readline()
    if SyncY.config['syncylog'] != '':
        if not os.path.exists(os.path.dirname(SyncY.config['syncylog'])):
            os.makedirs(os.path.dirname(SyncY.config['syncylog']))
        if os.path.exists(SyncY.config['syncylog']) and os.path.isdir(SyncY.config['syncylog']):
            # A directory was configured: log to syncy.log inside it.
            SyncY.config['syncylog'] = self.__catpath(SyncY.config['syncylog'], 'syncy.log')
            # NOTE(review): indentation reconstructed -- confirm the config
            # save is meant to happen only when the log path was rewritten.
            self.__save_config()
    if SyncY.oldSTDERR is None and SyncY.config['syncylog'] != '' and len(self.__argv) != 0 and self.__argv[0] in ['sybind', 'cpbind']:
        # Redirect output to the log file early for the bind sub-commands.
        SyncY.oldSTDERR = sys.stderr
        SyncY.oldSTDOUT = sys.stdout
        sys.stderr = open(SyncY.config['syncylog'], 'a', 0)  # unbuffered append
        sys.stdout = sys.stderr
    if 'refresh_token' not in SyncY.syncytoken or SyncY.syncytoken['refresh_token'] == '' or (len(self.__argv) != 0 and self.__argv[0] in ['sybind', 'cpbind']):
        # No usable token yet (or an explicit bind requested): run the
        # OAuth2 device-binding flow against the SyncY server.
        sycurl = SYCurl()
        if (('device_code' not in SyncY.syncytoken or SyncY.syncytoken['device_code'] == '') and len(self.__argv) == 0) or (len(self.__argv) != 0 and self.__argv[0] == 'sybind'):
            retcode, responses = sycurl.request('https://www.syncy.cn/syserver', {}, {'method': 'bind_device', 'scope': 'basic,netdisk'}, 'POST', SYCurl.Normal)
            responses = json.loads(responses)
            if retcode != 200 or 'error_code' in responses:
                print('%s ERROR(Errno:%d): Get device code failed: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, responses['error_msg']))
                sys.exit(3)
            device_code = responses['device_code']
            user_code = responses['user_code']
            if len(self.__argv) != 0 and self.__argv[0] == 'sybind':
                # 'sybind' only records the codes for a later 'cpbind'.
                with open(__TMP_DIR__ + '/syncy.bind', 'w') as sybind:
                    sybind.write('{"user_code":"%s","device_code":"%s","time":%d}' % (user_code, device_code, int(time.time())))
                sys.exit(0)
            SyncY.syncytoken['device_code'] = device_code
            print('Device binding Guide:')
            print(' 1. Open web browser to visit:"https://openapi.baidu.com/device" and input user code to binding your baidu account.')
            print(' ')
            print(' 2. User code:\033[31m %s\033[0m' % user_code)
            print(' (User code valid for 30 minutes.)')
            print(' ')
            raw_input(' 3. After granting access to the application, come back here and press [Enter] to continue.')
            print(' ')
        if len(self.__argv) != 0 and self.__argv[0] == 'cpbind':
            # 'cpbind' consumes the codes stored by an earlier 'sybind'.
            with open(__TMP_DIR__ + '/syncy.bind', 'r') as sybind:
                bindinfo = sybind.read()
            bindinfo = json.loads(bindinfo)
            os.remove(__TMP_DIR__ + '/syncy.bind')
            if 'device_code' in bindinfo:
                if int(time.time()) - int(bindinfo['time']) >= 1800:
                    sys.exit(4)  # the recorded user code expired (30 min)
                SyncY.syncytoken['device_code'] = bindinfo['device_code']
            else:
                sys.exit(5)
        retcode, responses = sycurl.request('https://www.syncy.cn/syserver', {}, {'method': 'get_device_token', 'code': SyncY.syncytoken['device_code'], 'edition': 'python', 'ver': __VERSION__}, 'POST', SYCurl.Normal)
        responses = json.loads(responses)
        if retcode != 200 or 'error_code' in responses:
            print('%s ERROR(Errno:%d): Get device token failed: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, responses['error_msg']))
            sys.exit(6)
        SyncY.syncytoken['refresh_token'] = responses['refresh_token']
        SyncY.syncytoken['access_token'] = responses['access_token']
        SyncY.syncytoken['expires_in'] = int(responses['expires_in'])
        SyncY.syncytoken['refresh_date'] = int(time.time())
        SyncY.syncytoken['compress_date'] = int(time.time())
        self.__save_config()
        if len(self.__argv) != 0 and self.__argv[0] == 'cpbind':
            sys.exit(0)
        print('%s INFO: Get device token success.\n' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
    if SyncY.oldSTDERR is None and SyncY.config['syncylog'] != '':
        print('%s INFO: Running log output to log file %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), SyncY.config['syncylog']))
        SyncY.oldSTDERR = sys.stderr
        SyncY.oldSTDOUT = sys.stdout
        sys.stderr = open(SyncY.config['syncylog'], 'a', 0)
        sys.stdout = sys.stderr
    # Validate the numeric/enumerated options, falling back to defaults.
    try:
        if SyncY.config['blocksize'] < 1:
            SyncY.config['blocksize'] = 10
            print('%s WARNING: "blocksize" must great than or equal to 1(M), set to default 10(M).' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        if SyncY.config['ondup'] != 'overwrite' and SyncY.config['ondup'] != 'rename':
            SyncY.config['ondup'] = 'rename'
            print('%s WARNING: ondup is invalid, set to default(overwrite).' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        if SyncY.config['datacache'] != 'on' and SyncY.config['datacache'] != 'off':
            SyncY.config['datacache'] = 'on'
            print('%s WARNING: "datacache" is invalid, set to default(on).' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        if SyncY.config['retrytimes'] < 0:
            SyncY.config['retrytimes'] = 3
            print('%s WARNING: "retrytimes" is invalid, set to default(3 times).' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        if SyncY.config['retrydelay'] < 0:
            SyncY.config['retrydelay'] = 3
            print('%s WARNING: "retrydelay" is invalid, set to default(3 second).' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        if SyncY.config['listnumber'] < 1:
            SyncY.config['listnumber'] = 100
            print('%s WARNING: "listnumber" must great than or equal to 1, set to default 100.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        if SyncY.config['syncinterval'] < 0:
            SyncY.config['syncinterval'] = 3600
            print('%s WARNING: "syncinterval" must great than or equal to 1, set to default 3600.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        if SyncY.config['maxsendspeed'] < 0:
            SyncY.config['maxsendspeed'] = 0
            print('%s WARNING: "maxsendspeed" must great than or equal to 0, set to default 0.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        if SyncY.config['maxrecvspeed'] < 0:
            SyncY.config['maxrecvspeed'] = 0
            print('%s WARNING: "maxrecvspeed" must great than or equal to 0, set to default 100.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        if SyncY.config['threadnumber'] < 1:
            SyncY.config['threadnumber'] = 2
            print('%s WARNING: "threadnumber" must great than or equal to 1, set to default 2.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        if SyncY.config['tasknumber'] < 1:
            SyncY.config['tasknumber'] = 2
            print('%s WARNING: "tasknumber" must great than or equal to 1, set to default 2.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        starthour, endhour = SyncY.config['speedlimitperiod'].split('-', 1)
        if starthour == '' or endhour == '' or int(starthour) < 0 or int(starthour) > 23 or int(endhour) < 0 or int(endhour) > 24:
            print('%s WARNING: "speedlimitperiod" is invalid, set to default(0-0), no limit.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
            SyncY.config['speedlimitperiod'] = '0-0'
        starthour, endhour = SyncY.config['syncperiod'].split('-', 1)
        if starthour == '' or endhour == '' or int(starthour) < 0 or int(starthour) > 23 or int(endhour) < 0 or int(endhour) > 24 or endhour == starthour:
            print('%s WARNING: "syncperiod" is invalid, set to default(0-24).' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
            SyncY.config['syncperiod'] = '0-24'
    except Exception, e:
        print('%s ERROR: initialize parameters failed. %s\n%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), e, traceback.format_exc()))
        sys.exit(7)
    # Turn the glob-style excludefiles option into compiled regexes;
    # *.syy work files are always excluded.
    self._excludefiles = SyncY.config['excludefiles'].replace('\\', '/').replace('.', '\.').replace('*', '.*').replace('?', '.?').split(';')
    for i in xrange(len(self._excludefiles)):
        # NOTE(review): eval() builds a raw-string literal from config text;
        # the config is local/trusted, but this is fragile.
        self._excludefiles[i] = re.compile(eval('r"^' + self._excludefiles[i] + '$"'))
    self._excludefiles.append(re.compile(r'^.*\.syy$'))
    if (SyncY.syncytoken['refresh_date'] + SyncY.syncytoken['expires_in'] - 864000) < int(time.time()):
        self.__check_expires()  # refresh the token ~10 days before expiry
    SyncY.TaskSemaphore = threading.Semaphore(SyncY.config['tasknumber'])
    # Probe for a workable thread stack size, doubling from 32 KiB up to
    # 512 KiB before giving up and using the platform default (0).
    size = 32768
    while True:
        try:
            threading.stack_size(size)
            threadtest = ThreadTest()
            threadtest.start()
            break
        except threading.ThreadError:
            threading.stack_size(0)
            break
        except RuntimeError:
            threading.stack_size(0)
            break
        except ValueError:
            if size < 512 * 1024:
                size *= 2
            else:
                threading.stack_size(0)
                break
def __del__(self):
    """Restore the redirected stdio streams and release this process's pid file."""
    cls = self.__class__
    if cls.oldSTDERR is not None:
        # Flush/close the log redirection before handing back the
        # original streams.
        sys.stderr.flush()
        sys.stderr.close()
        sys.stderr = cls.oldSTDERR
        sys.stdout = cls.oldSTDOUT
    if os.path.exists(__PIDFILE__):
        with open(__PIDFILE__, 'r') as pidh:
            lckpid = pidh.read()
            # Only delete the pid file when it was written by this process.
            if os.getpid() == int(lckpid):
                os.remove(__PIDFILE__)
@staticmethod
def synccount_increase():
SyncY.EXLock.acquire()
SyncY.synccount += 1
SyncY.EXLock.release()
@staticmethod
def errorcount_increase():
SyncY.EXLock.acquire()
SyncY.errorcount += 1
SyncY.EXLock.release()
@staticmethod
def failcount_increase():
SyncY.EXLock.acquire()
SyncY.failcount += 1
SyncY.EXLock.release()
@staticmethod
def reset_counter():
SyncY.EXLock.acquire()
SyncY.synccount = 0
SyncY.failcount = 0
SyncY.errorcount = 0
SyncY.EXLock.release()
@staticmethod
def __init_syncdata():
SyncY.syncData = {}
if os.path.exists(SyncY.syncydb):
with open(SyncY.syncydb, 'rb') as sydb:
flock(sydb, LOCK_SH)
sydb.seek(64)
datarec = sydb.read(64)
while datarec:
SyncY.syncData[datarec[0:16]] = datarec[16:]
datarec = sydb.read(64)
flock(sydb, LOCK_UN)
def __check_expires(self):
    """Check the Baidu OAuth token and refresh it when close to expiry.

    Also pings the syncy.cn update server and logs a warning when a newer
    client version is available.  Returns 0 when a refresh succeeded,
    1 when the refresh failed, None when no refresh was needed.
    """
    sycurl = SYCurl()
    # Identify the logged-in user; the uid is only needed for the
    # version-check request below.
    retcode, responses = sycurl.request('https://openapi.baidu.com/rest/2.0/passport/users/getLoggedInUser', {}, {'access_token': SyncY.syncytoken['access_token']}, 'POST', SYCurl.Normal)
    responses = json.loads(responses)
    if 'uid' in responses:
        # Ask the update server for the latest client version.
        retcode, responses = sycurl.request('https://www.syncy.cn/syserver', {}, {'method': 'get_last_version', 'edition': 'python', 'ver': __VERSION__, 'uid': responses['uid'], 'code': SyncY.syncytoken['device_code']}, 'POST', SYCurl.Normal)
        if retcode == 200 and responses.find('#') > -1:
            # Reply format: "<version>#<message>".
            (lastver, smessage) = responses.strip('\n').split('#', 1)
            if lastver > __VERSION__:
                printlog('%s WARNING: %s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), smessage.encode(__CHARSET__)))
    # More than 864000 s (ten days) of validity left: nothing to do.
    if (SyncY.syncytoken['refresh_date'] + SyncY.syncytoken['expires_in'] - 864000) > int(time.time()):
        return
    retcode, retbody = sycurl.request('https://www.syncy.cn/syserver', {}, {'method': 'refresh_access_token', 'refresh_token': SyncY.syncytoken['refresh_token'], 'code': SyncY.syncytoken['device_code']}, 'POST', SYCurl.Normal)
    responses = json.loads(retbody)
    try:
        if retcode != 200:
            printlog('%s ERROR(Errno:%d): Refresh access token failed: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, responses['error_msg']))
            return 1
        # Persist the new token pair and stamp the refresh time.
        SyncY.syncytoken['refresh_token'] = responses['refresh_token']
        SyncY.syncytoken['access_token'] = responses['access_token']
        SyncY.syncytoken['expires_in'] = int(responses['expires_in'])
        SyncY.syncytoken['refresh_date'] = int(time.time())
    except KeyError:
        # Server reply did not contain the expected token fields.
        printlog('%s ERROR(Errno:%d): Refresh access token failed: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, retbody))
        return 1
    self.__save_config()
    printlog('%s INFO: Refresh access token success.' % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
    return 0
@staticmethod
def __save_config():
    """Atomically rewrite the config file from the in-memory settings.

    Writes to '<config>.tmp', fsyncs it, then renames it over the real
    config file, preserving the original owner and mode on POSIX.
    """
    with open('%s.tmp' % __CONFIG_FILE__, 'w') as sycfg:
        sycfg.write("\nconfig syncy\n")
        for key, value in SyncY.config.items():
            sycfg.write("\toption %s '%s'\n" % (key, str(value)))
        sycfg.write("\nconfig syncytoken\n")
        for key, value in SyncY.syncytoken.items():
            sycfg.write("\toption %s '%s'\n" % (key, str(value)))
        # One 'config syncpath' section per sync pair; syncpath is keyed
        # by stringified index.
        for i in range(len(SyncY.syncpath)):
            sycfg.write("\nconfig syncpath\n")
            for key, value in SyncY.syncpath[str(i)].items():
                sycfg.write("\toption %s '%s'\n" % (key, str(value)))
        sycfg.flush()
        # Ensure the temp file hits disk before the rename below.
        os.fsync(sycfg.fileno())
    if os.path.exists('%s.tmp' % __CONFIG_FILE__):
        pmeta = os.stat(__CONFIG_FILE__)
        rename('%s.tmp' % __CONFIG_FILE__, __CONFIG_FILE__)
        if os.name == 'posix':
            # Keep the original ownership/permissions on the new file.
            os.lchown(__CONFIG_FILE__, pmeta.st_uid, pmeta.st_gid)
            os.chmod(__CONFIG_FILE__, pmeta.st_mode)
@staticmethod
def __catpath(*names):
fullpath = '/'.join(names)
fullpath = re.sub(r'/+', '/', fullpath)
fullpath = re.sub(r'/$', '', fullpath)
return fullpath
@staticmethod
def __get_newname(oldname):
nowtime = str(time.strftime("%Y%m%d%H%M%S", time.localtime()))
m = SyncY.syre['newname'].findall(oldname)
if m:
newname = m[0][0] + '_old_' + nowtime + m[0][1]
else:
newname = oldname + '_old_' + nowtime
return newname
@staticmethod
def __check_pcspath(pcsdirname, pcsfilename):
if len(pcsdirname) + len(pcsfilename) + 1 >= 1000:
printlog('%s ERROR: Length of PCS path(%s/%s) must less than 1000, skip upload.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), pcsdirname, pcsfilename))
return 1
if SyncY.syre['pcspath'].findall(pcsfilename):
printlog('%s ERROR: PCS path(%s/%s) is invalid, please check whether special characters exists in the path, skip upload the file.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), pcsdirname, pcsfilename))
return 1
return 0
@staticmethod
def __get_pcs_quota():
    """Query and log the total/used PCS quota; return 0 on success, 1 on failure."""
    sycurl = SYCurl()
    retcode, responses = sycurl.request('https://pcs.baidu.com/rest/2.0/pcs/quota', {'method': 'info', 'access_token': SyncY.syncytoken['access_token']}, '', 'GET', SYCurl.Normal)
    responses = json.loads(responses)
    if retcode != 200 or 'error_code' in responses:
        printlog('%s ERROR(Errno:%d): Get pcs quota failed: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, responses['error_msg']))
        return 1
    gib = 1024 * 1024 * 1024
    printlog('%s INFO: PCS quota is %dG,used %dG.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), responses['quota'] / gib, responses['used'] / gib))
    return 0
@staticmethod
def __get_pcs_filelist(pcspath, startindex, endindex):
    """Fetch one page of the remote directory listing for *pcspath*.

    Returns (code, entries): (0, list) on success, (31066, []) when the
    remote path does not exist, (1, []) on any other failure.
    """
    if __DEBUG__:
        printlog('%s Info(%s): Start get pcs file list(%d-%d) of "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), threading.currentThread().name, startindex, endindex, pcspath))
    sycurl = SYCurl()
    retcode, responses = sycurl.request('https://pcs.baidu.com/rest/2.0/pcs/file', {'method': 'list', 'access_token': SyncY.syncytoken['access_token'], 'path': pcspath, 'limit': '%d-%d' % (startindex, endindex), 'by': 'name', 'order': 'asc'}, '', 'GET', SYCurl.Normal)
    try:
        responses = json.loads(responses)
        if retcode != 200 or 'error_code' in responses:
            # 31066: "file does not exist" -- callers treat this specially.
            if responses['error_code'] == 31066:
                return 31066, []
            else:
                printlog('%s ERROR(Errno:%d): Get PCS file list of "%s" failed: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, pcspath, responses['error_msg']))
                return 1, []
        return 0, responses['list']
    except Exception, e:
        printlog('%s ERROR: Get PCS file list of "%s" failed. return code: %d, response body: %s.\n%s\n%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), pcspath, retcode, str(responses), e, traceback.format_exc()))
        return 1, []
    finally:
        # Drop the (possibly large) decoded response body immediately.
        del responses
        if __DEBUG__:
            printlog('%s Info(%s): Complete get pcs file list(%d-%d) of "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), threading.currentThread().name, startindex, endindex, pcspath))
@staticmethod
def __check_create_pcsdir(pcspath):
    """Ensure *pcspath* exists remotely as a directory.

    Creates the directory when it is missing (error 31066) or when the
    path currently points at a plain file.  Returns 0 on success, 1 on
    failure.
    """
    sycurl = SYCurl()
    retcode, responses = sycurl.request('https://pcs.baidu.com/rest/2.0/pcs/file', {'method': 'meta', 'access_token': SyncY.syncytoken['access_token'], 'path': pcspath}, '', 'GET', SYCurl.Normal)
    try:
        responses = json.loads(responses)
        if retcode == 200 and responses['list'][0]['isdir'] == 1:
            # Already a remote directory: nothing to do.
            return 0
        elif (retcode != 200 and responses['error_code'] == 31066) or (retcode == 200 and responses['list'][0]['isdir'] == 0):
            retcode, responses = sycurl.request('https://pcs.baidu.com/rest/2.0/pcs/file', {'method': 'mkdir', 'access_token': SyncY.syncytoken['access_token'], 'path': pcspath}, '', 'POST', SYCurl.Normal)
            responses = json.loads(responses)
            # Success is confirmed by the server echoing the created path.
            if retcode == 200 and responses['path'].encode(__CHARSET__) == pcspath:
                return 0
        printlog('%s ERROR(Errno:%d): Create PCS directory "%s" failed: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, pcspath, responses['error_msg']))
        return 1
    except Exception, e:
        printlog('%s ERROR: Create PCS directory "%s" failed. return code: %d, response body: %s.\n%s\n%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), pcspath, retcode, str(responses), e, traceback.format_exc()))
        return 1
def __rm_localfile(self, delpath, slient=False):
    """Recursively delete a local file or directory tree.

    When *slient* is true no log output is produced.  Returns 0 on
    success, 1 on failure.
    """
    try:
        if os.path.isfile(delpath):
            os.remove(delpath)
            if not slient:
                printlog('%s INFO: Delete local file "%s" completed.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), delpath))
        elif os.path.isdir(delpath):
            # Remove the directory's contents first, then the directory.
            fnlist = os.listdir(delpath)
            for i in xrange(len(fnlist)):
                self.__rm_localfile('%s/%s' % (delpath, fnlist[i]), slient)
            os.rmdir(delpath)
            if not slient:
                printlog('%s INFO: Delete local directory "%s" completed.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), delpath))
    except Exception, e:
        if not slient:
            printlog('%s ERROR: Delete local file "%s" failed. %s\n%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), delpath, e, traceback.format_exc()))
        return 1
    return 0
@staticmethod
def __rm_pcsfile(pcspath, slient=False):
    """Delete a remote file or directory; return 0 on success, 1 on failure.

    When *slient* is true, suppress all log output.
    """
    sycurl = SYCurl()
    retcode, responses = sycurl.request('https://pcs.baidu.com/rest/2.0/pcs/file', {'method': 'delete', 'access_token': SyncY.syncytoken['access_token'], 'path': pcspath}, '', 'POST', SYCurl.Normal)
    responses = json.loads(responses)
    failed = retcode != 200 or 'error_code' in responses
    if failed:
        if not slient:
            printlog('%s ERROR(Errno:%d): Delete remote file or directory "%s" failed: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, pcspath, responses['error_msg']))
        return 1
    if not slient:
        printlog('%s INFO: Delete remote file or directory "%s" completed.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), pcspath))
    return 0
@staticmethod
def __mv_pcsfile(oldpcspath, newpcspath, slient=False):
    """Move/rename a remote file or directory; return 0 on success, 1 on failure.

    When *slient* is true, suppress all log output.
    """
    sycurl = SYCurl()
    retcode, responses = sycurl.request('https://pcs.baidu.com/rest/2.0/pcs/file', {'method': 'move', 'access_token': SyncY.syncytoken['access_token'], 'from': oldpcspath, 'to': newpcspath}, '', 'POST', SYCurl.Normal)
    responses = json.loads(responses)
    failed = retcode != 200 or 'error_code' in responses
    if failed:
        if not slient:
            printlog('%s ERROR(Errno:%d): Move remote file or directory "%s" to "%s" failed: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, oldpcspath, newpcspath, responses['error_msg']))
        return 1
    if not slient:
        printlog('%s INFO: Move remote file or directory "%s" to "%s" completed.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), oldpcspath, newpcspath))
    return 0
@staticmethod
def __cp_pcsfile(srcpcspath, destpcspath):
    """Copy a remote file or directory; return 0 on success, 1 on failure."""
    sycurl = SYCurl()
    retcode, responses = sycurl.request('https://pcs.baidu.com/rest/2.0/pcs/file', {'method': 'copy', 'access_token': SyncY.syncytoken['access_token'], 'from': srcpcspath, 'to': destpcspath}, '', 'POST', SYCurl.Normal)
    responses = json.loads(responses)
    if retcode == 200 and 'error_code' not in responses:
        printlog('%s INFO: Copy remote file or directory "%s" to "%s" completed.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), srcpcspath, destpcspath))
        return 0
    printlog('%s ERROR(Errno:%d): Copy remote file or directory "%s" to "%s" failed: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, srcpcspath, destpcspath, responses['error_msg']))
    return 1
@staticmethod
def __get_pcs_filemeta(pcspath):
    """Fetch remote metadata for *pcspath*; return (0, meta_dict) or (1, {})."""
    sycurl = SYCurl()
    retcode, responses = sycurl.request('https://pcs.baidu.com/rest/2.0/pcs/file', {'method': 'meta', 'access_token': SyncY.syncytoken['access_token'], 'path': pcspath}, '', 'GET', SYCurl.Normal)
    responses = json.loads(responses)
    if retcode == 200 and 'error_code' not in responses:
        return 0, responses['list'][0]
    printlog('%s ERROR(Errno:%d): Get file\'s meta failed: %s, %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, pcspath, responses['error_msg']))
    return 1, {}
@staticmethod
def __upload_file_nosync(filepath, pcspath):
    """Upload one local file to PCS without touching the sync database.

    Uses ondup=newcopy so an existing remote file is kept as a renamed
    copy.  Returns 0 on success, 1 on failure.
    """
    sycurl = SYCurl()
    fsize = os.stat(filepath).st_size
    retcode, responses = sycurl.request('https://c.pcs.baidu.com/rest/2.0/pcs/file', {'method': 'upload', 'access_token': SyncY.syncytoken['access_token'], 'path': pcspath, 'ondup': 'newcopy'}, '0-%d' % (fsize - 1), 'POST', SYCurl.Upload, filepath)
    responses = json.loads(responses)
    if retcode != 200 or 'error_code' in responses:
        printlog('%s ERROR(Errno:%d): Upload file to pcs failed: %s, %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, filepath, responses['error_msg']))
        return 1
    printlog('%s INFO: Upload file "%s" completed.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), filepath))
    return 0
def __compress_data(self, pathname, sydbnew, sydb=None, sydblen=0):
    """Copy into *sydbnew* only the sync records whose local files still exist.

    Walks *pathname* recursively; for each regular file the 64-byte
    record (16-byte path-md5 + packed mtime/size + remote meta) is kept
    when it still matches the file on disk.  With datacache 'on' the
    lookup uses the in-memory SyncY.syncData dict; otherwise *sydb* (the
    old database, *sydblen* bytes long) is scanned circularly.
    """
    fnlist = os.listdir(pathname)
    fnlist.sort()
    for fnname in fnlist:
        # Hidden entries are never synced.
        if fnname[0:1] == '.':
            continue
        fullpath = '%s/%s' % (pathname, fnname)
        if os.path.isdir(fullpath):
            if SyncY.config['datacache'] == 'on':
                self.__compress_data(fullpath, sydbnew)
            else:
                self.__compress_data(fullpath, sydbnew, sydb, sydblen)
        elif os.path.isfile(fullpath):
            # Key is the md5 of the path relative to the sync root.
            fnmd5 = hashlib.md5(fullpath[SyncY.basedirlen:]).digest()
            fnstat = os.stat(fullpath)
            fmate = struct.pack('>qq', int(fnstat.st_mtime), fnstat.st_size)
            if SyncY.config['datacache'] == 'on':
                if fnmd5 in SyncY.syncData and SyncY.syncData[fnmd5][0:16] == fmate:
                    sydbnew.write('%s%s' % (fnmd5, SyncY.syncData[fnmd5]))
                    del SyncY.syncData[fnmd5]
            else:
                # Circular scan over the old database: start from the
                # current position, wrap past the 64-byte header, and
                # stop after one full pass.
                if sydb.tell() == sydblen:
                    sydb.seek(64)
                datarec = sydb.read(64)
                readlen = 64
                while datarec and readlen <= sydblen - 64:
                    if datarec[0:32] == '%s%s' % (fnmd5, fmate):
                        sydbnew.write(datarec)
                        break
                    if readlen == sydblen - 64:
                        break
                    if sydb.tell() == sydblen:
                        sydb.seek(64)
                    datarec = sydb.read(64)
                    readlen += 64
    return 0
def __start_compress(self, pathname=''):
    """Rewrite each sync database, dropping records for deleted files.

    With no *pathname*, compresses every configured non-'sync'-type path
    and afterwards stamps compress_date / resets synctotal in the saved
    config; with a *pathname*, compresses only that directory.
    """
    if pathname == '':
        mpath = []
        for i in range(len(SyncY.syncpath)):
            # 'sync'-type paths maintain their database elsewhere.
            if SyncY.syncpath[str(i)]['synctype'].lower() not in ['4', 's', 'sync']:
                mpath.append(SyncY.syncpath[str(i)]['localpath'])
        printlog('%s INFO: Start compress sync data.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
    else:
        mpath = [pathname]
    for ipath in mpath:
        if ipath == '':
            continue
        SyncY.basedirlen = len(ipath)
        SyncY.syncydb = '%s/.syncy.info.db' % ipath
        newdbfile = '%s/.syncy.info.dbtmp' % ipath
        if os.path.exists(SyncY.syncydb):
            # Start from a clean temp file, then compact into it and
            # atomically replace the old database.
            if os.path.exists(newdbfile):
                os.remove(newdbfile)
            self.__check_upgrade_syncdata(newdbfile)
            with open(newdbfile, 'ab') as sydbnew:
                if SyncY.config['datacache'] == 'on':
                    self.__init_syncdata()
                    self.__compress_data(ipath, sydbnew)
                    SyncY.syncData = None
                else:
                    sydblen = os.stat(SyncY.syncydb).st_size
                    with open(SyncY.syncydb, 'rb') as sydb:
                        self.__compress_data(ipath, sydbnew, sydb, sydblen)
                sydbnew.flush()
                os.fsync(sydbnew.fileno())
            rename(newdbfile, SyncY.syncydb)
    if pathname == '':
        SyncY.syncytoken['compress_date'] = int(time.time())
        SyncY.syncytoken['synctotal'] = 0
        self.__save_config()
        printlog('%s INFO: Sync data compress completed.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
def __check_excludefiles(self, filepath):
for reexf in self._excludefiles:
if reexf.findall(filepath):
return 1
return 0
@staticmethod
def __check_syncstatus(fmd5, fmate, rmate, rmd5):
    """Return 1 when a matching sync record exists (item already synced), else 0.

    fmd5  -- 16-byte md5 digest of the relative local path
    fmate -- packed local (mtime, size), or '*' to ignore the local side
    rmate -- packed remote (mtime, size), or '*' to ignore the remote side
    rmd5  -- remote md5 hex string, or '*' to ignore
    """
    if rmd5 != '*':
        # hex string -> 16 raw bytes (Python 2 str.decode('hex')).
        rmd5 = rmd5.decode('hex')
    if SyncY.config['datacache'] == 'on':
        # In-memory lookup against the preloaded record dict.
        if fmd5 not in SyncY.syncData:
            return 0
        if rmd5 == '*' and rmate == '*' and SyncY.syncData[fmd5][0:16] == fmate:
            return 1
        elif fmate == '*' and SyncY.syncData[fmd5][16:] == rmate + rmd5:
            return 1
        elif SyncY.syncData[fmd5] == fmate + rmate + rmd5:
            return 1
    else:
        # Circular scan over the on-disk database of 64-byte records:
        # start at the current position, wrap past the 64-byte header,
        # stop after one full pass.
        if SyncY.sydb.tell() == SyncY.sydblen:
            SyncY.sydb.seek(64)
        datarec = SyncY.sydb.read(64)
        readlen = 64
        while datarec and readlen <= SyncY.sydblen - 64:
            if rmd5 == '*' and rmate == '*' and datarec[0:32] == fmd5 + fmate:
                return 1
            elif fmate == '*' and datarec[16:] == rmate + rmd5:
                return 1
            elif datarec == fmd5 + fmate + rmate + rmd5:
                return 1
            if readlen == SyncY.sydblen - 64:
                break
            if SyncY.sydb.tell() == SyncY.sydblen:
                SyncY.sydb.seek(64)
            datarec = SyncY.sydb.read(64)
            readlen += 64
    return 0
def __syncy_upload(self, ldir, rdir):
    """Upload-only pass: push every unsynced local file under *ldir* to *rdir*.

    Recurses into subdirectories and spawns one SYTask per file that has
    no matching record in the sync database.  Returns 0 on success, 1 on
    the first error.
    """
    fnlist = os.listdir(ldir)
    fnlist.sort()
    for fi in xrange(len(fnlist)):
        lfullpath = '%s/%s' % (ldir, fnlist[fi])
        fmtime = 0
        fsize = 0
        try:
            # Skip hidden files, excluded patterns, and invalid PCS names.
            if fnlist[fi][0:1] == '.' or self.__check_excludefiles(lfullpath) == 1 or self.__check_pcspath(rdir, fnlist[fi]) == 1:
                continue
            if __DEBUG__:
                printlog('%s Info(%s): Start upload "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), threading.currentThread().name, lfullpath))
            rfullpath = '%s/%s' % (rdir, fnlist[fi])
            if os.path.isdir(lfullpath):
                self.__syncy_upload(lfullpath, rfullpath)
            else:
                fmeta = os.stat(lfullpath)
                fmtime = int(fmeta.st_mtime)
                fsize = fmeta.st_size
                fnmd5 = hashlib.md5(lfullpath[SyncY.basedirlen:]).digest()
                # Only upload when no matching (mtime,size) record exists.
                if self.__check_syncstatus(fnmd5, struct.pack('>qq', fmtime, fsize), '*', '*') == 0:
                    if SyncY.config['ondup'] == 'rename':
                        ondup = 'newcopy'
                    else:
                        ondup = 'overwrite'
                    # Semaphore caps the number of concurrent transfer tasks.
                    if SyncY.TaskSemaphore.acquire():
                        synctask = SYTask(SYTask.Upload, lfullpath, int(fmeta.st_mtime), fmeta.st_size, fnmd5, rfullpath, 0, 0, '', ondup)
                        synctask.start()
                else:
                    continue
        except struct.error, e:
            printlog('%s ERROR: Struct.pack upload file mate(mtime:%d,size:%d) of "%s" error: %s\n%s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), fmtime, fsize, lfullpath, e, traceback.format_exc()))
            self.errorcount_increase()
            return 1
        except Exception, e:
            printlog('%s ERROR: Upload file "%s" failed. %s\n%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, e, traceback.format_exc()))
            self.errorcount_increase()
            return 1
    return 0
def __syncy_uploadplus(self, ldir, rdir):
    """Upload pass that also reconciles against the remote listing.

    Pages through the remote listing of *rdir*; local files that differ
    from their remote counterpart are re-uploaded (renaming or deleting
    the remote copy per the 'ondup' setting), then any local entries
    with no remote counterpart are uploaded.  Remote-only entries are
    left alone.  Returns 0 on success, 1 on the first error.
    """
    startidx = 0
    retcode, rfnlist = self.__get_pcs_filelist(rdir, startidx, SyncY.config['listnumber'])
    # 31066 = remote dir missing: fine, everything local gets uploaded.
    if retcode != 0 and retcode != 31066:
        self.errorcount_increase()
        return 1
    lfnlist = os.listdir(ldir)
    lfnlist.sort()
    while retcode == 0:
        for i in xrange(len(rfnlist)):
            rfullpath = rfnlist[i]['path'].encode(__CHARSET__)
            fnname = os.path.basename(rfullpath)
            lfullpath = '%s/%s' % (ldir, fnname)
            try:
                if self.__check_excludefiles(lfullpath) == 1:
                    continue
                if os.path.exists(lfullpath):
                    # Matched locally: remove from the local-only list.
                    for idx in xrange(len(lfnlist)):
                        if lfnlist[idx] == fnname:
                            del lfnlist[idx]
                            break
                else:
                    # Remote-only entry: not our business in upload mode.
                    continue
                if __DEBUG__:
                    printlog('%s Info(%s): Start upload+ "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), threading.currentThread().name, lfullpath))
                if (rfnlist[i]['isdir'] == 1 and os.path.isfile(lfullpath)) or (rfnlist[i]['isdir'] == 0 and os.path.isdir(lfullpath)):
                    # Type conflict (file vs directory): move the remote
                    # entry aside (or delete it), then upload the local one.
                    if SyncY.config['ondup'] == 'rename':
                        fnnamenew = '%s/%s' % (rdir, self.__get_newname(fnname))
                        if len(fnnamenew) >= 1000:
                            printlog('%s ERROR: Rename failed, the length of PCS path "%s" must less than 1000, skip upload "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), fnnamenew, lfullpath))
                            self.failcount_increase()
                            continue
                        if self.__mv_pcsfile(rfullpath, fnnamenew, True) == 1:
                            printlog('%s ERROR: Rename "%s" failed, skip upload "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), rfullpath, lfullpath))
                            self.errorcount_increase()
                            continue
                    else:
                        self.__rm_pcsfile(rfullpath, True)
                    if os.path.isdir(lfullpath):
                        self.__syncy_uploadplus(lfullpath, rfullpath)
                        continue
                    else:
                        fmeta = os.stat(lfullpath)
                        fnmd5 = hashlib.md5(lfullpath[SyncY.basedirlen:]).digest()
                        if SyncY.TaskSemaphore.acquire():
                            synctask = SYTask(SYTask.Upload, lfullpath, int(fmeta.st_mtime), fmeta.st_size, fnmd5, rfullpath, 0, 0, '', 'overwrite')
                            synctask.start()
                elif rfnlist[i]['isdir'] == 1:
                    # Both are directories: recurse.
                    self.__syncy_uploadplus(lfullpath, rfullpath)
                    continue
                else:
                    # Both are files: re-upload only when out of sync.
                    fmeta = os.stat(lfullpath)
                    fnmd5 = hashlib.md5(lfullpath[SyncY.basedirlen:]).digest()
                    if self.__check_syncstatus(fnmd5, struct.pack('>qq', int(fmeta.st_mtime), fmeta.st_size), struct.pack('>qq', rfnlist[i]['mtime'], rfnlist[i]['size']), rfnlist[i]['md5']) == 1:
                        continue
                    if SyncY.config['ondup'] == 'rename':
                        fnnamenew = '%s/%s' % (rdir, self.__get_newname(fnname))
                        if len(fnnamenew) >= 1000:
                            printlog('%s ERROR: Rename failed, the length of PCS path "%s" must less than 1000, skip upload "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), fnnamenew, lfullpath))
                            self.failcount_increase()
                            continue
                        if self.__mv_pcsfile(rfullpath, fnnamenew, True) == 1:
                            printlog('%s ERROR: Rename "%s" failed, skip upload "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), rfullpath, lfullpath))
                            self.failcount_increase()
                            continue
                    else:
                        self.__rm_pcsfile(rfullpath, True)
                    if SyncY.TaskSemaphore.acquire():
                        synctask = SYTask(SYTask.Upload, lfullpath, int(fmeta.st_mtime), fmeta.st_size, fnmd5, rfullpath, 0, 0, '', 'overwrite')
                        synctask.start()
            except struct.error, e:
                printlog('%s ERROR: Struct.pack file mate of "%s" error: %s\n%s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, e, traceback.format_exc()))
                self.errorcount_increase()
                return 1
            except Exception, e:
                printlog('%s ERROR: Upload file "%s" failed. %s\n%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, e, traceback.format_exc()))
                self.errorcount_increase()
                return 1
        # A short page means we have consumed the whole remote listing.
        if len(rfnlist) < SyncY.config['listnumber']:
            break
        startidx += SyncY.config['listnumber']
        retcode, rfnlist = self.__get_pcs_filelist(rdir, startidx, startidx + SyncY.config['listnumber'])
        if retcode != 0:
            self.errorcount_increase()
            return 1
    # Upload local entries that had no remote counterpart.
    for idx in xrange(len(lfnlist)):
        lfullpath = '%s/%s' % (ldir, lfnlist[idx])
        try:
            if lfnlist[idx][0:1] == '.' or self.__check_excludefiles(lfullpath) == 1 or self.__check_pcspath(rdir, lfnlist[idx]) == 1:
                continue
            rfullpath = '%s/%s' % (rdir, lfnlist[idx])
            if os.path.isdir(lfullpath):
                self.__syncy_uploadplus(lfullpath, rfullpath)
            elif os.path.isfile(lfullpath):
                fmeta = os.stat(lfullpath)
                fnmd5 = hashlib.md5(lfullpath[SyncY.basedirlen:]).digest()
                if SyncY.TaskSemaphore.acquire():
                    synctask = SYTask(SYTask.Upload, lfullpath, int(fmeta.st_mtime), fmeta.st_size, fnmd5, rfullpath, 0, 0, '', 'overwrite')
                    synctask.start()
        except struct.error, e:
            printlog('%s ERROR: Struct.pack file mate of "%s" error: %s\n%s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, e, traceback.format_exc()))
            self.errorcount_increase()
            return 1
        except Exception, e:
            printlog('%s ERROR: Upload file "%s" failed. %s\n%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, e, traceback.format_exc()))
            self.errorcount_increase()
            return 1
    return 0
def __syncy_download(self, ldir, rdir):
    """Download-only pass: pull every unsynced remote file under *rdir*.

    Pages through the remote listing, mirroring directories locally and
    spawning one SYTask per file that has no matching record in the sync
    database.  Conflicting local entries are renamed or deleted per the
    'ondup' setting.  Returns 0 on success, 1 on the first error.
    """
    startidx = 0
    retcode, rfnlist = self.__get_pcs_filelist(rdir, startidx, SyncY.config['listnumber'])
    if retcode != 0:
        self.errorcount_increase()
        return 1
    while retcode == 0:
        for i in xrange(len(rfnlist)):
            rfullpath = rfnlist[i]['path'].encode(__CHARSET__)
            fnname = os.path.basename(rfullpath)
            if self.__check_excludefiles(rfullpath) == 1:
                continue
            lfullpath = '%s/%s' % (ldir, fnname)
            try:
                if __DEBUG__:
                    printlog('%s Info(%s): Start download "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), threading.currentThread().name, rfullpath))
                if rfnlist[i]['isdir'] == 1:
                    # Local plain file in the way of a remote directory:
                    # rename it aside or delete it, then mirror the dir.
                    if os.path.exists(lfullpath) and os.path.isfile(lfullpath):
                        if SyncY.config['ondup'] == 'rename':
                            fnnamenew = '%s/%s' % (ldir, self.__get_newname(fnname))
                            rename(lfullpath, fnnamenew)
                        else:
                            if self.__rm_localfile(lfullpath, True) == 1:
                                printlog('%s ERROR: Delete local file "%s" failed, skip download "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, rfullpath))
                                self.errorcount_increase()
                                continue
                    if not (os.path.exists(lfullpath)):
                        os.mkdir(lfullpath)
                        if os.name == 'posix':
                            # Inherit owner/mode from the parent directory.
                            pmeta = os.stat(ldir)
                            os.lchown(lfullpath, pmeta.st_uid, pmeta.st_gid)
                            os.chmod(lfullpath, pmeta.st_mode)
                    self.__syncy_download(lfullpath, rfullpath)
                else:
                    fnmd5 = hashlib.md5(lfullpath[SyncY.basedirlen:]).digest()
                    # A '.db.syy' marker means a transfer of this file is
                    # incomplete, so the sync-record check is skipped.
                    if not (os.path.exists(lfullpath + '.db.syy')):
                        if self.__check_syncstatus(fnmd5, '*', struct.pack('>qq', rfnlist[i]['mtime'], rfnlist[i]['size']), rfnlist[i]['md5']) == 1:
                            continue
                    if os.path.exists(lfullpath) and SyncY.config['ondup'] == 'rename':
                        fnnamenew = '%s/%s' % (ldir, self.__get_newname(fnname))
                        rename(lfullpath, fnnamenew)
                    elif os.path.exists(lfullpath):
                        if self.__rm_localfile(lfullpath, True) == 1:
                            printlog('%s ERROR: Delete local file "%s" failed, skip download "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, rfullpath))
                            self.failcount_increase()
                            continue
                    if SyncY.TaskSemaphore.acquire():
                        synctask = SYTask(SYTask.Download, lfullpath, 0, 0, fnmd5, rfullpath, rfnlist[i]['mtime'], rfnlist[i]['size'], rfnlist[i]['md5'], 'overwrite')
                        synctask.start()
            except struct.error, e:
                printlog('%s ERROR: Struct.pack file mate of "%s" error: %s\n%s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, e, traceback.format_exc()))
                self.errorcount_increase()
                return 1
            except Exception, e:
                printlog('%s ERROR: Download file "%s" failed. %s\n%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, e, traceback.format_exc()))
                self.errorcount_increase()
                return 1
        if len(rfnlist) < SyncY.config['listnumber']:
            break
        startidx += SyncY.config['listnumber']
        retcode, rfnlist = self.__get_pcs_filelist(rdir, startidx, startidx + SyncY.config['listnumber'])
        if retcode != 0:
            self.errorcount_increase()
            return 1
    return 0
def __syncy_downloadplus(self, ldir, rdir):
    """Download pass that compares against existing local files.

    Like __syncy_download, but when a local file exists its (mtime,size)
    is checked against the sync record first so unchanged files are
    skipped.  Returns 0 on success, 1 on the first error.
    """
    startidx = 0
    retcode, rfnlist = self.__get_pcs_filelist(rdir, startidx, SyncY.config['listnumber'])
    if retcode != 0:
        self.errorcount_increase()
        return 1
    while retcode == 0:
        for i in xrange(0, len(rfnlist), 1):
            rfullpath = rfnlist[i]['path'].encode(__CHARSET__)
            fnname = os.path.basename(rfullpath)
            if self.__check_excludefiles(rfullpath) == 1:
                continue
            lfullpath = '%s/%s' % (ldir, fnname)
            try:
                if __DEBUG__:
                    printlog('%s Info(%s): Start download+ "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), threading.currentThread().name, rfullpath))
                if rfnlist[i]['isdir'] == 1:
                    # Local plain file in the way of a remote directory:
                    # rename it aside or delete it, then mirror the dir.
                    if os.path.exists(lfullpath) and os.path.isfile(lfullpath):
                        if SyncY.config['ondup'] == 'rename':
                            fnnamenew = '%s/%s' % (ldir, self.__get_newname(fnname))
                            rename(lfullpath, fnnamenew)
                        else:
                            if self.__rm_localfile(lfullpath, True) == 1:
                                printlog('%s ERROR: Delete local file "%s" failed, skip download "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, rfullpath))
                                self.errorcount_increase()
                                continue
                    if not (os.path.exists(lfullpath)):
                        os.mkdir(lfullpath)
                        if os.name == 'posix':
                            # Inherit owner/mode from the parent directory.
                            pmeta = os.stat(ldir)
                            os.lchown(lfullpath, pmeta.st_uid, pmeta.st_gid)
                            os.chmod(lfullpath, pmeta.st_mode)
                    self.__syncy_downloadplus(lfullpath, rfullpath)
                else:
                    fnmd5 = hashlib.md5(lfullpath[SyncY.basedirlen:]).digest()
                    # No '.db.syy' marker: compare local+remote meta with
                    # the sync record and skip when already in sync.
                    if os.path.exists(lfullpath) and not (os.path.exists(lfullpath + '.db.syy')):
                        fmeta = os.stat(lfullpath)
                        if self.__check_syncstatus(fnmd5, struct.pack('>qq', int(fmeta.st_mtime), fmeta.st_size), struct.pack('>qq', rfnlist[i]['mtime'], rfnlist[i]['size']), rfnlist[i]['md5']) == 1:
                            continue
                        if SyncY.config['ondup'] == 'rename':
                            fnnamenew = '%s/%s' % (ldir, self.__get_newname(fnname))
                            rename(lfullpath, fnnamenew)
                        else:
                            if self.__rm_localfile(lfullpath, True) == 1:
                                printlog('%s ERROR: Delete local file "%s" failed, skip download "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, rfullpath))
                                self.failcount_increase()
                                continue
                    if SyncY.TaskSemaphore.acquire():
                        synctask = SYTask(SYTask.Download, lfullpath, 0, 0, fnmd5, rfullpath, rfnlist[i]['mtime'], rfnlist[i]['size'], rfnlist[i]['md5'], 'overwrite')
                        synctask.start()
            except struct.error, e:
                printlog('%s ERROR: Struct.pack file mate of "%s" error: %s\n%s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, e, traceback.format_exc()))
                self.errorcount_increase()
                return 1
            except Exception, e:
                printlog('%s ERROR: Download file "%s" failed. %s\n%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, e, traceback.format_exc()))
                self.errorcount_increase()
                return 1
        if len(rfnlist) < SyncY.config['listnumber']:
            break
        startidx += SyncY.config['listnumber']
        retcode, rfnlist = self.__get_pcs_filelist(rdir, startidx, startidx + SyncY.config['listnumber'])
        if retcode != 0:
            self.errorcount_increase()
            return 1
    return 0
def __syncy_sync(self, ldir, rdir):
    """Two-way sync of *ldir* against *rdir*.

    Walks the remote listing and the local directory together, resolving
    each pairing via the sync database: entries unchanged since the last
    sync are skipped; one-sided changes propagate in that direction;
    items deleted on one side since the last sync are deleted on the
    other; otherwise the newer mtime wins.  Returns the tuple
    (local_files_deleted, remote_files_deleted) so the caller can prune
    directories that became empty; (0, 0) is also returned on error.
    """
    del_rfiles = 0
    del_lfiles = 0
    startidx = 0
    retcode, rfnlist = self.__get_pcs_filelist(rdir, startidx, SyncY.config['listnumber'])
    # 31066 = remote dir missing: everything local is upload-only below.
    if retcode != 0 and retcode != 31066:
        self.errorcount_increase()
        return 0, 0
    lfnlist = os.listdir(ldir)
    lfnlist.sort()
    while retcode == 0:
        for i in xrange(len(rfnlist)):
            rfullpath = rfnlist[i]['path'].encode(__CHARSET__)
            fnname = os.path.basename(rfullpath)
            if self.__check_excludefiles(rfullpath) == 1:
                continue
            lfullpath = '%s/%s' % (ldir, fnname)
            try:
                if os.path.exists(lfullpath):
                    # Matched locally: remove from the local-only list.
                    for idx in xrange(len(lfnlist)):
                        if lfnlist[idx] == fnname:
                            del lfnlist[idx]
                            break
                if __DEBUG__:
                    printlog('%s Info(%s): Start sync "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), threading.currentThread().name, lfullpath))
                if rfnlist[i]['isdir'] == 1:
                    if os.path.exists(lfullpath) and os.path.isfile(lfullpath):
                        # Conflict: remote directory vs local file.
                        fmeta = os.stat(lfullpath)
                        fnmd5 = hashlib.md5(lfullpath[SyncY.basedirlen:]).digest()
                        if self.__check_syncstatus(fnmd5, struct.pack('>qq', int(fmeta.st_mtime), fmeta.st_size), '*', '*') == 1 or rfnlist[i]['mtime'] > int(fmeta.st_mtime):
                            # Remote dir wins: drop the local file, download.
                            if self.__rm_localfile(lfullpath, True) == 1:
                                printlog('%s ERROR: Delete local file "%s" failed, skip download "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, rfullpath))
                                self.failcount_increase()
                                continue
                            self.__syncy_downloadplus(lfullpath, rfullpath)
                            continue
                        else:
                            # Local file wins: drop the remote dir, upload.
                            self.__rm_pcsfile(rfullpath, True)
                            if SyncY.TaskSemaphore.acquire():
                                synctask = SYTask(SYTask.Upload, lfullpath, int(fmeta.st_mtime), fmeta.st_size, fnmd5, rfullpath, 0, 0, '', 'overwrite')
                                synctask.start()
                    elif not (os.path.exists(lfullpath)):
                        # Remote-only directory: mirror it locally, then
                        # recurse; prune both sides if it ends up empty.
                        os.mkdir(lfullpath)
                        if os.name == 'posix':
                            pmeta = os.stat(ldir)
                            os.lchown(lfullpath, pmeta.st_uid, pmeta.st_gid)
                            os.chmod(lfullpath, pmeta.st_mode)
                        l_del, r_del = self.__syncy_sync(lfullpath, rfullpath)
                        if r_del > 0 and self.__get_pcs_filelist(rfullpath, 0, 10) == (0, []):
                            os.rmdir(lfullpath)
                            self.__rm_pcsfile(rfullpath, True)
                            del_rfiles += 1
                        continue
                    else:
                        # Directory on both sides: recurse.
                        self.__syncy_sync(lfullpath, rfullpath)
                        continue
                else:
                    fnmd5 = hashlib.md5(lfullpath[SyncY.basedirlen:]).digest()
                    fmtime = 0
                    fsize = 0
                    rmkey = struct.pack('>qq', rfnlist[i]['mtime'], rfnlist[i]['size'])
                    if os.path.exists(lfullpath) and os.path.isdir(lfullpath):
                        # Conflict: remote file vs local directory.
                        if self.__check_syncstatus(fnmd5, '*', rmkey, rfnlist[i]['md5']) == 1:
                            # Remote file unchanged since last sync: local
                            # dir is the new state -> replace remote.
                            if self.__rm_pcsfile(rfullpath, True) == 1:
                                printlog('%s ERROR: Delete remote file "%s" failed, skip sync "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), rfullpath, lfullpath))
                                self.errorcount_increase()
                                continue
                            self.__syncy_uploadplus(lfullpath, rfullpath)
                            continue
                        else:
                            # No record: newer mtime wins.
                            if rfnlist[i]['mtime'] > int(os.stat(lfullpath).st_mtime):
                                if self.__rm_localfile(lfullpath, True) == 1:
                                    printlog('%s ERROR: Delete local file "%s" failed, skip download "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, rfullpath))
                                    self.failcount_increase()
                                    continue
                                sync_op = SYTask.Download
                            else:
                                if self.__rm_pcsfile(rfullpath, True) == 1:
                                    printlog('%s ERROR: Delete remote file "%s" failed, skip sync "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), rfullpath, lfullpath))
                                    self.errorcount_increase()
                                    continue
                                self.__syncy_uploadplus(lfullpath, rfullpath)
                                continue
                    elif os.path.exists(lfullpath):
                        # File on both sides.
                        fmeta = os.stat(lfullpath)
                        fmtime = int(fmeta.st_mtime)
                        fsize = fmeta.st_size
                        fmkey = struct.pack('>qq', fmtime, fsize)
                        if self.__check_syncstatus(fnmd5, fmkey, rmkey, rfnlist[i]['md5']) == 1:
                            # Fully in sync: nothing to do.
                            continue
                        elif self.__check_syncstatus(fnmd5, fmkey, '*', '*') == 1:
                            # Local unchanged, remote changed -> download.
                            if self.__rm_localfile(lfullpath, True) == 1:
                                printlog('%s ERROR: Delete local file "%s" failed, skip download "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, rfullpath))
                                self.failcount_increase()
                                continue
                            sync_op = SYTask.Download
                        elif self.__check_syncstatus(fnmd5, '*', rmkey, rfnlist[i]['md5']) == 1:
                            # Remote unchanged, local changed -> upload.
                            self.__rm_pcsfile(rfullpath, True)
                            sync_op = SYTask.Upload
                        elif os.path.exists('%s.db.syy' % lfullpath):
                            # Incomplete-transfer marker: resume a matching
                            # download, otherwise discard it and fall back
                            # to newer-mtime-wins.
                            with open('%s.db.syy' % lfullpath, 'r') as infoh:
                                syyinfo = infoh.readline()
                            if syyinfo.strip('\n') == 'download:%s:%d' % (rfnlist[i]['md5'], rfnlist[i]['size']):
                                sync_op = SYTask.Download
                            else:
                                os.remove('%s.db.syy' % lfullpath)
                                if rfnlist[i]['mtime'] > fmtime:
                                    if self.__rm_localfile(lfullpath, True) == 1:
                                        printlog('%s ERROR: Delete local file "%s" failed, skip download "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, rfullpath))
                                        self.failcount_increase()
                                        continue
                                    sync_op = SYTask.Download
                                else:
                                    self.__rm_pcsfile(rfullpath, True)
                                    sync_op = SYTask.Upload
                        elif rfnlist[i]['mtime'] > fmtime:
                            # No history at all: newer mtime wins.
                            if self.__rm_localfile(lfullpath, True) == 1:
                                printlog('%s ERROR: Delete local file "%s" failed, skip download "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, rfullpath))
                                self.failcount_increase()
                                continue
                            sync_op = SYTask.Download
                        else:
                            self.__rm_pcsfile(rfullpath)
                            rfnlist[i]['mtime'] = 0
                            sync_op = SYTask.Upload
                    else:
                        # Local side missing.
                        if self.__check_syncstatus(fnmd5, '*', rmkey, rfnlist[i]['md5']) == 1:
                            # Remote unchanged since last sync: local copy
                            # was deleted -> delete the remote one too.
                            if self.__rm_pcsfile(rfullpath) == 1:
                                self.failcount_increase()
                            else:
                                self.synccount_increase()
                                del_rfiles += 1
                            continue
                        else:
                            sync_op = SYTask.Download
                    if SyncY.TaskSemaphore.acquire():
                        synctask = SYTask(sync_op, lfullpath, fmtime, fsize, fnmd5, rfullpath, rfnlist[i]['mtime'], rfnlist[i]['size'], rfnlist[i]['md5'], 'overwrite')
                        synctask.start()
            except struct.error, e:
                printlog('%s ERROR: Struct.pack file mate of "%s" error: %s\n%s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, e, traceback.format_exc()))
                self.errorcount_increase()
                return 0, 0
            except Exception, e:
                printlog('%s ERROR: Sync file "%s" failed. %s\n%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, e, traceback.format_exc()))
                self.errorcount_increase()
                return 0, 0
        if len(rfnlist) < SyncY.config['listnumber']:
            break
        startidx += SyncY.config['listnumber']
        retcode, rfnlist = self.__get_pcs_filelist(rdir, startidx, startidx + SyncY.config['listnumber'])
        if retcode != 0:
            self.errorcount_increase()
            return 0, 0
    # Handle local entries that had no remote counterpart.
    for idx in xrange(len(lfnlist)):
        lfullpath = '%s/%s' % (ldir, lfnlist[idx])
        if lfnlist[idx][0:1] == '.' or self.__check_excludefiles(lfullpath) == 1 or self.__check_pcspath(rdir, lfnlist[idx]) == 1:
            continue
        rfullpath = '%s/%s' % (rdir, lfnlist[idx])
        try:
            if __DEBUG__:
                printlog('%s Info(%s): Start sync(upload) "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), threading.currentThread().name, lfullpath))
            if os.path.isdir(lfullpath):
                if len(os.listdir(lfullpath)) == 0:
                    # Empty local dir: just make sure it exists remotely.
                    if self.__check_create_pcsdir(rfullpath) != 0:
                        self.failcount_increase()
                else:
                    l_del, r_del = self.__syncy_sync(lfullpath, rfullpath)
                    # Prune the local dir if the recursion emptied it.
                    if l_del > 0 and len(os.listdir(lfullpath)) == 0:
                        os.rmdir(lfullpath)
                        del_lfiles += 1
            elif os.path.isfile(lfullpath):
                fmeta = os.stat(lfullpath)
                fmtime = int(fmeta.st_mtime)
                fsize = fmeta.st_size
                fnmd5 = hashlib.md5(lfullpath[SyncY.basedirlen:]).digest()
                if self.__check_syncstatus(fnmd5, struct.pack('>qq', fmtime, fsize), '*', '*') == 1:
                    # Local unchanged since last sync but remote gone:
                    # the remote copy was deleted -> delete locally too.
                    if self.__rm_localfile(lfullpath, True) == 1:
                        printlog('%s ERROR: Delete local file "%s" failed, skip download "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, rfullpath))
                        self.failcount_increase()
                    else:
                        self.synccount_increase()
                        del_lfiles += 1
                    continue
                elif os.path.exists('%s.db.syy' % lfullpath):
                    # Incomplete-transfer marker: resume a matching upload,
                    # otherwise discard the stale marker (and for a stale
                    # download also the partial file) and skip.
                    with open('%s.db.syy' % lfullpath, 'r') as infoh:
                        syyinfo = infoh.readline()
                    if syyinfo.strip('\n') != 'upload:%d:%d' % (fmtime, fsize):
                        if syyinfo[0:6] == 'upload':
                            os.remove('%s.db.syy' % lfullpath)
                        else:
                            os.remove(lfullpath)
                            os.remove('%s.db.syy' % lfullpath)
                            continue
                if SyncY.TaskSemaphore.acquire():
                    synctask = SYTask(SYTask.Upload, lfullpath, fmtime, fsize, fnmd5, rfullpath, 0, 0, '', 'overwrite')
                    synctask.start()
        except struct.error, e:
            printlog('%s ERROR: Struct.pack file mate of "%s" error: %s\n%s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, e, traceback.format_exc()))
            self.errorcount_increase()
            return 0, 0
        except Exception, e:
            printlog('%s ERROR: Sync file "%s" failed. %s\n%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), lfullpath, e, traceback.format_exc()))
            self.errorcount_increase()
            return 0, 0
    return del_lfiles, del_rfiles
    def __start_sync(self):
        # Run one full pass over every configured sync path: validate the
        # config entry, prepare the local directory / remote directory / sync
        # database, apply per-path encryption and extraslice settings, run the
        # requested sync mode, then block until all worker threads drain.
        self.__get_pcs_quota()
        for i in range(len(SyncY.syncpath)):
            # Every path entry must carry all four mandatory keys.
            if 'localpath' not in SyncY.syncpath[str(i)] or 'remotepath' not in SyncY.syncpath[str(i)] or 'synctype' not in SyncY.syncpath[str(i)] or 'enable' not in SyncY.syncpath[str(i)]:
                printlog('%s ERROR: The %d\'s of syncpath setting is invalid.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), i + 1))
                continue
            if SyncY.syncpath[str(i)]['enable'] == '0':
                continue
            self.reset_counter()
            localpath = self.__catpath(SyncY.syncpath[str(i)]['localpath'])
            remotepath = self.__catpath(SyncY.pcsroot, SyncY.syncpath[str(i)]['remotepath'])
            ipath = ('%s:%s:%s' % (localpath, remotepath, SyncY.syncpath[str(i)]['synctype']))
            printlog('%s INFO: Start sync path "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), ipath))
            # Reject remote path components that PCS does not accept
            # (leading/trailing whitespace or dots, and /<>\|*?:" characters).
            ckdir = 0
            for rdir in remotepath.split('/'):
                if re.findall(r'^[\s\.\n].*|.*[/<>\\|\*\?:\"].*|.*[\s\.\n]$', rdir):
                    ckdir = 1
                    break
            if ckdir != 0:
                printlog('%s ERROR: Sync "%s" failed, remote directory error.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), ipath))
                continue
            if not (os.path.exists(localpath)):
                os.mkdir(localpath)
                if os.name == 'posix':
                    # Mirror owner/group/mode of the parent directory onto
                    # the freshly created sync root.
                    pmeta = os.stat(os.path.dirname(localpath))
                    os.lchown(localpath, pmeta.st_uid, pmeta.st_gid)
                    os.chmod(localpath, pmeta.st_mode)
            if self.__check_create_pcsdir(remotepath) != 0:
                printlog('%s ERROR Sync path: "%s" failed, remote directory is not exists and create directory failed.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), ipath))
                continue
            if localpath != '' and os.path.isdir(localpath):
                SyncY.syncydb = '%s/.syncy.info.db' % localpath
                SyncY.basedirlen = len(localpath)
                if self.__check_upgrade_syncdata(SyncY.syncydb, localpath, remotepath) != 0:
                    printlog('%s ERROR: Upgrade sync info data(%s) failed.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), SyncY.syncydb))
                    continue
                # Either cache the whole sync database in memory or keep an
                # open file handle for record lookups.
                if SyncY.config['datacache'] == 'on':
                    self.__init_syncdata()
                else:
                    SyncY.sydblen = os.stat(SyncY.syncydb).st_size
                    SyncY.sydb = open(SyncY.syncydb, 'rb')
                # extraslice and encryption are mutually exclusive features.
                if 'extraslice' in SyncY.syncpath[str(i)] and 'encryption' in SyncY.syncpath[str(i)]:
                    printlog('%s ERROR Sync path: "%s" failed, extraslice and encryption can not used together.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), ipath))
                    continue
                if 'encryption' in SyncY.syncpath[str(i)] and SyncY.syncpath[str(i)]['encryption'] != '0' and SyncY.syncpath[str(i)]['encryption'] != '':
                    if SyncY.syncpath[str(i)]['encryption'].upper() in ['1', '2', '3', 'ARC4', 'BLOWFISH', 'AES']:
                        if ARC4 is None or Blowfish is None or AES is None:
                            printlog('%s ERROR Sync path: "%s" failed, this path is set to encryption, but "Crypto.Cipher" library is not import.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), ipath))
                            continue
                        if 'encryptkey' not in SyncY.syncpath[str(i)]:
                            printlog('%s ERROR Sync path: "%s" failed, encryptkey is not set.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), ipath))
                            continue
                        # Normalize cipher names to the numeric codes used by
                        # SYCurl.__init_cipher ('1'=ARC4, '2'=Blowfish, '3'=AES).
                        SyncY.encryption = SyncY.syncpath[str(i)]['encryption'].upper()
                        if SyncY.encryption == 'ARC4':
                            SyncY.encryption = '1'
                        elif SyncY.encryption == 'BLOWFISH':
                            SyncY.encryption = '2'
                        elif SyncY.encryption == 'AES':
                            SyncY.encryption = '3'
                        SyncY.encryptkey = SyncY.syncpath[str(i)]['encryptkey']
                        if len(SyncY.encryptkey) < 8 or len(SyncY.encryptkey) > 56:
                            printlog('%s ERROR Sync path: "%s" failed, encryptkey is invalid, the length of encryptkey must great-than-or-equal 8 and less-than-or-equal-to 56.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), ipath))
                            continue
                    else:
                        printlog('%s ERROR Sync path: "%s" failed, encryption is invalid, must is ARC4 or BLOWFISH or AES.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), ipath))
                        continue
                else:
                    SyncY.encryption = '0'
                    SyncY.encryptkey = ''
                if 'extraslice' in SyncY.syncpath[str(i)] and SyncY.syncpath[str(i)]['extraslice'] == '1':
                    SyncY.extraslice = True
                else:
                    SyncY.extraslice = False
                # Dispatch on the configured sync mode.
                if SyncY.syncpath[str(i)]['synctype'].lower() in ['0', 'u', 'upload']:
                    self.__syncy_upload(localpath, remotepath)
                elif SyncY.syncpath[str(i)]['synctype'].lower() in ['1', 'u+', 'upload+']:
                    self.__syncy_uploadplus(localpath, remotepath)
                elif SyncY.syncpath[str(i)]['synctype'].lower() in ['2', 'd', 'download']:
                    self.__syncy_download(localpath, remotepath)
                elif SyncY.syncpath[str(i)]['synctype'].lower() in ['3', 'd+', 'download+']:
                    self.__syncy_downloadplus(localpath, remotepath)
                elif SyncY.syncpath[str(i)]['synctype'].lower() in ['4', 's', 'sync']:
                    self.__syncy_sync(localpath, remotepath)
                else:
                    printlog('%s ERROR: The "synctype" of "%s" is invalid, must set to [0 - 4], skiped.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), ipath))
                    continue
                if SyncY.config['datacache'] == 'on':
                    SyncY.syncData = None
                else:
                    SyncY.sydb.close()
                # Wait until all transfer threads have finished, then report.
                while True:
                    if threading.activeCount() > 1 or len(SyncY.synctask) > 0:
                        time.sleep(3)
                    else:
                        if SyncY.syncpath[str(i)]['synctype'].lower() in ['2', 'd', 'download']:
                            SyncY.syncytoken['synctotal'] += SyncY.synccount
                            self.__save_config()
                        if SyncY.failcount == 0 and SyncY.errorcount == 0:
                            if SyncY.syncpath[str(i)]['synctype'].lower() not in ['2', 'd', 'download']:
                                self.__start_compress(SyncY.syncpath[str(i)]['localpath'])
                            printlog('%s INFO: Sync path "%s" complete, Success sync %d files.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), ipath, SyncY.synccount))
                        else:
                            printlog('%s ERROR: Sync path "%s" failed, %d files success, %d files failed, %d errors occurred.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), ipath, SyncY.synccount, SyncY.failcount, SyncY.errorcount))
                        break
            else:
                printlog('%s ERROR: Sync path "%s" failed, local directory is not exist or is normal file.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), ipath))
        self.__get_pcs_quota()
@staticmethod
def __test_chinese(tdir=''):
unicode_str = '\u4e2d\u6587\u8f6c\u7801\u6d4b\u8bd5'
unicode_str = eval('u"%s"' % unicode_str)
unicode_str = unicode_str.encode(__CHARSET__)
with open('%s/%s' % (tdir, unicode_str), 'w') as chnfn:
chnfn.write(unicode_str)
@staticmethod
def __data_convert():
mpath = SyncY.config['syncpath'].split(';')
for i in range(len(mpath)):
if mpath[i] == '':
continue
localdir = mpath[i].split(':')[0:1]
syncydb = '%s/.syncy.info.db' % localdir
if os.path.exists(syncydb):
syncydbtmp = '%s/.syncy.info.db1' % localdir
if os.path.exists(syncydbtmp):
os.remove(syncydbtmp)
with open(syncydb, 'r') as sydb:
syncinfo = sydb.readlines()
if len(syncinfo[0]) > 100 or len(syncinfo[0].split(' ')[0]) != 32:
printlog('%s ERROR: Convert sync data failed "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), mpath[i]))
continue
with open(syncydbtmp, 'wb') as sydbnew:
for j in xrange(len(syncinfo)):
rmd5, lmtime, lsize, lmd5 = syncinfo[j].split(' ')
rmd5 = rmd5.decode('hex')
lmtime = struct.pack('>I', lmtime)
lsize = struct.pack('>I', lsize % 4294967296)
lmd5 = lmd5.decode('hex')
sydbnew.write('%s%s%s%s' % (rmd5, lmtime, lsize, lmd5))
rename(syncydbtmp, syncydb)
    def __rebuild(self, mpath):
        # Rebuild the binary sync database for the given syncpath indices
        # (all configured paths when mpath is empty) by walking the remote
        # tree and matching it against local files.  The previous database
        # is kept as a timestamped ".bak<epoch>" file.
        if len(mpath) == 0:
            mpath = range(len(SyncY.syncpath))
        for i in mpath:
            i = int(i)
            if i >= len(SyncY.syncpath):
                continue
            printlog("%s INFO: Start rebuild sync data for directory '%s'." % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), SyncY.syncpath[str(i)]['localpath']))
            localpath = self.__catpath(SyncY.syncpath[str(i)]['localpath'])
            remotepath = self.__catpath(SyncY.pcsroot, SyncY.syncpath[str(i)]['remotepath'])
            SyncY.basedirlen = len(SyncY.syncpath[str(i)]['localpath'])
            SyncY.syncydb = '%s/.syncy.info.db' % SyncY.syncpath[str(i)]['localpath']
            if os.path.exists(SyncY.syncydb):
                # Preserve the old database under a timestamped backup name.
                rename(SyncY.syncydb, '%s.bak%s' % (SyncY.syncydb, str(int(time.time()))))
            if 'extraslice' in SyncY.syncpath[str(i)] and SyncY.syncpath[str(i)]['extraslice'] == '1':
                SyncY.extraslice = True
            else:
                SyncY.extraslice = False
            # The database file was just renamed away, so this recreates an
            # empty file with the v2 header.
            self.__check_upgrade_syncdata(SyncY.syncydb)
            with open(SyncY.syncydb, 'ab') as sydb:
                ret = self.__rebuild_data(localpath, remotepath, sydb)
                sydb.flush()
                os.fsync(sydb.fileno())
            if ret == 0:
                printlog("%s INFO: Rebuild sync data completed for directory '%s'." % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), SyncY.syncpath[str(i)]['localpath']))
            else:
                printlog("%s ERROR: Rebuild sync data failed for directory '%s'." % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), SyncY.syncpath[str(i)]['localpath']))
    def __rebuild_data(self, localpath, remotepath, sydb):
        # Recursively list the remote directory page by page and, for every
        # remote file whose local counterpart exists, append a 40-byte sync
        # record to the database handle.  Returns 0 on success, 1 on any
        # remote listing failure.
        startidx = 0
        retcode, rfnlist = self.__get_pcs_filelist(remotepath, startidx, SyncY.config['listnumber'])
        if retcode != 0:
            return 1
        while retcode == 0:
            for i in xrange(len(rfnlist)):
                rfullpath = rfnlist[i]['path'].encode(__CHARSET__)
                fnname = os.path.basename(rfullpath)
                lfullpath = '%s/%s' % (localpath, fnname)
                if self.__check_excludefiles(rfullpath) == 1 or self.__check_excludefiles(lfullpath) == 1:
                    continue
                if rfnlist[i]['isdir'] == 1:
                    # Recurse into the matching local/remote subdirectory.
                    ret = self.__rebuild_data(lfullpath, rfullpath, sydb)
                    if ret != 0:
                        return 1
                elif os.path.exists(lfullpath) and os.path.isfile(lfullpath):
                    fnstat = os.stat(lfullpath)
                    # Record key: md5 of the path relative to the sync root.
                    fnmd5 = hashlib.md5(lfullpath[SyncY.basedirlen:]).digest()
                    # With extraslice the remote size intentionally differs
                    # from the local size, so the size check is skipped.
                    if SyncY.extraslice or rfnlist[i]['size'] == fnstat.st_size:
                        sydb.write('%s%s%s' % (fnmd5, struct.pack('>qqqq', int(fnstat.st_mtime), fnstat.st_size, rfnlist[i]['mtime'], rfnlist[i]['size']), rfnlist[i]['md5'].decode('hex')))
            # A short page means the remote listing is exhausted.
            if len(rfnlist) < SyncY.config['listnumber']:
                break
            startidx += SyncY.config['listnumber']
            retcode, rfnlist = self.__get_pcs_filelist(remotepath, startidx, startidx + SyncY.config['listnumber'])
            if retcode != 0:
                return 1
        return 0
    def __check_upgrade_syncdata(self, dbfile, ldir='', rdir=''):
        # Ensure the sync database exists and is in the v2 format.
        # - Missing file: create it with the 64-byte v2 header and return 0.
        # - Existing file with ldir/rdir given: if the header already says
        #   v2, return 0; otherwise read all legacy 40-byte records into a
        #   dict (keyed by the 16-byte path hash) and rewrite them into a
        #   new v2 file via __upgrade_syncdata.
        # Returns 0 on success, 1 on failure.
        dbkey = '.syncy.info.db'.ljust(16)
        dbver = '2.0'.ljust(8)
        dbauthor = 'Author:wishinlife'.ljust(20)
        dbweb = 'http://www.syncy.cn'.ljust(20)
        if not(os.path.exists(dbfile)):
            # Fresh database: write the header only (unbuffered handle).
            with open(dbfile, 'wb', 0) as sydb:
                sydb.write('%s%s%s%s' % (dbkey, dbver, dbauthor, dbweb))
                sydb.flush()
                os.fsync(sydb.fileno())
            return 0
        elif ldir != '' and rdir != '':
            with open(dbfile, 'rb') as sydb:
                flock(sydb, LOCK_EX)
                datarec = sydb.read(40)
                if datarec[0:24] == '%s%s' % (dbkey, dbver):
                    # Already v2 format, nothing to do.
                    flock(sydb, LOCK_UN)
                    return 0
                # Legacy format: index records as {path_md5: times+md5 blob}.
                syncdata = {}
                while datarec:
                    syncdata[datarec[24:]] = datarec[0:24]
                    datarec = sydb.read(40)
                newdbfile = '%s.v2' % dbfile
                printlog('%s INFO: Start upgrade sync info data(%s).' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), dbfile))
                with open(newdbfile, 'wb') as sydbnew:
                    flock(sydbnew, LOCK_EX)
                    sydbnew.write('%s%s%s%s' % (dbkey, dbver, dbauthor, dbweb))
                    ret = self.__upgrade_syncdata(ldir, rdir, syncdata, sydbnew)
                    sydbnew.flush()
                    os.fsync(sydbnew.fileno())
                    flock(sydbnew, LOCK_UN)
                flock(sydb, LOCK_UN)
                if ret == 0:
                    # Swap the new database in, keeping the old as .bak.
                    rename(dbfile, dbfile + '.bak')
                    rename(newdbfile, dbfile)
                    printlog('%s INFO: Upgrade sync info data success, backup sync info data to "%s.bak".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), dbfile))
                    return 0
                else:
                    os.remove(newdbfile)
                    printlog('%s ERROR: Upgrade sync info data(%s) failed.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), dbfile))
                    return 1
        else:
            return 1
def __upgrade_syncdata(self, ldir, rdir, syncdata, sydbnew):
startidx = 0
retcode, rfnlist = self.__get_pcs_filelist(rdir, startidx, SyncY.config['listnumber'])
if retcode != 0 and retcode != 31066:
return 1
lfnlist = []
if os.path.exists(ldir):
lfnlist = os.listdir(ldir)
lfnlist.sort()
while retcode == 0:
for i in xrange(len(rfnlist)):
rfullpath = rfnlist[i]['path'].encode(__CHARSET__)
fnname = os.path.basename(rfullpath)
if self.__check_excludefiles(rfullpath) == 1:
continue
lfullpath = '%s/%s' % (ldir, fnname)
if os.path.exists(lfullpath):
for idx in xrange(len(lfnlist)):
if lfnlist[idx] == fnname:
del lfnlist[idx]
break
fnmd5 = hashlib.md5(lfullpath[SyncY.basedirlen:]).digest()
oldfnmd5 = hashlib.md5('%s\n' % lfullpath[SyncY.basedirlen:]).digest()
if rfnlist[i]['isdir'] == 1:
if os.path.exists(lfullpath) and os.path.isfile(lfullpath):
fmeta = os.stat(lfullpath)
fmtime = int(fmeta.st_mtime)
fsize = fmeta.st_size
if oldfnmd5 not in syncdata:
if self.__upgrade_syncdata(lfullpath, rfullpath, syncdata, sydbnew) != 0:
return 1
elif struct.pack('>II', fmtime, fsize % 4294967296) == syncdata[oldfnmd5][16:24]:
sydbnew.write('%s%s%s' % (fnmd5, struct.pack('>qqqq', fmtime, fsize, fmtime, fsize), syncdata[oldfnmd5][0:16]))
else:
if self.__upgrade_syncdata(lfullpath, rfullpath, syncdata, sydbnew) != 0:
return 1
else:
if not os.path.exists(lfullpath):
if oldfnmd5 in syncdata and rfnlist[i]['md5'].decode('hex') == syncdata[oldfnmd5][0:16]:
sydbnew.write('%s%s%s' % (fnmd5, struct.pack('>qqqq', struct.unpack('>I', syncdata[oldfnmd5][16:20])[0], struct.unpack('>I', syncdata[oldfnmd5][20:24])[0], rfnlist[i]['mtime'], rfnlist[i]['size']), syncdata[0:16]))
elif os.path.exists(lfullpath) and os.path.isdir(lfullpath):
if oldfnmd5 not in syncdata:
if self.__upgrade_syncdata(lfullpath, rfullpath, syncdata, sydbnew) != 0:
return 1
elif rfnlist[i]['md5'].decode('hex') == syncdata[oldfnmd5][0:16]:
sydbnew.write('%s%s%s' % (fnmd5, struct.pack('>qqqq', struct.unpack('>I', syncdata[oldfnmd5][16:20])[0], struct.unpack('>I', syncdata[oldfnmd5][20:24])[0], rfnlist[i]['mtime'], rfnlist[i]['size']), syncdata[0:16]))
elif oldfnmd5 in syncdata:
fmeta = os.stat(lfullpath)
fmtime = int(fmeta.st_mtime)
fsize = fmeta.st_size
if struct.pack('>II', fmtime, fsize % 4294967296) == syncdata[oldfnmd5][16:24] and rfnlist[i]['md5'].decode('hex') == syncdata[oldfnmd5][0:16]:
sydbnew.write('%s%s%s' % (fnmd5, struct.pack('>qqqq', fmtime, fsize, rfnlist[i]['mtime'], rfnlist[i]['size']), syncdata[oldfnmd5][0:16]))
elif rfnlist[i]['md5'].decode('hex') == syncdata[oldfnmd5][0:16]:
sydbnew.write('%s%s%s' % (fnmd5, struct.pack('>qqqq', struct.unpack('>I', syncdata[oldfnmd5][16:20])[0], struct.unpack('>I', syncdata[oldfnmd5][20:24])[0], rfnlist[i]['mtime'], rfnlist[i]['size']), syncdata[0:16]))
elif struct.pack('>II', fmtime, fsize % 4294967296) == syncdata[oldfnmd5][16:24]:
sydbnew.write('%s%s%s' % (fnmd5, struct.pack('>qqqq', fmtime, fsize, struct.unpack('>I', syncdata[oldfnmd5][16:20])[0], struct.unpack('>I', syncdata[oldfnmd5][20:24])[0]), syncdata[0:16]))
if oldfnmd5 in syncdata:
del syncdata[oldfnmd5]
if len(rfnlist) < SyncY.config['listnumber']:
break
startidx += SyncY.config['listnumber']
retcode, rfnlist = self.__get_pcs_filelist(rdir, startidx, startidx + SyncY.config['listnumber'])
if retcode != 0:
return 1
for idx in xrange(len(lfnlist)):
lfullpath = '%s/%s' % (ldir, lfnlist[idx])
if lfnlist[idx][0:1] == '.' or self.__check_excludefiles(lfullpath) == 1:
continue
rfullpath = '%s/%s' % (rdir, lfnlist[idx])
if os.path.isdir(lfullpath):
if self.__upgrade_syncdata(lfullpath, rfullpath, syncdata, sydbnew) != 0:
return 1
elif os.path.isfile(lfullpath):
fmeta = os.stat(lfullpath)
fmtime = int(fmeta.st_mtime)
fsize = fmeta.st_size
fnmd5 = hashlib.md5(lfullpath[SyncY.basedirlen:]).digest()
oldfnmd5 = hashlib.md5('%s\n' % lfullpath[SyncY.basedirlen:]).digest()
if oldfnmd5 not in syncdata:
continue
elif struct.pack('>II', fmtime, fsize % 4294967296) == syncdata[oldfnmd5][16:24]:
sydbnew.write('%s%s%s' % (fnmd5, struct.pack('>qqqq', fmtime, fsize, fmtime, fsize), syncdata[oldfnmd5][0:16]))
del syncdata[oldfnmd5]
return 0
def start(self):
if len(self.__argv) == 0:
if SyncY.config['syncperiod'] == '':
self.__start_sync()
else:
starthour, endhour = SyncY.config['syncperiod'].split('-', 1)
curhour = time.localtime().tm_hour
starthour = int(starthour)
endhour = int(endhour)
while True:
if (endhour > starthour and starthour <= curhour < endhour) or (endhour < starthour and (curhour < starthour or curhour >= endhour)):
self.__start_sync()
self.__check_expires()
time.sleep(SyncY.config['syncinterval'])
else:
time.sleep(300)
curhour = time.localtime().tm_hour
elif self.__argv[0] == 'compress':
self.__start_compress()
elif self.__argv[0] == 'convert':
self.__data_convert()
elif self.__argv[0] == 'testchinese':
self.__test_chinese(self.__argv[1])
elif self.__argv[0] == 'rebuild':
self.__rebuild(self.__argv[1:])
elif os.path.isfile(self.__argv[0]):
fname = os.path.basename(self.__argv[0])
if len(self.__argv) == 2:
pcsdir = self.__catpath(SyncY.pcsroot, self.__argv[1])
else:
pcsdir = SyncY.pcsroot
if self.__check_pcspath(pcsdir, fname) == 0:
self.__upload_file_nosync(self.__argv[0], self.__catpath(pcsdir, fname))
elif not (self.__argv[0] in ["sybind", "cpbind"]):
printlog('%s WARNING: Unknown command "%s"' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), ' '.join(self.__argv)))
class SYCurl:
Normal = 0
Upload = 1
Download = 2
def __init__(self):
self.__response = ''
self.__op = None
self.__fd = None
self.__startpos = 0
self.__endpos = None
self.__buffer = ''
@staticmethod
def __init_cipher(crypt, key):
if crypt == '1':
return ARC4.new(key)
elif crypt == '2':
return Blowfish.new(key, Blowfish.MODE_CFB, segment_size=8)
elif crypt == '3':
return AES.new(key.ljust(32, '.')[0:32], AES.MODE_CFB, segment_size=8)
else:
return None
def __write_data(self, rsp):
if self.__op == SYCurl.Download:
if self.__startpos + len(self.__buffer) + len(rsp) - 1 > self.__endpos:
return 0
if SyncY.encryption == '0':
self.__fd.write(rsp)
self.__startpos += len(rsp)
else:
self.__buffer += rsp
while len(self.__buffer) >= 4096 or self.__startpos + len(self.__buffer) - 1 == self.__endpos:
cipher = self.__init_cipher(SyncY.encryption, SyncY.encryptkey)
self.__fd.write(cipher.decrypt(self.__buffer[0:4096]))
self.__startpos += 4096
self.__buffer = self.__buffer[4096:]
else:
self.__response += rsp
return len(rsp)
def __read_data(self, size):
if self.__startpos > self.__endpos:
return ''
elif self.__startpos + size - 1 > self.__endpos:
size = self.__endpos - self.__startpos + 1
if SyncY.encryption == '0':
self.__startpos += size
return self.__fd.read(size)
else:
while len(self.__buffer) < size:
rst = self.__fd.read(4096)
if rst:
cipher = self.__init_cipher(SyncY.encryption, SyncY.encryptkey)
self.__buffer += cipher.encrypt(rst)
else:
break
rst = self.__buffer[0:size]
self.__buffer = self.__buffer[size:]
self.__startpos += size
return rst
@staticmethod
def __write_header(rsp):
return len(rsp)
def request(self, url, querydata, rdata, method='POST', rtype=0, fnname=''):
retrycnt = -1
self.__op = rtype
while retrycnt < SyncY.config['retrytimes']:
retrycnt += 1
if __DEBUG__:
printlog('%s Info(%s): Start curl request(%s) %d times for %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), threading.currentThread().name, rdata, retrycnt, fnname))
if self.__op != SYCurl.Normal:
startpos, self.__endpos = rdata.split('-', 1)
startpos = self.__startpos = int(startpos)
self.__endpos = int(self.__endpos)
self.__response = ''
curl = pycurl.Curl()
try:
if querydata:
if 'path' in querydata:
querydata['path'] = querydata['path'].encode('UTF-8')
url += '?%s' % urlencode(querydata)
curl.setopt(pycurl.URL, url)
curl.setopt(pycurl.SSL_VERIFYPEER, 0)
curl.setopt(pycurl.SSL_VERIFYHOST, 0)
curl.setopt(pycurl.FOLLOWLOCATION, 1)
curl.setopt(pycurl.CONNECTTIMEOUT, 15)
curl.setopt(pycurl.LOW_SPEED_LIMIT, 1)
curl.setopt(pycurl.LOW_SPEED_TIME, 30)
curl.setopt(pycurl.USERAGENT, '')
curl.setopt(pycurl.HEADER, 0)
curl.setopt(pycurl.NOSIGNAL, 1)
curl.setopt(pycurl.WRITEFUNCTION, self.__write_data)
starthour, endhour = SyncY.config['speedlimitperiod'].split('-', 1)
starthour = int(starthour)
endhour = int(endhour)
curhour = time.localtime().tm_hour
if (endhour > starthour and starthour <= curhour < endhour) or (endhour < starthour and (curhour < starthour or curhour >= endhour)):
curl.setopt(pycurl.MAX_SEND_SPEED_LARGE, SyncY.config['maxsendspeed'] / SyncY.config['tasknumber'] / SyncY.config['threadnumber'])
curl.setopt(pycurl.MAX_RECV_SPEED_LARGE, SyncY.config['maxrecvspeed'] / SyncY.config['tasknumber'] / SyncY.config['threadnumber'])
if self.__op == SYCurl.Upload:
curl.setopt(pycurl.UPLOAD, 1)
curl.setopt(pycurl.READFUNCTION, self.__read_data)
curl.setopt(pycurl.INFILESIZE, self.__endpos - startpos + 1)
with open(fnname, 'rb') as self.__fd:
self.__fd.seek(startpos)
flock(self.__fd, LOCK_SH)
curl.perform()
flock(self.__fd, LOCK_UN)
elif self.__op == SYCurl.Download:
curl.setopt(pycurl.RANGE, rdata)
with open(fnname, 'rb+') as self.__fd:
self.__fd.seek(startpos)
lockf(self.__fd, LOCK_EX, self.__endpos - startpos + 1, startpos, 0)
curl.perform()
self.__fd.flush()
os.fsync(self.__fd.fileno())
lockf(self.__fd, LOCK_UN, self.__endpos - startpos + 1, startpos, 0)
else:
curl.setopt(pycurl.CUSTOMREQUEST, method)
if method == 'POST':
curl.setopt(pycurl.POSTFIELDS, urlencode(rdata))
curl.perform()
retcode = curl.getinfo(pycurl.HTTP_CODE)
if retcode < 400 or retcode == 404 or retrycnt == SyncY.config['retrytimes']:
if retcode != 200 and retcode != 206 and self.__response == '':
self.__response = '{"error_code":%d,"error_msg":"Returned by the server is not in the expected results."}' % retcode
return retcode, self.__response
else:
time.sleep(SyncY.config['retrydelay'])
except pycurl.error, error:
errno, errstr = error
if retrycnt == SyncY.config['retrytimes']:
return errno, '{"error_code":%d,"error_msg":"%s"}' % (errno, errstr)
except Exception, e:
return -1, '{"error_code":%d,"error_msg":"%s"}' % (-1, traceback.format_exc().replace('\n', '\\n').replace('"', '\''))
finally:
curl.close()
if __DEBUG__:
printlog('%s Info(%s): Complete curl request(%s) %d times for %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), threading.currentThread().name, rdata, retrycnt, fnname))
class ThreadTest(threading.Thread):
    # Tiny self-test thread: prints a success line, proving the interpreter
    # can actually run a second thread.

    def __init__(self):
        threading.Thread.__init__(self, name='ThreadTest')

    def run(self):
        stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        print('%s INFO: Multi thread test success.' % (stamp))
class SYTask(threading.Thread):
Upload = 1
Download = 2
    def __init__(self, syncoperation, filepath, fmtime, fsize, fnmd5, pcspath, rmtime, rsize, rmd5, ondup):
        # One transfer task (upload or download) running in its own thread.
        #   syncoperation: SYTask.Upload or SYTask.Download.
        #   filepath/fmtime/fsize: local file path, mtime and size.
        #   fnmd5: md5 digest of the base-relative path; used as the key in
        #          the global SyncY.synctask registry.
        #   pcspath/rmtime/rsize/rmd5: remote counterpart metadata.
        #   ondup: PCS duplicate policy (e.g. 'overwrite').
        threading.Thread.__init__(self)
        self.__op = syncoperation
        self.__filepath = filepath
        self.__fmtime = fmtime
        self.__fsize = fsize
        self.__fnmd5 = fnmd5
        self.__pcspath = pcspath
        self.__rmtime = rmtime
        self.__rsize = rsize
        self.__rmd5 = rmd5
        self.__ondup = ondup
        # Register this task; slice workers append their state to this list.
        SyncY.synctask[self.__fnmd5] = []
        self.__blocksize = SyncY.config['blocksize'] * 1048576
        if self.__op == self.Upload:
            # Keep the slice count bounded (<= ~1022 slices) by growing the
            # block size; the result is rounded up to the next 1 MiB multiple.
            blocksize = int(self.__fsize / 1022) + 1
            if blocksize > self.__blocksize:
                self.__blocksize = (0x100000 - (blocksize & 0xfffff) & 0xfffff) + blocksize
    def run(self):
        # Thread body: pick the transfer strategy, run it, update the global
        # success/failure counters, then always unregister the task and
        # release the task semaphore.
        if __DEBUG__:
            printlog('%s Info(%s): start run task(op:%s) for %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.name, str(self.__op), self.__filepath))
        try:
            ret = 1
            if self.__op == SYCurl.Upload:
                # A leftover .db.syy file means a resumable slice upload is
                # in progress; extraslice also forces the slice path.
                if os.path.exists(self.__filepath + '.db.syy') or (SyncY.extraslice and self.__fsize > 0):
                    ret = self.__slice_uploadfile()
                else:
                    if self.__fsize <= 262144:
                        # Files up to 256 KB: single-shot upload (too small
                        # for rapid upload, which needs a 256 KB slice md5).
                        ret = self.__upload_file()
                    elif SyncY.encryption != '0':
                        # Encrypted payloads never match a server-side md5,
                        # so rapid upload is pointless.
                        if self.__fsize <= self.__blocksize + 1048576:
                            ret = self.__upload_file()
                        else:
                            ret = self.__slice_uploadfile()
                    else:
                        ret = self.__rapid_uploadfile()
            elif self.__op == SYCurl.Download:
                ret = self.__download_file()
            else:
                printlog('%s ERROR: Unknown sync operation(%s) of threading operation.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__op))
                SyncY.errorcount_increase()
            if ret == 0:
                SyncY.synccount_increase()
            else:
                SyncY.failcount_increase()
        except Exception, e:
            printlog('%s ERROR: Transfer task exception error occurred: %s .\n%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), e, traceback.format_exc()))
            SyncY.failcount_increase()
        finally:
            # Always free the registry slot and the concurrency semaphore.
            del SyncY.synctask[self.__fnmd5]
            SyncY.TaskSemaphore.release()
            if __DEBUG__:
                printlog('%s Info(%s): exit task(op:%s) for %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.name, str(self.__op), self.__filepath))
def __create_emptyfile(self):
with open('%s.syy' % self.__filepath, 'wb') as f:
try:
if self.__rsize > 0:
f.seek(self.__rsize - 1)
f.write('\0')
f.flush()
os.fsync(f.fileno())
except Exception, e:
printlog('%s ERROR: Create file "%s" failed. Exception: "%s".\n%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__filepath, e, traceback.format_exc()))
return 1
return 0
@staticmethod
def __create_random_tmpfile(tmpfile, orig_filesize):
try:
randomsize = random.randint(16, 160)
with open(tmpfile, 'wb') as f:
for i in xrange(randomsize):
f.write(''.join(random.sample('zyxwvutsrqponmlkjihgfedcbaABCDEFGHIJKLMNOPQRSTUVWXYZ ,./;\'[]\\=-0987654321`~!@$%^&*()_+|}{":?><', 64)))
extrastr = '#syy#0#%d#%d' % (orig_filesize, randomsize * 64)
f.write(extrastr)
f.flush()
os.fsync(f.fileno())
return randomsize * 64 + len(extrastr)
except Exception, e:
printlog('%s ERROR: Create extra tmpfile "%s" failed. Exception: "%s".\n%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), tmpfile, e, traceback.format_exc()))
return 0
    def __save_data(self):
        # Append this task's finished sync record to the binary database:
        # path-md5(16) + local mtime/size + remote mtime/size ('>qqqq') +
        # remote md5(16), under an exclusive lock on an unbuffered handle.
        with open(SyncY.syncydb, 'ab', 0) as sydb:
            try:
                flock(sydb, LOCK_EX)
                rmd5 = self.__rmd5.decode('hex')
                sydb.write('%s%s%s' % (self.__fnmd5, struct.pack('>qqqq', self.__fmtime, self.__fsize, self.__rmtime, self.__rsize), rmd5))
                sydb.flush()
                os.fsync(sydb.fileno())
                flock(sydb, LOCK_UN)
            except Exception, e:
                # Best-effort: log and continue; the transfer itself succeeded.
                printlog('%s ERROR: Save sync data failed (%s).\n%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), e, traceback.format_exc()))
def __md5sum(self):
with open(self.__filepath, 'rb') as fh:
m = hashlib.md5()
fbuffer = fh.read(8192)
while fbuffer:
m.update(fbuffer)
fbuffer = fh.read(8192)
cmd5 = m.hexdigest()
return cmd5
def __rapid_checkcode(self):
with open(self.__filepath, 'rb') as fh:
m = hashlib.md5()
fbuffer = fh.read(8192)
crc = 0
while fbuffer:
m.update(fbuffer)
crc = binascii.crc32(fbuffer, crc) & 0xffffffff
fbuffer = fh.read(8192)
cmd5 = m.hexdigest()
m = hashlib.md5()
fh.seek(0)
for i in range(32):
fbuffer = fh.read(8192)
m.update(fbuffer)
return '%x' % crc, cmd5, m.hexdigest()
    def __upload_file(self):
        # Upload the whole file in one ranged POST to the PCS 'upload'
        # endpoint, verify the reported size, then persist the sync record.
        # Returns 0 on success, 1 on failure.
        if __DEBUG__:
            printlog('%s Info(%s): start upload whole file "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.name, self.__filepath))
        sycurl = SYCurl()
        retcode, responses = sycurl.request('https://c.pcs.baidu.com/rest/2.0/pcs/file', {'method': 'upload', 'access_token': SyncY.syncytoken['access_token'], 'path': self.__pcspath, 'ondup': self.__ondup}, '0-%d' % (os.stat(self.__filepath).st_size - 1), 'POST', SYCurl.Upload, self.__filepath)
        responses = json.loads(responses)
        if retcode != 200 or 'error_code' in responses:
            printlog('%s ERROR(Errno:%d): Upload file "%s" to PCS failed: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, self.__filepath, responses['error_msg']))
            return 1
        if responses['size'] == self.__fsize:
            self.__rmd5 = responses['md5']
            self.__rmtime = responses['mtime']
            self.__rsize = responses['size']
        else:
            # Size mismatch: delete the bad remote copy and report failure.
            printlog('%s ERROR: Upload file "%s" failed, remote file size not equal to local.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__filepath))
            sycurl.request('https://pcs.baidu.com/rest/2.0/pcs/file', {'method': 'delete', 'access_token': SyncY.syncytoken['access_token'], 'path': self.__pcspath}, '', 'POST', SYCurl.Normal)
            return 1
        self.__save_data()
        printlog('%s INFO: Upload file "%s" completed.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__filepath))
        return 0
    def __rapid_uploadfile(self):
        # Try PCS 'rapidupload': register the file by its checksums only.
        # If the server does not know the content (31079), fall back to a
        # whole-file or slice upload.  On success, fetch the remote meta to
        # record mtime/size.  Returns 0 on success, 1 on failure.
        if __DEBUG__:
            printlog('%s Info(%s): start rapid upload file "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.name, self.__filepath))
        crc, contentmd5, slicemd5 = self.__rapid_checkcode()
        sycurl = SYCurl()
        retcode, responses = sycurl.request('https://pcs.baidu.com/rest/2.0/pcs/file', {'method': 'rapidupload', 'access_token': SyncY.syncytoken['access_token'], 'path': self.__pcspath, 'content-length': self.__fsize, 'content-md5': contentmd5, 'slice-md5': slicemd5, 'content-crc32': crc, 'ondup': self.__ondup}, '', 'POST', SYCurl.Normal)
        responses = json.loads(responses)
        if retcode != 200 or 'error_code' in responses:
            if responses['error_code'] == 31079:
                # 31079: content md5 unknown to the server -> real upload.
                printlog('%s INFO: File md5 not found, upload the whole file "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__filepath))
                if self.__fsize <= self.__blocksize + 1048576:
                    return self.__upload_file()
                else:
                    return self.__slice_uploadfile()
            else:
                printlog('%s ERROR(Errno:%d): Rapid upload file "%s" failed: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, self.__filepath, responses['error_msg']))
                return 1
        else:
            self.__rmd5 = responses['md5']
        # Give the server a moment before asking for the file's metadata.
        time.sleep(1)
        retcode, responses = sycurl.request('https://pcs.baidu.com/rest/2.0/pcs/file', {'method': 'meta', 'access_token': SyncY.syncytoken['access_token'], 'path': self.__pcspath}, '', 'GET', SYCurl.Normal)
        responses = json.loads(responses)
        if retcode != 200 or 'error_code' in responses:
            printlog('%s ERROR(Errno:%d): File "%s" is rapid uploaded, but get remote file\'s mate failed: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, self.__filepath, responses['error_msg']))
            return 1
        responses = responses['list'][0]
        if responses['size'] == self.__fsize:
            self.__rmtime = responses['mtime']
            self.__rsize = responses['size']
        else:
            printlog('%s ERROR: File "%s" is rapid uploaded, but remote file size not equal to local.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__filepath))
            return 1
        self.__save_data()
        printlog('%s INFO: Rapid upload file "%s" completed.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__filepath))
        return 0
    def __slice_uploadfile(self):
        # Upload the file in blocks with resume support.  Progress is kept
        # in "<filepath>.db.syy" (header line 'upload:<mtime>:<size>:<enc>'
        # plus one line per slice) and mirrored into the in-memory
        # SyncY.synctask[fnmd5] list that SYThread workers consume.  When
        # all slices are up, combine them with 'createsuperfile'.
        # Returns 0 on success, 1 on failure.
        if __DEBUG__:
            printlog('%s Info(%s): start slice upload file "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.name, self.__filepath))
        # PCS caps files at 20 GB; with extraslice, reserve room for the
        # maximal random tail (160*64 bytes + trailer).
        if (self.__fsize > 20 * 1073741824 and not SyncY.extraslice) or (SyncY.extraslice and self.__fsize > 20 * 1073741824 - 10340):
            printlog('%s ERROR: File "%s" size exceeds limit, max size must equal or less than 20G.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__filepath))
            return 1
        if not os.path.exists('%s.db.syy' % self.__filepath):
            # Fresh upload: create the progress file and seed the task state.
            with open('%s.db.syy' % self.__filepath, 'w') as ulfn:
                ulfn.write('upload:%d:%d:%s\n' % (self.__fmtime, self.__fsize, SyncY.encryption))
            SyncY.synctask[self.__fnmd5].append(['upload', self.__fmtime, self.__fsize, SyncY.encryption])
        else:
            with open('%s.db.syy' % self.__filepath, 'r') as ulfn:
                ulinfo = ulfn.readline()
            if ulinfo.strip('\n') != 'upload:%d:%d:%s' % (self.__fmtime, self.__fsize, SyncY.encryption):
                # The file or the cipher changed since the interrupted run:
                # restart from scratch.
                with open('%s.db.syy' % self.__filepath, 'w') as ulfn:
                    ulfn.write('upload:%d:%d:%s\n' % (self.__fmtime, self.__fsize, SyncY.encryption))
                printlog('%s INFO: Local file "%s" is modified or encryption algorithm is modified, reupload the whole file.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__filepath))
                SyncY.synctask[self.__fnmd5].append(['upload', self.__fmtime, self.__fsize, SyncY.encryption])
            else:
                # Resume: load the header and every recorded slice state.
                with open('%s.db.syy' % self.__filepath, 'r') as ulfn:
                    SyncY.synctask[self.__fnmd5].append(ulfn.readline().strip('\n').split(':'))
                    SyncY.synctask[self.__fnmd5][0][2] = int(SyncY.synctask[self.__fnmd5][0][2])
                    ulinfo = ulfn.readline()
                    while ulinfo:
                        sliceinfo = ulinfo.strip('\n').split(':')[1:]
                        # Status '0' (in-flight when interrupted) is reset to
                        # 2 so a worker picks the slice up again.
                        if sliceinfo and sliceinfo[2] == '0':
                            sliceinfo[2] = 2
                        SyncY.synctask[self.__fnmd5].append([int(sliceinfo[0]), int(sliceinfo[1]), int(sliceinfo[2]), sliceinfo[3]])
                        ulinfo = ulfn.readline()
                printlog('%s INFO: Resuming slice upload file "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__filepath))
        # Spin up worker threads and wait for them to signal completion.
        threadcond = threading.Condition()
        if threadcond.acquire():
            maxthnum = int(self.__fsize / self.__blocksize)
            if SyncY.extraslice and maxthnum == 0:
                maxthnum = 1
            elif maxthnum > SyncY.config['threadnumber']:
                maxthnum = SyncY.config['threadnumber']
            # Slot 4: live worker count; slot 5: failed-slice collector.
            SyncY.synctask[self.__fnmd5][0].append(maxthnum)
            SyncY.synctask[self.__fnmd5][0].append([])
            for i in range(maxthnum):
                sythread = SYThread(threadcond, self.__fnmd5, self.__filepath, self.__pcspath, self.__blocksize)
                sythread.start()
            if SyncY.synctask[self.__fnmd5][0][4] > 0:
                threadcond.wait()
            threadcond.release()
        if __DEBUG__:
            printlog('%s Info(%s): all threads is exit for upload file "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.name, self.__filepath))
        if len(SyncY.synctask[self.__fnmd5][0][5]) > 0:
            printlog('%s ERROR: Slice upload file "%s" failed.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__filepath))
            return 1
        # Collect the slice md5s (entries 1..n, field 3) in order.
        param = {'block_list': []}
        for i in xrange(1, len(SyncY.synctask[self.__fnmd5]), 1):
            param['block_list'].append(SyncY.synctask[self.__fnmd5][i][3])
        tmpsize = 0
        if SyncY.extraslice:
            # Append a random throw-away slice to disguise the true size.
            tmpfile = '%s/syncy-%s-extraslice.tmp' % (__TMP_DIR__, self.name)
            tmpsize = self.__create_random_tmpfile(tmpfile, self.__fsize)
            if tmpsize > 0:
                sycurl = SYCurl()
                retcode, responses = sycurl.request('https://c.pcs.baidu.com/rest/2.0/pcs/file', {'method': 'upload', 'access_token': SyncY.syncytoken['access_token'], 'type': 'tmpfile'}, '%d-%d' % (0, tmpsize - 1), 'POST', SYCurl.Upload, tmpfile)
                os.remove(tmpfile)
                responses = json.loads(responses)
                if retcode == 200:
                    param['block_list'].append(responses['md5'])
                else:
                    printlog('%s ERROR(Errno:%d): Upload file "%s"\'s extra slice failed: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, self.__filepath, responses['error_msg']))
                    return 1
            else:
                printlog('%s ERROR: Upload file "%s"\'s extra slice failed.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__filepath))
                return 1
        # Combine all uploaded slices into the final remote file.
        sycurl = SYCurl()
        retcode, responses = sycurl.request('https://pcs.baidu.com/rest/2.0/pcs/file', {'method': 'createsuperfile', 'access_token': SyncY.syncytoken['access_token'], 'path': self.__pcspath, 'ondup': self.__ondup}, {'param': json.dumps(param)}, 'POST', SYCurl.Normal)
        responses = json.loads(responses)
        if retcode != 200 or 'error_code' in responses:
            printlog('%s ERROR(Errno:%d): Create superfile "%s" failed: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, self.__filepath, responses['error_msg']))
            return 1
        os.remove('%s.db.syy' % self.__filepath)
        if responses['size'] == self.__fsize + tmpsize:
            self.__rmd5 = responses['md5']
            self.__rmtime = responses['mtime']
            self.__rsize = responses['size']
        else:
            printlog('%s ERROR: Slice upload file "%s" failed, remote file size not equal to local.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__filepath))
            sycurl.request('https://pcs.baidu.com/rest/2.0/pcs/file', {'method': 'delete', 'access_token': SyncY.syncytoken['access_token'], 'path': self.__pcspath}, '', 'POST', SYCurl.Normal)
            return 1
        self.__save_data()
        printlog('%s INFO: Slice upload file "%s" completed.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__filepath))
        return 0
    def __download_file(self):
        """Download the remote file at ``self.__pcspath`` into ``self.__filepath``.

        Small files (<= blocksize + 1 MiB) are fetched in a single request;
        larger files are split into slices downloaded by a pool of SYThread
        workers that coordinate through SyncY.synctask and a shared Condition.
        Progress is journaled in "<path>.db.syy" so interrupted downloads can
        resume; data is staged in "<path>.syy" and renamed into place on
        success.

        Returns 0 on success, 1 on any failure.
        """
        if __DEBUG__:
            printlog('%s Info(%s): start download file "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.name, self.__filepath))
        # Resume bookkeeping: the journal's first line records the remote
        # md5/size and the encryption mode a partial download was made with.
        # If any of those changed, the partial data is useless - start over.
        if os.path.exists('%s.db.syy' % self.__filepath) and os.path.exists('%s.syy' % self.__filepath):
            with open('%s.db.syy' % self.__filepath, 'r') as dlfn:
                dlinfo = dlfn.readline()
            if dlinfo.strip('\n') != 'download:%s:%d:%s' % (self.__rmd5, self.__rsize, SyncY.encryption):
                with open('%s.db.syy' % self.__filepath, 'w') as dlfn:
                    dlfn.write('download:%s:%d:%s\n' % (self.__rmd5, self.__rsize, SyncY.encryption))
                printlog('%s INFO: Remote file:"%s" is modified or encryption algorithm is modified, redownload the whole file.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__pcspath))
                os.remove(self.__filepath)
            else:
                # Journal matches the remote file: keep the partial .syy data.
                if os.path.exists('%s.syy' % self.__filepath):
                    printlog('%s INFO: Resuming download file "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__pcspath))
                else:
                    with open('%s.db.syy' % self.__filepath, 'w') as dlfn:
                        dlfn.write('download:%s:%d:%s\n' % (self.__rmd5, self.__rsize, SyncY.encryption))
        else:
            # Fresh download: create the journal from scratch.
            with open('%s.db.syy' % self.__filepath, 'w') as dlfn:
                dlfn.write('download:%s:%d:%s\n' % (self.__rmd5, self.__rsize, SyncY.encryption))
        if not os.path.exists('%s.syy' % self.__filepath) and self.__create_emptyfile() == 1:
            return 1
        if self.__rsize > 0:
            if self.__rsize <= self.__blocksize + 1048576:
                # Small file: one ranged GET for the whole content.
                if __DEBUG__:
                    printlog('%s Info(%s): start download whole file "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.name, self.__filepath))
                sycurl = SYCurl()
                retcode, responses = sycurl.request('https://d.pcs.baidu.com/rest/2.0/pcs/file', {'method': 'download', 'access_token': SyncY.syncytoken['access_token'], 'path': self.__pcspath}, '0-%d' % (self.__rsize - 1), 'GET', SYCurl.Download, '%s.syy' % self.__filepath)
                # A non-empty response body on download means the server
                # returned a JSON error instead of file data.
                if (retcode != 200 and retcode != 206) or responses != '':
                    if __DEBUG__:
                        printlog('%s Info(%s): download file "%s" failed: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.name, self.__filepath, responses))
                    responses = json.loads(responses)
                    printlog('%s ERROR(Errno:%d): Download file "%s" failed: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, self.__pcspath, responses['error_msg']))
                    return 1
            else:
                # Large file: load the journal into SyncY.synctask[fnmd5]:
                # entry 0 is the header [op, md5, size, enc], entries 1..n are
                # slices [start, end, status, md5]. Status 0 (in-flight when
                # the previous run died) is demoted to 2 (retry).
                with open('%s.db.syy' % self.__filepath, 'r') as dlfn:
                    SyncY.synctask[self.__fnmd5].append(dlfn.readline().strip('\n').split(':'))
                    SyncY.synctask[self.__fnmd5][0][2] = int(SyncY.synctask[self.__fnmd5][0][2])
                    dlinfo = dlfn.readline()
                    while dlinfo:
                        sliceinfo = dlinfo.strip('\n').split(':')[1:]
                        if sliceinfo and sliceinfo[2] == '0':
                            sliceinfo[2] = 2
                        SyncY.synctask[self.__fnmd5].append([int(sliceinfo[0]), int(sliceinfo[1]), int(sliceinfo[2]), sliceinfo[3]])
                        dlinfo = dlfn.readline()
                threadcond = threading.Condition()
                if threadcond.acquire():
                    # Never start more workers than there are whole blocks.
                    maxthnum = int(self.__rsize / self.__blocksize)
                    if maxthnum > SyncY.config['threadnumber']:
                        maxthnum = SyncY.config['threadnumber']
                    # Header slots appended here: [4] = live thread count,
                    # [5] = list of failed slice indices.
                    SyncY.synctask[self.__fnmd5][0].append(maxthnum)
                    SyncY.synctask[self.__fnmd5][0].append([])
                    for i in range(maxthnum):
                        sythread = SYThread(threadcond, self.__fnmd5, self.__filepath, self.__pcspath, self.__blocksize)
                        sythread.start()
                    # Wait until the last worker decrements [0][4] to zero
                    # and notifies (see SYThread.run's finally block).
                    if SyncY.synctask[self.__fnmd5][0][4] > 0:
                        threadcond.wait()
                    threadcond.release()
                if __DEBUG__:
                    printlog('%s Info(%s): all threads is exit for download file "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.name, self.__filepath))
                if len(SyncY.synctask[self.__fnmd5][0][5]) > 0:
                    printlog('%s ERROR: Download file "%s" failed.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__pcspath))
                    return 1
                # The last recorded slice must end exactly at the last byte.
                if int(SyncY.synctask[self.__fnmd5][len(SyncY.synctask[self.__fnmd5]) - 1][1]) != self.__rsize - 1:
                    printlog('%s ERROR: Download file "%s" failed, not download all slice.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__pcspath))
                    return 1
        os.remove('%s.db.syy' % self.__filepath)
        if os.name == 'posix':
            # Inherit owner/group from the parent directory; strip execute bits.
            pmeta = os.stat(os.path.dirname('%s.syy' % self.__filepath))
            os.lchown('%s.syy' % self.__filepath, pmeta.st_uid, pmeta.st_gid)
            os.chmod('%s.syy' % self.__filepath, pmeta.st_mode - stat.S_IXUSR - stat.S_IXGRP - stat.S_IXOTH)
        fmeta = os.stat('%s.syy' % self.__filepath)
        if fmeta.st_size != self.__rsize:
            printlog('%s ERROR: Download file "%s" failed, downloaded file size not equal to remote file size.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__pcspath))
            os.remove('%s.syy' % self.__filepath)
            return 1
        if SyncY.extraslice:
            # The uploader may have appended a '#'-delimited trailer ("extra
            # slice") after the payload; validate it and truncate it off.
            with open('%s.syy' % self.__filepath, 'rb+') as f:
                if self.__rsize > 50:
                    f.seek(self.__rsize - 36)
                    extraslice = f.read()
                    if extraslice:
                        extraslice = extraslice.split('#')
                        if len(extraslice) == 5 and extraslice[1] == 'syy':
                            if self.__rsize == int(extraslice[3]) + int(extraslice[4]) + len(extraslice[3]) + len(extraslice[4]) + 8:
                                f.seek(int(extraslice[3]))
                                f.truncate(int(extraslice[3]))
                            else:
                                printlog('%s ERROR: Remove file "%s"\'s extra slice failed, extra slice valid failed, please manual check and remove extra slice.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__filepath))
        if self.__rmtime != 0:
            # Mirror the remote mtime so future syncs see the files as equal.
            os.utime('%s.syy' % self.__filepath, (self.__rmtime, self.__rmtime))
            self.__fmtime = self.__rmtime
        # NOTE(review): recorded before any trailer truncation above, so this
        # may include the trailer bytes - confirm against __save_data usage.
        self.__fsize = fmeta.st_size
        rename('%s.syy' % self.__filepath, self.__filepath)
        self.__save_data()
        printlog('%s INFO: Download file "%s" completed.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__pcspath))
        return 0
class SYThread(threading.Thread):
    """Worker thread that transfers file slices for a single sync task.

    Several SYThread instances cooperate on one entry of SyncY.synctask,
    keyed by ``fnmd5``. Entry [0] is the task header
    [operation, md5, filesize, encryption, live-thread-count, failed-indices];
    entries [1:] are slices [startpos, endpos, status, md5] where status is
    0 = in flight, 1 = done, 2 = failed. All access to the shared task list
    is serialized through the Condition passed as ``threadcond``.
    """

    def __init__(self, threadcond, fnmd5, filepath, pcspath, blocksize):
        threading.Thread.__init__(self)
        self.__threadcond = threadcond  # Condition guarding SyncY.synctask
        self.__fnmd5 = fnmd5            # key into SyncY.synctask
        self.__filepath = filepath      # local file path
        self.__pcspath = pcspath        # remote (PCS) file path
        self.__blocksize = blocksize    # preferred slice size in bytes

    def run(self):
        """Repeatedly claim a slice, transfer it, and record the result.

        Returns 0 when no work remains, 1 on a fatal condition. The
        ``finally`` block always decrements the task's live-thread counter
        and notifies the coordinator when the last worker exits.
        """
        if __DEBUG__:
            printlog('%s Info(%s): start thread for %s: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.name, SyncY.synctask[self.__fnmd5][0][0], self.__filepath))
        idx = 0
        try:
            # Claim the first slice under the lock.
            if self.__threadcond.acquire():
                idx, startpos, endpos = self.__get_next_slice()
                self.__save_status()
                self.__threadcond.release()
            retcode = 0
            responses = None
            sycurl = SYCurl()
            while True:
                # idx == 0: no more slices; idx == -1: scheduler error.
                if idx == 0:
                    return 0
                elif idx == -1:
                    return 1
                if SyncY.synctask[self.__fnmd5][0][0] == 'upload':
                    if __DEBUG__:
                        printlog('%s Info(%s): Start upload slice(idx:%d) for "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.name, idx, self.__filepath))
                    retcode, responses = sycurl.request('https://c.pcs.baidu.com/rest/2.0/pcs/file', {'method': 'upload', 'access_token': SyncY.syncytoken['access_token'], 'type': 'tmpfile'}, '%d-%d' % (startpos, endpos), 'POST', SYCurl.Upload, self.__filepath)
                    responses = json.loads(responses)
                    if retcode != 200 or 'error_code' in responses:
                        printlog('%s ERROR(Errno:%d): Slice upload file "%s" failed: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, self.__filepath, responses['error_msg']))
                        status = 2
                    else:
                        status = 1
                        if __DEBUG__:
                            printlog('%s Info(%s): Complete upload slice(idx:%d) for "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.name, idx, self.__filepath))
                elif SyncY.synctask[self.__fnmd5][0][0] == 'download':
                    if __DEBUG__:
                        printlog('%s Info(%s): Start download slice(idx:%d) for "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.name, idx, self.__filepath))
                    retcode, responses = sycurl.request('https://d.pcs.baidu.com/rest/2.0/pcs/file', {'method': 'download', 'access_token': SyncY.syncytoken['access_token'], 'path': self.__pcspath}, '%d-%d' % (startpos, endpos), 'GET', SYCurl.Download, '%s.syy' % self.__filepath)
                    # A non-empty body on download means a JSON error payload.
                    if (retcode != 200 and retcode != 206) or responses != '':
                        if __DEBUG__:
                            printlog('%s Info(%s): Slice download(idx:%d) for "%s" failed: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.name, idx, self.__filepath, responses))
                        responses = json.loads(responses)
                        printlog('%s ERROR(Errno:%d): Slice download file "%s" failed: %s.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, self.__pcspath, responses['error_msg']))
                        status = 2
                    else:
                        status = 1
                        if __DEBUG__:
                            printlog('%s Info(%s): Complete download slice(idx:%d) for "%s".' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.name, idx, self.__filepath))
                else:
                    printlog('%s ERROR: Unknown operation(%s) of threading operation.' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), SyncY.synctask[self.__fnmd5][0][0]))
                    return 1
                # Publish the result and claim the next slice atomically.
                if self.__threadcond.acquire():
                    SyncY.synctask[self.__fnmd5][idx][2] = status
                    if status == 1 and SyncY.synctask[self.__fnmd5][0][0] == 'upload':
                        SyncY.synctask[self.__fnmd5][idx][3] = responses['md5']
                    elif status == 2:
                        SyncY.synctask[self.__fnmd5][0][5].append(idx)
                    idx, startpos, endpos = self.__get_next_slice()
                    self.__save_status()
                    self.__threadcond.release()
                # Bail out if too many slices have failed overall.
                if len(SyncY.synctask[self.__fnmd5][0][5]) > SyncY.config['threadnumber'] * 4:
                    return 1
                retcode = 0
                responses = None
        except Exception, e:
            printlog('%s ERROR: Transfer thread exception error occurred. return code: %d, response body: %s.\n%s .\n%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), retcode, str(responses), e, traceback.format_exc()))
        finally:
            if self.__threadcond.acquire():
                # A positive idx here means this thread still owned a slice
                # when it stopped: mark it failed so it can be retried.
                if idx > 0:
                    SyncY.synctask[self.__fnmd5][idx][2] = 2
                    SyncY.synctask[self.__fnmd5][0][5].append(idx)
                    self.__save_status()
                SyncY.synctask[self.__fnmd5][0][4] -= 1
                # Last worker out wakes the coordinator in __download_file.
                if SyncY.synctask[self.__fnmd5][0][4] == 0:
                    self.__threadcond.notify()
                self.__threadcond.release()

    def __save_status(self):
        """Persist the task state to "<path>.db.syy" (write temp + rename).

        Caller must hold the Condition; the temp-file/fsync/rename dance
        keeps the journal consistent even if the process dies mid-write.
        """
        try:
            with open('%s.dbtmp.syy' % self.__filepath, 'w') as dbnew:
                dbnew.write('%s:%s:%d:%s\n' % (SyncY.synctask[self.__fnmd5][0][0], SyncY.synctask[self.__fnmd5][0][1], SyncY.synctask[self.__fnmd5][0][2], SyncY.synctask[self.__fnmd5][0][3]))
                for i in xrange(1, len(SyncY.synctask[self.__fnmd5]), 1):
                    dbnew.write('%d:%d:%d:%d:%s\n' % (i, SyncY.synctask[self.__fnmd5][i][0], SyncY.synctask[self.__fnmd5][i][1], SyncY.synctask[self.__fnmd5][i][2], SyncY.synctask[self.__fnmd5][i][3]))
                dbnew.flush()
                os.fsync(dbnew.fileno())
            rename('%s.dbtmp.syy' % self.__filepath, '%s.db.syy' % self.__filepath)
        except Exception, e:
            printlog('%s ERROR: Exception error occurred on save task status(%s). \n%s .\n%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__filepath, e, traceback.format_exc()))

    def __save_status2(self, idx, startpos=0, endpos=0, status=0, rmd5='0' * 32):
        """Write one slice record as a fixed 35-byte binary struct.

        NOTE(review): opens the journal with 'wb', which truncates the whole
        file before seeking - this looks experimental/unused; confirm before
        relying on it.
        """
        with open('%s.db.syy' % self.__filepath, 'wb') as dbfile:
            if idx < 1:
                return
            dbfile.seek(idx * 35)
            dbfile.write(struct.pack('>H2qB16s', idx, startpos, endpos, status, rmd5.decode('hex')))
            dbfile.flush()
            os.fsync(dbfile.fileno())
            dbfile.close()

    def __get_next_slice(self):
        """Pick the next slice to transfer; caller must hold the Condition.

        First retries any slice marked failed (status not in {0, 1}) that is
        not blacklisted in the header's failed list; otherwise appends a new
        slice after the last recorded byte. Returns (idx, startpos, endpos),
        (0, 0, 0) when the file is complete, or (-1, 0, 0) on error.
        """
        try:
            idx, startpos, endpos = (0, 0, 0)
            for i in xrange(1, len(SyncY.synctask[self.__fnmd5]), 1):
                if SyncY.synctask[self.__fnmd5][i][2] not in [1, 0] and i not in SyncY.synctask[self.__fnmd5][0][5]:
                    idx = i
                    startpos = SyncY.synctask[self.__fnmd5][i][0]
                    endpos = SyncY.synctask[self.__fnmd5][i][1]
                    # Mark in-flight so no other worker claims it.
                    SyncY.synctask[self.__fnmd5][i][2] = 0
                    break
            if idx == 0:
                idx = len(SyncY.synctask[self.__fnmd5])
                if idx == 1:
                    startpos = 0
                else:
                    startpos = SyncY.synctask[self.__fnmd5][idx - 1][1] + 1
                filesize = SyncY.synctask[self.__fnmd5][0][2]
                if startpos == filesize:
                    return 0, 0, 0
                elif filesize - startpos > self.__blocksize + 1048576:
                    endpos = startpos + self.__blocksize - 1
                else:
                    # Fold a short tail (< 1 MiB) into the final slice.
                    endpos = filesize - 1
                SyncY.synctask[self.__fnmd5].append([startpos, endpos, 0, '0'])
            return idx, startpos, endpos
        except Exception, e:
            printlog('%s ERROR: Exception error occurred on get next slice of task(%s). \n%s .\n%s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), self.__filepath, e, traceback.format_exc()))
            return -1, 0, 0
if __name__ == '__main__':
    # CLI entry point: `syncy.py version` prints the version string;
    # any other invocation runs a sync with the remaining arguments.
    args = sys.argv[1:]
    if args and args[0] == 'version':
        print(__VERSION__)
    else:
        syncer = SyncY(args)
        syncer.start()
        sys.exit(0)
| openwrt-stuff/openwrt-extra | luci/applications/luci-syncy/root/usr/bin/syncy.py | Python | gpl-2.0 | 130,044 | [
"VisIt"
] | 833c06a2c73989aa1cde78014dc87d47670f6e4e22423b5d5a9bb6e4c80b521b |
'''
Filters for preprocessing lingual ultrasound data for dimensionality reduction.
TODO: include generalized dimensionality reduction utilities here.
'''
import numpy as np
from scipy.ndimage import median_filter
from scipy.ndimage.filters import gaussian_laplace
def normalize(frame):
    """
    Normalize input image to range 0,1 and cast to float.

    Inputs: frame, an image as an ndarray (any numeric dtype)
    Outputs: float ndarray with the minimum mapped to 0.0 and the
             maximum mapped to 1.0. A constant frame (max == min) is
             returned as all zeros rather than dividing by zero and
             producing NaNs.
    """
    mx = float(np.amax(frame))
    mn = float(np.amin(frame))
    if mx == mn:
        # Degenerate (flat) frame: the original 0/0 division produced NaNs.
        return np.zeros_like(frame, dtype=float)
    return (frame - mn) / (mx - mn)
def norm_check(frame):
    """
    Check if a frame consists of floats normalized on 0,1.

    Inputs: frame, an ndarray
    Raises: TypeError if the dtype is not a floating type;
            ValueError if any value lies outside [0, 1].
    """
    if not np.issubdtype(frame.dtype, np.floating):
        raise TypeError("Input data must be float arrays")
    # Parenthesize both range tests together: the original expression
    # `not A.all() and B.all()` bound `not` only to the lower-bound test,
    # so frames containing values > 1 were silently accepted.
    if not ((frame >= 0.).all() and (frame <= 1.).all()):
        raise ValueError("Input data must be normalized to range 0,1")
def srad(frame, n_iter=300, lbda=0.05):
    '''
    Speckle-reducing anisotropic diffusion filter to reduce noise
    typical of ultrasound images. Derived from MATLAB code in
    Chris Carignan's TRACTUS repo
    (https://github.com/ChristopherCarignan/TRACTUS/, in SRAD.m)
    which is in turn derived from the original algorithm in
    Yu, Y. & Acton, S. (2002), "Speckle Reducing Anisotropic
    Diffusion", IEEE Transactions on Image Processing 11(11),
    DOI 10.1109/TIP.2002.804276.
    Inputs: frame, an ultrasound frame (2-D ndarray)
      n_iter: number of iterations (Y&A use 300)
      lbda: lambda, AKA delta-t in Y&A (who use 0.05)
    Outputs: J, filtered ultrasound frame.
    '''
    # checks on I for number/type
    # TODO
    # scale to [0,1]
    I = normalize(frame)
    # get image size
    M,N = I.shape
    # image indices, using boundary conditions
    # (each index array duplicates the edge row/column so that the
    # neighbor differences below are zero at the image border)
    iN = np.concatenate((np.arange(0, 1), np.arange(0, M-1)), axis=0)
    iS = np.concatenate((np.arange(1, M), np.arange(M-1, M)), axis=0)
    jW = np.concatenate((np.arange(0, 1), np.arange(0, N-1)), axis=0)
    jE = np.concatenate((np.arange(1, N), np.arange(N-1, N)), axis=0)
    # log uncompress
    I = np.exp(I)
    # the algorithm itself
    for n in range(0,n_iter):
        # speckle scale fcn
        # (global coefficient of variation; Y&A estimate this over a
        # uniform region - here the whole frame is used instead)
        # IC = I.copy()
        # Iuniform = IC.crop(rect)
        q0_squared = np.var(I) / (np.mean(I)**2)
        # differences, element-by-element along each row moving from given direction (N, S, E, W)
        dN = I[iN,:] - I
        dS = I[iS,:] - I
        dW = I[:,jW] - I
        dE = I[:,jE] - I
        # normalized discrete gradient magnitude squared (Yu and Acton eqn. 52, 53)
        G2 = (dN**2 + dS**2 + dW**2 + dE**2) / I**2
        # normalized discrete Laplacian (eqn. 54)
        L = (dN + dS + dW + dE) / I
        # instantaneous coefficient of variation (ICOV) (eqns. 31/35)
        # NOTE(review): under Python 2, (1/16) is integer division == 0;
        # this module appears intended for Python 3 - confirm.
        num = (.5*G2) - ((1/16)*(L**2))
        den = (1. + ((.25)*L))**2
        # np.spacing(1) is machine epsilon, guarding the division
        q_squared = num / (den + np.spacing(1))
        # diffusion coefficient (eqn. 33) # TODO why is this also "den"?
        den = (q_squared - q0_squared) / (q0_squared * (q0_squared + 1) + np.spacing(1))
        c = 1 / (den + 1)
        # saturate diffusion coefficient
        # NOTE(review): this binarizes c to {0,1} rather than clipping it
        # to [0,1]; check against TRACTUS SRAD.m before changing.
        c = np.where(c>0, 1, 0)
        # divergence (eqn. 58)
        cS = c[iS,:]
        cE = c[:,jE]
        D = (c * dN) + (cS * dS) + (c * dW) + (cE * dE)
        # SRAD update fcn (eqn. 61)
        I = I + (lbda/4) * D
    # log (re)compress
    J = np.log(I)
    return J
def clean_frame(frame, median_radius=6, log_sigma=4):
    """
    Cleanup function to be run on SRAD output. Median filter for
    further denoising, followed by edge sharpening with a Laplacian
    of Gaussian (LoG) mask.
    Inputs: ndarray image, filter kernel settings
      median_radius: median filter radius; should be odd integer
      log_sigma: LoG sigma; controls kernel size
    Output: cleaned; a processed ndarray
    """
    # TODO provide default for median_radius that is
    # sensitive to image dimensions
    norm_check(frame)
    # median filter
    cleaned = median_filter(frame, median_radius)
    # add LoG, protecting against overflow
    logmask = gaussian_laplace(cleaned, log_sigma)
    # Saturating-add idiom: clamp `cleaned` wherever adding the LoG
    # response would exceed the dtype's maximum, then add the response.
    # NOTE(review): norm_check guarantees a float dtype, whose finfo max
    # is astronomically large, so the clamp can never trigger here - this
    # looks ported from integer-image code; confirm intended behavior.
    frame_ceil = np.finfo(frame.dtype).max
    logmask = frame_ceil - logmask
    np.putmask(cleaned, logmask < cleaned, logmask)
    # frame_ceil - logmask restores the original LoG response.
    cleaned += frame_ceil - logmask
    return cleaned
def noise_mask(frame):
    """
    Adds uniform random noise to a normalized image. Possible processing
    step to be carried out before SRAD.
    Inputs:
      frame - ultrasound image, floats normalized to range 0,1
    Outputs:
      noised - ultrasound image with added random noise, clipped back
               into range 0,1
    """
    norm_check(frame)
    # np.random.random_sample takes a shape and draws from [0, 1).
    # (The original called random_sample(0, 1, size=...), an invalid
    # signature, and then added the function object `noise_mask` itself
    # instead of the sampled array.)
    noisemask = np.random.random_sample(frame.shape)
    # Clip so the result stays a valid normalized frame (resolves the
    # original "truncate so no > 1" TODO).
    noised = np.clip(frame + noisemask, 0.0, 1.0)
    return noised
def roi(frame, lower, upper, left, right):
    """
    Defines region of interest along ultrasound scan lines; returns
    boolean array in which 1 indicates an area inside the RoI
    and 0 outside the RoI. Can be multiplied with frame to mask.
    Inputs:
      frame: ultrasound data in ndarray
      lower: bound of RoI further away from probe
      upper: bound of RoI closer to probe
      left: left bound of RoI
      right: right bound of RoI
    Outputs:
      mask: ndarray of same shape as frame containing mask
    """
    # Validate bounds up front (guard clauses).
    if lower >= upper:
        raise ValueError("ROI lower bound must be smaller than upper bound")
    if left >= right:
        raise ValueError("ROI left bound must be smaller than right bound")
    # Start from an all-zero frame-shaped array, then switch on the RoI.
    selection = np.zeros_like(frame)
    selection[lower:upper, left:right] = 1
    return selection
def reconstruct_frame(vectors, values, num_components, image_shape, rescale=1):
    '''
    Reconstruct basis data from eigenvalues (transformed data) and
    eigenvectors (from PCA).
    Assuming a sklearn.decomposition.PCA object called "pca" and some basis data, inputs are:
      vectors: Eigenvectors, from pca.components_
      values: Eigenvalues for a token in basis data, AKA a first-level element of pca.transform(basisdata).
        May also be a 2-D subset (multiple tokens), in which case the mean of each eigenvalue is used.
      num_components: Number of PCs, from pca.n_components.
      image_shape: Height and width of the images in the basis data (e.g. basisdata[0].shape);
        determines the dimensions of the "eigentongues" used in reconstruction.
      rescale: scalar multiplier applied to the result for display purposes;
        defaults to 1 (no rescaling).
    '''
    # Collapse multiple tokens to their per-component mean.
    eigvals = np.mean(values, axis=0) if values.ndim > 1 else values
    # Sum each eigentongue weighted by its eigenvalue.
    recon = None
    for comp in range(num_components):
        eigentongue = vectors[comp].reshape(image_shape) * eigvals[comp]
        recon = eigentongue if recon is None else recon + eigentongue
    return rescale * recon
# TODO: group frames into training/test from a PD DataFrame
# TODO: PCA on arrays in short dimension (ideally, on frame bundles) - linked DataFrame?
# TODO: LDA on arrays - but what kind of object? DataFrame?
| mfaytak/image-pca | imgphon/ultrasound.py | Python | bsd-2-clause | 7,181 | [
"Gaussian"
] | 48680df857d571e7f1088d007f51f204ebef2c43c706ace446ccc4e1f3e47981 |
__author__ = "Rick Sherman"
import unittest2 as unittest
from nose.plugins.attrib import attr
from mock import patch
import os
import json
from jnpr.junos import Device
from jnpr.junos.factory.to_json import PyEzJSONEncoder, TableJSONEncoder, TableViewJSONEncoder
from jnpr.junos.op.routes import RouteSummaryTable
from ncclient.manager import Manager, make_device_handler
from ncclient.transport import SSHSession
@attr('unit')
class TestToJson(unittest.TestCase):
    """Unit tests for the PyEZ JSON encoders and Table/View to_json output.

    All device traffic is mocked: ncclient's connect and Device.execute are
    patched to serve canned XML replies from the local ``rpc-reply``
    directory, so no network access occurs.
    """

    @patch('ncclient.manager.connect')
    def setUp(self, mock_connect):
        # Route ncclient's connect through _mock_manager so Device.open()
        # builds a Manager around a local SSHSession instead of dialing out.
        mock_connect.side_effect = self._mock_manager
        self.dev = Device(host='1.1.1.1', user='rick', password='password123',
                          gather_facts=False)
        self.dev.open()

    def test_pyez_encoder_default(self):
        # default() must raise TypeError for types it cannot serialize.
        with self.assertRaises(TypeError):
            PyEzJSONEncoder.default(PyEzJSONEncoder(), 'test')

    def test_table_encoder_default(self):
        with self.assertRaises(TypeError):
            TableJSONEncoder.default(TableJSONEncoder(), 'test')

    def test_view_encoder_default(self):
        with self.assertRaises(TypeError):
            TableViewJSONEncoder.default(TableViewJSONEncoder(), 'test')

    @patch('jnpr.junos.Device.execute')
    def test_table_json(self, mock_execute):
        # Whole-table serialization: every routing instance appears.
        mock_execute.side_effect = self._mock_manager
        rst = RouteSummaryTable(self.dev)
        rst.get()
        resp = rst.to_json()
        j = '{"ISP-1.inet.0": {"proto": {"Local": {"count": 1, "active": 1}, ' \
            '"Direct": {"count": 3, "active": 3}}, "dests": 4, "holddown": 0, "active": 4, "hidden": 0, "total": 4}, ' \
            '"ISP-2.inet.0": {"proto": {"Local": {"count": 1, "active": 1}, "Direct": {"count": 3, "active": 3}}, ' \
            '"dests": 4, "holddown": 0, "active": 4, "hidden": 0, "total": 4}, "inet.0": {"proto": {"Static": {"count": 1, "active": 1}, ' \
            '"Local": {"count": 4, "active": 4}, "Direct": {"count": 4, "active": 3}}, "dests": 8, "holddown": 0, "active": 8, "hidden": 0, "total": 9}}'
        self.assertEqual(resp, j)

    @patch('jnpr.junos.Device.execute')
    def test_view_json(self, mock_execute):
        # Single-view serialization: only the selected instance appears.
        mock_execute.side_effect = self._mock_manager
        rst = RouteSummaryTable(self.dev)
        rst.get()
        resp = rst["ISP-1.inet.0"].to_json()
        j = '{"ISP-1.inet.0": {"proto": {"Local": {"count": 1, "active": 1}, "Direct": {"count": 3, "active": 3}}, ' \
            '"dests": 4, "holddown": 0, "active": 4, "hidden": 0, "total": 4}}'
        self.assertEqual(resp, j)

    @patch('jnpr.junos.Device.execute')
    def test_json_rpc(self, mock_execute):
        # RPC replies must be directly json.dumps-able.
        mock_execute.side_effect = self._mock_manager
        resp = self.dev.rpc.get_software_information()
        j = '{"package-information": {"comment": "JUNOS Software Release [12.1X46-D15.3]", "name": "junos"}, ' \
            '"host-name": "firefly", "product-model": "firefly-perimeter", "product-name": "firefly-perimeter"}'
        self.assertEqual(json.dumps(resp), j)

    def _read_file(self, fname):
        """Load a canned RPC reply from tests/unit/factory/rpc-reply/."""
        from ncclient.xml_ import NCElement
        fpath = os.path.join(os.path.dirname(__file__),
                             'rpc-reply', fname)
        foo = open(fpath).read()
        rpc_reply = NCElement(foo, self.dev._conn.
                              _device_handler.transform_reply())\
            ._NCElement__doc[0]
        return rpc_reply

    def _mock_manager(self, *args, **kwargs):
        """side_effect hook: build a Manager on connect, or serve the
        canned reply named after the requested RPC tag on execute."""
        if kwargs:
            device_params = kwargs['device_params']
            device_handler = make_device_handler(device_params)
            session = SSHSession(device_handler)
            return Manager(session, device_handler)
        if args:
            return self._read_file(args[0].tag + '.xml')
| JamesNickerson/py-junos-eznc | tests/unit/factory/test_to_json.py | Python | apache-2.0 | 3,775 | [
"Firefly"
] | 8ad6c906d21a505dd6baed3f0a4a8f9557354d4c905a56065062f9df6f0f3809 |
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2009 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
# If you enable unicode_literals, some stuff needs fixing for win+py2, see #7162
#from __future__ import print_function, unicode_literals
import gettext
import sys
import os
import codecs
import locale
import collections
import logging
LOG = logging.getLogger("." + __name__)
LOG.propagate = True
HAVE_ICU = False
_icu_err = None
_hdlr = None
# GrampsLocale initialization comes before command-line argument
# passing, so one must set the log level directly. The default is
# logging.WARN. Uncomment the following to change it to logging.DEBUG:
# LOG.setLevel(logging.DEBUG)
try:
from icu import Locale, Collator
HAVE_ICU = True
except ImportError:
try:
from PyICU import Locale, Collator
HAVE_ICU = True
except ImportError as err:
# No logger, save the warning message for later.
_icu_err = ("ICU not loaded because %s. Localization will be impaired. "
"Use your package manager to install PyICU" % str(err))
ICU_LOCALES = None
if HAVE_ICU:
ICU_LOCALES = Locale.getAvailableLocales()
# Map of languages for converting to Microsoft locales and naming
# locales for display to the user. It's important to add to this list
# when a new translation is added. Note the dummy _(): That's just to
# get xgettext to include the string in gramps.pot; actual translation
# is done in _get_language_string() below.
# (The gramps officially-supported language list is ALL_LINGUAS in setup.py)
_ = lambda x: x
_LOCALE_NAMES = {
'ar': ('Arabic_Saudi Arabia', '1256', _("Arabic")),
'bg': ('Bulgrian_Bulgaria', '1251', _("Bulgarian")),
'br': (None, None, _("Breton")), #Windows has no translation for Breton
'ca': ('Catalan_Spain', '1252', _("Catalan")),
'cs': ('Czech_Czech Republic', '1250', _("Czech")),
'da': ('Danish_Denmark', '1252', _("Danish")),
'de': ('German_Germany', '1252', _("German")),
'el': ('Greek_Greece', '1253', _("Greek")),
'en': ('English_United States', '1252', _("English (USA)")),
'en_GB': ('English_United Kingdom', '1252', _("English")),
'eo': (None, None, _("Esperanto")), #Windows has no translation for Esperanto
'es': ('Spanish_Spain', '1252', _("Spanish")),
'fi': ('Finnish_Finland', '1252', _("Finnish")),
'fr': ('French_France', '1252', _("French")),
'ga': (None, None, _("Gaelic")), #Windows has no translation for Gaelic
'he': ('Hebrew_Israel', '1255', _("Hebrew")),
'hr': ('Croatian_Croatia', '1250', _("Croatian")),
'hu': ('Hungarian_Hungary', '1250', _("Hungarian")),
'it': ('Italian_Italy', '1252', _("Italian")),
'ja': ('Japanese_Japan', '932', _("Japanese")),
'lt': ('Lithuanian_Lithuania', '1252', _("Lithuanian")),
'mk': (None, None, _("Macedonian")), #Windows has no translation for Macedonian
'nb': ('Norwegian_Norway', '1252', _("Norwegian Bokmal")),
'nl': ('Dutch_Netherlands', '1252', _("Dutch")),
'nn': ('Norwegian-Nynorsk_Norway', '1252', _("Norwegian Nynorsk")),
'pl': ('Polish_Poland', '1250', _("Polish")),
'pt_BR': ('Portuguese_Brazil', '1252', _("Portuguese (Brazil)")),
'pt_PT': ('Portuguese_Portugal', '1252', _("Portuguese (Portugal)")),
'ro': ('Romanian_Romania', '1250', _("Romanian")),
'ru': ('Russian_Russia', '1251', _("Russian")),
'sk': ('Slovak_Slovakia', '1250', _("Slovak"),),
'sl': ('Slovenian_Slovenia', '1250', _("Slovenian")),
'sq': ('Albanian_Albania', '1250', _("Albanian")),
'sr': ('Serbian(Cyrillic)_Serbia and Montenegro', '1251', None), #Gramps's Serbian translation is not yet ready
'sv': ('Swedish_Sweden', '1252', _("Swedish")),
'tr': ('Turkish_Turkey', '1254', _("Turkish")),
'uk': ('Ukrainian_Ukraine', '1251', _("Ukrainian")),
'vi': ('Vietnamese_Viet Nam', '1258', _("Vietnamese")),
'zh_CN': ('Chinese_China', '936', _("Chinese (Simplified)")),
'zh_TW': ('Chinese_Taiwan', '950', _("Chinese (Traditional)")),
}
def _check_mswin_locale(locale):
    """Map a POSIX-style locale code to its Microsoft Windows equivalent.

    Tries the five-character form (e.g. 'pt_BR') first, then the bare
    two-character language code. Returns a tuple of the matched code and
    the (windows-locale-name, codepage) pair from _LOCALE_NAMES, or
    (None, None) when neither form is known.
    """
    for width in (5, 2):
        key = locale[:width]
        if key in _LOCALE_NAMES:
            return (key, _LOCALE_NAMES[key][:2])
    return (None, None)
def _check_mswin_locale_reverse(locale):
    """Look up the POSIX locale code for a Microsoft Windows locale name.

    Scans _LOCALE_NAMES for an entry whose Windows name matches `locale`
    and returns (posix_code, codepage); (None, None) when unknown.
    """
    matches = ((code, entry[1]) for (code, entry) in _LOCALE_NAMES.items()
               if entry and locale == entry[0])
    return next(matches, (None, None))
#------------------------------------------------------------------------
#
# GrampsLocale Class
#
#------------------------------------------------------------------------
class GrampsLocale(object):
"""
Encapsulate a locale. This class is a sort-of-singleton: The
first instance created will query the environment and OSX defaults
for missing parameters (precedence is parameters passed to the
constructor, environment variables LANG, LC_COLLATE, LC_TIME,
etc., and LANGUAGE, OSX defaults settings when that's the
platform). Subsequent calls to the constructor with no or
identical parameters will return the same Grampslocale
object. Construction with different parameters will result in a
new GrampsLocale instance with the specified parameters, but any
parameters left out will be filled in from the first instance.
:param localedir: The full path to the top level directory containing the
translation files. Defaults to sys.prefix/share/locale.
:param lang: A single locale value which is used for unset locale.LC_FOO
settings.
:param domain: The name of the applicable translation file. The default is
"gramps", indicating files in LC_MESSAGES named gramps.mo.
:param languages: String with a ':'-separated list of two or five character
codes corresponding to subidrectries in the localedir,
e.g.: "fr" or "zh_CN".
"""
DEFAULT_TRANSLATION_STR = "default"
__first_instance = None
encoding = None
    def __new__(cls, localedir=None, lang=None, domain=None, languages=None):
        """Sort-of-singleton constructor (see class docstring).

        The very first call creates and remembers the shared instance;
        later calls return that same instance when their parameters match
        it (or are unset), and only mint a fresh GrampsLocale when the
        parameters genuinely differ.
        """
        if not GrampsLocale.__first_instance:
            cls.__first_instance = super(GrampsLocale, cls).__new__(cls)
            # __init__ will flip this once environment probing succeeds.
            cls.__first_instance.initialized = False
            return cls.__first_instance

        if not cls.__first_instance.initialized:
            raise RuntimeError("Second GrampsLocale created before first one was initialized")

        # Reuse the first instance when every supplied parameter either is
        # unset or matches what the first instance already resolved.
        if ((lang is None or lang == cls.__first_instance.lang)
            and (localedir is None or localedir == cls.__first_instance.localedir)
            and (domain is None or domain == cls.__first_instance.localedomain)
            and (languages is None or len(languages) == 0 or
                 languages == cls.__first_instance.language)):
            return cls.__first_instance

        return super(GrampsLocale, cls).__new__(cls)
    def _win_init_environment(self):
        """
        The Windows implementation of Python ignores environment
        variables when setting the locale; it only pays attention to
        the control panel language settings -- which for practical
        purposes limits one to the language for which one purchased
        Windows. This function enables using alternative
        localizations by reading %LANG%, %LC_MESSAGES%, %LANGUAGE%,
        %COLLATION%, %LC_TIME%, %LC_NUMERIC% and %LC_MONETARY% and
        mapping them onto Windows locale names via _LOCALE_NAMES.
        """
        # Primary locale: prefer %LANG%, fall back to the system locale.
        if 'LANG' in os.environ:
            (lang, loc) = _check_mswin_locale(os.environ['LANG'])
            if loc:
                locale.setlocale(locale.LC_ALL, '.'.join(loc))
                self.lang = lang
                self.encoding = loc[1]
            else:
                LOG.debug("%%LANG%% value %s not usable", os.environ['LANG'])
        if not self.lang:
            # Ask Windows for its locale and map it back to a POSIX code.
            locale.setlocale(locale.LC_ALL, '')
            (lang, encoding) = locale.getlocale()
            loc = _check_mswin_locale_reverse(lang)
            if loc[0]:
                self.lang = loc[0]
                self.encoding = loc[1]
            else:
                (lang, loc) = _check_mswin_locale(lang)
                if lang:
                    self.lang = lang
                    self.encoding = loc[1]
                else:
                    LOG.debug("No usable locale found in environment")
        if not self.lang:
            # Last resort: untranslated C locale with the Western codepage.
            self.lang = 'C'
            self.encoding = 'cp1252'
        # Translation language: %LC_MESSAGES%, overridden by %LANGUAGE%.
        if 'LC_MESSAGES' in os.environ:
            lang = self.check_available_translations(os.environ['LC_MESSAGES'])
            if lang:
                self.language = [lang]
            else:
                LOG.debug("No translation for %%LC_MESSAGES%% locale")
        if 'LANGUAGE' in os.environ:
            # %LANGUAGE% is a ':'-separated preference list; keep only the
            # entries we actually have translations for.
            language = [x for x in [self.check_available_translations(l)
                                    for l in os.environ["LANGUAGE"].split(":")]
                        if x]
            if language:
                self.language = language
            else:
                LOG.debug("No languages with translations found in %%LANGUAGES%%")
        if not self.language:
            self.language = [self.lang[:5]]
        # Collation: ICU accepts locale codes directly; otherwise map to a
        # Windows locale and apply it via setlocale.
        if 'COLLATION' in os.environ:
            coll = os.environ['COLLATION']
            if HAVE_ICU:
                if coll[:2] in ICU_LOCALES:
                    self.collation = coll
                else:
                    self.collation = self.lang
            else:
                (coll, loc) = _check_mswin_locale(coll)
                if not loc:
                    (coll, loc) = _check_mswin_locale(self.lang)
                self.collation = '.'.join(loc)
                locale.setlocale(locale.LC_COLLATE, self.collation)
        else:
            if HAVE_ICU:
                self.collation = self.lang
            else:
                (coll, loc) = _check_mswin_locale(self.lang)
                if loc:
                    self.collation = '.'.join(loc)
                else:
                    self.collation = 'C'
                locale.setlocale(locale.LC_COLLATE, self.collation)
        # We can't import datahandler stuff or we'll get a circular
        # dependency, so we rely on the available translations list
        if 'LC_TIME' in os.environ:
            self.calendar = self.check_available_translations(os.environ['LC_TIME']) or self.lang
        else:
            self.calendar = self.lang
        if 'LC_NUMERIC' in os.environ:
            self.numeric = os.environ['LC_NUMERIC']
        else:
            self.numeric = self.lang
        if 'LC_MONETARY' in os.environ:
            self.currency = os.environ['LC_MONETARY']
        else:
            self.currency = self.lang
    def _init_from_environment(self):
        """Initialize lang/encoding/language and the category-specific
        settings (calendar, collation, numeric, currency) from the POSIX
        environment (LANG, LC_*, LANGUAGE), falling back to the C locale
        and US English when nothing usable is found.
        """

        def _check_locale(locale):
            # Accept a (code, encoding) pair only if we have a translation
            # for its language; on success record lang/encoding/language.
            if not locale[0]:
                return False
            lang = self.check_available_translations(locale[0])
            if not lang:
                return False
            self.lang = locale[0]
            self.encoding = locale[1]
            self.language = [lang]
            return True

        _failure = False
        try:
            locale.setlocale(locale.LC_ALL, '')
            # Try the active locale first, then the interpreter default.
            if not _check_locale(locale.getlocale()):
                if not _check_locale(locale.getdefaultlocale()):
                    LOG.debug("Usable locale not found, localization settings ignored.");
                    self.lang = 'C'
                    self.encoding = 'ascii'
                    self.language = ['en']
                    _failure = True
        except locale.Error as err:
            LOG.debug("Locale error %s, localization settings ignored.",
                      err);
            self.lang = 'C'
            self.encoding = 'ascii'
            self.language = ['en']
            _failure = True

        #LC_MESSAGES
        (loc, enc) = locale.getlocale(locale.LC_MESSAGES)
        if loc:
            language = self.check_available_translations(loc)
            if language:
                self.language = [language]
            else:
                LOG.debug("No translation for LC_MESSAGES locale %s", loc)

        # $LANGUAGE overrides $LANG, $LC_MESSAGES
        if "LANGUAGE" in os.environ:
            # ':'-separated preference list, filtered to languages we can
            # actually translate into.
            language = [x for x in [self.check_available_translations(l)
                                    for l in os.environ["LANGUAGE"].split(":")]
                        if x]
            if language:
                self.language = language
                if not self.lang.startswith(self.language[0]):
                    LOG.debug("Overiding locale setting %s with LANGUAGE setting %s", self.lang, self.language[0])
        elif _failure:
            LOG.warning("No valid locale settings found, using US English")

        # Date display and collation: ICU takes bare codes; otherwise keep
        # full "code.encoding" strings usable with locale.setlocale.
        if HAVE_ICU:
            self.calendar = locale.getlocale(locale.LC_TIME)[0] or self.lang[:5]
            self.collation = locale.getlocale(locale.LC_COLLATE)[0] or self.lang[:5]
        else:
            loc = locale.getlocale(locale.LC_TIME)
            if loc and self.check_available_translations(loc[0]):
                self.calendar = '.'.join(loc)
            else:
                self.calendar = self.lang
            loc = locale.getlocale(locale.LC_COLLATE)
            if loc and loc[0]:
                self.collation = '.'.join(loc)
            else:
                self.collation = self.lang
        # COLLATION (Gramps-specific) overrides LC_COLLATE under ICU.
        if HAVE_ICU and 'COLLATION' in os.environ:
            self.collation = os.environ['COLLATION']

        loc = locale.getlocale(locale.LC_NUMERIC)
        if loc and loc[0]:
            self.numeric = '.'.join(loc)
        else:
            self.numeric = self.lang

        loc = locale.getlocale(locale.LC_MONETARY)
        if loc and loc[0]:
            self.currency = '.'.join(loc)
        else:
            self.currency = self.lang
    def _win_bindtextdomain(self, localedomain, localedir):
        """
        Help routine for loading and setting up libintl attributes
        on Windows so GtkBuilder/libintl can find our catalogs.

        :param localedomain: the gettext domain to bind.
        :param localedir: directory holding the catalogs (utf-8 bytes).

        Logs a warning and does nothing if libintl-8 cannot be loaded.
        (Despite the original comment, nothing is returned.)
        """
        from ctypes import cdll
        try:
            libintl = cdll.LoadLibrary('libintl-8')
            libintl.bindtextdomain(localedomain, localedir)
            libintl.textdomain(localedomain)
            libintl.bind_textdomain_codeset(localedomain, "UTF-8")
        except WindowsError:
            LOG.warning("Localization library libintl not on %PATH%, localization will be incomplete")
    def __init_first_instance(self):
        """
        Initialize the primary locale from whatever might be
        available. We only do this once, and the resulting
        GrampsLocale is returned by default.
        """
        global _hdlr
        # Temporary console handler so early localization problems are
        # visible; it is removed again at the end of __init__.
        _hdlr = logging.StreamHandler()
        _hdlr.setFormatter(logging.Formatter(fmt="%(name)s.%(levelname)s: %(message)s"))
        LOG.addHandler(_hdlr)
        #Now that we have a logger set up we can issue the icu error if needed.
        if not HAVE_ICU:
            LOG.warning(_icu_err)
        # Even the first instance can be overridden by passing lang
        # and languages to the constructor. If it isn't (which is the
        # expected behavior), do platform-specific setup:
        if not (self.lang and self.language):
            if sys.platform == 'darwin':
                from . import maclocale
                maclocale.mac_setup_localization(self)
            elif sys.platform == 'win32':
                self._win_init_environment()
            else:
                self._init_from_environment()
        else:
            self.numeric = self.currency = self.calendar = self.collation = self.lang
        if not self.lang:
            self.lang = 'en_US.UTF-8'
        if not self.language:
            # NOTE(review): this append assumes self.language is a list
            # (possibly empty) at this point, never None — the platform
            # setup paths above appear to guarantee that; confirm.
            self.language.append('en')
        if not self.localedir and not self.lang.startswith('en'):
            LOG.warning("No translations for %s were found, setting localization to U.S. English", self.localedomain)
            self.lang = 'en_US.UTF-8'
            self.language = ['en']
        #Next, we need to know what is the encoding from the native
        #environment. This is used by python standard library funcions which
        #localize their output, e.g. time.strftime(). NB: encoding is a class variable.
        if not self.encoding:
            self.encoding = (locale.getpreferredencoding()
                             or sys.getdefaultencoding())
        LOG.debug("Setting encoding to %s", self.encoding)
        #Ensure that output is encoded correctly to stdout and stderr. This is
        #much less cumbersome and error-prone than encoding individual outputs
        #and better handles the differences between Python 2 and Python 3:
        try:
            _encoding = sys.stdout.encoding or sys.getdefaultencoding()
        except:
            _encoding = "UTF-8"
        if sys.version_info[0] < 3:
            sys.stdout = codecs.getwriter(_encoding)(sys.stdout,
                                                     'backslashreplace')
            sys.stderr = codecs.getwriter(_encoding)(sys.stderr,
                                                     'backslashreplace')
        else:
            sys.stdout = codecs.getwriter(_encoding)(sys.stdout.detach(),
                                                     'backslashreplace')
            sys.stderr = codecs.getwriter(_encoding)(sys.stderr.detach(),
                                                     'backslashreplace')
        # Make sure that self.lang and self.language are reflected
        # back into the environment for Gtk to use when its
        # initialized. If self.lang isn't 'C', make sure that it has a
        # 'UTF-8' suffix, because that's all that GtkBuilder can
        # digest.
        # Gtk+ has an 'en' po, but we don't. This is worked-around for
        # our GrampsTranslation class but that isn't used to retrieve
        # translations in GtkBuilder (glade), a direct call to libintl
        # (gettext) is. If 'en' is in the translation list it gets
        # skipped in favor of the next language, which can cause
        # inappropriate translations of strings in glade/ui files. To
        # prevent this, if 'en' is in self.language it's the last
        # entry:
        if 'en' in self.language:
            self.language = self.language[:self.language.index('en') + 1]
        # Linux note: You'll get unsupported locale errors from Gtk
        # and untranslated strings if the requisite UTF-8 locale isn't
        # installed. This is particularly a problem on Debian and
        # Debian-derived distributions which by default don't install
        # a lot of locales.
        lang = locale.normalize(self.language[0] if self.language[0] else 'C')
        check_lang = lang.split('.')
        if not check_lang[0] in ('C', 'en'):
            if len(check_lang) < 2 or check_lang[1] not in ("utf-8", "UTF-8"):
                lang = '.'.join((check_lang[0], 'UTF-8'))
        os.environ["LANG"] = lang
        #We need to convert 'en' and 'en_US' to 'C' to avoid confusing
        #GtkBuilder when it's retrieving strings from our Glade files
        #since we have neither an en.po nor an en_US.po.
        os.environ["LANGUAGE"] = ':'.join(self.language)
        # GtkBuilder uses GLib's g_dgettext wrapper, which oddly is bound
        # with locale instead of gettext. Win32 doesn't support bindtextdomain.
        if self.localedir:
            if not sys.platform == 'win32':
                locale.bindtextdomain(self.localedomain, self.localedir)
            else:
                self._win_bindtextdomain(self.localedomain,
                                         self.localedir.encode('utf-8'))
def _init_secondary_locale(self):
"""
Init a secondary locale. Secondary locales are used to provide
an alternate localization to the one used for the UI; for
example, some reports offer the option to use a different
language.
"""
if not self.localedir:
LOG.warning("No Localedir provided, unable to find translations")
if not self.localedomain:
if _firstlocaledomain:
self.localedomain = _first.localedomain
else:
self.localedomain = "gramps"
_first = self._GrampsLocale__first_instance
if not self.lang and _first.lang:
self.lang = _first.lang
if not self.language:
if self.lang:
trans = self.check_available_translations(self.lang)
if trans:
self.language = [trans]
if not self.language and _first.language:
self.language = _first.language
self.calendar = self.collation = self.lang
def __init__(self, localedir=None, lang=None, domain=None, languages=None):
"""
Init a GrampsLocale. Run __init_first_instance() to set up the
environment if this is the first run. Return __first_instance
otherwise if called without arguments.
"""
global _hdlr
#initialized is special, used only for the "first instance",
#and created by __new__(). It's used to prevent re-__init__ing
#__first_instance when __new__() returns its pointer.
if hasattr(self, 'initialized') and self.initialized:
return
_first = self._GrampsLocale__first_instance
self.localedir = None
# Everything breaks without localedir, so get that set up
# first. Warnings are logged in _init_first_instance or
# _init_secondary_locale if this comes up empty.
if localedir and os.path.exists(os.path.abspath(localedir)):
self.localedir = localedir
elif (_first and hasattr(_first, 'localedir') and _first.localedir and
os.path.exists(os.path.abspath(_first.localedir))):
self.localedir = _first.localedir
else:
LOG.warn('Missing or invalid localedir %s; no translations will be available.', repr(localedir))
self.lang = lang
self.localedomain = domain or 'gramps'
if languages:
self.language = [x for x in [self.check_available_translations(l)
for l in languages.split(":")]
if x]
else:
self.language = None
if self == _first:
self._GrampsLocale__init_first_instance()
else:
self._init_secondary_locale()
self.icu_locales = {}
self.collator = None
if HAVE_ICU:
self.icu_locales["default"] = Locale.createFromName(self.lang)
if self.collation and self.collation != self.lang:
self.icu_locales["collation"] = Locale.createFromName(self.collation)
else:
self.icu_locales["collation"] = self.icu_locales["default"]
try:
self.collator = Collator.createInstance(self.icu_locales["collation"])
except ICUError as err:
LOG.warning("Unable to create collator: %s", str(err))
self.collator = None
try:
self.translation = self._get_translation(self.localedomain,
self.localedir,
self.language)
except ValueError:
LOG.warning("Unable to find translation for languages in %s, using US English", ':'.join(self.language))
self.translation = GrampsNullTranslations()
self.translation._language = "en"
if _hdlr:
LOG.removeHandler(_hdlr)
_hdlr = None
self._dd = self._dp = None
#Guards against running twice on the first instance.
self.initialized = True
def _get_translation(self, domain = None,
localedir = None,
languages=None):
"""
Get a translation of one of our classes. Doesn't return the
singleton so that it can be used by get_addon_translation()
"""
if not domain:
domain = self.localedomain
if not languages:
languages = self.language
if not localedir:
localedir = self.localedir
for lang in languages:
if gettext.find(domain, localedir, [lang]):
translator = gettext.translation(domain, localedir,
[lang],
class_ = GrampsTranslations)
translator._language = lang
return translator
elif lang.startswith("en") or lang.startswith("C"):
translator = GrampsNullTranslations()
translator._language = "en"
return translator
if not languages or len(languages) == 0:
LOG.warning("No language provided, using US English")
else:
raise ValueError("No usable translations in %s for " %
':'.join(languages))
translator = GrampsNullTranslations()
translator._language = "en"
return translator
def _get_language_string(self, lang_code):
"""
Given a language code of the form "lang_region", return a text string
representing that language.
"""
try:
lang = _LOCALE_NAMES[lang_code][2]
except KeyError:
try:
lang = _LOCALE_NAMES[lang_code[:2]][2]
except KeyError:
LOG.debug("Gramps has no translation for %s", lang_code)
lang = None
except IndexError as err:
LOG.debug("Bad Index for tuple %s\n" % _LOCALE_NAMES[lang_code][0])
lang = None
if lang:
return self.translation.gettext(lang)
return lang
#-------------------------------------------------------------------------
#
# Properties
#
#-------------------------------------------------------------------------
    @property
    def date_displayer(self):
        """
        Return the locale's date displayer; if it hasn't already been
        cached, set it from datehandler.LANG_TO_DISPLAY. If one isn't
        available for the selected locale, attempt to fall back on the
        first_instance's locale before settling on the 'C' displayer.

        .. note:: This is the getter for the date_displayer property
        """
        if self._dd:
            return self._dd
        from gramps.gen.config import config
        try:
            val = config.get('preferences.date-format')
        except AttributeError:
            # Config without that preference: use the default format.
            val = 0;
        # Imported lazily; importing datehandler at module load would
        # create a circular dependency.
        from gramps.gen.datehandler import LANG_TO_DISPLAY as displayers
        _first = self._GrampsLocale__first_instance
        # Fallback chain: our calendar code, its two-letter form, the
        # first instance's calendar, its two-letter form, then 'C'.
        if self.calendar in displayers:
            self._dd = displayers[self.calendar](val)
        elif self.calendar[:2] in displayers:
            self._dd = displayers[self.calendar[:2]](val)
        elif self != _first and _first.calendar in displayers:
            self._dd = displayers[_first.calendar](val)
        elif self != _first and _first.calendar[:2] in displayers:
            self._dd = displayers[_first.calendar[:2]](val)
        else:
            self._dd = displayers['C'](val)
        return self._dd
@property
def date_parser(self):
"""
Return the locale's date parser; if it hasn't already been
cached, set it from datehandler.LANG_TO_PARSER. If one isn't
available for the selected locale, attempt to fall back on the
first_instance's locale before settling on the 'C' parser.
.. note:: This is the getter for the date_parser property
"""
if self._dp:
return self._dp
from gramps.gen.datehandler import LANG_TO_PARSER as parsers
_first = self._GrampsLocale__first_instance
if self.calendar in parsers:
self._dp = parsers[self.calendar]()
elif self.calendar[:2] in parsers:
self._dp = parsers[self.calendar]()
elif self != _first and _first.calendar in parsers:
self._dp = parsers[_first.calendar]()
elif self != _first and _first.calendar[:2] in parsers:
self._dp = parsers[_first.calendar[:2]]()
else:
self._dp = parsers['C']()
return self._dp
#-------------------------------------------------------------------------
#
# Public Functions
#
#-------------------------------------------------------------------------
    def get_localedomain(self):
        """
        Get the LOCALEDOMAIN used for the Gramps application.
        Required by gui/glade.py to pass to Gtk.Builder

        :returns: the gettext domain string (usually 'gramps').
        """
        return self.localedomain
    def get_language_list(self):
        """
        Return the list of configured languages. Used by
        ViewManager.check_for_updates to select the language for the
        addons descriptions.

        :returns: the ordered list of language codes for this locale.
        """
        return self.language
def get_addon_translator(self, filename, domain="addon",
languages=None):
"""
Get a translator for an addon.
:param filename: filename of a file in directory with full path, or
None to get from self.
:param domain: the name of the .mo file under the LANG/LC_MESSAGES dir
:param languages: a list of languages to force
:returns: a gettext.translation object
Example::
_ = glocale.get_addon_translator(languages=["fr_BE.utf8"]).gettext
.. seealso:: the python gettext documentation.
Assumes path/filename = path/locale/LANG/LC_MESSAGES/addon.mo.
"""
gramps_translator = self._get_translation()
path = self.localedir
if filename:
path = os.path.join(os.path.dirname(os.path.abspath(filename)), "locale")
if languages:
addon_translator = self._get_translation(domain,
path,
languages=languages)
else:
addon_translator = self._get_translation(domain, path)
gramps_translator.add_fallback(addon_translator)
return gramps_translator # with a language fallback
def get_available_translations(self, localedir = None, localedomain = None):
"""
Get a list of available translations.
:returns: A list of translation languages.
:rtype: unicode[]
"""
languages = ["en"]
if not localedir and self.localedir:
localedir = self.localedir
else:
return languages
if not localedomain and self.localedomain:
localedomain = self.localedomain
else:
localedomain = 'gramps'
for langdir in os.listdir(self.localedir):
mofilename = os.path.join(localedir, langdir,
"LC_MESSAGES",
"%s.mo" % localedomain )
if os.path.exists(mofilename):
languages.append(langdir)
languages.sort()
return languages
def check_available_translations(self, locale):
"""
Test a locale for having a translation available
locale -- string with standard language code, locale code, or name
"""
if not self.localedir:
return None
#Note that this isn't a typo for self.language; self.languages
#is cached so we don't have to query the file system every
#time this function is called.
if not hasattr(self, 'languages'):
self.languages = self.get_available_translations()
if not locale:
return None
if locale[:5] in self.languages:
return locale[:5]
if locale[:2] in self.languages:
return locale[:2]
return None
def get_language_dict(self):
'''
return a dictionary of language names : codes for use by language
pickers.
'''
return {self._get_language_string(code) : code
for code in self.get_available_translations()
if self._get_language_string(code)}
def trans_objclass(self, objclass_str):
"""
Translates objclass_str into "... %s", where objclass_str
is 'Person', 'person', 'Family', 'family', etc.
"""
_ = self.translation.gettext
objclass = objclass_str.lower()
if objclass == "person":
return _("the person")
elif objclass == "family":
return _("the family")
elif objclass == "place":
return _("the place")
elif objclass == "event":
return _("the event")
elif objclass == "repository":
return _("the repository")
elif objclass == "note":
return _("the note")
elif objclass in ["media", "mediaobject"]:
return _("the media")
elif objclass == "source":
return _("the source")
elif objclass == "filter":
return _("the filter")
else:
return _("See details")
def sort_key(self, string):
"""
Return a value suitable to pass to the "key" parameter of sorted()
"""
if HAVE_ICU and self.collator:
#ICU can digest strings and unicode
return self.collator.getCollationKey(string).getByteArray()
else:
if sys.version_info[0] < 3 and isinstance(string, unicode):
string = string.encode("utf-8", "replace")
if sys.version_info[0] >= 3 and isinstance(string, bytes):
string = string.decode("utf-8", "replace")
try:
key = locale.strxfrm(string)
except Exception as err:
LOG.warn("Failed to obtain key for %s because %s",
self.collation, str(err))
return string
return key
def strcoll(self, string1, string2):
"""
Given two localized strings, compare them and return -1 if
string1 would sort first, 1 if string2 would, and 0 if
they are the same.
"""
key1 = self.sort_key(string1)
key2 = self.sort_key(string2)
return (-1 if key1 < key2 else (1 if key1 > key2 else 0))
def get_date(self, date):
"""
Return a string representing the date appropriate for the language being
translated.
:param date: The date to be represented.
:type date: :class:`~gen.lib.date.Date`
:returns: The date as text in the proper language.
:rtype: unicode
"""
return self.date_displayer.display(date)
def get_type(self, name):
"""
Return a string representing the name appropriate for the language being
translated.
:param name: The name type to be represented.
:returns: The name as text in the proper language.
:rtype: unicode
"""
from gramps.gen.lib.grampstype import GrampsType
return GrampsType.xml_str(name)
    def format(self, format, val, grouping=False, monetary=False):
        """
        Format a number in the current numeric locale. See python's
        locale.format for details. ICU's formatting codes are
        incompatible with locale's, so just use locale.format for now.

        NOTE(review): locale.format() is deprecated in Python 3.7+ and
        removed in 3.12; migrating callers to locale.format_string()
        should be considered — confirm supported Python versions first.
        """
        return locale.format(format, val, grouping, monetary)
    def format_string(self, format, val, grouping=False):
        """
        Format a string in the current numeric locale. See python's
        locale.format_string for details. ICU's message formatting codes are
        incompatible with locale's, so just use locale.format_string
        for now.

        :param format: a %-style format string.
        :param val: the value (or tuple of values) to interpolate.
        :param grouping: apply locale thousands grouping when True.
        """
        return locale.format_string(format, val, grouping)
def float(self, val):
"""
Parse a string to a floating point number. Uses locale.atof(),
in future with ICU present will use icu.NumberFormat.parse().
"""
try:
return locale.atof(val)
except ValueError:
point = locale.localeconv()['decimal_point']
sep = locale.localeconv()['thousands_sep']
try:
if point == ',':
return locale.atof(val.replace(' ', sep).replace('.', sep))
elif point == '.':
return locale.atof(val.replace(' ', sep).replace(',', sep))
else:
return None
except ValueError:
return None
#-------------------------------------------------------------------------
#
# Translations Classes
#
#-------------------------------------------------------------------------
# Pick the lexeme base string type for the running interpreter:
# unicode/basestring on Python 2, plain str on Python 3.
if sys.version_info[0] >= 3:
    _LexemeBaseStr = str
    _isstring = lambda s: isinstance(s, str)
else:
    _LexemeBaseStr = unicode
    _isstring = lambda s: isinstance(s, basestring)
class Lexeme(_LexemeBaseStr):
    r"""
    A string subclass carrying multiple inflected forms of one word.

    Created with :meth:`~GrampsTranslations.lexgettext`. A Lexeme is
    built either from a plain string (in which case it behaves exactly
    like that string) or from an iterable of
    ``(form_name, inflected_text)`` pairs, e.g. the parsed translator
    entry ``"NOMINATIVE=декабрь|GENITIVE=декабря|..."``.

    Used as an ordinary string, a Lexeme stands for the *first* form
    it was given, so ``"{month}".format(...)`` and ``.upper()`` act on
    that default form. A specific form is selected inside a format
    string through the :attr:`~Lexeme.f` dictionary::

        "{holiday.f[GENITIVE]}".format(holiday=xmas)

    Form names are language-specific: they are chosen by the human
    translator of each .po file (see
    :meth:`~GrampsTranslations.lexgettext` for the catalog syntax).
    This lets Gramps compose grammatically correct sentences in
    languages with case inflections, while code written for languages
    without such variation can keep treating the value as a plain
    string.
    """
    def __new__(cls, iterable, *args, **kwargs):
        if _isstring(iterable):
            # Plain-string construction: identical to the base string
            # type. NB: no ._forms mapping is attached on this path
            # (matching historical behavior).
            return _LexemeBaseStr.__new__(cls, iterable, *args, **kwargs)
        forms = collections.OrderedDict(iterable)
        # The first form supplied becomes the default string value;
        # fall back to "" when the iterable is empty.
        defaults = list(forms.values()) or [""]
        instance = _LexemeBaseStr.__new__(cls, defaults[0], *args, **kwargs)
        instance._forms = forms
        return instance

    def variants(self):
        """All lexeme forms, in the same order as given upon construction.
        The first one returned is the default form, which is used when the
        Lexeme instance is used in lieu of a string object.

        Same as ``f.values()``"""
        return self._forms.values()

    @property
    def f(self):
        """Dictionary of the lexeme forms"""
        return self._forms
class GrampsTranslations(gettext.GNUTranslations):
    """
    Overrides and extends gettext.GNUTranslations. See the Python gettext
    "Class API" documentation for how to use this.
    """
    def language(self):
        """
        Return the target languge of this translations object.
        """
        return self._language

    def gettext(self, msgid):
        """
        Obtain translation of gettext, return a unicode object

        :param msgid: The string to translated.
        :type msgid: unicode
        :returns: Translation or the original.
        :rtype: unicode
        """
        # An empty msgid would make gettext return the po file header,
        # never what a caller wants; hand it straight back.
        if not msgid.strip():
            return msgid
        if sys.version_info[0] < 3:
            return gettext.GNUTranslations.ugettext(self, msgid)
        return gettext.GNUTranslations.gettext(self, msgid)

    def ngettext(self, singular, plural, num):
        """
        The translation of singular/plural is returned unless the translation is
        not available and the singular contains the separator. In that case,
        the returned value is the singular.

        :param singular: The singular form of the string to be translated.
                         may contain a context seperator
        :type singular: unicode
        :param plural: The plural form of the string to be translated.
        :type plural: unicode
        :param num: the amount for which to decide the translation
        :type num: int
        :returns: Translation or the original.
        :rtype: unicode
        """
        if sys.version_info[0] < 3:
            return gettext.GNUTranslations.ungettext(self, singular,
                                                     plural, num)
        return gettext.GNUTranslations.ngettext(self, singular,
                                                plural, num)

    def sgettext(self, msgid, sep='|'):
        """
        Strip the context used for resolving translation ambiguities.
        The translation of msgid is returned unless the translation is
        not available and the msgid contains the separator. In that case,
        the returned value is the portion of msgid following the last
        separator. Default separator is '|'.

        :param msgid: The string to translated.
        :type msgid: unicode
        :param sep: The separator marking the context.
        :type sep: unicode
        :returns: Translation or the original with context stripped.
        :rtype: unicode
        """
        translated = self.gettext(msgid)
        if translated == msgid:
            # Untranslated: drop everything up to the last separator.
            translated = msgid.rpartition(sep)[2]
        return translated

    def lexgettext(self, msgid):
        """
        Extract all inflections of the same lexeme,
        stripping the '|'-separated context using :meth:`~sgettext`.

        The *resulting* message provided by the translator is either a
        single uninflected string, or a '|'-separated list of
        ``<inflection name>=<inflected form>`` entries, which is
        returned as a :class:`~Lexeme`.

        See :class:`~Lexeme` documentation for detailed explanation and example.

        :param msgid: The string to translated.
        :type msgid: unicode
        :returns: Translation or the original with context stripped.
        :rtype: unicode (single form) / Lexeme (multiple forms)
        """
        forms = self.sgettext(msgid).split('|')
        if len(forms) == 1:
            return forms[0]
        return Lexeme([form.split('=') for form in forms])
class GrampsNullTranslations(gettext.NullTranslations):
    """
    Extends gettext.NullTranslations to provide the sgettext method.

    Note that it's necessary for msgid to be unicode. If it's not,
    neither will be the returned string.
    """
    def sgettext(self, msgid, sep='|'):
        """Return msgid with any '<context><sep>' prefix removed."""
        translated = self.gettext(msgid)
        if translated == msgid:
            # Untranslated (always true for the null catalog unless a
            # fallback supplied something): strip the context marker.
            translated = msgid.rpartition(sep)[2]
        return translated

    lexgettext = sgettext

    def language(self):
        """
        The null translation returns the raw msgids, which are in English
        """
        return "en"
| pmghalvorsen/gramps_branch | gramps/gen/utils/grampslocale.py | Python | gpl-2.0 | 46,947 | [
"Brian"
] | cae5ca795fcc83f159dfbcfa0e96f2245ddda170e63b4c58202228e0df17916d |
#!/usr/bin/python
######################################################################
# Ascii TMS Viewer
#
#--------------------------------------------------------------------
# Brian Hone | Initial Release
#--------------------------------------------------------------------
#
#--------------------------------------------------------------------
# Copyright (c) 2009 Brian Hone
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
######################################################################
""" Simple kml parser. Pulls Placemarks with Point data out of a KML file """
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
import string, sys, StringIO
class Point:
    """Simple latitude/longitude holder; fields start unset (None)."""
    def __init__(self):
        self.lat = self.lon = None
    # end __init__

    def __repr__(self):
        return "lat: {0}, lon: {1}".format(self.lat, self.lon)
    # end __repr__
# end class Point
class LinearRing:
    """Ordered collection of Points forming a ring boundary."""
    def __init__(self):
        self.points = []
    # end __init__

    def addPoint(self, lat, lon):
        """Append a vertex built from lat/lon to the ring."""
        vertex = Point()
        vertex.lat = lat
        vertex.lon = lon
        self.points.append(vertex)
    # end addPoint

    def __repr__(self):
        # One point per line, each followed by a newline (historical format).
        return "".join("%s\n" % vertex for vertex in self.points)
# end class Linear Ring
class Placemark:
    """Holds one placemark: name, description, and optional geometry."""
    def __init__(self):
        self.name = ""
        self.description = ""
        self.point = None
        self.linear_ring = None
        # 0/1 flags kept for backward compatibility with callers.
        self.has_point = 0
        self.has_linear_ring = 0
    # end __init__

    def setPoint(self, lat, lon):
        """Attach a single Point geometry at lat/lon."""
        self.point = Point()
        self.point.lat = lat
        self.point.lon = lon
        self.has_point = 1
    # end setCoordinates

    def addLinearRingPoint(self, lat, lon):
        """Append one vertex to the LinearRing, creating it on first use."""
        if self.linear_ring is None:
            self.linear_ring = LinearRing()
            self.has_linear_ring = 1
        self.linear_ring.addPoint(lat, lon)
    # end addLinearRingPoint

    def __repr__(self):
        return "name: %s, desc: %s, point: %s, linear_ring: %s" % (
            self.name, self.description, self.point, self.linear_ring)
# end Placemark
class KML_Handler(ContentHandler):
    """
    SAX ContentHandler that extracts Placemark data (name, description,
    Point and LinearRing geometry) from a KML document. Completed
    placemarks accumulate in self.Placemarks.
    """
    def __init__(self):
        self.Placemarks = []
        # 0/1 flags recording which element(s) we are currently inside;
        # SAX delivers flat events, so nesting is tracked by hand.
        self.isPointCoordsElement = 0
        self.isLinearRingCoordsElement = 0
        self.isPlacemarkElement = 0
        self.isDescriptionElement = 0
        self.isNameElement = 0
        self.isPointElement = 0
        self.isLinearRingElement = 0
        self.currentPlacemark = None
        self.isSchemaElement = 0
    # end __init__

    def startElement(self, name, attrs):
        """Open-tag handler: raise the flag for the element entered."""
        name = name.upper()
        if name == "SCHEMA":
            self.isSchemaElement = 1
        if name == 'PLACEMARK':
            self.isPlacemarkElement = 1
            self.currentPlacemark = Placemark()
        else:
            if self.isPlacemarkElement == 1:
                if name == 'DESCRIPTION':
                    self.isDescriptionElement = 1
                    self.description = ""
                elif name == 'NAME':
                    self.isNameElement = 1
                    self.name = ""
                elif name == "POINT":
                    self.isPointElement = 1
                elif name == "LINEARRING":
                    self.isLinearRingElement = 1
                elif name == 'COORDINATES':
                    # Coordinates belong to either a Point or a LinearRing.
                    if self.isPointElement == 1:
                        self.isPointCoordsElement = 1
                        self.point_coordinates = ""
                    elif self.isLinearRingElement == 1:
                        self.isLinearRingCoordsElement = 1
                        self.linear_ring_coordinates = ""
    # end startElement

    def characters(self, ch):
        """Text handler: accumulate character data for the open element.

        SAX may deliver one element's text in several chunks, so every
        branch appends rather than assigns.
        """
        if self.isPlacemarkElement == 1:
            if self.isDescriptionElement == 1:
                # BUGFIX: was `self.description = ch`, which kept only
                # the last SAX chunk of a multi-chunk text node.
                self.description = self.description + ch
            elif self.isNameElement == 1:
                # BUGFIX: same chunking problem as description.
                self.name = self.name + ch
            elif self.isPointCoordsElement == 1: # only true if it's also a point element
                self.point_coordinates = self.point_coordinates + ch
            elif self.isLinearRingCoordsElement == 1: # only true if it's also a linear ring element
                self.linear_ring_coordinates = self.linear_ring_coordinates + ch
    # end characters

    def endElement(self, name):
        """Close-tag handler: lower flags and store parsed data."""
        name = name.upper()
        if name == "PLACEMARK":
            self.isPlacemarkElement = 0
            self.Placemarks.append(self.currentPlacemark)
            self.currentPlacemark = None
        elif name == "SCHEMA":
            self.isSchemaElement = 0
        if self.isPlacemarkElement:
            if name == 'DESCRIPTION':
                self.isDescriptionElement = 0
                self.currentPlacemark.description = self.description
                self.description = ""
            elif name == 'NAME':
                self.isNameElement = 0
                self.currentPlacemark.name = self.name
                self.name = ""
            elif name == 'COORDINATES':
                if self.isPointCoordsElement == 1:
                    self.isPointCoordsElement = 0
                    # KML order is lon,lat[,alt]; setPoint takes (lat, lon).
                    coords = [float(x) for x in
                              self.point_coordinates.strip().split(",")]
                    self.currentPlacemark.setPoint(coords[1], coords[0])
                    self.point_coordinates = ""
                elif self.isLinearRingElement == 1:
                    # BUGFIX: was `self.isLinearRingElement = 0`, which
                    # left isLinearRingCoordsElement raised forever so
                    # stray text kept accumulating into
                    # linear_ring_coordinates.
                    self.isLinearRingCoordsElement = 0
                    # One lon,lat[,alt] triple per line.
                    for line in self.linear_ring_coordinates.splitlines():
                        line = line.strip()
                        if len(line):
                            coords = [float(x) for x in line.split(",")]
                            self.currentPlacemark.addLinearRingPoint(coords[1], coords[0])
                    self.linear_ring_coordinates = ""
            elif name == "POINT":
                self.isPointElement = 0
            elif name == "LINEARRING":
                self.isLinearRingElement = 0
    # end endElement
# end KML_Handler
class kmlReader:
  """ Reads a KML file and exposes the Placemark data found in it. """
  def __init__( self, fname ):
    """Parse fname immediately; results are available via getCoordinates()."""
    self.filename = fname
    handler = KML_Handler()
    self.parser = make_parser()
    self.parser.setContentHandler( handler )
    self.parser.parse( fname )
  # end __init__
  def getCoordinates( self ):
    """ Return the list of Placemark objects parsed from the kml file """
    return self.parser.getContentHandler().Placemarks
  # end getCoordinates
# end class kmlReader
if __name__=="__main__":
  # Simple CLI: parse the given KML file and dump each placemark
  # (Placemark.__repr__ shows name, description and geometry).
  import argparse
  parser = argparse.ArgumentParser('')
  parser.add_argument('filename')
  args = parser.parse_args()
  fname = args.filename
  reader = kmlReader( fname )
  for c in reader.getCoordinates():
    # BUG FIX: "print c" is Python-2-only syntax; a single-argument
    # print(c) behaves identically on both Python 2 and Python 3.
    print(c)
| flailingsquirrel/asciimapper | KMLParser.py | Python | bsd-3-clause | 8,553 | [
"Brian"
] | 989991c5583b68b79d569303aa220575b724b4050e4a24a9a79e23b5c51d2a28 |
# Copyright (C) 2016 Arvid Fahlström Myrman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import functools
import numpy as np
import theano
def initializer(f):
    """Decorator turning an initialization function into an initializer factory.

    The wrapped function ``f`` is called as ``f(*args, shape=shape, **kwargs)``
    and must return a numpy array.  Calling the decorated function returns an
    ``init(shape)`` callable whose result is cast to ``dtype``.

    ``dtype`` now defaults lazily to ``theano.config.floatX`` at call time.
    Previously the default was captured once at decoration (import) time, so
    later changes to the theano configuration were silently ignored.
    """
    @functools.wraps(f)
    def wrapper(*args, dtype=None, **kwargs):
        if dtype is None:
            dtype = theano.config.floatX
        def init(shape):
            return f(*args, **kwargs, shape=shape).astype(dtype)
        return init
    return wrapper
@initializer
def constant(c, *, shape):
    """Initializer filling an array of the given shape with the constant ``c``."""
    return c * np.ones(shape)
@initializer
def gaussian(mean, std, *, shape):
    """Initializer drawing i.i.d. samples from N(mean, std**2)."""
    return np.random.normal(loc=mean, scale=std, size=shape)
@initializer
def glorot(c=1, *, shape):
    """Glorot/Xavier uniform initializer, scaled by ``c``.

    Samples uniformly from [-limit, limit] with
    limit = sqrt(6 / (fan_in + fan_out)), where the fan sizes are taken
    from the last two entries of ``shape``.
    """
    limit = np.sqrt(6 / (shape[-1] + shape[-2]))
    return np.random.uniform(-limit, limit, size=shape) * c
if __name__ == '__main__':
    # Smoke test: draw a 3x3 integer sample from the gaussian initializer.
    demo = gaussian(0, 1, dtype=int)
    print(demo((3, 3)))
| arvidfm/masters-thesis | src/nn/init.py | Python | gpl-2.0 | 1,472 | [
"Gaussian"
] | 16a9bb0cdc3f3d4ac4347316e80131c922975319b6572ee8c009cfb89258583e |
"""
Copyright (c) 2009-2010 Marian Tietz
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
import gtk
from gettext import gettext as _
from .. import com
from .. import config
from .output_window import OutputWindow
class WelcomeWindow(OutputWindow):
	""" Scrolled window shown at startup: the tekka logo, a welcome
	    header and a status text reflecting whether maki is connected. """
	def __init__(self, *args, **kwargs):
		""" Build the static 2x2 table layout and register for maki
		    connect/disconnect signals so the status text stays current. """
		OutputWindow.__init__(self, *args, **kwargs)
		# No vertical scrollbar; the table is sized to fit.
		self.set_properties(
			hscrollbar_policy=gtk.POLICY_AUTOMATIC,
			vscrollbar_policy = gtk.POLICY_NEVER )
		# Replace the inherited text view with a table:
		#   [ image | header ]
		#   [ image | text   ]
		self.remove(self.textview)
		self.table = gtk.Table(rows = 2, columns = 2)
		self.table.set_homogeneous(False)
		self.table.set_property("border-width", 12)
		# Load the tekka logo; fall back to no image if the icon
		# theme does not provide it.
		try:
			self.pixbuf = gtk.icon_theme_get_default().load_icon("tekka",128,0)
		except:
			self.pixbuf = None
		self.image = gtk.image_new_from_pixbuf(self.pixbuf)
		# Create Header label
		self.label = gtk.Label()
		self.label.set_property("yalign", 1)
		self.label.set_property("xalign", 0.05)
		self.label.set_markup(
			_("<big><b>Welcome to tekka!</b></big>"))
		# Add Image to table (spans both rows of column 0)
		self.ibox = gtk.EventBox()
		self.ibox.add(self.image)
		self.table.attach(self.ibox, 0, 1, 0, 2,
			xoptions=gtk.FILL|gtk.SHRINK,
			yoptions = gtk.FILL|gtk.EXPAND)
		# Add Label to table
		self.lbox = gtk.EventBox()
		self.lbox.add(self.label)
		self.table.attach(self.lbox, 1, 2, 0, 1,
			xoptions=gtk.FILL|gtk.EXPAND,
			yoptions=gtk.FILL|gtk.EXPAND)
		# Create Description label; its text is set by the
		# sushi_*_cb callbacks below.
		self.descr = gtk.Label()
		self.descr.set_properties(
			xalign=0.05,
			yalign=0.2,
			selectable=True,
			use_markup=True,
			width_chars=30,
			wrap=True)
		# Add Description to table
		self.dbox = gtk.EventBox()
		self.dbox.add(self.descr)
		self.table.attach(self.dbox, 1, 2, 1, 2,
			xoptions=gtk.FILL|gtk.EXPAND,
			yoptions=gtk.FILL|gtk.EXPAND)
		def mod_bg(w, c):
			# for debugging purposes: colorize the event boxes
			# (disabled by the if False guard)
			if False:
				s = w.get_style().copy()
				s.bg[gtk.STATE_NORMAL] = c
				w.set_style(s)
		mod_bg(self.lbox, gtk.gdk.Color("#FF0000"))
		mod_bg(self.dbox, gtk.gdk.Color("#00FF00"))
		mod_bg(self.ibox, gtk.gdk.Color("#0000FF"))
		self.add_with_viewport(self.table)
		# Show the status matching the current connection state ...
		if com.sushi.connected:
			self.sushi_connected_cb(com.sushi)
		else:
			self.sushi_disconnected_cb(com.sushi)
		# ... and keep it updated on future state changes.
		com.sushi.g_connect("maki-connected", self.sushi_connected_cb)
		com.sushi.g_connect("maki-disconnected", self.sushi_disconnected_cb)
	def sushi_connected_cb(self, sushi):
		""" Called when maki becomes available: show next-step hint. """
		s = _("You are connected to maki. The next step "
			"is to connect to a server via the server "
			"dialog in the tekka menu.")
		self.descr.set_markup(s)
	def sushi_disconnected_cb(self, sushi):
		""" Called when maki is unavailable: show troubleshooting hint. """
		s = _("You are not connected to maki. "
			"Without maki you can not connect to "
			"servers or write messages.\n\n"
			"If you are having problems running maki "
			"visit http://sushi.ikkoku.de/ and look whether there is "
			"a solution for your problem. Otherwise, feel free "
			"to ask for support.")
		self.descr.set_markup(s)
| sushi-irc/tekka | tekka/lib/welcome_window.py | Python | bsd-2-clause | 4,102 | [
"VisIt"
] | efec680b8f81950d33358cc7cc46b622415b14bd13b79a2664a9ab76d0ed84ca |
"""File I/O related routines."""
# Copyright (C) 2011 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
from io import StringIO
import numpy as np
import yaml
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
from phonopy.cui.settings import fracval
from phonopy.harmonic.force_constants import similarity_transformation
from phonopy.structure.atoms import PhonopyAtoms
from phonopy.structure.dataset import get_displacements_and_forces
from phonopy.structure.symmetry import Symmetry, elaborate_borns_and_epsilon
#
# FORCE_SETS
#
def write_FORCE_SETS(dataset, filename="FORCE_SETS"):
    """Write dataset to a FORCE_SETS file.

    See more detail in ``get_FORCE_SETS_lines``.
    """
    text = "\n".join(get_FORCE_SETS_lines(dataset)) + "\n"
    with open(filename, "w") as w:
        w.write(text)
def get_FORCE_SETS_lines(dataset, forces=None):
    """Generate FORCE_SETS text lines.

    See the format of dataset in the docstring of Phonopy.dataset.
    Optionally, sets of forces of supercells can be given; in that case they
    do not need to be stored in the dataset.
    """
    # Type-1 datasets are keyed by "first_atoms"; type-2 by "displacements".
    if "first_atoms" in dataset:
        return _get_FORCE_SETS_lines_type1(dataset, forces=forces)
    if "displacements" in dataset:
        if forces is not None:
            dataset["forces"] = forces
        return _get_FORCE_SETS_lines_type2(dataset)
    # Unknown layout: None (matches the historical implicit fall-through).
    return None
def _get_FORCE_SETS_lines_type1(dataset, forces=None):
num_atom = dataset["natom"]
displacements = dataset["first_atoms"]
if forces is None:
_forces = [x["forces"] for x in dataset["first_atoms"]]
else:
_forces = forces
lines = []
lines.append("%-5d" % num_atom)
lines.append("%-5d" % len(displacements))
for count, disp in enumerate(displacements):
lines.append("")
lines.append("%-5d" % (disp["number"] + 1))
lines.append("%20.16f %20.16f %20.16f" % tuple(disp["displacement"]))
for f in _forces[count]:
lines.append("%15.10f %15.10f %15.10f" % tuple(f))
return lines
def _get_FORCE_SETS_lines_type2(dataset):
lines = []
for displacements, forces in zip(dataset["displacements"], dataset["forces"]):
for d, f in zip(displacements, forces):
lines.append(("%15.8f" * 6) % (tuple(d) + tuple(f)))
return lines
def parse_FORCE_SETS(natom=None, filename="FORCE_SETS", to_type2=False):
    """Parse a FORCE_SETS file.

    to_type2 : bool
        dataset of type2 is returned when True.

    Returns
    -------
    dataset : dict
        Displacement dataset. See Phonopy.dataset.

    """
    with open(filename, "r") as f:
        return _get_dataset(f, natom=natom, to_type2=to_type2)
def parse_FORCE_SETS_from_strings(strings, natom=None, to_type2=False):
    """Parse FORCE_SETS-format text given as a string."""
    f = StringIO(strings)
    return _get_dataset(f, natom=natom, to_type2=to_type2)
def _get_dataset(f, natom=None, to_type2=False):
    """Dispatch to the type-1 or type-2 parser based on the first line.

    A single integer on the first non-blank line marks the type-1 format;
    six columns mark the type-2 format.  ``f`` must be seekable because the
    selected parser re-reads from the beginning.
    """
    first_fields = _get_line_ignore_blank(f).split()
    f.seek(0)
    if len(first_fields) == 1:
        if natom is not None and int(first_fields[0]) != natom:
            raise RuntimeError(
                "Number of forces is not consistent with supercell setting."
            )
        dataset = _get_dataset_type1(f)
        if to_type2:
            disps, forces = get_displacements_and_forces(dataset)
            return {"displacements": disps, "forces": forces}
        return dataset
    if len(first_fields) == 6:
        return get_dataset_type2(f, natom)
def _get_dataset_type1(f):
    """Read the type-1 FORCE_SETS body.

    Layout: natom, number of displacements, then per displacement the
    1-based atom index, the displacement vector, and natom force rows.
    """
    num_atom = int(_get_line_ignore_blank(f))
    num_displacements = int(_get_line_ignore_blank(f))
    first_atoms = []
    for _ in range(num_displacements):
        atom_number = int(_get_line_ignore_blank(f))
        displacement = np.array(
            [float(x) for x in _get_line_ignore_blank(f).split()]
        )
        forces = np.array(
            [
                [float(x) for x in _get_line_ignore_blank(f).split()]
                for _ in range(num_atom)
            ],
            dtype="double",
        )
        first_atoms.append(
            {
                "number": atom_number - 1,  # convert to 0-based index
                "displacement": displacement,
                "forces": forces,
            }
        )
    return {"natom": num_atom, "first_atoms": first_atoms}
def get_dataset_type2(f, natom):
    """Parse type-2 FORCE_SETS text and return a dataset dict.

    Each row holds six columns (three displacement and three force
    components); when ``natom`` is given, rows are grouped into supercells.
    """
    raw = np.loadtxt(f, dtype="double")
    if raw.shape[1] != 6 or (natom and raw.shape[0] % natom != 0):
        raise RuntimeError("Data shape of forces and displacements is incorrect.")
    if natom:
        raw = raw.reshape(-1, natom, 6)
    return {
        "displacements": np.array(raw[..., :3], dtype="double", order="C"),
        "forces": np.array(raw[..., 3:], dtype="double", order="C"),
    }
def _get_line_ignore_blank(f):
line = f.readline().strip()
if line == "":
line = _get_line_ignore_blank(f)
return line
def collect_forces(f, num_atom, hook, force_pos, word=None):
    """General function to collect forces from lines of a text file.

    Parameters
    ----------
    f :
        Text file pointer such as that returned by ``open(filename)``.
    num_atom : int
        Number of atoms in cell. Quit parsing when number of forces reaches this
        number.
    hook : str
        When this word is found at a line, parsing will start from the next line.
    force_pos : list
        Positions of force values in `line.split()`.
    word : str, optional
        Lines containing this word is only parsed. Default is None.

    Returns
    -------
    list of list of float, or False
        Up to ``num_atom`` rows of force components.  ``False`` when a
        candidate line has fewer columns than expected; an empty list when a
        field cannot be converted to float.

    Example
    -------
    The following is the abinit output.

    ...
    cartesian forces (hartree/bohr) at end:
        1     -0.00093686935947    -0.00000000000000    -0.00000000000000
        2      0.00015427277409    -0.00000000000000    -0.00000000000000
        3     -0.00000200377550    -0.00000000000000    -0.00000000000000
        4      0.00000619017547    -0.00000000000000    -0.00000000000000
    ...

    hook = "cartesian forces (hartree/bohr)"
    force_pos = [1, 2, 3]

    """
    # Skip ahead until the hook line; the force rows follow it.
    for line in f:
        if hook in line:
            break

    forces = []
    for line in f:
        if line.strip() == "":
            continue
        if word is not None:
            if word not in line:
                continue

        elems = line.split()
        # NOTE(review): only force_pos[2] is bounds-checked before indexing
        # with every entry of force_pos — assumes force_pos is ascending so
        # its last entry is the largest index; confirm against callers.
        if len(elems) > force_pos[2]:
            try:
                forces.append([float(elems[i]) for i in force_pos])
            except ValueError:
                # Non-numeric field in a force column: discard this set.
                forces = []
                break
        else:
            # Too few columns: not a force line; signal failure to caller.
            return False

        if len(forces) == num_atom:
            break

    return forces
def iter_collect_forces(filename, num_atom, hook, force_pos, word=None, max_iter=1000):
    """Repeat ``collect_forces`` to get the last set of forces in the file.

    Details of parameters are explained in ``collect_forces``.
    """
    with open(filename) as f:
        current = []
        previous = []
        for i in range(max_iter):
            current = collect_forces(f, num_atom, hook, force_pos, word=word)
            if not current:
                # No further set found; fall back to the last complete one.
                current = previous[:]
                break
            previous = current[:]
        if i == max_iter - 1:
            sys.stderr.write("Reached to max number of iterations (%d).\n" % max_iter)
    return current
#
# FORCE_CONSTANTS, force_constants.hdf5
#
def write_FORCE_CONSTANTS(force_constants, filename="FORCE_CONSTANTS", p2s_map=None):
    """Write force constants in text file format.

    Parameters
    ----------
    force_constants: ndarray
        Force constants
        shape=(n_satom,n_satom,3,3) or (n_patom,n_satom,3,3)
        dtype=double
    filename: str
        Filename to be saved
    p2s_map: ndarray
        Primitive atom indices in supercell index system
        dtype=intc

    """
    text = "\n".join(get_FORCE_CONSTANTS_lines(force_constants, p2s_map=p2s_map))
    with open(filename, "w") as w:
        w.write(text)
def get_FORCE_CONSTANTS_lines(force_constants, p2s_map=None):
    """Return text lines in FORCE_CONSTANTS format.

    See also ``write_FORCE_CONSTANTS``.
    """
    n_first, n_second = force_constants.shape[:2]
    # The compact (n_patom, n_satom) shape labels its rows by p2s_map;
    # otherwise row numbers are sequential.
    if p2s_map is not None and len(p2s_map) == n_first:
        atom_indices = p2s_map
    else:
        atom_indices = np.arange(n_first, dtype="intc")
    lines = ["%4d %4d" % (n_first, n_second)]
    row_fmt = "%22.15f" * 3
    for i, s_i in enumerate(atom_indices):
        for j in range(n_second):
            lines.append("%d %d" % (s_i + 1, j + 1))
            for row in force_constants[i][j]:
                lines.append(row_fmt % tuple(row))
    return lines
def write_force_constants_to_hdf5(
    force_constants,
    filename="force_constants.hdf5",
    p2s_map=None,
    physical_unit=None,
    compression=None,
):
    """Write force constants in hdf5 format.

    Parameters
    ----------
    force_constants: ndarray
        Force constants
        shape=(n_satom,n_satom,3,3) or (n_patom,n_satom,3,3)
        dtype=double
    filename: str
        Filename to be saved
    p2s_map: ndarray
        Primitive atom indices in supercell index system
        shape=(n_patom,)
        dtype=intc
    physical_unit : str, optional
        Physical unit used for force constants. Default is None.
    compression : str or int, optional
        h5py's lossless compression filters (e.g., "gzip", "lzf").
        See the detail at docstring of h5py.Group.create_dataset. Default is
        None.

    """
    try:
        import h5py
    except ImportError:
        raise ModuleNotFoundError("You need to install python-h5py.")
    with h5py.File(filename, "w") as w:
        w.create_dataset(
            "force_constants", data=force_constants, compression=compression
        )
        if p2s_map is not None:
            w.create_dataset("p2s_map", data=p2s_map)
        if physical_unit is not None:
            # Store the unit as a fixed-length bytes scalar.
            dset = w.create_dataset(
                "physical_unit", (1,), dtype="S%d" % len(physical_unit)
            )
            # BUG FIX: np.string_ was removed in NumPy 2.0; np.bytes_ is the
            # equivalent name available in both NumPy 1.x and 2.x.
            dset[0] = np.bytes_(physical_unit)
def parse_FORCE_CONSTANTS(filename="FORCE_CONSTANTS", p2s_map=None):
    """Parse a FORCE_CONSTANTS text file.

    Parameters
    ----------
    filename : str, optional
        Filename.
    p2s_map : ndarray, optional
        Primitive.p2s_map. Supplied, this is used to check file format
        consistency.

    """
    with open(filename) as fcfile:
        header = [int(x) for x in fcfile.readline().split()]
        if len(header) == 1:
            # Old square format: only one dimension in the header.
            header = [header[0], header[0]]
        n_first, n_second = header
        fc = np.zeros((n_first, n_second, 3, 3), dtype="double")
        seen_first_indices = []
        for i in range(n_first):
            for j in range(n_second):
                s_i = int(fcfile.readline().split()[0]) - 1
                if s_i not in seen_first_indices:
                    seen_first_indices.append(s_i)
                fc[i, j] = [
                    [float(x) for x in fcfile.readline().split()]
                    for _ in range(3)
                ]
        check_force_constants_indices(header, seen_first_indices, p2s_map, filename)
        return fc
def read_force_constants_hdf5(
    filename="force_constants.hdf5", p2s_map=None, return_physical_unit=False
):
    """Parse force_constants.hdf5.

    Parameters
    ----------
    filename : str, optional
        Filename.
    p2s_map : ndarray, optional
        Primitive.p2s_map. Supplied, this is used to check file format consistency.
    return_physical_unit : bool, optional
        When True and physical_unit is in file, physical unit is returned.
        Default is False.

    Returns
    -------
    fc : ndarray
        Force constants array; when ``return_physical_unit`` is True,
        ``(fc, physical_unit)`` is returned instead, with physical_unit
        being None when not stored in the file.

    Raises
    ------
    ModuleNotFoundError
        When h5py is not installed.
    RuntimeError
        When neither expected dataset name exists in the file.

    """
    try:
        import h5py
    except ImportError:
        raise ModuleNotFoundError("You need to install python-h5py.")
    with h5py.File(filename, "r") as f:
        # Two dataset names are accepted: "fc2" and "force_constants".
        if "fc2" in f:
            key = "fc2"
        elif "force_constants" in f:
            key = "force_constants"
        else:
            raise RuntimeError("%s doesn't contain necessary information" % filename)

        fc = f[key][:]
        if "p2s_map" in f:
            # Consistency check against the primitive cell mapping, if stored.
            p2s_map_in_file = f["p2s_map"][:]
            check_force_constants_indices(
                fc.shape[:2], p2s_map_in_file, p2s_map, filename
            )

        if return_physical_unit:
            if "physical_unit" in f:
                physical_unit = f["physical_unit"][0].decode("utf-8")
            else:
                physical_unit = None
            return fc, physical_unit
        else:
            return fc
def check_force_constants_indices(shape, indices, p2s_map, filename):
    """Check consistency of force constants data type.

    Raises RuntimeError when a compact (n_patom x n_satom) array's atom
    indices disagree with the primitive cell's p2s_map.  Square arrays or a
    missing p2s_map are accepted as-is.
    """
    if shape[0] == shape[1] or p2s_map is None:
        return
    if len(p2s_map) != len(indices) or (p2s_map != indices).any():
        raise RuntimeError(
            "%s file is inconsistent with the calculation setting. "
            "PRIMITIVE_AXIS may not be set correctly." % filename
        )
def parse_disp_yaml(filename="disp.yaml", return_cell=False):
    """Read disp.yaml or phonopy_disp.yaml.

    This method was originally made for parsing disp.yaml. Later this
    started to work for phonopy_disp.yaml, too. But now this method is not
    allowed to read phonopy_disp.yaml because of existance of PhonopyYaml
    class.

    Returns the dataset dict, or ``(dataset, cell)`` when ``return_cell``
    is True.  Raises KeyError when "displacements" is missing and
    RuntimeError when the atom count cannot be determined.
    """
    with open(filename) as f:
        new_dataset = {}
        dataset = yaml.load(f, Loader=Loader)
        if "phonopy" in dataset and "calculator" in dataset["phonopy"]:
            new_dataset["calculator"] = dataset["phonopy"]["calculator"]
        if "natom" in dataset:
            natom = dataset["natom"]
        elif "supercell" in dataset and "points" in dataset["supercell"]:
            # BUG FIX: the condition used to be
            # '"supercell" and "points" in dataset["supercell"]', which never
            # tested for the "supercell" key itself and raised KeyError when
            # it was absent instead of reaching the error below.
            natom = len(dataset["supercell"]["points"])
        else:
            # BUG FIX: the filename was missing from the format string.
            raise RuntimeError(
                "%s doesn't contain necessary information." % filename
            )
        new_dataset["natom"] = natom
        new_first_atoms = []
        # KeyError propagates when "displacements" is absent (as before).
        displacements = dataset["displacements"]
        if type(displacements[0]) is dict:
            # One displaced atom per entry; convert 1-based to 0-based index.
            for first_atoms in displacements:
                first_atoms["atom"] -= 1
                new_first_atoms.append(
                    {
                        "number": first_atoms["atom"],
                        "displacement": first_atoms["displacement"],
                    }
                )
            new_dataset["first_atoms"] = new_first_atoms
        if return_cell:
            cell = get_cell_from_disp_yaml(dataset)
            return new_dataset, cell
        else:
            return new_dataset
def write_disp_yaml_from_dataset(dataset, supercell, filename="disp.yaml"):
    """Write disp.yaml from dataset.

    This function is obsolete, because disp.yaml is obsolete.
    """
    displacements = []
    for d in dataset["first_atoms"]:
        displacements.append((d["number"],) + tuple(d["displacement"]))
    write_disp_yaml(displacements, supercell, filename=filename)
def write_disp_yaml(displacements, supercell, filename="disp.yaml"):
    """Write disp.yaml from displacements.

    This function is obsolete, because disp.yaml is obsolete.
    """
    lines = ["natom: %4d" % supercell.get_number_of_atoms()]
    lines.extend(_get_disp_yaml_lines(displacements, supercell))
    lines.append(str(supercell))
    with open(filename, "w") as w:
        w.write("\n".join(lines))
def _get_disp_yaml_lines(displacements, supercell):
lines = []
lines.append("displacements:")
for i, disp in enumerate(displacements):
lines.append("- atom: %4d" % (disp[0] + 1))
lines.append(" displacement:")
lines.append(" [ %20.16f,%20.16f,%20.16f ]" % tuple(disp[1:4]))
return lines
#
# DISP (old phonopy displacement format)
#
def parse_DISP(filename="DISP"):
    """Parse DISP file.

    This function is obsolete, because DISP is obsolete.

    Each non-blank line holds a 1-based atom index followed by the three
    displacement components; the index is returned 0-based.
    """
    displacements = []
    with open(filename) as disp:
        for line in disp:
            fields = line.split()
            if not fields:
                continue
            displacements.append(
                [int(fields[0]) - 1, float(fields[1]), float(fields[2]), float(fields[3])]
            )
    return displacements
#
# Parse supercell in disp.yaml
#
def get_cell_from_disp_yaml(dataset):
    """Read cell from disp.yaml like file.

    Recurses into ``dataset["supercell"]`` when no "lattice" key is present
    at the top level.  Returns a PhonopyAtoms instance built from the
    lattice, scaled positions and chemical symbols found in the dataset.

    Raises
    ------
    RuntimeError
        When the atomic positions cannot be located (too-old disp.yaml).
    """
    if "lattice" in dataset:
        lattice = dataset["lattice"]
        # Two historical layouts: "points"/"coordinates" (newer) and
        # "atoms"/"position" (older).
        if "points" in dataset:
            data_key = "points"
            pos_key = "coordinates"
        elif "atoms" in dataset:
            data_key = "atoms"
            pos_key = "position"
        else:
            # Neither layout present; dataset[None] below raises KeyError,
            # which is converted into the RuntimeError message.
            data_key = None
            pos_key = None

        try:
            positions = [x[pos_key] for x in dataset[data_key]]
        except KeyError:
            msg = (
                '"disp.yaml" format is too old. '
                'Please re-create it as "phonopy_disp.yaml" to contain '
                "supercell crystal structure information."
            )
            raise RuntimeError(msg)
        symbols = [x["symbol"] for x in dataset[data_key]]
        cell = PhonopyAtoms(
            cell=lattice, scaled_positions=positions, symbols=symbols, pbc=True
        )
        return cell
    else:
        return get_cell_from_disp_yaml(dataset["supercell"])
#
# QPOINTS
#
def parse_QPOINTS(filename="QPOINTS"):
    """Read QPOINTS file: first line is the count, then one q-point per line."""
    with open(filename, "r") as f:
        num_qpoints = int(f.readline().strip())
        qpoints = [
            [fracval(x) for x in f.readline().strip().split()]
            for _ in range(num_qpoints)
        ]
    return np.array(qpoints)
#
# BORN
#
def write_BORN(primitive, borns, epsilon, filename="BORN"):
    """Write non-analytical term correction parameters to a BORN file."""
    with open(filename, "w") as w:
        w.write("\n".join(get_BORN_lines(primitive, borns, epsilon)))
def get_BORN_lines(
    unitcell,
    borns,
    epsilon,
    factor=None,
    primitive_matrix=None,
    supercell_matrix=None,
    symprec=1e-5,
):
    """Generate text of BORN file.

    Parameters
    ----------
    unitcell : PhonopyAtoms
        Unit cell passed to ``elaborate_borns_and_epsilon``.
    borns : array_like
        Born effective charges.
    epsilon : array_like
        Dielectric constant tensor (3x3).
    factor : optional
        Unused in this function; kept for signature compatibility.
    primitive_matrix, supercell_matrix : array_like, optional
        Transformation matrices forwarded to ``elaborate_borns_and_epsilon``.
    symprec : float, optional
        Symmetry tolerance. Default is 1e-5.

    Returns
    -------
    list of str
        A comment line, one line for epsilon, then one line per returned
        Born tensor (row-major, 9 values each).

    """
    # Presumably reduces the tensors to the symmetry-independent atoms
    # (see elaborate_borns_and_epsilon) — atom_indices lists those atoms.
    borns, epsilon, atom_indices = elaborate_borns_and_epsilon(
        unitcell,
        borns,
        epsilon,
        symmetrize_tensors=True,
        primitive_matrix=primitive_matrix,
        supercell_matrix=supercell_matrix,
        symprec=symprec,
    )

    # Header comment lists the 1-based indices of the atoms written below.
    text = "# epsilon and Z* of atoms "
    text += " ".join(["%d" % n for n in atom_indices + 1])
    lines = [
        text,
    ]
    lines.append(("%13.8f " * 9) % tuple(epsilon.flatten()))
    for z in borns:
        lines.append(("%13.8f " * 9) % tuple(z.flatten()))
    return lines
def parse_BORN(primitive, symprec=1e-5, is_symmetry=True, filename="BORN"):
    """Parse a BORN file.

    Parameters
    ----------
    primitive : Primitive
        Primitive cell.
    symprec : float, optional
        Symmetry tolerance. Default is 1e-5.
    is_symmetry : bool, optional
        When True, parsed values are symmetrized. Default is True.
    filename : str, optional
        Filename.
    """
    with open(filename, "r") as f:
        return _parse_BORN_from_file_object(f, primitive, symprec, is_symmetry)
def parse_BORN_from_strings(strings, primitive, symprec=1e-5, is_symmetry=True):
    """Parse BORN-format text given as a string.

    See `parse_BORN` for parameters.
    """
    return _parse_BORN_from_file_object(
        StringIO(strings), primitive, symprec, is_symmetry
    )
def _parse_BORN_from_file_object(f, primitive, symprec, is_symmetry):
    """Build the primitive cell's Symmetry, then read the NAC parameters."""
    prim_symmetry = Symmetry(primitive, symprec=symprec, is_symmetry=is_symmetry)
    return get_born_parameters(f, primitive, prim_symmetry)
def get_born_parameters(f, primitive, prim_symmetry):
    """Parse BORN file text.

    Parameters
    ----------
    f :
        File pointer of BORN file.
    primitive : Primitive
        Primitive cell.
    prim_symmetry : Symmetry
        Symmetry of primitive cell.

    Returns
    -------
    dict or None
        Keys: "born" (charges expanded to all primitive atoms), "factor",
        "dielectric", and optionally "G_cutoff" and "Lambda".  None is
        returned (after printing a message) when the file is malformed.

    """
    # Line 1: optional conversion factor, G_cutoff and Lambda; each field
    # independently falls back to None when not parseable as float.
    line_arr = f.readline().split()
    if len(line_arr) < 1:
        print("BORN file format of line 1 is incorrect")
        return None

    factor = None
    G_cutoff = None
    Lambda = None

    if len(line_arr) > 0:
        try:
            factor = float(line_arr[0])
        except (ValueError, TypeError):
            factor = None
    if len(line_arr) > 1:
        try:
            G_cutoff = float(line_arr[1])
        except (ValueError, TypeError):
            G_cutoff = None
    if len(line_arr) > 2:
        try:
            Lambda = float(line_arr[2])
        except (ValueError, TypeError):
            Lambda = None

    # Read dielectric constant (line 2: 9 values, row-major 3x3)
    line = f.readline().split()
    if not len(line) == 9:
        print("BORN file format of line 2 is incorrect")
        return None
    dielectric = np.reshape([float(x) for x in line], (3, 3))

    # Read Born effective charge: one 9-value line per
    # symmetry-independent atom.
    independent_atoms = prim_symmetry.get_independent_atoms()
    borns = np.zeros((primitive.get_number_of_atoms(), 3, 3), dtype="double", order="C")

    for i in independent_atoms:
        line = f.readline().split()
        if len(line) == 0:
            print("Number of lines for Born effect charge is not enough.")
            return None
        if not len(line) == 9:
            print("BORN file format of line %d is incorrect" % (i + 3))
            return None
        borns[i] = np.reshape([float(x) for x in line], (3, 3))

    # Check that the number of atoms in the BORN file was correct
    line = f.readline().split()
    if len(line) > 0:
        print(
            "Too many atoms in the BORN file (it should only contain "
            "symmetry-independent atoms)"
        )
        return None

    # Fill in the charges of the remaining (symmetry-equivalent) atoms.
    _expand_borns(borns, primitive, prim_symmetry)
    non_anal = {"born": borns, "factor": factor, "dielectric": dielectric}
    if G_cutoff is not None:
        non_anal["G_cutoff"] = G_cutoff
    if Lambda is not None:
        non_anal["Lambda"] = Lambda

    return non_anal
def _expand_borns(borns, primitive: PhonopyAtoms, prim_symmetry: Symmetry):
    """Fill Born charges of symmetry-equivalent atoms in-place.

    On entry ``borns`` holds values only for the symmetry-independent atoms;
    each remaining row is obtained by rotating the tensor of its equivalent
    independent atom (map_atoms) with the corresponding symmetry operation
    (map_operations).
    """
    # Expand Born effective charges to all atoms in the primitive cell
    rotations = prim_symmetry.symmetry_operations["rotations"]
    map_operations = prim_symmetry.get_map_operations()
    map_atoms = prim_symmetry.get_map_atoms()

    for i in range(len(primitive)):
        # R_cart = L R L^-1
        rot_cartesian = similarity_transformation(
            primitive.cell.T, rotations[map_operations[i]]
        )
        # R_cart^T B R_cart^-T (inverse rotation is required to transform)
        borns[i] = similarity_transformation(rot_cartesian.T, borns[map_atoms[i]])
#
# phonopy.yaml
#
def is_file_phonopy_yaml(filename, keyword="phonopy"):
    """Check whether the file is phonopy.yaml like file or not.

    Parameters
    ----------
    filename : str
        Filename.
    keyword : str
        When this keyword is found among the top-level keys returned by the
        yaml loader, this function returns True.

    Example
    -------
    The initial part of phonopy_disp.yaml is like below, so the default
    keyword "phonopy" identifies it.

    phonopy:
      version: 2.7.0
      ...
    """
    with open(filename, "r") as f:
        try:
            data = yaml.load(f, Loader=Loader)
        except yaml.YAMLError:
            # Not parseable as YAML -> not a phonopy.yaml-like file.
            return False
    if data is None:
        return False
    return keyword in data
#
# e-v.dat, thermal_properties.yaml
#
def read_thermal_properties_yaml(filenames):
    """Read thermal_properties.yaml files, one per volume point.

    Returns
    -------
    tuple
        (temperatures, cv, entropy, fe_phonon, num_modes,
        num_integrated_modes), where cv, entropy and fe_phonon are arrays of
        shape (temperatures, volumes).

    Raises
    ------
    RuntimeError
        When the temperature ranges of the files disagree.
    """
    thermal_properties = []
    num_modes = []
    num_integrated_modes = []
    for filename in filenames:
        with open(filename) as f:
            tp_yaml = yaml.load(f, Loader=Loader)
            thermal_properties.append(tp_yaml["thermal_properties"])
            if "num_modes" in tp_yaml and "num_integrated_modes" in tp_yaml:
                num_modes.append(tp_yaml["num_modes"])
                num_integrated_modes.append(tp_yaml["num_integrated_modes"])

    # All files must share the temperature grid of the first file.
    temperatures = [v["temperature"] for v in thermal_properties[0]]
    temp = []
    cv = []
    entropy = []
    fe_phonon = []
    for i, tp in enumerate(thermal_properties):
        temp.append([v["temperature"] for v in tp])
        if not np.allclose(temperatures, temp):
            msg = [
                "",
            ]
            msg.append("Check your input files")
            msg.append("Disagreement of temperature range or step")
            for t, fname in zip(temp, filenames):
                msg.append(
                    "%s: Range [ %d, %d ], Step %f"
                    % (fname, int(t[0]), int(t[-1]), t[1] - t[0])
                )
            msg.append("")
            msg.append("Stop phonopy-qha")
            # BUG FIX: RuntimeError was previously raised with the list
            # itself, which rendered as a Python list repr; join the lines.
            raise RuntimeError("\n".join(msg))
        cv.append([v["heat_capacity"] for v in tp])
        entropy.append([v["entropy"] for v in tp])
        fe_phonon.append([v["free_energy"] for v in tp])

    # shape=(temperatures, volumes)
    cv = np.array(cv).T
    entropy = np.array(entropy).T
    fe_phonon = np.array(fe_phonon).T

    return (temperatures, cv, entropy, fe_phonon, num_modes, num_integrated_modes)
def read_v_e(filename):
    """Read a v-e.dat file and return (volumes, electronic_energies)."""
    data = _parse_QHA_data(filename)
    if data.shape[1] != 2:
        raise RuntimeError(
            "File format of %s is incorrect for reading e-v data." % filename
        )
    return data[:, 0], data[:, 1]
def read_efe(filename):
    """Read an fe-v.dat (efe) file.

    The first column holds temperatures; the remaining columns hold free
    energies, one column per volume.
    """
    data = _parse_QHA_data(filename)
    return data[:, 0], data[:, 1:]
def _parse_QHA_data(filename):
data = []
with open(filename) as f:
for line in f:
if line.strip() == "" or line.strip()[0] == "#":
continue
if "#" in line:
data.append([float(x) for x in line.split("#")[0].split()])
else:
data.append([float(x) for x in line.split()])
return np.array(data)
| atztogo/phonopy | phonopy/file_IO.py | Python | bsd-3-clause | 28,150 | [
"ABINIT",
"CRYSTAL",
"phonopy"
] | 6b9108fc29f70f3b0e17d5a22136def96dbfc44859cc270992713c177df70c56 |
#===============================================================================
#
# CUDATemplates.py
#
# This file is part of ANNarchy.
#
# Copyright (C) 2016-2018 Julien Vitay <julien.vitay@gmail.com>,
# Helge Uelo Dinkelbach <helge.dinkelbach@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ANNarchy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#===============================================================================
population_header = """/*
* ANNarchy-version: %(annarchy_version)s
*/
#pragma once
#include "ANNarchy.h"
// host defines
extern %(float_prec)s dt;
extern long int t;
// RNG - defined in ANNarchy.cu
extern unsigned long long global_seed;
extern void init_curand_states( int N, curandState* states, unsigned long long seed );
%(include_additional)s
%(include_profile)s
%(extern_global_operations)s
%(struct_additional)s
///////////////////////////////////////////////////////////////
// Main Structure for the population of id %(id)s (%(name)s)
///////////////////////////////////////////////////////////////
struct PopStruct%(id)s{
int size; // Number of neurons
bool _active; // Allows to shut down the whole population
int max_delay; // Maximum number of steps to store for delayed synaptic transmission
// CUDA launch configuration
cudaStream_t stream;
unsigned int _nb_blocks;
unsigned int _threads_per_block;
// Access functions used by cython wrapper
int get_size() { return size; }
void set_size(int s) { size = s; }
int get_max_delay() { return max_delay; }
void set_max_delay(int d) { max_delay = d; }
bool is_active() { return _active; }
void set_active(bool val) { _active = val; }
%(declare_spike_arrays)s
// Neuron specific parameters and variables
%(declare_parameters_variables)s
%(declare_delay)s
%(declare_FR)s
%(declare_additional)s
// Profiling
%(declare_profile)s
// Access methods to the parameters and variables
%(access_parameters_variables)s
%(access_additional)s
// Method called to initialize the data structures
void init_population() {
#ifdef _DEBUG
std::cout << "PopStruct%(id)s::init_population()" << std::endl;
#endif
_active = true;
//
// Launch configuration
_threads_per_block = 128;
_nb_blocks = static_cast<unsigned int>(ceil( static_cast<double>(size) / static_cast<double>(_threads_per_block) ) );
_nb_blocks = std::min<unsigned int>(_nb_blocks, 65535);
//
// Model equations/parameters
%(init_parameters_variables)s
%(init_spike)s
%(init_delay)s
%(init_FR)s
%(init_additional)s
%(init_profile)s
}
// Method called to reset the population
void reset() {
%(reset_spike)s
%(reset_delay)s
%(reset_additional)s
%(reset_read_flags)s
}
// Method to draw new random numbers
void update_rng() {
%(update_rng)s
}
// Method to enqueue output variables in case outgoing projections have non-zero delay
void update_delay() {
%(update_delay)s
}
// Method to dynamically change the size of the queue for delayed variables
void update_max_delay(int value) {
%(update_max_delay)s
}
// Main method to update neural variables
void update() {
%(update_variables)s
}
// Mean-firing rate computed on host
void update_FR() {
%(update_FR)s
}
// Stop condition
%(stop_condition)s
// Memory transfers
void host_to_device() {
%(host_to_device)s
}
void device_to_host() {
%(device_to_host)s
}
// Memory Management: track memory consumption
long int size_in_bytes() {
long int size_in_bytes = 0;
%(determine_size)s
return size_in_bytes;
}
// Memory Management: clear container
void clear() {
%(clear_container)s
}
};
"""
# C-like definition of neuron attributes, where 'local' is used if values can vary across
# neurons, whereas 'global' is used if values are common to all neurons. Currently two
# types of sets are defined: openmp and cuda. In the cuda case additional 'dirty' flags are
# created.
#
# Parameters:
#
# type: data type of the variable (double, float, int ...)
# name: name of the variable
# attr_type: either 'variable' or 'parameter'
#
attribute_decl = {
'local': """
// Local attribute %(name)s
std::vector< %(type)s > %(name)s;
%(type)s *gpu_%(name)s;
long int %(name)s_device_to_host;
bool %(name)s_host_to_device;
""",
'global': {
'parameter': """
// Global parameter %(name)s
%(type)s %(name)s;
""",
'variable': """
// Global variable %(name)s
%(type)s %(name)s;
%(type)s *gpu_%(name)s;
long int %(name)s_device_to_host;
bool %(name)s_host_to_device;
"""
}
}
# c like definition of accessors for neuron attributes, whereas 'local' is used if values can vary
# across neurons, consequently 'global' is used if values are common to all neurons. Currently two
# types of sets are defined: openmp and cuda. In cuda case additional 'dirty' flags are created for
# each variable.
#
# Parameters:
#
# type: data type of the variable (double, float, int ...)
# name: name of the variable
# attr_type: either 'variable' or 'parameter'
#
attribute_acc = {
'local_get_all': """
// Local %(attr_type)s %(name)s
if ( name.compare("%(name)s") == 0 ) {
%(read_dirty_flag)s
return %(name)s;
}
""",
'local_get_single': """
// Local %(attr_type)s %(name)s
if ( name.compare("%(name)s") == 0 ) {
%(read_dirty_flag)s
return %(name)s[rk];
}
""",
'local_set_all': """
// Local %(attr_type)s %(name)s
if ( name.compare("%(name)s") == 0 ) {
%(name)s = value;
%(write_dirty_flag)s
return;
}
""",
'local_set_single': """
// Local %(attr_type)s %(name)s
if ( name.compare("%(name)s") == 0 ) {
%(name)s[rk] = value;
%(write_dirty_flag)s
return;
}
""",
'global_get': """
// Global %(attr_type)s %(name)s
if ( name.compare("%(name)s") == 0 ) {
%(read_dirty_flag)s
return %(name)s;
}
""",
'global_set': """
// Global %(attr_type)s %(name)s
if ( name.compare("%(name)s") == 0 ) {
%(name)s = value;
%(write_dirty_flag)s
return;
}
"""
}
attribute_template = {
'local': """
std::vector<%(ctype)s> get_local_attribute_all_%(ctype_name)s(std::string name) {
%(local_get1)s
// should not happen
std::cerr << "PopStruct%(id)s::get_local_attribute_all_%(ctype_name)s: " << name << " not found" << std::endl;
return std::vector<%(ctype)s>();
}
%(ctype)s get_local_attribute_%(ctype_name)s(std::string name, int rk) {
%(local_get2)s
// should not happen
std::cerr << "PopStruct%(id)s::get_local_attribute_%(ctype_name)s: " << name << " not found" << std::endl;
return static_cast<%(ctype)s>(0.0);
}
void set_local_attribute_all_%(ctype_name)s(std::string name, std::vector<%(ctype)s> value) {
%(local_set1)s
// should not happen
std::cerr << "PopStruct%(id)s::set_local_attribute_all_%(ctype_name)s: " << name << " not found" << std::endl;
}
void set_local_attribute_%(ctype_name)s(std::string name, int rk, %(ctype)s value) {
%(local_set2)s
// should not happen
std::cerr << "PopStruct%(id)s::set_local_attribute_%(ctype_name)s: " << name << " not found" << std::endl;
}
""",
'global': """
%(ctype)s get_global_attribute_%(ctype_name)s(std::string name) {
%(global_get)s
// should not happen
std::cerr << "PopStruct%(id)s::get_global_attribute_%(ctype_name)s: " << name << " not found" << std::endl;
return static_cast<%(ctype)s>(0.0);
}
void set_global_attribute_%(ctype_name)s(std::string name, %(ctype)s value) {
%(global_set)s
std::cerr << "PopStruct%(id)s::set_global_attribute_%(ctype_name)s: " << name << " not found" << std::endl;
}
"""
}
# Initialization of parameters due to the init_population method.
#
# Parameters:
#
# name: name of the variable
# init: initial value
attribute_cpp_init = {
'local': """
// Local %(attr_type)s %(name)s
%(name)s = std::vector<%(type)s>(size, %(init)s);
cudaMalloc(&gpu_%(name)s, size * sizeof(%(type)s));
cudaMemcpy(gpu_%(name)s, %(name)s.data(), size * sizeof(%(type)s), cudaMemcpyHostToDevice);
#ifdef _DEBUG
cudaError_t err_%(name)s = cudaGetLastError();
if ( err_%(name)s != cudaSuccess )
std::cout << " allocation of %(name)s failed: " << cudaGetErrorString(err_%(name)s) << std::endl;
#endif
// memory transfer flags
%(name)s_host_to_device = false;
%(name)s_device_to_host = t;
""",
'global': {
'parameter': """
// Global parameter %(name)s
%(name)s = 0.0;
""",
'variable': """
// Global variable %(name)s
%(name)s = %(init)s;
cudaMalloc(&gpu_%(name)s, sizeof(%(type)s));
cudaMemcpy(gpu_%(name)s, &%(name)s, sizeof(%(type)s), cudaMemcpyHostToDevice);
#ifdef _DEBUG
cudaError_t err_%(name)s = cudaGetLastError();
if ( err_%(name)s != cudaSuccess )
std::cout << " allocation of %(name)s failed: " << cudaGetErrorString(err_%(name)s) << std::endl;
#endif
"""
}
}
# We need to initialize the queue directly with the
# values (init) as the data arrays for the variables are
# only updated in front of a simulate call.
attribute_delayed = {
'local': {
'declare': """
std::deque< %(type)s* > gpu_delayed_%(var)s; // list of gpu arrays""",
'init': """
gpu_delayed_%(name)s = std::deque< %(type)s* >(max_delay, NULL);
std::vector<%(type)s> tmp_%(name)s = std::vector<%(type)s>( size, 0.0);
for ( int i = 0; i < max_delay; i++ ) {
cudaMalloc( (void**)& gpu_delayed_%(name)s[i], sizeof(%(type)s) * size);
cudaMemcpy(gpu_delayed_%(name)s[i], tmp_%(name)s.data(), size * sizeof(%(type)s), cudaMemcpyHostToDevice);
}
#ifdef _DEBUG
cudaError_t err_delay_%(name)s = cudaGetLastError();
if (err_delay_%(name)s != cudaSuccess)
std::cout << "pop%(id)s - init delay %(name)s: " << cudaGetErrorString(err_delay_%(name)s) << std::endl;
#endif
""",
'clear': """
for ( int i = 0; i < max_delay; i++ )
cudaFree( gpu_delayed_%(name)s[i] );
gpu_delayed_%(name)s.clear();
gpu_delayed_%(name)s.shrink_to_fit();
""",
'update': """
%(type)s* last_%(name)s = gpu_delayed_%(name)s.back();
gpu_delayed_%(name)s.pop_back();
gpu_delayed_%(name)s.push_front(last_%(name)s);
cudaMemcpy( last_%(name)s, gpu_%(name)s, sizeof(%(type)s) * size, cudaMemcpyDeviceToDevice );
#ifdef _DEBUG
cudaError_t err_delay_%(name)s = cudaGetLastError();
if (err_delay_%(name)s != cudaSuccess)
std::cout << "pop%(id)s - delay %(name)s: " << cudaGetErrorString(err_delay_%(name)s) << std::endl;
#endif
""",
# Implementation notice:
# to ensure correctness of results, we need transfer from host here. The corresponding
# gpu arrays gpu_%(name)s are not resetted at this point of time (they will be resetted
# if simulate() invoked.
'reset' : """
// reset %(name)s
for ( int i = 0; i < gpu_delayed_%(name)s.size(); i++ ) {
cudaMemcpy( gpu_delayed_%(name)s[i], %(name)s.data(), sizeof(%(type)s) * size, cudaMemcpyHostToDevice );
}
#ifdef _DEBUG
cudaError_t err_delay_%(name)s = cudaGetLastError();
if ( err_delay_%(name)s != cudaSuccess )
std::cout << "pop%(id)s - reset delayed %(name)s failed: " << cudaGetErrorString(err_delay_%(name)s) << std::endl;
#endif
""",
'resize': """
std::cerr << "ProjStruct::update_max_delay() is not implemented for local variables." << std::endl;
"""
},
'global': {
'declare': """
std::deque< %(type)s* > gpu_delayed_%(var)s; // list of gpu arrays""",
'init': """
gpu_delayed_%(name)s = std::deque< %(type)s* >(max_delay, NULL);
%(type)s tmp_%(name)s = static_cast<%(type)s>(0.0);
for ( int i = 0; i < max_delay; i++ ) {
cudaMalloc( (void**)& gpu_delayed_%(name)s[i], sizeof(%(type)s));
cudaMemcpy( gpu_delayed_%(name)s[i], &tmp_%(name)s, sizeof(%(type)s), cudaMemcpyDeviceToDevice );
}
#ifdef _DEBUG
cudaError_t err_delay_%(name)s = cudaGetLastError();
if (err_delay_%(name)s != cudaSuccess)
std::cout << "pop%(id)s - init delay %(name)s: " << cudaGetErrorString(err_delay_%(name)s) << std::endl;
#endif
""",
'clear': """
for ( int i = 0; i < max_delay; i++ )
cudaFree( gpu_delayed_%(name)s[i] );
gpu_delayed_%(name)s.clear();
gpu_delayed_%(name)s.shrink_to_fit();
""",
'update': """
%(type)s* last_%(name)s = gpu_delayed_%(name)s.back();
gpu_delayed_%(name)s.pop_back();
gpu_delayed_%(name)s.push_front(last_%(name)s);
cudaMemcpy( last_%(name)s, gpu_%(name)s, sizeof(%(type)s), cudaMemcpyDeviceToDevice );
#ifdef _DEBUG
cudaError_t err_delay_%(name)s = cudaGetLastError();
if (err_delay_%(name)s != cudaSuccess)
std::cout << "pop%(id)s - delay %(name)s: " << cudaGetErrorString(err_delay_%(name)s) << std::endl;
#endif
""",
# Implementation notice:
# to ensure correctness of results, we need transfer from host here. The corresponding
# gpu arrays gpu_%(name)s are not resetted at this point of time (they will be resetted
# if simulate() invoked.
'reset' : """
// reset %(name)s
for ( int i = 0; i < gpu_delayed_%(name)s.size(); i++ ) {
cudaMemcpy( gpu_delayed_%(name)s[i], &%(name)s, sizeof(%(type)s), cudaMemcpyHostToDevice );
}
#ifdef _DEBUG
cudaError_t err_delay_%(name)s = cudaGetLastError();
if ( err_delay_%(name)s != cudaSuccess )
std::cout << "pop%(id)s - reset delayed %(name)s failed: " << cudaGetErrorString(err_delay_%(name)s) << std::endl;
#endif
""",
'resize': """
std::cerr << "ProjStruct::update_max_delay() is not implemented for global variables. " << std::endl;
"""
}
}
# Transfer of variables before and after a simulation
#
# Parameters:
attribute_transfer = {
'HtoD_local': """
// %(attr_name)s: local
if( %(attr_name)s_host_to_device )
{
#ifdef _DEBUG
std::cout << "HtoD %(attr_name)s ( pop%(id)s )" << std::endl;
#endif
cudaMemcpy( gpu_%(attr_name)s, %(attr_name)s.data(), size * sizeof(%(type)s), cudaMemcpyHostToDevice);
%(attr_name)s_host_to_device = false;
#ifdef _DEBUG
cudaError_t err_%(attr_name)s = cudaGetLastError();
if ( err_%(attr_name)s != cudaSuccess )
std::cout << " error: " << cudaGetErrorString(err_%(attr_name)s) << std::endl;
#endif
}
""",
'HtoD_global': """
// %(attr_name)s: global
if( %(attr_name)s_host_to_device )
{
#ifdef _DEBUG
std::cout << "HtoD: %(attr_name)s ( pop%(id)s )" << std::endl;
#endif
cudaMemcpy( gpu_%(attr_name)s, &%(attr_name)s, sizeof(%(type)s), cudaMemcpyHostToDevice);
%(attr_name)s_host_to_device = false;
#ifdef _DEBUG
cudaError_t err_%(attr_name)s = cudaGetLastError();
if ( err_%(attr_name)s != cudaSuccess )
std::cout << " error: " << cudaGetErrorString(err_%(attr_name)s) << std::endl;
#endif
}
""",
'DtoH_local':"""
// %(attr_name)s: local
if( %(attr_name)s_device_to_host < t ) {
#ifdef _DEBUG
std::cout << "DtoH: %(attr_name)s ( pop%(id)s )" << std::endl;
#endif
cudaMemcpy( %(attr_name)s.data(), gpu_%(attr_name)s, size * sizeof(%(type)s), cudaMemcpyDeviceToHost);
#ifdef _DEBUG
cudaError_t err_%(attr_name)s = cudaGetLastError();
if ( err_%(attr_name)s != cudaSuccess )
std::cout << " error: " << cudaGetErrorString(err_%(attr_name)s) << std::endl;
#endif
%(attr_name)s_device_to_host = t;
}
""",
'DtoH_global':"""
// %(attr_name)s: global
cudaMemcpy( &%(attr_name)s, gpu_%(attr_name)s, sizeof(%(type)s), cudaMemcpyDeviceToHost);
"""
}
# Definition for the usage of CUDA device random
# number generators
#
# Parameters:
#
# rd_name:
# rd_update:
curand = {
'local': {
'decl': """
curandState* gpu_%(rd_name)s;
""",
'init': """
cudaMalloc((void**)&gpu_%(rd_name)s, size * sizeof(curandState));
init_curand_states( size, gpu_%(rd_name)s, global_seed );
""",
'clear': """
cudaFree(gpu_%(rd_name)s);
""",
},
'global': {
'decl': """
curandState* gpu_%(rd_name)s;
""",
'init': """
cudaMalloc((void**)&gpu_%(rd_name)s, sizeof(curandState));
init_curand_states( 1, gpu_%(rd_name)s, global_seed );
#ifdef _DEBUG
cudaError_t err_%(rd_name)s = cudaGetLastError();
if ( err_%(rd_name)s != cudaSuccess )
std::cout << "pop%(id)s - init_population: " << cudaGetErrorString(err_%(rd_name)s) << std::endl;
#endif
""",
'clear': ""
}
}
spike_specific = {
'spike': {
'declare':"""
// Structures for managing spikes
std::vector<long int> last_spike;
long int* gpu_last_spike;
std::vector<int> spiked;
int* gpu_spiked;
unsigned int spike_count;
unsigned int* gpu_spike_count;
""",
'init': """
// Spiking variables
spiked = std::vector<int>(size, 0);
cudaMalloc((void**)&gpu_spiked, size * sizeof(int));
cudaMemcpy(gpu_spiked, spiked.data(), size * sizeof(int), cudaMemcpyHostToDevice);
last_spike = std::vector<long int>(size, -10000L);
cudaMalloc((void**)&gpu_last_spike, size * sizeof(long int));
cudaMemcpy(gpu_last_spike, last_spike.data(), size * sizeof(long int), cudaMemcpyHostToDevice);
spike_count = 0;
cudaMalloc((void**)&gpu_spike_count, sizeof(unsigned int));
cudaMemcpy(gpu_spike_count, &spike_count, sizeof(unsigned int), cudaMemcpyHostToDevice);
""",
'reset': """
spiked = std::vector<int>(size, 0);
last_spike.clear();
last_spike = std::vector<long int>(size, -10000L);
spike_count = 0;
"""
},
'refractory': {
'declare': """
// Refractory period
std::vector<int> refractory;
int *gpu_refractory;
bool refractory_dirty;
std::vector<int> refractory_remaining;
int *gpu_refractory_remaining;
""",
'init': """
// Refractory period
refractory = std::vector<int>(size, 0);
cudaMalloc((void**)&gpu_refractory, size * sizeof(int));
refractory_remaining = std::vector<int>(size, 0);
cudaMemcpy(gpu_refractory, refractory.data(), size * sizeof(int), cudaMemcpyHostToDevice);
refractory_dirty = false;
cudaMalloc((void**)&gpu_refractory_remaining, size * sizeof(int));
cudaMemcpy(gpu_refractory_remaining, refractory_remaining.data(), size * sizeof(int), cudaMemcpyHostToDevice);
""",
'init_extern': """
// Refractory period
refractory_remaining = std::vector<int>(size, 0);
cudaMalloc((void**)&gpu_refractory_remaining, size * sizeof(int));
cudaMemcpy(gpu_refractory_remaining, refractory_remaining.data(), size * sizeof(int), cudaMemcpyHostToDevice);
""",
'reset': """
refractory_remaining.clear();
refractory_remaining = std::vector<int>(size, 0);
cudaMemcpy(gpu_refractory_remaining, refractory_remaining.data(), size * sizeof(int), cudaMemcpyHostToDevice);
""",
'pyx_wrapper': """
# Refractory period
cpdef np.ndarray get_refractory(self):
return np.array(pop%(id)s.refractory)
cpdef set_refractory(self, np.ndarray value):
pop%(id)s.refractory = value
pop%(id)s.refractory_dirty = True
"""
},
'init_event-driven': """
last_spike = std::vector<long int>(size, -10000L);
"""
}
# Contains all codes related to the population update
#
# 1st level distinguish 'local' and 'global' update
# 2nd level distinguish 'body', 'header' and 'call' template
population_update_kernel = {
'global': {
'body': """// Updating global variables of population %(id)s
__global__ void cuPop%(id)s_global_step( %(add_args)s )
{
%(pre_loop)s
%(global_eqs)s
}
""",
'header': "__global__ void cuPop%(id)s_global_step( %(add_args)s );\n",
'call': """
cuPop%(id)s_global_step<<< 1, 1, 0, pop%(id)s.stream >>>( %(add_args)s );
#ifdef _DEBUG
cudaError_t err_pop%(id)s_global_step = cudaGetLastError();
if( err_pop%(id)s_global_step != cudaSuccess) {
std::cout << "pop%(id)s_step: " << cudaGetErrorString(err_pop%(id)s_global_step) << std::endl;
exit(0);
}
#endif
"""
},
'local': {
'body': """// Updating local variables of population %(id)s
__global__ void cuPop%(id)s_local_step( %(add_args)s )
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
%(pre_loop)s
while ( i < %(pop_size)s )
{
%(local_eqs)s
i += blockDim.x * gridDim.x;
}
}
""",
'header': "__global__ void cuPop%(id)s_local_step( %(add_args)s );\n",
'call': """
#if defined (__pop%(id)s_nb__)
cuPop%(id)s_local_step<<< __pop%(id)s_nb__, __pop%(id)s_tpb__, 0, pop%(id)s.stream >>>( %(add_args)s );
#else
cuPop%(id)s_local_step<<< pop%(id)s._nb_blocks, pop%(id)s._threads_per_block, 0, pop%(id)s.stream >>>( %(add_args)s );
#endif
#ifdef _DEBUG
cudaError_t err_pop%(id)s_local_step = cudaGetLastError();
if( err_pop%(id)s_local_step != cudaSuccess) {
std::cout << "pop%(id)s_step: " << cudaGetErrorString(err_pop%(id)s_local_step) << std::endl;
exit(0);
}
#endif
"""
}
}
spike_gather_kernel = {
'body': """
// gpu device kernel for population %(id)s
__global__ void cuPop%(id)s_spike_gather( unsigned int* num_events, %(default)s%(args)s )
{
int i = threadIdx.x;
%(decl)s
// Determine if neuron i emited a spike
while ( i < %(pop_size)s )
{
%(spike_gather)s
i += blockDim.x;
}
}
""",
'header': """
__global__ void cuPop%(id)s_spike_gather( unsigned int* num_events, %(default)s%(args)s );
""",
    # Because the kernel relies on atomicAdd operations, multiple blocks would
    # not work correctly; consequently we spawn only one block.
'call': """
// Check if neurons emit a spike in population %(id)s
if ( pop%(id)s._active ) {
// Reset old events
clear_num_events<<< 1, 1, 0, pop%(id)s.stream >>>(pop%(id)s.gpu_spike_count);
// Compute current events
#if defined (__pop%(id)s_tpb__)
cuPop%(id)s_spike_gather<<< 1, __pop%(id)s_tpb__, 0, pop%(id)s.stream >>>(
pop%(id)s.gpu_spike_count,
/* default arguments */
%(default)s
/* other variables */
%(args)s );
#else
cuPop%(id)s_spike_gather<<< 1, pop%(id)s._threads_per_block, 0, pop%(id)s.stream >>>(
pop%(id)s.gpu_spike_count,
/* default arguments */
%(default)s
/* other variables */
%(args)s );
#endif
#ifdef _DEBUG
cudaError_t err_pop_spike_gather_%(id)s = cudaGetLastError();
if(err_pop_spike_gather_%(id)s != cudaSuccess)
std::cout << "pop%(id)s_spike_gather: " << cudaGetErrorString(err_pop_spike_gather_%(id)s) << std::endl;
#endif
// transfer back the spike counter (needed by record as well as launch psp - kernel)
cudaMemcpy( &pop%(id)s.spike_count, pop%(id)s.gpu_spike_count, sizeof(unsigned int), cudaMemcpyDeviceToHost );
#ifdef _DEBUG
cudaError_t err = cudaGetLastError();
if ( err != cudaSuccess )
std::cout << "record_spike_count: " << cudaGetErrorString(err) << std::endl;
#endif
// transfer back the spiked indices if there were any events (needed by record)
if (pop%(id)s.spike_count > 0)
{
cudaMemcpy( pop%(id)s.spiked.data(), pop%(id)s.gpu_spiked, pop%(id)s.spike_count*sizeof(int), cudaMemcpyDeviceToHost );
#ifdef _DEBUG
err = cudaGetLastError();
if ( err != cudaSuccess )
std::cout << "record_spike: " << cudaGetErrorString(err) << std::endl;
#endif
}
}
"""
}
#
# Final dictionary
#
# Final dictionary: the public lookup table used by the CUDA population
# generator, mapping a template category to the snippet dictionaries above.
cuda_templates = {
    'population_header': population_header,
    'attr_decl': attribute_decl,
    'attr_acc': attribute_acc,
    'accessor_template': attribute_template,
    'attribute_cpp_init': attribute_cpp_init,
    'attribute_delayed': attribute_delayed,
    'attribute_transfer': attribute_transfer,
    'rng': curand,
    'spike_specific': spike_specific
}
| vitay/ANNarchy | ANNarchy/generator/Population/CUDATemplates.py | Python | gpl-2.0 | 25,938 | [
"NEURON"
] | 73cc5c7d59a38093f958fb0d864ac7eb2e3ff444905f08cb9f0f3279903851c6 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 11 17:20:36 2014
@author: Hera
"""
from __future__ import division
from __future__ import print_function
from builtins import range
from past.utils import old_div
import nplab.instrument.camera
import nplab.instrument.stage
from nplab.instrument import Instrument
import cv2
from scipy import ndimage
from traits.api import HasTraits, Button, Float, Int, Property, Range, Array, on_trait_change, Instance
from traitsui.api import View, VGroup, Item
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import time, threading
class CameraStageMapper(Instrument, HasTraits):
    """
    This class sits between a camera and a stage, allowing coordinate conversion.

    Coordinate Systems
    ------------------
    We consider the centre of the image to be our current position, and give
    the position of each pixel on the camera such that it would be brought to
    the centre of the camera image by moving the stage to (-position).
    """
    # GUI button that starts a calibration run (see calibrate_in_background).
    do_calibration = Button()
    # Distance (um) to move in each direction when calibrating.
    calibration_distance = Float(7, tooltip="Distance to move in each direction when calibrating, in um")
    # 2x2 matrix mapping camera-point displacements (0-1 units) to sample
    # displacements (um); set by calibrate().
    camera_to_sample = Array(shape=(2,2))
    # GUI button that starts an autofocus run.
    do_autofocus = Button()
    # Autofocus search range and step size, in um.
    autofocus_range = Range(0., 100., 5.)
    autofocus_step = Range(0., 10., 0.5)
    # Default coarse-then-fine Z offsets tried by the autofocus routine.
    autofocus_default_ranges = [np.arange(-5,5,0.5),np.arange(-1,1,0.2)]
    # Frames discarded / delay applied after motion so images are fresh
    # (used by flush_camera_and_wait).
    frames_to_discard = Int(1)
    settling_time = Float(0.2)
    # If True, live view is paused while automated moves/acquisitions run.
    disable_live_view = True
    # Traits UI layout for the calibration/autofocus control panel.
    traits_view = View(
        VGroup(
            Item(name="calibration_distance"),
            Item(name="do_calibration"),
            Item(name="autofocus_range"),
            Item(name="autofocus_step"),
            Item(name="do_autofocus"),
            Item(name="camera_to_sample"),
        ),
        title="Camera-Stage Mapper",
    )
def __init__(self, camera, stage):
    """Create a mapper linking a camera to a translation stage.

    camera: nplab camera instrument providing images and a click callback.
    stage: nplab stage instrument used to move the sample.
    """
    super(CameraStageMapper, self).__init__()
    self.camera = camera
    self.stage = stage
    # Start with an identity mapping; calibrate() refines this.
    self.camera_to_sample = np.identity(2)
    # Normalised (0-1) camera point considered to be "centre".
    self.camera_centre = (0.5,0.5)
    # Clicking on the camera image moves that point to the centre.
    self.camera.set_legacy_click_callback(self.move_to_camera_point)
    self._action_lock = threading.Lock() #prevent us from doing two things involving motion at once!
############ Coordinate Conversion ##################
def camera_pixel_to_point(self, p):
    """convert pixel coordinates to point coordinates (normalised 0-1)"""
    frame_shape = np.array(self.camera.latest_frame.shape[0:2], dtype=float)
    return old_div(np.array(p, dtype=float), frame_shape)
def camera_point_to_pixel(self, p):
    """convert point coordinates (normalised 0-1) to pixel"""
    frame_shape = np.array(self.camera.latest_frame.shape[0:2])
    return np.array(p) * frame_shape
def camera_pixel_to_sample(self, p):
    """Convert camera pixel coordinates to sample coordinates (um)."""
    return self.camera_point_to_sample(self.camera_pixel_to_point(p))
def camera_point_to_sample(self, p):
    """Convert a normalised (0-1) camera point to sample coordinates (um)."""
    offset_from_centre = np.array(p) - np.array(self.camera_centre)
    sample_displacement = np.dot(offset_from_centre, self.camera_to_sample)
    return self.camera_centre_position()[0:2] + sample_displacement
def camera_point_displacement_to_sample(self, p):
    """Convert a displacement from camera point units to microns"""
    # Relative conversion: no centre offset, just the calibration matrix.
    return np.dot(np.array(p), self.camera_to_sample)
def camera_pixel_displacement_to_sample(self, p):
    """Convert from pixels to microns for relative moves"""
    return self.camera_point_displacement_to_sample(self.camera_pixel_to_point(p))
############## Stage Control #####################
def move_to_camera_pixel(self, p):
    """bring the object at pixel p=(x,y) on the camera to the centre"""
    # Convert to a normalised point, then delegate the move.
    return self.move_to_camera_point(*tuple(self.camera_pixel_to_point(p)))
def move_to_camera_point(self, x, y=None):
    """Move the stage to centre point (x,y) on the camera

    (x,y) is the position on the camera, where x,y range from 0 to 1.
    May also be called with a single 2-element point as the first argument
    (this is the form used by the camera click callback).
    """
    if y is None:
        p = x
    else:
        p = (x, y)
    # camera_point_to_sample performs exactly the displacement maths that was
    # previously duplicated here; delegate to it so the transform lives in
    # one place.
    self.move_to_sample_position(self.camera_point_to_sample(p))
def move_to_sample_position(self, p):
    """Move the stage to centre sample position p on the camera"""
    # Stage coordinates are the negative of sample coordinates
    # (see the class docstring).
    self.stage.move(-np.array(p))
def camera_centre_position(self):
    """return the position of the centre of the camera view, on the sample"""
    # Sample position is minus the stage position, by convention.
    return -self.stage.position
################## Closed loop stage control #################
def centre_on_feature(self, feature_image, search_size=(50,50), tolerance=0.3, max_iterations=10, **kwargs):
    """Adjust the stage slightly to centre on the given feature.

    This should be called immediately after moving the stage to centre on a
    feature in the image: first move the stage to bring that feature to the
    centre, then call this function to fine-tune.

    Arguments
    =========
    * feature_image: an RGB image of a feature.  Must be
    significantly smaller than the camera image.
    * search_size: size of the area around the image centre to search, in
    pixels.  Should be a tuple of length 2.
    * tolerance: how accurately we're going to centre (in um)
    * max_iterations: maximum number of shifts

    Extra keyword arguments are passed through to centre_on_feature_iterate.
    """
    shift = [999., 999.]  # sentinel, guaranteed larger than any sane tolerance
    n = 0
    if self.disable_live_view:
        camera_live_view = self.camera.live_view
        self.camera.live_view = False
    while np.sqrt(np.sum(np.array(shift)**2)) > tolerance and n < max_iterations:
        n += 1
        try:
            shift = self.centre_on_feature_iterate(feature_image,
                                                   search_size=search_size,
                                                   **kwargs)
            print("Centring on feature: moving by %.2f, %.2f" % tuple(shift))
        except Exception as e:
            # Previously a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and hid the error entirely.
            # Don't worry - we incremented n, so this won't go on forever.
            print("Something went wrong with auto-centering - trying again.")
            print("Error was:", e)
    if np.sqrt(np.sum(np.array(shift)**2)) > tolerance:
        print("Performed %d iterations but did not converge on the feature to within %.3fum" % (n, tolerance))
    else:
        print("Centered on feature in %d iterations." % n)
    if self.disable_live_view:
        self.camera.live_view = camera_live_view  # reenable live view if necessary
def centre_on_feature_iterate(self, feature_image, search_size=(50,50), image_filter=lambda x: x):
    """Measure the displacement of the sample and move to correct it.

    Arguments:
    feature_image : numpy.ndarray
        This is the feature that should be at the centre of the camera.  It
        must be smaller than the camera image + search size.
    search_size : (int, int)
        The distance in pixels to search over.  Defaults to (50,50).
    image_filter : function (optional)
        If supplied, run this function on the image before cross-correlating
        (you can use this to cross-correlate in grayscale, for example).

    Returns the applied shift in sample (stage) units.
    """
    # Pre-bind so the diagnostic block below can't raise NameError if we
    # fail before these are assigned (the old handler could do exactly that).
    corr = None
    peak = None
    try:
        self.flush_camera_and_wait()
        current_image = image_filter(self.camera.color_image())  # get the current image
        # Correlate: NB with TM_SQDIFF_NORMED the match position is the MINIMUM.
        corr = cv2.matchTemplate(current_image, feature_image, cv2.TM_SQDIFF_NORMED)
        # Restrict to the search area around the centre.  Slice indices must
        # be ints: with `from __future__ import division` in force, the old
        # float expressions raise TypeError when indexing numpy arrays.
        row_lo = int(corr.shape[0]/2. - search_size[0]/2.)
        row_hi = int(corr.shape[0]/2. + search_size[0]/2.)
        col_lo = int(corr.shape[1]/2. - search_size[1]/2.)
        col_hi = int(corr.shape[1]/2. + search_size[1]/2.)
        corr = -corr[row_lo:row_hi, col_lo:col_hi]  # invert so we can find a peak
        corr += (corr.max()-corr.min())*0.1 - corr.max()  # background-subtract 90% of maximum
        # Zero out any negative pixels - but there should always be > 0 nonzero pixels.
        corr = cv2.threshold(corr, 0, 0, cv2.THRESH_TOZERO)[1]
        # Take the centroid (NB this is of grayscale values, not just binary).
        peak = ndimage.measurements.center_of_mass(corr)
        self.move_to_camera_pixel(np.array(peak) - np.array(corr.shape[0:2])/2. + np.array(current_image.shape[0:2])/2.)
        return self.camera_pixel_displacement_to_sample(np.array(peak) - np.array(corr.shape[0:2])/2.)
    except Exception as e:
        print("Exception: ", e)
        print("Feature: ", feature_image)
        print("Feature Size: ", feature_image.shape)
        if corr is not None:
            print("Corr: ", corr)
            print("Corr size: ", corr.shape)
            print("sum(corr): ", np.sum(corr))
            print("max(corr): ", np.max(corr))
        if peak is not None:
            print("Peak: ", peak)
        raise e
########## Calibration ###############
@on_trait_change("do_calibration")
def calibrate_in_background(self):
    """Run calibrate() in a background thread (fired by the GUI button)."""
    threading.Thread(target=self.calibrate).start()
def calibrate(self, dx=None):
    """Move the stage in a square and set the transformation matrix.

    Moves the stage +/-dx um in X and Y, tracks how a central image template
    shifts in the camera view, and least-squares fits the 2x2 matrix
    `camera_to_sample` mapping camera-point displacements to sample microns.

    dx : float (optional)
        Calibration move distance in um; defaults to `calibration_distance`.
    """
    with self._action_lock:
        if dx is None: dx=self.calibration_distance #use a sensible default
        here = self.camera_centre_position()
        # Four corner positions of the calibration square (Z unchanged).
        pos = [np.array([i,j,0]) for i in [-dx,dx] for j in [-dx,dx]]
        camera_pos = []
        self.camera.update_latest_frame() # make sure we've got a fresh image
        initial_image = self.camera.gray_image()
        w, h, = initial_image.shape
        # Use the central quarter of the image as the matching template.
        template = initial_image[old_div(w,4):old_div(3*w,4),old_div(h,4):old_div(3*h,4)] #.astype(np.float)
        #template -= cv2.blur(template, (21,21), borderType=cv2.BORDER_REPLICATE)
        # self.calibration_template = template
        # self.calibration_images = []
        camera_live_view = self.camera.live_view
        if self.disable_live_view:
            self.camera.live_view = False
        for p in pos:
            self.move_to_sample_position(here + p)
            self.flush_camera_and_wait()
            current_image = self.camera.gray_image()
            # NB with TM_SQDIFF_NORMED the best match is the MINIMUM.
            corr = cv2.matchTemplate(current_image,template,cv2.TM_SQDIFF_NORMED)
            corr *= -1. #invert the image
            corr += (corr.max()-corr.min())*0.1 - corr.max() # background-subtract 90% of maximum
            corr = cv2.threshold(corr, 0, 0, cv2.THRESH_TOZERO)[1]
            # peak = np.unravel_index(corr.argmin(),corr.shape)
            peak = ndimage.measurements.center_of_mass(corr)
            # Record the template shift in pixels, relative to no motion.
            camera_pos.append(peak - old_div((np.array(current_image.shape) - \
                              np.array(template.shape)),2))
            # self.calibration_images.append({"image":current_image,"correlation":corr,"pos":p,"peak":peak})
        self.move_to_sample_position(here)
        self.flush_camera_and_wait()#otherwise we get a worrying "jump" when enabling live view...
        self.camera.live_view = camera_live_view
        #camera_pos now contains the displacements in pixels for each move
        sample_displacement = np.array([-p[0:2] for p in pos]) #nb need to convert to 2D, and the stage positioning is flipped from sample coords
        camera_displacement = np.array([self.camera_pixel_to_point(p) for p in camera_pos])
        print("sample was moved (in um):\n",sample_displacement)
        print("the image shifted (in fractions-of-a-camera):\n",camera_displacement)
        # NOTE(review): np.linalg.lstsq without an explicit rcond emits a
        # FutureWarning on modern numpy - consider rcond=None (confirm the
        # numpy version this must support before changing).
        A, res, rank, s = np.linalg.lstsq(camera_displacement, sample_displacement)
        self.camera_to_sample = A
def flush_camera_and_wait(self):
    """Take and discard frames so the next image from the camera is fresh.

    This functionality should really be in the camera, not the aligner!
    """
    time.sleep(self.settling_time)
    for _ in range(self.frames_to_discard):
        # acquire, then discard, an image from the camera
        self.camera.raw_image()
######## Image Tiling ############
def acquire_tiled_image(self, n_images=(3,3), dest=None, overlap=0.33,
                        autofocus_args={},live_plot=False, downsample=8):
    """Raster-scan the stage and take images, which we can later tile.

    Arguments:
    @param: n_images: A tuple of length 2 specifying the number of images
    to take in X and Y
    @param: dest: An HDF5 Group object to store the images in.  Each image
    will be tagged with metadata to mark where it was taken.  If no dest
    is specified, a new group will be created in the current datafile.
    @param: overlap: the fraction of each image to overlap with the
    adjacent one (it's important this is high enough to match them up)
    @param: autofocus_args: A dictionary of keyword arguments for the
    autofocus that occurs before each image is taken.  Set to None to
    disable autofocusing.
    @param: live_plot: if True, draw each tile in a matplotlib figure as
    it is acquired.
    @param: downsample: decimation factor applied to tiles when live
    plotting (display only; saved data is full resolution).

    NOTE(review): the mutable default `autofocus_args={}` is only ever read
    (passed as **kwargs), never mutated, so it is safe here; it cannot be
    replaced with the usual None sentinel because None already means
    "disable autofocus".
    """
    reset_interactive_mode = live_plot and not matplotlib.is_interactive()
    if live_plot:
        plt.ion()
        fig = plt.figure()
        axes = fig.add_subplot(111)
        axes.set_aspect(1)
    with self._action_lock:
        if dest is None:
            dest = self.create_data_group("tiled_image_%d") #or should this be in RAM??
        centre_position = self.camera_centre_position()[0:2] #only 2D
        # Tile indices centred on zero, e.g. (-1, 0, 1) for 3 tiles.
        x_indices = np.arange(n_images[0]) - (n_images[0] - 1)/2.0
        y_indices = np.arange(n_images[1]) - (n_images[1] - 1)/2.0
        for y_index in y_indices:
            for x_index in x_indices:
                position = centre_position + self.camera_point_displacement_to_sample(np.array([x_index, y_index]) * (1-overlap))
                self.move_to_sample_position(position) #go to the raster point
                if autofocus_args is not None:
                    self.autofocus(**autofocus_args)
                self.flush_camera_and_wait() #wait for the camera to be ready/stage to settle
                tile = dest.create_dataset("tile_%d",
                                           data=self.camera.color_image(),
                                           attrs=self.camera.metadata)
                tile.attrs.create("stage_position",self.stage.position)
                tile.attrs.create("camera_centre_position",self.camera_centre_position())
                if live_plot:
                    #Plot the image, in sample coordinates
                    corner_points = np.array([self.camera_point_to_sample((xcorner,ycorner))
                                              for ycorner in [0,1] for xcorner in [0,1]]) #positions of corners
                    plot_skewed_image(tile[::downsample, ::downsample, :],
                                      corner_points, axes=axes)
                    fig.canvas.draw()
            x_indices = x_indices[::-1] #reverse the X positions, so we do a snake-scan
        dest.attrs.set("camera_to_sample",self.camera_to_sample)
        dest.attrs.set("camera_centre",self.camera_centre)
        self.move_to_sample_position(centre_position) #go back to the start point
    if reset_interactive_mode:
        plt.ioff()
    return dest
######## Autofocus Stuff #########
def autofocus_merit_function(self): # we maximise this...
"""Take an image and calculate the focus metric, this is what we optimise.
Currently, this calculates the sum of the square of the Laplacian of the image
which should pick out sharp features quite effectively. It can, however, be
thrown off by very bright objects if the camera is saturated."""
self.flush_camera_and_wait()
# self.camera.update_latest_frame() #take an extra frame to make sure this one is fresh
img = self.camera.raw_image()
# return np.sum((img - cv2.blur(img,(21,21))).astype(np.single)**2)
return np.sum(cv2.Laplacian(cv2.cvtColor(img,cv2.COLOR_BGR2GRAY), ddepth=cv2.CV_32F)**2)
@on_trait_change("do_autofocus")
def autofocus_in_background(self):
def work():
self.autofocus_iterate(np.arange(old_div(-self.autofocus_range,2), old_div(self.autofocus_range,2), self.autofocus_step))
threading.Thread(target=work).start()
def autofocus_iterate(self, dz, method="centre_of_mass", noise_floor=0.3):
self._action_lock.acquire()
"""Move in z and take images. Move to the sharpest position."""
here = self.stage.position
positions = [here] #positions keeps track of where we sample
powers = [self.autofocus_merit_function()] #powers holds the value of the merit fn at each point
camera_live_view = self.camera.live_view
if self.disable_live_view:
self.camera.live_view = False
for z in dz:
self.stage.move(np.array([0,0,z])+here) #visit each point and evaluate merit function
# time.sleep(0.5)
positions.append(self.stage.position)
powers.append(self.autofocus_merit_function())
powers = np.array(powers)
positions = np.array(positions)
z = positions[:,2]
if method=="centre_of_mass":
threshold = powers.min() + (powers.max()-powers.min())*noise_floor #(powers.min() if len(powers)<4 else np.max([powers[z.argmin()],powers[z.argmax()]])) #ensure edges are zero
weights = powers - threshold
weights[weights<0] = 0. #zero out any negative values
if(np.sum(weights)==0):
new_position = positions[powers.argmax(),:]
else:
new_position = old_div(np.dot(weights, positions),np.sum(weights))
elif method=="parabola":
coefficients = np.polyfit(z, powers, deg=2) #fit a parabola
root = old_div(-coefficients[1],(2*coefficients[0])) #p = c[0]z**" + c[1]z + c[2] which has max (or min) at 2c[0]z + c[1]=0 i.e. z=-c[1]/2c[0]
if z.min() < root and root < z.max():
new_position = [here[0],here[1],root]
else:
new_position = positions[powers.argmax(),:]
else:
new_position = positions[powers.argmax(),:]
self.stage.move(new_position)
self.camera.live_view = camera_live_view
self._action_lock.release()
return new_position-here, positions, powers
def autofocus(self, ranges=None, max_steps=10):
"""move the stage to bring the sample into focus
Presently, it just does one iteration for each range passed in: usually
this would mean a coarse focus then a fine focus.
""" #NEEDS WORK!
if ranges is None:
ranges = self.autofocus_default_ranges
n=0
for r in ranges:
pos = self.autofocus_iterate(r)[0]
print("moving Z by %.3f" % pos[2])
n+=1
print("Autofocus: performed %d iterations" % n)
#if __name__ == '__main__':
#WARNING this is old, probably broken, code.
# import nplab.instrument.camera.lumenera as camera
# import nplab.instrument.stage.prior as prior_stage
# c = camera.Camera(0)
# s = prior_stage.ProScan()
#
# m = CameraStageMapper(c, s)
#
# m.autofocus_iterate(np.arange(-5,5,0.5))
# m.calibrate(5)
#
# c.edit_traits()
# m.edit_traits()
#
# def move_to_feature_at_point(x,y):
# #first, extract image of where we want to go
# p = m.camera_point_to_pixel([x,y])
# feature_image = c.color_image()[p[0]-25:p[0]+25, p[1]-25:p[1]+25]
# m.move_to_camera_point(x,y)
# time.sleep(0.5)
# shift=[999,999]
# while np.sqrt(np.sum(np.array(shift)**2))>0.5:
# shift=m.centre_on_feature(feature_image)
# print "moving by %.2f, %.2f" % tuple(shift)
#
#
# def close():
# c.close()
# s.close()
#
| nanophotonics/nplab | nplab/instrument/stage/camera_stage_mapper.py | Python | gpl-3.0 | 20,271 | [
"VisIt"
] | 731951204551e8ead4fcfeca324b7e63e60d2ec9ecf935b41b395ccf43d0c791 |
# Copyright (C) 2013, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import tempfile
from xml.dom import Node
from zeroinstall.zerostore import manifest
from zeroinstall import zerostore
from zeroinstall.support import tasks
from zeroinstall.injector import namespaces, qdom, model
def get_digest(unpack_dir, alg_name):
	"""Return the full digest ID (e.g. "sha1new=...") for an unpacked directory.

	The directory is walked with the named zerostore manifest algorithm and
	each manifest line is folded into the digest.
	"""
	algorithm = manifest.get_algorithm(alg_name)
	hasher = algorithm.new_digest()
	for manifest_line in algorithm.generate_manifest(unpack_dir):
		hasher.update((manifest_line + '\n').encode('utf-8'))
	return algorithm.getID(hasher)
def dom_to_qdom(delem):
	"""Recursively convert an xml.dom element tree into a qdom Element tree.

	Namespaced attributes are stored under "<namespace> <localname>" keys;
	only element children are copied (text nodes are dropped).
	"""
	attrs = {}
	for (ns, name), value in delem.attributes.itemsNS():
		key = (ns + ' ' + name) if ns else name
		attrs[key] = value
	qelem = qdom.Element(delem.namespaceURI, delem.localName, attrs)
	for child in delem.childNodes:
		if child.nodeType == Node.ELEMENT_NODE:
			qelem.childNodes.append(dom_to_qdom(child))
	return qelem
# <archive href='http://example.com/foo/bar.zip'/> becomes:
# <archive href='bar.zip'/>
def basename_hrefs(method):
	"""Strip each retrieval URL in ``method`` down to its basename, in place.

	Recipes are handled by processing each step recursively.
	"""
	if isinstance(method, (model.DownloadSource, model.FileSource)):
		if '/' in method.url:
			_, _, tail = method.url.rpartition('/')
			method.url = tail
	elif isinstance(method, model.Recipe):
		for recipe_step in method.steps:
			basename_hrefs(recipe_step)
class FakeStore:
	"""Minimal stand-in for a zerostore store: it only hands out temp dirs."""
	def get_tmp_dir_for(self, x):
		# The argument (normally the required digest) is deliberately ignored.
		return tempfile.mkdtemp(prefix = '0template-')
# Instead of checking the digest matches, we calculate it from the unpacked archive
class FakeStores:
	"""Stand-in for zerostore Stores: computes digests rather than verifying them.

	When the fetcher asks us to verify a freshly-unpacked implementation, we
	instead calculate its digests and write them back onto the
	<implementation> DOM element we were given.
	"""
	def __init__(self, impl, real_stores):
		# impl: the <implementation> DOM element to annotate with digests
		self.impl = impl
		self.stores = [FakeStore()]
		# real_stores: the user's actual store list, used only for caching
		self.real_stores = real_stores
	def check_manifest_and_rename(self, required_digest, unpack_dir, dry_run = False):
		"""Compute digests of ``unpack_dir`` and record them on the element.

		``required_digest`` and ``dry_run`` are accepted for interface
		compatibility; the requested digest is never actually checked.
		"""
		implementation = self.impl
		sha1new = get_digest(unpack_dir, 'sha1new')
		if not implementation.getAttribute('id'):
			# Use the sha1new digest as the implementation ID if none was set.
			implementation.setAttribute('id', sha1new)
		digests = [sha1new]
		def add_digest(alg_name):
			# NOTE: writes the digest value onto whichever <manifest-digest>
			# element is currently bound to the loop variable `elem` below.
			digest_id = get_digest(unpack_dir, alg_name)
			digests.append(digest_id)
			name, value = zerostore.parse_algorithm_digest_pair(digest_id)
			elem.setAttribute(alg_name, value)
		have_manifest_elem = False
		for elem in implementation.getElementsByTagNameNS(namespaces.XMLNS_IFACE, 'manifest-digest'):
			have_manifest_elem = True
			have_digests = False
			# Fill in each attribute the template left empty (e.g. sha256new="").
			for attr_name, value in elem.attributes.items():
				if value: continue
				add_digest(attr_name)
				have_digests = True
			if not have_digests:
				# No empty attributes: add a sensible default algorithm.
				add_digest('sha256new')
		if not have_manifest_elem:
			print("WARNING: no <manifest-digest> element found")
		# Pick the highest-rated algorithm's digest for caching purposes.
		best_rating = -1
		best_digest = None
		for digest_id in digests:
			alg_name, value = zerostore.parse_algorithm_digest_pair(digest_id)
			alg = manifest.get_algorithm(alg_name)
			if alg.rating > best_rating:
				best_rating = alg.rating
				best_digest = digest_id
		# Cache if necessary (saves downloading it again later)
		stores = self.real_stores
		if stores.lookup_maybe(digests) is None:
			stores.add_dir_to_cache(best_digest, unpack_dir)
def add_digests(feed_path, implementation, config):
	"""Download the archives of ``implementation`` and record their digests.

	A throwaway single-implementation feed is built around the retrieval
	methods found in ``implementation`` (a DOM element) and fetched through
	``config.fetcher``; FakeStores then writes the computed digests back onto
	the element.
	"""
	# Build a minimal valid feed containing just our implementation.
	root = qdom.Element(namespaces.XMLNS_IFACE, 'interface', {})
	name_elem = qdom.Element(namespaces.XMLNS_IFACE, 'name', {})
	name_elem.content = 'Test'
	summary_elem = qdom.Element(namespaces.XMLNS_IFACE, 'summary', {})
	summary_elem.content = 'testing'
	test_impl = qdom.Element(namespaces.XMLNS_IFACE, 'implementation',
				 {'id': 'sha1new=1', 'version': '0'})
	root.childNodes = [name_elem, summary_elem, test_impl]
	# Copy across only the retrieval-method children.
	for child in implementation.childNodes:
		if child.namespaceURI == namespaces.XMLNS_IFACE and child.localName in ('archive', 'file', 'recipe'):
			test_impl.childNodes.append(dom_to_qdom(child))
	feed = model.ZeroInstallFeed(root, local_path = feed_path)
	impl, = feed.implementations.values()
	assert impl.download_sources, "No retrieval methods in implementation!"
	method, = impl.download_sources
	basename_hrefs(method)
	# When fetcher asks FakeStores to check the digest, FakeStores instead stores the actual
	# digest on implementation.
	fake_stores = FakeStores(implementation, config.stores)
	blocker = config.fetcher.download_impl(impl, method, fake_stores)
	tasks.wait_for_blocker(blocker)
| pombredanne/0template | digest.py | Python | lgpl-2.1 | 4,062 | [
"VisIt"
] | 1e3c78a65523feebc820a438d13fafb35f02b643ed5dcc208865e3ead938d928 |
from __future__ import print_function, division
import types
import pdb
import time
import sys
import os
import warnings
from six import StringIO
import numpy as np
import matplotlib.pyplot as plt
import pandas
from scipy import stats
import statsmodels.api as sm
def santizeTimestamp(timestamp):
    """Coerce ``timestamp`` to a ``pandas.Timestamp``.

    Values that are already Timestamps are returned unchanged; anything else
    is passed to the ``pandas.Timestamp`` constructor.

    Raises
    ------
    ValueError
        If the value cannot be interpreted as a timestamp.
    """
    if not isinstance(timestamp, pandas.Timestamp):
        try:
            timestamp = pandas.Timestamp(timestamp)
        except Exception:
            # Bug fix: the original message never interpolated the offending
            # value, and the bare `except:` also swallowed KeyboardInterrupt.
            raise ValueError('{} could not be coerced into a pandas.Timestamp'.format(timestamp))
    return timestamp
def getSeason(date):
    '''Return the season for a given date.

    Parameters
    ----------
    date : datetime.datetime object or similar
        Any object that represents a date and has `.month` and `.day`
        attributes

    Returns
    -------
    season : str
        One of 'winter', 'spring', 'summer', or 'autumn'.

    Notes
    -----
    Assumes that all seasons change on the 22nd (e.g., all winters start on
    December 22).  This isn't strictly true, but it's good enough for now.
    '''
    date = santizeTimestamp(date)
    month_day = (date.month, date.day)
    # Tuple comparison walks the season boundaries in calendar order.
    if month_day >= (12, 22) or month_day < (3, 22):
        return 'winter'
    elif month_day < (6, 22):
        return 'spring'
    elif month_day < (9, 22):
        return 'summer'
    else:
        return 'autumn'
def addSecondColumnLevel(levelval, levelname, olddf):
    '''Prepend a constant level to a dataframe's simple column index.

    E.g., df.columns = ['res', 'qual'] -> [('Infl' ,'res'), ('Infl', 'qual')]

    Raises ValueError if the columns already form a MultiIndex.
    '''
    if isinstance(olddf.columns, pandas.MultiIndex):
        raise ValueError('Dataframe already has MultiIndex on columns')
    newdf = olddf.copy()
    newdf.columns = pandas.MultiIndex.from_arrays(
        [[levelval] * len(olddf.columns), olddf.columns],
        names=[levelname, 'quantity'],
    )
    return newdf
def getUniqueDataframeIndexVal(df, indexlevel):
    '''Return the single unique value in one level of a dataframe's index.

    Useful for confirming consistent units.  Raises ValueError when the
    level holds more than one distinct value.
    '''
    level_values = np.unique(df.index.get_level_values(indexlevel).tolist())
    if level_values.shape != (1,):
        raise ValueError('index level "%s" is not unique!' % indexlevel)
    return level_values[0]
def sigFigs(x, n, expthresh=5, tex=False, pval=False, forceint=False):
    '''
    Format a number as a string with the correct number of sig figs.

    Parameters
    ----------
    x : numeric or str
        the number you want to round (strings are returned unaltered)
    n : int
        the number of sig figs it should have
    expthresh : int, default 5
        order of magnitude (either direction) beyond which scientific
        notation is used
    tex : bool
        toggles the scientific (TeX) formatting of the number
    pval : bool
        if True and x < 0.001, will return "<0.001"
    forceint : bool
        if true, simply formats x as a (comma-grouped) integer

    Typical Usage
    -------------
    >>> print(sigFigs(1247.15, 3))
    1,250
    >>> print(sigFigs(1247.15, 7))
    1,247.150
    '''
    # Bug fix: check for strings *before* the numeric NaN/inf tests --
    # np.isinf raises TypeError on non-numeric input, which made the
    # original string-handling branch unreachable.
    if isinstance(x, str):
        return x

    # with NAs and INFs, just return 'NA'
    if x is None or np.isinf(x) or np.isnan(x):
        return 'NA'

    if n < 1:
        raise ValueError("number of sig figs must be greater than zero!")

    if pval and x < 0.001:
        out = "<0.001"
        if tex:
            out = '${}$'.format(out)
    elif forceint:
        out = '{:,.0f}'.format(x)
    elif x != 0.0:
        order = np.floor(np.log10(np.abs(x)))
        if -1.0 * expthresh <= order <= expthresh:
            # Within the threshold: plain decimal notation.
            decimal_places = int(n - 1 - order)
            if decimal_places <= 0:
                out = '{0:,.0f}'.format(round(x, decimal_places))
            else:
                fmt = '{0:,.%df}' % decimal_places
                out = fmt.format(x)
        else:
            # Very large/small magnitudes: scientific notation.
            decimal_places = n - 1
            if tex:
                fmt = r'$%%0.%df \times 10 ^ {%d}$' % (decimal_places, order)
                out = fmt % round(x / 10 ** order, decimal_places)
            else:
                fmt = '{0:.%de}' % decimal_places
                out = fmt.format(x)
    else:
        out = str(round(x, n))

    return out
def _sig_figs(x):
    '''
    Wrapper around `utils.sigFigs` that takes a single argument, so it can be
    "apply"-ed to a pandas dataframe: formats ``x`` with three significant
    figures and TeX markup, returning the string representation.
    '''
    return sigFigs(x, n=3, tex=True)
def formatResult(result, qualifier, sigfigs=3):
    """ Prefix a formatted result with its qualifier.

    Parameters
    ----------
    results : float
        The concentration or particulate strength
    qualifier : string
        The result's qualifier
    sigfigs : int
        The number of significant digits to which `result` should be
        formatted

    Returns
    -------
    formatted : string

    Example
    -------
    >>> wqio.formatResult(1.23, '<', sigfigs=4)
    "<1.230"
    """
    formatted_value = sigFigs(result, sigfigs)
    return '{}{}'.format(qualifier, formatted_value)
def _boxplot_legend(ax, notch=False, shrink=2.5, fontsize=10, showmean=False):
    '''
    Help function to draw a boxplot legend.
    Input:
        ax (matplotlib axes object) : axes on which the legend
            will be plotted
        notch (bool, default False) : whether or not to include
            notches (confidence intervals) around the median
        shrink (float, default 2.5) : some measure of how far away
            the annotation arrows should be from the elements to
            which they point.
        fontsize (int, default 10) : text size for the annotations
        showmean (bool, default False) : whether to plot and label
            the mean of the example data
    '''
    # load static, randomly generated data
    x = np.array([[
        1.7117, 2.5470, 3.2817, 2.3303, 2.7066, 4.2024, 2.7184, 2.9790,
        2.7782, 1.9440, 3.9939, 4.3938, 6.1780, 3.2937, 3.6596, 2.3589,
        1.5408, 3.7236, 2.9327, 4.2844, 3.5441, 3.9499, 2.0023, 3.7872,
        3.4989, 2.2898, 2.7913, 3.2796, 2.3650, 3.5436, 3.3459, 3.8699,
        3.7448, 2.0149, 2.1290, 4.2193, 4.3932, 1.6687, 5.1053, 2.3849,
        1.6996, 3.1484, 3.4078, 2.0051, 0.88211, 2.038, 3.3291, 2.3526,
        1.4030, 2.7147
    ]])
    # plot the boxplot
    bpLeg = ax.boxplot(x, notch=notch, positions=[1], widths=0.5, bootstrap=10000)
    # plot the mean
    if showmean:
        ax.plot(1, x.mean(), marker='o', mec='k', mfc='r', zorder=100)
    # format stuff (colors and linewidths and whatnot)
    plt.setp(bpLeg['whiskers'], color='k', linestyle='-', zorder=10)
    plt.setp(bpLeg['boxes'], color='k', zorder=10)
    plt.setp(bpLeg['medians'], color='r', zorder=5, linewidth=1.25)
    plt.setp(bpLeg['fliers'], marker='o', mfc='none', mec='r', ms=4, zorder=10)
    plt.setp(bpLeg['caps'], linewidth=0)
    # positions of the boxplot elements
    # (pulled off the matplotlib artists so the annotation arrows can point
    # at the exact drawn coordinates; notched boxes expose extra CI vertices)
    if notch:
        x05, y05 = 1.00, bpLeg['caps'][0].get_ydata()[0]
        x25, y25 = bpLeg['boxes'][0].get_xdata()[1], bpLeg['boxes'][0].get_ydata()[0]
        x50, y50 = bpLeg['medians'][0].get_xdata()[1], bpLeg['medians'][0].get_ydata()[0]
        x75, y75 = bpLeg['boxes'][0].get_xdata()[1], bpLeg['boxes'][0].get_ydata()[5]
        x95, y95 = 1.00, bpLeg['caps'][1].get_ydata()[0]
        xEx, yEx = 1.00, bpLeg['fliers'][0].get_ydata()[0]
        xCIL, yCIL = bpLeg['boxes'][0].get_xdata()[0], bpLeg['boxes'][0].get_ydata()[2]
        xCIU, yCIU = bpLeg['boxes'][0].get_xdata()[0], bpLeg['boxes'][0].get_ydata()[4]
    else:
        x05, y05 = bpLeg['caps'][0].get_xdata()[1], bpLeg['caps'][1].get_ydata()[0]
        x25, y25 = bpLeg['boxes'][0].get_xdata()[0], bpLeg['boxes'][0].get_ydata()[0]
        x50, y50 = bpLeg['medians'][0].get_xdata()[1], bpLeg['medians'][0].get_ydata()[0]
        x75, y75 = bpLeg['boxes'][0].get_xdata()[0], bpLeg['boxes'][0].get_ydata()[2]
        x95, y95 = bpLeg['caps'][1].get_xdata()[1], bpLeg['caps'][0].get_ydata()[0]
        xEx, yEx = 0.95, bpLeg['fliers'][0].get_ydata()[0]
    # axes formatting
    ax.set_xlim([0, 2])
    ax.set_yticks(range(9))
    ax.set_yticklabels([])
    ax.set_xticklabels([], fontsize=5)
    ax.xaxis.set_ticks_position("none")
    ax.yaxis.set_ticks_position("none")
    # annotation formats
    ap = dict(arrowstyle="->", shrinkB=shrink)
    # text for the labels
    note1 = r'{1) Interquartile range: $\mathrm{IQR} = \mathrm{Q3} - \mathrm{Q1}$}'
    note2 = '{2) Geometric means are plotted only for bacteria data.\nOtherwise, arithmetic means are shown.'
    legText = {
        '5th': r'{Min. data $\ge \mathrm{Q1} - 1.5 \times \mathrm{IQR}$}',
        '25th': r'{$25^{\mathrm{th}}\:\mathrm{percentile},\:\mathrm{Q}1$}',
        '50th': r'{$50^{\mathrm{th}} \mathrm{percentile}$, median}',
        '75th': r'{$75^{\mathrm{th}}\:\mathrm{percentile},\:\mathrm{Q3}$}',
        '95th': r'{Max. data $\le \mathrm{Q3} + 1.5 \times \mathrm{IQR}$}',
        'Out': r'{Outlier $>\mathrm{Q3} + 1.5 \times \mathrm{IQR}$} (note 1)',
        'CIL': r'{Lower 95\% CI about the median}',
        'CIU': r'{Upper 95\% CI about the median}',
        'notes': 'Notes:\n%s\n%s' % (note1, note2),
        'mean': r'Mean (note 2)'
    }
    # add notes
    ax.annotate(legText['notes'], (0.05, 0.01), xycoords='data', fontsize=fontsize)
    # label the mean
    if showmean:
        ax.annotate(legText['mean'], (1, x.mean()), xycoords='data',
                    xytext=(1.15, 5.75), textcoords='data', va='center',
                    arrowprops=ap, fontsize=fontsize)
    # label the lower whisker
    ax.annotate(legText['5th'], (x05, y05), xycoords='data',
                xytext=(1.25, y05), textcoords='data', va='center',
                arrowprops=ap, fontsize=fontsize)
    # label 1st quartile
    ax.annotate(legText['25th'], (x25, y25), xycoords='data',
                xytext=(1.55, y25*0.75), textcoords='data', va='center',
                arrowprops=ap, fontsize=fontsize)
    # label the median
    ax.annotate(legText['50th'], (x50, y50), xycoords='data',
                xytext=(1.45, y50), textcoords='data', va='center',
                arrowprops=ap, fontsize=fontsize)
    # label the 3rd quartile
    ax.annotate(legText['75th'], (x75, y75), xycoords='data',
                xytext=(1.55, y75*1.25), textcoords='data', va='center',
                arrowprops=ap, fontsize=fontsize)
    # label the upper whisker
    ax.annotate(legText['95th'], (x95, y95), xycoords='data',
                xytext=(0.05, 7.00), textcoords='data', va='center',
                arrowprops=ap, fontsize=fontsize)
    # label an outlier
    ax.annotate(legText['Out'], (xEx, yEx), xycoords='data',
                xytext=(1.00, 7.50), textcoords='data', va='center',
                arrowprops=ap, fontsize=fontsize)
    # label confidence intervals around the mean
    if notch:
        ax.annotate(legText['CIL'], (xCIL, yCIL), xycoords='data',
                    xytext=(0.05, 0.75*yCIL), textcoords='data', va='center',
                    arrowprops=ap, fontsize=fontsize)
        ax.annotate(legText['CIU'], (xCIU, yCIU), xycoords='data',
                    xytext=(0.05, 1.25*yCIU), textcoords='data', va='center',
                    arrowprops=ap, fontsize=fontsize)
    # legend and grid formats
    ax.set_frame_on(False)
    ax.yaxis.grid(False, which='major')
    ax.xaxis.grid(False, which='major')
def makeBoxplotLegend(filename='bmp/tex/boxplotlegend', figsize=4, **kwargs):
    '''
    Draw and save the explanatory diagram for boxplots.

    PDF and PNG copies are written to ``filename`` (an extensionless path).
    All keyword arguments are passed through to ``_boxplot_legend``.
    '''
    fig, ax = plt.subplots(figsize=(figsize, figsize))
    # the helper function does the heavy lifting
    _boxplot_legend(ax, **kwargs)
    fig.tight_layout()
    # save both formats, then close
    for extension in ('.pdf', '.png'):
        fig.savefig(filename + extension, transparent=True, dpi=300)
    plt.close(fig)
def processFilename(filename):
    '''
    Sanitize a filename by removing problem characters. DON'T feed it a full path.

    Typical Usage
    >>> processFilename('FigureBenzon/Inzo_1')
    FigureBenzonInzo1
    '''
    # One C-level pass via translate instead of chained .replace() calls.
    return filename.translate(str.maketrans('', '', ' ,+$_{}/&'))
def constructPath(name, ext, *args):
    '''
    Build a path from a filename, extension, and parent directories,
    inferring the last directory from the extension.

    >>> print(constructPath('test1 2', 'tex', 'cvc', 'output'))
    cvc/output/tex/test12.tex
    '''
    # Map each known extension to its destination sub-directory.
    destinations = {
        'png': 'img',
        'pdf': 'img',
        'csv': 'csv',
        'tex': 'tex',
    }
    cleaned_name = processFilename(name + '.' + ext)
    return os.path.join(os.path.join(*args), destinations[ext], cleaned_name)
def makeTablesFromCSVStrings(tablestring, texpath=None, csvpath=None):
    '''
    Write a CSV-formatted string out as a CSV file and/or a LaTeX table.

    Input:
        tablestring (string) : CSV-formatted string of data
        texpath (string or None) : output path for the LaTeX table
        csvpath (string or None) : output path for the raw CSV

    Writes:
        CSV and/or LaTeX files of the data (whichever paths are given)

    Returns:
        None
    '''
    # Parse the string once up front so LaTeX output sees tabular data.
    df = pandas.read_csv(StringIO(tablestring))
    if csvpath is not None:
        with open(csvpath, 'w') as csvfile:
            csvfile.write(tablestring)
    if texpath is not None:
        with open(texpath, 'w') as texfile:
            texfile.write(df.to_latex(index=False).replace("Unnamed: 1", ""))
def addStatsToOutputSummary(csvpath, index_cols=['Date'], na_values='--'):
    '''
    Append summary statistics to the bottom of a CSV file, in place.

    Input:
        csvpath (string) : full path to the CSV file
        index_cols (list of strings) : columns treated as the index
            (kept as strings -- dates are deliberately not parsed)
        na_values (string) : representation of missing values in the file

    Writes:
        The same CSV file with the output of ``DataFrame.describe`` appended
        at the bottom.

    Returns:
        summary (pandas.DataFrame) : the table that was written back

    TODO: figure out what to do with -counts-
    '''
    # read in the data, pretending that the dates are strings
    summary = pandas.read_csv(csvpath, parse_dates=False, index_col=index_cols,
                              na_values=na_values)
    orig_cols = summary.columns.tolist()

    # compute stats and stack them below the data; ``pandas.concat`` replaces
    # ``DataFrame.append``, which was removed in pandas 2.0
    stat_df = summary.describe()
    summary = pandas.concat([summary, stat_df])

    # set the index's name
    summary.index.names = index_cols

    # dump the CSV
    summary[orig_cols].to_csv(csvpath, na_rep='--')
    return summary
def addExternalValueToOutputSummary(csvpath, comparedict, comparecol,
                                    index_cols=['Date'], na_values='--'):
    '''
    Append externally-supplied comparison values to the bottom of a CSV file.

    Input:
        csvpath (string) : full path to the CSV file
        comparedict (dict) : maps row labels to the values to append
        comparecol (string) : the column that receives the values
        index_cols (list of strings) : columns treated as the index
        na_values (string) : representation of missing values in the file

    Returns:
        summary (pandas.DataFrame) : the table that was written back
    '''
    # read in the data, pretending that the dates are strings
    summary = pandas.read_csv(csvpath, parse_dates=False, index_col=index_cols,
                              na_values=na_values)
    original_columns = summary.columns

    # make the comparison values a one-column dataframe
    # NOTE(review): keys and values are sorted *independently*, so each label
    # pairs with the sorted values, not necessarily its own value -- behavior
    # preserved from the original implementation; confirm this is intended.
    compare_df = pandas.DataFrame(sorted(list(comparedict.values())),
                                  index=sorted(list(comparedict.keys())),
                                  columns=[comparecol])

    # append it; ``pandas.concat`` replaces ``DataFrame.append``, which was
    # removed in pandas 2.0
    summary = pandas.concat([summary, compare_df])

    # set the index's name
    summary.index.names = index_cols

    # dump the CSV
    summary[original_columns].to_csv(csvpath, na_rep='--')
    return summary
def sanitizeTex(texstring):
    """Clean up a pandas-generated LaTeX string for our document templates."""
    # (pattern, replacement) pairs, applied in order -- order matters because
    # some patterns overlap (e.g. '\\%' must be handled before '\\').
    substitutions = (
        (r'\\%', r'\%'),
        (r'\\', r'\tabularnewline'),
        ('\$', '$'),
        ('\_', '_'),
        ('ug/L', '\si[per-mode=symbol]{\micro\gram\per\liter}'),
        (r'\textbackslashtimes', r'\times'),
        (r'\textbackslash', ''),
        (r'\textasciicircum', r'^'),
        ('\{', '{'),
        ('\}', '}'),
    )
    cleaned = texstring
    for pattern, replacement in substitutions:
        cleaned = cleaned.replace(pattern, replacement)
    return cleaned
def csvToTex(csvpath, texpath, na_rep='--', float_format=_sig_figs, pcols=15,
             addmidrules=None, replaceTBrules=True, replacestats=True):
    '''
    Convert data in CSV format to a LaTeX table.

    Input:
        csvpath (string) : full name and file path of the input data file
        texpath (string) : full name and file path of the output LaTeX file
        na_rep (string, default "--") : how NA values should be written
        float_format (fxn, default `_sig_figs`) : single input function that
            will return the correct representation of floating point numbers
        pcols (int, default 15) : width (mm) of the fixed-width columns in
            the rewritten column spec; 0 skips the post-processing entirely
        addmidrules (string, list of strings, or None) : substring(s) before
            which a \\midrule should be inserted
        replaceTBrules (bool) : replace \\toprule/\\bottomrule with \\midrule
        replacestats (bool) : prettify the row labels that
            ``DataFrame.describe`` emits (std, 50%, etc.)

    Writes:
        A LaTeX table representation of the data found in `csvpath`

    Returns:
        None
    '''
    # read in the data with pandas
    data = pandas.read_csv(csvpath, parse_dates=False, na_values=[na_rep])

    # dump the latex (context manager ensures the handle is closed)
    with open(texpath, 'w') as texfile:
        data.to_latex(texfile, float_format=float_format, na_rep=na_rep,
                      index=False)

    if pcols > 0:
        lines = []
        with open(texpath, 'r') as texfile:
            header = texfile.readline()
            rest_of_file = sanitizeTex(texfile.read())

        # Rewrite the column spec: first column stays "l"; every original
        # column becomes a fixed-width "x{<pcols>mm}" column.
        header_sections = header.split('{')
        old_col_def = header_sections[-1][:-2]
        new_col_def = ''
        for n in range(len(old_col_def)):
            if n == 0:
                new_col_def = new_col_def + 'l'
            new_col_def = new_col_def + 'x{%smm}' % pcols
        lines.append(header.replace(old_col_def, new_col_def))

        if replaceTBrules:
            rest_of_file = rest_of_file.replace("\\toprule", "\\midrule")
            rest_of_file = rest_of_file.replace("\\bottomrule", "\\midrule")

        if replacestats:
            rest_of_file = rest_of_file.replace("std", "Std. Dev.")
            rest_of_file = rest_of_file.replace("50\\%", "Median")
            rest_of_file = rest_of_file.replace("25\\%", "25th Percentile")
            rest_of_file = rest_of_file.replace("75\\%", "75th Percentile")
            rest_of_file = rest_of_file.replace("count", "Count")
            rest_of_file = rest_of_file.replace("mean", "Mean")
            rest_of_file = rest_of_file.replace("min ", "Min. ")
            rest_of_file = rest_of_file.replace("max", "Max.")

            # XXX: omg hack -- undo the "min" replacement inside "Aluminum"
            rest_of_file = rest_of_file.replace("AluMin.um", "Aluminum")

        if addmidrules is not None:
            if hasattr(addmidrules, 'append'):
                for amr in addmidrules:
                    rest_of_file = rest_of_file.replace(amr, '\\midrule\n%s' % amr)
            else:
                # Bug fix: this branch previously referenced the undefined
                # name `amr`, raising NameError for a single (non-list) value.
                rest_of_file = rest_of_file.replace(addmidrules, '\\midrule\n%s' % addmidrules)

        # extend() iterates the string character-by-character; writelines()
        # joins them back together, so output is unchanged
        lines.extend(rest_of_file)

        with open(texpath, 'w') as texfile:
            texfile.writelines(lines)
def csvToXlsx(csvpath, xlsxpath, na_rep='--', float_format=None):
    '''
    Convert data in CSV format to an Excel workbook.

    Input:
        csvpath (string) : full name and file path of the input data file
        xlsxpath (string) : full name and file path of the output .xlsx file
        na_rep (string, default "--") : how NA values should be written
        float_format (fxn or None) : single input function that will return
            the correct representation of floating point numbers

    Writes:
        An Excel workbook of the data found in `csvpath`

    Returns:
        None
    '''
    frame = pandas.read_csv(csvpath, parse_dates=False, na_values=[na_rep])
    frame.to_excel(xlsxpath, float_format=float_format, na_rep=na_rep,
                   index=False)
def nested_getattr(baseobject, attribute):
    '''
    Fetch an attribute nested several layers deep via a dotted path.

    Input:
        baseobject : this seriously can be anything
        attribute (string) : dotted path of the attribute you want

    Output:
        No telling. It depends on what you ask for.

    Example:
        >>> nested_getattr(dataset, 'influent.stats.mean')
    '''
    current = baseobject
    for attrname in attribute.split('.'):
        current = getattr(current, attrname)
    return current
def normalize_units(dataframe, units_map, targetunit, paramcol='parameter',
                    rescol='Outflow_res', unitcol='Outflow_unit'):
    """Convert a result column to a consistent set of units, in place.

    Parameters
    ----------
    dataframe : pandas.DataFrame
    units_map : dict
        maps unit strings to multiplicative conversion factors
    targetunit : string or dict
        the unit to convert to; a dict maps parameter -> target unit
    paramcol, rescol, unitcol : strings
        names of the parameter, result, and unit columns

    Returns
    -------
    dataframe : pandas.DataFrame
        the same object, modified in place

    Raises
    ------
    ValueError
        if a target unit is missing from ``units_map``
    """
    # Sanity-check that the target unit(s) exist in the map.  Bug fix: the
    # original `units_map[targetunit]` lookup raised an uncaught TypeError
    # (unhashable type) whenever `targetunit` was a dict, even though dicts
    # are explicitly supported below.
    if isinstance(targetunit, dict):
        unknown = set(targetunit.values()) - set(units_map)
        if unknown:
            raise ValueError('{0} is not contained in `units_map`'.format(sorted(unknown)[0]))
    elif targetunit not in units_map:
        raise ValueError('{0} is not contained in `units_map`'.format(targetunit))

    # standardize units in the wqdata
    dataframe['normalize'] = dataframe[unitcol].map(units_map.get)
    if isinstance(targetunit, dict):
        dataframe['targetunit'] = dataframe[paramcol].map(targetunit.get)
    else:
        dataframe['targetunit'] = targetunit

    dataframe['convert'] = dataframe['targetunit'].map(units_map.get)
    dataframe[rescol] = dataframe[rescol] * dataframe['normalize'] / dataframe['convert']

    # reassign units
    dataframe[unitcol] = dataframe.targetunit

    return dataframe
def normalize_units2(data, normFxn, convFxn, unitFxn, paramcol='parameter',
                     rescol='res', unitcol='unit', dlcol=None):
    """Unit-normalize a results table using caller-supplied lookup functions.

    ``normFxn`` maps a unit string to its conversion factor, ``convFxn`` maps
    a parameter to its target-unit factor, and ``unitFxn`` maps a parameter
    to the name of its target unit.  A modified copy is returned; ``data``
    itself is untouched.
    """
    normalized = data.copy()
    factor = normalized[unitcol].apply(normFxn) / normalized[paramcol].apply(convFxn)
    normalized[rescol] *= factor
    if dlcol is not None:
        # scale the detection-limit column by the same factor
        normalized[dlcol] *= factor
    normalized.loc[:, unitcol] = normalized[paramcol].apply(unitFxn)
    return normalized
def makeTexTable(tablefile, caption, sideways=False, footnotetext=None,
                 clearpage=False, pos='h!'):
    """Return the LaTeX snippet that includes ``tablefile`` as a table float.

    ``sideways=True`` switches to the sidewaystable environment (and forces a
    \\clearpage afterwards); ``footnotetext`` is emitted after the table.
    """
    tabletype = 'sidewaystable' if sideways else 'table'
    if sideways:
        clearpage = True
    clearpagetext = r'\clearpage' if clearpage else ''
    notes = footnotetext if footnotetext is not None else ''
    tablestring = r"""
    \begin{%s}[%s]
        \rowcolors{1}{CVCWhite}{CVCLightGrey}
        \caption{%s}
        \centering
        \input{%s}
    \end{%s}
    %s
    %s
    """ % (tabletype, pos, caption, tablefile, tabletype, notes, clearpagetext)
    return tablestring
def makeLongLandscapeTexTable(df, caption, label, footnotetext=None, index=False):
    """Render ``df`` as a LaTeX longtable inside a landscape environment.

    Repeating headers and footers are generated so the table can span
    multiple pages; ``footnotetext`` (if given) is appended after the table.
    """
    notes = footnotetext if footnotetext is not None else ''

    # Keep only the data rows of pandas' LaTeX output; the headers and rules
    # are rebuilt by the longtable template below.
    tabletexstring = df.to_latex(index=index, float_format=_sig_figs, na_rep='--')
    valuestring = '\n'.join(tabletexstring.split('\n')[4:-3])

    def _multicol_format(args):
        # First column is left-aligned; the rest get a fixed 16mm width.
        n, col = args
        align = 'l' if n == 0 else 'p{16mm}'
        return r"\multicolumn{1}{%s}{%s}" % (align, col.replace('%', r'\%'))

    dfcols = df.columns.tolist()
    colalignlist = ['c'] * len(dfcols)
    colalignlist[0] = 'l'
    colalignment = ''.join(colalignlist)
    columns = ' &\n\t\t'.join([_multicol_format(item) for item in enumerate(dfcols)])

    tablestring = r"""
    \begin{landscape}
        \centering
        \rowcolors{1}{CVCWhite}{CVCLightGrey}
        \begin{longtable}{%s}
            \caption{%s} \label{%s} \\
            \toprule
                %s \\
            \toprule
            \endfirsthead
            \multicolumn{%d}{c}
            {{\bfseries \tablename\ \thetable{} -- continued from previous page}} \\
            \toprule
                %s \\
            \toprule
            \endhead
            \toprule
                \rowcolor{CVCWhite}
                \multicolumn{%d}{r}{{Continued on next page...}} \\
            \bottomrule
            \endfoot
            \bottomrule
            \endlastfoot
            %s
        \end{longtable}
    \end{landscape}
    %s
    \clearpage
    """ % (colalignment, caption, label, columns, len(dfcols),
           columns, len(dfcols), valuestring, notes)
    return tablestring
def makeTexFigure(figFile, caption, pos='hb', clearpage=True):
    '''
    Create the LaTeX needed to include a figure in a document.

    Input:
        figFile (string) : path to the image you want to include
        caption (string) : what it should say in the figure's caption
        pos (string, default 'hb') : placement preferences
            (h='here' or b='below')
        clearpage (bool, default True) : whether or not the LaTeX
            command "\\clearpage" should be called after the figure

    Returns:
        figurestring (string) : the LaTeX string to include a figure
            in the appendix reports
    '''
    clearpagetext = r'\clearpage' if clearpage else ''
    figurestring = r"""
    \begin{figure}[%s]   %% FIGURE
        \centering
        \includegraphics[scale=1.00]{%s}
        \caption{%s}
    \end{figure}         %% FIGURE
    %s
    """ % (pos, figFile, caption, clearpagetext)
    return figurestring
def stringify(value, fmt, attribute=None):
    """Format ``value`` (or one of its nested attributes) with ``fmt``.

    When ``attribute`` is given (and ``value`` is not None), the dotted
    attribute path is resolved first.  A None quantity renders as '--'.
    """
    if attribute is not None and value is not None:
        quantity = nested_getattr(value, attribute)
    else:
        quantity = value
    return '--' if quantity is None else fmt % quantity
def pH2concentration(pH, *args):
    '''
    Convert a pH value to a proton concentration in mg/L.

    Raises ValueError if ``pH`` is outside the range [0, 14].
    '''
    # check that we received a valid input
    if pH < 0 or pH > 14:
        raise ValueError('pH = %f but must be between 0 and 14' % pH)

    avogadro = 6.0221413e+23       # items/mole
    proton_mass = 1.672621777e-27  # kg
    kg2g = 1000                    # grams per kilogram
    g2mg = 1000                    # milligrams per gram

    molar_concentration = 10 ** (-1 * pH)  # mol/L of protons
    return molar_concentration * avogadro * proton_mass * kg2g * g2mg
def estimateFromLineParams(xdata, slope, intercept, xlog=False, ylog=False):
    '''
    Estimate the dependent variable of a linear fit given x-data
    and linear parameters.

    Parameters
    ----------
    xdata : numpy array or pandas Series/DataFrame
        The input independent variable of the fit
    slope : float
        Slope of the best-fit line
    intercept : float
        y-intercept of the best-fit line
    xlog : bool (default = False)
        Toggles whether or not the x-data are lognormally distributed
    ylog : bool (default = False)
        Toggles whether or not the y-data are lognormally distributed

    Returns
    -------
    yhat : same type as xdata
        Estimate of the dependent variable.
    '''
    x = np.array(xdata)

    # guard clauses per transform combination; expressions kept in the
    # exact algebraic form so floating-point results are reproducible
    if not xlog and not ylog:
        return slope * x + intercept
    if xlog and not ylog:
        return slope * np.log(x) + intercept
    if xlog and ylog:
        return np.exp(intercept) * x ** slope
    return np.exp(intercept) * np.exp(slope) ** x
def redefineIndexLevel(dataframe, levelname, value, criteria=None, dropold=True):
    '''
    Redefine a selection of rows into another or new index category.

    Input:
        dataframe : pandas DataFrame.

        levelname : string
            The name of the index level that needs to be modified. The catch
            here is that this value needs to be valid after calling
            `dataframe.reset_index()`. In other words, if you have a 3-level
            column index and you want to modify the "Units" level of the index,
            you should actually pass `("Units", "", "")`. Annoying, but that's
            life right now.

        value : string or int
            The replacement value for the index level.

        criteria : function/lambda expression or None
            Called with each index label; rows for which it returns True
            are redefined. If None, the redefinition applies to the
            whole dataframe.

        dropold : optional bool (default is True)
            Toggles the replacement (True) or addition (False) of the data
            of the redefined rows in the returned dataframe.

    Returns:
        (pandas DataFrame) : a new dataframe with the redefined rows
    '''
    # NOTE: `DataFrame.select` was deprecated and removed from pandas;
    # apply the label-based criteria to the index explicitly instead.
    if criteria is not None:
        mask = [bool(criteria(label)) for label in dataframe.index]
        selection = dataframe.loc[mask]
    else:
        selection = dataframe.copy()

    if dropold:
        dataframe = dataframe.drop(selection.index)

    selection = selection.reset_index()
    selection[levelname] = value
    selection = selection.set_index(dataframe.index.names)

    # NOTE: `DataFrame.append` was removed from pandas; use concat.
    return pandas.concat([dataframe, selection]).sort_index()
def checkIntervalOverlap(interval1, interval2, oneway=False):
'''Checks if two numeric intervals overlaps
Parameters
----------
interval1, interval2 : array like
len = 2 sequences to compare
oneway : bool, default = False
if true, only checks that interval1 falls at least partially
inside interval2, but not the other way around
Returns
-------
bool
'''
test1 = np.min(interval2) <= np.max(interval1) <= np.max(interval2)
test2 = np.min(interval2) <= np.min(interval1) <= np.max(interval2)
if oneway:
return test1 or test2
else:
test3 = checkIntervalOverlap(interval2, interval1, oneway=True)
return test1 or test2 or test3
def _dt_component(raw, extractor, fallback, kind, issuewarnings):
    '''Parse one date/time component, using `fallback` for missing/bad values.

    `extractor` pulls the wanted piece (`.date()` or `.time()`) out of the
    parsed `pandas.Timestamp`; `kind` labels the warning message.
    '''
    use_fallback = raw is None or pandas.isnull(raw)
    if not use_fallback:
        try:
            return extractor(pandas.Timestamp(raw))
        except ValueError:
            # unparseable value -> fall back, optionally warning below
            use_fallback = True

    if issuewarnings:
        warnings.warn("Using fallback {} from {}".format(kind, raw))
    return fallback


def makeTimestamp(row, datecol='sampledate', timecol='sampletime',
                  issuewarnings=False):
    '''Makes a pandas.Timestamp from separate date/time columns

    Parameters
    ----------
    row : dict-like (ideally a row in a dataframe)
    datecol : optional string (default = 'sampledate')
        Name of the column containing the dates
    timecol : optional string (default = 'sampletime')
        Name of the column containing the times
    issuewarnings : optional bool (default = False)
        Toggles warnings when the fallback date/time
        (1901-01-01 00:00) must be used

    Returns
    -------
    tstamp : pandas.Timestamp
    '''
    fallback_datetime = pandas.Timestamp('1901-01-01 00:00')

    date = _dt_component(row[datecol], lambda ts: ts.date(),
                         fallback_datetime.date(), 'date', issuewarnings)
    time = _dt_component(row[timecol], lambda ts: ts.time(),
                         fallback_datetime.time(), 'time', issuewarnings)

    return pandas.Timestamp('{} {}'.format(date, time))
def whiskers_and_fliers(x, q1, q3, transformout=None):
    '''Compute boxplot whisker locations and flier values.

    Parameters
    ----------
    x : array-like
        The (possibly transformed) data.
    q1, q3 : float
        First and third quartiles of `x`.
    transformout : callable or None (default)
        Transformation applied to the computed whiskers and fliers
        before they are returned (e.g. `np.exp` for log-space data).

    Returns
    -------
    wnf : dict
        Keys 'whislo', 'whishi', and 'fliers'.
    '''
    if transformout is None:
        transformout = lambda x: x

    iqr = q3 - q1
    loval = q1 - (1.5 * iqr)
    hival = q3 + (1.5 * iqr)

    # low whisker: smallest datum inside the lower fence, else q1 itself
    inside_lo = np.compress(x >= loval, x)
    whislo = q1 if (len(inside_lo) == 0 or np.min(inside_lo) > q1) else np.min(inside_lo)

    # high whisker: largest datum inside the upper fence, else q3 itself
    inside_hi = np.compress(x <= hival, x)
    whishi = q3 if (len(inside_hi) == 0 or np.max(inside_hi) < q3) else np.max(inside_hi)

    return {
        'fliers': np.hstack([
            transformout(np.compress(x < whislo, x)),
            transformout(np.compress(x > whishi, x)),
        ]),
        'whishi': transformout(whishi),
        'whislo': transformout(whislo),
    }
def getWaterYear(date):
    """ Returns the water year of a given date

    Parameters
    ----------
    date : datetime-like
        A datetime or Timestamp object

    Returns
    -------
    wateryear : string
        The water year of `date`

    Example
    -------
    >>> import datetime
    >>> import wqio
    >>> x = datetime.datetime(2005, 11, 2)
    >>> print(wqio.utils.getWaterYear(x))
    '2005/2006'
    """
    # water years run October 1 through September 30
    if date.month >= 10:
        start = date.year
    else:
        start = date.year - 1
    return '{}/{}'.format(start, start + 1)
def fit_line(x, y, xhat=None, fitprobs=None, fitlogs=None, dist=None):
    """ Fits a line to x-y data in various forms (raw, log, prob scales)

    Parameters
    ----------
    x, y : array-like
        Independent and dependent data, respectively.
    xhat : array-like or None, optional
        The values at which yhat should be estimated. If
        not provided, falls back to the min/max values of ``x``.
    fitprobs, fitlogs : str, optional.
        Defines how data should be transformed. Valid values are
        'x', 'y', or 'both'. If using ``fitprobs``, variables should
        be expressed as a percentage, i.e.,
        Probability transform = lambda x: ``dist``.ppf(x / 100.).
        Log transform = lambda x: np.log(x).
        Take care to not pass the same value to both ``fitlogs`` and
        ``fitprobs`` as both transforms will be applied.
    dist : scipy.stats distribution or None, optional
        A fully-spec'd scipy.stats distribution such that ``dist.ppf``
        can be called. If not provided, defaults to scipy.stats.norm.

    Returns
    -------
    xhat, yhat : numpy arrays
        Linear model estimates of ``x`` and ``y``.
    results : a statsmodels result object
        The object returned by statsmodels.OLS.fit()
    """
    def _check_fit_arg(arg, argname):
        # guard against typos in the transform arguments
        valid_args = ['x', 'y', 'both', None]
        if arg not in valid_args:
            # fix: message previously read "Valid value ... Must be on of"
            msg = 'Invalid value for {} ({}). Must be one of {}'
            raise ValueError(msg.format(argname, arg, valid_args))

    _check_fit_arg(fitprobs, "fitprobs")
    _check_fit_arg(fitlogs, "fitlogs")

    if xhat is None:
        xhat = np.array([np.min(x), np.max(x)])

    if dist is None:
        dist = stats.norm

    # map percentages to quantiles and/or take logs before fitting
    if fitprobs in ['x', 'both']:
        x = dist.ppf(x / 100.)
        xhat = dist.ppf(np.array(xhat) / 100.)
    if fitprobs in ['y', 'both']:
        y = dist.ppf(y / 100.)

    if fitlogs in ['x', 'both']:
        x = np.log(x)
    if fitlogs in ['y', 'both']:
        y = np.log(y)

    x = sm.add_constant(x)

    # fix: original had a doubled `model = model = sm.OLS(y, x)` assignment
    model = sm.OLS(y, x)
    results = model.fit()

    yhat = estimateFromLineParams(xhat, results.params[1],
                                  results.params[0],
                                  xlog=fitlogs in ['x', 'both'],
                                  ylog=fitlogs in ['y', 'both'])

    # undo the probability transforms on the returned estimates
    if fitprobs in ['y', 'both']:
        yhat = 100. * dist.cdf(yhat)
    if fitprobs in ['x', 'both']:
        xhat = 100. * dist.cdf(xhat)

    return xhat, yhat, results
def processAndersonDarlingResults(ad_results):
    """ Return a nice string of Anderson-Darling test results

    Parameters
    ----------
    ad_results : tuple or namedtuple
        The packed output from scipy.stats.anderson
        (statistic, critical values, significance levels)

    Returns
    -------
    result : str
        A string representation of the confidence in the result.
    """
    a2, crit, sig = ad_results

    # significance levels whose critical value the statistic stays below
    passed = sig[a2 < crit]
    if len(passed) > 0:
        return '%0.1f%%' % (100 - passed[-1],)

    # statistic exceeded every critical value
    return '<%0.1f%%' % (100 - sig[0],)
class ProgressBar:
    '''Text progress bar for notebooks.

    Basic Usage:

    >>> X = range(1000)
    >>> pbar = utils.ProgressBar(X)
    >>> for n, x in enumerate(X, 1):
    ...     # do stuff with x
    ...     pbar.animate(n)
    '''
    def __init__(self, sequence, width=50, labels=None, labelfxn=None):
        # sequence: the iterable being processed (must support len())
        # width: total character width of the rendered bar
        # labels: optional per-item labels shown next to the count
        # labelfxn: optional callable mapping an item to its label
        self.sequence = sequence
        self.iterations = len(sequence)
        self.labels = labels
        self.labelfxn = labelfxn
        self.prog_bar = '[]'
        self.fill_char = '*'
        self.width = width
        self.__update_amount(0)

    def animate(self, iter):
        # redraw the bar in place ('\r') for iteration `iter`
        print('\r', self, end='')
        sys.stdout.flush()
        self.update_iteration(iter + 1)

    def update_iteration(self, elapsed_iter):
        # rebuild the bar text for `elapsed_iter` completed iterations
        self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
        if self.labels is None and self.labelfxn is None:
            self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)
        elif elapsed_iter <= self.iterations:
            # prefer explicit labels; otherwise derive one from the item
            if self.labels is None:
                label = self.labelfxn(self.sequence[elapsed_iter-1])
            else:
                label = self.labels[elapsed_iter-1]
            self.prog_bar += ' %d of %s (%s)' % (elapsed_iter, self.iterations, label)

    def __update_amount(self, new_amount):
        # render '[****   ]' sized to self.width, then overlay the
        # percentage text near the middle of the bar
        percent_done = int(round((new_amount / 100.0) * 100.0))
        all_full = self.width - 2
        num_hashes = int(round((percent_done / 100.0) * all_full))
        self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
        pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
        pct_string = '%d%%' % percent_done
        self.prog_bar = self.prog_bar[0:pct_place] + \
            (pct_string + self.prog_bar[pct_place + len(pct_string):])

    def __str__(self):
        return str(self.prog_bar)
| lucashtnguyen/wqio | wqio/utils/misc.py | Python | bsd-3-clause | 37,909 | [
"Avogadro"
] | eedcce97e341be3bbb0a9c4522a61cf9a0ad732fb4ed14e5ba615faf5eae8ff2 |
"""
This is only meant to add docs to objects defined in C-extension modules.
The purpose is to allow easier editing of the docstrings without
requiring a re-compile.
NOTE: Many of the methods of ndarray have corresponding functions.
If you update these docstrings, please keep also the ones in
core/fromnumeric.py, core/defmatrix.py up-to-date.
"""
from __future__ import division, absolute_import, print_function
import sys
from numpy.core import numerictypes as _numerictypes
from numpy.core import dtype
from numpy.core.function_base import add_newdoc
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################
add_newdoc('numpy.core', 'flatiter',
"""
Flat iterator object to iterate over arrays.
A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in row-major, C-style order (the last
index varying the fastest). The iterator can also be indexed using
basic slicing or advanced indexing.
See Also
--------
ndarray.flat : Return a flat iterator over an array.
ndarray.flatten : Returns a flattened copy of an array.
Notes
-----
A `flatiter` iterator can not be constructed directly from Python code
by calling the `flatiter` constructor.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> type(fl)
<class 'numpy.flatiter'>
>>> for item in fl:
... print(item)
...
0
1
2
3
4
5
>>> fl[2:4]
array([2, 3])
""")
# flatiter attributes
add_newdoc('numpy.core', 'flatiter', ('base',
"""
A reference to the array that is iterated over.
Examples
--------
>>> x = np.arange(5)
>>> fl = x.flat
>>> fl.base is x
True
"""))
add_newdoc('numpy.core', 'flatiter', ('coords',
"""
An N-dimensional tuple of current coordinates.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.coords
(0, 0)
>>> next(fl)
0
>>> fl.coords
(0, 1)
"""))
add_newdoc('numpy.core', 'flatiter', ('index',
"""
Current flat index into the array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.index
0
>>> next(fl)
0
>>> fl.index
1
"""))
# flatiter functions
add_newdoc('numpy.core', 'flatiter', ('__array__',
"""__array__(type=None) Get array from iterator
"""))
add_newdoc('numpy.core', 'flatiter', ('copy',
"""
copy()
Get a copy of the iterator as a 1-D array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> fl = x.flat
>>> fl.copy()
array([0, 1, 2, 3, 4, 5])
"""))
###############################################################################
#
# nditer
#
###############################################################################
add_newdoc('numpy.core', 'nditer',
"""
Efficient multi-dimensional iterator object to iterate over arrays.
To get started using this object, see the
:ref:`introductory guide to array iteration <arrays.nditer>`.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
flags : sequence of str, optional
Flags to control the behavior of the iterator.
* ``buffered`` enables buffering when required.
* ``c_index`` causes a C-order index to be tracked.
* ``f_index`` causes a Fortran-order index to be tracked.
* ``multi_index`` causes a multi-index, or a tuple of indices
with one per iteration dimension, to be tracked.
* ``common_dtype`` causes all the operands to be converted to
a common data type, with copying or buffering as necessary.
* ``copy_if_overlap`` causes the iterator to determine if read
operands have overlap with write operands, and make temporary
copies as necessary to avoid overlap. False positives (needless
copying) are possible in some cases.
* ``delay_bufalloc`` delays allocation of the buffers until
a reset() call is made. Allows ``allocate`` operands to
be initialized before their values are copied into the buffers.
* ``external_loop`` causes the ``values`` given to be
one-dimensional arrays with multiple values instead of
zero-dimensional arrays.
* ``grow_inner`` allows the ``value`` array sizes to be made
larger than the buffer size when both ``buffered`` and
``external_loop`` is used.
* ``ranged`` allows the iterator to be restricted to a sub-range
of the iterindex values.
* ``refs_ok`` enables iteration of reference types, such as
object arrays.
* ``reduce_ok`` enables iteration of ``readwrite`` operands
which are broadcasted, also known as reduction operands.
* ``zerosize_ok`` allows `itersize` to be zero.
op_flags : list of list of str, optional
This is a list of flags for each operand. At minimum, one of
``readonly``, ``readwrite``, or ``writeonly`` must be specified.
* ``readonly`` indicates the operand will only be read from.
* ``readwrite`` indicates the operand will be read from and written to.
* ``writeonly`` indicates the operand will only be written to.
* ``no_broadcast`` prevents the operand from being broadcasted.
* ``contig`` forces the operand data to be contiguous.
* ``aligned`` forces the operand data to be aligned.
* ``nbo`` forces the operand data to be in native byte order.
* ``copy`` allows a temporary read-only copy if required.
* ``updateifcopy`` allows a temporary read-write copy if required.
* ``allocate`` causes the array to be allocated if it is None
in the ``op`` parameter.
* ``no_subtype`` prevents an ``allocate`` operand from using a subtype.
* ``arraymask`` indicates that this operand is the mask to use
for selecting elements when writing to operands with the
'writemasked' flag set. The iterator does not enforce this,
but when writing from a buffer back to the array, it only
copies those elements indicated by this mask.
* ``writemasked`` indicates that only elements where the chosen
``arraymask`` operand is True will be written to.
* ``overlap_assume_elementwise`` can be used to mark operands that are
accessed only in the iterator order, to allow less conservative
copying when ``copy_if_overlap`` is present.
op_dtypes : dtype or tuple of dtype(s), optional
The required data type(s) of the operands. If copying or buffering
is enabled, the data will be converted to/from their original types.
order : {'C', 'F', 'A', 'K'}, optional
Controls the iteration order. 'C' means C order, 'F' means
Fortran order, 'A' means 'F' order if all the arrays are Fortran
contiguous, 'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible. This also
affects the element memory order of ``allocate`` operands, as they
are allocated to be compatible with iteration order.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when making a copy
or buffering. Setting this to 'unsafe' is not recommended,
as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
op_axes : list of list of ints, optional
If provided, is a list of ints or None for each operands.
The list of axes for an operand is a mapping from the dimensions
of the iterator to the dimensions of the operand. A value of
-1 can be placed for entries, causing that dimension to be
treated as `newaxis`.
itershape : tuple of ints, optional
The desired shape of the iterator. This allows ``allocate`` operands
with a dimension mapped by op_axes not corresponding to a dimension
of a different operand to get a value not equal to 1 for that
dimension.
buffersize : int, optional
When buffering is enabled, controls the size of the temporary
buffers. Set to 0 for the default value.
Attributes
----------
dtypes : tuple of dtype(s)
The data types of the values provided in `value`. This may be
different from the operand data types if buffering is enabled.
Valid only before the iterator is closed.
finished : bool
Whether the iteration over the operands is finished or not.
has_delayed_bufalloc : bool
If True, the iterator was created with the ``delay_bufalloc`` flag,
and no reset() function was called on it yet.
has_index : bool
If True, the iterator was created with either the ``c_index`` or
the ``f_index`` flag, and the property `index` can be used to
retrieve it.
has_multi_index : bool
If True, the iterator was created with the ``multi_index`` flag,
and the property `multi_index` can be used to retrieve it.
index
When the ``c_index`` or ``f_index`` flag was used, this property
provides access to the index. Raises a ValueError if accessed
and ``has_index`` is False.
iterationneedsapi : bool
Whether iteration requires access to the Python API, for example
if one of the operands is an object array.
iterindex : int
An index which matches the order of iteration.
itersize : int
Size of the iterator.
itviews
Structured view(s) of `operands` in memory, matching the reordered
and optimized iterator access pattern. Valid only before the iterator
is closed.
multi_index
When the ``multi_index`` flag was used, this property
provides access to the index. Raises a ValueError if accessed
and ``has_multi_index`` is False.
ndim : int
The dimensions of the iterator.
nop : int
The number of iterator operands.
operands : tuple of operand(s)
The array(s) to be iterated over. Valid only before the iterator is
closed.
shape : tuple of ints
Shape tuple, the shape of the iterator.
value
Value of ``operands`` at current iteration. Normally, this is a
tuple of array scalars, but if the flag ``external_loop`` is used,
it is a tuple of one dimensional arrays.
Notes
-----
`nditer` supersedes `flatiter`. The iterator implementation behind
`nditer` is also exposed by the NumPy C API.
The Python exposure supplies two iteration interfaces, one which follows
the Python iterator protocol, and another which mirrors the C-style
do-while pattern. The native Python approach is better in most cases, but
if you need the coordinates or index of an iterator, use the C-style pattern.
Examples
--------
Here is how we might write an ``iter_add`` function, using the
Python iterator protocol:
>>> def iter_add_py(x, y, out=None):
... addop = np.add
... it = np.nditer([x, y, out], [],
... [['readonly'], ['readonly'], ['writeonly','allocate']])
... with it:
... for (a, b, c) in it:
... addop(a, b, out=c)
... return it.operands[2]
Here is the same function, but following the C-style pattern:
>>> def iter_add(x, y, out=None):
... addop = np.add
... it = np.nditer([x, y, out], [],
... [['readonly'], ['readonly'], ['writeonly','allocate']])
... with it:
... while not it.finished:
... addop(it[0], it[1], out=it[2])
... it.iternext()
... return it.operands[2]
Here is an example outer product function:
>>> def outer_it(x, y, out=None):
... mulop = np.multiply
... it = np.nditer([x, y, out], ['external_loop'],
... [['readonly'], ['readonly'], ['writeonly', 'allocate']],
... op_axes=[list(range(x.ndim)) + [-1] * y.ndim,
... [-1] * x.ndim + list(range(y.ndim)),
... None])
... with it:
... for (a, b, c) in it:
... mulop(a, b, out=c)
... return it.operands[2]
>>> a = np.arange(2)+1
>>> b = np.arange(3)+1
>>> outer_it(a,b)
array([[1, 2, 3],
[2, 4, 6]])
Here is an example function which operates like a "lambda" ufunc:
>>> def luf(lamdaexpr, *args, **kwargs):
... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)'''
... nargs = len(args)
... op = (kwargs.get('out',None),) + args
... it = np.nditer(op, ['buffered','external_loop'],
... [['writeonly','allocate','no_broadcast']] +
... [['readonly','nbo','aligned']]*nargs,
... order=kwargs.get('order','K'),
... casting=kwargs.get('casting','safe'),
... buffersize=kwargs.get('buffersize',0))
... while not it.finished:
... it[0] = lamdaexpr(*it[1:])
... it.iternext()
... return it.operands[0]
>>> a = np.arange(5)
>>> b = np.ones(5)
>>> luf(lambda i,j:i*i + j/2, a, b)
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
If operand flags `"writeonly"` or `"readwrite"` are used the
operands may be views into the original data with the
`WRITEBACKIFCOPY` flag. In this case `nditer` must be used as a
context manager or the `nditer.close` method must be called before
using the result. The temporary data will be written back to the
original data when the `__exit__` function is called but not before:
>>> a = np.arange(6, dtype='i4')[::-2]
>>> with np.nditer(a, [],
... [['writeonly', 'updateifcopy']],
... casting='unsafe',
... op_dtypes=[np.dtype('f4')]) as i:
... x = i.operands[0]
... x[:] = [-1, -2, -3]
... # a still unchanged here
>>> a, x
(array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32))
It is important to note that once the iterator is exited, dangling
references (like `x` in the example) may or may not share data with
the original data `a`. If writeback semantics were active, i.e. if
`x.base.flags.writebackifcopy` is `True`, then exiting the iterator
will sever the connection between `x` and `a`, writing to `x` will
no longer write to `a`. If writeback semantics are not active, then
`x.data` will still point at some part of `a.data`, and writing to
one will affect the other.
Context management and the `close` method appeared in version 1.15.0.
""")
# nditer methods
add_newdoc('numpy.core', 'nditer', ('copy',
"""
copy()
Get a copy of the iterator in its current state.
Examples
--------
>>> x = np.arange(10)
>>> y = x + 1
>>> it = np.nditer([x, y])
>>> next(it)
(array(0), array(1))
>>> it2 = it.copy()
>>> next(it2)
(array(1), array(2))
"""))
add_newdoc('numpy.core', 'nditer', ('operands',
"""
operands[`Slice`]
The array(s) to be iterated over. Valid only before the iterator is closed.
"""))
add_newdoc('numpy.core', 'nditer', ('debug_print',
"""
debug_print()
Print the current state of the `nditer` instance and debug info to stdout.
"""))
add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
"""
enable_external_loop()
When the "external_loop" was not used during construction, but
is desired, this modifies the iterator to behave as if the flag
was specified.
"""))
add_newdoc('numpy.core', 'nditer', ('iternext',
"""
iternext()
Check whether iterations are left, and perform a single internal iteration
without returning the result. Used in the C-style pattern do-while
pattern. For an example, see `nditer`.
Returns
-------
iternext : bool
Whether or not there are iterations left.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_axis',
"""
remove_axis(i)
Removes axis `i` from the iterator. Requires that the flag "multi_index"
be enabled.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
"""
remove_multi_index()
When the "multi_index" flag was specified, this removes it, allowing
the internal iteration structure to be optimized further.
"""))
add_newdoc('numpy.core', 'nditer', ('reset',
"""
reset()
Reset the iterator to its initial state.
"""))
add_newdoc('numpy.core', 'nested_iters',
"""
Create nditers for use in nested loops
Create a tuple of `nditer` objects which iterate in nested loops over
different axes of the op argument. The first iterator is used in the
outermost loop, the last in the innermost loop. Advancing one will change
the subsequent iterators to point at its new element.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
axes : list of list of int
Each item is used as an "op_axes" argument to an nditer
flags, op_flags, op_dtypes, order, casting, buffersize (optional)
See `nditer` parameters of the same name
Returns
-------
iters : tuple of nditer
An nditer for each item in `axes`, outermost first
See Also
--------
nditer
Examples
--------
Basic usage. Note how y is the "flattened" version of
[a[:, 0, :], a[:, 1, 0], a[:, 2, :]] since we specified
the first iter's axes as [1]
>>> a = np.arange(12).reshape(2, 3, 2)
>>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"])
>>> for x in i:
... print(i.multi_index)
... for y in j:
... print('', j.multi_index, y)
(0,)
(0, 0) 0
(0, 1) 1
(1, 0) 6
(1, 1) 7
(1,)
(0, 0) 2
(0, 1) 3
(1, 0) 8
(1, 1) 9
(2,)
(0, 0) 4
(0, 1) 5
(1, 0) 10
(1, 1) 11
""")
add_newdoc('numpy.core', 'nditer', ('close',
"""
close()
Resolve all writeback semantics in writeable operands.
.. versionadded:: 1.15.0
See Also
--------
:ref:`nditer-context-manager`
"""))
###############################################################################
#
# broadcast
#
###############################################################################
add_newdoc('numpy.core', 'broadcast',
"""
Produce an object that mimics broadcasting.
Parameters
----------
in1, in2, ... : array_like
Input parameters.
Returns
-------
b : broadcast object
Broadcast the input parameters against one another, and
return an object that encapsulates the result.
Amongst others, it has ``shape`` and ``nd`` properties, and
may be used as an iterator.
See Also
--------
broadcast_arrays
broadcast_to
Examples
--------
Manually adding two vectors, using broadcasting:
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> out = np.empty(b.shape)
>>> out.flat = [u+v for (u,v) in b]
>>> out
array([[5., 6., 7.],
[6., 7., 8.],
[7., 8., 9.]])
Compare against built-in broadcasting:
>>> x + y
array([[5, 6, 7],
[6, 7, 8],
[7, 8, 9]])
""")
# attributes
add_newdoc('numpy.core', 'broadcast', ('index',
"""
current index in broadcasted result
Examples
--------
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> next(b), next(b), next(b)
((1, 4), (1, 5), (1, 6))
>>> b.index
3
"""))
add_newdoc('numpy.core', 'broadcast', ('iters',
"""
tuple of iterators along ``self``'s "components."
Returns a tuple of `numpy.flatiter` objects, one for each "component"
of ``self``.
See Also
--------
numpy.flatiter
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> row, col = b.iters
>>> next(row), next(col)
(1, 4)
"""))
add_newdoc('numpy.core', 'broadcast', ('ndim',
"""
Number of dimensions of broadcasted result. Alias for `nd`.
.. versionadded:: 1.12.0
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.ndim
2
"""))
add_newdoc('numpy.core', 'broadcast', ('nd',
"""
Number of dimensions of broadcasted result. For code intended for NumPy
1.12.0 and later the more consistent `ndim` is preferred.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.nd
2
"""))
add_newdoc('numpy.core', 'broadcast', ('numiter',
"""
Number of iterators possessed by the broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.numiter
2
"""))
add_newdoc('numpy.core', 'broadcast', ('shape',
"""
Shape of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.shape
(3, 3)
"""))
add_newdoc('numpy.core', 'broadcast', ('size',
"""
Total size of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.size
9
"""))
add_newdoc('numpy.core', 'broadcast', ('reset',
"""
reset()
Reset the broadcasted result's iterator(s).
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> next(b), next(b), next(b)
((1, 4), (2, 4), (3, 4))
>>> b.index
3
>>> b.reset()
>>> b.index
0
"""))
###############################################################################
#
# numpy functions
#
###############################################################################
add_newdoc('numpy.core.multiarray', 'array',
"""
array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0)
Create an array.
Parameters
----------
object : array_like
An array, any object exposing the array interface, an object whose
__array__ method returns an array, or any (nested) sequence.
dtype : data-type, optional
The desired data-type for the array. If not given, then the type will
be determined as the minimum type required to hold the objects in the
sequence.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy will
only be made if __array__ returns a copy, if obj is a nested sequence,
or if a copy is needed to satisfy any of the other requirements
(`dtype`, `order`, etc.).
order : {'K', 'A', 'C', 'F'}, optional
Specify the memory layout of the array. If object is not an array, the
newly created array will be in C order (row major) unless 'F' is
specified, in which case it will be in Fortran order (column major).
If object is an array the following holds.
===== ========= ===================================================
order no copy copy=True
===== ========= ===================================================
'K' unchanged F & C order preserved, otherwise most similar order
'A' unchanged F order if input is F and not C, otherwise C order
'C' C order C order
'F' F order F order
===== ========= ===================================================
When ``copy=False`` and a copy is made for other reasons, the result is
the same as if ``copy=True``, with some exceptions for `A`, see the
Notes section. The default order is 'K'.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting
array should have. Ones will be pre-pended to the shape as
needed to meet this requirement.
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Notes
-----
When order is 'A' and `object` is an array in neither 'C' nor 'F' order,
and a copy is forced by a change in dtype, then the order of the result is
not necessarily 'C' as expected. This is likely a bug.
Examples
--------
>>> np.array([1, 2, 3])
array([1, 2, 3])
Upcasting:
>>> np.array([1, 2, 3.0])
array([ 1., 2., 3.])
More than one dimension:
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
Minimum dimensions 2:
>>> np.array([1, 2, 3], ndmin=2)
array([[1, 2, 3]])
Type provided:
>>> np.array([1, 2, 3], dtype=complex)
array([ 1.+0.j, 2.+0.j, 3.+0.j])
Data-type consisting of more than one element:
>>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
>>> x['a']
array([1, 3])
Creating an array from sub-classes:
>>> np.array(np.mat('1 2; 3 4'))
array([[1, 2],
[3, 4]])
>>> np.array(np.mat('1 2; 3 4'), subok=True)
matrix([[1, 2],
[3, 4]])
""")
add_newdoc('numpy.core.multiarray', 'empty',
"""
empty(shape, dtype=float, order='C')
Return a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
Desired output data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: 'C'
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data of the given shape, dtype, and
order. Object arrays will be initialized to None.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Notes
-----
`empty`, unlike `zeros`, does not set the array values to zero,
and may therefore be marginally faster. On the other hand, it requires
the user to manually set all the values in the array, and should be
used with caution.
Examples
--------
>>> np.empty([2, 2])
array([[ -9.74499359e+001, 6.69583040e-309],
[ 2.13182611e-314, 3.06959433e-309]]) #uninitialized
>>> np.empty([2, 2], dtype=int)
array([[-1073741821, -1067949133],
[ 496041986, 19249760]]) #uninitialized
""")
add_newdoc('numpy.core.multiarray', 'scalar',
"""
scalar(dtype, obj)
Return a new scalar array of the given type initialized with obj.
This function is meant mainly for pickle support. `dtype` must be a
valid data-type descriptor. If `dtype` corresponds to an object
descriptor, then `obj` can be any object, otherwise `obj` must be a
string. If `obj` is not given, it will be interpreted as None for object
type and as zeros for all other types.
""")
add_newdoc('numpy.core.multiarray', 'zeros',
"""
zeros(shape, dtype=float, order='C')
Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape : int or tuple of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: 'C'
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and order.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
full : Return a new array of given shape filled with value.
Examples
--------
>>> np.zeros(5)
array([ 0., 0., 0., 0., 0.])
>>> np.zeros((5,), dtype=int)
array([0, 0, 0, 0, 0])
>>> np.zeros((2, 1))
array([[ 0.],
[ 0.]])
>>> s = (2,2)
>>> np.zeros(s)
array([[ 0., 0.],
[ 0., 0.]])
>>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
array([(0, 0), (0, 0)],
dtype=[('x', '<i4'), ('y', '<i4')])
""")
# Docstring for the internal C-level hook that installs the dictionary used
# to look up an array type from a registered type code (numpy bootstrapping).
add_newdoc('numpy.core.multiarray', 'set_typeDict',
"""set_typeDict(dict)
Set the internal dictionary that can look up an array type using a
registered code.
""")
add_newdoc('numpy.core.multiarray', 'fromstring',
"""
fromstring(string, dtype=float, count=-1, sep='')
A new 1-D array initialized from text data in a string.
Parameters
----------
string : str
A string containing the data.
dtype : data-type, optional
The data type of the array; default: float. For binary input data,
the data must be in exactly this format. Most builtin numeric types are
supported and extension types may be supported.
.. versionadded:: 1.18.0
Complex dtypes.
count : int, optional
Read this number of `dtype` elements from the data. If this is
negative (the default), the count will be determined from the
length of the data.
sep : str, optional
The string separating numbers in the data; extra whitespace between
elements is also ignored.
.. deprecated:: 1.14
Passing ``sep=''``, the default, is deprecated since it will
trigger the deprecated binary mode of this function. This mode
interprets `string` as binary bytes, rather than ASCII text with
decimal numbers, an operation which is better spelt
``frombuffer(string, dtype, count)``. If `string` contains unicode
text, the binary mode of `fromstring` will first encode it into
bytes using either utf-8 (python 3) or the default encoding
(python 2), neither of which produce sane results.
Returns
-------
arr : ndarray
The constructed array.
Raises
------
ValueError
If the string is not the correct size to satisfy the requested
`dtype` and `count`.
See Also
--------
frombuffer, fromfile, fromiter
Examples
--------
>>> np.fromstring('1 2', dtype=int, sep=' ')
array([1, 2])
>>> np.fromstring('1, 2', dtype=int, sep=',')
array([1, 2])
""")
add_newdoc('numpy.core.multiarray', 'compare_chararrays',
"""
compare_chararrays(a, b, cmp_op, rstrip)
Performs element-wise comparison of two string arrays using the
comparison operator specified by `cmp_op`.
Parameters
----------
a, b : array_like
Arrays to be compared.
cmp_op : {"<", "<=", "==", ">=", ">", "!="}
Type of comparison.
rstrip : Boolean
If True, the spaces at the end of strings are removed before the comparison.
Returns
-------
out : ndarray
The output array of type Boolean with the same shape as a and b.
Raises
------
ValueError
If `cmp_op` is not valid.
TypeError
If at least one of `a` or `b` is a non-string array
Examples
--------
>>> a = np.array(["a", "b", "cde"])
>>> b = np.array(["a", "a", "dec"])
>>> np.compare_chararrays(a, b, ">", True)
array([False, True, False])
""")
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
fromiter(iterable, dtype, count=-1)
Create a new 1-dimensional array from an iterable object.
Parameters
----------
iterable : iterable object
An iterable object providing data for the array.
dtype : data-type
The data-type of the returned array.
count : int, optional
The number of items to read from *iterable*. The default is -1,
which means all data is read.
Returns
-------
out : ndarray
The output array.
Notes
-----
Specify `count` to improve performance. It allows ``fromiter`` to
pre-allocate the output array, instead of resizing it on demand.
Examples
--------
>>> iterable = (x*x for x in range(5))
>>> np.fromiter(iterable, float)
array([ 0., 1., 4., 9., 16.])
""")
add_newdoc('numpy.core.multiarray', 'fromfile',
"""
fromfile(file, dtype=float, count=-1, sep='', offset=0)
Construct an array from data in a text or binary file.
A highly efficient way of reading binary data with a known data-type,
as well as parsing simply formatted text files. Data written using the
`tofile` method can be read using this function.
Parameters
----------
file : file or str or Path
Open file object or filename.
.. versionchanged:: 1.17.0
`pathlib.Path` objects are now accepted.
dtype : data-type
Data type of the returned array.
For binary files, it is used to determine the size and byte-order
of the items in the file.
Most builtin numeric types are supported and extension types may be supported.
.. versionadded:: 1.18.0
Complex dtypes.
count : int
Number of items to read. ``-1`` means all items (i.e., the complete
file).
sep : str
Separator between items if file is a text file.
Empty ("") separator means the file should be treated as binary.
Spaces (" ") in the separator match zero or more whitespace characters.
A separator consisting only of spaces must match at least one
whitespace.
offset : int
The offset (in bytes) from the file's current position. Defaults to 0.
Only permitted for binary files.
.. versionadded:: 1.17.0
See also
--------
load, save
ndarray.tofile
loadtxt : More flexible way of loading data from a text file.
Notes
-----
Do not rely on the combination of `tofile` and `fromfile` for
data storage, as the binary files generated are not platform
independent. In particular, no byte-order or data-type information is
saved. Data can be stored in the platform independent ``.npy`` format
using `save` and `load` instead.
Examples
--------
Construct an ndarray:
>>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]),
... ('temp', float)])
>>> x = np.zeros((1,), dtype=dt)
>>> x['time']['min'] = 10; x['temp'] = 98.25
>>> x
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
Save the raw data to disk:
>>> import tempfile
>>> fname = tempfile.mkstemp()[1]
>>> x.tofile(fname)
Read the raw data from disk:
>>> np.fromfile(fname, dtype=dt)
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
The recommended way to store and load data:
>>> np.save(fname, x)
>>> np.load(fname + '.npy')
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
""")
add_newdoc('numpy.core.multiarray', 'frombuffer',
"""
frombuffer(buffer, dtype=float, count=-1, offset=0)
Interpret a buffer as a 1-dimensional array.
Parameters
----------
buffer : buffer_like
An object that exposes the buffer interface.
dtype : data-type, optional
Data-type of the returned array; default: float.
count : int, optional
Number of items to read. ``-1`` means all data in the buffer.
offset : int, optional
Start reading the buffer from this offset (in bytes); default: 0.
Notes
-----
If the buffer has data that is not in machine byte-order, this should
be specified as part of the data-type, e.g.::
>>> dt = np.dtype(int)
>>> dt = dt.newbyteorder('>')
>>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP
The data of the resulting array will not be byteswapped, but will be
interpreted correctly.
Examples
--------
>>> s = b'hello world'
>>> np.frombuffer(s, dtype='S1', count=5, offset=6)
array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1')
>>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8)
array([1, 2], dtype=uint8)
>>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
array([1, 2, 3], dtype=uint8)
""")
# Signature-only docstring for the C fast-path copy+transpose helper.
# NOTE(review): the registered name has no leading underscore while the
# documented signature does (`_fastCopyAndTranspose`) — confirm intentional.
add_newdoc('numpy.core', 'fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
# Signature-only docstring for the C correlation kernel.
# NOTE(review): the signature line reads `cross_correlate` although the
# attribute is registered as `correlate` — looks historical; confirm.
add_newdoc('numpy.core.multiarray', 'correlate',
"""cross_correlate(a,v, mode=0)""")
add_newdoc('numpy.core.multiarray', 'arange',
"""
arange([start,] stop[, step,], dtype=None)
Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range` function, but returns an ndarray rather than a list.
When using a non-integer step, such as 0.1, the results will often not
be consistent. It is better to use `numpy.linspace` for these cases.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified as a positional argument,
`start` must also be given.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
See Also
--------
numpy.linspace : Evenly spaced numbers with careful handling of endpoints.
numpy.ogrid: Arrays of evenly spaced numbers in N-dimensions.
numpy.mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
Examples
--------
>>> np.arange(3)
array([0, 1, 2])
>>> np.arange(3.0)
array([ 0., 1., 2.])
>>> np.arange(3,7)
array([3, 4, 5, 6])
>>> np.arange(3,7,2)
array([3, 5])
""")
# Docstring for the compile-time ABI version probe (used to detect binary
# incompatibility between numpy and compiled extension modules).
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
"""_get_ndarray_c_version()
Return the compile time NPY_VERSION (formerly called NDARRAY_VERSION) number.
""")
# Docstring for the pickle-support constructor that rebuilds an empty array
# of the given subtype/shape/dtype during unpickling.
add_newdoc('numpy.core.multiarray', '_reconstruct',
"""_reconstruct(subtype, shape, dtype)
Construct an empty array. Used by Pickles.
""")
# Docstring for the hook that overrides how str()/repr() render ndarrays.
add_newdoc('numpy.core.multiarray', 'set_string_function',
"""
set_string_function(f, repr=1)
Internal method to set a function to be used when pretty printing arrays.
""")
add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
"""
set_numeric_ops(op1=func1, op2=func2, ...)
Set numerical operators for array objects.
.. deprecated:: 1.16
For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`.
For ndarray subclasses, define the ``__array_ufunc__`` method and
override the relevant ufunc.
Parameters
----------
op1, op2, ... : callable
Each ``op = func`` pair describes an operator to be replaced.
For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace
addition by modulus 5 addition.
Returns
-------
saved_ops : list of callables
A list of all operators, stored before making replacements.
Notes
-----
.. WARNING::
Use with care! Incorrect usage may lead to memory errors.
A function replacing an operator cannot make use of that operator.
For example, when replacing add, you may not use ``+``. Instead,
directly call ufuncs.
Examples
--------
>>> def add_mod5(x, y):
... return np.add(x, y) % 5
...
>>> old_funcs = np.set_numeric_ops(add=add_mod5)
>>> x = np.arange(12).reshape((3, 4))
>>> x + x
array([[0, 2, 4, 1],
[3, 0, 2, 4],
[1, 3, 0, 2]])
>>> ignore = np.set_numeric_ops(**old_funcs) # restore operators
""")
add_newdoc('numpy.core.multiarray', 'promote_types',
"""
promote_types(type1, type2)
Returns the data type with the smallest size and smallest scalar
kind to which both ``type1`` and ``type2`` may be safely cast.
The returned data type is always in native byte order.
This function is symmetric, but rarely associative.
Parameters
----------
type1 : dtype or dtype specifier
First data type.
type2 : dtype or dtype specifier
Second data type.
Returns
-------
out : dtype
The promoted data type.
Notes
-----
.. versionadded:: 1.6.0
Starting in NumPy 1.9, promote_types function now returns a valid string
length when given an integer or float dtype as one argument and a string
dtype as another argument. Previously it always returned the input string
dtype, even if it wasn't long enough to store the max integer/float value
converted to a string.
See Also
--------
result_type, dtype, can_cast
Examples
--------
>>> np.promote_types('f4', 'f8')
dtype('float64')
>>> np.promote_types('i8', 'f4')
dtype('float64')
>>> np.promote_types('>i8', '<c8')
dtype('complex128')
>>> np.promote_types('i4', 'S8')
dtype('S11')
An example of a non-associative case:
>>> p = np.promote_types
>>> p('S', p('i1', 'u1'))
dtype('S6')
>>> p(p('S', 'i1'), 'u1')
dtype('S4')
""")
# Python 2 only: `newbuffer` and `getbuffer` are not exported on Python 3,
# so their docstrings are registered behind a version guard.
# Fix: the two add_newdoc statements must be indented into the `if` suite;
# as previously written (unindented) this was a SyntaxError. The docstring
# payloads are kept byte-identical — continuation lines inside the call
# parentheses and the string bodies legally remain at column 0.
if sys.version_info.major < 3:
    add_newdoc('numpy.core.multiarray', 'newbuffer',
"""
newbuffer(size)
Return a new uninitialized buffer object.
Parameters
----------
size : int
Size in bytes of returned buffer object.
Returns
-------
newbuffer : buffer object
Returned, uninitialized buffer object of `size` bytes.
""")
    add_newdoc('numpy.core.multiarray', 'getbuffer',
"""
getbuffer(obj [,offset[, size]])
Create a buffer object from the given object referencing a slice of
length size starting at offset.
Default is the entire buffer. A read-write buffer is attempted followed
by a read-only buffer.
Parameters
----------
obj : object
offset : int, optional
size : int, optional
Returns
-------
buffer_obj : buffer
Examples
--------
>>> buf = np.getbuffer(np.ones(5), 1, 3)
>>> len(buf)
3
>>> buf[0]
'\\x00'
>>> buf
<read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
""")
add_newdoc('numpy.core.multiarray', 'c_einsum',
"""
c_einsum(subscripts, *operands, out=None, dtype=None, order='K',
casting='safe')
*This documentation shadows that of the native python implementation of the `einsum` function,
except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.*
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional,
linear algebraic array operations can be represented in a simple fashion.
In *implicit* mode `einsum` computes these values.
In *explicit* mode, `einsum` provides further flexibility to compute
other array operations that might not be considered classical Einstein
summation operations, by disabling, or forcing summation over specified
subscript labels.
See the notes and examples for clarification.
Parameters
----------
subscripts : str
Specifies the subscripts for summation as comma separated list of
subscript labels. An implicit (classical Einstein summation)
calculation is performed unless the explicit indicator '->' is
included as well as subscript labels of the precise output form.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
dtype : {data-type, None}, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions. Default is None.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
'K' means it should be as close to the layout of the inputs as
is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Default is 'safe'.
optimize : {False, True, 'greedy', 'optimal'}, optional
Controls if intermediate optimization should occur. No optimization
will occur if False and True will default to the 'greedy' algorithm.
Also accepts an explicit contraction list from the ``np.einsum_path``
function. See ``np.einsum_path`` for more details. Defaults to False.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
Notes
-----
.. versionadded:: 1.6.0
The Einstein summation convention can be used to compute
many multi-dimensional, linear algebraic array operations. `einsum`
provides a succinct way of representing these.
A non-exhaustive list of these operations,
which can be computed by `einsum`, is shown below along with examples:
* Trace of an array, :py:func:`numpy.trace`.
* Return a diagonal, :py:func:`numpy.diag`.
* Array axis summations, :py:func:`numpy.sum`.
* Transpositions and permutations, :py:func:`numpy.transpose`.
* Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
* Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
* Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
* Tensor contractions, :py:func:`numpy.tensordot`.
* Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
describes traditional matrix multiplication and is equivalent to
:py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
to :py:func:`np.trace(a) <numpy.trace>`.
In *implicit mode*, the chosen subscripts are important
since the axes of the output are reordered alphabetically. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose. Additionally,
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
``np.einsum('ij,jh', a, b)`` returns the transpose of the
multiplication since subscript 'h' precedes subscript 'i'.
In *explicit mode* the output can be directly controlled by
specifying output subscript labels. This requires the
identifier '->' as well as the list of output subscript labels.
This feature increases the flexibility of the function since
summing can be disabled or forced when required. The call
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
The difference is that `einsum` does not allow broadcasting by default.
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
order of the output subscript labels and therefore returns matrix
multiplication, unlike the example above in implicit mode.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, one can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view (changed in version 1.10.0).
`einsum` also provides an alternative way to provide the subscripts
and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
If the output shape is not provided in this format `einsum` will be
calculated in implicit mode, otherwise it will be performed explicitly.
The examples below have corresponding `einsum` calls with the two
parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
Trace of a matrix:
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
Extract the diagonal (requires explicit form):
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
Sum over an axis (requires explicit form):
>>> np.einsum('ij->i', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [0,1], [0])
array([ 10, 35, 60, 85, 110])
>>> np.sum(a, axis=1)
array([ 10, 35, 60, 85, 110])
For higher dimensional arrays summing a single axis can be done with ellipsis:
>>> np.einsum('...j->...', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [Ellipsis,1], [Ellipsis])
array([ 10, 35, 60, 85, 110])
Compute a matrix transpose, or reorder any number of axes:
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('ij->ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.transpose(c)
array([[0, 3],
[1, 4],
[2, 5]])
Vector inner products:
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
Matrix vector multiplication:
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
Broadcasting and scalar multiplication:
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(',ij', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
Vector outer product:
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
Tensor contraction:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
Writeable returned arrays (since version 1.10.0):
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Example of ellipsis use:
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
""")
##############################################################################
#
# Documentation for ndarray attributes and methods
#
##############################################################################
##############################################################################
#
# ndarray object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray',
"""
ndarray(shape, dtype=float, buffer=None, offset=0,
strides=None, order=None)
An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type object describes the
format of each element in the array (its byte-order, how many bytes it
occupies in memory, whether it is an integer, a floating point number,
or something else, etc.)
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `numpy` module and examine the
methods and attributes of an array.
Parameters
----------
(for the __new__ method; see Notes below)
shape : tuple of ints
Shape of created array.
dtype : data-type, optional
Any object that can be interpreted as a numpy data type.
buffer : object exposing buffer interface, optional
Used to fill the array with data.
offset : int, optional
Offset of array data in buffer.
strides : tuple of ints, optional
Strides of data in memory.
order : {'C', 'F'}, optional
Row-major (C-style) or column-major (Fortran-style) order.
Attributes
----------
T : ndarray
Transpose of the array.
data : buffer
The array's elements, in memory.
dtype : dtype object
Describes the format of the elements in the array.
flags : dict
Dictionary containing information related to memory use, e.g.,
'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
flat : numpy.flatiter object
Flattened version of the array as an iterator. The iterator
allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for
assignment examples; TODO).
imag : ndarray
Imaginary part of the array.
real : ndarray
Real part of the array.
size : int
Number of elements in the array.
itemsize : int
The memory use of each array element in bytes.
nbytes : int
The total number of bytes required to store the array data,
i.e., ``itemsize * size``.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
strides : tuple of ints
The step-size required to move from one element to the next in
memory. For example, a contiguous ``(3, 4)`` array of type
``int16`` in C-order has strides ``(8, 2)``. This implies that
to move from element to element in memory requires jumps of 2 bytes.
To move from row-to-row, one needs to jump 8 bytes at a time
(``2 * 4``).
ctypes : ctypes object
Class containing properties of the array needed for interaction
with ctypes.
base : ndarray
If the array is a view into another array, that array is its `base`
(unless that array is also a view). The `base` array is where the
array data is actually stored.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
dtype : Create a data-type.
Notes
-----
There are two modes of creating an array using ``__new__``:
1. If `buffer` is None, then only `shape`, `dtype`, and `order`
are used.
2. If `buffer` is an object exposing the buffer interface, then
all keywords are interpreted.
No ``__init__`` method is needed because the array is fully initialized
after the ``__new__`` method.
Examples
--------
These examples illustrate the low-level `ndarray` constructor. Refer
to the `See Also` section above for easier ways of constructing an
ndarray.
First mode, `buffer` is None:
>>> np.ndarray(shape=(2,2), dtype=float, order='F')
array([[0.0e+000, 0.0e+000], # random
[ nan, 2.5e-323]])
Second mode:
>>> np.ndarray((2,), buffer=np.array([1,2,3]),
... offset=np.int_().itemsize,
... dtype=int) # offset = 1*itemsize, i.e. skip first element
array([2, 3])
""")
##############################################################################
#
# ndarray attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
"""Array protocol: Python side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
"""None."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
"""Array priority."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""
Base object if memory is from some other object.
Examples
--------
The base of an array that owns its memory is None:
>>> x = np.array([1,2,3,4])
>>> x.base is None
True
Slicing creates a view, whose memory is shared with x:
>>> y = x[2:]
>>> y.base is x
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
"""
An object to simplify the interaction of the array with the ctypes
module.
This attribute creates an object that makes it easier to use arrays
when calling shared libraries with the ctypes module. The returned
object has, among others, data, shape, and strides attributes (see
Notes below) which themselves return ctypes objects that can be used
as arguments to a shared library.
Parameters
----------
None
Returns
-------
c : Python object
Possessing attributes data, shape, strides, etc.
See Also
--------
numpy.ctypeslib
Notes
-----
Below are the public attributes of this object which were documented
in "Guide to NumPy" (we have omitted undocumented public attributes,
as well as documented private attributes):
.. autoattribute:: numpy.core._internal._ctypes.data
:noindex:
.. autoattribute:: numpy.core._internal._ctypes.shape
:noindex:
.. autoattribute:: numpy.core._internal._ctypes.strides
:noindex:
.. automethod:: numpy.core._internal._ctypes.data_as
:noindex:
.. automethod:: numpy.core._internal._ctypes.shape_as
:noindex:
.. automethod:: numpy.core._internal._ctypes.strides_as
:noindex:
If the ctypes module is not available, then the ctypes attribute
of array objects still returns something useful, but ctypes objects
are not returned and errors may be raised instead. In particular,
the object will still have the ``as_parameter`` attribute which will
return an integer equal to the data attribute.
Examples
--------
>>> import ctypes
>>> x
array([[0, 1],
[2, 3]])
>>> x.ctypes.data
30439712
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long))
<ctypes.LP_c_long object at 0x01F01300>
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents
c_long(0)
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents
c_longlong(4294967296L)
>>> x.ctypes.shape
<numpy.core._internal.c_long_Array_2 object at 0x01FFD580>
>>> x.ctypes.shape_as(ctypes.c_long)
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides_as(ctypes.c_longlong)
<numpy.core._internal.c_longlong_Array_2 object at 0x01F01300>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
"""Python buffer object pointing to the start of the array's data."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""
Data-type of the array's elements.
Parameters
----------
None
Returns
-------
d : numpy dtype object
See Also
--------
numpy.dtype
Examples
--------
>>> x
array([[0, 1],
[2, 3]])
>>> x.dtype
dtype('int32')
>>> type(x.dtype)
<type 'numpy.dtype'>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
"""
The imaginary part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.imag
array([ 0. , 0.70710678])
>>> x.imag.dtype
dtype('float64')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
"""
Length of one array element in bytes.
Examples
--------
>>> x = np.array([1,2,3], dtype=np.float64)
>>> x.itemsize
8
>>> x = np.array([1,2,3], dtype=np.complex128)
>>> x.itemsize
16
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
"""
Information about the memory layout of the array.
Attributes
----------
C_CONTIGUOUS (C)
The data is in a single, C-style contiguous segment.
F_CONTIGUOUS (F)
The data is in a single, Fortran-style contiguous segment.
OWNDATA (O)
The array owns the memory it uses or borrows it from another object.
WRITEABLE (W)
The data area can be written to. Setting this to False locks
the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
from its base array at creation time, but a view of a writeable
array may be subsequently locked while the base array remains writeable.
(The opposite is not true, in that a view of a locked array may not
be made writeable. However, currently, locking a base object does not
lock any views that already reference it, so under that circumstance it
is possible to alter the contents of a locked array via a previously
created writeable view onto it.) Attempting to change a non-writeable
array raises a RuntimeError exception.
ALIGNED (A)
The data and all elements are aligned appropriately for the hardware.
WRITEBACKIFCOPY (X)
This array is a copy of some other array. The C-API function
PyArray_ResolveWritebackIfCopy must be called before deallocating
to the base array will be updated with the contents of this array.
UPDATEIFCOPY (U)
(Deprecated, use WRITEBACKIFCOPY) This array is a copy of some other array.
When this array is
deallocated, the base array will be updated with the contents of
this array.
FNC
F_CONTIGUOUS and not C_CONTIGUOUS.
FORC
F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
BEHAVED (B)
ALIGNED and WRITEABLE.
CARRAY (CA)
BEHAVED and C_CONTIGUOUS.
FARRAY (FA)
BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
Notes
-----
The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
names are only supported in dictionary access.
Only the WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be
changed by the user, via direct assignment to the attribute or dictionary
entry, or by calling `ndarray.setflags`.
The array flags cannot be set arbitrarily:
- UPDATEIFCOPY can only be set ``False``.
- WRITEBACKIFCOPY can only be set ``False``.
- ALIGNED can only be set ``True`` if the data is truly aligned.
- WRITEABLE can only be set ``True`` if the array owns its own memory
or the ultimate owner of the memory exposes a writeable buffer
interface or is a string.
Arrays can be both C-style and Fortran-style contiguous simultaneously.
This is clear for 1-dimensional arrays, but can also be true for higher
dimensional arrays.
Even for contiguous arrays a stride for a given dimension
``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
or the array has no elements.
It does *not* generally hold that ``self.strides[-1] == self.itemsize``
for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for
Fortran-style contiguous arrays is true.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
"""
A 1-D iterator over the array.
This is a `numpy.flatiter` instance, which acts similarly to, but is not
a subclass of, Python's built-in iterator object.
See Also
--------
flatten : Return a copy of the array collapsed into one dimension.
flatiter
Examples
--------
>>> x = np.arange(1, 7).reshape(2, 3)
>>> x
array([[1, 2, 3],
[4, 5, 6]])
>>> x.flat[3]
4
>>> x.T
array([[1, 4],
[2, 5],
[3, 6]])
>>> x.T.flat[3]
5
>>> type(x.flat)
<class 'numpy.flatiter'>
An assignment example:
>>> x.flat = 3; x
array([[3, 3, 3],
[3, 3, 3]])
>>> x.flat[[1,4]] = 1; x
array([[3, 1, 3],
[3, 1, 3]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
"""
Total bytes consumed by the elements of the array.
Notes
-----
Does not include memory consumed by non-element attributes of the
array object.
Examples
--------
>>> x = np.zeros((3,5,2), dtype=np.complex128)
>>> x.nbytes
480
>>> np.prod(x.shape) * x.itemsize
480
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
"""
Number of array dimensions.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> x.ndim
1
>>> y = np.zeros((2, 3, 4))
>>> y.ndim
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
"""
The real part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.real
array([ 1. , 0.70710678])
>>> x.real.dtype
dtype('float64')
See Also
--------
numpy.real : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
"""
Tuple of array dimensions.
The shape property is usually used to get the current shape of an array,
but may also be used to reshape the array in-place by assigning a tuple of
array dimensions to it. As with `numpy.reshape`, one of the new shape
dimensions can be -1, in which case its value is inferred from the size of
the array and the remaining dimensions. Reshaping an array in-place will
fail if a copy is required.
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> x.shape
(4,)
>>> y = np.zeros((2, 3, 4))
>>> y.shape
(2, 3, 4)
>>> y.shape = (3, 8)
>>> y
array([[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> y.shape = (3, 6)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: total size of new array must be unchanged
>>> np.zeros((4,2))[::2].shape = (-1,)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: incompatible shape for a non-contiguous array
See Also
--------
numpy.reshape : similar function
ndarray.reshape : similar method
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
"""
Number of elements in the array.
Equal to ``np.prod(a.shape)``, i.e., the product of the array's
dimensions.
Notes
-----
`a.size` returns a standard arbitrary precision Python integer. This
may not be the case with other methods of obtaining the same value
(like the suggested ``np.prod(a.shape)``, which returns an instance
of ``np.int_``), and may be relevant if the value is used further in
calculations that may overflow a fixed size integer type.
Examples
--------
>>> x = np.zeros((3, 5, 2), dtype=np.complex128)
>>> x.size
30
>>> np.prod(x.shape)
30
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
"""
Tuple of bytes to step in each dimension when traversing an array.
The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
is::
offset = sum(np.array(i) * a.strides)
A more detailed explanation of strides can be found in the
"ndarray.rst" file in the NumPy reference guide.
Notes
-----
Imagine an array of 32-bit integers (each 4 bytes)::
x = np.array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]], dtype=np.int32)
This array is stored in memory as 40 bytes, one after the other
(known as a contiguous block of memory). The strides of an array tell
us how many bytes we have to skip in memory to move to the next position
along a certain axis. For example, we have to skip 4 bytes (1 value) to
move to the next column, but 20 bytes (5 values) to get to the same
position in the next row. As such, the strides for the array `x` will be
``(20, 4)``.
See Also
--------
numpy.lib.stride_tricks.as_strided
Examples
--------
>>> y = np.reshape(np.arange(2*3*4), (2,3,4))
>>> y
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
>>> y.strides
(48, 16, 4)
>>> y[1,1,1]
17
>>> offset=sum(y.strides * np.array((1,1,1)))
>>> offset/y.itemsize
17
>>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
>>> x.strides
(32, 4, 224, 1344)
>>> i = np.array([3,5,2,2])
>>> offset = sum(i * x.strides)
>>> x[3,5,2,2]
813
>>> offset / x.itemsize
813
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
"""
The transposed array.
Same as ``self.transpose()``.
Examples
--------
>>> x = np.array([[1.,2.],[3.,4.]])
>>> x
array([[ 1., 2.],
[ 3., 4.]])
>>> x.T
array([[ 1., 3.],
[ 2., 4.]])
>>> x = np.array([1.,2.,3.,4.])
>>> x
array([ 1., 2., 3., 4.])
>>> x.T
array([ 1., 2., 3., 4.])
See Also
--------
transpose
"""))
##############################################################################
#
# ndarray methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
""" a.__array__(|dtype) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
"""a.__array_prepare__(obj) -> Object of same type as ndarray object obj.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
"""a.__array_wrap__(obj) -> Object of same type as ndarray object a.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
"""a.__copy__()
Used if :func:`copy.copy` is called on an array. Returns a copy of the array.
Equivalent to ``a.copy(order='K')``.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
"""a.__deepcopy__(memo, /) -> Deep copy of array.
Used if :func:`copy.deepcopy` is called on an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
"""a.__reduce__()
For pickling.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
"""a.__setstate__(state, /)
For unpickling.
The `state` argument must be a sequence that contains the following
elements:
Parameters
----------
version : int
optional pickle version. If omitted defaults to 0.
shape : tuple
dtype : data-type
isFortran : bool
rawdata : string or list
a binary string with the data (or a list if 'a' is an object array)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
"""
a.all(axis=None, out=None, keepdims=False)
Returns True if all elements evaluate to True.
Refer to `numpy.all` for full documentation.
See Also
--------
numpy.all : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
"""
a.any(axis=None, out=None, keepdims=False)
Returns True if any of the elements of `a` evaluate to True.
Refer to `numpy.any` for full documentation.
See Also
--------
numpy.any : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
"""
a.argmax(axis=None, out=None)
Return indices of the maximum values along the given axis.
Refer to `numpy.argmax` for full documentation.
See Also
--------
numpy.argmax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
"""
a.argmin(axis=None, out=None)
Return indices of the minimum values along the given axis of `a`.
Refer to `numpy.argmin` for detailed documentation.
See Also
--------
numpy.argmin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""
a.argsort(axis=-1, kind=None, order=None)
Returns the indices that would sort this array.
Refer to `numpy.argsort` for full documentation.
See Also
--------
numpy.argsort : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition',
"""
a.argpartition(kth, axis=-1, kind='introselect', order=None)
Returns the indices that would partition this array.
Refer to `numpy.argpartition` for full documentation.
.. versionadded:: 1.8.0
See Also
--------
numpy.argpartition : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
"""
a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through (default), otherwise
the returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to false, and the `dtype`, `order`, and `subok`
requirements are satisfied, the input array is returned instead
of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new array of the same shape as the input array, with dtype, order
given by `dtype`, `order`.
Notes
-----
.. versionchanged:: 1.17.0
Casting between a simple data type and a structured one is possible only
for "unsafe" casting. Casting to multiple fields is allowed, but
casting from multiple fields is not.
.. versionchanged:: 1.9.0
Casting from numeric to string types in 'safe' casting mode requires
that the string dtype length is long enough to store the max
integer/float value converted.
Raises
------
ComplexWarning
When casting from complex to float or int. To avoid this,
one should use ``a.real.astype(t)``.
Examples
--------
>>> x = np.array([1, 2, 2.5])
>>> x
array([1. , 2. , 2.5])
>>> x.astype(int)
array([1, 2, 2])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""
a.byteswap(inplace=False)
Swap the bytes of the array elements
Toggle between low-endian and big-endian data representation by
returning a byteswapped array, optionally swapped in-place.
Arrays of byte-strings are not swapped. The real and imaginary
parts of a complex number are swapped individually.
Parameters
----------
inplace : bool, optional
If ``True``, swap bytes in-place, default is ``False``.
Returns
-------
out : ndarray
The byteswapped array. If `inplace` is ``True``, this is
a view to self.
Examples
--------
>>> A = np.array([1, 256, 8755], dtype=np.int16)
>>> list(map(hex, A))
['0x1', '0x100', '0x2233']
>>> A.byteswap(inplace=True)
array([ 256, 1, 13090], dtype=int16)
>>> list(map(hex, A))
['0x100', '0x1', '0x3322']
Arrays of byte-strings are not swapped
>>> A = np.array([b'ceg', b'fac'])
>>> A.byteswap()
array([b'ceg', b'fac'], dtype='|S3')
``A.newbyteorder().byteswap()`` produces an array with the same values
but different representation in memory
>>> A = np.array([1, 2, 3])
>>> A.view(np.uint8)
array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
0, 0], dtype=uint8)
>>> A.newbyteorder().byteswap(inplace=True)
array([1, 2, 3])
>>> A.view(np.uint8)
array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0,
0, 3], dtype=uint8)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
"""
a.choose(choices, out=None, mode='raise')
Use an index array to construct a new array from a set of choices.
Refer to `numpy.choose` for full documentation.
See Also
--------
numpy.choose : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""
a.clip(min=None, max=None, out=None, **kwargs)
Return an array whose values are limited to ``[min, max]``.
One of max or min must be given.
Refer to `numpy.clip` for full documentation.
See Also
--------
numpy.clip : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
"""
a.compress(condition, axis=None, out=None)
Return selected slices of this array along given axis.
Refer to `numpy.compress` for full documentation.
See Also
--------
numpy.compress : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
"""
a.conj()
Complex-conjugate all elements.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
"""
a.conjugate()
Return the complex conjugate, element-wise.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
"""
a.copy(order='C')
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :func:`numpy.copy` are very
similar, but have different default values for their order=
arguments.)
See also
--------
numpy.copy
numpy.copyto
Examples
--------
>>> x = np.array([[1,2,3],[4,5,6]], order='F')
>>> y = x.copy()
>>> x.fill(0)
>>> x
array([[0, 0, 0],
[0, 0, 0]])
>>> y
array([[1, 2, 3],
[4, 5, 6]])
>>> y.flags['C_CONTIGUOUS']
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
"""
a.cumprod(axis=None, dtype=None, out=None)
Return the cumulative product of the elements along the given axis.
Refer to `numpy.cumprod` for full documentation.
See Also
--------
numpy.cumprod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
"""
a.cumsum(axis=None, dtype=None, out=None)
Return the cumulative sum of the elements along the given axis.
Refer to `numpy.cumsum` for full documentation.
See Also
--------
numpy.cumsum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
"""
a.diagonal(offset=0, axis1=0, axis2=1)
Return specified diagonals. In NumPy 1.9 the returned array is a
read-only view instead of a copy as in previous NumPy versions. In
a future version the read-only restriction will be removed.
Refer to :func:`numpy.diagonal` for full documentation.
See Also
--------
numpy.diagonal : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dot',
"""
a.dot(b, out=None)
Dot product of two arrays.
Refer to `numpy.dot` for full documentation.
See Also
--------
numpy.dot : equivalent function
Examples
--------
>>> a = np.eye(2)
>>> b = np.ones((2, 2)) * 2
>>> a.dot(b)
array([[2., 2.],
[2., 2.]])
This array method can be conveniently chained:
>>> a.dot(b).dot(b)
array([[8., 8.],
[8., 8.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
"""a.dump(file)
Dump a pickle of the array to the specified file.
The array can be read back with pickle.load or numpy.load.
Parameters
----------
file : str or Path
A string naming the dump file.
.. versionchanged:: 1.17.0
`pathlib.Path` objects are now accepted.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
"""
a.dumps()
Returns the pickle of the array as a string.
pickle.loads or numpy.loads will convert the string back to an array.
Parameters
----------
None
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
"""
a.fill(value)
Fill the array with a scalar value.
Parameters
----------
value : scalar
All elements of `a` will be assigned this value.
Examples
--------
>>> a = np.array([1, 2])
>>> a.fill(0)
>>> a
array([0, 0])
>>> a = np.empty(2)
>>> a.fill(1)
>>> a
array([1., 1.])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
"""
a.flatten(order='C')
Return a copy of the array collapsed into one dimension.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order.
'F' means to flatten in column-major (Fortran-
style) order. 'A' means to flatten in column-major
order if `a` is Fortran *contiguous* in memory,
row-major order otherwise. 'K' means to flatten
`a` in the order the elements occur in memory.
The default is 'C'.
Returns
-------
y : ndarray
A copy of the input array, flattened to one dimension.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the array.
Examples
--------
>>> a = np.array([[1,2], [3,4]])
>>> a.flatten()
array([1, 2, 3, 4])
>>> a.flatten('F')
array([1, 3, 2, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
"""
a.getfield(dtype, offset=0)
Returns a field of the given array as a certain type.
A field is a view of the array data with a given data-type. The values in
the view are determined by the given type and the offset into the current
array in bytes. The offset needs to be such that the view dtype fits in the
array dtype; for example an array of dtype complex128 has 16-byte elements.
If taking a view with a 32-bit integer (4 bytes), the offset needs to be
between 0 and 12 bytes.
Parameters
----------
dtype : str or dtype
The data type of the view. The dtype size of the view can not be larger
than that of the array itself.
offset : int
Number of bytes to skip before beginning the element view.
Examples
--------
>>> x = np.diag([1.+1.j]*2)
>>> x[1, 1] = 2 + 4.j
>>> x
array([[1.+1.j, 0.+0.j],
[0.+0.j, 2.+4.j]])
>>> x.getfield(np.float64)
array([[1., 0.],
[0., 2.]])
By choosing an offset of 8 bytes we can select the complex part of the
array for our view:
>>> x.getfield(np.float64, offset=8)
array([[1., 0.],
[0., 4.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
"""
a.item(*args)
Copy an element of an array to a standard Python scalar and return it.
Parameters
----------
\\*args : Arguments (variable number and type)
* none: in this case, the method only works for arrays
with one element (`a.size == 1`), which element is
copied into a standard Python scalar object and returned.
* int_type: this argument is interpreted as a flat index into
the array, specifying which element to copy and return.
* tuple of int_types: functions as does a single int_type argument,
except that the argument is interpreted as an nd-index into the
array.
Returns
-------
z : Standard Python scalar object
A copy of the specified element of the array as a suitable
Python scalar
Notes
-----
When the data type of `a` is longdouble or clongdouble, item() returns
a scalar array object because there is no available Python scalar that
would not lose information. Void arrays return a buffer object for item(),
unless fields are defined, in which case a tuple is returned.
`item` is very similar to a[args], except, instead of an array scalar,
a standard Python scalar is returned. This can be useful for speeding up
access to elements of the array and doing arithmetic on elements of the
array using Python's optimized math.
Examples
--------
>>> np.random.seed(123)
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[2, 2, 6],
[1, 3, 6],
[1, 0, 1]])
>>> x.item(3)
1
>>> x.item(7)
0
>>> x.item((0, 1))
2
>>> x.item((2, 2))
1
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
"""
a.itemset(*args)
Insert scalar into an array (scalar is cast to array's dtype, if possible)
There must be at least 1 argument, and define the last argument
as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster
than ``a[args] = item``. The item should be a scalar value and `args`
must select a single item in the array `a`.
Parameters
----------
\\*args : Arguments
If one argument: a scalar, only used in case `a` is of size 1.
If two arguments: the last argument is the value to be set
and must be a scalar, the first argument specifies a single array
element location. It is either an int or a tuple.
Notes
-----
Compared to indexing syntax, `itemset` provides some speed increase
for placing a scalar into a particular location in an `ndarray`,
if you must do this. However, generally this is discouraged:
among other problems, it complicates the appearance of the code.
Also, when using `itemset` (and `item`) inside a loop, be sure
to assign the methods to a local variable to avoid the attribute
look-up at each loop iteration.
Examples
--------
>>> np.random.seed(123)
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[2, 2, 6],
[1, 3, 6],
[1, 0, 1]])
>>> x.itemset(4, 0)
>>> x.itemset((2, 2), 9)
>>> x
array([[2, 2, 6],
[1, 0, 6],
[1, 0, 9]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""
a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
Return the maximum along a given axis.
Refer to `numpy.amax` for full documentation.
See Also
--------
numpy.amax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
"""
a.mean(axis=None, dtype=None, out=None, keepdims=False)
Returns the average of the array elements along given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""
a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
Return the minimum along a given axis.
Refer to `numpy.amin` for full documentation.
See Also
--------
numpy.amin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S')
Return the array with the same data viewed with a different byte order.
Equivalent to::
arr.view(arr.dtype.newbytorder(new_order))
Changes are also made in all fields and sub-arrays of the array data
type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_arr : array
New array object with the dtype reflecting given change to the
byte order.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
"""
a.nonzero()
Return the indices of the elements that are non-zero.
Refer to `numpy.nonzero` for full documentation.
See Also
--------
numpy.nonzero : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""
a.prod(axis=None, dtype=None, out=None, keepdims=False, initial=1, where=True)
Return the product of the array elements over the given axis
Refer to `numpy.prod` for full documentation.
See Also
--------
numpy.prod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
"""
a.ptp(axis=None, out=None, keepdims=False)
Peak to peak (maximum - minimum) value along a given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""
a.put(indices, values, mode='raise')
Set ``a.flat[n] = values[n]`` for all `n` in indices.
Refer to `numpy.put` for full documentation.
See Also
--------
numpy.put : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""
a.ravel([order])
Return a flattened array.
Refer to `numpy.ravel` for full documentation.
See Also
--------
numpy.ravel : equivalent function
ndarray.flat : a flat iterator on the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
"""
a.repeat(repeats, axis=None)
Repeat elements of an array.
Refer to `numpy.repeat` for full documentation.
See Also
--------
numpy.repeat : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
"""
a.reshape(shape, order='C')
Returns an array containing the same data with a new shape.
Refer to `numpy.reshape` for full documentation.
See Also
--------
numpy.reshape : equivalent function
Notes
-----
Unlike the free function `numpy.reshape`, this method on `ndarray` allows
the elements of the shape parameter to be passed in as separate arguments.
For example, ``a.reshape(10, 11)`` is equivalent to
``a.reshape((10, 11))``.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
"""
a.resize(new_shape, refcheck=True)
Change shape and size of array in-place.
Parameters
----------
new_shape : tuple of ints, or `n` ints
Shape of resized array.
refcheck : bool, optional
If False, reference count will not be checked. Default is True.
Returns
-------
None
Raises
------
ValueError
If `a` does not own its own data or references or views to it exist,
and the data memory must be changed.
PyPy only: will always raise if the data memory must be changed, since
there is no reliable way to determine if references or views to it
exist.
SystemError
If the `order` keyword argument is specified. This behaviour is a
bug in NumPy.
See Also
--------
resize : Return a new array with the specified shape.
Notes
-----
This reallocates space for the data area if necessary.
Only contiguous arrays (data elements consecutive in memory) can be
resized.
The purpose of the reference count check is to make sure you
do not use this array as a buffer for another Python object and then
reallocate the memory. However, reference counts can increase in
other ways so if you are sure that you have not shared the memory
for this array with another Python object, then you may safely set
`refcheck` to False.
Examples
--------
Shrinking an array: array is flattened (in the order that the data are
stored in memory), resized, and reshaped:
>>> a = np.array([[0, 1], [2, 3]], order='C')
>>> a.resize((2, 1))
>>> a
array([[0],
[1]])
>>> a = np.array([[0, 1], [2, 3]], order='F')
>>> a.resize((2, 1))
>>> a
array([[0],
[2]])
Enlarging an array: as above, but missing entries are filled with zeros:
>>> b = np.array([[0, 1], [2, 3]])
>>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
>>> b
array([[0, 1, 2],
[3, 0, 0]])
Referencing an array prevents resizing...
>>> c = a
>>> a.resize((1, 1))
Traceback (most recent call last):
...
ValueError: cannot resize an array that references or is referenced ...
Unless `refcheck` is False:
>>> a.resize((1, 1), refcheck=False)
>>> a
array([[0]])
>>> c
array([[0]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
"""
a.round(decimals=0, out=None)
Return `a` with each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
"""
a.searchsorted(v, side='left', sorter=None)
Find indices where elements of v should be inserted in a to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : equivalent function
"""))
# Register docstrings for the C-implemented ndarray.setfield / ndarray.setflags
# methods (the methods themselves live in numpy.core.multiarray, so their docs
# are attached here via add_newdoc).
add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
"""
a.setfield(val, dtype, offset=0)
Put a value into a specified place in a field defined by a data-type.
Place `val` into `a`'s field defined by `dtype` and beginning `offset`
bytes into the field.
Parameters
----------
val : object
Value to be placed in field.
dtype : dtype object
Data-type of the field in which to place `val`.
offset : int, optional
The number of bytes into the field at which to place `val`.
Returns
-------
None
See Also
--------
getfield
Examples
--------
>>> x = np.eye(3)
>>> x.getfield(np.float64)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
>>> x.setfield(3, np.int32)
>>> x.getfield(np.int32)
array([[3, 3, 3],
[3, 3, 3],
[3, 3, 3]], dtype=int32)
>>> x
array([[1.0e+000, 1.5e-323, 1.5e-323],
[1.5e-323, 1.0e+000, 1.5e-323],
[1.5e-323, 1.5e-323, 1.0e+000]])
>>> x.setfield(np.eye(3), np.int32)
>>> x
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
"""
a.setflags(write=None, align=None, uic=None)
Set array flags WRITEABLE, ALIGNED, (WRITEBACKIFCOPY and UPDATEIFCOPY),
respectively.
These Boolean-valued flags affect how numpy interprets the memory
area used by `a` (see Notes below). The ALIGNED flag can only
be set to True if the data is actually aligned according to the type.
The WRITEBACKIFCOPY and (deprecated) UPDATEIFCOPY flags can never be set
to True. The flag WRITEABLE can only be set to True if the array owns its
own memory, or the ultimate owner of the memory exposes a writeable buffer
interface, or is a string. (The exception for string is made so that
unpickling can be done without copying memory.)
Parameters
----------
write : bool, optional
Describes whether or not `a` can be written to.
align : bool, optional
Describes whether or not `a` is aligned properly for its type.
uic : bool, optional
Describes whether or not `a` is a copy of another "base" array.
Notes
-----
Array flags provide information about how the memory area used
for the array is to be interpreted. There are 7 Boolean flags
in use, only four of which can be changed by the user:
WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED.
WRITEABLE (W) the data area can be written to;
ALIGNED (A) the data and strides are aligned appropriately for the hardware
(as determined by the compiler);
UPDATEIFCOPY (U) (deprecated), replaced by WRITEBACKIFCOPY;
WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced
by .base). When the C-API function PyArray_ResolveWritebackIfCopy is
called, the base array will be updated with the contents of this array.
All flags can be accessed using the single (upper case) letter as well
as the full name.
Examples
--------
>>> y = np.array([[3, 1, 7],
... [2, 0, 0],
... [8, 5, 9]])
>>> y
array([[3, 1, 7],
[2, 0, 0],
[8, 5, 9]])
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
>>> y.setflags(write=0, align=0)
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : False
ALIGNED : False
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
>>> y.setflags(uic=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: cannot set WRITEBACKIFCOPY flag to True
"""))
# Docstring for ndarray.sort.
# Fix: the rst directive read ".. versionchanged:: 1.15.0." — the trailing
# period is part of the version argument and breaks Sphinx's version parsing,
# so it is dropped here.
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""
a.sort(axis=-1, kind=None, order=None)
Sort an array in-place. Refer to `numpy.sort` for full documentation.
Parameters
----------
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort under the covers and, in general, the
actual implementation will vary with datatype. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.sort : Return a sorted copy of an array.
numpy.argsort : Indirect sort.
numpy.lexsort : Indirect stable sort on multiple keys.
numpy.searchsorted : Find elements in sorted array.
numpy.partition: Partial sort.
Notes
-----
See `numpy.sort` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4], [3,1]])
>>> a.sort(axis=1)
>>> a
array([[1, 4],
[1, 3]])
>>> a.sort(axis=0)
>>> a
array([[1, 3],
[1, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
>>> a.sort(order='y')
>>> a
array([(b'c', 1), (b'a', 2)],
dtype=[('x', 'S1'), ('y', '<i8')])
"""))
# Docstring for ndarray.partition.
# Fix: "parititioned" -> "partitioned" in the See Also entry.
add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
"""
a.partition(kth, axis=-1, kind='introselect', order=None)
Rearranges the elements in the array in such a way that the value of the
element in kth position is in the position it would be in a sorted array.
All elements smaller than the kth element are moved before this element and
all equal or greater are moved behind it. The ordering of the elements in
the two partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
kth : int or sequence of ints
Element index to partition by. The kth element value will be in its
final sorted position and all smaller elements will be moved before it
and all equal or greater elements behind it.
The order of all elements in the partitions is undefined.
If provided with a sequence of kth it will partition all elements
indexed by kth of them into their sorted position at once.
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need to be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.partition : Return a partitioned copy of an array.
argpartition : Indirect partition.
sort : Full sort.
Notes
-----
See ``np.partition`` for notes on the different algorithms.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> a.partition(3)
>>> a
array([2, 1, 3, 4])
>>> a.partition((1, 3))
>>> a
array([1, 2, 3, 4])
"""))
# Short docstrings for ndarray methods that simply delegate to the
# corresponding numpy free functions: squeeze, std, sum, swapaxes, take.
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
"""
a.squeeze(axis=None)
Remove single-dimensional entries from the shape of `a`.
Refer to `numpy.squeeze` for full documentation.
See Also
--------
numpy.squeeze : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
"""
a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the standard deviation of the array elements along given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""
a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True)
Return the sum of the array elements over the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
"""
a.swapaxes(axis1, axis2)
Return a view of the array with `axis1` and `axis2` interchanged.
Refer to `numpy.swapaxes` for full documentation.
See Also
--------
numpy.swapaxes : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
"""
a.take(indices, axis=None, out=None, mode='raise')
Return an array formed from the elements of `a` at the given indices.
Refer to `numpy.take` for full documentation.
See Also
--------
numpy.take : equivalent function
"""))
# Docstring for ndarray.tofile.
# Fix: "files objects" -> "file objects" in the Notes section.
add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
"""
a.tofile(fid, sep="", format="%s")
Write array to a file as text or binary (default).
Data is always written in 'C' order, independent of the order of `a`.
The data produced by this method can be recovered using the function
fromfile().
Parameters
----------
fid : file or str or Path
An open file object, or a string containing a filename.
.. versionchanged:: 1.17.0
`pathlib.Path` objects are now accepted.
sep : str
Separator between array items for text output.
If "" (empty), a binary file is written, equivalent to
``file.write(a.tobytes())``.
format : str
Format string for text file output.
Each entry in the array is formatted to text by first converting
it to the closest Python type, and then using "format" % item.
Notes
-----
This is a convenience function for quick storage of array data.
Information on endianness and precision is lost, so this method is not a
good choice for files intended to archive data or transport data between
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
When fid is a file object, array contents are directly written to the
file, bypassing the file object's ``write`` method. As a result, tofile
cannot be used with file objects supporting compression (e.g., GzipFile)
or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
"""))
# Docstrings for tolist and trace, plus the shared tobytes/tostring template:
# tobytesdoc is a format string instantiated twice below so the deprecated
# tostring alias and tobytes share one body, differing only in the
# {name}/{deprecated} slots.
add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""
a.tolist()
Return the array as an ``a.ndim``-levels deep nested list of Python scalars.
Return a copy of the array data as a (nested) Python list.
Data items are converted to the nearest compatible builtin Python type, via
the `~numpy.ndarray.item` function.
If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will
not be a list at all, but a simple Python scalar.
Parameters
----------
none
Returns
-------
y : object, or list of object, or list of list of object, or ...
The possibly nested list of array elements.
Notes
-----
The array may be recreated via ``a = np.array(a.tolist())``, although this
may sometimes lose precision.
Examples
--------
For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``,
except that ``tolist`` changes numpy scalars to Python scalars:
>>> a = np.uint32([1, 2])
>>> a_list = list(a)
>>> a_list
[1, 2]
>>> type(a_list[0])
<class 'numpy.uint32'>
>>> a_tolist = a.tolist()
>>> a_tolist
[1, 2]
>>> type(a_tolist[0])
<class 'int'>
Additionally, for a 2D array, ``tolist`` applies recursively:
>>> a = np.array([[1, 2], [3, 4]])
>>> list(a)
[array([1, 2]), array([3, 4])]
>>> a.tolist()
[[1, 2], [3, 4]]
The base case for this recursion is a 0D array:
>>> a = np.array(1)
>>> list(a)
Traceback (most recent call last):
...
TypeError: iteration over a 0-d array
>>> a.tolist()
1
"""))
tobytesdoc = """
a.{name}(order='C')
Construct Python bytes containing the raw data bytes in the array.
Constructs Python bytes showing a copy of the raw contents of
data memory. The bytes object can be produced in either 'C' or 'Fortran',
or 'Any' order (the default is 'C'-order). 'Any' order means C-order
unless the F_CONTIGUOUS flag in the array is set, in which case it
means 'Fortran' order.
{deprecated}
Parameters
----------
order : {{'C', 'F', None}}, optional
Order of the data for multidimensional arrays:
C, Fortran, or the same as for the original array.
Returns
-------
s : bytes
Python bytes exhibiting a copy of `a`'s raw data.
Examples
--------
>>> x = np.array([[0, 1], [2, 3]], dtype='<u2')
>>> x.tobytes()
b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00'
>>> x.tobytes('C') == x.tobytes()
True
>>> x.tobytes('F')
b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00'
"""
add_newdoc('numpy.core.multiarray', 'ndarray',
('tostring', tobytesdoc.format(name='tostring',
deprecated=
'This function is a compatibility '
'alias for tobytes. Despite its '
'name it returns bytes not '
'strings.')))
add_newdoc('numpy.core.multiarray', 'ndarray',
('tobytes', tobytesdoc.format(name='tobytes',
deprecated='.. versionadded:: 1.9.0')))
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
"""
a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
Return the sum along diagonals of the array.
Refer to `numpy.trace` for full documentation.
See Also
--------
numpy.trace : equivalent function
"""))
# Docstring for ndarray.transpose.
# Fix: `np.atleast2d` is not a numpy API; the function is `np.atleast_2d`.
add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
"""
a.transpose(*axes)
Returns a view of the array with axes transposed.
For a 1-D array this has no effect, as a transposed vector is simply the
same vector. To convert a 1-D array into a 2D column vector, an additional
dimension must be added. `np.atleast_2d(a).T` achieves this, as does
`a[:, np.newaxis]`.
For a 2-D array, this is a standard matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
Parameters
----------
axes : None, tuple of ints, or `n` ints
* None or no argument: reverses the order of the axes.
* tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
`i`-th axis becomes `a.transpose()`'s `j`-th axis.
* `n` ints: same as an n-tuple of the same ints (this form is
intended simply as a "convenience" alternative to the tuple form)
Returns
-------
out : ndarray
View of `a`, with axes suitably permuted.
See Also
--------
ndarray.T : Array property returning the array transposed.
ndarray.reshape : Give a new shape to an array without changing its data.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> a
array([[1, 2],
[3, 4]])
>>> a.transpose()
array([[1, 3],
[2, 4]])
>>> a.transpose((1, 0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1, 0)
array([[1, 3],
[2, 4]])
"""))
# Docstrings for ndarray.var (thin wrapper over numpy.var) and ndarray.view
# (reinterpret the same memory under a different dtype and/or ndarray
# subclass).
add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
"""
a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the variance of the array elements, along given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""
a.view(dtype=None, type=None)
New view of array with the same data.
Parameters
----------
dtype : data-type or ndarray sub-class, optional
Data-type descriptor of the returned view, e.g., float32 or int16. The
default, None, results in the view having the same data-type as `a`.
This argument can also be specified as an ndarray sub-class, which
then specifies the type of the returned object (this is equivalent to
setting the ``type`` parameter).
type : Python type, optional
Type of the returned view, e.g., ndarray or matrix. Again, the
default None results in type preservation.
Notes
-----
``a.view()`` is used two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a
regular array to a structured array), then the behavior of the view
cannot be predicted just from the superficial appearance of ``a`` (shown
by ``print(a)``). It also depends on exactly how ``a`` is stored in
memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
defined as a slice or transpose, etc., the view may give different
results.
Examples
--------
>>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
Viewing array data using a different type and dtype:
>>> y = x.view(dtype=np.int16, type=np.matrix)
>>> y
matrix([[513]], dtype=int16)
>>> print(type(y))
<class 'numpy.matrix'>
Creating a view on a structured array so it can be used in calculations
>>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
>>> xv = x.view(dtype=np.int8).reshape(-1,2)
>>> xv
array([[1, 2],
[3, 4]], dtype=int8)
>>> xv.mean(0)
array([2., 3.])
Making changes to the view changes the underlying array
>>> xv[0,1] = 20
>>> x
array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')])
Using a view to convert an array to a recarray:
>>> z = x.view(np.recarray)
>>> z.a
array([1, 3], dtype=int8)
Views share data:
>>> x[0] = (9, 10)
>>> z[0]
(9, 10)
Views that change the dtype size (bytes per entry) should normally be
avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
>>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
>>> y = x[:, 0:2]
>>> y
array([[1, 2],
[4, 5]], dtype=int16)
>>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
Traceback (most recent call last):
...
ValueError: To change to a dtype of a different size, the array must be C-contiguous
>>> z = y.copy()
>>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
array([[(1, 2)],
[(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
"""))
##############################################################################
#
# umath functions
#
##############################################################################
# Docstrings for free functions in numpy.core.umath: frompyfunc plus the
# low-level floating-point error-state accessors geterrobj/seterrobj.
add_newdoc('numpy.core.umath', 'frompyfunc',
"""
frompyfunc(func, nin, nout)
Takes an arbitrary Python function and returns a NumPy ufunc.
Can be used, for example, to add broadcasting to a built-in Python
function (see Examples section).
Parameters
----------
func : Python function object
An arbitrary Python function.
nin : int
The number of input arguments.
nout : int
The number of objects returned by `func`.
Returns
-------
out : ufunc
Returns a NumPy universal function (``ufunc``) object.
See Also
--------
vectorize : Evaluates pyfunc over input arrays using broadcasting rules of numpy.
Notes
-----
The returned ufunc always returns PyObject arrays.
Examples
--------
Use frompyfunc to add broadcasting to the Python function ``oct``:
>>> oct_array = np.frompyfunc(oct, 1, 1)
>>> oct_array(np.array((10, 30, 100)))
array(['0o12', '0o36', '0o144'], dtype=object)
>>> np.array((oct(10), oct(30), oct(100))) # for comparison
array(['0o12', '0o36', '0o144'], dtype='<U5')
""")
add_newdoc('numpy.core.umath', 'geterrobj',
"""
geterrobj()
Return the current object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `geterrobj` is used internally by the other
functions that get and set error handling behavior (`geterr`, `seterr`,
`geterrcall`, `seterrcall`).
Returns
-------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
seterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrobj() # first get the defaults
[8192, 521, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> old_bufsize = np.setbufsize(20000)
>>> old_err = np.seterr(divide='raise')
>>> old_handler = np.seterrcall(err_handler)
>>> np.geterrobj()
[8192, 521, <function err_handler at 0x91dcaac>]
>>> old_err = np.seterr(all='ignore')
>>> np.base_repr(np.geterrobj()[1], 8)
'0'
>>> old_err = np.seterr(divide='warn', over='log', under='call',
... invalid='print')
>>> np.base_repr(np.geterrobj()[1], 8)
'4351'
""")
add_newdoc('numpy.core.umath', 'seterrobj',
"""
seterrobj(errobj)
Set the object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `seterrobj` is used internally by the other
functions that set error handling behavior (`seterr`, `seterrcall`).
Parameters
----------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
geterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> old_errobj = np.geterrobj() # first get the defaults
>>> old_errobj
[8192, 521, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> new_errobj = [20000, 12, err_handler]
>>> np.seterrobj(new_errobj)
>>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
'14'
>>> np.geterr()
{'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
>>> np.geterrcall() is err_handler
True
""")
##############################################################################
#
# compiled_base functions
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'add_docstring',
"""
add_docstring(obj, docstring)
Add a docstring to a built-in obj if possible.
If the obj already has a docstring raise a RuntimeError
If this routine does not know how to add a docstring to the object
raise a TypeError
""")
# Docstring for the private helper that installs docstrings on ufuncs.
# Fix: "mempory leak" -> "memory leak" in the Notes section.
add_newdoc('numpy.core.umath', '_add_newdoc_ufunc',
"""
add_ufunc_docstring(ufunc, new_docstring)
Replace the docstring for a ufunc with new_docstring.
This method will only work if the current docstring for
the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
Parameters
----------
ufunc : numpy.ufunc
A ufunc whose current doc is NULL.
new_docstring : string
The new docstring for the ufunc.
Notes
-----
This method allocates memory for new_docstring on
the heap. Technically this creates a memory leak, since this
memory will not be reclaimed until the end of the program
even if the ufunc itself is removed. However this will only
be a problem if the user is repeatedly creating ufuncs with
no documentation, adding documentation via add_newdoc_ufunc,
and then throwing away the ufunc.
""")
# Test-helper docstring, then the ufunc object docstring and the data
# attributes of ufunc objects (identity, nargs, nin, nout, ntypes, types,
# signature).
add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g',
"""
format_float_OSprintf_g(val, precision)
Print a floating point scalar using the system's printf function,
equivalent to:
printf("%.*g", precision, val);
for half/float/double, or replacing 'g' by 'Lg' for longdouble. This
method is designed to help cross-validate the format_float_* methods.
Parameters
----------
val : python float or numpy floating scalar
Value to format.
precision : non-negative integer, optional
Precision given to printf.
Returns
-------
rep : string
The string representation of the floating point value
See Also
--------
format_float_scientific
format_float_positional
""")
##############################################################################
#
# Documentation for ufunc attributes and methods
#
##############################################################################
##############################################################################
#
# ufunc object
#
##############################################################################
add_newdoc('numpy.core', 'ufunc',
"""
Functions that operate element by element on whole arrays.
To see the documentation for a specific ufunc, use `info`. For
example, ``np.info(np.sin)``. Because ufuncs are written in C
(for speed) and linked into Python with NumPy's ufunc facility,
Python's help() function finds this page whenever help() is called
on a ufunc.
A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`.
Calling ufuncs:
===============
op(*x[, out], where=True, **kwargs)
Apply `op` to the arguments `*x` elementwise, broadcasting the arguments.
The broadcasting rules are:
* Dimensions of length 1 may be prepended to either array.
* Arrays may be repeated along dimensions of length 1.
Parameters
----------
*x : array_like
Input arrays.
out : ndarray, None, or tuple of ndarray and None, optional
Alternate array object(s) in which to put the result; if provided, it
must have a shape that the inputs broadcast to. A tuple of arrays
(possible only as a keyword argument) must have length equal to the
number of outputs; use None for uninitialized outputs to be
allocated by the ufunc.
where : array_like, optional
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value.
Note that if an uninitialized `out` array is created via the default
``out=None``, locations within it where the condition is False will
remain uninitialized.
**kwargs
For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`.
Returns
-------
r : ndarray or tuple of ndarray
`r` will have the shape that the arrays in `x` broadcast to; if `out` is
provided, it will be returned. If not, `r` will be allocated and
may contain uninitialized values. If the function has more than one
output, then the result will be a tuple of arrays.
""")
##############################################################################
#
# ufunc attributes
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('identity',
"""
The identity value.
Data attribute containing the identity element for the ufunc, if it has one.
If it does not, the attribute value is None.
Examples
--------
>>> np.add.identity
0
>>> np.multiply.identity
1
>>> np.power.identity
1
>>> print(np.exp.identity)
None
"""))
add_newdoc('numpy.core', 'ufunc', ('nargs',
"""
The number of arguments.
Data attribute containing the number of arguments the ufunc takes, including
optional ones.
Notes
-----
Typically this value will be one more than what you might expect because all
ufuncs take the optional "out" argument.
Examples
--------
>>> np.add.nargs
3
>>> np.multiply.nargs
3
>>> np.power.nargs
3
>>> np.exp.nargs
2
"""))
add_newdoc('numpy.core', 'ufunc', ('nin',
"""
The number of inputs.
Data attribute containing the number of arguments the ufunc treats as input.
Examples
--------
>>> np.add.nin
2
>>> np.multiply.nin
2
>>> np.power.nin
2
>>> np.exp.nin
1
"""))
add_newdoc('numpy.core', 'ufunc', ('nout',
"""
The number of outputs.
Data attribute containing the number of arguments the ufunc treats as output.
Notes
-----
Since all ufuncs can take output arguments, this will always be (at least) 1.
Examples
--------
>>> np.add.nout
1
>>> np.multiply.nout
1
>>> np.power.nout
1
>>> np.exp.nout
1
"""))
add_newdoc('numpy.core', 'ufunc', ('ntypes',
"""
The number of types.
The number of numerical NumPy types - of which there are 18 total - on which
the ufunc can operate.
See Also
--------
numpy.ufunc.types
Examples
--------
>>> np.add.ntypes
18
>>> np.multiply.ntypes
18
>>> np.power.ntypes
17
>>> np.exp.ntypes
7
>>> np.remainder.ntypes
14
"""))
add_newdoc('numpy.core', 'ufunc', ('types',
"""
Returns a list with types grouped input->output.
Data attribute listing the data-type "Domain-Range" groupings the ufunc can
deliver. The data-types are given using the character codes.
See Also
--------
numpy.ufunc.ntypes
Examples
--------
>>> np.add.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.multiply.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.power.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
'OO->O']
>>> np.exp.types
['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
>>> np.remainder.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']
"""))
add_newdoc('numpy.core', 'ufunc', ('signature',
"""
Definition of the core elements a generalized ufunc operates on.
The signature determines how the dimensions of each input/output array
are split into core and loop dimensions:
1. Each dimension in the signature is matched to a dimension of the
corresponding passed-in array, starting from the end of the shape tuple.
2. Core dimensions assigned to the same label in the signature must have
exactly matching sizes, no broadcasting is performed.
3. The core dimensions are removed from all inputs and the remaining
dimensions are broadcast together, defining the loop dimensions.
Notes
-----
Generalized ufuncs are used internally in many linalg functions, and in
the testing suite; the examples below are taken from these.
For ufuncs that operate on scalars, the signature is None, which is
equivalent to '()' for every argument.
Examples
--------
>>> np.core.umath_tests.matrix_multiply.signature
'(m,n),(n,p)->(m,p)'
>>> np.linalg._umath_linalg.det.signature
'(m,m)->()'
>>> np.add.signature is None
True # equivalent to '(),()->()'
"""))
##############################################################################
#
# ufunc methods
#
##############################################################################
# Docstring for ufunc.reduce.
# Fix: grammar in the `axis` description — "is perform a reduction" ->
# "is to perform a reduction".
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
reduce(a, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True)
Reduces `a`'s dimension by one, by applying ufunc along one axis.
Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
:math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
For a one-dimensional array, reduce produces results equivalent to:
::
r = op.identity # op = ufunc
for i in range(len(A)):
r = op(r, A[i])
return r
For example, add.reduce() is equivalent to sum().
Parameters
----------
a : array_like
The array to act on.
axis : None or int or tuple of ints, optional
Axis or axes along which a reduction is performed.
The default (`axis` = 0) is to perform a reduction over the first
dimension of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is None, a reduction is performed over all the axes.
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
For operations which are either not commutative or not associative,
doing a reduction over multiple axes is not well-defined. The
ufuncs do not currently raise an exception in this case, but will
likely do so in the future.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data-type of the output array if this is provided, or
the data-type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.7.0
initial : scalar, optional
The value with which to start the reduction.
If the ufunc has no identity or the dtype is object, this defaults
to None - otherwise it defaults to ufunc.identity.
If ``None`` is given, the first element of the reduction is used,
and an error is thrown if the reduction is empty.
.. versionadded:: 1.15.0
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `a`, and selects elements to include in the reduction. Note
that for ufuncs like ``minimum`` that do not have an identity
defined, one has to pass in also ``initial``.
.. versionadded:: 1.17.0
Returns
-------
r : ndarray
The reduced array. If `out` was supplied, `r` is a reference to it.
Examples
--------
>>> np.multiply.reduce([2,3,5])
30
A multi-dimensional array example:
>>> X = np.arange(8).reshape((2,2,2))
>>> X
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.add.reduce(X, 0)
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X) # confirm: default axis value is 0
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X, 1)
array([[ 2, 4],
[10, 12]])
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
You can use the ``initial`` keyword argument to initialize the reduction
with a different value, and ``where`` to select specific elements to include:
>>> np.add.reduce([10], initial=5)
15
>>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10)
array([14., 14.])
>>> a = np.array([10., np.nan, 10])
>>> np.add.reduce(a, where=~np.isnan(a))
20.0
Allows reductions of empty arrays where they would normally fail, i.e.
for ufuncs without an identity.
>>> np.minimum.reduce([], initial=np.inf)
inf
>>> np.minimum.reduce([[1., 2.], [3., 4.]], initial=10., where=[True, False])
array([ 1., 10.])
>>> np.minimum.reduce([])
Traceback (most recent call last):
...
ValueError: zero-size array to reduction operation minimum which has no identity
"""))
# Docstring for np.ufunc.accumulate, injected via add_newdoc (the C-level
# ufunc type cannot carry docstrings in its C source, so they live here).
# Fix: the dtype description contained a duplicated "the" across the line
# break ("... or the / the data-type ..."); the duplicate is removed.
add_newdoc('numpy.core', 'ufunc', ('accumulate',
"""
accumulate(array, axis=0, dtype=None, out=None)
Accumulate the result of applying the operator to all elements.
For a one-dimensional array, accumulate produces results equivalent to::
r = np.empty(len(A))
t = op.identity # op = the ufunc being applied to A's elements
for i in range(len(A)):
t = op(t, A[i])
r[i] = t
return r
For example, add.accumulate() is equivalent to np.cumsum().
For a multi-dimensional array, accumulate is applied along only one
axis (axis zero by default; see Examples below) so repeated use is
necessary if one wants to accumulate over multiple axes.
Parameters
----------
array : array_like
The array to act on.
axis : int, optional
The axis along which to apply the accumulation; default is zero.
dtype : data-type code, optional
The data-type used to represent the intermediate results. Defaults
to the data-type of the output array if such is provided, or the
data-type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
Returns
-------
r : ndarray
The accumulated values. If `out` was supplied, `r` is a reference to
`out`.
Examples
--------
1-D array examples:
>>> np.add.accumulate([2, 3, 5])
array([ 2, 5, 10])
>>> np.multiply.accumulate([2, 3, 5])
array([ 2, 6, 30])
2-D array examples:
>>> I = np.eye(2)
>>> I
array([[1., 0.],
[0., 1.]])
Accumulate along axis 0 (rows), down columns:
>>> np.add.accumulate(I, 0)
array([[1., 0.],
[1., 1.]])
>>> np.add.accumulate(I) # no axis specified = axis zero
array([[1., 0.],
[1., 1.]])
Accumulate along axis 1 (columns), through rows:
>>> np.add.accumulate(I, 1)
array([[1., 1.],
[0., 1.]])
"""))
# Docstring for np.ufunc.reduceat: local reduces over paired index slices
# along a single axis, attached here via add_newdoc.
add_newdoc('numpy.core', 'ufunc', ('reduceat',
"""
reduceat(a, indices, axis=0, dtype=None, out=None)
Performs a (local) reduce with specified slices over a single axis.
For i in ``range(len(indices))``, `reduceat` computes
``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th
generalized "row" parallel to `axis` in the final result (i.e., in a
2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
`axis = 1`, it becomes the i-th column). There are three exceptions to this:
* when ``i = len(indices) - 1`` (so for the last index),
``indices[i+1] = a.shape[axis]``.
* if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
simply ``a[indices[i]]``.
* if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised.
The shape of the output depends on the size of `indices`, and may be
larger than `a` (this happens if ``len(indices) > a.shape[axis]``).
Parameters
----------
a : array_like
The array to act on.
indices : array_like
Paired indices, comma separated (not colon), specifying slices to
reduce.
axis : int, optional
The axis along which to apply the reduceat.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data type of the output array if this is provided, or
the data type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
Returns
-------
r : ndarray
The reduced values. If `out` was supplied, `r` is a reference to
`out`.
Notes
-----
A descriptive example:
If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as
``ufunc.reduceat(a, indices)[::2]`` where `indices` is
``range(len(array) - 1)`` with a zero placed
in every other element:
``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``.
Don't be fooled by this attribute's name: `reduceat(a)` is not
necessarily smaller than `a`.
Examples
--------
To take the running sum of four successive values:
>>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
array([ 6, 10, 14, 18])
A 2-D example:
>>> x = np.linspace(0, 15, 16).reshape(4,4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
::
# reduce such that the result has the following five rows:
# [row1 + row2 + row3]
# [row4]
# [row2]
# [row3]
# [row1 + row2 + row3 + row4]
>>> np.add.reduceat(x, [0, 3, 1, 2, 0])
array([[12., 15., 18., 21.],
[12., 13., 14., 15.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[24., 28., 32., 36.]])
::
# reduce such that result has the following two columns:
# [col1 * col2 * col3, col4]
>>> np.multiply.reduceat(x, [0, 3], 1)
array([[ 0., 3.],
[ 120., 7.],
[ 720., 11.],
[2184., 15.]])
"""))
# Docstring for np.ufunc.outer: applies the ufunc to all input pairs,
# producing an array of dimension A.ndim + B.ndim.
add_newdoc('numpy.core', 'ufunc', ('outer',
"""
outer(A, B, **kwargs)
Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
``op.outer(A, B)`` is an array of dimension M + N such that:
.. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
For `A` and `B` one-dimensional, this is equivalent to::
r = empty(len(A),len(B))
for i in range(len(A)):
for j in range(len(B)):
r[i,j] = op(A[i], B[j]) # op = ufunc in question
Parameters
----------
A : array_like
First array
B : array_like
Second array
kwargs : any
Arguments to pass on to the ufunc. Typically `dtype` or `out`.
Returns
-------
r : ndarray
Output array
See Also
--------
numpy.outer
Examples
--------
>>> np.multiply.outer([1, 2, 3], [4, 5, 6])
array([[ 4, 5, 6],
[ 8, 10, 12],
[12, 15, 18]])
A multi-dimensional example:
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
>>> A.shape
(2, 3)
>>> B = np.array([[1, 2, 3, 4]])
>>> B.shape
(1, 4)
>>> C = np.multiply.outer(A, B)
>>> C.shape; C
(2, 3, 1, 4)
array([[[[ 1, 2, 3, 4]],
[[ 2, 4, 6, 8]],
[[ 3, 6, 9, 12]]],
[[[ 4, 8, 12, 16]],
[[ 5, 10, 15, 20]],
[[ 6, 12, 18, 24]]]])
"""))
# Docstring for np.ufunc.at: unbuffered in-place application, so repeated
# indices accumulate instead of being overwritten.
add_newdoc('numpy.core', 'ufunc', ('at',
"""
at(a, indices, b=None)
Performs unbuffered in place operation on operand 'a' for elements
specified by 'indices'. For addition ufunc, this method is equivalent to
``a[indices] += b``, except that results are accumulated for elements that
are indexed more than once. For example, ``a[[0,0]] += 1`` will only
increment the first element once because of buffering, whereas
``add.at(a, [0,0], 1)`` will increment the first element twice.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
The array to perform in place operation on.
indices : array_like or tuple
Array like index object or slice object for indexing into first
operand. If first operand has multiple dimensions, indices can be a
tuple of array like index objects or slice objects.
b : array_like
Second operand for ufuncs requiring two operands. Operand must be
broadcastable over first operand after indexing or slicing.
Examples
--------
Set items 0 and 1 to their negative values:
>>> a = np.array([1, 2, 3, 4])
>>> np.negative.at(a, [0, 1])
>>> a
array([-1, -2, 3, 4])
Increment items 0 and 1, and increment item 2 twice:
>>> a = np.array([1, 2, 3, 4])
>>> np.add.at(a, [0, 1, 2, 2], 1)
>>> a
array([2, 3, 5, 4])
Add items 0 and 1 in first array to second array,
and store results in first array:
>>> a = np.array([1, 2, 3, 4])
>>> b = np.array([1, 2])
>>> np.add.at(a, [0, 1], b)
>>> a
array([2, 4, 3, 4])
"""))
##############################################################################
#
# Documentation for dtype attributes and methods
#
##############################################################################
##############################################################################
#
# dtype object
#
##############################################################################
# Docstring for the np.dtype type itself (constructor semantics and the
# many accepted `obj` forms), attached via add_newdoc.
add_newdoc('numpy.core.multiarray', 'dtype',
"""
dtype(obj, align=False, copy=False)
Create a data type object.
A numpy array is homogeneous, and contains elements described by a
dtype object. A dtype object can be constructed from different
combinations of fundamental numeric types.
Parameters
----------
obj
Object to be converted to a data type object.
align : bool, optional
Add padding to the fields to match what a C compiler would output
for a similar C-struct. Can be ``True`` only if `obj` is a dictionary
or a comma-separated string. If a struct dtype is being created,
this also sets a sticky alignment flag ``isalignedstruct``.
copy : bool, optional
Make a new copy of the data-type object. If ``False``, the result
may just be a reference to a built-in data-type object.
See also
--------
result_type
Examples
--------
Using array-scalar type:
>>> np.dtype(np.int16)
dtype('int16')
Structured type, one field name 'f1', containing int16:
>>> np.dtype([('f1', np.int16)])
dtype([('f1', '<i2')])
Structured type, one field named 'f1', in itself containing a structured
type with one field:
>>> np.dtype([('f1', [('f1', np.int16)])])
dtype([('f1', [('f1', '<i2')])])
Structured type, two fields: the first field contains an unsigned int, the
second an int32:
>>> np.dtype([('f1', np.uint64), ('f2', np.int32)])
dtype([('f1', '<u8'), ('f2', '<i4')])
Using array-protocol type strings:
>>> np.dtype([('a','f8'),('b','S10')])
dtype([('a', '<f8'), ('b', 'S10')])
Using comma-separated field formats. The shape is (2,3):
>>> np.dtype("i4, (2,3)f8")
dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
is a flexible type, here of size 10:
>>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)])
dtype([('hello', '<i8', (3,)), ('world', 'V10')])
Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
the offsets in bytes:
>>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')]))
Using dictionaries. Two fields named 'gender' and 'age':
>>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
dtype([('gender', 'S1'), ('age', 'u1')])
Offsets in bytes, here 0 and 25:
>>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
dtype([('surname', 'S25'), ('age', 'u1')])
""")
##############################################################################
#
# dtype attributes
#
##############################################################################
# Docstring for dtype.alignment (compiler-required alignment in bytes).
add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
"""
The required alignment (bytes) of this data-type according to the compiler.
More information is available in the C-API section of the manual.
Examples
--------
>>> x = np.dtype('i4')
>>> x.alignment
4
>>> x = np.dtype(float)
>>> x.alignment
8
"""))
# Docstring for dtype.byteorder (single-character endianness code).
add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
"""
A character indicating the byte-order of this data-type object.
One of:
=== ==============
'=' native
'<' little-endian
'>' big-endian
'|' not applicable
=== ==============
All built-in data-type objects have byteorder either '=' or '|'.
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.byteorder
'='
>>> # endian is not relevant for 8 bit numbers
>>> np.dtype('i1').byteorder
'|'
>>> # or ASCII strings
>>> np.dtype('S2').byteorder
'|'
>>> # Even if specific code is given, and it is native
>>> # '=' is the byteorder
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> dt = np.dtype(native_code + 'i2')
>>> dt.byteorder
'='
>>> # Swapped code shows up as itself
>>> dt = np.dtype(swapped_code + 'i2')
>>> dt.byteorder == swapped_code
True
"""))
# Docstring for dtype.char (one-character type code).
add_newdoc('numpy.core.multiarray', 'dtype', ('char',
"""A unique character code for each of the 21 different built-in types.
Examples
--------
>>> x = np.dtype(float)
>>> x.char
'd'
"""))
# Docstring for dtype.descr (__array_interface__ 'descr' form of the dtype).
add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
"""
`__array_interface__` description of the data-type.
The format is that required by the 'descr' key in the
`__array_interface__` attribute.
Warning: This attribute exists specifically for `__array_interface__`,
and passing it directly to `np.dtype` will not accurately reconstruct
some dtypes (e.g., scalar and subarray dtypes).
Examples
--------
>>> x = np.dtype(float)
>>> x.descr
[('', '<f8')]
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.descr
[('name', '<U16'), ('grades', '<f8', (2,))]
"""))
# Docstring for dtype.fields (name -> (dtype, offset[, title]) mapping).
add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
"""
Dictionary of named fields defined for this data type, or ``None``.
The dictionary is indexed by keys that are the names of the fields.
Each entry in the dictionary is a tuple fully describing the field::
(dtype, offset[, title])
Offset is limited to C int, which is signed and usually 32 bits.
If present, the optional title can be any object (if it is a string
or unicode then it will also be a key in the fields dictionary,
otherwise it's meta-data). Notice also that the first two elements
of the tuple can be passed directly as arguments to the ``ndarray.getfield``
and ``ndarray.setfield`` methods.
See Also
--------
ndarray.getfield, ndarray.setfield
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> print(dt.fields)
{'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
"""))
# Docstring for dtype.flags (bit-mask of C-level interpretation flags).
add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
"""
Bit-flags describing how this data type is to be interpreted.
Bit-masks are in `numpy.core.multiarray` as the constants
`ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
`NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
of these flags is in C-API documentation; they are largely useful
for user-defined data-types.
The following example demonstrates that operations on this particular
dtype requires Python C-API.
Examples
--------
>>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
>>> x.flags
16
>>> np.core.multiarray.NEEDS_PYAPI
16
"""))
# Docstring for dtype.hasobject (whether the dtype holds Python objects).
add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
"""
Boolean indicating whether this dtype contains any reference-counted
objects in any fields or sub-dtypes.
Recall that what is actually in the ndarray memory representing
the Python object is the memory address of that object (a pointer).
Special handling may be required, and this attribute is useful for
distinguishing data types that may contain arbitrary Python objects
and data-types that won't.
"""))
# Docstring for dtype.isbuiltin (0=structured, 1=compiled-in, 2=user type).
add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
"""
Integer indicating how this dtype relates to the built-in dtypes.
Read-only.
= ========================================================================
0 if this is a structured array type, with fields
1 if this is a dtype compiled into numpy (such as ints, floats etc)
2 if the dtype is for a user-defined numpy type
A user-defined type uses the numpy C-API machinery to extend
numpy to handle a new array type. See
:ref:`user.user-defined-data-types` in the NumPy manual.
= ========================================================================
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.isbuiltin
1
>>> dt = np.dtype('f8')
>>> dt.isbuiltin
1
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.isbuiltin
0
"""))
# Docstring for dtype.isnative (byte order matches the platform).
add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
"""
Boolean indicating whether the byte order of this dtype is native
to the platform.
"""))
# Docstring for dtype.isalignedstruct (sticky struct-alignment flag).
add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
"""
Boolean indicating whether the dtype is a struct which maintains
field alignment. This flag is sticky, so when combining multiple
structs together, it is preserved and produces new dtypes which
are also aligned.
"""))
# Docstring for dtype.itemsize (element size in bytes).
add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
"""
The element size of this data-type object.
For 18 of the 21 types this number is fixed by the data-type.
For the flexible data-types, this number can be anything.
Examples
--------
>>> arr = np.array([[1, 2], [3, 4]])
>>> arr.dtype
dtype('int64')
>>> arr.itemsize
8
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.itemsize
80
"""))
# Docstring for dtype.kind (general-kind character code 'biufcmMOSUV').
add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
"""
A character code (one of 'biufcmMOSUV') identifying the general kind of data.
= ======================
b boolean
i signed integer
u unsigned integer
f floating-point
c complex floating-point
m timedelta
M datetime
O object
S (byte-)string
U Unicode
V void
= ======================
Examples
--------
>>> dt = np.dtype('i4')
>>> dt.kind
'i'
>>> dt = np.dtype('f8')
>>> dt.kind
'f'
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.kind
'V'
"""))
# Docstring for dtype.name (bit-width name such as 'float64').
add_newdoc('numpy.core.multiarray', 'dtype', ('name',
"""
A bit-width name for this data-type.
Un-sized flexible data-type objects do not have this attribute.
Examples
--------
>>> x = np.dtype(float)
>>> x.name
'float64'
>>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
>>> x.name
'void640'
"""))
# Docstring for dtype.names (field names ordered by byte offset).
add_newdoc('numpy.core.multiarray', 'dtype', ('names',
"""
Ordered list of field names, or ``None`` if there are no fields.
The names are ordered according to increasing byte offset. This can be
used, for example, to walk through all of the named fields in offset order.
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.names
('name', 'grades')
"""))
# Docstring for dtype.num (unique number per built-in type).
add_newdoc('numpy.core.multiarray', 'dtype', ('num',
"""
A unique number for each of the 21 different built-in types.
These are roughly ordered from least-to-most precision.
Examples
--------
>>> dt = np.dtype(str)
>>> dt.num
19
>>> dt = np.dtype(float)
>>> dt.num
12
"""))
# Docstring for dtype.shape (sub-array shape, or () for scalar dtypes).
add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
"""
Shape tuple of the sub-array if this data type describes a sub-array,
and ``()`` otherwise.
Examples
--------
>>> dt = np.dtype(('i4', 4))
>>> dt.shape
(4,)
>>> dt = np.dtype(('i4', (2, 3)))
>>> dt.shape
(2, 3)
"""))
# Docstring for dtype.ndim (sub-array dimensionality, 0 for scalar dtypes).
add_newdoc('numpy.core.multiarray', 'dtype', ('ndim',
"""
Number of dimensions of the sub-array if this data type describes a
sub-array, and ``0`` otherwise.
.. versionadded:: 1.13.0
Examples
--------
>>> x = np.dtype(float)
>>> x.ndim
0
>>> x = np.dtype((float, 8))
>>> x.ndim
1
>>> x = np.dtype(('i4', (3, 4)))
>>> x.ndim
2
"""))
# Docstring for dtype.str (array-protocol typestring, e.g. '<f8').
add_newdoc('numpy.core.multiarray', 'dtype', ('str',
"""The array-protocol typestring of this data-type object."""))
# Docstring for dtype.subdtype ((item_dtype, shape) for sub-array dtypes).
# Fix: the examples used ``numpy.dtype`` while every other doctest in this
# file uses the ``np`` alias; under the standard doctest setup (``import
# numpy as np``) the name ``numpy`` is unbound and the examples fail.
add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
"""
Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
None otherwise.
The *shape* is the fixed shape of the sub-array described by this
data type, and *item_dtype* the data type of the array.
If a field whose dtype object has this attribute is retrieved,
then the extra dimensions implied by *shape* are tacked on to
the end of the retrieved array.
See Also
--------
dtype.base
Examples
--------
>>> x = np.dtype('8f')
>>> x.subdtype
(dtype('float32'), (8,))
>>> x = np.dtype('i2')
>>> x.subdtype
>>>
"""))
# Docstring for dtype.base (base element dtype of a sub-array dtype).
# Fix: the examples used ``numpy.dtype`` while every other doctest in this
# file uses the ``np`` alias; under the standard doctest setup the name
# ``numpy`` is unbound and the examples fail.
add_newdoc('numpy.core.multiarray', 'dtype', ('base',
"""
Returns dtype for the base element of the subarrays,
regardless of their dimension or shape.
See Also
--------
dtype.subdtype
Examples
--------
>>> x = np.dtype('8f')
>>> x.base
dtype('float32')
>>> x = np.dtype('i2')
>>> x.base
dtype('int16')
"""))
# Docstring for dtype.type (the scalar type object for this dtype).
add_newdoc('numpy.core.multiarray', 'dtype', ('type',
"""The type object used to instantiate a scalar of this data-type."""))
##############################################################################
#
# dtype methods
#
##############################################################################
# Docstring for dtype.newbyteorder (returns a copy with forced byte order;
# also applied recursively to fields and sub-arrays).
add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new dtype with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. The default value ('S') results in swapping the current
byte order. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The code does a case-insensitive check on the first letter of
`new_order` for these alternatives. For example, any of '>'
or 'B' or 'b' or 'brian' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New dtype object with the given change to the byte order.
Notes
-----
Changes are also made in all fields and sub-arrays of the data type.
Examples
--------
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> native_dt = np.dtype(native_code+'i2')
>>> swapped_dt = np.dtype(swapped_code+'i2')
>>> native_dt.newbyteorder('S') == swapped_dt
True
>>> native_dt.newbyteorder() == swapped_dt
True
>>> native_dt == swapped_dt.newbyteorder('S')
True
>>> native_dt == swapped_dt.newbyteorder('=')
True
>>> native_dt == swapped_dt.newbyteorder('N')
True
>>> native_dt == native_dt.newbyteorder('|')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('<')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('L')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('>')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('B')
True
"""))
##############################################################################
#
# Datetime-related Methods
#
##############################################################################
# Docstring for np.busdaycalendar (immutable valid-day calendar object used
# by is_busday / busday_offset / busday_count).
add_newdoc('numpy.core.multiarray', 'busdaycalendar',
"""
busdaycalendar(weekmask='1111100', holidays=None)
A business day calendar object that efficiently stores information
defining valid days for the busday family of functions.
The default valid days are Monday through Friday ("business days").
A busdaycalendar object can be specified with any set of weekly
valid days, plus an optional "holiday" dates that always will be invalid.
Once a busdaycalendar object is created, the weekmask and holidays
cannot be modified.
.. versionadded:: 1.7.0
Parameters
----------
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates, no matter which
weekday they fall upon. Holiday dates may be specified in any
order, and NaT (not-a-time) dates are ignored. This list is
saved in a normalized form that is suited for fast calculations
of valid days.
Returns
-------
out : busdaycalendar
A business day calendar object containing the specified
weekmask and holidays values.
See Also
--------
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Attributes
----------
Note: once a busdaycalendar object is created, you cannot modify the
weekmask or holidays. The attributes return copies of internal data.
weekmask : (copy) seven-element array of bool
holidays : (copy) sorted array of datetime64[D]
Examples
--------
>>> # Some important days in July
... bdd = np.busdaycalendar(
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
>>> # Default is Monday to Friday weekdays
... bdd.weekmask
array([ True, True, True, True, True, False, False])
>>> # Any holidays already on the weekend are removed
... bdd.holidays
array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
""")
# One-line docstrings for the two read-only busdaycalendar attributes.
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
"""A copy of the seven-element boolean mask indicating valid days."""))
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
"""A copy of the holiday array indicating additional invalid days."""))
# Docstring for the internal normalize_axis_index helper (maps a possibly
# negative axis to 0 <= axis < ndim or raises AxisError).
add_newdoc('numpy.core.multiarray', 'normalize_axis_index',
"""
normalize_axis_index(axis, ndim, msg_prefix=None)
Normalizes an axis index, `axis`, such that is a valid positive index into
the shape of array with `ndim` dimensions. Raises an AxisError with an
appropriate message if this is not possible.
Used internally by all axis-checking logic.
.. versionadded:: 1.13.0
Parameters
----------
axis : int
The un-normalized index of the axis. Can be negative
ndim : int
The number of dimensions of the array that `axis` should be normalized
against
msg_prefix : str
A prefix to put before the message, typically the name of the argument
Returns
-------
normalized_axis : int
The normalized axis index, such that `0 <= normalized_axis < ndim`
Raises
------
AxisError
If the axis index is invalid, when `-ndim <= axis < ndim` is false.
Examples
--------
>>> normalize_axis_index(0, ndim=3)
0
>>> normalize_axis_index(1, ndim=3)
1
>>> normalize_axis_index(-1, ndim=3)
2
>>> normalize_axis_index(3, ndim=3)
Traceback (most recent call last):
...
AxisError: axis 3 is out of bounds for array of dimension 3
>>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg')
Traceback (most recent call last):
...
AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3
""")
# Docstring for np.datetime_data (unit/count of a datetime64/timedelta64 dtype).
add_newdoc('numpy.core.multiarray', 'datetime_data',
"""
datetime_data(dtype, /)
Get information about the step size of a date or time type.
The returned tuple can be passed as the second argument of `numpy.datetime64` and
`numpy.timedelta64`.
Parameters
----------
dtype : dtype
The dtype object, which must be a `datetime64` or `timedelta64` type.
Returns
-------
unit : str
The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this dtype
is based.
count : int
The number of base units in a step.
Examples
--------
>>> dt_25s = np.dtype('timedelta64[25s]')
>>> np.datetime_data(dt_25s)
('s', 25)
>>> np.array(10, dt_25s).astype('timedelta64[s]')
array(250, dtype='timedelta64[s]')
The result can be used to construct a datetime that uses the same units
as a timedelta
>>> np.datetime64('2010', np.datetime_data(dt_25s))
numpy.datetime64('2010-01-01T00:00:00','25s')
""")
##############################################################################
#
# Documentation for `generic` attributes and methods
#
##############################################################################
# Docstring for np.generic, the common base class of all numpy scalar types.
add_newdoc('numpy.core.numerictypes', 'generic',
"""
Base class for numpy scalar types.
Class from which most (all?) numpy scalar types are derived. For
consistency, exposes the same API as `ndarray`, despite many
consequent attributes being either "get-only," or completely irrelevant.
This is the class from which it is strongly suggested users should derive
custom scalar types.
""")
# Attributes
# Boilerplate docstring for the unimplemented virtual attribute generic.T.
add_newdoc('numpy.core.numerictypes', 'generic', ('T',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
# Boilerplate docstring for the unimplemented virtual attribute generic.base.
# Fix: the original dropped the word "provide" ("... so as to / a uniform
# API."); restored to match the identical boilerplate used by generic.T and
# the other virtual-attribute docstrings in this file.
add_newdoc('numpy.core.numerictypes', 'generic', ('base',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
# One-line docstrings for the simple scalar attributes of np.generic.
add_newdoc('numpy.core.numerictypes', 'generic', ('data',
"""Pointer to start of data."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dtype',
"""Get array data-descriptor."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flags',
"""The integer value of flags."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flat',
"""A 1-D view of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('imag',
"""The imaginary part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize',
"""The length of one element in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes',
"""The length of the scalar in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ndim',
"""The number of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('real',
"""The real part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('shape',
"""Tuple of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('size',
"""The number of elements in the gentype."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('strides',
"""Tuple of bytes steps in each dimension."""))
# Methods
# Boilerplate docstrings for the unimplemented virtual methods of np.generic
# (all/any/argmax/.../cumsum). Each points users at the derived class.
add_newdoc('numpy.core.numerictypes', 'generic', ('all',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('any',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmax',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmin',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argsort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('astype',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('choose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('clip',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('compress',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('copy',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dump',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dumps',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('fill',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flatten',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('getfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('item',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemset',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('max',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('mean',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('min',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See also the corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new `dtype` with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
The `new_order` code can be any from the following:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
Parameters
----------
new_order : str, optional
Byte order to force; a value from the byte order specifications
above. The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New `dtype` object with the given change to the byte order.
"""))
# Second batch of "virtual" ndarray-API attributes on `generic` (see the note
# before the first batch above `newbyteorder`).  All entries share the same
# boilerplate text, so emit them from a loop; the constant is (re)defined here
# so this section is self-contained (this also removes the accidental
# line-wrap inconsistency the hand-copied `setflags` entry had).
_generic_virtual_attr_doc = """
    Not implemented (virtual attribute)

    Class generic exists solely to derive numpy scalars from, and possesses,
    albeit unimplemented, all the attributes of the ndarray class
    so as to provide a uniform API.

    See also the corresponding attribute of the derived class of interest.
    """

for _attr in ('nonzero', 'prod', 'ptp', 'put', 'ravel', 'repeat', 'reshape',
              'resize', 'round', 'searchsorted', 'setfield', 'setflags',
              'sort', 'squeeze', 'std', 'sum', 'swapaxes', 'take', 'tofile',
              'tolist', 'tostring', 'trace', 'transpose', 'var', 'view'):
    add_newdoc('numpy.core.numerictypes', 'generic',
               (_attr, _generic_virtual_attr_doc))
##############################################################################
#
# Documentation for scalar type abstract base classes in type hierarchy
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'number',
"""
Abstract base class of all numeric scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'integer',
"""
Abstract base class of all integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'signedinteger',
"""
Abstract base class of all signed integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'unsignedinteger',
"""
Abstract base class of all unsigned integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'inexact',
"""
Abstract base class of all numeric scalar types with a (potentially)
inexact representation of the values in its range, such as
floating-point numbers.
""")
add_newdoc('numpy.core.numerictypes', 'floating',
"""
Abstract base class of all floating-point scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'complexfloating',
"""
Abstract base class of all complex number scalar types that are made up of
floating-point numbers.
""")
add_newdoc('numpy.core.numerictypes', 'flexible',
"""
Abstract base class of all scalar types without predefined length.
The actual size of these types depends on the specific `np.dtype`
instantiation.
""")
add_newdoc('numpy.core.numerictypes', 'character',
"""
Abstract base class of all character string scalar types.
""")
##############################################################################
#
# Documentation for concrete scalar classes
#
##############################################################################
def numeric_type_aliases(aliases):
    """Resolve (alias, doc) pairs against ``_numerictypes``.

    Returns a list of ``(alias_type, alias, doc)`` triples containing only
    those aliases that actually exist on this platform; the rest are
    silently dropped.
    """
    resolved = []
    for alias, doc in aliases:
        try:
            alias_type = getattr(_numerictypes, alias)
        except AttributeError:
            # The set of aliases that actually exist varies between platforms
            continue
        resolved.append((alias_type, alias, doc))
    return resolved
# Sized aliases (np.int32, np.float64, ...) that MAY exist depending on the
# platform's C type sizes; numeric_type_aliases() filters this list down to
# the ones that resolve on the current platform.  The doc text is spliced
# into the matching concrete type's docstring by add_newdoc_for_scalar_type.
possible_aliases = numeric_type_aliases([
    ('int8', '8-bit signed integer (-128 to 127)'),
    ('int16', '16-bit signed integer (-32768 to 32767)'),
    ('int32', '32-bit signed integer (-2147483648 to 2147483647)'),
    ('int64', '64-bit signed integer (-9223372036854775808 to 9223372036854775807)'),
    ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
    ('uint8', '8-bit unsigned integer (0 to 255)'),
    ('uint16', '16-bit unsigned integer (0 to 65535)'),
    ('uint32', '32-bit unsigned integer (0 to 4294967295)'),
    ('uint64', '64-bit unsigned integer (0 to 18446744073709551615)'),
    ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
    ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
    ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
    ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
    ('float96', '96-bit extended-precision floating-point number type'),
    ('float128', '128-bit extended-precision floating-point number type'),
    ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
    ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
    ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
    ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
    ])
def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
    """Attach a docstring to the concrete scalar type named `obj`.

    The supplied `doc` text is augmented with the type's single-character
    dtype code, its canonical name (when `obj` is itself an alias), the
    platform-independent aliases in `fixed_aliases`, and any sized aliases
    from `possible_aliases` that resolve to this type on this platform.
    """
    o = getattr(_numerictypes, obj)
    character_code = dtype(o).char
    # Only mention the canonical name when `obj` is an alias for it.
    canonical_name_doc = "" if obj == o.__name__ else "Canonical name: ``np.{}``.\n    ".format(obj)
    alias_doc = ''.join("Alias: ``np.{}``.\n    ".format(alias) for alias in fixed_aliases)
    # Sized aliases only exist where the C type sizes line up, hence the
    # "on this platform" wording.
    alias_doc += ''.join("Alias *on this platform*: ``np.{}``: {}.\n    ".format(alias, doc)
                         for (alias_type, alias, doc) in possible_aliases if alias_type is o)

    docstring = """
    {doc}
    Character code: ``'{character_code}'``.
    {canonical_name_doc}{alias_doc}
    """.format(doc=doc.strip(), character_code=character_code,
               canonical_name_doc=canonical_name_doc, alias_doc=alias_doc)

    add_newdoc('numpy.core.numerictypes', obj, docstring)
# Docstrings for the concrete scalar types.  Two fixes versus the previous
# revision: `int_` said "anc C" instead of "and C", and `ulonglong` was
# wrongly described as a *signed* integer type.
add_newdoc_for_scalar_type('bool_', ['bool8'],
    """
    Boolean type (True or False), stored as a byte.
    """)

add_newdoc_for_scalar_type('byte', [],
    """
    Signed integer type, compatible with C ``char``.
    """)

add_newdoc_for_scalar_type('short', [],
    """
    Signed integer type, compatible with C ``short``.
    """)

add_newdoc_for_scalar_type('intc', [],
    """
    Signed integer type, compatible with C ``int``.
    """)

add_newdoc_for_scalar_type('int_', [],
    """
    Signed integer type, compatible with Python `int` and C ``long``.
    """)

add_newdoc_for_scalar_type('longlong', [],
    """
    Signed integer type, compatible with C ``long long``.
    """)

add_newdoc_for_scalar_type('ubyte', [],
    """
    Unsigned integer type, compatible with C ``unsigned char``.
    """)

add_newdoc_for_scalar_type('ushort', [],
    """
    Unsigned integer type, compatible with C ``unsigned short``.
    """)

add_newdoc_for_scalar_type('uintc', [],
    """
    Unsigned integer type, compatible with C ``unsigned int``.
    """)

add_newdoc_for_scalar_type('uint', [],
    """
    Unsigned integer type, compatible with C ``unsigned long``.
    """)

add_newdoc_for_scalar_type('ulonglong', [],
    """
    Unsigned integer type, compatible with C ``unsigned long long``.
    """)

add_newdoc_for_scalar_type('half', [],
    """
    Half-precision floating-point number type.
    """)

add_newdoc_for_scalar_type('single', [],
    """
    Single-precision floating-point number type, compatible with C ``float``.
    """)

add_newdoc_for_scalar_type('double', ['float_'],
    """
    Double-precision floating-point number type, compatible with Python `float`
    and C ``double``.
    """)

add_newdoc_for_scalar_type('longdouble', ['longfloat'],
    """
    Extended-precision floating-point number type, compatible with C
    ``long double`` but not necessarily with IEEE 754 quadruple-precision.
    """)

add_newdoc_for_scalar_type('csingle', ['singlecomplex'],
    """
    Complex number type composed of two single-precision floating-point
    numbers.
    """)

add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'],
    """
    Complex number type composed of two double-precision floating-point
    numbers, compatible with Python `complex`.
    """)

add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'],
    """
    Complex number type composed of two extended-precision floating-point
    numbers.
    """)

add_newdoc_for_scalar_type('object_', [],
    """
    Any Python object.
    """)
# TODO: work out how to put this on the base class, np.floating
# `as_integer_ratio` exists on each concrete floating type, so attach the
# same docstring to every one of them, substituting the type name into the
# signature line and the doctest examples.
for float_name in ('half', 'single', 'double', 'longdouble'):
    add_newdoc('numpy.core.numerictypes', float_name, ('as_integer_ratio',
        """
        {ftype}.as_integer_ratio() -> (int, int)

        Return a pair of integers, whose ratio is exactly equal to the original
        floating point number, and with a positive denominator.
        Raise OverflowError on infinities and a ValueError on NaNs.

        >>> np.{ftype}(10.0).as_integer_ratio()
        (10, 1)
        >>> np.{ftype}(0.0).as_integer_ratio()
        (0, 1)
        >>> np.{ftype}(-.25).as_integer_ratio()
        (-1, 4)
        """.format(ftype=float_name)))
| jorisvandenbossche/numpy | numpy/core/_add_newdocs.py | Python | bsd-3-clause | 202,937 | [
"Brian"
] | 2ea71ca44333f444c31b88d7393ac19e8977c143b4853c3423524348e52c504f |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn as sns
sns.set()
import tensorflow as tf
from tensorflow_probability import distributions as tfd
# Import all kernel functions
from gl_rep.gp_kernel import *
class GLR(tf.keras.Model):
    def __init__(self, global_encoder, local_encoder, decoder, time_length, data_dim, window_size=20,
                 kernel='cauchy', beta=1., lamda=1., M=1, sigma=1.0, length_scale=1.0, kernel_scales=1, p=100):
        """Decoupled Global and Local Representation learning (GLR) model.

        Attributes:
            global_encoder: Encoder model that learns the global representation for each time series sample
            local_encoder: Encoder model that learns the local representation of time series windows over time
            decoder: Decoder model that generates the time series sample distribution
            time_length: Maximum length of the time series samples
            data_dim: Input data dimension (number of features)
            window_size: Length of the time series window to learn representations for
            kernel: Gaussian Process kernels for different dimensions of the local representations.
                NOTE(review): `_get_prior` iterates over this value with `len()`/`enumerate()`,
                so it is expected to be a list/tuple of kernel-name strings; the string
                default ``'cauchy'`` would be iterated character-by-character — confirm
                callers always pass a sequence.
            beta: KL divergence weight in the loss term
            lamda: Counterfactual regularization weight in the loss term
            M: Number of Monte-Carlo samples
            sigma: Kernel amplitude parameter (used by the cauchy kernel)
            length_scale: Kernel length scale
            kernel_scales: Number of different length scales over latent space dimensions
            p: Period for the periodic kernel
        """
        super(GLR, self).__init__()
        self.global_encoder = global_encoder
        self.local_encoder = local_encoder
        self.data_dim = data_dim
        self.decoder = decoder
        self.kernel = kernel
        self.time_length = time_length
        self.beta = beta
        self.lamda = lamda
        self.M = M
        self.p = p  # period for the periodic kernel
        self.kernel_scales = kernel_scales
        self.sigma = sigma
        self.window_size = window_size
        self.length_scale = length_scale
        # Lazily-built GP prior cache; see NOTE in _get_prior about its use.
        self.prior = None
        # Local latent size is dictated by the local encoder.
        self.latent_dim = local_encoder.zl_size
        # Placeholders for cached prior-scale quantities (not set in visible code).
        self.pz_scale_inv = None
        self.pz_scale_log_abs_determinant = None
    def encode(self, x, m_mask=None):
        """Encode the local and global representations of a batch of time series samples.

        Args:
            x: Batch of time series samples with shape [batch_size, T, feature_size]
            m_mask: Mask channel with the same size as x, indicating which samples are
                missing (1: missing, 0: measured)

        Returns:
            Tuple of (mean of the global posterior, sampled local representations
            transposed to [batch, n_windows, zl_size], local posterior distribution).
            # assumes the local posterior samples as [batch, zl_size, n_windows] — TODO confirm
        """
        assert len(x.shape) == 3, "Input should have shape: [batch_size, time_length, data_dim]"
        x = tf.identity(x)
        if m_mask is not None:
            m_mask = tf.identity(m_mask)
            m_mask = tf.cast(m_mask, dtype=tf.float32)
        # The global code is inferred from a random 30%-length crop of the
        # series (start drawn uniformly from the first 70%), so it cannot
        # depend on absolute position in time.
        rnd_t = np.random.randint(0, int(x.shape[1] * 0.7))
        global_sample_len = int(x.shape[1] * 0.3)
        p_zg = self.global_encoder(x[:, rnd_t:rnd_t + global_sample_len, :],
                                   mask=None if m_mask is None else m_mask[:, rnd_t:rnd_t + global_sample_len, :])
        pz_t = self.local_encoder(x, mask=m_mask, window_size=self.window_size)
        # Reorder the sampled local codes so the time-window axis comes first
        # after batch, matching what the decoder expects.
        z_t = tf.transpose(pz_t.sample(), perm=(0, 2, 1))
        return p_zg.mean(), z_t, pz_t
def decode(self, z_t, z_g):
"""Generate the time series sample from the local and global representations
Args:
z_t: Local representations over time with shape [batch_size, time windows, local representation size]
z_g: Global representation os time series sample [batch_size, global representation size]
"""
x_hat_dist = self.decoder(z_t, z_g, output_len=self.window_size)
return x_hat_dist
def __call__(self, input):
rnd_t = np.random.randint(0, input.shape[1] - 5 * self.window_size)
z_g = self.global_encoder(input[:, rnd_t:rnd_t + 5 * self.window_size, :])
pz_t = self.local_encoder(input, self.window_size)
z_t = tf.transpose(pz_t.sample(), perm=(0, 2, 1))
p_x_hat = self.decoder(z_t, z_g, output_len=self.window_size)
return p_x_hat, z_t, z_g
    def _get_prior(self, time_length=None):
        """Build the GP prior over the local representations over time.

        Each latent dimension gets an independent multivariate normal prior
        over `time_length` steps whose covariance is a GP kernel matrix; the
        latent dimensions are split across the kernels in ``self.kernel`` and,
        within each kernel, across ``self.kernel_scales`` length scales.

        NOTE(review): the ``if self.prior is None`` guard suggests caching,
        but ``self.prior`` is never assigned, so the branch is always taken;
        if ``self.prior`` were ever set, the final ``return prior`` would
        raise NameError (``prior`` is only bound inside the branch). The
        caching appears unfinished — confirm before relying on it.
        """
        if time_length is None:
            time_length = self.time_length
        if self.kernel_scales>self.latent_dim:
            raise RuntimeError('Invalid kernel size')
        if self.prior is None:
            tiled_matrices = []
            # Evenly split latent dimensions across kernels ...
            kernel_dim = self.latent_dim // len(self.kernel)
            for i_k, kernel in enumerate(self.kernel):
                # ... giving the last kernel whatever remains after integer division.
                if i_k==len(self.kernel)-1:
                    kernel_dim = self.latent_dim - kernel_dim*(len(self.kernel)-1)
                # Compute kernel matrices for each latent dimension, halving
                # the length scale at each successive scale level.
                kernel_matrices = []
                for i in range(self.kernel_scales):
                    if kernel == "rbf":
                        kernel_matrices.append(rbf_kernel(time_length, self.length_scale / (2 ** i)))
                    elif kernel == "periodic":
                        kernel_matrices.append(periodic_kernel(time_length, self.length_scale / (2 ** i), self.p))
                    elif kernel == "diffusion":
                        kernel_matrices.append(diffusion_kernel(time_length, self.length_scale / (2 ** i)))
                    elif kernel == "matern":
                        kernel_matrices.append(matern_kernel(time_length, self.length_scale / (2 ** i)))
                    elif kernel == "cauchy":
                        kernel_matrices.append(cauchy_kernel(time_length, self.sigma, self.length_scale / (2 ** i)))
                # Combine kernel matrices for each latent dimension: tile each
                # scale's matrix across its share of this kernel's dimensions,
                # with the last scale absorbing the remainder.
                total = 0
                for i in range(self.kernel_scales):
                    if i == self.kernel_scales - 1:
                        multiplier = kernel_dim - total
                    else:
                        multiplier = int(kernel_dim // (self.kernel_scales))
                    total += multiplier
                    tiled_matrices.append(tf.tile(tf.expand_dims(kernel_matrices[i], 0), [multiplier, 1, 1]))
            kernel_matrix_tiled = tf.concat(tiled_matrices, axis=0)
            assert kernel_matrix_tiled.shape[0] == self.latent_dim
            # Small diagonal jitter keeps the covariance positive definite.
            white_noise = tf.eye(num_rows=time_length, num_columns=time_length, batch_shape=[self.latent_dim]) * 1e-5
            prior = tfd.MultivariateNormalFullCovariance(
                loc=tf.zeros([self.latent_dim, time_length], dtype=tf.float32),
                covariance_matrix=(kernel_matrix_tiled + white_noise))
        return prior
    def compute_loss(self, x, m_mask=None, x_len=None, return_parts=False, global_sample_len=None, is_train=True,):
        """Calculate the overall loss for a batch of samples x.

        Loss = NLL + beta*(KL_divergence_local (GP prior) + KL_divergence_global)
               + lamda*counterfactual_regularization

        Args:
            x: Batch of time series samples with shape [batch_size, T, feature_size]
            m_mask: Mask channel with the same size as x, indicating which samples are
                missing (1: missing, 0: measured)
            x_len: Length of each time series sample
            return_parts: Returns the overall loss if set to False, otherwise returns
                all the loss components
            global_sample_len: Length of the time series sample to use for learning
                the global representation
            is_train: Passed through to the global encoder (e.g. for dropout behavior
                — presumably; confirm against the encoder implementation)
        """
        assert len(x.shape) == 3, "Input should have shape: [batch_size, time_length, data_dim]"
        # Replicate the batch M times for Monte-Carlo estimation.
        x = tf.tile(x, [self.M, 1, 1])  # shape=(M*batch_size, time, dimensions)
        if global_sample_len is None:
            global_sample_len = int(x.shape[1] *0.3)
        if m_mask is not None:
            m_mask = tf.cast(m_mask, dtype=tf.float32)
            m_mask = tf.tile(m_mask, [self.M, 1, 1])  # shape=(M*batch_size, time, dimensions)
        # Pick a random crop start for the global encoder; when sample lengths
        # are known, keep the crop within the shortest sample.
        if x_len is not None:
            x_len = tf.tile(x_len, [self.M])  # shape=(M*batch_size, time, dimensions)
            rnd_t = np.random.randint(0, max(1, (min(x_len) -global_sample_len)))
        else:
            rnd_t = np.random.randint(0, x.shape[1]-global_sample_len)
        # GP prior over one latent step per window.
        pz = self._get_prior(time_length=x.shape[1] // self.window_size)
        p_zg = self.global_encoder(x[:, rnd_t:rnd_t + global_sample_len, :],
                                   mask=None if m_mask is None else m_mask[:, rnd_t:rnd_t + global_sample_len, :],
                                   is_train=is_train)
        z_g = p_zg.sample()
        pz_t = self.local_encoder(x, mask=m_mask, window_size=self.window_size)
        z_t = tf.transpose(pz_t.sample(), perm=(0, 2, 1))
        x_hat_dist = self.decoder(z_t, z_g, output_len=self.window_size)
        cf_loss = 0
        if self.lamda!=0:
            # Counterfactual regularization: decode the same local codes with a
            # random global code, re-encode the result, and penalize the
            # re-encoder for preferring the original global code over the
            # counterfactual one (density ratio in probability space).
            z_g_2 = tf.random.normal(shape=z_g.shape, stddev=1.)
            cf_dist = self.decoder(z_t, z_g_2, output_len=self.window_size)
            rnd_t_adv = np.random.randint(0, x.shape[1] - global_sample_len)
            pos_zg = self.global_encoder(cf_dist.sample()[:, rnd_t_adv:rnd_t_adv + global_sample_len, :], mask=None)
            cf_loss = tf.reduce_mean(tf.math.exp(pos_zg.log_prob(z_g)-pos_zg.log_prob(z_g_2)), -1)
        nll = -x_hat_dist.log_prob(x)  # shape=(M*batch_size, time, dimensions)
        # Local KL, normalized by the number of windows.
        kl = tfd.kl_divergence(pz_t, pz) / (x.shape[1] // self.window_size)
        kl = tf.reduce_mean(kl, 1)  # shape=(M*batch_size, time, dimensions)
        if m_mask is not None:
            # Zero out NLL on missing entries and scale the KL by the fraction
            # of actually-measured values per sample.
            nll = tf.where(m_mask == 1, tf.zeros_like(nll), nll)
            measured_ratio = (tf.reduce_sum(abs(tf.cast(m_mask, tf.float32) - 1), [1, 2]))/(m_mask.shape[1]*m_mask.shape[2])
            kl = kl * measured_ratio
        nll = tf.reduce_mean(nll, axis=[1,2])
        # Global KL against a standard normal prior.
        kl_zg = tf.reduce_sum(tfd.kl_divergence(p_zg, tfd.Normal(loc=0, scale=1.)),-1)
        elbo = -nll - self.beta * (kl + kl_zg) - self.lamda * cf_loss  # shape=(M*batch_size)
        elbo = tf.reduce_mean(elbo)
        if return_parts:
            nll = tf.reduce_mean(nll)
            kl = tf.reduce_mean(kl)
            cf_loss = tf.reduce_mean(cf_loss)
            kl_zg = tf.reduce_mean(kl_zg)
            return -elbo, nll, kl, cf_loss, kl_zg
        else:
            return -elbo
    def get_trainable_vars(self):
        """Get the trainable parameters of the graph.

        Runs one loss computation on dummy data first so that all Keras
        sub-models are built (their variables are created lazily on first
        call) before ``trainable_variables`` is read.
        """
        self.compute_loss(x=tf.random.normal(shape=(10, self.time_length, self.data_dim), dtype=tf.float32),
                          m_mask=tf.zeros(shape=(10, self.time_length, self.data_dim), dtype=tf.float32))
        return self.trainable_variables
def get_conditional_predictive(self, z_l, prediction_steps=1):
    """Estimate the posterior distribution of the future local representations conditioned on the past observations.

    Uses the standard Gaussian conditioning identities on the GP prior over
    time: mean = m1 + C12 C22^-1 (z - m2), cov = C11 - C12 C22^-1 C21,
    where "1" indexes the last ``prediction_steps`` time points and "2" the
    preceding ones.

    Args:
        z_l: observed local representations; indexed below as
            ``z_l[:, :, z_f]``, i.e. [batch, time, zl_size] — TODO confirm
            against the local encoder's output layout.
        prediction_steps: number of future time steps to predict.

    Returns:
        Sampled future representations, shape [batch, prediction_steps, zl_size].
    """
    # NOTE(review): history_used equals the full sequence length, so
    # z_obs below is simply all of z_l — presumably intentional; confirm.
    history_used = z_l.shape[1]
    next_z = []
    # Prior over the concatenation of observed history and future steps.
    prior = self._get_prior(time_length=history_used + prediction_steps)
    z_obs = z_l[:, -history_used:, :]
    mean = prior.mean()
    covariance = prior.covariance()
    # Partition the prior moments into future ("1") and past ("2") slices.
    mean_1 = mean[:, -prediction_steps:]
    mean_2 = mean[:, :-prediction_steps]
    cov_1_2 = covariance[:, -prediction_steps:, :-prediction_steps]
    cov_2_1 = covariance[:, :-prediction_steps, -prediction_steps:]
    cov_2_2 = covariance[:, :-prediction_steps, :-prediction_steps]
    cov_1_1 = covariance[:, -prediction_steps:, -prediction_steps:]
    # Condition each latent dimension z_f independently.
    for z_f in range(len(mean_1)):
        # C12 C22^-1 — shared by the conditional mean and covariance.
        cov_mult = tf.matmul(cov_1_2[z_f], tf.linalg.inv(cov_2_2[z_f]))
        # Conditional mean, broadcast over the batch: m1 + C12 C22^-1 (z - m2).
        mean_cond = tf.expand_dims(tf.stack([mean_1[z_f]] * len(z_obs)), -1) + tf.matmul(
            tf.tile(tf.expand_dims(cov_mult, 0), [len(z_obs), 1, 1]),
            tf.expand_dims((z_obs[:, :, z_f] - mean_2[z_f]), -1))
        # Conditional covariance: C11 - C12 C22^-1 C21.
        cov_cond = cov_1_1[z_f] - tf.matmul(cov_mult, cov_2_1[z_f])
        cond = tfd.MultivariateNormalTriL(loc=tf.squeeze(mean_cond, axis=-1), scale_tril=tf.linalg.cholesky(
            tf.tile(tf.expand_dims(cov_cond, 0), [len(z_obs), 1, 1])))
        z_pred = cond.sample()  # Shape=[batch, prediction_step]
        next_z.append(z_pred)
    next_z = tf.stack(next_z, axis=-1)  # Shape=[batch, prediction_step, zl_size]
    return next_z
def _mi_upper_bound(self, x, m_mask=None):
    """Estimate the mutual information between the 2 set of representations.

    Computes a leave-one-out style upper bound: the KL divergence between
    each sample's local posterior and the posteriors of ``mc_samples``
    randomly chosen other samples, plus the reconstruction NLL.

    Args:
        x: input series, shape [batch_size, time_length, data_dim].
        m_mask: optional missingness mask of the same shape; entries equal
            to 1 mark values excluded from the NLL.

    Returns:
        Scalar estimate (MI upper bound + mean reconstruction NLL).
    """
    assert len(x.shape) == 3, "Input should have shape: [batch_size, time_length, data_dim]"
    x = tf.identity(x)  # in case x is not a Tensor already...
    if m_mask is not None:
        m_mask = tf.identity(m_mask)  # in case m_mask is not a Tensor already...
        m_mask = tf.cast(m_mask, dtype=tf.float32)
    # Random window covering 30% of the series for the global encoder.
    rnd_t = np.random.randint(0, int(x.shape[1] *0.7))
    global_sample_len = int(x.shape[1] *0.3)
    # NOTE(review): unlike compute_loss (which samples from the encoder's
    # output distribution before decoding), z_g here is the encoder's raw
    # return value passed straight to the decoder — confirm this is intended.
    z_g = self.global_encoder(x[:, rnd_t:rnd_t + global_sample_len, :],
                              mask=None if m_mask is None else m_mask[:, rnd_t:rnd_t + global_sample_len, :])
    pz_t = self.local_encoder(x, mask=m_mask, window_size=self.window_size)
    z_t = tf.transpose(pz_t.sample(), perm=(0, 2, 1))
    x_hat_dist = self.decoder(z_t, z_g, output_len=self.window_size)
    mi_upper_bound = tf.zeros((len(x),))
    mc_samples = 10
    # For each sample, pick mc_samples random other batch elements to
    # compare posteriors against.
    rnd_inds = np.random.randint(0, len(x), size=(len(x), mc_samples))
    locs = pz_t.loc
    scale_trils = pz_t.scale_tril
    for ii in range(mc_samples):
        # Posterior of the shuffled batch elements.
        pz_hat = tfd.MultivariateNormalTriL(loc=tf.gather(locs, rnd_inds[:,ii]), scale_tril=tf.gather(scale_trils, rnd_inds[:,ii]))
        mi_upper_bound += tf.reduce_sum(tfd.kl_divergence(pz_t, pz_hat), axis=-1)
    mi_upper_bound = mi_upper_bound/mc_samples
    nll = -x_hat_dist.log_prob(x)
    if m_mask is not None:
        # Zero out the NLL at masked (missing) positions.
        nll = tf.where(m_mask == 1, tf.zeros_like(nll), nll)
    nll = tf.reduce_sum(nll, [1,2])
    nll = tf.reduce_mean(nll)
    mi_upper_bound = tf.reduce_mean(mi_upper_bound, 0)
    mi = mi_upper_bound+nll
    return mi
def train(self, trainset, validset, data, lr=1e-4, n_epochs=2):
    """Train the Global and Local representation learning (GLR) model.

    Args:
        trainset: training dataset
        validset: validation dataset
        data: Name of the dataset for training the model (used in the
            checkpoint and plot file names)
        lr: learning rate
        n_epochs: Number of training epochs

    Side effects: creates ./ckpt and ./plots directories, saves model
    weights each evaluation, writes TF summaries to ./logs, and saves two
    loss plots as PDF files.
    """
    _ = tf.compat.v1.train.get_or_create_global_step()
    trainable_vars = self.get_trainable_vars()
    optimizer = tf.keras.optimizers.Adam(lr)
    if not os.path.exists('./ckpt'):
        os.mkdir('./ckpt')
    summary_writer = tf.summary.create_file_writer("./logs/training_summary")
    with summary_writer.as_default():
        losses_train, losses_val = [], []
        kl_train, kl_val = [], []
        kl_zg_train, kl_zg_val = [], []
        nll_train, nll_val = [], []
        reg_train, reg_val = [], []
        for epoch in range(n_epochs + 1):
            epoch_loss, epoch_nll, epoch_kl, epoch_cf_reg, epoch_kl_zg = self.run_epoch(trainset, train=True,
                                                                                        optimizer=optimizer,
                                                                                        trainable_vars=trainable_vars)
            # Metrics are recorded, validated and checkpointed only every
            # second epoch; odd epochs train silently.
            if epoch % 2 == 0:
                print('=' * 30)
                print('Epoch %d' % epoch, '(Learning rate: %.5f)' % (lr))
                losses_train.append(epoch_loss)
                kl_train.append(epoch_kl)
                kl_zg_train.append(epoch_kl_zg)
                nll_train.append(epoch_nll)
                reg_train.append(epoch_cf_reg)
                print("Training loss = %.3f \t NLL = %.3f \t KL(local) = %.3f \t CF_reg = %.3f \t KL(zg) = %.3f"
                      % (epoch_loss, epoch_nll, epoch_kl, epoch_cf_reg, epoch_kl_zg))
                # Evaluation pass over the validation set (no gradient updates).
                epoch_loss, epoch_nll, epoch_kl, epoch_cf_reg, epoch_kl_zg = self.run_epoch(validset)
                losses_val.append(epoch_loss)
                kl_val.append(epoch_kl)
                kl_zg_val.append(epoch_kl_zg)
                nll_val.append(epoch_nll)
                reg_val.append(epoch_cf_reg)
                print("Validation loss = %.3f \t NLL = %.3f \t KL(local) = %.3f \t CF_reg = %.3f \t KL(zg) = %.3f"
                      % (epoch_loss, epoch_nll, epoch_kl, epoch_cf_reg, epoch_kl_zg))
                self.save_weights('./ckpt/glr_%s_lambda%.1f' % (data, self.lamda))
    # Plot overall losses
    if not os.path.exists('./plots'):
        os.mkdir('./plots')
    plt.figure()
    plt.plot(losses_train, label='Train loss')
    plt.plot(losses_val, label='Validation loss')
    plt.legend()
    plt.savefig('./plots/glr_loss_%s_lambda%.1f.pdf' % (data, self.lamda))
    # Plot different components of the loss term
    f, axs = plt.subplots(nrows=1, ncols=3, figsize=(18, 6))
    f.suptitle("Different segments of the loss term")
    for i, ax in enumerate(axs):
        if i == 0:
            t_line = nll_train
            v_line = nll_val
            sub_title = "Negative Log Likelihood"
        if i == 1:
            t_line = kl_train
            v_line = kl_val
            sub_title = "KL Divergence"
        if i == 2:
            t_line = reg_train
            v_line = reg_val
            sub_title = "Counterfactual regularization"
        ax.plot(t_line, label='Train')
        ax.plot(v_line, label='Validation')
        ax.set_title(sub_title)
        ax.legend()
    f.tight_layout()
    plt.savefig('./plots/loss_components_%s_lambda%.1f.pdf' % (data, self.lamda))
def run_epoch(self, dataset, optimizer=None, train=False, trainable_vars=None):
    """Training epoch for time series encoder and decoder models.

    Args:
        dataset: Epoch dataset; each batch yields (x_seq, mask_seq, x_lens)
        optimizer: tf Optimizer (required when train=True)
        train: True if it is an epoch run for training, False if it is an inference run
        trainable_vars: List of trainable variables of the model (required when train=True)

    Returns:
        Tuple of epoch means: (loss, nll, kl, cf_loss, kl_zg).
    """
    epoch_loss, epoch_kl, epoch_kl_zg, epoch_nll, epoch_cf_loss = [], [], [], [], []
    for i, batch in dataset.enumerate():
        x_seq = batch[0]
        mask_seq, x_lens = batch[1], batch[2]
        # The global encoder sees a random 40% window of the sequence.
        global_sample_len = int(0.4 * x_seq.shape[1])
        if train:
            with tf.GradientTape() as gen_tape:
                gen_loss = self.compute_loss(x_seq, m_mask=mask_seq, x_len=x_lens,
                                             global_sample_len=global_sample_len)
            gradients_of_generator = gen_tape.gradient(gen_loss, trainable_vars)
            optimizer.apply_gradients(zip(gradients_of_generator, trainable_vars))
        # NOTE(review): when train=True this is a second, independent forward
        # pass (compute_loss is stochastic), run only to log the loss parts.
        loss, nll, kl, cf_loss, kl_zg = self.compute_loss(x_seq, m_mask=mask_seq, x_len=x_lens,
                                                          global_sample_len=global_sample_len,
                                                          return_parts=True)
        epoch_loss.append(loss.numpy())
        epoch_nll.append(nll.numpy())
        epoch_kl.append(kl.numpy())
        epoch_kl_zg.append(kl_zg)
        epoch_cf_loss.append(cf_loss)
    return np.mean(epoch_loss), np.mean(epoch_nll), np.mean(epoch_kl), np.mean(epoch_cf_loss), np.mean(epoch_kl_zg)
| googleinterns/local_global_ts_representation | gl_rep/glr.py | Python | apache-2.0 | 20,436 | [
"Gaussian"
] | 78436d12e87f8f2571fcaa0da3dd925326d1237fdaf7d1631d0aeca680bc60c7 |
'''
Created on 14.04.2013
@author: bluesbreaker
'''
from logilab.astng.utils import LocalsVisitor
from logilab.astng.inspector import IdGeneratorMixIn
from logilab.astng.node_classes import *
from logilab.astng.scoped_nodes import Class, Function, Lambda
from logilab.astng.exceptions import InferenceError
from CSUStAn.astng.inspector import DuckLinker
import pydot
import re
from lxml import etree
JUMP_NODES = ( If, For, While, TryExcept, TryFinally, IfExp, With)
class UCFRLinker(IdGeneratorMixIn, DuckLinker):
    '''
    Visitor (Python 2 / logilab-astng) that builds an XML control-flow
    representation (UCFR) of a project: one <Method>/<Function> element per
    frame, containing <Block>/<If>/<For>/<While>/... nodes linked by <Flow>
    edges, plus <Call> records resolved via name lookup.
    '''
    # Root lxml element of the generated UCFR document.
    _root = None
    ''' dbg '''
    _stop = False
    _stack = {}
    _dbg = False
    _dbg1 = None
    _project_name = None
    # Debug accumulators: node-class names seen at call sites / lookups.
    _dbg_calls = set([])
    _dbg_call_lookup = set([])
    # Counters for call-statistics reporting in write_result.
    _getattr_calls = 0
    _func_calls = 0
    _class_calls = 0
    _out_xml = None
    # All cfg ids actually generated; used to prune stale Target/@cfg_id.
    _ids = set([])
    _frames = set([])
    ''' Map of ASTNG calls to UCFR calls '''
    _call_dict = {}
    '''DEBUG'''
    dbg = 0

    def __init__(self, project_name, out_xml):
        # project_name: prefix used to classify lookups as project-internal.
        # out_xml: path of the UCFR XML file written by write_result.
        IdGeneratorMixIn.__init__(self)
        DuckLinker.__init__(self)
        self._project_name = project_name
        self._out_xml = out_xml

    def visit_project(self, node):
        """Create the XML document root for the visited project."""
        self._root = etree.Element("Project", name=self._project_name)

    def write_result(self, node):
        """Print call statistics, prune unknown cfg_ids and write the XML."""
        print self._dbg_calls
        print self._dbg_call_lookup
        all_calls = self._func_calls+self._class_calls+self._getattr_calls
        print "Func calls ",self._func_calls,self._func_calls*100.0/all_calls,"%"
        print "Class calls ",self._class_calls,self._class_calls*100.0/all_calls,"%"
        print "Getattr calls ",self._getattr_calls,self._getattr_calls*100.0/all_calls,"%"
        # Drop cfg_id attributes that reference ids never generated.
        for t in self._root.xpath("//Target[@cfg_id]"):
            if not int(t.get("cfg_id")) in self._ids:
                t.attrib.pop("cfg_id")
        f = open(self._out_xml,'w')
        f.write(etree.tostring(self._root, pretty_print=True, encoding='utf-8', xml_declaration=True))
        f.close()

    def handle_id(self, func_node):
        """Return a stable cfg id for a function/method node, generating one on demand."""
        if isinstance(func_node.parent, Class):
            # Methods: cache the id directly on the astng node.
            if not hasattr(func_node, "id"):
                func_node.id = self.generate_id()
                func_node.visited = True
            return func_node.id
        else:
            # Module-level functions: ids cached per module in func_dict.
            # NOTE(review): this branch looks broken — the hasattr test
            # appears inverted (it re-creates func_dict only when it already
            # exists), and it mixes func_node.root() (call) with
            # func_node.root (the bound method) for attribute access.
            # Confirm against callers before relying on it.
            if hasattr(func_node.root(), 'func_dict'):
                func_node.root.func_dict = {}
            if not func_node.root.func_dict.has_key(func_node.name):
                func_node.root.func_dict[func_node.name] = self.generate_id()
            return func_node.root.func_dict[func_node.name]

    def visit_class(self, node):
        # Delegate class handling entirely to DuckLinker.
        DuckLinker.visit_class(self, node)

    def get_frames(self):
        """Return all Function nodes visited so far."""
        return self._frames

    def get_classes(self):
        """Return the classes collected by the DuckLinker base."""
        return self._classes

    def visit_function(self, node):
        """Emit the <Method>/<Function> element and its control-flow graph."""
        self.dbg += 1
        self._frames.add(node)
        func_id = self.handle_id(node)
        self._ids.add(func_id)
        # Flag "large" functions for the (currently disabled) debug rendering.
        if(len(node.body)>8):
            self._dbg = True
        if isinstance(node.parent, Class):
            func_node = etree.Element("Method",cfg_id=str(func_id),name=node.name,parent_class=node.parent.name,label=node.root().name)
            class_node = node.parent
        else:
            func_node = etree.Element("Function",cfg_id=str(func_id),name=node.name,label=node.root().name)
            class_node = None
        self._stack[node] = func_node
        self._root.append(func_node)
        returns = set([])
        ''' extract duck typing '''
        # Seed per-argument duck-typing info, then walk the body for uses.
        node.duck_info = {}
        if(node.args.args is not None):
            for arg in node.args.args:
                if not arg.name == 'self':
                    node.duck_info[arg.name]={'attrs':set([]),'methods':{}}
        self.extract_duck_types(node, class_node)
        # Synthetic START block (id 0), then the body, then the END block.
        block_node = etree.Element("Block", type="START",id=str(0))
        func_node.append(block_node)
        id_count, prev = self.handle_flow_part(func_node, node.body, set([0]), 1, returns)
        id_count +=1
        block_node = etree.Element("Block", type="END",id=str(id_count))
        func_node.append(block_node)
        ''' Flows to the end of function '''
        for p in prev.union(returns):
            flow_node = etree.Element("Flow",from_id=str(p),to_id=str(id_count))
            func_node.append(flow_node)

    def extract_duck_types(self, node, class_node):
        """ generate attrs and handle duck info about this attrs """
        DuckLinker.handle_attrs(self, node, class_node)
        if isinstance(node, (AssAttr, Getattr)):
            if isinstance(node, Getattr):
                self.handle_getattr_local(node, node.frame().duck_info, True)
            elif isinstance(node, AssAttr):
                self.handle_assattr_local(node, node.frame().duck_info)
            ''' Handle only 1 level Getattr-s'''
            return
        for child in node.get_children():
            # Ignoring handling nested functions, it will be handled in another visit
            if not isinstance(child, (Function, Lambda, Class)):
                self.extract_duck_types(child, class_node)

    def leave_function(self, node):
        # NOTE(review): the bare return below disables everything that
        # follows (pydot rendering of the CFG and the _stack cleanup), so
        # _stack entries are never deleted. The dead code is kept for
        # debugging; re-enable by removing the return.
        return
        ''' DEBUG '''
        if self._stop:
            del self._stack[node]
            return
        if not (self._dbg == True):
            return
        graph = pydot.Dot(graph_type='digraph')
        block_dict = {}
        for block in self._stack[node].iter("Block"):
            dot_node = pydot.Node(block.get("id"),shape='record')
            graph.add_node(dot_node)
            block_dict[block.get("id")] = dot_node
        for block in self._stack[node].iter("If"):
            dot_node = pydot.Node('If '+block.get("id")+'\l'+block.get("test"),shape='diamond')
            graph.add_node(dot_node)
            block_dict[block.get("id")] = dot_node
        for block in self._stack[node].iter("For"):
            dot_node = pydot.Node('For '+block.get("id")+'\l'+block.get("iterate"),shape='diamond')
            graph.add_node(dot_node)
            block_dict[block.get("id")] = dot_node
        for block in self._stack[node].iter("While"):
            dot_node = pydot.Node('While '+block.get("id")+'\l'+block.get("test"),shape='diamond')
            graph.add_node(dot_node)
            block_dict[block.get("id")] = dot_node
        for flow in self._stack[node].iter("Flow"):
            dot_edge = pydot.Edge(block_dict[flow.get("from_id")],block_dict[flow.get("to_id")])
            graph.add_edge(dot_edge)
        graph.write_svg('cfg.svg')
        f=open('cfg.txt','w')
        f.write(node.as_string())
        f.close()
        del self._stack[node]
        self._stop = True

    def leave_project(self, node):
        """ add complete class signatures """
        DuckLinker.leave_project(self, node)

    def handle_flow_part(self, func_node, flow_part, parent_ids, id_count, returns):
        ''' Handle sequential object of flow, e.g then or else body of If'''
        # parent_ids: ids of the blocks that flow into this sequence.
        # Returns (next free id counter, ids of the last blocks in sequence).
        prev = parent_ids
        block_node = None
        for child in flow_part:
            if isinstance(child, Function):
                ''' Ignore function defined in another function body'''
                continue
            id_count += 1
            curr_id = id_count
            #block_node.append(subblock_node)
            # Emit incoming Flow edges when a new node starts (branch node,
            # or first simple statement after a branch).
            if(isinstance(child, (If, While, For, TryExcept, TryFinally, With)) or (block_node is None)):
                for p in prev:
                    if (p not in returns):
                        flow_node = etree.Element("Flow", from_id=str(p), to_id=str(curr_id))
                        func_node.append(flow_node)
            if isinstance(child, If):
                if_node = etree.Element("If", id=str(id_count), test=child.test.__class__.__name__)
                if_node.set("fromlineno", str(child.fromlineno))
                if_node.set("col_offset", str(child.col_offset))
                func_node.append(if_node)
                id_count, prev = self.handle_cross(child, func_node, curr_id, id_count, returns)
                block_node = None
            elif isinstance(child, For):
                for_node = etree.Element("For", id=str(id_count), iterate=child.iter.__class__.__name__)
                for_node.set("fromlineno", str(child.fromlineno))
                for_node.set("col_offset", str(child.col_offset))
                func_node.append(for_node)
                id_count, prev = self.handle_cross(child, func_node, curr_id, id_count, returns)
                block_node = None
            elif isinstance(child, While):
                while_node = etree.Element("While", id=str(id_count), test=child.test.__class__.__name__)
                while_node.set("fromlineno", str(child.fromlineno))
                while_node.set("col_offset", str(child.col_offset))
                func_node.append(while_node)
                id_count, prev = self.handle_cross(child, func_node, curr_id, id_count, returns)
                block_node = None
            elif isinstance(child, (TryExcept, TryFinally, With)):
                jump_node = etree.Element(child.__class__.__name__, id=str(id_count))
                jump_node.set("fromlineno", str(child.fromlineno))
                jump_node.set("col_offset", str(child.col_offset))
                func_node.append(jump_node)
                id_count, prev = self.handle_cross(child, func_node, curr_id, id_count, returns)
                block_node = None
            else:
                # Simple statement: open a new basic block if needed,
                # otherwise keep appending to the current one.
                if block_node is None:
                    block_node = etree.Element("Block", id=str(id_count))
                    block_node.set("fromlineno", str(child.fromlineno))
                    block_node.set("col_offset", str(child.col_offset))
                    func_node.append(block_node)
                    prev = set([id_count])
                if(isinstance(child, Return)):
                    returns.add(id_count)
                    id_count += 1
                self.handle_simple_node(child, block_node)
        if(flow_part):
            return id_count, prev
        else:
            return id_count, set([])

    def handle_cross(self, node, func_node, parent_id, id_count, returns):
        ''' Handle conditional part of flow, e.g. If block'''
        # Recurses into each branch body; returns (counter, exit block ids).
        curr_id = id_count
        parent_ids = set([])
        if isinstance(node, If):
            id_count, ids = self.handle_flow_part(func_node, node.body, set([curr_id]), id_count, returns)
            parent_ids |= ids
            id_count, ids = self.handle_flow_part(func_node, node.orelse, set([curr_id]), id_count, returns)
            parent_ids |= ids
            if (not node.orelse):
                ''' If there are no else then no direct block from if is needed'''
                parent_ids.add(curr_id)
        elif isinstance(node, (While, For)):
            id_count, ids = self.handle_flow_part(func_node, node.body, set([curr_id]), id_count, returns)
            # Back edges from the loop body exits to the loop header.
            for p in ids:
                if (p not in returns):
                    flow_node = etree.Element("Flow", from_id=str(p), to_id=str(curr_id))
                    func_node.append(flow_node)
            parent_ids.add(curr_id)
            if (not node.orelse):
                ''' If there are no else then no direct block from if is needed'''
                parent_ids.add(curr_id)
        elif isinstance(node, TryExcept):
            id_count, ids = self.handle_flow_part(func_node, node.body, set([curr_id]), id_count, returns)
            parent_ids |= ids
            for h in node.handlers:
                id_count, ids = self.handle_flow_part(func_node, h.body, set([curr_id]), id_count, returns)
                parent_ids |= ids
            id_count, ids = self.handle_flow_part(func_node, node.orelse, set([curr_id]), id_count, returns)
            parent_ids |= ids
        elif isinstance(node, TryFinally):
            id_count, ids = self.handle_flow_part(func_node, node.body, set([curr_id]), id_count, returns)
            parent_ids |= ids
            id_count, ids = self.handle_flow_part(func_node, node.finalbody, set([curr_id]), id_count, returns)
            parent_ids |= ids
        elif isinstance(node, With):
            id_count, ids = self.handle_flow_part(func_node, node.body, set([curr_id]), id_count, returns)
            parent_ids |= ids
        return id_count, parent_ids

    def handle_simple_node(self, node, block_node):
        """Record call sites found inside a basic block (recursively)."""
        if isinstance(node, JUMP_NODES):
            # Branch nodes nested inside simple statements are not expanded.
            print "Warning! Ignored jump node at ", node.root
            #self._dbg = True
        elif isinstance(node, CallFunc):
            call_node = etree.Element("Call")
            self._dbg_calls.add(node.func.__class__.__name__)
            call_node.set("fromlineno", str(node.fromlineno))
            call_node.set("col_offset", str(node.col_offset))
            if isinstance(node.func, Name):
                # Direct call through a plain name: try to resolve the target.
                space_type, called, called_id, label = self.handle_lookup(node.func, node.func.name)
                if called == 'function':
                    self._func_calls += 1
                elif called == 'class':
                    self._class_calls += 1
                call_subnode = etree.Element("Direct", name=node.func.name)
                if space_type is not None:
                    call_subnode.set("space_type", space_type)
                target_subnode = etree.Element("Target")
                call_subnode.append(target_subnode)
                if called == 'function':
                    target_subnode.set("type", "function")
                    if label is not None:
                        target_subnode.set("label", label)
                elif called == 'class':
                    # Calling a class means invoking its __init__ method.
                    target_subnode.set("type", "method")
                    class_subnode = etree.Element("TargetClass")
                    if label is not None:
                        class_subnode.set("label", label)
                    target_subnode.append(class_subnode)
                else:
                    target_subnode.set("type", "unknown")
                if called_id is not None:
                    target_subnode.set("cfg_id", str(called_id))
                call_node.append(call_subnode)
            elif isinstance(node.func, Getattr):
                # Attribute call (obj.method(...)): recorded unresolved.
                self._getattr_calls += 1
                call_subnode = etree.Element("Getattr")
                call_subnode.set("name", node.func.attrname)
                call_subnode.set("label", node.func.expr.as_string())
                call_node.append(call_subnode)
            block_node.append(call_node)
            ''' save call for further access '''
            self._call_dict[node] = call_node
            #print node.as_string(),node.func
            #print node.scope().lookup(node.func)
        for child in node.get_children():
            self.handle_simple_node(child, block_node)

    def get_call(self, call):
        """Return the UCFR <Call> element for an astng call node, or None."""
        if self._call_dict.has_key(call):
            return self._call_dict[call]
        else:
            return None

    def handle_lookup(self, node, name, space_type=None):
        """Resolve a called name to (space_type, kind, cfg_id, module label).

        space_type is "internal" (same module), "cross" (other project
        module) or "external"; kind is "function", "class" or None.
        """
        lookup = node.lookup(name)
        called = None
        called_id = None
        label = None
        for asgn in lookup[1]:
            if isinstance(asgn, Function):
                if(space_type is None):
                    space_type = "internal"
                called = "function"
                label = asgn.root().name
                if (label == '__builtin__') or (space_type == "external"):
                    ''' No id generation for non-project calls '''
                    continue
                called_id = self.handle_id(asgn)
            elif isinstance(asgn, Class):
                if(space_type is None):
                    space_type = "internal"
                called = "class"
                label = asgn.root().name
                if label == '__builtin__':
                    continue
                # The effective target of a class call is its project-local
                # __init__ constructor, when one exists.
                for cstr in [meth for meth in asgn.methods() if ((re.split('\W+', meth.parent.root().name)[0] == self._project_name)and(meth.name == '__init__'))]:
                    called_id = self.handle_id(cstr)
            elif isinstance(asgn, From):
                # Imported name: follow the import and resolve in the module.
                try:
                    module = asgn.do_import_module(asgn.modname)
                    if((space_type is None) and (re.split('\W+', module.name)[0] == self._project_name)):
                        space_type = "cross"
                    else:
                        space_type = "external"
                    label = asgn.root().name
                    # Here is the situation when we have lib/builtin module with same name that in project.
                    # It imports correctly and causes infinite recursion.
                    if label == '__builtin__' and space_type == "external" and module.name == asgn.modname:
                        raise InferenceError(module.name)
                    if not module.name.startswith(self._project_name):
                        raise InferenceError(module.name)
                    space_type, called, called_id, label = self.handle_lookup(module, name, space_type)
                except InferenceError:
                    if(space_type is None):
                        space_type = "external"
            self._dbg_call_lookup.add(asgn.__class__.__name__)
            if isinstance(asgn, AssAttr):
                print "DBG ", name, asgn.as_string(), asgn.root()
        return space_type, called, called_id, label
| exbluesbreaker/csu-code-analysis | logilab-astng XML Generator/src/CSUStAn/astng/control_flow.py | Python | gpl-2.0 | 17,394 | [
"VisIt"
] | 34f7f0670bb9521dfb29be6d5f95088913b3f643dd8675228b6c5d836852325b |
# -*- coding: utf-8 -*-
import os
import logging
from core.modules.simulations_manager.weather.csv.CSVDatabaseWeatherSeries import CSVDatabaseWeatherSeries
__author__ = 'Federico Schmidt'
import csv
from netCDF4 import Dataset as NetCDF
import time
from datetime import datetime
import numpy as np
import copy
class WeatherNetCDFWriter:
    """Joins per-scenario CSV weather series into a pSIMS-compatible NetCDF file."""

    # Epoch for the NetCDF "time" coordinate ("days since reference_date").
    reference_date = "1950-01-01"

    def __init__(self):
        pass

    @staticmethod
    def join_csv_files(dir_list, output_file_path, extract_rainfall=True, forecast_date=None, station_data=None):
        # def join_csv_files(dir_list, output_file_path, extract_rainfall=True):
        """
        Joins a list of CSV files into one NetCDF file that's compatible with pSIMS' format.
        :param dir_list: The list of paths
        :param output_file_path: Output file path, should be absolute.
        :param extract_rainfall: Wether to extract rainfall data from weather series or not.
        :param forecast_date: Only used it extract_rainfall is True.
        :param station_data: Dict providing 'grid_row' and 'grid_column' for the output file name.
        :return: A tuple (file_created, rainfall_dict); rainfall_dict is None if extract_rainfall is False.
        """
        proc_start_time = time.time()
        necdf_file_name = "%s_%s.psims.nc" % (station_data['grid_row'], station_data['grid_column'])
        nectdf_file_path = os.path.join(output_file_path, necdf_file_name)
        output_file = NetCDF(nectdf_file_path, 'w')

        csv_files = dir_list
        # If there's only one scenario, we call it '0' internally. Though the NetCDF output file won't have the
        # 'scen' dimension defined. This is needed when we extract rainfall data.
        scen_names = [0]

        if len(csv_files) > 1:
            # If there's more than one file, we extract the scenario name (the year of the climate series).
            scen_names = [CSVDatabaseWeatherSeries.__scen_name__(i) for i in csv_files]

        expected_variables = {'fecha', 'rad', 'tmax', 'tmin', 'prcp'}
        var_units = {
            "rad": "MJm-2",
            "tmax": "C",
            "tmin": "C",
            "prcp": "mm"
        }

        # Define base dimensions, it's contents and units.
        dims = ['latitude', 'longitude', 'time']
        dim_var_contents = [
            [0],
            [0],
            []
        ]
        # BUGFIX: units must follow the `dims` order above. Per the CF
        # conventions, latitude is 'degrees_north' and longitude is
        # 'degrees_east'; the previous version had the two swapped.
        dims_units = ['degrees_north', 'degrees_east', 'days since %s 00:00:00' % WeatherNetCDFWriter.reference_date]

        rainfall_data = None

        # Add scen dimension in case there is more than one weather file.
        if len(csv_files) > 1:
            dims = dims + ['scen']
            dim_var_contents += [scen_names]
            dims_units += ['Scenarios']

        # Create dimensions.
        for index, dim in enumerate(dims):
            output_file.createDimension(dim)
            dim_var = output_file.createVariable(dim, 'int32', (dim,))
            dim_var.units = dims_units[index]
            dim_var[:] = dim_var_contents[index]

        variables_contents = {}
        time_var_content = []

        # Parse reference_date (str) to date.
        ref_date = datetime.strptime(WeatherNetCDFWriter.reference_date, '%Y-%m-%d')

        # Calculate forecast date as time difference (in days).
        if forecast_date:
            forecast_date = datetime.strptime(forecast_date, '%Y-%m-%d')
            forecast_date = forecast_date - ref_date
            forecast_date = forecast_date.days

        # Loop through CSV weather files.
        for scen_index, f in enumerate(csv_files):
            # Unpack dictionary entry: (key, value).
            scen_name = scen_names[scen_index]
            csv_file = csv.reader(open(f), delimiter='\t')

            csv_variables = []
            csv_content = dict()

            for r_index, row in enumerate(csv_file):
                if r_index == 0:
                    # Header
                    csv_variables = row
                    # Check that the header variables match the expected variables.
                    if len(expected_variables.intersection(csv_variables)) != len(expected_variables):
                        raise RuntimeError("The variables in the CSV file \"%s\" don't match the expected ones (%s)." %
                                           (csv_files[scen_index], expected_variables))
                    for column in row:
                        csv_content[column] = []
                else:
                    for i, value in enumerate(row):
                        var_name = csv_variables[i]
                        csv_content[var_name].append(value)

            # Rename the Spanish date column to the NetCDF 'time' variable.
            csv_content['time'] = csv_content['fecha']
            del csv_content['fecha']

            # Calculate time diff in days for each date.
            for i, day in enumerate(csv_content['time']):
                day = datetime.strptime(day, '%Y-%m-%d')
                delta = day - ref_date
                csv_content['time'][i] = delta.days

            # Initialize the content of the time variable for it to be written once we finish
            # writting the other variables.
            if len(time_var_content) == 0:
                time_var_content = copy.deepcopy(csv_content['time'])
            else:
                # If it's already initialized, check that every CSV file has data for the same days.
                if time_var_content != csv_content['time']:
                    raise RuntimeError("Dates do not match between CSV files.")

            # Delete this variable to avoid trying to write it to the NetCDF file.
            del csv_content['time']

            # Loop through each variable in the CSV header.
            for var_name in csv_content.keys():
                if var_name not in expected_variables:
                    continue
                if var_name in variables_contents:
                    var_array = variables_contents[var_name]
                else:
                    # Initialize this variable (filled with the -99 missing value).
                    shape = (len(csv_content[var_name]), 1, 1)
                    if len(csv_files) > 1:
                        shape = (len(csv_files),) + shape
                    var_array = np.empty(shape=shape)
                    var_array.fill(-99)
                    variables_contents[var_name] = var_array

                # Write variable content.
                if len(csv_files) > 1:
                    # The file index will be the scenario number.
                    var_array[scen_index, 0:len(csv_content[var_name]), 0, 0] = csv_content[var_name]
                else:
                    var_array[:, 0, 0] = csv_content[var_name]

        # Create the dimensions tuple to create variables in the NetCDF file.
        dims = ('time', 'latitude', 'longitude')
        if len(csv_files) > 1:
            dims = ('scen',) + dims

        start_time = time.time()
        # Write variables to the NetCDF file.
        for var_name in variables_contents:
            netcdf_var = output_file.createVariable(var_name, 'float32', dims, fill_value=-99)
            netcdf_var.units = var_units[var_name]
            netcdf_var[:] = variables_contents[var_name]

        # Check if we need to extract rainfall data.
        if extract_rainfall:
            rainfall_data = dict()
            rain_variable = variables_contents['prcp']
            # Convert time variable to Numpy array, otherwise we can't use array indexes.
            time_var_content = np.array(time_var_content)

            # Again, check if there's more than one climate serie.
            if len(csv_files) > 1:
                # Extract rainfall data until the date of forecast.
                pre_forecast_time = time_var_content[time_var_content <= forecast_date]
                rain = rain_variable[0, 0:len(pre_forecast_time), 0, 0]
                rainy_days = np.where(rain > 0)[0]

                rainfall_data['0'] = {
                    'dates': pre_forecast_time[rainy_days].tolist(),
                    'values': rain[rainy_days].tolist()
                }

                # Extract rainfall data for each scenario after the forecast date.
                for i, year in enumerate(scen_names):
                    post_forecast_time = time_var_content[time_var_content > forecast_date]
                    post_forecast_start = len(time_var_content) - len(post_forecast_time)
                    rain = rain_variable[i, post_forecast_start:, 0, 0]
                    rainy_days = np.where(rain > 0)[0]

                    rainfall_data[str(year)] = {
                        'dates': post_forecast_time[rainy_days].tolist(),
                        'values': rain[rainy_days].tolist()
                    }
            else:
                rain = rain_variable[:, 0, 0]
                rainy_days = np.where(rain > 0)[0]

                rainfall_data['0'] = {
                    'dates': time_var_content[rainy_days].tolist(),
                    'values': rain[rainy_days].tolist()
                }

        time_var = output_file.variables['time']
        time_var[:] = time_var_content

        output_file.close()
        logging.getLogger().debug('Write NetCDF file: %f.' % (time.time() - start_time))
        logging.getLogger().debug("NetCDF file created: '%s'. Time: %s." %
                                  (output_file_path, (time.time() - proc_start_time)))
        result = os.path.exists(nectdf_file_path)
        return result, rainfall_data
| schmidtfederico/PRinde | core/modules/simulations_manager/psims/WeatherNetCDFWriter.py | Python | gpl-2.0 | 9,342 | [
"NetCDF"
] | c1492aff4841580ed6ca77cd137675a4ad5d30a705e654903bfee3c3b858df22 |
# coding: utf-8
from __future__ import unicode_literals
"""
This module implements input and output processing from QChem.
"""
import copy
import re
import numpy as np
from string import Template
import six
from monty.io import zopen
from pymatgen.core.operations import SymmOp
from pymatgen.core.structure import Molecule
from pymatgen.core.units import Energy, FloatWithUnit
from pymatgen.serializers.json_coders import PMGSONable
from pymatgen.util.coord_utils import get_angle
from six.moves import map, zip
__author__ = "Xiaohui Qu"
__copyright__ = "Copyright 2013, The Electrolyte Genome Project"
__version__ = "0.1"
__maintainer__ = "Xiaohui Qu"
__email__ = "xhqu1981@gmail.com"
__date__ = "11/4/13"
class QcTask(PMGSONable):
"""
An object representing a QChem input file.
Args:
molecule: The input molecule. If it is None of string "read",
QChem will read geometry from checkpoint file. If it is a
Molecule object, QcInput will convert it into Cartesian
coordinates. Valid values: pymatgen Molecule object, "read", None
Defaults to None.
charge (int): Charge of the molecule. If None, charge on molecule is
used. Defaults to None.
spin_multiplicity (int): Spin multiplicity of molecule. Defaults to
None, which means that the spin multiplicity is set to 1 if the
molecule has no unpaired electrons and to 2 if there are
unpaired electrons.
jobtype (str): The type the QChem job. "SP" for Single Point Energy,
"opt" for geometry optimization, "freq" for
vibrational frequency.
title (str): Comments for the job. Defaults to None. Which means the
$comment section will be discarded.
exchange (str): The exchange methods of the theory. Examples including:
"B" (in pure BLYP), "PW91", "PBE", "TPSS".
Defaults to "HF".
This parameter can also be common names of hybrid
functionals, such as B3LYP, TPSSh, XYGJOS. In such cases,
the correlation parameter should be left as None.
correlation (str): The correlation level of the theory. Example
including: "MP2", "RI-MP2", "CCSD(T)", "LYP", "PBE", "TPSS"
Defaults to None.
basis_set (str/dict): The basis set.
If it is a dict, each element can use different basis set.
aux_basis_set (str/dict): Auxiliary basis set. For methods,
like RI-MP2, XYG3, OXYJ-OS, auxiliary basis set is required.
If it is a dict, each element can use different auxiliary
basis set.
ecp: Effective core potential (ECP) to be used.
If it is a dict, each element can use different ECP.
rem_params (dict): The parameters supposed to write in the $rem
section. Dict of key/value pairs.
Example: {"scf_algorithm": "diis_gdm", "scf_max_cycles": 100}
optional_params (dict): The parameter for keywords other than $rem
section. Dict of key/value pairs.
Example: {"basis": {"Li": "cc-PVTZ", "B": "aug-cc-PVTZ",
"F": "aug-cc-PVTZ"} "ecp": {"Cd": "srsc", "Br": "srlc"}}
ghost_atom (list): List of ghost atoms indices. Indices start from 0.
The ghost atom will be represented in of the form of @element_symmbol
"""
optional_keywords_list = {"basis", "ecp", "empirical_dispersion",
"external_charges", "force_field_params",
"intracule", "isotopes", "aux_basis",
"localized_diabatization", "multipole_field",
"nbo", "occupied", "swap_occupied_virtual", "opt",
"pcm", "pcm_solvent", "plots", "qm_atoms", "svp",
"svpirf", "van_der_waals", "xc_functional",
"cdft", "efp_fragments", "efp_params"}
alternative_keys = {"job_type": "jobtype",
"symmetry_ignore": "sym_ignore",
"scf_max_cycles": "max_scf_cycles"}
alternative_values = {"optimization": "opt",
"frequency": "freq"}
zmat_patt = re.compile("^(\w+)*([\s,]+(\w+)[\s,]+(\w+))*[\-\.\s,\w]*$")
xyz_patt = re.compile("^(\w+)[\s,]+([\d\.eE\-]+)[\s,]+([\d\.eE\-]+)[\s,]+"
"([\d\.eE\-]+)[\-\.\s,\w.]*$")
    def __init__(self, molecule=None, charge=None, spin_multiplicity=None,
                 jobtype='SP', title=None, exchange="HF", correlation=None,
                 basis_set="6-31+G*", aux_basis_set=None, ecp=None,
                 rem_params=None, optional_params=None, ghost_atoms=None):
        """Build a single QChem job; see the class docstring for parameters."""
        # Molecule may be a pymatgen Molecule, a list of Molecules (fragment
        # input), or the string "read" (reuse coordinates of a previous job).
        # Deep-copied so later mutation does not leak back to the caller.
        self.mol = copy.deepcopy(molecule) if molecule else "read"
        self.charge = charge
        self.spin_multiplicity = spin_multiplicity
        if isinstance(self.mol, six.string_types):
            self.mol = self.mol.lower()
            if self.mol != "read":
                raise ValueError('The only accept text value for mol is "read"')
        elif isinstance(self.mol, list):
            # Fragment input: every element must be a Molecule and the overall
            # charge/multiplicity must be given explicitly.
            for m in self.mol:
                if not isinstance(m, Molecule):
                    raise ValueError("In case of type list, every element of mol must be a pymatgen Molecule")
            if self.charge is None or self.spin_multiplicity is None:
                raise ValueError("For fragments molecule section input, charge and spin_multiplicity "
                                 "must be specificed")
            total_charge = sum([m.charge for m in self.mol])
            total_unpaired_electron = sum([m.spin_multiplicity-1 for m in self.mol])
            if total_charge != self.charge:
                raise ValueError("The charge of the molecule doesn't equal to the sum of the fragment charges")
            # Only parity is checked: fragment unpaired electrons must have
            # the same parity as the requested total multiplicity.
            if total_unpaired_electron % 2 != (self.spin_multiplicity - 1) % 2:
                raise ValueError("Spin multiplicity of molecule and fragments doesn't match")
        elif isinstance(self.mol, Molecule):
            self.charge = charge if charge is not None else self.mol.charge
            # Ghost atoms contribute basis functions but no electrons, so
            # subtract their electrons when validating the multiplicity.
            ghost_nelectrons = 0
            if ghost_atoms:
                for i in ghost_atoms:
                    site = self.mol.sites[i]
                    for sp, amt in site.species_and_occu.items():
                        ghost_nelectrons += sp.Z * amt
            nelectrons = self.mol.charge + self.mol.nelectrons - ghost_nelectrons - self.charge
            if spin_multiplicity is not None:
                self.spin_multiplicity = spin_multiplicity
                # Electron count and multiplicity must have opposite parity.
                if (nelectrons + spin_multiplicity) % 2 != 1:
                    raise ValueError("Charge of {} and spin multiplicity of {} "
                                     "is not possible for this molecule"
                                     .format(self.charge, spin_multiplicity))
            else:
                # Default: singlet for even electron count, doublet otherwise.
                self.spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
        else:
            raise ValueError("The molecule must be a pymatgen Molecule "
                             "object or read/None or list of pymatgen Molecule")
        if (self.charge is None) != (self.spin_multiplicity is None):
            raise ValueError("spin multiplicity must be set together")
        if self.charge is not None and isinstance(self.mol, Molecule) and not ghost_atoms:
            self.mol.set_charge_and_spin(self.charge, self.spin_multiplicity)
        self.params = dict()
        if title is not None:
            self.params["comment"] = title
        if "rem" not in self.params:
            self.params["rem"] = dict()
        self.params["rem"]["exchange"] = exchange.lower()
        available_jobtypes = {"sp", "opt", "ts", "freq", "force", "rpath",
                              "nmr", "bsse", "eda", "pes_scan", "fsm", "aimd",
                              "pimc", "makeefp"}
        # Normalize long-form job types ("optimization" -> "opt") before
        # validating; note the original-cased jobtype.lower() is stored.
        jt = jobtype.lower()
        if jt in self.alternative_values:
            jt = self.alternative_values[jt]
        if jt not in available_jobtypes:
            raise ValueError("Job type " + jobtype + " is not supported yet")
        self.params["rem"]["jobtype"] = jobtype.lower()
        if correlation is not None:
            self.params["rem"]["correlation"] = correlation.lower()
        if rem_params is not None:
            for k, v in rem_params.items():
                k = k.lower()
                if k in self.alternative_keys:
                    k = self.alternative_keys[k]
                if isinstance(v, six.string_types):
                    v = str(v).lower()
                    if v in self.alternative_values:
                        # noinspection PyTypeChecker
                        v = self.alternative_values[v]
                    self.params["rem"][k] = v
                elif isinstance(v, int) or isinstance(v, float):
                    self.params["rem"][k] = v
                else:
                    raise ValueError("The value in $rem can only be Integer "
                                     "or string")
        if optional_params:
            # Optional "$..." sections must come from the known keyword list.
            op_key = set([k.lower() for k in optional_params.keys()])
            if len(op_key - self.optional_keywords_list) > 0:
                invalid_keys = op_key - self.optional_keywords_list
                raise ValueError(','.join(['$' + k for k in invalid_keys]) +
                                 'is not a valid optional section')
            self.params.update(optional_params)
        self.set_basis_set(basis_set)
        if aux_basis_set is None:
            # RI-type methods need an auxiliary basis; pick a matching RI-MP2
            # aux basis for the common Pople bases, otherwise fail loudly.
            if self._aux_basis_required():
                if isinstance(self.params["rem"]["basis"], six.string_types):
                    if self.params["rem"]["basis"].startswith("6-31+g"):
                        self.set_auxiliary_basis_set("rimp2-aug-cc-pvdz")
                    elif self.params["rem"]["basis"].startswith("6-311+g"):
                        self.set_auxiliary_basis_set("rimp2-aug-cc-pvtz")
                if "aux_basis" not in self.params["rem"]:
                    raise ValueError("Auxiliary basis set is missing")
        else:
            self.set_auxiliary_basis_set(aux_basis_set)
        if ecp:
            self.set_ecp(ecp)
        self.ghost_atoms = ghost_atoms
        if self.ghost_atoms:
            if not isinstance(self.ghost_atoms, list):
                raise ValueError("ghost_atoms must be a list of integers")
            for atom in self.ghost_atoms:
                if not isinstance(atom, int):
                    raise ValueError("Each element of ghost atom list must an integer")
def _aux_basis_required(self):
if self.params["rem"]["exchange"] in ['xygjos', 'xyg3', 'lxygjos']:
return True
if 'correlation' in self.params["rem"]:
if self.params["rem"]["correlation"].startswith("ri"):
return True
def set_basis_set(self, basis_set):
if isinstance(basis_set, six.string_types):
self.params["rem"]["basis"] = str(basis_set).lower()
if basis_set.lower() not in ["gen", "mixed"]:
self.params.pop("basis", None)
elif isinstance(basis_set, dict):
self.params["rem"]["basis"] = "gen"
bs = dict()
for element, basis in basis_set.items():
bs[element.strip().capitalize()] = basis.lower()
self.params["basis"] = bs
if self.mol:
mol_elements = set([site.species_string for site
in self.mol.sites])
basis_elements = set(self.params["basis"].keys())
if len(mol_elements - basis_elements) > 0:
raise ValueError("The basis set for elements " +
", ".join(
list(mol_elements - basis_elements)) +
" is missing")
if len(basis_elements - mol_elements) > 0:
raise ValueError("Basis set error: the molecule "
"doesn't contain element " +
", ".join(basis_elements - mol_elements))
elif isinstance(basis_set, list):
self.params["rem"]["basis"] = "mixed"
bs = [(a[0].capitalize(), a[1].lower()) for a in basis_set]
self.params["basis"] = bs
if len(self.mol) != len(basis_set):
raise ValueError("Must specific a basis set for every atom")
mol_elements = [site.species_string for site in self.mol.sites]
basis_elements = [a[0] for a in bs]
if mol_elements != basis_elements:
raise ValueError("Elements in molecule and mixed basis set don't match")
else:
raise Exception('Can\'t handle type "{}"'.format(type(basis_set)))
def set_auxiliary_basis_set(self, aux_basis_set):
if isinstance(aux_basis_set, six.string_types):
self.params["rem"]["aux_basis"] = aux_basis_set.lower()
if aux_basis_set.lower() not in ["gen", "mixed"]:
self.params.pop("aux_basis", None)
elif isinstance(aux_basis_set, dict):
self.params["rem"]["aux_basis"] = "gen"
bs = dict()
for element, basis in aux_basis_set.items():
bs[element.strip().capitalize()] = basis.lower()
self.params["aux_basis"] = bs
if self.mol:
mol_elements = set([site.species_string for site
in self.mol.sites])
basis_elements = set(self.params["aux_basis"].keys())
if len(mol_elements - basis_elements) > 0:
raise ValueError("The auxiliary basis set for "
"elements " +
", ".join(
list(mol_elements - basis_elements)) +
" is missing")
if len(basis_elements - mol_elements) > 0:
raise ValueError("Auxiliary asis set error: the "
"molecule doesn't contain element " +
", ".join(basis_elements - mol_elements))
elif isinstance(aux_basis_set, list):
self.params["rem"]["aux_basis"] = "mixed"
bs = [(a[0].capitalize(), a[1].lower()) for a in aux_basis_set]
self.params["aux_basis"] = bs
if len(self.mol) != len(aux_basis_set):
raise ValueError("Must specific a auxiliary basis set for every atom")
mol_elements = [site.species_string for site in self.mol.sites]
basis_elements = [a[0] for a in bs]
if mol_elements != basis_elements:
raise ValueError("Elements in molecule and mixed basis set don't match")
else:
raise Exception('Can\'t handle type "{}"'.format(type(aux_basis_set)))
def set_ecp(self, ecp):
if isinstance(ecp, six.string_types):
self.params["rem"]["ecp"] = ecp.lower()
elif isinstance(ecp, dict):
self.params["rem"]["ecp"] = "gen"
potentials = dict()
for element, p in ecp.items():
potentials[element.strip().capitalize()] = p.lower()
self.params["ecp"] = potentials
if self.mol:
mol_elements = set([site.species_string for site
in self.mol.sites])
ecp_elements = set(self.params["ecp"].keys())
if len(ecp_elements - mol_elements) > 0:
raise ValueError("ECP error: the molecule "
"doesn't contain element " +
", ".join(ecp_elements - mol_elements))
    @property
    def molecule(self):
        """The molecule this task operates on: a pymatgen Molecule, the
        string "read", or a list of Molecule fragments."""
        return self.mol
def set_memory(self, total=None, static=None):
"""
Set the maxium allowed memory.
Args:
total: The total memory. Integer. Unit: MBytes. If set to None,
this parameter will be neglected.
static: The static memory. Integer. Unit MBytes. If set to None,
this parameterwill be neglected.
"""
if total:
self.params["rem"]["mem_total"] = total
if static:
self.params["rem"]["mem_static"] = static
def set_max_num_of_scratch_files(self, num=16):
"""
In QChem, the size of a single scratch is limited 2GB. By default,
the max number of scratich is 16, which is cooresponding to 32GB
scratch space. If you want to use more scratch disk space, you need
to increase the number of scratch files:
Args:
num: The max number of the scratch files. (Integer)
"""
self.params["rem"]["max_sub_file_num"] = num
def set_scf_algorithm_and_iterations(self, algorithm="diis",
iterations=50):
"""
Set algorithm used for converging SCF and max number of SCF iterations.
Args:
algorithm: The algorithm used for converging SCF. (str)
iterations: The max number of SCF iterations. (Integer)
"""
available_algorithms = {"diis", "dm", "diis_dm", "diis_gdm", "gdm",
"rca", "rca_diis", "roothaan"}
if algorithm.lower() not in available_algorithms:
raise ValueError("Algorithm " + algorithm +
" is not available in QChem")
self.params["rem"]["scf_algorithm"] = algorithm.lower()
self.params["rem"]["max_scf_cycles"] = iterations
def set_scf_convergence_threshold(self, exponent=8):
"""
SCF is considered converged when the wavefunction error is less than
10**(-exponent).
In QChem, the default values are:
5 For single point energy calculations.
7 For geometry optimizations and vibrational analysis.
8 For SSG calculations
Args:
exponent: The exponent of the threshold. (Integer)
"""
self.params["rem"]["scf_convergence"] = exponent
def set_integral_threshold(self, thresh=12):
"""
Cutoff for neglect of two electron integrals. 10−THRESH (THRESH <= 14).
In QChem, the default values are:
8 For single point energies.
10 For optimizations and frequency calculations.
14 For coupled-cluster calculations.
Args:
thresh: The exponent of the threshold. (Integer)
"""
self.params["rem"]["thresh"] = thresh
def set_dft_grid(self, radical_points=128, angular_points=302,
grid_type="Lebedev"):
"""
Set the grid for DFT numerical integrations.
Args:
radical_points: Radical points. (Integer)
angular_points: Angular points. (Integer)
grid_type: The type of of the grid. There are two standard grids:
SG-1 and SG-0. The other two supported grids are "Lebedev" and
"Gauss-Legendre"
"""
available_lebedev_angular_points = {6, 18, 26, 38, 50, 74, 86, 110, 146,
170, 194, 230, 266, 302, 350, 434,
590, 770, 974, 1202, 1454, 1730,
2030, 2354, 2702, 3074, 3470, 3890,
4334, 4802, 5294}
if grid_type.lower() == "sg-0":
self.params["rem"]["xc_grid"] = 0
elif grid_type.lower() == "sg-1":
self.params["rem"]["xc_grid"] = 1
elif grid_type.lower() == "lebedev":
if angular_points not in available_lebedev_angular_points:
raise ValueError(str(angular_points) + " is not a valid "
"Lebedev angular points number")
self.params["rem"]["xc_grid"] = "{rp:06d}{ap:06d}".format(
rp=radical_points, ap=angular_points)
elif grid_type.lower() == "gauss-legendre":
self.params["rem"]["xc_grid"] = "-{rp:06d}{ap:06d}".format(
rp=radical_points, ap=angular_points)
else:
raise ValueError("Grid type " + grid_type + " is not supported "
"currently")
def set_scf_initial_guess(self, guess="SAD"):
"""
Set initial guess method to be used for SCF
Args:
guess: The initial guess method. (str)
"""
availabel_guesses = {"core", "sad", "gwh", "read", "fragmo"}
if guess.lower() not in availabel_guesses:
raise ValueError("The guess method " + guess + " is not supported "
"yet")
self.params["rem"]["scf_guess"] = guess.lower()
def set_geom_max_iterations(self, iterations):
"""
Set the max iterations of geometry optimization.
Args:
iterations: the maximum iterations of geometry optimization.
(Integer)
"""
self.params["rem"]["geom_opt_max_cycles"] = iterations
def set_geom_opt_coords_type(self, coords_type="internal_switch"):
"""
Set the coordinates system used in geometry optimization.
"cartesian" --- always cartesian coordinates.
"internal" --- always internal coordinates.
"internal-switch" --- try internal coordinates first, if fails, switch
to cartesian coordinates.
"z-matrix" --- always z-matrix coordinates.
"z-matrix-switch" --- try z-matrix first, if fails, switch to
cartesian coordinates.
Args:
coords_type: The type of the coordinates. (str)
"""
coords_map = {"cartesian": 0, "internal": 1, "internal-switch": -1,
"z-matrix": 2, "z-matrix-switch": -2}
if coords_type.lower() not in set(coords_map.keys()):
raise ValueError("Coodinate system " + coords_type + " is not "
"supported yet")
else:
self.params["rem"]["geom_opt_coords"] = \
coords_map[coords_type.lower()]
def scale_geom_opt_threshold(self, gradient=0.1, displacement=0.1,
energy=0.1):
"""
Adjust the convergence criteria of geometry optimization.
Args:
gradient: the scale factor for gradient criteria. If less than
1.0, you are tightening the threshold. The base value is
300 × 10E−6
displacement: the scale factor for atomic displacement. If less
then 1.0, you are tightening the threshold. The base value is
1200 × 10E−6
energy: the scale factor for energy change between successive
iterations. If less than 1.0, you are tightening the
threshold. The base value is 100 × 10E−8.
"""
if gradient < 1.0/(300-1) or displacement < 1.0/(1200-1) or \
energy < 1.0/(100-1):
raise ValueError("The geometry optimization convergence criteria "
"is too tight")
self.params["rem"]["geom_opt_tol_gradient"] = int(gradient * 300)
self.params["rem"]["geom_opt_tol_displacement"] = int(displacement *
1200)
self.params["rem"]["geom_opt_tol_energy"] = int(energy * 100)
def set_geom_opt_use_gdiis(self, subspace_size=None):
"""
Use GDIIS algorithm in geometry optimization.
Args:
subspace_size: The size of the DIIS subsapce. None for default
value. The default value is min(NDEG, NATOMS, 4) NDEG = number
of moleculardegrees of freedom.
"""
subspace_size = subspace_size if subspace_size is not None else -1
self.params["rem"]["geom_opt_max_diis"] = subspace_size
def disable_symmetry(self):
"""
Turn the symmetry off.
"""
self.params["rem"]["sym_ignore"] = True
self.params["rem"]["symmetry"] = False
def use_cosmo(self, dielectric_constant=78.4):
"""
Set the solvent model to COSMO.
Args:
dielectric_constant: the dielectric constant for the solvent.
"""
self.params["rem"]["solvent_method"] = "cosmo"
self.params["rem"]["solvent_dielectric"] = dielectric_constant
def use_pcm(self, pcm_params=None, solvent_params=None,
radii_force_field=None):
"""
Set the solvent model to PCM. Default parameters are trying to comply to
gaussian default value
Args:
pcm_params (dict): The parameters of "$pcm" section.
solvent_params (dict): The parameters of "pcm_solvent" section
radii_force_field (str): The force fied used to set the solute
radii. Default to UFF.
"""
self.params["pcm"] = dict()
self.params["pcm_solvent"] = dict()
default_pcm_params = {"Theory": "SSVPE",
"vdwScale": 1.1,
"Radii": "UFF"}
if not solvent_params:
solvent_params = {"Dielectric": 78.3553}
if pcm_params:
for k, v in pcm_params.items():
self.params["pcm"][k.lower()] = v.lower() \
if isinstance(v, six.string_types) else v
for k, v in default_pcm_params.items():
if k.lower() not in self.params["pcm"].keys():
self.params["pcm"][k.lower()] = v.lower() \
if isinstance(v, six.string_types) else v
for k, v in solvent_params.items():
self.params["pcm_solvent"][k.lower()] = v.lower() \
if isinstance(v, six.string_types) else copy.deepcopy(v)
self.params["rem"]["solvent_method"] = "pcm"
if radii_force_field:
self.params["pcm"]["radii"] = "bondi"
self.params["rem"]["force_fied"] = radii_force_field.lower()
def __str__(self):
sections = ["comment", "molecule", "rem"] + \
sorted(list(self.optional_keywords_list))
lines = []
for sec in sections:
if sec in self.params or sec == "molecule":
foramt_sec = self.__getattribute__("_format_" + sec)
lines.append("$" + sec)
lines.extend(foramt_sec())
lines.append("$end")
lines.append('\n')
return '\n'.join(lines)
def _format_comment(self):
lines = [' ' + self.params["comment"].strip()]
return lines
    def _format_molecule(self):
        """Format the $molecule section: charge/multiplicity header followed
        by coordinate lines (or "read", or "--"-separated fragments)."""
        lines = []

        def inner_format_mol(m2, index_base):
            # One "<element> x y z" line per site; atoms whose global index
            # (site index + index_base) is in self.ghost_atoms get an "@"
            # prefix.
            mol_lines = []
            for i, site in enumerate(m2.sites):
                ghost = "@" if self.ghost_atoms \
                    and i + index_base in self.ghost_atoms else ""
                atom = "{ghost:s}{element:s}".format(ghost=ghost,
                                                    element=site.species_string)
                mol_lines.append(" {atom:<4} {x:>17.8f} {y:>17.8f} "
                                 "{z:>17.8f}".format(atom=atom, x=site.x,
                                                     y=site.y, z=site.z))
            return mol_lines

        if self.charge is not None:
            lines.append(" {charge:d} {multi:d}".format(charge=self
                         .charge, multi=self.spin_multiplicity))
        if isinstance(self.mol, six.string_types) and self.mol == "read":
            lines.append(" read")
        elif isinstance(self.mol, list):
            # Fragment input: each fragment gets a "--" separator plus its
            # own charge/multiplicity line; ghost indices are global, hence
            # the running starting_index.
            starting_index = 0
            for m in self.mol:
                lines.append("--")
                lines.append(" {charge:d} {multi:d}".format(
                    charge=m.charge, multi=m.spin_multiplicity))
                lines.extend(inner_format_mol(m, starting_index))
                starting_index += len(m)
        else:
            lines.extend(inner_format_mol(self.mol, 0))
        return lines
def _format_rem(self):
rem_format_template = Template(" {name:>$name_width} = "
"{value}")
name_width = 0
for name, value in self.params["rem"].items():
if len(name) > name_width:
name_width = len(name)
rem = rem_format_template.substitute(name_width=name_width)
lines = []
all_keys = set(self.params["rem"].keys())
priority_keys = ["jobtype", "exchange", "basis"]
additional_keys = all_keys - set(priority_keys)
ordered_keys = priority_keys + sorted(list(additional_keys))
for name in ordered_keys:
value = self.params["rem"][name]
lines.append(rem.format(name=name, value=value))
return lines
def _format_basis(self):
lines = []
if isinstance(self.params["basis"], dict):
for element in sorted(self.params["basis"].keys()):
basis = self.params["basis"][element]
lines.append(" " + element)
lines.append(" " + basis)
lines.append(" ****")
elif isinstance(self.params["basis"], list):
for i, (element, bs) in enumerate(self.params["basis"]):
lines.append(" {element:2s} {number:3d}".format(element=element, number=i+1))
lines.append(" {}".format(bs))
lines.append(" ****")
return lines
def _format_aux_basis(self):
lines = []
if isinstance(self.params["aux_basis"], dict):
for element in sorted(self.params["aux_basis"].keys()):
basis = self.params["aux_basis"][element]
lines.append(" " + element)
lines.append(" " + basis)
lines.append(" ****")
else:
for i, (element, bs) in enumerate(self.params["aux_basis"]):
lines.append(" {element:2s} {number:3d}".format(element=element, number=i+1))
lines.append(" {}".format(bs))
lines.append(" ****")
return lines
def _format_ecp(self):
lines = []
for element in sorted(self.params["ecp"].keys()):
ecp = self.params["ecp"][element]
lines.append(" " + element)
lines.append(" " + ecp)
lines.append(" ****")
return lines
def _format_pcm(self):
pcm_format_template = Template(" {name:>$name_width} "
"{value}")
name_width = 0
for name in self.params["pcm"].keys():
if len(name) > name_width:
name_width = len(name)
rem = pcm_format_template.substitute(name_width=name_width)
lines = []
for name in sorted(self.params["pcm"].keys()):
value = self.params["pcm"][name]
lines.append(rem.format(name=name, value=value))
return lines
    def _format_pcm_solvent(self):
        """Format the $pcm_solvent section; a fixed set of keys is written
        first, and each "solventatom" record gets its own line."""
        pp_format_template = Template(" {name:>$name_width} "
                                      "{value}")
        name_width = 0
        for name in self.params["pcm_solvent"].keys():
            if len(name) > name_width:
                name_width = len(name)
        rem = pp_format_template.substitute(name_width=name_width)
        lines = []
        all_keys = set(self.params["pcm_solvent"].keys())
        # These keys must come first (in this order) when present.
        priority_keys = []
        for k in ["dielectric", "nonels", "nsolventatoms", "solventatom"]:
            if k in all_keys:
                priority_keys.append(k)
        additional_keys = all_keys - set(priority_keys)
        ordered_keys = priority_keys + sorted(list(additional_keys))
        for name in ordered_keys:
            value = self.params["pcm_solvent"][name]
            if name == "solventatom":
                # One line per solvent atom record (three ints and a float);
                # deepcopy so formatting can't disturb the stored records.
                for v in copy.deepcopy(value):
                    value = "{:<4d} {:<4d} {:<4d} {:4.2f}".format(*v)
                    lines.append(rem.format(name=name, value=value))
                continue
            lines.append(rem.format(name=name, value=value))
        return lines
def as_dict(self):
if isinstance(self.mol, six.string_types):
mol_dict = self.mol
elif isinstance(self.mol, Molecule):
mol_dict = self.mol.as_dict()
elif isinstance(self.mol, list):
mol_dict = [m.as_dict() for m in self.mol]
else:
raise ValueError('Unknow molecule type "{}"'.format(type(self.mol)))
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"molecule": mol_dict,
"charge": self.charge,
"spin_multiplicity": self.spin_multiplicity,
"params": self.params}
if self.ghost_atoms:
d["ghost_atoms"] = self.ghost_atoms
return d
@classmethod
def from_dict(cls, d):
if d["molecule"] == "read":
mol = "read"
elif isinstance(d["molecule"], dict):
mol = Molecule.from_dict(d["molecule"])
elif isinstance(d["molecule"], list):
mol = [Molecule.from_dict(m) for m in d["molecule"]]
else:
raise ValueError('Unknow molecule type "{}"'.format(type(d["molecule"])))
jobtype = d["params"]["rem"]["jobtype"]
title = d["params"].get("comment", None)
exchange = d["params"]["rem"]["exchange"]
correlation = d["params"]["rem"].get("correlation", None)
basis_set = d["params"]["rem"]["basis"]
aux_basis_set = d["params"]["rem"].get("aux_basis", None)
ecp = d["params"]["rem"].get("ecp", None)
ghost_atoms = d.get("ghost_atoms", None)
optional_params = None
op_keys = set(d["params"].keys()) - {"comment", "rem"}
if len(op_keys) > 0:
optional_params = dict()
for k in op_keys:
optional_params[k] = d["params"][k]
return QcTask(molecule=mol, charge=d["charge"],
spin_multiplicity=d["spin_multiplicity"],
jobtype=jobtype, title=title,
exchange=exchange, correlation=correlation,
basis_set=basis_set, aux_basis_set=aux_basis_set,
ecp=ecp, rem_params=d["params"]["rem"],
optional_params=optional_params,
ghost_atoms=ghost_atoms)
def write_file(self, filename):
with zopen(filename, "wt") as f:
f.write(self.__str__())
@classmethod
def from_file(cls, filename):
with zopen(filename, "rt") as f:
return cls.from_string(f.read())
    @classmethod
    def from_string(cls, contents):
        """
        Creates QcTask from a string.

        Args:
            contents: String representing a QChem input file.

        Returns:
            QcTask object
        """
        mol = None
        charge = None
        spin_multiplicity = None
        params = dict()
        lines = contents.split('\n')
        # Simple state machine: outside a section only "$<name>" lines are
        # legal; inside, raw lines are accumulated until "$end".
        parse_section = False
        section_name = None
        section_text = []
        ghost_atoms = None
        for line_num, line in enumerate(lines):
            l = line.strip().lower()
            if len(l) == 0:
                continue
            if (not parse_section) and (l == "$end" or not l.startswith("$")):
                raise ValueError("Format error, parsing failed")
            if parse_section and l != "$end":
                # Keep the original (non-lowercased) text for the parser.
                section_text.append(line)
            if l.startswith("$") and not parse_section:
                parse_section = True
                section_name = l[1:]
                available_sections = ["comment", "molecule", "rem"] + \
                    sorted(list(cls.optional_keywords_list))
                if section_name not in available_sections:
                    raise ValueError("Unrecognized keyword " + line.strip() +
                                     " at line " + str(line_num))
                if section_name in params:
                    raise ValueError("duplicated keyword " + line.strip() +
                                     "at line " + str(line_num))
            if parse_section and l == "$end":
                # Dispatch to the matching "_parse_<section>" classmethod.
                func_name = "_parse_" + section_name
                if func_name not in QcTask.__dict__:
                    raise Exception(func_name + " is not implemented yet, "
                                    "please implement it")
                parse_func = QcTask.__dict__[func_name].__get__(None, QcTask)
                if section_name == "molecule":
                    mol, charge, spin_multiplicity, ghost_atoms = parse_func(section_text)
                else:
                    d = parse_func(section_text)
                    params[section_name] = d
                parse_section = False
                section_name = None
                section_text = []
        if parse_section:
            raise ValueError("Format error. " + section_name + " is not "
                             "terminated")
        jobtype = params["rem"]["jobtype"]
        title = params.get("comment", None)
        exchange = params["rem"].get("exchange", "hf")
        correlation = params["rem"].get("correlation", None)
        basis_set = params["rem"]["basis"]
        aux_basis_set = params["rem"].get("aux_basis", None)
        ecp = params["rem"].get("ecp", None)
        optional_params = None
        # Everything not in "comment"/"rem" is an optional "$..." section.
        op_keys = set(params.keys()) - {"comment", "rem"}
        if len(op_keys) > 0:
            optional_params = dict()
            for k in op_keys:
                optional_params[k] = params[k]
        return QcTask(molecule=mol, charge=charge,
                      spin_multiplicity=spin_multiplicity,
                      jobtype=jobtype, title=title,
                      exchange=exchange, correlation=correlation,
                      basis_set=basis_set, aux_basis_set=aux_basis_set,
                      ecp=ecp, rem_params=params["rem"],
                      optional_params=optional_params,
                      ghost_atoms=ghost_atoms)
@classmethod
def _parse_comment(cls, contents):
return '\n'.join(contents).strip()
    @classmethod
    def _parse_coords(cls, coord_lines):
        """
        Helper method to parse coordinates. Copied from GaussianInput class.
        """
        # First pass: collect "name value" variable definitions that
        # Z-matrix entries may reference by name.
        paras = {}
        var_pattern = re.compile("^([A-Za-z]+\S*)[\s=,]+([\d\-\.]+)$")
        for l in coord_lines:
            m = var_pattern.match(l.strip())
            if m:
                paras[m.group(1)] = float(m.group(2))
        species = []
        coords = []
        # Stores whether a Zmatrix format is detected. Once a zmatrix format
        # is detected, it is assumed for the remaining of the parsing.
        zmode = False
        for l in coord_lines:
            l = l.strip()
            if not l:
                break
            if (not zmode) and cls.xyz_patt.match(l):
                m = cls.xyz_patt.match(l)
                species.append(m.group(1))
                toks = re.split("[,\s]+", l.strip())
                if len(toks) > 4:
                    # Extra leading token (e.g. an atom index) before x y z.
                    coords.append(list(map(float, toks[2:5])))
                else:
                    coords.append(list(map(float, toks[1:4])))
            elif cls.zmat_patt.match(l):
                zmode = True
                toks = re.split("[,\s]+", l.strip())
                species.append(toks[0])
                toks.pop(0)
                if len(toks) == 0:
                    # First Z-matrix atom sits at the origin.
                    coords.append(np.array([0.0, 0.0, 0.0]))
                else:
                    # Collect (reference atom, parameter) pairs: bond length,
                    # then angle, then dihedral. References may be 1-based
                    # indices or previously seen atom labels; parameters may
                    # be literals or (possibly negated) variable names.
                    nn = []
                    parameters = []
                    while len(toks) > 1:
                        ind = toks.pop(0)
                        data = toks.pop(0)
                        try:
                            nn.append(int(ind))
                        except ValueError:
                            nn.append(species.index(ind) + 1)
                        try:
                            val = float(data)
                            parameters.append(val)
                        except ValueError:
                            if data.startswith("-"):
                                parameters.append(-paras[data[1:]])
                            else:
                                parameters.append(paras[data])
                    if len(nn) == 1:
                        # Second atom: placed on the z axis at bond length.
                        coords.append(np.array(
                            [0.0, 0.0, float(parameters[0])]))
                    elif len(nn) == 2:
                        # Third atom: bond length + angle, rotation about y.
                        coords1 = coords[nn[0] - 1]
                        coords2 = coords[nn[1] - 1]
                        bl = parameters[0]
                        angle = parameters[1]
                        axis = [0, 1, 0]
                        op = SymmOp.from_origin_axis_angle(coords1, axis,
                                                           angle, False)
                        coord = op.operate(coords2)
                        vec = coord - coords1
                        coord = vec * bl / np.linalg.norm(vec) + coords1
                        coords.append(coord)
                    elif len(nn) == 3:
                        # General atom: bond length, angle and dihedral
                        # relative to three reference atoms; the second
                        # rotation corrects the dihedral by the measured
                        # offset `adj`.
                        coords1 = coords[nn[0] - 1]
                        coords2 = coords[nn[1] - 1]
                        coords3 = coords[nn[2] - 1]
                        bl = parameters[0]
                        angle = parameters[1]
                        dih = parameters[2]
                        v1 = coords3 - coords2
                        v2 = coords1 - coords2
                        axis = np.cross(v1, v2)
                        op = SymmOp.from_origin_axis_angle(
                            coords1, axis, angle, False)
                        coord = op.operate(coords2)
                        v1 = coord - coords1
                        v2 = coords1 - coords2
                        v3 = np.cross(v1, v2)
                        adj = get_angle(v3, axis)
                        axis = coords1 - coords2
                        op = SymmOp.from_origin_axis_angle(
                            coords1, axis, dih - adj, False)
                        coord = op.operate(coord)
                        vec = coord - coords1
                        coord = vec * bl / np.linalg.norm(vec) + coords1
                        coords.append(coord)

        def parse_species(sp_str):
            """
            The species specification can take many forms. E.g.,
            simple integers representing atomic numbers ("8"),
            actual species string ("C") or a labelled species ("C1").
            Sometimes, the species string is also not properly capitalized,
            e.g, ("c1"). This method should take care of these known formats.
            """
            try:
                return int(sp_str)
            except ValueError:
                sp = re.sub("\d", "", sp_str)
                return sp.capitalize()

        species = list(map(parse_species, species))
        return Molecule(species, coords)
    @classmethod
    def _parse_molecule(cls, contents):
        """Parse a "$molecule" section.

        Returns a (mol, charge, spin_multiplicity, ghost_atoms) tuple, where
        mol is a Molecule, a list of fragment Molecules, or "read"."""
        def parse_ghost_indices(coord_text_lines):
            # Ghost atoms are marked with "@"; return their 0-based indices
            # plus the coordinate lines with the markers stripped.
            no_ghost_text = [l.replace("@", "") for l in coord_text_lines]
            ghosts = []
            for index, l in enumerate(coord_text_lines):
                l = l.strip()
                if not l:
                    break
                if "@" in l:
                    ghosts.append(index)
            return ghosts, no_ghost_text

        text = copy.deepcopy(contents[:2])
        charge_multi_pattern = re.compile('\s*(?P<charge>'
                                          '[-+]?\d+)\s+(?P<multi>\d+)')
        line = text.pop(0)
        m = charge_multi_pattern.match(line)
        if m:
            charge = int(m.group("charge"))
            spin_multiplicity = int(m.group("multi"))
            line = text.pop(0)
        else:
            charge = None
            spin_multiplicity = None
        if line.strip().lower() == "read":
            # Coordinates come from a previous job; nothing to build here.
            return "read", charge, spin_multiplicity, None
        elif charge is None or spin_multiplicity is None:
            raise ValueError("Charge or spin multiplicity is not found")
        else:
            if contents[1].strip()[0:2] == "--":
                # Fragment input: "--" separates per-fragment blocks, each
                # starting with its own charge/multiplicity line.
                chunks = "\n".join(contents[2:]).split("--\n")
                mol = []
                ghost_atoms = []
                starting_index = 0
                for chunk in chunks:
                    frag_contents = chunk.split("\n")
                    m = charge_multi_pattern.match(frag_contents[0])
                    if m:
                        fragment_charge = int(m.group("charge"))
                        fragment_spin_multiplicity = int(m.group("multi"))
                    else:
                        raise Exception("charge and spin multiplicity must be specified for each fragment")
                    gh, coord_lines = parse_ghost_indices(frag_contents[1:])
                    fragment = cls._parse_coords(coord_lines)
                    fragment.set_charge_and_spin(fragment_charge, fragment_spin_multiplicity)
                    mol.append(fragment)
                    # Ghost indices are global across all fragments.
                    ghost_atoms.extend([i+starting_index for i in gh])
                    starting_index += len(fragment)
            else:
                ghost_atoms, coord_lines = parse_ghost_indices(contents[1:])
                mol = cls._parse_coords(coord_lines)
                if len(ghost_atoms) == 0:
                    # Only set charge/spin when no ghost atoms distort the
                    # electron count.
                    mol.set_charge_and_spin(charge, spin_multiplicity)
            ghost_atoms = ghost_atoms if len(ghost_atoms) > 0 else None
            return mol, charge, spin_multiplicity, ghost_atoms
@classmethod
def _parse_rem(cls, contents):
d = dict()
int_pattern = re.compile('^[-+]?\d+$')
float_pattern = re.compile('^[-+]?\d+\.\d+([eE][-+]?\d+)?$')
for line in contents:
tokens = line.strip().replace("=", ' ').split()
if len(tokens) < 2:
raise ValueError("Can't parse $rem section, there should be "
"at least two field: key and value!")
k1, v = tokens[:2]
k2 = k1.lower()
if k2 in cls.alternative_keys:
k2 = cls.alternative_keys[k2]
if v in cls.alternative_values:
v = cls.alternative_values
if k2 == "xc_grid":
d[k2] = v
elif v == "True":
d[k2] = True
elif v == "False":
d[k2] = False
elif int_pattern.match(v):
d[k2] = int(v)
elif float_pattern.match(v):
d[k2] = float(v)
else:
d[k2] = v.lower()
return d
@classmethod
def _parse_aux_basis(cls, contents):
if len(contents) % 3 != 0:
raise ValueError("Auxiliary basis set section format error")
chunks = zip(*[iter(contents)]*3)
t = contents[0].split()
if len(t) == 2 and int(t[1]) > 0:
bs = []
for i, ch in enumerate(chunks):
element, number = ch[0].split()
basis = ch[1]
if int(number) != i+1:
raise ValueError("Atom order number doesn't match in $aux_basis section")
bs.append((element.strip().capitalize(), basis.strip().lower()))
else:
bs = dict()
for ch in chunks:
element, basis = ch[:2]
bs[element.strip().capitalize()] = basis.strip().lower()
return bs
@classmethod
def _parse_basis(cls, contents):
if len(contents) % 3 != 0:
raise ValueError("Basis set section format error")
chunks = zip(*[iter(contents)]*3)
t = contents[0].split()
if len(t) == 2 and int(t[1]) > 0:
bs = []
for i, ch in enumerate(chunks):
element, number = ch[0].split()
basis = ch[1]
if int(number) != i+1:
raise ValueError("Atom order number doesn't match in $basis section")
bs.append((element.strip().capitalize(), basis.strip().lower()))
else:
bs = dict()
for ch in chunks:
element, basis = ch[:2]
bs[element.strip().capitalize()] = basis.strip().lower()
return bs
@classmethod
def _parse_ecp(cls, contents):
if len(contents) % 3 != 0:
raise ValueError("ECP section format error")
chunks = zip(*[iter(contents)]*3)
d = dict()
for ch in chunks:
element, ecp = ch[:2]
d[element.strip().capitalize()] = ecp.strip().lower()
return d
@classmethod
def _parse_pcm(cls, contents):
d = dict()
int_pattern = re.compile('^[-+]?\d+$')
float_pattern = re.compile('^[-+]?\d+\.\d+([eE][-+]?\d+)?$')
for line in contents:
tokens = line.strip().replace("=", ' ').split()
if len(tokens) < 2:
raise ValueError("Can't parse $pcm section, there should be "
"at least two field: key and value!")
k1, v = tokens[:2]
k2 = k1.lower()
if k2 in cls.alternative_keys:
k2 = cls.alternative_keys[k2]
if v in cls.alternative_values:
v = cls.alternative_values
if v == "True":
d[k2] = True
elif v == "False":
d[k2] = False
elif int_pattern.match(v):
d[k2] = int(v)
elif float_pattern.match(v):
d[k2] = float(v)
else:
d[k2] = v.lower()
return d
@classmethod
def _parse_pcm_solvent(cls, contents):
d = dict()
int_pattern = re.compile('^[-+]?\d+$')
float_pattern = re.compile('^[-+]?\d+\.\d+([eE][-+]?\d+)?$')
for line in contents:
tokens = line.strip().replace("=", ' ').split()
if len(tokens) < 2:
raise ValueError("Can't parse $pcm_solvent section, "
"there should be at least two field: "
"key and value!")
k1, v = tokens[:2]
k2 = k1.lower()
if k2 in cls.alternative_keys:
k2 = cls.alternative_keys[k2]
if v in cls.alternative_values:
v = cls.alternative_values
if k2 == "solventatom":
v = [int(i) for i in tokens[1:4]]
# noinspection PyTypeChecker
v.append(float(tokens[4]))
if k2 not in d:
d[k2] = [v]
else:
d[k2].append(v)
elif v == "True":
d[k2] = True
elif v == "False":
d[k2] = False
elif int_pattern.match(v):
d[k2] = int(v)
elif float_pattern.match(v):
d[k2] = float(v)
else:
d[k2] = v.lower()
return d
class QcInput(PMGSONable):
    """
    An object representing a multiple step QChem input file.

    Args:
        jobs: The QChem jobs (list of QcTask objects, or a single QcTask)
    """
    def __init__(self, jobs):
        """Validate and store the job list; a single QcTask is wrapped in a list."""
        jobs = jobs if isinstance(jobs, list) else [jobs]
        for j in jobs:
            if not isinstance(j, QcTask):
                # BUG FIX: the message previously claimed "QcInput" although
                # the elements are validated against QcTask.
                raise ValueError("jobs must be a list of QcTask objects")
        self.jobs = jobs
    def __str__(self):
        # Q-Chem separates successive jobs in one input file with "@@@".
        return "\n@@@\n\n\n".join([str(j) for j in self.jobs])
    def write_file(self, filename):
        """Write the multi-job input to *filename* (zopen handles .gz etc.)."""
        with zopen(filename, "wt") as f:
            f.write(self.__str__())
    def as_dict(self):
        """MSON-style serialization of all jobs."""
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "jobs": [j.as_dict() for j in self.jobs]}
    @classmethod
    def from_dict(cls, d):
        """Rebuild from an ``as_dict`` representation."""
        jobs = [QcTask.from_dict(j) for j in d["jobs"]]
        # use cls (not the hard-coded class) so subclasses round-trip correctly
        return cls(jobs)
    @classmethod
    def from_string(cls, contents):
        """Parse a multi-job input text, splitting on the "@@@" separator."""
        qc_contents = contents.split("@@@")
        jobs = [QcTask.from_string(cont) for cont in qc_contents]
        return cls(jobs)
    @classmethod
    def from_file(cls, filename):
        """Read and parse a multi-job input file."""
        with zopen(filename, "rt") as f:
            return cls.from_string(f.read())
class QcOutput(object):
    """Parser for a Q-Chem output file, possibly containing several jobs.

    The raw text is split on the "Running Job i of n" banners and each chunk
    is parsed independently by ``_parse_job``; ``self.data`` is the resulting
    list of per-job result dicts.
    """
    # Conversion factor kcal/mol -> eV, applied to thermochemistry corrections.
    kcal_per_mol_2_eV = 4.3363E-2
    def __init__(self, filename):
        """Read *filename* (zopen transparently handles .gz) and parse every job."""
        self.filename = filename
        with zopen(filename, "rt") as f:
            data = f.read()
        # One chunk per job of a batch ("@@@"-separated) Q-Chem run.
        chunks = re.split("\n\nRunning Job \d+ of \d+ \S+", data)
        # noinspection PyTypeChecker
        self.data = list(map(self._parse_job, chunks))
    @classmethod
    def _expected_successful_pattern(cls, qctask):
        """Return the regex texts that must all appear in the output of a
        successful run of *qctask* (depends on jobtype and correlation level)."""
        text = ["Convergence criterion met"]
        if "correlation" in qctask.params["rem"]:
            if "ccsd" in qctask.params["rem"]["correlation"]\
                    or "qcisd" in qctask.params["rem"]["correlation"]:
                text.append('CC.*converged')
        if qctask.params["rem"]["jobtype"] == "opt"\
                or qctask.params["rem"]["jobtype"] == "ts":
            text.append("OPTIMIZATION CONVERGED")
        if qctask.params["rem"]["jobtype"] == "freq":
            text.append("VIBRATIONAL ANALYSIS")
        if qctask.params["rem"]["jobtype"] == "gradient":
            text.append("Gradient of SCF Energy")
        return text
    @classmethod
    def _parse_job(cls, output):
        """Parse one job chunk of Q-Chem output text into a result dict.

        The parser is a line-by-line state machine: the ``parse_*`` flags
        select which section (input echo, coordinates, SCF iterations,
        gradient, frequencies, charges, MO eigenvalues, ...) the current
        line belongs to. Returns a dict with keys such as "energies",
        "molecules", "charges", "corrections", "frequencies", "gradients",
        "errors", "has_error", "input", "scf_iteration_energies".
        """
        # --- regex patterns for the quantities extracted below ---
        scf_energy_pattern = re.compile("Total energy in the final basis set ="
                                        "\s+(?P<energy>-\d+\.\d+)")
        corr_energy_pattern = re.compile("(?P<name>[A-Z\-\(\)0-9]+)\s+"
                                         "([tT]otal\s+)?[eE]nergy\s+=\s+"
                                         "(?P<energy>-\d+\.\d+)")
        coord_pattern = re.compile("\s*\d+\s+(?P<element>[A-Z][a-zH]*)\s+"
                                   "(?P<x>\-?\d+\.\d+)\s+"
                                   "(?P<y>\-?\d+\.\d+)\s+"
                                   "(?P<z>\-?\d+\.\d+)")
        num_ele_pattern = re.compile("There are\s+(?P<alpha>\d+)\s+alpha "
                                     "and\s+(?P<beta>\d+)\s+beta electrons")
        total_charge_pattern = re.compile("Sum of atomic charges ="
                                          "\s+(?P<charge>\-?\d+\.\d+)")
        scf_iter_pattern = re.compile("\d+\s+(?P<energy>\-\d+\.\d+)\s+"
                                      "(?P<diis_error>\d+\.\d+E[-+]\d+)")
        zpe_pattern = re.compile("Zero point vibrational energy:"
                                 "\s+(?P<zpe>\d+\.\d+)\s+kcal/mol")
        thermal_corr_pattern = re.compile("(?P<name>\S.*\S):\s+"
                                          "(?P<correction>\d+\.\d+)\s+"
                                          "k?cal/mol")
        detailed_charge_pattern = re.compile("(Ground-State )?(?P<method>\w+)( Net)?"
                                             " Atomic Charges")
        nbo_charge_pattern = re.compile("(?P<element>[A-Z][a-z]{0,2})\s*(?P<no>\d+)\s+(?P<charge>\-?\d\.\d+)"
                                        "\s+(?P<core>\-?\d+\.\d+)\s+(?P<valence>\-?\d+\.\d+)"
                                        "\s+(?P<rydberg>\-?\d+\.\d+)\s+(?P<total>\-?\d+\.\d+)"
                                        "(\s+(?P<spin>\-?\d\.\d+))?")
        nbo_wavefunction_type_pattern = re.compile("This is an? (?P<type>\w+\-\w+) NBO calculation")
        bsse_pattern = re.compile("DE, kJ/mol\s+(?P<raw_be>\-?\d+\.?\d+([eE]\d+)?)\s+"
                                  "(?P<corrected_be>\-?\d+\.?\d+([eE]\d+)?)")
        float_pattern = re.compile("\-?\d+\.?\d+([eE]\d+)?$")
        # (pattern, label) pairs; every output line is searched against each
        # and the label is appended to ``errors`` on a match.
        error_defs = (
            (re.compile("Convergence failure"), "Bad SCF convergence"),
            (re.compile("Coordinates do not transform within specified "
                        "threshold"), "autoz error"),
            (re.compile("MAXIMUM OPTIMIZATION CYCLES REACHED"),
             "Geometry optimization failed"),
            (re.compile("\s+[Nn][Aa][Nn]\s+"), "NAN values"),
            (re.compile("energy\s+=\s*(\*)+"), "Numerical disaster"),
            # NOTE(review): "\d+s+errno" looks like a missing backslash
            # ("\s+errno") — kept as-is to preserve behavior.
            (re.compile("NewFileMan::OpenFile\(\):\s+nopenfiles=\d+\s+"
                        "maxopenfiles=\d+s+errno=\d+"), "Open file error"),
            (re.compile("Application \d+ exit codes: 1[34]\d+"), "Exit Code 134"),
            (re.compile("Negative overlap matrix eigenvalue. Tighten integral "
                        "threshold \(REM_THRESH\)!"), "Negative Eigen"),
            (re.compile("Unable to allocate requested memory in mega_alloc"),
             "Insufficient static memory"),
            (re.compile("Application \d+ exit signals: Killed"),
             "Killed")
        )
        # --- accumulators and state flags for the line-by-line state machine ---
        energies = []
        scf_iters = []
        coords = []
        species = []
        molecules = []
        gradients = []
        freqs = []
        vib_freqs = []
        vib_modes = []
        grad_comp = None
        errors = []
        parse_input = False
        parse_coords = False
        parse_scf_iter = False
        parse_gradient = False
        parse_freq = False
        parse_modes = False
        qctask_lines = []
        qctask = None
        jobtype = None
        charge = None
        spin_multiplicity = None
        thermal_corr = dict()
        properly_terminated = False
        pop_method = None
        parse_charge = False
        nbo_available = False
        nbo_charge_header = None
        parse_nbo_charge = False
        charges = dict()
        scf_successful = False
        opt_successful = False
        parse_alpha_homo = False
        parse_alpha_lumo = False
        parse_beta_homo = False
        parse_beta_lumo = False
        current_alpha_homo = None
        current_alpha_lumo = None
        current_beta_homo = None
        homo_lumo = []
        bsse = None
        # NOTE(review): "hiershfiled" looks like a typo for "hirshfeld"; the
        # name is only used locally, so it is kept unchanged.
        hiershfiled_pop = False
        for line in output.split("\n"):
            # record any known error signature found on this line
            for ep, message in error_defs:
                if ep.search(line):
                    errors.append(message)
            if parse_input:
                # echo of the input file, delimited by dashed separator lines
                if "-" * 50 in line:
                    if len(qctask_lines) == 0:
                        continue
                    else:
                        qctask = QcTask.from_string('\n'.join(qctask_lines))
                        jobtype = qctask.params["rem"]["jobtype"]
                        parse_input = False
                        continue
                qctask_lines.append(line)
            elif parse_coords:
                # "Standard Nuclear Orientation" coordinate table
                if "-" * 50 in line:
                    if len(coords) == 0:
                        continue
                    else:
                        if qctask and qctask.ghost_atoms:
                            if isinstance(qctask.mol, Molecule):
                                for i in qctask.ghost_atoms:
                                    species[i] = qctask.mol.sites[i].specie.symbol
                        molecules.append(Molecule(species, coords))
                        coords = []
                        species = []
                        parse_coords = False
                        continue
                if "Atom" in line:
                    continue
                m = coord_pattern.match(line)
                coords.append([float(m.group("x")), float(m.group("y")),
                               float(m.group("z"))])
                species.append(m.group("element"))
            elif parse_scf_iter:
                # SCF iteration table: (energy, DIIS error) per cycle
                if "SCF time: CPU" in line:
                    parse_scf_iter = False
                    continue
                if 'Convergence criterion met' in line:
                    scf_successful = True
                m = scf_iter_pattern.search(line)
                if m:
                    scf_iters[-1].append((float(m.group("energy")),
                                          float(m.group("diis_error"))))
            elif parse_gradient:
                # "Gradient of SCF Energy" block; the values arrive in stripes
                # of three component rows, transposed into per-atom triplets
                if "Max gradient component" in line:
                    gradients[-1]["max_gradient"] = \
                        float(line.split("=")[1])
                    if grad_comp:
                        if len(grad_comp) == 3:
                            gradients[-1]["gradients"].extend(zip(*grad_comp))
                        else:
                            raise Exception("Gradient section parsing failed")
                    continue
                elif "RMS gradient" in line:
                    gradients[-1]["rms_gradient"] = \
                        float(line.split("=")[1])
                    parse_gradient = False
                    grad_comp = None
                    continue
                elif "." not in line:
                    # a header line (atom indices) starts a new stripe
                    if grad_comp:
                        if len(grad_comp) == 3:
                            gradients[-1]["gradients"].extend(zip(*grad_comp))
                        else:
                            raise Exception("Gradient section parsing failed")
                    grad_comp = []
                else:
                    # columns can run together; insert a space where crowded
                    grad_line_token = list(line)
                    grad_crowd = False
                    grad_line_final = line
                    for i in range(5, len(line), 12):
                        c = grad_line_token[i]
                        if not c.isspace():
                            grad_crowd = True
                            if ' ' in grad_line_token[i+1: i+6+1] or \
                                    len(grad_line_token[i+1: i+6+1]) < 6:
                                continue
                            grad_line_token[i-1] = ' '
                    if grad_crowd:
                        grad_line_final = ''.join(grad_line_token)
                    grad_comp.append([float(x) for x
                                      in grad_line_final.strip().split()[1:]])
            elif parse_freq:
                if parse_modes:
                    # displacement rows; grouped into (x, y, z) per atom
                    if "TransDip" in line:
                        parse_modes = False
                        for freq, mode in zip(vib_freqs, zip(*vib_modes)):
                            freqs.append({"frequency": freq,
                                          "vib_mode": mode})
                        vib_modes = []
                        continue
                    dis_flat = [float(x) for x in line.strip().split()[1:]]
                    dis_atom = zip(*([iter(dis_flat)]*3))
                    vib_modes.append(dis_atom)
                if "STANDARD THERMODYNAMIC QUANTITIES" in line\
                        or "Imaginary Frequencies" in line:
                    parse_freq = False
                    continue
                if "Frequency:" in line:
                    vib_freqs = [float(vib) for vib
                                 in line.strip().strip().split()[1:]]
                elif "X Y Z" in line:
                    parse_modes = True
                    continue
            elif parse_charge:
                # population-analysis charge table for ``pop_method``
                if '-'*20 in line:
                    if len(charges[pop_method]) == 0:
                        continue
                    else:
                        pop_method = None
                        parse_charge = False
                else:
                    if len(line.strip()) == 0 or\
                            'Atom' in line:
                        continue
                    else:
                        charges[pop_method].append(float(line.split()[2]))
            elif parse_nbo_charge:
                # NBO summary-of-natural-population table
                if '-'*20 in line:
                    if len(charges[pop_method]) == 0:
                        continue
                elif "="*20 in line:
                    pop_method = None
                    parse_nbo_charge = False
                else:
                    m = nbo_charge_pattern.search(line)
                    if m:
                        charges[pop_method].append(float(m.group("charge")))
                    else:
                        raise Exception("Can't find NBO charges")
            elif parse_alpha_homo:
                # last "Occupied" eigenvalue before "Virtual" is the HOMO
                if "-- Occupied --" in line:
                    continue
                elif "-- Virtual --" in line:
                    parse_alpha_homo = False
                    parse_alpha_lumo = True
                    continue
                else:
                    tokens = line.split()
                    m = float_pattern.search(tokens[-1])
                    if m:
                        current_alpha_homo = float(m.group(0))
                    continue
            elif parse_alpha_lumo:
                # first virtual eigenvalue is the LUMO
                current_alpha_lumo = float(line.split()[0])
                parse_alpha_lumo = False
                continue
            elif parse_beta_homo:
                if "-- Occupied --" in line:
                    continue
                elif "-- Virtual --" in line:
                    parse_beta_homo = False
                    parse_beta_lumo = True
                    continue
                else:
                    tokens = line.split()
                    m = float_pattern.search(tokens[-1])
                    if m:
                        current_beta_homo = float(m.group(0))
                    continue
            elif parse_beta_lumo:
                # unrestricted case: combine both spin channels
                current_beta_lumo = float(line.split()[0])
                parse_beta_lumo = False
                current_homo = max([current_alpha_homo, current_beta_homo])
                current_lumo = min([current_alpha_lumo, current_beta_lumo])
                homo_lumo.append([Energy(current_homo, "Ha").to("eV"),
                                  Energy(current_lumo, "Ha").to("eV")])
                current_alpha_homo = None
                current_alpha_lumo = None
                current_beta_homo = None
                continue
            elif "-" * 50 in line and not (current_alpha_lumo is None):
                # restricted case: no beta block followed the alpha MOs
                homo_lumo.append([Energy(current_alpha_homo, "Ha").to("eV"),
                                  Energy(current_alpha_lumo, "Ha").to("eV")])
                current_alpha_homo = None
                current_alpha_lumo = None
                current_beta_homo = None
                continue
            else:
                # free-running scans, valid outside any dedicated section
                if spin_multiplicity is None:
                    m = num_ele_pattern.search(line)
                    if m:
                        spin_multiplicity = int(m.group("alpha")) - \
                            int(m.group("beta")) + 1
                if charge is None:
                    m = total_charge_pattern.search(line)
                    if m:
                        charge = int(float(m.group("charge")))
                if jobtype and jobtype == "freq":
                    m = zpe_pattern.search(line)
                    if m:
                        zpe = float(m.group("zpe"))
                        thermal_corr["ZPE"] = zpe
                    m = thermal_corr_pattern.search(line)
                    if m:
                        thermal_corr[m.group("name")] = \
                            float(m.group("correction"))
                m = bsse_pattern.search(line)
                if m:
                    # basis-set superposition error, converted to eV/atom
                    raw_be = float(m.group("raw_be"))
                    corrected_be = float(m.group("corrected_be"))
                    bsse_fwu = FloatWithUnit(raw_be - corrected_be, "kJ mol^-1")
                    bsse = bsse_fwu.to('eV atom^-1').real
                name = None
                energy = None
                m = scf_energy_pattern.search(line)
                if m:
                    name = "SCF"
                    energy = Energy(m.group("energy"), "Ha").to("eV")
                m = corr_energy_pattern.search(line)
                if m and m.group("name") != "SCF":
                    name = m.group("name")
                    energy = Energy(m.group("energy"), "Ha").to("eV")
                m = detailed_charge_pattern.search(line)
                if m:
                    pop_method = m.group("method").lower()
                    parse_charge = True
                    charges[pop_method] = []
                if nbo_available:
                    if nbo_charge_header is None:
                        # the header layout depends on the NBO wavefunction type
                        m = nbo_wavefunction_type_pattern.search(line)
                        if m:
                            nbo_wavefunction_type = m.group("type")
                            nbo_charge_header_dict = {
                                "closed-shell": "Atom No Charge Core "
                                "Valence Rydberg Total",
                                "open-shell": "Atom No Charge Core "
                                "Valence Rydberg Total Density"}
                            nbo_charge_header = nbo_charge_header_dict[nbo_wavefunction_type]
                        continue
                    if nbo_charge_header in line:
                        pop_method = "nbo"
                        parse_nbo_charge = True
                        charges[pop_method] = []
                if "N A T U R A L B O N D O R B I T A L A N A L Y S I S" in line:
                    nbo_available = True
                if name and energy:
                    energies.append(tuple([name, energy]))
                # --- section triggers that switch the state machine ---
                if "User input:" in line:
                    parse_input = True
                elif "Standard Nuclear Orientation (Angstroms)" in line:
                    parse_coords = True
                elif "Performing Hirshfeld population analysis" in line:
                    hiershfiled_pop = True
                elif "Hirshfeld: atomic densities completed" in line:
                    hiershfiled_pop = False
                elif ("Cycle Energy DIIS Error" in line
                        or "Cycle Energy RMS Gradient" in line)\
                        and not hiershfiled_pop:
                    parse_scf_iter = True
                    scf_iters.append([])
                    scf_successful = False
                elif "Gradient of SCF Energy" in line:
                    parse_gradient = True
                    gradients.append({"gradients": []})
                elif "VIBRATIONAL ANALYSIS" in line:
                    parse_freq = True
                elif "Alpha MOs" in line:
                    parse_alpha_homo = True
                    parse_alpha_lumo = False
                elif "Beta MOs" in line:
                    parse_beta_homo = True
                    parse_beta_lumo = False
                elif "Thank you very much for using Q-Chem." in line:
                    properly_terminated = True
                elif "OPTIMIZATION CONVERGED" in line:
                    opt_successful = True
        # --- post-loop consolidation and sanity checks ---
        if charge is None:
            errors.append("Molecular charge is not found")
        elif spin_multiplicity is None:
            # NOTE(review): "multipilicity" typo kept — the message text may
            # be matched downstream.
            errors.append("Molecular spin multipilicity is not found")
        else:
            for mol in molecules:
                if qctask is None or qctask.ghost_atoms is None:
                    mol.set_charge_and_spin(charge, spin_multiplicity)
        # convert thermochemistry corrections to eV (entropies are reported
        # in cal/mol, hence the extra 1e-3)
        for k in thermal_corr.keys():
            v = thermal_corr[k]
            if "Entropy" in k:
                v *= cls.kcal_per_mol_2_eV * 1.0E-3
            else:
                v *= cls.kcal_per_mol_2_eV
            thermal_corr[k] = v
        solvent_method = "NA"
        if qctask:
            if "solvent_method" in qctask.params["rem"]:
                solvent_method = qctask.params["rem"]["solvent_method"]
        else:
            errors.append("No input text")
        if not scf_successful:
            if 'Bad SCF convergence' not in errors:
                errors.append('Bad SCF convergence')
        if jobtype == 'opt':
            if not opt_successful:
                if 'Geometry optimization failed' not in errors:
                    errors.append('Geometry optimization failed')
        if len(errors) == 0:
            # no explicit error: still require the success markers expected
            # for this jobtype to be present
            for text in cls._expected_successful_pattern(qctask):
                success_pattern = re.compile(text)
                if not success_pattern.search(output):
                    errors.append("Can't find text to indicate success")
        data = {
            "jobtype": jobtype,
            "energies": energies,
            "HOMO/LUMOs": homo_lumo,
            "bsse": bsse,
            'charges': charges,
            "corrections": thermal_corr,
            "molecules": molecules,
            "errors": errors,
            "has_error": len(errors) > 0,
            "frequencies": freqs,
            "gradients": gradients,
            "input": qctask,
            "gracefully_terminated": properly_terminated,
            "scf_iteration_energies": scf_iters,
            "solvent_method": solvent_method
        }
        return data
| yanikou19/pymatgen | pymatgen/io/qchemio.py | Python | mit | 73,185 | [
"Gaussian",
"Q-Chem",
"pymatgen"
] | 35e3aa32f6176719d461c7cdd14d534bec863a0b1b7a89e59504f1754c8286e2 |
try: from paraview import vtk
except: import vtk
try :
from paraview import numpy_support
except:
from vtk.util import numpy_support
try:
from paraview.vtk import vtkFiltersModeling
except:
import vtk as vtkFiltersModeling
from fonctions_basiques import *
from extractions import *
import numpy
#__________________________________________________________________________________________
def ecrire_interface_file(acces_fichier, dimensions, indice_paraview):
    """Write elsA "interface files" for boundary-condition based extraction.

    Indices follow the ParaView convention, i.e. the first point has index 0.
    Two files are written in fmt_v3d format: ``<acces_fichier>_fente.bnd``
    with the interface cells of the slot(s) ("fente") and
    ``<acces_fichier>_paroi.bnd`` with the remaining wall cells.

    - acces_fichier: path prefix of the files to write
    - dimensions: point dimensions of the boundary patch
    - indice_paraview: either one [imin, imax, jmin, jmax] quadruplet, or a
      sequence of such quadruplets when there are several slots
    """
    dimensions = numpy.array(dimensions)
    # cell numbering is 1-based on the elsA side; list() keeps .remove()
    # available and is also required under python 3 where range() is lazy
    narray = list(range(1, numpy.prod(dimensions - 1) + 1))
    narray_fente = []
    indice_paraview = numpy.asarray(indice_paraview)
    if len(indice_paraview.shape) == 1:
        # a single slot window
        for i in range(indice_paraview[0], indice_paraview[1]):
            for j in range(indice_paraview[2], indice_paraview[3]):
                narray_fente.append(float((i + 1) + j * (dimensions[0] - 1)))
    elif len(indice_paraview.shape) == 2:
        # several slot windows
        for fente in range(indice_paraview.shape[0]):
            indices_fente = indice_paraview[fente]
            for i in range(indices_fente[0], indices_fente[1]):
                for j in range(indices_fente[2], indices_fente[3]):
                    narray_fente.append(float((i + 1) + j * (dimensions[0] - 1)))
    else:
        # python-2/3 compatible raise (was the py2-only "raise IOError, msg")
        raise IOError('Pas compriiis : indice_paraview')
    narray_fente = numpy.asarray(narray_fente)
    # the wall file gets every cell that does not belong to a slot
    for num in narray_fente:
        narray.remove(num)
    narray = numpy.asarray(narray)
    ecrire_v3d(acces_fichier = acces_fichier + '_fente.bnd',
               dict_numpy_arrays = {'interface_index': numpy.asarray(narray_fente)},
               numbloc = 1,
               fmt_fichier = 'fmt',
               dimensions = (narray_fente.size, 1, 1)
               )
    ecrire_v3d(acces_fichier = acces_fichier + '_paroi.bnd',
               dict_numpy_arrays = {'interface_index': numpy.asarray(narray)},
               numbloc = 1,
               fmt_fichier = 'fmt',
               dimensions = (narray.size, 1, 1)
               )
#__________________________________________________________________________________________
#__________________________________________________________________________________________
def lire_interface_file(input, acces_fichier, type_fichier = "v3d", \
        fmt_fichier= "fmt", endian= "big" , \
        precision = 'i4r8', nom_array_interface = "interface_index"):
    """Read an elsA interface file and tag the cells of *input*.

    The surface must be given as *input*. Cells listed in the interface file
    receive 1 in a new cell array named *nom_array_interface*; all other
    cells receive 0. The interface file must be in v3d format (or tecplot
    with type_fichier="tp").
    """
    output = vtk_new_shallowcopy(input)
    if type_fichier == 'v3d':
        data = lire_v3d(acces_fichier = acces_fichier, fmt_fichier = fmt_fichier,
                        endian = endian, precision = precision)
    elif type_fichier == 'tp':
        data = lire_fichier_tecplot(acces_fichier = acces_fichier)
    else:
        # python-2/3 compatible raise (was the py2-only "raise IOError, msg")
        raise IOError('format de fichier non implemente')
    cell_interf = numpy.zeros(output.GetNumberOfCells())
    # indices stored in the interface file are 1-based
    cell_interf[numpy.asarray(data['data'][nom_array_interface], dtype = int) - 1] = 1
    cell_interf = numpy_support.numpy_to_vtk(cell_interf, deep = 1)
    cell_interf.SetName(nom_array_interface)
    output.GetCellData().AddArray(cell_interf)
    return output
#__________________________________________________________________________________________
##____________________________________________________________________________
#def trouver_col(volume, nb_aubes, bloc_aubage=None, coupe="coordx=65", formule_extraction_surface_aubage="j=jmin",
#surface_aubage=None,
#axe=2):
#"""POUR DIFFUSEUR RADIAL SEULEMENT - extension a programmer
#fonction qui retourne un plan correspondant au col
#nb_aubes est le nombre d'aubes correspondant
#a la grille consideree. Ce nombre est utilise pour determiner la position
#de l'aube voisine.
#coupe est la coupe a effectuer pour obtenir un profil 2d a partir du 3d
#et determiner la ligne du col, support du plan au col
#Si surface_aubage est donnee, sous la forme d'un objet VTK, alors elle est directement utilisee
#Typiquement utilise pour les maillages non structure
#axe indique l'axe de rotation : 0 pour x -- 1 pour y -- 2 pour z
#"""
#if surface_aubage is None:
#surface = Extraction(input = bloc_aubage,
#formule_extraction = formule_extraction_surface_aubage,
#calculer_vecteur_normal = 1).get_output()
#else:
#surface = surface_aubage
##on extrait une ligne sur le profil. La recherche du col est faite en 2D
#ligne = Extraction(input = surface, formule_extraction = coupe).get_output()
#coords = get_vtk_array_as_numpy_array(input = ligne, nom_array = 'coords')
##Calcul du rayon pour chacun des points sur le profil
#if axe == 0:
#coordr = numpy.sqrt(coords[:, 1] ** 2 + coords[:, 2] ** 2)
#elif axe == 1:
#coordr = numpy.sqrt(coords[:, 0] ** 2 + coords[:, 2] ** 2)
#elif axe == 2:
#coordr = numpy.sqrt(coords[:, 0] ** 2 + coords[:, 1] ** 2)
#else:
#raise IOError, "pyturbo pas comprendre axe"
##on cherche le bord d'attaque avec le minimum du rayon
##amarsan : a verifier pour turbine radiale
#coords_ba = coords[numpy.argmin(coordr), :]
##calcul des coordonnees du bord d'attaque de l'aubage voisin
#angle_rotation = 2 * numpy.pi / nb_aubes
#if axe == 0:
#coords_ba = [
#coords_ba[0],
#coords_ba[1] * numpy.cos(angle_rotation) - coords_ba[2] * numpy.sin(angle_rotation),
#coords_ba[2] * numpy.cos(angle_rotation) + coords_ba[1] * numpy.sin(angle_rotation)
#]
#elif axe == 1:
#coords_ba = [
#coords_ba[0] * numpy.cos(angle_rotation) + coords_ba[2] * numpy.sin(angle_rotation),
#coords_ba[1],
#coords_ba[2] * numpy.cos(angle_rotation) - coords_ba[0] * numpy.sin(angle_rotation),
#]
#else: #a ce moment la, l'axe est forcement 0, 1 ou 2. Plus d'erreur possible.
#coords_ba = [
#coords_ba[0] * numpy.cos(angle_rotation) - coords_ba[1] * numpy.sin(angle_rotation),
#coords_ba[1] * numpy.cos(angle_rotation) + coords_ba[0] * numpy.sin(angle_rotation),
#coords_ba[2]
#]
##calcul de la distance entre chacun des points de la ligne et le bord d'attaque de l'aubage voisin
#dist_ba = numpy.sqrt(numpy.sum((coords - coords_ba) ** 2, axis = 1))
##on prend le minimum de cette distance
#coords_col = coords[dist_ba.argmin(), :]
##calcul du vecteur normal au plan
#vect_col = coords_ba - coords_col
#normal = numpy.cross(
#[axe == 0, axe == 1, axe == 2], vect_col) / numpy.linalg.norm(vect_col)
##generation du plan de coupe
#plan = vtk.vtkPlane()
#plan.SetOrigin(coords_ba)
#plan.SetNormal(normal)
##coupe du volume duplique (pour avoir un canal complet)
#coupe = vtk.vtkCutter()
#coupe.SetCutFunction(plan)
#volume_duplique = dupliquer_canal(volume, angle_rotation * 180. / numpy.pi, axe = axe)
#plan = appliquer_sur_multibloc(coupe, volume_duplique)
#plan = merge_multibloc(plan)
##calcul pour exclure les exterieurs du plan, et ne garder que le col
##on fait le produit scalaire avec le segment jusqu'au bord d'attaque et on prend la zone entre 0 et 1
#coords = get_vtk_array_as_numpy_array(plan, 'coords')
#data = numpy.dot(coords[:, 1:] - coords_col[1:],
#vect_col[1:]) / numpy.linalg.norm(vect_col) ** 2
#data = numpy_support.numpy_to_vtk(data, deep = 1)
#data.SetName("dist")
#plan.GetPointData().AddArray(data)
#plan = set_scalaires_actifs(plan, "dist")
#select = vtk.vtkThreshold()
#vtk_set_input(select, plan)
#select.ThresholdBetween(1e-4, 1 - 1e-4)
#select.Update()
#col = select.GetOutput()
#return col
##____________________________________________________________________________
#____________________________________________________________________________
def trouver_col(volume, surface_aubage, nb_aubes,
                # coupe="coordz=0.005",
                coupe="coordx=64.7500961",
                axe=2):
    """Return a cut plane corresponding to the throat ("col") of a blade row.

    2D profiles only -- extension still to be programmed.
    - volume: VTK object for a single blade channel
    - surface_aubage: VTK object for the blade surface in that channel
    - nb_aubes: number of blades of the row; used to duplicate the channel
      and locate the neighbouring blade by rotation about the axis
    - coupe: extraction formula reducing the 3D blade surface to a 2D
      profile, on which the throat line is searched
    - axe: rotation axis (0 for x, 1 for y, 2 for z)
    """
    # extract one line on the profile; the throat search is done in 2D
    profil = Extraction(input = surface_aubage, formule_extraction = coupe).get_output()
    ligne = get_vtk_array_as_numpy_array(input = profil, nom_array = 'coords')
    # coordinates of the neighbouring blade profile, by rotation about the axis
    angle_rotation = 360. / nb_aubes
    profil_voisin = rotation(profil, angle_rotation, axe=axe)
    ligne_voisine = get_vtk_array_as_numpy_array(input = profil_voisin, nom_array = 'coords')
    # point-to-point distances between the two lines: with n points per line
    # an (n, n) matrix is built by replicating each coordinate set
    """
    ligne = [0 0 0 0 ...
             1 1 1 1 ...
             ...
            ]
    ligne_voisine = [0 1 2 ...
                     0 1 2 ...
                     0 1 2 ...
                     ...
                    ]
    """
    ligne = numpy.repeat(ligne, ligne.shape[0], axis=-1).reshape(ligne.shape + (ligne.shape[0],)).transpose(0, 2, 1)
    ligne_voisine = numpy.repeat(ligne_voisine, ligne_voisine.shape[0], axis=-1).reshape(
        ligne_voisine.shape + (ligne_voisine.shape[0],)).transpose(2, 0, 1)
    distances = numpy.sqrt(numpy.sum((ligne - ligne_voisine) ** 2, axis = -1))
    # the closest pair of points gives the two throat end points
    where_min = numpy.where(distances == numpy.min(distances))
    pt_ligne = ligne[where_min[0], 0].ravel()
    pt_ligne_voisine = ligne_voisine[0, where_min[1]].ravel()
    # normal vector of the cut plane
    vect_col = pt_ligne_voisine - pt_ligne
    normal = numpy.cross(
        [axe == 0, axe == 1, axe == 2], vect_col) / numpy.linalg.norm(vect_col)
    # build the cut plane
    plan = vtk.vtkPlane()
    plan.SetOrigin(pt_ligne)
    plan.SetNormal(normal)
    # cut the duplicated volume (so that a complete channel is available)
    coupe = vtk.vtkCutter()
    coupe.SetCutFunction(plan)
    volume_duplique = dupliquer_canal(volume, 360. / nb_aubes, axe = axe)
    plan = appliquer_sur_multibloc(coupe, volume_duplique)
    plan = merge_multibloc(plan)
    # exclude the parts of the plane outside the throat: project each point
    # onto the throat segment and keep normalized abscissae between 0 and 1
    coords = get_vtk_array_as_numpy_array(plan, 'coords')
    data = numpy.dot(coords - pt_ligne,
                     vect_col) / numpy.linalg.norm(vect_col) ** 2
    data = numpy_support.numpy_to_vtk(data, deep = 1)
    data.SetName("dist")
    plan.GetPointData().AddArray(data)
    plan = set_scalaires_actifs(plan, "dist")
    select = vtk.vtkThreshold()
    vtk_set_input(select, plan)
    select.ThresholdBetween(0.0, 1.0)
    select.Update()
    col = select.GetOutput()
    col = convertir_en_polydata(col)
    return col
#____________________________________________________________________________
#____________________________________________________________________________
def dupliquer_canal(input, angle_periodicite, nb_canaux=2, axe=2):
    """Duplicate a mesh so that several blade channels sit side by side.

    Works on a multi-block as well as on a single-block VTK dataset.
    - angle_periodicite: periodicity angle in degrees; for a multi-block it
      MAY BE A LIST giving, for block number N, the angle to use
    - nb_canaux: total number of channels to build; must also be a list when
      angle_periodicite is a list
    - axe: rotation axis (0 for x, 1 for y, 2 for z)
    Returns a vtkMultiBlockDataSet holding the original block(s) plus the
    rotated copies.
    """
    # multi-block input
    if isinstance(input, vtk.vtkMultiBlockDataSet):
        multibloc_duplique = vtk_new_shallowcopy(input)
        nb_blocs = multibloc_duplique.GetNumberOfBlocks()
        if isinstance(angle_periodicite, list):
            if not isinstance(nb_canaux, list):
                # "raise E, msg" is python-2-only syntax; the call form below
                # behaves identically and is also valid python 3
                raise IOError("lorsque une liste d'angles est donnee en entree, le nb_canaux doit aussi etre une liste")
            for numbloc in get_numeros_blocs_non_vides(multibloc_duplique):
                angle_rotation = angle_periodicite[numbloc]
                nb_reconstruire = nb_canaux[numbloc]
                for num_canal in range(2, nb_reconstruire + 1):
                    # copies are stored at indices shifted by a whole number
                    # of decades so the original numbering stays readable
                    multibloc_duplique.SetBlock(numbloc + (num_canal - 1) * (nb_blocs // 10 + 1) * 10,
                        rotation(multibloc_duplique.GetBlock(numbloc), (num_canal - 1) * angle_rotation, axe)
                        )
        else:
            for numbloc in get_numeros_blocs_non_vides(multibloc_duplique):
                for num_canal in range(2, nb_canaux + 1):
                    multibloc_duplique.SetBlock(numbloc + (num_canal - 1) * (nb_blocs // 10 + 1) * 10,
                        rotation(multibloc_duplique.GetBlock(numbloc), (num_canal - 1) * angle_periodicite, axe)
                        )
    # single-block input
    else:
        if isinstance(angle_periodicite, list):
            raise IOError("impossible de donner plusieurs angles de periodicite pour un monobloc... !")
        multibloc_duplique = vtk.vtkMultiBlockDataSet()
        multibloc_duplique.SetBlock(1, input)
        for num_canal in range(2, nb_canaux + 1):
            multibloc_duplique.SetBlock(num_canal,
                rotation(input, (num_canal - 1) * angle_periodicite, axe))
    return multibloc_duplique
#____________________________________________________________________________
#____________________________________________________________________________
def dupliquer_canaux(input, angle_periodicite, num_canaux=[0, 1], axe=2):
    """Duplicate a mesh into the given channel numbers.

    Works on a multi-block as well as on a single-block VTK dataset.
    - angle_periodicite: periodicity angle in degrees; for a multi-block it
      MAY BE A LIST giving, for block number N, the angle to use
    - num_canaux: channel numbers to rebuild; when angle_periodicite is a
      list it must be a list of lists, one per block
      *** the channel numbers must be POSITIVE (or zero) ***
    - axe: rotation axis (0 for x, 1 for y, 2 for z)
    The mutable default is safe here: num_canaux is only iterated, never
    mutated; it is kept for interface compatibility.
    """
    # multi-block input
    if isinstance(input, vtk.vtkMultiBlockDataSet):
        multibloc_duplique = vtk_new_instance(input)
        nb_blocs = input.GetNumberOfBlocks()
        if isinstance(angle_periodicite, list):
            if not isinstance(num_canaux[0], list):
                # "raise E, msg" is python-2-only syntax; the call form below
                # behaves identically and is also valid python 3
                raise IOError("lorsque une liste d'angles est donnee en entree, le num_canaux doit etre une liste de listes contenant les numeros de canaux a reconstruire, pour chaque bloc")
            for numbloc in get_numeros_blocs_non_vides(input):
                angle_rotation = angle_periodicite[numbloc]
                num_a_reconstruire = num_canaux[numbloc]
                for num_canal in num_a_reconstruire:
                    # copies are stored at indices shifted by a whole number
                    # of decades so the original numbering stays readable
                    multibloc_duplique.SetBlock(numbloc + num_canal * (nb_blocs // 10 + 1) * 10,
                        rotation(input.GetBlock(numbloc), num_canal * angle_rotation, axe)
                        )
        else:
            for numbloc in get_numeros_blocs_non_vides(input):
                for num_canal in num_canaux:
                    multibloc_duplique.SetBlock(numbloc + num_canal * (nb_blocs // 10 + 1) * 10,
                        rotation(input.GetBlock(numbloc), num_canal * angle_periodicite, axe)
                        )
    # single-block input
    else:
        if isinstance(angle_periodicite, list):
            raise IOError("impossible de donner plusieurs angles de periodicite pour un monobloc... !")
        multibloc_duplique = vtk.vtkMultiBlockDataSet()
        multibloc_duplique.SetBlock(1, input)
        for num_canal in num_canaux:
            multibloc_duplique.SetBlock(num_canal,
                rotation(input, num_canal * angle_periodicite, axe))
    return multibloc_duplique
#____________________________________________________________________________
def hacher_diffuseur(volume, surface, coupe_2, coupe_1="coordx=65", retourner_normal_plan=False):
    """Tested on a RADIAL DIFFUSER only.

    Return a cut of *volume* by a plane perpendicular to *surface* (usually
    the blade surface). coupe_1 and coupe_2 are the extractions performed to
    find the point of *surface* used as the plane origin, typically
    coupe_1="coordx=65" and coupe_2="coordr=158". The plane normal is the
    cross product of the iHat unit vector with the surface normal at that
    point. *surface* is converted to polydata if needed. When
    retourner_normal_plan is True the plane normal is returned as well.
    """
    surface = convertir_en_polydata(surface, calculer_vecteur_normal = True)
    ligne = Extraction(input = surface, formule_extraction = coupe_1).get_output()
    point = Extraction(input = ligne, formule_extraction = coupe_2,
                       ).get_output()
    if point.GetNumberOfPoints() == 0:
        # "raise E, msg" is python-2-only syntax; the call form below behaves
        # identically and is also valid python 3
        raise IOError("impossible de trouver le point central du plan")
    normal = get_vtk_array_as_numpy_array(input = point, nom_array = 'Normals')[0, :]
    coords = get_vtk_array_as_numpy_array(input = point, nom_array = 'coords')[0, :]
    normal_plan = numpy.cross([1, 0, 0], normal)
    plan = vtk.vtkPlane()
    plan.SetOrigin(coords)
    plan.SetNormal(normal_plan)
    coupe = vtk.vtkCutter()
    coupe.SetCutFunction(plan)
    if not isinstance(volume, vtk.vtkMultiBlockDataSet):
        vtk_set_input(coupe, volume)
        coupe.Update()
        output = coupe.GetOutput()
    else:
        output = appliquer_sur_multibloc(coupe, volume)
    if retourner_normal_plan:
        return output, normal_plan
    else:
        return output
#____________________________________________________________________________
#__________________________________________________________________________
def moyenne_azimutale_hsH(vtkobject, liste_hsh, quantite, moyenne_debit=False,
                          nom_array_hsh = "hsH"):
    """Azimuthal average of a quantity at several vein heights.

    Parameters
    ----------
    vtkobject :
        VTK object (mono- or multi-block) on which to operate.
    liste_hsh : iterable of floats
        Vein heights at which to perform the azimuthal average.
    quantite : str
        Name (or formula) of the quantity to average.
    moyenne_debit : bool
        If True the average is mass-flow weighted (|momentum.Normals|);
        otherwise it is weighted by the azimuthal extent (r*theta).
    nom_array_hsh : str
        Name of the vein-height array ("hsH" by default; can be changed to
        adapt to other cases).

    Returns
    -------
    list of floats, one averaged value per requested height.
    """
    # Compute the cell surfaces of the cut.
    plan, surface_plan = calculer_surfaces_cellules(vtkobject, retourner_surface_totale = True)
    # Evaluate the quantity to average (and the weighting array).
    if moyenne_debit:
        calc = CalculettePyturbo(input = plan,
            a_calculer = (quantite + "*abs(momentum.Normals)", "abs(momentum.Normals)",
            ))
    else:
        calc = CalculettePyturbo(input = plan,
            a_calculer = (quantite, "coordtheta*coordr")
            )
    # Compute the average for each of the requested heights.
    valeurs_moyennes = []
    for hsH in liste_hsh:
        # Extract the constant-height line.  Bug fix: the extraction formula
        # previously hard-coded "hsH", silently ignoring `nom_array_hsh`.
        ligne = Extraction(input = calc.get_output(),
            formule_extraction = '{0}={1}'.format(nom_array_hsh, hsH)
            ).get_output()
        # Average along the line.
        if moyenne_debit:
            valeurs_moyennes.append(
                integrer_sur_la_surface(input = ligne,
                    array_a_integrer = quantite + "*abs(momentum.Normals)")
                / integrer_sur_la_surface(input = ligne,
                    array_a_integrer = "abs(momentum.Normals)")
                )
        else:
            # NOTE(review): `array_a_integrer = 1` integrates the constant 1
            # (weight normalization); verify the integer form is accepted.
            valeurs_moyennes.append(
                integrer_sur_la_surface(input = ligne, array_a_integrer = quantite,
                    array_poids = "coordtheta*coordr")
                / integrer_sur_la_surface(input = ligne, array_a_integrer = 1,
                    array_poids = "coordtheta*coordr")
                )
    return valeurs_moyennes
#__________________________________________________________________________
#__________________________________________________________________________
def moyenne_surfacique(vtkobject, quantite, moyenne_debit=False, array_momentum='momentum'):
    """Surface average of a quantity.

    Parameters
    ----------
    vtkobject :
        VTK object (mono- or multi-block) on which to operate.
    quantite : str
        Name (or formula) of the quantity to average.
    moyenne_debit : bool
        If True the average is mass-flow weighted (|momentum.Normals|);
        in that case momentum = ro*Velocity must be available at the nodes!
        Otherwise a plain area average is returned.
    array_momentum : str
        Name of the momentum array used for mass-flow weighting.
    """
    # Compute the cell surfaces (and the total area) of the cut.
    plan, surface_plan = calculer_surfaces_cellules(vtkobject, retourner_surface_totale = True)
    # Evaluate the quantity to average (and the weighting array if needed).
    if moyenne_debit:
        plan = CalculettePyturbo(input = plan,
            a_calculer = (quantite + "*abs({0}.Normals)".format(array_momentum), "abs({0}.Normals)".format(array_momentum))
            ).get_output()
    else:
        plan = CalculettePyturbo(input = plan, a_calculer = quantite).get_output()
    # Average over the surface.
    if moyenne_debit:
        valeurs_moyenne = \
            integrer_sur_la_surface(input = plan, array_a_integrer = quantite + "*abs({0}.Normals)".format(array_momentum)) \
            / integrer_sur_la_surface(input = plan, array_a_integrer = "abs({0}.Normals)".format(array_momentum))
    else:
        valeurs_moyenne = \
            integrer_sur_la_surface(input = plan, array_a_integrer = quantite) \
            / surface_plan
    return valeurs_moyenne
#__________________________________________________________________________
#__________________________________________________________________________
def integration_surfacique(vtkobject, quantite):
    """Surface integral of a quantity.

    Typically used to compute the mass flow through a surface, e.g.
    ``integration_surfacique(plan, "momentum.Normals")``.

    Parameters
    ----------
    vtkobject :
        VTK object (mono- or multi-block) on which to operate.
    quantite : str
        Name (or formula) of the quantity to integrate.
    """
    # Compute the cell surfaces of the cut.
    plan = calculer_surfaces_cellules(vtkobject)
    # Evaluate the quantity to integrate.
    plan = CalculettePyturbo(input = plan, a_calculer = quantite).get_output()
    # Integrate, weighting each cell by its surface.
    resultat_integration = integrer_sur_la_surface(input = plan,
        array_a_integrer = quantite, array_poids = 'CellSurface')
    return resultat_integration
#__________________________________________________________________________
#____________________________________________________________________________
def moyenne_sur_epaisseur(paroi, data, liste_ep):
    """Average point data over several sheets offset from a wall.

    For each thickness in ``liste_ep`` the wall ``paroi`` is displaced along
    its 'Normals' vector, ``data`` is probed onto the displaced sheet with
    fonctions_basiques.VTKProbe (works for mono- and multi-block data), and
    the probed point arrays are averaged over all sheets.  If ``paroi`` is a
    vtkStructuredGrid it is first converted to vtkPolyData with
    vtkGeometryFilter before entering the probe filter.
    """
    paroi.GetPointData().SetActiveVectors('Normals')
    # Strip every array except the normals so only probed data remains.
    for nom_array in get_noms_arrays_presents(paroi, 'points'):
        if nom_array != 'Normals':
            paroi.GetPointData().RemoveArray(nom_array)
    for nom_array in get_noms_arrays_presents(paroi, 'cellules'):
        paroi.GetCellData().RemoveArray(nom_array)
    liste_noms_arrays = get_noms_arrays_presents(data, loc = 'points')
    # One accumulator per array name; filled on the first sheet, summed after.
    dict_data = dict.fromkeys(liste_noms_arrays)
    f = vtk.vtkGeometryFilter()
    vtk_set_input(f, paroi)
    f.Update()
    paroi = f.GetOutput()
    for ep in liste_ep:
        print "epaisseur ", ep
        # Offset the wall by `ep` along its normals, then probe `data` onto it.
        warp = vtk.vtkWarpVector()
        vtk_set_input(warp, paroi)
        warp.SetScaleFactor(ep)
        warp.Update()
        warp = warp.GetOutput()
        warp = VTKProbe(input = warp, source = data)
        for array in liste_noms_arrays:
            if dict_data[array] is None:
                dict_data[array] = get_vtk_array_as_numpy_array(warp, array, True).reshape(
                    warp.GetPointData().GetArray(array).GetNumberOfTuples(),
                    warp.GetPointData().GetArray(array).GetNumberOfComponents())
            else:
                dict_data[array] += get_vtk_array_as_numpy_array(warp, array, True).reshape(
                    warp.GetPointData().GetArray(array).GetNumberOfTuples(),
                    warp.GetPointData().GetArray(array).GetNumberOfComponents())
    # Turn the sums into averages and attach them to the (un-warped) wall.
    for array in dict_data:
        dict_data[array] /= len(liste_ep)
        varray = numpy_support.numpy_to_vtk(dict_data[array], deep = 1)
        varray.SetName(array)
        paroi.GetPointData().AddArray(varray)
    return paroi
#____________________________________________________________________________
#____________________________________________________________________________
def calculer_champ_meridien(vtkDataObject,
                            maillage_regulier=None,
                            retourner_maillage_regulier=False,
                            hubFileName = "/media/FreeAgent GoFlex Drive/DATA_PI4/hub",
                            tipFileName = "/media/FreeAgent GoFlex Drive/DATA_PI4/shroud",
                            stepSize=2.0, relativeExtension=0.1,
                            numberOfPoints_xm = 75, numberOfPoints_h = 10,
                            dtheta = 2 * numpy.pi / (21 * 10)
                            ):
    """Return a plane carrying the meridional field.

    The field is the result of a simple weighted azimuthal average of
    ``vtkDataObject`` probed onto a regular (theta, h, xm) structured grid.
    If ``maillage_regulier`` is None the grid is built from the hub/shroud
    spline parametrization; pass ``retourner_maillage_regulier=True`` to get
    that grid back instead of the averaged plane (useful for reuse).
    """
    # Build the regular probing mesh if the caller did not supply one.
    if maillage_regulier is None:
        from UVParametrizationFilter import CreateSpline
        # NOTE(review): `numberOfPoints` and `coordx` below are unused.
        spline, numberOfPoints = CreateSpline(hubFileName, tipFileName, stepSize,
                                              relativeExtension=relativeExtension)
        coords = get_vtk_array_as_numpy_array(vtkDataObject, "coords")
        coordx = coords[:, 0]
        coordy = coords[:, 1]
        coordz = coords[:, 2]
        # Azimuthal extent of the data determines the theta sampling.
        coordtheta = numpy.arctan2(coordz, coordy)
        min_theta = coordtheta.min()
        max_theta = coordtheta.max()
        ntheta = int((max_theta - min_theta) // dtheta)
        print "Dimensions du grid ", numberOfPoints_xm, numberOfPoints_h, ntheta
        points = vtk.vtkPoints()
        uvMin = -relativeExtension
        uvMax = 1.0 + relativeExtension
        for _v in numpy.linspace(uvMin, uvMax, numberOfPoints_xm):
            print _v
            for _u in numpy.linspace(uvMin, uvMax, numberOfPoints_h):
                for theta in numpy.linspace(min_theta, max_theta, ntheta):
                    # NOTE(perf): spline(_u, _v) is invariant in the theta loop
                    # and could be hoisted one level up.
                    pnt = spline(_u, _v)
                    x = pnt[0]
                    y = pnt[1] * numpy.cos(theta)
                    z = pnt[1] * numpy.sin(theta)
                    points.InsertNextPoint(x, y, z)
        grid = vtk.vtkStructuredGrid()
        grid.SetDimensions(ntheta, numberOfPoints_h, numberOfPoints_xm)
        grid.SetPoints(points)
        # Return the regular mesh itself if requested.
        if retourner_maillage_regulier == 1:
            return grid
    else:
        grid = maillage_regulier
    # Probe, azimuthally average, and finally return the plane.
    grid = VTKProbe(input = grid, source = vtkDataObject)
    # For the returned plane, arbitrarily keep the i=0 slice of the mesh.
    plan = Extraction(input = grid, formule_extraction='i=0').get_output()
    # Average each quantity along the constant-(x, r) lines (theta axis).
    for nom_grandeur in get_noms_arrays_presents(vtkDataObject):
        grandeur = get_vtk_array_as_numpy_array(grid, nom_grandeur)
        if grandeur.size == grid.GetNumberOfPoints():
            grandeur = grandeur.reshape(grid.GetDimensions()[::-1])
        else:
            grandeur = grandeur.reshape(grid.GetDimensions()[::-1] + (3,))
        # Mask values below 0.1 (points probed outside the data). May need
        # revisiting -- amarsan.
        grandeur = numpy.ma.masked_less(grandeur, 0.1)
        grandeur = numpy.mean(grandeur, axis=2)
        grandeur = grandeur.ravel()
        plan = ajouter_numpy_array_as_vtk_array(plan, grandeur, nom_grandeur)
    return plan
#____________________________________________________________________________
#____________________________________________________________________________
def soustraction_vtk(vtk1, vtk2):
    """Field-wise difference of two VTK objects: vtk1 - vtk2.

    Both objects must share the same geometry; every array present on vtk1
    is replaced by its difference with the same-named array of vtk2.
    """
    resultat = vtk_new_shallowcopy(vtk1)
    for nom_array in get_noms_arrays_presents(resultat):
        difference = (get_vtk_array_as_numpy_array(resultat, nom_array)
                      - get_vtk_array_as_numpy_array(vtk2, nom_array))
        resultat = ajouter_numpy_array_as_vtk_array(resultat, difference, nom_array)
    return resultat
#____________________________________________________________________________
#____________________________________________________________________________
def addition_vtk(vtk1, vtk2):
    """Field-wise addition of two VTK objects: vtk1 + vtk2.

    Both objects must share the same geometry.  Use `somme_vtk` to add
    more than two objects at once.
    """
    resultat = vtk_new_shallowcopy(vtk1)
    for nom_array in get_noms_arrays_presents(resultat):
        somme = (get_vtk_array_as_numpy_array(resultat, nom_array)
                 + get_vtk_array_as_numpy_array(vtk2, nom_array))
        resultat = ajouter_numpy_array_as_vtk_array(resultat, somme, nom_array)
    return resultat
#____________________________________________________________________________
#____________________________________________________________________________
def somme_vtk(liste_vtk):
    """Field-wise sum of several VTK objects sharing the same geometry."""
    accumulateur = vtk_new_shallowcopy(liste_vtk[0])
    for objet in liste_vtk[1:]:
        for nom_array in get_noms_arrays_presents(accumulateur):
            cumul = (get_vtk_array_as_numpy_array(accumulateur, nom_array)
                     + get_vtk_array_as_numpy_array(objet, nom_array))
            accumulateur = ajouter_numpy_array_as_vtk_array(accumulateur, cumul, nom_array)
    return accumulateur
#____________________________________________________________________________
#____________________________________________________________________________
def moyenne_vtk(liste_vtk):
    """Field-wise average of several VTK objects sharing the same geometry.

    Implemented as a running mean: after folding in the k-th object the
    arrays hold the average of the first k objects.
    """
    resultat = vtk_new_shallowcopy(liste_vtk[0])
    for indice, objet in enumerate(liste_vtk[1:]):
        # Number of objects folded into the running mean so far.
        compteur = float(indice + 2)
        for nom_array in get_noms_arrays_presents(resultat):
            moyenne = (get_vtk_array_as_numpy_array(resultat, nom_array) * (compteur - 1.0)
                       + get_vtk_array_as_numpy_array(objet, nom_array)) / compteur
            resultat = ajouter_numpy_array_as_vtk_array(resultat, moyenne, nom_array)
    return resultat
#____________________________________________________________________________
#____________________________________________________________________________
def extruder(input, longueur, nb_step=None):
    """Extrude ``input`` along its 'Normals' vector over a length ``longueur``.

    Thin wrapper around vtkLinearExtrusionFilter (normal extrusion only; the
    filter's other options are not exposed here -- see the VTK documentation).
    The 'Normals' point array must be present.  ``input`` must be a
    vtkPolyData, or a multi-block dataset made of vtkPolyData blocks.

    When ``nb_step`` is given, the extrusion is split into ``nb_step - 1``
    successive layers (each layer shifted with ``decaler_paroi`` then
    extruded) appended into a single vtkPolyData.
    """
    # Multi-block input: recurse over every non-empty block.
    if isinstance(input, vtk.vtkMultiBlockDataSet):
        output = vtk_new_instance(input)
        for numbloc in get_numeros_blocs_non_vides(input):
            output.SetBlock(numbloc, extruder(input.GetBlock(numbloc), longueur, nb_step))
        return output
    # Single-block input.
    if nb_step is None:
        f = vtkFiltersModeling.vtkLinearExtrusionFilter()
        vtk_set_input(f, input)
        f.SetExtrusionTypeToNormalExtrusion()
        f.SetScaleFactor(longueur)
        f.Update()
        output = f.GetOutput()
    else:
        f = vtk.vtkAppendPolyData()
        dl = numpy.linspace(0, longueur, nb_step)
        for k in range(nb_step - 1):
            # Shift the wall by the already-extruded length, then extrude one layer.
            data = decaler_paroi(input, dl[k])
            data = extruder(data, dl[k + 1] - dl[k], None)
            # VTK >= 6 uses AddInputData; fall back to AddInput on older VTK.
            # (Was a bare `except:` that could mask unrelated errors.)
            try:
                f.AddInputData(data)
            except AttributeError:
                f.AddInput(data)
        f.Update()
        output = f.GetOutput()
    return output
#____________________________________________________________________________
##____________________________________________________________________________
#def extrusion(input, longueur, discretisation):
#"""Extrusion. Repose sur vtkLinearExtrusionFilter. Voir l'aide de VTK pour plus de details.
#extrusion dans la direction du vecteur Normals seulement.
#D'autres options sont disponibles dans le filtre VTK mais ne sont pas interfacees ici. Voir l'aide VTK.
#Normals doit etre present.
#input doit etre un vtkPolyData
#ou un multibloc compose de vtkPolyData
#"""
## cas multibloc
#if isinstance(input, vtk.vtkMultiBlockDataSet):
#output = vtk_new_instance(input)
#for numbloc in get_numeros_blocs_non_vides(input):
#output.SetBlock(numbloc, extrusion(input.GetBlock(numbloc), longueur, discretisation))
#return output
## cas monobloc
#coords_init = get_vtk_array_as_numpy_array(input, 'coords', 1)[None]
#normals = get_vtk_array_as_numpy_array(input, 'Normals')[None]
#coords = coords_init
#for dx in numpy.linspace(0, longueur, discretisation)[1:]:
#coords = numpy.r_[coords, coords_init + dx * normals]
#coords = coords[None]
#coords = coords.transpose(2, 1, 0, 3)
#output = create_bloc_structure_from_numpy_array(coords)
#return output
##____________________________________________________________________________
#__________________________________________________________________________________________
def read_fig(filename):
    """Read the curve data stored in a MATLAB ``.fig`` figure.

    Returns a dict mapping each 2-D line's ``DisplayName`` legend to an
    (N, 2) array of its (XData, YData) samples.
    """
    output = {}
    # .fig files are MAT files; the figure tree lives under this key.
    d = loadmat(filename, squeeze_me=True, struct_as_record=False)
    matfig = d['hgS_070000']
    childs = matfig.children
    # Keep only the first axes object of the figure.
    ax1 = [c for c in childs if c.type == 'axes'][0]
    for line in ax1.children:
        try:
            if line.type == 'graph2d.lineseries':
                x = line.properties.XData
                y = line.properties.YData
                leg = line.properties.DisplayName
                print leg
                output[leg] = numpy.column_stack((x, y))
        except:
            # NOTE(review): bare except silently skips malformed children;
            # consider narrowing to AttributeError.
            print 'One children is ignored...'
    return output
#__________________________________________________________________________________________
| aurmarsan/pyturbo | fonctions_speciales.py | Python | mit | 37,518 | [
"ParaView",
"VTK"
] | 0dfe324087b65550bf505e7761ca9c2aed6242be5b38a540d1c039b148da92cf |
#!/usr/bin/env python
# encoding: utf-8
'''
Created by Brian Cherinka on 2016-04-26 09:20:35
Licensed under a 3-clause BSD license.
Revision History:
Initial Version: 2016-04-26 09:20:35 by Brian Cherinka
Last Modified On: 2016-04-26 09:20:35 by Brian
'''
import numpy
from decimal import Decimal
from psycopg2.extensions import register_adapter, AsIs, new_type, DECIMAL, register_type
# See:
# http://rehalcon.blogspot.com/2010/03/sqlalchemy-programmingerror-cant-adapt.html
# and
# http://initd.org/psycopg/docs/advanced.html#adapting-new-python-types-to-sql-syntax
# and
# http://pyopengl.sourceforge.net/pydoc/numpy.core.numerictypes.html
#
# http://numpy.sourceforge.net/numdoc/HTML/numdoc.htm
''' numpy data types:
int8 int16 int32 int64 int128
uint8 uint16 uint32 uint64 uint128
float16 float32 float64 float96 float128 float256
complex32 complex64 complex128 complex192 complex256 complex512
'''
# Read PostgreSQL DECIMAL/NUMERIC result columns back as Python floats
# instead of decimal.Decimal.
DEC2FLOAT = new_type(DECIMAL.values, 'DEC2FLOAT', lambda value,
                     curs: float(value) if value is not None else None)
register_type(DEC2FLOAT)


def adapt_decimal(value):
    """Adapt a ``decimal.Decimal`` query parameter to a float SQL literal.

    Bug fix: the previous version returned ``AsIs(float)`` -- the *type*
    object itself -- which rendered the class repr in the SQL statement
    instead of the numeric value.
    """
    return AsIs(float(value))


register_adapter(Decimal, adapt_decimal)
def adapt_numpy_int8(value):
    """Render numpy.int8 parameters verbatim in SQL."""
    return AsIs(value)


register_adapter(numpy.int8, adapt_numpy_int8)


def adapt_numpy_int16(value):
    """Render numpy.int16 parameters verbatim in SQL."""
    return AsIs(value)


register_adapter(numpy.int16, adapt_numpy_int16)


def adapt_numpy_int32(value):
    """Render numpy.int32 parameters verbatim in SQL."""
    return AsIs(value)


register_adapter(numpy.int32, adapt_numpy_int32)


def adapt_numpy_int64(value):
    """Render numpy.int64 parameters verbatim in SQL."""
    return AsIs(value)


register_adapter(numpy.int64, adapt_numpy_int64)


# def adapt_numpy_int128(numpy_int128):
#     return AsIs(numpy_int128)
# register_adapter(numpy.int128, adapt_numpy_int128)


def adapt_numpy_uint8(value):
    """Render numpy.uint8 parameters verbatim in SQL."""
    return AsIs(value)


register_adapter(numpy.uint8, adapt_numpy_uint8)


def adapt_numpy_uint16(value):
    """Render numpy.uint16 parameters verbatim in SQL."""
    return AsIs(value)


register_adapter(numpy.uint16, adapt_numpy_uint16)


def adapt_numpy_uint32(value):
    """Render numpy.uint32 parameters verbatim in SQL."""
    return AsIs(value)


register_adapter(numpy.uint32, adapt_numpy_uint32)


def adapt_numpy_uint64(value):
    """Render numpy.uint64 parameters verbatim in SQL."""
    return AsIs(value)


register_adapter(numpy.uint64, adapt_numpy_uint64)


# def adapt_numpy_uint128(numpy_uint128):
#     return AsIs(numpy_uint128)
# register_adapter(numpy.uint128, adapt_numpy_uint128)


# def adapt_numpy_float16(numpy_float16):
#     return AsIs(numpy_float16)
# register_adapter(numpy.float16, adapt_numpy_float16)


def adapt_numpy_float32(value):
    """Render numpy.float32 parameters verbatim in SQL."""
    return AsIs(value)


register_adapter(numpy.float32, adapt_numpy_float32)


def adapt_numpy_float64(value):
    """Render numpy.float64 parameters verbatim in SQL."""
    return AsIs(value)


register_adapter(numpy.float64, adapt_numpy_float64)
# def adapt_numpy_float96(numpy_float96):
# return AsIs(numpy_float96)
# register_adapter(numpy.float96, adapt_numpy_float96)
# def adapt_numpy_float128(numpy_float128):
# return AsIs(numpy_float128)
# register_adapter(numpy.float128, adapt_numpy_float128)
# def adapt_numpy_float256(numpy_float256):
# return AsIs(numpy_float256)
# register_adapter(numpy.float256, adapt_numpy_float256)
# def adapt_numpy_complex32(numpy_complex32):
# return AsIs(numpy_complex32)
# register_adapter(numpy.complex32, adapt_numpy_complex32)
# def adapt_numpy_complex64(numpy_complex64):
# return AsIs(numpy_complex64)
# register_adapter(numpy.complex64, adapt_numpy_complex64)
# def adapt_numpy_complex128(numpy_complex128):
# return AsIs(numpy_complex128)
# register_adapter(numpy.complex128, adapt_numpy_complex128)
# def adapt_numpy_complex192(numpy_complex192):
# return AsIs(numpy_complex192)
# register_adapter(numpy.complex192, adapt_numpy_complex192)
# def adapt_numpy_complex256(numpy_complex256):
# return AsIs(numpy_complex256)
# register_adapter(numpy.complex256, adapt_numpy_complex256)
# def adapt_numpy_complex512(numpy_complex512):
# return AsIs(numpy_complex512)
# register_adapter(numpy.complex512, adapt_numpy_complex512)
def adapt_numpy_bool(value):
    """Render numpy.bool_ parameters verbatim (True/False) in SQL."""
    return AsIs(value)


register_adapter(numpy.bool_, adapt_numpy_bool)
def adapt_numpy_nan(numpy_nan):
    # Emit the PostgreSQL float literal for NaN.
    # NOTE(review): psycopg2 adapters normally return an ISQLQuote object
    # (e.g. AsIs); returning a plain str relies on unspecified behaviour.
    return "'NaN'"


# NOTE(review): register_adapter expects a *class*, but numpy.nan is a float
# instance -- it is unclear this registration ever matches; TODO confirm.
register_adapter(numpy.nan, adapt_numpy_nan)


def adapt_numpy_inf(numpy_inf):
    # Emit the PostgreSQL float literal for positive infinity.
    return "'Infinity'"


# NOTE(review): same concern as above -- numpy.inf is a float instance,
# not a type.
register_adapter(numpy.inf, adapt_numpy_inf)
def adapt_numpy_ndarray(value):
    """Adapt a numpy array to a SQL array literal via its nested-list form."""
    return AsIs(value.tolist())


register_adapter(numpy.ndarray, adapt_numpy_ndarray)
| albireox/marvin | python/marvin/db/NumpyAdaptors.py | Python | bsd-3-clause | 4,454 | [
"Brian"
] | 9169842cf9a6ebe81aa89f30c4507396ee259c8611aa0ceb799562a6ae7f2d17 |
""" pycorrfit.models.control
Controls which fitting models are imported an in which order.
"""
import numpy as np
from .classes import Model
def append_model(modelarray):
    """Register a new model (or several) from *modelarray*.

    *modelarray* is either a single model dictionary or a list of them; each
    dictionary is wrapped in a :class:`Model` and recorded in the module-level
    registries (``models``, ``modeldict``, ``values``, ``valuedict``,
    ``supplement``, ``boundaries``, ``modeltypes``).  See the separate model
    modules for the dictionary layout.

    Raises
    ------
    ValueError
        If a model with the same id has already been registered.
    """
    global values
    global valuedict
    global models
    global modeldict
    global supplement
    global boundaries
    global modeltypes
    if not isinstance(modelarray, list):
        modelarray = [modelarray]
    for datadict in modelarray:
        # We can have many models in one model array
        amod = Model(datadict)
        models.append(amod)
        if amod.id in modeldict:
            # Fixed error message (was: "Model with same is already exists").
            raise ValueError("Model with same id already exists: \n {} vs. {}".
                             format(amod, modeldict[amod.id]))
        modeldict[amod.id] = amod
        values.append(amod.parameters)
        valuedict[amod.id] = amod.parameters
        # Supplementary data might be there
        supplement[amod.id] = amod.func_supplements
        # Check functions - check for correct values
        boundaries[amod.id] = amod.boundaries
        # Add model type to internal type list.
        if amod.type is not None:
            modeltypes.setdefault(amod.type, []).append(amod.id)
def model_setup(modelid, name, comp, mtype, fctn, par_labels, par_values,
                par_vary=None, par_boundaries=None, par_constraints=None,
                par_hr_labels=None, par_hr_factors=None,
                supplementary_method=None,
                ):
    u"""Register a fitting model with PyCorrFit.

    This helper method does everything that is required to make a model
    available for PyCorrFit. The idea is that this method can be called from
    anywhere and thus we do not need to do the tedious work of adding models
    in the __init__.py file.

    Parameters
    ----------
    modelid : int
        Model identifier.
    name : str
        Name of the Model.
    comp : str
        Description of components of the model, e.g. "T+3D+2D"
    mtype : str
        Type of model, e.g. "Confocal (Gaussian)"
    fctn : callable
        The method that computes the model function. It must take
        two arguments. The first is of shape `par_values` and the
        second is a 2D array containing lag time and correlation.
    par_labels : list-like, strings
        The labels of each parameter in PyCorrFit dimensionless
        representation, i.e.

        unit of time        : 1 ms
        unit of inverse time: 1000 /s
        unit of distance    : 100 nm
        unit of Diff.coeff  : 10 µm²/s
        unit of inverse area: 100 /µm²
        unit of inv. volume : 1000 /µm³
    par_values : list-like, floats
        The parameter values in PyCorrFit dimensionless units.
    par_vary : list-like, bools or None
        A list describing which parameters should be varied during
        fitting. If not given, only the first element is set to `True`.
    par_boundaries : list-like, floats
        The parameter boundaries - two values for each parameter.
        Examples: [[0, np.inf], [0,1]]
    par_constraints : list of lists
        Constraints between parameters. For example, make sure parameter
        2 is always larger than parameter 1 and parameter 5 is always
        smaller than parameter 1: [[2, ">", 1], [5, "<", 1]]
        Parameter count starts at 0.
    par_hr_labels : list-like, strings
        User-defined human readable labels of the parameters. If this is
        set, `par_hr_factors` is also required.
    par_hr_factors : list-like, floats
        The multiplicative factors to get from `par_labels` to
        `par_hr_labels`.
    supplementary_method : callable
        A method that takes the parameters `par_values` and the countrate
        of the experiment as an argument and returns a dictionary of
        supplementary information.
    """
    # Sanity checks: every per-parameter list must match `par_values` in length.
    assert len(par_labels) == len(par_values)
    for p in [par_vary,
              par_boundaries,
              par_hr_labels,
              par_hr_factors,
              ]:
        if p is not None:
            assert len(p) == len(
                par_values), "Number of parameters must match!"
    # Human-readable labels and factors must be given together (or not at all).
    if par_hr_factors is None or par_hr_labels is None:
        assert par_hr_factors is None, "human readable requires two parameter"
        assert par_hr_labels is None, "human readable requires two parameter"
    if par_vary is None:
        # Default: only the first parameter is varied during fitting.
        par_vary = np.zeros(len(par_values), dtype=bool)
        par_vary[0] = True
    if par_hr_factors is None:
        # No human-readable variant requested: reuse labels with unit factors.
        par_hr_labels = par_labels
        par_hr_factors = np.ones_like(par_values)
    # Assemble the model dictionary understood by `append_model`.
    model = {}
    model["Parameters"] = [par_labels, par_values, par_vary,
                           par_hr_labels, par_hr_factors]
    model["Definitions"] = [modelid, comp, name, fctn, mtype]
    if supplementary_method is not None:
        model["Supplements"] = supplementary_method
    if par_boundaries is not None:
        model["Boundaries"] = par_boundaries
    if par_constraints is not None:
        model["Constraints"] = par_constraints
    append_model(model)
# Pack all variables
values = list()
# Also create a dictionary, key is modelid
valuedict = dict()
# Pack all models
models = list()
# Also create a dictionary
modeldict = dict()
# A dictionary for supplementary data:
supplement = dict()
# A dictionary containing model boundaries
boundaries = dict()

# shorttypes are used by the GUI to abbreviate the model type
shorttype = dict()
shorttype[u"Confocal (Gaussian)"] = u"CFoc"
shorttype[u"Confocal (Gaussian) and triplet"] = u"CFoc"
shorttype[u"Confocal (Gaussian) with double triplet"] = u"CFoc"
shorttype[u"TIR (Gaussian/Exp.)"] = u"TIR CFoc"
shorttype[u"TIR (□xσ/Exp.)"] = u"TIR □xσ"

# Create a list for the differentiation between the models
# This should make everything look a little cleaner
modeltypes = {}
modeltypes[u"User"] = []

# The order of the import matters!
# These modules register themselves on import via the `model_setup`
# method; they are imported for that side effect only.
from . import model_confocal_3d
from . import model_confocal_3d_3d
from . import model_confocal_2d
from . import model_confocal_2d_2d
from . import model_confocal_3d_2d
from . import model_confocal_t_3d
from . import model_confocal_t_3d_3d
from . import model_confocal_t_2d
from . import model_confocal_t_2d_2d
from . import model_confocal_t_3d_2d
from . import model_confocal_t_3d_3d_3d
from . import model_confocal_t_3d_3d_2d
from . import model_confocal_tt_3d
from . import model_confocal_tt_3d_3d
from . import model_confocal_tt_2d
from . import model_confocal_tt_2d_2d
from . import model_confocal_tt_3d_2d

# These lines can be removed once all models are converted
# from `MODEL_*` to `model_` syntax.
modeltypes[u"TIR (Gaussian/Exp.)"] = [6014, 6034, 6033]
modeltypes[u"TIR (□xσ/Exp.)"] = [6010, 6023, 6000, 6022, 6020, 6021]

# Old models
from . import MODEL_TIRF_gaussian_1C
from . import MODEL_TIRF_gaussian_3D2D
from . import MODEL_TIRF_gaussian_3D3D
from . import MODEL_TIRF_1C
from . import MODEL_TIRF_2D2D
from . import MODEL_TIRF_3D2D
from . import MODEL_TIRF_3D3D
from . import MODEL_TIRF_3D2Dkin_Ries

# Load all models from the imported "MODEL_*" submodules
# These are the models that were not imported using the `model_setup` method.
for g in list(globals().keys()):
    if g.startswith("MODEL_") and hasattr(globals()[g], "Modelarray"):
        append_model(globals()[g].Modelarray)
| paulmueller/PyCorrFit | pycorrfit/models/control.py | Python | gpl-2.0 | 7,712 | [
"Gaussian"
] | d2c38748313151c90068f435a242f86b845af19b53369d8e16f936f1dc083b74 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from future.utils import viewitems
from skbio.util._decorator import deprecated
class _CompressedNode(object):
"""Represents a node in the compressed trie
Parameters
----------
key : string
the key attached to the node
values : list of objects, optional
the values attached to this node
"""
def __init__(self, key, values=None):
self.values = values or []
self.key = key
self.children = {}
def __nonzero__(self):
return (self.key != "" or len(self.values) > 0 or
len(self.children.keys()) > 0)
def __len__(self):
"""Returns the number of values attached to the node
.. warning:: This method is recursive
"""
return sum(len(n) for n in self.children.values()) + len(self.values)
@property
def size(self):
"""int with the number of nodes below the node
.. warning:: This method is recursive
"""
return sum(n.size for n in self.children.values()) + 1
@property
def prefix_map(self):
"""Dict with the prefix map
Dictionary of {values: list of values} containing the prefix map
of this node
"""
mapping = {}
if len(self.children) == 0:
# we have a leaf
mapping = {self.values[0]: self.values[1:]}
else:
# we are at an internal node
for child in self.children.values():
mapping.update(child.prefix_map)
# get largest group
n = -1
key_largest = None
for key, value in viewitems(mapping):
if len(value) > n:
n = len(value)
key_largest = key
# append this node's values
mapping[key_largest].extend(self.values)
return mapping
def insert(self, key, value):
"""Inserts key with value in the node
Parameters
----------
key : string
The string key attached to the value
value : object
Object to attach to the key
"""
node_key_len = len(self.key)
length = min(node_key_len, len(key))
# Follow the key into the tree
split_node = False
index = 0
while index < length and not split_node:
split_node = key[index] != self.key[index]
index += 1
if split_node:
# Index has been incremented after split_node was set to true,
# decrement it to make it work
index -= 1
# We need to split up the node pointed by index
# Get the key for the new node
new_key_node = _CompressedNode(key[index:], [value])
# Get a new node for the old key node
old_key_node = _CompressedNode(self.key[index:], self.values)
old_key_node.children = self.children
self.children = {key[index]: new_key_node,
self.key[index]: old_key_node}
self.key = self.key[:index]
self.values = []
elif index == len(self.key) and index == len(key):
# The new key matches node key exactly
self.values.append(value)
elif index < node_key_len:
# Key shorter than node key
lower_node = _CompressedNode(self.key[index:], self.values)
lower_node.children = self.children
self.children = {self.key[index]: lower_node}
self.key = key
self.values = [value]
else:
# New key longer than current node key
node = self.children.get(key[index])
if node:
# insert into next node
node.insert(key[index:], value)
else:
# Create new node
new_node = _CompressedNode(key[index:], [value])
self.children[key[index]] = new_node
def find(self, key):
"""Searches for key and returns values stored for the key.
Parameters
----------
key : string
The key of the value to search for
Returns
-------
object
The value attached to the key
"""
# key exhausted
if len(key) == 0:
return self.values
# find matching part of key and node_key
min_length = min(len(key), len(self.key))
keys_diff = False
index = 0
while index < min_length and not keys_diff:
keys_diff = key[index] != self.key[index]
index += 1
if keys_diff:
return []
elif index == len(key):
# key and node_key match exactly
return self.values
else:
node = self.children.get(key[index])
if node:
# descend to next node
return node.find(key[index:])
return []
# Shared kwargs for the @deprecated decorator applied to the public trie API.
# Fixed a doubled word in the user-facing message ("replaced with with").
trie_deprecation_p = {
    'as_of': '0.4.0', 'until': '0.4.1', 'reason': (
        "scikit-bio's trie functionality will be replaced "
        "with functionality from a dedicated package. To track "
        "progress, see [#937]"
        "(https://github.com/biocore/scikit-bio/issues/937).")}
class CompressedTrie(object):
    """ A compressed (Patricia) trie for a list of (key, value) pairs

    Parameters
    ----------
    pair_list : list of tuples, optional
        List of (key, value) pairs to initialize the Trie
    """

    @deprecated(**trie_deprecation_p)
    def __init__(self, pair_list=None):
        self._root = _CompressedNode("")
        if pair_list:
            for key, value in pair_list:
                self.insert(key, value)

    @deprecated(**trie_deprecation_p)
    def __nonzero__(self):
        return bool(self._root)

    # Python 3 consults __bool__, not __nonzero__; keep truth testing
    # consistent across Python versions instead of falling back to __len__.
    __bool__ = __nonzero__

    @deprecated(**trie_deprecation_p)
    def __len__(self):
        return len(self._root)

    @property
    @deprecated(**trie_deprecation_p)
    def size(self):
        """int with the number of nodes in the Trie"""
        return self._root.size

    @property
    @deprecated(**trie_deprecation_p)
    def prefix_map(self):
        """Dict with the prefix map

        Dictionary of {values: list of values} containing the prefix map
        """
        return self._root.prefix_map

    @deprecated(**trie_deprecation_p)
    def insert(self, key, value):
        """Inserts key with value in Trie

        Parameters
        ----------
        key : string
            The string key attached to the value
        value : object
            Object to attach to the key
        """
        self._root.insert(key, value)

    @deprecated(**trie_deprecation_p)
    def find(self, key):
        """Searches for key and returns values stored for the key.

        Parameters
        ----------
        key : string
            The key whose values should be looked up

        Returns
        -------
        object
            The value attached to the key (empty list if the key is absent)
        """
        return self._root.find(key)
@deprecated(**trie_deprecation_p)
def fasta_to_pairlist(seqs):
    """Swap (label, seq) records into (seq, label) pairs for a Trie.

    Parameters
    ----------
    seqs : Iterable
        tuples of the form ``(label, seq)``

    Yields
    ------
    tuple
        Tuple of the form ``(seq, label)``.
    """
    for record_label, record_seq in seqs:
        yield record_seq, record_label
| xguse/scikit-bio | skbio/tree/_trie.py | Python | bsd-3-clause | 7,771 | [
"scikit-bio"
] | 5ce087b7ba47c1e37579666823575fbd63e464051987afefeef3d54331a879e5 |
""" ComponentMonitoring class is a front-end to the Component monitoring Database
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from urllib import parse
from DIRAC import gConfig, S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
from DIRAC.Core.Utilities import Time, Network
from DIRAC.ConfigurationSystem.Client.PathFinder import getSystemURLs
__RCSID__ = "$Id$"
class ComponentMonitoringDB(DB):
    """Front-end to the Framework/ComponentMonitoringDB database.

    Tracks DIRAC components (services and agents): registration in the
    ``compmon_Components`` table, per-component version history, heartbeat
    updates, and a status comparison of what is running in the DB against
    what the Configuration Service (CS) says should be running.
    """
    def __init__(self):
        """c'tor
        Initialize the DB
        """
        DB.__init__(self, "ComponentMonitoringDB", "Framework/ComponentMonitoringDB")
        retVal = self.__initializeDB()
        if not retVal["OK"]:
            raise Exception("Can't create tables: %s" % retVal["Message"])
        # Fields a component may optionally report at registration time.
        self.__optionalFields = ("startTime", "cycles", "version", "queries", "DIRACVersion", "description", "platform")
        # Column order used when SELECTing from the Components table
        # (consumed positionally in __getComponents).
        self.__mainFields = (
            "Id",
            "Setup",
            "Type",
            "ComponentName",
            "Host",
            "Port",
            "StartTime",
            "LastHeartbeat",
            "cycles",
            "queries",
            "LoggingState",
        )
        # Column order used when SELECTing from the VersionHistory table.
        self.__versionFields = ("VersionTimestamp", "Version", "DIRACVersion", "Platform", "Description")
    def getOptionalFields(self):
        # Accessor for the optional registration fields tuple.
        return self.__optionalFields
    def __getTableName(self, name):
        # All tables of this DB share the "compmon_" prefix.
        return "compmon_%s" % name
    def __initializeDB(self):
        """
        Create the tables
        """
        retVal = self._query("show tables")
        if not retVal["OK"]:
            return retVal
        tablesInDB = [t[0] for t in retVal["Value"]]
        # Only schedule creation of tables that do not exist yet.
        tablesD = {}
        tN = self.__getTableName("Components")
        if tN not in tablesInDB:
            tablesD[tN] = {
                "Fields": {
                    "Id": "INTEGER AUTO_INCREMENT NOT NULL",
                    "ComponentName": "VARCHAR(255) NOT NULL",
                    "Setup": "VARCHAR(255) NOT NULL",
                    "Type": 'ENUM ( "service", "agent" ) NOT NULL',
                    "Host": "VARCHAR(255) NOT NULL",
                    "Port": "INTEGER DEFAULT 0",
                    "LastHeartbeat": "DATETIME NOT NULL",
                    "StartTime": "DATETIME NOT NULL",
                    "LoggingState": 'VARCHAR(64) DEFAULT "unknown"',
                    "Cycles": "INTEGER",
                    "Queries": "INTEGER",
                },
                "PrimaryKey": "Id",
                "Indexes": {
                    "ComponentIndex": ["ComponentName", "Setup", "Host", "Port"],
                    "TypeIndex": ["Type"],
                },
            }
        tN = self.__getTableName("VersionHistory")
        if tN not in tablesInDB:
            tablesD[tN] = {
                "Fields": {
                    "CompId": "INTEGER NOT NULL",
                    "VersionTimestamp": "DATETIME NOT NULL",
                    "Version": "VARCHAR(255)",
                    "DIRACVersion": "VARCHAR(255) NOT NULL",
                    "Platform": "VARCHAR(255) NOT NULL",
                    "Description": "BLOB",
                },
                "Indexes": {"Component": ["CompId"]},
            }
        return self._createTables(tablesD)
    def __datetime2str(self, dt):
        """
        This method converts the datetime type to a string type.
        """
        if isinstance(dt, six.string_types):
            return dt
        return "%s-%s-%s %s:%s:%s" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
    def __registerIfNotThere(self, compDict):
        """
        Registers the component if it's not there
        """
        sqlCond = []
        sqlInsertFields = []
        sqlInsertValues = []
        tableName = self.__getTableName("Components")
        for field in ("componentName", "setup", "type", "host", "port"):
            if field not in compDict:
                # "port" is the only optional identifying field (agents have none).
                if field == "port":
                    continue
                return S_ERROR("Missing %s field in the component dict" % field)
            value = compDict[field]
            # NOTE: str.capitalize() lowercases the rest of the word
            # ("componentName" -> "Componentname"); this still matches the
            # column because MySQL identifiers are case-insensitive.
            field = field.capitalize()
            sqlInsertFields.append(field)
            sqlInsertValues.append("'%s'" % value)
            sqlCond.append("%s = '%s'" % (field, value))
        compLogName = ":".join(sqlInsertValues).replace("'", "")
        self.log.info("Trying to register %s" % compLogName)
        result = self._query("SELECT id FROM `%s` WHERE %s" % (tableName, " AND ".join(sqlCond)))
        if not result["OK"]:
            self.log.error("Cannot register component", "%s: %s" % (compLogName, result["Message"]))
            return result
        if len(result["Value"]):
            # Already registered: return the existing component id.
            compId = result["Value"][0][0]
            self.log.info("%s has compId %s" % (compLogName, compId))
            return S_OK(compId)
        # It's not there, we just need to insert it
        sqlInsertFields.append("LastHeartbeat")
        sqlInsertValues.append("UTC_TIMESTAMP()")
        if "startTime" in compDict:
            sqlInsertFields.append("StartTime")
            val = compDict["startTime"]
            if isinstance(val, Time._allDateTypes):
                val = self.__datetime2str(val)
            sqlInsertValues.append("'%s'" % val)
        # Counters default to 0 when the component did not report them.
        for field in ("cycles", "queries"):
            if field not in compDict:
                compDict[field] = 0
            value = compDict[field]
            field = field.capitalize()
            sqlInsertFields.append(field)
            sqlInsertValues.append(str(value))
        self.log.info("Registering component %s" % compLogName)
        result = self._update(
            "INSERT INTO `%s` ( %s ) VALUES ( %s )"
            % (tableName, ", ".join(sqlInsertFields), ", ".join(sqlInsertValues))
        )
        if not result["OK"]:
            return result
        compId = result["lastRowId"]
        self.log.info("%s has compId %s" % (compLogName, compId))
        return S_OK(compId)
    def __updateVersionHistoryIfNeeded(self, compId, compDict):
        """
        Updates the version history given the condition dictionary and component id.
        """
        sqlCond = ["CompId=%s" % compId]
        sqlInsertFields = []
        sqlInsertValues = []
        tableName = self.__getTableName("VersionHistory")
        for field in ("version", "DIRACVersion", "platform"):
            if field not in compDict:
                return S_ERROR("Missing %s field in the component dict" % field)
            value = compDict[field]
            field = field.capitalize()
            sqlInsertFields.append(field)
            sqlInsertValues.append("'%s'" % value)
            sqlCond.append("%s = '%s'" % (field, value))
        result = self._query("SELECT CompId FROM `%s` WHERE %s" % (tableName, " AND ".join(sqlCond)))
        if not result["OK"]:
            return result
        if len(result["Value"]):
            # This exact version combination is already recorded: nothing to do.
            return S_OK(compId)
        # It's not there, we just need to insert it
        sqlInsertFields.append("CompId")
        sqlInsertValues.append(str(compId))
        sqlInsertFields.append("VersionTimestamp")
        sqlInsertValues.append("UTC_TIMESTAMP()")
        if "description" in compDict:
            sqlInsertFields.append("Description")
            # Description is free text, so it must be escaped before insertion.
            result = self._escapeString(compDict["description"])
            if not result["OK"]:
                return result
            sqlInsertValues.append(result["Value"])
        result = self._update(
            "INSERT INTO `%s` ( %s ) VALUES ( %s )"
            % (tableName, ", ".join(sqlInsertFields), ", ".join(sqlInsertValues))
        )
        if not result["OK"]:
            return result
        return S_OK(compId)
    def registerComponent(self, compDict, shallow=False):
        """
        Register a new component in the DB given a component dictionary and returns a component id.
        And if it's already registered it returns the corresponding component id.

        :param compDict: component description (componentName, setup, type, host, port, ...)
        :param shallow: when True, skip the version-history bookkeeping.
        """
        result = self.__registerIfNotThere(compDict)
        if not result["OK"]:
            return result
        compId = result["Value"]
        if shallow:
            return S_OK(compId)
        # Check if something has changed in the version history
        result = self.__updateVersionHistoryIfNeeded(compId, compDict)
        if not result["OK"]:
            return result
        return S_OK(compId)
    def heartbeat(self, compDict):
        """
        Updates the heartbeat
        """
        # Lazily register the component on its first heartbeat.
        if "compId" not in compDict:
            result = self.__registerIfNotThere(compDict)
            if not result["OK"]:
                return result
            compId = result["Value"]
            compDict["compId"] = compId
        sqlUpdateFields = ["LastHeartbeat=UTC_TIMESTAMP()"]
        for field in ("cycles", "queries"):
            value = 0
            if field in compDict:
                value = compDict[field]
            sqlUpdateFields.append("%s=%s" % (field.capitalize(), value))
        if "startTime" in compDict:
            sqlUpdateFields.append("StartTime='%s'" % self.__datetime2str(compDict["startTime"]))
        return self._update(
            "UPDATE `%s` SET %s WHERE Id=%s"
            % (self.__getTableName("Components"), ", ".join(sqlUpdateFields), compDict["compId"])
        )
    def __getComponents(self, condDict):
        """
        Loads the components from the DB.

        :type condDict: dictionary
        :param condDict: field -> value (or list of values) used to build the WHERE clause.
        :return: S_OK with a StatusSet of the matching components / the error message.
        """
        compTable = self.__getTableName("Components")
        mainFields = ", ".join(self.__mainFields)
        versionTable = self.__getTableName("VersionHistory")
        versionFields = ", ".join(self.__versionFields)
        sqlWhere = []
        for field in condDict:
            val = condDict[field]
            # Scalars become an equality test; iterables become an OR of equalities.
            if isinstance(val, six.string_types):
                sqlWhere.append("%s='%s'" % (field, val))
            elif isinstance(val, six.integer_types + (float,)):
                sqlWhere.append("%s='%s'" % (field, val))
            else:
                sqlWhere.append("( %s )" % " OR ".join(["%s='%s'" % (field, v) for v in val]))
        if sqlWhere:
            sqlWhere = "WHERE %s" % " AND ".join(sqlWhere)
        else:
            sqlWhere = ""
        result = self._query("SELECT %s FROM `%s` %s" % (mainFields, compTable, sqlWhere))
        if not result["OK"]:
            return result
        records = []
        dbData = result["Value"]
        for record in dbData:
            # Map the positional SELECT result back onto named fields.
            rD = {}
            for i in range(len(self.__mainFields)):
                rD[self.__mainFields[i]] = record[i]
            # Attach the most recent version-history entry, if any.
            result = self._query(
                "SELECT %s FROM `%s` WHERE CompId=%s ORDER BY VersionTimestamp DESC LIMIT 1"
                % (versionFields, versionTable, rD["Id"])
            )
            if not result["OK"]:
                return result
            if len(result["Value"]) > 0:
                versionRec = result["Value"][0]
                for i in range(len(self.__versionFields)):
                    rD[self.__versionFields[i]] = versionRec[i]
            del rD["Id"]
            records.append(rD)
        return S_OK(StatusSet(records))
    def __checkCondition(self, condDict, field, value):
        """
        It is used to check if a field is present in the condition dictionary or not with the corresponding value.

        :type condDict: dictionary
        :param condDict: The dictionary containing the conditions.
        :type field: string
        :param field: The field.
        :type value: string
        :param value: The value.
        :return: True / False (absent fields are treated as matching anything)
        """
        if field not in condDict:
            return True
        condVal = condDict[field]
        if isinstance(condVal, (list, tuple)):
            return value in condVal
        return value == condVal
    def __getComponentDefinitionFromCS(self, system, setup, instance, cType, component):
        """
        Gets the basic component details from the configuration file.

        :type system: string
        :param system: The system name.
        :type setup: string
        :param setup: The setup site.
        :type instance: string
        :param instance: The instance.
        :type cType: string
        :param cType: The component type ("agent" or "service").
        :type component: string
        :param component: The component name.
        :return: a component dictionary; on CS inconsistencies the dictionary
                 carries Status="Error" and an explanatory Message.
        """
        componentName = "%s/%s" % (system, component)
        compDict = {"ComponentName": componentName, "Type": cType, "Setup": setup}
        componentSection = "/Systems/%s/%s/%s/%s" % (system, instance, "%ss" % cType.capitalize(), component)
        compStatus = gConfig.getValue("%s/Status" % componentSection, "Active")
        if compStatus.lower() in ("inactive",):
            compDict["Status"] = compStatus.lower().capitalize()
        if cType == "service":
            # Services must define a numeric Port in the CS.
            result = gConfig.getOption("%s/Port" % componentSection)
            if not result["OK"]:
                compDict["Status"] = "Error"
                compDict["Message"] = "Component seems to be defined wrong in CS: %s" % result["Message"]
                return compDict
            try:
                compDict["Port"] = int(result["Value"])
            except Exception:
                compDict["Status"] = "Error"
                compDict["Message"] = "Port for component doesn't seem to be a number"
                return compDict
        return compDict
    def __componentMatchesCondition(self, compDict, requiredComponents, conditionDict={}):
        """
        This method uses __checkCondition method to check if the (key, field) inside component dictionary
        are already present in condition dictionary or not.

        NOTE(review): ``requiredComponents`` is never read here, and the mutable
        default ``conditionDict={}`` is the classic Python pitfall (harmless here
        since it is never mutated) — candidates for cleanup.
        """
        for key in compDict:
            if not self.__checkCondition(conditionDict, key, compDict[key]):
                return False
        return True
    def getComponentsStatus(self, conditionDict={}):
        """
        Get the status of the defined components in the CS compared to the ones that are known in the DB

        :type conditionDict: dictionary
        :param conditionDict: The dictionary containing the conditions.
        :return: S_OK with the required results.
        """
        result = self.__getComponents(conditionDict)
        if not result["OK"]:
            return result
        statusSet = result["Value"]
        requiredComponents = {}
        result = gConfig.getSections("/DIRAC/Setups")
        if not result["OK"]:
            return result
        for setup in result["Value"]:
            if not self.__checkCondition(conditionDict, "Setup", setup):
                continue
            # Iterate through systems
            result = gConfig.getOptionsDict("/DIRAC/Setups/%s" % setup)
            if not result["OK"]:
                return result
            systems = result["Value"]
            for system in systems:
                instance = systems[system]
                # Check defined agents and services
                for cType in ("agent", "service"):
                    # Get entries for the instance of a system
                    result = gConfig.getSections("/Systems/%s/%s/%s" % (system, instance, "%ss" % cType.capitalize()))
                    if not result["OK"]:
                        continue
                    components = result["Value"]
                    for component in components:
                        compDict = self.__getComponentDefinitionFromCS(system, setup, instance, cType, component)
                        if self.__componentMatchesCondition(compDict, requiredComponents, conditionDict):
                            statusSet.addUniqueToSet(requiredComponents, compDict)
                # Walk the URLs
                systemURLs = getSystemURLs(system, setup)  # verify URLs in getSystemURLs method
                for service in systemURLs:
                    # systemURLs is a dict that contain a list of URLs for service
                    if not systemURLs[service]:
                        self.log.error("Not found URL for %s service." % service)
                        continue
                    url = parse.urlparse(systemURLs[service][0])
                    if self.__componentMatchesCondition(
                        dict(
                            Setup=setup,
                            Port=url.port,
                            Host=url.hostname,
                            Type="service",
                            ComponentName=system + "/" + service,
                        ),
                        requiredComponents,
                        conditionDict,
                    ):
                        # NOTE(review): this adds ``compDict`` left over from the
                        # previous loop, not the URL-derived dict that was just
                        # matched — looks like a bug; confirm intended behavior.
                        statusSet.addUniqueToSet(requiredComponents, compDict)
        # WALK THE DICT
        statusSet.setComponentsAsRequired(requiredComponents)
        return S_OK(
            (statusSet.getRequiredComponents(), self.__mainFields[1:] + self.__versionFields + ("Status", "Message"))
        )
class StatusSet(object):
    """
    This class is used to set component status as required and this method is used only by the
    ComponentMonitoringDB class.

    It keeps two nested dictionaries, both keyed by (Setup, Type, ComponentName):
    the components found in the DB and the components required by the CS.
    """
    def __init__(self, dbRecordsList=None):
        """
        :type dbRecordsList: list
        :param dbRecordsList: optional DB records used to seed the set.
        """
        self.__requiredSet = {}
        self.__requiredFields = ("Setup", "Type", "ComponentName")
        # Components whose last heartbeat is older than this are flagged as errors.
        self.__maxSecsSinceHeartbeat = 600
        # BUG FIX: the default used to be a mutable ``[]`` shared across calls;
        # normalize None to a fresh list instead.
        self.setDBRecords(dbRecordsList if dbRecordsList is not None else [])
    def setDBRecords(self, recordsList):
        """
        This sets the DB records given a records list.

        :type recordsList: list
        :param recordsList: a set of records.
        :return: S_OK
        """
        self.__dbSet = {}
        for record in recordsList:
            cD = self.walkSet(self.__dbSet, record)
            cD.append(record)
        return S_OK()
    def addUniqueToSet(self, setDict, compDict):
        """
        Adds unique components to a separate set.

        If any component in the same leaf is marked Inactive, the whole leaf
        is marked Inactive.

        :type setDict: dictionary
        :param setDict: The set dictionary.
        :type compDict: dictionary
        :param compDict: The dictionary containing the component information.
        """
        rC = self.walkSet(setDict, compDict)
        if compDict not in rC:
            rC.append(compDict)
            inactive = False
            for cD in rC:
                if "Status" in cD and cD["Status"] == "Inactive":
                    inactive = True
                    break
            if inactive:
                for cD in rC:
                    cD["Status"] = "Inactive"
    def walkSet(self, setDict, compDict, createMissing=True):
        """
        Walks (and optionally builds) the nested set dictionary down to the
        leaf list for compDict's (Setup, Type, ComponentName).

        :type setDict: dictionary
        :param setDict: The set dictionary.
        :type compDict: dictionary
        :param compDict: The dictionary containing the component information.
        :type createMissing: bool
        :param createMissing: A variable for adding missing values.
        :return: The leaf list, or None when absent and createMissing is False.
        """
        sD = setDict
        for field in self.__requiredFields:
            val = compDict[field]
            if val not in sD:
                if not createMissing:
                    return None
                # The last level holds a list of components; intermediate
                # levels hold nested dictionaries.
                if field == self.__requiredFields[-1]:
                    sD[val] = []
                else:
                    sD[val] = {}
            sD = sD[val]
        return sD
    def __reduceComponentList(self, componentList):
        """
        Only keep the most restrictive components.

        A component is dropped when another component in the list matches all
        of its fields (i.e. is at least as restrictive).

        :type componentList: list
        :param componentList: A list of components.
        :return: A list of reduced components.
        """
        for i in range(len(componentList)):
            component = componentList[i]
            for j in range(len(componentList)):
                # Skip self-comparison and entries already discarded.
                if i == j or componentList[j] is False:
                    continue
                potentiallyMoreRestrictiveComponent = componentList[j]
                match = True
                for key in component:
                    if key not in potentiallyMoreRestrictiveComponent:
                        match = False
                        break
                    if key == "Host":
                        # Hosts may be aliases of each other; compare via DNS.
                        result = Network.checkHostsMatch(component[key], potentiallyMoreRestrictiveComponent[key])
                        if not result["OK"] or not result["Value"]:
                            match = False
                            break
                    else:
                        if component[key] != potentiallyMoreRestrictiveComponent[key]:
                            match = False
                            break
                if match:
                    componentList[i] = False
                    break
        return [comp for comp in componentList if comp]
    def setComponentsAsRequired(self, requiredSet):
        """
        Sets component details according to the required set.

        :type requiredSet: dictionary
        :param requiredSet: The required set dictionary.
        """
        for setup in requiredSet:
            for cType in requiredSet[setup]:
                for name in requiredSet[setup][cType]:
                    # Need to narrow down required
                    cDL = requiredSet[setup][cType][name]
                    cDL = self.__reduceComponentList(cDL)
                    self.__setComponentListAsRequired(cDL)
    def __setComponentListAsRequired(self, compDictList):
        # Dispatch on whether the DB knows about this (Setup, Type, Name) at all.
        dbD = self.walkSet(self.__dbSet, compDictList[0], createMissing=False)
        if not dbD:
            self.__addMissingDefinedComponents(compDictList)
            return S_OK()
        self.__addFoundDefinedComponent(compDictList)
        return S_OK()
    def __addMissingDefinedComponents(self, compDictList):
        # CS defines these components but the DB has never seen them.
        cD = self.walkSet(self.__requiredSet, compDictList[0])
        for compDict in compDictList:
            compDict = self.__setStatus(compDict, "Error", "Component is not up or hasn't connected to register yet")
            cD.append(compDict)
    def __setStatus(self, compDict, status, message=False):
        """
        Sets status within the component dict (without overwriting an existing one).

        :type compDict: dictionary
        :param compDict: The component dictionary.
        :type status: string
        :param status: the status.
        :type message: string or bool
        :param message: optional message to attach alongside the status.
        :return: A component dictionary.
        """
        if "Status" in compDict:
            return compDict
        compDict["Status"] = status
        if message:
            compDict["Message"] = message
        return compDict
    def __addFoundDefinedComponent(self, compDictList):
        # CS defines these components and the DB has records for them:
        # reconcile the two views and flag discrepancies.
        cD = self.walkSet(self.__requiredSet, compDictList[0])
        dbD = self.walkSet(self.__dbSet, compDictList[0])
        now = Time.dateTime()
        unmatched = compDictList
        for dbComp in dbD:
            if "Status" not in dbComp:
                self.__setStatus(dbComp, "OK")
                if dbComp["Type"] == "service":
                    if "Port" not in dbComp:
                        self.__setStatus(dbComp, "Error", "Port is not defined")
                    elif dbComp["Port"] not in [compDict["Port"] for compDict in compDictList if "Port" in compDict]:
                        self.__setStatus(
                            compDictList[-1],
                            "Error",
                            "Port (%s) is different that specified in the CS" % dbComp["Port"],
                        )
                # Stale heartbeat => the component is probably down.
                elapsed = now - dbComp["LastHeartbeat"]
                elapsed = elapsed.days * 86400 + elapsed.seconds
                if elapsed > self.__maxSecsSinceHeartbeat:
                    self.__setStatus(
                        dbComp,
                        "Error",
                        "Last heartbeat was received at %s (%s secs ago)" % (dbComp["LastHeartbeat"], elapsed),
                    )
            cD.append(dbComp)
            # See if we have a perfect match
            newUnmatched = []
            for unmatchedComp in unmatched:
                perfectMatch = True
                for field in unmatchedComp:
                    if field in ("Status", "Message"):
                        continue
                    if field not in dbComp:
                        perfectMatch = False
                        continue
                    if field == "Host":
                        result = Network.checkHostsMatch(unmatchedComp[field], dbComp[field])
                        if not result["OK"] or not result["Value"]:
                            perfectMatch = False
                    else:
                        if unmatchedComp[field] != dbComp[field]:
                            perfectMatch = False
                if not perfectMatch:
                    newUnmatched.append(unmatchedComp)
            unmatched = newUnmatched
        for unmatchedComp in unmatched:
            self.__setStatus(unmatchedComp, "Error", "There is no component up with this properties")
            cD.append(unmatchedComp)
    def getRequiredComponents(self):
        # Accessor for the reconciled required-components tree.
        return self.__requiredSet
| ic-hep/DIRAC | src/DIRAC/FrameworkSystem/DB/ComponentMonitoringDB.py | Python | gpl-3.0 | 25,121 | [
"DIRAC"
] | 56f155b84b3105e4af30ebe0703cf3f1aaae3264f9ee449f9598de31e6c85280 |
# -*- coding: utf-8 -*-
__author__ = 'José Francisco Velez & Rainer Arencibia'
import logging
import numpy as np
import os
import sys
import time
from PyQt4 import QtCore, QtGui
import cv2
from ViewerWindow_uy import Ui_ViewerWindow
from WindowAboutus import Ui_Dialog
from src.Builders.Canny.CannyBuilder import CannyBuilder
from src.Builders.GaussianBlur.GaussianBlurBuilder import GaussianBlurBuilder
from src.Builders.Histogram.HistogramBuilder import HistogramBuilder
"""
The MIT License (MIT)
Copyright (c) 2016 Rainer Arencibia
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class WindowAboutus(QtGui.QMainWindow):
    """
    "About us" dialog window: shows information about this application and its developers.
    """
    def __init__(self):
        QtGui.QWidget.__init__(self)
        # Instantiate the Qt-Designer-generated layout and attach it to this window.
        self.ui = Ui_Dialog()
        self.ui.setupUi(self)
        # Lock the window to the size computed by setupUi (non-resizable dialog).
        self.setFixedSize(self.size())
class ViewerWindow(QtGui.QMainWindow):
    """Main application window.

    Loads an image or video, shows it on the "viewer" tab, applies filters
    (histogram, Gaussian blur, Canny) and shows their result on the "preview"
    tab, from where it can be saved.
    """
    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_ViewerWindow()
        self.ui.setupUi(self)
        # Route mouse presses on both graphics views to our pixel-info handlers.
        self.ui.graphicsView.setMouseTracking(True)
        self.ui.graphicsView.mousePressEvent = self.mouse_press_event_viewer
        self.ui.preview.mousePressEvent = self.mouse_press_event_preview
        # Secondary "About us" window, created once and shown on demand.
        self.window_aboutus = WindowAboutus()
        # image_viewer holds the source image shown on the viewer tab;
        # image_preview holds the last filter result (kept so it can be saved).
        self.image_viewer = None
        self.image_preview = None
        self.url = ''
        # Elapsed-time clock, refreshed once per second.
        self.start = time.time()
        self.timer = QtCore.QTimer()
        self.timer.connect(self.timer, QtCore.SIGNAL('timeout()'), self.display_time_elapsed)
        self.timer.start(1000)
    def _show_pixel_info(self, image, event):
        """Update the status labels with the coordinates and RGB value under the cursor.

        :param image: numpy array backing the clicked view (may be None before a load).
        :param event: the Qt mouse event carrying the click position.
        """
        position = QtCore.QPoint(event.pos().x(), event.pos().y())
        qimg = self.to_qimage(image)
        color = QtGui.QColor.fromRgb(qimg.pixel(position))
        if color.isValid():
            self.ui.coordsLabel.setText(' '.join([str(event.pos().x()), 'x', str(event.pos().y())]))
            self.ui.valueLabel.setText('RGB: ' + ' '.join([str(color.red()), 'x', str(color.green()), 'x', str(color.blue())]))
            self.ui.zoomLabel.setText(str(1.0))
    def mouse_press_event_viewer(self, event):
        # Viewer tab click: report pixel info of the loaded source image.
        self._show_pixel_info(self.image_viewer, event)
    def mouse_press_event_preview(self, event):
        # Preview tab click: report pixel info of the filter result.
        self._show_pixel_info(self.image_preview, event)
    def _prompt_for_source(self):
        """Ask the user for an image/video file if none is loaded yet.

        Shared by every filter action: prompts only when ``self.url`` is still
        empty, and immediately displays the chosen file on the viewer tab.
        (The old per-action copies also concatenated a stray 'Choose your file'
        literal into the filter string; that has been removed.)
        """
        if self.url == '':
            fd = QtGui.QFileDialog(self)
            self.url = str(fd.getOpenFileName(
                self, 'Open an image or a video file', '/',
                "Images (*.bmp *.dib *.jpeg *.jpe *.jpg *.pbm *.pgm *.png *.ppm *.ras *.sr)"
                ";;Videos (*.avi *flv *mp4 *mpeg *mpg *m4v *wmv)"))
            self.drawImageViewer(self.url)
    def display_time_elapsed(self):
        """Refresh the HH:MM:SS elapsed-time label (driven by the 1 s timer)."""
        secs = time.time() - self.start
        minutes = (secs / 60) % 60
        hours = (secs / 3600)
        secs %= 60
        self.ui.elapsedTime.setText(''.join([str('%02d' % int(hours)), ':', str('%02d' % int(minutes)), ':',
                                             str('%02d' % int(secs))]))
    @QtCore.pyqtSlot()
    def on_actionHistogram_triggered(self):
        """
        This method and slot will generate a histogram of a Color, Gray or Bitonal image.

        :return: Will show a new window with the Histogram information.
        """
        self._prompt_for_source()
        builder = HistogramBuilder(self.url)
        builder.compute_histograms()
        builder.draw_histograms()
        builder.window_histogram.show()
        builder.window_histogram.exec_()
    @QtCore.pyqtSlot()
    def on_actionGaussianBlur_triggered(self):
        """
        This method and slot will generate a new image with the gaussian blur effect on it.

        :return: Will show the new image as result of the Gaussian Blur.
        """
        self._prompt_for_source()
        builder = GaussianBlurBuilder(self.url)
        builder.window_builder.show()
        # TODO: verify how the resulting image should be handed back from the builder.
        builder.window_builder.exec_()
        self.image_preview = builder.dst_image
        self.drawImagePreview(self.image_preview)
    @QtCore.pyqtSlot()
    def on_actionExit_triggered(self):
        # Log the shutdown and close the main window.
        logging.info(''.join([' ', str(time.asctime(time.localtime(time.time()))), ' : ',
                              'VisionViewer app Finished.']))
        self.close()
    @QtCore.pyqtSlot()
    def on_actionLog_2_triggered(self):
        """
        :return: Open the LOG file (Windows-only: relies on notepad.exe).
        """
        os.system("notepad.exe ./Extra/VisionViewer.log")
    @QtCore.pyqtSlot()
    def on_actionCanny_triggered(self):
        """
        This method and slot will generate a new image with the edges of the original image.

        :return: Will show the new image as result of the Canny effect depending on the setting selected by the user.
        """
        self._prompt_for_source()
        builder = CannyBuilder(self.url)
        dst = builder.canny()
        self.image_preview = dst
        self.drawImagePreview(dst)
    @QtCore.pyqtSlot()
    def on_actionAbout_us_triggered(self):
        """
        This method and slot will show the about us window.

        :return: a new window on the screen.
        """
        self.window_aboutus.show()
    @QtCore.pyqtSlot()
    def on_actionSave_Image_triggered(self):
        """
        Save the preview image to a user-chosen path.

        :return: saves an image if there is one in the preview tab; otherwise logs why not.
        """
        if self.image_preview is None:
            logging.info(''.join([' ', str(time.asctime(time.localtime(time.time()))), ' : ',
                                  'There is no image to save.']))
            return
        fd = QtGui.QFileDialog(self)
        s = str(fd.getSaveFileName())
        # BUG FIX: was "if s is not ''" — an identity comparison that is not a
        # reliable emptiness test for strings; use truthiness instead.
        if s:
            cv2.imwrite(s, self.image_preview)
        else:
            logging.info(''.join([' ', str(time.asctime(time.localtime(time.time()))), ' : ',
                                  'There is no Name or directory to save.']))
    @QtCore.pyqtSlot()
    def on_actionOpen_file_triggered(self):
        """
        :return: Open a photo or a video file.
        """
        fd = QtGui.QFileDialog(self)
        s = str(fd.getOpenFileName())
        self.drawImageViewer(s)
        self.image_viewer = cv2.imread(s)
        self.url = s
    def drawImageViewer(self, s):
        """
        :param s: string with the url of the picture loaded.
        :return: put the photo on the QGraphicsView of the viewer tab.
        """
        cvBGRImg = cv2.imread(s)
        # CONSISTENCY FIX: use cv2.COLOR_BGR2RGB (as to_qimage already does)
        # instead of the legacy cv2.cv.CV_BGR2RGB constant, which is gone in OpenCV 3+.
        cvRGBImg = cv2.cvtColor(cvBGRImg, cv2.COLOR_BGR2RGB)
        qimg = QtGui.QImage(cvRGBImg, cvRGBImg.shape[1], cvRGBImg.shape[0], cvBGRImg.strides[0],
                            QtGui.QImage.Format_RGB888)
        qpm = QtGui.QPixmap.fromImage(qimg)
        scene = QtGui.QGraphicsScene()
        scene.addPixmap(qpm)
        self.ui.graphicsView.setScene(scene)
        self.image_viewer = cvRGBImg
    def drawImagePreview(self, img):
        """
        :param img: the result image (numpy array).
        :return: put the image on the QGraphicsView of the preview tab.
        """
        q_img = self.to_qimage(img)
        qpm = QtGui.QPixmap.fromImage(q_img)
        scene = QtGui.QGraphicsScene()
        scene.addPixmap(qpm)
        self.ui.preview.setScene(scene)
        self.image_preview = img
    def to_qimage(self, img, copy=False):
        """Transform an image (numpy array) to a QImage.

        :param img: numpy array (grayscale 2-D, or 3-D BGR/BGRA uint8).
        :param copy: when True return a deep copy detached from img's buffer.
        :return: QImage (empty QImage for None input; implicitly None for
                 unsupported dtypes/shapes — callers only pass uint8 images).
        """
        gray_color_table = [QtGui.qRgb(i, i, i) for i in range(256)]
        if img is None:
            return QtGui.QImage()
        if img.dtype == np.uint8:
            if len(img.shape) == 2:
                # Grayscale: indexed 8-bit image with an identity gray palette.
                qim = QtGui.QImage(img.data, img.shape[1], img.shape[0], img.strides[0], QtGui.QImage.Format_Indexed8)
                qim.setColorTable(gray_color_table)
                return qim.copy() if copy else qim
            elif len(img.shape) == 3:
                im = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                if im.shape[2] == 3:
                    qim = QtGui.QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QtGui.QImage.Format_RGB888)
                    return qim.copy() if copy else qim
                elif im.shape[2] == 4:
                    qim = QtGui.QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QtGui.QImage.Format_ARGB32)
                    return qim.copy() if copy else qim
def main():
    """Configure logging, create the main window, and start the Qt event loop."""
    log_path = './Extra/VisionViewer.log'
    logging.basicConfig(filename=log_path, level=logging.DEBUG)
    # Start each session with an empty log file.
    with open(log_path, "w") as log_file:
        log_file.truncate()
    logging.info(' Type - User - Date & Time - Details')
    logging.info(''.join([' ', str(time.asctime(time.localtime(time.time()))), ' : ',
                          'VisionViewer app Started.']))
    app = QtGui.QApplication(sys.argv)
    window = ViewerWindow()
    window.display_time_elapsed()
    window.show()
    sys.exit(app.exec_())
if __name__ == "__main__":
    main()
| rainer85ah/VisionViewer | src/Viewer/VisionViewer.py | Python | mit | 11,796 | [
"Gaussian"
] | 5d1c4189af2cc33ab48a7198d6f248906358e674a9cd24789dc5fa31daba2d12 |
"""
The subpackage ``calib`` contains some useful cosmetics and pre-processing
functionalities:
- resizing frames and cubes : upscaling/pixel binning,
- shifting frames,
- rotating frames and cubes,
- cropping frames and cubes,
- removing bad pixels from frames and cubes,
- correcting nan values from frames and cubes,
- detecting bad frames in cubes, using:
- pixel statistics in annulus or circular aperture,
- ellipticity of a point like source,
- frames correlation,
- temporal subsampling of cubes (mean, median, trimmed mean),
- registration (re-centering) of frames, using:
- centroid fitting a 2d gaussian or moffat,
- DFT upsampling or fourier cross-correlation (Guizar et al. 2008),
- radon transform for broadband frames (Pueyo et al. 2014),
- using satellite/waffle spots (fitting plus intersection).
- sky subtraction (PCA method).
Astronomical calibration functionality, such as flat fielding and dark-sky
subtraction, was not included in VIP in spite of its simplicity, because of the
heterogeneity of the datasets coming from different observatories (each having
different data storage and headers). You can perform these steps in Python with
procedures of a few lines, or by using dedicated instrument pipelines such as
esorex (ESO instruments)."""
from __future__ import absolute_import
from .badframes import *
from .badpixremoval import *
from .cosmetics import *
from .cosmetics_ifs import *
from .derotation import *
from .parangles import *
from .recentering import *
from .rescaling import *
from .skysubtraction import *
from .subsampling import *
| henry-ngo/VIP | vip_hci/preproc/__init__.py | Python | mit | 1,656 | [
"Gaussian"
] | 000b4436ef04f5f6bb4d374e919bc7a5e54f9a600449c84e90400067abe28a59 |
#
# Parse tree nodes for expressions
#
import cython
cython.declare(error=object, warning=object, warn_once=object, InternalError=object,
CompileError=object, UtilityCode=object, TempitaUtilityCode=object,
StringEncoding=object, operator=object,
Naming=object, Nodes=object, PyrexTypes=object, py_object_type=object,
list_type=object, tuple_type=object, set_type=object, dict_type=object,
unicode_type=object, str_type=object, bytes_type=object, type_type=object,
Builtin=object, Symtab=object, Utils=object, find_coercion_error=object,
debug_disposal_code=object, debug_temp_alloc=object, debug_coercion=object)
import sys
import operator
from Errors import error, warning, warn_once, InternalError, CompileError
from Errors import hold_errors, release_errors, held_errors, report_error
from Code import UtilityCode, TempitaUtilityCode
import StringEncoding
import Naming
import Nodes
from Nodes import Node
import PyrexTypes
from PyrexTypes import py_object_type, c_long_type, typecast, error_type, \
unspecified_type, cython_memoryview_ptr_type
import TypeSlots
from Builtin import list_type, tuple_type, set_type, dict_type, \
unicode_type, str_type, bytes_type, type_type
import Builtin
import Symtab
import Options
from Cython import Utils
from Annotate import AnnotationItem
from Cython.Debugging import print_call_chain
from DebugFlags import debug_disposal_code, debug_temp_alloc, \
debug_coercion
try:
from __builtin__ import basestring
except ImportError:
basestring = str # Python 3
class NotConstant(object):
    """Sentinel marking an expression whose value is not a compile-time constant."""
    # Lazily created shared instance (the class is a singleton).
    _obj = None
    def __new__(cls):
        inst = NotConstant._obj
        if inst is None:
            inst = super(NotConstant, cls).__new__(cls)
            NotConstant._obj = inst
        return inst
    def __repr__(self):
        return "<NOT CONSTANT>"
# Singleton sentinel: "this expression has no usable constant value".
not_a_constant = NotConstant()
# Distinct sentinel: "the constant value has not been computed yet".
constant_value_not_set = object()
# error messages when coercing from key[0] to key[1]
# (bound to the dict's .get, so lookups of unknown pairs return None)
find_coercion_error = {
    # string related errors
    (Builtin.unicode_type, Builtin.bytes_type) : "Cannot convert Unicode string to 'bytes' implicitly, encoding required.",
    (Builtin.unicode_type, Builtin.str_type) : "Cannot convert Unicode string to 'str' implicitly. This is not portable and requires explicit encoding.",
    (Builtin.unicode_type, PyrexTypes.c_char_ptr_type) : "Unicode objects do not support coercion to C types.",
    (Builtin.bytes_type, Builtin.unicode_type) : "Cannot convert 'bytes' object to unicode implicitly, decoding required",
    (Builtin.bytes_type, Builtin.str_type) : "Cannot convert 'bytes' object to str implicitly. This is not portable to Py3.",
    (Builtin.str_type, Builtin.unicode_type) : "str objects do not support coercion to unicode, use a unicode string literal instead (u'')",
    (Builtin.str_type, Builtin.bytes_type) : "Cannot convert 'str' to 'bytes' implicitly. This is not portable.",
    (Builtin.str_type, PyrexTypes.c_char_ptr_type) : "'str' objects do not support coercion to C types (use 'bytes'?).",
    (PyrexTypes.c_char_ptr_type, Builtin.unicode_type) : "Cannot convert 'char*' to unicode implicitly, decoding required",
    (PyrexTypes.c_uchar_ptr_type, Builtin.unicode_type) : "Cannot convert 'char*' to unicode implicitly, decoding required",
}.get
class ExprNode(Node):
    """Abstract base class for all expression nodes in the parse tree.

    Provides the framework for the type-analysis, temp-allocation,
    code-generation and coercion phases described in the comments below.
    """
    #  subexprs     [string]     Class var holding names of subexpr node attrs
    #  type         PyrexType    Type of the result
    #  result_code  string       Code fragment
    #  result_ctype string       C type of result_code if different from type
    #  is_temp      boolean      Result is in a temporary variable
    #  is_sequence_constructor
    #               boolean      Is a list or tuple constructor expression
    #  is_starred   boolean      Is a starred expression (e.g. '*a')
    #  saved_subexpr_nodes
    #               [ExprNode or [ExprNode or None] or None]
    #                            Cached result of subexpr_nodes()
    #  use_managed_ref boolean   use ref-counted temps/assignments/etc.
    #  result_is_used  boolean   indicates that the result will be dropped and the
    #                            result_code/temp_result can safely be set to None
    result_ctype = None
    type = None
    temp_code = None
    old_temp = None # error checker for multiple frees etc.
    use_managed_ref = True # can be set by optimisation transforms
    result_is_used = True
    #  The Analyse Expressions phase for expressions is split
    #  into two sub-phases:
    #
    #    Analyse Types
    #      Determines the result type of the expression based
    #      on the types of its sub-expressions, and inserts
    #      coercion nodes into the expression tree where needed.
    #      Marks nodes which will need to have temporary variables
    #      allocated.
    #
    #    Allocate Temps
    #      Allocates temporary variables where needed, and fills
    #      in the result_code field of each node.
    #
    #  ExprNode provides some convenience routines which
    #  perform both of the above phases. These should only
    #  be called from statement nodes, and only when no
    #  coercion nodes need to be added around the expression
    #  being analysed. In that case, the above two phases
    #  should be invoked separately.
    #
    #  Framework code in ExprNode provides much of the common
    #  processing for the various phases. It makes use of the
    #  'subexprs' class attribute of ExprNodes, which should
    #  contain a list of the names of attributes which can
    #  hold sub-nodes or sequences of sub-nodes.
    #
    #  The framework makes use of a number of abstract methods.
    #  Their responsibilities are as follows.
    #
    #    Declaration Analysis phase
    #
    #      analyse_target_declaration
    #        Called during the Analyse Declarations phase to analyse
    #        the LHS of an assignment or argument of a del statement.
    #        Nodes which cannot be the LHS of an assignment need not
    #        implement it.
    #
    #    Expression Analysis phase
    #
    #      analyse_types
    #        - Call analyse_types on all sub-expressions.
    #        - Check operand types, and wrap coercion nodes around
    #          sub-expressions where needed.
    #        - Set the type of this node.
    #        - If a temporary variable will be required for the
    #          result, set the is_temp flag of this node.
    #
    #      analyse_target_types
    #        Called during the Analyse Types phase to analyse
    #        the LHS of an assignment or argument of a del
    #        statement. Similar responsibilities to analyse_types.
    #
    #      target_code
    #        Called by the default implementation of allocate_target_temps.
    #        Should return a C lvalue for assigning to the node. The default
    #        implementation calls calculate_result_code.
    #
    #      check_const
    #        - Check that this node and its subnodes form a
    #          legal constant expression. If so, do nothing,
    #          otherwise call not_const.
    #
    #        The default implementation of check_const
    #        assumes that the expression is not constant.
    #
    #      check_const_addr
    #        - Same as check_const, except check that the
    #          expression is a C lvalue whose address is
    #          constant. Otherwise, call addr_not_const.
    #
    #        The default implementation of calc_const_addr
    #        assumes that the expression is not a constant
    #        lvalue.
    #
    #   Code Generation phase
    #
    #      generate_evaluation_code
    #        - Call generate_evaluation_code for sub-expressions.
    #        - Perform the functions of generate_result_code
    #          (see below).
    #        - If result is temporary, call generate_disposal_code
    #          on all sub-expressions.
    #
    #        A default implementation of generate_evaluation_code
    #        is provided which uses the following abstract methods:
    #
    #          generate_result_code
    #            - Generate any C statements necessary to calculate
    #              the result of this node from the results of its
    #              sub-expressions.
    #
    #          calculate_result_code
    #            - Should return a C code fragment evaluating to the
    #              result. This is only called when the result is not
    #              a temporary.
    #
    #      generate_assignment_code
    #        Called on the LHS of an assignment.
    #        - Call generate_evaluation_code for sub-expressions.
    #        - Generate code to perform the assignment.
    #        - If the assignment absorbed a reference, call
    #          generate_post_assignment_code on the RHS,
    #          otherwise call generate_disposal_code on it.
    #
    #      generate_deletion_code
    #        Called on an argument of a del statement.
    #        - Call generate_evaluation_code for sub-expressions.
    #        - Generate code to perform the deletion.
    #        - Call generate_disposal_code on all sub-expressions.
    #
    is_sequence_constructor = 0
    is_string_literal = 0
    is_attribute = 0
    saved_subexpr_nodes = None
    is_temp = 0
    is_target = 0
    is_starred = 0
    constant_result = constant_value_not_set
    # whether this node with a memoryview type should be broadcast
    memslice_broadcast = False
    try:
        _get_child_attrs = operator.attrgetter('subexprs')
    except AttributeError:
        # Python 2.3
        def __get_child_attrs(self):
            return self.subexprs
        _get_child_attrs = __get_child_attrs
    child_attrs = property(fget=_get_child_attrs)
    def not_implemented(self, method_name):
        # Report (and raise) an abstract method that the subclass failed
        # to override.
        print_call_chain(method_name, "not implemented") ###
        raise InternalError(
            "%s.%s not implemented" %
                (self.__class__.__name__, method_name))
    def is_lvalue(self):
        # Whether this node may appear as an assignment target; subclasses
        # that support assignment override this.
        return 0
    def is_addressable(self):
        return self.is_lvalue()
    def is_ephemeral(self):
        #  An ephemeral node is one whose result is in
        #  a Python temporary and we suspect there are no
        #  other references to it. Certain operations are
        #  disallowed on such values, since they are
        #  likely to result in a dangling pointer.
        return self.type.is_pyobject and self.is_temp
    def subexpr_nodes(self):
        #  Extract a list of subexpression nodes based
        #  on the contents of the subexprs class attribute.
        nodes = []
        for name in self.subexprs:
            item = getattr(self, name)
            if item is not None:
                if type(item) is list:
                    nodes.extend(item)
                else:
                    nodes.append(item)
        return nodes
    def result(self):
        # C code fragment holding this node's value: either the allocated
        # temp or a directly computed expression.
        if self.is_temp:
            return self.temp_code
        else:
            return self.calculate_result_code()
    def result_as(self, type = None):
        #  Return the result code cast to the specified C type.
        return typecast(type, self.ctype(), self.result())
    def py_result(self):
        #  Return the result code cast to PyObject *.
        return self.result_as(py_object_type)
    def ctype(self):
        #  Return the native C type of the result (i.e. the
        #  C type of the result_code expression).
        return self.result_ctype or self.type
    def get_constant_c_result_code(self):
        # Return the constant value of this node as a result code
        # string, or None if the node is not constant.  This method
        # can be called when the constant result code is required
        # before the code generation phase.
        #
        # The return value is a string that can represent a simple C
        # value, a constant C name or a constant C expression.  If the
        # node type depends on Python code, this must return None.
        return None
    def calculate_constant_result(self):
        # Calculate the constant compile time result value of this
        # expression and store it in ``self.constant_result``.  Does
        # nothing by default, thus leaving ``self.constant_result``
        # unknown.  If valid, the result can be an arbitrary Python
        # value.
        #
        # This must only be called when it is assured that all
        # sub-expressions have a valid constant_result value.  The
        # ConstantFolding transform will do this.
        pass
    def has_constant_result(self):
        # True once a real constant value (not a sentinel) has been computed.
        return self.constant_result is not constant_value_not_set and \
               self.constant_result is not not_a_constant
    def compile_time_value(self, denv):
        #  Return value of compile-time expression, or report error.
        error(self.pos, "Invalid compile-time expression")
    def compile_time_value_error(self, e):
        error(self.pos, "Error in compile-time expression: %s: %s" % (
            e.__class__.__name__, e))
    # ------------- Declaration Analysis ----------------
    def analyse_target_declaration(self, env):
        error(self.pos, "Cannot assign to or delete this")
    # ------------- Expression Analysis ----------------
    def analyse_const_expression(self, env):
        #  Called during the analyse_declarations phase of a
        #  constant expression. Analyses the expression's type,
        #  checks whether it is a legal const expression,
        #  and determines its value.
        self.analyse_types(env)
        return self.check_const()
    def analyse_expressions(self, env):
        #  Convenience routine performing both the Type
        #  Analysis and Temp Allocation phases for a whole
        #  expression.
        self.analyse_types(env)
    def analyse_target_expression(self, env, rhs):
        #  Convenience routine performing both the Type
        #  Analysis and Temp Allocation phases for the LHS of
        #  an assignment.
        self.analyse_target_types(env)
    def analyse_boolean_expression(self, env):
        #  Analyse expression and coerce to a boolean.
        self.analyse_types(env)
        bool = self.coerce_to_boolean(env)
        return bool
    def analyse_temp_boolean_expression(self, env):
        #  Analyse boolean expression and coerce result into
        #  a temporary. This is used when a branch is to be
        #  performed on the result and we won't have an
        #  opportunity to ensure disposal code is executed
        #  afterwards. By forcing the result into a temporary,
        #  we ensure that all disposal has been done by the
        #  time we get the result.
        self.analyse_types(env)
        return self.coerce_to_boolean(env).coerce_to_simple(env)
    # --------------- Type Inference -----------------
    def type_dependencies(self, env):
        # Returns the list of entries whose types must be determined
        # before the type of self can be infered.
        if hasattr(self, 'type') and self.type is not None:
            return ()
        return sum([node.type_dependencies(env) for node in self.subexpr_nodes()], ())
    def infer_type(self, env):
        # Attempt to deduce the type of self.
        # Differs from analyse_types as it avoids unnecessary
        # analysis of subexpressions, but can assume everything
        # in self.type_dependencies() has been resolved.
        if hasattr(self, 'type') and self.type is not None:
            return self.type
        elif hasattr(self, 'entry') and self.entry is not None:
            return self.entry.type
        else:
            self.not_implemented("infer_type")
    def nonlocally_immutable(self):
        # Returns whether this variable is a safe reference, i.e.
        # can't be modified as part of globals or closures.
        return self.is_temp or self.type.is_array or self.type.is_cfunction
    # --------------- Type Analysis ------------------
    def analyse_as_module(self, env):
        # If this node can be interpreted as a reference to a
        # cimported module, return its scope, else None.
        return None
    def analyse_as_type(self, env):
        # If this node can be interpreted as a reference to a
        # type, return that type, else None.
        return None
    def analyse_as_extension_type(self, env):
        # If this node can be interpreted as a reference to an
        # extension type, return its type, else None.
        return None
    def analyse_types(self, env):
        self.not_implemented("analyse_types")
    def analyse_target_types(self, env):
        self.analyse_types(env)
    def nogil_check(self, env):
        # By default, any expression based on Python objects is
        # prevented in nogil environments.  Subtypes must override
        # this if they can work without the GIL.
        if self.type and self.type.is_pyobject:
            self.gil_error()
    def gil_assignment_check(self, env):
        if env.nogil and self.type.is_pyobject:
            error(self.pos, "Assignment of Python object not allowed without gil")
    def check_const(self):
        self.not_const()
        return False
    def not_const(self):
        error(self.pos, "Not allowed in a constant expression")
    def check_const_addr(self):
        self.addr_not_const()
        return False
    def addr_not_const(self):
        error(self.pos, "Address is not constant")
    # ----------------- Result Allocation -----------------
    def result_in_temp(self):
        #  Return true if result is in a temporary owned by
        #  this node or one of its subexpressions. Overridden
        #  by certain nodes which can share the result of
        #  a subnode.
        return self.is_temp
    def target_code(self):
        #  Return code fragment for use as LHS of a C assignment.
        return self.calculate_result_code()
    def calculate_result_code(self):
        self.not_implemented("calculate_result_code")
#    def release_target_temp(self, env):
#        #  Release temporaries used by LHS of an assignment.
#        self.release_subexpr_temps(env)
    def allocate_temp_result(self, code):
        # Allocate a temp for this node's result; raises if a temp was
        # already allocated (double allocation indicates a framework bug).
        if self.temp_code:
            raise RuntimeError("Temp allocated multiple times in %r: %r" % (self.__class__.__name__, self.pos))
        type = self.type
        if not type.is_void:
            if type.is_pyobject:
                type = PyrexTypes.py_object_type
            self.temp_code = code.funcstate.allocate_temp(
                type, manage_ref=self.use_managed_ref)
        else:
            self.temp_code = None
    def release_temp_result(self, code):
        # Release the allocated temp; old_temp records the released temp so
        # a double release can be diagnosed with a useful message.
        if not self.temp_code:
            if not self.result_is_used:
                # not used anyway, so ignore if not set up
                return
            if self.old_temp:
                raise RuntimeError("temp %s released multiple times in %s" % (
                    self.old_temp, self.__class__.__name__))
            else:
                raise RuntimeError("no temp, but release requested in %s" % (
                    self.__class__.__name__))
        code.funcstate.release_temp(self.temp_code)
        self.old_temp = self.temp_code
        self.temp_code = None
    # ---------------- Code Generation -----------------
    def make_owned_reference(self, code):
        # If result is a pyobject, make sure we own
        # a reference to it.
        if self.type.is_pyobject and not self.result_in_temp():
            code.put_incref(self.result(), self.ctype())
    def generate_evaluation_code(self, code):
        code.mark_pos(self.pos)
        #  Generate code to evaluate this node and
        #  its sub-expressions, and dispose of any
        #  temporary results of its sub-expressions.
        self.generate_subexpr_evaluation_code(code)
        if self.is_temp:
            self.allocate_temp_result(code)
        self.generate_result_code(code)
        if self.is_temp:
            # If we are temp we do not need to wait until this node is disposed
            # before disposing children.
            self.generate_subexpr_disposal_code(code)
            self.free_subexpr_temps(code)
    def generate_subexpr_evaluation_code(self, code):
        for node in self.subexpr_nodes():
            node.generate_evaluation_code(code)
    def generate_result_code(self, code):
        self.not_implemented("generate_result_code")
    def generate_disposal_code(self, code):
        # Drop the reference held in this node's temp (if any); non-temp
        # results delegate disposal to their sub-expressions.
        if self.is_temp:
            if self.result():
                if self.type.is_pyobject:
                    code.put_decref_clear(self.result(), self.ctype())
                elif self.type.is_memoryviewslice:
                    code.put_xdecref_memoryviewslice(
                        self.result(), have_gil=not self.in_nogil_context)
        else:
            # Already done if self.is_temp
            self.generate_subexpr_disposal_code(code)
    def generate_subexpr_disposal_code(self, code):
        #  Generate code to dispose of temporary results
        #  of all sub-expressions.
        for node in self.subexpr_nodes():
            node.generate_disposal_code(code)
    def generate_post_assignment_code(self, code):
        # Called after the assignment absorbed this node's reference:
        # null out the temp without decrefing it.
        if self.is_temp:
            if self.type.is_pyobject:
                code.putln("%s = 0;" % self.result())
            elif self.type.is_memoryviewslice:
                code.putln("%s.memview = NULL;" % self.result())
                code.putln("%s.data = NULL;" % self.result())
        else:
            self.generate_subexpr_disposal_code(code)
    def generate_assignment_code(self, rhs, code):
        #  Stub method for nodes which are not legal as
        #  the LHS of an assignment. An error will have
        #  been reported earlier.
        pass
    def generate_deletion_code(self, code):
        #  Stub method for nodes that are not legal as
        #  the argument of a del statement. An error
        #  will have been reported earlier.
        pass
    def free_temps(self, code):
        # Release this node's temp, or recurse into children when the
        # result is not held in a temp of our own.
        if self.is_temp:
            if not self.type.is_void:
                self.release_temp_result(code)
        else:
            self.free_subexpr_temps(code)
    def free_subexpr_temps(self, code):
        for sub in self.subexpr_nodes():
            sub.free_temps(code)
    def generate_function_definitions(self, env, code):
        pass
    # ---------------- Annotation ---------------------
    def annotate(self, code):
        for node in self.subexpr_nodes():
            node.annotate(code)
    # ----------------- Coercion ----------------------
    def coerce_to(self, dst_type, env):
        #   Coerce the result so that it can be assigned to
        #   something of type dst_type. If processing is necessary,
        #   wraps this node in a coercion node and returns that.
        #   Otherwise, returns this node unchanged.
        #
        #   This method is called during the analyse_expressions
        #   phase of the src_node's processing.
        #
        #   Note that subclasses that override this (especially
        #   ConstNodes) must not (re-)set their own .type attribute
        #   here. Since expression nodes may turn up in different
        #   places in the tree (e.g. inside of CloneNodes in cascaded
        #   assignments), this method must return a new node instance
        #   if it changes the type.
        #
        src = self
        src_type = self.type
        src_is_py_type = src_type.is_pyobject
        dst_is_py_type = dst_type.is_pyobject
        if self.check_for_coercion_error(dst_type):
            return self
        if dst_type.is_reference and not src_type.is_reference:
            dst_type = dst_type.ref_base_type
        if src_type.is_fused or dst_type.is_fused:
            # See if we are coercing a fused function to a pointer to a
            # specialized function
            if (src_type.is_cfunction and not dst_type.is_fused and
                    dst_type.is_ptr and dst_type.base_type.is_cfunction):
                dst_type = dst_type.base_type
                for signature in src_type.get_all_specialized_function_types():
                    if signature.same_as(dst_type):
                        src.type = signature
                        src.entry = src.type.entry
                        src.entry.used = True
                        return self
            if src_type.is_fused:
                error(self.pos, "Type is not specific")
            else:
                error(self.pos, "Cannot coerce to a type that is not specialized")
            self.type = error_type
            return self
        if self.coercion_type is not None:
            # This is purely for error checking purposes!
            node = NameNode(self.pos, name='', type=self.coercion_type)
            node.coerce_to(dst_type, env)
        if dst_type.is_memoryviewslice:
            import MemoryView
            if not src.type.is_memoryviewslice:
                if src.type.is_pyobject:
                    src = CoerceToMemViewSliceNode(src, dst_type, env)
                elif src.type.is_array:
                    src = CythonArrayNode.from_carray(src, env).coerce_to(
                        dst_type, env)
                elif not src_type.is_error:
                    error(self.pos,
                          "Cannot convert '%s' to memoryviewslice" %
                          (src_type,))
            elif not MemoryView.src_conforms_to_dst(
                    src.type, dst_type, broadcast=self.memslice_broadcast):
                if src.type.dtype.same_as(dst_type.dtype):
                    msg = "Memoryview '%s' not conformable to memoryview '%s'."
                    tup = src.type, dst_type
                else:
                    msg = "Different base types for memoryviews (%s, %s)"
                    tup = src.type.dtype, dst_type.dtype
                error(self.pos, msg % tup)
        elif dst_type.is_pyobject:
            if not src.type.is_pyobject:
                if dst_type is bytes_type and src.type.is_int:
                    src = CoerceIntToBytesNode(src, env)
                else:
                    src = CoerceToPyTypeNode(src, env)
            if not src.type.subtype_of(dst_type):
                if not isinstance(src, NoneNode):
                    src = PyTypeTestNode(src, dst_type, env)
        elif src.type.is_pyobject:
            src = CoerceFromPyTypeNode(dst_type, src, env)
        elif (dst_type.is_complex
              and src_type != dst_type
              and dst_type.assignable_from(src_type)):
            src = CoerceToComplexNode(src, dst_type, env)
        else: # neither src nor dst are py types
            # Added the string comparison, since for c types that
            # is enough, but Cython gets confused when the types are
            # in different pxi files.
            if not (str(src.type) == str(dst_type) or dst_type.assignable_from(src_type)):
                self.fail_assignment(dst_type)
        return src
    def fail_assignment(self, dst_type):
        error(self.pos, "Cannot assign type '%s' to '%s'" % (self.type, dst_type))
    def check_for_coercion_error(self, dst_type, fail=False, default=None):
        # Look up a coercion-specific error message for (self.type, dst_type);
        # report it (or, with fail=True, a generic failure) and return whether
        # an error was issued.
        if fail and not default:
            default = "Cannot assign type '%(FROM)s' to '%(TO)s'"
        message = find_coercion_error((self.type, dst_type), default)
        if message is not None:
            error(self.pos, message % {'FROM': self.type, 'TO': dst_type})
            return True
        if fail:
            self.fail_assignment(dst_type)
            return True
        return False
    def coerce_to_pyobject(self, env):
        return self.coerce_to(PyrexTypes.py_object_type, env)
    def coerce_to_boolean(self, env):
        #  Coerce result to something acceptable as
        #  a boolean value.
        # if it's constant, calculate the result now
        if self.has_constant_result():
            bool_value = bool(self.constant_result)
            return BoolNode(self.pos, value=bool_value,
                            constant_result=bool_value)
        type = self.type
        if type.is_pyobject or type.is_ptr or type.is_float:
            return CoerceToBooleanNode(self, env)
        else:
            if not (type.is_int or type.is_enum or type.is_error):
                error(self.pos,
                      "Type '%s' not acceptable as a boolean" % type)
            return self
    def coerce_to_integer(self, env):
        # If not already some C integer type, coerce to longint.
        if self.type.is_int:
            return self
        else:
            return self.coerce_to(PyrexTypes.c_long_type, env)
    def coerce_to_temp(self, env):
        #  Ensure that the result is in a temporary.
        if self.result_in_temp():
            return self
        else:
            return CoerceToTempNode(self, env)
    def coerce_to_simple(self, env):
        #  Ensure that the result is simple (see is_simple).
        if self.is_simple():
            return self
        else:
            return self.coerce_to_temp(env)
    def is_simple(self):
        #  A node is simple if its result is something that can
        #  be referred to without performing any operations, e.g.
        #  a constant, local var, C global var, struct member
        #  reference, or temporary.
        return self.result_in_temp()
    def may_be_none(self):
        # Whether the runtime value of this node could be None; non-Python
        # types can never be None, and a known constant answers exactly.
        if self.type and not self.type.is_pyobject:
            return False
        if self.constant_result not in (not_a_constant, constant_value_not_set):
            return self.constant_result is not None
        return True
    def as_cython_attribute(self):
        return None
    def as_none_safe_node(self, message, error="PyExc_TypeError", format_args=()):
        # Wraps the node in a NoneCheckNode if it is not known to be
        # not-None (e.g. because it is a Python literal).
        if self.may_be_none():
            return NoneCheckNode(self, error, message, format_args)
        else:
            return self
class AtomicExprNode(ExprNode):
    """Abstract base class for expression nodes that have no sub-expressions."""
    subexprs = []

    # With no children, walking sub-expressions is pointless; override the
    # generic machinery with explicit no-ops.
    def generate_subexpr_evaluation_code(self, code):
        pass

    def generate_subexpr_disposal_code(self, code):
        pass
class PyConstNode(AtomicExprNode):
    """Abstract base class for constant Python singletons (e.g. None, Ellipsis)."""
    is_literal = 1
    type = py_object_type

    def may_be_none(self):
        return False

    def is_simple(self):
        return 1

    def analyse_types(self, env):
        # Type and value are fixed class attributes; nothing to analyse.
        pass

    def generate_result_code(self, code):
        pass

    def calculate_result_code(self):
        # The C-level name of the constant (e.g. "Py_None").
        return self.value
class NoneNode(PyConstNode):
    """The constant value ``None``."""
    value = "Py_None"
    is_none = 1
    constant_result = None
    # Accessing None never needs the GIL.
    nogil_check = None

    def may_be_none(self):
        # This node *is* None by definition.
        return True

    def compile_time_value(self, denv):
        return None
class EllipsisNode(PyConstNode):
    """The literal ``...`` as it appears in a subscript list."""
    constant_result = Ellipsis
    value = "Py_Ellipsis"

    def compile_time_value(self, denv):
        return Ellipsis
class ConstNode(AtomicExprNode):
    """Abstract base type for literal constant nodes.

    Attribute ``value`` holds the constant as a C code fragment (string).
    """
    is_literal = 1
    # Literals never require the GIL.
    nogil_check = None

    def may_be_none(self):
        return False

    def is_simple(self):
        return 1

    def nonlocally_immutable(self):
        return 1

    def analyse_types(self, env):
        # Result types are fixed class attributes on the subclasses.
        pass

    def check_const(self):
        return True

    def generate_result_code(self, code):
        pass

    def calculate_result_code(self):
        return str(self.value)

    def get_constant_c_result_code(self):
        # A literal's C source form doubles as its constant result code.
        return self.calculate_result_code()
class BoolNode(ConstNode):
    """The constant value True or False."""
    type = PyrexTypes.c_bint_type

    def compile_time_value(self, denv):
        return self.value

    def calculate_constant_result(self):
        self.constant_result = self.value

    def calculate_result_code(self):
        # Emit "1"/"0" — the Python reprs "True"/"False" are not valid C.
        return str(int(self.value))
class NullNode(ConstNode):
    """The C constant ``NULL``."""
    value = "NULL"
    type = PyrexTypes.c_null_ptr_type
    constant_result = 0

    def get_constant_c_result_code(self):
        # "NULL" is already a valid constant C expression.
        return self.value
class CharNode(ConstNode):
    """A single-character C char literal."""
    type = PyrexTypes.c_char_type

    def compile_time_value(self, denv):
        return ord(self.value)

    def calculate_constant_result(self):
        self.constant_result = ord(self.value)

    def calculate_result_code(self):
        # Quote and escape the character for C source.
        return "'%s'" % StringEncoding.escape_char(self.value)
class IntNode(ConstNode):
    """An integer literal; picks a suitable C or Python result type from
    the literal's value and its suffixes.
    """
    # unsigned       ""  or "U"                  suffix from the source literal
    # longness       ""  or "L" or "LL"
    # is_c_literal   True/False/None             creator considers this a C integer literal
    unsigned = ""
    longness = ""
    is_c_literal = None # unknown
    def __init__(self, pos, **kwds):
        ExprNode.__init__(self, pos, **kwds)
        # Only infer a type if the creator did not supply one explicitly.
        if 'type' not in kwds:
            self.type = self.find_suitable_type_for_value()
    def find_suitable_type_for_value(self):
        # Determine the narrowest appropriate result type for the literal.
        if self.constant_result is constant_value_not_set:
            try:
                self.calculate_constant_result()
            except ValueError:
                pass
        # we ignore 'is_c_literal = True' and instead map signed 32bit
        # integers as C long values
        if self.is_c_literal or \
               self.constant_result in (constant_value_not_set, not_a_constant) or \
               self.unsigned or self.longness == 'LL':
            # clearly a C literal
            rank = (self.longness == 'LL') and 2 or 1
            suitable_type = PyrexTypes.modifiers_and_name_to_type[not self.unsigned, rank, "int"]
            if self.type:
                suitable_type = PyrexTypes.widest_numeric_type(suitable_type, self.type)
        else:
            # C literal or Python literal - split at 32bit boundary
            if self.constant_result >= -2**31 and self.constant_result < 2**31:
                if self.type and self.type.is_int:
                    suitable_type = self.type
                else:
                    suitable_type = PyrexTypes.c_long_type
            else:
                # does not fit a signed 32-bit int: fall back to a Python object
                suitable_type = PyrexTypes.py_object_type
        return suitable_type
    def coerce_to(self, dst_type, env):
        # Build a fresh node of the requested type rather than mutating self
        # (see the note on coerce_to in ExprNode).
        if self.type is dst_type:
            return self
        elif dst_type.is_float:
            if self.constant_result is not not_a_constant:
                return FloatNode(self.pos, value='%d.0' % int(self.constant_result), type=dst_type,
                                 constant_result=float(self.constant_result))
            else:
                return FloatNode(self.pos, value=self.value, type=dst_type,
                                 constant_result=not_a_constant)
        if dst_type.is_numeric and not dst_type.is_complex:
            node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
                           type = dst_type, is_c_literal = True,
                           unsigned=self.unsigned, longness=self.longness)
            return node
        elif dst_type.is_pyobject:
            node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
                           type = PyrexTypes.py_object_type, is_c_literal = False,
                           unsigned=self.unsigned, longness=self.longness)
        else:
            # FIXME: not setting the type here to keep it working with
            # complex numbers. Should they be special cased?
            node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
                           unsigned=self.unsigned, longness=self.longness)
        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return ConstNode.coerce_to(node, dst_type, env)
    def coerce_to_boolean(self, env):
        # An int literal is directly usable as a C bint; re-type, don't wrap.
        return IntNode(
            self.pos, value=self.value,
            type = PyrexTypes.c_bint_type,
            unsigned=self.unsigned, longness=self.longness)
    def generate_evaluation_code(self, code):
        if self.type.is_pyobject:
            # pre-allocate a Python version of the number
            plain_integer_string = self.value_as_c_integer_string(plain_digits=True)
            self.result_code = code.get_py_num(plain_integer_string, self.longness)
        else:
            self.result_code = self.get_constant_c_result_code()
    def get_constant_c_result_code(self):
        return self.value_as_c_integer_string() + self.unsigned + self.longness
    def value_as_c_integer_string(self, plain_digits=False):
        # Render the literal in a form C understands; with plain_digits=True
        # every base is converted to plain decimal digits.
        value = self.value
        if isinstance(value, basestring) and len(value) > 2:
            # must convert C-incompatible Py3 oct/bin notations
            if value[1] in 'oO':
                if plain_digits:
                    value = int(value[2:], 8)
                else:
                    value = value[0] + value[2:] # '0o123' => '0123'
            elif value[1] in 'bB':
                value = int(value[2:], 2)
            elif plain_digits and value[1] in 'xX':
                value = int(value[2:], 16)
        return str(value)
    def calculate_result_code(self):
        return self.result_code
    def calculate_constant_result(self):
        self.constant_result = Utils.str_to_number(self.value)
    def compile_time_value(self, denv):
        return Utils.str_to_number(self.value)
class FloatNode(ConstNode):
    """A C double literal."""
    type = PyrexTypes.c_double_type

    def compile_time_value(self, denv):
        return float(self.value)

    def calculate_constant_result(self):
        self.constant_result = float(self.value)

    def calculate_result_code(self):
        strval = self.value
        assert isinstance(strval, (str, unicode))
        # Normalise through repr(float(...)) to detect the special values
        # that have no portable C literal spelling.
        cmpval = repr(float(strval))
        if cmpval == 'nan':
            return "(Py_HUGE_VAL * 0)"
        if cmpval == 'inf':
            return "Py_HUGE_VAL"
        if cmpval == '-inf':
            return "(-Py_HUGE_VAL)"
        return strval
class BytesNode(ConstNode):
    """A char* or bytes literal."""
    # value        BytesLiteral
    is_string_literal = True
    # start off as Python 'bytes' to support len() in O(1)
    type = bytes_type
    def compile_time_value(self, denv):
        return self.value
    def analyse_as_type(self, env):
        # Try to read the literal's text as a type name: first as a basic
        # type, then by parsing it inside a "sizeof(...)" fragment.
        type = PyrexTypes.parse_basic_type(self.value)
        if type is not None:
            return type
        from TreeFragment import TreeFragment
        # shift the reported column back over the 7-char "sizeof(" prefix
        pos = (self.pos[0], self.pos[1], self.pos[2]-7)
        declaration = TreeFragment(u"sizeof(%s)" % self.value, name=pos[0].filename, initial_pos=pos)
        sizeof_node = declaration.root.stats[0].expr
        sizeof_node.analyse_types(env)
        if isinstance(sizeof_node, SizeofTypeNode):
            return sizeof_node.arg_type
    def can_coerce_to_char_literal(self):
        return len(self.value) == 1
    def coerce_to_boolean(self, env):
        # This is special because testing a C char* for truth directly
        # would yield the wrong result.
        bool_value = bool(self.value)
        return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
    def coerce_to(self, dst_type, env):
        if self.type == dst_type:
            return self
        if dst_type.is_int:
            if not self.can_coerce_to_char_literal():
                error(self.pos, "Only single-character string literals can be coerced into ints.")
                return self
            if dst_type.is_unicode_char:
                error(self.pos, "Bytes literals cannot coerce to Py_UNICODE/Py_UCS4, use a unicode literal instead.")
                return self
            return CharNode(self.pos, value=self.value)
        # Work on a fresh node; self may be shared in the tree (see
        # ExprNode.coerce_to).
        node = BytesNode(self.pos, value=self.value)
        if dst_type.is_pyobject:
            if dst_type in (py_object_type, Builtin.bytes_type):
                node.type = Builtin.bytes_type
            else:
                self.check_for_coercion_error(dst_type, fail=True)
                return node
        elif dst_type == PyrexTypes.c_char_ptr_type:
            node.type = dst_type
            return node
        elif dst_type == PyrexTypes.c_uchar_ptr_type:
            node.type = PyrexTypes.c_char_ptr_type
            return CastNode(node, PyrexTypes.c_uchar_ptr_type)
        elif dst_type.assignable_from(PyrexTypes.c_char_ptr_type):
            node.type = dst_type
            return node
        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return ConstNode.coerce_to(node, dst_type, env)
    def generate_evaluation_code(self, code):
        if self.type.is_pyobject:
            self.result_code = code.get_py_string_const(self.value)
        else:
            self.result_code = code.get_string_const(self.value)
    def get_constant_c_result_code(self):
        return None # FIXME
    def calculate_result_code(self):
        return self.result_code
class UnicodeNode(PyConstNode):
    """A Python unicode object literal."""
    # value        EncodedString
    # bytes_value  BytesLiteral   the literal parsed as bytes string
    #                             ('-3' unicode literals only)
    is_string_literal = True
    bytes_value = None
    type = unicode_type
    def coerce_to(self, dst_type, env):
        if dst_type is self.type:
            pass
        elif dst_type.is_unicode_char:
            if not self.can_coerce_to_char_literal():
                error(self.pos, "Only single-character Unicode string literals or surrogate pairs can be coerced into Py_UCS4/Py_UNICODE.")
                return self
            int_value = ord(self.value)
            return IntNode(self.pos, type=dst_type, value=str(int_value), constant_result=int_value)
        elif not dst_type.is_pyobject:
            if dst_type.is_string and self.bytes_value is not None:
                # special case: '-3' enforced unicode literal used in a C char* context
                return BytesNode(self.pos, value=self.bytes_value).coerce_to(dst_type, env)
            error(self.pos, "Unicode literals do not support coercion to C types other than Py_UNICODE or Py_UCS4.")
        elif dst_type is not py_object_type:
            if not self.check_for_coercion_error(dst_type):
                self.fail_assignment(dst_type)
        return self
    def can_coerce_to_char_literal(self):
        return len(self.value) == 1
        ## or (len(self.value) == 2
        ##     and (0xD800 <= self.value[0] <= 0xDBFF)
        ##     and (0xDC00 <= self.value[1] <= 0xDFFF))
    def contains_surrogates(self):
        # Check if the unicode string contains surrogate code points
        # on a CPython platform with wide (UCS-4) or narrow (UTF-16)
        # Unicode, i.e. characters that would be spelled as two
        # separate code units on a narrow platform.
        for c in map(ord, self.value):
            if c > 65535: # can only happen on wide platforms
                return True
            # We only look for the first code unit (D800-DBFF) of a
            # surrogate pair - if we find one, the other one
            # (DC00-DFFF) is likely there, too.  If we don't find it,
            # any second code unit cannot make for a surrogate pair by
            # itself.
            if c >= 0xD800 and c <= 0xDBFF:
                return True
        return False
    def generate_evaluation_code(self, code):
        self.result_code = code.get_py_string_const(self.value)
    def calculate_result_code(self):
        return self.result_code
    def compile_time_value(self, env):
        return self.value
class StringNode(PyConstNode):
    """A Python 'str' literal: a byte string under Python 2 and a
    unicode string under Python 3.

    value          BytesLiteral (or EncodedString with ASCII content)
    unicode_value  EncodedString or None
    is_identifier  boolean
    """
    type = str_type
    is_string_literal = True
    is_identifier = None
    unicode_value = None

    def coerce_to(self, dst_type, env):
        # Anything that 'str' already satisfies needs no work.
        if dst_type is py_object_type or str_type.subtype_of(dst_type):
            return self
        if not dst_type.is_pyobject:
            # C destination type: reinterpret the literal as bytes and
            # let BytesNode handle the C-level coercion.
            return BytesNode(self.pos, value=self.value).coerce_to(dst_type, env)
        self.check_for_coercion_error(dst_type, fail=True)
        return self

    def can_coerce_to_char_literal(self):
        # Identifiers never coerce to chars; plain one-char literals do.
        return not self.is_identifier and len(self.value) == 1

    def generate_evaluation_code(self, code):
        self.result_code = code.get_py_string_const(
            self.value, identifier=self.is_identifier, is_str=True,
            unicode_value=self.unicode_value)

    def get_constant_c_result_code(self):
        # There is no C-level constant representation of a str object.
        return None

    def calculate_result_code(self):
        return self.result_code

    def compile_time_value(self, env):
        return self.value
class IdentifierStringNode(StringNode):
    # A special str value that represents an identifier (bytes in Py2,
    # unicode in Py3).
    #
    # Setting is_identifier makes StringNode.generate_evaluation_code
    # request an identifier-style interned string constant.
    is_identifier = True
class LongNode(AtomicExprNode):
    """A Python long integer literal.

    value   string   the digits of the literal as written in the source
    """
    type = py_object_type
    gil_message = "Constructing Python long int"

    def calculate_constant_result(self):
        self.constant_result = Utils.str_to_number(self.value)

    def compile_time_value(self, denv):
        return Utils.str_to_number(self.value)

    def analyse_types(self, env):
        # The constructed Python object always lives in a temporary.
        self.is_temp = 1

    def may_be_none(self):
        # A freshly constructed long object is never None.
        return False

    def generate_result_code(self, code):
        # Parse the literal at runtime; base 0 auto-detects the radix.
        construct = '%s = PyLong_FromString((char *)"%s", 0, 0); %s' % (
            self.result(),
            self.value,
            code.error_goto_if_null(self.result(), self.pos))
        code.putln(construct)
        code.put_gotref(self.py_result())
class ImagNode(AtomicExprNode):
    """An imaginary number literal.

    value   float   the imaginary part
    """
    type = PyrexTypes.c_double_complex_type
    gil_message = "Constructing complex number"

    def calculate_constant_result(self):
        self.constant_result = complex(0.0, self.value)

    def compile_time_value(self, denv):
        return complex(0.0, self.value)

    def analyse_types(self, env):
        self.type.create_declaration_utility_code(env)

    def may_be_none(self):
        # A C complex (or fresh PyComplex) result is never None.
        return False

    def coerce_to(self, dst_type, env):
        if dst_type is self.type:
            return self
        # Work on a clone so this node keeps its C complex type.
        node = ImagNode(self.pos, value=self.value)
        if dst_type.is_pyobject:
            node.type = PyrexTypes.py_object_type
            node.is_temp = 1
        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return AtomicExprNode.coerce_to(node, dst_type, env)

    def calculate_result_code(self):
        if self.type.is_pyobject:
            return self.result()
        return "%s(0, %r)" % (self.type.from_parts, float(self.value))

    def generate_result_code(self, code):
        if not self.type.is_pyobject:
            return  # pure C value; nothing to construct at runtime
        code.putln(
            "%s = PyComplex_FromDoubles(0.0, %r); %s" % (
                self.result(),
                float(self.value),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class NewExprNode(AtomicExprNode):
    # C++ new statement
    #
    # cppclass node c++ class to create
    type = None
    def infer_type(self, env):
        # Resolve the C++ class being constructed; the node's type
        # becomes the constructor entry's type.
        type = self.cppclass.analyse_as_type(env)
        if type is None or not type.is_cpp_class:
            error(self.pos, "new operator can only be applied to a C++ class")
            self.type = error_type
            return
        self.cpp_check(env)
        constructor = type.scope.lookup(u'<init>')
        if constructor is None:
            # No constructor declared yet: implicitly declare a default
            # one in the class scope, then look it up again.
            return_type = PyrexTypes.CFuncType(type, [])
            return_type = PyrexTypes.CPtrType(return_type)
            type.scope.declare_cfunction(u'<init>', return_type, self.pos)
            constructor = type.scope.lookup(u'<init>')
        self.class_type = type
        self.entry = constructor
        self.type = constructor.type
        return self.type
    def analyse_types(self, env):
        if self.type is None:
            self.infer_type(env)
    def may_be_none(self):
        return False
    def generate_result_code(self, code):
        # Nothing to emit here; the expression is produced inline by
        # calculate_result_code().
        pass
    def calculate_result_code(self):
        # The C++ allocation expression for the class being created.
        return "new " + self.class_type.declaration_code("")
class NameNode(AtomicExprNode):
    # Reference to a local or global variable name.
    #
    # name string Python name of the variable
    # entry Entry Symbol table entry
    # type_entry Entry For extension type names, the original type entry
    # cf_is_null boolean Is uninitialized before this node
    # cf_maybe_null boolean Maybe uninitialized before this node
    # allow_null boolean Don't raise UnboundLocalError
    # nogil boolean Whether it is used in a nogil context
    is_name = True
    is_cython_module = False
    cython_attribute = None
    lhs_of_first_assignment = False # TODO: remove me
    is_used_as_rvalue = 0
    entry = None
    type_entry = None
    cf_maybe_null = True
    cf_is_null = False
    allow_null = False
    nogil = False
    def as_cython_attribute(self):
        return self.cython_attribute
    def type_dependencies(self, env):
        # Type inference for this node depends on its entry while that
        # entry's type is still unspecified.
        if self.entry is None:
            self.entry = env.lookup(self.name)
        if self.entry is not None and self.entry.type.is_unspecified:
            return (self.entry,)
        else:
            return ()
    def infer_type(self, env):
        if self.entry is None:
            self.entry = env.lookup(self.name)
        if self.entry is None:
            return py_object_type
        elif (self.entry.type.is_extension_type or self.entry.type.is_builtin_type) and \
                self.name == self.entry.type.name:
            # Unfortunately the type attribute of type objects
            # is used for the pointer to the type they represent.
            return type_type
        elif self.entry.type.is_cfunction:
            if self.entry.scope.is_builtin_scope:
                # special case: optimised builtin functions must be treated as Python objects
                return py_object_type
            else:
                # special case: referring to a C function must return its pointer
                return PyrexTypes.CPtrType(self.entry.type)
        else:
            return self.entry.type
    def compile_time_value(self, denv):
        try:
            return denv.lookup(self.name)
        except KeyError:
            error(self.pos, "Compile-time name '%s' not defined" % self.name)
    def get_constant_c_result_code(self):
        # Only non-Python entries have a usable C-level constant name.
        if not self.entry or self.entry.type.is_pyobject:
            return None
        return self.entry.cname
    def coerce_to(self, dst_type, env):
        # If coercing to a generic pyobject and this is a builtin
        # C function with a Python equivalent, manufacture a NameNode
        # referring to the Python builtin.
        #print "NameNode.coerce_to:", self.name, dst_type ###
        if dst_type is py_object_type:
            entry = self.entry
            if entry and entry.is_cfunction:
                var_entry = entry.as_variable
                if var_entry:
                    if var_entry.is_builtin and var_entry.is_const:
                        var_entry = env.declare_builtin(var_entry.name, self.pos)
                    node = NameNode(self.pos, name = self.name)
                    node.entry = var_entry
                    node.analyse_rvalue_entry(env)
                    return node
        return super(NameNode, self).coerce_to(dst_type, env)
    def analyse_as_module(self, env):
        # Try to interpret this as a reference to a cimported module.
        # Returns the module scope, or None.
        entry = self.entry
        if not entry:
            entry = env.lookup(self.name)
        if entry and entry.as_module:
            return entry.as_module
        return None
    def analyse_as_type(self, env):
        # Try to interpret this name as a type; returns the type or None.
        if self.cython_attribute:
            type = PyrexTypes.parse_basic_type(self.cython_attribute)
        else:
            type = PyrexTypes.parse_basic_type(self.name)
        if type:
            return type
        entry = self.entry
        if not entry:
            entry = env.lookup(self.name)
        if entry and entry.is_type:
            return entry.type
        else:
            return None
    def analyse_as_extension_type(self, env):
        # Try to interpret this as a reference to an extension type.
        # Returns the extension type, or None.
        entry = self.entry
        if not entry:
            entry = env.lookup(self.name)
        if entry and entry.is_type and entry.type.is_extension_type:
            return entry.type
        else:
            return None
    def analyse_target_declaration(self, env):
        # Implicitly declare the name in the current scope if it is not
        # already declared there, honouring the type inference directive.
        if not self.entry:
            self.entry = env.lookup_here(self.name)
        if not self.entry:
            if env.directives['warn.undeclared']:
                warning(self.pos, "implicit declaration of '%s'" % self.name, 1)
            if env.directives['infer_types'] != False:
                type = unspecified_type
            else:
                type = py_object_type
            self.entry = env.declare_var(self.name, type, self.pos)
        if self.entry.is_declared_generic:
            self.result_ctype = py_object_type
    def analyse_types(self, env):
        self.initialized_check = env.directives['initializedcheck']
        if self.entry is None:
            self.entry = env.lookup(self.name)
        if not self.entry:
            # Unknown name: fall back to declaring it as a builtin.
            self.entry = env.declare_builtin(self.name, self.pos)
        if not self.entry:
            self.type = PyrexTypes.error_type
            return
        entry = self.entry
        if entry:
            entry.used = 1
            if entry.type.is_buffer:
                import Buffer
                Buffer.used_buffer_aux_vars(entry)
            if entry.utility_code:
                env.use_utility_code(entry.utility_code)
        self.analyse_rvalue_entry(env)
    def analyse_target_types(self, env):
        self.analyse_entry(env)
        if (not self.is_lvalue() and self.entry.is_cfunction and
            self.entry.fused_cfunction and self.entry.as_variable):
            # We need this for the fused 'def' TreeFragment
            self.entry = self.entry.as_variable
            self.type = self.entry.type
        if not self.is_lvalue():
            error(self.pos, "Assignment to non-lvalue '%s'"
                % self.name)
            self.type = PyrexTypes.error_type
        self.entry.used = 1
        if self.entry.type.is_buffer:
            import Buffer
            Buffer.used_buffer_aux_vars(self.entry)
    def analyse_rvalue_entry(self, env):
        #print "NameNode.analyse_rvalue_entry:", self.name ###
        #print "Entry:", self.entry.__dict__ ###
        self.analyse_entry(env)
        entry = self.entry
        if entry.is_declared_generic:
            self.result_ctype = py_object_type
        if entry.is_pyglobal or entry.is_builtin:
            # Globals/builtins need a runtime lookup into a temp,
            # except for cached constant builtins.
            if entry.is_builtin and entry.is_const:
                self.is_temp = 0
            else:
                self.is_temp = 1
                env.use_utility_code(get_name_interned_utility_code)
            self.is_used_as_rvalue = 1
        elif entry.type.is_memoryviewslice:
            self.is_temp = False
            self.is_used_as_rvalue = True
            self.use_managed_ref = True
    def nogil_check(self, env):
        self.nogil = True
        if self.is_used_as_rvalue:
            entry = self.entry
            if entry.is_builtin:
                if not entry.is_const: # cached builtins are ok
                    self.gil_error()
            elif entry.is_pyglobal:
                self.gil_error()
        elif self.entry.type.is_memoryviewslice:
            if self.cf_is_null or self.cf_maybe_null:
                import MemoryView
                MemoryView.err_if_nogil_initialized_check(self.pos, env)
    gil_message = "Accessing Python global or builtin"
    def analyse_entry(self, env):
        #print "NameNode.analyse_entry:", self.name ###
        self.check_identifier_kind()
        entry = self.entry
        type = entry.type
        self.type = type
    def check_identifier_kind(self):
        # Check that this is an appropriate kind of name for use in an
        # expression. Also finds the variable entry associated with
        # an extension type.
        entry = self.entry
        if entry.is_type and entry.type.is_extension_type:
            self.type_entry = entry
        if not (entry.is_const or entry.is_variable
            or entry.is_builtin or entry.is_cfunction
            or entry.is_cpp_class):
                if self.entry.as_variable:
                    self.entry = self.entry.as_variable
                else:
                    error(self.pos,
                          "'%s' is not a constant, variable or function identifier" % self.name)
    def is_simple(self):
        # If it's not a C variable, it'll be in a temp.
        return 1
    def may_be_none(self):
        if self.cf_state and self.type and self.type.is_pyobject:
            # guard against infinite recursion on self-dependencies
            if getattr(self, '_none_checking', False):
                # self-dependency - either this node receives a None
                # value from *another* node, or it can not reference
                # None at this point => safe to assume "not None"
                return False
            self._none_checking = True
            # evaluate control flow state to see if there were any
            # potential None values assigned to the node so far
            may_be_none = False
            for assignment in self.cf_state:
                if assignment.rhs.may_be_none():
                    may_be_none = True
                    break
            del self._none_checking
            return may_be_none
        return super(NameNode, self).may_be_none()
    def nonlocally_immutable(self):
        if ExprNode.nonlocally_immutable(self):
            return True
        entry = self.entry
        return entry and (entry.is_local or entry.is_arg) and not entry.in_closure
    def calculate_target_results(self, env):
        pass
    def check_const(self):
        entry = self.entry
        if entry is not None and not (entry.is_const or entry.is_cfunction or entry.is_builtin):
            self.not_const()
            return False
        return True
    def check_const_addr(self):
        entry = self.entry
        if not (entry.is_cglobal or entry.is_cfunction or entry.is_builtin):
            self.addr_not_const()
            return False
        return True
    def is_lvalue(self):
        return self.entry.is_variable and \
            not self.entry.type.is_array and \
            not self.entry.is_readonly
    def is_addressable(self):
        return self.entry.is_variable
    def is_ephemeral(self):
        # Name nodes are never ephemeral, even if the
        # result is in a temporary.
        return 0
    def calculate_result_code(self):
        entry = self.entry
        if not entry:
            return "<error>" # There was an error earlier
        return entry.cname
    def generate_result_code(self, code):
        # Emit the runtime lookup code for names that are not plain C
        # variables: class-body attributes, globals/builtins, and
        # possibly-unbound locals/closure variables.
        assert hasattr(self, 'entry')
        entry = self.entry
        if entry is None:
            return # There was an error earlier
        if entry.is_builtin and entry.is_const:
            return # Lookup already cached
        elif entry.is_pyclass_attr:
            assert entry.type.is_pyobject, "Python global or builtin not a Python object"
            interned_cname = code.intern_identifier(self.entry.name)
            if entry.is_builtin:
                namespace = Naming.builtins_cname
            else: # entry.is_pyglobal
                namespace = entry.scope.namespace_cname
            if not self.cf_is_null:
                code.putln(
                    '%s = PyObject_GetItem(%s, %s);' % (
                        self.result(),
                        namespace,
                        interned_cname))
            if self.cf_maybe_null:
                # Fall back to a module-level lookup when the class
                # namespace lookup failed (or was skipped entirely).
                if not self.cf_is_null:
                    code.putln('if (unlikely(!%s)) {' % self.result())
                    code.putln('PyErr_Clear();')
                code.putln(
                    '%s = __Pyx_GetName(%s, %s);' % (
                        self.result(),
                        Naming.module_cname,
                        interned_cname))
                if not self.cf_is_null:
                    code.putln("}");
            code.putln(code.error_goto_if_null(self.result(), self.pos))
            code.put_gotref(self.py_result())
        elif entry.is_pyglobal or entry.is_builtin:
            assert entry.type.is_pyobject, "Python global or builtin not a Python object"
            interned_cname = code.intern_identifier(self.entry.name)
            if entry.is_builtin:
                namespace = Naming.builtins_cname
            else: # entry.is_pyglobal
                namespace = entry.scope.namespace_cname
            code.globalstate.use_utility_code(get_name_interned_utility_code)
            code.putln(
                '%s = __Pyx_GetName(%s, %s); %s' % (
                    self.result(),
                    namespace,
                    interned_cname,
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif entry.is_local or entry.in_closure or entry.from_closure or entry.type.is_memoryviewslice:
            # Raise UnboundLocalError for objects and memoryviewslices
            raise_unbound = (
                (self.cf_maybe_null or self.cf_is_null) and not self.allow_null)
            null_code = entry.type.check_for_null_code(entry.cname)
            memslice_check = entry.type.is_memoryviewslice and self.initialized_check
            if null_code and raise_unbound and (entry.type.is_pyobject or memslice_check):
                code.put_error_if_unbound(self.pos, entry, self.in_nogil_context)
    def generate_assignment_code(self, rhs, code):
        # Emit the code that stores rhs into this name, handling
        # Python-level namespaces, buffers, memoryview slices and
        # reference counting for C-level variables.
        #print "NameNode.generate_assignment_code:", self.name ###
        entry = self.entry
        if entry is None:
            return # There was an error earlier
        if (self.entry.type.is_ptr and isinstance(rhs, ListNode)
            and not self.lhs_of_first_assignment):
            error(self.pos, "Literal list must be assigned to pointer at time of declaration")
        # is_pyglobal seems to be True for module level-globals only.
        # We use this to access class->tp_dict if necessary.
        if entry.is_pyglobal:
            assert entry.type.is_pyobject, "Python global or builtin not a Python object"
            interned_cname = code.intern_identifier(self.entry.name)
            namespace = self.entry.scope.namespace_cname
            if entry.is_member:
                # if the entry is a member we have to cheat: SetAttr does not work
                # on types, so we create a descriptor which is then added to tp_dict
                code.put_error_if_neg(self.pos,
                    'PyDict_SetItem(%s->tp_dict, %s, %s)' % (
                        namespace,
                        interned_cname,
                        rhs.py_result()))
                rhs.generate_disposal_code(code)
                rhs.free_temps(code)
                # in Py2.6+, we need to invalidate the method cache
                code.putln("PyType_Modified(%s);" %
                           entry.scope.parent_type.typeptr_cname)
            elif entry.is_pyclass_attr:
                code.put_error_if_neg(self.pos,
                    'PyObject_SetItem(%s, %s, %s)' % (
                        namespace,
                        interned_cname,
                        rhs.py_result()))
                rhs.generate_disposal_code(code)
                rhs.free_temps(code)
            else:
                code.put_error_if_neg(self.pos,
                    'PyObject_SetAttr(%s, %s, %s)' % (
                        namespace,
                        interned_cname,
                        rhs.py_result()))
                if debug_disposal_code:
                    print("NameNode.generate_assignment_code:")
                    print("...generating disposal code for %s" % rhs)
                rhs.generate_disposal_code(code)
                rhs.free_temps(code)
        else:
            if self.type.is_memoryviewslice:
                self.generate_acquire_memoryviewslice(rhs, code)
            elif self.type.is_buffer:
                # Generate code for doing the buffer release/acquisition.
                # This might raise an exception in which case the assignment (done
                # below) will not happen.
                #
                # The reason this is not in a typetest-like node is because the
                # variables that the acquired buffer info is stored to is allocated
                # per entry and coupled with it.
                self.generate_acquire_buffer(rhs, code)
            if self.type.is_pyobject:
                #print "NameNode.generate_assignment_code: to", self.name ###
                #print "...from", rhs ###
                #print "...LHS type", self.type, "ctype", self.ctype() ###
                #print "...RHS type", rhs.type, "ctype", rhs.ctype() ###
                if self.use_managed_ref:
                    rhs.make_owned_reference(code)
                    is_external_ref = entry.is_cglobal or self.entry.in_closure or self.entry.from_closure
                    if is_external_ref:
                        if not self.cf_is_null:
                            if self.cf_maybe_null:
                                code.put_xgotref(self.py_result())
                            else:
                                code.put_gotref(self.py_result())
                    if entry.is_cglobal:
                        code.put_decref(self.result(), self.ctype())
                    else:
                        if not self.cf_is_null:
                            if self.cf_maybe_null:
                                code.put_xdecref(self.result(), self.ctype())
                            else:
                                code.put_decref(self.result(), self.ctype())
                    if is_external_ref:
                        code.put_giveref(rhs.py_result())
            if not self.type.is_memoryviewslice:
                code.putln('%s = %s;' % (self.result(), rhs.result_as(self.ctype())))
                if debug_disposal_code:
                    print("NameNode.generate_assignment_code:")
                    print("...generating post-assignment code for %s" % rhs)
                rhs.generate_post_assignment_code(code)
            elif rhs.result_in_temp():
                rhs.generate_post_assignment_code(code)
            rhs.free_temps(code)
    def generate_acquire_memoryviewslice(self, rhs, code):
        """
        Slices, coercions from objects, return values etc are new references.
        We have a borrowed reference in case of dst = src
        """
        import MemoryView
        MemoryView.put_acquire_memoryviewslice(
            lhs_cname=self.result(),
            lhs_type=self.type,
            lhs_pos=self.pos,
            rhs=rhs,
            code=code,
            have_gil=not self.in_nogil_context,
            first_assignment=self.cf_is_null)
    def generate_acquire_buffer(self, rhs, code):
        # rhstmp is only used in case the rhs is a complicated expression leading to
        # the object, to avoid repeating the same C expression for every reference
        # to the rhs. It does NOT hold a reference.
        pretty_rhs = isinstance(rhs, NameNode) or rhs.is_temp
        if pretty_rhs:
            rhstmp = rhs.result_as(self.ctype())
        else:
            rhstmp = code.funcstate.allocate_temp(self.entry.type, manage_ref=False)
            code.putln('%s = %s;' % (rhstmp, rhs.result_as(self.ctype())))
        import Buffer
        Buffer.put_assign_to_buffer(self.result(), rhstmp, self.entry,
                                    is_initialized=not self.lhs_of_first_assignment,
                                    pos=self.pos, code=code)
        if not pretty_rhs:
            code.putln("%s = 0;" % rhstmp)
            code.funcstate.release_temp(rhstmp)
    def generate_deletion_code(self, code):
        # Emit 'del name' code, dispatching on the kind of namespace
        # (class body, module global, or C-level object/memoryview).
        if self.entry is None:
            return # There was an error earlier
        elif self.entry.is_pyclass_attr:
            namespace = self.entry.scope.namespace_cname
            interned_cname = code.intern_identifier(self.entry.name)
            code.put_error_if_neg(self.pos,
                'PyMapping_DelItem(%s, %s)' % (
                    namespace,
                    interned_cname))
        elif self.entry.is_pyglobal:
            code.put_error_if_neg(self.pos,
                '__Pyx_DelAttrString(%s, "%s")' % (
                    Naming.module_cname,
                    self.entry.name))
        elif self.entry.type.is_pyobject or self.entry.type.is_memoryviewslice:
            if not self.cf_is_null:
                if self.cf_maybe_null:
                    code.put_error_if_unbound(self.pos, self.entry)
                if self.entry.type.is_pyobject:
                    code.put_decref(self.result(), self.ctype())
                    code.putln('%s = NULL;' % self.result())
                else:
                    code.put_xdecref_memoryviewslice(self.entry.cname,
                                                     have_gil=not self.nogil)
        else:
            error(self.pos, "Deletion of C names not supported")
    def annotate(self, code):
        # Mark call sites in the annotated HTML output.
        if hasattr(self, 'is_called') and self.is_called:
            pos = (self.pos[0], self.pos[1], self.pos[2] - len(self.name) - 1)
            if self.type.is_pyobject:
                code.annotate(pos, AnnotationItem('py_call', 'python function', size=len(self.name)))
            else:
                code.annotate(pos, AnnotationItem('c_call', 'c function', size=len(self.name)))
class BackquoteNode(ExprNode):
    """Python 2 backquote (repr) expression: `expr`.

    arg   ExprNode
    """
    type = py_object_type
    subexprs = ['arg']
    gil_message = "Backquote expression"

    def analyse_types(self, env):
        self.arg.analyse_types(env)
        # repr() operates on Python objects only.
        self.arg = self.arg.coerce_to_pyobject(env)
        self.is_temp = 1

    def calculate_constant_result(self):
        self.constant_result = repr(self.arg.constant_result)

    def generate_result_code(self, code):
        repr_call = "%s = PyObject_Repr(%s); %s" % (
            self.result(),
            self.arg.py_result(),
            code.error_goto_if_null(self.result(), self.pos))
        code.putln(repr_call)
        code.put_gotref(self.py_result())
class ImportNode(ExprNode):
    """Part of the import statement implementation.

    Implements::

        result = __import__(module_name, globals(), None, name_list, level)

    module_name   StringNode         dotted module name; an empty name means
                                     importing the parent package according
                                     to 'level'
    name_list     ListNode or None   names to be imported
    level         int                relative import level:
                                     -1: try relative then absolute import;
                                      0: absolute import;
                                     >0: number of parent directories to
                                         search relative to the current
                                         module;
                                     None: choose from the language level
                                         and directives
    """
    type = py_object_type
    subexprs = ['module_name', 'name_list']
    gil_message = "Python import"

    def analyse_types(self, env):
        if self.level is None:
            # Default import semantics: Python-2 style (relative first)
            # under language_level < 3 or the py2_import directive,
            # absolute-only otherwise.
            if env.directives['language_level'] < 3 or env.directives['py2_import']:
                self.level = -1
            else:
                self.level = 0
        self.module_name.analyse_types(env)
        self.module_name = self.module_name.coerce_to_pyobject(env)
        if self.name_list:
            self.name_list.analyse_types(env)
            self.name_list.coerce_to_pyobject(env)
        self.is_temp = 1
        env.use_utility_code(import_utility_code)

    def generate_result_code(self, code):
        if self.name_list:
            name_list_code = self.name_list.py_result()
        else:
            name_list_code = "0"
        import_call = "%s = __Pyx_Import(%s, %s, %d); %s" % (
            self.result(),
            self.module_name.py_result(),
            name_list_code,
            self.level,
            code.error_goto_if_null(self.result(), self.pos))
        code.putln(import_call)
        code.put_gotref(self.py_result())
class IteratorNode(ExprNode):
    # Used as part of for statement implementation.
    #
    # Implements result = iter(sequence)
    #
    # sequence ExprNode
    type = py_object_type
    iter_func_ptr = None
    counter_cname = None
    reversed = False # currently only used for list/tuple types (see Optimize.py)
    subexprs = ['sequence']
    def analyse_types(self, env):
        self.sequence.analyse_types(env)
        if (self.sequence.type.is_array or self.sequence.type.is_ptr) and \
                not self.sequence.type.is_string:
            # C array iteration will be transformed later on
            self.type = self.sequence.type
        else:
            self.sequence = self.sequence.coerce_to_pyobject(env)
            if self.sequence.type is list_type or \
                    self.sequence.type is tuple_type:
                self.sequence = self.sequence.as_none_safe_node("'NoneType' object is not iterable")
        self.is_temp = 1
    gil_message = "Iterating over Python object"
    # C type of a tp_iternext slot function: PyObject *(*)(PyObject *).
    _func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
        PyrexTypes.py_object_type, [
            PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
            ]))
    def generate_result_code(self, code):
        # Emit the iterator setup: for (possible) lists/tuples, keep the
        # sequence itself plus an index counter; otherwise call
        # PyObject_GetIter() and cache the tp_iternext function pointer.
        sequence_type = self.sequence.type
        if sequence_type.is_array or sequence_type.is_ptr:
            raise InternalError("for in carray slice not transformed")
        is_builtin_sequence = sequence_type is list_type or \
                              sequence_type is tuple_type
        if not is_builtin_sequence:
            # reversed() not currently optimised (see Optimize.py)
            assert not self.reversed, "internal error: reversed() only implemented for list/tuple objects"
        self.may_be_a_sequence = not sequence_type.is_builtin_type
        if self.may_be_a_sequence:
            code.putln(
                "if (PyList_CheckExact(%s) || PyTuple_CheckExact(%s)) {" % (
                    self.sequence.py_result(),
                    self.sequence.py_result()))
        if is_builtin_sequence or self.may_be_a_sequence:
            self.counter_cname = code.funcstate.allocate_temp(
                PyrexTypes.c_py_ssize_t_type, manage_ref=False)
            if self.reversed:
                # Reversed iteration starts the counter at the last index.
                if sequence_type is list_type:
                    init_value = 'PyList_GET_SIZE(%s) - 1' % self.result()
                else:
                    init_value = 'PyTuple_GET_SIZE(%s) - 1' % self.result()
            else:
                init_value = '0'
            code.putln(
                "%s = %s; __Pyx_INCREF(%s); %s = %s;" % (
                    self.result(),
                    self.sequence.py_result(),
                    self.result(),
                    self.counter_cname,
                    init_value
                    ))
        if not is_builtin_sequence:
            self.iter_func_ptr = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
            if self.may_be_a_sequence:
                # NULL iter_func_ptr marks the "it's a list/tuple" case
                # for the per-item dispatch below.
                code.putln("%s = NULL;" % self.iter_func_ptr)
                code.putln("} else {")
                code.put("%s = -1; " % self.counter_cname)
            code.putln("%s = PyObject_GetIter(%s); %s" % (
                self.result(),
                self.sequence.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
            code.putln("%s = Py_TYPE(%s)->tp_iternext;" % (self.iter_func_ptr, self.py_result()))
        if self.may_be_a_sequence:
            code.putln("}")
    def generate_next_sequence_item(self, test_name, result_name, code):
        # Emit the indexed access for a known list/tuple ('test_name' is
        # either 'List' or 'Tuple'), including the loop-exit bounds test.
        assert self.counter_cname, "internal error: counter_cname temp not prepared"
        code.putln(
            "if (%s >= Py%s_GET_SIZE(%s)) break;" % (
                self.counter_cname,
                test_name,
                self.py_result()))
        if self.reversed:
            inc_dec = '--'
        else:
            inc_dec = '++'
        code.putln(
            "%s = Py%s_GET_ITEM(%s, %s); __Pyx_INCREF(%s); %s%s;" % (
                result_name,
                test_name,
                self.py_result(),
                self.counter_cname,
                result_name,
                self.counter_cname,
                inc_dec))
    def generate_iter_next_result_code(self, result_name, code):
        # Emit the per-iteration fetch: fast indexed access for
        # lists/tuples, otherwise a call through the cached tp_iternext
        # pointer with StopIteration handling.
        sequence_type = self.sequence.type
        if self.reversed:
            code.putln("if (%s < 0) break;" % self.counter_cname)
        if sequence_type is list_type:
            self.generate_next_sequence_item('List', result_name, code)
            return
        elif sequence_type is tuple_type:
            self.generate_next_sequence_item('Tuple', result_name, code)
            return
        if self.may_be_a_sequence:
            for test_name in ('List', 'Tuple'):
                code.putln("if (!%s && Py%s_CheckExact(%s)) {" % (
                    self.iter_func_ptr, test_name, self.py_result()))
                self.generate_next_sequence_item(test_name, result_name, code)
                code.put("} else ")
            code.putln("{")
        code.putln(
            "%s = %s(%s);" % (
                result_name,
                self.iter_func_ptr,
                self.py_result()))
        code.putln("if (unlikely(!%s)) {" % result_name)
        code.putln("if (PyErr_Occurred()) {")
        code.putln("if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) PyErr_Clear();")
        code.putln("else %s" % code.error_goto(self.pos))
        code.putln("}")
        code.putln("break;")
        code.putln("}")
        code.put_gotref(result_name)
        if self.may_be_a_sequence:
            code.putln("}")
    def free_temps(self, code):
        if self.counter_cname:
            code.funcstate.release_temp(self.counter_cname)
        if self.iter_func_ptr:
            code.funcstate.release_temp(self.iter_func_ptr)
            self.iter_func_ptr = None
        ExprNode.free_temps(self, code)
class NextNode(AtomicExprNode):
    """Fetches the next item from an iterator; part of 'for' statement
    code generation and created during the analyse_types phase.

    The iterator is not owned by this node.

    iterator   IteratorNode
    """
    type = py_object_type

    def __init__(self, iterator):
        self.pos = iterator.pos
        self.iterator = iterator
        iterator_type = iterator.type
        if iterator_type.is_ptr or iterator_type.is_array:
            # C array iteration yields the element type directly.
            self.type = iterator_type.base_type
        self.is_temp = 1

    def generate_result_code(self, code):
        # The iterator node knows how to produce the next item.
        self.iterator.generate_iter_next_result_code(self.result(), code)
class WithExitCallNode(ExprNode):
    # The __exit__() call of a 'with' statement. Used in both the
    # except and finally clauses.
    # with_stat WithStatNode the surrounding 'with' statement
    # args TupleNode or ResultStatNode the exception info tuple
    subexprs = ['args']
    def analyse_types(self, env):
        self.args.analyse_types(env)
        # The C boolean truth value of the __exit__() result.
        self.type = PyrexTypes.c_bint_type
        self.is_temp = True
    def generate_result_code(self, code):
        if isinstance(self.args, TupleNode):
            # call only if it was not already called (and decref-cleared)
            code.putln("if (%s) {" % self.with_stat.exit_var)
        result_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
        code.putln("%s = PyObject_Call(%s, %s, NULL);" % (
            result_var,
            self.with_stat.exit_var,
            self.args.result()))
        # The exit function is cleared before the error check so it is
        # not called again during unwinding.
        code.put_decref_clear(self.with_stat.exit_var, type=py_object_type)
        code.putln(code.error_goto_if_null(result_var, self.pos))
        code.put_gotref(result_var)
        # Reduce the Python result object to a C boolean.
        code.putln("%s = __Pyx_PyObject_IsTrue(%s);" % (self.result(), result_var))
        code.put_decref_clear(result_var, type=py_object_type)
        code.putln(code.error_goto_if_neg(self.result(), self.pos))
        code.funcstate.release_temp(result_var)
        if isinstance(self.args, TupleNode):
            code.putln("}")
class ExcValueNode(AtomicExprNode):
    """Fetches the current exception value.

    Created during the analyse_types phase of an ExceptClauseNode; the
    actual C name of the value is injected later via set_var().
    """
    type = py_object_type

    def __init__(self, pos, env):
        # 'env' is accepted for interface symmetry but is unused here.
        ExprNode.__init__(self, pos)

    def set_var(self, var):
        self.var = var

    def calculate_result_code(self):
        return self.var

    def generate_result_code(self, code):
        # Nothing to emit: the exception value is provided externally.
        pass

    def analyse_types(self, env):
        # Nothing to analyse: the type is fixed to py_object_type.
        pass
class TempNode(ExprNode):
    # Node created during analyse_types phase
    # of some nodes to hold a temporary value.
    #
    # Note: One must call "allocate" and "release" on
    # the node during code generation to get/release the temp.
    # This is because the temp result is often used outside of
    # the regular cycle.
    subexprs = []
    def __init__(self, pos, type, env=None):
        ExprNode.__init__(self, pos)
        self.type = type
        if type.is_pyobject:
            self.result_ctype = py_object_type
        self.is_temp = 1
    def analyse_types(self, env):
        return self.type
    def analyse_target_declaration(self, env):
        pass
    def generate_result_code(self, code):
        pass
    def allocate(self, code):
        # Must be paired with a release() call after the last use.
        self.temp_cname = code.funcstate.allocate_temp(self.type, manage_ref=True)
    def release(self, code):
        code.funcstate.release_temp(self.temp_cname)
        self.temp_cname = None
    def result(self):
        try:
            return self.temp_cname
        # Only catch the missing-attribute case from allocate() never
        # having been called; a bare 'except:' here would also swallow
        # KeyboardInterrupt/SystemExit raised during code generation.
        except AttributeError:
            assert False, "Remember to call allocate/release on TempNode"
            raise
    # Do not participate in normal temp alloc/dealloc:
    def allocate_temp_result(self, code):
        pass
    def release_temp_result(self, code):
        pass
class PyTempNode(TempNode):
    # TempNode holding a Python value.
    def __init__(self, pos, env):
        # Fixes the temp's type to py_object_type.
        TempNode.__init__(self, pos, PyrexTypes.py_object_type, env)
class RawCNameExprNode(ExprNode):
    """Expression node standing for a raw C name that is supplied
    externally via set_cname()."""
    subexprs = []

    def __init__(self, pos, type=None):
        self.pos = pos
        self.type = type

    def analyse_types(self, env):
        return self.type

    def set_cname(self, cname):
        self.cname = cname

    def result(self):
        return self.cname

    def generate_result_code(self, code):
        # Nothing to emit: the C name already refers to an existing value.
        pass
#-------------------------------------------------------------------
#
# Parallel nodes (cython.parallel.thread(savailable|id))
#
#-------------------------------------------------------------------
class ParallelThreadsAvailableNode(AtomicExprNode):
    """
    Note: this is disabled and not a valid directive at this moment

    Implements cython.parallel.threadsavailable(). If we are called from the
    sequential part of the application, we need to call omp_get_max_threads(),
    and in the parallel part we can just call omp_get_num_threads()
    """
    type = PyrexTypes.c_int_type
    def analyse_types(self, env):
        self.is_temp = True
        # env.add_include_file("omp.h")
        return self.type
    def generate_result_code(self, code):
        # Inside a parallel region, report the current team size
        # (omp_get_num_threads); in sequential code, report how many
        # threads a parallel region would get (omp_get_max_threads).
        # The previous code had these two calls swapped, contradicting
        # the documented semantics above.
        code.putln("#ifdef _OPENMP")
        code.putln("if (omp_in_parallel()) %s = omp_get_num_threads();" %
                                                            self.temp_code)
        code.putln("else %s = omp_get_max_threads();" % self.temp_code)
        code.putln("#else")
        # Without OpenMP there is only ever one thread.
        code.putln("%s = 1;" % self.temp_code)
        code.putln("#endif")
    def result(self):
        return self.temp_code
class ParallelThreadIdNode(AtomicExprNode): #, Nodes.ParallelNode):
    """
    Implements cython.parallel.threadid()
    """
    type = PyrexTypes.c_int_type

    def analyse_types(self, env):
        self.is_temp = True
        # env.add_include_file("omp.h")
        return self.type

    def generate_result_code(self, code):
        tid = self.temp_code
        # Without OpenMP there is only the main thread, whose id is 0.
        code.putln("#ifdef _OPENMP")
        code.putln("%s = omp_get_thread_num();" % tid)
        code.putln("#else")
        code.putln("%s = 0;" % tid)
        code.putln("#endif")

    def result(self):
        return self.temp_code
#-------------------------------------------------------------------
#
# Trailer nodes
#
#-------------------------------------------------------------------
class IndexNode(ExprNode):
    #  Sequence indexing.
    #
    #  base     ExprNode
    #  index    ExprNode
    #  indices  [ExprNode]
    #  is_buffer_access boolean Whether this is a buffer access.
    #
    #  indices is used on buffer access, index on non-buffer access.
    #  The former contains a clean list of index parameters, the
    #  latter whatever Python object is needed for index access.
    #
    #  is_fused_index boolean   Whether the index is used to specialize a
    #                           c(p)def function

    subexprs = ['base', 'index', 'indices']
    indices = None

    is_fused_index = False

    # Whether we're assigning to a buffer (in that case it needs to be
    # writable)
    writable_needed = False

    # Whether we are indexing or slicing a memoryviewslice
    memslice_index = False
    memslice_slice = False
    is_memslice_copy = False
    memslice_ellipsis_noop = False
    # class-level flag: the "untyped index" warning is only emitted once
    # per compilation (see analyse_base_and_index_types)
    warned_untyped_idx = False
    # set by SingleAssignmentNode after analyse_types()
    is_memslice_scalar_assignment = False

    def __init__(self, pos, index, *args, **kw):
        ExprNode.__init__(self, pos, index=index, *args, **kw)
        # keep the original index around; self.index may be set to None
        # during buffer/memoryview analysis
        self._index = index
def calculate_constant_result(self):
self.constant_result = \
self.base.constant_result[self.index.constant_result]
    def compile_time_value(self, denv):
        # Evaluate base[index] in the compile-time environment; any failure
        # is reported as a compile-time error rather than propagated.
        base = self.base.compile_time_value(denv)
        index = self.index.compile_time_value(denv)
        try:
            return base[index]
        except Exception, e:
            self.compile_time_value_error(e)
    def is_ephemeral(self):
        # An index result is only as long-lived as the object it indexes into.
        return self.base.is_ephemeral()
def is_simple(self):
if self.is_buffer_access or self.memslice_index:
return False
elif self.memslice_slice:
return True
base = self.base
return (base.is_simple() and self.index.is_simple()
and base.type and (base.type.is_ptr or base.type.is_array))
    def analyse_target_declaration(self, env):
        # Indexing targets declare nothing; only the base's declaration matters.
        pass
    def analyse_as_type(self, env):
        # Try to interpret this subscript as a type: either a templated C++
        # class (base[T1, T2]) or a C array type (base[N]).  Returns None if
        # the base is not a non-Python type.
        base_type = self.base.analyse_as_type(env)
        if base_type and not base_type.is_pyobject:
            if base_type.is_cpp_class:
                # template arguments may come as a tuple or a single node
                if isinstance(self.index, TupleNode):
                    template_values = self.index.args
                else:
                    template_values = [self.index]
                import Nodes
                type_node = Nodes.TemplatedTypeNode(
                    pos = self.pos,
                    positional_args = template_values,
                    keyword_args = None)
                return type_node.analyse(env, base_type = base_type)
            else:
                # array size must be a compile-time constant
                return PyrexTypes.CArrayType(base_type, int(self.index.compile_time_value(env)))
        return None
    def type_dependencies(self, env):
        # Inference of this node depends on both the base and the index.
        return self.base.type_dependencies(env) + self.index.type_dependencies(env)
    def infer_type(self, env):
        # Infer the result type of base[index] without full analysis.
        base_type = self.base.infer_type(env)
        if isinstance(self.index, SliceNode):
            # slicing!
            if base_type.is_string:
                # sliced C strings must coerce to Python
                return bytes_type
            elif base_type in (unicode_type, bytes_type, str_type, list_type, tuple_type):
                # slicing these returns the same type
                return base_type
            else:
                # TODO: Handle buffers (hopefully without too much redundancy).
                return py_object_type

        index_type = self.index.infer_type(env)
        if index_type and index_type.is_int or isinstance(self.index, (IntNode, LongNode)):
            # indexing!
            if base_type is unicode_type:
                # Py_UCS4 will automatically coerce to a unicode string
                # if required, so this is safe. We only infer Py_UCS4
                # when the index is a C integer type. Otherwise, we may
                # need to use normal Python item access, in which case
                # it's faster to return the one-char unicode string than
                # to receive it, throw it away, and potentially rebuild it
                # on a subsequent PyObject coercion.
                return PyrexTypes.c_py_ucs4_type
            elif base_type is str_type:
                # always returns str - Py2: bytes, Py3: unicode
                return base_type
            elif isinstance(self.base, BytesNode):
                #if env.global_scope().context.language_level >= 3:
                #    # infering 'char' can be made to work in Python 3 mode
                #    return PyrexTypes.c_char_type
                # Py2/3 return different types on indexing bytes objects
                return py_object_type
            elif base_type.is_ptr or base_type.is_array:
                return base_type.base_type

        # may be slicing or indexing, we don't know
        if base_type in (unicode_type, str_type):
            # these types always returns their own type on Python indexing/slicing
            return base_type
        else:
            # TODO: Handle buffers (hopefully without too much redundancy).
            return py_object_type
    def analyse_types(self, env):
        # rvalue context: delegate with getting=1
        self.analyse_base_and_index_types(env, getting = 1)
    def analyse_target_types(self, env):
        # lvalue context: delegate with setting=1 and verify assignability
        self.analyse_base_and_index_types(env, setting = 1)
        if not self.is_lvalue():
            error(self.pos, "Assignment to non-lvalue of type '%s'" % self.type)
    def analyse_base_and_index_types(self, env, getting = 0, setting = 0, analyse_base = True):
        # Core type analysis for all flavours of subscripting: Python object
        # indexing/slicing, buffer access, memoryview indexing/slicing/copy,
        # C pointer/array indexing, C++ operator[], and fused-function
        # specialization.  Exactly one of `getting`/`setting` is expected to
        # be set by the caller.
        #
        # Note: This might be cleaned up by having IndexNode
        # parsed in a saner way and only construct the tuple if
        # needed.

        # Note that this function must leave IndexNode in a cloneable state.
        # For buffers, self.index is packed out on the initial analysis, and
        # when cloning self.indices is copied.
        self.is_buffer_access = False

        # a[...] = b
        self.is_memslice_copy = False
        # incomplete indexing, Ellipsis indexing or slicing
        self.memslice_slice = False
        # integer indexing
        self.memslice_index = False

        if analyse_base:
            self.base.analyse_types(env)

        if self.base.type.is_error:
            # Do not visit child tree if base is undeclared to avoid confusing
            # error messages
            self.type = PyrexTypes.error_type
            return

        is_slice = isinstance(self.index, SliceNode)

        # Potentially overflowing index value.
        if not is_slice and isinstance(self.index, IntNode) and Utils.long_literal(self.index.value):
            self.index = self.index.coerce_to_pyobject(env)

        is_memslice = self.base.type.is_memoryviewslice

        # Handle the case where base is a literal char* (and we expect a string, not an int)
        if not is_memslice and (isinstance(self.base, BytesNode) or is_slice):
            if self.base.type.is_string or not (self.base.type.is_ptr or self.base.type.is_array):
                self.base = self.base.coerce_to_pyobject(env)

        skip_child_analysis = False
        buffer_access = False

        # normalize to a flat list of index expressions
        if self.indices:
            indices = self.indices
        elif isinstance(self.index, TupleNode):
            indices = self.index.args
        else:
            indices = [self.index]

        if (is_memslice and not self.indices and
                isinstance(self.index, EllipsisNode)):
            # Memoryviewslice copying
            self.is_memslice_copy = True

        elif is_memslice:
            # memoryviewslice indexing or slicing
            import MemoryView

            skip_child_analysis = True
            have_slices, indices = MemoryView.unellipsify(indices,
                                                          self.base.type.ndim)
            self.memslice_index = len(indices) == self.base.type.ndim
            axes = []

            index_type = PyrexTypes.c_py_ssize_t_type
            new_indices = []

            if len(indices) > self.base.type.ndim:
                self.type = error_type
                return error(indices[self.base.type.ndim].pos,
                             "Too many indices specified for type %s" %
                             self.base.type)

            suboffsets_dim = -1
            for i, index in enumerate(indices[:]):
                index.analyse_types(env)
                access, packing = self.base.type.axes[i]
                if isinstance(index, SliceNode):
                    suboffsets_dim = i
                    self.memslice_slice = True
                    # a stepped slice loses any contiguity guarantee
                    if index.step.is_none:
                        axes.append((access, packing))
                    else:
                        axes.append((access, 'strided'))

                    # Coerce start, stop and step to temps of the right type
                    for attr in ('start', 'stop', 'step'):
                        value = getattr(index, attr)
                        if not value.is_none:
                            value = value.coerce_to(index_type, env)
                            #value = value.coerce_to_temp(env)
                            setattr(index, attr, value)
                            new_indices.append(value)

                elif index.type.is_int or index.type.is_pyobject:
                    if index.type.is_pyobject and not self.warned_untyped_idx:
                        warning(index.pos, "Index should be typed for more "
                                           "efficient access", level=2)
                        # class-level flag: warn only once per compilation
                        IndexNode.warned_untyped_idx = True

                    self.memslice_index = True
                    index = index.coerce_to(index_type, env)
                    indices[i] = index
                    new_indices.append(index)

                else:
                    self.type = error_type
                    return error(index.pos, "Invalid index for memoryview specified")

            # any slice component demotes full integer indexing
            self.memslice_index = self.memslice_index and not self.memslice_slice
            self.original_indices = indices
            # All indices with all start/stop/step for slices.
            # We need to keep this around
            self.indices = new_indices
            self.env = env

        elif self.base.type.is_buffer:
            # Buffer indexing
            if len(indices) == self.base.type.ndim:
                buffer_access = True
                skip_child_analysis = True
                for x in indices:
                    x.analyse_types(env)
                    if not x.type.is_int:
                        buffer_access = False

            if buffer_access and not self.base.type.is_memoryviewslice:
                assert hasattr(self.base, "entry") # Must be a NameNode-like node

        # On cloning, indices is cloned. Otherwise, unpack index into indices
        assert not (buffer_access and isinstance(self.index, CloneNode))

        self.nogil = env.nogil

        if buffer_access or self.memslice_index:
            if self.base.type.is_memoryviewslice and not self.base.is_name:
                self.base = self.base.coerce_to_temp(env)

            self.indices = indices
            self.index = None
            self.type = self.base.type.dtype
            self.is_buffer_access = True
            self.buffer_type = self.base.type #self.base.entry.type

            if getting and self.type.is_pyobject:
                self.is_temp = True

            if setting and self.base.type.is_memoryviewslice:
                self.base.type.writable_needed = True
            elif setting:
                if not self.base.entry.type.writable:
                    error(self.pos, "Writing to readonly buffer")
                else:
                    self.writable_needed = True
                    if self.base.type.is_buffer:
                        self.base.entry.buffer_aux.writable_needed = True

        elif self.is_memslice_copy:
            self.type = self.base.type
            if getting:
                # reading memslice[...] is a no-op; copying happens on assignment
                self.memslice_ellipsis_noop = True
            else:
                self.memslice_broadcast = True

        elif self.memslice_slice:
            self.index = None
            self.is_temp = True
            self.use_managed_ref = True
            self.type = PyrexTypes.MemoryViewSliceType(
                            self.base.type.dtype, axes)

            if (self.base.type.is_memoryviewslice and not
                    self.base.is_name and not
                    self.base.result_in_temp()):
                self.base = self.base.coerce_to_temp(env)

            if setting:
                self.memslice_broadcast = True

        else:
            base_type = self.base.type

            fused_index_operation = base_type.is_cfunction and base_type.is_fused
            if not fused_index_operation:
                if isinstance(self.index, TupleNode):
                    self.index.analyse_types(env, skip_children=skip_child_analysis)
                elif not skip_child_analysis:
                    self.index.analyse_types(env)
                self.original_index_type = self.index.type

            if base_type.is_unicode_char:
                # we infer Py_UNICODE/Py_UCS4 for unicode strings in some
                # cases, but indexing must still work for them
                if self.index.constant_result in (0, -1):
                    # FIXME: we know that this node is redundant -
                    # currently, this needs to get handled in Optimize.py
                    pass
                self.base = self.base.coerce_to_pyobject(env)
                base_type = self.base.type
            if base_type.is_pyobject:
                if self.index.type.is_int:
                    # fast path: direct C indexing of list/tuple/unicode when
                    # neither wraparound nor boundscheck semantics are needed
                    if (not setting
                        and (base_type in (list_type, tuple_type, unicode_type))
                        and (not self.index.type.signed
                             or not env.directives['wraparound']
                             or isinstance(self.index, IntNode) and int(self.index.value) >= 0)
                        and not env.directives['boundscheck']):
                        self.is_temp = 0
                    else:
                        self.is_temp = 1
                    self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env).coerce_to_simple(env)
                else:
                    self.index = self.index.coerce_to_pyobject(env)
                    self.is_temp = 1
                if self.index.type.is_int and base_type is unicode_type:
                    # Py_UNICODE/Py_UCS4 will automatically coerce to a unicode string
                    # if required, so this is fast and safe
                    self.type = PyrexTypes.c_py_ucs4_type
                elif is_slice and base_type in (bytes_type, str_type, unicode_type, list_type, tuple_type):
                    self.type = base_type
                else:
                    self.type = py_object_type
            else:
                if base_type.is_ptr or base_type.is_array:
                    self.type = base_type.base_type
                    if is_slice:
                        self.type = base_type
                    elif self.index.type.is_pyobject:
                        self.index = self.index.coerce_to(
                            PyrexTypes.c_py_ssize_t_type, env)
                    elif not self.index.type.is_int:
                        error(self.pos,
                              "Invalid index type '%s'" %
                              self.index.type)
                elif base_type.is_cpp_class:
                    function = env.lookup_operator("[]", [self.base, self.index])
                    if function is None:
                        error(self.pos, "Indexing '%s' not supported for index type '%s'" % (base_type, self.index.type))
                        self.type = PyrexTypes.error_type
                        self.result_code = "<error>"
                        return
                    func_type = function.type
                    if func_type.is_ptr:
                        func_type = func_type.base_type
                    self.index = self.index.coerce_to(func_type.args[0].type, env)
                    self.type = func_type.return_type
                    if setting and not func_type.return_type.is_reference:
                        error(self.pos, "Can't set non-reference result '%s'" % self.type)
                elif fused_index_operation:
                    self.parse_indexed_fused_cdef(env)
                else:
                    error(self.pos,
                          "Attempting to index non-array type '%s'" %
                          base_type)
                    self.type = PyrexTypes.error_type
    def parse_indexed_fused_cdef(self, env):
        """
        Interpret fused_cdef_func[specific_type1, ...]

        Note that if this method is called, we are an indexed cdef function
        with fused argument types, and this IndexNode will be replaced by the
        NameNode with specific entry just after analysis of expressions by
        AnalyseExpressionsTransform.
        """
        self.type = PyrexTypes.error_type

        self.is_fused_index = True

        base_type = self.base.type
        specific_types = []
        positions = []

        # collect the index expressions interpreted as types
        if self.index.is_name:
            positions.append(self.index.pos)
            specific_types.append(self.index.analyse_as_type(env))
        elif isinstance(self.index, TupleNode):
            for arg in self.index.args:
                positions.append(arg.pos)
                specific_type = arg.analyse_as_type(env)
                specific_types.append(specific_type)
        else:
            specific_types = [False]

        if not Utils.all(specific_types):
            # not all indices were types: only valid for a cpdef function,
            # which may be indexed with Python objects via its as_variable
            self.index.analyse_types(env)

            if not self.base.entry.as_variable:
                error(self.pos, "Can only index fused functions with types")
            else:
                # A cpdef function indexed with Python objects
                self.base.entry = self.entry = self.base.entry.as_variable
                self.base.type = self.type = self.entry.type

                self.base.is_temp = True
                self.is_temp = True

                self.entry.used = True

            self.is_fused_index = False
            return

        fused_types = base_type.get_fused_types()
        if len(specific_types) > len(fused_types):
            return error(self.pos, "Too many types specified")
        elif len(specific_types) < len(fused_types):
            t = fused_types[len(specific_types)]
            return error(self.pos, "Not enough types specified to specialize "
                                   "the function, %s is still fused" % t)

        # See if our index types form valid specializations
        for pos, specific_type, fused_type in zip(positions,
                                                  specific_types,
                                                  fused_types):
            if not Utils.any([specific_type.same_as(t)
                                  for t in fused_type.types]):
                return error(pos, "Type not in fused type")

            if specific_type is None or specific_type.is_error:
                return

        fused_to_specific = dict(zip(fused_types, specific_types))
        type = base_type.specialize(fused_to_specific)

        if type.is_fused:
            # Only partially specific, this is invalid
            error(self.pos,
                  "Index operation makes function only partially specific")
        else:
            # Fully specific, find the signature with the specialized entry
            for signature in self.base.type.get_all_specialized_function_types():
                if type.same_as(signature):
                    self.type = signature

                    if self.base.is_attribute:
                        # Pretend to be a normal attribute, for cdef extension
                        # methods
                        self.entry = signature.entry
                        self.is_attribute = True
                        self.obj = self.base.obj

                    self.type.entry.used = True
                    self.base.type = signature
                    self.base.entry = signature.entry

                    break
            else:
                # This is a bug
                raise InternalError("Couldn't find the right signature")
    gil_message = "Indexing Python object"

    def nogil_check(self, env):
        # Buffer/memoryview access is allowed without the GIL, but warn when
        # boundscheck would require it and reject object dtypes outright.
        if self.is_buffer_access or self.memslice_index or self.memslice_slice:
            if not self.memslice_slice and env.directives['boundscheck']:
                # error(self.pos, "Cannot check buffer index bounds without gil; "
                #                 "use boundscheck(False) directive")
                warning(self.pos, "Use boundscheck(False) for faster access",
                        level=1)
            if self.type.is_pyobject:
                error(self.pos, "Cannot access buffer with object dtype without gil")
                return
        super(IndexNode, self).nogil_check(env)
    def check_const_addr(self):
        # base[index] has a constant address iff the base does and the
        # index itself is constant.
        return self.base.check_const_addr() and self.index.check_const()
def is_lvalue(self):
base_type = self.base.type
if self.type.is_ptr or self.type.is_array:
return not base_type.base_type.is_array
else:
return True
    def calculate_result_code(self):
        # Emit the C expression for this subscript, choosing the fastest
        # accessor available for the base type.
        if self.is_buffer_access:
            return "(*%s)" % self.buffer_ptr_code
        elif self.is_memslice_copy:
            return self.base.result()
        elif self.base.type is list_type:
            return "PyList_GET_ITEM(%s, %s)" % (self.base.result(), self.index.result())
        elif self.base.type is tuple_type:
            return "PyTuple_GET_ITEM(%s, %s)" % (self.base.result(), self.index.result())
        elif self.base.type is unicode_type and self.type.is_unicode_char:
            return "__Pyx_PyUnicode_READ_CHAR(%s, %s)" % (self.base.result(), self.index.result())
        elif (self.type.is_ptr or self.type.is_array) and self.type == self.base.type:
            # NOTE(review): this error branch falls through and implicitly
            # returns None; compilation has already been flagged as failed
            # by error(), so the value is never used — confirm.
            error(self.pos, "Invalid use of pointer slice")
        else:
            return "(%s[%s])" % (
                self.base.result(), self.index.result())
def extra_index_params(self):
if self.index.type.is_int:
if self.original_index_type.signed:
size_adjustment = ""
else:
size_adjustment = "+1"
return ", sizeof(%s)%s, %s" % (self.original_index_type.declaration_code(""), size_adjustment, self.original_index_type.to_py_function)
else:
return ""
    def generate_subexpr_evaluation_code(self, code):
        # Evaluate the base, then either the single index or the unpacked
        # index list (buffer/memoryview access).
        self.base.generate_evaluation_code(code)
        if self.indices is None:
            self.index.generate_evaluation_code(code)
        else:
            for i in self.indices:
                i.generate_evaluation_code(code)
    def generate_subexpr_disposal_code(self, code):
        # Mirror image of generate_subexpr_evaluation_code.
        self.base.generate_disposal_code(code)
        if self.indices is None:
            self.index.generate_disposal_code(code)
        else:
            for i in self.indices:
                i.generate_disposal_code(code)
    def free_subexpr_temps(self, code):
        # Release temps of the base and of whichever index form is in use.
        self.base.free_temps(code)
        if self.indices is None:
            self.index.free_temps(code)
        else:
            for i in self.indices:
                i.free_temps(code)
    def generate_result_code(self, code):
        # Emit the runtime lookup for base[index]:
        #  - buffer / integer memoryview access: pointer dereference
        #  - memoryview slicing: slice struct construction
        #  - Python objects: __Pyx_GetItemInt* / PyObject_GetItem utilities
        if self.is_buffer_access or self.memslice_index:
            self.nonecheck(code)
            buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code)
            if self.type.is_pyobject:
                # is_temp is True, so must pull out value and incref it.
                code.putln("%s = *%s;" % (self.result(), self.buffer_ptr_code))
                code.putln("__Pyx_INCREF((PyObject*)%s);" % self.result())

        elif self.memslice_slice:
            self.nonecheck(code)
            self.put_memoryviewslice_slice_code(code)

        elif self.is_temp:
            if self.type.is_pyobject:
                if self.index.type.is_int:
                    index_code = self.index.result()
                    # specialized getters for known builtin containers
                    if self.base.type is list_type:
                        function = "__Pyx_GetItemInt_List"
                    elif self.base.type is tuple_type:
                        function = "__Pyx_GetItemInt_Tuple"
                    else:
                        function = "__Pyx_GetItemInt"
                    code.globalstate.use_utility_code(getitem_int_utility_code)
                else:
                    index_code = self.index.py_result()
                    if self.base.type is dict_type:
                        function = "__Pyx_PyDict_GetItem"
                        code.globalstate.use_utility_code(getitem_dict_utility_code)
                    else:
                        function = "PyObject_GetItem"
                code.putln(
                    "%s = %s(%s, %s%s); if (!%s) %s" % (
                        self.result(),
                        function,
                        self.base.py_result(),
                        index_code,
                        self.extra_index_params(),
                        self.result(),
                        code.error_goto(self.pos)))
                code.put_gotref(self.py_result())
            elif self.type.is_unicode_char and self.base.type is unicode_type:
                assert self.index.type.is_int
                index_code = self.index.result()
                function = "__Pyx_GetItemInt_Unicode"
                code.globalstate.use_utility_code(getitem_int_pyunicode_utility_code)
                # (Py_UCS4)-1 signals a lookup error from the utility function
                code.putln(
                    "%s = %s(%s, %s%s); if (unlikely(%s == (Py_UCS4)-1)) %s;" % (
                        self.result(),
                        function,
                        self.base.py_result(),
                        index_code,
                        self.extra_index_params(),
                        self.result(),
                        code.error_goto(self.pos)))
    def generate_setitem_code(self, value_code, code):
        # Emit base[index] = value for Python-object bases.
        if self.index.type.is_int:
            function = "__Pyx_SetItemInt"
            index_code = self.index.result()
            code.globalstate.use_utility_code(setitem_int_utility_code)
        else:
            index_code = self.index.py_result()
            if self.base.type is dict_type:
                function = "PyDict_SetItem"
            # It would seem that we could specialize lists/tuples, but that
            # shouldn't happen here.
            # Both PyList_SetItem and PyTuple_SetItem take a Py_ssize_t as
            # input, not a PyObject*, and bad conversion here would give the
            # wrong exception. Also, tuples are supposed to be immutable, and
            # raise TypeErrors when trying to set their entries
            # (PyTuple_SetItem is only for filling in freshly created tuples).
            else:
                function = "PyObject_SetItem"
        code.putln(
            "if (%s(%s, %s, %s%s) < 0) %s" % (
                function,
                self.base.py_result(),
                index_code,
                value_code,
                self.extra_index_params(),
                code.error_goto(self.pos)))
    def generate_buffer_setitem_code(self, rhs, code, op=""):
        # Emit *ptr op= rhs for a buffer element; `op` supports in-place
        # operators (e.g. "+").
        # Used from generate_assignment_code and InPlaceAssignmentNode
        self.nonecheck(code)
        buffer_entry, ptrexpr = self.buffer_lookup_code(code)

        if self.buffer_type.dtype.is_pyobject:
            # Must manage refcounts. Decref what is already there
            # and incref what we put in.
            ptr = code.funcstate.allocate_temp(buffer_entry.buf_ptr_type,
                                               manage_ref=False)
            rhs_code = rhs.result()
            code.putln("%s = %s;" % (ptr, ptrexpr))
            code.put_gotref("*%s" % ptr)
            code.putln("__Pyx_DECREF(*%s); __Pyx_INCREF(%s);" % (
                ptr, rhs_code
                ))
            code.putln("*%s %s= %s;" % (ptr, op, rhs_code))
            code.put_giveref("*%s" % ptr)
            code.funcstate.release_temp(ptr)
        else:
            # Simple case
            code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result()))
    def generate_assignment_code(self, rhs, code):
        # Dispatch base[index] = rhs to the appropriate code generator.
        # Memoryview slice/scalar assignments need the full node evaluated
        # (the slice struct itself), not just the subexpressions.
        generate_evaluation_code = (self.is_memslice_scalar_assignment or
                                    self.memslice_slice)
        if generate_evaluation_code:
            self.generate_evaluation_code(code)
        else:
            self.generate_subexpr_evaluation_code(code)

        if self.is_buffer_access or self.memslice_index:
            self.generate_buffer_setitem_code(rhs, code)
        elif self.is_memslice_scalar_assignment:
            self.generate_memoryviewslice_assign_scalar_code(rhs, code)
        elif self.memslice_slice or self.is_memslice_copy:
            self.generate_memoryviewslice_setslice_code(rhs, code)
        elif self.type.is_pyobject:
            self.generate_setitem_code(rhs.py_result(), code)
        else:
            # plain C assignment through the calculated lvalue
            code.putln(
                "%s = %s;" % (
                    self.result(), rhs.result()))

        if generate_evaluation_code:
            self.generate_disposal_code(code)
        else:
            self.generate_subexpr_disposal_code(code)
            self.free_subexpr_temps(code)

        rhs.generate_disposal_code(code)
        rhs.free_temps(code)
    def generate_deletion_code(self, code):
        # Emit `del base[index]` for Python-object bases.
        self.generate_subexpr_evaluation_code(code)
        #if self.type.is_pyobject:
        if self.index.type.is_int:
            function = "__Pyx_DelItemInt"
            index_code = self.index.result()
            code.globalstate.use_utility_code(delitem_int_utility_code)
        else:
            index_code = self.index.py_result()
            if self.base.type is dict_type:
                function = "PyDict_DelItem"
            else:
                function = "PyObject_DelItem"
        code.putln(
            "if (%s(%s, %s%s) < 0) %s" % (
                function,
                self.base.py_result(),
                index_code,
                self.extra_index_params(),
                code.error_goto(self.pos)))
        self.generate_subexpr_disposal_code(code)
        self.free_subexpr_temps(code)
    def buffer_entry(self):
        # Build the Buffer/MemoryView entry wrapper used for lookup code
        # generation.  For a temp base, synthesize an Entry around its cname.
        import Buffer, MemoryView

        if self.base.is_name:
            entry = self.base.entry
        else:
            assert self.base.is_temp
            cname = self.base.result()
            entry = Symtab.Entry(cname, cname, self.base.type, self.base.pos)

        if entry.type.is_buffer:
            buffer_entry = Buffer.BufferEntry(entry)
        else:
            buffer_entry = MemoryView.MemoryViewSliceBufferEntry(entry)

        return buffer_entry
    def buffer_lookup_code(self, code):
        "ndarray[1, 2, 3] and memslice[1, 2, 3]"
        # Returns (buffer_entry, C pointer expression for the element).
        # Assign indices to temps
        index_temps = [code.funcstate.allocate_temp(i.type, manage_ref=False)
                           for i in self.indices]

        for temp, index in zip(index_temps, self.indices):
            code.putln("%s = %s;" % (temp, index.result()))

        # Generate buffer access code using these temps
        import Buffer, MemoryView

        buffer_entry = self.buffer_entry()

        if buffer_entry.type.is_buffer:
            negative_indices = buffer_entry.type.negative_indices
        else:
            negative_indices = Buffer.buffer_defaults['negative_indices']

        return buffer_entry, Buffer.put_buffer_lookup_code(
               entry=buffer_entry,
               index_signeds=[i.type.signed for i in self.indices],
               index_cnames=index_temps,
               directives=code.globalstate.directives,
               pos=self.pos, code=code,
               negative_indices=negative_indices,
               in_nogil_context=self.in_nogil_context)
    def put_memoryviewslice_slice_code(self, code):
        "memslice[:]"
        # Re-associate the coerced start/stop/step temps (flattened into
        # self.indices during analysis) with their original SliceNodes, then
        # emit the slice construction code.
        buffer_entry = self.buffer_entry()
        have_gil = not self.in_nogil_context

        # Py2/Py3 compatibility shim for advancing an iterator.
        if sys.version_info < (3,):
            def next_(it):
                return it.next()
        else:
            next_ = next

        have_slices = False
        it = iter(self.indices)
        for index in self.original_indices:
            is_slice = isinstance(index, SliceNode)
            have_slices = have_slices or is_slice
            if is_slice:
                if not index.start.is_none:
                    index.start = next_(it)
                if not index.stop.is_none:
                    index.stop = next_(it)
                if not index.step.is_none:
                    index.step = next_(it)
            else:
                next_(it)

        # every analysed index temp must have been consumed
        assert not list(it)

        buffer_entry.generate_buffer_slice_code(code, self.original_indices,
                                                self.result(),
                                                have_gil=have_gil,
                                                have_slices=have_slices)
    def generate_memoryviewslice_setslice_code(self, rhs, code):
        "memslice1[...] = memslice2 or memslice1[:] = memslice2"
        import MemoryView
        MemoryView.copy_broadcast_memview_src_to_dst(rhs, self, code)
    def generate_memoryviewslice_assign_scalar_code(self, rhs, code):
        "memslice1[...] = 0.0 or memslice1[:] = 0.0"
        import MemoryView
        MemoryView.assign_scalar(self, rhs, code)
    def nonecheck(self, code):
        # Emit a None check for the base only when the directive asks for it.
        if code.globalstate.directives['nonecheck']:
            self.put_nonecheck(code)
    def put_nonecheck(self, code):
        # Emit code that raises the appropriate "indexing None" error when
        # the base object is None (memoryview vs. generic object variants).
        if self.base.type.is_memoryviewslice:
            code.globalstate.use_utility_code(
                raise_noneindex_memview_error_utility_code)
            code.putln("if (unlikely((PyObject *) %s.memview == Py_None)) {" %
                       self.base.result())
            code.putln("__Pyx_RaiseNoneMemviewIndexingError();")
        else:
            code.globalstate.use_utility_code(raise_noneindex_error_utility_code)
            code.putln("if (%s) {" % code.unlikely("%s == Py_None") %
                       self.base.result_as(PyrexTypes.py_object_type))
            code.putln("__Pyx_RaiseNoneIndexingError();")
        code.putln(code.error_goto(self.pos))
        code.putln("}")
class SliceIndexNode(ExprNode):
    #  2-element slice indexing (base[start:stop], no step)
    #
    #  base      ExprNode
    #  start     ExprNode or None
    #  stop      ExprNode or None

    subexprs = ['base', 'start', 'stop']
    def infer_type(self, env):
        # Infer the slice result type without full analysis.
        base_type = self.base.infer_type(env)
        if base_type.is_string:
            # sliced C strings coerce to Python bytes
            return bytes_type
        elif base_type in (bytes_type, str_type, unicode_type,
                           list_type, tuple_type):
            # builtin sequences slice to their own type
            return base_type
        elif base_type.is_ptr or base_type.is_array:
            return PyrexTypes.c_array_type(base_type.base_type, None)
        return py_object_type
def calculate_constant_result(self):
self.constant_result = self.base.constant_result[
self.start.constant_result : self.stop.constant_result]
    def compile_time_value(self, denv):
        # Evaluate base[start:stop] in the compile-time environment; missing
        # bounds default to 0 / end-of-sequence.
        base = self.base.compile_time_value(denv)
        if self.start is None:
            start = 0
        else:
            start = self.start.compile_time_value(denv)
        if self.stop is None:
            stop = None
        else:
            stop = self.stop.compile_time_value(denv)
        try:
            return base[start:stop]
        except Exception, e:
            self.compile_time_value_error(e)
    def analyse_target_declaration(self, env):
        # Slice targets declare nothing; only the base's declaration matters.
        pass
    def analyse_target_types(self, env):
        self.analyse_types(env, getting=False)
        # when assigning, we must accept any Python type
        if self.type.is_pyobject:
            self.type = py_object_type
    def analyse_types(self, env, getting=True):
        # Determine the slice result type; memoryview bases are rewritten
        # in place into an IndexNode with an explicit SliceNode index.
        self.base.analyse_types(env)

        if self.base.type.is_memoryviewslice:
            # Gross hack here! But we do not know the type until this point,
            # and we cannot create and return a new node. So we change the
            # type...
            none_node = NoneNode(self.pos)
            index = SliceNode(self.pos,
                              start=self.start or none_node,
                              stop=self.stop or none_node,
                              step=none_node)
            del self.start
            del self.stop
            self.index = index
            self.__class__ = IndexNode
            self.analyse_base_and_index_types(env,
                                              getting=getting,
                                              setting=not getting,
                                              analyse_base=False)
            return

        if self.start:
            self.start.analyse_types(env)
        if self.stop:
            self.stop.analyse_types(env)
        base_type = self.base.type
        if base_type.is_string:
            self.type = bytes_type
        elif base_type.is_ptr:
            self.type = base_type
        elif base_type.is_array:
            # we need a ptr type here instead of an array type, as
            # array types can result in invalid type casts in the C
            # code
            self.type = PyrexTypes.CPtrType(base_type.base_type)
        else:
            self.base = self.base.coerce_to_pyobject(env)
            self.type = py_object_type
        if base_type.is_builtin_type:
            # slicing builtin types returns something of the same type
            self.type = base_type
        # slice bounds are always Py_ssize_t
        c_int = PyrexTypes.c_py_ssize_t_type
        if self.start:
            self.start = self.start.coerce_to(c_int, env)
        if self.stop:
            self.stop = self.stop.coerce_to(c_int, env)
        self.is_temp = 1
nogil_check = Node.gil_error
gil_message = "Slicing Python object"
    def generate_result_code(self, code):
        # Emit the runtime slice: C strings become Python bytes objects,
        # everything else goes through __Pyx_PySequence_GetSlice.
        if not self.type.is_pyobject:
            error(self.pos,
                  "Slicing is not currently supported for '%s'." % self.type)
            return
        if self.base.type.is_string:
            if self.stop is None:
                # open-ended slice of a C string: copy to the terminator
                code.putln(
                    "%s = PyBytes_FromString(%s + %s); %s" % (
                        self.result(),
                        self.base.result(),
                        self.start_code(),
                        code.error_goto_if_null(self.result(), self.pos)))
            else:
                code.putln(
                    "%s = PyBytes_FromStringAndSize(%s + %s, %s - %s); %s" % (
                        self.result(),
                        self.base.result(),
                        self.start_code(),
                        self.stop_code(),
                        self.start_code(),
                        code.error_goto_if_null(self.result(), self.pos)))
        else:
            code.putln(
                "%s = __Pyx_PySequence_GetSlice(%s, %s, %s); %s" % (
                    self.result(),
                    self.base.py_result(),
                    self.start_code(),
                    self.stop_code(),
                    code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
    def generate_assignment_code(self, rhs, code):
        # Emit base[start:stop] = rhs.  Python objects use the SetSlice
        # utility; C arrays are copied element by element with a length guard.
        self.generate_subexpr_evaluation_code(code)
        if self.type.is_pyobject:
            code.put_error_if_neg(self.pos,
                "__Pyx_PySequence_SetSlice(%s, %s, %s, %s)" % (
                    self.base.py_result(),
                    self.start_code(),
                    self.stop_code(),
                    rhs.py_result()))
        else:
            start_offset = ''
            if self.start:
                start_offset = self.start_code()
                if start_offset == '0':
                    start_offset = ''
                else:
                    start_offset += '+'
            if rhs.type.is_array:
                array_length = rhs.type.size
                self.generate_slice_guard_code(code, array_length)
            else:
                error(self.pos,
                      "Slice assignments from pointers are not yet supported.")
                # FIXME: fix the array size according to start/stop
                array_length = self.base.type.size
            # unrolled element-wise copy
            for i in range(array_length):
                code.putln("%s[%s%s] = %s[%d];" % (
                        self.base.result(), start_offset, i,
                        rhs.result(), i))
        self.generate_subexpr_disposal_code(code)
        self.free_subexpr_temps(code)
        rhs.generate_disposal_code(code)
        rhs.free_temps(code)
    def generate_deletion_code(self, code):
        # Emit `del base[start:stop]`; only valid for Python objects.
        if not self.base.type.is_pyobject:
            error(self.pos,
                  "Deleting slices is only supported for Python types, not '%s'." % self.type)
            return
        self.generate_subexpr_evaluation_code(code)
        code.put_error_if_neg(self.pos,
            "__Pyx_PySequence_DelSlice(%s, %s, %s)" % (
                self.base.py_result(),
                self.start_code(),
                self.stop_code()))
        self.generate_subexpr_disposal_code(code)
        self.free_subexpr_temps(code)
    def generate_slice_guard_code(self, code, target_size):
        # Verify (at compile time where possible, otherwise at runtime) that
        # assigning an array of `target_size` elements into this slice of a
        # C array is length-correct.
        if not self.base.type.is_array:
            return
        slice_size = self.base.type.size
        start = stop = None
        if self.stop:
            stop = self.stop.result()
            try:
                # literal stop bound: fold into slice_size
                stop = int(stop)
                if stop < 0:
                    slice_size = self.base.type.size + stop
                else:
                    slice_size = stop
                stop = None
            except ValueError:
                pass
        if self.start:
            start = self.start.result()
            try:
                # literal start bound: fold into slice_size
                start = int(start)
                if start < 0:
                    start = self.base.type.size + start
                slice_size -= start
                start = None
            except ValueError:
                pass
        check = None
        if slice_size < 0:
            if target_size > 0:
                error(self.pos, "Assignment to empty slice.")
        elif start is None and stop is None:
            # we know the exact slice length
            if target_size != slice_size:
                error(self.pos, "Assignment to slice of wrong length, expected %d, got %d" % (
                        slice_size, target_size))
        elif start is not None:
            if stop is None:
                stop = slice_size
            check = "(%s)-(%s)" % (stop, start)
        else: # stop is not None:
            check = stop
        if check:
            # length only known at runtime: emit a guarded ValueError
            code.putln("if (unlikely((%s) != %d)) {" % (check, target_size))
            code.putln('PyErr_Format(PyExc_ValueError, "Assignment to slice of wrong length, expected %%"PY_FORMAT_SIZE_T"d, got %%"PY_FORMAT_SIZE_T"d", (Py_ssize_t)%d, (Py_ssize_t)(%s));' % (
                        target_size, check))
            code.putln(code.error_goto(self.pos))
            code.putln("}")
def start_code(self):
if self.start:
return self.start.result()
else:
return "0"
def stop_code(self):
if self.stop:
return self.stop.result()
elif self.base.type.is_array:
return self.base.type.size
else:
return "PY_SSIZE_T_MAX"
    def calculate_result_code(self):
        # self.result() is not used, but this method must exist
        return "<unused>"
class SliceNode(ExprNode):
    #  start:stop:step in subscript list
    #
    #  start     ExprNode
    #  stop      ExprNode
    #  step      ExprNode
    #
    # Builds a Python slice object at runtime via PySlice_New(); all-literal
    # slices are cached as module-level constants.

    subexprs = ['start', 'stop', 'step']

    type = py_object_type
    is_temp = 1

    def calculate_constant_result(self):
        # Fold to a real slice object when all three parts are constant.
        self.constant_result = slice(
            self.start.constant_result,
            self.stop.constant_result,
            self.step.constant_result)

    def compile_time_value(self, denv):
        start = self.start.compile_time_value(denv)
        stop = self.stop.compile_time_value(denv)
        step = self.step.compile_time_value(denv)
        try:
            return slice(start, stop, step)
        except Exception, e:
            self.compile_time_value_error(e)

    def analyse_types(self, env):
        self.start.analyse_types(env)
        self.stop.analyse_types(env)
        self.step.analyse_types(env)
        self.start = self.start.coerce_to_pyobject(env)
        self.stop = self.stop.coerce_to_pyobject(env)
        self.step = self.step.coerce_to_pyobject(env)
        if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
            # fully literal slice: promote to a cached module constant
            self.is_literal = True
            self.is_temp = False

    gil_message = "Constructing Python slice object"

    def calculate_result_code(self):
        return self.result_code

    def generate_result_code(self, code):
        if self.is_literal:
            # literal slices are built once in the cached-constants section
            self.result_code = code.get_py_const(py_object_type, 'slice_', cleanup_level=2)
            code = code.get_cached_constants_writer()
            code.mark_pos(self.pos)

        code.putln(
            "%s = PySlice_New(%s, %s, %s); %s" % (
                self.result(),
                self.start.py_result(),
                self.stop.py_result(),
                self.step.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
        if self.is_literal:
            code.put_giveref(self.py_result())
class CallNode(ExprNode):
    """Abstract base class for all call expressions."""

    # allow overriding the default 'may_be_none' behaviour
    may_return_none = None

    def infer_type(self, env):
        # Best-effort result type inference without full analysis.
        function = self.function
        func_type = function.infer_type(env)
        if isinstance(self.function, NewExprNode):
            # C++ 'new' returns a pointer to the constructed class.
            return PyrexTypes.CPtrType(self.function.class_type)
        if func_type.is_ptr:
            func_type = func_type.base_type
        if func_type.is_cfunction:
            return func_type.return_type
        elif func_type is type_type:
            # Calling a type object: try to predict the instance type.
            if function.is_name and function.entry and function.entry.type:
                result_type = function.entry.type
                if result_type.is_extension_type:
                    return result_type
                elif result_type.is_builtin_type:
                    if function.entry.name == 'float':
                        # float() is replaced by a C double later on.
                        return PyrexTypes.c_double_type
                    elif function.entry.name in Builtin.types_that_construct_their_instance:
                        return result_type
        return py_object_type

    def type_dependencies(self, env):
        # TODO: Update when Danilo's C++ code merged in to handle the
        # case of function overloading.
        return self.function.type_dependencies(env)

    def may_be_none(self):
        if self.may_return_none is not None:
            return self.may_return_none
        return ExprNode.may_be_none(self)

    def analyse_as_type_constructor(self, env):
        # Detect calls that are really struct/union or C++ class construction
        # and mutate/redirect this node accordingly. Returns True on success.
        type = self.function.analyse_as_type(env)
        if type and type.is_struct_or_union:
            args, kwds = self.explicit_args_kwds()
            items = []
            # Positional args map onto struct members in declaration order.
            for arg, member in zip(args, type.scope.var_entries):
                items.append(DictItemNode(pos=arg.pos, key=StringNode(pos=arg.pos, value=member.name), value=arg))
            if kwds:
                items += kwds.key_value_pairs
            self.key_value_pairs = items
            # Become a DictNode that then gets coerced to the struct type.
            self.__class__ = DictNode
            self.analyse_types(env)
            self.coerce_to(type, env)
            return True
        elif type and type.is_cpp_class:
            for arg in self.args:
                arg.analyse_types(env)
            constructor = type.scope.lookup("<init>")
            self.function = RawCNameExprNode(self.function.pos, constructor.type)
            self.function.entry = constructor
            self.function.set_cname(type.declaration_code(""))
            self.analyse_c_function_call(env)
            return True

    def is_lvalue(self):
        return self.type.is_reference

    def nogil_check(self, env):
        func_type = self.function_type()
        if func_type.is_pyobject:
            self.gil_error()
        elif not getattr(func_type, 'nogil', False):
            self.gil_error()

    gil_message = "Calling gil-requiring function"
class SimpleCallNode(CallNode):
    """Function call without keyword, * or ** args."""
    #
    #  function       ExprNode
    #  args           [ExprNode]
    #  arg_tuple      ExprNode or None     used internally
    #  self           ExprNode or None     used internally
    #  coerced_self   ExprNode or None     used internally
    #  wrapper_call   bool                 used internally
    #  has_optional_args   bool            used internally
    #  nogil          bool                 used internally

    subexprs = ['self', 'coerced_self', 'function', 'args', 'arg_tuple']

    self = None
    coerced_self = None
    arg_tuple = None
    wrapper_call = False
    has_optional_args = False
    nogil = False
    analysed = False

    def compile_time_value(self, denv):
        function = self.function.compile_time_value(denv)
        args = [arg.compile_time_value(denv) for arg in self.args]
        try:
            return function(*args)
        except Exception, e:
            self.compile_time_value_error(e)

    def analyse_as_type(self, env):
        # cython.pointer(T) used in a type position.
        attr = self.function.as_cython_attribute()
        if attr == 'pointer':
            if len(self.args) != 1:
                error(self.args.pos, "only one type allowed.")
            else:
                type = self.args[0].analyse_as_type(env)
                if not type:
                    error(self.args[0].pos, "Unknown type")
                else:
                    return PyrexTypes.CPtrType(type)

    def explicit_args_kwds(self):
        # Simple calls never carry keyword arguments.
        return self.args, None

    def analyse_types(self, env):
        if self.analyse_as_type_constructor(env):
            return
        if self.analysed:
            return
        self.analysed = True
        function = self.function
        function.is_called = 1
        self.function.analyse_types(env)
        if function.is_attribute and function.entry and function.entry.is_cmethod:
            # Take ownership of the object from which the attribute
            # was obtained, because we need to pass it as 'self'.
            self.self = function.obj
            function.obj = CloneNode(self.self)
        func_type = self.function_type()
        if func_type.is_pyobject:
            # Generic Python call: pack the arguments into a tuple.
            self.arg_tuple = TupleNode(self.pos, args = self.args)
            self.arg_tuple.analyse_types(env)
            self.args = None
            if func_type is Builtin.type_type and function.is_name and \
                   function.entry and \
                   function.entry.is_builtin and \
                   function.entry.name in Builtin.types_that_construct_their_instance:
                # calling a builtin type that returns a specific object type
                if function.entry.name == 'float':
                    # the following will come true later on in a transform
                    self.type = PyrexTypes.c_double_type
                    self.result_ctype = PyrexTypes.c_double_type
                else:
                    self.type = Builtin.builtin_types[function.entry.name]
                    self.result_ctype = py_object_type
                self.may_return_none = False
            elif function.is_name and function.type_entry:
                # We are calling an extension type constructor. As
                # long as we do not support __new__(), the result type
                # is clear
                self.type = function.type_entry.type
                self.result_ctype = py_object_type
                self.may_return_none = False
            else:
                self.type = py_object_type
            self.is_temp = 1
        else:
            # C function call: analyse and coerce arguments individually.
            for arg in self.args:
                arg.analyse_types(env)
            if self.self and func_type.args:
                # Coerce 'self' to the type expected by the method.
                self_arg = func_type.args[0]
                if self_arg.not_none: # C methods must do the None test for self at *call* time
                    self.self = self.self.as_none_safe_node(
                        "'NoneType' object has no attribute '%s'",
                        error = 'PyExc_AttributeError',
                        format_args = [self.function.entry.name])
                expected_type = self_arg.type
                if self_arg.accept_builtin_subtypes:
                    self.coerced_self = CMethodSelfCloneNode(self.self)
                else:
                    self.coerced_self = CloneNode(self.self)
                self.coerced_self = self.coerced_self.coerce_to(expected_type, env)
                # Insert coerced 'self' argument into argument list.
                self.args.insert(0, self.coerced_self)
            self.analyse_c_function_call(env)

    def function_type(self):
        # Return the type of the function being called, coercing a function
        # pointer to a function if necessary. If the function has fused
        # arguments, return the specific type.
        func_type = self.function.type
        if func_type.is_ptr:
            func_type = func_type.base_type
        return func_type

    def is_simple(self):
        # C function calls could be considered simple, but they may
        # have side-effects that may hit when multiple operations must
        # be effected in order, e.g. when constructing the argument
        # sequence for a function call or comparing values.
        return False

    def analyse_c_function_call(self, env):
        """Resolve overloads, check/coerce arguments and compute the result type."""
        if self.function.type is error_type:
            self.type = error_type
            return
        # Find the entry (or set of overload alternatives) for the callee.
        if self.function.type.is_cpp_class:
            overloaded_entry = self.function.type.scope.lookup("operator()")
            if overloaded_entry is None:
                self.type = PyrexTypes.error_type
                self.result_code = "<error>"
                return
        elif hasattr(self.function, 'entry'):
            overloaded_entry = self.function.entry
        elif (isinstance(self.function, IndexNode) and
                self.function.is_fused_index):
            overloaded_entry = self.function.type.entry
        else:
            overloaded_entry = None
        if overloaded_entry:
            if self.function.type.is_fused:
                functypes = self.function.type.get_all_specialized_function_types()
                alternatives = [f.entry for f in functypes]
            else:
                alternatives = overloaded_entry.all_alternatives()
            entry = PyrexTypes.best_match(self.args, alternatives, self.pos, env)
            if not entry:
                self.type = PyrexTypes.error_type
                self.result_code = "<error>"
                return
            entry.used = True
            self.function.entry = entry
            self.function.type = entry.type
            func_type = self.function_type()
        else:
            func_type = self.function_type()
            if not func_type.is_cfunction:
                error(self.pos, "Calling non-function type '%s'" % func_type)
                self.type = PyrexTypes.error_type
                self.result_code = "<error>"
                return
        # Check no. of args
        max_nargs = len(func_type.args)
        expected_nargs = max_nargs - func_type.optional_arg_count
        actual_nargs = len(self.args)
        if func_type.optional_arg_count and expected_nargs != actual_nargs:
            self.has_optional_args = 1
            self.is_temp = 1
        # Coerce arguments
        some_args_in_temps = False
        for i in xrange(min(max_nargs, actual_nargs)):
            formal_type = func_type.args[i].type
            arg = self.args[i].coerce_to(formal_type, env)
            if arg.is_temp:
                if i > 0:
                    # first argument in temp doesn't impact subsequent arguments
                    some_args_in_temps = True
            elif arg.type.is_pyobject and not env.nogil:
                if i == 0 and self.self is not None:
                    # a method's cloned "self" argument is ok
                    pass
                elif arg.nonlocally_immutable():
                    # plain local variables are ok
                    pass
                else:
                    # we do not safely own the argument's reference,
                    # but we must make sure it cannot be collected
                    # before we return from the function, so we create
                    # an owned temp reference to it
                    if i > 0: # first argument doesn't matter
                        some_args_in_temps = True
                    arg = arg.coerce_to_temp(env)
            self.args[i] = arg
        # handle additional varargs parameters
        for i in xrange(max_nargs, actual_nargs):
            arg = self.args[i]
            if arg.type.is_pyobject:
                arg_ctype = arg.type.default_coerced_ctype()
                if arg_ctype is None:
                    error(self.args[i].pos,
                        "Python object cannot be passed as a varargs parameter")
                else:
                    self.args[i] = arg = arg.coerce_to(arg_ctype, env)
            if arg.is_temp and i > 0:
                some_args_in_temps = True
        if some_args_in_temps:
            # if some args are temps and others are not, they may get
            # constructed in the wrong order (temps first) => make
            # sure they are either all temps or all not temps (except
            # for the last argument, which is evaluated last in any
            # case)
            for i in xrange(actual_nargs-1):
                if i == 0 and self.self is not None:
                    continue # self is ok
                arg = self.args[i]
                if arg.nonlocally_immutable():
                    # locals, C functions, unassignable types are safe.
                    pass
                elif arg.type.is_cpp_class:
                    # Assignment has side effects, avoid.
                    pass
                elif env.nogil and arg.type.is_pyobject:
                    # can't copy a Python reference into a temp in nogil
                    # env (this is safe: a construction would fail in
                    # nogil anyway)
                    pass
                else:
                    #self.args[i] = arg.coerce_to_temp(env)
                    # instead: issue a warning
                    # NOTE(review): 'i == 1 and self.self is not None' is
                    # subsumed by 'i > 0' and therefore redundant here.
                    if i > 0 or i == 1 and self.self is not None: # skip first arg
                        warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
                        break
        # Calc result type and code fragment
        if isinstance(self.function, NewExprNode):
            self.type = PyrexTypes.CPtrType(self.function.class_type)
        else:
            self.type = func_type.return_type
        if self.function.is_name or self.function.is_attribute:
            if self.function.entry and self.function.entry.utility_code:
                self.is_temp = 1 # currently doesn't work for self.calculate_result_code()
        if self.type.is_pyobject:
            self.result_ctype = py_object_type
            self.is_temp = 1
        elif func_type.exception_value is not None \
                 or func_type.exception_check:
            self.is_temp = 1
        elif self.type.is_memoryviewslice:
            self.is_temp = 1
            # func_type.exception_check = True
        # Called in 'nogil' context?
        self.nogil = env.nogil
        if (self.nogil and
            func_type.exception_check and
            func_type.exception_check != '+'):
            env.use_utility_code(pyerr_occurred_withgil_utility_code)
        # C++ exception handler
        if func_type.exception_check == '+':
            if func_type.exception_value is None:
                env.use_utility_code(cpp_exception_utility_code)

    def calculate_result_code(self):
        return self.c_call_code()

    def c_call_code(self):
        """Build the C expression text for the call itself."""
        func_type = self.function_type()
        if self.type is PyrexTypes.error_type or not func_type.is_cfunction:
            return "<error>"
        formal_args = func_type.args
        arg_list_code = []
        args = list(zip(formal_args, self.args))
        max_nargs = len(func_type.args)
        expected_nargs = max_nargs - func_type.optional_arg_count
        actual_nargs = len(self.args)
        for formal_arg, actual_arg in args[:expected_nargs]:
            arg_code = actual_arg.result_as(formal_arg.type)
            arg_list_code.append(arg_code)
        if func_type.is_overridable:
            # Extra flag argument: skip the Python override check for
            # wrapper calls and unbound C method calls.
            arg_list_code.append(str(int(self.wrapper_call or self.function.entry.is_unbound_cmethod)))
        if func_type.optional_arg_count:
            if expected_nargs == actual_nargs:
                optional_args = 'NULL'
            else:
                optional_args = "&%s" % self.opt_arg_struct
            arg_list_code.append(optional_args)
        for actual_arg in self.args[len(formal_args):]:
            arg_list_code.append(actual_arg.result())
        result = "%s(%s)" % (self.function.result(), ', '.join(arg_list_code))
        return result

    def generate_result_code(self, code):
        func_type = self.function_type()
        if self.function.is_name or self.function.is_attribute:
            if self.function.entry and self.function.entry.utility_code:
                code.globalstate.use_utility_code(self.function.entry.utility_code)
        if func_type.is_pyobject:
            arg_code = self.arg_tuple.py_result()
            code.putln(
                "%s = PyObject_Call(%s, %s, NULL); %s" % (
                    self.result(),
                    self.function.py_result(),
                    arg_code,
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif func_type.is_cfunction:
            if self.has_optional_args:
                # Fill the optional-args struct (count first, then values).
                actual_nargs = len(self.args)
                expected_nargs = len(func_type.args) - func_type.optional_arg_count
                self.opt_arg_struct = code.funcstate.allocate_temp(
                    func_type.op_arg_struct.base_type, manage_ref=True)
                code.putln("%s.%s = %s;" % (
                        self.opt_arg_struct,
                        Naming.pyrex_prefix + "n",
                        len(self.args) - expected_nargs))
                args = list(zip(func_type.args, self.args))
                for formal_arg, actual_arg in args[expected_nargs:actual_nargs]:
                    code.putln("%s.%s = %s;" % (
                            self.opt_arg_struct,
                            func_type.opt_arg_cname(formal_arg.name),
                            actual_arg.result_as(formal_arg.type)))
            # Build the list of C conditions that signal an exception.
            exc_checks = []
            if self.type.is_pyobject and self.is_temp:
                exc_checks.append("!%s" % self.result())
            elif self.type.is_memoryviewslice:
                assert self.is_temp
                exc_checks.append(self.type.error_condition(self.result()))
            else:
                exc_val = func_type.exception_value
                exc_check = func_type.exception_check
                if exc_val is not None:
                    exc_checks.append("%s == %s" % (self.result(), exc_val))
                if exc_check:
                    if self.nogil:
                        exc_checks.append("__Pyx_ErrOccurredWithGIL()")
                    else:
                        exc_checks.append("PyErr_Occurred()")
            if self.is_temp or exc_checks:
                rhs = self.c_call_code()
                if self.result():
                    lhs = "%s = " % self.result()
                    if self.is_temp and self.type.is_pyobject:
                        #return_type = self.type # func_type.return_type
                        #print "SimpleCallNode.generate_result_code: casting", rhs, \
                        #    "from", return_type, "to pyobject" ###
                        rhs = typecast(py_object_type, self.type, rhs)
                else:
                    lhs = ""
                if func_type.exception_check == '+':
                    # C++ exception translation: wrap the call in try/catch.
                    if func_type.exception_value is None:
                        raise_py_exception = "__Pyx_CppExn2PyErr()"
                    elif func_type.exception_value.type.is_pyobject:
                        raise_py_exception = ' try { throw; } catch(const std::exception& exn) { PyErr_SetString(%s, exn.what()); } catch(...) { PyErr_SetNone(%s); }' % (
                            func_type.exception_value.entry.cname,
                            func_type.exception_value.entry.cname)
                    else:
                        raise_py_exception = '%s(); if (!PyErr_Occurred()) PyErr_SetString(PyExc_RuntimeError , "Error converting c++ exception.")' % func_type.exception_value.entry.cname
                    if self.nogil:
                        raise_py_exception = 'Py_BLOCK_THREADS; %s; Py_UNBLOCK_THREADS' % raise_py_exception
                    code.putln(
                    "try {%s%s;} catch(...) {%s; %s}" % (
                        lhs,
                        rhs,
                        raise_py_exception,
                        code.error_goto(self.pos)))
                else:
                    if exc_checks:
                        goto_error = code.error_goto_if(" && ".join(exc_checks), self.pos)
                    else:
                        goto_error = ""
                    code.putln("%s%s; %s" % (lhs, rhs, goto_error))
                if self.type.is_pyobject and self.result():
                    code.put_gotref(self.py_result())
            if self.has_optional_args:
                code.funcstate.release_temp(self.opt_arg_struct)
class InlinedDefNodeCallNode(CallNode):
    """Direct C-level call into the implementation of a Python def function."""
    #
    #  function       PyCFunctionNode
    #  function_name  NameNode
    #  args           [ExprNode]

    subexprs = ['args', 'function_name']
    is_temp = 1
    type = py_object_type
    function = None
    function_name = None

    def can_be_inlined(self):
        # Only plain signatures (no *args/**kwargs, exact arg count) qualify.
        func_type= self.function.def_node
        if func_type.star_arg or func_type.starstar_arg:
            return False
        if len(func_type.args) != len(self.args):
            return False
        return True

    def analyse_types(self, env):
        self.function_name.analyse_types(env)
        for arg in self.args:
            arg.analyse_types(env)
        func_type = self.function.def_node
        actual_nargs = len(self.args)
        # Coerce arguments
        some_args_in_temps = False
        for i in xrange(actual_nargs):
            formal_type = func_type.args[i].type
            arg = self.args[i].coerce_to(formal_type, env)
            if arg.is_temp:
                if i > 0:
                    # first argument in temp doesn't impact subsequent arguments
                    some_args_in_temps = True
            elif arg.type.is_pyobject and not env.nogil:
                if arg.nonlocally_immutable():
                    # plain local variables are ok
                    pass
                else:
                    # we do not safely own the argument's reference,
                    # but we must make sure it cannot be collected
                    # before we return from the function, so we create
                    # an owned temp reference to it
                    if i > 0: # first argument doesn't matter
                        some_args_in_temps = True
                    arg = arg.coerce_to_temp(env)
            self.args[i] = arg
        if some_args_in_temps:
            # if some args are temps and others are not, they may get
            # constructed in the wrong order (temps first) => make
            # sure they are either all temps or all not temps (except
            # for the last argument, which is evaluated last in any
            # case)
            for i in xrange(actual_nargs-1):
                arg = self.args[i]
                if arg.nonlocally_immutable():
                    # locals, C functions, unassignable types are safe.
                    pass
                elif arg.type.is_cpp_class:
                    # Assignment has side effects, avoid.
                    pass
                elif env.nogil and arg.type.is_pyobject:
                    # can't copy a Python reference into a temp in nogil
                    # env (this is safe: a construction would fail in
                    # nogil anyway)
                    pass
                else:
                    #self.args[i] = arg.coerce_to_temp(env)
                    # instead: issue a warning
                    if i > 0:
                        warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
                        break

    def generate_result_code(self, code):
        # First argument is the function object itself (for closures etc.).
        arg_code = [self.function_name.py_result()]
        func_type = self.function.def_node
        for arg, proto_arg in zip(self.args, func_type.args):
            if arg.type.is_pyobject:
                arg_code.append(arg.result_as(proto_arg.type))
            else:
                arg_code.append(arg.result())
        arg_code = ', '.join(arg_code)
        code.putln(
            "%s = %s(%s); %s" % (
                self.result(),
                self.function.def_node.entry.pyfunc_cname,
                arg_code,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class PythonCapiFunctionNode(ExprNode):
    """Reference to a C-API (or utility) function, identified by its C name."""

    subexprs = []

    def __init__(self, pos, py_name, cname, func_type, utility_code = None):
        self.pos = pos
        self.type = func_type
        self.name = py_name
        self.cname = cname
        self.utility_code = utility_code

    def analyse_types(self, env):
        # Nothing to do: name and type were fixed at construction time.
        pass

    def generate_result_code(self, code):
        if not self.utility_code:
            return
        # Ensure the supporting utility code is emitted into the module.
        code.globalstate.use_utility_code(self.utility_code)

    def calculate_result_code(self):
        return self.cname
class PythonCapiCallNode(SimpleCallNode):
    """Python C-API function call (only created in transforms)."""

    # By default, we assume that the call never returns None, as this
    # is true for most C-API functions in CPython. If this does not
    # apply to a call, set the following to True (or None to inherit
    # the default behaviour).
    may_return_none = False

    def __init__(self, pos, function_name, func_type,
            utility_code = None, py_name=None, **kwargs):
        self.type = func_type.return_type
        self.result_ctype = self.type
        self.function = PythonCapiFunctionNode(
            pos, py_name, function_name, func_type,
            utility_code = utility_code)
        # call this last so that we can override the constructed
        # attributes above with explicit keyword arguments if required
        SimpleCallNode.__init__(self, pos, **kwargs)
class GeneralCallNode(CallNode):
    """General Python function call, including keyword, * and ** arguments."""
    #
    #  function         ExprNode
    #  positional_args  ExprNode          Tuple of positional arguments
    #  keyword_args     ExprNode or None  Dict of keyword arguments

    type = py_object_type

    subexprs = ['function', 'positional_args', 'keyword_args']

    nogil_check = Node.gil_error

    def compile_time_value(self, denv):
        function = self.function.compile_time_value(denv)
        positional_args = self.positional_args.compile_time_value(denv)
        keyword_args = self.keyword_args.compile_time_value(denv)
        try:
            return function(*positional_args, **keyword_args)
        except Exception, e:
            self.compile_time_value_error(e)

    def explicit_args_kwds(self):
        # Only literal tuples/dicts can be unpacked at compile time.
        if (self.keyword_args and not isinstance(self.keyword_args, DictNode) or
                not isinstance(self.positional_args, TupleNode)):
            raise CompileError(self.pos,
                'Compile-time keyword arguments must be explicit.')
        return self.positional_args.args, self.keyword_args

    def analyse_types(self, env):
        if self.analyse_as_type_constructor(env):
            return
        self.function.analyse_types(env)
        self.positional_args.analyse_types(env)
        if self.keyword_args:
            self.keyword_args.analyse_types(env)
        if not self.function.type.is_pyobject:
            if self.function.type.is_error:
                self.type = error_type
                return
            if hasattr(self.function, 'entry') and not self.function.entry.as_variable:
                # A plain cdef function cannot take keyword/starred arguments.
                error(self.pos, "Keyword and starred arguments not allowed in cdef functions.")
            else:
                self.function = self.function.coerce_to_pyobject(env)
        self.positional_args = \
            self.positional_args.coerce_to_pyobject(env)
        function = self.function
        if function.is_name and function.type_entry:
            # We are calling an extension type constructor. As long
            # as we do not support __new__(), the result type is clear
            self.type = function.type_entry.type
            self.result_ctype = py_object_type
            self.may_return_none = False
        else:
            self.type = py_object_type
        self.is_temp = 1

    def generate_result_code(self, code):
        if self.type.is_error: return
        if self.keyword_args:
            kwargs = self.keyword_args.py_result()
        else:
            kwargs = 'NULL'
        code.putln(
            "%s = PyObject_Call(%s, %s, %s); %s" % (
                self.result(),
                self.function.py_result(),
                self.positional_args.py_result(),
                kwargs,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class AsTupleNode(ExprNode):
# Convert argument to tuple. Used for normalising
# the * argument of a function call.
#
# arg ExprNode
subexprs = ['arg']
def calculate_constant_result(self):
self.constant_result = tuple(self.base.constant_result)
def compile_time_value(self, denv):
arg = self.arg.compile_time_value(denv)
try:
return tuple(arg)
except Exception, e:
self.compile_time_value_error(e)
def analyse_types(self, env):
self.arg.analyse_types(env)
self.arg = self.arg.coerce_to_pyobject(env)
self.type = tuple_type
self.is_temp = 1
def may_be_none(self):
return False
nogil_check = Node.gil_error
gil_message = "Constructing Python tuple"
def generate_result_code(self, code):
code.putln(
"%s = PySequence_Tuple(%s); %s" % (
self.result(),
self.arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class AttributeNode(ExprNode):
    #  obj.attribute
    #
    #  obj          ExprNode
    #  attribute    string
    #  needs_none_check boolean        Used if obj is an extension type.
    #                                  If set to True, it is known that the type is not None.
    #
    #  Used internally:
    #
    #  is_py_attr           boolean   Is a Python getattr operation
    #  member               string    C name of struct member
    #  is_called            boolean   Function call is being done on result
    #  entry                Entry     Symbol table entry of attribute

    is_attribute = 1
    subexprs = ['obj']

    # Class-level defaults; refined during type analysis, or replaced
    # entirely when the node is mutated into a NameNode.
    type = PyrexTypes.error_type
    entry = None
    is_called = 0
    needs_none_check = True
    is_memslice_transpose = False
def as_cython_attribute(self):
    """Return the dotted attribute path if this refers into the 'cython' module."""
    obj = self.obj
    if (isinstance(obj, NameNode) and obj.is_cython_module
            and self.attribute != u"parallel"):
        return self.attribute
    prefix = obj.as_cython_attribute()
    if prefix:
        return "%s.%s" % (prefix, self.attribute)
    return None
def coerce_to(self, dst_type, env):
    # If coercing to a generic pyobject and this is a cpdef function
    # we can create the corresponding attribute
    if dst_type is py_object_type:
        entry = self.entry
        if entry and entry.is_cfunction and entry.as_variable:
            # must be a cpdef function
            self.is_temp = 1
            # Re-analyse against the Python wrapper entry instead.
            self.entry = entry.as_variable
            self.analyse_as_python_attribute(env)
            return self
    return ExprNode.coerce_to(self, dst_type, env)
def calculate_constant_result(self):
    """Fold the attribute lookup at compile time; dunder names are skipped."""
    name = self.attribute
    if not (name.startswith("__") and name.endswith("__")):
        self.constant_result = getattr(self.obj.constant_result, name)
def compile_time_value(self, denv):
    # Evaluate obj.attr in the compile-time environment; dunder names
    # are rejected outright.
    attr = self.attribute
    if attr.startswith("__") and attr.endswith("__"):
        error(self.pos,
            "Invalid attribute name '%s' in compile-time expression" % attr)
        return None
    obj = self.obj.compile_time_value(denv)
    try:
        return getattr(obj, attr)
    except Exception, e:
        self.compile_time_value_error(e)
def type_dependencies(self, env):
    # This node's type can only be inferred once the base object's can.
    return self.obj.type_dependencies(env)

def infer_type(self, env):
    # Try the definite lookups first: a cimported C name or an unbound
    # C method mutates this node into a NameNode with a known type.
    if self.analyse_as_cimported_attribute(env, 0):
        return self.entry.type
    elif self.analyse_as_unbound_cmethod(env):
        return self.entry.type
    else:
        obj_type = self.obj.infer_type(env)
        self.analyse_attribute(env, obj_type = obj_type)
        if obj_type.is_builtin_type and self.type.is_cfunction:
            # special case: C-API replacements for C methods of
            # builtin types cannot be inferred as C functions as
            # that would prevent their use as bound methods
            self.type = py_object_type
            return py_object_type
        return self.type
def analyse_target_declaration(self, env):
    # Attribute assignment targets need no scope declaration.
    pass

def analyse_target_types(self, env):
    self.analyse_types(env, target = 1)
    if not self.is_lvalue():
        error(self.pos, "Assignment to non-lvalue of type '%s'" % self.type)

def analyse_types(self, env, target = 0):
    self.initialized_check = env.directives['initializedcheck']
    # Try the specialised interpretations first; fall back to an
    # ordinary (C struct member or Python getattr) attribute.
    if self.analyse_as_cimported_attribute(env, target):
        self.entry.used = True
    elif not target and self.analyse_as_unbound_cmethod(env):
        self.entry.used = True
    else:
        self.analyse_as_ordinary_attribute(env, target)
        if self.entry:
            self.entry.used = True
def analyse_as_cimported_attribute(self, env, target):
    # Try to interpret this as a reference to an imported
    # C const, type, var or function. If successful, mutates
    # this node into a NameNode and returns 1, otherwise
    # returns 0.
    module_scope = self.obj.analyse_as_module(env)
    if module_scope:
        entry = module_scope.lookup_here(self.attribute)
        if entry and (
                entry.is_cglobal or entry.is_cfunction
                or entry.is_type or entry.is_const):
            self.mutate_into_name_node(env, entry, target)
            entry.used = 1
            return 1
    return 0

def analyse_as_unbound_cmethod(self, env):
    # Try to interpret this as a reference to an unbound
    # C method of an extension type. If successful, mutates
    # this node into a NameNode and returns 1, otherwise
    # returns 0.
    type = self.obj.analyse_as_extension_type(env)
    if type:
        entry = type.scope.lookup_here(self.attribute)
        if entry and entry.is_cmethod:
            # Create a temporary entry describing the C method
            # as an ordinary function.
            ubcm_entry = Symtab.Entry(entry.name,
                "%s->%s" % (type.vtabptr_cname, entry.cname),
                entry.type)
            ubcm_entry.is_cfunction = 1
            ubcm_entry.func_cname = entry.func_cname
            ubcm_entry.is_unbound_cmethod = 1
            self.mutate_into_name_node(env, ubcm_entry, None)
            return 1
    return 0
def analyse_as_type(self, env):
    # Try to interpret this as a type name, either imported from a
    # module or nested inside another type's scope. Returns None on failure.
    module_scope = self.obj.analyse_as_module(env)
    if module_scope:
        return module_scope.lookup_type(self.attribute)
    if not self.obj.is_string_literal:
        base_type = self.obj.analyse_as_type(env)
        if base_type and hasattr(base_type, 'scope') and base_type.scope is not None:
            return base_type.scope.lookup_type(self.attribute)
    return None

def analyse_as_extension_type(self, env):
    # Try to interpret this as a reference to an extension type
    # in a cimported module. Returns the extension type, or None.
    module_scope = self.obj.analyse_as_module(env)
    if module_scope:
        entry = module_scope.lookup_here(self.attribute)
        if entry and entry.is_type and entry.type.is_extension_type:
            return entry.type
    return None

def analyse_as_module(self, env):
    # Try to interpret this as a reference to a cimported module
    # in another cimported module. Returns the module scope, or None.
    module_scope = self.obj.analyse_as_module(env)
    if module_scope:
        entry = module_scope.lookup_here(self.attribute)
        if entry and entry.as_module:
            return entry.as_module
    return None
def mutate_into_name_node(self, env, entry, target):
    # Mutate this node into a NameNode and complete the
    # analyse_types phase.
    self.__class__ = NameNode
    self.name = self.attribute
    self.entry = entry
    # Drop AttributeNode-only state so the NameNode is clean.
    del self.obj
    del self.attribute
    if target:
        NameNode.analyse_target_types(self, env)
    else:
        NameNode.analyse_rvalue_entry(self, env)

def analyse_as_ordinary_attribute(self, env, target):
    self.obj.analyse_types(env)
    self.analyse_attribute(env)
    if self.entry and self.entry.is_cmethod and not self.is_called:
#            error(self.pos, "C method can only be called")
        pass
    ## Reference to C array turns into pointer to first element.
    #while self.type.is_array:
    #    self.type = self.type.element_ptr_type()
    if self.is_py_attr:
        if not target:
            self.is_temp = 1
            self.result_ctype = py_object_type
    elif target and self.obj.type.is_builtin_type:
        error(self.pos, "Assignment to an immutable object field")
    #elif self.type.is_memoryviewslice and not target:
    #    self.is_temp = True
def analyse_attribute(self, env, obj_type = None):
    # Look up attribute and set self.type and self.member.
    self.is_py_attr = 0
    self.member = self.attribute
    if obj_type is None:
        if self.obj.type.is_string:
            self.obj = self.obj.coerce_to_pyobject(env)
        obj_type = self.obj.type
    else:
        if obj_type.is_string:
            obj_type = py_object_type
    # Choose the C access operator for the generated code.
    if obj_type.is_ptr or obj_type.is_array:
        obj_type = obj_type.base_type
        self.op = "->"
    elif obj_type.is_extension_type or obj_type.is_builtin_type:
        self.op = "->"
    else:
        self.op = "."
    if obj_type.has_attributes:
        entry = None
        if obj_type.attributes_known():
            if (obj_type.is_memoryviewslice and not
                    obj_type.scope.lookup_here(self.attribute)):
                if self.attribute == 'T':
                    # Memoryview transpose: handled specially at code-gen time.
                    self.is_memslice_transpose = True
                    self.is_temp = True
                    self.use_managed_ref = True
                    self.type = self.obj.type
                    return
                else:
                    obj_type.declare_attribute(self.attribute, env, self.pos)
            entry = obj_type.scope.lookup_here(self.attribute)
            if entry and entry.is_member:
                entry = None
        else:
            error(self.pos,
                "Cannot select attribute of incomplete type '%s'"
                % obj_type)
            self.type = PyrexTypes.error_type
            return
        self.entry = entry
        if entry:
            if obj_type.is_extension_type and entry.name == "__weakref__":
                error(self.pos, "Illegal use of special attribute __weakref__")
            # methods need the normal attribute lookup
            # because they do not have struct entries
            if entry.is_variable or entry.is_cmethod:
                self.type = entry.type
                self.member = entry.cname
                return
            else:
                # If it's not a variable or C method, it must be a Python
                # method of an extension type, so we treat it like a Python
                # attribute.
                pass
    # NumPy hack
    if (getattr(self.obj, 'type', None) and
            obj_type.is_extension_type and
            obj_type.objstruct_cname == 'PyArrayObject'):
        from NumpySupport import numpy_transform_attribute_node
        replacement_node = numpy_transform_attribute_node(self)
        # Since we can't actually replace our node yet, we only grasp its
        # type, and then the replacement happens in
        # AnalyseExpresssionsTransform...
        self.type = replacement_node.type
        if replacement_node is not self:
            return
    # If we get here, the base object is not a struct/union/extension
    # type, or it is an extension type and the attribute is either not
    # declared or is declared as a Python method. Treat it as a Python
    # attribute reference.
    self.analyse_as_python_attribute(env, obj_type)
def analyse_as_python_attribute(self, env, obj_type = None):
    if obj_type is None:
        obj_type = self.obj.type
    # mangle private '__*' Python attributes used inside of a class
    self.attribute = env.mangle_class_private_name(self.attribute)
    self.member = self.attribute
    self.type = py_object_type
    self.is_py_attr = 1
    if not obj_type.is_pyobject and not obj_type.is_error:
        # getattr needs a Python object; coerce if possible.
        if obj_type.can_coerce_to_pyobject(env):
            self.obj = self.obj.coerce_to_pyobject(env)
        else:
            error(self.pos,
                "Object of type '%s' has no attribute '%s'" %
                (obj_type, self.attribute))

def nogil_check(self, env):
    if self.is_py_attr:
        self.gil_error()
    elif self.type.is_memoryviewslice:
        import MemoryView
        MemoryView.err_if_nogil_initialized_check(self.pos, env, 'attribute')

gil_message = "Accessing Python attribute"
def is_simple(self):
    """Simple iff the result sits in a temp or the base object is simple."""
    if not self.obj:
        return NameNode.is_simple(self)
    return self.result_in_temp() or self.obj.is_simple()

def is_lvalue(self):
    """Assignable unless this node was mutated or names a C array field."""
    if not self.obj:
        return NameNode.is_lvalue(self)
    return not self.type.is_array

def is_ephemeral(self):
    """Ephemeral iff the base object reference is ephemeral."""
    if not self.obj:
        return NameNode.is_ephemeral(self)
    return self.obj.is_ephemeral()
def calculate_result_code(self):
    #print "AttributeNode.calculate_result_code:", self.member ###
    #print "...obj node =", self.obj, "code", self.obj.result() ###
    #print "...obj type", self.obj.type, "ctype", self.obj.ctype() ###
    obj = self.obj
    obj_code = obj.result_as(obj.type)
    #print "...obj_code =", obj_code ###
    if self.entry and self.entry.is_cmethod:
        if obj.type.is_extension_type and not self.entry.is_builtin_cmethod:
            if self.entry.final_func_cname:
                # Final methods are called directly, bypassing the vtable.
                return self.entry.final_func_cname
            if self.type.from_fused:
                # If the attribute was specialized through indexing, make
                # sure to get the right fused name, as our entry was
                # replaced by our parent index node
                # (AnalyseExpressionsTransform)
                self.member = self.entry.cname
            # Dispatch through the extension type's vtable.
            return "((struct %s *)%s%s%s)->%s" % (
                obj.type.vtabstruct_cname, obj_code, self.op,
                obj.type.vtabslot_cname, self.member)
        else:
            return self.member
    elif obj.type.is_complex:
        return "__Pyx_C%s(%s)" % (self.member.upper(), obj_code)
    else:
        if obj.type.is_builtin_type and self.entry and self.entry.is_variable:
            # accessing a field of a builtin type, need to cast better than result_as() does
            obj_code = obj.type.cast_code(obj.result(), to_object_struct = True)
        return "%s%s%s" % (obj_code, self.op, self.member)
    def generate_result_code(self, code):
        """Emit the C code that evaluates this attribute access.

        Python attributes go through PyObject_GetAttr; memoryview slice
        attributes get optional None/initialization checks and special
        handling for transposition; C attributes need no code beyond the
        result expression, except for optional None checks and utility
        code registration for C methods.
        """
        if self.is_py_attr:
            # Runtime Python attribute lookup with error propagation.
            code.putln(
                '%s = PyObject_GetAttr(%s, %s); %s' % (
                    self.result(),
                    self.obj.py_result(),
                    code.intern_identifier(self.attribute),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif self.type.is_memoryviewslice:
            if code.globalstate.directives['nonecheck']:
                self.put_nonecheck(code)
            if self.is_memslice_transpose:
                # transpose the slice
                for access, packing in self.type.axes:
                    if access == 'ptr':
                        error(self.pos, "Transposing not supported for slices "
                                        "with indirect dimensions")
                        return
                code.putln("%s = %s;" % (self.result(), self.obj.result()))
                if self.obj.is_name or (self.obj.is_attribute and
                                        self.obj.is_memslice_transpose):
                    code.put_incref_memoryviewslice(self.result(), have_gil=True)
                T = "__pyx_memslice_transpose(&%s) == 0"
                code.putln(code.error_goto_if(T % self.result(), self.pos))
            elif self.initialized_check:
                # Raise AttributeError on an uninitialized memoryview slice.
                code.putln(
                    'if (unlikely(!%s.memview)) {'
                    'PyErr_SetString(PyExc_AttributeError,'
                    '"Memoryview is not initialized");'
                    '%s'
                    '}' % (self.result(), code.error_goto(self.pos)))
        elif (self.obj.type.is_memoryviewslice and
              code.globalstate.directives['nonecheck']):
            self.put_nonecheck(code)
        else:
            # result_code contains what is needed, but we may need to insert
            # a check and raise an exception
            if self.obj.type.is_extension_type:
                if self.needs_none_check and code.globalstate.directives['nonecheck']:
                    self.put_nonecheck(code)
            elif self.entry and self.entry.is_cmethod and self.entry.utility_code:
                # C method implemented as function call with utility code
                code.globalstate.use_utility_code(self.entry.utility_code)
    def generate_assignment_code(self, rhs, code):
        """Emit C code assigning ``rhs`` to this attribute.

        Python attributes use PyObject_SetAttr; C complex fields use the
        __Pyx_SET_C* macros; all other C attributes are assigned directly
        (with reference bookkeeping for Python object members and special
        handling for memoryview slices).
        """
        self.obj.generate_evaluation_code(code)
        if self.is_py_attr:
            code.put_error_if_neg(self.pos,
                'PyObject_SetAttr(%s, %s, %s)' % (
                    self.obj.py_result(),
                    code.intern_identifier(self.attribute),
                    rhs.py_result()))
            rhs.generate_disposal_code(code)
            rhs.free_temps(code)
        elif self.obj.type.is_complex:
            # NOTE(review): this branch emits no rhs disposal/free_temps,
            # unlike the other two branches — confirm rhs cleanup is
            # handled by the caller for complex assignments.
            code.putln("__Pyx_SET_C%s(%s, %s);" % (
                self.member.upper(),
                self.obj.result_as(self.obj.type),
                rhs.result_as(self.ctype())))
        else:
            if (self.obj.type.needs_nonecheck()
                  and self.needs_none_check
                  and code.globalstate.directives['nonecheck']):
                self.put_nonecheck(code)
            select_code = self.result()
            if self.type.is_pyobject and self.use_managed_ref:
                # Hand ownership of rhs to the attribute slot and drop the
                # reference previously held there.
                rhs.make_owned_reference(code)
                code.put_giveref(rhs.py_result())
                code.put_gotref(select_code)
                code.put_decref(select_code, self.ctype())
            elif self.type.is_memoryviewslice:
                import MemoryView
                MemoryView.put_assign_to_memviewslice(
                        select_code, rhs, rhs.result(), self.type, code)
            if not self.type.is_memoryviewslice:
                code.putln(
                    "%s = %s;" % (
                        select_code,
                        rhs.result_as(self.ctype())))
            rhs.generate_post_assignment_code(code)
            rhs.free_temps(code)
        self.obj.generate_disposal_code(code)
        self.obj.free_temps(code)
    def generate_deletion_code(self, code):
        """Emit C code for ``del obj.attr``.

        Allowed for Python attributes and for C properties that declare a
        '__del__' handler; deleting any other C attribute is a compile
        error.
        """
        self.obj.generate_evaluation_code(code)
        if self.is_py_attr or (isinstance(self.entry.scope, Symtab.PropertyScope)
                               and u'__del__' in self.entry.scope.entries):
            code.put_error_if_neg(self.pos,
                'PyObject_DelAttr(%s, %s)' % (
                    self.obj.py_result(),
                    code.intern_identifier(self.attribute)))
        else:
            error(self.pos, "Cannot delete C attribute of extension type")
        self.obj.generate_disposal_code(code)
        self.obj.free_temps(code)
def annotate(self, code):
if self.is_py_attr:
code.annotate(self.pos, AnnotationItem('py_attr', 'python attribute', size=len(self.attribute)))
else:
code.annotate(self.pos, AnnotationItem('c_attr', 'c attribute', size=len(self.attribute)))
def put_nonecheck(self, code):
code.globalstate.use_utility_code(raise_noneattr_error_utility_code)
if self.obj.type.is_extension_type:
test = "%s == Py_None" % self.obj.result_as(PyrexTypes.py_object_type)
elif self.obj.type.is_memoryviewslice:
test = "(PyObject *) %s.memview == Py_None" % self.obj.result()
else:
assert False
code.putln("if (%s) {" % code.unlikely(test))
code.putln("__Pyx_RaiseNoneAttributeError(\"%s\");" % self.attribute)
code.putln(code.error_goto(self.pos))
code.putln("}")
#-------------------------------------------------------------------
#
# Constructor nodes
#
#-------------------------------------------------------------------
class StarredTargetNode(ExprNode):
    """A starred expression such as "*a".

    Only legal inside sequence assignment targets, e.g.::

        a, *b = (1,2,3,4)   =>   a = 1 ; b = [2,3,4]

    The node is removed again during type analysis; finding it anywhere
    else produces an error.

    Attributes:
      target   ExprNode   the wrapped assignment target
    """
    subexprs = ['target']
    is_starred = 1
    type = py_object_type
    is_temp = 1

    def __init__(self, pos, target):
        self.pos = pos
        self.target = target

    def analyse_declarations(self, env):
        # Reaching this outside of an assignment target is an error.
        error(self.pos, "can use starred expression only as assignment target")
        self.target.analyse_declarations(env)

    def analyse_types(self, env):
        # Same restriction as in analyse_declarations().
        error(self.pos, "can use starred expression only as assignment target")
        self.target.analyse_types(env)
        self.type = self.target.type

    def analyse_target_declaration(self, env):
        self.target.analyse_target_declaration(env)

    def analyse_target_types(self, env):
        self.target.analyse_target_types(env)
        self.type = self.target.type

    def calculate_result_code(self):
        return ""

    def generate_result_code(self, code):
        pass
class SequenceNode(ExprNode):
    """Base class for list and tuple constructor nodes.

    Contains common code for packing element values into a sequence and
    for performing sequence unpacking in assignments, including starred
    targets (``a, *b, c = rhs``).

    Attributes:
      args                    [ExprNode]            element expressions
      unpacked_items          [ExprNode] or None    temps holding unpacked values
      coerced_unpacked_items  [ExprNode] or None    those temps coerced to target types
      mult_factor             ExprNode              the integer number of content repetitions ([1,2]*3)
    """
    subexprs = ['args', 'mult_factor']
    is_sequence_constructor = 1
    unpacked_items = None
    mult_factor = None
    def compile_time_value_list(self, denv):
        """Evaluate all element expressions at compile time."""
        return [arg.compile_time_value(denv) for arg in self.args]
    def replace_starred_target_node(self):
        """Replace any starred node among the targets by its contained expression.

        Sets self.starred_assignment and marks the unwrapped target as starred.
        """
        self.starred_assignment = False
        args = []
        for arg in self.args:
            if arg.is_starred:
                if self.starred_assignment:
                    error(arg.pos, "more than 1 starred expression in assignment")
                self.starred_assignment = True
                arg = arg.target
                arg.is_starred = True
            args.append(arg)
        self.args = args
    def analyse_target_declaration(self, env):
        self.replace_starred_target_node()
        for arg in self.args:
            arg.analyse_target_declaration(env)
    def analyse_types(self, env, skip_children=False):
        """Analyse and coerce all elements (and the repeat factor) to Python objects."""
        for i in range(len(self.args)):
            arg = self.args[i]
            if not skip_children: arg.analyse_types(env)
            self.args[i] = arg.coerce_to_pyobject(env)
        if self.mult_factor:
            self.mult_factor.analyse_types(env)
            if not self.mult_factor.type.is_int:
                self.mult_factor = self.mult_factor.coerce_to_pyobject(env)
        self.is_temp = 1
        # not setting self.type here, subtypes do this
    def may_be_none(self):
        return False
    def analyse_target_types(self, env):
        """Analyse the node as an unpacking target, building per-item temps."""
        if self.mult_factor:
            error(self.pos, "can't assign to multiplied sequence")
        self.unpacked_items = []
        self.coerced_unpacked_items = []
        self.any_coerced_items = False
        for arg in self.args:
            arg.analyse_target_types(env)
            if arg.is_starred:
                # A starred target always collects into a Python list.
                if not arg.type.assignable_from(Builtin.list_type):
                    error(arg.pos,
                          "starred target must have Python object (list) type")
                if arg.type is py_object_type:
                    arg.type = Builtin.list_type
            unpacked_item = PyTempNode(self.pos, env)
            coerced_unpacked_item = unpacked_item.coerce_to(arg.type, env)
            if unpacked_item is not coerced_unpacked_item:
                self.any_coerced_items = True
            self.unpacked_items.append(unpacked_item)
            self.coerced_unpacked_items.append(coerced_unpacked_item)
        self.type = py_object_type
    def generate_result_code(self, code):
        self.generate_operation_code(code)
    def generate_sequence_packing_code(self, code, target=None, plain=False):
        """Emit C code that creates the list/tuple and fills in the elements.

        target: C result name to assign to (defaults to self.result()).
        plain:  if True, ignore any mult_factor (used for the constant part
                of a partly-literal tuple).
        """
        if target is None:
            target = self.result()
        # A non-int factor is applied afterwards via PyNumber_InPlaceMultiply.
        py_multiply = self.mult_factor and not self.mult_factor.type.is_int
        if plain or py_multiply:
            mult_factor = None
        else:
            mult_factor = self.mult_factor
        if mult_factor:
            mult = mult_factor.result()
            if isinstance(mult_factor.constant_result, (int,long)) \
                   and mult_factor.constant_result > 0:
                size_factor = ' * %s' % mult_factor.constant_result
            else:
                # Clamp a possibly negative runtime factor to zero.
                size_factor = ' * ((%s<0) ? 0:%s)' % (mult, mult)
        else:
            size_factor = ''
            mult = ''
        if self.type is Builtin.list_type:
            create_func, set_item_func = 'PyList_New', 'PyList_SET_ITEM'
        elif self.type is Builtin.tuple_type:
            create_func, set_item_func = 'PyTuple_New', 'PyTuple_SET_ITEM'
        else:
            raise InternalError("sequence unpacking for unexpected type %s" % self.type)
        arg_count = len(self.args)
        code.putln("%s = %s(%s%s); %s" % (
            target, create_func, arg_count, size_factor,
            code.error_goto_if_null(target, self.pos)))
        code.put_gotref(target)
        if mult:
            # FIXME: can't use a temp variable here as the code may
            # end up in the constant building function.  Temps
            # currently don't work there.
            counter = Naming.quick_temp_cname
            code.putln('{ Py_ssize_t %s;' % counter)
            if arg_count == 1:
                offset = counter
            else:
                offset = '%s * %s' % (counter, arg_count)
            code.putln('for (%s=0; %s < %s; %s++) {' % (
                counter, counter, mult, counter
                ))
        else:
            offset = ''
        for i in xrange(arg_count):
            arg = self.args[i]
            if mult or not arg.result_in_temp():
                code.put_incref(arg.result(), arg.ctype())
            # SET_ITEM macros steal the reference, hence put_giveref below.
            code.putln("%s(%s, %s, %s);" % (
                set_item_func,
                target,
                (offset and i) and ('%s + %s' % (offset, i)) or (offset or i),
                arg.py_result()))
            code.put_giveref(arg.py_result())
        if mult:
            code.putln('}')
            code.putln('}')
        elif py_multiply and not plain:
            code.putln('{ PyObject* %s = PyNumber_InPlaceMultiply(%s, %s); %s' % (
                Naming.quick_temp_cname, target, self.mult_factor.py_result(),
                code.error_goto_if_null(Naming.quick_temp_cname, self.pos)
                ))
            code.put_gotref(Naming.quick_temp_cname)
            code.put_decref(target, py_object_type)
            code.putln('%s = %s;' % (target, Naming.quick_temp_cname))
            code.putln('}')
    def generate_subexpr_disposal_code(self, code):
        if self.mult_factor and self.mult_factor.type.is_int:
            super(SequenceNode, self).generate_subexpr_disposal_code(code)
        else:
            # We call generate_post_assignment_code here instead
            # of generate_disposal_code, because values were stored
            # in the tuple using a reference-stealing operation.
            for arg in self.args:
                arg.generate_post_assignment_code(code)
            # Should NOT call free_temps -- this is invoked by the default
            # generate_evaluation_code which will do that.
            if self.mult_factor:
                self.mult_factor.generate_disposal_code(code)
    def generate_assignment_code(self, rhs, code):
        """Emit unpacking assignment of rhs into this sequence of targets."""
        if self.starred_assignment:
            self.generate_starred_assignment_code(rhs, code)
        else:
            self.generate_parallel_assignment_code(rhs, code)
        for item in self.unpacked_items:
            item.release(code)
        rhs.free_temps(code)
    # C type of a tp_iternext slot: PyObject* (*)(PyObject*).
    _func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
        PyrexTypes.py_object_type, [
            PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
            ]))
    def generate_parallel_assignment_code(self, rhs, code):
        """Emit ``a, b, c = rhs`` unpacking (no starred target).

        In CPython, exact tuples/lists get a fast direct-indexing path;
        everything else falls back to the generic iterator protocol.
        """
        # Need to work around the fact that generate_evaluation_code
        # allocates the temps in a rather hacky way -- the assignment
        # is evaluated twice, within each if-block.
        for item in self.unpacked_items:
            item.allocate(code)
        special_unpack = (rhs.type is py_object_type
                          or rhs.type in (tuple_type, list_type)
                          or not rhs.type.is_builtin_type)
        long_enough_for_a_loop = len(self.unpacked_items) > 3
        if special_unpack:
            tuple_check = 'likely(PyTuple_CheckExact(%s))' % rhs.py_result()
            list_check = 'PyList_CheckExact(%s)' % rhs.py_result()
            if rhs.type is list_type:
                sequence_types = ['List']
                sequence_type_test = list_check
            elif rhs.type is tuple_type:
                sequence_types = ['Tuple']
                sequence_type_test = tuple_check
            else:
                sequence_types = ['Tuple', 'List']
                sequence_type_test = "(%s) || (%s)" % (tuple_check, list_check)
            code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
            code.putln("if (%s) {" % sequence_type_test)
            code.putln("PyObject* sequence = %s;" % rhs.py_result())
            if len(sequence_types) == 2:
                code.putln("if (likely(Py%s_CheckExact(sequence))) {" % sequence_types[0])
            self.generate_special_parallel_unpacking_code(
                code, sequence_types[0],
                use_loop=long_enough_for_a_loop and sequence_types[0] != 'Tuple')
            if len(sequence_types) == 2:
                code.putln("} else {")
                self.generate_special_parallel_unpacking_code(
                    code, sequence_types[1], use_loop=long_enough_for_a_loop)
                code.putln("}")
            rhs.generate_disposal_code(code)
            code.putln("} else")
            code.putln("#endif")
        code.putln("{")
        if special_unpack and rhs.type is tuple_type:
            # Statically a tuple but failed the exact-type check => error.
            code.globalstate.use_utility_code(tuple_unpacking_error_code)
            code.putln("__Pyx_UnpackTupleError(%s, %s);" % (
                        rhs.py_result(), len(self.args)))
            code.putln(code.error_goto(self.pos))
        else:
            self.generate_generic_parallel_unpacking_code(
                code, rhs, self.unpacked_items, use_loop=long_enough_for_a_loop)
        code.putln("}")
        for value_node in self.coerced_unpacked_items:
            value_node.generate_evaluation_code(code)
        for i in range(len(self.args)):
            self.args[i].generate_assignment_code(
                self.coerced_unpacked_items[i], code)
    def generate_special_parallel_unpacking_code(self, code, sequence_type, use_loop):
        """Emit the CPython fast path: direct GET_ITEM from an exact tuple/list.

        sequence_type: 'Tuple' or 'List' (used to build the C macro names).
        use_loop: emit a C loop over a temps array instead of unrolling.
        """
        code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
        code.globalstate.use_utility_code(raise_too_many_values_to_unpack)
        if use_loop:
            # must be at the start of a C block!
            code.putln("PyObject** temps[%s] = {%s};" % (
                len(self.unpacked_items),
                ','.join(['&%s' % item.result() for item in self.unpacked_items])))
        code.putln("if (unlikely(Py%s_GET_SIZE(sequence) != %d)) {" % (
            sequence_type, len(self.args)))
        code.putln("if (Py%s_GET_SIZE(sequence) > %d) __Pyx_RaiseTooManyValuesError(%d);" % (
            sequence_type, len(self.args), len(self.args)))
        code.putln("else __Pyx_RaiseNeedMoreValuesError(Py%s_GET_SIZE(sequence));" % sequence_type)
        code.putln(code.error_goto(self.pos))
        code.putln("}")
        if use_loop:
            # shorter code in a loop works better for lists in CPython
            counter = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
            code.putln("for (%s=0; %s < %s; %s++) {" % (
                counter, counter, len(self.unpacked_items), counter))
            code.putln("PyObject* item = Py%s_GET_ITEM(sequence, %s);" % (
                sequence_type, counter))
            code.putln("*(temps[%s]) = item;" % counter)
            code.put_incref("item", PyrexTypes.py_object_type)
            code.putln("}")
            code.funcstate.release_temp(counter)
        else:
            # unrolling the loop is very fast for tuples in CPython
            for i, item in enumerate(self.unpacked_items):
                code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (item.result(), sequence_type, i))
                code.put_incref(item.result(), item.ctype())
    def generate_generic_parallel_unpacking_code(self, code, rhs, unpacked_items, use_loop, terminate=True):
        """Emit generic iterator-protocol unpacking into ``unpacked_items``.

        If terminate is False, the iterator is left open and its temp name
        is returned (used for the left part of a starred assignment);
        otherwise exhaustion is checked and None is returned.
        """
        code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
        code.globalstate.use_utility_code(UtilityCode.load_cached("IterFinish", "ObjectHandling.c"))
        code.putln("Py_ssize_t index = -1;") # must be at the start of a C block!
        if use_loop:
            # NOTE(review): the C array length comes from self.unpacked_items
            # while the initializers come from the (possibly shorter)
            # unpacked_items argument; the extra slots stay unset but are
            # never indexed — confirm this is intentional.
            code.putln("PyObject** temps[%s] = {%s};" % (
                len(self.unpacked_items),
                ','.join(['&%s' % item.result() for item in unpacked_items])))
        iterator_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
        code.putln(
            "%s = PyObject_GetIter(%s); %s" % (
                iterator_temp,
                rhs.py_result(),
                code.error_goto_if_null(iterator_temp, self.pos)))
        code.put_gotref(iterator_temp)
        rhs.generate_disposal_code(code)
        # Cache the tp_iternext slot to avoid repeated lookups.
        iternext_func = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
        code.putln("%s = Py_TYPE(%s)->tp_iternext;" % (
            iternext_func, iterator_temp))
        unpacking_error_label = code.new_label('unpacking_failed')
        unpack_code = "%s(%s)" % (iternext_func, iterator_temp)
        if use_loop:
            code.putln("for (index=0; index < %s; index++) {" % len(unpacked_items))
            code.put("PyObject* item = %s; if (unlikely(!item)) " % unpack_code)
            code.put_goto(unpacking_error_label)
            code.put_gotref("item")
            code.putln("*(temps[index]) = item;")
            code.putln("}")
        else:
            for i, item in enumerate(unpacked_items):
                code.put(
                    "index = %d; %s = %s; if (unlikely(!%s)) " % (
                        i,
                        item.result(),
                        unpack_code,
                        item.result()))
                code.put_goto(unpacking_error_label)
                code.put_gotref(item.py_result())
        if terminate:
            # Make sure the iterator is exhausted (raises on extra items).
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("UnpackItemEndCheck", "ObjectHandling.c"))
            code.put_error_if_neg(self.pos, "__Pyx_IternextUnpackEndCheck(%s, %d)" % (
                unpack_code,
                len(unpacked_items)))
            code.putln("%s = NULL;" % iternext_func)
            code.put_decref_clear(iterator_temp, py_object_type)
        unpacking_done_label = code.new_label('unpacking_done')
        code.put_goto(unpacking_done_label)
        code.put_label(unpacking_error_label)
        # On premature StopIteration, report how many values were found.
        code.put_decref_clear(iterator_temp, py_object_type)
        code.putln("%s = NULL;" % iternext_func)
        code.putln("if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);")
        code.putln(code.error_goto(self.pos))
        code.put_label(unpacking_done_label)
        code.funcstate.release_temp(iternext_func)
        if terminate:
            code.funcstate.release_temp(iterator_temp)
            iterator_temp = None
        return iterator_temp
    def generate_starred_assignment_code(self, rhs, code):
        """Emit ``a, *b, c = rhs``: unpack the fixed left items, collect the
        rest into a list, then pop the fixed right items off its end."""
        for i, arg in enumerate(self.args):
            if arg.is_starred:
                starred_target = self.unpacked_items[i]
                unpacked_fixed_items_left = self.unpacked_items[:i]
                unpacked_fixed_items_right = self.unpacked_items[i+1:]
                break
        else:
            assert False
        iterator_temp = None
        if unpacked_fixed_items_left:
            for item in unpacked_fixed_items_left:
                item.allocate(code)
            code.putln('{')
            iterator_temp = self.generate_generic_parallel_unpacking_code(
                code, rhs, unpacked_fixed_items_left,
                use_loop=True, terminate=False)
            for i, item in enumerate(unpacked_fixed_items_left):
                value_node = self.coerced_unpacked_items[i]
                value_node.generate_evaluation_code(code)
            code.putln('}')
        starred_target.allocate(code)
        target_list = starred_target.result()
        # Collect all remaining values from the (partially consumed) iterator.
        code.putln("%s = PySequence_List(%s); %s" % (
            target_list,
            iterator_temp or rhs.py_result(),
            code.error_goto_if_null(target_list, self.pos)))
        code.put_gotref(target_list)
        if iterator_temp:
            code.put_decref_clear(iterator_temp, py_object_type)
            code.funcstate.release_temp(iterator_temp)
        else:
            rhs.generate_disposal_code(code)
        if unpacked_fixed_items_right:
            code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
            length_temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
            code.putln('%s = PyList_GET_SIZE(%s);' % (length_temp, target_list))
            code.putln("if (unlikely(%s < %d)) {" % (length_temp, len(unpacked_fixed_items_right)))
            code.putln("__Pyx_RaiseNeedMoreValuesError(%d+%s); %s" % (
                 len(unpacked_fixed_items_left), length_temp,
                 code.error_goto(self.pos)))
            code.putln('}')
            for item in unpacked_fixed_items_right[::-1]:
                item.allocate(code)
            # Reversing both lists aligns the rightmost item with its coercion.
            for i, (item, coerced_arg) in enumerate(zip(unpacked_fixed_items_right[::-1],
                                                        self.coerced_unpacked_items[::-1])):
                code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
                code.putln("%s = PyList_GET_ITEM(%s, %s-%d); " % (
                    item.py_result(), target_list, length_temp, i+1))
                # resize the list the hard way
                code.putln("((PyVarObject*)%s)->ob_size--;" % target_list)
                code.putln('#else')
                code.putln("%s = PySequence_GetItem(%s, %s-%d); " % (
                    item.py_result(), target_list, length_temp, i+1))
                code.putln('#endif')
                code.put_gotref(item.py_result())
                coerced_arg.generate_evaluation_code(code)
            # Outside CPython the list wasn't resized above, so slice it now.
            code.putln('#if !CYTHON_COMPILING_IN_CPYTHON')
            sublist_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
            code.putln('%s = PyList_GetSlice(%s, 0, %s-%d); %s' % (
                sublist_temp, target_list, length_temp, len(unpacked_fixed_items_right),
                code.error_goto_if_null(sublist_temp, self.pos)))
            code.put_gotref(sublist_temp)
            code.funcstate.release_temp(length_temp)
            code.put_decref(target_list, py_object_type)
            code.putln('%s = %s; %s = NULL;' % (target_list, sublist_temp, sublist_temp))
            code.putln('#else')
            code.putln('%s = %s;' % (sublist_temp, sublist_temp)) # avoid warning about unused variable
            code.funcstate.release_temp(sublist_temp)
            code.putln('#endif')
        for i, arg in enumerate(self.args):
            arg.generate_assignment_code(self.coerced_unpacked_items[i], code)
    def annotate(self, code):
        for arg in self.args:
            arg.annotate(code)
        if self.unpacked_items:
            for arg in self.unpacked_items:
                arg.annotate(code)
            for arg in self.coerced_unpacked_items:
                arg.annotate(code)
class TupleNode(SequenceNode):
    """Tuple constructor node.

    Fully-literal tuples become cached module-level constants; a literal
    tuple with a non-literal repeat factor is "partly literal" and gets
    multiplied at runtime.
    """
    type = tuple_type
    is_partly_literal = False
    gil_message = "Constructing Python tuple"
    def analyse_types(self, env, skip_children=False):
        """Analyse elements and decide between constant and runtime construction."""
        if len(self.args) == 0:
            # The empty tuple is a shared global constant.
            self.is_temp = False
            self.is_literal = True
        else:
            SequenceNode.analyse_types(self, env, skip_children)
            for child in self.args:
                if not child.is_literal:
                    break
            else:
                # All children are literals.
                if not self.mult_factor or self.mult_factor.is_literal and \
                       isinstance(self.mult_factor.constant_result, (int, long)):
                    self.is_temp = False
                    self.is_literal = True
                else:
                    if not self.mult_factor.type.is_pyobject:
                        self.mult_factor = self.mult_factor.coerce_to_pyobject(env)
                    self.is_temp = True
                    self.is_partly_literal = True
    def is_simple(self):
        # either temp or constant => always simple
        return True
    def nonlocally_immutable(self):
        # either temp or constant => always safe
        return True
    def calculate_result_code(self):
        if len(self.args) > 0:
            return self.result_code
        else:
            return Naming.empty_tuple
    def calculate_constant_result(self):
        self.constant_result = tuple([
                arg.constant_result for arg in self.args])
    def compile_time_value(self, denv):
        """Evaluate the tuple at compile time (for DEF/IF constant folding)."""
        values = self.compile_time_value_list(denv)
        try:
            return tuple(values)
        except Exception, e:
            self.compile_time_value_error(e)
    def generate_operation_code(self, code):
        """Emit the construction code, routing constants to the cached-constants writer."""
        if len(self.args) == 0:
            # result_code is Naming.empty_tuple
            return
        if self.is_partly_literal:
            # Underlying tuple is const, but the repeat factor is not:
            # build the constant once, multiply at runtime.
            tuple_target = code.get_py_const(py_object_type, 'tuple_', cleanup_level=2)
            const_code = code.get_cached_constants_writer()
            const_code.mark_pos(self.pos)
            self.generate_sequence_packing_code(const_code, tuple_target, plain=True)
            const_code.put_giveref(tuple_target)
            code.putln('%s = PyNumber_Multiply(%s, %s); %s' % (
                self.result(), tuple_target, self.mult_factor.py_result(),
                code.error_goto_if_null(self.result(), self.pos)
                ))
            code.put_gotref(self.py_result())
        elif self.is_literal:
            # non-empty cached tuple => result is global constant,
            # creation code goes into separate code writer
            self.result_code = code.get_py_const(py_object_type, 'tuple_', cleanup_level=2)
            code = code.get_cached_constants_writer()
            code.mark_pos(self.pos)
            self.generate_sequence_packing_code(code)
            code.put_giveref(self.py_result())
        else:
            self.generate_sequence_packing_code(code)
class ListNode(SequenceNode):
    """List constructor node.

    A list literal may later be coerced to a C array or struct value, in
    which case the element conversions to Python objects are discarded.

    Attributes:
      obj_conversion_errors  [PyrexError]  held conversion errors (internal)
      original_args          [ExprNode]    args before Python coercion (internal)
    """
    obj_conversion_errors = []
    type = list_type
    gil_message = "Constructing Python list"
    def type_dependencies(self, env):
        return ()
    def infer_type(self, env):
        # TODO: Infer non-object list arrays.
        return list_type
    def analyse_expressions(self, env):
        SequenceNode.analyse_expressions(self, env)
        self.coerce_to_pyobject(env)
    def analyse_types(self, env):
        # Hold back conversion errors: if the list is later coerced to a
        # C array/struct, the Python object conversions never happen and
        # the errors must be discarded rather than reported.
        hold_errors()
        self.original_args = list(self.args)
        SequenceNode.analyse_types(self, env)
        self.obj_conversion_errors = held_errors()
        release_errors(ignore=True)
    def coerce_to(self, dst_type, env):
        """Coerce the list literal to dst_type: a Python object, a C array
        (pointer target type), or a C struct value."""
        if dst_type.is_pyobject:
            # Becoming a real Python list: now report any held errors.
            for err in self.obj_conversion_errors:
                report_error(err)
            self.obj_conversion_errors = []
            if not self.type.subtype_of(dst_type):
                error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
        elif self.mult_factor:
            error(self.pos, "Cannot coerce multiplied list to '%s'" % dst_type)
        elif dst_type.is_ptr and dst_type.base_type is not PyrexTypes.c_void_type:
            # Use the original (un-coerced) elements for the C array values.
            base_type = dst_type.base_type
            self.type = PyrexTypes.CArrayType(base_type, len(self.args))
            for i in range(len(self.original_args)):
                arg = self.args[i]
                if isinstance(arg, CoerceToPyTypeNode):
                    arg = arg.arg
                self.args[i] = arg.coerce_to(base_type, env)
        elif dst_type.is_struct:
            if len(self.args) > len(dst_type.scope.var_entries):
                # fixed: error message previously read "Too may members"
                error(self.pos, "Too many members for '%s'" % dst_type)
            else:
                if len(self.args) < len(dst_type.scope.var_entries):
                    warning(self.pos, "Too few members for '%s'" % dst_type, 1)
                for i, (arg, member) in enumerate(zip(self.original_args, dst_type.scope.var_entries)):
                    if isinstance(arg, CoerceToPyTypeNode):
                        arg = arg.arg
                    self.args[i] = arg.coerce_to(member.type, env)
            self.type = dst_type
        else:
            self.type = error_type
            error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
        return self
    def release_temp(self, env):
        if self.type.is_array:
            # To be valid C++, we must allocate the memory on the stack
            # manually and be sure not to reuse it for something else.
            pass
        else:
            SequenceNode.release_temp(self, env)
    def calculate_constant_result(self):
        if self.mult_factor:
            raise ValueError() # may exceed the compile time memory
        self.constant_result = [
            arg.constant_result for arg in self.args]
    def compile_time_value(self, denv):
        l = self.compile_time_value_list(denv)
        if self.mult_factor:
            l *= self.mult_factor.compile_time_value(denv)
        return l
    def generate_operation_code(self, code):
        """Emit construction code for the list / C array / struct variant."""
        if self.type.is_pyobject:
            for err in self.obj_conversion_errors:
                report_error(err)
            self.generate_sequence_packing_code(code)
        elif self.type.is_array:
            for i, arg in enumerate(self.args):
                code.putln("%s[%s] = %s;" % (
                                self.result(),
                                i,
                                arg.result()))
        elif self.type.is_struct:
            for arg, member in zip(self.args, self.type.scope.var_entries):
                code.putln("%s.%s = %s;" % (
                        self.result(),
                        member.cname,
                        arg.result()))
        else:
            raise InternalError("List type never specified")
class ScopedExprNode(ExprNode):
    """Abstract base class for ExprNodes that have their own local scope,
    such as generator expressions.

    Attributes:
      expr_scope   Scope   the inner scope of the expression
    """
    subexprs = []
    expr_scope = None
    # does this node really have a local scope, e.g. does it leak loop
    # variables or not? non-leaking Py3 behaviour is default, except
    # for list comprehensions where the behaviour differs in Py2 and
    # Py3 (set in Parsing.py based on parser context)
    has_local_scope = True
    def init_scope(self, outer_scope, expr_scope=None):
        """Set self.expr_scope: an explicit scope, a fresh generator
        expression scope, or None when the node leaks into outer_scope."""
        if expr_scope is not None:
            self.expr_scope = expr_scope
        elif self.has_local_scope:
            self.expr_scope = Symtab.GeneratorExpressionScope(outer_scope)
        else:
            self.expr_scope = None
    def analyse_declarations(self, env):
        self.init_scope(env)
    def analyse_scoped_declarations(self, env):
        # this is called with the expr_scope as env
        pass
    def analyse_types(self, env):
        # no recursion here, the children will be analysed separately below
        pass
    def analyse_scoped_expressions(self, env):
        # this is called with the expr_scope as env
        pass
    def generate_evaluation_code(self, code):
        """Evaluate inside a C block that declares the scope's local
        variables and decrefs local Python references on every exit path."""
        generate_inner_evaluation_code = super(ScopedExprNode, self).generate_evaluation_code
        if not self.has_local_scope or not self.expr_scope.var_entries:
            # no local variables => delegate, done
            generate_inner_evaluation_code(code)
            return
        code.putln('{ /* enter inner scope */')
        py_entries = []
        for entry in self.expr_scope.var_entries:
            if not entry.in_closure:
                code.put_var_declaration(entry)
                if entry.type.is_pyobject and entry.used:
                    py_entries.append(entry)
        if not py_entries:
            # no local Python references => no cleanup required
            generate_inner_evaluation_code(code)
            code.putln('} /* exit inner scope */')
            return
        # must free all local Python references at each exit point;
        # temporarily redirect the error and loop labels to do so
        old_loop_labels = tuple(code.new_loop_labels())
        old_error_label = code.new_error_label()
        generate_inner_evaluation_code(code)
        # normal (non-error) exit
        for entry in py_entries:
            code.put_var_decref(entry)
        # error/loop body exit points
        exit_scope = code.new_label('exit_scope')
        code.put_goto(exit_scope)
        for label, old_label in ([(code.error_label, old_error_label)] +
                                 list(zip(code.get_loop_labels(), old_loop_labels))):
            if code.label_used(label):
                code.put_label(label)
                for entry in py_entries:
                    code.put_var_decref(entry)
                code.put_goto(old_label)
        code.put_label(exit_scope)
        code.putln('} /* exit inner scope */')
        code.set_loop_labels(old_loop_labels)
        code.error_label = old_error_label
class ComprehensionNode(ScopedExprNode):
    """A list/set/dict comprehension whose result container ('target') is
    filled in place by executing the contained loop ('loop')."""
    subexprs = ["target"]
    child_attrs = ["loop"]

    def infer_type(self, env):
        return self.target.infer_type(env)

    def analyse_declarations(self, env):
        # Tell the append node which container to push values into.
        self.append.target = self
        self.init_scope(env)

    def analyse_scoped_declarations(self, env):
        self.loop.analyse_declarations(env)

    def analyse_types(self, env):
        self.target.analyse_expressions(env)
        self.type = self.target.type
        if not self.has_local_scope:
            # Leaking scope: the loop is analysed in the outer environment.
            self.loop.analyse_expressions(env)

    def analyse_scoped_expressions(self, env):
        if self.has_local_scope:
            self.loop.analyse_expressions(env)

    def may_be_none(self):
        # The comprehension always produces its container, never None.
        return False

    def calculate_result_code(self):
        return self.target.result()

    def generate_result_code(self, code):
        self.generate_operation_code(code)

    def generate_operation_code(self, code):
        # Running the loop populates the target container as a side effect.
        self.loop.generate_execution_code(code)

    def annotate(self, code):
        self.loop.annotate(code)
class ComprehensionAppendNode(Node):
    """Appends the value of 'expr' to a comprehension's result container.

    The collecting node is stored in 'target', which is set externally
    and deliberately kept out of child_attrs/subexprs to avoid infinite
    recursion during tree traversal.
    """
    child_attrs = ['expr']
    type = PyrexTypes.c_int_type

    def analyse_expressions(self, env):
        self.expr.analyse_expressions(env)
        if not self.expr.type.is_pyobject:
            self.expr = self.expr.coerce_to_pyobject(env)

    def generate_execution_code(self, code):
        target_type = self.target.type
        if target_type is list_type:
            code.globalstate.use_utility_code(UtilityCode.load_cached("InternalListAppend", "Optimize.c"))
            function = "__Pyx_PyList_Append"
        elif target_type is set_type:
            function = "PySet_Add"
        else:
            raise InternalError(
                "Invalid type for comprehension node: %s" % self.target.type)
        self.expr.generate_evaluation_code(code)
        append_call = "%s(%s, (PyObject*)%s)" % (
            function,
            self.target.result(),
            self.expr.result())
        code.putln(code.error_goto_if(append_call, self.pos))
        self.expr.generate_disposal_code(code)
        self.expr.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.expr.generate_function_definitions(env, code)

    def annotate(self, code):
        self.expr.annotate(code)
class DictComprehensionAppendNode(ComprehensionAppendNode):
    """Stores 'value_expr' under 'key_expr' in a dict comprehension's target."""
    child_attrs = ['key_expr', 'value_expr']

    def analyse_expressions(self, env):
        # Both the key and the value must end up as Python objects.
        for attr in ('key_expr', 'value_expr'):
            expr = getattr(self, attr)
            expr.analyse_expressions(env)
            if not expr.type.is_pyobject:
                setattr(self, attr, expr.coerce_to_pyobject(env))

    def generate_execution_code(self, code):
        self.key_expr.generate_evaluation_code(code)
        self.value_expr.generate_evaluation_code(code)
        setitem_call = "PyDict_SetItem(%s, (PyObject*)%s, (PyObject*)%s)" % (
            self.target.result(),
            self.key_expr.result(),
            self.value_expr.result())
        code.putln(code.error_goto_if(setitem_call, self.pos))
        for expr in (self.key_expr, self.value_expr):
            expr.generate_disposal_code(code)
            expr.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.key_expr.generate_function_definitions(env, code)
        self.value_expr.generate_function_definitions(env, code)

    def annotate(self, code):
        self.key_expr.annotate(code)
        self.value_expr.annotate(code)
class InlinedGeneratorExpressionNode(ScopedExprNode):
    # An inlined generator expression for which the result is
    # calculated inside of the loop.  This will only be created by
    # transforms when replacing builtin calls on generator
    # expressions.
    #
    # loop           ForStatNode      the for-loop, not containing any YieldExprNodes
    # result_node    ResultRefNode    the reference to the result value temp
    # orig_func      String           the name of the builtin function this node replaces
    child_attrs = ["loop"]
    loop_analysed = False
    type = py_object_type
    def analyse_scoped_declarations(self, env):
        self.loop.analyse_declarations(env)
    def may_be_none(self):
        return False
    def annotate(self, code):
        self.loop.annotate(code)
    def infer_type(self, env):
        return self.result_node.infer_type(env)
    def analyse_types(self, env):
        # Without a local scope the loop is analysed right here; otherwise
        # analyse_scoped_expressions() does it with the inner scope active.
        if not self.has_local_scope:
            self.loop_analysed = True
            self.loop.analyse_expressions(env)
        self.type = self.result_node.type
        self.is_temp = True
    def analyse_scoped_expressions(self, env):
        self.loop_analysed = True
        if self.has_local_scope:
            self.loop.analyse_expressions(env)
    def coerce_to(self, dst_type, env):
        if self.orig_func == 'sum' and dst_type.is_numeric and not self.loop_analysed:
            # We can optimise by dropping the aggregation variable and
            # the add operations into C.  This can only be done safely
            # before analysing the loop body, after that, the result
            # reference type will have infected expressions and
            # assignments.
            self.result_node.type = self.type = dst_type
            return self
        return super(InlinedGeneratorExpressionNode, self).coerce_to(dst_type, env)
    def generate_result_code(self, code):
        # The loop writes its aggregate into self.result_node, which is
        # redirected here to this node's own result temp.
        self.result_node.result_code = self.result()
        self.loop.generate_execution_code(code)
class SetNode(ExprNode):
# Set constructor.
type = set_type
subexprs = ['args']
gil_message = "Constructing Python set"
def analyse_types(self, env):
for i in range(len(self.args)):
arg = self.args[i]
arg.analyse_types(env)
self.args[i] = arg.coerce_to_pyobject(env)
self.type = set_type
self.is_temp = 1
def may_be_none(self):
return False
def calculate_constant_result(self):
self.constant_result = set([
arg.constant_result for arg in self.args])
def compile_time_value(self, denv):
values = [arg.compile_time_value(denv) for arg in self.args]
try:
return set(values)
except Exception, e:
self.compile_time_value_error(e)
def generate_evaluation_code(self, code):
code.globalstate.use_utility_code(Builtin.py_set_utility_code)
self.allocate_temp_result(code)
code.putln(
"%s = PySet_New(0); %s" % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
for arg in self.args:
arg.generate_evaluation_code(code)
code.putln(
code.error_goto_if_neg(
"PySet_Add(%s, %s)" % (self.result(), arg.py_result()),
self.pos))
arg.generate_disposal_code(code)
arg.free_temps(code)
class DictNode(ExprNode):
    # Dictionary constructor.
    #
    # key_value_pairs      [DictItemNode]
    # exclude_null_values  [boolean]        Do not add NULL values to dict
    #
    # obj_conversion_errors  [PyrexError]   used internally
    subexprs = ['key_value_pairs']
    is_temp = 1
    exclude_null_values = False
    type = dict_type
    # Class-level default; analyse_types()/release_errors() rebind it per
    # instance, the shared list itself is never mutated in place.
    obj_conversion_errors = []
    @classmethod
    def from_pairs(cls, pos, pairs):
        # Convenience constructor from a list of (key, value) node pairs.
        return cls(pos, key_value_pairs=[
            DictItemNode(pos, key=k, value=v) for k, v in pairs])
    def calculate_constant_result(self):
        self.constant_result = dict([
            item.constant_result for item in self.key_value_pairs])
    def compile_time_value(self, denv):
        pairs = [(item.key.compile_time_value(denv), item.value.compile_time_value(denv))
                 for item in self.key_value_pairs]
        try:
            return dict(pairs)
        except Exception, e:
            self.compile_time_value_error(e)
    def type_dependencies(self, env):
        return ()
    def infer_type(self, env):
        # TODO: Infer struct constructors.
        return dict_type
    def analyse_types(self, env):
        # Hold back coercion errors: if the dict literal is later coerced
        # to a C struct (see coerce_to), the Python-object conversion
        # errors are irrelevant and get discarded.
        hold_errors()
        for item in self.key_value_pairs:
            item.analyse_types(env)
        self.obj_conversion_errors = held_errors()
        release_errors(ignore=True)
    def may_be_none(self):
        return False
    def coerce_to(self, dst_type, env):
        if dst_type.is_pyobject:
            # Used as a real dict: report the held conversion errors now.
            self.release_errors()
            if not self.type.subtype_of(dst_type):
                error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
        elif dst_type.is_struct_or_union:
            # Dict literal used as a C struct/union initialiser:
            # keys must be string literals naming the struct fields.
            self.type = dst_type
            if not dst_type.is_struct and len(self.key_value_pairs) != 1:
                error(self.pos, "Exactly one field must be specified to convert to union '%s'" % dst_type)
            elif dst_type.is_struct and len(self.key_value_pairs) < len(dst_type.scope.var_entries):
                warning(self.pos, "Not all members given for struct '%s'" % dst_type, 1)
            for item in self.key_value_pairs:
                if isinstance(item.key, CoerceToPyTypeNode):
                    item.key = item.key.arg
                if not item.key.is_string_literal:
                    error(item.key.pos, "Invalid struct field identifier")
                    item.key = StringNode(item.key.pos, value="<error>")
                else:
                    key = str(item.key.value) # converts string literals to unicode in Py3
                    member = dst_type.scope.lookup_here(key)
                    if not member:
                        error(item.key.pos, "struct '%s' has no field '%s'" % (dst_type, key))
                    else:
                        value = item.value
                        if isinstance(value, CoerceToPyTypeNode):
                            value = value.arg
                        item.value = value.coerce_to(member.type, env)
        else:
            self.type = error_type
            error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
        return self
    def release_errors(self):
        # Report and clear the errors held back in analyse_types().
        for err in self.obj_conversion_errors:
            report_error(err)
        self.obj_conversion_errors = []
    gil_message = "Constructing Python dict"
    def generate_evaluation_code(self, code):
        # Custom method used here because key-value
        # pairs are evaluated and used one at a time.
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)
        if self.type.is_pyobject:
            self.release_errors()
            code.putln(
                "%s = PyDict_New(); %s" % (
                    self.result(),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        for item in self.key_value_pairs:
            item.generate_evaluation_code(code)
            if self.type.is_pyobject:
                if self.exclude_null_values:
                    # Skip the insertion if the value is C NULL (used by
                    # FuncLocalsExprNode for unset local variables).
                    code.putln('if (%s) {' % item.value.py_result())
                code.put_error_if_neg(self.pos,
                    "PyDict_SetItem(%s, %s, %s)" % (
                        self.result(),
                        item.key.py_result(),
                        item.value.py_result()))
                if self.exclude_null_values:
                    code.putln('}')
            else:
                # Struct target: emit a plain C member assignment.
                code.putln("%s.%s = %s;" % (
                    self.result(),
                    item.key.value,
                    item.value.result()))
            item.generate_disposal_code(code)
            item.free_temps(code)
    def annotate(self, code):
        for item in self.key_value_pairs:
            item.annotate(code)
class DictItemNode(ExprNode):
    # One key/value pair inside a DictNode.
    #
    # key    ExprNode
    # value  ExprNode
    subexprs = ['key', 'value']
    nogil_check = None # the enclosing DictNode performs the nogil check
    def calculate_constant_result(self):
        key_const = self.key.constant_result
        value_const = self.value.constant_result
        self.constant_result = (key_const, value_const)
    def analyse_types(self, env):
        # Analyse both children first, then coerce each to a Python object.
        for child in (self.key, self.value):
            child.analyse_types(env)
        self.key = self.key.coerce_to_pyobject(env)
        self.value = self.value.coerce_to_pyobject(env)
    def generate_evaluation_code(self, code):
        for child in (self.key, self.value):
            child.generate_evaluation_code(code)
    def generate_disposal_code(self, code):
        for child in (self.key, self.value):
            child.generate_disposal_code(code)
    def free_temps(self, code):
        for child in (self.key, self.value):
            child.free_temps(code)
    def __iter__(self):
        # Iterate as (key, value), mirroring dict.items() pairs.
        yield self.key
        yield self.value
class ModuleNameMixin(object):
    """Mixin that records the defining module's qualified name and exposes
    it as an interned Python string constant for code generation."""
    def set_mod_name(self, env):
        # Remember the fully qualified name of the module being compiled.
        self.module_name = env.global_scope().qualified_name
    def get_py_mod_name(self, code):
        # Intern the module name as an identifier-style string constant.
        return code.get_py_string_const(
            self.module_name, identifier=True)
class ClassNode(ExprNode, ModuleNameMixin):
    # Helper class used in the implementation of Python
    # class definitions. Constructs a class object given
    # a name, tuple of bases and class dictionary.
    #
    # name         EncodedString      Name of the class
    # bases        ExprNode           Base class tuple
    # dict         ExprNode           Class dict (not owned by this node)
    # doc          ExprNode or None   Doc string
    # module_name  EncodedString      Name of defining module
    subexprs = ['bases', 'doc']
    def analyse_types(self, env):
        self.bases.analyse_types(env)
        if self.doc:
            self.doc.analyse_types(env)
            self.doc = self.doc.coerce_to_pyobject(env)
        self.type = py_object_type
        self.is_temp = 1
        # (fixed: removed a stray trailing semicolon on the next call)
        env.use_utility_code(create_class_utility_code)
        #TODO(craig,haoyu) This should be moved to a better place
        self.set_mod_name(env)
    def may_be_none(self):
        return True
    gil_message = "Constructing Python class"
    def generate_result_code(self, code):
        cname = code.intern_identifier(self.name)
        if self.doc:
            # Store the docstring into the class dict before creation.
            code.put_error_if_neg(self.pos,
                'PyDict_SetItemString(%s, "__doc__", %s)' % (
                    self.dict.py_result(),
                    self.doc.py_result()))
        py_mod_name = self.get_py_mod_name(code)
        code.putln(
            '%s = __Pyx_CreateClass(%s, %s, %s, %s); %s' % (
                self.result(),
                self.bases.py_result(),
                self.dict.py_result(),
                cname,
                py_mod_name,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class Py3ClassNode(ExprNode):
    # Helper class used in the implementation of Python3+
    # class definitions. Constructs a class object given
    # a name, tuple of bases and class dictionary.
    #
    # name         EncodedString      Name of the class
    # dict         ExprNode           Class dict (not owned by this node)
    # module_name  EncodedString      Name of defining module
    #
    # NOTE: metaclass, bases and mkw are referenced in
    # generate_result_code() but not owned here (no subexprs entry).
    subexprs = []
    def analyse_types(self, env):
        self.type = py_object_type
        self.is_temp = 1
    def may_be_none(self):
        return True
    gil_message = "Constructing Python class"
    def generate_result_code(self, code):
        code.globalstate.use_utility_code(create_py3class_utility_code)
        cname = code.intern_identifier(self.name)
        code.putln(
            '%s = __Pyx_Py3ClassCreate(%s, %s, %s, %s, %s); %s' % (
                self.result(),
                self.metaclass.result(),
                cname,
                self.bases.py_result(),
                self.dict.py_result(),
                self.mkw.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class KeywordArgsNode(ExprNode):
    # Helper class for keyword arguments: merges an optional ** mapping
    # with explicit keyword arguments, rejecting duplicates.
    #
    # starstar_arg   DictNode
    # keyword_args   [DictItemNode]
    subexprs = ['starstar_arg', 'keyword_args']
    is_temp = 1
    type = dict_type
    def calculate_constant_result(self):
        result = dict(self.starstar_arg.constant_result)
        for item in self.keyword_args:
            key, value = item.constant_result
            if key in result:
                raise ValueError("duplicate keyword argument found: %s" % key)
            result[key] = value
        self.constant_result = result
    def compile_time_value(self, denv):
        result = self.starstar_arg.compile_time_value(denv)
        pairs = [ (item.key.compile_time_value(denv), item.value.compile_time_value(denv))
                  for item in self.keyword_args ]
        try:
            result = dict(result)
            for key, value in pairs:
                if key in result:
                    raise ValueError("duplicate keyword argument found: %s" % key)
                result[key] = value
        except Exception, e:
            self.compile_time_value_error(e)
        return result
    def type_dependencies(self, env):
        return ()
    def infer_type(self, env):
        return dict_type
    def analyse_types(self, env):
        self.starstar_arg.analyse_types(env)
        # Passing None as a ** argument must raise TypeError at runtime.
        self.starstar_arg = self.starstar_arg.coerce_to_pyobject(env).as_none_safe_node(
            # FIXME: CPython's error message starts with the runtime function name
            'argument after ** must be a mapping, not NoneType')
        for item in self.keyword_args:
            item.analyse_types(env)
    def may_be_none(self):
        return False
    gil_message = "Constructing Python dict"
    def generate_evaluation_code(self, code):
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)
        self.starstar_arg.generate_evaluation_code(code)
        if self.starstar_arg.type is not Builtin.dict_type:
            # CPython supports calling functions with non-dicts, so do we
            code.putln('if (likely(PyDict_Check(%s))) {' %
                       self.starstar_arg.py_result())
        if self.keyword_args:
            # Explicit keywords are merged in below, so work on a copy.
            code.putln(
                "%s = PyDict_Copy(%s); %s" % (
                    self.result(),
                    self.starstar_arg.py_result(),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        else:
            # No extra keywords: reuse the ** dict directly.
            code.putln("%s = %s;" % (
                self.result(),
                self.starstar_arg.py_result()))
            code.put_incref(self.result(), py_object_type)
        if self.starstar_arg.type is not Builtin.dict_type:
            code.putln('} else {')
            # Fall back to dict(mapping) semantics for non-dict mappings.
            code.putln(
                "%s = PyObject_CallFunctionObjArgs("
                "(PyObject*)&PyDict_Type, %s, NULL); %s" % (
                    self.result(),
                    self.starstar_arg.py_result(),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
            code.putln('}')
        self.starstar_arg.generate_disposal_code(code)
        self.starstar_arg.free_temps(code)
        if not self.keyword_args:
            return
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("RaiseDoubleKeywords", "FunctionArguments.c"))
        for item in self.keyword_args:
            item.generate_evaluation_code(code)
            # Reject keywords that already appear in the ** dict.
            code.putln("if (unlikely(PyDict_GetItem(%s, %s))) {" % (
                self.result(),
                item.key.py_result()))
            # FIXME: find out function name at runtime!
            code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
                item.key.py_result(),
                code.error_goto(self.pos)))
            code.putln("}")
            code.put_error_if_neg(self.pos,
                "PyDict_SetItem(%s, %s, %s)" % (
                    self.result(),
                    item.key.py_result(),
                    item.value.py_result()))
            item.generate_disposal_code(code)
            item.free_temps(code)
    def annotate(self, code):
        self.starstar_arg.annotate(code)
        for item in self.keyword_args:
            item.annotate(code)
class PyClassMetaclassNode(ExprNode):
    # Helper node that resolves the Python 3 metaclass for a class body.
    #
    # bases  ExprNode  Base class tuple (not owned by this node)
    # mkw    ExprNode  Class keyword arguments (not owned by this node)
    subexprs = []
    def analyse_types(self, env):
        self.is_temp = True
        self.type = py_object_type
    def may_be_none(self):
        return True
    def generate_result_code(self, code):
        call = "%s = __Pyx_Py3MetaclassGet(%s, %s); %s" % (
            self.result(),
            self.bases.result(),
            self.mkw.result(),
            code.error_goto_if_null(self.result(), self.pos))
        code.putln(call)
        code.put_gotref(self.py_result())
class PyClassNamespaceNode(ExprNode, ModuleNameMixin):
    # Helper class holds Python3 namespace object
    #
    # All this are not owned by this node
    #  metaclass   ExprNode           Metaclass object
    #  bases       ExprNode           Base class tuple
    #  mkw         ExprNode           Class keyword arguments
    #  doc         ExprNode or None   Doc string (owned)
    subexprs = ['doc']
    def analyse_types(self, env):
        self.bases.analyse_types(env)
        if self.doc:
            self.doc.analyse_types(env)
            self.doc = self.doc.coerce_to_pyobject(env)
        self.type = py_object_type
        self.is_temp = 1
        #TODO(craig,haoyu) This should be moved to a better place
        self.set_mod_name(env)
    def may_be_none(self):
        return True
    def generate_result_code(self, code):
        # Obtain the namespace in which the class body executes
        # (metaclass __prepare__() semantics).
        cname = code.intern_identifier(self.name)
        py_mod_name = self.get_py_mod_name(code)
        if self.doc:
            doc_code = self.doc.result()
        else:
            doc_code = '(PyObject *) NULL'
        code.putln(
            "%s = __Pyx_Py3MetaclassPrepare(%s, %s, %s, %s, %s, %s); %s" % (
                self.result(),
                self.metaclass.result(),
                self.bases.result(),
                cname,
                self.mkw.result(),
                py_mod_name,
                doc_code,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class ClassCellInjectorNode(ExprNode):
    # Creates the list used to initialise CyFunction.func_classobj for
    # methods that need their enclosing class (no-argument super()).
    is_temp = True
    type = py_object_type
    subexprs = []
    is_active = False
    def analyse_expressions(self, env):
        if not self.is_active:
            return
        env.use_utility_code(cyfunction_class_cell_utility_code)
    def generate_evaluation_code(self, code):
        if not self.is_active:
            return
        self.allocate_temp_result(code)
        setup = '%s = PyList_New(0); %s' % (
            self.result(),
            code.error_goto_if_null(self.result(), self.pos))
        code.putln(setup)
        code.put_gotref(self.result())
    def generate_injection_code(self, code, classobj_cname):
        if not self.is_active:
            return
        code.putln('__Pyx_CyFunction_InitClassCell(%s, %s);' % (
            self.result(), classobj_cname))
class ClassCellNode(ExprNode):
    # Fetches the __class__ cell for a no-argument super() call, either
    # from the CyFunction (methods) or from the generator object.
    subexprs = []
    is_temp = True
    is_generator = False
    type = py_object_type
    def analyse_types(self, env):
        pass
    def generate_result_code(self, code):
        if not self.is_generator:
            code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
                self.result(),
                Naming.self_cname))
        else:
            code.putln('%s = %s->classobj;' % (
                self.result(), Naming.generator_cname))
        # Raise SystemError at runtime if the class cell was never filled.
        # (fixed: removed a stray trailing semicolon after this statement)
        code.putln(
            'if (!%s) { PyErr_SetString(PyExc_SystemError, '
            '"super(): empty __class__ cell"); %s }' % (
                self.result(),
                code.error_goto(self.pos)))
        code.put_incref(self.result(), py_object_type)
class BoundMethodNode(ExprNode):
    # Helper class used in the implementation of Python
    # class definitions. Constructs a bound method
    # object from a class and a function.
    #
    # function     ExprNode   Function object
    # self_object  ExprNode   self object
    subexprs = ['function']
    def analyse_types(self, env):
        self.function.analyse_types(env)
        self.type = py_object_type
        self.is_temp = 1
    # Fixed grammar in the user-visible message ("an bound" -> "a bound").
    gil_message = "Constructing a bound method"
    def generate_result_code(self, code):
        # PyMethod_New(func, self, type(self)) builds the bound method.
        code.putln(
            "%s = PyMethod_New(%s, %s, (PyObject*)%s->ob_type); %s" % (
                self.result(),
                self.function.py_result(),
                self.self_object.py_result(),
                self.self_object.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class UnboundMethodNode(ExprNode):
    # Helper class used in the implementation of Python
    # class definitions. Constructs an unbound method
    # object from a class and a function.
    #
    # function   ExprNode   Function object
    type = py_object_type
    is_temp = 1
    subexprs = ['function']
    def analyse_types(self, env):
        self.function.analyse_types(env)
    def may_be_none(self):
        return False
    gil_message = "Constructing an unbound method"
    def generate_result_code(self, code):
        # The owning class is the innermost class currently being built.
        class_cname = code.pyclass_stack[-1].classobj.result()
        code.putln(
            "%s = PyMethod_New(%s, 0, %s); %s" % (
                self.result(),
                self.function.py_result(),
                class_cname,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class PyCFunctionNode(ExprNode, ModuleNameMixin):
    # Helper class used in the implementation of Python
    # class definitions. Constructs a PyCFunction object
    # from a PyMethodDef struct.
    #
    # pymethdef_cname   string             PyMethodDef structure
    # self_object       ExprNode or None
    # binding           bool
    # def_node          DefNode            the Python function node
    # module_name       EncodedString      Name of defining module
    # code_object       CodeObjectNode     the PyCodeObject creator node
    subexprs = ['code_object', 'defaults_tuple']
    self_object = None
    code_object = None
    binding = False
    def_node = None
    defaults = None
    defaults_struct = None
    defaults_pyobjects = 0
    defaults_tuple = None
    type = py_object_type
    is_temp = 1
    specialized_cpdefs = None
    is_specialization = False
    @classmethod
    def from_defnode(cls, node, binding):
        # Alternate constructor wrapping an existing DefNode.
        return cls(node.pos,
                   def_node=node,
                   pymethdef_cname=node.entry.pymethdef_cname,
                   binding=binding or node.specialized_cpdefs,
                   specialized_cpdefs=node.specialized_cpdefs,
                   code_object=CodeObjectNode(node))
    def analyse_types(self, env):
        if self.binding:
            if self.specialized_cpdefs or self.is_specialization:
                env.use_utility_code(fused_function_utility_code)
            else:
                env.use_utility_code(binding_cfunc_utility_code)
        self.analyse_default_args(env)
        #TODO(craig,haoyu) This should be moved to a better place
        self.set_mod_name(env)
    def analyse_default_args(self, env):
        """
        Handle non-literal function's default arguments.
        """
        nonliteral_objects = []
        nonliteral_other = []
        default_args = []
        for arg in self.def_node.args:
            if arg.default:
                if not arg.default.is_literal:
                    arg.is_dynamic = True
                    if arg.type.is_pyobject:
                        nonliteral_objects.append(arg)
                    else:
                        nonliteral_other.append(arg)
                else:
                    arg.default = DefaultLiteralArgNode(arg.pos, arg.default)
                default_args.append(arg)
        # BUG FIX: this condition used to read
        # "nonliteral_objects or nonliteral_objects", so a function whose
        # only non-literal defaults were C-typed values never got its
        # defaults struct created.
        if nonliteral_objects or nonliteral_other:
            # Declare a C struct that stores the non-literal default
            # values; object defaults come first (reference counted).
            module_scope = env.global_scope()
            cname = module_scope.next_id(Naming.defaults_struct_prefix)
            scope = Symtab.StructOrUnionScope(cname)
            self.defaults = []
            for arg in nonliteral_objects:
                entry = scope.declare_var(arg.name, arg.type, None,
                                          Naming.arg_prefix + arg.name,
                                          allow_pyobject=True)
                self.defaults.append((arg, entry))
            for arg in nonliteral_other:
                entry = scope.declare_var(arg.name, arg.type, None,
                                          Naming.arg_prefix + arg.name,
                                          allow_pyobject=False)
                self.defaults.append((arg, entry))
            entry = module_scope.declare_struct_or_union(
                None, 'struct', scope, 1, None, cname=cname)
            self.defaults_struct = scope
            self.defaults_pyobjects = len(nonliteral_objects)
            for arg, entry in self.defaults:
                arg.default_value = '%s->%s' % (
                    Naming.dynamic_args_cname, entry.cname)
            self.def_node.defaults_struct = self.defaults_struct.name
        if default_args:
            if self.defaults_struct is None:
                # All defaults are literal: a plain __defaults__ tuple.
                self.defaults_tuple = TupleNode(self.pos, args=[
                    arg.default for arg in default_args])
                self.defaults_tuple.analyse_types(env)
            else:
                # Dynamic defaults: generate a getter function that builds
                # the __defaults__ tuple from the defaults struct on demand.
                defaults_getter = Nodes.DefNode(
                    self.pos, args=[], star_arg=None, starstar_arg=None,
                    body=Nodes.ReturnStatNode(
                        self.pos, return_type=py_object_type,
                        value=DefaultsTupleNode(
                            self.pos, default_args,
                            self.defaults_struct)),
                    decorators=None, name=StringEncoding.EncodedString("__defaults__"))
                defaults_getter.analyse_declarations(env)
                defaults_getter.analyse_expressions(env)
                defaults_getter.body.analyse_expressions(
                    defaults_getter.local_scope)
                defaults_getter.py_wrapper_required = False
                defaults_getter.pymethdef_required = False
                self.def_node.defaults_getter = defaults_getter
    def may_be_none(self):
        return False
    gil_message = "Constructing Python function"
    def self_result_code(self):
        if self.self_object is None:
            self_result = "NULL"
        else:
            self_result = self.self_object.py_result()
        return self_result
    def generate_result_code(self, code):
        if self.binding:
            self.generate_cyfunction_code(code)
        else:
            self.generate_pycfunction_code(code)
    def generate_pycfunction_code(self, code):
        # Plain (non-binding) function: a simple PyCFunction object.
        py_mod_name = self.get_py_mod_name(code)
        code.putln(
            '%s = PyCFunction_NewEx(&%s, %s, %s); %s' % (
                self.result(),
                self.pymethdef_cname,
                self.self_result_code(),
                py_mod_name,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
    def generate_cyfunction_code(self, code):
        # Binding function: a CyFunction (or FusedFunction) object that
        # also carries flags, defaults and the class cell.
        def_node = self.def_node
        if self.specialized_cpdefs:
            constructor = "__pyx_FusedFunction_NewEx"
            def_node = self.specialized_cpdefs[0]
        elif self.is_specialization:
            constructor = "__pyx_FusedFunction_NewEx"
        else:
            constructor = "__Pyx_CyFunction_NewEx"
        if self.code_object:
            code_object_result = self.code_object.py_result()
        else:
            code_object_result = 'NULL'
        flags = []
        if def_node.is_staticmethod:
            flags.append('__Pyx_CYFUNCTION_STATICMETHOD')
        elif def_node.is_classmethod:
            flags.append('__Pyx_CYFUNCTION_CLASSMETHOD')
        if def_node.local_scope.parent_scope.is_c_class_scope:
            flags.append('__Pyx_CYFUNCTION_CCLASS')
        if flags:
            flags = ' | '.join(flags)
        else:
            flags = '0'
        py_mod_name = self.get_py_mod_name(code)
        code.putln(
            '%s = %s(&%s, %s, %s, %s, %s); %s' % (
                self.result(),
                constructor,
                self.pymethdef_cname,
                flags,
                self.self_result_code(),
                py_mod_name,
                code_object_result,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
        if def_node.requires_classobj:
            # Register the function for later class-cell injection.
            assert code.pyclass_stack, "pyclass_stack is empty"
            class_node = code.pyclass_stack[-1]
            code.put_incref(self.py_result(), py_object_type)
            code.putln(
                'PyList_Append(%s, %s);' % (
                    class_node.class_cell.result(),
                    self.result()))
            code.put_giveref(self.py_result())
        if self.defaults:
            # Allocate the defaults struct and assign each default value.
            code.putln(
                'if (!__Pyx_CyFunction_InitDefaults(%s, sizeof(%s), %d)) %s' % (
                    self.result(), self.defaults_struct.name,
                    self.defaults_pyobjects, code.error_goto(self.pos)))
            defaults = '__Pyx_CyFunction_Defaults(%s, %s)' % (
                self.defaults_struct.name, self.result())
            for arg, entry in self.defaults:
                arg.generate_assignment_code(code, target='%s->%s' % (
                    defaults, entry.cname))
        if self.defaults_tuple:
            code.putln('__Pyx_CyFunction_SetDefaultsTuple(%s, %s);' % (
                self.result(), self.defaults_tuple.py_result()))
        if def_node.defaults_getter:
            code.putln('__Pyx_CyFunction_SetDefaultsGetter(%s, %s);' % (
                self.result(), def_node.defaults_getter.entry.pyfunc_cname))
class InnerFunctionNode(PyCFunctionNode):
    # A PyCFunctionNode for functions that close over an enclosing scope;
    # the closure object is passed as the C-level 'self' argument.
    binding = True
    needs_self_code = True
    def self_result_code(self):
        if not self.needs_self_code:
            return "NULL"
        return "((PyObject*)%s)" % (Naming.cur_scope_cname)
class CodeObjectNode(ExprNode):
    # Create a PyCodeObject for a CyFunction instance.
    #
    # def_node   DefNode    the Python function node
    # varnames   TupleNode  a tuple with all local variable names
    subexprs = ['varnames']
    is_temp = False
    def __init__(self, def_node):
        ExprNode.__init__(self, def_node.pos, def_node=def_node)
        # Collect all argument names (including * and ** args) followed
        # by the remaining named local variables.
        args = list(def_node.args)
        if def_node.star_arg:
            args.append(def_node.star_arg)
        if def_node.starstar_arg:
            args.append(def_node.starstar_arg)
        local_vars = [ arg for arg in def_node.local_scope.var_entries
                       if arg.name ]
        self.varnames = TupleNode(
            def_node.pos,
            args = [ IdentifierStringNode(arg.pos, value=arg.name)
                     for arg in args + local_vars ],
            is_temp = 0,
            is_literal = 1)
    def calculate_result_code(self):
        return self.result_code
    def generate_result_code(self, code):
        # The code object is a cached module-level constant, so it is
        # emitted by the cached-constants writer, not the current scope.
        self.result_code = code.get_py_const(py_object_type, 'codeobj_', cleanup_level=2)
        code = code.get_cached_constants_writer()
        code.mark_pos(self.pos)
        func = self.def_node
        func_name = code.get_py_string_const(
            func.name, identifier=True, is_str=False, unicode_value=func.name)
        # FIXME: better way to get the module file path at module init time? Encoding to use?
        file_path = StringEncoding.BytesLiteral(func.pos[0].get_filenametable_entry().encode('utf8'))
        file_path_const = code.get_py_string_const(file_path, identifier=False, is_str=True)
        code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, 0, 0, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % (
            self.result_code,
            len(func.args),           # argcount
            func.num_kwonly_args,     # kwonlyargcount (Py3 only)
            len(self.varnames.args),  # nlocals
            Naming.empty_bytes,       # code
            Naming.empty_tuple,       # consts
            Naming.empty_tuple,       # names (FIXME)
            self.varnames.result(),   # varnames
            Naming.empty_tuple,       # freevars (FIXME)
            Naming.empty_tuple,       # cellvars (FIXME)
            file_path_const,          # filename
            func_name,                # name
            self.pos[1],              # firstlineno
            Naming.empty_bytes,       # lnotab
            code.error_goto_if_null(self.result_code, self.pos),
            ))
class DefaultLiteralArgNode(ExprNode):
    # Wraps a literal default value of a CyFunction argument so the
    # literal is evaluated at most once.
    subexprs = []
    is_literal = True
    is_temp = False
    def __init__(self, pos, arg):
        super(DefaultLiteralArgNode, self).__init__(pos)
        self.arg = arg
        self.type = arg.type
        self.evaluated = False
    def analyse_types(self, env):
        pass # the wrapped literal was analysed already
    def generate_result_code(self, code):
        pass
    def generate_evaluation_code(self, code):
        # Evaluate only on first use.
        if self.evaluated:
            return
        self.arg.generate_evaluation_code(code)
        self.evaluated = True
    def result(self):
        return self.type.cast_code(self.arg.result())
class DefaultNonLiteralArgNode(ExprNode):
    # Accesses a non-literal argument default value stored in the
    # function's C defaults struct.
    subexprs = []
    def __init__(self, pos, arg, defaults_struct):
        super(DefaultNonLiteralArgNode, self).__init__(pos)
        self.arg = arg
        self.defaults_struct = defaults_struct
    def analyse_types(self, env):
        self.is_temp = False
        self.type = self.arg.type
    def generate_result_code(self, code):
        pass
    def result(self):
        entry = self.defaults_struct.lookup(self.arg.name)
        return '__Pyx_CyFunction_Defaults(%s, %s)->%s' % (
            self.defaults_struct.name, Naming.self_cname,
            entry.cname)
class DefaultsTupleNode(TupleNode):
    # Builds a CyFunction's __defaults__ tuple from its default arguments.
    def __init__(self, pos, defaults, defaults_struct):
        args = []
        for arg in defaults:
            default = arg.default
            if default.is_literal:
                # Literals are reused directly.
                args.append(default)
            else:
                # Non-literals are read back from the defaults struct.
                args.append(DefaultNonLiteralArgNode(pos, arg, defaults_struct))
        super(DefaultsTupleNode, self).__init__(pos, args=args)
class LambdaNode(InnerFunctionNode):
    # Lambda expression node (only used as a function reference)
    #
    # args          [CArgDeclNode]         formal arguments
    # star_arg      PyArgDeclNode or None  * argument
    # starstar_arg  PyArgDeclNode or None  ** argument
    # lambda_name   string                 a module-globally unique lambda name
    # result_expr   ExprNode
    # def_node      DefNode                the underlying function 'def' node
    child_attrs = ['def_node']
    name = StringEncoding.EncodedString('<lambda>')
    def analyse_declarations(self, env):
        # The underlying DefNode does the real work; a lambda binds no
        # name, so assignment synthesis is suppressed.
        self.def_node.no_assignment_synthesis = True
        self.def_node.pymethdef_required = True
        self.def_node.analyse_declarations(env)
        self.def_node.is_cyfunction = True
        self.pymethdef_cname = self.def_node.entry.pymethdef_cname
        env.add_lambda_def(self.def_node)
    def analyse_types(self, env):
        self.def_node.analyse_expressions(env)
        super(LambdaNode, self).analyse_types(env)
    def generate_result_code(self, code):
        self.def_node.generate_execution_code(code)
        super(LambdaNode, self).generate_result_code(code)
class GeneratorExpressionNode(LambdaNode):
    # A generator expression, e.g. (i for i in range(10))
    #
    # Result is a generator.
    #
    # loop      ForStatNode   the for-loop, containing a YieldExprNode
    # def_node  DefNode       the underlying generator 'def' node
    name = StringEncoding.EncodedString('genexpr')
    binding = False
    def analyse_declarations(self, env):
        super(GeneratorExpressionNode, self).analyse_declarations(env)
        # No pymethdef required
        self.def_node.pymethdef_required = False
        self.def_node.py_wrapper_required = False
        self.def_node.is_cyfunction = False
        # Force genexpr signature
        self.def_node.entry.signature = TypeSlots.pyfunction_noargs
    def generate_result_code(self, code):
        # Call the generator function directly; the call itself creates
        # and returns the generator object.
        code.putln(
            '%s = %s(%s); %s' % (
                self.result(),
                self.def_node.entry.pyfunc_cname,
                self.self_result_code(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class YieldExprNode(ExprNode):
    # Yield expression node
    #
    # arg            ExprNode   the value to return from the generator
    # label_name     string     name of the C label used for this yield
    # label_num      integer    yield label number
    # is_yield_from  boolean    is a YieldFromExprNode to delegate to another generator
    subexprs = ['arg']
    type = py_object_type
    label_num = 0
    is_yield_from = False
    def analyse_types(self, env):
        # label_num == 0 means the yield was never registered with an
        # enclosing generator, i.e. it appears in an illegal position.
        if not self.label_num:
            error(self.pos, "'yield' not supported here")
        self.is_temp = 1
        if self.arg is not None:
            self.arg.analyse_types(env)
            if not self.arg.type.is_pyobject:
                self.coerce_yield_argument(env)
    def coerce_yield_argument(self, env):
        # Overridden by YieldFromExprNode.
        self.arg = self.arg.coerce_to_pyobject(env)
    def generate_evaluation_code(self, code):
        # Move the yielded value (or None) into the generator's return
        # value slot, then emit the suspend/resume code.
        if self.arg:
            self.arg.generate_evaluation_code(code)
            self.arg.make_owned_reference(code)
            code.putln(
                "%s = %s;" % (
                    Naming.retval_cname,
                    self.arg.result_as(py_object_type)))
            self.arg.generate_post_assignment_code(code)
            self.arg.free_temps(code)
        else:
            code.put_init_to_py_none(Naming.retval_cname, py_object_type)
        self.generate_yield_code(code)
    def generate_yield_code(self, code):
        """
        Generate the code to return the argument in 'Naming.retval_cname'
        and to continue at the yield label.
        """
        self.label_name = code.new_label('resume_from_yield')
        code.use_label(self.label_name)
        saved = []
        # All live temps must survive the suspension, so they are spilled
        # into the closure struct and restored after resuming.
        code.funcstate.closure_temps.reset()
        for cname, type, manage_ref in code.funcstate.temps_in_use():
            save_cname = code.funcstate.closure_temps.allocate_temp(type)
            saved.append((cname, save_cname, type))
            if type.is_pyobject:
                code.put_xgiveref(cname)
            code.putln('%s->%s = %s;' % (Naming.cur_scope_cname, save_cname, cname))
        code.put_xgiveref(Naming.retval_cname)
        code.put_finish_refcount_context()
        code.putln("/* return from generator, yielding value */")
        code.putln("%s->resume_label = %d;" % (
            Naming.generator_cname, self.label_num))
        code.putln("return %s;" % Naming.retval_cname);
        code.put_label(self.label_name)
        # Resumed: restore the spilled temps from the closure struct.
        for cname, save_cname, type in saved:
            code.putln('%s = %s->%s;' % (cname, Naming.cur_scope_cname, save_cname))
            if type.is_pyobject:
                code.putln('%s->%s = 0;' % (Naming.cur_scope_cname, save_cname))
            if type.is_pyobject:
                code.put_xgotref(cname)
        if self.result_is_used:
            # The value passed into send() (or None) becomes the result
            # of the yield expression.
            self.allocate_temp_result(code)
            code.putln('%s = %s; %s' %
                       (self.result(), Naming.sent_value_cname,
                        code.error_goto_if_null(self.result(), self.pos)))
            code.put_incref(self.result(), py_object_type)
        else:
            code.putln(code.error_goto_if_null(Naming.sent_value_cname, self.pos))
class YieldFromExprNode(YieldExprNode):
    # "yield from GEN" expression
    is_yield_from = True
    def coerce_yield_argument(self, env):
        if not self.arg.type.is_string:
            # FIXME: support C arrays and C++ iterators?
            error(self.pos, "yielding from non-Python object not supported")
        self.arg = self.arg.coerce_to_pyobject(env)
    def generate_evaluation_code(self, code):
        code.globalstate.use_utility_code(UtilityCode.load_cached("YieldFrom", "Generator.c"))
        self.arg.generate_evaluation_code(code)
        code.putln("%s = __Pyx_Generator_Yield_From(%s, %s);" % (
            Naming.retval_cname,
            Naming.generator_cname,
            self.arg.result_as(py_object_type)))
        self.arg.generate_disposal_code(code)
        self.arg.free_temps(code)
        code.put_xgotref(Naming.retval_cname)
        # Non-NULL retval => the sub-generator yielded a value: suspend.
        code.putln("if (likely(%s)) {" % Naming.retval_cname)
        self.generate_yield_code(code)
        code.putln("} else {")
        # either error or sub-generator has normally terminated: return value => node result
        if self.result_is_used:
            # YieldExprNode has allocated the result temp for us
            code.putln("if (__Pyx_PyGen_FetchStopIterationValue(&%s) < 0) %s" % (
                self.result(),
                code.error_goto(self.pos)))
        else:
            # Result unused: only distinguish StopIteration (fine) from
            # a real error, then clear the exception.
            code.putln("PyObject* exc_type = PyErr_Occurred();")
            code.putln("if (exc_type) {")
            code.putln("if (!PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)) %s" %
                       code.error_goto(self.pos))
            code.putln("PyErr_Clear();")
            code.putln("}")
        code.putln("}")
class GlobalsExprNode(AtomicExprNode):
    # Implements the builtin globals(): evaluates to the module
    # namespace dict via the __Pyx_Globals() utility function.

    type = dict_type
    is_temp = 1

    def analyse_types(self, env):
        env.use_utility_code(Builtin.globals_utility_code)

    # Message used when this node is evaluated without the GIL.
    gil_message = "Constructing globals dict"

    def generate_result_code(self, code):
        code.putln('%s = __Pyx_Globals(); %s' % (
            self.result(),
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.result())
class FuncLocalsExprNode(DictNode):
    # Implements locals() inside a function body: a dict-literal node
    # mapping each named local variable to its current value.  Entries
    # whose value is unbound (NULL) are excluded at runtime.

    def __init__(self, pos, env):
        pairs = []
        for entry in env.entries.values():
            name = entry.name
            if not name:
                continue
            key_node = IdentifierStringNode(pos, value=name)
            value_node = NameNode(pos, name=name, allow_null=True)
            pairs.append(DictItemNode(pos, key=key_node, value=value_node))
        DictNode.__init__(self, pos, key_value_pairs=pairs,
                          exclude_null_values=True)
class PyClassLocalsExprNode(AtomicExprNode):
    # Implements locals() inside a Python class body: evaluates to the
    # namespace dict that the class definition is being built in.
    #
    # pyclass_dict   ExprNode   the dict node used as the class namespace

    def __init__(self, pos, pyclass_dict):
        AtomicExprNode.__init__(self, pos)
        self.pyclass_dict = pyclass_dict

    def analyse_types(self, env):
        self.type = self.pyclass_dict.type
        # Bug fix: this was misspelled "self.is_tmep", which silently created
        # an unused attribute and left is_temp at its class default.
        self.is_temp = 0

    def result(self):
        # Reuse the namespace dict's result directly; no new reference.
        return self.pyclass_dict.result()

    def generate_result_code(self, code):
        pass
def LocalsExprNode(pos, scope_node, env):
    # Factory for locals(): pick the node class that matches the scope.
    if env.is_module_scope:
        # At module level, locals() is the same thing as globals().
        node = GlobalsExprNode(pos)
    elif env.is_py_class_scope:
        node = PyClassLocalsExprNode(pos, scope_node.dict)
    else:
        node = FuncLocalsExprNode(pos, env)
    return node
#-------------------------------------------------------------------
#
#  Unary operator nodes
#
#-------------------------------------------------------------------

# Maps each source-level unary operator to the Python function used to
# constant-fold it in compile-time (DEF/IF) expressions.
compile_time_unary_operators = {
    'not': operator.not_,
    '~': operator.inv,
    '-': operator.neg,
    '+': operator.pos,
}
class UnopNode(ExprNode):
    #  operator     string
    #  operand      ExprNode
    #
    #  Processing during analyse_expressions phase:
    #
    #    analyse_c_operation
    #      Called when the operand is not a pyobject.
    #      - Check operand type and coerce if needed.
    #      - Determine result type and result code fragment.
    #      - Allocate temporary for result if needed.

    subexprs = ['operand']
    infix = True

    def calculate_constant_result(self):
        # Constant-fold with the matching Python-level operator function.
        func = compile_time_unary_operators[self.operator]
        self.constant_result = func(self.operand.constant_result)

    def compile_time_value(self, denv):
        # Evaluate in a compile-time (DEF/IF) context; unsupported operators
        # and evaluation failures are reported as compile errors.
        func = compile_time_unary_operators.get(self.operator)
        if not func:
            error(self.pos,
                "Unary '%s' not supported in compile-time expression"
                    % self.operator)
        operand = self.operand.compile_time_value(denv)
        try:
            return func(operand)
        except Exception, e:
            self.compile_time_value_error(e)

    def infer_type(self, env):
        # A Python operand yields a Python result; C operands keep their type.
        operand_type = self.operand.infer_type(env)
        if operand_type.is_pyobject:
            return py_object_type
        else:
            return operand_type

    def analyse_types(self, env):
        # Dispatch on the operand's kind: Python object, C++ class, plain C.
        self.operand.analyse_types(env)
        if self.is_py_operation():
            self.coerce_operand_to_pyobject(env)
            self.type = py_object_type
            self.is_temp = 1
        elif self.is_cpp_operation():
            self.analyse_cpp_operation(env)
        else:
            self.analyse_c_operation(env)

    def check_const(self):
        return self.operand.check_const()

    def is_py_operation(self):
        return self.operand.type.is_pyobject

    def nogil_check(self, env):
        # Python-level operations require the GIL.
        if self.is_py_operation():
            self.gil_error()

    def is_cpp_operation(self):
        type = self.operand.type
        return type.is_cpp_class

    def coerce_operand_to_pyobject(self, env):
        self.operand = self.operand.coerce_to_pyobject(env)

    def generate_result_code(self, code):
        if self.operand.type.is_pyobject:
            self.generate_py_operation_code(code)

    def generate_py_operation_code(self, code):
        # Emit a call to the C-API helper (e.g. PyNumber_Negative) with an
        # error check on a NULL result, then claim the new reference.
        function = self.py_operation_function()
        code.putln(
            "%s = %s(%s); %s" % (
                self.result(),
                function,
                self.operand.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())

    def type_error(self):
        # Report a bad operand type once and poison this node's type.
        if not self.operand.type.is_error:
            error(self.pos, "Invalid operand type for '%s' (%s)" %
                (self.operator, self.operand.type))
        self.type = PyrexTypes.error_type

    def analyse_cpp_operation(self, env):
        # Look up a user-defined C++ "operator<op>" on the operand's class,
        # dereferencing one pointer level if necessary.
        type = self.operand.type
        if type.is_ptr:
            type = type.base_type
        function = type.scope.lookup("operator%s" % self.operator)
        if not function:
            error(self.pos, "'%s' operator not defined for %s"
                % (self.operator, type))
            self.type_error()
            return
        func_type = function.type
        if func_type.is_ptr:
            func_type = func_type.base_type
        self.type = func_type.return_type
class NotNode(ExprNode):
    #  'not' operator
    #
    #  operand   ExprNode
    #
    #  The operand is coerced to a C boolean, so the result is always the
    #  inline C expression "(!operand)" of type bint.

    type = PyrexTypes.c_bint_type

    subexprs = ['operand']

    def calculate_constant_result(self):
        self.constant_result = not self.operand.constant_result

    def compile_time_value(self, denv):
        operand = self.operand.compile_time_value(denv)
        try:
            return not operand
        except Exception, e:
            self.compile_time_value_error(e)

    def infer_type(self, env):
        # 'not' always produces a C boolean.
        return PyrexTypes.c_bint_type

    def analyse_types(self, env):
        self.operand.analyse_types(env)
        self.operand = self.operand.coerce_to_boolean(env)

    def calculate_result_code(self):
        return "(!%s)" % self.operand.result()

    def generate_result_code(self, code):
        # Nothing to emit: the result is the inline expression above.
        pass
class UnaryPlusNode(UnopNode):
    #  unary '+' operator

    operator = '+'

    def analyse_c_operation(self, env):
        # Promote small integer types at least to int, mirroring C.
        self.type = PyrexTypes.widest_numeric_type(
            self.operand.type, PyrexTypes.c_int_type)

    def py_operation_function(self):
        return "PyNumber_Positive"

    def calculate_result_code(self):
        if not self.is_cpp_operation():
            # For plain C numbers, unary plus is a no-op.
            return self.operand.result()
        return "(+%s)" % self.operand.result()
class UnaryMinusNode(UnopNode):
    #  unary '-' operator

    operator = '-'

    def analyse_c_operation(self, env):
        if self.operand.type.is_numeric:
            # Promote at least to int, mirroring C integer promotion.
            self.type = PyrexTypes.widest_numeric_type(
                self.operand.type, PyrexTypes.c_int_type)
        elif self.operand.type.is_enum:
            self.type = PyrexTypes.c_int_type
        else:
            self.type_error()
        if self.type.is_complex:
            # Complex negation is emitted as a function call, not "-x".
            self.infix = False

    def py_operation_function(self):
        return "PyNumber_Negative"

    def calculate_result_code(self):
        if self.infix:
            return "(-%s)" % self.operand.result()
        else:
            return "%s(%s)" % (self.operand.type.unary_op('-'), self.operand.result())

    def get_constant_c_result_code(self):
        # Returns None implicitly when the operand has no constant C code.
        value = self.operand.get_constant_c_result_code()
        if value:
            return "(-%s)" % (value)
class TildeNode(UnopNode):
    #  unary '~' (bitwise complement) operator

    def analyse_c_operation(self, env):
        operand_type = self.operand.type
        if operand_type.is_int:
            # C promotes the operand at least to int.
            self.type = PyrexTypes.widest_numeric_type(
                operand_type, PyrexTypes.c_int_type)
        elif operand_type.is_enum:
            self.type = PyrexTypes.c_int_type
        else:
            self.type_error()

    def py_operation_function(self):
        return "PyNumber_Invert"

    def calculate_result_code(self):
        return "(~%s)" % self.operand.result()
class CUnopNode(UnopNode):
    # Base class for C-only unary operators: never treated as a Python
    # operation, even when the operand is a Python object.

    def is_py_operation(self):
        return False
class DereferenceNode(CUnopNode):
    #  unary '*' (C pointer dereference) operator

    operator = '*'

    def analyse_c_operation(self, env):
        operand_type = self.operand.type
        if not operand_type.is_ptr:
            self.type_error()
        else:
            # The result type is whatever the pointer points at.
            self.type = operand_type.base_type

    def calculate_result_code(self):
        return "(*%s)" % self.operand.result()
class DecrementIncrementNode(CUnopNode):
    #  unary '++'/'--' operator, prefix or postfix

    def analyse_c_operation(self, env):
        operand_type = self.operand.type
        if operand_type.is_numeric:
            # Numeric operands are promoted at least to int, as in C.
            self.type = PyrexTypes.widest_numeric_type(
                operand_type, PyrexTypes.c_int_type)
        elif operand_type.is_ptr:
            # Pointer arithmetic keeps the pointer type.
            self.type = operand_type
        else:
            self.type_error()

    def calculate_result_code(self):
        op = self.operator
        value = self.operand.result()
        if self.is_prefix:
            return "(%s%s)" % (op, value)
        return "(%s%s)" % (value, op)
def inc_dec_constructor(is_prefix, operator):
    # Return a node factory for the given ++/-- operator, pre-bound as
    # prefix or postfix.
    def make_node(pos, **kwds):
        return DecrementIncrementNode(
            pos, is_prefix=is_prefix, operator=operator, **kwds)
    return make_node
class AmpersandNode(ExprNode):
    #  The C address-of operator.
    #
    #  operand  ExprNode

    subexprs = ['operand']

    def infer_type(self, env):
        return PyrexTypes.c_ptr_type(self.operand.infer_type(env))

    def analyse_types(self, env):
        self.operand.analyse_types(env)
        argtype = self.operand.type
        # The operand must be addressable (an lvalue), except for C
        # functions, whose names decay to pointers.
        if not (argtype.is_cfunction or self.operand.is_addressable()):
            self.error("Taking address of non-lvalue")
            return
        if argtype.is_pyobject:
            self.error("Cannot take address of Python variable")
            return
        self.type = PyrexTypes.c_ptr_type(argtype)

    def check_const(self):
        return self.operand.check_const_addr()

    def error(self, mess):
        # Report and poison this node so later phases don't crash on it.
        error(self.pos, mess)
        self.type = PyrexTypes.error_type
        self.result_code = "<error>"

    def calculate_result_code(self):
        return "(&%s)" % self.operand.result()

    def generate_result_code(self, code):
        # Result is the inline C expression; nothing to emit.
        pass
# Dispatch table: unary operator token -> node class implementing it.
unop_node_classes = {
    "+": UnaryPlusNode,
    "-": UnaryMinusNode,
    "~": TildeNode,
}
def unop_node(pos, operator, operand):
    # Construct unop node of appropriate class for
    # given operator.
    if isinstance(operand, IntNode) and operator == '-':
        # Constant-fold negation of integer literals directly into the node.
        return IntNode(pos = operand.pos, value = str(-Utils.str_to_number(operand.value)))
    elif isinstance(operand, UnopNode) and operand.operator == operator:
        # '--x'/'++x' parse as doubly-applied unary ops in Python; warn that
        # this is not a C-style increment/decrement.
        warning(pos, "Python has no increment/decrement operator: %s%sx = %s(%sx) = x" % ((operator,)*4), 5)
    return unop_node_classes[operator](pos,
        operator = operator,
        operand = operand)
class TypecastNode(ExprNode):
    #  C type cast
    #
    #  operand      ExprNode
    #  base_type    CBaseTypeNode
    #  declarator   CDeclaratorNode
    #
    #  If used from a transform, one can if wanted specify the attribute
    #  "type" directly and leave base_type and declarator to None

    subexprs = ['operand']
    base_type = declarator = type = None

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        # Resolve the declared target type lazily (only once).
        if self.type is None:
            base_type = self.base_type.analyse(env)
            _, self.type = self.declarator.analyse(base_type, env)
        return self.type

    def analyse_types(self, env):
        if self.type is None:
            base_type = self.base_type.analyse(env)
            _, self.type = self.declarator.analyse(base_type, env)
        if self.type.is_cfunction:
            error(self.pos,
                "Cannot cast to a function type")
            self.type = PyrexTypes.error_type
        self.operand.analyse_types(env)
        to_py = self.type.is_pyobject
        from_py = self.operand.type.is_pyobject
        # A temporary Python object cast away from object-ness would lose
        # its only reference; only numeric casts are safe in that case.
        if from_py and not to_py and self.operand.is_ephemeral() and not self.type.is_numeric:
            error(self.pos, "Casting temporary Python object to non-numeric non-Python type")
        if to_py and not from_py:
            # C value -> Python object.
            if self.type is bytes_type and self.operand.type.is_int:
                # FIXME: the type cast node isn't needed in this case
                # and can be dropped once analyse_types() can return a
                # different node
                self.operand = CoerceIntToBytesNode(self.operand, env)
            elif self.operand.type.can_coerce_to_pyobject(env):
                self.result_ctype = py_object_type
                self.operand = self.operand.coerce_to_pyobject(env)
            else:
                if self.operand.type.is_ptr:
                    if not (self.operand.type.base_type.is_void or self.operand.type.base_type.is_struct):
                        error(self.pos, "Python objects cannot be cast from pointers of primitive types")
                else:
                    # Should this be an error?
                    warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.operand.type, self.type))
                self.operand = self.operand.coerce_to_simple(env)
        elif from_py and not to_py:
            # Python object -> C value.
            if self.type.create_from_py_utility_code(env):
                self.operand = self.operand.coerce_to(self.type, env)
            elif self.type.is_ptr:
                if not (self.type.base_type.is_void or self.type.base_type.is_struct):
                    error(self.pos, "Python objects cannot be cast to pointers of primitive types")
            else:
                warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.type, self.operand.type))
        elif from_py and to_py:
            # Python object -> Python object: optionally insert a type check.
            if self.typecheck and self.type.is_extension_type:
                self.operand = PyTypeTestNode(self.operand, self.type, env, notnone=True)
        elif self.type.is_complex and self.operand.type.is_complex:
            self.operand = self.operand.coerce_to_simple(env)
        elif self.operand.type.is_fused:
            self.operand = self.operand.coerce_to(self.type, env)
            #self.type = self.operand.type

    def is_simple(self):
        # either temp or a C cast => no side effects other than the operand's
        return self.operand.is_simple()

    def nonlocally_immutable(self):
        return self.operand.nonlocally_immutable()

    def nogil_check(self, env):
        if self.type and self.type.is_pyobject and self.is_temp:
            self.gil_error()

    def check_const(self):
        return self.operand.check_const()

    def calculate_constant_result(self):
        # we usually do not know the result of a type cast at code
        # generation time
        pass

    def calculate_result_code(self):
        if self.type.is_complex:
            # Complex casts are built from real/imag parts via from_parts().
            operand_result = self.operand.result()
            if self.operand.type.is_complex:
                real_part = self.type.real_type.cast_code("__Pyx_CREAL(%s)" % operand_result)
                imag_part = self.type.real_type.cast_code("__Pyx_CIMAG(%s)" % operand_result)
            else:
                real_part = self.type.real_type.cast_code(operand_result)
                imag_part = "0"
            return "%s(%s, %s)" % (
                self.type.from_parts,
                real_part,
                imag_part)
        else:
            return self.type.cast_code(self.operand.result())

    def get_constant_c_result_code(self):
        operand_result = self.operand.get_constant_c_result_code()
        if operand_result:
            return self.type.cast_code(operand_result)

    def result_as(self, type):
        if self.type.is_pyobject and not self.is_temp:
            # Optimise away some unnecessary casting
            return self.operand.result_as(type)
        else:
            return ExprNode.result_as(self, type)

    def generate_result_code(self, code):
        if self.is_temp:
            # Only reached for object results held in a temp: copy the
            # pointer and take a new reference.
            code.putln(
                "%s = (PyObject *)%s;" % (
                    self.result(),
                    self.operand.result()))
            code.put_incref(self.result(), self.ctype())
# Error messages for <type[:...]> cython.array casts (see CythonArrayNode).
ERR_START = "Start may not be given"
ERR_NOT_STOP = "Stop must be provided to indicate shape"
ERR_STEPS = ("Strides may only be given to indicate contiguity. "
             "Consider slicing it after conversion")
ERR_NOT_POINTER = "Can only create cython.array from pointer or array"
ERR_BASE_TYPE = "Pointer base type does not match cython.array base type"
class CythonArrayNode(ExprNode):
    """
    Used when a pointer of base_type is cast to a memoryviewslice with that
    base type. i.e.

        <int[:M:1, :N]> p

    creates a fortran-contiguous cython.array.

    We leave the type set to object so coercions to object are more efficient
    and less work. Acquiring a memoryviewslice from this will be just as
    efficient. ExprNode.coerce_to() will do the additional typecheck on
    self.compile_time_type

    This also handles <int[:, :]> my_c_array

    operand             ExprNode                 the thing we're casting
    base_type_node      MemoryViewSliceTypeNode  the cast expression node
    """

    subexprs = ['operand', 'shapes']

    shapes = None           # list of ExprNode, one extent per dimension
    is_temp = True
    mode = "c"              # "c" or "fortran" contiguity of the result
    array_dtype = None      # optional explicit element type override

    shape_type = PyrexTypes.c_py_ssize_t_type

    def analyse_types(self, env):
        import MemoryView

        self.operand.analyse_types(env)
        if self.array_dtype:
            array_dtype = self.array_dtype
        else:
            array_dtype = self.base_type_node.base_type_node.analyse(env)
        axes = self.base_type_node.axes

        MemoryView.validate_memslice_dtype(self.pos, array_dtype)

        # Assume failure until every check below has passed.
        self.type = error_type
        self.shapes = []
        ndim = len(axes)

        # Base type of the pointer or C array we are converting
        base_type = self.operand.type

        if not self.operand.type.is_ptr and not self.operand.type.is_array:
            return error(self.operand.pos, ERR_NOT_POINTER)

        # Dimension sizes of C array
        array_dimension_sizes = []
        if base_type.is_array:
            while base_type.is_array:
                array_dimension_sizes.append(base_type.size)
                base_type = base_type.base_type
        elif base_type.is_ptr:
            base_type = base_type.base_type
        else:
            # NOTE(review): unreachable given the is_ptr/is_array check
            # above; a bare error() call would raise TypeError if ever hit.
            return error()

        if not base_type.same_as(array_dtype):
            return error(self.operand.pos, ERR_BASE_TYPE)
        elif self.operand.type.is_array and len(array_dimension_sizes) != ndim:
            return error(self.operand.pos,
                         "Expected %d dimensions, array has %d dimensions" %
                         (ndim, len(array_dimension_sizes)))

        # Verify the start, stop and step values
        # In case of a C array, use the size of C array in each dimension to
        # get an automatic cast
        for axis_no, axis in enumerate(axes):
            if not axis.start.is_none:
                return error(axis.start.pos, ERR_START)

            if axis.stop.is_none:
                if array_dimension_sizes:
                    dimsize = array_dimension_sizes[axis_no]
                    axis.stop = IntNode(self.pos, value=dimsize,
                                        constant_result=dimsize,
                                        type=PyrexTypes.c_int_type)
                else:
                    return error(axis.pos, ERR_NOT_STOP)

            axis.stop.analyse_types(env)
            shape = axis.stop.coerce_to(self.shape_type, env)
            if not shape.is_literal:
                shape.coerce_to_temp(env)

            self.shapes.append(shape)

            # Only the first or last axis may carry a step, and it must be
            # the literal 1 (a contiguity marker); a step on the first axis
            # selects Fortran (column-major) layout.
            first_or_last = axis_no in (0, ndim - 1)
            if not axis.step.is_none and first_or_last:
                axis.step.analyse_types(env)
                if (not axis.step.type.is_int and axis.step.is_literal and not
                        axis.step.type.is_error):
                    return error(axis.step.pos, "Expected an integer literal")

                if axis.step.compile_time_value(env) != 1:
                    return error(axis.step.pos, ERR_STEPS)

                if axis_no == 0:
                    self.mode = "fortran"

            elif axis.step and not first_or_last:
                return error(axis.step.pos, ERR_STEPS)

        if not self.operand.is_name:
            self.operand = self.operand.coerce_to_temp(env)

        # Build the memoryview axis specs matching the detected layout.
        axes = [('direct', 'follow')] * len(axes)
        if self.mode == "fortran":
            axes[0] = ('direct', 'contig')
        else:
            axes[-1] = ('direct', 'contig')

        self.coercion_type = PyrexTypes.MemoryViewSliceType(array_dtype, axes)
        self.type = self.get_cython_array_type(env)
        MemoryView.use_cython_array_utility_code(env)
        env.use_utility_code(MemoryView.typeinfo_to_format_code)

    def allocate_temp_result(self, code):
        if self.temp_code:
            raise RuntimeError("temp allocated mulitple times")  # [sic]
        self.temp_code = code.funcstate.allocate_temp(self.type, True)

    def infer_type(self, env):
        return self.get_cython_array_type(env)

    def get_cython_array_type(self, env):
        # The cython.view.array extension type from the cython scope.
        return env.global_scope().context.cython_scope.viewscope.lookup("array").type

    def generate_result_code(self, code):
        import Buffer

        shapes = [self.shape_type.cast_code(shape.result())
                  for shape in self.shapes]
        dtype = self.coercion_type.dtype

        shapes_temp = code.funcstate.allocate_temp(py_object_type, True)
        format_temp = code.funcstate.allocate_temp(py_object_type, True)

        itemsize = "sizeof(%s)" % dtype.declaration_code("")
        type_info = Buffer.get_type_information_cname(code, dtype)

        # A NULL source pointer cannot back an array: raise at runtime.
        if self.operand.type.is_ptr:
            code.putln("if (!%s) {" % self.operand.result())
            code.putln( 'PyErr_SetString(PyExc_ValueError,'
                            '"Cannot create cython.array from NULL pointer");')
            code.putln(code.error_goto(self.operand.pos))
            code.putln("}")

        code.putln("%s = __pyx_format_from_typeinfo(&%s);" %
                                                (format_temp, type_info))
        # Build the shape tuple with one Py_ssize_t per dimension.
        buildvalue_fmt = " __PYX_BUILD_PY_SSIZE_T " * len(shapes)
        code.putln('%s = Py_BuildValue("(" %s ")", %s);' % (shapes_temp,
                                                            buildvalue_fmt,
                                                            ", ".join(shapes)))

        err = "!%s || !%s || !PyBytes_AsString(%s)" % (format_temp,
                                                       shapes_temp,
                                                       format_temp)
        code.putln(code.error_goto_if(err, self.pos))
        code.put_gotref(format_temp)
        code.put_gotref(shapes_temp)

        tup = (self.result(), shapes_temp, itemsize, format_temp,
               self.mode, self.operand.result())
        code.putln('%s = __pyx_array_new('
                            '%s, %s, PyBytes_AS_STRING(%s), '
                            '(char *) "%s", (char *) %s);' % tup)
        code.putln(code.error_goto_if_null(self.result(), self.pos))
        code.put_gotref(self.result())

        def dispose(temp):
            # Release helper temporaries once the array holds its own refs.
            code.put_decref_clear(temp, py_object_type)
            code.funcstate.release_temp(temp)

        dispose(shapes_temp)
        dispose(format_temp)

    @classmethod
    def from_carray(cls, src_node, env):
        """
        Given a C array type, return a CythonArrayNode
        """
        pos = src_node.pos
        base_type = src_node.type

        none_node = NoneNode(pos)
        axes = []

        # One full slice per array dimension; sizes are filled in during
        # analyse_types() from the C array's declared extents.
        while base_type.is_array:
            axes.append(SliceNode(pos, start=none_node, stop=none_node,
                                       step=none_node))
            base_type = base_type.base_type
        # Mark the last axis contiguous (C layout).
        axes[-1].step = IntNode(pos, value="1", is_c_literal=True)

        memslicenode = Nodes.MemoryViewSliceTypeNode(pos, axes=axes,
                                                     base_type_node=base_type)
        result = CythonArrayNode(pos, base_type_node=memslicenode,
                                 operand=src_node, array_dtype=base_type)
        result.analyse_types(env)
        return result
class SizeofNode(ExprNode):
    #  Abstract base class for sizeof(x) expression nodes.

    type = PyrexTypes.c_size_t_type

    def check_const(self):
        # sizeof() is always a compile-time constant in C.
        return True

    def generate_result_code(self, code):
        # Result is the inline C sizeof expression; nothing to emit.
        pass
class SizeofTypeNode(SizeofNode):
    #  C sizeof function applied to a type
    #
    #  base_type   CBaseTypeNode
    #  declarator  CDeclaratorNode

    subexprs = []
    arg_type = None

    def analyse_types(self, env):
        # we may have incorrectly interpreted a dotted name as a type rather than an attribute
        # this could be better handled by more uniformly treating types as runtime-available objects
        # NOTE(review): the "if 0 and" below deliberately disables this
        # fallback path (it would morph the node into a SizeofVarNode over
        # an attribute chain); kept as documentation of the intent.
        if 0 and self.base_type.module_path:
            path = self.base_type.module_path
            obj = env.lookup(path[0])
            if obj.as_module is None:
                operand = NameNode(pos=self.pos, name=path[0])
                for attr in path[1:]:
                    operand = AttributeNode(pos=self.pos, obj=operand, attribute=attr)
                operand = AttributeNode(pos=self.pos, obj=operand, attribute=self.base_type.name)
                self.operand = operand
                self.__class__ = SizeofVarNode
                self.analyse_types(env)
                return
        if self.arg_type is None:
            base_type = self.base_type.analyse(env)
            _, arg_type = self.declarator.analyse(base_type, env)
            self.arg_type = arg_type
        self.check_type()

    def check_type(self):
        arg_type = self.arg_type
        if arg_type.is_pyobject and not arg_type.is_extension_type:
            error(self.pos, "Cannot take sizeof Python object")
        elif arg_type.is_void:
            error(self.pos, "Cannot take sizeof void")
        elif not arg_type.is_complete():
            error(self.pos, "Cannot take sizeof incomplete type '%s'" % arg_type)

    def calculate_result_code(self):
        if self.arg_type.is_extension_type:
            # the size of the pointer is boring
            # we want the size of the actual struct
            arg_code = self.arg_type.declaration_code("", deref=1)
        else:
            arg_code = self.arg_type.declaration_code("")
        return "(sizeof(%s))" % arg_code
class SizeofVarNode(SizeofNode):
    #  C sizeof function applied to a variable
    #
    #  operand   ExprNode

    subexprs = ['operand']

    def analyse_types(self, env):
        # We may actually be looking at a type rather than a variable...
        # If we are, traditional analysis would fail...
        operand_as_type = self.operand.analyse_as_type(env)
        if operand_as_type:
            self.arg_type = operand_as_type
            if self.arg_type.is_fused:
                self.arg_type = self.arg_type.specialize(env.fused_to_specific)
            # Morph this node into a SizeofTypeNode and validate the type.
            self.__class__ = SizeofTypeNode
            self.check_type()
        else:
            self.operand.analyse_types(env)

    def calculate_result_code(self):
        return "(sizeof(%s))" % self.operand.result()

    def generate_result_code(self, code):
        pass
class TypeofNode(ExprNode):
    #  Compile-time type of an expression, as a string.
    #
    #  operand   ExprNode
    #  literal   StringNode # internal

    literal = None
    type = py_object_type

    subexprs = ['literal'] # 'operand' will be ignored after type analysis!

    def analyse_types(self, env):
        # Analyse the operand only to learn its type, then replace the
        # whole expression by a string literal naming that type.
        self.operand.analyse_types(env)
        value = StringEncoding.EncodedString(str(self.operand.type)) #self.operand.type.typeof_name())
        self.literal = StringNode(self.pos, value=value)
        self.literal.analyse_types(env)
        self.literal = self.literal.coerce_to_pyobject(env)

    def may_be_none(self):
        # Always evaluates to a string object, never None.
        return False

    def generate_evaluation_code(self, code):
        self.literal.generate_evaluation_code(code)

    def calculate_result_code(self):
        return self.literal.calculate_result_code()
#-------------------------------------------------------------------
#
# Binary operator nodes
#
#-------------------------------------------------------------------
def _not_in(x, seq):
return x not in seq
# Maps each source-level binary operator to the Python function used to
# constant-fold it in compile-time (DEF/IF) expressions.
compile_time_binary_operators = {
    '<': operator.lt,
    '<=': operator.le,
    '==': operator.eq,
    '!=': operator.ne,
    '>=': operator.ge,
    '>': operator.gt,
    'is': operator.is_,
    'is_not': operator.is_not,
    '+': operator.add,
    '&': operator.and_,
    '/': operator.truediv,
    '//': operator.floordiv,
    '<<': operator.lshift,
    '%': operator.mod,
    '*': operator.mul,
    '|': operator.or_,
    '**': operator.pow,
    '>>': operator.rshift,
    '-': operator.sub,
    '^': operator.xor,
    'in': operator.contains,
    'not_in': _not_in,
}
def get_compile_time_binop(node):
    # Look up the Python-level function implementing the node's binary
    # operator at compile time; report a compile error if unsupported.
    op = node.operator
    func = compile_time_binary_operators.get(op)
    if func is None:
        error(node.pos,
              "Binary '%s' not supported in compile-time expression" % op)
    return func
class BinopNode(ExprNode):
    #  operator     string
    #  operand1     ExprNode
    #  operand2     ExprNode
    #
    #  Processing during analyse_expressions phase:
    #
    #    analyse_c_operation
    #      Called when neither operand is a pyobject.
    #      - Check operand types and coerce if needed.
    #      - Determine result type and result code fragment.
    #      - Allocate temporary for result if needed.

    subexprs = ['operand1', 'operand2']
    inplace = False

    def calculate_constant_result(self):
        # Constant-fold with the matching Python-level operator function.
        func = compile_time_binary_operators[self.operator]
        self.constant_result = func(
            self.operand1.constant_result,
            self.operand2.constant_result)

    def compile_time_value(self, denv):
        func = get_compile_time_binop(self)
        operand1 = self.operand1.compile_time_value(denv)
        operand2 = self.operand2.compile_time_value(denv)
        try:
            return func(operand1, operand2)
        except Exception, e:
            self.compile_time_value_error(e)

    def infer_type(self, env):
        return self.result_type(self.operand1.infer_type(env),
                                self.operand2.infer_type(env))

    def analyse_types(self, env):
        self.operand1.analyse_types(env)
        self.operand2.analyse_types(env)
        self.analyse_operation(env)

    def analyse_operation(self, env):
        # Dispatch on operand kinds: Python object, C++ class, or plain C.
        if self.is_py_operation():
            self.coerce_operands_to_pyobjects(env)
            self.type = self.result_type(self.operand1.type,
                                         self.operand2.type)
            assert self.type.is_pyobject
            self.is_temp = 1
        elif self.is_cpp_operation():
            self.analyse_cpp_operation(env)
        else:
            self.analyse_c_operation(env)

    def is_py_operation(self):
        return self.is_py_operation_types(self.operand1.type, self.operand2.type)

    def is_py_operation_types(self, type1, type2):
        return type1.is_pyobject or type2.is_pyobject

    def is_cpp_operation(self):
        return (self.operand1.type.is_cpp_class
            or self.operand2.type.is_cpp_class)

    def analyse_cpp_operation(self, env):
        # NOTE(review): type1/type2 appear unused below; kept as-is.
        type1 = self.operand1.type
        type2 = self.operand2.type
        # Resolve a user-defined C++ operator overload for this operand pair.
        entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
        if not entry:
            self.type_error()
            return
        func_type = entry.type
        if func_type.is_ptr:
            func_type = func_type.base_type
        if len(func_type.args) == 1:
            # Member operator: only the right-hand side is a real argument.
            self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
        else:
            self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
            self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
        self.type = func_type.return_type

    def result_type(self, type1, type2):
        # Infer the Python-level result type where possible; fall back to
        # plain 'object' when unsure, or to C result typing for C operands.
        if self.is_py_operation_types(type1, type2):
            if type2.is_string:
                type2 = Builtin.bytes_type
            if type1.is_string:
                type1 = Builtin.bytes_type
            elif self.operator == '%' \
                     and type1 in (Builtin.str_type, Builtin.unicode_type):
                # note that b'%s' % b'abc' doesn't work in Py3
                return type1
            if type1.is_builtin_type:
                if type1 is type2:
                    if self.operator in '**%+|&^':
                        # FIXME: at least these operators should be safe - others?
                        return type1
                    elif self.operator == '*':
                        if type1 in (Builtin.bytes_type, Builtin.str_type, Builtin.unicode_type):
                            return type1
                        # multiplication of containers/numbers with an
                        # integer value always (?) returns the same type
                        if type2.is_int:
                            return type1
                elif type2.is_builtin_type and type1.is_int and self.operator == '*':
                    # multiplication of containers/numbers with an
                    # integer value always (?) returns the same type
                    return type2
            return py_object_type
        else:
            return self.compute_c_result_type(type1, type2)

    def nogil_check(self, env):
        if self.is_py_operation():
            self.gil_error()

    def coerce_operands_to_pyobjects(self, env):
        self.operand1 = self.operand1.coerce_to_pyobject(env)
        self.operand2 = self.operand2.coerce_to_pyobject(env)

    def check_const(self):
        return self.operand1.check_const() and self.operand2.check_const()

    def generate_result_code(self, code):
        #print "BinopNode.generate_result_code:", self.operand1, self.operand2 ###
        if self.operand1.type.is_pyobject:
            function = self.py_operation_function()
            if self.operator == '**':
                # Ternary PyNumber_Power() needs an explicit modulus argument.
                extra_args = ", Py_None"
            else:
                extra_args = ""
            code.putln(
                "%s = %s(%s, %s%s); %s" % (
                    self.result(),
                    function,
                    self.operand1.py_result(),
                    self.operand2.py_result(),
                    extra_args,
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())

    def type_error(self):
        # Report bad operand types once and poison this node's type.
        if not (self.operand1.type.is_error
                or self.operand2.type.is_error):
            error(self.pos, "Invalid operand types for '%s' (%s; %s)" %
                (self.operator, self.operand1.type,
                    self.operand2.type))
        self.type = PyrexTypes.error_type
class CBinopNode(BinopNode):
    # Binary operator restricted to C operands; applying it to Python
    # objects is a type error.

    def analyse_types(self, env):
        BinopNode.analyse_types(self, env)
        if self.is_py_operation():
            # C-only operator used on Python objects: poison the type.
            self.type = PyrexTypes.error_type

    def py_operation_function(self):
        return ""

    def calculate_result_code(self):
        lhs = self.operand1.result()
        rhs = self.operand2.result()
        return "(%s %s %s)" % (lhs, self.operator, rhs)
def c_binop_constructor(operator):
    # Return a node factory pre-bound to a single C binary operator.
    return lambda pos, **operands: CBinopNode(
        pos, operator=operator, **operands)
class NumBinopNode(BinopNode):
    #  Binary operation taking numeric arguments.
    #
    #  Coerces both operands to the common (widest) numeric result type and
    #  emits either an infix C expression or a helper-function call (the
    #  latter for complex types, which have no infix C operators).

    infix = True

    def analyse_c_operation(self, env):
        type1 = self.operand1.type
        type2 = self.operand2.type
        self.type = self.compute_c_result_type(type1, type2)
        if not self.type:
            self.type_error()
            return
        if self.type.is_complex:
            # Complex arithmetic goes through helper functions, not infix ops.
            self.infix = False
        if not self.infix or (type1.is_numeric and type2.is_numeric):
            self.operand1 = self.operand1.coerce_to(self.type, env)
            self.operand2 = self.operand2.coerce_to(self.type, env)

    def compute_c_result_type(self, type1, type2):
        # Returns the widest numeric type of the operands (promoted at least
        # to int), or None when the operand types are not acceptable.
        if self.c_types_okay(type1, type2):
            widest_type = PyrexTypes.widest_numeric_type(type1, type2)
            if widest_type is PyrexTypes.c_bint_type:
                if self.operator not in '|^&':
                    # False + False == 0 # not False!
                    widest_type = PyrexTypes.c_int_type
            else:
                widest_type = PyrexTypes.widest_numeric_type(
                    widest_type, PyrexTypes.c_int_type)
            return widest_type
        else:
            return None

    def may_be_none(self):
        type1 = self.operand1.type
        type2 = self.operand2.type
        if type1 and type1.is_builtin_type and type2 and type2.is_builtin_type:
            # XXX: I can't think of any case where a binary operation
            # on builtin types evaluates to None - add a special case
            # here if there is one.
            return False
        return super(NumBinopNode, self).may_be_none()

    def get_constant_c_result_code(self):
        value1 = self.operand1.get_constant_c_result_code()
        value2 = self.operand2.get_constant_c_result_code()
        if value1 and value2:
            return "(%s %s %s)" % (value1, self.operator, value2)
        else:
            return None

    def c_types_okay(self, type1, type2):
        #print "NumBinopNode.c_types_okay:", type1, type2 ###
        return (type1.is_numeric or type1.is_enum) \
            and (type2.is_numeric or type2.is_enum)

    def calculate_result_code(self):
        if self.infix:
            return "(%s %s %s)" % (
                self.operand1.result(),
                self.operator,
                self.operand2.result())
        else:
            # Non-infix (e.g. complex) operation: call the helper function.
            func = self.type.binary_op(self.operator)
            if func is None:
                error(self.pos, "binary operator %s not supported for %s" % (self.operator, self.type))
            return "%s(%s, %s)" % (
                func,
                self.operand1.result(),
                self.operand2.result())

    def is_py_operation_types(self, type1, type2):
        # Single unicode characters are handled at the Python level too.
        return (type1.is_unicode_char or
                type2.is_unicode_char or
                BinopNode.is_py_operation_types(self, type1, type2))

    def py_operation_function(self):
        # Fixed misspelled local variable (was "fuction").
        function = self.py_functions[self.operator]
        if self.inplace:
            # Use the in-place C-API variant for augmented assignments.
            function = function.replace('PyNumber_', 'PyNumber_InPlace')
        return function

    # C-API helper for each Python-level numeric operator.
    py_functions = {
        "|":        "PyNumber_Or",
        "^":        "PyNumber_Xor",
        "&":        "PyNumber_And",
        "<<":       "PyNumber_Lshift",
        ">>":       "PyNumber_Rshift",
        "+":        "PyNumber_Add",
        "-":        "PyNumber_Subtract",
        "*":        "PyNumber_Multiply",
        "/":        "__Pyx_PyNumber_Divide",
        "//":       "PyNumber_FloorDivide",
        "%":        "PyNumber_Remainder",
        "**":       "PyNumber_Power"
    }
class IntBinopNode(NumBinopNode):
    #  Binary operation whose C operands must be integers (or enums).

    def c_types_okay(self, type1, type2):
        def int_like(t):
            return t.is_int or t.is_enum
        return int_like(type1) and int_like(type2)
class AddNode(NumBinopNode):
    #  '+' operator.

    def is_py_operation_types(self, type1, type2):
        # C string + C string concatenation goes through Python objects.
        if type1.is_string and type2.is_string:
            return 1
        return NumBinopNode.is_py_operation_types(self, type1, type2)

    def compute_c_result_type(self, type1, type2):
        # Pointer/array + integer is C pointer arithmetic: the result keeps
        # the pointer type (either operand order).
        ptr_like1 = type1.is_ptr or type1.is_array
        ptr_like2 = type2.is_ptr or type2.is_array
        if ptr_like1 and (type2.is_int or type2.is_enum):
            return type1
        if ptr_like2 and (type1.is_int or type1.is_enum):
            return type2
        return NumBinopNode.compute_c_result_type(self, type1, type2)
class SubNode(NumBinopNode):
    #  '-' operator.

    def compute_c_result_type(self, type1, type2):
        ptr_like1 = type1.is_ptr or type1.is_array
        if ptr_like1 and (type2.is_int or type2.is_enum):
            # pointer - integer keeps the pointer type
            return type1
        if ptr_like1 and (type2.is_ptr or type2.is_array):
            # pointer - pointer yields an integral difference
            return PyrexTypes.c_int_type
        return NumBinopNode.compute_c_result_type(self, type1, type2)
class MulNode(NumBinopNode):
    #  '*' operator.

    def is_py_operation_types(self, type1, type2):
        # string * int (either order) is Python-level sequence repetition.
        repetition = ((type1.is_string and type2.is_int) or
                      (type2.is_string and type1.is_int))
        if repetition:
            return 1
        return NumBinopNode.is_py_operation_types(self, type1, type2)
class DivNode(NumBinopNode):
    # '/' or '//' operator.
    #
    # cdivision            bool/None  use plain C division semantics;
    #                                 None means "decide from directives"
    # truedivision         bool/None  '/' means true division; None if unknown
    # ctruedivision        bool       true division selected for C operands
    # cdivision_warnings   bool       warn where C and Python semantics differ
    # zerodivision_check   bool/None  emit a runtime divide-by-zero check
    cdivision = None
    truedivision = None # == "unknown" if operator == '/'
    ctruedivision = False
    cdivision_warnings = False
    zerodivision_check = None
    def find_compile_time_binary_operator(self, op1, op2):
        # Select the Python-level function used to fold this division at
        # compile time.
        func = compile_time_binary_operators[self.operator]
        if self.operator == '/' and self.truedivision is None:
            # => true div for floats, floor div for integers
            if isinstance(op1, (int,long)) and isinstance(op2, (int,long)):
                func = compile_time_binary_operators['//']
        return func
    def calculate_constant_result(self):
        # Fold the division when both operands have constant results.
        op1 = self.operand1.constant_result
        op2 = self.operand2.constant_result
        func = self.find_compile_time_binary_operator(op1, op2)
        self.constant_result = func(
            self.operand1.constant_result,
            self.operand2.constant_result)
    def compile_time_value(self, denv):
        # Evaluate the division in a compile-time (DEF) environment.
        operand1 = self.operand1.compile_time_value(denv)
        operand2 = self.operand2.compile_time_value(denv)
        try:
            func = self.find_compile_time_binary_operator(
                operand1, operand2)
            return func(operand1, operand2)
        except Exception, e:
            self.compile_time_value_error(e)
    def analyse_operation(self, env):
        # Decide between C division and Python-style division semantics
        # before running the generic binop analysis.
        if self.cdivision or env.directives['cdivision']:
            self.ctruedivision = False
        else:
            self.ctruedivision = self.truedivision
        NumBinopNode.analyse_operation(self, env)
        if self.is_cpp_operation():
            # C++ operator/ always has plain C semantics.
            self.cdivision = True
        if not self.type.is_pyobject:
            self.zerodivision_check = (
                self.cdivision is None and not env.directives['cdivision']
                and (not self.operand2.has_constant_result() or
                     self.operand2.constant_result == 0))
            if self.zerodivision_check or env.directives['cdivision_warnings']:
                # Need to check ahead of time to warn or raise zero division error
                self.operand1 = self.operand1.coerce_to_simple(env)
                self.operand2 = self.operand2.coerce_to_simple(env)
                if env.nogil:
                    error(self.pos, "Pythonic division not allowed without gil, consider using cython.cdivision(True)")
    def compute_c_result_type(self, type1, type2):
        # True division of two non-float C operands promotes the result to
        # at least double.
        if self.operator == '/' and self.ctruedivision:
            if not type1.is_float and not type2.is_float:
                widest_type = PyrexTypes.widest_numeric_type(type1, PyrexTypes.c_double_type)
                widest_type = PyrexTypes.widest_numeric_type(type2, widest_type)
                return widest_type
        return NumBinopNode.compute_c_result_type(self, type1, type2)
    def zero_division_message(self):
        # Message text for the ZeroDivisionError raised by generated code.
        if self.type.is_int:
            return "integer division or modulo by zero"
        else:
            return "float division"
    def generate_evaluation_code(self, code):
        # Late cdivision decision (directives may only be known here); pull
        # in the helper implementing Python-style integer division if needed.
        if not self.type.is_pyobject and not self.type.is_complex:
            if self.cdivision is None:
                self.cdivision = (code.globalstate.directives['cdivision']
                                    or not self.type.signed
                                    or self.type.is_float)
            if not self.cdivision:
                code.globalstate.use_utility_code(div_int_utility_code.specialize(self.type))
        NumBinopNode.generate_evaluation_code(self, code)
        self.generate_div_warning_code(code)
    def generate_div_warning_code(self, code):
        # Emit runtime zero-divisor / overflow checks and, when the
        # cdivision_warnings directive is active, a warning for operands
        # of differing sign (where C and Python results disagree).
        if not self.type.is_pyobject:
            if self.zerodivision_check:
                if not self.infix:
                    zero_test = "%s(%s)" % (self.type.unary_op('zero'), self.operand2.result())
                else:
                    zero_test = "%s == 0" % self.operand2.result()
                code.putln("if (unlikely(%s)) {" % zero_test)
                code.putln('PyErr_Format(PyExc_ZeroDivisionError, "%s");' % self.zero_division_message())
                code.putln(code.error_goto(self.pos))
                code.putln("}")
                if self.type.is_int and self.type.signed and self.operator != '%':
                    # INT_MIN / -1 overflows on two's-complement hardware.
                    code.globalstate.use_utility_code(division_overflow_test_code)
                    code.putln("else if (sizeof(%s) == sizeof(long) && unlikely(%s == -1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(%s))) {" % (
                                    self.type.declaration_code(''),
                                    self.operand2.result(),
                                    self.operand1.result()))
                    code.putln('PyErr_Format(PyExc_OverflowError, "value too large to perform division");')
                    code.putln(code.error_goto(self.pos))
                    code.putln("}")
            if code.globalstate.directives['cdivision_warnings'] and self.operator != '/':
                code.globalstate.use_utility_code(cdivision_warning_utility_code)
                code.putln("if ((%s < 0) ^ (%s < 0)) {" % (
                                self.operand1.result(),
                                self.operand2.result()))
                code.putln(code.set_error_info(self.pos));
                code.put("if (__Pyx_cdivision_warning(%(FILENAME)s, "
                                "%(LINENO)s)) " % {
                    'FILENAME': Naming.filename_cname,
                    'LINENO': Naming.lineno_cname,
                    })
                code.put_goto(code.error_label)
                code.putln("}")
    def calculate_result_code(self):
        # Pick the C expression implementing the division itself.
        if self.type.is_complex:
            return NumBinopNode.calculate_result_code(self)
        elif self.type.is_float and self.operator == '//':
            return "floor(%s / %s)" % (
                self.operand1.result(),
                self.operand2.result())
        elif self.truedivision or self.cdivision:
            op1 = self.operand1.result()
            op2 = self.operand2.result()
            if self.truedivision:
                # Cast operands so C '/' performs true (float) division.
                if self.type != self.operand1.type:
                    op1 = self.type.cast_code(op1)
                if self.type != self.operand2.type:
                    op2 = self.type.cast_code(op2)
            return "(%s / %s)" % (op1, op2)
        else:
            # Python-semantics helper (result rounds toward -infinity).
            return "__Pyx_div_%s(%s, %s)" % (
                    self.type.specialization_name(),
                    self.operand1.result(),
                    self.operand2.result())
class ModNode(DivNode):
    # '%' operator.
    def is_py_operation_types(self, type1, type2):
        # '%' on strings is Python string formatting.
        return (type1.is_string
            or type2.is_string
            or NumBinopNode.is_py_operation_types(self, type1, type2))
    def zero_division_message(self):
        # Message text for the ZeroDivisionError raised by generated code.
        if self.type.is_int:
            return "integer division or modulo by zero"
        else:
            return "float divmod()"
    def generate_evaluation_code(self, code):
        # Decide on C vs Python modulo semantics and pull in the matching
        # helper before generating the generic binop code.
        if not self.type.is_pyobject:
            if self.cdivision is None:
                self.cdivision = code.globalstate.directives['cdivision'] or not self.type.signed
            if not self.cdivision:
                if self.type.is_int:
                    code.globalstate.use_utility_code(mod_int_utility_code.specialize(self.type))
                else:
                    code.globalstate.use_utility_code(
                        mod_float_utility_code.specialize(self.type, math_h_modifier=self.type.math_h_modifier))
        NumBinopNode.generate_evaluation_code(self, code)
        self.generate_div_warning_code(code)
    def calculate_result_code(self):
        # C semantics: plain '%' (fmod for floats); Python semantics:
        # helper whose result takes the sign of the divisor.
        if self.cdivision:
            if self.type.is_float:
                return "fmod%s(%s, %s)" % (
                    self.type.math_h_modifier,
                    self.operand1.result(),
                    self.operand2.result())
            else:
                return "(%s %% %s)" % (
                    self.operand1.result(),
                    self.operand2.result())
        else:
            return "__Pyx_mod_%s(%s, %s)" % (
                    self.type.specialization_name(),
                    self.operand1.result(),
                    self.operand2.result())
class PowNode(NumBinopNode):
    # '**' operator.
    def analyse_c_operation(self, env):
        # Pick the C-level pow function matching the result type and make
        # sure the corresponding helper/utility code is available.
        NumBinopNode.analyse_c_operation(self, env)
        if self.type.is_complex:
            if self.type.real_type.is_float:
                self.operand1 = self.operand1.coerce_to(self.type, env)
                self.operand2 = self.operand2.coerce_to(self.type, env)
                self.pow_func = "__Pyx_c_pow" + self.type.real_type.math_h_modifier
            else:
                error(self.pos, "complex int powers not supported")
                self.pow_func = "<error>"
        elif self.type.is_float:
            self.pow_func = "pow" + self.type.math_h_modifier
        else:
            # Integer power: a helper specialized on the result type.
            self.pow_func = "__Pyx_pow_%s" % self.type.declaration_code('').replace(' ', '_')
            env.use_utility_code(
                    int_pow_utility_code.specialize(func_name=self.pow_func,
                                                type=self.type.declaration_code('')))
    def calculate_result_code(self):
        # Work around MSVC overloading ambiguity.
        def typecast(operand):
            if self.type == operand.type:
                return operand.result()
            else:
                return self.type.cast_code(operand.result())
        return "%s(%s, %s)" % (
            self.pow_func,
            typecast(self.operand1),
            typecast(self.operand2))
# Note: This class is temporarily "shut down" into an ineffective temp
# allocation mode.
#
# More sophisticated temp reuse was going on before, one could have a
# look at adding this again after /all/ classes are converted to the
# new temp scheme. (The temp juggling cannot work otherwise).
class BoolBinopNode(ExprNode):
    # Short-circuiting boolean operation ('and' / 'or').
    #
    # operator string
    # operand1 ExprNode
    # operand2 ExprNode
    subexprs = ['operand1', 'operand2']
    def infer_type(self, env):
        type1 = self.operand1.infer_type(env)
        type2 = self.operand2.infer_type(env)
        return PyrexTypes.independent_spanning_type(type1, type2)
    def may_be_none(self):
        # 'x or y' can only yield None when y can; 'x and y' may yield
        # either operand.
        if self.operator == 'or':
            return self.operand2.may_be_none()
        else:
            return self.operand1.may_be_none() or self.operand2.may_be_none()
    def calculate_constant_result(self):
        if self.operator == 'and':
            self.constant_result = \
                self.operand1.constant_result and \
                self.operand2.constant_result
        else:
            self.constant_result = \
                self.operand1.constant_result or \
                self.operand2.constant_result
    def compile_time_value(self, denv):
        if self.operator == 'and':
            return self.operand1.compile_time_value(denv) \
                and self.operand2.compile_time_value(denv)
        else:
            return self.operand1.compile_time_value(denv) \
                or self.operand2.compile_time_value(denv)
    def coerce_to_boolean(self, env):
        # Rebuild the node with both operands coerced to bint, so the
        # short-circuit evaluation itself produces a bint.
        return BoolBinopNode(
            self.pos,
            operator = self.operator,
            operand1 = self.operand1.coerce_to_boolean(env),
            operand2 = self.operand2.coerce_to_boolean(env),
            type = PyrexTypes.c_bint_type,
            is_temp = self.is_temp)
    def analyse_types(self, env):
        self.operand1.analyse_types(env)
        self.operand2.analyse_types(env)
        self.type = PyrexTypes.independent_spanning_type(self.operand1.type, self.operand2.type)
        self.operand1 = self.operand1.coerce_to(self.type, env)
        self.operand2 = self.operand2.coerce_to(self.type, env)
        # For what we're about to do, it's vital that
        # both operands be temp nodes.
        self.operand1 = self.operand1.coerce_to_simple(env)
        self.operand2 = self.operand2.coerce_to_simple(env)
        self.is_temp = 1
    gil_message = "Truth-testing Python object"
    def check_const(self):
        return self.operand1.check_const() and self.operand2.check_const()
    def generate_evaluation_code(self, code):
        # Evaluate operand1; operand2 is only evaluated when the truth of
        # operand1 does not already decide the result (short-circuit).
        code.mark_pos(self.pos)
        self.operand1.generate_evaluation_code(code)
        test_result, uses_temp = self.generate_operand1_test(code)
        if self.operator == 'and':
            sense = ""
        else:
            sense = "!"
        code.putln(
            "if (%s%s) {" % (
                sense,
                test_result))
        if uses_temp:
            code.funcstate.release_temp(test_result)
        self.operand1.generate_disposal_code(code)
        self.operand2.generate_evaluation_code(code)
        self.allocate_temp_result(code)
        self.operand2.make_owned_reference(code)
        code.putln("%s = %s;" % (self.result(), self.operand2.result()))
        self.operand2.generate_post_assignment_code(code)
        self.operand2.free_temps(code)
        code.putln("} else {")
        self.operand1.make_owned_reference(code)
        code.putln("%s = %s;" % (self.result(), self.operand1.result()))
        self.operand1.generate_post_assignment_code(code)
        self.operand1.free_temps(code)
        code.putln("}")
    def generate_operand1_test(self, code):
        # Generate code to test the truth of the first operand.
        if self.type.is_pyobject:
            test_result = code.funcstate.allocate_temp(PyrexTypes.c_bint_type,
                                                       manage_ref=False)
            code.putln(
                "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
                    test_result,
                    self.operand1.py_result(),
                    code.error_goto_if_neg(test_result, self.pos)))
        else:
            test_result = self.operand1.result()
        return (test_result, self.type.is_pyobject)
class CondExprNode(ExprNode):
    # Short-circuiting conditional expression: true_val if test else false_val.
    # Only the selected branch is ever evaluated.
    #
    # test      ExprNode
    # true_val  ExprNode
    # false_val ExprNode
    true_val = None
    false_val = None
    subexprs = ['test', 'true_val', 'false_val']
    def type_dependencies(self, env):
        return self.true_val.type_dependencies(env) + self.false_val.type_dependencies(env)
    def infer_type(self, env):
        return PyrexTypes.independent_spanning_type(self.true_val.infer_type(env),
                                                    self.false_val.infer_type(env))
    def calculate_constant_result(self):
        if self.test.constant_result:
            self.constant_result = self.true_val.constant_result
        else:
            self.constant_result = self.false_val.constant_result
    def analyse_types(self, env):
        # Coerce the condition to a boolean and find a common result type
        # spanning both branches.
        self.test.analyse_types(env)
        self.test = self.test.coerce_to_boolean(env)
        self.true_val.analyse_types(env)
        self.false_val.analyse_types(env)
        self.type = PyrexTypes.independent_spanning_type(self.true_val.type, self.false_val.type)
        if self.true_val.type.is_pyobject or self.false_val.type.is_pyobject:
            self.true_val = self.true_val.coerce_to(self.type, env)
            self.false_val = self.false_val.coerce_to(self.type, env)
        self.is_temp = 1
        if self.type == PyrexTypes.error_type:
            self.type_error()
    def type_error(self):
        # Report branch types that have no common spanning type (unless a
        # prior error already produced an error type).
        # BUG FIX: error message previously read "Incompatable".
        if not (self.true_val.type.is_error or self.false_val.type.is_error):
            error(self.pos, "Incompatible types in conditional expression (%s; %s)" %
                (self.true_val.type, self.false_val.type))
        self.type = PyrexTypes.error_type
    def check_const(self):
        return (self.test.check_const()
            and self.true_val.check_const()
            and self.false_val.check_const())
    def generate_evaluation_code(self, code):
        # Because subexprs may not be evaluated we can use a more optimal
        # subexpr allocation strategy than the default, so override evaluation_code.
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)
        self.test.generate_evaluation_code(code)
        code.putln("if (%s) {" % self.test.result() )
        self.eval_and_get(code, self.true_val)
        code.putln("} else {")
        self.eval_and_get(code, self.false_val)
        code.putln("}")
        self.test.generate_disposal_code(code)
        self.test.free_temps(code)
    def eval_and_get(self, code, expr):
        # Evaluate one branch and assign its (owned) result to our temp.
        expr.generate_evaluation_code(code)
        expr.make_owned_reference(code)
        code.putln('%s = %s;' % (self.result(), expr.result_as(self.ctype())))
        expr.generate_post_assignment_code(code)
        expr.free_temps(code)
# Map each Python comparison operator (including the deprecated '<>')
# to the corresponding CPython rich-comparison constant.
richcmp_constants = dict([
    ("<",  "Py_LT"),
    ("<=", "Py_LE"),
    ("==", "Py_EQ"),
    ("!=", "Py_NE"),
    ("<>", "Py_NE"),
    (">",  "Py_GT"),
    (">=", "Py_GE"),
])
class CmpNode(object):
    # Mixin class containing code common to PrimaryCmpNodes
    # and CascadedCmpNodes.
    #
    # special_bool_cmp_function  optional C helper name used for optimized
    #                            str/bytes/unicode equality tests
    special_bool_cmp_function = None
    def infer_type(self, env):
        # TODO: Actually implement this (after merging with -unstable).
        return py_object_type
    def calculate_cascaded_constant_result(self, operand1_result):
        # Fold an 'a < b < c' style chain: this link is combined with the
        # folded result of the rest of the cascade.
        func = compile_time_binary_operators[self.operator]
        operand2_result = self.operand2.constant_result
        result = func(operand1_result, operand2_result)
        if self.cascade:
            self.cascade.calculate_cascaded_constant_result(operand2_result)
            if self.cascade.constant_result:
                self.constant_result = result and self.cascade.constant_result
        else:
            self.constant_result = result
    def cascaded_compile_time_value(self, operand1, denv):
        # Evaluate this comparison link (and any cascade) at compile time.
        func = get_compile_time_binop(self)
        operand2 = self.operand2.compile_time_value(denv)
        try:
            result = func(operand1, operand2)
        except Exception, e:
            self.compile_time_value_error(e)
            result = None
        if result:
            cascade = self.cascade
            if cascade:
                # FIXME: I bet this must call cascaded_compile_time_value()
                result = result and cascade.cascaded_compile_time_value(operand2, denv)
        return result
    def is_cpp_comparison(self):
        return self.operand1.type.is_cpp_class or self.operand2.type.is_cpp_class
    def find_common_int_type(self, env, op, operand1, operand2):
        # type1 != type2 and at least one of the types is not a C int
        type1 = operand1.type
        type2 = operand2.type
        type1_can_be_int = False
        type2_can_be_int = False
        if operand1.is_string_literal and operand1.can_coerce_to_char_literal():
            type1_can_be_int = True
        if operand2.is_string_literal and operand2.can_coerce_to_char_literal():
            type2_can_be_int = True
        if type1.is_int:
            if type2_can_be_int:
                return type1
        elif type2.is_int:
            if type1_can_be_int:
                return type2
        elif type1_can_be_int:
            if type2_can_be_int:
                # Two single-character string literals compared as chars.
                return PyrexTypes.c_uchar_type
        return None
    def find_common_type(self, env, op, operand1, common_type=None):
        # Determine the type both operands get coerced to for comparison,
        # recursing into any cascade and merging into 'common_type'.
        operand2 = self.operand2
        type1 = operand1.type
        type2 = operand2.type
        new_common_type = None
        # catch general errors
        if type1 == str_type and (type2.is_string or type2 in (bytes_type, unicode_type)) or \
               type2 == str_type and (type1.is_string or type1 in (bytes_type, unicode_type)):
            error(self.pos, "Comparisons between bytes/unicode and str are not portable to Python 3")
            new_common_type = error_type
        # try to use numeric comparisons where possible
        elif type1.is_complex or type2.is_complex:
            if op not in ('==', '!=') \
                   and (type1.is_complex or type1.is_numeric) \
                   and (type2.is_complex or type2.is_numeric):
                error(self.pos, "complex types are unordered")
                new_common_type = error_type
            elif type1.is_pyobject:
                new_common_type = type1
            elif type2.is_pyobject:
                new_common_type = type2
            else:
                new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
        elif type1.is_numeric and type2.is_numeric:
            new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
        elif common_type is None or not common_type.is_pyobject:
            new_common_type = self.find_common_int_type(env, op, operand1, operand2)
        if new_common_type is None:
            # fall back to generic type compatibility tests
            if type1 == type2:
                new_common_type = type1
            elif type1.is_pyobject or type2.is_pyobject:
                if type2.is_numeric or type2.is_string:
                    if operand2.check_for_coercion_error(type1):
                        new_common_type = error_type
                    else:
                        new_common_type = py_object_type
                elif type1.is_numeric or type1.is_string:
                    if operand1.check_for_coercion_error(type2):
                        new_common_type = error_type
                    else:
                        new_common_type = py_object_type
                elif py_object_type.assignable_from(type1) and py_object_type.assignable_from(type2):
                    new_common_type = py_object_type
                else:
                    # one Python type and one non-Python type, not assignable
                    self.invalid_types_error(operand1, op, operand2)
                    new_common_type = error_type
            elif type1.assignable_from(type2):
                new_common_type = type1
            elif type2.assignable_from(type1):
                new_common_type = type2
            else:
                # C types that we couldn't handle up to here are an error
                self.invalid_types_error(operand1, op, operand2)
                new_common_type = error_type
        if new_common_type.is_string and (isinstance(operand1, BytesNode) or
                                          isinstance(operand2, BytesNode)):
            # special case when comparing char* to bytes literal: must
            # compare string values!
            new_common_type = bytes_type
        # recursively merge types
        if common_type is None or new_common_type.is_error:
            common_type = new_common_type
        else:
            # we could do a lot better by splitting the comparison
            # into a non-Python part and a Python part, but this is
            # safer for now
            common_type = PyrexTypes.spanning_type(common_type, new_common_type)
        if self.cascade:
            common_type = self.cascade.find_common_type(env, self.operator, operand2, common_type)
        return common_type
    def invalid_types_error(self, operand1, op, operand2):
        error(self.pos, "Invalid types for '%s' (%s, %s)" %
              (op, operand1.type, operand2.type))
    def is_python_comparison(self):
        # True when the comparison must go through the Python protocol.
        return (not self.is_ptr_contains()
            and not self.is_c_string_contains()
            and (self.has_python_operands()
                 or (self.cascade and self.cascade.is_python_comparison())
                 or self.operator in ('in', 'not_in')))
    def coerce_operands_to(self, dst_type, env):
        # Coerce operand2 (and the entire cascade) to dst_type.
        operand2 = self.operand2
        if operand2.type != dst_type:
            self.operand2 = operand2.coerce_to(dst_type, env)
        if self.cascade:
            self.cascade.coerce_operands_to(dst_type, env)
    def is_python_result(self):
        # True when the comparison yields a Python object, not a C bint.
        return ((self.has_python_operands() and
                 self.special_bool_cmp_function is None and
                 self.operator not in ('is', 'is_not', 'in', 'not_in') and
                 not self.is_c_string_contains() and
                 not self.is_ptr_contains())
            or (self.cascade and self.cascade.is_python_result()))
    def is_c_string_contains(self):
        # 'char in bytes/char*' and 'uchar in unicode' run in pure C.
        return self.operator in ('in', 'not_in') and \
               ((self.operand1.type.is_int
                 and (self.operand2.type.is_string or self.operand2.type is bytes_type)) or
                (self.operand1.type.is_unicode_char
                 and self.operand2.type is unicode_type))
    def is_ptr_contains(self):
        # 'val in sliced_pointer' — handled later by IterationTransform.
        if self.operator in ('in', 'not_in'):
            container_type = self.operand2.type
            return (container_type.is_ptr or container_type.is_array) \
                and not container_type.is_string
    def find_special_bool_compare_function(self, env):
        # Use optimized equality helpers for str/bytes/unicode operands;
        # returns True (setting special_bool_cmp_function) when one applies.
        if self.operator in ('==', '!='):
            type1, type2 = self.operand1.type, self.operand2.type
            if type1.is_pyobject and type2.is_pyobject:
                if type1 is Builtin.unicode_type or type2 is Builtin.unicode_type:
                    env.use_utility_code(UtilityCode.load_cached("UnicodeEquals", "StringTools.c"))
                    self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
                    return True
                elif type1 is Builtin.bytes_type or type2 is Builtin.bytes_type:
                    env.use_utility_code(UtilityCode.load_cached("BytesEquals", "StringTools.c"))
                    self.special_bool_cmp_function = "__Pyx_PyBytes_Equals"
                    return True
                elif type1 is Builtin.str_type or type2 is Builtin.str_type:
                    env.use_utility_code(UtilityCode.load_cached("StrEquals", "StringTools.c"))
                    self.special_bool_cmp_function = "__Pyx_PyString_Equals"
                    return True
        return False
    def generate_operation_code(self, code, result_code,
            operand1, op , operand2):
        # Emit C code for one comparison link, choosing between: special
        # equality helper, 'in' containment test, Python rich comparison,
        # complex-number equality, and a plain C comparison.
        if self.type.is_pyobject:
            coerce_result = "__Pyx_PyBool_FromLong"
        else:
            coerce_result = ""
        if 'not' in op:
            negation = "!"
        else:
            negation = ""
        if self.special_bool_cmp_function:
            if operand1.type.is_pyobject:
                result1 = operand1.py_result()
            else:
                result1 = operand1.result()
            if operand2.type.is_pyobject:
                result2 = operand2.py_result()
            else:
                result2 = operand2.result()
            code.putln("%s = %s(%s, %s, %s); %s" % (
                result_code,
                self.special_bool_cmp_function,
                result1,
                result2,
                richcmp_constants[op],
                code.error_goto_if_neg(result_code, self.pos)))
        elif op == 'in' or op == 'not_in':
            code.globalstate.use_utility_code(contains_utility_code)
            if self.type.is_pyobject:
                coerce_result = "__Pyx_PyBoolOrNull_FromLong"
            if op == 'not_in':
                negation = "__Pyx_NegateNonNeg"
            if operand2.type is dict_type:
                method = "PyDict_Contains"
            else:
                method = "PySequence_Contains"
            if self.type.is_pyobject:
                error_clause = code.error_goto_if_null
                got_ref = "__Pyx_XGOTREF(%s); " % result_code
            else:
                error_clause = code.error_goto_if_neg
                got_ref = ""
            code.putln(
                "%s = %s(%s(%s(%s, %s))); %s%s" % (
                    result_code,
                    coerce_result,
                    negation,
                    method,
                    operand2.py_result(),
                    operand1.py_result(),
                    got_ref,
                    error_clause(result_code, self.pos)))
        elif (operand1.type.is_pyobject
            and op not in ('is', 'is_not')):
            code.putln("%s = PyObject_RichCompare(%s, %s, %s); %s" % (
                    result_code,
                    operand1.py_result(),
                    operand2.py_result(),
                    richcmp_constants[op],
                    code.error_goto_if_null(result_code, self.pos)))
            code.put_gotref(result_code)
        elif operand1.type.is_complex:
            # Complex numbers only support (in)equality.
            if op == "!=":
                negation = "!"
            else:
                negation = ""
            code.putln("%s = %s(%s%s(%s, %s));" % (
                result_code,
                coerce_result,
                negation,
                operand1.type.unary_op('eq'),
                operand1.result(),
                operand2.result()))
        else:
            # Plain C comparison over a suitable common type.
            type1 = operand1.type
            type2 = operand2.type
            if (type1.is_extension_type or type2.is_extension_type) \
                    and not type1.same_as(type2):
                common_type = py_object_type
            elif type1.is_numeric:
                common_type = PyrexTypes.widest_numeric_type(type1, type2)
            else:
                common_type = type1
            code1 = operand1.result_as(common_type)
            code2 = operand2.result_as(common_type)
            code.putln("%s = %s(%s %s %s);" % (
                result_code,
                coerce_result,
                code1,
                self.c_operator(op),
                code2))
    def c_operator(self, op):
        # 'is'/'is_not' compile to C pointer (in)equality.
        if op == 'is':
            return "=="
        elif op == 'is_not':
            return "!="
        else:
            return op
contains_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE int __Pyx_NegateNonNeg(int b) {
return unlikely(b < 0) ? b : !b;
}
static CYTHON_INLINE PyObject* __Pyx_PyBoolOrNull_FromLong(long b) {
return unlikely(b < 0) ? NULL : __Pyx_PyBool_FromLong(b);
}
""")
class PrimaryCmpNode(ExprNode, CmpNode):
    # Non-cascaded comparison or first comparison of
    # a cascaded sequence.
    #
    # operator string
    # operand1 ExprNode
    # operand2 ExprNode
    # cascade CascadedCmpNode
    # We don't use the subexprs mechanism, because
    # things here are too complicated for it to handle.
    # Instead, we override all the framework methods
    # which use it.
    child_attrs = ['operand1', 'operand2', 'cascade']
    cascade = None
    def infer_type(self, env):
        # TODO: Actually implement this (after merging with -unstable).
        return py_object_type
    def type_dependencies(self, env):
        return ()
    def calculate_constant_result(self):
        self.calculate_cascaded_constant_result(self.operand1.constant_result)
    def compile_time_value(self, denv):
        operand1 = self.operand1.compile_time_value(denv)
        return self.cascaded_compile_time_value(operand1, denv)
    def analyse_types(self, env):
        # Analyse both operands, then dispatch on the kind of comparison:
        # C++ operator overload, 'in'/'not_in' containment (C string,
        # sliced pointer, or generic Python), optimized equality helper,
        # or a plain comparison over a common coerced type.
        self.operand1.analyse_types(env)
        self.operand2.analyse_types(env)
        if self.is_cpp_comparison():
            self.analyse_cpp_comparison(env)
            if self.cascade:
                error(self.pos, "Cascading comparison not yet supported for cpp types.")
            return
        if self.cascade:
            self.cascade.analyse_types(env)
        if self.operator in ('in', 'not_in'):
            if self.is_c_string_contains():
                self.is_pycmp = False
                common_type = None
                if self.cascade:
                    error(self.pos, "Cascading comparison not yet supported for 'int_val in string'.")
                    return
                if self.operand2.type is unicode_type:
                    env.use_utility_code(UtilityCode.load_cached("PyUCS4InUnicode", "StringTools.c"))
                else:
                    if self.operand1.type is PyrexTypes.c_uchar_type:
                        self.operand1 = self.operand1.coerce_to(PyrexTypes.c_char_type, env)
                    if self.operand2.type is not bytes_type:
                        self.operand2 = self.operand2.coerce_to(bytes_type, env)
                    env.use_utility_code(UtilityCode.load_cached("BytesContains", "StringTools.c"))
                self.operand2 = self.operand2.as_none_safe_node(
                    "argument of type 'NoneType' is not iterable")
            elif self.is_ptr_contains():
                if self.cascade:
                    error(self.pos, "Cascading comparison not yet supported for 'val in sliced pointer'.")
                self.type = PyrexTypes.c_bint_type
                # Will be transformed by IterationTransform
                return
            else:
                if self.operand2.type is dict_type:
                    self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
                common_type = py_object_type
                self.is_pycmp = True
        elif self.find_special_bool_compare_function(env):
            common_type = None # if coercion needed, the method call above has already done it
            self.is_pycmp = False # result is bint
            self.is_temp = True # must check for error return
        else:
            common_type = self.find_common_type(env, self.operator, self.operand1)
            self.is_pycmp = common_type.is_pyobject
        if common_type is not None and not common_type.is_error:
            if self.operand1.type != common_type:
                self.operand1 = self.operand1.coerce_to(common_type, env)
            self.coerce_operands_to(common_type, env)
        if self.cascade:
            self.operand2 = self.operand2.coerce_to_simple(env)
            self.cascade.coerce_cascaded_operands_to_temp(env)
        if self.is_python_result():
            self.type = PyrexTypes.py_object_type
        else:
            self.type = PyrexTypes.c_bint_type
        cdr = self.cascade
        while cdr:
            # All links in the chain share the head node's result type.
            cdr.type = self.type
            cdr = cdr.cascade
        if self.is_pycmp or self.cascade:
            self.is_temp = 1
    def analyse_cpp_comparison(self, env):
        # Resolve an overloaded C++ comparison operator for the operands.
        type1 = self.operand1.type
        type2 = self.operand2.type
        entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
        if entry is None:
            error(self.pos, "Invalid types for '%s' (%s, %s)" %
                (self.operator, type1, type2))
            self.type = PyrexTypes.error_type
            self.result_code = "<error>"
            return
        func_type = entry.type
        if func_type.is_ptr:
            func_type = func_type.base_type
        if len(func_type.args) == 1:
            # Member operator: only the right-hand side is an argument.
            self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
        else:
            self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
            self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
        self.type = func_type.return_type
    def has_python_operands(self):
        return (self.operand1.type.is_pyobject
            or self.operand2.type.is_pyobject)
    def check_const(self):
        if self.cascade:
            self.not_const()
            return False
        else:
            return self.operand1.check_const() and self.operand2.check_const()
    def calculate_result_code(self):
        # C expression for a non-temp comparison result.
        if self.operand1.type.is_complex:
            if self.operator == "!=":
                negation = "!"
            else:
                negation = ""
            return "(%s%s(%s, %s))" % (
                negation,
                self.operand1.type.binary_op('=='),
                self.operand1.result(),
                self.operand2.result())
        elif self.is_c_string_contains():
            if self.operand2.type is unicode_type:
                method = "__Pyx_UnicodeContainsUCS4"
            else:
                method = "__Pyx_BytesContains"
            if self.operator == "not_in":
                negation = "!"
            else:
                negation = ""
            return "(%s%s(%s, %s))" % (
                negation,
                method,
                self.operand2.result(),
                self.operand1.result())
        else:
            return "(%s %s %s)" % (
                self.operand1.result(),
                self.c_operator(self.operator),
                self.operand2.result())
    def generate_evaluation_code(self, code):
        self.operand1.generate_evaluation_code(code)
        self.operand2.generate_evaluation_code(code)
        if self.is_temp:
            self.allocate_temp_result(code)
            self.generate_operation_code(code, self.result(),
                self.operand1, self.operator, self.operand2)
            if self.cascade:
                self.cascade.generate_evaluation_code(code,
                    self.result(), self.operand2)
            self.operand1.generate_disposal_code(code)
            self.operand1.free_temps(code)
            self.operand2.generate_disposal_code(code)
            self.operand2.free_temps(code)
    def generate_subexpr_disposal_code(self, code):
        # If this is called, it is a non-cascaded cmp,
        # so only need to dispose of the two main operands.
        self.operand1.generate_disposal_code(code)
        self.operand2.generate_disposal_code(code)
    def free_subexpr_temps(self, code):
        # If this is called, it is a non-cascaded cmp,
        # so only need to dispose of the two main operands.
        self.operand1.free_temps(code)
        self.operand2.free_temps(code)
    def annotate(self, code):
        self.operand1.annotate(code)
        self.operand2.annotate(code)
        if self.cascade:
            self.cascade.annotate(code)
class CascadedCmpNode(Node, CmpNode):
    # A CascadedCmpNode is not a complete expression node. It
    # hangs off the side of another comparison node, shares
    # its left operand with that node, and shares its result
    # with the PrimaryCmpNode at the head of the chain.
    #
    # operator string
    # operand2 ExprNode
    # cascade CascadedCmpNode
    child_attrs = ['operand2', 'cascade']
    cascade = None
    constant_result = constant_value_not_set # FIXME: where to calculate this?
    def infer_type(self, env):
        # TODO: Actually implement this (after merging with -unstable).
        return py_object_type
    def type_dependencies(self, env):
        return ()
    def has_constant_result(self):
        return self.constant_result is not constant_value_not_set and \
               self.constant_result is not not_a_constant
    def analyse_types(self, env):
        self.operand2.analyse_types(env)
        if self.cascade:
            self.cascade.analyse_types(env)
    def has_python_operands(self):
        return self.operand2.type.is_pyobject
    def coerce_operands_to_pyobjects(self, env):
        self.operand2 = self.operand2.coerce_to_pyobject(env)
        if self.operand2.type is dict_type and self.operator in ('in', 'not_in'):
            self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
        if self.cascade:
            self.cascade.coerce_operands_to_pyobjects(env)
    def coerce_cascaded_operands_to_temp(self, env):
        # Shared middle operands must be simple so they can be re-read.
        if self.cascade:
            #self.operand2 = self.operand2.coerce_to_temp(env) #CTT
            self.operand2 = self.operand2.coerce_to_simple(env)
            self.cascade.coerce_cascaded_operands_to_temp(env)
    def generate_evaluation_code(self, code, result, operand1):
        # Short-circuit: only evaluate this link while the chain so far
        # is still true.
        if self.type.is_pyobject:
            code.putln("if (__Pyx_PyObject_IsTrue(%s)) {" % result)
            code.put_decref(result, self.type)
        else:
            code.putln("if (%s) {" % result)
        self.operand2.generate_evaluation_code(code)
        self.generate_operation_code(code, result,
            operand1, self.operator, self.operand2)
        if self.cascade:
            self.cascade.generate_evaluation_code(
                code, result, self.operand2)
        # Cascaded cmp result is always temp
        self.operand2.generate_disposal_code(code)
        self.operand2.free_temps(code)
        code.putln("}")
    def annotate(self, code):
        self.operand2.annotate(code)
        if self.cascade:
            self.cascade.annotate(code)
# Operator symbol -> ExprNode subclass implementing that binary operation.
binop_node_classes = dict([
    ("or",  BoolBinopNode),
    ("and", BoolBinopNode),
    ("|",   IntBinopNode),
    ("^",   IntBinopNode),
    ("&",   IntBinopNode),
    ("<<",  IntBinopNode),
    (">>",  IntBinopNode),
    ("+",   AddNode),
    ("-",   SubNode),
    ("*",   MulNode),
    ("/",   DivNode),
    ("//",  DivNode),
    ("%",   ModNode),
    ("**",  PowNode),
])
def binop_node(pos, operator, operand1, operand2, inplace=False):
    # Factory: look up and instantiate the ExprNode subclass that
    # implements the given binary operator.
    node_class = binop_node_classes[operator]
    return node_class(pos,
                      operator = operator,
                      operand1 = operand1,
                      operand2 = operand2,
                      inplace = inplace)
#-------------------------------------------------------------------
#
# Coercion nodes
#
# Coercion nodes are special in that they are created during
# the analyse_types phase of parse tree processing.
# Their __init__ methods consequently incorporate some aspects
# of that phase.
#
#-------------------------------------------------------------------
class CoercionNode(ExprNode):
    # Abstract base class for coercion nodes.
    #
    # arg ExprNode node being coerced
    subexprs = ['arg']
    constant_result = not_a_constant

    def __init__(self, arg):
        # Coercion nodes are created during type analysis, so the source
        # position is simply inherited from the node being wrapped.
        self.pos = arg.pos
        self.arg = arg
        if debug_coercion:
            print("%s Coercing %s" % (self, self.arg))

    def calculate_constant_result(self):
        # Constant folding can break type coercion, so it is disabled here.
        pass

    def annotate(self, code):
        # Annotate the wrapped node, then mark the coercion itself — but
        # only when it actually changes the type.
        self.arg.annotate(code)
        if self.arg.type == self.type:
            return
        pos_file, pos_line, pos_col = self.pos
        item = AnnotationItem(style='coerce', tag='coerce',
                              text='[%s] to [%s]' % (self.arg.type, self.type))
        code.annotate((pos_file, pos_line, pos_col - 1), item)
class CoerceToMemViewSliceNode(CoercionNode):
    # Coerce a Python object into a memoryview slice of the given dst_type.

    def __init__(self, arg, dst_type, env):
        assert dst_type.is_memoryviewslice
        assert not arg.type.is_memoryviewslice
        CoercionNode.__init__(self, arg)
        self.arg = arg
        self.env = env
        self.type = dst_type
        self.use_managed_ref = True
        self.is_temp = 1

    def generate_result_code(self, code):
        # Make sure the from-Python conversion helper exists, then invoke
        # it and jump to the error label when the conversion fails.
        self.type.create_from_py_utility_code(self.env)
        assignment = "%s = %s(%s);" % (self.result(),
                                       self.type.from_py_function,
                                       self.arg.py_result())
        code.putln(assignment)
        failed = self.type.error_condition(self.result())
        code.putln(code.error_goto_if(failed, self.pos))
class CastNode(CoercionNode):
    # Wrap a node in a C type cast.
    def __init__(self, arg, new_type):
        CoercionNode.__init__(self, arg)
        self.type = new_type
    def may_be_none(self):
        # A C cast never changes whether the value can be None.
        return self.arg.may_be_none()
    def calculate_result_code(self):
        # The cast is a pure C expression wrapped around the argument's result.
        return self.arg.result_as(self.type)
    def generate_result_code(self, code):
        self.arg.generate_result_code(code)
class PyTypeTestNode(CoercionNode):
    # Checks at runtime that a generic Python object is an instance of a
    # particular extension (or builtin) type. This node borrows the
    # result of its argument node.
    def __init__(self, arg, dst_type, env, notnone=False):
        # The arg is known to be a Python object, and the dst_type is
        # known to be an extension type.
        assert dst_type.is_extension_type or dst_type.is_builtin_type, "PyTypeTest on non extension type"
        CoercionNode.__init__(self, arg)
        self.type = dst_type
        self.result_ctype = arg.ctype()
        self.notnone = notnone
    nogil_check = Node.gil_error
    gil_message = "Python type test"
    def analyse_types(self, env):
        pass
    def may_be_none(self):
        return False if self.notnone else self.arg.may_be_none()
    def is_simple(self):
        return self.arg.is_simple()
    def result_in_temp(self):
        return self.arg.result_in_temp()
    def is_ephemeral(self):
        return self.arg.is_ephemeral()
    def calculate_constant_result(self):
        # FIXME
        pass
    def calculate_result_code(self):
        return self.arg.result()
    def generate_result_code(self, code):
        if not self.type.typeobj_is_available():
            error(self.pos, "Cannot test type of extern C class "
                "without type object name specification")
            return
        if not self.type.is_builtin_type:
            code.globalstate.use_utility_code(type_test_utility_code)
        code.putln(
            "if (!(%s)) %s" % (
                self.type.type_test_code(self.arg.py_result(), self.notnone),
                code.error_goto(self.pos)))
    def generate_post_assignment_code(self, code):
        self.arg.generate_post_assignment_code(code)
    def free_temps(self, code):
        self.arg.free_temps(code)
class NoneCheckNode(CoercionNode):
    # Checks that a Python object is not None and raises an appropriate
    # exception (as specified by the creating transform) if it is.
    def __init__(self, arg, exception_type_cname, exception_message,
                 exception_format_args):
        CoercionNode.__init__(self, arg)
        self.type = arg.type
        self.result_ctype = arg.ctype()
        self.exception_type_cname = exception_type_cname
        self.exception_message = exception_message
        self.exception_format_args = tuple(exception_format_args or ())
    def analyse_types(self, env):
        pass
    def may_be_none(self):
        # After the check has run, the value is known not to be None.
        return False
    def is_simple(self):
        return self.arg.is_simple()
    def result_in_temp(self):
        return self.arg.result_in_temp()
    def calculate_result_code(self):
        return self.arg.result()
    def generate_result_code(self, code):
        code.putln(
            "if (unlikely(%s == Py_None)) {" % self.arg.py_result())
        escape = StringEncoding.escape_byte_string
        msg = escape(self.exception_message.encode('UTF-8'))
        if self.exception_format_args:
            format_args = ', '.join(
                '"%s"' % escape(str(arg).encode('UTF-8'))
                for arg in self.exception_format_args)
            code.putln('PyErr_Format(%s, "%s", %s); %s ' % (
                self.exception_type_cname,
                msg,
                format_args,
                code.error_goto(self.pos)))
        else:
            code.putln('PyErr_SetString(%s, "%s"); %s ' % (
                self.exception_type_cname,
                msg,
                code.error_goto(self.pos)))
        code.putln("}")
    def generate_post_assignment_code(self, code):
        self.arg.generate_post_assignment_code(code)
    def free_temps(self, code):
        self.arg.free_temps(code)
class CoerceToPyTypeNode(CoercionNode):
    # Converts a value of a C data type to a Python object.
    type = py_object_type
    is_temp = 1
    def __init__(self, arg, env, type=py_object_type):
        if not arg.type.create_to_py_utility_code(env):
            error(arg.pos, "Cannot convert '%s' to Python object" % arg.type)
        elif arg.type.is_complex:
            # Special case: complex coercion is so complex that it uses a
            # macro ("__pyx_PyComplex_FromComplex()"), for which the
            # argument must be simple.
            arg = arg.coerce_to_simple(env)
        CoercionNode.__init__(self, arg)
        if type is py_object_type:
            # Be specific about some known types.
            if arg.type.is_string:
                self.type = bytes_type
            elif arg.type.is_unicode_char:
                self.type = unicode_type
            elif arg.type.is_complex:
                self.type = Builtin.complex_type
        else:
            # FIXME: check that the target type and the resulting type are compatible
            pass
        if arg.type.is_memoryviewslice:
            # Register utility codes at this point.
            arg.type.get_to_py_function(env, arg)
        self.env = env
    gil_message = "Converting to Python object"
    def may_be_none(self):
        # FIXME: is this always safe?
        return False
    def coerce_to_boolean(self, env):
        arg_type = self.arg.type
        if (arg_type == PyrexTypes.c_bint_type or
                (arg_type.is_pyobject and arg_type.name == 'bool')):
            return self.arg.coerce_to_temp(env)
        return CoerceToBooleanNode(self, env)
    def coerce_to_integer(self, env):
        # If not already some C integer type, coerce to longint.
        if self.arg.type.is_int:
            return self.arg
        return self.arg.coerce_to(PyrexTypes.c_long_type, env)
    def analyse_types(self, env):
        # The arg is always already analysed.
        pass
    def generate_result_code(self, code):
        if self.arg.type.is_memoryviewslice:
            funccall = self.arg.type.get_to_py_function(self.env, self.arg)
        else:
            funccall = "%s(%s)" % (self.arg.type.to_py_function,
                                   self.arg.result())
        code.putln('%s = %s; %s' % (
            self.result(),
            funccall,
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class CoerceIntToBytesNode(CoerceToPyTypeNode):
    # This node is used to convert a C int type to a Python bytes
    # object (of length one).
    is_temp = 1
    def __init__(self, arg, env):
        arg = arg.coerce_to_simple(env)
        CoercionNode.__init__(self, arg)
        self.type = Builtin.bytes_type
    def generate_result_code(self, code):
        arg = self.arg
        arg_result = arg.result()
        if arg.type not in (PyrexTypes.c_char_type,
                            PyrexTypes.c_uchar_type,
                            PyrexTypes.c_schar_type):
            # Wider integer types need a range check before being packed
            # into a single byte.
            if arg.type.signed:
                code.putln("if ((%s < 0) || (%s > 255)) {" % (
                    arg_result, arg_result))
            else:
                code.putln("if (%s > 255) {" % arg_result)
            code.putln('PyErr_Format(PyExc_OverflowError, '
                       '"value too large to pack into a byte"); %s' % (
                           code.error_goto(self.pos)))
            code.putln('}')
        temp = None
        if arg.type is not PyrexTypes.c_char_type:
            # PyBytes_FromStringAndSize needs a char*; narrow via a temp.
            temp = code.funcstate.allocate_temp(PyrexTypes.c_char_type, manage_ref=False)
            code.putln("%s = (char)%s;" % (temp, arg_result))
            arg_result = temp
        code.putln('%s = PyBytes_FromStringAndSize(&%s, 1); %s' % (
            self.result(),
            arg_result,
            code.error_goto_if_null(self.result(), self.pos)))
        if temp is not None:
            code.funcstate.release_temp(temp)
        code.put_gotref(self.py_result())
class CoerceFromPyTypeNode(CoercionNode):
    # Converts a Python object to a value of a C data type.
    def __init__(self, result_type, arg, env):
        CoercionNode.__init__(self, arg)
        self.type = result_type
        self.is_temp = 1
        if not result_type.create_from_py_utility_code(env):
            error(arg.pos,
                  "Cannot convert Python object to '%s'" % result_type)
        if self.type.is_string and self.arg.is_ephemeral():
            error(arg.pos,
                  "Obtaining char * from temporary Python value")
    def analyse_types(self, env):
        # The arg is always already analysed.
        pass
    def generate_result_code(self, code):
        rhs = "%s(%s)" % (self.type.from_py_function, self.arg.py_result())
        if self.type.is_enum:
            # Enums are converted through a C long.
            rhs = typecast(self.type, c_long_type, rhs)
        code.putln('%s = %s; %s' % (
            self.result(),
            rhs,
            code.error_goto_if(self.type.error_condition(self.result()), self.pos)))
        if self.type.is_pyobject:
            code.put_gotref(self.py_result())
    def nogil_check(self, env):
        error(self.pos, "Coercion from Python not allowed without the GIL")
class CoerceToBooleanNode(CoercionNode):
    # Used when a result needs to be used in a boolean context.
    type = PyrexTypes.c_bint_type
    # Builtin container types can be truth-tested via their C-level size
    # macro instead of the generic protocol.
    _special_builtins = {
        Builtin.list_type : 'PyList_GET_SIZE',
        Builtin.tuple_type : 'PyTuple_GET_SIZE',
        Builtin.bytes_type : 'PyBytes_GET_SIZE',
        Builtin.unicode_type : 'PyUnicode_GET_SIZE',
    }
    def __init__(self, arg, env):
        CoercionNode.__init__(self, arg)
        if arg.type.is_pyobject:
            self.is_temp = 1
    def nogil_check(self, env):
        if self.arg.type.is_pyobject and self._special_builtins.get(self.arg.type) is None:
            self.gil_error()
    gil_message = "Truth-testing Python object"
    def check_const(self):
        if self.is_temp:
            self.not_const()
            return False
        return self.arg.check_const()
    def calculate_result_code(self):
        return "(%s != 0)" % self.arg.result()
    def generate_result_code(self, code):
        if not self.is_temp:
            return
        test_func = self._special_builtins.get(self.arg.type)
        if test_func is None:
            # Generic truth test through the object protocol.
            code.putln(
                "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
                    self.result(),
                    self.arg.py_result(),
                    code.error_goto_if_neg(self.result(), self.pos)))
        else:
            code.putln("%s = (%s != Py_None) && (%s(%s) != 0);" % (
                self.result(),
                self.arg.py_result(),
                test_func,
                self.arg.py_result()))
class CoerceToComplexNode(CoercionNode):
    # Coerces a real or complex value to a (possibly different) complex type.
    def __init__(self, arg, dst_type, env):
        if arg.type.is_complex:
            arg = arg.coerce_to_simple(env)
        self.type = dst_type
        CoercionNode.__init__(self, arg)
        dst_type.create_declaration_utility_code(env)
    def calculate_result_code(self):
        if self.arg.type.is_complex:
            real_part = "__Pyx_CREAL(%s)" % self.arg.result()
            imag_part = "__Pyx_CIMAG(%s)" % self.arg.result()
        else:
            # A real value becomes the real part; imaginary part is zero.
            real_part = self.arg.result()
            imag_part = "0"
        return "%s(%s, %s)" % (self.type.from_parts, real_part, imag_part)
    def generate_result_code(self, code):
        # The coercion is a pure C expression; see calculate_result_code().
        pass
class CoerceToTempNode(CoercionNode):
    # This node is used to force the result of another node
    # to be stored in a temporary. It is only used if the
    # argument node's result is not already in a temporary.
    def __init__(self, arg, env):
        CoercionNode.__init__(self, arg)
        self.type = self.arg.type
        self.constant_result = self.arg.constant_result
        self.is_temp = 1
        if self.type.is_pyobject:
            self.result_ctype = py_object_type
    gil_message = "Creating temporary Python reference"
    def analyse_types(self, env):
        # The arg is always already analysed
        pass
    def coerce_to_boolean(self, env):
        self.arg = self.arg.coerce_to_boolean(env)
        if self.arg.is_simple():
            # No temporary needed for a simple boolean expression.
            return self.arg
        self.type = self.arg.type
        self.result_ctype = self.type
        return self
    def generate_result_code(self, code):
        #self.arg.generate_evaluation_code(code) # Already done
        # by generic generate_subexpr_evaluation_code!
        code.putln("%s = %s;" % (
            self.result(), self.arg.result_as(self.ctype())))
        if self.use_managed_ref:
            # The temp owns a reference to the copied value.
            if self.type.is_pyobject:
                code.put_incref(self.result(), self.ctype())
            elif self.type.is_memoryviewslice:
                code.put_incref_memoryviewslice(self.result(),
                                                not self.in_nogil_context)
class ProxyNode(CoercionNode):
    """
    A node that should not be replaced by transforms or other means,
    and hence can be useful to wrap the argument to a clone node

    MyNode    -> ProxyNode -> ArgNode
    CloneNode -^
    """

    nogil_check = None

    def __init__(self, arg):
        super(ProxyNode, self).__init__(arg)
        self._proxy_type()

    def analyse_expressions(self, env):
        self.arg.analyse_expressions(env)
        self._proxy_type()

    def _proxy_type(self):
        # Mirror the argument's type information (where already known)
        # so the proxy is transparent to its consumers.
        if hasattr(self.arg, 'type'):
            self.type = self.arg.type
            self.result_ctype = self.arg.result_ctype
        if hasattr(self.arg, 'entry'):
            self.entry = self.arg.entry

    def generate_result_code(self, code):
        # NOTE: this method was previously defined twice with identical
        # bodies; the redundant second definition has been removed.
        self.arg.generate_result_code(code)

    def result(self):
        return self.arg.result()

    def is_simple(self):
        return self.arg.is_simple()

    def may_be_none(self):
        return self.arg.may_be_none()

    def generate_evaluation_code(self, code):
        self.arg.generate_evaluation_code(code)

    def generate_disposal_code(self, code):
        self.arg.generate_disposal_code(code)

    def free_temps(self, code):
        self.arg.free_temps(code)
class CloneNode(CoercionNode):
    # This node is employed when the result of another node needs
    # to be used multiple times. The argument node's result must
    # be in a temporary. This node "borrows" the result from the
    # argument node, and does not generate any evaluation or
    # disposal code for it. The original owner of the argument
    # node is responsible for doing those things.
    subexprs = [] # Arg is not considered a subexpr
    nogil_check = None
    def __init__(self, arg):
        CoercionNode.__init__(self, arg)
        if hasattr(arg, 'type'):
            self.type = arg.type
            self.result_ctype = arg.result_ctype
        if hasattr(arg, 'entry'):
            self.entry = arg.entry
    def result(self):
        # Borrow the argument's result directly.
        return self.arg.result()
    def may_be_none(self):
        return self.arg.may_be_none()
    def type_dependencies(self, env):
        return self.arg.type_dependencies(env)
    def infer_type(self, env):
        return self.arg.infer_type(env)
    def analyse_types(self, env):
        # Re-read the (possibly updated) type info from the argument.
        self.type = self.arg.type
        self.result_ctype = self.arg.result_ctype
        self.is_temp = 1
        if hasattr(self.arg, 'entry'):
            self.entry = self.arg.entry
    def is_simple(self):
        return True # result is always in a temp (or a name)
    # The borrowed result needs no evaluation, disposal or temp handling.
    def generate_evaluation_code(self, code):
        pass
    def generate_result_code(self, code):
        pass
    def generate_disposal_code(self, code):
        pass
    def free_temps(self, code):
        pass
class CMethodSelfCloneNode(CloneNode):
    # Special CloneNode for the self argument of builtin C methods
    # that accepts subtypes of the builtin type. This is safe only
    # for 'final' subtypes, as subtypes of the declared type may
    # override the C method.
    def coerce_to(self, dst_type, env):
        # Skip the coercion when the value is already a (sub)type of the
        # requested builtin type.
        if dst_type.is_builtin_type and self.type.subtype_of(dst_type):
            return self
        return CloneNode.coerce_to(self, dst_type, env)
class ModuleRefNode(ExprNode):
    # Simply returns the module object (a borrowed C name, no temp needed).
    type = py_object_type
    is_temp = False
    subexprs = []
    def analyse_types(self, env):
        pass
    def may_be_none(self):
        # The module object always exists while module code runs.
        return False
    def calculate_result_code(self):
        return Naming.module_cname
    def generate_result_code(self, code):
        pass
class DocstringRefNode(ExprNode):
    # Extracts the docstring of the body element
    subexprs = ['body']
    type = py_object_type
    is_temp = True
    def __init__(self, pos, body):
        ExprNode.__init__(self, pos)
        assert body.type.is_pyobject
        self.body = body
    def analyse_types(self, env):
        pass
    def generate_result_code(self, code):
        # Fetch body.__doc__ at runtime; error out if the lookup fails.
        code.putln('%s = __Pyx_GetAttrString(%s, "__doc__"); %s' % (
            self.result(), self.body.result(),
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.result())
#------------------------------------------------------------------------------------
#
# Runtime support code
#
#------------------------------------------------------------------------------------
get_name_interned_utility_code = UtilityCode(
proto = """
static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/
""",
impl = """
static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) {
PyObject *result;
result = PyObject_GetAttr(dict, name);
if (!result) {
if (dict != %(BUILTINS)s) {
PyErr_Clear();
result = PyObject_GetAttr(%(BUILTINS)s, name);
}
if (!result) {
PyErr_SetObject(PyExc_NameError, name);
}
}
return result;
}
""" % {'BUILTINS' : Naming.builtins_cname})
#------------------------------------------------------------------------------------
import_utility_code = UtilityCode(
proto = """
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level); /*proto*/
""",
impl = """
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, long level) {
PyObject *py_import = 0;
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
py_import = __Pyx_GetAttrString(%(BUILTINS)s, "__import__");
if (!py_import)
goto bad;
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(%(GLOBALS)s);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
#if PY_VERSION_HEX >= 0x02050000
{
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, NULL);
Py_DECREF(py_level);
}
#else
if (level>0) {
PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4.");
goto bad;
}
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, NULL);
#endif
bad:
Py_XDECREF(empty_list);
Py_XDECREF(py_import);
Py_XDECREF(empty_dict);
return module;
}
""" % {
"BUILTINS": Naming.builtins_cname,
"GLOBALS": Naming.module_cname,
})
#------------------------------------------------------------------------------------
get_exception_utility_code = UtilityCode(
proto = """
static PyObject *__Pyx_GetExcValue(void); /*proto*/
""",
impl = """
static PyObject *__Pyx_GetExcValue(void) {
PyObject *type = 0, *value = 0, *tb = 0;
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyObject *result = 0;
PyThreadState *tstate = PyThreadState_Get();
PyErr_Fetch(&type, &value, &tb);
PyErr_NormalizeException(&type, &value, &tb);
if (PyErr_Occurred())
goto bad;
if (!value) {
value = Py_None;
Py_INCREF(value);
}
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
/* Make sure tstate is in a consistent state when we XDECREF
these objects (XDECREF may run arbitrary code). */
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
result = value;
Py_XINCREF(result);
type = 0;
value = 0;
tb = 0;
bad:
Py_XDECREF(type);
Py_XDECREF(value);
Py_XDECREF(tb);
return result;
}
""")
#------------------------------------------------------------------------------------
type_test_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/
""",
impl = """
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_Format(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(PyObject_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
""")
#------------------------------------------------------------------------------------
find_py2_metaclass_utility_code = UtilityCode(
proto = '''
static PyObject *__Pyx_FindPy2Metaclass(PyObject *bases); /*proto*/
''',
impl = '''
static PyObject *__Pyx_FindPy2Metaclass(PyObject *bases) {
PyObject *metaclass;
/* Default metaclass */
#if PY_MAJOR_VERSION < 3
if (PyTuple_Check(bases) && PyTuple_GET_SIZE(bases) > 0) {
PyObject *base = PyTuple_GET_ITEM(bases, 0);
metaclass = PyObject_GetAttrString(base, (char *)"__class__");
if (!metaclass) {
PyErr_Clear();
metaclass = (PyObject*) Py_TYPE(base);
}
} else {
metaclass = (PyObject *) &PyClass_Type;
}
#else
if (PyTuple_Check(bases) && PyTuple_GET_SIZE(bases) > 0) {
PyObject *base = PyTuple_GET_ITEM(bases, 0);
metaclass = (PyObject*) Py_TYPE(base);
} else {
metaclass = (PyObject *) &PyType_Type;
}
#endif
Py_INCREF(metaclass);
return metaclass;
}
''')
create_class_utility_code = UtilityCode(
proto = """
static PyObject *__Pyx_CreateClass(PyObject *bases, PyObject *dict, PyObject *name,
PyObject *modname); /*proto*/
""",
impl = """
static PyObject *__Pyx_CreateClass(PyObject *bases, PyObject *dict, PyObject *name,
PyObject *modname) {
PyObject *result;
PyObject *metaclass;
if (PyDict_SetItemString(dict, "__module__", modname) < 0)
return NULL;
/* Python2 __metaclass__ */
metaclass = PyDict_GetItemString(dict, "__metaclass__");
if (metaclass) {
Py_INCREF(metaclass);
} else {
metaclass = __Pyx_FindPy2Metaclass(bases);
}
result = PyObject_CallFunctionObjArgs(metaclass, name, bases, dict, NULL);
Py_DECREF(metaclass);
return result;
}
""",
requires = [find_py2_metaclass_utility_code])
#------------------------------------------------------------------------------------
create_py3class_utility_code = UtilityCode(
proto = """
static PyObject *__Pyx_Py3MetaclassGet(PyObject *bases, PyObject *mkw); /*proto*/
static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *mkw, PyObject *modname, PyObject *doc); /*proto*/
static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, PyObject *dict, PyObject *mkw); /*proto*/
""",
impl = """
PyObject *__Pyx_Py3MetaclassGet(PyObject *bases, PyObject *mkw) {
PyObject *metaclass = PyDict_GetItemString(mkw, "metaclass");
if (metaclass) {
Py_INCREF(metaclass);
if (PyDict_DelItemString(mkw, "metaclass") < 0) {
Py_DECREF(metaclass);
return NULL;
}
return metaclass;
}
return __Pyx_FindPy2Metaclass(bases);
}
PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *mkw,
PyObject *modname, PyObject *doc) {
PyObject *prep;
PyObject *pargs;
PyObject *ns;
PyObject *str;
prep = PyObject_GetAttrString(metaclass, (char *)"__prepare__");
if (!prep) {
if (!PyErr_ExceptionMatches(PyExc_AttributeError))
return NULL;
PyErr_Clear();
return PyDict_New();
}
pargs = PyTuple_New(2);
if (!pargs) {
Py_DECREF(prep);
return NULL;
}
Py_INCREF(name);
Py_INCREF(bases);
PyTuple_SET_ITEM(pargs, 0, name);
PyTuple_SET_ITEM(pargs, 1, bases);
ns = PyObject_Call(prep, pargs, mkw);
Py_DECREF(prep);
Py_DECREF(pargs);
if (ns == NULL)
return NULL;
/* Required here to emulate assignment order */
/* XXX: use consts here */
#if PY_MAJOR_VERSION >= 3
str = PyUnicode_FromString("__module__");
#else
str = PyString_FromString("__module__");
#endif
if (!str) {
Py_DECREF(ns);
return NULL;
}
if (PyObject_SetItem(ns, str, modname) < 0) {
Py_DECREF(ns);
Py_DECREF(str);
return NULL;
}
Py_DECREF(str);
if (doc) {
#if PY_MAJOR_VERSION >= 3
str = PyUnicode_FromString("__doc__");
#else
str = PyString_FromString("__doc__");
#endif
if (!str) {
Py_DECREF(ns);
return NULL;
}
if (PyObject_SetItem(ns, str, doc) < 0) {
Py_DECREF(ns);
Py_DECREF(str);
return NULL;
}
Py_DECREF(str);
}
return ns;
}
PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, PyObject *dict, PyObject *mkw) {
PyObject *result;
PyObject *margs = PyTuple_New(3);
if (!margs)
return NULL;
Py_INCREF(name);
Py_INCREF(bases);
Py_INCREF(dict);
PyTuple_SET_ITEM(margs, 0, name);
PyTuple_SET_ITEM(margs, 1, bases);
PyTuple_SET_ITEM(margs, 2, dict);
result = PyObject_Call(metaclass, margs, mkw);
Py_DECREF(margs);
return result;
}
""",
requires = [find_py2_metaclass_utility_code])
#------------------------------------------------------------------------------------
cpp_exception_utility_code = UtilityCode(
proto = """
#ifndef __Pyx_CppExn2PyErr
static void __Pyx_CppExn2PyErr() {
// Catch a handful of different errors here and turn them into the
// equivalent Python errors.
try {
if (PyErr_Occurred())
; // let the latest Python exn pass through and ignore the current one
else
throw;
} catch (const std::bad_alloc& exn) {
PyErr_SetString(PyExc_MemoryError, exn.what());
} catch (const std::bad_cast& exn) {
PyErr_SetString(PyExc_TypeError, exn.what());
} catch (const std::domain_error& exn) {
PyErr_SetString(PyExc_ValueError, exn.what());
} catch (const std::invalid_argument& exn) {
PyErr_SetString(PyExc_ValueError, exn.what());
} catch (const std::ios_base::failure& exn) {
// Unfortunately, in standard C++ we have no way of distinguishing EOF
// from other errors here; be careful with the exception mask
PyErr_SetString(PyExc_IOError, exn.what());
} catch (const std::out_of_range& exn) {
// Change out_of_range to IndexError
PyErr_SetString(PyExc_IndexError, exn.what());
} catch (const std::overflow_error& exn) {
PyErr_SetString(PyExc_OverflowError, exn.what());
} catch (const std::range_error& exn) {
PyErr_SetString(PyExc_ArithmeticError, exn.what());
} catch (const std::underflow_error& exn) {
PyErr_SetString(PyExc_ArithmeticError, exn.what());
} catch (const std::exception& exn) {
PyErr_SetString(PyExc_RuntimeError, exn.what());
}
catch (...)
{
PyErr_SetString(PyExc_RuntimeError, "Unknown exception");
}
}
#endif
""",
impl = ""
)
pyerr_occurred_withgil_utility_code= UtilityCode(
proto = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void); /* proto */
""",
impl = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void) {
int err;
#ifdef WITH_THREAD
PyGILState_STATE _save = PyGILState_Ensure();
#endif
err = !!PyErr_Occurred();
#ifdef WITH_THREAD
PyGILState_Release(_save);
#endif
return err;
}
"""
)
#------------------------------------------------------------------------------------
raise_noneattr_error_utility_code = UtilityCode.load_cached("RaiseNoneAttrError", "ObjectHandling.c")
raise_noneindex_error_utility_code = UtilityCode.load_cached("RaiseNoneIndexingError", "ObjectHandling.c")
raise_none_iter_error_utility_code = UtilityCode.load_cached("RaiseNoneIterError", "ObjectHandling.c")
raise_noneindex_memview_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseNoneMemviewIndexingError(void);
""",
impl = '''
static CYTHON_INLINE void __Pyx_RaiseNoneMemviewIndexingError(void) {
PyErr_SetString(PyExc_TypeError, "Cannot index None memoryview slice");
}
''')
raise_unbound_local_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
""")
raise_closure_name_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname) {
PyErr_Format(PyExc_NameError, "free variable '%s' referenced before assignment in enclosing scope", varname);
}
""")
# Don't inline the function, it should really never be called in production
raise_unbound_memoryview_utility_code_nogil = UtilityCode(
proto = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname);
""",
impl = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname) {
#ifdef WITH_THREAD
PyGILState_STATE gilstate = PyGILState_Ensure();
#endif
__Pyx_RaiseUnboundLocalError(varname);
#ifdef WITH_THREAD
PyGILState_Release(gilstate);
#endif
}
""",
requires = [raise_unbound_local_error_utility_code])
#------------------------------------------------------------------------------------
getitem_dict_utility_code = UtilityCode(
proto = """
#if PY_MAJOR_VERSION >= 3
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
PyObject *value;
if (unlikely(d == Py_None)) {
__Pyx_RaiseNoneIndexingError();
return NULL;
}
value = PyDict_GetItemWithError(d, key);
if (unlikely(!value)) {
if (!PyErr_Occurred())
PyErr_SetObject(PyExc_KeyError, key);
return NULL;
}
Py_INCREF(value);
return value;
}
#else
#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
#endif
""",
requires = [raise_noneindex_error_utility_code])
#------------------------------------------------------------------------------------
getitem_int_pyunicode_utility_code = UtilityCode(
proto = '''
#define __Pyx_GetItemInt_Unicode(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \\
__Pyx_GetItemInt_Unicode_Fast(o, i) : \\
__Pyx_GetItemInt_Unicode_Generic(o, to_py_func(i)))
static CYTHON_INLINE Py_UCS4 __Pyx_GetItemInt_Unicode_Fast(PyObject* ustring, Py_ssize_t i) {
const Py_ssize_t length = __Pyx_PyUnicode_GET_LENGTH(ustring);
if (likely((0 <= i) & (i < length))) {
return __Pyx_PyUnicode_READ_CHAR(ustring, i);
} else if ((-length <= i) & (i < 0)) {
return __Pyx_PyUnicode_READ_CHAR(ustring, i + length);
} else {
PyErr_SetString(PyExc_IndexError, "string index out of range");
return (Py_UCS4)-1;
}
}
static CYTHON_INLINE Py_UCS4 __Pyx_GetItemInt_Unicode_Generic(PyObject* ustring, PyObject* j) {
Py_UCS4 uchar;
PyObject *uchar_string;
if (!j) return (Py_UCS4)-1;
uchar_string = PyObject_GetItem(ustring, j);
Py_DECREF(j);
if (!uchar_string) return (Py_UCS4)-1;
uchar = __Pyx_PyUnicode_READ_CHAR(uchar_string, 0);
Py_DECREF(uchar_string);
return uchar;
}
''')
getitem_int_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
r = PyObject_GetItem(o, j);
Py_DECREF(j);
return r;
}
""" + ''.join([
"""
#define __Pyx_GetItemInt_%(type)s(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \\
__Pyx_GetItemInt_%(type)s_Fast(o, i) : \\
__Pyx_GetItemInt_Generic(o, to_py_func(i)))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_%(type)s_Fast(PyObject *o, Py_ssize_t i) {
if (likely(o != Py_None)) {
if (likely((0 <= i) & (i < Py%(type)s_GET_SIZE(o)))) {
PyObject *r = Py%(type)s_GET_ITEM(o, i);
Py_INCREF(r);
return r;
}
else if ((-Py%(type)s_GET_SIZE(o) <= i) & (i < 0)) {
PyObject *r = Py%(type)s_GET_ITEM(o, Py%(type)s_GET_SIZE(o) + i);
Py_INCREF(r);
return r;
}
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
""" % {'type' : type_name} for type_name in ('List', 'Tuple')
]) + """
#define __Pyx_GetItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \\
__Pyx_GetItemInt_Fast(o, i) : \\
__Pyx_GetItemInt_Generic(o, to_py_func(i)))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i) {
if (PyList_CheckExact(o)) {
Py_ssize_t n = (likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if (likely((n >= 0) & (n < PyList_GET_SIZE(o)))) {
PyObject *r = PyList_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (PyTuple_CheckExact(o)) {
Py_ssize_t n = (likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
if (likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (likely(i >= 0)) {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_item)) {
return m->sq_item(o, i);
}
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
""",
impl = """
""")
#------------------------------------------------------------------------------------
setitem_int_utility_code = UtilityCode(
proto = """
#define __Pyx_SetItemInt(o, i, v, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \\
__Pyx_SetItemInt_Fast(o, i, v) : \\
__Pyx_SetItemInt_Generic(o, to_py_func(i), v))
static CYTHON_INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) {
int r;
if (!j) return -1;
r = PyObject_SetItem(o, j, v);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v) {
if (PyList_CheckExact(o)) {
Py_ssize_t n = (likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if (likely((n >= 0) & (n < PyList_GET_SIZE(o)))) {
PyObject* old = PyList_GET_ITEM(o, n);
Py_INCREF(v);
PyList_SET_ITEM(o, n, v);
Py_DECREF(old);
return 1;
}
}
else if (likely(i >= 0)) {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_ass_item)) {
return m->sq_ass_item(o, i, v);
}
}
return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v);
}
""",
impl = """
""")
#------------------------------------------------------------------------------------
delitem_int_utility_code = UtilityCode(
proto = """
#define __Pyx_DelItemInt(o, i, size, to_py_func) (((size) <= sizeof(Py_ssize_t)) ? \\
__Pyx_DelItemInt_Fast(o, i) : \\
__Pyx_DelItem_Generic(o, to_py_func(i)))
static CYTHON_INLINE int __Pyx_DelItem_Generic(PyObject *o, PyObject *j) {
int r;
if (!j) return -1;
r = PyObject_DelItem(o, j);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE int __Pyx_DelItemInt_Fast(PyObject *o, Py_ssize_t i) {
if (likely(i >= 0)) {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_ass_item)) {
return m->sq_ass_item(o, i, (PyObject *)NULL);
}
}
return __Pyx_DelItem_Generic(o, PyInt_FromSsize_t(i));
}
""",
impl = """
""")
#------------------------------------------------------------------------------------
raise_too_many_values_to_unpack = UtilityCode.load_cached("RaiseTooManyValuesToUnpack", "ObjectHandling.c")
raise_need_more_values_to_unpack = UtilityCode.load_cached("RaiseNeedMoreValuesToUnpack", "ObjectHandling.c")
#------------------------------------------------------------------------------------
tuple_unpacking_error_code = UtilityCode.load_cached("UnpackTupleError", "ObjectHandling.c")
#------------------------------------------------------------------------------------
# CPython supports calling functions with non-dict kwargs by
# converting them to a dict first.  This helper wraps
# PyEval_CallObjectWithKeywords: if the kwargs object is already a dict it is
# passed straight through; otherwise it is copied into a real dict first
# (by calling the dict type on it) and released after the call.
kwargs_call_utility_code = UtilityCode(
proto = """
static PyObject* __Pyx_PyEval_CallObjectWithKeywords(PyObject*, PyObject*, PyObject*); /*proto*/
""",
impl = """
static PyObject* __Pyx_PyEval_CallObjectWithKeywords(PyObject *callable, PyObject *args, PyObject *kwargs) {
PyObject* result;
if (likely(PyDict_Check(kwargs))) {
return PyEval_CallObjectWithKeywords(callable, args, kwargs);
} else {
PyObject* real_dict;
real_dict = PyObject_CallFunctionObjArgs((PyObject*)&PyDict_Type, kwargs, NULL);
if (unlikely(!real_dict))
return NULL;
result = PyEval_CallObjectWithKeywords(callable, args, real_dict);
Py_DECREF(real_dict);
return result; /* may be NULL */
}
}
""",
)
#------------------------------------------------------------------------------------
# Template for C integer exponentiation; %(type)s and %(func_name)s are
# substituted per concrete integer type.  The switch handles small exponents
# with intentional fall-through (e=3 multiplies, falls into e=2, etc.).
# Negative exponents return 0 (C integer semantics of 1/b**|e|), and the
# general case is a branch-free square-and-multiply loop.
int_pow_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s %(func_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s %(func_name)s(%(type)s b, %(type)s e) {
%(type)s t = b;
switch (e) {
case 3:
t *= b;
case 2:
t *= b;
case 1:
return t;
case 0:
return 1;
}
if (unlikely(e<0)) return 0;
t = 1;
while (likely(e)) {
t *= (b * (e&1)) | ((~e)&1); /* 1 or b */
b *= b;
e >>= 1;
}
return t;
}
""")
# ------------------------------ Division ------------------------------------
# ------------------------------ Division ------------------------------------
# Python-style floor division for C integer types: C division truncates toward
# zero, so when the remainder is non-zero and the operands have opposite signs
# the quotient is decremented by one to match Python's semantics.
div_int_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s q = a / b;
%(type)s r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
""")
# Python-style modulo for C integer types: the C remainder is shifted by b
# when it is non-zero and its sign differs from b's, so the result always
# takes the sign of the divisor (as in Python).
mod_int_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s r = a %% b;
r += ((r != 0) & ((r ^ b) < 0)) * b;
return r;
}
""")
# Same sign correction for floating point types, built on fmod/fmodf/fmodl
# (%(math_h_modifier)s selects the float/long double variant).
mod_float_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s r = fmod%(math_h_modifier)s(a, b);
r += ((r != 0) & ((r < 0) ^ (b < 0))) * b;
return r;
}
""")
# Runtime helper that emits a RuntimeWarning when cdivision-style division is
# performed on oppositely signed operands, where C (truncating) and Python
# (flooring) semantics would give different results.
cdivision_warning_utility_code = UtilityCode(
proto="""
static int __Pyx_cdivision_warning(const char *, int); /* proto */
""",
impl="""
static int __Pyx_cdivision_warning(const char *filename, int lineno) {
return PyErr_WarnExplicit(PyExc_RuntimeWarning,
"division with oppositely signed operands, C and Python semantics differ",
filename,
lineno,
__Pyx_MODULE_NAME,
NULL);
}
""")
# from intobject.c
# Detects whether negating x would overflow: only the most negative value of a
# two's-complement type equals its own negation while still being negative.
division_overflow_test_code = UtilityCode(
proto="""
#define UNARY_NEG_WOULD_OVERFLOW(x) \
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
""")
# Implementation of Cython's binding function type (CyFunction), loaded from
# a shared Tempita-templated utility file with the Naming module's identifiers
# available as template context.
binding_cfunc_utility_code = TempitaUtilityCode.load(
    "CythonFunction", context=vars(Naming))
# Fused (multi-signature) function support; builds on the CyFunction code.
fused_function_utility_code = TempitaUtilityCode.load(
    "FusedFunction",
    "CythonFunction.c",
    context=vars(Naming),
    requires=[binding_cfunc_utility_code])
# Class-cell support for CyFunctions (used for zero-arg super(), etc.).
cyfunction_class_cell_utility_code = UtilityCode.load(
    "CyFunctionClassCell",
    "CythonFunction.c",
    requires=[binding_cfunc_utility_code])
# Generator implementation; depends on the raise/exception-swap helpers
# defined in Nodes.
generator_utility_code = UtilityCode.load(
    "Generator",
    "Generator.c",
    requires=[Nodes.raise_utility_code, Nodes.swap_exception_utility_code],
)
| rguillebert/CythonCTypesBackend | Cython/Compiler/ExprNodes.py | Python | apache-2.0 | 386,025 | [
"VisIt"
] | d14c7dc679a0cc9cb9e4adcea29ff09189bca15d9dceefae46b80e83c833a12a |
#! /usr/bin/env python
#
# Copyright (C) 2011, 2012, 2014, 2015, 2016 David Maxwell
#
# This file is part of PISM.
#
# PISM is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# PISM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with PISM; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import PISM
from PISM import util
import numpy as np
import math
import siple
siple.reporting.clear_loggers()
siple.reporting.add_logger(PISM.invert.sipletools.pism_logger)
siple.reporting.set_pause_callback(PISM.invert.sipletools.pism_pause)
import matplotlib.pyplot as pp
class PlotListener(PISM.invert.listener.PlotListener):
    """Iteration listener that plots the current inversion state.

    Draws three panels along a y-transect through the domain center:
    the design variable zeta, the normalized x-velocity, and either the
    Tikhonov gradients or the residual, depending on the inverse method.
    """

    def __call__(self, inv_solver, it, data):
        grid = self.grid
        # toproczero gathers a distributed vec; presumably the full array is
        # returned only on rank 0 and None elsewhere -- hence the guard
        # below.  TODO confirm.
        zeta = self.toproczero(data.zeta)
        u = self.toproczero(data.u)
        if inv_solver.method.startswith('tikhonov'):
            # Weight the design gradient by 1/eta so the two gradient terms
            # are plotted on comparable scales.
            eta = data.tikhonov_penalty
            sWeight = 1
            dWeight = 1 / eta
            grad_zeta = self.toproczero(data.grad_JDesign)
            grad_u = self.toproczero(data.grad_JState)
            grad = self.toproczero(data.grad_JTikhonov)
        else:
            # Non-Tikhonov (e.g. iterative gradient) methods expose a residual.
            r = self.toproczero(data.residual)
        if zeta is not None:
            Mx = grid.Mx()
            y = grid.y()
            pp.figure(self.figure())
            pp.clf()
            # Panel 1: design variable along the center transect.
            pp.subplot(1, 3, 1)
            pp.plot(y, zeta[:, Mx / 2])
            # Panel 2: x-component of velocity, normalized by its max magnitude.
            mag = np.max(np.abs(u))
            pp.subplot(1, 3, 2)
            pp.plot(y, u[0, :, Mx / 2] / mag)
            # Panel 3: gradients (Tikhonov) or residual (other methods).
            pp.subplot(1, 3, 3)
            if inv_solver.method.startswith('tikhonov'):
                pp.plot(y, -grad_zeta[:, Mx / 2] * dWeight, y, grad_u[:, Mx / 2] * sWeight, y, grad[:, Mx / 2])
            else:
                pp.plot(y, r[0, :, Mx / 2])
            pp.ion()
            pp.show()
class LinPlotListener(PISM.invert.listener.PlotListener):
    """Listener that plots the current iterate of a linearized solve.

    The iterate is gathered to processor zero and shown along a y-transect
    through the domain center, normalized by its maximum magnitude.
    """

    def __call__(self, inv_solver, it, data):
        gathered = self.toproczero(data.x)
        # Nothing to draw on ranks that did not receive the gathered array.
        if gathered is None:
            return
        g = self.grid
        mid = g.Mx() / 2
        # Normalize by the max magnitude; guard against an all-zero iterate.
        scale = np.max(np.abs(gathered))
        if scale == 0:
            scale = 1
        pp.figure(self.figure())
        pp.clf()
        pp.plot(g.y(), gathered[:, mid] / scale)
        pp.ion()
        pp.show()
# Default grid size (overridable below via the -Mx/-My command-line options).
Mx = 11
My = 61
# Stencil width used when allocating ghosted vecs for the FEM-based solver.
kFEMStencilWidth = 1
# Parameters of Schoof's ice-stream solution (SSA Test I).
m_schoof = 2  # (pure number)
L_schoof = 40e3  # meters
aspect_schoof = 0.05  # (pure)
H0_schoof = aspect_schoof * L_schoof
# = 2000 m THICKNESS
B_schoof = 3.7e8  # Pa s^{1/3}; hardness
# given on p. 239 of Schoof; why so big?
p_schoof = 4.0 / 3.0  # = 1 + 1/n
slope = 0.001
# Inversion defaults (also overridable from the command line).
right_side_weight = 1.
tauc_guess_scale = 0.3
tauc_guess_const = None
def testi_tauc(grid, tauc):
    """Fill *tauc* with the analytic Test I yield stress.

    tauc(y) = f0 * |y/L|^m where f0 = rho_ice * g * H0 * slope is the
    driving-stress magnitude and L, m are Schoof's parameters.
    """
    config = grid.ctx().config()
    gravity = config.get_double("constants.standard_gravity")
    rho_ice = config.get_double("constants.ice.density")
    # Driving-stress magnitude.
    f0 = rho_ice * gravity * H0_schoof * slope
    with PISM.vec.Access(comm=tauc):
        for (i, j) in grid.points():
            tauc[i, j] = f0 * (abs(grid.y(j) / L_schoof) ** m_schoof)
class testi_run(PISM.invert.ssa.SSATaucForwardRun):
    """Forward-problem setup for SSA Test I (Schoof ice stream).

    Builds the grid, physics (isothermal Glen flow law with Schoof's
    hardness) and SSA coefficients: uniform thickness H0_schoof, flat bed,
    the analytic yield stress from testi_tauc(), a constant x-driving
    stress, no-slip Dirichlet conditions on the two y-boundaries, and a
    misfit weight of 1 for y<=0 and right_side_weight for y>0.
    """

    def __init__(self, Mx, My):
        self.grid = None
        self.Mx = Mx
        self.My = My

    def _initGrid(self):
        """Allocate a shallow, x-periodic, cell-centered grid."""
        Mx = self.Mx
        My = self.My
        Ly = 3 * L_schoof  # 300.0 km half-width (L=40.0km in Schoof's choice of variables)
        Lx = max(60.0e3, ((Mx - 1) / 2.) * (2.0 * Ly / (My - 1)))
        # Bug fix: x0/y0 were previously undefined names here (NameError at
        # runtime).  The grid is assumed to be centered at the origin, as in
        # the other PISM examples -- TODO confirm.
        x0 = 0.0
        y0 = 0.0
        ctx = PISM.Context().ctx
        self.grid = PISM.IceGrid.Shallow(ctx, Lx, Ly, x0, y0, Mx, My,
                                         PISM.CELL_CENTER,
                                         PISM.X_PERIODIC)

    def _initPhysics(self):
        """Configure the isothermal Glen flow law with Schoof's hardness."""
        config = self.config
        config.set_boolean("basal_resistance.pseudo_plastic.enabled", False)
        # irrelevant
        enthalpyconverter = PISM.EnthalpyConverter(config)
        config.set_string("stress_balance.ssa.flow_law", "isothermal_glen")
        # Softness = hardness^{-n} with B = 3.7e8 Pa s^{1/3} (B_schoof).
        config.set_double("flow_law.isothermal_Glen.ice_softness", pow(3.7e8, -config.get_double("stress_balance.ssa.Glen_exponent")))
        self.modeldata.setPhysics(enthalpyconverter)

    def _initSSACoefficients(self):
        """Allocate and fill the SSA input fields for Test I."""
        vecs = self.modeldata.vecs
        grid = self.grid
        # Bug fix: these calls previously used a bare, undefined name "model";
        # the rest of this script consistently uses PISM.model.
        vecs.add(PISM.model.createIceThicknessVec(grid), 'thickness')
        vecs.add(PISM.model.createBedrockElevationVec(grid), 'bed')
        vecs.add(PISM.model.createYieldStressVec(grid), 'tauc')
        vecs.add(PISM.model.createEnthalpyVec(grid), 'enthalpy')
        vecs.add(PISM.model.createIceMaskVec(grid), 'ice_mask')
        vecs.add(PISM.model.createDrivingStressXVec(grid))
        vecs.add(PISM.model.createDrivingStressYVec(grid))
        vecs.add(PISM.model.createVelocityMisfitWeightVec(grid))
        self._allocateBCs()
        vecs.thickness.set(H0_schoof)
        vecs.ice_mask.set(PISM.MASK_GROUNDED)
        vecs.bed.set(0.)
        testi_tauc(self.modeldata.grid, vecs.tauc)
        grid = self.grid
        # Constant driving stress f = rho * g * H0 * slope in the x-direction.
        standard_gravity = grid.ctx().config().get_double("constants.standard_gravity")
        ice_density = grid.ctx().config().get_double("constants.ice.density")
        f = ice_density * standard_gravity * H0_schoof * slope
        vecs.ssa_driving_stress_y.set(0)
        vecs.ssa_driving_stress_x.set(f)
        # No-slip Dirichlet velocity conditions on the two y-boundaries.
        with PISM.vec.Access(comm=[vecs.bc_mask, vecs.vel_bc]):
            for (i, j) in grid.points():
                if (j == 0) or (j == grid.My() - 1):
                    vecs.bc_mask[i, j] = 1
                    vecs.vel_bc[i, j].u = 0
                    vecs.vel_bc[i, j].v = 0
        # Misfit weight: full weight for y<=0, right_side_weight for y>0.
        misfit_weight = vecs.vel_misfit_weight
        with PISM.vec.Access(comm=misfit_weight):
            for (i, j) in grid.points():
                if grid.y(j) <= 0:
                    misfit_weight[i, j] = 1.
                else:
                    misfit_weight[i, j] = right_side_weight
# Main code starts here
if __name__ == "__main__":
    context = PISM.Context()
    config = context.config
    PISM.set_abort_on_sigint(True)
    # Command-line overrides of the module-level defaults.
    Mx = PISM.optionsInt("-Mx", "Number of grid points in x-direction", default=Mx)
    My = PISM.optionsInt("-My", "Number of grid points in y-direction", default=My)
    output_file = PISM.optionsString("-o", "output file", default="invert_testi.nc")
    right_side_weight = PISM.optionsReal("-right_side_weight", "L2 weight for y>0", default=right_side_weight)
    tauc_guess_scale = PISM.optionsReal("-tauc_guess_scale", "initial guess for tauc to be this factor of the true value", default=tauc_guess_scale)
    tauc_guess_const = PISM.optionsReal("-tauc_guess_const", "initial guess for tauc to be this constant", default=tauc_guess_const)
    do_plotting = PISM.optionsFlag("-inv_plot", "perform visualization during the computation", default=False)
    do_final_plot = PISM.optionsFlag("-inv_final_plot", "perform visualization at the end of the computation", default=True)
    do_pause = PISM.optionsFlag("-inv_pause", "pause each iteration", default=False)
    test_adjoint = PISM.optionsFlag("-inv_test_adjoint", "Test that the adjoint is working", default=False)
    inv_method = config.get_string("inverse.ssa.method")
    # Characteristic scales for nondimensionalization / diagnostics.
    length_scale = L_schoof
    slope = 0.001
    standard_gravity = config.get_double("constants.standard_gravity")
    ice_density = config.get_double("constants.ice.density")
    f0 = ice_density * standard_gravity * H0_schoof * slope
    stress_scale = f0
    Ly = 3 * L_schoof  # 300.0 km half-width (L=40.0km in Schoof's choice of variables)
    Lx = max(60.0e3, ((Mx - 1) / 2.) * (2.0 * Ly / (My - 1)))
    area_scale = Lx * Ly
    depth_scale = H0_schoof
    B = B_schoof
    velocity_scale = (f0 / B) ** (3.) * (length_scale / depth_scale) ** (3.) * length_scale
    time_scale = length_scale / velocity_scale
    strainrate_scale = 1. / time_scale
    viscosity_scale = B * (strainrate_scale ** (-2. / 3.))
    nuH_scale = viscosity_scale * depth_scale
    # Build the forward run and the inverse solver wrapping it.
    testi = testi_run(Mx, My)
    testi.setup()
    solver = PISM.invert.ssa.createInvSSASolver(testi)
    tauc_param = solver.ssarun.designVariableParameterization()
    grid = testi.grid
    # Build the true yield stress for test I
    tauc_true = PISM.model.createYieldStressVec(grid, name="tauc_true")
    testi_tauc(grid, tauc_true)
    # Convert tauc_true to zeta_true (the design variable actually inverted for)
    zeta_true = PISM.IceModelVec2S()
    zeta_true.create(grid, "zeta_true", PISM.WITH_GHOSTS, kFEMStencilWidth)
    tauc_param = PISM.invert.ssa.createDesignVariableParam(config, 'tauc')
    tauc_param.convertFromDesignVariable(tauc_true, zeta_true)
    # Build the initial guess for tauc for the inversion.
    tauc = PISM.model.createYieldStressVec(grid)
    if not tauc_guess_const is None:
        # Constant initial guess requested on the command line.
        tauc.set(tauc_guess_const)
    else:
        # Otherwise: scaled-down version of the true field.
        testi_tauc(grid, tauc)
        tauc.scale(tauc_guess_scale)
    # Convert tauc guess to zeta guess
    zeta0 = PISM.IceModelVec2S()
    zeta0.create(grid, "zeta", PISM.WITH_GHOSTS, kFEMStencilWidth)
    tauc_param.convertFromDesignVariable(tauc, zeta0)
    # Optional dot-product test that the adjoint of the linearized forward
    # map is consistent (<T d, r> vs <d, T* r>); exits afterwards.
    if test_adjoint:
        if solver.method.startswith('tikhonov'):
            siple.reporting.msg("option -inv_test_adjoint cannot be used with inverse method %s", solver.method)
            exit(1)
        from PISM.invert.sipletools import PISMLocalVector as PLV
        stencil_width = 1
        forward_problem = solver.forward_problem
        sys = grid.ctx().unit_system()
        d = PLV(PISM.vec.randVectorS(grid, 1e5, stencil_width))
        r = PLV(PISM.vec.randVectorV(grid,
                                     PISM.convert(sys, 1.0, "m/year", "m/second"),
                                     stencil_width))
        (domainIP, rangeIP) = forward_problem.testTStar(PLV(zeta0), d, r, 3)
        siple.reporting.msg("domainip %g rangeip %g", domainIP, rangeIP)
        exit(0)
    # Setup the output file.
    pio = PISM.util.prepare_output(output_file)
    zeta0.write(output_file)
    # Send the true yield stress through the forward problem to
    # get the synthetic "observed" velocity field.
    u_obs = PISM.model.create2dVelocityVec(grid, name='_ssa_true', desc='SSA velocity boundary condition', intent='intent')
    solver.solveForward(zeta_true, out=u_obs)
    # Attach various iteration listeners to the solver as needed for:
    # progress reporting,
    if inv_method.startswith('tikhonov'):
        solver.addIterationListener(PISM.invert.ssa.PrintTikhonovProgress)
    # Plotting
    if do_plotting:
        solver.addIterationListener(PlotListener(grid))
        if inv_method == 'ign':
            solver.addLinearIterationListener(LinPlotListener(grid))
    # Pausing
    if do_pause:
        solver.addIterationListener(PISM.invert.listener.pauseListener)
    # Iteration saving
    solver.addDesignUpdateListener(PISM.invert.ssa.ZetaSaver(output_file))
    # Try solving
    reason = solver.solveInverse(zeta0, u_obs, zeta0)
    if reason.failed():
        PISM.verbPrintf(1, grid.com, "Inverse solve FAILURE (%s)!\n" % reason.description())
        quit()
    PISM.verbPrintf(1, grid.com, "Inverse solve success (%s)!\n" % reason.description())
    (zeta_i, u_i) = solver.inverseSolution()
    # Map the recovered design variable back to a yield stress.
    tauc_param.convertToDesignVariable(zeta_i, tauc)
    # Write solution out to netcdf file
    testi.write(output_file)
    tauc.write(output_file)
    tauc_true.write(output_file)
    u_i.set_name("_computed", 0)
    u_i.write(output_file)
    u_obs.write(output_file)
    # Draw a pretty picture: gather fields to rank 0 for plotting.
    tz = PISM.vec.ToProcZero(grid)
    tauc_a = tz.communicate(tauc)
    tauc_true = tz.communicate(tauc_true)
    tz2 = PISM.vec.ToProcZero(grid, dof=2, dim=2)
    u_i_a = tz2.communicate(u_i)
    u_obs_a = tz2.communicate(u_obs)
    sys = grid.ctx().unit_system()
    secpera = PISM.convert(sys, 1.0, "year", "seconds")
    # tauc_a is None on ranks other than 0, which skips the plot there.
    if do_final_plot and (not tauc_a is None):
        y = grid.y()
        from matplotlib import pyplot
        pyplot.clf()
        pyplot.subplot(1, 2, 1)
        pyplot.plot(y, tauc_a[:, Mx / 2])
        pyplot.plot(y, tauc_true[:, Mx / 2])
        pyplot.subplot(1, 2, 2)
        pyplot.plot(y, u_i_a[0, :, Mx / 2] * secpera)
        pyplot.plot(y, u_obs_a[0, :, Mx / 2] * secpera)
        pyplot.ion()
        pyplot.show()
    siple.reporting.endpause()
| talbrecht/pism_pik | examples/inverse/invert_testi.py | Python | gpl-3.0 | 12,700 | [
"NetCDF"
] | 16388a8d7917af89c15af05bd82f9d8e05b8531415208f0209b594b4f3671415 |
import h2o_cmd, h2o_util
import h2o2 as h2o
import re, random, math
from h2o_test import check_sandbox_for_errors, dump_json, verboseprint
import h2o_nodes
from tabulate import tabulate
# recursive walk an object check that it has valid numbers only (no "" or nan or inf
def check_obj_has_good_numbers(obj, hierarchy="", curr_depth=0, max_depth=4, allowNaN=False):
"""Represent instance of a class as JSON.
Arguments:
obj -- any object
Return:
String that represent JSON-encoded object.
"""
def serialize(obj, hierarchy="", curr_depth=0):
"""Recursively walk object's hierarchy. Limit to max_depth"""
if curr_depth>max_depth:
return
if isinstance(obj, (bool, int, long, float, basestring)):
try:
number = float(obj)
print "Yay!", hierarchy, number
except:
if obj is None:
print "Not Yay! how come you're giving me None for a coefficient? %s %s" % (hierarchy, obj)
elif str(obj)=="":
print "Not Yay! how come you're giving me an empty string for a coefficient? %s %s" % (hierarchy, obj)
else:
raise Exception("%s %s %s is not a valid float" % (hierarchy, obj, type(obj)))
# hack for now
number = 0.0
if not allowNaN and math.isnan(number):
raise Exception("%s %s is a NaN" % (hierarchy, obj))
if not allowNaN and math.isinf(number):
raise Exception("%s %s is a Inf" % (hierarchy, obj))
return number
elif isinstance(obj, dict):
obj = obj.copy()
for key in obj:
obj[key] = serialize(obj[key], hierarchy + ".%" % key, curr_depth+1)
return obj
elif isinstance(obj, (list, tuple)):
return [serialize(item, hierarchy + "[%s]" % i, curr_depth+1) for (i, item) in enumerate(obj)]
elif hasattr(obj, '__dict__'):
return serialize(obj.__dict__, hierarchy, curr_depth+1)
else:
return repr(obj) # Don't know how to handle, convert to string
return (serialize(obj, hierarchy, curr_depth+1))
#************************************************************88
# Sanity-check an h2o GLM model: validate the numeric fields, check the
# coefficient list/intercept, and make sure the sandbox logged no errors.
# (Where does the confusion matrix come from?  TODO confirm.)
def simpleCheckGLM(self, model, parameters,
                   labelList, labelListUsed, allowFailWarning=False, allowZeroCoeff=False,
                   prettyPrint=False, noPrint=False,
                   maxExpectedIterations=None, doNormalized=False, allowNaN=False):
    """Validate an h2o GLM model object.

    Checks that deviances/AIC/threshold are valid numbers, that the
    coefficients table parses to floats with 'Intercept' last, and (unless
    allowZeroCoeff) that the coefficients are not all (near) zero.
    Returns (warnings, coeffs, intercept).
    """
    warnings = ''
    rank = model.rank
    binomial = model.binomial
    residual_deviance = model.residual_deviance
    threshold = model.threshold
    check_obj_has_good_numbers(threshold, 'threshold', allowNaN=allowNaN)
    auc = model.auc
    # NaN if not logistic
    # check_obj_has_good_numbers(auc, 'model.auc')
    best_lambda_idx = model.best_lambda_idx
    model_category = model.model_category
    name = model.name
    residual_degrees_of_freedom = model.residual_degrees_of_freedom
    # is this no longer used?
    coefficients_magnitude = model.coefficients_magnitude
    null_deviance = model.null_deviance
    check_obj_has_good_numbers(null_deviance, 'model.null_deviance', allowNaN=allowNaN)
    null_degrees_of_freedom = model.null_degrees_of_freedom
    check_obj_has_good_numbers(null_degrees_of_freedom, 'model.null_degrees_of_freedom', allowNaN=allowNaN)
    domains = model.domains
    # when is this okay to be NaN?
    aic = model.aic
    check_obj_has_good_numbers(aic, 'model.aic', allowNaN=allowNaN)
    names = model.names
    # coefficients_table.data[0] holds the names, data[1] the values.
    coeffs_names = model.coefficients_table.data[0]
    # these are returned as quoted strings. Turn them into numbers
    temp = model.coefficients_table.data[1]
    assert len(coeffs_names)==len(temp), "%s %s" % (len(coeffs_names), len(temp))
    # we need coefficients to be floats or empty
    check_obj_has_good_numbers(temp, 'model.coeffs', allowNaN=False)
    # print "temp", temp[0:10]
    # print "temp[5489:5500]", temp[5489:5500]
    # UPDATE: None (null json) is legal for coeffs; treat None/"" as 0.
    coeffs = map(lambda x : float(x) if (x is not None and str(x) != "") else 0, temp)
    intercept = coeffs[-1]
    interceptName = coeffs_names[-1]
    assert interceptName == 'Intercept'
    assert len(coeffs) == len(coeffs_names), "%s %s" % (len(coeffs), len(coeffs_names))
    # FIX! if a coeff is zeroed/ignored, it doesn't show up?
    # get rid of intercept in glm response
    # assert (len(coeffs)-1) == len(labelListUsed, \
    #    "%s %s %s %s" % (len(coeffs), len(labelListUsed), coeffs, labelListUsed)
    # labelList still has the response column?
    # ignored columns aren't in model.names, but output response is.
    # labelListUsed has the response col removed so add 1
    # Hmm..dropped coefficients again? can't do this check?
    # assert len(model.names) == len(labelListUsed), \
    #    "%s %s %s %s" % (len(model.names), len(labelListUsed), model.names, labelList)
    # this is no longer true!
    # assert model.threshold!=0
    print "len(coeffs)", len(coeffs)
    print "coeffs:", coeffs
    # last one is intercept; it must also be non-negligible.
    if interceptName != "Intercept" or abs(intercept)<1e-26:
        raise Exception("'Intercept' should be last in coeffs_names %s %s" % (interceptName, intercept))
    y = parameters['response_column']
    cString = "\n"
    for i,c in enumerate(coeffs_names):
        cString += "%s: %.5e " % (coeffs_names[i], coeffs[i])
    print cString
    print "\nH2O intercept:\t\t%.5e" % intercept
    print "\nTotal # of coeffs:", len(coeffs_names)
    # intercept is buried in there too
    absIntercept = abs(float(intercept))
    self.assertGreater(absIntercept, 1e-26, (
        "abs. value of GLM coeffs['Intercept'] is " +
        str(absIntercept) + ", not >= 1e-26 for Intercept" + "\n" +
        "parameters:" + dump_json(parameters)
    ))
    # Unless explicitly allowed, the coefficients must not all be ~zero.
    if (not allowZeroCoeff) and (len(coeffs)>1):
        s = 0.0
        for c in coeffs:
            s += abs(float(c))
        self.assertGreater(s, 1e-26, (
            "sum of abs. value of GLM coeffs/intercept is " + str(s) + ", not >= 1e-26\n" +
            "parameters:" + dump_json(parameters)
        ))
    # shouldn't have any errors
    check_sandbox_for_errors()
    return (warnings, coeffs, intercept)
#************************************************************88
def pickRandGlmParams(paramDict, params):
    """Populate *params* with a random subset of the choices in *paramDict*.

    Picks a random number of (possibly repeated) keys and a random value for
    each, then forces the family/link combination to be legal for h2o GLM
    and bumps nlambdas when lambda_search is on.

    Returns the value chosen for 'x', or 0 if 'x' was not picked.
    """
    colX = 0
    randomGroupSize = random.randint(1, len(paramDict))
    for i in range(randomGroupSize):
        # list() so random.choice gets an indexable sequence of the keys.
        randomKey = random.choice(list(paramDict.keys()))
        randomV = paramDict[randomKey]
        randomValue = random.choice(randomV)
        params[randomKey] = randomValue
        if (randomKey == 'x'):
            colX = randomValue
    # Only identity, log and inverse links are allowed for family=gaussian.
    # force legal family/link combos
    if 'family' not in params:  # defaults to gaussian
        if 'link' in params and params['link'] not in ('identity', 'log', 'inverse', 'familyDefault'):
            params['link'] = None
    elif params['family'] is not None and 'link' in params and params['link'] is not None:
        # only log/identity is legal?
        if params['family'] == 'poisson':
            if params['link'] not in ('identity', 'log', 'familyDefault'):
                params['link'] = None
        # only tweedie/tweedie is legal?
        elif params['family'] == 'tweedie':
            # Bug fix: ('tweedie') is just the string 'tweedie', so the old
            # "not in" test was a substring check; a one-element tuple gives
            # the intended membership test.
            if params['link'] not in ('tweedie',):
                params['link'] = None
        elif params['family'] == 'binomial':
            # only logit and log
            if params['link'] not in ('logit', 'log', 'familyDefault'):
                params['link'] = None
        elif params['family'] == 'gaussian':
            if params['link'] not in ('identity', 'log', 'inverse', 'familyDefault'):
                params['link'] = None
    elif params['family'] is None:  # defaults to gaussian
        if 'link' in params and params['link'] not in ('identity', 'log', 'inverse', 'familyDefault'):
            params['link'] = None
    # lambda_search needs at least two lambdas to search over.
    if 'lambda_search' in params and params['lambda_search'] == 1:
        if 'nlambdas' in params and params['nlambdas'] <= 1:
            params['nlambdas'] = 2
    return colX
def simpleCheckGLMScore(self, glmScore, family='gaussian', allowFailWarning=False, **kwargs):
warnings = None
if 'warnings' in glmScore:
warnings = glmScore['warnings']
# stop on failed
x = re.compile("failed", re.IGNORECASE)
# don't stop if fail to converge
c = re.compile("converge", re.IGNORECASE)
for w in warnings:
print "\nwarning:", w
if re.search(x,w) and not allowFailWarning:
if re.search(c,w):
# ignore the fail to converge warning now
pass
else:
# stop on other 'fail' warnings (are there any? fail to solve?
raise Exception(w)
validation = glmScore['validation']
validation['err'] = h2o_util.cleanseInfNan(validation['err'])
validation['nullDev'] = h2o_util.cleanseInfNan(validation['nullDev'])
validation['resDev'] = h2o_util.cleanseInfNan(validation['resDev'])
print "%15s %s" % ("err:\t", validation['err'])
print "%15s %s" % ("nullDev:\t", validation['nullDev'])
print "%15s %s" % ("resDev:\t", validation['resDev'])
# threshold only there if binomial?
# auc only for binomial
if family=="binomial":
print "%15s %s" % ("auc:\t", validation['auc'])
print "%15s %s" % ("threshold:\t", validation['threshold'])
err = False
if family=="poisson" or family=="gaussian":
if 'aic' not in validation:
print "aic is missing from the glm json response"
err = True
if not allowNaN and math.isnan(validation['err']):
print "Why is this err = 'nan'?? %6s %s" % ("err:\t", validation['err'])
err = True
if not allowNaN and math.isnan(validation['resDev']):
print "Why is this resDev = 'nan'?? %6s %s" % ("resDev:\t", validation['resDev'])
err = True
if err:
raise Exception ("How am I supposed to tell that any of these errors should be ignored?")
# legal?
if not allowNaN and math.isnan(validation['nullDev']):
## emsg = "Why is this nullDev = 'nan'?? %6s %s" % ("nullDev:\t", validation['nullDev'])
## raise Exception(emsg)
pass
def oldSimpleCheckGLM(self, glm, colX, allowFailWarning=False, allowZeroCoeff=False,
prettyPrint=False, noPrint=False, maxExpectedIterations=None, doNormalized=False, **kwargs):
# if we hit the max_iter, that means it probably didn't converge. should be 1-maxExpectedIter
# h2o GLM will verboseprint the result and print errors.
# so don't have to do that
# different when cross validation is used? No trainingErrorDetails?
GLMModel = glm['glm_model']
if not GLMModel:
raise Exception("GLMModel didn't exist in the glm response? %s" % dump_json(glm))
warnings = None
if 'warnings' in GLMModel and GLMModel['warnings']:
warnings = GLMModel['warnings']
# stop on failed
x = re.compile("failed", re.IGNORECASE)
# don't stop if fail to converge
c = re.compile("converge", re.IGNORECASE)
for w in warnings:
print "\nwarning:", w
if re.search(x,w) and not allowFailWarning:
if re.search(c,w):
# ignore the fail to converge warning now
pass
else:
# stop on other 'fail' warnings (are there any? fail to solve?
raise Exception(w)
# for key, value in glm.iteritems(): print key
# not in GLMGrid?
# FIX! don't get GLMParams if it can't solve?
GLMParams = GLMModel['glm']
family = GLMParams["family"]
# number of submodels = number of lambda
# min of 2. lambda_max is first
submodels = GLMModel['submodels']
# since all our tests?? only use one lambda, the best_lamda_idx should = 1
best_lambda_idx = GLMModel['best_lambda_idx']
print "best_lambda_idx:", best_lambda_idx
lambda_max = GLMModel['lambda_max']
print "lambda_max:", lambda_max
# currently lambda_max is not set by tomas. ..i.e.not valid
if 1==0 and (lambda_max <= submodels[best_lambda_idx].lambda_value):
raise Exception("lambda_max %s should always be > the lambda result %s we're checking" % (lambda_max, submodels[best_lambda_idx].lambda_value))
# submodels0 = submodels[0]
# submodels1 = submodels[-1] # hackery to make it work when there's just one
if (best_lambda_idx >= len(submodels)) or (best_lambda_idx < 0):
raise Exception("best_lambda_idx: %s should point to one of lambdas (which has len %s)" % (best_lambda_idx, len(submodels)))
if (best_lambda_idx >= len(submodels)) or (best_lambda_idx < 0):
raise Exception("best_lambda_idx: %s should point to one of submodels (which has len %s)" % (best_lambda_idx, len(submodels)))
submodels1 = submodels[best_lambda_idx] # hackery to make it work when there's just one
iterations = submodels1['iteration']
print "GLMModel/iterations:", iterations
# if we hit the max_iter, that means it probably didn't converge. should be 1-maxExpectedIter
if maxExpectedIterations is not None and iterations > maxExpectedIterations:
raise Exception("Convergence issue? GLM did iterations: %d which is greater than expected: %d" % (iterations, maxExpectedIterations) )
if 'validation' not in submodels1:
raise Exception("Should be a 'validation' key in submodels1: %s" % dump_json(submodels1))
validationsList = submodels1['validation']
validations = validationsList
# xval. compare what we asked for and what we got.
n_folds = kwargs.setdefault('n_folds', None)
print "GLMModel/validations"
validations['null_deviance'] = h2o_util.cleanseInfNan(validations['null_deviance'])
validations['residual_deviance'] = h2o_util.cleanseInfNan(validations['residual_deviance'])
print "%15s %s" % ("null_deviance:\t", validations['null_deviance'])
print "%15s %s" % ("residual_deviance:\t", validations['residual_deviance'])
# threshold only there if binomial?
# auc only for binomial
if family=="binomial":
print "%15s %s" % ("auc:\t", validations['auc'])
best_threshold = validations['best_threshold']
thresholds = validations['thresholds']
print "%15s %s" % ("best_threshold:\t", best_threshold)
# have to look up the index for the cm, from the thresholds list
best_index = None
for i,t in enumerate(thresholds):
if t >= best_threshold: # ends up using next one if not present
best_index = i
break
assert best_index!=None, "%s %s" % (best_threshold, thresholds)
print "Now printing the right 'best_threshold' %s from '_cms" % best_threshold
# cm = glm['glm_model']['submodels'][0]['validation']['_cms'][-1]
submodels = glm['glm_model']['submodels']
# FIX! this isn't right if we have multiple lambdas? different submodels?
cms = submodels[0]['validation']['_cms']
self.assertEqual(len(thresholds), len(cms),
msg="thresholds %s and cm %s should be lists of the same size. %s" % (len(thresholds), len(cms), thresholds))
# FIX! best_threshold isn't necessarily in the list. jump out if >=
assert best_index<len(cms), "%s %s" % (best_index, len(cms))
# if we want 0.5..rounds to int
# mid = len(cms)/2
# cm = cms[mid]
cm = cms[best_index]
print "cm:", dump_json(cm['_arr'])
predErr = cm['_predErr']
classErr = cm['_classErr']
# compare to predErr
# pctWrong = h2o_gbm.pp_cm_summary(cm['_arr']);
# FIX!
pctWrong = 0
print "predErr:", predErr
print "calculated pctWrong from cm:", pctWrong
print "classErr:", classErr
# self.assertLess(pctWrong, 9,"Should see less than 9% error (class = 4)")
print "\nTrain\n==========\n"
# print h2o_gbm.pp_cm(cm['_arr'])
if family=="poisson" or family=="gaussian":
print "%15s %s" % ("aic:\t", validations['aic'])
coefficients_names = GLMModel['coefficients_names']
# print "coefficients_names:", coefficients_names
idxs = submodels1['idxs']
print "idxs:", idxs
coefficients_names = coefficients_names
# always check both normalized and normal coefficients
norm_beta = submodels1['norm_beta']
# if norm_beta and len(coefficients_names)!=len(norm_beta):
# print len(coefficients_names), len(norm_beta)
# raise Exception("coefficients_names and normalized_norm_beta from h2o json not same length. coefficients_names: %s normalized_norm_beta: %s" % (coefficients_names, norm_beta))
#
beta = submodels1['beta']
# print "beta:", beta
# if len(coefficients_names)!=len(beta):
# print len(coefficients_names), len(beta)
# raise Exception("coefficients_names and beta from h2o json not same length. coefficients_names: %s beta: %s" % (coefficients_names, beta))
# test wants to use normalized?
if doNormalized:
beta_used = norm_beta
else:
beta_used = beta
coefficients = {}
# create a dictionary with name, beta (including intercept) just like v1
for i,b in zip(idxs, beta_used[:-1]):
name = coefficients_names[i]
coefficients[name] = b
print "len(idxs)", len(idxs), "len(beta_used)", len(beta_used)
print "coefficients:", coefficients
print "beta:", beta
print "norm_beta:", norm_beta
coefficients['Intercept'] = beta_used[-1]
print "len(coefficients_names)", len(coefficients_names)
print "len(idxs)", len(idxs)
print "idxs[-1]", idxs[-1]
print "intercept demapping info:", \
"coefficients_names[-i]:", coefficients_names[-1], \
"idxs[-1]:", idxs[-1], \
"coefficients_names[idxs[-1]]:", coefficients_names[idxs[-1]], \
"beta_used[-1]:", beta_used[-1], \
"coefficients['Intercept']", coefficients['Intercept']
# last one is intercept
interceptName = coefficients_names[idxs[-1]]
if interceptName != "Intercept" or abs(beta_used[-1])<1e-26:
raise Exception("'Intercept' should be last in coefficients_names and beta %s %s %s" %\
(idxs[-1], beta_used[-1], "-"+interceptName+"-"))
# idxs has the order for non-zero coefficients, it's shorter than beta_used and coefficients_names
# new 5/28/14. glm can point to zero coefficients
# for i in idxs:
# if beta_used[i]==0.0:
## raise Exception("idxs shouldn't point to any 0 coefficients i: %s %s:" % (i, beta_used[i]))
if len(idxs) > len(beta_used):
raise Exception("idxs shouldn't be longer than beta_used %s %s" % (len(idxs), len(beta_used)))
intercept = coefficients.pop('Intercept', None)
# intercept demapping info: idxs[-1]: 54 coefficients_names[[idxs[-1]]: Intercept beta_used[-1]: -6.6866753099
# the last one shoudl be 'Intercept' ?
coefficients_names.pop()
# have to skip the output col! get it from kwargs
# better always be there!
y = kwargs['response']
# the dict keys are column headers if they exist...how to order those? new: use the 'coefficients_names'
# from the response
# Tomas created 'coefficients_names which is the coefficient list in order.
# Just use it to index coefficients! works for header or no-header cases
# I guess now we won't print the "None" cases for dropped columns (constant columns!)
# Because Tomas doesn't get everything in 'coefficients_names' if dropped by GLMQuery before
# he gets it?
def add_to_coefficient_list_and_string(c, cList, cString):
if c in coefficients:
cValue = coefficients[c]
cValueString = "%s: %.5e " % (c, cValue)
else:
print "Warning: didn't see '" + c + "' in json coefficient response.",\
"Inserting 'None' with assumption it was dropped due to constant column)"
cValue = None
cValueString = "%s: %s " % (c, cValue)
cList.append(cValue)
# we put each on newline for easy comparison to R..otherwise keep condensed
if prettyPrint:
cValueString = "H2O coefficient " + cValueString + "\n"
# not mutable?
return cString + cValueString
# creating both a string for printing and a list of values
cString = ""
cList = []
# print in order using col_names
# coefficients_names is input only now..same for header or no header, or expanded enums
for c in coefficients_names:
cString = add_to_coefficient_list_and_string(c, cList, cString)
if prettyPrint:
print "\nH2O intercept:\t\t%.5e" % intercept
print cString
else:
if not noPrint:
print "\nintercept:", intercept, cString
print "\nTotal # of coefficients:", len(coefficients_names)
# pick out the coefficient for the column we enabled for enhanced checking. Can be None.
# FIX! temporary hack to deal with disappearing/renaming columns in GLM
if (not allowZeroCoeff) and (colX is not None):
absXCoeff = abs(float(coefficients[str(colX)]))
# add kwargs to help debug without looking at console log
self.assertGreater(absXCoeff, 1e-26, (
"abs. value of GLM coefficients['" + str(colX) + "'] is " +
str(absXCoeff) + ", not >= 1e-26 for X=" + str(colX) + "\n" +
"kwargs:" + dump_json(kwargs)
))
# intercept is buried in there too
absIntercept = abs(float(intercept))
self.assertGreater(absIntercept, 1e-26, (
"abs. value of GLM coefficients['Intercept'] is " +
str(absIntercept) + ", not >= 1e-26 for Intercept" + "\n" +
"kwargs:" + dump_json(kwargs)
))
# this is good if we just want min or max
# maxCoeff = max(coefficients, key=coefficients.get)
# for more, just invert the dictionary and ...
if (len(coefficients)>0):
maxKey = max([(abs(coefficients[x]),x) for x in coefficients])[1]
print "H2O Largest abs. coefficient value:", maxKey, coefficients[maxKey]
minKey = min([(abs(coefficients[x]),x) for x in coefficients])[1]
print "H2O Smallest abs. coefficient value:", minKey, coefficients[minKey]
else:
print "Warning, no coefficients returned. Must be intercept only?"
# many of the GLM tests aren't single column though.
# quick and dirty check: if all the coefficients are zero,
# something is broken
# intercept is in there too, but this will get it okay
# just sum the abs value up..look for greater than 0
# skip this test if there is just one coefficient. Maybe pointing to a non-important coeff?
if (not allowZeroCoeff) and (len(coefficients)>1):
s = 0.0
for c in coefficients:
v = coefficients[c]
s += abs(float(v))
self.assertGreater(s, 1e-26, (
"sum of abs. value of GLM coefficients/intercept is " + str(s) + ", not >= 1e-26\n" +
"kwargs:" + dump_json(kwargs)
))
print "submodels1, run_time (milliseconds):", submodels1['run_time']
# shouldn't have any errors
check_sandbox_for_errors()
return (warnings, cList, intercept)
# compare this glm to last one. since the files are concatenations,
# the results should be similar? 10% of first is allowed delta
def compareToFirstGlm(self, key, glm, firstglm):
# if isinstance(firstglm[key], list):
# in case it's not a list allready (err is a list)
verboseprint("compareToFirstGlm key:", key)
verboseprint("compareToFirstGlm glm[key]:", glm[key])
# key could be a list or not. if a list, don't want to create list of that list
# so use extend on an empty list. covers all cases?
if type(glm[key]) is list:
kList = glm[key]
firstkList = firstglm[key]
elif type(glm[key]) is dict:
raise Exception("compareToFirstGLm: Not expecting dict for " + key)
else:
kList = [glm[key]]
firstkList = [firstglm[key]]
print "kbn:", kList, firstkList
for k, firstk in zip(kList, firstkList):
# delta must be a positive number ?
delta = .1 * abs(float(firstk))
msg = "Too large a delta (" + str(delta) + ") comparing current and first for: " + key
self.assertAlmostEqual(float(k), float(firstk), delta=delta, msg=msg)
self.assertGreaterEqual(abs(float(k)), 0.0, str(k) + " abs not >= 0.0 in current")
def simpleCheckGLMGrid(self, glmGridResult, colX=None, allowFailWarning=False, **kwargs):
    """Sanity-check a GLM grid result by inspecting its best model.

    Views the first destination key of the grid, runs simpleCheckGLM on it,
    then saves every model in the grid (exercises save_model as a side effect).
    Returns whatever simpleCheckGLM returns for the best model.
    """
    # Shape of the relevant part of the response:
    # "grid": {
    #    "destination_keys": [
    #        "GLMGridResults__8222a49156af52532a34fb3ce4304308_0",
    #        "GLMGridResults__8222a49156af52532a34fb3ce4304308_1",
    #        "GLMGridResults__8222a49156af52532a34fb3ce4304308_2"
    #    ]
    # },
    destination_key = glmGridResult['grid']['destination_keys'][0]
    inspectGG = h2o_nodes.nodes[0].glm_view(destination_key)
    models = inspectGG['glm_model']['submodels']
    verboseprint("GLMGrid inspect GLMGrid model 0(best):", dump_json(models[0]))
    g = simpleCheckGLM(self, inspectGG, colX, allowFailWarning=allowFailWarning, **kwargs)
    # just to get some save_model testing
    for i,m in enumerate(glmGridResult['grid']['destination_keys']):
        print "Saving model", m, "to model"+str(i)
        h2o_nodes.nodes[0].save_model(model=m, path='model'+str(i), force=1)
    return g
# This gives me a comma separated x string, for all the columns, with cols with
# missing values, enums, and optionally matching a pattern, removed. useful for GLM
# since it removes rows with any col with NA
# get input from this.
# (missingValuesDict, constantValuesDict, enumSizeDict, colTypeDict, colNameDict) = \
# h2o_cmd.columnInfoFromInspect(parseResult['destination_key',
# exceptionOnMissingValues=False, timeoutSecs=300)
def goodXFromColumnInfo(y,
    num_cols=None, missingValuesDict=None, constantValuesDict=None, enumSizeDict=None,
    colTypeDict=None, colNameDict=None, keepPattern=None, key=None,
    timeoutSecs=120, returnIgnoreX=False, noPrint=False, returnStringX=True):
    """Build the list of usable predictor columns ("x") for GLM/RF.

    Drops the output column y (matched by index or by name), columns not
    matching keepPattern, and columns with missing values, constant values,
    or enums. If key is given, column info is fetched via
    h2o_cmd.columnInfoFromInspect; otherwise the caller supplies the dicts.

    Returns either x (kept columns) or ignore_x (dropped columns) depending
    on returnIgnoreX; each as a comma-separated string when returnStringX,
    otherwise as a list of column indices.
    """
    y = str(y)
    # if we pass a key, means we want to get the info ourselves here
    if key is not None:
        (missingValuesDict, constantValuesDict, enumSizeDict, colTypeDict, colNameDict) = \
            h2o_cmd.columnInfoFromInspect(key, exceptionOnMissingValues=False,
            max_column_display=99999999, timeoutSecs=timeoutSecs)
        num_cols = len(colNameDict)

    # now remove any whose names don't match the required keepPattern
    if keepPattern is not None:
        keepX = re.compile(keepPattern)
    else:
        keepX = None

    x = range(num_cols)
    # need to walk over a copy, cause we change x
    xOrig = x[:]
    ignore_x = [] # for use by RF
    for k in xOrig:
        name = colNameDict[k]
        # remove it if it has the same name as the y output
        if str(k)== y: # if they pass the col index as y
            if not noPrint:
                print "Removing %d because name: %s matches output %s" % (k, str(k), y)
            x.remove(k)
            # rf doesn't want it in ignore list
            # ignore_x.append(k)
        elif name == y: # if they pass the name as y
            if not noPrint:
                print "Removing %d because name: %s matches output %s" % (k, name, y)
            x.remove(k)
            # rf doesn't want it in ignore list
            # ignore_x.append(k)
        elif keepX is not None and not keepX.match(name):
            if not noPrint:
                print "Removing %d because name: %s doesn't match desired keepPattern %s" % (k, name, keepPattern)
            x.remove(k)
            ignore_x.append(k)
        # missing values reports as constant also. so do missing first.
        # remove all cols with missing values
        # could change it against num_rows for a ratio
        elif k in missingValuesDict:
            value = missingValuesDict[k]
            if not noPrint:
                print "Removing %d with name: %s because it has %d missing values" % (k, name, value)
            x.remove(k)
            ignore_x.append(k)
        elif k in constantValuesDict:
            value = constantValuesDict[k]
            if not noPrint:
                print "Removing %d with name: %s because it has constant value: %s " % (k, name, str(value))
            x.remove(k)
            ignore_x.append(k)
        # this is extra pruning..
        # remove all cols with enums, if not already removed
        elif k in enumSizeDict:
            value = enumSizeDict[k]
            if not noPrint:
                print "Removing %d %s because it has enums of size: %d" % (k, name, value)
            x.remove(k)
            ignore_x.append(k)

    if not noPrint:
        print "x has", len(x), "cols"
        print "ignore_x has", len(ignore_x), "cols"

    # this is probably used in 'cols" in v2, which can take numbers
    if returnStringX:
        x = ",".join(map(str, x))
        # ignored cols use 1-based "C<n>" names
        ignore_x = ",".join(map(lambda x: "C" + str(x+1), ignore_x))

    if not noPrint:
        print "\nx:", x
        print "\nignore_x:", ignore_x

    if returnIgnoreX:
        return ignore_x
    else:
        return x
| bikash/h2o-dev | py2/h2o_glm.py | Python | apache-2.0 | 29,794 | [
"Gaussian"
] | 50156cba0eb891a05b5290d5f5510292727685716bedfbe86793f3106504a794 |
"""
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
from distutils.version import LooseVersion
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
    """Return a discrete, sub-sampled Ricker (Mexican hat) wavelet.

    The wavelet is evaluated at integer sample positions 0..resolution-1,
    centered at `center`, with characteristic width `width`.
    """
    grid = np.linspace(0, resolution - 1, resolution)
    offset = grid - center
    # normalization constant of the Ricker wavelet
    amplitude = 2 / (np.sqrt(3 * width) * np.pi ** .25)
    envelope = np.exp(-offset ** 2 / (2 * width ** 2))
    return amplitude * (1 - offset ** 2 / width ** 2) * envelope
def ricker_matrix(width, resolution, n_components):
    """Return a dictionary of L2-normalized Ricker (Mexican hat) wavelets.

    Rows are `n_components` wavelets of the given `width`, with centers
    evenly spread over the sample range 0..resolution-1.
    """
    centers = np.linspace(0, resolution - 1, n_components)
    D = np.vstack([ricker_function(resolution, c, width) for c in centers])
    # normalize each atom (row) to unit Euclidean norm
    row_norms = np.sqrt((D ** 2).sum(axis=1))
    return D / row_norms[:, np.newaxis]
resolution = 1024
subsampling = 3  # subsampling factor
width = 100
n_components = resolution // subsampling

# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
                        n_components=n_components)
# multi-width dictionary: 5 widths, each with 1/5 of the atoms, stacked row-wise
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                                    n_components=n_components // 5)
                      for w in (10, 50, 100, 500, 1000))]

# Generate a signal: piecewise constant, +3 on the first quarter, -1 elsewhere
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.

# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha,
#  transform_n_nonzero_coefs, color)
estimators = [('OMP', 'omp', None, 15, 'navy'),
              ('Lasso', 'lasso_lars', 2, None, 'turquoise'), ]
lw = 2
# Avoid FutureWarning about default value change when numpy >= 1.14
lstsq_rcond = None if LooseVersion(np.__version__) >= '1.14' else -1

plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
                                         ('fixed width', 'multiple widths'))):
    plt.subplot(1, 2, subplot + 1)
    plt.title('Sparse coding against %s dictionary' % title)
    plt.plot(y, lw=lw, linestyle='--', label='Original signal')
    # Do a wavelet approximation with each sparse coding method
    for title, algo, alpha, n_nonzero, color in estimators:
        coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
                            transform_alpha=alpha, transform_algorithm=algo)
        x = coder.transform(y.reshape(1, -1))
        density = len(np.flatnonzero(x))
        # reconstruct the signal from the sparse code
        x = np.ravel(np.dot(x, D))
        squared_error = np.sum((y - x) ** 2)
        plt.plot(x, color=color, lw=lw,
                 label='%s: %s nonzero coefs,\n%.2f error'
                       % (title, density, squared_error))

    # Soft thresholding debiasing: re-fit the nonzero coefficients by
    # least squares on the selected atoms
    coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
                        transform_alpha=20)
    x = coder.transform(y.reshape(1, -1))
    _, idx = np.where(x != 0)
    x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y, rcond=lstsq_rcond)
    x = np.ravel(np.dot(x, D))
    squared_error = np.sum((y - x) ** 2)
    plt.plot(x, color='darkorange', lw=lw,
             label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error'
                   % (len(idx), squared_error))
    plt.axis('tight')
    plt.legend(shadow=False, loc='best')
plt.subplots_adjust(.04, .07, .97, .90, .09, .2)
plt.show()
| chrsrds/scikit-learn | examples/decomposition/plot_sparse_coding.py | Python | bsd-3-clause | 4,220 | [
"Gaussian"
] | 413942b675b572cb28f8dbb5f248222a756b812eb849070253f1c3030be517f6 |
import cProfile
import astropy.table as table
import numpy as np
import common_settings
import continuum_fit_pca
import mock_core_with_shell
from data_access.hdf5_spectrum_container import Hdf5SpectrumContainer
from data_access.qso_data import QSORecord
from physics_functions import comoving_distance
from python_compat import range
# toggle 3D visualization of the mock forest (requires mayavi)
draw_graph = False
if draw_graph:
    # noinspection PyPackageRequirements
    from mayavi import mlab

# capacity limits for the HDF5 spectrum container
MAX_SPECTRA = 220000
MAX_WAVELENGTH_COUNT = 4992

settings = common_settings.Settings()  # type: common_settings.Settings
fit_pca = continuum_fit_pca.ContinuumFitPCA()
# rest-frame Lyman-alpha wavelength in Angstroms
lya_center = 1215.67
cd = comoving_distance.ComovingDistance()
mock_forest = mock_core_with_shell.MockForest(settings.get_mock_resolution(), settings.get_mock_fractional_width(),
                                              settings.get_mock_shell_separation(), settings.get_mock_core_radius(),
                                              settings.get_mock_shell_radius())
def profile_main():
    """Replace the Lyman-alpha forest region of every QSO spectrum with a mock forest.

    For each QSO: fits a PCA continuum, masks the forest wavelength range,
    converts redshifts to comoving 3D positions, samples the mock forest
    there, and writes the modified flux back in place to the HDF5 container.
    Optionally accumulates points for a mayavi 3D plot (draw_graph).
    """
    qso_record_table = table.Table(np.load(settings.get_qso_metadata_npy()))
    qso_record_list = [QSORecord.from_row(i) for i in qso_record_table]
    qso_spectra_hdf5 = settings.get_qso_spectra_hdf5()
    output_spectra = Hdf5SpectrumContainer(qso_spectra_hdf5, readonly=False, create_new=False,
                                           num_spectra=MAX_SPECTRA)

    total_ar_x = np.array([])
    total_ar_y = np.array([])
    total_ar_z = np.array([])
    total_ar_c = np.array([])
    for n in range(len(qso_record_list)):
        qso_rec = qso_record_list[n]
        redshift = qso_rec.z

        # load data
        ar_wavelength = output_spectra.get_wavelength(n)
        ar_flux = output_spectra.get_flux(n)
        ar_ivar = output_spectra.get_ivar(n)

        # convert wavelength to redshift
        ar_redshift = ar_wavelength / lya_center - 1

        # fit continuum
        ar_rest_wavelength = ar_wavelength / (1 + redshift)
        fit_result = fit_pca.fit(ar_rest_wavelength, ar_flux, ar_ivar, qso_redshift=redshift,
                                 boundary_value=np.nan, mean_flux_constraint_func=None)

        # transmission is only meaningful in the ly_alpha range, and also requires a valid fit for that wavelength
        # use the same range as in 1404.1801 (2014)
        forest_mask = np.logical_and(ar_wavelength > 1040 * (1 + redshift),
                                     ar_wavelength < 1200 * (1 + redshift))
        fit_mask = ~np.isnan(fit_result.spectrum)
        effective_mask = forest_mask & fit_mask
        # ar_wavelength_masked = ar_wavelength[effective_mask]
        # ar_fit_spectrum_masked = fit_result.spectrum[effective_mask]

        # convert redshift to distance
        ar_dist = np.asarray(cd.fast_comoving_distance(ar_redshift[effective_mask]))

        # unit vector from (RA, dec), scaled by comoving distance
        dec = qso_rec.dec * np.pi / 180
        ra = qso_rec.ra * np.pi / 180
        x_unit = np.cos(dec) * np.cos(ra)
        y_unit = np.cos(dec) * np.sin(ra)
        z_unit = np.sin(dec)
        scale = 1
        ar_x = x_unit * ar_dist * scale
        ar_y = y_unit * ar_dist * scale
        # Note: this is the geometric coordinate, not redshift
        ar_z = z_unit * ar_dist * scale

        ar_mock_forest_array = mock_forest.get_forest(ar_x, ar_y, ar_z)
        ar_delta_t = - ar_mock_forest_array
        ar_rel_transmittance = ar_delta_t + 1

        # set the forest part of the spectrum to the mock forest
        # (mock_fraction = 1 means the original forest flux is fully replaced)
        mock_fraction = 1
        ar_flux[effective_mask] = \
            ar_flux[effective_mask] * (1 - mock_fraction) + \
            ar_rel_transmittance * fit_result.spectrum[effective_mask] * mock_fraction

        if draw_graph:
            display_mask = ar_mock_forest_array > 0.
            total_ar_x = np.append(total_ar_x, ar_x[display_mask])
            total_ar_y = np.append(total_ar_y, ar_y[display_mask])
            total_ar_z = np.append(total_ar_z, ar_z[display_mask])
            total_ar_c = np.append(total_ar_c, ar_mock_forest_array[display_mask])

        # overwrite the existing forest
        output_spectra.set_flux(n, ar_flux)
        if n % 1000 == 0:
            print(n)

    if draw_graph:
        mlab.points3d(total_ar_x, total_ar_y, total_ar_z, total_ar_c,
                      mode='sphere', scale_mode='vector',
                      scale_factor=20, transparent=True, vmin=0, vmax=1, opacity=0.03)
        mlab.show()
# run under cProfile when profiling is enabled in settings
if settings.get_profile():
    cProfile.run('profile_main()', sort=2, filename='create_mock_forests.prof')
else:
    profile_main()
| yishayv/lyacorr | create_mock_forests.py | Python | mit | 4,534 | [
"Mayavi"
] | 319ffea39cb4509677000e5c908cc2470cb24d49a2a5fb034b3f6c008e118bc7 |
import logging
import json
import requests
from lxml import etree
from requests.exceptions import RequestException
from datetime import datetime
from moxie.core.kv import kv_store
from moxie.core.service import ProviderException
from . import TransportRTIProvider
logger = logging.getLogger(__name__)
CACHE_KEY = "ox-p-r"
CACHE_KEY_UPDATE = CACHE_KEY + "_updated"
class OxfordParkAndRideProvider(TransportRTIProvider):
    """Provider for Oxfordshire Park and Ride website.

    Scrapes the county council's car-park page, caches the parsed
    availability data in the key-value store, and serves it for POIs
    whose identifiers match known park-and-ride car parks.
    """

    # maps the car park's display name on the scraped page to its OSM identifier
    _CARPARKS = {
        'Pear Tree Park & Ride OX2 8JD': "osm:4333225",
        'Redbridge Park & Ride OX1 4XG': "osm:2809915",
        'Seacourt Park & Ride OX2 0HP': "osm:34425625",
        'Thornhill Park & Ride OX3 8DP': "osm:24719725",
        'Water Eaton Park & Ride OX2 8HA': "osm:4329908",
    }

    provides = {'p-r': "Park and Rides Live Information"}

    def __init__(self, url="http://voyager.oxfordshire.gov.uk/Carpark.aspx", timeout=4):
        # url: page to scrape; timeout: HTTP timeout in seconds
        self.url = url
        self.timeout = timeout

    def handles(self, doc, rti_type=None):
        """Return True if doc is one of the known park-and-ride car parks."""
        for ident in doc.identifiers:
            if ident in self._CARPARKS.values():
                return True
        return False

    def invoke(self, doc, rti_type=None, place_ident=None):
        """Return (services, messages, rti_type, title) for a matching doc, else None."""
        for ident in doc.identifiers:
            if ident in self._CARPARKS.values():
                data = self.get_data()
                services = data.get(ident)
                messages = []
                title = self.provides.get(rti_type)
                return services, messages, rti_type, title
        return None

    def get_all(self):
        """Get data from all park and rides, with each entry's identifier inlined."""
        return {'park_and_rides': [dict(v, identifier=k) for k, v in self.get_data().items()]}

    def import_data(self):
        """Fetch the page, parse it and refresh the cached data (plus timestamp).

        Raises ProviderException if the HTTP request fails.
        """
        try:
            response = requests.get(self.url, timeout=self.timeout)
            response.raise_for_status()
        # NOTE(review): binding the exception as `re` shadows the `re` module name locally
        except RequestException as re:
            logger.warning('Error in request to Park & Ride info', exc_info=True,
                           extra={
                               'data': {
                                   'url': self.url}
                           })
            raise ProviderException
        else:
            data = self.parse_html(response.text)
            kv_store.set(CACHE_KEY, json.dumps(data))
            kv_store.set(CACHE_KEY_UPDATE, datetime.now().isoformat())

    def get_data(self):
        """
        Return the cached park-and-ride availability dictionary.

        Raises ProviderException if nothing has been imported into the cache yet.
        """
        data = kv_store.get(CACHE_KEY)
        if data:
            return json.loads(data)
        else:
            raise ProviderException

    def parse_html(self, html):
        """Parse the scraped HTML page.

        :param html: HTML content as a string
        :return dictionary keyed by OSM identifier with name/spaces/capacity/
                percentage (occupancy)/unavailable per car park; empty on parse failure
        """
        carparks = {}
        try:
            xml = etree.fromstring(html, parser=etree.HTMLParser())
            tbody = xml.find(".//div[@class='cloud-amber']/table/tbody")
            for tr in tbody:
                name = tr[1].text.strip()
                identifier = self._CARPARKS.get(name, None)
                if not identifier:
                    # unknown car park row: skip
                    continue
                if tr[6].text == 'Faulty':
                    # sensor reported faulty: treat as no data
                    spaces = 0
                    unavailable = True
                else:
                    try:
                        spaces = int(tr[3].text)
                        unavailable = False
                    except ValueError:
                        spaces = 0
                        unavailable = True
                carparks[identifier] = {
                    'name': name,
                    'spaces': spaces,
                    'capacity': int(tr[4].text),
                    # percentage of occupied spaces, 0-100
                    'percentage': int(100 * (1 - float(spaces) / float(tr[4].text))),
                    'unavailable': unavailable,
                }
        except Exception as e:
            # deliberately best-effort: a malformed page yields an empty dict
            logger.exception("Couldn't parse the park and rides page", exc_info=True)
        return carparks
return carparks
# ad-hoc manual test: fetch and parse the live page (Python 2 print statement)
if __name__ == '__main__':
    provider = OxfordParkAndRideProvider()
    response = requests.get(provider.url, timeout=provider.timeout)
    print provider.parse_html(response.text)
| ox-it/moxie | moxie/transport/providers/park_and_rides.py | Python | apache-2.0 | 4,299 | [
"Amber"
] | e67c17e8f5f1413a0794119345c085825a27d426e4ff5f01eaf7b0186df73b69 |
# proxy module
from __future__ import absolute_import
from mayavi.components.common import *
| enthought/etsproxy | enthought/mayavi/components/common.py | Python | bsd-3-clause | 93 | [
"Mayavi"
] | d68518957bd913641fb802d7c4da9d5fab4fd5df303e8c197cff1ed636a8ead4 |
#!/usr/bin/env python
"""
Pazookle Audio Programming Language
Copyright (C) 2013 Bob Harris. All rights reserved.
This file is part of Pazookle.
Pazookle is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option)
any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__version__ = "0.01"
__author__ = "Bob Harris (zackobelsch@gmail.com)"
__copyright__ = "(C) 2013 Bob Harris. GNU GPLv3."
import os.path
from sys import stderr
from types import GeneratorType
from ugen import UGen
from output import TextOut
class ShredulerError(Exception):
    """Raised for shreduler-level failures (bad shreds, bad yields, etc.)."""

    def __init__(self, message):
        super(ShredulerError, self).__init__(message)
class Shreduler(object):
    """Manage audio shreds.

    Debug settings as of this writing:
        shreds:   shred activation
        pipeline: values percolating through the pipeline

    Nota bene: self.clock is an integer that counts by one with each sample.
               self.now is a floating point value, now >= clock, which
               .. ideally is such that floor(now) == clock;  when
               .. floor(now) > clock we generate one sample and
               .. increment clock
    """
    # $$$ modify shred protocol so that a shred can return a list or tuple
    # .. containing no more than one time; the other entries will all be
    # .. event or message objects; the idea is that the shred will wait
    # .. for the earliest event yielded (or the time if it expires first);
    # .. the probem though is that we have no way to communicate to the
    # .. shred *which* event has triggered

    # class-level counter handing out unique shred ids, and the shared debug-flag set
    id = 0
    debug = {}

    @staticmethod
    def set_debug(debugNames):
        # accept a single name or a list/tuple of names
        if (type(debugNames) not in (list,tuple)): debugNames = [debugNames]
        for debugName in debugNames: Shreduler.debug[debugName] = True

    @staticmethod
    def unset_debug(debugNames):
        # accept a single name or a list/tuple of names; missing names are ignored
        if (type(debugNames) not in (list,tuple)): debugNames = [debugNames]
        for debugName in debugNames:
            try: del Shreduler.debug[debugName]
            except KeyError: pass

    #-- construction --

    def __init__(self,sinks=None,samplingRate=44100):
        if (sinks == None): self.sinks = []
        else: self.sinks = sinks
        self.samplingRate = samplingRate
        self.set_times()
        self._shreds = []
        self._lastYield = {} # maps shred id to (time,duration) of last yield
        self._updateOrder = None
        self._pipelineChange = False
        self._clock = 0
        self._now = 0.0

    def set_times(self):
        # time-unit constants expressed in samples, derived from the sampling rate
        self.msec = self.samplingRate / 1000.0
        self.sec = float(self.samplingRate)
        self.min = self.samplingRate * 60.0
        self.hour = self.samplingRate * 3600.0

    #-- attribute access (by intent, these are NOT properties) --

    def clock(self):
        return self._clock

    def now(self):
        return self._now

    #-- shred scheduling --

    def spork(self,shredFunction,shredName=None):
        """Register a generator as a shred; return its new shred id."""
        # $$$ keep a dict that maps id to (name,function), to be used for
        # .. operations like kill_shred() and yield ("shred finished",shredId)
        if (not isinstance(shredFunction,GeneratorType)):
            if (shredName == None): shredName = "(unnamed)"
            msg = "shred \"%s\" is invalid, it's not a python generator" % shredName
            raise ShredulerError(msg)
        Shreduler.id += 1
        shredId = Shreduler.id
        if (shredName == None): shredName = shredFunction.__name__
        shredName = "%s.%s" % (shredName,shredId)
        self._lastYield[shredId] = (None,None)
        # when=None means "run as soon as possible"
        self.insert_shred(None,shredId,shredFunction,shredName)
        return shredId

    def run(self):
        """Run shreds until none remain."""
        while (self._shreds != []):
            self.run_earliest_shred()

    def run_earliest_shred(self):
        """Pop the earliest-scheduled shred, advance audio to its time, run it once.

        The shred's yield value is its next wake-up: a relative duration, or
        ("absolute", time) for an absolute time.  Yielding without advancing
        time twice in a row is an error.
        """
        (when,shredId,shredFunction,shredName) = self._shreds.pop(0)
        if (when != None):
            # generate samples until the pipeline clock catches up to this shred's time
            while (self._clock+1 <= when):
                self.run_sample_pipe()
        if ("shreds" in Shreduler.debug):
            print >>stderr, "running %s" % shredName
        try:
            if (when == None): self._now = self._clock
            else: self._now = when
            when = shredFunction.next()
            if ("shreds" in Shreduler.debug):
                if (type(when) == tuple):
                    print >>stderr, "%s yielded (%s)" % (shredName,",".join([str(x) for x in when]))
                else:
                    print >>stderr, "%s yielded %s" % (shredName,when)
        except StopIteration:
            # shred finished normally; forget it
            if ("shreds" in Shreduler.debug):
                print >>stderr, "%s has completed" % shredName
            del self._lastYield[shredId]
            return
        if (type(when) != tuple):
            # plain number: relative duration from now
            if (when != None): when += self._now
        elif (when[0] == "absolute") and (len(when) == 2):
            (_,when) = when
        else:
            msg = "incomprehensible yield from shred \"%s\": (%s)" % (shredName,",".join([str(x) for x in when]))
            raise ShredulerError(msg)
        if (when == None) or (when < self._now):
            msg = "shred \"%s\" yielded %s <= %s" % (shredName,when,self._now)
            raise ShredulerError(msg)
        if (when == self._now) and (self._lastYield[shredId] == (when,0)):
            msg = "shred \"%s\" yielded without advancing time twice in a row (now=%s)" \
                % (shredName,when)
            raise ShredulerError(msg)
        (lastWhen,_) = self._lastYield[shredId]
        if (lastWhen == None): duration = 0
        else: duration = when - lastWhen
        self._lastYield[shredId] = (when,duration)
        self.insert_shred(when,shredId,shredFunction,shredName)

    def insert_shred(self,when,shredId,shredFunction,shredName):
        """Insert a shred into the schedule, kept sorted by wake-up time.

        when=None shreds sort before all timed shreds (run immediately).
        """
        # $$$ replace this with a priority queue implementation
        # insert new shred before any that are waiting for a later time
        insertIx = len(self._shreds)
        if (when == None):
            for (ix,(oldWhen,_,_,_)) in enumerate(self._shreds):
                if (oldWhen == None): continue
                insertIx = ix
                break
        else:
            for (ix,(oldWhen,_,_,_)) in enumerate(self._shreds):
                if (oldWhen <= when): continue
                insertIx = ix
                break
        self._shreds.insert(insertIx,(when,shredId,shredFunction,shredName))

    #-- pipline construction --

    def add_sink(self,sink):
        if (sink not in self.sinks):
            self.sinks += [sink]
            self.pipeline_change()

    def remove_sink(self,sink):
        if (sink in self.sinks):
            self.sinks.remove(sink)
            self.pipeline_change()

    def pipeline_change(self):
        # marks the cached update order stale; recomputed on next sample
        self._pipelineChange = True

    def find_update_order(self):
        """Topologically sort the ugen graph so sources update before sinks."""
        # the algorithm here is based on the depth-first search topological
        # sorting algorithm at en.wikipedia.org/wiki/Topological_sorting
        # the main modification is that when we encounter a cycle, we simply
        # ignore that link rather than abort the process
        # NOTE(review): the assert message below %-formats with an undefined
        # name `numFuncs` and has no format specifier; if the assert ever
        # fires it will raise NameError instead of AssertionError
        assert (self.sinks != []), \
            "internal error: find_update_order caled with no sinks" \
            % numFuncs
        self._order = []
        self._markedNodes = {}
        for node in self.sinks:
            if (node.id in self._markedNodes): continue
            if (node.ignoreInputlessSink):
                # if a sink has no input, and it's not generative, ignore it
                if (node.dependencies() == []): continue
            self.visit(node)
        order = list(self._order)
        del self._order
        del self._markedNodes
        return order

    def visit(self,node):
        # depth-first post-order visit; appends node after all its predecessors
        if (node.id in self._markedNodes): return
        self._markedNodes[node.id] = True
        for predecessor in node.dependencies():
            self.visit(predecessor)
        self._order += [node]

    #-- pipline percolation --

    def run_sample_pipe(self):
        """Advance the clock by one sample and percolate it through the pipeline."""
        self._clock += 1
        if ("pipeline" in Shreduler.debug):
            print >>stderr, "\n=== generating sample #%s ===" % self._clock
        elif ("progress" in Shreduler.debug):
            if (self._clock % 1000 == 0):
                print >>stderr, "=== generating sample #%s ===" % self._clock
        if (self.sinks == []):
            if ("pipeline" in Shreduler.debug):
                print >>stderr, "(pipeline has no sinks, so no percolation)"
            return
        if (self._pipelineChange):
            self._updateOrder = None
            self._pipelineChange = False
        if (self._updateOrder == None):
            self._updateOrder = self.find_update_order()
            if ("pipeline" in Shreduler.debug):
                print >>stderr, "update order: [%s]" % ",".join([str(node) for node in self._updateOrder])
        for node in self._updateOrder:
            node.percolate()
# initialization: module-level singleton shreduler with two text-output sinks
console = TextOut(name="console", channels=1)
console2 = TextOut(name="console2",channels=2)
zook = Shreduler(sinks=[console,console2],samplingRate=44100)
# convenience alias so callers can write zook-free now()
now = zook.now
UGen.set_shreduler(zook)
# paths relative to this module: package dir, its parent, and the bundled clips dir
zookPath = os.path.dirname(os.path.realpath(__file__))
zookParentPath = os.path.abspath(os.path.join(zookPath,os.path.pardir))
zookClipsPath = os.path.join(zookParentPath,"clips")
| zacko-belsch/pazookle | pazookle/shred.py | Python | gpl-3.0 | 8,653 | [
"VisIt"
] | 1d1eb3ff2b50b62efcfc3eae11755362c650e8b225a85013838109743823f93e |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
************************************
**espresso.analysis.AllParticlePos**
************************************
"""
from espresso import pmi
from _espresso import analysis_AllParticlePos
class AllParticlePosLocal(object):
    """Abstract local base class for observables.

    Subclasses are expected to provide `cxxclass` (the wrapped C++ class);
    gatherAllPositions delegates to it.
    """
    def gatherAllPositions(self):
        # forward to the C++ implementation bound via cxxclass
        return self.cxxclass.gatherAllPositions(self)
# on the controller, expose a PMI proxy that broadcasts gatherAllPositions
# to all workers
if pmi.isController :
    class AllParticlePos(object):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            pmicall = [ "gatherAllPositions" ]
            )
| BackupTheBerlios/espressopp | src/analysis/AllParticlePos.py | Python | gpl-3.0 | 1,414 | [
"ESPResSo"
] | 77d7544d6fbd2ebf9f423f2244e0313dfa4e23dc0182fc05dfd5bd4d38c54354 |
from . import base
from .. import stats
from .. import items
from .. import dialogue
from .. import context
from .. import aibrain
from .. import effects
from .. import animobs
from .. import targetarea
from .. import invocations
from . import animals
from .. import enchantments
from .. import items
import random
from . import treasuretype
# *******************************
# *** ENCOUNTER LEVEL 1 ***
# *******************************
# *******************************
# *** ENCOUNTER LEVEL 2 ***
# *******************************
# *******************************
# *** ENCOUNTER LEVEL 3 ***
# *******************************
class PewterGolem( base.Monster ):
    """Encounter-level-3 construct found in buildings; crushing melee attack."""
    name = "Pewter Golem"
    statline = { stats.STRENGTH: 17, stats.TOUGHNESS: 13, stats.REFLEXES: 8, \
        stats.INTELLIGENCE: 1, stats.PIETY: 11, stats.CHARISMA: 13 }
    SPRITENAME = "monster_constructs.png"
    FRAME = 10
    TEMPLATES = (stats.CONSTRUCT,)
    MOVE_POINTS = 8
    VOICE = None
    HABITAT = ( context.HAB_BUILDING, context.SET_EVERY,
     context.MTY_CONSTRUCT, )
    ENC_LEVEL = 3
    COMBAT_AI = aibrain.SteadyAI()
    TREASURE = treasuretype.Standard()
    ATTACK = items.Attack( (1,6,0), element = stats.RESIST_CRUSHING )
    def init_monster( self ):
        # grant 3 levels of the Defender class, matching ENC_LEVEL
        self.levels.append( base.Defender( 3, self ) )
# *******************************
# *** ENCOUNTER LEVEL 4 ***
# *******************************
# *******************************
# *** ENCOUNTER LEVEL 5 ***
# *******************************
class ClockworkSoldier( base.Monster ):
    """Encounter-level-5 dungeon construct with a slashing attack.

    The negative RESIST_LIGHTNING entry presumably marks a lightning
    vulnerability — confirm against the stats module's resist semantics.
    """
    name = "Clockwork Soldier"
    statline = { stats.STRENGTH: 18, stats.TOUGHNESS: 18, stats.REFLEXES: 12, \
        stats.INTELLIGENCE: 1, stats.PIETY: 12, stats.CHARISMA: 8, \
        stats.RESIST_LIGHTNING: -150 }
    SPRITENAME = "monster_constructs.png"
    FRAME = 11
    TEMPLATES = (stats.CONSTRUCT,)
    MOVE_POINTS = 8
    VOICE = None
    HABITAT = ( context.HAB_EVERY, context.HAB_TUNNELS, context.SET_EVERY,
     context.MAP_DUNGEON,
     context.MTY_CONSTRUCT, context.MTY_FIGHTER )
    ENC_LEVEL = 5
    COMBAT_AI = aibrain.BrainDeadAI()
    # Clockwork soldiers don't have normal treasure, but may drop a nice sword.
    TREASURE = treasuretype.Standard( (items.SWORD,), swag_chance=25, swag_quality=2, scale=0 )
    ATTACK = items.Attack( (1,8,0), element = stats.RESIST_SLASHING )
    def init_monster( self ):
        # grant 5 levels of the Defender class, matching ENC_LEVEL
        self.levels.append( base.Defender( 5, self ) )
# *******************************
# *** ENCOUNTER LEVEL 6 ***
# *******************************
# *******************************
# *** ENCOUNTER LEVEL 7 ***
# *******************************
class LivingPotion( base.Monster ):
    # Encounter level 7 construct caster: throws acid and poison blasts
    # and can restore health to a wounded ally at range.
    name = "Living Potion"
    statline = { stats.STRENGTH: 10, stats.TOUGHNESS: 10, stats.REFLEXES: 12, \
        stats.INTELLIGENCE: 12, stats.PIETY: 12, stats.CHARISMA: 1,
        stats.RESIST_SLASHING: 25, stats.RESIST_PIERCING: 25, stats.RESIST_WATER: 200 }
    SPRITENAME = "monster_constructs.png"
    FRAME = 15
    TEMPLATES = (stats.CONSTRUCT,)
    MOVE_POINTS = 12
    VOICE = None
    HABITAT = ( context.SET_EVERY,
        context.MTY_CONSTRUCT,
        context.DES_WATER )
    ENC_LEVEL = 7
    TREASURE = treasuretype.Standard( (items.POTION,) )
    COMBAT_AI = aibrain.SteadySpellAI()
    ATTACK = items.Attack( (1,6,0), element = stats.RESIST_CRUSHING )
    # Two single-target attack spells plus a ranged heal; each costs 1 MP.
    # Each blast deals 2d6 on a successful opposed roll, 1d6 otherwise.
    TECHNIQUES = ( invocations.MPInvocation( "Acid Blast",
        effects.OpposedRoll( att_modifier=10, def_stat=stats.REFLEXES, on_success = (
            effects.HealthDamage( (2,6,0), stat_bonus=None, element=stats.RESIST_ACID, anim=animobs.GreenCloud )
        ,), on_failure = (
            effects.HealthDamage( (1,6,0), stat_bonus=None, element=stats.RESIST_ACID, anim=animobs.GreenCloud )
        ,) ), com_tar=targetarea.SingleTarget(), ai_tar=invocations.TargetEnemy(), shot_anim=animobs.GreenSpray, mp_cost=1 ),
        invocations.MPInvocation( "Poison Blast",
        effects.OpposedRoll( att_modifier=10, def_stat=stats.REFLEXES, on_success = (
            effects.HealthDamage( (2,6,0), stat_bonus=None, element=stats.RESIST_POISON, anim=animobs.PoisonCloud )
        ,), on_failure = (
            effects.HealthDamage( (1,6,0), stat_bonus=None, element=stats.RESIST_POISON, anim=animobs.PoisonCloud )
        ,) ), com_tar=targetarea.SingleTarget(), ai_tar=invocations.TargetEnemy(), shot_anim=animobs.GreenComet, mp_cost=1 ),
        invocations.MPInvocation( "Healing Potion",
        effects.HealthRestore( dice=(3,8,0) ),
        com_tar=targetarea.SingleTarget(reach=10), ai_tar=invocations.TargetWoundedAlly(),
        exp_tar=targetarea.SinglePartyMember(), shot_anim=animobs.YellowVortex, mp_cost=1
        )
    )
    def init_monster( self ):
        self.levels.append( base.Spellcaster( 7, self ) )
class AnimatedSword( base.Monster ):
    """A flying enchanted blade that fights on its own (encounter level 7)."""
    name = "Animated Sword"
    SPRITENAME = "monster_constructs.png"
    FRAME = 16
    statline = {
        stats.STRENGTH: 12, stats.TOUGHNESS: 12, stats.REFLEXES: 12,
        stats.INTELLIGENCE: 1, stats.PIETY: 10, stats.CHARISMA: 1,
        stats.RESIST_SLASHING: 50, stats.COUNTER_ATTACK: 50,
    }
    TEMPLATES = (stats.CONSTRUCT,)
    MOVE_POINTS = 10
    VOICE = None
    HABITAT = (context.SET_EVERY, context.MTY_CONSTRUCT, context.DES_AIR)
    ENC_LEVEL = 7
    COMBAT_AI = aibrain.SteadyAI()
    ATTACK = items.Attack((1, 12, 0), element=stats.RESIST_SLASHING)

    def init_monster(self):
        # Seven beast levels to match the encounter level.
        self.levels.append(base.Beast(7, self))
class AnimatedFlail( base.Monster ):
    """An enchanted flail that swings itself at intruders (encounter level 7)."""
    name = "Animated Flail"
    SPRITENAME = "monster_constructs.png"
    FRAME = 17
    statline = {
        stats.STRENGTH: 12, stats.TOUGHNESS: 12, stats.REFLEXES: 12,
        stats.INTELLIGENCE: 1, stats.PIETY: 10, stats.CHARISMA: 1,
        stats.RESIST_CRUSHING: 50,
    }
    TEMPLATES = (stats.CONSTRUCT,)
    MOVE_POINTS = 10
    VOICE = None
    HABITAT = (context.SET_EVERY, context.MTY_CONSTRUCT, context.DES_EARTH)
    ENC_LEVEL = 7
    COMBAT_AI = aibrain.SteadyAI()
    ATTACK = items.Attack((3, 6, 0), element=stats.RESIST_CRUSHING)

    def init_monster(self):
        # Seven beast levels to match the encounter level.
        self.levels.append(base.Beast(7, self))
class CreepingCoins( base.Monster ):
    # Encounter level 7 construct: an animated treasure hoard with a
    # cone-shaped cold blast and high-value loot.
    name = "Creeping Coins"
    statline = { stats.STRENGTH: 12, stats.TOUGHNESS: 12, stats.REFLEXES: 12, \
        stats.INTELLIGENCE: 1, stats.PIETY: 10, stats.CHARISMA: 1, \
        stats.RESIST_COLD: 155 }
    SPRITENAME = "monster_constructs.png"
    FRAME = 18
    TEMPLATES = (stats.CONSTRUCT,)
    MOVE_POINTS = 6
    VOICE = None
    HABITAT = ( context.HAB_CAVE, context.SET_RENFAN,
        context.MTY_CONSTRUCT,
        context.DES_ICE )
    ENC_LEVEL = 7
    TREASURE = treasuretype.High()
    COMBAT_AI = aibrain.SteadyAI()
    ATTACK = items.Attack( (2,6,0), element = stats.RESIST_CRUSHING )
    # 1d10 cold in a short cone on a successful opposed roll, 1d6 otherwise.
    TECHNIQUES = ( invocations.MPInvocation( "Cold Blast",
        effects.OpposedRoll( att_modifier=10, def_stat=stats.REFLEXES, on_success = (
            effects.HealthDamage( (1,10,0), stat_bonus=None, element=stats.RESIST_COLD, anim=animobs.SnowCloud )
        ,), on_failure = (
            effects.HealthDamage( (1,6,0), stat_bonus=None, element=stats.RESIST_COLD, anim=animobs.SnowCloud )
        ,) ), com_tar=targetarea.Cone(reach=5), ai_tar=invocations.TargetEnemy(), mp_cost=1 ),
    )
    def init_monster( self ):
        # NOTE(review): Defender level 6 vs ENC_LEVEL 7 -- confirm the
        # one-level offset is intentional.
        self.levels.append( base.Defender( 6, self ) )
class AnimatedCandlestick( base.Monster ):
    # Encounter level 7 construct: its melee attack can set targets
    # burning, and it can cast a single-target Fireball (5 MP).
    name = "Candlestick"
    statline = { stats.STRENGTH: 12, stats.TOUGHNESS: 12, stats.REFLEXES: 12, \
        stats.INTELLIGENCE: 12, stats.PIETY: 12, stats.CHARISMA: 12 }
    SPRITENAME = "monster_constructs.png"
    FRAME = 19
    TEMPLATES = (stats.CONSTRUCT,)
    MOVE_POINTS = 10
    VOICE = None
    HABITAT = ( context.HAB_BUILDING, context.SET_EVERY,
        context.MTY_CONSTRUCT,
        context.DES_FIRE )
    ENC_LEVEL = 7
    COMBAT_AI = aibrain.SteadyAI()
    # On a (penalized) successful opposed roll the target gains a
    # low-grade burning enchantment in addition to the 2d6 piercing hit.
    ATTACK = items.Attack( (2,6,0), element = stats.RESIST_PIERCING, extra_effect=
        effects.OpposedRoll( att_modifier=-10, on_success = (
            effects.Enchant( enchantments.BurnLowEn, anim=animobs.RedCloud )
        ,))
    )
    TECHNIQUES = ( invocations.MPInvocation( "Fireball",
        effects.OpposedRoll( def_stat=stats.REFLEXES, on_success = (
            effects.HealthDamage( (1,10,0), stat_bonus=None, element=stats.RESIST_FIRE, anim=animobs.RedCloud ),
        ), on_failure = (
            effects.HealthDamage( (1,6,0), stat_bonus=None, element=stats.RESIST_FIRE, anim=animobs.RedCloud ),
        ) ), com_tar=targetarea.SingleTarget(), ai_tar=invocations.TargetEnemy(), shot_anim=animobs.Fireball, mp_cost=5 ),
    )
    def init_monster( self ):
        self.levels.append( base.Beast( 7, self ) )
# *******************************
# *** ENCOUNTER LEVEL 8 ***
# *******************************
class LivingStatue( base.Monster ):
    # Encounter level 8 construct with a line-shaped magma blast attack.
    name = "Living Statue"
    statline = { stats.STRENGTH: 14, stats.TOUGHNESS: 13, stats.REFLEXES: 10, \
        stats.INTELLIGENCE: 10, stats.PIETY: 10, stats.CHARISMA: 1, \
        stats.RESIST_FIRE: 155 }
    SPRITENAME = "monster_constructs.png"
    FRAME = 13
    TEMPLATES = (stats.CONSTRUCT,)
    MOVE_POINTS = 10
    VOICE = None
    HABITAT = ( context.HAB_BUILDING, context.HAB_TUNNELS, context.SET_EVERY,
        context.MTY_CONSTRUCT,
        context.DES_FIRE, context.DES_SOLAR )
    ENC_LEVEL = 8
    TREASURE = treasuretype.Low()
    COMBAT_AI = aibrain.SteadyAI()
    ATTACK = items.Attack( (1,6,0), element = stats.RESIST_SLASHING )
    # 2d8 fire along a line on a successful opposed roll, 1d6 otherwise.
    TECHNIQUES = ( invocations.MPInvocation( "Magma Blast",
        effects.OpposedRoll( att_modifier=10, def_stat=stats.REFLEXES, on_success = (
            effects.HealthDamage( (2,8,0), stat_bonus=None, element=stats.RESIST_FIRE, anim=animobs.OrangeExplosion )
        ,), on_failure = (
            effects.HealthDamage( (1,6,0), stat_bonus=None, element=stats.RESIST_FIRE, anim=animobs.OrangeExplosion )
        ,) ), com_tar=targetarea.Line(), ai_tar=invocations.TargetEnemy(), mp_cost=1 ),
    )
    def init_monster( self ):
        # NOTE(review): Defender level 7 vs ENC_LEVEL 8 -- confirm the
        # one-level offset is intentional.
        self.levels.append( base.Defender( 7, self ) )
# *******************************
# *** ENCOUNTER LEVEL 9 ***
# *******************************
# ********************************
# *** ENCOUNTER LEVEL 10 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 11 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 12 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 13 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 14 ***
# ********************************
class FlamingSword( base.Monster ):
    # Encounter level 14 construct; its habitat includes the
    # SUMMON_FLAMINGSWORD context, so it also appears as a summoned ally.
    name = "Flaming Sword"
    statline = { stats.STRENGTH: 16, stats.TOUGHNESS: 14, stats.REFLEXES: 18, \
        stats.INTELLIGENCE: 12, stats.PIETY: 20, stats.CHARISMA: 14,
        stats.RESIST_SLASHING: 50, stats.MAGIC_DEFENSE: 50, stats.COUNTER_ATTACK: 50 }
    SPRITENAME = "monster_constructs.png"
    FRAME = 9
    TEMPLATES = (stats.CONSTRUCT,)
    MOVE_POINTS = 12
    VOICE = None
    HABITAT = ( context.SET_EVERY,
        context.MTY_CONSTRUCT,
        context.DES_AIR, context.DES_FIRE,
        context.SUMMON_FLAMINGSWORD )
    ENC_LEVEL = 14
    COMBAT_AI = aibrain.SteadyAI()
    # The strike always adds 3d8 fire; on a successful opposed roll the
    # fire damage gains an INTELLIGENCE bonus and applies a burn enchantment.
    ATTACK = items.Attack( (3,8,0), element = stats.RESIST_SLASHING, extra_effect=
        effects.OpposedRoll( on_success = (
            effects.HealthDamage( (3,8,0), stat_bonus=stats.INTELLIGENCE, element=stats.RESIST_FIRE, anim=animobs.RedCloud ),
            effects.Enchant( enchantments.BurnLowEn )
        ), on_failure = (
            effects.HealthDamage( (3,8,0), stat_bonus=None, element=stats.RESIST_FIRE, anim=animobs.RedCloud ),
        ) )
    )
    def init_monster( self ):
        self.levels.append( base.Defender( 14, self ) )
# ********************************
# *** ENCOUNTER LEVEL 15 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 16 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 17 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 18 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 19 ***
# ********************************
# ********************************
# *** ENCOUNTER LEVEL 20 ***
# ********************************
| jwvhewitt/dmeternal | old_game/monsters/constructs.py | Python | gpl-2.0 | 12,535 | [
"BLAST"
] | ba4cc7ffda48f9afbdbef8329dc62b0521106acbf14d803a09f09296a0e0e82b |
#!/usr/bin/python
# Author: David Stelter
# License: MIT License (See LICENSE.md)
# Copyright (c) 2017
# All rights reserved.
import os, sys, getopt
import datetime
__version__ = 0.2
#################### UNITS ####################
# Only used with --units flag
econv = 1.0 # Additional factor for unit conversion if needed (energies)
lconv = 1.0 # Additional factor for unit conversion if needed (lengths)
dconv = 1.0 # Additional factor for unit conversion if needed (densities)
###############################################
print('\nEMC 2 LT conversion tool: v%s\n' % __version__)
def helpme():
    """Print command-line usage information for the converter and return."""
    print 'Help for the EMC 2 LT conversion tool\n'
    print 'Input takes a list of files in EMC .prm format to be read.'
    print 'Additional styles (bond, angle, etc) can be modified via the',\
        'command line. Any valid LAMMPS style can be used.\n'
    print 'Styles include:'
    print '--pair-style='
    print '--bond-style='
    print '--angle-style='
    print '--dihedral-style='
    print '--improper-style=\n'
    print 'Default styles are lj/cut/coul/long, harmonic, harmonic, harmonic,',\
        'harmonic \n'
    print 'Other commands:'
    print '--name= provides basename for output file if desired\n'
    print '--units flag for manual units (no parameter needed)\n'
    print 'Usage example:'
    print 'emcprm2lt.py file1 file2 --bond-style=harmonic --angle-style=harmonic'
    print ''
def Abort():
    """Print a notice and terminate the script immediately via sys.exit()."""
    print 'Aborting...'
    sys.exit()
def WriteInit():
    """Append a generic moltemplate "In Init" section to the output file.

    Relies on module-level state: ``foutput`` (open output file), the
    style strings (``bstyle``/``astyle``/``dstyle``/``istyle``/``pstyle``),
    the ``angle_flag``/``torsion_flag``/``improp_flag`` section flags and
    the ``inner``/``cutoff``/``pair14`` values from the DEFINE section.
    """
    # Write generic LAMMPS settings, likely need additional on a per-ff basis
    foutput.write(' write_once("In Init") {\n')
    foutput.write(' # Warning: This is a very generic "In Init" section, further\n')
    foutput.write(' # modification prior to any simulation is extremely likely\n')
    foutput.write(' units real\n')
    foutput.write(' atom_style full\n')
    foutput.write(' bond_style hybrid %s\n' % bstyle)
    # Only emit style lines for the sections present in the .prm input.
    if angle_flag:
        foutput.write(' angle_style hybrid %s\n' % astyle)
    if torsion_flag:
        foutput.write(' dihedral_style hybrid %s\n' % dstyle)
    if improp_flag:
        foutput.write(' improper_style hybrid %s\n' % istyle)
    # Inner/outer cutoffs are converted to angstrom via lconv.
    foutput.write(' pair_style hybrid %s %f %f\n' % (pstyle,
        float(inner[0])*lconv, float(cutoff[0])*lconv))
    if pair14[0] == 'OFF':
        foutput.write(' special_bonds lj/coul 0.0 0.0 0.0\n')
    else:
        print 'Warning: special_bonds needed, add to "In Init" section\n'
    foutput.write(' } # end init\n')
def Units(length_flag, energy_flag, density_flag):
    """Set the global conversion factors lconv/econv/dconv.

    Converts the units declared in the .prm DEFINE section to LAMMPS
    'real' units (angstrom, kcal/mol, g/cm^3).  Each ``*_flag`` argument
    is True when that quantity needs conversion.  Returns the three
    factors as a tuple (they are also set globally).
    """
    # Check flags for all units, determine what conversions are needed, hard-coded for LAMMPS 'real'
    print 'Attempting to auto-convert units... This should always be double-checked',\
        ' especially for unique potential styles'
    global lconv; global econv; global dconv
    if length_flag:
        print 'Warning: length scale does not match LAMMPS real units, attempting conversion to angstroms'
        if length[0] == 'NANOMETER':
            lconv = 10.0
            print ' nanometer -> angstrom'
        elif length[0] == 'MICROMETER':
            lconv = 10000.0
            print ' micrometer -> angstrom'
        elif length[0] == 'METER':
            lconv = 10000000000.0
            print ' meter -> angstrom'
        else:
            # Unknown length unit: leave lconv at its current value.
            print 'Length units NOT converted'
    if energy_flag:
        print 'Warning: energy units do not match LAMMPS real units, attempting conversion to kcal/mol'
        if energy[0] == 'KJ/MOL':
            econv = 0.239006
            print ' kj/mol -> kcal/mol'
        elif energy[0] == 'J/MOL':
            econv = 0.000239006
            print ' j/mol -> kcal/mol'
        elif energy[0] == 'CAL/MOL':
            econv = 0.001
            print ' cal/mol -> kcal/mol'
        else:
            print 'Energy units NOT converted'
    if density_flag:
        print 'Warning: density units do not match LAMMPS real units, attempting conversion to gram/cm^3'
        if density[0] == 'KG/M^3':
            dconv = 0.001
            print ' kg/m^3 -> g/cm^3'
        else:
            print 'Density units NOT converted'
    return lconv, econv, dconv
def ChkPotential(manual_flag, angle_flag, torsion_flag, improp_flag):
# Check type of potential, determine type of unit conversion is necessary
global beconv
if angle_flag:
global aeconv
if torsion_flag:
global deconv
if improp_flag:
global ieconv
if manual_flag == False:
# Chk bond potential
if bstyle == '' or bstyle == 'harmonic':
beconv = econv / (2*pow(lconv,2))
else:
print 'Cannot find bond potential type, use manual units'
Abort()
if angle_flag:
if astyle == '' or astyle == 'harmonic':
aeconv = econv
elif astyle == 'cosine/squared':
aeconv = econv / 2
elif astyle == 'sdk':
aeconv = econv
else:
print 'Cannot find angle potential type, use manual units'
Abort()
# torsion and improper not implemented fully
elif torsion_flag:
if dstyle == '' or dstyle == 'harmonic':
deconv = econv
else:
print 'Cannot find torsion potential type, use manual units'
Abort()
elif improp_flag:
if istyle == '' or istyle == 'harmonic':
ieconv = econv
else:
print 'Cannot find improper potential type, use manual units'
Abort()
else:
# Modify as needed
print 'Warning: Manual units used, set potential conversion units in script'
beconv = 1
if angle_flag:
aeconv = 1
if torsion_flag:
deconv = 1
if improp_flag:
ieconv = 1
### Parse input ###
if len(sys.argv) == 1:
helpme()
sys.exit()
manual_units = False # Turned on via command line
args = list(sys.argv[1:])
myopts, args = getopt.gnu_getopt(args, 'fh', ['pair-style=', 'bond-style=', 'angle-style=',
'dihedral-style=', 'improper-style=', 'name=', 'units'])
filenames = list(args)
pstyle = ''; bstyle = ''; astyle = ''; dstyle = ''; istyle = ''
name = ''
for opt, arg in myopts:
if opt in ('-f'):
filenames = arg
elif opt in ('--pair-style'):
pstyle = arg
elif opt in ('--bond-style'):
bstyle = arg
elif opt in ('--angle-style'):
astyle = arg
elif opt in ('--dihedral-style'):
dstyle = arg
elif opt in ('--improper-style'):
istyle = arg
elif opt in ('--name'):
name = arg
elif opt in ('--units'):
manual_units = True
print 'Manual units enabled, modify python script accordingly'
elif opt in ('-h', '--help'):
helpme()
sys.exit()
### Check input filenames, make sure they exist ###
print 'Converting: '
for i in range(len(filenames)):
if os.path.isfile(filenames[i]):
print '', filenames[i]
else:
print 'invalid filename:', filenames[i]
Abort()
print 'from EMC .prm to moltemplate .lt format\n'
### Open all files ###
f = [open(fname, 'r') for fname in filenames]
### All these settings from DEFINE should be list of fixed size ###
ffname = [[] for i in range(len(f))]
fftype = [[] for i in range(len(f))]
version = [[] for i in range(len(f))]
created1 = [[] for i in range(len(f))]
created2 = [[] for i in range(len(f))]
length = [[] for i in range(len(f))]
energy = [[] for i in range(len(f))]
density = [[] for i in range(len(f))]
mix = [[] for i in range(len(f))]
nbonded = [[] for i in range(len(f))]
inner = [[] for i in range(len(f))]
cutoff = [[] for i in range(len(f))]
pair14 = [[] for i in range(len(f))]
angle_def = [[] for i in range(len(f))]
torsion_def = [[] for i in range(len(f))]
improp_def = [[] for i in range(len(f))] # not all prm have this
### Parse DEFINE section, save info for each file ###
for i in range(len(f)):
grab = False
for line in f[i]:
if line.strip() == 'ITEM DEFINE':
grab = True
elif line.strip() == 'ITEM END':
grab = False
elif grab:
if line.startswith('FFNAME'):
ffname[i] = line.split()[1].strip()
if line.startswith('FFTYPE'):
fftype[i] = line.split()[1].strip()
if line.startswith('VERSION'):
version[i] = line.split()[1].strip()
if line.startswith('CREATED'):
created1[i] = line.split()[1].strip()
created2[i] = line.split()[2].strip()
if line.startswith('LENGTH'):
length[i] = line.split()[1].strip()
if line.startswith('ENERGY'):
energy[i] = line.split()[1].strip()
if line.startswith('DENSITY'):
density[i] = line.split()[1].strip()
if line.startswith('MIX'):
mix[i] = line.split()[1].strip()
if line.startswith('NBONDED'):
nbonded[i] = line.split()[1].strip()
if line.startswith('INNER'):
inner[i] = line.split()[1].strip()
if line.startswith('CUTOFF'):
cutoff[i] = line.split()[1].strip()
if line.startswith('PAIR14'):
pair14[i] = line.split()[1].strip()
if line.startswith('ANGLE'):
angle_def[i] = line.split()[1].strip()
if line.startswith('TORSION'):
torsion_def[i] = line.split()[1].strip()
if line.startswith('IMPROP'):
improp_def[i] = line.split()[1].strip()
### Sanity Checks ###
# All input files must agree on force-field identity, units and cutoffs.
for i in range(len(f)):
    for j in range(len(f)):
        if ffname[j] != ffname[i]:
            print 'force field files do not match'
            Abort()
        if length[j] != length[i]:
            print 'units not identical between files'
            Abort()
        if energy[j] != energy[i]:
            print 'units not identical between files'
            Abort()
        if density[j] != density[i]:
            print 'units not identical between files'
            Abort()
        if inner[j] != inner[i]:
            print 'inner cutoff not identical between files'
            Abort()
        if cutoff[j] != cutoff[i]:
            print 'cutoff not identical between files'
            Abort()
        if pair14[j] != pair14[i]:
            print '1-4 pair interaction not consistent between files'
            Abort()
### Check if sections exist in PRM file ###
# A DEFINE value of 'WARN' apparently marks a present section -- the
# corresponding flag enables writing that section later.
angle_flag = False; torsion_flag = False; improp_flag = False
for i in range(len(f)):
    if angle_def[i] == 'WARN':
        angle_flag = True
    if torsion_def[i] == 'WARN':
        torsion_flag = True
    if improp_def[i] == 'WARN':
        improp_flag = True
### Check which units to use, trip convert flags ###
# LAMMPS 'real' units expect angstrom / kcal/mol / g per cc.
length_flag = False; energy_flag = False; density_flag = False
if length[0] != 'ANGSTROM':
    length_flag = True
if energy[0] != 'KCAL/MOL':
    energy_flag = True
if density[0] != 'G/CC':
    density_flag = True
if manual_units == True:
    # --units overrides auto-conversion entirely.
    length_flag = False
    energy_flag = False
    density_flag = False
Units(length_flag, energy_flag, density_flag)
### Read Whole File, save to lists ###
# Non-crucial sections include
# BONDS, ANGLE, TORSION, IMPROP, NONBOND
# Read all sections every time, only output sections when flags tripped
# Reopen the files to rewind to the beginning.
f = [open(fname, 'r') for fname in filenames]
masses = []; nonbond = []; bond = []; angle = []; torsion = []; improp = []
equiv = []
for i in range(len(f)):
    # One flag per ITEM section; True while the cursor is inside it.
    MASS = False
    NONBOND = False
    BOND = False
    ANGLE = False
    TORSION = False
    IMPROP = False
    EQUIV = False
    for line in f[i]:
        # Each section follows the same pattern: enter on its header,
        # leave on 'ITEM END', and otherwise collect tokenized data rows
        # (skipping comment and blank lines).
        if line.strip() == 'ITEM MASS':
            MASS = True
        elif line.strip() == 'ITEM END':
            MASS = False
        elif MASS:
            if not line.startswith('#'):
                if not line.startswith('\n'):
                    masses.append(line.strip().split())
        if line.strip() == 'ITEM NONBOND':
            NONBOND = True
        elif line.strip() == 'ITEM END':
            NONBOND = False
        elif NONBOND:
            if not line.startswith('#'):
                if not line.startswith('\n'):
                    nonbond.append(line.strip().split())
        if line.strip() == 'ITEM BOND':
            BOND = True
        elif line.strip() == 'ITEM END':
            BOND = False
        elif BOND:
            if not line.startswith('#'):
                if not line.startswith('\n'):
                    bond.append(line.strip().split())
        if line.strip() == 'ITEM ANGLE':
            ANGLE = True
        elif line.strip() == 'ITEM END':
            ANGLE = False
        elif ANGLE:
            if not line.startswith('#'):
                if not line.startswith('\n'):
                    angle.append(line.strip().split())
        if line.strip() == 'ITEM TORSION':
            TORSION = True
        elif line.strip() == 'ITEM END':
            TORSION = False
        elif TORSION:
            if not line.startswith('#'):
                if not line.startswith('\n'):
                    torsion.append(line.strip().split())
        if line.strip() == 'ITEM IMPROP':
            IMPROP = True
        elif line.strip() == 'ITEM END':
            IMPROP = False
        elif IMPROP:
            if not line.startswith('#'):
                if not line.startswith('\n'):
                    improp.append(line.strip().split())
        if line.strip() == 'ITEM EQUIVALENCE':
            EQUIV = True
        elif line.strip() == 'ITEM END':
            EQUIV = False
        elif EQUIV:
            if not line.startswith('#'):
                if not line.startswith('\n'):
                    equiv.append(line.strip().split())
### Sanity checks before writing LT files ###
# Check Equiv
for i in range(len(equiv)):
for j in range(len(equiv)):
if (equiv[i][0] == equiv[j][0]) and (equiv[i] != equiv[j]):
print 'Error: Identical atom types with different equivalences'
Abort()
# Check Masses
for i in range(len(masses)):
for j in range(len(masses)):
if (masses[i][0] == masses[j][0]) and (masses[i][1] != masses[j][1]):
print 'Error: Identical types with different mass'
Abort()
# Check Nonbond
for i in range(len(nonbond)):
for j in range(len(nonbond)):
if (nonbond[i][0] == nonbond[j][0]) and (nonbond[i][1] == nonbond[j][1]) and ((nonbond[i][2] != nonbond[j][2]) or (nonbond[i][3] != nonbond[j][3])):
print nonbond[i], nonbond[j]
print 'Error: Identical types with different pair-interactions'
Abort()
### Remove double equivalences ###
for i in range(len(equiv)):
once = True
for j in range(len(equiv)):
if (equiv[i][0] == equiv[j][0]) and once:
once = False
elif (equiv[i][0] == equiv[j][0]):
equiv[j][1] = None
equiv[j][2] = 'duplicate'
if len(equiv[i]) != 6:
print 'Warning: Incorrect equivalence formatting for type %s' % equiv[i][0],\
'skipping type, topology may not be complete'
equiv[i][1] = None
equiv[i][2] = 'invalid_format'
### Check Potential Styles and Set Units ###
ChkPotential(manual_units, angle_flag, torsion_flag, improp_flag)
### Set output LT file ###
fname = 'ff_output.lt'  # fallback; always overwritten below
if name == '':
    fname = ffname[0] + '.lt'
else:
    fname = name + '.lt'
foutput = open(fname, 'w')
### Output to LT format ###
foutput.write('# Autogenerated by EMC 2 LT tool v%s on %s\n' % (__version__, str(datetime.date.today())))
foutput.write('#\n# ')
# Record the exact command line used to generate this file.
for i in range(len(sys.argv)):
    foutput.write('%s ' % sys.argv[i])
foutput.write('\n')
foutput.write('#\n')
foutput.write('# Adapted from EMC by Pieter J. in \'t Veld\n')
foutput.write('# Originally written as, FFNAME:%s STYLE:%s VERSION:%s on %s %s\n' %
    (ffname[0], fftype[0], version[0], created1[0], created2[0]))
foutput.write('\n')
# Open the moltemplate object block; closed at the end of the script.
foutput.write('%s {\n' % ffname[0])
# Charges not necessary? emc file assign charges in smiles, which would
# be in the per-molecule files created by moltemplate user... not here
### Mass Info ###
foutput.write(' write_once("Data Masses") {\n')
# Write one mass line per atom type that still has a valid equivalence
# record (duplicates/invalid records were disabled by setting [1]=None).
for i in range(len(masses)):
    # BUG FIX: the original tested equiv[i][1], pairing masses[] and
    # equiv[] by index.  The lists share type names but not necessarily
    # ordering or length (an IndexError was possible), so look up the
    # equivalence record by type name instead.
    keep = False
    for j in range(len(equiv)):
        if equiv[j][0] == masses[i][0] and equiv[j][1] != None:
            keep = True
    if keep:
        foutput.write(' @atom:%s %f # %s\n' %
            (masses[i][0], float(masses[i][1]), masses[i][0]))
foutput.write(' } # end of atom masses\n\n')
### Equiv Info ###
# Write Equivalence
foutput.write(' # ----- EQUIVALENCE CATEGORIES for bonded interaction lookup -----\n')
for i in range(len(equiv)):
if equiv[i][1] != None:
foutput.write(' replace{ @atom:%s @atom:%s_b%s_a%s_d%s_i%s}\n' %
(equiv[i][0], equiv[i][0], equiv[i][2], equiv[i][3], equiv[i][4], equiv[i][5]))
foutput.write(' # END EQUIVALENCE\n\n')
# Sanity check equivalences vs masses
for i in range(len(equiv)):
check = None
for j in range(len(masses)):
if equiv[i][0] == masses[j][0]:
check = 'success'
if check == None:
print equiv[i], masses[j]
print 'Atom defined in Equivlances, but not found in Masses'
Abort()
# Sanity check masses vs equivalences
for i in range(len(masses)):
check = None
for j in range(len(masses)):
if masses[i][0] == equiv[j][0]:
check = 'success'
if check == None:
print masses[i], equiv[j]
print 'Atom defined in Masses, but not found in Equivlances'
Abort()
### Nonbonded Info ###
if pstyle == '':
    print 'Warning: no non-bonded potential provided, assuming lj/cut/coul/long'
    pstyle = 'lj/cut/coul/long'
foutput.write(' write_once("In Settings") {\n')
foutput.write(' # ----- Non-Bonded interactions -----\n')
# Add new types from equivalence
# NOTE: this loop appends to `nonbond` while iterating over index ranges
# taken at the start of each pass; newly appended rows are only seen by
# later values of i, not the current pass.
for i in range(len(equiv)):
    once = True
    for j in range(len(nonbond)):
        # Get terms for new types: copy the parameters of the type this
        # one is equivalent to (equiv[i][1]) onto the new type name.
        if (equiv[i][0] != equiv[i][1]) and (equiv[i][1] == nonbond[j][0]):
            if not equiv[i][1] == nonbond[j][1]:
                line = '%s %s %s %s' % (equiv[i][0], nonbond[j][1], nonbond[j][2], nonbond[j][3])
                nonbond.append(line.split())
            if once:
                # Diagonal (self-interaction) term, added exactly once.
                once = False
                line = '%s %s %s %s' % (equiv[i][0], equiv[i][0], nonbond[j][2], nonbond[j][3])
                nonbond.append(line.split())
        if (equiv[i][0] != equiv[i][1]) and (equiv[i][1] == nonbond[j][1]):
            line = '%s %s %s %s' % (equiv[i][0], nonbond[j][0], nonbond[j][2], nonbond[j][3])
            if line.split() != nonbond[-1]:
                nonbond.append(line.split())
for i in range(len(nonbond)):
    atom1name = None
    atom2name = None
    stylename = pstyle
    # SDK pair styles encode the functional form per-pair in columns 4/5.
    if pstyle == 'lj/sdk' or pstyle == 'lj/sdk/coul/long':
        stylename = 'lj%s_%s' % (nonbond[i][4], nonbond[i][5])
    # Cross Terms + Diagonal, normal
    # Resolve each plain type name to its equivalence-decorated name.
    for j in range(len(equiv)):
        if nonbond[i][0] == equiv[j][0]:
            atom1name = '%s_b%s_a%s_d%s_i%s' % (nonbond[i][0], equiv[j][2], equiv[j][3], equiv[j][4], equiv[j][5])
        if nonbond[i][1] == equiv[j][0]:
            atom2name = '%s_b%s_a%s_d%s_i%s' % (nonbond[i][1], equiv[j][2], equiv[j][3], equiv[j][4], equiv[j][5])
    if atom1name == None or atom2name == None:
        print atom1name, atom2name, nonbond[i]
        print 'Error: Atom in Nonbonded Pairs not found in Equivalences'
        Abort()
    # Column 3 is epsilon (energy), column 2 is sigma (length).
    foutput.write(' pair_coeff @atom:%s @atom:%s %s %f %f' %
        (atom1name, atom2name, stylename, float(nonbond[i][3])*econv, float(nonbond[i][2])*lconv))
    foutput.write(' # %s-%s\n' % (nonbond[i][0], nonbond[i][1]))
foutput.write(' } # end of nonbonded parameters\n\n')
### Bond Info ###
if bstyle == '':
print 'Warning: no bond potential provided, assuming harmonic'
bstyle == 'harmonic'
foutput.write(' write_once("In Settings") {\n')
foutput.write(' # ----- Bonds -----\n')
for i in range(len(bond)):
foutput.write(' bond_coeff @bond:%s-%s %s %f %f' %
(bond[i][0], bond[i][1], bstyle, float(bond[i][2])*beconv, float(bond[i][3])*lconv))
foutput.write(' # %s-%s\n' % (bond[i][0], bond[i][1]))
foutput.write(' }\n\n')
foutput.write(' write_once("Data Bonds By Type") {\n')
for i in range(len(bond)):
foutput.write(' @bond:%s-%s @atom:*_b%s_a*_d*_i* @atom:*_b%s_a*_d*_i*\n' %
(bond[i][0], bond[i][1], bond[i][0], bond[i][1]))
foutput.write(' } # end of bonds\n\n')
### Angle Info ###
if angle_flag:
if astyle == '':
print 'Warning: no angle potential provided, assuming harmonic'
astyle == 'harmonic'
foutput.write(' write_once("In Settings") {\n')
foutput.write(' # ----- Angles -----\n')
for i in range(len(angle)):
if (len(angle[i]) > 5): # Check if extra data in angle array
foutput.write(' angle_coeff @angle:%s-%s-%s %s %f %f' %
(angle[i][0], angle[i][1], angle[i][2], str(angle[i][5]), float(angle[i][3])*aeconv, float(angle[i][4])))
foutput.write(' # %s-%s-%s\n' % (angle[i][0], angle[i][1], angle[i][2]))
else:
foutput.write(' angle_coeff @angle:%s-%s-%s %s %f %f' %
(angle[i][0], angle[i][1], angle[i][2], astyle, float(angle[i][3])*aeconv, float(angle[i][4])))
foutput.write(' # %s-%s-%s\n' % (angle[i][0], angle[i][1], angle[i][2]))
foutput.write(' }\n\n')
foutput.write(' write_once("Data Angles By Type") {\n')
for i in range(len(angle)):
foutput.write(' @angle:%s-%s-%s @atom:*_b*_a%s_d*_i* @atom:*_b*_a%s_d*_i* @atom:*_b*_a%s_d*_i*\n' %
(angle[i][0], angle[i][1], angle[i][2], angle[i][0], angle[i][1], angle[i][2]))
foutput.write(' } # end of angles\n\n')
### Torsion/Dihedral Info ###a
# Incomplete
if torsion_flag:
if dstyle == '':
print 'Warning: no dihedral/torsion potential provided, assuming harmonic'
dstyle == 'harmonic'
foutput.write(' write_once("In Settings") {\n')
foutput.write(' # ----- Dihedrals -----\n')
for i in range(len(torsion)):
foutput.write(' dihedral_coeff @dihedral:%s-%s-%s-%s %s %f %f %f %f\n' %
(torsion[i][0], torsion[i][1], torsion[i][2], torsion[i][3], dstyle, float(torsion[i][4])*deconv, float(torsion[i][5]), float(torsion[i][6])))
foutput.write(' }\n\n')
foutput.write(' write_once("Data Dihedrals By Type") {\n')
for i in range(len(torsion)):
foutput.write(' @dihedral:%s-%s-%s-%s @atom:*_b*_a*_d%s_i* @atom:*_b*_a*_d%s_i* @atom:*_b*_a*_d%s_i* @atom:*_b*_a*_d%s_i*' %
(torsion[i][0], torsion[i][1], torsion[i][2], torsion[i][3], torsion[i][0], torsion[i][1], torsion[i][2], torsion[i][3]))
foutput.write(' } # end of dihedrals\n\n')
### Improper Info ###
# Incomplete
ieconv = econv # improper coeff conversion
if improp_flag:
if istyle == '':
print 'Warning: no improper potential provided, assuming harmonic'
istyle == 'harmonic'
foutput.write(' write_once("In Settings") {\n')
foutput.write(' # ----- Impropers -----\n')
# As discussed, a check for convention of impropers is probably needed here
for i in range(len(improp)):
foutput.write(' improper_coeff @improper:%s-%s-%s-%s %s %f %f\n' %
(improp[i][0], improp[i][1], improp[i][2], improp[i][3], istyle,
float(improp[i][4]), float(improp[i][5])))
foutput.write(' }\n\n')
foutput.write(' write_once("Data Impropers By Type") {\n')
for i in range(len(improp)):
foutput.write(' @improper:%s-%s-%s-%s @atom:*_b*_a*_d*_i%s @atom:*_b*_a*_d*_i%s @atom:*_b*_a*_d*_i%s @atom:*_b*_a*_d*_i%s' %
(improp[i][0], improp[i][1], improp[i][2], improp[i][3], improp[i][0], improp[i][1], improp[i][2], improp[i][3]))
foutput.write(' } # end of impropers\n\n')
### Initialization Info ###
print 'Warning: Attempting to write generic "In Init" section,',\
    'further modification after this script is extremely likely'
WriteInit()
# Close the moltemplate object block opened in the header, then exit.
foutput.write('} # %s\n' % ffname[0])
sys.exit()
| smsaladi/moltemplate | moltemplate/force_fields/convert_EMC_files_to_LT_files/emcprm2lt.py | Python | bsd-3-clause | 24,255 | [
"LAMMPS"
] | b90c7f86241943487c36b9a63cbdec0c40e46c38a521608d15dfaf6a7d25ecd0 |
'''
The aim of a beat detection algorithm is to report the times at which a typical
human listener might tap their foot to a piece of music. As a result, most
metrics for evaluating the performance of beat tracking systems involve
computing the error between the estimated beat times and some reference list of
beat locations. Many metrics additionally compare the beat sequences at
different metric levels in order to deal with the ambiguity of tempo.
Based on the methods described in:
Matthew E. P. Davies, Norberto Degara, and Mark D. Plumbley.
"Evaluation Methods for Musical Audio Beat Tracking Algorithms",
Queen Mary University of London Technical Report C4DM-TR-09-06
London, United Kingdom, 8 October 2009.
See also the Beat Evaluation Toolbox:
https://code.soundsoftware.ac.uk/projects/beat-evaluation/
Conventions
-----------
Beat times should be provided in the form of a 1-dimensional array of beat
times in seconds in increasing order. Typically, any beats which occur before
5s are ignored; this can be accomplished using
:func:`mir_eval.beat.trim_beats()`.
Metrics
-------
* :func:`mir_eval.beat.f_measure`: The F-measure of the beat sequence, where an
estimated beat is considered correct if it is sufficiently close to a
reference beat
* :func:`mir_eval.beat.cemgil`: Cemgil's score, which computes the sum of
Gaussian errors for each beat
* :func:`mir_eval.beat.goto`: Goto's score, a binary score which is 1 when at
least 25\% of the estimated beat sequence closely matches the reference beat
sequence
* :func:`mir_eval.beat.p_score`: McKinney's P-score, which computes the
cross-correlation of the estimated and reference beat sequences represented
as impulse trains
* :func:`mir_eval.beat.continuity`: Continuity-based scores which compute the
proportion of the beat sequence which is continuously correct
* :func:`mir_eval.beat.information_gain`: The Information Gain of a normalized
beat error histogram over a uniform distribution
'''
import numpy as np
import collections
from . import util
import warnings
# The maximum allowable beat time, in seconds; passed as the upper bound
# to util.validate_events() when checking input beat arrays.
MAX_TIME = 30000.
def trim_beats(beats, min_beat_time=5.):
    '''Discard beats that occur before ``min_beat_time``.

    A common preprocessing step before computing beat metrics.

    :parameters:
        - beats : np.ndarray
            Array of beat times in seconds.
        - min_beat_time : float
            Minimum beat time to allow, default 5
    :returns:
        - beats_trimmed : np.ndarray
            Trimmed beat array.
    '''
    # Boolean mask keeps only beats at or after the cutoff.
    keep = beats >= min_beat_time
    return beats[keep]
def validate(reference_beats, estimated_beats):
    '''Check that the input annotations to a metric look like valid beat
    time arrays, and throw helpful errors if not.
    :parameters:
        - reference_beats : np.ndarray
            reference beat times, in seconds
        - estimated_beats : np.ndarray
            estimated beat times, in seconds
    :raises:
        - ValueError
            Thrown when the provided annotations are not valid.
    '''
    # Empty annotations make every metric 0, so warn rather than raise.
    for label, beats in (("Reference", reference_beats),
                         ("Estimated", estimated_beats)):
        if beats.size == 0:
            warnings.warn(label + " beats are empty.")
    # Delegate the per-array structural checks to util, bounded by MAX_TIME.
    for beats in (reference_beats, estimated_beats):
        util.validate_events(beats, MAX_TIME)
def _get_reference_beat_variations(reference_beats):
'''
Return metric variations of the reference beats
:parameters:
- reference_beats : np.ndarray
beat locations in seconds
:returns:
- reference_beats : np.ndarray
Original beat locations
- off_beat : np.ndarray
180 degrees out of phase from the original beat locations
- double : np.ndarray
Beats at 2x the original tempo
- half_odd : np.ndarray
Half tempo, odd beats
- half_even : np.ndarray
Half tempo, even beats
'''
# Create annotations at twice the metric level
interpolated_indices = np.arange(0, reference_beats.shape[0]-.5, .5)
original_indices = np.arange(0, reference_beats.shape[0])
double_reference_beats = np.interp(interpolated_indices,
original_indices,
reference_beats)
# Return metric variations:
# True, off-beat, double tempo, half tempo odd, and half tempo even
return (reference_beats,
double_reference_beats[1::2],
double_reference_beats,
reference_beats[::2],
reference_beats[1::2])
def f_measure(reference_beats,
              estimated_beats,
              f_measure_threshold=0.07):
    '''
    Compute the F-measure of the estimated beats, where an estimated beat
    counts as a hit when it lies within f_measure_threshold seconds of an
    unmatched reference beat.
    :usage:
        >>> reference_beats = mir_eval.io.load_events('reference.txt')
        >>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
        >>> estimated_beats = mir_eval.io.load_events('estimated.txt')
        >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
        >>> f_measure = mir_eval.beat.f_measure(reference_beats,
                                                estimated_beats)
    :parameters:
        - reference_beats : np.ndarray
            reference beat times, in seconds
        - estimated_beats : np.ndarray
            estimated beat times, in seconds
        - f_measure_threshold : float
            Window size, in seconds, default 0.07
    :returns:
        - f_score : float
            The computed F-measure score
    :raises:
        - ValueError
            Thrown when the provided annotations are not valid.
    '''
    validate(reference_beats, estimated_beats)
    # With either sequence empty nothing can be matched; score is 0.
    if estimated_beats.size == 0 or reference_beats.size == 0:
        return 0.
    # Best-case one-to-one matching within the threshold window
    hits = util.match_events(reference_beats,
                             estimated_beats,
                             f_measure_threshold)
    n_hits = float(len(hits))
    precision = n_hits / len(estimated_beats)
    recall = n_hits / len(reference_beats)
    return util.f_measure(precision, recall)
def cemgil(reference_beats,
           estimated_beats,
           cemgil_sigma=0.04):
    '''
    Compute Cemgil's accuracy: each reference beat contributes a Gaussian
    score based on its distance to the closest estimated beat.  The score
    is reported for the raw annotations and as the maximum over all
    metrical variations of the reference.
    :usage:
        >>> reference_beats = mir_eval.io.load_events('reference.txt')
        >>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
        >>> estimated_beats = mir_eval.io.load_events('estimated.txt')
        >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
        >>> cemgil_score, cemgil_max = mir_eval.beat.cemgil(reference_beats,
                                                            estimated_beats)
    :parameters:
        - reference_beats : np.ndarray
            reference beat times, in seconds
        - estimated_beats : np.ndarray
            query beat times, in seconds
        - cemgil_sigma : float
            Sigma parameter of gaussian error windows, default 0.04
    :returns:
        - cemgil_score : float
            Cemgil's score for the original reference beats
        - cemgil_max : float
            The best Cemgil score for all metrical variations
    :raises:
        - ValueError
            Thrown when the provided annotations are not valid.
    '''
    validate(reference_beats, estimated_beats)
    # With either sequence empty there is nothing to score.
    if estimated_beats.size == 0 or reference_beats.size == 0:
        return 0., 0.
    # Gaussian normalization constant, hoisted out of the loops
    denom = 2.0 * cemgil_sigma ** 2
    accuracies = []
    for variation in _get_reference_beat_variations(reference_beats):
        total = 0
        for annotation in variation:
            # Error of the estimated beat closest to this annotation
            closest = np.min(np.abs(annotation - estimated_beats))
            total += np.exp(-closest ** 2 / denom)
        # Normalize by the average of the two sequence lengths
        total /= .5 * (estimated_beats.shape[0] + variation.shape[0])
        accuracies.append(total)
    # Raw (unvaried) score first, then the best across all variations
    return accuracies[0], np.max(accuracies)
def goto(reference_beats,
         estimated_beats,
         goto_threshold=0.35,
         goto_mu=0.2,
         goto_sigma=0.2):
    '''
    Calculate Goto's score, a binary 1 or 0 depending on some specific
    heuristic criteria
    :usage:
        >>> reference_beats = mir_eval.io.load_events('reference.txt')
        >>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
        >>> estimated_beats = mir_eval.io.load_events('estimated.txt')
        >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
        >>> goto_score = mir_eval.beat.goto(reference_beats, estimated_beats)
    :parameters:
        - reference_beats : np.ndarray
            reference beat times, in seconds
        - estimated_beats : np.ndarray
            query beat times, in seconds
        - goto_threshold : float
            Threshold of beat error for a beat to be "correct", default 0.35
        - goto_mu : float
            The mean of the beat errors in the continuously correct
            track must be less than this, default 0.2
        - goto_sigma : float
            The std of the beat errors in the continuously correct track must
            be less than this, default 0.2
    :returns:
        - goto_score : float
            Either 1.0 or 0.0 if some specific criteria are met
    :raises:
        - ValueError
            Thrown when the provided annotations are not valid.
    :references:
        .. [#] Matthew E. P. Davies, Norberto Degara, and
            Mark D. Plumbley. "Evaluation Methods for Musical Audio Beat
            Tracking Algorithms", Queen Mary University of London Technical
            Report C4DM-TR-09-06 London, United Kingdom, 8 October 2009.
        .. [#] M. Goto and Y. Muraoka, "Issues in evaluating beat
            tracking systems," in Working Notes of the IJCAI-97 Workshop on
            Issues in AI and Music - Evaluation and Assessment, 1997, pp. 9-16.
    '''
    validate(reference_beats, estimated_beats)
    # When estimated beats are empty, no beats are correct; metric is 0
    if estimated_beats.size == 0 or reference_beats.size == 0:
        return 0.
    # Error for each beat
    # NOTE(review): initialized to 1, and entries 0 and n-1 are never
    # updated by the loop below, so the first and last reference beats
    # always exceed goto_threshold — this contradicts the "first and last
    # will be 0" comment further down; confirm which is intended.
    beat_error = np.ones(reference_beats.shape[0])
    # Flag for whether the reference and estimated beats are paired
    paired = np.zeros(reference_beats.shape[0])
    # Keep track of Goto's three criteria
    goto_criteria = 0
    # NOTE(review): xrange is Python 2 only; this module predates Python 3.
    for n in xrange(1, reference_beats.shape[0]-1):
        # Get previous inner-reference-beat-interval
        previous_interval = 0.5*(reference_beats[n] - reference_beats[n-1])
        # Window start - in the middle of the current beat and the previous
        window_min = reference_beats[n] - previous_interval
        # Next inter-reference-beat-interval
        next_interval = 0.5*(reference_beats[n+1] - reference_beats[n])
        # Window end - in the middle of the current beat and the next
        window_max = reference_beats[n] + next_interval
        # Get estimated beats in the window
        beats_in_window = np.logical_and((estimated_beats >= window_min),
                                         (estimated_beats < window_max))
        # False negative/positive
        if beats_in_window.sum() == 0 or beats_in_window.sum() > 1:
            paired[n] = 0
            beat_error[n] = 1
        else:
            # Single beat is paired!
            paired[n] = 1
            # Get offset of the estimated beat and the reference beat
            offset = estimated_beats[beats_in_window] - reference_beats[n]
            # Scale by previous or next interval
            if offset < 0:
                beat_error[n] = offset/previous_interval
            else:
                beat_error[n] = offset/next_interval
    # Get indices of incorrect beats
    incorrect_beats = np.flatnonzero(np.abs(beat_error) > goto_threshold)
    # All beats are correct (first and last will be 0 so always correct)
    if incorrect_beats.shape[0] < 3:
        # Get the track of correct beats
        track = beat_error[incorrect_beats[0] + 1:incorrect_beats[-1] - 1]
        goto_criteria = 1
    else:
        # Get the track of maximal length
        track_len = np.max(np.diff(incorrect_beats))
        track_start = np.flatnonzero(np.diff(incorrect_beats) == track_len)[0]
        # Is the track length at least 25% of the song?
        if track_len - 1 > .25*(reference_beats.shape[0] - 2):
            goto_criteria = 1
            start_beat = incorrect_beats[track_start]
            end_beat = incorrect_beats[track_start + 1]
            track = beat_error[start_beat:end_beat + 1]
    # If we have a track
    if goto_criteria:
        # Are mean and std of the track less than the required thresholds?
        if np.mean(np.abs(track)) < goto_mu \
           and np.std(track, ddof=1) < goto_sigma:
            goto_criteria = 3
    # If all criteria are met, score is 100%!
    return 1.0*(goto_criteria == 3)
def p_score(reference_beats,
            estimated_beats,
            p_score_threshold=0.2):
    '''
    Get McKinney's P-score: the cross-correlation of the reference and
    estimated beat sequences represented as impulse trains, evaluated over
    a small lag window and normalized by the longer sequence length.
    :usage:
        >>> reference_beats = mir_eval.io.load_events('reference.txt')
        >>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
        >>> estimated_beats = mir_eval.io.load_events('estimated.txt')
        >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
        >>> p_score = mir_eval.beat.p_score(reference_beats, estimated_beats)
    :parameters:
        - reference_beats : np.ndarray
            reference beat times, in seconds
        - estimated_beats : np.ndarray
            query beat times, in seconds
        - p_score_threshold : float
            Window size will be
            p_score_threshold*median(inter_annotation_intervals),
            default 0.2
    :returns:
        - correlation : float
            McKinney's P-score
    :raises:
        - ValueError
            Thrown when the provided annotations are not valid.
    :references:
        .. [#] Matthew E. P. Davies, Norberto Degara, and
            Mark D. Plumbley. "Evaluation Methods for Musical Audio Beat
            Tracking Algorithms", Queen Mary University of London Technical
            Report C4DM-TR-09-06 London, United Kingdom, 8 October 2009.
        .. [#] M. F. McKinney, D. Moelants, M. E. P.
            Davies, and A. Klapuri, "Evaluation of audio beat tracking and
            music tempo extraction algorithms," Journal of New Music Research,
            vol. 36, no. 1, pp. 1-16, 2007.
    '''
    validate(reference_beats, estimated_beats)
    # When estimated beats are empty, no beats are correct; metric is 0
    if estimated_beats.size == 0 or reference_beats.size == 0:
        return 0.
    # Quantize beats to 10ms
    sampling_rate = int(1.0/0.010)
    # Shift beats so that the minimum in either sequence is zero
    offset = min(estimated_beats.min(), reference_beats.min())
    estimated_beats = np.array(estimated_beats - offset)
    reference_beats = np.array(reference_beats - offset)
    # Get the largest time index.  Use the builtin int: the np.int alias
    # was deprecated in NumPy 1.20 and removed in 1.24.
    end_point = int(np.ceil(np.max([np.max(estimated_beats),
                                    np.max(reference_beats)])))
    # Make impulse trains with impulses at beat locations
    reference_train = np.zeros(end_point*sampling_rate + 1)
    beat_indices = np.ceil(reference_beats*sampling_rate).astype(int)
    reference_train[beat_indices] = 1.0
    estimated_train = np.zeros(end_point*sampling_rate + 1)
    beat_indices = np.ceil(estimated_beats*sampling_rate).astype(int)
    estimated_train[beat_indices] = 1.0
    # Window size to take the correlation over
    # defined as .2*median(inter-annotation-intervals)
    annotation_intervals = np.diff(np.flatnonzero(reference_train))
    win_size = int(np.round(p_score_threshold*np.median(annotation_intervals)))
    # Get full correlation
    train_correlation = np.correlate(reference_train, estimated_train, 'full')
    # Get the middle element - note we are rounding down on purpose here.
    # Floor division keeps this an int under Python 3; plain / would yield
    # a float and break the slicing below.
    middle_lag = train_correlation.shape[0] // 2
    # Truncate to only valid lags (those corresponding to the window)
    start = middle_lag - win_size
    end = middle_lag + win_size + 1
    train_correlation = train_correlation[start:end]
    # Compute and return the P-score
    n_beats = np.max([estimated_beats.shape[0], reference_beats.shape[0]])
    return np.sum(train_correlation)/n_beats
def continuity(reference_beats,
               estimated_beats,
               continuity_phase_threshold=0.175,
               continuity_period_threshold=0.175):
    '''
    Get metrics based on how much of the estimated beat sequence is
    continually correct.
    :usage:
        >>> reference_beats = mir_eval.io.load_events('reference.txt')
        >>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
        >>> estimated_beats = mir_eval.io.load_events('estimated.txt')
        >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
        >>> CMLc, CMLt, AMLc, AMLt = mir_eval.beat.continuity(reference_beats,
                                                              estimated_beats)
    :parameters:
        - reference_beats : np.ndarray
            reference beat times, in seconds
        - estimated_beats : np.ndarray
            query beat times, in seconds
        - continuity_phase_threshold : float
            Allowable ratio of how far is the estimated beat
            can be from the reference beat, default 0.175
        - continuity_period_threshold : float
            Allowable distance between the inter-beat-interval
            and the inter-annotation-interval, default 0.175
    :returns:
        - CMLc : float
            Correct metric level, continuous accuracy
        - CMLt : float
            Correct metric level, total accuracy (continuity not required)
        - AMLc : float
            Any metric level, continuous accuracy
        - AMLt : float
            Any metric level, total accuracy (continuity not required)
    :raises:
        - ValueError
            Thrown when the provided annotations are not valid.
    :references:
        .. [#] Matthew E. P. Davies, Norberto Degara, and
            Mark D. Plumbley. "Evaluation Methods for Musical Audio Beat
            Tracking Algorithms", Queen Mary University of London Technical
            Report C4DM-TR-09-06 London, United Kingdom, 8 October 2009.
        .. [#] S. Hainsworth, "Techniques for the
            automated analysis of musical audio," Ph.D. dissertation,
            Department of Engineering, Cambridge University, 2004.
        .. [#] A. P. Klapuri, A. Eronen, and J. Astola,
            "Analysis of the meter of acoustic musical signals," IEEE
            Transactions on Audio, Speech and Language Processing, vol. 14, no.
            1, pp. 342-355, 2006.
    '''
    validate(reference_beats, estimated_beats)
    # When estimated beats are empty, no beats are correct; metric is 0
    if estimated_beats.size == 0 or reference_beats.size == 0:
        return 0., 0., 0., 0.
    # Accuracies for each variation
    continuous_accuracies = []
    total_accuracies = []
    # Get accuracy for each variation
    # (the first variation is the unmodified reference, so index 0 of the
    # accuracy lists below is the "correct metric level" score)
    for reference_beats in _get_reference_beat_variations(reference_beats):
        # Annotations that have been used
        n_annotations = np.max([reference_beats.shape[0],
                                estimated_beats.shape[0]])
        used_annotations = np.zeros(n_annotations)
        # Whether or not we are continuous at any given point
        beat_successes = np.zeros(n_annotations)
        # NOTE(review): xrange is Python 2 only; this module predates Python 3.
        for m in xrange(estimated_beats.shape[0]):
            # Is this beat correct?
            beat_success = 0
            # Get differences for this beat
            beat_differences = np.abs(estimated_beats[m] - reference_beats)
            # Get nearest annotation index
            nearest = np.argmin(beat_differences)
            min_difference = beat_differences[nearest]
            # Have we already used this annotation?
            if used_annotations[nearest] == 0:
                # Is this the first beat or first annotation?
                # If so, look forward.
                if m == 0 or nearest == 0:
                    # How far is the estimated beat from the reference beat,
                    # relative to the inter-annotation-interval?
                    if nearest + 1 < reference_beats.shape[0]:
                        reference_interval = (reference_beats[nearest + 1] -
                                              reference_beats[nearest])
                    else:
                        # Special case when nearest + 1 is too large - use the
                        # previous interval instead
                        reference_interval = (reference_beats[nearest] -
                                              reference_beats[nearest - 1])
                    # Handle this special case when beats are not unique
                    if reference_interval == 0:
                        if min_difference == 0:
                            phase = 1
                        else:
                            phase = np.inf
                    else:
                        phase = np.abs(min_difference/reference_interval)
                    # How close is the inter-beat-interval
                    # to the inter-annotation-interval?
                    if m + 1 < estimated_beats.shape[0]:
                        estimated_interval = (estimated_beats[m + 1] -
                                              estimated_beats[m])
                    else:
                        # Special case when m + 1 is too large - use the
                        # previous interval
                        estimated_interval = (estimated_beats[m] -
                                              estimated_beats[m - 1])
                    # Handle this special case when beats are not unique
                    if reference_interval == 0:
                        if estimated_interval == 0:
                            period = 0
                        else:
                            period = np.inf
                    else:
                        period = \
                            np.abs(1 - estimated_interval/reference_interval)
                    if phase < continuity_phase_threshold and \
                            period < continuity_period_threshold:
                        # Set this annotation as used
                        used_annotations[nearest] = 1
                        # This beat is matched
                        beat_success = 1
                # This beat/annotation is not the first
                else:
                    # How far is the estimated beat from the reference beat,
                    # relative to the inter-annotation-interval?
                    reference_interval = (reference_beats[nearest] -
                                          reference_beats[nearest - 1])
                    phase = np.abs(min_difference/reference_interval)
                    # How close is the inter-beat-interval
                    # to the inter-annotation-interval?
                    estimated_interval = (estimated_beats[m] -
                                          estimated_beats[m - 1])
                    reference_interval = (reference_beats[nearest] -
                                          reference_beats[nearest - 1])
                    period = np.abs(1 - estimated_interval/reference_interval)
                    if phase < continuity_phase_threshold and \
                            period < continuity_period_threshold:
                        # Set this annotation as used
                        used_annotations[nearest] = 1
                        # This beat is matched
                        beat_success = 1
            # Set whether this beat is matched or not
            beat_successes[m] = beat_success
        # Add 0s at the begnning and end
        # so that we at least find the beginning/end of the estimated beats
        beat_successes = np.append(np.append(0, beat_successes), 0)
        # Where is the beat not a match?
        beat_failures = np.nonzero(beat_successes == 0)[0]
        # Take out those zeros we added
        beat_successes = beat_successes[1:-1]
        # Get the continuous accuracy as the longest track of successful beats
        longest_track = np.max(np.diff(beat_failures)) - 1
        continuous_accuracy = longest_track/(1.0*beat_successes.shape[0])
        continuous_accuracies.append(continuous_accuracy)
        # Get the total accuracy - all sequences
        total_accuracy = np.sum(beat_successes)/(1.0*beat_successes.shape[0])
        total_accuracies.append(total_accuracy)
    # Grab accuracy scores
    return (continuous_accuracies[0],
            total_accuracies[0],
            np.max(continuous_accuracies),
            np.max(total_accuracies))
def information_gain(reference_beats,
                     estimated_beats,
                     bins=41):
    '''
    Compute the information gain: the K-L divergence of the beat error
    histogram against a uniform histogram, normalized by log2(bins).
    :usage:
        >>> reference_beats = mir_eval.io.load_events('reference.txt')
        >>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
        >>> estimated_beats = mir_eval.io.load_events('estimated.txt')
        >>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
        >>> information_gain = mir_eval.beat.information_gain(reference_beats,
                                                              estimated_beats)
    :parameters:
        - reference_beats : np.ndarray
            reference beat times, in seconds
        - estimated_beats : np.ndarray
            query beat times, in seconds
        - bins : int
            Number of bins in the beat error histogram, default 41
    :returns:
        - information_gain_score : float
            Entropy of beat error histogram
    :raises:
        - ValueError
            Thrown when the provided annotations are not valid.
    '''
    validate(reference_beats, estimated_beats)
    # An even bin count leaves no bin centered at zero error, so warn.
    if bins % 2 == 0:
        warnings.warn("bins parameter is even, "
                      "so there will not be a bin centered at zero.")
    # Nothing to measure with an empty sequence on either side.
    if estimated_beats.size == 0 or reference_beats.size == 0:
        return 0.
    # Entropy in both matching directions; the larger (worse) one is used.
    forward_entropy = _get_entropy(reference_beats, estimated_beats, bins)
    backward_entropy = _get_entropy(estimated_beats, reference_beats, bins)
    norm = np.log2(bins)
    # Note that the beat evaluation toolbox does not normalize
    return (norm - max(forward_entropy, backward_entropy)) / norm
def _get_entropy(reference_beats, estimated_beats, bins):
'''
Helper function for information gain
(needs to be run twice - once backwards, once forwards)
:parameters:
- reference_beats : np.ndarray
reference beat times, in seconds
- estimated_beats : np.ndarray
query beat times, in seconds
- bins : int
Number of bins in the beat error histogram
:returns:
- entropy : float
Entropy of beat error histogram
'''
beat_error = np.zeros(estimated_beats.shape[0])
for n in xrange(estimated_beats.shape[0]):
# Get index of closest annotation to this beat
beat_distances = estimated_beats[n] - reference_beats
closest_beat = np.argmin(np.abs(beat_distances))
absolute_error = beat_distances[closest_beat]
# If the first annotation is closest...
if closest_beat == 0:
# Inter-annotation interval - space between first two beats
interval = .5*(reference_beats[1] - reference_beats[0])
# If last annotation is closest...
if closest_beat == (reference_beats.shape[0] - 1):
interval = .5*(reference_beats[-1] - reference_beats[-2])
else:
if absolute_error < 0:
# Closest annotation is the one before the current beat
# so look at previous inner-annotation-interval
start = reference_beats[closest_beat]
end = reference_beats[closest_beat - 1]
interval = .5*(start - end)
else:
# Closest annotation is the one after the current beat
# so look at next inner-annotation-interval
start = reference_beats[closest_beat + 1]
end = reference_beats[closest_beat]
interval = .5*(start - end)
# The actual error of this beat
beat_error[n] = .5*absolute_error/interval
# Put beat errors in range (-.5, .5)
beat_error = np.mod(beat_error + .5, -1) + .5
# Note these are slightly different the beat evaluation toolbox
# (they are uniform)
histogram_bin_edges = np.linspace(-.5, .5, bins + 1)
# Get the histogram
raw_bin_values = np.histogram(beat_error, histogram_bin_edges)[0]
# Turn into a proper probability distribution
raw_bin_values = raw_bin_values/(1.0*np.sum(raw_bin_values))
# Set zero-valued bins to 1 to make the entropy calculation well-behaved
raw_bin_values[raw_bin_values == 0] = 1
# Calculate entropy
return -np.sum(raw_bin_values * np.log2(raw_bin_values))
def evaluate(reference_beats, estimated_beats, **kwargs):
    '''
    Compute every beat metric for the given reference and estimated
    annotations, returning them in a fixed-order dictionary.
    :usage:
        >>> reference_beats = mir_eval.io.load_events('reference.txt')
        >>> estimated_beats = mir_eval.io.load_events('estimated.txt')
        >>> scores = mir_eval.beat.evaluate(reference_beats, estimated_beats)
    :parameters:
        - reference_beats : np.ndarray
            Reference beat times, in seconds
        - estimated_beats : np.ndarray
            Query beat times, in seconds
        - kwargs
            Additional keyword arguments which will be passed to the
            appropriate metric or preprocessing functions.
    :returns:
        - scores : dict
            Dictionary of scores, where the key is the metric name (str) and
            the value is the (float) score achieved.
    :raises:
        - ValueError
            Thrown when the provided annotations are not valid.
    '''
    # Preprocess: drop beats occurring before the trim threshold
    reference_beats = util.filter_kwargs(trim_beats, reference_beats, **kwargs)
    estimated_beats = util.filter_kwargs(trim_beats, estimated_beats, **kwargs)
    # Insertion order below fixes the key order of the returned OrderedDict.
    scores = collections.OrderedDict()
    scores['F-measure'] = util.filter_kwargs(
        f_measure, reference_beats, estimated_beats, **kwargs)
    scores['Cemgil'], scores['Cemgil Best Metric Level'] = util.filter_kwargs(
        cemgil, reference_beats, estimated_beats, **kwargs)
    scores['Goto'] = util.filter_kwargs(
        goto, reference_beats, estimated_beats, **kwargs)
    scores['P-score'] = util.filter_kwargs(
        p_score, reference_beats, estimated_beats, **kwargs)
    (scores['Correct Metric Level Continuous'],
     scores['Correct Metric Level Total'],
     scores['Any Metric Level Continuous'],
     scores['Any Metric Level Total']) = util.filter_kwargs(
        continuity, reference_beats, estimated_beats, **kwargs)
    scores['Information gain'] = util.filter_kwargs(
        information_gain, reference_beats, estimated_beats, **kwargs)
    return scores
| mrgloom/mir_eval | mir_eval/beat.py | Python | mit | 34,468 | [
"Gaussian"
] | 04cc750516b482a6858207c1e94af0f06e93e60050b22570db1b18d12e0d3462 |
# Path of the example problem description file exercised by this test.
input_name = '../examples/diffusion/laplace_coupling_lcbcs.py'
# Name of the VTK output file produced by the test run.
output_name = 'test_laplace_coupling_lcbcs.vtk'
from tests_basic import TestInput
# NOTE(review): TestInput presumably picks up input_name/output_name from
# this module's globals — confirm in tests_basic.  No customization is
# needed, hence the empty subclass.
class Test(TestInput):
    pass
| RexFuzzle/sfepy | tests/test_input_laplace_coupling_lcbcs.py | Python | bsd-3-clause | 178 | [
"VTK"
] | 16a775e7b3c3c0b2809b414a39179f3a7ae7b6babe81c821245f4e80d3366a3d |
#pylint: disable=missing-docstring
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
#pylint: enable=missing-docstring
import logging
from markdown.util import etree
LOG = logging.getLogger(__name__)
class MarkdownTable(object):
    """
    A generic tool for generating Markdown tables.

    Args:
        column_headers[list]: A list of strings that contain the column headers.
    """
    def __init__(self, *args):
        self._column_headers = args
        self._rows = []

    def size(self):
        """
        Return the number of rows.
        """
        return len(self._rows)

    def __nonzero__(self):
        """
        bool operator: True when the table contains at least one row.
        """
        return self.size() > 0

    # Python 3 consults __bool__ rather than __nonzero__; without this
    # alias, truth-testing a table would always be True on Python 3.
    __bool__ = __nonzero__

    def addRow(self, *args):
        """
        Add a row to the table.

        Args:
            *args: Items to include in the table (must be the same length as the supplied headers).

        Raises:
            Exception: if the number of items differs from the number of columns.
        """
        if len(args) != len(self._column_headers):
            msg = "The number of supplied items ({}) does not match the number of columns ({})."
            raise Exception(msg.format(len(args), len(self._column_headers)))
        self._rows.append(args)

    def html(self):
        """
        Return the table in an html etree object.
        """
        table = etree.Element('table')
        # Header row
        tr = etree.SubElement(table, 'tr')
        for h in self._column_headers:
            th = etree.SubElement(tr, 'th')
            th.text = h
        # Data rows: strings become cell text, etree elements are nested
        for row in self._rows:
            tr = etree.SubElement(table, 'tr')
            for d in row:
                td = etree.SubElement(tr, 'td')
                if isinstance(d, str):
                    td.text = d
                else:
                    td.append(d)
        return table
| Chuban/moose | python/MooseDocs/common/MarkdownTable.py | Python | lgpl-2.1 | 3,002 | [
"MOOSE"
] | d1bc7595b8efe18f310b8235bb47e916b412ec5e9b87556598264778aa5ef8b8 |
# -*- coding: utf-8 -*-
import codecs
import os.path
import inlineplz.linters.clippy as clippy
clippy_path = os.path.join("tests", "testdata", "parsers", "clippy.txt")
def test_clippy():
    # Parse a captured clippy output fixture and verify the first parsed
    # message, sorted for a deterministic order.
    with codecs.open(clippy_path, encoding="utf-8", errors="replace") as inputfile:
        messages = sorted(list(clippy.ClippyParser().parse(inputfile.read())))
    # NOTE(review): each message appears to be a (path, line, text) tuple —
    # inferred from the assertions below; confirm against ClippyParser.
    assert (
        messages[0][2]
        == 'error: this comparison involving the minimum or maximum element for this type contains a case that is always true or always false\n --> src/main.rs:3:20\n |\n3 | println!("{}", x <= 0);\n | ^^^^^^\n |\n = note: #[deny(clippy::absurd_extreme_comparisons)] on by default\n = help: because 0 is the minimum value for this type, the case where the two sides are not equal never occurs, consider using x == 0 instead\n = help: for further information visit https://rust-lang-nursery.github.io/rust-clippy/v0.0.212/index.html#absurd_extreme_comparisons\n\n'
    )
    assert messages[0][1] == 3
    assert messages[0][0] == "src/main.rs"
| guykisel/inline-plz | tests/parsers/test_clippy.py | Python | isc | 1,088 | [
"VisIt"
] | a5a0bf3f9a0635a9c2b11589705fdff3c5880f8ec4ebc7bfabeb77476163d0b1 |
#
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#
import sys
import numpy as np
import ast
import inspect
import operator
import types as pytypes
from contextlib import contextmanager
from copy import deepcopy
import numba
from numba import njit, stencil
from numba.core.utils import PYVERSION
from numba.core import types, registry
from numba.core.compiler import compile_extra, Flags
from numba.core.cpu import ParallelOptions
from numba.tests.support import tag, skip_parfors_unsupported, _32bit
from numba.core.errors import LoweringError, TypingError, NumbaValueError
import unittest
skip_unsupported = skip_parfors_unsupported
# Test fixture: 2-D four-neighbor averaging stencil.
@stencil
def stencil1_kernel(a):
    return 0.25 * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0])
# Test fixture: 1-D trailing-window stencil summing a[-5]..a[0]; the
# explicit neighborhood declares the 5-element look-back to numba.
@stencil(neighborhood=((-5, 0), ))
def stencil2_kernel(a):
    cum = a[-5]
    for i in range(-4, 1):
        cum += a[i]
    return 0.3 * cum
# Test fixture: single-tap stencil at a fixed offset; cval=1.0 is the
# out-of-bounds fill value.
@stencil(cval=1.0)
def stencil3_kernel(a):
    return 0.25 * a[-2, 2]
# Test fixture: four-neighbor average drawing from two input arrays.
@stencil
def stencil_multiple_input_kernel(a, b):
    return 0.25 * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0] +
                   b[0, 1] + b[1, 0] + b[0, -1] + b[-1, 0])
# Test fixture: four-neighbor sum over two arrays scaled by scalar w.
@stencil
def stencil_multiple_input_kernel_var(a, b, w):
    return w * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0] +
                b[0, 1] + b[1, 0] + b[0, -1] + b[-1, 0])
# Test fixture: per-element select between a and b based on f.
@stencil
def stencil_multiple_input_mixed_types_2d(a, b, f):
    return a[0, 0] if f[0, 0] else b[0, 0]
# Test fixture: a is indexed relatively; b uses absolute (standard) indexing.
@stencil(standard_indexing=("b",))
def stencil_with_standard_indexing_1d(a, b):
    return a[-1] * b[0] + a[0] * b[1]
# Test fixture: 2-D variant — relative indexing on a, absolute on b.
@stencil(standard_indexing=("b",))
def stencil_with_standard_indexing_2d(a, b):
    return (a[0, 1] * b[0, 1] + a[1, 0] * b[1, 0]
            + a[0, -1] * b[0, -1] + a[-1, 0] * b[-1, 0])
# Trivial njit-compiled add-one helper.
@njit
def addone_njit(a):
    return a + 1
# Parallel variant of addone; defined conditionally because parfors are
# unavailable on 32-bit targets.
if not _32bit: # prevent compilation on unsupported 32bit targets
    @njit(parallel=True)
    def addone_pjit(a):
        return a + 1
@unittest.skipIf(PYVERSION != (3, 7), "Run under 3.7 only, AST unstable")
class TestStencilBase(unittest.TestCase):
    """Shared compilation/checking machinery for the stencil tests."""

    # Opt out of the test runner's automatic parallel-test handling.
    _numba_parallel_test_ = False

    def __init__(self, *args):
        # flags for njit()
        self.cflags = Flags()
        self.cflags.nrt = True
        super(TestStencilBase, self).__init__(*args)

    def _compile_this(self, func, sig, flags):
        # Compile func for signature sig with the given flags on the CPU target.
        return compile_extra(registry.cpu_target.typing_context,
                             registry.cpu_target.target_context, func, sig,
                             None, flags, {})

    def compile_parallel(self, func, sig, **kws):
        # Compile with parfors enabled; keyword arguments, when given,
        # select specific parallel options instead of a blanket True.
        flags = Flags()
        flags.nrt = True
        options = True if not kws else kws
        flags.auto_parallel=ParallelOptions(options)
        return self._compile_this(func, sig, flags)

    def compile_njit(self, func, sig):
        # Standard (non-parallel) nrt-enabled compilation.
        return self._compile_this(func, sig, flags=self.cflags)

    def compile_all(self, pyfunc, *args, **kwargs):
        # Infer the signature from the example arguments and compile both
        # the parallel and plain njit versions.
        sig = tuple([numba.typeof(x) for x in args])
        # compile with parallel=True
        cpfunc = self.compile_parallel(pyfunc, sig)
        # compile a standard njit of the original function
        cfunc = self.compile_njit(pyfunc, sig)
        return cfunc, cpfunc

    def check(self, no_stencil_func, pyfunc, *args):
        # Assert that the pure-Python, njit, and parallel builds of pyfunc
        # all agree with a hand-written reference implementation, and that
        # the parallel build actually scheduled parfors.
        cfunc, cpfunc = self.compile_all(pyfunc, *args)
        # results without stencil macro
        expected = no_stencil_func(*args)
        # python result
        py_output = pyfunc(*args)
        # njit result
        njit_output = cfunc.entry_point(*args)
        # parfor result
        parfor_output = cpfunc.entry_point(*args)
        np.testing.assert_almost_equal(py_output, expected, decimal=3)
        np.testing.assert_almost_equal(njit_output, expected, decimal=3)
        np.testing.assert_almost_equal(parfor_output, expected, decimal=3)
        # make sure parfor set up scheduling
        self.assertIn('@do_scheduling', cpfunc.library.get_llvm_str())
class TestStencil(TestStencilBase):
    """Tests for the @stencil decorator and numba.stencil() calls."""

    def __init__(self, *args, **kwargs):
        super(TestStencil, self).__init__(*args, **kwargs)
@skip_unsupported
def test_stencil1(self):
    """Tests whether the optional out argument to stencil calls works.
    """
    def test_with_out(n):
        # write the stencil result into a preallocated output array
        A = np.arange(n**2).reshape((n, n))
        B = np.zeros(n**2).reshape((n, n))
        B = stencil1_kernel(A, out=B)
        return B

    def test_without_out(n):
        # let the stencil call allocate its own output
        A = np.arange(n**2).reshape((n, n))
        B = stencil1_kernel(A)
        return B

    def test_impl_seq(n):
        # reference implementation: explicit loops over the interior
        A = np.arange(n**2).reshape((n, n))
        B = np.zeros(n**2).reshape((n, n))
        for i in range(1, n - 1):
            for j in range(1, n - 1):
                B[i, j] = 0.25 * (A[i, j + 1] +
                                  A[i + 1, j] + A[i, j - 1] + A[i - 1, j])
        return B

    n = 100
    self.check(test_impl_seq, test_with_out, n)
    self.check(test_impl_seq, test_without_out, n)
@skip_unsupported
def test_stencil2(self):
    """Tests whether the optional neighborhood argument to the stencil
    decorate works.
    """
    def test_seq(n):
        A = np.arange(n)
        B = stencil2_kernel(A)
        return B

    def test_impl_seq(n):
        # reference: trailing 6-point window sum scaled by 0.3
        A = np.arange(n)
        B = np.zeros(n)
        for i in range(5, len(A)):
            B[i] = 0.3 * sum(A[i - 5:i + 1])
        return B

    n = 100
    self.check(test_impl_seq, test_seq, n)

    # variable length neighborhood in numba.stencil call
    # only supported in parallel path
    def test_seq(n, w):
        A = np.arange(n)

        def stencil2_kernel(a, w):
            # accumulate a (2w+1)-point symmetric window
            cum = a[-w]
            for i in range(-w + 1, w + 1):
                cum += a[i]
            return 0.3 * cum
        B = numba.stencil(stencil2_kernel, neighborhood=((-w, w), ))(A, w)
        return B

    def test_impl_seq(n, w):
        A = np.arange(n)
        B = np.zeros(n)
        for i in range(w, len(A) - w):
            B[i] = 0.3 * sum(A[i - w:i + w + 1])
        return B

    n = 100
    w = 5
    cpfunc = self.compile_parallel(test_seq, (types.intp, types.intp))
    expected = test_impl_seq(n, w)
    # parfor result
    parfor_output = cpfunc.entry_point(n, w)
    np.testing.assert_almost_equal(parfor_output, expected, decimal=3)
    self.assertIn('@do_scheduling', cpfunc.library.get_llvm_str())

    # test index_offsets
    def test_seq(n, w, offset):
        A = np.arange(n)

        def stencil2_kernel(a, w):
            # the +1 in the indices is cancelled by index_offsets below
            cum = a[-w + 1]
            for i in range(-w + 1, w + 1):
                cum += a[i + 1]
            return 0.3 * cum
        B = numba.stencil(stencil2_kernel, neighborhood=((-w, w), ),
                          index_offsets=(-offset, ))(A, w)
        return B

    offset = 1
    cpfunc = self.compile_parallel(test_seq, (types.intp, types.intp,
                                              types.intp))
    parfor_output = cpfunc.entry_point(n, w, offset)
    np.testing.assert_almost_equal(parfor_output, expected, decimal=3)
    self.assertIn('@do_scheduling', cpfunc.library.get_llvm_str())

    # test slice in kernel
    def test_seq(n, w, offset):
        A = np.arange(n)

        def stencil2_kernel(a, w):
            # same window expressed as a relative slice
            return 0.3 * np.sum(a[-w + 1:w + 2])
        B = numba.stencil(stencil2_kernel, neighborhood=((-w, w), ),
                          index_offsets=(-offset, ))(A, w)
        return B

    offset = 1
    cpfunc = self.compile_parallel(test_seq, (types.intp, types.intp,
                                              types.intp))
    parfor_output = cpfunc.entry_point(n, w, offset)
    np.testing.assert_almost_equal(parfor_output, expected, decimal=3)
    self.assertIn('@do_scheduling', cpfunc.library.get_llvm_str())
@skip_unsupported
def test_stencil3(self):
    """Tests whether a non-zero optional cval argument to the stencil
    decorator works. Also tests integer result type.
    """
    def test_seq(n):
        A = np.arange(n**2).reshape((n, n))
        B = stencil3_kernel(A)
        return B

    test_njit = njit(test_seq)
    test_par = njit(test_seq, parallel=True)
    n = 5
    seq_res = test_seq(n)
    njit_res = test_njit(n)
    par_res = test_par(n)
    # border elements must hold the cval (1.0) in every compilation mode
    self.assertTrue(seq_res[0, 0] == 1.0 and seq_res[4, 4] == 1.0)
    self.assertTrue(njit_res[0, 0] == 1.0 and njit_res[4, 4] == 1.0)
    self.assertTrue(par_res[0, 0] == 1.0 and par_res[4, 4] == 1.0)
@skip_unsupported
def test_stencil_standard_indexing_1d(self):
    """Tests standard indexing with a 1d array.
    """
    def test_seq(n):
        A = np.arange(n)
        # B is absolutely indexed inside the kernel (standard_indexing)
        B = [3.0, 7.0]
        C = stencil_with_standard_indexing_1d(A, B)
        return C

    def test_impl_seq(n):
        A = np.arange(n)
        B = [3.0, 7.0]
        C = np.zeros(n)
        for i in range(1, n):
            C[i] = A[i - 1] * B[0] + A[i] * B[1]
        return C

    n = 100
    self.check(test_impl_seq, test_seq, n)
@skip_unsupported
def test_stencil_standard_indexing_2d(self):
    """Tests standard indexing with a 2d array and multiple stencil calls.
    """
    def test_seq(n):
        A = np.arange(n**2).reshape((n, n))
        B = np.ones((3, 3))
        # apply the same stencil twice, feeding the first result back in
        C = stencil_with_standard_indexing_2d(A, B)
        D = stencil_with_standard_indexing_2d(C, B)
        return D

    def test_impl_seq(n):
        A = np.arange(n**2).reshape((n, n))
        B = np.ones((3, 3))
        C = np.zeros(n**2).reshape((n, n))
        D = np.zeros(n**2).reshape((n, n))
        for i in range(1, n - 1):
            for j in range(1, n - 1):
                C[i, j] = (A[i, j + 1] * B[0, 1] + A[i + 1, j] * B[1, 0] +
                           A[i, j - 1] * B[0, -1] + A[i - 1, j] * B[-1, 0])
        for i in range(1, n - 1):
            for j in range(1, n - 1):
                D[i, j] = (C[i, j + 1] * B[0, 1] + C[i + 1, j] * B[1, 0] +
                           C[i, j - 1] * B[0, -1] + C[i - 1, j] * B[-1, 0])
        return D

    n = 5
    self.check(test_impl_seq, test_seq, n)
@skip_unsupported
def test_stencil_multiple_inputs(self):
    """Tests whether multiple inputs of the same size work.
    """
    def test_seq(n):
        A = np.arange(n**2).reshape((n, n))
        B = np.arange(n**2).reshape((n, n))
        C = stencil_multiple_input_kernel(A, B)
        return C

    def test_impl_seq(n):
        A = np.arange(n**2).reshape((n, n))
        B = np.arange(n**2).reshape((n, n))
        C = np.zeros(n**2).reshape((n, n))
        for i in range(1, n - 1):
            for j in range(1, n - 1):
                C[i, j] = 0.25 * \
                    (A[i, j + 1] + A[i + 1, j]
                     + A[i, j - 1] + A[i - 1, j]
                     + B[i, j + 1] + B[i + 1, j]
                     + B[i, j - 1] + B[i - 1, j])
        return C

    n = 3
    self.check(test_impl_seq, test_seq, n)

    # test stencil with a non-array input
    def test_seq(n):
        A = np.arange(n**2).reshape((n, n))
        B = np.arange(n**2).reshape((n, n))
        # scalar weight passed through to the kernel
        w = 0.25
        C = stencil_multiple_input_kernel_var(A, B, w)
        return C

    self.check(test_impl_seq, test_seq, n)
@skip_unsupported
def test_stencil_mixed_types(self):
    """Tests a stencil kernel over inputs of mixed dtypes (int arrays
    selected through a boolean mask)."""
    def test_impl_seq(n):
        # reference: take A where the identity mask S is set, else B
        A = np.arange(n ** 2).reshape((n, n))
        B = n ** 2 - np.arange(n ** 2).reshape((n, n))
        S = np.eye(n, dtype=np.bool_)
        O = np.zeros((n, n), dtype=A.dtype)
        for i in range(0, n):
            for j in range(0, n):
                O[i, j] = A[i, j] if S[i, j] else B[i, j]
        return O

    def test_seq(n):
        A = np.arange(n ** 2).reshape((n, n))
        B = n ** 2 - np.arange(n ** 2).reshape((n, n))
        S = np.eye(n, dtype=np.bool_)
        O = stencil_multiple_input_mixed_types_2d(A, B, S)
        return O

    n = 3
    self.check(test_impl_seq, test_seq, n)
@skip_unsupported
def test_stencil_call(self):
    """Tests 2D numba.stencil calls.
    """
    def test_impl1(n):
        # lambda kernel, writing into an out array
        A = np.arange(n**2).reshape((n, n))
        B = np.zeros(n**2).reshape((n, n))
        numba.stencil(lambda a: 0.25 * (a[0, 1] + a[1, 0] + a[0, -1]
                                        + a[-1, 0]))(A, out=B)
        return B

    def test_impl2(n):
        # named inner-function kernel, stencil allocates the output
        A = np.arange(n**2).reshape((n, n))
        B = np.zeros(n**2).reshape((n, n))

        def sf(a):
            return 0.25 * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0])
        B = numba.stencil(sf)(A)
        return B

    def test_impl_seq(n):
        A = np.arange(n**2).reshape((n, n))
        B = np.zeros(n**2).reshape((n, n))
        for i in range(1, n - 1):
            for j in range(1, n - 1):
                B[i, j] = 0.25 * (A[i, j + 1] + A[i + 1, j]
                                  + A[i, j - 1] + A[i - 1, j])
        return B

    n = 100
    self.check(test_impl_seq, test_impl1, n)
    self.check(test_impl_seq, test_impl2, n)
@skip_unsupported
def test_stencil_call_1D(self):
    """Tests 1D numba.stencil calls.
    """
    def test_impl(n):
        A = np.arange(n)
        B = np.zeros(n)
        # 3-point moving window via a lambda kernel with out kwarg
        numba.stencil(lambda a: 0.3 * (a[-1] + a[0] + a[1]))(A, out=B)
        return B

    def test_impl_seq(n):
        A = np.arange(n)
        B = np.zeros(n)
        for i in range(1, n - 1):
            B[i] = 0.3 * (A[i - 1] + A[i] + A[i + 1])
        return B

    n = 100
    self.check(test_impl_seq, test_impl, n)
@skip_unsupported
def test_stencil_call_const(self):
    """Tests numba.stencil call that has an index that can be inferred as
    constant from a unary expr. Otherwise, this would raise an error since
    neighborhood length is not specified.
    """
    def test_impl1(n):
        # simple constant index via a local variable
        A = np.arange(n)
        B = np.zeros(n)
        c = 1
        numba.stencil(lambda a, c: 0.3 * (a[-c] + a[0] + a[c]))(
            A, c, out=B)
        return B

    def test_impl2(n):
        # index is a binop of a literal and a constant variable
        A = np.arange(n)
        B = np.zeros(n)
        c = 2
        numba.stencil(lambda a, c: 0.3 * (a[1 - c] + a[0] + a[c - 1]))(
            A, c, out=B)
        return B

    # recursive expr case
    def test_impl3(n):
        A = np.arange(n)
        B = np.zeros(n)
        c = 2
        numba.stencil(lambda a, c: 0.3 * (a[-c + 1] + a[0] + a[c - 1]))(
            A, c, out=B)
        return B

    # multi-constant case
    def test_impl4(n):
        A = np.arange(n)
        B = np.zeros(n)
        d = 1
        c = 2
        numba.stencil(lambda a, c, d: 0.3 * (a[-c + d] + a[0] + a[c - d]))(
            A, c, d, out=B)
        return B

    def test_impl_seq(n):
        A = np.arange(n)
        B = np.zeros(n)
        c = 1
        for i in range(1, n - 1):
            B[i] = 0.3 * (A[i - c] + A[i] + A[i + c])
        return B

    n = 100
    # constant inference is only possible in parallel path
    cpfunc1 = self.compile_parallel(test_impl1, (types.intp,))
    cpfunc2 = self.compile_parallel(test_impl2, (types.intp,))
    cpfunc3 = self.compile_parallel(test_impl3, (types.intp,))
    cpfunc4 = self.compile_parallel(test_impl4, (types.intp,))
    expected = test_impl_seq(n)
    # parfor result
    parfor_output1 = cpfunc1.entry_point(n)
    parfor_output2 = cpfunc2.entry_point(n)
    parfor_output3 = cpfunc3.entry_point(n)
    parfor_output4 = cpfunc4.entry_point(n)
    np.testing.assert_almost_equal(parfor_output1, expected, decimal=3)
    np.testing.assert_almost_equal(parfor_output2, expected, decimal=3)
    np.testing.assert_almost_equal(parfor_output3, expected, decimal=3)
    np.testing.assert_almost_equal(parfor_output4, expected, decimal=3)
    # check error in regular Python path
    with self.assertRaises(NumbaValueError) as e:
        test_impl4(4)
    self.assertIn("stencil kernel index is not constant, "
                  "'neighborhood' option required", str(e.exception))
    # check error in njit path
    # TODO: ValueError should be thrown instead of LoweringError
    with self.assertRaises((LoweringError, NumbaValueError)) as e:
        njit(test_impl4)(4)
    self.assertIn("stencil kernel index is not constant, "
                  "'neighborhood' option required", str(e.exception))
@skip_unsupported
def test_stencil_parallel_off(self):
    """Tests 1D numba.stencil calls without parallel translation
    turned off.
    """
    def test_impl(A):
        return numba.stencil(lambda a: 0.3 * (a[-1] + a[0] + a[1]))(A)

    # stencil=False disables the stencil parfor transform, so no parfor
    # scheduling code should appear in the generated LLVM
    cpfunc = self.compile_parallel(test_impl, (numba.float64[:],),
                                   stencil=False)
    self.assertNotIn('@do_scheduling', cpfunc.library.get_llvm_str())
@skip_unsupported
def test_stencil_nested1(self):
    """Tests whether nested stencil decorator works.
    """
    @njit(parallel=True)
    def test_impl(n):
        # @stencil applied inside an njit(parallel=True) function
        @stencil
        def fun(a):
            c = 2
            return a[-c + 1]
        B = fun(n)
        return B

    def test_impl_seq(n):
        # reference: shift-by-one of the input
        B = np.zeros(len(n), dtype=int)
        for i in range(1, len(n)):
            B[i] = n[i - 1]
        return B

    n = np.arange(10)
    np.testing.assert_equal(test_impl(n), test_impl_seq(n))
@skip_unsupported
def test_out_kwarg_w_cval(self):
    """ Issue #3518, out kwarg did not work with cval."""
    # test const value that matches the arg dtype, and one that can be cast
    const_vals = [7, 7.0]

    def kernel(a):
        return (a[0, 0] - a[1, 0])

    for const_val in const_vals:
        stencil_fn = numba.stencil(kernel, cval=const_val)

        def wrapped():
            A = np.arange(12).reshape((3, 4))
            ret = np.ones_like(A)
            stencil_fn(A, out=ret)
            return ret

        # stencil function case
        A = np.arange(12).reshape((3, 4))
        # interior is a constant -4; bottom border row gets the cval
        expected = np.full_like(A, -4)
        expected[-1, :] = const_val
        ret = np.ones_like(A)
        stencil_fn(A, out=ret)
        np.testing.assert_almost_equal(ret, expected)
        # wrapped function case, check njit, then njit(parallel=True)
        impls = self.compile_all(wrapped,)
        for impl in impls:
            got = impl.entry_point()
            np.testing.assert_almost_equal(got, expected)

    # now check exceptions for cval dtype mismatch with out kwarg dtype
    stencil_fn = numba.stencil(kernel, cval=1j)

    def wrapped():
        A = np.arange(12).reshape((3, 4))
        ret = np.ones_like(A)
        stencil_fn(A, out=ret)
        return ret

    A = np.arange(12).reshape((3, 4))
    ret = np.ones_like(A)
    with self.assertRaises(NumbaValueError) as e:
        stencil_fn(A, out=ret)
    msg = "cval type does not match stencil return type."
    self.assertIn(msg, str(e.exception))
    # the same failure must surface from both compilation paths
    for compiler in [self.compile_njit, self.compile_parallel]:
        try:
            compiler(wrapped, ())
        except (NumbaValueError, LoweringError) as e:
            self.assertIn(msg, str(e))
        else:
            raise AssertionError("Expected error was not raised")
@skip_unsupported
def test_out_kwarg_w_cval_np_attr(self):
    """ Test issue #7286 where the cval is a np attr/string-based numerical
    constant"""
    for cval in (np.nan, np.inf, -np.inf, float('inf'), -float('inf')):
        def kernel(a):
            return (a[0, 0] - a[1, 0])

        stencil_fn = numba.stencil(kernel, cval=cval)

        def wrapped():
            A = np.arange(12.).reshape((3, 4))
            ret = np.ones_like(A)
            stencil_fn(A, out=ret)
            return ret

        # stencil function case
        A = np.arange(12.).reshape((3, 4))
        # interior is a constant -4; bottom border row gets the cval
        expected = np.full_like(A, -4)
        expected[-1, :] = cval
        ret = np.ones_like(A)
        stencil_fn(A, out=ret)
        np.testing.assert_almost_equal(ret, expected)
        # wrapped function case, check njit, then njit(parallel=True)
        impls = self.compile_all(wrapped,)
        for impl in impls:
            got = impl.entry_point()
            np.testing.assert_almost_equal(got, expected)
class pyStencilGenerator:
    """
    Holds the classes and methods needed to generate a python stencil
    implementation from a kernel purely using AST transforms.
    """

    class Builder:
        """
        Provides code generation for the AST manipulation pipeline.
        The class methods largely produce AST nodes/trees.
        """

        def __init__(self):
            # monotonically increasing counter backing varidx()
            self.__state = 0

        # pool of single-letter identifiers 'a'..'z' then 'A'..'Z', used to
        # name the generated loop induction variables
        ids = [chr(ord(v) + x) for v in ['a', 'A'] for x in range(26)]

        def varidx(self):
            """
            a monotonically increasing index for use in labelling variables.
            """
            tmp = self.__state
            self.__state = self.__state + 1
            return tmp

        # builder functions
        def gen_alloc_return(self, orig, var, dtype_var, init_val=0):
            """
            Generates an AST equivalent to:
            `var = np.full(orig.shape, init_val, dtype = dtype_var)`
            """
            new = ast.Assign(
                targets=[
                    ast.Name(
                        id=var,
                        ctx=ast.Store())],
                value=ast.Call(
                    func=ast.Attribute(
                        value=ast.Name(
                            id='np',
                            ctx=ast.Load()),
                        attr='full',
                        ctx=ast.Load()),
                    args=[
                        ast.Attribute(
                            value=ast.Name(
                                id=orig,
                                ctx=ast.Load()),
                            attr='shape',
                            ctx=ast.Load()),
                        self.gen_num(init_val)],
                    keywords=[ast.keyword(
                        arg='dtype',
                        value=self.gen_call('type', [dtype_var.id]).value)],
                    starargs=None,
                    kwargs=None),
            )
            return new

        def gen_assign(self, var, value, index_names):
            """
            Generates an AST equivalent to:
            `retvar[(*index_names,)] = value[<already present indexing>]`
            """
            elts_info = [ast.Name(id=x, ctx=ast.Load()) for x in index_names]
            new = ast.Assign(
                targets=[
                    ast.Subscript(
                        value=ast.Name(
                            id=var,
                            ctx=ast.Load()),
                        slice=ast.Index(
                            value=ast.Tuple(
                                elts=elts_info,
                                ctx=ast.Load())),
                        ctx=ast.Store())],
                value=value)
            return new

        def gen_loop(self, var, start=0, stop=0, body=None):
            """
            Generates an AST equivalent to a loop in `var` from
            `start` to `stop` with body `body`.
            """
            # bounds may be literal ints or prebuilt AST expressions
            if isinstance(start, int):
                start_val = ast.Num(n=start)
            else:
                start_val = start
            if isinstance(stop, int):
                stop_val = ast.Num(n=stop)
            else:
                stop_val = stop
            return ast.For(
                target=ast.Name(id=var, ctx=ast.Store()),
                iter=ast.Call(
                    func=ast.Name(id='range', ctx=ast.Load()),
                    args=[start_val, stop_val],
                    keywords=[],
                    starargs=None, kwargs=None),
                body=body, orelse=[])

        def gen_return(self, var):
            """
            Generates an AST equivalent to `return var`
            """
            return ast.Return(value=ast.Name(id=var, ctx=ast.Load()))

        def gen_slice(self, value):
            """Generates an Index with the given value"""
            return ast.Index(value=ast.Num(n=value))

        def gen_attr(self, name, attr):
            """
            Generates AST equivalent to `name.attr`
            """
            return ast.Attribute(
                value=ast.Name(id=name, ctx=ast.Load()),
                attr=attr, ctx=ast.Load())

        def gen_subscript(self, name, attr, index, offset=None):
            """
            Generates an AST equivalent to a subscript, something like:
            name.attr[slice(index) +/- offset]
            """
            attribute = self.gen_attr(name, attr)
            slise = self.gen_slice(index)
            ss = ast.Subscript(value=attribute, slice=slise, ctx=ast.Load())
            if offset:
                pm = ast.Add() if offset >= 0 else ast.Sub()
                ss = ast.BinOp(left=ss, op=pm, right=ast.Num(n=abs(offset)))
            return ss

        def gen_num(self, value):
            """
            Generates an ast.Num of value `value`
            """
            # pretend bools are ints, ast has no boolean literal support
            if isinstance(value, bool):
                return ast.Num(int(value))
            # NOTE(review): abs(value) >= 0 is always true for real numbers,
            # so the UnaryOp branch below looks unreachable except for nan
            # comparisons — confirm before relying on it.
            if abs(value) >= 0:
                return ast.Num(value)
            else:
                return ast.UnaryOp(ast.USub(), ast.Num(-value))

        def gen_call(self, call_name, args, kwargs=None):
            """
            Generates an AST equivalent to a call, something like:
            `call_name(*args, **kwargs)
            """
            fixed_args = [ast.Name(id='%s' % x, ctx=ast.Load()) for x in args]
            if kwargs is not None:
                # each kwarg name is also used as the value expression,
                # i.e. produces `name=name`
                keywords = [ast.keyword(
                    arg='%s' %
                    x, value=ast.parse(str(x)).body[0].value)
                    for x in kwargs]
            else:
                keywords = []
            func = ast.Name(id=call_name, ctx=ast.Load())
            return ast.Expr(value=ast.Call(
                func=func, args=fixed_args,
                keywords=keywords,
                starargs=None, kwargs=None), ctx=ast.Load())
# AST transformers
class FoldConst(ast.NodeTransformer, Builder):
    """
    Folds const expr, this is so const expressions in the relidx are
    more easily handled
    """
    # just support a few for testing purposes
    supported_ops = {
        ast.Add: operator.add,
        ast.Sub: operator.sub,
        ast.Mult: operator.mul,
    }

    def visit_BinOp(self, node):
        # does const expr folding
        node = self.generic_visit(node)
        op = self.supported_ops.get(node.op.__class__)
        lhs = getattr(node, 'left', None)
        rhs = getattr(node, 'right', None)
        if not (lhs and rhs and op):
            return node
        # Num <op> Num collapses to a single Num; anything else is kept
        if (isinstance(lhs, ast.Num) and
                isinstance(rhs, ast.Num)):
            return ast.Num(op(node.left.n, node.right.n))
        else:
            return node
class FixRelIndex(ast.NodeTransformer, Builder):
    """ Fixes the relative indexes to be written in as
    induction index + relative index
    """

    def __init__(self, argnames, const_assigns,
                 standard_indexing, neighborhood, *args, **kwargs):
        ast.NodeTransformer.__init__(self, *args, **kwargs)
        pyStencilGenerator.Builder.__init__(self, *args, **kwargs)
        self._argnames = argnames            # kernel argument names
        self._const_assigns = const_assigns  # var name -> constant value
        self._idx_len = -1                   # dimensionality once discovered
        self._mins = None                    # per-dim min relative index
        self._maxes = None                   # per-dim max relative index
        self._imin = np.iinfo(int).min
        self._imax = np.iinfo(int).max
        self._standard_indexing = standard_indexing \
            if standard_indexing else []
        self._neighborhood = neighborhood
        # induction variable name pattern; different when a neighborhood is
        # supplied so generated names do not collide
        self._id_pat = '__%sn' if neighborhood else '__%s'

    def get_val_from_num(self, node):
        """
        Gets the literal value from a Num or UnaryOp
        """
        if isinstance(node, ast.Num):
            return node.n
        elif isinstance(node, ast.UnaryOp):
            # assumes the UnaryOp is a negation of a Num
            return -node.operand.n
        else:
            raise ValueError(
                "get_val_from_num: Unknown indexing operation")

    def visit_Subscript(self, node):
        """
        Transforms subscripts of the form `a[x]` and `a[x, y, z, ...]`
        where `x, y, z` are relative indexes, to forms such as:
        `a[x + i]` and `a[x + i, y + j, z + k]` for use in loop induced
        indexing.
        """
        def handle2dindex(node):
            # multi-dimensional tuple index, e.g. a[i, j]
            idx = []
            for x, val in enumerate(node.slice.value.elts):
                # substitute known constants for their values
                useval = self._const_assigns.get(val, val)
                idx.append(
                    ast.BinOp(
                        left=ast.Name(
                            id=self._id_pat % self.ids[x],
                            ctx=ast.Load()),
                        op=ast.Add(),
                        right=useval,
                        ctx=ast.Load()))
            if self._idx_len == -1:
                self._idx_len = len(idx)
            else:
                if(self._idx_len != len(idx)):
                    raise ValueError(
                        "Relative indexing mismatch detected")
            if isinstance(node.ctx, ast.Store):
                msg = ("Assignments to array passed to "
                       "stencil kernels is not allowed")
                raise ValueError(msg)
            context = ast.Load()
            newnode = ast.Subscript(
                value=node.value,
                slice=ast.Index(
                    value=ast.Tuple(
                        elts=idx,
                        ctx=ast.Load()),
                    ctx=ast.Load()),
                ctx=context)
            ast.copy_location(newnode, node)
            ast.fix_missing_locations(newnode)
            # now work out max/min for index ranges i.e. stencil size
            if self._mins is None and self._maxes is None:
                # first pass
                self._mins = [self._imax] * self._idx_len
                self._maxes = [self._imin] * self._idx_len
            if not self._neighborhood:
                for x, lnode in enumerate(node.slice.value.elts):
                    if isinstance(lnode, ast.Num) or\
                            isinstance(lnode, ast.UnaryOp):
                        relvalue = self.get_val_from_num(lnode)
                    elif (hasattr(lnode, 'id') and
                          lnode.id in self._const_assigns):
                        relvalue = self._const_assigns[lnode.id]
                    else:
                        raise ValueError(
                            "Cannot interpret indexing value")
                    if relvalue < self._mins[x]:
                        self._mins[x] = relvalue
                    if relvalue > self._maxes[x]:
                        self._maxes[x] = relvalue
            else:
                # explicit neighborhood overrides inferred bounds
                for x, lnode in enumerate(self._neighborhood):
                    self._mins[x] = self._neighborhood[x][0]
                    self._maxes[x] = self._neighborhood[x][1]
            return newnode

        def handle1dindex(node):
            # single relative index, e.g. a[i]
            useval = self._const_assigns.get(
                node.slice.value, node.slice.value)
            idx = ast.BinOp(left=ast.Name(
                id=self._id_pat % self.ids[0],
                ctx=ast.Load()),
                op=ast.Add(),
                right=useval,
                ctx=ast.Load())
            if self._idx_len == -1:
                self._idx_len = 1
            else:
                if(self._idx_len != 1):
                    raise ValueError(
                        "Relative indexing mismatch detected")
            if isinstance(node.ctx, ast.Store):
                msg = ("Assignments to array passed to "
                       "stencil kernels is not allowed")
                raise ValueError(msg)
            context = ast.Load()
            newnode = ast.Subscript(
                value=node.value,
                slice=ast.Index(
                    value=idx,
                    ctx=ast.Load()),
                ctx=context)
            ast.copy_location(newnode, node)
            ast.fix_missing_locations(newnode)
            # now work out max/min for index ranges i.e. stencil size
            if self._mins is None and self._maxes is None:
                # first pass
                self._mins = [self._imax, ]
                self._maxes = [self._imin, ]
            if not self._neighborhood:
                if isinstance(node.slice.value, ast.Num) or\
                        isinstance(node.slice.value, ast.UnaryOp):
                    relvalue = self.get_val_from_num(node.slice.value)
                elif (hasattr(node.slice.value, 'id') and
                      node.slice.value.id in self._const_assigns):
                    relvalue = self._const_assigns[node.slice.value.id]
                else:
                    raise ValueError("Cannot interpret indexing value")
                if relvalue < self._mins[0]:
                    self._mins[0] = relvalue
                if relvalue > self._maxes[0]:
                    self._maxes[0] = relvalue
            else:
                self._mins[0] = self._neighborhood[0][0]
                self._maxes[0] = self._neighborhood[0][1]
            ast.copy_location(newnode, node)
            ast.fix_missing_locations(newnode)
            return newnode

        def computeSlice(i, node):
            # rewrite a slice's lower/upper bounds as induction + offset
            def gen_idx(val, x):
                useval = self._const_assigns.get(val, val)
                value = self.get_val_from_num(val)
                tmp = ast.BinOp(
                    left=ast.Name(
                        id=self._id_pat % self.ids[x],
                        ctx=ast.Load()),
                    op=ast.Add(),
                    right=useval,
                    ctx=ast.Load())
                ast.copy_location(tmp, node)
                ast.fix_missing_locations(tmp)
                return tmp
            newnode = ast.Slice(gen_idx(node.lower, i),
                                gen_idx(node.upper, i),
                                node.step)
            ast.copy_location(newnode, node)
            ast.fix_missing_locations(newnode)
            return newnode

        def computeIndex(i, node):
            # rewrite a plain Index as induction + offset
            useval = self._const_assigns.get(node.value, node.value)
            idx = ast.BinOp(left=ast.Name(
                id=self._id_pat % self.ids[i],
                ctx=ast.Load()),
                op=ast.Add(),
                right=useval,
                ctx=ast.Load())
            newnode = ast.Index(value=idx, ctx=ast.Load())
            ast.copy_location(newnode, node)
            ast.fix_missing_locations(newnode)
            return newnode

        def handleExtSlice(node):
            # mixed slices and indices, e.g. a[1:3, 0]
            idx = []
            for i, val in enumerate(node.slice.dims):
                if isinstance(val, ast.Slice):
                    idx.append(computeSlice(i, val))
                if isinstance(val, ast.Index):
                    idx.append(computeIndex(i, val))
                # TODO: handle more node types
            if self._idx_len == -1:
                self._idx_len = len(node.slice.dims)
            else:
                if(self._idx_len != len(node.slice.dims)):
                    raise ValueError(
                        "Relative indexing mismatch detected")
            if isinstance(node.ctx, ast.Store):
                msg = ("Assignments to array passed to "
                       "stencil kernels is not allowed")
                raise ValueError(msg)
            context = ast.Load()
            newnode = ast.Subscript(
                value=node.value,
                slice=ast.ExtSlice(
                    dims=idx,
                    ctx=ast.Load()),
                ctx=context
            )
            # now work out max/min for index ranges i.e. stencil size
            if self._mins is None and self._maxes is None:
                # first pass
                self._mins = [self._imax] * self._idx_len
                self._maxes = [self._imin] * self._idx_len
            if not self._neighborhood:
                for x, anode in enumerate(node.slice.dims):
                    if isinstance(anode, ast.Slice):
                        # both bounds of a slice contribute to the extent
                        for lnode in [anode.lower, anode.upper]:
                            if isinstance(lnode, ast.Num) or\
                                    isinstance(lnode, ast.UnaryOp):
                                relvalue = self.get_val_from_num(lnode)
                            elif (hasattr(lnode, 'id') and
                                  lnode.id in self._const_assigns):
                                relvalue = self._const_assigns[lnode.id]
                            else:
                                raise ValueError(
                                    "Cannot interpret indexing value")
                            if relvalue < self._mins[x]:
                                self._mins[x] = relvalue
                            if relvalue > self._maxes[x]:
                                self._maxes[x] = relvalue
                    else:
                        val = anode.value
                        if isinstance(val, ast.Num) or\
                                isinstance(val, ast.UnaryOp):
                            relvalue = self.get_val_from_num(val)
                        elif (hasattr(val, 'id') and
                              val.id in self._const_assigns):
                            relvalue = self._const_assigns[val.id]
                        else:
                            raise ValueError(
                                "Cannot interpret indexing value")
                        if relvalue < self._mins[x]:
                            self._mins[x] = relvalue
                        if relvalue > self._maxes[x]:
                            self._maxes[x] = relvalue
            else:
                for x, lnode in enumerate(self._neighborhood):
                    self._mins[x] = self._neighborhood[x][0]
                    self._maxes[x] = self._neighborhood[x][1]
            ast.copy_location(newnode, node)
            ast.fix_missing_locations(newnode)
            return newnode

        def handleSlice(node):
            # single plain slice, e.g. a[-w:w]
            idx = computeSlice(0, node.slice)
            idx.ctx = ast.Load()
            if isinstance(node.ctx, ast.Store):
                msg = ("Assignments to array passed to "
                       "stencil kernels is not allowed")
                raise ValueError(msg)
            context = ast.Load()
            newnode = ast.Subscript(
                value=node.value,
                slice=idx,
                ctx=context)
            ast.copy_location(newnode, node)
            ast.fix_missing_locations(newnode)
            if self._idx_len == -1:
                self._idx_len = 1
            else:
                if(self._idx_len != 1):
                    raise ValueError(
                        "Relative indexing mismatch detected")
            # now work out max/min for index ranges i.e. stencil size
            if self._mins is None and self._maxes is None:
                # first pass
                self._mins = [self._imax]
                self._maxes = [self._imin]
            if not self._neighborhood:
                if isinstance(node.slice.value, ast.Num) or\
                        isinstance(node.slice.value, ast.UnaryOp):
                    relvalue = self.get_val_from_num(node.slice.value)
                elif (hasattr(node.slice.value, 'id') and
                      node.slice.value.id in self._const_assigns):
                    relvalue = self._const_assigns[node.slice.value.id]
                else:
                    raise ValueError("Cannot interpret indexing value")
                if relvalue < self._mins[0]:
                    self._mins[0] = relvalue
                if relvalue > self._maxes[0]:
                    self._maxes[0] = relvalue
            else:
                self._mins[0] = self._neighborhood[0][0]
                self._maxes[0] = self._neighborhood[0][1]
            return newnode

        node = self.generic_visit(node)
        # only rewrite subscripts of stencil-array arguments; arrays listed
        # in standard_indexing keep their absolute indexing
        if (node.value.id in self._argnames) and (
                node.value.id not in self._standard_indexing):
            # fancy slice
            if isinstance(node.slice, ast.ExtSlice):
                return handleExtSlice(node)
            # plain slice
            if isinstance(node.slice, ast.Slice):
                return handleSlice(node)
            # 2D index
            if isinstance(node.slice.value, ast.Tuple):
                return handle2dindex(node)
            # 1D index
            elif isinstance(node.slice, ast.Index):
                return handle1dindex(node)
            else:  # unknown
                raise ValueError("Unhandled subscript")
        else:
            return node

    @property
    def idx_len(self):
        # dimensionality of the stencil indexing; only valid after visit()
        if self._idx_len == -1:
            raise ValueError(
                'Transform has not been run/no indexes found')
        else:
            return self._idx_len

    @property
    def maxes(self):
        # per-dimension maximum relative index (stencil upper extent)
        return self._maxes

    @property
    def mins(self):
        # per-dimension minimum relative index (stencil lower extent)
        return self._mins

    @property
    def id_pattern(self):
        # printf-style pattern used to name induction variables
        return self._id_pat
class TransformReturns(ast.NodeTransformer, Builder):
    """
    Transforms return nodes into assignments.
    """

    def __init__(self, relidx_info, *args, **kwargs):
        ast.NodeTransformer.__init__(self, *args, **kwargs)
        pyStencilGenerator.Builder.__init__(self, *args, **kwargs)
        self._relidx_info = relidx_info
        self._ret_var_idx = self.varidx()
        # synthesised name of the stencil output array, e.g. '__b0'
        retvar = '__b%s' % self._ret_var_idx
        self._retvarname = retvar

    def visit_Return(self, node):
        self.generic_visit(node)
        # rewrite `return expr` as `__bN[(__a, __b, ...)] = expr` using one
        # induction variable per stencil dimension
        nloops = self._relidx_info.idx_len
        var_pattern = self._relidx_info.id_pattern
        return self.gen_assign(
            self._retvarname, node.value,
            [var_pattern % self.ids[l] for l in range(nloops)])

    @property
    def ret_var_name(self):
        # name of the variable holding the stencil output array
        return self._retvarname
class FixFunc(ast.NodeTransformer, Builder):
    """ The main function rewriter, takes the body of the kernel and generates:
    * checking function calls
    * return value allocation
    * loop nests
    * return site
    * Function definition as an entry point
    """

    def __init__(self, kprops, relidx_info, ret_info,
                 cval, standard_indexing, neighborhood, *args, **kwargs):
        ast.NodeTransformer.__init__(self, *args, **kwargs)
        pyStencilGenerator.Builder.__init__(self, *args, **kwargs)
        self._original_kernel = kprops.original_kernel
        self._argnames = kprops.argnames
        self._retty = kprops.retty
        self._relidx_info = relidx_info
        self._ret_info = ret_info
        self._standard_indexing = standard_indexing \
            if standard_indexing else []
        self._neighborhood = neighborhood if neighborhood else tuple()
        # arguments that take part in relative indexing
        self._relidx_args = [
            x for x in self._argnames if x not in self._standard_indexing]
        # switch cval to python type
        if hasattr(cval, 'dtype'):
            self.cval = cval.tolist()
        else:
            self.cval = cval
        # the first argument is the array the stencil iterates over
        self.stencil_arr = self._argnames[0]

    def visit_FunctionDef(self, node):
        """
        Transforms the kernel function into a function that will perform
        the stencil like behaviour on the kernel.
        """
        self.generic_visit(node)

        # this function validates arguments and is injected into the top
        # of the stencil call
        def check_stencil_arrays(*args, **kwargs):
            # the first has to be an array due to parfors requirements
            neighborhood = kwargs.get('neighborhood')
            init_shape = args[0].shape
            if neighborhood is not None:
                if len(init_shape) != len(neighborhood):
                    raise ValueError("Invalid neighborhood supplied")
            for x in args[1:]:
                if hasattr(x, 'shape'):
                    if init_shape != x.shape:
                        raise ValueError(
                            "Input stencil arrays do not commute")

        checksrc = inspect.getsource(check_stencil_arrays)
        check_impl = ast.parse(
            checksrc.strip()).body[0]  # don't need module
        ast.fix_missing_locations(check_impl)
        checker_call = self.gen_call(
            'check_stencil_arrays',
            self._relidx_args,
            kwargs=['neighborhood'])
        nloops = self._relidx_info.idx_len

        def computebound(mins, maxs):
            # interior loop bounds implied by the stencil's min/max offsets
            minlim = 0 if mins >= 0 else -mins
            maxlim = -maxs if maxs > 0 else 0
            return (minlim, maxlim)

        var_pattern = self._relidx_info.id_pattern
        loop_body = node.body
        # create loop nests, innermost first, wrapping the previous level
        loop_count = 0
        for l in range(nloops):
            minlim, maxlim = computebound(
                self._relidx_info.mins[loop_count],
                self._relidx_info.maxes[loop_count])
            minbound = minlim
            # upper bound is `arr.shape[dim] - maxlim`
            maxbound = self.gen_subscript(
                self.stencil_arr, 'shape', loop_count, maxlim)
            loops = self.gen_loop(
                var_pattern % self.ids[loop_count],
                minbound, maxbound, body=loop_body)
            loop_body = [loops]
            loop_count += 1
            # patch loop location
            ast.copy_location(loops, node)
        _rettyname = self._retty.targets[0]
        # allocate a return
        retvar = self._ret_info.ret_var_name
        allocate = self.gen_alloc_return(
            self.stencil_arr, retvar, _rettyname, self.cval)
        ast.copy_location(allocate, node)
        # generate the return
        returner = self.gen_return(retvar)
        ast.copy_location(returner, node)
        # extend the signature with a `neighborhood` argument
        add_kwarg = [ast.arg('neighborhood', None)]
        defaults = []
        newargs = ast.arguments(
            args=node.args.args +
            add_kwarg,
            defaults=defaults,
            vararg=None,
            kwarg=None,
            kwonlyargs=[],
            kw_defaults=[],
            posonlyargs=[])
        # assemble the new entry point: validation, original kernel,
        # dtype probe, output allocation, loop nest, return
        new = ast.FunctionDef(
            name='__%s' %
            node.name,
            args=newargs,
            body=[
                check_impl,
                checker_call,
                self._original_kernel,
                self._retty,
                allocate,
                loops,
                returner],
            decorator_list=[])
        ast.copy_location(new, node)
        return new
class GetKernelProps(ast.NodeVisitor, Builder):
    """ Gets the argument names and other properties
    of the original kernel.

    After visit(): `argnames` holds the kernel's positional argument
    names, `const_assigns` maps local names to constant values found in
    simple assignments, `retty` is an AST assigning `__retdtype` from a
    call to the kernel, and `original_kernel` is a deep copy of the
    unmodified kernel FunctionDef.
    """

    def __init__(self, *args, **kwargs):
        ast.NodeVisitor.__init__(self, *args, **kwargs)
        pyStencilGenerator.Builder.__init__(self, *args, **kwargs)
        self._argnames = None          # positional argument names
        self._kwargnames = None        # **kwarg name(s), if present
        self._retty = None             # AST assigning __retdtype
        self._original_kernel = None   # deep copy of the kernel node
        self._const_assigns = {}       # var name -> constant value

    def visit_FunctionDef(self, node):
        if self._argnames is not None or self._kwargnames is not None:
            raise RuntimeError("multiple definition of function/args?")
        attr = 'arg'
        self._argnames = [getattr(x, attr) for x in node.args.args]
        if node.args.kwarg:
            self._kwargnames = [x.arg for x in node.args.kwarg]
        # synthesise `__retdtype = kernel(args)` so the generated stencil
        # can compute the output dtype by evaluating the kernel once
        compute_retdtype = self.gen_call(node.name, self._argnames)
        self._retty = ast.Assign(targets=[ast.Name(
            id='__retdtype',
            ctx=ast.Store())], value=compute_retdtype.value)
        self._original_kernel = ast.fix_missing_locations(deepcopy(node))
        self.generic_visit(node)

    def visit_Assign(self, node):
        """Record simple constant assignments like `c = 2` or `c = -2`."""
        self.generic_visit(node)
        tgt = node.targets
        if len(tgt) == 1:
            target = tgt[0]
            if isinstance(target, ast.Name):
                if isinstance(node.value, ast.Num):
                    self._const_assigns[target.id] = node.value.n
                elif (isinstance(node.value, ast.UnaryOp) and
                        isinstance(node.value.operand, ast.Num)):
                    # BUG FIX: the sign of a unary expression lives on
                    # `node.value.op` and the literal on
                    # `node.value.operand`. The previous code tested
                    # `isinstance(node.value, ast.UAdd)` (always False —
                    # a UnaryOp is never a UAdd) and then read the
                    # non-existent attribute `node.value.n`, raising
                    # AttributeError for any kernel containing e.g.
                    # `c = -2`.
                    if isinstance(node.value.op, ast.UAdd):
                        self._const_assigns[target.id] = \
                            node.value.operand.n
                    elif isinstance(node.value.op, ast.USub):
                        self._const_assigns[target.id] = \
                            -node.value.operand.n

    @property
    def argnames(self):
        """
        The names of the arguments to the function
        """
        return self._argnames

    @property
    def const_assigns(self):
        """
        A map of variable name to constant for variables that are simple
        constant assignments
        """
        return self._const_assigns

    @property
    def retty(self):
        """
        The return type
        """
        return self._retty

    @property
    def original_kernel(self):
        """
        The original unmutated kernel
        """
        return self._original_kernel
class FixCalls(ast.NodeTransformer):
    """ Fixes call sites for astor (in case it is in use) """

    def visit_Call(self, node):
        self.generic_visit(node)
        # Rebuild the Call carrying explicit `starargs`/`kwargs`
        # attributes (set to None) so astor's legacy code paths can
        # consume the node.
        replacement = ast.Call(func=node.func,
                               args=node.args,
                               keywords=node.keywords,
                               starargs=None,
                               kwargs=None)
        return replacement
def generate_stencil_tree(
        self, func, cval, standard_indexing, neighborhood):
    """
    Generates the AST tree for a stencil from:
    func - a python stencil kernel
    cval, standard_indexing and neighborhood as per the @stencil decorator
    """
    source = inspect.getsource(func)
    tree = ast.parse(source.strip())

    # Prints debugging information if True.
    # If astor is installed the decompilation of the AST is also printed
    DEBUG = False
    if DEBUG:
        print("ORIGINAL")
        print(ast.dump(tree))

    def run_passes(tree):
        """ the pipeline of manipulations """
        # harvest the argument names and simple constant assignments
        props = self.GetKernelProps()
        props.visit(tree)
        arg_names = props.argnames
        const_map = props.const_assigns
        # validate standard_indexing refers to actual kernel arguments
        if standard_indexing:
            for name in standard_indexing:
                if name not in arg_names:
                    msg = ("Non-existent variable "
                           "specified in standard_indexing")
                    raise ValueError(msg)
        # fold consts
        self.FoldConst().visit(tree)
        # rewrite the relative indices as induced indices
        index_rewriter = self.FixRelIndex(
            arg_names, const_map, standard_indexing, neighborhood)
        index_rewriter.visit(tree)
        # switch returns into assigns
        returns_pass = self.TransformReturns(index_rewriter)
        returns_pass.visit(tree)
        # generate the function body and loop nests and assemble
        assembler = self.FixFunc(
            props,
            index_rewriter,
            returns_pass,
            cval,
            standard_indexing,
            neighborhood)
        assembler.visit(tree)
        # fix up the call sites so they work better with astor
        self.FixCalls().visit(tree)
        ast.fix_missing_locations(tree.body[0])

    # run the pipeline of transforms on the tree
    run_passes(tree)

    if DEBUG:
        print("\n\n\nNEW")
        print(ast.dump(tree, include_attributes=True))
        try:
            import astor
            print(astor.to_source(tree))
        except ImportError:
            pass
    return tree
def pyStencil(func_or_mode='constant', **options):
    """
    A pure python implementation of (a large subset of) stencil functionality,
    equivalent to StencilFunc.

    :param func_or_mode: the stencil kernel function, or the border mode
        string (only 'constant' is supported).
    :param options: recognised keys are 'cval', 'standard_indexing' and
        'neighborhood', with semantics as per the @stencil decorator.
    :return: a plain python function implementing the stencil.
    :raises ValueError: on an unknown option, an unsupported mode, or a
        missing kernel function.
    """
    if not isinstance(func_or_mode, str):
        mode = 'constant'  # default style
        func = func_or_mode
    else:
        # NOTE: the tautological `assert isinstance(func_or_mode, str)`
        # that used to sit here has been removed; it could never fire in
        # this branch and asserts are stripped under `python -O` anyway.
        mode = func_or_mode
        func = None

    for option in options:
        if option not in ["cval", "standard_indexing", "neighborhood"]:
            raise ValueError("Unknown stencil option " + option)

    if mode != 'constant':
        raise ValueError("Unsupported mode style " + mode)

    if func is None:
        # Previously this fell through to inspect.getsource(None), which
        # raised an opaque TypeError; fail with a clear message instead.
        raise ValueError("pyStencil requires a kernel function")

    cval = options.get('cval', 0)
    standard_indexing = options.get('standard_indexing', None)
    neighborhood = options.get('neighborhood', None)

    # generate a new AST tree from the kernel func
    gen = pyStencilGenerator()
    tree = gen.generate_stencil_tree(func, cval, standard_indexing,
                                     neighborhood)

    # breathe life into the tree: compile the module AST and pull out the
    # code object of the (single) generated function
    mod_code = compile(tree, filename="<ast>", mode="exec")
    func_code = mod_code.co_consts[0]
    full_func = pytypes.FunctionType(func_code, globals())
    return full_func
@skip_unsupported
class TestManyStencils(TestStencilBase):
    """Battery of stencil kernels cross-checked between pyStencil (the
    pure-python reference implementation above), @stencil, njit, and
    njit(parallel=True) via the check() harness."""

    def __init__(self, *args, **kwargs):
        super(TestManyStencils, self).__init__(*args, **kwargs)
def check(self, pyfunc, *args, **kwargs):
    """
    For a given kernel:

    The expected result is computed from a pyStencil version of the
    stencil.

    The following results are then computed:
    * from a pure @stencil decoration of the kernel.
    * from the njit of a trivial wrapper function around the pure @stencil
      decorated function.
    * from the njit(parallel=True) of a trivial wrapper function around
      the pure @stencil decorated function.

    The results are then compared.

    kwargs:
      options            - dict of @stencil options (cval, neighborhood,
                           standard_indexing) forwarded to each impl
      expected_exception - an exception class, a list of classes, or a
                           per-implementation dict as produced by
                           exception_dict(); marks which impls are
                           expected to raise
    """
    options = kwargs.get('options', dict())
    expected_exception = kwargs.get('expected_exception')

    # DEBUG print output arrays
    DEBUG_OUTPUT = False

    # collect fails
    # should_fail: impls that were expected to raise but did not
    # should_not_fail: impls that raised (or mismatched) unexpectedly
    should_fail = []
    should_not_fail = []

    # runner that handles fails
    @contextmanager
    def errorhandler(exty=None, usecase=None):
        try:
            yield
        except Exception as e:
            if exty is not None:
                # an exception was expected: accept any of the listed types
                lexty = exty if hasattr(exty, '__iter__') else [exty, ]
                found = False
                for ex in lexty:
                    found |= isinstance(e, ex)
                if not found:
                    raise
            else:
                # no exception expected: record, do not abort the run
                should_not_fail.append(
                    (usecase, "%s: %s" %
                     (type(e), str(e))))
        else:
            if exty is not None:
                # expected an exception but none occurred
                should_fail.append(usecase)

    # normalise the expected-exception spec to one entry per impl
    if isinstance(expected_exception, dict):
        pystencil_ex = expected_exception['pyStencil']
        stencil_ex = expected_exception['stencil']
        njit_ex = expected_exception['njit']
        parfor_ex = expected_exception['parfor']
    else:
        pystencil_ex = expected_exception
        stencil_ex = expected_exception
        njit_ex = expected_exception
        parfor_ex = expected_exception

    stencil_args = {'func_or_mode': pyfunc}
    stencil_args.update(options)

    # 1. reference result from the pure-python pyStencil implementation
    expected_present = True
    try:
        # ast impl
        ast_impl = pyStencil(func_or_mode=pyfunc, **options)
        expected = ast_impl(
            *args, neighborhood=options.get('neighborhood'))
        if DEBUG_OUTPUT:
            print("\nExpected:\n", expected)
    except Exception as ex:
        # check exception is expected
        with errorhandler(pystencil_ex, "pyStencil"):
            raise ex
        pyStencil_unhandled_ex = ex
        expected_present = False

    # 2. result from a plain @stencil decoration
    stencilfunc_output = None
    with errorhandler(stencil_ex, "@stencil"):
        stencil_func_impl = stencil(**stencil_args)
        # stencil result
        stencilfunc_output = stencil_func_impl(*args)

    # wrapped stencil impl, could this be generated?
    if len(args) == 1:
        def wrap_stencil(arg0):
            return stencil_func_impl(arg0)
    elif len(args) == 2:
        def wrap_stencil(arg0, arg1):
            return stencil_func_impl(arg0, arg1)
    elif len(args) == 3:
        def wrap_stencil(arg0, arg1, arg2):
            return stencil_func_impl(arg0, arg1, arg2)
    else:
        raise ValueError(
            "Up to 3 arguments can be provided, found %s" %
            len(args))

    sig = tuple([numba.typeof(x) for x in args])

    # 3. result from njit of the wrapper
    njit_output = None
    with errorhandler(njit_ex, "njit"):
        wrapped_cfunc = self.compile_njit(wrap_stencil, sig)
        # njit result
        njit_output = wrapped_cfunc.entry_point(*args)

    # 4. result from njit(parallel=True) of the wrapper
    parfor_output = None
    with errorhandler(parfor_ex, "parfors"):
        wrapped_cpfunc = self.compile_parallel(wrap_stencil, sig)
        # parfor result
        parfor_output = wrapped_cpfunc.entry_point(*args)

    if DEBUG_OUTPUT:
        print("\n@stencil_output:\n", stencilfunc_output)
        print("\nnjit_output:\n", njit_output)
        print("\nparfor_output:\n", parfor_output)

    # compare each impl (value AND dtype) against the reference; failures
    # are recorded rather than raised so every impl gets checked
    if expected_present:
        try:
            if not stencil_ex:
                np.testing.assert_almost_equal(
                    stencilfunc_output, expected, decimal=1)
                self.assertEqual(expected.dtype, stencilfunc_output.dtype)
        except Exception as e:
            should_not_fail.append(
                ('@stencil', "%s: %s" %
                 (type(e), str(e))))
            print("@stencil failed: %s" % str(e))

        try:
            if not njit_ex:
                np.testing.assert_almost_equal(
                    njit_output, expected, decimal=1)
                self.assertEqual(expected.dtype, njit_output.dtype)
        except Exception as e:
            should_not_fail.append(('njit', "%s: %s" % (type(e), str(e))))
            print("@njit failed: %s" % str(e))

        try:
            if not parfor_ex:
                np.testing.assert_almost_equal(
                    parfor_output, expected, decimal=1)
                self.assertEqual(expected.dtype, parfor_output.dtype)
                # also confirm parallelisation actually happened
                try:
                    self.assertIn(
                        '@do_scheduling',
                        wrapped_cpfunc.library.get_llvm_str())
                except AssertionError:
                    msg = 'Could not find `@do_scheduling` in LLVM IR'
                    raise AssertionError(msg)
        except Exception as e:
            should_not_fail.append(
                ('parfors', "%s: %s" %
                 (type(e), str(e))))
            print("@njit(parallel=True) failed: %s" % str(e))

    if DEBUG_OUTPUT:
        print("\n\n")

    # report impls that were expected to raise but did not
    if should_fail:
        msg = ["%s" % x for x in should_fail]
        raise RuntimeError(("The following implementations should have "
                            "raised an exception but did not:\n%s") % msg)

    # report impls that raised (or mismatched) unexpectedly
    if should_not_fail:
        impls = ["%s" % x[0] for x in should_not_fail]
        errs = ''.join(["%s: Message: %s\n\n" %
                        x for x in should_not_fail])
        str1 = ("The following implementations should not have raised an "
                "exception but did:\n%s\n" % impls)
        str2 = "Errors were:\n\n%s" % errs
        raise RuntimeError(str1 + str2)

    # the reference impl itself failed in an unanticipated way
    if not expected_present:
        if expected_exception is None:
            raise RuntimeError(
                "pyStencil failed, was not caught/expected",
                pyStencil_unhandled_ex)
def exception_dict(self, **kwargs):
    """Return the per-implementation expected-exception map used by check().

    Every implementation key ('pyStencil', 'stencil', 'njit', 'parfor')
    defaults to None (no exception expected); keyword arguments override
    individual entries.
    """
    d = {'pyStencil': None, 'stencil': None, 'njit': None, 'parfor': None}
    # idiomatic dict.update replaces the previous manual copy loop
    d.update(kwargs)
    return d
# --- pure relative-indexing smoke tests: single and paired offsets -------

def test_basic00(self):
    """rel index"""
    def kernel(a):
        return a[0, 0]
    a = np.arange(12).reshape(3, 4)
    self.check(kernel, a)

def test_basic01(self):
    """rel index add const"""
    def kernel(a):
        return a[0, 1]
    a = np.arange(12.).reshape(3, 4)
    self.check(kernel, a)

def test_basic02(self):
    """rel index add const"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        return a[0, -1]
    self.check(kernel, a)

def test_basic03(self):
    """rel index add const"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        return a[1, 0]
    self.check(kernel, a)

def test_basic04(self):
    """rel index add const"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        return a[-1, 0]
    self.check(kernel, a)

def test_basic05(self):
    """rel index add const"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        return a[-1, 1]
    self.check(kernel, a)

def test_basic06(self):
    """rel index add const"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        return a[1, -1]
    self.check(kernel, a)

def test_basic07(self):
    """rel index add const"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        return a[1, 1]
    self.check(kernel, a)

def test_basic08(self):
    """rel index add const"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        return a[-1, -1]
    self.check(kernel, a)

def test_basic09(self):
    """rel index add const"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        return a[-2, 2]
    self.check(kernel, a)

def test_basic10(self):
    """rel index add const"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        return a[0, 0] + a[1, 0]
    self.check(kernel, a)

def test_basic11(self):
    """rel index add const"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        return a[-1, 0] + a[1, 0]
    self.check(kernel, a)

def test_basic12(self):
    """rel index add const"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        return a[-1, 1] + a[1, -1]
    self.check(kernel, a)

def test_basic13(self):
    """rel index add const"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        return a[-1, -1] + a[1, 1]
    self.check(kernel, a)
# --- dtype widening, boundary offsets, const folding, kernel-body work ---

def test_basic14(self):
    """rel index add domain change const"""
    a = np.arange(12).reshape(3, 4)

    def kernel(a):
        return a[0, 0] + 1j
    self.check(kernel, a)

def test_basic14b(self):
    """rel index add domain change const"""
    a = np.arange(12).reshape(3, 4)

    def kernel(a):
        t = 1.j
        return a[0, 0] + t
    self.check(kernel, a)

def test_basic15(self):
    """two rel index, add const"""
    a = np.arange(12).reshape(3, 4)

    def kernel(a):
        return a[0, 0] + a[1, 0] + 1.
    self.check(kernel, a)

def test_basic16(self):
    """two rel index OOB, add const"""
    a = np.arange(12).reshape(3, 4)

    def kernel(a):
        return a[0, 0] + a[10, 0] + 1.
    # only pyStencil bounds checks
    ex = self.exception_dict(pyStencil=IndexError)
    self.check(kernel, a, expected_exception=ex)

def test_basic17(self):
    """two rel index boundary test, add const"""
    a = np.arange(12).reshape(3, 4)

    def kernel(a):
        return a[0, 0] + a[2, 0] + 1.
    self.check(kernel, a)

def test_basic18(self):
    """two rel index boundary test, add const"""
    a = np.arange(12).reshape(3, 4)

    def kernel(a):
        return a[0, 0] + a[-2, 0] + 1.
    self.check(kernel, a)

def test_basic19(self):
    """two rel index boundary test, add const"""
    a = np.arange(12).reshape(3, 4)

    def kernel(a):
        return a[0, 0] + a[0, 3] + 1.
    self.check(kernel, a)

def test_basic20(self):
    """two rel index boundary test, add const"""
    a = np.arange(12).reshape(3, 4)

    def kernel(a):
        return a[0, 0] + a[0, -3] + 1.
    self.check(kernel, a)

def test_basic21(self):
    """same rel, add const"""
    a = np.arange(12).reshape(3, 4)

    def kernel(a):
        return a[0, 0] + a[0, 0] + 1.
    self.check(kernel, a)

def test_basic22(self):
    """rel idx const expr folding, add const"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        return a[1 + 0, 0] + a[0, 0] + 1.
    self.check(kernel, a)

def test_basic23(self):
    """rel idx, work in body"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        x = np.sin(10 + a[2, 1])
        return a[1 + 0, 0] + a[0, 0] + x
    self.check(kernel, a)

def test_basic23a(self):
    """rel idx, dead code should not impact rel idx"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        x = np.sin(10 + a[2, 1])
        return a[1 + 0, 0] + a[0, 0]
    self.check(kernel, a)
# --- malformed-index errors, higher-rank arrays, type widening -----------

def test_basic24(self):
    """1d idx on 2d arr"""
    a = np.arange(12).reshape(3, 4)

    def kernel(a):
        return a[0] + 1.
    self.check(kernel, a, expected_exception=[ValueError, TypingError])

def test_basic25(self):
    """no idx on 2d arr"""
    a = np.arange(12).reshape(3, 4)

    def kernel(a):
        return 1.
    self.check(kernel, a, expected_exception=[ValueError, NumbaValueError])

def test_basic26(self):
    """3d arr"""
    a = np.arange(64).reshape(4, 8, 2)

    def kernel(a):
        return a[0, 0, 0] - a[0, 1, 0] + 1.
    self.check(kernel, a)

def test_basic27(self):
    """4d arr"""
    a = np.arange(128).reshape(4, 8, 2, 2)

    def kernel(a):
        return a[0, 0, 0, 0] - a[0, 1, 0, -1] + 1.
    self.check(kernel, a)

def test_basic28(self):
    """type widen """
    a = np.arange(12).reshape(3, 4).astype(np.float32)

    def kernel(a):
        return a[0, 0] + np.float64(10.)
    self.check(kernel, a)

def test_basic29(self):
    """const index from func """
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        return a[0, int(np.cos(0))]
    self.check(kernel, a, expected_exception=[ValueError, NumbaValueError,
                                              LoweringError])
# --- constant propagation into relative indices --------------------------

def test_basic30(self):
    """signed zeros"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        return a[-0, -0]
    self.check(kernel, a)

def test_basic31(self):
    """does a const propagate? 2D"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        t = 1
        return a[t, 0]
    self.check(kernel, a)

@unittest.skip("constant folding not implemented")
def test_basic31b(self):
    """does a const propagate?"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        s = 1
        t = 1 - s
        return a[t, 0]
    self.check(kernel, a)

def test_basic31c(self):
    """does a const propagate? 1D"""
    a = np.arange(12.)

    def kernel(a):
        t = 1
        return a[t]
    self.check(kernel, a)

def test_basic32(self):
    """typed int index"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        return a[np.int8(1), 0]
    self.check(kernel, a, expected_exception=[ValueError, NumbaValueError,
                                              LoweringError])

def test_basic33(self):
    """add 0d array"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        return a[0, 0] + np.array(1)
    self.check(kernel, a)

def test_basic34(self):
    """More complex rel index with dependency on addition rel index"""
    def kernel(a):
        g = 4. + a[0, 1]
        return g + (a[0, 1] + a[1, 0] + a[0, -1] + np.sin(a[-2, 0]))
    a = np.arange(144).reshape(12, 12)
    self.check(kernel, a)
# --- cval (border fill value) handling -----------------------------------

def test_basic35(self):
    """simple cval """
    def kernel(a):
        return a[0, 1]
    a = np.arange(12.).reshape(3, 4)
    # integer cval against a float array: compiled impls reject the
    # dtype mismatch
    ex = self.exception_dict(
        stencil=NumbaValueError,
        parfor=ValueError,
        njit=NumbaValueError)
    self.check(kernel, a, options={'cval': 5}, expected_exception=ex)

def test_basic36(self):
    """more complex with cval"""
    def kernel(a):
        return a[0, 1] + a[0, -1] + a[1, -1] + a[1, -1]
    a = np.arange(12.).reshape(3, 4)
    self.check(kernel, a, options={'cval': 5.})

def test_basic37(self):
    """cval is expr"""
    def kernel(a):
        return a[0, 1] + a[0, -1] + a[1, -1] + a[1, -1]
    a = np.arange(12.).reshape(3, 4)
    self.check(kernel, a, options={'cval': 5 + 63.})

def test_basic38(self):
    """cval is complex"""
    def kernel(a):
        return a[0, 1] + a[0, -1] + a[1, -1] + a[1, -1]
    a = np.arange(12.).reshape(3, 4)
    ex = self.exception_dict(
        stencil=NumbaValueError,
        parfor=ValueError,
        njit=NumbaValueError)
    self.check(kernel, a, options={'cval': 1.j}, expected_exception=ex)

def test_basic39(self):
    """cval is func expr"""
    def kernel(a):
        return a[0, 1] + a[0, -1] + a[1, -1] + a[1, -1]
    a = np.arange(12.).reshape(3, 4)
    self.check(kernel, a, options={'cval': np.sin(3.) + np.cos(2)})
# --- multiple input arrays: shape agreement and in-kernel writes ---------

def test_basic40(self):
    """2 args!"""
    def kernel(a, b):
        return a[0, 1] + b[0, -2]
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(12.).reshape(3, 4)
    self.check(kernel, a, b)

def test_basic41(self):
    """2 args! rel arrays wildly not same size!"""
    def kernel(a, b):
        return a[0, 1] + b[0, -2]
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(1.).reshape(1, 1)
    self.check(
        kernel, a, b, expected_exception=[
            ValueError, AssertionError])

def test_basic42(self):
    """2 args! rel arrays very close in size"""
    def kernel(a, b):
        return a[0, 1] + b[0, -2]
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(9.).reshape(3, 3)
    self.check(
        kernel, a, b, expected_exception=[
            ValueError, AssertionError])

def test_basic43(self):
    """2 args more complexity"""
    def kernel(a, b):
        return a[0, 1] + a[1, 2] + b[-2, 0] + b[0, -1]
    a = np.arange(30.).reshape(5, 6)
    b = np.arange(30.).reshape(5, 6)
    self.check(kernel, a, b)

def test_basic44(self):
    """2 args, has assignment before use"""
    def kernel(a, b):
        a[0, 1] = 12
        return a[0, 1]
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(12.).reshape(3, 4)
    self.check(
        kernel, a, b, expected_exception=[
            ValueError, LoweringError])

def test_basic45(self):
    """2 args, has assignment and then cross dependency"""
    def kernel(a, b):
        a[0, 1] = 12
        return a[0, 1] + a[1, 0]
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(12.).reshape(3, 4)
    self.check(
        kernel, a, b, expected_exception=[
            ValueError, LoweringError])

def test_basic46(self):
    """2 args, has cross relidx assignment"""
    def kernel(a, b):
        a[0, 1] = b[1, 2]
        return a[0, 1] + a[1, 0]
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(12.).reshape(3, 4)
    self.check(
        kernel, a, b, expected_exception=[
            ValueError, LoweringError])

def test_basic47(self):
    """3 args"""
    def kernel(a, b, c):
        return a[0, 1] + b[1, 0] + c[-1, 0]
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(12.).reshape(3, 4)
    c = np.arange(12.).reshape(3, 4)
    self.check(kernel, a, b, c)

# matches pyStencil, but all ought to fail
# probably hard to detect?
# --- aliasing writes and first standard_indexing cases -------------------

def test_basic48(self):
    """2 args, has assignment before use via memory alias"""
    def kernel(a):
        c = a.T
        c[:, :] = 10
        return a[0, 1]
    a = np.arange(12.).reshape(3, 4)
    self.check(kernel, a)

def test_basic49(self):
    """2 args, standard_indexing on second"""
    def kernel(a, b):
        return a[0, 1] + b[0, 3]
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(12.).reshape(3, 4)
    self.check(kernel, a, b, options={'standard_indexing': 'b'})

@unittest.skip("dynamic range checking not implemented")
def test_basic50(self):
    """2 args, standard_indexing OOB"""
    def kernel(a, b):
        return a[0, 1] + b[0, 15]
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(12.).reshape(3, 4)
    self.check(
        kernel,
        a,
        b,
        options={
            'standard_indexing': 'b'},
        expected_exception=IndexError)
# --- standard_indexing variations: validation, derived indices, mixes ----

def test_basic51(self):
    """2 args, standard_indexing, no relidx"""
    def kernel(a, b):
        return a[0, 1] + b[0, 2]
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(12.).reshape(3, 4)
    # every arg standard-indexed leaves nothing to induce the stencil
    self.check(
        kernel, a, b, options={
            'standard_indexing': [
                'a', 'b']}, expected_exception=[
            ValueError, NumbaValueError])

def test_basic52(self):
    """3 args, standard_indexing on middle arg """
    def kernel(a, b, c):
        return a[0, 1] + b[0, 1] + c[1, 2]
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(4.).reshape(2, 2)
    c = np.arange(12.).reshape(3, 4)
    self.check(kernel, a, b, c, options={'standard_indexing': 'b'})

def test_basic53(self):
    """2 args, standard_indexing on variable that does not exist"""
    def kernel(a, b):
        return a[0, 1] + b[0, 2]
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(12.).reshape(3, 4)
    ex = self.exception_dict(
        pyStencil=ValueError,
        stencil=Exception,
        parfor=ValueError,
        njit=Exception)
    self.check(
        kernel,
        a,
        b,
        options={
            'standard_indexing': 'c'},
        expected_exception=ex)

def test_basic54(self):
    """2 args, standard_indexing, index from var"""
    def kernel(a, b):
        t = 2
        return a[0, 1] + b[0, t]
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(12.).reshape(3, 4)
    self.check(kernel, a, b, options={'standard_indexing': 'b'})

def test_basic55(self):
    """2 args, standard_indexing, index from more complex var"""
    def kernel(a, b):
        s = 1
        t = 2 - s
        return a[0, 1] + b[0, t]
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(12.).reshape(3, 4)
    self.check(kernel, a, b, options={'standard_indexing': 'b'})

def test_basic56(self):
    """2 args, standard_indexing, added complexity """
    def kernel(a, b):
        s = 1
        acc = 0
        for k in b[0, :]:
            acc += k
        t = 2 - s - 1
        return a[0, 1] + b[0, t] + acc
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(12.).reshape(3, 4)
    self.check(kernel, a, b, options={'standard_indexing': 'b'})

def test_basic57(self):
    """2 args, standard_indexing, split index operation """
    def kernel(a, b):
        c = b[0]
        return a[0, 1] + c[1]
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(12.).reshape(3, 4)
    self.check(kernel, a, b, options={'standard_indexing': 'b'})

def test_basic58(self):
    """2 args, standard_indexing, split index with broadcast mutation """
    def kernel(a, b):
        c = b[0] + 1
        return a[0, 1] + c[1]
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(12.).reshape(3, 4)
    self.check(kernel, a, b, options={'standard_indexing': 'b'})

def test_basic59(self):
    """3 args, mix of array, relative and standard indexing and const"""
    def kernel(a, b, c):
        return a[0, 1] + b[1, 1] + c
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(12.).reshape(3, 4)
    c = 10
    self.check(kernel, a, b, c, options={'standard_indexing': ['b', 'c']})
# --- standard_indexing corner cases: tuples, first arg, cval, bad idx ----

def test_basic60(self):
    """3 args, mix of array, relative and standard indexing,
    tuple pass through"""
    def kernel(a, b, c):
        return a[0, 1] + b[1, 1] + c[0]
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(12.).reshape(3, 4)
    c = (10,)
    # parfors does not support tuple args for stencil kernels
    ex = self.exception_dict(parfor=ValueError)
    self.check(
        kernel, a, b, c, options={
            'standard_indexing': [
                'b', 'c']}, expected_exception=ex)

def test_basic61(self):
    """2 args, standard_indexing on first"""
    def kernel(a, b):
        return a[0, 1] + b[1, 1]
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(12.).reshape(3, 4)
    self.check(
        kernel,
        a,
        b,
        options={
            'standard_indexing': 'a'},
        expected_exception=Exception)

def test_basic62(self):
    """2 args, standard_indexing and cval"""
    def kernel(a, b):
        return a[0, 1] + b[1, 1]
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(12.).reshape(3, 4)
    self.check(
        kernel,
        a,
        b,
        options={
            'standard_indexing': 'b',
            'cval': 10.})

def test_basic63(self):
    """2 args, standard_indexing applied to relative, should fail,
    non-const idx"""
    def kernel(a, b):
        return a[0, b[0, 1]]
    a = np.arange(12.).reshape(3, 4)
    b = np.arange(12).reshape(3, 4)
    ex = self.exception_dict(
        pyStencil=ValueError,
        stencil=NumbaValueError,
        parfor=ValueError,
        njit=NumbaValueError)
    self.check(
        kernel,
        a,
        b,
        options={
            'standard_indexing': 'b'},
        expected_exception=ex)

# stencil, njit, parfors all fail. Does this make sense?
def test_basic64(self):
    """1 arg that uses standard_indexing"""
    def kernel(a):
        return a[0, 0]
    a = np.arange(12.).reshape(3, 4)
    self.check(
        kernel,
        a,
        options={
            'standard_indexing': 'a'},
        expected_exception=[
            ValueError,
            NumbaValueError])
# --- explicit neighborhood option: induced and constant indices ----------

def test_basic65(self):
    """basic induced neighborhood test"""
    def kernel(a):
        cumul = 0
        for i in range(-29, 1):
            cumul += a[i]
        return cumul / 30
    a = np.arange(60.)
    self.check(kernel, a, options={'neighborhood': ((-29, 0),)})

# Should this work? a[0] is out of neighborhood?
def test_basic66(self):
    """basic const neighborhood test"""
    def kernel(a):
        cumul = 0
        for i in range(-29, 1):
            cumul += a[0]
        return cumul / 30
    a = np.arange(60.)
    self.check(kernel, a, options={'neighborhood': ((-29, 0),)})

def test_basic67(self):
    """basic 2d induced neighborhood test"""
    def kernel(a):
        cumul = 0
        for i in range(-5, 1):
            for j in range(-10, 1):
                cumul += a[i, j]
        return cumul / (10 * 5)
    a = np.arange(10. * 20.).reshape(10, 20)
    self.check(kernel, a, options={'neighborhood': ((-5, 0), (-10, 0),)})

def test_basic67b(self):
    """basic 2d induced 1D neighborhood"""
    def kernel(a):
        cumul = 0
        for j in range(-10, 1):
            cumul += a[0, j]
        return cumul / (10 * 5)
    a = np.arange(10. * 20.).reshape(10, 20)
    # 1D neighborhood spec for a 2D array is rejected
    self.check(
        kernel,
        a,
        options={
            'neighborhood': (
                (-10,
                 0),
            )},
        expected_exception=[
            TypingError,
            ValueError])

# Should this work or is it UB? a[i, 0] is out of neighborhood?
def test_basic68(self):
    """basic 2d one induced, one cost neighborhood test"""
    def kernel(a):
        cumul = 0
        for i in range(-5, 1):
            for j in range(-10, 1):
                cumul += a[i, 0]
        return cumul / (10 * 5)
    a = np.arange(10. * 20.).reshape(10, 20)
    self.check(kernel, a, options={'neighborhood': ((-5, 0), (-10, 0),)})

# Should this work or is it UB? a[0, 0] is out of neighborhood?
def test_basic69(self):
    """basic 2d two cost neighborhood test"""
    def kernel(a):
        cumul = 0
        for i in range(-5, 1):
            for j in range(-10, 1):
                cumul += a[0, 0]
        return cumul / (10 * 5)
    a = np.arange(10. * 20.).reshape(10, 20)
    self.check(kernel, a, options={'neighborhood': ((-5, 0), (-10, 0),)})
# --- neighborhood range shapes: narrower, +ve, -ve and mixed spans -------

def test_basic70(self):
    """neighborhood adding complexity"""
    def kernel(a):
        cumul = 0
        zz = 12.
        for i in range(-5, 1):
            t = zz + i
            for j in range(-10, 1):
                cumul += a[i, j] + t
        return cumul / (10 * 5)
    a = np.arange(10. * 20.).reshape(10, 20)
    self.check(kernel, a, options={'neighborhood': ((-5, 0), (-10, 0),)})

def test_basic71(self):
    """neighborhood, type change"""
    def kernel(a):
        cumul = 0
        for i in range(-29, 1):
            k = 0.
            if i > -15:
                k = 1j
            cumul += a[i] + k
        return cumul / 30
    a = np.arange(60.)
    self.check(kernel, a, options={'neighborhood': ((-29, 0),)})

def test_basic72(self):
    """neighborhood, narrower range than specified"""
    def kernel(a):
        cumul = 0
        for i in range(-19, -3):
            cumul += a[i]
        return cumul / 30
    a = np.arange(60.)
    self.check(kernel, a, options={'neighborhood': ((-29, 0),)})

def test_basic73(self):
    """neighborhood, +ve range"""
    def kernel(a):
        cumul = 0
        for i in range(5, 11):
            cumul += a[i]
        return cumul / 30
    a = np.arange(60.)
    self.check(kernel, a, options={'neighborhood': ((5, 10),)})

def test_basic73b(self):
    """neighborhood, -ve range"""
    def kernel(a):
        cumul = 0
        for i in range(-10, -4):
            cumul += a[i]
        return cumul / 30
    a = np.arange(60.)
    self.check(kernel, a, options={'neighborhood': ((-10, -5),)})

def test_basic74(self):
    """neighborhood, -ve->+ve range span"""
    def kernel(a):
        cumul = 0
        for i in range(-5, 11):
            cumul += a[i]
        return cumul / 30
    a = np.arange(60.)
    self.check(kernel, a, options={'neighborhood': ((-5, 10),)})

def test_basic75(self):
    """neighborhood, -ve->-ve range span"""
    def kernel(a):
        cumul = 0
        for i in range(-10, -1):
            cumul += a[i]
        return cumul / 30
    a = np.arange(60.)
    self.check(kernel, a, options={'neighborhood': ((-10, -2),)})

def test_basic76(self):
    """neighborhood, mixed range span"""
    def kernel(a):
        cumul = 0
        zz = 12.
        for i in range(-3, 0):
            t = zz + i
            for j in range(-3, 4):
                cumul += a[i, j] + t
        return cumul / (10 * 5)
    a = np.arange(10. * 20.).reshape(10, 20)
    self.check(kernel, a, options={'neighborhood': ((-3, -1), (-3, 3),)})
# --- neighborhood with multiple args -------------------------------------

def test_basic77(self):
    """ neighborhood, two args """
    def kernel(a, b):
        cumul = 0
        for i in range(-3, 1):
            for j in range(-3, 1):
                cumul += a[i, j] + b[i, j]
        return cumul / (9.)
    a = np.arange(10. * 20.).reshape(10, 20)
    b = np.arange(10. * 20.).reshape(10, 20)
    self.check(kernel, a, b, options={'neighborhood': ((-3, 0), (-3, 0),)})

def test_basic78(self):
    """ neighborhood, two args, -ve range, -ve range """
    def kernel(a, b):
        cumul = 0
        for i in range(-6, -2):
            for j in range(-7, -1):
                cumul += a[i, j] + b[i, j]
        return cumul / (9.)
    a = np.arange(15. * 20.).reshape(15, 20)
    b = np.arange(15. * 20.).reshape(15, 20)
    self.check(
        kernel, a, b, options={
            'neighborhood': (
                (-6, -3), (-7, -2),)})

def test_basic78b(self):
    """ neighborhood, two args, -ve range, +ve range """
    def kernel(a, b):
        cumul = 0
        for i in range(-6, -2):
            for j in range(2, 10):
                cumul += a[i, j] + b[i, j]
        return cumul / (9.)
    a = np.arange(15. * 20.).reshape(15, 20)
    b = np.arange(15. * 20.).reshape(15, 20)
    self.check(kernel, a, b, options={'neighborhood': ((-6, -3), (2, 9),)})

def test_basic79(self):
    """ neighborhood, two incompatible args """
    def kernel(a, b):
        cumul = 0
        for i in range(-3, 1):
            for j in range(-3, 1):
                cumul += a[i, j] + b[i, j]
        return cumul / (9.)
    a = np.arange(10. * 20.).reshape(10, 20)
    b = np.arange(10. * 20.).reshape(10, 10, 2)
    ex = self.exception_dict(
        pyStencil=ValueError,
        stencil=TypingError,
        parfor=TypingError,
        njit=TypingError)
    self.check(
        kernel, a, b, options={
            'neighborhood': (
                (-3, 0), (-3, 0),)}, expected_exception=ex)

def test_basic80(self):
    """ neighborhood, type change """
    def kernel(a, b):
        cumul = 0
        for i in range(-3, 1):
            for j in range(-3, 1):
                cumul += a[i, j] + b
        return cumul / (9.)
    a = np.arange(10. * 20.).reshape(10, 20)
    b = 12.j
    self.check(kernel, a, b, options={'neighborhood': ((-3, 0), (-3, 0))})
# --- neighborhood combined with dim mismatch, standard_indexing, cval ----

def test_basic81(self):
    """ neighborhood, dimensionally incompatible arrays """
    def kernel(a, b):
        cumul = 0
        for i in range(-3, 1):
            for j in range(-3, 1):
                cumul += a[i, j] + b[i]
        return cumul / (9.)
    a = np.arange(10. * 20.).reshape(10, 20)
    b = a[0].copy()
    ex = self.exception_dict(
        pyStencil=ValueError,
        stencil=TypingError,
        parfor=AssertionError,
        njit=TypingError)
    self.check(
        kernel, a, b, options={
            'neighborhood': (
                (-3, 0), (-3, 0))}, expected_exception=ex)

def test_basic82(self):
    """ neighborhood, with standard_indexing"""
    def kernel(a, b):
        cumul = 0
        for i in range(-3, 1):
            for j in range(-3, 1):
                cumul += a[i, j] + b[1, 3]
        return cumul / (9.)
    a = np.arange(10. * 20.).reshape(10, 20)
    b = a.copy()
    self.check(
        kernel, a, b, options={
            'neighborhood': (
                (-3, 0), (-3, 0)), 'standard_indexing': 'b'})

def test_basic83(self):
    """ neighborhood, with standard_indexing and cval"""
    def kernel(a, b):
        cumul = 0
        for i in range(-3, 1):
            for j in range(-3, 1):
                cumul += a[i, j] + b[1, 3]
        return cumul / (9.)
    a = np.arange(10. * 20.).reshape(10, 20)
    b = a.copy()
    self.check(
        kernel, a, b, options={
            'neighborhood': (
                (-3, 0), (-3, 0)), 'standard_indexing': 'b', 'cval': 1.5})
# --- jitted callees, option validation, reserved names, multi-return -----

def test_basic84(self):
    """ kernel calls njit """
    def kernel(a):
        return a[0, 0] + addone_njit(a[0, 1])
    a = np.arange(10. * 20.).reshape(10, 20)
    self.check(kernel, a)

def test_basic85(self):
    """ kernel calls njit(parallel=True)"""
    def kernel(a):
        return a[0, 0] + addone_pjit(a[0, 1])
    a = np.arange(10. * 20.).reshape(10, 20)
    self.check(kernel, a)

# njit/parfors fail correctly, but the error message isn't very informative
def test_basic86(self):
    """ bad kwarg """
    def kernel(a):
        return a[0, 0]
    a = np.arange(10. * 20.).reshape(10, 20)
    self.check(kernel, a, options={'bad': 10},
               expected_exception=[ValueError, TypingError])

def test_basic87(self):
    """ reserved arg name in use """
    def kernel(__sentinel__):
        return __sentinel__[0, 0]
    a = np.arange(10. * 20.).reshape(10, 20)
    self.check(kernel, a)

def test_basic88(self):
    """ use of reserved word """
    def kernel(a, out):
        return out * a[0, 1]
    a = np.arange(12.).reshape(3, 4)
    ex = self.exception_dict(
        pyStencil=ValueError,
        stencil=NumbaValueError,
        parfor=ValueError,
        njit=NumbaValueError)
    self.check(
        kernel,
        a,
        1.0,
        options={},
        expected_exception=ex)

def test_basic89(self):
    """ basic multiple return"""
    def kernel(a):
        if a[0, 1] > 10:
            return 10.
        elif a[0, 3] < 8:
            return a[0, 0]
        else:
            return 7.
    a = np.arange(10. * 20.).reshape(10, 20)
    self.check(kernel, a)
def test_basic90(self):
""" neighborhood, with standard_indexing and cval, multiple returns"""
def kernel(a, b):
cumul = 0
for i in range(-3, 1):
for j in range(-3, 1):
cumul += a[i, j] + b[1, 3]
res = cumul / (9.)
if res > 200.0:
return res + 1.0
else:
return res
a = np.arange(10. * 20.).reshape(10, 20)
b = a.copy()
self.check(
kernel, a, b, options={
'neighborhood': (
(-3, 0), (-3, 0)), 'standard_indexing': 'b', 'cval': 1.5})
def test_basic91(self):
""" Issue #3454, const(int) == const(int) evaluating incorrectly. """
def kernel(a):
b = 0
if(2 == 0):
b = 2
return a[0, 0] + b
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a)
def test_basic92(self):
""" Issue #3497, bool return type evaluating incorrectly. """
def kernel(a):
return (a[-1, -1] ^ a[-1, 0] ^ a[-1, 1] ^
a[0, -1] ^ a[0, 0] ^ a[0, 1] ^
a[1, -1] ^ a[1, 0] ^ a[1, 1])
A = np.array(np.arange(20) % 2).reshape(4, 5).astype(np.bool_)
self.check(kernel, A)
def test_basic93(self):
""" Issue #3497, bool return type evaluating incorrectly. """
def kernel(a):
return (a[-1, -1] ^ a[-1, 0] ^ a[-1, 1] ^
a[0, -1] ^ a[0, 0] ^ a[0, 1] ^
a[1, -1] ^ a[1, 0] ^ a[1, 1])
A = np.array(np.arange(20) % 2).reshape(4, 5).astype(np.bool_)
self.check(kernel, A, options={'cval': True})
def test_basic94(self):
""" Issue #3528. Support for slices. """
def kernel(a):
return np.median(a[-1:2, -1:2])
a = np.arange(20, dtype=np.uint32).reshape(4, 5)
self.check(kernel, a, options={'neighborhood': ((-1, 1), (-1, 1),)})
    @unittest.skip("not yet supported")
    def test_basic95(self):
        """ Slice, calculate neighborhood. """
        # Inferring the neighborhood from slice bounds is not implemented yet.
        def kernel(a):
            return np.median(a[-1:2, -3:4])
        a = np.arange(20, dtype=np.uint32).reshape(4, 5)
        self.check(kernel, a)
    def test_basic96(self):
        """ 1D slice. """
        # One-dimensional variant of the slice support in test_basic94.
        def kernel(a):
            return np.median(a[-1:2])
        a = np.arange(20, dtype=np.uint32)
        self.check(kernel, a, options={'neighborhood': ((-1, 1),)})
    @unittest.skip("not yet supported")
    def test_basic97(self):
        """ 2D slice and index. """
        # Mixing a slice with a scalar index is not implemented yet.
        def kernel(a):
            return np.median(a[-1:2, 3])
        a = np.arange(20, dtype=np.uint32).reshape(4, 5)
        self.check(kernel, a)
    def test_basic98(self):
        """ Test issue #7286 where the cval is a np attr/string-based numerical
        constant"""
        # Each special float must be accepted as a 'cval' border value.
        for cval in (np.nan, np.inf, -np.inf, float('inf'), -float('inf')):
            def kernel(a):
                return a[0, 0]
            a = np.arange(6.).reshape((2, 3))
            self.check(kernel, a, options={'neighborhood': ((-1, 1), (-1, 1),),
                                           'cval':cval})
# Allow running this test module directly (outside the numba test runner).
if __name__ == "__main__":
    unittest.main()
| seibert/numba | numba/tests/test_stencils.py | Python | bsd-2-clause | 98,430 | [
"VisIt"
] | 460c20275f075fbe4f107f452bf65307c26744e4152226b0cb73c821360e4370 |
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import errno
import fnmatch
import json
import operator
import os
import shutil
import stat
import sys
import tarfile
import tempfile
import threading
import time
import yaml
from collections import namedtuple
from contextlib import contextmanager
from distutils.version import LooseVersion
from hashlib import sha256
from io import BytesIO
from yaml.error import YAMLError
try:
import queue
except ImportError:
import Queue as queue # Python 2
import ansible.constants as C
from ansible.errors import AnsibleError
from ansible.galaxy import get_collections_galaxy_meta_info
from ansible.galaxy.api import CollectionVersionMetadata, GalaxyError
from ansible.galaxy.user_agent import user_agent
from ansible.module_utils import six
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.utils.collection_loader import AnsibleCollectionRef
from ansible.utils.display import Display
from ansible.utils.galaxy import scm_archive_collection
from ansible.utils.hashing import secure_hash, secure_hash_s
from ansible.utils.version import SemanticVersion
from ansible.module_utils.urls import open_url
urlparse = six.moves.urllib.parse.urlparse
urldefrag = six.moves.urllib.parse.urldefrag
urllib_error = six.moves.urllib.error
display = Display()
MANIFEST_FORMAT = 1
ModifiedContent = namedtuple('ModifiedContent', ['filename', 'expected', 'installed'])
class CollectionRequirement:
    """A single collection requirement: the candidate versions available for it,
    where the artifact can be obtained from, and the requirements placed on it
    by the user and by other collections that depend on it.
    """

    # Metadata files expected inside a collection artifact, mapped to the dict
    # key / attribute name the parsed JSON document is stored under.
    _FILE_MAPPING = [(b'MANIFEST.json', 'manifest_file'), (b'FILES.json', 'files_file')]

    def __init__(self, namespace, name, b_path, api, versions, requirement, force, parent=None, metadata=None,
                 files=None, skip=False, allow_pre_releases=False):
        """Represents a collection requirement, the versions that are available to be installed as well as any
        dependencies the collection has.

        :param namespace: The collection namespace.
        :param name: The collection name.
        :param b_path: Byte str of the path to the collection tarball if it has already been downloaded.
        :param api: The GalaxyAPI to use if the collection is from Galaxy.
        :param versions: A list of versions of the collection that are available.
        :param requirement: The version requirement string used to verify the list of versions fit the requirements.
        :param force: Whether the force flag applied to the collection.
        :param parent: The name of the parent the collection is a dependency of.
        :param metadata: The galaxy.api.CollectionVersionMetadata that has already been retrieved from the Galaxy
            server.
        :param files: The files that exist inside the collection. This is based on the FILES.json file inside the
            collection artifact.
        :param skip: Whether to skip installing the collection. Should be set if the collection is already installed
            and force is not set.
        :param allow_pre_releases: Whether to skip pre-release versions of collections.
        """
        self.namespace = namespace
        self.name = name
        self.b_path = b_path
        self.api = api
        self._versions = set(versions)
        self.force = force
        self.skip = skip
        self.required_by = []
        self.allow_pre_releases = allow_pre_releases

        self._metadata = metadata
        self._files = files

        # Registering the initial requirement narrows self.versions immediately.
        self.add_requirement(parent, requirement)

    def __str__(self):
        # Native str "namespace.name" form used in user-facing messages.
        return to_native("%s.%s" % (self.namespace, self.name))

    def __unicode__(self):
        # Python 2 text-type counterpart of __str__.
        return u"%s.%s" % (self.namespace, self.name)

    @property
    def metadata(self):
        # Lazily fetches metadata from the Galaxy API on first access.
        self._get_metadata()
        return self._metadata

    @property
    def versions(self):
        # Pre-release versions are hidden unless explicitly allowed; the '*'
        # wildcard (unknown version) is always kept.
        if self.allow_pre_releases:
            return self._versions
        return set(v for v in self._versions if v == '*' or not SemanticVersion(v).is_prerelease)

    @versions.setter
    def versions(self, value):
        self._versions = set(value)

    @property
    def pre_releases(self):
        # All candidate versions that are pre-releases (regardless of the
        # allow_pre_releases setting).
        return set(v for v in self._versions if SemanticVersion(v).is_prerelease)

    @property
    def latest_version(self):
        try:
            return max([v for v in self.versions if v != '*'], key=SemanticVersion)
        except ValueError:  # ValueError: max() arg is an empty sequence
            return '*'

    @property
    def dependencies(self):
        if not self._metadata:
            # With multiple candidate versions left we cannot know which
            # version's dependencies apply yet, so report none.
            if len(self.versions) > 1:
                return {}
            self._get_metadata()

        dependencies = self._metadata.dependencies

        if dependencies is None:
            return {}

        return dependencies

    @staticmethod
    def artifact_info(b_path):
        """Load the manifest data from the MANIFEST.json and FILES.json. If the files exist, return a dict containing the keys 'files_file' and 'manifest_file'.

        :param b_path: The directory of a collection.
        """
        info = {}
        for b_file_name, property_name in CollectionRequirement._FILE_MAPPING:
            b_file_path = os.path.join(b_path, b_file_name)
            if not os.path.exists(b_file_path):
                continue
            with open(b_file_path, 'rb') as file_obj:
                try:
                    info[property_name] = json.loads(to_text(file_obj.read(), errors='surrogate_or_strict'))
                except ValueError:
                    raise AnsibleError("Collection file at '%s' does not contain a valid json string." % to_native(b_file_path))
        return info

    @staticmethod
    def galaxy_metadata(b_path):
        """Generate the manifest data from the galaxy.yml file.
        If the galaxy.yml exists, return a dictionary containing the keys 'files_file' and 'manifest_file'.

        :param b_path: The directory of a collection.
        """
        b_galaxy_path = get_galaxy_metadata_path(b_path)
        info = {}
        if os.path.exists(b_galaxy_path):
            collection_meta = _get_galaxy_yml(b_galaxy_path)
            info['files_file'] = _build_files_manifest(b_path, collection_meta['namespace'], collection_meta['name'], collection_meta['build_ignore'])
            info['manifest_file'] = _build_manifest(**collection_meta)
        return info

    @staticmethod
    def collection_info(b_path, fallback_metadata=False):
        # Prefer the built artifact metadata (MANIFEST.json/FILES.json); fall
        # back to generating it from galaxy.yml when requested.
        info = CollectionRequirement.artifact_info(b_path)
        if info or not fallback_metadata:
            return info
        return CollectionRequirement.galaxy_metadata(b_path)

    def add_requirement(self, parent, requirement):
        """Record that *parent* requires this collection at *requirement* and
        narrow the candidate versions accordingly; raises AnsibleError if no
        candidate version satisfies all recorded requirements.
        """
        self.required_by.append((parent, requirement))
        new_versions = set(v for v in self.versions if self._meets_requirements(v, requirement, parent))
        if len(new_versions) == 0:
            if self.skip:
                force_flag = '--force-with-deps' if parent else '--force'
                version = self.latest_version if self.latest_version != '*' else 'unknown'
                msg = "Cannot meet requirement %s:%s as it is already installed at version '%s'. Use %s to overwrite" \
                      % (to_text(self), requirement, version, force_flag)
                raise AnsibleError(msg)
            elif parent is None:
                msg = "Cannot meet requirement %s for dependency %s" % (requirement, to_text(self))
            else:
                msg = "Cannot meet dependency requirement '%s:%s' for collection %s" \
                      % (to_text(self), requirement, parent)

            collection_source = to_text(self.b_path, nonstring='passthru') or self.api.api_server
            req_by = "\n".join(
                "\t%s - '%s:%s'" % (to_text(p) if p else 'base', to_text(self), r)
                for p, r in self.required_by
            )

            versions = ", ".join(sorted(self.versions, key=SemanticVersion))
            if not self.versions and self.pre_releases:
                pre_release_msg = (
                    '\nThis collection only contains pre-releases. Utilize `--pre` to install pre-releases, or '
                    'explicitly provide the pre-release version.'
                )
            else:
                pre_release_msg = ''

            raise AnsibleError(
                "%s from source '%s'. Available versions before last requirement added: %s\nRequirements from:\n%s%s"
                % (msg, collection_source, versions, req_by, pre_release_msg)
            )

        self.versions = new_versions

    def download(self, b_path):
        """Download the collection artifact into b_path and return its text path."""
        download_url = self._metadata.download_url
        artifact_hash = self._metadata.artifact_sha256
        headers = {}
        self.api._add_auth_token(headers, download_url, required=False)

        b_collection_path = _download_file(download_url, b_path, artifact_hash, self.api.validate_certs,
                                           headers=headers)

        return to_text(b_collection_path, errors='surrogate_or_strict')

    def install(self, path, b_temp_path):
        """Install this collection under path, downloading it first if needed."""
        if self.skip:
            display.display("Skipping '%s' as it is already installed" % to_text(self))
            return

        # Install if it is not
        collection_path = os.path.join(path, self.namespace, self.name)
        b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
        display.display("Installing '%s:%s' to '%s'" % (to_text(self), self.latest_version, collection_path))

        if self.b_path is None:
            self.b_path = self.download(b_temp_path)

        # A pre-existing install is replaced wholesale (force was validated upstream).
        if os.path.exists(b_collection_path):
            shutil.rmtree(b_collection_path)

        # A file is a downloaded/built tarball; a directory is an SCM checkout.
        if os.path.isfile(self.b_path):
            self.install_artifact(b_collection_path, b_temp_path)
        else:
            self.install_scm(b_collection_path)

    def install_artifact(self, b_collection_path, b_temp_path):
        """Extract the collection tarball at self.b_path into b_collection_path."""
        try:
            with tarfile.open(self.b_path, mode='r') as collection_tar:
                files_member_obj = collection_tar.getmember('FILES.json')
                with _tarfile_extract(collection_tar, files_member_obj) as (dummy, files_obj):
                    files = json.loads(to_text(files_obj.read(), errors='surrogate_or_strict'))

                _extract_tar_file(collection_tar, 'MANIFEST.json', b_collection_path, b_temp_path)
                _extract_tar_file(collection_tar, 'FILES.json', b_collection_path, b_temp_path)

                for file_info in files['files']:
                    file_name = file_info['name']
                    if file_name == '.':
                        continue

                    if file_info['ftype'] == 'file':
                        # Each file's checksum is validated against FILES.json.
                        _extract_tar_file(collection_tar, file_name, b_collection_path, b_temp_path,
                                          expected_hash=file_info['chksum_sha256'])
                    else:
                        _extract_tar_dir(collection_tar, file_name, b_collection_path)

        except Exception:
            # Ensure we don't leave the dir behind in case of a failure.
            shutil.rmtree(b_collection_path)

            b_namespace_path = os.path.dirname(b_collection_path)
            if not os.listdir(b_namespace_path):
                os.rmdir(b_namespace_path)

            raise

    def install_scm(self, b_collection_output_path):
        """Install the collection from source control into given dir.

        Generates the Ansible collection artifact data from a galaxy.yml and installs the artifact to a directory.
        This should follow the same pattern as build_collection, but instead of creating an artifact, install it.

        :param b_collection_output_path: The installation directory for the collection artifact.
        :raises AnsibleError: If no collection metadata found.
        """
        b_collection_path = self.b_path

        b_galaxy_path = get_galaxy_metadata_path(b_collection_path)
        if not os.path.exists(b_galaxy_path):
            raise AnsibleError("The collection galaxy.yml path '%s' does not exist." % to_native(b_galaxy_path))

        info = CollectionRequirement.galaxy_metadata(b_collection_path)

        collection_manifest = info['manifest_file']
        collection_meta = collection_manifest['collection_info']
        file_manifest = info['files_file']

        _build_collection_dir(b_collection_path, b_collection_output_path, collection_manifest, file_manifest)

        collection_name = "%s.%s" % (collection_manifest['collection_info']['namespace'],
                                     collection_manifest['collection_info']['name'])
        display.display('Created collection for %s at %s' % (collection_name, to_text(b_collection_output_path)))

    def set_latest_version(self):
        # Pin the requirement to the single best candidate and resolve its metadata.
        self.versions = set([self.latest_version])
        self._get_metadata()

    def verify(self, remote_collection, path, b_temp_tar_path):
        """Compare the installed collection against the remote artifact's checksums."""
        # verify() only makes sense on an installed collection (skip=True means
        # "already installed" in this model).
        if not self.skip:
            display.display("'%s' has not been installed, nothing to verify" % (to_text(self)))
            return

        collection_path = os.path.join(path, self.namespace, self.name)
        b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')

        display.vvv("Verifying '%s:%s'." % (to_text(self), self.latest_version))
        display.vvv("Installed collection found at '%s'" % collection_path)
        display.vvv("Remote collection found at '%s'" % remote_collection.metadata.download_url)

        # Compare installed version versus requirement version
        if self.latest_version != remote_collection.latest_version:
            err = "%s has the version '%s' but is being compared to '%s'" % (to_text(self), self.latest_version, remote_collection.latest_version)
            display.display(err)
            return

        modified_content = []

        # Verify the manifest hash matches before verifying the file manifest
        expected_hash = _get_tar_file_hash(b_temp_tar_path, 'MANIFEST.json')
        self._verify_file_hash(b_collection_path, 'MANIFEST.json', expected_hash, modified_content)
        manifest = _get_json_from_tar_file(b_temp_tar_path, 'MANIFEST.json')

        # Use the manifest to verify the file manifest checksum
        file_manifest_data = manifest['file_manifest_file']
        file_manifest_filename = file_manifest_data['name']
        expected_hash = file_manifest_data['chksum_%s' % file_manifest_data['chksum_type']]

        # Verify the file manifest before using it to verify individual files
        self._verify_file_hash(b_collection_path, file_manifest_filename, expected_hash, modified_content)
        file_manifest = _get_json_from_tar_file(b_temp_tar_path, file_manifest_filename)

        # Use the file manifest to verify individual file checksums
        for manifest_data in file_manifest['files']:
            if manifest_data['ftype'] == 'file':
                expected_hash = manifest_data['chksum_%s' % manifest_data['chksum_type']]
                self._verify_file_hash(b_collection_path, manifest_data['name'], expected_hash, modified_content)

        if modified_content:
            display.display("Collection %s contains modified content in the following files:" % to_text(self))
            display.display(to_text(self))
            display.vvv(to_text(self.b_path))
            for content_change in modified_content:
                display.display('    %s' % content_change.filename)
                display.vvv("    Expected: %s\n    Found: %s" % (content_change.expected, content_change.installed))
        else:
            display.vvv("Successfully verified that checksums for '%s:%s' match the remote collection" % (to_text(self), self.latest_version))

    def _verify_file_hash(self, b_path, filename, expected_hash, error_queue):
        # Appends a ModifiedContent record to error_queue when the on-disk hash
        # differs from expected_hash; a missing file hashes as None.
        b_file_path = to_bytes(os.path.join(to_text(b_path), filename), errors='surrogate_or_strict')

        if not os.path.isfile(b_file_path):
            actual_hash = None
        else:
            with open(b_file_path, mode='rb') as file_object:
                actual_hash = _consume_file(file_object)

        if expected_hash != actual_hash:
            error_queue.append(ModifiedContent(filename=filename, expected=expected_hash, installed=actual_hash))

    def _get_metadata(self):
        # No-op if metadata was supplied up front or already fetched.
        if self._metadata:
            return
        self._metadata = self.api.get_collection_version_metadata(self.namespace, self.name, self.latest_version)

    def _meets_requirements(self, version, requirements, parent):
        """
        Supports version identifiers can be '==', '!=', '>', '>=', '<', '<=', '*'. Each requirement is delimited by ','
        """
        op_map = {
            '!=': operator.ne,
            '==': operator.eq,
            '=': operator.eq,
            '>=': operator.ge,
            '>': operator.gt,
            '<=': operator.le,
            '<': operator.lt,
        }

        for req in list(requirements.split(',')):
            op_pos = 2 if len(req) > 1 and req[1] == '=' else 1
            op = op_map.get(req[:op_pos])

            requirement = req[op_pos:]
            if not op:
                # A bare version string means exact equality.
                requirement = req
                op = operator.eq

            # In the case we are checking a new requirement on a base requirement (parent != None) we can't accept
            # version as '*' (unknown version) unless the requirement is also '*'.
            if parent and version == '*' and requirement != '*':
                display.warning("Failed to validate the collection requirement '%s:%s' for %s when the existing "
                                "install does not have a version set, the collection may not work."
                                % (to_text(self), req, parent))
                continue
            elif requirement == '*' or version == '*':
                continue

            if not op(SemanticVersion(version), SemanticVersion.from_loose_version(LooseVersion(requirement))):
                break
        else:
            return True

        # The loop was broken early, it does not meet all the requirements
        return False

    @staticmethod
    def from_tar(b_path, force, parent=None):
        """Build a CollectionRequirement from a local collection tarball."""
        if not tarfile.is_tarfile(b_path):
            raise AnsibleError("Collection artifact at '%s' is not a valid tar file." % to_native(b_path))

        info = {}
        with tarfile.open(b_path, mode='r') as collection_tar:
            for b_member_name, property_name in CollectionRequirement._FILE_MAPPING:
                n_member_name = to_native(b_member_name)
                try:
                    member = collection_tar.getmember(n_member_name)
                except KeyError:
                    raise AnsibleError("Collection at '%s' does not contain the required file %s."
                                       % (to_native(b_path), n_member_name))

                with _tarfile_extract(collection_tar, member) as (dummy, member_obj):
                    try:
                        info[property_name] = json.loads(to_text(member_obj.read(), errors='surrogate_or_strict'))
                    except ValueError:
                        raise AnsibleError("Collection tar file member %s does not contain a valid json string."
                                           % n_member_name)

        meta = info['manifest_file']['collection_info']
        files = info['files_file']['files']

        namespace = meta['namespace']
        name = meta['name']
        version = meta['version']
        meta = CollectionVersionMetadata(namespace, name, version, None, None, meta['dependencies'])

        # An explicitly supplied pre-release tarball is always installable.
        if SemanticVersion(version).is_prerelease:
            allow_pre_release = True
        else:
            allow_pre_release = False

        return CollectionRequirement(namespace, name, b_path, None, [version], version, force, parent=parent,
                                     metadata=meta, files=files, allow_pre_releases=allow_pre_release)

    @staticmethod
    def from_path(b_path, force, parent=None, fallback_metadata=False, skip=True):
        """Build a CollectionRequirement from an installed collection directory."""
        info = CollectionRequirement.collection_info(b_path, fallback_metadata)

        allow_pre_release = False
        if 'manifest_file' in info:
            manifest = info['manifest_file']['collection_info']
            namespace = manifest['namespace']
            name = manifest['name']
            version = to_text(manifest['version'], errors='surrogate_or_strict')

            try:
                _v = SemanticVersion()
                _v.parse(version)
                if _v.is_prerelease:
                    allow_pre_release = True
            except ValueError:
                display.warning("Collection at '%s' does not have a valid version set, falling back to '*'. Found "
                                "version: '%s'" % (to_text(b_path), version))
                version = '*'

            dependencies = manifest['dependencies']
        else:
            if fallback_metadata:
                warning = "Collection at '%s' does not have a galaxy.yml or a MANIFEST.json file, cannot detect version."
            else:
                warning = "Collection at '%s' does not have a MANIFEST.json file, cannot detect version."
            display.warning(warning % to_text(b_path))
            # Derive namespace/name from the directory layout: .../<namespace>/<name>
            parent_dir, name = os.path.split(to_text(b_path, errors='surrogate_or_strict'))
            namespace = os.path.split(parent_dir)[1]

            version = '*'
            dependencies = {}

        meta = CollectionVersionMetadata(namespace, name, version, None, None, dependencies)

        files = info.get('files_file', {}).get('files', {})

        return CollectionRequirement(namespace, name, b_path, None, [version], version, force, parent=parent,
                                     metadata=meta, files=files, skip=skip, allow_pre_releases=allow_pre_release)

    @staticmethod
    def from_name(collection, apis, requirement, force, parent=None, allow_pre_release=False):
        """Build a CollectionRequirement by querying the configured Galaxy servers in order."""
        namespace, name = collection.split('.', 1)
        galaxy_meta = None

        for api in apis:
            try:
                if not (requirement == '*' or requirement.startswith('<') or requirement.startswith('>') or
                        requirement.startswith('!=')):
                    # Exact requirement
                    allow_pre_release = True

                    if requirement.startswith('='):
                        requirement = requirement.lstrip('=')

                    resp = api.get_collection_version_metadata(namespace, name, requirement)

                    galaxy_meta = resp
                    versions = [resp.version]
                else:
                    versions = api.get_collection_versions(namespace, name)
            except GalaxyError as err:
                # 404 just means this server doesn't host the collection; try the next.
                if err.http_code == 404:
                    display.vvv("Collection '%s' is not available from server %s %s"
                                % (collection, api.name, api.api_server))
                    continue
                raise

            display.vvv("Collection '%s' obtained from server %s %s" % (collection, api.name, api.api_server))
            break
        else:
            raise AnsibleError("Failed to find collection %s:%s" % (collection, requirement))

        req = CollectionRequirement(namespace, name, None, api, versions, requirement, force, parent=parent,
                                    metadata=galaxy_meta, allow_pre_releases=allow_pre_release)
        return req
def build_collection(collection_path, output_path, force):
    """Creates the Ansible collection artifact in a .tar.gz file.

    :param collection_path: The path to the collection to build. This should be the directory that contains the
        galaxy.yml file.
    :param output_path: The path to create the collection build artifact. This should be a directory.
    :param force: Whether to overwrite an existing collection build artifact or fail.
    :return: The path to the collection build artifact.
    :raises AnsibleError: If galaxy.yml is missing, or the target artifact path is a directory, or it exists and
        force is not set.
    """
    b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')
    b_galaxy_path = get_galaxy_metadata_path(b_collection_path)
    if not os.path.exists(b_galaxy_path):
        raise AnsibleError("The collection galaxy.yml path '%s' does not exist." % to_native(b_galaxy_path))

    info = CollectionRequirement.galaxy_metadata(b_collection_path)

    collection_manifest = info['manifest_file']
    collection_meta = collection_manifest['collection_info']
    file_manifest = info['files_file']

    # Artifact name follows the <namespace>-<name>-<version>.tar.gz convention.
    collection_output = os.path.join(output_path, "%s-%s-%s.tar.gz" % (collection_meta['namespace'],
                                                                       collection_meta['name'],
                                                                       collection_meta['version']))

    b_collection_output = to_bytes(collection_output, errors='surrogate_or_strict')
    if os.path.exists(b_collection_output):
        if os.path.isdir(b_collection_output):
            raise AnsibleError("The output collection artifact '%s' already exists, "
                               "but is a directory - aborting" % to_native(collection_output))
        elif not force:
            raise AnsibleError("The file '%s' already exists. You can use --force to re-create "
                               "the collection artifact." % to_native(collection_output))

    _build_collection_tar(b_collection_path, b_collection_output, collection_manifest, file_manifest)
    # Fix: the docstring documents a return value but the function returned None;
    # return the artifact path so callers can locate what was built.
    return collection_output
def download_collections(collections, output_path, apis, validate_certs, no_deps, allow_pre_release):
    """Download Ansible collections as their tarball from a Galaxy server to the path specified and creates a requirements
    file of the downloaded requirements to be used for an install.

    :param collections: The collections to download, should be a list of tuples with (name, requirement, Galaxy Server).
    :param output_path: The path to download the collections to.
    :param apis: A list of GalaxyAPIs to query when search for a collection.
    :param validate_certs: Whether to validate the certificate if downloading a tarball from a non-Galaxy host.
    :param no_deps: Ignore any collection dependencies and only download the base requirements.
    :param allow_pre_release: Do not ignore pre-release versions when selecting the latest.
    """
    with _tempdir() as b_temp_path:
        display.display("Process install dependency map")
        with _display_progress():
            # force and force_deps are both True: everything in the tree is
            # downloaded even if a copy is already installed locally.
            dep_map = _build_dependency_map(collections, [], b_temp_path, apis, validate_certs, True, True, no_deps,
                                            allow_pre_release=allow_pre_release)

        requirements = []
        display.display("Starting collection download process to '%s'" % output_path)
        with _display_progress():
            for name, requirement in dep_map.items():
                collection_filename = "%s-%s-%s.tar.gz" % (requirement.namespace, requirement.name,
                                                           requirement.latest_version)
                dest_path = os.path.join(output_path, collection_filename)
                requirements.append({'name': collection_filename, 'version': requirement.latest_version})

                display.display("Downloading collection '%s' to '%s'" % (name, dest_path))
                # Download into the temp dir first, then move into place.
                b_temp_download_path = requirement.download(b_temp_path)
                shutil.move(b_temp_download_path, to_bytes(dest_path, errors='surrogate_or_strict'))

            requirements_path = os.path.join(output_path, 'requirements.yml')
            display.display("Writing requirements.yml file of downloaded collections to '%s'" % requirements_path)
            with open(to_bytes(requirements_path, errors='surrogate_or_strict'), mode='wb') as req_fd:
                req_fd.write(to_bytes(yaml.safe_dump({'collections': requirements}), errors='surrogate_or_strict'))
def publish_collection(collection_path, api, wait, timeout):
    """Publish an Ansible collection tarball into an Ansible Galaxy server.

    :param collection_path: The path to the collection tarball to publish.
    :param api: A GalaxyAPI to publish the collection to.
    :param wait: Whether to wait until the import process is complete.
    :param timeout: The time in seconds to wait for the import process to finish, 0 is indefinite.
    """
    import_uri = api.publish_collection(collection_path)

    if not wait:
        display.display("Collection has been pushed to the Galaxy server %s %s, not waiting until import has "
                        "completed due to --no-wait being set. Import task results can be found at %s"
                        % (api.name, api.api_server, import_uri))
        return

    # Galaxy returns a url fragment which differs between v2 and v3. The second to last entry is
    # always the task_id, though.
    # v2: {"task": "https://galaxy-dev.ansible.com/api/v2/collection-imports/35573/"}
    # v3: {"task": "/api/automation-hub/v3/imports/collections/838d1308-a8f4-402c-95cb-7823f3806cd8/"}
    task_id = next((segment for segment in reversed(import_uri.split('/')) if segment), None)

    if not task_id:
        raise AnsibleError("Publishing the collection did not return valid task info. Cannot wait for task status. Returned task info: '%s'" % import_uri)

    display.display("Collection has been published to the Galaxy server %s %s" % (api.name, api.api_server))
    with _display_progress():
        api.wait_import_task(task_id, timeout)
    display.display("Collection has been successfully published and imported to the Galaxy server %s %s"
                    % (api.name, api.api_server))
def install_collections(collections, output_path, apis, validate_certs, ignore_errors, no_deps, force, force_deps,
                        allow_pre_release=False):
    """Install Ansible collections to the path specified.

    :param collections: The collections to install, should be a list of tuples with (name, requirement, Galaxy server).
    :param output_path: The path to install the collections to.
    :param apis: A list of GalaxyAPIs to query when searching for a collection.
    :param validate_certs: Whether to validate the certificates if downloading a tarball.
    :param ignore_errors: Whether to ignore any errors when installing the collection.
    :param no_deps: Ignore any collection dependencies and only install the base requirements.
    :param force: Re-install a collection if it has already been installed.
    :param force_deps: Re-install a collection as well as its dependencies if they have already been installed.
    :param allow_pre_release: Whether pre-release versions may be selected.
    """
    existing_collections = find_existing_collections(output_path, fallback_metadata=True)

    with _tempdir() as b_temp_path:
        display.display("Process install dependency map")
        with _display_progress():
            dependency_map = _build_dependency_map(collections, existing_collections, b_temp_path, apis,
                                                   validate_certs, force, force_deps, no_deps,
                                                   allow_pre_release=allow_pre_release)

        display.display("Starting collection install process")
        with _display_progress():
            for requirement in dependency_map.values():
                try:
                    requirement.install(output_path, b_temp_path)
                except AnsibleError as err:
                    if not ignore_errors:
                        raise
                    # Best-effort mode: report the failure and carry on.
                    display.warning("Failed to install collection %s but skipping due to --ignore-errors being set. "
                                    "Error: %s" % (to_text(requirement), to_text(err)))
def validate_collection_name(name):
    """Validates the collection name as an input from the user or a requirements file fit the requirements.

    :param name: The input name with optional range specifier split by ':'.
    :return: The input value, required for argparse validation.
    :raises AnsibleError: If the name portion is not a valid <namespace>.<collection> identifier.
    """
    # Strip any ':version' range specifier before validating.
    collection = name.partition(':')[0]
    if not AnsibleCollectionRef.is_valid_collection_name(collection):
        raise AnsibleError("Invalid collection name '%s', "
                           "name must be in the format <namespace>.<collection>. \n"
                           "Please make sure namespace and collection name contains "
                           "characters from [a-zA-Z0-9_] only." % name)
    return name
def validate_collection_path(collection_path):
    """Ensure a given path ends with 'ansible_collections'

    :param collection_path: The path that should end in 'ansible_collections'
    :return: collection_path ending in 'ansible_collections' if it does not already.
    """
    tail = os.path.split(collection_path)[1]
    if tail == 'ansible_collections':
        return collection_path
    return os.path.join(collection_path, 'ansible_collections')
def verify_collections(collections, search_paths, apis, validate_certs, ignore_errors, allow_pre_release=False):
    """Verify locally installed collections against their remote Galaxy artifacts.

    :param collections: List of (name, version) tuples identifying collections to verify.
    :param search_paths: Collection paths to look for the local install in.
    :param apis: A list of GalaxyAPIs to query for the remote artifact.
    :param validate_certs: Whether to validate certificates when downloading the remote tarball.
    :param ignore_errors: Report verification failures as warnings instead of raising.
    :param allow_pre_release: Whether pre-release versions may be matched remotely.
    """
    with _display_progress():
        with _tempdir() as b_temp_path:
            for collection in collections:
                try:
                    local_collection = None
                    b_collection = to_bytes(collection[0], errors='surrogate_or_strict')

                    # Only 'namespace.name' identifiers are verifiable; file paths and URLs are rejected.
                    if os.path.isfile(b_collection) or urlparse(collection[0]).scheme.lower() in ['http', 'https'] or len(collection[0].split('.')) != 2:
                        raise AnsibleError(message="'%s' is not a valid collection name. The format namespace.name is expected." % collection[0])

                    collection_name = collection[0]
                    namespace, name = collection_name.split('.')
                    collection_version = collection[1]

                    # Verify local collection exists before downloading it from a galaxy server
                    for search_path in search_paths:
                        b_search_path = to_bytes(os.path.join(search_path, namespace, name), errors='surrogate_or_strict')
                        if os.path.isdir(b_search_path):
                            if not os.path.isfile(os.path.join(to_text(b_search_path, errors='surrogate_or_strict'), 'MANIFEST.json')):
                                raise AnsibleError(
                                    message="Collection %s does not appear to have a MANIFEST.json. " % collection_name +
                                            "A MANIFEST.json is expected if the collection has been built and installed via ansible-galaxy."
                                )
                            local_collection = CollectionRequirement.from_path(b_search_path, False)
                            # NOTE: the break leaves 'search_path' bound to the
                            # path the collection was found in; it is reused below.
                            break
                    if local_collection is None:
                        raise AnsibleError(message='Collection %s is not installed in any of the collection paths.' % collection_name)

                    # Download collection on a galaxy server for comparison
                    try:
                        remote_collection = CollectionRequirement.from_name(collection_name, apis, collection_version, False, parent=None,
                                                                            allow_pre_release=allow_pre_release)
                    except AnsibleError as e:
                        # Re-word only the specific "not found" failure; everything else propagates as-is.
                        if e.message == 'Failed to find collection %s:%s' % (collection[0], collection[1]):
                            raise AnsibleError('Failed to find remote collection %s:%s on any of the galaxy servers' % (collection[0], collection[1]))
                        raise

                    download_url = remote_collection.metadata.download_url
                    headers = {}
                    remote_collection.api._add_auth_token(headers, download_url, required=False)
                    b_temp_tar_path = _download_file(download_url, b_temp_path, None, validate_certs, headers=headers)

                    local_collection.verify(remote_collection, search_path, b_temp_tar_path)

                except AnsibleError as err:
                    if ignore_errors:
                        display.warning("Failed to verify collection %s but skipping due to --ignore-errors being set. "
                                        "Error: %s" % (collection[0], to_text(err)))
                    else:
                        raise
@contextmanager
def _tempdir():
    """Context manager yielding a byte-str temporary directory under Ansible's
    local tmp dir, removed on exit.

    Fix: cleanup now runs in a ``finally`` so the directory is also removed
    when the managed body raises; previously the bare ``yield`` skipped the
    ``shutil.rmtree`` on any exception and leaked the directory.
    """
    b_temp_path = tempfile.mkdtemp(dir=to_bytes(C.DEFAULT_LOCAL_TMP, errors='surrogate_or_strict'))
    try:
        yield b_temp_path
    finally:
        shutil.rmtree(b_temp_path)
@contextmanager
def _tarfile_extract(tar, member):
tar_obj = tar.extractfile(member)
yield member, tar_obj
tar_obj.close()
@contextmanager
def _display_progress():
    """Context manager that shows a spinner on a TTY while the body runs.

    The global ``display`` object is temporarily replaced by a proxy that
    queues every call; a background thread drains the queue between spinner
    frames so output from the body interleaves cleanly with the spinner.
    """
    config_display = C.GALAXY_DISPLAY_PROGRESS
    # Default: only spin when stdout is an interactive terminal.
    display_wheel = sys.stdout.isatty() if config_display is None else config_display

    if not display_wheel:
        yield
        return

    def progress(display_queue, actual_display):
        actual_display.debug("Starting display_progress display thread")
        t = threading.current_thread()

        while True:
            for c in "|/-\\":
                actual_display.display(c + "\b", newline=False)
                time.sleep(0.1)

                # Display a message from the main thread
                while True:
                    try:
                        method, args, kwargs = display_queue.get(block=False, timeout=0.1)
                    except queue.Empty:
                        break
                    else:
                        func = getattr(actual_display, method)
                        func(*args, **kwargs)

                # The main thread flags shutdown by setting t.finish (see below).
                if getattr(t, "finish", False):
                    actual_display.debug("Received end signal for display_progress display thread")
                    return

    class DisplayThread(object):
        # Proxy that forwards any display method call onto the queue.

        def __init__(self, display_queue):
            self.display_queue = display_queue

        def __getattr__(self, attr):
            def call_display(*args, **kwargs):
                self.display_queue.put((attr, args, kwargs))

            return call_display

    # Temporary override the global display class with our own which add the calls to a queue for the thread to call.
    global display
    old_display = display

    try:
        display_queue = queue.Queue()
        display = DisplayThread(display_queue)
        t = threading.Thread(target=progress, args=(display_queue, old_display))
        t.daemon = True
        t.start()

        try:
            yield
        finally:
            # Signal the spinner thread and wait for it before restoring display.
            t.finish = True
            t.join()
    except Exception:
        # The exception is re-raised so we can sure the thread is finished and not using the display anymore
        raise
    finally:
        display = old_display
def _get_galaxy_yml(b_galaxy_yml_path):
    """Parse and normalize a collection's galaxy.yml metadata file.

    Validates mandatory keys, warns on unknown keys, fills in type-appropriate
    defaults for missing optional keys, and renames ``license`` to
    ``license_ids`` (``license`` is a Python builtin).

    :param b_galaxy_yml_path: Path (bytes) to the galaxy.yml file.
    :raises AnsibleError: if the YAML cannot be parsed, does not contain a
        mapping, or is missing mandatory keys.
    :return: dict of normalized galaxy.yml metadata.
    """
    meta_info = get_collections_galaxy_meta_info()

    mandatory_keys = set()
    string_keys = set()
    list_keys = set()
    dict_keys = set()

    for info in meta_info:
        if info.get('required', False):
            mandatory_keys.add(info['key'])

        # Bucket each key by its declared type so defaults can be applied below.
        key_list_type = {
            'str': string_keys,
            'list': list_keys,
            'dict': dict_keys,
        }[info.get('type', 'str')]
        key_list_type.add(info['key'])

    all_keys = frozenset(list(mandatory_keys) + list(string_keys) + list(list_keys) + list(dict_keys))

    try:
        with open(b_galaxy_yml_path, 'rb') as g_yaml:
            galaxy_yml = yaml.safe_load(g_yaml)
    except YAMLError as err:
        raise AnsibleError("Failed to parse the galaxy.yml at '%s' with the following error:\n%s"
                           % (to_native(b_galaxy_yml_path), to_native(err)))

    # Fix: an empty (or scalar-only) galaxy.yml makes safe_load return None/str,
    # which previously crashed below with an unhelpful AttributeError.
    if not isinstance(galaxy_yml, dict):
        raise AnsibleError("The collection galaxy.yml at '%s' does not contain a dictionary of metadata"
                           % to_native(b_galaxy_yml_path))

    set_keys = set(galaxy_yml.keys())
    missing_keys = mandatory_keys.difference(set_keys)
    if missing_keys:
        raise AnsibleError("The collection galaxy.yml at '%s' is missing the following mandatory keys: %s"
                           % (to_native(b_galaxy_yml_path), ", ".join(sorted(missing_keys))))

    extra_keys = set_keys.difference(all_keys)
    if len(extra_keys) > 0:
        display.warning("Found unknown keys in collection galaxy.yml at '%s': %s"
                        % (to_text(b_galaxy_yml_path), ", ".join(extra_keys)))

    # Add the defaults if they have not been set
    for optional_string in string_keys:
        if optional_string not in galaxy_yml:
            galaxy_yml[optional_string] = None

    for optional_list in list_keys:
        list_val = galaxy_yml.get(optional_list, None)

        if list_val is None:
            galaxy_yml[optional_list] = []
        elif not isinstance(list_val, list):
            # A scalar where a list is expected is wrapped, not rejected.
            galaxy_yml[optional_list] = [list_val]

    for optional_dict in dict_keys:
        if optional_dict not in galaxy_yml:
            galaxy_yml[optional_dict] = {}

    # license is a builtin var in Python, to avoid confusion we just rename it to license_ids
    galaxy_yml['license_ids'] = galaxy_yml['license']
    del galaxy_yml['license']

    return galaxy_yml
def _build_files_manifest(b_collection_path, namespace, name, ignore_patterns):
    """Walk a collection source tree and build the FILES.json manifest dict.

    Every file and directory is recorded with its type and (for files) a
    SHA256 checksum. VCS directories, build artifacts and the user-supplied
    ``ignore_patterns`` (galaxy.yml ``build_ignore``) are skipped.

    :param b_collection_path: Root of the collection source tree (bytes).
    :param namespace: Collection namespace, used to ignore prior artifacts.
    :param name: Collection name, used to ignore prior artifacts.
    :param ignore_patterns: Extra fnmatch patterns (text) to exclude.
    :return: Manifest dict with a 'files' list and the manifest 'format'.
    """
    # We always ignore .pyc and .retry files as well as some well known version control directories. The ignore
    # patterns can be extended by the build_ignore key in galaxy.yml
    b_ignore_patterns = [
        b'galaxy.yml',
        b'galaxy.yaml',
        b'.git',
        b'*.pyc',
        b'*.retry',
        b'tests/output',  # Ignore ansible-test result output directory.
        to_bytes('{0}-{1}-*.tar.gz'.format(namespace, name)),  # Ignores previously built artifacts in the root dir.
    ]
    b_ignore_patterns += [to_bytes(p) for p in ignore_patterns]
    b_ignore_dirs = frozenset([b'CVS', b'.bzr', b'.hg', b'.git', b'.svn', b'__pycache__', b'.tox'])

    # Template copied for each recorded file/dir entry.
    entry_template = {
        'name': None,
        'ftype': None,
        'chksum_type': None,
        'chksum_sha256': None,
        'format': MANIFEST_FORMAT
    }
    manifest = {
        'files': [
            {
                'name': '.',
                'ftype': 'dir',
                'chksum_type': None,
                'chksum_sha256': None,
                'format': MANIFEST_FORMAT,
            },
        ],
        'format': MANIFEST_FORMAT,
    }

    def _walk(b_path, b_top_level_dir):
        # Recursive walk; manifest names are relative to the collection root
        # and stored as text (surrogate-escaped).
        for b_item in os.listdir(b_path):
            b_abs_path = os.path.join(b_path, b_item)
            b_rel_base_dir = b'' if b_path == b_top_level_dir else b_path[len(b_top_level_dir) + 1:]
            b_rel_path = os.path.join(b_rel_base_dir, b_item)
            rel_path = to_text(b_rel_path, errors='surrogate_or_strict')

            if os.path.isdir(b_abs_path):
                # NOTE: the generator variable below shadows the outer b_path;
                # harmless here since b_path is not used again in this branch.
                if any(b_item == b_path for b_path in b_ignore_dirs) or \
                        any(fnmatch.fnmatch(b_rel_path, b_pattern) for b_pattern in b_ignore_patterns):
                    display.vvv("Skipping '%s' for collection build" % to_text(b_abs_path))
                    continue

                if os.path.islink(b_abs_path):
                    b_link_target = os.path.realpath(b_abs_path)

                    # Refuse directory symlinks that escape the collection tree.
                    if not _is_child_path(b_link_target, b_top_level_dir):
                        display.warning("Skipping '%s' as it is a symbolic link to a directory outside the collection"
                                        % to_text(b_abs_path))
                        continue

                manifest_entry = entry_template.copy()
                manifest_entry['name'] = rel_path
                manifest_entry['ftype'] = 'dir'

                manifest['files'].append(manifest_entry)

                # Do not recurse through in-tree dir symlinks; their targets are
                # walked via the real path.
                if not os.path.islink(b_abs_path):
                    _walk(b_abs_path, b_top_level_dir)
            else:
                if any(fnmatch.fnmatch(b_rel_path, b_pattern) for b_pattern in b_ignore_patterns):
                    display.vvv("Skipping '%s' for collection build" % to_text(b_abs_path))
                    continue

                # Handling of file symlinks occur in _build_collection_tar, the manifest for a symlink is the same for
                # a normal file.
                manifest_entry = entry_template.copy()
                manifest_entry['name'] = rel_path
                manifest_entry['ftype'] = 'file'
                manifest_entry['chksum_type'] = 'sha256'
                manifest_entry['chksum_sha256'] = secure_hash(b_abs_path, hash_func=sha256)

                manifest['files'].append(manifest_entry)

    _walk(b_collection_path, b_collection_path)

    return manifest
def _build_manifest(namespace, name, version, authors, readme, tags, description, license_ids, license_file,
                    dependencies, repository, documentation, homepage, issues, **kwargs):
    """Assemble the MANIFEST.json data structure for a collection artifact.

    Unknown galaxy.yml keys arrive via ``**kwargs`` and are deliberately
    ignored. The FILES.json checksum is left as None and filled in later by
    _build_collection_tar.
    """
    collection_info = {
        'namespace': namespace,
        'name': name,
        'version': version,
        'authors': authors,
        'readme': readme,
        'tags': tags,
        'description': description,
        'license': license_ids,
        # galaxy.yml may carry an empty string here; normalize it to None.
        'license_file': license_file or None,
        'dependencies': dependencies,
        'repository': repository,
        'documentation': documentation,
        'homepage': homepage,
        'issues': issues,
    }
    file_manifest_file = {
        'name': 'FILES.json',
        'ftype': 'file',
        'chksum_type': 'sha256',
        'chksum_sha256': None,  # Filled out in _build_collection_tar
        'format': MANIFEST_FORMAT,
    }

    return {
        'collection_info': collection_info,
        'file_manifest_file': file_manifest_file,
        'format': MANIFEST_FORMAT,
    }
def _build_collection_tar(b_collection_path, b_tar_path, collection_manifest, file_manifest):
    """Build a tar.gz collection artifact from the manifest data."""
    files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict')
    # The FILES.json checksum is embedded inside MANIFEST.json, so serialize
    # and hash FILES.json first.
    collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256)
    collection_manifest_json = to_bytes(json.dumps(collection_manifest, indent=True), errors='surrogate_or_strict')

    # Build into a temp location first so a failed build never leaves a partial
    # artifact at the destination.
    with _tempdir() as b_temp_path:
        b_tar_filepath = os.path.join(b_temp_path, os.path.basename(b_tar_path))

        with tarfile.open(b_tar_filepath, mode='w:gz') as tar_file:
            # Add the MANIFEST.json and FILES.json file to the archive
            for name, b in [('MANIFEST.json', collection_manifest_json), ('FILES.json', files_manifest_json)]:
                b_io = BytesIO(b)
                tar_info = tarfile.TarInfo(name)
                tar_info.size = len(b)
                tar_info.mtime = time.time()
                tar_info.mode = 0o0644
                tar_file.addfile(tarinfo=tar_info, fileobj=b_io)

            for file_info in file_manifest['files']:
                if file_info['name'] == '.':
                    continue

                # arcname expects a native string, cannot be bytes
                filename = to_native(file_info['name'], errors='surrogate_or_strict')
                b_src_path = os.path.join(b_collection_path, to_bytes(filename, errors='surrogate_or_strict'))

                def reset_stat(tarinfo):
                    # Normalize ownership/permissions so artifacts are
                    # reproducible regardless of the build machine.
                    if tarinfo.type != tarfile.SYMTYPE:
                        existing_is_exec = tarinfo.mode & stat.S_IXUSR
                        tarinfo.mode = 0o0755 if existing_is_exec or tarinfo.isdir() else 0o0644
                    tarinfo.uid = tarinfo.gid = 0
                    tarinfo.uname = tarinfo.gname = ''

                    return tarinfo

                if os.path.islink(b_src_path):
                    b_link_target = os.path.realpath(b_src_path)
                    if _is_child_path(b_link_target, b_collection_path):
                        # In-tree symlinks are stored as relative symlinks.
                        b_rel_path = os.path.relpath(b_link_target, start=os.path.dirname(b_src_path))

                        tar_info = tarfile.TarInfo(filename)
                        tar_info.type = tarfile.SYMTYPE
                        tar_info.linkname = to_native(b_rel_path, errors='surrogate_or_strict')
                        tar_info = reset_stat(tar_info)
                        tar_file.addfile(tarinfo=tar_info)

                        continue

                # Dealing with a normal file, just add it by name. Out-of-tree
                # symlinks fall through to here and are archived as real files.
                tar_file.add(os.path.realpath(b_src_path), arcname=filename, recursive=False, filter=reset_stat)

        shutil.copy(b_tar_filepath, b_tar_path)
        collection_name = "%s.%s" % (collection_manifest['collection_info']['namespace'],
                                     collection_manifest['collection_info']['name'])
        display.display('Created collection for %s at %s' % (collection_name, to_text(b_tar_path)))
def _build_collection_dir(b_collection_path, b_collection_output, collection_manifest, file_manifest):
    """Build a collection directory from the manifest data.

    This should follow the same pattern as _build_collection_tar.
    """
    os.makedirs(b_collection_output, mode=0o0755)

    files_manifest_json = to_bytes(json.dumps(file_manifest, indent=True), errors='surrogate_or_strict')
    # FILES.json checksum goes into MANIFEST.json, so hash it first.
    collection_manifest['file_manifest_file']['chksum_sha256'] = secure_hash_s(files_manifest_json, hash_func=sha256)
    collection_manifest_json = to_bytes(json.dumps(collection_manifest, indent=True), errors='surrogate_or_strict')

    # Write contents to the files
    for name, b in [('MANIFEST.json', collection_manifest_json), ('FILES.json', files_manifest_json)]:
        b_path = os.path.join(b_collection_output, to_bytes(name, errors='surrogate_or_strict'))
        with open(b_path, 'wb') as file_obj, BytesIO(b) as b_io:
            shutil.copyfileobj(b_io, file_obj)

        os.chmod(b_path, 0o0644)

    base_directories = []
    for file_info in file_manifest['files']:
        if file_info['name'] == '.':
            continue

        src_file = os.path.join(b_collection_path, to_bytes(file_info['name'], errors='surrogate_or_strict'))
        dest_file = os.path.join(b_collection_output, to_bytes(file_info['name'], errors='surrogate_or_strict'))

        # Anything under an already-copied directory was copied by copytree.
        # NOTE(review): this is a raw prefix test without a trailing separator,
        # so a sibling like b'foo2' would match a copied dir b'foo' — appears
        # benign given manifest ordering, but confirm before relying on it.
        if any(src_file.startswith(directory) for directory in base_directories):
            continue

        existing_is_exec = os.stat(src_file).st_mode & stat.S_IXUSR
        mode = 0o0755 if existing_is_exec else 0o0644

        if os.path.isdir(src_file):
            mode = 0o0755
            base_directories.append(src_file)
            shutil.copytree(src_file, dest_file)
        else:
            shutil.copyfile(src_file, dest_file)

        os.chmod(dest_file, mode)
def find_existing_collections(path, fallback_metadata=False):
    """Scan an installed-collections root and return its CollectionRequirement objects.

    ``path`` is expected to be laid out as ``<namespace>/<collection>``
    directories; anything that is not a directory is skipped.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    found = []

    for b_namespace in os.listdir(b_path):
        b_namespace_path = os.path.join(b_path, b_namespace)
        if os.path.isfile(b_namespace_path):
            continue

        for b_collection in os.listdir(b_namespace_path):
            b_collection_path = os.path.join(b_namespace_path, b_collection)
            if not os.path.isdir(b_collection_path):
                continue

            req = CollectionRequirement.from_path(b_collection_path, False, fallback_metadata=fallback_metadata)
            display.vvv("Found installed collection %s:%s at '%s'" % (to_text(req), req.latest_version,
                                                                      to_text(b_collection_path)))
            found.append(req)

    return found
def _build_dependency_map(collections, existing_collections, b_temp_path, apis, validate_certs, force, force_deps,
                          no_deps, allow_pre_release=False):
    """Resolve the requested collections plus their transitive dependencies.

    Repeatedly expands dependencies until a fixed point is reached, then pins
    each unresolved entry to its latest matching version. Returns a dict
    mapping collection FQCN (text) -> CollectionRequirement.
    """
    dependency_map = {}

    # First build the dependency map on the actual requirements
    for name, version, source, req_type in collections:
        _get_collection_info(dependency_map, existing_collections, name, version, source, b_temp_path, apis,
                             validate_certs, (force or force_deps), allow_pre_release=allow_pre_release, req_type=req_type)

    # Entries marked skip are already satisfied; treat them as processed.
    checked_parents = set([to_text(c) for c in dependency_map.values() if c.skip])
    while len(dependency_map) != len(checked_parents):
        while not no_deps:  # Only parse dependencies if no_deps was not set
            parents_to_check = set(dependency_map.keys()).difference(checked_parents)

            deps_exhausted = True
            for parent in parents_to_check:
                parent_info = dependency_map[parent]

                if parent_info.dependencies:
                    deps_exhausted = False
                    for dep_name, dep_requirement in parent_info.dependencies.items():
                        _get_collection_info(dependency_map, existing_collections, dep_name, dep_requirement,
                                             parent_info.api, b_temp_path, apis, validate_certs, force_deps,
                                             parent=parent, allow_pre_release=allow_pre_release)

                checked_parents.add(parent)

            # No extra dependencies were resolved, exit loop
            if deps_exhausted:
                break

        # Now we have resolved the deps to our best extent, now select the latest version for collections with
        # multiple versions found and go from there
        deps_not_checked = set(dependency_map.keys()).difference(checked_parents)
        for collection in deps_not_checked:
            dependency_map[collection].set_latest_version()
            # Pinning may expose new dependencies; only entries with none (or
            # with no_deps set) are final — others loop around again.
            if no_deps or len(dependency_map[collection].dependencies) == 0:
                checked_parents.add(collection)

    return dependency_map
def _collections_from_scm(collection, requirement, b_temp_path, force, parent=None):
    """Returns a list of collections found in the repo. If there is a galaxy.yml in the collection then just return
    the specific collection. Otherwise, check each top-level directory for a galaxy.yml.

    :param collection: URI to a git repo
    :param requirement: The version of the artifact
    :param b_temp_path: The temporary path to the archive of a collection
    :param force: Whether to overwrite an existing collection or fail
    :param parent: The name of the parent collection
    :raises AnsibleError: if nothing found
    :return: List of CollectionRequirement objects
    :rtype: list
    """
    reqs = []
    name, version, path, fragment = parse_scm(collection, requirement)
    b_repo_root = to_bytes(name, errors='surrogate_or_strict')

    # The repo was already extracted under b_temp_path by the caller.
    b_collection_path = os.path.join(b_temp_path, b_repo_root)
    if fragment:
        # A '#subdir' fragment points at a sub-path inside the repo.
        b_fragment = to_bytes(fragment, errors='surrogate_or_strict')
        b_collection_path = os.path.join(b_collection_path, b_fragment)

    b_galaxy_path = get_galaxy_metadata_path(b_collection_path)

    err = ("%s appears to be an SCM collection source, but the required galaxy.yml was not found. "
           "Append #path/to/collection/ to your URI (before the comma separated version, if one is specified) "
           "to point to a directory containing the galaxy.yml or directories of collections" % collection)

    display.vvvvv("Considering %s as a possible path to a collection's galaxy.yml" % b_galaxy_path)
    if os.path.exists(b_galaxy_path):
        return [CollectionRequirement.from_path(b_collection_path, force, parent, fallback_metadata=True, skip=False)]

    if not os.path.isdir(b_collection_path) or not os.listdir(b_collection_path):
        raise AnsibleError(err)

    # No galaxy.yml at the root: treat each top-level directory as a candidate
    # collection (multi-collection repo layout).
    for b_possible_collection in os.listdir(b_collection_path):
        b_collection = os.path.join(b_collection_path, b_possible_collection)
        if not os.path.isdir(b_collection):
            continue
        b_galaxy = get_galaxy_metadata_path(b_collection)
        display.vvvvv("Considering %s as a possible path to a collection's galaxy.yml" % b_galaxy)
        if os.path.exists(b_galaxy):
            reqs.append(CollectionRequirement.from_path(b_collection, force, parent, fallback_metadata=True, skip=False))
    if not reqs:
        raise AnsibleError(err)

    return reqs
def _get_collection_info(dep_map, existing_collections, collection, requirement, source, b_temp_path, apis,
                         validate_certs, force, parent=None, allow_pre_release=False, req_type=None):
    """Resolve one collection requirement and record it in ``dep_map``.

    The requirement may be a local tar file, a URL to a tar, a git source, or
    a Galaxy collection name; ``req_type`` forces the interpretation, otherwise
    it is sniffed from ``collection``.

    :param dep_map: dict of FQCN (text) -> CollectionRequirement, mutated in place.
    :param existing_collections: already-installed CollectionRequirement objects.
    :param collection: requirement string (path, URL, git URI or FQCN).
    :param requirement: version requirement string.
    :param source: optional explicit Galaxy API to resolve names against.
    :param b_temp_path: temp dir (bytes) for downloads/extractions.
    :param parent: FQCN of the requiring collection, if this is a dependency.
    :raises AnsibleError: on download failure or invalid collection name.
    """
    dep_msg = ""
    if parent:
        dep_msg = " - as dependency of %s" % parent
    display.vvv("Processing requirement collection '%s'%s" % (to_text(collection), dep_msg))

    b_tar_path = None

    is_file = (
        req_type == 'file' or
        (not req_type and os.path.isfile(to_bytes(collection, errors='surrogate_or_strict')))
    )

    is_url = (
        req_type == 'url' or
        (not req_type and urlparse(collection).scheme.lower() in ['http', 'https'])
    )

    is_scm = (
        req_type == 'git' or
        (not req_type and not b_tar_path and collection.startswith(('git+', 'git@')))
    )

    if is_file:
        display.vvvv("Collection requirement '%s' is a tar artifact" % to_text(collection))
        b_tar_path = to_bytes(collection, errors='surrogate_or_strict')
    elif is_url:
        display.vvvv("Collection requirement '%s' is a URL to a tar artifact" % collection)
        try:
            b_tar_path = _download_file(collection, b_temp_path, None, validate_certs)
        except urllib_error.URLError as err:
            raise AnsibleError("Failed to download collection tar from '%s': %s"
                               % (to_native(collection), to_native(err)))

    if is_scm:
        if not collection.startswith('git'):
            collection = 'git+' + collection

        name, version, path, fragment = parse_scm(collection, requirement)
        b_tar_path = scm_archive_collection(path, name=name, version=version)

        with tarfile.open(b_tar_path, mode='r') as collection_tar:
            collection_tar.extractall(path=to_text(b_temp_path))

        # Ignore requirement if it is set (it must follow semantic versioning, unlike a git version, which is any tree-ish)
        # If the requirement was the only place version was set, requirement == version at this point
        if requirement not in {"*", ""} and requirement != version:
            display.warning(
                "The collection {0} appears to be a git repository and two versions were provided: '{1}', and '{2}'. "
                "The version {2} is being disregarded.".format(collection, version, requirement)
            )
        requirement = "*"

        reqs = _collections_from_scm(collection, requirement, b_temp_path, force, parent)
        for req in reqs:
            collection_info = get_collection_info_from_req(dep_map, req)
            update_dep_map_collection_info(dep_map, existing_collections, collection_info, parent, requirement)
    else:
        if b_tar_path:
            req = CollectionRequirement.from_tar(b_tar_path, force, parent=parent)
            collection_info = get_collection_info_from_req(dep_map, req)
        else:
            validate_collection_name(collection)

            display.vvvv("Collection requirement '%s' is the name of a collection" % collection)
            if collection in dep_map:
                # Already resolved once; merge the new requirement into it.
                collection_info = dep_map[collection]
                collection_info.add_requirement(parent, requirement)
            else:
                apis = [source] if source else apis
                collection_info = CollectionRequirement.from_name(collection, apis, requirement, force, parent=parent,
                                                                  allow_pre_release=allow_pre_release)

        update_dep_map_collection_info(dep_map, existing_collections, collection_info, parent, requirement)
def get_collection_info_from_req(dep_map, collection):
    """Return the dep-map entry for ``collection``, merging into an existing one.

    If the collection is already tracked in ``dep_map``, the existing entry
    absorbs this requirement (pinned to the candidate's latest version) and is
    returned; otherwise the candidate itself is returned unchanged.
    """
    key = to_text(collection)
    existing_entry = dep_map.get(key)

    if existing_entry is None:
        return collection

    existing_entry.add_requirement(None, collection.latest_version)
    return existing_entry
def update_dep_map_collection_info(dep_map, existing_collections, collection_info, parent, requirement):
    """Record ``collection_info`` in ``dep_map``, preferring an installed copy.

    When an already-installed collection with the same FQCN exists and a
    reinstall is not forced, the installed entry is validated against the new
    requirement and used instead of the freshly resolved one.
    """
    name = to_text(collection_info)
    matches = [c for c in existing_collections if to_text(c) == name]

    if matches and not collection_info.force:
        # Test that the installed collection fits the requirement
        installed = matches[0]
        installed.add_requirement(parent, requirement)
        collection_info = installed

    dep_map[to_text(collection_info)] = collection_info
def parse_scm(collection, version):
    """Split a git collection requirement into ``(name, version, path, fragment)``.

    ``collection`` may embed a version after a comma and a sub-directory after
    a ``#`` fragment; ``version`` defaults to ``HEAD`` when unspecified or
    ``*``. The repo name is derived from the final path component, with any
    ``.git`` suffix stripped.
    """
    if ',' in collection:
        # An embedded ",version" wins over the separately supplied version.
        collection, version = collection.split(',', 1)
    elif version == '*' or not version:
        version = 'HEAD'

    path = collection[4:] if collection.startswith('git+') else collection

    path, fragment = urldefrag(path)
    fragment = fragment.strip(os.path.sep)

    if path.endswith(os.path.sep + '.git'):
        # Bare checkout path like /repos/myrepo/.git — the repo name is the
        # second-to-last component.
        name = path.split(os.path.sep)[-2]
    elif '://' not in path and '@' not in path:
        # A plain local path; use it as-is.
        name = path
    else:
        name = path.split('/')[-1]

    if name.endswith('.git'):
        name = name[:-4]

    return name, version, path, fragment
def _download_file(url, b_path, expected_hash, validate_certs, headers=None):
    """Download ``url`` into a uniquely named file under ``b_path`` (bytes dir).

    The destination filename keeps the URL's basename and extension. When
    ``expected_hash`` is given, the downloaded content's SHA256 must match or
    an AnsibleError is raised.

    :return: bytes path of the downloaded file.
    """
    # Split the URL basename into (stem, extension) to build the temp filename.
    urlsplit = os.path.splitext(to_text(url.rsplit('/', 1)[1]))
    b_file_name = to_bytes(urlsplit[0], errors='surrogate_or_strict')
    b_file_ext = to_bytes(urlsplit[1], errors='surrogate_or_strict')
    b_file_path = tempfile.NamedTemporaryFile(dir=b_path, prefix=b_file_name, suffix=b_file_ext, delete=False).name

    display.vvv("Downloading %s to %s" % (url, to_text(b_path)))
    # Galaxy redirs downloads to S3 which reject the request if an Authorization header is attached so don't redir that
    resp = open_url(to_native(url, errors='surrogate_or_strict'), validate_certs=validate_certs, headers=headers,
                    unredirected_headers=['Authorization'], http_agent=user_agent())

    with open(b_file_path, 'wb') as download_file:
        # _consume_file streams the response to disk and returns its SHA256.
        actual_hash = _consume_file(resp, download_file)

    if expected_hash:
        display.vvvv("Validating downloaded file hash %s with expected hash %s" % (actual_hash, expected_hash))
        if expected_hash != actual_hash:
            raise AnsibleError("Mismatch artifact hash with downloaded file")

    return b_file_path
def _extract_tar_dir(tar, dirname, b_dest):
    """ Extracts a directory from a collection tar. """
    member_names = [to_native(dirname, errors='surrogate_or_strict')]

    # Create list of members with and without trailing separator
    if not member_names[-1].endswith(os.path.sep):
        member_names.append(member_names[-1] + os.path.sep)

    # Try all of the member names and stop on the first one that are able to successfully get
    for member in member_names:
        try:
            tar_member = tar.getmember(member)
        except KeyError:
            continue
        break
    else:
        # If we still can't find the member, raise a nice error.
        raise AnsibleError("Unable to extract '%s' from collection" % to_native(member, errors='surrogate_or_strict'))

    b_dir_path = os.path.join(b_dest, to_bytes(dirname, errors='surrogate_or_strict'))

    b_parent_path = os.path.dirname(b_dir_path)
    try:
        os.makedirs(b_parent_path, mode=0o0755)
    except OSError as e:
        # Parent already existing is fine; anything else is a real failure.
        if e.errno != errno.EEXIST:
            raise

    if tar_member.type == tarfile.SYMTYPE:
        b_link_path = to_bytes(tar_member.linkname, errors='surrogate_or_strict')
        # Refuse symlinked dirs whose target escapes the destination tree.
        if not _is_child_path(b_link_path, b_dest, link_name=b_dir_path):
            raise AnsibleError("Cannot extract symlink '%s' in collection: path points to location outside of "
                               "collection '%s'" % (to_native(dirname), b_link_path))

        os.symlink(b_link_path, b_dir_path)
    else:
        if not os.path.isdir(b_dir_path):
            os.mkdir(b_dir_path, 0o0755)
def _extract_tar_file(tar, filename, b_dest, b_temp_path, expected_hash=None):
    """ Extracts a file from a collection tar. """
    with _get_tar_file_member(tar, filename) as (tar_member, tar_obj):
        if tar_member.type == tarfile.SYMTYPE:
            # Symlinks carry no payload; hash the (empty) stream for the check.
            actual_hash = _consume_file(tar_obj)

        else:
            # Stream to a temp file first; it is moved into place below (the
            # NamedTemporaryFile has delete=False, so the handle's name remains
            # valid after this with-block closes it).
            with tempfile.NamedTemporaryFile(dir=b_temp_path, delete=False) as tmpfile_obj:
                actual_hash = _consume_file(tar_obj, tmpfile_obj)

        if expected_hash and actual_hash != expected_hash:
            raise AnsibleError("Checksum mismatch for '%s' inside collection at '%s'"
                               % (to_native(filename, errors='surrogate_or_strict'), to_native(tar.name)))

        b_dest_filepath = os.path.abspath(os.path.join(b_dest, to_bytes(filename, errors='surrogate_or_strict')))
        b_parent_dir = os.path.dirname(b_dest_filepath)
        # Path-traversal guard: the target must stay under the destination dir.
        if not _is_child_path(b_parent_dir, b_dest):
            raise AnsibleError("Cannot extract tar entry '%s' as it will be placed outside the collection directory"
                               % to_native(filename, errors='surrogate_or_strict'))

        if not os.path.exists(b_parent_dir):
            # Seems like Galaxy does not validate if all file entries have a corresponding dir ftype entry. This check
            # makes sure we create the parent directory even if it wasn't set in the metadata.
            os.makedirs(b_parent_dir, mode=0o0755)

        if tar_member.type == tarfile.SYMTYPE:
            b_link_path = to_bytes(tar_member.linkname, errors='surrogate_or_strict')
            if not _is_child_path(b_link_path, b_dest, link_name=b_dest_filepath):
                raise AnsibleError("Cannot extract symlink '%s' in collection: path points to location outside of "
                                   "collection '%s'" % (to_native(filename), b_link_path))

            os.symlink(b_link_path, b_dest_filepath)
        else:
            shutil.move(to_bytes(tmpfile_obj.name, errors='surrogate_or_strict'), b_dest_filepath)

            # Default to rw-r--r-- and only add execute if the tar file has execute.
            tar_member = tar.getmember(to_native(filename, errors='surrogate_or_strict'))
            new_mode = 0o644
            if stat.S_IMODE(tar_member.mode) & stat.S_IXUSR:
                new_mode |= 0o0111

            os.chmod(b_dest_filepath, new_mode)
def _get_tar_file_member(tar, filename):
    """Return a context manager yielding ``(member, fileobj)`` for ``filename``.

    :raises AnsibleError: if the tar does not contain the requested file.
    """
    n_filename = to_native(filename, errors='surrogate_or_strict')

    try:
        member = tar.getmember(n_filename)
    except KeyError:
        msg = "Collection tar at '%s' does not contain the expected file '%s'." % (
            to_native(tar.name),
            n_filename)
        raise AnsibleError(msg)

    return _tarfile_extract(tar, member)
def _get_json_from_tar_file(b_path, filename):
    """Load and deserialize a JSON member (e.g. MANIFEST.json) from a collection tar.

    Fix: accumulate decoded chunks in a list and join once instead of
    repeated string concatenation (which is quadratic on large members).

    :param b_path: Path (bytes) to the collection tar archive.
    :param filename: Name of the JSON member inside the archive.
    :return: Deserialized JSON content.
    """
    chunks = []
    with tarfile.open(b_path, mode='r') as collection_tar:
        with _get_tar_file_member(collection_tar, filename) as (dummy, tar_obj):
            bufsize = 65536
            data = tar_obj.read(bufsize)
            while data:
                chunks.append(to_text(data))
                data = tar_obj.read(bufsize)

    return json.loads(''.join(chunks))
def _get_tar_file_hash(b_path, filename):
    """Return the SHA256 hex digest of one member of a collection tar."""
    with tarfile.open(b_path, mode='r') as collection_tar, \
            _get_tar_file_member(collection_tar, filename) as (dummy, tar_obj):
        return _consume_file(tar_obj)
def _is_child_path(path, parent_path, link_name=None):
    """Return True when ``path`` lies at or below ``parent_path``.

    When ``link_name`` is given, ``path`` is treated as a (possibly relative)
    symlink target and is first resolved against the directory containing the
    link.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')

    if link_name and not os.path.isabs(b_path):
        # Resolve the relative link target against the link's own directory.
        b_link_dir = os.path.dirname(to_bytes(link_name, errors='surrogate_or_strict'))
        b_path = os.path.abspath(os.path.join(b_link_dir, b_path))

    b_parent_path = to_bytes(parent_path, errors='surrogate_or_strict')
    if b_path == b_parent_path:
        return True

    # Require a separator after the parent so /a/bc is not "inside" /a/b.
    return b_path.startswith(b_parent_path + to_bytes(os.path.sep))
def _consume_file(read_from, write_to=None):
bufsize = 65536
sha256_digest = sha256()
data = read_from.read(bufsize)
while data:
if write_to is not None:
write_to.write(data)
write_to.flush()
sha256_digest.update(data)
data = read_from.read(bufsize)
return sha256_digest.hexdigest()
def get_galaxy_metadata_path(b_path):
b_default_path = os.path.join(b_path, b'galaxy.yml')
candidate_names = [b'galaxy.yml', b'galaxy.yaml']
for b_name in candidate_names:
b_path = os.path.join(b_path, b_name)
if os.path.exists(b_path):
return b_path
return b_default_path
| indrajitr/ansible | lib/ansible/galaxy/collection.py | Python | gpl-3.0 | 68,862 | [
"Galaxy"
] | 0050c4497fe6da1d58af273890c190c55b1f01be38e65a12a7d344bd24f79ff0 |
from pymatgen.io.vaspio import Vasprun
def parse_xml():
    """Parse the sample vasprun.xml with pymatgen's Vasprun parser (profiling target)."""
    v = Vasprun("../test_files/vasprun.xml")


if __name__ == "__main__":
    import timeit
    # Time a single cold parse of the test file.
    print(timeit.timeit("parse_xml()", setup="from __main__ import parse_xml",
                        number=1))
| Dioptas/pymatgen | dev_scripts/profile_xml.py | Python | mit | 247 | [
"pymatgen"
] | dc6a96a6f272d402f08d97b0f1df628a7e783816a660846be8deecd42117c6de |
import os
from types import GeneratorType
from tempfile import mkdtemp, NamedTemporaryFile
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
import pytest
from sklearn.metrics import r2_score
import oddt
from oddt.scoring import scorer, ensemble_descriptor, ensemble_model
from oddt.scoring.descriptors import (autodock_vina_descriptor,
fingerprints,
oddt_vina_descriptor)
from oddt.scoring.models.classifiers import neuralnetwork
from oddt.scoring.models import regressors
from oddt.scoring.functions import rfscore, nnscore, PLECscore, ri_score
from oddt.scoring.functions.RIscore import b_factor
test_data_dir = os.path.dirname(os.path.abspath(__file__))
actives_sdf = os.path.join(test_data_dir, 'data', 'dude', 'xiap',
'actives_docked.sdf')
receptor_pdb = os.path.join(test_data_dir, 'data', 'dude', 'xiap',
'receptor_rdkit.pdb')
results = os.path.join(test_data_dir, 'data', 'results', 'xiap')
@pytest.mark.filterwarnings('ignore:Data with input dtype int64 was converted')
def test_scorer():
    """Fit a fingerprint+NN ``scorer`` on a toy split and check that batch,
    per-ligand and generator prediction APIs all agree."""
    np.random.seed(42)

    # toy example with made up values
    mols = list(oddt.toolkit.readfile('sdf', actives_sdf))
    values = [0]*5 + [1]*5
    test_values = [0, 0, 1, 1, 0]
    # Fingerprint type depends on the active cheminformatics backend.
    if oddt.toolkit.backend == 'ob':
        fp = 'fp2'
    else:
        fp = 'rdkit'
    simple_scorer = scorer(neuralnetwork(), fingerprints(fp))
    simple_scorer.fit(mols[:10], values)
    predictions = simple_scorer.predict(mols[10:15])
    assert_array_almost_equal(predictions, [0, 1, 0, 1, 0])
    score = simple_scorer.score(mols[10:15], test_values)
    assert_almost_equal(score, 0.6)

    # The single-ligand API must reproduce the batch predictions.
    scored_mols = [simple_scorer.predict_ligand(mol) for mol in mols[10:15]]
    single_predictions = [float(mol.data['score']) for mol in scored_mols]
    assert_array_almost_equal(predictions, single_predictions)

    # The lazy generator API must also match.
    scored_mols_gen = simple_scorer.predict_ligands(mols[10:15])
    assert isinstance(scored_mols_gen, GeneratorType)
    gen_predictions = [float(mol.data['score']) for mol in scored_mols_gen]
    assert_array_almost_equal(predictions, gen_predictions)
def test_ensemble_descriptor():
    """An ensemble descriptor must propagate the protein to its members and
    produce the horizontal concatenation of their outputs."""
    mols = list(oddt.toolkit.readfile('sdf', actives_sdf))[:10]
    list(map(lambda x: x.addh(), mols))

    rec = next(oddt.toolkit.readfile('pdb', receptor_pdb))
    rec.protein = True
    rec.addh()

    desc1 = rfscore(version=1).descriptor_generator
    desc2 = oddt_vina_descriptor()
    ensemble = ensemble_descriptor((desc1, desc2))
    ensemble.set_protein(rec)
    # Total length is the sum of member descriptor lengths.
    assert len(ensemble) == len(desc1) + len(desc2)

    # set protein
    assert desc1.protein == rec
    assert desc2.protein == rec

    ensemble_scores = ensemble.build(mols)
    scores1 = desc1.build(mols)
    scores2 = desc2.build(mols)
    assert_array_almost_equal(ensemble_scores, np.hstack((scores1, scores2)))
def test_ensemble_model():
    """An ensemble model must predict the mean of its members' predictions and
    an ensemble of one model must behave exactly like that model."""
    X = np.vstack((np.arange(30, 10, -2, dtype='float64'),
                   np.arange(100, 90, -1, dtype='float64'))).T
    Y = np.arange(10, dtype='float64')

    rf = regressors.randomforest(random_state=42)
    nn = regressors.neuralnetwork(solver='lbfgs', random_state=42)
    ensemble = ensemble_model((rf, nn))
    # we do not need to fit underlying models, they change when we fit enseble
    ensemble.fit(X, Y)

    pred = ensemble.predict(X)
    mean_pred = np.vstack((rf.predict(X), nn.predict(X))).mean(axis=0)
    assert_array_almost_equal(pred, mean_pred)
    assert_almost_equal(ensemble.score(X, Y), r2_score(Y, pred))

    # ensemble of a single model should behave exactly like this model
    nn = neuralnetwork(solver='lbfgs', random_state=42)
    ensemble = ensemble_model((nn,))
    ensemble.fit(X, Y)
    assert_array_almost_equal(ensemble.predict(X), nn.predict(X))
    assert_almost_equal(ensemble.score(X, Y), nn.score(X, Y))
def test_original_vina():
    """Check orignal Vina partial scores descriptor"""
    mols = list(oddt.toolkit.readfile('sdf', actives_sdf))
    list(map(lambda x: x.addh(), mols))

    rec = next(oddt.toolkit.readfile('pdb', receptor_pdb))
    rec.protein = True
    rec.addh()

    # Delete molecule which has differences in Acceptor-Donor def in RDK and OB
    del mols[65]

    vina_scores = ['vina_gauss1',
                   'vina_gauss2',
                   'vina_repulsion',
                   'vina_hydrophobic',
                   'vina_hydrogen']

    # save correct results (for future use)
    # np.savetxt(os.path.join(results, 'autodock_vina_scores.csv'),
    #            autodock_vina_descriptor(protein=rec,
    #                                     vina_scores=vina_scores).build(mols),
    #            fmt='%.16g',
    #            delimiter=',')
    # Reference values precomputed with the original AutoDock Vina binary.
    autodock_vina_results_correct = np.loadtxt(
        os.path.join(results, 'autodock_vina_scores.csv'),
        delimiter=',',
        dtype=np.float64)
    autodock_vina_results = autodock_vina_descriptor(
        protein=rec,
        vina_scores=vina_scores).build(mols)
    assert_array_almost_equal(autodock_vina_results,
                              autodock_vina_results_correct,
                              decimal=4)
def test_internal_vina():
    """Compare internal vs orignal Vina partial scores"""
    mols = list(oddt.toolkit.readfile('sdf', actives_sdf))
    list(map(lambda x: x.addh(), mols))

    rec = next(oddt.toolkit.readfile('pdb', receptor_pdb))
    rec.protein = True
    rec.addh()

    # Delete molecule which has differences in Acceptor-Donor def in RDK and OB
    del mols[65]

    vina_scores = ['vina_gauss1',
                   'vina_gauss2',
                   'vina_repulsion',
                   'vina_hydrophobic',
                   'vina_hydrogen']
    # Reference values from the original Vina binary (shared with
    # test_original_vina); oddt's reimplementation must reproduce them.
    autodock_vina_results = np.loadtxt(
        os.path.join(results, 'autodock_vina_scores.csv'),
        delimiter=',',
        dtype=np.float64)
    oddt_vina_results = oddt_vina_descriptor(
        protein=rec, vina_scores=vina_scores).build(mols)
    assert_array_almost_equal(oddt_vina_results, autodock_vina_results, decimal=4)
def test_rfscore_desc():
    """Test RFScore v1-3 descriptors generators"""
    mols = list(oddt.toolkit.readfile('sdf', actives_sdf))
    list(map(lambda x: x.addh(), mols))

    rec = next(oddt.toolkit.readfile('pdb', receptor_pdb))
    rec.protein = True
    rec.addh()

    # Delete molecule which has differences in Acceptor-Donor def in RDK and OB
    del mols[65]

    for v in [1, 2, 3]:
        descs = rfscore(version=v, protein=rec).descriptor_generator.build(mols)
        # save correct results (for future use)
        # np.savetxt(os.path.join(results, 'rfscore_v%i_descs.csv' % v),
        #            descs,
        #            fmt='%.16g',
        #            delimiter=',')
        descs_correct = np.loadtxt(
            os.path.join(results, 'rfscore_v%i_descs.csv' % v),
            delimiter=',')

        # help debug errors
        for i in range(descs.shape[1]):
            mask = np.abs(descs[:, i] - descs_correct[:, i]) > 1e-4
            if mask.sum() > 1:
                print(i, np.vstack((descs[mask, i], descs_correct[mask, i])))

        assert_array_almost_equal(descs, descs_correct, decimal=4)
def test_nnscore_desc():
    """Test NNScore descriptors generators"""
    mols = list(oddt.toolkit.readfile('sdf', actives_sdf))
    # NNScore descriptors are defined on polar-H-only structures.
    list(map(lambda x: x.addh(only_polar=True), mols))

    rec = next(oddt.toolkit.readfile('pdb', receptor_pdb))
    rec.protein = True
    rec.addh(only_polar=True)

    # Delete molecule which has differences in Acceptor-Donor def in RDK and OB
    del mols[65]

    gen = nnscore(protein=rec).descriptor_generator
    descs = gen.build(mols)
    # save correct results (for future use)
    # np.savetxt(os.path.join(results, 'nnscore_descs.csv'),
    #            descs,
    #            fmt='%.16g',
    #            delimiter=',')
    # Reference values differ per backend (OpenBabel vs RDKit perception).
    if oddt.toolkit.backend == 'ob':
        descs_correct = np.loadtxt(os.path.join(results, 'nnscore_descs_ob.csv'),
                                   delimiter=',')
    else:
        descs_correct = np.loadtxt(os.path.join(results, 'nnscore_descs_rdk.csv'),
                                   delimiter=',')

    # help debug errors
    for i in range(descs.shape[1]):
        mask = np.abs(descs[:, i] - descs_correct[:, i]) > 1e-4
        if mask.sum() > 1:
            print(i, gen.titles[i], mask.sum())
            print(np.vstack((descs[mask, i], descs_correct[mask, i])))

    assert_array_almost_equal(descs, descs_correct, decimal=4)
models = ([PLECscore(n_jobs=1, version=v, size=2048)
for v in ['linear', 'nn', 'rf']] +
[nnscore(n_jobs=1)] +
[rfscore(version=v, n_jobs=1) for v in [1, 2, 3]])
@pytest.mark.parametrize('model', models)
def test_model_train(model):
    """Train each scoring function on a tiny PDBbind subset and verify that
    it can set a protein, predict scores and reproduce them via score()."""
    mols = list(oddt.toolkit.readfile('sdf', actives_sdf))[:10]
    for mol in mols:
        mol.addh()
    rec = next(oddt.toolkit.readfile('pdb', receptor_pdb))
    rec.protein = True
    rec.addh()
    data_dir = os.path.join(test_data_dir, 'data')
    home_dir = mkdtemp()
    pdbbind_versions = (2007, 2013, 2016)
    # The test data ships a single PDBbind tree; symlink it under every
    # versioned directory name the training pipeline expects.
    pdbbind_dir = os.path.join(data_dir, 'pdbbind')
    for pdbbind_v in pdbbind_versions:
        version_dir = os.path.join(data_dir, 'v%s' % pdbbind_v)
        if not os.path.isdir(version_dir):
            os.symlink(pdbbind_dir, version_dir)
    with NamedTemporaryFile(suffix='.pickle') as f:
        model.gen_training_data(data_dir, pdbbind_versions=pdbbind_versions,
                                home_dir=home_dir)
        model.train(home_dir=home_dir, sf_pickle=f.name)
    model.set_protein(rec)
    # check if protein setting was successful
    assert model.protein == rec
    if hasattr(model.descriptor_generator, 'protein'):
        assert model.descriptor_generator.protein == rec
    preds = model.predict(mols)
    assert len(preds) == 10
    # `np.float` was a deprecated alias for the builtin `float` (i.e.
    # float64) and was removed in NumPy 1.24; compare against np.float64.
    assert preds.dtype == np.float64
    assert model.score(mols, preds) == 1.0
def test_ri_score():
    """Rigidity Index"""
    receptor = next(oddt.toolkit.readfile('pdb', os.path.join(
        test_data_dir, 'data/dude/xiap/receptor_rdkit.pdb')))
    receptor.protein = True
    receptor.addh(only_polar=True)
    # Keep only the docked poses of a single active (title '312335').
    ligands = [mol for mol in oddt.toolkit.readfile('sdf', os.path.join(
        test_data_dir, 'data/dude/xiap/actives_docked.sdf'))
        if mol.title == '312335']
    for ligand in ligands:
        ligand.addh(only_polar=True)
    # Reference values differ per toolkit because of protonation/typing.
    if oddt.toolkit.backend == 'ob':
        ri_score_target = np.array([
            1798.951, 1815.714, 1851.27, 1781.166, 1789.73, 1766.882,
            1792.726, 1747.342, 1836.919, 1766.815, 1816.401, 1569.533,
            1544.601, 1530.015, 1785.551, 1896.576, 1555.909, 1710.525,
            1707.488, 1586.404])
    else:
        ri_score_target = np.array([
            4211.84, 4193.968, 4295.324, 4140.516, 4182.688, 4130.795, 4212.946,
            4119.207, 4261.942, 4146.171, 4175.418, 3810.425, 3695.924, 3702.532,
            4144.078, 4317.129, 3763.041, 4082.63, 4063.534, 3751.247])
    ri_score_computed = np.array(
        [ri_score(ligand, receptor) for ligand in ligands])
    assert_almost_equal(ri_score_target, ri_score_computed, 2)
def test_b_factor():
    """Flexibility-Rigity Index"""
    receptor = next(oddt.toolkit.readfile('pdb', os.path.join(
        test_data_dir, 'data/dude/xiap/receptor_rdkit.pdb')))
    receptor.protein = True
    receptor.addh(only_polar=True)
    # Keep only the docked poses of a single active (title '312335').
    ligands = [mol for mol in oddt.toolkit.readfile('sdf', os.path.join(
        test_data_dir, 'data/dude/xiap/actives_docked.sdf'))
        if mol.title == '312335']
    for ligand in ligands:
        ligand.addh(only_polar=True)
    # Reference values differ per toolkit because of protonation/typing.
    if oddt.toolkit.backend == 'ob':
        b_factor_target = np.array([
            -0.052, -0.053, -0.053, -0.052, -0.052, -0.051, -0.052, -0.051,
            -0.053, -0.051, -0.052, -0.048, -0.048, -0.048, -0.052, -0.054,
            -0.048, -0.051, -0.051, -0.049])
    else:
        b_factor_target = np.array([
            -0.055, -0.055, -0.056, -0.055, -0.055, -0.055, -0.055, -0.054,
            -0.056, -0.055, -0.055, -0.052, -0.05, -0.05, -0.055, -0.057,
            -0.051, -0.054, -0.054, -0.051])
    b_factor_computed = np.array(
        [b_factor(ligand, receptor) for ligand in ligands])
    assert_almost_equal(b_factor_target, b_factor_computed, decimal=3)
| mkukielka/oddt | tests/test_scoring.py | Python | bsd-3-clause | 12,498 | [
"RDKit"
] | f9906377a322e0d07b81121f591f9908db14aa60453e7010e2dd509e8e6f3352 |
import numpy as np
import theano
import theano.tensor as T
import unittest
import tempfile
from numpy.testing import assert_array_equal
from smartlearner import views, stopping_criteria, Trainer, tasks
from smartlearner.direction_modifiers import ConstantLearningRate
from smartlearner.optimizers import SGD
from smartlearner.testing import DummyLoss, DummyBatchScheduler
from smartlearner.utils import sharedX
floatX = theano.config.floatX
class DummyLossWithGradient(DummyLoss):
    """Dummy loss exposing a real Theano gradient for one shared parameter."""

    def __init__(self, cost, param):
        super().__init__()
        # Scalar symbolic cost and the shared variable it is derived from.
        self.cost = cost
        self.param = param

    def _get_gradients(self):
        # Gradient of the scalar cost w.r.t. the single tracked parameter.
        return {self.param: T.grad(cost=self.cost, wrt=self.param)}
class TestConstantLearningRate(unittest.TestCase):
    """Checks that ConstantLearningRate keeps the lr fixed across updates and
    that its state survives save/load and resume-from-checkpoint."""

    def _build_experiment(self):
        """Build a fresh (trainer, logger, direction_modifier) triple for a
        small SGD experiment whose learning rate is logged every update."""
        # Create an Nd gaussian function to optimize. This function is not
        # well-conditioned and there exists no perfect gradient step to converge in
        # only one iteration.
        N = 4
        center = 5*np.ones((1, N)).astype(floatX)
        param = sharedX(np.zeros((1, N)))
        cost = T.sum(0.5*T.dot(T.dot((param-center), np.diag(1./np.arange(1, N+1))), (param-center).T))
        loss = DummyLossWithGradient(cost, param)
        optimizer = SGD(loss)
        direction_modifier = ConstantLearningRate(lr=self.lr)
        optimizer.append_direction_modifier(direction_modifier)
        trainer = Trainer(optimizer, DummyBatchScheduler())
        # Monitor the learning rate.
        logger = tasks.Logger(views.MonitorVariable(list(direction_modifier.parameters.values())[0]))
        trainer.append_task(logger)
        return trainer, logger, direction_modifier

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # NOTE(review): a full training run is performed in __init__, i.e. once
        # per test method instantiation, rather than in setUp/setUpClass.
        self.lr = 0.1234
        self.max_epoch = 10
        self.trainer, self.logger, self.direction_modifier = self._build_experiment()
        self.trainer.append_task(stopping_criteria.MaxEpochStopping(self.max_epoch))
        self.trainer.train()

    def test_behaviour(self):
        """The logged learning rate equals `self.lr` at every update."""
        learning_rate_per_update = np.array(self.logger.get_variable_history(0))[:, :, 0].flatten()
        expected_learning_rate_per_update = [self.lr for _ in range(self.max_epoch)]
        assert_array_equal(learning_rate_per_update, expected_learning_rate_per_update)

    def test_save_load(self):
        """Saving then loading restores the direction modifier's parameters."""
        # Save training and resume it.
        with tempfile.TemporaryDirectory() as experiment_dir:
            # Save current training state of the experiment.
            self.trainer.save(experiment_dir)
            # Load previous training state of the experiment.
            trainer, logger, direction_modifier = self._build_experiment()
            trainer.load(experiment_dir)
            # Check the state of the direction modifier.
            for key in direction_modifier.parameters:
                assert_array_equal(direction_modifier.parameters[key].get_value(),
                                   self.direction_modifier.parameters[key].get_value())

    def test_resume(self):
        """Training 5 epochs, checkpointing, then resuming for 5 more must
        produce the same learning-rate history as a single 10-epoch run."""
        trainer1, logger1, direction_modifier1 = self._build_experiment()
        trainer1.append_task(stopping_criteria.MaxEpochStopping(5))
        trainer1.train()
        # Save training and resume it.
        with tempfile.TemporaryDirectory() as experiment_dir:
            # Save current training state of the experiment.
            trainer1.save(experiment_dir)
            # Load previous training state of the experiment.
            trainer2, logger2, direction_modifier2 = self._build_experiment()
            trainer2.append_task(stopping_criteria.MaxEpochStopping(10))
            trainer2.load(experiment_dir)
            trainer2.train()
        # Check that concatenating `logger1` with `logger2` is the same as `self.logger`.
        learning_rate_per_update_part1 = np.array(logger1.get_variable_history(0))[:, :, 0].flatten()
        learning_rate_per_update_part2 = np.array(logger2.get_variable_history(0))[:, :, 0].flatten()
        expected_learning_rate_per_update = np.array(self.logger.get_variable_history(0))[:, :, 0].flatten()
        assert_array_equal(np.r_[learning_rate_per_update_part1, learning_rate_per_update_part2],
                           expected_learning_rate_per_update)
| ASalvail/smartlearner | tests/direction_modifiers/test_constant_learning_rate.py | Python | bsd-3-clause | 4,298 | [
"Gaussian"
] | d0e5ef4509255582f5e22a3f0aa1608960033779712970a749ac4fc5c41b4abd |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Tool to create a new, empty .grd file with all the basic sections.
'''
from grit.tool import interface
from grit import constants
from grit import util
# The contents of the new .grd file
_FILE_CONTENTS = '''\
<?xml version="1.0" encoding="UTF-8"?>
<grit base_dir="." latest_public_release="0" current_release="1"
source_lang_id="en" enc_check="%s">
<outputs>
<!-- TODO add each of your output files. Modify the three below, and add
your own for your various languages. See the user's guide for more
details.
Note that all output references are relative to the output directory
which is specified at build time. -->
<output filename="resource.h" type="rc_header" />
<output filename="en_resource.rc" type="rc_all" />
<output filename="fr_resource.rc" type="rc_all" />
</outputs>
<translations>
<!-- TODO add references to each of the XTB files (from the Translation
Console) that contain translations of messages in your project. Each
takes a form like <file path="english.xtb" />. Remember that all file
references are relative to this .grd file. -->
</translations>
<release seq="1">
<includes>
<!-- TODO add a list of your included resources here, e.g. BMP and GIF
resources. -->
</includes>
<structures>
<!-- TODO add a list of all your structured resources here, e.g. HTML
templates, menus, dialogs etc. Note that for menus, dialogs and version
information resources you reference an .rc file containing them.-->
</structures>
<messages>
<!-- TODO add all of your "string table" messages here. Remember to
change nontranslateable parts of the messages into placeholders (using the
<ph> element). You can also use the 'grit add' tool to help you identify
nontranslateable parts and create placeholders for them. -->
</messages>
</release>
</grit>''' % constants.ENCODING_CHECK
class NewGrd(interface.Tool):
'''Usage: grit newgrd OUTPUT_FILE
Creates a new, empty .grd file OUTPUT_FILE with comments about what to put
where in the file.'''
def ShortDescription(self):
return 'Create a new empty .grd file.'
def Run(self, global_options, my_arguments):
if not len(my_arguments) == 1:
print 'This tool requires exactly one argument, the name of the output file.'
return 2
filename = my_arguments[0]
out = util.WrapOutputStream(file(filename, 'w'), 'utf-8')
out.write(_FILE_CONTENTS)
out.close()
print "Wrote file %s" % filename
| JoKaWare/WTL-DUI | tools/grit/grit/tool/newgrd.py | Python | bsd-3-clause | 2,724 | [
"xTB"
] | f944257e8cd124b6d4d0ff08404d304d9fa7971744b94a12c3435b49069e041d |
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import List, Tuple, Union
from types import ModuleType
import math
import mxnet as mx
from mxnet.gluon import nn
from rl_coach.base_parameters import AgentParameters
from rl_coach.core_types import ActionProbabilities
from rl_coach.spaces import SpacesDefinition, BoxActionSpace, DiscreteActionSpace
from rl_coach.utils import eps
from rl_coach.architectures.mxnet_components.heads.head import Head, HeadLoss, LossInputSchema,\
NormalizedRSSInitializer
from rl_coach.architectures.mxnet_components.heads.head import LOSS_OUT_TYPE_LOSS, LOSS_OUT_TYPE_REGULARIZATION
from rl_coach.architectures.mxnet_components.utils import hybrid_clip, broadcast_like
LOSS_OUT_TYPE_KL = 'kl_divergence'
LOSS_OUT_TYPE_ENTROPY = 'entropy'
LOSS_OUT_TYPE_LIKELIHOOD_RATIO = 'likelihood_ratio'
LOSS_OUT_TYPE_CLIPPED_LIKELIHOOD_RATIO = 'clipped_likelihood_ratio'
nd_sym_type = Union[mx.nd.NDArray, mx.sym.Symbol]
class MultivariateNormalDist:
    def __init__(self,
                 num_var: int,
                 mean: nd_sym_type,
                 sigma: nd_sym_type,
                 F: ModuleType=mx.nd) -> None:
        """
        Distribution object for Multivariate Normal. Works with batches.
        Optionally works with batches and time steps, but be consistent in usage: i.e. if using time_step,
        mean, sigma and data for log_prob must all include a time_step dimension.

        :param num_var: number of variables in distribution
        :param mean: mean for each variable,
            of shape (num_var) or
            of shape (batch_size, num_var) or
            of shape (batch_size, time_step, num_var).
        :param sigma: covariance matrix,
            of shape (num_var, num_var) or
            of shape (batch_size, num_var, num_var) or
            of shape (batch_size, time_step, num_var, num_var).
        :param (mx.nd or mx.sym) F: backend api (mx.sym if block has been hybridized).
        """
        self.num_var = num_var
        self.mean = mean
        self.sigma = sigma
        self.F = F

    def inverse_using_cholesky(self, matrix: nd_sym_type) -> nd_sym_type:
        """
        Calculate inverses for a batch of matrices using Cholesky decomposition method.

        :param matrix: matrix (or matrices) to invert,
            of shape (num_var, num_var) or
            of shape (batch_size, num_var, num_var) or
            of shape (batch_size, time_step, num_var, num_var).
        :return: inverted matrix (or matrices),
            of shape (num_var, num_var) or
            of shape (batch_size, num_var, num_var) or
            of shape (batch_size, time_step, num_var, num_var).
        """
        cholesky_factor = self.F.linalg.potrf(matrix)
        return self.F.linalg.potri(cholesky_factor)

    def log_det(self, matrix: nd_sym_type) -> nd_sym_type:
        """
        Calculate log of the determinant for a batch of matrices using Cholesky decomposition method.

        :param matrix: matrix (or matrices) to take the log-determinant of,
            of shape (num_var, num_var) or
            of shape (batch_size, num_var, num_var) or
            of shape (batch_size, time_step, num_var, num_var).
        :return: log of the determinant,
            of shape (1) or
            of shape (batch_size) or
            of shape (batch_size, time_step).
        """
        # log|M| = 2 * sum(log(diag(L))) where M = L L^T (Cholesky).
        cholesky_factor = self.F.linalg.potrf(matrix)
        return 2 * self.F.linalg.sumlogdiag(cholesky_factor)

    def log_prob(self, x: nd_sym_type) -> nd_sym_type:
        """
        Calculate the log probability of data given the current distribution.

        See http://www.notenoughthoughts.net/posts/normal-log-likelihood-gradient.html
        and https://discuss.mxnet.io/t/multivariate-gaussian-log-density-operator/1169/7

        :param x: input data,
            of shape (num_var) or
            of shape (batch_size, num_var) or
            of shape (batch_size, time_step, num_var).
        :return: log_probability,
            of shape (1) or
            of shape (batch_size) or
            of shape (batch_size, time_step).
        """
        a = (self.num_var / 2) * math.log(2 * math.pi)
        log_det_sigma = self.log_det(self.sigma)
        b = (1 / 2) * log_det_sigma
        sigma_inv = self.inverse_using_cholesky(self.sigma)
        # deviation from mean, and dev_t is equivalent to transpose on last two dims.
        dev = (x - self.mean).expand_dims(-1)
        dev_t = (x - self.mean).expand_dims(-2)

        # since batch_dot only works with ndarrays with ndim of 3,
        # and we could have ndarrays with ndim of 4,
        # we flatten batch_size and time_step into single dim.
        dev_flat = dev.reshape(shape=(-1, 0, 0), reverse=1)
        sigma_inv_flat = sigma_inv.reshape(shape=(-1, 0, 0), reverse=1)
        dev_t_flat = dev_t.reshape(shape=(-1, 0, 0), reverse=1)
        c = (1 / 2) * self.F.batch_dot(self.F.batch_dot(dev_t_flat, sigma_inv_flat), dev_flat)
        # and now reshape back to (batch_size, time_step) if required.
        c = c.reshape_like(b)
        log_likelihood = -a - b - c
        return log_likelihood

    def entropy(self) -> nd_sym_type:
        """
        Calculate entropy of current distribution.

        See http://www.nowozin.net/sebastian/blog/the-entropy-of-a-normal-distribution.html

        :return: entropy,
            of shape (1) or
            of shape (batch_size) or
            of shape (batch_size, time_step).
        """
        # todo: check if differential entropy is correct
        log_det_sigma = self.log_det(self.sigma)
        return (self.num_var / 2) + ((self.num_var / 2) * math.log(2 * math.pi)) + ((1 / 2) * log_det_sigma)

    def kl_div(self, alt_dist) -> nd_sym_type:
        """
        Calculated KL-Divergence with another MultivariateNormalDist distribution

        See https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
        Specifically https://wikimedia.org/api/rest_v1/media/math/render/svg/a3bf3b4917bd1fcb8be48d6d6139e2e387bdc7d3

        :param alt_dist: alternative distribution used for kl divergence calculation
        :type alt_dist: MultivariateNormalDist
        :return: KL-Divergence, of shape (1,)
        """
        sigma_a_inv = self.F.linalg.potri(self.F.linalg.potrf(self.sigma))
        sigma_b_inv = self.F.linalg.potri(self.F.linalg.potrf(alt_dist.sigma))
        # Use self.F (not mx.nd directly) so this also works after
        # hybridization, when F is mx.sym.
        term1a = self.F.batch_dot(sigma_b_inv, self.sigma)
        # sum of diagonal for batch of matrices
        term1 = (broadcast_like(self.F, self.F.eye(self.num_var), term1a) * term1a).sum(axis=-1).sum(axis=-1)
        mean_diff = (alt_dist.mean - self.mean).expand_dims(-1)
        mean_diff_t = (alt_dist.mean - self.mean).expand_dims(-2)
        term2 = self.F.batch_dot(self.F.batch_dot(mean_diff_t, sigma_b_inv), mean_diff).reshape_like(term1)
        term3 = (2 * self.F.linalg.sumlogdiag(self.F.linalg.potrf(alt_dist.sigma))) -\
                (2 * self.F.linalg.sumlogdiag(self.F.linalg.potrf(self.sigma)))
        return 0.5 * (term1 + term2 - self.num_var + term3)
class CategoricalDist:
    def __init__(self, n_classes: int, probs: nd_sym_type, F: ModuleType=mx.nd) -> None:
        """
        Distribution object over a discrete set of classes. Works with batches,
        and optionally with time steps; be consistent in usage (if probs carry a
        time_step dimension, so must the data passed to log_prob).

        :param n_classes: number of classes in distribution
        :param probs: probabilities for each class,
            of shape (n_classes),
            of shape (batch_size, n_classes) or
            of shape (batch_size, time_step, n_classes)
        :param (mx.nd or mx.sym) F: backend api (mx.sym if block has been hybridized).
        """
        self.n_classes = n_classes
        self.probs = probs
        self.F = F

    def log_prob(self, actions: nd_sym_type) -> nd_sym_type:
        """
        Log probability of the given actions under the current distribution.

        :param actions: actions, with int8 data type,
            of shape (1), (batch_size) or (batch_size, time_step),
            matching how probs was shaped.
        :return: log_probability with the same leading shape as `actions`.
        """
        # Select each action's probability via a one-hot mask, then log it.
        one_hot = actions.one_hot(depth=self.n_classes)
        selected_probs = (self.probs * one_hot).sum(axis=-1)
        return selected_probs.log()

    def entropy(self) -> nd_sym_type:
        """
        Entropy of the current distribution: -sum_i p_i * log(p_i).

        :return: entropy,
            of shape (1), (batch_size) or (batch_size, time_step),
            matching how probs was shaped.
        """
        # todo: look into numerical stability
        return -(self.probs * self.probs.log()).sum(axis=-1)

    def kl_div(self, alt_dist) -> nd_sym_type:
        """
        KL-Divergence with another Categorical distribution.

        :param alt_dist: alternative distribution used for kl divergence calculation
        :type alt_dist: CategoricalDist
        :return: KL-Divergence
        """
        log_p = self.probs.clip(a_min=eps, a_max=1 - eps).log()
        log_q = alt_dist.probs.clip(a_min=eps, a_max=1 - eps).log()
        contrib = self.probs * (log_p - log_q)
        # Where q == 0 the divergence is infinite, unless p == 0 as well
        # (0 * log 0 := 0). Apply the masks in this order so the p == 0
        # rule overrides the q == 0 one.
        contrib = self.F.where(condition=(alt_dist.probs == 0),
                               x=self.F.ones_like(alt_dist.probs) * math.inf,
                               y=contrib)
        contrib = self.F.where(condition=(self.probs == 0),
                               x=self.F.zeros_like(self.probs),
                               y=contrib)
        return contrib.sum(axis=-1)
class DiscretePPOHead(nn.HybridBlock):
    def __init__(self, num_actions: int) -> None:
        """
        Head block for Discrete Proximal Policy Optimization: maps the
        middleware representation of the environment state to a probability
        for each discrete action.

        :param num_actions: number of actions in action space.
        """
        super(DiscretePPOHead, self).__init__()
        with self.name_scope():
            self.dense = nn.Dense(units=num_actions, flatten=False,
                                  weight_initializer=NormalizedRSSInitializer(0.01))

    def hybrid_forward(self, F: ModuleType, x: nd_sym_type) -> nd_sym_type:
        """
        Forward pass: dense projection followed by a softmax over actions.

        :param (mx.nd or mx.sym) F: backend api (mx.sym if block has been hybridized).
        :param x: middleware state representation,
            of shape (batch_size, in_channels) or
            of shape (batch_size, time_step, in_channels).
        :return: batch of probabilities for each action,
            of shape (batch_size, num_actions) or
            of shape (batch_size, time_step, num_actions).
        """
        return F.softmax(self.dense(x))
class ContinuousPPOHead(nn.HybridBlock):
    def __init__(self, num_actions: int) -> None:
        """
        Head block for Continuous Proximal Policy Optimization: maps the
        middleware representation of the environment state to the mean of a
        Gaussian policy, with a learnt (state-independent) log standard
        deviation per action dimension.

        :param num_actions: number of actions in action space.
        """
        super(ContinuousPPOHead, self).__init__()
        with self.name_scope():
            self.dense = nn.Dense(units=num_actions, flatten=False,
                                  weight_initializer=NormalizedRSSInitializer(0.01))
            # all samples (across batch, and time step) share the same covariance, which is learnt,
            # but since we assume the action probability variables are independent,
            # only the diagonal entries of the covariance matrix are specified.
            self.log_std = self.params.get('log_std',
                                           shape=(num_actions,),
                                           init=mx.init.Zero(),
                                           allow_deferred_init=True)
        # todo: is_local?

    def hybrid_forward(self, F: ModuleType, x: nd_sym_type, log_std: nd_sym_type) -> Tuple[nd_sym_type, nd_sym_type]:
        """
        Forward pass: compute per-sample action means and broadcast the shared
        standard deviation to match.

        :param (mx.nd or mx.sym) F: backend api (mx.sym if block has been hybridized).
        :param x: middleware state representation,
            of shape (batch_size, in_channels) or
            of shape (batch_size, time_step, in_channels).
        :return: (means, stds) for each action,
            each of shape (batch_size, num_actions) or
            of shape (batch_size, time_step, num_actions).
        """
        means = self.dense(x)
        # exp(log_std) gives the std; broadcast the (num_actions,) vector
        # over the batch (and time step) dimensions of the means.
        stds = broadcast_like(F, log_std.exp().expand_dims(0), means)
        return means, stds
class ClippedPPOLossDiscrete(HeadLoss):
    def __init__(self,
                 num_actions: int,
                 clip_likelihood_ratio_using_epsilon: float,
                 beta: float=0,
                 use_kl_regularization: bool=False,
                 initial_kl_coefficient: float=1,
                 kl_cutoff: float=0,
                 high_kl_penalty_coefficient: float=1,
                 weight: float=1,
                 batch_axis: int=0) -> None:
        """
        Loss for discrete version of Clipped PPO.

        :param num_actions: number of actions in action space.
        :param clip_likelihood_ratio_using_epsilon: epsilon to use for likelihood ratio clipping.
        :param beta: loss coefficient applied to entropy
        :param use_kl_regularization: option to add kl divergence loss
        :param initial_kl_coefficient: loss coefficient applied kl divergence loss (also see high_kl_penalty_coefficient).
        :param kl_cutoff: threshold for using high_kl_penalty_coefficient
        :param high_kl_penalty_coefficient: loss coefficient applied to kv divergence above kl_cutoff
        :param weight: scalar used to adjust relative weight of loss (if using this loss with others).
        :param batch_axis: axis used for mini-batch (default is 0) and excluded from loss aggregation.
        """
        super(ClippedPPOLossDiscrete, self).__init__(weight=weight, batch_axis=batch_axis)
        self.weight = weight
        self.num_actions = num_actions
        self.clip_likelihood_ratio_using_epsilon = clip_likelihood_ratio_using_epsilon
        self.beta = beta
        self.use_kl_regularization = use_kl_regularization
        self.initial_kl_coefficient = initial_kl_coefficient if self.use_kl_regularization else 0.0
        # Registered as a non-differentiable block parameter so the agent can
        # adapt the KL coefficient between updates without backprop through it.
        self.kl_coefficient = self.params.get('kl_coefficient',
                                              shape=(1,),
                                              init=mx.init.Constant([initial_kl_coefficient,]),
                                              differentiable=False)
        self.kl_cutoff = kl_cutoff
        self.high_kl_penalty_coefficient = high_kl_penalty_coefficient

    @property
    def input_schema(self) -> LossInputSchema:
        # Declares the order in which loss_forward receives head outputs,
        # agent-supplied inputs and targets.
        return LossInputSchema(
            head_outputs=['new_policy_probs'],
            agent_inputs=['actions', 'old_policy_probs', 'clip_param_rescaler'],
            targets=['advantages']
        )

    def loss_forward(self,
                     F: ModuleType,
                     new_policy_probs: nd_sym_type,
                     actions: nd_sym_type,
                     old_policy_probs: nd_sym_type,
                     clip_param_rescaler: nd_sym_type,
                     advantages: nd_sym_type,
                     kl_coefficient: nd_sym_type) -> List[Tuple[nd_sym_type, str]]:
        """
        Used for forward pass through loss computations.
        Works with batches of data, and optionally time_steps, but be consistent in usage: i.e. if using time_step,
        new_policy_probs, old_policy_probs, actions and advantages all must include a time_step dimension.

        NOTE: order of input arguments MUST NOT CHANGE because it matches the order
        parameters are passed in ppo_agent:train_network()

        :param (mx.nd or mx.sym) F: backend api (mx.sym if block has been hybridized).
        :param new_policy_probs: action probabilities predicted by DiscretePPOHead network,
            of shape (batch_size, num_actions) or
            of shape (batch_size, time_step, num_actions).
        :param old_policy_probs: action probabilities for previous policy,
            of shape (batch_size, num_actions) or
            of shape (batch_size, time_step, num_actions).
        :param actions: true actions taken during rollout,
            of shape (batch_size) or
            of shape (batch_size, time_step).
        :param clip_param_rescaler: scales epsilon to use for likelihood ratio clipping.
        :param advantages: change in state value after taking action (a.k.a advantage)
            of shape (batch_size) or
            of shape (batch_size, time_step).
        :param kl_coefficient: loss coefficient applied kl divergence loss (also see high_kl_penalty_coefficient).
        :return: loss, of shape (batch_size).
        """
        # Log-likelihood of the taken actions under the old and new policies.
        old_policy_dist = CategoricalDist(self.num_actions, old_policy_probs, F=F)
        action_probs_wrt_old_policy = old_policy_dist.log_prob(actions)

        new_policy_dist = CategoricalDist(self.num_actions, new_policy_probs, F=F)
        action_probs_wrt_new_policy = new_policy_dist.log_prob(actions)

        # Entropy bonus (negated: minimizing the loss maximizes entropy).
        entropy_loss = - self.beta * new_policy_dist.entropy().mean()

        if self.use_kl_regularization:
            # Adaptive KL penalty, with an extra quadratic penalty for KL
            # divergence that exceeds the cutoff.
            kl_div = old_policy_dist.kl_div(new_policy_dist).mean()
            weighted_kl_div = kl_coefficient * kl_div
            high_kl_div = F.stack(F.zeros_like(kl_div), kl_div - self.kl_cutoff).max().square()
            weighted_high_kl_div = self.high_kl_penalty_coefficient * high_kl_div
            kl_div_loss = weighted_kl_div + weighted_high_kl_div
        else:
            kl_div_loss = F.zeros(shape=(1,))

        # working with log probs, so minus first, then exponential (same as division)
        likelihood_ratio = (action_probs_wrt_new_policy - action_probs_wrt_old_policy).exp()

        if self.clip_likelihood_ratio_using_epsilon is not None:
            # clipping of likelihood ratio
            min_value = 1 - self.clip_likelihood_ratio_using_epsilon * clip_param_rescaler
            max_value = 1 + self.clip_likelihood_ratio_using_epsilon * clip_param_rescaler

            # can't use F.clip (with variable clipping bounds), hence custom implementation
            clipped_likelihood_ratio = hybrid_clip(F, likelihood_ratio, clip_lower=min_value, clip_upper=max_value)

            # lower bound of original, and clipped versions or each scaled advantage
            # element-wise min between the two ndarrays
            unclipped_scaled_advantages = likelihood_ratio * advantages
            clipped_scaled_advantages = clipped_likelihood_ratio * advantages
            scaled_advantages = F.stack(unclipped_scaled_advantages, clipped_scaled_advantages).min(axis=0)
        else:
            scaled_advantages = likelihood_ratio * advantages
            clipped_likelihood_ratio = F.zeros_like(likelihood_ratio)

        # for each batch, calculate expectation of scaled_advantages across time steps,
        # but want code to work with data without time step too, so reshape to add timestep if doesn't exist.
        scaled_advantages_w_time = scaled_advantages.reshape(shape=(0, -1))
        expected_scaled_advantages = scaled_advantages_w_time.mean(axis=1)
        # want to maximize expected_scaled_advantages, add minus so can minimize.
        surrogate_loss = (-expected_scaled_advantages * self.weight).mean()

        return [
            (surrogate_loss, LOSS_OUT_TYPE_LOSS),
            (entropy_loss + kl_div_loss, LOSS_OUT_TYPE_REGULARIZATION),
            (kl_div_loss, LOSS_OUT_TYPE_KL),
            (entropy_loss, LOSS_OUT_TYPE_ENTROPY),
            (likelihood_ratio, LOSS_OUT_TYPE_LIKELIHOOD_RATIO),
            (clipped_likelihood_ratio, LOSS_OUT_TYPE_CLIPPED_LIKELIHOOD_RATIO)
        ]
class ClippedPPOLossContinuous(HeadLoss):
def __init__(self,
num_actions: int,
clip_likelihood_ratio_using_epsilon: float,
beta: float=0,
use_kl_regularization: bool=False,
initial_kl_coefficient: float=1,
kl_cutoff: float=0,
high_kl_penalty_coefficient: float=1,
weight: float=1,
batch_axis: int=0):
"""
Loss for continuous version of Clipped PPO.
:param num_actions: number of actions in action space.
:param clip_likelihood_ratio_using_epsilon: epsilon to use for likelihood ratio clipping.
:param beta: loss coefficient applied to entropy
:param batch_axis: axis used for mini-batch (default is 0) and excluded from loss aggregation.
:param use_kl_regularization: option to add kl divergence loss
:param initial_kl_coefficient: initial loss coefficient applied kl divergence loss (also see high_kl_penalty_coefficient).
:param kl_cutoff: threshold for using high_kl_penalty_coefficient
:param high_kl_penalty_coefficient: loss coefficient applied to kv divergence above kl_cutoff
:param weight: scalar used to adjust relative weight of loss (if using this loss with others).
:param batch_axis: axis used for mini-batch (default is 0) and excluded from loss aggregation.
"""
super(ClippedPPOLossContinuous, self).__init__(weight=weight, batch_axis=batch_axis)
self.weight = weight
self.num_actions = num_actions
self.clip_likelihood_ratio_using_epsilon = clip_likelihood_ratio_using_epsilon
self.beta = beta
self.use_kl_regularization = use_kl_regularization
self.initial_kl_coefficient = initial_kl_coefficient if self.use_kl_regularization else 0.0
self.kl_coefficient = self.params.get('kl_coefficient',
shape=(1,),
init=mx.init.Constant([initial_kl_coefficient,]),
differentiable=False)
self.kl_cutoff = kl_cutoff
self.high_kl_penalty_coefficient = high_kl_penalty_coefficient
@property
def input_schema(self) -> LossInputSchema:
return LossInputSchema(
head_outputs=['new_policy_means','new_policy_stds'],
agent_inputs=['actions', 'old_policy_means', 'old_policy_stds', 'clip_param_rescaler'],
targets=['advantages']
)
def loss_forward(self,
                 F: ModuleType,
                 new_policy_means: nd_sym_type,
                 new_policy_stds: nd_sym_type,
                 actions: nd_sym_type,
                 old_policy_means: nd_sym_type,
                 old_policy_stds: nd_sym_type,
                 clip_param_rescaler: nd_sym_type,
                 advantages: nd_sym_type,
                 kl_coefficient: nd_sym_type) -> List[Tuple[nd_sym_type, str]]:
    """
    Build the clipped-PPO loss terms for a batch of continuous-action transitions.

    Works with batches of data, and optionally time_steps, but be consistent in usage: i.e. if
    using time_step, new_policy_means, old_policy_means, actions and advantages all must include
    a time_step dimension.

    :param (mx.nd or mx.sym) F: backend api (mx.sym if block has been hybridized).
    :param new_policy_means: action means predicted by MultivariateNormalDist network,
        of shape (batch_size, num_actions) or (batch_size, time_step, num_actions).
    :param new_policy_stds: action standard deviation returned by head,
        of shape (batch_size, num_actions) or (batch_size, time_step, num_actions).
    :param actions: true actions taken during rollout,
        of shape (batch_size, num_actions) or (batch_size, time_step, num_actions).
    :param old_policy_means: action means for previous policy,
        of shape (batch_size, num_actions) or (batch_size, time_step, num_actions).
    :param old_policy_stds: action standard deviation returned by head previously,
        of shape (batch_size, num_actions) or (batch_size, time_step, num_actions).
    :param clip_param_rescaler: scales epsilon to use for likelihood ratio clipping.
    :param advantages: change in state value after taking action (a.k.a advantage),
        of shape (batch_size,) or (batch_size, time_step).
    :param kl_coefficient: loss coefficient applied to the kl divergence loss
        (also see high_kl_penalty_coefficient).
    :return: list of (loss tensor, loss-output-type) pairs; the surrogate loss
        is of shape (batch_size,).
    """
    def _diag_covariance(stds, size):
        # Square the per-action standard deviations and broadcast them against
        # an identity matrix, yielding diagonal (size x size) covariance
        # matrices for every (batch, time_step) entry.
        variances = stds ** 2
        expanded = variances.expand_dims(2).tile((1, 1, size))
        return F.broadcast_mul(expanded, F.eye(size))

    old_policy_dist = MultivariateNormalDist(
        self.num_actions, old_policy_means,
        _diag_covariance(stds=old_policy_stds, size=self.num_actions), F=F)
    new_policy_dist = MultivariateNormalDist(
        self.num_actions, new_policy_means,
        _diag_covariance(stds=new_policy_stds, size=self.num_actions), F=F)
    log_prob_old = old_policy_dist.log_prob(actions)
    log_prob_new = new_policy_dist.log_prob(actions)

    # Encourage exploration by penalizing low entropy of the new policy.
    entropy_loss = - self.beta * new_policy_dist.entropy().mean()

    if self.use_kl_regularization:
        kl_div = old_policy_dist.kl_div(new_policy_dist).mean()
        # Linear penalty weighted by the adaptive coefficient, plus a squared
        # penalty that only activates once the KL exceeds the cutoff.
        excess = F.stack(F.zeros_like(kl_div), kl_div - self.kl_cutoff).max().square()
        kl_div_loss = kl_coefficient * kl_div + self.high_kl_penalty_coefficient * excess
    else:
        kl_div_loss = F.zeros(shape=(1,))

    # Subtracting log probabilities then exponentiating is equivalent to the
    # probability ratio new/old.
    likelihood_ratio = (log_prob_new - log_prob_old).exp()

    if self.clip_likelihood_ratio_using_epsilon is not None:
        epsilon = self.clip_likelihood_ratio_using_epsilon * clip_param_rescaler
        # F.clip does not accept tensor-valued bounds, hence the custom helper.
        clipped_likelihood_ratio = hybrid_clip(F, likelihood_ratio,
                                               clip_lower=1 - epsilon,
                                               clip_upper=1 + epsilon)
        # Pessimistic bound: element-wise minimum of the clipped and unclipped
        # scaled advantages.
        scaled_advantages = F.stack(likelihood_ratio * advantages,
                                    clipped_likelihood_ratio * advantages).min(axis=0)
    else:
        scaled_advantages = likelihood_ratio * advantages
        clipped_likelihood_ratio = F.zeros_like(likelihood_ratio)

    # Reshape so a time dimension always exists (0 keeps the batch axis), then
    # average over time to obtain the per-sample expectation.
    expected_scaled_advantages = scaled_advantages.reshape(shape=(0, -1)).mean(axis=1)
    # We maximize the expected scaled advantage, so minimize its negation.
    surrogate_loss = (-expected_scaled_advantages * self.weight).mean()

    return [
        (surrogate_loss, LOSS_OUT_TYPE_LOSS),
        (entropy_loss + kl_div_loss, LOSS_OUT_TYPE_REGULARIZATION),
        (kl_div_loss, LOSS_OUT_TYPE_KL),
        (entropy_loss, LOSS_OUT_TYPE_ENTROPY),
        (likelihood_ratio, LOSS_OUT_TYPE_LIKELIHOOD_RATIO),
        (clipped_likelihood_ratio, LOSS_OUT_TYPE_CLIPPED_LIKELIHOOD_RATIO)
    ]
class PPOHead(Head):
    # MXNet head for (clipped) Proximal Policy Optimization: wraps either a
    # discrete or a continuous policy sub-network and builds the matching loss.
    def __init__(self,
                 agent_parameters: AgentParameters,
                 spaces: SpacesDefinition,
                 network_name: str,
                 head_type_idx: int=0,
                 loss_weight: float=1.,
                 is_local: bool=True,
                 activation_function: str='tanh',
                 dense_layer: None=None) -> None:
        """
        Head block for Proximal Policy Optimization, to calculate probabilities for each action given middleware
        representation of the environment state.

        :param agent_parameters: containing algorithm parameters such as clip_likelihood_ratio_using_epsilon
            and beta_entropy.
        :param spaces: containing action spaces used for defining size of network output.
        :param network_name: name of head network. currently unused.
        :param head_type_idx: index of head network. currently unused.
        :param loss_weight: scalar used to adjust relative weight of loss (if using this loss with others).
        :param is_local: flag to denote if network is local. currently unused.
        :param activation_function: activation function to use between layers. currently unused.
        :param dense_layer: type of dense layer to use in network. currently unused.
        """
        super().__init__(agent_parameters, spaces, network_name, head_type_idx, loss_weight, is_local, activation_function,
                         dense_layer=dense_layer)
        self.return_type = ActionProbabilities
        self.clip_likelihood_ratio_using_epsilon = agent_parameters.algorithm.clip_likelihood_ratio_using_epsilon
        self.beta = agent_parameters.algorithm.beta_entropy
        self.use_kl_regularization = agent_parameters.algorithm.use_kl_regularization
        if self.use_kl_regularization:
            # KL-regularized variant: penalize divergence from the old policy.
            self.initial_kl_coefficient = agent_parameters.algorithm.initial_kl_coefficient
            self.kl_cutoff = 2 * agent_parameters.algorithm.target_kl_divergence
            self.high_kl_penalty_coefficient = agent_parameters.algorithm.high_kl_penalty_coefficient
        else:
            # No KL regularization: leave all related coefficients unset.
            self.initial_kl_coefficient, self.kl_cutoff, self.high_kl_penalty_coefficient = (None, None, None)
        self._loss = []
        # Select the policy sub-network matching the environment's action space.
        if isinstance(self.spaces.action, DiscreteActionSpace):
            self.net = DiscretePPOHead(num_actions=len(self.spaces.action.actions))
        elif isinstance(self.spaces.action, BoxActionSpace):
            self.net = ContinuousPPOHead(num_actions=self.spaces.action.shape[0])
        else:
            raise ValueError("Only discrete or continuous action spaces are supported for PPO.")

    def hybrid_forward(self,
                       F: ModuleType,
                       x: nd_sym_type) -> nd_sym_type:
        """
        :param (mx.nd or mx.sym) F: backend api (mx.sym if block has been hybridized).
        :param x: middleware embedding
        :return: policy parameters/probabilities
        """
        return self.net(x)

    def loss(self) -> mx.gluon.loss.Loss:
        """
        Specifies loss block to be used for this policy head.

        :return: loss block (can be called as function) for action probabilities returned by this policy network.
        """
        if isinstance(self.spaces.action, DiscreteActionSpace):
            loss = ClippedPPOLossDiscrete(len(self.spaces.action.actions),
                                          self.clip_likelihood_ratio_using_epsilon,
                                          self.beta,
                                          self.use_kl_regularization, self.initial_kl_coefficient,
                                          self.kl_cutoff, self.high_kl_penalty_coefficient,
                                          self.loss_weight)
        elif isinstance(self.spaces.action, BoxActionSpace):
            loss = ClippedPPOLossContinuous(self.spaces.action.shape[0],
                                            self.clip_likelihood_ratio_using_epsilon,
                                            self.beta,
                                            self.use_kl_regularization, self.initial_kl_coefficient,
                                            self.kl_cutoff, self.high_kl_penalty_coefficient,
                                            self.loss_weight)
        else:
            raise ValueError("Only discrete or continuous action spaces are supported for PPO.")
        loss.initialize()
        # set a property so can assign_kl_coefficient in future,
        # make a list, otherwise it would be added as a child of Head Block (due to type check)
        self._loss = [loss]
        return loss

    @property
    def kl_divergence(self):
        # (head index, loss-output key) used to fetch the KL term from outputs.
        return self.head_type_idx, LOSS_OUT_TYPE_KL

    @property
    def entropy(self):
        # (head index, loss-output key) used to fetch the entropy term.
        return self.head_type_idx, LOSS_OUT_TYPE_ENTROPY

    @property
    def likelihood_ratio(self):
        # (head index, loss-output key) for the unclipped probability ratio.
        return self.head_type_idx, LOSS_OUT_TYPE_LIKELIHOOD_RATIO

    @property
    def clipped_likelihood_ratio(self):
        # (head index, loss-output key) for the clipped probability ratio.
        return self.head_type_idx, LOSS_OUT_TYPE_CLIPPED_LIKELIHOOD_RATIO

    def assign_kl_coefficient(self, kl_coefficient: float) -> None:
        # Update the adaptive KL coefficient parameter inside the loss block
        # (requires loss() to have been called first, which populates _loss).
        self._loss[0].kl_coefficient.set_data(mx.nd.array((kl_coefficient,)))
"Gaussian"
] | 7a964ede595baefba335e32fa7b0c787b262aa6bfea27ff3844b24129c484a65 |
"""
Perform Levenberg-Marquardt least-squares minimization, based on MINPACK-1.
AUTHORS
The original version of this software, called LMFIT, was written in FORTRAN
as part of the MINPACK-1 package by XXX.
Craig Markwardt converted the FORTRAN code to IDL. The information for the
IDL version is:
Craig B. Markwardt, NASA/GSFC Code 662, Greenbelt, MD 20770
craigm@lheamail.gsfc.nasa.gov
UPDATED VERSIONs can be found on my WEB PAGE:
http://cow.physics.wisc.edu/~craigm/idl/idl.html
Mark Rivers created this Python version from Craig's IDL version.
Mark Rivers, University of Chicago
Building 434A, Argonne National Laboratory
9700 South Cass Avenue, Argonne, IL 60439
rivers@cars.uchicago.edu
Updated versions can be found at http://cars.uchicago.edu/software
Sergey Koposov converted the Mark's Python version from Numeric to numpy
Sergey Koposov, University of Cambridge, Institute of Astronomy,
Madingley road, CB3 0HA, Cambridge, UK
koposov@ast.cam.ac.uk
Updated versions can be found at http://code.google.com/p/astrolibpy/source/browse/trunk/
DESCRIPTION
MPFIT uses the Levenberg-Marquardt technique to solve the
least-squares problem. In its typical use, MPFIT will be used to
fit a user-supplied function (the "model") to user-supplied data
points (the "data") by adjusting a set of parameters. MPFIT is
based upon MINPACK-1 (LMDIF.F) by More' and collaborators.
For example, a researcher may think that a set of observed data
points is best modelled with a Gaussian curve. A Gaussian curve is
parameterized by its mean, standard deviation and normalization.
MPFIT will, within certain constraints, find the set of parameters
which best fits the data. The fit is "best" in the least-squares
sense; that is, the sum of the weighted squared differences between
the model and data is minimized.
The Levenberg-Marquardt technique is a particular strategy for
iteratively searching for the best fit. This particular
implementation is drawn from MINPACK-1 (see NETLIB), and is much faster
and more accurate than the version provided in the Scientific Python package
in Scientific.Functions.LeastSquares.
This version allows upper and lower bounding constraints to be placed on each
parameter, or the parameter can be held fixed.
The user-supplied Python function should return an array of weighted
deviations between model and data. In a typical scientific problem
the residuals should be weighted so that each deviate has a
gaussian sigma of 1.0. If X represents values of the independent
variable, Y represents a measurement for each value of X, and ERR
represents the error in the measurements, then the deviates could
be calculated as follows:
DEVIATES = (Y - F(X)) / ERR
where F is the analytical function representing the model. You are
recommended to use the convenience functions MPFITFUN and
MPFITEXPR, which are driver functions that calculate the deviates
for you. If ERR are the 1-sigma uncertainties in Y, then
TOTAL( DEVIATES^2 )
will be the total chi-squared value. MPFIT will minimize the
chi-square value. The values of X, Y and ERR are passed through
MPFIT to the user-supplied function via the FUNCTKW keyword.
Simple constraints can be placed on parameter values by using the
PARINFO keyword to MPFIT. See below for a description of this
keyword.
MPFIT does not perform more general optimization tasks. See TNMIN
instead. MPFIT is customized, based on MINPACK-1, to the
least-squares minimization problem.
USER FUNCTION
The user must define a function which returns the appropriate
values as specified above. The function should return the weighted
deviations between the model and the data. It should also return a status
flag and an optional partial derivative array. For applications which
use finite-difference derivatives -- the default -- the user
function should be declared in the following way:
def myfunct(p, fjac=None, x=None, y=None, err=None)
# Parameter values are passed in "p"
# If fjac==None then partial derivatives should not be
# computed. It will always be None if MPFIT is called with default
# flag.
model = F(x, p)
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
return([status, (y-model)/err])
See below for applications with analytical derivatives.
The keyword parameters X, Y, and ERR in the example above are
suggestive but not required. Any parameters can be passed to
MYFUNCT by using the functkw keyword to MPFIT. Use MPFITFUN and
MPFITEXPR if you need ideas on how to do that. The function *must*
accept a parameter list, P.
In general there are no restrictions on the number of dimensions in
X, Y or ERR. However the deviates *must* be returned in a
one-dimensional Numeric array of type Float.
User functions may also indicate a fatal error condition using the
status return described above. If status is set to a number between
-15 and -1 then MPFIT will stop the calculation and return to the caller.
ANALYTIC DERIVATIVES
In the search for the best-fit solution, MPFIT by default
calculates derivatives numerically via a finite difference
approximation. The user-supplied function need not calculate the
derivatives explicitly. However, if you desire to compute them
analytically, then the AUTODERIVATIVE=0 keyword must be passed to MPFIT.
As a practical matter, it is often sufficient and even faster to allow
MPFIT to calculate the derivatives numerically, and so
AUTODERIVATIVE=0 is not necessary.
If AUTODERIVATIVE=0 is used then the user function must check the parameter
FJAC, and if FJAC!=None then return the partial derivative array in the
return list.
def myfunct(p, fjac=None, x=None, y=None, err=None)
# Parameter values are passed in "p"
# If FJAC!=None then partial derivatives must be computed.
# FJAC contains an array of len(p), where each entry
# is 1 if that parameter is free and 0 if it is fixed.
model = F(x, p)
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
if (dojac):
pderiv = zeros([len(x), len(p)], Float)
for j in range(len(p)):
pderiv[:,j] = FGRAD(x, p, j)
else:
pderiv = None
return([status, (y-model)/err, pderiv])
where FGRAD(x, p, i) is a user function which must compute the
derivative of the model with respect to parameter P[i] at X. When
finite differencing is used for computing derivatives (ie, when
AUTODERIVATIVE=1), or when MPFIT needs only the errors but not the
derivatives the parameter FJAC=None.
Derivatives should be returned in the PDERIV array. PDERIV should be an m x
n array, where m is the number of data points and n is the number
of parameters. dp[i,j] is the derivative at the ith point with
respect to the jth parameter.
The derivatives with respect to fixed parameters are ignored; zero
is an appropriate value to insert for those derivatives. Upon
input to the user function, FJAC is set to a vector with the same
length as P, with a value of 1 for a parameter which is free, and a
value of zero for a parameter which is fixed (and hence no
derivative needs to be calculated).
If the data is higher than one dimensional, then the *last*
dimension should be the parameter dimension. Example: fitting a
50x50 image, "dp" should be 50x50xNPAR.
CONSTRAINING PARAMETER VALUES WITH THE PARINFO KEYWORD
The behavior of MPFIT can be modified with respect to each
parameter to be fitted. A parameter value can be fixed; simple
boundary constraints can be imposed; limitations on the parameter
changes can be imposed; properties of the automatic derivative can
be modified; and parameters can be tied to one another.
These properties are governed by the PARINFO structure, which is
passed as a keyword parameter to MPFIT.
PARINFO should be a list of dictionaries, one list entry for each parameter.
Each parameter is associated with one element of the array, in
numerical order. The dictionary can have the following keys
(none are required, keys are case insensitive):
'value' - the starting parameter value (but see the START_PARAMS
parameter for more information).
'fixed' - a boolean value, whether the parameter is to be held
fixed or not. Fixed parameters are not varied by
MPFIT, but are passed on to MYFUNCT for evaluation.
'limited' - a two-element boolean array. If the first/second
element is set, then the parameter is bounded on the
lower/upper side. A parameter can be bounded on both
sides. Both LIMITED and LIMITS must be given
together.
'limits' - a two-element float array. Gives the
parameter limits on the lower and upper sides,
respectively. Zero, one or two of these values can be
set, depending on the values of LIMITED. Both LIMITED
and LIMITS must be given together.
'parname' - a string, giving the name of the parameter. The
fitting code of MPFIT does not use this tag in any
way. However, the default iterfunct will print the
parameter name if available.
'step' - the step size to be used in calculating the numerical
derivatives. If set to zero, then the step size is
computed automatically. Ignored when AUTODERIVATIVE=0.
'mpside' - the sidedness of the finite difference when computing
numerical derivatives. This field can take four
values:
0 - one-sided derivative computed automatically
1 - one-sided derivative (f(x+h) - f(x) )/h
-1 - one-sided derivative (f(x) - f(x-h))/h
2 - two-sided derivative (f(x+h) - f(x-h))/(2*h)
Where H is the STEP parameter described above. The
"automatic" one-sided derivative method will chose a
direction for the finite difference which does not
violate any constraints. The other methods do not
perform this check. The two-sided method is in
principle more precise, but requires twice as many
function evaluations. Default: 0.
'mpmaxstep' - the maximum change to be made in the parameter
value. During the fitting process, the parameter
will never be changed by more than this value in
one iteration.
A value of 0 indicates no maximum. Default: 0.
'tied' - a string expression which "ties" the parameter to other
free or fixed parameters. Any expression involving
constants and the parameter array P are permitted.
Example: if parameter 2 is always to be twice parameter
1 then use the following: parinfo(2).tied = '2 * p(1)'.
Since they are totally constrained, tied parameters are
considered to be fixed; no errors are computed for them.
[ NOTE: the PARNAME can't be used in expressions. ]
'mpprint' - if set to 1, then the default iterfunct will print the
parameter value. If set to 0, the parameter value
will not be printed. This tag can be used to
selectively print only a few parameter values out of
many. Default: 1 (all parameters printed)
Future modifications to the PARINFO structure, if any, will involve
adding dictionary tags beginning with the two letters "MP".
Therefore programmers are urged to avoid using tags starting with
the same letters; otherwise they are free to include their own
fields within the PARINFO structure, and they will be ignored.
PARINFO Example:
parinfo = [{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
for i in range(5)]
parinfo[0]['fixed'] = 1
parinfo[4]['limited'][0] = 1
parinfo[4]['limits'][0] = 50.
values = [5.7, 2.2, 500., 1.5, 2000.]
for i in range(5): parinfo[i]['value']=values[i]
A total of 5 parameters, with starting values of 5.7,
2.2, 500, 1.5, and 2000 are given. The first parameter
is fixed at a value of 5.7, and the last parameter is
constrained to be above 50.
EXAMPLE
import mpfit
import numpy.oldnumeric as Numeric
x = arange(100, float)
p0 = [5.7, 2.2, 500., 1.5, 2000.]
y = ( p[0] + p[1]*[x] + p[2]*[x**2] + p[3]*sqrt(x) +
p[4]*log(x))
fa = {'x':x, 'y':y, 'err':err}
m = mpfit('myfunct', p0, functkw=fa)
print 'status = ', m.status
if (m.status <= 0): print 'error message = ', m.errmsg
print 'parameters = ', m.params
Minimizes sum of squares of MYFUNCT. MYFUNCT is called with the X,
Y, and ERR keyword parameters that are given by FUNCTKW. The
results can be obtained from the returned object m.
THEORY OF OPERATION
There are many specific strategies for function minimization. One
very popular technique is to use function gradient information to
realize the local structure of the function. Near a local minimum
the function value can be taylor expanded about x0 as follows:
f(x) = f(x0) + f'(x0) . (x-x0) + (1/2) (x-x0) . f''(x0) . (x-x0)
----- --------------- ------------------------------- (1)
Order 0th 1st 2nd
Here f'(x) is the gradient vector of f at x, and f''(x) is the
Hessian matrix of second derivatives of f at x. The vector x is
the set of function parameters, not the measured data vector. One
can find the minimum of f, f(xm) using Newton's method, and
arrives at the following linear equation:
f''(x0) . (xm-x0) = - f'(x0) (2)
If an inverse can be found for f''(x0) then one can solve for
(xm-x0), the step vector from the current position x0 to the new
projected minimum. Here the problem has been linearized (ie, the
gradient information is known to first order). f''(x0) is
symmetric n x n matrix, and should be positive definite.
The Levenberg - Marquardt technique is a variation on this theme.
It adds an additional diagonal term to the equation which may aid the
convergence properties:
(f''(x0) + nu I) . (xm-x0) = -f'(x0) (2a)
where I is the identity matrix. When nu is large, the overall
matrix is diagonally dominant, and the iterations follow steepest
descent. When nu is small, the iterations are quadratically
convergent.
In principle, if f''(x0) and f'(x0) are known then xm-x0 can be
determined. However the Hessian matrix is often difficult or
impossible to compute. The gradient f'(x0) may be easier to
compute, if even by finite difference techniques. So-called
quasi-Newton techniques attempt to successively estimate f''(x0)
by building up gradient information as the iterations proceed.
In the least squares problem there are further simplifications
which assist in solving eqn (2). The function to be minimized is
a sum of squares:
f = Sum(hi^2) (3)
where hi is the ith residual out of m residuals as described
above. This can be substituted back into eqn (2) after computing
the derivatives:
f' = 2 Sum(hi hi')
f'' = 2 Sum(hi' hj') + 2 Sum(hi hi'') (4)
If one assumes that the parameters are already close enough to a
minimum, then one typically finds that the second term in f'' is
negligible [or, in any case, is too difficult to compute]. Thus,
equation (2) can be solved, at least approximately, using only
gradient information.
In matrix notation, the combination of eqns (2) and (4) becomes:
hT' . h' . dx = - hT' . h (5)
Where h is the residual vector (length m), hT is its transpose, h'
is the Jacobian matrix (dimensions n x m), and dx is (xm-x0). The
user function supplies the residual vector h, and in some cases h'
when it is not found by finite differences (see MPFIT_FDJAC2,
which finds h and hT'). Even if dx is not the best absolute step
to take, it does provide a good estimate of the best *direction*,
so often a line minimization will occur along the dx vector
direction.
The method of solution employed by MINPACK is to form the Q . R
factorization of h', where Q is an orthogonal matrix such that QT .
Q = I, and R is upper right triangular. Using h' = Q . R and the
orthogonality of Q, eqn (5) becomes
(RT . QT) . (Q . R) . dx = - (RT . QT) . h
RT . R . dx = - RT . QT . h (6)
R . dx = - QT . h
where the last statement follows because R is upper triangular.
Here, R, QT and h are known so this is a matter of solving for dx.
The routine MPFIT_QRFAC provides the QR factorization of h, with
pivoting, and MPFIT_QRSOLV provides the solution for dx.
REFERENCES
MINPACK-1, Jorge More', available from netlib (www.netlib.org).
"Optimization Software Guide," Jorge More' and Stephen Wright,
SIAM, *Frontiers in Applied Mathematics*, Number 14.
More', Jorge J., "The Levenberg-Marquardt Algorithm:
Implementation and Theory," in *Numerical Analysis*, ed. Watson,
G. A., Lecture Notes in Mathematics 630, Springer-Verlag, 1977.
MODIFICATION HISTORY
Translated from MINPACK-1 in FORTRAN, Apr-Jul 1998, CM
Copyright (C) 1997-2002, Craig Markwardt
This software is provided as is without any warranty whatsoever.
Permission to use, copy, modify, and distribute modified or
unmodified copies is granted, provided this copyright and disclaimer
are included unchanged.
Translated from MPFIT (Craig Markwardt's IDL package) to Python,
August, 2002. Mark Rivers
Converted from Numeric to numpy (Sergey Koposov, July 2008)
"""
from __future__ import print_function
import numpy
import types
from ..spectrum.parinfo import ParinfoList,Parinfo
from astropy import log
# Original FORTRAN documentation
# **********
#
# subroutine lmdif
#
# the purpose of lmdif is to minimize the sum of the squares of
# m nonlinear functions in n variables by a modification of
# the levenberg-marquardt algorithm. the user must provide a
# subroutine which calculates the functions. the jacobian is
# then calculated by a forward-difference approximation.
#
# the subroutine statement is
#
# subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn,
# diag,mode,factor,nprint,info,nfev,fjac,
# ldfjac,ipvt,qtf,wa1,wa2,wa3,wa4)
#
# where
#
# fcn is the name of the user-supplied subroutine which
# calculates the functions. fcn must be declared
# in an external statement in the user calling
# program, and should be written as follows.
#
# subroutine fcn(m,n,x,fvec,iflag)
# integer m,n,iflag
# double precision x(n),fvec(m)
# ----------
# calculate the functions at x and
# return this vector in fvec.
# ----------
# return
# end
#
# the value of iflag should not be changed by fcn unless
# the user wants to terminate execution of lmdif.
# in this case set iflag to a negative integer.
#
# m is a positive integer input variable set to the number
# of functions.
#
# n is a positive integer input variable set to the number
# of variables. n must not exceed m.
#
# x is an array of length n. on input x must contain
# an initial estimate of the solution vector. on output x
# contains the final estimate of the solution vector.
#
# fvec is an output array of length m which contains
# the functions evaluated at the output x.
#
# ftol is a nonnegative input variable. termination
# occurs when both the actual and predicted relative
# reductions in the sum of squares are at most ftol.
# therefore, ftol measures the relative error desired
# in the sum of squares.
#
# xtol is a nonnegative input variable. termination
# occurs when the relative error between two consecutive
# iterates is at most xtol. therefore, xtol measures the
# relative error desired in the approximate solution.
#
# gtol is a nonnegative input variable. termination
# occurs when the cosine of the angle between fvec and
# any column of the jacobian is at most gtol in absolute
# value. therefore, gtol measures the orthogonality
# desired between the function vector and the columns
# of the jacobian.
#
# maxfev is a positive integer input variable. termination
# occurs when the number of calls to fcn is at least
# maxfev by the end of an iteration.
#
# epsfcn is an input variable used in determining a suitable
# step length for the forward-difference approximation. this
# approximation assumes that the relative errors in the
# functions are of the order of epsfcn. if epsfcn is less
# than the machine precision, it is assumed that the relative
# errors in the functions are of the order of the machine
# precision.
#
# diag is an array of length n. if mode = 1 (see
# below), diag is internally set. if mode = 2, diag
# must contain positive entries that serve as
# multiplicative scale factors for the variables.
#
# mode is an integer input variable. if mode = 1, the
# variables will be scaled internally. if mode = 2,
# the scaling is specified by the input diag. other
# values of mode are equivalent to mode = 1.
#
# factor is a positive input variable used in determining the
# initial step bound. this bound is set to the product of
# factor and the euclidean norm of diag*x if nonzero, or else
# to factor itself. in most cases factor should lie in the
# interval (.1,100.). 100. is a generally recommended value.
#
# nprint is an integer input variable that enables controlled
# printing of iterates if it is positive. in this case,
# fcn is called with iflag = 0 at the beginning of the first
# iteration and every nprint iterations thereafter and
# immediately prior to return, with x and fvec available
# for printing. if nprint is not positive, no special calls
# of fcn with iflag = 0 are made.
#
# info is an integer output variable. if the user has
# terminated execution, info is set to the (negative)
# value of iflag. see description of fcn. otherwise,
# info is set as follows.
#
# info = 0 improper input parameters.
#
# info = 1 both actual and predicted relative reductions
# in the sum of squares are at most ftol.
#
# info = 2 relative error between two consecutive iterates
# is at most xtol.
#
# info = 3 conditions for info = 1 and info = 2 both hold.
#
# info = 4 the cosine of the angle between fvec and any
# column of the jacobian is at most gtol in
# absolute value.
#
# info = 5 number of calls to fcn has reached or
# exceeded maxfev.
#
# info = 6 ftol is too small. no further reduction in
# the sum of squares is possible.
#
# info = 7 xtol is too small. no further improvement in
# the approximate solution x is possible.
#
# info = 8 gtol is too small. fvec is orthogonal to the
# columns of the jacobian to machine precision.
#
# nfev is an integer output variable set to the number of
# calls to fcn.
#
# fjac is an output m by n array. the upper n by n submatrix
# of fjac contains an upper triangular matrix r with
# diagonal elements of nonincreasing magnitude such that
#
# t t t
# p *(jac *jac)*p = r *r,
#
# where p is a permutation matrix and jac is the final
# calculated jacobian. column j of p is column ipvt(j)
# (see below) of the identity matrix. the lower trapezoidal
# part of fjac contains information generated during
# the computation of r.
#
# ldfjac is a positive integer input variable not less than m
# which specifies the leading dimension of the array fjac.
#
# ipvt is an integer output array of length n. ipvt
# defines a permutation matrix p such that jac*p = q*r,
# where jac is the final calculated jacobian, q is
# orthogonal (not stored), and r is upper triangular
# with diagonal elements of nonincreasing magnitude.
# column j of p is column ipvt(j) of the identity matrix.
#
# qtf is an output array of length n which contains
# the first n elements of the vector (q transpose)*fvec.
#
# wa1, wa2, and wa3 are work arrays of length n.
#
# wa4 is a work array of length m.
#
# subprograms called
#
# user-supplied ...... fcn
#
# minpack-supplied ... dpmpar,enorm,fdjac2,qrfac
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
class mpfit:
    def __init__(self, fcn, xall=None, functkw={}, parinfo=None, ftol=1.e-10,
                 xtol=1.e-10, gtol=1.e-10, damp=0., maxiter=200, factor=100.,
                 nprint=1, iterfunct='default', iterkw={}, nocovar=0, rescale=0,
                 autoderivative=1, quiet=0, diag=None, epsfcn=None, debug=False,
                 **kwargs):
        """
        Run a Levenberg-Marquardt least-squares fit.

        Inputs:
          fcn:
            The function to be minimized. The function should return the weighted
            deviations between the model and the data, as described above.
          xall:
            An array of starting values for each of the parameters of the model.
            The number of parameters should be fewer than the number of measurements.
            This parameter is optional if the parinfo keyword is used (but see
            parinfo). The parinfo keyword provides a mechanism to fix or constrain
            individual parameters.
        Keywords:
          autoderivative:
            If this is set, derivatives of the function will be computed
            automatically via a finite differencing procedure. If not set, then
            fcn must provide the (analytical) derivatives.
            Default: set (=1)
            NOTE: to supply your own analytical derivatives,
            explicitly pass autoderivative=0
          ftol:
            A nonnegative input variable. Termination occurs when both the actual
            and predicted relative reductions in the sum of squares are at most
            ftol (and status is accordingly set to 1 or 3). Therefore, ftol
            measures the relative error desired in the sum of squares.
            Default: 1E-10
          functkw:
            A dictionary which contains the parameters to be passed to the
            user-supplied function specified by fcn via the standard Python
            keyword dictionary mechanism. This is the way you can pass additional
            data to your user-supplied function without using global variables.
            Consider the following example:
              if functkw = {'xval':[1.,2.,3.], 'yval':[1.,4.,9.],
                            'errval':[1.,1.,1.] }
            then the user supplied function should be declared like this:
              def myfunct(p, fjac=None, xval=None, yval=None, errval=None):
            Default: {} No extra parameters are passed to the user-supplied
            function.
          gtol:
            A nonnegative input variable. Termination occurs when the cosine of
            the angle between fvec and any column of the jacobian is at most gtol
            in absolute value (and status is accordingly set to 4). Therefore,
            gtol measures the orthogonality desired between the function vector
            and the columns of the jacobian.
            Default: 1e-10
          iterkw:
            The keyword arguments to be passed to iterfunct via the dictionary
            keyword mechanism. This should be a dictionary and is similar in
            operation to FUNCTKW.
            Default: {} No arguments are passed.
          iterfunct:
            The name of a function to be called upon each NPRINT iteration of the
            MPFIT routine. It should be declared in the following way:
              def iterfunct(myfunct, p, iter, fnorm, functkw=None,
                            parinfo=None, quiet=0, dof=None, [iterkw keywords here])
              # perform custom iteration update
            iterfunct must accept all three keyword parameters (FUNCTKW, PARINFO
            and QUIET).
              myfunct: The user-supplied function to be minimized,
              p: The current set of model parameters
              iter: The iteration number
              functkw: The arguments to be passed to myfunct.
              fnorm: The chi-squared value.
              quiet: Set when no textual output should be printed.
              dof: The number of degrees of freedom, normally the number of points
                   less the number of free parameters.
            See below for documentation of parinfo.
            In implementation, iterfunct can perform updates to the terminal or
            graphical user interface, to provide feedback while the fit proceeds.
            If the fit is to be stopped for any reason, then iterfunct should return
            a status value between -15 and -1. Otherwise it should return None
            (e.g. no return statement) or 0.
            In principle, iterfunct should probably not modify the parameter values,
            because it may interfere with the algorithm's stability. In practice it
            is allowed.
            Default: an internal routine is used to print the parameter values.
            Set iterfunct=None if there is no user-defined routine and you don't
            want the internal default routine be called.
          maxiter:
            The maximum number of iterations to perform. If the number is exceeded,
            then the status value is set to 5 and MPFIT returns.
            Default: 200 iterations
          nocovar:
            Set this keyword to prevent the calculation of the covariance matrix
            before returning (see COVAR)
            Default: clear (=0) The covariance matrix is returned
          nprint:
            The frequency with which iterfunct is called. A value of 1 indicates
            that iterfunct is called with every iteration, while 2 indicates every
            other iteration, etc. Note that several Levenberg-Marquardt attempts
            can be made in a single iteration.
            Default value: 1
          parinfo
            Provides a mechanism for more sophisticated constraints to be placed on
            parameter values. When parinfo is not passed, then it is assumed that
            all parameters are free and unconstrained. Values in parinfo are never
            modified during a call to MPFIT.
            See description above for the structure of PARINFO.
            Default value: None All parameters are free and unconstrained.
          quiet:
            Set this keyword when no textual output should be printed by MPFIT
          damp:
            A scalar number, indicating the cut-off value of residuals where
            "damping" will occur. Residuals with magnitudes greater than this
            number will be replaced by their hyperbolic tangent. This partially
            mitigates the so-called large residual problem inherent in
            least-squares solvers (as for the test problem CURVI,
            http://www.maxthis.com/curviex.htm).
            A value of 0 indicates no damping.
            Default: 0
            Note: DAMP doesn't work with autoderivative=0
          xtol:
            A nonnegative input variable. Termination occurs when the relative error
            between two consecutive iterates is at most xtol (and status is
            accordingly set to 2 or 3). Therefore, xtol measures the relative error
            desired in the approximate solution.
            Default: 1E-10
        Outputs:
          Returns an object of type mpfit. The results are attributes of this class,
          e.g. mpfit.status, mpfit.errmsg, mpfit.params, npfit.niter, mpfit.covar.
          .status
            An integer status code is returned. All values greater than zero can
            represent success (however .status == 5 may indicate failure to
            converge). It can have one of the following values:
            -16
              A parameter or function value has become infinite or an undefined
              number. This is usually a consequence of numerical overflow in the
              user's model function, which must be avoided.
            -15 to -1
              These are error codes that either MYFUNCT or iterfunct may return to
              terminate the fitting process. Values from -15 to -1 are reserved
              for the user functions and will not clash with MPFIT.
            0 Improper input parameters.
            1 Both actual and predicted relative reductions in the sum of squares
              are at most ftol.
            2 Relative error between two consecutive iterates is at most xtol
            3 Conditions for status = 1 and status = 2 both hold.
            4 The cosine of the angle between fvec and any column of the jacobian
              is at most gtol in absolute value.
            5 The maximum number of iterations has been reached.
            6 ftol is too small. No further reduction in the sum of squares is
              possible.
            7 xtol is too small. No further improvement in the approximate solution
              x is possible.
            8 gtol is too small. fvec is orthogonal to the columns of the jacobian
              to machine precision.
          .fnorm
            The value of the summed squared residuals for the returned parameter
            values.
          .covar
            The covariance matrix for the set of parameters returned by MPFIT.
            The matrix is NxN where N is the number of parameters. The square root
            of the diagonal elements gives the formal 1-sigma statistical errors on
            the parameters if errors were treated "properly" in fcn.
            Parameter errors are also returned in .perror.
            To compute the correlation matrix, pcor, use this example:
              cov = mpfit.covar
              pcor = cov * 0.
              for i in range(n):
                  for j in range(n):
                      pcor[i,j] = cov[i,j]/sqrt(cov[i,i]*cov[j,j])
            If nocovar is set or MPFIT terminated abnormally, then .covar is set to
            a scalar with value None.
          .errmsg
            A string error or warning message is returned.
          .nfev
            The number of calls to MYFUNCT performed.
          .niter
            The number of iterations completed.
          .perror
            The formal 1-sigma errors in each parameter, computed from the
            covariance matrix. If a parameter is held fixed, or if it touches a
            boundary, then the error is reported as zero.
            If the fit is unweighted (i.e. no errors were given, or the weights
            were uniformly set to unity), then .perror will probably not represent
            the true parameter uncertainties.
            *If* you can assume that the true reduced chi-squared value is unity --
            meaning that the fit is implicitly assumed to be of good quality --
            then the estimated parameter uncertainties can be computed by scaling
            .perror by the measured chi-squared value.
              dof = len(x) - len(mpfit.params) # deg of freedom
              # scaled uncertainties
              pcerror = mpfit.perror * sqrt(mpfit.fnorm / dof)
        """
        # ---- Initialize public result attributes ----
        self.niter = 0
        self.params = None
        self.covar = None
        self.perror = None
        self.status = 0  # Invalid input flag set while we check inputs
        self.debug = debug
        self.errmsg = ''
        self.nfev = 0
        self.damp = damp
        self.dof=0
        # NOTE(review): 'fcn==None' should be 'fcn is None' (PEP 8);
        # behavior is the same here since user functions don't override __eq__.
        if fcn==None:
            self.errmsg = "Usage: parms = mpfit('myfunt', ... )"
            return
        else:
            self.fcn = fcn
        if iterfunct == 'default':
            iterfunct = self.defiter
        # Parameter damping doesn't work when user is providing their own
        # gradients.
        if (self.damp != 0) and (autoderivative == 0):
            self.errmsg = 'ERROR: keywords DAMP and AUTODERIVATIVE are mutually exclusive'
            return
        # Parameters can either be stored in parinfo, or x. x takes precedence if it exists
        if (xall is None) and (parinfo is None):
            self.errmsg = 'ERROR: must pass parameters in P or PARINFO'
            return
        # Be sure that PARINFO is of the right type
        if parinfo is not None:
            if type(parinfo) is ParinfoList:
                pass
            elif not isinstance(parinfo, list):
                self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
                return
            else:
                if not isinstance(parinfo[0], dict):
                    self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
                    return
            if ((xall is not None) and (len(xall) != len(parinfo))):
                self.errmsg = 'ERROR: number of elements in PARINFO and P must agree'
                return
        self.parinfo_in = parinfo
        # If the parameters were not specified at the command line, then
        # extract them from PARINFO
        if xall is None:
            xall = self.parinfo(parinfo, 'value')
            if xall is None:
                self.errmsg = 'ERROR: either P or PARINFO(*)["value"] must be supplied.'
                return
        self.parnames = self.parinfo(parinfo, 'parname')
        # Make sure parameters are numpy arrays
        xall = numpy.asarray(xall)
        # In the case if the xall is not float or if is float but has less
        # than 64 bits we do convert it into double
        if xall.dtype.kind != 'f' or xall.dtype.itemsize<=4:
            # NOTE(review): numpy.float was deprecated in NumPy 1.20 and removed
            # in 1.24; this should be float or numpy.float64.
            xall = xall.astype(numpy.float)
        npar = len(xall)
        self.fnorm = -1.
        fnorm1 = -1.
        # TIED parameters?
        ptied = self.parinfo(parinfo, 'tied', default='', n=npar)
        self.qanytied = 0
        for i in range(npar):
            ptied[i] = ptied[i].strip()
            if ptied[i] != '':
                self.qanytied = 1
        self.ptied = ptied
        # FIXED parameters ?
        pfixed = self.parinfo(parinfo, 'fixed', default=0, n=npar)
        pfixed = (numpy.array(pfixed,dtype='int') == 1)
        for i in range(npar):
            pfixed[i] = pfixed[i] or (ptied[i] != '') # Tied parameters are also effectively fixed
        # Finite differencing step, absolute and relative, and sidedness of deriv.
        step = self.parinfo(parinfo, 'step', default=0., n=npar)
        dstep = self.parinfo(parinfo, 'relstep', default=0., n=npar)
        dside = self.parinfo(parinfo, 'mpside', default=0, n=npar)
        # Maximum and minimum steps allowed to be taken in one iteration
        maxstep = self.parinfo(parinfo, 'mpmaxstep', default=0., n=npar)
        minstep = self.parinfo(parinfo, 'mpminstep', default=0., n=npar)
        qmin = minstep != 0
        qmin[:] = False # Remove minstep for now!!
        qmax = maxstep != 0
        if numpy.any(qmin & qmax & (maxstep<minstep)):
            self.errmsg = 'ERROR: MPMINSTEP is greater than MPMAXSTEP'
            return
        wh = (numpy.nonzero((qmin!=0.) | (qmax!=0.)))[0]
        # NOTE(review): 'len(wh > 0)' looks like a typo for 'len(wh) > 0'.
        # It happens to work because len() of the boolean array equals len(wh),
        # so qminmax is truthy exactly when any min/max step is set.
        qminmax = len(wh > 0)
        # Finish up the free parameters
        ifree = (numpy.nonzero(pfixed != 1))[0]
        nfree = len(ifree)
        if debug:
            print("Number of free parameters: {0} out of {1}".format(nfree, npar))
        if nfree == 0:
            self.errmsg = 'ERROR: no free parameters'
            return
        # Compose only VARYING parameters
        self.params = xall.copy() # self.params is the set of parameters to be returned
        free_pars_x = self.params[ifree] # x is the set of free parameters
        # LIMITED parameters ?
        limited = self.parinfo(parinfo, 'limited', default=[0,0], n=npar)
        limits = self.parinfo(parinfo, 'limits', default=[0.,0.], n=npar)
        if (limited is not None) and (limits is not None):
            # Error checking on limits in parinfo
            if numpy.any((limited[:,0] & (xall < limits[:,0])) |
                         (limited[:,1] & (xall > limits[:,1]))):
                self.errmsg = 'ERROR: parameters are not within PARINFO limits'
                return
            if numpy.any((limited[:,0] & limited[:,1]) &
                         (limits[:,0] >= limits[:,1]) &
                         (pfixed == 0)):
                self.errmsg = 'ERROR: PARINFO parameter limits are not consistent'
                return
            # Transfer structure values to local variables
            qulim = (limited[:,1])[ifree]
            ulim = (limits [:,1])[ifree]
            qllim = (limited[:,0])[ifree]
            llim = (limits [:,0])[ifree]
            if numpy.any((qulim!=0.) | (qllim!=0.)):
                qanylim = 1
            else:
                qanylim = 0
        else:
            # Fill in local variables with dummy values
            qulim = numpy.zeros(nfree)
            ulim = free_pars_x * 0.
            qllim = qulim
            llim = free_pars_x * 0.
            qanylim = 0
        n = len(free_pars_x)
        # Check input parameters for errors
        if (n < 0) or (ftol <= 0) or (xtol <= 0) or (gtol <= 0) \
           or (maxiter < 0) or (factor <= 0):
            self.errmsg = 'ERROR: input keywords are inconsistent'
            return
        if rescale != 0:
            self.errmsg = 'ERROR: DIAG parameter scales are inconsistent'
            if len(diag) < n:
                return
            if numpy.any(diag <= 0):
                return
            self.errmsg = ''
        log.log(10, "First call to function with parameters {0} and keywords {1}"
                .format(self.params, functkw))
        [self.status, fvec] = self.call(fcn, self.params, functkw)
        if self.status < 0:
            self.errmsg = 'ERROR: first call to "'+str(fcn)+'" failed'
            return
        # If the returned fvec has more than four bits I assume that we have
        # double precision
        # It is important that the machar is determined by the precision of
        # the returned value, not by the precision of the input array
        if numpy.array([fvec]).dtype.itemsize>4:
            self.machar = machar(double=1)
        else:
            self.machar = machar(double=0)
        machep = self.machar.machep
        m = len(fvec)
        if m < n:
            # NOTE(review): the message is worded backwards -- the condition
            # actually means there are fewer data points than free parameters.
            self.errmsg = 'ERROR: number of parameters must not exceed data'
            return
        self.dof = m-nfree
        self.fnorm = self.enorm(fvec)
        if debug:
            print("initial fnorm={0}".format(self.fnorm))
            print("initial params={0}".format(self.params))
        # Initialize Levelberg-Marquardt parameter and iteration counter
        par = 0.
        self.niter = 1
        qtf = free_pars_x * 0.
        self.status = 0
        # Beginning of the outer loop
        while(1):
            # If requested, call fcn to enable printing of iterates
            self.params[ifree] = free_pars_x
            if self.qanytied:
                self.params = self.tie(self.params, ptied)
            if (nprint > 0) and (iterfunct is not None):
                if ((self.niter-1) % nprint) == 0:
                    mperr = 0
                    xnew0 = self.params.copy()
                    dof = numpy.max([len(fvec) - len(free_pars_x), 0])
                    status = iterfunct(fcn, self.params, self.niter, self.fnorm**2,
                                       functkw=functkw, parinfo=parinfo, quiet=quiet,
                                       dof=dof, **iterkw)
                    if status is not None:
                        self.status = status
                    # Check for user termination
                    if self.status < 0:
                        self.errmsg = 'WARNING: premature termination by ' + str(iterfunct)
                        return
                    # If parameters were changed (grrr..) then re-tie
                    if numpy.max(numpy.abs(xnew0-self.params)) > 0:
                        if self.qanytied:
                            self.params = self.tie(self.params, ptied)
                        free_pars_x = self.params[ifree]
            # Calculate the jacobian matrix
            self.status = 2
            catch_msg = 'calling MPFIT_FDJAC2'
            log.log(5, catch_msg+" step={0}".format(step))
            fjac = self.fdjac2(fcn, free_pars_x, fvec, step, qulim, ulim, dside,
                               epsfcn=epsfcn,
                               autoderivative=autoderivative, dstep=dstep,
                               functkw=functkw, ifree=ifree, xall=self.params)
            if fjac is None:
                self.errmsg = 'WARNING: premature termination by FDJAC2'
                return
            log.log(5, "fjac max: {0}".format(fjac.max()))
            #if numpy.all(fjac == 0):
            #    raise ValueError("All of fjac = 0, so there is no"
            #                     " gradient with parameters. This probably "
            #                     "should not happen.")
            # Determine if any of the parameters are pegged at the limits
            if qanylim:
                catch_msg = 'zeroing derivatives of pegged parameters'
                whlpeg = (numpy.nonzero(qllim & (free_pars_x == llim)))[0]
                nlpeg = len(whlpeg)
                whupeg = (numpy.nonzero(qulim & (free_pars_x == ulim)))[0]
                nupeg = len(whupeg)
                # See if any "pegged" values should keep their derivatives
                if nlpeg > 0:
                    log.log(5, "There are {0} low-pegged parameters".format(nlpeg))
                    # Total derivative of sum wrt lower pegged parameters
                    for i in range(nlpeg):
                        sum0 = sum(fvec * fjac[:,whlpeg[i]])
                        if sum0 > 0:
                            fjac[:,whlpeg[i]] = 0
                if nupeg > 0:
                    log.log(5, "There are {0} high-pegged parameters".format(nupeg))
                    # Total derivative of sum wrt upper pegged parameters
                    for i in range(nupeg):
                        sum0 = sum(fvec * fjac[:,whupeg[i]])
                        if sum0 < 0:
                            fjac[:,whupeg[i]] = 0
            # Compute the QR factorization of the jacobian
            [fjac, ipvt, wa1, wa2] = self.qrfac(fjac, pivot=1)
            #if numpy.all(wa2==0):
            #    raise ValueError("All of the wa2 vector = 0, so there is no"
            #                     " gradient with parameters. This probably "
            #                     "should not happen.")
            log.log(5, "outer loop; wa1={0}".format(wa1))
            log.log(5, "outer loop; wa2={0}".format(wa2))
            # On the first iteration if "diag" is unspecified, scale
            # according to the norms of the columns of the initial jacobian
            catch_msg = 'rescaling diagonal elements'
            if self.niter == 1:
                if (rescale==0) or (len(diag) < n):
                    diag = wa2.copy()
                    diag[diag == 0] = 1.
                # On the first iteration, calculate the norm of the scaled x
                # and initialize the step bound delta
                wa3 = diag * free_pars_x
                xnorm = self.enorm(wa3)
                delta = factor*xnorm
                if delta == 0.:
                    delta = factor
            # Form (q transpose)*fvec and store the first n components in qtf
            catch_msg = 'forming (q transpose)*fvec'
            wa4 = fvec.copy()
            log.log(5, 'Before optimizing wa4, value is ={0}'.format(wa4))
            try:
                wa4._sharedmask = False # to deal with np1.11+ shared mask behavior: should not change anything
            except AttributeError:
                # apparently numpy won't let you write attributes it doesn't know about...
                pass
            for j in range(n):
                lj = ipvt[j]
                temp3 = fjac[j,lj]
                if temp3 != 0:
                    fj = fjac[j:,lj]
                    wj = wa4[j:]
                    # vsokolov 21 Mar 2017: switched to numpy's sum(),
                    # as both are numpy arrays.
                    # *** optimization wa4(j:*)
                    wa4[j:] = wj - fj * numpy.sum(fj*wj) / temp3
                fjac[j,lj] = wa1[j]
                qtf[j] = wa4[j]
            log.log(5, 'After optimizing wa4, qtf={0}'.format(qtf))
            # From this point on, only the square matrix, consisting of the
            # triangle of R, is needed.
            fjac = fjac[0:n, 0:n]
            fjac.shape = [n, n]
            temp = fjac.copy()
            for i in range(n):
                temp[:,i] = fjac[:, ipvt[i]]
            fjac = temp.copy()
            # Check for overflow. This should be a cheap test here since FJAC
            # has been reduced to a (small) square matrix, and the test is
            # O(N^2).
            #wh = where(finite(fjac) EQ 0, ct)
            #if ct GT 0 then goto, FAIL_OVERFLOW
            # Compute the norm of the scaled gradient
            catch_msg = 'computing the scaled gradient'
            log.log(5, catch_msg+" fnorm={0}".format(self.fnorm))
            gnorm = 0.
            if self.fnorm != 0:
                for j in range(n):
                    l = ipvt[j]
                    if wa2[l] != 0:
                        sum0 = sum(fjac[0:j+1,j]*qtf[0:j+1])/self.fnorm
                        gnorm = numpy.max([gnorm,numpy.abs(sum0/wa2[l])])
            if gnorm == 0.:
                log.warning("gnorm=0. wa2={0}".format(wa2))
            # Test for convergence of the gradient norm
            if gnorm <= gtol:
                self.status = 4
                log.log(5, "gnorm={0} gtol={1} BREAK with status 4"
                        .format(gnorm,gtol))
                break
            if maxiter == 0:
                self.status = 5
                break
            # Rescale if necessary
            if rescale == 0:
                diag = numpy.choose(diag>wa2, (wa2, diag))
            # Beginning of the inner loop
            while(1):
                # Determine the levenberg-marquardt parameter
                catch_msg = 'calculating LM parameter (MPFIT_)'
                [fjac, par, wa1, wa2] = self.lmpar(fjac, ipvt, diag, qtf,
                                                  delta, wa1, wa2, par=par)
                # Store the direction p and x+p. Calculate the norm of p
                wa1 = -wa1
                log.log(5, "before parameter setting; wa1={0}".format(wa1))
                log.log(5, "before parameter setting; wa2={0}".format(wa2))
                log.log(5, "before parameter setting; params={0}".format(self.params))
                if (qanylim == 0) and (qminmax == 0):
                    # No parameter limits, so just move to new position WA2
                    alpha = 1.
                    wa2 = free_pars_x + wa1
                else:
                    # Respect the limits. If a step were to go out of bounds, then
                    # we should take a step in the same direction but shorter distance.
                    # The step should take us right to the limit in that case.
                    alpha = 1.
                    if qanylim:
                        # Do not allow any steps out of bounds
                        catch_msg = 'checking for a step out of bounds'
                        if nlpeg > 0:
                            wa1[whlpeg] = numpy.clip(wa1[whlpeg], 0., numpy.max(wa1))
                        if nupeg > 0:
                            wa1[whupeg] = numpy.clip(wa1[whupeg], numpy.min(wa1), 0.)
                        dwa1 = numpy.abs(wa1) > machep
                        whl = (numpy.nonzero(((dwa1!=0.) & qllim) & ((free_pars_x + wa1) < llim)))[0]
                        if len(whl) > 0:
                            t = ((llim[whl] - free_pars_x[whl]) / wa1[whl])
                            alpha = numpy.min([alpha, numpy.min(t)])
                        whu = (numpy.nonzero(((dwa1!=0.) & qulim) & ((free_pars_x + wa1) > ulim)))[0]
                        if len(whu) > 0:
                            t = ((ulim[whu] - free_pars_x[whu]) /
                                 wa1[whu])
                            alpha = numpy.min([alpha, numpy.min(t)])
                    # Obey any max step values.
                    if qminmax:
                        nwa1 = wa1 * alpha
                        whmax = (numpy.nonzero((qmax[ifree] != 0.) &
                                               (maxstep[ifree] > 0)))[0]
                        if len(whmax) > 0:
                            mrat = numpy.max(numpy.abs(nwa1[whmax]) /
                                             numpy.abs(maxstep[ifree[whmax]]))
                            if mrat > 1:
                                alpha = alpha / mrat
                    # Scale the resulting vector
                    wa1 = wa1 * alpha
                    wa2 = free_pars_x + wa1
                    # Adjust the final output values. If the step put us exactly
                    # on a boundary, make sure it is exact.
                    sgnu = (ulim >= 0) * 2. - 1.
                    sgnl = (llim >= 0) * 2. - 1.
                    # Handles case of
                    #        ... nonzero *LIM ... ...zero * LIM
                    ulim1 = ulim * (1 - sgnu * machep) - (ulim == 0) * machep
                    llim1 = llim * (1 + sgnl * machep) + (llim == 0) * machep
                    wh = (numpy.nonzero((qulim!=0) & (wa2 >= ulim1)))[0]
                    if len(wh) > 0:
                        wa2[wh] = ulim[wh]
                    wh = (numpy.nonzero((qllim!=0.) & (wa2 <= llim1)))[0]
                    if len(wh) > 0:
                        wa2[wh] = llim[wh]
                # endelse
                wa3 = diag * wa1
                pnorm = self.enorm(wa3)
                # On the first iteration, adjust the initial step bound
                if self.niter == 1:
                    delta = numpy.min([delta,pnorm])
                self.params[ifree] = wa2
                log.log(5, "after parameter setting: wa2={0}".format(wa2))
                log.log(5, "after parameter setting: params={0}".format(self.params))
                # Evaluate the function at x+p and calculate its norm
                mperr = 0
                catch_msg = 'calling '+str(fcn)
                [self.status, wa4] = self.call(fcn, self.params, functkw)
                if self.status < 0:
                    self.errmsg = 'WARNING: premature termination by "'+fcn+'"'
                    log.log(5, self.errmsg)
                    return
                fnorm1 = self.enorm(wa4)
                log.log(5, "params={0}".format(self.params))
                log.log(5, "fnorm={0}".format(self.fnorm))
                log.log(5, "fnorm1={0}".format(fnorm1))
                #print("wa4={0}".format(wa4))
                # Compute the scaled actual reduction
                catch_msg = 'computing convergence criteria'
                actred = -1.
                if (0.1 * fnorm1) < self.fnorm:
                    actred = - (fnorm1/self.fnorm)**2 + 1.
                log.log(5, "actred={0}".format(actred))
                # Compute the scaled predicted reduction and the scaled directional
                # derivative
                for j in range(n):
                    wa3[j] = 0
                    wa3[0:j+1] = wa3[0:j+1] + fjac[0:j+1,j]*wa1[ipvt[j]]
                # Remember, alpha is the fraction of the full LM step actually
                # taken
                temp1 = self.enorm(alpha*wa3)/self.fnorm
                temp2 = (numpy.sqrt(alpha*par)*pnorm)/self.fnorm
                prered = temp1*temp1 + (temp2*temp2)/0.5
                dirder = -(temp1*temp1 + temp2*temp2)
                # Compute the ratio of the actual to the predicted reduction.
                ratio = 0.
                if prered != 0:
                    ratio = actred/prered
                # Update the step bound
                if ratio <= 0.25:
                    if actred >= 0:
                        temp = .5
                    else:
                        temp = .5*dirder/(dirder + .5*actred)
                    if ((0.1*fnorm1) >= self.fnorm) or (temp < 0.1):
                        temp = 0.1
                    delta = temp*numpy.min([delta,pnorm/0.1])
                    par = par/temp
                else:
                    if (par == 0) or (ratio >= 0.75):
                        delta = pnorm/.5
                        par = .5*par
                log.log(5, "ratio={0}".format(ratio))
                # Test for successful iteration
                if ratio >= 0.0001:
                    # Successful iteration. Update x, fvec, and their norms
                    free_pars_x = wa2
                    wa2 = diag * free_pars_x
                    fvec = wa4
                    xnorm = self.enorm(wa2)
                    self.fnorm = fnorm1
                    self.niter = self.niter + 1
                log.log(5, "at 'test for successful iteration' ratio={0} "
                        "fnorm={1}".format(ratio,self.fnorm))
                # Tests for convergence
                if (numpy.abs(actred) <= ftol) and (prered <= ftol) \
                   and (0.5 * ratio <= 1):
                    self.status = 1
                if delta <= xtol*xnorm:
                    self.status = 2
                if (numpy.abs(actred) <= ftol) and (prered <= ftol) \
                   and (0.5 * ratio <= 1) and (self.status == 2):
                    self.status = 3
                if self.status != 0:
                    log.log(5, "BREAK with status: {0}".format(self.status))
                    break
                # Tests for termination and stringent tolerances
                if self.niter >= maxiter:
                    self.status = 5
                if (numpy.abs(actred) <= machep) and (prered <= machep) \
                   and (0.5*ratio <= 1):
                    self.status = 6
                if delta <= machep*xnorm:
                    self.status = 7
                if gnorm <= machep:
                    self.status = 8
                if self.status != 0:
                    log.log(5, "BREAK with status: {0}".format(self.status))
                    break
                # End of inner loop. Repeat if iteration unsuccessful
                if ratio >= 0.0001:
                    log.log(5, "BREAK with ratio = {1} status: {0}".format(self.status, ratio))
                    break
                # Check for over/underflow
                if ~numpy.all(numpy.isfinite(wa1) & numpy.isfinite(wa2) & \
                              numpy.isfinite(free_pars_x)) or ~numpy.isfinite(ratio):
                    errmsg = ('''ERROR: parameter or function value(s) have become
                        'infinite; check model function for over- 'and underflow''')
                    self.status = -16
                    log.log(5, "BREAK with status: {0} errmsg={1}".format(self.status, errmsg))
                    break
                #wh = where(finite(wa1) EQ 0 OR finite(wa2) EQ 0 OR finite(x) EQ 0, ct)
                #if ct GT 0 OR finite(ratio) EQ 0 then begin
            if self.status != 0:
                log.log(5, "BREAK with status: {0}".format(self.status))
                break
        # End of outer loop.
        catch_msg = 'in the termination phase'
        log.log(5, catch_msg)
        # Termination, either normal or user imposed.
        if len(self.params) == 0:
            return
        if nfree == 0:
            self.params = xall.copy()
        else:
            self.params[ifree] = free_pars_x
        if (nprint > 0) and (self.status > 0):
            catch_msg = 'calling ' + str(fcn)
            [status, fvec] = self.call(fcn, self.params, functkw)
            catch_msg = 'in the termination phase'
            self.fnorm = self.enorm(fvec)
        log.log(5, "After iterations, status={1} fnorm={0}".format(self.fnorm, self.status))
        if (self.fnorm is not None) and (fnorm1 is not None):
            self.fnorm = numpy.max([self.fnorm, fnorm1])
            self.fnorm = self.fnorm**2.
        log.log(5, "After iterations, fnorm={0}".format(self.fnorm))
        self.covar = None
        self.perror = None
        # (very carefully) set the covariance matrix COVAR
        if (self.status > 0) and (nocovar==0) and (n is not None) \
           and (fjac is not None) and (ipvt is not None):
            sz = fjac.shape
            if (n > 0) and (sz[0] >= n) and (sz[1] >= n) \
               and (len(ipvt) >= n):
                catch_msg = 'computing the covariance matrix'
                cv = self.calc_covar(fjac[0:n,0:n], ipvt[0:n])
                cv.shape = [n, n]
                nn = len(xall)
                # Fill in actual covariance matrix, accounting for fixed
                # parameters.
                self.covar = numpy.zeros([nn, nn], dtype=float)
                for i in range(n):
                    self.covar[ifree,ifree[i]] = cv[:,i]
                # Compute errors in parameters
                catch_msg = 'computing parameter errors'
                self.perror = numpy.zeros(nn, dtype=float)
                d = numpy.diagonal(self.covar).copy()
                wh = (numpy.nonzero(d >= 0))[0]
                if len(wh) > 0:
                    self.perror[wh] = numpy.sqrt(d[wh])
        return
def __str__(self):
return {'params': self.params,
'niter': self.niter,
'params': self.params,
'covar': self.covar,
'perror': self.perror,
'status': self.status,
'debug': self.debug,
'errmsg': self.errmsg,
'nfev': self.nfev,
'damp': self.damp
#,'machar':self.machar
}.__str__()
# Default procedure to be called every iteration. It simply prints
# the parameter values.
def defiter(self, fcn, x, iter, fnorm=None, functkw=None, quiet=0,
iterstop=None, parinfo=None, format=None, pformat='%.10g',
dof=1):
log.log(5, 'Entering defiter...')
if quiet:
return
if fnorm is None:
[status, fvec] = self.call(fcn, x, functkw)
fnorm = self.enorm(fvec)**2
# Determine which parameters to print
nprint = len(x)
print("Iter ", ('%6i' % iter)," CHI-SQUARE = ",('%.10g' % fnorm)," DOF = ", ('%i' % dof))
for i in range(nprint):
if (parinfo is not None) and ('parname' in parinfo[i]):
p = ' ' + parinfo[i]['parname'] + ' = '
else:
p = ' P' + str(i) + ' = '
if (parinfo is not None) and ('mpprint' in parinfo[i]):
iprint = parinfo[i]['mpprint']
else:
iprint = 1
if iprint:
print(p + (pformat % x[i]) + ' ')
return 0
def print_results(self, **kwargs):
self.defiter(self.fcn, self.params, self.niter, parinfo=self.parinfo_in,
dof=self.dof, fnorm=self.fnorm, **kwargs)
# DO_ITERSTOP:
# if keyword_set(iterstop) then begin
# k = get_kbrd(0)
# if k EQ string(byte(7)) then begin
# message, 'WARNING: minimization not complete', /info
# print, 'Do you want to terminate this procedure? (y/n)', $
# format='(A,$)'
# k = ''
# read, k
# if strupcase(strmid(k,0,1)) EQ 'Y' then begin
# message, 'WARNING: Procedure is terminating.', /info
# mperr = -1
# endif
# endif
# endif
# Procedure to parse the parameter values in PARINFO, which is a list of dictionaries
def parinfo(self, parinfo=None, key='a', default=None, n=0):
log.log(5, 'Entering parinfo...')
if (n == 0) and (parinfo is not None):
n = len(parinfo)
if n == 0:
values = default
return values
values = []
for i in range(n):
if (parinfo is not None) and (key in parinfo[i]):
values.append(parinfo[i][key])
else:
values.append(default)
# Convert to numeric arrays if possible
test = default
if isinstance(default, list):
test=default[0]
if isinstance(test, int):
values = numpy.asarray(values, int)
elif isinstance(test, float):
values = numpy.asarray(values, float)
return values
# Call user function or procedure, with _EXTRA or not, with
# derivatives or not.
def call(self, fcn, x, functkw, fjac=None):
log.log(5, 'Entering call with x={0}...'.format(x))
if self.qanytied:
x = self.tie(x, self.ptied)
self.nfev = self.nfev + 1
if fjac is None:
[status, f] = fcn(x, fjac=fjac, **functkw)
if self.damp > 0:
# Apply the damping if requested. This replaces the residuals
# with their hyperbolic tangent. Thus residuals larger than
# DAMP are essentially clipped.
f = numpy.tanh(f/self.damp)
return [status, f]
else:
return fcn(x, fjac=fjac, **functkw)
def enorm(self, vec):
# removed scipy dependency
# see http://fseoane.net/blog/2011/computing-the-vector-norm/#comment-73197
# in particular, see http://i51.tinypic.com/2912tg8.png
ans = numpy.sqrt(numpy.dot(vec.T, vec))
return ans
    def fdjac2(self, fcn, x, fvec, step=None, ulimited=None, ulimit=None,
               dside=None, epsfcn=None, autoderivative=1, functkw=None,
               xall=None, ifree=None, dstep=None):
        """Compute the m x n Jacobian of fcn with respect to the free parameters.

        With autoderivative=1 (default) the Jacobian is built by forward
        (or two-sided, per dside) finite differences around x; with
        autoderivative=0 the user function is asked for analytical
        derivatives instead.  Returns the Jacobian array, or None if a
        function call fails (status < 0) or the analytical derivative
        matrix has the wrong size.

        x holds only the free parameters; xall/ifree map them back into
        the full parameter vector.  step/dstep/dside include the fixed
        parameters, while ulimited/ulimit cover only the varying ones.
        """
        log.log(5, 'Entering fdjac2...')
        machep = self.machar.machep
        if epsfcn is None:
            epsfcn = machep
        if xall is None:
            xall = x
        if ifree is None:
            ifree = numpy.arange(len(xall))
        if step is None:
            step = x * 0.
        nall = len(xall)
        # Default relative step: sqrt of the larger of epsfcn and machine eps.
        eps = numpy.sqrt(numpy.max([epsfcn, machep]))
        m = len(fvec)
        n = len(x)
        # Compute analytical derivative if requested
        if autoderivative == 0:
            mperr = 0
            fjac = numpy.zeros(nall, dtype=float)
            fjac[ifree] = 1.0 # Specify which parameters need derivatives
            [status, fp] = self.call(fcn, xall, functkw, fjac=fjac)
            if len(fjac) != m*nall:
                print('ERROR: Derivative matrix was not computed properly.')
                return None
            # This definition is consistent with CURVEFIT
            # Sign error found (thanks Jesus Fernandez <fernande@irm.chu-caen.fr>)
            fjac.shape = [m,nall]
            fjac = -fjac
            # Select only the free parameters
            if len(ifree) < nall:
                fjac = fjac[:,ifree]
                fjac.shape = [m, n]
                return fjac
        fjac = numpy.zeros([m, n], dtype=float)
        h = eps * numpy.abs(x)
        # if STEP is given, use that
        # STEP includes the fixed parameters
        if step is not None:
            stepi = step[ifree]
            wh = (numpy.nonzero(stepi > 0))[0]
            if len(wh) > 0:
                h[wh] = stepi[wh]
        # if relative step is given, use that
        # DSTEP includes the fixed parameters
        if len(dstep) > 0:
            log.log(5, "Using relative step size dstep={0}".format(dstep))
            dstepi = dstep[ifree]
            wh = (numpy.nonzero(dstepi > 0))[0]
            if len(wh) > 0:
                h[wh] = numpy.abs(dstepi[wh]*x[wh])
        # In case any of the step values are zero
        h[h == 0] = eps
        log.log(5, "In fdjac2, epsilon={0}, m={1}, n={2}, and h={3}"
                .format(eps, m, n, h))
        # Reverse the sign of the step if we are up against the parameter
        # limit, or if the user requested it.
        # DSIDE includes the fixed parameters (ULIMITED/ULIMIT have only
        # varying ones)
        mask = dside[ifree] == -1
        if len(ulimited) > 0 and len(ulimit) > 0:
            # Step backwards when a forward step would cross the upper limit.
            mask = (mask | ((ulimited!=0) & (x > ulimit-h)))
            wh = (numpy.nonzero(mask))[0]
            if len(wh) > 0:
                h[wh] = - h[wh]
        # Loop through parameters, computing the derivative for each
        for j in range(n):
            xp = xall.copy()
            xp[ifree[j]] = xp[ifree[j]] + h[j]
            [status, fp] = self.call(fcn, xp, functkw)
            if status < 0:
                return None
            if numpy.abs(dside[ifree[j]]) <= 1:
                # COMPUTE THE ONE-SIDED DERIVATIVE
                # Note optimization fjac(0:*,j)
                fjac[0:,j] = (fp-fvec)/h[j]
            else:
                # COMPUTE THE TWO-SIDED DERIVATIVE
                xp[ifree[j]] = xall[ifree[j]] - h[j]
                mperr = 0
                [status, fm] = self.call(fcn, xp, functkw)
                if status < 0:
                    return None
                # Note optimization fjac(0:*,j)
                fjac[0:,j] = (fp-fm)/(2*h[j])
        return fjac
# Original FORTRAN documentation
# **********
#
# subroutine qrfac
#
# this subroutine uses householder transformations with column
# pivoting (optional) to compute a qr factorization of the
# m by n matrix a. that is, qrfac determines an orthogonal
# matrix q, a permutation matrix p, and an upper trapezoidal
# matrix r with diagonal elements of nonincreasing magnitude,
# such that a*p = q*r. the householder transformation for
# column k, k = 1,2,...,min(m,n), is of the form
#
# t
# i - (1/u(k))*u*u
#
# where u has zeros in the first k-1 positions. the form of
# this transformation and the method of pivoting first
# appeared in the corresponding linpack subroutine.
#
# the subroutine statement is
#
# subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa)
#
# where
#
# m is a positive integer input variable set to the number
# of rows of a.
#
# n is a positive integer input variable set to the number
# of columns of a.
#
# a is an m by n array. on input a contains the matrix for
# which the qr factorization is to be computed. on output
# the strict upper trapezoidal part of a contains the strict
# upper trapezoidal part of r, and the lower trapezoidal
# part of a contains a factored form of q (the non-trivial
# elements of the u vectors described above).
#
# lda is a positive integer input variable not less than m
# which specifies the leading dimension of the array a.
#
# pivot is a logical input variable. if pivot is set true,
# then column pivoting is enforced. if pivot is set false,
# then no column pivoting is done.
#
# ipvt is an integer output array of length lipvt. ipvt
# defines the permutation matrix p such that a*p = q*r.
# column j of p is column ipvt(j) of the identity matrix.
# if pivot is false, ipvt is not referenced.
#
# lipvt is a positive integer input variable. if pivot is false,
# then lipvt may be as small as 1. if pivot is true, then
# lipvt must be at least n.
#
# rdiag is an output array of length n which contains the
# diagonal elements of r.
#
# acnorm is an output array of length n which contains the
# norms of the corresponding columns of the input matrix a.
# if this information is not needed, then acnorm can coincide
# with rdiag.
#
# wa is a work array of length n. if pivot is false, then wa
# can coincide with rdiag.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm
#
# fortran-supplied ... dmax1,dsqrt,min0
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
#
# PIVOTING / PERMUTING:
#
# Upon return, A(*,*) is in standard parameter order, A(*,IPVT) is in
# permuted order.
#
# RDIAG is in permuted order.
# ACNORM is in standard parameter order.
#
#
# NOTE: in IDL the factors appear slightly differently than described
# above. The matrix A is still m x n where m >= n.
#
# The "upper" triangular matrix R is actually stored in the strict
# lower left triangle of A under the standard notation of IDL.
#
# The reflectors that generate Q are in the upper trapezoid of A upon
# output.
#
# EXAMPLE: decompose the matrix [[9.,2.,6.],[4.,8.,7.]]
# aa = [[9.,2.,6.],[4.,8.,7.]]
# mpfit_qrfac, aa, aapvt, rdiag, aanorm
# IDL> print, aa
# 1.81818* 0.181818* 0.545455*
# -8.54545+ 1.90160* 0.432573*
# IDL> print, rdiag
# -11.0000+ -7.48166+
#
# The components marked with a * are the components of the
# reflectors, and those marked with a + are components of R.
#
# To reconstruct Q and R we proceed as follows. First R.
# r = fltarr(m, n)
# for i = 0, n-1 do r(0:i,i) = aa(0:i,i) # fill in lower diag
# r(lindgen(n)*(m+1)) = rdiag
#
# Next, Q, which are composed from the reflectors. Each reflector v
# is taken from the upper trapezoid of aa, and converted to a matrix
# via (I - 2 vT . v / (v . vT)).
#
# hh = ident # identity matrix
# for i = 0, n-1 do begin
# v = aa(*,i) & if i GT 0 then v(0:i-1) = 0 # extract reflector
# hh = hh # (ident - 2*(v # v)/total(v * v)) # generate matrix
# endfor
#
# Test the result:
# IDL> print, hh # transpose(r)
# 9.00000 4.00000
# 2.00000 8.00000
# 6.00000 7.00000
#
# Note that it is usually never necessary to form the Q matrix
# explicitly, and MPFIT does not.
def qrfac(self, a, pivot=0):
    """
    QR factorisation of the m x n matrix ``a`` (translation of MINPACK QRFAC).

    Householder transformations, with optional column pivoting, reduce ``a``
    to upper-triangular form.  See the FORTRAN/IDL notes above for the
    unusual storage convention: R ends up in the strict lower triangle of
    ``a`` and the Householder reflectors in the upper trapezoid.

    Parameters:
        a     : 2-d array (m x n, m >= n); overwritten in place.
        pivot : nonzero to enable column pivoting.

    Returns:
        [a, ipvt, rdiag, acnorm] where ipvt is the column permutation,
        rdiag the diagonal of R (permuted order) and acnorm the original
        column norms (standard order).
    """
    log.log(5, 'Entering qrfac...')
    machep = self.machar.machep
    sz = a.shape
    m = sz[0]
    n = sz[1]
    # Compute the initial column norms and initialize arrays
    acnorm = numpy.zeros(n, dtype=float)
    for j in range(n):
        acnorm[j] = self.enorm(a[:,j])
    rdiag = acnorm.copy()
    wa = rdiag.copy()
    ipvt = numpy.arange(n)
    # Reduce a to r with householder transformations
    minmn = numpy.min([m,n])
    for j in range(minmn):
        if pivot != 0:
            # Bring the column of largest norm into the pivot position
            rmax = numpy.max(rdiag[j:])
            kmax = (numpy.nonzero(rdiag[j:] == rmax))[0]
            ct = len(kmax)
            kmax = kmax + j
            if ct > 0:
                kmax = kmax[0]
            # Exchange rows via the pivot only. Avoid actually exchanging
            # the rows, in case there is lots of memory transfer. The
            # exchange occurs later, within the body of MPFIT, after the
            # extraneous columns of the matrix have been shed.
            if kmax != j:
                temp = ipvt[j] ; ipvt[j] = ipvt[kmax] ; ipvt[kmax] = temp
                rdiag[kmax] = rdiag[j]
                wa[kmax] = wa[j]
        # Compute the householder transformation to reduce the jth
        # column of A to a multiple of the jth unit vector
        lj = ipvt[j]
        ajj = a[j:,lj]
        ajnorm = self.enorm(ajj)
        if ajnorm == 0:
            # Remaining column is identically zero: nothing to eliminate.
            break
        if a[j,lj] < 0:
            # Pick the sign that avoids cancellation in ajj[0] + 1 below.
            ajnorm = -ajnorm
        ajj = ajj / ajnorm
        ajj[0] = ajj[0] + 1
        # *** Note optimization a(j:*,j)
        a[j:,lj] = ajj
        # Apply the transformation to the remaining columns
        # and update the norms
        # NOTE to SELF: tried to optimize this by removing the loop,
        # but it actually got slower. Reverted to "for" loop to keep
        # it simple.
        if j+1 < n:
            for k in range(j+1, n):
                lk = ipvt[k]
                ajk = a[j:,lk]
                # *** Note optimization a(j:*,lk)
                # (corrected 20 Jul 2000)
                if a[j,lj] != 0:
                    # vsokolov 21 Mar 2017: switched to numpy's sum(),
                    # as both are numpy arrays.
                    a[j:,lk] = ajk - ajj * numpy.sum(ajk*ajj)/a[j,lj]
                    if (pivot != 0) and (rdiag[k] != 0):
                        # Downdate the column norm cheaply; recompute it
                        # from scratch when cancellation makes the
                        # downdated value unreliable.
                        temp = a[j,lk]/rdiag[k]
                        rdiag[k] = rdiag[k] * numpy.sqrt(numpy.max([(1.-temp**2), 0.]))
                        temp = rdiag[k]/wa[k]
                        if (0.05*temp*temp) <= machep:
                            rdiag[k] = self.enorm(a[j+1:,lk])
                            wa[k] = rdiag[k]
        rdiag[j] = -ajnorm
    return [a, ipvt, rdiag, acnorm]
# Original FORTRAN documentation
# **********
#
# subroutine qrsolv
#
# given an m by n matrix a, an n by n diagonal matrix d,
# and an m-vector b, the problem is to determine an x which
# solves the system
#
# a*x = b , d*x = 0 ,
#
# in the least squares sense.
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then qrsolv expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. the system
# a*x = b, d*x = 0, is then equivalent to
#
# t t
# r*z = q *b , p *d*p*z = 0 ,
#
# where x = p*z. if this system does not have full rank,
# then a least squares solution is obtained. on output qrsolv
# also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + d*d)*p = s *s .
#
# s is computed within qrsolv and may be of separate interest.
#
# the subroutine statement is
#
# subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, d*x = 0.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
def qrsolv(self, r, ipvt, diag, qtb, sdiag):
    """
    Solve a*x = b, d*x = 0 in the least-squares sense (MINPACK QRSOLV),
    given the QR factorisation (r, ipvt) of a and qtb = (q^T)*b.

    On output ``r`` additionally carries the triangular factor s described
    in the FORTRAN documentation above, ``x`` holds the solution and
    ``sdiag`` the diagonal elements of s.

    Returns (r, x, sdiag).
    """
    log.log(5, 'Entering qrsolv... r={0} ipvt={1} diag={2} qtb={3}'.format(r, ipvt, diag, qtb))
    sz = r.shape
    # not used m = sz[0]
    n = sz[1]
    # copy r and (q transpose)*b to preserve input and initialize s.
    # in particular, save the diagonal elements of r in x.
    for j in range(n):
        r[j:n,j] = r[j,j:n]
    x = numpy.diagonal(r).copy()
    wa = qtb.copy()
    # Eliminate the diagonal matrix d using a givens rotation
    for j in range(n):
        l = ipvt[j]
        if diag[l] == 0:
            break
        sdiag[j:] = 0
        sdiag[j] = diag[l]
        # The transformations to eliminate the row of d modify only a
        # single element of (q transpose)*b beyond the first n, which
        # is initially zero.
        qtbpj = 0.
        for k in range(j,n):
            if sdiag[k] == 0:
                break
            # Form the Givens rotation; branch on relative magnitude to
            # avoid overflow when computing the tangent/cotangent.
            if numpy.abs(r[k,k]) < numpy.abs(sdiag[k]):
                cotan = r[k,k]/sdiag[k]
                sine = 0.5/numpy.sqrt(.25 + .25*cotan*cotan)
                cosine = sine*cotan
            else:
                tang = sdiag[k]/r[k,k]
                cosine = 0.5/numpy.sqrt(.25 + .25*tang*tang)
                sine = cosine*tang
            # Compute the modified diagonal element of r and the
            # modified element of ((q transpose)*b,0).
            r[k,k] = cosine*r[k,k] + sine*sdiag[k]
            temp = cosine*wa[k] + sine*qtbpj
            qtbpj = -sine*wa[k] + cosine*qtbpj
            wa[k] = temp
            # Accumulate the transformation in the row of s
            if n > k+1:
                temp = cosine*r[k+1:n,k] + sine*sdiag[k+1:n]
                sdiag[k+1:n] = -sine*r[k+1:n,k] + cosine*sdiag[k+1:n]
                r[k+1:n,k] = temp
        sdiag[j] = r[j,j]
        r[j,j] = x[j]
    # Solve the triangular system for z. If the system is singular
    # then obtain a least squares solution
    nsing = n
    wh = (numpy.nonzero(sdiag == 0))[0]
    if len(wh) > 0:
        nsing = wh[0]
        wa[nsing:] = 0
    if nsing >= 1:
        wa[nsing-1] = wa[nsing-1]/sdiag[nsing-1] # Degenerate case
        # *** Reverse loop *** (back-substitution)
        for j in range(nsing-2,-1,-1):
            sum0 = sum(r[j+1:nsing,j]*wa[j+1:nsing])
            wa[j] = (wa[j]-sum0)/sdiag[j]
    # Permute the components of z back to components of x
    x[ipvt] = wa
    return (r, x, sdiag)
# Original FORTRAN documentation
#
# subroutine lmpar
#
# given an m by n matrix a, an n by n nonsingular diagonal
# matrix d, an m-vector b, and a positive number delta,
# the problem is to determine a value for the parameter
# par such that if x solves the system
#
# a*x = b , sqrt(par)*d*x = 0 ,
#
# in the least squares sense, and dxnorm is the euclidean
# norm of d*x, then either par is zero and
#
# (dxnorm-delta) .le. 0.1*delta ,
#
# or par is positive and
#
# abs(dxnorm-delta) .le. 0.1*delta .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then lmpar expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. on output
# lmpar also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + par*d*d)*p = s *s .
#
# s is employed within lmpar and may be of separate interest.
#
# only a few iterations are generally needed for convergence
# of the algorithm. if, however, the limit of 10 iterations
# is reached, then the output par will contain the best
# value obtained so far.
#
# the subroutine statement is
#
# subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag,
# wa1,wa2)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# delta is a positive input variable which specifies an upper
# bound on the euclidean norm of d*x.
#
# par is a nonnegative variable. on input par contains an
# initial estimate of the levenberg-marquardt parameter.
# on output par contains the final estimate.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, sqrt(par)*d*x = 0,
# for the output par.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa1 and wa2 are work arrays of length n.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm,qrsolv
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
def lmpar(self, r, ipvt, diag, qtb, delta, x, sdiag, par=None):
    """
    Determine the Levenberg-Marquardt parameter (MINPACK LMPAR).

    Given the QR factorisation (r, ipvt) of the Jacobian, the scaling
    ``diag``, qtb = (q^T)*b and the trust-region radius ``delta``, find
    ``par`` such that the scaled step norm matches delta to within 10%
    (see the FORTRAN documentation above).  At most 10 iterations are
    performed; ``par`` is the best value found.

    Returns [r, par, x, sdiag] with x the solution for the final par.
    """
    log.log(5, 'Entering lmpar... delta={0} x={1} sdiag={2} qtb={3}'.format(delta, x, sdiag, qtb))
    dwarf = self.machar.minnum
    machep = self.machar.machep
    sz = r.shape
    m = sz[0]
    n = sz[1]
    # Compute and store in x the gauss-newton direction. If the
    # jacobian is rank-deficient, obtain a least-squares solution
    nsing = n
    wa1 = qtb.copy()
    rthresh = numpy.max(numpy.abs(numpy.diagonal(r))) * machep
    wh = (numpy.nonzero(numpy.abs(numpy.diagonal(r)) < rthresh))[0]
    if len(wh) > 0:
        nsing = wh[0]
        wa1[wh[0]:] = 0
    if nsing >= 1:
        # *** Reverse loop *** (back-substitution through R)
        for j in range(nsing-1,-1,-1):
            wa1[j] = wa1[j]/r[j,j]
            if j-1 >= 0:
                wa1[0:j] = wa1[0:j] - r[0:j,j]*wa1[j]
    # Note: ipvt here is a permutation array
    x[ipvt] = wa1
    # Initialize the iteration counter. Evaluate the function at the
    # origin, and test for acceptance of the gauss-newton direction
    iter = 0
    wa2 = diag * x
    dxnorm = self.enorm(wa2)
    fp = dxnorm - delta
    if fp <= 0.1*delta:
        # Gauss-Newton step already lies within the trust region: par = 0.
        return [r, 0., x, sdiag]
    # If the jacobian is not rank deficient, the newton step provides a
    # lower bound, parl, for the zero of the function. Otherwise set
    # this bound to zero.
    parl = 0.
    if nsing >= n:
        wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
        wa1[0] = wa1[0] / r[0,0] # Degenerate case
        for j in range(1,n):   # Note "1" here, not zero
            sum0 = sum(r[0:j,j]*wa1[0:j])
            wa1[j] = (wa1[j] - sum0)/r[j,j]
        temp = self.enorm(wa1)
        parl = ((fp/delta)/temp)/temp
    # Calculate an upper bound, paru, for the zero of the function
    for j in range(n):
        sum0 = sum(r[0:j+1,j]*qtb[0:j+1])
        wa1[j] = sum0/diag[ipvt[j]]
    gnorm = self.enorm(wa1)
    paru = gnorm/delta
    if paru == 0:
        paru = dwarf/numpy.min([delta,0.1])
    # If the input par lies outside of the interval (parl,paru), set
    # par to the closer endpoint
    par = numpy.max([par,parl])
    par = numpy.min([par,paru])
    if par == 0:
        par = gnorm/dxnorm
    # Beginning of an iteration
    while(1):
        iter = iter + 1
        # Evaluate the function at the current value of par
        if par == 0:
            par = numpy.max([dwarf, paru*0.001])
        temp = numpy.sqrt(par)
        wa1 = temp * diag
        [r, x, sdiag] = self.qrsolv(r, ipvt, wa1, qtb, sdiag)
        wa2 = diag*x
        dxnorm = self.enorm(wa2)
        temp = fp
        fp = dxnorm - delta
        # Converged: within 10% of delta, or parl==0 with fp shrinking
        # while negative, or the 10-iteration cap is reached.
        if (numpy.abs(fp) <= 0.1*delta) or \
           ((parl == 0) and (fp <= temp) and (temp < 0)) or \
           (iter == 10):
            break;
        # Compute the newton correction
        wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
        for j in range(n-1):
            wa1[j] = wa1[j]/sdiag[j]
            wa1[j+1:n] = wa1[j+1:n] - r[j+1:n,j]*wa1[j]
        wa1[n-1] = wa1[n-1]/sdiag[n-1] # Degenerate case
        temp = self.enorm(wa1)
        parc = ((fp/delta)/temp)/temp
        # Depending on the sign of the function, update parl or paru
        if fp > 0:
            parl = numpy.max([parl,par])
        if fp < 0:
            paru = numpy.min([paru,par])
        # Compute an improved estimate for par
        par = numpy.max([parl, par+parc])
        # End of an iteration
    # Termination
    return [r, par, x, sdiag]
# Procedure to tie one parameter to another.
def tie(self, p, ptied=None):
    """Apply parameter-tying expressions to the parameter vector ``p``.

    Each non-empty string in ``ptied`` is an expression (e.g. ``'p[1]*2'``)
    whose value overwrites the corresponding entry of ``p``.  Returns the
    updated ``p``, or None when no tying information is supplied.

    NOTE(security): the expressions are executed with exec(); only use
    tying strings from trusted configuration.
    """
    if self.debug:
        print('Entering tie...')
    if ptied is None:
        return
    for idx, expr in enumerate(ptied):
        if expr == '':
            continue
        # The expression may reference the local name `p`, so the
        # assignment is assembled as source text and exec'd here.
        exec('p[' + str(idx) + '] = ' + expr)
    return p
# Original FORTRAN documentation
# **********
#
# subroutine covar
#
# given an m by n matrix a, the problem is to determine
# the covariance matrix corresponding to a, defined as
#
# t
# inverse(a *a) .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then covar expects
# the full upper triangle of r and the permutation matrix p.
# the covariance matrix is then computed as
#
# t t
# p*inverse(r *r)*p .
#
# if a is nearly rank deficient, it may be desirable to compute
# the covariance matrix corresponding to the linearly independent
# columns of a. to define the numerical rank of a, covar uses
# the tolerance tol. if l is the largest integer such that
#
# abs(r(l,l)) .gt. tol*abs(r(1,1)) ,
#
# then covar computes the covariance matrix corresponding to
# the first l columns of r. for k greater than l, column
# and row ipvt(k) of the covariance matrix are set to zero.
#
# the subroutine statement is
#
# subroutine covar(n,r,ldr,ipvt,tol,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle must
# contain the full upper triangle of the matrix r. on output
# r contains the square symmetric covariance matrix.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# tol is a nonnegative input variable used to define the
# numerical rank of a in the manner described above.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs
#
# argonne national laboratory. minpack project. august 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
def calc_covar(self, rr, ipvt=None, tol=1.e-14):
    """
    Compute the covariance matrix inverse(r^T * r) from the QR factor
    ``rr`` (MINPACK COVAR).  Columns beyond the numerical rank, as
    determined by ``tol``, are zeroed; see the FORTRAN documentation
    above.

    Returns the n x n covariance matrix, or -1 on malformed input.
    """
    if self.debug:
        print('Entering calc_covar...')
    if numpy.ndim(rr) != 2:
        print('ERROR: r must be a two-dimensional matrix')
        return -1
    s = rr.shape
    n = s[0]
    if s[0] != s[1]:
        print('ERROR: r must be a square matrix')
        return -1
    if ipvt is None:
        ipvt = numpy.arange(n)
    r = rr.copy()
    r.shape = [n,n]
    # Form the inverse of r in the full upper triangle of r
    l = -1
    tolr = tol * numpy.abs(r[0,0])
    for k in range(n):
        if numpy.abs(r[k,k]) <= tolr:
            # Numerical rank reached: stop inverting here.
            break
        r[k,k] = 1./r[k,k]
        for j in range(k):
            temp = r[k,k] * r[j,k]
            r[j,k] = 0.
            r[0:j+1,k] = r[0:j+1,k] - temp*r[0:j+1,j]
        l = k
    # Form the full upper triangle of the inverse of (r transpose)*r
    # in the full upper triangle of r
    if l >= 0:
        for k in range(l+1):
            for j in range(k):
                temp = r[j,k]
                r[0:j+1,j] = r[0:j+1,j] + temp*r[0:j+1,k]
            temp = r[k,k]
            r[0:k+1,k] = temp * r[0:k+1,k]
    # Form the full lower triangle of the covariance matrix
    # in the strict lower triangle of r and in wa
    wa = numpy.repeat([r[0,0]], n)
    for j in range(n):
        jj = ipvt[j]
        sing = j > l  # column lies beyond the numerical rank
        for i in range(j+1):
            if sing:
                r[i,j] = 0.
            ii = ipvt[i]
            if ii > jj:
                r[ii,jj] = r[i,j]
            if ii < jj:
                r[jj,ii] = r[i,j]
        wa[jj] = r[j,j]
    # Symmetrize the covariance matrix in r
    for j in range(n):
        r[0:j+1,j] = r[j,0:j+1]
        r[j,j] = wa[j]
    return r
class machar:
    """Machine floating-point characteristics used throughout mpfit.

    Thin wrapper around numpy.finfo exposing the constants the original
    MINPACK MACHAR routine provided.
    """
    def __init__(self, double=1):
        # Pick single- or double-precision characteristics.
        info = numpy.finfo(numpy.float32 if double == 0 else numpy.float64)
        self.machep = info.eps     # machine epsilon
        self.maxnum = info.max     # largest representable number
        self.minnum = info.tiny    # smallest positive normal number
        self.maxlog = numpy.log(self.maxnum)
        self.minlog = numpy.log(self.minnum)
        # Safe under/overflow bounds used by enorm.
        self.rdwarf = numpy.sqrt(self.minnum * 1.5) * 10
        self.rgiant = numpy.sqrt(self.maxnum) * 0.1
class mpfitException(Exception):
    """Exception raised for fatal errors encountered during an mpfit run."""
    pass
| jpinedaf/pyspeckit | pyspeckit/mpfit/mpfit.py | Python | mit | 97,201 | [
"Gaussian"
] | b3118ec08e725f734afb9434d1b450bff56eceb9fba0130dbfdebfdeb6d21eb7 |
#===============================================================================
# This file is part of TEMPy.
#
# TEMPy is software designed to help the user in the manipulation
# and analyses of macromolecular assemblies using 3D electron microscopy maps.
#
# Copyright 2015 Birkbeck College University of London.
#
# Authors: Maya Topf, Daven Vasishtan, Arun Prasad Pandurangan,
# Irene Farabella, Agnel-Praveen Joseph, Harpal Sahota
#
# This software is made available under GPL V3 license
# http://www.gnu.org/licenses/gpl-3.0.html
#
#
# Please cite your use of TEMPy in published work:
#
# Farabella, I., Vasishtan, D., Joseph, A.P., Pandurangan, A.P., Sahota, H. & Topf, M. (2015). J. Appl. Cryst. 48.
#
#===============================================================================
from TEMPy.StructureBlurrer import StructureBlurrer
import math
from numpy import sum as numsum, copy as npcopy,mean as npmean, log10 as np_log10
from numpy import square,sqrt,absolute,histogram,argwhere,amin,count_nonzero,shape,size, array as nparray,\
transpose, mgrid,indices,meshgrid,nonzero,real,searchsorted,newaxis,where,matrix,ravel,ma,\
amax,ones,arange,floor,ceil,zeros, conjugate
from scipy.ndimage.interpolation import map_coordinates,spline_filter
from scipy.fftpack import fftn, ifftn, fftshift, fftfreq, ifftshift
#from scipy import weave
#from scipy.weave import converters
from scipy.spatial import KDTree
import sys
import itertools
class ScoringFunctions:
"""
A class implementing various scoring functions used in density fitting.
Reference:
Vasishtan and Topf (2011) Scoring functions for cryoEM density fitting.
J Struct Biol 174:333-343.
"""
def __init__(self):
    """The scorer is stateless; nothing to initialise."""
    pass
def _overlap_map_samebox(self,map1,map2):
"""
volume overlap within 2 maps with same box size
Return:
% of overlap
"""
b=map1.fullMap
binmap1=map1.fullMap>0.0
binmap2=map2.fullMap>0.0
mask_array=(binmap1*binmap2)>0.0
return[count_nonzero(binmap1),count_nonzero(binmap2),count_nonzero(mask_array),mask_array.size]
def _overlap_map_array(self,map_target,map_target_threshold,map_probe,map_probe_threshold):
"""
mask maps with 2 cut-off map_target_threshold and map_probe_threshold (vol thr.)
return:
mask array where both are true.
"""
binmap1=map_target.fullMap>float(map_target_threshold)
binmap2=map_probe.fullMap>float(map_probe_threshold)
mask_array=(binmap1*binmap2)>0
return mask_array
#add by AJP
def calculate_map_threshold(self,map_target):
    """
    Propose a density threshold for a map.

    Uses the map's peak-density statistics (average + 2*sigma) when
    available; otherwise falls back to mean + 1.5*std of the density.

    Fixes: the bare ``except:`` is narrowed to ``except Exception:``,
    and the two byte-identical branches of the original fallback (the
    ``len(header) == 0`` test selected the same expression either way)
    are collapsed into one.

    Return:
        float threshold value
    """
    try:
        peak, ave, sigma = map_target._peak_density()
        vol_threshold = float(ave) + (2.0 * float(sigma))
    except Exception:
        # Fallback when peak-density statistics are unavailable.
        amean = map_target.mean()
        rms = map_target.std()
        vol_threshold = float(amean) + (1.5 * float(rms))
    return vol_threshold
def mapComparison(self, map_target, map_probe):
    """
    Compare the properties (sampling rate, box size and origin) of two maps.

    Arguments:
        *map_target, map_probe*
            Map instances to compare.
    Return:
        True if the map properties are the same between two maps, False otherwise.
    """
    # BUG FIX: the original tested the *signed* difference
    # (map_target.apix - map_probe.apix < 1E-6), which let any probe
    # with a LARGER pixel size pass the check; abs() is required.
    if abs(map_target.apix - map_probe.apix) < 1E-6 and map_target.box_size() == map_probe.box_size():
        # Origins are compared rounded to 2 decimal places, as before.
        return (round(map_target.origin[0], 2) == round(map_probe.origin[0], 2) and
                round(map_target.origin[1], 2) == round(map_probe.origin[1], 2) and
                round(map_target.origin[2], 2) == round(map_probe.origin[2], 2))
    else:
        return False
def _failed_match(self):
    # Maps on different grids cannot be scored against each other:
    # warn and abort the whole run (sys.exit raises SystemExit).
    print("Warning: can't match the map at the moment, use map with same box size.")
    sys.exit()
def _CCC_calc(self,m1,m2):
arr1 = m1.view(float)
arr2 = m2.view(float)
nd = len(arr1.shape)
if nd == 2 and len(arr1.shape)[1] == 0:
nd = 1
l = 1
dim = zeros(3,dtype=int)
for i in range(nd):
l *= arr1.shape[i]
dim[i] = arr1.shape[i]
#l = len(arr1)
corr = 0.0
#dims = nparray(ltmp,dtype=int)
code = """
int k,j,i;
float numer=0.0, var1=0.0, var2 = 0.0;
if (nd == 1){
for (int z=0; z<dim[0]; z++) {
numer += arr1[z]*arr2[z];
var1 += pow(arr1[z],2);
var2 += pow(arr2[z],2); }
}
else if (nd == 3){
for (int z=0; z<dim[0]; z++) {
for (int y=0; y<dim[1]; y++) {
for (int x=0; x<dim[2]; x++) {
numer += ARR13(z,y,x)*ARR23(z,y,x);
var1 += pow(ARR13(z,y,x),2);
var2 += pow(ARR23(z,y,x),2);
}
}
}
}
corr = (float) numer/sqrt(var1*var2);
return_val = corr;
"""
# check
# BEN - commented out
#try:
# #print datetime.now().time()
# corr = weave.inline(code,['arr1','arr2','corr','nd','dim'],headers=["<math.h>"],verbose=0)
# #print datetime.now().time()
# corr = min(1.0,corr)
# corr = max(-1.0,corr)
# return corr
#except:
# #print 'C++ scoring run failed!'
# return None
return None
# Cross correlation coefficient for the overlap (3), contoured (2) or complete map (1), added by APJ
def CCC_map(self, map_target,map_probe,map_target_threshold=0.0,map_probe_threshold=0.0,mode=1,meanDist=False,cmode=True):
    """
    Calculate cross-correlation between two Map instances, for the overlap (3), contoured (2) or complete map (1).

    Arguments:
        *map_target, map_probe*
            EMMap instances to compare.
        *map_target_threshold,map_probe_threshold*
            EMMap threshold
            if not given, use calculate_map_threshold to calculate map_target_threshold and map_probe_threshold
        *mode*
            3. calculation on the mask
            2. calculation on contoured maps
            1. calculation on complete map
        *meanDist*
            True if the deviation from mean needs to be calculated
        *cmode*
            if True, try the C implementation first and fall back to the
            numpy computation when it returns None.

    Return:
        (score, fraction_overlap); (-1.0, 0.0) on mismatch or no overlap.
    """
    if self.mapComparison(map_target, map_probe):
        if not mode == 1:
            # calculate threshold if not given : 2* sigma can be used for experimental maps and 1*sigma for simulated?
            if map_target_threshold==0 and map_probe_threshold==0:
                map_target_threshold=self.calculate_map_threshold(map_target)
                map_probe_threshold=self.calculate_map_threshold(map_probe)
            # calculate contour overlap: contour both maps
            bin_map1 = map_target.fullMap > float(map_target_threshold)
            bin_map2 = map_probe.fullMap > float(map_probe_threshold)
            # percent calculated on the smaller contoured volume (can be changed)
            minim = numsum(bin_map1)
            minim2 = numsum(bin_map2)
            if minim2 < minim: minim = minim2
            mask_array = (bin_map1*bin_map2) > 0
            if not minim == 0.0:perc_ovr = float(numsum(mask_array))/minim
            else:
                perc_ovr = 0.0
                print('No map overlap (Cross correlation score), exiting score calculation..')
                return -1.0, 0.0
            # require at least 2% overlap for a meaningful score
            if perc_ovr < 0.02: return -1.0, 0.0
        else: perc_ovr = 1.0
        # calculate CCC within volume of overlap
        if mode == 3:
            if numsum(mask_array) == 0:
                print ('No map overlap (Cross correlation score), exiting score calculation..')
                return -1.0, 0.0
            map1_mask = map_target.fullMap[mask_array]
            map2_mask = map_probe.fullMap[mask_array]
            if meanDist:
                # correlation about the mean, taken within the overlap mask
                map1_mask = map1_mask - npmean(map1_mask)
                map2_mask = map2_mask - npmean(map2_mask)
            if cmode:
                corr = self._CCC_calc(map1_mask.flatten(),map2_mask.flatten())
            else: corr = None
            if corr is None:
                # numpy fallback: normalised dot product over the mask
                return numsum(map1_mask * map2_mask)/sqrt(numsum(square(map1_mask))*numsum(square(map2_mask))), perc_ovr
            else: return corr, perc_ovr
        # calculate CCC for contoured maps based on threshold
        elif mode == 2:
            map1_mask = map_target.fullMap*bin_map1
            map2_mask = map_probe.fullMap*bin_map2
            if meanDist:
                # subtract the mean over the contoured region, then re-mask
                map1_mask = map1_mask - npmean(map_target.fullMap[bin_map1])
                map2_mask = map2_mask - npmean(map_probe.fullMap[bin_map2])
                map1_mask = map1_mask*bin_map1
                map2_mask = map2_mask*bin_map2
            else:
                map1_mask = map_target.fullMap*bin_map1
                map2_mask = map_probe.fullMap*bin_map2
            if cmode: corr = self._CCC_calc(map1_mask,map2_mask)
            else: corr = None
            if corr is None:
                return numsum(map1_mask * map2_mask)/sqrt(numsum(square(map1_mask))*numsum(square(map2_mask))), perc_ovr
            else:
                return corr, perc_ovr
        # calculate on the complete map (mode 1)
        if meanDist:
            if cmode: corr = self._CCC_calc(map_target.fullMap-npmean(map_target.fullMap),map_probe.fullMap-npmean(map_probe.fullMap))
            else: corr = None
            if corr is None:
                return numsum((map_target.fullMap-npmean(map_target.fullMap)) * (map_probe.fullMap-npmean(map_probe.fullMap)))/(sqrt(numsum(square(map_target.fullMap-npmean(map_target.fullMap)))*numsum(square(map_probe.fullMap-npmean(map_probe.fullMap))))), perc_ovr
            else: return corr, perc_ovr
        if cmode: corr = self._CCC_calc(map_target.fullMap,map_probe.fullMap)
        else: corr = None
        if corr is None:
            return numsum(map_target.fullMap * map_probe.fullMap)/sqrt(numsum(square(map_target.fullMap))*numsum(square(map_probe.fullMap))), perc_ovr
        else: return corr, perc_ovr
    else:
        print("@@@ Maps could not be matched")
        return -1., 0.
def CCC(self, map_target, map_probe):
    """
    Calculate cross-correlation between two Map instances.

    Arguments:
        *map_target, map_probe*
            EMMap instances to compare.
    Return:
        CCC score
    """
    # Guard clause: mismatched grids cannot be scored (this exits).
    if not self.mapComparison(map_target, map_probe):
        self._failed_match()
        return
    normalised_product = map_target.normalise().getMap() * map_probe.normalise().getMap()
    return normalised_product.mean()
#TODO: check and delete the following
'''
### Correlation coefficient about mean for the overlap mask
def CCC_local(self, map_target,map_probe,map_target_threshold=0,map_probe_threshold=0):
"""
Calculate cross-correlation about mean between two Map instances, for the overlap region.
Arguments:
*map_target, map_probe*
EMMap instances to compare.
*map_target_threshold,map_probe_threshold*
EMMap threshold
use calcualte_map_threshold to calculate map_target_threshold and map_probe_threshold.
Return:
mean CCC score
"""
if self.mapComparison(map_target, map_probe):
if map_target_threshold==0:
map_target_threshold=self.calculate_map_threshold(map_target)
if map_probe_threshold==0:
map_probe_threshold=self.calculate_map_threshold(map_probe)
mask_array = self._overlap_map_array(map_target,map_target_threshold,map_probe,map_probe_threshold)
map_target_mask = map_target.fullMap[mask_array]
map_target_mask = map_target_mask - float(map_target_mask.sum()/len(map_target_mask))
map_probe_mask = map_probe.fullMap[mask_array]
map_probe_mask = map_probe_mask - float(map_probe_mask.sum()/len(map_probe_mask))
return absolute((map_target_mask * map_probe_mask)).sum()/sqrt(square(map_target_mask).sum()*square(map_probe_mask).sum())
#return (map_target_mask * map_probe_mask).sum()/sqrt(square(map_target_mask).sum()*square(map_probe_mask).sum())
else:
self._failed_match()
#m1,m2 = self.matchMaps(map_target, map_probe)
#return (m1.normalise().getMap()*m2.normalise().getMap()).mean()
# MAIN: Cross correlation coefficient for the overlap (3), contoured (2) or complete map (1)
def CCC_mask_zero(self, map_target,map_probe,map_target_threshold=0,map_probe_threshold=0):
"""
Calculate cross-correlation about zero for the overlap region between two Map instances.
Arguments:
*map_target, map_probe*
EMMap instances to compare.
*map_target_threshold,map_probe_threshold*
EMMap threshold
use calcualte_map_threshold to calculate map_target_threshold and map_probe_threshold.
Return:
mean CCC score
"""
if self.mapComparison(map_target, map_probe):
if map_target_threshold==0:
map_target_threshold=self.calculate_map_threshold(map_target)
if map_probe_threshold==0:
map_probe_threshold=self.calculate_map_threshold(map_probe)
mask_array = self._overlap_map_array(map_target,map_target_threshold,map_probe,map_probe_threshold)
map_target_mask = map_target.fullMap[mask_array]
map_probe_mask = map_probe.fullMap[mask_array]
return (map_target_mask * map_probe_mask).sum()/sqrt(square(map_target_mask).sum()*square(map_probe_mask).sum())
else:
self._failed_match()
#m1,m2 = self.matchMaps(map_target, map_probe)
#return (m1.normalise().getMap()*m2.normalise().getMap()).mean()
'''
def LSF(self, map_target, map_probe):
    """
    Calculate least-squares between two Map instances.

    Arguments:
        *map_target, map_probe*
            EMMap instances to compare.
    Return:
        least-squares value (mean squared voxel difference)

    Fix: removed the no-op line
    ``map_target, map_probe = map_target, map_probe`` from the original.
    """
    if not self.mapComparison(map_target, map_probe):
        # Exits via sys.exit: mismatched grids cannot be compared voxel-wise.
        self._failed_match()
    return ((map_target.getMap() - map_probe.getMap())**2).mean()
def laplace_CCC(self, map_target, map_probe, prefil=(False, False)):
    """
    Calculate Laplacian cross-correlation between two Map instances.
    Based on (Chacon and Wriggers, 2002).

    Arguments:
        *map_target, map_probe*
            Map instances to compare.
        *prefil*
            2-tuple of boolean values, one for each map respectively.
            True if Map instance is already Laplacian-filtered. False otherwise.
    Return:
        Laplacian cross-correlation score

    Fix: removed the unused locals ``m1, m2`` the original assigned and
    never read.
    """
    if not self.mapComparison(map_target, map_probe):
        # Exits via sys.exit on mismatched grids.
        self._failed_match()
    # Apply the Laplacian filter only to maps not already filtered.
    if not prefil[0]:
        map_target = map_target.laplace_filtered()
    if not prefil[1]:
        map_probe = map_probe.laplace_filtered()
    map_target = map_target.normalise()
    map_probe = map_probe.normalise()
    return self.CCC(map_target, map_probe)
# MAIN: normal vector score calculated on surface voxels derived by different methods
def normal_vector_score(self, map_target, map_probe, primary_boundary, secondary_boundary=0.0,Filter=None):
"""
Calculate the Normal Vector Score between two Map surfaces.
Based on 3SOM algorithm (Ceulemans and Russell, 2004).
Arguments:
*map_target, map_probe*
EMMap instances to compare. map_target is the target map.
*primary_boundary, secondary_boundary*
If a filter is selected, just input a contour level as primary threshold.
Otherwise, need to run get_primary_boundary and get_second_boundary based on map target.
*Filter*
Filter to use:
i Sobel Filter (Filter=='Sobel')
ii Laplace Filter (Filter=='Laplace')
iii Minimum Filter (Filter=='Minimum')
iv Mean Filter (Filter=='Mean')
Return:
Normal vector score.
"""
if Filter not in ['Sobel','Laplace','Mean','Minimum',None]:
print("Incorrect name of filter: " + Filter)
print("Select one of the following Filters if applicable: " + ''.join(['Sobel','Laplace']))
sys.exit()
scores = []
if not self.mapComparison(map_target, map_probe):
#map_target, map_probe = self.matchMaps(map_target, map_probe)
self._failed_match()
assert isinstance(primary_boundary,float)
assert isinstance(secondary_boundary,float)
#print "fff", primary_boundary, secondary_boundary
if primary_boundary > secondary_boundary:
temp_thr = secondary_boundary
secondary_boundary = primary_boundary
primary_boundary = temp_thr
points = argwhere((map_target.fullMap > primary_boundary) & (map_target.fullMap < secondary_boundary))
if Filter=='Sobel':
# sobel filter surface
map1_surface = map_target._sobel_filter_contour(primary_boundary)
points = argwhere(map1_surface.fullMap > (map1_surface.max()/2.0))
elif Filter=='Laplace':
# sobel filter surface
map1_surface = map_target._laplace_filtered_contour(primary_boundary)
points = argwhere(map1_surface.fullMap > (map1_surface.max()/2.0))
elif Filter=='Minimum':
# the filter returns points touching surface (zeros)
#map1_surface = map_target._surface_minimum_filter(float(primary_boundary))
map1_surface = map_target._surface_minimum_filter(float(primary_boundary))
points = argwhere(map1_surface == 1)
elif Filter=='Mean':
# the filter returns points from protrusions/curved surfaces
map1_filter = map_target._surface_features(float(primary_boundary))
# to extract points with filtered values less than a cut-off
# more finer the bins are, more precise will be number of points chosen; not very crucial
bin_test = [0.0001]
for ii in range(1,41): bin_test.append(0.025*ii)
freq_test = histogram(map1_filter.fullMap,bin_test)[0]
sum_freq = 0.0
for fr in range(len(freq_test)):
sum_freq += float(freq_test[fr])
if sum_freq/numsum(freq_test) > 0.05 and bin_test[fr+1] >= 0.3:
t1 = bin_test[fr+1]
break
if sum_freq/numsum(freq_test) > 0.10 or sum_freq > 100000:
t1 = bin_test[fr+1]
break
points = argwhere((map1_filter.fullMap > 0.0) & (map1_filter.fullMap < t1))
#C++ calculation
flagc = 1
try:
vecnorm_target = map_target._get_normal_vector(points)
vecnorm_probe = map_probe._get_normal_vector(points)
except:
flagc = 0
if vecnorm_target is None or vecnorm_probe is None: flagc = 0
ct = 0
if flagc == 1:
for l in range(len(vecnorm_target)):
ct += 1
nvec = vecnorm_target[l]
ovec = vecnorm_probe[l]
### add max value for regions of null variation
if (nvec[0] == 0. and nvec[1] == 0. and nvec[2] == 0.):
if (ovec[0] == 0. and ovec[1] == 0. and ovec[2] == 0.0):
continue
else:
scores.append(3.14)
continue
else:
if (ovec[0] == 0. and ovec[1] == 0. and ovec[2] == 0.):
scores.append(3.14)
continue
try:
dotprod = ovec[0] * nvec[0] + ovec[1] * nvec[1] + ovec[2] * nvec[2]
den = sqrt(nvec[0]**2 + nvec[1]**2 + nvec[2]**2) * sqrt(ovec[0]**2 + ovec[1]**2 + ovec[2]**2)
if abs(dotprod-den) < 0.00001:
ang = 0.0
else:
ang = math.acos(min(max(dotprod/den,-1.0),1.0))
if den == 0.0: print(dotprod, den, nvec, ovec)
scores.append(abs(ang))
except ValueError:
print('Error: Angle could not be calculated: ', nvec,' ', ovec)
#print scores[-10:]
if len(scores) == 0:
print("There are no points to be scored! The threshold values or the number of points to be considered needs to be changed.")
return None
else:
if sum(scores) == 0:
return 0.0
else:
#return 1-(sum(scores)/(len(points)*3.14)) #in this way go from 1 to 0
return 1-(sum(scores)/(len(points)*3.14))
scores = []
ct1 = 0
if flagc == 0:
for v in points:
n_vec = map_target.get_normal_vector(v[2],v[1],v[0])
o_vec = map_probe.get_normal_vector(v[2],v[1],v[0])
ct1 += 1
### add max value for regions of null variation
if (n_vec.x == -9 and n_vec.y == -9 and n_vec.z == -9):
if (o_vec.x == -9 and o_vec.y == -9 and o_vec.z == -9):
continue
else:
scores.append(3.14)
continue
else:
if (o_vec.x == -9 and o_vec.y == -9 and o_vec.z == -9):
scores.append(3.14)
continue
try:
scores.append(abs(n_vec.arg(o_vec)))
except ValueError:
print('Error: Angle between '+ str(n_vec) +', '+ str(o_vec) +' for point %d, %d, %d cannot be calculated.',v.x,v.y,v.z)
if len(scores) == 0:
print ("There are no points to be scored! The threshold values or the number of points to be considered needs to be changed.")
else:
if sum(scores) == 0:
return 0
else:
#return 1-(sum(scores)/(len(points)*3.14)) #in this way go from 1 to 0
return 1-(sum(scores)/(len(points)*3.14))
def get_partial_DLSF(self, num_of_points, map_target, map_probe):
"""
Calculate the DLSF score between two Map instances.
The DLSF is similar to the LSF;
whereas the LSF compares absolute density values,
the DLSF compares the difference between pairs of values.
Arguments:
*map_target, map_probe*
the two Map instances to compare.
*num_of_points*
number of significant points.
Return:
DLSF score
"""
if not self.mapComparison(map_target, map_probe):
#map_target, map_probe = self.matchMaps(map_target, map_probe)
return "can't Match the map"
#print "fff", primary_boundary, secondary_boundary
map_target_sig_pairs=map_target._get_random_significant_pairs(int(num_of_points))
otherMap=map_probe
score = 0.0
for p in map_target_sig_pairs:
z1 = p[0]
y1 = p[1]
x1 = p[2]
z2 = p[3]
y2 = p[4]
x2 = p[5]
dens = p[6]
prot_dens = otherMap.fullMap[z1][y1][x1] - otherMap.fullMap[z2][y2][x2]
score += (dens-prot_dens)**2
return score/map_target.fullMap.size
def _MI(self, map_target, map_probe, layers=20):
"""
Calculate the mutual information score between two Map instances.
Arguments:
*map_target, map_probe*
EMMap instances to compare.
*layers*
Number of layers used to bin the map. Default is 20 as in Shatsky et al., 2008.
Return:
MI score
"""
if self.mapComparison(map_target, map_probe):
m1, m2 = map_target, map_probe
else:
self._failed_match()
#m1,m2 = self.matchMaps(map_target, map_probe)
score = 0
m1_levels = (m1.max()-m1.min())/layers
m2_levels = (m2.max()-m2.min())/layers
for x in range(layers):
for y in range(layers):
m1_level_map = (m1.getMap() >= m1.min()+(x*m1_levels))*(m1.getMap() <= m1.min()+((x+1)*m1_levels))
m2_level_map = (m2.getMap() >= m2.min()+(y*m2_levels))*(m2.getMap() <= m2.min()+((y+1)*m2_levels))
comb_level_map = m1_level_map*m2_level_map
p_m1 = float(m1_level_map.sum())/m1_level_map.size
p_m2 = float(m2_level_map.sum())/m2_level_map.size
p_comb = float(comb_level_map.sum())/comb_level_map.size
if p_comb == 0:
mi_score = 0.0
else:
#print p_comb, p_m1, p_m2, p_comb/(p_m1*p_m2), math.log(p_comb/(p_m1*p_m2),2)
mi_score = p_comb*math.log(p_comb/(p_m1*p_m2), 2)
score += mi_score
return score
    def _MI_C(self,m1,m2,layers1=20,layers2=20,N=0,lc1=0.0,lc2=0.0):
        """
        Mutual information between two 3D density arrays, C implementation.

        DISABLED: the scipy.weave call that executed the embedded C code is
        commented out below, so this method currently always returns None;
        callers (see MI) treat None as "fall back to the python calculation".

        Arguments:
            *m1, m2*
                3D numpy arrays of densities (same shape).
            *layers1, layers2*
                number of histogram bins for m1 and m2 respectively.
            *N*
                if 1, the C code computes normalised MI (Hx+Hy)/Hxy
                instead of Hx+Hy-Hxy.
            *lc1, lc2*
                lower density cutoffs; values <= cutoff are masked out when
                determining the histogram bounds.
        Return:
            None (weave disabled).
        """
        #from datetime import datetime
        #print datetime.now().time()
        ly1 = int (layers1)
        ly2 = int (layers2)
        # input 3D arrays
        arr1 = (m1).view(float)
        arr2 = (m2).view(float)
        nz = int(arr1.shape[0])
        ny = int(arr1.shape[1])
        nx = int(arr1.shape[2])
        # min and max to set left and right bound
        ma1 = ma.masked_less_equal(arr1,lc1,copy=False)
        min1 = float(ma1.min())
        max1 = float(ma1.max())
        #print min1,max1,amin(m1[msk]),amax(m1[msk])
        #min1 = float(amin(m1[msk]))
        #max1 = amax(m1[msk])
        ma2 = ma.masked_less_equal(arr2,lc2,copy=False)
        min2 = float(ma2.min())
        max2 = float(ma2.max())
        #print min2,max2
        #min2 = float(amin(m2[msk]))
        #max2 = amax(m2[msk])
        # shift the lower bound fractionally below the minimum so the smallest
        # value falls inside the first bin (bins are (lo, hi] in the C code)
        min1 = float(min1-((max1-min1)/layers1)*0.0001)
        min2 = float(min2-((max2-min2)/layers2)*0.0001)
        # bin width
        step1 = (max1-min1)/float(layers1)
        step2 = (max2-min2)/float(layers2)
        # histogram freq in bins
        freq1 = zeros(layers1,dtype=float)
        freq2 = zeros(layers2,dtype=float)
        comb_freq = zeros((layers1,layers2),dtype=float)
        # C source for the (disabled) weave call: fills marginal and joint
        # histograms, then accumulates entropies Hx, Hy, Hxy
        code = """
        int i,j,k,s1=0,s2=0;
        float p1=0.0, p2=0.0, pcomb = 0.0,Hxy=0.0,Hy=0.0,Hx=0.0;
        float va1,va2;
        /*long index = 0;
        long indexend = nz * ny * nx;
        while (index < indexend){
            va1 = arr1[index];
            va2 = arr2[index];*/
        /* use 3d array loop */
        for (int z=0; z<nz; z++) {
            for (int y=0; y<ny; y++) {
                for (int x=0; x<nx; x++) {
                    va1 = ARR13(z,y,x);
                    va2 = ARR23(z,y,x);
                    for (i=0; i<ly1; i++)
                    {
                        if ((va1 > (min1+ i*step1)) && (va1 <= (min1+(i+1)*step1)))
                        {
                        FREQ11(i) += 1.0;
                        s1 += 1;
                        break;
                        }
                    }
                    if (i == ly1) i = i-1;
                    for (j=0; j<ly2; j++)
                    {
                        if ((va2 > (min2+j*step2)) && (va2 <= (min2+(j+1)*step2)))
                        {
                        FREQ21(j) += 1.0;
                        s2 += 1;
                        COMB_FREQ2(i,j) += 1.0;
                        break;
                        }
                    }
                    /*index ++;*/
                }
            }
        }
        for (i=0; i<ly1; i++){
            p1 = FREQ11(i)/(float) s1;
            /*std::cout << s1 << ' ' << s2 << std::endl;*/
            for (j=0; j<ly2; j++){
                p2 = FREQ21(j)/(float) s2;
                pcomb = COMB_FREQ2(i,j)/(float) s1;
                if (pcomb != 0.0) Hxy += (-pcomb*log2(pcomb));
                if ((i == 0) && (p2 != 0.0)) Hy += (-p2*log2(p2));
            }
            if (p1 != 0.0) Hx += (-p1*log2(p1));
            }
        /*std::cout << Hxy << ' ' << Hx << ' ' << Hy << ' ' << std::endl;*/
        if (N == 1) {
            if (Hxy != 0.0) return_val = (Hx+Hy)/Hxy;
            else return_val = 0.0;
            }
        else return_val = Hx+Hy-Hxy;
        """
        # check
        # BEN Commented out due to weave
        #try:
            #print datetime.now().time()
        #    mi = weave.inline(code,['arr1','arr2','ly1','ly2','N','freq1','freq2','comb_freq','nz','ny','nx','step1','step2','min1','min2'],headers=["<math.h>"],verbose=0)
        #    #print datetime.now().time()
        #    mi = max(0.0,mi)
        #    return mi
        #except:
        #    #print 'C++ MI scoring run failed!'
        #    return None
        return None
    #Faster version of MI, in the overlap region (3) or complete density (1), added by APJ
    def MI(self, map_target, map_probe, map_target_threshold=0.0, map_probe_threshold=0.0, mode=1, layers1=None,layers2=None, weight=False,cmode=True):
        """
        Calculate the mutual information score between two Map instances.
        Arguments:
            *map_target, map_probe*
                EMMap instances to compare.
            *map_target_threshold, map_probe_threshold*
                Thresholds used for contouring
            *mode*
                1. use complete map for calculation
                3. use overlap region for calculation
            *layers1, layers2*
                Number of layers used to bin the maps. Default is 20 as in Shatsky et al., 2008.
            *weight*
                If True, return the normalised MI (Studholme et al.),
                (Hx+Hy)/Hxy, instead of Hx+Hy-Hxy.
            *cmode*
                If True, try the C implementation (_MI_C) first and fall back
                to the python calculation when it returns None.
        Return:
            MI score
        """
        if not self.mapComparison(map_target, map_probe):
            #m1, m2 = map_target, map_probe
        #else:
            self._failed_match()
        # calculate threshold if not given : 2* sigma can be used for experimental maps and 1*sigma for simulated?
        if map_target_threshold==0.0:
            map_target_threshold=self.calculate_map_threshold(map_target)
        if map_probe_threshold==0.0:
            map_probe_threshold=self.calculate_map_threshold(map_probe)
        # calculation on the complete map
        if mode == 1:
            if weight: wt = 1
            else: wt = 0
            if layers1 is None:
                layers1 = 20
            if layers2 is None:
                layers2 = 20
            # lower cutoffs fractionally below the minimum so every voxel falls in a bin
            min1 = amin(map_target.fullMap) - 0.00001*(amax(map_target.fullMap)-amin(map_target.fullMap))
            min2 = amin(map_probe.fullMap) - 0.00001*(amax(map_probe.fullMap)-amin(map_probe.fullMap))
            if cmode: mic = self._MI_C(map_target.fullMap,map_probe.fullMap,layers1,layers2,wt,min1,min2)
            else: mic = None
            if not mic == None: return mic
            # digitize whole map based on layers
            map1_bin = map_target._map_digitize(map_target.min(),layers1,True)
            map2_bin = map_probe._map_digitize(map_probe.min(),layers2,True)
            bins1 = []
            for i in range(layers1+2): bins1.append(i)
            bins2 = []
            for i in range(layers2+2): bins2.append(i)
            # calculate frequency of bins
            map1_freq = histogram(map1_bin.fullMap,bins1)[0][1:]
            map2_freq = histogram(map2_bin.fullMap,bins2)[0][1:]
        elif mode == 3:
            # For score within masked region, the background is a bit ambiguous because low densities are overrepresented
            mask_array = self._overlap_map_array(map_target,map_target_threshold,map_probe,map_probe_threshold)
            if numsum(mask_array) == 0:
                print('No map overlap (Mutual information score), exiting score calculation..')
                return 0.0
            # sturges rule provides a way of calculating number of bins : 1+math.log(number of points)
            if layers1 is None:
                try: layers1=int(1+math.log(numsum(mask_array),2))
                except ValueError:
                    print('No map overlap (Mutual information score), exiting score calculation..')
                    return 0.0
            if layers2 is None:
                try: layers2=int(1+math.log(numsum(mask_array),2))
                except ValueError:
                    print('No map overlap (Mutual information score), exiting score calculation..')
                    return 0.0
            layers1 = max(layers1,15)
            layers2 = max(layers2,15)
            if weight: wt = 1
            else: wt = 0
            if cmode: mic = self._MI_C(nparray(map_target.fullMap*mask_array),nparray(map_probe.fullMap*mask_array),layers1,layers2,wt)
            else: mic = None
            if not mic == None: return mic
            # digitize masked map based on layers
            map1_bin = map_target.copy()
            map2_bin = map_probe.copy()
            map1_bin.fullMap = map1_bin.fullMap*mask_array
            map2_bin.fullMap = map2_bin.fullMap*mask_array
            map1_bin = map1_bin._map_digitize(map_target.fullMap[mask_array].min(),layers1,True)
            map2_bin = map2_bin._map_digitize(map_probe.fullMap[mask_array].min(),layers2,True)
            # make sure the outside region is filled with zeros
            map1_bin.fullMap = map1_bin.fullMap*mask_array
            map2_bin.fullMap = map2_bin.fullMap*mask_array
            #background frequencies from the whole map
            bins1 = []
            for i in range(layers1+2): bins1.append(i)
            bins2 = []
            for i in range(layers2+2): bins2.append(i)
            # calculate frequency of bins
            map1_freq = histogram(map1_bin.fullMap,bins1)[0][1:]
            map2_freq = histogram(map2_bin.fullMap,bins2)[0][1:]
        score = 0.0
        total = 0
        if numsum(map1_freq) == 0:
            print('No map overlap (Mutual information score), exiting score calculation..')
            return 0.0
        if numsum(map2_freq) == 0:
            print('No map overlap (Mutual information score), exiting score calculation..')
            return 0.0
        list_overlaps = []
        # joint frequencies: for each bin x of map 1, histogram of map 2 bins inside that region
        for x in range(layers1):
            mask_array = map1_bin.fullMap == float(x+1)
            overlap_freq = histogram(map2_bin.fullMap[mask_array],bins2)[0][1:]
            total += float(numsum(overlap_freq))
            list_overlaps.append(overlap_freq)
        if total == 0:
            print('No map overlap (Mutual information score), exiting score calculation..')
            return 0.0
        enter = 0
        Hxy = 0.0
        Hx = 0.0
        Hy = 0.0
        mi_score = 0.0
        p_comb = 0.0
        #print numsum(map1_freq), numsum(map2_freq), total
        for x in range(layers1):
            # probability of occurrence of x
            p_m1 = map1_freq[x]/float(numsum(map1_freq))
            for y in range(layers2):
                enter = 1
                # probability for overlap of bins x and y
                p_comb = list_overlaps[x][y]/total
                # probability of occurrence of y
                p_m2 = map2_freq[y]/float(numsum(map2_freq))
                #if p_m1 == 0.0 or p_m2 == 0.0:
                #    mi_score = 0.0
                #    continue
                if p_comb == 0:
                    mi_score = 0.0
                else:
                    # p_m1 and p_m2 (background probabilties can be non-zero when p_comb=0), so the entropy based definition may be used
                    ## mi_score = p_comb*math.log(p_comb/(p_m1*p_m2), 2)
                    Hxy += -p_comb*math.log(p_comb, 2) # joined entropy
                score += mi_score
                if x == 0 and not p_m2 == 0.0: Hy += (-p_m2*math.log(p_m2, 2))
            if not p_m1 == 0.0: Hx += (-p_m1*math.log(p_m1, 2))
        if enter == 1:
            # normalised MI (Studholme et al.) is used to account for overlap of 'contours'
            # MI = Hx+Hy-Hxy & NMI = Hx+Hy/Hxy
            if weight:
                if Hxy == 0.0: return 0.0
                return (Hx+Hy)/Hxy
            return Hx+Hy-Hxy#score
        else: return None
# MAIN: Faster version of MI, in the overlap region (3) or map contour (2) or complete density (1)
def _hausdorff_list(self, primary_boundary, secondary_boundary, kdtree, map_probe):
"""
This is for the chamdef distance def chamfer_distance, min max density value that define the surface of the protein
Arguments:
*kdtree* (there are 2 of them in numpy one Cbased on py-based, the latter is better, ctrl) this have to be one of the input.
kdtree from map_target
*primary_boundary, secondary_boundary* need to run get_primary_boundary and get_second_boundary for map_probe
NOTE: if you keep the kdtree as parametre out os less time consuming as building it takes time.
"""
points = map_probe.get_pos(primary_boundary, secondary_boundary)
#print "HERE POINTS",points
return kdtree.query(points)[0] #kdtree give 2 list 0=distance 1=actual points
def chamfer_distance(self, map_target, map_probe, primary_boundary, secondary_boundary, kdtree=None):
"""
Calculate the chamfer distance Score between two Map instances.
NOT RACCOMANDED.
Arguments:
*map_target, map_probe*
EMMap instances to compare.
*primary_boundary*
is the value returned by get_primary_boundary for map_probe
*secondary_boundary*
is the value returned by get_second_boundary for map_probe
*kdtree*
If set True it is possible to choose between the option of kdtree in numpy
The one that is py-based is a better choice.
"""
if self.mapComparison(map_target, map_probe):
m1, m2 = map_target, map_probe
else:
self._failed_match()
#m1,m2 = matchMaps(map_target, map_probe)
print("here")
if kdtree:
return self._hausdorff_list(primary_boundary, secondary_boundary, kdtree, m2).mean()
else:
print(m1,primary_boundary, secondary_boundary)
kdtree = m1.makeKDTree(primary_boundary, secondary_boundary) #if you don't assine it wil be build one kdtree
if kdtree==None:
print("Error. No points selected, change boundary parameters.")
sys.exit()
return self._hausdorff_list(primary_boundary, secondary_boundary, kdtree, m2).mean()#mean distance to the nearest neighbour
# CHAMFER DISTANCE SCORE based on a defined surface based on modes
    def _surface_distance_score(self,map_target,map_probe,map_target_threshold1=0.0,map_probe_threshold=0.0,Filter=None,map_target_threshold2=0.0,weight=False):
        """
        Calculate the chamfer distance Score between two Map instances.
        Arguments:
            *map_target, map_probe*
                EMMap instances to compare.
            *map_target_threshold1*
                contour threshold of the target map.
                This value is used the primary boundary if map_target_threshold2 is given.
            *map_probe_threshold*
                contour threshold for the probe map.
            *Filter*
                definition of the surface:
                    1) None : surface defined by known boundaries - map_target_threshold1 & map_target_threshold2
                        If the boundaries are not known and target&probe map contour levels are known:
                    2) Std : to define the boundaries, contour level +- 5%sigma is calculated.
                        5%sigma is used to limit the number of points picked as surface.
                        For small maps, higher values (eg: 10%sigma) can be used.
                    3) Mean: a mean filter is applied on the binary contour mask over a long window.
                        The resulting mask has values between 0 and 1.
                        Points with values less than 0.3 is used to represent surface.
                        As the average is calculated on a long window, highly exposed surface points \
                        have very low values and partially exposed surfaces/grooves have relatively higher values.
                        This definition is useful especially when the map surface has many features/projections.
                    4) Minimum: a minimum filter is applied on a binary contour mask to locate surface points.
                        Voxels surrounded by points outside the contour (zeroes) are detected as surface.
                    5) Sobel: sobel filter is applied on the map to detect high density gradients.
                        Before applying the sobel filter, it is important to reduce the noise density \
                        and large variations (gradients) in the noise region.
            *weight*
                If set true, the distances between the surface points is normalized in a way similar to GDT (Zemla 2007)\
                calculation for atomic co-ordinate alignments.
        Return:
            a float score on success, None on failure; note that a few
            weighted-mode failure paths return a (None, None) tuple instead -
            callers should handle both.
        """
        # check if both maps are on the same grid
        if not self.mapComparison(map_target, map_probe):
            print("@@@ Maps could not be matched")
            return -999.
        # if the boundaries are known, calculate the kdtree
        if Filter == None:
            kdtree = map_target.makeKDTree(map_target_threshold1,map_target_threshold2)
            probe_points = map_probe.get_pos(map_target_threshold1, map_target_threshold2)
        # surface based on contour density thresholds for target and probe. 5% sigma is used to define boundaries.
        elif Filter == 'Std':
            # argwhere returns points as z,y,x, in the same way the map array dimensions are defined.
            target_points = argwhere((map_target.fullMap > (float(map_target_threshold1)-(map_target.std()*0.10))) & (map_target.fullMap < (float(map_target_threshold1)+(map_target.std()*0.10))))
            probe_points = argwhere((map_probe.fullMap > (float(map_probe_threshold)-(map_probe.std()*0.10))) & (map_probe.fullMap < (float(map_probe_threshold)+(map_probe.std()*0.10))))
            # check whether the probe points is larger than the probe surface points. if not use the smaller one as probe point
            if len(target_points) < len(probe_points):
                probe_points1 = npcopy(target_points)
                target_points = npcopy(probe_points)
                probe_points = npcopy(probe_points1)
            if len(target_points) == 0 or len(probe_points) == 0:
                print ('Surface detection failed (Std filter), exiting..')
                return None
            try:
                from scipy.spatial import cKDTree
                try: kdtree = cKDTree(target_points)
                except RuntimeError: return None
            except ImportError:
                try: kdtree = KDTree(target_points)
                except RuntimeError: return None
        elif Filter == 'Mean':
            map1_filter = map_target._surface_features(float(map_target_threshold1))
            map2_filter = map_probe._surface_features(float(map_probe_threshold))
            # define surface based on the filtered mask values.
            # points with values less than 0.3 are usually preferred. But in some cases like viruses, most surface points are highly exposed and \
            # a large number of points are returned and the calculation becomes slow.
            # Hence an additional filter is added: the maximum allowed points is 10% of box size.
            # The minimum number of points is kept as 7%. This mode is less sensitive to the number of surface points chosen \
            # as the extent of exposure is used for defining surface. Hence thick surface is not usually required.
            # calculate frequencies in bins for filtered mask.
            # The smaller the bins, more precise will be the calculation of points allowed based on percent of points chosen.
            # As this is just an additional filter and doesn't affect the calculations drastically, 40 bins are used to calculate frequencies.
            bin_test = [0.0001]
            for ii in range(1,41): bin_test.append(0.025*ii)
            freq_test = histogram(map1_filter.fullMap,bin_test)[0]
            map1_filled = numsum(map1_filter.fullMap>0)
            # select points with values less than 0.3
            # NOTE(review): t1 (and t2 below) remain unset if neither break
            # condition triggers before the loop ends - would raise NameError;
            # confirm the histogram always satisfies one of the conditions.
            sum_freq = 0.0
            for fr in range(len(freq_test)):
                sum_freq += float(freq_test[fr])
                # a minimum of 5% (of box size) points are chosen
                if sum_freq/map1_filled > 0.05 and bin_test[fr+1] >= 0.3:
                    t1 = bin_test[fr+1]
                    break
                # if number of points are more than 5% and still have values less than 0.3, a maximum limit of 10% is applied
                if sum_freq/map1_filled > 0.10 or sum_freq > 200000:
                    t1 = bin_test[fr+1]
                    break
            # for the second map
            sum_freq = 0.0
            freq_test = histogram(map2_filter.fullMap,bin_test)[0]
            map2_filled = numsum(map2_filter.fullMap>0)
            for fr in range(len(freq_test)):
                sum_freq += float(freq_test[fr])
                if sum_freq/map2_filled > 0.05 and bin_test[fr+1] >= 0.3:
                    t2 = bin_test[fr+1]
                    break
                if sum_freq/map2_filled > 0.10 or sum_freq > 200000:
                    t2 = bin_test[fr+1]
                    break
            # t1 and t2 are the selected levels based on filtered values and percent of points
            target_points = argwhere((map1_filter.fullMap > 0.0) & (map1_filter.fullMap <= t1))
            probe_points = argwhere((map2_filter.fullMap > 0.0) & (map2_filter.fullMap <= t2))
            if len(target_points) == 0 or len(probe_points) == 0:
                print('Surface detection failed (Mean filter), exiting..')
                return None
            #print len(target_points), len(probe_points), t1, t2
            # check whether the probe points is larger than the probe surface points. if not use the smaller one as probe point
            if len(target_points) < len(probe_points):
                probe_points1 = npcopy(target_points)
                target_points = npcopy(probe_points)
                probe_points = npcopy(probe_points1)
            try:
                from scipy.spatial import cKDTree
                try: kdtree = cKDTree(target_points)
                except RuntimeError: return None
            except ImportError:
                try: kdtree = KDTree(target_points)
                except RuntimeError: return None
        elif Filter == 'Minimum':
            map1_surface = map_target._surface_minimum_filter(float(map_target_threshold1))
            map2_surface = map_probe._surface_minimum_filter(float(map_probe_threshold))
            # select the surface points represented by the mask
            target_points = argwhere(map1_surface == 1)
            probe_points = argwhere(map2_surface == 1)
            if len(target_points) == 0 or len(probe_points) == 0:
                print('Surface detection failed (Minimum filter), exiting..')
                return None
            #print len(target_points), len(probe_points)
            # stop if the number of points are large
            if len(target_points) + len(probe_points) > 250000: return None
            # check whether the probe points is larger than the probe surface points. if not use the smaller one as probe point
            if len(target_points) < len(probe_points):
                probe_points1 = npcopy(target_points)
                target_points = npcopy(probe_points)
                probe_points = npcopy(probe_points1)
            try:
                from scipy.spatial import cKDTree
                try: kdtree = cKDTree(target_points)
                except RuntimeError: return None
            except ImportError:
                try: kdtree = KDTree(target_points)
                except RuntimeError: return None
        # surface based on sobel filter on contoured map, high gradient points chosen
        elif Filter == 'Sobel':
            map1_surface = map_target._sobel_filter_contour(float(map_target_threshold1))
            map2_surface = map_probe._sobel_filter_contour(float(map_probe_threshold))
            target_points = argwhere(map1_surface.fullMap > map1_surface.max()/float(2))
            probe_points = argwhere(map2_surface.fullMap > map2_surface.max()/float(2))
            if len(target_points) == 0 or len(probe_points) == 0:
                print('Surface detection failed (Sobel filter), exiting..')
                return None
            #print len(target_points), len(probe_points)
            # check whether the probe points is larger than the probe surface points. if not use the smaller one as probe point
            if len(target_points) < len(probe_points):
                probe_points1 = npcopy(target_points)
                target_points = npcopy(probe_points)
                probe_points = npcopy(probe_points1)
            try:
                from scipy.spatial import cKDTree
                try: kdtree = cKDTree(target_points)
                except RuntimeError: return None
            except ImportError:
                try: kdtree = KDTree(target_points)
                except RuntimeError: return None
        # nearest target-surface distance for every probe surface point
        distances = kdtree.query(probe_points)[0]
        #print distances
        #print npmean(distances)
        # by default return mean distance, 1/npmean(distances) gives a similarity score
        if len(distances) == 0: return None
        if not weight:
            if not npmean(distances) <= 0.05: return 1/npmean(distances)
            # becomes inf if mean(dist) is 0. Max score of 20 (will be changed later)
            else: return 1/0.05
        x = int(30.0/map_target.apix) # 40A selected as potential distance threshold to calculate weighted score
        if amin(distances) < x/2: distances = distances - amin(distances)
        bins = []
        # to select points that are aligned to target
        i = 0
        while i <= float(x):
            bins.append(i*1.0)
            i += 1
        num_distances = len(distances)
        overlap_freq = histogram(distances,bins)[0]
        for fr_i in range(len(overlap_freq)):
            if overlap_freq[fr_i] > amax(overlap_freq)/3.:
                break
        total_ext = fr_i
        #might help in accounting for contour difference
        bins = bins[fr_i:]
        #distancebin = distances < int(x/2.)
        #to check if the aligned surfaces of maps form patches
        # NOTE(review): 'cl' is not defined anywhere in this method - presumably
        # a module-level flag enabling the surface-patch check; confirm it
        # exists at module scope, otherwise this raises NameError.
        if cl:
            ## use this routine to check if the points form a patch
            #points_cl = probe_points[distancebin]
            points_cl = probe_points
            # points_cl represents indices of the smaller map which aligns well with the other map
            # create a kdtree to check whether the points form a patch
            if len(points_cl) == 0: return None,None
            try: kdtree = cKDTree(points_cl)
            except: return None,None
            #cKDtree count_neighbors would work better, but not available in old scipy version
            neighbors_num = 20
            distance_lim = 3.0
            # query against the same points to check integrity
            neigh = kdtree.query(points_cl,k=neighbors_num,distance_upper_bound=distance_lim)[1]
            ct_neigh = 0
            # for those points where 8 neighbors are not found, len(neigh) is returned as index
            #cl_weight = numsum(numsum(neigh<len(neigh),axis=1) > 15)/float(len(neigh))
            # ratio of 'patch-like' aligned points to total query points : gives the fraction of surface overlap
            cl_weight = numsum(numsum(neigh<len(neigh),axis=1) > 17)/float(len(probe_points))
            # to calculate distances involving these points
            #distances_align = distances[distancebin]
            distances_align = distances
            distances_sel = distances_align[numsum(neigh<len(neigh),axis=1) > 17]
            distances = distances_sel[:]
            overlap_freq = histogram(distances,bins)[0]
        total = total_ext #make total_ext=0.0 above for proper contours
        cumul_freq = 0.0
        enter = 0
        sum_sc = 0.0
        # GDT-like weighting: closer distance bins get larger weights
        for i in range(len(overlap_freq)):
            w = len(overlap_freq)-(i)
            try:
                cumul_freq += overlap_freq[i]
            except IndexError: pass
            try:
                perc_equiv = float(cumul_freq)/num_distances #/len(distances)
            except ZeroDivisionError:
                print('Distance weighting failed!!. Check surface defined')
                return None, None
            #sum_sc = sum_sc + (npexp(w/2.)*perc_equiv)
            #total += npexp(w/2.)
            sum_sc = sum_sc + ((w)*perc_equiv)
            total += (w)
            enter = 1
        score = float(sum_sc)/total
        if cl:
            if enter == 1:
                if len(distances_sel) == 0.0: return 0.0
                if npmean(distances_sel) == 0.0: return 0.0
                if cl_weight == 0.0: return 0.0
                return score#cl_weight*(1/npmean(distances_sel))
            else: return None, None
        if enter == 1:
            if npmean(distances) <= 0.05: return 1.0
            if npmean(distances) == 0.0: return 1.0
            return score
        else: return None, None
def envelope_score(self,map_target, primary_boundary, structure_instance,norm=True):
"""
Calculate the envelope score between a target Map and a Structure Instances.
Arguments:
*map_target*
Target Map Instance.
*primary_boundary*
Value specified is calculated with primary_boundary of the map object.
*structure_instance*
Structure Instance to compare.
Return:
Envelope score
"""
binMap = map_target.make_bin_map(primary_boundary)
max_score = float(-2*numsum(binMap.fullMap))
min_score = float(numsum(binMap.fullMap)-2*numsum(binMap.fullMap+1))
blurrer = StructureBlurrer()
struct_binMap = blurrer.make_atom_overlay_map1(map_target, structure_instance)
grid = struct_binMap.get_pos(0.9,1.1)
for x,y,z in grid:
g = binMap[z][y][x]
if g == -1:
binMap[z][y][x] = 2
elif g == 0:
binMap[z][y][x] = -2
#score=binMap.fullMap.sum()
score = float(numsum(binMap.fullMap))
if norm:
norm_score = float((score-min_score)/(max_score-min_score))
return norm_score
else:
return score
def envelope_score_map(self,map_target, map_probe,map_target_threshold=0,map_probe_threshold=0,norm=True):
"""
Calculate the envelope score between two Map instance using numoy array.
Arguments:
*map_target, map_probe*
EMMap instances to compare.
*map_target_threshold,map_probe_threshold*
EMMap threshold
use calcualte_map_threshold to calculate map_target_threshold and map_probe_threshold.
Return:
Envelope score
"""
if self.mapComparison(map_target, map_probe):
if map_target_threshold==0:
map_target_threshold=self.calculate_map_threshold(map_target)
if map_probe_threshold==0:
map_probe_threshold=self.calculate_map_threshold(map_probe)
binMap = map_target.make_bin_map(map_target_threshold)
max_score = float(-2*numsum(binMap.fullMap))
min_score = float(numsum(binMap.fullMap)-2*numsum(binMap.fullMap+1))
struct_binMap = map_probe.make_bin_map(map_probe_threshold)
newMap=binMap.fullMap+2*struct_binMap.fullMap
hist_array=histogram(newMap,4)
score=2*hist_array[0][0]-(2*(hist_array[0][1]))-(hist_array[0][2])
#print score, max_score, min_score, numsum(binMap.fullMap)
if norm:
norm_score = float((score-min_score))/(max_score-min_score)
return norm_score
else:
return score
#calculate percent of overlap for two contoured maps
def _percent_overlap(self,map_target,map_probe,map_target_threshold,map_probe_threshold,flagsize=0):
"""
Calculate the fraction of overlap between two map grids.
Arguments:
*map_target, map_probe*
EMMap instances to compare.
*map_target_threshold,map_probe_threshold*
map contour thresholds for map_target and map_probe.
Return:
Percent overlap with respect to smaller grid
"""
if self.mapComparison(map_target,map_probe):
# contour the first map
binmap1 = map_target.fullMap > float(map_target_threshold)
binmap2 = map_probe.fullMap > float(map_probe_threshold)
# percent calculated on the smaller contoured volume (can be changed)
minim = len(map_target.fullMap[binmap1])
if len(map_probe.fullMap[binmap2]) < minim: minim = len(map_probe.fullMap[binmap2])
maskmap = (binmap1*binmap2) > 0
if flagsize == 1: return numsum(maskmap), numsum(binmap1), numsum(binmap2)
#print numsum(binmap1),numsum(binmap2),numsum(maskmap),minim
if not minim == 0.0: return float(len(map_target.fullMap[maskmap]))/minim
else:
print("Check map contour!!")
return 0.0
else:
print("@@@ Maps could not be matched")
return -1.0
def SCCC(self,map_target,resolution_densMap,sigma_map,structure_instance,rigid_body_structure,write=False,c_mode=True):
    """
    Segment-based cross-correlation (SCCC) score from Pandurangan et al. 2013,
    J Struct Biol: a local CCC computed around the atoms of a rigid body.

    Arguments:
        *map_target*
            Target Map instance.
        *resolution_densMap*
            Resolution of the target map (needed by the Structure Blurrer).
        *sigma_map*
            Sigma coefficient (multiplied by the resolution) controlling the
            Gaussian width used for blurring. 0.356 is the usual default;
            other common choices: 0.187R (Situs), 0.425R (Flex-EM),
            0.5R, 1R (NMFF).
        *structure_instance*
            Full model Structure instance to compare.
        *rigid_body_structure*
            Rigid-body Structure instance defining the segment.
        *write*
            If True, return a formatted report line instead of the raw score.
        *c_mode*
            Passed through to ScoringFunctions.CCC_map as ``cmode``.
    Return:
        SCCC score (float), or a one-line report string when write is True.
    """
    blurrer = StructureBlurrer()
    scorer = ScoringFunctions()
    resolution = float(resolution_densMap)
    # Blur the full model and the rigid body into the target-map frame.
    whole_fit_map = blurrer.gaussian_blur(structure_instance, resolution,
                                          densMap=map_target,
                                          sigma_coeff=sigma_map, normalise=True)
    segment_map = blurrer.gaussian_blur(rigid_body_structure, resolution,
                                        densMap=map_target,
                                        sigma_coeff=sigma_map, normalise=True)
    # Mask both maps by the rigid-body density footprint (std used as cutoff).
    mask_array = segment_map._get_maskArray(segment_map.std())
    masked_em = map_target._get_maskMap(mask_array)
    masked_sim = whole_fit_map._get_maskMap(mask_array)
    score, _overlap = scorer.CCC_map(masked_em, masked_sim, cmode=c_mode)
    #return the overall score
    if write == True:
        return 'SCCC for segment %f\n' % score
    return score
def SCCC_LAP(self,map_target,resolution_densMap,sigma_map,structure_instance,rigid_body_structure,write=False):
    """
    Segment-based cross-correlation (Pandurangan et al. 2013, J Struct Biol)
    computed with the Laplacian-filtered CCC: a local score around the atoms
    of a rigid body.

    Arguments:
        *map_target*
            Target Map instance.
        *resolution_densMap*
            Resolution of the target map (needed by the Structure Blurrer).
        *sigma_map*
            Sigma coefficient (multiplied by the resolution) controlling the
            Gaussian width used for blurring; 0.356 is the usual default.
        *structure_instance*
            Full model Structure instance to compare.
        *rigid_body_structure*
            Rigid-body Structure instance defining the segment.
        *write*
            If True, return a formatted report line instead of the raw score.
    Return:
        Laplacian SCCC score (float), or a report string when write is True.
    """
    blurrer = StructureBlurrer()
    scorer = ScoringFunctions()
    resolution = float(resolution_densMap)
    whole_fit_map = blurrer.gaussian_blur(structure_instance, resolution,
                                          densMap=map_target,
                                          sigma_coeff=sigma_map, normalise=True)
    segment_map = blurrer.gaussian_blur(rigid_body_structure, resolution,
                                        densMap=map_target,
                                        sigma_coeff=sigma_map, normalise=True)
    # Mask both maps by the rigid-body density footprint (std used as cutoff).
    mask_array = segment_map._get_maskArray(segment_map.std())
    masked_em = map_target._get_maskMap(mask_array)
    masked_sim = whole_fit_map._get_maskMap(mask_array)
    score = scorer.laplace_CCC(masked_em, masked_sim)
    #return the overall score
    if write == True:
        return 'SCCC for segment %f\n' % score
    return score
def SCCC_MI(self,map_target,resolution_densMap,sigma_map,structure_instance,rigid_body_structure,write=False):
    """
    Segment-based cross-correlation (Pandurangan et al. 2013, J Struct Biol)
    computed with mutual information (MI) instead of plain CCC: a local score
    around the atoms of a rigid body.

    Arguments:
        *map_target*
            Target Map instance.
        *resolution_densMap*
            Resolution of the target map (needed by the Structure Blurrer).
        *sigma_map*
            Sigma coefficient (multiplied by the resolution) controlling the
            Gaussian width used for blurring; 0.356 is the usual default.
        *structure_instance*
            Full model Structure instance to compare.
        *rigid_body_structure*
            Rigid-body Structure instance defining the segment.
        *write*
            If True, return a formatted report line instead of the raw score.
    Return:
        MI-based SCCC score (float), or a report string when write is True.
    """
    blurrer = StructureBlurrer()
    scorer = ScoringFunctions()
    resolution = float(resolution_densMap)
    whole_fit_map = blurrer.gaussian_blur(structure_instance, resolution,
                                          densMap=map_target,
                                          sigma_coeff=sigma_map, normalise=True)
    segment_map = blurrer.gaussian_blur(rigid_body_structure, resolution,
                                        densMap=map_target,
                                        sigma_coeff=sigma_map, normalise=True)
    # Mask both maps by the rigid-body density footprint (std used as cutoff).
    mask_array = segment_map._get_maskArray(segment_map.std())
    masked_em = map_target._get_maskMap(mask_array)
    masked_sim = whole_fit_map._get_maskMap(mask_array)
    score = scorer.MI(masked_em, masked_sim)
    #return the overall score
    if write == True:
        return 'SCCC for segment %f\n' % score
    return score
def calc_moc(self,indices,map_probe,map_target):
    """
    Mander's overlap coefficient between two maps over the given voxel indices.

    Arguments:
        *indices*
            Index expression selecting the voxels to compare (e.g. a tuple of
            z/y/x index arrays).
        *map_probe*, *map_target*
            Map instances; only their ``fullMap`` arrays are read.
    Return:
        Overlap coefficient in [0, 1], or -1.0 when either masked map has
        zero power (score undefined).
    """
    target_vals = map_target.fullMap[indices]
    probe_vals = map_probe.fullMap[indices]
    numerator = numsum(target_vals * probe_vals)
    denominator = sqrt(numsum(square(target_vals)) * numsum(square(probe_vals)))
    if denominator == 0.0:
        return -1.0
    return numerator / denominator
def SMOC(self,map_target,resolution_densMap,structure_instance,win=11,rigid_body_file=None,sigma_map=0.225,write=False,c_mode=True):
    """
    Calculate Local cross correlation (Mander's Overlap).
    It is a local Overlap Coefficient calculated on atoms in sliding residue
    windows along the chain.
    Arguments:
        *map_target*
            Target Map Instance.
        *resolution_densMap*
            Parameter needed for Structure Blurrer.
            Resolution of the target map.
        *structure_instance*
            Model structure instance.
        *win*
            Overlapping window length used to calculate the score.
        *rigid_body_file*
            Rigid-body file (currently unused here; see the commented-out
            parser below).
        *sigma_map*
            Parameter needed for Structure Blurrer.
            The sigma value (multiplied by the resolution) that controls the
            width of the Gaussian. Default value is 0.225 (see signature).
            Other values used:
            0.187R as used in Situs (Wriggers et al, 1999);
            0.356R the default in Chimera (Petterson et al, 2004);
            0.425R as used by FlexEM (Topf et al, 2008);
            0.5R an option in Chimera (Petterson et al, 2004);
            1R as used by NMFF (Tama et al, 2004).
    Return:
        Tuple of (dict of per-chain {residue: smoc score}, dict of per-chain
        residue lists).
    """
    blurrer = StructureBlurrer()
    sim_map = blurrer.gaussian_blur_real_space(structure_instance, resolution_densMap,densMap=map_target,sigma_coeff=sigma_map,normalise=True)
    peak,ave,sigma = sim_map._peak_density()
    #NOTE: filter background (zero out voxels at or below the peak density)
    sim_map.fullMap = sim_map.fullMap*(sim_map.fullMap > peak)
    # per-chain {residue: voxel-index list}, per-chain residue lists, distances
    dict_chain_indices,dict_chain_res, dict_res_dist = blurrer.get_indices(structure_instance,map_target,resolution_densMap,sigma_map)
    #get details of map
    origin = map_target.origin
    apix = map_target.apix
    box_size = map_target.box_size()
    nz,ny,nx = map_target.fullMap.shape
    zg,yg,xg = mgrid[0:nz,0:ny,0:nx]
    # flat voxel index -> (x, y, z) grid coordinate lookup table
    indi = list(zip(xg.ravel(), yg.ravel(), zg.ravel()))
    #save rigid body details
    dict_rf_res = {}
    dict_rf_sc = {}
    res_list = []
    rb_list = []
    list_sccc = []
    #save scores for each chain and res
    dict_chain_scores = {}
    #TODO: add multi-chain rigid body parser below
    '''
    r_ct = 0
    if rigid_body_file != None:
        inp = open(rigid_body_file,'r')
        for l in inp:
            if l[0] != '#':
                score_indices = []
                lrb = l.split()
                if len(lrb) == 0: continue
                r_ct += 1
                res_list = []
                rb_pairs = []
                # get scores for each res and each rigid body
                for i in range(max((len(lrb)/2)-1,1)):
                    rb_pairs.append([int(lrb[2*i]),int(lrb[2*i+1])])
                    # NOTE: wont work for insertion codes
                    for r in range(int(lrb[2*i]),int(lrb[2*i+1])+1):
                        score_indices.extend(dict_res_indices[r])
                        res_list.append(r)
                rb_list.append(lrb)
                dict_rf_res[r_ct] = rb_pairs
                if len(score_indices) == 0:
                    dict_rf_sc[r_ct] = 0.0#-0.99
                    for res in res_list: dict_res_scores[res] = 0.0#-0.99
                    continue
                tmplist = score_indices[:]
                setlist = set(tmplist)
                score_indices = list(setlist)
                sc_indices = []
                for ii in score_indices: sc_indices.append(indi[ii])
                array_indices = nparray(sc_indices)
                ind_arrxyz = transpose(array_indices)
                # get indices for use with map arrays: ([z...],[y...],x...])
                ind_arrzyx = (ind_arrxyz[2],ind_arrxyz[1],ind_arrxyz[0])
                sccc = self.calc_moc(ind_arrzyx,sim_map,map_target)
                dict_rf_sc[r_ct] = sccc
                #save scores
                for res in res_list:
                    dict_res_scores[res] = sccc
                list_sccc.append(sccc)
        inp.close()
    '''
    #for residues not in rigid bodies: consider pentapeptides
    for ch in dict_chain_indices:
        dict_res_scores = {}
        dict_res_indices = dict_chain_indices[ch]
        for res in dict_res_indices:
            if not res in dict_res_scores.keys():
                indices = dict_res_indices[res][:]
                #consider residues on both sides. NOTE: wont work for insertion codes!
                #need to rewite res numbers to avoid insertion codes
                for ii in range(1,int(round((win+1)/2))):
                    try:
                        #get prev residue indices
                        indices.extend(dict_res_indices[dict_chain_res[ch][dict_chain_res[ch].index(res)-ii]])
                    except: pass
                for ii in range(1,int(round((win+1)/2))):
                    try:
                        # get next residue indices
                        indices.extend(dict_res_indices[dict_chain_res[ch][dict_chain_res[ch].index(res)+ii]])
                    except: pass
                # de-duplicate voxel indices shared between window residues
                tmplist = indices[:]
                setlist = set(tmplist)
                indices = list(setlist)
                sc_indices = []
                for ii in indices: sc_indices.append(indi[ii])
                if len(indices) < 10:
                    # too few voxels to score reliably: borrow/average the
                    # neighbouring residues' scores, falling back to 0.0
                    try:
                        dict_res_scores[res] = dict_res_scores[dict_chain_res[ch][dict_chain_res[ch].index(res)-1]]
                        try: dict_res_scores[res] = (dict_res_scores[res]+dict_res_scores[dict_chain_res[ch][dict_chain_res[ch].index(res)+1]])/2.0
                        except (IndexError,KeyError): pass
                    except (IndexError,KeyError):
                        try: dict_res_scores[res] = dict_res_scores[dict_chain_res[ch][dict_chain_res[ch].index(res)+1]]
                        except (IndexError,KeyError): dict_res_scores[res] = 0.0
                    continue
                array_indices = nparray(sc_indices)
                ind_arrxyz = transpose(array_indices)
                # get indices for use with map arrays: ([z...],[y...],[x...])
                ind_arrzyx = (ind_arrxyz[2],ind_arrxyz[1],ind_arrxyz[0])
                sccc = self.calc_moc(ind_arrzyx,sim_map,map_target)
                dict_res_scores[res] = sccc
                if sccc == -1.0:
                    # undefined score: borrow/average neighbours as above
                    try:
                        dict_res_scores[res] = dict_res_scores[dict_chain_res[ch][dict_chain_res[ch].index(res)-1]]
                        try: dict_res_scores[res] = (dict_res_scores[res]+dict_res_scores[dict_chain_res[ch][dict_chain_res[ch].index(res)+1]])/2.0
                        except (IndexError,KeyError): pass
                    except IndexError:
                        try: dict_res_scores[res] = dict_res_scores[dict_chain_res[ch][dict_chain_res[ch].index(res)+1]]
                        except (IndexError,KeyError): dict_res_scores[res] = 0.0
                    continue
                list_sccc.append(sccc)
        dict_chain_scores[ch] = dict_res_scores
    return dict_chain_scores, dict_chain_res
def _SMOC1(self,map_target,resolution_densMap,structure_instance,win=11,rigid_body_file=None,sigma_map=0.225,write=False):
    """
    Calculate Local cross correlation (Mander's Overlap).
    It is a local Overlap Coefficient calculated on atoms in sliding residue
    windows along the chain (non chain-aware variant of :meth:`SMOC`).
    Arguments:
        *map_target*
            Target Map Instance.
        *resolution_densMap*
            Parameter needed for Structure Blurrer.
            Resolution of the target map.
        *structure_instance*
            Model structure instance.
        *win*
            Overlapping window length used to calculate the score.
        *rigid_body_file*
            Optional rigid-body file; residues covered by rigid bodies are
            scored per body rather than per sliding window.
        *sigma_map*
            Parameter needed for Structure Blurrer.
            The sigma value (multiplied by the resolution) that controls the
            width of the Gaussian. Default value is 0.225 (see signature).
            Other values used:
            0.187R as used in Situs (Wriggers et al, 1999);
            0.356R the default in Chimera (Petterson et al, 2004);
            0.425R as used by FlexEM (Topf et al, 2008);
            0.5R an option in Chimera (Petterson et al, 2004);
            1R as used by NMFF (Tama et al, 2004).
    Return:
        Dictionary of smoc scores for residues in the chain.
    """
    blurrer = StructureBlurrer()
    sim_map = blurrer.gaussian_blur_real_space(structure_instance, resolution_densMap,densMap=map_target,sigma_coeff=sigma_map,normalise=True)
    peak,ave,sigma = sim_map._peak_density()
    #NOTE: filter background (zero out voxels at or below the peak density)
    sim_map.fullMap = sim_map.fullMap*(sim_map.fullMap > peak)
    dict_res_indices,dict_res_dist = blurrer.get_indices(structure_instance,map_target,resolution_densMap)
    #get details of map
    origin = map_target.origin
    apix = map_target.apix
    box_size = map_target.box_size()
    nz,ny,nx = map_target.fullMap.shape
    zg,yg,xg = mgrid[0:nz,0:ny,0:nx]
    # flat voxel index -> (x, y, z) grid coordinate lookup table
    indi = list(zip(xg.ravel(), yg.ravel(), zg.ravel()))
    #save rigid body details
    dict_rf_res = {}
    dict_rf_sc = {}
    res_list = []
    rb_list = []
    list_sccc = []
    #save scores for each res
    dict_res_scores = {}
    r_ct = 0
    if rigid_body_file != None:
        inp = open(rigid_body_file,'r')
        for l in inp:
            if l[0] != '#':
                score_indices = []
                lrb = l.split()
                if len(lrb) == 0: continue
                r_ct += 1
                res_list = []
                rb_pairs = []
                # get scores for each res and each rigid body
                # BUGFIX: use floor division; len(lrb)/2 is a float under
                # Python 3 and range() would raise a TypeError.
                for i in range(max((len(lrb)//2)-1,1)):
                    rb_pairs.append([int(lrb[2*i]),int(lrb[2*i+1])])
                    # NOTE: wont work for insertion codes
                    for r in range(int(lrb[2*i]),int(lrb[2*i+1])+1):
                        score_indices.extend(dict_res_indices[r])
                        res_list.append(r)
                rb_list.append(lrb)
                dict_rf_res[r_ct] = rb_pairs
                if len(score_indices) == 0:
                    dict_rf_sc[r_ct] = 0.0#-0.99
                    for res in res_list: dict_res_scores[res] = 0.0#-0.99
                    continue
                # de-duplicate voxel indices shared between residues
                score_indices = list(set(score_indices))
                sc_indices = []
                for ii in score_indices: sc_indices.append(indi[ii])
                array_indices = nparray(sc_indices)
                ind_arrxyz = transpose(array_indices)
                # get indices for use with map arrays: ([z...],[y...],[x...])
                ind_arrzyx = (ind_arrxyz[2],ind_arrxyz[1],ind_arrxyz[0])
                sccc = self.calc_moc(ind_arrzyx,sim_map,map_target)
                dict_rf_sc[r_ct] = sccc
                #save scores
                for res in res_list:
                    dict_res_scores[res] = sccc
                list_sccc.append(sccc)
        inp.close()
    #for residues not in rigid bodies: consider pentapeptides
    for res in dict_res_indices:
        if not res in dict_res_scores.keys():
            indices = dict_res_indices[res][:]
            #consider residues on both sides. NOTE: wont work for insertion codes!
            #need to rewite res numbers to avoid insertion codes
            # BUGFIX: catch the expected lookup errors explicitly instead of
            # a bare except that would also hide programming errors.
            for ii in range(1,int(round((win+1)/2))):
                try:
                    # get prev residue indices
                    indices.extend(dict_res_indices[res-ii])
                except (KeyError, TypeError): pass
            for ii in range(1,int(round((win+1)/2))):
                try:
                    # get next residue indices
                    indices.extend(dict_res_indices[res+ii])
                except (KeyError, TypeError): pass
            # de-duplicate voxel indices shared between window residues
            indices = list(set(indices))
            sc_indices = []
            for ii in indices: sc_indices.append(indi[ii])
            if len(indices) == 0:
                dict_res_scores[res] = 0.0#-0.99
                continue
            array_indices = nparray(sc_indices)
            ind_arrxyz = transpose(array_indices)
            ind_arrzyx = (ind_arrxyz[2],ind_arrxyz[1],ind_arrxyz[0])
            sccc = self.calc_moc(ind_arrzyx,sim_map,map_target)
            dict_res_scores[res] = sccc
            list_sccc.append(sccc)
    return dict_res_scores
def _get_shell(self,dist1,maxlevel,step):
# indices between upper and lower shell bound
fshells1 = ((dist1 < min(maxlevel,x+step)) & (dist1 >= x))
# match power spectra for two maps
def _amplitude_match(self,map_1,map_2,shellmin,shellmax,step=0.005,c1=0,c2=0,reso=None,lpfiltb=False,lpfilta=False,ref=False):
    """
    Match the radial power spectra of two maps shell-by-shell in Fourier
    space, optionally low-pass filtering before/after the scaling.

    NOTE(review): the parameters shellmin, shellmax, c1 and c2 are accepted
    but never read in this implementation — presumably kept for interface
    compatibility; confirm against callers.

    Arguments:
        *map_1*, *map_2*
            Map instances to be amplitude-matched.
        *step*
            Fourier shell width (in absolute frequency units).
        *reso*
            Resolution used to derive low-pass cutoffs and the frequency at
            which scaling stops; required when lpfiltb/lpfilta is True.
        *lpfiltb* / *lpfilta*
            Apply a tanh low-pass filter before / after amplitude scaling.
        *ref*
            If True, scale map_1's amplitudes to map_2 (reference);
            otherwise scale both to their shell-wise average.
    Return:
        Tuple (map1_filt.fullMap, map2_filt.fullMap): the two scaled
        (and optionally filtered) real-space arrays.
    """
    # fourier transform: use pyfftw if available
    pyfftw_flag = 1
    try:
        import pyfftw
    except ImportError: pyfftw_flag = 0
    try:
        if pyfftw_flag == 0: raise ImportError
        inputa1 = pyfftw.n_byte_align_empty(map_1.fullMap.shape, 16, 'complex128')
        outputa1 = pyfftw.n_byte_align_empty(map_1.fullMap.shape, 16, 'complex128')
        # fft planning, set planning_timelimit or flags to make it faster
        fft = pyfftw.FFTW(inputa1,outputa1,direction='FFTW_FORWARD',axes=(0,1,2),flags=['FFTW_ESTIMATE'])#planning_timelimit=0.5)
        inputa1[:,:,:] = map_1.fullMap[:,:,:]
        fft()
        ft1 = Map(fftshift(outputa1), map_1.origin, map_1.apix, map_1.filename, map_1.header[:])
    except:
        # use numpy fft instead
        ft1 = map_1.fourier_transform()
    try:
        if pyfftw_flag == 0: raise ImportError
        inputa2 = pyfftw.n_byte_align_empty(map_2.fullMap.shape, 16, 'complex128')
        outputa2 = pyfftw.n_byte_align_empty(map_2.fullMap.shape, 16, 'complex128')
        fft = pyfftw.FFTW(inputa2,outputa2,direction='FFTW_FORWARD',axes=(0,1,2),flags=['FFTW_ESTIMATE'])#planning_timelimit=0.5)
        inputa2[:,:,:] = map_2.fullMap[:,:,:]
        fft()
        ft2 = Map(fftshift(outputa2), map_2.origin, map_2.apix, map_2.filename, map_2.header[:])
    except:
        ft2 = map_2.fourier_transform()
    #low pass filter before scaling
    if reso != None:
        cutoff1 = map_1.apix/float(reso)
        cutoff2 = map_2.apix/float(reso)
        if lpfiltb and not lpfilta:
            ft1._tanh_lowpass(cutoff1,fall=0.2,ftmap=True)
            ft2._tanh_lowpass(cutoff2,fall=0.2,ftmap=True)
    # max dimension
    size1 = max(map_1.x_size(),map_1.y_size(),map_1.z_size())
    #shell values correspond to freq: 0-0.5 (nyquist)
    dist1 = map_1._make_fourier_shell(1)/map_1.apix
    size2 = max(map_2.x_size(),map_2.y_size(),map_2.z_size())
    #shell values correspond to freq: 0-0.5 (nyquist)
    dist2 = map_2._make_fourier_shell(1)/map_2.apix
    #SCALING
    # storing for plots
    ft1_avg = []
    ft2_avg = []
    ft1_avg_new = []
    lfreq = []
    # select max spatial frequency to iterate to. low resolution map
    maxlevel = 0.5/max(map_1.apix,map_2.apix)
    # loop over freq shells, shellwidth=0.005
    #for x in arange(0,maxlevel+step,step):
    nc = 0
    x = 0.0
    highlevel = x+step
    while (x<maxlevel):
        #print x,highlevel, maxlevel
        # indices between upper and lower shell bound
        fshells1 = ((dist1 < min(maxlevel,highlevel)) & (dist1 >= x))
        # radial average
        shellvec1 = ft1.fullMap[fshells1]
        # indices between upper and lower shell bound
        fshells2 = ((dist2 < min(maxlevel,highlevel)) & (dist2 >= x))
        # radial average
        shellvec2 = ft2.fullMap[fshells2]
        #if len(shellvec1) == 0 or len(shellvec2) == 0: continue
        abs1 = abs(shellvec1)
        abs2 = abs(shellvec2)
        #print nonzero(abs1)
        #print nonzero(abs2)
        ns1 = len(nonzero(abs1)[0]) #or count_nonzero
        ns2 = len(nonzero(abs2)[0]) #or count_nonzero
        # too few non-zero samples: widen the shell and retry this window
        if ns1 < 10 or ns2 < 10:
            nc += 1
            highlevel = min(maxlevel,x+(nc+1)*step)
            x = max(0.0,x-nc*step)
            #print ns1, ns2
            continue
        else: nc = 0
        mft1 = npmean(abs1)#npmean(sqrt(shellvec1.real**2+shellvec1.imag**2))
        mft2 = npmean(abs2)#npmean(sqrt(shellvec2.real**2+shellvec2.imag**2))#npmean(abs(ft2.fullMap[fshells2]))
        if mft1 == 0.0 and mft2 == 0.0:
            continue
        # sq of radial avg amplitude
        ft1_avg.append(np_log10(npmean(square(abs1))))
        ft2_avg.append(np_log10(npmean(square(abs2))))
        # scale to amplitudes of the ref map
        if ref:
            if mft1 == 0.0: continue
            ft1.fullMap[fshells1] = shellvec1*(mft2/mft1)
        else:
            # replace with avg amplitudes for the two maps
            ft1.fullMap[fshells1] = shellvec1*(mft2+mft1)/(2*mft1)
            ft2.fullMap[fshells2] = shellvec2*(mft2+mft1)/(2*mft2)
        # new radial average (to check)
        mft1 = npmean(abs(ft1.fullMap[fshells1]))#numsum(absolute(ft1.fullMap[fshells1]))/len(shellvec1)
        ft1_avg_new.append(np_log10(npmean(square(abs(ft1.fullMap[fshells1])))))
        lfreq.append(highlevel)
        sampling_frq = highlevel
        cutoff_freq = min((1.0/reso) + 0.25,maxlevel) # 0.25 added to reso based cutoff
        #print 'freq cutoff', (1.0/reso)+0.25, maxlevel
        # scale the rest and break after relevant frequencies
        if sampling_frq > cutoff_freq:
            fshells1 = (dist1 >= highlevel)
            shellvec1 = ft1.fullMap[fshells1]
            mft1 = npmean(abs(shellvec1))
            fshells2 = (dist2 >= highlevel)
            shellvec2 = ft2.fullMap[fshells2]
            mft2 = npmean(abs(shellvec2))
            if mft1 == 0.0 and mft2 == 0.0:
                break
            ft1_avg.append(np_log10(npmean(square(abs(shellvec1)))))
            ft2_avg.append(np_log10(npmean(square(abs(shellvec2)))))
            if ref:
                if mft1 == 0.0: break
                ft1.fullMap[fshells1] = shellvec1*(mft2/mft1)
            else:
                ft1.fullMap[fshells1] = shellvec1*(mft2+mft1)/(2*mft1)
                ft2.fullMap[fshells2] = shellvec2*(mft2+mft1)/(2*mft2)
            mft1 = npmean(abs(ft1.fullMap[fshells1])) #after scaling
            ft1_avg_new.append(np_log10(npmean(square(abs(ft1.fullMap[fshells1]))))) #after scaling
            lfreq.append((highlevel+step/2))
            break
        x = highlevel
        highlevel = x+step
    # low pass filter after?
    #low pass filter before scaling
    if reso != None:
        if lpfilta and not lpfiltb:
            ft1._tanh_lowpass(cutoff1,fall=0.2,ftmap=True)
            ft2._tanh_lowpass(cutoff2,fall=0.2,ftmap=True)
    # ifft
    try:
        if pyfftw_flag == 0: raise ImportError
        ifft = pyfftw.FFTW(inputa1,outputa1,direction='FFTW_BACKWARD',axes=(0,1,2),flags=['FFTW_ESTIMATE'])#planning_timelimit=0.5)
        inputa1[:,:,:] = ifftshift(ft1.fullMap)[:,:,:]
        ifft()
        map1_filt = Map(outputa1.real.astype('float'), map_1.origin, map_1.apix, map_1.filename, map_1.header[:])
    except:
        # use numpy ifft instead
        map1_filt = map_1.copy()
        map1_filt.fullMap = real(ifftn(ifftshift(ft1.fullMap)))
    try:
        if pyfftw_flag == 0: raise ImportError
        ifft = pyfftw.FFTW(inputa2,outputa2,direction='FFTW_BACKWARD',axes=(0,1,2),flags=['FFTW_ESTIMATE'])#planning_timelimit=0.5)
        inputa2[:,:,:] = ifftshift(ft2.fullMap)[:,:,:]
        ifft()
        map2_filt = Map(outputa2.real.astype('float'), map_2.origin, map_2.apix, map_2.filename, map_2.header[:])
    except:
        map2_filt = map_2.copy()
        map2_filt.fullMap = real(ifftn(ifftshift(ft2.fullMap)))
    # diagnostic spectra plot; best-effort, never fails the computation
    try:
        # to check frequency plots
        #print lfreq
        #print ft1_avg
        #print ft2_avg
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        from matplotlib import pylab
        try: plt.style.use('ggplot')
        except AttributeError: pass
        plt.rcParams.update({'font.size': 18})
        plt.rcParams.update({'legend.fontsize': 18})
        plt.plot(lfreq,ft1_avg,'r--',label='map1')
        plt.plot(lfreq,ft2_avg,'bs',label='map2')
        plt.plot(lfreq,ft1_avg_new,'g^',label='scaled')
        #plt.show()
        leg = plt.legend(loc='upper right')
        for legobj in leg.legendHandles:
            legobj.set_linewidth(2.0)
        pylab.savefig("spectra.png")
        plt.close()
    except: pass
    return map1_filt.fullMap,map2_filt.fullMap
# FSC for two maps
def _fsc(self,map_1,map_2,shellmin,shellmax,step=0.005,c1=0,c2=0,reso=None):
    """
    Compute and plot the Fourier Shell Correlation (FSC) between two maps.

    NOTE(review): shellmin, shellmax, c1 and c2 are accepted but never read;
    the function prints per-shell FSC values, shows/saves a plot
    ("test.png") and returns None — it looks like a debugging helper.

    Arguments:
        *map_1*, *map_2*
            Map instances to compare (assumed on compatible grids; the
            frequency shells are derived from map_1 only).
        *step*
            Fourier shell width.
        *reso*
            Resolution used to derive the stopping frequency; must not be
            None (it is dereferenced unconditionally).
    """
    # fourier transform: use pyfftw if available
    pyfftw_flag = 1
    try:
        import pyfftw
    except ImportError: pyfftw_flag = 0
    try:
        if pyfftw_flag == 0: raise ImportError
        inputa1 = pyfftw.n_byte_align_empty(map_1.fullMap.shape, 16, 'complex128')
        outputa1 = pyfftw.n_byte_align_empty(map_1.fullMap.shape, 16, 'complex128')
        # fft planning, set planning_timelimit or flags to make it faster
        fft = pyfftw.FFTW(inputa1,outputa1,direction='FFTW_FORWARD',axes=(0,1,2),flags=['FFTW_ESTIMATE'])#planning_timelimit=0.5)
        inputa1[:,:,:] = map_1.fullMap[:,:,:]
        fft()
        ft1 = Map(fftshift(outputa1), map_1.origin, map_1.apix, map_1.filename, map_1.header[:])
    except:
        # use numpy fft instead
        ft1 = map_1.fourier_transform()
    try:
        if pyfftw_flag == 0: raise ImportError
        inputa2 = pyfftw.n_byte_align_empty(map_2.fullMap.shape, 16, 'complex128')
        outputa2 = pyfftw.n_byte_align_empty(map_2.fullMap.shape, 16, 'complex128')
        fft = pyfftw.FFTW(inputa2,outputa2,direction='FFTW_FORWARD',axes=(0,1,2),flags=['FFTW_ESTIMATE'])#planning_timelimit=0.5)
        inputa2[:,:,:] = map_2.fullMap[:,:,:]
        fft()
        ft2 = Map(fftshift(outputa2), map_2.origin, map_2.apix, map_2.filename, map_2.header[:])
    except:
        ft2 = map_2.fourier_transform()
    #low pass filter before scaling
    if reso != None:
        cutoff1 = map_1.apix/float(reso)
        cutoff2 = map_2.apix/float(reso)
    # max dimension
    size1 = max(map_1.x_size(),map_1.y_size(),map_1.z_size())
    #shell values correspond to freq: 0-0.5 (nyquist)
    #and convert to abs frequencies
    dist1 = map_1._make_fourier_shell(1)/map_1.apix
    size2 = max(map_2.x_size(),map_2.y_size(),map_2.z_size())
    #SCALING
    # storing for plots
    lfreq = []
    # select max spatial frequency to iterate to. low resolution map
    maxlevel = 0.5/max(map_1.apix,map_2.apix)
    # loop over freq shells, shellwidth=0.005
    #for x in arange(0,maxlevel+step,step):
    nc = 0
    x = 0.0
    listC = []
    highlevel = x+step
    while (x<maxlevel):
        #print x,highlevel, maxlevel
        # indices between upper and lower shell bound
        C1 = 0.0
        C2 = 0.0
        C3 = 0.0
        fshells = argwhere((dist1 < min(maxlevel,highlevel)) & (dist1 >= x))
        # shell values
        shellvec1 = ft1.fullMap[transpose(fshells)]
        # shell values
        shellvec2 = ft2.fullMap[transpose(fshells)]
        #if len(shellvec1) == 0 or len(shellvec2) == 0: continue
        abs1 = abs(shellvec1)
        abs2 = abs(shellvec2)
        #print nonzero(abs1)
        #print nonzero(abs2)
        ns1 = len(nonzero(abs1)[0]) #or count_nonzero
        ns2 = len(nonzero(abs2)[0]) #or count_nonzero
        # too few non-zero samples: widen the shell and retry this window
        if ns1 < 10 or ns2 < 10:
            nc += 1
            highlevel = min(maxlevel,x+(nc+1)*step)
            x = max(0.0,x-nc*step)
            #print ns1, ns2
            continue
        else: nc = 0
        # accumulate FSC terms over the Hermitian half of the shell
        for v in fshells:
            if v[2] > 0 or (v[0] >= 0 and (v[1] >= 0 or v[0] != 0)):
                C1 += ft1.fullMap[v[0]][v[1]][v[2]]*conjugate(ft2.fullMap[v[0]][v[1]][v[2]])
                C2 += ft1.fullMap[v[0]][v[1]][v[2]]*conjugate(ft1.fullMap[v[0]][v[1]][v[2]])
                C3 += ft2.fullMap[v[0]][v[1]][v[2]]*conjugate(ft2.fullMap[v[0]][v[1]][v[2]])
        listC.append(abs(C1)/sqrt(abs(C2)*abs(C3)))
        print(abs(C1)/sqrt(abs(C2)*abs(C3)), (x+highlevel)/2.)
        lfreq.append(highlevel)
        sampling_frq = highlevel
        cutoff_freq = min((1.0/reso) + 0.25,maxlevel) # 0.1 added to reso based cutoff
        #print 'freq cutoff', (1.0/reso), sampling_frq/map_1.apix
        # scale the rest and break after relevant frequencies
        if sampling_frq > cutoff_freq:
            fshells = argwhere(dist1 >= highlevel)
            for v in fshells:
                if v[2] > 0 or (v[0] >= 0 and (v[1] >= 0 or v[0] != 0)):
                    C1 += ft1.fullMap[v[0]][v[1]][v[2]]*conjugate(ft2.fullMap[v[0]][v[1]][v[2]])
                    C2 += ft1.fullMap[v[0]][v[1]][v[2]]*conjugate(ft1.fullMap[v[0]][v[1]][v[2]])
                    C3 += ft2.fullMap[v[0]][v[1]][v[2]]*conjugate(ft2.fullMap[v[0]][v[1]][v[2]])
            listC.append(abs(C1)/sqrt(abs(C2)*abs(C3)))
            print(abs(C1)/sqrt(abs(C2)*abs(C3)), (x+highlevel)/2.)
            lfreq.append((highlevel+step/2))
            break
        x = highlevel
        highlevel = x+step
    # to check frequency plots
    import matplotlib
    #matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    from matplotlib import pylab
    fig = plt.plot(lfreq,listC,'g^')
    plt.show()
    pylab.savefig("test.png")
    plt.close()
    return
#Guess not requited here. Check and remove it.
def get_clash_map(self,emmap, apix):
    """Return a clash-detection grid derived from *emmap* at voxel size *apix*.

    Thin wrapper around ``Map._make_clash_map``.
    """
    return emmap._make_clash_map(apix)
def get_sm_score(self, struct, ncomp, template_grid, cvol, apix):
overlay_maplist = []
overlay_maplist = self.get_overlay_comp_maplist(struct,template_grid)
nc = list(range(ncomp))
cpair = list(itertools.combinations(nc,2))
score = 0.0
n_overlap_voxel = 0
overlap_volume = 0.0
for i in cpair:
n_overlap_voxel = (overlay_maplist[i[0]].fullMap * overlay_maplist[i[1]].fullMap).sum()
#overlap_volume = ((n_overlap_voxel*2)*apix)**3
overlap_volume = ((apix**3)*n_overlap_voxel) * 2
clash_percent = (float(overlap_volume / (cvol[i[0]]+cvol[i[1]])))
score = score + clash_percent
return -(score)
def get_overlay_comp_maplist(self, struct,template_grid):
    """
    Build one atom-overlay map per chain of *struct* on the template grid.

    CHANGE HERE FOR SHAPE SCORE BASED ON OVERLAP SCORE OR GRID SCORE:
    swap make_atom_overlay_map1 for blurrer.get_shapeGrid to use the grid
    score instead.
    """
    blurrer = StructureBlurrer()
    chains = struct.split_into_chains()
    return [blurrer.make_atom_overlay_map1(template_grid, chain)
            for chain in chains]
| OniDaito/ChimeraXTempy | TEMPy/ScoringFunctions.py | Python | mit | 101,337 | [
"Gaussian"
] | 85be23ff175e247d4ee5398b5c7311d6da244fafa1b646c11aa4b20222e74150 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
"""
Common test support for pymatgen test scripts.
This single module should provide all the common functionality for pymatgen
tests in a single location, so that test scripts can just import it and work
right away.
"""
import unittest
import tempfile
import numpy.testing.utils as nptu
from six.moves import zip
from io import open
import os
import json
from monty.json import MontyDecoder
from monty.serialization import loadfn
from monty.json import MSONable
from monty.dev import requires
from pymatgen import SETTINGS, MPRester
class PymatgenTest(unittest.TestCase):
"""
Extends unittest.TestCase with functions (taken from numpy.testing.utils)
that support the comparison of arrays.
"""
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
STRUCTURES_DIR = os.path.join(MODULE_DIR, "structures")
"""
Dict for test structures to aid testing.
"""
TEST_STRUCTURES = {}
for fn in os.listdir(STRUCTURES_DIR):
TEST_STRUCTURES[fn.rsplit(".", 1)[0]] = loadfn(os.path.join(
STRUCTURES_DIR, fn), cls=MontyDecoder)
@classmethod
def get_structure(cls, name):
return cls.TEST_STRUCTURES[name].copy()
@classmethod
@requires(SETTINGS.get("PMG_MAPI_KEY"), "PMG_MAPI_KEY needs to be set.")
def get_mp_structure(cls, mpid):
m = MPRester()
return m.get_structure_by_material_id(mpid)
@staticmethod
def assert_almost_equal(actual, desired, decimal=7, err_msg='',
verbose=True):
"""
Alternative naming for assertArrayAlmostEqual.
"""
return PymatgenTest.assertArrayAlmostEqual(
actual, desired, decimal, err_msg, verbose)
@staticmethod
def assert_equal(actual, desired, err_msg='', verbose=True):
"""
Alternative naming for assertArrayEqual.
"""
return PymatgenTest.assertArrayEqual(actual, desired,
err_msg=err_msg, verbose=verbose)
@staticmethod
def assertArrayAlmostEqual(actual, desired, decimal=7, err_msg='',
verbose=True):
"""
Tests if two arrays are almost equal to a tolerance. The CamelCase
naming is so that it is consistent with standard unittest methods.
"""
return nptu.assert_almost_equal(actual, desired, decimal, err_msg,
verbose)
@staticmethod
def assertArrayEqual(actual, desired, err_msg='', verbose=True):
"""
Tests if two arrays are equal. The CamelCase naming is so that it is
consistent with standard unittest methods.
"""
return nptu.assert_equal(actual, desired, err_msg=err_msg,
verbose=verbose)
def serialize_with_pickle(self, objects, protocols=None, test_eq=True):
"""
Test whether the object(s) can be serialized and deserialized with pickle.
This method tries to serialize the objects with pickle and the protocols
specified in input. Then it deserializes the pickle format and compares
the two objects with the __eq__ operator if test_eq == True.
Args:
objects: Object or list of objects.
protocols: List of pickle protocols to test. If protocols is None, HIGHEST_PROTOCOL is tested.
Returns:
Nested list with the objects deserialized with the specified protocols.
"""
# Use the python version so that we get the traceback in case of errors
import pickle as pickle
from pymatgen.serializers.pickle_coders import pmg_pickle_load, pmg_pickle_dump
# Build a list even when we receive a single object.
got_single_object = False
if not isinstance(objects, (list, tuple)):
got_single_object = True
objects = [objects]
if protocols is None:
#protocols = set([0, 1, 2] + [pickle.HIGHEST_PROTOCOL])
protocols = [pickle.HIGHEST_PROTOCOL]
# This list will contains the object deserialized with the different protocols.
objects_by_protocol, errors = [], []
for protocol in protocols:
# Serialize and deserialize the object.
mode = "wb"
fd, tmpfile = tempfile.mkstemp(text="b" not in mode)
try:
with open(tmpfile, mode) as fh:
#pickle.dump(objects, fh, protocol=protocol)
pmg_pickle_dump(objects, fh, protocol=protocol)
except Exception as exc:
errors.append("pickle.dump with protocol %s raised:\n%s" % (protocol, str(exc)))
continue
try:
with open(tmpfile, "rb") as fh:
#new_objects = pickle.load(fh)
new_objects = pmg_pickle_load(fh)
except Exception as exc:
errors.append("pickle.load with protocol %s raised:\n%s" % (protocol, str(exc)))
continue
# Test for equality
if test_eq:
for old_obj, new_obj in zip(objects, new_objects):
#print("old_obj:", type(old_obj))
#print(old_obj)
#print("new_obj:", type(new_obj))
#print(new_obj)
self.assertEqual(old_obj, new_obj)
# Save the deserialized objects and test for equality.
objects_by_protocol.append(new_objects)
if errors:
raise ValueError("\n".join(errors))
# Return nested list so that client code can perform additional tests.
if got_single_object:
return [o[0] for o in objects_by_protocol]
else:
return objects_by_protocol
def tmpfile_write(self, string):
"""
Write string to a temporary file. Returns the name of the temporary file.
"""
fd, tmpfile = tempfile.mkstemp(text=True)
with open(tmpfile, "w") as fh:
fh.write(string)
return tmpfile
def assertMSONable(self, obj, test_if_subclass=True):
    """
    Tests if obj is MSONable and tries to verify whether the contract is fulfilled.

    By default, the method tests whether obj is an instance of MSONable.
    This check can be deactivated by setting test_if_subclass to False.
    """
    if test_if_subclass:
        self.assertIsInstance(obj, MSONable)
    # Round-trip contract: as_dict() -> from_dict() -> as_dict() must be a
    # fixed point, i.e. serialization loses no information.
    self.assertDictEqual(obj.as_dict(), obj.__class__.from_dict(obj.as_dict()).as_dict())
    # The JSON representation must also be parseable by MontyDecoder
    # (raises if to_json() emits something the decoder cannot handle).
    json.loads(obj.to_json(), cls=MontyDecoder)
| aykol/pymatgen | pymatgen/util/testing.py | Python | mit | 6,802 | [
"pymatgen"
] | 4c3064b9316d6452e9d966211a743ccf0e2b7990e06485b740f85385afba8427 |
#! /usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-wms-jobs-select-output-search
# Author : Vladimir Romanovsky
########################################################################
"""
Retrieve output sandbox for DIRAC Jobs for the given selection and search for a string in their std.out
"""
__RCSID__ = "$Id$"
import os
from shutil import rmtree
import DIRAC
from DIRAC.Core.Base import Script
# Job-selection switches: each optional switch narrows the set of jobs that
# will be searched (they map onto the keyword arguments of
# Dirac.selectJobs() further below).
Script.registerSwitch( "", "Status=", "Primary status" )
Script.registerSwitch( "", "MinorStatus=", "Secondary status" )
Script.registerSwitch( "", "ApplicationStatus=", "Application status" )
Script.registerSwitch( "", "Site=", "Execution site" )
Script.registerSwitch( "", "Owner=", "Owner (DIRAC nickname)" )
Script.registerSwitch( "", "JobGroup=", "Select jobs for specified job group" )
Script.registerSwitch( "", "Date=", "Date in YYYY-MM-DD format, if not specified default is today" )
Script.registerSwitch( "", "File=", "File name,if not specified default is std.out " )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
                                     'Usage:',
                                     '  %s [option|cfgfile] ... String ...' % Script.scriptName,
                                     'Arguments:',
                                     '  String:   string to search for' ] ) )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
#Default values
# None means "do not filter on this attribute" for all selection criteria.
status = None
minorStatus = None
appStatus = None
site = None
owner = None
jobGroup = None
date = None
filename = 'std.out'
# Exactly one positional argument (the string to search for) is required;
# Script.showHelp() exits the process.
if len( args ) != 1:
  Script.showHelp()
searchstring = str( args[0] )
# Fold the parsed switches into the selection variables; switch names are
# matched case-insensitively.
for switch in Script.getUnprocessedSwitches():
  if switch[0].lower() == "status":
    status = switch[1]
  elif switch[0].lower() == "minorstatus":
    minorStatus = switch[1]
  elif switch[0].lower() == "applicationstatus":
    appStatus = switch[1]
  elif switch[0].lower() == "site":
    site = switch[1]
  elif switch[0].lower() == "owner":
    owner = switch[1]
  elif switch[0].lower() == "jobgroup":
    jobGroup = switch[1]
  elif switch[0].lower() == "date":
    date = switch[1]
  elif switch[0].lower() == "file":
    filename = switch[1]
# NOTE(review): selDate is computed but never used in the remainder of this
# script -- presumably intended for a user-facing message; confirm before
# removing.
selDate = date
if not date:
  selDate = 'Today'
from DIRAC.Interfaces.API.Dirac import Dirac
dirac = Dirac()
exitCode = 0
errorList = []
resultDict = {}
# Select all jobs matching the requested criteria; a failed selection is
# fatal (exit code 2).
result = dirac.selectJobs( status = status, minorStatus = minorStatus, applicationStatus = appStatus,
                           site = site, owner = owner, jobGroup = jobGroup, date = date )
if result['OK']:
  jobs = result['Value']
else:
  print "Error in selectJob", result['Message']
  DIRAC.exit( 2 )
# For every selected job: download its output sandbox into a directory named
# after the job id, scan the requested file for the search string, record the
# first matching line found, then remove the directory again.
for job in jobs:
  result = dirac.getOutputSandbox( job )
  if result['OK']:
    if os.path.exists( '%s' % job ):
      lines = []
      try:
        lines = open( os.path.join( job, filename ) ).readlines()
      except Exception, x:
        # Missing/unreadable file is recorded but does not abort the scan.
        errorList.append( ( job, x ) )
      for line in lines:
        if line.count( searchstring ):
          # Later matches overwrite earlier ones: only the last matching
          # line of the file is kept per job.
          resultDict[job] = line
      rmtree( "%s" % ( job ) )
  else:
    errorList.append( ( job, result['Message'] ) )
    exitCode = 2
# Print one (jobID, matching line) tuple per job that matched.
for result in resultDict.iteritems():
  print result
DIRAC.exit( exitCode )
| Sbalbp/DIRAC | Interfaces/scripts/dirac-wms-jobs-select-output-search.py | Python | gpl-3.0 | 3,266 | [
"DIRAC"
] | b6401efe9830c4ddd39263ce8c12d600a807dc03395e88d27491666dd9fb6b19 |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Jason Swails
# Contributors:
#
# This code for reading Amber restart and inpcrd files was taken from ParmEd,
# which is released under the GNU Lesser General Public License
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""
This module provides the ability to read Amber inpcrd/restart files as well as
Amber NetCDF restart files. This code was taken from ParmEd and simplified by
removing the functionality that is not needed.
"""
from __future__ import print_function, division
from distutils.version import StrictVersion
from math import ceil
import os
import warnings
import numpy as np
from mdtraj import version
from mdtraj.formats.registry import FormatRegistry
from mdtraj.utils import ensure_type, import_, in_units_of, cast_indices, six
__all__ = ['AmberRestartFile', 'load_restrt', 'AmberNetCDFRestartFile',
'load_ncrestrt']
range = six.moves.range
@FormatRegistry.register_loader('.rst7')
@FormatRegistry.register_loader('.restrt')
@FormatRegistry.register_loader('.inpcrd')
def load_restrt(filename, top=None, atom_indices=None):
    """Load an AMBER ASCII restart/inpcrd file into a trajectory.

    These files carry no topology information, so a topology source must be
    supplied separately.

    Parameters
    ----------
    filename : str
        name of the AMBER restart file
    top : {str, Trajectory, Topology}
        Path to a file containing topology information (e.g. a PDB or AMBER
        prmtop) or an object carrying a topology, used to supply the
        information these files lack.
    atom_indices : array_like, optional
        If not None, read only this subset of the atom coordinates.

    Returns
    -------
    trajectory : md.Trajectory
        The resulting trajectory, as an md.Trajectory object

    See Also
    --------
    mdtraj.AmberRestartFile : Low level interface to AMBER restart files
    """
    from mdtraj.core.trajectory import _parse_topology

    parsed_topology = _parse_topology(top)
    indices = cast_indices(atom_indices)
    restart = AmberRestartFile(filename)
    with restart as handle:
        return handle.read_as_traj(parsed_topology, atom_indices=indices)
@FormatRegistry.register_fileobject('.rst7')
@FormatRegistry.register_fileobject('.restrt')
@FormatRegistry.register_fileobject('.inpcrd')
class AmberRestartFile(object):
    """Interface for reading and writing AMBER ASCII restart files. This is a
    file-like object, that supports both reading and writing depending on the
    `mode` flag. It implements the context manager protocol, so you can also
    use it with the python 'with' statement.

    Parameters
    ----------
    filename : str
        The name of the file to open
    mode : {'r', 'w'}, default='r'
        The mode in which to open the file. Valid options are 'r' or 'w' for
        'read' or 'write'
    force_overwrite : bool, default=True
        In write mode, if a file named `filename` already exists, clobber it and
        overwrite it

    See Also
    --------
    md.AmberNetCDFRestartFile : Low level interface to AMBER NetCDF-format restart files
    """
    distance_unit = 'angstroms'

    def __init__(self, filename, mode='r', force_overwrite=True):
        self._closed = True
        self._mode = mode
        self._filename = filename

        if mode not in ('r', 'w'):
            raise ValueError("mode must be one of ['r', 'w']")
        if mode == 'w' and not force_overwrite and os.path.exists(filename):
            raise IOError('"%s" already exists' % filename)

        if mode == 'w':
            self._needs_initialization = True
            self._handle = open(filename, mode)
            self._closed = False
        elif mode == 'r':
            # Peek at line 2 (natom) to validate the format up front.
            with open(filename, mode) as f:
                f.readline()
                words = f.readline().split()
                try:
                    self._n_atoms = int(words[0])
                except (IndexError, ValueError):
                    raise TypeError('"%s" is not a recognized Amber restart' %
                                    filename)
            self._needs_initialization = False
        else:
            raise RuntimeError()

    @property
    def n_atoms(self):
        self._validate_open()
        if self._needs_initialization:
            raise IOError('The file is uninitialized')
        return self._n_atoms

    @property
    def n_frames(self):
        return 1  # always 1 frame

    def _validate_open(self):
        if self._closed:
            raise IOError('The file is closed.')

    def _parse(self, lines):
        """ Parses the file """
        self._time = None
        try:
            words = lines[1].split()
            self._n_atoms = natom = int(words[0])
        except (IndexError, ValueError):
            raise TypeError('not a recognized Amber restart')

        time = None
        if len(words) >= 2:
            time = float(words[1])

        # Coordinates are written 6 per line (2 atoms of 3), 12 chars each.
        lines_per_frame = int(ceil(natom / 2))
        if len(lines) == lines_per_frame + 2:
            hasbox = hasvels = False
        elif natom in (1, 2) and len(lines) == 4:
            # This is the _only_ case where line counting does not work -- there
            # is either 1 or 2 atoms and there are 4 lines. The 1st 3 lines are
            # the title, natom/time, and coordinates. The 4th are almost always
            # velocities since it's hard to have a periodic system this small.
            # However, velocities (which are scaled down by 20.445) have a ~0%
            # chance of being 60+, so we can pretty easily tell if the last line
            # has box dimensions and angles or velocities. I cannot envision a
            # plausible scenario where the detection here will ever fail
            line = lines[3]
            if natom == 1:
                tmp = [line[i:i+12] for i in range(0, 72, 12) if
                       line[i:i+12].strip()]
                if len(tmp) == 3:
                    hasvels = True
                    hasbox = False
                elif len(tmp) == 6:
                    hasbox = True
                    hasvels = False
                else:
                    raise TypeError('not a recognized Amber restart')
            else:
                # Ambiguous case
                tmp = [float(line[i:i+12]) >= 60.0 for i in range(0, 72, 12)]
                if any(tmp):
                    hasbox = True
                    hasvels = False
                else:
                    hasvels = True
                    hasbox = False
        elif len(lines) == lines_per_frame + 3:
            hasbox = True
            hasvels = False
        elif len(lines) == 2*lines_per_frame + 2:
            hasbox = False
            hasvels = True
        elif len(lines) == 2*lines_per_frame + 3:
            hasbox = hasvels = True
        else:
            raise TypeError('Badly formatted restart file. Has %d lines for '
                            '%d atoms' % (len(lines), natom))

        coordinates = np.zeros((1, natom, 3))
        if time is None:
            time = np.zeros(1)
        else:
            time = np.asarray((time,))

        # Fill the coordinates
        for i in range(lines_per_frame):
            line = lines[i+2]  # Skip first two lines
            i2 = i * 2
            coordinates[0,i2,:] = [float(line[j:j+12]) for j in range(0,36,12)]
            i2 += 1
            if i2 < natom:
                coordinates[0,i2,:] = [float(line[j:j+12]) for j in
                                       range(36,72,12)]
        if hasbox:
            cell_lengths = np.zeros((1,3))
            cell_angles = np.zeros((1,3))
            line = lines[-1]
            cell_lengths[0,:] = [float(line[i:i+12]) for i in range(0,36,12)]
            cell_angles[0,:] = [float(line[i:i+12]) for i in range(36,72,12)]
        else:
            cell_lengths = cell_angles = None

        return coordinates, time, cell_lengths, cell_angles

    def read_as_traj(self, topology, atom_indices=None):
        """Read an AMBER ASCII restart file as a trajectory.

        Parameters
        ----------
        topology : Topology
            The system topology
        atom_indices : array_like, optional
            If not none, then read only a subset of the atoms coordinates from the
            file. This may be slightly slower than the standard read because it required
            an extra copy, but will save memory.

        Returns
        -------
        trajectory : Trajectory
            A trajectory object with 1 frame created from the file.
        """
        from mdtraj.core.trajectory import Trajectory
        if atom_indices is not None:
            topology = topology.subset(atom_indices)

        xyz, time, cell_lengths, cell_angles = self.read(atom_indices=atom_indices)
        # Convert from file units (angstroms) to Trajectory units in place.
        xyz = in_units_of(xyz, self.distance_unit, Trajectory._distance_unit,
                          inplace=True)
        cell_lengths = in_units_of(cell_lengths, self.distance_unit,
                                   Trajectory._distance_unit, inplace=True)
        return Trajectory(xyz=xyz, topology=topology, time=time,
                          unitcell_lengths=cell_lengths,
                          unitcell_angles=cell_angles)

    def read(self, atom_indices=None):
        """Read data from an AMBER ASCII restart file

        Parameters
        ----------
        atom_indices : np.ndarray, dtype=int, optional
            The specific indices of the atoms you'd like to retrieve. If not
            supplied, all of the atoms will be retrieved.

        Returns
        -------
        coordinates : np.ndarray, shape=(1, n_atoms, 3)
            The cartesian coordinates of the atoms, in units of angstroms. These
            files only ever contain 1 frame
        time : np.ndarray, None
            The time corresponding to the frame, in units of picoseconds, or
            None if no time information is present
        cell_lengths : np.ndarray, None
            The lengths (a, b, c) of the unit cell for the frame in angstroms,
            or None if the information is not present in the file
        cell_angles : np.ndarray, None
            The angles (\alpha, \beta, \gamma) defining the unit cell for each
            frame, or None if the information is not present in the file.
        """
        if self._mode != 'r':
            raise IOError('The file was opened in mode=%s. Reading is not '
                          'allowed.' % self._mode)
        with open(self._filename, 'r') as f:
            lines = f.readlines()
        coordinates, time, cell_lengths, cell_angles = self._parse(lines)
        if atom_indices is not None:
            atom_slice = ensure_type(atom_indices, dtype=int, ndim=1,
                                     name='atom_indices', warn_on_cast=False)
            # Compare elementwise *before* reducing with np.all. The original
            # code wrote `not np.all(atom_slice) >= 0`, which reduces first
            # (testing truthiness, not sign) and therefore never rejected
            # negative indices.
            if not np.all(atom_slice >= 0):
                raise ValueError('Entries in atom_slice must be >= 0')
            coordinates = coordinates[:, atom_slice, :]
        return coordinates, time, cell_lengths, cell_angles

    def write(self, coordinates, time=None, cell_lengths=None,
              cell_angles=None):
        """Write one frame of a MD trajectory to disk in the AMBER ASCII restart
        file format.

        Parameters
        ----------
        coordinates : np.ndarray, dtype=np.float32, shape=([1,] n_atoms, 3)
            The cartesian coordinates of each atom, in units of angstroms. Must
            be only a single frame (shape can be (1,N,3) or (N,3) where N is
            the number of atoms)
        time : array-like with 1 element or float, optional
            The time corresponding to this frame. If not specified, a place
            holder of 0 will be written
        cell_lengths : np.ndarray, dtype=np.double, shape=([1,] 3)
            The lengths (a,b,c) of the unit cell for the frame in Angstroms
        cell_angles : np.ndarray, dtype=np.double, shape=([1,] 3)
            The angles between the unit cell vectors for the frame in Degrees
        """
        if self._mode != 'w':
            raise IOError('The file was opened in mode=%s. Writing not allowed.'
                          % self._mode)
        if not self._needs_initialization:
            # Must have already been written -- can only write once
            raise RuntimeError('restart file has already been written -- can '
                               'only write one frame to restart files.')
        # These are no-ops.
        # coordinates = in_units_of(coordinates, None, 'angstroms')
        # time = in_units_of(time, None, 'picoseconds')
        # cell_lengths = in_units_of(cell_lengths, None, 'angstroms')
        # cell_angles = in_units_of(cell_angles, None, 'degrees')

        # typecheck all of the input arguments rigorously
        coordinates = ensure_type(coordinates, np.float32, 3, 'coordinates',
                                  length=None, can_be_none=False,
                                  shape=(1,None,3), warn_on_cast=False,
                                  add_newaxis_on_deficient_ndim=True)
        n_frames, self._n_atoms = coordinates.shape[0], coordinates.shape[1]
        if n_frames != 1:
            raise ValueError('Can only write 1 frame to a restart file!')
        if time is not None:
            try:
                time = float(time)
            except TypeError:
                raise TypeError('Can only provide a single time')
        else:
            time = 0.0
        cell_lengths = ensure_type(cell_lengths, np.float64, 2, 'cell_lengths',
                                   length=1, can_be_none=True,
                                   warn_on_cast=False,
                                   add_newaxis_on_deficient_ndim=True)
        cell_angles = ensure_type(cell_angles, np.float64, 2, 'cell_angles',
                                  length=1, can_be_none=True,
                                  warn_on_cast=False,
                                  add_newaxis_on_deficient_ndim=True)
        # Box lengths and angles must be given together or not at all.
        if ((cell_lengths is None and cell_angles is not None) or
                (cell_lengths is not None and cell_angles is None)):
            prov, negl = 'cell_lengths', 'cell_angles'
            if cell_lengths is None:
                prov, negl = negl, prov
            raise ValueError('You provided the variable "%s" but did not '
                             'provide "%s". Either provide both or neither -- '
                             'one without the other is meaningless.' %
                             (prov, negl))

        self._handle.write('Amber restart file (without velocities) written by '
                           'MDTraj\n')
        self._handle.write('%5d%15.7e\n' % (self._n_atoms, time))
        fmt = '%12.7f%12.7f%12.7f'
        # Two atoms (6 fields) per line, per the Amber rst7 fixed-width format.
        for i in range(self._n_atoms):
            acor = coordinates[0, i, :]
            self._handle.write(fmt % (acor[0], acor[1], acor[2]))
            if i % 2 == 1: self._handle.write('\n')
        if self._n_atoms % 2 == 1: self._handle.write('\n')
        if cell_lengths is not None:
            self._handle.write(fmt % (cell_lengths[0,0], cell_lengths[0,1],
                                      cell_lengths[0,2]))
            self._handle.write(fmt % (cell_angles[0,0], cell_angles[0,1],
                                      cell_angles[0,2]) + '\n')
        self._handle.flush()

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()

    def close(self):
        if not self._closed and hasattr(self, '_handle'):
            self._handle.close()
        self._closed = True

    def __del__(self):
        self.close()

    def __len__(self):
        return 1  # All restarts have only 1 frame
@FormatRegistry.register_loader('.ncrst')
def load_ncrestrt(filename, top=None, atom_indices=None):
    """Load an AMBER NetCDF restart/inpcrd file into a trajectory.

    These files carry no topology information, so a topology source must be
    supplied separately.

    Parameters
    ----------
    filename : str
        name of the AMBER restart file
    top : {str, Trajectory, Topology}
        Path to a file containing topology information (e.g. a PDB or AMBER
        prmtop) or an object carrying a topology, used to supply the
        information these files lack.
    atom_indices : array_like, optional
        If not None, read only this subset of the atom coordinates.

    Returns
    -------
    trajectory : md.Trajectory
        The resulting trajectory, as an md.Trajectory object

    See Also
    --------
    mdtraj.AmberRestartFile : Low level interface to AMBER restart files
    """
    from mdtraj.core.trajectory import _parse_topology

    parsed_topology = _parse_topology(top)
    indices = cast_indices(atom_indices)
    restart = AmberNetCDFRestartFile(filename)
    with restart as handle:
        return handle.read_as_traj(parsed_topology, atom_indices=indices)
@FormatRegistry.register_fileobject('.ncrst')
class AmberNetCDFRestartFile(object):
    """Interface for reading and writing AMBER NetCDF files. This is a file-like
    object, that supports both reading and writing depending on the `mode` flag.
    It implements the context manager protocol, so you can also use it with the
    python 'with' statement.

    Parameters
    ----------
    filename : str
        The name of the file to open
    mode : {'r', 'w'}, default='r'
        The mode in which to open the file. Valid options are 'r' or 'w' for
        'read' or 'write'
    force_overwrite : bool, default=False
        In write mode, if a file named `filename` already exists, clobber it and
        overwrite it
    """
    distance_unit = 'angstroms'

    def __init__(self, filename, mode='r', force_overwrite=False):
        self._closed = True
        self._mode = mode
        if StrictVersion(import_('scipy.version').short_version) < StrictVersion('0.12.0'):
            raise ImportError('MDTraj NetCDF support requires scipy>=0.12.0. '
                              'You have %s' % import_('scipy.version').short_version)
        netcdf = import_('scipy.io').netcdf_file

        if mode not in ('r', 'w'):
            raise ValueError("mode must be one of ['r', 'w']")
        if mode == 'w' and not force_overwrite and os.path.exists(filename):
            raise IOError('"%s" already exists' % filename)

        # AMBER uses the NetCDF3 format, with 64 bit encodings, which for
        # scipy.io.netcdf_file is "version=2"
        self._handle = netcdf(filename, mode=mode, version=2)
        self._closed = False
        if mode == 'w':
            self._needs_initialization = True
        elif mode == 'r':
            self._needs_initialization = False
        else:
            raise RuntimeError()

    @property
    def n_atoms(self):
        self._validate_open()
        if self._needs_initialization:
            raise IOError('The file is uninitialized')
        return self._handle.dimensions['atom']

    @property
    def n_frames(self):
        return 1  # always 1 frame

    def _validate_open(self):
        if self._closed:
            raise IOError('The file is closed.')

    def read_as_traj(self, topology, atom_indices=None):
        """Read an AMBER ASCII restart file as a trajectory.

        Parameters
        ----------
        topology : Topology
            The system topology
        atom_indices : array_like, optional
            If not none, then read only a subset of the atoms coordinates from the
            file. This may be slightly slower than the standard read because it required
            an extra copy, but will save memory.

        Returns
        -------
        trajectory : Trajectory
            A trajectory object with 1 frame created from the file.
        """
        from mdtraj.core.trajectory import Trajectory
        if atom_indices is not None:
            topology = topology.subset(atom_indices)

        xyz, time, cell_lengths, cell_angles = self.read(atom_indices=atom_indices)
        # Convert from file units (angstroms) to Trajectory units in place.
        xyz = in_units_of(xyz, self.distance_unit, Trajectory._distance_unit,
                          inplace=True)
        cell_lengths = in_units_of(cell_lengths, self.distance_unit,
                                   Trajectory._distance_unit, inplace=True)
        return Trajectory(xyz=xyz, topology=topology, time=time,
                          unitcell_lengths=cell_lengths,
                          unitcell_angles=cell_angles)

    def read(self, atom_indices=None):
        """Read data from an AMBER NetCDF restart file

        Parameters
        ----------
        atom_indices : np.ndarray, dtype=int, optional
            The specific indices of the atoms you'd like to retrieve. If not
            supplied, all of the atoms will be retrieved.

        Returns
        -------
        coordinates : np.ndarray, shape=(1, n_atoms, 3)
            The cartesian coordinates of the atoms, in units of angstroms. These
            files only ever contain 1 frame
        time : np.ndarray, None
            The time corresponding to the frame, in units of picoseconds, or
            None if no time information is present
        cell_lengths : np.ndarray, None
            The lengths (a, b, c) of the unit cell for the frame in angstroms,
            or None if the information is not present in the file
        cell_angles : np.ndarray, None
            The angles (\alpha, \beta, \gamma) defining the unit cell for each
            frame, or None if the information is not present in the file.

        Notes
        -----
        If the file is not a NetCDF file with the appropriate convention, a
        TypeError is raised. If variables that are needed do not exist or if
        illegal values are passed in for parameters, ValueError is raised. If
        I/O errors occur, IOError is raised.
        """
        if self._mode != 'r':
            raise IOError('The file was opened in mode=%s. Reading is not '
                          'allowed.' % self._mode)
        if self._closed:
            raise IOError("The file has been closed!")
        if 'coordinates' not in self._handle.variables:
            raise ValueError('No coordinates found in the NetCDF file.')
        # Check that conventions are correct
        try:
            conventions = self._handle.Conventions.decode('ascii')
        except UnicodeDecodeError:
            raise TypeError('NetCDF file does not have correct Conventions')
        except AttributeError:
            # Previously an absent Conventions attribute escaped as a raw
            # AttributeError before the (dead) hasattr check further down
            # could run; raise TypeError to match the ConventionVersion
            # handling.
            raise TypeError('NetCDF file does not have Conventions')
        try:
            convention_version = self._handle.ConventionVersion.decode('ascii')
        except UnicodeDecodeError:
            raise ValueError('NetCDF file does not have correct ConventionVersion')
        except AttributeError:
            raise TypeError('NetCDF file does not have ConventionVersion')
        if conventions != 'AMBERRESTART':
            raise TypeError('NetCDF file does not have correct Conventions')
        if convention_version != '1.0':
            raise ValueError('NetCDF restart has ConventionVersion %s. Only '
                             'Version 1.0 is supported.' % convention_version)
        if atom_indices is not None:
            atom_slice = ensure_type(atom_indices, dtype=int, ndim=1,
                                     name='atom_indices', warn_on_cast=False)
            # Compare elementwise *before* reducing with np.all. The original
            # code wrote `not np.all(atom_slice) >= 0`, which reduces first
            # (testing truthiness, not sign) and therefore never rejected
            # negative indices.
            if not np.all(atom_slice >= 0):
                raise ValueError('Entries in atom_slice must be >= 0')
            coordinates = self._handle.variables['coordinates'][atom_slice, :]
        else:
            coordinates = self._handle.variables['coordinates'][:, :]

        # Get unit cell parameters
        if 'cell_lengths' in self._handle.variables:
            cell_lengths = self._handle.variables['cell_lengths'][:]
        else:
            cell_lengths = None
        if 'cell_angles' in self._handle.variables:
            cell_angles = self._handle.variables['cell_angles'][:]
        else:
            cell_angles = None

        # The two warning messages were swapped in the original code: each
        # branch named the variable that was *missing* as the one found.
        if cell_lengths is None and cell_angles is not None:
            warnings.warn('cell_angles were found, but no cell_lengths')
        if cell_lengths is not None and cell_angles is None:
            warnings.warn('cell_lengths were found, but no cell_angles')

        if 'time' in self._handle.variables:
            time = self._handle.variables['time'].getValue()
        else:
            time = None

        # scipy.io.netcdf variables are mem-mapped, and are only backed by valid
        # memory while the file handle is open. This is _bad_ because we need to
        # support the user opening the file, reading the coordinates, and then
        # closing it, and still having the coordinates be a valid memory
        # segment.
        # https://github.com/mdtraj/mdtraj/issues/440
        if coordinates is not None and not coordinates.flags['WRITEABLE']:
            coordinates = np.array(coordinates, copy=True)
        if cell_lengths is not None and not cell_lengths.flags['WRITEABLE']:
            cell_lengths = np.array(cell_lengths, copy=True)
        if cell_angles is not None and not cell_angles.flags['WRITEABLE']:
            cell_angles = np.array(cell_angles, copy=True)

        # The leading frame dimension is missing on all of these arrays since
        # restart files have only one frame. Reshape them to add this extra
        # dimension
        coordinates = coordinates[np.newaxis,:]
        if cell_lengths is not None:
            cell_lengths = cell_lengths[np.newaxis,:]
        if cell_angles is not None:
            cell_angles = cell_angles[np.newaxis,:]
        if time is not None:
            time = np.asarray([time,])

        return coordinates, time, cell_lengths, cell_angles

    def write(self, coordinates, time=None, cell_lengths=None,
              cell_angles=None):
        """Write one frame of a MD trajectory to disk in the AMBER NetCDF
        restart file format.

        Parameters
        ----------
        coordinates : np.ndarray, dtype=np.float32, shape=([1,] n_atoms, 3)
            The cartesian coordinates of each atom, in units of angstroms. Must
            be only a single frame (shape can be (1,N,3) or (N,3) where N is
            the number of atoms)
        time : array-like with 1 element or float, optional
            The time corresponding to this frame. If not specified, a place
            holder of 0 will be written
        cell_lengths : np.ndarray, dtype=np.double, shape=([1,] 3)
            The lengths (a,b,c) of the unit cell for the frame in Angstroms
        cell_angles : np.ndarray, dtype=np.double, shape=([1,] 3)
            The angles between the unit cell vectors for the frame in Degrees

        Notes
        -----
        You must only have one frame to write to this file.
        """
        if self._mode != 'w':
            raise IOError('The file was opened in mode=%s. Writing not allowed.'
                          % self._mode)
        if not self._needs_initialization:
            # Must have already been written -- can only write once
            raise RuntimeError('NetCDF restart file has already been written '
                               '-- can only write one frame to restart files.')
        # these are no-ops
        # coordinates = in_units_of(coordinates, None, 'angstroms')
        # time = in_units_of(time, None, 'picoseconds')
        # cell_lengths = in_units_of(cell_lengths, None, 'angstroms')
        # cell_angles = in_units_of(cell_angles, None, 'degrees')

        # typecheck all of the input arguments rigorously
        coordinates = ensure_type(coordinates, np.float32, 3, 'coordinates',
                                  length=None, can_be_none=False,
                                  shape=(1,None,3), warn_on_cast=False,
                                  add_newaxis_on_deficient_ndim=True)
        n_frames, n_atoms = coordinates.shape[0], coordinates.shape[1]
        if n_frames != 1:
            raise ValueError('Can only write 1 frame to a restart file!')
        if time is not None:
            try:
                time = float(time)
            except TypeError:
                raise TypeError('Can only provide a single time')
        else:
            time = 0.0
        cell_lengths = ensure_type(cell_lengths, np.float64, 2, 'cell_lengths',
                                   length=1, can_be_none=True,
                                   warn_on_cast=False,
                                   add_newaxis_on_deficient_ndim=True)
        cell_angles = ensure_type(cell_angles, np.float64, 2, 'cell_angles',
                                  length=1, can_be_none=True,
                                  warn_on_cast=False,
                                  add_newaxis_on_deficient_ndim=True)
        # Box lengths and angles must be given together or not at all.
        if ((cell_lengths is None and cell_angles is not None) or
                (cell_lengths is not None and cell_angles is None)):
            prov, negl = 'cell_lengths', 'cell_angles'
            if cell_lengths is None:
                prov, negl = negl, prov
            raise ValueError('You provided the variable "%s" but did not '
                             'provide "%s". Either provide both or neither -- '
                             'one without the other is meaningless.' %
                             (prov, negl))

        self._initialize_headers(n_atoms=n_atoms,
                                 set_coordinates=True,
                                 set_time=(time is not None),
                                 set_cell=(cell_lengths is not None))
        self._needs_initialization = False

        # Write the time, coordinates, and box info
        if time is not None:
            self._handle.variables['time'][0] = float(time)
        self._handle.variables['coordinates'][:,:] = coordinates[0,:,:]
        if cell_lengths is not None:
            self._handle.variables['cell_angles'][:] = cell_angles[0,:]
            self._handle.variables['cell_lengths'][:] = cell_lengths[0,:]
        self.flush()

    def _initialize_headers(self, n_atoms, set_coordinates, set_time, set_cell):
        """Initialize the headers and convention properties of the NetCDF
        restart file
        """
        ncfile = self._handle
        ncfile.Conventions = 'AMBERRESTART'
        ncfile.ConventionVersion = "1.0"
        ncfile.title = 'NetCDF Restart file written by MDTraj w/out velocities'
        ncfile.application = 'Omnia'
        ncfile.program = 'MDTraj'
        ncfile.programVersion = version.short_version
        # Dimensions
        ncfile.createDimension('spatial', 3)
        ncfile.createDimension('atom', n_atoms)
        if set_cell:
            ncfile.createDimension('cell_spatial', 3)
            ncfile.createDimension('label', 5)
            ncfile.createDimension('cell_angular', 3)
        if set_time:
            ncfile.createDimension('time', 1)
        # Variables
        v = ncfile.createVariable('spatial', 'c', ('spatial',))
        v[:] = np.asarray(list('xyz'))
        v = ncfile.createVariable('coordinates', 'd', ('atom', 'spatial'))
        v.units = 'angstrom'
        if set_cell:
            v = ncfile.createVariable('cell_angular', 'c',
                                      ('cell_angular', 'label'))
            v[0] = np.asarray(list('alpha'))
            v[1] = np.asarray(list('beta '))
            v[2] = np.asarray(list('gamma'))
            v = ncfile.createVariable('cell_spatial', 'c', ('cell_spatial',))
            v[:] = np.asarray(list('abc'))
            v = ncfile.createVariable('cell_lengths', 'd', ('cell_spatial',))
            v.units = 'angstrom'
            v = ncfile.createVariable('cell_angles', 'd', ('cell_angular',))
            v.units = 'degree'
        if set_time:
            v = ncfile.createVariable('time', 'd', ('time',))
            v.units = 'picosecond'
        self.flush()

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()

    def close(self):
        if not self._closed and hasattr(self, '_handle'):
            self._handle.close()
        self._closed = True

    def __del__(self):
        self.close()

    def __len__(self):
        return 1  # All restarts have only 1 frame

    def flush(self):
        self._validate_open()
        if self._mode != 'w':
            raise IOError('Cannot flush a file opened for reading')
        self._handle.flush()
| dwhswenson/mdtraj | mdtraj/formats/amberrst.py | Python | lgpl-2.1 | 33,342 | [
"Amber",
"MDTraj",
"NetCDF"
] | 0af97b794bece1c3c41c7295ecb2bf4a20ea1d9b62ab983647bfa94e7e199e56 |
#!/usr/bin/env python
import argparse
from CPT_GFFParser import gffParse, gffWrite
from Bio import SeqIO
from gff3 import feature_lambda, feature_test_type
def main(fasta, gff3):
    # Index the genome so GFF3 features can be extracted against their
    # parent records.
    seq_dict = SeqIO.to_dict(SeqIO.parse(fasta, "fasta"))
    codon_usage = {}
    for rec in gffParse(gff3, base_dict=seq_dict):
        for feat in feature_lambda(
            rec.features, feature_test_type, {"type": "CDS"}, subfeatures=True
        ):
            # The last three bases of a CDS are its stop codon.
            seq = str(feat.extract(rec).seq)[-3:]
            try:
                codon_usage[seq] += 1
            except KeyError:
                codon_usage[seq] = 1

    # Historical names for the three canonical stop codons; anything else
    # is reported with the literal name "None".
    names = {
        "TAG": "Amber",
        "TAA": "Ochre",
        "TGA": "Opal",
    }
    # TODO: print all actg combinations? Or just ones that are there
    print "# Name\tCodon\tCount"
    for key in sorted(codon_usage):
        print "\t".join((names.get(key.upper(), "None"), key, str(codon_usage[key])))
if __name__ == "__main__":
    # Command-line entry point: both arguments are opened as readable file
    # handles by argparse and forwarded to main() by keyword.
    parser = argparse.ArgumentParser(
        description="Summarise stop codon usage", epilog=""
    )
    parser.add_argument("fasta", type=argparse.FileType("r"), help="Fasta Genome")
    parser.add_argument("gff3", type=argparse.FileType("r"), help="GFF3 File")
    args = parser.parse_args()
    main(**vars(args))
| TAMU-CPT/galaxy-tools | tools/gff3/stop_stats.py | Python | gpl-3.0 | 1,276 | [
"Amber"
] | ba5d39aeb0321a0a01611e2e1d6fc58b3dc5977544f50932da48a8627697b72f |
"""Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
pick random sample
generate random permutation
distributions on the real line:
------------------------------
uniform
triangular
normal (Gaussian)
lognormal
negative exponential
gamma
beta
pareto
Weibull
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
General notes on the underlying Mersenne Twister core generator:
* The period is 2**19937-1.
* It is one of the most extensively tested generators in existence.
* Without a direct way to compute N steps forward, the semantics of
jumpahead(n) are weakened to simply jump to another distant state and rely
on the large period to avoid overlapping sequences.
* The random() method is implemented in C, executes in a single Python step,
and is, therefore, threadsafe.
"""
from __future__ import division
from warnings import warn as _warn
from types import MethodType as _MethodType, BuiltinMethodType as _BuiltinMethodType
from math import log as _log, exp as _exp, pi as _pi, e as _e, ceil as _ceil
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from os import urandom as _urandom
from binascii import hexlify as _hexlify
__all__ = ["Random","seed","random","uniform","randint","choice","sample",
"randrange","shuffle","normalvariate","lognormvariate",
"expovariate","vonmisesvariate","gammavariate","triangular",
"gauss","betavariate","paretovariate","weibullvariate",
"getstate","setstate","jumpahead", "WichmannHill", "getrandbits",
"SystemRandom"]
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
TWOPI = 2.0*_pi
LOG4 = _log(4.0)
SG_MAGICCONST = 1.0 + _log(4.5)
BPF = 53 # Number of bits in a float
RECIP_BPF = 2**-BPF
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley. Adapted by Raymond Hettinger for use with
# the Mersenne Twister and os.urandom() core generators.
import _random
class Random(_random.Random):
"""Random number generator base class used by bound module functions.
Used to instantiate instances of Random to get generators that don't
share state. Especially useful for multi-threaded programs, creating
a different instance of Random for each thread, and using the jumpahead()
method to ensure that the generated sequences seen by each thread don't
overlap.
Class Random can also be subclassed if you want to use a different basic
generator of your own devising: in that case, override the following
methods: random(), seed(), getstate(), setstate() and jumpahead().
Optionally, implement a getrandbits() method so that randrange() can cover
arbitrarily large ranges.
"""
VERSION = 3 # used by getstate/setstate
def __init__(self, x=None):
"""Initialize an instance.
Optional argument x controls seeding, as for Random.seed().
"""
self.seed(x)
self.gauss_next = None
def seed(self, a=None):
"""Initialize internal state from hashable object.
None or no argument seeds from current time or from an operating
system specific randomness source if available.
If a is not None or an int or long, hash(a) is used instead.
"""
if a is None:
try:
a = long(_hexlify(_urandom(16)), 16)
except NotImplementedError:
import time
a = long(time.time() * 256) # use fractional seconds
super(Random, self).seed(a)
self.gauss_next = None
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self.VERSION, super(Random, self).getstate(), self.gauss_next
def setstate(self, state):
"""Restore internal state from object returned by getstate()."""
version = state[0]
if version == 3:
version, internalstate, self.gauss_next = state
super(Random, self).setstate(internalstate)
elif version == 2:
version, internalstate, self.gauss_next = state
# In version 2, the state was saved as signed ints, which causes
# inconsistencies between 32/64-bit systems. The state is
# really unsigned 32-bit ints, so we convert negative ints from
# version 2 to positive longs for version 3.
try:
internalstate = tuple( long(x) % (2**32) for x in internalstate )
except ValueError, e:
raise TypeError, e
super(Random, self).setstate(internalstate)
else:
raise ValueError("state with version %s passed to "
"Random.setstate() of version %s" %
(version, self.VERSION))
## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.
## -------------------- pickle support -------------------
def __getstate__(self): # for pickle
return self.getstate()
def __setstate__(self, state): # for pickle
self.setstate(state)
def __reduce__(self):
return self.__class__, (), self.getstate()
## -------------------- integer methods -------------------
def randrange(self, start, stop=None, step=1, int=int, default=None,
maxwidth=1L<<BPF):
"""Choose a random item from range(start, stop[, step]).
This fixes the problem with randint() which includes the
endpoint; in Python this is usually not what you want.
Do not supply the 'int', 'default', and 'maxwidth' arguments.
"""
# This code is a bit messy to make it fast for the
# common case while still doing adequate error checking.
istart = int(start)
if istart != start:
raise ValueError, "non-integer arg 1 for randrange()"
if stop is default:
if istart > 0:
if istart >= maxwidth:
return self._randbelow(istart)
return int(self.random() * istart)
raise ValueError, "empty range for randrange()"
# stop argument supplied.
istop = int(stop)
if istop != stop:
raise ValueError, "non-integer stop for randrange()"
width = istop - istart
if step == 1 and width > 0:
# Note that
# int(istart + self.random()*width)
# instead would be incorrect. For example, consider istart
# = -2 and istop = 0. Then the guts would be in
# -2.0 to 0.0 exclusive on both ends (ignoring that random()
# might return 0.0), and because int() truncates toward 0, the
# final result would be -1 or 0 (instead of -2 or -1).
# istart + int(self.random()*width)
# would also be incorrect, for a subtler reason: the RHS
# can return a long, and then randrange() would also return
# a long, but we're supposed to return an int (for backward
# compatibility).
if width >= maxwidth:
return int(istart + self._randbelow(width))
return int(istart + int(self.random()*width))
if step == 1:
raise ValueError, "empty range for randrange() (%d,%d, %d)" % (istart, istop, width)
# Non-unit step argument supplied.
istep = int(step)
if istep != step:
raise ValueError, "non-integer step for randrange()"
if istep > 0:
n = (width + istep - 1) // istep
elif istep < 0:
n = (width + istep + 1) // istep
else:
raise ValueError, "zero step for randrange()"
if n <= 0:
raise ValueError, "empty range for randrange()"
if n >= maxwidth:
return istart + istep*self._randbelow(n)
return istart + istep*int(self.random() * n)
def randint(self, a, b):
"""Return random integer in range [a, b], including both end points.
"""
return self.randrange(a, b+1)
def _randbelow(self, n, _log=_log, int=int, _maxwidth=1L<<BPF,
_Method=_MethodType, _BuiltinMethod=_BuiltinMethodType):
"""Return a random int in the range [0,n)
Handles the case where n has more bits than returned
by a single call to the underlying generator.
"""
try:
getrandbits = self.getrandbits
except AttributeError:
pass
else:
# Only call self.getrandbits if the original random() builtin method
# has not been overridden or if a new getrandbits() was supplied.
# This assures that the two methods correspond.
if type(self.random) is _BuiltinMethod or type(getrandbits) is _Method:
k = int(1.00001 + _log(n-1, 2.0)) # 2**k > n-1 > 2**(k-2)
r = getrandbits(k)
while r >= n:
r = getrandbits(k)
return r
if n >= _maxwidth:
_warn("Underlying random() generator does not supply \n"
"enough bits to choose from a population range this large")
return int(self.random() * n)
## -------------------- sequence methods -------------------
def choice(self, seq):
"""Choose a random element from a non-empty sequence."""
return seq[int(self.random() * len(seq))] # raises IndexError if seq is empty
def shuffle(self, x, random=None, int=int):
"""x, random=random.random -> shuffle list x in place; return None.
Optional arg random is a 0-argument function returning a random
float in [0.0, 1.0); by default, the standard random.random.
"""
if random is None:
random = self.random
for i in reversed(xrange(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
j = int(random() * (i+1))
x[i], x[j] = x[j], x[i]
def sample(self, population, k):
"""Chooses k unique random elements from a population sequence.
Returns a new list containing elements from the population while
leaving the original population unchanged. The resulting list is
in selection order so that all sub-slices will also be valid random
samples. This allows raffle winners (the sample) to be partitioned
into grand prize and second place winners (the subslices).
Members of the population need not be hashable or unique. If the
population contains repeats, then each occurrence is a possible
selection in the sample.
To choose a sample in a range of integers, use xrange as an argument.
This is especially fast and space efficient for sampling from a
large population: sample(xrange(10000000), 60)
"""
# XXX Although the documentation says `population` is "a sequence",
# XXX attempts are made to cater to any iterable with a __len__
# XXX method. This has had mixed success. Examples from both
# XXX sides: sets work fine, and should become officially supported;
# XXX dicts are much harder, and have failed in various subtle
# XXX ways across attempts. Support for mapping types should probably
# XXX be dropped (and users should pass mapping.keys() or .values()
# XXX explicitly).
# Sampling without replacement entails tracking either potential
# selections (the pool) in a list or previous selections in a set.
# When the number of selections is small compared to the
# population, then tracking selections is efficient, requiring
# only a small set and an occasional reselection. For
# a larger number of selections, the pool tracking method is
# preferred since the list takes less space than the
# set and it doesn't suffer from frequent reselections.
n = len(population)
if not 0 <= k <= n:
raise ValueError, "sample larger than population"
random = self.random
_int = int
result = [None] * k
setsize = 21 # size of a small set minus size of an empty list
if k > 5:
setsize += 4 ** _ceil(_log(k * 3, 4)) # table size for big sets
if n <= setsize or hasattr(population, "keys"):
# An n-length list is smaller than a k-length set, or this is a
# mapping type so the other algorithm wouldn't work.
pool = list(population)
for i in xrange(k): # invariant: non-selected at [0,n-i)
j = _int(random() * (n-i))
result[i] = pool[j]
pool[j] = pool[n-i-1] # move non-selected item into vacancy
else:
try:
selected = set()
selected_add = selected.add
for i in xrange(k):
j = _int(random() * n)
while j in selected:
j = _int(random() * n)
selected_add(j)
result[i] = population[j]
except (TypeError, KeyError): # handle (at least) sets
if isinstance(population, list):
raise
return self.sample(tuple(population), k)
return result
## -------------------- real-valued distributions -------------------
## -------------------- uniform distribution -------------------
def uniform(self, a, b):
"Get a random number in the range [a, b) or [a, b] depending on rounding."
return a + (b-a) * self.random()
## -------------------- triangular --------------------
def triangular(self, low=0.0, high=1.0, mode=None):
"""Triangular distribution.
Continuous distribution bounded by given lower and upper limits,
and having a given mode value in-between.
http://en.wikipedia.org/wiki/Triangular_distribution
"""
u = self.random()
c = 0.5 if mode is None else (mode - low) / (high - low)
if u > c:
u = 1.0 - u
c = 1.0 - c
low, high = high, low
return low + (high - low) * (u * c) ** 0.5
## -------------------- normal distribution --------------------
def normalvariate(self, mu, sigma):
"""Normal distribution.
mu is the mean, and sigma is the standard deviation.
"""
# mu = mean, sigma = standard deviation
# Uses Kinderman and Monahan method. Reference: Kinderman,
# A.J. and Monahan, J.F., "Computer generation of random
# variables using the ratio of uniform deviates", ACM Trans
# Math Software, 3, (1977), pp257-260.
random = self.random
while 1:
u1 = random()
u2 = 1.0 - random()
z = NV_MAGICCONST*(u1-0.5)/u2
zz = z*z/4.0
if zz <= -_log(u2):
break
return mu + z*sigma
## -------------------- lognormal distribution --------------------
def lognormvariate(self, mu, sigma):
"""Log normal distribution.
If you take the natural logarithm of this distribution, you'll get a
normal distribution with mean mu and standard deviation sigma.
mu can have any value, and sigma must be greater than zero.
"""
return _exp(self.normalvariate(mu, sigma))
## -------------------- exponential distribution --------------------
def expovariate(self, lambd):
"""Exponential distribution.
lambd is 1.0 divided by the desired mean. It should be
nonzero. (The parameter would be called "lambda", but that is
a reserved word in Python.) Returned values range from 0 to
positive infinity if lambd is positive, and from negative
infinity to 0 if lambd is negative.
"""
# lambd: rate lambd = 1/mean
# ('lambda' is a Python reserved word)
random = self.random
u = random()
while u <= 1e-7:
u = random()
return -_log(u)/lambd
## -------------------- von Mises distribution --------------------
def vonmisesvariate(self, mu, kappa):
"""Circular data distribution.
mu is the mean angle, expressed in radians between 0 and 2*pi, and
kappa is the concentration parameter, which must be greater than or
equal to zero. If kappa is equal to zero, this distribution reduces
to a uniform random angle over the range 0 to 2*pi.
"""
# mu: mean angle (in radians between 0 and 2*pi)
# kappa: concentration parameter kappa (>= 0)
# if kappa = 0 generate uniform random angle
# Based upon an algorithm published in: Fisher, N.I.,
# "Statistical Analysis of Circular Data", Cambridge
# University Press, 1993.
# Thanks to Magnus Kessler for a correction to the
# implementation of step 4.
random = self.random
if kappa <= 1e-6:
return TWOPI * random()
a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa)
b = (a - _sqrt(2.0 * a))/(2.0 * kappa)
r = (1.0 + b * b)/(2.0 * b)
while 1:
u1 = random()
z = _cos(_pi * u1)
f = (1.0 + r * z)/(r + z)
c = kappa * (r - f)
u2 = random()
if u2 < c * (2.0 - c) or u2 <= c * _exp(1.0 - c):
break
u3 = random()
if u3 > 0.5:
theta = (mu % TWOPI) + _acos(f)
else:
theta = (mu % TWOPI) - _acos(f)
return theta
## -------------------- gamma distribution --------------------
def gammavariate(self, alpha, beta):
"""Gamma distribution. Not the gamma function!
Conditions on the parameters are alpha > 0 and beta > 0.
"""
# alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
# Warning: a few older sources define the gamma distribution in terms
# of alpha > -1.0
if alpha <= 0.0 or beta <= 0.0:
raise ValueError, 'gammavariate: alpha and beta must be > 0.0'
random = self.random
if alpha > 1.0:
# Uses R.C.H. Cheng, "The generation of Gamma
# variables with non-integral shape parameters",
# Applied Statistics, (1977), 26, No. 1, p71-74
ainv = _sqrt(2.0 * alpha - 1.0)
bbb = alpha - LOG4
ccc = alpha + ainv
while 1:
u1 = random()
if not 1e-7 < u1 < .9999999:
continue
u2 = 1.0 - random()
v = _log(u1/(1.0-u1))/ainv
x = alpha*_exp(v)
z = u1*u1*u2
r = bbb+ccc*v-x
if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
return x * beta
elif alpha == 1.0:
# expovariate(1)
u = random()
while u <= 1e-7:
u = random()
return -_log(u) * beta
else: # alpha is between 0 and 1 (exclusive)
# Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
while 1:
u = random()
b = (_e + alpha)/_e
p = b*u
if p <= 1.0:
x = p ** (1.0/alpha)
else:
x = -_log((b-p)/alpha)
u1 = random()
if p > 1.0:
if u1 <= x ** (alpha - 1.0):
break
elif u1 <= _exp(-x):
break
return x * beta
## -------------------- Gauss (faster alternative) --------------------
def gauss(self, mu, sigma):
"""Gaussian distribution.
mu is the mean, and sigma is the standard deviation. This is
slightly faster than the normalvariate() function.
Not thread-safe without a lock around calls.
"""
# When x and y are two variables from [0, 1), uniformly
# distributed, then
#
# cos(2*pi*x)*sqrt(-2*log(1-y))
# sin(2*pi*x)*sqrt(-2*log(1-y))
#
# are two *independent* variables with normal distribution
# (mu = 0, sigma = 1).
# (Lambert Meertens)
# (corrected version; bug discovered by Mike Miller, fixed by LM)
# Multithreading note: When two threads call this function
# simultaneously, it is possible that they will receive the
# same return value. The window is very small though. To
# avoid this, you have to use a lock around all calls. (I
# didn't want to slow this down in the serial case by using a
# lock here.)
random = self.random
z = self.gauss_next
self.gauss_next = None
if z is None:
x2pi = random() * TWOPI
g2rad = _sqrt(-2.0 * _log(1.0 - random()))
z = _cos(x2pi) * g2rad
self.gauss_next = _sin(x2pi) * g2rad
return mu + z*sigma
## -------------------- beta --------------------
## See
## http://sourceforge.net/bugs/?func=detailbug&bug_id=130030&group_id=5470
## for Ivan Frohne's insightful analysis of why the original implementation:
##
## def betavariate(self, alpha, beta):
## # Discrete Event Simulation in C, pp 87-88.
##
## y = self.expovariate(alpha)
## z = self.expovariate(1.0/beta)
## return z/(y+z)
##
## was dead wrong, and how it probably got that way.
def betavariate(self, alpha, beta):
"""Beta distribution.
Conditions on the parameters are alpha > 0 and beta > 0.
Returned values range between 0 and 1.
"""
# This version due to Janne Sinkkonen, and matches all the std
# texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
y = self.gammavariate(alpha, 1.)
if y == 0:
return 0.0
else:
return y / (y + self.gammavariate(beta, 1.))
## -------------------- Pareto --------------------
def paretovariate(self, alpha):
"""Pareto distribution. alpha is the shape parameter."""
# Jain, pg. 495
u = 1.0 - self.random()
return 1.0 / pow(u, 1.0/alpha)
## -------------------- Weibull --------------------
def weibullvariate(self, alpha, beta):
"""Weibull distribution.
alpha is the scale parameter and beta is the shape parameter.
"""
# Jain, pg. 499; bug fix courtesy Bill Arms
u = 1.0 - self.random()
return alpha * pow(-_log(u), 1.0/beta)
## -------------------- Wichmann-Hill -------------------
class WichmannHill(Random):
VERSION = 1 # used by getstate/setstate
def seed(self, a=None):
"""Initialize internal state from hashable object.
None or no argument seeds from current time or from an operating
system specific randomness source if available.
If a is not None or an int or long, hash(a) is used instead.
If a is an int or long, a is used directly. Distinct values between
0 and 27814431486575L inclusive are guaranteed to yield distinct
internal states (this guarantee is specific to the default
Wichmann-Hill generator).
"""
if a is None:
try:
a = long(_hexlify(_urandom(16)), 16)
except NotImplementedError:
import time
a = long(time.time() * 256) # use fractional seconds
if not isinstance(a, (int, long)):
a = hash(a)
a, x = divmod(a, 30268)
a, y = divmod(a, 30306)
a, z = divmod(a, 30322)
self._seed = int(x)+1, int(y)+1, int(z)+1
self.gauss_next = None
def random(self):
"""Get the next random number in the range [0.0, 1.0)."""
# Wichman-Hill random number generator.
#
# Wichmann, B. A. & Hill, I. D. (1982)
# Algorithm AS 183:
# An efficient and portable pseudo-random number generator
# Applied Statistics 31 (1982) 188-190
#
# see also:
# Correction to Algorithm AS 183
# Applied Statistics 33 (1984) 123
#
# McLeod, A. I. (1985)
# A remark on Algorithm AS 183
# Applied Statistics 34 (1985),198-200
# This part is thread-unsafe:
# BEGIN CRITICAL SECTION
x, y, z = self._seed
x = (171 * x) % 30269
y = (172 * y) % 30307
z = (170 * z) % 30323
self._seed = x, y, z
# END CRITICAL SECTION
# Note: on a platform using IEEE-754 double arithmetic, this can
# never return 0.0 (asserted by Tim; proof too long for a comment).
return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self.VERSION, self._seed, self.gauss_next
def setstate(self, state):
"""Restore internal state from object returned by getstate()."""
version = state[0]
if version == 1:
version, self._seed, self.gauss_next = state
else:
raise ValueError("state with version %s passed to "
"Random.setstate() of version %s" %
(version, self.VERSION))
def jumpahead(self, n):
"""Act as if n calls to random() were made, but quickly.
n is an int, greater than or equal to 0.
Example use: If you have 2 threads and know that each will
consume no more than a million random numbers, create two Random
objects r1 and r2, then do
r2.setstate(r1.getstate())
r2.jumpahead(1000000)
Then r1 and r2 will use guaranteed-disjoint segments of the full
period.
"""
if not n >= 0:
raise ValueError("n must be >= 0")
x, y, z = self._seed
x = int(x * pow(171, n, 30269)) % 30269
y = int(y * pow(172, n, 30307)) % 30307
z = int(z * pow(170, n, 30323)) % 30323
self._seed = x, y, z
def __whseed(self, x=0, y=0, z=0):
"""Set the Wichmann-Hill seed from (x, y, z).
These must be integers in the range [0, 256).
"""
if not type(x) == type(y) == type(z) == int:
raise TypeError('seeds must be integers')
if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):
raise ValueError('seeds must be in range(0, 256)')
if 0 == x == y == z:
# Initialize from current time
import time
t = long(time.time() * 256)
t = int((t&0xffffff) ^ (t>>24))
t, x = divmod(t, 256)
t, y = divmod(t, 256)
t, z = divmod(t, 256)
# Zero is a poor seed, so substitute 1
self._seed = (x or 1, y or 1, z or 1)
self.gauss_next = None
def whseed(self, a=None):
"""Seed from hashable object's hash code.
None or no argument seeds from current time. It is not guaranteed
that objects with distinct hash codes lead to distinct internal
states.
This is obsolete, provided for compatibility with the seed routine
used prior to Python 2.1. Use the .seed() method instead.
"""
if a is None:
self.__whseed()
return
a = hash(a)
a, x = divmod(a, 256)
a, y = divmod(a, 256)
a, z = divmod(a, 256)
x = (x + a) % 256 or 1
y = (y + a) % 256 or 1
z = (z + a) % 256 or 1
self.__whseed(x, y, z)
## --------------- Operating System Random Source ------------------
class SystemRandom(Random):
"""Alternate random number generator using sources provided
by the operating system (such as /dev/urandom on Unix or
CryptGenRandom on Windows).
Not available on all systems (see os.urandom() for details).
"""
def random(self):
"""Get the next random number in the range [0.0, 1.0)."""
return (long(_hexlify(_urandom(7)), 16) >> 3) * RECIP_BPF
def getrandbits(self, k):
"""getrandbits(k) -> x. Generates a long int with k random bits."""
if k <= 0:
raise ValueError('number of bits must be greater than zero')
if k != int(k):
raise TypeError('number of bits should be an integer')
bytes = (k + 7) // 8 # bits / 8 and rounded up
x = long(_hexlify(_urandom(bytes)), 16)
return x >> (bytes * 8 - k) # trim excess bits
def _stub(self, *args, **kwds):
"Stub method. Not used for a system random number generator."
return None
seed = jumpahead = _stub
def _notimplemented(self, *args, **kwds):
"Method should not be called for a system random number generator."
raise NotImplementedError('System entropy source does not have state.')
getstate = setstate = _notimplemented
## -------------------- test program --------------------
def _test_generator(n, func, args):
import time
print n, 'times', func.__name__
total = 0.0
sqsum = 0.0
smallest = 1e10
largest = -1e10
t0 = time.time()
for i in range(n):
x = func(*args)
total += x
sqsum = sqsum + x*x
smallest = min(x, smallest)
largest = max(x, largest)
t1 = time.time()
print round(t1-t0, 3), 'sec,',
avg = total/n
stddev = _sqrt(sqsum/n - avg*avg)
print 'avg %g, stddev %g, min %g, max %g' % \
(avg, stddev, smallest, largest)
def _test(N=2000):
_test_generator(N, random, ())
_test_generator(N, normalvariate, (0.0, 1.0))
_test_generator(N, lognormvariate, (0.0, 1.0))
_test_generator(N, vonmisesvariate, (0.0, 1.0))
_test_generator(N, gammavariate, (0.01, 1.0))
_test_generator(N, gammavariate, (0.1, 1.0))
_test_generator(N, gammavariate, (0.1, 2.0))
_test_generator(N, gammavariate, (0.5, 1.0))
_test_generator(N, gammavariate, (0.9, 1.0))
_test_generator(N, gammavariate, (1.0, 1.0))
_test_generator(N, gammavariate, (2.0, 1.0))
_test_generator(N, gammavariate, (20.0, 1.0))
_test_generator(N, gammavariate, (200.0, 1.0))
_test_generator(N, gauss, (0.0, 1.0))
_test_generator(N, betavariate, (3.0, 3.0))
_test_generator(N, triangular, (0.0, 1.0, 1.0/3.0))
# Create one instance, seeded from current time, and export its methods
# as module-level functions. The functions share state across all uses
#(both in the user's code and in the Python libraries), but that's fine
# for most programs and is easier for the casual user than making them
# instantiate their own Random() instance.
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
triangular = _inst.triangular
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
sample = _inst.sample
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
jumpahead = _inst.jumpahead
getrandbits = _inst.getrandbits
if __name__ == '__main__':
_test()
| babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/random.py | Python | mit | 32,864 | [
"Gaussian"
] | 75524c8ce97569161b832440d9b3cb309d4900ee8b96ab494cf0308c7b700cca |
"""HTML character entity references.
Backported for python-future from Python 3.3
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future import standard_library
from future.builtins import *
# Maps an HTML named character entity (the bare name, without the
# surrounding '&' and ';') to its Unicode code point, e.g.
# name2codepoint['amp'] == 0x26.  Each entry's trailing comment gives the
# character's Unicode name, code point, and source entity set.
name2codepoint = {
    'AElig': 0x00c6, # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1
    'Aacute': 0x00c1, # latin capital letter A with acute, U+00C1 ISOlat1
    'Acirc': 0x00c2, # latin capital letter A with circumflex, U+00C2 ISOlat1
    'Agrave': 0x00c0, # latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1
    'Alpha': 0x0391, # greek capital letter alpha, U+0391
    'Aring': 0x00c5, # latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1
    'Atilde': 0x00c3, # latin capital letter A with tilde, U+00C3 ISOlat1
    'Auml': 0x00c4, # latin capital letter A with diaeresis, U+00C4 ISOlat1
    'Beta': 0x0392, # greek capital letter beta, U+0392
    'Ccedil': 0x00c7, # latin capital letter C with cedilla, U+00C7 ISOlat1
    'Chi': 0x03a7, # greek capital letter chi, U+03A7
    'Dagger': 0x2021, # double dagger, U+2021 ISOpub
    'Delta': 0x0394, # greek capital letter delta, U+0394 ISOgrk3
    'ETH': 0x00d0, # latin capital letter ETH, U+00D0 ISOlat1
    'Eacute': 0x00c9, # latin capital letter E with acute, U+00C9 ISOlat1
    'Ecirc': 0x00ca, # latin capital letter E with circumflex, U+00CA ISOlat1
    'Egrave': 0x00c8, # latin capital letter E with grave, U+00C8 ISOlat1
    'Epsilon': 0x0395, # greek capital letter epsilon, U+0395
    'Eta': 0x0397, # greek capital letter eta, U+0397
    'Euml': 0x00cb, # latin capital letter E with diaeresis, U+00CB ISOlat1
    'Gamma': 0x0393, # greek capital letter gamma, U+0393 ISOgrk3
    'Iacute': 0x00cd, # latin capital letter I with acute, U+00CD ISOlat1
    'Icirc': 0x00ce, # latin capital letter I with circumflex, U+00CE ISOlat1
    'Igrave': 0x00cc, # latin capital letter I with grave, U+00CC ISOlat1
    'Iota': 0x0399, # greek capital letter iota, U+0399
    'Iuml': 0x00cf, # latin capital letter I with diaeresis, U+00CF ISOlat1
    'Kappa': 0x039a, # greek capital letter kappa, U+039A
    'Lambda': 0x039b, # greek capital letter lambda, U+039B ISOgrk3
    'Mu': 0x039c, # greek capital letter mu, U+039C
    'Ntilde': 0x00d1, # latin capital letter N with tilde, U+00D1 ISOlat1
    'Nu': 0x039d, # greek capital letter nu, U+039D
    'OElig': 0x0152, # latin capital ligature OE, U+0152 ISOlat2
    'Oacute': 0x00d3, # latin capital letter O with acute, U+00D3 ISOlat1
    'Ocirc': 0x00d4, # latin capital letter O with circumflex, U+00D4 ISOlat1
    'Ograve': 0x00d2, # latin capital letter O with grave, U+00D2 ISOlat1
    'Omega': 0x03a9, # greek capital letter omega, U+03A9 ISOgrk3
    'Omicron': 0x039f, # greek capital letter omicron, U+039F
    'Oslash': 0x00d8, # latin capital letter O with stroke = latin capital letter O slash, U+00D8 ISOlat1
    'Otilde': 0x00d5, # latin capital letter O with tilde, U+00D5 ISOlat1
    'Ouml': 0x00d6, # latin capital letter O with diaeresis, U+00D6 ISOlat1
    'Phi': 0x03a6, # greek capital letter phi, U+03A6 ISOgrk3
    'Pi': 0x03a0, # greek capital letter pi, U+03A0 ISOgrk3
    'Prime': 0x2033, # double prime = seconds = inches, U+2033 ISOtech
    'Psi': 0x03a8, # greek capital letter psi, U+03A8 ISOgrk3
    'Rho': 0x03a1, # greek capital letter rho, U+03A1
    'Scaron': 0x0160, # latin capital letter S with caron, U+0160 ISOlat2
    'Sigma': 0x03a3, # greek capital letter sigma, U+03A3 ISOgrk3
    'THORN': 0x00de, # latin capital letter THORN, U+00DE ISOlat1
    'Tau': 0x03a4, # greek capital letter tau, U+03A4
    'Theta': 0x0398, # greek capital letter theta, U+0398 ISOgrk3
    'Uacute': 0x00da, # latin capital letter U with acute, U+00DA ISOlat1
    'Ucirc': 0x00db, # latin capital letter U with circumflex, U+00DB ISOlat1
    'Ugrave': 0x00d9, # latin capital letter U with grave, U+00D9 ISOlat1
    'Upsilon': 0x03a5, # greek capital letter upsilon, U+03A5 ISOgrk3
    'Uuml': 0x00dc, # latin capital letter U with diaeresis, U+00DC ISOlat1
    'Xi': 0x039e, # greek capital letter xi, U+039E ISOgrk3
    'Yacute': 0x00dd, # latin capital letter Y with acute, U+00DD ISOlat1
    'Yuml': 0x0178, # latin capital letter Y with diaeresis, U+0178 ISOlat2
    'Zeta': 0x0396, # greek capital letter zeta, U+0396
    'aacute': 0x00e1, # latin small letter a with acute, U+00E1 ISOlat1
    'acirc': 0x00e2, # latin small letter a with circumflex, U+00E2 ISOlat1
    'acute': 0x00b4, # acute accent = spacing acute, U+00B4 ISOdia
    'aelig': 0x00e6, # latin small letter ae = latin small ligature ae, U+00E6 ISOlat1
    'agrave': 0x00e0, # latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1
    'alefsym': 0x2135, # alef symbol = first transfinite cardinal, U+2135 NEW
    'alpha': 0x03b1, # greek small letter alpha, U+03B1 ISOgrk3
    'amp': 0x0026, # ampersand, U+0026 ISOnum
    'and': 0x2227, # logical and = wedge, U+2227 ISOtech
    'ang': 0x2220, # angle, U+2220 ISOamso
    'aring': 0x00e5, # latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1
    'asymp': 0x2248, # almost equal to = asymptotic to, U+2248 ISOamsr
    'atilde': 0x00e3, # latin small letter a with tilde, U+00E3 ISOlat1
    'auml': 0x00e4, # latin small letter a with diaeresis, U+00E4 ISOlat1
    'bdquo': 0x201e, # double low-9 quotation mark, U+201E NEW
    'beta': 0x03b2, # greek small letter beta, U+03B2 ISOgrk3
    'brvbar': 0x00a6, # broken bar = broken vertical bar, U+00A6 ISOnum
    'bull': 0x2022, # bullet = black small circle, U+2022 ISOpub
    'cap': 0x2229, # intersection = cap, U+2229 ISOtech
    'ccedil': 0x00e7, # latin small letter c with cedilla, U+00E7 ISOlat1
    'cedil': 0x00b8, # cedilla = spacing cedilla, U+00B8 ISOdia
    'cent': 0x00a2, # cent sign, U+00A2 ISOnum
    'chi': 0x03c7, # greek small letter chi, U+03C7 ISOgrk3
    'circ': 0x02c6, # modifier letter circumflex accent, U+02C6 ISOpub
    'clubs': 0x2663, # black club suit = shamrock, U+2663 ISOpub
    'cong': 0x2245, # approximately equal to, U+2245 ISOtech
    'copy': 0x00a9, # copyright sign, U+00A9 ISOnum
    'crarr': 0x21b5, # downwards arrow with corner leftwards = carriage return, U+21B5 NEW
    'cup': 0x222a, # union = cup, U+222A ISOtech
    'curren': 0x00a4, # currency sign, U+00A4 ISOnum
    'dArr': 0x21d3, # downwards double arrow, U+21D3 ISOamsa
    'dagger': 0x2020, # dagger, U+2020 ISOpub
    'darr': 0x2193, # downwards arrow, U+2193 ISOnum
    'deg': 0x00b0, # degree sign, U+00B0 ISOnum
    'delta': 0x03b4, # greek small letter delta, U+03B4 ISOgrk3
    'diams': 0x2666, # black diamond suit, U+2666 ISOpub
    'divide': 0x00f7, # division sign, U+00F7 ISOnum
    'eacute': 0x00e9, # latin small letter e with acute, U+00E9 ISOlat1
    'ecirc': 0x00ea, # latin small letter e with circumflex, U+00EA ISOlat1
    'egrave': 0x00e8, # latin small letter e with grave, U+00E8 ISOlat1
    'empty': 0x2205, # empty set = null set = diameter, U+2205 ISOamso
    'emsp': 0x2003, # em space, U+2003 ISOpub
    'ensp': 0x2002, # en space, U+2002 ISOpub
    'epsilon': 0x03b5, # greek small letter epsilon, U+03B5 ISOgrk3
    'equiv': 0x2261, # identical to, U+2261 ISOtech
    'eta': 0x03b7, # greek small letter eta, U+03B7 ISOgrk3
    'eth': 0x00f0, # latin small letter eth, U+00F0 ISOlat1
    'euml': 0x00eb, # latin small letter e with diaeresis, U+00EB ISOlat1
    'euro': 0x20ac, # euro sign, U+20AC NEW
    'exist': 0x2203, # there exists, U+2203 ISOtech
    'fnof': 0x0192, # latin small f with hook = function = florin, U+0192 ISOtech
    'forall': 0x2200, # for all, U+2200 ISOtech
    'frac12': 0x00bd, # vulgar fraction one half = fraction one half, U+00BD ISOnum
    'frac14': 0x00bc, # vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum
    'frac34': 0x00be, # vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum
    'frasl': 0x2044, # fraction slash, U+2044 NEW
    'gamma': 0x03b3, # greek small letter gamma, U+03B3 ISOgrk3
    'ge': 0x2265, # greater-than or equal to, U+2265 ISOtech
    'gt': 0x003e, # greater-than sign, U+003E ISOnum
    'hArr': 0x21d4, # left right double arrow, U+21D4 ISOamsa
    'harr': 0x2194, # left right arrow, U+2194 ISOamsa
    'hearts': 0x2665, # black heart suit = valentine, U+2665 ISOpub
    'hellip': 0x2026, # horizontal ellipsis = three dot leader, U+2026 ISOpub
    'iacute': 0x00ed, # latin small letter i with acute, U+00ED ISOlat1
    'icirc': 0x00ee, # latin small letter i with circumflex, U+00EE ISOlat1
    'iexcl': 0x00a1, # inverted exclamation mark, U+00A1 ISOnum
    'igrave': 0x00ec, # latin small letter i with grave, U+00EC ISOlat1
    'image': 0x2111, # blackletter capital I = imaginary part, U+2111 ISOamso
    'infin': 0x221e, # infinity, U+221E ISOtech
    'int': 0x222b, # integral, U+222B ISOtech
    'iota': 0x03b9, # greek small letter iota, U+03B9 ISOgrk3
    'iquest': 0x00bf, # inverted question mark = turned question mark, U+00BF ISOnum
    'isin': 0x2208, # element of, U+2208 ISOtech
    'iuml': 0x00ef, # latin small letter i with diaeresis, U+00EF ISOlat1
    'kappa': 0x03ba, # greek small letter kappa, U+03BA ISOgrk3
    'lArr': 0x21d0, # leftwards double arrow, U+21D0 ISOtech
    'lambda': 0x03bb, # greek small letter lambda, U+03BB ISOgrk3
    'lang': 0x2329, # left-pointing angle bracket = bra, U+2329 ISOtech
    'laquo': 0x00ab, # left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum
    'larr': 0x2190, # leftwards arrow, U+2190 ISOnum
    'lceil': 0x2308, # left ceiling = apl upstile, U+2308 ISOamsc
    'ldquo': 0x201c, # left double quotation mark, U+201C ISOnum
    'le': 0x2264, # less-than or equal to, U+2264 ISOtech
    'lfloor': 0x230a, # left floor = apl downstile, U+230A ISOamsc
    'lowast': 0x2217, # asterisk operator, U+2217 ISOtech
    'loz': 0x25ca, # lozenge, U+25CA ISOpub
    'lrm': 0x200e, # left-to-right mark, U+200E NEW RFC 2070
    'lsaquo': 0x2039, # single left-pointing angle quotation mark, U+2039 ISO proposed
    'lsquo': 0x2018, # left single quotation mark, U+2018 ISOnum
    'lt': 0x003c, # less-than sign, U+003C ISOnum
    'macr': 0x00af, # macron = spacing macron = overline = APL overbar, U+00AF ISOdia
    'mdash': 0x2014, # em dash, U+2014 ISOpub
    'micro': 0x00b5, # micro sign, U+00B5 ISOnum
    'middot': 0x00b7, # middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum
    'minus': 0x2212, # minus sign, U+2212 ISOtech
    'mu': 0x03bc, # greek small letter mu, U+03BC ISOgrk3
    'nabla': 0x2207, # nabla = backward difference, U+2207 ISOtech
    'nbsp': 0x00a0, # no-break space = non-breaking space, U+00A0 ISOnum
    'ndash': 0x2013, # en dash, U+2013 ISOpub
    'ne': 0x2260, # not equal to, U+2260 ISOtech
    'ni': 0x220b, # contains as member, U+220B ISOtech
    'not': 0x00ac, # not sign, U+00AC ISOnum
    'notin': 0x2209, # not an element of, U+2209 ISOtech
    'nsub': 0x2284, # not a subset of, U+2284 ISOamsn
    'ntilde': 0x00f1, # latin small letter n with tilde, U+00F1 ISOlat1
    'nu': 0x03bd, # greek small letter nu, U+03BD ISOgrk3
    'oacute': 0x00f3, # latin small letter o with acute, U+00F3 ISOlat1
    'ocirc': 0x00f4, # latin small letter o with circumflex, U+00F4 ISOlat1
    'oelig': 0x0153, # latin small ligature oe, U+0153 ISOlat2
    'ograve': 0x00f2, # latin small letter o with grave, U+00F2 ISOlat1
    'oline': 0x203e, # overline = spacing overscore, U+203E NEW
    'omega': 0x03c9, # greek small letter omega, U+03C9 ISOgrk3
    'omicron': 0x03bf, # greek small letter omicron, U+03BF NEW
    'oplus': 0x2295, # circled plus = direct sum, U+2295 ISOamsb
    'or': 0x2228, # logical or = vee, U+2228 ISOtech
    'ordf': 0x00aa, # feminine ordinal indicator, U+00AA ISOnum
    'ordm': 0x00ba, # masculine ordinal indicator, U+00BA ISOnum
    'oslash': 0x00f8, # latin small letter o with stroke, = latin small letter o slash, U+00F8 ISOlat1
    'otilde': 0x00f5, # latin small letter o with tilde, U+00F5 ISOlat1
    'otimes': 0x2297, # circled times = vector product, U+2297 ISOamsb
    'ouml': 0x00f6, # latin small letter o with diaeresis, U+00F6 ISOlat1
    'para': 0x00b6, # pilcrow sign = paragraph sign, U+00B6 ISOnum
    'part': 0x2202, # partial differential, U+2202 ISOtech
    'permil': 0x2030, # per mille sign, U+2030 ISOtech
    'perp': 0x22a5, # up tack = orthogonal to = perpendicular, U+22A5 ISOtech
    'phi': 0x03c6, # greek small letter phi, U+03C6 ISOgrk3
    'pi': 0x03c0, # greek small letter pi, U+03C0 ISOgrk3
    'piv': 0x03d6, # greek pi symbol, U+03D6 ISOgrk3
    'plusmn': 0x00b1, # plus-minus sign = plus-or-minus sign, U+00B1 ISOnum
    'pound': 0x00a3, # pound sign, U+00A3 ISOnum
    'prime': 0x2032, # prime = minutes = feet, U+2032 ISOtech
    'prod': 0x220f, # n-ary product = product sign, U+220F ISOamsb
    'prop': 0x221d, # proportional to, U+221D ISOtech
    'psi': 0x03c8, # greek small letter psi, U+03C8 ISOgrk3
    'quot': 0x0022, # quotation mark = APL quote, U+0022 ISOnum
    'rArr': 0x21d2, # rightwards double arrow, U+21D2 ISOtech
    'radic': 0x221a, # square root = radical sign, U+221A ISOtech
    'rang': 0x232a, # right-pointing angle bracket = ket, U+232A ISOtech
    'raquo': 0x00bb, # right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum
    'rarr': 0x2192, # rightwards arrow, U+2192 ISOnum
    'rceil': 0x2309, # right ceiling, U+2309 ISOamsc
    'rdquo': 0x201d, # right double quotation mark, U+201D ISOnum
    'real': 0x211c, # blackletter capital R = real part symbol, U+211C ISOamso
    'reg': 0x00ae, # registered sign = registered trade mark sign, U+00AE ISOnum
    'rfloor': 0x230b, # right floor, U+230B ISOamsc
    'rho': 0x03c1, # greek small letter rho, U+03C1 ISOgrk3
    'rlm': 0x200f, # right-to-left mark, U+200F NEW RFC 2070
    'rsaquo': 0x203a, # single right-pointing angle quotation mark, U+203A ISO proposed
    'rsquo': 0x2019, # right single quotation mark, U+2019 ISOnum
    'sbquo': 0x201a, # single low-9 quotation mark, U+201A NEW
    'scaron': 0x0161, # latin small letter s with caron, U+0161 ISOlat2
    'sdot': 0x22c5, # dot operator, U+22C5 ISOamsb
    'sect': 0x00a7, # section sign, U+00A7 ISOnum
    'shy': 0x00ad, # soft hyphen = discretionary hyphen, U+00AD ISOnum
    'sigma': 0x03c3, # greek small letter sigma, U+03C3 ISOgrk3
    'sigmaf': 0x03c2, # greek small letter final sigma, U+03C2 ISOgrk3
    'sim': 0x223c, # tilde operator = varies with = similar to, U+223C ISOtech
    'spades': 0x2660, # black spade suit, U+2660 ISOpub
    'sub': 0x2282, # subset of, U+2282 ISOtech
    'sube': 0x2286, # subset of or equal to, U+2286 ISOtech
    'sum': 0x2211, # n-ary summation, U+2211 ISOamsb
    'sup': 0x2283, # superset of, U+2283 ISOtech
    'sup1': 0x00b9, # superscript one = superscript digit one, U+00B9 ISOnum
    'sup2': 0x00b2, # superscript two = superscript digit two = squared, U+00B2 ISOnum
    'sup3': 0x00b3, # superscript three = superscript digit three = cubed, U+00B3 ISOnum
    'supe': 0x2287, # superset of or equal to, U+2287 ISOtech
    'szlig': 0x00df, # latin small letter sharp s = ess-zed, U+00DF ISOlat1
    'tau': 0x03c4, # greek small letter tau, U+03C4 ISOgrk3
    'there4': 0x2234, # therefore, U+2234 ISOtech
    'theta': 0x03b8, # greek small letter theta, U+03B8 ISOgrk3
    'thetasym': 0x03d1, # greek small letter theta symbol, U+03D1 NEW
    'thinsp': 0x2009, # thin space, U+2009 ISOpub
    'thorn': 0x00fe, # latin small letter thorn, U+00FE ISOlat1
    'tilde': 0x02dc, # small tilde, U+02DC ISOdia
    'times': 0x00d7, # multiplication sign, U+00D7 ISOnum
    'trade': 0x2122, # trade mark sign, U+2122 ISOnum
    'uArr': 0x21d1, # upwards double arrow, U+21D1 ISOamsa
    'uacute': 0x00fa, # latin small letter u with acute, U+00FA ISOlat1
    'uarr': 0x2191, # upwards arrow, U+2191 ISOnum
    'ucirc': 0x00fb, # latin small letter u with circumflex, U+00FB ISOlat1
    'ugrave': 0x00f9, # latin small letter u with grave, U+00F9 ISOlat1
    'uml': 0x00a8, # diaeresis = spacing diaeresis, U+00A8 ISOdia
    'upsih': 0x03d2, # greek upsilon with hook symbol, U+03D2 NEW
    'upsilon': 0x03c5, # greek small letter upsilon, U+03C5 ISOgrk3
    'uuml': 0x00fc, # latin small letter u with diaeresis, U+00FC ISOlat1
    'weierp': 0x2118, # script capital P = power set = Weierstrass p, U+2118 ISOamso
    'xi': 0x03be, # greek small letter xi, U+03BE ISOgrk3
    'yacute': 0x00fd, # latin small letter y with acute, U+00FD ISOlat1
    'yen': 0x00a5, # yen sign = yuan sign, U+00A5 ISOnum
    'yuml': 0x00ff, # latin small letter y with diaeresis, U+00FF ISOlat1
    'zeta': 0x03b6, # greek small letter zeta, U+03B6 ISOgrk3
    'zwj': 0x200d, # zero width joiner, U+200D NEW RFC 2070
    'zwnj': 0x200c, # zero width non-joiner, U+200C NEW RFC 2070
    }
# maps the HTML5 named character references to the equivalent Unicode character(s)
html5 = {
'Aacute': '\xc1',
'aacute': '\xe1',
'Aacute;': '\xc1',
'aacute;': '\xe1',
'Abreve;': '\u0102',
'abreve;': '\u0103',
'ac;': '\u223e',
'acd;': '\u223f',
'acE;': '\u223e\u0333',
'Acirc': '\xc2',
'acirc': '\xe2',
'Acirc;': '\xc2',
'acirc;': '\xe2',
'acute': '\xb4',
'acute;': '\xb4',
'Acy;': '\u0410',
'acy;': '\u0430',
'AElig': '\xc6',
'aelig': '\xe6',
'AElig;': '\xc6',
'aelig;': '\xe6',
'af;': '\u2061',
'Afr;': '\U0001d504',
'afr;': '\U0001d51e',
'Agrave': '\xc0',
'agrave': '\xe0',
'Agrave;': '\xc0',
'agrave;': '\xe0',
'alefsym;': '\u2135',
'aleph;': '\u2135',
'Alpha;': '\u0391',
'alpha;': '\u03b1',
'Amacr;': '\u0100',
'amacr;': '\u0101',
'amalg;': '\u2a3f',
'AMP': '&',
'amp': '&',
'AMP;': '&',
'amp;': '&',
'And;': '\u2a53',
'and;': '\u2227',
'andand;': '\u2a55',
'andd;': '\u2a5c',
'andslope;': '\u2a58',
'andv;': '\u2a5a',
'ang;': '\u2220',
'ange;': '\u29a4',
'angle;': '\u2220',
'angmsd;': '\u2221',
'angmsdaa;': '\u29a8',
'angmsdab;': '\u29a9',
'angmsdac;': '\u29aa',
'angmsdad;': '\u29ab',
'angmsdae;': '\u29ac',
'angmsdaf;': '\u29ad',
'angmsdag;': '\u29ae',
'angmsdah;': '\u29af',
'angrt;': '\u221f',
'angrtvb;': '\u22be',
'angrtvbd;': '\u299d',
'angsph;': '\u2222',
'angst;': '\xc5',
'angzarr;': '\u237c',
'Aogon;': '\u0104',
'aogon;': '\u0105',
'Aopf;': '\U0001d538',
'aopf;': '\U0001d552',
'ap;': '\u2248',
'apacir;': '\u2a6f',
'apE;': '\u2a70',
'ape;': '\u224a',
'apid;': '\u224b',
'apos;': "'",
'ApplyFunction;': '\u2061',
'approx;': '\u2248',
'approxeq;': '\u224a',
'Aring': '\xc5',
'aring': '\xe5',
'Aring;': '\xc5',
'aring;': '\xe5',
'Ascr;': '\U0001d49c',
'ascr;': '\U0001d4b6',
'Assign;': '\u2254',
'ast;': '*',
'asymp;': '\u2248',
'asympeq;': '\u224d',
'Atilde': '\xc3',
'atilde': '\xe3',
'Atilde;': '\xc3',
'atilde;': '\xe3',
'Auml': '\xc4',
'auml': '\xe4',
'Auml;': '\xc4',
'auml;': '\xe4',
'awconint;': '\u2233',
'awint;': '\u2a11',
'backcong;': '\u224c',
'backepsilon;': '\u03f6',
'backprime;': '\u2035',
'backsim;': '\u223d',
'backsimeq;': '\u22cd',
'Backslash;': '\u2216',
'Barv;': '\u2ae7',
'barvee;': '\u22bd',
'Barwed;': '\u2306',
'barwed;': '\u2305',
'barwedge;': '\u2305',
'bbrk;': '\u23b5',
'bbrktbrk;': '\u23b6',
'bcong;': '\u224c',
'Bcy;': '\u0411',
'bcy;': '\u0431',
'bdquo;': '\u201e',
'becaus;': '\u2235',
'Because;': '\u2235',
'because;': '\u2235',
'bemptyv;': '\u29b0',
'bepsi;': '\u03f6',
'bernou;': '\u212c',
'Bernoullis;': '\u212c',
'Beta;': '\u0392',
'beta;': '\u03b2',
'beth;': '\u2136',
'between;': '\u226c',
'Bfr;': '\U0001d505',
'bfr;': '\U0001d51f',
'bigcap;': '\u22c2',
'bigcirc;': '\u25ef',
'bigcup;': '\u22c3',
'bigodot;': '\u2a00',
'bigoplus;': '\u2a01',
'bigotimes;': '\u2a02',
'bigsqcup;': '\u2a06',
'bigstar;': '\u2605',
'bigtriangledown;': '\u25bd',
'bigtriangleup;': '\u25b3',
'biguplus;': '\u2a04',
'bigvee;': '\u22c1',
'bigwedge;': '\u22c0',
'bkarow;': '\u290d',
'blacklozenge;': '\u29eb',
'blacksquare;': '\u25aa',
'blacktriangle;': '\u25b4',
'blacktriangledown;': '\u25be',
'blacktriangleleft;': '\u25c2',
'blacktriangleright;': '\u25b8',
'blank;': '\u2423',
'blk12;': '\u2592',
'blk14;': '\u2591',
'blk34;': '\u2593',
'block;': '\u2588',
'bne;': '=\u20e5',
'bnequiv;': '\u2261\u20e5',
'bNot;': '\u2aed',
'bnot;': '\u2310',
'Bopf;': '\U0001d539',
'bopf;': '\U0001d553',
'bot;': '\u22a5',
'bottom;': '\u22a5',
'bowtie;': '\u22c8',
'boxbox;': '\u29c9',
'boxDL;': '\u2557',
'boxDl;': '\u2556',
'boxdL;': '\u2555',
'boxdl;': '\u2510',
'boxDR;': '\u2554',
'boxDr;': '\u2553',
'boxdR;': '\u2552',
'boxdr;': '\u250c',
'boxH;': '\u2550',
'boxh;': '\u2500',
'boxHD;': '\u2566',
'boxHd;': '\u2564',
'boxhD;': '\u2565',
'boxhd;': '\u252c',
'boxHU;': '\u2569',
'boxHu;': '\u2567',
'boxhU;': '\u2568',
'boxhu;': '\u2534',
'boxminus;': '\u229f',
'boxplus;': '\u229e',
'boxtimes;': '\u22a0',
'boxUL;': '\u255d',
'boxUl;': '\u255c',
'boxuL;': '\u255b',
'boxul;': '\u2518',
'boxUR;': '\u255a',
'boxUr;': '\u2559',
'boxuR;': '\u2558',
'boxur;': '\u2514',
'boxV;': '\u2551',
'boxv;': '\u2502',
'boxVH;': '\u256c',
'boxVh;': '\u256b',
'boxvH;': '\u256a',
'boxvh;': '\u253c',
'boxVL;': '\u2563',
'boxVl;': '\u2562',
'boxvL;': '\u2561',
'boxvl;': '\u2524',
'boxVR;': '\u2560',
'boxVr;': '\u255f',
'boxvR;': '\u255e',
'boxvr;': '\u251c',
'bprime;': '\u2035',
'Breve;': '\u02d8',
'breve;': '\u02d8',
'brvbar': '\xa6',
'brvbar;': '\xa6',
'Bscr;': '\u212c',
'bscr;': '\U0001d4b7',
'bsemi;': '\u204f',
'bsim;': '\u223d',
'bsime;': '\u22cd',
'bsol;': '\\',
'bsolb;': '\u29c5',
'bsolhsub;': '\u27c8',
'bull;': '\u2022',
'bullet;': '\u2022',
'bump;': '\u224e',
'bumpE;': '\u2aae',
'bumpe;': '\u224f',
'Bumpeq;': '\u224e',
'bumpeq;': '\u224f',
'Cacute;': '\u0106',
'cacute;': '\u0107',
'Cap;': '\u22d2',
'cap;': '\u2229',
'capand;': '\u2a44',
'capbrcup;': '\u2a49',
'capcap;': '\u2a4b',
'capcup;': '\u2a47',
'capdot;': '\u2a40',
'CapitalDifferentialD;': '\u2145',
'caps;': '\u2229\ufe00',
'caret;': '\u2041',
'caron;': '\u02c7',
'Cayleys;': '\u212d',
'ccaps;': '\u2a4d',
'Ccaron;': '\u010c',
'ccaron;': '\u010d',
'Ccedil': '\xc7',
'ccedil': '\xe7',
'Ccedil;': '\xc7',
'ccedil;': '\xe7',
'Ccirc;': '\u0108',
'ccirc;': '\u0109',
'Cconint;': '\u2230',
'ccups;': '\u2a4c',
'ccupssm;': '\u2a50',
'Cdot;': '\u010a',
'cdot;': '\u010b',
'cedil': '\xb8',
'cedil;': '\xb8',
'Cedilla;': '\xb8',
'cemptyv;': '\u29b2',
'cent': '\xa2',
'cent;': '\xa2',
'CenterDot;': '\xb7',
'centerdot;': '\xb7',
'Cfr;': '\u212d',
'cfr;': '\U0001d520',
'CHcy;': '\u0427',
'chcy;': '\u0447',
'check;': '\u2713',
'checkmark;': '\u2713',
'Chi;': '\u03a7',
'chi;': '\u03c7',
'cir;': '\u25cb',
'circ;': '\u02c6',
'circeq;': '\u2257',
'circlearrowleft;': '\u21ba',
'circlearrowright;': '\u21bb',
'circledast;': '\u229b',
'circledcirc;': '\u229a',
'circleddash;': '\u229d',
'CircleDot;': '\u2299',
'circledR;': '\xae',
'circledS;': '\u24c8',
'CircleMinus;': '\u2296',
'CirclePlus;': '\u2295',
'CircleTimes;': '\u2297',
'cirE;': '\u29c3',
'cire;': '\u2257',
'cirfnint;': '\u2a10',
'cirmid;': '\u2aef',
'cirscir;': '\u29c2',
'ClockwiseContourIntegral;': '\u2232',
'CloseCurlyDoubleQuote;': '\u201d',
'CloseCurlyQuote;': '\u2019',
'clubs;': '\u2663',
'clubsuit;': '\u2663',
'Colon;': '\u2237',
'colon;': ':',
'Colone;': '\u2a74',
'colone;': '\u2254',
'coloneq;': '\u2254',
'comma;': ',',
'commat;': '@',
'comp;': '\u2201',
'compfn;': '\u2218',
'complement;': '\u2201',
'complexes;': '\u2102',
'cong;': '\u2245',
'congdot;': '\u2a6d',
'Congruent;': '\u2261',
'Conint;': '\u222f',
'conint;': '\u222e',
'ContourIntegral;': '\u222e',
'Copf;': '\u2102',
'copf;': '\U0001d554',
'coprod;': '\u2210',
'Coproduct;': '\u2210',
'COPY': '\xa9',
'copy': '\xa9',
'COPY;': '\xa9',
'copy;': '\xa9',
'copysr;': '\u2117',
'CounterClockwiseContourIntegral;': '\u2233',
'crarr;': '\u21b5',
'Cross;': '\u2a2f',
'cross;': '\u2717',
'Cscr;': '\U0001d49e',
'cscr;': '\U0001d4b8',
'csub;': '\u2acf',
'csube;': '\u2ad1',
'csup;': '\u2ad0',
'csupe;': '\u2ad2',
'ctdot;': '\u22ef',
'cudarrl;': '\u2938',
'cudarrr;': '\u2935',
'cuepr;': '\u22de',
'cuesc;': '\u22df',
'cularr;': '\u21b6',
'cularrp;': '\u293d',
'Cup;': '\u22d3',
'cup;': '\u222a',
'cupbrcap;': '\u2a48',
'CupCap;': '\u224d',
'cupcap;': '\u2a46',
'cupcup;': '\u2a4a',
'cupdot;': '\u228d',
'cupor;': '\u2a45',
'cups;': '\u222a\ufe00',
'curarr;': '\u21b7',
'curarrm;': '\u293c',
'curlyeqprec;': '\u22de',
'curlyeqsucc;': '\u22df',
'curlyvee;': '\u22ce',
'curlywedge;': '\u22cf',
'curren': '\xa4',
'curren;': '\xa4',
'curvearrowleft;': '\u21b6',
'curvearrowright;': '\u21b7',
'cuvee;': '\u22ce',
'cuwed;': '\u22cf',
'cwconint;': '\u2232',
'cwint;': '\u2231',
'cylcty;': '\u232d',
'Dagger;': '\u2021',
'dagger;': '\u2020',
'daleth;': '\u2138',
'Darr;': '\u21a1',
'dArr;': '\u21d3',
'darr;': '\u2193',
'dash;': '\u2010',
'Dashv;': '\u2ae4',
'dashv;': '\u22a3',
'dbkarow;': '\u290f',
'dblac;': '\u02dd',
'Dcaron;': '\u010e',
'dcaron;': '\u010f',
'Dcy;': '\u0414',
'dcy;': '\u0434',
'DD;': '\u2145',
'dd;': '\u2146',
'ddagger;': '\u2021',
'ddarr;': '\u21ca',
'DDotrahd;': '\u2911',
'ddotseq;': '\u2a77',
'deg': '\xb0',
'deg;': '\xb0',
'Del;': '\u2207',
'Delta;': '\u0394',
'delta;': '\u03b4',
'demptyv;': '\u29b1',
'dfisht;': '\u297f',
'Dfr;': '\U0001d507',
'dfr;': '\U0001d521',
'dHar;': '\u2965',
'dharl;': '\u21c3',
'dharr;': '\u21c2',
'DiacriticalAcute;': '\xb4',
'DiacriticalDot;': '\u02d9',
'DiacriticalDoubleAcute;': '\u02dd',
'DiacriticalGrave;': '`',
'DiacriticalTilde;': '\u02dc',
'diam;': '\u22c4',
'Diamond;': '\u22c4',
'diamond;': '\u22c4',
'diamondsuit;': '\u2666',
'diams;': '\u2666',
'die;': '\xa8',
'DifferentialD;': '\u2146',
'digamma;': '\u03dd',
'disin;': '\u22f2',
'div;': '\xf7',
'divide': '\xf7',
'divide;': '\xf7',
'divideontimes;': '\u22c7',
'divonx;': '\u22c7',
'DJcy;': '\u0402',
'djcy;': '\u0452',
'dlcorn;': '\u231e',
'dlcrop;': '\u230d',
'dollar;': '$',
'Dopf;': '\U0001d53b',
'dopf;': '\U0001d555',
'Dot;': '\xa8',
'dot;': '\u02d9',
'DotDot;': '\u20dc',
'doteq;': '\u2250',
'doteqdot;': '\u2251',
'DotEqual;': '\u2250',
'dotminus;': '\u2238',
'dotplus;': '\u2214',
'dotsquare;': '\u22a1',
'doublebarwedge;': '\u2306',
'DoubleContourIntegral;': '\u222f',
'DoubleDot;': '\xa8',
'DoubleDownArrow;': '\u21d3',
'DoubleLeftArrow;': '\u21d0',
'DoubleLeftRightArrow;': '\u21d4',
'DoubleLeftTee;': '\u2ae4',
'DoubleLongLeftArrow;': '\u27f8',
'DoubleLongLeftRightArrow;': '\u27fa',
'DoubleLongRightArrow;': '\u27f9',
'DoubleRightArrow;': '\u21d2',
'DoubleRightTee;': '\u22a8',
'DoubleUpArrow;': '\u21d1',
'DoubleUpDownArrow;': '\u21d5',
'DoubleVerticalBar;': '\u2225',
'DownArrow;': '\u2193',
'Downarrow;': '\u21d3',
'downarrow;': '\u2193',
'DownArrowBar;': '\u2913',
'DownArrowUpArrow;': '\u21f5',
'DownBreve;': '\u0311',
'downdownarrows;': '\u21ca',
'downharpoonleft;': '\u21c3',
'downharpoonright;': '\u21c2',
'DownLeftRightVector;': '\u2950',
'DownLeftTeeVector;': '\u295e',
'DownLeftVector;': '\u21bd',
'DownLeftVectorBar;': '\u2956',
'DownRightTeeVector;': '\u295f',
'DownRightVector;': '\u21c1',
'DownRightVectorBar;': '\u2957',
'DownTee;': '\u22a4',
'DownTeeArrow;': '\u21a7',
'drbkarow;': '\u2910',
'drcorn;': '\u231f',
'drcrop;': '\u230c',
'Dscr;': '\U0001d49f',
'dscr;': '\U0001d4b9',
'DScy;': '\u0405',
'dscy;': '\u0455',
'dsol;': '\u29f6',
'Dstrok;': '\u0110',
'dstrok;': '\u0111',
'dtdot;': '\u22f1',
'dtri;': '\u25bf',
'dtrif;': '\u25be',
'duarr;': '\u21f5',
'duhar;': '\u296f',
'dwangle;': '\u29a6',
'DZcy;': '\u040f',
'dzcy;': '\u045f',
'dzigrarr;': '\u27ff',
'Eacute': '\xc9',
'eacute': '\xe9',
'Eacute;': '\xc9',
'eacute;': '\xe9',
'easter;': '\u2a6e',
'Ecaron;': '\u011a',
'ecaron;': '\u011b',
'ecir;': '\u2256',
'Ecirc': '\xca',
'ecirc': '\xea',
'Ecirc;': '\xca',
'ecirc;': '\xea',
'ecolon;': '\u2255',
'Ecy;': '\u042d',
'ecy;': '\u044d',
'eDDot;': '\u2a77',
'Edot;': '\u0116',
'eDot;': '\u2251',
'edot;': '\u0117',
'ee;': '\u2147',
'efDot;': '\u2252',
'Efr;': '\U0001d508',
'efr;': '\U0001d522',
'eg;': '\u2a9a',
'Egrave': '\xc8',
'egrave': '\xe8',
'Egrave;': '\xc8',
'egrave;': '\xe8',
'egs;': '\u2a96',
'egsdot;': '\u2a98',
'el;': '\u2a99',
'Element;': '\u2208',
'elinters;': '\u23e7',
'ell;': '\u2113',
'els;': '\u2a95',
'elsdot;': '\u2a97',
'Emacr;': '\u0112',
'emacr;': '\u0113',
'empty;': '\u2205',
'emptyset;': '\u2205',
'EmptySmallSquare;': '\u25fb',
'emptyv;': '\u2205',
'EmptyVerySmallSquare;': '\u25ab',
'emsp13;': '\u2004',
'emsp14;': '\u2005',
'emsp;': '\u2003',
'ENG;': '\u014a',
'eng;': '\u014b',
'ensp;': '\u2002',
'Eogon;': '\u0118',
'eogon;': '\u0119',
'Eopf;': '\U0001d53c',
'eopf;': '\U0001d556',
'epar;': '\u22d5',
'eparsl;': '\u29e3',
'eplus;': '\u2a71',
'epsi;': '\u03b5',
'Epsilon;': '\u0395',
'epsilon;': '\u03b5',
'epsiv;': '\u03f5',
'eqcirc;': '\u2256',
'eqcolon;': '\u2255',
'eqsim;': '\u2242',
'eqslantgtr;': '\u2a96',
'eqslantless;': '\u2a95',
'Equal;': '\u2a75',
'equals;': '=',
'EqualTilde;': '\u2242',
'equest;': '\u225f',
'Equilibrium;': '\u21cc',
'equiv;': '\u2261',
'equivDD;': '\u2a78',
'eqvparsl;': '\u29e5',
'erarr;': '\u2971',
'erDot;': '\u2253',
'Escr;': '\u2130',
'escr;': '\u212f',
'esdot;': '\u2250',
'Esim;': '\u2a73',
'esim;': '\u2242',
'Eta;': '\u0397',
'eta;': '\u03b7',
'ETH': '\xd0',
'eth': '\xf0',
'ETH;': '\xd0',
'eth;': '\xf0',
'Euml': '\xcb',
'euml': '\xeb',
'Euml;': '\xcb',
'euml;': '\xeb',
'euro;': '\u20ac',
'excl;': '!',
'exist;': '\u2203',
'Exists;': '\u2203',
'expectation;': '\u2130',
'ExponentialE;': '\u2147',
'exponentiale;': '\u2147',
'fallingdotseq;': '\u2252',
'Fcy;': '\u0424',
'fcy;': '\u0444',
'female;': '\u2640',
'ffilig;': '\ufb03',
'fflig;': '\ufb00',
'ffllig;': '\ufb04',
'Ffr;': '\U0001d509',
'ffr;': '\U0001d523',
'filig;': '\ufb01',
'FilledSmallSquare;': '\u25fc',
'FilledVerySmallSquare;': '\u25aa',
'fjlig;': 'fj',
'flat;': '\u266d',
'fllig;': '\ufb02',
'fltns;': '\u25b1',
'fnof;': '\u0192',
'Fopf;': '\U0001d53d',
'fopf;': '\U0001d557',
'ForAll;': '\u2200',
'forall;': '\u2200',
'fork;': '\u22d4',
'forkv;': '\u2ad9',
'Fouriertrf;': '\u2131',
'fpartint;': '\u2a0d',
'frac12': '\xbd',
'frac12;': '\xbd',
'frac13;': '\u2153',
'frac14': '\xbc',
'frac14;': '\xbc',
'frac15;': '\u2155',
'frac16;': '\u2159',
'frac18;': '\u215b',
'frac23;': '\u2154',
'frac25;': '\u2156',
'frac34': '\xbe',
'frac34;': '\xbe',
'frac35;': '\u2157',
'frac38;': '\u215c',
'frac45;': '\u2158',
'frac56;': '\u215a',
'frac58;': '\u215d',
'frac78;': '\u215e',
'frasl;': '\u2044',
'frown;': '\u2322',
'Fscr;': '\u2131',
'fscr;': '\U0001d4bb',
'gacute;': '\u01f5',
'Gamma;': '\u0393',
'gamma;': '\u03b3',
'Gammad;': '\u03dc',
'gammad;': '\u03dd',
'gap;': '\u2a86',
'Gbreve;': '\u011e',
'gbreve;': '\u011f',
'Gcedil;': '\u0122',
'Gcirc;': '\u011c',
'gcirc;': '\u011d',
'Gcy;': '\u0413',
'gcy;': '\u0433',
'Gdot;': '\u0120',
'gdot;': '\u0121',
'gE;': '\u2267',
'ge;': '\u2265',
'gEl;': '\u2a8c',
'gel;': '\u22db',
'geq;': '\u2265',
'geqq;': '\u2267',
'geqslant;': '\u2a7e',
'ges;': '\u2a7e',
'gescc;': '\u2aa9',
'gesdot;': '\u2a80',
'gesdoto;': '\u2a82',
'gesdotol;': '\u2a84',
'gesl;': '\u22db\ufe00',
'gesles;': '\u2a94',
'Gfr;': '\U0001d50a',
'gfr;': '\U0001d524',
'Gg;': '\u22d9',
'gg;': '\u226b',
'ggg;': '\u22d9',
'gimel;': '\u2137',
'GJcy;': '\u0403',
'gjcy;': '\u0453',
'gl;': '\u2277',
'gla;': '\u2aa5',
'glE;': '\u2a92',
'glj;': '\u2aa4',
'gnap;': '\u2a8a',
'gnapprox;': '\u2a8a',
'gnE;': '\u2269',
'gne;': '\u2a88',
'gneq;': '\u2a88',
'gneqq;': '\u2269',
'gnsim;': '\u22e7',
'Gopf;': '\U0001d53e',
'gopf;': '\U0001d558',
'grave;': '`',
'GreaterEqual;': '\u2265',
'GreaterEqualLess;': '\u22db',
'GreaterFullEqual;': '\u2267',
'GreaterGreater;': '\u2aa2',
'GreaterLess;': '\u2277',
'GreaterSlantEqual;': '\u2a7e',
'GreaterTilde;': '\u2273',
'Gscr;': '\U0001d4a2',
'gscr;': '\u210a',
'gsim;': '\u2273',
'gsime;': '\u2a8e',
'gsiml;': '\u2a90',
'GT': '>',
'gt': '>',
'GT;': '>',
'Gt;': '\u226b',
'gt;': '>',
'gtcc;': '\u2aa7',
'gtcir;': '\u2a7a',
'gtdot;': '\u22d7',
'gtlPar;': '\u2995',
'gtquest;': '\u2a7c',
'gtrapprox;': '\u2a86',
'gtrarr;': '\u2978',
'gtrdot;': '\u22d7',
'gtreqless;': '\u22db',
'gtreqqless;': '\u2a8c',
'gtrless;': '\u2277',
'gtrsim;': '\u2273',
'gvertneqq;': '\u2269\ufe00',
'gvnE;': '\u2269\ufe00',
'Hacek;': '\u02c7',
'hairsp;': '\u200a',
'half;': '\xbd',
'hamilt;': '\u210b',
'HARDcy;': '\u042a',
'hardcy;': '\u044a',
'hArr;': '\u21d4',
'harr;': '\u2194',
'harrcir;': '\u2948',
'harrw;': '\u21ad',
'Hat;': '^',
'hbar;': '\u210f',
'Hcirc;': '\u0124',
'hcirc;': '\u0125',
'hearts;': '\u2665',
'heartsuit;': '\u2665',
'hellip;': '\u2026',
'hercon;': '\u22b9',
'Hfr;': '\u210c',
'hfr;': '\U0001d525',
'HilbertSpace;': '\u210b',
'hksearow;': '\u2925',
'hkswarow;': '\u2926',
'hoarr;': '\u21ff',
'homtht;': '\u223b',
'hookleftarrow;': '\u21a9',
'hookrightarrow;': '\u21aa',
'Hopf;': '\u210d',
'hopf;': '\U0001d559',
'horbar;': '\u2015',
'HorizontalLine;': '\u2500',
'Hscr;': '\u210b',
'hscr;': '\U0001d4bd',
'hslash;': '\u210f',
'Hstrok;': '\u0126',
'hstrok;': '\u0127',
'HumpDownHump;': '\u224e',
'HumpEqual;': '\u224f',
'hybull;': '\u2043',
'hyphen;': '\u2010',
'Iacute': '\xcd',
'iacute': '\xed',
'Iacute;': '\xcd',
'iacute;': '\xed',
'ic;': '\u2063',
'Icirc': '\xce',
'icirc': '\xee',
'Icirc;': '\xce',
'icirc;': '\xee',
'Icy;': '\u0418',
'icy;': '\u0438',
'Idot;': '\u0130',
'IEcy;': '\u0415',
'iecy;': '\u0435',
'iexcl': '\xa1',
'iexcl;': '\xa1',
'iff;': '\u21d4',
'Ifr;': '\u2111',
'ifr;': '\U0001d526',
'Igrave': '\xcc',
'igrave': '\xec',
'Igrave;': '\xcc',
'igrave;': '\xec',
'ii;': '\u2148',
'iiiint;': '\u2a0c',
'iiint;': '\u222d',
'iinfin;': '\u29dc',
'iiota;': '\u2129',
'IJlig;': '\u0132',
'ijlig;': '\u0133',
'Im;': '\u2111',
'Imacr;': '\u012a',
'imacr;': '\u012b',
'image;': '\u2111',
'ImaginaryI;': '\u2148',
'imagline;': '\u2110',
'imagpart;': '\u2111',
'imath;': '\u0131',
'imof;': '\u22b7',
'imped;': '\u01b5',
'Implies;': '\u21d2',
'in;': '\u2208',
'incare;': '\u2105',
'infin;': '\u221e',
'infintie;': '\u29dd',
'inodot;': '\u0131',
'Int;': '\u222c',
'int;': '\u222b',
'intcal;': '\u22ba',
'integers;': '\u2124',
'Integral;': '\u222b',
'intercal;': '\u22ba',
'Intersection;': '\u22c2',
'intlarhk;': '\u2a17',
'intprod;': '\u2a3c',
'InvisibleComma;': '\u2063',
'InvisibleTimes;': '\u2062',
'IOcy;': '\u0401',
'iocy;': '\u0451',
'Iogon;': '\u012e',
'iogon;': '\u012f',
'Iopf;': '\U0001d540',
'iopf;': '\U0001d55a',
'Iota;': '\u0399',
'iota;': '\u03b9',
'iprod;': '\u2a3c',
'iquest': '\xbf',
'iquest;': '\xbf',
'Iscr;': '\u2110',
'iscr;': '\U0001d4be',
'isin;': '\u2208',
'isindot;': '\u22f5',
'isinE;': '\u22f9',
'isins;': '\u22f4',
'isinsv;': '\u22f3',
'isinv;': '\u2208',
'it;': '\u2062',
'Itilde;': '\u0128',
'itilde;': '\u0129',
'Iukcy;': '\u0406',
'iukcy;': '\u0456',
'Iuml': '\xcf',
'iuml': '\xef',
'Iuml;': '\xcf',
'iuml;': '\xef',
'Jcirc;': '\u0134',
'jcirc;': '\u0135',
'Jcy;': '\u0419',
'jcy;': '\u0439',
'Jfr;': '\U0001d50d',
'jfr;': '\U0001d527',
'jmath;': '\u0237',
'Jopf;': '\U0001d541',
'jopf;': '\U0001d55b',
'Jscr;': '\U0001d4a5',
'jscr;': '\U0001d4bf',
'Jsercy;': '\u0408',
'jsercy;': '\u0458',
'Jukcy;': '\u0404',
'jukcy;': '\u0454',
'Kappa;': '\u039a',
'kappa;': '\u03ba',
'kappav;': '\u03f0',
'Kcedil;': '\u0136',
'kcedil;': '\u0137',
'Kcy;': '\u041a',
'kcy;': '\u043a',
'Kfr;': '\U0001d50e',
'kfr;': '\U0001d528',
'kgreen;': '\u0138',
'KHcy;': '\u0425',
'khcy;': '\u0445',
'KJcy;': '\u040c',
'kjcy;': '\u045c',
'Kopf;': '\U0001d542',
'kopf;': '\U0001d55c',
'Kscr;': '\U0001d4a6',
'kscr;': '\U0001d4c0',
'lAarr;': '\u21da',
'Lacute;': '\u0139',
'lacute;': '\u013a',
'laemptyv;': '\u29b4',
'lagran;': '\u2112',
'Lambda;': '\u039b',
'lambda;': '\u03bb',
'Lang;': '\u27ea',
'lang;': '\u27e8',
'langd;': '\u2991',
'langle;': '\u27e8',
'lap;': '\u2a85',
'Laplacetrf;': '\u2112',
'laquo': '\xab',
'laquo;': '\xab',
'Larr;': '\u219e',
'lArr;': '\u21d0',
'larr;': '\u2190',
'larrb;': '\u21e4',
'larrbfs;': '\u291f',
'larrfs;': '\u291d',
'larrhk;': '\u21a9',
'larrlp;': '\u21ab',
'larrpl;': '\u2939',
'larrsim;': '\u2973',
'larrtl;': '\u21a2',
'lat;': '\u2aab',
'lAtail;': '\u291b',
'latail;': '\u2919',
'late;': '\u2aad',
'lates;': '\u2aad\ufe00',
'lBarr;': '\u290e',
'lbarr;': '\u290c',
'lbbrk;': '\u2772',
'lbrace;': '{',
'lbrack;': '[',
'lbrke;': '\u298b',
'lbrksld;': '\u298f',
'lbrkslu;': '\u298d',
'Lcaron;': '\u013d',
'lcaron;': '\u013e',
'Lcedil;': '\u013b',
'lcedil;': '\u013c',
'lceil;': '\u2308',
'lcub;': '{',
'Lcy;': '\u041b',
'lcy;': '\u043b',
'ldca;': '\u2936',
'ldquo;': '\u201c',
'ldquor;': '\u201e',
'ldrdhar;': '\u2967',
'ldrushar;': '\u294b',
'ldsh;': '\u21b2',
'lE;': '\u2266',
'le;': '\u2264',
'LeftAngleBracket;': '\u27e8',
'LeftArrow;': '\u2190',
'Leftarrow;': '\u21d0',
'leftarrow;': '\u2190',
'LeftArrowBar;': '\u21e4',
'LeftArrowRightArrow;': '\u21c6',
'leftarrowtail;': '\u21a2',
'LeftCeiling;': '\u2308',
'LeftDoubleBracket;': '\u27e6',
'LeftDownTeeVector;': '\u2961',
'LeftDownVector;': '\u21c3',
'LeftDownVectorBar;': '\u2959',
'LeftFloor;': '\u230a',
'leftharpoondown;': '\u21bd',
'leftharpoonup;': '\u21bc',
'leftleftarrows;': '\u21c7',
'LeftRightArrow;': '\u2194',
'Leftrightarrow;': '\u21d4',
'leftrightarrow;': '\u2194',
'leftrightarrows;': '\u21c6',
'leftrightharpoons;': '\u21cb',
'leftrightsquigarrow;': '\u21ad',
'LeftRightVector;': '\u294e',
'LeftTee;': '\u22a3',
'LeftTeeArrow;': '\u21a4',
'LeftTeeVector;': '\u295a',
'leftthreetimes;': '\u22cb',
'LeftTriangle;': '\u22b2',
'LeftTriangleBar;': '\u29cf',
'LeftTriangleEqual;': '\u22b4',
'LeftUpDownVector;': '\u2951',
'LeftUpTeeVector;': '\u2960',
'LeftUpVector;': '\u21bf',
'LeftUpVectorBar;': '\u2958',
'LeftVector;': '\u21bc',
'LeftVectorBar;': '\u2952',
'lEg;': '\u2a8b',
'leg;': '\u22da',
'leq;': '\u2264',
'leqq;': '\u2266',
'leqslant;': '\u2a7d',
'les;': '\u2a7d',
'lescc;': '\u2aa8',
'lesdot;': '\u2a7f',
'lesdoto;': '\u2a81',
'lesdotor;': '\u2a83',
'lesg;': '\u22da\ufe00',
'lesges;': '\u2a93',
'lessapprox;': '\u2a85',
'lessdot;': '\u22d6',
'lesseqgtr;': '\u22da',
'lesseqqgtr;': '\u2a8b',
'LessEqualGreater;': '\u22da',
'LessFullEqual;': '\u2266',
'LessGreater;': '\u2276',
'lessgtr;': '\u2276',
'LessLess;': '\u2aa1',
'lesssim;': '\u2272',
'LessSlantEqual;': '\u2a7d',
'LessTilde;': '\u2272',
'lfisht;': '\u297c',
'lfloor;': '\u230a',
'Lfr;': '\U0001d50f',
'lfr;': '\U0001d529',
'lg;': '\u2276',
'lgE;': '\u2a91',
'lHar;': '\u2962',
'lhard;': '\u21bd',
'lharu;': '\u21bc',
'lharul;': '\u296a',
'lhblk;': '\u2584',
'LJcy;': '\u0409',
'ljcy;': '\u0459',
'Ll;': '\u22d8',
'll;': '\u226a',
'llarr;': '\u21c7',
'llcorner;': '\u231e',
'Lleftarrow;': '\u21da',
'llhard;': '\u296b',
'lltri;': '\u25fa',
'Lmidot;': '\u013f',
'lmidot;': '\u0140',
'lmoust;': '\u23b0',
'lmoustache;': '\u23b0',
'lnap;': '\u2a89',
'lnapprox;': '\u2a89',
'lnE;': '\u2268',
'lne;': '\u2a87',
'lneq;': '\u2a87',
'lneqq;': '\u2268',
'lnsim;': '\u22e6',
'loang;': '\u27ec',
'loarr;': '\u21fd',
'lobrk;': '\u27e6',
'LongLeftArrow;': '\u27f5',
'Longleftarrow;': '\u27f8',
'longleftarrow;': '\u27f5',
'LongLeftRightArrow;': '\u27f7',
'Longleftrightarrow;': '\u27fa',
'longleftrightarrow;': '\u27f7',
'longmapsto;': '\u27fc',
'LongRightArrow;': '\u27f6',
'Longrightarrow;': '\u27f9',
'longrightarrow;': '\u27f6',
'looparrowleft;': '\u21ab',
'looparrowright;': '\u21ac',
'lopar;': '\u2985',
'Lopf;': '\U0001d543',
'lopf;': '\U0001d55d',
'loplus;': '\u2a2d',
'lotimes;': '\u2a34',
'lowast;': '\u2217',
'lowbar;': '_',
'LowerLeftArrow;': '\u2199',
'LowerRightArrow;': '\u2198',
'loz;': '\u25ca',
'lozenge;': '\u25ca',
'lozf;': '\u29eb',
'lpar;': '(',
'lparlt;': '\u2993',
'lrarr;': '\u21c6',
'lrcorner;': '\u231f',
'lrhar;': '\u21cb',
'lrhard;': '\u296d',
'lrm;': '\u200e',
'lrtri;': '\u22bf',
'lsaquo;': '\u2039',
'Lscr;': '\u2112',
'lscr;': '\U0001d4c1',
'Lsh;': '\u21b0',
'lsh;': '\u21b0',
'lsim;': '\u2272',
'lsime;': '\u2a8d',
'lsimg;': '\u2a8f',
'lsqb;': '[',
'lsquo;': '\u2018',
'lsquor;': '\u201a',
'Lstrok;': '\u0141',
'lstrok;': '\u0142',
'LT': '<',
'lt': '<',
'LT;': '<',
'Lt;': '\u226a',
'lt;': '<',
'ltcc;': '\u2aa6',
'ltcir;': '\u2a79',
'ltdot;': '\u22d6',
'lthree;': '\u22cb',
'ltimes;': '\u22c9',
'ltlarr;': '\u2976',
'ltquest;': '\u2a7b',
'ltri;': '\u25c3',
'ltrie;': '\u22b4',
'ltrif;': '\u25c2',
'ltrPar;': '\u2996',
'lurdshar;': '\u294a',
'luruhar;': '\u2966',
'lvertneqq;': '\u2268\ufe00',
'lvnE;': '\u2268\ufe00',
'macr': '\xaf',
'macr;': '\xaf',
'male;': '\u2642',
'malt;': '\u2720',
'maltese;': '\u2720',
'Map;': '\u2905',
'map;': '\u21a6',
'mapsto;': '\u21a6',
'mapstodown;': '\u21a7',
'mapstoleft;': '\u21a4',
'mapstoup;': '\u21a5',
'marker;': '\u25ae',
'mcomma;': '\u2a29',
'Mcy;': '\u041c',
'mcy;': '\u043c',
'mdash;': '\u2014',
'mDDot;': '\u223a',
'measuredangle;': '\u2221',
'MediumSpace;': '\u205f',
'Mellintrf;': '\u2133',
'Mfr;': '\U0001d510',
'mfr;': '\U0001d52a',
'mho;': '\u2127',
'micro': '\xb5',
'micro;': '\xb5',
'mid;': '\u2223',
'midast;': '*',
'midcir;': '\u2af0',
'middot': '\xb7',
'middot;': '\xb7',
'minus;': '\u2212',
'minusb;': '\u229f',
'minusd;': '\u2238',
'minusdu;': '\u2a2a',
'MinusPlus;': '\u2213',
'mlcp;': '\u2adb',
'mldr;': '\u2026',
'mnplus;': '\u2213',
'models;': '\u22a7',
'Mopf;': '\U0001d544',
'mopf;': '\U0001d55e',
'mp;': '\u2213',
'Mscr;': '\u2133',
'mscr;': '\U0001d4c2',
'mstpos;': '\u223e',
'Mu;': '\u039c',
'mu;': '\u03bc',
'multimap;': '\u22b8',
'mumap;': '\u22b8',
'nabla;': '\u2207',
'Nacute;': '\u0143',
'nacute;': '\u0144',
'nang;': '\u2220\u20d2',
'nap;': '\u2249',
'napE;': '\u2a70\u0338',
'napid;': '\u224b\u0338',
'napos;': '\u0149',
'napprox;': '\u2249',
'natur;': '\u266e',
'natural;': '\u266e',
'naturals;': '\u2115',
'nbsp': '\xa0',
'nbsp;': '\xa0',
'nbump;': '\u224e\u0338',
'nbumpe;': '\u224f\u0338',
'ncap;': '\u2a43',
'Ncaron;': '\u0147',
'ncaron;': '\u0148',
'Ncedil;': '\u0145',
'ncedil;': '\u0146',
'ncong;': '\u2247',
'ncongdot;': '\u2a6d\u0338',
'ncup;': '\u2a42',
'Ncy;': '\u041d',
'ncy;': '\u043d',
'ndash;': '\u2013',
'ne;': '\u2260',
'nearhk;': '\u2924',
'neArr;': '\u21d7',
'nearr;': '\u2197',
'nearrow;': '\u2197',
'nedot;': '\u2250\u0338',
'NegativeMediumSpace;': '\u200b',
'NegativeThickSpace;': '\u200b',
'NegativeThinSpace;': '\u200b',
'NegativeVeryThinSpace;': '\u200b',
'nequiv;': '\u2262',
'nesear;': '\u2928',
'nesim;': '\u2242\u0338',
'NestedGreaterGreater;': '\u226b',
'NestedLessLess;': '\u226a',
'NewLine;': '\n',
'nexist;': '\u2204',
'nexists;': '\u2204',
'Nfr;': '\U0001d511',
'nfr;': '\U0001d52b',
'ngE;': '\u2267\u0338',
'nge;': '\u2271',
'ngeq;': '\u2271',
'ngeqq;': '\u2267\u0338',
'ngeqslant;': '\u2a7e\u0338',
'nges;': '\u2a7e\u0338',
'nGg;': '\u22d9\u0338',
'ngsim;': '\u2275',
'nGt;': '\u226b\u20d2',
'ngt;': '\u226f',
'ngtr;': '\u226f',
'nGtv;': '\u226b\u0338',
'nhArr;': '\u21ce',
'nharr;': '\u21ae',
'nhpar;': '\u2af2',
'ni;': '\u220b',
'nis;': '\u22fc',
'nisd;': '\u22fa',
'niv;': '\u220b',
'NJcy;': '\u040a',
'njcy;': '\u045a',
'nlArr;': '\u21cd',
'nlarr;': '\u219a',
'nldr;': '\u2025',
'nlE;': '\u2266\u0338',
'nle;': '\u2270',
'nLeftarrow;': '\u21cd',
'nleftarrow;': '\u219a',
'nLeftrightarrow;': '\u21ce',
'nleftrightarrow;': '\u21ae',
'nleq;': '\u2270',
'nleqq;': '\u2266\u0338',
'nleqslant;': '\u2a7d\u0338',
'nles;': '\u2a7d\u0338',
'nless;': '\u226e',
'nLl;': '\u22d8\u0338',
'nlsim;': '\u2274',
'nLt;': '\u226a\u20d2',
'nlt;': '\u226e',
'nltri;': '\u22ea',
'nltrie;': '\u22ec',
'nLtv;': '\u226a\u0338',
'nmid;': '\u2224',
'NoBreak;': '\u2060',
'NonBreakingSpace;': '\xa0',
'Nopf;': '\u2115',
'nopf;': '\U0001d55f',
'not': '\xac',
'Not;': '\u2aec',
'not;': '\xac',
'NotCongruent;': '\u2262',
'NotCupCap;': '\u226d',
'NotDoubleVerticalBar;': '\u2226',
'NotElement;': '\u2209',
'NotEqual;': '\u2260',
'NotEqualTilde;': '\u2242\u0338',
'NotExists;': '\u2204',
'NotGreater;': '\u226f',
'NotGreaterEqual;': '\u2271',
'NotGreaterFullEqual;': '\u2267\u0338',
'NotGreaterGreater;': '\u226b\u0338',
'NotGreaterLess;': '\u2279',
'NotGreaterSlantEqual;': '\u2a7e\u0338',
'NotGreaterTilde;': '\u2275',
'NotHumpDownHump;': '\u224e\u0338',
'NotHumpEqual;': '\u224f\u0338',
'notin;': '\u2209',
'notindot;': '\u22f5\u0338',
'notinE;': '\u22f9\u0338',
'notinva;': '\u2209',
'notinvb;': '\u22f7',
'notinvc;': '\u22f6',
'NotLeftTriangle;': '\u22ea',
'NotLeftTriangleBar;': '\u29cf\u0338',
'NotLeftTriangleEqual;': '\u22ec',
'NotLess;': '\u226e',
'NotLessEqual;': '\u2270',
'NotLessGreater;': '\u2278',
'NotLessLess;': '\u226a\u0338',
'NotLessSlantEqual;': '\u2a7d\u0338',
'NotLessTilde;': '\u2274',
'NotNestedGreaterGreater;': '\u2aa2\u0338',
'NotNestedLessLess;': '\u2aa1\u0338',
'notni;': '\u220c',
'notniva;': '\u220c',
'notnivb;': '\u22fe',
'notnivc;': '\u22fd',
'NotPrecedes;': '\u2280',
'NotPrecedesEqual;': '\u2aaf\u0338',
'NotPrecedesSlantEqual;': '\u22e0',
'NotReverseElement;': '\u220c',
'NotRightTriangle;': '\u22eb',
'NotRightTriangleBar;': '\u29d0\u0338',
'NotRightTriangleEqual;': '\u22ed',
'NotSquareSubset;': '\u228f\u0338',
'NotSquareSubsetEqual;': '\u22e2',
'NotSquareSuperset;': '\u2290\u0338',
'NotSquareSupersetEqual;': '\u22e3',
'NotSubset;': '\u2282\u20d2',
'NotSubsetEqual;': '\u2288',
'NotSucceeds;': '\u2281',
'NotSucceedsEqual;': '\u2ab0\u0338',
'NotSucceedsSlantEqual;': '\u22e1',
'NotSucceedsTilde;': '\u227f\u0338',
'NotSuperset;': '\u2283\u20d2',
'NotSupersetEqual;': '\u2289',
'NotTilde;': '\u2241',
'NotTildeEqual;': '\u2244',
'NotTildeFullEqual;': '\u2247',
'NotTildeTilde;': '\u2249',
'NotVerticalBar;': '\u2224',
'npar;': '\u2226',
'nparallel;': '\u2226',
'nparsl;': '\u2afd\u20e5',
'npart;': '\u2202\u0338',
'npolint;': '\u2a14',
'npr;': '\u2280',
'nprcue;': '\u22e0',
'npre;': '\u2aaf\u0338',
'nprec;': '\u2280',
'npreceq;': '\u2aaf\u0338',
'nrArr;': '\u21cf',
'nrarr;': '\u219b',
'nrarrc;': '\u2933\u0338',
'nrarrw;': '\u219d\u0338',
'nRightarrow;': '\u21cf',
'nrightarrow;': '\u219b',
'nrtri;': '\u22eb',
'nrtrie;': '\u22ed',
'nsc;': '\u2281',
'nsccue;': '\u22e1',
'nsce;': '\u2ab0\u0338',
'Nscr;': '\U0001d4a9',
'nscr;': '\U0001d4c3',
'nshortmid;': '\u2224',
'nshortparallel;': '\u2226',
'nsim;': '\u2241',
'nsime;': '\u2244',
'nsimeq;': '\u2244',
'nsmid;': '\u2224',
'nspar;': '\u2226',
'nsqsube;': '\u22e2',
'nsqsupe;': '\u22e3',
'nsub;': '\u2284',
'nsubE;': '\u2ac5\u0338',
'nsube;': '\u2288',
'nsubset;': '\u2282\u20d2',
'nsubseteq;': '\u2288',
'nsubseteqq;': '\u2ac5\u0338',
'nsucc;': '\u2281',
'nsucceq;': '\u2ab0\u0338',
'nsup;': '\u2285',
'nsupE;': '\u2ac6\u0338',
'nsupe;': '\u2289',
'nsupset;': '\u2283\u20d2',
'nsupseteq;': '\u2289',
'nsupseteqq;': '\u2ac6\u0338',
'ntgl;': '\u2279',
'Ntilde': '\xd1',
'ntilde': '\xf1',
'Ntilde;': '\xd1',
'ntilde;': '\xf1',
'ntlg;': '\u2278',
'ntriangleleft;': '\u22ea',
'ntrianglelefteq;': '\u22ec',
'ntriangleright;': '\u22eb',
'ntrianglerighteq;': '\u22ed',
'Nu;': '\u039d',
'nu;': '\u03bd',
'num;': '#',
'numero;': '\u2116',
'numsp;': '\u2007',
'nvap;': '\u224d\u20d2',
'nVDash;': '\u22af',
'nVdash;': '\u22ae',
'nvDash;': '\u22ad',
'nvdash;': '\u22ac',
'nvge;': '\u2265\u20d2',
'nvgt;': '>\u20d2',
'nvHarr;': '\u2904',
'nvinfin;': '\u29de',
'nvlArr;': '\u2902',
'nvle;': '\u2264\u20d2',
'nvlt;': '<\u20d2',
'nvltrie;': '\u22b4\u20d2',
'nvrArr;': '\u2903',
'nvrtrie;': '\u22b5\u20d2',
'nvsim;': '\u223c\u20d2',
'nwarhk;': '\u2923',
'nwArr;': '\u21d6',
'nwarr;': '\u2196',
'nwarrow;': '\u2196',
'nwnear;': '\u2927',
'Oacute': '\xd3',
'oacute': '\xf3',
'Oacute;': '\xd3',
'oacute;': '\xf3',
'oast;': '\u229b',
'ocir;': '\u229a',
'Ocirc': '\xd4',
'ocirc': '\xf4',
'Ocirc;': '\xd4',
'ocirc;': '\xf4',
'Ocy;': '\u041e',
'ocy;': '\u043e',
'odash;': '\u229d',
'Odblac;': '\u0150',
'odblac;': '\u0151',
'odiv;': '\u2a38',
'odot;': '\u2299',
'odsold;': '\u29bc',
'OElig;': '\u0152',
'oelig;': '\u0153',
'ofcir;': '\u29bf',
'Ofr;': '\U0001d512',
'ofr;': '\U0001d52c',
'ogon;': '\u02db',
'Ograve': '\xd2',
'ograve': '\xf2',
'Ograve;': '\xd2',
'ograve;': '\xf2',
'ogt;': '\u29c1',
'ohbar;': '\u29b5',
'ohm;': '\u03a9',
'oint;': '\u222e',
'olarr;': '\u21ba',
'olcir;': '\u29be',
'olcross;': '\u29bb',
'oline;': '\u203e',
'olt;': '\u29c0',
'Omacr;': '\u014c',
'omacr;': '\u014d',
'Omega;': '\u03a9',
'omega;': '\u03c9',
'Omicron;': '\u039f',
'omicron;': '\u03bf',
'omid;': '\u29b6',
'ominus;': '\u2296',
'Oopf;': '\U0001d546',
'oopf;': '\U0001d560',
'opar;': '\u29b7',
'OpenCurlyDoubleQuote;': '\u201c',
'OpenCurlyQuote;': '\u2018',
'operp;': '\u29b9',
'oplus;': '\u2295',
'Or;': '\u2a54',
'or;': '\u2228',
'orarr;': '\u21bb',
'ord;': '\u2a5d',
'order;': '\u2134',
'orderof;': '\u2134',
'ordf': '\xaa',
'ordf;': '\xaa',
'ordm': '\xba',
'ordm;': '\xba',
'origof;': '\u22b6',
'oror;': '\u2a56',
'orslope;': '\u2a57',
'orv;': '\u2a5b',
'oS;': '\u24c8',
'Oscr;': '\U0001d4aa',
'oscr;': '\u2134',
'Oslash': '\xd8',
'oslash': '\xf8',
'Oslash;': '\xd8',
'oslash;': '\xf8',
'osol;': '\u2298',
'Otilde': '\xd5',
'otilde': '\xf5',
'Otilde;': '\xd5',
'otilde;': '\xf5',
'Otimes;': '\u2a37',
'otimes;': '\u2297',
'otimesas;': '\u2a36',
'Ouml': '\xd6',
'ouml': '\xf6',
'Ouml;': '\xd6',
'ouml;': '\xf6',
'ovbar;': '\u233d',
'OverBar;': '\u203e',
'OverBrace;': '\u23de',
'OverBracket;': '\u23b4',
'OverParenthesis;': '\u23dc',
'par;': '\u2225',
'para': '\xb6',
'para;': '\xb6',
'parallel;': '\u2225',
'parsim;': '\u2af3',
'parsl;': '\u2afd',
'part;': '\u2202',
'PartialD;': '\u2202',
'Pcy;': '\u041f',
'pcy;': '\u043f',
'percnt;': '%',
'period;': '.',
'permil;': '\u2030',
'perp;': '\u22a5',
'pertenk;': '\u2031',
'Pfr;': '\U0001d513',
'pfr;': '\U0001d52d',
'Phi;': '\u03a6',
'phi;': '\u03c6',
'phiv;': '\u03d5',
'phmmat;': '\u2133',
'phone;': '\u260e',
'Pi;': '\u03a0',
'pi;': '\u03c0',
'pitchfork;': '\u22d4',
'piv;': '\u03d6',
'planck;': '\u210f',
'planckh;': '\u210e',
'plankv;': '\u210f',
'plus;': '+',
'plusacir;': '\u2a23',
'plusb;': '\u229e',
'pluscir;': '\u2a22',
'plusdo;': '\u2214',
'plusdu;': '\u2a25',
'pluse;': '\u2a72',
'PlusMinus;': '\xb1',
'plusmn': '\xb1',
'plusmn;': '\xb1',
'plussim;': '\u2a26',
'plustwo;': '\u2a27',
'pm;': '\xb1',
'Poincareplane;': '\u210c',
'pointint;': '\u2a15',
'Popf;': '\u2119',
'popf;': '\U0001d561',
'pound': '\xa3',
'pound;': '\xa3',
'Pr;': '\u2abb',
'pr;': '\u227a',
'prap;': '\u2ab7',
'prcue;': '\u227c',
'prE;': '\u2ab3',
'pre;': '\u2aaf',
'prec;': '\u227a',
'precapprox;': '\u2ab7',
'preccurlyeq;': '\u227c',
'Precedes;': '\u227a',
'PrecedesEqual;': '\u2aaf',
'PrecedesSlantEqual;': '\u227c',
'PrecedesTilde;': '\u227e',
'preceq;': '\u2aaf',
'precnapprox;': '\u2ab9',
'precneqq;': '\u2ab5',
'precnsim;': '\u22e8',
'precsim;': '\u227e',
'Prime;': '\u2033',
'prime;': '\u2032',
'primes;': '\u2119',
'prnap;': '\u2ab9',
'prnE;': '\u2ab5',
'prnsim;': '\u22e8',
'prod;': '\u220f',
'Product;': '\u220f',
'profalar;': '\u232e',
'profline;': '\u2312',
'profsurf;': '\u2313',
'prop;': '\u221d',
'Proportion;': '\u2237',
'Proportional;': '\u221d',
'propto;': '\u221d',
'prsim;': '\u227e',
'prurel;': '\u22b0',
'Pscr;': '\U0001d4ab',
'pscr;': '\U0001d4c5',
'Psi;': '\u03a8',
'psi;': '\u03c8',
'puncsp;': '\u2008',
'Qfr;': '\U0001d514',
'qfr;': '\U0001d52e',
'qint;': '\u2a0c',
'Qopf;': '\u211a',
'qopf;': '\U0001d562',
'qprime;': '\u2057',
'Qscr;': '\U0001d4ac',
'qscr;': '\U0001d4c6',
'quaternions;': '\u210d',
'quatint;': '\u2a16',
'quest;': '?',
'questeq;': '\u225f',
'QUOT': '"',
'quot': '"',
'QUOT;': '"',
'quot;': '"',
'rAarr;': '\u21db',
'race;': '\u223d\u0331',
'Racute;': '\u0154',
'racute;': '\u0155',
'radic;': '\u221a',
'raemptyv;': '\u29b3',
'Rang;': '\u27eb',
'rang;': '\u27e9',
'rangd;': '\u2992',
'range;': '\u29a5',
'rangle;': '\u27e9',
'raquo': '\xbb',
'raquo;': '\xbb',
'Rarr;': '\u21a0',
'rArr;': '\u21d2',
'rarr;': '\u2192',
'rarrap;': '\u2975',
'rarrb;': '\u21e5',
'rarrbfs;': '\u2920',
'rarrc;': '\u2933',
'rarrfs;': '\u291e',
'rarrhk;': '\u21aa',
'rarrlp;': '\u21ac',
'rarrpl;': '\u2945',
'rarrsim;': '\u2974',
'Rarrtl;': '\u2916',
'rarrtl;': '\u21a3',
'rarrw;': '\u219d',
'rAtail;': '\u291c',
'ratail;': '\u291a',
'ratio;': '\u2236',
'rationals;': '\u211a',
'RBarr;': '\u2910',
'rBarr;': '\u290f',
'rbarr;': '\u290d',
'rbbrk;': '\u2773',
'rbrace;': '}',
'rbrack;': ']',
'rbrke;': '\u298c',
'rbrksld;': '\u298e',
'rbrkslu;': '\u2990',
'Rcaron;': '\u0158',
'rcaron;': '\u0159',
'Rcedil;': '\u0156',
'rcedil;': '\u0157',
'rceil;': '\u2309',
'rcub;': '}',
'Rcy;': '\u0420',
'rcy;': '\u0440',
'rdca;': '\u2937',
'rdldhar;': '\u2969',
'rdquo;': '\u201d',
'rdquor;': '\u201d',
'rdsh;': '\u21b3',
'Re;': '\u211c',
'real;': '\u211c',
'realine;': '\u211b',
'realpart;': '\u211c',
'reals;': '\u211d',
'rect;': '\u25ad',
'REG': '\xae',
'reg': '\xae',
'REG;': '\xae',
'reg;': '\xae',
'ReverseElement;': '\u220b',
'ReverseEquilibrium;': '\u21cb',
'ReverseUpEquilibrium;': '\u296f',
'rfisht;': '\u297d',
'rfloor;': '\u230b',
'Rfr;': '\u211c',
'rfr;': '\U0001d52f',
'rHar;': '\u2964',
'rhard;': '\u21c1',
'rharu;': '\u21c0',
'rharul;': '\u296c',
'Rho;': '\u03a1',
'rho;': '\u03c1',
'rhov;': '\u03f1',
'RightAngleBracket;': '\u27e9',
'RightArrow;': '\u2192',
'Rightarrow;': '\u21d2',
'rightarrow;': '\u2192',
'RightArrowBar;': '\u21e5',
'RightArrowLeftArrow;': '\u21c4',
'rightarrowtail;': '\u21a3',
'RightCeiling;': '\u2309',
'RightDoubleBracket;': '\u27e7',
'RightDownTeeVector;': '\u295d',
'RightDownVector;': '\u21c2',
'RightDownVectorBar;': '\u2955',
'RightFloor;': '\u230b',
'rightharpoondown;': '\u21c1',
'rightharpoonup;': '\u21c0',
'rightleftarrows;': '\u21c4',
'rightleftharpoons;': '\u21cc',
'rightrightarrows;': '\u21c9',
'rightsquigarrow;': '\u219d',
'RightTee;': '\u22a2',
'RightTeeArrow;': '\u21a6',
'RightTeeVector;': '\u295b',
'rightthreetimes;': '\u22cc',
'RightTriangle;': '\u22b3',
'RightTriangleBar;': '\u29d0',
'RightTriangleEqual;': '\u22b5',
'RightUpDownVector;': '\u294f',
'RightUpTeeVector;': '\u295c',
'RightUpVector;': '\u21be',
'RightUpVectorBar;': '\u2954',
'RightVector;': '\u21c0',
'RightVectorBar;': '\u2953',
'ring;': '\u02da',
'risingdotseq;': '\u2253',
'rlarr;': '\u21c4',
'rlhar;': '\u21cc',
'rlm;': '\u200f',
'rmoust;': '\u23b1',
'rmoustache;': '\u23b1',
'rnmid;': '\u2aee',
'roang;': '\u27ed',
'roarr;': '\u21fe',
'robrk;': '\u27e7',
'ropar;': '\u2986',
'Ropf;': '\u211d',
'ropf;': '\U0001d563',
'roplus;': '\u2a2e',
'rotimes;': '\u2a35',
'RoundImplies;': '\u2970',
'rpar;': ')',
'rpargt;': '\u2994',
'rppolint;': '\u2a12',
'rrarr;': '\u21c9',
'Rrightarrow;': '\u21db',
'rsaquo;': '\u203a',
'Rscr;': '\u211b',
'rscr;': '\U0001d4c7',
'Rsh;': '\u21b1',
'rsh;': '\u21b1',
'rsqb;': ']',
'rsquo;': '\u2019',
'rsquor;': '\u2019',
'rthree;': '\u22cc',
'rtimes;': '\u22ca',
'rtri;': '\u25b9',
'rtrie;': '\u22b5',
'rtrif;': '\u25b8',
'rtriltri;': '\u29ce',
'RuleDelayed;': '\u29f4',
'ruluhar;': '\u2968',
'rx;': '\u211e',
'Sacute;': '\u015a',
'sacute;': '\u015b',
'sbquo;': '\u201a',
'Sc;': '\u2abc',
'sc;': '\u227b',
'scap;': '\u2ab8',
'Scaron;': '\u0160',
'scaron;': '\u0161',
'sccue;': '\u227d',
'scE;': '\u2ab4',
'sce;': '\u2ab0',
'Scedil;': '\u015e',
'scedil;': '\u015f',
'Scirc;': '\u015c',
'scirc;': '\u015d',
'scnap;': '\u2aba',
'scnE;': '\u2ab6',
'scnsim;': '\u22e9',
'scpolint;': '\u2a13',
'scsim;': '\u227f',
'Scy;': '\u0421',
'scy;': '\u0441',
'sdot;': '\u22c5',
'sdotb;': '\u22a1',
'sdote;': '\u2a66',
'searhk;': '\u2925',
'seArr;': '\u21d8',
'searr;': '\u2198',
'searrow;': '\u2198',
'sect': '\xa7',
'sect;': '\xa7',
'semi;': ';',
'seswar;': '\u2929',
'setminus;': '\u2216',
'setmn;': '\u2216',
'sext;': '\u2736',
'Sfr;': '\U0001d516',
'sfr;': '\U0001d530',
'sfrown;': '\u2322',
'sharp;': '\u266f',
'SHCHcy;': '\u0429',
'shchcy;': '\u0449',
'SHcy;': '\u0428',
'shcy;': '\u0448',
'ShortDownArrow;': '\u2193',
'ShortLeftArrow;': '\u2190',
'shortmid;': '\u2223',
'shortparallel;': '\u2225',
'ShortRightArrow;': '\u2192',
'ShortUpArrow;': '\u2191',
'shy': '\xad',
'shy;': '\xad',
'Sigma;': '\u03a3',
'sigma;': '\u03c3',
'sigmaf;': '\u03c2',
'sigmav;': '\u03c2',
'sim;': '\u223c',
'simdot;': '\u2a6a',
'sime;': '\u2243',
'simeq;': '\u2243',
'simg;': '\u2a9e',
'simgE;': '\u2aa0',
'siml;': '\u2a9d',
'simlE;': '\u2a9f',
'simne;': '\u2246',
'simplus;': '\u2a24',
'simrarr;': '\u2972',
'slarr;': '\u2190',
'SmallCircle;': '\u2218',
'smallsetminus;': '\u2216',
'smashp;': '\u2a33',
'smeparsl;': '\u29e4',
'smid;': '\u2223',
'smile;': '\u2323',
'smt;': '\u2aaa',
'smte;': '\u2aac',
'smtes;': '\u2aac\ufe00',
'SOFTcy;': '\u042c',
'softcy;': '\u044c',
'sol;': '/',
'solb;': '\u29c4',
'solbar;': '\u233f',
'Sopf;': '\U0001d54a',
'sopf;': '\U0001d564',
'spades;': '\u2660',
'spadesuit;': '\u2660',
'spar;': '\u2225',
'sqcap;': '\u2293',
'sqcaps;': '\u2293\ufe00',
'sqcup;': '\u2294',
'sqcups;': '\u2294\ufe00',
'Sqrt;': '\u221a',
'sqsub;': '\u228f',
'sqsube;': '\u2291',
'sqsubset;': '\u228f',
'sqsubseteq;': '\u2291',
'sqsup;': '\u2290',
'sqsupe;': '\u2292',
'sqsupset;': '\u2290',
'sqsupseteq;': '\u2292',
'squ;': '\u25a1',
'Square;': '\u25a1',
'square;': '\u25a1',
'SquareIntersection;': '\u2293',
'SquareSubset;': '\u228f',
'SquareSubsetEqual;': '\u2291',
'SquareSuperset;': '\u2290',
'SquareSupersetEqual;': '\u2292',
'SquareUnion;': '\u2294',
'squarf;': '\u25aa',
'squf;': '\u25aa',
'srarr;': '\u2192',
'Sscr;': '\U0001d4ae',
'sscr;': '\U0001d4c8',
'ssetmn;': '\u2216',
'ssmile;': '\u2323',
'sstarf;': '\u22c6',
'Star;': '\u22c6',
'star;': '\u2606',
'starf;': '\u2605',
'straightepsilon;': '\u03f5',
'straightphi;': '\u03d5',
'strns;': '\xaf',
'Sub;': '\u22d0',
'sub;': '\u2282',
'subdot;': '\u2abd',
'subE;': '\u2ac5',
'sube;': '\u2286',
'subedot;': '\u2ac3',
'submult;': '\u2ac1',
'subnE;': '\u2acb',
'subne;': '\u228a',
'subplus;': '\u2abf',
'subrarr;': '\u2979',
'Subset;': '\u22d0',
'subset;': '\u2282',
'subseteq;': '\u2286',
'subseteqq;': '\u2ac5',
'SubsetEqual;': '\u2286',
'subsetneq;': '\u228a',
'subsetneqq;': '\u2acb',
'subsim;': '\u2ac7',
'subsub;': '\u2ad5',
'subsup;': '\u2ad3',
'succ;': '\u227b',
'succapprox;': '\u2ab8',
'succcurlyeq;': '\u227d',
'Succeeds;': '\u227b',
'SucceedsEqual;': '\u2ab0',
'SucceedsSlantEqual;': '\u227d',
'SucceedsTilde;': '\u227f',
'succeq;': '\u2ab0',
'succnapprox;': '\u2aba',
'succneqq;': '\u2ab6',
'succnsim;': '\u22e9',
'succsim;': '\u227f',
'SuchThat;': '\u220b',
'Sum;': '\u2211',
'sum;': '\u2211',
'sung;': '\u266a',
'sup1': '\xb9',
'sup1;': '\xb9',
'sup2': '\xb2',
'sup2;': '\xb2',
'sup3': '\xb3',
'sup3;': '\xb3',
'Sup;': '\u22d1',
'sup;': '\u2283',
'supdot;': '\u2abe',
'supdsub;': '\u2ad8',
'supE;': '\u2ac6',
'supe;': '\u2287',
'supedot;': '\u2ac4',
'Superset;': '\u2283',
'SupersetEqual;': '\u2287',
'suphsol;': '\u27c9',
'suphsub;': '\u2ad7',
'suplarr;': '\u297b',
'supmult;': '\u2ac2',
'supnE;': '\u2acc',
'supne;': '\u228b',
'supplus;': '\u2ac0',
'Supset;': '\u22d1',
'supset;': '\u2283',
'supseteq;': '\u2287',
'supseteqq;': '\u2ac6',
'supsetneq;': '\u228b',
'supsetneqq;': '\u2acc',
'supsim;': '\u2ac8',
'supsub;': '\u2ad4',
'supsup;': '\u2ad6',
'swarhk;': '\u2926',
'swArr;': '\u21d9',
'swarr;': '\u2199',
'swarrow;': '\u2199',
'swnwar;': '\u292a',
'szlig': '\xdf',
'szlig;': '\xdf',
'Tab;': '\t',
'target;': '\u2316',
'Tau;': '\u03a4',
'tau;': '\u03c4',
'tbrk;': '\u23b4',
'Tcaron;': '\u0164',
'tcaron;': '\u0165',
'Tcedil;': '\u0162',
'tcedil;': '\u0163',
'Tcy;': '\u0422',
'tcy;': '\u0442',
'tdot;': '\u20db',
'telrec;': '\u2315',
'Tfr;': '\U0001d517',
'tfr;': '\U0001d531',
'there4;': '\u2234',
'Therefore;': '\u2234',
'therefore;': '\u2234',
'Theta;': '\u0398',
'theta;': '\u03b8',
'thetasym;': '\u03d1',
'thetav;': '\u03d1',
'thickapprox;': '\u2248',
'thicksim;': '\u223c',
'ThickSpace;': '\u205f\u200a',
'thinsp;': '\u2009',
'ThinSpace;': '\u2009',
'thkap;': '\u2248',
'thksim;': '\u223c',
'THORN': '\xde',
'thorn': '\xfe',
'THORN;': '\xde',
'thorn;': '\xfe',
'Tilde;': '\u223c',
'tilde;': '\u02dc',
'TildeEqual;': '\u2243',
'TildeFullEqual;': '\u2245',
'TildeTilde;': '\u2248',
'times': '\xd7',
'times;': '\xd7',
'timesb;': '\u22a0',
'timesbar;': '\u2a31',
'timesd;': '\u2a30',
'tint;': '\u222d',
'toea;': '\u2928',
'top;': '\u22a4',
'topbot;': '\u2336',
'topcir;': '\u2af1',
'Topf;': '\U0001d54b',
'topf;': '\U0001d565',
'topfork;': '\u2ada',
'tosa;': '\u2929',
'tprime;': '\u2034',
'TRADE;': '\u2122',
'trade;': '\u2122',
'triangle;': '\u25b5',
'triangledown;': '\u25bf',
'triangleleft;': '\u25c3',
'trianglelefteq;': '\u22b4',
'triangleq;': '\u225c',
'triangleright;': '\u25b9',
'trianglerighteq;': '\u22b5',
'tridot;': '\u25ec',
'trie;': '\u225c',
'triminus;': '\u2a3a',
'TripleDot;': '\u20db',
'triplus;': '\u2a39',
'trisb;': '\u29cd',
'tritime;': '\u2a3b',
'trpezium;': '\u23e2',
'Tscr;': '\U0001d4af',
'tscr;': '\U0001d4c9',
'TScy;': '\u0426',
'tscy;': '\u0446',
'TSHcy;': '\u040b',
'tshcy;': '\u045b',
'Tstrok;': '\u0166',
'tstrok;': '\u0167',
'twixt;': '\u226c',
'twoheadleftarrow;': '\u219e',
'twoheadrightarrow;': '\u21a0',
'Uacute': '\xda',
'uacute': '\xfa',
'Uacute;': '\xda',
'uacute;': '\xfa',
'Uarr;': '\u219f',
'uArr;': '\u21d1',
'uarr;': '\u2191',
'Uarrocir;': '\u2949',
'Ubrcy;': '\u040e',
'ubrcy;': '\u045e',
'Ubreve;': '\u016c',
'ubreve;': '\u016d',
'Ucirc': '\xdb',
'ucirc': '\xfb',
'Ucirc;': '\xdb',
'ucirc;': '\xfb',
'Ucy;': '\u0423',
'ucy;': '\u0443',
'udarr;': '\u21c5',
'Udblac;': '\u0170',
'udblac;': '\u0171',
'udhar;': '\u296e',
'ufisht;': '\u297e',
'Ufr;': '\U0001d518',
'ufr;': '\U0001d532',
'Ugrave': '\xd9',
'ugrave': '\xf9',
'Ugrave;': '\xd9',
'ugrave;': '\xf9',
'uHar;': '\u2963',
'uharl;': '\u21bf',
'uharr;': '\u21be',
'uhblk;': '\u2580',
'ulcorn;': '\u231c',
'ulcorner;': '\u231c',
'ulcrop;': '\u230f',
'ultri;': '\u25f8',
'Umacr;': '\u016a',
'umacr;': '\u016b',
'uml': '\xa8',
'uml;': '\xa8',
'UnderBar;': '_',
'UnderBrace;': '\u23df',
'UnderBracket;': '\u23b5',
'UnderParenthesis;': '\u23dd',
'Union;': '\u22c3',
'UnionPlus;': '\u228e',
'Uogon;': '\u0172',
'uogon;': '\u0173',
'Uopf;': '\U0001d54c',
'uopf;': '\U0001d566',
'UpArrow;': '\u2191',
'Uparrow;': '\u21d1',
'uparrow;': '\u2191',
'UpArrowBar;': '\u2912',
'UpArrowDownArrow;': '\u21c5',
'UpDownArrow;': '\u2195',
'Updownarrow;': '\u21d5',
'updownarrow;': '\u2195',
'UpEquilibrium;': '\u296e',
'upharpoonleft;': '\u21bf',
'upharpoonright;': '\u21be',
'uplus;': '\u228e',
'UpperLeftArrow;': '\u2196',
'UpperRightArrow;': '\u2197',
'Upsi;': '\u03d2',
'upsi;': '\u03c5',
'upsih;': '\u03d2',
'Upsilon;': '\u03a5',
'upsilon;': '\u03c5',
'UpTee;': '\u22a5',
'UpTeeArrow;': '\u21a5',
'upuparrows;': '\u21c8',
'urcorn;': '\u231d',
'urcorner;': '\u231d',
'urcrop;': '\u230e',
'Uring;': '\u016e',
'uring;': '\u016f',
'urtri;': '\u25f9',
'Uscr;': '\U0001d4b0',
'uscr;': '\U0001d4ca',
'utdot;': '\u22f0',
'Utilde;': '\u0168',
'utilde;': '\u0169',
'utri;': '\u25b5',
'utrif;': '\u25b4',
'uuarr;': '\u21c8',
'Uuml': '\xdc',
'uuml': '\xfc',
'Uuml;': '\xdc',
'uuml;': '\xfc',
'uwangle;': '\u29a7',
'vangrt;': '\u299c',
'varepsilon;': '\u03f5',
'varkappa;': '\u03f0',
'varnothing;': '\u2205',
'varphi;': '\u03d5',
'varpi;': '\u03d6',
'varpropto;': '\u221d',
'vArr;': '\u21d5',
'varr;': '\u2195',
'varrho;': '\u03f1',
'varsigma;': '\u03c2',
'varsubsetneq;': '\u228a\ufe00',
'varsubsetneqq;': '\u2acb\ufe00',
'varsupsetneq;': '\u228b\ufe00',
'varsupsetneqq;': '\u2acc\ufe00',
'vartheta;': '\u03d1',
'vartriangleleft;': '\u22b2',
'vartriangleright;': '\u22b3',
'Vbar;': '\u2aeb',
'vBar;': '\u2ae8',
'vBarv;': '\u2ae9',
'Vcy;': '\u0412',
'vcy;': '\u0432',
'VDash;': '\u22ab',
'Vdash;': '\u22a9',
'vDash;': '\u22a8',
'vdash;': '\u22a2',
'Vdashl;': '\u2ae6',
'Vee;': '\u22c1',
'vee;': '\u2228',
'veebar;': '\u22bb',
'veeeq;': '\u225a',
'vellip;': '\u22ee',
'Verbar;': '\u2016',
'verbar;': '|',
'Vert;': '\u2016',
'vert;': '|',
'VerticalBar;': '\u2223',
'VerticalLine;': '|',
'VerticalSeparator;': '\u2758',
'VerticalTilde;': '\u2240',
'VeryThinSpace;': '\u200a',
'Vfr;': '\U0001d519',
'vfr;': '\U0001d533',
'vltri;': '\u22b2',
'vnsub;': '\u2282\u20d2',
'vnsup;': '\u2283\u20d2',
'Vopf;': '\U0001d54d',
'vopf;': '\U0001d567',
'vprop;': '\u221d',
'vrtri;': '\u22b3',
'Vscr;': '\U0001d4b1',
'vscr;': '\U0001d4cb',
'vsubnE;': '\u2acb\ufe00',
'vsubne;': '\u228a\ufe00',
'vsupnE;': '\u2acc\ufe00',
'vsupne;': '\u228b\ufe00',
'Vvdash;': '\u22aa',
'vzigzag;': '\u299a',
'Wcirc;': '\u0174',
'wcirc;': '\u0175',
'wedbar;': '\u2a5f',
'Wedge;': '\u22c0',
'wedge;': '\u2227',
'wedgeq;': '\u2259',
'weierp;': '\u2118',
'Wfr;': '\U0001d51a',
'wfr;': '\U0001d534',
'Wopf;': '\U0001d54e',
'wopf;': '\U0001d568',
'wp;': '\u2118',
'wr;': '\u2240',
'wreath;': '\u2240',
'Wscr;': '\U0001d4b2',
'wscr;': '\U0001d4cc',
'xcap;': '\u22c2',
'xcirc;': '\u25ef',
'xcup;': '\u22c3',
'xdtri;': '\u25bd',
'Xfr;': '\U0001d51b',
'xfr;': '\U0001d535',
'xhArr;': '\u27fa',
'xharr;': '\u27f7',
'Xi;': '\u039e',
'xi;': '\u03be',
'xlArr;': '\u27f8',
'xlarr;': '\u27f5',
'xmap;': '\u27fc',
'xnis;': '\u22fb',
'xodot;': '\u2a00',
'Xopf;': '\U0001d54f',
'xopf;': '\U0001d569',
'xoplus;': '\u2a01',
'xotime;': '\u2a02',
'xrArr;': '\u27f9',
'xrarr;': '\u27f6',
'Xscr;': '\U0001d4b3',
'xscr;': '\U0001d4cd',
'xsqcup;': '\u2a06',
'xuplus;': '\u2a04',
'xutri;': '\u25b3',
'xvee;': '\u22c1',
'xwedge;': '\u22c0',
'Yacute': '\xdd',
'yacute': '\xfd',
'Yacute;': '\xdd',
'yacute;': '\xfd',
'YAcy;': '\u042f',
'yacy;': '\u044f',
'Ycirc;': '\u0176',
'ycirc;': '\u0177',
'Ycy;': '\u042b',
'ycy;': '\u044b',
'yen': '\xa5',
'yen;': '\xa5',
'Yfr;': '\U0001d51c',
'yfr;': '\U0001d536',
'YIcy;': '\u0407',
'yicy;': '\u0457',
'Yopf;': '\U0001d550',
'yopf;': '\U0001d56a',
'Yscr;': '\U0001d4b4',
'yscr;': '\U0001d4ce',
'YUcy;': '\u042e',
'yucy;': '\u044e',
'yuml': '\xff',
'Yuml;': '\u0178',
'yuml;': '\xff',
'Zacute;': '\u0179',
'zacute;': '\u017a',
'Zcaron;': '\u017d',
'zcaron;': '\u017e',
'Zcy;': '\u0417',
'zcy;': '\u0437',
'Zdot;': '\u017b',
'zdot;': '\u017c',
'zeetrf;': '\u2128',
'ZeroWidthSpace;': '\u200b',
'Zeta;': '\u0396',
'zeta;': '\u03b6',
'Zfr;': '\u2128',
'zfr;': '\U0001d537',
'ZHcy;': '\u0416',
'zhcy;': '\u0436',
'zigrarr;': '\u21dd',
'Zopf;': '\u2124',
'zopf;': '\U0001d56b',
'Zscr;': '\U0001d4b5',
'zscr;': '\U0001d4cf',
'zwj;': '\u200d',
'zwnj;': '\u200c',
}
# Derived lookup tables built from name2codepoint (defined above).
# maps the Unicode codepoint to the HTML entity name
# (when several names share a codepoint, the last one iterated wins,
# matching the original loop's overwrite behavior)
codepoint2name = {codepoint: name for (name, codepoint) in name2codepoint.items()}
# maps the HTML entity name to the character
# (or a character reference if the character is outside the Latin-1 range)
entitydefs = {name: chr(codepoint) for (name, codepoint) in name2codepoint.items()}
| talishte/ctigre | env/lib/python2.7/site-packages/future/standard_library/html/entities.py | Python | bsd-2-clause | 75,465 | [
"Bowtie"
] | b981539cb1c293689b8011ff3bc8232cb748e12d64bf2425cb4f916ca46c4ad2 |
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`~openlp.plugins.songs.lib` module contains a number of library functions and classes used in the Songs plugin.
"""
import logging
import os
import re
from PyQt4 import QtGui
from openlp.core.lib import translate
from openlp.core.utils import AppLocation, CONTROL_CHARS
from openlp.plugins.songs.lib.db import MediaFile, Song
from .db import Author
from .ui import SongStrings
# Module-level logger for the songs plugin library.
log = logging.getLogger(__name__)
# Collapses runs of non-word characters (and underscores) when building
# search strings.
WHITESPACE = re.compile(r'[\W_]+', re.UNICODE)
# Matches the various apostrophe-like characters so they can be removed
# before searching.
APOSTROPHE = re.compile('[\'`’ʻ′]', re.UNICODE)
# PATTERN will look for the next occurrence of one of these symbols:
#   \controlword - optionally preceded by \*, optionally followed by a number
#   \'## - where ## is a pair of hex digits, representing a single character
#   \# - where # is a single non-alpha character, representing a special symbol
#   { or } - marking the beginning/end of a group
#   a run of characters without any \ { } or end-of-line
PATTERN = re.compile(r"(\\\*)?\\([a-z]{1,32})(-?\d{1,10})?[ ]?|\\'([0-9a-f]{2})|\\([^a-z*])|([{}])|[\r\n]+|([^\\{}\r\n]+)", re.I)
# RTF control words which specify a "destination" to be ignored.
DESTINATIONS = frozenset((
    'aftncn', 'aftnsep', 'aftnsepc', 'annotation', 'atnauthor',
    'atndate', 'atnicn', 'atnid', 'atnparent', 'atnref', 'atntime',
    'atrfend', 'atrfstart', 'author', 'background', 'bkmkend',
    'bkmkstart', 'blipuid', 'buptim', 'category',
    'colorschememapping', 'colortbl', 'comment', 'company', 'creatim',
    'datafield', 'datastore', 'defchp', 'defpap', 'do', 'doccomm',
    'docvar', 'dptxbxtext', 'ebcend', 'ebcstart', 'factoidname',
    'falt', 'fchars', 'ffdeftext', 'ffentrymcr', 'ffexitmcr',
    'ffformat', 'ffhelptext', 'ffl', 'ffname', 'ffstattext',
    'file', 'filetbl', 'fldinst', 'fldtype', 'fname',
    'fontemb', 'fontfile', 'footer', 'footerf', 'footerl', 'footerr',
    'footnote', 'formfield', 'ftncn', 'ftnsep', 'ftnsepc', 'g',
    'generator', 'gridtbl', 'header', 'headerf', 'headerl',
    'headerr', 'hl', 'hlfr', 'hlinkbase', 'hlloc', 'hlsrc', 'hsv',
    'htmltag', 'info', 'keycode', 'keywords', 'latentstyles',
    'lchars', 'levelnumbers', 'leveltext', 'lfolevel', 'linkval',
    'list', 'listlevel', 'listname', 'listoverride',
    'listoverridetable', 'listpicture', 'liststylename', 'listtable',
    'listtext', 'lsdlockedexcept', 'macc', 'maccPr', 'mailmerge',
    'maln', 'malnScr', 'manager', 'margPr', 'mbar', 'mbarPr',
    'mbaseJc', 'mbegChr', 'mborderBox', 'mborderBoxPr', 'mbox',
    'mboxPr', 'mchr', 'mcount', 'mctrlPr', 'md', 'mdeg', 'mdegHide',
    'mden', 'mdiff', 'mdPr', 'me', 'mendChr', 'meqArr', 'meqArrPr',
    'mf', 'mfName', 'mfPr', 'mfunc', 'mfuncPr', 'mgroupChr',
    'mgroupChrPr', 'mgrow', 'mhideBot', 'mhideLeft', 'mhideRight',
    'mhideTop', 'mhtmltag', 'mlim', 'mlimloc', 'mlimlow',
    'mlimlowPr', 'mlimupp', 'mlimuppPr', 'mm', 'mmaddfieldname',
    'mmath', 'mmathPict', 'mmathPr', 'mmaxdist', 'mmc', 'mmcJc',
    'mmconnectstr', 'mmconnectstrdata', 'mmcPr', 'mmcs',
    'mmdatasource', 'mmheadersource', 'mmmailsubject', 'mmodso',
    'mmodsofilter', 'mmodsofldmpdata', 'mmodsomappedname',
    'mmodsoname', 'mmodsorecipdata', 'mmodsosort', 'mmodsosrc',
    'mmodsotable', 'mmodsoudl', 'mmodsoudldata', 'mmodsouniquetag',
    'mmPr', 'mmquery', 'mmr', 'mnary', 'mnaryPr', 'mnoBreak',
    'mnum', 'mobjDist', 'moMath', 'moMathPara', 'moMathParaPr',
    'mopEmu', 'mphant', 'mphantPr', 'mplcHide', 'mpos', 'mr',
    'mrad', 'mradPr', 'mrPr', 'msepChr', 'mshow', 'mshp', 'msPre',
    'msPrePr', 'msSub', 'msSubPr', 'msSubSup', 'msSubSupPr', 'msSup',
    'msSupPr', 'mstrikeBLTR', 'mstrikeH', 'mstrikeTLBR', 'mstrikeV',
    'msub', 'msubHide', 'msup', 'msupHide', 'mtransp', 'mtype',
    'mvertJc', 'mvfmf', 'mvfml', 'mvtof', 'mvtol', 'mzeroAsc',
    'mzFrodesc', 'mzeroWid', 'nesttableprops', 'nextfile',
    'nonesttables', 'objalias', 'objclass', 'objdata', 'object',
    'objname', 'objsect', 'objtime', 'oldcprops', 'oldpprops',
    'oldsprops', 'oldtprops', 'oleclsid', 'operator', 'panose',
    'password', 'passwordhash', 'pgp', 'pgptbl', 'picprop', 'pict',
    'pn', 'pnseclvl', 'pntext', 'pntxta', 'pntxtb', 'printim',
    'private', 'propname', 'protend', 'protstart', 'protusertbl',
    'pxe', 'result', 'revtbl', 'revtim', 'rsidtbl', 'rxe', 'shp',
    'shpgrp', 'shpinst', 'shppict', 'shprslt', 'shptxt', 'sn', 'sp',
    'staticval', 'stylesheet', 'subject', 'sv', 'svb', 'tc',
    'template', 'themedata', 'title', 'txe', 'ud', 'upr',
    'userprops', 'wgrffmtfilter', 'windowcaption', 'writereservation',
    'writereservhash', 'xe', 'xform', 'xmlattrname', 'xmlattrvalue',
    'xmlclose', 'xmlname', 'xmlnstbl', 'xmlopen'))
# Translation of some special characters.
SPECIAL_CHARS = {
    '\n': '\n',
    '\r': '\n',
    '~': '\u00A0',
    '-': '\u00AD',
    '_': '\u2011',
    'par': '\n',
    'sect': '\n\n',
    # Required page and column break.
    # Would be good if we could split verse into subverses here.
    'page': '\n\n',
    'column': '\n\n',
    # Soft breaks.
    'softpage': '[---]',
    'softcol': '[---]',
    'line': '\n',
    'tab': '\t',
    'emdash': '\u2014',
    'endash': '\u2013',
    'emspace': '\u2003',
    'enspace': '\u2002',
    'qmspace': '\u2005',
    'bullet': '\u2022',
    'lquote': '\u2018',
    'rquote': '\u2019',
    'ldblquote': '\u201C',
    'rdblquote': '\u201D',
    'ltrmark': '\u200E',
    'rtlmark': '\u200F',
    'zwj': '\u200D',
    'zwnj': '\u200C'}
# Windows \fcharsetN values mapped to the corresponding Python codec names.
CHARSET_MAPPING = {
    '0': 'cp1252',
    '128': 'cp932',
    '129': 'cp949',
    '134': 'cp936',
    '161': 'cp1253',
    '162': 'cp1254',
    '163': 'cp1258',
    '177': 'cp1255',
    '178': 'cp1256',
    '186': 'cp1257',
    '204': 'cp1251',
    '222': 'cp874',
    '238': 'cp1250'}
class VerseType(object):
    """
    VerseType provides an enumeration for the tags that may be associated with verses in songs.

    The integer class attributes (``Verse`` ... ``Other``) double as indices
    into the ``names``/``tags``/``translated_names``/``translated_tags`` lists,
    so all four lists must stay in the same order.
    """
    Verse = 0
    Chorus = 1
    Bridge = 2
    PreChorus = 3
    Intro = 4
    Ending = 5
    Other = 6
    names = [
        'Verse',
        'Chorus',
        'Bridge',
        'Pre-Chorus',
        'Intro',
        'Ending',
        'Other']
    # Single-letter tags derived from the first letter of each name.
    tags = [name[0].lower() for name in names]
    translated_names = [
        translate('SongsPlugin.VerseType', 'Verse'),
        translate('SongsPlugin.VerseType', 'Chorus'),
        translate('SongsPlugin.VerseType', 'Bridge'),
        translate('SongsPlugin.VerseType', 'Pre-Chorus'),
        translate('SongsPlugin.VerseType', 'Intro'),
        translate('SongsPlugin.VerseType', 'Ending'),
        translate('SongsPlugin.VerseType', 'Other')]
    translated_tags = [name[0].lower() for name in translated_names]

    @staticmethod
    def translated_tag(verse_tag, default=Other):
        """
        Return the translated UPPERCASE tag for a given tag, used to show translated verse tags in UI

        ``verse_tag``
            The string to return a VerseType for

        ``default``
            Default return value if no matching tag is found. ``None`` is
            allowed and returned unchanged on no match.
        """
        # [:1] instead of [0] so an empty string yields '' (no match) rather
        # than raising IndexError.
        verse_tag = verse_tag[:1].lower()
        for num, tag in enumerate(VerseType.tags):
            if verse_tag == tag:
                return VerseType.translated_tags[num].upper()
        # NOTE: comparing an int against None raises TypeError on Python 3,
        # so the None default must be handled before the range check.
        if default is None:
            return None
        if len(VerseType.names) > default:
            return VerseType.translated_tags[default].upper()
        return VerseType.translated_tags[VerseType.Other].upper()

    @staticmethod
    def translated_name(verse_tag, default=Other):
        """
        Return the translated name for a given tag

        ``verse_tag``
            The string to return a VerseType for

        ``default``
            Default return value if no matching tag is found. ``None`` is
            allowed and returned unchanged on no match.
        """
        verse_tag = verse_tag[:1].lower()
        for num, tag in enumerate(VerseType.tags):
            if verse_tag == tag:
                return VerseType.translated_names[num]
        if default is None:
            return None
        if len(VerseType.names) > default:
            return VerseType.translated_names[default]
        return VerseType.translated_names[VerseType.Other]

    @staticmethod
    def from_tag(verse_tag, default=Other):
        """
        Return the VerseType for a given tag

        ``verse_tag``
            The string to return a VerseType for

        ``default``
            Default return value if no matching tag is found. ``None`` is
            allowed and returned unchanged on no match.
        """
        verse_tag = verse_tag[:1].lower()
        for num, tag in enumerate(VerseType.tags):
            if verse_tag == tag:
                return num
        if default is None:
            return None
        if len(VerseType.names) > default:
            return default
        return VerseType.Other

    @staticmethod
    def from_translated_tag(verse_tag, default=Other):
        """
        Return the VerseType for a given translated tag

        ``verse_tag``
            The string to return a VerseType for

        ``default``
            Default return value if no matching tag is found. ``None`` is
            allowed (``from_loose_input`` relies on this) and is returned
            unchanged on no match.
        """
        verse_tag = verse_tag[:1].lower()
        for num, tag in enumerate(VerseType.translated_tags):
            if verse_tag == tag:
                return num
        # Guard before the range check: "len(...) > None" raises TypeError
        # on Python 3, which previously broke from_loose_input().
        if default is None:
            return None
        if len(VerseType.names) > default:
            return default
        return VerseType.Other

    @staticmethod
    def from_string(verse_name, default=Other):
        """
        Return the VerseType for a given English name

        ``verse_name``
            The string to return a VerseType for

        ``default``
            Default return value if no matching name is found
        """
        verse_name = verse_name.lower()
        for num, name in enumerate(VerseType.names):
            if verse_name == name.lower():
                return num
        return default

    @staticmethod
    def from_translated_string(verse_name):
        """
        Return the VerseType for a given translated name, or ``None`` when
        no translated name matches.

        ``verse_name``
            The string to return a VerseType for
        """
        verse_name = verse_name.lower()
        for num, translation in enumerate(VerseType.translated_names):
            if verse_name == translation.lower():
                return num

    @staticmethod
    def from_loose_input(verse_name, default=Other):
        """
        Return the VerseType for a given string, trying translated and
        English names/tags in turn.

        ``verse_name``
            The string to return a VerseType for

        ``default``
            Default return value if no matching tag is found
        """
        if len(verse_name) > 1:
            verse_index = VerseType.from_translated_string(verse_name)
            if verse_index is None:
                verse_index = VerseType.from_string(verse_name, default)
        elif len(verse_name) == 1:
            # Pass None so an unknown translated tag falls through to the
            # English tag lookup below.
            verse_index = VerseType.from_translated_tag(verse_name, None)
            if verse_index is None:
                verse_index = VerseType.from_tag(verse_name, default)
        else:
            return default
        return verse_index
def retrieve_windows_encoding(recommendation=None):
    """
    Determines which encoding to use on an information source. The process uses both automated detection, which is
    passed to this method as a recommendation, and user confirmation to return an encoding.

    ``recommendation``
        A recommended encoding discovered programmatically for the user to confirm.

    Returns the chosen codec name (e.g. ``'cp1252'``), or ``None`` if the
    user cancels the dialog.
    """
    # map chardet result to compatible windows standard code page
    codepage_mapping = {'IBM866': 'cp866', 'TIS-620': 'cp874',
        'SHIFT_JIS': 'cp932', 'GB2312': 'cp936', 'HZ-GB-2312': 'cp936',
        'EUC-KR': 'cp949', 'Big5': 'cp950', 'ISO-8859-2': 'cp1250',
        'windows-1250': 'cp1250', 'windows-1251': 'cp1251',
        'windows-1252': 'cp1252', 'ISO-8859-7': 'cp1253',
        'windows-1253': 'cp1253', 'ISO-8859-8': 'cp1255',
        'windows-1255': 'cp1255'}
    if recommendation in codepage_mapping:
        recommendation = codepage_mapping[recommendation]
    # Show dialog for encoding selection
    encodings = [('cp1256', translate('SongsPlugin', 'Arabic (CP-1256)')),
        ('cp1257', translate('SongsPlugin', 'Baltic (CP-1257)')),
        ('cp1250', translate('SongsPlugin', 'Central European (CP-1250)')),
        ('cp1251', translate('SongsPlugin', 'Cyrillic (CP-1251)')),
        ('cp1253', translate('SongsPlugin', 'Greek (CP-1253)')),
        ('cp1255', translate('SongsPlugin', 'Hebrew (CP-1255)')),
        ('cp932', translate('SongsPlugin', 'Japanese (CP-932)')),
        ('cp949', translate('SongsPlugin', 'Korean (CP-949)')),
        ('cp936', translate('SongsPlugin', 'Simplified Chinese (CP-936)')),
        ('cp874', translate('SongsPlugin', 'Thai (CP-874)')),
        ('cp950', translate('SongsPlugin', 'Traditional Chinese (CP-950)')),
        ('cp1254', translate('SongsPlugin', 'Turkish (CP-1254)')),
        ('cp1258', translate('SongsPlugin', 'Vietnam (CP-1258)')),
        ('cp1252', translate('SongsPlugin', 'Western European (CP-1252)'))]
    # Pre-select the recommended codec in the dialog, if it is in the list.
    recommended_index = -1
    if recommendation:
        for index in range(len(encodings)):
            if recommendation == encodings[index][0]:
                recommended_index = index
                break
    if recommended_index > -1:
        choice = QtGui.QInputDialog.getItem(None,
            translate('SongsPlugin', 'Character Encoding'),
            translate('SongsPlugin', 'The codepage setting is responsible\n'
                'for the correct character representation.\nUsually you are fine with the preselected choice.'),
            [pair[1] for pair in encodings], recommended_index, False)
    else:
        choice = QtGui.QInputDialog.getItem(None,
            translate('SongsPlugin', 'Character Encoding'),
            translate('SongsPlugin', 'Please choose the character encoding.\n'
                'The encoding is responsible for the correct character representation.'),
            [pair[1] for pair in encodings], 0, False)
    # choice is (selected display text, accepted flag); bail out on cancel.
    if not choice[1]:
        return None
    # Map the selected display text back to its codec name.
    return next(filter(lambda item: item[1] == choice[0], encodings))[0]
def clean_string(string):
    """
    Normalise ``string`` for searching: apostrophes are removed, runs of
    punctuation/whitespace collapse to single spaces, and the result is
    lowercased.
    """
    no_apostrophes = APOSTROPHE.sub('', string)
    return WHITESPACE.sub(' ', no_apostrophes).lower()
def clean_title(title):
    """
    Cleans the song title by removing Unicode control chars groups C0 & C1, as well as any trailing spaces.
    """
    without_controls = CONTROL_CHARS.sub('', title)
    return without_controls.rstrip()
def clean_song(manager, song):
    """
    Cleans the search title, rebuilds the search lyrics, adds a default author if the song does not have one and other
    clean ups. This should always be called when a new song is added or changed.

    ``manager``
        The song's manager.

    ``song``
        The song object. Mutated in place; nothing is returned.
    """
    from .xml import SongXML
    if song.title:
        song.title = clean_title(song.title)
    else:
        song.title = ''
    if song.alternate_title:
        song.alternate_title = clean_title(song.alternate_title)
    else:
        song.alternate_title = ''
    # '@' separates title from alternate title in the searchable field.
    song.search_title = clean_string(song.title) + '@' + clean_string(song.alternate_title)
    # Only do this if the song is a 1.9.4 song (or older).
    if song.lyrics.find('<lyrics language="en">') != -1:
        # Remove the old "language" attribute from lyrics tag (prior to 1.9.5). This is not very important, but this
        # keeps the database clean. This can be removed when everybody has cleaned his songs.
        song.lyrics = song.lyrics.replace('<lyrics language="en">', '<lyrics>')
        verses = SongXML().get_verses(song.lyrics)
        song.search_lyrics = ' '.join([clean_string(verse[1])
            for verse in verses])
        # We need a new and clean SongXML instance.
        sxml = SongXML()
        # Rebuild the song's verses, to remove any wrong verse names (for example translated ones), which might have
        # been added prior to 1.9.5.
        # List for later comparison.
        compare_order = []
        for verse in verses:
            # Canonicalise the verse type to its English single-letter tag.
            verse_type = VerseType.tags[VerseType.from_loose_input(verse[0]['type'])]
            sxml.add_verse_to_lyrics(
                verse_type,
                verse[0]['label'],
                verse[1],
                verse[0].get('lang')
            )
            compare_order.append(('%s%s' % (verse_type, verse[0]['label'])).upper())
            if verse[0]['label'] == '1':
                # A bare tag (e.g. "V") is equivalent to label 1 ("V1").
                compare_order.append(verse_type.upper())
        song.lyrics = str(sxml.extract_xml(), 'utf-8')
        # Rebuild the verse order, to convert translated verse tags, which might have been added prior to 1.9.5.
        if song.verse_order:
            order = CONTROL_CHARS.sub('', song.verse_order).strip().split()
        else:
            order = []
        new_order = []
        for verse_def in order:
            verse_type = VerseType.tags[
                VerseType.from_loose_input(verse_def[0])]
            if len(verse_def) > 1:
                new_order.append(('%s%s' % (verse_type, verse_def[1:])).upper())
            else:
                new_order.append(verse_type.upper())
        song.verse_order = ' '.join(new_order)
        # Check if the verse order contains tags for verses which do not exist.
        for order in new_order:
            if order not in compare_order:
                # Invalid references: drop the verse order entirely.
                song.verse_order = ''
                break
    else:
        verses = SongXML().get_verses(song.lyrics)
        song.search_lyrics = ' '.join([clean_string(verse[1])
            for verse in verses])
    # The song does not have any author, add one.
    if not song.authors:
        name = SongStrings.AuthorUnknown
        author = manager.get_object_filtered(Author, Author.display_name == name)
        if author is None:
            author = Author.populate(display_name=name, last_name='', first_name='')
        song.authors.append(author)
    if song.copyright:
        song.copyright = CONTROL_CHARS.sub('', song.copyright).strip()
def get_encoding(font, font_table, default_encoding, failed=False):
    """
    Finds an encoding to use. Asks user, if necessary.

    ``font``
        The number of currently active font.

    ``font_table``
        Dictionary of fonts and respective encodings. Updated in place with
        the encoding chosen for ``font``.

    ``default_encoding``
        The default encoding to use when font_table is empty or no font is used.

    ``failed``
        A boolean indicating whether the previous encoding didn't work.

    Returns a ``(encoding, default_encoding)`` tuple.
    """
    encoding = font_table.get(font)
    if not encoding and default_encoding:
        encoding = default_encoding
    # Ask the user when nothing usable was found, or when the previously
    # chosen encoding failed to decode the text.
    if failed or not encoding:
        encoding = retrieve_windows_encoding()
        default_encoding = encoding
    font_table[font] = encoding
    return encoding, default_encoding
def strip_rtf(text, default_encoding=None):
    """
    This function strips RTF control structures and returns an unicode string.
    Thanks to Markus Jarderot (MizardX) for this code, used by permission.
    http://stackoverflow.com/questions/188545

    ``text``
        RTF-encoded text, a string.

    ``default_encoding``
        Default encoding to use when no encoding is specified.

    Returns a ``(text, default_encoding)`` tuple, or ``None`` if the user
    cancels the encoding-selection dialog.
    """
    # Current font is the font tag we last met.
    font = ''
    # Character encoding is defined inside fonttable.
    # font_table could contain eg u'0': u'cp1252'
    font_table = {'': ''}
    # Stack of things to keep track of when entering/leaving groups.
    stack = []
    # Whether this group (and all inside it) are "ignorable".
    ignorable = False
    # Number of ASCII characters to skip after an unicode character.
    ucskip = 1
    # Number of ASCII characters left to skip.
    curskip = 0
    # Output buffer.
    out = []
    # Encoded buffer.
    ebytes = bytearray()
    for match in PATTERN.finditer(text):
        # Exactly one of these groups is non-None per match (see PATTERN).
        iinu, word, arg, hex, char, brace, tchar = match.groups()
        # \x (non-alpha character)
        if char:
            if char in '\\{}':
                # Escaped literal backslash/brace: treat as plain text.
                tchar = char
            else:
                # Other escaped symbols act like control words (e.g. \~).
                word = char
        # Flush encoded buffer to output buffer
        if ebytes and not hex and not tchar:
            failed = False
            # Retry decoding with user-chosen encodings until one works or
            # the user cancels.
            while True:
                try:
                    encoding, default_encoding = get_encoding(font, font_table, default_encoding, failed=failed)
                    if not encoding:
                        return None
                    dbytes = ebytes.decode(encoding)
                    # Code 5C is a peculiar case with Windows Codepage 932
                    if encoding == 'cp932' and '\\' in dbytes:
                        dbytes = dbytes.replace('\\', '\u00A5')
                    out.append(dbytes)
                    ebytes.clear()
                except UnicodeDecodeError:
                    failed = True
                else:
                    break
        # {}
        if brace:
            curskip = 0
            if brace == '{':
                # Push state
                stack.append((ucskip, ignorable, font))
            elif brace == '}' and len(stack) > 0:
                # Pop state
                ucskip, ignorable, font = stack.pop()
        # \command
        elif word:
            curskip = 0
            if word in DESTINATIONS:
                ignorable = True
            elif word in SPECIAL_CHARS:
                if not ignorable:
                    out.append(SPECIAL_CHARS[word])
            elif word == 'uc':
                # \ucN: number of fallback ASCII chars after each \u.
                ucskip = int(arg)
            elif word == 'u':
                c = int(arg)
                if c < 0:
                    # RTF encodes code points > 0x7FFF as negative numbers.
                    c += 0x10000
                if not ignorable:
                    out.append(chr(c))
                curskip = ucskip
            elif word == 'fonttbl':
                ignorable = True
            elif word == 'f':
                font = arg
            elif word == 'ansicpg':
                font_table[font] = 'cp' + arg
            elif word == 'fcharset' and font not in font_table and arg in CHARSET_MAPPING:
                font_table[font] = CHARSET_MAPPING[arg]
            elif word == 'fldrslt':
                pass
        # \* 'Ignore if not understood' marker
        elif iinu:
            ignorable = True
        # \'xx
        elif hex:
            if curskip > 0:
                curskip -= 1
            elif not ignorable:
                # Buffer raw bytes; decoded in the flush above once the run ends.
                ebytes.append(int(hex, 16))
        elif tchar:
            if curskip > 0:
                curskip -= 1
            elif not ignorable:
                ebytes += tchar.encode()
    text = ''.join(out)
    return text, default_encoding
def delete_song(song_id, song_plugin):
    """
    Deletes a song from the database. Media files associated to the song
    are removed prior to the deletion of the song.

    ``song_id``
        The ID of the song to delete.

    ``song_plugin``
        The song plugin instance.
    """
    media_files = song_plugin.manager.get_all_objects(MediaFile, MediaFile.song_id == song_id)
    for media_file in media_files:
        try:
            os.remove(media_file.file_name)
        # Deliberately best-effort, but only swallow filesystem errors —
        # a bare except here previously hid every failure, including
        # KeyboardInterrupt/SystemExit.
        except OSError:
            log.exception('Could not remove file: %s', media_file.file_name)
    # Pre-bind save_path so the except clause below can never reference an
    # unbound name if path construction itself fails.
    save_path = None
    try:
        save_path = os.path.join(AppLocation.get_section_data_path(song_plugin.name), 'audio', str(song_id))
        if os.path.exists(save_path):
            os.rmdir(save_path)
    except OSError:
        log.exception('Could not remove directory: %s', save_path)
    song_plugin.manager.delete_object(Song, song_id)
| marmyshev/bug_1117098 | openlp/plugins/songs/lib/__init__.py | Python | gpl-2.0 | 25,408 | [
"Brian"
] | 20c9ff913d9373c555625b19dc6dd3cd6101cb45129faed2162e8f63446e1336 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Jerome Rapinat
# Copyright (C) 2008 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hassourcecountbase import HasSourceCountBase
#-------------------------------------------------------------------------
# "People having sources"
#-------------------------------------------------------------------------
class HasSourceCount(HasSourceCountBase):
    """Rule matching people with a particular number of attached sources.

    All of the matching logic lives in :class:`HasSourceCountBase`; this
    subclass only provides the person-specific, translatable UI strings.
    """
    # Display name of the rule as shown in the filter editor.
    name = _('People with <count> sources')
    # Longer description shown as a tooltip/help text for the rule.
    description = _("Matches people with a certain number of sources connected to it")
| pmghalvorsen/gramps_branch | gramps/gen/filters/rules/person/_hassourcecount.py | Python | gpl-2.0 | 1,800 | [
"Brian"
] | 2f4cc0673f4e1beebe9b68ee60661466b3afe9f076f25bae5e2d2be05bf3bd9f |
#!/usr/bin/env python
"""
Stop a VM instance running at one of the configured cloud sites.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import

__RCSID__ = "$Id$"

from DIRAC.Core.Base import Script
from DIRAC import gLogger, exit as DIRACExit

Script.setUsageMessage('\n'.join(['Stop a given VM instance',
                                  'Usage:',
                                  '%s site ce node [option]... [cfgfile]' % Script.scriptName,
                                  'Arguments:',
                                  ' cfgfile: DIRAC Cfg with description of the configuration (optional)']))
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()

# These imports must come after parseCommandLine() so the DIRAC
# configuration system is initialised first.
from VMDIRAC.WorkloadManagementSystem.Client.VMClient import VMClient
from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup

if len(args) != 3:
    # showHelp() prints the usage message itself; wrapping it in print()
    # used to emit a spurious "None" line.
    Script.showHelp()
    DIRACExit(-1)

site, ce, node = args

vmClient = VMClient()
result = vmClient.stopInstance(site, ce, node)
if not result['OK']:
    gLogger.error(result['Message'])
    DIRACExit(-1)

DIRACExit(0)
| DIRACGrid/VMDIRAC | VMDIRAC/WorkloadManagementSystem/scripts/dirac-vm-instance-stop.py | Python | gpl-3.0 | 1,112 | [
"DIRAC"
] | 6c34405efd230e653c26d7987a7ca67228e367d8106b352684a47eb4f416c27a |
#!/usr/bin/python
import unittest
import os
import random
import json
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.core.lattice import Lattice
from pymatgen.core.surface import Slab, SlabGenerator, generate_all_slabs, \
get_symmetrically_distinct_miller_indices, ReconstructionGenerator, \
miller_index_from_sites
from pymatgen.symmetry.groups import SpaceGroup
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.testing import PymatgenTest
def get_path(path_str):
    """Return the full path of *path_str* inside the surface test-files dir."""
    module_dir = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(module_dir, "..", "..", "..", "test_files",
                        "surface_tests", path_str)
class SlabTest(PymatgenTest):
    """Tests for the Slab class: construction, adsorbate addition, sorting,
    (de)serialization, polarity, surface-site symmetry and the reduced
    oriented unit cell."""
    def setUp(self):
        """Build the reference structures shared by the tests below."""
        zno1 = Structure.from_file(get_path("ZnO-wz.cif"), primitive=False)
        zno55 = SlabGenerator(zno1, [1, 0, 0], 5, 5, lll_reduce=False,
                              center_slab=False).get_slab()
        Ti = Structure(Lattice.hexagonal(4.6, 2.82), ["Ti", "Ti", "Ti"],
                       [[0.000000, 0.000000, 0.000000],
                        [0.333333, 0.666667, 0.500000],
                        [0.666667, 0.333333, 0.500000]])
        Ag_fcc = Structure(Lattice.cubic(4.06), ["Ag", "Ag", "Ag", "Ag"],
                           [[0.000000, 0.000000, 0.000000],
                            [0.000000, 0.500000, 0.500000],
                            [0.500000, 0.000000, 0.500000],
                            [0.500000, 0.500000, 0.000000]])
        self.ti = Ti
        self.agfcc = Ag_fcc
        self.zno1 = zno1
        self.zno55 = zno55
        self.h = Structure(Lattice.cubic(3), ["H"],
                           [[0, 0, 0]])
        self.libcc = Structure(Lattice.cubic(3.51004), ["Li", "Li"],
                               [[0, 0, 0], [0.5, 0.5, 0.5]])
    def test_init(self):
        """A Slab rebuilt from raw attributes matches the generated one."""
        zno_slab = Slab(self.zno55.lattice, self.zno55.species,
                        self.zno55.frac_coords,
                        self.zno55.miller_index,
                        self.zno55.oriented_unit_cell,
                        0, self.zno55.scale_factor)
        m =self.zno55.lattice.matrix
        # Surface area is |a x b| of the in-plane lattice vectors.
        area = np.linalg.norm(np.cross(m[0], m[1]))
        self.assertAlmostEqual(zno_slab.surface_area, area)
        self.assertEqual(zno_slab.lattice.lengths_and_angles,
                         self.zno55.lattice.lengths_and_angles)
        self.assertEqual(zno_slab.oriented_unit_cell.composition,
                         self.zno1.composition)
        self.assertEqual(len(zno_slab), 8)
    def test_add_adsorbate_atom(self):
        """Adding an adsorbate H appends a site above the reference site."""
        zno_slab = Slab(self.zno55.lattice, self.zno55.species,
                        self.zno55.frac_coords,
                        self.zno55.miller_index,
                        self.zno55.oriented_unit_cell,
                        0, self.zno55.scale_factor)
        zno_slab.add_adsorbate_atom([1], 'H', 1)
        self.assertEqual(len(zno_slab), 9)
        self.assertEqual(str(zno_slab[8].specie), 'H')
        self.assertAlmostEqual(zno_slab.get_distance(1, 8), 1.0)
        # The adsorbate must sit above (larger c) the slab atoms.
        self.assertTrue(zno_slab[8].c > zno_slab[0].c)
        m = self.zno55.lattice.matrix
        area = np.linalg.norm(np.cross(m[0], m[1]))
        self.assertAlmostEqual(zno_slab.surface_area, area)
        self.assertEqual(zno_slab.lattice.lengths_and_angles,
                         self.zno55.lattice.lengths_and_angles)
    def test_get_sorted_structure(self):
        """Sorting places Zn2+ sites before O2- sites."""
        species = [str(site.specie) for site in
                   self.zno55.get_sorted_structure()]
        self.assertEqual(species, ["Zn2+"] * 4 + ["O2-"] * 4)
    def test_methods(self):
        # Test various structure methods
        self.zno55.get_primitive_structure()
    def test_as_from_dict(self):
        """Round-trip through as_dict/from_dict preserves the Miller index."""
        d = self.zno55.as_dict()
        obj = Slab.from_dict(d)
        self.assertEqual(obj.miller_index, (1, 0, 0))
    def test_dipole_and_is_polar(self):
        """Non-polar ZnO(100) has no dipole; decorated CsCl(100) does."""
        self.assertArrayAlmostEqual(self.zno55.dipole, [0, 0, 0])
        self.assertFalse(self.zno55.is_polar())
        cscl = self.get_structure("CsCl")
        cscl.add_oxidation_state_by_element({"Cs": 1, "Cl": -1})
        slab = SlabGenerator(cscl, [1, 0, 0], 5, 5, reorient_lattice=False,
                             lll_reduce=False, center_slab=False).get_slab()
        self.assertArrayAlmostEqual(slab.dipole, [-4.209, 0, 0])
        self.assertTrue(slab.is_polar())
    def test_surface_sites_and_symmetry(self):
        # test if surfaces are equivalent by using
        # Laue symmetry and surface site equivalence
        for bool in [True, False]:
            # We will also set the slab to be centered and
            # off centered in order to test the center of mass
            slabgen = SlabGenerator(self.agfcc, (3, 1, 0), 10, 10, center_slab=bool)
            slab = slabgen.get_slabs()[0]
            surf_sites_dict = slab.get_surface_sites()
            self.assertEqual(len(surf_sites_dict["top"]), len(surf_sites_dict["bottom"]))
            total_surf_sites = sum([len(surf_sites_dict[key])
                                    for key in surf_sites_dict.keys()])
            self.assertTrue(slab.is_symmetric())
            self.assertEqual(total_surf_sites/2, 4)
            self.assertTrue(slab.have_equivalent_surfaces())
            # Test if the ratio of surface sites per area is
            # constant, ie are the surface energies the same
            r1 = total_surf_sites/(2*slab.surface_area)
            slabgen = SlabGenerator(self.agfcc, (3, 1, 0), 10, 10, primitive=False)
            slab = slabgen.get_slabs()[0]
            surf_sites_dict = slab.get_surface_sites()
            total_surf_sites = sum([len(surf_sites_dict[key])
                                    for key in surf_sites_dict.keys()])
            r2 = total_surf_sites/(2*slab.surface_area)
            self.assertArrayEqual(r1, r2)
    def test_symmetrization(self):
        # Restricted to elemental materials due to the risk of
        # broken stoichiometry. For compound materials, use is_polar()
        # Get all slabs for P6/mmm Ti and Fm-3m Ag up to index of 2
        all_Ti_slabs = generate_all_slabs(self.ti, 2, 10, 10, bonds=None,
                                          tol=1e-3, max_broken_bonds=0,
                                          lll_reduce=False, center_slab=False,
                                          primitive=True, max_normal_search=2,
                                          symmetrize=True)
        all_Ag_fcc_slabs = generate_all_slabs(self.agfcc, 2, 10, 10, bonds=None,
                                              tol=1e-3, max_broken_bonds=0,
                                              lll_reduce=False, center_slab=False,
                                              primitive=True, max_normal_search=2,
                                              symmetrize=True)
        all_slabs = [all_Ti_slabs, all_Ag_fcc_slabs]
        for i, slabs in enumerate(all_slabs):
            assymetric_count = 0
            symmetric_count = 0
            for i, slab in enumerate(slabs):
                sg = SpacegroupAnalyzer(slab)
                # Check if a slab is symmetric
                if not sg.is_laue():
                    assymetric_count += 1
                else:
                    symmetric_count += 1
            # Check if slabs are all symmetric
            self.assertEqual(assymetric_count, 0)
            self.assertEqual(symmetric_count, len(slabs))
    def test_get_symmetric_sites(self):
        # Check to see if we get an equivalent site on one
        # surface if we add a new site to the other surface
        all_Ti_slabs = generate_all_slabs(self.ti, 2, 10, 10, bonds=None,
                                          tol=1e-3, max_broken_bonds=0,
                                          lll_reduce=False, center_slab=False,
                                          primitive=True, max_normal_search=2,
                                          symmetrize=True)
        for slab in all_Ti_slabs:
            sorted_sites = sorted(slab, key=lambda site: site.frac_coords[2])
            site = sorted_sites[-1]
            point = site.frac_coords
            point[2] = point[2]+0.1
            point2 = slab.get_symmetric_site(point)
            slab.append("O", point)
            slab.append("O", point2)
            # Check if slab is all symmetric
            sg = SpacegroupAnalyzer(slab)
            self.assertTrue(sg.is_laue())
    def test_oriented_unit_cell(self):
        # Check to see if we get the fully reduced oriented unit
        # cell. This will also ensure that the constrain_latt
        # parameter for get_primitive_structure is working properly
        def surface_area(s):
            # |a x b| of the in-plane lattice vectors.
            m = s.lattice.matrix
            return np.linalg.norm(np.cross(m[0], m[1]))
        all_slabs = generate_all_slabs(self.agfcc, 3, 10, 10, max_normal_search=3)
        for slab in all_slabs:
            ouc = slab.oriented_unit_cell
            self.assertAlmostEqual(surface_area(slab), surface_area(ouc))
            self.assertGreaterEqual(len(slab), len(ouc))
class SlabGeneratorTest(PymatgenTest):
def setUp(self):
lattice = Lattice.cubic(3.010)
frac_coords = [[0.00000, 0.00000, 0.00000],
[0.00000, 0.50000, 0.50000],
[0.50000, 0.00000, 0.50000],
[0.50000, 0.50000, 0.00000],
[0.50000, 0.00000, 0.00000],
[0.50000, 0.50000, 0.50000],
[0.00000, 0.00000, 0.50000],
[0.00000, 0.50000, 0.00000]]
species = ['Mg', 'Mg', 'Mg', 'Mg', 'O', 'O', 'O', 'O']
self.MgO = Structure(lattice, species, frac_coords)
self.MgO.add_oxidation_state_by_element({"Mg": 2, "O": -6})
lattice_Dy = Lattice.hexagonal(3.58, 25.61)
frac_coords_Dy = [[0.00000, 0.00000, 0.00000],
[0.66667, 0.33333, 0.11133],
[0.00000, 0.00000, 0.222],
[0.66667, 0.33333, 0.33333],
[0.33333, 0.66666, 0.44467],
[0.66667, 0.33333, 0.55533],
[0.33333, 0.66667, 0.66667],
[0.00000, 0.00000, 0.778],
[0.33333, 0.66667, 0.88867]]
species_Dy = ['Dy', 'Dy', 'Dy', 'Dy', 'Dy', 'Dy', 'Dy', 'Dy', 'Dy']
self.Dy = Structure(lattice_Dy, species_Dy, frac_coords_Dy)
def test_get_slab(self):
s = self.get_structure("LiFePO4")
gen = SlabGenerator(s, [0, 0, 1], 10, 10)
s = gen.get_slab(0.25)
self.assertAlmostEqual(s.lattice.abc[2], 20.820740000000001)
fcc = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Fe"],
[[0, 0, 0]])
gen = SlabGenerator(fcc, [1, 1, 1], 10, 10)
slab = gen.get_slab()
gen = SlabGenerator(fcc, [1, 1, 1], 10, 10, primitive=False)
slab_non_prim = gen.get_slab()
self.assertEqual(len(slab), 6)
self.assertEqual(len(slab_non_prim), len(slab) * 4)
# Some randomized testing of cell vectors
for i in range(1, 231):
i = random.randint(1, 230)
sg = SpaceGroup.from_int_number(i)
if sg.crystal_system == "hexagonal" or (sg.crystal_system == \
"trigonal" and (sg.symbol.endswith("H") or
sg.int_number in [143, 144, 145, 147, 149, 150, 151, 152,
153, 154, 156, 157, 158, 159, 162, 163,
164, 165])):
latt = Lattice.hexagonal(5, 10)
else:
# Cubic lattice is compatible with all other space groups.
latt = Lattice.cubic(5)
s = Structure.from_spacegroup(i, latt, ["H"], [[0, 0, 0]])
miller = (0, 0, 0)
while miller == (0, 0, 0):
miller = (random.randint(0, 6), random.randint(0, 6),
random.randint(0, 6))
gen = SlabGenerator(s, miller, 10, 10)
a, b, c = gen.oriented_unit_cell.lattice.matrix
self.assertAlmostEqual(np.dot(a, gen._normal), 0)
self.assertAlmostEqual(np.dot(b, gen._normal), 0)
def test_normal_search(self):
fcc = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Fe"],
[[0, 0, 0]])
for miller in [(1, 0, 0), (1, 1, 0), (1, 1, 1), (2, 1, 1)]:
gen = SlabGenerator(fcc, miller, 10, 10)
gen_normal = SlabGenerator(fcc, miller, 10, 10,
max_normal_search=max(miller))
slab = gen_normal.get_slab()
self.assertAlmostEqual(slab.lattice.alpha, 90)
self.assertAlmostEqual(slab.lattice.beta, 90)
self.assertGreaterEqual(len(gen_normal.oriented_unit_cell),
len(gen.oriented_unit_cell))
graphite = self.get_structure("Graphite")
for miller in [(1, 0, 0), (1, 1, 0), (0, 0, 1), (2, 1, 1)]:
gen = SlabGenerator(graphite, miller, 10, 10)
gen_normal = SlabGenerator(graphite, miller, 10, 10,
max_normal_search=max(miller))
self.assertGreaterEqual(len(gen_normal.oriented_unit_cell),
len(gen.oriented_unit_cell))
sc = Structure(Lattice.hexagonal(3.32, 5.15), ["Sc", "Sc"],
[[1/3, 2/3, 0.25], [2/3, 1/3, 0.75]])
gen = SlabGenerator(sc, (1, 1, 1), 10, 10, max_normal_search=1)
self.assertAlmostEqual(gen.oriented_unit_cell.lattice.angles[1], 90)
def test_get_slabs(self):
    gen = SlabGenerator(self.get_structure("CsCl"), [0, 0, 1], 10, 10)

    # Test orthogonality of some internal variables.
    a, b, c = gen.oriented_unit_cell.lattice.matrix
    self.assertAlmostEqual(np.dot(a, gen._normal), 0)
    self.assertAlmostEqual(np.dot(b, gen._normal), 0)

    self.assertEqual(len(gen.get_slabs()), 1)

    s = self.get_structure("LiFePO4")
    gen = SlabGenerator(s, [0, 0, 1], 10, 10)
    self.assertEqual(len(gen.get_slabs()), 5)

    self.assertEqual(len(gen.get_slabs(bonds={("P", "O"): 3})), 2)

    # There are no slabs in LFP that do not break either P-O or Fe-O
    # bonds for a miller index of [0, 0, 1].
    self.assertEqual(len(gen.get_slabs(
        bonds={("P", "O"): 3, ("Fe", "O"): 3})), 0)

    # If we allow some broken bonds, there are a few slabs.
    self.assertEqual(len(gen.get_slabs(
        bonds={("P", "O"): 3, ("Fe", "O"): 3},
        max_broken_bonds=2)), 2)

    # At this threshold, only the origin and center Li results in
    # clustering. All other sites are non-clustered. So the number of
    # slabs is the number of sites in the LiFePO4 unit cell - 2 + 1.
    self.assertEqual(len(gen.get_slabs(tol=1e-4)), 15)

    LiCoO2 = Structure.from_file(get_path("icsd_LiCoO2.cif"),
                                 primitive=False)
    gen = SlabGenerator(LiCoO2, [0, 0, 1], 10, 10)
    lco = gen.get_slabs(bonds={("Co", "O"): 3})
    self.assertEqual(len(lco), 1)
    a, b, c = gen.oriented_unit_cell.lattice.matrix
    self.assertAlmostEqual(np.dot(a, gen._normal), 0)
    self.assertAlmostEqual(np.dot(b, gen._normal), 0)

    scc = Structure.from_spacegroup("Pm-3m", Lattice.cubic(3), ["Fe"],
                                    [[0, 0, 0]])
    gen = SlabGenerator(scc, [0, 0, 1], 10, 10)
    slabs = gen.get_slabs()
    self.assertEqual(len(slabs), 1)
    gen = SlabGenerator(scc, [1, 1, 1], 10, 10, max_normal_search=1)
    slabs = gen.get_slabs()
    self.assertEqual(len(slabs), 1)

    # Test whether using units of hkl planes instead of Angstroms for
    # min_slab_size and min_vac_size will give us the same number of atoms
    natoms = []
    for a in [1, 1.4, 2.5, 3.6]:
        s = Structure.from_spacegroup("Im-3m", Lattice.cubic(a), ["Fe"],
                                      [[0, 0, 0]])
        slabgen = SlabGenerator(s, (1, 1, 1), 10, 10, in_unit_planes=True,
                                max_normal_search=2)
        natoms.append(len(slabgen.get_slab()))
    n = natoms[0]
    for i in natoms:
        self.assertEqual(n, i)
def test_triclinic_TeI(self):
    """Check slab counts for a triclinic TeI structure.

    Only these three Miller indices are used because it is easier to
    identify which atoms should be in a surface together; sites in
    other Miller indices lie close enough to cause ambiguity at a
    higher tolerance.
    """
    expected_nslabs = {(0, 0, 1): 5, (0, 1, 0): 3, (1, 0, 0): 7}
    structure = Structure.from_file(get_path("icsd_TeI.cif"),
                                    primitive=False)
    for hkl, n_expected in expected_nslabs.items():
        generator = SlabGenerator(structure, hkl, 10, 10)
        self.assertEqual(n_expected, len(generator.get_slabs()))
def test_get_orthogonal_c_slab(self):
    """Orthogonalizing c should force alpha and beta to 90 degrees."""
    structure = Structure.from_file(get_path("icsd_TeI.cif"),
                                    primitive=False)
    generator = SlabGenerator(structure, (0, 0, 1), 10, 10)
    first_slab = generator.get_slabs()[0]
    ortho_slab = first_slab.get_orthogonal_c_slab()
    alpha, beta = ortho_slab.lattice.angles[0], ortho_slab.lattice.angles[1]
    self.assertAlmostEqual(alpha, 90)
    self.assertAlmostEqual(beta, 90)
def test_get_tasker2_slabs(self):
    # The uneven distribution of ions on the (111) facets of Halite
    # type slabs are typical examples of Tasker 3 structures. We
    # will test this algo to generate a Tasker 2 structure instead
    slabgen = SlabGenerator(self.MgO, (1, 1, 1), 10, 10,
                            max_normal_search=1)
    # We generate the Tasker 3 structure first: it should be polar
    # and not symmetric.
    slab = slabgen.get_slabs()[0]
    self.assertFalse(slab.is_symmetric())
    self.assertTrue(slab.is_polar())
    # Now to generate the Tasker 2 structure, we must
    # ensure there are enough ions on top to move around
    slab.make_supercell([2, 1, 1])
    slabs = slab.get_tasker2_slabs()
    # Check if our Tasker 2 slab is nonpolar and symmetric
    for slab in slabs:
        self.assertTrue(slab.is_symmetric())
        self.assertFalse(slab.is_polar())
def test_nonstoichiometric_symmetrized_slab(self):
    # For the (111) halite slab, sometimes a nonstoichiometric
    # system is preferred over the stoichiometric Tasker 2.
    slabgen = SlabGenerator(self.MgO, (1, 1, 1), 10, 10,
                            max_normal_search=1)
    slabs = slabgen.get_slabs(symmetrize=True)

    # We should end up with two terminations, one with
    # an Mg rich surface and another O rich surface
    self.assertEqual(len(slabs), 2)
    for slab in slabs:
        self.assertTrue(slab.is_symmetric())

    # For a low symmetry elemental system such as
    # R-3m, there should be some nonsymmetric slabs
    # without using nonstoichiometric_symmetrized_slab.
    # With symmetrize=True all returned slabs must be symmetric and
    # larger than the input structure.
    slabs = generate_all_slabs(self.Dy, 1, 30, 30,
                               center_slab=True, symmetrize=True)
    for s in slabs:
        self.assertTrue(s.is_symmetric())
        self.assertGreater(len(s), len(self.Dy))
def test_move_to_other_side(self):
    """move_to_other_side should relocate the given sites across the slab."""
    structure = self.get_structure("LiFePO4")
    generator = SlabGenerator(structure, (0, 0, 1), 10, 10,
                              center_slab=True)
    slab = generator.get_slab()
    surface_sites = slab.get_surface_sites()

    # Top surface sites should end up below the center of mass once moved.
    top_indices = [entry[1] for entry in surface_sites["top"]]
    slab = generator.move_to_other_side(slab, top_indices)
    self.assertTrue(all(slab[i].frac_coords[2] < slab.center_of_mass[2]
                        for i in top_indices))

    # Bottom surface sites should end up above the center of mass once moved.
    bottom_indices = [entry[1] for entry in surface_sites["bottom"]]
    slab = generator.move_to_other_side(slab, bottom_indices)
    self.assertTrue(all(slab[i].frac_coords[2] > slab.center_of_mass[2]
                        for i in bottom_indices))
class ReconstructionGeneratorTests(PymatgenTest):
    """Tests for ReconstructionGenerator slab building."""

    def setUp(self):
        lattice = Lattice.cubic(3.51)  # renamed from ``l`` (easily misread)
        species = ["Ni"]
        coords = [[0, 0, 0]]
        self.Ni = Structure.from_spacegroup("Fm-3m", lattice, species, coords)
        self.Si = Structure.from_spacegroup("Fd-3m", Lattice.cubic(5.430500),
                                            ["Si"], [(0, 0, 0.5)])

    def test_build_slab(self):
        # First lets test a reconstruction where we only remove atoms
        recon = ReconstructionGenerator(self.Ni, 10, 10,
                                        "fcc_110_missing_row_1x2")
        slab = recon.get_unreconstructed_slab()
        recon_slab = recon.build_slab()
        self.assertTrue(recon_slab.reconstruction)
        self.assertEqual(len(slab), len(recon_slab) + 2)
        self.assertTrue(recon_slab.is_symmetric())

        # Test if the oriented unit cell (ouc) corresponds to the
        # reconstructed slab.
        recon_ouc = recon_slab.oriented_unit_cell
        ouc = slab.oriented_unit_cell
        self.assertEqual(ouc.lattice.b * 2, recon_ouc.lattice.b)
        self.assertEqual(len(ouc) * 2, len(recon_ouc))

        # Test a reconstruction where we simply add atoms
        recon = ReconstructionGenerator(self.Ni, 10, 10,
                                        "fcc_111_adatom_t_1x1")
        slab = recon.get_unreconstructed_slab()
        recon_slab = recon.build_slab()
        self.assertEqual(len(slab), len(recon_slab) - 2)
        self.assertTrue(recon_slab.is_symmetric())

        # If a slab references another slab, make sure it is properly
        # generated.
        # BUG FIX: the original asserted on the *bound method*
        # ``slab.is_symmetric`` (always truthy) instead of calling it.
        recon = ReconstructionGenerator(self.Ni, 10, 10,
                                        "fcc_111_adatom_ft_1x1")
        slab = recon.build_slab()
        self.assertTrue(slab.is_symmetric())

        # Test a reconstruction that works on a specific termination
        # (Fd-3m (111)).
        recon = ReconstructionGenerator(self.Si, 10, 10,
                                        "diamond_111_1x2")
        slab = recon.get_unreconstructed_slab()
        recon_slab = recon.build_slab()
        self.assertEqual(len(slab), len(recon_slab) - 8)
        self.assertTrue(recon_slab.is_symmetric())

        # TODO: test a reconstruction where terminations give
        # different reconstructions with a non-elemental system.

    def test_get_d(self):
        # Ensure that regardless of the size of the vacuum or slab
        # layer, the spacing between atomic layers should be the same.
        recon = ReconstructionGenerator(self.Si, 10, 10,
                                        "diamond_100_2x1")
        recon2 = ReconstructionGenerator(self.Si, 20, 10,
                                         "diamond_100_2x1")
        self.assertAlmostEqual(recon.get_d(), recon2.get_d())
class MillerIndexFinderTests(PymatgenTest):
    """Tests for Miller-index enumeration and slab-generation helpers."""

    def setUp(self):
        # Reference structures spanning several crystal systems.
        self.cscl = Structure.from_spacegroup(
            "Pm-3m", Lattice.cubic(4.2), ["Cs", "Cl"],
            [[0, 0, 0], [0.5, 0.5, 0.5]])
        self.Fe = Structure.from_spacegroup(
            "Im-3m", Lattice.cubic(2.82), ["Fe"],
            [[0, 0, 0]])
        self.lifepo4 = self.get_structure("LiFePO4")
        self.tei = Structure.from_file(get_path("icsd_TeI.cif"),
                                       primitive=False)
        self.LiCoO2 = Structure.from_file(get_path("icsd_LiCoO2.cif"),
                                          primitive=False)
        # A triclinic P1 structure with no symmetry beyond identity.
        self.p1 = Structure(Lattice.from_parameters(3, 4, 5, 31, 43, 50),
                            ["H", "He"], [[0, 0, 0], [0.1, 0.2, 0.3]])
        self.graphite = self.get_structure("Graphite")

    def test_get_symmetrically_distinct_miller_indices(self):
        # Tests to see if the function obtains the known number of unique slabs
        indices = get_symmetrically_distinct_miller_indices(self.cscl, 1)
        self.assertEqual(len(indices), 3)
        indices = get_symmetrically_distinct_miller_indices(self.cscl, 2)
        self.assertEqual(len(indices), 6)
        self.assertEqual(
            len(get_symmetrically_distinct_miller_indices(self.lifepo4, 1)), 7)

        # The TeI P-1 structure should have 13 unique millers (only inversion
        # symmetry eliminates pairs)
        indices = get_symmetrically_distinct_miller_indices(self.tei, 1)
        self.assertEqual(len(indices), 13)

        # P1 and P-1 should have the same # of miller indices since surfaces
        # always have inversion symmetry.
        indices = get_symmetrically_distinct_miller_indices(self.p1, 1)
        self.assertEqual(len(indices), 13)

        indices = get_symmetrically_distinct_miller_indices(self.graphite, 2)
        self.assertEqual(len(indices), 12)

    def test_generate_all_slabs(self):
        slabs = generate_all_slabs(self.cscl, 1, 10, 10)
        # Only three possible slabs, one each in (100), (110) and (111).
        self.assertEqual(len(slabs), 3)

        # make sure it generates reconstructions
        slabs = generate_all_slabs(self.Fe, 1, 10, 10,
                                   include_reconstructions=True)
        # Four possible slabs, (100), (110), (111) and the zigzag (100).
        self.assertEqual(len(slabs), 4)

        slabs = generate_all_slabs(self.cscl, 1, 10, 10,
                                   bonds={("Cs", "Cl"): 4})
        # No slabs if we don't allow broken Cs-Cl
        self.assertEqual(len(slabs), 0)

        slabs = generate_all_slabs(self.cscl, 1, 10, 10,
                                   bonds={("Cs", "Cl"): 4},
                                   max_broken_bonds=100)
        self.assertEqual(len(slabs), 3)

        slabs2 = generate_all_slabs(self.lifepo4, 1, 10, 10,
                                    bonds={("P", "O"): 3, ("Fe", "O"): 3})
        self.assertEqual(len(slabs2), 0)

        # There should be only one possible stable surface, all of which are
        # in the (001) oriented unit cell
        slabs3 = generate_all_slabs(self.LiCoO2, 1, 10, 10,
                                    bonds={("Co", "O"): 3})
        self.assertEqual(len(slabs3), 1)
        mill = (0, 0, 1)
        for s in slabs3:
            self.assertEqual(s.miller_index, mill)

        slabs1 = generate_all_slabs(self.lifepo4, 1, 10, 10, tol=0.1,
                                    bonds={("P", "O"): 3})
        self.assertEqual(len(slabs1), 4)

        # Now we test this out for repair_broken_bonds()
        slabs1_repair = generate_all_slabs(self.lifepo4, 1, 10, 10, tol=0.1,
                                           bonds={("P", "O"): 3}, repair=True)
        self.assertGreater(len(slabs1_repair), len(slabs1))

        # Lets see if there are no broken PO4 polyhedrons: every P should
        # keep 4 O neighbors within 3 Angstroms after repair.
        miller_list = get_symmetrically_distinct_miller_indices(self.lifepo4, 1)
        all_miller_list = []
        for slab in slabs1_repair:
            hkl = tuple(slab.miller_index)
            if hkl not in all_miller_list:
                all_miller_list.append(hkl)
            broken = []
            for site in slab:
                if site.species_string == "P":
                    neighbors = slab.get_neighbors(site, 3)
                    cn = 0
                    for nn in neighbors:
                        cn += 1 if nn[0].species_string == "O" else 0
                    broken.append(cn != 4)
            self.assertFalse(any(broken))

        # check if we were able to produce at least one
        # termination for each distinct Miller index
        self.assertEqual(len(miller_list), len(all_miller_list))

    def test_miller_index_from_sites(self):
        # test on a cubic system
        m = Lattice.cubic(1).matrix
        s1 = np.array([0.5, -1.5, 3])
        s2 = np.array([0.5, 3., -1.5])
        s3 = np.array([2.5, 1.5, -4.])
        self.assertEqual(tuple(miller_index_from_sites(m, [s1, s2, s3])),
                         (-2, -1, -1))

        # test on a hexagonal system
        m = np.array([[2.319, -4.01662582, 0.],
                      [2.319, 4.01662582, 0.],
                      [0., 0., 7.252]])
        s1 = np.array([2.319, 1.33887527, 6.3455])
        s2 = np.array([1.1595, 0.66943764, 4.5325])
        s3 = np.array([1.1595, 0.66943764, 0.9065])
        hkl = [np.round(i, 6) for i in miller_index_from_sites(m, [s1, s2, s3])]
        self.assertEqual(tuple(hkl), (2, -1, 0))
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| czhengsci/pymatgen | pymatgen/core/tests/test_surface.py | Python | mit | 28,851 | [
"pymatgen"
] | 8ac1970c83e53be6c9e906b11b4a0fca01c45704a5dcd25de19a456aa59ee110 |
# trbchan.py ---
#
# Filename: trbchan.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Fri May 4 14:55:52 2012 (+0530)
# Version:
# Last-Updated: Fri May 3 11:45:07 2013 (+0530)
# By: subha
# Update #: 337
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
# Base class for channels in Traub model.
#
#
# Change log:
#
# 2012-05-04 14:55:56 (+0530) subha started porting code from
# channel.py in old moose version to dh_branch.
#
# Code:
from warnings import warn
import numpy as np
import moose
import config
# Voltage range (V) and resolution for the x/y gate interpolation tables.
vmin = -120e-3
vmax = 40e-3
vdivs = 640
v_array = np.linspace(vmin, vmax, vdivs + 1)
# Concentration range and resolution for the z gate interpolation tables
# (units as used by the kinetics definitions in the subclasses).
ca_min = 0.0
ca_max = 1000.0
ca_divs = 1000
ca_conc = np.linspace(ca_min, ca_max, ca_divs + 1)
# Registry of channel prototypes created by ChannelMeta, keyed by class name.
prototypes = {}
def setup_gate_tables(gate, param_dict, bases):
    """Fill the interpolation tables of a moose HHGate.

    The gate name ('gateX'/'gateY'/'gateZ') selects the parameter
    suffix; x and y gates get the voltage range, the z gate the
    concentration range. Tables are looked up in ``param_dict`` as
    'tau_x'/'inf_x', 'alpha_x'/'beta_x' or 'tableA_x'/'tableB_x'
    (similarly for y and z); if none are present the base classes are
    searched recursively. Returns True if any tables were set up,
    False otherwise.
    """
    suffix = None
    if gate.name == 'gateX':
        suffix = 'x'
    elif gate.name == 'gateY':
        suffix = 'y'
    elif gate.name == 'gateZ':
        suffix = 'z'
    else:
        raise NameError('Gate in a channel must have names in [`gateX`, `gateY`, `gateZ`]')
    if suffix in ['x', 'y']:
        # x and y gates are voltage dependent.
        gate.min = vmin
        gate.max = vmax
        gate.divs = vdivs
    else:
        # z gate is concentration dependent.
        gate.min = ca_min
        gate.max = ca_max
        gate.divs = ca_divs
    gate.useInterpolation = True
    keys = ['%s_%s' % (key, suffix) for key in ['tau', 'inf', 'alpha', 'beta', 'tableA', 'tableB']]
    msg = ''
    if keys[0] in param_dict:
        msg = 'Using tau/inf tables'
        # HHGate convention: A = inf/tau, B = 1/tau
        # (so inf = A/B and tau = 1/B).
        gate.tableA = param_dict[keys[1]] / param_dict[keys[0]]
        gate.tableB = 1 / param_dict[keys[0]]
    elif keys[2] in param_dict:
        msg = 'Using alpha/beta tables'
        # HHGate convention: A = alpha, B = alpha + beta.
        gate.tableA = param_dict[keys[2]]
        gate.tableB = param_dict[keys[2]] + param_dict[keys[3]]
    elif keys[4] in param_dict:
        msg = 'Using A/B tables'
        gate.tableA = param_dict[keys[4]]
        gate.tableB = param_dict[keys[5]]
    else:
        # Nothing defined on this class: walk the base classes (each
        # base's own bases via its MRO minus itself) and recurse.
        for base in bases:
            new_bases = base.mro()
            new_param_dict = base.__dict__
            if new_bases:
                new_bases = new_bases[1:]
            if setup_gate_tables(gate, new_param_dict, new_bases):
                msg = 'Gate setup in baseclass: ' + base.__class__.__name__
                break
    if msg:
        config.logger.debug('%s: %s' % (gate.path, msg))
        return True
    else:
        config.logger.debug('%s: nothing was setup for this gate' % (gate.path))
        return False
def get_class_field(name, cdict, bases, fieldname, default=None):
    """Look up ``fieldname`` in a class dict, falling back to the bases.

    ``name`` is used only for the debug log line. If the field is
    neither in ``cdict`` nor found on any class in ``bases``,
    ``default`` is returned.
    """
    if fieldname in cdict:
        config.logger.debug('%s: %s=%s' % (name, fieldname, str(cdict[fieldname])))
        return cdict[fieldname]
    for candidate in bases:
        if hasattr(candidate, fieldname):
            return getattr(candidate, fieldname)
    # warn('field %s not in the hierarchy of %s class. Returning default value.' % (fieldname, name))
    return default
class ChannelMeta(type):
    """Metaclass that builds a moose.HHChannel prototype for each
    concrete channel class at class-creation time.

    Classes setting ``abstract = True`` are skipped. For all others a
    prototype is created under ``config.library``, its gates and
    fields are configured from the class dict (with base-class
    fallback via get_class_field), and the prototype is registered in
    the module-level ``prototypes`` dict under the class name.
    """
    def __new__(cls, name, bases, cdict):
        global prototypes
        # classes that set abstract=True will be
        # abstract classes. Others will have the prototype instantiated.
        if 'abstract' in cdict and cdict['abstract'] == True:
            return type.__new__(cls, name, bases, cdict)
        proto = moose.HHChannel('%s/%s' % (config.library.path, name))
        # X (activation) gate; a power of 0 disables the gate.
        xpower = get_class_field(name, cdict, bases, 'Xpower', default=0.0)
        if xpower > 0:
            proto.Xpower = xpower
            gate = moose.HHGate('%s/gateX' % (proto.path))
            setup_gate_tables(gate, cdict, bases)
            cdict['xGate'] = gate
        # Y (inactivation) gate.
        ypower = get_class_field(name, cdict, bases, 'Ypower', default=0.0)
        if ypower > 0:
            proto.Ypower = ypower
            gate = moose.HHGate('%s/gateY' % (proto.path))
            setup_gate_tables(gate, cdict, bases)
            cdict['yGate'] = gate
        # Z gate: concentration dependent (see setup_gate_tables).
        zpower = get_class_field(name, cdict, bases, 'Zpower', default=0.0)
        if zpower > 0:
            proto.Zpower = zpower
            gate = moose.HHGate('%s/gateZ' % (proto.path))
            setup_gate_tables(gate, cdict, bases)
            cdict['zGate'] = gate
            # Instruct the cell reader to connect the CaPool's
            # concentration output to this channel.
            ca_msg_field = moose.Mstring('%s/addmsg1' % (proto.path))
            ca_msg_field.value = '../CaPool concOut . concen'
            # NOTE(review): indentation reconstructed from a
            # whitespace-mangled source; 'instant' and
            # 'useConcentration' are assumed to belong to the
            # Ca-dependent (Zpower > 0) branch -- verify upstream.
            proto.instant = get_class_field(name, cdict, bases, 'instant', default=0)
            proto.useConcentration = True
        proto.Ek = get_class_field(name, cdict, bases, 'Ek', default=0.0)
        # Optional initial gate states.
        X = get_class_field(name, cdict, bases, 'X')
        if X is not None:
            proto.X = X
        Y = get_class_field(name, cdict, bases, 'Y')
        if Y is not None:
            proto.Y = Y
        Z = get_class_field(name, cdict, bases, 'Z')
        if Z is not None:
            proto.Z = Z
        # Optional extra Mstring field, given as a (fieldname, value) pair.
        mstring_field = get_class_field(name, cdict, bases, 'mstring')
        if mstring_field is not None:
            mstring = moose.Mstring('%s/%s' % (proto.path, mstring_field[0]))
            mstring.value = mstring_field[1]
        # Attach free-form annotations (e.g. ontology ids) to the prototype.
        if 'annotation' in cdict:
            info = moose.Annotator('%s/info' % (proto.path))
            info.notes = '\n'.join('%s: %s' % kv for kv in cdict['annotation'].items())
        cdict['prototype'] = proto
        prototypes[name] = proto
        config.logger.info('Created prototype: %s of class %s' % (proto.path, name))
        return type.__new__(cls, name, bases, cdict)
class ChannelBase(moose.HHChannel):
    """Base class for channels in the Traub model.

    NOTE(review): ``__metaclass__`` is the Python 2 metaclass
    protocol; under Python 3 this attribute is ignored and the
    prototype-building in ChannelMeta would not run -- confirm the
    intended Python version before porting.
    """
    # Ontology annotation copied onto the prototype by ChannelMeta.
    annotation = {'cno': 'cno_0000047'}
    # Marks this class as abstract so no prototype is instantiated for it.
    abstract = True

    __metaclass__ = ChannelMeta

    def __init__(self, path, xpower=1, ypower=0, Ek=0.0):
        moose.HHChannel.__init__(self, path)
#
# trbchan.py ends here
| dilawar/moose-full | moose-examples/traub_2005/py/channelbase.py | Python | gpl-2.0 | 5,829 | [
"MOOSE"
] | 9b645a679f18ebd532d972f00780393d63f1a8f49c6eaeafc26c8fe807053f3c |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the ``Apache`` visitor-count table."""

    dependencies = []

    operations = [
        migrations.CreateModel(
            name='Apache',
            fields=[
                ('id', models.AutoField(auto_created=True,
                                        primary_key=True,
                                        serialize=False,
                                        verbose_name='ID')),
                ('date', models.DateTimeField()),
                ('visit', models.IntegerField()),
            ],
        ),
    ]
| za/dd3 | dd3/visitor/migrations/0001_initial.py | Python | apache-2.0 | 522 | [
"VisIt"
] | 5e1c641d33993467ba4643c656a35091ce831d29d5e65d487d5647c70b6452e4 |
""" Python 'utf-7' Codec

Written by Brian Quinlan (brian@sweetapp.com).
"""
# BUG FIX: this region contained unresolved VCS merge-conflict markers
# (<<<<<<< / ======= / >>>>>>>) wrapping three identical copies of the
# module, which made the file a SyntaxError. All three copies were
# byte-identical, so the conflict is resolved to a single clean copy.
import codecs

### Codec APIs

# Encoding needs no extra state, so the C implementation is used directly.
encode = codecs.utf_7_encode


def decode(input, errors='strict'):
    """Decode *input* as UTF-7, consuming the whole buffer (final=True)."""
    return codecs.utf_7_decode(input, errors, True)


class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        return codecs.utf_7_encode(input, self.errors)[0]


class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    # BufferedIncrementalDecoder handles partial input buffering; the
    # stateless C decoder does the actual work.
    _buffer_decode = codecs.utf_7_decode


class StreamWriter(codecs.StreamWriter):
    encode = codecs.utf_7_encode


class StreamReader(codecs.StreamReader):
    decode = codecs.utf_7_decode


### encodings module API

def getregentry():
    """Return the CodecInfo entry used by the ``encodings`` registry."""
    return codecs.CodecInfo(
        name='utf-7',
        encode=encode,
        decode=decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| ArcherSys/ArcherSys | Lib/encodings/utf_7.py | Python | mit | 2,978 | [
"Brian"
] | 29bea489f60fa29cb3d3bb97d7ff115c73f3c809ff198e77ef315e98ca226027 |
# Example script: fit the H-alpha + [NII] + [SII] emission-line complex
# in an SDSS optical spectrum, then derive line properties with the
# pyspeckit measurements class. Requires 'sample_sdss.txt' alongside
# this script.
import pyspeckit

# Rest wavelengths of the lines we are fitting - use as initial guesses
NIIa = 6549.86
NIIb = 6585.27
Halpha = 6564.614
SIIa = 6718.29
SIIb = 6732.68

# Initialize spectrum object and plot region surrounding Halpha-[NII] complex
spec = pyspeckit.Spectrum('sample_sdss.txt', errorcol=2)
spec.plotter(xmin=6450, xmax=6775, ymin=0, ymax=150)

# We fit the [NII] and [SII] doublets, and allow two components for Halpha.
# The widths of all narrow lines are tied to the widths of [SII].
# Guesses are flat (amplitude, center, width) triplets per component.
guesses = [50, NIIa, 5, 100, Halpha, 5, 50, Halpha, 50, 50, NIIb, 5, 20, SIIa,
           5, 20, SIIb, 5]
# 'tied' expressions: p[17] is the [SII]b width (shared by all narrow
# lines), p[4] ties the broad Halpha center to the narrow one, and the
# [NII]b amplitude is fixed at 3x [NII]a.
tied = ['', '', 'p[17]', '', '', 'p[17]', '', 'p[4]', '', '3 * p[0]', '',
        'p[17]', '', '', 'p[17]', '', '', '']

# Actually do the fit.
spec.specfit(guesses=guesses, tied=tied, annotate=False)
spec.plotter.refresh()

# Let's use the measurements class to derive information about the emission
# lines. The galaxy's redshift and the flux normalization of the spectrum
# must be supplied to convert measured fluxes to line luminosities. If the
# spectrum we loaded in FITS format, 'BUNITS' would be read and we would not
# need to supply 'fluxnorm'.
spec.measure(z=0.05, fluxnorm=1e-17)

# Now overplot positions of lines and annotate
y = spec.plotter.ymax * 0.85  # Location of annotations in y
for i, line in enumerate(spec.measurements.lines.keys()):
    # If this line is not in our database of lines, don't try to annotate it
    if line not in spec.speclines.optical.lines.keys():
        continue
    x = spec.measurements.lines[line]['modelpars'][1]  # Location of the emission line
    # Draw dashed line to mark its position
    spec.plotter.axis.plot([x] * 2, [spec.plotter.ymin, spec.plotter.ymax],
                           ls='--', color='k')
    # Label it
    spec.plotter.axis.annotate(spec.speclines.optical.lines[line][-1], (x, y),
                               rotation=90, ha='right', va='center')

# Make some nice axis labels
spec.plotter.axis.set_xlabel(r'Wavelength $(\AA)$')
spec.plotter.axis.set_ylabel(r'Flux $(10^{-17} \mathrm{erg/s/cm^2/\AA})$')
spec.plotter.refresh()

# Print out spectral line information
print("Line Flux (erg/s/cm^2) Amplitude (erg/s/cm^2)"
      " FWHM (Angstrom) Luminosity (erg/s)")
for line in spec.measurements.lines.keys():
    print(line, spec.measurements.lines[line]['flux'],
          spec.measurements.lines[line]['amp'],
          spec.measurements.lines[line]['fwhm'],
          spec.measurements.lines[line]['lum'])

# Had we not supplied the objects redshift (or distance), the line
# luminosities would not have been measured, but integrated fluxes would
# still be derived. Also, the measurements class separates the broad and
# narrow H-alpha components, and identifies which lines are which. How nice!

spec.specfit.plot_fit()

# Save the figure
spec.plotter.figure.savefig("sdss_fit_example.png")
| low-sky/pyspeckit | docs/example_sdss.py | Python | mit | 2,916 | [
"Galaxy"
] | 23b32923bd03b972269dadf2c8008165f494aae8d84e58be6bc784081f8d7546 |
from contextlib import suppress
import numpy as np
import pandas as pd
from ..doctools import document
from ..exceptions import PlotnineError
from .stat_density import stat_density, compute_density
from .stat import stat
@document
class stat_ydensity(stat):
    """
    Density estimate

    {usage}

    Parameters
    ----------
    {common_parameters}
    kernel : str, optional (default: 'gaussian')
        Kernel used for density estimation. One of::

            'biweight'
            'cosine'
            'cosine2'
            'epanechnikov'
            'gaussian'
            'triangular'
            'triweight'
            'uniform'

    adjust : float, optional (default: 1)
        An adjustment factor for the ``bw``. Bandwidth becomes
        :py:`bw * adjust`.
        Adjustment of the bandwidth.
    trim : bool, optional (default: True)
        This parameter only matters if you are displaying multiple
        densities in one plot. If :py:`False`, each
        density is computed on the full range of the data. If
        :py:`True`, the default, each density is computed over the
        range of that group; this typically means the estimated x
        values will not line-up, and hence you won't be able to
        stack density values.
    n : int, optional (default: 1024)
        Number of equally spaced points at which the density is to
        be estimated. For efficient computation, it should be a power
        of two.
    bw : str or float, optional (default: 'nrd0')
        The bandwidth to use. If a float is given, it is the bandwidth.
        The :py:`str` choices are::

            'nrd0'
            'normal_reference'
            'scott'
            'silverman'

        ``nrd0`` is a port of ``stats::bw.nrd0`` in R; it is equivalent
        to ``silverman`` when there is more than 1 value in a group.
    scale : (default: area)
        How to scale the violins. The options are::

            'area'  # all violins have the same area, before
                    # trimming the tails.
            'count' # areas are scaled proportionally to the number
                    # of observations.
            'width' # all violins have the same maximum width.

    See Also
    --------
    plotnine.geoms.geom_violin
    statsmodels.nonparametric.kde.KDEUnivariate
    statsmodels.nonparametric.kde.KDEUnivariate.fit
    """
    _aesthetics_doc = """
    {aesthetics_table}

    .. rubric:: Options for computed aesthetics

    ::

        'width' # Maximum width of density, [0, 1] range.
        'violinwidth' # Shape of the violin

    Calculated aesthetics are accessed using the `after_stat` function.
    e.g. :py:`after_stat('width')`.
    """

    REQUIRED_AES = {'x', 'y'}
    NON_MISSING_AES = {'weight'}
    DEFAULT_PARAMS = {'geom': 'violin', 'position': 'dodge',
                      'na_rm': False,
                      'adjust': 1, 'kernel': 'gaussian',
                      'n': 1024, 'trim': True,
                      'bw': 'nrd0',
                      'scale': 'area'}
    DEFAULT_AES = {'weight': None}
    CREATES = {'width', 'violinwidth'}

    def setup_params(self, data):
        """
        Validate and normalise the parameters.

        Raises PlotnineError for an unknown ``scale`` or ``kernel``;
        density parameters not set here are inherited from
        stat_density's defaults.
        """
        params = self.params.copy()

        valid_scale = ('area', 'count', 'width')
        if params['scale'] not in valid_scale:
            msg = "Parameter scale should be one of {}"
            raise PlotnineError(msg.format(valid_scale))

        # Map the full kernel names onto the abbreviations used by
        # the underlying density estimator.
        lookup = {
            'biweight': 'biw',
            'cosine': 'cos',
            'cosine2': 'cos2',
            'epanechnikov': 'epa',
            'gaussian': 'gau',
            'triangular': 'tri',
            'triweight': 'triw',
            'uniform': 'uni'}

        with suppress(KeyError):
            params['kernel'] = lookup[params['kernel'].lower()]

        if params['kernel'] not in lookup.values():
            msg = ("kernel should be one of {}. "
                   "You may use the abbreviations {}")
            raise PlotnineError(msg.format(lookup.keys(),
                                           lookup.values()))

        # Fall back to stat_density defaults for any density
        # parameters not specified on this stat.
        missing_params = (stat_density.DEFAULT_PARAMS.keys() -
                          params.keys())
        for key in missing_params:
            params[key] = stat_density.DEFAULT_PARAMS[key]

        return params

    @classmethod
    def compute_panel(cls, data, scales, **params):
        # Densities are computed per group by the parent class; the
        # violin widths are then normalised across the whole panel
        # according to the 'scale' parameter.
        data = super(cls, cls).compute_panel(data, scales, **params)

        if not len(data):
            return data

        if params['scale'] == 'area':
            data['violinwidth'] = data['density']/data['density'].max()
        elif params['scale'] == 'count':
            data['violinwidth'] = (data['density'] /
                                   data['density'].max() *
                                   data['n']/data['n'].max())
        elif params['scale'] == 'width':
            data['violinwidth'] = data['scaled']
        else:
            msg = "Unknown scale value '{}'"
            raise PlotnineError(msg.format(params['scale']))

        return data

    @classmethod
    def compute_group(cls, data, scales, **params):
        n = len(data)

        if n == 0:
            return pd.DataFrame()

        weight = data.get('weight')

        # With trim=True the density is estimated over the group's own
        # y-range, otherwise over the full scale range.
        if params['trim']:
            range_y = data['y'].min(), data['y'].max()
        else:
            range_y = scales.y.dimension()

        dens = compute_density(data['y'], weight, range_y, **params)
        if not len(dens):
            return dens

        # The density was computed along y; place the violin at the
        # group's x position.
        dens['y'] = dens['x']
        dens['x'] = np.mean([data['x'].min(), data['x'].max()])

        # Compute width if x has multiple values
        if len(np.unique(data['x'])) > 1:
            dens['width'] = np.ptp(data['x']) * 0.9

        return dens
| has2k1/plotnine | plotnine/stats/stat_ydensity.py | Python | gpl-2.0 | 5,708 | [
"Gaussian"
] | 045640859aacfc6f2d5f54ac37daafc8cf4de61571ecd29bcf2fe4c6fbb8fb74 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.