# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
bootstrap procedures --- :mod:`MDAnalysis.analysis.encore.bootstrap`
=====================================================================

This module contains functions for bootstrapping either ensembles (Universe
objects) or distance matrices, by resampling with replacement.

:Author: Matteo Tiberti, Wouter Boomsma, Tone Bengtsen

.. versionadded:: 0.16.0
"""
from __future__ import absolute_import
import numpy as np
import logging
import MDAnalysis as mda
from .utils import TriangularMatrix, ParallelCalculation
def bootstrapped_matrix(matrix, ensemble_assignment):
"""
    Bootstrap an input square matrix. The resulting matrix has the same
    shape as the original one, but its elements are drawn by resampling
    frames with replacement. Each ensemble is bootstrapped separately.
Parameters
----------
matrix : encore.utils.TriangularMatrix
similarity/dissimilarity matrix
ensemble_assignment: numpy.array
array of ensemble assignments. This array must be matrix.size long.
Returns
-------
this_m : encore.utils.TriangularMatrix
bootstrapped similarity/dissimilarity matrix
"""
ensemble_identifiers = np.unique(ensemble_assignment)
this_m = TriangularMatrix(size=matrix.size)
indexes = []
for ens in ensemble_identifiers:
old_indexes = np.where(ensemble_assignment == ens)[0]
indexes.append(np.random.randint(low=np.min(old_indexes),
high=np.max(old_indexes) + 1,
size=old_indexes.shape[0]))
indexes = np.hstack(indexes)
for j in range(this_m.size):
for k in range(j):
this_m[j, k] = matrix[indexes[j], indexes[k]]
logging.info("Matrix bootstrapped.")
return this_m
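# A minimal usage sketch for ``bootstrapped_matrix`` (sizes and assignments
# below are illustrative; a real matrix would come from e.g. a conformational
# distance calculation):
#
#     import numpy as np
#     from MDAnalysis.analysis.encore.utils import TriangularMatrix
#     m = TriangularMatrix(size=4)
#     assignments = np.array([1, 1, 1, 1])   # all frames in one ensemble
#     boot = bootstrapped_matrix(m, assignments)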
def get_distance_matrix_bootstrap_samples(distance_matrix,
ensemble_assignment,
samples=100,
ncores=1):
"""
Calculates distance matrices corresponding to bootstrapped ensembles, by
resampling with replacement.
Parameters
----------
distance_matrix : encore.utils.TriangularMatrix
Conformational distance matrix
    ensemble_assignment : numpy.array
Mapping from frames to which ensemble they are from (necessary because
ensembles are bootstrapped independently)
samples : int, optional
How many bootstrap samples to create.
ncores : int, optional
Maximum number of cores to be used (default is 1)
Returns
-------
confdistmatrix : list of encore.utils.TriangularMatrix
"""
bs_args = \
[([distance_matrix, ensemble_assignment]) for i in range(samples)]
pc = ParallelCalculation(ncores, bootstrapped_matrix, bs_args)
pc_results = pc.run()
    bootstrap_matrices = list(zip(*pc_results))[1]
return bootstrap_matrices
def get_ensemble_bootstrap_samples(ensemble,
samples=100):
"""
Generates a bootstrapped ensemble by resampling with replacement.
Parameters
----------
    ensemble : MDAnalysis.Universe
        The ensemble of conformations whose frames will be resampled
samples : int, optional
How many bootstrap samples to create.
Returns
-------
list of MDAnalysis.Universe objects
"""
ensemble.transfer_to_memory()
ensembles = []
for i in range(samples):
indices = np.random.randint(
low=0,
high=ensemble.trajectory.timeseries().shape[1],
size=ensemble.trajectory.timeseries().shape[1])
ensembles.append(
mda.Universe(ensemble.filename,
ensemble.trajectory.timeseries(format='fac')[indices,:,:],
format=mda.coordinates.memory.MemoryReader))
return ensembles
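# A minimal end-to-end sketch (file names below are placeholders; any
# topology/trajectory pair readable by MDAnalysis would work):
#
#     import MDAnalysis as mda
#     u = mda.Universe("topology.pdb", "trajectory.xtc")
#     boot_ensembles = get_ensemble_bootstrap_samples(u, samples=10)
#     # each element is an in-memory Universe whose frames were drawn
#     # with replacement from the original trajectory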
--- kain88-de/mdanalysis | package/MDAnalysis/analysis/encore/bootstrap.py | Python | gpl-2.0 ---
# Copyright (C) 2016 The ESPResSo project
# Copyright (C) 2014 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Define the espressomd package
# Initialize MPI, start the main loop on the slaves
import espressomd._init
# Initialize the Tcl Interpreter if available
try:
import espressomd._tcl
tcl = espressomd._tcl.TclInterpreter()
except ImportError:
pass
espressomd._init.setup()
from espressomd.system import System
from espressomd.code_info import features
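# A minimal usage sketch (illustrative only; the exact API differs between
# ESPResSo versions and some calls require a matching build configuration):
#
#     import espressomd
#     system = espressomd.System()
#     system.box_l = [10.0, 10.0, 10.0]
#     print(espressomd.features())   # re-exported from espressomd.code_info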
--- Smiljanic/espresso | src/python/espressomd/__init__.py | Python | gpl-3.0 ---
"""
Vector Autoregression (VAR) processes
References
----------
Lutkepohl (2005) New Introduction to Multiple Time Series Analysis
"""
from __future__ import division, print_function
from statsmodels.compat.python import (range, lrange, string_types, StringIO, iteritems,
cStringIO)
from collections import defaultdict
import numpy as np
import numpy.linalg as npl
from numpy.linalg import cholesky as chol, solve
import scipy.stats as stats
import scipy.linalg as L
from statsmodels.tools.decorators import cache_readonly
from statsmodels.tools.tools import chain_dot
from statsmodels.tools.linalg import logdet_symm
from statsmodels.tsa.tsatools import vec, unvec
from statsmodels.tsa.vector_ar.irf import IRAnalysis
from statsmodels.tsa.vector_ar.output import VARSummary
import statsmodels.tsa.tsatools as tsa
import statsmodels.tsa.vector_ar.output as output
import statsmodels.tsa.vector_ar.plotting as plotting
import statsmodels.tsa.vector_ar.util as util
import statsmodels.tsa.base.tsa_model as tsbase
import statsmodels.base.wrapper as wrap
mat = np.array
#-------------------------------------------------------------------------------
# VAR process routines
def ma_rep(coefs, maxn=10):
r"""
MA(\infty) representation of VAR(p) process
Parameters
----------
coefs : ndarray (p x k x k)
maxn : int
Number of MA matrices to compute
Notes
-----
VAR(p) process as
.. math:: y_t = A_1 y_{t-1} + \ldots + A_p y_{t-p} + u_t
can be equivalently represented as
.. math:: y_t = \mu + \sum_{i=0}^\infty \Phi_i u_{t-i}
    so one can recursively compute the \Phi_i matrices with \Phi_0 = I_k
Returns
-------
phis : ndarray (maxn + 1 x k x k)
"""
p, k, k = coefs.shape
phis = np.zeros((maxn+1, k, k))
phis[0] = np.eye(k)
# recursively compute Phi matrices
for i in range(1, maxn + 1):
for j in range(1, i+1):
if j > p:
break
phis[i] += np.dot(phis[i-j], coefs[j-1])
return phis
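# Sanity-check sketch: for a VAR(1) the MA matrices reduce to powers of the
# single coefficient matrix, Phi_i = A_1**i (values below are illustrative):
#
#     import numpy as np
#     A = np.array([[[0.5, 0.1],
#                    [0.0, 0.2]]])          # p=1, k=2
#     phis = ma_rep(A, maxn=2)
#     assert np.allclose(phis[2], A[0] @ A[0])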
def is_stable(coefs, verbose=False):
"""
Determine stability of VAR(p) system by examining the eigenvalues of the
VAR(1) representation
Parameters
----------
coefs : ndarray (p x k x k)
Returns
-------
is_stable : bool
"""
A_var1 = util.comp_matrix(coefs)
eigs = np.linalg.eigvals(A_var1)
if verbose:
print('Eigenvalues of VAR(1) rep')
for val in np.abs(eigs):
print(val)
return (np.abs(eigs) <= 1).all()
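# Illustrative check: a diagonal VAR(1) with coefficients 0.5 and 0.2 has
# companion eigenvalues 0.5 and 0.2, both inside the unit circle:
#
#     import numpy as np
#     assert is_stable(np.array([[[0.5, 0.0],
#                                 [0.0, 0.2]]]))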
def var_acf(coefs, sig_u, nlags=None):
"""
Compute autocovariance function ACF_y(h) up to nlags of stable VAR(p)
process
Parameters
----------
coefs : ndarray (p x k x k)
Coefficient matrices A_i
sig_u : ndarray (k x k)
Covariance of white noise process u_t
nlags : int, optional
Defaults to order p of system
Notes
-----
Ref: Lutkepohl p.28-29
Returns
-------
    acf : ndarray (nlags + 1, k, k)
"""
p, k, _ = coefs.shape
if nlags is None:
nlags = p
# p x k x k, ACF for lags 0, ..., p-1
result = np.zeros((nlags + 1, k, k))
result[:p] = _var_acf(coefs, sig_u)
# yule-walker equations
for h in range(p, nlags + 1):
# compute ACF for lag=h
# G(h) = A_1 G(h-1) + ... + A_p G(h-p)
for j in range(p):
result[h] += np.dot(coefs[j], result[h-j-1])
return result
def _var_acf(coefs, sig_u):
"""
Compute autocovariance function ACF_y(h) for h=1,...,p
Notes
-----
Lutkepohl (2005) p.29
"""
p, k, k2 = coefs.shape
assert(k == k2)
A = util.comp_matrix(coefs)
# construct VAR(1) noise covariance
SigU = np.zeros((k*p, k*p))
SigU[:k,:k] = sig_u
# vec(ACF) = (I_(kp)^2 - kron(A, A))^-1 vec(Sigma_U)
vecACF = L.solve(np.eye((k*p)**2) - np.kron(A, A), vec(SigU))
acf = unvec(vecACF)
acf = acf[:k].T.reshape((p, k, k))
return acf
def forecast(y, coefs, intercept, steps):
"""
Produce linear MSE forecast
Parameters
----------
    y : ndarray (k_ar x neqs)
        Initial (most recent) values that the forecast conditions on
    coefs : ndarray (k_ar x neqs x neqs)
        Coefficient matrices A_1, ..., A_p
    intercept : ndarray (length neqs) or scalar
        Intercept term(s)
    steps : int
        Number of steps ahead to forecast
Returns
-------
forecasts : ndarray (steps x neqs)
Notes
-----
Lutkepohl p. 37
Also used by DynamicVAR class
"""
p = len(coefs)
k = len(coefs[0])
# initial value
forcs = np.zeros((steps, k)) + intercept
# h=0 forecast should be latest observation
# forcs[0] = y[-1]
# make indices easier to think about
for h in range(1, steps + 1):
# y_t(h) = intercept + sum_1^p A_i y_t_(h-i)
f = forcs[h - 1]
for i in range(1, p + 1):
# slightly hackish
if h - i <= 0:
# e.g. when h=1, h-1 = 0, which is y[-1]
prior_y = y[h - i - 1]
else:
# e.g. when h=2, h-1=1, which is forcs[0]
prior_y = forcs[h - i - 1]
# i=1 is coefs[0]
f = f + np.dot(coefs[i - 1], prior_y)
forcs[h - 1] = f
return forcs
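# Worked one-dimensional example (made-up numbers): with a single lag,
# y_t = 1.0 + 0.5 y_{t-1}, starting from y_T = 2.0 the process is already at
# its fixed point, so every forecast step returns 2.0:
#
#     import numpy as np
#     f = forecast(np.array([[2.0]]), np.array([[[0.5]]]), 1.0, steps=2)
#     # f[0] = 1.0 + 0.5 * 2.0 = 2.0;  f[1] = 1.0 + 0.5 * f[0] = 2.0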
def forecast_cov(ma_coefs, sig_u, steps):
"""
Compute theoretical forecast error variance matrices
    Parameters
    ----------
    ma_coefs : ndarray (steps x neqs x neqs)
        MA coefficient matrices Phi_i
    sig_u : ndarray (k x k)
        Covariance of the white noise process u_t
    steps : int

    Returns
-------
forc_covs : ndarray (steps x neqs x neqs)
"""
k = len(sig_u)
forc_covs = np.zeros((steps, k, k))
prior = np.zeros((k, k))
for h in range(steps):
# Sigma(h) = Sigma(h-1) + Phi Sig_u Phi'
phi = ma_coefs[h]
var = chain_dot(phi, sig_u, phi.T)
forc_covs[h] = prior = prior + var
return forc_covs
def var_loglike(resid, omega, nobs):
r"""
Returns the value of the VAR(p) log-likelihood.
Parameters
----------
resid : ndarray (T x K)
omega : ndarray
Sigma hat matrix. Each element i,j is the average product of the
OLS residual for variable i and the OLS residual for variable j or
np.dot(resid.T,resid)/nobs. There should be no correction for the
degrees of freedom.
nobs : int
Returns
-------
llf : float
The value of the loglikelihood function for a VAR(p) model
Notes
-----
The loglikelihood function for the VAR(p) is
.. math::
        -\left(\frac{T}{2}\right)
        \left(K\ln\left(2\pi\right)+\ln\left|\Omega\right|+K\right)
"""
logdet = logdet_symm(np.asarray(omega))
neqs = len(omega)
part1 = - (nobs * neqs / 2) * np.log(2 * np.pi)
part2 = - (nobs / 2) * (logdet + neqs)
return part1 + part2
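# Worked check (toy numbers): with K=1, T=4 and Omega = [[1.0]] we have
# log|Omega| = 0, so llf = -(4/2) * (1 * log(2*pi) + 0 + 1). Note that the
# resid argument is accepted but the value enters only through omega and nobs:
#
#     import numpy as np
#     llf = var_loglike(resid=np.zeros((4, 1)), omega=np.array([[1.0]]), nobs=4)
#     assert np.isclose(llf, -2 * (np.log(2 * np.pi) + 1))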
def _reordered(self, order):
#Create new arrays to hold rearranged results from .fit()
endog = self.endog
endog_lagged = self.endog_lagged
params = self.params
sigma_u = self.sigma_u
names = self.names
k_ar = self.k_ar
endog_new = np.zeros([np.size(endog,0),np.size(endog,1)])
endog_lagged_new = np.zeros([np.size(endog_lagged,0), np.size(endog_lagged,1)])
params_new_inc, params_new = [np.zeros([np.size(params,0), np.size(params,1)])
for i in range(2)]
sigma_u_new_inc, sigma_u_new = [np.zeros([np.size(sigma_u,0), np.size(sigma_u,1)])
for i in range(2)]
num_end = len(self.params[0])
names_new = []
#Rearrange elements and fill in new arrays
k = self.k_trend
for i, c in enumerate(order):
endog_new[:,i] = self.endog[:,c]
if k > 0:
params_new_inc[0,i] = params[0,i]
endog_lagged_new[:,0] = endog_lagged[:,0]
for j in range(k_ar):
params_new_inc[i+j*num_end+k,:] = self.params[c+j*num_end+k,:]
endog_lagged_new[:,i+j*num_end+k] = endog_lagged[:,c+j*num_end+k]
sigma_u_new_inc[i,:] = sigma_u[c,:]
names_new.append(names[c])
for i, c in enumerate(order):
params_new[:,i] = params_new_inc[:,c]
sigma_u_new[:,i] = sigma_u_new_inc[:,c]
return VARResults(endog=endog_new, endog_lagged=endog_lagged_new,
params=params_new, sigma_u=sigma_u_new,
lag_order=self.k_ar, model=self.model,
trend='c', names=names_new, dates=self.dates)
#-------------------------------------------------------------------------------
# VARProcess class: for known or unknown VAR process
class VAR(tsbase.TimeSeriesModel):
r"""
Fit VAR(p) process and do lag order selection
.. math:: y_t = A_1 y_{t-1} + \ldots + A_p y_{t-p} + u_t
Parameters
----------
endog : array-like
        2-d endogenous response variable. The dependent variable.
dates : array-like
must match number of rows of endog
References
----------
Lutkepohl (2005) New Introduction to Multiple Time Series Analysis
"""
def __init__(self, endog, dates=None, freq=None, missing='none'):
super(VAR, self).__init__(endog, None, dates, freq, missing=missing)
if self.endog.ndim == 1:
raise ValueError("Only gave one variable to VAR")
self.y = self.endog #keep alias for now
self.neqs = self.endog.shape[1]
def _get_predict_start(self, start, k_ar):
if start is None:
start = k_ar
return super(VAR, self)._get_predict_start(start)
def predict(self, params, start=None, end=None, lags=1, trend='c'):
"""
Returns in-sample predictions or forecasts
"""
start = self._get_predict_start(start, lags)
end, out_of_sample = self._get_predict_end(end)
if end < start:
raise ValueError("end is before start")
if end == start + out_of_sample:
return np.array([])
k_trend = util.get_trendorder(trend)
k = self.neqs
k_ar = lags
predictedvalues = np.zeros((end + 1 - start + out_of_sample, k))
if k_trend != 0:
intercept = params[:k_trend]
predictedvalues += intercept
y = self.y
X = util.get_var_endog(y, lags, trend=trend, has_constant='raise')
fittedvalues = np.dot(X, params)
fv_start = start - k_ar
pv_end = min(len(predictedvalues), len(fittedvalues) - fv_start)
fv_end = min(len(fittedvalues), end-k_ar+1)
predictedvalues[:pv_end] = fittedvalues[fv_start:fv_end]
if not out_of_sample:
return predictedvalues
# fit out of sample
y = y[-k_ar:]
coefs = params[k_trend:].reshape((k_ar, k, k)).swapaxes(1,2)
predictedvalues[pv_end:] = forecast(y, coefs, intercept, out_of_sample)
return predictedvalues
def fit(self, maxlags=None, method='ols', ic=None, trend='c',
verbose=False):
"""
Fit the VAR model
Parameters
----------
maxlags : int
Maximum number of lags to check for order selection, defaults to
12 * (nobs/100.)**(1./4), see select_order function
method : {'ols'}
Estimation method to use
ic : {'aic', 'fpe', 'hqic', 'bic', None}
Information criterion to use for VAR order selection.
aic : Akaike
fpe : Final prediction error
hqic : Hannan-Quinn
bic : Bayesian a.k.a. Schwarz
verbose : bool, default False
Print order selection output to the screen
        trend : str {"c", "ct", "ctt", "nc"}
            "c" - add constant
            "ct" - constant and trend
            "ctt" - constant, linear and quadratic trend
            "nc" - no constant, no trend
            Note that these are prepended to the columns of the dataset.
Notes
-----
Lutkepohl pp. 146-153
Returns
-------
est : VARResults
"""
lags = maxlags
if trend not in ['c', 'ct', 'ctt', 'nc']:
raise ValueError("trend '{}' not supported for VAR".format(trend))
if ic is not None:
selections = self.select_order(maxlags=maxlags, verbose=verbose)
if ic not in selections:
raise Exception("%s not recognized, must be among %s"
% (ic, sorted(selections)))
lags = selections[ic]
if verbose:
print('Using %d based on %s criterion' % (lags, ic))
else:
if lags is None:
lags = 1
k_trend = util.get_trendorder(trend)
self.exog_names = util.make_lag_names(self.endog_names, lags, k_trend)
self.nobs = len(self.endog) - lags
return self._estimate_var(lags, trend=trend)
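    # A minimal usage sketch (random data, purely illustrative):
    #
    #     import numpy as np
    #     np.random.seed(0)
    #     data = np.random.randn(200, 3)          # T x K
    #     results = VAR(data).fit(2)
    #     print(results.summary())
    #     fc = results.forecast(data[-results.k_ar:], steps=10)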
def _estimate_var(self, lags, offset=0, trend='c'):
"""
lags : int
offset : int
            Periods to drop from the beginning -- used by order selection so
            that models with different lag orders are fit to the same sample
trend : string or None
As per above
"""
# have to do this again because select_order doesn't call fit
self.k_trend = k_trend = util.get_trendorder(trend)
if offset < 0: # pragma: no cover
raise ValueError('offset must be >= 0')
y = self.y[offset:]
z = util.get_var_endog(y, lags, trend=trend, has_constant='raise')
y_sample = y[lags:]
# Lutkepohl p75, about 5x faster than stated formula
params = np.linalg.lstsq(z, y_sample)[0]
resid = y_sample - np.dot(z, params)
# Unbiased estimate of covariance matrix $\Sigma_u$ of the white noise
# process $u$
# equivalent definition
# .. math:: \frac{1}{T - Kp - 1} Y^\prime (I_T - Z (Z^\prime Z)^{-1}
# Z^\prime) Y
# Ref: Lutkepohl p.75
# df_resid right now is T - Kp - 1, which is a suggested correction
avobs = len(y_sample)
df_resid = avobs - (self.neqs * lags + k_trend)
sse = np.dot(resid.T, resid)
omega = sse / df_resid
varfit = VARResults(y, z, params, omega, lags, names=self.endog_names,
trend=trend, dates=self.data.dates, model=self)
return VARResultsWrapper(varfit)
def select_order(self, maxlags=None, verbose=True):
"""
Compute lag order selections based on each of the available information
criteria
Parameters
----------
maxlags : int
if None, defaults to 12 * (nobs/100.)**(1./4)
verbose : bool, default True
If True, print table of info criteria and selected orders
Returns
-------
selections : dict {info_crit -> selected_order}
"""
if maxlags is None:
maxlags = int(round(12*(len(self.endog)/100.)**(1/4.)))
ics = defaultdict(list)
for p in range(maxlags + 1):
            # exclude some periods so that the same amount of data is used
            # for each lag order
result = self._estimate_var(p, offset=maxlags-p)
for k, v in iteritems(result.info_criteria):
ics[k].append(v)
selected_orders = dict((k, mat(v).argmin())
for k, v in iteritems(ics))
if verbose:
output.print_ic_table(ics, selected_orders)
return selected_orders
class VARProcess(object):
"""
Class represents a known VAR(p) process
Parameters
----------
coefs : ndarray (p x k x k)
intercept : ndarray (length k)
sigma_u : ndarray (k x k)
names : sequence (length k)
Returns
-------
**Attributes**:
"""
def __init__(self, coefs, intercept, sigma_u, names=None):
self.k_ar = len(coefs)
self.neqs = coefs.shape[1]
self.coefs = coefs
self.intercept = intercept
self.sigma_u = sigma_u
self.names = names
def get_eq_index(self, name):
"Return integer position of requested equation name"
return util.get_index(self.names, name)
def __str__(self):
output = ('VAR(%d) process for %d-dimensional response y_t'
% (self.k_ar, self.neqs))
output += '\nstable: %s' % self.is_stable()
output += '\nmean: %s' % self.mean()
return output
def is_stable(self, verbose=False):
"""Determine stability based on model coefficients
Parameters
----------
verbose : bool
Print eigenvalues of the VAR(1) companion
Notes
-----
        Checks whether det(I - Az) = 0 for some mod(z) <= 1; stability
        requires these roots to lie outside the unit circle, i.e. all
        eigenvalues of the companion matrix must lie inside the unit circle
"""
return is_stable(self.coefs, verbose=verbose)
def plotsim(self, steps=1000):
"""
Plot a simulation from the VAR(p) process for the desired number of
steps
"""
Y = util.varsim(self.coefs, self.intercept, self.sigma_u, steps=steps)
plotting.plot_mts(Y)
def mean(self):
r"""Mean of stable process
Lutkepohl eq. 2.1.23
.. math:: \mu = (I - A_1 - \dots - A_p)^{-1} \alpha
"""
return solve(self._char_mat, self.intercept)
def ma_rep(self, maxn=10):
r"""Compute MA(:math:`\infty`) coefficient matrices
Parameters
----------
maxn : int
Number of coefficient matrices to compute
Returns
-------
coefs : ndarray (maxn x k x k)
"""
return ma_rep(self.coefs, maxn=maxn)
def orth_ma_rep(self, maxn=10, P=None):
r"""Compute Orthogonalized MA coefficient matrices using P matrix such
that :math:`\Sigma_u = PP^\prime`. P defaults to the Cholesky
decomposition of :math:`\Sigma_u`
Parameters
----------
maxn : int
Number of coefficient matrices to compute
P : ndarray (k x k), optional
            Matrix such that Sigma_u = PP', defaults to Cholesky decomposition
Returns
-------
coefs : ndarray (maxn x k x k)
"""
if P is None:
P = self._chol_sigma_u
ma_mats = self.ma_rep(maxn=maxn)
return mat([np.dot(coefs, P) for coefs in ma_mats])
def long_run_effects(self):
"""Compute long-run effect of unit impulse
.. math::
\Psi_\infty = \sum_{i=0}^\infty \Phi_i
"""
return L.inv(self._char_mat)
@cache_readonly
def _chol_sigma_u(self):
return chol(self.sigma_u)
@cache_readonly
def _char_mat(self):
return np.eye(self.neqs) - self.coefs.sum(0)
def acf(self, nlags=None):
"""Compute theoretical autocovariance function
Returns
-------
acf : ndarray (p x k x k)
"""
return var_acf(self.coefs, self.sigma_u, nlags=nlags)
def acorr(self, nlags=None):
"""Compute theoretical autocorrelation function
Returns
-------
acorr : ndarray (p x k x k)
"""
return util.acf_to_acorr(self.acf(nlags=nlags))
def plot_acorr(self, nlags=10, linewidth=8):
"Plot theoretical autocorrelation function"
plotting.plot_full_acorr(self.acorr(nlags=nlags), linewidth=linewidth)
def forecast(self, y, steps):
"""Produce linear minimum MSE forecasts for desired number of steps
ahead, using prior values y
Parameters
----------
y : ndarray (p x k)
steps : int
Returns
-------
forecasts : ndarray (steps x neqs)
Notes
-----
Lutkepohl pp 37-38
"""
return forecast(y, self.coefs, self.intercept, steps)
def mse(self, steps):
"""
Compute theoretical forecast error variance matrices
Parameters
----------
steps : int
Number of steps ahead
Notes
-----
        .. math:: \mathrm{MSE}(h) = \sum_{i=0}^{h-1} \Phi_i \Sigma_u \Phi_i^T
Returns
-------
forc_covs : ndarray (steps x neqs x neqs)
"""
ma_coefs = self.ma_rep(steps)
k = len(self.sigma_u)
forc_covs = np.zeros((steps, k, k))
prior = np.zeros((k, k))
for h in range(steps):
# Sigma(h) = Sigma(h-1) + Phi Sig_u Phi'
phi = ma_coefs[h]
var = chain_dot(phi, self.sigma_u, phi.T)
forc_covs[h] = prior = prior + var
return forc_covs
forecast_cov = mse
def _forecast_vars(self, steps):
covs = self.forecast_cov(steps)
# Take diagonal for each cov
inds = np.arange(self.neqs)
return covs[:, inds, inds]
def forecast_interval(self, y, steps, alpha=0.05):
"""Construct forecast interval estimates assuming the y are Gaussian
Parameters
----------
Notes
-----
Lutkepohl pp. 39-40
Returns
-------
(lower, mid, upper) : (ndarray, ndarray, ndarray)
"""
assert(0 < alpha < 1)
q = util.norm_signif_level(alpha)
point_forecast = self.forecast(y, steps)
sigma = np.sqrt(self._forecast_vars(steps))
forc_lower = point_forecast - q * sigma
forc_upper = point_forecast + q * sigma
return point_forecast, forc_lower, forc_upper
#-------------------------------------------------------------------------------
# VARResults class
class VARResults(VARProcess):
"""Estimate VAR(p) process with fixed number of lags
Parameters
----------
endog : array
endog_lagged : array
params : array
sigma_u : array
lag_order : int
model : VAR model instance
trend : str {'nc', 'c', 'ct'}
names : array-like
List of names of the endogenous variables in order of appearance in `endog`.
dates
Returns
-------
**Attributes**
aic
bic
bse
coefs : ndarray (p x K x K)
Estimated A_i matrices, A_i = coefs[i-1]
cov_params
dates
detomega
df_model : int
df_resid : int
endog
endog_lagged
fittedvalues
fpe
intercept
info_criteria
    k_ar : int
        Order of VAR process
    k_trend : int
    llf
    model
    names : list
        variable names
    neqs : int
        Number of variables (equations)
    nobs : int
    n_totobs : int
    params : ndarray ((Kp + 1) x K)
        A_i matrices and intercept in stacked form [int A_1 ... A_p]
    pvalues
    resid
roots : array
The roots of the VAR process are the solution to
(I - coefs[0]*z - coefs[1]*z**2 ... - coefs[p-1]*z**k_ar) = 0.
Note that the inverse roots are returned, and stability requires that
the roots lie outside the unit circle.
sigma_u : ndarray (K x K)
Estimate of white noise process variance Var[u_t]
sigma_u_mle
stderr
    trendorder
tvalues
y :
ys_lagged
"""
_model_type = 'VAR'
def __init__(self, endog, endog_lagged, params, sigma_u, lag_order,
model=None, trend='c', names=None, dates=None):
self.model = model
self.y = self.endog = endog #keep alias for now
self.ys_lagged = self.endog_lagged = endog_lagged #keep alias for now
self.dates = dates
self.n_totobs, neqs = self.y.shape
self.nobs = self.n_totobs - lag_order
k_trend = util.get_trendorder(trend)
if k_trend > 0: # make this the polynomial trend order
trendorder = k_trend - 1
else:
trendorder = None
self.k_trend = k_trend
self.trendorder = trendorder
self.exog_names = util.make_lag_names(names, lag_order, k_trend)
self.params = params
# Initialize VARProcess parent class
# construct coefficient matrices
# Each matrix needs to be transposed
reshaped = self.params[self.k_trend:]
reshaped = reshaped.reshape((lag_order, neqs, neqs))
# Need to transpose each coefficient matrix
intercept = self.params[0]
coefs = reshaped.swapaxes(1, 2).copy()
super(VARResults, self).__init__(coefs, intercept, sigma_u, names=names)
def plot(self):
"""Plot input time series
"""
plotting.plot_mts(self.y, names=self.names, index=self.dates)
@property
def df_model(self):
"""Number of estimated parameters, including the intercept / trends
"""
return self.neqs * self.k_ar + self.k_trend
@property
def df_resid(self):
"""Number of observations minus number of estimated parameters"""
return self.nobs - self.df_model
@cache_readonly
def fittedvalues(self):
"""The predicted insample values of the response variables of the model.
"""
return np.dot(self.ys_lagged, self.params)
@cache_readonly
def resid(self):
"""Residuals of response variable resulting from estimated coefficients
"""
return self.y[self.k_ar:] - self.fittedvalues
def sample_acov(self, nlags=1):
return _compute_acov(self.y[self.k_ar:], nlags=nlags)
def sample_acorr(self, nlags=1):
acovs = self.sample_acov(nlags=nlags)
return _acovs_to_acorrs(acovs)
def plot_sample_acorr(self, nlags=10, linewidth=8):
"Plot theoretical autocorrelation function"
plotting.plot_full_acorr(self.sample_acorr(nlags=nlags),
linewidth=linewidth)
def resid_acov(self, nlags=1):
"""
Compute centered sample autocovariance (including lag 0)
Parameters
----------
nlags : int
        Returns
        -------
        acov : ndarray (nlags + 1 x neqs x neqs)
        """
return _compute_acov(self.resid, nlags=nlags)
def resid_acorr(self, nlags=1):
"""
Compute sample autocorrelation (including lag 0)
Parameters
----------
nlags : int
        Returns
        -------
        acorr : ndarray (nlags + 1 x neqs x neqs)
        """
acovs = self.resid_acov(nlags=nlags)
return _acovs_to_acorrs(acovs)
@cache_readonly
def resid_corr(self):
"Centered residual correlation matrix"
return self.resid_acorr(0)[0]
@cache_readonly
def sigma_u_mle(self):
"""(Biased) maximum likelihood estimate of noise process covariance
"""
return self.sigma_u * self.df_resid / self.nobs
@cache_readonly
def cov_params(self):
"""Estimated variance-covariance of model coefficients
Notes
-----
Covariance of vec(B), where B is the matrix
[intercept, A_1, ..., A_p] (K x (Kp + 1))
Adjusted to be an unbiased estimator
Ref: Lutkepohl p.74-75
"""
z = self.ys_lagged
return np.kron(L.inv(np.dot(z.T, z)), self.sigma_u)
def cov_ybar(self):
r"""Asymptotically consistent estimate of covariance of the sample mean
.. math::
\sqrt(T) (\bar{y} - \mu) \rightarrow {\cal N}(0, \Sigma_{\bar{y}})\\
\Sigma_{\bar{y}} = B \Sigma_u B^\prime, \text{where } B = (I_K - A_1
- \cdots - A_p)^{-1}
Notes
-----
Lutkepohl Proposition 3.3
"""
Ainv = L.inv(np.eye(self.neqs) - self.coefs.sum(0))
return chain_dot(Ainv, self.sigma_u, Ainv.T)
#------------------------------------------------------------
# Estimation-related things
@cache_readonly
def _zz(self):
# Z'Z
return np.dot(self.ys_lagged.T, self.ys_lagged)
@property
def _cov_alpha(self):
"""
Estimated covariance matrix of model coefficients ex intercept
"""
# drop intercept and trend
return self.cov_params[self.k_trend*self.neqs:, self.k_trend*self.neqs:]
@cache_readonly
def _cov_sigma(self):
"""
Estimated covariance matrix of vech(sigma_u)
"""
D_K = tsa.duplication_matrix(self.neqs)
D_Kinv = npl.pinv(D_K)
sigxsig = np.kron(self.sigma_u, self.sigma_u)
return 2 * chain_dot(D_Kinv, sigxsig, D_Kinv.T)
@cache_readonly
def llf(self):
"Compute VAR(p) loglikelihood"
return var_loglike(self.resid, self.sigma_u_mle, self.nobs)
@cache_readonly
def stderr(self):
"""Standard errors of coefficients, reshaped to match in size
"""
stderr = np.sqrt(np.diag(self.cov_params))
return stderr.reshape((self.df_model, self.neqs), order='C')
bse = stderr # statsmodels interface?
@cache_readonly
def tvalues(self):
"""Compute t-statistics. Use Student-t(T - Kp - 1) = t(df_resid) to test
significance.
"""
return self.params / self.stderr
@cache_readonly
def pvalues(self):
"""Two-sided p-values for model coefficients from Student t-distribution
"""
return stats.t.sf(np.abs(self.tvalues), self.df_resid)*2
def plot_forecast(self, steps, alpha=0.05, plot_stderr=True):
"""
Plot forecast
"""
mid, lower, upper = self.forecast_interval(self.y[-self.k_ar:], steps,
alpha=alpha)
plotting.plot_var_forc(self.y, mid, lower, upper, names=self.names,
plot_stderr=plot_stderr)
# Forecast error covariance functions
def forecast_cov(self, steps=1):
r"""Compute forecast covariance matrices for desired number of steps
Parameters
----------
steps : int
Notes
-----
.. math:: \Sigma_{\hat y}(h) = \Sigma_y(h) + \Omega(h) / T
Ref: Lutkepohl pp. 96-97
Returns
-------
covs : ndarray (steps x k x k)
"""
mse = self.mse(steps)
omegas = self._omega_forc_cov(steps)
return mse + omegas / self.nobs
#Monte Carlo irf standard errors
def irf_errband_mc(self, orth=False, repl=1000, T=10,
signif=0.05, seed=None, burn=100, cum=False):
"""
        Compute Monte Carlo integrated error bands, assuming normally
        distributed data, for impulse response functions
Parameters
----------
orth: bool, default False
            Compute orthogonalized impulse response error bands
repl: int
number of Monte Carlo replications to perform
T: int, default 10
number of impulse response periods
signif: float (0 < signif <1)
Significance level for error bars, defaults to 95% CI
seed: int
np.random.seed for replications
burn: int
number of initial observations to discard for simulation
cum: bool, default False
produce cumulative irf error bands
Notes
-----
Lutkepohl (2005) Appendix D
Returns
-------
Tuple of lower and upper arrays of ma_rep monte carlo standard errors
"""
neqs = self.neqs
mean = self.mean()
k_ar = self.k_ar
coefs = self.coefs
sigma_u = self.sigma_u
intercept = self.intercept
df_model = self.df_model
nobs = self.nobs
        if seed is not None:
            np.random.seed(seed=seed)
        ma_coll = np.zeros((repl, T+1, neqs, neqs))
        if orth and cum:
            fill_coll = lambda sim: VAR(sim).fit(maxlags=k_ar).\
                orth_ma_rep(maxn=T).cumsum(axis=0)
        elif orth:
            fill_coll = lambda sim: VAR(sim).fit(maxlags=k_ar).\
                orth_ma_rep(maxn=T)
        elif cum:
            fill_coll = lambda sim: VAR(sim).fit(maxlags=k_ar).\
                ma_rep(maxn=T).cumsum(axis=0)
        else:
            fill_coll = lambda sim: VAR(sim).fit(maxlags=k_ar).\
                ma_rep(maxn=T)
for i in range(repl):
            # discard burn-in observations to correct for starting-value bias
sim = util.varsim(coefs, intercept, sigma_u, steps=nobs+burn)
sim = sim[burn:]
ma_coll[i,:,:,:] = fill_coll(sim)
ma_sort = np.sort(ma_coll, axis=0) #sort to get quantiles
index = round(signif/2*repl)-1,round((1-signif/2)*repl)-1
lower = ma_sort[index[0],:, :, :]
upper = ma_sort[index[1],:, :, :]
return lower, upper
def irf_resim(self, orth=False, repl=1000, T=10,
seed=None, burn=100, cum=False):
"""
Simulates impulse response function, returning an array of simulations.
Used for Sims-Zha error band calculation.
Parameters
----------
orth: bool, default False
            Compute orthogonalized impulse response error bands
repl: int
number of Monte Carlo replications to perform
T: int, default 10
number of impulse response periods
seed: int
np.random.seed for replications
burn: int
number of initial observations to discard for simulation
cum: bool, default False
produce cumulative irf error bands
Notes
-----
        Sims, Christopher A., and Tao Zha. 1999. "Error Bands for Impulse Responses." Econometrica 67: 1113-1155.
Returns
-------
Array of simulated impulse response functions
"""
neqs = self.neqs
mean = self.mean()
k_ar = self.k_ar
coefs = self.coefs
sigma_u = self.sigma_u
intercept = self.intercept
df_model = self.df_model
nobs = self.nobs
if seed is not None:
np.random.seed(seed=seed)
ma_coll = np.zeros((repl, T+1, neqs, neqs))
        if orth and cum:
            fill_coll = lambda sim: VAR(sim).fit(maxlags=k_ar).\
                orth_ma_rep(maxn=T).cumsum(axis=0)
        elif orth:
            fill_coll = lambda sim: VAR(sim).fit(maxlags=k_ar).\
                orth_ma_rep(maxn=T)
        elif cum:
            fill_coll = lambda sim: VAR(sim).fit(maxlags=k_ar).\
                ma_rep(maxn=T).cumsum(axis=0)
        else:
            fill_coll = lambda sim: VAR(sim).fit(maxlags=k_ar).\
                ma_rep(maxn=T)
for i in range(repl):
            # discard burn-in observations to correct for starting-value bias
sim = util.varsim(coefs, intercept, sigma_u, steps=nobs+burn)
sim = sim[burn:]
ma_coll[i,:,:,:] = fill_coll(sim)
return ma_coll
def _omega_forc_cov(self, steps):
# Approximate MSE matrix \Omega(h) as defined in Lut p97
G = self._zz
Ginv = L.inv(G)
# memoize powers of B for speedup
# TODO: see if can memoize better
B = self._bmat_forc_cov()
_B = {}
def bpow(i):
if i not in _B:
_B[i] = np.linalg.matrix_power(B, i)
return _B[i]
phis = self.ma_rep(steps)
sig_u = self.sigma_u
omegas = np.zeros((steps, self.neqs, self.neqs))
for h in range(1, steps + 1):
if h == 1:
omegas[h-1] = self.df_model * self.sigma_u
continue
om = omegas[h-1]
for i in range(h):
for j in range(h):
Bi = bpow(h - 1 - i)
Bj = bpow(h - 1 - j)
mult = np.trace(chain_dot(Bi.T, Ginv, Bj, G))
om += mult * chain_dot(phis[i], sig_u, phis[j].T)
omegas[h-1] = om
return omegas
def _bmat_forc_cov(self):
# B as defined on p. 96 of Lut
upper = np.zeros((1, self.df_model))
upper[0,0] = 1
lower_dim = self.neqs * (self.k_ar - 1)
I = np.eye(lower_dim)
lower = np.column_stack((np.zeros((lower_dim, 1)), I,
np.zeros((lower_dim, self.neqs))))
return np.vstack((upper, self.params.T, lower))
def summary(self):
"""Compute console output summary of estimates
Returns
-------
summary : VARSummary
"""
return VARSummary(self)
def irf(self, periods=10, var_decomp=None, var_order=None):
"""Analyze impulse responses to shocks in system
Parameters
----------
periods : int
var_decomp : ndarray (k x k), lower triangular
Must satisfy Omega = P P', where P is the passed matrix. Defaults to
Cholesky decomposition of Omega
var_order : sequence
Alternate variable order for Cholesky decomposition
Returns
-------
irf : IRAnalysis
"""
if var_order is not None:
raise NotImplementedError('alternate variable order not implemented'
' (yet)')
return IRAnalysis(self, P=var_decomp, periods=periods)
def fevd(self, periods=10, var_decomp=None):
"""
Compute forecast error variance decomposition ("fevd")
Returns
-------
fevd : FEVD instance
"""
return FEVD(self, P=var_decomp, periods=periods)
def reorder(self, order):
"""Reorder variables for structural specification
"""
if len(order) != len(self.params[0,:]):
raise ValueError("Reorder specification length should match number of endogenous variables")
#This convert order to list of integers if given as strings
if isinstance(order[0], string_types):
order_new = []
for i, nam in enumerate(order):
order_new.append(self.names.index(order[i]))
order = order_new
return _reordered(self, order)
#-------------------------------------------------------------------------------
# VAR Diagnostics: Granger-causality, whiteness of residuals, normality, etc.
def test_causality(self, equation, variables, kind='f', signif=0.05,
verbose=True):
"""Compute test statistic for null hypothesis of Granger-noncausality,
general function to test joint Granger-causality of multiple variables
Parameters
----------
equation : string or int
Equation to test for causality
variables : sequence (of strings or ints)
List, tuple, etc. of variables to test for Granger-causality
kind : {'f', 'wald'}
Perform F-test or Wald (chi-sq) test
signif : float, default 5%
Significance level for computing critical values for test,
defaulting to standard 0.95 level
Notes
-----
Null hypothesis is that there is no Granger-causality for the indicated
variables. The degrees of freedom in the F-test are based on the
number of variables in the VAR system, that is, degrees of freedom
are equal to the number of equations in the VAR times degree of freedom
of a single equation.
Returns
-------
results : dict
"""
if isinstance(variables, (string_types, int, np.integer)):
variables = [variables]
k, p = self.neqs, self.k_ar
# number of restrictions
N = len(variables) * self.k_ar
# Make restriction matrix
C = np.zeros((N, k ** 2 * p + k), dtype=float)
eq_index = self.get_eq_index(equation)
vinds = mat([self.get_eq_index(v) for v in variables])
# remember, vec is column order!
offsets = np.concatenate([k + k ** 2 * j + k * vinds + eq_index
for j in range(p)])
C[np.arange(N), offsets] = 1
# Lutkepohl 3.6.5
Cb = np.dot(C, vec(self.params.T))
middle = L.inv(chain_dot(C, self.cov_params, C.T))
# wald statistic
lam_wald = statistic = chain_dot(Cb, middle, Cb)
if kind.lower() == 'wald':
df = N
dist = stats.chi2(df)
elif kind.lower() == 'f':
statistic = lam_wald / N
df = (N, k * self.df_resid)
dist = stats.f(*df)
else:
raise Exception('kind %s not recognized' % kind)
pvalue = dist.sf(statistic)
crit_value = dist.ppf(1 - signif)
conclusion = 'fail to reject' if statistic < crit_value else 'reject'
results = {
'statistic' : statistic,
'crit_value' : crit_value,
'pvalue' : pvalue,
'df' : df,
'conclusion' : conclusion,
'signif' : signif
}
if verbose:
summ = output.causality_summary(results, variables, equation, kind)
print(summ)
return results
def test_whiteness(self, nlags=10, plot=True, linewidth=8):
"""
Test white noise assumption. Sample (Y) autocorrelations are compared
        with the standard :math:`2 / \sqrt{T}` bounds.
Parameters
----------
plot : boolean, default True
Plot autocorrelations with 2 / sqrt(T) bounds
"""
acorrs = self.sample_acorr(nlags)
bound = 2 / np.sqrt(self.nobs)
# TODO: this probably needs some UI work
if (np.abs(acorrs) > bound).any():
print('FAIL: Some autocorrelations exceed %.4f bound. '
'See plot' % bound)
else:
print('PASS: No autocorrelations exceed %.4f bound' % bound)
if plot:
fig = plotting.plot_full_acorr(acorrs[1:],
xlabel=np.arange(1, nlags+1),
err_bound=bound,
linewidth=linewidth)
fig.suptitle(r"ACF plots with $2 / \sqrt{T}$ bounds "
"for testing whiteness assumption")
def test_normality(self, signif=0.05, verbose=True):
"""
Test assumption of normal-distributed errors using Jarque-Bera-style
omnibus Chi^2 test
Parameters
----------
signif : float
Test significance threshold
Notes
-----
H0 (null) : data are generated by a Gaussian-distributed process
"""
Pinv = npl.inv(self._chol_sigma_u)
w = np.array([np.dot(Pinv, u) for u in self.resid])
b1 = (w ** 3).sum(0) / self.nobs
lam_skew = self.nobs * np.dot(b1, b1) / 6
b2 = (w ** 4).sum(0) / self.nobs - 3
lam_kurt = self.nobs * np.dot(b2, b2) / 24
lam_omni = lam_skew + lam_kurt
omni_dist = stats.chi2(self.neqs * 2)
omni_pvalue = omni_dist.sf(lam_omni)
crit_omni = omni_dist.ppf(1 - signif)
conclusion = 'fail to reject' if lam_omni < crit_omni else 'reject'
results = {
'statistic' : lam_omni,
'crit_value' : crit_omni,
'pvalue' : omni_pvalue,
'df' : self.neqs * 2,
'conclusion' : conclusion,
'signif' : signif
}
if verbose:
summ = output.normality_summary(results)
print(summ)
return results
@cache_readonly
def detomega(self):
r"""
Return determinant of white noise covariance with degrees of freedom
correction:
.. math::
\hat \Omega = \frac{T}{T - Kp - 1} \hat \Omega_{\mathrm{MLE}}
"""
return L.det(self.sigma_u)
@cache_readonly
def info_criteria(self):
"information criteria for lagorder selection"
nobs = self.nobs
neqs = self.neqs
lag_order = self.k_ar
free_params = lag_order * neqs ** 2 + neqs * self.k_trend
ld = logdet_symm(self.sigma_u_mle)
# See Lutkepohl pp. 146-150
aic = ld + (2. / nobs) * free_params
bic = ld + (np.log(nobs) / nobs) * free_params
hqic = ld + (2. * np.log(np.log(nobs)) / nobs) * free_params
fpe = ((nobs + self.df_model) / self.df_resid) ** neqs * np.exp(ld)
return {
'aic' : aic,
'bic' : bic,
'hqic' : hqic,
'fpe' : fpe
}
@property
def aic(self):
"""Akaike information criterion"""
return self.info_criteria['aic']
@property
def fpe(self):
"""Final Prediction Error (FPE)
Lutkepohl p. 147, see info_criteria
"""
return self.info_criteria['fpe']
@property
def hqic(self):
"""Hannan-Quinn criterion"""
return self.info_criteria['hqic']
@property
def bic(self):
"""Bayesian a.k.a. Schwarz info criterion"""
return self.info_criteria['bic']
@cache_readonly
def roots(self):
neqs = self.neqs
k_ar = self.k_ar
p = neqs * k_ar
arr = np.zeros((p,p))
arr[:neqs,:] = np.column_stack(self.coefs)
arr[neqs:,:-neqs] = np.eye(p-neqs)
roots = np.linalg.eig(arr)[0]**-1
idx = np.argsort(np.abs(roots))[::-1] # sort by reverse modulus
return roots[idx]
class VARResultsWrapper(wrap.ResultsWrapper):
_attrs = {'bse' : 'columns_eq', 'cov_params' : 'cov',
'params' : 'columns_eq', 'pvalues' : 'columns_eq',
'tvalues' : 'columns_eq', 'sigma_u' : 'cov_eq',
'sigma_u_mle' : 'cov_eq',
'stderr' : 'columns_eq'}
_wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_methods,
_methods)
_wrap_methods.pop('cov_params') # not yet a method in VARResults
wrap.populate_wrapper(VARResultsWrapper, VARResults)
class FEVD(object):
"""
Compute and plot Forecast error variance decomposition and asymptotic
standard errors
"""
def __init__(self, model, P=None, periods=None):
self.periods = periods
self.model = model
self.neqs = model.neqs
self.names = model.model.endog_names
self.irfobj = model.irf(var_decomp=P, periods=periods)
self.orth_irfs = self.irfobj.orth_irfs
# cumulative impulse responses
irfs = (self.orth_irfs[:periods] ** 2).cumsum(axis=0)
rng = lrange(self.neqs)
mse = self.model.mse(periods)[:, rng, rng]
# lag x equation x component
fevd = np.empty_like(irfs)
for i in range(periods):
fevd[i] = (irfs[i].T / mse[i]).T
# switch to equation x lag x component
self.decomp = fevd.swapaxes(0, 1)
def summary(self):
buf = StringIO()
rng = lrange(self.periods)
for i in range(self.neqs):
ppm = output.pprint_matrix(self.decomp[i], rng, self.names)
buf.write('FEVD for %s\n' % self.names[i])
buf.write(ppm + '\n')
print(buf.getvalue())
def cov(self):
"""Compute asymptotic standard errors
Returns
-------
"""
raise NotImplementedError
def plot(self, periods=None, figsize=(10,10), **plot_kwds):
"""Plot graphical display of FEVD
Parameters
----------
periods : int, default None
Defaults to number originally specified. Can be at most that number
"""
import matplotlib.pyplot as plt
k = self.neqs
periods = periods or self.periods
fig, axes = plt.subplots(nrows=k, figsize=figsize)
fig.suptitle('Forecast error variance decomposition (FEVD)')
colors = [str(c) for c in np.arange(k, dtype=float) / k]
ticks = np.arange(periods)
limits = self.decomp.cumsum(2)
for i in range(k):
ax = axes[i]
this_limits = limits[i].T
handles = []
for j in range(k):
lower = this_limits[j - 1] if j > 0 else 0
upper = this_limits[j]
handle = ax.bar(ticks, upper - lower, bottom=lower,
color=colors[j], label=self.names[j],
**plot_kwds)
handles.append(handle)
ax.set_title(self.names[i])
# just use the last axis to get handles for plotting
handles, labels = ax.get_legend_handles_labels()
fig.legend(handles, labels, loc='upper right')
plotting.adjust_subplots(right=0.85)
#-------------------------------------------------------------------------------
def _compute_acov(x, nlags=1):
x = x - x.mean(0)
result = []
for lag in range(nlags + 1):
if lag > 0:
r = np.dot(x[lag:].T, x[:-lag])
else:
r = np.dot(x.T, x)
result.append(r)
return np.array(result) / len(x)
def _acovs_to_acorrs(acovs):
sd = np.sqrt(np.diag(acovs[0]))
return acovs / np.outer(sd, sd)
if __name__ == '__main__':
import statsmodels.api as sm
from statsmodels.tsa.vector_ar.util import parse_lutkepohl_data
import statsmodels.tools.data as data_util
np.set_printoptions(linewidth=140, precision=5)
sdata, dates = parse_lutkepohl_data('data/%s.dat' % 'e1')
names = sdata.dtype.names
data = data_util.struct_to_ndarray(sdata)
adj_data = np.diff(np.log(data), axis=0)
# est = VAR(adj_data, p=2, dates=dates[1:], names=names)
model = VAR(adj_data[:-16], dates=dates[1:-16], names=names)
# model = VAR(adj_data[:-16], dates=dates[1:-16], names=names)
est = model.fit(maxlags=2)
irf = est.irf()
y = est.y[-2:]
"""
# irf.plot_irf()
# i = 2; j = 1
# cv = irf.cum_effect_cov(orth=True)
# print np.sqrt(cv[:, j * 3 + i, j * 3 + i]) / 1e-2
# data = np.genfromtxt('Canada.csv', delimiter=',', names=True)
# data = data.view((float, 4))
"""
'''
mdata = sm.datasets.macrodata.load().data
mdata2 = mdata[['realgdp','realcons','realinv']]
names = mdata2.dtype.names
data = mdata2.view((float,3))
data = np.diff(np.log(data), axis=0)
import pandas as pn
df = pn.DataFrame.fromRecords(mdata)
df = np.log(df.reindex(columns=names))
df = (df - df.shift(1)).dropna()
model = VAR(df)
est = model.fit(maxlags=2)
irf = est.irf()
'''
--- phobson/statsmodels | statsmodels/tsa/vector_ar/var_model.py | Python | bsd-3-clause ---
# -*- coding: utf-8 -*-
"""
Unit tests for masquerade.
"""
import json
import pickle
from datetime import datetime
import ddt
from django.conf import settings
from django.urls import reverse
from django.test import TestCase
from mock import patch
from pytz import UTC
from capa.tests.response_xml_factory import OptionResponseXMLFactory
from courseware.masquerade import CourseMasquerade, MasqueradingKeyValueStore, get_masquerading_user_group
from courseware.tests.factories import StaffFactory
from courseware.tests.helpers import LoginEnrollmentTestCase, masquerade_as_group_member
from courseware.tests.test_submitting_problems import ProblemSubmissionTestMixin
from openedx.core.djangoapps.lang_pref import LANGUAGE_KEY
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
from openedx.core.djangoapps.user_api.preferences.api import get_user_preference, set_user_preference
from openedx.core.djangoapps.waffle_utils.testutils import override_waffle_flag
from openedx.core.lib.tests import attr
from openedx.features.course_experience import UNIFIED_COURSE_TAB_FLAG
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xblock.runtime import DictKeyValueStore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.partitions.partitions import Group, UserPartition
class MasqueradeTestCase(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Base class for masquerade tests that sets up a test course and enrolls a user in the course.
"""
@classmethod
def setUpClass(cls):
super(MasqueradeTestCase, cls).setUpClass()
cls.course = CourseFactory.create(number='masquerade-test', metadata={'start': datetime.now(UTC)})
cls.info_page = ItemFactory.create(
category="course_info", parent_location=cls.course.location,
data="OOGIE BLOOGIE", display_name="updates"
)
cls.chapter = ItemFactory.create(
parent_location=cls.course.location,
category="chapter",
display_name="Test Section",
)
cls.sequential_display_name = "Test Masquerade Subsection"
cls.sequential = ItemFactory.create(
parent_location=cls.chapter.location,
category="sequential",
display_name=cls.sequential_display_name,
)
cls.vertical = ItemFactory.create(
parent_location=cls.sequential.location,
category="vertical",
display_name="Test Unit",
)
problem_xml = OptionResponseXMLFactory().build_xml(
question_text='The correct answer is Correct',
num_inputs=2,
weight=2,
options=['Correct', 'Incorrect'],
correct_option='Correct'
)
cls.problem_display_name = "TestMasqueradeProblem"
cls.problem = ItemFactory.create(
parent_location=cls.vertical.location,
category='problem',
data=problem_xml,
display_name=cls.problem_display_name
)
def setUp(self):
super(MasqueradeTestCase, self).setUp()
self.test_user = self.create_user()
self.login(self.test_user.email, 'test')
self.enroll(self.course, True)
def get_courseware_page(self):
"""
Returns the server response for the courseware page.
"""
url = reverse(
'courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.chapter.location.block_id,
'section': self.sequential.location.block_id,
}
)
return self.client.get(url)
def get_course_info_page(self):
"""
Returns the server response for course info page.
"""
url = reverse(
'info',
kwargs={
'course_id': unicode(self.course.id),
}
)
return self.client.get(url)
def get_progress_page(self):
"""
Returns the server response for progress page.
"""
url = reverse(
'progress',
kwargs={
'course_id': unicode(self.course.id),
}
)
return self.client.get(url)
def verify_staff_debug_present(self, staff_debug_expected):
"""
Verifies that the staff debug control visibility is as expected (for staff only).
"""
content = self.get_courseware_page().content
self.assertIn(self.sequential_display_name, content, "Subsection should be visible")
self.assertEqual(staff_debug_expected, 'Staff Debug Info' in content)
def get_problem(self):
"""
Returns the JSON content for the problem in the course.
"""
problem_url = reverse(
'xblock_handler',
kwargs={
'course_id': unicode(self.course.id),
'usage_id': unicode(self.problem.location),
'handler': 'xmodule_handler',
'suffix': 'problem_get'
}
)
return self.client.get(problem_url)
def verify_show_answer_present(self, show_answer_expected):
"""
Verifies that "Show Answer" is only present when expected (for staff only).
"""
problem_html = json.loads(self.get_problem().content)['html']
self.assertIn(self.problem_display_name, problem_html)
self.assertEqual(show_answer_expected, "Show Answer" in problem_html)
def ensure_masquerade_as_group_member(self, partition_id, group_id):
"""
Installs a masquerade for the test_user and test course, to enable the
user to masquerade as belonging to the specific partition/group combination.
Also verifies that the call to install the masquerade was successful.
Arguments:
partition_id (int): the integer partition id, referring to partitions already
configured in the course.
            group_id (int): the integer group id, within the specified partition.
"""
self.assertEqual(200, masquerade_as_group_member(self.test_user, self.course, partition_id, group_id))
@attr(shard=1)
class NormalStudentVisibilityTest(MasqueradeTestCase):
"""
Verify the course displays as expected for a "normal" student (to ensure test setup is correct).
"""
def create_user(self):
"""
Creates a normal student user.
"""
return UserFactory()
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_staff_debug_not_visible(self):
"""
Tests that staff debug control is not present for a student.
"""
self.verify_staff_debug_present(False)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_show_answer_not_visible(self):
"""
Tests that "Show Answer" is not visible for a student.
"""
self.verify_show_answer_present(False)
class StaffMasqueradeTestCase(MasqueradeTestCase):
"""
Base class for tests of the masquerade behavior for a staff member.
"""
def create_user(self):
"""
Creates a staff user.
"""
return StaffFactory(course_key=self.course.id)
def update_masquerade(self, role, group_id=None, user_name=None):
"""
Toggle masquerade state.
"""
masquerade_url = reverse(
'masquerade_update',
kwargs={
'course_key_string': unicode(self.course.id),
}
)
response = self.client.post(
masquerade_url,
json.dumps({"role": role, "group_id": group_id, "user_name": user_name}),
"application/json"
)
self.assertEqual(response.status_code, 200)
return response
@attr(shard=1)
class TestStaffMasqueradeAsStudent(StaffMasqueradeTestCase):
"""
Check for staff being able to masquerade as student.
"""
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_staff_debug_with_masquerade(self):
"""
Tests that staff debug control is not visible when masquerading as a student.
"""
# Verify staff initially can see staff debug
self.verify_staff_debug_present(True)
# Toggle masquerade to student
self.update_masquerade(role='student')
self.verify_staff_debug_present(False)
# Toggle masquerade back to staff
self.update_masquerade(role='staff')
self.verify_staff_debug_present(True)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_show_answer_for_staff(self):
"""
Tests that "Show Answer" is not visible when masquerading as a student.
"""
# Verify that staff initially can see "Show Answer".
self.verify_show_answer_present(True)
# Toggle masquerade to student
self.update_masquerade(role='student')
self.verify_show_answer_present(False)
# Toggle masquerade back to staff
self.update_masquerade(role='staff')
self.verify_show_answer_present(True)
@ddt.ddt
@attr(shard=1)
class TestStaffMasqueradeAsSpecificStudent(StaffMasqueradeTestCase, ProblemSubmissionTestMixin):
"""
Check for staff being able to masquerade as a specific student.
"""
def setUp(self):
super(TestStaffMasqueradeAsSpecificStudent, self).setUp()
self.student_user = self.create_user()
self.login_student()
self.enroll(self.course, True)
def login_staff(self):
""" Login as a staff user """
self.logout()
self.login(self.test_user.email, 'test')
def login_student(self):
""" Login as a student """
self.logout()
self.login(self.student_user.email, 'test')
def submit_answer(self, response1, response2):
"""
Submit an answer to the single problem in our test course.
"""
return self.submit_question_answer(
self.problem_display_name,
{'2_1': response1, '2_2': response2}
)
def get_progress_detail(self):
"""
Return the reported progress detail for the problem in our test course.
The return value is a string like u'1/2'.
"""
json_data = json.loads(self.look_at_question(self.problem_display_name).content)
progress = '%s/%s' % (str(json_data['current_score']), str(json_data['total_possible']))
return progress
def assertExpectedLanguageInPreference(self, user, expected_language_code):
"""
        This method is a custom assertion that verifies that a given user has
        the expected language code in their preferences and in cookies.
Arguments:
user: User model instance
expected_language_code: string indicating a language code
"""
self.assertEqual(
get_user_preference(user, LANGUAGE_KEY), expected_language_code
)
self.assertEqual(
self.client.cookies[settings.LANGUAGE_COOKIE].value, expected_language_code
)
@override_waffle_flag(UNIFIED_COURSE_TAB_FLAG, active=False)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_masquerade_as_specific_user_on_self_paced(self):
"""
        Test masquerading as a specific user for the course info page when the
        self-paced configuration flag "enable_course_home_improvements" is set.
        Log in as a staff user and visit the course info page, then masquerade
        as a specific student and revisit the course info page.
"""
# Log in as staff, and check we can see the info page.
self.login_staff()
response = self.get_course_info_page()
self.assertEqual(response.status_code, 200)
content = response.content
self.assertIn("OOGIE BLOOGIE", content)
        # Masquerade as the student, enable the self-paced configuration, and check we can see the info page.
SelfPacedConfiguration(enable_course_home_improvements=True).save()
self.update_masquerade(role='student', user_name=self.student_user.username)
response = self.get_course_info_page()
self.assertEqual(response.status_code, 200)
content = response.content
self.assertIn("OOGIE BLOOGIE", content)
@ddt.data(
'john', # Non-unicode username
u'fôô@bar', # Unicode username with @, which is what the ENABLE_UNICODE_USERNAME feature allows
)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_masquerade_as_specific_student(self, username):
"""
Test masquerading as a specific user.
We answer the problem in our test course as the student and as staff user, and we use the
        progress as a proxy to determine whose state we currently see.
"""
student = UserFactory.create(username=username)
CourseEnrollment.enroll(student, self.course.id)
self.logout()
self.login(student.email, 'test')
# Answer correctly as the student, and check progress.
self.submit_answer('Correct', 'Correct')
self.assertEqual(self.get_progress_detail(), u'2/2')
# Log in as staff, and check the problem is unanswered.
self.login_staff()
self.assertEqual(self.get_progress_detail(), u'0/2')
# Masquerade as the student, and check we can see the student state.
self.update_masquerade(role='student', user_name=student.username)
self.assertEqual(self.get_progress_detail(), u'2/2')
# Temporarily override the student state.
self.submit_answer('Correct', 'Incorrect')
self.assertEqual(self.get_progress_detail(), u'1/2')
# Reload the page and check we see the student state again.
self.get_courseware_page()
self.assertEqual(self.get_progress_detail(), u'2/2')
# Become the staff user again, and check the problem is still unanswered.
self.update_masquerade(role='staff')
self.assertEqual(self.get_progress_detail(), u'0/2')
# Verify the student state did not change.
self.logout()
self.login(student.email, 'test')
self.assertEqual(self.get_progress_detail(), u'2/2')
def test_masquerading_with_language_preference(self):
"""
Tests that masquerading as a specific user in the course does not update the language
preference of the staff user.
Log in as a staff user, set the user's language preference to English, and visit the courseware page.
Set masquerade to view the same page as a specific student having a different language preference and
revisit the courseware page.
"""
english_language_code = 'en'
set_user_preference(self.test_user, preference_key=LANGUAGE_KEY, preference_value=english_language_code)
self.login_staff()
# Reload the page and check we have expected language preference in system and in cookies.
self.get_courseware_page()
self.assertExpectedLanguageInPreference(self.test_user, english_language_code)
# Set the student's language preference and masquerade to view the same page as the student.
set_user_preference(self.student_user, preference_key=LANGUAGE_KEY, preference_value='es-419')
self.update_masquerade(role='student', user_name=self.student_user.username)
# Reload the page and check we have expected language preference in system and in cookies.
self.get_courseware_page()
self.assertExpectedLanguageInPreference(self.test_user, english_language_code)
@override_waffle_flag(UNIFIED_COURSE_TAB_FLAG, active=False)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_masquerade_as_specific_student_course_info(self):
"""
Test masquerading as a specific user for course info page.
We log in with login_staff and verify the course info page content, then we
set masquerade to view the same page as a specific student and verify the content is still shown.
"""
# Log in as staff, and check we can see the info page.
self.login_staff()
content = self.get_course_info_page().content
self.assertIn("OOGIE BLOOGIE", content)
# Masquerade as the student, and check we can see the info page.
self.update_masquerade(role='student', user_name=self.student_user.username)
content = self.get_course_info_page().content
self.assertIn("OOGIE BLOOGIE", content)
def test_masquerade_as_specific_student_progress(self):
"""
Test masquerading as a specific user for progress page.
"""
# Give the student some correct answers, check their progress page
self.login_student()
self.submit_answer('Correct', 'Correct')
student_progress = self.get_progress_page().content
self.assertNotIn("1 of 2 possible points", student_progress)
self.assertIn("2 of 2 possible points", student_progress)
# Staff answers are slightly different
self.login_staff()
self.submit_answer('Incorrect', 'Correct')
staff_progress = self.get_progress_page().content
self.assertNotIn("2 of 2 possible points", staff_progress)
self.assertIn("1 of 2 possible points", staff_progress)
# Should now see the student's scores
self.update_masquerade(role='student', user_name=self.student_user.username)
masquerade_progress = self.get_progress_page().content
self.assertNotIn("1 of 2 possible points", masquerade_progress)
self.assertIn("2 of 2 possible points", masquerade_progress)
@attr(shard=1)
class TestGetMasqueradingGroupId(StaffMasqueradeTestCase):
"""
Check for staff being able to masquerade as belonging to a group.
"""
def setUp(self):
super(TestGetMasqueradingGroupId, self).setUp()
self.user_partition = UserPartition(
0, 'Test User Partition', '',
[Group(0, 'Group 1'), Group(1, 'Group 2')],
scheme_id='cohort'
)
self.course.user_partitions.append(self.user_partition)
modulestore().update_item(self.course, self.test_user.id)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_get_masquerade_group(self):
"""
Tests that a staff member can masquerade as being in a group in a user partition
"""
# Verify there is no masquerading group initially
group = get_masquerading_user_group(self.course.id, self.test_user, self.user_partition)
self.assertIsNone(group)
# Install a masquerading group
self.ensure_masquerade_as_group_member(0, 1)
# Verify that the masquerading group is returned
group = get_masquerading_user_group(self.course.id, self.test_user, self.user_partition)
self.assertEqual(group.id, 1)
class ReadOnlyKeyValueStore(DictKeyValueStore):
"""
A KeyValueStore that raises an exception on attempts to modify it.
Used to make sure MasqueradingKeyValueStore does not try to modify the underlying KeyValueStore.
"""
def set(self, key, value):
assert False, "ReadOnlyKeyValueStore may not be modified."
def delete(self, key):
assert False, "ReadOnlyKeyValueStore may not be modified."
def set_many(self, update_dict): # pylint: disable=unused-argument
assert False, "ReadOnlyKeyValueStore may not be modified."
class FakeSession(dict):
""" Mock for Django session object. """
modified = False # We need dict semantics with a writable 'modified' property
class MasqueradingKeyValueStoreTest(TestCase):
"""
Unit tests for the MasqueradingKeyValueStore class.
"""
def setUp(self):
super(MasqueradingKeyValueStoreTest, self).setUp()
self.ro_kvs = ReadOnlyKeyValueStore({'a': 42, 'b': None, 'c': 'OpenCraft'})
self.session = FakeSession()
self.kvs = MasqueradingKeyValueStore(self.ro_kvs, self.session)
def test_all(self):
self.assertEqual(self.kvs.get('a'), 42)
self.assertEqual(self.kvs.get('b'), None)
self.assertEqual(self.kvs.get('c'), 'OpenCraft')
with self.assertRaises(KeyError):
self.kvs.get('d')
self.assertTrue(self.kvs.has('a'))
self.assertTrue(self.kvs.has('b'))
self.assertTrue(self.kvs.has('c'))
self.assertFalse(self.kvs.has('d'))
self.kvs.set_many({'a': 'Norwegian Blue', 'd': 'Giraffe'})
self.kvs.set('b', 7)
self.assertEqual(self.kvs.get('a'), 'Norwegian Blue')
self.assertEqual(self.kvs.get('b'), 7)
self.assertEqual(self.kvs.get('c'), 'OpenCraft')
self.assertEqual(self.kvs.get('d'), 'Giraffe')
for key in 'abd':
self.assertTrue(self.kvs.has(key))
self.kvs.delete(key)
with self.assertRaises(KeyError):
self.kvs.get(key)
self.assertEqual(self.kvs.get('c'), 'OpenCraft')
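# A minimal sketch of the overlay pattern the test above exercises
# (illustrative only; not the actual MasqueradingKeyValueStore
# implementation): reads fall through to a read-only base store, while
# writes and deletes live in a masquerade-local dict so the base store is
# never modified.
class _OverlaySketch(object):
    _DELETED = object()  # tombstone marking a key deleted in the overlay

    def __init__(self, base):
        self.base = base       # read-only underlying mapping
        self.overlay = {}      # masquerade-local modifications

    def get(self, key):
        if key in self.overlay:
            value = self.overlay[key]
            if value is self._DELETED:
                raise KeyError(key)
            return value
        return self.base[key]

    def set(self, key, value):
        self.overlay[key] = value

    def delete(self, key):
        self.overlay[key] = self._DELETED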
class CourseMasqueradeTest(TestCase):
"""
Unit tests for the CourseMasquerade class.
"""
def test_unpickling_sets_all_attributes(self):
"""
Make sure that old CourseMasquerade objects receive missing attributes when unpickled from
the session.
"""
cmasq = CourseMasquerade(7)
del cmasq.user_name
pickled_cmasq = pickle.dumps(cmasq)
unpickled_cmasq = pickle.loads(pickled_cmasq)
self.assertEqual(unpickled_cmasq.user_name, None)
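# A minimal sketch of the pattern the test above exercises (illustrative;
# not edx-platform's actual CourseMasquerade implementation): re-running
# __init__ during unpickling gives attributes added in newer code their
# defaults before the pickled state is overlaid.
import pickle

class _MasqueradeSketch(object):
    def __init__(self, course_key, role='student', user_name=None):
        self.course_key = course_key
        self.role = role
        self.user_name = user_name

    def __setstate__(self, state):
        self.__init__(state['course_key'])  # restore current defaults
        self.__dict__.update(state)         # then overlay the pickled data

old = _MasqueradeSketch(7)
del old.user_name                           # simulate a pickle from older code
restored = pickle.loads(pickle.dumps(old))
assert restored.user_name is None           # missing attribute got its default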
| ahmedaljazzar/edx-platform | lms/djangoapps/courseware/tests/test_masquerade.py | Python | agpl-3.0 | 21981 | ["VisIt"] | 4aeceb7b888e91158a14ec1a4a5b7343f1803e5ba718e16cb66c39d2ecf04c25 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2007-2009 Brian G. Matherly
# Copyright (C) 2009-2010 Benny Malengier <benny.malengier@gramps-project.org>
# Copyright (C) 2010 Peter Landgren
# Copyright (C) 2010 Tim Lyons
# Copyright (C) 2011 Adam Stein <adam@csh.rit.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Report output generator for html documents, based on Html and HtmlBackend
"""
#------------------------------------------------------------------------
#
# Python modules
#
#------------------------------------------------------------------------
import os
import shutil
import logging
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.utils.image import resize_to_jpeg
from gramps.gen.const import DATA_DIR, IMAGE_DIR, PROGRAM_NAME, URL_HOMEPAGE
from gramps.gen.errors import ReportError
from gramps.version import VERSION
from gramps.gen.plug.docgen import BaseDoc, TextDoc, URL_PATTERN
from gramps.plugins.lib.libhtmlbackend import HtmlBackend, process_spaces
from gramps.plugins.lib.libhtml import Html
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
LOG = logging.getLogger(".htmldoc")
_TEXTDOCSCREEN = 'grampstextdoc.css'
_HTMLSCREEN = 'grampshtml.css'
#------------------------------------------------------------------------
#
# Set up to make links clickable
#
#------------------------------------------------------------------------
_CLICKABLE = r'''<a href="\1">\1</a>'''
#------------------------------------------------------------------------
#
# HtmlDoc
#
#------------------------------------------------------------------------
class HtmlDoc(BaseDoc, TextDoc):
"""Implementation of the BaseDoc and TextDoc gen.plug.docgen api for the
creation of Html files. This is achieved by writing on a HtmlBackend
object
div id's defined here:
id="grampstextdoc" : the entire text report
id="grampsheading" : a small defined heading, but not h1 to h6 !
id="grampsstylednote" : start of part with a styled note, divided in
paragraphs
id="grampsnote" : start of part with a note. This id is normally not
used
The styles as defined in the stylesheed of the textdoc, will be converted
to css class. Color is removed to avoid conflicts with the css. Also
Fontface is removed. Size, italic, bold, margins, borders are retained
"""
def __init__(self, styles, paper_style):
BaseDoc.__init__(self, styles, None)
self.style_declaration = ''
self.htmllist = []
self._backend = None
self.css_filename = ''
self.warn_dir = True
self._col = 0
self._tbl = None
self._empty = 1
self.title = ''
self.__title_written = -1 # -1 = not written, 0 = writing, 1 = written
self.__link_attrs = {} # additional link attrs, eg {"style": "...", "class": "..."}
self.use_table_headers = False # th, td
self.first_row = True
def set_css_filename(self, css_filename):
"""
Set the css file to use. The path must be included.
Note: DocReportDialog sets this for html doc
"""
if css_filename and os.path.basename(css_filename):
self.css_filename = css_filename
else:
self.css_filename = ''
def open(self, filename):
"""
Overwrite base method
"""
self._backend = HtmlBackend(filename)
self._backend.open()
self.htmllist += [self._backend.html_body]
#start a gramps report
self.htmllist += [Html('div', id="grampstextdoc")]
self.build_header()
def build_header(self):
"""
Build up the header of the html file over the defaults of Html()
"""
# add additional meta tags and stylesheet links to head section
# create additional meta tags
_meta1 = 'name="generator" content="%s %s %s"' % (
PROGRAM_NAME, VERSION, URL_HOMEPAGE)
meta = Html('meta', attr=_meta1)
#set styles of the report as inline css
self.build_style_declaration()
# Gramps favicon and css
fname1 = '/'.join([self._backend.datadir(), 'favicon.ico'])
fname2 = '/'.join([self._backend.datadir(), _TEXTDOCSCREEN])
fname3 = '/'.join([self._backend.datadir(), _HTMLSCREEN])
# links for GRAMPS favicon and stylesheets
links = Html('link', rel='shortcut icon', href=fname1,
type='image/x-icon') + (
Html('link', rel='stylesheet', href=fname2,
type='text/css', media='screen', indent=False),)
if self.css_filename:
links += (Html('link', rel='stylesheet', href=fname3,
type='text/css', media='screen', indent=False),)
self._backend.html_header += (meta, links)
def build_style_declaration(self, id="grampstextdoc"):
"""
Convert the styles of the report into inline css for the html doc
"""
styles = self.get_style_sheet()
text = []
for sname in sorted(styles.get_cell_style_names()):
style = styles.get_cell_style(sname)
pad = "%.3fcm" % style.get_padding()
top = bottom = left = right = 'none'
if style.get_top_border():
top = 'thin solid #000000'
if style.get_bottom_border():
bottom = 'thin solid #000000'
if style.get_left_border():
left = 'thin solid #000000'
if style.get_right_border():
right = 'thin solid #000000'
text.append('#%s .%s {\n'
'\tpadding: %s %s %s %s;\n'
'\tborder-top:%s; border-bottom:%s;\n'
'\tborder-left:%s; border-right:%s;\n}'
% (id, sname, pad, pad, pad, pad, top, bottom,
left, right))
for style_name in sorted(styles.get_paragraph_style_names()):
style = styles.get_paragraph_style(style_name)
font = style.get_font()
font_size = font.get_size()
#font_color = '#%02x%02x%02x' % font.get_color()
align = style.get_alignment_text()
text_indent = "%.2f" % style.get_first_indent()
right_margin = "%.2f" % style.get_right_margin()
left_margin = "%.2f" % style.get_left_margin()
top_margin = "%.2f" % style.get_top_margin()
bottom_margin = "%.2f" % style.get_bottom_margin()
top = bottom = left = right = 'none'
if style.get_top_border():
top = 'thin solid #000000'
if style.get_bottom_border():
bottom = 'thin solid #000000'
if style.get_left_border():
left = 'thin solid #000000'
if style.get_right_border():
right = 'thin solid #000000'
italic = bold = ''
if font.get_italic():
italic = 'font-style:italic; '
if font.get_bold():
bold = 'font-weight:bold; '
#if font.get_type_face() == FONT_SANS_SERIF:
# family = '"Helvetica","Arial","sans-serif"'
#else:
# family = '"Times New Roman","Times","serif"'
# do not allow color, set in base css !
# so no : 'color: %s' % font_color
# so no : 'font-family:%s;' % family
text.append('#%s .%s {\n'
'\tfont-size: %dpt;\n'
'\ttext-align: %s; text-indent: %scm;\n'
'\tmargin-right: %scm; margin-left: %scm;\n'
'\tmargin-top: %scm; margin-bottom: %scm;\n'
'\tborder-top:%s; border-bottom:%s;\n'
'\tborder-left:%s; border-right:%s;\n'
'\t%s%s\n}'
% (id, style_name, font_size,
align, text_indent,
right_margin, left_margin,
top_margin, bottom_margin,
top, bottom, left, right,
italic, bold))
self.style_declaration = '\n'.join(text)
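# For instance, a cell style named "TableCell" with 0.1cm padding and only
# a top border would yield a declaration like (illustrative values):
#   #grampstextdoc .TableCell {
#       padding: 0.100cm 0.100cm 0.100cm 0.100cm;
#       border-top:thin solid #000000; border-bottom:none;
#       border-left:none; border-right:none;
#   }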
def close(self):
"""
Overwrite base method
"""
while len(self.htmllist) > 1:
self.__reduce_list()
#now write the actual file
self._backend.close()
self.write_support_files()
def copy_file(self, from_fname, to_fname, to_dir=''):
"""
Copy a file from a source to a (report) destination. If to_dir is not
present, then the destination directory will be created.
Normally 'to_fname' will be just a filename, without directory path.
'to_dir' is the relative path name in the destination root. It will
be prepended before 'to_fname'.
"""
#build absolute path
dest = os.path.join(self._backend.datadirfull(), to_dir, to_fname)
destdir = os.path.dirname(dest)
if not os.path.isdir(destdir):
os.makedirs(destdir)
if from_fname != dest:
shutil.copyfile(from_fname, dest)
elif self.warn_dir:
raise ReportError(
_("Possible destination error"),
_("You appear to have set your target directory "
"to a directory used for data storage. This "
"could create problems with file management. "
"It is recommended that you consider using "
"a different directory to store your generated "
"web pages."))
self.warn_dir = False
def write_support_files(self):
"""
Copy support files to the datadir that needs to hold them
"""
#css of textdoc styles
with open(os.path.join(self._backend.datadirfull(),
_TEXTDOCSCREEN), 'w') as tdfile:
tdfile.write(self.style_declaration)
#css file
if self.css_filename:
#we do an extra check in case file does not exist, eg cli call
fullpath = os.path.join(DATA_DIR, self.css_filename)
if os.path.exists(fullpath):
self.copy_file(fullpath, _HTMLSCREEN)
#favicon
self.copy_file(os.path.join(IMAGE_DIR, 'webstuff', 'favicon.ico'),
'favicon.ico')
def __reduce_list(self):
"""
Takes the internal list of html objects, and adds the last to the
previous. This closes the upper tag
"""
self.htmllist[-2] += self.htmllist[-1]
self.htmllist.pop()
def __write_text(self, text, mark=None, markup=False, links=False):
"""
@param text: text to write.
@param mark: IndexMark to use for indexing (not supported)
@param markup: True if text already contains markup info.
Then text will no longer be escaped
@param links: make URLs clickable if True
"""
if not markup:
text = self._backend.ESCAPE_FUNC()(text)
if self.__title_written == 0:
self.title += text
if links is True:
import re
text = re.sub(URL_PATTERN, _CLICKABLE, text)
self.htmllist[-1] += text
def __empty_char(self):
"""
Output a non-breaking space so that browsers behave correctly on
empty content
"""
self.__write_text(' ', markup=True)
def write_text(self, text, mark=None, links=False):
"""
Overwrite base method
"""
if text != "":
self._empty = 0
self.__write_text(text, mark, links=links)
def write_title(self):
"""
Add title field to header
"""
self._backend.html_header += Html('title', self.title,
inline=True)
def start_table(self, name, style):
"""
Overwrite base method
"""
self.first_row = True
styles = self.get_style_sheet()
self._tbl = styles.get_table_style(style)
self.htmllist += [Html('table', width=str(self._tbl.get_width())+'%',
cellspacing='0')]
def end_table(self):
"""
Overwrite base method
"""
self.__reduce_list()
def start_row(self):
"""
Overwrite base method
"""
self.htmllist += [Html('tr')]
self._col = 0
def end_row(self):
"""
Overwrite base method
"""
self.first_row = False
self.__reduce_list()
def start_cell(self, style_name, span=1):
"""
Overwrite base method
"""
if self.use_table_headers and self.first_row:
tag = "th"
else:
tag = "td"
self._empty = 1
if span > 1:
self.htmllist += (Html(tag, colspan=str(span), class_=style_name),)
self._col += span
else:
self.htmllist += (Html(tag, colspan=str(span),
width=str(self._tbl.get_column_width(
self._col))+ '%',
class_=style_name),)
self._col += 1
def end_cell(self):
"""
Overwrite base method
"""
self.__reduce_list()
def start_paragraph(self, style_name, leader=None):
"""
Overwrite base method
"""
style_sheet = self.get_style_sheet()
style = style_sheet.get_paragraph_style(style_name)
level = style.get_header_level()
if level == 0:
#a normal paragraph
self.htmllist += (Html('p', class_=style_name, inline=True),)
elif level == 1:
if self.__title_written == -1 and \
style_name.upper().find('TITLE') != -1:
self.__title_written = 0
self.htmllist += (Html('div', id="header"),)
self.htmllist += (Html('h1', class_=style_name, id='SiteTitle',
inline=True),)
else:
self.htmllist += (Html('h1', class_=style_name, inline=True),)
elif 2 <= level <= 5:
tag = 'h'+str(level+1)
self.htmllist += (Html(tag, class_=style_name, inline=True),)
else:
# a low level header
self.htmllist += (Html('div', id='grampsheading',
class_=style_name),)
if leader is not None:
self.write_text(leader+' ')
def end_paragraph(self):
"""
Overwrite base method
"""
if self._empty == 1:
self.__empty_char()
self._empty = 0
self.__reduce_list()
if self.__title_written == 0:
self.__title_written = 1
#close div statement
self.__reduce_list()
self.write_title()
def start_bold(self):
"""
Overwrite base method
"""
self.htmllist += [Html('strong')]
def end_bold(self):
"""
Overwrite base method
"""
self.__reduce_list()
def start_superscript(self):
"""
Overwrite base method
"""
self.htmllist += [Html('sup')]
def end_superscript(self):
"""
Overwrite base method
"""
self.__reduce_list()
def write_styled_note(self, styledtext, format, style_name,
contains_html=False, links=False):
"""
Convenience function to write a styledtext to the html doc.
styledtext : assumed a StyledText object to write
format : = 0 : Flowed, = 1 : Preformatted
style_name : name of the style to use for default presentation
contains_html: bool, the backend should not check if html is present.
If contains_html=True, then the textdoc is free to handle that in
some way. Eg, a textdoc could remove all tags, or could make sure
a link is clickable. HtmlDoc will show the html as pure text, so
no escaping will happen.
links: bool, make URLs clickable if True
"""
text = str(styledtext)
self.htmllist += [Html('div', id='grampsstylednote')]
if contains_html:
#just dump the note out as it is. Adding markup would be dangerous
# as it could destroy the html.
self.start_paragraph(style_name)
self.__write_text(text, markup=True, links=links)
self.end_paragraph()
else:
s_tags = styledtext.get_tags()
markuptext = self._backend.add_markup_from_styled(text, s_tags,
split='\n')
self.start_paragraph(style_name)
inpara = True
self._empty = 1 # para is empty
# we explicitly set _empty because start and end para do not seem
# to do a very good job at setting them
linenb = 1
# The code is tricky here, because we don't want to start a new para
# at the end of the last line if there is no newline there.
# Instead, we want to just end the current para.
for line in markuptext.split('\n'):
[line, sigcount] = process_spaces(line, format)
if sigcount == 0:
if inpara is False:
# needed for runs of three or more newlines
self.start_paragraph(style_name)
inpara = True
self._empty = 1 # para is empty
self.end_paragraph()
inpara = False
linenb = 1
else:
if inpara is False:
self.start_paragraph(style_name)
inpara = True
self._empty = 1 # para is empty
if linenb > 1:
self.htmllist[-1] += Html('br')
self.__write_text(line, markup=True, links=links)
self._empty = 0 # para is not empty
linenb += 1
if inpara is True:
self.end_paragraph()
if sigcount == 0:
# if the last line was blank, then as well as outputting the
# previous para, which we have just done, we also output a new
# blank para
self.start_paragraph(style_name)
self._empty = 1 # para is empty
self.end_paragraph()
#end div element
self.__reduce_list()
def add_media(self, name, pos, w_cm, h_cm, alt='', style_name=None,
crop=None):
"""
Overwrite base method
"""
self._empty = 0
size = int(max(w_cm, h_cm) * float(150.0/2.54))
refname = "is%s" % os.path.basename(name)
imdir = self._backend.datadirfull()
try:
resize_to_jpeg(name, imdir + os.sep + refname, size, size,
crop=crop)
except Exception:
LOG.warning(_("Could not create jpeg version of image %(name)s"),
{'name': name})
return
if len(alt):
alt = '<br />'.join(alt)
if pos not in ["right", "left"]:
if len(alt):
self.htmllist[-1] += Html('div') + (
Html('img', src=imdir + os.sep + refname,
border='0', alt=alt),
Html('p', class_="DDR-Caption") + alt
)
else:
self.htmllist[-1] += Html('img', src=imdir + os.sep + refname,
border='0', alt=alt)
else:
if len(alt):
self.htmllist[-1] += Html(
'div', style_="float: %s; padding: 5px; margin: 0;" % pos
) + (Html('img', src=imdir + os.sep + refname,
border='0', alt=alt),
Html('p', class_="DDR-Caption") + alt)
else:
self.htmllist[-1] += Html('img', src=imdir + os.sep + refname,
border='0', alt=alt, align=pos)
def page_break(self):
"""
overwrite base method so page break has no effect
"""
pass
def start_link(self, link):
"""
Starts a section to add a link. Link is a URI.
"""
self.htmllist += [Html('a', href=link, **self.__link_attrs)]
def stop_link(self):
"""
Stop a section of a link.
"""
self.__reduce_list()
def start_underline(self):
"""
Starts a section of underlining.
"""
self.htmllist += [Html('u')]
def stop_underline(self):
"""
Stop underlining.
"""
self.__reduce_list()
def set_link_attrs(self, attrs):
"""
Set some 'a' (anchor) attributes/values. attrs is a dictionary, e.g.
{"style": "...", "class": "..."}
"""
self.__link_attrs = attrs
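# A minimal sketch of the clickable-link substitution performed by
# __write_text above when links=True. The real URL_PATTERN comes from
# gramps.gen.plug.docgen; the simplified pattern below is an assumption
# for illustration only.
import re

_URL_SKETCH = r'(https?://[^\s<>"]+)'
_CLICK_SKETCH = r'<a href="\1">\1</a>'
print(re.sub(_URL_SKETCH, _CLICK_SKETCH,
             'See http://gramps-project.org for details.'))
# -> See <a href="http://gramps-project.org">http://gramps-project.org</a> for details.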
| beernarrd/gramps | gramps/plugins/docgen/htmldoc.py | Python | gpl-2.0 | 22564 | ["Brian"] | 5a01215aa0a429200bde04e7b0d7f08a82ef3b81bd6e70b19c99abf61f822b96 |
"""
Perform Levenberg-Marquardt least-squares minimization, based on MINPACK-1.
AUTHORS
The original version of this software, called LMFIT, was written in FORTRAN
as part of the MINPACK-1 package by Jorge More' and collaborators.
Craig Markwardt converted the FORTRAN code to IDL. The information for the
IDL version is:
Craig B. Markwardt, NASA/GSFC Code 662, Greenbelt, MD 20770
craigm@lheamail.gsfc.nasa.gov
UPDATED VERSIONs can be found on my WEB PAGE:
http://cow.physics.wisc.edu/~craigm/idl/idl.html
Mark Rivers created this Python version from Craig's IDL version.
Mark Rivers, University of Chicago
Building 434A, Argonne National Laboratory
9700 South Cass Avenue, Argonne, IL 60439
rivers@cars.uchicago.edu
Updated versions can be found at http://cars.uchicago.edu/software
Sergey Koposov converted Mark's Python version from Numeric to numpy
Sergey Koposov, University of Cambridge, Institute of Astronomy,
Madingley road, CB3 0HA, Cambridge, UK
koposov@ast.cam.ac.uk
Updated versions can be found at http://code.google.com/p/astrolibpy/source/browse/trunk/
DESCRIPTION
MPFIT uses the Levenberg-Marquardt technique to solve the
least-squares problem. In its typical use, MPFIT will be used to
fit a user-supplied function (the "model") to user-supplied data
points (the "data") by adjusting a set of parameters. MPFIT is
based upon MINPACK-1 (LMDIF.F) by More' and collaborators.
For example, a researcher may think that a set of observed data
points is best modelled with a Gaussian curve. A Gaussian curve is
parameterized by its mean, standard deviation and normalization.
MPFIT will, within certain constraints, find the set of parameters
which best fits the data. The fit is "best" in the least-squares
sense; that is, the sum of the weighted squared differences between
the model and data is minimized.
The Levenberg-Marquardt technique is a particular strategy for
iteratively searching for the best fit. This particular
implementation is drawn from MINPACK-1 (see NETLIB), and is much faster
and more accurate than the version provided in the Scientific Python package
in Scientific.Functions.LeastSquares.
This version allows upper and lower bounding constraints to be placed on each
parameter, or the parameter can be held fixed.
The user-supplied Python function should return an array of weighted
deviations between model and data. In a typical scientific problem
the residuals should be weighted so that each deviate has a
gaussian sigma of 1.0. If X represents values of the independent
variable, Y represents a measurement for each value of X, and ERR
represents the error in the measurements, then the deviates could
be calculated as follows:
DEVIATES = (Y - F(X)) / ERR
where F is the analytical function representing the model. You are
recommended to use the convenience functions MPFITFUN and
MPFITEXPR, which are driver functions that calculate the deviates
for you. If ERR are the 1-sigma uncertainties in Y, then
TOTAL( DEVIATES^2 )
will be the total chi-squared value. MPFIT will minimize the
chi-square value. The values of X, Y and ERR are passed through
MPFIT to the user-supplied function via the FUNCTKW keyword.
Simple constraints can be placed on parameter values by using the
PARINFO keyword to MPFIT. See below for a description of this
keyword.
MPFIT does not perform more general optimization tasks. See TNMIN
instead. MPFIT is customized, based on MINPACK-1, to the
least-squares minimization problem.
USER FUNCTION
The user must define a function which returns the appropriate
values as specified above. The function should return the weighted
deviations between the model and the data. It should also return a status
flag and an optional partial derivative array. For applications which
use finite-difference derivatives -- the default -- the user
function should be declared in the following way:
def myfunct(p, fjac=None, x=None, y=None, err=None):
# Parameter values are passed in "p"
# If fjac==None then partial derivatives should not be
# computed. It will always be None if MPFIT is called with default
# flag.
model = F(x, p)
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
return [status, (y-model)/err]
See below for applications with analytical derivatives.
The keyword parameters X, Y, and ERR in the example above are
suggestive but not required. Any parameters can be passed to
MYFUNCT by using the functkw keyword to MPFIT. Use MPFITFUN and
MPFITEXPR if you need ideas on how to do that. The function *must*
accept a parameter list, P.
In general there are no restrictions on the number of dimensions in
X, Y or ERR. However the deviates *must* be returned in a
one-dimensional Numeric array of type Float.
User functions may also indicate a fatal error condition using the
status return described above. If status is set to a number between
-15 and -1 then MPFIT will stop the calculation and return to the caller.
ANALYTIC DERIVATIVES
In the search for the best-fit solution, MPFIT by default
calculates derivatives numerically via a finite difference
approximation. The user-supplied function need not calculate the
derivatives explicitly. However, if you desire to compute them
analytically, then the AUTODERIVATIVE=0 keyword must be passed to MPFIT.
As a practical matter, it is often sufficient and even faster to allow
MPFIT to calculate the derivatives numerically, and so
AUTODERIVATIVE=0 is not necessary.
If AUTODERIVATIVE=0 is used then the user function must check the parameter
FJAC, and if FJAC!=None then return the partial derivative array in the
return list.
def myfunct(p, fjac=None, x=None, y=None, err=None):
# Parameter values are passed in "p"
# If FJAC!=None then partial derivatives must be computed.
# FJAC contains an array of len(p), where each entry
# is 1 if that parameter is free and 0 if it is fixed.
model = F(x, p)
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
if fjac is not None:
pderiv = zeros([len(x), len(p)], Float)
for j in range(len(p)):
pderiv[:,j] = FGRAD(x, p, j)
else:
pderiv = None
return [status, (y-model)/err, pderiv]
where FGRAD(x, p, i) is a user function which must compute the
derivative of the model with respect to parameter P[i] at X. When
finite differencing is used for computing derivatives (ie, when
AUTODERIVATIVE=1), or when MPFIT needs only the errors but not the
derivatives the parameter FJAC=None.
Derivatives should be returned in the PDERIV array. PDERIV should be an m x
n array, where m is the number of data points and n is the number
of parameters. dp[i,j] is the derivative at the ith point with
respect to the jth parameter.
The derivatives with respect to fixed parameters are ignored; zero
is an appropriate value to insert for those derivatives. Upon
input to the user function, FJAC is set to a vector with the same
length as P, with a value of 1 for a parameter which is free, and a
value of zero for a parameter which is fixed (and hence no
derivative needs to be calculated).
If the data is higher than one dimensional, then the *last*
dimension should be the parameter dimension. Example: fitting a
50x50 image, "dp" should be 50x50xNPAR.
CONSTRAINING PARAMETER VALUES WITH THE PARINFO KEYWORD
The behavior of MPFIT can be modified with respect to each
parameter to be fitted. A parameter value can be fixed; simple
boundary constraints can be imposed; limitations on the parameter
changes can be imposed; properties of the automatic derivative can
be modified; and parameters can be tied to one another.
These properties are governed by the PARINFO structure, which is
passed as a keyword parameter to MPFIT.
PARINFO should be a list of dictionaries, one list entry for each parameter.
Each parameter is associated with one element of the array, in
numerical order. The dictionary can have the following keys
(none are required, keys are case insensitive):
'value' - the starting parameter value (but see the START_PARAMS
parameter for more information).
'fixed' - a boolean value, whether the parameter is to be held
fixed or not. Fixed parameters are not varied by
MPFIT, but are passed on to MYFUNCT for evaluation.
'limited' - a two-element boolean array. If the first/second
element is set, then the parameter is bounded on the
lower/upper side. A parameter can be bounded on both
sides. Both LIMITED and LIMITS must be given
together.
'limits' - a two-element float array. Gives the
parameter limits on the lower and upper sides,
respectively. Zero, one or two of these values can be
set, depending on the values of LIMITED. Both LIMITED
and LIMITS must be given together.
'parname' - a string, giving the name of the parameter. The
fitting code of MPFIT does not use this tag in any
way. However, the default iterfunct will print the
parameter name if available.
'step' - the step size to be used in calculating the numerical
derivatives. If set to zero, then the step size is
computed automatically. Ignored when AUTODERIVATIVE=0.
'mpside' - the sidedness of the finite difference when computing
numerical derivatives. This field can take four
values:
0 - one-sided derivative computed automatically
1 - one-sided derivative (f(x+h) - f(x) )/h
-1 - one-sided derivative (f(x) - f(x-h))/h
2 - two-sided derivative (f(x+h) - f(x-h))/(2*h)
Where H is the STEP parameter described above. The
"automatic" one-sided derivative method will chose a
direction for the finite difference which does not
violate any constraints. The other methods do not
perform this check. The two-sided method is in
principle more precise, but requires twice as many
function evaluations. Default: 0. (A small numeric comparison of the
one- and two-sided differences appears after the PARINFO example below.)
'mpmaxstep' - the maximum change to be made in the parameter
value. During the fitting process, the parameter
will never be changed by more than this value in
one iteration.
A value of 0 indicates no maximum. Default: 0.
'tied' - a string expression which "ties" the parameter to other
free or fixed parameters. Any expression involving
constants and the parameter array P are permitted.
Example: if parameter 2 is always to be twice parameter
1 then use the following: parinfo[2]['tied'] = '2 * p[1]'.
Since they are totally constrained, tied parameters are
considered to be fixed; no errors are computed for them.
[ NOTE: the PARNAME can't be used in expressions. ]
'mpprint' - if set to 1, then the default iterfunct will print the
parameter value. If set to 0, the parameter value
will not be printed. This tag can be used to
selectively print only a few parameter values out of
many. Default: 1 (all parameters printed)
Future modifications to the PARINFO structure, if any, will involve
adding dictionary tags beginning with the two letters "MP".
Therefore programmers are urged to avoid using tags starting with
the same letters; otherwise they are free to include their own
fields within the PARINFO structure, and they will be ignored.
PARINFO Example:
parinfo = [{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
for i in range(5)]
parinfo[0]['fixed'] = 1
parinfo[4]['limited'][0] = 1
parinfo[4]['limits'][0] = 50.
values = [5.7, 2.2, 500., 1.5, 2000.]
for i in range(5): parinfo[i]['value']=values[i]
A total of 5 parameters, with starting values of 5.7,
2.2, 500, 1.5, and 2000 are given. The first parameter
is fixed at a value of 5.7, and the last parameter is
constrained to be above 50.
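A small numeric comparison of the one- and two-sided differences from
'mpside' above (an illustrative sketch; MPFIT derives the step h from
'step'/'relstep' internally):
import numpy
f = lambda x: numpy.sin(x)
x0, h = 1.0, 1e-5
d_one = (f(x0 + h) - f(x0)) / h           # 'mpside' = 1
d_two = (f(x0 + h) - f(x0 - h)) / (2*h)   # 'mpside' = 2
Here d_two is closer to the exact derivative cos(1.0), at the cost of
one extra function evaluation per parameter.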
EXAMPLE
import mpfit
import numpy
x = numpy.arange(100, dtype=float)
p0 = [5.7, 2.2, 500., 1.5, 2000.]
y = ( p0[0] + p0[1]*x + p0[2]*(x**2) + p0[3]*numpy.sqrt(x) +
p0[4]*numpy.log(x))
fa = {'x':x, 'y':y, 'err':err}
m = mpfit.mpfit(myfunct, p0, functkw=fa)
print 'status = ', m.status
if (m.status <= 0): print 'error message = ', m.errmsg
print 'parameters = ', m.params
Minimizes sum of squares of MYFUNCT. MYFUNCT is called with the X,
Y, and ERR keyword parameters that are given by FUNCTKW. The
results can be obtained from the returned object m.
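A self-contained variant of the example above (a sketch only; it assumes
this module is importable as mpfit, with err and myfunct fully defined,
and passes the user function object rather than a string):
import numpy
import mpfit
def myfunct(p, fjac=None, x=None, y=None, err=None):
    model = p[0] + p[1]*x
    status = 0
    return [status, (y - model)/err]
x = numpy.arange(1, 101, dtype=float)
y = 2.0 + 0.5*x
err = numpy.ones_like(x)
m = mpfit.mpfit(myfunct, [1., 1.], functkw={'x': x, 'y': y, 'err': err})
print 'status = ', m.status
print 'parameters = ', m.params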
THEORY OF OPERATION
There are many specific strategies for function minimization. One
very popular technique is to use function gradient information to
realize the local structure of the function. Near a local minimum
the function value can be Taylor expanded about x0 as follows:
f(x) = f(x0) + f'(x0) . (x-x0) + (1/2) (x-x0) . f''(x0) . (x-x0)
----- --------------- ------------------------------- (1)
Order 0th 1st 2nd
Here f'(x) is the gradient vector of f at x, and f''(x) is the
Hessian matrix of second derivatives of f at x. The vector x is
the set of function parameters, not the measured data vector. One
can find the minimum of f, f(xm), using Newton's method, and
arrive at the following linear equation:
f''(x0) . (xm-x0) = - f'(x0) (2)
If an inverse can be found for f''(x0) then one can solve for
(xm-x0), the step vector from the current position x0 to the new
projected minimum. Here the problem has been linearized (ie, the
gradient information is known to first order). f''(x0) is
a symmetric n x n matrix, and should be positive definite.
The Levenberg - Marquardt technique is a variation on this theme.
It adds an additional diagonal term to the equation which may aid the
convergence properties:
(f''(x0) + nu I) . (xm-x0) = -f'(x0) (2a)
where I is the identity matrix. When nu is large, the overall
matrix is diagonally dominant, and the iterations follow steepest
descent. When nu is small, the iterations are quadratically
convergent.
In principle, if f''(x0) and f'(x0) are known then xm-x0 can be
determined. However the Hessian matrix is often difficult or
impossible to compute. The gradient f'(x0) may be easier to
compute, if even by finite difference techniques. So-called
quasi-Newton techniques attempt to successively estimate f''(x0)
by building up gradient information as the iterations proceed.
In the least squares problem there are further simplifications
which assist in solving eqn (2). The function to be minimized is
a sum of squares:
f = Sum(hi^2) (3)
where hi is the ith residual out of m residuals as described
above. This can be substituted back into eqn (2) after computing
the derivatives:
f' = 2 Sum(hi hi')
f'' = 2 Sum(hi' hj') + 2 Sum(hi hi'') (4)
If one assumes that the parameters are already close enough to a
minimum, then one typically finds that the second term in f'' is
negligible [or, in any case, is too difficult to compute]. Thus,
equation (2) can be solved, at least approximately, using only
gradient information.
In matrix notation, the combination of eqns (2) and (4) becomes:
hT' . h' . dx = - hT' . h (5)
Where h is the residual vector (length m), hT is its transpose, h'
is the Jacobian matrix (dimensions m x n), and dx is (xm-x0). The
user function supplies the residual vector h, and in some cases h'
when it is not found by finite differences (see MPFIT_FDJAC2,
which finds h and hT'). Even if dx is not the best absolute step
to take, it does provide a good estimate of the best *direction*,
so often a line minimization will occur along the dx vector
direction.
The method of solution employed by MINPACK is to form the Q . R
factorization of h', where Q is an orthogonal matrix such that QT .
Q = I, and R is upper right triangular. Using h' = Q . R and the
orthogonality of Q, eqn (5) becomes
(RT . QT) . (Q . R) . dx = - (RT . QT) . h
RT . R . dx = - RT . QT . h (6)
R . dx = - QT . h
where the last statement follows because R is upper triangular.
Here, R, QT and h are known so this is a matter of solving for dx.
The routine MPFIT_QRFAC provides the QR factorization of h', with
pivoting, and MPFIT_QRSOLV provides the solution for dx.
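Equation (6) can be reproduced with a dense QR factorization in numpy
(an illustrative sketch only; MINPACK uses a pivoted, in-place
factorization rather than numpy.linalg):
import numpy
m, n = 50, 3
rng = numpy.random.RandomState(0)
jac = rng.randn(m, n)                     # h', the m x n Jacobian
h = rng.randn(m)                          # residual vector of length m
q, r = numpy.linalg.qr(jac)               # h' = Q . R with QT . Q = I
dx = numpy.linalg.solve(r, -q.T.dot(h))   # solves R . dx = - QT . h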
REFERENCES
MINPACK-1, Jorge More', available from netlib (www.netlib.org).
"Optimization Software Guide," Jorge More' and Stephen Wright,
SIAM, *Frontiers in Applied Mathematics*, Number 14.
More', Jorge J., "The Levenberg-Marquardt Algorithm:
Implementation and Theory," in *Numerical Analysis*, ed. Watson,
G. A., Lecture Notes in Mathematics 630, Springer-Verlag, 1977.
MODIFICATION HISTORY
Translated from MINPACK-1 in FORTRAN, Apr-Jul 1998, CM
Copyright (C) 1997-2002, Craig Markwardt
This software is provided as is without any warranty whatsoever.
Permission to use, copy, modify, and distribute modified or
unmodified copies is granted, provided this copyright and disclaimer
are included unchanged.
Translated from MPFIT (Craig Markwardt's IDL package) to Python,
August, 2002. Mark Rivers
Converted from Numeric to numpy (Sergey Koposov, July 2008)
"""
import numpy
import types
import scipy.lib.blas
# Original FORTRAN documentation
# **********
#
# subroutine lmdif
#
# the purpose of lmdif is to minimize the sum of the squares of
# m nonlinear functions in n variables by a modification of
# the levenberg-marquardt algorithm. the user must provide a
# subroutine which calculates the functions. the jacobian is
# then calculated by a forward-difference approximation.
#
# the subroutine statement is
#
# subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn,
# diag,mode,factor,nprint,info,nfev,fjac,
# ldfjac,ipvt,qtf,wa1,wa2,wa3,wa4)
#
# where
#
# fcn is the name of the user-supplied subroutine which
# calculates the functions. fcn must be declared
# in an external statement in the user calling
# program, and should be written as follows.
#
# subroutine fcn(m,n,x,fvec,iflag)
# integer m,n,iflag
# double precision x(n),fvec(m)
# ----------
# calculate the functions at x and
# return this vector in fvec.
# ----------
# return
# end
#
# the value of iflag should not be changed by fcn unless
# the user wants to terminate execution of lmdif.
# in this case set iflag to a negative integer.
#
# m is a positive integer input variable set to the number
# of functions.
#
# n is a positive integer input variable set to the number
# of variables. n must not exceed m.
#
# x is an array of length n. on input x must contain
# an initial estimate of the solution vector. on output x
# contains the final estimate of the solution vector.
#
# fvec is an output array of length m which contains
# the functions evaluated at the output x.
#
# ftol is a nonnegative input variable. termination
# occurs when both the actual and predicted relative
# reductions in the sum of squares are at most ftol.
# therefore, ftol measures the relative error desired
# in the sum of squares.
#
# xtol is a nonnegative input variable. termination
# occurs when the relative error between two consecutive
# iterates is at most xtol. therefore, xtol measures the
# relative error desired in the approximate solution.
#
# gtol is a nonnegative input variable. termination
# occurs when the cosine of the angle between fvec and
# any column of the jacobian is at most gtol in absolute
# value. therefore, gtol measures the orthogonality
# desired between the function vector and the columns
# of the jacobian.
#
# maxfev is a positive integer input variable. termination
# occurs when the number of calls to fcn is at least
# maxfev by the end of an iteration.
#
# epsfcn is an input variable used in determining a suitable
# step length for the forward-difference approximation. this
# approximation assumes that the relative errors in the
# functions are of the order of epsfcn. if epsfcn is less
# than the machine precision, it is assumed that the relative
# errors in the functions are of the order of the machine
# precision.
#
# diag is an array of length n. if mode = 1 (see
# below), diag is internally set. if mode = 2, diag
# must contain positive entries that serve as
# multiplicative scale factors for the variables.
#
# mode is an integer input variable. if mode = 1, the
# variables will be scaled internally. if mode = 2,
# the scaling is specified by the input diag. other
# values of mode are equivalent to mode = 1.
#
# factor is a positive input variable used in determining the
# initial step bound. this bound is set to the product of
# factor and the euclidean norm of diag*x if nonzero, or else
# to factor itself. in most cases factor should lie in the
# interval (.1,100.). 100. is a generally recommended value.
#
# nprint is an integer input variable that enables controlled
# printing of iterates if it is positive. in this case,
# fcn is called with iflag = 0 at the beginning of the first
# iteration and every nprint iterations thereafter and
# immediately prior to return, with x and fvec available
# for printing. if nprint is not positive, no special calls
# of fcn with iflag = 0 are made.
#
# info is an integer output variable. if the user has
# terminated execution, info is set to the (negative)
# value of iflag. see description of fcn. otherwise,
# info is set as follows.
#
# info = 0 improper input parameters.
#
# info = 1 both actual and predicted relative reductions
# in the sum of squares are at most ftol.
#
# info = 2 relative error between two consecutive iterates
# is at most xtol.
#
# info = 3 conditions for info = 1 and info = 2 both hold.
#
# info = 4 the cosine of the angle between fvec and any
# column of the jacobian is at most gtol in
# absolute value.
#
# info = 5 number of calls to fcn has reached or
# exceeded maxfev.
#
# info = 6 ftol is too small. no further reduction in
# the sum of squares is possible.
#
# info = 7 xtol is too small. no further improvement in
# the approximate solution x is possible.
#
# info = 8 gtol is too small. fvec is orthogonal to the
# columns of the jacobian to machine precision.
#
# nfev is an integer output variable set to the number of
# calls to fcn.
#
# fjac is an output m by n array. the upper n by n submatrix
# of fjac contains an upper triangular matrix r with
# diagonal elements of nonincreasing magnitude such that
#
# t t t
# p *(jac *jac)*p = r *r,
#
# where p is a permutation matrix and jac is the final
# calculated jacobian. column j of p is column ipvt(j)
# (see below) of the identity matrix. the lower trapezoidal
# part of fjac contains information generated during
# the computation of r.
#
# ldfjac is a positive integer input variable not less than m
# which specifies the leading dimension of the array fjac.
#
# ipvt is an integer output array of length n. ipvt
# defines a permutation matrix p such that jac*p = q*r,
# where jac is the final calculated jacobian, q is
# orthogonal (not stored), and r is upper triangular
# with diagonal elements of nonincreasing magnitude.
# column j of p is column ipvt(j) of the identity matrix.
#
# qtf is an output array of length n which contains
# the first n elements of the vector (q transpose)*fvec.
#
# wa1, wa2, and wa3 are work arrays of length n.
#
# wa4 is a work array of length m.
#
# subprograms called
#
# user-supplied ...... fcn
#
# minpack-supplied ... dpmpar,enorm,fdjac2,qrfac
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
class mpfit:
blas_enorm32, = scipy.lib.blas.get_blas_funcs(['nrm2'],numpy.array([0],dtype=numpy.float32))
blas_enorm64, = scipy.lib.blas.get_blas_funcs(['nrm2'],numpy.array([0],dtype=numpy.float64))
def __init__(self, fcn, xall=None, functkw={}, parinfo=None,
ftol=1.e-10, xtol=1.e-10, gtol=1.e-10,
damp=0., maxiter=200, factor=100., nprint=1,
iterfunct='default', iterkw={}, nocovar=0,
rescale=0, autoderivative=1, quiet=0,
diag=None, epsfcn=None, debug=0):
"""
Inputs:
fcn:
The function to be minimized. The function should return the weighted
deviations between the model and the data, as described above.
xall:
An array of starting values for each of the parameters of the model.
The number of parameters should be fewer than the number of measurements.
This parameter is optional if the parinfo keyword is used (but see
parinfo). The parinfo keyword provides a mechanism to fix or constrain
individual parameters.
Keywords:
autoderivative:
If this is set, derivatives of the function will be computed
automatically via a finite differencing procedure. If not set, then
fcn must provide the (analytical) derivatives.
Default: set (=1)
NOTE: to supply your own analytical derivatives,
explicitly pass autoderivative=0
ftol:
A nonnegative input variable. Termination occurs when both the actual
and predicted relative reductions in the sum of squares are at most
ftol (and status is accordingly set to 1 or 3). Therefore, ftol
measures the relative error desired in the sum of squares.
Default: 1E-10
functkw:
A dictionary which contains the parameters to be passed to the
user-supplied function specified by fcn via the standard Python
keyword dictionary mechanism. This is the way you can pass additional
data to your user-supplied function without using global variables.
Consider the following example:
if functkw = {'xval':[1.,2.,3.], 'yval':[1.,4.,9.],
'errval':[1.,1.,1.] }
then the user supplied function should be declared like this:
def myfunct(p, fjac=None, xval=None, yval=None, errval=None):
Default: {} No extra parameters are passed to the user-supplied
function.
gtol:
A nonnegative input variable. Termination occurs when the cosine of
the angle between fvec and any column of the jacobian is at most gtol
in absolute value (and status is accordingly set to 4). Therefore,
gtol measures the orthogonality desired between the function vector
and the columns of the jacobian.
Default: 1e-10
iterkw:
The keyword arguments to be passed to iterfunct via the dictionary
keyword mechanism. This should be a dictionary and is similar in
operation to FUNCTKW.
Default: {} No arguments are passed.
iterfunct:
The name of a function to be called upon each NPRINT iteration of the
MPFIT routine. It should be declared in the following way:
def iterfunct(myfunct, p, iter, fnorm, functkw=None,
parinfo=None, quiet=0, dof=None, [iterkw keywords here])
# perform custom iteration update
iterfunct must accept all three keyword parameters (FUNCTKW, PARINFO
and QUIET).
myfunct: The user-supplied function to be minimized,
p: The current set of model parameters
iter: The iteration number
functkw: The arguments to be passed to myfunct.
fnorm: The chi-squared value.
quiet: Set when no textual output should be printed.
dof: The number of degrees of freedom, normally the number of points
less the number of free parameters.
See below for documentation of parinfo.
In implementation, iterfunct can perform updates to the terminal or
graphical user interface, to provide feedback while the fit proceeds.
If the fit is to be stopped for any reason, then iterfunct should return a
status value between -15 and -1. Otherwise it should return None
(e.g. no return statement) or 0.
In principle, iterfunct should probably not modify the parameter values,
because it may interfere with the algorithm's stability. In practice it
is allowed.
Default: an internal routine is used to print the parameter values.
Set iterfunct=None if there is no user-defined routine and you don't
want the internal default routine to be called.
maxiter:
The maximum number of iterations to perform. If the number is exceeded,
then the status value is set to 5 and MPFIT returns.
Default: 200 iterations
nocovar:
Set this keyword to prevent the calculation of the covariance matrix
before returning (see COVAR)
Default: clear (=0) The covariance matrix is returned
nprint:
The frequency with which iterfunct is called. A value of 1 indicates
that iterfunct is called with every iteration, while 2 indicates every
other iteration, etc. Note that several Levenberg-Marquardt attempts
can be made in a single iteration.
Default value: 1
parinfo:
Provides a mechanism for more sophisticated constraints to be placed on
parameter values. When parinfo is not passed, then it is assumed that
all parameters are free and unconstrained. Values in parinfo are never
modified during a call to MPFIT.
See description above for the structure of PARINFO.
Default value: None All parameters are free and unconstrained.
quiet:
Set this keyword when no textual output should be printed by MPFIT
damp:
A scalar number, indicating the cut-off value of residuals where
"damping" will occur. Residuals with magnitudes greater than this
number will be replaced by their hyperbolic tangent. This partially
mitigates the so-called large residual problem inherent in
least-squares solvers (as for the test problem CURVI,
http://www.maxthis.com/curviex.htm).
A value of 0 indicates no damping.
Default: 0
Note: DAMP doesn't work with autoderivative=0
xtol:
A nonnegative input variable. Termination occurs when the relative error
between two consecutive iterates is at most xtol (and status is
accordingly set to 2 or 3). Therefore, xtol measures the relative error
desired in the approximate solution.
Default: 1E-10
Outputs:
Returns an object of type mpfit. The results are attributes of this class,
e.g. mpfit.status, mpfit.errmsg, mpfit.params, mpfit.niter, mpfit.covar.
.status
An integer status code is returned. All values greater than zero can
represent success (however .status == 5 may indicate failure to
converge). It can have one of the following values:
-16
A parameter or function value has become infinite or an undefined
number. This is usually a consequence of numerical overflow in the
user's model function, which must be avoided.
-15 to -1
These are error codes that either MYFUNCT or iterfunct may return to
terminate the fitting process. Values from -15 to -1 are reserved
for the user functions and will not clash with MPFIT.
0 Improper input parameters.
1 Both actual and predicted relative reductions in the sum of squares
are at most ftol.
2 Relative error between two consecutive iterates is at most xtol
3 Conditions for status = 1 and status = 2 both hold.
4 The cosine of the angle between fvec and any column of the jacobian
is at most gtol in absolute value.
5 The maximum number of iterations has been reached.
6 ftol is too small. No further reduction in the sum of squares is
possible.
7 xtol is too small. No further improvement in the approximate solution
x is possible.
8 gtol is too small. fvec is orthogonal to the columns of the jacobian
to machine precision.
.fnorm
The value of the summed squared residuals for the returned parameter
values.
.covar
The covariance matrix for the set of parameters returned by MPFIT.
The matrix is NxN where N is the number of parameters. The square root
of the diagonal elements gives the formal 1-sigma statistical errors on
the parameters if errors were treated "properly" in fcn.
Parameter errors are also returned in .perror.
To compute the correlation matrix, pcor, use this example:
cov = mpfit.covar
pcor = cov * 0.
for i in range(n):
for j in range(n):
pcor[i,j] = cov[i,j]/sqrt(cov[i,i]*cov[j,j])
If nocovar is set or MPFIT terminated abnormally, then .covar is set to
a scalar with value None.
.errmsg
A string error or warning message is returned.
.nfev
The number of calls to MYFUNCT performed.
.niter
The number of iterations completed.
.perror
The formal 1-sigma errors in each parameter, computed from the
covariance matrix. If a parameter is held fixed, or if it touches a
boundary, then the error is reported as zero.
If the fit is unweighted (i.e. no errors were given, or the weights
were uniformly set to unity), then .perror will probably not represent
the true parameter uncertainties.
*If* you can assume that the true reduced chi-squared value is unity --
meaning that the fit is implicitly assumed to be of good quality --
then the estimated parameter uncertainties can be computed by scaling
.perror by the measured chi-squared value.
dof = len(x) - len(mpfit.params) # deg of freedom
# scaled uncertainties
pcerror = mpfit.perror * sqrt(mpfit.fnorm / dof)
"""
self.niter = 0
self.params = None
self.covar = None
self.perror = None
self.status = 0 # Invalid input flag set while we check inputs
self.debug = debug
self.errmsg = ''
self.nfev = 0
self.damp = damp
self.dof=0
if fcn is None:
self.errmsg = "Usage: parms = mpfit('myfunct', ... )"
return
if iterfunct == 'default':
iterfunct = self.defiter
# Parameter damping doesn't work when user is providing their own
# gradients.
if (self.damp != 0) and (autoderivative == 0):
self.errmsg = 'ERROR: keywords DAMP and AUTODERIVATIVE are mutually exclusive'
return
# Parameters can either be stored in parinfo, or x. x takes precedence if it exists
if (xall is None) and (parinfo is None):
self.errmsg = 'ERROR: must pass parameters in P or PARINFO'
return
# Be sure that PARINFO is of the right type
if parinfo is not None:
if type(parinfo) != types.ListType:
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
else:
if type(parinfo[0]) != types.DictionaryType:
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
if ((xall is not None) and (len(xall) != len(parinfo))):
self.errmsg = 'ERROR: number of elements in PARINFO and P must agree'
return
# If the parameters were not specified at the command line, then
# extract them from PARINFO
if xall is None:
xall = self.parinfo(parinfo, 'value')
if xall is None:
self.errmsg = 'ERROR: either P or PARINFO(*)["value"] must be supplied.'
return
# Make sure parameters are numpy arrays
xall = numpy.asarray(xall)
# If xall is not a float array, or is a float array narrower than
# 64 bits, convert it to double precision
if xall.dtype.kind != 'f' or xall.dtype.itemsize<=4:
xall = xall.astype(numpy.float)
npar = len(xall)
self.fnorm = -1.
fnorm1 = -1.
# TIED parameters?
ptied = self.parinfo(parinfo, 'tied', default='', n=npar)
self.qanytied = 0
for i in range(npar):
ptied[i] = ptied[i].strip()
if ptied[i] != '':
self.qanytied = 1
self.ptied = ptied
# FIXED parameters ?
pfixed = self.parinfo(parinfo, 'fixed', default=0, n=npar)
pfixed = (pfixed == 1)
for i in range(npar):
pfixed[i] = pfixed[i] or (ptied[i] != '') # Tied parameters are also effectively fixed
# Finite differencing step, absolute and relative, and sidedness of deriv.
step = self.parinfo(parinfo, 'step', default=0., n=npar)
dstep = self.parinfo(parinfo, 'relstep', default=0., n=npar)
dside = self.parinfo(parinfo, 'mpside', default=0, n=npar)
# Maximum and minimum steps allowed to be taken in one iteration
maxstep = self.parinfo(parinfo, 'mpmaxstep', default=0., n=npar)
minstep = self.parinfo(parinfo, 'mpminstep', default=0., n=npar)
qmin = minstep != 0
qmin[:] = False # Remove minstep for now!!
qmax = maxstep != 0
if numpy.any(qmin & qmax & (maxstep<minstep)):
self.errmsg = 'ERROR: MPMINSTEP is greater than MPMAXSTEP'
return
wh = (numpy.nonzero((qmin!=0.) | (qmax!=0.)))[0]
qminmax = len(wh) > 0
# Finish up the free parameters
ifree = (numpy.nonzero(pfixed != 1))[0]
nfree = len(ifree)
if nfree == 0:
self.errmsg = 'ERROR: no free parameters'
return
# Compose only VARYING parameters
self.params = xall.copy() # self.params is the set of parameters to be returned
x = self.params[ifree] # x is the set of free parameters
# LIMITED parameters ?
limited = self.parinfo(parinfo, 'limited', default=[0,0], n=npar)
limits = self.parinfo(parinfo, 'limits', default=[0.,0.], n=npar)
if (limited is not None) and (limits is not None):
# Error checking on limits in parinfo
if numpy.any((limited[:,0] & (xall < limits[:,0])) |
(limited[:,1] & (xall > limits[:,1]))):
self.errmsg = 'ERROR: parameters are not within PARINFO limits'
return
if numpy.any((limited[:,0] & limited[:,1]) &
(limits[:,0] >= limits[:,1]) &
(pfixed == 0)):
self.errmsg = 'ERROR: PARINFO parameter limits are not consistent'
return
# Transfer structure values to local variables
qulim = (limited[:,1])[ifree]
ulim = (limits [:,1])[ifree]
qllim = (limited[:,0])[ifree]
llim = (limits [:,0])[ifree]
if numpy.any((qulim!=0.) | (qllim!=0.)):
qanylim = 1
else:
qanylim = 0
else:
# Fill in local variables with dummy values
qulim = numpy.zeros(nfree)
ulim = x * 0.
qllim = qulim
llim = x * 0.
qanylim = 0
n = len(x)
# Check input parameters for errors
if (n < 0) or (ftol <= 0) or (xtol <= 0) or (gtol <= 0) \
or (maxiter < 0) or (factor <= 0):
self.errmsg = 'ERROR: input keywords are inconsistent'
return
if rescale != 0:
self.errmsg = 'ERROR: DIAG parameter scales are inconsistent'
if len(diag) < n:
return
if numpy.any(diag <= 0):
return
self.errmsg = ''
[self.status, fvec] = self.call(fcn, self.params, functkw)
if self.status < 0:
self.errmsg = 'ERROR: first call to "'+str(fcn)+'" failed'
return
# If the returned fvec has more than four bytes per element, assume
# double precision
# It is important that the machar is determined by the precision of
# the returned value, not by the precision of the input array
if numpy.array([fvec]).dtype.itemsize>4:
self.machar = machar(double=1)
self.blas_enorm = mpfit.blas_enorm64
else:
self.machar = machar(double=0)
self.blas_enorm = mpfit.blas_enorm32
machep = self.machar.machep
m = len(fvec)
if m < n:
self.errmsg = 'ERROR: number of parameters must not exceed data'
return
self.dof = m-nfree
self.fnorm = self.enorm(fvec)
# Initialize the Levenberg-Marquardt parameter and iteration counter
par = 0.
self.niter = 1
qtf = x * 0.
self.status = 0
# Beginning of the outer loop
while(1):
# If requested, call fcn to enable printing of iterates
self.params[ifree] = x
if self.qanytied:
self.params = self.tie(self.params, ptied)
if (nprint > 0) and (iterfunct is not None):
if ((self.niter-1) % nprint) == 0:
mperr = 0
xnew0 = self.params.copy()
dof = numpy.max([len(fvec) - len(x), 0])
status = iterfunct(fcn, self.params, self.niter, self.fnorm**2,
functkw=functkw, parinfo=parinfo, quiet=quiet,
dof=dof, **iterkw)
if status is not None:
self.status = status
# Check for user termination
if self.status < 0:
self.errmsg = 'WARNING: premature termination by ' + str(iterfunct)
return
# If parameters were changed (grrr..) then re-tie
if numpy.max(numpy.abs(xnew0-self.params)) > 0:
if self.qanytied:
self.params = self.tie(self.params, ptied)
x = self.params[ifree]
# Calculate the jacobian matrix
self.status = 2
catch_msg = 'calling MPFIT_FDJAC2'
fjac = self.fdjac2(fcn, x, fvec, step, qulim, ulim, dside,
epsfcn=epsfcn,
autoderivative=autoderivative, dstep=dstep,
functkw=functkw, ifree=ifree, xall=self.params)
if fjac is None:
self.errmsg = 'WARNING: premature termination by FDJAC2'
return
# Determine if any of the parameters are pegged at the limits
if qanylim:
catch_msg = 'zeroing derivatives of pegged parameters'
whlpeg = (numpy.nonzero(qllim & (x == llim)))[0]
nlpeg = len(whlpeg)
whupeg = (numpy.nonzero(qulim & (x == ulim)))[0]
nupeg = len(whupeg)
# See if any "pegged" values should keep their derivatives
if nlpeg > 0:
# Total derivative of sum wrt lower pegged parameters
for i in range(nlpeg):
sum0 = sum(fvec * fjac[:,whlpeg[i]])
if sum0 > 0:
fjac[:,whlpeg[i]] = 0
if nupeg > 0:
# Total derivative of sum wrt upper pegged parameters
for i in range(nupeg):
sum0 = sum(fvec * fjac[:,whupeg[i]])
if sum0 < 0:
fjac[:,whupeg[i]] = 0
# Compute the QR factorization of the jacobian
[fjac, ipvt, wa1, wa2] = self.qrfac(fjac, pivot=1)
# On the first iteration if "diag" is unspecified, scale
# according to the norms of the columns of the initial jacobian
catch_msg = 'rescaling diagonal elements'
if self.niter == 1:
if (rescale==0) or (len(diag) < n):
diag = wa2.copy()
diag[diag == 0] = 1.
# On the first iteration, calculate the norm of the scaled x
# and initialize the step bound delta
wa3 = diag * x
xnorm = self.enorm(wa3)
delta = factor*xnorm
if delta == 0.:
delta = factor
# Form (q transpose)*fvec and store the first n components in qtf
catch_msg = 'forming (q transpose)*fvec'
wa4 = fvec.copy()
for j in range(n):
lj = ipvt[j]
temp3 = fjac[j,lj]
if temp3 != 0:
fj = fjac[j:,lj]
wj = wa4[j:]
# *** optimization wa4(j:*)
wa4[j:] = wj - fj * sum(fj*wj) / temp3
fjac[j,lj] = wa1[j]
qtf[j] = wa4[j]
# From this point on, only the square matrix, consisting of the
# triangle of R, is needed.
fjac = fjac[0:n, 0:n]
fjac.shape = [n, n]
temp = fjac.copy()
for i in range(n):
temp[:,i] = fjac[:, ipvt[i]]
fjac = temp.copy()
# Check for overflow. This should be a cheap test here since FJAC
# has been reduced to a (small) square matrix, and the test is
# O(N^2).
#wh = where(finite(fjac) EQ 0, ct)
#if ct GT 0 then goto, FAIL_OVERFLOW
# Compute the norm of the scaled gradient
catch_msg = 'computing the scaled gradient'
gnorm = 0.
if self.fnorm != 0:
for j in range(n):
l = ipvt[j]
if wa2[l] != 0:
sum0 = sum(fjac[0:j+1,j]*qtf[0:j+1])/self.fnorm
gnorm = numpy.max([gnorm,numpy.abs(sum0/wa2[l])])
# Test for convergence of the gradient norm
if gnorm <= gtol:
self.status = 4
break
if maxiter == 0:
self.status = 5
break
# Rescale if necessary
if rescale == 0:
diag = numpy.choose(diag>wa2, (wa2, diag))
# Beginning of the inner loop
while(1):
# Determine the levenberg-marquardt parameter
catch_msg = 'calculating LM parameter (MPFIT_)'
[fjac, par, wa1, wa2] = self.lmpar(fjac, ipvt, diag, qtf,
delta, wa1, wa2, par=par)
# Store the direction p and x+p. Calculate the norm of p
wa1 = -wa1
if (qanylim == 0) and (qminmax == 0):
# No parameter limits, so just move to new position WA2
alpha = 1.
wa2 = x + wa1
else:
# Respect the limits. If a step were to go out of bounds, then
# we should take a step in the same direction but shorter distance.
# The step should take us right to the limit in that case.
alpha = 1.
if qanylim:
# Do not allow any steps out of bounds
catch_msg = 'checking for a step out of bounds'
if nlpeg > 0:
wa1[whlpeg] = numpy.clip( wa1[whlpeg], 0., numpy.max(wa1))
if nupeg > 0:
wa1[whupeg] = numpy.clip(wa1[whupeg], numpy.min(wa1), 0.)
dwa1 = numpy.abs(wa1) > machep
whl = (numpy.nonzero(((dwa1!=0.) & qllim) & ((x + wa1) < llim)))[0]
if len(whl) > 0:
t = ((llim[whl] - x[whl]) /
wa1[whl])
alpha = numpy.min([alpha, numpy.min(t)])
whu = (numpy.nonzero(((dwa1!=0.) & qulim) & ((x + wa1) > ulim)))[0]
if len(whu) > 0:
t = ((ulim[whu] - x[whu]) /
wa1[whu])
alpha = numpy.min([alpha, numpy.min(t)])
# Obey any max step values.
if qminmax:
nwa1 = wa1 * alpha
whmax = (numpy.nonzero((qmax != 0.) & (maxstep > 0)))[0]
if len(whmax) > 0:
mrat = numpy.max(numpy.abs(nwa1[whmax]) /
numpy.abs(maxstep[ifree[whmax]]))
if mrat > 1:
alpha = alpha / mrat
# Scale the resulting vector
wa1 = wa1 * alpha
wa2 = x + wa1
# Adjust the final output values. If the step put us exactly
# on a boundary, make sure it is exact.
sgnu = (ulim >= 0) * 2. - 1.
sgnl = (llim >= 0) * 2. - 1.
# Handles case of
# ... nonzero *LIM ... ...zero * LIM
ulim1 = ulim * (1 - sgnu * machep) - (ulim == 0) * machep
llim1 = llim * (1 + sgnl * machep) + (llim == 0) * machep
wh = (numpy.nonzero((qulim!=0) & (wa2 >= ulim1)))[0]
if len(wh) > 0:
wa2[wh] = ulim[wh]
wh = (numpy.nonzero((qllim!=0.) & (wa2 <= llim1)))[0]
if len(wh) > 0:
wa2[wh] = llim[wh]
# endelse
wa3 = diag * wa1
pnorm = self.enorm(wa3)
# On the first iteration, adjust the initial step bound
if self.niter == 1:
delta = numpy.min([delta,pnorm])
self.params[ifree] = wa2
# Evaluate the function at x+p and calculate its norm
mperr = 0
catch_msg = 'calling '+str(fcn)
[self.status, wa4] = self.call(fcn, self.params, functkw)
if self.status < 0:
self.errmsg = 'WARNING: premature termination by "'+str(fcn)+'"'
return
fnorm1 = self.enorm(wa4)
# Compute the scaled actual reduction
catch_msg = 'computing convergence criteria'
actred = -1.
if (0.1 * fnorm1) < self.fnorm:
actred = - (fnorm1/self.fnorm)**2 + 1.
# Compute the scaled predicted reduction and the scaled directional
# derivative
for j in range(n):
wa3[j] = 0
wa3[0:j+1] = wa3[0:j+1] + fjac[0:j+1,j]*wa1[ipvt[j]]
# Remember, alpha is the fraction of the full LM step actually
# taken
temp1 = self.enorm(alpha*wa3)/self.fnorm
temp2 = (numpy.sqrt(alpha*par)*pnorm)/self.fnorm
prered = temp1*temp1 + (temp2*temp2)/0.5
dirder = -(temp1*temp1 + temp2*temp2)
# Compute the ratio of the actual to the predicted reduction.
ratio = 0.
if prered != 0:
ratio = actred/prered
# Update the step bound
if ratio <= 0.25:
if actred >= 0:
temp = .5
else:
temp = .5*dirder/(dirder + .5*actred)
if ((0.1*fnorm1) >= self.fnorm) or (temp < 0.1):
temp = 0.1
delta = temp*numpy.min([delta,pnorm/0.1])
par = par/temp
else:
if (par == 0) or (ratio >= 0.75):
delta = pnorm/.5
par = .5*par
# Test for successful iteration
if ratio >= 0.0001:
# Successful iteration. Update x, fvec, and their norms
x = wa2
wa2 = diag * x
fvec = wa4
xnorm = self.enorm(wa2)
self.fnorm = fnorm1
self.niter = self.niter + 1
# Tests for convergence
if (numpy.abs(actred) <= ftol) and (prered <= ftol) \
and (0.5 * ratio <= 1):
self.status = 1
if delta <= xtol*xnorm:
self.status = 2
if (numpy.abs(actred) <= ftol) and (prered <= ftol) \
and (0.5 * ratio <= 1) and (self.status == 2):
self.status = 3
if self.status != 0:
break
# Tests for termination and stringent tolerances
if self.niter >= maxiter:
self.status = 5
if (numpy.abs(actred) <= machep) and (prered <= machep) \
and (0.5*ratio <= 1):
self.status = 6
if delta <= machep*xnorm:
self.status = 7
if gnorm <= machep:
self.status = 8
if self.status != 0:
break
# End of inner loop. Repeat if iteration unsuccessful
if ratio >= 0.0001:
break
# Check for over/underflow
if ~numpy.all(numpy.isfinite(wa1) & numpy.isfinite(wa2) & \
numpy.isfinite(x)) or ~numpy.isfinite(ratio):
self.errmsg = ('ERROR: parameter or function value(s) have become '
'infinite; check model function for over- and underflow')
self.status = -16
break
#wh = where(finite(wa1) EQ 0 OR finite(wa2) EQ 0 OR finite(x) EQ 0, ct)
#if ct GT 0 OR finite(ratio) EQ 0 then begin
if self.status != 0:
break
# End of outer loop.
catch_msg = 'in the termination phase'
# Termination, either normal or user imposed.
if len(self.params) == 0:
return
if nfree == 0:
self.params = xall.copy()
else:
self.params[ifree] = x
if (nprint > 0) and (self.status > 0):
catch_msg = 'calling ' + str(fcn)
[status, fvec] = self.call(fcn, self.params, functkw)
catch_msg = 'in the termination phase'
self.fnorm = self.enorm(fvec)
if (self.fnorm is not None) and (fnorm1 is not None):
self.fnorm = numpy.max([self.fnorm, fnorm1])
self.fnorm = self.fnorm**2.
self.covar = None
self.perror = None
# (very carefully) set the covariance matrix COVAR
if (self.status > 0) and (nocovar==0) and (n is not None) \
and (fjac is not None) and (ipvt is not None):
sz = fjac.shape
if (n > 0) and (sz[0] >= n) and (sz[1] >= n) \
and (len(ipvt) >= n):
catch_msg = 'computing the covariance matrix'
cv = self.calc_covar(fjac[0:n,0:n], ipvt[0:n])
cv.shape = [n, n]
nn = len(xall)
# Fill in actual covariance matrix, accounting for fixed
# parameters.
self.covar = numpy.zeros([nn, nn], dtype=float)
for i in range(n):
self.covar[ifree,ifree[i]] = cv[:,i]
# Compute errors in parameters
catch_msg = 'computing parameter errors'
self.perror = numpy.zeros(nn, dtype=float)
d = numpy.diagonal(self.covar)
wh = (numpy.nonzero(d >= 0))[0]
if len(wh) > 0:
self.perror[wh] = numpy.sqrt(d[wh])
return
def __str__(self):
return {'params': self.params,
'niter': self.niter,
'covar': self.covar,
'perror': self.perror,
'status': self.status,
'debug': self.debug,
'errmsg': self.errmsg,
'nfev': self.nfev,
'damp': self.damp
#,'machar':self.machar
}.__str__()
# Default procedure to be called every iteration. It simply prints
# the parameter values.
def defiter(self, fcn, x, iter, fnorm=None, functkw=None,
quiet=0, iterstop=None, parinfo=None,
format=None, pformat='%.10g', dof=1):
if self.debug:
print 'Entering defiter...'
if quiet:
return
if fnorm is None:
[status, fvec] = self.call(fcn, x, functkw)
fnorm = self.enorm(fvec)**2
# Determine which parameters to print
nprint = len(x)
print "Iter ", ('%6i' % iter)," CHI-SQUARE = ",('%.10g' % fnorm)," DOF = ", ('%i' % dof)
for i in range(nprint):
if (parinfo is not None) and (parinfo[i].has_key('parname')):
p = ' ' + parinfo[i]['parname'] + ' = '
else:
p = ' P' + str(i) + ' = '
if (parinfo is not None) and (parinfo[i].has_key('mpprint')):
iprint = parinfo[i]['mpprint']
else:
iprint = 1
if iprint:
print p + (pformat % x[i]) + ' '
return 0
# DO_ITERSTOP:
# if keyword_set(iterstop) then begin
# k = get_kbrd(0)
# if k EQ string(byte(7)) then begin
# message, 'WARNING: minimization not complete', /info
# print, 'Do you want to terminate this procedure? (y/n)', $
# format='(A,$)'
# k = ''
# read, k
# if strupcase(strmid(k,0,1)) EQ 'Y' then begin
# message, 'WARNING: Procedure is terminating.', /info
# mperr = -1
# endif
# endif
# endif
# Procedure to parse the parameter values in PARINFO, which is a list of dictionaries
def parinfo(self, parinfo=None, key='a', default=None, n=0):
if self.debug:
print 'Entering parinfo...'
if (n == 0) and (parinfo is not None):
n = len(parinfo)
if n == 0:
values = default
return values
values = []
for i in range(n):
if (parinfo is not None) and (parinfo[i].has_key(key)):
values.append(parinfo[i][key])
else:
values.append(default)
# Convert to numeric arrays if possible
test = default
if type(default) == types.ListType:
test=default[0]
if isinstance(test, types.IntType):
values = numpy.asarray(values, int)
elif isinstance(test, types.FloatType):
values = numpy.asarray(values, float)
return values
# Call user function or procedure, with _EXTRA or not, with
# derivatives or not.
def call(self, fcn, x, functkw, fjac=None):
if self.debug:
print 'Entering call...'
if self.qanytied:
x = self.tie(x, self.ptied)
self.nfev = self.nfev + 1
if fjac is None:
[status, f] = fcn(x, fjac=fjac, **functkw)
if self.damp > 0:
# Apply the damping if requested. This replaces the residuals
# with the hyperbolic tangent of the residuals scaled by DAMP,
# so residuals much larger than DAMP are essentially clipped
# (saturating at magnitude one in the damped units).
f = numpy.tanh(f/self.damp)
return [status, f]
else:
return fcn(x, fjac=fjac, **functkw)
def enorm(self, vec):
ans = self.blas_enorm(vec)
return ans
def fdjac2(self, fcn, x, fvec, step=None, ulimited=None, ulimit=None, dside=None,
epsfcn=None, autoderivative=1,
functkw=None, xall=None, ifree=None, dstep=None):
if self.debug:
print 'Entering fdjac2...'
machep = self.machar.machep
if epsfcn is None:
epsfcn = machep
if xall is None:
xall = x
if ifree is None:
ifree = numpy.arange(len(xall))
if step is None:
step = x * 0.
nall = len(xall)
eps = numpy.sqrt(numpy.max([epsfcn, machep]))
m = len(fvec)
n = len(x)
# Compute analytical derivative if requested
if autoderivative == 0:
mperr = 0
fjac = numpy.zeros(nall, dtype=float)
fjac[ifree] = 1.0 # Specify which parameters need derivatives
[status, fp] = self.call(fcn, xall, functkw, fjac=fjac)
if len(fjac) != m*nall:
print 'ERROR: Derivative matrix was not computed properly.'
return None
# This definition is consistent with CURVEFIT
# Sign error found (thanks Jesus Fernandez <fernande@irm.chu-caen.fr>)
fjac.shape = [m,nall]
fjac = -fjac
# Select only the free parameters
if len(ifree) < nall:
fjac = fjac[:,ifree]
fjac.shape = [m, n]
return fjac
fjac = numpy.zeros([m, n], dtype=float)
h = eps * numpy.abs(x)
# if STEP is given, use that
# STEP includes the fixed parameters
if step is not None:
stepi = step[ifree]
wh = (numpy.nonzero(stepi > 0))[0]
if len(wh) > 0:
h[wh] = stepi[wh]
# if relative step is given, use that
# DSTEP includes the fixed parameters
if len(dstep) > 0:
dstepi = dstep[ifree]
wh = (numpy.nonzero(dstepi > 0))[0]
if len(wh) > 0:
h[wh] = numpy.abs(dstepi[wh]*x[wh])
# In case any of the step values are zero
h[h == 0] = eps
# Reverse the sign of the step if we are up against the parameter
# limit, or if the user requested it.
# DSIDE includes the fixed parameters (ULIMITED/ULIMIT have only
# varying ones)
mask = dside[ifree] == -1
if len(ulimited) > 0 and len(ulimit) > 0:
mask = (mask | ((ulimited!=0) & (x > ulimit-h)))
wh = (numpy.nonzero(mask))[0]
if len(wh) > 0:
h[wh] = - h[wh]
# Loop through parameters, computing the derivative for each
for j in range(n):
xp = xall.copy()
xp[ifree[j]] = xp[ifree[j]] + h[j]
[status, fp] = self.call(fcn, xp, functkw)
if status < 0:
return None
if numpy.abs(dside[ifree[j]]) <= 1:
# COMPUTE THE ONE-SIDED DERIVATIVE
# Note optimization fjac(0:*,j)
fjac[0:,j] = (fp-fvec)/h[j]
else:
# COMPUTE THE TWO-SIDED DERIVATIVE
xp[ifree[j]] = xall[ifree[j]] - h[j]
mperr = 0
[status, fm] = self.call(fcn, xp, functkw)
if status < 0:
return None
# Note optimization fjac(0:*,j)
fjac[0:,j] = (fp-fm)/(2*h[j])
return fjac
# Original FORTRAN documentation
# **********
#
# subroutine qrfac
#
# this subroutine uses householder transformations with column
# pivoting (optional) to compute a qr factorization of the
# m by n matrix a. that is, qrfac determines an orthogonal
# matrix q, a permutation matrix p, and an upper trapezoidal
# matrix r with diagonal elements of nonincreasing magnitude,
# such that a*p = q*r. the householder transformation for
# column k, k = 1,2,...,min(m,n), is of the form
#
# t
# i - (1/u(k))*u*u
#
# where u has zeros in the first k-1 positions. the form of
# this transformation and the method of pivoting first
# appeared in the corresponding linpack subroutine.
#
# the subroutine statement is
#
# subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa)
#
# where
#
# m is a positive integer input variable set to the number
# of rows of a.
#
# n is a positive integer input variable set to the number
# of columns of a.
#
# a is an m by n array. on input a contains the matrix for
# which the qr factorization is to be computed. on output
# the strict upper trapezoidal part of a contains the strict
# upper trapezoidal part of r, and the lower trapezoidal
# part of a contains a factored form of q (the non-trivial
# elements of the u vectors described above).
#
# lda is a positive integer input variable not less than m
# which specifies the leading dimension of the array a.
#
# pivot is a logical input variable. if pivot is set true,
# then column pivoting is enforced. if pivot is set false,
# then no column pivoting is done.
#
# ipvt is an integer output array of length lipvt. ipvt
# defines the permutation matrix p such that a*p = q*r.
# column j of p is column ipvt(j) of the identity matrix.
# if pivot is false, ipvt is not referenced.
#
# lipvt is a positive integer input variable. if pivot is false,
# then lipvt may be as small as 1. if pivot is true, then
# lipvt must be at least n.
#
# rdiag is an output array of length n which contains the
# diagonal elements of r.
#
# acnorm is an output array of length n which contains the
# norms of the corresponding columns of the input matrix a.
# if this information is not needed, then acnorm can coincide
# with rdiag.
#
# wa is a work array of length n. if pivot is false, then wa
# can coincide with rdiag.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm
#
# fortran-supplied ... dmax1,dsqrt,min0
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
#
# PIVOTING / PERMUTING:
#
# Upon return, A(*,*) is in standard parameter order, A(*,IPVT) is in
# permuted order.
#
# RDIAG is in permuted order.
# ACNORM is in standard parameter order.
#
#
# NOTE: in IDL the factors appear slightly differently than described
# above. The matrix A is still m x n where m >= n.
#
# The "upper" triangular matrix R is actually stored in the strict
# lower left triangle of A under the standard notation of IDL.
#
# The reflectors that generate Q are in the upper trapezoid of A upon
# output.
#
# EXAMPLE: decompose the matrix [[9.,2.,6.],[4.,8.,7.]]
# aa = [[9.,2.,6.],[4.,8.,7.]]
# mpfit_qrfac, aa, aapvt, rdiag, aanorm
# IDL> print, aa
# 1.81818* 0.181818* 0.545455*
# -8.54545+ 1.90160* 0.432573*
# IDL> print, rdiag
# -11.0000+ -7.48166+
#
# The components marked with a * are the components of the
# reflectors, and those marked with a + are components of R.
#
# To reconstruct Q and R we proceed as follows. First R.
# r = fltarr(m, n)
# for i = 0, n-1 do r(0:i,i) = aa(0:i,i) # fill in lower diag
# r(lindgen(n)*(m+1)) = rdiag
#
# Next, Q, which are composed from the reflectors. Each reflector v
# is taken from the upper trapezoid of aa, and converted to a matrix
# via (I - 2 vT . v / (v . vT)).
#
# hh = ident # identity matrix
# for i = 0, n-1 do begin
# v = aa(*,i) & if i GT 0 then v(0:i-1) = 0 # extract reflector
# hh = hh # (ident - 2*(v # v)/total(v * v)) # generate matrix
# endfor
#
# Test the result:
# IDL> print, hh # transpose(r)
# 9.00000 4.00000
# 2.00000 8.00000
# 6.00000 7.00000
#
# Note that it is usually never necessary to form the Q matrix
# explicitly, and MPFIT does not.
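#
# A hedged NumPy sketch of the same reconstruction for this Python port
# (illustrative only; it assumes an mpfit instance m whose machar is
# already initialized, and that no zero-norm column triggered the early
# break below):
#
# aa = numpy.array([[9., 4.], [2., 8.], [6., 7.]]) # m x n, m >= n
# a, ipvt, rdiag, acnorm = m.qrfac(aa.copy(), pivot=1)
# n = aa.shape[1]
# r = numpy.triu(a[:n, ipvt], 1) + numpy.diag(rdiag)
#
# Each reflector v is a[j:, ipvt[j]] (head element included); its
# Householder block on rows j and below is I - outer(v, v)/v[0], and
# multiplying these blocks for j = 0, ..., n-1 gives Q, after which
# q.dot(r) reproduces aa[:, ipvt] to roundoff.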
def qrfac(self, a, pivot=0):
if self.debug: print 'Entering qrfac...'
machep = self.machar.machep
sz = a.shape
m = sz[0]
n = sz[1]
# Compute the initial column norms and initialize arrays
acnorm = numpy.zeros(n, dtype=float)
for j in range(n):
acnorm[j] = self.enorm(a[:,j])
rdiag = acnorm.copy()
wa = rdiag.copy()
ipvt = numpy.arange(n)
# Reduce a to r with householder transformations
minmn = numpy.min([m,n])
for j in range(minmn):
if pivot != 0:
# Bring the column of largest norm into the pivot position
rmax = numpy.max(rdiag[j:])
kmax = (numpy.nonzero(rdiag[j:] == rmax))[0]
ct = len(kmax)
kmax = kmax + j
if ct > 0:
kmax = kmax[0]
# Exchange rows via the pivot only. Avoid actually exchanging
# the rows, in case there is lots of memory transfer. The
# exchange occurs later, within the body of MPFIT, after the
# extraneous columns of the matrix have been shed.
if kmax != j:
ipvt[j], ipvt[kmax] = ipvt[kmax], ipvt[j]
rdiag[kmax] = rdiag[j]
wa[kmax] = wa[j]
# Compute the householder transformation to reduce the jth
# column of A to a multiple of the jth unit vector
lj = ipvt[j]
ajj = a[j:,lj]
ajnorm = self.enorm(ajj)
if ajnorm == 0:
break
if a[j,lj] < 0:
ajnorm = -ajnorm
ajj = ajj / ajnorm
ajj[0] = ajj[0] + 1
# *** Note optimization a(j:*,j)
a[j:,lj] = ajj
# Apply the transformation to the remaining columns
# and update the norms
# NOTE to SELF: tried to optimize this by removing the loop,
# but it actually got slower. Reverted to "for" loop to keep
# it simple.
if j+1 < n:
for k in range(j+1, n):
lk = ipvt[k]
ajk = a[j:,lk]
# *** Note optimization a(j:*,lk)
# (corrected 20 Jul 2000)
if a[j,lj] != 0:
a[j:,lk] = ajk - ajj * sum(ajk*ajj)/a[j,lj]
if (pivot != 0) and (rdiag[k] != 0):
temp = a[j,lk]/rdiag[k]
rdiag[k] = rdiag[k] * numpy.sqrt(numpy.max([(1.-temp**2), 0.]))
temp = rdiag[k]/wa[k]
if (0.05*temp*temp) <= machep:
rdiag[k] = self.enorm(a[j+1:,lk])
wa[k] = rdiag[k]
rdiag[j] = -ajnorm
return [a, ipvt, rdiag, acnorm]
# Original FORTRAN documentation
# **********
#
# subroutine qrsolv
#
# given an m by n matrix a, an n by n diagonal matrix d,
# and an m-vector b, the problem is to determine an x which
# solves the system
#
# a*x = b , d*x = 0 ,
#
# in the least squares sense.
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then qrsolv expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. the system
# a*x = b, d*x = 0, is then equivalent to
#
# t t
# r*z = q *b , p *d*p*z = 0 ,
#
# where x = p*z. if this system does not have full rank,
# then a least squares solution is obtained. on output qrsolv
# also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + d*d)*p = s *s .
#
# s is computed within qrsolv and may be of separate interest.
#
# the subroutine statement is
#
# subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, d*x = 0.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
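# A hedged NumPy cross-check of what qrsolv computes (illustrative
# only): the least squares solution of a*x = b, d*x = 0 is the ordinary
# least squares solution of the stacked system
#
# aug = numpy.vstack([a, numpy.diag(d)]) # (m+n) x n
# rhs = numpy.concatenate([b, numpy.zeros(len(d))])
# x_ref = numpy.linalg.lstsq(aug, rhs)[0]
#
# which should agree, to roundoff, with the x returned below when r,
# ipvt and qtb come from the QR factorization of the same a and b.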
def qrsolv(self, r, ipvt, diag, qtb, sdiag):
if self.debug:
print 'Entering qrsolv...'
sz = r.shape
m = sz[0]
n = sz[1]
# copy r and (q transpose)*b to preserve input and initialize s.
# in particular, save the diagonal elements of r in x.
for j in range(n):
r[j:n,j] = r[j,j:n]
x = numpy.diagonal(r)
wa = qtb.copy()
# Eliminate the diagonal matrix d using a givens rotation
for j in range(n):
l = ipvt[j]
if diag[l] == 0:
break
sdiag[j:] = 0
sdiag[j] = diag[l]
# The transformations to eliminate the row of d modify only a
# single element of (q transpose)*b beyond the first n, which
# is initially zero.
qtbpj = 0.
for k in range(j,n):
if sdiag[k] == 0:
break
if numpy.abs(r[k,k]) < numpy.abs(sdiag[k]):
cotan = r[k,k]/sdiag[k]
sine = 0.5/numpy.sqrt(.25 + .25*cotan*cotan)
cosine = sine*cotan
else:
tang = sdiag[k]/r[k,k]
cosine = 0.5/numpy.sqrt(.25 + .25*tang*tang)
sine = cosine*tang
# Compute the modified diagonal element of r and the
# modified element of ((q transpose)*b,0).
r[k,k] = cosine*r[k,k] + sine*sdiag[k]
temp = cosine*wa[k] + sine*qtbpj
qtbpj = -sine*wa[k] + cosine*qtbpj
wa[k] = temp
# Accumulate the transformation in the row of s
if n > k+1:
temp = cosine*r[k+1:n,k] + sine*sdiag[k+1:n]
sdiag[k+1:n] = -sine*r[k+1:n,k] + cosine*sdiag[k+1:n]
r[k+1:n,k] = temp
sdiag[j] = r[j,j]
r[j,j] = x[j]
# Solve the triangular system for z. If the system is singular
# then obtain a least squares solution
nsing = n
wh = (numpy.nonzero(sdiag == 0))[0]
if len(wh) > 0:
nsing = wh[0]
wa[nsing:] = 0
if nsing >= 1:
wa[nsing-1] = wa[nsing-1]/sdiag[nsing-1] # Degenerate case
# *** Reverse loop ***
for j in range(nsing-2,-1,-1):
sum0 = sum(r[j+1:nsing,j]*wa[j+1:nsing])
wa[j] = (wa[j]-sum0)/sdiag[j]
# Permute the components of z back to components of x
x[ipvt] = wa
return (r, x, sdiag)
# Original FORTRAN documentation
#
# subroutine lmpar
#
# given an m by n matrix a, an n by n nonsingular diagonal
# matrix d, an m-vector b, and a positive number delta,
# the problem is to determine a value for the parameter
# par such that if x solves the system
#
# a*x = b , sqrt(par)*d*x = 0 ,
#
# in the least squares sense, and dxnorm is the euclidean
# norm of d*x, then either par is zero and
#
# (dxnorm-delta) .le. 0.1*delta ,
#
# or par is positive and
#
# abs(dxnorm-delta) .le. 0.1*delta .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then lmpar expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. on output
# lmpar also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + par*d*d)*p = s *s .
#
# s is employed within lmpar and may be of separate interest.
#
# only a few iterations are generally needed for convergence
# of the algorithm. if, however, the limit of 10 iterations
# is reached, then the output par will contain the best
# value obtained so far.
#
# the subroutine statement is
#
# subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag,
# wa1,wa2)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# delta is a positive input variable which specifies an upper
# bound on the euclidean norm of d*x.
#
# par is a nonnegative variable. on input par contains an
# initial estimate of the levenberg-marquardt parameter.
# on output par contains the final estimate.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, sqrt(par)*d*x = 0,
# for the output par.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa1 and wa2 are work arrays of length n.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm,qrsolv
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
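# A hedged dense-algebra sketch of the step lmpar returns (illustrative
# only): for a given trial par, the output x solves the damped normal
# equations
#
# lhs = a.T.dot(a) + par * numpy.diag(diag ** 2)
# x_par = numpy.linalg.solve(lhs, a.T.dot(b))
#
# and lmpar adjusts par (bracketed by parl and paru, at most 10
# iterations) until abs(enorm(diag * x_par) - delta) <= 0.1 * delta, or
# until par = 0 already satisfies that bound.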
def lmpar(self, r, ipvt, diag, qtb, delta, x, sdiag, par=None):
if self.debug:
print 'Entering lmpar...'
dwarf = self.machar.minnum
machep = self.machar.machep
sz = r.shape
m = sz[0]
n = sz[1]
# Compute and store in x the gauss-newton direction. If the
# jacobian is rank-deficient, obtain a least-squares solution
nsing = n
wa1 = qtb.copy()
rthresh = numpy.max(numpy.abs(numpy.diagonal(r))) * machep
wh = (numpy.nonzero(numpy.abs(numpy.diagonal(r)) < rthresh))[0]
if len(wh) > 0:
nsing = wh[0]
wa1[wh[0]:] = 0
if nsing >= 1:
# *** Reverse loop ***
for j in range(nsing-1,-1,-1):
wa1[j] = wa1[j]/r[j,j]
if j-1 >= 0:
wa1[0:j] = wa1[0:j] - r[0:j,j]*wa1[j]
# Note: ipvt here is a permutation array
x[ipvt] = wa1
# Initialize the iteration counter. Evaluate the function at the
# origin, and test for acceptance of the gauss-newton direction
iter = 0
wa2 = diag * x
dxnorm = self.enorm(wa2)
fp = dxnorm - delta
if fp <= 0.1*delta:
return [r, 0., x, sdiag]
# If the jacobian is not rank deficient, the newton step provides a
# lower bound, parl, for the zero of the function. Otherwise set
# this bound to zero.
parl = 0.
if nsing >= n:
wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
wa1[0] = wa1[0] / r[0,0] # Degenerate case
for j in range(1,n): # Note "1" here, not zero
sum0 = sum(r[0:j,j]*wa1[0:j])
wa1[j] = (wa1[j] - sum0)/r[j,j]
temp = self.enorm(wa1)
parl = ((fp/delta)/temp)/temp
# Calculate an upper bound, paru, for the zero of the function
for j in range(n):
sum0 = sum(r[0:j+1,j]*qtb[0:j+1])
wa1[j] = sum0/diag[ipvt[j]]
gnorm = self.enorm(wa1)
paru = gnorm/delta
if paru == 0:
paru = dwarf/numpy.min([delta,0.1])
# If the input par lies outside of the interval (parl,paru), set
# par to the closer endpoint
par = numpy.max([par,parl])
par = numpy.min([par,paru])
if par == 0:
par = gnorm/dxnorm
# Beginning of an iteration
while(1):
iter = iter + 1
# Evaluate the function at the current value of par
if par == 0:
par = numpy.max([dwarf, paru*0.001])
temp = numpy.sqrt(par)
wa1 = temp * diag
[r, x, sdiag] = self.qrsolv(r, ipvt, wa1, qtb, sdiag)
wa2 = diag*x
dxnorm = self.enorm(wa2)
temp = fp
fp = dxnorm - delta
if (numpy.abs(fp) <= 0.1*delta) or \
((parl == 0) and (fp <= temp) and (temp < 0)) or \
(iter == 10):
break
# Compute the newton correction
wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
for j in range(n-1):
wa1[j] = wa1[j]/sdiag[j]
wa1[j+1:n] = wa1[j+1:n] - r[j+1:n,j]*wa1[j]
wa1[n-1] = wa1[n-1]/sdiag[n-1] # Degenerate case
temp = self.enorm(wa1)
parc = ((fp/delta)/temp)/temp
# Depending on the sign of the function, update parl or paru
if fp > 0:
parl = numpy.max([parl,par])
if fp < 0:
paru = numpy.min([paru,par])
# Compute an improved estimate for par
par = numpy.max([parl, par+parc])
# End of an iteration
# Termination
return [r, par, x, sdiag]
# Procedure to tie one parameter to another.
def tie(self, p, ptied=None):
if self.debug:
print 'Entering tie...'
if ptied is None:
return
for i in range(len(ptied)):
if ptied[i] == '':
continue
cmd = 'p[' + str(i) + '] = ' + ptied[i]
exec(cmd)
return p
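# A hedged illustration of the TIED convention consumed here (the
# parameter indices are only examples): an entry such as
# parinfo[2]['tied'] = '2 * p[1]'
# makes tie() execute "p[2] = 2 * p[1]" via exec on every evaluation,
# so parameter 2 always tracks twice parameter 1 and is otherwise
# treated as fixed.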
# Original FORTRAN documentation
# **********
#
# subroutine covar
#
# given an m by n matrix a, the problem is to determine
# the covariance matrix corresponding to a, defined as
#
# t
# inverse(a *a) .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then covar expects
# the full upper triangle of r and the permutation matrix p.
# the covariance matrix is then computed as
#
# t t
# p*inverse(r *r)*p .
#
# if a is nearly rank deficient, it may be desirable to compute
# the covariance matrix corresponding to the linearly independent
# columns of a. to define the numerical rank of a, covar uses
# the tolerance tol. if l is the largest integer such that
#
# abs(r(l,l)) .gt. tol*abs(r(1,1)) ,
#
# then covar computes the covariance matrix corresponding to
# the first l columns of r. for k greater than l, column
# and row ipvt(k) of the covariance matrix are set to zero.
#
# the subroutine statement is
#
# subroutine covar(n,r,ldr,ipvt,tol,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle must
# contain the full upper triangle of the matrix r. on output
# r contains the square symmetric covariance matrix.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# tol is a nonnegative input variable used to define the
# numerical rank of a in the manner described above.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs
#
# argonne national laboratory. minpack project. august 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
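# A hedged NumPy cross-check (illustrative only): for a well-conditioned
# problem the result below matches the permuted inverse normal matrix,
#
# rr_full = numpy.triu(rr) # full upper triangle of R
# p = numpy.eye(len(ipvt))[:, ipvt] # permutation matrix
# cov_ref = p.dot(numpy.linalg.inv(rr_full.T.dot(rr_full))).dot(p.T)
#
# calc_covar reproduces this while guarding against near rank
# deficiency via the tolerance tol, zeroing the rows and columns beyond
# the numerical rank.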
def calc_covar(self, rr, ipvt=None, tol=1.e-14):
if self.debug:
print 'Entering calc_covar...'
if numpy.rank(rr) != 2:
print 'ERROR: r must be a two-dimensional matrix'
return -1
s = rr.shape
n = s[0]
if s[0] != s[1]:
print 'ERROR: r must be a square matrix'
return -1
if ipvt is None:
ipvt = numpy.arange(n)
r = rr.copy()
r.shape = [n,n]
# Form the inverse of r in the full upper triangle of r
l = -1
tolr = tol * numpy.abs(r[0,0])
for k in range(n):
if numpy.abs(r[k,k]) <= tolr:
break
r[k,k] = 1./r[k,k]
for j in range(k):
temp = r[k,k] * r[j,k]
r[j,k] = 0.
r[0:j+1,k] = r[0:j+1,k] - temp*r[0:j+1,j]
l = k
# Form the full upper triangle of the inverse of (r transpose)*r
# in the full upper triangle of r
if l >= 0:
for k in range(l+1):
for j in range(k):
temp = r[j,k]
r[0:j+1,j] = r[0:j+1,j] + temp*r[0:j+1,k]
temp = r[k,k]
r[0:k+1,k] = temp * r[0:k+1,k]
# Form the full lower triangle of the covariance matrix
# in the strict lower triangle of r and in wa
wa = numpy.repeat([r[0,0]], n)
for j in range(n):
jj = ipvt[j]
sing = j > l
for i in range(j+1):
if sing:
r[i,j] = 0.
ii = ipvt[i]
if ii > jj:
r[ii,jj] = r[i,j]
if ii < jj:
r[jj,ii] = r[i,j]
wa[jj] = r[j,j]
# Symmetrize the covariance matrix in r
for j in range(n):
r[0:j+1,j] = r[j,0:j+1]
r[j,j] = wa[j]
return r
class machar:
def __init__(self, double=1):
if double == 0:
info = numpy.finfo(numpy.float32)
else:
info = numpy.finfo(numpy.float64)
self.machep = info.eps
self.maxnum = info.max
self.minnum = info.tiny
self.maxlog = numpy.log(self.maxnum)
self.minlog = numpy.log(self.minnum)
self.rdwarf = numpy.sqrt(self.minnum*1.5) * 10
self.rgiant = numpy.sqrt(self.maxnum) * 0.1
|
artemic/Bubbly
|
mpfit.py
|
Python
|
gpl-3.0
| 78,402
|
[
"Gaussian"
] |
5d60569c41b81511b2ff3a592250113f123685e92bb32f8d976cff8023c4b97b
|
# License: BSD 3 clause
import pickle
import itertools
import numpy as np
import pytest
from sklearn.metrics import DistanceMetric
from sklearn.neighbors._ball_tree import (
BallTree,
kernel_norm,
DTYPE,
ITYPE,
NeighborsHeap as NeighborsHeapBT,
simultaneous_sort as simultaneous_sort_bt,
nodeheap_sort as nodeheap_sort_bt,
)
from sklearn.neighbors._kd_tree import (
KDTree,
NeighborsHeap as NeighborsHeapKDT,
simultaneous_sort as simultaneous_sort_kdt,
nodeheap_sort as nodeheap_sort_kdt,
)
from sklearn.utils import check_random_state
from numpy.testing import assert_array_almost_equal, assert_allclose
rng = np.random.RandomState(42)
V_mahalanobis = rng.rand(3, 3)
V_mahalanobis = np.dot(V_mahalanobis, V_mahalanobis.T)
DIMENSION = 3
METRICS = {
"euclidean": {},
"manhattan": {},
"minkowski": dict(p=3),
"chebyshev": {},
"seuclidean": dict(V=rng.random_sample(DIMENSION)),
"wminkowski": dict(p=3, w=rng.random_sample(DIMENSION)),
"mahalanobis": dict(V=V_mahalanobis),
}
KD_TREE_METRICS = ["euclidean", "manhattan", "chebyshev", "minkowski"]
BALL_TREE_METRICS = list(METRICS)
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1.0 / p)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == "gaussian":
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == "tophat":
return norm * (d < h).sum(-1)
elif kernel == "epanechnikov":
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == "exponential":
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == "linear":
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == "cosine":
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError("kernel not recognized")
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
@pytest.mark.parametrize("Cls", [KDTree, BallTree])
@pytest.mark.parametrize(
"kernel", ["gaussian", "tophat", "epanechnikov", "exponential", "linear", "cosine"]
)
@pytest.mark.parametrize("h", [0.01, 0.1, 1])
@pytest.mark.parametrize("rtol", [0, 1e-5])
@pytest.mark.parametrize("atol", [1e-6, 1e-2])
@pytest.mark.parametrize("breadth_first", [True, False])
def test_kernel_density(
Cls, kernel, h, rtol, atol, breadth_first, n_samples=100, n_features=3
):
rng = check_random_state(1)
X = rng.random_sample((n_samples, n_features))
Y = rng.random_sample((n_samples, n_features))
dens_true = compute_kernel_slow(Y, X, kernel, h)
tree = Cls(X, leaf_size=10)
dens = tree.kernel_density(
Y, h, atol=atol, rtol=rtol, kernel=kernel, breadth_first=breadth_first
)
assert_allclose(dens, dens_true, atol=atol, rtol=max(rtol, 1e-7))
@pytest.mark.parametrize("Cls", [KDTree, BallTree])
def test_neighbor_tree_query_radius(Cls, n_samples=100, n_features=10):
rng = check_random_state(0)
X = 2 * rng.random_sample(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1e-15 # roundoff error can cause test to fail
tree = Cls(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = tree.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
@pytest.mark.parametrize("Cls", [KDTree, BallTree])
def test_neighbor_tree_query_radius_distance(Cls, n_samples=100, n_features=10):
rng = check_random_state(0)
X = 2 * rng.random_sample(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1e-15 # roundoff error can cause test to fail
tree = Cls(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = tree.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
@pytest.mark.parametrize("Cls", [KDTree, BallTree])
@pytest.mark.parametrize("dualtree", (True, False))
def test_neighbor_tree_two_point(Cls, dualtree, n_samples=100, n_features=3):
rng = check_random_state(0)
X = rng.random_sample((n_samples, n_features))
Y = rng.random_sample((n_samples, n_features))
r = np.linspace(0, 1, 10)
tree = Cls(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
counts = tree.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
@pytest.mark.parametrize("NeighborsHeap", [NeighborsHeapBT, NeighborsHeapKDT])
def test_neighbors_heap(NeighborsHeap, n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
rng = check_random_state(0)
for row in range(n_pts):
d_in = rng.random_sample(2 * n_nbrs).astype(DTYPE, copy=False)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
@pytest.mark.parametrize("nodeheap_sort", [nodeheap_sort_bt, nodeheap_sort_kdt])
def test_node_heap(nodeheap_sort, n_nodes=50):
rng = check_random_state(0)
vals = rng.random_sample(n_nodes).astype(DTYPE, copy=False)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
@pytest.mark.parametrize(
"simultaneous_sort", [simultaneous_sort_bt, simultaneous_sort_kdt]
)
def test_simultaneous_sort(simultaneous_sort, n_rows=10, n_pts=201):
rng = check_random_state(0)
dist = rng.random_sample((n_rows, n_pts)).astype(DTYPE, copy=False)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE, copy=False)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
@pytest.mark.parametrize("Cls", [KDTree, BallTree])
def test_gaussian_kde(Cls, n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
rng = check_random_state(0)
x_in = rng.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
tree = Cls(x_in[:, None])
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
dens_tree = tree.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_tree, dens_gkde, decimal=3)
@pytest.mark.parametrize(
"Cls, metric",
itertools.chain(
[(KDTree, metric) for metric in KD_TREE_METRICS],
[(BallTree, metric) for metric in BALL_TREE_METRICS],
),
)
@pytest.mark.parametrize("k", (1, 3, 5))
@pytest.mark.parametrize("dualtree", (True, False))
@pytest.mark.parametrize("breadth_first", (True, False))
def test_nn_tree_query(Cls, metric, k, dualtree, breadth_first):
rng = check_random_state(0)
X = rng.random_sample((40, DIMENSION))
Y = rng.random_sample((10, DIMENSION))
kwargs = METRICS[metric]
kdt = Cls(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree, breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
@pytest.mark.parametrize(
"Cls, metric",
[(KDTree, "euclidean"), (BallTree, "euclidean"), (BallTree, dist_func)],
)
@pytest.mark.parametrize("protocol", (0, 1, 2))
def test_pickle(Cls, metric, protocol):
rng = check_random_state(0)
X = rng.random_sample((10, 3))
if hasattr(metric, "__call__"):
kwargs = {"p": 2}
else:
kwargs = {}
tree1 = Cls(X, leaf_size=1, metric=metric, **kwargs)
ind1, dist1 = tree1.query(X)
s = pickle.dumps(tree1, protocol=protocol)
tree2 = pickle.loads(s)
ind2, dist2 = tree2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert isinstance(tree2, Cls)
|
sergeyf/scikit-learn
|
sklearn/neighbors/tests/test_neighbors_tree.py
|
Python
|
bsd-3-clause
| 9,078
|
[
"Gaussian"
] |
4001018fd5c575b8f4cce37b8165570eed3e8fdf5ba8df2794d1ad7f75fd006a
|
#!/usr/bin/env python
#
# Unit tests for sites manipulation in samba
# Copyright (C) Matthieu Patou <mat@matws.net> 2011
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import optparse
import sys
sys.path.insert(0, "bin/python")
import samba
from samba.tests.subunitrun import TestProgram, SubunitOptions
import samba.getopt as options
from samba import sites
from samba import subnets
from samba.auth import system_session
from samba.samdb import SamDB
from samba import gensec
from samba.credentials import Credentials, DONT_USE_KERBEROS
import samba.tests
from samba.tests import delete_force
from samba.dcerpc import security
from ldb import SCOPE_SUBTREE, LdbError, ERR_INSUFFICIENT_ACCESS_RIGHTS
parser = optparse.OptionParser("sites.py [options] <host>")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)
parser.add_option_group(options.VersionOptions(parser))
# use command line creds if available
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
subunitopts = SubunitOptions(parser)
parser.add_option_group(subunitopts)
opts, args = parser.parse_args()
if len(args) < 1:
parser.print_usage()
sys.exit(1)
host = args[0]
if not "://" in host:
ldaphost = "ldap://%s" % host
else:
ldaphost = host
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
#
# Tests start here
#
class SitesBaseTests(samba.tests.TestCase):
def setUp(self):
super(SitesBaseTests, self).setUp()
self.ldb = SamDB(ldaphost, credentials=creds,
session_info=system_session(lp), lp=lp)
self.base_dn = self.ldb.domain_dn()
self.domain_sid = security.dom_sid(self.ldb.get_domain_sid())
self.configuration_dn = self.ldb.get_config_basedn().get_linearized()
def get_user_dn(self, name):
return "CN=%s,CN=Users,%s" % (name, self.base_dn)
#tests on sites
class SimpleSitesTests(SitesBaseTests):
def test_create_and_delete(self):
"""test creation and deletion of 1 site"""
sites.create_site(self.ldb, self.ldb.get_config_basedn(),
"testsamba")
self.assertRaises(sites.SiteAlreadyExistsException,
sites.create_site, self.ldb,
self.ldb.get_config_basedn(),
"testsamba")
sites.delete_site(self.ldb, self.ldb.get_config_basedn(),
"testsamba")
self.assertRaises(sites.SiteNotFoundException,
sites.delete_site, self.ldb,
self.ldb.get_config_basedn(),
"testsamba")
def test_delete_not_empty(self):
"""test removal of 1 site with servers"""
self.assertRaises(sites.SiteServerNotEmptyException,
sites.delete_site, self.ldb,
self.ldb.get_config_basedn(),
"Default-First-Site-Name")
# tests for subnets
class SimpleSubnetTests(SitesBaseTests):
def setUp(self):
super(SimpleSubnetTests, self).setUp()
self.basedn = self.ldb.get_config_basedn()
self.sitename = "testsite"
self.sitename2 = "testsite2"
self.ldb.transaction_start()
sites.create_site(self.ldb, self.basedn, self.sitename)
sites.create_site(self.ldb, self.basedn, self.sitename2)
self.ldb.transaction_commit()
def tearDown(self):
self.ldb.transaction_start()
sites.delete_site(self.ldb, self.basedn, self.sitename)
sites.delete_site(self.ldb, self.basedn, self.sitename2)
self.ldb.transaction_commit()
super(SimpleSubnetTests, self).tearDown()
def test_create_delete(self):
"""Create a subnet and delete it again."""
basedn = self.ldb.get_config_basedn()
cidr = "10.11.12.0/24"
subnets.create_subnet(self.ldb, basedn, cidr, self.sitename)
self.assertRaises(subnets.SubnetAlreadyExists,
subnets.create_subnet, self.ldb, basedn, cidr,
self.sitename)
subnets.delete_subnet(self.ldb, basedn, cidr)
ret = self.ldb.search(base=basedn, scope=SCOPE_SUBTREE,
expression='(&(objectclass=subnet)(cn=%s))' % cidr)
self.assertEqual(len(ret), 0, 'Failed to delete subnet %s' % cidr)
def test_create_shift_delete(self):
"""Create a subnet, shift it to another site, then delete it."""
basedn = self.ldb.get_config_basedn()
cidr = "10.11.12.0/24"
subnets.create_subnet(self.ldb, basedn, cidr, self.sitename)
subnets.set_subnet_site(self.ldb, basedn, cidr, self.sitename2)
ret = self.ldb.search(base=basedn, scope=SCOPE_SUBTREE,
expression='(&(objectclass=subnet)(cn=%s))' % cidr)
# use a distinct name to avoid shadowing the imported sites module
site_objects = ret[0]['siteObject']
self.assertEqual(len(site_objects), 1)
self.assertEqual(site_objects[0],
'CN=testsite2,CN=Sites,%s' % self.ldb.get_config_basedn())
self.assertRaises(subnets.SubnetAlreadyExists,
subnets.create_subnet, self.ldb, basedn, cidr,
self.sitename)
subnets.delete_subnet(self.ldb, basedn, cidr)
ret = self.ldb.search(base=basedn, scope=SCOPE_SUBTREE,
expression='(&(objectclass=subnet)(cn=%s))' % cidr)
self.assertEqual(len(ret), 0, 'Failed to delete subnet %s' % cidr)
def test_delete_subnet_that_does_not_exist(self):
"""Ensure we can't delete a site that isn't there."""
basedn = self.ldb.get_config_basedn()
cidr = "10.15.0.0/16"
self.assertRaises(subnets.SubnetNotFound,
subnets.delete_subnet, self.ldb, basedn, cidr)
def get_user_and_ldb(self, username, password, hostname=ldaphost):
"""Get a connection for a temporarily user that will vanish as soon as
the test is over."""
user = self.ldb.newuser(username, password)
creds_tmp = Credentials()
creds_tmp.set_username(username)
creds_tmp.set_password(password)
creds_tmp.set_domain(creds.get_domain())
creds_tmp.set_realm(creds.get_realm())
creds_tmp.set_workstation(creds.get_workstation())
creds_tmp.set_gensec_features(creds_tmp.get_gensec_features()
| gensec.FEATURE_SEAL)
creds_tmp.set_kerberos_state(DONT_USE_KERBEROS)
ldb_target = SamDB(url=hostname, credentials=creds_tmp, lp=lp)
self.addCleanup(delete_force, self.ldb, self.get_user_dn(username))
return (user, ldb_target)
def test_rename_delete_good_subnet_to_good_subnet_other_user(self):
"""Make sure that we can't rename or delete subnets when we aren't
admin."""
basedn = self.ldb.get_config_basedn()
cidr = "10.16.0.0/24"
new_cidr = "10.16.1.0/24"
subnets.create_subnet(self.ldb, basedn, cidr, self.sitename)
user, non_admin_ldb = self.get_user_and_ldb("notadmin", "samba123@")
try:
subnets.rename_subnet(non_admin_ldb, basedn, cidr, new_cidr)
except LdbError as e:
self.assertEqual(e.args[0], ERR_INSUFFICIENT_ACCESS_RIGHTS,
("subnet rename by non-admin failed "
"in the wrong way: %s" % e))
else:
self.fail("subnet rename by non-admin succeeded: %s" % e)
ret = self.ldb.search(base=basedn, scope=SCOPE_SUBTREE,
expression='(&(objectclass=subnet)(cn=%s))' % cidr)
self.assertEqual(len(ret), 1, ('Subnet %s destroyed or renamed '
'by non-admin' % cidr))
ret = self.ldb.search(base=basedn, scope=SCOPE_SUBTREE,
expression=('(&(objectclass=subnet)(cn=%s))'
% new_cidr))
self.assertEqual(len(ret), 0,
'New subnet %s created by non-admin' % cidr)
try:
subnets.delete_subnet(non_admin_ldb, basedn, cidr)
except LdbError as e:
self.assertEqual(e.args[0], ERR_INSUFFICIENT_ACCESS_RIGHTS,
("subnet delete by non-admin failed "
"in the wrong way: %s" % e))
else:
self.fail("subnet delete by non-admin succeeded: %s" % e)
ret = self.ldb.search(base=basedn, scope=SCOPE_SUBTREE,
expression='(&(objectclass=subnet)(cn=%s))' % cidr)
self.assertEqual(len(ret), 1, 'Subnet %s deleted non-admin' % cidr)
subnets.delete_subnet(self.ldb, basedn, cidr)
def test_create_good_subnet_other_user(self):
"""Make sure that we can't create subnets when we aren't admin."""
basedn = self.ldb.get_config_basedn()
cidr = "10.16.0.0/24"
user, non_admin_ldb = self.get_user_and_ldb("notadmin", "samba123@")
try:
subnets.create_subnet(non_admin_ldb, basedn, cidr, self.sitename)
except LdbError as e:
self.assertEqual(e.args[0], ERR_INSUFFICIENT_ACCESS_RIGHTS,
("subnet create by non-admin failed "
"in the wrong way: %s" % e))
else:
subnets.delete_subnet(self.ldb, basedn, cidr)
self.fail("subnet create by non-admin succeeded: %s")
ret = self.ldb.search(base=basedn, scope=SCOPE_SUBTREE,
expression='(&(objectclass=subnet)(cn=%s))' % cidr)
self.assertEqual(len(ret), 0, 'New subnet %s created by non-admin' % cidr)
def test_rename_good_subnet_to_good_subnet(self):
"""Make sure that we can rename subnets"""
basedn = self.ldb.get_config_basedn()
cidr = "10.16.0.0/24"
new_cidr = "10.16.1.0/24"
subnets.create_subnet(self.ldb, basedn, cidr, self.sitename)
subnets.rename_subnet(self.ldb, basedn, cidr, new_cidr)
ret = self.ldb.search(base=basedn, scope=SCOPE_SUBTREE,
expression='(&(objectclass=subnet)(cn=%s))' % new_cidr)
self.assertEqual(len(ret), 1, 'Failed to rename subnet %s' % cidr)
ret = self.ldb.search(base=basedn, scope=SCOPE_SUBTREE,
expression='(&(objectclass=subnet)(cn=%s))' % cidr)
self.assertEqual(len(ret), 0, 'Failed to remove old subnet during rename %s' % cidr)
subnets.delete_subnet(self.ldb, basedn, new_cidr)
def test_rename_good_subnet_to_bad_subnet(self):
"""Make sure that the CIDR checking runs during rename"""
basedn = self.ldb.get_config_basedn()
cidr = "10.17.0.0/24"
bad_cidr = "10.11.12.0/14"
subnets.create_subnet(self.ldb, basedn, cidr, self.sitename)
self.assertRaises(subnets.SubnetInvalid, subnets.rename_subnet,
self.ldb, basedn, cidr, bad_cidr)
ret = self.ldb.search(base=basedn, scope=SCOPE_SUBTREE,
expression='(&(objectclass=subnet)(cn=%s))' % bad_cidr)
        self.assertEqual(len(ret), 0, ('Subnet %s was unexpectedly renamed '
                                       'to invalid CIDR %s' % (cidr, bad_cidr)))
ret = self.ldb.search(base=basedn, scope=SCOPE_SUBTREE,
expression='(&(objectclass=subnet)(cn=%s))' % cidr)
        self.assertEqual(len(ret), 1, ('Original subnet %s removed during '
                                       'failed rename' % cidr))
subnets.delete_subnet(self.ldb, basedn, cidr)
def test_create_bad_ranges(self):
"""These CIDR ranges all have something wrong with them, and they
should all fail."""
basedn = self.ldb.get_config_basedn()
cidrs = [
# IPv4
# insufficient zeros
"10.11.12.0/14",
"110.0.0.0/6",
"1.0.0.0/0",
"10.11.13.1/24",
"1.2.3.4/29",
"10.11.12.0/21",
# out of range mask
"110.0.0.0/33",
"110.0.0.0/-1",
"4.0.0.0/111",
# out of range address
"310.0.0.0/24",
"10.0.0.256/32",
"1.1.-20.0/24",
# badly formed
"1.0.0.0/1e",
"1.0.0.0/24.0",
"1.0.0.0/1/1",
"1.0.0.0",
"1.c.0.0/24",
"1.2.0.0.0/27",
"1.23.0/24",
"1.23.0.-7/24",
"1.-23.0.7/24",
"1.23.-0.7/24",
"1.23.0.0/0x10",
# IPv6 insufficient zeros -- this could be a subtle one
# due to the vagaries of endianness in the 16 bit groups.
"aaaa:bbbb:cccc:dddd:eeee:ffff:2222:1100/119",
"aaaa:bbbb::/31",
"a:b::/31",
"c000::/1",
"a::b00/119",
"1::1/127",
"1::2/126",
"1::100/119",
"1::8000/112",
# out of range mask
"a:b::/130",
"a:b::/-1",
"::/129",
# An IPv4 address can't be exactly the bitmask (MS ADTS)
"128.0.0.0/1",
"192.0.0.0/2",
"255.192.0.0/10",
"255.255.255.0/24",
"255.255.255.255/32",
"0.0.0.0/0",
# The address can't have leading zeros (not RFC 4632, but MS ADTS)
"00.1.2.0/24",
"003.1.2.0/24",
"022.1.0.0/16",
"00000000000000000000000003.1.2.0/24",
"09876::abfc/126",
"0aaaa:bbbb::/32",
"009876::abfc/126",
"000a:bbbb::/32",
# How about extraneous zeros later on
"3.01.2.0/24",
"3.1.2.00/24",
"22.001.0.0/16",
"3.01.02.0/24",
"100a:0bbb:0023::/48",
"100a::0023/128",
# Windows doesn't like the zero IPv4 address
"0.0.0.0/8",
# or the zero mask on IPv6
"::/0",
# various violations of RFC5952
"0:0:0:0:0:0:0:0/8",
"0::0/0",
"::0:0/48",
"::0:4/128",
"0::/8",
"0::4f/128",
"0::42:0:0:0:0/64",
"4f::0/48",
# badly formed -- mostly the wrong arrangement of colons
"a::b::0/120",
"a::abcdf:0/120",
"a::g:0/120",
"::0::3/48",
"2001:3::110::3/118",
"aaaa:bbbb:cccc:dddd:eeee:ffff:2222:1111:0000/128",
"a:::5:0/120",
# non-canonical representations (vs RFC 5952)
# "2001:0:c633:63::1:0/120" is correct
"2001:0:c633:63:0:0:1:0/120",
"2001::c633:63:0:0:1:0/120",
"2001:0:c633:63:0:0:1::/120",
# "10:0:0:42::/64" is correct
"10::42:0:0:0:0/64",
"10:0:0:42:0:0:0:0/64",
# "1::4:5:0:0:8/127" is correct
"1:0:0:4:5:0:0:8/127",
"1:0:0:4:5::8/127",
# "2001:db8:0:1:1:1:1:1/128" is correct
"2001:db8::1:1:1:1:1/128",
# IP4 embedded - rejected
"a::10.0.0.0/120",
"a::10.9.8.7/128",
# The next ones tinker indirectly with IPv4 embedding,
# where Windows has some odd behaviour.
#
# Samba's libreplace inet_ntop6 expects IPv4 embedding
# with addresses in these forms:
#
# ::wx:yz
# ::FFFF:wx:yz
#
            # these will be stringified with trailing dotted decimal, thus:
#
# ::w.x.y.z
# ::ffff:w.x.y.z
#
# and this will cause the address to be rejected by Samba,
            # because it uses an inet_pton / inet_ntop round trip to
# ascertain correctness.
"::ffff:0:0/96", #this one fails on WIN2012r2
"::ffff:aaaa:a000/120",
"::ffff:10:0/120",
"::ffff:2:300/120",
"::3:0/120",
"::2:30/124",
"::ffff:2:30/124",
# completely wrong
None,
"bob",
3.1415,
False,
"10.11.16.0/24\x00hidden bytes past a zero",
self,
]
failures = []
for cidr in cidrs:
try:
subnets.create_subnet(self.ldb, basedn, cidr, self.sitename)
except subnets.SubnetInvalid:
print("%s fails properly" % (cidr,), file=sys.stderr)
continue
# we are here because it succeeded when it shouldn't have.
print("CIDR %s fails to fail" % (cidr,), file=sys.stderr)
failures.append(cidr)
subnets.delete_subnet(self.ldb, basedn, cidr)
if failures:
print("These bad subnet names were accepted:")
for cidr in failures:
print(" %s" % cidr)
self.fail()
def test_create_good_ranges(self):
"""All of these CIDRs are good, and the subnet creation should
succeed."""
basedn = self.ldb.get_config_basedn()
cidrs = [
# IPv4
"10.11.12.0/24",
"10.11.12.0/23",
"10.11.12.0/25",
"110.0.0.0/7",
"1.0.0.0/32",
"10.11.13.0/32",
"10.11.13.1/32",
"99.0.97.0/24",
"1.2.3.4/30",
"10.11.12.0/22",
"0.12.13.0/24",
# IPv6
"aaaa:bbbb:cccc:dddd:eeee:ffff:2222:1100/120",
"aaaa:bbbb:cccc:dddd:eeee:ffff:2222:11f0/124",
"aaaa:bbbb:cccc:dddd:eeee:ffff:2222:11fc/126",
# don't forget upper case
"FFFF:FFFF:FFFF:FFFF:ABCD:EfFF:FFFF:FFeF/128",
"9876::ab00/120",
"9876::abf0/124",
"9876::abfc/126",
"aaaa:bbbb::/32",
"aaaa:bbba::/31",
"aaaa:ba00::/23",
"aaaa:bb00::/24",
"aaaa:bb00::/77",
"::/48",
"a:b::/32",
"c000::/2",
"a::b00/120",
"1::2/127",
# this pattern of address suffix == mask is forbidden with
# IPv4 but OK for IPv6.
"8000::/1",
"c000::/2",
"ffff:ffff:ffc0::/42",
"FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF/128",
# leading zeros are forbidden, but implicit IPv6 zeros
# (via "::") are OK.
"::1000/116",
"::8000/113",
# taken to the logical conclusion, "::/0" should be OK, but no.
"::/48",
# Try some reserved ranges, which it might be reasonable
# to exclude, but which are not excluded in practice.
"129.0.0.0/16",
"129.255.0.0/16",
"100.64.0.0/10",
"127.0.0.0/8",
"127.0.0.0/24",
"169.254.0.0/16",
"169.254.1.0/24",
"192.0.0.0/24",
"192.0.2.0/24",
"198.18.0.0/15",
"198.51.100.0/24",
"203.0.113.0/24",
"224.0.0.0/4",
"130.129.0.0/16",
"130.255.0.0/16",
"192.12.0.0/24",
"223.255.255.0/24",
"240.255.255.0/24",
"224.0.0.0/8",
"::/96",
"100::/64",
"2001:10::/28",
"fec0::/10",
"ff00::/8",
"::1/128",
"2001:db8::/32",
"2001:10::/28",
"2002::/24",
"2002:a00::/24",
"2002:7f00::/24",
"2002:a9fe::/32",
"2002:ac10::/28",
"2002:c000::/40",
"2002:c000:200::/40",
"2002:c0a8::/32",
"2002:c612::/31",
"2002:c633:6400::/40",
"2002:cb00:7100::/40",
"2002:e000::/20",
"2002:f000::/20",
"2002:ffff:ffff::/48",
"2001::/40",
"2001:0:a00::/40",
"2001:0:7f00::/40",
"2001:0:a9fe::/48",
"2001:0:ac10::/44",
"2001:0:c000::/56",
"2001:0:c000:200::/56",
"2001:0:c0a8::/48",
"2001:0:c612::/47",
"2001:0:c633:6400::/56",
"2001:0:cb00:7100::/56",
"2001:0:e000::/36",
"2001:0:f000::/36",
"2001:0:ffff:ffff::/64",
# non-RFC-5952 versions of these are tested in create_bad_ranges
"2001:0:c633:63::1:0/120",
"10:0:0:42::/64",
"1::4:5:0:0:8/127",
"2001:db8:0:1:1:1:1:1/128",
# The "well-known prefix" 64::ff9b is another IPv4
# embedding scheme. Let's try that.
"64:ff9b::aaaa:aaaa/127",
"64:ff9b::/120",
"64:ff9b::ffff:2:3/128",
]
failures = []
for cidr in cidrs:
try:
subnets.create_subnet(self.ldb, basedn, cidr, self.sitename)
except subnets.SubnetInvalid as e:
print(e)
failures.append(cidr)
continue
ret = self.ldb.search(base=basedn, scope=SCOPE_SUBTREE,
expression=('(&(objectclass=subnet)(cn=%s))' %
cidr))
if len(ret) != 1:
print("%s was not created" % cidr)
failures.append(cidr)
continue
subnets.delete_subnet(self.ldb, basedn, cidr)
if failures:
print("These good subnet names were not accepted:")
for cidr in failures:
print(" %s" % cidr)
self.fail()
TestProgram(module=__name__, opts=subunitopts)
|
sathieu/samba
|
source4/dsdb/tests/python/sites.py
|
Python
|
gpl-3.0
| 22,483
|
[
"TINKER"
] |
005d3c1697afd7e794933c03745b93e78dd89c05ecb53bd65cd3f128ddb3978a
|
"Demonstrates molecular dynamics with constant energy."
from ase.calculators.emt import EMT
from ase.lattice.cubic import FaceCenteredCubic
from ase.md.langevin import Langevin
from ase.io.trajectory import PickleTrajectory
from ase import units
from asap3 import EMT # Way too slow with ase.EMT !
size = 10
T = 1500 # Kelvin
# Set up a crystal
atoms = FaceCenteredCubic(directions=[[1,0,0],[0,1,0],[0,0,1]], symbol="Cu",
size=(size,size,size), pbc=False)
# Describe the interatomic interactions with the Effective Medium Theory
atoms.set_calculator(EMT())
# We want to run MD at constant temperature using the Langevin algorithm
# with a time step of 5 fs, the temperature T and the friction
# coefficient set to 0.002 atomic units.
dyn = Langevin(atoms, 5*units.fs, T*units.kB, 0.002)
# Function to print the potential, kinetic and total energy.
def printenergy(a=atoms):  # store a reference to atoms in the definition
epot = a.get_potential_energy() / len(a)
ekin = a.get_kinetic_energy() / len(a)
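    # By equipartition, <Ekin> = (3/2) kB T per atom, so ekin / (1.5 * units.kB)
    # below is the instantaneous temperature estimate.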
print ("Energy per atom: Epot = %.3feV Ekin = %.3feV (T=%3.0fK) Etot = %.3feV" %
(epot, ekin, ekin/(1.5*units.kB), epot+ekin))
dyn.attach(printenergy, interval=50)
# We also want to save the positions of all atoms after every 50th time step.
traj = PickleTrajectory("moldyn3.traj", 'w', atoms)
dyn.attach(traj.write, interval=50)
# Now run the dynamics
printenergy()
dyn.run(5000)
|
grhawk/ASE
|
tools/doc/tutorials/md/moldyn3.py
|
Python
|
gpl-2.0
| 1,439
|
[
"ASE",
"CRYSTAL"
] |
ba870f9f7dd3b00da0817dfcfd60e06453d164c0bfb4615cbb2e21d5a2d77113
|
#!/usr/bin/env python
import numpy as np
from ..utils import arrays as ar
# from ..utils import sliding_window as window
def distsToVectors(X, V):
"""Returns the distances between each row of X and each row of V. If X is
n x m and v is p x m, returns an n x p matrix of L2 distances.
>>> n, m, p = 100, 40, 10
>>> X = np.random.randn(n, m)
>>> V = np.random.randn(p, m)
>>> dists = distsToVectors(X, V)
>>> for i in range(n):
... for j in range(p):
... diff = X[i] - V[j]
... d = np.sqrt(np.sum(diff * diff))
... assert(np.abs(dists[i, j] - d) < .0001)
>>>
"""
V = V.T # each col is now one of the vectors
# ||x - v||^2 = ||x||^2 + ||v||^2 - 2 * x.v
dists = -2. * np.dot(X, V)
dists += np.sum(X*X, axis=1).reshape((-1, 1)) # add to each col
dists += np.sum(V*V, axis=0) # add to each row
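    # The expanded form can go slightly negative for near-identical rows due
    # to floating-point cancellation; np.maximum(dists, 0) before the sqrt
    # below would guard against NaNs if callers need that.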
# n = len(X)
# numVects = len(V)
# dists = np.empty((n, numVects))
# for i, row in enumerate(X):
# for j, col in enumerate(V):
# diff = row - col
# dists[i, j] = np.dot(diff, diff)
# dists = np.sqrt(dists) # triangle inequality holds for norm, not norm^2
return np.sqrt(dists)
def distsToRandomVects(X, numReferenceVects=10, referenceVects=None,
referenceVectAlgo='gauss', norm='z', **sink):
"""Creates a set of numReferenceVects Gaussian vectors and returns the
distances of each row of X to each of these vectors as an
(N x numReferenceVects) array, where N is the number of rows in X.
Further, the columns are sorted in descending order of standard deviation.
"""
n, m = X.shape
    assert m > 1
if referenceVects is None:
if referenceVectAlgo == 'gauss':
referenceVects = np.random.randn(numReferenceVects, m) # rows are projection vects
elif referenceVectAlgo == 'randwalk':
referenceVects = np.random.randn(numReferenceVects, m)
referenceVects = np.cumsum(referenceVects, axis=1)
        elif referenceVectAlgo == 'sample':
            # sample numReferenceVects rows of X without replacement
            idxs = np.random.choice(len(X), numReferenceVects, replace=False)
            referenceVects = np.copy(X[idxs])
if norm == 'z':
referenceVects = ar.zNormalizeRows(referenceVects)
elif norm == 'mean':
referenceVects = ar.meanNormalizeRows(referenceVects)
referenceDists = distsToVectors(X, referenceVects)
# figure out std deviations of dists to different projections and
# sort projections by decreasing std
distStds = np.std(referenceDists, axis=0)
refSortIdxs = np.argsort(distStds)[::-1]
referenceDists = referenceDists[:, refSortIdxs]
referenceVects = referenceVects[refSortIdxs]
return referenceDists, referenceVects
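# A minimal usage sketch (shapes are illustrative, not from the original):
#
#   X = np.random.randn(200, 32)
#   dists, vects = distsToRandomVects(X, numReferenceVects=8)
#   assert dists.shape == (200, 8)
#   assert vects.shape == (8, 32)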
def buildOrderline(X, numReferenceVects=10, **kwargs):
# TODO comment this if we use it
projDists, projVects = distsToRandomVects(X, numReferenceVects, **kwargs)
sortIdxs = np.argsort(projDists[:, 0])
unsortIdxs = np.arange(len(sortIdxs))[sortIdxs] # unsortIdxs: projected idx -> orig idx
Xsort = X[sortIdxs, :]
projDistsSort = projDists[sortIdxs, :]
return Xsort, projDistsSort, projVects, unsortIdxs
|
dblalock/flock
|
python/algo/dist.py
|
Python
|
mit
| 2,920
|
[
"Gaussian"
] |
8d283e4f175d2f7c71179df7d2510ea505e9f4e1bcf93c9ae30e786454e08dd1
|
from unittest import skipUnless
from bok_choy.web_app_test import WebAppTest
from acceptance_tests import ENABLE_OAUTH_TESTS
from acceptance_tests.mixins import LoginMixin
from acceptance_tests.pages import LoginPage
@skipUnless(ENABLE_OAUTH_TESTS, 'OAuth tests are not enabled.')
class OAuth2FlowTests(LoginMixin, WebAppTest):
def setUp(self):
"""
Instantiate the page objects.
"""
super().setUp()
self.insights_login_page = LoginPage(self.browser)
def test_login(self):
self.login_with_lms()
# Visit login URL and get redirected
self.insights_login_page.visit()
# User should arrive at course index page (or access denied page, if no permissions)
# Splitting this out into two separate tests would require two separate sets of credentials. That is
# feasible, but somewhat time-consuming. For now, we will rely on unit tests to validate the permissions and
# ensure both cases below are met.
self.assertTrue(self.browser.title.startswith('Courses') or self.browser.title.startswith('Access Denied'))
|
edx/edx-analytics-dashboard
|
acceptance_tests/test_auth.py
|
Python
|
agpl-3.0
| 1,122
|
[
"VisIt"
] |
6319d2a5ebf3f10bd8cb8a4cef049d80e4413d82ed07f6fccd84a739dabd8334
|
from codecs import getwriter
from collections import defaultdict
from errno import ENOENT
from json import load
import os
from os.path import abspath, relpath, splitext, sep
import subprocess
from tempfile import TemporaryFile, NamedTemporaryFile
from six import string_types
from sphinx.errors import SphinxError
from .parsers import path_and_formal_params, PathVisitor
from .suffix_tree import PathTaken, SuffixTree
from .typedoc import parse_typedoc
def gather_doclets(app):
"""Run JSDoc or another analysis tool across a whole codebase, and squirrel
away its results in jsdoc doclet format."""
source_paths = [app.config.js_source_path] if isinstance(app.config.js_source_path, string_types) else app.config.js_source_path
# Uses cwd, which Sphinx seems to set to the dir containing conf.py:
abs_source_paths = [abspath(path) for path in source_paths]
root_for_relative_paths = root_or_fallback(app.config.root_for_relative_js_paths,
abs_source_paths)
analyze = analyzer_for(app.config.js_language)
doclets = analyze(abs_source_paths, app)
# 2 doclets are made for classes, and they are largely redundant: one for
# the class itself and another for the constructor. However, the
# constructor one gets merged into the class one and is intentionally
# marked as undocumented, even if it isn't. See
# https://github.com/jsdoc3/jsdoc/issues/1129.
doclets = [d for d in doclets if d.get('comment')
and not d.get('undocumented')]
# Build table for lookup by name, which most directives use:
app._sphinxjs_doclets_by_path = SuffixTree()
conflicts = []
for d in doclets:
try:
app._sphinxjs_doclets_by_path.add(
doclet_full_path(d, root_for_relative_paths),
d)
except PathTaken as conflict:
conflicts.append(conflict.segments)
if conflicts:
raise PathsTaken(conflicts)
# Build lookup table for autoclass's :members: option. This will also
# pick up members of functions (inner variables), but it will instantly
# filter almost all of them back out again because they're undocumented.
# We index these by unambiguous full path. Then, when looking them up by
# arbitrary name segment, we disambiguate that first by running it through
# the suffix tree above. Expect trouble due to jsdoc's habit of calling
# things (like ES6 class methods) "<anonymous>" in the memberof field, even
# though they have names. This will lead to multiple methods having each
# other's members. But if you don't have same-named inner functions or
# inner variables that are documented, you shouldn't have trouble.
app._sphinxjs_doclets_by_class = defaultdict(lambda: [])
for d in doclets:
of = d.get('memberof')
if of: # speed optimization
segments = doclet_full_path(d, root_for_relative_paths, longname_field='memberof')
app._sphinxjs_doclets_by_class[tuple(segments)].append(d)
def program_name_on_this_platform(program):
"""Return the name of the executable file on the current platform, given a
command name with no extension."""
return program + '.cmd' if os.name == 'nt' else program
class Command(object):
def __init__(self, program):
self.program = program_name_on_this_platform(program)
self.args = []
def add(self, *args):
self.args.extend(args)
def make(self):
return [self.program] + self.args
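# Command is a small argv builder: Command('jsdoc') resolves to 'jsdoc.cmd'
# on Windows, and make() returns the list handed to subprocess, e.g.
#
#   cmd = Command('jsdoc')
#   cmd.add('-X', '/abs/path/src')   # '/abs/path/src' is a made-up path
#   cmd.make()  # -> ['jsdoc', '-X', '/abs/path/src'] on non-Windows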
def analyze_jsdoc(abs_source_paths, app):
command = Command('jsdoc')
command.add('-X', *abs_source_paths)
if app.config.jsdoc_config_path:
command.add('-c', app.config.jsdoc_config_path)
# Use a temporary file to handle large output volume. JSDoc defaults to
# utf8-encoded output.
with getwriter('utf-8')(TemporaryFile(mode='w+b')) as temp:
try:
p = subprocess.Popen(command.make(), cwd=app.confdir, stdout=temp)
except OSError as exc:
if exc.errno == ENOENT:
raise SphinxError('%s was not found. Install it using "npm install -g jsdoc".' % command.program)
else:
raise
p.wait()
# Once output is finished, move back to beginning of file and load it:
temp.seek(0)
try:
return load(temp)
except ValueError:
raise SphinxError('jsdoc found no JS files in the directories %s. Make sure js_source_path is set correctly in conf.py. It is also possible (though unlikely) that jsdoc emitted invalid JSON.' % abs_source_paths)
def analyze_typescript(abs_source_paths, app):
command = Command('typedoc')
if app.config.jsdoc_config_path:
command.add('--tsconfig', app.config.jsdoc_config_path)
with getwriter('utf-8')(NamedTemporaryFile(mode='w+b')) as temp:
command.add('--json', temp.name, *abs_source_paths)
try:
subprocess.call(command.make())
except OSError as exc:
if exc.errno == ENOENT:
raise SphinxError('%s was not found. Install it using "npm install -g typedoc".' % command.program)
else:
raise
# typedoc emits a valid JSON file even if it finds no TS files in the dir:
return parse_typedoc(temp)
ANALYZERS = {'javascript': analyze_jsdoc,
'typescript': analyze_typescript}
def analyzer_for(language):
"""Return a callable that spits out JSDoc-style doclets from some language:
JS, TypeScript, or other."""
try:
return ANALYZERS[language]
except KeyError:
raise SphinxError('Unsupported value of js_language in config: %s' % language)
def root_or_fallback(root_for_relative_paths, abs_source_paths):
"""Return the path that relative JS entity paths in the docs are relative to.
Fall back to the sole JS source path if the setting is unspecified.
:arg root_for_relative_paths: The raw root_for_relative_js_paths setting.
None if the user hasn't specified it.
:arg abs_source_paths: Absolute paths of dirs to scan for JS code
"""
if root_for_relative_paths:
return abspath(root_for_relative_paths)
else:
if len(abs_source_paths) > 1:
raise SphinxError('Since more than one js_source_path is specified in conf.py, root_for_relative_js_paths must also be specified. This allows paths beginning with ./ or ../ to be unambiguous.')
else:
return abs_source_paths[0]
def doclet_full_path(d, base_dir, longname_field='longname'):
"""Return the full, unambiguous list of path segments that points to an
entity described by a doclet.
Example: ``['./', 'dir/', 'dir/', 'file/', 'object.', 'object#', 'object']``
:arg d: The doclet
:arg base_dir: Absolutized value of the jsdoc_source_path option
:arg longname_field: The field to look in at the top level of the doclet
for the long name of the object to emit a path to
"""
meta = d['meta']
rel = relpath(meta['path'], base_dir)
rel = '/'.join(rel.split(sep))
if not rel.startswith(('../', './')) and rel not in ('..', '.'):
# It just starts right out with the name of a folder in the cwd.
rooted_rel = './%s' % rel
else:
rooted_rel = rel
# Building up a string and then parsing it back down again is probably
# not the fastest approach, but it means knowledge of path format is in
# one place: the parser.
path = '%s/%s.%s' % (rooted_rel,
splitext(meta['filename'])[0],
d[longname_field])
return PathVisitor().visit(
path_and_formal_params['path'].parse(path))
class PathsTaken(Exception):
"""One or more JS objects had the same paths.
Rolls up multiple PathTaken exceptions for mass reporting.
"""
def __init__(self, conflicts):
# List of paths, each given as a list of segments:
self.conflicts = conflicts
def __str__(self):
return ('Your JS code contains multiple documented objects at each of '
"these paths:\n\n %s\n\nWe won't know which one you're "
'talking about. Using JSDoc tags like @class might help you '
'differentiate them.' %
'\n '.join(''.join(c) for c in self.conflicts))
|
erikrose/sphinx-js
|
sphinx_js/doclets.py
|
Python
|
mit
| 8,437
|
[
"VisIt"
] |
74b9abc5f8365ad8e62ee464401bcc20dde5309db6dd8b15100625f13fc2d452
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Custom astroid checker for config calls."""
import sys
import os
import os.path
import astroid
from pylint import interfaces, checkers
from pylint.checkers import utils
sys.path.insert(
0, os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
os.pardir))
from qutebrowser.config import configdata
class ConfigChecker(checkers.BaseChecker):
"""Custom astroid checker for config calls."""
__implements__ = interfaces.IAstroidChecker
name = 'config'
msgs = {
'E0000': ('"%s -> %s" is no valid config option.', # flake8: disable=S001
'bad-config-call',
None),
}
priority = -1
@utils.check_messages('bad-config-call')
def visit_call(self, node):
"""Visit a Call node."""
if hasattr(node, 'func'):
infer = utils.safe_infer(node.func)
if infer and infer.root().name == 'qutebrowser.config.config':
if getattr(node.func, 'attrname', None) in ['get', 'set']:
self._check_config(node)
def _check_config(self, node):
"""Check that the arguments to config.get(...) are valid."""
try:
sect_arg = utils.get_argument_from_call(node, position=0,
keyword='sectname')
opt_arg = utils.get_argument_from_call(node, position=1,
keyword='optname')
except utils.NoSuchArgumentError:
return
sect_arg = utils.safe_infer(sect_arg)
opt_arg = utils.safe_infer(opt_arg)
if not (isinstance(sect_arg, astroid.Const) and
isinstance(opt_arg, astroid.Const)):
return
try:
configdata.DATA[sect_arg.value][opt_arg.value]
except KeyError:
self.add_message('bad-config-call', node=node,
args=(sect_arg.value, opt_arg.value))
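    # For example (hypothetical option names), a call like
    # config.get('colors', 'tabs.bg.bar') would be flagged as
    # bad-config-call if that (section, option) pair is absent from
    # configdata.DATA.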
def register(linter):
"""Register this checker."""
linter.register_checker(ConfigChecker(linter))
|
lahwaacz/qutebrowser
|
scripts/dev/pylint_checkers/qute_pylint/config.py
|
Python
|
gpl-3.0
| 2,880
|
[
"VisIt"
] |
6d88bb9a4480d7bb242b791795a602c7028051ac28df2d0d4cb12cee38f90583
|
import logging
from simtk.unit import nanometers, picoseconds
import numpy as np
logger = logging.getLogger('InterMolLog')
class GromacsGroParser(object):
"""GromacsGroParser reads and writes Gromacs .gro files
A .gro file also contains some topological information, such as elements and
residue names, but not enough to construct a full Topology object. This
information is recorded and stored in the object's public fields.
"""
def __init__(self, gro_file):
"""Load a .gro gro_file.
The atom positions can be retrieved by calling getPositions().
Parameters:
- gro_file (string) the name of the gro_file to read or write
"""
self.gro_file = gro_file
def read(self):
atomname = list()
resid = list()
resname = list()
boxes = list()
xyzs = list()
vels = list()
with open(self.gro_file) as gro:
next(gro)
n_atoms = int(next(gro).strip())
for _ in range(n_atoms):
line = next(gro)
(thisresnum, thisresname, thisatomname) = [line[i*5:i*5+5].strip() for i in range(3)]
resname.append(thisresname)
resid.append(int(thisresnum))
atomname.append(thisatomname)
entries = line[20:].split()
                # If there aren't 3 or 6 entries, assume fixed columns,
                # presumably 8 characters wide
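                # (negative or large coordinates can run fields together
                # with no whitespace, so re-split into fixed-width chunks)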
if len(entries) not in [3, 6]:
data = line[20:]
entries = []
spacing = 8
for j in range(0, len(data), spacing):
entry = data[j:j+spacing].strip()
if len(entry) > 0:
entries.append(entry)
entries = [float(x) for x in entries]
xyz = [x * nanometers for x in entries[:3]]
xyzs.append(xyz)
if len(entries) == 6:
vel = [v * nanometers / picoseconds for v in entries[3:6]]
else:
vel = [v * nanometers / picoseconds for v in [0., 0., 0.]]
vels.append(vel)
line = next(gro)
raw_box_vector = line.split()
v = np.zeros([3, 3], float) * nanometers
# Diagonals
for i in range(3):
v[i, i] = float(raw_box_vector[i]) * nanometers
if len(raw_box_vector) == 9:
k = 3
# Then the off-diagonals
for i in range(3):
for j in range(3):
if i != j:
v[i, j] = float(raw_box_vector[k]) * nanometers
k += 1
boxes.append(v)
self.positions = np.array(xyzs)
self.velocities = np.array(vels)
self.atom_names = atomname
self.residue_ids = resid
self.residue_names = resname
self.box_vector = boxes[0]
def write(self, system):
"""Write the system out in a Gromacs 4.6 format
Args:
filename (str): the file to write out to
"""
with open(self.gro_file, 'w') as gro:
gro.write("{0}\n".format(system.name))
gro.write("{0}\n".format(system.n_atoms))
for n, atom in enumerate(system.atoms):
if atom.name.isdigit():
# Kluge for atoms read in from a LAMMPS data file.
atom.name = "LMP_{0}".format(atom.name)
                # .gro atom numbers wrap at 100,000, which is why the field is 5 wide.
gro.write('{0:5d}{1:<5s}{2:5s}{3:5d}'.format(
atom.residue_index, atom.residue_name, atom.name, (n + 1)%100000))
for pos in atom.position:
gro.write('{0:17.12f}'.format(pos.value_in_unit(nanometers)))
if np.any(atom.velocity):
for vel in atom.velocity:
gro.write('{0:17.12f}'.format(vel.value_in_unit(nanometers / picoseconds)))
gro.write('\n')
            # Check for a rectangular box. GROMACS requires the upper
            # off-diagonal elements to be zero, so checking the three
            # lower ones is sufficient.
if (system.box_vector[1, 0]._value == 0 and
system.box_vector[2, 0]._value == 0 and
system.box_vector[2, 1]._value == 0):
for i in range(3):
gro.write('{0:11.7f}'.format(system.box_vector[i, i].value_in_unit(nanometers)))
else:
for i in range(3):
gro.write('{0:11.7f}'.format(system.box_vector[i, i].value_in_unit(nanometers)))
for i in range(3):
for j in range(3):
if i != j:
gro.write('{0:11.7f}'.format(system.box_vector[i, j].value_in_unit(nanometers)))
gro.write('\n')
|
ctk3b/InterMol
|
intermol/gromacs/grofile_parser.py
|
Python
|
mit
| 4,947
|
[
"Gromacs",
"LAMMPS"
] |
023361bd833683eb148c7b2e1dd87fba11d96f6c8a128d286a26e35e6d61dbae
|
#!/usr/bin/env python
#
# PDBList.py
#
# A tool for tracking changes in the PDB Protein Structure Database.
#
# Version 2.0
#
# (c) 2003 Kristian Rother
# This work was supported by the German Ministry of Education
# and Research (BMBF). Project http://www.bcbio.de
#
# Contact the author
# homepage : http://www.rubor.de/bioinf
# email : krother@genesilico.pl
#
#
# This Code is released under the conditions of the Biopython license.
# It may be distributed freely with respect to the original author.
# Any maintainer of the BioPython code may change this notice
# when appropriate.
#
# Last modified on Fri, Oct 24th 2006, Warszawa
#
# Removed 'write' options from retrieve_pdb_file method: it is not used.
# Also added a 'dir' options (pdb file is put in this directory if given),
# and an 'exist' option (test if the file is already there). This method
# now returns the name of the downloaded uncompressed file.
#
# -Thomas, 1/06/04
#
#
# Including bugfixes from Sunjoong Lee (9/2006)
#
"""Access the PDB over the internet (for example to download structures)."""
import os
import shutil
import urllib
class PDBList:
"""
This class provides quick access to the structure lists on the
PDB server or its mirrors. The structure lists contain
four-letter PDB codes, indicating that structures are
new, have been modified or are obsolete. The lists are released
on a weekly basis.
It also provides a function to retrieve PDB files from the server.
To use it properly, prepare a directory /pdb or the like,
where PDB files are stored.
    If you want to use this module from behind a proxy, add
    the proxy variable to your environment, e.g. in Unix
export HTTP_PROXY='http://realproxy.charite.de:888'
(This can also be added to ~/.bashrc)
"""
PDB_REF="""
The Protein Data Bank: a computer-based archival file for macromolecular structures.
F.C.Bernstein, T.F.Koetzle, G.J.B.Williams, E.F.Meyer Jr, M.D.Brice, J.R.Rodgers, O.Kennard, T.Shimanouchi, M.Tasumi
J. Mol. Biol. 112 pp. 535-542 (1977)
http://www.pdb.org/.
"""
alternative_download_url = "http://www.rcsb.org/pdb/files/"
# just append PDB code to this, and then it works.
# (above URL verified with a XXXX.pdb appended on 2 Sept 2008)
def __init__(self,server='ftp://ftp.wwpdb.org', pdb=os.getcwd(), obsolete_pdb=None):
"""Initialize the class with the default server or a custom one."""
# remote pdb server
self.pdb_server = server
# local pdb file tree
self.local_pdb = pdb
# local file tree for obsolete pdb files
if obsolete_pdb:
self.obsolete_pdb = obsolete_pdb
else:
self.obsolete_pdb = os.path.join(self.local_pdb, 'obsolete')
if not os.access(self.obsolete_pdb,os.F_OK):
os.makedirs(self.obsolete_pdb)
# variables for command-line options
self.overwrite = 0
self.flat_tree = 0
def get_status_list(self,url):
"""Retrieves a list of pdb codes in the weekly pdb status file
        from the given URL. Used by get_recent_changes.
        The list files parsed by this method now simply contain
        one PDB code per line.
"""
handle = urllib.urlopen(url)
answer = []
for line in handle:
pdb = line.strip()
assert len(pdb)==4
answer.append(pdb)
handle.close()
return answer
def get_recent_changes(self):
"""Returns three lists of the newest weekly files (added,mod,obsolete).
Reads the directories with changed entries from the PDB server and
returns a tuple of three URL's to the files of new, modified and
obsolete entries from the most recent list. The directory with the
largest numerical name is used.
Returns None if something goes wrong.
        Contents of the data/status dir (20031013 would be used):
drwxrwxr-x 2 1002 sysadmin 512 Oct 6 18:28 20031006
drwxrwxr-x 2 1002 sysadmin 512 Oct 14 02:14 20031013
-rw-r--r-- 1 1002 sysadmin 1327 Mar 12 2001 README
"""
url = urllib.urlopen(self.pdb_server+'/pub/pdb/data/status/')
# added by S.Lee
# recent = filter(lambda x: x.isdigit(), \
# map(lambda x: x.split()[-1], url.readlines()))[-1]
recent = filter(str.isdigit,
(x.split()[-1] for x in url.readlines())
)[-1]
path = self.pdb_server+'/pub/pdb/data/status/%s/'%(recent)
# retrieve the lists
added = self.get_status_list(path+'added.pdb')
modified = self.get_status_list(path+'modified.pdb')
obsolete = self.get_status_list(path+'obsolete.pdb')
return [added,modified,obsolete]
def get_all_entries(self):
"""Retrieves a big file containing all the
PDB entries and some annotation to them.
Returns a list of PDB codes in the index file.
"""
print "retrieving index file. Takes about 5 MB."
url = urllib.urlopen(self.pdb_server+'/pub/pdb/derived_data/index/entries.idx')
# extract four-letter-codes
# entries = map(lambda x: x[:4], \
# filter(lambda x: len(x)>4, url.readlines()[2:]))
return [line[:4] for line in url.readlines()[2:] if len(line) > 4]
def get_all_obsolete(self):
"""Returns a list of all obsolete entries ever in the PDB.
Returns a list of all obsolete pdb codes that have ever been
in the PDB.
Gets and parses the file from the PDB server in the format
(the first pdb_code column is the one used). The file looks
like this:
LIST OF OBSOLETE COORDINATE ENTRIES AND SUCCESSORS
OBSLTE 31-JUL-94 116L 216L
...
OBSLTE 29-JAN-96 1HFT 2HFT
OBSLTE 21-SEP-06 1HFV 2J5X
OBSLTE 21-NOV-03 1HG6
OBSLTE 18-JUL-84 1HHB 2HHB 3HHB
OBSLTE 08-NOV-96 1HID 2HID
OBSLTE 01-APR-97 1HIU 2HIU
OBSLTE 14-JAN-04 1HKE 1UUZ
...
"""
handle = urllib.urlopen(self.pdb_server+'/pub/pdb/data/status/obsolete.dat')
# extract pdb codes. Could use a list comprehension, but I want
# to include an assert to check for mis-reading the data.
obsolete = []
for line in handle:
if not line.startswith("OBSLTE ") : continue
pdb = line.split()[2]
assert len(pdb)==4
obsolete.append(pdb)
handle.close()
return obsolete
def retrieve_pdb_file(self,pdb_code, obsolete=0, compression='.gz',
uncompress="gunzip", pdir=None):
"""Retrieves a PDB structure file from the PDB server and
stores it in a local file tree.
The PDB structure is returned as a single string.
If obsolete is 1, the file will be by default saved in a special file tree.
The compression should be '.Z' or '.gz'. 'uncompress' is
the command called to uncompress the files.
@param pdir: put the file in this directory (default: create a PDB-style directory tree)
@type pdir: string
@return: filename
@rtype: string
"""
# get the structure
code=pdb_code.lower()
filename="pdb%s.ent%s"%(code,compression)
if not obsolete:
url=(self.pdb_server+
'/pub/pdb/data/structures/divided/pdb/%s/pdb%s.ent%s'
% (code[1:3],code,compression))
else:
url=(self.pdb_server+
'/pub/pdb/data/structures/obsolete/pdb/%s/pdb%s.ent%s'
% (code[1:3],code,compression))
# in which dir to put the pdb file?
if pdir is None:
if self.flat_tree:
if not obsolete:
path=self.local_pdb
else:
path=self.obsolete_pdb
else:
# Put in PDB style directory tree
if not obsolete:
path=os.path.join(self.local_pdb, code[1:3])
else:
path=os.path.join(self.obsolete_pdb,code[1:3])
else:
# Put in specified directory
path=pdir
if not os.access(path,os.F_OK):
os.makedirs(path)
filename=os.path.join(path, filename)
# the final uncompressed file
final_file=os.path.join(path, "pdb%s.ent" % code)
# check whether the file exists
if not self.overwrite:
if os.path.exists(final_file):
print "file exists, not retrieved %s" % final_file
return final_file
# Retrieve the file
print 'retrieving %s' % url
lines=urllib.urlopen(url).read()
open(filename,'wb').write(lines)
# uncompress the file
os.system("%s %s" % (uncompress, filename))
return final_file
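    # Example (hypothetical paths): PDBList(pdb='/data/pdb') followed by
    # retrieve_pdb_file('1fat', pdir='/tmp') fetches pdb1fat.ent.gz from
    # the server and returns '/tmp/pdb1fat.ent' once uncompressed.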
def update_pdb(self):
"""
I guess this is the 'most wanted' function from this module.
It gets the weekly lists of new and modified pdb entries and
automatically downloads the according PDB files.
You can call this module as a weekly cronjob.
"""
assert os.path.isdir(self.local_pdb)
assert os.path.isdir(self.obsolete_pdb)
new, modified, obsolete = self.get_recent_changes()
for pdb_code in new+modified:
try:
#print 'retrieving %s' % pdb_code
self.retrieve_pdb_file(pdb_code)
except Exception:
print 'error %s\n' % pdb_code
# you can insert here some more log notes that
# something has gone wrong.
# move the obsolete files to a special folder
for pdb_code in obsolete:
if self.flat_tree:
old_file = os.path.join(self.local_pdb,
'pdb%s.ent' % pdb_code)
new_dir = self.obsolete_pdb
else:
old_file = os.path.join(self.local_pdb, pdb_code[1:3],
'pdb%s.ent' % pdb_code)
new_dir = os.path.join(self.obsolete_pdb, pdb_code[1:3])
new_file = os.path.join(new_dir, 'pdb%s.ent' % pdb_code)
if os.path.isfile(old_file):
if not os.path.isdir(new_dir):
os.mkdir(new_dir)
try:
shutil.move(old_file, new_file)
except Exception:
print "Could not move %s to obsolete folder" % old_file
elif os.path.isfile(new_file):
print "Obsolete file %s already moved" % old_file
else:
print "Obsolete file %s is missing" % old_file
def download_entire_pdb(self, listfile=None):
"""Retrieve all PDB entries not present in the local PDB copy.
Writes a list file containing all PDB codes (optional, if listfile is
given).
"""
entries = self.get_all_entries()
for pdb_code in entries:
self.retrieve_pdb_file(pdb_code)
# Write the list
if listfile:
outfile = open(listfile, 'w')
outfile.writelines((x+'\n' for x in entries))
outfile.close()
def download_obsolete_entries(self, listfile=None):
"""Retrieve all obsolete PDB entries not present in the local obsolete
PDB copy.
Writes a list file containing all PDB codes (optional, if listfile is
given).
"""
entries = self.get_all_obsolete()
for pdb_code in entries:
self.retrieve_pdb_file(pdb_code, obsolete=1)
# Write the list
if listfile:
outfile = open(listfile, 'w')
outfile.writelines((x+'\n' for x in entries))
outfile.close()
# this is actually easter egg code not used by any of the methods
# maybe someone will find it useful.
#
def get_seqres_file(self,savefile='pdb_seqres.txt'):
"""Retrieves a (big) file containing all the sequences of PDB entries
and writes it to a file.
"""
print "retrieving sequence file. Takes about 15 MB."
url = urllib.urlopen(self.pdb_server +
'/pub/pdb/derived_data/pdb_seqres.txt')
lines = url.readlines()
outfile = open(savefile, 'w')
outfile.writelines(lines)
outfile.close()
if __name__ == '__main__':
import sys
doc = """PDBList.py
(c) Kristian Rother 2003, Contributed to BioPython
Usage:
PDBList.py update <pdb_path> [options] - write weekly PDB updates to
local pdb tree.
PDBList.py all <pdb_path> [options] - write all PDB entries to
local pdb tree.
PDBList.py obsol <pdb_path> [options] - write all obsolete PDB
entries to local pdb tree.
PDBList.py <PDB-ID> <pdb_path> [options] - retrieve single structure
Options:
-d A single directory will be used as <pdb_path>, not a tree.
-o Overwrite existing structure files.
"""
print doc
if len(sys.argv)>2:
pdb_path = sys.argv[2]
pl = PDBList(pdb=pdb_path)
if len(sys.argv)>3:
for option in sys.argv[3:]:
if option == '-d': pl.flat_tree = 1
elif option == '-o': pl.overwrite = 1
else:
pdb_path = os.getcwd()
pl = PDBList()
pl.flat_tree = 1
if len(sys.argv) > 1:
if sys.argv[1] == 'update':
# update PDB
print "updating local PDB at "+pdb_path
pl.update_pdb()
elif sys.argv[1] == 'all':
# get the entire PDB
pl.download_entire_pdb()
elif sys.argv[1] == 'obsol':
# get all obsolete entries
            pl.download_obsolete_entries()
elif len(sys.argv[1]) == 4 and sys.argv[1][0].isdigit():
# get single PDB entry
pl.retrieve_pdb_file(sys.argv[1],pdir=pdb_path)
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/PDB/PDBList.py
|
Python
|
gpl-2.0
| 14,525
|
[
"Biopython"
] |
5b8d8c091f51fb8e9e17de5ede8c7f239c5ed4a0bd23d0332fc873033e0e8c6c
|
"""
Copyright (C) 2014, Jaguar Land Rover
This program is licensed under the terms and conditions of the
Mozilla Public License, version 2.0. The full text of the
Mozilla Public License is at https://www.mozilla.org/MPL/2.0/
Maintainer: Rudolf Streif (rstreif@jaguarlandrover.com)
"""
from django.contrib import admin, messages
from devices.models import Device, Remote
from devices.tasks import send_remote
import logging
logger = logging.getLogger('rvi')
def replace(s):
    """Replace spaces in a string with HTML line breaks."""
    return s.replace(" ", "<br>")
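# e.g. replace("no keyless entry") -> "no<br>keyless<br>entry"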
class RemoteInline(admin.StackedInline):
"""
A Remote is associated with a Device. We use this Inline to show
all Remotes of a Device on the Device's detail page.
"""
model = Remote
extra = 1
fieldsets = [
(None, {'fields': ['rem_name']}),
(None, {'fields': ['rem_device']}),
(None, {'fields': ['rem_vehicle']}),
(None, {'fields': ['rem_validfrom', 'rem_validto']}),
(None, {'fields': ['rem_lock', 'rem_engine', 'rem_trunk', 'rem_horn', 'rem_lights', 'rem_windows', 'rem_hazard']}),
]
def has_add_permission(self, request):
return True
class DeviceAdmin(admin.ModelAdmin):
"""
Administration view for Devices
"""
fieldsets = [
(None, {'fields': ['dev_name']}),
('Owner Information', {'fields': ['dev_owner', 'dev_mdn']}),
('Device Information', {'fields': ['dev_uuid', 'dev_min', 'dev_imei', 'dev_wifimac', 'dev_btmac']}),
('RVI Information', {'fields': ['dev_rvibasename']}),
('Security Information', {'fields': ['dev_key']}),
]
list_display = ('dev_name', 'dev_owner', 'dev_mdn', 'account')
inlines = [RemoteInline]
def save_model(self, request, obj, form, change):
obj.account = request.user
obj.save()
class RemoteAdmin(admin.ModelAdmin):
"""
Administration view for Remotes
"""
fieldsets = [
(None, {'fields': ['rem_name']}),
('Device Information', {'fields': ['rem_device']}),
('Vehicle Information', {'fields': ['rem_vehicle']}),
('Validity', {'fields': ['rem_validfrom', 'rem_validto']}),
('Authorizations', {'fields': ['rem_lock', 'rem_engine', 'rem_trunk', 'rem_horn', 'rem_lights', 'rem_windows', 'rem_hazard']}),
]
list_display = ('rem_name', 'rem_device', 'rem_vehicle', 'rem_validfrom', 'rem_validto', 'rem_lock', 'rem_engine', 'rem_trunk', 'rem_horn', 'rem_lights', 'rem_windows', 'rem_hazard')
def send_remotes(self, request, remotes):
remotes_sent = 0
for remote in remotes:
logger.info('Sending Remote: %s', remote.get_name())
result = send_remote(remote)
if result:
logger.info('Sending Remote: %s - successful', remote.get_name())
remotes_sent += 1
else:
logger.error('Sending Remote: %s - failed', remote.get_name())
self.message_user(request, "Sending Remote: %s - failed." % remote.get_name(), messages.ERROR)
if (remotes_sent == 1):
self.message_user(request, "%s Remote was successfully sent." % remotes_sent, messages.INFO)
elif (remotes_sent > 1):
self.message_user(request, "%s Remotes were successfully sent." % remotes_sent, messages.INFO)
if (len(remotes) - remotes_sent > 0):
self.message_user(request, "Failed sending %s Remotes." % (len(remotes) - remotes_sent), messages.WARNING)
send_remotes.short_description = "Send selected Remotes"
actions = [send_remotes]
admin.site.register(Device, DeviceAdmin)
admin.site.register(Remote, RemoteAdmin)
|
rstreif/rvi_backend
|
web/devices/admin.py
|
Python
|
mpl-2.0
| 3,806
|
[
"Jaguar"
] |
5fdbc1591d03ff86298f52460d01454c6f7e6154fb7c9a30919c70b08af61015
|
Symbol
TFSC
TFSCR
TFSCU
TFSCW
PIH
FLWS
FCTY
FCCY
SRCE
VNET
TWOU
JOBS
SIXD
CAFD
EGHT
AVHI
SHLM
AAON
ABAX
ABY
ABGB
ABEO
ABEOW
ABIL
ABILW
ABMD
AXAS
ACTG
ACHC
ACAD
ACST
AXDX
XLRN
ANCX
ARAY
VXDN
VXUP
ACRX
ACET
AKAO
ACHN
ACIW
ACRS
ACNB
ACOR
ACTS
ACPW
ATVI
ACTA
ACUR
ACXM
ADMS
ADMP
ADAP
ADUS
AEY
IOTS
ADMA
ADBE
ADTN
ADRO
AAAP
AEIS
AMD
ADXS
ADXSW
MAUI
YPRO
AEGR
AEGN
AEHR
AMTX
AEPI
AERI
AVAV
AEZS
AEMD
AFMD
AFFX
AGEN
AGRX
AGYS
AGIO
AGFS
AGFSW
AIMT
AIRM
AIRT
ATSG
AMCN
AIXG
AKAM
AKTX
AKBA
AKER
AKRX
ALRM
ALSK
AMRI
ABDC
ADHD
ALDR
ALDX
ALXN
ALXA
ALCO
ALGN
ALIM
ALKS
ABTX
ALGT
AFOP
AIQ
AHGP
ARLP
AHPI
AMOT
ALQA
ALLT
MDRX
AFAM
ALNY
AOSL
GOOG
GOOGL
SMCP
ATEC
ASPS
AIMC
AMAG
AMRN
AMRK
AYA
AMZN
AMBC
AMBCW
AMBA
AMCX
DOX
AMDA
AMED
UHAL
ATAX
AMOV
AAL
AGNC
AGNCB
AGNCP
MTGE
MTGEP
ACSF
ACAS
GNOW
AETI
AMIC
AMNB
ANAT
APEI
ARII
AMRB
ASEI
AMSWA
AMSC
AMWD
CRMT
ABCB
AMSF
ASRV
ASRVP
ATLO
AMGN
FOLD
AMKR
AMPH
AMSG
AMSGP
ASYS
AFSI
AMRS
ANAC
ANAD
ADI
ALOG
AVXL
ANCB
ABCW
ANDA
ANDAR
ANDAU
ANDAW
ANGI
ANGO
ANIP
ANIK
ANSS
ATRS
ANTH
ABAC
ZLIG
ATNY
APIC
APOG
APOL
AINV
AMEH
APPF
AAPL
ARCI
APDN
APDNW
AGTC
AMAT
AMCC
AAOI
AREX
APRI
APTO
AQMS
AQXP
AUMA
AUMAU
AUMAW
ARDM
PETX
ABUS
ARCW
ABIO
RKDA
ARCB
ACGL
APLP
ACAT
ARDX
ARNA
ARCC
AGII
AGIIL
ARGS
ARIS
ARIA
ARKR
ARMH
ARTX
ARWA
ARWAR
ARWAU
ARWAW
ARQL
ARRY
ARRS
DWAT
AROW
ARWR
ARTNA
ARTW
PUMP
ASBB
ASNA
ASND
ASCMA
ASTI
APWC
ASML
AZPN
ASMB
ASFI
ASTE
ALOT
ATRO
ASTC
ASUR
ATAI
ATRA
ATHN
ATHX
AAPC
AAME
ACBI
ACFC
ATNI
ATLC
AAWW
AFH
TEAM
ATML
ATOS
ATRC
ATRI
ATTU
LIFE
AUBN
AUDC
AUPH
EARS
ADAT
ABTL
ADSK
AGMX
ADP
AVGO
AAVL
AVNU
AVEO
AVNW
AVID
AVGR
CAR
AWRE
ACLS
AXPW
AXPWW
AXGN
AXSM
AXTI
BCOM
RILY
BOSC
BEAV
BIDU
BCPC
BWINA
BWINB
BLDP
BANF
BANFP
BKMU
BOCH
BMRC
BKSC
BOTJ
OZRK
BFIN
BWFG
BANR
BZUN
BHAC
BHACR
BHACU
BHACW
BBSI
BSET
BYBK
BYLK
BV
BBCN
BCBP
BECN
BSF
BBGI
BEBE
BBBY
BELFA
BELFB
BLPH
BLCM
BNCL
BNFT
BNTC
BNTCW
BGCP
BGFV
BIND
ORPN
BASI
BCDA
BIOC
BCRX
BIOD
BDSI
BIIB
BIOL
BLFS
BLRX
BMRN
BVXV
BVXVW
BPTH
BIOS
BBC
BBP
BSTC
BSPM
BOTA
TECH
BEAT
BITI
BDMS
BJRI
BBOX
BDE
BLKB
BBRY
HAWK
BKCC
ADRA
ADRD
ADRE
ADRU
BLMN
BCOR
BLBD
BUFF
BBLU
BHBK
NILE
BLUE
BKEP
BKEPP
BPMC
ITEQ
STCK
BNCN
BOBE
BOFI
WIFI
BOJA
BOKF
BONA
BNSO
BPFH
BPFHP
BPFHW
EPAY
BDBD
BLVD
BLVDU
BLVDW
BOXL
BCLI
BBRG
BBEP
BBEPP
BDGE
BLIN
BRID
BCOV
BRCM
BSFT
BVSN
BYFC
BWEN
BRCD
BRKL
BRKS
BRKR
BMTC
BLMT
BSQR
BWLD
BLDR
BUR
CFFI
CHRW
CA
CCMP
CDNS
CDZI
CACQ
CZR
CSTE
PRSS
CLBS
CLMS
CHY
CHI
CCD
CFGE
CHW
CGO
CSQ
CAMP
CVGW
CFNB
CALA
CALD
CALM
CLMT
ABCD
CAC
CAMT
CSIQ
CGIX
CPHC
CBNJ
CPLA
CBF
CCBG
CPLP
CSWC
CPTA
CLAC
CLACU
CLACW
CFFN
CAPN
CAPNW
CAPR
CPST
CARA
CARB
CBYL
CRDC
CFNL
CRME
CSII
CATM
CDNA
CECO
CTRE
CKEC
CLBH
CARO
CART
CRZO
TAST
CRTN
CARV
CASM
CACB
CSCD
CWST
CASY
CASI
CASS
CATB
CBIO
CPRX
CATY
CATYW
CVCO
CAVM
CBFV
CNLM
CNLMR
CNLMU
CNLMW
CBOE
CDK
CDW
CECE
CPXX
CELG
CELGZ
CLDN
CLDX
CLRB
CLRBW
CLLS
CBMG
CLSN
CYAD
CEMP
CETX
CSFL
CETV
CFBK
CENT
CENTA
CVCY
CFCB
CENX
CNBKA
CNTY
CPHD
CRNT
CERC
CERCW
CERCZ
CERE
CERN
CERU
CERS
KOOL
CEVA
CSBR
CYOU
HOTR
HOTRW
CTHR
GTLS
CHTR
CHFN
CHKP
CHEK
CHEKW
CEMI
CHFC
CCXI
CHMG
CHKE
CHEV
CHMA
CBNK
PLCE
CMRX
CADC
CALI
CAAS
CBAK
CBPO
CCCL
CCCR
CCRC
JRJC
HGSH
CHLN
CNIT
CJJD
HTHT
CHNR
CREG
CRDI
CSUN
CNTF
CXDC
CNYD
CCIH
CNET
IMOS
CHSCL
CHSCM
CHSCN
CHSCO
CHSCP
CHDN
CHUY
CDTX
CIFC
CMCT
CMPR
CINF
CIDM
CTAS
CPHR
CRUS
CSCO
CTRN
CZNC
CZWI
CZFC
CIZN
CTXS
CHCO
CIVB
CIVBP
CDTI
CLNE
CLNT
CLFD
CLRO
CLIR
CBLI
CSBK
CLVS
CMFN
CME
CCNE
CISG
CNV
CWAY
COBZ
COKE
CDRB
CDXS
CVLY
JVA
CCOI
CGNT
CGNX
CTSH
COHR
CHRS
COHU
CLCT
COLL
CIGI
CBAN
CLCD
COLB
COLM
CMCO
CBMX
CMCSA
CBSH
CBSHP
CUBN
CVGI
COMM
CSAL
JCS
ESXB
CCFI
CYHHZ
CTBI
CWBC
COB
CVLT
CGEN
CPSI
CTG
SCOR
CHCI
CMTL
CNAT
CNCE
CXRX
CCUR
CDOR
CDORO
CDORP
CFMS
CONG
CNFR
CNMD
CTWS
CNOB
CNXR
CONN
CNSL
CWCO
CTCT
CPSS
CFRX
CFRXW
CTRV
CTRL
CPRT
COYN
COYNW
CRBP
CORT
BVA
CORE
CORI
CSOD
CRVL
COSI
CSGP
COST
CPAH
ICBK
CVTI
COVS
COWN
COWNL
PMTS
CPSH
CRAI
CBRL
BREW
CRAY
CACC
GLDI
CREE
CRESY
CRTO
CROX
CCRN
XRDC
CRDS
CRWS
CRWN
CYRX
CYRXW
CSGS
CCLP
CSPI
CSWI
CSX
CTCM
CTIC
CTIB
CTRP
CUNB
CUI
CPIX
CMLS
CRIS
CUTR
CVBF
CVV
CYAN
CYBR
CYBE
CYCC
CYCCP
CBAY
CYNA
CYNO
CY
CYRN
CONE
CYTK
CTMX
CYTX
CTSO
CYTR
DJCO
DAKT
DAIO
DTLK
DRAM
DWCH
PLAY
DTEA
DWSN
DBVT
DHRM
DFRG
TACO
TACOW
DCTH
DGAS
DELT
DELTW
DENN
XRAY
DEPO
DSCI
DERM
DEST
DXLG
DSWL
DTRM
DXCM
DHXM
DMND
DHIL
FANG
DCIX
DRNA
DGII
DMRC
DRAD
DGLY
APPS
DCOM
DMTX
DIOD
DPRX
DISCA
DISCB
DISCK
DSCO
DISH
DVCR
SAUC
DLHC
DNBF
DLTR
DGICA
DGICB
DMLP
DORM
EAGL
EAGLU
EAGLW
DRWI
DRWIW
DWA
DRYS
DSKX
DSPG
CADT
CADTR
CADTU
CADTW
DTSI
DLTH
DNKN
DRRX
DXPE
DYAX
BOOM
DYSL
DYNT
DVAX
ETFC
EBMT
EGBN
EGLE
EGRX
ELNK
EWBC
EACQ
EACQU
EACQW
EML
EVBS
EBAY
EBIX
ELON
ECHO
ECTE
SATS
EEI
ECAC
ECACR
ECACU
ESES
EDAP
EDGE
EDGW
EDUC
EFUT
EGAN
EGLT
EHTH
LOCO
EMITF
ESLT
ERI
ELRC
ESIO
EA
EFII
ELSE
ELEC
ELECU
ELECW
EBIO
RDEN
CAPX
ESBK
LONG
ELTK
EMCI
EMCF
EMKR
EMMS
EMMSP
NYNY
NYNYR
ERS
ENTA
ECPG
WIRE
ENDP
ECYT
ELGX
EIGI
WATT
EFOI
ERII
EXXI
ENOC
ENG
ENPH
ESGR
ENFC
ENTG
ENTL
ETRM
EBTC
EFSC
EGT
ENZN
ENZY
EPIQ
EPRS
EPZM
PLUS
EQIX
EQFN
EQBK
EAC
ERIC
ERIE
ESCA
ESMC
ESPR
ESSA
EPIX
ESND
ESSF
ETSY
CLWT
EEFT
ESEA
EVEP
EVK
EVLV
EVOK
EVOL
EXA
EXAS
EXAC
EXEL
EXFO
EXLS
EXPE
EXPD
EXPO
ESRX
EXTR
EYEG
EYEGW
EZCH
EZPW
FFIV
FB
FCS
FRP
FWM
FALC
DAVE
FARM
FFKT
FMNB
FARO
FAST
FATE
FBSS
FBRC
FDML
FNHC
FEIC
FHCO
FENX
GSM
FCSC
FGEN
ONEQ
LION
FDUS
FRGI
FSAM
FSC
FSCFL
FSFR
FITB
FITBI
FNGN
FISI
FNSR
FNJN
FNTC
FNTCU
FNTCW
FEYE
FBNC
FNLC
FRBA
BUSE
FBIZ
FCAP
FCFS
FCNCA
FCLF
FCBC
FCCO
FCFP
FBNK
FDEF
FFBC
FFBCW
FFIN
THFF
FFNW
FFWM
FGBI
INBK
FIBK
FRME
FMBH
FMBI
FNBC
FNFG
FNWB
FSFG
FSLR
FSBK
FPA
BICK
FBZ
FCAN
FTCS
FCA
FDT
FDTS
FV
IFV
FEM
FEMB
FEMS
FTSM
FEP
FEUZ
FGM
FTGC
FTHI
HYLS
FHK
FTAG
FTRI
FPXI
YDIV
SKYY
FJP
FLN
FTLB
LMBS
FMB
QABA
QCLN
GRID
CIBR
CARZ
RDVY
FONE
TDIV
QQEW
QQXT
QTEC
AIRR
QINC
FTSL
FKO
FCVT
FDIV
FSZ
FTW
TUSA
FKU
FUNC
SVVC
FMER
FSV
FISV
FIVE
FPRX
FIVN
FLML
FLKS
FLXN
SKOR
LKOR
MBSD
ASET
QLC
FLXS
FLEX
FLIR
FLDM
FFIC
FOMX
FOGO
FONR
FES
FORM
FORTY
FORR
FTNT
FBIO
FWRD
FORD
FWP
FOSL
FMI
FXCB
FOXF
FRAN
FELE
FRED
FREE
RAIL
FEIM
FRPT
FTR
FTRPR
FRPH
FSBW
FTD
FSYS
FTEK
FCEL
FORK
FULL
FULLL
FLL
FULT
FSNN
FFHL
GK
WILC
GAIA
GLPG
GALT
GALTU
GALTW
GALE
GLMD
GLPI
GPIC
GRMN
GGAC
GGACR
GGACU
GGACW
GARS
GCTS
GLSS
GENC
GNCMA
GFN
GFNCP
GFNSL
GENE
GNMK
GNCA
GHDX
GNST
GNTX
THRM
GNVC
GTWN
GEOS
GABC
GERN
GEVO
ROCK
GIGM
GIGA
GIII
GILT
GILD
GBCI
GLAD
GLADO
GOOD
GOODN
GOODO
GOODP
GAIN
GAINN
GAINO
GAINP
LAND
GLBZ
GBT
ENT
GBLI
GBLIZ
GPAC
GPACU
GPACW
GSOL
ACTX
QQQC
SOCL
ALTY
SRET
YLCO
GAI
GBIM
GLBS
GLRI
GLUU
GLYC
GOGO
GLNG
GMLP
GLDC
GDEN
GOGL
GBDC
GTIM
GPRO
GMAN
GRSH
GRSHU
GRSHW
GPIA
GPIAU
GPIAW
LOPE
GRVY
GBSN
GLDD
GSBC
GNBC
GRBK
GPP
GPRE
GCBC
GLRE
GRIF
GRFS
GRPN
OMAB
GGAL
GSIG
GSIT
GSVC
GTXI
GBNK
GFED
GUID
GIFI
GURE
GPOR
GWPH
GWGH
GYRO
HEES
HLG
HNRG
HALL
HALO
HBK
HMPR
HBHC
HBHCL
HNH
HAFC
HNSN
HQCL
HDNG
HLIT
HRMN
HRMNU
HRMNW
TINY
HART
HBIO
HCAP
HCAPL
HAS
HA
HCOM
HWKN
HWBK
HAYN
HDS
HIIQ
HCSG
HQY
HSTM
HWAY
HTLD
HTLF
HTWR
HTBX
HSII
HELE
HMNY
HMTV
HNNA
HCAC
HCACU
HCACW
HSIC
HERO
HTBK
HFWA
HEOP
HCCI
MLHR
HRTX
HSKA
HFFC
HIBB
HPJ
HIHO
HIMX
HIFS
HSGX
HMNF
HMSY
HOLI
HOLX
HBCP
HOMB
HFBL
HMIN
HMST
HTBI
CETC
HOFT
HFBC
HBNC
HZNP
HRZN
ZINC
HDP
HMHC
HWCC
HOVNP
HBMD
HSNI
HTGM
HUBG
HSON
HDSN
HBAN
HBANP
HURC
HURN
HTCH
HBP
HDRA
HDRAR
HDRAU
HDRAW
HYGS
IDSY
IACI
IKGH
IBKC
IBKCP
ICAD
IEP
ICFI
ICLR
ICON
ICUI
IPWR
INVE
IDRA
IDXX
DSKY
IROQ
IRG
RXDX
IIVI
KANG
IKNX
ILMN
ISNS
IMMR
ICCC
IMDZ
IMNP
IMGN
IMMU
IPXL
IMMY
INCR
SAAS
INCY
INDB
IBCP
IBTX
IDSA
INFN
INFI
IPCC
III
IFON
IMKTA
INWK
INNL
INOD
IPHS
IOSP
ISSC
INVA
INGN
ITEK
INOV
INO
NSIT
ISIG
INSM
IIIN
PODD
INSY
NTEC
IART
IDTI
IESC
INTC
IQNT
IPCI
IPAR
IBKR
ININ
ICPT
ICLD
ICLDW
IDCC
TILE
IMI
INAP
IBOC
ISCA
IGLD
IIJI
IDXG
XENT
INTX
ISIL
IILG
IVAC
INTL
INTLL
ITCI
IIN
INTU
ISRG
INVT
SNAK
ISTR
ISBC
ITIC
NVIV
IVTY
IONS
IPAS
IPGP
IRMD
IRIX
IRDM
IRDMB
IRBT
IRWD
IRCP
COMT
IFEU
IFGL
GNMA
ACWX
ACWI
AAXJ
EEMA
EEML
EUFN
IEUS
QAT
UAE
IBB
SOXX
EMIF
ICLN
WOOD
INDY
ISHG
IGOV
ISLE
ISRL
ITRI
ITRN
ITUS
XXIA
IXYS
JJSF
MAYS
JBHT
JCOM
JASO
JKHY
JACK
JXSB
JAXB
JAGX
JAKK
JMBA
JRVR
ERW
JASN
JASNW
JAZZ
JD
JBLU
JTPY
JCTCF
DATE
JST
JIVE
WYIG
WYIGU
WYIGW
JOEZ
JBSS
JOUT
JNP
JUNO
KTWO
KALU
KMDA
KNDI
KPTI
KBSF
KCAP
KRNY
KELYA
KELYB
KMPH
KFFB
KERX
GMCR
KEQU
KTEC
KTCC
KFRC
KE
KBAL
KIN
KNMD
KGJI
KINS
KONE
KIRK
KITE
KTOV
KTOVW
KLAC
KLOX
KLXI
KONA
KZ
KOPN
KRNT
KOSS
KWEB
KTOS
KUTV
KLIC
KURA
KVHI
FSTR
LJPC
LSBK
LSBG
LBAI
LKFN
LAKE
LRCX
LAMR
LANC
LNDC
LARK
LMRK
LE
LSTR
LNTH
LTRX
LPSB
LSCC
LAWS
LAYN
LCNB
LDRH
LBIX
LGCY
LGCYO
LGCYP
LTXB
DDBI
EDBI
LVHD
UDBI
LMAT
TREE
LXRX
LGIH
LHCG
LBRDA
LBRDK
LBTYA
LBTYB
LBTYK
LILA
LILAK
LVNTA
LVNTB
QVCA
QVCB
LMCA
LMCB
LMCK
TAX
LTRPA
LTRPB
LPNT
LCUT
LFVN
LWAY
LGND
LTBR
LPTH
LLEX
LIME
LLNW
LMNR
LINC
LECO
LIND
LINDW
LLTC
LNCO
LINE
LBIO
LIOX
LPCN
LQDT
LFUS
LIVN
LOB
LIVE
LPSN
LKQ
LMFA
LMFAW
LMIA
LOGI
LOGM
LOJN
EVAR
CNCR
LORL
LOXO
LPTN
LPLA
LRAD
LYTS
LULU
LITE
LMNX
LMOS
LUNA
MBTF
MTSI
MCBC
MFNC
MCUR
MGNX
MAGS
MGLN
MPET
MGIC
CALL
MNGA
MGYR
MHLD
MHLDO
MSFG
COOL
MMYT
MBUU
MLVF
MAMS
MANH
LOAN
MNTX
MTEX
MNKD
MANT
MAPI
MARA
MCHX
MARPS
MRNS
GNRX
MKTX
MKTO
MRKT
MRLN
MAR
MBII
MRTN
MMLP
MRVL
MASI
MTCH
MTLS
MTRX
MAT
MATR
MATW
MFRM
MTSN
MXIM
MXWL
MZOR
MBFI
MBFIP
MCFT
MGRC
MDCA
MCOX
TAXI
MDAS
MTBC
MTBCP
MNOV
MDSO
MDGS
MDVN
MDWD
MDVX
MDVXW
MEET
MEIP
MPEL
MLNX
MELR
MEMP
MRD
MENT
MTSL
MELI
MBWM
MERC
MBVT
MRCY
EBSB
VIVO
MMSI
MACK
MSLI
MLAB
MESO
CASH
MBLX
MEOH
METR
MFRI
MGCD
MGEE
MGPI
MCHP
MU
MICT
MICTW
MSCC
MSFT
MSTR
MVIS
MPB
MTP
MCEP
MBRG
MBCN
MSEX
MOFG
MIME
MDXG
MNDO
MB
NERV
MRTX
MIRN
MSON
MIND
MITK
MITL
MKSI
MMAC
MINI
MOBL
MOCO
MDSY
MLNK
MOKO
MOLG
MNTA
MOMO
MCRI
MNRK
MDLZ
MGI
MPWR
TYPE
MNRO
MRCC
MNST
MHGC
MORN
MOSY
MPAA
MDM
MRVC
MSBF
MSG
MTSC
MDIV
LABL
MFLX
MFSF
MYL
MYOK
MYOS
MYRG
MYGN
NBRV
NAKD
NANO
NSPH
NSTG
NK
NSSC
NDAQ
NTRA
NATH
NAUH
NKSH
FIZZ
NCMI
NCOM
NGHC
NGHCO
NGHCP
NGHCZ
NHLD
NATI
NATL
NPBC
NRCIA
NRCIB
NSEC
NWLI
NAII
NHTC
NATR
BABY
NVSL
NAVI
NBCP
NBTB
NCIT
NKTR
NEOG
NEO
NEON
NEOS
NEOT
NVCN
NRX
NEPT
UEPS
NETE
NTAP
NTES
NFLX
NTGR
NLST
NTCT
NTWK
CUR
NBIX
NDRM
NURO
NUROW
NSIG
NYMT
NYMTO
NYMTP
NBBC
NLNK
NEWP
NWS
NWSA
NEWS
NEWT
NEWTZ
NXST
NVET
NFEC
EGOV
NICE
NICK
NIHD
NVLS
NMIH
NNBR
NDLS
NDSN
NSYS
NTK
NBN
NTIC
NTRS
NTRSP
NFBK
NRIM
NWBI
NWBO
NWBOW
NWPX
NCLH
NWFL
NVFY
NVMI
NVDQ
MIFI
NVAX
NVCR
NVGN
NTLS
NUAN
NMRX
NUTR
NTRI
NUVA
QQQX
NVEE
NVEC
NVDA
NXPI
NXTM
NXTD
NXTDW
NYMX
OIIM
OVLY
OASM
OCAT
OBCI
OPTT
ORIG
OSHC
OCFC
OCRX
OCLR
OFED
OCUL
OCLS
OCLSW
OMEX
ODP
OFS
OHAI
OVBC
OHRP
ODFL
OLBK
ONB
OPOF
OSBC
OSBCP
OLLI
ZEUS
OFLX
OMER
OMCL
OVTI
ON
OTIV
OGXI
OMED
ONTX
ONCS
ONTY
OHGI
ONVI
OTEX
OPXA
OPXAW
OPGN
OPGNW
OPHT
OBAS
OCC
OPHC
OPB
ORMP
OSUR
ORBC
ORBK
ORLY
OREX
SEED
OESX
ORIT
ORRF
OFIX
OSIS
OSIR
OSN
OTEL
OTIC
OTTR
OUTR
OVAS
OSTK
OXBR
OXBRW
OXFD
OXLC
OXLCN
OXLCO
OXGN
PFIN
PTSI
PCAR
PACE
PACEU
PACEW
PACB
PCBK
PEIX
PMBC
PPBI
PAAC
PAACR
PAACU
PAACW
PSUN
PCRX
PACW
PTIE
PAAS
PNRA
PANL
PZZA
FRSH
PRGN
PRGNL
PRTK
PRXL
PCYG
PSTB
PKBK
PRKR
PKOH
PARN
PTNR
PBHC
PATK
PNBK
PATI
PEGI
PDCO
PTEN
PAYX
PCTY
PYDS
PYPL
PBBI
PCCC
PCMI
PCTI
PDCE
PDFS
PDLI
PDVW
SKIS
PGC
PEGA
PCO
PENN
PFLT
PNNT
PWOD
PTXP
PEBO
PEBK
PFBX
PFIS
PBCT
PUB
PRCP
PPHM
PPHMP
PRFT
PFMT
PERF
PERI
PESI
PTX
PERY
PRSN
PRSNW
PGLC
PETS
PFSW
PGTI
PHII
PHIIK
PAHC
PHMD
PLAB
PICO
PIRS
PPC
PME
PNK
PNFP
PPSI
PXLW
PLPM
PLXS
PLUG
PLBC
PSTI
PMCS
PMV
PBSK
PNTR
PCOM
PLCM
POOL
POPE
PLKI
BPOP
BPOPM
BPOPN
PBIB
PTLA
PBPB
PCH
POWL
POWI
PSIX
PDBC
DWTR
IDLB
PRFZ
PAGG
PSAU
IPKW
LDRI
LALT
PNQI
QQQ
USLB
PSCD
PSCC
PSCE
PSCF
PSCH
PSCI
PSCT
PSCM
PSCU
POZN
PRAA
PRAH
PRAN
PFBC
PLPC
PRXI
PFBI
PINC
LENS
PRGX
PSMT
PBMD
PNRG
PRMW
PRIM
PRZM
PVTB
PVTBP
PDEX
IPDN
PFIE
PGNX
PRGS
DNAI
PFPT
PRPH
PRQR
BIB
UBIO
TQQQ
ZBIO
SQQQ
BIS
PSEC
PRTO
PRTA
PWX
PVBC
PROV
PBIP
PSDV
PMD
PTC
PTCT
PULB
PULM
PCYO
PXS
QADA
QADB
QCCO
QCRH
QGEN
QIWI
QKLS
QLIK
QLGC
QLTI
QRVO
QCOM
QSII
QBAK
QLYS
QTWW
QRHC
QUIK
QDEL
QPAC
QPACU
QPACW
QNST
QUMU
QUNR
QTNT
RRD
RADA
RDCM
ROIA
ROIAK
RSYS
RDUS
RDNT
RDWR
RMBS
RAND
RLOG
GOLD
RPD
RPTP
RAVE
RAVN
ROLL
RICK
RCMT
RLOC
RDI
RDIB
RGSE
RELY
RNWK
RP
UTES
DAX
UK
QYLD
RCON
REPH
RRGB
RDHL
REDF
REGN
RGNX
DFVL
DFVS
DGLD
DLBL
DLBS
DSLV
DTUL
DTUS
DTYL
DTYS
FLAT
SLVO
STPP
TAPR
TVIX
TVIZ
UGLD
USLV
VIIX
VIIZ
XIV
ZIV
RGLS
REIS
RELV
RLYP
MARK
RNST
REGI
RNVA
RNVAW
RCII
RTK
RENT
RGEN
RPRX
RPRXW
RPRXZ
RJET
RBCAA
FRBK
RSAS
REFR
RESN
REXI
RECN
ROIC
SALE
RTRX
RVNC
RBIO
RVLT
RWLK
REXX
RFIL
RGCO
RIBT
RIBTW
RELL
RIGL
NAME
RNET
RITT
RITTW
RTTR
RIVR
RMI
RVSB
RLJE
RMGN
ROBO
FUEL
RMTI
RCKY
RMCF
RSTI
ROKA
ROSG
ROST
ROVI
RBPAA
RGLD
ROYL
RPXC
RRM
RTIX
RBCN
RUSHA
RUSHB
RUTH
RXII
RYAAY
STBA
SANW
SBRA
SBRAP
SABR
SAEX
SAFT
SAGE
SGNT
SAIA
SAJA
SALM
SAL
SAFM
SNDK
SASR
SGMO
SANM
GCVRZ
SPNS
SRPT
SBFG
SBFGP
SBAC
SCSC
SMIT
SCHN
SCHL
SCLN
SGMS
SQI
SCYX
SEAC
SBCF
STX
SHIP
SRSC
SHLD
SHLDW
SHOS
SPNE
SGEN
EYES
SNFCA
SEIC
SLCT
SCSS
SIGI
SELF
LEDS
SMLR
SMTC
SENEA
SENEB
SNMX
SQNM
SQBG
MCRB
SREV
SFBS
SEV
SVBI
SFXE
SGOC
SMED
SHSP
SHEN
SHLO
SCCI
SHPG
SCVL
SHBI
SHOR
SFLY
SIFI
SIEB
SIEN
BSRR
SWIR
SIFY
SIGM
SGMA
SGNL
SBNY
SBNYW
SLGN
SILC
SGI
SLAB
SIMO
SPIL
SSRI
SAMG
SFNC
SLP
SINA
SBGI
SINO
SVA
SIRI
SIRO
SRVA
SITO
SZMK
SKUL
SKYS
SKLN
SKLNU
MOBI
SPU
SKYW
SWKS
ISM
JSM
OSM
SLM
SLMAP
SLMBP
SMT
SMBK
SWHC
SMSI
SMTX
LNCE
SODA
SOHU
SLRC
SUNS
SLTD
SCTY
SEDG
SZYM
SONC
SOFO
SONS
SPHS
SORL
SRNE
SOHO
SOHOL
SOHOM
SFBC
SSB
SOCB
SFST
SMBC
SONA
SBSI
OKSB
SP
SPAN
SBSA
SGRP
SPKE
ONCE
SPAR
SPTN
SPPI
SPDC
ANY
SPEX
SAVE
SPLK
SPOK
SPWH
FUND
SFM
SPSC
SSNC
STAA
STAF
STMP
STLY
SPLS
SBLK
SBLKL
SBUX
STRZA
STRZB
STFC
STBZ
SNC
STDY
GASS
STLD
SXCL
SMRT
SBOT
STEM
STML
STXS
SRCL
SRCLP
STRL
SHOO
SSFN
SYBT
BANX
SGBK
SSKN
SSYS
STRT
STRS
STRA
STRM
SBBP
STB
SCMP
SUMR
SMMF
SSBI
SMMT
SNBC
SNHY
SNDE
SEMI
SNSS
STKL
SPWR
RUN
SBCP
SSH
SMCI
SPCB
SCON
SGC
SUPN
SPRT
SGRY
SCAI
SRDX
SBBX
TOR
SIVB
SIVBO
SYKE
SYMC
SSRG
SYNC
SYNL
SYNA
SNCR
SNDX
SGYP
SGYPU
SGYPW
ELOS
SNPS
SNTA
SYNT
SYMX
SYUT
SYPR
SYRX
TROW
TTOO
TAIT
TTWO
TLMR
TNDM
TLF
TNGO
TANH
TEDU
TASR
TATT
TAYD
TCPC
AMTD
TEAR
TECD
TCCO
TTGT
TGLS
TGEN
TSYS
TNAV
TTEC
TLGT
TENX
GLBL
TERP
TRTL
TRTLU
TRTLW
TBNK
TSRO
TESO
TSLA
TESS
TSRA
TTEK
TLOG
TTPH
TCBI
TCBIL
TCBIP
TCBIW
TXN
TXRH
TFSL
TGTX
ABCO
ANDE
TBBK
BONT
CG
CAKE
CHEF
TCFC
DSGX
DXYN
ENSG
XONE
FINL
FBMS
FLIC
TFM
GT
HABT
HCKT
HAIN
CUBA
INTG
JYNT
KEYW
KHC
MDCO
MIK
MIDD
NAVG
STKS
PCLN
PRSC
BITE
RMR
SPNC
ULTI
YORW
NCTY
TBPH
TST
TCRD
THLD
TICC
TTS
TIL
TSBK
TIPT
TITN
TTNP
TIVO
TMUS
TMUSP
TBRA
TKAI
TNXP
TISA
TOPS
TORM
TRCH
TSEM
TWER
CLUB
TOWN
TCON
TSCO
TWMC
TACT
TRNS
TBIO
TGA
TTHI
TZOO
TRVN
TCBK
TRIL
TRS
TRMB
TRIB
TRIP
TSC
TBK
TRIV
TROV
TROVU
TROVW
TRUE
THST
TRST
TRMK
TSRI
TTMI
TUBE
TCX
TUES
TOUR
HEAR
TUTI
TUTT
FOX
FOXA
TWIN
TRCB
USCR
PRTS
USEG
GROW
UREE
UBIC
UBNT
UFPT
ULTA
UCTT
RARE
ULBI
ULTR
UTEK
UMBF
UMPQ
UNAM
UNIS
UBSH
UNB
UNXL
QURE
UBCP
UBOH
UBSI
UCBA
UCBI
UCFC
UDF
UBNK
UFCS
UIHC
UNFI
UNTD
UBFO
USBI
USLM
UTHR
UG
UNTY
OLED
UEIC
UFPI
USAP
UACL
UVSP
UPIP
UPLD
URRE
URBN
ECOL
USAT
USATP
USAK
USMD
UTMD
UTIW
UTSI
VLRX
VALX
VALU
VNDA
VWOB
VNQI
VGIT
VCIT
VCLT
VGLT
VMBS
VNR
VNRAP
VNRBP
VNRCP
VONE
VONG
VONV
VTWO
VTWG
VTWV
VTHR
VCSH
VGSH
VTIP
BNDX
VXUS
VPCO
VPCOU
VRNS
VDSI
VBLT
VASC
VBIV
WOOF
VECO
APPY
VRA
VCYT
VSTM
VCEL
VRNT
VRSN
VRSK
VBTX
VRML
VSAR
VTNR
VRTX
VRTB
VIA
VIAB
VSAT
VIAV
VICL
VICR
CIZ
CID
CIL
CFO
CFA
CSF
CDC
CDL
CSB
CSA
VBND
VUSE
VIDI
VDTH
VGGL
VKTX
VBFC
VLGEA
VIP
VNOM
VIRC
VA
VIRT
VSCP
VRTS
VRTU
VISN
VTAE
VTL
VVUS
VOD
VLTC
VOXX
VYGR
VRNG
VSEC
VTVT
VUZI
VWR
WGBS
WBA
WRES
WAFD
WAFDW
WASH
WFBI
WSBF
WVE
WAVX
WNFM
WAYN
WSTG
WDFC
FLAG
WEB
WBMD
WB
WEBK
WEN
WERN
WSBC
WTBA
WSTC
WMAR
WABC
WBB
WSTL
WDC
WFD
WLB
WPRT
WEYS
WHLR
WHLRP
WHLRW
WHF
WHFBL
WFM
WILN
WHLM
WVVI
WVVIP
WLDN
WLFC
WLTW
WIBC
WIN
WING
WINA
WINS
WTFC
WTFCM
WTFCW
AGND
AGZD
HYND
HYZD
CXSE
EMCG
EMCB
DGRE
DXGE
WETF
DXJS
JGBB
DXKW
GULF
CRDT
DGRW
DGRS
DXPS
UBND
WIX
WLRH
WLRHU
WLRHW
WMIH
WBKC
WWD
WKHS
WRLD
WOWO
WPCS
WPPGY
WMGI
WMGIZ
WSFS
WSFSL
WSCI
WVFC
WYNN
XBIT
XELB
XCRA
XNCR
XBKS
XENE
XNPT
XGTI
XGTIW
XLNX
XOMA
XPLR
XCOM
XTLB
XNET
MESG
YHOO
YNDX
YOD
YCB
YRCW
YECO
YY
ZFGN
ZAGG
ZAIS
ZBRA
ZLTQ
ZHNE
Z
ZG
ZN
ZNWAA
ZION
ZIONW
ZIONZ
ZIOP
ZIXI
ZGNX
ZSAN
ZUMZ
ZYNE
ZNGA
|
computerpencils/ScraXBRL
|
scrape_lists/stock_exchanges/nasdaq.py
|
Python
|
mit
| 15,608
|
[
"CDK"
] |
f4cab10bdd46cc2f377290a463823acaf49c40101f92c3064555af1252dabc88
|
import numpy as np
from astroNN.lamost.lamost_shared import lamost_default_dr
def wavelength_solution(dr=None):
"""
    To return the wavelength solution
:param dr: data release
:type dr: Union(int, NoneType)
:return: wavelength solution array
:rtype: ndarray
:History: 2018-Mar-15 - Written - Henry Leung (University of Toronto)
"""
lamost_default_dr(dr=dr)
    # deliberately add 1e-5 to prevent numpy from generating an extra element
lamost_wavegrid = 10. ** np.arange(3.5682, 3.5682 - 1e-5 + 3909 * 10. ** -4., 10. ** -4.)
return lamost_wavegrid
def smooth_spec(flux, ivar, wavelength, L=50):
"""
Smooth a spectrum with a running Gaussian.
:param flux: The observed flux array.
:type flux: ndarray
:param ivar: The inverse variances of the fluxes.
:type ivar: ndarray
:param wavelength: An array of the wavelengths.
:type wavelength: ndarray
:param L: The width of the Gaussian in pixels.
:type L: int
:returns: An array of smoothed fluxes
:rtype: ndarray
"""
# Partial Credit: https://github.com/chanconrad/slomp/blob/master/lamost.py
w = np.exp(-0.5 * (wavelength[:, None] - wavelength[None, :]) ** 2 / L ** 2)
denominator = np.dot(ivar, w.T)
numerator = np.dot(flux * ivar, w.T)
bad_pixel = denominator == 0
smoothed = np.zeros(numerator.shape)
smoothed[~bad_pixel] = numerator[~bad_pixel] / denominator[~bad_pixel]
return smoothed
def pseudo_continuum(flux, ivar, wavelength=None, L=50, dr=None):
"""
Pseudo-Continuum normalise a spectrum by dividing by a Gaussian-weighted smoothed spectrum.
:param flux: The observed flux array.
:type flux: ndarray
:param ivar: The inverse variances of the fluxes.
:type ivar: ndarray
:param wavelength: An array of the wavelengths.
:type wavelength: ndarray
:param L: [optional] The width of the Gaussian in pixels.
:type L: int
    :param dr: [optional] data release
:type dr: int
    :returns: Continuum-normalized flux and flux uncertainty
:rtype: ndarray
"""
# Partial Credit: https://github.com/chanconrad/slomp/blob/master/lamost.py
if dr is None:
dr = lamost_default_dr(dr)
if wavelength is None:
wavelength = wavelength_solution(dr=dr)
    smoothed_spec = smooth_spec(flux, ivar, wavelength, L)
norm_flux = flux / smoothed_spec
norm_ivar = smoothed_spec * ivar * smoothed_spec
bad_pixel = ~np.isfinite(norm_flux)
norm_flux[bad_pixel] = 1.0
norm_ivar[bad_pixel] = 0.0
return norm_flux, norm_ivar
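# Minimal usage sketch of the chain above (dr=5 and the synthetic flux/ivar
# arrays below are illustrative assumptions, not astroNN-prescribed values):
if __name__ == "__main__":
    wave = wavelength_solution(dr=5)
    rng = np.random.RandomState(0)
    toy_flux = 1.0 + 0.01 * rng.randn(wave.size)  # flat synthetic spectrum
    toy_ivar = np.full(wave.size, 1.0e4)          # uniform inverse variance
    norm_flux, norm_ivar = pseudo_continuum(toy_flux, toy_ivar,
                                            wavelength=wave, L=50)
    print(norm_flux.shape, norm_ivar.shape)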
|
henrysky/astroNN
|
astroNN/lamost/chips.py
|
Python
|
mit
| 2,586
|
[
"Gaussian"
] |
384975d97dcaaf44748add2bbe50d4e27108b2cc078628e3acd0043a6809090c
|
# -*- coding: utf-8 -*-
# The defaults dict should contain all application-wide default keys, with their default value.
# Each key must have a value. If it is optional, the empty string will suffice.
defaults_dict = {
# Example default settings set by the EbaTableSpider
'app/scraping_application_name': 'Scrapy the Scraper',
'dataset/base_title':'app default base title',
'sheet/spider':'app default sheet spider',
# Real-life settings
'tags':'',
'author/name':'',
'dataset/distribution/type':'dcat:Download',
'dataset/distribution/format':'PDF'
}
def get_dictionary_default(path):
"""Returns the application-wide default value for the supplied key."""
    return defaults_dict.get(path, '')
def default_dict_keys():
"""Returns the keys of the default dictionary."""
return defaults_dict.keys()
prefixes_dict = {
# Example prefix set by the EbaTableSpider
'app/scraping_application_name': 'ODS',
# Real-life prefixes
}
def get_default_prefix(path):
"""Returns the application-wide default prefix value for the supplied key."""
    return prefixes_dict.get(path, '')
_country_dict = {
u"kosovo": u"1A0",
u"aruba": u"ABW",
u"afghanistan": u"AFG",
u"french afar and issas": u"AFI",
u"angola": u"AGO",
u"anguilla": u"AIA",
u"åland islands": u"ALA",
u"albania": u"ALB",
u"andorra": u"AND",
u"netherlands antilles": u"ANT",
u"united arab emirates": u"ARE",
u"argentina": u"ARG",
u"armenia": u"ARM",
u"american samoa": u"ASM",
u"antarctica": u"ATA",
u"british antarctic territory": u"ATB",
u"french southern lands": u"ATF",
u"antigua and barbuda": u"ATG",
u"dronning maud land": u"ATN",
u"australia": u"AUS",
u"austria": u"AUT",
u"azerbaijan": u"AZE",
u"burundi": u"BDI",
u"belgium": u"BEL",
u"benin": u"BEN",
u"bonaire, saint eustatius and saba": u"BES",
u"burkina faso": u"BFA",
u"bangladesh": u"BGD",
u"bulgaria": u"BGR",
u"bulgaria": u"BGR",
u"bahrain": u"BHR",
u"bahamas": u"BHS",
u"bosnia and herzegovina": u"BIH",
u"saint barthélemy": u"BLM",
u"belarus": u"BLR",
u"belize": u"BLZ",
u"bermuda": u"BMU",
u"bolivia": u"BOL",
u"brazil": u"BRA",
u"barbados": u"BRB",
u"brunei": u"BRN",
u"bhutan": u"BTN",
u"burma": u"BUR",
u"bouvet island": u"BVT",
u"botswana": u"BWA",
u"belarus": u"BYS",
u"central african republic": u"CAF",
u"canada": u"CAN",
u"cocos (keeling) islands": u"CCK",
u"switzerland": u"CHE",
u"chile": u"CHL",
u"china": u"CHN",
u"côte d’ivoire": u"CIV",
u"cameroon": u"CMR",
u"democratic republic of the congo": u"COD",
u"congo": u"COG",
u"cook islands": u"COK",
u"colombia": u"COL",
u"comoros": u"COM",
u"clipperton": u"CPT",
u"cape verde": u"CPV",
u"costa rica": u"CRI",
u"czechoslovakia": u"CSK",
u"czechoslovakia": u"CSK",
u"czechoslovakia": u"CSK",
u"canton and enderbury islands": u"CTE",
u"cuba": u"CUB",
u"curaçao": u"CUW",
u"christmas island": u"CXR",
u"cayman islands": u"CYM",
u"cyprus": u"CYP",
u"czech republic": u"CZE",
u"east germany": u"DDR",
u"germany": u"DEU",
u"dahomey": u"DHY",
u"djibouti": u"DJI",
u"dominica": u"DMA",
u"denmark": u"DNK",
u"dominican republic": u"DOM",
u"algeria": u"DZA",
u"ecuador": u"ECU",
u"egypt": u"EGY",
u"eritrea": u"ERI",
u"western sahara": u"ESH",
u"spain": u"ESP",
u"spain": u"ESP",
u"estonia": u"EST",
u"ethiopia": u"ETH",
u"ethiopia": u"ETH",
u"finland": u"FIN",
u"fiji": u"FJI",
u"falkland islands": u"FLK",
u"french southern and antarctic lands": u"FQ0",
u"france": u"FRA",
u"faroe islands": u"FRO",
u"micronesia": u"FSM",
u"metropolitan france": u"FXX",
u"gabon": u"GAB",
u"united kingdom": u"GBR",
u"gilbert and ellice islands": u"GEL",
u"georgia": u"GEO",
u"guernsey": u"GGY",
u"ghana": u"GHA",
u"gibraltar": u"GIB",
u"guinea": u"GIN",
u"guadeloupe": u"GLP",
u"the gambia": u"GMB",
u"guinea-bissau": u"GNB",
u"equatorial guinea": u"GNQ",
u"greece": u"GRC",
u"greece": u"GRC",
u"grenada": u"GRD",
u"greenland": u"GRL",
u"guatemala": u"GTM",
u"french guiana": u"GUF",
u"guam": u"GUM",
u"guyana": u"GUY",
u"hong kong": u"HKG",
u"heard island and mcdonald islands": u"HMD",
u"honduras": u"HND",
u"croatia": u"HRV",
u"haiti": u"HTI",
u"hungary": u"HUN",
u"hungary": u"HUN",
u"hungary": u"HUN",
u"upper volta": u"HVO",
u"indonesia": u"IDN",
u"isle of man": u"IMN",
u"india": u"IND",
u"british indian ocean territory": u"IOT",
u"ireland": u"IRL",
u"iran": u"IRN",
u"iraq": u"IRQ",
u"iceland": u"ISL",
u"israel": u"ISR",
u"italy": u"ITA",
u"jamaica": u"JAM",
u"jersey": u"JEY",
u"jordan": u"JOR",
u"japan": u"JPN",
u"johnston island": u"JTN",
u"kazakhstan": u"KAZ",
u"kenya": u"KEN",
u"kyrgyzstan": u"KGZ",
u"cambodia": u"KHM",
u"kiribati": u"KIR",
u"saint kitts and nevis": u"KNA",
u"south korea": u"KOR",
u"kuwait": u"KWT",
u"laos": u"LAO",
u"lebanon": u"LBN",
u"liberia": u"LBR",
u"libya": u"LBY",
u"libya": u"LBY",
u"saint lucia": u"LCA",
u"liechtenstein": u"LIE",
u"sri lanka": u"LKA",
u"lesotho": u"LSO",
u"lithuania": u"LTU",
u"luxembourg": u"LUX",
u"latvia": u"LVA",
u"macau": u"MAC",
u"macau": u"MAC",
u"saint martin": u"MAF",
u"morocco": u"MAR",
u"monaco": u"MCO",
u"moldova": u"MDA",
u"madagascar": u"MDG",
u"maldives": u"MDV",
u"mexico": u"MEX",
u"marshall islands": u"MHL",
u"midway islands": u"MID",
u"former yugoslav republic of macedonia": u"MKD",
u"mali": u"MLI",
u"malta": u"MLT",
u"malta": u"MLT",
u"myanmar": u"MMR",
u"burma/myanmar": u"MMR",
u"myanmar/burma": u"MMR",
u"montenegro": u"MNE",
u"mongolia": u"MNG",
u"northern mariana islands": u"MNP",
u"mozambique": u"MOZ",
u"mauritania": u"MRT",
u"montserrat": u"MSR",
u"martinique": u"MTQ",
u"mauritius": u"MUS",
u"malawi": u"MWI",
u"malaysia": u"MYS",
u"mayotte": u"MYT",
u"mayotte": u"MYT",
u"namibia": u"NAM",
u"new caledonia": u"NCL",
u"new caledonia": u"NCL",
u"niger": u"NER",
u"norfolk island": u"NFK",
u"nigeria": u"NGA",
u"new hebrides": u"NHB",
u"nicaragua": u"NIC",
u"niue": u"NIU",
u"netherlands": u"NLD",
u"norway": u"NOR",
u"nepal": u"NPL",
u"nepal": u"NPL",
u"nepal": u"NPL",
u"nauru": u"NRU",
u"neutral zone": u"NTZ",
u"new zealand": u"NZL",
u"oman": u"OMN",
u"pakistan": u"PAK",
u"panama": u"PAN",
u"trust territory of the pacific islands": u"PCI",
u"pitcairn islands": u"PCN",
u"panama canal zone": u"PCZ",
u"peru": u"PER",
u"philippines": u"PHL",
u"palau": u"PLW",
u"papua new guinea": u"PNG",
u"poland": u"POL",
u"poland": u"POL",
u"puerto rico": u"PRI",
u"north korea": u"PRK",
u"portugal": u"PRT",
u"paraguay": u"PRY",
u"palestine": u"PSE",
u"us miscellaneous pacific islands": u"PUS",
u"french polynesia": u"PYF",
u"qatar": u"QAT",
u"réunion": u"REU",
u"southern rhodesia": u"RHO",
u"romania": u"ROU",
u"romania": u"ROU",
u"romania": u"ROU",
u"russia": u"RUS",
u"rwanda": u"RWA",
u"saudi arabia": u"SAU",
u"serbia and montenegro": u"SCG",
u"sudan": u"SDN",
u"senegal": u"SEN",
u"singapore": u"SGP",
u"south georgia and the south sandwich islands": u"SGS",
u"saint helena": u"SHN",
u"svalbard and jan mayen": u"SJM",
u"sikkim": u"SKM",
u"solomon islands": u"SLB",
u"sierra leone": u"SLE",
u"el salvador": u"SLV",
u"san marino": u"SMR",
u"somalia": u"SOM",
u"somalia": u"SOM",
u"saint pierre and miquelon": u"SPM",
u"serbia": u"SRB",
u"south sudan": u"SSD",
u"são tomé and príncipe": u"STP",
u"soviet union": u"SUN",
u"suriname": u"SUR",
u"slovakia": u"SVK",
u"slovenia": u"SVN",
u"sweden": u"SWE",
u"swaziland": u"SWZ",
u"sint maarten": u"SXM",
u"seychelles": u"SYC",
u"syria": u"SYR",
u"turks and caicos islands": u"TCA",
u"chad": u"TCD",
u"togo": u"TGO",
u"thailand": u"THA",
u"tajikistan": u"TJK",
u"tokelau": u"TKL",
u"turkmenistan": u"TKM",
u"timor-leste": u"TLS",
u"east timor": u"TMP",
u"tonga": u"TON",
u"trinidad and tobago": u"TTO",
u"tunisia": u"TUN",
u"turkey": u"TUR",
u"tuvalu": u"TUV",
u"taiwan": u"TWN",
u"tanzania": u"TZA",
u"uganda": u"UGA",
u"ukraine": u"UKR",
u"united states minor outlying islands": u"UMI",
u"uruguay": u"URY",
u"united states": u"USA",
u"uzbekistan": u"UZB",
u"vatican city": u"VAT",
u"saint vincent and the grenadines": u"VCT",
u"democratic republic of vietnam": u"VDR",
u"venezuela": u"VEN",
u"british virgin islands": u"VGB",
u"us virgin islands": u"VIR",
u"vietnam": u"VNM",
u"vietnam": u"VNM",
u"vanuatu": u"VUT",
u"wake islands": u"WAK",
u"wallis and futuna": u"WLF",
u"samoa": u"WSM",
u"yemen": u"YEM",
u"south yemen": u"YEM",
u"north yemen": u"YMD",
u"yugoslavia": u"YUG",
u"yugoslavia": u"YUG",
u"yugoslavia": u"YUG",
u"south africa": u"ZAF",
u"zambia": u"ZMB",
u"zaire": u"ZR0",
u"zimbabwe": u"ZWE"
}
def country_identifier(name):
"""Returns the eba identifier for the supplied country name, or the country name itself if no identifier could be found."""
    return _country_dict.get(name.lower(), name)
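# Minimal usage sketch (all lookups below come from the dictionaries above):
if __name__ == "__main__":
    print(country_identifier(u"Belgium"))    # -> BEL
    print(country_identifier(u"Atlantis"))   # unknown name: returned as-is
    print(get_dictionary_default('dataset/distribution/format'))  # -> PDF
    print(get_default_prefix('app/scraping_application_name'))    # -> ODS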
|
tenforce/ods-scraper
|
ods/dictionary.py
|
Python
|
apache-2.0
| 10,013
|
[
"BWA"
] |
54adcb6dd341d9dab5621377a85c17971ff12cef69304217a17fb6509392925a
|
# -*- coding: UTF-8 -*-
"""A module with Gaussian h-sssi process generators.
"""
import numpy as np
from pyfftw import FFTW, empty_aligned
from sklearn.base import BaseEstimator as BaseGenerator
from sklearn.utils import check_random_state
class FractionalGaussianNoise(BaseGenerator):
"""A class to generate fractional Gaussian process of fixed length using a
circulant matrix embedding method suggested by Dietrich and Newsam (1997).
For the best performance N-1 should be a power of two.
The circulant embedding method actually generates a pair of independent
long-range dependent processes.
"""
def __init__(self, N, hurst=0.5, sigma=1.0, random_state=None,
n_threads=1):
self.random_state = random_state
self.n_threads = n_threads
self.sigma = sigma
self.N = N
self.hurst = hurst
def start(self):
"""Initialize the generator.
"""
if hasattr(self, "initialized_") and self.initialized_:
return
N = self.N
# Allocate buffers and initialize the FFTW object
self.fft_in_ = empty_aligned(2 * N - 2, dtype=np.complex128)
self.fft_out_ = empty_aligned(2 * N - 2, dtype=np.complex128)
        # FFTW has at least two planner options: 'FFTW_ESTIMATE' and
        # 'FFTW_MEASURE'. The first uses fast heuristics to choose an
        # algorithm, whereas the latter actually times the various
        # algorithms and then chooses the best one. Unfortunately, those
        # measurements take about two minutes, and in general the
        # speed-up is insignificant.
self.fftw_ = FFTW(self.fft_in_, self.fft_out_, threads=self.n_threads,
flags=('FFTW_DESTROY_INPUT', 'FFTW_ESTIMATE'),
direction='FFTW_FORWARD')
# Compute the fft of the autocorrelation function.
self.fft_acf_ = empty_aligned(2 * N - 2, dtype=np.float64)
        # The autocorrelation structure of the fBM increments is constant
        # provided the Hurst exponent and the sample size are fixed:
        # synthesise the covariance of the fractional Gaussian noise. This
        # autocorrelation function models long-range (epochal) dependence.
R = np.arange(N, dtype=np.float128)
# The noise autocorrelation structure is directly derivable from the
# autocorrelation of the time-continuous fBM:
# r(s,t) = .5 * ( |s|^{2H}+|t|^{2H}-|s-t|^{2H} )
        # If the noise is generated for an equally spaced sampling of an
        # fBM-like process, then the autocorrelation function must be
        # multiplied by ∆^{2H}. Since the Fourier transform is linear
        # (even the discrete one), this routine can just generate a unit
        # variance fractional Gaussian noise.
R = 0.5 * self.sigma * self.sigma * (
np.abs(R - 1) ** (2.0 * self.hurst)
+ np.abs(R + 1) ** (2.0 * self.hurst)
- 2 * np.abs(R) ** (2.0 * self.hurst))
# Generate the first row of the 2Mx2M Toeplitz matrix, where
# `2 M = N + N-2`: it should be
# [ r_0, r_1, ..., r_{N-1}, r_{N-2}, ..., r_1 ]
self.fft_in_[:N] = R
self.fft_in_[:N-1:-1] = R[1:-1]
del R
# The circulant matrix, defined by the autocorrelation structure above
# is necessarily positive definite, which is equivalent to the FFT of
# any its row being non-negative.
self.fftw_()
# Due to numerical round-off errors we truncate close to zero negative
# real Fourier coefficients.
self.fft_acf_[:] = np.sqrt(np.maximum(self.fft_out_.real, 0.0) / (2 * N - 2))
# Set the random state
self.random_state_ = check_random_state(self.random_state)
self.queue_ = list()
self.initialized_ = True
def finish(self):
"""Deinitialize the generator.
"""
if hasattr(self, "initialized_") and self.initialized_:
self.initialized_ = False
self.fftw_ = None
self.fft_acf_ = None
self.fft_in_ = None
self.fft_out_ = None
def draw(self):
"""Draw a single realisation of the processes trajectory from the
generator.
"""
if not(hasattr(self, "initialized_") and self.initialized_):
raise RuntimeError("The generator has not been initialized properly. "
"Please call `.start()` before calling `.draw()`.")
if not self.queue_:
# The idea is to utilize the convolution property of the Fourier
# Transform and multiply the transform of the autocorrelation
# function by the independent Gaussian white noise in the
# frequency domain and then get back to the time domain.
# http://www.thefouriertransform.com/transform/properties.php
# Begin with generation of a complex Gaussian white noise with unit
# variance and zero mean.
self.fft_in_.real = self.random_state_.normal(size=2 * self.N - 2)
self.fft_in_.imag = self.random_state_.normal(size=2 * self.N - 2)
# Compute the "convolution" of the circulant row (of
# autocorrelations) with the noise.
self.fft_in_ *= self.fft_acf_
            # %% ATTENTION: do not use ifft here, as it applies a different
            # normalisation
# Compute this (see p.~1091 [Dietrich, Newsam; 1997]):
# F \times (\frac{1}{2M}\Lambda)^\frac{1}{2} \times w
self.fftw_()
# [Dietrich, Newsam; 1997] write : "In our case the real and
# imaginary parts of any N consecutive entries yield two
# independent realizations of \mathcal{N}_N(0,R) where $R$ is
# the autocorrelation structure of an fBM." Therefore take the
# first N complex draws to get a pair of independent realizations.
self.queue_.append(self.fft_out_.imag[:self.N].copy())
self.queue_.append(self.fft_out_.real[:self.N].copy())
# Generate the next sample only if needed.
return self.queue_.pop()
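# Usage sketch for the noise generator on its own (parameter values are
# illustrative; pyfftw must be installed):
#
#     fgn = FractionalGaussianNoise(N=2 ** 10 + 1, hurst=0.6, random_state=0)
#     fgn.start()
#     increments = fgn.draw()   # one realisation of length N
#     fgn.finish()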
class FractionalBrownianMotion(BaseGenerator):
"""A derived class to produce sample paths of a Fractional Brownian Motion
with a specified fractional integration parameter (the Hurst exponent). For
the best performance N should be a power of two.
Returns a process sampled on :math:`0.0=t_0<t_1<\\ldots<t_N=1.0` with equal
spacing given by :math:`N^{-1}`.
"""
def __init__(self, N, hurst=0.5, random_state=None, n_threads=1):
self.random_state = random_state
self.n_threads = n_threads
self.N = N
self.hurst = hurst
def start(self):
"""Initialize the generator.
"""
if hasattr(self, "initialized_") and self.initialized_:
return
self.fgn_ = FractionalGaussianNoise(N=self.N + 1, hurst=self.hurst,
sigma=self.N ** -self.hurst,
random_state=self.random_state,
n_threads=self.n_threads)
self.fgn_.start()
self.initialized_ = True
def finish(self):
"""Deinitialize the generator.
"""
if hasattr(self, "initialized_") and self.initialized_:
self.initialized_ = False
self.fgn_.finish()
self.fgn_ = None
def draw(self):
"""Draw a single realisation of the processes trajectory from the
generator.
"""
if not(hasattr(self, "initialized_") and self.initialized_):
raise RuntimeError("The generator has not been initialized properly. "
"Please call `.start()` before calling `.draw()`.")
values_ = self.fgn_.draw()[:-1].cumsum()
return np.linspace(0, 1, num=self.N + 1), np.r_[0, values_]
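# Minimal end-to-end sketch (assumes pyfftw and scikit-learn are available;
# the parameter values are illustrative):
if __name__ == "__main__":
    fbm = FractionalBrownianMotion(N=1024, hurst=0.7, random_state=42)
    fbm.start()
    t, x = fbm.draw()   # t is a grid on [0, 1]; x is one fBM sample path
    fbm.finish()
    print(t.shape, x.shape)   # -> (1025,) (1025,)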
|
ivannz/crossing_paper2017
|
crossing_tree/processes/gaussian.py
|
Python
|
mit
| 8,114
|
[
"Gaussian"
] |
ecc5cbbd8d18b07d92ab09ed892c8943d6ed35685471420eb5dc8914c67453eb
|
#!/usr/bin/env python
#
# Copyright 2008,2009,2011,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
SAMP_RATE_KEY = 'samp_rate'
GAIN_KEY = lambda x: 'gain:'+x
BWIDTH_KEY = 'bwidth'
TX_FREQ_KEY = 'tx_freq'
FREQ_CORR_KEY = 'freq_corr'
AMPLITUDE_KEY = 'amplitude'
AMPL_RANGE_KEY = 'ampl_range'
WAVEFORM_FREQ_KEY = 'waveform_freq'
WAVEFORM_OFFSET_KEY = 'waveform_offset'
WAVEFORM2_FREQ_KEY = 'waveform2_freq'
FREQ_RANGE_KEY = 'freq_range'
GAIN_RANGE_KEY = lambda x: 'gain_range:'+x
BWIDTH_RANGE_KEY = 'bwidth_range'
DC_OFFSET_REAL = 'dc_offset_real'
DC_OFFSET_IMAG = 'dc_offset_imag'
IQ_BALANCE_MAG = 'iq_balance_mag'
IQ_BALANCE_PHA = 'iq_balance_pha'
TYPE_KEY = 'type'
def setter(ps, key, val): ps[key] = val
import osmosdr
from gnuradio import blocks
from gnuradio import filter
from gnuradio import analog
from gnuradio import digital
from gnuradio import gr, gru, eng_notation
from gnuradio.gr.pubsub import pubsub
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
import math
import numpy
import random
n2s = eng_notation.num_to_str
waveforms = { analog.GR_SIN_WAVE : "Sinusoid",
analog.GR_CONST_WAVE : "Constant",
analog.GR_GAUSSIAN : "Gaussian Noise",
analog.GR_UNIFORM : "Uniform Noise",
"2tone" : "Two Tone (IMD)",
"sweep" : "Freq. Sweep",
"gsm" : "GSM Bursts" }
class gsm_source_c(gr.hier_block2):
def __init__(self, sample_rate, amplitude):
gr.hier_block2.__init__(self, "gsm_source_c",
gr.io_signature(0, 0, 0), # Input signature
gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature
        self._symb_rate = 13e6 / 48
self._samples_per_symbol = 2
self._data = blocks.vector_source_b(self.gen_gsm_seq(), True, 2)
self._split = blocks.vector_to_streams(gr.sizeof_char*1, 2)
self._pack = blocks.unpacked_to_packed_bb(1, gr.GR_MSB_FIRST)
self._mod = digital.gmsk_mod(self._samples_per_symbol, bt=0.35)
self._pwr_f = blocks.char_to_float(1, 1)
self._pwr_c = blocks.float_to_complex(1)
self._pwr_w = blocks.repeat(gr.sizeof_gr_complex*1, self._samples_per_symbol)
self._mul = blocks.multiply_vcc(1)
self._interpolate = filter.fractional_resampler_cc( 0,
(self._symb_rate * self._samples_per_symbol) / sample_rate )
self._scale = blocks.multiply_const_cc(amplitude)
self.connect(self._data, self._split)
self.connect((self._split, 0), self._pack, self._mod, (self._mul, 0))
self.connect((self._split, 1), self._pwr_f, self._pwr_c, self._pwr_w, (self._mul, 1))
self.connect(self._mul, self._interpolate, self._scale, self)
def set_amplitude(self, amplitude):
self._scale.set_k(amplitude)
def set_sampling_freq(self, sample_rate):
self._interpolate.set_interp_ratio( (self._symb_rate * self._samples_per_symbol) / sample_rate )
def gen_gsm_burst(self, l):
chunks = [
[0,0,0],
list(numpy.random.randint(0, 2, 58)),
[0,0,1,0,0,1,0,1,1,1,0,0,0,0,1,0,0,0,1,0,0,1,0,1,1,1],
list(numpy.random.randint(0, 2, 58)),
[0,0,0],
]
burst = sum(chunks,[])
burst = sum(map(list, zip(burst, (1,) * len(burst))), [])
burst += [1,0] * (l-148)
return map(int, burst)
def gen_gsm_frame(self):
return \
self.gen_gsm_burst(158) + \
self.gen_gsm_burst(158) + \
self.gen_gsm_burst(158) + \
self.gen_gsm_burst(159) + \
self.gen_gsm_burst(158) + \
self.gen_gsm_burst(158) + \
self.gen_gsm_burst(158) + \
self.gen_gsm_burst(159)
def gen_gsm_seq(self):
return sum([self.gen_gsm_frame() for i in range(10)], [])
#
# GUI-unaware GNU Radio flowgraph. This may be used either with command
# line applications or GUI applications.
#
class top_block(gr.top_block, pubsub):
def __init__(self, options, args):
gr.top_block.__init__(self)
pubsub.__init__(self)
self._verbose = options.verbose
#initialize values from options
self._setup_osmosdr(options)
self[SAMP_RATE_KEY] = options.samp_rate
self[TX_FREQ_KEY] = options.tx_freq
self[FREQ_CORR_KEY] = options.freq_corr
self[AMPLITUDE_KEY] = options.amplitude
self[WAVEFORM_FREQ_KEY] = options.waveform_freq
self[WAVEFORM_OFFSET_KEY] = options.offset
self[WAVEFORM2_FREQ_KEY] = options.waveform2_freq
# initialize reasonable defaults for DC / IQ correction
self[DC_OFFSET_REAL] = 0
self[DC_OFFSET_IMAG] = 0
self[IQ_BALANCE_MAG] = 0
self[IQ_BALANCE_PHA] = 0
#subscribe set methods
self.subscribe(SAMP_RATE_KEY, self.set_samp_rate)
for name in self.get_gain_names():
self.subscribe(GAIN_KEY(name), (lambda gain,self=self,name=name: self.set_named_gain(gain, name)))
self.subscribe(BWIDTH_KEY, self.set_bandwidth)
self.subscribe(TX_FREQ_KEY, self.set_freq)
self.subscribe(FREQ_CORR_KEY, self.set_freq_corr)
self.subscribe(AMPLITUDE_KEY, self.set_amplitude)
self.subscribe(WAVEFORM_FREQ_KEY, self.set_waveform_freq)
self.subscribe(WAVEFORM2_FREQ_KEY, self.set_waveform2_freq)
self.subscribe(TYPE_KEY, self.set_waveform)
self.subscribe(DC_OFFSET_REAL, self.set_dc_offset)
self.subscribe(DC_OFFSET_IMAG, self.set_dc_offset)
self.subscribe(IQ_BALANCE_MAG, self.set_iq_balance)
self.subscribe(IQ_BALANCE_PHA, self.set_iq_balance)
#force update on pubsub keys
for key in (SAMP_RATE_KEY, GAIN_KEY, BWIDTH_KEY,
TX_FREQ_KEY, FREQ_CORR_KEY, AMPLITUDE_KEY,
WAVEFORM_FREQ_KEY, WAVEFORM_OFFSET_KEY, WAVEFORM2_FREQ_KEY):
#print key, "=", self[key]
self[key] = self[key]
self[TYPE_KEY] = options.type #set type last
def _setup_osmosdr(self, options):
self._sink = osmosdr.sink(options.args)
try:
self._sink.get_sample_rates().start()
except RuntimeError:
print "Sink has no sample rates (wrong device arguments?)."
sys.exit(1)
if options.samp_rate is None:
options.samp_rate = self._sink.get_sample_rates().start()
self.set_samp_rate(options.samp_rate)
# Set the gain from options
if(options.gain):
gain = self._sink.set_gain(options.gain)
if self._verbose:
print "Set gain to:", gain
if self._verbose:
gain_names = self._sink.get_gain_names()
for name in gain_names:
range = self._sink.get_gain_range(name)
print "%s gain range: start %d stop %d step %d" % (name, range.start(), range.stop(), range.step())
if options.gains:
for tuple in options.gains.split(","):
name, gain = tuple.split(":")
gain = int(gain)
print "Setting gain %s to %d." % (name, gain)
self._sink.set_gain(gain, name)
if self._verbose:
rates = self._sink.get_sample_rates()
print 'Supported sample rates %d-%d step %d.' % (rates.start(), rates.stop(), rates.step())
# Set the antenna
if(options.antenna):
ant = self._sink.set_antenna(options.antenna, 0)
if self._verbose:
print "Set antenna to:", ant
self.publish(FREQ_RANGE_KEY, self._sink.get_freq_range)
for name in self.get_gain_names():
self.publish(GAIN_RANGE_KEY(name), (lambda self=self,name=name: self._sink.get_gain_range(name)))
self.publish(BWIDTH_RANGE_KEY, self._sink.get_bandwidth_range)
for name in self.get_gain_names():
self.publish(GAIN_KEY(name), (lambda self=self,name=name: self._sink.get_gain(name)))
self.publish(BWIDTH_KEY, self._sink.get_bandwidth)
def get_gain_names(self):
return self._sink.get_gain_names()
def set_samp_rate(self, sr):
sr = self._sink.set_sample_rate(sr)
if self[TYPE_KEY] in (analog.GR_SIN_WAVE, analog.GR_CONST_WAVE):
self._src.set_sampling_freq(self[SAMP_RATE_KEY])
elif self[TYPE_KEY] == "2tone":
self._src1.set_sampling_freq(self[SAMP_RATE_KEY])
self._src2.set_sampling_freq(self[SAMP_RATE_KEY])
elif self[TYPE_KEY] == "sweep":
self._src1.set_sampling_freq(self[SAMP_RATE_KEY])
self._src2.set_sampling_freq(self[WAVEFORM_FREQ_KEY]*2*math.pi/self[SAMP_RATE_KEY])
elif self[TYPE_KEY] == "gsm":
self._src.set_sampling_freq(self[SAMP_RATE_KEY])
else:
return True # Waveform not yet set
if self._verbose:
print "Set sample rate to:", sr
return True
def set_named_gain(self, gain, name):
if gain is None:
g = self[GAIN_RANGE_KEY(name)]
gain = float(g.start()+g.stop())/2
if self._verbose:
print "Using auto-calculated mid-point gain"
self[GAIN_KEY(name)] = gain
return
gain = self._sink.set_gain(gain, name)
if self._verbose:
print "Set " + name + " gain to:", gain
def set_bandwidth(self, bw):
clipped_bw = self[BWIDTH_RANGE_KEY].clip(bw)
if self._sink.get_bandwidth() != clipped_bw:
bw = self._sink.set_bandwidth(clipped_bw)
if self._verbose:
print "Set bandwidth to:", bw
def set_dc_offset(self, value):
correction = complex( self[DC_OFFSET_REAL], self[DC_OFFSET_IMAG] )
try:
self._sink.set_dc_offset( correction )
if self._verbose:
print "Set DC offset to", correction
except RuntimeError as ex:
print ex
def set_iq_balance(self, value):
correction = complex( self[IQ_BALANCE_MAG], self[IQ_BALANCE_PHA] )
try:
self._sink.set_iq_balance( correction )
if self._verbose:
print "Set IQ balance to", correction
except RuntimeError as ex:
print ex
def set_freq(self, freq):
if freq is None:
f = self[FREQ_RANGE_KEY]
freq = float(f.start()+f.stop())/2.0
if self._verbose:
print "Using auto-calculated mid-point frequency"
self[TX_FREQ_KEY] = freq
return
freq = self._sink.set_center_freq(freq)
if freq is not None:
self._freq = freq
if self._verbose:
print "Set center frequency to", freq
elif self._verbose:
print "Failed to set freq."
return freq
def set_freq_corr(self, ppm):
if ppm is None:
ppm = 0.0
if self._verbose:
print "Using frequency corrrection of", ppm
self[FREQ_CORR_KEY] = ppm
return
ppm = self._sink.set_freq_corr(ppm)
if self._verbose:
print "Set frequency correction to:", ppm
def set_waveform_freq(self, freq):
if self[TYPE_KEY] == analog.GR_SIN_WAVE:
self._src.set_frequency(freq)
elif self[TYPE_KEY] == "2tone":
self._src1.set_frequency(freq)
elif self[TYPE_KEY] == 'sweep':
#there is no set sensitivity, redo fg
self[TYPE_KEY] = self[TYPE_KEY]
return True
def set_waveform2_freq(self, freq):
if freq is None:
self[WAVEFORM2_FREQ_KEY] = -self[WAVEFORM_FREQ_KEY]
return
if self[TYPE_KEY] == "2tone":
self._src2.set_frequency(freq)
elif self[TYPE_KEY] == "sweep":
self._src1.set_frequency(freq)
return True
def set_waveform(self, type):
self.lock()
self.disconnect_all()
if type == analog.GR_SIN_WAVE or type == analog.GR_CONST_WAVE:
self._src = analog.sig_source_c(self[SAMP_RATE_KEY], # Sample rate
type, # Waveform type
self[WAVEFORM_FREQ_KEY], # Waveform frequency
self[AMPLITUDE_KEY], # Waveform amplitude
self[WAVEFORM_OFFSET_KEY]) # Waveform offset
elif type == analog.GR_GAUSSIAN or type == analog.GR_UNIFORM:
self._src = analog.noise_source_c(type, self[AMPLITUDE_KEY])
elif type == "2tone":
self._src1 = analog.sig_source_c(self[SAMP_RATE_KEY],
analog.GR_SIN_WAVE,
self[WAVEFORM_FREQ_KEY],
self[AMPLITUDE_KEY]/2.0,
0)
if(self[WAVEFORM2_FREQ_KEY] is None):
self[WAVEFORM2_FREQ_KEY] = -self[WAVEFORM_FREQ_KEY]
self._src2 = analog.sig_source_c(self[SAMP_RATE_KEY],
analog.GR_SIN_WAVE,
self[WAVEFORM2_FREQ_KEY],
self[AMPLITUDE_KEY]/2.0,
0)
self._src = blocks.add_cc()
self.connect(self._src1,(self._src,0))
self.connect(self._src2,(self._src,1))
elif type == "sweep":
# rf freq is center frequency
# waveform_freq is total swept width
# waveform2_freq is sweep rate
# will sweep from (rf_freq-waveform_freq/2) to (rf_freq+waveform_freq/2)
if self[WAVEFORM2_FREQ_KEY] is None:
self[WAVEFORM2_FREQ_KEY] = 0.1
self._src1 = analog.sig_source_f(self[SAMP_RATE_KEY],
analog.GR_TRI_WAVE,
self[WAVEFORM2_FREQ_KEY],
1.0,
-0.5)
self._src2 = analog.frequency_modulator_fc(self[WAVEFORM_FREQ_KEY]*2*math.pi/self[SAMP_RATE_KEY])
self._src = blocks.multiply_const_cc(self[AMPLITUDE_KEY])
self.connect(self._src1,self._src2,self._src)
elif type == "gsm":
self._src = gsm_source_c(self[SAMP_RATE_KEY], self[AMPLITUDE_KEY])
else:
raise RuntimeError("Unknown waveform type")
self.connect(self._src, self._sink)
self.unlock()
if self._verbose:
print "Set baseband modulation to:", waveforms[type]
if type == analog.GR_SIN_WAVE:
print "Modulation frequency: %sHz" % (n2s(self[WAVEFORM_FREQ_KEY]),)
print "Initial phase:", self[WAVEFORM_OFFSET_KEY]
elif type == "2tone":
print "Tone 1: %sHz" % (n2s(self[WAVEFORM_FREQ_KEY]),)
print "Tone 2: %sHz" % (n2s(self[WAVEFORM2_FREQ_KEY]),)
elif type == "sweep":
print "Sweeping across %sHz to %sHz" % (n2s(-self[WAVEFORM_FREQ_KEY]/2.0),n2s(self[WAVEFORM_FREQ_KEY]/2.0))
print "Sweep rate: %sHz" % (n2s(self[WAVEFORM2_FREQ_KEY]),)
elif type == "gsm":
print "GSM Burst Sequence"
print "TX amplitude:", self[AMPLITUDE_KEY]
def set_amplitude(self, amplitude):
if amplitude < 0.0 or amplitude > 1.0:
if self._verbose:
print "Amplitude out of range:", amplitude
return False
if self[TYPE_KEY] in (analog.GR_SIN_WAVE, analog.GR_CONST_WAVE, analog.GR_GAUSSIAN, analog.GR_UNIFORM):
self._src.set_amplitude(amplitude)
elif self[TYPE_KEY] == "2tone":
self._src1.set_amplitude(amplitude/2.0)
self._src2.set_amplitude(amplitude/2.0)
elif self[TYPE_KEY] == "sweep":
self._src.set_k(amplitude)
elif self[TYPE_KEY] == "gsm":
self._src.set_amplitude(amplitude)
else:
return True # Waveform not yet set
if self._verbose:
print "Set amplitude to:", amplitude
return True
def get_options():
usage="%prog: [options]"
parser = OptionParser(option_class=eng_option, usage=usage)
parser.add_option("-a", "--args", type="string", default="",
help="Device args, [default=%default]")
parser.add_option("-A", "--antenna", type="string", default=None,
help="Select Rx Antenna where appropriate")
parser.add_option("-s", "--samp-rate", type="eng_float", default=None,
help="Set sample rate (bandwidth), minimum by default")
parser.add_option("-g", "--gain", type="eng_float", default=None,
help="Set gain in dB (default is midpoint)")
parser.add_option("-G", "--gains", type="string", default=None,
help="Set named gain in dB, name:gain,name:gain,...")
parser.add_option("-f", "--tx-freq", type="eng_float", default=None,
help="Set carrier frequency to FREQ [default=mid-point]",
metavar="FREQ")
parser.add_option("-c", "--freq-corr", type="int", default=None,
help="Set carrier frequency correction [default=0]")
parser.add_option("-x", "--waveform-freq", type="eng_float", default=0,
help="Set baseband waveform frequency to FREQ [default=%default]")
parser.add_option("-y", "--waveform2-freq", type="eng_float", default=None,
help="Set 2nd waveform frequency to FREQ [default=%default]")
parser.add_option("--sine", dest="type", action="store_const", const=analog.GR_SIN_WAVE,
help="Generate a carrier modulated by a complex sine wave",
default=analog.GR_SIN_WAVE)
parser.add_option("--const", dest="type", action="store_const", const=analog.GR_CONST_WAVE,
help="Generate a constant carrier")
parser.add_option("--offset", type="eng_float", default=0,
help="Set waveform phase offset to OFFSET [default=%default]")
parser.add_option("--gaussian", dest="type", action="store_const", const=analog.GR_GAUSSIAN,
help="Generate Gaussian random output")
parser.add_option("--uniform", dest="type", action="store_const", const=analog.GR_UNIFORM,
help="Generate Uniform random output")
parser.add_option("--2tone", dest="type", action="store_const", const="2tone",
help="Generate Two Tone signal for IMD testing")
parser.add_option("--sweep", dest="type", action="store_const", const="sweep",
help="Generate a swept sine wave")
parser.add_option("--gsm", dest="type", action="store_const", const="gsm",
help="Generate GMSK modulated GSM Burst Sequence")
parser.add_option("", "--amplitude", type="eng_float", default=0.3,
help="Set output amplitude to AMPL (0.1-1.0) [default=%default]",
metavar="AMPL")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="Use verbose console output [default=%default]")
(options, args) = parser.parse_args()
return (options, args)
# If this script is executed, the following runs. If it is imported,
# the below does not run.
def test_main():
if gr.enable_realtime_scheduling() != gr.RT_OK:
print "Note: failed to enable realtime scheduling, continuing"
# Grab command line options and create top block
try:
(options, args) = get_options()
tb = top_block(options, args)
except RuntimeError, e:
print e
sys.exit(1)
tb.start()
raw_input('Press Enter to quit: ')
tb.stop()
tb.wait()
# Make sure to create the top block (tb) within a function: that way tb
# goes out of scope on return, which calls the destructor on the radio and
# stops the transmitter. What's odd is that GRC works fine with tb in
# __main__, perhaps because of the try/except clauses around tb.
if __name__ == "__main__":
test_main()
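# Example invocations (illustrative; the device arguments depend entirely on
# the attached hardware, so "hackrf=0"/"bladerf=0" below are assumptions):
#   ./osmocom_siggen_base.py -a "hackrf=0" -s 2e6 -f 100e6 --sine -x 10e3 -v
#   ./osmocom_siggen_base.py -a "bladerf=0" --2tone -x 10e3 -y -10e3 --amplitude 0.5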
|
geosphere/gr-osmosdr
|
apps/osmocom_siggen_base.py
|
Python
|
gpl-3.0
| 21,193
|
[
"Gaussian"
] |
94c3060d94fcb7f4256c204c8c83716c3fd6ae0bb1faa8e38d137734a3d7f1b3
|
import pytest
import json
import asyncio
from tornado import testing
from tornado import httpclient
from waterbutler.core import streams
from waterbutler.core import exceptions
from tests import utils
class TestCrudHandler(utils.HandlerTestCase):
HOOK_PATH = 'waterbutler.server.api.v0.crud.CRUDHandler._send_hook'
@testing.gen_test
def test_download_redirect(self):
redirect_url = 'http://queen.com/freddie.png'
self.mock_provider.download = utils.MockCoroutine(return_value=redirect_url)
with pytest.raises(httpclient.HTTPError) as exc:
yield self.http_client.fetch(
self.get_url('/file?provider=queenhub&path=/freddie.png'),
follow_redirects=False,
)
assert exc.value.code == 302
assert exc.value.response.headers.get('Location') == redirect_url
calls = self.mock_provider.download.call_args_list
assert len(calls) == 1
args, kwargs = calls[0]
assert kwargs.get('action') == 'download'
@testing.gen_test
def test_download_stream(self):
data = b'freddie brian john roger'
stream = streams.StringStream(data)
stream.name = 'foo'
stream.content_type = 'application/octet-stream'
self.mock_provider.download = utils.MockCoroutine(return_value=stream)
resp = yield self.http_client.fetch(
self.get_url('/file?provider=queenhub&path=/freddie.png'),
)
assert resp.body == data
calls = self.mock_provider.download.call_args_list
assert len(calls) == 1
args, kwargs = calls[0]
assert kwargs.get('action') == 'download'
@testing.gen_test
def test_download_stream_range(self):
data = b'freddie brian john roger'
stream = streams.StringStream(data)
stream.name = 'foo'
stream.partial = True
stream.content_range = '0-{}/{}'.format(len(data) - 1, len(data))
stream.content_type = 'application/octet-stream'
self.mock_provider.download = utils.MockCoroutine(return_value=stream)
resp = yield self.http_client.fetch(
self.get_url('/file?provider=queenhub&path=/freddie.png'),
headers={'Range': 'bytes=0-'}
)
assert resp.code == 206
assert resp.body == data
calls = self.mock_provider.download.call_args_list
assert len(calls) == 1
args, kwargs = calls[0]
assert kwargs.get('action') == 'download'
assert kwargs.get('range') == (0, None)
@testing.gen_test
def test_download_content_type_switches(self):
"""waterbutler.core.mime_types contains content type
overrides.
"""
data = b'freddie brian john roger'
stream = streams.StringStream(data)
stream.name = None
stream.content_type = 'application/octet-stream'
self.mock_provider.download = utils.MockCoroutine(return_value=stream)
resp = yield self.http_client.fetch(
self.get_url('/file?provider=queenhub&path=/freddie.md'),
)
assert resp.body == data
assert resp.headers['Content-Type'] == 'text/x-markdown'
calls = self.mock_provider.download.call_args_list
assert len(calls) == 1
args, kwargs = calls[0]
assert kwargs.get('action') == 'download'
@testing.gen_test
def test_download_content_type_does_not_switch(self):
"""mime_types should not override file extension not in the dict
"""
data = b'freddie brian john roger'
stream = streams.StringStream(data)
stream.name = None
stream.content_type = 'application/octet-stream'
self.mock_provider.download = utils.MockCoroutine(return_value=stream)
resp = yield self.http_client.fetch(
self.get_url('/file?provider=queenhub&path=/freddie.png'),
)
assert resp.body == data
assert resp.headers['Content-Type'] == 'application/octet-stream'
calls = self.mock_provider.download.call_args_list
assert len(calls) == 1
args, kwargs = calls[0]
assert kwargs.get('action') == 'download'
@testing.gen_test
def test_download_accept_url_false(self):
data = b'freddie brian john roger'
stream = streams.StringStream(data)
stream.name = 'foo'
stream.content_type = 'application/octet-stream'
self.mock_provider.download = utils.MockCoroutine(return_value=stream)
resp = yield self.http_client.fetch(
self.get_url('/file?provider=queenhub&path=/freddie.png&accept_url=false'),
)
assert resp.body == data
calls = self.mock_provider.download.call_args_list
assert len(calls) == 1
args, kwargs = calls[0]
assert kwargs.get('action') == 'download'
assert kwargs.get('accept_url') is False
@testing.gen_test
def test_download_accept_url_default(self):
data = b'freddie brian john roger'
stream = streams.StringStream(data)
stream.name = 'foo'
stream.content_type = 'application/octet-stream'
self.mock_provider.download = utils.MockCoroutine(return_value=stream)
resp = yield self.http_client.fetch(
self.get_url('/file?provider=queenhub&path=/freddie.png'),
)
assert resp.body == data
calls = self.mock_provider.download.call_args_list
assert len(calls) == 1
args, kwargs = calls[0]
assert kwargs.get('action') == 'download'
assert kwargs.get('accept_url') is True
@testing.gen_test
def test_download_accept_url_true(self):
data = b'freddie brian john roger'
stream = streams.StringStream(data)
stream.name = 'foo'
stream.content_type = 'application/octet-stream'
self.mock_provider.download = utils.MockCoroutine(return_value=stream)
resp = yield self.http_client.fetch(
self.get_url('/file?provider=queenhub&path=/freddie.png&accept_url=true'),
)
assert resp.body == data
calls = self.mock_provider.download.call_args_list
assert len(calls) == 1
args, kwargs = calls[0]
assert kwargs.get('action') == 'download'
assert kwargs.get('accept_url') is True
@testing.gen_test
def test_download_accept_url_invalid(self):
self.mock_provider.download = utils.MockCoroutine()
with pytest.raises(httpclient.HTTPError) as exc:
yield self.http_client.fetch(
self.get_url('/file?provider=queenhub&path=/freddie.png&accept_url=teapot'),
)
assert exc.value.code == 400
assert self.mock_provider.download.called is False
@testing.gen_test
def test_download_not_found(self):
self.mock_provider.download = utils.MockCoroutine(side_effect=exceptions.NotFoundError('/freddie.png'))
with pytest.raises(httpclient.HTTPError) as exc:
yield self.http_client.fetch(
self.get_url('/file?provider=queenhub&path=/freddie.png'),
)
assert exc.value.code == 404
@testing.gen_test
def test_upload(self):
data = b'stone cold crazy'
expected = utils.MockFileMetadata()
self.mock_provider.upload = utils.MockCoroutine(return_value=(expected, True))
resp = yield self.http_client.fetch(
self.get_url('/file?provider=queenhub&path=/roger.png'),
method='PUT',
body=data,
)
calls = self.mock_provider.upload.call_args_list
assert len(calls) == 1
args, kwargs = calls[0]
assert isinstance(args[0], streams.RequestStreamReader)
streamed = yield args[0].read()
assert streamed == data
assert kwargs['action'] == 'upload'
assert str(kwargs['path']) == '/roger.png'
assert expected.serialized() == json.loads(resp.body.decode())
@testing.gen_test
def test_delete(self):
self.mock_provider.delete = utils.MockCoroutine()
resp = yield self.http_client.fetch(
self.get_url('/file?provider=queenhub&path=/john.png'),
method='DELETE',
)
calls = self.mock_provider.delete.call_args_list
assert len(calls) == 1
args, kwargs = calls[0]
assert kwargs.get('action') == 'delete'
assert resp.code == 204
@testing.gen_test
def test_create_folder(self):
self.mock_provider.create_folder = utils.MockCoroutine(return_value=utils.MockFolderMetadata())
resp = yield self.http_client.fetch(
self.get_url('/file?provider=queenhub&path=/folder/'),
method='POST',
body=''
)
calls = self.mock_provider.create_folder.call_args_list
assert len(calls) == 1
args, kwargs = calls[0]
assert kwargs.get('action') == 'create_folder'
assert resp.code == 201
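# To run just this module during development (assuming a working waterbutler
# dev environment with pytest installed):
#   pytest tests/server/api/v0/test_crud.py -v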
|
TomBaxter/waterbutler
|
tests/server/api/v0/test_crud.py
|
Python
|
apache-2.0
| 8,981
|
[
"Brian"
] |
ba315f60dc8a7b801a7e2e2dac7b4d461f85d683404acfd8be5a3a07f6d8622b
|
from collections import OrderedDict
import os
from gym import error, spaces
from gym.utils import seeding
import numpy as np
from os import path
import gym
try:
import mujoco_py
except ImportError as e:
raise error.DependencyNotInstalled("{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)".format(e))
DEFAULT_SIZE = 500
def convert_observation_to_space(observation):
if isinstance(observation, dict):
space = spaces.Dict(OrderedDict([
(key, convert_observation_to_space(value))
for key, value in observation.items()
]))
elif isinstance(observation, np.ndarray):
low = np.full(observation.shape, -float('inf'), dtype=np.float32)
high = np.full(observation.shape, float('inf'), dtype=np.float32)
space = spaces.Box(low, high, dtype=observation.dtype)
else:
raise NotImplementedError(type(observation), observation)
return space
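# Quick sketch of what convert_observation_to_space produces (illustrative):
#
#     obs = {'observation': np.zeros(3, dtype=np.float32)}
#     space = convert_observation_to_space(obs)
#     # -> spaces.Dict containing one Box(-inf, inf, shape=(3,))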
class MujocoGoalEnv(gym.Env):
"""SuperClass for all MuJoCo goal reaching environments"""
def __init__(self, model_path, frame_skip):
if model_path.startswith("/"):
fullpath = model_path
else:
fullpath = os.path.join(os.path.dirname(__file__), "assets", model_path)
if not path.exists(fullpath):
raise IOError("File %s does not exist" % fullpath)
self.frame_skip = frame_skip
self.model = mujoco_py.load_model_from_path(fullpath)
self.sim = mujoco_py.MjSim(self.model)
self.data = self.sim.data
self.viewer = None
self._viewers = {}
self.metadata = {
'render.modes': ['human', 'rgb_array', 'depth_array'],
'video.frames_per_second': int(np.round(1.0 / self.dt))
}
self.init_qpos = self.sim.data.qpos.ravel().copy()
self.init_qvel = self.sim.data.qvel.ravel().copy()
self._set_action_space()
action = self.action_space.sample()
observation, _reward, done, _info = self.step(action)
assert not done
self._set_observation_space(observation['observation'])
self.seed()
def _set_action_space(self):
bounds = self.model.actuator_ctrlrange.copy().astype(np.float32)
low, high = bounds.T
self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)
return self.action_space
def _set_observation_space(self, observation):
temp_observation_space = convert_observation_to_space(observation)
self.observation_space = spaces.Dict(dict(
observation=temp_observation_space,
desired_goal=spaces.Box(-np.inf, np.inf, shape=(2,), dtype=np.float32),
achieved_goal=spaces.Box(-np.inf, np.inf, shape=(2,), dtype=np.float32),
))
return self.observation_space
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
# methods to override:
# ----------------------------
def reset_model(self):
"""
Reset the robot degrees of freedom (qpos and qvel).
Implement this in each subclass.
"""
raise NotImplementedError
def viewer_setup(self):
"""
This method is called when the viewer is initialized.
Optionally implement this method, if you need to tinker with camera position
and so forth.
"""
pass
def reset(self):
self.sim.reset()
ob = self.reset_model()
return ob
def set_state(self, qpos, qvel):
assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,)
old_state = self.sim.get_state()
new_state = mujoco_py.MjSimState(old_state.time, qpos, qvel,
old_state.act, old_state.udd_state)
self.sim.set_state(new_state)
self.sim.forward()
@property
def dt(self):
return self.model.opt.timestep * self.frame_skip
def do_simulation(self, ctrl, n_frames):
self.sim.data.ctrl[:] = ctrl
for _ in range(n_frames):
self.sim.step()
def render(self,
mode='human',
width=DEFAULT_SIZE,
height=DEFAULT_SIZE,
camera_id=None,
camera_name=None):
if mode == 'rgb_array':
if camera_id is not None and camera_name is not None:
raise ValueError("Both `camera_id` and `camera_name` cannot be"
" specified at the same time.")
no_camera_specified = camera_name is None and camera_id is None
if no_camera_specified:
camera_name = 'track'
if camera_id is None and camera_name in self.model._camera_name2id:
camera_id = self.model.camera_name2id(camera_name)
self._get_viewer(mode).render(width, height, camera_id=camera_id)
# window size used for old mujoco-py:
data = self._get_viewer(mode).read_pixels(width, height, depth=False)
# original image is upside-down, so flip it
return data[::-1, :, :]
elif mode == 'depth_array':
self._get_viewer(mode).render(width, height)
# window size used for old mujoco-py:
# Extract depth part of the read_pixels() tuple
data = self._get_viewer(mode).read_pixels(width, height, depth=True)[1]
# original image is upside-down, so flip it
return data[::-1, :]
elif mode == 'human':
self._get_viewer(mode).render()
def close(self):
if self.viewer is not None:
# self.viewer.finish()
self.viewer = None
self._viewers = {}
def _get_viewer(self, mode):
self.viewer = self._viewers.get(mode)
if self.viewer is None:
if mode == 'human':
self.viewer = mujoco_py.MjViewer(self.sim)
elif mode == 'rgb_array' or mode == 'depth_array':
self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1)
self.viewer_setup()
self._viewers[mode] = self.viewer
return self.viewer
def get_body_com(self, body_name):
return self.data.get_body_xpos(body_name)
def state_vector(self):
return np.concatenate([
self.sim.data.qpos.flat,
self.sim.data.qvel.flat
])
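# Minimal subclass sketch (illustrative: 'point.xml' and _get_obs() are
# hypothetical names; a concrete environment must supply its own MJCF asset
# plus reset_model/observation logic):
#
#     class PointGoalEnv(MujocoGoalEnv):
#         def __init__(self):
#             super().__init__('point.xml', frame_skip=5)
#         def reset_model(self):
#             self.set_state(self.init_qpos, self.init_qvel)
#             return self._get_obs()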
|
rail-berkeley/d4rl
|
d4rl/locomotion/mujoco_goal_env.py
|
Python
|
apache-2.0
| 6,751
|
[
"TINKER"
] |
bffef171df1d7f68231cbeaa8adaecd7c7eb10737f8369927abc8eea26880e76
|
import boardgame as bg
from .core import *
from . import action
from . import tags
class Hydra(VillainGroup):
name = 'HYDRA'
def fill(self):
self.add(HydraKidnappers, 3)
self.add(HydraArmies, 3)
self.add(HydraViper, 1)
self.add(HydraSupreme, 1)
class HydraKidnappers(Villain):
power = 3
victory = 1
group = Hydra
name = 'HYDRA Kidnappers'
desc = 'Fight: You may gain a SHIELD Officer'
def on_fight(self, player):
if len(self.game.officers) > 0:
actions = [action.GainFrom(self.game.officers[0],
self.game.officers
),
]
self.game.choice(actions, allow_do_nothing=True)
class HydraArmies(Villain):
power = 4
victory = 3
group = Hydra
name = 'Endless Armies of HYDRA'
desc = 'Fight: Play the top two cards of the Villain deck.'
def on_fight(self, player):
self.game.play_villain()
self.game.play_villain()
class HydraViper(Villain):
power = 5
victory = 3
group = Hydra
name = 'Viper'
    desc = ('Fight: Each player without other HYDRA in Victory Pile gains'
            ' a Wound. Escape: Same effect')
def on_fight(self, player):
for p in self.game.players:
for v in p.victory_pile:
if v.group is Hydra and v != self:
break
else:
p.gain_wound(wounder=self)
def on_escape(self):
self.on_fight(None)
class HydraSupreme(Villain):
power = 6
group = Hydra
name = 'Supreme HYDRA'
desc = 'V+3 for each other HYDRA in Victory Pile'
victory = 3
def extra_victory(self, player):
pts = 0
for v in player.victory_pile:
if v.group is Hydra and v is not self:
pts += 3
return pts
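# Minimal sketch of adding a new group using the same patterns (illustrative
# only: 'ExampleFoes' and 'ExampleBruiser' are hypothetical cards, not part
# of any real set):
class ExampleFoes(VillainGroup):
    name = 'Example Foes'
    def fill(self):
        self.add(ExampleBruiser, 8)
class ExampleBruiser(Villain):
    power = 4
    victory = 2
    group = ExampleFoes
    name = 'Example Bruiser'
    desc = 'Fight: Draw a card.'
    def on_fight(self, player):
        player.draw(1)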
class Sentinel(Henchman):
name = 'Sentinel'
power = 3
victory = 1
desc = 'Fight: KO one of your Heroes.'
def on_fight(self, player):
player.ko_hero_from(player.hand, player.played)
class SpiderFoes(VillainGroup):
name = 'Spider Foes'
def fill(self):
self.add(Venom, 2)
self.add(Lizard, 2)
self.add(GreenGoblin, 2)
self.add(DoctorOctopus, 2)
class DoctorOctopus(Villain):
power = 4
group = SpiderFoes
name = 'Doctor Octopus'
desc = 'When you draw at the end of this turn, draw 8 instead of 6'
victory = 2
def on_fight(self, player):
player.draw_target = 8
class GreenGoblin(Villain):
power = 6
group = SpiderFoes
name = 'Green Goblin'
desc = 'Ambush: Green Goblin captures a Bystander'
victory = 4
def on_ambush(self):
self.game.capture_bystander()
class Lizard(Villain):
power = 3
group = SpiderFoes
name = 'The Lizard'
desc = ('If you fight The Lizard in the Sewers,'
' each other player gains a Wound.')
victory = 2
def on_pre_fight(self, player):
if self is self.game.city[4]:
for p in player.other_players():
p.gain_wound(wounder=self)
class Venom(Villain):
power = 5
group = SpiderFoes
name = 'Venom'
desc = ("You can't defeat Venom unless you have <Cov>. "
"Escape: each player gains a Wound.")
victory = 3
def on_escape(self):
for p in self.game.players:
p.gain_wound(wounder=self)
def can_fight(self, player):
return player.count_played(tag=tags.Covert) > 0
class Skrulls(VillainGroup):
name = 'Skrulls'
def fill(self):
self.add(SuperSkrull, 3)
self.add(SkrullShapeshifter, 3)
self.add(SkrullQueen, 1)
self.add(PowerSkrull, 1)
class PowerSkrull(Villain):
power = 8
group = Skrulls
name = 'Paibok the Power Skrull'
desc = "Fight: Each player gains a Hero from the HQ."
victory = 3
def on_fight(self, player):
heroes = [h for h in self.game.hq if isinstance(h, Hero)]
for p in self.game.players:
actions = []
for h in heroes:
if h in self.game.hq:
actions.append(action.GainFrom(h, self.game.hq, player=p))
if len(actions) > 0:
self.game.choice(actions)
class SkrullQueen(Villain):
power = 0
group = Skrulls
name = 'Skrull Queen Veranke'
desc = ("Ambush: Capture the highest-cost Hero from HQ. P is Hero's C. "
"Fight: Gain that Hero.")
victory = 4
def on_ambush(self):
costs = [h.cost if h is not None else -1 for h in self.game.hq]
index = costs.index(max(costs))
self.stolen_hero = self.game.hq[index]
self.game.hq[index] = None
self.game.fill_hq()
if self.stolen_hero is not None:
self.power = self.stolen_hero.cost
self.game.event('Skrull Queen Veranke captures %s' %
self.stolen_hero)
def on_fight(self, player):
if self.stolen_hero is not None:
self.game.event('Gained %s' % self.stolen_hero)
player.discard.append(self.stolen_hero)
self.stolen_hero = None
class SkrullShapeshifter(Villain):
power = 0
group = Skrulls
name = 'Skrull Shapeshifters'
desc = ("Ambush: Capture the right-most Hero from HQ. P is Hero's C. "
"Fight: Gain that Hero.")
victory = 2
def on_ambush(self):
index = 4
while not isinstance(self.game.hq[index], Hero):
index -= 1
if index < 0:
self.stolen_hero = None
return
self.stolen_hero = self.game.hq[index]
self.game.hq[index] = None
self.game.fill_hq()
self.power = self.stolen_hero.cost
self.game.event('Skrull Shapeshifters capture %s' % self.stolen_hero)
def on_fight(self, player):
if self.stolen_hero is not None:
self.game.event('Gained %s' % self.stolen_hero)
player.discard.append(self.stolen_hero)
class SuperSkrull(Villain):
name = 'Super-Skrull'
group = Skrulls
power = 4
victory = 2
desc = 'Fight: Each player KOs one of their Heroes.'
def on_fight(self, player):
for p in self.game.players:
if len(p.hand + p.played) > 0:
p.ko_hero_from(p.hand, p.played)
class HandNinjas(Henchman):
name = 'Hand Ninjas'
power = 3
victory = 1
desc = 'Fight: You get S+1.'
def on_fight(self, player):
player.available_star += 1
class MastersOfEvil(VillainGroup):
name = 'Masters of Evil'
def fill(self):
self.add(Ultron, 2)
self.add(Whirlwind, 2)
self.add(Melter, 2)
self.add(BaronZemo, 2)
class BaronZemo(Villain):
name = 'Baron Zemo'
group = MastersOfEvil
power = 6
victory = 4
desc = 'Fight: For each <Avg>, rescue a Bystander.'
def on_fight(self, player):
for i in range(player.count_played(tag=tags.Avenger)):
player.rescue_bystander()
class Melter(Villain):
name = 'Melter'
group = MastersOfEvil
power = 5
victory = 3
desc = ('Fight: Each player reveals top card of deck. '
'You choose to KO or return it.')
def on_fight(self, player):
for p in self.game.players:
cards = p.reveal(1)
if len(cards) > 0:
actions = [
action.KOFrom(cards[0], cards),
action.ReturnFrom(cards[0], cards)
]
self.game.choice(actions)
class Whirlwind(Villain):
name = 'Whirlwind'
group = MastersOfEvil
power = 4
victory = 2
    desc = 'Fight: If you fight on the Rooftops or Bridge, KO two Heroes.'
def on_pre_fight(self, player):
if self is self.game.city[0] or self is self.game.city[2]:
player.ko_hero_from(player.hand, player.played)
player.ko_hero_from(player.hand, player.played)
class Ultron(Villain):
name = 'Ultron'
group = MastersOfEvil
power = 6
victory = 2
desc = ('V+1 for each <Tec> you own. '
'Escape: Each player reveals <Tec> or gains a Wound.')
def extra_victory(self, player):
total = 0
for c in player.hand + player.discard + player.played + player.stack:
if tags.Tech in c.tags:
total += 1
return total
def on_escape(self):
for i, p in enumerate(self.game.players):
if p.reveal_tag(tags.Tech) is None:
p.gain_wound(wounder=self)
class DoombotLegion(Henchman):
name = 'Doombot Legion'
power = 3
victory = 1
desc = ('Fight: Reveal the top 2 cards of your deck. '
'KO one, return the other')
def on_fight(self, player):
index = len(player.hand)
player.draw(2)
cards = player.hand[index:]
if len(cards) > 0:
player.hand = player.hand[:index]
actions = []
for act in [action.KOFrom, action.ReturnFrom]:
for card in cards:
actions.append(act(card, cards))
repeat = len(cards) - 1
self.game.choice(actions, repeat=repeat, allow_same_type=False)
class SavageLandMutates(Henchman):
name = 'Savage Land Mutates'
power = 3
victory = 1
    desc = ('Fight: When you draw a new hand at the end of your turn, '
            'draw an extra card.')
def on_fight(self, player):
player.draw_hand_extra += 1
class EnemiesOfAsgard(VillainGroup):
name = 'Enemies of Asgard'
def fill(self):
self.add(Destroyer, 1)
self.add(Ymir, 2)
self.add(FrostGiant, 3)
self.add(Enchantress, 2)
class Destroyer(Villain):
name = 'Destroyer'
power = 7
victory = 5
    desc = ('Fight: KO all your <Shd> Heroes. '
            'Escape: Each player KOs 2 Heroes.')
def on_fight(self, player):
for c in player.hand[:]:
if tags.Shield in c.tags:
player.hand.remove(c)
self.game.ko.append(c)
self.game.event('Destroyer KOs %s' % c)
for c in player.played[:]:
if tags.Shield in c.tags:
player.played.remove(c)
self.game.ko.append(c)
self.game.event('Destroyer KOs %s' % c)
def on_escape(self):
for p in self.game.players:
p.ko_hero_from(p.hand, p.played)
p.ko_hero_from(p.hand, p.played)
class Ymir(Villain):
name = 'Ymir, Frost Giant King'
power = 6
victory = 4
    desc = ('Ambush: Each player reveals <Rng> or gains a Wound. '
            'Fight: Choose a player to KO all Wounds from hand and discard.')
    def on_ambush(self):
for p in self.game.players:
if p.reveal_tag(tags.Ranged) is None:
p.gain_wound(wounder=self)
def on_fight(self, player):
actions = []
for p in self.game.players:
actions.append(bg.CustomAction(
'%s KOs all Wounds' % p.name,
func=self.on_ko_wounds,
kwargs=dict(player=p)))
self.game.choice(actions)
def on_ko_wounds(self, player):
for c in player.hand[:]:
if isinstance(c, Wound):
self.game.ko.append(c)
player.hand.remove(c)
for c in player.discard[:]:
if isinstance(c, Wound):
self.game.ko.append(c)
player.discard.remove(c)
class Enchantress(Villain):
name = 'Enchantress'
power = 6
victory = 4
desc = 'Fight: Draw 3 cards.'
def on_fight(self, player):
player.draw(3)
class FrostGiant(Villain):
name = 'Frost Giant'
power = 4
victory = 2
desc = ('Fight: Each player reveals <Rng> or gains a Wound.'
' Escape: same effect.')
def on_fight(self, player):
for p in self.game.players:
if p.reveal_tag(tags.Ranged) is None:
p.gain_wound(wounder=self)
def on_escape(self):
self.on_fight(None)
class Brotherhood(VillainGroup):
name = 'Brotherhood'
def fill(self):
self.add(Juggernaut, 2)
self.add(Sabretooth, 2)
self.add(Mystique, 2)
self.add(Blob, 2)
class Juggernaut(Villain):
name = 'Juggernaut'
power = 6
victory = 4
desc = ('Ambush: Each player KOs 2 Heroes from their discard pile.'
' Escape: Each player KOs 2 Heroes from their hand.')
def on_ambush(self):
self.game.event('Juggernaut ambushes!')
for p in self.game.players:
p.ko_hero_from(p.discard)
p.ko_hero_from(p.discard)
def on_escape(self):
for p in self.game.players:
p.ko_hero_from(p.hand)
p.ko_hero_from(p.hand)
class Sabretooth(Villain):
name = 'Sabretooth'
power = 5
victory = 3
desc = ('Fight: Each player reveals <XMn> or gains a Wound.'
' Escape: Same effect.')
def on_fight(self, player):
for p in self.game.players:
if p.reveal_tag(tags.XMen) is None:
p.gain_wound(wounder=self)
def on_escape(self):
self.on_fight(None)
class Mystique(Villain):
name = 'Mystique'
power = 5
victory = 3
desc = ('Escape: Mystique becomes a Scheme Twist that takes effect'
' immediately.')
def on_escape(self):
self.game.event('Mystique causes a Scheme Twist')
self.game.scheme_twist()
class Blob(Villain):
name = 'Blob'
power = 4
victory = 2
desc = "You can't defeat Blob unless you have <XMn>"
def can_fight(self, player):
return player.count_played(tag=tags.XMen) > 0
class EmissariesOfEvil(VillainGroup):
name = 'Emissaries of Evil'
def fill(self):
self.add(Electro, 2)
self.add(Rhino, 2)
self.add(Gladiator, 2)
self.add(Egghead, 2)
class Electro(Villain):
name = 'Electro'
power = 6
victory = 4
desc = ("Ambush: If top Villain card is a Scheme Twist, play it.")
def on_ambush(self):
if len(self.game.villain) == 0:
return
card = self.game.villain[0]
self.game.event('Electro reveals %s' % card.text())
if isinstance(card, SchemeTwist):
self.game.play_villain()
class Rhino(Villain):
name = 'Rhino'
power = 5
victory = 3
desc = ("Ambush: If top Villain card is Master Strike, each "
"player gains Wound. Escape: Each player gains Wound.")
def on_ambush(self):
if len(self.game.villain) == 0:
return
card = self.game.villain[0]
self.game.event('Rhino reveals %s' % card.text())
if isinstance(card, MasterStrike):
for p in self.game.players:
p.gain_wound(wounder=self)
def on_escape(self):
for p in self.game.players:
p.gain_wound(wounder=self)
class Gladiator(Villain):
name = 'Gladiator'
power = 5
victory = 3
desc = ("Ambush: If top Villain card is a Bystander, "
"Gladiator captures it.")
def on_ambush(self):
if len(self.game.villain) == 0:
return
card = self.game.villain[0]
self.game.event('Gladiator reveals %s' % card.text())
if isinstance(card, Bystander):
self.game.play_villain()
class Egghead(Villain):
name = 'Egghead'
power = 4
victory = 2
desc = ("Ambush: If top Villain card is a Villain, play it.")
def on_ambush(self):
if len(self.game.villain) == 0:
return
card = self.game.villain[0]
self.game.event('Egghead reveals %s' % card.text())
if isinstance(card, Villain):
self.game.play_villain()
class Radiation(VillainGroup):
name = 'Radiation'
def fill(self):
self.add(Maestro, 2)
self.add(Abomination, 2)
self.add(Zzzax, 2)
self.add(TheLeader, 2)
class Maestro(Villain):
name = 'Maestro'
power = 6
victory = 4
desc = "Fight: For each of your <Str> Heroes, KO one of your Heroes."
def on_fight(self, player):
for i in range(player.count_tagged(tags.Strength)):
player.ko_hero_from(player.hand, player.played)
class Abomination(Villain):
name = 'Abomination'
power = 5
victory = 3
desc = "Fight: If you fight on Streets or Bridge, rescue 3 Bystanders."
def on_pre_fight(self, player):
if self is self.game.city[0] or self is self.game.city[1]:
player.rescue_bystander()
player.rescue_bystander()
player.rescue_bystander()
class Zzzax(Villain):
name = 'Zzzax'
power = 5
victory = 3
desc = ("Fight: Each player reveals <Str> or gains a Wound. "
"Escape: same effect.")
def on_fight(self, player):
for p in self.game.players:
if p.reveal_tag(tags.Strength) is None:
p.gain_wound(wounder=self)
def on_escape(self):
self.on_fight(None)
class TheLeader(Villain):
name = 'The Leader'
power = 4
victory = 2
desc = "Ambush: Play the top Villain card."
def on_ambush(self):
self.game.play_villain()
class FourHorsemen(VillainGroup):
name = 'Four Horsemen'
def fill(self):
self.add(Death, 2)
self.add(War, 2)
self.add(Pestilence, 2)
self.add(Famine, 2)
class Death(Villain):
name = 'Death'
power = 7
victory = 5
desc = ("Fight: Each other player KOs a hero of cost 1 or more. "
"Escape: Each player does the same.")
def on_fight(self, player):
for p in player.other_players():
self.punish(p)
def on_escape(self):
for p in self.game.players:
self.punish(p)
def punish(self, p):
actions = []
for c in p.hand:
if c.cost >= 1:
actions.append(action.KOFrom(c, p.hand))
for c in p.played:
if c.cost >= 1:
actions.append(action.KOFrom(c, p.played))
if len(actions) > 0:
self.game.choice(actions)
class War(Villain):
name = 'War'
power = 6
victory = 4
desc = ("Fight: Each other player reveals <Ins> or gains Wound. "
"Escape: Each player does the same.")
def on_fight(self, player):
for p in player.other_players():
self.punish(p)
def on_escape(self):
for p in self.game.players:
self.punish(p)
def punish(self, p):
        if p.reveal_tag(tags.Instinct) is None:
            p.gain_wound(wounder=self)
class Pestilence(Villain):
name = 'Pestilence'
power = 5
victory = 3
desc = ("Fight: Each other player reveals top 3 cards, discards C>0, puts "
"rest back in any order. Escape: Each player does the same.")
def on_fight(self, player):
for p in player.other_players():
self.punish(p)
def on_escape(self):
for p in self.game.players:
self.punish(p)
def punish(self, p):
cards = p.reveal(3)
for c in cards[:]:
if c.cost > 0:
p.discard_from(c, cards)
while len(cards) > 0:
actions = [action.ReturnFrom(c, cards) for c in cards]
self.game.choice(actions)
class Famine(Villain):
name = 'Famine'
power = 4
victory = 2
desc = ("Fight: Each other player reveals <Ins> or discards a card. "
"Escape: Each player does the same.")
def on_fight(self, player):
for p in player.other_players():
self.punish(p)
def on_escape(self):
for p in self.game.players:
self.punish(p)
    def punish(self, p):
        # reveal_tag returns the revealed card or None (cf. War.punish above)
        if p.reveal_tag(tags.Instinct) is None:
            actions = [action.DiscardFrom(c, p.hand) for c in p.hand]
            if len(actions) > 0:
                self.game.choice(actions)
class MaggiaGoons(Henchman):
name = 'Maggia Goons'
power = 4
victory = 1
bribe = True
desc = 'Bribe. Fight: KO one of your Heroes.'
def on_fight(self, player):
player.ko_hero_from(player.hand, player.played)
class StreetsOfNewYork(VillainGroup):
name = 'Streets of New York'
def fill(self):
self.add(Jigsaw, 2)
self.add(Tombstone, 2)
self.add(Bullseye, 2)
self.add(Hammerhead, 2)
class Jigsaw(Villain):
name = 'Jigsaw'
power = 11
victory = 5
bribe = True
desc = ("Bribe. Ambush: Each player discards 3 cards, then draws 2 cards.")
def on_ambush(self):
for p in self.game.players:
for i in range(3):
if len(p.hand) > 0:
actions = [action.DiscardFrom(c, p.hand) for c in p.hand]
self.game.choice(actions)
p.draw(2)
class Tombstone(Villain):
name = 'Tombstone'
power = 8
victory = 4
bribe = True
desc = ("Bribe. Escape: Each player reveals <Str> or gains a Wound.")
def on_escape(self):
for p in self.game.players:
if p.reveal_tag(tags.Strength) is None:
p.gain_wound(wounder=self)
class Bullseye(Villain):
name = 'Bullseye'
power = 6
victory = 4
desc = ("Fight: KO one Hero with S icon and one Hero with P icon.")
def on_fight(self, player):
actions = []
for c in player.hand:
if c.star > 0 or c.extra_star:
actions.append(action.KOFrom(c, player.hand))
for c in player.played:
if c.star > 0 or c.extra_star:
actions.append(action.KOFrom(c, player.played))
if len(actions) > 0:
self.game.choice(actions)
actions = []
for c in player.hand:
if c.power > 0 or c.extra_power:
actions.append(action.KOFrom(c, player.hand))
for c in player.played:
if c.power > 0 or c.extra_power:
actions.append(action.KOFrom(c, player.played))
if len(actions) > 0:
self.game.choice(actions)
class Hammerhead(Villain):
name = 'Hammerhead'
power = 5
victory = 2
bribe = True
desc = ("Bribe. Fight: KO one Hero with S icon.")
def on_fight(self, player):
actions = []
for c in player.hand:
if c.star > 0 or c.extra_star:
actions.append(action.KOFrom(c, player.hand))
for c in player.played:
if c.star > 0 or c.extra_star:
actions.append(action.KOFrom(c, player.played))
if len(actions) > 0:
self.game.choice(actions)
|
tcstewar/boardgame
|
boardgame/legendary/villains.py
|
Python
|
gpl-2.0
| 22,654
|
[
"Octopus"
] |
a332db13ea9931ca18a9792ec5448fbeb4275192bba4a6a75ab39ee7f5e72fdd
|
# Goal: make a table of ASE values for each gene.
import sys
import scipy.stats
import vcf
if len(sys.argv) != 6:
print('python aseValuesByGene.py [DNA vcf] [RNA vcf] [out file] [min snps per gene] [list of median expression levels]')
sys.exit()
dnaDict = {} #keys are (scaffold,pos) and value is a list of all the hets.
aseDict = {} #keys are (gene) value is dict where keys = ind, vals = list of diffs in het snps
med = {} #keys are id, values are the median.
#make med
medList = open(sys.argv[5],'r')
for line in medList:
ent = line.split()
med[ent[0]] = float(ent[1])
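# Assumed format of the median-expression file (illustrative; two columns,
# individual id and median expression level):
#   ind1    12.5
#   ind2    8.75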
#read in DNA genotypes and add to dnaDict
vcf_reader = vcf.Reader( open(sys.argv[1],'r') )
indList = vcf_reader.samples
for record in vcf_reader:
hets = [x.sample for x in record.get_hets()]
dnaDict[(record.CHROM,record.POS)] = hets
#read through RNA genotypes and add to aseDict
rna_reader = vcf.VCFReader( open(sys.argv[2],'r'))
for record in rna_reader:
if record.CHROM not in aseDict.keys():
aseDict[record.CHROM] = {} #add gene to aseDict if it's not already there
for i in indList:
aseDict[record.CHROM][i] = []
for ind in dnaDict[(record.CHROM,record.POS)]: #read through inds with het genotypes
if ind not in aseDict[record.CHROM].keys():
            aseDict[record.CHROM][ind] = [] #add ind to this gene's dict if not already there
try:
ad = record.genotype(ind).data.AD
        except Exception:
print(ind)
print(record)
print(record.samples)
print('') #want to make sure I'm not missing cases of complete ASE
continue
        if ad is None:  # missing data
continue
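        # AD holds per-allele read depths [ref, alt] (assumed VCF ordering);
        # the allelic imbalance is normalized by this individual's median.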
aseDif = float(abs(ad[1] - ad[0]))/med[ind]
aseDict[record.CHROM][ind].append(aseDif)
# print out -- table where rows are genes, columns are genotypes
out = open(sys.argv[3],'a')
#out.write("pac "+" ".join(indList))
for gene in aseDict:
out.write(gene)
for ind in indList:
if len(aseDict[gene][ind]) >= int(sys.argv[4]): #do we have data?
aseVal = sum(aseDict[gene][ind])/len(aseDict[gene][ind]) #calc aseMean for this ind
out.write(' '+str(aseVal))
else:
out.write(' NA')
out.write("\n")
|
emjosephs/eQTL
|
aseValuesByGene.py
|
Python
|
mit
| 2,135
|
[
"ASE"
] |
5c220f3f2d96855936457126afa18d474fea0554758197d54bed3f152350b5a2
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import session, _
from frappe.utils import today
from erpnext.utilities.transaction_base import TransactionBase
class WarrantyClaim(TransactionBase):
def get_feed(self):
return _("{0}: From {1}").format(self.status, self.customer_name)
def validate(self):
if session['user'] != 'Guest' and not self.customer:
frappe.throw(_("Customer is required"))
if self.status=="Closed" and \
frappe.db.get_value("Warranty Claim", self.name, "status")!="Closed":
self.resolution_date = today()
def on_cancel(self):
lst = frappe.db.sql("""select t1.name
from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent = t1.name and t2.prevdoc_docname = %s and t1.docstatus!=2""",
            (self.name,))
if lst:
lst1 = ','.join([x[0] for x in lst])
frappe.throw(_("Cancel Material Visit {0} before cancelling this Warranty Claim").format(lst1))
else:
frappe.db.set(self, 'status', 'Cancelled')
def on_update(self):
pass
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None):
from frappe.model.mapper import get_mapped_doc, map_child_doc
def _update_links(source_doc, target_doc, source_parent):
target_doc.prevdoc_doctype = source_parent.doctype
target_doc.prevdoc_docname = source_parent.name
visit = frappe.db.sql("""select t1.name
from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s
and t1.docstatus=1 and t1.completion_status='Fully Completed'""", source_name)
if not visit:
target_doc = get_mapped_doc("Warranty Claim", source_name, {
"Warranty Claim": {
"doctype": "Maintenance Visit",
"field_map": {}
}
}, target_doc)
source_doc = frappe.get_doc("Warranty Claim", source_name)
if source_doc.get("item_code"):
table_map = {
"doctype": "Maintenance Visit Purpose",
"postprocess": _update_links
}
map_child_doc(source_doc, target_doc, table_map, source_doc)
return target_doc
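# Illustrative usage (hedged; the document name below is made up):
#   visit = make_maintenance_visit("WC-00001")
#   visit.insert()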
|
mahabuber/erpnext
|
erpnext/support/doctype/warranty_claim/warranty_claim.py
|
Python
|
agpl-3.0
| 2,169
|
[
"VisIt"
] |
f5c5f70365e36abe0345d2f721686bf4120f18e0fb28143869eba18bb10d38ab
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import os.path
SETUP_DIR = os.path.dirname(__file__)
README = os.path.join(SETUP_DIR, 'README.md')
readme = open(README).read()
setup(
name='regate',
version='0.10.0',
description='Registration of Galaxy Tools in Elixir',
long_description=readme,
keywords=['GalaxyProject'],
author='Olivia Doppelt-Azeroual and Fabien Mareuil',
author_email='olivia.doppelt@pasteur.fr and fabien.mareuil@pasteur.fr',
url='https://github.com/bioinfo-center-pasteur-fr/ReGaTE',
packages=['regate'],
install_requires=[
'ruamel.yaml',
'html5lib==1.0b8',
'rdflib',
'cheetah',
'requests',
'configparser',
'bioblend',
'lxml'
],
license="GPLv2",
entry_points={
'console_scripts': ['regate=regate.regate:run',
'remag=regate.remag:run'],
},
tests_require=['nose', 'nose_parameterized'],
test_suite='nose.collector',
include_package_data=True,
zip_safe=False
)
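# Note: after installation (e.g. `pip install .`) the entry_points above
# expose the `regate` and `remag` console commands.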
|
C3BI-pasteur-fr/ReGaTE
|
setup.py
|
Python
|
gpl-2.0
| 1,091
|
[
"Galaxy"
] |
405b042400a466d996ead05cab1d792e77c6ea22f2504259479a35d1fa379d9a
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import re
import numpy as np
import warnings
from pymatgen.core.operations import SymmOp
from pymatgen import Element, Molecule, Composition
from monty.io import zopen
from pymatgen.core.units import Ha_to_eV
from pymatgen.util.coord import get_angle
import scipy.constants as cst
from pymatgen.electronic_structure.core import Spin
"""
This module implements input and output processing from Gaussian.
"""
__author__ = 'Shyue Ping Ong, Germain Salvato-Vallverdu, Xin Chen'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '8/1/15'
float_patt = re.compile(r"\s*([+-]?\d+\.\d+)")
def read_route_line(route):
"""
read route line in gaussian input/output and return functional basis_set
and a dictionary of other route parameters
Args:
route (str) : the route line
return
functional (str) : the method (HF, PBE ...)
basis_set (str) : the basis set
route (dict) : dictionary of parameters
"""
scrf_patt = re.compile(r"^([sS][cC][rR][fF])\s*=\s*(.+)")
    multi_params_patt = re.compile(r"^([A-Za-z]+[0-9]*)[\s=]+\((.*)\)$")
functional = None
basis_set = None
route_params = {}
dieze_tag = None
if route:
if "/" in route:
tok = route.split("/")
functional = tok[0].split()[-1]
basis_set = tok[1].split()[0]
for tok in [functional, basis_set, "/"]:
route = route.replace(tok, "")
for tok in route.split():
if scrf_patt.match(tok):
m = scrf_patt.match(tok)
route_params[m.group(1)] = m.group(2)
elif tok.upper() in ["#", "#N", "#P", "#T"]:
# does not store # in route to avoid error in input
if tok == "#":
dieze_tag = "#N"
else:
dieze_tag = tok
continue
else:
m = re.match(multi_params_patt, tok.strip("#"))
if m:
pars = {}
for par in m.group(2).split(","):
p = par.split("=")
pars[p[0]] = None if len(p) == 1 else p[1]
route_params[m.group(1)] = pars
else:
d = tok.strip("#").split("=")
route_params[d[0]] = None if len(d) == 1 else d[1]
return functional, basis_set, route_params, dieze_tag
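# Illustrative example (hedged; the route line is made up, the output is what
# the parser above should return for it):
#   >>> read_route_line("#P B3LYP/6-31G(d) Opt SCF=Tight")
#   ('B3LYP', '6-31G(d)', {'Opt': None, 'SCF': 'Tight'}, '#P')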
class GaussianInput(object):
"""
An object representing a Gaussian input file.
Args:
mol: Input molecule. If molecule is a single string, it is used as a
direct input to the geometry section of the Gaussian input
file.
charge: Charge of the molecule. If None, charge on molecule is used.
            Defaults to None. This allows the input file to be given a
            charge independently of the molecule itself.
spin_multiplicity: Spin multiplicity of molecule. Defaults to None,
which means that the spin multiplicity is set to 1 if the
molecule has no unpaired electrons and to 2 if there are
unpaired electrons.
title: Title for run. Defaults to formula of molecule if None.
functional: Functional for run.
basis_set: Basis set for run.
route_parameters: Additional route parameters as a dict. For example,
{'SP':"", "SCF":"Tight"}
input_parameters: Additional input parameters for run as a dict. Used
for example, in PCM calculations. E.g., {"EPS":12}
link0_parameters: Link0 parameters as a dict. E.g., {"%mem": "1000MW"}
dieze_tag: # preceding the route line. E.g. "#p"
gen_basis: allows a user-specified basis set to be used in a Gaussian
calculation. If this is not None, the attribute ``basis_set`` will
be set to "Gen".
"""
# Commonly used regex patterns
_zmat_patt = re.compile(r"^(\w+)*([\s,]+(\w+)[\s,]+(\w+))*[\-\.\s,\w]*$")
_xyz_patt = re.compile(r"^(\w+)[\s,]+([\d\.eE\-]+)[\s,]+([\d\.eE\-]+)[\s,]+"
r"([\d\.eE\-]+)[\-\.\s,\w.]*$")
def __init__(self, mol, charge=None, spin_multiplicity=None, title=None,
functional="HF", basis_set="6-31G(d)", route_parameters=None,
input_parameters=None, link0_parameters=None, dieze_tag="#P",
gen_basis=None):
self._mol = mol
self.charge = charge if charge is not None else mol.charge
nelectrons = - self.charge + mol.charge + mol.nelectrons
if spin_multiplicity is not None:
self.spin_multiplicity = spin_multiplicity
if (nelectrons + spin_multiplicity) % 2 != 1:
raise ValueError(
"Charge of {} and spin multiplicity of {} is"
" not possible for this molecule".format(
self.charge, spin_multiplicity))
else:
self.spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
self.functional = functional
self.basis_set = basis_set
self.link0_parameters = link0_parameters if link0_parameters else {}
self.route_parameters = route_parameters if route_parameters else {}
self.input_parameters = input_parameters if input_parameters else {}
self.title = title if title else self._mol.composition.formula
self.dieze_tag = dieze_tag if dieze_tag[0] == "#" else "#" + dieze_tag
self.gen_basis = gen_basis
if gen_basis is not None:
self.basis_set = "Gen"
@property
def molecule(self):
"""
Returns molecule associated with this GaussianInput.
"""
return self._mol
@staticmethod
def _parse_coords(coord_lines):
"""
Helper method to parse coordinates.
"""
paras = {}
var_pattern = re.compile(r"^([A-Za-z]+\S*)[\s=,]+([\d\-\.]+)$")
for l in coord_lines:
m = var_pattern.match(l.strip())
if m:
paras[m.group(1).strip("=")] = float(m.group(2))
species = []
coords = []
# Stores whether a Zmatrix format is detected. Once a zmatrix format
        # is detected, it is assumed for the remainder of the parsing.
zmode = False
for l in coord_lines:
l = l.strip()
if not l:
break
if (not zmode) and GaussianInput._xyz_patt.match(l):
m = GaussianInput._xyz_patt.match(l)
species.append(m.group(1))
toks = re.split(r"[,\s]+", l.strip())
if len(toks) > 4:
coords.append([float(i) for i in toks[2:5]])
else:
coords.append([float(i) for i in toks[1:4]])
elif GaussianInput._zmat_patt.match(l):
zmode = True
toks = re.split(r"[,\s]+", l.strip())
species.append(toks[0])
toks.pop(0)
if len(toks) == 0:
coords.append(np.array([0, 0, 0]))
else:
nn = []
parameters = []
while len(toks) > 1:
ind = toks.pop(0)
data = toks.pop(0)
try:
nn.append(int(ind))
except ValueError:
nn.append(species.index(ind) + 1)
try:
val = float(data)
parameters.append(val)
except ValueError:
if data.startswith("-"):
parameters.append(-paras[data[1:]])
else:
parameters.append(paras[data])
if len(nn) == 1:
coords.append(np.array([0, 0, parameters[0]]))
elif len(nn) == 2:
coords1 = coords[nn[0] - 1]
coords2 = coords[nn[1] - 1]
bl = parameters[0]
angle = parameters[1]
axis = [0, 1, 0]
op = SymmOp.from_origin_axis_angle(coords1, axis,
angle, False)
coord = op.operate(coords2)
vec = coord - coords1
coord = vec * bl / np.linalg.norm(vec) + coords1
coords.append(coord)
elif len(nn) == 3:
coords1 = coords[nn[0] - 1]
coords2 = coords[nn[1] - 1]
coords3 = coords[nn[2] - 1]
bl = parameters[0]
angle = parameters[1]
dih = parameters[2]
v1 = coords3 - coords2
v2 = coords1 - coords2
axis = np.cross(v1, v2)
op = SymmOp.from_origin_axis_angle(
coords1, axis, angle, False)
coord = op.operate(coords2)
v1 = coord - coords1
v2 = coords1 - coords2
v3 = np.cross(v1, v2)
adj = get_angle(v3, axis)
axis = coords1 - coords2
op = SymmOp.from_origin_axis_angle(
coords1, axis, dih - adj, False)
coord = op.operate(coord)
vec = coord - coords1
coord = vec * bl / np.linalg.norm(vec) + coords1
coords.append(coord)
def _parse_species(sp_str):
"""
The species specification can take many forms. E.g.,
simple integers representing atomic numbers ("8"),
actual species string ("C") or a labelled species ("C1").
Sometimes, the species string is also not properly capitalized,
e.g, ("c1"). This method should take care of these known formats.
"""
try:
return int(sp_str)
except ValueError:
sp = re.sub(r"\d", "", sp_str)
return sp.capitalize()
species = [_parse_species(sp) for sp in species]
return Molecule(species, coords)
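    # Note: _parse_coords accepts either cartesian lines ("O 0.0 0.0 0.0") or
    # a Z-matrix; once a Z-matrix line is seen, the rest is parsed as Z-matrix.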
@staticmethod
def from_string(contents):
"""
Creates GaussianInput from a string.
Args:
            contents: String representing a Gaussian input file.
Returns:
GaussianInput object
"""
lines = [l.strip() for l in contents.split("\n")]
link0_patt = re.compile(r"^(%.+)\s*=\s*(.+)")
link0_dict = {}
for i, l in enumerate(lines):
if link0_patt.match(l):
m = link0_patt.match(l)
link0_dict[m.group(1).strip("=")] = m.group(2)
route_patt = re.compile(r"^#[sSpPnN]*.*")
route = ""
route_index = None
for i, l in enumerate(lines):
if route_patt.match(l):
route += " " + l
route_index = i
# This condition allows for route cards spanning multiple lines
elif (l == "" or l.isspace()) and route_index:
break
functional, basis_set, route_paras, dieze_tag = read_route_line(route)
ind = 2
title = []
while lines[route_index + ind].strip():
title.append(lines[route_index + ind].strip())
ind += 1
title = ' '.join(title)
ind += 1
toks = re.split(r"[,\s]+", lines[route_index + ind])
charge = int(toks[0])
spin_mult = int(toks[1])
coord_lines = []
spaces = 0
input_paras = {}
ind += 1
for i in range(route_index + ind, len(lines)):
if lines[i].strip() == "":
spaces += 1
if spaces >= 2:
d = lines[i].split("=")
if len(d) == 2:
input_paras[d[0]] = d[1]
else:
coord_lines.append(lines[i].strip())
mol = GaussianInput._parse_coords(coord_lines)
mol.set_charge_and_spin(charge, spin_mult)
return GaussianInput(mol, charge=charge, spin_multiplicity=spin_mult,
title=title, functional=functional,
basis_set=basis_set,
route_parameters=route_paras,
input_parameters=input_paras,
link0_parameters=link0_dict,
dieze_tag=dieze_tag)
@staticmethod
def from_file(filename):
"""
Creates GaussianInput from a file.
Args:
filename: Gaussian input filename
Returns:
GaussianInput object
"""
with zopen(filename, "r") as f:
return GaussianInput.from_string(f.read())
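    # Illustrative usage (hedged; the geometry values are made up):
    #   mol = Molecule(["O", "H", "H"],
    #                  [[0.0, 0.0, 0.0], [0.0, 0.0, 0.96], [0.95, 0.0, -0.24]])
    #   gau = GaussianInput(mol, route_parameters={"SP": "", "SCF": "Tight"})
    #   gau.write_file("water.com", cart_coords=True)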
def _find_nn_pos_before_site(self, siteindex):
"""
Returns index of nearest neighbor atoms.
"""
alldist = [(self._mol.get_distance(siteindex, i), i)
for i in range(siteindex)]
alldist = sorted(alldist, key=lambda x: x[0])
return [d[1] for d in alldist]
def get_zmatrix(self):
"""
Returns a z-matrix representation of the molecule.
"""
output = []
outputvar = []
for i, site in enumerate(self._mol):
if i == 0:
output.append("{}".format(site.specie))
elif i == 1:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
output.append("{} {} B{}".format(self._mol[i].specie,
nn[0] + 1, i))
outputvar.append("B{}={:.6f}".format(i, bondlength))
elif i == 2:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
angle = self._mol.get_angle(i, nn[0], nn[1])
output.append("{} {} B{} {} A{}".format(self._mol[i].specie,
nn[0] + 1, i,
nn[1] + 1, i))
outputvar.append("B{}={:.6f}".format(i, bondlength))
outputvar.append("A{}={:.6f}".format(i, angle))
else:
nn = self._find_nn_pos_before_site(i)
bondlength = self._mol.get_distance(i, nn[0])
angle = self._mol.get_angle(i, nn[0], nn[1])
dih = self._mol.get_dihedral(i, nn[0], nn[1], nn[2])
output.append("{} {} B{} {} A{} {} D{}"
.format(self._mol[i].specie, nn[0] + 1, i,
nn[1] + 1, i, nn[2] + 1, i))
outputvar.append("B{}={:.6f}".format(i, bondlength))
outputvar.append("A{}={:.6f}".format(i, angle))
outputvar.append("D{}={:.6f}".format(i, dih))
return "\n".join(output) + "\n\n" + "\n".join(outputvar)
def get_cart_coords(self):
"""
Return the cartesian coordinates of the molecule
"""
def to_s(x):
return "%0.6f" % x
outs = []
for i, site in enumerate(self._mol):
outs.append(" ".join([site.species_string,
" ".join([to_s(j) for j in site.coords])]))
return "\n".join(outs)
def __str__(self):
return self.to_string()
def to_string(self, cart_coords=False):
"""
Return GaussianInput string
        Option: when cart_coords is set to True, return the cartesian
        coordinates instead of the z-matrix.
"""
def para_dict_to_string(para, joiner=" "):
para_str = []
# sorted is only done to make unittests work reliably
for par, val in sorted(para.items()):
if val is None or val == "":
para_str.append(par)
elif isinstance(val, dict):
val_str = para_dict_to_string(val, joiner=",")
para_str.append("{}=({})".format(par, val_str))
else:
para_str.append("{}={}".format(par, val))
return joiner.join(para_str)
output = []
if self.link0_parameters:
output.append(para_dict_to_string(self.link0_parameters, "\n"))
output.append("{diez} {func}/{bset} {route}"
.format(diez=self.dieze_tag, func=self.functional,
bset=self.basis_set,
route=para_dict_to_string(self.route_parameters))
)
output.append("")
output.append(self.title)
output.append("")
output.append("{} {}".format(self.charge, self.spin_multiplicity))
if isinstance(self._mol, Molecule):
if cart_coords is True:
output.append(self.get_cart_coords())
else:
output.append(self.get_zmatrix())
else:
output.append(str(self._mol))
output.append("")
if self.gen_basis is not None:
output.append("{:s}\n".format(self.gen_basis))
output.append(para_dict_to_string(self.input_parameters, "\n"))
output.append("\n")
return "\n".join(output)
def write_file(self, filename, cart_coords=False):
"""
Write the input string into a file
Option: see __str__ method
"""
with zopen(filename, "w") as f:
f.write(self.to_string(cart_coords))
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"molecule": self.molecule.as_dict(),
"functional": self.functional,
"basis_set": self.basis_set,
"route_parameters": self.route_parameters,
"title": self.title,
"charge": self.charge,
"spin_multiplicity": self.spin_multiplicity,
"input_parameters": self.input_parameters,
"link0_parameters": self.link0_parameters,
"dieze_tag": self.dieze_tag}
@classmethod
def from_dict(cls, d):
return GaussianInput(mol=Molecule.from_dict(d["molecule"]),
functional=d["functional"],
basis_set=d["basis_set"],
route_parameters=d["route_parameters"],
title=d["title"],
charge=d["charge"],
spin_multiplicity=d["spin_multiplicity"],
input_parameters=d["input_parameters"],
link0_parameters=d["link0_parameters"])
class GaussianOutput(object):
"""
Parser for Gaussian output files.
Args:
filename: Filename of Gaussian output file.
.. note::
Still in early beta.
Attributes:
.. attribute:: structures
All structures from the calculation.
.. attribute:: energies
All energies from the calculation.
.. attribute:: eigenvalues
List of eigenvalues for the last geometry
.. attribute:: MO_coefficients
Matrix of MO coefficients for the last geometry
.. attribute:: cart_forces
All cartesian forces from the calculation.
.. attribute:: frequencies
A list for each freq calculation and for each mode of a dict with
{
"frequency": freq in cm-1,
"symmetry": symmetry tag
"r_mass": Reduce mass,
"f_constant": force constant,
"IR_intensity": IR Intensity,
"mode": normal mode
}
        The normal mode is a 1D vector of dx, dy, dz for each atom.
.. attribute:: hessian
Matrix of second derivatives of the energy with respect to cartesian
coordinates in the **input orientation** frame. Need #P in the
route section in order to be in the output.
.. attribute:: properly_terminated
True if run has properly terminated
.. attribute:: is_pcm
True if run is a PCM run.
.. attribute:: is_spin
True if it is an unrestricted run
.. attribute:: stationary_type
If it is a relaxation run, indicates whether it is a minimum (Minimum)
or a saddle point ("Saddle").
.. attribute:: corrections
Thermochemical corrections if this run is a Freq run as a dict. Keys
are "Zero-point", "Thermal", "Enthalpy" and "Gibbs Free Energy"
.. attribute:: functional
Functional used in the run.
.. attribute:: basis_set
Basis set used in the run
.. attribute:: route
Additional route parameters as a dict. For example,
{'SP':"", "SCF":"Tight"}
.. attribute:: dieze_tag
# preceding the route line, e.g. "#P"
.. attribute:: link0
Link0 parameters as a dict. E.g., {"%mem": "1000MW"}
.. attribute:: charge
Charge for structure
.. attribute:: spin_multiplicity
Spin multiplicity for structure
.. attribute:: num_basis_func
Number of basis functions in the run.
.. attribute:: electrons
number of alpha and beta electrons as (N alpha, N beta)
.. attribute:: pcm
PCM parameters and output if available.
.. attribute:: errors
error if not properly terminated (list to be completed in error_defs)
.. attribute:: Mulliken_charges
Mulliken atomic charges
.. attribute:: eigenvectors
Matrix of shape (num_basis_func, num_basis_func). Each column is an
        eigenvector and contains the AO coefficients of an MO.
eigenvectors[Spin] = mat(num_basis_func, num_basis_func)
.. attribute:: molecular_orbital
        MO expansion coefficients on the AO basis in a more convenient array dict
for each atom and basis set label.
mo[Spin][OM j][atom i] = {AO_k: coeff, AO_k: coeff ... }
.. attribute:: atom_basis_labels
        Labels of AO for each atom. These labels are those used in the output
of molecular orbital coefficients (POP=Full) and in the
molecular_orbital array dict.
atom_basis_labels[iatom] = [AO_k, AO_k, ...]
.. attribute:: resumes
        List of Gaussian data summaries ("resumes") given at the end of the
        output file before the quotation. The resumes are given as strings.
.. attribute:: title
Title of the gaussian run.
Methods:
.. method:: to_input()
Return a GaussianInput object using the last geometry and the same
calculation parameters.
.. method:: read_scan()
Read a potential energy surface from a gaussian scan calculation.
.. method:: get_scan_plot()
Get a matplotlib plot of the potential energy surface
.. method:: save_scan_plot()
Save a matplotlib plot of the potential energy surface to a file
"""
def __init__(self, filename):
self.filename = filename
self._parse(filename)
@property
def final_energy(self):
return self.energies[-1]
@property
def final_structure(self):
return self.structures[-1]
def _parse(self, filename):
start_patt = re.compile(r" \(Enter \S+l101\.exe\)")
route_patt = re.compile(r" #[pPnNtT]*.*")
link0_patt = re.compile(r"^\s(%.+)\s*=\s*(.+)")
charge_mul_patt = re.compile(r"Charge\s+=\s*([-\d]+)\s+"
r"Multiplicity\s+=\s*(\d+)")
num_basis_func_patt = re.compile(r"([0-9]+)\s+basis functions")
num_elec_patt = re.compile(r"(\d+)\s+alpha electrons\s+(\d+)\s+beta electrons")
pcm_patt = re.compile(r"Polarizable Continuum Model")
stat_type_patt = re.compile(r"imaginary frequencies")
scf_patt = re.compile(r"E\(.*\)\s*=\s*([-\.\d]+)\s+")
mp2_patt = re.compile(r"EUMP2\s*=\s*(.*)")
oniom_patt = re.compile(r"ONIOM:\s+extrapolated energy\s*=\s*(.*)")
termination_patt = re.compile(r"(Normal|Error) termination")
error_patt = re.compile(
r"(! Non-Optimized Parameters !|Convergence failure)")
mulliken_patt = re.compile(
r"^\s*(Mulliken charges|Mulliken atomic charges)")
mulliken_charge_patt = re.compile(
r'^\s+(\d+)\s+([A-Z][a-z]?)\s*(\S*)')
end_mulliken_patt = re.compile(
r'(Sum of Mulliken )(.*)(charges)\s*=\s*(\D)')
std_orientation_patt = re.compile(r"Standard orientation")
end_patt = re.compile(r"--+")
orbital_patt = re.compile(r"(Alpha|Beta)\s*\S+\s*eigenvalues --(.*)")
thermo_patt = re.compile(r"(Zero-point|Thermal) correction(.*)="
r"\s+([\d\.-]+)")
forces_on_patt = re.compile(
r"Center\s+Atomic\s+Forces\s+\(Hartrees/Bohr\)")
forces_off_patt = re.compile(r"Cartesian\s+Forces:\s+Max.*RMS.*")
forces_patt = re.compile(
r"\s+(\d+)\s+(\d+)\s+([0-9\.-]+)\s+([0-9\.-]+)\s+([0-9\.-]+)")
freq_on_patt = re.compile(
r"Harmonic\sfrequencies\s+\(cm\*\*-1\),\sIR\sintensities.*Raman.*")
freq_patt = re.compile(r"Frequencies\s--\s+(.*)")
normal_mode_patt = re.compile(
r"\s+(\d+)\s+(\d+)\s+([0-9\.-]{4,5})\s+([0-9\.-]{4,5}).*")
mo_coeff_patt = re.compile(r"Molecular Orbital Coefficients:")
mo_coeff_name_patt = re.compile(r"\d+\s((\d+|\s+)\s+([a-zA-Z]{1,2}|\s+))\s+(\d+\S+)")
hessian_patt = re.compile(r"Force constants in Cartesian coordinates:")
resume_patt = re.compile(r"^\s1\\1\\GINC-\S*")
resume_end_patt = re.compile(r"^\s.*\\\\@")
self.properly_terminated = False
self.is_pcm = False
self.stationary_type = "Minimum"
self.structures = []
self.corrections = {}
self.energies = []
self.pcm = None
self.errors = []
self.Mulliken_charges = {}
self.link0 = {}
self.cart_forces = []
self.frequencies = []
self.eigenvalues = []
self.is_spin = False
self.hessian = None
self.resumes = []
self.title = None
coord_txt = []
read_coord = 0
read_mulliken = False
read_eigen = False
eigen_txt = []
parse_stage = 0
num_basis_found = False
terminated = False
parse_forces = False
forces = []
parse_freq = False
frequencies = []
read_mo = False
parse_hessian = False
routeline = ""
with zopen(filename) as f:
for line in f:
if parse_stage == 0:
if start_patt.search(line):
parse_stage = 1
elif link0_patt.match(line):
m = link0_patt.match(line)
self.link0[m.group(1)] = m.group(2)
elif route_patt.search(line) or routeline != "":
if set(line.strip()) == {"-"}:
params = read_route_line(routeline)
self.functional = params[0]
self.basis_set = params[1]
self.route_parameters = params[2]
route_lower = {k.lower(): v
for k, v in
self.route_parameters.items()}
self.dieze_tag = params[3]
parse_stage = 1
else:
routeline += line.strip()
elif parse_stage == 1:
if set(line.strip()) == {"-"} and self.title is None:
self.title = ""
elif self.title == "":
self.title = line.strip()
elif charge_mul_patt.search(line):
m = charge_mul_patt.search(line)
self.charge = int(m.group(1))
self.spin_multiplicity = int(m.group(2))
parse_stage = 2
elif parse_stage == 2:
if self.is_pcm:
self._check_pcm(line)
if "freq" in route_lower and thermo_patt.search(line):
m = thermo_patt.search(line)
if m.group(1) == "Zero-point":
self.corrections["Zero-point"] = float(m.group(3))
else:
key = m.group(2).strip(" to ")
self.corrections[key] = float(m.group(3))
if read_coord:
if not end_patt.search(line):
coord_txt.append(line)
else:
read_coord = (read_coord + 1) % 4
if not read_coord:
sp = []
coords = []
for l in coord_txt[2:]:
toks = l.split()
sp.append(Element.from_Z(int(toks[1])))
coords.append([float(i)
for i in toks[3:6]])
self.structures.append(Molecule(sp, coords))
if parse_forces:
m = forces_patt.search(line)
if m:
forces.extend([float(_v)
for _v in m.groups()[2:5]])
elif forces_off_patt.search(line):
self.cart_forces.append(forces)
forces = []
parse_forces = False
# read molecular orbital eigenvalues
if read_eigen:
m = orbital_patt.search(line)
if m:
eigen_txt.append(line)
else:
read_eigen = False
self.eigenvalues = {Spin.up: []}
for eigenline in eigen_txt:
if "Alpha" in eigenline:
self.eigenvalues[Spin.up] += [float(e)
for e in float_patt.findall(eigenline)]
elif "Beta" in eigenline:
if Spin.down not in self.eigenvalues:
self.eigenvalues[Spin.down] = []
self.eigenvalues[Spin.down] += [float(e)
for e in float_patt.findall(eigenline)]
eigen_txt = []
# read molecular orbital coefficients
if (not num_basis_found) and \
num_basis_func_patt.search(line):
m = num_basis_func_patt.search(line)
self.num_basis_func = int(m.group(1))
num_basis_found = True
elif read_mo:
# build a matrix with all coefficients
all_spin = [Spin.up]
if self.is_spin:
all_spin.append(Spin.down)
mat_mo = {}
for spin in all_spin:
mat_mo[spin] = np.zeros((self.num_basis_func,
self.num_basis_func))
nMO = 0
end_mo = False
while nMO < self.num_basis_func and not end_mo:
f.readline()
f.readline()
self.atom_basis_labels = []
for i in range(self.num_basis_func):
line = f.readline()
# identify atom and OA labels
m = mo_coeff_name_patt.search(line)
if m.group(1).strip() != "":
iat = int(m.group(2)) - 1
# atname = m.group(3)
self.atom_basis_labels.append([m.group(4)])
else:
self.atom_basis_labels[iat].append(m.group(4))
# MO coefficients
coeffs = [float(c) for c in
float_patt.findall(line)]
for j in range(len(coeffs)):
mat_mo[spin][i, nMO + j] = coeffs[j]
nMO += len(coeffs)
line = f.readline()
# manage pop=regular case (not all MO)
if nMO < self.num_basis_func and \
("Density Matrix:" in line or
mo_coeff_patt.search(line)):
end_mo = True
warnings.warn("POP=regular case, matrix "
"coefficients not complete")
f.readline()
self.eigenvectors = mat_mo
read_mo = False
# build a more convenient array dict with MO
# coefficient of each atom in each MO.
# mo[Spin][OM j][atom i] =
# {AO_k: coeff, AO_k: coeff ... }
mo = {}
for spin in all_spin:
mo[spin] = [[{} for iat in
range(len(self.atom_basis_labels))]
for j in range(self.num_basis_func)]
for j in range(self.num_basis_func):
i = 0
for iat in range(len(self.atom_basis_labels)):
for label in self.atom_basis_labels[iat]:
mo[spin][j][iat][label] = self.eigenvectors[spin][i, j]
i += 1
self.molecular_orbital = mo
elif parse_freq:
while line.strip() != "": # blank line
ifreqs = [int(val) - 1 for val in line.split()]
for ifreq in ifreqs:
frequencies.append({"frequency": None,
"r_mass": None,
"f_constant": None,
"IR_intensity": None,
"symmetry": None,
"mode": []})
# read freq, intensity, masses, symmetry ...
while "Atom AN" not in line:
if "Frequencies --" in line:
freqs = map(float,
float_patt.findall(line))
for ifreq, freq in zip(ifreqs, freqs):
frequencies[ifreq]["frequency"] = freq
elif "Red. masses --" in line:
r_masses = map(float,
float_patt.findall(line))
for ifreq, r_mass in zip(ifreqs, r_masses):
frequencies[ifreq]["r_mass"] = r_mass
elif "Frc consts --" in line:
f_consts = map(float,
float_patt.findall(line))
for ifreq, f_const in zip(ifreqs, f_consts):
frequencies[ifreq]["f_constant"] = f_const
elif "IR Inten --" in line:
IR_intens = map(float,
float_patt.findall(line))
for ifreq, intens in zip(ifreqs, IR_intens):
frequencies[ifreq]["IR_intensity"] = intens
else:
syms = line.split()[:3]
for ifreq, sym in zip(ifreqs, syms):
frequencies[ifreq]["symmetry"] = sym
line = f.readline()
# read normal modes
line = f.readline()
while normal_mode_patt.search(line):
values = list(map(float,
float_patt.findall(line)))
for i, ifreq in zip(range(0, len(values), 3),
ifreqs):
frequencies[ifreq]["mode"].extend(values[i:i+3])
line = f.readline()
parse_freq = False
self.frequencies.append(frequencies)
frequencies = []
elif parse_hessian:
# read Hessian matrix under "Force constants in Cartesian coordinates"
# Hessian matrix is in the input orientation framework
# WARNING : need #P in the route line
parse_hessian = False
ndf = 3 * len(self.structures[0])
self.hessian = np.zeros((ndf, ndf))
j_indices = range(5)
jndf = 0
while jndf < ndf:
for i in range(jndf, ndf):
line = f.readline()
vals = re.findall(r"\s*([+-]?\d+\.\d+[eEdD]?[+-]\d+)", line)
vals = [float(val.replace("D", "E"))
for val in vals]
for jval, val in enumerate(vals):
j = j_indices[jval]
self.hessian[i, j] = val
self.hessian[j, i] = val
jndf += len(vals)
line = f.readline()
j_indices = [j + 5 for j in j_indices]
elif termination_patt.search(line):
m = termination_patt.search(line)
if m.group(1) == "Normal":
self.properly_terminated = True
terminated = True
elif error_patt.search(line):
error_defs = {
"! Non-Optimized Parameters !": "Optimization "
"error",
"Convergence failure": "SCF convergence error"
}
m = error_patt.search(line)
self.errors.append(error_defs[m.group(1)])
elif num_elec_patt.search(line):
m = num_elec_patt.search(line)
self.electrons = (int(m.group(1)), int(m.group(2)))
elif (not self.is_pcm) and pcm_patt.search(line):
self.is_pcm = True
self.pcm = {}
elif "freq" in route_lower and "opt" in route_lower and \
stat_type_patt.search(line):
self.stationary_type = "Saddle"
elif mp2_patt.search(line):
m = mp2_patt.search(line)
self.energies.append(float(m.group(1).replace("D",
"E")))
elif oniom_patt.search(line):
                    m = oniom_patt.search(line)  # re patterns have no .matcher()
self.energies.append(float(m.group(1)))
elif scf_patt.search(line):
m = scf_patt.search(line)
self.energies.append(float(m.group(1)))
elif std_orientation_patt.search(line):
coord_txt = []
read_coord = 1
elif not read_eigen and orbital_patt.search(line):
eigen_txt.append(line)
read_eigen = True
elif mulliken_patt.search(line):
mulliken_txt = []
read_mulliken = True
elif not parse_forces and forces_on_patt.search(line):
parse_forces = True
elif freq_on_patt.search(line):
parse_freq = True
[f.readline() for i in range(3)]
elif mo_coeff_patt.search(line):
if "Alpha" in line:
self.is_spin = True
read_mo = True
elif hessian_patt.search(line):
parse_hessian = True
elif resume_patt.search(line):
resume = []
while not resume_end_patt.search(line):
resume.append(line)
line = f.readline()
                        # safety check in case \\@ is not on a single line
if line == "\n":
break
resume.append(line)
resume = "".join([r.strip() for r in resume])
self.resumes.append(resume)
if read_mulliken:
if not end_mulliken_patt.search(line):
mulliken_txt.append(line)
else:
m = end_mulliken_patt.search(line)
mulliken_charges = {}
for line in mulliken_txt:
if mulliken_charge_patt.search(line):
m = mulliken_charge_patt.search(line)
dic = {int(m.group(1)):
[m.group(2), float(m.group(3))]}
mulliken_charges.update(dic)
read_mulliken = False
self.Mulliken_charges = mulliken_charges
if not terminated:
warnings.warn("\n" + self.filename +
": Termination error or bad Gaussian output file !")
def _check_pcm(self, line):
energy_patt = re.compile(r"(Dispersion|Cavitation|Repulsion) energy"
r"\s+\S+\s+=\s+(\S*)")
total_patt = re.compile(r"with all non electrostatic terms\s+\S+\s+"
r"=\s+(\S*)")
parameter_patt = re.compile(r"(Eps|Numeral density|RSolv|Eps"
r"\(inf[inity]*\))\s+=\s*(\S*)")
if energy_patt.search(line):
m = energy_patt.search(line)
self.pcm['{} energy'.format(m.group(1))] = float(m.group(2))
elif total_patt.search(line):
m = total_patt.search(line)
self.pcm['Total energy'] = float(m.group(1))
elif parameter_patt.search(line):
m = parameter_patt.search(line)
self.pcm[m.group(1)] = float(m.group(2))
def as_dict(self):
"""
Json-serializable dict representation.
"""
structure = self.final_structure
d = {"has_gaussian_completed": self.properly_terminated,
"nsites": len(structure)}
comp = structure.composition
d["unit_cell_formula"] = comp.as_dict()
d["reduced_cell_formula"] = Composition(comp.reduced_formula).as_dict()
d["pretty_formula"] = comp.reduced_formula
d["is_pcm"] = self.is_pcm
d["errors"] = self.errors
d["Mulliken_charges"] = self.Mulliken_charges
unique_symbols = sorted(list(d["unit_cell_formula"].keys()))
d["elements"] = unique_symbols
d["nelements"] = len(unique_symbols)
d["charge"] = self.charge
d["spin_multiplicity"] = self.spin_multiplicity
vin = {"route": self.route_parameters, "functional": self.functional,
"basis_set": self.basis_set,
"nbasisfunctions": self.num_basis_func,
"pcm_parameters": self.pcm}
d["input"] = vin
nsites = len(self.final_structure)
vout = {
"energies": self.energies,
"final_energy": self.final_energy,
"final_energy_per_atom": self.final_energy / nsites,
"molecule": structure.as_dict(),
"stationary_type": self.stationary_type,
"corrections": self.corrections
}
d['output'] = vout
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
return d
def read_scan(self):
"""
Read a potential energy surface from a gaussian scan calculation.
Returns:
A dict: {"energies": [ values ],
"coords": {"d1": [ values ], "A2", [ values ], ... }}
"energies" are the energies of all points of the potential energy
surface. "coords" are the internal coordinates used to compute the
potential energy surface and the internal coordinates optimized,
labelled by their name as defined in the calculation.
"""
def floatList(l):
""" return a list of float from a list of string """
return [float(v) for v in l]
scan_patt = re.compile(r"^\sSummary of the potential surface scan:")
optscan_patt = re.compile(r"^\sSummary of Optimized Potential Surface Scan")
# data dict return
data = {"energies": list(), "coords": dict()}
# read in file
with zopen(self.filename, "r") as f:
line = f.readline()
while line != "":
if optscan_patt.match(line):
f.readline()
line = f.readline()
endScan = False
while not endScan:
data["energies"] += floatList(float_patt.findall(line))
line = f.readline()
while not re.search(r"(^\s+(\d+)|^\s-+)", line):
icname = line.split()[0].strip()
if icname in data["coords"]:
data["coords"][icname] += floatList(float_patt.findall(line))
else:
data["coords"][icname] = floatList(float_patt.findall(line))
line = f.readline()
if re.search(r"^\s-+", line):
endScan = True
else:
line = f.readline()
elif scan_patt.match(line):
line = f.readline()
data["coords"] = {icname: list()
for icname in line.split()[1:-1]}
f.readline()
line = f.readline()
while not re.search(r"^\s-+", line):
values = floatList(line.split())
data["energies"].append(values[-1])
for i, icname in enumerate(data["coords"]):
data["coords"][icname].append(values[i+1])
line = f.readline()
else:
line = f.readline()
return data
def get_scan_plot(self, coords=None):
"""
Get a matplotlib plot of the potential energy surface.
Args:
            coords: internal coordinate name to use as abscissa.
"""
from pymatgen.util.plotting import pretty_plot
plt = pretty_plot(12, 8)
d = self.read_scan()
if coords and coords in d["coords"]:
x = d["coords"][coords]
plt.xlabel(coords)
else:
x = range(len(d["energies"]))
plt.xlabel("points")
plt.ylabel("Energy (eV)")
e_min = min(d["energies"])
y = [(e - e_min) * Ha_to_eV for e in d["energies"]]
plt.plot(x, y, "ro--")
return plt
def save_scan_plot(self, filename="scan.pdf",
img_format="pdf", coords=None):
"""
Save matplotlib plot of the potential energy surface to a file.
Args:
filename: Filename to write to.
            img_format: Image format to use. Defaults to pdf.
            coords: internal coordinate name to use as abscissa.
"""
plt = self.get_scan_plot(coords)
plt.savefig(filename, format=img_format)
def read_excitation_energies(self):
"""
        Read the excitation energies after a TD-DFT calculation.
        Returns:
            A list: A list of tuples, one per transition, such as
            [(energy (eV), lambda (nm), oscillator strength), ... ]
"""
transitions = list()
# read in file
with zopen(self.filename, "r") as f:
line = f.readline()
td = False
while line != "":
if re.search(r"^\sExcitation energies and oscillator strengths:", line):
td = True
if td:
if re.search(r"^\sExcited State\s*\d", line):
val = [float(v) for v in float_patt.findall(line)]
transitions.append(tuple(val[0:3]))
line = f.readline()
return transitions
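    # Illustrative return value (hedged; the numbers are made up):
    #   [(3.95, 313.9, 0.0021), (4.12, 300.7, 0.0000), ...]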
def get_spectre_plot(self, sigma=0.05, step=0.01):
"""
        Get a matplotlib plot of the UV-visible spectrum. Transitions are
        plotted as vertical lines and as a sum of normal functions of width
        sigma. The broadening is applied in energy and the spectrum is plotted
        as a function of the wavelength.
Args:
sigma: Full width at half maximum in eV for normal functions.
step: bin interval in eV
Returns:
A dict: {"energies": values, "lambda": values, "xas": values}
            where values are lists of abscissa (energies, lambda) and
the sum of gaussian functions (xas).
A matplotlib plot.
"""
from pymatgen.util.plotting import pretty_plot
from matplotlib.mlab import normpdf
plt = pretty_plot(12, 8)
transitions = self.read_excitation_energies()
minval = min([val[0] for val in transitions]) - 5.0 * sigma
maxval = max([val[0] for val in transitions]) + 5.0 * sigma
npts = int((maxval - minval) / step) + 1
eneval = np.linspace(minval, maxval, npts) # in eV
lambdaval = [cst.h * cst.c / (val * cst.e) * 1.e9
for val in eneval] # in nm
# sum of gaussian functions
spectre = np.zeros(npts)
for trans in transitions:
spectre += trans[2] * normpdf(eneval, trans[0], sigma)
spectre /= spectre.max()
plt.plot(lambdaval, spectre, "r-", label="spectre")
data = {"energies": eneval, "lambda": lambdaval, "xas": spectre}
# plot transitions as vlines
plt.vlines([val[1] for val in transitions],
0.,
[val[2] for val in transitions],
color="blue",
label="transitions",
linewidth=2)
plt.xlabel("$\\lambda$ (nm)")
plt.ylabel("Arbitrary unit")
plt.legend()
return data, plt
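    # Illustrative usage (hedged; `out` is a parsed GaussianOutput instance):
    #   data, plt = out.get_spectre_plot(sigma=0.10)
    #   plt.show()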
def save_spectre_plot(self, filename="spectre.pdf", img_format="pdf",
sigma=0.05, step=0.01):
"""
Save matplotlib plot of the spectre to a file.
Args:
filename: Filename to write to.
            img_format: Image format to use. Defaults to pdf.
sigma: Full width at half maximum in eV for normal functions.
step: bin interval in eV
"""
d, plt = self.get_spectre_plot(sigma, step)
plt.savefig(filename, format=img_format)
def to_input(self, mol=None, charge=None,
spin_multiplicity=None, title=None, functional=None,
basis_set=None, route_parameters=None, input_parameters=None,
link0_parameters=None, dieze_tag=None, cart_coords=False):
"""
        Create a new input object using, by default, the last geometry read
        from the output file and the same calculation parameters. Arguments
        are the same as for the GaussianInput class.
Returns
            gauinp (GaussianInput): the Gaussian input object
"""
if not mol:
mol = self.final_structure
if charge is None:
charge = self.charge
if spin_multiplicity is None:
spin_multiplicity = self.spin_multiplicity
if not title:
title = self.title
if not functional:
functional = self.functional
if not basis_set:
basis_set = self.basis_set
if not route_parameters:
route_parameters = self.route_parameters
if not link0_parameters:
link0_parameters = self.link0
if not dieze_tag:
dieze_tag = self.dieze_tag
return GaussianInput(mol=mol,
charge=charge,
spin_multiplicity=spin_multiplicity,
title=title,
functional=functional,
basis_set=basis_set,
route_parameters=route_parameters,
input_parameters=input_parameters,
link0_parameters=link0_parameters,
dieze_tag=dieze_tag)
|
johnson1228/pymatgen
|
pymatgen/io/gaussian.py
|
Python
|
mit
| 55,956
|
[
"Gaussian",
"pymatgen"
] |
db84c5fe07a5a8f290599905a87adc161d0551104c1e654e1386c73493a2bf68
|
import numpy as np
import dolfin
from dolfin import *
from mpi4py import MPI as pyMPI
from leopart import StokesStaticCondensation, FormsStokes
import geopart.stokes.incompressible
import dolfin_dg as dg
comm = pyMPI.COMM_WORLD
mpi_comm = MPI.comm_world
# mark the whole boundary (inflow and outflow markers will overwrite it)
class Noslip(SubDomain):
def inside(self, x, on_boundary):
return on_boundary
class Left(SubDomain):
def inside(self, x, on_boundary):
return on_boundary and near(x[0], 0)
class Right(SubDomain):
def inside(self, x, on_boundary):
return on_boundary and near(x[0], 1.0)
#Create a unit box mesh
n_ele = 6
aspect_ratio = 4
mesh = BoxMesh(comm, Point(0.0, 0.0,0.0), Point(1.0, 1.0, 1.0),
n_ele, n_ele, n_ele*aspect_ratio)
# create a facet function and mark the boundaries
boundaries = MeshFunction('size_t', mesh, mesh.topology().dim() - 1)
mark = {"Internal":0, "wall": 1,"inlet": 2,"outlet": 3 }
boundaries.set_all(mark["Internal"])
wall=Noslip()
wall.mark(boundaries, mark["wall"])
left = Left()
left.mark(boundaries, mark["inlet"])
right = Right()
right.mark(boundaries, mark["outlet"])
# viscosity coefficient
mu = Constant(0.001)
#Define HDG element and function space
element_cls = geopart.stokes.incompressible.HDG2()
W = element_cls.function_space(mesh)
ds = dolfin.Measure('ds',domain=mesh,subdomain_data=boundaries)
n = dolfin.FacetNormal(mesh)
#Define boundary condition
U = element_cls.create_solution_variable(W)
p_in = dolfin.Constant(1.0) # pressure inlet
p_out = dolfin.Constant(0.0) # pressure outlet
noslip = dolfin.Constant([0.0]*mesh.geometry().dim()) # no-slip wall
#Boundary conditions
gN1 = (- p_out*dolfin.Identity(mesh.geometry().dim())) * n
Neumann_outlet=dg.DGNeumannBC(ds(mark["outlet"]), gN1)
gN2 = (- p_in*dolfin.Identity(mesh.geometry().dim())) * n
Neumann_inlet=dg.DGNeumannBC(ds(mark["inlet"]), gN2)
Dirichlet_wall=dg.DGDirichletBC(ds(mark["wall"]), noslip)
weak_bcs = [Dirichlet_wall,Neumann_inlet,Neumann_outlet]
#Body force term
f = dolfin.Constant([0.0]*mesh.geometry().dim())
model=geopart.stokes.StokesModel(eta=mu,f=f)
#Form and solve Stokes
A, b = dolfin.PETScMatrix(), dolfin.PETScVector()
element_cls.solve_stokes(W, U, (A, b), weak_bcs, model)
uh, ph = element_cls.get_velocity(U), element_cls.get_pressure(U)
#Output solution p,u to paraview
dolfin.XDMFFile("pressure.xdmf").write_checkpoint(ph, "p")
dolfin.XDMFFile("velocity.xdmf").write_checkpoint(uh, "u")
flux = [dolfin.assemble(dolfin.dot(uh, n)*ds(i)) for i in range(len(mark))]
if comm.Get_rank() == 0:
for key, value in mark.items():
print("Flux_%s= %.15lf"%(key,flux[value]))
|
BradHub/SL-SPH
|
hdg_test/anistropic/hdg_test.py
|
Python
|
mit
| 2,697
|
[
"ParaView"
] |
e5c822465fdc0f2510eb3952b1879c6c55d5e8cef3d2acfb50af0990bd206657
|
# sybase/base.py
# Copyright (C) 2010-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
# get_select_precolumns(), limit_clause() implementation
# copyright (C) 2007 Fisch Asset Management
# AG http://www.fam.ch, with coding by Alexander Houben
# alexander.houben@thor-solutions.ch
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sybase
:name: Sybase
.. note::
The Sybase dialect functions on current SQLAlchemy versions
but is not regularly tested, and may have many issues and
caveats not currently handled.
"""
import operator
import re
from sqlalchemy.sql import compiler, expression, text, bindparam
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import types as sqltypes
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import schema as sa_schema
from sqlalchemy import util, sql, exc
from sqlalchemy.types import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\
TEXT, DATE, DATETIME, FLOAT, NUMERIC,\
BIGINT, INT, INTEGER, SMALLINT, BINARY,\
VARBINARY, DECIMAL, TIMESTAMP, Unicode,\
UnicodeText, REAL
RESERVED_WORDS = set([
"add", "all", "alter", "and",
"any", "as", "asc", "backup",
"begin", "between", "bigint", "binary",
"bit", "bottom", "break", "by",
"call", "capability", "cascade", "case",
"cast", "char", "char_convert", "character",
"check", "checkpoint", "close", "comment",
"commit", "connect", "constraint", "contains",
"continue", "convert", "create", "cross",
"cube", "current", "current_timestamp", "current_user",
"cursor", "date", "dbspace", "deallocate",
"dec", "decimal", "declare", "default",
"delete", "deleting", "desc", "distinct",
"do", "double", "drop", "dynamic",
"else", "elseif", "encrypted", "end",
"endif", "escape", "except", "exception",
"exec", "execute", "existing", "exists",
"externlogin", "fetch", "first", "float",
"for", "force", "foreign", "forward",
"from", "full", "goto", "grant",
"group", "having", "holdlock", "identified",
"if", "in", "index", "index_lparen",
"inner", "inout", "insensitive", "insert",
"inserting", "install", "instead", "int",
"integer", "integrated", "intersect", "into",
"iq", "is", "isolation", "join",
"key", "lateral", "left", "like",
"lock", "login", "long", "match",
"membership", "message", "mode", "modify",
"natural", "new", "no", "noholdlock",
"not", "notify", "null", "numeric",
"of", "off", "on", "open",
"option", "options", "or", "order",
"others", "out", "outer", "over",
"passthrough", "precision", "prepare", "primary",
"print", "privileges", "proc", "procedure",
"publication", "raiserror", "readtext", "real",
"reference", "references", "release", "remote",
"remove", "rename", "reorganize", "resource",
"restore", "restrict", "return", "revoke",
"right", "rollback", "rollup", "save",
"savepoint", "scroll", "select", "sensitive",
"session", "set", "setuser", "share",
"smallint", "some", "sqlcode", "sqlstate",
"start", "stop", "subtrans", "subtransaction",
"synchronize", "syntax_error", "table", "temporary",
"then", "time", "timestamp", "tinyint",
"to", "top", "tran", "trigger",
"truncate", "tsequal", "unbounded", "union",
"unique", "unknown", "unsigned", "update",
"updating", "user", "using", "validate",
"values", "varbinary", "varchar", "variable",
"varying", "view", "wait", "waitfor",
"when", "where", "while", "window",
"with", "with_cube", "with_lparen", "with_rollup",
"within", "work", "writetext",
])
class _SybaseUnitypeMixin(object):
"""these types appear to return a buffer object."""
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
return str(value) # decode("ucs-2")
else:
return None
return process
class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
__visit_name__ = 'UNICHAR'
class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
__visit_name__ = 'UNIVARCHAR'
class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText):
__visit_name__ = 'UNITEXT'
class TINYINT(sqltypes.Integer):
__visit_name__ = 'TINYINT'
class BIT(sqltypes.TypeEngine):
__visit_name__ = 'BIT'
class MONEY(sqltypes.TypeEngine):
__visit_name__ = "MONEY"
class SMALLMONEY(sqltypes.TypeEngine):
__visit_name__ = "SMALLMONEY"
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
__visit_name__ = "UNIQUEIDENTIFIER"
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = 'IMAGE'
class SybaseTypeCompiler(compiler.GenericTypeCompiler):
def visit_large_binary(self, type_):
return self.visit_IMAGE(type_)
def visit_boolean(self, type_):
return self.visit_BIT(type_)
def visit_unicode(self, type_):
return self.visit_NVARCHAR(type_)
def visit_UNICHAR(self, type_):
return "UNICHAR(%d)" % type_.length
def visit_UNIVARCHAR(self, type_):
return "UNIVARCHAR(%d)" % type_.length
def visit_UNITEXT(self, type_):
return "UNITEXT"
def visit_TINYINT(self, type_):
return "TINYINT"
def visit_IMAGE(self, type_):
return "IMAGE"
def visit_BIT(self, type_):
return "BIT"
def visit_MONEY(self, type_):
return "MONEY"
def visit_SMALLMONEY(self, type_):
return "SMALLMONEY"
def visit_UNIQUEIDENTIFIER(self, type_):
return "UNIQUEIDENTIFIER"
ischema_names = {
'bigint': BIGINT,
'int': INTEGER,
'integer': INTEGER,
'smallint': SMALLINT,
'tinyint': TINYINT,
'unsigned bigint': BIGINT, # TODO: unsigned flags
'unsigned int': INTEGER, # TODO: unsigned flags
'unsigned smallint': SMALLINT, # TODO: unsigned flags
'numeric': NUMERIC,
'decimal': DECIMAL,
'dec': DECIMAL,
'float': FLOAT,
'double': NUMERIC, # TODO
'double precision': NUMERIC, # TODO
'real': REAL,
'smallmoney': SMALLMONEY,
'money': MONEY,
'smalldatetime': DATETIME,
'datetime': DATETIME,
'date': DATE,
'time': TIME,
'char': CHAR,
'character': CHAR,
'varchar': VARCHAR,
'character varying': VARCHAR,
'char varying': VARCHAR,
'unichar': UNICHAR,
'unicode character': UNIVARCHAR,
'nchar': NCHAR,
'national char': NCHAR,
'national character': NCHAR,
'nvarchar': NVARCHAR,
'nchar varying': NVARCHAR,
'national char varying': NVARCHAR,
'national character varying': NVARCHAR,
'text': TEXT,
'unitext': UNITEXT,
'binary': BINARY,
'varbinary': VARBINARY,
'image': IMAGE,
'bit': BIT,
# not in documentation for ASE 15.7
'long varchar': TEXT, # TODO
'timestamp': TIMESTAMP,
'uniqueidentifier': UNIQUEIDENTIFIER,
}
class SybaseInspector(reflection.Inspector):
def __init__(self, conn):
reflection.Inspector.__init__(self, conn)
def get_table_id(self, table_name, schema=None):
"""Return the table id from `table_name` and `schema`."""
return self.dialect.get_table_id(self.bind, table_name, schema,
info_cache=self.info_cache)
class SybaseExecutionContext(default.DefaultExecutionContext):
_enable_identity_insert = False
def set_ddl_autocommit(self, connection, value):
"""Must be implemented by subclasses to accommodate DDL executions.
"connection" is the raw unwrapped DBAPI connection. "value"
        is True or False. When True, the connection should be configured
        such that a DDL can take place subsequently. When False,
a DDL has taken place and the connection should be resumed
into non-autocommit mode.
"""
raise NotImplementedError()
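    # A minimal subclass sketch (assumption: a DBAPI whose raw connection
    # exposes an ``autocommit`` attribute, as e.g. pyodbc's does):
    #
    #     class MyExecutionContext(SybaseExecutionContext):
    #         def set_ddl_autocommit(self, connection, value):
    #             connection.autocommit = value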
def pre_exec(self):
if self.isinsert:
tbl = self.compiled.statement.table
seq_column = tbl._autoincrement_column
insert_has_sequence = seq_column is not None
if insert_has_sequence:
self._enable_identity_insert = \
seq_column.key in self.compiled_parameters[0]
else:
self._enable_identity_insert = False
if self._enable_identity_insert:
self.cursor.execute("SET IDENTITY_INSERT %s ON" %
self.dialect.identifier_preparer.format_table(tbl))
if self.isddl:
# TODO: to enhance this, we can detect "ddl in tran" on the
# database settings. this error message should be improved to
# include a note about that.
if not self.should_autocommit:
raise exc.InvalidRequestError(
"The Sybase dialect only supports "
"DDL in 'autocommit' mode at this time.")
self.root_connection.engine.logger.info(
"AUTOCOMMIT (Assuming no Sybase 'ddl in tran')")
self.set_ddl_autocommit(
self.root_connection.connection.connection,
True)
def post_exec(self):
if self.isddl:
self.set_ddl_autocommit(self.root_connection, False)
if self._enable_identity_insert:
self.cursor.execute(
"SET IDENTITY_INSERT %s OFF" %
self.dialect.identifier_preparer.
format_table(self.compiled.statement.table)
)
def get_lastrowid(self):
cursor = self.create_cursor()
cursor.execute("SELECT @@identity AS lastrowid")
lastrowid = cursor.fetchone()[0]
cursor.close()
return lastrowid
class SybaseSQLCompiler(compiler.SQLCompiler):
ansi_bind_rules = True
extract_map = util.update_copy(
compiler.SQLCompiler.extract_map,
{
'doy': 'dayofyear',
'dow': 'weekday',
'milliseconds': 'millisecond'
})
def get_select_precolumns(self, select):
s = select._distinct and "DISTINCT " or ""
# TODO: don't think Sybase supports
# bind params for FIRST / TOP
if select._limit:
#if select._limit == 1:
#s += "FIRST "
#else:
#s += "TOP %s " % (select._limit,)
s += "TOP %s " % (select._limit,)
if select._offset:
if not select._limit:
# FIXME: sybase doesn't allow an offset without a limit
# so use a huge value for TOP here
s += "TOP 1000000 "
s += "START AT %s " % (select._offset + 1,)
return s
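    # Rendering sketch (illustrative; table and column names assumed): a query
    # built as select([t]).limit(5).offset(10) compiles to roughly
    #     SELECT TOP 5 START AT 11 t.x FROM t
    # since START AT is 1-based and Sybase requires TOP alongside an offset.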
def get_from_hint_text(self, table, text):
return text
def limit_clause(self, select):
# Limit in sybase is after the select keyword
return ""
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
return 'DATEPART("%s", %s)' % (
field, self.process(extract.expr, **kw))
def visit_now_func(self, fn, **kw):
return "GETDATE()"
def for_update_clause(self, select):
# "FOR UPDATE" is only allowed on "DECLARE CURSOR"
# which SQLAlchemy doesn't use
return ''
def order_by_clause(self, select, **kw):
kw['literal_binds'] = True
order_by = self.process(select._order_by_clause, **kw)
# SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT
if order_by and (not self.is_subquery() or select._limit):
return " ORDER BY " + order_by
else:
return ""
class SybaseDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column) + " " + \
self.dialect.type_compiler.process(column.type)
if column.table is None:
raise exc.CompileError(
"The Sybase dialect requires Table-bound "
"columns in order to generate DDL")
seq_col = column.table._autoincrement_column
# install a IDENTITY Sequence if we have an implicit IDENTITY column
if seq_col is column:
sequence = isinstance(column.default, sa_schema.Sequence) \
and column.default
if sequence:
start, increment = sequence.start or 1, \
sequence.increment or 1
else:
start, increment = 1, 1
if (start, increment) == (1, 1):
colspec += " IDENTITY"
else:
# TODO: need correct syntax for this
colspec += " IDENTITY(%s,%s)" % (start, increment)
else:
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if column.nullable is not None:
if not column.nullable or column.primary_key:
colspec += " NOT NULL"
else:
colspec += " NULL"
return colspec
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX %s.%s" % (
self.preparer.quote_identifier(index.table.name),
self._prepared_index_name(drop.element,
include_schema=False)
)
class SybaseIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
class SybaseDialect(default.DefaultDialect):
name = 'sybase'
supports_unicode_statements = False
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
supports_native_boolean = False
supports_unicode_binds = False
postfetch_lastrowid = True
colspecs = {}
ischema_names = ischema_names
type_compiler = SybaseTypeCompiler
statement_compiler = SybaseSQLCompiler
ddl_compiler = SybaseDDLCompiler
preparer = SybaseIdentifierPreparer
inspector = SybaseInspector
construct_arguments = []
def _get_default_schema_name(self, connection):
return connection.scalar(
text("SELECT user_name() as user_name",
typemap={'user_name': Unicode})
)
def initialize(self, connection):
super(SybaseDialect, self).initialize(connection)
if self.server_version_info is not None and\
self.server_version_info < (15, ):
self.max_identifier_length = 30
else:
self.max_identifier_length = 255
def get_table_id(self, connection, table_name, schema=None, **kw):
"""Fetch the id for schema.table_name.
Several reflection methods require the table id. The idea for using
this method is that it can be fetched one time and cached for
subsequent calls.
"""
table_id = None
if schema is None:
schema = self.default_schema_name
TABLEID_SQL = text("""
SELECT o.id AS id
FROM sysobjects o JOIN sysusers u ON o.uid=u.uid
WHERE u.name = :schema_name
AND o.name = :table_name
AND o.type in ('U', 'V')
""")
if util.py2k:
if isinstance(schema, unicode):
schema = schema.encode("ascii")
if isinstance(table_name, unicode):
table_name = table_name.encode("ascii")
result = connection.execute(TABLEID_SQL,
schema_name=schema,
table_name=table_name)
table_id = result.scalar()
if table_id is None:
raise exc.NoSuchTableError(table_name)
return table_id
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
COLUMN_SQL = text("""
SELECT col.name AS name,
t.name AS type,
(col.status & 8) AS nullable,
(col.status & 128) AS autoincrement,
com.text AS 'default',
col.prec AS precision,
col.scale AS scale,
col.length AS length
FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON
col.cdefault = com.id
WHERE col.usertype = t.usertype
AND col.id = :table_id
ORDER BY col.colid
""")
results = connection.execute(COLUMN_SQL, table_id=table_id)
columns = []
for (name, type_, nullable, autoincrement, default, precision, scale,
length) in results:
col_info = self._get_column_info(name, type_, bool(nullable),
bool(autoincrement), default, precision, scale,
length)
columns.append(col_info)
return columns
def _get_column_info(self, name, type_, nullable, autoincrement, default,
precision, scale, length):
coltype = self.ischema_names.get(type_, None)
kwargs = {}
if coltype in (NUMERIC, DECIMAL):
args = (precision, scale)
elif coltype == FLOAT:
args = (precision,)
elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
args = (length,)
else:
args = ()
if coltype:
coltype = coltype(*args, **kwargs)
#is this necessary
#if is_array:
# coltype = ARRAY(coltype)
else:
util.warn("Did not recognize type '%s' of column '%s'" %
(type_, name))
coltype = sqltypes.NULLTYPE
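        # Strip the leading DEFAULT keyword and any enclosing quotes from the
        # reflected server default, e.g. "DEFAULT 'abc'" -> "abc".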
if default:
default = re.sub("DEFAULT", "", default).strip()
default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
else:
default = None
column_info = dict(name=name, type=coltype, nullable=nullable,
default=default, autoincrement=autoincrement)
return column_info
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
table_cache = {}
column_cache = {}
foreign_keys = []
table_cache[table_id] = {"name": table_name, "schema": schema}
COLUMN_SQL = text("""
SELECT c.colid AS id, c.name AS name
FROM syscolumns c
WHERE c.id = :table_id
""")
results = connection.execute(COLUMN_SQL, table_id=table_id)
columns = {}
for col in results:
columns[col["id"]] = col["name"]
column_cache[table_id] = columns
REFCONSTRAINT_SQL = text("""
SELECT o.name AS name, r.reftabid AS reftable_id,
r.keycnt AS 'count',
r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3,
r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6,
            r.fokey7 AS fokey7, r.fokey8 AS fokey8, r.fokey9 AS fokey9,
r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12,
r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS fokey15,
r.fokey16 AS fokey16,
r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3,
r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6,
            r.refkey7 AS refkey7, r.refkey8 AS refkey8, r.refkey9 AS refkey9,
r.refkey10 AS refkey10, r.refkey11 AS refkey11,
r.refkey12 AS refkey12, r.refkey13 AS refkey13,
r.refkey14 AS refkey14, r.refkey15 AS refkey15,
r.refkey16 AS refkey16
FROM sysreferences r JOIN sysobjects o on r.tableid = o.id
WHERE r.tableid = :table_id
""")
referential_constraints = connection.execute(REFCONSTRAINT_SQL,
table_id=table_id)
REFTABLE_SQL = text("""
SELECT o.name AS name, u.name AS 'schema'
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE o.id = :table_id
""")
for r in referential_constraints:
reftable_id = r["reftable_id"]
if reftable_id not in table_cache:
c = connection.execute(REFTABLE_SQL, table_id=reftable_id)
reftable = c.fetchone()
c.close()
table_info = {"name": reftable["name"], "schema": None}
if (schema is not None or
reftable["schema"] != self.default_schema_name):
table_info["schema"] = reftable["schema"]
table_cache[reftable_id] = table_info
results = connection.execute(COLUMN_SQL, table_id=reftable_id)
reftable_columns = {}
for col in results:
reftable_columns[col["id"]] = col["name"]
column_cache[reftable_id] = reftable_columns
reftable = table_cache[reftable_id]
reftable_columns = column_cache[reftable_id]
constrained_columns = []
referred_columns = []
for i in range(1, r["count"] + 1):
constrained_columns.append(columns[r["fokey%i" % i]])
referred_columns.append(reftable_columns[r["refkey%i" % i]])
fk_info = {
"constrained_columns": constrained_columns,
"referred_schema": reftable["schema"],
"referred_table": reftable["name"],
"referred_columns": referred_columns,
"name": r["name"]
}
foreign_keys.append(fk_info)
return foreign_keys
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
INDEX_SQL = text("""
SELECT object_name(i.id) AS table_name,
i.keycnt AS 'count',
i.name AS name,
(i.status & 0x2) AS 'unique',
index_col(object_name(i.id), i.indid, 1) AS col_1,
index_col(object_name(i.id), i.indid, 2) AS col_2,
index_col(object_name(i.id), i.indid, 3) AS col_3,
index_col(object_name(i.id), i.indid, 4) AS col_4,
index_col(object_name(i.id), i.indid, 5) AS col_5,
index_col(object_name(i.id), i.indid, 6) AS col_6,
index_col(object_name(i.id), i.indid, 7) AS col_7,
index_col(object_name(i.id), i.indid, 8) AS col_8,
index_col(object_name(i.id), i.indid, 9) AS col_9,
index_col(object_name(i.id), i.indid, 10) AS col_10,
index_col(object_name(i.id), i.indid, 11) AS col_11,
index_col(object_name(i.id), i.indid, 12) AS col_12,
index_col(object_name(i.id), i.indid, 13) AS col_13,
index_col(object_name(i.id), i.indid, 14) AS col_14,
index_col(object_name(i.id), i.indid, 15) AS col_15,
index_col(object_name(i.id), i.indid, 16) AS col_16
FROM sysindexes i, sysobjects o
WHERE o.id = i.id
AND o.id = :table_id
AND (i.status & 2048) = 0
AND i.indid BETWEEN 1 AND 254
""")
results = connection.execute(INDEX_SQL, table_id=table_id)
indexes = []
for r in results:
column_names = []
for i in range(1, r["count"]):
column_names.append(r["col_%i" % (i,)])
index_info = {"name": r["name"],
"unique": bool(r["unique"]),
"column_names": column_names}
indexes.append(index_info)
return indexes
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
PK_SQL = text("""
SELECT object_name(i.id) AS table_name,
i.keycnt AS 'count',
i.name AS name,
index_col(object_name(i.id), i.indid, 1) AS pk_1,
index_col(object_name(i.id), i.indid, 2) AS pk_2,
index_col(object_name(i.id), i.indid, 3) AS pk_3,
index_col(object_name(i.id), i.indid, 4) AS pk_4,
index_col(object_name(i.id), i.indid, 5) AS pk_5,
index_col(object_name(i.id), i.indid, 6) AS pk_6,
index_col(object_name(i.id), i.indid, 7) AS pk_7,
index_col(object_name(i.id), i.indid, 8) AS pk_8,
index_col(object_name(i.id), i.indid, 9) AS pk_9,
index_col(object_name(i.id), i.indid, 10) AS pk_10,
index_col(object_name(i.id), i.indid, 11) AS pk_11,
index_col(object_name(i.id), i.indid, 12) AS pk_12,
index_col(object_name(i.id), i.indid, 13) AS pk_13,
index_col(object_name(i.id), i.indid, 14) AS pk_14,
index_col(object_name(i.id), i.indid, 15) AS pk_15,
index_col(object_name(i.id), i.indid, 16) AS pk_16
FROM sysindexes i, sysobjects o
WHERE o.id = i.id
AND o.id = :table_id
AND (i.status & 2048) = 2048
AND i.indid BETWEEN 1 AND 254
""")
results = connection.execute(PK_SQL, table_id=table_id)
pks = results.fetchone()
results.close()
constrained_columns = []
for i in range(1, pks["count"] + 1):
constrained_columns.append(pks["pk_%i" % (i,)])
return {"constrained_columns": constrained_columns,
"name": pks["name"]}
@reflection.cache
def get_schema_names(self, connection, **kw):
SCHEMA_SQL = text("SELECT u.name AS name FROM sysusers u")
schemas = connection.execute(SCHEMA_SQL)
return [s["name"] for s in schemas]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
TABLE_SQL = text("""
SELECT o.name AS name
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE u.name = :schema_name
AND o.type = 'U'
""")
if util.py2k:
if isinstance(schema, unicode):
schema = schema.encode("ascii")
tables = connection.execute(TABLE_SQL, schema_name=schema)
return [t["name"] for t in tables]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
VIEW_DEF_SQL = text("""
SELECT c.text
FROM syscomments c JOIN sysobjects o ON c.id = o.id
WHERE o.name = :view_name
AND o.type = 'V'
""")
if util.py2k:
if isinstance(view_name, unicode):
view_name = view_name.encode("ascii")
view = connection.execute(VIEW_DEF_SQL, view_name=view_name)
return view.scalar()
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is None:
schema = self.default_schema_name
VIEW_SQL = text("""
SELECT o.name AS name
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE u.name = :schema_name
AND o.type = 'V'
""")
if util.py2k:
if isinstance(schema, unicode):
schema = schema.encode("ascii")
views = connection.execute(VIEW_SQL, schema_name=schema)
return [v["name"] for v in views]
def has_table(self, connection, table_name, schema=None):
try:
self.get_table_id(connection, table_name, schema)
except exc.NoSuchTableError:
return False
else:
return True
|
jessekl/flixr
|
venv/lib/python2.7/site-packages/sqlalchemy/dialects/sybase/base.py
|
Python
|
mit
| 28,800
|
[
"ASE"
] |
0ced06ff7f120ed6e6e0e0ea9bda2755b649a43e91c5b5d155e20d26f4183288
|
#!/usr/bin/env python
"""An object that provides various utilities for Gaussian random variables. Ben, 1 May 2008"""
import numpy as np
#import pylab
from scipy.linalg import eigh
from scipy.linalg import solve
from scipy.linalg import inv
from scipy.linalg import det
from numpy.random import randn  # scipy's randn re-export is long deprecated
class Gauss(object):
"""This object provides various utilities for Gaussian random variables.
The class can be initialized by either providing the mean and covariance, or
data from which the mean and covariance will be estimated. The data is assumed to
a (d,n) array, where n is the number of data points and d the dimension of each data point, i.e.
each column of data corresponds to a d-dimensional observation, and there are n observations
Caution: No test is provided to test whether data is in the correct shape.
Example:
-------
>>>import gauss
>>>#provide the mean and covariance
>>>mean = [1.,1.]
>>>cov = [[2.,1.],[1.,3.]]
>>>g = gauss.Gauss(mean,cov)
>>>g.image()
>>>#provide data from which the mean and covariance are estimated
>>> g = gauss.Gauss(data=[[0.1,0.1,0.1],[0.,0.1,0.]])
>>> g.mean
array([ 0.1,0.03333333])
>>> g.cov
array([[ 0.015, 0.005],
[ 0.005, 0.005]])
---------
The class provides:
eigvec - Give the eigenvectors of cov
eigval - Give the eigenvalues of cov
get_mean() - Get the mean
set_mean() - Change the mean
get_cov() - Get the covariance
set_cov() - Change the covariance to a new value
eval() - Evaluate the Gaussian distribution
sample() - samples drawn from the distribution
image() - Display a 2d distribution
plot() - Plot a 1d distribution
conditional() - Calculates a conditional distribution
    marginal() - Calculates a marginal distribution
bayes() - Calculates the conditional distribution p(x|y) and marginal p(y)
"""
def __init__(self, mean=None,cov=None,data=None):
"""A Gaussian random variable is defined by its mean and covariance.
Parameters
----------
        Either specify data, or a mean and covariance. If the mean and covariance are
        estimated from data, each row of data corresponds to one d-dimensional
        observation, and there are n observations (rows).
mean : (d,) ndarray
Mean of the distribution.
cov : (d,d) ndarray
Symmetric non-negative definite covariance matrix of the distribution.
data : (n,d) ndarray
If specified, the mean and covariance are calculated from these n, d-dimensional
data vectors.
"""
        if not (mean is None and cov is None) and (data is not None):
raise ValueError("Either provide data, or mean and covariance.")
if data is None:
self.mean = np.asarray(mean).flatten()
self.cov = np.asarray(cov)
else:
data = np.asarray(data).transpose()
d,m = data.shape # Assume that m is the number of data points
if m < d:
raise ValueError('The number of data points is less than the dimension of the data')
self.mean = np.mean(data,axis=1) # Calculate the mean from the data
self.cov = np.cov(data) # Calculate the unbiased covariance from the data
self._gamma = []
self._x = []
def get_mean(self):
return self._mean
def set_mean(self,mean):
self._mean = np.array(mean,ndmin=1,copy=False)
mean = property(fget=get_mean, fset=set_mean)
def get_cov(self):
return self._cov
def set_cov(self,cov):
cov = np.array(cov,ndmin=2)
rows,cols = cov.shape
length = np.size(self.mean)
if not rows == cols:
raise ValueError('Covariance matrix must be square')
if not length == rows:
raise ValueError('The dimensions of the mean and covariance need to be the same')
eigval, eigvec = eigh(cov)
if np.min(eigval) < 0.:
raise ValueError('The covariance is not positive semi-definite')
self._eigval = eigval
self._eigvec = eigvec
self._cov = cov
cov = property(fget=get_cov, fset=set_cov)
@property
def eigval(self):
return self._eigval
@property
def eigvec(self):
return self._eigvec
def eval(self,x):
""" Evaluate the Gaussian distribution at the value x
Parameters
----------
Provide the value where the Gaussian needs to be evaluated.
x - (d,) ndarray,
Return
------
The value of the Gaussian at x
"""
x = np.array(x).flatten()
mean = self.mean.flatten()
cov = self.cov
discr = np.dot(x-mean,solve(cov,x-mean))
p = np.exp(-0.5*discr)/np.sqrt(det(2.*np.pi*cov))
return p
def sample(self, number_samples=1):
""" sample(number_samples=1)
Draw a sample from a d-dimensional normal distribution.
Input: number_samples - The number of samples that need to be generated
        Output: x - An (n x d) array containing the random samples, one sample per row
ToDo: Calculate directly using numpy.random.multivariate_normal(mean,cov,number_samples)
"""
mean = self.mean
cov = self.cov
eigval = self.eigval
eigvec = self.eigvec
d = len(mean)
x = np.zeros([d,number_samples])
x = np.array(x,ndmin=2)
sqrt_eigval = np.sqrt(eigval)
#Note: This can be directly calculated using numpy.random.multivariate_normal(mean,cov,number_samples)
#One can also use Cholesky factorization instead of the SVD
for j in range(number_samples):
vec = np.dot(eigvec,randn(d)*sqrt_eigval)
x[:,j] = vec.flatten()+mean.flatten()
x = x.transpose()
return x
def image(self):
"""Display an image of the Gaussian distribution. This only accepts 2d distributions.
"""
mean = self.mean
cov = self.cov
d = mean.size
if d<>2:
raise ValueError('Not an 2d array!')
eigval = self.eigval
max_eigval = np.max(eigval)
xmin = mean[0]-2.*max_eigval
xmax = mean[0]+2.*max_eigval
ymin = mean[1]-2.*max_eigval
ymax = mean[1]+2.*max_eigval
x = np.linspace(xmin,xmax,100) - mean[0]
y = np.linspace(ymin,ymax,100) - mean[1]
# Calculate the image values from the mean and covariance.
im = np.zeros([np.size(x),np.size(y)])
for i in range(len(x)):
for j in range(len(y)):
xi = np.array([x[i],y[j]])
im[j,i] = np.exp(-0.5*np.dot(xi,solve(cov,xi)))/np.sqrt(det(2.*np.pi*cov))
# pylab.imshow(pylab.flipud(im),extent=(xmin,xmax,ymin,ymax))
# pylab.show()
def plot(self):
"""Plot a 1d Gaussian distribution. This only accepts 1d distributions.
"""
mean = self.mean.flatten()
cov = self.cov.flatten()
try:
x = np.linspace(mean-3.*np.sqrt(cov),mean+3.*np.sqrt(cov),101)
y = np.exp(-0.5*(x-mean)**2/cov)/np.sqrt(2.*np.pi*cov)
# pylab.plot(x,y)
# pylab.show()
        except Exception:
raise ValueError('Not a scalar variable')
def conditional(self,xb):
""" conditional(self,xb)
Calculates the mean and covariance of the conditional distribution
when the variables xb are observed.
Input: xb - The observed variables. It is assumed that the observed variables occupy the
last positions in the array of random variables, i.e. if x is the random variable
                   associated with the object, then it is partitioned as x = [xa,xb]^T.
Output: mean_a_given_b - mean of the conditional distribution
cov_a_given_b - covariance of the conditional distribution
"""
xb = np.array(xb,ndmin=1)
nb = len(xb)
n_rand_var = len(self.mean)
if nb >= n_rand_var:
raise ValueError('The conditional vector should be smaller than the random variable!')
mean = self.mean
cov = self.cov
# Partition the mean and covariance
na = n_rand_var - nb
mean_a = self.mean[:na]
mean_b = self.mean[na:]
cov_a = self.cov[:na,:na]
cov_b = self.cov[na:,na:]
cov_ab = self.cov[:na,na:]
#Calculate the conditional mean and covariance
mean_a_given_b = mean_a.flatten() + np.dot(cov_ab,solve(cov_b,xb.flatten()-mean_b.flatten()))
cov_a_given_b = cov_a - np.dot(cov_ab,solve(cov_b,cov_ab.transpose()))
return mean_a_given_b, cov_a_given_b
def marginal(self,d):
""" marginal(self,d)
Marginalize over the last d elements of the random vector. This amounts to extracting the
appropriate partitions from the mean and covariance
        Input: d - the number of variables over which the marginalization is done. It is assumed
               that the marginalization is required over the last d elements.
        Output: mean - The mean of the marginal (of dimension n-d)
cov - The covariance of the marginal (of dimension (n-d)x(n-d))
"""
n_rand_var = np.size(self.mean)
if d >= n_rand_var:
raise ValueError('The marginalization vector should be smaller than the random variable!')
# Partition the mean and covariance
na = n_rand_var - d
mean = self.mean[:na]
cov = self.cov[:na,:na]
return mean, cov
def bayes(self,A,b,L,*y):
""" bayes(self,A,b,L,y)
Assuming that the present object presents a Gaussian distribution p(x),
one can provide the Gaussian distribution p(y|x) in the form
p(y|x) = N(y|Ax+b,L)
where Ax+b is the mean (as a function of x) and L is the covariance.
        This allows one to calculate the marginal p(y) = int_x p(x,y)dx = int_x p(y|x)p(x)dx.
If a value of y is also given, one can use Bayes theorem to calculate
p(x|y) = p(y|x)p(x)/p(y).
This method returns the means and covariances of both p(y), and p(x|y) if a value
for y is specified.
Input: A, b - The parameters of the linear mean of p(y|x)
L - The covariance matrix of p(y|x)
y - The value of y on which p(x|y) is conditioned. Optional, if omitted,
p(y) is calculated
Output: y_mean - The mean of p(y)
y_cov - The covariance of p(y)
xy_mean - The mean of p(x|y) (if requested)
xy_cov - The covariance of p(x|y) (if requested)
"""
A = np.array(A)
b = np.array(b)
L = np.array(L)
y = np.array(y).flatten()
x_mean = self.mean
x_cov = self.cov
try:
# Calculate the mean and covariance of p(y)
y_mean = np.dot(A,x_mean) + b
y_cov = L + np.dot(A,np.dot(x_cov,A.transpose()))
# Calculate the mean and covariance of p(x|y) if y is given
if np.size(y) > 0:
# Calculate mean and covariance of p(x|y)
xy_cov = inv(inv(x_cov) + np.dot(A.transpose(),solve(L,A)))
xy_mean = np.dot(xy_cov,(np.dot(A.transpose(),solve(L,y-b)) + solve(x_cov,x_mean)))
return y_mean, y_cov, xy_mean, xy_cov
else:
return y_mean, y_cov
        except Exception:
raise ValueError('Check the consistency of the dimensions')
def reset_ess(self):
self._gamma = []
self._x = []
def update_ess(self, gamma, x):
self._gamma.append(gamma)
self._x.append(x)
def maximize_params(self):
_x = np.array(self._x).T
_gamma = np.array([self._gamma]).T
_N = _gamma.sum()
mu = np.dot(_x, _gamma)
mu = mu/_N
sigma = np.zeros(self.cov.shape)
cov_prior = np.diag(0.01*np.ones(self.cov.shape[0]))
for i in range(_x.shape[1]):
dm = np.asarray([_x]).T[i] - mu
sigma += _gamma[i]*np.outer(dm,dm)
sigma /= _N
sigma += cov_prior
self.mean = mu
self.cov = sigma
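# Usage sketch (illustrative only; the numbers are arbitrary, names as above):
#   g = Gauss(mean=[1., 1.], cov=[[2., 1.], [1., 3.]])
#   mean_ab, cov_ab = g.conditional(xb=0.5)   # p(x1 | x2 = 0.5)
#   samples = g.sample(number_samples=100)    # (100, 2) array, one row per draw
#   mean_m, cov_m = g.marginal(d=1)           # marginalize out the last element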
|
bhrzslm/uncertainty-reasoning
|
my_engine/others/GrMPy/lib/GrMPy/gauss.py
|
Python
|
mit
| 12,617
|
[
"Gaussian"
] |
79824fec275528b0b36394cba347536475db9ef4b04cbc5c5fd06ca07a159f2c
|
from urlparse import urlparse, parse_qs
from django.test import TransactionTestCase
from django.contrib.auth.models import Group
from django.http import HttpRequest
from hs_core.hydroshare import resource
from hs_core import hydroshare
from hs_tools_resource.models import RequestUrlBase, ToolVersion, SupportedResTypes, ToolResource,\
ToolIcon, AppHomePageUrl
from hs_tools_resource.receivers import metadata_element_pre_create_handler, \
metadata_element_pre_update_handler
from hs_tools_resource.utils import parse_app_url_template
class TestWebAppFeature(TransactionTestCase):
def setUp(self):
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
self.user = hydroshare.create_account(
'scrawley@byu.edu',
username='scrawley',
first_name='Shawn',
last_name='Crawley',
superuser=False,
groups=[self.group]
)
self.allowance = 0.00001
self.resWebApp = hydroshare.create_resource(
resource_type='ToolResource',
owner=self.user,
title='Test Web App Resource',
keywords=['kw1', 'kw2'])
self.resGeneric = hydroshare.create_resource(
resource_type='GenericResource',
owner=self.user,
title='Test Generic Resource',
keywords=['kw1', 'kw2'])
def test_web_app_res_specific_metadata(self):
# Class: RequestUrlBase
# no RequestUrlBase obj
self.assertEqual(RequestUrlBase.objects.all().count(), 0)
# create 1 RequestUrlBase obj with required params
resource.create_metadata_element(self.resWebApp.short_id,
'RequestUrlBase',
value='https://www.google.com')
self.assertEqual(RequestUrlBase.objects.all().count(), 1)
# may not create additional instance of RequestUrlBase
with self.assertRaises(Exception):
resource.create_metadata_element(self.resWebApp.short_id, 'RequestUrlBase',
value='https://www.facebook.com')
self.assertEqual(RequestUrlBase.objects.all().count(), 1)
# update existing meta
resource.update_metadata_element(self.resWebApp.short_id, 'RequestUrlBase',
element_id=RequestUrlBase.objects.first().id,
value='https://www.yahoo.com')
self.assertEqual(RequestUrlBase.objects.first().value, 'https://www.yahoo.com')
# delete RequestUrlBase obj
resource.delete_metadata_element(self.resWebApp.short_id, 'RequestUrlBase',
element_id=RequestUrlBase.objects.first().id)
self.assertEqual(RequestUrlBase.objects.all().count(), 0)
# Class: ToolVersion
# verify no ToolVersion obj
self.assertEqual(ToolVersion.objects.all().count(), 0)
# no ToolVersion obj
self.assertEqual(ToolVersion.objects.all().count(), 0)
# create 1 ToolVersion obj with required params
resource.create_metadata_element(self.resWebApp.short_id, 'ToolVersion', value='1.0')
self.assertEqual(ToolVersion.objects.all().count(), 1)
# may not create additional instance of ToolVersion
with self.assertRaises(Exception):
resource.create_metadata_element(self.resWebApp.short_id, 'ToolVersion', value='2.0')
self.assertEqual(ToolVersion.objects.all().count(), 1)
# update existing meta
resource.update_metadata_element(self.resWebApp.short_id, 'ToolVersion',
element_id=ToolVersion.objects.first().id,
value='3.0')
self.assertEqual(ToolVersion.objects.first().value, '3.0')
# delete ToolVersion obj
resource.delete_metadata_element(self.resWebApp.short_id, 'ToolVersion',
element_id=ToolVersion.objects.first().id)
self.assertEqual(ToolVersion.objects.all().count(), 0)
# Class: SupportedResTypes
# no SupportedResTypes obj
self.assertEqual(SupportedResTypes.objects.all().count(), 0)
# create 2 SupportedResTypes obj with required params
resource.create_metadata_element(self.resWebApp.short_id, 'SupportedResTypes',
supported_res_types=['NetcdfResource'])
resource.create_metadata_element(self.resWebApp.short_id, 'SupportedResTypes',
supported_res_types=['NetcdfResource'])
self.assertEqual(SupportedResTypes.objects.all().count(), 2)
# update existing meta
resource.update_metadata_element(self.resWebApp.short_id, 'SupportedResTypes',
element_id=SupportedResTypes.objects.first().id,
supported_res_types=['TimeSeriesResource'])
self.assertEqual(SupportedResTypes.objects.first().supported_res_types.all()[0].description,
'TimeSeriesResource')
# try to delete 1st SupportedResTypes obj
with self.assertRaises(Exception):
resource.delete_metadata_element(self.resWebApp.short_id, 'SupportedResTypes',
element_id=SupportedResTypes.objects.first().id)
self.assertEqual(SupportedResTypes.objects.all().count(), 2)
# Class: ToolIcon
# verify no ToolIcon obj
self.assertEqual(ToolIcon.objects.all().count(), 0)
# no ToolIcon obj
self.assertEqual(ToolIcon.objects.all().count(), 0)
# create 1 ToolIcon obj with required params
resource.create_metadata_element(self.resWebApp.short_id,
'ToolIcon',
value='https://test_icon_url.png')
self.assertEqual(ToolIcon.objects.all().count(), 1)
# may not create additional instance of ToolIcon
with self.assertRaises(Exception):
resource.create_metadata_element(self.resWebApp.short_id,
'ToolIcon',
value='https://test_icon_url_2.png')
self.assertEqual(ToolIcon.objects.all().count(), 1)
# update existing meta
resource.update_metadata_element(self.resWebApp.short_id, 'ToolIcon',
element_id=ToolIcon.objects.first().id,
value='https://test_icon_url_3.png')
self.assertEqual(ToolIcon.objects.first().value, 'https://test_icon_url_3.png')
# delete ToolIcon obj
resource.delete_metadata_element(self.resWebApp.short_id, 'ToolIcon',
element_id=ToolIcon.objects.first().id)
self.assertEqual(ToolIcon.objects.all().count(), 0)
# Class: AppHomePageUrl
# verify no AppHomePageUrl obj
self.assertEqual(AppHomePageUrl.objects.all().count(), 0)
# create 1 AppHomePageUrl obj with required params
resource.create_metadata_element(self.resWebApp.short_id,
'AppHomePageUrl',
value='https://my_web_app.com')
self.assertEqual(AppHomePageUrl.objects.all().count(), 1)
# may not create additional instance of AppHomePageUrl
with self.assertRaises(Exception):
resource.create_metadata_element(self.resWebApp.short_id,
'AppHomePageUrl',
value='https://my_web_app_2.com')
self.assertEqual(AppHomePageUrl.objects.all().count(), 1)
# update existing meta
resource.update_metadata_element(self.resWebApp.short_id, 'AppHomePageUrl',
element_id=AppHomePageUrl.objects.first().id,
value='http://my_web_app_3.com')
self.assertEqual(AppHomePageUrl.objects.first().value, 'http://my_web_app_3.com')
# delete AppHomePageUrl obj
resource.delete_metadata_element(self.resWebApp.short_id, 'AppHomePageUrl',
element_id=AppHomePageUrl.objects.first().id)
self.assertEqual(AppHomePageUrl.objects.all().count(), 0)
def test_metadata_element_pre_create_and_update(self):
request = HttpRequest()
# RequestUrlBase
request.POST = {'value': 'https://www.msn.com'}
data = metadata_element_pre_create_handler(sender=ToolResource,
element_name="RequestUrlBase",
request=request)
self.assertTrue(data["is_valid"])
data = metadata_element_pre_update_handler(sender=ToolResource,
element_name="RequestUrlBase",
request=request)
self.assertTrue(data["is_valid"])
# ToolVersion
request.POST = {'value': '4.0'}
data = metadata_element_pre_create_handler(sender=ToolResource,
element_name="ToolVersion",
request=request)
self.assertTrue(data["is_valid"])
data = metadata_element_pre_update_handler(sender=ToolResource,
element_name="ToolVersion",
request=request)
self.assertTrue(data["is_valid"])
# SupportedResTypes
request.POST = {'supportedResTypes': ['NetCDF Resource']}
data = metadata_element_pre_create_handler(sender=ToolResource,
element_name="SupportedResTypes",
request=request)
self.assertTrue(data["is_valid"])
data = metadata_element_pre_update_handler(sender=ToolResource,
element_name="SupportedResTypes",
request=request)
self.assertTrue(data["is_valid"])
# ToolIcon
request.POST = {'icon': 'https://test_icon_url_3.png'}
data = metadata_element_pre_create_handler(sender=ToolResource,
element_name="ToolIcon",
request=request)
self.assertTrue(data["is_valid"])
def test_utils(self):
url_template_string = "http://www.google.com/?" \
"resid=${HS_RES_ID}&restype=${HS_RES_TYPE}&" \
"user=${HS_USR_NAME}"
term_dict_user = {"HS_USR_NAME": self.user.username}
new_url_string = parse_app_url_template(url_template_string,
[self.resGeneric.get_hs_term_dict(),
term_dict_user])
o = urlparse(new_url_string)
query = parse_qs(o.query)
self.assertEqual(query["resid"][0], self.resGeneric.short_id)
self.assertEqual(query["restype"][0], "GenericResource")
self.assertEqual(query["user"][0], self.user.username)
url_template_string = "http://www.google.com/?" \
"resid=${HS_RES_ID}&restype=${HS_RES_TYPE}&" \
"mypara=${HS_UNDEFINED_TERM}&user=${HS_USR_NAME}"
new_url_string = parse_app_url_template(url_template_string,
[self.resGeneric.get_hs_term_dict(),
term_dict_user])
self.assertEqual(new_url_string, None)
|
FescueFungiShare/hydroshare
|
hs_tools_resource/tests/test_web_app.py
|
Python
|
bsd-3-clause
| 12,169
|
[
"NetCDF"
] |
0d7aa4de4242ad0ffa48bb44291bbcdea9520cc18e0778c59863feab58c1a24f
|
# Copyright 2015 Google Inc. All Rights Reserved.
"""A collection of CLI walkers."""
import cStringIO
import os
from googlecloudsdk.core.util import files
from googlecloudsdk.calliope import cli_tree
from googlecloudsdk.calliope import markdown
from googlecloudsdk.calliope import walker
from googlecloudsdk.core.document_renderers import render_document
class DevSiteGenerator(walker.Walker):
"""Generates DevSite reference HTML in a directory hierarchy.
  This implements gcloud meta generate-help-docs --devsite-dir=DIRECTORY.
Attributes:
_directory: The DevSite reference output directory.
_toc_root: The root TOC output stream.
_toc_main: The current main (just under root) TOC output stream.
"""
_REFERENCE = '/sdk/gcloud/reference' # TOC reference directory offset.
_TOC = '_toc.yaml'
def __init__(self, cli, directory):
"""Constructor.
Args:
cli: The Cloud SDK CLI object.
directory: The DevSite output directory path name.
"""
super(DevSiteGenerator, self).__init__(cli)
self._directory = directory
files.MakeDir(self._directory)
toc_path = os.path.join(self._directory, self._TOC)
self._toc_root = open(toc_path, 'w')
self._toc_root.write('toc:\n')
self._toc_root.write('- title: "Reference"\n')
self._toc_root.write(' path: %s\n' % self._REFERENCE)
self._toc_root.write(' section:\n')
self._toc_main = None
def Visit(self, node, parent, is_group):
"""Updates the TOC and Renders a DevSite doc for each node in the CLI tree.
Args:
node: group/command CommandCommon info.
parent: The parent Visit() return value, None at the top level.
      is_group: True if node is a group, otherwise it is a command.
Returns:
The parent value, ignored here.
"""
def _UpdateTOC():
"""Updates the DevSIte TOC."""
depth = len(command) - 1
if not depth:
return
if depth == 1:
if self._toc_main:
# Close the current main group toc if needed.
self._toc_main.close()
# Create a new main group toc.
toc_path = os.path.join(directory, self._TOC)
toc = open(toc_path, 'w')
self._toc_main = toc
title = ' '.join(command)
toc.write('toc:\n')
toc.write('- title: "%s"\n' % title)
toc.write(' path: %s\n' % '/'.join([self._REFERENCE] + command[1:]))
toc.write(' section:\n')
toc = self._toc_root
indent = ' '
if is_group:
toc.write('%s- include: %s\n' % (
indent, '/'.join([self._REFERENCE] + command[1:] + [self._TOC])))
return
else:
toc = self._toc_main
indent = ' ' * (depth - 1)
title = command[-1]
toc.write('%s- title: "%s"\n' % (indent, title))
toc.write('%s path: %s\n' % (indent,
'/'.join([self._REFERENCE] + command[1:])))
if is_group:
toc.write('%s section:\n' % indent)
# Set up the destination dir for this level.
command = node.GetPath()
if is_group:
directory = os.path.join(self._directory, *command[1:])
files.MakeDir(directory, mode=0755)
else:
directory = os.path.join(self._directory, *command[1:-1])
# Render the DevSite document.
path = os.path.join(
directory, 'index' if is_group else command[-1]) + '.html'
with open(path, 'w') as f:
md = markdown.Markdown(node)
render_document.RenderDocument(style='devsite',
title=' '.join(command),
fin=cStringIO.StringIO(md),
out=f)
_UpdateTOC()
return parent
def Done(self):
"""Closes the TOC files after the CLI tree walk is done."""
self._toc_root.close()
if self._toc_main:
self._toc_main.close()
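# Sketch of the root _toc.yaml this walker produces (illustrative; the actual
# entries depend on the CLI tree being walked):
#   toc:
#   - title: "Reference"
#     path: /sdk/gcloud/reference
#     section:
#     - include: /sdk/gcloud/reference/<group>/_toc.yaml
#     - title: "<command>"
#       path: /sdk/gcloud/reference/<command>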
class HelpTextGenerator(walker.Walker):
"""Generates help text files in a directory hierarchy.
Attributes:
_directory: The help text output directory.
"""
def __init__(self, cli, directory):
"""Constructor.
Args:
cli: The Cloud SDK CLI object.
directory: The help text output directory path name.
"""
super(HelpTextGenerator, self).__init__(cli)
self._directory = directory
files.MakeDir(self._directory)
def Visit(self, node, parent, is_group):
"""Renders a help text doc for each node in the CLI tree.
Args:
node: group/command CommandCommon info.
parent: The parent Visit() return value, None at the top level.
      is_group: True if node is a group, otherwise it is a command.
Returns:
The parent value, ignored here.
"""
# Set up the destination dir for this level.
command = node.GetPath()
if is_group:
directory = os.path.join(self._directory, *command[1:])
files.MakeDir(directory, mode=0755)
else:
directory = os.path.join(self._directory, *command[1:-1])
# Render the help text document.
path = os.path.join(directory, 'GROUP' if is_group else command[-1])
with open(path, 'w') as f:
md = markdown.Markdown(node)
render_document.RenderDocument(style='text', fin=cStringIO.StringIO(md),
out=f)
return parent
class ManPageGenerator(walker.Walker):
"""Generates manpage man(1) files in an output directory.
This implements gcloud meta generate-help-docs --manpage-dir=DIRECTORY.
The output directory will contain a manN subdirectory for each section N
required by markdown.
Attributes:
_directory: The manpage output directory.
"""
_SECTION_FORMAT = 'man{section}'
def __init__(self, cli, directory):
"""Constructor.
Args:
cli: The Cloud SDK CLI object.
directory: The manpage output directory path name.
"""
super(ManPageGenerator, self).__init__(cli)
# Currently all gcloud manpages are in section 1.
section_1 = self._SECTION_FORMAT.format(section=1)
self._directory = os.path.join(directory, section_1)
files.MakeDir(self._directory)
def Visit(self, node, parent, is_group):
"""Renders a manpage doc for each node in the CLI tree.
Args:
node: group/command CommandCommon info.
parent: The parent Visit() return value, None at the top level.
      is_group: True if node is a group, otherwise it is a command.
Returns:
The parent value, ignored here.
"""
command = node.GetPath()
path = os.path.join(self._directory, '_'.join(command)) + '.1'
with open(path, 'w') as f:
md = markdown.Markdown(node)
render_document.RenderDocument(style='man',
title=' '.join(command),
fin=cStringIO.StringIO(md),
out=f)
return parent
class CommandTreeGenerator(walker.Walker):
"""Constructs a CLI command dict tree.
This implements the resource generator for gcloud meta list-commands.
"""
def __init__(self, cli):
"""Constructor.
Args:
cli: The Cloud SDK CLI object.
"""
super(CommandTreeGenerator, self).__init__(cli)
def Visit(self, node, parent, is_group):
"""Visits each node in the CLI command tree to construct the dict tree.
Args:
node: group/command CommandCommon info.
parent: The parent Visit() return value, None at the top level.
      is_group: True if node is a group, otherwise it is a command.
Returns:
The subtree parent value, used here to construct a dict tree.
"""
name = node.name.replace('_', '-')
if is_group:
info = {}
info['_name_'] = name
if parent:
if 'groups' not in parent:
parent['groups'] = []
parent['groups'].append(info)
return info
if 'commands' not in parent:
parent['commands'] = []
parent['commands'].append(name)
return None
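# Resulting structure (sketch): a nested dict along the lines of
#   {'_name_': 'gcloud',
#    'groups': [{'_name_': 'compute', 'commands': ['ssh', ...]}],
#    'commands': ['version', ...]}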
class GCloudTreeGenerator(walker.Walker):
"""Generates an external representation of the gcloud CLI tree.
This implements the resource generator for gcloud meta list-gcloud.
"""
def __init__(self, cli):
"""Constructor.
Args:
cli: The Cloud SDK CLI object.
"""
super(GCloudTreeGenerator, self).__init__(cli)
def Visit(self, node, parent, is_group):
"""Visits each node in the CLI command tree to construct the external rep.
Args:
node: group/command CommandCommon info.
parent: The parent Visit() return value, None at the top level.
      is_group: True if node is a group, otherwise it is a command.
Returns:
The subtree parent value, used here to construct an external rep node.
"""
return cli_tree.Command(node, parent)
|
wemanuel/smry
|
smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/calliope/walker_util.py
|
Python
|
apache-2.0
| 8,744
|
[
"VisIt"
] |
66e57c313ba7e82da4e661a69e045a88ca1e870cf4616a029ca55fe980a94509
|
import pytest
from pysisyphus.calculators import XTB
from pysisyphus.cos.NEB import NEB
from pysisyphus.helpers import geom_loader
from pysisyphus.run import run_tsopt_from_cos
from pysisyphus.testing import using
@pytest.fixture
def cos(this_dir):
geoms = geom_loader(this_dir / "input.cycle_019.trj")
for i, geom in enumerate(geoms):
calc = XTB(calc_number=i)
geom.set_calculator(calc)
cos = NEB(geoms)
return cos
@using("xtb")
@pytest.mark.parametrize(
"coord_type",
[
"cart",
"dlc",
"redund",
],
)
def test_run_tsopt_from_cos(coord_type, cos):
calc_number = len(cos.images)
def calc_getter():
nonlocal calc_number
calc = XTB(calc_number=calc_number)
calc_number += 1
return calc
tsopt_key = "rsirfo"
tsopt_kwargs = {
"do_hess": True,
"hessian_recalc": 5,
"geom": {
"type": coord_type,
"coord_kwargs": {},
},
}
opt_result = run_tsopt_from_cos(cos, tsopt_key, tsopt_kwargs, calc_getter)
assert opt_result.geom.energy == pytest.approx(-11.44519302)
@using("xtb")
def test_run_tsopt_from_cos_dimer(cos):
opt_result = run_tsopt_from_cos(
cos,
tsopt_key="dimer",
tsopt_kwargs={
"geom": {
"type": "cart",
}
},
calc_getter=XTB,
)
assert opt_result.geom.energy == pytest.approx(-11.44519302, abs=2e-5)
|
eljost/pysisyphus
|
tests/test_run_tsopt_from_cos/test_run_tsopt_from_cos.py
|
Python
|
gpl-3.0
| 1,486
|
[
"xTB"
] |
1ea882bcc63d65dd71265631fce8a10d687722a37c29b713fea9662b92bb7bec
|
"""
@name: Modules/Computer/Nodes/_test/test_nodes_sync.py
@author: D. Brian Kimmel
@contact: d.briankimmel@gmail.com
@copyright: 2016-2020 by D. Brian Kimmel
@date: Created on Jun 2, 2016
@license: MIT License
@summary:
Passed all 8 tests - DBK - 2019-01-19
"""
__updated__ = '2020-01-24'
# Import system type stuff
from twisted.trial import unittest
# Import PyMh files and modules.
from _test.testing_mixin import SetupPyHouseObj
from Modules.Core.data_objects import NodeInterfaceData
from Modules.Computer.Nodes import NodeInformation
from Modules.Computer.Nodes.node_sync import Util
from Modules.Core.Utilities import json_tools
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
class SetupMixin(object):
def setUp(self):
self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj()
class A0(unittest.TestCase):
def test_00_Print(self):
_x = PrettyFormatAny.form('_test', 'title') # so it is defined when printing is cleaned up.
print('Id: test_nodes_sync')
class C1_Util(SetupMixin, unittest.TestCase):
"""
This section tests the setup of the _test
"""
def setUp(self):
SetupMixin.setUp(self)
self.m_interface_obj = NodeInterfaceData()
self.m_node_obj = NodeInformation()
def test_01_Who(self):
# Util.send_who_is_there(self.m_pyhouse_obj)
pass
def test_02_Who(self):
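        # NOTE: nodesXml and self.m_xml are not defined in this module; this
        # test still references XML helpers that appear to have been removed.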
l_node = nodesXml._read_one_node_xml(self.m_xml.node)
# print(PrettyFormatAny.form(l_node, 'C1-01-A - Node'))
l_json = json_tools.encode_json(l_node)
# print(PrettyFormatAny.form(l_json, 'C1-01-B - PyHouse'))
l_msg = json_tools.decode_json_unicode(l_json)
Util.add_node(self.m_pyhouse_obj, l_msg)
# print(PrettyFormatAny.form(self.m_pyhouse_obj.Computer.Nodes, 'C1-01-C - PyHouse'))
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/Computer/Nodes/_test/test_nodes_sync.py
|
Python
|
mit
| 1,875
|
[
"Brian"
] |
b70327163820bd3ae84566fdc14326a9898715e522fda631c042e41c717ecefe
|
# CamJam Edukit 1 - Basics
# Worksheet 7 - Traffic Lights - Solution
# Import Libraries
import os
import time
import RPi.GPIO as GPIO
# Set the GPIO pin naming mode
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Set up variables for the LED, Buzzer and switch pins
PinGreen = 24
PinAmber = 23
PinRed = 18
PinBuzzer = 22
PinButton = 25
# PinGreenPedestrian = 17
# PinRedPedestrian = 27
# Set up each of the input (switch) and output (LEDs, Buzzer) pins
GPIO.setup(PinGreen, GPIO.OUT)
GPIO.setup(PinAmber, GPIO.OUT)
GPIO.setup(PinRed, GPIO.OUT)
GPIO.setup(PinBuzzer, GPIO.OUT)
GPIO.setup(PinButton, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# GPIO.setup(PinGreenPedestrian, GPIO.OUT)
# GPIO.setup(PinRedPedestrian, GPIO.OUT)
GPIO.output(PinGreen, GPIO.LOW)
GPIO.output(PinAmber, GPIO.LOW)
GPIO.output(PinRed, GPIO.LOW)
GPIO.output(PinBuzzer, GPIO.LOW)
# GPIO.output(PinGreenPedestrian, GPIO.LOW)
# GPIO.output(PinRedPedestrian, GPIO.LOW)
# Define a function for the initial state (Green LED on, rest off)
# (If you have the second 'pedestrian' LEDs, turn the red on & green
# off)
def startgreen():
# Remember all code in the function is indented
GPIO.output(PinGreen, GPIO.HIGH)
# GPIO.output(PinRedPedestrian, GPIO.HIGH)
# Turn the green off and the amber on for 3 seconds
# ('Pedestrian' red LED stays lit)
def steadyamber():
# Remember all code in the function is indented
GPIO.output(PinGreen, GPIO.LOW)
GPIO.output(PinAmber, GPIO.HIGH)
time.sleep(3)
# Turn the amber off, and then the red on for 1 second
def steadyred():
# Remember all code in the function is indented
GPIO.output(PinAmber, GPIO.LOW)
GPIO.output(PinRed, GPIO.HIGH)
time.sleep(1)
# Sound the buzzer for 4 seconds
# (If you have the 'pedestrian' LEDs, turn the red off and green on)
def startwalking():
# Make the buzzer buzz on and off, half a second of
# sound followed by half a second of silence
# GPIO.output(PinRedPedestrian, GPIO.LOW)
# GPIO.output(PinGreenPedestrian, GPIO.HIGH)
iCount = 1
while iCount <= 4:
GPIO.output(PinBuzzer, GPIO.HIGH)
time.sleep(0.5)
GPIO.output(PinBuzzer, GPIO.LOW)
time.sleep(0.5)
iCount += 1
# Turn the buzzer off and wait for 2 seconds
# (If you have a second green 'pedestrian' LED, make it flash on and
# off for the two seconds)
def dontwalk():
# Remember all code in the function is indented
GPIO.output(PinBuzzer, GPIO.LOW)
iCount = 1
while iCount <= 2:
# GPIO.output(PinGreenPedestrian, GPIO.HIGH)
time.sleep(0.5)
# GPIO.output(PinGreenPedestrian, GPIO.LOW)
time.sleep(0.5)
iCount += 1
# Flash the amber on and off for 6 seconds
# (And the green 'pedestrian' LED too)
def flashingambergreen():
# Remember all code in the function is indented
GPIO.output(PinRed, GPIO.LOW)
iCount = 1
while iCount <= 6:
GPIO.output(PinAmber, GPIO.HIGH)
# GPIO.output(PinGreenPedestrian, GPIO.HIGH)
time.sleep(0.5)
GPIO.output(PinAmber, GPIO.LOW)
# GPIO.output(PinGreenPedestrian, GPIO.LOW)
time.sleep(0.5)
iCount += 1
# Flash the amber for one more second
# (Turn the green 'pedestrian' LED off and the red on)
def flashingamber():
# Remember all code in the function is indented
# GPIO.output(PinRedPedestrian, GPIO.HIGH)
GPIO.output(PinAmber, GPIO.HIGH)
time.sleep(0.5)
GPIO.output(PinAmber, GPIO.LOW)
time.sleep(0.5)
# Go through the traffic light sequence by calling each function
# one after the other.
def trafficlightsequence():
# Remember all code in the function is indented
# Green will already be on
steadyamber()
steadyred()
startwalking()
dontwalk()
flashingambergreen()
flashingamber()
startgreen()
os.system('clear') # Clears the terminal
print("Traffic Lights")
# Initialise the traffic lights
startgreen()
# Here is the loop that waits at least 20 seconds before
# stopping the cars if the button has been pressed
while True: # Loop around forever
buttonnotpressed = True # Button has not been pressed
start = time.time() # Records the current time
while buttonnotpressed: # While the button has not been pressed
time.sleep(0.1) # Wait for 0.1s
if GPIO.input(PinButton) == False: # If the button is pressed
print("Button has been pressed")
now = time.time()
buttonnotpressed = False # Button has been pressed
if (now - start) <= 20: # If under 20 seconds
time.sleep(20 - (now - start)) # Wait until 20s is up
trafficlightsequence() # Run the traffic light sequence
|
CamJam-EduKit/EduKit1
|
CamJam Edukit 1 - RPI.GPIO/Code (RPI.GPIO)/7-TrafficLights-solution.py
|
Python
|
mit
| 4,708
|
[
"Amber"
] |
7d93f4d7ccef0f1fdf2688464443c2c9b4aadcaece1dbed29afa958a2b731c23
|
import matplotlib
matplotlib.use('Agg')
import cPickle
from matplotlib import pyplot as plt
import os
data_dir = '/Volumes/Storage/models'
img_save_dir = '/Volumes/Storage/weights'
models = [('clean', 'AE1110_Scale_Warp_Blocks_2049_500_tanh_tanh_gpu_sgd_clean_continue_20141110_1235_21624029'),
('gaussian', 'AE1110_Scale_Warp_Blocks_2049_500_tanh_tanh_gpu_sgd_gaussian_continue_20141110_1250_49502872'),
('maskout', 'AE1110_Scale_Warp_Blocks_2049_500_tanh_tanh_gpu_sgd_maskout_continue_20141110_1251_56190462'),
('blackout', 'AE1110_Scale_Warp_Blocks_2049_500_tanh_tanh_gpu_sgd_blackout_continue_20141110_1249_12963320'),
('batchout', 'AE1110_Scale_Warp_Blocks_2049_500_tanh_tanh_gpu_sgd_batchout_continue_20141111_0957_22484008')]
for name, md in models:
with open("{}/{}/model.pkl".format(data_dir, md)) as fin:
print name
model = cPickle.load(fin)
W = model.layers[0].W.get_value()
plt.hist(W.flatten(), 100, range=(W.min(), W.max()), fc='k', ec='k')
plt.xlabel('weights')
plt.ylabel('frequency of occurrence')
plt.title('Histogram of Weights')
plt.savefig('{}/{}_hist.pdf'.format(img_save_dir, name))
plt.close()
plt.imshow(W, vmin=-0.05, vmax=0.05)
plt.tick_params(axis='both', which='major', labelsize=8)
plt.xlabel('bottleneck dim', fontsize=10)
plt.ylabel('input dim', fontsize=12)
plt.colorbar()
plt.savefig('{}/{}.pdf'.format(img_save_dir, name), bbox_inches='tight')
plt.close()
# for name, md in models:
# os.system('python sync_model.py --from_to helios home --model {}'.format(md))
|
hycis/Pynet
|
scripts/nii/show_weights.py
|
Python
|
apache-2.0
| 1,681
|
[
"Gaussian"
] |
2c5f8f60a0e350d37bd20384d544dce5c7b4525e597162367677322b590a8352
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import numpy as np
from pyscf.nao.m_fact import sgn, onedivsqrt4pi,rttwo
lmx = 7
l2lmhl = np.array( [0]+[np.sqrt((l-0.5)/l) for l in range(1,lmx+1) ])
l2tlm1 = np.array( [0]+[np.sqrt(2*l-1.0) for l in range(1,lmx+1) ])
l2tlp1 = np.array( [np.sqrt(2*l+1.0) for l in range(lmx+1) ])
lm2aa = np.zeros(((lmx+1)**2))
lm2bb = np.zeros(((lmx+1)**2))
for l in range(lmx+1):
for m, ind in zip(range(-l,l+1), range(l*(l+1)-l,l*(l+1)+l+1)):
lm2aa[ind] = np.sqrt(1.0*l**2-m**2)
lm2bb[ind] = 1.0/np.sqrt(1.0*(l+1)**2-m**2)
#
#
#
def rsphar(r,lmax,res):
"""
Computes (all) real spherical harmonics up to the angular momentum lmax
Args:
r : Cartesian coordinates defining correct theta and phi angles for spherical harmonic
lmax : Integer, maximal angular momentum
Result:
    1-d numpy array of float64 elements with all spherical harmonics stored in the order 0,0; 1,-1; 1,0; 1,+1 ... lmax,lmax, altogether (lmax+1)**2 elements.
"""
xxpyy = r[0]**2 + r[1]**2
dd=np.sqrt(xxpyy + r[2]**2)
if dd < 1e-10:
    res[:] = 0.0  # compatible with numba, unlike the fill method
res[0]=onedivsqrt4pi
return 0
if r[0]==0.0:
phi = 0.5*np.pi if r[1]<0.0 else -0.5*np.pi
else:
phi = np.arctan( r[1]/r[0] ) if r[0]>=0.0 else np.arctan( r[1]/r[0] )+np.pi
res[0]=onedivsqrt4pi
if lmax==0: return 0
ss=np.sqrt(xxpyy)/dd
cc=r[2]/dd
for l in range(1,lmax+1):
twol,l2 = l+l,l*l
il1,il2 = l2-1,l2+twol
res[il2]=-ss*l2lmhl[l]*res[il1]
res[il2-1]=cc*l2tlm1[l]*res[il1]
  if lmax>=2:
    for m in range(lmax-1):
      # upward recurrence in l for fixed m
      for l in range(m+1,lmax):
        ind = l*(l+1)+m
        zz = (l+l+1)*cc*res[ind] - lm2aa[ind]*res[ind-l-l]
        res[ind+l+l+2] = zz*lm2bb[ind]
for l in range(lmax+1):
ll2=l*(l+1)
res[ll2] = res[ll2]*l2tlp1[l]
for m in range(1,l+1):
cs,cc,P = np.sin(m*phi), np.cos(m*phi), res[ll2+m]*sgn[m]*l2tlp1[l]*rttwo
res[ll2+m]=cc*P
res[ll2-m]=cs*P
return 0
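# Minimal usage sketch (illustrative, not part of the module): evaluate all
# real spherical harmonics up to lmax=2 at a point; `res` must already be a
# float64 array of length (lmax+1)**2, since rsphar fills it in place.
#
#   res = np.zeros((2+1)**2)
#   rsphar(np.array([0.0, 0.0, 1.0]), 2, res)
#   # res[0] == 1/sqrt(4*pi); on the z axis only the m == 0 terms survive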
|
gkc1000/pyscf
|
pyscf/nao/m_rsphar.py
|
Python
|
apache-2.0
| 2,649
|
[
"PySCF"
] |
fcc4d8697622409a40b665cccb31982847d2e6bafa20fc8a4ac83d11ad6940f5
|
#!/usr/bin/env python
import logging
import apache_beam as beam
from apache_beam.io import ReadFromAvro
from apache_beam.utils.pipeline_options import GoogleCloudOptions
from apache_beam.utils.pipeline_options import PipelineOptions
from apache_beam.utils.pipeline_options import SetupOptions
from apache_beam.utils.pipeline_options import StandardOptions
from common.tableLoader import TableLoader
from common.configReader import ConfigReader
class IngestionToBigQuery(object):
def __init__(self, config, tables):
self.config = config
self.tables = tables
self.pipeline = self.__init_pipeline()
def __init_pipeline(self):
pipeline_args = self.config['pipeline_args']
options = PipelineOptions()
google_cloud_options = options.view_as(GoogleCloudOptions)
google_cloud_options.project = pipeline_args['project']
google_cloud_options.job_name = pipeline_args['job_name']
google_cloud_options.staging_location = pipeline_args['staging_location']
google_cloud_options.temp_location = pipeline_args['temp_location']
options.view_as(StandardOptions).runner = pipeline_args['runner']
options.view_as(SetupOptions).setup_file = pipeline_args['setup_file']
options.view_as(SetupOptions).save_main_session = True
return beam.Pipeline(options=options)
def __filter_columns(self, record, columns):
return {col: record.get(col, '') for col in columns}
def __run_ingestion(self, storage_input_path, columns, output_table):
(self.pipeline
            | output_table + ': read table' >> ReadFromAvro(storage_input_path)
| output_table + ': filter columns' >> beam.Map(self.__filter_columns, columns=columns)
| output_table + ': write to BigQuery' >> beam.Write(
beam.io.BigQuerySink(output_table,
create_disposition=beam.io.BigQueryDisposition.CREATE_NEVER,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND)))
def ingest_table(self):
bucket_name = self.config['gs_bucket']
avro_prefix = self.config['final_avro_prefix']
avro_suffix = self.config['final_avro_suffix']
for table in self.tables:
path_format = ('gs://{gs_bucket}{final_avro_prefix}'
'{source_schema}.{table_name}{final_avro_suffix}')
gs_input_path = path_format.format(gs_bucket=bucket_name,
final_avro_prefix=avro_prefix,
final_avro_suffix=avro_suffix,
source_schema=table.source_schema,
table_name=table.table_name)
self.__run_ingestion(gs_input_path, table.columns, table.output_table)
self.pipeline.run().wait_until_finish() # block until pipeline completion
def main():
logging.getLogger().setLevel(logging.INFO)
config = ConfigReader('columbus-config') #TODO read from args
tables = TableLoader('experience')
ingestor = IngestionToBigQuery(config.configuration, tables.list_of_tables)
ingestor.ingest_table()
if __name__ == '__main__':
main()
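# Illustrative sketch of the configuration shape this script expects (the key
# names below are taken from the code above; the values are made up):
#
# {
#     'pipeline_args': {
#         'project': 'my-gcp-project',
#         'job_name': 'ingest-to-bq',
#         'staging_location': 'gs://my-bucket/staging',
#         'temp_location': 'gs://my-bucket/temp',
#         'runner': 'DataflowRunner',
#         'setup_file': './setup.py',
#     },
#     'gs_bucket': 'my-bucket/',
#     'final_avro_prefix': 'avro/',
#     'final_avro_suffix': '/*.avro',
# }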
|
chrisbangun/dataflow-appengine
|
recsys_data_pipeline/ingestion.py
|
Python
|
apache-2.0
| 3,269
|
[
"COLUMBUS"
] |
63b70f295a661963f8450d8b4f2d835fa6cfadc864bb5f7ead6e72058c70ea21
|
"""The MIT License (MIT)
Copyright (c) 2016-2017 Marcus Ottosson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Documentation
Map all bindings to PySide2
Project goals:
Qt.py was born in the film and visual effects industry to address
the growing need for the development of software capable of running
with more than one flavour of the Qt bindings for Python - PySide,
PySide2, PyQt4 and PyQt5.
1. Build for one, run with all
2. Explicit is better than implicit
3. Support co-existence
Default resolution order:
- PySide2
- PyQt5
- PySide
- PyQt4
Usage:
>> import sys
>> from Qt import QtWidgets
>> app = QtWidgets.QApplication(sys.argv)
>> button = QtWidgets.QPushButton("Hello World")
>> button.show()
>> app.exec_()
All members of PySide2 are mapped from other bindings, should they exist.
If no equivalent member exists, it is excluded from Qt.py and inaccessible.
The idea is to highlight members that exist across all supported bindings,
and to guarantee that code that runs on one binding runs on all others.
For more details, visit https://github.com/mottosso/Qt.py
"""
import os
import sys
import types
import shutil
__version__ = "1.0.0.b1"
# Enable support for `from Qt import *`
__all__ = [
"QtGui",
"QtCore",
"QtWidgets",
"QtNetwork",
"QtXml",
"QtHelp",
"QtCompat"
]
# Flags from environment variables
QT_VERBOSE = bool(os.getenv("QT_VERBOSE"))
QT_PREFERRED_BINDING = os.getenv("QT_PREFERRED_BINDING")
QT_STRICT = bool(os.getenv("QT_STRICT"))
# Supported submodules
QtGui = types.ModuleType("QtGui")
QtCore = types.ModuleType("QtCore")
QtWidgets = types.ModuleType("QtWidgets")
QtWidgets = types.ModuleType("QtWidgets")
QtNetwork = types.ModuleType("QtNetwork")
QtXml = types.ModuleType("QtXml")
QtHelp = types.ModuleType("QtHelp")
QtCompat = types.ModuleType("QtCompat")
Qt = sys.modules[__name__] # Reference to this module
# To use other modules, such as QtTest and QtScript,
# use conditional branching and import these explicitly.
def _pyside2():
from PySide2 import (
QtWidgets,
QtGui,
QtCore,
QtNetwork,
QtXml,
QtHelp,
QtUiTools,
__version__
)
Qt.__binding__ = "PySide2"
Qt.__qt_version__ = QtCore.qVersion()
Qt.__binding_version__ = __version__
QtCompat.load_ui = lambda fname: QtUiTools.QUiLoader().load(fname)
QtCompat.setSectionResizeMode = QtWidgets.QHeaderView.setSectionResizeMode
QtCompat.translate = QtCore.QCoreApplication.translate
return QtCore, QtGui, QtWidgets, QtNetwork, QtXml, QtHelp
def _pyside():
from PySide import (
QtGui,
QtCore,
QtNetwork,
QtXml,
QtHelp,
QtUiTools,
__version__
)
QtWidgets = QtGui
Qt.__binding__ = "PySide"
Qt.__qt_version__ = QtCore.qVersion()
Qt.__binding_version__ = __version__
QtCompat.load_ui = lambda fname: QtUiTools.QUiLoader().load(fname)
QtCompat.setSectionResizeMode = QtGui.QHeaderView.setResizeMode
QtCompat.translate = (
lambda context, sourceText, disambiguation, n:
QtCore.QCoreApplication.translate(context,
sourceText,
disambiguation,
QtCore.QCoreApplication.CodecForTr,
n))
return QtCore, QtGui, QtWidgets, QtNetwork, QtXml, QtHelp
def _pyqt5():
from PyQt5 import (
QtWidgets,
QtGui,
QtCore,
QtNetwork,
QtXml,
QtHelp,
uic
)
Qt.__binding__ = "PyQt5"
Qt.__qt_version__ = QtCore.QT_VERSION_STR
Qt.__binding_version__ = QtCore.PYQT_VERSION_STR
QtCompat.load_ui = lambda fname: uic.loadUi(fname)
QtCompat.translate = QtCore.QCoreApplication.translate
QtCompat.setSectionResizeMode = QtWidgets.QHeaderView.setSectionResizeMode
return QtCore, QtGui, QtWidgets, QtNetwork, QtXml, QtHelp
def _pyqt4():
import sip
try:
sip.setapi("QString", 2)
sip.setapi("QVariant", 2)
sip.setapi("QDate", 2)
sip.setapi("QDateTime", 2)
sip.setapi("QTextStream", 2)
sip.setapi("QTime", 2)
sip.setapi("QUrl", 2)
except AttributeError as e:
raise ImportError(str(e))
# PyQt4 < v4.6
except ValueError as e:
# API version already set to v1
raise ImportError(str(e))
from PyQt4 import (
QtGui,
QtCore,
QtNetwork,
QtXml,
QtHelp,
uic
)
QtWidgets = QtGui
Qt.__binding__ = "PyQt4"
Qt.__qt_version__ = QtCore.QT_VERSION_STR
Qt.__binding_version__ = QtCore.PYQT_VERSION_STR
QtCompat.load_ui = lambda fname: uic.loadUi(fname)
QtCompat.setSectionResizeMode = QtGui.QHeaderView.setResizeMode
# PySide2 differs from Qt4 in that Qt4 has one extra argument
# which is always `None`. The lambda arguments represents the PySide2
# interface, whereas the arguments passed to `.translate` represent
# those expected of a Qt4 binding.
QtCompat.translate = (
lambda context, sourceText, disambiguation, n:
QtCore.QCoreApplication.translate(context,
sourceText,
disambiguation,
QtCore.QCoreApplication.CodecForTr,
n))
return QtCore, QtGui, QtWidgets, QtNetwork, QtXml, QtHelp
def _none():
"""Internal option (used in installer)"""
Mock = type("Mock", (), {"__getattr__": lambda Qt, attr: None})
Qt.__binding__ = "None"
Qt.__qt_version__ = "0.0.0"
Qt.__binding_version__ = "0.0.0"
QtCompat.load_ui = lambda fname: None
QtCompat.setSectionResizeMode = lambda *args, **kwargs: None
return Mock(), Mock(), Mock(), Mock(), Mock(), Mock()
def _log(text):
if QT_VERBOSE:
sys.stdout.write(text + "\n")
def _convert(lines):
"""Convert compiled .ui file from PySide2 to Qt.py
Arguments:
        lines (list): Each line of the .ui file
Usage:
>> with open("myui.py") as f:
.. lines = _convert(f.readlines())
"""
def parse(line):
line = line.replace("from PySide2 import", "from Qt import")
line = line.replace("QtWidgets.QApplication.translate",
"Qt.QtCompat.translate")
return line
parsed = list()
for line in lines:
line = parse(line)
parsed.append(line)
return parsed
def _cli(args):
"""Qt.py command-line interface"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--convert",
help="Path to compiled Python module, e.g. my_ui.py")
parser.add_argument("--compile",
help="Accept raw .ui file and compile with native "
"PySide2 compiler.")
parser.add_argument("--stdout",
help="Write to stdout instead of file",
action="store_true")
parser.add_argument("--stdin",
help="Read from stdin instead of file",
action="store_true")
args = parser.parse_args(args)
if args.stdout:
raise NotImplementedError("--stdout")
if args.stdin:
raise NotImplementedError("--stdin")
if args.compile:
raise NotImplementedError("--compile")
if args.convert:
sys.stdout.write("#\n"
"# WARNING: --convert is an ALPHA feature.\n#\n"
"# See https://github.com/mottosso/Qt.py/pull/132\n"
"# for details.\n"
"#\n")
#
# ------> Read
#
with open(args.convert) as f:
lines = _convert(f.readlines())
backup = "%s_backup%s" % os.path.splitext(args.convert)
sys.stdout.write("Creating \"%s\"..\n" % backup)
shutil.copy(args.convert, backup)
#
# <------ Write
#
with open(args.convert, "w") as f:
f.write("".join(lines))
sys.stdout.write("Successfully converted \"%s\"\n" % args.convert)
# Default order (customise order and content via QT_PREFERRED_BINDING)
_bindings = (_pyside2, _pyqt5, _pyside, _pyqt4)
if QT_PREFERRED_BINDING:
_preferred = QT_PREFERRED_BINDING.split(os.pathsep)
_available = {
"PySide2": _pyside2,
"PyQt5": _pyqt5,
"PySide": _pyside,
"PyQt4": _pyqt4,
"None": _none
}
try:
_bindings = [_available[binding] for binding in _preferred]
except KeyError:
raise ImportError(
("Requested %s, available: " % _preferred) +
"\n".join(_available.keys())
)
del(_preferred)
del(_available)
_log("Preferred bindings: %s" % list(_b.__name__ for _b in _bindings))
_found_binding = False
for _binding in _bindings:
_log("Trying %s" % _binding.__name__)
try:
_QtCore, _QtGui, _QtWidgets, _QtNetwork, _QtXml, _QtHelp = _binding()
_found_binding = True
break
except ImportError as e:
_log("ImportError: %s" % e)
continue
if not _found_binding:
    # If no binding was found, raise this error
    raise ImportError("No Qt binding was found.")
"""Members of Qt.py
This is where each member of Qt.py is explicitly defined.
It is based on a "lowest commond denominator" of all bindings;
including members found in each of the 4 bindings.
Find or add excluded members in build_membership.py
"""
_strict_members = {
"QtGui": [
"QAbstractTextDocumentLayout",
"QActionEvent",
"QBitmap",
"QBrush",
"QClipboard",
"QCloseEvent",
"QColor",
"QConicalGradient",
"QContextMenuEvent",
"QCursor",
"QDoubleValidator",
"QDrag",
"QDragEnterEvent",
"QDragLeaveEvent",
"QDragMoveEvent",
"QDropEvent",
"QFileOpenEvent",
"QFocusEvent",
"QFont",
"QFontDatabase",
"QFontInfo",
"QFontMetrics",
"QFontMetricsF",
"QGradient",
"QHelpEvent",
"QHideEvent",
"QHoverEvent",
"QIcon",
"QIconDragEvent",
"QIconEngine",
"QImage",
"QImageIOHandler",
"QImageReader",
"QImageWriter",
"QInputEvent",
"QInputMethodEvent",
"QIntValidator",
"QKeyEvent",
"QKeySequence",
"QLinearGradient",
"QMatrix2x2",
"QMatrix2x3",
"QMatrix2x4",
"QMatrix3x2",
"QMatrix3x3",
"QMatrix3x4",
"QMatrix4x2",
"QMatrix4x3",
"QMatrix4x4",
"QMouseEvent",
"QMoveEvent",
"QMovie",
"QPaintDevice",
"QPaintEngine",
"QPaintEngineState",
"QPaintEvent",
"QPainter",
"QPainterPath",
"QPainterPathStroker",
"QPalette",
"QPen",
"QPicture",
"QPictureIO",
"QPixmap",
"QPixmapCache",
"QPolygon",
"QPolygonF",
"QQuaternion",
"QRadialGradient",
"QRegExpValidator",
"QRegion",
"QResizeEvent",
"QSessionManager",
"QShortcutEvent",
"QShowEvent",
"QStandardItem",
"QStandardItemModel",
"QStatusTipEvent",
"QSyntaxHighlighter",
"QTabletEvent",
"QTextBlock",
"QTextBlockFormat",
"QTextBlockGroup",
"QTextBlockUserData",
"QTextCharFormat",
"QTextCursor",
"QTextDocument",
"QTextDocumentFragment",
"QTextFormat",
"QTextFragment",
"QTextFrame",
"QTextFrameFormat",
"QTextImageFormat",
"QTextInlineObject",
"QTextItem",
"QTextLayout",
"QTextLength",
"QTextLine",
"QTextList",
"QTextListFormat",
"QTextObject",
"QTextObjectInterface",
"QTextOption",
"QTextTable",
"QTextTableCell",
"QTextTableCellFormat",
"QTextTableFormat",
"QTransform",
"QValidator",
"QVector2D",
"QVector3D",
"QVector4D",
"QWhatsThisClickedEvent",
"QWheelEvent",
"QWindowStateChangeEvent",
"qAlpha",
"qBlue",
"qGray",
"qGreen",
"qIsGray",
"qRed",
"qRgb",
"qRgb",
],
"QtWidgets": [
"QAbstractButton",
"QAbstractGraphicsShapeItem",
"QAbstractItemDelegate",
"QAbstractItemView",
"QAbstractScrollArea",
"QAbstractSlider",
"QAbstractSpinBox",
"QAction",
"QActionGroup",
"QApplication",
"QBoxLayout",
"QButtonGroup",
"QCalendarWidget",
"QCheckBox",
"QColorDialog",
"QColumnView",
"QComboBox",
"QCommandLinkButton",
"QCommonStyle",
"QCompleter",
"QDataWidgetMapper",
"QDateEdit",
"QDateTimeEdit",
"QDesktopWidget",
"QDial",
"QDialog",
"QDialogButtonBox",
"QDirModel",
"QDockWidget",
"QDoubleSpinBox",
"QErrorMessage",
"QFileDialog",
"QFileIconProvider",
"QFileSystemModel",
"QFocusFrame",
"QFontComboBox",
"QFontDialog",
"QFormLayout",
"QFrame",
"QGesture",
"QGestureEvent",
"QGestureRecognizer",
"QGraphicsAnchor",
"QGraphicsAnchorLayout",
"QGraphicsBlurEffect",
"QGraphicsColorizeEffect",
"QGraphicsDropShadowEffect",
"QGraphicsEffect",
"QGraphicsEllipseItem",
"QGraphicsGridLayout",
"QGraphicsItem",
"QGraphicsItemGroup",
"QGraphicsLayout",
"QGraphicsLayoutItem",
"QGraphicsLineItem",
"QGraphicsLinearLayout",
"QGraphicsObject",
"QGraphicsOpacityEffect",
"QGraphicsPathItem",
"QGraphicsPixmapItem",
"QGraphicsPolygonItem",
"QGraphicsProxyWidget",
"QGraphicsRectItem",
"QGraphicsRotation",
"QGraphicsScale",
"QGraphicsScene",
"QGraphicsSceneContextMenuEvent",
"QGraphicsSceneDragDropEvent",
"QGraphicsSceneEvent",
"QGraphicsSceneHelpEvent",
"QGraphicsSceneHoverEvent",
"QGraphicsSceneMouseEvent",
"QGraphicsSceneMoveEvent",
"QGraphicsSceneResizeEvent",
"QGraphicsSceneWheelEvent",
"QGraphicsSimpleTextItem",
"QGraphicsTextItem",
"QGraphicsTransform",
"QGraphicsView",
"QGraphicsWidget",
"QGridLayout",
"QGroupBox",
"QHBoxLayout",
"QHeaderView",
"QInputDialog",
"QItemDelegate",
"QItemEditorCreatorBase",
"QItemEditorFactory",
"QKeyEventTransition",
"QLCDNumber",
"QLabel",
"QLayout",
"QLayoutItem",
"QLineEdit",
"QListView",
"QListWidget",
"QListWidgetItem",
"QMainWindow",
"QMdiArea",
"QMdiSubWindow",
"QMenu",
"QMenuBar",
"QMessageBox",
"QMouseEventTransition",
"QPanGesture",
"QPinchGesture",
"QPlainTextDocumentLayout",
"QPlainTextEdit",
"QProgressBar",
"QProgressDialog",
"QPushButton",
"QRadioButton",
"QRubberBand",
"QScrollArea",
"QScrollBar",
"QShortcut",
"QSizeGrip",
"QSizePolicy",
"QSlider",
"QSpacerItem",
"QSpinBox",
"QSplashScreen",
"QSplitter",
"QSplitterHandle",
"QStackedLayout",
"QStackedWidget",
"QStatusBar",
"QStyle",
"QStyleFactory",
"QStyleHintReturn",
"QStyleHintReturnMask",
"QStyleHintReturnVariant",
"QStyleOption",
"QStyleOptionButton",
"QStyleOptionComboBox",
"QStyleOptionComplex",
"QStyleOptionDockWidget",
"QStyleOptionFocusRect",
"QStyleOptionFrame",
"QStyleOptionGraphicsItem",
"QStyleOptionGroupBox",
"QStyleOptionHeader",
"QStyleOptionMenuItem",
"QStyleOptionProgressBar",
"QStyleOptionRubberBand",
"QStyleOptionSizeGrip",
"QStyleOptionSlider",
"QStyleOptionSpinBox",
"QStyleOptionTab",
"QStyleOptionTabBarBase",
"QStyleOptionTabWidgetFrame",
"QStyleOptionTitleBar",
"QStyleOptionToolBar",
"QStyleOptionToolBox",
"QStyleOptionToolButton",
"QStyleOptionViewItem",
"QStylePainter",
"QStyledItemDelegate",
"QSwipeGesture",
"QSystemTrayIcon",
"QTabBar",
"QTabWidget",
"QTableView",
"QTableWidget",
"QTableWidgetItem",
"QTableWidgetSelectionRange",
"QTapAndHoldGesture",
"QTapGesture",
"QTextBrowser",
"QTextEdit",
"QTimeEdit",
"QToolBar",
"QToolBox",
"QToolButton",
"QToolTip",
"QTreeView",
"QTreeWidget",
"QTreeWidgetItem",
"QTreeWidgetItemIterator",
"QUndoCommand",
"QUndoGroup",
"QUndoStack",
"QUndoView",
"QVBoxLayout",
"QWhatsThis",
"QWidget",
"QWidgetAction",
"QWidgetItem",
"QWizard",
"QWizardPage",
],
"QtCore": [
"QAbstractAnimation",
"QAbstractEventDispatcher",
"QAbstractItemModel",
"QAbstractListModel",
"QAbstractState",
"QAbstractTableModel",
"QAbstractTransition",
"QAnimationGroup",
"QBasicTimer",
"QBitArray",
"QBuffer",
"QByteArray",
"QByteArrayMatcher",
"QChildEvent",
"QCoreApplication",
"QCryptographicHash",
"QDataStream",
"QDate",
"QDateTime",
"QDir",
"QDirIterator",
"QDynamicPropertyChangeEvent",
"QEasingCurve",
"QElapsedTimer",
"QEvent",
"QEventLoop",
"QEventTransition",
"QFile",
"QFileInfo",
"QFileSystemWatcher",
"QFinalState",
"QGenericArgument",
"QGenericReturnArgument",
"QHistoryState",
"QIODevice",
"QLibraryInfo",
"QLine",
"QLineF",
"QLocale",
"QMargins",
"QMetaClassInfo",
"QMetaEnum",
"QMetaMethod",
"QMetaObject",
"QMetaProperty",
"QMimeData",
"QModelIndex",
"QMutex",
"QMutexLocker",
"QObject",
"QParallelAnimationGroup",
"QPauseAnimation",
"QPersistentModelIndex",
"QPluginLoader",
"QPoint",
"QPointF",
"QProcess",
"QProcessEnvironment",
"QPropertyAnimation",
"QReadLocker",
"QReadWriteLock",
"QRect",
"QRectF",
"QRegExp",
"QResource",
"QRunnable",
"QSemaphore",
"QSequentialAnimationGroup",
"QSettings",
"QSignalMapper",
"QSignalTransition",
"QSize",
"QSizeF",
"QSocketNotifier",
"QState",
"QStateMachine",
"QSysInfo",
"QSystemSemaphore",
"QTemporaryFile",
"QTextBoundaryFinder",
"QTextCodec",
"QTextDecoder",
"QTextEncoder",
"QTextStream",
"QTextStreamManipulator",
"QThread",
"QThreadPool",
"QTime",
"QTimeLine",
"QTimer",
"QTimerEvent",
"QTranslator",
"QUrl",
"QVariantAnimation",
"QWaitCondition",
"QWriteLocker",
"QXmlStreamAttribute",
"QXmlStreamAttributes",
"QXmlStreamEntityDeclaration",
"QXmlStreamEntityResolver",
"QXmlStreamNamespaceDeclaration",
"QXmlStreamNotationDeclaration",
"QXmlStreamReader",
"QXmlStreamWriter",
"Qt",
"QtCriticalMsg",
"QtDebugMsg",
"QtFatalMsg",
"QtMsgType",
"QtSystemMsg",
"QtWarningMsg",
"qAbs",
"qAddPostRoutine",
"qChecksum",
"qCritical",
"qDebug",
"qFatal",
"qFuzzyCompare",
"qIsFinite",
"qIsInf",
"qIsNaN",
"qIsNull",
"qRegisterResourceData",
"qUnregisterResourceData",
"qVersion",
"qWarning",
"qrand",
"qsrand",
],
"QtXml": [
"QDomAttr",
"QDomCDATASection",
"QDomCharacterData",
"QDomComment",
"QDomDocument",
"QDomDocumentFragment",
"QDomDocumentType",
"QDomElement",
"QDomEntity",
"QDomEntityReference",
"QDomImplementation",
"QDomNamedNodeMap",
"QDomNode",
"QDomNodeList",
"QDomNotation",
"QDomProcessingInstruction",
"QDomText",
"QXmlAttributes",
"QXmlContentHandler",
"QXmlDTDHandler",
"QXmlDeclHandler",
"QXmlDefaultHandler",
"QXmlEntityResolver",
"QXmlErrorHandler",
"QXmlInputSource",
"QXmlLexicalHandler",
"QXmlLocator",
"QXmlNamespaceSupport",
"QXmlParseException",
"QXmlReader",
"QXmlSimpleReader"
],
"QtHelp": [
"QHelpContentItem",
"QHelpContentModel",
"QHelpContentWidget",
"QHelpEngine",
"QHelpEngineCore",
"QHelpIndexModel",
"QHelpIndexWidget",
"QHelpSearchEngine",
"QHelpSearchQuery",
"QHelpSearchQueryWidget",
"QHelpSearchResultWidget"
],
"QtNetwork": [
"QAbstractNetworkCache",
"QAbstractSocket",
"QAuthenticator",
"QHostAddress",
"QHostInfo",
"QLocalServer",
"QLocalSocket",
"QNetworkAccessManager",
"QNetworkAddressEntry",
"QNetworkCacheMetaData",
"QNetworkConfiguration",
"QNetworkConfigurationManager",
"QNetworkCookie",
"QNetworkCookieJar",
"QNetworkDiskCache",
"QNetworkInterface",
"QNetworkProxy",
"QNetworkProxyFactory",
"QNetworkProxyQuery",
"QNetworkReply",
"QNetworkRequest",
"QNetworkSession",
"QSsl",
"QTcpServer",
"QTcpSocket",
"QUdpSocket"
]
}
"""Augment QtCompat
QtCompat contains wrappers and added functionality
to the original bindings, such as the CLI interface
and otherwise incompatible members between bindings,
such as `QHeaderView.setSectionResizeMode`.
"""
QtCompat._cli = _cli
QtCompat._convert = _convert
"""Apply strict mode
This makes Qt.py a subset of the PySide2 members that exist
across all other bindings.
"""
for module, members in _strict_members.items():
for member in members:
orig = getattr(sys.modules[__name__], "_%s" % module)
repl = getattr(sys.modules[__name__], module)
setattr(repl, member, getattr(orig, member))
# Enable direct import of submodules
# E.g. import Qt.QtCore
sys.modules.update({
__name__ + ".QtGui": QtGui,
__name__ + ".QtCore": QtCore,
__name__ + ".QtWidgets": QtWidgets,
__name__ + ".QtXml": QtXml,
__name__ + ".QtNetwork": QtNetwork,
__name__ + ".QtHelp": QtHelp,
__name__ + ".QtCompat": QtCompat,
})
"""
Special case
In some bindings, members are either misplaced or renamed.
TODO: This is difficult to read, compared to the above dictionary.
Find a better way of implementing this, that also simplifies
adding or removing members.
"""
if "PySide2" == Qt.__binding__:
QtCore.QAbstractProxyModel = _QtCore.QAbstractProxyModel
QtCore.QSortFilterProxyModel = _QtCore.QSortFilterProxyModel
QtCore.QStringListModel = _QtGui.QStringListModel
QtCore.QItemSelection = _QtCore.QItemSelection
QtCore.QItemSelectionModel = _QtCore.QItemSelectionModel
if "PyQt5" == Qt.__binding__:
QtCore.QAbstractProxyModel = _QtCore.QAbstractProxyModel
QtCore.QSortFilterProxyModel = _QtCore.QSortFilterProxyModel
QtCore.QStringListModel = _QtCore.QStringListModel
QtCore.QItemSelection = _QtCore.QItemSelection
QtCore.QItemSelectionModel = _QtCore.QItemSelectionModel
if "PySide" == Qt.__binding__:
QtCore.QAbstractProxyModel = _QtGui.QAbstractProxyModel
QtCore.QSortFilterProxyModel = _QtGui.QSortFilterProxyModel
QtCore.QStringListModel = _QtGui.QStringListModel
QtCore.QItemSelection = _QtGui.QItemSelection
QtCore.QItemSelectionModel = _QtGui.QItemSelectionModel
if "PyQt4" == Qt.__binding__:
QtCore.QAbstractProxyModel = _QtGui.QAbstractProxyModel
QtCore.QSortFilterProxyModel = _QtGui.QSortFilterProxyModel
QtCore.QItemSelection = _QtGui.QItemSelection
QtCore.QStringListModel = _QtGui.QStringListModel
QtCore.QItemSelectionModel = _QtGui.QItemSelectionModel
if "PyQt" in Qt.__binding__:
QtCore.Property = _QtCore.pyqtProperty
QtCore.Signal = _QtCore.pyqtSignal
QtCore.Slot = _QtCore.pyqtSlot
else:
QtCore.Property = _QtCore.Property
QtCore.Signal = _QtCore.Signal
QtCore.Slot = _QtCore.Slot
# Hide internal members from external use.
del(_QtCore)
del(_QtGui)
del(_QtWidgets)
del(_bindings)
del(_binding)
del(_found_binding)
# Enable command-line interface
if __name__ == "__main__":
_cli(sys.argv[1:])
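# Example invocation (illustrative): convert a compiled PySide2 .ui module
# so that it imports from Qt.py instead of PySide2 directly.
#
#   python Qt.py --convert my_ui.py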
|
alijafargholi/prman_rfmPrimVarTool
|
src/gui/Qt.py
|
Python
|
gpl-3.0
| 26,880
|
[
"VisIt"
] |
8273a1074fd21481b5bf6339c3adaa99909172dcdc30724b18ace0846430a146
|
'''This example uses a convolutional stack followed by a recurrent stack
and a CTC logloss function to perform optical character recognition
of generated text images. I have no evidence of whether it actually
learns general shapes of text, or just is able to recognize all
the different fonts thrown at it...the purpose is more to demonstrate CTC
inside of Keras. Note that the font list may need to be updated
for the particular OS in use.
This starts off with 4 letter words. For the first 12 epochs, the
difficulty is gradually increased using the TextImageGenerator class
which is both a generator class for test/train data and a Keras
callback class. After 20 epochs, longer sequences are thrown at it
by recompiling the model to handle a wider image and rebuilding
the word list to include two words separated by a space.
The table below shows normalized edit distance values. Theano uses
a slightly different CTC implementation, hence the different results.
            Norm. ED
    Epoch |   TF  |   TH
    ------+-------+-------
       10 | 0.027 | 0.064
       15 | 0.038 | 0.035
       20 | 0.043 | 0.045
       25 | 0.014 | 0.019
This requires cairo and editdistance packages:
pip install cairocffi
pip install editdistance
Created by Mike Henry
https://github.com/mbhenry/
'''
import os
import itertools
import re
import datetime
import cairocffi as cairo
import editdistance
import numpy as np
from scipy import ndimage
import pylab
from keras import backend as K
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers import Input, Dense, Activation
from keras.layers import Reshape, Lambda
from keras.layers.merge import add, concatenate
from keras.models import Model
from keras.layers.recurrent import GRU
from keras.optimizers import SGD
from keras.utils.data_utils import get_file
from keras.preprocessing import image
import keras.callbacks
OUTPUT_DIR = 'image_ocr'
np.random.seed(55)
# this creates larger "blotches" of noise which look
# more realistic than just adding gaussian noise
# assumes greyscale with pixels ranging from 0 to 1
def speckle(img):
severity = np.random.uniform(0, 0.6)
blur = ndimage.gaussian_filter(np.random.randn(*img.shape) * severity, 1)
img_speck = (img + blur)
img_speck[img_speck > 1] = 1
img_speck[img_speck <= 0] = 0
return img_speck
# paints the string in a random location within the bounding box
# also uses a random font, a slight random rotation,
# and a random amount of speckle noise
def paint_text(text, w, h, rotate=False, ud=False, multi_fonts=False):
surface = cairo.ImageSurface(cairo.FORMAT_RGB24, w, h)
with cairo.Context(surface) as context:
context.set_source_rgb(1, 1, 1) # White
context.paint()
# this font list works in Centos 7
if multi_fonts:
fonts = ['Century Schoolbook', 'Courier', 'STIX', 'URW Chancery L', 'FreeMono']
context.select_font_face(np.random.choice(fonts), cairo.FONT_SLANT_NORMAL,
np.random.choice([cairo.FONT_WEIGHT_BOLD, cairo.FONT_WEIGHT_NORMAL]))
else:
context.select_font_face('Courier', cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD)
context.set_font_size(25)
box = context.text_extents(text)
border_w_h = (4, 4)
if box[2] > (w - 2 * border_w_h[1]) or box[3] > (h - 2 * border_w_h[0]):
raise IOError('Could not fit string into image. Max char count is too large for given image width.')
# teach the RNN translational invariance by
# fitting text box randomly on canvas, with some room to rotate
max_shift_x = w - box[2] - border_w_h[0]
max_shift_y = h - box[3] - border_w_h[1]
top_left_x = np.random.randint(0, int(max_shift_x))
if ud:
top_left_y = np.random.randint(0, int(max_shift_y))
else:
top_left_y = h // 2
context.move_to(top_left_x - int(box[0]), top_left_y - int(box[1]))
context.set_source_rgb(0, 0, 0)
context.show_text(text)
buf = surface.get_data()
a = np.frombuffer(buf, np.uint8)
a.shape = (h, w, 4)
a = a[:, :, 0] # grab single channel
a = a.astype(np.float32) / 255
a = np.expand_dims(a, 0)
if rotate:
a = image.random_rotation(a, 3 * (w - top_left_x) / w + 1)
a = speckle(a)
return a
def shuffle_mats_or_lists(matrix_list, stop_ind=None):
ret = []
assert all([len(i) == len(matrix_list[0]) for i in matrix_list])
len_val = len(matrix_list[0])
if stop_ind is None:
stop_ind = len_val
assert stop_ind <= len_val
a = list(range(stop_ind))
np.random.shuffle(a)
a += list(range(stop_ind, len_val))
for mat in matrix_list:
if isinstance(mat, np.ndarray):
ret.append(mat[a])
elif isinstance(mat, list):
ret.append([mat[i] for i in a])
else:
raise TypeError('shuffle_mats_or_lists only supports '
'numpy.array and list objects')
return ret
def text_to_labels(text, num_classes):
ret = []
for char in text:
if char >= 'a' and char <= 'z':
ret.append(ord(char) - ord('a'))
elif char == ' ':
ret.append(26)
return ret
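# Sketch of the inverse mapping (illustrative; decode_batch below performs
# this inline): labels 0-25 map back to a-z and 26 maps to space.
#
#   def labels_to_text(labels):
#       return ''.join(' ' if c == 26 else chr(c + ord('a'))
#                      for c in labels if 0 <= c < 27)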
# only a-z and space... probably not too difficult
# to expand to uppercase and symbols
def is_valid_str(in_str):
search = re.compile(r'[^a-z\ ]').search
return not bool(search(in_str))
# Uses generator functions to supply train/test data.
# Image renderings of text are created on the fly
# each time, with random perturbations
class TextImageGenerator(keras.callbacks.Callback):
def __init__(self, monogram_file, bigram_file, minibatch_size,
img_w, img_h, downsample_factor, val_split,
absolute_max_string_len=16):
self.minibatch_size = minibatch_size
self.img_w = img_w
self.img_h = img_h
self.monogram_file = monogram_file
self.bigram_file = bigram_file
self.downsample_factor = downsample_factor
self.val_split = val_split
self.blank_label = self.get_output_size() - 1
self.absolute_max_string_len = absolute_max_string_len
def get_output_size(self):
return 28
# num_words can be independent of the epoch size due to the use of generators
# as max_string_len grows, num_words can grow
def build_word_list(self, num_words, max_string_len=None, mono_fraction=0.5):
assert max_string_len <= self.absolute_max_string_len
assert num_words % self.minibatch_size == 0
assert (self.val_split * num_words) % self.minibatch_size == 0
self.num_words = num_words
self.string_list = [''] * self.num_words
tmp_string_list = []
self.max_string_len = max_string_len
self.Y_data = np.ones([self.num_words, self.absolute_max_string_len]) * -1
self.X_text = []
self.Y_len = [0] * self.num_words
# monogram file is sorted by frequency in english speech
with open(self.monogram_file, 'rt') as f:
for line in f:
if len(tmp_string_list) == int(self.num_words * mono_fraction):
break
word = line.rstrip()
if max_string_len == -1 or max_string_len is None or len(word) <= max_string_len:
tmp_string_list.append(word)
# bigram file contains common word pairings in english speech
with open(self.bigram_file, 'rt') as f:
lines = f.readlines()
for line in lines:
if len(tmp_string_list) == self.num_words:
break
columns = line.lower().split()
word = columns[0] + ' ' + columns[1]
if is_valid_str(word) and \
(max_string_len == -1 or max_string_len is None or len(word) <= max_string_len):
tmp_string_list.append(word)
if len(tmp_string_list) != self.num_words:
raise IOError('Could not pull enough words from supplied monogram and bigram files. ')
# interlace to mix up the easy and hard words
self.string_list[::2] = tmp_string_list[:self.num_words // 2]
self.string_list[1::2] = tmp_string_list[self.num_words // 2:]
for i, word in enumerate(self.string_list):
self.Y_len[i] = len(word)
self.Y_data[i, 0:len(word)] = text_to_labels(word, self.get_output_size())
self.X_text.append(word)
self.Y_len = np.expand_dims(np.array(self.Y_len), 1)
self.cur_val_index = self.val_split
self.cur_train_index = 0
# each time an image is requested from train/val/test, a new random
# painting of the text is performed
def get_batch(self, index, size, train):
# width and height are backwards from typical Keras convention
# because width is the time dimension when it gets fed into the RNN
if K.image_data_format() == 'channels_first':
X_data = np.ones([size, 1, self.img_w, self.img_h])
else:
X_data = np.ones([size, self.img_w, self.img_h, 1])
labels = np.ones([size, self.absolute_max_string_len])
input_length = np.zeros([size, 1])
label_length = np.zeros([size, 1])
source_str = []
for i in range(0, size):
# Mix in some blank inputs. This seems to be important for
# achieving translational invariance
if train and i > size - 4:
if K.image_data_format() == 'channels_first':
X_data[i, 0, 0:self.img_w, :] = self.paint_func('')[0, :, :].T
else:
X_data[i, 0:self.img_w, :, 0] = self.paint_func('',)[0, :, :].T
labels[i, 0] = self.blank_label
input_length[i] = self.img_w // self.downsample_factor - 2
label_length[i] = 1
source_str.append('')
else:
if K.image_data_format() == 'channels_first':
X_data[i, 0, 0:self.img_w, :] = self.paint_func(self.X_text[index + i])[0, :, :].T
else:
X_data[i, 0:self.img_w, :, 0] = self.paint_func(self.X_text[index + i])[0, :, :].T
labels[i, :] = self.Y_data[index + i]
input_length[i] = self.img_w // self.downsample_factor - 2
label_length[i] = self.Y_len[index + i]
source_str.append(self.X_text[index + i])
inputs = {'the_input': X_data,
'the_labels': labels,
'input_length': input_length,
'label_length': label_length,
'source_str': source_str # used for visualization only
}
outputs = {'ctc': np.zeros([size])} # dummy data for dummy loss function
return (inputs, outputs)
def next_train(self):
while 1:
ret = self.get_batch(self.cur_train_index, self.minibatch_size, train=True)
self.cur_train_index += self.minibatch_size
if self.cur_train_index >= self.val_split:
self.cur_train_index = self.cur_train_index % 32
(self.X_text, self.Y_data, self.Y_len) = shuffle_mats_or_lists(
[self.X_text, self.Y_data, self.Y_len], self.val_split)
yield ret
def next_val(self):
while 1:
ret = self.get_batch(self.cur_val_index, self.minibatch_size, train=False)
self.cur_val_index += self.minibatch_size
if self.cur_val_index >= self.num_words:
self.cur_val_index = self.val_split + self.cur_val_index % 32
yield ret
def on_train_begin(self, logs={}):
self.build_word_list(16000, 4, 1)
self.paint_func = lambda text: paint_text(text, self.img_w, self.img_h,
rotate=False, ud=False, multi_fonts=False)
def on_epoch_begin(self, epoch, logs={}):
# rebind the paint function to implement curriculum learning
if epoch >= 3 and epoch < 6:
self.paint_func = lambda text: paint_text(text, self.img_w, self.img_h,
rotate=False, ud=True, multi_fonts=False)
elif epoch >= 6 and epoch < 9:
self.paint_func = lambda text: paint_text(text, self.img_w, self.img_h,
rotate=False, ud=True, multi_fonts=True)
elif epoch >= 9:
self.paint_func = lambda text: paint_text(text, self.img_w, self.img_h,
rotate=True, ud=True, multi_fonts=True)
if epoch >= 21 and self.max_string_len < 12:
self.build_word_list(32000, 12, 0.5)
# the actual loss calc occurs here despite it not being
# an internal Keras loss function
def ctc_lambda_func(args):
y_pred, labels, input_length, label_length = args
# the 2 is critical here since the first couple outputs of the RNN
# tend to be garbage:
y_pred = y_pred[:, 2:, :]
return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
# For a real OCR application, this should be beam search with a dictionary
# and language model. For this example, best path is sufficient.
def decode_batch(test_func, word_batch):
out = test_func([word_batch])[0]
ret = []
for j in range(out.shape[0]):
out_best = list(np.argmax(out[j, 2:], 1))
out_best = [k for k, g in itertools.groupby(out_best)]
# 26 is space, 27 is CTC blank char
outstr = ''
for c in out_best:
if c >= 0 and c < 26:
outstr += chr(c + ord('a'))
elif c == 26:
outstr += ' '
ret.append(outstr)
return ret
class VizCallback(keras.callbacks.Callback):
def __init__(self, run_name, test_func, text_img_gen, num_display_words=6):
self.test_func = test_func
self.output_dir = os.path.join(
OUTPUT_DIR, run_name)
self.text_img_gen = text_img_gen
self.num_display_words = num_display_words
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
def show_edit_distance(self, num):
num_left = num
mean_norm_ed = 0.0
mean_ed = 0.0
while num_left > 0:
word_batch = next(self.text_img_gen)[0]
num_proc = min(word_batch['the_input'].shape[0], num_left)
decoded_res = decode_batch(self.test_func, word_batch['the_input'][0:num_proc])
for j in range(0, num_proc):
edit_dist = editdistance.eval(decoded_res[j], word_batch['source_str'][j])
mean_ed += float(edit_dist)
mean_norm_ed += float(edit_dist) / len(word_batch['source_str'][j])
num_left -= num_proc
mean_norm_ed = mean_norm_ed / num
mean_ed = mean_ed / num
print('\nOut of %d samples: Mean edit distance: %.3f Mean normalized edit distance: %0.3f'
% (num, mean_ed, mean_norm_ed))
def on_epoch_end(self, epoch, logs={}):
self.model.save_weights(os.path.join(self.output_dir, 'weights%02d.h5' % (epoch)))
self.show_edit_distance(256)
word_batch = next(self.text_img_gen)[0]
res = decode_batch(self.test_func, word_batch['the_input'][0:self.num_display_words])
if word_batch['the_input'][0].shape[0] < 256:
cols = 2
else:
cols = 1
for i in range(self.num_display_words):
pylab.subplot(self.num_display_words // cols, cols, i + 1)
if K.image_data_format() == 'channels_first':
the_input = word_batch['the_input'][i, 0, :, :]
else:
the_input = word_batch['the_input'][i, :, :, 0]
pylab.imshow(the_input.T, cmap='Greys_r')
pylab.xlabel('Truth = \'%s\'\nDecoded = \'%s\'' % (word_batch['source_str'][i], res[i]))
fig = pylab.gcf()
fig.set_size_inches(10, 13)
pylab.savefig(os.path.join(self.output_dir, 'e%02d.png' % (epoch)))
pylab.close()
def train(run_name, start_epoch, stop_epoch, img_w):
# Input Parameters
img_h = 64
words_per_epoch = 16000
val_split = 0.2
val_words = int(words_per_epoch * (val_split))
# Network parameters
conv_filters = 16
kernel_size = (3, 3)
pool_size = 2
time_dense_size = 32
rnn_size = 512
if K.image_data_format() == 'channels_first':
input_shape = (1, img_w, img_h)
else:
input_shape = (img_w, img_h, 1)
fdir = os.path.dirname(get_file('wordlists.tgz',
origin='http://www.mythic-ai.com/datasets/wordlists.tgz', untar=True))
img_gen = TextImageGenerator(monogram_file=os.path.join(fdir, 'wordlist_mono_clean.txt'),
bigram_file=os.path.join(fdir, 'wordlist_bi_clean.txt'),
minibatch_size=32,
img_w=img_w,
img_h=img_h,
downsample_factor=(pool_size ** 2),
val_split=words_per_epoch - val_words
)
act = 'relu'
input_data = Input(name='the_input', shape=input_shape, dtype='float32')
inner = Conv2D(conv_filters, kernel_size, padding='same',
activation=act, kernel_initializer='he_normal',
name='conv1')(input_data)
inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max1')(inner)
inner = Conv2D(conv_filters, kernel_size, padding='same',
activation=act, kernel_initializer='he_normal',
name='conv2')(inner)
inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max2')(inner)
conv_to_rnn_dims = (img_w // (pool_size ** 2), (img_h // (pool_size ** 2)) * conv_filters)
inner = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(inner)
# cuts down input size going into RNN:
inner = Dense(time_dense_size, activation=act, name='dense1')(inner)
    # Two layers of bidirectional GRUs
# GRU seems to work as well, if not better than LSTM:
gru_1 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru1')(inner)
gru_1b = GRU(rnn_size, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru1_b')(inner)
gru1_merged = add([gru_1, gru_1b])
gru_2 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru2')(gru1_merged)
gru_2b = GRU(rnn_size, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru2_b')(gru1_merged)
# transforms RNN output to character activations:
inner = Dense(img_gen.get_output_size(), kernel_initializer='he_normal',
name='dense2')(concatenate([gru_2, gru_2b]))
y_pred = Activation('softmax', name='softmax')(inner)
Model(inputs=input_data, outputs=y_pred).summary()
labels = Input(name='the_labels', shape=[img_gen.absolute_max_string_len], dtype='float32')
input_length = Input(name='input_length', shape=[1], dtype='int64')
label_length = Input(name='label_length', shape=[1], dtype='int64')
# Keras doesn't currently support loss funcs with extra parameters
# so CTC loss is implemented in a lambda layer
loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([y_pred, labels, input_length, label_length])
    # clipnorm seems to speed up convergence
sgd = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)
model = Model(inputs=[input_data, labels, input_length, label_length], outputs=loss_out)
# the loss calc occurs elsewhere, so use a dummy lambda func for the loss
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd)
if start_epoch > 0:
weight_file = os.path.join(OUTPUT_DIR, os.path.join(run_name, 'weights%02d.h5' % (start_epoch - 1)))
model.load_weights(weight_file)
# captures output of softmax so we can decode the output during visualization
test_func = K.function([input_data], [y_pred])
viz_cb = VizCallback(run_name, test_func, img_gen.next_val())
model.fit_generator(generator=img_gen.next_train(), steps_per_epoch=(words_per_epoch - val_words),
epochs=stop_epoch, validation_data=img_gen.next_val(), validation_steps=val_words,
callbacks=[viz_cb, img_gen], initial_epoch=start_epoch)
if __name__ == '__main__':
run_name = datetime.datetime.now().strftime('%Y:%m:%d:%H:%M:%S')
train(run_name, 0, 20, 128)
# increase to wider images and start at epoch 20. The learned weights are reloaded
train(run_name, 20, 25, 512)
|
baojianzhou/DLReadingGroup
|
keras/examples/image_ocr.py
|
Python
|
apache-2.0
| 20,934
|
[
"Gaussian"
] |
0b116d3d718b819303e0b390bcade2117a5d87de5f1cd6e147cb9bda98cd9a1e
|
#! /usr/bin/env python
from MDAnalysis import *
#from MDAnalysis.analysis.align import *
import numpy
import math
import sys
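# Usage sketch (illustrative): python dist_DA10_linker.py my_traj.pdb
# Expects an "init.pdb" topology in the working directory and writes
# per-frame linker center-of-mass distances to my_traj_linker_dist.dat.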
my_traj = sys.argv[1]
end = my_traj.find('.pdb')
u = Universe("init.pdb",my_traj)
v = Universe("init.pdb")
# linker 1
a1 = u.selectAtoms("segid J and resid 123")
b1 = u.selectAtoms("segid N and resid 1")
# linker 2
a2 = u.selectAtoms("segid K and resid 1")
b2 = u.selectAtoms("segid M and resid 123")
fout_dist = my_traj[0:end] + '_linker_dist.dat'
f = open(fout_dist,'w')
for ts in u.trajectory:
distance1 = numpy.linalg.norm(a1.centerOfMass() - b1.centerOfMass())
distance2 = numpy.linalg.norm(a2.centerOfMass() - b2.centerOfMass())
f.write('%7.3f %7.3f\n' % (distance1,distance2))
f.close()
|
demharters/git_scripts
|
dist_DA10_linker.py
|
Python
|
apache-2.0
| 750
|
[
"MDAnalysis"
] |
3bfe4e1236d5771828b6f8e34c18bb62d2f12098254c461fa02355940e514dec
|
#!/usr/bin/env python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Run on a single gbs file to get a periodic table printout or on two to compare gbs contents."""
from __future__ import print_function
import os
import sys
import subprocess
qcdb_module = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..', '..', '..', 'driver'))
sys.path.append(qcdb_module)
import qcdb
from qcdb.libmintsbasissetparser import Gaussian94BasisSetParser
import qcelemental as qcel
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def bas_sanitize(fl):
    if fl.endswith('.gbs'):
fl = fl[:-4]
return fl.lower().replace('+', 'p').replace('*', 's').replace('(', '_').replace(')', '_').replace(',', '_')
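# Illustrative example of the sanitization (hypothetical file name):
#   bas_sanitize('cc-pV(D+d)Z.gbs') -> 'cc-pv_dpd_z'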
parser = Gaussian94BasisSetParser()
elements = qcel.periodictable.E
os.system("echo '#differing basis sets' > basisdunningdiffer.txt")
with open(sys.argv[1], 'r') as basfile:
bascontents = basfile.readlines()
bname = bas_sanitize(sys.argv[1])
isdiff = False
if len(sys.argv) > 2:
isdiff = True
with open(sys.argv[2], 'r') as reffile:
refcontents = reffile.readlines()
rname = bas_sanitize(os.path.basename(sys.argv[2]))
if isdiff:
if bname != rname:
print('%s / %s' % (bname, rname), end='')
else:
print('%-40s' % (bname), end='')
else:
print('%-40s' % (bname), end='')
anychange = False
forbiddenchange = False
postKr = False
for el in elements:
if el.upper() == "RB":
postKr = True
shells, msg, ecp_shells, ecp_msg, ecp_ncore = parser.parse(el.upper(), bascontents)
if isdiff:
rshells, rmsg, recp_shells, recp_msg, recp_ncore = parser.parse(el.upper(), refcontents)
if not shells and not rshells:
print('%s' % ('' if postKr else ' '), end='')
continue
if shells and not rshells:
print(bcolors.OKBLUE + '{:3}'.format(el.upper()) + bcolors.ENDC, end='')
anychange = True
if not shells and rshells:
print(bcolors.FAIL + '{:3}'.format(el.upper()) + bcolors.ENDC, end='')
anychange = True
forbiddenchange = True
if shells and rshells:
mol = qcdb.Molecule("""\n{}\n""".format(el))
mol.update_geometry()
mol.set_basis_all_atoms(bname, role='BASIS')
bdict = {bname: ''.join(bascontents)}
rdict = {bname: ''.join(refcontents)}
bs, msg, ecp = qcdb.BasisSet.construct(parser, mol, 'BASIS', None, bdict, False)
rbs, rmsg, recp = qcdb.BasisSet.construct(parser, mol, 'BASIS', None, rdict, False)
#if bs.allclose(rbs, verbose=2): # see changed coeff/exp
if bs.allclose(rbs): # one line per BS
print('{:3}'.format(el.lower()), end='')
else:
print(bcolors.WARNING + '{:3}'.format(el.upper()) + bcolors.ENDC, end='')
anychange = True
tbs = bs.print_detail(out='tmpB.txt')
rtbs = rbs.print_detail(out='tmpR.txt')
try:
outdiff = subprocess.check_output("diff -bwy -W 180 tmpB.txt tmpR.txt >> basisdunningdiffer.txt", shell=True)
#outdiff = subprocess.check_output("diff -bw --context=1 tmpB.txt tmpR.txt >> basisdunningdiffer.txt", shell=True)
except subprocess.CalledProcessError:
pass
else:
if not shells:
print('%s' % ('' if postKr else ' '), end='')
else:
print('{:3}'.format(el.lower()), end='')
print('')
if anychange and not forbiddenchange:
os.system("echo 'mv {} ../' >> basisdunningfiles.txt".format(sys.argv[1]))
|
susilehtola/psi4
|
psi4/share/psi4/basis/primitives/diff_gbs.py
|
Python
|
lgpl-3.0
| 4,694
|
[
"Psi4"
] |
8f36a5ba18710e80414a7b5f5ba89658f718028d697cb7a3329930d45cd332d5
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import numpy as np
import tempfile
import os
import sys
import mdtraj as md
from mdtraj.formats import LH5TrajectoryFile
from mdtraj.testing import eq
import pytest
on_win = (sys.platform == 'win32')
on_py3 = (sys.version_info >= (3, 0))
# special pytest global to mark all tests in this module
pytestmark = pytest.mark.skipif(on_win and on_py3, reason='lh5 not supported on windows on python 3')
fd, temp = tempfile.mkstemp(suffix='.lh5')
def teardown_module(module):
"""remove the temporary file created by tests in this file
this gets automatically called by pytest"""
os.close(fd)
os.unlink(temp)
def test_write_coordinates():
coordinates = np.random.randn(4, 10, 3)
with LH5TrajectoryFile(temp, 'w') as f:
f.write(coordinates)
with LH5TrajectoryFile(temp) as f:
eq(f.read(), coordinates, decimal=3)
with LH5TrajectoryFile(temp) as f:
f.seek(2)
eq(f.read(), coordinates[2:], decimal=3)
f.seek(0)
eq(f.read(), coordinates[0:], decimal=3)
f.seek(-1, 2)
eq(f.read(), coordinates[3:], decimal=3)
def test_write_coordinates_reshape():
coordinates = np.random.randn(10, 3)
with LH5TrajectoryFile(temp, 'w') as f:
f.write(coordinates)
with LH5TrajectoryFile(temp) as f:
eq(f.read(), coordinates.reshape(1, 10, 3), decimal=3)
def test_write_multiple():
coordinates = np.random.randn(4, 10, 3)
with LH5TrajectoryFile(temp, 'w') as f:
f.write(coordinates)
f.write(coordinates)
with LH5TrajectoryFile(temp) as f:
eq(f.read(), np.vstack((coordinates, coordinates)), decimal=3)
def test_topology(get_fn):
top = md.load(get_fn('native.pdb')).topology
with LH5TrajectoryFile(temp, 'w') as f:
f.topology = top
with LH5TrajectoryFile(temp) as f:
assert f.topology == top
def test_read_slice_0():
coordinates = np.random.randn(4, 10, 3)
with LH5TrajectoryFile(temp, 'w') as f:
f.write(coordinates)
with LH5TrajectoryFile(temp) as f:
eq(f.read(n_frames=2), coordinates[:2], decimal=3)
eq(f.read(n_frames=2), coordinates[2:4], decimal=3)
with LH5TrajectoryFile(temp) as f:
eq(f.read(stride=2), coordinates[::2], decimal=3)
with LH5TrajectoryFile(temp) as f:
eq(f.read(stride=2, atom_indices=np.array([0, 1])), coordinates[::2, [0, 1], :], decimal=3)
def test_vsite_elements(get_fn):
    # Test case for issue #263: a topology containing virtual sites
    # (tip4pew waters) should survive an lh5 save/load round trip
    # without raising; trj2 is only loaded as a smoke test.
    pdb_filename = get_fn('GG-tip4pew.pdb')
    trj = md.load(pdb_filename)
    trj.save_lh5(temp)
    trj2 = md.load(temp, top=pdb_filename)
def test_do_overwrite_0():
with open(temp, 'w') as f:
f.write('a')
with LH5TrajectoryFile(temp, 'w', force_overwrite=True) as f:
f.write(np.random.randn(10, 5, 3))
def test_do_overwrite_1():
with open(temp, 'w') as f:
f.write('a')
with pytest.raises(IOError):
with LH5TrajectoryFile(temp, 'w', force_overwrite=False) as f:
f.write(np.random.randn(10, 5, 3))
|
rmcgibbo/mdtraj
|
tests/test_lh5.py
|
Python
|
lgpl-2.1
| 4,056
|
[
"MDTraj"
] |
cf010878d724b89ff50e4ebb17f653795ea1d342ed0803ed1e43a45d7da81bc2
|
"""
Instructor Dashboard Views
"""
import logging
import datetime
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
import uuid
import pytz
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.http import Http404, HttpResponseServerError
from django.conf import settings
from util.json_request import JsonResponse
from mock import patch
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from openedx.core.lib.xblock_utils import wrap_xblock
from xmodule.html_module import HtmlDescriptor
from xmodule.modulestore.django import modulestore
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from courseware.access import has_access
from courseware.courses import get_course_by_id, get_studio_url
from django_comment_client.utils import has_forum_access
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR
from student.models import CourseEnrollment
from shoppingcart.models import Coupon, PaidCourseRegistration
from course_modes.models import CourseMode, CourseModesArchive
from student.roles import CourseFinanceAdminRole, CourseSalesAdminRole
from certificates.models import CertificateGenerationConfiguration
from certificates import api as certs_api
from class_dashboard.dashboard_data import get_section_display_name, get_array_section_has_problem
from .tools import get_units_with_due_date, title_or_url, bulk_email_is_enabled_for_course
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger(__name__)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id):
""" Display the instructor dashboard for a course. """
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
log.error(u"Unable to find course with course key %s while loading the Instructor Dashboard.", course_id)
return HttpResponseServerError()
course = get_course_by_id(course_key, depth=0)
access = {
'admin': request.user.is_staff,
'instructor': has_access(request.user, 'instructor', course),
'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
'staff': has_access(request.user, 'staff', course),
'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
}
if not access['staff']:
raise Http404()
sections = [
_section_course_info(course, access),
_section_membership(course, access),
_section_cohort_management(course, access),
_section_student_admin(course, access),
_section_data_download(course, access),
_section_analytics(course, access),
]
    # Check whether there is a corresponding entry in the CourseMode table for this Instructor Dashboard course
course_mode_has_price = False
paid_modes = CourseMode.paid_modes_for_course(course_key)
if len(paid_modes) == 1:
course_mode_has_price = True
elif len(paid_modes) > 1:
log.error(
u"Course %s has %s course modes with payment options. Course must only have "
u"one paid course mode to enable eCommerce options.",
unicode(course_key), len(paid_modes)
)
is_white_label = CourseMode.is_white_label(course_key)
if (settings.FEATURES.get('INDIVIDUAL_DUE_DATES') and access['instructor']):
sections.insert(3, _section_extensions(course))
# Gate access to course email by feature flag & by course-specific authorization
if bulk_email_is_enabled_for_course(course_key):
sections.append(_section_send_email(course, access))
    # Gate access to Metrics tab by feature flag and staff authorization
if settings.FEATURES['CLASS_DASHBOARD'] and access['staff']:
sections.append(_section_metrics(course, access))
# Gate access to Ecommerce tab
if course_mode_has_price and (access['finance_admin'] or access['sales_admin']):
sections.append(_section_e_commerce(course, access, paid_modes[0], is_white_label))
# Certificates panel
# This is used to generate example certificates
# and enable self-generated certificates for a course.
certs_enabled = CertificateGenerationConfiguration.current().enabled
if certs_enabled and access['admin']:
sections.append(_section_certificates(course))
disable_buttons = not _is_small_course(course_key)
analytics_dashboard_message = None
if settings.ANALYTICS_DASHBOARD_URL:
# Construct a URL to the external analytics dashboard
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link_start = "<a href=\"{}\" target=\"_blank\">".format(analytics_dashboard_url)
analytics_dashboard_message = _("To gain insights into student enrollment and participation {link_start}visit {analytics_dashboard_name}, our new course analytics product{link_end}.")
analytics_dashboard_message = analytics_dashboard_message.format(
link_start=link_start, link_end="</a>", analytics_dashboard_name=settings.ANALYTICS_DASHBOARD_NAME)
context = {
'course': course,
'old_dashboard_url': reverse('instructor_dashboard_legacy', kwargs={'course_id': unicode(course_key)}),
'studio_url': get_studio_url(course, 'course'),
'sections': sections,
'disable_buttons': disable_buttons,
'analytics_dashboard_message': analytics_dashboard_message
}
return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
## Section functions starting with _section return a dictionary of section data.
## The dictionary must include at least {
## 'section_key': 'circus_expo',
## 'section_display_name': 'Circus Expo'
## }
## section_key will be used as a css attribute, javascript tie-in, and template import filename.
## section_display_name will be used to generate link titles in the nav bar.
def _section_e_commerce(course, access, paid_mode, coupons_enabled):
""" Provide data for the corresponding dashboard section """
course_key = course.id
coupons = Coupon.objects.filter(course_id=course_key).order_by('-is_active')
course_price = paid_mode.min_price
total_amount = None
if access['finance_admin']:
total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(course_key)
section_data = {
'section_key': 'e-commerce',
'section_display_name': _('E-Commerce'),
'access': access,
'course_id': unicode(course_key),
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'ajax_remove_coupon_url': reverse('remove_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_get_coupon_info': reverse('get_coupon_info', kwargs={'course_id': unicode(course_key)}),
'get_user_invoice_preference_url': reverse('get_user_invoice_preference', kwargs={'course_id': unicode(course_key)}),
'sale_validation_url': reverse('sale_validation', kwargs={'course_id': unicode(course_key)}),
'ajax_update_coupon': reverse('update_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_add_coupon': reverse('add_coupon', kwargs={'course_id': unicode(course_key)}),
'get_sale_records_url': reverse('get_sale_records', kwargs={'course_id': unicode(course_key)}),
'get_sale_order_records_url': reverse('get_sale_order_records', kwargs={'course_id': unicode(course_key)}),
'instructor_url': reverse('instructor_dashboard', kwargs={'course_id': unicode(course_key)}),
'get_registration_code_csv_url': reverse('get_registration_codes', kwargs={'course_id': unicode(course_key)}),
'generate_registration_code_csv_url': reverse('generate_registration_codes', kwargs={'course_id': unicode(course_key)}),
'active_registration_code_csv_url': reverse('active_registration_codes', kwargs={'course_id': unicode(course_key)}),
'spent_registration_code_csv_url': reverse('spent_registration_codes', kwargs={'course_id': unicode(course_key)}),
'set_course_mode_url': reverse('set_course_mode_price', kwargs={'course_id': unicode(course_key)}),
'download_coupon_codes_url': reverse('get_coupon_codes', kwargs={'course_id': unicode(course_key)}),
'coupons': coupons,
'sales_admin': access['sales_admin'],
'coupons_enabled': coupons_enabled,
'course_price': course_price,
'total_amount': total_amount
}
return section_data
def _section_certificates(course):
"""Section information for the certificates panel.
The certificates panel allows global staff to generate
example certificates and enable self-generated certificates
for a course.
Arguments:
course (Course)
Returns:
dict
"""
example_cert_status = certs_api.example_certificates_status(course.id)
# Allow the user to enable self-generated certificates for students
# *only* once a set of example certificates has been successfully generated.
# If certificates have been misconfigured for the course (for example, if
# the PDF template hasn't been uploaded yet), then we don't want
# to turn on self-generated certificates for students!
can_enable_for_course = (
example_cert_status is not None and
all(
cert_status['status'] == 'success'
for cert_status in example_cert_status
)
)
return {
'section_key': 'certificates',
'section_display_name': _('Certificates'),
'example_certificate_status': example_cert_status,
'can_enable_for_course': can_enable_for_course,
'enabled_for_course': certs_api.cert_generation_enabled(course.id),
'urls': {
'generate_example_certificates': reverse(
'generate_example_certificates',
kwargs={'course_id': course.id}
),
'enable_certificate_generation': reverse(
'enable_certificate_generation',
kwargs={'course_id': course.id}
)
}
}
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_POST
@login_required
def set_course_mode_price(request, course_id):
"""
set the new course price and add new entry in the CourseModesArchive Table
"""
try:
course_price = int(request.POST['course_price'])
except ValueError:
return JsonResponse(
{'message': _("Please Enter the numeric value for the course price")},
status=400) # status code 400: Bad Request
currency = request.POST['currency']
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course_honor_mode = CourseMode.objects.filter(mode_slug='honor', course_id=course_key)
if not course_honor_mode:
return JsonResponse(
{'message': _("CourseMode with the mode slug({mode_slug}) DoesNotExist").format(mode_slug='honor')},
status=400) # status code 400: Bad Request
CourseModesArchive.objects.create(
course_id=course_id, mode_slug='honor', mode_display_name='Honor Code Certificate',
min_price=getattr(course_honor_mode[0], 'min_price'), currency=getattr(course_honor_mode[0], 'currency'),
expiration_datetime=datetime.datetime.now(pytz.utc), expiration_date=datetime.date.today()
)
course_honor_mode.update(
min_price=course_price,
currency=currency
)
return JsonResponse({'message': _("CourseMode price updated successfully")})
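# Usage sketch (hypothetical request data, not from this codebase): POSTing
# {"course_price": "99", "currency": "usd"} to this view first archives the
# current honor-mode price in CourseModesArchive, then updates the live
# CourseMode row with the new price and currency.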
def _section_course_info(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'course_info',
'section_display_name': _('Course Info'),
'access': access,
'course_id': course_key,
'course_display_name': course.display_name,
'has_started': course.has_started(),
'has_ended': course.has_ended(),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
}
if settings.FEATURES.get('DISPLAY_ANALYTICS_ENROLLMENTS'):
section_data['enrollment_count'] = CourseEnrollment.enrollment_counts(course_key)
if settings.ANALYTICS_DASHBOARD_URL:
dashboard_link = _get_dashboard_link(course_key)
message = _("Enrollment data is now available in {dashboard_link}.").format(dashboard_link=dashboard_link)
section_data['enrollment_message'] = message
if settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'):
section_data['detailed_gitlogs_url'] = reverse('gitlogs_detail', kwargs={'course_id': unicode(course_key)})
try:
advance = lambda memo, (letter, score): "{}: {}, ".format(letter, score) + memo
section_data['grade_cutoffs'] = reduce(advance, course.grade_cutoffs.items(), "")[:-2]
except Exception: # pylint: disable=broad-except
section_data['grade_cutoffs'] = "Not Available"
# section_data['offline_grades'] = offline_grades_available(course_key)
try:
section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]
except Exception: # pylint: disable=broad-except
section_data['course_errors'] = [('Error fetching errors', '')]
return section_data
def _section_membership(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'membership',
'section_display_name': _('Membership'),
'access': access,
'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'upload_student_csv_button_url': reverse('register_and_enroll_students', kwargs={'course_id': unicode(course_key)}),
'modify_beta_testers_button_url': reverse('bulk_beta_modify_access', kwargs={'course_id': unicode(course_key)}),
'list_course_role_members_url': reverse('list_course_role_members', kwargs={'course_id': unicode(course_key)}),
'modify_access_url': reverse('modify_access', kwargs={'course_id': unicode(course_key)}),
'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': unicode(course_key)}),
'update_forum_role_membership_url': reverse('update_forum_role_membership', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_cohort_management(course, access):
""" Provide data for the corresponding cohort management section """
course_key = course.id
section_data = {
'section_key': 'cohort_management',
'section_display_name': _('Cohorts'),
'access': access,
'course_cohort_settings_url': reverse(
'course_cohort_settings',
kwargs={'course_key_string': unicode(course_key)}
),
'cohorts_url': reverse('cohorts', kwargs={'course_key_string': unicode(course_key)}),
'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': unicode(course_key)}),
'discussion_topics_url': reverse('cohort_discussion_topics', kwargs={'course_key_string': unicode(course_key)}),
}
return section_data
def _is_small_course(course_key):
""" Compares against MAX_ENROLLMENT_INSTR_BUTTONS to determine if course enrollment is considered small. """
is_small_course = False
enrollment_count = CourseEnrollment.num_enrolled_in(course_key)
max_enrollment_for_buttons = settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
if max_enrollment_for_buttons is not None:
is_small_course = enrollment_count <= max_enrollment_for_buttons
return is_small_course
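# Usage sketch (hypothetical threshold, not from this codebase): with
# settings.FEATURES["MAX_ENROLLMENT_INSTR_BUTTONS"] = 200, a course with 150
# enrollments is considered small (True) and one with 500 is not (False);
# if the feature is unset, a course is never considered small.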
def _section_student_admin(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
is_small_course = _is_small_course(course_key)
section_data = {
'section_key': 'student_admin',
'section_display_name': _('Student Admin'),
'access': access,
'is_small_course': is_small_course,
'get_student_progress_url_url': reverse('get_student_progress_url', kwargs={'course_id': unicode(course_key)}),
'enrollment_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_url': reverse('reset_student_attempts', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_for_entrance_exam_url': reverse(
'reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'rescore_problem_url': reverse('rescore_problem', kwargs={'course_id': unicode(course_key)}),
'rescore_entrance_exam_url': reverse('rescore_entrance_exam', kwargs={'course_id': unicode(course_key)}),
'student_can_skip_entrance_exam_url': reverse(
'mark_student_can_skip_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_entrace_exam_instructor_tasks_url': reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': unicode(course_key)}),
'spoc_gradebook_url': reverse('spoc_gradebook', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_extensions(course):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'extensions',
'section_display_name': _('Extensions'),
'units_with_due_dates': [(title_or_url(unit), unicode(unit.location))
for unit in get_units_with_due_date(course)],
'change_due_date_url': reverse('change_due_date', kwargs={'course_id': unicode(course.id)}),
'reset_due_date_url': reverse('reset_due_date', kwargs={'course_id': unicode(course.id)}),
'show_unit_extensions_url': reverse('show_unit_extensions', kwargs={'course_id': unicode(course.id)}),
'show_student_extensions_url': reverse('show_student_extensions', kwargs={'course_id': unicode(course.id)}),
}
return section_data
def _section_data_download(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'data_download',
'section_display_name': _('Data Download'),
'access': access,
'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': unicode(course_key)}),
'get_students_features_url': reverse('get_students_features', kwargs={'course_id': unicode(course_key)}),
'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': unicode(course_key)}),
'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': unicode(course_key)}),
'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def null_applicable_aside_types(block): # pylint: disable=unused-argument
"""
get_aside method for monkey-patching into applicable_aside_types
while rendering an HtmlDescriptor for email text editing. This returns
an empty list.
"""
return []
def _section_send_email(course, access):
""" Provide data for the corresponding bulk email section """
course_key = course.id
# Monkey-patch applicable_aside_types to return no asides for the duration of this render
with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types):
# This HtmlDescriptor is only being used to generate a nice text editor.
html_module = HtmlDescriptor(
course.system,
DictFieldData({'data': ''}),
ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake'))
)
fragment = course.system.render(html_module, 'studio_view')
fragment = wrap_xblock(
'LmsRuntime', html_module, 'studio_view', fragment, None,
extra_data={"course-id": unicode(course_key)},
usage_id_serializer=lambda usage_id: quote_slashes(unicode(usage_id)),
# Generate a new request_token here at random, because this module isn't connected to any other
# xblock rendering.
request_token=uuid.uuid1().get_hex()
)
email_editor = fragment.content
section_data = {
'section_key': 'send_email',
'section_display_name': _('Email'),
'access': access,
'send_email': reverse('send_email', kwargs={'course_id': unicode(course_key)}),
'editor': email_editor,
'list_instructor_tasks_url': reverse(
'list_instructor_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_background_tasks_url': reverse(
'list_background_email_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_content_history_url': reverse(
'list_email_content', kwargs={'course_id': unicode(course_key)}
),
}
return section_data
def _get_dashboard_link(course_key):
""" Construct a URL to the external analytics dashboard """
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link = u"<a href=\"{0}\" target=\"_blank\">{1}</a>".format(analytics_dashboard_url,
settings.ANALYTICS_DASHBOARD_NAME)
return link
def _section_analytics(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'instructor_analytics',
'section_display_name': _('Analytics'),
'access': access,
'get_distribution_url': reverse('get_distribution', kwargs={'course_id': unicode(course_key)}),
'proxy_legacy_analytics_url': reverse('proxy_legacy_analytics', kwargs={'course_id': unicode(course_key)}),
}
if settings.ANALYTICS_DASHBOARD_URL:
dashboard_link = _get_dashboard_link(course_key)
message = _("Demographic data is now available in {dashboard_link}.").format(dashboard_link=dashboard_link)
section_data['demographic_message'] = message
return section_data
def _section_metrics(course, access):
"""Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'metrics',
'section_display_name': _('Metrics'),
'access': access,
'course_id': unicode(course_key),
'sub_section_display_name': get_section_display_name(course_key),
'section_has_problem': get_array_section_has_problem(course_key),
'get_students_opened_subsection_url': reverse('get_students_opened_subsection'),
'get_students_problem_grades_url': reverse('get_students_problem_grades'),
'post_metrics_data_csv_url': reverse('post_metrics_data_csv'),
}
return section_data
| antonve/s4-project-mooc | lms/djangoapps/instructor/views/instructor_dashboard.py | Python | agpl-3.0 | 24,093 | ["VisIt"] | 33fc5472c851d7032e3b250d4e0b922342dca1a7e8225f06e629e69d28264bf8 |
#
# AtHomePowerlineServer - networked server for CM11/CM11A/XTB-232 X10 controllers
# Copyright (C) 2014 Dave Hocker
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# See the LICENSE file for more details.
#
# Commands
| dhocker/athomepowerlineserver | commands/__init__.py | Python | gpl-3.0 | 387 | ["xTB"] | 9d999f524f5e3079621296a4f91a33c5dc00d8e90e50b6d3457d2555254714b1 |
""" manage PyTables query interface via Expressions """
import ast
from functools import partial
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_list_like
import pandas.core.common as com
from pandas.compat import u, string_types, DeepChainMap
from pandas.core.base import StringMixin
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
from pandas.core.computation import expr, ops
from pandas.core.computation.ops import is_term, UndefinedVariableError
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.common import _ensure_decoded
from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type
class Scope(expr.Scope):
__slots__ = 'queryables',
def __init__(self, level, global_dict=None, local_dict=None,
queryables=None):
super(Scope, self).__init__(level + 1, global_dict=global_dict,
local_dict=local_dict)
self.queryables = queryables or dict()
class Term(ops.Term):
def __new__(cls, name, env, side=None, encoding=None):
klass = Constant if not isinstance(name, string_types) else cls
supr_new = StringMixin.__new__
return supr_new(klass)
def __init__(self, name, env, side=None, encoding=None):
super(Term, self).__init__(name, env, side=side, encoding=encoding)
def _resolve_name(self):
        # must be a queryable
if self.side == 'left':
if self.name not in self.env.queryables:
raise NameError('name {0!r} is not defined'.format(self.name))
return self.name
# resolve the rhs (and allow it to be None)
try:
return self.env.resolve(self.name, is_local=False)
except UndefinedVariableError:
return self.name
@property
def value(self):
return self._value
class Constant(Term):
def __init__(self, value, env, side=None, encoding=None):
super(Constant, self).__init__(value, env, side=side,
encoding=encoding)
def _resolve_name(self):
return self._name
class BinOp(ops.BinOp):
_max_selectors = 31
def __init__(self, op, lhs, rhs, queryables, encoding):
super(BinOp, self).__init__(op, lhs, rhs)
self.queryables = queryables
self.encoding = encoding
self.filter = None
self.condition = None
def _disallow_scalar_only_bool_ops(self):
pass
def prune(self, klass):
def pr(left, right):
""" create and return a new specialized BinOp from myself """
if left is None:
return right
elif right is None:
return left
k = klass
if isinstance(left, ConditionBinOp):
if (isinstance(left, ConditionBinOp) and
isinstance(right, ConditionBinOp)):
k = JointConditionBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
elif isinstance(left, FilterBinOp):
if (isinstance(left, FilterBinOp) and
isinstance(right, FilterBinOp)):
k = JointFilterBinOp
elif isinstance(left, k):
return left
elif isinstance(right, k):
return right
return k(self.op, left, right, queryables=self.queryables,
encoding=self.encoding).evaluate()
left, right = self.lhs, self.rhs
if is_term(left) and is_term(right):
res = pr(left.value, right.value)
elif not is_term(left) and is_term(right):
res = pr(left.prune(klass), right.value)
elif is_term(left) and not is_term(right):
res = pr(left.value, right.prune(klass))
elif not (is_term(left) or is_term(right)):
res = pr(left.prune(klass), right.prune(klass))
return res
def conform(self, rhs):
""" inplace conform rhs """
if not is_list_like(rhs):
rhs = [rhs]
if isinstance(rhs, np.ndarray):
rhs = rhs.ravel()
return rhs
@property
def is_valid(self):
""" return True if this is a valid field """
return self.lhs in self.queryables
@property
def is_in_table(self):
""" return True if this is a valid column name for generation (e.g. an
actual column in the table) """
return self.queryables.get(self.lhs) is not None
@property
def kind(self):
""" the kind of my field """
return getattr(self.queryables.get(self.lhs), 'kind', None)
@property
def meta(self):
""" the meta of my field """
return getattr(self.queryables.get(self.lhs), 'meta', None)
@property
def metadata(self):
""" the metadata of my field """
return getattr(self.queryables.get(self.lhs), 'metadata', None)
def generate(self, v):
""" create and return the op string for this TermValue """
val = v.tostring(self.encoding)
return "(%s %s %s)" % (self.lhs, self.op, val)
def convert_value(self, v):
""" convert the expression that is in the term to something that is
accepted by pytables """
def stringify(value):
if self.encoding is not None:
encoder = partial(pprint_thing_encoded,
encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
kind = _ensure_decoded(self.kind)
meta = _ensure_decoded(self.meta)
if kind == u('datetime64') or kind == u('datetime'):
if isinstance(v, (int, float)):
v = stringify(v)
v = _ensure_decoded(v)
v = pd.Timestamp(v)
if v.tz is not None:
v = v.tz_convert('UTC')
return TermValue(v, v.value, kind)
elif kind == u('timedelta64') or kind == u('timedelta'):
v = _coerce_scalar_to_timedelta_type(v, unit='s').value
return TermValue(int(v), v, kind)
elif meta == u('category'):
metadata = com._values_from_object(self.metadata)
result = metadata.searchsorted(v, side='left')
# result returns 0 if v is first element or if v is not in metadata
# check that metadata contains v
if not result and v not in metadata:
result = -1
return TermValue(result, result, u('integer'))
elif kind == u('integer'):
v = int(float(v))
return TermValue(v, v, kind)
elif kind == u('float'):
v = float(v)
return TermValue(v, v, kind)
elif kind == u('bool'):
if isinstance(v, string_types):
                v = v.strip().lower() not in [u('false'), u('f'), u('no'),
                                              u('n'), u('none'), u('0'),
                                              u('[]'), u('{}'), u('')]
else:
v = bool(v)
return TermValue(v, v, kind)
elif isinstance(v, string_types):
# string quoting
return TermValue(v, stringify(v), u('string'))
else:
raise TypeError(("Cannot compare {v} of type {typ}"
" to {kind} column").format(v=v, typ=type(v),
kind=kind))
def convert_values(self):
pass
class FilterBinOp(BinOp):
def __unicode__(self):
return pprint_thing("[Filter : [{0}] -> "
"[{1}]".format(self.filter[0], self.filter[1]))
def invert(self):
""" invert the filter """
if self.filter is not None:
f = list(self.filter)
f[1] = self.generate_filter_op(invert=True)
self.filter = tuple(f)
return self
def format(self):
""" return the actual filter format """
return [self.filter]
def evaluate(self):
if not self.is_valid:
raise ValueError("query term is not valid [%s]" % self)
rhs = self.conform(self.rhs)
values = [TermValue(v, v, self.kind) for v in rhs]
if self.is_in_table:
# if too many values to create the expression, use a filter instead
if self.op in ['==', '!='] and len(values) > self._max_selectors:
filter_op = self.generate_filter_op()
self.filter = (
self.lhs,
filter_op,
pd.Index([v.value for v in values]))
return self
return None
# equality conditions
if self.op in ['==', '!=']:
filter_op = self.generate_filter_op()
self.filter = (
self.lhs,
filter_op,
pd.Index([v.value for v in values]))
else:
raise TypeError(
"passing a filterable condition to a non-table indexer [%s]" %
self)
return self
def generate_filter_op(self, invert=False):
if (self.op == '!=' and not invert) or (self.op == '==' and invert):
return lambda axis, vals: ~axis.isin(vals)
else:
return lambda axis, vals: axis.isin(vals)
class JointFilterBinOp(FilterBinOp):
def format(self):
raise NotImplementedError("unable to collapse Joint Filters")
def evaluate(self):
return self
class ConditionBinOp(BinOp):
def __unicode__(self):
return pprint_thing("[Condition : [{0}]]".format(self.condition))
def invert(self):
""" invert the condition """
# if self.condition is not None:
# self.condition = "~(%s)" % self.condition
# return self
raise NotImplementedError("cannot use an invert condition when "
"passing to numexpr")
def format(self):
""" return the actual ne format """
return self.condition
def evaluate(self):
if not self.is_valid:
raise ValueError("query term is not valid [%s]" % self)
# convert values if we are in the table
if not self.is_in_table:
return None
rhs = self.conform(self.rhs)
values = [self.convert_value(v) for v in rhs]
# equality conditions
if self.op in ['==', '!=']:
# too many values to create the expression?
if len(values) <= self._max_selectors:
vs = [self.generate(v) for v in values]
self.condition = "(%s)" % ' | '.join(vs)
# use a filter after reading
else:
return None
else:
self.condition = self.generate(values[0])
return self
class JointConditionBinOp(ConditionBinOp):
def evaluate(self):
self.condition = "(%s %s %s)" % (
self.lhs.condition,
self.op,
self.rhs.condition)
return self
class UnaryOp(ops.UnaryOp):
def prune(self, klass):
if self.op != '~':
raise NotImplementedError("UnaryOp only support invert type ops")
operand = self.operand
operand = operand.prune(klass)
if operand is not None:
if issubclass(klass, ConditionBinOp):
if operand.condition is not None:
return operand.invert()
elif issubclass(klass, FilterBinOp):
if operand.filter is not None:
return operand.invert()
return None
_op_classes = {'unary': UnaryOp}
class ExprVisitor(BaseExprVisitor):
const_type = Constant
term_type = Term
def __init__(self, env, engine, parser, **kwargs):
super(ExprVisitor, self).__init__(env, engine, parser)
for bin_op in self.binary_ops:
setattr(self, 'visit_{0}'.format(self.binary_op_nodes_map[bin_op]),
lambda node, bin_op=bin_op: partial(BinOp, bin_op,
**kwargs))
def visit_UnaryOp(self, node, **kwargs):
if isinstance(node.op, (ast.Not, ast.Invert)):
return UnaryOp('~', self.visit(node.operand))
elif isinstance(node.op, ast.USub):
return self.const_type(-self.visit(node.operand).value, self.env)
elif isinstance(node.op, ast.UAdd):
raise NotImplementedError('Unary addition not supported')
def visit_Index(self, node, **kwargs):
return self.visit(node.value).value
def visit_Assign(self, node, **kwargs):
cmpr = ast.Compare(ops=[ast.Eq()], left=node.targets[0],
comparators=[node.value])
return self.visit(cmpr)
def visit_Subscript(self, node, **kwargs):
        # only allow simple subscripts
value = self.visit(node.value)
slobj = self.visit(node.slice)
try:
value = value.value
        except AttributeError:
pass
try:
return self.const_type(value[slobj], self.env)
except TypeError:
raise ValueError("cannot subscript {0!r} with "
"{1!r}".format(value, slobj))
def visit_Attribute(self, node, **kwargs):
attr = node.attr
value = node.value
ctx = node.ctx.__class__
if ctx == ast.Load:
# resolve the value
resolved = self.visit(value)
# try to get the value to see if we are another expression
try:
resolved = resolved.value
except (AttributeError):
pass
try:
return self.term_type(getattr(resolved, attr), self.env)
except AttributeError:
                # something like datetime.datetime where scope is overridden
if isinstance(value, ast.Name) and value.id == attr:
return resolved
raise ValueError("Invalid Attribute context {0}".format(ctx.__name__))
def translate_In(self, op):
return ast.Eq() if isinstance(op, ast.In) else op
def _rewrite_membership_op(self, node, left, right):
return self.visit(node.op), node.op, left, right
def _validate_where(w):
"""
Validate that the where statement is of the right type.
The type may either be String, Expr, or list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict).
"""
if not (isinstance(w, (Expr, string_types)) or is_list_like(w)):
raise TypeError("where must be passed as a string, Expr, "
"or list-like of Exprs")
return w
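# Usage sketch (illustrative values only):
# _validate_where("index > 5")              # string: returned unchanged
# _validate_where(["index > 5", "A == 1"])  # list-like of terms: returned unchanged
# _validate_where(42)                       # not a string/Expr/list-like: TypeError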
class Expr(expr.Expr):
""" hold a pytables like expression, comprised of possibly multiple 'terms'
Parameters
----------
where : string term expression, Expr, or list-like of Exprs
queryables : a "kinds" map (dict of column name -> kind), or None if column
is non-indexable
encoding : an encoding that will encode the query terms
Returns
-------
an Expr object
Examples
--------
'index>=date'
"columns=['A', 'D']"
'columns=A'
'columns==A'
"~(columns=['A','B'])"
'index>df.index[3] & string="bar"'
'(index>df.index[3] & index<=df.index[6]) | string="bar"'
"ts>=Timestamp('2012-02-01')"
"major_axis>=20130101"
"""
def __init__(self, where, queryables=None, encoding=None, scope_level=0):
where = _validate_where(where)
self.encoding = encoding
self.condition = None
self.filter = None
self.terms = None
self._visitor = None
# capture the environment if needed
local_dict = DeepChainMap()
if isinstance(where, Expr):
local_dict = where.env.scope
where = where.expr
elif isinstance(where, (list, tuple)):
for idx, w in enumerate(where):
if isinstance(w, Expr):
local_dict = w.env.scope
else:
w = _validate_where(w)
where[idx] = w
where = ' & ' .join(["(%s)" % w for w in where]) # noqa
self.expr = where
self.env = Scope(scope_level + 1, local_dict=local_dict)
if queryables is not None and isinstance(self.expr, string_types):
self.env.queryables.update(queryables)
self._visitor = ExprVisitor(self.env, queryables=queryables,
parser='pytables', engine='pytables',
encoding=encoding)
self.terms = self.parse()
def __unicode__(self):
if self.terms is not None:
return pprint_thing(self.terms)
return pprint_thing(self.expr)
def evaluate(self):
""" create and return the numexpr condition and filter """
try:
self.condition = self.terms.prune(ConditionBinOp)
except AttributeError:
raise ValueError("cannot process expression [{0}], [{1}] is not a "
"valid condition".format(self.expr, self))
try:
self.filter = self.terms.prune(FilterBinOp)
except AttributeError:
raise ValueError("cannot process expression [{0}], [{1}] is not a "
"valid filter".format(self.expr, self))
return self.condition, self.filter
class TermValue(object):
""" hold a term value the we use to construct a condition/filter """
def __init__(self, value, converted, kind):
self.value = value
self.converted = converted
self.kind = kind
def tostring(self, encoding):
""" quote the string if not encoded
else encode and return """
if self.kind == u'string':
if encoding is not None:
return self.converted
return '"%s"' % self.converted
elif self.kind == u'float':
# python 2 str(float) is not always
# round-trippable so use repr()
return repr(self.converted)
return self.converted
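# Usage sketch (illustrative values only): with no encoding, string values are
# quoted for the pytables expression; with an encoding they are assumed to be
# pre-encoded and returned as-is.
# TermValue("bar", "bar", u("string")).tostring(None)     # -> '"bar"'
# TermValue("bar", "bar", u("string")).tostring("utf-8")  # -> 'bar'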
def maybe_expression(s):
""" loose checking if s is a pytables-acceptable expression """
if not isinstance(s, string_types):
return False
ops = ExprVisitor.binary_ops + ExprVisitor.unary_ops + ('=',)
# make sure we have an op at least
return any(op in s for op in ops)
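# Usage sketch (illustrative values): the check is purely syntactic -- it only
# requires that at least one operator token appear in the string.
# maybe_expression("index>=date")  # True ('>=' is a binary op)
# maybe_expression("columns")      # False (no operator present)
# maybe_expression(42)             # False (not a string)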
| mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/core/computation/pytables.py | Python | mit | 18,930 | ["VisIt"] | 3b92f3203718f3190dbdd0bb7a1eba150e0837b113933924d45b0bb11a738c19 |
# ======================================================================
# Given the following input data w.r.t. a mitral valve setup:
# - a 3D volume mesh (vtu),
# - a 2D surface mesh representation of the MV segmentation (vtp), and
# - an Annuloplasty Ring representation including vertex IDs (vtp)
# this script computes and sets the Dirichlet Boundary Conditions (BCs)
# data for the HiFlow3-based MVR-Simulation.
#
# NOTE: Adding additional BC-points by means of linear interpolation
# requires the input IDs to be ordered and going around annulus once!!!
#
# How to run the script:
# python script.py valve3d.vtu valve2d.vtp ring.vtp outputname.xml
#
# Author: Nicolai Schoch, EMCL; 2015-04-12.
# ======================================================================
__author__ = 'schoch'
import numpy as np
from numpy import linalg as LA
import sys
import vtk
import xml.etree.ElementTree as ET # NEEDED?!
#from .msmlvtk import * # NEEDED?!
def BCdata_for_Hf3Sim_Producer(inputfilename, surfaceMesh, ringFilename, outputfilename):
# ======================================================================
# define number of given annulus point IDs -----------------------------
# (see notation/representation of Annuloplasty Rings by DKFZ and corresponding addInfo)
numberOfAnnulusPtIDs_ = 16
# get system arguments -------------------------------------------------
valve3dFilename_ = inputfilename
valve2dFilename_ = surfaceMesh
ringFilename_ = ringFilename
outputFilename_ = outputfilename
print " "
print "====================================================================================="
print "=== Execute Python script to produce BCdata for the HiFlow3-based MVR-Simulation ==="
print "====================================================================================="
print " "
# ======================================================================
# read in files: -------------------------------------------------------
# read in 3d valve
vtureader = vtk.vtkXMLUnstructuredGridReader()
vtureader.SetFileName(valve3dFilename_)
vtureader.Update()
valve3d_ = vtureader.GetOutput()
# get surface mesh of valve3d_
geometryFilter = vtk.vtkGeometryFilter()
if vtk.vtkVersion().GetVTKMajorVersion() >= 6:
geometryFilter.SetInputData(valve3d_)
else:
geometryFilter.SetInput(valve3d_)
geometryFilter.Update()
valve3dSurface_ = geometryFilter.GetOutput()
# read in 2d valve
vtpreader = vtk.vtkXMLPolyDataReader()
vtpreader.SetFileName(valve2dFilename_)
vtpreader.Update()
valve2d_ = vtpreader.GetOutput()
# read in ring
vtpreader = vtk.vtkXMLPolyDataReader()
vtpreader.SetFileName(ringFilename_)
vtpreader.Update()
ring_ = vtpreader.GetOutput()
# get vertex ids of valve2d_ and ring_ ---------------------------------
valve2dVertexIds_ = valve2d_.GetPointData().GetArray('VertexIDs')
ringVertexIds_ = ring_.GetPointData().GetArray('VertexIDs')
print "Reading input files: DONE."
# ======================================================================
# init. tree for closest point search ----------------------------------
kDTree = vtk.vtkKdTreePointLocator()
kDTree.SetDataSet(valve3dSurface_)
kDTree.BuildLocator()
# ======================================================================
# arrays for storage of coordinates of annulus points (and interpolated points) on the MV surface and on the ring -----------------
ringPoints_ = np.zeros((2*numberOfAnnulusPtIDs_,3))
valvePoints_ = np.zeros((2*numberOfAnnulusPtIDs_,3))
    # Store coordinates in arrays ---------------------------------------------------------------------------
# NOTE: Alternatively, instead of a loop over all points and looking for their IDs,
# one could also loop over the array of vertexIDs and get the pointID.
# find coordinates of points of ring_
for i in range(ring_.GetNumberOfPoints()):
        if 0 <= int(ringVertexIds_.GetTuple1(i)) < numberOfAnnulusPtIDs_:
ringPoints_[int(ringVertexIds_.GetTuple1(i))] = np.array(ring_.GetPoint(i))
# find coordinates of points of valve2d_
for i in range(valve2d_.GetNumberOfPoints()):
        if 0 <= int(valve2dVertexIds_.GetTuple1(i)) < numberOfAnnulusPtIDs_:
valvePoints_[int(valve2dVertexIds_.GetTuple1(i))] = np.array(valve2d_.GetPoint(i))
# find closest points to points stored in valvePoints_ on valve3dSurface_ and store (i.e. overwrite) them in valvePoints_
for i in range(numberOfAnnulusPtIDs_):
iD = kDTree.FindClosestPoint(valvePoints_[i])
kDTree.GetDataSet().GetPoint(iD, valvePoints_[i])
# ======================================================================
# add additional boundary conditions by linear interpolation -------------------------------------------
# NOTE: this requires the IDs to be ordered and going around annulus once!!!
for i in range(numberOfAnnulusPtIDs_):
valvePoints_[numberOfAnnulusPtIDs_+i] = 0.5 * (valvePoints_[i]+valvePoints_[(i+1)%numberOfAnnulusPtIDs_])
ringPoints_[numberOfAnnulusPtIDs_+i] = 0.5 * (ringPoints_[i]+ringPoints_[(i+1)%numberOfAnnulusPtIDs_])
# ======================================================================
# Compute displacements ------------------------------------------------
displacement_ = ringPoints_ - valvePoints_
# ======================================================================
# convert arrays to strings --------------------------------------------
valvePointString_ = ""
displacementString_ = ""
for i in range(2*numberOfAnnulusPtIDs_):
for j in range(3):
valvePointString_ += str(valvePoints_[i][j])
displacementString_ += str(displacement_[i][j])
if j == 2:
if i < 2*numberOfAnnulusPtIDs_-1:
valvePointString_ += ";"
displacementString_ += ";"
else:
valvePointString_ += ","
displacementString_ += ","
print "Computing BC data: DONE."
# ======================================================================
# Write BC data to XML file --------------------------------------------
# build a tree structure
root = ET.Element("Param")
BCData = ET.SubElement(root, "BCData")
DisplacementConstraintsBCs = ET.SubElement(BCData, "DisplacementConstraintsBCs")
numberOfDPoints = ET.SubElement(DisplacementConstraintsBCs, "NumberOfDisplacedDirichletPoints")
numberOfDPoints.text = str(2*numberOfAnnulusPtIDs_)
dDPoints = ET.SubElement(DisplacementConstraintsBCs, "dDPoints")
dDPoints.text = valvePointString_
dDisplacements = ET.SubElement(DisplacementConstraintsBCs, "dDisplacements")
dDisplacements.text = displacementString_
# wrap it in an ElementTree instance, and save as XML
tree = ET.ElementTree(root)
tree.write(outputFilename_)
# ======================================================================
print "Writing mvrSimBCdata.xml output file: DONE."
print "==========================================="
print " "
| CognitionGuidedSurgery/msml | src/msml/ext/mvrBCdataProducer.py | Python | gpl-3.0 | 6,923 | ["VTK"] | 34b48d1a3d0bad22faa17648e2e5b7b53e2be7ab2837bea48f239129c38effb3 |
"""LAMMPS data format."""
import itertools as it
from collections import OrderedDict
from warnings import warn
import numpy as np
from parmed.parameters import ParameterSet
from scipy.constants import epsilon_0
from mbuild import Box
from mbuild.utils.conversion import RB_to_OPLS
from mbuild.utils.sorting import natural_sort
__all__ = ["write_lammpsdata"]
# returns True if both mins and maxs have been defined, and each have length 3
# otherwise returns False
def _check_minsmaxs(mins, maxs):
if mins and maxs:
if len(mins) == 3 and len(maxs) == 3:
return True
else:
warn(
"mins and maxs passed to write_lammpsdata, but list size is "
"incorrect. mins and maxs will be ignored."
)
return False
else:
return False
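# Usage sketch (illustrative values only):
# _check_minsmaxs([0, 0, 0], [10, 10, 10])  # True: both defined with length 3
# _check_minsmaxs(None, [10, 10, 10])       # False: mins not provided
# _check_minsmaxs([0, 0], [10, 10, 10])     # False, and warns about list size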
def write_lammpsdata(
structure,
filename,
atom_style="full",
unit_style="real",
mins=None,
maxs=None,
detect_forcefield_style=True,
nbfix_in_data_file=True,
use_urey_bradleys=False,
use_rb_torsions=True,
use_dihedrals=False,
):
"""Output a LAMMPS data file.
Outputs a LAMMPS data file in the 'full' atom style format. Default units
are 'real' units. See http://lammps.sandia.gov/doc/atom_style.html for
more information on atom styles.
Parameters
----------
structure : parmed.Structure
ParmEd structure object
filename : str
Path of the output file
atom_style: str
Defines the style of atoms to be saved in a LAMMPS data file. The
following atom styles are currently supported:
'full', 'atomic', 'charge', 'molecular'
see http://lammps.sandia.gov/doc/atom_style.html for more information
on atom styles.
unit_style: str
        Defines the unit style to be saved in a LAMMPS data file. Defaults to
        'real' units. Currently supported styles are: 'real', 'lj';
see https://lammps.sandia.gov/doc/99/units.html for more information
on unit styles
mins : list
minimum box dimension in x, y, z directions
maxs : list
maximum box dimension in x, y, z directions
detect_forcefield_style: boolean
If True, format lammpsdata parameters based on the contents of
the parmed Structure
use_urey_bradleys: boolean
If True, will treat angles as CHARMM-style angles with urey bradley
terms while looking for `structure.urey_bradleys`
use_rb_torsions:
If True, will treat dihedrals OPLS-style torsions while looking for
`structure.rb_torsions`
use_dihedrals:
If True, will treat dihedrals as CHARMM-style dihedrals while looking
for `structure.dihedrals`
Notes
-----
See http://lammps.sandia.gov/doc/2001/data_format.html for a full
description of the LAMMPS data format. Currently the following sections are
supported (in addition to the header): *Masses*, *Nonbond Coeffs*,
*Bond Coeffs*, *Angle Coeffs*, *Dihedral Coeffs*, *Atoms*, *Bonds*,
*Angles*, *Dihedrals*, *Impropers*
OPLS and CHARMM forcefield styles are supported, AMBER forcefield styles
are NOT
    Some of this function has been adapted from `mdtraj`'s support of the
LAMMPSTRJ trajectory format. See
https://github.com/mdtraj/mdtraj/blob/master/mdtraj/formats/lammpstrj.py
for details.
unique_types : a sorted list of unique atomtypes for all atoms in the
structure where atomtype = atom.type.
    unique_bond_types: an enumerated OrderedDict of unique bond types for all
bonds in the structure.
Defined by bond parameters and component atomtypes, in order:
k : bond.type.k
req : bond.type.req
atomtypes : sorted((bond.atom1.type, bond.atom2.type))
unique_angle_types: an enumerated OrderedDict of unique angle types for all
angles in the structure.
Defined by angle parameters and component atomtypes, in order:
k : angle.type.k
theteq : angle.type.theteq
vertex atomtype: angle.atom2.type
        atomtypes: sorted((angle.atom1.type, angle.atom3.type))
    unique_dihedral_types: an enumerated OrderedDict of unique dihedral types
for all dihedrals in the structure.
Defined by dihedral parameters and component atomtypes, in order:
c0 : dihedral.type.c0
c1 : dihedral.type.c1
c2 : dihedral.type.c2
c3 : dihedral.type.c3
c4 : dihedral.type.c4
c5 : dihedral.type.c5
scee : dihedral.type.scee
scnb : dihedral.type.scnb
atomtype 1 : dihedral.atom1.type
atomtype 2 : dihedral.atom2.type
atomtype 3 : dihedral.atom3.type
atomtype 4 : dihedral.atom4.type
"""
if atom_style not in ["atomic", "charge", "molecular", "full"]:
raise ValueError(
'Atom style "{atom_style}" is invalid or is not currently supported'
)
    # Check if structure is parametrized
if unit_style == "lj":
        if any(atom.sigma is None for atom in structure.atoms):
raise ValueError(
"LJ units specified but one or more atoms has undefined LJ "
"parameters."
)
xyz = np.array([[atom.xx, atom.xy, atom.xz] for atom in structure.atoms])
forcefield = True
if structure[0].type == "":
forcefield = False
if forcefield:
types = [atom.type for atom in structure.atoms]
else:
types = [atom.name for atom in structure.atoms]
unique_types = list(set(types))
unique_types.sort(key=natural_sort)
charges = np.array([atom.charge for atom in structure.atoms])
# Convert coordinates to LJ units
if unit_style == "lj":
# Get sigma, mass, and epsilon conversions by finding maximum of each
sigma_conversion_factor = np.max([a.sigma for a in structure.atoms])
epsilon_conversion_factor = np.max([a.epsilon for a in structure.atoms])
mass_conversion_factor = np.max([a.mass for a in structure.atoms])
xyz = xyz / sigma_conversion_factor
charges = (charges * 1.6021e-19) / np.sqrt(
4
* np.pi
* (sigma_conversion_factor * 1e-10)
* (epsilon_conversion_factor * 4184)
* epsilon_0
)
charges[np.isinf(charges)] = 0
else:
sigma_conversion_factor = 1
epsilon_conversion_factor = 1
mass_conversion_factor = 1
    # LAMMPS does not require the box to be centered at any specific origin,
    # so min and max dimensions are needed to write the file in a consistent
    # way. The parmed structure only stores the box lengths, so it is not
    # rigorous to assume bounds of 0 to L or -L/2 to L/2.
    # NOTE: 0 to L is the current default; mins and maxs should be passed by the user
if _check_minsmaxs(mins, maxs):
box = Box.from_mins_maxs_angles(
mins=mins, maxs=maxs, angles=structure.box[3:6]
)
else:
# Internally use nm
box = Box(
lengths=np.array([0.1 * val for val in structure.box[0:3]]),
angles=structure.box[3:6],
)
warn(
"Explicit box bounds (i.e., mins and maxs) were not provided. Box "
"bounds are assumed to be min = 0 and max = length in each "
"direction. This may not produce a system with the expected "
"spatial location and may cause non-periodic systems to fail. "
"Bounds can be defined explicitly by passing the them to the "
"write_lammpsdata function or by passing box info to the save "
"function."
)
# Divide by conversion factor
Lx = box.Lx * (1 / sigma_conversion_factor)
Ly = box.Ly * (1 / sigma_conversion_factor)
Lz = box.Lz * (1 / sigma_conversion_factor)
box = Box(lengths=(Lx, Ly, Lz), angles=box.angles)
# Lammps syntax depends on the functional form
# Infer functional form based on the properties of the structure
if detect_forcefield_style:
# Check angles
if len(structure.urey_bradleys) > 0:
print("Urey bradley terms detected, will use angle_style charmm")
use_urey_bradleys = True
else:
print(
"No urey bradley terms detected, will use angle_style harmonic"
)
use_urey_bradleys = False
# Check dihedrals
if len(structure.rb_torsions) > 0:
print("RB Torsions detected, will use dihedral_style opls")
use_rb_torsions = True
else:
use_rb_torsions = False
if len(structure.dihedrals) > 0:
print("Charmm dihedrals detected, will use dihedral_style charmm")
use_dihedrals = True
else:
use_dihedrals = False
if use_rb_torsions and use_dihedrals:
raise ValueError(
"Multiple dihedral styles detected, check your "
"Forcefield XML and structure"
)
# Check impropers
for dihedral in structure.dihedrals:
if dihedral.improper:
raise ValueError(
"Amber-style impropers are currently not supported"
)
bonds = [[b.atom1.idx + 1, b.atom2.idx + 1] for b in structure.bonds]
angles = [
[angle.atom1.idx + 1, angle.atom2.idx + 1, angle.atom3.idx + 1]
for angle in structure.angles
]
if use_rb_torsions:
dihedrals = [
[d.atom1.idx + 1, d.atom2.idx + 1, d.atom3.idx + 1, d.atom4.idx + 1]
for d in structure.rb_torsions
]
elif use_dihedrals:
dihedrals = [
[d.atom1.idx + 1, d.atom2.idx + 1, d.atom3.idx + 1, d.atom4.idx + 1]
for d in structure.dihedrals
]
else:
dihedrals = []
impropers = [
[i.atom1.idx + 1, i.atom2.idx + 1, i.atom3.idx + 1, i.atom4.idx + 1]
for i in structure.impropers
]
if bonds:
if len(structure.bond_types) == 0:
bond_types = np.ones(len(bonds), dtype=int)
else:
bond_types, unique_bond_types = _get_bond_types(
structure,
bonds,
sigma_conversion_factor,
epsilon_conversion_factor,
)
if angles:
angle_types, unique_angle_types = _get_angle_types(
structure,
use_urey_bradleys,
sigma_conversion_factor,
epsilon_conversion_factor,
)
if dihedrals:
dihedral_types, unique_dihedral_types = _get_dihedral_types(
structure, use_rb_torsions, use_dihedrals, epsilon_conversion_factor
)
if impropers:
improper_types, unique_improper_types = _get_impropers(
structure, epsilon_conversion_factor
)
with open(filename, "w") as data:
data.write(f"{filename} - created by mBuild; units = {unit_style}\n\n")
data.write("{:d} atoms\n".format(len(structure.atoms)))
if atom_style in ["full", "molecular"]:
data.write("{:d} bonds\n".format(len(bonds)))
data.write("{:d} angles\n".format(len(angles)))
data.write("{:d} dihedrals\n".format(len(dihedrals)))
data.write("{:d} impropers\n\n".format(len(impropers)))
data.write("{:d} atom types\n".format(len(set(types))))
if atom_style in ["full", "molecular"]:
if bonds:
data.write("{:d} bond types\n".format(len(set(bond_types))))
if angles:
data.write("{:d} angle types\n".format(len(set(angle_types))))
if dihedrals:
data.write(
"{:d} dihedral types\n".format(len(set(dihedral_types)))
)
if impropers:
data.write(
"{:d} improper types\n".format(len(set(improper_types)))
)
data.write("\n")
# Box data
# NOTE: Needs better logic handling maxs and mins of a bounding box
# NOTE: JBG, "this should be a method/attribute of Compound?"
if np.allclose(box.angles, 90) and (mins is None):
for i, dim in enumerate(["x", "y", "z"]):
data.write(
"{0:.6f} {1:.6f} {2}lo {2}hi\n".format(
0.0, 10.0 * box.lengths[i], dim
)
)
# NOTE:
# currently non-orthogonal bounding box translates
# Compound such that mins are new origin
else:
a = 10.0 * box.Lx
b = 10.0 * box.Ly
c = 10.0 * box.Lz
alpha, beta, gamma = np.radians(box.angles)
xy = box.xy
xz = box.xz
yz = box.yz
# NOTE: using (0,0,0) as origin
xlo, ylo, zlo = (0.0, 0.0, 0.0)
xhi = xlo + a
yhi = ylo + b
zhi = zlo + c
xlo_bound = xlo + np.min([0.0, xy, xz, xy + xz])
xhi_bound = xhi + np.max([0.0, xy, xz, xy + xz])
ylo_bound = ylo + np.min([0.0, yz])
yhi_bound = yhi + np.max([0.0, yz])
zlo_bound = zlo
zhi_bound = zhi
data.write("{0:.6f} {1:.6f} xlo xhi\n".format(xlo_bound, xhi_bound))
data.write("{0:.6f} {1:.6f} ylo yhi\n".format(ylo_bound, yhi_bound))
data.write("{0:.6f} {1:.6f} zlo zhi\n".format(zlo_bound, zhi_bound))
data.write("{0:.6f} {1:.6f} {2:6f} xy xz yz\n".format(xy, xz, yz))
# Mass data
masses = (
np.array([atom.mass for atom in structure.atoms])
/ mass_conversion_factor
)
mass_dict = dict(
[
(unique_types.index(atom_type) + 1, mass)
for atom_type, mass in zip(types, masses)
]
)
data.write("\nMasses\n\n")
for atom_type, mass in sorted(mass_dict.items()):
data.write(
"{:d}\t{:.6f}\t# {}\n".format(
atom_type, mass, unique_types[atom_type - 1]
)
)
if forcefield:
epsilons = (
np.array([atom.epsilon for atom in structure.atoms])
/ epsilon_conversion_factor
)
sigmas = (
np.array([atom.sigma for atom in structure.atoms])
/ sigma_conversion_factor
)
forcefields = [atom.type for atom in structure.atoms]
epsilon_dict = dict(
[
(unique_types.index(atom_type) + 1, epsilon)
for atom_type, epsilon in zip(types, epsilons)
]
)
sigma_dict = dict(
[
(unique_types.index(atom_type) + 1, sigma)
for atom_type, sigma in zip(types, sigmas)
]
)
forcefield_dict = dict(
[
(unique_types.index(atom_type) + 1, forcefield)
for atom_type, forcefield in zip(types, forcefields)
]
)
# Modified cross-interactions
if structure.has_NBFIX():
params = ParameterSet.from_structure(structure)
# Sort keys (maybe they should be sorted in ParmEd)
new_nbfix_types = OrderedDict()
for key, val in params.nbfix_types.items():
sorted_key = tuple(sorted(key))
if sorted_key in new_nbfix_types:
warn("Sorted key matches an existing key")
if new_nbfix_types[sorted_key]:
warn(
"nbfixes are not symmetric, overwriting old "
"nbfix"
)
new_nbfix_types[sorted_key] = params.nbfix_types[key]
params.nbfix_types = new_nbfix_types
warn(
"Explicitly writing cross interactions using mixing rule: "
"{}".format(structure.combining_rule)
)
coeffs = OrderedDict()
for combo in it.combinations_with_replacement(unique_types, 2):
                # Attempt to find pair coeffs in nbfixes
if combo in params.nbfix_types:
type1 = unique_types.index(combo[0]) + 1
type2 = unique_types.index(combo[1]) + 1
epsilon = params.nbfix_types[combo][
0
] # kcal OR lj units
rmin = params.nbfix_types[combo][
1
] # Angstrom OR lj units
sigma = rmin / 2 ** (1 / 6)
coeffs[(type1, type2)] = (
round(sigma, 8),
round(epsilon, 8),
)
else:
type1 = unique_types.index(combo[0]) + 1
type2 = unique_types.index(combo[1]) + 1
# Might not be necessary to be this explicit
if type1 == type2:
sigma = sigma_dict[type1]
epsilon = epsilon_dict[type1]
else:
if structure.combining_rule == "lorentz":
sigma = (
sigma_dict[type1] + sigma_dict[type2]
) * 0.5
elif structure.combining_rule == "geometric":
sigma = (
sigma_dict[type1] * sigma_dict[type2]
) ** 0.5
else:
raise ValueError(
"Only lorentz and geometric combining "
"rules are supported"
)
epsilon = (
epsilon_dict[type1] * epsilon_dict[type2]
) ** 0.5
coeffs[(type1, type2)] = (
round(sigma, 8),
round(epsilon, 8),
)
if nbfix_in_data_file:
data.write("\nPairIJ Coeffs # modified lj\n")
data.write(
"# type1 type2\tepsilon (kcal/mol)\tsigma (Angstrom)\n"
)
for (type1, type2), (sigma, epsilon) in coeffs.items():
data.write(
"{0} \t{1} \t{2} \t\t{3}\t\t# {4}\t{5}\n".format(
type1,
type2,
epsilon,
sigma,
forcefield_dict[type1],
forcefield_dict[type2],
)
)
else:
data.write("\nPair Coeffs # lj\n\n")
for idx, epsilon in sorted(epsilon_dict.items()):
data.write(
"{}\t{:.5f}\t{:.5f}\n".format(
idx, epsilon, sigma_dict[idx]
)
)
print("Copy these commands into your input script:\n")
print(
"# type1 type2\tepsilon (kcal/mol)\tsigma (Angstrom)\n"
)
for (type1, type2), (sigma, epsilon) in coeffs.items():
print(
"pair_coeff\t{0} \t{1} \t{2} \t\t{3} \t\t# {4} \t{5}".format(
type1,
type2,
epsilon,
sigma,
forcefield_dict[type1],
forcefield_dict[type2],
)
)
# Pair coefficients
else:
data.write("\nPair Coeffs # lj \n")
if unit_style == "real":
data.write("#\tepsilon (kcal/mol)\t\tsigma (Angstrom)\n")
elif unit_style == "lj":
data.write("#\treduced_epsilon \t\treduced_sigma \n")
for idx, epsilon in sorted(epsilon_dict.items()):
data.write(
"{}\t{:.5f}\t\t{:.5f}\t\t# {}\n".format(
idx, epsilon, sigma_dict[idx], forcefield_dict[idx]
)
)
# Bond coefficients
if bonds:
data.write("\nBond Coeffs # harmonic\n")
if unit_style == "real":
data.write("#\tk(kcal/mol/angstrom^2)\t\treq(angstrom)\n")
elif unit_style == "lj":
data.write("#\treduced_k\t\treduced_req\n")
sorted_bond_types = {
k: v
for k, v in sorted(
unique_bond_types.items(), key=lambda item: item[1]
)
}
for params, idx in sorted_bond_types.items():
data.write(
"{}\t{}\t\t{}\t\t# {}\t{}\n".format(
idx,
params[0],
params[1],
params[2][0],
params[2][1],
)
)
# Angle coefficients
if angles:
sorted_angle_types = {
k: v
for k, v in sorted(
unique_angle_types.items(), key=lambda item: item[1]
)
}
if use_urey_bradleys:
data.write("\nAngle Coeffs # charmm\n")
data.write(
"#\tk(kcal/mol/rad^2)\t\ttheteq(deg)\tk(kcal/mol/angstrom^2)\treq(angstrom)\n"
)
for params, idx in sorted_angle_types.items():
data.write(
"{}\t{}\t{:.5f}\t{:.5f}\t{:.5f}\n".format(
idx, *params
)
)
else:
data.write("\nAngle Coeffs # harmonic\n")
data.write("#\treduced_k\t\ttheteq(deg)\n")
for params, idx in sorted_angle_types.items():
data.write(
"{}\t{}\t\t{:.5f}\t# {}\t{}\t{}\n".format(
idx,
params[0],
params[1],
params[3][0],
params[2],
params[3][1],
)
)
# Dihedral coefficients
if dihedrals:
sorted_dihedral_types = {
k: v
for k, v in sorted(
unique_dihedral_types.items(), key=lambda item: item[1]
)
}
if use_rb_torsions:
data.write("\nDihedral Coeffs # opls\n")
if unit_style == "real":
data.write(
"#\tf1(kcal/mol)\tf2(kcal/mol)\tf3(kcal/mol)\tf4(kcal/mol)\n"
)
elif unit_style == "lj":
data.write("#\tf1\tf2\tf3\tf4 (all lj reduced units)\n")
for params, idx in sorted_dihedral_types.items():
opls_coeffs = RB_to_OPLS(
params[0],
params[1],
params[2],
params[3],
params[4],
params[5],
)
data.write(
"{}\t{:.5f}\t{:.5f}\t\t{:.5f}\t\t{:.5f}\t# {}\t{}\t{}\t{}\n".format(
idx,
opls_coeffs[0],
opls_coeffs[1],
opls_coeffs[2],
opls_coeffs[3],
params[8],
params[9],
params[10],
params[11],
)
)
elif use_dihedrals:
data.write("\nDihedral Coeffs # charmm\n")
data.write("#k, n, phi, weight\n")
for params, idx in sorted_dihedral_types.items():
data.write(
"{}\t{:.5f}\t{:d}\t{:d}\t{:.5f}\t# {}\t{}\t{}\t{}\n".format(
idx,
params[0],
params[1],
params[2],
params[3],
params[6],
params[7],
params[8],
params[9],
)
)
# Improper coefficients
if impropers:
sorted_improper_types = {
k: v
for k, v in sorted(
unique_improper_types.items(), key=lambda item: item[1]
)
}
data.write("\nImproper Coeffs # harmonic\n")
data.write("#k, phi\n")
for params, idx in sorted_improper_types.items():
data.write(
"{}\t{:.5f}\t{:.5f}\t# {}\t{}\t{}\t{}\n".format(
idx,
params[0],
params[1],
params[2],
params[3],
params[4],
params[5],
)
)
# Atom data
data.write("\nAtoms\n\n")
if atom_style == "atomic":
atom_line = "{index:d}\t{type_index:d}\t{x:.6f}\t{y:.6f}\t{z:.6f}\n"
elif atom_style == "charge":
if unit_style == "real":
atom_line = "{index:d}\t{type_index:d}\t{charge:.6f}\t{x:.6f}\t{y:.6f}\t{z:.6f}\n"
elif unit_style == "lj":
atom_line = "{index:d}\t{type_index:d}\t{charge:.4ef}\t{x:.6f}\t{y:.6f}\t{z:.6f}\n"
elif atom_style == "molecular":
atom_line = "{index:d}\t{zero:d}\t{type_index:d}\t{x:.6f}\t{y:.6f}\t{z:.6f}\n"
elif atom_style == "full":
if unit_style == "real":
atom_line = "{index:d}\t{zero:d}\t{type_index:d}\t{charge:.6f}\t{x:.6f}\t{y:.6f}\t{z:.6f}\n"
elif unit_style == "lj":
atom_line = "{index:d}\t{zero:d}\t{type_index:d}\t{charge:.4e}\t{x:.6f}\t{y:.6f}\t{z:.6f}\n"
for i, coords in enumerate(xyz):
data.write(
atom_line.format(
index=i + 1,
type_index=unique_types.index(types[i]) + 1,
zero=structure.atoms[i].residue.idx,
charge=charges[i],
x=coords[0],
y=coords[1],
z=coords[2],
)
)
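    # Illustrative (values not from the source): with atom_style == "full" and
    # unit_style == "real", atom_line renders one row per atom such as
    #   1    0    3    -0.500000    1.000000    2.000000    3.000000
    # i.e. atom id, residue index, type id, charge, then x/y/z coordinates.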
if atom_style in ["full", "molecular"]:
# Bond data
if bonds:
data.write("\nBonds\n\n")
for i, bond in enumerate(bonds):
data.write(
"{:d}\t{:d}\t{:d}\t{:d}\n".format(
i + 1, bond_types[i], bond[0], bond[1]
)
)
# Angle data
if angles:
data.write("\nAngles\n\n")
for i, angle in enumerate(angles):
data.write(
"{:d}\t{:d}\t{:d}\t{:d}\t{:d}\n".format(
i + 1, angle_types[i], angle[0], angle[1], angle[2]
)
)
# Dihedral data
if dihedrals:
data.write("\nDihedrals\n\n")
for i, dihedral in enumerate(dihedrals):
data.write(
"{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\n".format(
i + 1,
dihedral_types[i],
dihedral[0],
dihedral[1],
dihedral[2],
dihedral[3],
)
)
        # Improper data
if impropers:
data.write("\nImpropers\n\n")
for i, improper in enumerate(impropers):
data.write(
"{:d}\t{:d}\t{:d}\t{:d}\t{:d}\t{:d}\n".format(
i + 1,
improper_types[i],
improper[2],
improper[1],
improper[0],
improper[3],
)
)
def _get_bond_types(
structure, bonds, sigma_conversion_factor, epsilon_conversion_factor
):
unique_bond_types = dict(
enumerate(
set(
[
(
round(
bond.type.k
* (
sigma_conversion_factor ** 2
/ epsilon_conversion_factor
),
3,
),
round(bond.type.req / sigma_conversion_factor, 3),
tuple(sorted((bond.atom1.type, bond.atom2.type))),
)
for bond in structure.bonds
]
)
)
)
unique_bond_types = OrderedDict(
[(y, x + 1) for x, y in unique_bond_types.items()]
)
bond_types = [
unique_bond_types[
(
round(
bond.type.k
* (
sigma_conversion_factor ** 2 / epsilon_conversion_factor
),
3,
),
round(bond.type.req / sigma_conversion_factor, 3),
tuple(sorted((bond.atom1.type, bond.atom2.type))),
)
]
for bond in structure.bonds
]
return bond_types, unique_bond_types
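# A minimal sketch (toy data, not from mbuild) of the type-indexing idiom used
# above and in the helpers below: parameters are rounded, de-duplicated with
# set(), and mapped to 1-based LAMMPS type ids via an OrderedDict.
#   >>> params = [(1.0, 2.0), (1.0, 2.0), (3.0, 4.0)]
#   >>> unique = OrderedDict([(y, x + 1) for x, y in enumerate(set(params))])
#   >>> [unique[p] for p in params]   # e.g. [1, 1, 2]; set() order is arbitrary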
def _get_angle_types(
structure,
use_urey_bradleys,
sigma_conversion_factor,
epsilon_conversion_factor,
):
if use_urey_bradleys:
charmm_angle_types = []
for angle in structure.angles:
ub_k = 0
ub_req = 0
for ub in structure.urey_bradleys:
if (angle.atom1, angle.atom3) == (ub.atom1, ub.atom2):
ub_k = ub.type.k
ub_req = ub.type.req
charmm_angle_types.append(
(
round(
angle.type.k
* (
sigma_conversion_factor ** 2
/ epsilon_conversion_factor
),
3,
),
round(angle.type.theteq, 3),
round(ub_k / epsilon_conversion_factor, 3),
round(ub_req, 3),
tuple(sorted((angle.atom1.type, angle.atom3.type))),
)
)
unique_angle_types = dict(enumerate(set(charmm_angle_types)))
unique_angle_types = OrderedDict(
[(y, x + 1) for x, y in unique_angle_types.items()]
)
angle_types = [
unique_angle_types[ub_info] for ub_info in charmm_angle_types
]
else:
unique_angle_types = dict(
enumerate(
set(
[
(
round(
angle.type.k
* (
sigma_conversion_factor ** 2
/ epsilon_conversion_factor
),
3,
),
round(angle.type.theteq, 3),
angle.atom2.type,
tuple(sorted((angle.atom1.type, angle.atom3.type))),
)
for angle in structure.angles
]
)
)
)
unique_angle_types = OrderedDict(
[(y, x + 1) for x, y in unique_angle_types.items()]
)
angle_types = [
unique_angle_types[
(
round(
angle.type.k
* (
sigma_conversion_factor ** 2
/ epsilon_conversion_factor
),
3,
),
round(angle.type.theteq, 3),
angle.atom2.type,
tuple(sorted((angle.atom1.type, angle.atom3.type))),
)
]
for angle in structure.angles
]
return angle_types, unique_angle_types
def _get_dihedral_types(
structure, use_rb_torsions, use_dihedrals, epsilon_conversion_factor
):
lj_unit = 1 / epsilon_conversion_factor
if use_rb_torsions:
unique_dihedral_types = dict(
enumerate(
set(
[
(
round(dihedral.type.c0 * lj_unit, 3),
round(dihedral.type.c1 * lj_unit, 3),
round(dihedral.type.c2 * lj_unit, 3),
round(dihedral.type.c3 * lj_unit, 3),
round(dihedral.type.c4 * lj_unit, 3),
round(dihedral.type.c5 * lj_unit, 3),
round(dihedral.type.scee, 1),
round(dihedral.type.scnb, 1),
dihedral.atom1.type,
dihedral.atom2.type,
dihedral.atom3.type,
dihedral.atom4.type,
)
for dihedral in structure.rb_torsions
]
)
)
)
unique_dihedral_types = OrderedDict(
[(y, x + 1) for x, y in unique_dihedral_types.items()]
)
dihedral_types = [
unique_dihedral_types[
(
round(dihedral.type.c0 * lj_unit, 3),
round(dihedral.type.c1 * lj_unit, 3),
round(dihedral.type.c2 * lj_unit, 3),
round(dihedral.type.c3 * lj_unit, 3),
round(dihedral.type.c4 * lj_unit, 3),
round(dihedral.type.c5 * lj_unit, 3),
round(dihedral.type.scee, 1),
round(dihedral.type.scnb, 1),
dihedral.atom1.type,
dihedral.atom2.type,
dihedral.atom3.type,
dihedral.atom4.type,
)
]
for dihedral in structure.rb_torsions
]
elif use_dihedrals:
charmm_dihedrals = []
structure.join_dihedrals()
for dihedral in structure.dihedrals:
if not dihedral.improper:
weight = 1 / len(dihedral.type)
for dih_type in dihedral.type:
charmm_dihedrals.append(
(
round(dih_type.phi_k * lj_unit, 3),
int(round(dih_type.per, 0)),
int(round(dih_type.phase, 0)),
round(weight, 4),
round(dih_type.scee, 1),
round(dih_type.scnb, 1),
dihedral.atom1.type,
dihedral.atom2.type,
dihedral.atom3.type,
dihedral.atom4.type,
)
)
unique_dihedral_types = dict(enumerate(set(charmm_dihedrals)))
unique_dihedral_types = OrderedDict(
[(y, x + 1) for x, y in unique_dihedral_types.items()]
)
dihedral_types = [
unique_dihedral_types[dihedral_info]
for dihedral_info in charmm_dihedrals
]
return dihedral_types, unique_dihedral_types
def _get_impropers(structure, epsilon_conversion_factor):
lj_unit = 1 / epsilon_conversion_factor
unique_improper_types = dict(
enumerate(
set(
[
(
round(improper.type.psi_k * lj_unit, 3),
round(improper.type.psi_eq, 3),
improper.atom3.type,
improper.atom2.type,
improper.atom1.type,
improper.atom4.type,
)
for improper in structure.impropers
]
)
)
)
unique_improper_types = OrderedDict(
[(y, x + 1) for x, y in unique_improper_types.items()]
)
improper_types = [
unique_improper_types[
(
round(improper.type.psi_k * lj_unit, 3),
round(improper.type.psi_eq, 3),
improper.atom3.type,
improper.atom2.type,
improper.atom1.type,
improper.atom4.type,
)
]
for improper in structure.impropers
]
return improper_types, unique_improper_types
def _get_box_information(
structure,
):
pass
|
iModels/mbuild
|
mbuild/formats/lammpsdata.py
|
Python
|
mit
| 38,981
|
[
"Amber",
"CHARMM",
"LAMMPS",
"MDTraj"
] |
d34ed777f3abe136ca36f41be2dd325867446670684b2dff2b9764ea4f093820
|
"""
Some useful tools for dealing with the oddities of XML serialisation
"""
import re, sys
from octopus.core import app
###########################################################
# XML Character encoding hacks
###########################################################
_illegal_unichrs = [(0x00, 0x08), (0x0B, 0x0C), (0x0E, 0x1F),
(0x7F, 0x84), (0x86, 0x9F),
(0xFDD0, 0xFDDF), (0xFFFE, 0xFFFF)]
if sys.maxunicode >= 0x10000: # not narrow build
_illegal_unichrs.extend([(0x1FFFE, 0x1FFFF), (0x2FFFE, 0x2FFFF),
(0x3FFFE, 0x3FFFF), (0x4FFFE, 0x4FFFF),
(0x5FFFE, 0x5FFFF), (0x6FFFE, 0x6FFFF),
(0x7FFFE, 0x7FFFF), (0x8FFFE, 0x8FFFF),
(0x9FFFE, 0x9FFFF), (0xAFFFE, 0xAFFFF),
(0xBFFFE, 0xBFFFF), (0xCFFFE, 0xCFFFF),
(0xDFFFE, 0xDFFFF), (0xEFFFE, 0xEFFFF),
(0xFFFFE, 0xFFFFF), (0x10FFFE, 0x10FFFF)])
_illegal_ranges = ["%s-%s" % (unichr(low), unichr(high))
for (low, high) in _illegal_unichrs]
_illegal_xml_chars_RE = re.compile(u'[%s]' % u''.join(_illegal_ranges))
def valid_XML_char_ordinal(i):
"""
Is the character i an allowed XML character
:param i: the character
:return: True if allowed, False if not
"""
return ( # conditions ordered by presumed frequency
0x20 <= i <= 0xD7FF
or i in (0x9, 0xA, 0xD)
or 0xE000 <= i <= 0xFFFD
or 0x10000 <= i <= 0x10FFFF
)
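# For example (illustrative):
#   valid_XML_char_ordinal(0x09)  # -> True, tab is allowed in XML 1.0
#   valid_XML_char_ordinal(0x0B)  # -> False, vertical tab is not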
def clean_unreadable(input_string):
"""
Take the string and strip any illegal XML characters
:param input_string: an unreadable XML string
:return: a cleaned string - it will lose information, but what else can you do?
"""
try:
return _illegal_xml_chars_RE.sub("", input_string)
except TypeError as e:
app.logger.error("Unable to strip illegal XML chars from: {x}, {y}".format(x=input_string, y=type(input_string)))
return None
def xml_clean(input_string):
"""
Brute force clean all the characters in a string until they absolutely definitely will
serialise in XML (slower than clean_unreadable, but more reliable)
:param input_string: illegal XML string
:return: legal XML string
"""
cleaned_string = ''.join(c for c in input_string if valid_XML_char_ordinal(ord(c)))
return cleaned_string
def set_text(element, input_string):
"""
    Set the given text on the given element, carrying out whatever XML cleanup is also required.
:param element: element to write to
:param input_string: string to write
:return:
"""
if input_string is None:
return
input_string = clean_unreadable(input_string)
try:
element.text = input_string
except ValueError:
element.text = xml_clean(input_string)
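# Example usage (an illustrative sketch, assuming an lxml element; lxml rejects
# illegal characters on assignment, which is what triggers the xml_clean()
# fallback inside set_text):
#   from lxml import etree
#   el = etree.Element("note")
#   set_text(el, "hello\x0bworld")  # \x0b (vertical tab) is illegal in XML 1.0
#   etree.tostring(el)              # -> '<note>helloworld</note>'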
|
JiscPER/jper-oaipmh
|
service/xml.py
|
Python
|
apache-2.0
| 2,928
|
[
"Octopus"
] |
732da3b213b5b9c1b5c0614ddb14bfbe2bbcfc83ec8f95840d94a684df949084
|
"""Multiprocessing module to handle parallelization.
This module can optionally update a statusbar and can divide tasks
between cores using weights (so that each core gets a set of tasks with
the same total weight).
Adapted from a module by Brian Refsdal at SAO, available at AstroPython
(http://www.astropython.org/snippet/2010/3/Parallel-map-using-multiprocessing).
"""
from __future__ import print_function
import traceback
import sys
import numpy
_multi = False
_ncpus = 1
try:
# May raise ImportError
import multiprocessing
# Set spawn method to "fork". This is needed for macOS on Python 3.8+ where the
# default has been changed to "spawn", causing problems (see the discussion at
# https://github.com/ipython/ipython/issues/12396)
if sys.platform == 'darwin':
if sys.version_info[0] == 3 and sys.version_info[1] >= 8:
multiprocessing.set_start_method('fork')
_multi = True
# May raise NotImplementedError
_ncpus = min(multiprocessing.cpu_count(), 8)
except:
pass
__all__ = ('parallel_map',)
def worker(f, ii, chunk, out_q, err_q, lock, bar, bar_state):
"""
A worker function that maps an input function over a
slice of the input iterable.
:param f : callable function that accepts argument from iterable
:param ii : process ID
:param chunk: slice of input iterable
:param out_q: thread-safe output queue
:param err_q: thread-safe queue to populate on exception
:param lock : thread-safe lock to protect a resource
( useful in extending parallel_map() )
:param bar: statusbar to update during fit
:param bar_state: statusbar state dictionary
"""
vals = []
# iterate over slice
for val in chunk:
try:
result = f(val)
except Exception as e:
            etype, val, tbk = sys.exc_info()
print('Thread raised exception',e)
print('Traceback of thread is:')
print('-------------------------')
traceback.print_tb(tbk)
print('-------------------------')
err_q.put(e)
return
vals.append(result)
# update statusbar
if bar is not None:
if bar_state['started']:
bar.pos = bar_state['pos']
bar.spin_pos = bar_state['spin_pos']
bar.started = bar_state['started']
increment = bar.increment()
bar_state['started'] = bar.started
bar_state['pos'] += increment
bar_state['spin_pos'] += increment
if bar_state['spin_pos'] >= 4:
bar_state['spin_pos'] = 0
# output the result and task ID to output queue
out_q.put( (ii, vals) )
def run_tasks(procs, err_q, out_q, num):
"""
A function that executes populated processes and processes
the resultant array. Checks error queue for any exceptions.
:param procs: list of Process objects
:param out_q: thread-safe output queue
:param err_q: thread-safe queue to populate on exception
:param num : length of resultant array
"""
# function to terminate processes that are still running.
die = (lambda vals : [val.terminate() for val in vals
if val.exitcode is None])
try:
for proc in procs:
proc.start()
for proc in procs:
proc.join()
except Exception as e:
# kill all slave processes on ctrl-C
die(procs)
raise e
if not err_q.empty():
# kill all on any exception from any one slave
die(procs)
raise err_q.get()
# Processes finish in arbitrary order. Process IDs double
# as index in the resultant array.
    results = [None] * num
for i in range(num):
idx, result = out_q.get()
results[idx] = result
# Remove extra dimension added by array_split
result_list = []
for result in results:
result_list += result
return result_list
def parallel_map(function, sequence, numcores=None, bar=None, weights=None):
"""
A parallelized version of the native Python map function that
utilizes the Python multiprocessing module to divide and
conquer a sequence.
parallel_map does not yet support multiple argument sequences.
:param function: callable function that accepts argument from iterable
:param sequence: iterable sequence
:param numcores: number of cores to use (if None, all are used)
:param bar: statusbar to update during fit
:param weights: weights to use when splitting the sequence
"""
if not callable(function):
raise TypeError("input function '%s' is not callable" %
repr(function))
if not numpy.iterable(sequence):
raise TypeError("input '%s' is not iterable" %
repr(sequence))
sequence = list(sequence)
size = len(sequence)
if not _multi or size == 1:
results = list(map(function, sequence))
if bar is not None:
bar.stop()
return results
# Set default number of cores to use. Try to leave one core free for pyplot.
if numcores is None:
numcores = _ncpus - 1
if numcores > _ncpus - 1:
numcores = _ncpus - 1
if numcores < 1:
numcores = 1
# Returns a started SyncManager object which can be used for sharing
# objects between processes. The returned manager object corresponds
# to a spawned child process and has methods which will create shared
# objects and return corresponding proxies.
manager = multiprocessing.Manager()
# Create FIFO queue and lock shared objects and return proxies to them.
# The managers handles a server process that manages shared objects that
# each slave process has access to. Bottom line -- thread-safe.
out_q = manager.Queue()
err_q = manager.Queue()
lock = manager.Lock()
bar_state = manager.dict()
if bar is not None:
bar_state['pos'] = bar.pos
bar_state['spin_pos'] = bar.spin_pos
bar_state['started'] = bar.started
# if sequence is less than numcores, only use len sequence number of
# processes
if size < numcores:
numcores = size
# group sequence into numcores-worth of chunks
if weights is None or numcores == size:
# No grouping specified (or there are as many cores as
# processes), so divide into equal chunks
sequence = numpy.array_split(sequence, numcores)
else:
# Group so that each group has roughly an equal sum of weights
weight_per_core = numpy.sum(weights)/float(numcores)
cut_values = []
temp_sum = 0.0
for indx, weight in enumerate(weights):
temp_sum += weight
if temp_sum > weight_per_core:
cut_values.append(indx+1)
temp_sum = weight
if len(cut_values) > numcores - 1:
cut_values = cut_values[0:numcores-1]
sequence = numpy.array_split(sequence, cut_values)
# Make sure there are no empty chunks at the end of the sequence
while len(sequence[-1]) == 0:
sequence.pop()
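    # Worked example (illustrative): weights [3, 1, 1, 1, 2] on 2 cores give
    # weight_per_core = 4.0; the running sum first exceeds it at index 2, so
    # cut_values == [3] and the chunks are items 0-2 (total weight 5) and
    # items 3-4 (total weight 3).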
procs = [multiprocessing.Process(target=worker,
args=(function, ii, chunk, out_q, err_q, lock, bar, bar_state))
for ii, chunk in enumerate(sequence)]
try:
results = run_tasks(procs, err_q, out_q, len(sequence))
if bar is not None:
if bar.started:
bar.stop()
return results
except KeyboardInterrupt:
for proc in procs:
if proc.exitcode is None:
proc.terminate()
proc.join()
raise
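# Example usage (an illustrative sketch, not part of the module):
#   def slow_square(x):
#       return x * x
#   # 4 heavy tasks and 400 light ones; the weights steer array_split so that
#   # each core receives roughly the same total weight.
#   results = parallel_map(slow_square, range(404), numcores=4,
#                          weights=[100] * 4 + [1] * 400)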
|
lofar-astron/PyBDSF
|
bdsf/multi_proc.py
|
Python
|
gpl-3.0
| 7,726
|
[
"Brian"
] |
2c3257de94ee2c3d1b17b9534add53df0f048ab7c00ad9df65b2e82d66f348a1
|
"""
Container page in Studio
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise, Promise
from common.test.acceptance.pages.common.utils import click_css, confirm_prompt
from common.test.acceptance.pages.studio import BASE_URL
from common.test.acceptance.pages.studio.utils import HelpMixin, type_in_codemirror
class ContainerPage(PageObject, HelpMixin):
"""
Container page in Studio
"""
NAME_SELECTOR = '.page-header-title'
NAME_INPUT_SELECTOR = '.page-header .xblock-field-input'
NAME_FIELD_WRAPPER_SELECTOR = '.page-header .wrapper-xblock-field'
ADD_MISSING_GROUPS_SELECTOR = '.notification-action-button[data-notification-action="add-missing-groups"]'
def __init__(self, browser, locator):
super(ContainerPage, self).__init__(browser)
self.locator = locator
@property
def url(self):
"""URL to the container page for an xblock."""
return "{}/container/{}".format(BASE_URL, self.locator)
@property
def name(self):
titles = self.q(css=self.NAME_SELECTOR).text
if titles:
return titles[0]
else:
return None
def is_browser_on_page(self):
def _xblock_count(class_name, request_token):
return len(self.q(css='{body_selector} .xblock.{class_name}[data-request-token="{request_token}"]'.format(
body_selector=XBlockWrapper.BODY_SELECTOR, class_name=class_name, request_token=request_token
)).results)
def _is_finished_loading():
is_done = False
# Get the request token of the first xblock rendered on the page and assume it is correct.
data_request_elements = self.q(css='[data-request-token]')
if len(data_request_elements) > 0:
request_token = data_request_elements.first.attrs('data-request-token')[0]
# Then find the number of Studio xblock wrappers on the page with that request token.
num_wrappers = len(self.q(css='{} [data-request-token="{}"]'.format(XBlockWrapper.BODY_SELECTOR, request_token)).results)
# Wait until all components have been loaded and marked as either initialized or failed.
# See:
# - common/static/js/xblock/core.js which adds the class "xblock-initialized"
# at the end of initializeBlock.
# - common/static/js/views/xblock.js which adds the class "xblock-initialization-failed"
# if the xblock threw an error while initializing.
num_initialized_xblocks = _xblock_count('xblock-initialized', request_token)
num_failed_xblocks = _xblock_count('xblock-initialization-failed', request_token)
is_done = num_wrappers == (num_initialized_xblocks + num_failed_xblocks)
return (is_done, is_done)
def _loading_spinner_hidden():
""" promise function to check loading spinner state """
is_spinner_hidden = self.q(css='div.ui-loading.is-hidden').present
return is_spinner_hidden, is_spinner_hidden
# First make sure that an element with the view-container class is present on the page,
# and then wait for the loading spinner to go away and all the xblocks to be initialized.
return (
self.q(css='body.view-container').present and
Promise(_loading_spinner_hidden, 'loading spinner is hidden.').fulfill() and
Promise(_is_finished_loading, 'Finished rendering the xblock wrappers.').fulfill()
)
def wait_for_component_menu(self):
"""
Waits until the menu bar of components is present on the page.
"""
EmptyPromise(
lambda: self.q(css='div.add-xblock-component').present,
'Wait for the menu of components to be present'
).fulfill()
@property
def xblocks(self):
"""
Return a list of xblocks loaded on the container page.
"""
return self._get_xblocks()
@property
def inactive_xblocks(self):
"""
Return a list of inactive xblocks loaded on the container page.
"""
return self._get_xblocks(".is-inactive ")
@property
def active_xblocks(self):
"""
Return a list of active xblocks loaded on the container page.
"""
return self._get_xblocks(".is-active ")
@property
def displayed_children(self):
"""
Return a list of displayed xblocks loaded on the container page.
"""
return self._get_xblocks()[0].children
@property
def publish_title(self):
"""
Returns the title as displayed on the publishing sidebar component.
"""
return self.q(css='.pub-status').first.text[0]
@property
def release_title(self):
"""
Returns the title before the release date in the publishing sidebar component.
"""
return self.q(css='.wrapper-release .title').first.text[0]
@property
def release_date(self):
"""
Returns the release date of the unit (with ancestor inherited from), as displayed
in the publishing sidebar component.
"""
return self.q(css='.wrapper-release .copy').first.text[0]
@property
def last_saved_text(self):
"""
Returns the last saved message as displayed in the publishing sidebar component.
"""
return self.q(css='.wrapper-last-draft').first.text[0]
@property
def last_published_text(self):
"""
Returns the last published message as displayed in the sidebar.
"""
return self.q(css='.wrapper-last-publish').first.text[0]
@property
def currently_visible_to_students(self):
"""
Returns True if the unit is marked as currently visible to students
(meaning that a warning is being displayed).
"""
warnings = self.q(css='.container-message .warning')
if not warnings.is_present():
return False
warning_text = warnings.first.text[0]
return warning_text == "Caution: The last published version of this unit is live. By publishing changes you will change the student experience."
def shows_inherited_staff_lock(self, parent_type=None, parent_name=None):
"""
Returns True if the unit inherits staff lock from a section or subsection.
"""
return self.q(css='.bit-publishing .wrapper-visibility .copy .inherited-from').visible
@property
def sidebar_visibility_message(self):
"""
Returns the text within the sidebar visibility section.
"""
return self.q(css='.bit-publishing .wrapper-visibility').first.text[0]
@property
def publish_action(self):
"""
Returns the link for publishing a unit.
"""
return self.q(css='.action-publish').first
def publish(self):
"""
Publishes the container.
"""
self.publish_action.click()
self.wait_for_ajax()
def discard_changes(self):
"""
Discards draft changes (which will then re-render the page).
"""
click_css(self, 'a.action-discard', 0, require_notification=False)
confirm_prompt(self)
self.wait_for_ajax()
@property
def is_staff_locked(self):
""" Returns True if staff lock is currently enabled, False otherwise """
for attr in self.q(css='a.action-staff-lock>.fa').attrs('class'):
if 'fa-check-square-o' in attr:
return True
return False
def toggle_staff_lock(self, inherits_staff_lock=False):
"""
Toggles "hide from students" which enables or disables a staff-only lock.
Returns True if the lock is now enabled, else False.
"""
was_locked_initially = self.is_staff_locked
if not was_locked_initially:
self.q(css='a.action-staff-lock').first.click()
else:
click_css(self, 'a.action-staff-lock', 0, require_notification=False)
if not inherits_staff_lock:
confirm_prompt(self)
self.wait_for_ajax()
return not was_locked_initially
def view_published_version(self):
"""
Clicks "View Live Version", which will open the published version of the unit page in the LMS.
Switches the browser to the newly opened LMS window.
"""
self.q(css='.button-view').first.click()
self._switch_to_lms()
def verify_publish_title(self, expected_title):
"""
Waits for the publish title to change to the expected value.
"""
def wait_for_title_change():
"""
Promise function to check publish title.
"""
return (self.publish_title == expected_title, self.publish_title)
Promise(wait_for_title_change, "Publish title incorrect. Found '" + self.publish_title + "'").fulfill()
def preview(self):
"""
Clicks "Preview", which will open the draft version of the unit page in the LMS.
Switches the browser to the newly opened LMS window.
"""
self.q(css='.button-preview').first.click()
self._switch_to_lms()
def _switch_to_lms(self):
"""
Assumes LMS has opened-- switches to that window.
"""
browser_window_handles = self.browser.window_handles
# Switch to browser window that shows HTML Unit in LMS
        # The last handle represents the latest window opened
self.browser.switch_to_window(browser_window_handles[-1])
def _get_xblocks(self, prefix=""):
return self.q(css=prefix + XBlockWrapper.BODY_SELECTOR).map(
lambda el: XBlockWrapper(self.browser, el.get_attribute('data-locator'))).results
def duplicate(self, source_index):
"""
Duplicate the item with index source_index (based on vertical placement in page).
"""
click_css(self, '.duplicate-button', source_index)
def delete(self, source_index):
"""
Delete the item with index source_index (based on vertical placement in page).
Only visible items are counted in the source_index.
The index of the first item is 0.
"""
# Click the delete button
click_css(self, '.delete-button', source_index, require_notification=False)
# Click the confirmation dialog button
confirm_prompt(self)
def edit(self):
"""
Clicks the "edit" button for the first component on the page.
"""
return _click_edit(self, '.edit-button', '.xblock-studio_view')
def edit_visibility(self):
"""
Clicks the edit visibility button for this container.
"""
return _click_edit(self, '.access-button', '.xblock-visibility_view')
def verify_confirmation_message(self, message, verify_hidden=False):
"""
        Verify that the confirmation message is present or hidden.
"""
def _verify_message():
""" promise function to check confirmation message state """
text = self.q(css='#page-alert .alert.confirmation #alert-confirmation-title').text
return text and message not in text[0] if verify_hidden else text and message in text[0]
self.wait_for(_verify_message, description='confirmation message {status}'.format(
status='hidden' if verify_hidden else 'present'
))
def click_undo_move_link(self):
"""
Click undo move link.
"""
click_css(self, '#page-alert .alert.confirmation .nav-actions .action-primary')
def click_take_me_there_link(self):
"""
Click take me there link.
"""
click_css(self, '#page-alert .alert.confirmation .nav-actions .action-secondary', require_notification=False)
def add_missing_groups(self):
"""
Click the "add missing groups" link.
Note that this does an ajax call.
"""
self.q(css=self.ADD_MISSING_GROUPS_SELECTOR).first.click()
self.wait_for_ajax()
        # Wait until all xblocks are rendered.
self.wait_for_page()
def missing_groups_button_present(self):
"""
Returns True if the "add missing groups" button is present.
"""
return self.q(css=self.ADD_MISSING_GROUPS_SELECTOR).present
def get_xblock_information_message(self):
"""
Returns an information message for the container page.
"""
return self.q(css=".xblock-message.information").first.text[0]
def get_xblock_access_message(self):
"""
Returns a message detailing the access to the specified unit
"""
access_message = self.q(css=".access-message").first
if access_message:
return access_message.text[0]
else:
return ""
def is_inline_editing_display_name(self):
"""
Return whether this container's display name is in its editable form.
"""
return "is-editing" in self.q(css=self.NAME_FIELD_WRAPPER_SELECTOR).first.attrs("class")[0]
def get_category_tab_names(self, category_type):
"""
Returns list of tab name in a category.
Arguments:
category_type (str): category type
Returns:
list
"""
self.q(css='.add-xblock-component-button[data-type={}]'.format(category_type)).first.click()
return self.q(css='.{}-type-tabs>li>a'.format(category_type)).text
def get_category_tab_components(self, category_type, tab_index):
"""
Return list of component names in a tab in a category.
Arguments:
category_type (str): category type
tab_index (int): tab index in a category
Returns:
list
"""
css = '#tab{tab_index} button[data-category={category_type}] span'.format(
tab_index=tab_index,
category_type=category_type
)
return self.q(css=css).html
class XBlockWrapper(PageObject):
"""
A PageObject representing a wrapper around an XBlock child shown on the Studio container page.
"""
url = None
BODY_SELECTOR = '.studio-xblock-wrapper'
NAME_SELECTOR = '.xblock-display-name'
VALIDATION_SELECTOR = '.xblock-message.validation'
COMPONENT_BUTTONS = {
'basic_tab': '.editor-tabs li.inner_tab_wrap:nth-child(1) > a',
'advanced_tab': '.editor-tabs li.inner_tab_wrap:nth-child(2) > a',
'settings_tab': '.editor-modes .settings-button',
'save_settings': '.action-save',
}
def __init__(self, browser, locator):
super(XBlockWrapper, self).__init__(browser)
self.locator = locator
def is_browser_on_page(self):
return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present
def _bounded_selector(self, selector):
"""
Return `selector`, but limited to this particular `CourseOutlineChild` context
"""
return '{}[data-locator="{}"] {}'.format(
self.BODY_SELECTOR,
self.locator,
selector
)
@property
def student_content(self):
"""
Returns the text content of the xblock as displayed on the container page.
"""
return self.q(css=self._bounded_selector('.xblock-student_view'))[0].text
@property
def author_content(self):
"""
Returns the text content of the xblock as displayed on the container page.
(For blocks which implement a distinct author_view).
"""
return self.q(css=self._bounded_selector('.xblock-author_view'))[0].text
@property
def name(self):
titles = self.q(css=self._bounded_selector(self.NAME_SELECTOR)).text
if titles:
return titles[0]
else:
return None
@property
def children(self):
"""
Will return any first-generation descendant xblocks of this xblock.
"""
descendants = self.q(css=self._bounded_selector(self.BODY_SELECTOR)).filter(lambda el: el.is_displayed()).map(
lambda el: XBlockWrapper(self.browser, el.get_attribute('data-locator'))).results
# Now remove any non-direct descendants.
grandkids = []
for descendant in descendants:
grandkids.extend(descendant.children)
grand_locators = [grandkid.locator for grandkid in grandkids]
return [descendant for descendant in descendants if descendant.locator not in grand_locators]
@property
def has_validation_message(self):
""" Is a validation warning/error/message shown? """
return self.q(css=self._bounded_selector(self.VALIDATION_SELECTOR)).present
def _validation_paragraph(self, css_class):
""" Helper method to return the <p> element of a validation warning """
return self.q(css=self._bounded_selector('{} p.{}'.format(self.VALIDATION_SELECTOR, css_class)))
@property
def has_validation_warning(self):
""" Is a validation warning shown? """
return self._validation_paragraph('warning').present
@property
def has_validation_error(self):
""" Is a validation error shown? """
return self._validation_paragraph('error').present
@property
# pylint: disable=invalid-name
def has_validation_not_configured_warning(self):
""" Is a validation "not configured" message shown? """
return self._validation_paragraph('not-configured').present
@property
def validation_warning_text(self):
""" Get the text of the validation warning. """
return self._validation_paragraph('warning').text[0]
@property
def validation_error_text(self):
""" Get the text of the validation error. """
return self._validation_paragraph('error').text[0]
@property
def validation_error_messages(self):
return self.q(css=self._bounded_selector('{} .xblock-message-item.error'.format(self.VALIDATION_SELECTOR))).text
@property
# pylint: disable=invalid-name
def validation_not_configured_warning_text(self):
""" Get the text of the validation "not configured" message. """
return self._validation_paragraph('not-configured').text[0]
@property
def preview_selector(self):
return self._bounded_selector('.xblock-student_view,.xblock-author_view')
@property
def has_group_visibility_set(self):
return self.q(css=self._bounded_selector('.wrapper-xblock.has-group-visibility-set')).is_present()
@property
def has_duplicate_button(self):
"""
Returns true if this xblock has a 'duplicate' button
"""
return self.q(css=self._bounded_selector('.duplicate-button'))
@property
def has_delete_button(self):
"""
Returns true if this xblock has a 'delete' button
"""
return self.q(css=self._bounded_selector('.delete-button'))
@property
def has_edit_visibility_button(self):
"""
Returns true if this xblock has an 'edit visibility' button
:return:
"""
return self.q(css=self._bounded_selector('.access-button')).is_present()
@property
def has_move_modal_button(self):
"""
Returns True if this xblock has move modal button else False
"""
return self.q(css=self._bounded_selector('.move-button')).is_present()
@property
def get_partition_group_message(self):
"""
Returns the message about user partition group visibility, shown under the display name
(if not present, returns None).
"""
message = self.q(css=self._bounded_selector('.xblock-group-visibility-label'))
return None if len(message) == 0 else message.first.text[0]
def go_to_container(self):
"""
Open the container page linked to by this xblock, and return
an initialized :class:`.ContainerPage` for that xblock.
"""
return ContainerPage(self.browser, self.locator).visit()
def edit(self):
"""
Clicks the "edit" button for this xblock.
"""
return _click_edit(self, '.edit-button', '.xblock-studio_view', self._bounded_selector)
def edit_visibility(self):
"""
Clicks the edit visibility button for this xblock.
"""
return _click_edit(self, '.access-button', '.xblock-visibility_view', self._bounded_selector)
def open_advanced_tab(self):
"""
Click on Advanced Tab.
"""
self._click_button('advanced_tab')
def open_basic_tab(self):
"""
Click on Basic Tab.
"""
self._click_button('basic_tab')
def open_settings_tab(self):
"""
If editing, click on the "Settings" tab
"""
self._click_button('settings_tab')
def open_move_modal(self):
"""
Opens the move modal.
"""
click_css(self, '.move-button', require_notification=False)
self.wait_for(
lambda: self.q(css='.modal-window.move-modal').visible, description='move modal is visible'
)
def set_field_val(self, field_display_name, field_value):
"""
If editing, set the value of a field.
"""
selector = '{} li.field label:contains("{}") + input'.format(self.editor_selector, field_display_name)
script = "$(arguments[0]).val(arguments[1]).change();"
self.browser.execute_script(script, selector, field_value)
def reset_field_val(self, field_display_name):
"""
If editing, reset the value of a field to its default.
"""
scope = '{} li.field label:contains("{}")'.format(self.editor_selector, field_display_name)
script = "$(arguments[0]).siblings('.setting-clear').click();"
self.browser.execute_script(script, scope)
def set_codemirror_text(self, text, index=0):
"""
Set the text of a CodeMirror editor that is part of this xblock's settings.
"""
type_in_codemirror(self, index, text, find_prefix='$("{}").find'.format(self.editor_selector))
def set_license(self, license_type):
"""
Uses the UI to set the course's license to the given license_type (str)
"""
css_selector = (
"ul.license-types li[data-license={license_type}] button"
).format(license_type=license_type)
self.wait_for_element_presence(
css_selector,
"{license_type} button is present".format(license_type=license_type)
)
self.q(css=css_selector).click()
def save_settings(self):
"""
Click on settings Save button.
"""
self._click_button('save_settings')
@property
def editor_selector(self):
return '.xblock-studio_view'
def _click_button(self, button_name):
"""
Click on a button as specified by `button_name`
Arguments:
button_name (str): button name
"""
self.q(css=self.COMPONENT_BUTTONS[button_name]).first.click()
self.wait_for_ajax()
def go_to_group_configuration_page(self):
"""
Go to the Group Configuration used by the component.
"""
self.q(css=self._bounded_selector('span.message-text a')).first.click()
def is_placeholder(self):
"""
Checks to see if the XBlock is rendered as a placeholder without a preview.
"""
return not self.q(css=self._bounded_selector('.wrapper-xblock article')).present
@property
def group_configuration_link_name(self):
"""
Get Group Configuration name from link.
"""
return self.q(css=self._bounded_selector('span.message-text a')).first.text[0]
def _click_edit(page_object, button_css, view_css, bounded_selector=lambda x: x):
"""
Click on the first editing button found and wait for the Studio editor to be present.
"""
page_object.q(css=bounded_selector(button_css)).first.click()
EmptyPromise(
lambda: page_object.q(css=view_css).present,
'Wait for the Studio editor to be present'
).fulfill()
return page_object
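# Example usage (an illustrative sketch; assumes a bok-choy browser fixture and
# a valid Studio unit locator):
#   page = ContainerPage(browser, unit_locator).visit()
#   block = page.xblocks[0]
#   block.edit()             # open the Studio editor for the first component
#   block.save_settings()    # click the editor's Save button
#   page.publish()           # publish the draft and wait for ajax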
|
pepeportela/edx-platform
|
common/test/acceptance/pages/studio/container.py
|
Python
|
agpl-3.0
| 24,434
|
[
"VisIt"
] |
38818e7b607443545799e32864cdafca9ea9b7503755b9ab791dfc525e7ef488
|
# This is a main file to run the diceseq software, which will return the
# isoform proportions ratio for each gene at all time points.
import os
import sys
import gzip
import time
import pysam
import numpy as np
import multiprocessing
from optparse import OptionParser, OptionGroup
# import pyximport; pyximport.install()
from .utils.gtf_utils import loadgene
from .utils.run_utils import get_psi, sort_dice_file
FID1 = None
FID2 = None
PROCESSED = 0
TOTAL_GENE = 0
TOTAL_READ = []
START_TIME = time.time()
def show_progress(RV=None):
global PROCESSED, TOTAL_GENE, START_TIME, FID1, FID2
if RV is None:
return RV
else:
FID1.writelines(RV["dice_line"])
if FID2 is not None: FID2.writelines(RV["sample_line"])
PROCESSED += 1
bar_len = 20
run_time = time.time() - START_TIME
percents = 100.0 * PROCESSED / TOTAL_GENE
filled_len = int(round(bar_len * percents / 100))
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('\r[DICEseq] [%s] %.1f%% done in %.1f sec.'
% (bar, percents, run_time))
sys.stdout.flush()
return RV
def main():
# import warnings
# warnings.filterwarnings('error')
# parse command line options
parser = OptionParser()
parser.add_option("--anno_file", "-a", dest="anno_file", default=None,
help="Annotation file for genes and transcripts in GTF or GFF3")
parser.add_option("--sam_list", "-s", dest="sam_list", default=None,
help=("Sorted and indexed bam/sam files, use ',' for replicates "
"and '---' for time points, e.g., T1_rep1.bam,T1_rep2.bam---T2.bam"))
parser.add_option("--time_seq", "-t", dest="time_seq", default=None,
help="The time for the input samples [Default: 0,1,2,...]")
parser.add_option("--out_file", "-o", dest="out_file", default="output",
help="Prefix of the output files with full path")
group = OptionGroup(parser, "Optional arguments")
group.add_option("--nproc", "-p", type="int", dest="nproc", default="4",
help="Number of subprocesses [default: %default]")
group.add_option("--add_premRNA", action="store_true", dest="add_premRNA",
default=False, help="Add the pre-mRNA as a transcript")
group.add_option("--fLen", type="float", nargs=2, dest="frag_leng",
default=[None,None], help=("Two arguments for fragment length: "
"mean and standard diveation, default: auto-detected"))
group.add_option("--bias", nargs=3, dest="bias_args",
default=["unif","None","None"], help=("Three argments for bias "
"correction: BIAS_MODE,REF_FILE,BIAS_FILE(s). BIAS_MODE: unif, end5, "
"end3, both. REF_FILE: the genome reference file in fasta format. "
"BIAS_FILE(s): bias files from dice-bias, use '---' for time specific "
"files, [default: unif None None]"))
group.add_option("--thetas", nargs=2, dest="thetas", default=[3,"None"],
help=("Two arguments for hyperparameters in GP model: theta1,theta2. "
"default: [3 None], where theta2 covers 1/3 duration."))
group.add_option("--mcmc", type="int", nargs=4, dest="mcmc_run",
        default=[0,20000,1000,100], help=("Four arguments for MCMC "
        "iterations: save_sample,max_run,min_run,gap_run. Required: "
        "save_sample <= 3/4*min_run. [default: 0 20000 1000 100]"))
# SAVE_NUM: the number of samples for saving out;
    # MAX_NUM,MIN_NUM: the maximum and the minimum numbers of samples;
    # GAP_NUM: after min_num, gap_run iterations are added until convergence.
# group.add_option("--anno_type", dest="anno_type", default="GTF",
# help="Type of annotation file: GTF, GFF3, UCSC_table "
# "[default: %default]")
parser.add_option_group(group)
##### FOR DEVELOPMENT #####
# parser.add_option("--mate_mode", dest="mate_mode", default="pair",
# help=("The mode for using paired-end reads: auto, pair, single "
# "[default: %default]."))
# parser.add_option("--auto_min", dest="auto_min", default="200",
# help=("The minimum pairs of read mates in auto mode. "
# "[default: %default]."))
# parser.add_option("--print_detail", action="store_true", dest="print_detail",
# default=False, help="print the detail of the sampling.")
# parser.add_option("--no_twice", action="store_true", dest="no_twice",
# default=False, help="No quick estimate of the variance, but use fixed.")
(options, args) = parser.parse_args()
if len(sys.argv[1:]) == 0:
print("Welcome to diceseq!\n")
print("use -h or --help for help on argument.")
sys.exit(1)
if options.anno_file == None:
print("[DICEseq] Error: need --anno_file for annotation.")
sys.exit(1)
else:
sys.stdout.write("\r[DICEseq] loading annotation file...")
sys.stdout.flush()
# anno = load_annotation(options.anno_file, options.anno_type)
# genes = anno["genes"]
genes = loadgene(options.anno_file)
sys.stdout.write("\r[DICEseq] loading annotation file... Done.\n")
sys.stdout.flush()
global TOTAL_GENE
TOTAL_GENE = len(genes)
if options.sam_list == None:
print("[DICEseq] Error: need --sam_list for aliged & indexed reads.")
sys.exit(1)
else:
sam_list = options.sam_list.split("---")
global TOTAL_READ
for i in range(len(sam_list)):
sam_list[i] = sam_list[i].split(",")
_cnt = 0
for ss in sam_list[i]:
if not os.path.isfile(ss):
print("Error: No such file\n -- %s" %ss)
sys.exit(1)
pysam_stats = pysam.idxstats(ss)
if type(pysam_stats) is not list:
pysam_stats = pysam_stats.split("\n")
for tp in pysam_stats:
tmp = tp.strip().split("\t")
if len(tmp) >= 3:
_cnt += float(tmp[2])
TOTAL_READ.append(_cnt)
no_twice = False
auto_min = 200
mate_mode = "single"
print_detail = False
nproc = options.nproc
out_file = options.out_file
add_premRNA = options.add_premRNA
FLmean, FLstd = options.frag_leng
sample_num, Mmax, Mmin, Mgap = options.mcmc_run
if options.time_seq is None:
X = np.arange(len(sam_list))
else:
X = np.array(options.time_seq.split(","), "float")
theta1, theta2 = options.thetas
theta1 = float(theta1)
if theta2 is None or ["None", "Auto", "auto"].count(theta2) == 1:
theta2 = ((max(X) - min(X) + 0.1) / 3.0)**2
elif theta2 == 'learn':
theta2 = None
else:
theta2 = max(0.00001, float(theta2))
bias_mode, ref_file, bias_file = options.bias_args
if bias_mode == "unif":
ref_file = None
bias_file = None
    elif ref_file == "None":
ref_file = None
bias_file = None
bias_mode = "unif"
print("[DICEseq] No reference sequence, change to uniform mode.")
    elif bias_file == "None":
ref_file = None
bias_file = None
bias_mode = "unif"
print("[DICEseq] No bias parameter file, change to uniform mode.")
else:
bias_file = bias_file.split("---")
global FID1, FID2
# if not os.path.exists(os.path.dirname(out_file)):
# try:
# os.makedirs(os.path.dirname(out_file))
# except OSError as exc: # Guard against race condition
# if exc.errno != errno.EEXIST:
# raise
FID1 = open(out_file + ".dice", "w")
headline = "tran_id\tgene_id\tlogLik\ttransLen"
for i in range(len(X)):
_t = str(X[i])
headline += "\tFPKM_T%s\tratio_T%s\tratio_lo_T%s\tratio_hi_T%s" %(_t,
_t, _t, _t)
FID1.writelines(headline + "\n")
if sample_num > 0:
FID2 = gzip.open(out_file + ".sample.gz", "w")
FID2.writelines("# MCMC samples for latent Y\n")
FID2.writelines("# @gene|transcripts|theta2\n")
FID2.writelines("# y_c1t1,y_c2t1;y_c1t2,y_c2t2;...\n")
print("[DICEseq] running diceseq for %d genes with %d cores..." %(
TOTAL_GENE, nproc))
tran_ids = []
if nproc <= 1:
for g in genes:
if add_premRNA: g.add_premRNA()
for t in g.trans: tran_ids.append(t.tranID)
RV = get_psi(g, sam_list, ref_file, bias_file, bias_mode, X, Mmax,
Mmin, Mgap, theta1, theta2, no_twice, sample_num, print_detail,
FLmean, FLstd, mate_mode, auto_min, TOTAL_READ)
show_progress(RV)
else:
pool = multiprocessing.Pool(processes=nproc)
for g in genes:
if add_premRNA: g.add_premRNA()
for t in g.trans: tran_ids.append(t.tranID)
pool.apply_async(get_psi, (g, sam_list, ref_file, bias_file,
bias_mode, X, Mmax, Mmin, Mgap, theta1, theta2, no_twice,
sample_num, print_detail, FLmean, FLstd, mate_mode,
auto_min, TOTAL_READ), callback=show_progress)
pool.close()
pool.join()
FID1.close()
if FID2 is not None: FID2.close()
sort_dice_file(out_file+".dice", tran_ids)
print("")
if __name__ == "__main__":
main()
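# Example command line (illustrative file names and settings):
#   diceseq -a anno.gtf -s T1_rep1.bam,T1_rep2.bam---T2.bam -t 0,1 \
#       -o out/dice_result -p 8 --add_premRNA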
|
huangyh09/diceseq
|
diceseq/diceseq.py
|
Python
|
apache-2.0
| 9,351
|
[
"pysam"
] |
aeb0a256deb6bcbe1f8e04c2254d947a1a4d2c717d4b4382a226e54647cf4a80
|
#!/usr/bin/env python3
# ver 0.1 - coding python by Hyuntae Jung on 03/29/2018
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
	description='convert MC input file to npy trajectory files')
## args
parser.add_argument('-i', '--input', default='traj.trr', nargs='?',
help='input trajectory file')
parser.add_argument('-s', '--structure', default='topol.tpr', nargs='?',
help='.tpr structure file')
parser.add_argument('-m', '--mass', nargs='?',
help='divider for normalization and masses for selected molecules')
parser.add_argument('-select1', '--select1', nargs='?',
help='a file1 with a command-line for select_atoms in MDAnalysis')
parser.add_argument('-select2', '--select2', nargs='?',
help='a file2 with a command-line for select_atoms in MDAnalysis')
parser.add_argument('-nbin', '--nbin', nargs='?', type=int,
help='number of bins')
parser.add_argument('-axis', '--axis', default=2, nargs='?', type=int,
help='which axis for histogram (x axis (0), y axis (1), z axis (2))')
parser.add_argument('-o', '--output', default='traj', nargs='?',
	help='output prefix for mass1 fraction and total mass trajectory files')
parser.add_argument('args', nargs=argparse.REMAINDER)
parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.1')
## read args
args = parser.parse_args()
## Check arguments for log
print(" input arguments: {0}".format(args))
## import modules
import sys
sys.path.append('/home/htjung/Utility/python/')
import hjung
from hjung import *
import numpy as np
# default for args
args.omassf = args.output + '.massf'
args.otmass = args.output + '.tmass'
## check variable settings
print("="*30)
if args.axis < 0 or args.axis > 2:
raise ValueError("wrong input of axis for histogram")
## timer
start_proc, start_prof = hjung.time.init()
## read a topology and a trajectory using module MDAnalysis with selection
coordinates1, coordinates2, unit_cells = hjung.io.read_trr_3d_select2(args.structure, args.input, args.select1, args.select2, 'pos')
## reduce 3d-coordinates to 1d-coordinates
unit_cells = hjung.array.convert_unitcell_3d(unit_cells, args.structure, args.input)
unit_cells_1d = unit_cells[:,args.axis]
coordinates1_1d = coordinates1[:,:,args.axis]
coordinates2_1d = coordinates2[:,:,args.axis]
## number histograms for each frame
number1_1d_t, bin_1d_t = hjung.analyze.histo_t_1d_nbin(coordinates1_1d, unit_cells_1d, args.nbin)
number2_1d_t, bin_1d_t = hjung.analyze.histo_t_1d_nbin(coordinates2_1d, unit_cells_1d, args.nbin)
print("Done: making number trajectory with respect to bins")
## read args.mass file for weights
mw, divider = hjung.io.read_mass2(args.mass)
## Calculate mass fraction of each bins with weights
mass1_1d_t = np.array(number1_1d_t*mw[0]/divider[0],dtype=np.float)
mass2_1d_t = np.array(number2_1d_t*mw[1]/divider[1],dtype=np.float)
totalmass_1d_t = mass1_1d_t + mass2_1d_t
massfrac_1d_t = np.divide(mass1_1d_t,totalmass_1d_t)
## save number histogram trajectory
np.savetxt(args.omassf, massfrac_1d_t,
header='[%d, %d], mass1 fraction by molecules in nbins, %d' \
%(len(massfrac_1d_t),args.nbin,args.nbin), fmt='%f', comments='# ')
np.save(args.omassf, massfrac_1d_t)
np.savetxt(args.otmass, totalmass_1d_t,
header='[%d, %d], total mass by molecules in nbins, %d' \
%(len(totalmass_1d_t),args.nbin,args.nbin), fmt='%f', comments='# ')
np.save(args.otmass, totalmass_1d_t)
## bin-size info
box_axis_avg, box_axis_std = hjung.coord.box_1d_mode(unit_cells_1d,'box-z length','v')
print(" bin size = {0:.5f} +- {1:.5f}".format((box_axis_avg/float(args.nbin)),(box_axis_std/float(args.nbin))))
## timer
hjung.time.end_print(start_proc, start_prof)
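## Example command line (illustrative file names):
##   python mc_ic_2_npy.py -i traj.trr -s topol.tpr -m mass.txt \
##     -select1 select1.txt -select2 select2.txt -nbin 100 -axis 2 -o traj
## which writes traj.massf(.npy) and traj.tmass(.npy).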
|
jht0664/Utility_python_gromacs
|
python/mc_ic_2_npy.py
|
Python
|
mit
| 3,788
|
[
"MDAnalysis"
] |
c44abe9bde50bcbb0cecaf95808095398752aa9bef0ff50c96bf1be09fc77e38
|
from __future__ import unicode_literals
import unittest
from django.utils import html
class TestUtilsHtml(unittest.TestCase):
def check_output(self, function, value, output=None):
"""
Check that function(value) equals output. If output is None,
check that function(value) equals value.
"""
if output is None:
output = value
self.assertEqual(function(value), output)
def test_escape(self):
f = html.escape
items = (
('&','&'),
('<', '<'),
('>', '>'),
('"', '"'),
("'", '''),
)
# Substitution patterns for testing the above items.
patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
for value, output in items:
for pattern in patterns:
self.check_output(f, pattern % value, pattern % output)
# Check repeated values.
self.check_output(f, value * 2, output * 2)
# Verify it doesn't double replace &.
self.check_output(f, '<&', '<&')
def test_format_html(self):
self.assertEqual(
html.format_html("{0} {1} {third} {fourth}",
"< Dangerous >",
html.mark_safe("<b>safe</b>"),
third="< dangerous again",
fourth=html.mark_safe("<i>safe again</i>")
),
"< Dangerous > <b>safe</b> < dangerous again <i>safe again</i>"
)
def test_linebreaks(self):
f = html.linebreaks
items = (
("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
("para1\nsub1\rsub2\n\npara2", "<p>para1<br />sub1<br />sub2</p>\n\n<p>para2</p>"),
("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br />sub1</p>\n\n<p>para4</p>"),
("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_tags(self):
f = html.strip_tags
items = (
('<adf>a', 'a'),
('</adf>a', 'a'),
('<asdf><asdf>e', 'e'),
('<f', '<f'),
('</fe', '</fe'),
('<x>b<y>', 'b'),
('a<p onclick="alert(\'<test>\')">b</p>c', 'abc'),
('a<p a >b</p>c', 'abc'),
('d<a:b c:d>e</p>f', 'def'),
('<strong>foo</strong><a href="http://example.com">bar</a>', 'foobar'),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_spaces_between_tags(self):
f = html.strip_spaces_between_tags
# Strings that should come out untouched.
items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>')
for value in items:
self.check_output(f, value)
# Strings that have spaces to strip.
items = (
('<d> </d>', '<d></d>'),
('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'),
('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_entities(self):
f = html.strip_entities
# Strings that should come out untouched.
values = ("&", "&a", "&a", "a&#a")
for value in values:
self.check_output(f, value)
# Valid entities that should be stripped from the patterns.
entities = ("", "", "&a;", "&fdasdfasdfasdf;")
patterns = (
("asdf %(entity)s ", "asdf "),
("%(entity)s%(entity)s", ""),
("&%(entity)s%(entity)s", "&"),
("%(entity)s3", "3"),
)
for entity in entities:
for in_pattern, output in patterns:
self.check_output(f, in_pattern % {'entity': entity}, output)
def test_fix_ampersands(self):
f = html.fix_ampersands
# Strings without ampersands or with ampersands already encoded.
values = ("a", "b", "&a;", "& &x; ", "asdf")
patterns = (
("%s", "%s"),
("&%s", "&%s"),
("&%s&", "&%s&"),
)
for value in values:
for in_pattern, out_pattern in patterns:
self.check_output(f, in_pattern % value, out_pattern % value)
# Strings with ampersands that need encoding.
items = (
("&#;", "&#;"),
("ͫ ;", "&#875 ;"),
("abc;", "&#4abc;"),
)
for value, output in items:
self.check_output(f, value, output)
def test_escapejs(self):
f = html.escapejs
items = (
('"double quotes" and \'single quotes\'', '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027'),
(r'\ : backslashes, too', '\\u005C : backslashes, too'),
('and lots of whitespace: \r\n\t\v\f\b', 'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008'),
(r'<script>and this</script>', '\\u003Cscript\\u003Eand this\\u003C/script\\u003E'),
('paragraph separator:\u2029and line separator:\u2028', 'paragraph separator:\\u2029and line separator:\\u2028'),
)
for value, output in items:
self.check_output(f, value, output)
def test_clean_html(self):
f = html.clean_html
items = (
('<p>I <i>believe</i> in <b>semantic markup</b>!</p>', '<p>I <em>believe</em> in <strong>semantic markup</strong>!</p>'),
('I escape & I don\'t <a href="#" target="_blank">target</a>', 'I escape & I don\'t <a href="#" >target</a>'),
('<p>I kill whitespace</p><br clear="all"><p> </p>', '<p>I kill whitespace</p>'),
# also a regression test for #7267: this used to raise an UnicodeDecodeError
('<p>* foo</p><p>* bar</p>', '<ul>\n<li> foo</li><li> bar</li>\n</ul>'),
)
for value, output in items:
self.check_output(f, value, output)
def test_remove_tags(self):
f = html.remove_tags
items = (
("<b><i>Yes</i></b>", "b i", "Yes"),
("<a>x</a> <p><b>y</b></p>", "a b", "x <p>y</p>"),
)
for value, tags, output in items:
self.assertEqual(f(value, tags), output)
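# Illustrative: check_output drives every table-driven test above, e.g.
#   check_output(html.escape, "<b>", "&lt;b&gt;")
# passes silently, while a mismatch raises AssertionError via assertEqual.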
|
mammique/django
|
tests/regressiontests/utils/html.py
|
Python
|
bsd-3-clause
| 6,494
|
[
"ADF"
] |
7cb30e3c54fb2d4a7b21947e8ce3a1ef5a04c9365682e08c6740265e01eba481
|
## Copyright (c) 2012-2015 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
import os
import subprocess
import qisys.command
def check_gettext():
gettext = qisys.command.find_program("xgettext", raises=False)
if not gettext:
return False
return True
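# Illustrative: find_program(..., raises=False) returns the executable's path
# or a falsy value, so the tests below can cheaply skip themselves:
#   if not check_gettext():
#       return  # xgettext not on PATH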
def test_update(qilinguist_action):
if not check_gettext():
return
trad = qilinguist_action.trad
fr_FR_po_file = os.path.join(trad.path, "po", "fr_FR.po")
en_US_po_file = os.path.join(trad.path, "po", "en_US.po")
pot_file = os.path.join(trad.path, "po", "translate.pot")
assert not os.path.exists(fr_FR_po_file)
assert not os.path.exists(en_US_po_file)
assert not os.path.exists(pot_file)
qilinguist_action("update", "translate")
assert os.path.exists(fr_FR_po_file)
assert os.path.exists(en_US_po_file)
assert os.path.exists(pot_file)
def test_release(qilinguist_action):
if not check_gettext():
return
trad = qilinguist_action.trad
fr_FR_mo_file = os.path.join(trad.path, "po", "share", "locale", "translate", "fr_FR", "LC_MESSAGES", "translate.mo")
    en_US_mo_file = os.path.join(trad.path, "po", "share", "locale", "translate", "en_US", "LC_MESSAGES", "translate.mo")
assert not os.path.exists(fr_FR_mo_file)
assert not os.path.exists(en_US_mo_file)
qilinguist_action("update", "translate")
qilinguist_action.create_po(trad)
qilinguist_action("release", "translate")
assert os.path.exists(fr_FR_mo_file)
assert os.path.exists(en_US_mo_file)
def test_cplusplus_sdk_workflow(qilinguist_action):
if not check_gettext():
return
trad = qilinguist_action.trad
qilinguist_action.create_po(trad)
qilinguist_action("update", "translate")
qilinguist_action("release", "translate")
trad.configure()
trad.build()
## check binary output
binary = os.path.join(trad.sdk_directory, "bin", "translate")
dictPath = os.path.join(trad.path, "po", "share", "locale", "translate")
env = os.environ.copy()
env["LANGUAGE"] = "fr_FR.UTF-8" # for Ubuntu
env["LC_ALL"] = "fr_FR.UTF-8" # for Arch Linux
cmd = [binary, dictPath]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
out, _ = process.communicate()
out_fr = """Bonjour, mon nom est NAO.
O\xc3\xb9 est Brian ?
Brian est dans la cuisine.
"""
assert out_fr in out
env = os.environ.copy()
env["LANGUAGE"] = "en_US.UTF-8"
cmd = [binary, dictPath]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr = subprocess.PIPE, env=env)
out, _ = process.communicate()
out_en = """Hi, my name is NAO.
Where is Brian?
Brian is in the kitchen.
"""
assert out_en in out
def test_cplusplus_install_workflow(qilinguist_action, tmpdir):
if not check_gettext():
return
trad = qilinguist_action.trad
qilinguist_action.create_po(trad)
qilinguist_action("update", "translate")
qilinguist_action("release", "translate")
trad.configure()
trad.build()
trad.install(tmpdir.strpath)
## check mo files
fr_FR_mo_file = tmpdir.join("share", "locale", "translate", "fr_FR", "LC_MESSAGES", "translate.mo").strpath
en_US_mo_file = tmpdir.join("share", "locale", "translate", "en_US", "LC_MESSAGES", "translate.mo").strpath
assert os.path.exists(fr_FR_mo_file)
assert os.path.exists(en_US_mo_file)
## check binary output
binary = tmpdir.join("bin", "translate").strpath
dictPath = tmpdir.join("share", "locale", "translate").strpath
env = os.environ.copy()
env["LANGUAGE"] = "fr_FR.UTF-8" # for Ubuntu
env["LC_ALL"] = "fr_FR.UTF-8" # for Arch Linux
cmd = [binary, dictPath]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
out, _ = process.communicate()
out_fr = """Bonjour, mon nom est NAO.
O\xc3\xb9 est Brian ?
Brian est dans la cuisine.
"""
assert out_fr in out
env = os.environ.copy()
env["LANGUAGE"] = "en_US.UTF-8"
cmd = [binary, dictPath]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
out, _ = process.communicate()
out_en = """Hi, my name is NAO.
Where is Brian?
Brian is in the kitchen.
"""
assert out_en in out
|
dmerejkowsky/qibuild
|
python/qilinguist/test/test_gettext.py
|
Python
|
bsd-3-clause
| 4,505
|
[
"Brian"
] |
eaa56c6b320698dcf4df12a34e612e85526cd81022f535dbcfc494ee8192a5bf
|
"""
Copyright (c) 2011-2015 Nathan Boley
This file is part of GRIT.
GRIT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
GRIT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GRIT. If not, see <http://www.gnu.org/licenses/>.
"""
import sys, os
import pysam
import numpy
import shutil
import subprocess
import tempfile
import time
from itertools import izip
sys.path.insert( 0, os.path.join( os.path.dirname( __file__ ), ".." ) )
from grit.files.reads import clean_chr_name, fix_chrm_name_for_ucsc, \
CAGEReads, RAMPAGEReads, RNAseqReads, PolyAReads, ChIPSeqReads
from grit.lib.multiprocessing_utils import ProcessSafeOPStream
import multiprocessing
import threading
# if we choose the --ucsc option, then replace this function
# with fix_chrm_name_for_ucsc
def fix_chrm_name(name):
return name
BUFFER_SIZE = 50000000
def populate_cvg_array_for_contig(
merged_ofp, reads, chrm, chrm_length, strand ):
if VERBOSE: print "Starting ", chrm, strand
# re-open the reads to make this multi-process safe
reads = reads.reload()
    # open a temporary file to write this to
    with tempfile.NamedTemporaryFile(delete=True) as ofp:
        # process in blocks of BUFFER_SIZE to avoid running out of memory
for block_index in xrange(int(chrm_length/BUFFER_SIZE)+1):
buffer_array = reads.build_read_coverage_array(
chrm, strand,
block_index*BUFFER_SIZE,
(block_index+1)*BUFFER_SIZE )
write_array_to_opstream(
ofp, buffer_array, block_index*BUFFER_SIZE,
chrm, chrm_length, strand)
ofp.seek(0)
merged_ofp.write( ofp.read() )
if VERBOSE: print "Finished ", chrm, strand
return
def write_array_to_opstream(ofp, buffer, buff_start,
chrm, chrm_length, strand ):
"""write buffer to disk, buff_start determines the start of buffer in
genomic coordinates.
"""
chrm = fix_chrm_name( clean_chr_name( chrm ) )
prev_pos = 0
prev_val = buffer[0]
for pos, val in enumerate(buffer[1:]):
# make sure this doesn't extend past the end of the chromosome
# bedGraphs are 0-based, so use chrm_length-1
if buff_start+pos+1 >= chrm_length:
pos = chrm_length-buff_start-1
break
if val != prev_val:
if prev_val > 1e-12:
write_val = -prev_val if strand == '-' else prev_val
line = "%s\t%i\t%i\t%.2f" % (
chrm, buff_start+prev_pos, buff_start+pos+1, write_val )
ofp.write(line+"\n")
prev_pos, prev_val = pos+1, val
if prev_val > 1e-12:
write_val = -prev_val if strand == '-' else prev_val
line = "%s\t%i\t%i\t%.2f" % (
chrm, buff_start+prev_pos, buff_start+pos+1, write_val )
ofp.write(line+"\n")
return
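# A worked example of the run-length encoding above (values are illustrative):
# with buffer = [0, 0, 2.5, 2.5, 0], buff_start = 1000, chrm = 'chr1' and
# strand = '+', the two equal non-zero entries collapse into a single
# bedGraph interval,
#     chr1    1002    1004    2.50
# while zero-coverage stretches emit no lines at all.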
def build_chrm_sizes_file(reads):
chrm_sizes_file = tempfile.NamedTemporaryFile(delete=True)
# find the chrm names and their associated lengths
chrm_lengths = zip(reads.references, reads.lengths)
    #write out each chromosome and its corresponding size to disk
for chrm, chrm_length in chrm_lengths:
chrm_sizes_file.write(fix_chrm_name(chrm) + " " + str(chrm_length) +"\n")
chrm_sizes_file.flush()
return chrm_sizes_file
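# The temporary file ends up holding one whitespace-separated
# "<name> <size>" pair per contig, e.g. (hypothetical sizes):
#     chr1 249250621
#     chr2 243199373
# which is the chromosome-sizes layout bedGraphToBigWig consumes below.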
def generate_wiggle(reads, ofps, num_threads=1, contig=None ):
all_args = []
for chrm_length, chrm in sorted(izip(reads.lengths, reads.references)):
strands = ['+', '-'] if len(ofps) == 2 else [None,]
# skip regions not in the specified contig, if requested
if contig != None and clean_chr_name(chrm) != clean_chr_name(contig):
continue
for strand in strands:
ofp = ofps[strand]
assert (ofp, reads, chrm, chrm_length, strand ) not in all_args
all_args.append((ofp, reads, chrm, chrm_length, strand ))
if num_threads == 1:
for args in reversed(all_args):
populate_cvg_array_for_contig( *args )
else:
ps = [None]*num_threads
while len( all_args ) > 0:
for i in xrange(num_threads):
if ps[i] == None or not ps[i].is_alive():
ps[i] = multiprocessing.Process(
target=populate_cvg_array_for_contig,
args=all_args.pop() )
ps[i].start()
break
time.sleep( 0.1 )
for p in ps:
if p != None: p.join()
for fp in ofps.values(): fp.close()
return
def parse_arguments():
allowed_assays = ['cage', 'rampage', 'rnaseq', 'polya', 'atacseq', 'chipseq']
import argparse
parser = argparse.ArgumentParser(
description='Get coverage bedgraphs from aligned reads.')
parser.add_argument( '--mapped-reads-fname', required=True,
help='BAM or SAM file(s) containing the mapped reads.')
parser.add_argument( '--out-fname-prefix', '-o',
help='Output file(s) will be bigWig')
parser.add_argument( '--assay', '-a', required=True,
choices=allowed_assays, help='The assay type')
parser.add_argument( '--bigwig', '-b', default=False, action='store_true',
help='Build a bigwig instead of bedgraph.')
parser.add_argument( '--ucsc', default=False, action='store_true',
help='Format the contig names to work with the UCSC genome browser.')
parser.add_argument( '--verbose', '-v', default=False, action='store_true',
help='Whether or not to print status information.')
parser.add_argument( '--threads', '-t', default=1, type=int,
help='The number of threads to run.')
parser.add_argument( '--region',
help='Only use the specified region ( currently only accepts a contig name ).')
parser.add_argument( '--reverse-read-strand', '-r', default=False, action='store_true',
help='Whether or not to reverse the strand of the read. default: False')
parser.add_argument( '--unstranded', default=False, action='store_true',
help='Merge both data strands.')
parser.add_argument( '--read-filter', default=None, choices=['1','2'],
help='Filter paired end reads to only accept this read pair (ie uses the is_read1 pysam attribute)')
args = parser.parse_args()
global VERBOSE
VERBOSE = args.verbose
global fix_chrm_name
if args.ucsc: fix_chrm_name = fix_chrm_name_for_ucsc
assert args.read_filter in ( '1', '2', None )
read_filter = int(args.read_filter) if args.read_filter != None else None
if args.assay not in allowed_assays:
raise ValueError, "Unrecongized assay (%s)" % args.assay
region = args.region
if region != None:
if ':' in region or '-' in region:
assert False, "Invalid contig name: %s" % region
# if an output prefix isn't provided, then use the bam filename prefix
if args.out_fname_prefix == None:
fname_data = args.mapped_reads_fname.split(".")
# remove bam and sorted suffixes
while fname_data[-1] in ('bam', 'sorted'):
del fname_data[-1]
args.out_fname_prefix = ".".join(fname_data)
return ( args.assay, not args.unstranded, args.mapped_reads_fname, args.out_fname_prefix,
args.bigwig, args.reverse_read_strand, read_filter,
args.region, args.threads )
def build_bigwig_from_bedgraph(bedgraph_fp, chrm_sizes_file, op_fname):
with tempfile.NamedTemporaryFile(delete=True) as sorted_ofp:
if VERBOSE: print "Sorting ", bedgraph_fp.name
subprocess.call(
["sort -k1,1 -k2,2n " + bedgraph_fp.name,],
stdout=sorted_ofp, shell=True )
sorted_ofp.flush()
if VERBOSE: print "Building wig for", bedgraph_fp.name
subprocess.check_call( [ "bedGraphToBigWig",
sorted_ofp.name,
chrm_sizes_file.name,
op_fname ] )
return
def main():
( assay, stranded, reads_fname, op_prefix, build_bigwig,
reverse_read_strand, read_filter, region, num_threads) = parse_arguments()
# initialize the assay specific options
if assay == 'cage':
reads = CAGEReads( reads_fname, "rb" )
reads.init(reverse_read_strand=False)
stranded = True
assert not reverse_read_strand
elif assay == 'rampage':
reads = RAMPAGEReads( reads_fname, "rb" )
reads.init(reverse_read_strand=False)
stranded = True
elif assay == 'polya':
reads = PolyAReads( reads_fname, "rb" )
reads.init(reverse_read_strand=reverse_read_strand,
pairs_are_opp_strand=True)
stranded = True
elif assay == 'rnaseq':
reads = RNAseqReads( reads_fname, "rb" )
        # pass the requested strand reversal straight through to the reads object
reads.init(reverse_read_strand=reverse_read_strand,
reads_are_stranded=stranded)
elif assay == 'atacseq':
reads = RNAseqReads( reads_fname, "rb" )
        # pass the requested strand reversal straight through to the reads object
reads.init(reverse_read_strand=reverse_read_strand,
pairs_are_opp_strand=True)
elif assay == 'chipseq':
reads = ChIPSeqReads( reads_fname, "rb" )
reads.init(reverse_read_strand=reverse_read_strand)
stranded = False
else:
raise ValueError, "Unrecognized assay: '%s'" % assay
    # if we want to build a bigwig, make sure bedGraphToBigWig is on the PATH
if build_bigwig:
try:
subprocess.check_call(["which", "bedGraphToBigWig"], stdout=None)
except subprocess.CalledProcessError:
raise ValueError, "bedGraphToBigWig does not exist on $PATH. " + \
"You can still build a bedGraph by removing the --bigwig(-b) option."
# Open the output files
if stranded:
ofps = { '+' : ProcessSafeOPStream(
open(op_prefix+".plus.bedgraph","w")),
'-' : ProcessSafeOPStream(
open(op_prefix+".minus.bedgraph", "w"))
}
else:
ofps = { None: ProcessSafeOPStream(open(op_prefix+".bedgraph", "w")) }
# write the bedgraph header information
if not build_bigwig:
for key, fp in ofps.iteritems():
strand_str = "" if key == None else {
'+': '.plus', '-': '.minus'}[key]
fp.write( "track name=%s%s type=bedGraph\n" \
% ( os.path.basename(op_prefix), strand_str ) )
generate_wiggle( reads, ofps, num_threads, region )
# finally, if we are building a bigwig, build it, and then remove the bedgraph files
if build_bigwig:
# build the chrm sizes file.
with build_chrm_sizes_file(reads) as chrm_sizes_file:
threads = []
for strand, bedgraph_fp in ofps.iteritems():
strand_str = "" if strand == None else (
{'+': '.plus', '-': '.minus'}[strand] )
op_fname = op_prefix + strand_str + ".bw"
t = threading.Thread(
target=build_bigwig_from_bedgraph,
args=(bedgraph_fp, chrm_sizes_file, op_fname) )
t.start()
threads.append( t )
for t in threads:
t.join()
chrm_sizes_file.close()
# close the reads files
reads.close()
if __name__ == "__main__":
main()
|
nboley/grit
|
bin/bam2wig.py
|
Python
|
gpl-3.0
| 12,286
|
[
"pysam"
] |
38fdbf0c34adefe68d613c441a7be1d585833a8c39eef0380728a670913ec3f7
|
#! /usr/bin/env python
import roslib; roslib.load_manifest('ist_tasks')
import rospy
import actionlib
from perception_msgs.msg import *
from ist_grasp_generation_msgs.srv import *
from ist_grasp_generation_msgs.msg import *
import numpy
from orca_proxy.srv import *
from geometry_msgs.msg import Point
import math
from array import array
import time
class ObjectTest:
def __init__(self, name, sim_id, orientations, orientation_base, offset, category):
self.name = name
self.sim_id = sim_id
self.orientations = orientations
self.orientation_base = orientation_base
self.offset = offset
self.category = category
def perception_client():
# Region bounding the plannar surface and the objects of interest
    table_region=perception_msgs.msg.TableRegion()
#UPPER SHELF
table_region.x_filter_max=1.4
table_region.x_filter_min=0.7
table_region.y_filter_max=0.6
table_region.y_filter_min=-0.6
table_region.z_filter_max=1.5
table_region.z_filter_min=0.9
objectList = [ ObjectTest('griddle','freeform_griddle120', 8, numpy.matrix([[1, 0, 0],[0, 0, 1],[0, 1, 0]]), numpy.array([0.2, 0.0, 0 ]), 'can'), # usar
#ObjectTest('small_griddle','freeform_frying_pan138', 8, numpy.matrix([[1, 0, 0],[0, 1, 0],[0, 0, 1]]), numpy.array([0, 0, -0.01 ]), 'can'), # usar
ObjectTest('old_frying_pan','freeform_old_frying_pan139', 8, numpy.matrix([[1, 0, 0],[0, 0, 1],[0, 1, 0]]), numpy.array([0, 0, 0 ]), 'can'), # usar
#ObjectTest('long_pan','freeform_pan_long113', 8, numpy.matrix([[1, 0, 0],[0, 0, 1],[0, 1, 0]]), numpy.array([0, 0, 0.03]), 'can')]
#ObjectTest('medium_bowl','freeform_bowl2128', 1, numpy.matrix([[1, 0, 0],[0, 1, 0],[0, 0, 1]]), numpy.array([0, 0, 0 ]), 'can'),
#ObjectTest('big_bowl','freeform_bowl3129', 1, numpy.matrix([[1, 0, 0],[0, 1, 0],[0, 0, 1]]), numpy.array([0, 0, 0 ]), 'can'),
ObjectTest('ikea_mug','freeform_mug_ikea106', 8, numpy.matrix([[1, 0, 0],[0, 1, 0],[0, 0, 1]]), numpy.array([0.15, -0.15, 0 ]), 'can'), # usar
ObjectTest('short_mug','freeform_mug_short107', 8, numpy.matrix([[1, 0, 0],[0, 1, 0],[0, 0, 1]]), numpy.array([0, -0.05, 0 ]), 'can'), # usar
ObjectTest('large_mug','freeform_mug_large108', 8, numpy.matrix([[1, 0, 0],[0, 1, 0],[0, 0, 1]]), numpy.array([0, -0.05, 0 ]), 'can'), # usar
#ObjectTest('cocktail_glass','freeform_cocktail_glass131', 1, numpy.matrix([[1, 0, 0],[0, 1, 0],[0, 0, 1]]), numpy.array([0, 0, 0 ]), 'can')] # usar
ObjectTest('wine_glass','freeform_wine_glass132', 1, numpy.matrix([[1, 0, 0],[0, 0, -1],[0, 1, 0]]), numpy.array([0, 0.3, 0 ]), 'can'), # usar
ObjectTest('champagne_glass','freeform_glass_champagne104', 1, numpy.matrix([[1, 0, 0],[0, 1, 0],[0, 0, 1]]), numpy.array([0, 0, 0 ]), 'can'), # usar
ObjectTest('cognac_glass','freeform_glass_cognac103', 1, numpy.matrix([[1, 0, 0],[0, 1, 0],[0, 0, 1]]), numpy.array([0, 0, 0 ]), 'can'),
ObjectTest('beer_bottle','freeform_beer_bottle125', 1, numpy.matrix([[1, 0, 0],[0, 1, 0],[0, 0, 1]]), numpy.array([0, 0, 0 ]), 'can'), # usar
#ObjectTest('lying_beer_bottle','freeform_beer_bottle125', 8, numpy.matrix([[1, 0, 0],[0, 0, -1],[0, 1, 0]]), numpy.array([0, 0, 0.05 ]), 'can'), # usar
ObjectTest('booze_bottle','freeform_booze_bottle118', 1, numpy.matrix([[1, 0, 0],[0, 0, 1],[0, 1, 0]]), numpy.array([0, 0, 0.17]), 'can'), # usar
#ObjectTest('lying_booze_bottle','freeform_booze_bottle118', 8, numpy.matrix([[1, 0, 0],[0, 1, 0],[0, 0, 1]]), numpy.array([0, 0, 0.1]), 'can'), # usar
#ObjectTest('wine_bottle','freeform_wine_bottle127', 1, numpy.matrix([[1, 0, 0],[0, 0, 1],[0, 1, 0]]), numpy.array([0 , 0, 0.17]), 'can'), # usar
ObjectTest('champagne_bottle','freeform_champagne_bottle126', 1, numpy.matrix([[1, 0, 0],[0, 1, 0],[0, 0, 1]]), numpy.array([0 , 0, 0 ]), 'can'),
ObjectTest('rounded_can','freeform_can_rounded110', 4, numpy.matrix([[1, 0, 0],[0, 1, 0],[0, 0, 1]]), numpy.array([0.3 , 0, 0 ]), 'can'), # usar
ObjectTest('squared_can','freeform_can_square109', 4, numpy.matrix([[1, 0, 0],[0, 1, 0],[0, 0, 1]]), numpy.array([0.3 , 0, 0 ]), 'can'), # usar
ObjectTest('ellipsoid_can','freeform_can_ellipsoid111', 4, numpy.matrix([[1, 0, 0],[0, 1, 0],[0, 0, 1]]), numpy.array([0.3 , 0, 0 ]), 'can'), # usar
#ObjectTest('long_pot','freeform_pot_long125', 8, numpy.matrix([[1, 0, 0],[0, 0, 1],[0, 1, 0]]), numpy.array([0 , 0, 0 ]), 'can'),
#ObjectTest('coffeemaker','freeform_coffemaker115', 8, numpy.matrix([[1, 0, 0],[0, 1, 0],[0, 0, 1]]), numpy.array([0.3 , 0, 0.1 ]), 'can'),
#ObjectTest('short_pot','freeform_pot_short112', 8, numpy.matrix([[1, 0, 0],[0, 0, 1],[0, 1, 0]]), numpy.array([0 , 0, 0 ]), 'can'),
#ObjectTest('martini_glass','freeform_martini_glass130', 1, numpy.matrix([[1, 0, 0],[0, 1, 0],[0, 0, 1]]), numpy.array([0 , 0.3, 0]), 'can'),
ObjectTest('wide_hammer','freeform_wide_hammer134', 8, numpy.matrix([[0, 0, 1],[0, 1, 0],[1, 0, 0]]) , numpy.array([0 , 0, 0.155]), 'can'),
ObjectTest('claw_hammer','freeform_claw_hammer123', 8, numpy.matrix([[1, 0, 0],[0, 0, 1],[0, -1, 0]]), numpy.array([0 , 0, 0]), 'can'),
ObjectTest('hunting_knife','freeform_hunting_knife117', 8, numpy.matrix([[0, 1, 0],[1, 0, 0],[0, 0, 1]]) , numpy.array([0 , 0, 0.03]), 'can'),
ObjectTest('butcher_knife','freeform_butcher_knife136', 8, numpy.matrix([[1, 0, 0],[0, 1, 0],[0, 0, 1]]) , numpy.array([0.05 , 0.06, 0]), 'can'),
ObjectTest('small_screwdriver','freeform_screwdriver_small142', 8, numpy.matrix([[1, 0, 0],[0, 0, 1],[0, 1, 0]]) , numpy.array([0 , 0, 0]), 'can'),
ObjectTest('big_screwdriver','freeform_screwdriver_big_two143', 8, numpy.matrix([[1, 0, 0],[0, 0, 1],[0, 1, 0]]) , numpy.array([0 , 0.1, 0]), 'can')]
#objectList = [#ObjectTest('wide_hammer','freeform_wide_hammer135', 8, numpy.matrix([[0, 0, 1],[0, 1, 0],[1, 0, 0]]), numpy.array([0 , 0, 0.155]), 'can')]
#ObjectTest('hunting_knife','freeform_hunting_knife118', 8, numpy.matrix([[0, 1, 0],[1, 0, 0],[0, 0, 1]]), numpy.array([0 , 0, 0.03]), 'can')]
#ObjectTest('big_screwdriver','freeform_screwdriver_big_two144', 8, numpy.matrix([[1, 0, 0],[0, 0, 1],[0, 1, 0]]), numpy.array([0 , 0, 0]), 'can')]
#ObjectTest('small_screwdriver','freeform_screwdriver_small143', 8, numpy.matrix([[1, 0, 0],[0, 0, 1],[0, 1, 0]]), numpy.array([0 , 0, 0]), 'can')]
#ObjectTest('claw_hammer','freeform_claw_hammer124', 8, numpy.matrix([[1, 0, 0],[0, 0, 1],[0, -1, 0]]), numpy.array([0 , 0, 0]), 'can')]
# Creates the SimpleActionClient, passing the type of the action (DetectObjectsAction) to the constructor.
client = actionlib.SimpleActionClient('detect_clusters_server', perception_msgs.msg.DetectClustersAction)
# Waits until the action server has started up and started listening for goals.
client.wait_for_server()
print 'Object list length: ' + str(len(objectList))
for elemCounter in range(len(objectList)):
print 'waiting for orca set object position service...'
rospy.wait_for_service('orca_set_object_position')
try:
set_position = rospy.ServiceProxy('orca_set_object_position',SetObjectPosition)
position_array = numpy.mat(numpy.array([-4.5, 4.9, 1.0]))+numpy.mat(objectList[elemCounter].offset)
print position_array
position = Point(x=position_array[0,0],y=position_array[0,1],z=position_array[0,2])
floorPosition = Point(x=-5.5,y=3.8,z=1.0)#-5.5,4.8,1.0
axesFloor = numpy.matrix([[1, 0, 0],[0, 1, 0],[0, 0, 1]])
for orient in range(0,objectList[elemCounter].orientations):
f = open(objectList[elemCounter].name+str(orient), "w")
print 'waiting for orca set object position service...'
rospy.wait_for_service('orca_set_object_position')
try:
if orient == 0 and elemCounter > 0:
resp = set_position(objectList[elemCounter-1].sim_id,floorPosition, array('d',(numpy.resize(axesFloor,(1,9))).flat))#place object
except rospy.ServiceException, e:
print "set position service call failed: %s"%e
            a = orient/float(objectList[elemCounter].orientations)*2.0*math.pi
print 'Current angle: ' + str(a)
#-sin(a), cos(a), 0, -cos(a), -sin(a), 0.0, 0.0, 0.0, 1.0
axes = numpy.matrix([[-math.sin(a), math.cos(a), 0.0], [-math.cos(a), -math.sin(a), 0.0],[ 0.0, 0.0, 1.0]])*objectList[elemCounter].orientation_base
axes_vector = array('d',(numpy.resize(axes,(1,9))).flat)
#resp = set_position(objectList[elemCounter].sim_id,position, )#rotate object
# Creates a goal to send to the action server.
goal = perception_msgs.msg.DetectClustersGoal()
goal.table_region = table_region
goal.object_name = objectList[elemCounter].sim_id
goal.position = position
goal.axes = axes_vector
# Sends the goal to the action server.
resp = set_position(objectList[elemCounter].sim_id,position, axes_vector)#rotate object
#time.sleep(1)
client.send_goal(goal)#, self.perception_done_cb, self.perception_active_cb, self.perception_feedback_cb)
#client.cancel_goal()
client.get_state()
# Waits for the server to finish performing the action.
client.wait_for_result()
if len(client.get_result().clusters_list) == 0:
print 'Detected 0 objects.\n'
#print "Result:", ', '+ str(client.get_result())
f.write(str(client.get_result()))
f.close()
#return False
###print 'OBJECT LIST SIZEEEEEEEEEEEEEEEEEE:', str(len(client.get_result().clusters_list))
###print 'Current orientation: ' + str(orient)
except rospy.ServiceException, e:
print "set position service call failed: %s"%e
# Prints out the result of executing the action
###return client.get_result()
if __name__ == '__main__':
try:
# Initializes a rospy node so that the SimpleActionClient can
# publish and subscribe over ROS.
rospy.init_node('detect_object_clusters_client')
perception_client()
except rospy.ROSInterruptException:
print "program interrupted before completion"
|
kuri-kustar/seekur_jr_perception
|
ist_object_detection/scripts/detect_clusters_client.py
|
Python
|
lgpl-3.0
| 11,760
|
[
"ORCA"
] |
9e389c1c8aa6cbd938e3ffa1f97429906dfec98da17cb87dab044d7c9b022285
|
#!/usr/bin/env python
"""
epic_make_monthly.py
From a list of netcdf files, calculate a 30.42 day average of the chosen
parameter and output
year month var_ave number_of_samples
2001 1 30.1 15
2001 2 30.0 14
2001 3 1e35 0
print output to screen to be captured
Example:
--------
python epic_make_monthly.py cb1_uv_brg_brg_f35.point U_320 > cb1_mean_U320.txt
"""
#System Stack
import datetime
import argparse
import sys
from netCDF4 import Dataset
#Science Stack
import numpy as np
__author__ = 'Shaun Bell'
__email__ = 'shaun.bell@noaa.gov'
__created__ = datetime.datetime(2014, 04, 29)
__modified__ = datetime.datetime(2014, 04, 29)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'monthly','averages'
"""--------------------------------time Routines---------------------------------------"""
def date2pydate(file_time, file_time2=None, file_flag='EPIC'):
if file_flag == 'EPIC':
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
try: #if input is an array
python_time = [None] * len(file_time)
for i, val in enumerate(file_time):
pyday = file_time[i] - offset
pyfrac = file_time2[i] / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time[i] = (pyday + pyfrac)
except:
pyday = file_time - offset
pyfrac = file_time2 / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time = (pyday + pyfrac)
else:
print "time flag not recognized"
sys.exit()
return np.array(python_time)
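# A minimal sanity check, assuming scalar inputs (values are illustrative):
# EPIC julian day 2440000 plus 43200000 ms (12:00) is 1968-05-23 12:00,
# i.e. python ordinal 718575.5:
#
#     date2pydate(2440000, 43200000)   # -> array(718575.5)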
"""--------------------------------netcdf Routines---------------------------------------"""
def get_global_atts(nchandle):
g_atts = {}
att_names = nchandle.ncattrs()
for name in att_names:
g_atts[name] = nchandle.getncattr(name)
return g_atts
def get_vars(nchandle):
return nchandle.variables
def ncreadfile_dic(nchandle, params):
data = {}
for j, v in enumerate(params):
if v in nchandle.variables.keys(): #check for nc variable
data[v] = nchandle.variables[v][:]
        else: #if parameter doesn't exist, store None
data[v] = None
return (data)
"""------------------------------- MAIN--------------------------------------------"""
parser = argparse.ArgumentParser(description='make 30.42 day monthly files from timeseries data')
parser.add_argument('inputpath', metavar='inputpath', type=str, help='full path pointer file with paths to data')
#parser.add_argument('output', metavar='output', type=str, help='optional output path')
parser.add_argument('epic_key', metavar='epic_key', type=str, help='epic key code for variable to be processed')
args = parser.parse_args()
#ptr file has one mooring per line
if '.txt' in args.inputpath:
with open(args.inputpath,'r') as fid:
ifile = fid.read()
fid.close()
nc_files = ifile.split('\n')
elif '.point' in args.inputpath:
with open(args.inputpath,'r') as fid:
ifile = fid.read()
fid.close()
nc_files = ifile.strip().split('\n')
else:
nc_files = [args.inputpath,]
### READ .nc files -- assumes the order of files in ptr file is increasing time
## Only retain designated variable
time = []
var = []
for ncfile in nc_files:
print "Reading file {0}".format(ncfile)
nchandle = Dataset(ncfile,'r')
global_atts = get_global_atts(nchandle)
vars_dic = get_vars(nchandle)
data = ncreadfile_dic(nchandle, vars_dic.keys())
nchandle.close()
time = np.hstack((time,date2pydate(data['time'],data['time2'])))
var = np.hstack((var,data[args.epic_key][:,0,0,0]))
### After all files are read in, find smallest date
year_min = datetime.datetime.fromordinal(int(np.min(time))).year
year_max = np.ceil(np.max(time))
year_ord = datetime.datetime.toordinal(datetime.datetime(year_min,1,1))
month = 1
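# a 30.42 day window approximates a calendar month (365 / 12 ~= 30.42),
# so twelve consecutive windows span very nearly one year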
window = 30.42
for mind in np.arange(year_ord,year_max,window):
data_ind = np.where((time >= mind) & (time < mind + window))
missing_ind = np.where(var[data_ind] < 1e30)
window_average = np.average(var[data_ind][missing_ind])
if np.isnan(window_average):
window_average = 1e35
print "{0} {1} {2} {3}".format(datetime.datetime.fromordinal(int(np.min(mind))).year, month,
window_average, len(var[data_ind][missing_ind]) )
if month == 12:
month = 1
else:
month +=1
|
shaunwbell/FOCI_Analysis
|
ReanalysisRetreival_orig/CB_monthly_aves/epic_make_monthly.py
|
Python
|
mit
| 4,676
|
[
"NetCDF"
] |
f032cdfa9b4af31c1e44e7cfa39241852a9d67b10b30c431362a8c21e5b56abd
|
########################################################################
# $HeadURL $
# File: FileCatalogProxyHandler.py
########################################################################
""" :mod: FileCatalogProxyHandler
================================
.. module: FileCatalogProxyHandler
:synopsis: This is a service which represents a DISET proxy to the File Catalog
"""
## imports
import os
from types import StringTypes, DictType, TupleType
## from DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.Subprocess import pythonCall
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
__RCSID__ = "$Id$"
def initializeFileCatalogProxyHandler( serviceInfo ):
""" service initalisation """
return S_OK()
class FileCatalogProxyHandler( RequestHandler ):
"""
.. class:: FileCatalogProxyHandler
"""
types_callProxyMethod = [ StringTypes, StringTypes, TupleType, DictType ]
def export_callProxyMethod( self, fcName, methodName, args, kargs ):
""" A generic method to call methods of the Storage Element.
"""
res = pythonCall( 120, self.__proxyWrapper, fcName, methodName, args, kargs )
if res['OK']:
return res['Value']
else:
return res
def __proxyWrapper( self, fcName, methodName, args, kwargs ):
""" The wrapper will obtain the client proxy and set it up in the environment.
The required functionality is then executed and returned to the client.
:param self: self reference
    :param str fcName: file catalog name
    :param str methodName: name of the method to call
    :param tuple args: positional arguments
    :param dict kwargs: keyword arguments
"""
result = self.__prepareSecurityDetails()
if not result['OK']:
return result
    proxyLocation = result['Value']
try:
fileCatalog = FileCatalog( [fcName] )
method = getattr( fileCatalog, methodName )
except AttributeError, error:
errStr = "%s proxy: no method named %s" % ( fcName, methodName )
gLogger.exception( errStr, methodName, error )
return S_ERROR( errStr )
try:
result = method( *args, **kwargs )
if os.path.exists(proxyLocation):
os.remove(proxyLocation)
return result
except Exception, error:
if os.path.exists(proxyLocation):
os.remove(proxyLocation)
errStr = "%s proxy: Exception while performing %s" % ( fcName, methodName )
gLogger.exception( errStr, error )
return S_ERROR( errStr )
def __prepareSecurityDetails( self, vomsFlag = True ):
""" Obtains the connection details for the client """
try:
credDict = self.getRemoteCredentials()
clientDN = credDict[ 'DN' ]
clientUsername = credDict['username']
clientGroup = credDict['group']
gLogger.debug( "Getting proxy for %s@%s (%s)" % ( clientUsername, clientGroup, clientDN ) )
if vomsFlag:
result = gProxyManager.downloadVOMSProxyToFile( clientDN, clientGroup )
else:
result = gProxyManager.downloadProxyToFile( clientDN, clientGroup )
if not result['OK']:
return result
gLogger.debug( "Updating environment." )
os.environ['X509_USER_PROXY'] = result['Value']
return result
except Exception, error:
exStr = "__getConnectionDetails: Failed to get client connection details."
gLogger.exception( exStr, '', error )
return S_ERROR( exStr )
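# A minimal client-side sketch (the service path and catalog/method names
# below are assumptions, not taken from this file -- adjust to your setup):
#
#   from DIRAC.Core.DISET.RPCClient import RPCClient
#   fcProxy = RPCClient( "DataManagement/FileCatalogProxy" )
#   res = fcProxy.callProxyMethod( "FileCatalog", "exists", ( "/some/lfn", ), {} )
#   if not res['OK']:
#     gLogger.error( res['Message'] )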
|
avedaee/DIRAC
|
DataManagementSystem/Service/FileCatalogProxyHandler.py
|
Python
|
gpl-3.0
| 3,492
|
[
"DIRAC"
] |
04d95d95f75518a7460b1e2dd3044f102febea1e7a80c3084b65665ad55b143d
|
import click
from functools import update_wrapper
from PIL import Image, ImageFilter, ImageEnhance
@click.group(chain=True)
def cli():
"""This script processes a bunch of images through pillow in a unix
    pipe. One command feeds into the next.
Example:
\b
imagepipe open -i example01.jpg resize -w 128 display
imagepipe open -i example02.jpg blur save
"""
@cli.resultcallback()
def process_commands(processors):
"""This result callback is invoked with an iterable of all the chained
subcommands. As in this example each subcommand returns a function
we can chain them together to feed one into the other, similar to how
a pipe on unix works.
"""
# Start with an empty iterable.
stream = ()
# Pipe it through all stream processors.
for processor in processors:
stream = processor(stream)
# Evaluate the stream and throw away the items.
for _ in stream:
pass
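# A minimal sketch of the chaining idea, independent of click (all names
# here are illustrative):
#
#   def source(stream):
#       for item in stream:        # pass through anything upstream
#           yield item
#       for item in (1, 2, 3):     # then emit our own values
#           yield item
#
#   def double(stream):
#       for item in stream:
#           yield item * 2
#
#   stream = ()
#   for proc in (source, double):
#       stream = proc(stream)
#   list(stream)                   # -> [2, 4, 6]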
def processor(f):
"""Helper decorator to rewrite a function so that it returns another
function from it.
"""
def new_func(*args, **kwargs):
def processor(stream):
return f(stream, *args, **kwargs)
return processor
return update_wrapper(new_func, f)
def generator(f):
"""Similar to the :func:`processor` but passes through old values
unchanged and does not pass through the values as parameter.
"""
@processor
def new_func(stream, *args, **kwargs):
for item in stream:
yield item
for item in f(*args, **kwargs):
yield item
return update_wrapper(new_func, f)
def copy_filename(new, old):
new.filename = old.filename
return new
@cli.command('open')
@click.option('-i', '--image', 'images', type=click.Path(),
multiple=True, help='The image file to open.')
@generator
def open_cmd(images):
"""Loads one or multiple images for processing. The input parameter
can be specified multiple times to load more than one image.
"""
for image in images:
try:
click.echo('Opening "%s"' % image)
if image == '-':
img = Image.open(click.get_binary_stdin())
img.filename = '-'
else:
img = Image.open(image)
yield img
except Exception as e:
click.echo('Could not open image "%s": %s' % (image, e), err=True)
@cli.command('save')
@click.option('--filename', default='processed-%04d.png', type=click.Path(),
help='The format for the filename.',
show_default=True)
@processor
def save_cmd(images, filename):
"""Saves all processed images to a series of files."""
for idx, image in enumerate(images):
try:
fn = filename % (idx + 1)
click.echo('Saving "%s" as "%s"' % (image.filename, fn))
yield image.save(fn)
except Exception as e:
click.echo('Could not save image "%s": %s' %
(image.filename, e), err=True)
@cli.command('display')
@processor
def display_cmd(images):
"""Opens all images in an image viewer."""
for image in images:
click.echo('Displaying "%s"' % image.filename)
image.show()
yield image
@cli.command('resize')
@click.option('-w', '--width', type=int, help='The new width of the image.')
@click.option('-h', '--height', type=int, help='The new height of the image.')
@processor
def resize_cmd(images, width, height):
"""Resizes an image by fitting it into the box without changing
the aspect ratio.
"""
for image in images:
w, h = (width or image.size[0], height or image.size[1])
click.echo('Resizing "%s" to %dx%d' % (image.filename, w, h))
image.thumbnail((w, h))
yield image
@cli.command('crop')
@click.option('-b', '--border', type=int, help='Crop the image from all '
'sides by this amount.')
@processor
def crop_cmd(images, border):
"""Crops an image from all edges."""
for image in images:
box = [0, 0, image.size[0], image.size[1]]
if border is not None:
for idx, val in enumerate(box):
box[idx] = max(0, val - border)
click.echo('Cropping "%s" by %dpx' % (image.filename, border))
yield copy_filename(image.crop(box), image)
else:
yield image
def convert_rotation(ctx, param, value):
if value is None:
return
value = value.lower()
if value in ('90', 'r', 'right'):
return (Image.ROTATE_90, 90)
if value in ('180', '-180'):
return (Image.ROTATE_180, 180)
if value in ('-90', '270', 'l', 'left'):
return (Image.ROTATE_270, 270)
raise click.BadParameter('invalid rotation "%s"' % value)
def convert_flip(ctx, param, value):
if value is None:
return
value = value.lower()
if value in ('lr', 'leftright'):
return (Image.FLIP_LEFT_RIGHT, 'left to right')
if value in ('tb', 'topbottom', 'upsidedown', 'ud'):
        return (Image.FLIP_TOP_BOTTOM, 'top to bottom')
raise click.BadParameter('invalid flip "%s"' % value)
@cli.command('transpose')
@click.option('-r', '--rotate', callback=convert_rotation,
help='Rotates the image (in degrees)')
@click.option('-f', '--flip', callback=convert_flip,
help='Flips the image [LR / TB]')
@processor
def transpose_cmd(images, rotate, flip):
"""Transposes an image by either rotating or flipping it."""
for image in images:
if rotate is not None:
mode, degrees = rotate
click.echo('Rotate "%s" by %ddeg' % (image.filename, degrees))
image = copy_filename(image.transpose(mode), image)
if flip is not None:
mode, direction = flip
click.echo('Flip "%s" %s' % (image.filename, direction))
image = copy_filename(image.transpose(mode), image)
yield image
@cli.command('blur')
@click.option('-r', '--radius', default=2, show_default=True,
help='The blur radius.')
@processor
def blur_cmd(images, radius):
"""Applies gaussian blur."""
blur = ImageFilter.GaussianBlur(radius)
for image in images:
click.echo('Blurring "%s" by %dpx' % (image.filename, radius))
yield copy_filename(image.filter(blur), image)
@cli.command('smoothen')
@click.option('-i', '--iterations', default=1, show_default=True,
help='How many iterations of the smoothen filter to run.')
@processor
def smoothen_cmd(images, iterations):
"""Applies a smoothening filter."""
for image in images:
click.echo('Smoothening "%s" %d time%s' %
(image.filename, iterations, iterations != 1 and 's' or '',))
        for x in range(iterations):
image = copy_filename(image.filter(ImageFilter.BLUR), image)
yield image
@cli.command('emboss')
@processor
def emboss_cmd(images):
"""Embosses an image."""
for image in images:
click.echo('Embossing "%s"' % image.filename)
yield copy_filename(image.filter(ImageFilter.EMBOSS), image)
@cli.command('sharpen')
@click.option('-f', '--factor', default=2.0,
help='Sharpens the image.', show_default=True)
@processor
def sharpen_cmd(images, factor):
"""Sharpens an image."""
for image in images:
click.echo('Sharpen "%s" by %f' % (image.filename, factor))
enhancer = ImageEnhance.Sharpness(image)
yield copy_filename(enhancer.enhance(max(1.0, factor)), image)
@cli.command('paste')
@click.option('-l', '--left', default=0, help='Offset from left.')
@click.option('-r', '--right', default=0, help='Offset from right.')
@processor
def paste_cmd(images, left, right):
"""Pastes the second image on the first image and leaves the rest
unchanged.
"""
imageiter = iter(images)
image = next(imageiter, None)
to_paste = next(imageiter, None)
if to_paste is None:
if image is not None:
yield image
return
click.echo('Paste "%s" on "%s"' %
(to_paste.filename, image.filename))
mask = None
if to_paste.mode == 'RGBA' or 'transparency' in to_paste.info:
mask = to_paste
image.paste(to_paste, (left, right), mask)
image.filename += '+' + to_paste.filename
yield image
for image in imageiter:
yield image
|
staranjeet/fjord
|
vendor/packages/click/examples/imagepipe/imagepipe.py
|
Python
|
bsd-3-clause
| 8,425
|
[
"Gaussian"
] |
1f2f5b8700c3c03d88e71ca1128aeccecadf3214651751f8e0abd7f4b16f782c
|
"""
File format detector
"""
import logging, sys, os, csv, tempfile, shutil, re, zipfile
import registry
from galaxy import util
log = logging.getLogger(__name__)
def get_test_fname(fname):
"""Returns test data filename"""
path, name = os.path.split(__file__)
full_path = os.path.join(path, 'test', fname)
return full_path
def stream_to_file( stream, suffix='', prefix='', dir=None, text=False ):
"""Writes a stream to a temporary file, returns the temporary file's name"""
fd, temp_name = tempfile.mkstemp( suffix=suffix, prefix=prefix, dir=dir, text=text )
CHUNK_SIZE = 1048576
data_checked = False
is_compressed = False
is_binary = False
is_multi_byte = False
while 1:
chunk = stream.read( CHUNK_SIZE )
if not chunk:
break
if not data_checked:
# See if we're uploading a compressed file
if zipfile.is_zipfile( temp_name ):
is_compressed = True
else:
try:
if unicode( chunk[:2] ) == unicode( util.gzip_magic ):
is_compressed = True
except:
pass
if not is_compressed:
# See if we have a multi-byte character file
chars = chunk[:100]
is_multi_byte = util.is_multi_byte( chars )
if not is_multi_byte:
for char in chars:
if ord( char ) > 128:
is_binary = True
break
data_checked = True
if not is_compressed and not is_binary:
os.write( fd, chunk.encode( "utf-8" ) )
else:
# Compressed files must be encoded after they are uncompressed in the upload utility,
# while binary files should not be encoded at all.
os.write( fd, chunk )
os.close( fd )
return temp_name, is_multi_byte
def check_newlines( fname, bytes_to_read=52428800 ):
"""
    Determines if there are any non-POSIX newlines in the first
    bytes_to_read (by default, 50MB) bytes of the file.
    """
    CHUNK_SIZE = 2 ** 20
    f = open( fname, 'r' )
    while f.tell() < bytes_to_read:
        # read whole chunks; iterating over f.read() would loop per character
        chunk = f.read( CHUNK_SIZE )
        if not chunk:
            break
        if chunk.count( '\r' ):
            f.close()
            return True
    f.close()
    return False
def convert_newlines( fname ):
"""
Converts in place a file from universal line endings
to Posix line endings.
>>> fname = get_test_fname('temp.txt')
>>> file(fname, 'wt').write("1 2\\r3 4")
>>> convert_newlines(fname)
2
>>> file(fname).read()
'1 2\\n3 4\\n'
"""
fd, temp_name = tempfile.mkstemp()
fp = os.fdopen( fd, "wt" )
for i, line in enumerate( file( fname, "U" ) ):
fp.write( "%s\n" % line.rstrip( "\r\n" ) )
fp.close()
shutil.move( temp_name, fname )
# Return number of lines in file.
return i + 1
def sep2tabs(fname, patt="\\s+"):
"""
Transforms in place a 'sep' separated file to a tab separated one
>>> fname = get_test_fname('temp.txt')
>>> file(fname, 'wt').write("1 2\\n3 4\\n")
>>> sep2tabs(fname)
2
>>> file(fname).read()
'1\\t2\\n3\\t4\\n'
"""
regexp = re.compile( patt )
fd, temp_name = tempfile.mkstemp()
fp = os.fdopen( fd, "wt" )
for i, line in enumerate( file( fname ) ):
line = line.rstrip( '\r\n' )
elems = regexp.split( line )
fp.write( "%s\n" % '\t'.join( elems ) )
fp.close()
shutil.move( temp_name, fname )
# Return number of lines in file.
return i + 1
def convert_newlines_sep2tabs( fname, patt="\\s+" ):
"""
Combines above methods: convert_newlines() and sep2tabs()
so that files do not need to be read twice
>>> fname = get_test_fname('temp.txt')
>>> file(fname, 'wt').write("1 2\\r3 4")
>>> convert_newlines_sep2tabs(fname)
2
>>> file(fname).read()
'1\\t2\\n3\\t4\\n'
"""
regexp = re.compile( patt )
fd, temp_name = tempfile.mkstemp()
fp = os.fdopen( fd, "wt" )
for i, line in enumerate( file( fname, "U" ) ):
line = line.rstrip( '\r\n' )
elems = regexp.split( line )
fp.write( "%s\n" % '\t'.join( elems ) )
fp.close()
shutil.move( temp_name, fname )
# Return number of lines in file.
return i + 1
def get_headers( fname, sep, count=60, is_multi_byte=False ):
"""
Returns a list with the first 'count' lines split by 'sep'
>>> fname = get_test_fname('complete.bed')
>>> get_headers(fname,'\\t')
[['chr7', '127475281', '127491632', 'NM_000230', '0', '+', '127486022', '127488767', '0', '3', '29,172,3225,', '0,10713,13126,'], ['chr7', '127486011', '127488900', 'D49487', '0', '+', '127486022', '127488767', '0', '2', '155,490,', '0,2399']]
"""
headers = []
for idx, line in enumerate(file(fname)):
line = line.rstrip('\n\r')
if is_multi_byte:
# TODO: fix this - sep is never found in line
line = unicode( line, 'utf-8' )
sep = sep.encode( 'utf-8' )
headers.append( line.split(sep) )
if idx == count:
break
return headers
def is_column_based( fname, sep='\t', skip=0, is_multi_byte=False ):
"""
Checks whether the file is column based with respect to a separator
(defaults to tab separator).
>>> fname = get_test_fname('test.gff')
>>> is_column_based(fname)
True
>>> fname = get_test_fname('test_tab.bed')
>>> is_column_based(fname)
True
>>> is_column_based(fname, sep=' ')
False
>>> fname = get_test_fname('test_space.txt')
>>> is_column_based(fname)
False
>>> is_column_based(fname, sep=' ')
True
>>> fname = get_test_fname('test_ensembl.tab')
>>> is_column_based(fname)
True
>>> fname = get_test_fname('test_tab1.tabular')
>>> is_column_based(fname, sep=' ', skip=0)
False
>>> fname = get_test_fname('test_tab1.tabular')
>>> is_column_based(fname)
True
"""
headers = get_headers( fname, sep, is_multi_byte=is_multi_byte )
count = 0
if not headers:
return False
for hdr in headers[skip:]:
if hdr and hdr[0] and not hdr[0].startswith('#'):
if len(hdr) > 1:
count = len(hdr)
break
if count < 2:
return False
for hdr in headers[skip:]:
if hdr and hdr[0] and not hdr[0].startswith('#'):
if len(hdr) != count:
return False
return True
def guess_ext( fname, sniff_order=None, is_multi_byte=False ):
"""
Returns an extension that can be used in the datatype factory to
    generate a datatype for the 'fname' file
>>> fname = get_test_fname('megablast_xml_parser_test1.blastxml')
>>> guess_ext(fname)
'blastxml'
>>> fname = get_test_fname('interval.interval')
>>> guess_ext(fname)
'interval'
>>> fname = get_test_fname('interval1.bed')
>>> guess_ext(fname)
'bed'
>>> fname = get_test_fname('test_tab.bed')
>>> guess_ext(fname)
'bed'
>>> fname = get_test_fname('sequence.maf')
>>> guess_ext(fname)
'maf'
>>> fname = get_test_fname('sequence.fasta')
>>> guess_ext(fname)
'fasta'
>>> fname = get_test_fname('file.html')
>>> guess_ext(fname)
'html'
>>> fname = get_test_fname('test.gff')
>>> guess_ext(fname)
'gff'
>>> fname = get_test_fname('gff_version_3.gff')
>>> guess_ext(fname)
'gff3'
>>> fname = get_test_fname('temp.txt')
>>> file(fname, 'wt').write("a\\t2\\nc\\t1\\nd\\t0")
>>> guess_ext(fname)
'tabular'
>>> fname = get_test_fname('temp.txt')
>>> file(fname, 'wt').write("a 1 2 x\\nb 3 4 y\\nc 5 6 z")
>>> guess_ext(fname)
'txt'
>>> fname = get_test_fname('test_tab1.tabular')
>>> guess_ext(fname)
'tabular'
>>> fname = get_test_fname('alignment.lav')
>>> guess_ext(fname)
'lav'
"""
if sniff_order is None:
datatypes_registry = registry.Registry()
sniff_order = datatypes_registry.sniff_order
for datatype in sniff_order:
"""
Some classes may not have a sniff function, which is ok. In fact, the
Tabular and Text classes are 2 examples of classes that should never have
a sniff function. Since these classes are default classes, they contain
few rules to filter out data of other formats, so they should be called
from this function after all other datatypes in sniff_order have not been
successfully discovered.
"""
try:
if datatype.sniff( fname ):
return datatype.file_ext
except:
pass
headers = get_headers( fname, None )
is_binary = False
if is_multi_byte:
is_binary = False
else:
for hdr in headers:
for char in hdr:
if len( char ) > 1:
for c in char:
if ord( c ) > 128:
is_binary = True
break
elif ord( char ) > 128:
is_binary = True
break
if is_binary:
break
if is_binary:
break
if is_binary:
return 'data' #default binary data type file extension
if is_column_based( fname, '\t', 1, is_multi_byte=is_multi_byte ):
return 'tabular' #default tabular data type file extension
return 'txt' #default text data type file extension
if __name__ == '__main__':
import doctest, sys
doctest.testmod(sys.modules[__name__])
|
dbcls/dbcls-galaxy
|
lib/galaxy/datatypes/sniff.py
|
Python
|
mit
| 9,773
|
[
"Galaxy"
] |
e91919f7b98f33ba1a4823e419be6e43789e2264ecfdda9f4669b6c71ef9a01d
|
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Prabhu Ramachandran
# License: BSD Style.
# Enthought library imports.
from traits.api import HasTraits, List, Str, Instance
from apptools.preferences.api import PreferencesHelper
###############################################################################
# `PreferencesMirror` class.
###############################################################################
class PreferencesMirror(HasTraits):
"""
This class mirrors preferences from a PreferencesHelper such that
users can use them and change them but nothing is saved to disk till
the user asks for an explicit save.
"""
# The preferences we mirror.
preferences = Instance(PreferencesHelper)
# Private trait to store names of traits.
_trait_names = List(Str)
######################################################################
# Public interface.
######################################################################
def save(self):
"""Updates the actual preferences and thereby persists them to
disk.
"""
for name in self._trait_names:
setattr(self.preferences, name, getattr(self, name))
######################################################################
# Private interface.
######################################################################
def _preferences_changed(self):
"""Setup traits of our own based on those of the mayavi
preferences.
"""
trait_names = []
opts = self.preferences
for key, value in opts.traits().iteritems():
if key not in ['trait_added', 'trait_modified',
'preferences', 'preferences_path']:
self.add_trait(key, value)
setattr(self, key, getattr(opts, key))
trait_names.append(key)
opts.on_trait_change(self._update, key)
self._trait_names = trait_names
def _update(self, obj, name, old, new):
setattr(self, name, new)
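# A minimal usage sketch (SomePreferencesHelper is hypothetical), showing the
# deferred persistence: edits touch only the mirror until save() is called.
#
#   mirror = PreferencesMirror(preferences=SomePreferencesHelper())
#   mirror.backend = 'test-backend'   # not persisted yet
#   mirror.save()                     # now written through to the helper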
|
liulion/mayavi
|
mayavi/tools/preferences_mirror.py
|
Python
|
bsd-3-clause
| 2,102
|
[
"Mayavi"
] |
efbf7fc6e297951bf928dfde0f5c4b0c5924fa98f154986a43453b1de64351d7
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import filecmp
import os
import re
import shutil
import unittest
import pandas as pd
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.io.lammps.data import LammpsData
from pymatgen.io.lammps.inputs import LammpsRun, write_lammps_inputs
from pymatgen.util.testing import PymatgenTest
class LammpsRunTest(unittest.TestCase):
maxDiff = None
def test_md(self):
s = Structure.from_spacegroup(225, Lattice.cubic(3.62126), ["Cu"], [[0, 0, 0]])
ld = LammpsData.from_structure(s, atom_style="atomic")
ff = "\n".join(["pair_style eam", "pair_coeff * * Cu_u3.eam"])
md = LammpsRun.md(data=ld, force_field=ff, temperature=1600.0, nsteps=10000)
md.write_inputs(output_dir="md")
with open(os.path.join("md", "in.md")) as f:
md_script = f.read()
script_string = """# Sample input script template for MD
# Initialization
units metal
atom_style atomic
# Atom definition
read_data md.data
#read_restart md.restart
# Force field settings (consult official document for detailed formats)
pair_style eam
pair_coeff * * Cu_u3.eam
# Create velocities
velocity all create 1600.0 142857 mom yes rot yes dist gaussian
# Ensemble constraints
#fix 1 all nve
fix 1 all nvt temp 1600.0 1600.0 0.1
#fix 1 all npt temp 1600.0 1600.0 0.1 iso $pressure $pressure 1.0
# Various operations within timestepping
#fix ...
#compute ...
# Output settings
#thermo_style custom ... # control the thermo data type to output
thermo 100 # output thermo data every N steps
#dump 1 all atom 100 traj.*.gz # dump a snapshot every 100 steps
# Actions
run 10000
"""
self.assertEqual(md_script, script_string)
self.assertTrue(os.path.exists(os.path.join("md", "md.data")))
@classmethod
def tearDownClass(cls):
temp_dirs = ["md"]
for td in temp_dirs:
if os.path.exists(td):
shutil.rmtree(td)
class FuncTest(unittest.TestCase):
def test_write_lammps_inputs(self):
# script template
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "lammps", "kappa.txt")) as f:
kappa_template = f.read()
kappa_settings = {"method": "heat"}
write_lammps_inputs(output_dir="heat", script_template=kappa_template, settings=kappa_settings)
with open(os.path.join("heat", "in.lammps")) as f:
kappa_script = f.read()
fix_hot = re.search(r"fix\s+hot\s+all\s+([^\s]+)\s+", kappa_script)
# placeholders supposed to be filled
self.assertEqual(fix_hot.group(1), "heat")
fix_cold = re.search(r"fix\s+cold\s+all\s+([^\s]+)\s+", kappa_script)
self.assertEqual(fix_cold.group(1), "heat")
lattice = re.search(r"lattice\s+fcc\s+(.*)\n", kappa_script)
# parentheses not supposed to be filled
self.assertEqual(lattice.group(1), "${rho}")
pair_style = re.search(r"pair_style\slj/cut\s+(.*)\n", kappa_script)
self.assertEqual(pair_style.group(1), "${rc}")
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "lammps", "in.peptide")) as f:
peptide_script = f.read()
# copy data file
src = os.path.join(PymatgenTest.TEST_FILES_DIR, "lammps", "data.quartz")
write_lammps_inputs(output_dir="path", script_template=peptide_script, data=src)
dst = os.path.join("path", "data.peptide")
self.assertTrue(filecmp.cmp(src, dst, shallow=False))
# write data file from obj
obj = LammpsData.from_file(src, atom_style="atomic")
write_lammps_inputs(output_dir="obj", script_template=peptide_script, data=obj)
obj_read = LammpsData.from_file(os.path.join("obj", "data.peptide"), atom_style="atomic")
pd.testing.assert_frame_equal(obj_read.masses, obj.masses)
pd.testing.assert_frame_equal(obj_read.atoms, obj.atoms)
@classmethod
def tearDownClass(cls):
temp_dirs = ["heat", "path", "obj"]
for td in temp_dirs:
if os.path.exists(td):
shutil.rmtree(td)
if __name__ == "__main__":
unittest.main()
|
vorwerkc/pymatgen
|
pymatgen/io/lammps/tests/test_inputs.py
|
Python
|
mit
| 4,338
|
[
"Gaussian",
"LAMMPS",
"pymatgen"
] |
da9452b9dbbf585a6820efe3c29c31805439d322489cddbc3d2077394499f364
|
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
import subprocess
import tarfile
from testrunner.local import testsuite
from testrunner.objects import testcase
class BenchmarksTestSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(BenchmarksTestSuite, self).__init__(name, root)
self.testroot = root
def ListTests(self, context):
tests = []
for test in [
"kraken/ai-astar",
"kraken/audio-beat-detection",
"kraken/audio-dft",
"kraken/audio-fft",
"kraken/audio-oscillator",
"kraken/imaging-darkroom",
"kraken/imaging-desaturate",
"kraken/imaging-gaussian-blur",
"kraken/json-parse-financial",
"kraken/json-stringify-tinderbox",
"kraken/stanford-crypto-aes",
"kraken/stanford-crypto-ccm",
"kraken/stanford-crypto-pbkdf2",
"kraken/stanford-crypto-sha256-iterative",
"octane/box2d",
"octane/code-load",
"octane/crypto",
"octane/deltablue",
"octane/earley-boyer",
"octane/gbemu",
"octane/mandreel",
"octane/navier-stokes",
"octane/pdfjs",
"octane/raytrace",
"octane/regexp",
"octane/richards",
"octane/splay",
"sunspider/3d-cube",
"sunspider/3d-morph",
"sunspider/3d-raytrace",
"sunspider/access-binary-trees",
"sunspider/access-fannkuch",
"sunspider/access-nbody",
"sunspider/access-nsieve",
"sunspider/bitops-3bit-bits-in-byte",
"sunspider/bitops-bits-in-byte",
"sunspider/bitops-bitwise-and",
"sunspider/bitops-nsieve-bits",
"sunspider/controlflow-recursive",
"sunspider/crypto-aes",
"sunspider/crypto-md5",
"sunspider/crypto-sha1",
"sunspider/date-format-tofte",
"sunspider/date-format-xparb",
"sunspider/math-cordic",
"sunspider/math-partial-sums",
"sunspider/math-spectral-norm",
"sunspider/regexp-dna",
"sunspider/string-base64",
"sunspider/string-fasta",
"sunspider/string-tagcloud",
"sunspider/string-unpack-code",
"sunspider/string-validate-input"]:
tests.append(testcase.TestCase(self, test))
return tests
def GetFlagsForTestCase(self, testcase, context):
result = []
result += context.mode_flags
if testcase.path.startswith("kraken"):
result.append(os.path.join(self.testroot, "%s-data.js" % testcase.path))
result.append(os.path.join(self.testroot, "%s.js" % testcase.path))
elif testcase.path.startswith("octane"):
result.append(os.path.join(self.testroot, "octane/base.js"))
result.append(os.path.join(self.testroot, "%s.js" % testcase.path))
result += ["-e", "BenchmarkSuite.RunSuites({});"]
elif testcase.path.startswith("sunspider"):
result.append(os.path.join(self.testroot, "%s.js" % testcase.path))
return testcase.flags + result
def GetSourceForTest(self, testcase):
filename = os.path.join(self.testroot, testcase.path + ".js")
with open(filename) as f:
return f.read()
def _DownloadIfNecessary(self, url, revision, target_dir):
# Maybe we're still up to date?
revision_file = "CHECKED_OUT_%s" % target_dir
checked_out_revision = None
if os.path.exists(revision_file):
with open(revision_file) as f:
checked_out_revision = f.read()
if checked_out_revision == revision:
return
# If we have a local archive file with the test data, extract it.
if os.path.exists(target_dir):
shutil.rmtree(target_dir)
archive_file = "downloaded_%s_%s.tar.gz" % (target_dir, revision)
if os.path.exists(archive_file):
with tarfile.open(archive_file, "r:gz") as tar:
tar.extractall()
with open(revision_file, "w") as f:
f.write(revision)
return
# No cached copy. Check out via SVN, and pack as .tar.gz for later use.
command = "svn co %s -r %s %s" % (url, revision, target_dir)
code = subprocess.call(command, shell=True)
if code != 0:
raise Exception("Error checking out %s benchmark" % target_dir)
with tarfile.open(archive_file, "w:gz") as tar:
tar.add("%s" % target_dir)
with open(revision_file, "w") as f:
f.write(revision)
def DownloadData(self):
old_cwd = os.getcwd()
os.chdir(os.path.abspath(self.root))
self._DownloadIfNecessary(
("http://svn.webkit.org/repository/webkit/trunk/PerformanceTests/"
"SunSpider/tests/sunspider-1.0/"),
"153700", "sunspider")
self._DownloadIfNecessary(
("http://kraken-mirror.googlecode.com/svn/trunk/kraken/tests/"
"kraken-1.1/"),
"8", "kraken")
self._DownloadIfNecessary(
"http://octane-benchmark.googlecode.com/svn/trunk/",
"22", "octane")
os.chdir(old_cwd)
def VariantFlags(self, testcase, default_flags):
# Both --nocrankshaft and --stressopt are very slow.
return [[]]
def GetSuite(name, root):
return BenchmarksTestSuite(name, root)
|
h0x91b/redis-v8
|
redis/deps/v8/test/benchmarks/testcfg.py
|
Python
|
mit
| 6,634
|
[
"Gaussian"
] |
6445dd9a5930d7e95952b22010b3d792489b2ba6eebdaa1ab7eb5969e9339007
|
import sys
import numpy as np
try:
# Matplotlib is not a dependency
import matplotlib as mpl
mpl.use('Agg') # force the antigrain backend
except (ImportError, RuntimeError):
mpl = None
from ase.units import Bohr
from gpaw.mpi import world, distribute_cpus
from gpaw.utilities.tools import md5_array
from gpaw.utilities.gauss import gaussian_wave
from gpaw.band_descriptor import BandDescriptor
from gpaw.grid_descriptor import GridDescriptor
from gpaw.kpt_descriptor import KPointDescriptor
from gpaw.kohnsham_layouts import BandLayouts
from gpaw.parameters import InputParameters
from gpaw.xc import XC
from gpaw.setup import SetupData, Setups
from gpaw.wavefunctions.base import WaveFunctions
from gpaw.wavefunctions.fd import FDWaveFunctions
from gpaw.fd_operators import Laplace # required but not really used
from gpaw.pair_overlap import GridPairOverlap, ProjectorPairOverlap
# -------------------------------------------------------------------
from gpaw.test.ut_common import ase_svnversion, shapeopt, TestCase, \
TextTestRunner, CustomTextTestRunner, defaultTestLoader, \
initialTestLoader, create_random_atoms, create_parsize_minbands
# -------------------------------------------------------------------
p = InputParameters(spinpol=False)
xc = XC(p.xc)
p.setups = dict([(symbol, SetupData(symbol, xc.name)) for symbol in 'HO'])
class UTDomainParallelSetup(TestCase):
"""
    Set up a simple domain parallel calculation."""
# Number of bands
nbands = 1
# Spin-paired, single kpoint
nspins = 1
nibzkpts = 1
# Mean spacing and number of grid points per axis (G x G x G)
h = 0.25 / Bohr
G = 48
# Type of boundary conditions employed
boundaries = None
def setUp(self):
for virtvar in ['boundaries']:
assert getattr(self,virtvar) is not None, 'Virtual "%s"!' % virtvar
parsize_domain, parsize_bands = create_parsize_minbands(self.nbands, world.size)
assert self.nbands % np.prod(parsize_bands) == 0
domain_comm, kpt_comm, band_comm = distribute_cpus(parsize_domain,
parsize_bands, self.nspins, self.nibzkpts)
# Set up band descriptor:
self.bd = BandDescriptor(self.nbands, band_comm)
# Set up grid descriptor:
res, ngpts = shapeopt(300, self.G**3, 3, 0.2)
cell_c = self.h * np.array(ngpts)
pbc_c = {'zero' : False, \
'periodic': True, \
'mixed' : (True, False, True)}[self.boundaries]
self.gd = GridDescriptor(ngpts, cell_c, pbc_c, domain_comm, parsize_domain)
# What to do about kpoints?
self.kpt_comm = kpt_comm
def tearDown(self):
del self.bd, self.gd, self.kpt_comm
# =================================
def verify_comm_sizes(self):
if world.size == 1:
return
comm_sizes = tuple([comm.size for comm in [world, self.bd.comm, \
self.gd.comm, self.kpt_comm]])
self._parinfo = '%d world, %d band, %d domain, %d kpt' % comm_sizes
self.assertEqual(self.nbands % self.bd.comm.size, 0)
self.assertEqual((self.nspins*self.nibzkpts) % self.kpt_comm.size, 0)
class UTDomainParallelSetup_Zero(UTDomainParallelSetup):
__doc__ = UTDomainParallelSetup.__doc__
boundaries = 'zero'
class UTDomainParallelSetup_Periodic(UTDomainParallelSetup):
__doc__ = UTDomainParallelSetup.__doc__
boundaries = 'periodic'
class UTDomainParallelSetup_Mixed(UTDomainParallelSetup):
__doc__ = UTDomainParallelSetup.__doc__
boundaries = 'mixed'
# -------------------------------------------------------------------
# Helper functions/classes here
class FDWFS(FDWaveFunctions):
def __init__(self, gd, bd, kd, setups, dtype): # override constructor
assert kd.comm.size == 1
WaveFunctions.__init__(self, gd, 1, setups, bd, dtype, world,
kd, None)
self.kin = Laplace(gd, -0.5, dtype=dtype)
self.diagksl = None
self.orthoksl = BandLayouts(gd, bd, dtype)
self.initksl = None
self.overlap = None
self.rank_a = None
def allocate_arrays_for_projections(self, my_atom_indices): # no alloc
pass
def collect_projections(self, P_ani):
if self.gd.comm.size == 1 and self.bd.comm.size == 1:
return np.concatenate([P_ni.T for P_ni in P_ani.values()])
assert len(self.kpt_u) == 1
self.kpt_u[0].P_ani = P_ani
all_P_ni = WaveFunctions.collect_projections(self, 0, 0)
if self.world.rank == 0:
P_In = all_P_ni.T.copy()
else:
nproj = sum([setup.ni for setup in self.setups])
P_In = np.empty((nproj, self.bd.nbands), self.pt.dtype)
self.world.broadcast(P_In, 0)
return P_In
# -------------------------------------------------------------------
class UTGaussianWavefunctionSetup(UTDomainParallelSetup):
__doc__ = UTDomainParallelSetup.__doc__ + """
The pseudo wavefunctions are moving gaussians centered around each atom."""
allocated = False
dtype = None
# Default arguments for scaled Gaussian wave
_sigma0 = 2.0 #0.75
_k0_c = 2*np.pi*np.array([1/5., 1/3., 0.])
def setUp(self):
UTDomainParallelSetup.setUp(self)
for virtvar in ['dtype']:
assert getattr(self,virtvar) is not None, 'Virtual "%s"!' % virtvar
# Create randomized atoms
self.atoms = create_random_atoms(self.gd, 5) # also tested: 10xNH3/BDA
# XXX DEBUG START
if False:
from ase import view
view(self.atoms*(1+2*self.gd.pbc_c))
# XXX DEBUG END
# Do we agree on the atomic positions?
pos_ac = self.atoms.get_positions()
pos_rac = np.empty((world.size,)+pos_ac.shape, pos_ac.dtype)
world.all_gather(pos_ac, pos_rac)
if (pos_rac-pos_rac[world.rank,...][np.newaxis,...]).any():
raise RuntimeError('Discrepancy in atomic positions detected.')
# Create setups for atoms
self.Z_a = self.atoms.get_atomic_numbers()
self.setups = Setups(self.Z_a, p.setups, p.basis,
p.lmax, xc)
# K-point descriptor
bzk_kc = np.array([[0, 0, 0]], dtype=float)
self.kd = KPointDescriptor(bzk_kc, 1)
self.kd.set_symmetry(self.atoms, self.setups, usesymm=True)
self.kd.set_communicator(self.kpt_comm)
# Create gamma-point dummy wavefunctions
self.wfs = FDWFS(self.gd, self.bd, self.kd, self.setups,
self.dtype)
spos_ac = self.atoms.get_scaled_positions() % 1.0
self.wfs.set_positions(spos_ac)
self.pt = self.wfs.pt # XXX shortcut
        ## Also create pseudo partial waves
#from gpaw.lfc import LFC
#self.phit = LFC(self.gd, [setup.phit_j for setup in self.setups], \
# self.kpt_comm, dtype=self.dtype)
#self.phit.set_positions(spos_ac)
self.r_cG = None
self.buf_G = None
self.psit_nG = None
self.allocate()
def tearDown(self):
UTDomainParallelSetup.tearDown(self)
del self.r_cG, self.buf_G, self.psit_nG
del self.pt, self.setups, self.atoms
self.allocated = False
def allocate(self):
self.r_cG = self.gd.get_grid_point_coordinates()
cell_cv = self.atoms.get_cell() / Bohr
assert np.abs(cell_cv-self.gd.cell_cv).max() < 1e-9
center_c = 0.5*cell_cv.diagonal()
self.buf_G = self.gd.empty(dtype=self.dtype)
self.psit_nG = self.gd.empty(self.bd.mynbands, dtype=self.dtype)
for myn,psit_G in enumerate(self.psit_nG):
n = self.bd.global_index(myn)
psit_G[:] = self.get_scaled_gaussian_wave(center_c, scale=10+2j*n)
k_c = 2*np.pi*np.array([1/2., -1/7., 0.])
for pos_c in self.atoms.get_positions() / Bohr:
sigma = self._sigma0/(1+np.sum(pos_c**2))**0.5
psit_G += self.get_scaled_gaussian_wave(pos_c, sigma, k_c, n+5j)
self.allocated = True
def get_scaled_gaussian_wave(self, pos_c, sigma=None, k_c=None, scale=None):
if sigma is None:
sigma = self._sigma0
if k_c is None:
k_c = self._k0_c
if scale is None:
A = None
else:
            # Normalization: 4*pi*int_0^inf exp(-r^2/(2*w^2))^2 r^2 dr = w^3*pi^(3/2),
            # so requiring |A|^2 * w^3*pi^(3/2) = scale^2 gives
            # A = scale*(sqrt(pi)*w)^(-3/2), i.e. the squared norm integrates to scale^2.
A = scale/(sigma*(np.pi)**0.5)**1.5
return gaussian_wave(self.r_cG, pos_c, sigma, k_c, A, self.dtype, self.buf_G)
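    # Sanity check (a hedged sketch, not part of the original tests): with the
    # normalization above, the squared norm of the returned wave integrates to
    # |scale|**2 on the grid, e.g.
    #     psit_G = self.get_scaled_gaussian_wave(center_c, scale=3.0)
    #     norm2 = self.gd.integrate(np.abs(psit_G)**2)  # ~ 9.0 up to grid error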
def check_and_plot(self, P_ani, P0_ani, digits, keywords=''):
# Collapse into viewable matrices
P_In = self.wfs.collect_projections(P_ani)
P0_In = self.wfs.collect_projections(P0_ani)
# Construct fingerprint of input matrices for comparison
fingerprint = np.array([md5_array(P_In, numeric=True),
md5_array(P0_In, numeric=True)])
# Compare fingerprints across all processors
fingerprints = np.empty((world.size, 2), np.int64)
world.all_gather(fingerprint, fingerprints)
if fingerprints.ptp(0).any():
raise RuntimeError('Distributed matrices are not identical!')
# If assertion fails, catch temporarily while plotting, then re-raise
try:
self.assertAlmostEqual(np.abs(P_In-P0_In).max(), 0, digits)
except AssertionError:
if world.rank == 0 and mpl is not None:
from matplotlib.figure import Figure
fig = Figure()
ax = fig.add_axes([0.0, 0.1, 1.0, 0.83])
ax.set_title(self.__class__.__name__)
im = ax.imshow(np.abs(P_In-P0_In), interpolation='nearest')
fig.colorbar(im)
fig.text(0.5, 0.05, 'Keywords: ' + keywords, \
horizontalalignment='center', verticalalignment='top')
from matplotlib.backends.backend_agg import FigureCanvasAgg
img = 'ut_invops_%s_%s.png' % (self.__class__.__name__, \
'_'.join(keywords.split(',')))
FigureCanvasAgg(fig).print_figure(img.lower(), dpi=90)
raise
# =================================
def test_projection_linearity(self):
kpt = self.wfs.kpt_u[0]
Q_ani = self.pt.dict(self.bd.mynbands)
self.pt.integrate(self.psit_nG, Q_ani, q=kpt.q)
for Q_ni in Q_ani.values():
self.assertTrue(Q_ni.dtype == self.dtype)
P0_ani = dict([(a,Q_ni.copy()) for a,Q_ni in Q_ani.items()])
self.pt.add(self.psit_nG, Q_ani, q=kpt.q)
self.pt.integrate(self.psit_nG, P0_ani, q=kpt.q)
#rank_a = self.gd.get_ranks_from_positions(spos_ac)
#my_atom_indices = np.argwhere(self.gd.comm.rank == rank_a).ravel()
        # TODO XXX should fix PairOverlap-ish stuff for <p~_i^a|phi~_i'^a'> overlaps
#spos_ac = self.pt.spos_ac # NewLFC doesn't have this
spos_ac = self.atoms.get_scaled_positions() % 1.0
gpo = GridPairOverlap(self.gd, self.setups)
B_aa = gpo.calculate_overlaps(spos_ac, self.pt)
# Compare fingerprints across all processors
fingerprint = np.array([md5_array(B_aa, numeric=True)])
fingerprints = np.empty(world.size, np.int64)
world.all_gather(fingerprint, fingerprints)
if fingerprints.ptp(0).any():
raise RuntimeError('Distributed matrices are not identical!')
P_ani = dict([(a,Q_ni.copy()) for a,Q_ni in Q_ani.items()])
for a1 in range(len(self.atoms)):
if a1 in P_ani.keys():
P_ni = P_ani[a1]
else:
# Atom a1 is not in domain so allocate a temporary buffer
P_ni = np.zeros((self.bd.mynbands,self.setups[a1].ni,),
dtype=self.dtype)
for a2, Q_ni in Q_ani.items():
# B_aa are the projector overlaps across atomic pairs
B_ii = gpo.extract_atomic_pair_matrix(B_aa, a1, a2)
P_ni += np.dot(Q_ni, B_ii.T) #sum over a2 and last i in B_ii
self.gd.comm.sum(P_ni)
self.check_and_plot(P_ani, P0_ani, 8, 'projection,linearity')
def test_extrapolate_overlap(self):
kpt = self.wfs.kpt_u[0]
ppo = ProjectorPairOverlap(self.wfs, self.atoms)
# Compare fingerprints across all processors
fingerprint = np.array([md5_array(ppo.B_aa, numeric=True)])
fingerprints = np.empty(world.size, np.int64)
world.all_gather(fingerprint, fingerprints)
if fingerprints.ptp(0).any():
raise RuntimeError('Distributed matrices are not identical!')
work_nG = np.empty_like(self.psit_nG)
P_ani = ppo.apply(self.psit_nG, work_nG, self.wfs, kpt, \
calculate_P_ani=True, extrapolate_P_ani=True)
P0_ani = self.pt.dict(self.bd.mynbands)
self.pt.integrate(work_nG, P0_ani, kpt.q)
del work_nG
self.check_and_plot(P_ani, P0_ani, 11, 'extrapolate,overlap')
def test_extrapolate_inverse(self):
kpt = self.wfs.kpt_u[0]
ppo = ProjectorPairOverlap(self.wfs, self.atoms)
# Compare fingerprints across all processors
fingerprint = np.array([md5_array(ppo.B_aa, numeric=True)])
fingerprints = np.empty(world.size, np.int64)
world.all_gather(fingerprint, fingerprints)
if fingerprints.ptp(0).any():
raise RuntimeError('Distributed matrices are not identical!')
work_nG = np.empty_like(self.psit_nG)
P_ani = ppo.apply_inverse(self.psit_nG, work_nG, self.wfs, kpt, \
calculate_P_ani=True, extrapolate_P_ani=True)
P0_ani = self.pt.dict(self.bd.mynbands)
self.pt.integrate(work_nG, P0_ani, kpt.q)
del work_nG
self.check_and_plot(P_ani, P0_ani, 11, 'extrapolate,inverse')
def test_overlap_inverse_after(self):
kpt = self.wfs.kpt_u[0]
kpt.P_ani = self.pt.dict(self.bd.mynbands)
ppo = ProjectorPairOverlap(self.wfs, self.atoms)
# Compare fingerprints across all processors
fingerprint = np.array([md5_array(ppo.B_aa, numeric=True)])
fingerprints = np.empty(world.size, np.int64)
world.all_gather(fingerprint, fingerprints)
if fingerprints.ptp(0).any():
raise RuntimeError('Distributed matrices are not identical!')
work_nG = np.empty_like(self.psit_nG)
self.pt.integrate(self.psit_nG, kpt.P_ani, kpt.q)
P0_ani = dict([(a,P_ni.copy()) for a,P_ni in kpt.P_ani.items()])
ppo.apply(self.psit_nG, work_nG, self.wfs, kpt, calculate_P_ani=False)
res_nG = np.empty_like(self.psit_nG)
ppo.apply_inverse(work_nG, res_nG, self.wfs, kpt, calculate_P_ani=True)
del work_nG
P_ani = self.pt.dict(self.bd.mynbands)
self.pt.integrate(res_nG, P_ani, kpt.q)
abserr = np.empty(1, dtype=float)
for n in range(self.nbands):
band_rank, myn = self.bd.who_has(n)
if band_rank == self.bd.comm.rank:
abserr[:] = np.abs(self.psit_nG[myn] - res_nG[myn]).max()
self.gd.comm.max(abserr)
self.bd.comm.broadcast(abserr, band_rank)
self.assertAlmostEqual(abserr.item(), 0, 10)
self.check_and_plot(P_ani, P0_ani, 10, 'overlap,inverse,after')
def test_overlap_inverse_before(self):
kpt = self.wfs.kpt_u[0]
kpt.P_ani = self.pt.dict(self.bd.mynbands)
ppo = ProjectorPairOverlap(self.wfs, self.atoms)
# Compare fingerprints across all processors
fingerprint = np.array([md5_array(ppo.B_aa, numeric=True)])
fingerprints = np.empty(world.size, np.int64)
world.all_gather(fingerprint, fingerprints)
if fingerprints.ptp(0).any():
raise RuntimeError('Distributed matrices are not identical!')
work_nG = np.empty_like(self.psit_nG)
self.pt.integrate(self.psit_nG, kpt.P_ani, kpt.q)
P0_ani = dict([(a,P_ni.copy()) for a,P_ni in kpt.P_ani.items()])
ppo.apply_inverse(self.psit_nG, work_nG, self.wfs, kpt, calculate_P_ani=False)
res_nG = np.empty_like(self.psit_nG)
ppo.apply(work_nG, res_nG, self.wfs, kpt, calculate_P_ani=True)
del work_nG
P_ani = self.pt.dict(self.bd.mynbands)
self.pt.integrate(res_nG, P_ani, kpt.q)
abserr = np.empty(1, dtype=float)
for n in range(self.nbands):
band_rank, myn = self.bd.who_has(n)
if band_rank == self.bd.comm.rank:
abserr[:] = np.abs(self.psit_nG[myn] - res_nG[myn]).max()
self.gd.comm.max(abserr)
self.bd.comm.broadcast(abserr, band_rank)
self.assertAlmostEqual(abserr.item(), 0, 10)
self.check_and_plot(P_ani, P0_ani, 10, 'overlap,inverse,before')
# -------------------------------------------------------------------
def UTGaussianWavefunctionFactory(boundaries, dtype):
sep = '_'
classname = 'UTGaussianWavefunctionSetup' \
+ sep + {'zero':'Zero', 'periodic':'Periodic', 'mixed':'Mixed'}[boundaries] \
+ sep + {float:'Float', complex:'Complex'}[dtype]
class MetaPrototype(UTGaussianWavefunctionSetup, object):
__doc__ = UTGaussianWavefunctionSetup.__doc__
boundaries = boundaries
dtype = dtype
MetaPrototype.__name__ = classname
return MetaPrototype
# -------------------------------------------------------------------
if __name__ in ['__main__', '__builtin__']:
# We may have been imported by test.py, if so we should redirect to logfile
if __name__ == '__builtin__':
testrunner = CustomTextTestRunner('ut_invops.log', verbosity=2)
else:
from gpaw.utilities import devnull
stream = (world.rank == 0) and sys.stdout or devnull
testrunner = TextTestRunner(stream=stream, verbosity=2)
parinfo = []
for test in [UTDomainParallelSetup_Zero, UTDomainParallelSetup_Periodic, \
UTDomainParallelSetup_Mixed]:
info = ['', test.__name__, test.__doc__.strip('\n'), '']
testsuite = initialTestLoader.loadTestsFromTestCase(test)
map(testrunner.stream.writeln, info)
testresult = testrunner.run(testsuite)
assert testresult.wasSuccessful(), 'Initial verification failed!'
parinfo.extend([' Parallelization options: %s' % tci._parinfo for \
tci in testsuite._tests if hasattr(tci, '_parinfo')])
parinfo = np.unique(np.sort(parinfo)).tolist()
testcases = []
for boundaries in ['zero', 'periodic', 'mixed']:
for dtype in [float, complex]:
testcases.append(UTGaussianWavefunctionFactory(boundaries, \
dtype))
for test in testcases:
info = ['', test.__name__, test.__doc__.strip('\n')] + parinfo + ['']
testsuite = defaultTestLoader.loadTestsFromTestCase(test)
map(testrunner.stream.writeln, info)
testresult = testrunner.run(testsuite)
# Provide feedback on failed tests if imported by test.py
if __name__ == '__builtin__' and not testresult.wasSuccessful():
raise SystemExit('Test failed. Check ut_invops.log for details.')
|
robwarm/gpaw-symm
|
gpaw/test/parallel/ut_invops.py
|
Python
|
gpl-3.0
| 19,632
|
[
"ASE",
"GPAW",
"Gaussian"
] |
38242e33c204e158b4ab7997fe117751114dfc8d10ce5e7b2f198df3a680c5df
|
"""
Instructor Dashboard Views
"""
import logging
import datetime
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
import uuid
import pytz
import json
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST,require_GET
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.http import Http404, HttpResponseServerError,HttpResponse
from django.conf import settings
from util.json_request import JsonResponse
from mock import patch
from openedx.core.lib.xblock_utils import wrap_xblock
from openedx.core.lib.url_utils import quote_slashes
from xmodule.html_module import HtmlDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.tabs import CourseTab
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from courseware.access import has_access
from courseware.courses import get_course_by_id, get_studio_url
from django_comment_client.utils import has_forum_access
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR
from openedx.core.djangoapps.course_groups.cohorts import get_course_cohorts, is_course_cohorted, DEFAULT_COHORT_NAME
from student.models import CourseEnrollment, User, CourseEnrollmentAllowed, UserPreprofile
from shoppingcart.models import Coupon, PaidCourseRegistration, CourseRegCodeItem
from course_modes.models import CourseMode, CourseModesArchive
from student.roles import CourseFinanceAdminRole, CourseSalesAdminRole
from certificates.models import (
CertificateGenerationConfiguration,
CertificateWhitelist,
GeneratedCertificate,
CertificateStatuses,
CertificateGenerationHistory,
CertificateInvalidation,
)
from certificates import api as certs_api
from bulk_email.models import BulkEmailFlag
from class_dashboard.dashboard_data import get_section_display_name, get_array_section_has_problem
from .tools import get_units_with_due_date, title_or_url
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangolib.markup import HTML, Text
#GEOFFREY
from course_progress.helpers import get_overall_progress
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from django.db import connection,connections
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from lms.djangoapps.grades.new.course_grade import CourseGradeFactory
#GEOFFREY 2
from courseware.models import StudentModule
from course_api.blocks.api import get_blocks
from course_api.blocks.views import BlocksInCourseView,BlocksView
from django.db.models import Q
from lms.djangoapps.tma_grade_tracking.models import dashboardStats
from xlwt import Workbook
import os
#GEOFFREY
log = logging.getLogger(__name__)
from pprint import pformat
#AGATHE
from course_progress.models import StudentCourseProgress
class InstructorDashboardTab(CourseTab):
"""
Defines the Instructor Dashboard view type that is shown as a course tab.
"""
type = "instructor"
title = ugettext_noop('Instructor')
view_name = "instructor_dashboard"
is_dynamic = True # The "Instructor" tab is instead dynamically added when it is enabled
@classmethod
def is_enabled(cls, course, user=None):
"""
Returns true if the specified user has staff access.
"""
return bool(user and has_access(user, 'staff', course, course.id))
def show_analytics_dashboard_message(course_key):
"""
Defines whether or not the analytics dashboard URL should be displayed.
Arguments:
course_key (CourseLocator): The course locator to display the analytics dashboard message on.
"""
if hasattr(course_key, 'ccx'):
ccx_analytics_enabled = settings.FEATURES.get('ENABLE_CCX_ANALYTICS_DASHBOARD_URL', False)
return settings.ANALYTICS_DASHBOARD_URL and ccx_analytics_enabled
return settings.ANALYTICS_DASHBOARD_URL
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id):
""" Display the instructor dashboard for a course. """
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
log.error(u"Unable to find course with course key %s while loading the Instructor Dashboard.", course_id)
return HttpResponseServerError()
course = get_course_by_id(course_key, depth=0)
access = {
'admin': request.user.is_staff,
'instructor': bool(has_access(request.user, 'instructor', course)),
'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
'staff': bool(has_access(request.user, 'staff', course)),
'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
}
if not access['staff']:
raise Http404()
is_white_label = CourseMode.is_white_label(course_key)
reports_enabled = configuration_helpers.get_value('SHOW_ECOMMERCE_REPORTS', False)
sections = [
_section_course_info(course, access),
_section_membership(course, access, is_white_label),
_section_cohort_management(course, access),
_section_student_admin(course, access),
_section_data_download(course, access),
]
analytics_dashboard_message = None
if show_analytics_dashboard_message(course_key):
# Construct a URL to the external analytics dashboard
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link_start = HTML("<a href=\"{}\" target=\"_blank\">").format(analytics_dashboard_url)
analytics_dashboard_message = _(
"To gain insights into student enrollment and participation {link_start}"
"visit {analytics_dashboard_name}, our new course analytics product{link_end}."
)
analytics_dashboard_message = Text(analytics_dashboard_message).format(
link_start=link_start, link_end=HTML("</a>"), analytics_dashboard_name=settings.ANALYTICS_DASHBOARD_NAME)
# Temporarily show the "Analytics" section until we have a better way of linking to Insights
sections.append(_section_analytics(course, access))
# Check if there is corresponding entry in the CourseMode Table related to the Instructor Dashboard course
course_mode_has_price = False
paid_modes = CourseMode.paid_modes_for_course(course_key)
if len(paid_modes) == 1:
course_mode_has_price = True
elif len(paid_modes) > 1:
log.error(
u"Course %s has %s course modes with payment options. Course must only have "
u"one paid course mode to enable eCommerce options.",
unicode(course_key), len(paid_modes)
)
if settings.FEATURES.get('INDIVIDUAL_DUE_DATES') and access['instructor']:
sections.insert(3, _section_extensions(course))
# Gate access to course email by feature flag & by course-specific authorization
if BulkEmailFlag.feature_enabled(course_key):
sections.append(_section_send_email(course, access))
    # Gate access to Metrics tab by feature flag and staff authorization
if settings.FEATURES['CLASS_DASHBOARD'] and access['staff']:
sections.append(_section_metrics(course, access))
# Gate access to Ecommerce tab
if course_mode_has_price and (access['finance_admin'] or access['sales_admin']):
sections.append(_section_e_commerce(course, access, paid_modes[0], is_white_label, reports_enabled))
# Gate access to Special Exam tab depending if either timed exams or proctored exams
# are enabled in the course
    # NOTE: For now, if we only have proctored exams enabled, then only platform Staff
# (user.is_staff) will be able to view the special exams tab. This may
# change in the future
can_see_special_exams = (
((course.enable_proctored_exams and request.user.is_staff) or course.enable_timed_exams) and
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False)
)
if can_see_special_exams:
sections.append(_section_special_exams(course, access))
# Certificates panel
# This is used to generate example certificates
# and enable self-generated certificates for a course.
# Note: This is hidden for all CCXs
certs_enabled = CertificateGenerationConfiguration.current().enabled and not hasattr(course_key, 'ccx')
if certs_enabled and access['admin']:
sections.append(_section_certificates(course))
disable_buttons = not _is_small_course(course_key)
certificate_white_list = CertificateWhitelist.get_certificate_white_list(course_key)
generate_certificate_exceptions_url = reverse( # pylint: disable=invalid-name
'generate_certificate_exceptions',
kwargs={'course_id': unicode(course_key), 'generate_for': ''}
)
generate_bulk_certificate_exceptions_url = reverse( # pylint: disable=invalid-name
'generate_bulk_certificate_exceptions',
kwargs={'course_id': unicode(course_key)}
)
certificate_exception_view_url = reverse(
'certificate_exception_view',
kwargs={'course_id': unicode(course_key)}
)
certificate_invalidation_view_url = reverse( # pylint: disable=invalid-name
'certificate_invalidation_view',
kwargs={'course_id': unicode(course_key)}
)
certificate_invalidations = CertificateInvalidation.get_certificate_invalidations(course_key)
context = {
'course': course,
'studio_url': get_studio_url(course, 'course'),
'sections': sections,
'disable_buttons': disable_buttons,
'analytics_dashboard_message': analytics_dashboard_message,
'certificate_white_list': certificate_white_list,
'certificate_invalidations': certificate_invalidations,
'generate_certificate_exceptions_url': generate_certificate_exceptions_url,
'generate_bulk_certificate_exceptions_url': generate_bulk_certificate_exceptions_url,
'certificate_exception_view_url': certificate_exception_view_url,
'certificate_invalidation_view_url': certificate_invalidation_view_url,
}
return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
## Section functions starting with _section return a dictionary of section data.
## The dictionary must include at least {
## 'section_key': 'circus_expo'
## 'section_display_name': 'Circus Expo'
## }
## section_key will be used as a css attribute, javascript tie-in, and template import filename.
## section_display_name will be used to generate link titles in the nav bar.
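## A minimal sketch of that contract (using the hypothetical 'circus_expo'
## section named above; any extra keys are section-specific and consumed by
## the section's own template):
##
##     def _section_circus_expo(course, access):
##         """ Provide data for the corresponding dashboard section """
##         return {
##             'section_key': 'circus_expo',
##             'section_display_name': _('Circus Expo'),
##             'access': access,
##         }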
def _section_e_commerce(course, access, paid_mode, coupons_enabled, reports_enabled):
""" Provide data for the corresponding dashboard section """
course_key = course.id
coupons = Coupon.objects.filter(course_id=course_key).order_by('-is_active')
course_price = paid_mode.min_price
total_amount = None
if access['finance_admin']:
single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_key)
bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_key)
total_amount = single_purchase_total + bulk_purchase_total
section_data = {
'section_key': 'e-commerce',
'section_display_name': _('E-Commerce'),
'access': access,
'course_id': unicode(course_key),
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'ajax_remove_coupon_url': reverse('remove_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_get_coupon_info': reverse('get_coupon_info', kwargs={'course_id': unicode(course_key)}),
'get_user_invoice_preference_url': reverse('get_user_invoice_preference', kwargs={'course_id': unicode(course_key)}),
'sale_validation_url': reverse('sale_validation', kwargs={'course_id': unicode(course_key)}),
'ajax_update_coupon': reverse('update_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_add_coupon': reverse('add_coupon', kwargs={'course_id': unicode(course_key)}),
'get_sale_records_url': reverse('get_sale_records', kwargs={'course_id': unicode(course_key)}),
'get_sale_order_records_url': reverse('get_sale_order_records', kwargs={'course_id': unicode(course_key)}),
'instructor_url': reverse('instructor_dashboard', kwargs={'course_id': unicode(course_key)}),
'get_registration_code_csv_url': reverse('get_registration_codes', kwargs={'course_id': unicode(course_key)}),
'generate_registration_code_csv_url': reverse('generate_registration_codes', kwargs={'course_id': unicode(course_key)}),
'active_registration_code_csv_url': reverse('active_registration_codes', kwargs={'course_id': unicode(course_key)}),
'spent_registration_code_csv_url': reverse('spent_registration_codes', kwargs={'course_id': unicode(course_key)}),
'set_course_mode_url': reverse('set_course_mode_price', kwargs={'course_id': unicode(course_key)}),
'download_coupon_codes_url': reverse('get_coupon_codes', kwargs={'course_id': unicode(course_key)}),
'enrollment_report_url': reverse('get_enrollment_report', kwargs={'course_id': unicode(course_key)}),
'exec_summary_report_url': reverse('get_exec_summary_report', kwargs={'course_id': unicode(course_key)}),
'list_financial_report_downloads_url': reverse('list_financial_report_downloads',
kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'look_up_registration_code': reverse('look_up_registration_code', kwargs={'course_id': unicode(course_key)}),
'coupons': coupons,
'sales_admin': access['sales_admin'],
'coupons_enabled': coupons_enabled,
'reports_enabled': reports_enabled,
'course_price': course_price,
'total_amount': total_amount
}
return section_data
def _section_special_exams(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'special_exams',
'section_display_name': _('Special Exams'),
'access': access,
'course_id': unicode(course_key)
}
return section_data
def _section_certificates(course):
"""Section information for the certificates panel.
The certificates panel allows global staff to generate
example certificates and enable self-generated certificates
for a course.
Arguments:
course (Course)
Returns:
dict
"""
example_cert_status = None
html_cert_enabled = certs_api.has_html_certificates_enabled(course.id, course)
if html_cert_enabled:
can_enable_for_course = True
else:
example_cert_status = certs_api.example_certificates_status(course.id)
# Allow the user to enable self-generated certificates for students
# *only* once a set of example certificates has been successfully generated.
# If certificates have been misconfigured for the course (for example, if
# the PDF template hasn't been uploaded yet), then we don't want
# to turn on self-generated certificates for students!
can_enable_for_course = (
example_cert_status is not None and
all(
cert_status['status'] == 'success'
for cert_status in example_cert_status
)
)
instructor_generation_enabled = settings.FEATURES.get('CERTIFICATES_INSTRUCTOR_GENERATION', False)
certificate_statuses_with_count = {
certificate['status']: certificate['count']
for certificate in GeneratedCertificate.get_unique_statuses(course_key=course.id)
}
return {
'section_key': 'certificates',
'section_display_name': _('Certificates'),
'example_certificate_status': example_cert_status,
'can_enable_for_course': can_enable_for_course,
'enabled_for_course': certs_api.cert_generation_enabled(course.id),
'is_self_paced': course.self_paced,
'instructor_generation_enabled': instructor_generation_enabled,
'html_cert_enabled': html_cert_enabled,
'active_certificate': certs_api.get_active_web_certificate(course),
'certificate_statuses_with_count': certificate_statuses_with_count,
'status': CertificateStatuses,
'certificate_generation_history':
CertificateGenerationHistory.objects.filter(course_id=course.id).order_by("-created"),
'urls': {
'generate_example_certificates': reverse(
'generate_example_certificates',
kwargs={'course_id': course.id}
),
'enable_certificate_generation': reverse(
'enable_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_generation': reverse(
'start_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_regeneration': reverse(
'start_certificate_regeneration',
kwargs={'course_id': course.id}
),
'list_instructor_tasks_url': reverse(
'list_instructor_tasks',
kwargs={'course_id': course.id}
),
}
}
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_POST
@login_required
def set_course_mode_price(request, course_id):
"""
set the new course price and add new entry in the CourseModesArchive Table
"""
try:
course_price = int(request.POST['course_price'])
except ValueError:
return JsonResponse(
{'message': _("Please Enter the numeric value for the course price")},
status=400) # status code 400: Bad Request
currency = request.POST['currency']
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course_honor_mode = CourseMode.objects.filter(mode_slug='honor', course_id=course_key)
if not course_honor_mode:
return JsonResponse(
{'message': _("CourseMode with the mode slug({mode_slug}) DoesNotExist").format(mode_slug='honor')},
status=400) # status code 400: Bad Request
CourseModesArchive.objects.create(
course_id=course_id, mode_slug='honor', mode_display_name='Honor Code Certificate',
min_price=course_honor_mode[0].min_price, currency=course_honor_mode[0].currency,
expiration_datetime=datetime.datetime.now(pytz.utc), expiration_date=datetime.date.today()
)
course_honor_mode.update(
min_price=course_price,
currency=currency
)
return JsonResponse({'message': _("CourseMode price updated successfully")})
def _section_course_info(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'course_info',
'section_display_name': _('Course Info'),
'access': access,
'course_id': course_key,
'course_display_name': course.display_name,
'has_started': course.has_started(),
'has_ended': course.has_ended(),
'start_date': course.start,
'end_date': course.end,
'num_sections': len(course.children),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
}
if settings.FEATURES.get('DISPLAY_ANALYTICS_ENROLLMENTS'):
section_data['enrollment_count'] = CourseEnrollment.objects.enrollment_counts(course_key)
if show_analytics_dashboard_message(course_key):
# dashboard_link is already made safe in _get_dashboard_link
dashboard_link = _get_dashboard_link(course_key)
        # so we can use Text() here; the link is not double-escaped and renders as HTML on the front end
message = Text(_("Enrollment data is now available in {dashboard_link}.")).format(dashboard_link=dashboard_link)
section_data['enrollment_message'] = message
if settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'):
section_data['detailed_gitlogs_url'] = reverse('gitlogs_detail', kwargs={'course_id': unicode(course_key)})
try:
sorted_cutoffs = sorted(course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True)
advance = lambda memo, (letter, score): "{}: {}, ".format(letter, score) + memo
section_data['grade_cutoffs'] = reduce(advance, sorted_cutoffs, "")[:-2]
except Exception: # pylint: disable=broad-except
section_data['grade_cutoffs'] = "Not Available"
try:
section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]
except Exception: # pylint: disable=broad-except
section_data['course_errors'] = [('Error fetching errors', '')]
return section_data
def _section_membership(course, access, is_white_label):
""" Provide data for the corresponding dashboard section """
course_key = course.id
ccx_enabled = settings.FEATURES.get('CUSTOM_COURSES_EDX', False) and course.enable_ccx
section_data = {
'section_key': 'membership',
'section_display_name': _('Membership'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'is_white_label': is_white_label,
'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'upload_student_csv_button_url': reverse('register_and_enroll_students', kwargs={'course_id': unicode(course_key)}),
'modify_beta_testers_button_url': reverse('bulk_beta_modify_access', kwargs={'course_id': unicode(course_key)}),
'list_course_role_members_url': reverse('list_course_role_members', kwargs={'course_id': unicode(course_key)}),
'modify_access_url': reverse('modify_access', kwargs={'course_id': unicode(course_key)}),
'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': unicode(course_key)}),
'update_forum_role_membership_url': reverse('update_forum_role_membership', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_cohort_management(course, access):
""" Provide data for the corresponding cohort management section """
course_key = course.id
ccx_enabled = hasattr(course_key, 'ccx')
section_data = {
'section_key': 'cohort_management',
'section_display_name': _('Cohorts'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'course_cohort_settings_url': reverse(
'course_cohort_settings',
kwargs={'course_key_string': unicode(course_key)}
),
'cohorts_url': reverse('cohorts', kwargs={'course_key_string': unicode(course_key)}),
'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': unicode(course_key)}),
'discussion_topics_url': reverse('cohort_discussion_topics', kwargs={'course_key_string': unicode(course_key)}),
'verified_track_cohorting_url': reverse(
'verified_track_cohorting', kwargs={'course_key_string': unicode(course_key)}
),
}
return section_data
def _is_small_course(course_key):
""" Compares against MAX_ENROLLMENT_INSTR_BUTTONS to determine if course enrollment is considered small. """
is_small_course = False
enrollment_count = CourseEnrollment.objects.num_enrolled_in(course_key)
max_enrollment_for_buttons = settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
if max_enrollment_for_buttons is not None:
is_small_course = enrollment_count <= max_enrollment_for_buttons
return is_small_course
def _section_student_admin(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
is_small_course = _is_small_course(course_key)
section_data = {
'section_key': 'student_admin',
'section_display_name': _('Student Admin'),
'access': access,
'is_small_course': is_small_course,
'get_student_progress_url_url': reverse('get_student_progress_url', kwargs={'course_id': unicode(course_key)}),
'enrollment_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_url': reverse('reset_student_attempts', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_for_entrance_exam_url': reverse(
'reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'rescore_problem_url': reverse('rescore_problem', kwargs={'course_id': unicode(course_key)}),
'rescore_entrance_exam_url': reverse('rescore_entrance_exam', kwargs={'course_id': unicode(course_key)}),
'student_can_skip_entrance_exam_url': reverse(
'mark_student_can_skip_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_entrace_exam_instructor_tasks_url': reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': unicode(course_key)}),
'spoc_gradebook_url': reverse('spoc_gradebook', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_extensions(course):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'extensions',
'section_display_name': _('Extensions'),
'units_with_due_dates': [(title_or_url(unit), unicode(unit.location))
for unit in get_units_with_due_date(course)],
'change_due_date_url': reverse('change_due_date', kwargs={'course_id': unicode(course.id)}),
'reset_due_date_url': reverse('reset_due_date', kwargs={'course_id': unicode(course.id)}),
'show_unit_extensions_url': reverse('show_unit_extensions', kwargs={'course_id': unicode(course.id)}),
'show_student_extensions_url': reverse('show_student_extensions', kwargs={'course_id': unicode(course.id)}),
}
return section_data
def _section_data_download(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
show_proctored_report_button = (
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False) and
course.enable_proctored_exams
)
section_data = {
'section_key': 'data_download',
'section_display_name': _('Data Download'),
'access': access,
'show_generate_proctored_exam_report_button': show_proctored_report_button,
'get_problem_responses_url': reverse('get_problem_responses', kwargs={'course_id': unicode(course_key)}),
'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': unicode(course_key)}),
'get_students_features_url': reverse('get_students_features', kwargs={'course_id': unicode(course_key)}),
'get_issued_certificates_url': reverse(
'get_issued_certificates', kwargs={'course_id': unicode(course_key)}
),
'get_students_who_may_enroll_url': reverse(
'get_students_who_may_enroll', kwargs={'course_id': unicode(course_key)}
),
'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': unicode(course_key)}),
'list_proctored_results_url': reverse('get_proctored_exam_results', kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': unicode(course_key)}),
'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': unicode(course_key)}),
'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': unicode(course_key)}),
'course_has_survey': True if course.course_survey_name else False,
'course_survey_results_url': reverse('get_course_survey_results', kwargs={'course_id': unicode(course_key)}),
'export_ora2_data_url': reverse('export_ora2_data', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def null_applicable_aside_types(block): # pylint: disable=unused-argument
"""
get_aside method for monkey-patching into applicable_aside_types
while rendering an HtmlDescriptor for email text editing. This returns
an empty list.
"""
return []
def _section_send_email(course, access):
""" Provide data for the corresponding bulk email section """
course_key = course.id
# Monkey-patch applicable_aside_types to return no asides for the duration of this render
with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types):
# This HtmlDescriptor is only being used to generate a nice text editor.
html_module = HtmlDescriptor(
course.system,
DictFieldData({'data': ''}),
ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake'))
)
fragment = course.system.render(html_module, 'studio_view')
fragment = wrap_xblock(
'LmsRuntime', html_module, 'studio_view', fragment, None,
extra_data={"course-id": unicode(course_key)},
usage_id_serializer=lambda usage_id: quote_slashes(unicode(usage_id)),
# Generate a new request_token here at random, because this module isn't connected to any other
# xblock rendering.
request_token=uuid.uuid1().get_hex()
)
cohorts = []
if is_course_cohorted(course_key):
cohorts = get_course_cohorts(course)
email_editor = fragment.content
section_data = {
'section_key': 'send_email',
'section_display_name': _('Email'),
'access': access,
'send_email': reverse('send_email', kwargs={'course_id': unicode(course_key)}),
'editor': email_editor,
'cohorts': cohorts,
'default_cohort_name': DEFAULT_COHORT_NAME,
'list_instructor_tasks_url': reverse(
'list_instructor_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_background_tasks_url': reverse(
'list_background_email_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_content_history_url': reverse(
'list_email_content', kwargs={'course_id': unicode(course_key)}
),
}
return section_data
def _get_dashboard_link(course_key):
""" Construct a URL to the external analytics dashboard """
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link = HTML(u"<a href=\"{0}\" target=\"_blank\">{1}</a>").format(
analytics_dashboard_url, settings.ANALYTICS_DASHBOARD_NAME
)
return link
def _section_analytics(course, access):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'instructor_analytics',
'section_display_name': _('Analytics'),
'access': access,
'course_id': unicode(course.id),
}
return section_data
def _section_metrics(course, access):
"""Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'metrics',
'section_display_name': _('Metrics'),
'access': access,
'course_id': unicode(course_key),
'sub_section_display_name': get_section_display_name(course_key),
'section_has_problem': get_array_section_has_problem(course_key),
'get_students_opened_subsection_url': reverse('get_students_opened_subsection'),
'get_students_problem_grades_url': reverse('get_students_problem_grades'),
'post_metrics_data_csv_url': reverse('post_metrics_data_csv'),
}
return section_data
# GEOFFREY STAT DASHBOARD
@login_required
def stat_dashboard(request, course_id):
#GET course_key
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course_key_modulestore = CourseKey.from_string(course_id)
#course_module
course_module = modulestore().get_course(course_key, depth=0)
#course cutoff
course_cutoff = course_module.grade_cutoffs['Pass']
#GET COURSE
course = get_course_by_id(course_key)
#overview
overview = CourseOverview.get_from_id(course_key)
#Get all course-enrollment
row = User.objects.raw('SELECT a.id ,a.email FROM auth_user a,student_courseenrollment b WHERE a.id=b.user_id AND b.course_id=%s' ,[course_id])
invite = CourseEnrollmentAllowed.objects.all().filter(course_id=course_key)
participant_list = []
all_user = 0
for _user in row:
participant_list.append(_user.email)
all_user = all_user + 1
    for _u in invite:
        if _u.email not in participant_list:
            all_user = all_user + 1
#number of user who started the course
user_course_started = 0
#number of users who completed the entire quiz
users_completed_quiz = 0
#count passed
num_passed = 0
#add course average grade
course_average_grade = 0
course_average_grade_global = 0
#number of user who finished the course
user_finished = 0
# Users who completed the quiz entirely
user_completed_quiz = 0
user_completed_quiz_list = []
#course_structure
course_structure = get_course_structure(request,course_id)
course_usage_key = modulestore().make_course_usage_key(course_key)
blocks = get_blocks(request,course_usage_key,depth='all',requested_fields=['display_name','children'])
# Users who completed the quiz (overall_progress equals 100.0 only if user completed the quiz)
for user in row:
overall_progress = get_overall_progress(user.id, course_key)
if overall_progress == 100.0:
users_completed_quiz = users_completed_quiz + 1
user_completed_quiz_list.append(user.username)
# connect mongodb return values:
mongo_persist = dashboardStats()
collection = mongo_persist.connect()
find_mongo_persist_course = mongo_persist.find_by_course_id(collection,course_id)
    # Iterate the persisted per-user stats once; looping over them inside a
    # loop over all enrollments would multiply every counter by the number
    # of enrolled users.
    try:
        users_info = find_mongo_persist_course['users_info']
        for key, value in users_info.iteritems():
            #log.info("user_info key:"+pformat(key)+" value"+pformat(value))
            _passed = value['passed']
            _percent = value['percent']
            user_course_started = user_course_started + 1
            # Average grade of all users who completed the quiz
            _username = value['username']
            if _username in user_completed_quiz_list:
                course_average_grade_global = course_average_grade_global + (_percent * 100)
            # Average grade of users who passed the quiz
            if _passed:
                course_average_grade = course_average_grade + (_percent * 100)
                user_finished = user_finished + 1
            if _percent >= course_cutoff:
                num_passed = num_passed + 1
    except:
        pass
#return context
    if user_finished != 0:
        final_course_average_grade = round((course_average_grade / user_finished), 1)
    else:
        final_course_average_grade = 0.0
    if users_completed_quiz != 0:
        course_average_grade_global = round((course_average_grade_global / users_completed_quiz), 1)
    else:
        course_average_grade_global = 0.0
#store problems components order
problem_components=[]
for chapter in course_structure:
for section in chapter['children']:
for vertical in section['children']:
for component in vertical['children']:
if 'problem' in str(component):
problem_components.append(str(component))
context = {
"course_id":course_id,
"course":course,
"row":row,
'course_module':course_module,
"all_user":all_user,
"num_passed":num_passed,
"user_course_started":user_course_started,
'course_average_grade':final_course_average_grade,
'course_average_grade_global': course_average_grade_global,
'user_finished':user_finished,
'course_structure':course_structure,
'overview':overview,
'language_course':get_course_langue(course.language),
'problem_components':problem_components
}
return render_to_response('courseware/stat.html', context)
@ensure_csrf_cookie
@login_required
def get_dashboard_username(request,course_id,email):
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
row = User.objects.raw('SELECT a.id,a.email,a.first_name,a.last_name FROM auth_user a,student_courseenrollment b WHERE a.id=b.user_id AND b.course_id=%s' ,[course_id])
emails = []
email = str(email).lower()
for n in row:
low = [
n.email.lower(),
n.first_name.lower(),
n.last_name.lower()
]
        if any(email in field for field in low):
q = {
"values" : [
n.email,
n.first_name,
n.last_name
],
"id":n.email
}
emails.append(q)
response = JsonResponse({
"usernames":emails,
"email":email
})
return response
@ensure_csrf_cookie
@login_required
def stat_dashboard_username(request, course_id, email):
try:
# get users info
users = User.objects.get(email=email)
#user_email
user_email = users.email
lvl_1 = ''
lvl_2 = ''
lvl_3 = ''
lvl_4 = ''
try:
preprofile = UserPreprofile.objects.filter(email=user_email).first()
lvl_1 = preprofile.level_1
lvl_2 = preprofile.level_2
lvl_3 = preprofile.level_3
lvl_4 = preprofile.level_4
except:
pass
#ordered course
course_grade = []
ordered_course_grade=[]
quiz_order=get_quiz_structure(request, course_id)
# get user id
user_id= users.id
# get course_key from url's param
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
# get course from course_key
course = get_course_by_id(course_key)
# get all courses block of the site
course_block = StudentModule.objects.all().filter(student_id=user_id,course_id=course_key,max_grade__isnull=False)
# var of grades / course_structure
course_grade = []
# get course_users_info
course_user_info = CourseGradeFactory().create(users, course)
# user info responses
user_info = [
{'Score':str(course_user_info.percent * 100)+'%'},
{'First_name':users.first_name},
{'Last_name':users.last_name},
{'Email':users.email},
{'Niveau_1':lvl_1},
{'Niveau_2':lvl_2},
{'Niveau_3':lvl_3},
{'Niveau_4':lvl_4}
]
for n in course_block:
q = {}
usage_key = n.module_state_key
block_view = BlocksView()
block_name = get_blocks(request,usage_key,depth='all',requested_fields=['display_name'])
root = block_name['root']
display_name = block_name['blocks'][root]['display_name']
q['earned'] = n.grade
q['possible'] = n.max_grade
q['display_name'] = display_name
q['root'] = root
course_grade.append(q)
#Order blocks
for id in quiz_order:
for block in course_grade :
if block['root']==str(id):
ordered_course_grade.append(block)
return JsonResponse({
"course_id":course_id,
"email":email,
"user_id":user_id,
"course_grade": ordered_course_grade,
"user_info": user_info,
"quiz_order":quiz_order
})
except:
return JsonResponse({
"course_id":course_id,
"username":username,
"user_id": '',
"course_grade": [],
"user_info": '',
})
@login_required
def get_course_structure(request, course_id):
course_key = CourseKey.from_string(course_id)
course_usage_key = modulestore().make_course_usage_key(course_key)
blocks = get_blocks(request,course_usage_key,depth='all',requested_fields=['display_name','children'])
root = blocks['root']
blocks_overviews = []
try:
children = blocks['blocks'][root]['children']
for z in children:
q = {}
child = blocks['blocks'][z]
q['display_name'] = child['display_name']
q['id'] = child['id']
try:
sub_section = child['children']
q['children'] = []
for s in sub_section:
sub_ = blocks['blocks'][s]
a = {}
a['id'] = sub_['id']
a['display_name'] = sub_['display_name']
vertical = sub_['children']
try:
a['children'] = []
for v in vertical:
unit = blocks['blocks'][v]
w = {}
w['id'] = unit['id']
w['display_name'] = unit['display_name']
try:
w['children'] = unit['children']
except:
w['children'] = []
a['children'].append(w)
except:
a['children'] = []
q['children'].append(a)
except:
q['children'] = []
blocks_overviews.append(q)
except:
children = ''
return blocks_overviews
@ensure_csrf_cookie
@login_required
@require_POST
def get_course_blocks_grade(request,course_id):
data = json.loads(request.body)
data_id = data.get('data_id')
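    # One row per module_id: mean earned grade ('moyenne'), attempt count
    # ('total'), and the maximum possible grade over all student records
    # that have a grade for that component.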
course_block = StudentModule.objects.raw("SELECT id,AVG(grade) AS moyenne,count(id) AS total,MAX(max_grade) AS max_grade,course_id,module_id FROM courseware_studentmodule WHERE course_id = %s AND max_grade IS NOT NULL AND grade <= max_grade GROUP BY module_id", [course_id])
course_grade = {}
for n in course_block:
usage_key = n.module_state_key
block_view = BlocksView()
try:
block_name = get_blocks(request,usage_key,depth='all',requested_fields=['display_name'])
root = block_name['root']
for z in data_id:
if root in z.get('id'):
if not root in course_grade:
course_grade[root] = {}
course_grade[root]['moyenne'] = n.moyenne
course_grade[root]['total'] = n.total
course_grade[root]['max_grade'] = n.max_grade
course_grade[root]['course_id'] = str(n.course_id)
course_grade[root]['module_id'] = str(n.module_state_key)
course_grade[root]['display_name'] = block_name['blocks'][root]['display_name']
course_grade[root]['vertical_name'] = z.get('title')
except:
pass
return JsonResponse({'course_grade':course_grade})
def get_result_page_info(request,course_id):
response = JsonResponse({
"course_id":course_id
})
return response
@ensure_csrf_cookie
@login_required
@require_GET
def get_course_users(request,course_id):
#Get all course-enrollment
"""
UserPreprofile
CourseEnrollment
CourseEnrollmentAllowed
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
invite = CourseEnrollmentAllowed.objects.all().filter(course_id=course_key)
enroll = CourseEnrollment.objects.all().filter(course_id=course_key)
users = []
for _ui in invite:
email = _ui.email
        if not any(u['email'] == email for u in users):
q = {}
q['email'] = email
q['statut'] = 'sent'
q['Nom'] = ''
q['Prenom'] = ''
q['Niveau 1'] = ''
q['Niveau 2'] = ''
q['Niveau 3'] = ''
q['Niveau 4'] = ''
users.append(q)
for _ue in enroll:
try:
email = User.objects.get(pk=_ue.user_id).email
            if not any(u['email'] == email for u in users):
q = {}
q['email'] = email
q['statut'] = 'accepted'
q['Nom'] = ''
q['Prenom'] = ''
q['Niveau 1'] = ''
q['Niveau 2'] = ''
q['Niveau 3'] = ''
q['Niveau 4'] = ''
users.append(q)
else:
for user in users:
if user['email'] == email:
user['statut'] = 'accepted'
except:
pass
for user in users:
try:
email = user['email']
profile = UserPreprofile.objects.filter(email=email).first()
user['Nom'] = profile.last_name
user['Prenom'] = profile.first_name
user['Niveau 1'] = profile.level_1
user['Niveau 2'] = profile.level_2
user['Niveau 3'] = profile.level_3
user['Niveau 4'] = profile.level_4
except:
pass
filename = '{}_registered_users.xls'.format(course_id).replace('+','_')
filepath = '/edx/var/edxapp/'+filename
HEADERS = (u"Nom",u"Prenom",u"Adresse email",u"Niveau 1",u"Niveau 2",u"Niveau 3",u"Niveau 4",u"Statut")
wb = Workbook(encoding='utf-8')
sheet = wb.add_sheet('Users')
for i, header in enumerate(HEADERS):
sheet.write(0, i, header)
j = 0
for i in range(len(users)):
j=j+1
try:
sheet.write(j, 0, users[i]['Nom'])
except:
sheet.write(j, 0, ' ')
try:
sheet.write(j, 1, users[i]['Prenom'])
except:
sheet.write(j, 1, ' ')
try:
sheet.write(j, 2, users[i]['email'])
except:
sheet.write(j, 2, ' ')
try:
sheet.write(j, 3, users[i]['Niveau 1'])
except:
sheet.write(j, 3, ' ')
try:
sheet.write(j, 4, users[i]['Niveau 2'])
except:
sheet.write(j, 4, ' ')
try:
sheet.write(j, 5, users[i]['Niveau 3'])
except:
sheet.write(j, 5, ' ')
try:
sheet.write(j, 6, users[i]['Niveau 4'])
except:
sheet.write(j, 6, ' ')
try:
sheet.write(j, 7, users[i]['statut'])
except:
sheet.write(j, 7, ' ')
wb.save(filepath)
context = {
'filename':filename,
'users':str(users)
}
return JsonResponse(context)
def download_xls(request,filename):
full_path = '/edx/var/edxapp/'+filename
    _file = open(full_path, 'rb')  # xls is binary; read without newline translation
_content = _file.read()
response = HttpResponse(_content, content_type="application/vnd.ms-excel")
response['Content-Disposition'] = "attachment; filename="+filename
os.remove(full_path)
return response
#generate current_course grade reports
@ensure_csrf_cookie
@login_required
@require_GET
def get_course_users_grades(request,course_id):
# connect mongodb return values:
mongo_persist = dashboardStats()
collection = mongo_persist.connect()
find_mongo_persist_course = mongo_persist.find_by_course_id(collection,course_id)
# get users saved data
users_info = find_mongo_persist_course.get('users_info')
#get users id
users_id = users_info.keys()
q = {
'title': [
'email','first name','last name'
],
'users': []
}
k = 0
for _user_id in users_id:
#try:
current = users_info[_user_id]
user = User.objects.get(pk=users_info[str(_user_id)]["user_id"])
percent = str(current["percent"] * 100)+'%'
summary = current["summary"]["section_breakdown"]
user_info = {
'email':user.email,
'first_name':user.first_name,
'last_name':user.last_name,
'percent': percent,
'grades':[]
}
for section in summary:
if k == 0:
if not section['label'] in q['title']:
q['title'].append(section['label'])
_section = {
'label':section['label'],
'percent':str(section['percent'] * 100)+'%'
}
user_info['grades'].append(_section)
q['users'].append(user_info)
k = k + 1
"""
except:
pass
"""
if not 'final grade' in q['title']:
q['title'].append('final grade')
filename = '{}_grades_reports.xls'.format(course_id).replace('+','_')
filepath = '/edx/var/edxapp/'+filename
HEADERS = q['title']
wb = Workbook(encoding='utf-8')
sheet = wb.add_sheet('Grades')
for i, header in enumerate(HEADERS):
sheet.write(0, i, header)
j = 0
for i in range(len(q['users'])):
j=j+1
try:
sheet.write(j, 0, q['users'][i]['email'])
except:
sheet.write(j, 0, ' ')
try:
sheet.write(j, 1, q['users'][i]['first_name'])
except:
sheet.write(j, 1, ' ')
try:
sheet.write(j, 2, q['users'][i]['last_name'])
except:
sheet.write(j, 2, ' ')
d = 2
for grade in q['users'][i]['grades']:
d = d + 1
try:
sheet.write(j, d, grade['percent'])
except:
sheet.write(j, d, ' ')
d = d + 1
sheet.write(j, d, q['users'][i]['percent'])
wb.save(filepath)
context = {
'filename':filename,
'course_id':course_id
}
return JsonResponse(context)
def download_grades(request, filename):
    full_path = '/edx/var/edxapp/' + filename
    # .xls is a binary format: read in 'rb' mode and close the handle promptly
    with open(full_path, 'rb') as _file:
        _content = _file.read()
    response = HttpResponse(_content, content_type="application/vnd.ms-excel")
    response['Content-Disposition'] = "attachment; filename=" + filename
    os.remove(full_path)
    return response
def get_list_lang():
language_options_tulp=settings.ALL_LANGUAGES
language_options_dict={}
for lang, label in language_options_tulp:
language_options_dict[lang]=label
return language_options_dict
def get_course_langue(lang_code):
language_options_dict=get_list_lang()
course_language=language_options_dict[lang_code]
return course_language
def get_quiz_structure(request, course_id):
    course_key = CourseKey.from_string(course_id)
    course_usage_key = modulestore().make_course_usage_key(course_key)
    course_blocks = get_blocks(request, course_usage_key, depth='all',
                               requested_fields=['display_name', 'children'])
    blocks_list = []
    for block in course_blocks['blocks']:
        children = course_blocks['blocks'][block].get('children')
        # keep the children of the last block whose first child is a problem
        if children and "problem" in children[0]:
            blocks_list = children
    return blocks_list
|
TheMOOCAgency/edx-platform
|
lms/djangoapps/instructor/views/instructor_dashboard.py
|
Python
|
agpl-3.0
| 52,741
|
[
"VisIt"
] |
1944ba5f719c43b9efe1464bf278a916a070111cc7a70ba4c0409b185e83f011
|
#!/usr/bin/env python
import sys
lines_gaff = sys.stdin.readlines()
# NOTE: An earlier revision of this script disabled long-range coulombics
# and used hybrid lj/charmm/coul/charmm by default (the commented-out line
# below), because LAMMPS complains if you attempt to use
# lj/charmm/coul/long on a system that does not contain any charged
# particles, and moltemplate did not assign atomic charge at the time, so
# the problem occurred frequently. Long-range electrostatics are now on by
# default.
#pair_style = 'lj/charmm/coul/charmm'
pair_style = 'lj/charmm/coul/long'
sys.stdout.write(' write_once(\"In Settings\") {\n')
for line in lines_gaff:
    tokens = line.split()
    atype = tokens[0]
    sig = tokens[1]
    eps = tokens[2]
    comments = ' '.join(tokens[3:])
    sys.stdout.write(' pair_coeff @atom:'+atype+' @atom:'+atype+' '+pair_style+' '+eps+' '+sig+' # '+comments+'\n')
sys.stdout.write(' } # (end of pair_coeffs)\n')
sys.stdout.write('\n')
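# Worked example (hypothetical input line on stdin; values illustrative only):
#   c3  3.39967  0.1094  Sp3 carbon
# produces, inside the write_once block:
#   pair_coeff @atom:c3 @atom:c3 lj/charmm/coul/long 0.1094 3.39967 # Sp3 carbon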
|
slitvinov/lammps-swimmer
|
tools/moltemplate/common/amber/amberparm_pair_to_lt.py
|
Python
|
gpl-2.0
| 1,072
|
[
"CHARMM",
"LAMMPS"
] |
c913b04ae90e1f47237523af84e70e8a48e739783ca73a1510e45c13f6486c1c
|
# Created by DrLecter, based on DraX' scripts
# This script is part of the L2J Official Datapack Project
# Visit us at http://www.l2jdp.com/
# See readme-dp.txt and gpl.txt for license and distribution details
# Let us know if you did not receive a copy of such files.
import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "elven_human_mystics_2"
#print "Elven human mystics 2"
#Quest items
MARK_OF_SCHOLAR = 2674
MARK_OF_TRUST = 2734
MARK_OF_MAGUS = 2840
MARK_OF_LIFE = 3140
MARK_OF_WITCHCRAFT = 3307
MARK_OF_SUMMONER = 3336
#JUREK,ARKENIAS,VALLERIA,SCRAIDE,DRIKIYAN,JAVIER
NPCS=[30115,30174,30176,30694,30854,31996]
#event:[newclass,req_class,req_race,low_ni,low_i,ok_ni,ok_i,req_item]
#low_ni : level too low and you don't have the quest items
#low_i : level too low, even though you have the items
#ok_ni : level ok, but you don't have the quest items
#ok_i : level ok and you have the quest items, so the class change takes place
CLASSES = {
"EW":[27,26,1,"18","19","20","21",[MARK_OF_SCHOLAR,MARK_OF_LIFE,MARK_OF_MAGUS]],
"ES":[28,26,1,"22","23","24","25",[MARK_OF_SCHOLAR,MARK_OF_LIFE,MARK_OF_SUMMONER]],
"HS":[12,11,0,"26","27","28","29",[MARK_OF_SCHOLAR,MARK_OF_TRUST,MARK_OF_MAGUS]],
"HN":[13,11,0,"30","31","32","33",[MARK_OF_SCHOLAR,MARK_OF_TRUST,MARK_OF_WITCHCRAFT]],
"HW":[14,11,0,"34","35","36","37",[MARK_OF_SCHOLAR,MARK_OF_TRUST,MARK_OF_SUMMONER]]
}
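#Worked example, reading the table above: event "EW" takes an Elven Mystic
#(class id 26, race 1). At level 40+ with MARK_OF_SCHOLAR, MARK_OF_LIFE and
#MARK_OF_MAGUS the player is promoted to class id 27 and shown 30115-21.htm;
#below level 40 the reply is 30115-18.htm (items missing) or 30115-19.htm
#(items held).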
#Messages
default = "No Quest"
def change(st,player,newclass,items) :
for item in items :
st.takeItems(item,1)
st.playSound("ItemSound.quest_fanfare_2")
player.setClassId(newclass)
player.setBaseClass(newclass)
player.broadcastUserInfo()
return
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onAdvEvent (self,event,npc,player) :
npcId = npc.getNpcId()
htmltext = default
suffix = ''
st = player.getQuestState(qn)
if not st : return
race = player.getRace().ordinal()
classid = player.getClassId().getId()
level = player.getLevel()
if npcId not in NPCS : return
        if event not in CLASSES :
return event
else :
newclass,req_class,req_race,low_ni,low_i,ok_ni,ok_i,req_item=CLASSES[event]
if race == req_race and classid == req_class :
item = True
for i in req_item :
if not st.getQuestItemsCount(i):
item = False
if level < 40 :
suffix = low_i
if not item :
suffix = low_ni
else :
if not item :
suffix = ok_ni
else :
suffix = ok_i
change(st,player,newclass,req_item)
st.exitQuest(1)
htmltext = "30115-"+suffix+".htm"
return htmltext
def onTalk (self,npc,player):
st = player.getQuestState(qn)
npcId = npc.getNpcId()
race = player.getRace().ordinal()
classId = player.getClassId()
id = classId.getId()
htmltext = default
if player.isSubClassActive() :
st.exitQuest(1)
return htmltext
# Elven and Human wizards only
if npcId in NPCS :
htmltext = "30115"
if race in [0,1] :
if id == 26 : # elven wizard
return htmltext+"-01.htm"
elif id == 11 : # human wizard
return htmltext+"-08.htm"
elif not classId.isMage() : # all elf/human fighters from all occupation levels
htmltext += "-40.htm"
elif classId.level() == 0 : # first occupation change not made yet
htmltext += "-38.htm"
elif classId.level() == 1 : # buffers/oracles
htmltext += "-40.htm"
elif classId.level() >= 2 : # second/third occupation change already made
htmltext += "-39.htm"
else :
htmltext += "-40.htm" # other races
st.exitQuest(1)
return htmltext
QUEST = Quest(99994,qn,"village_master")
CREATED = State('Start', QUEST)
QUEST.setInitialState(CREATED)
for npc in NPCS :
QUEST.addStartNpc(npc)
QUEST.addTalkId(npc)
|
zenn1989/scoria-interlude
|
L2Jscoria-Game/data/scripts/village_master/elven_human_mystics_2/__init__.py
|
Python
|
gpl-3.0
| 4,139
|
[
"VisIt"
] |
78500fe02d75c2d6739f213f5aeb0ad6a8c41ae6bb3cd5002173214fb8d2f683
|
from common import Modules, data_strings, load_yara_rules, AndroidParseModule, ModuleMetadata
from base64 import b64decode
from string import printable
class dendroid(AndroidParseModule):
def __init__(self):
md = ModuleMetadata(
module_name="dendroid",
bot_name="Dendroid",
description="Android RAT",
authors=["Brian Wallace (@botnet_hunter)"],
version="1.0.0",
date="August 18, 2014",
references=[]
)
AndroidParseModule.__init__(self, md)
        self.yara_rules = None
def _generate_yara_rules(self):
if self.yara_rules is None:
self.yara_rules = load_yara_rules("dendroid.yara")
return self.yara_rules
def get_bot_information(self, file_data):
results = {}
uri = None
password = None
        for s in data_strings(file_data, charset="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="):
try:
line = b64decode(s)
if len(line) == 0:
continue
valid = True
for c in line:
if c not in printable:
valid = False
if not valid:
continue
if line.lower().startswith("https://") or line.lower().startswith("http://"):
uri = line
continue
if uri is not None:
password = line
break
except TypeError:
continue
if uri is not None:
results["c2_uri"] = uri
if password is not None:
try:
password.decode("utf8")
results["password"] = password
except UnicodeDecodeError:
results["password"] = "h" + password.encode("hex")
return results
Modules.list.append(dendroid())
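# Usage sketch (illustrative; "sample.apk" is a hypothetical path, not part
# of this module):
#
#     with open("sample.apk", "rb") as f:
#         info = dendroid().get_bot_information(f.read())
#     # info may contain "c2_uri" and "password" when decoding succeeds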
|
bwall/bamfdetect
|
BAMF_Detect/modules/dendroid.py
|
Python
|
mit
| 2,007
|
[
"Brian"
] |
ac48112ca2713a20c8cc4c30601257d2d1811cf6d8e2233ba6beaec9f5047f30
|
"""Test Axis event instance API.
pytest --cov-report term-missing --cov=axis.event_instances tests/test_event_instances.py
"""
import pytest
from axis.event_instances import EventInstances, URL, get_events
import respx
from .conftest import HOST
from .event_fixtures import (
EVENT_INSTANCES,
EVENT_INSTANCE_PIR_SENSOR,
EVENT_INSTANCE_STORAGE_ALERT,
EVENT_INSTANCE_VMD4_PROFILE1,
)
@pytest.fixture
def event_instances(axis_device) -> EventInstances:
"""Returns the event_instances mock object."""
return EventInstances(axis_device.vapix.request)
@respx.mock
@pytest.mark.asyncio
async def test_full_list_of_event_instances(event_instances):
"""Test loading of event instances work."""
respx.post(f"http://{HOST}:80{URL}").respond(
text=EVENT_INSTANCES,
headers={"Content-Type": "application/soap+xml; charset=utf-8"},
)
await event_instances.update()
assert len(event_instances) == 44
@pytest.mark.parametrize(
"response,expected",
[
(
EVENT_INSTANCE_PIR_SENSOR,
{
"topic": "tns1:Device/tnsaxis:Sensor/PIR",
"topic_filter": "onvif:Device/axis:Sensor/PIR",
"is_available": True,
"is_application_data": False,
"name": "PIR sensor",
"message": {
"stateful": True,
"stateless": False,
"source": {
"@NiceName": "Sensor",
"@Type": "xsd:int",
"@Name": "sensor",
"Value": "0",
},
"data": {
"@NiceName": "Active",
"@Type": "xsd:boolean",
"@Name": "state",
"@isPropertyState": "true",
},
},
},
),
(
EVENT_INSTANCE_STORAGE_ALERT,
{
"topic": "tnsaxis:Storage/Alert",
"topic_filter": "axis:Storage/Alert",
"is_available": True,
"is_application_data": False,
"name": "Storage alert",
"message": {
"stateful": True,
"stateless": False,
"source": {
"@NiceName": "Disk",
"@Type": "xsd:string",
"@Name": "disk_id",
"Value": ["SD_DISK", "NetworkShare"],
},
"data": [
{
"@NiceName": "Temperature",
"@Type": "xsd:int",
"@Name": "temperature",
},
{
"@isPropertyState": "true",
"@NiceName": "Alert",
"@Type": "xsd:boolean",
"@Name": "alert",
},
{"@NiceName": "Wear", "@Type": "xsd:int", "@Name": "wear"},
{
"@NiceName": "Overall Health",
"@Type": "xsd:int",
"@Name": "overall_health",
},
],
},
},
),
(
EVENT_INSTANCE_VMD4_PROFILE1,
{
"topic": "tnsaxis:CameraApplicationPlatform/VMD/Camera1Profile1",
"topic_filter": "axis:CameraApplicationPlatform/VMD/Camera1Profile1",
"is_available": True,
"is_application_data": False,
"name": "VMD 4: VMD 4 ACAP",
"message": {
"stateful": True,
"stateless": False,
"source": {},
"data": {
"@Type": "xsd:boolean",
"@Name": "active",
"@isPropertyState": "true",
},
},
},
),
],
)
@respx.mock
@pytest.mark.asyncio
async def test_single_event_instance(
event_instances: EventInstances, response: bytes, expected: dict
):
"""Verify expected outcome from different event instances."""
respx.post(f"http://{HOST}:80{URL}").respond(
text=response, headers={"Content-Type": "application/soap+xml; charset=utf-8"}
)
await event_instances.update()
assert len(event_instances) == 1
event = event_instances[expected["topic"]]
assert event.topic == expected["topic"]
assert event.topic_filter == expected["topic_filter"]
assert event.is_available == expected["is_available"]
assert event.is_application_data == expected["is_application_data"]
assert event.name == expected["name"]
assert event.stateful == expected["message"]["stateful"]
assert event.stateless == expected["message"]["stateless"]
assert event.source == expected["message"]["source"]
assert event.data == expected["message"]["data"]
@pytest.mark.parametrize(
"input,output",
[
(
{
"tns1:Device": {
"@NiceName": "Device",
"tnsaxis:Sensor": {
"@NiceName": "Device sensors",
"PIR": {
"@topic": "true",
"@NiceName": "PIR sensor",
"MessageInstance": {
"@isProperty": "true",
"SourceInstance": {
"SimpleItemInstance": {
"@NiceName": "Sensor",
"@Type": "xsd:int",
"@Name": "sensor",
"Value": "0",
}
},
"DataInstance": {
"SimpleItemInstance": {
"@NiceName": "Active",
"@Type": "xsd:boolean",
"@Name": "state",
"@isPropertyState": "true",
}
},
},
},
},
}
},
[
{
"topic": "tns1:Device/tnsaxis:Sensor/PIR",
"data": {
"@topic": "true",
"@NiceName": "PIR sensor",
"MessageInstance": {
"@isProperty": "true",
"SourceInstance": {
"SimpleItemInstance": {
"@NiceName": "Sensor",
"@Type": "xsd:int",
"@Name": "sensor",
"Value": "0",
}
},
"DataInstance": {
"SimpleItemInstance": {
"@NiceName": "Active",
"@Type": "xsd:boolean",
"@Name": "state",
"@isPropertyState": "true",
}
},
},
},
}
],
),
(
{
"tnsaxis:CameraApplicationPlatform": {
"VMD": {
"@NiceName": "Video Motion Detection",
"Camera1Profile1": {
"@topic": "true",
"@NiceName": "VMD 4: VMD 4 ACAP",
"MessageInstance": {
"@isProperty": "true",
"DataInstance": {
"SimpleItemInstance": {
"@Type": "xsd:boolean",
"@Name": "active",
"@isPropertyState": "true",
}
},
},
},
}
}
},
[
{
"topic": "tnsaxis:CameraApplicationPlatform/VMD/Camera1Profile1",
"data": {
"@topic": "true",
"@NiceName": "VMD 4: VMD 4 ACAP",
"MessageInstance": {
"@isProperty": "true",
"DataInstance": {
"SimpleItemInstance": {
"@Type": "xsd:boolean",
"@Name": "active",
"@isPropertyState": "true",
}
},
},
},
}
],
),
],
)
def test_get_events(input: dict, output: list):
"""Verify expected output of get_events."""
assert get_events(input) == output
|
Kane610/axis
|
tests/test_event_instances.py
|
Python
|
mit
| 9,698
|
[
"VMD"
] |
ca4ab2f583d2b7883f9c867c44b13d493dbdd49058d40f7c53e66f73cc41e9bc
|
"""
Timothy James Lang
tjlangco@gmail.com
Last Updated 04 September 2015 (Python 2.7/3.4)
Last Updated 26 July 2005 (IDL)
csu_kdp v1.4
Change Log
----------
v1.4 Major Changes (09/04/2015):
1. Added window keyword to enable stretching the FIR window (e.g.,
    use a 21-pt filter over 5 km with 250-m gate spacing)
2. Forcing FIR order to be even, _calc_kdp_ray will crash otherwise
v1.3 Major Changes (08/05/2015):
1. Made Python 3 compatible.
2. Fixed issue with non-integer array indices.
v1.2 Major Changes (07/10/2015):
1. Made sub-module pep8 compliant.
v1.1 Major Changes (04/27/2015):
1. Made algorithm work with a user-defined gate spacing (via gs keyword).
Untested on gate spacings that do not divide evenly into the 3-km window
used for filtering the PHIDP data, however. But common gate spacings
like 50, 100, 150, 200, 250, and 300 meters should all work fine.
2. Made the algorithm capable of receiving 2D array inputs (i.e., azimuth &
range) as well as 1D inputs (range only). If 2D, rng needs to be 2D as
well. However, thsd should remain a scalar, or 1D and only vary by range.
To Do
-----
1. Performance improvements
2. Make object-oriented
"""
from __future__ import division, print_function
import numpy as np
from numpy import linalg
from scipy.signal import firwin
from warnings import warn
# import time
VERSION = '1.4'
# Used by FIR coefficient function (get_fir)
FIR_GS = 150.0
FIR_WIN = 3.0
FIR_ORDER = 20
FIR_GAIN = 1.0
FIR_FREQ = 0.08
FIR_STD = 28.0
KM2M = 1000.0
def calc_kdp_bringi(dp=None, dz=None, rng=None, thsd=12, nfilter=1,
bad=-32768, gs=FIR_GS, window=FIR_WIN):
"""
Overview
--------
This is an old algorithm that uses an FIR filter to process differential
phase and extract specific differential phase. It works on polarimetric
radar data. It is based on code provided by V. N. Bringi and Yanting Wang
of CSU Electrical Engineering. It assumes differential phase has been
unfolded already. You can send this function either 1D or 2D arrays of
data. If 2D, it assumes the first index is azimuth so it will loop over
that, calculating KDP along individual rays.
Steps
-----
1. Standard deviation of differential phase is calculated and used to
QC the phase data. The stdev calculation uses up to 11 consecutive
gates regardless of gate spacing.
2. Differential phase is filtered using the FIR filter, which has been
tuned to the number of gates contained within the FIR window. This
algorithm only works for window / gate spacing = even number.
3. Specific differential phase is calculated by consulting reflectivity.
As reflectivity declines progressively more and more gates are needed
in the window used to fit a line to the filtered phase. Specific
differential phase is half the slope of that line.
Reference
---------
Timothy J. Lang, David A. Ahijevych, Stephen W. Nesbitt, Richard E.
Carbone, Steven A. Rutledge, and Robert Cifelli, 2007: Radar-Observed
Characteristics of Precipitating Systems during NAME 2004. J. Climate,
20, 1713–1733. doi: http://dx.doi.org/10.1175/JCLI4082.1
Arguments
---------
dp = Differential phase (deg, 1D or 2D array)
dz = Reflectivity (dBZ, 1D or 2D array)
rng = Range (km, 1D or 2D array -
          use np.meshgrid() first to make rng 2D if needed)
thsd = Threshold for standard deviation of differential phase, above which
the data are not considered when filtering or calculating specific
differential phase. The user can specify a 1D vector of spatially
varying thresholds instead (i.e., vary by range).
nfilter = Number of times to apply the FIR filter
bad = Value for bad/missing data
gs = Gate spacing of radar (meters)
window = Changes window over which FIR filter is applied (km). Also affects
the width of the adaptive KDP calculations.
Returns
-------
kd_lin = Specific differential phase (deg/km, 1D or 2D array)
dp_lin = Filtered differential phase (deg, 1D or 2D array)
sd_lin = Standard deviation of diff. phase (deg, 1D or 2D array)
"""
# Quick check on all vars. Used keywords so order doesn't matter.
if dp is None or dz is None or rng is None:
warn('Missing needed variables (dp, dz, and/or rng), failing ...')
return
if np.ndim(dp) != np.ndim(dz) or np.ndim(dp) != np.ndim(rng):
warn('Array sizes don\'t match, failing ...')
return
fir = get_fir(gs=gs, window=window)
# If array is 2D, then it assumes the first index refers to azimuth.
# Thus it loops over that.
if np.ndim(dp) == 2:
kd_lin = np.zeros_like(dp) + bad
dp_lin = np.zeros_like(dp) + bad
sd_lin = np.zeros_like(dp) + 100.0
for ray in np.arange(np.shape(dp)[0]):
kd_lin[ray], dp_lin[ray], sd_lin[ray] = \
_calc_kdp_ray(dp[ray], dz[ray], rng[ray], thsd=thsd,
nfilter=nfilter, bad=bad, fir=fir)
# Or
elif np.ndim(dp) == 1:
kd_lin, dp_lin, sd_lin = _calc_kdp_ray(dp, dz, rng, thsd=thsd, fir=fir,
nfilter=nfilter, bad=bad)
else:
warn('Need 2D or 1D array, failing ...')
return
return kd_lin, dp_lin, sd_lin
def get_fir(gs=FIR_GS, window=FIR_WIN):
"""
gs = Gate Spacing (m)
window = Filter Window (km)
window divided by gs should be an even number!
"""
fir = {}
fir['order'] = np.int32(window * KM2M / gs)
    if fir['order'] % 2 != 0:
        warn('window / gs must be an even number! Failing ...')
        return
fir['gain'] = FIR_GAIN
# ratio = FIR_GS / gs
ratio = fir['order'] / FIR_ORDER
freq = FIR_FREQ / ratio
std = ratio * FIR_STD
fir['coef'] = firwin(fir['order'] + 1, freq, window=('gaussian', std))
# print('debug', fir)
return fir
def _calc_kdp_ray(dp, dz, rng, thsd=12, nfilter=1, bad=-32768, fir=None):
"""
Arguments
---------
dp = 1D ray of differential phase
dz = 1D ray of reflectivity
rng = 1D ray of range
thsd = Scalar or 1D ray of diff phase standard deviation thresholds
nfilter = Number of times to filter the data
bad = Bad/missing data value
fir = Dictionary containing FIR filter parameters
Returns
-------
kd_lin = Specific differential phase (deg/km, 1D array)
dp_lin = Filtered differential phase (deg, 1D array)
sd_lin = Standard deviation of diff. phase (deg, 1D array)
"""
# Define needed variables
kd_lin = np.zeros_like(rng) + bad
sd_lin = np.zeros_like(rng) + 100.0
# User can provide a spatially varying stddev(dp) threshold
if not hasattr(thsd, '__len__'):
thsd = np.zeros_like(rng) + thsd
length = len(rng)
lin = np.arange(length)
# Half window size for calculating stdev of phase (fixed @ 11 gates)
half_std_win = 5
half_fir_win = fir['order'] // 2 # Half window size for FIR filtering
y = np.zeros(length) + bad # Dummy variable to store filtered phase
z = 1.0 * dp # Dummy variable to store un/pre-processed phase
# print(time.time() - begin_time, 'seconds since start (DEF)')
#####################################################################
# Calculate standard deviation of phidp
mask = dp != bad
for i in lin[mask]:
index1 = np.int32(i - half_std_win)
index2 = np.int32(i + half_std_win)
if index1 >= 0 and index2 < length - 1:
yy = dp[index1:index2]
tmp_mask = mask[index1:index2]
if len(yy[tmp_mask]) > half_std_win:
sd_lin[i] = _quick_std(yy, tmp_mask)
# ------------- MAIN LOOP of Phidp Adaptive Filtering ------------------
# FIR FILTER SECTION
for mloop in np.arange(nfilter):
mask = np.logical_and(sd_lin <= thsd, z != bad)
for i in lin[mask]:
index1 = np.int32(i - half_fir_win)
index2 = np.int32(i + half_fir_win)
if index1 >= 0 and index2 < length - 1:
yy = z[index1:index2+1]
xx = rng[index1:index2+1]
tmp_mask = mask[index1:index2+1]
siz = len(yy[tmp_mask])
if siz > 0.8 * fir['order']:
if siz < fir['order'] + 1:
result = _leastsqrs(xx, yy, siz, tmp_mask)
yy[~tmp_mask] = result[0] * xx[~tmp_mask] + result[1]
y[i] = fir['gain'] * np.dot(fir['coef'], yy)
z = 1.0 * y # Enables re-filtering of processed phase
dp_lin = 1.0 * y
# print(time.time() - begin_time, 'seconds since start (FDP)')
# *****************END LOOP for Phidp Adaptive Filtering******************
# CALCULATE KDP
# Default value for nadp is half_fir_win, but varies based on Zh
nadp = np.int16(0 * dz + half_fir_win)
tmp_mask = dz < 35
nadp[tmp_mask] = 3 * half_fir_win
tmp_mask = np.logical_and(dz >= 35, dz < 45)
nadp[tmp_mask] = 2 * half_fir_win
mask = dp_lin != bad
for i in lin[mask]:
index1, index2 = _get_nadp_indices(nadp, i)
if index1 >= 0 and index2 <= length:
tmp_mask = mask[index1:index2]
xx = rng[index1:index2]
siz = len(xx[tmp_mask])
# Improved Kdp based on LSE fit to Adap filt Phidp
if siz >= 0.8 * nadp[i]:
yy = dp_lin[index1:index2]
kd_lin[i] = _fit_line_and_get_kdp(xx, yy, siz, tmp_mask)
# *******************END KDP CALCULATION****************************
# print(time.time() - begin_time, 'seconds since start (KDP/Done)')
return kd_lin, dp_lin, sd_lin
def _leastsqrs(xx, yy, siz, tmp_mask):
"""
Following is faster than np.polyfit
e.g., return np.polyfit(xx[tmp_mask], yy[tmp_mask], 1)
"""
A = np.array([xx[tmp_mask], np.ones(siz)])
return linalg.lstsq(A.T, yy[tmp_mask])[0]
def _get_nadp_indices(nadp, i):
half_nadp = nadp[i] / 2
return np.int32(i - half_nadp), np.int32(i + half_nadp + 1)
def _fit_line_and_get_kdp(xx, yy, siz, tmp_mask):
result = _leastsqrs(xx, yy, siz, tmp_mask)
return 0.5 * result[0]
def _quick_std(array, mask):
"""Following is faster than np.std()"""
a = array[mask]
m = a.mean()
c = a - m
return (np.dot(c, c) / a.size)**0.5
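# Usage sketch (added for illustration; synthetic data, not from the
# reference paper). A linear phase ramp of 2 deg/km should recover a nearly
# constant KDP of about 1 deg/km away from the ray edges.
if __name__ == '__main__':
    rng_km = np.arange(100) * 0.25           # 250-m gates out to ~25 km
    dp = 2.0 * rng_km                        # deg; slope of 2 deg/km
    dz = np.full(rng_km.shape, 45.0)         # constant 45-dBZ reflectivity
    out = calc_kdp_bringi(dp=dp, dz=dz, rng=rng_km, gs=250.0)
    if out is not None:
        kd, dp_filt, sd = out
        print('max KDP (deg/km):', kd.max())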
|
jjhelmus/CSU_RadarTools
|
csu_radartools/csu_kdp.py
|
Python
|
gpl-2.0
| 10,457
|
[
"Gaussian"
] |
a11e26c2634cdffdbe9998879d52c48d0c1870d79186a20f76585b3add9a9d75
|
#
# Copyright (C) 2006-2007 Cooper Street Innovations Inc.
# Charles Eidsness <charles@cooper-street.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
"""
This module provides all of basic eispice waveforms for use with
speific device, e.g. V, I, VI.
Basic Waveform Classes:
-- These classes are wrappers around waveforms defined in the
simulator library (written in C)
PWL -- Piece-Wise Linear
PWC -- Piece-Wise Cubic-Spline
SFFM -- Single Frequency FM
Exp -- Exponential Rise and/or Fall
Pulse -- Pulse Train
Gauss -- Pulse Train with Gaussian Edges
Sin -- Sine Wave
SFFM
"""
from numpy import array, double
import units
import simulator_
class PWL(simulator_.PWL_):
"""
Piece-Wise Linear Waveform
-- A 2D curve, points between defined data points are calculated
via linear interpolation.
Example:
>>> import eispice
>>> wave = eispice.PWL([['2n', 4],['12n', 3],['50n', 20],['75n', -20], \
['95n', -22]])
>>> cct = eispice.Circuit('PWL Test')
>>> cct.Rv = eispice.R(1, eispice.GND, 1)
>>> cct.Vx = eispice.V(1, 0, eispice.GND, wave)
>>> cct.tran('0.5n','100n')
>>> cct.check_v(1, 8.815789474e+00, '25n')
True
>>> cct.check_v(1, -2.000000000e+01, '75n')
True
"""
def __init__(self, data):
"""
Arguments:
data -- 2D Array Representing the PWL Curve
"""
data = units.floatList2D(data)
data = data[data[:,0].argsort(),] # sort by first column for simulator
simulator_.PWL_.__init__(self, data)
class PWC(simulator_.PWC_):
"""
Piece-Wise Cubic-Spline Waveform
-- A 2D curve, points between defined data points are calculated
as cubic splines.
Example:
>>> import eispice
>>> wave = eispice.PWC([['2n', 4],['12n', 3],['50n', 20],['75n', -20], \
['95n', -22]])
>>> cct = eispice.Circuit('PWC Test')
>>> cct.Rv = eispice.R(1, eispice.GND, 1)
>>> cct.Vx = eispice.V(1, 0, eispice.GND, wave)
>>> cct.tran('0.5n','100n')
>>> cct.check_v(1, 1.148836888e+01, '25n')
True
>>> cct.check_v(1, -2.000000000e+01, '75n')
True
"""
def __init__(self, data):
"""
Arguments:
data -- 2D Array Representing the PWC Curve
"""
data = units.floatList2D(data)
data = data[data[:,0].argsort(),] # sort by first column for simulator
simulator_.PWC_.__init__(self, data)
class SFFM(simulator_.SFFM_):
"""Single Frequency FM Waveform
Example:
>>> import eispice
>>> wave = eispice.SFFM(1, 4, '100M', 2, '10M')
>>> cct = eispice.Circuit('SFFM Test')
>>> cct.Rv = eispice.R(1, eispice.GND, 1)
>>> cct.Vx = eispice.V(1, 0, eispice.GND, wave)
>>> cct.tran('0.5n','100n')
>>> cct.check_v(1, -2.630296891e+00, '25n')
True
>>> cct.check_v(1, 4.631842886e+00, '75n')
True
"""
def __init__(self, *args):
"""
Arguments:
Vo -- Offset
Va -- Amplitude
Fc -- (optional) Carrier Frequency, default = 1/tstop
MDI -- (optional) Modulation Index, default = 0.0
Fs -- (optional) Signal Frequency, default = 1/tstop
"""
simulator_.SFFM_.__init__(self,*units.floatList1D(args))
class Exp(simulator_.Exp_):
"""Exponential Rise and/or Fall Waveform
Example:
>>> import eispice
>>> wave = eispice.Exp(0, 4, '5n', '2n', '25n', '5n')
>>> cct = eispice.Circuit('Exp Test')
>>> cct.Rv = eispice.R(1, eispice.GND, 1)
>>> cct.Vx = eispice.V(1, 0, eispice.GND, wave)
>>> cct.tran('0.5n','100n')
>>> cct.check_v(1, 3.999818400e+00, '25n')
True
>>> cct.check_v(1, 1.818267660e-04, '75n')
True
"""
def __init__(self, *args):
"""
Arguments:
V1 -- Initial Value
V2 -- Pulsed Value
Td1 -- (optional) Rise Delay Time, default = 0.0
Tau1 -- (optional) Rise Time Constant, default = tstep
Td2 -- (optional) Fall Delay Time, default = td1 + tstep
Tau2 -- (optional) Fall Time Constant, default = tstep
"""
simulator_.Exp_.__init__(self,*units.floatList1D(args))
class Pulse(simulator_.Pulse_):
"""Pulse Train Waveform
Example:
>>> import eispice
>>> wave = eispice.Pulse(4, 8, '10n', '2n', '3n', '5n', '20n')
>>> cct = eispice.Circuit('Pulse Test')
>>> cct.Rv = eispice.R(1, eispice.GND, 1)
>>> cct.Vx = eispice.V(1, 0, eispice.GND, wave)
>>> cct.tran('0.5n','100n')
>>> cct.check_v(1, 4, '25n')
True
>>> cct.check_v(1, 8, '75n')
True
"""
def __init__(self, *args):
"""
Arguments:
V1 -- Initial Value
V2 -- Pulsed Value
Td -- (optional) Delay Time, default = 0.0
Tr -- (optional) Rise Time, default = tstep
Tf -- (optional) Fall Time, default = tstep
PW -- (optional) Pulse Width, default = tstop
Per -- (optional) Period, default = tstop
"""
simulator_.Pulse_.__init__(self,*units.floatList1D(args))
class Gauss(simulator_.Gauss_):
"""Pulse Train Waveform with Gaussian Edges
Example:
>>> import eispice
>>> wave = eispice.Gauss(0, 3.3, '0n', '2n', '5n', '10n', '50n')
>>> cct = eispice.Circuit('Gauss Test')
>>> cct.Rv = eispice.R(1, eispice.GND, 1)
>>> cct.Vx = eispice.V(1, 0, eispice.GND, wave)
>>> cct.tran('0.5n','100n')
>>> cct.check_v(1, 1.517639357e-01, '25n')
True
>>> cct.check_v(1, 3.148220583e+00, '65n')
True
"""
def __init__(self, *args):
"""
Arguments:
V1 -- Initial Value
V2 -- Pulsed Value
Td -- (optional) Delay Time, default = 0.0
Tr -- (optional) Rise Time (20% to 80%), default = tstep
Tf -- (optional) Fall Time (20% to 80%), default = tstep
PW -- (optional) Pulse Width, default = tstop
Per -- (optional) Period, default = tstop
"""
simulator_.Gauss_.__init__(self,*units.floatList1D(args))
class Sin(simulator_.Sin_):
"""Sine Wave Waveform
Example:
>>> import eispice
>>> wave = wave = eispice.Sin(0, 4, '50M', '5n', '10M')
>>> cct = eispice.Circuit('Sin Test')
>>> cct.Rv = eispice.R(1, eispice.GND, 1)
>>> cct.Vx = eispice.V(1, 0, eispice.GND, wave)
>>> cct.tran('0.5n','100n')
>>> cct.check_v(1, -6.561833244e-04, '25n')
True
>>> cct.check_v(1, 1.910792387e-04, '75n')
True
"""
def __init__(self, *args):
"""
Arguments:
Vo --> Offset
Va --> Amplitude
Fc --> (optional) Frequency, default = 1/tstop
Td --> (optional) Delay, default = 0.0
DF --> (optional) Damping Factor, default = 0.0
"""
simulator_.Sin_.__init__(self,*units.floatList1D(args))
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=False)
print 'Testing Complete'
|
fuesika/teispice
|
module/waveform.py
|
Python
|
gpl-2.0
| 6,850
|
[
"Gaussian"
] |
f859c8dcc29654b5ec5d921023fe4e05217aed14a7137f5c4c1f751048b3e148
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import division
from unittest import TestCase, main
from collections import Counter, defaultdict
import numpy as np
from skbio.core.sequence import NucleotideSequence, DNASequence, RNASequence
from skbio.core.alignment import SequenceCollection, Alignment
from skbio.core.exception import SequenceCollectionError
from skbio.core.distance import DistanceMatrix
class SequenceCollectionTests(TestCase):
"""Tests of the SequenceCollection class """
def setUp(self):
"""Initialize values to be used in tests
"""
self.d1 = DNASequence('GATTACA', identifier="d1")
self.d2 = DNASequence('TTG', identifier="d2")
self.d1_lower = DNASequence('gattaca', identifier="d1")
self.d2_lower = DNASequence('ttg', identifier="d2")
self.r1 = RNASequence('GAUUACA', identifier="r1")
self.r2 = RNASequence('UUG', identifier="r2")
self.r3 = RNASequence('U-----UGCC--', identifier="r3")
self.i1 = DNASequence('GATXACA', identifier="i1")
self.seqs1 = [self.d1, self.d2]
self.seqs1_lower = [self.d1_lower, self.d2_lower]
self.seqs2 = [self.r1, self.r2, self.r3]
self.seqs3 = self.seqs1 + self.seqs2
self.seqs1_t = [('d1', 'GATTACA'), ('d2', 'TTG')]
self.seqs2_t = [('r1', 'GAUUACA'), ('r2', 'UUG'),
('r3', 'U-----UGCC--')]
self.seqs3_t = self.seqs1_t + self.seqs2_t
self.s1 = SequenceCollection(self.seqs1)
self.s1_lower = SequenceCollection(self.seqs1_lower)
self.s2 = SequenceCollection(self.seqs2)
self.s3 = SequenceCollection(self.seqs3)
self.empty = SequenceCollection([])
self.invalid_s1 = SequenceCollection([self.i1])
def test_init(self):
"""Initialization functions as expected with varied input types
"""
SequenceCollection(self.seqs1)
SequenceCollection(self.seqs2)
SequenceCollection(self.seqs3)
SequenceCollection([])
def test_init_fail(self):
"""initialization with sequences with overlapping identifiers fails
"""
s1 = [self.d1, self.d1]
self.assertRaises(SequenceCollectionError, SequenceCollection, s1)
def test_init_validate(self):
"""initialization with validation functions as expected
"""
SequenceCollection(self.seqs1, validate=True)
SequenceCollection(self.seqs1, validate=True)
# can't validate self.seqs2 as a DNASequence
self.assertRaises(SequenceCollectionError, SequenceCollection,
self.invalid_s1, validate=True)
def test_from_fasta_records(self):
"""Initialization from list of tuples functions as expected
"""
SequenceCollection.from_fasta_records(self.seqs1_t, DNASequence)
SequenceCollection.from_fasta_records(self.seqs2_t, RNASequence)
SequenceCollection.from_fasta_records(self.seqs3_t, NucleotideSequence)
def test_contains(self):
"""in operator functions as expected
"""
self.assertTrue('d1' in self.s1)
self.assertTrue('r2' in self.s2)
self.assertFalse('r2' in self.s1)
def test_eq(self):
"""equality operator functions as expected
"""
self.assertTrue(self.s1 == self.s1)
self.assertFalse(self.s1 == self.s2)
# different objects can be equal
self.assertTrue(self.s1 == SequenceCollection([self.d1, self.d2]))
self.assertTrue(SequenceCollection([self.d1, self.d2]) == self.s1)
# SequenceCollections with different number of sequences are not equal
self.assertFalse(self.s1 == SequenceCollection([self.d1]))
class FakeSequenceCollection(SequenceCollection):
pass
# SequenceCollections of different types are not equal
self.assertFalse(self.s1 == FakeSequenceCollection([self.d1, self.d2]))
self.assertFalse(self.s1 == Alignment([self.d1, self.d2]))
# SequenceCollections with different sequences are not equal
self.assertFalse(self.s1 == SequenceCollection([self.d1, self.r1]))
def test_getitem(self):
"""getitem functions as expected
"""
self.assertEqual(self.s1[0], self.d1)
self.assertEqual(self.s1[1], self.d2)
self.assertEqual(self.s2[0], self.r1)
self.assertEqual(self.s2[1], self.r2)
self.assertRaises(IndexError, self.empty.__getitem__, 0)
self.assertRaises(KeyError, self.empty.__getitem__, '0')
def test_iter(self):
"""iter functions as expected
"""
s1_iter = iter(self.s1)
count = 0
for actual, expected in zip(s1_iter, self.seqs1):
count += 1
self.assertEqual(actual, expected)
self.assertEqual(count, len(self.seqs1))
self.assertRaises(StopIteration, s1_iter.next)
def test_len(self):
"""len functions as expected
"""
self.assertEqual(len(self.s1), 2)
self.assertEqual(len(self.s2), 3)
self.assertEqual(len(self.s3), 5)
self.assertEqual(len(self.empty), 0)
def test_ne(self):
"""inequality operator functions as expected
"""
self.assertFalse(self.s1 != self.s1)
self.assertTrue(self.s1 != self.s2)
# SequenceCollections with different number of sequences are not equal
self.assertTrue(self.s1 != SequenceCollection([self.d1]))
class FakeSequenceCollection(SequenceCollection):
pass
# SequenceCollections of different types are not equal
self.assertTrue(self.s1 != FakeSequenceCollection([self.d1, self.d2]))
self.assertTrue(self.s1 != Alignment([self.d1, self.d2]))
# SequenceCollections with different sequences are not equal
self.assertTrue(self.s1 !=
SequenceCollection([self.d1, self.r1]))
def test_repr(self):
"""repr functions as expected
"""
self.assertEqual(repr(self.s1),
"<SequenceCollection: n=2; "
"mean +/- std length=5.00 +/- 2.00>")
self.assertEqual(repr(self.s2),
"<SequenceCollection: n=3; "
"mean +/- std length=7.33 +/- 3.68>")
self.assertEqual(repr(self.s3),
"<SequenceCollection: n=5; "
"mean +/- std length=6.40 +/- 3.32>")
self.assertEqual(repr(self.empty),
"<SequenceCollection: n=0; "
"mean +/- std length=0.00 +/- 0.00>")
def test_reversed(self):
"""reversed functions as expected
"""
s1_iter = reversed(self.s1)
count = 0
for actual, expected in zip(s1_iter, self.seqs1[::-1]):
count += 1
self.assertEqual(actual, expected)
self.assertEqual(count, len(self.seqs1))
self.assertRaises(StopIteration, s1_iter.next)
def test_str(self):
"""str functions as expected
"""
exp1 = ">d1\nGATTACA\n>d2\nTTG\n"
self.assertEqual(str(self.s1), exp1)
exp2 = ">r1\nGAUUACA\n>r2\nUUG\n>r3\nU-----UGCC--\n"
self.assertEqual(str(self.s2), exp2)
exp4 = ""
self.assertEqual(str(self.empty), exp4)
def test_distribution_stats(self):
"""distribution_stats functions as expected
"""
actual1 = self.s1.distribution_stats()
self.assertEqual(actual1[0], 2)
self.assertAlmostEqual(actual1[1], 5.0, 3)
self.assertAlmostEqual(actual1[2], 2.0, 3)
actual2 = self.s2.distribution_stats()
self.assertEqual(actual2[0], 3)
self.assertAlmostEqual(actual2[1], 7.333, 3)
self.assertAlmostEqual(actual2[2], 3.682, 3)
actual3 = self.s3.distribution_stats()
self.assertEqual(actual3[0], 5)
self.assertAlmostEqual(actual3[1], 6.400, 3)
self.assertAlmostEqual(actual3[2], 3.323, 3)
actual4 = self.empty.distribution_stats()
self.assertEqual(actual4[0], 0)
self.assertEqual(actual4[1], 0.0)
self.assertEqual(actual4[2], 0.0)
def test_degap(self):
"""degap functions as expected
"""
expected = [(id_, seq.replace('.', '').replace('-', ''))
for id_, seq in self.seqs2_t]
expected = SequenceCollection.from_fasta_records(expected, RNASequence)
actual = self.s2.degap()
self.assertEqual(actual, expected)
def test_get_seq(self):
"""getseq functions asexpected
"""
self.assertEqual(self.s1.get_seq('d1'), self.d1)
self.assertEqual(self.s1.get_seq('d2'), self.d2)
def test_identifiers(self):
"""identifiers functions as expected
"""
self.assertEqual(self.s1.identifiers(), ['d1', 'd2'])
self.assertEqual(self.s2.identifiers(), ['r1', 'r2', 'r3'])
self.assertEqual(self.s3.identifiers(),
['d1', 'd2', 'r1', 'r2', 'r3'])
self.assertEqual(self.empty.identifiers(), [])
def test_int_map(self):
"""int_map functions as expected
"""
expected1 = {"1": self.d1, "2": self.d2}
expected2 = {"1": "d1", "2": "d2"}
self.assertEqual(self.s1.int_map(), (expected1, expected2))
expected1 = {"h-1": self.d1, "h-2": self.d2}
expected2 = {"h-1": "d1", "h-2": "d2"}
self.assertEqual(self.s1.int_map(prefix='h-'), (expected1, expected2))
def test_is_empty(self):
"""is_empty functions as expected
"""
self.assertFalse(self.s1.is_empty())
self.assertFalse(self.s2.is_empty())
self.assertFalse(self.s3.is_empty())
self.assertTrue(self.empty.is_empty())
def test_is_valid(self):
"""is_valid functions as expected
"""
self.assertTrue(self.s1.is_valid())
self.assertTrue(self.s2.is_valid())
self.assertTrue(self.s3.is_valid())
self.assertTrue(self.empty.is_valid())
self.assertFalse(self.invalid_s1.is_valid())
def test_iteritems(self):
"""iteritems functions as expected
"""
self.assertEqual(list(self.s1.iteritems()),
[(s.identifier, s) for s in self.s1])
def test_lower(self):
"""lower functions as expected
"""
self.assertEqual(self.s1.lower(), self.s1_lower)
def test_sequence_count(self):
"""num_seqs functions as expected
"""
self.assertEqual(self.s1.sequence_count(), 2)
self.assertEqual(self.s2.sequence_count(), 3)
self.assertEqual(self.s3.sequence_count(), 5)
self.assertEqual(self.empty.sequence_count(), 0)
def test_sequence_lengths(self):
"""sequence_lengths functions as expected
"""
self.assertEqual(self.s1.sequence_lengths(), [7, 3])
self.assertEqual(self.s2.sequence_lengths(), [7, 3, 12])
self.assertEqual(self.s3.sequence_lengths(), [7, 3, 7, 3, 12])
self.assertEqual(self.empty.sequence_lengths(), [])
def test_to_fasta(self):
"""to_fasta functions as expected
"""
exp1 = ">d1\nGATTACA\n>d2\nTTG\n"
self.assertEqual(self.s1.to_fasta(), exp1)
exp2 = ">r1\nGAUUACA\n>r2\nUUG\n>r3\nU-----UGCC--\n"
self.assertEqual(self.s2.to_fasta(), exp2)
def test_upper(self):
"""upper functions as expected
"""
self.assertEqual(self.s1_lower.upper(), self.s1)
class AlignmentTests(TestCase):
def setUp(self):
self.d1 = DNASequence('..ACC-GTTGG..', identifier="d1")
self.d2 = DNASequence('TTACCGGT-GGCC', identifier="d2")
self.d3 = DNASequence('.-ACC-GTTGC--', identifier="d3")
self.r1 = RNASequence('UUAU-', identifier="r1")
self.r2 = RNASequence('ACGUU', identifier="r2")
self.seqs1 = [self.d1, self.d2, self.d3]
self.seqs2 = [self.r1, self.r2]
self.seqs1_t = [('d1', '..ACC-GTTGG..'), ('d2', 'TTACCGGT-GGCC'),
('d3', '.-ACC-GTTGC--')]
self.seqs2_t = [('r1', 'UUAU-'), ('r2', 'ACGUU')]
self.a1 = Alignment(self.seqs1)
self.a2 = Alignment(self.seqs2)
self.empty = Alignment([])
def test_degap(self):
"""degap functions as expected
"""
expected = [(id_, seq.replace('.', '').replace('-', ''))
for id_, seq in self.seqs1_t]
expected = SequenceCollection.from_fasta_records(expected, DNASequence)
actual = self.a1.degap()
self.assertEqual(actual, expected)
expected = [(id_, seq.replace('.', '').replace('-', ''))
for id_, seq in self.seqs2_t]
expected = SequenceCollection.from_fasta_records(expected, RNASequence)
actual = self.a2.degap()
self.assertEqual(actual, expected)
def test_distances(self):
"""distances functions as expected
"""
expected = [[0, 6./13, 4./13],
[6./13, 0, 7./13],
[4./13, 7./13, 0]]
expected = DistanceMatrix(expected, ['d1', 'd2', 'd3'])
actual = self.a1.distances()
self.assertEqual(actual, expected)
def test_subalignment(self):
"""subalignment functions as expected
"""
# keep seqs by identifiers
actual = self.a1.subalignment(seqs_to_keep=['d1', 'd3'])
expected = Alignment([self.d1, self.d3])
self.assertEqual(actual, expected)
# keep seqs by indices
actual = self.a1.subalignment(seqs_to_keep=[0, 2])
expected = Alignment([self.d1, self.d3])
self.assertEqual(actual, expected)
# keep seqs by identifiers (invert)
actual = self.a1.subalignment(seqs_to_keep=['d1', 'd3'],
invert_seqs_to_keep=True)
expected = Alignment([self.d2])
self.assertEqual(actual, expected)
# keep seqs by indices (invert)
actual = self.a1.subalignment(seqs_to_keep=[0, 2],
invert_seqs_to_keep=True)
expected = Alignment([self.d2])
self.assertEqual(actual, expected)
# keep positions
actual = self.a1.subalignment(positions_to_keep=[0, 2, 3])
d1 = DNASequence('.AC', identifier="d1")
d2 = DNASequence('TAC', identifier="d2")
d3 = DNASequence('.AC', identifier="d3")
expected = Alignment([d1, d2, d3])
self.assertEqual(actual, expected)
# keep positions (invert)
actual = self.a1.subalignment(positions_to_keep=[0, 2, 3],
invert_positions_to_keep=True)
d1 = DNASequence('.C-GTTGG..', identifier="d1")
d2 = DNASequence('TCGGT-GGCC', identifier="d2")
d3 = DNASequence('-C-GTTGC--', identifier="d3")
expected = Alignment([d1, d2, d3])
self.assertEqual(actual, expected)
# keep seqs and positions
actual = self.a1.subalignment(seqs_to_keep=[0, 2],
positions_to_keep=[0, 2, 3])
d1 = DNASequence('.AC', identifier="d1")
d3 = DNASequence('.AC', identifier="d3")
expected = Alignment([d1, d3])
self.assertEqual(actual, expected)
# keep seqs and positions (invert)
actual = self.a1.subalignment(seqs_to_keep=[0, 2],
positions_to_keep=[0, 2, 3],
invert_seqs_to_keep=True,
invert_positions_to_keep=True)
d2 = DNASequence('TCGGT-GGCC', identifier="d2")
expected = Alignment([d2])
self.assertEqual(actual, expected)
def test_init_validate(self):
"""initialization with validation functions as expected
"""
Alignment(self.seqs1, validate=True)
# invalid DNA character
invalid_seqs1 = [self.d1, self.d2, self.d3,
DNASequence('.-ACC-GTXGC--', identifier="i1")]
self.assertRaises(SequenceCollectionError, Alignment,
invalid_seqs1, validate=True)
# invalid lengths (they're not all equal)
invalid_seqs2 = [self.d1, self.d2, self.d3,
DNASequence('.-ACC-GTGC--', identifier="i2")]
self.assertRaises(SequenceCollectionError, Alignment,
invalid_seqs2, validate=True)
def test_is_valid(self):
"""is_valid functions as expected
"""
self.assertTrue(self.a1.is_valid())
self.assertTrue(self.a2.is_valid())
self.assertTrue(self.empty.is_valid())
# invalid because of length mismatch
d1 = DNASequence('..ACC-GTTGG..', identifier="d1")
d2 = DNASequence('TTACCGGT-GGC', identifier="d2")
self.assertFalse(Alignment([d1, d2]).is_valid())
        # invalid because of invalid characters
d1 = DNASequence('..ACC-GTXGG..', identifier="d1")
d2 = DNASequence('TTACCGGT-GGCC', identifier="d2")
self.assertFalse(Alignment([d1, d2]).is_valid())
def test_iter_positions(self):
"""iter_positions functions as expected
"""
actual = list(self.a2.iter_positions())
expected = [map(RNASequence, list('UA')),
map(RNASequence, list('UC')),
map(RNASequence, list('AG')),
map(RNASequence, list('UU')),
map(RNASequence, list('-U'))]
self.assertEqual(actual, expected)
actual = list(self.a2.iter_positions(constructor=str))
expected = [list('UA'),
list('UC'),
list('AG'),
list('UU'),
list('-U')]
self.assertEqual(actual, expected)
def test_majority_consensus(self):
"""majority_consensus functions as expected
"""
d1 = DNASequence('TTT', identifier="d1")
d2 = DNASequence('TT-', identifier="d2")
d3 = DNASequence('TC-', identifier="d3")
a1 = Alignment([d1, d2, d3])
self.assertEqual(a1.majority_consensus(), DNASequence('TT-'))
d1 = DNASequence('T', identifier="d1")
d2 = DNASequence('A', identifier="d2")
a1 = Alignment([d1, d2])
self.assertTrue(a1.majority_consensus() in
[DNASequence('T'), DNASequence('A')])
self.assertEqual(self.empty.majority_consensus(), '')
def test_omit_gap_positions(self):
"""omitting gap positions functions as expected
"""
expected = self.a2
self.assertEqual(self.a2.omit_gap_positions(1.0), expected)
self.assertEqual(self.a2.omit_gap_positions(0.51), expected)
r1 = RNASequence('UUAU', identifier="r1")
r2 = RNASequence('ACGU', identifier="r2")
expected = Alignment([r1, r2])
self.assertEqual(self.a2.omit_gap_positions(0.49), expected)
r1 = RNASequence('UUAU', identifier="r1")
r2 = RNASequence('ACGU', identifier="r2")
expected = Alignment([r1, r2])
self.assertEqual(self.a2.omit_gap_positions(0.0), expected)
self.assertEqual(self.empty.omit_gap_positions(0.0), self.empty)
self.assertEqual(self.empty.omit_gap_positions(0.49), self.empty)
self.assertEqual(self.empty.omit_gap_positions(1.0), self.empty)
def test_omit_gap_sequences(self):
"""omitting gap sequences functions as expected
"""
expected = self.a2
self.assertEqual(self.a2.omit_gap_sequences(1.0), expected)
self.assertEqual(self.a2.omit_gap_sequences(0.20), expected)
expected = Alignment([self.r2])
self.assertEqual(self.a2.omit_gap_sequences(0.19), expected)
self.assertEqual(self.empty.omit_gap_sequences(0.0), self.empty)
self.assertEqual(self.empty.omit_gap_sequences(0.2), self.empty)
self.assertEqual(self.empty.omit_gap_sequences(1.0), self.empty)
def test_position_counters(self):
"""position_counters functions as expected
"""
expected = [Counter({'U': 1, 'A': 1}),
Counter({'U': 1, 'C': 1}),
Counter({'A': 1, 'G': 1}),
Counter({'U': 2}),
Counter({'-': 1, 'U': 1})]
self.assertEqual(self.a2.position_counters(), expected)
self.assertEqual(self.empty.position_counters(), [])
def test_position_frequencies(self):
"""computing position frequencies functions as expected
"""
expected = [defaultdict(int, {'U': 0.5, 'A': 0.5}),
defaultdict(int, {'U': 0.5, 'C': 0.5}),
defaultdict(int, {'A': 0.5, 'G': 0.5}),
defaultdict(int, {'U': 1.0}),
defaultdict(int, {'-': 0.5, 'U': 0.5})]
self.assertEqual(self.a2.position_frequencies(), expected)
self.assertEqual(self.empty.position_frequencies(), [])
def test_position_entropies(self):
"""computing positional uncertainties functions as expected
tested by calculating values as described in this post:
http://stackoverflow.com/a/15476958/3424666
"""
expected = [0.69314, 0.69314, 0.69314, 0.0, np.nan]
np.testing.assert_almost_equal(self.a2.position_entropies(),
expected, 5)
expected = [1.0, 1.0, 1.0, 0.0, np.nan]
np.testing.assert_almost_equal(self.a2.position_entropies(base=2),
expected, 5)
np.testing.assert_almost_equal(self.empty.position_entropies(base=2),
[])
def test_sequence_frequencies(self):
"""sequence_frequencies functions as expected
"""
expected = [defaultdict(int, {'U': 3/5, 'A': 1/5, '-': 1/5}),
defaultdict(int, {'A': 1/5, 'C': 1/5, 'G': 1/5, 'U': 2/5})]
actual = self.a2.sequence_frequencies()
for a, e in zip(actual, expected):
a_keys = a.keys()
a_keys.sort()
a_values = a.values()
a_values.sort()
e_keys = e.keys()
e_keys.sort()
e_values = e.values()
e_values.sort()
            self.assertEqual(a_keys, e_keys)
np.testing.assert_almost_equal(a_values, e_values, 5)
def test_sequence_length(self):
"""sequence_length functions as expected
"""
self.assertEqual(self.a1.sequence_length(), 13)
self.assertEqual(self.a2.sequence_length(), 5)
self.assertEqual(self.empty.sequence_length(), 0)
def test_to_phylip(self):
"""to_phylip functions as expected
"""
d1 = DNASequence('..ACC-GTTGG..', identifier="d1")
d2 = DNASequence('TTACCGGT-GGCC', identifier="d2")
d3 = DNASequence('.-ACC-GTTGC--', identifier="d3")
a = Alignment([d1, d2, d3])
phylip_str, id_map = a.to_phylip(map_labels=False)
self.assertEqual(id_map, {'d1': 'd1',
'd3': 'd3',
'd2': 'd2'})
expected = "\n".join(["3 13",
"d1 ..ACC-GTTGG..",
"d2 TTACCGGT-GGCC",
"d3 .-ACC-GTTGC--"])
self.assertEqual(phylip_str, expected)
def test_to_phylip_map_labels(self):
"""to_phylip functions as expected with label mapping
"""
d1 = DNASequence('..ACC-GTTGG..', identifier="d1")
d2 = DNASequence('TTACCGGT-GGCC', identifier="d2")
d3 = DNASequence('.-ACC-GTTGC--', identifier="d3")
a = Alignment([d1, d2, d3])
phylip_str, id_map = a.to_phylip(map_labels=True, label_prefix="s")
self.assertEqual(id_map, {'s1': 'd1',
's3': 'd3',
's2': 'd2'})
expected = "\n".join(["3 13",
"s1 ..ACC-GTTGG..",
"s2 TTACCGGT-GGCC",
"s3 .-ACC-GTTGC--"])
self.assertEqual(phylip_str, expected)
def test_validate_lengths(self):
"""
"""
self.assertTrue(self.a1._validate_lengths())
self.assertTrue(self.a2._validate_lengths())
self.assertTrue(self.empty._validate_lengths())
self.assertTrue(Alignment([
DNASequence('TTT', identifier="d1")])._validate_lengths())
self.assertFalse(Alignment([
DNASequence('TTT', identifier="d1"),
DNASequence('TT', identifier="d2")])._validate_lengths())
if __name__ == "__main__":
main()
|
Jorge-C/bipy
|
skbio/core/tests/test_alignment.py
|
Python
|
bsd-3-clause
| 25,230
|
[
"scikit-bio"
] |
de96dc82988421196574d98c068657cbb7c6a9aa143e2eceac43a4c816e09d6c
|
"""Test Register for Family Camp Bookings.
Usage:
test_register_family.py
test_register_family.py <email> <adults> <children> <infants>
Options:
-d,--debug Turn on debug output.
-h,--help Show this screen.
--version Show version.
"""
from splinter import Browser
from time import sleep
import datetime
import random
from docopt import docopt
ACTIVITIES = ['Archery',
'Blindfold Trail',
'BMX Biking',
'Canoeing',
'Caving',
'Climbing',
'Crystal Maze',
'Fire Lighting',
'Pottery Painting',
"It's a Knockout"]
URL = "https://docs.google.com/forms/d/1v-m3d7kMGW8QXxFaqMRQf7sZk66BsMWy1m52NWMuRaU/viewform"
def fill_camper(browser, NAME, camper_no, age_type=None):
age_type = age_type if age_type is not None else random.choice(("adult", "child", "infant"))
browser.find_by_xpath(
'//input[starts-with(@aria-label,"First Name (Camper {})")]'
''.format(camper_no)).fill("First {}".format(camper_no))
browser.find_by_xpath(
'//input[starts-with(@aria-label,"Surname (Camper {})")]'
''.format(camper_no)).fill(NAME)
browser.find_by_xpath(
'//input[starts-with(@aria-label,"Dietary Requirements (Camper {})")]'
''.format(camper_no)).fill(random.choice(("None","Vegy","Vegan")))
if (age_type == "adult"):
browser.find_by_xpath(
'//div[@class="ss-form-entry"]//input[@value="Adult (over 18 years)"]'
'[./../../../../../label/div[contains(.,"Camper {}")]]'.format(camper_no)).click()
browser.find_by_xpath(
'//select[starts-with(@aria-label,"DBS Status")]').select(
random.choice(('Pending', 'Received', 'Unknown', 'None')))
elif (age_type == "child"):
browser.find_by_xpath(
'//div[@class="ss-form-entry"]//input'
'[@value="Child (between 5 and 18 years)"]'
'[./../../../../../label/div[contains(.,"Camper {}")]]'.format(camper_no)).click()
browser.find_by_xpath(
'//input[starts-with(@aria-label,"Age at start of camp (if under 18) (Camper {})")]'
''.format(camper_no)).fill(str(random.randint(5, 18)))
else: # infant
browser.find_by_xpath(
'//div[@class="ss-form-entry"]//input'
'[@value="Infant (under 5 years)"]'
'[./../../../../../label/div[contains(.,"Camper {}")]]'.format(camper_no)).click()
browser.find_by_xpath(
'//input[starts-with(@aria-label,"Age at start of camp (if under 18) (Camper {})")]'
''.format(camper_no)).fill(str(random.randint(0, 4)))
for act in random.sample(ACTIVITIES, 2):
browser.find_by_xpath(
'//input[starts-with(@value,"{}")]'
'[./../../../../../label/div'
'[contains(.,"Primary Activities (Camper {})")]]'
''.format(act, camper_no)).click()
for act in random.sample(ACTIVITIES, random.randint(0, len(ACTIVITIES))):
browser.find_by_xpath(
'//input[starts-with(@value,"{}")]'
'[./../../../../../label/div'
'[contains(.,"Other Activities (Camper {})")]]'
''.format(act, camper_no)).click()
def register(email, adults, children, infants):
NAME = datetime.datetime.now().strftime("%Y%m%d%H%M")
with Browser() as browser:
browser.visit(URL)
sleep(5)
browser.find_by_xpath('//label[*[text()[contains(.,"My child")]]]').click()
browser.find_by_xpath('//input[starts-with(@aria-label,"Family Name")]').fill(NAME)
browser.find_by_xpath('//input[starts-with(@aria-label,"Email Address")]').fill(email)
browser.find_by_xpath('//input[starts-with(@aria-label,"Family Address")]').fill("Address")
browser.find_by_xpath('//input[starts-with(@aria-label,"Family Telephone Number")]').fill("123")
browser.find_by_xpath('//input[starts-with(@aria-label,"Family Association with 7th Lichfield")]').fill("Test")
browser.find_by_xpath('//input[starts-with(@aria-label,"Family Number of Tents")]').fill(str(random.randint(1, 2)))
browser.find_by_xpath('//input[starts-with(@aria-label,"Family Number of Caravans or Motorhomes")]').fill(str(random.randint(1, 2)))
browser.find_by_xpath('//input[starts-with(@aria-label,"Home Contact Name")]').fill("Home")
browser.find_by_xpath('//input[starts-with(@aria-label,"Home Contact Number")]').fill("123")
browser.find_by_xpath('//input[starts-with(@value,"We are happy to provide")]').click()
browser.find_by_xpath('//input[starts-with(@value,"We would like to have another")]').click()
browser.find_by_xpath("//input[@name='continue']").click()
fill_camper(browser, NAME, "1", "adult")
adults -= 1
browser.find_by_xpath('//input[@aria-label="Yes"]').click()
browser.find_by_xpath("//input[@name='continue']").click()
camper_no = 2
if adults > 0:
age_type = 'adult'
elif children > 0:
age_type = 'child'
else:
age_type = 'infant'
while (camper_no < 10):
if (age_type == 'adult' and (adults > 0)):
fill_camper(browser, NAME, str(camper_no), "adult")
adults -= 1
if adults == 0:
age_type = 'child'
elif (age_type == 'child' and (children > 0)):
fill_camper(browser, NAME, str(camper_no), "child")
children -= 1
if children == 0:
age_type = 'infant'
elif (age_type == 'infant' and (infants > 0)):
fill_camper(browser, NAME, str(camper_no), "infant")
infants -= 1
if (camper_no == 5):
browser.find_by_xpath('//input[@aria-label="Yes"]').click()
browser.find_by_xpath("//input[@name='continue']").click()
camper_no += 1
browser.find_by_xpath("//input[@name='submit']").click()
def register_random():
NAME = datetime.datetime.now().strftime("%Y%m%d%H%M")
with Browser() as browser:
browser.visit(URL)
sleep(5)
browser.find_by_xpath('//label[*[text()[contains(.,"My family")]]]').click()
browser.find_by_xpath('//label[*[text()[contains(.,"My child")]]]').click()
browser.find_by_xpath('//input[starts-with(@aria-label,"Family Name")]').fill(NAME)
browser.find_by_xpath('//input[starts-with(@aria-label,"Email Address")]').fill("rjt-family-{}@gmail.com".format(NAME))
browser.find_by_xpath('//input[starts-with(@aria-label,"Family Address")]').fill("Address")
browser.find_by_xpath('//input[starts-with(@aria-label,"Family Telephone Number")]').fill("123")
browser.find_by_xpath('//input[starts-with(@aria-label,"Family Association with 7th Lichfield")]').fill("Test")
browser.find_by_xpath('//input[starts-with(@aria-label,"Family Number of Tents")]').fill(str(random.randint(1, 2)))
browser.find_by_xpath('//input[starts-with(@aria-label,"Family Number of Caravans or Motorhomes")]').fill(str(random.randint(1, 2)))
browser.find_by_xpath('//input[starts-with(@aria-label,"Home Contact Name")]').fill("Home")
browser.find_by_xpath('//input[starts-with(@aria-label,"Home Contact Number")]').fill("123")
browser.find_by_xpath('//input[starts-with(@value,"We are happy to provide")]').click()
browser.find_by_xpath('//input[starts-with(@value,"We would like to have another")]').click()
browser.find_by_xpath("//input[@name='continue']").click()
fill_camper(browser, NAME, "1")
browser.find_by_xpath('//input[@aria-label="Yes"]').click()
browser.find_by_xpath("//input[@name='continue']").click()
fill_camper(browser, NAME, "2")
fill_camper(browser, NAME, "3")
fill_camper(browser, NAME, "4")
fill_camper(browser, NAME, "5")
browser.find_by_xpath('//input[@aria-label="Yes"]').click()
browser.find_by_xpath("//input[@name='continue']").click()
fill_camper(browser, NAME, "6")
fill_camper(browser, NAME, "7")
fill_camper(browser, NAME, "8")
fill_camper(browser, NAME, "9")
fill_camper(browser, NAME, "10")
browser.find_by_xpath("//input[@name='submit']").click()
if __name__ == '__main__':
args = docopt(__doc__, version='1.0')
if (args['<email>']):
register(args['<email>'],
int(args['<adults>']),
int(args['<children>']),
int(args['<infants>']))
else:
register_random()
|
hippysurfer/family-camp
|
family_camp/test/test_register_family.py
|
Python
|
mit
| 8,847
|
[
"CRYSTAL",
"VisIt"
] |
8082e1a03636fdef50712a93f61019f77d9dae9601d62f8fd4c332a4c27c7e2c
|
from __future__ import absolute_import, division, unicode_literals
named_entities = {
"AElig": "\xc6",
"AElig;": "\xc6",
"AMP": "&",
"AMP;": "&",
"Aacute": "\xc1",
"Aacute;": "\xc1",
"Abreve;": "\u0102",
"Acirc": "\xc2",
"Acirc;": "\xc2",
"Acy;": "\u0410",
"Afr;": "\U0001d504",
"Agrave": "\xc0",
"Agrave;": "\xc0",
"Alpha;": "\u0391",
"Amacr;": "\u0100",
"And;": "\u2a53",
"Aogon;": "\u0104",
"Aopf;": "\U0001d538",
"ApplyFunction;": "\u2061",
"Aring": "\xc5",
"Aring;": "\xc5",
"Ascr;": "\U0001d49c",
"Assign;": "\u2254",
"Atilde": "\xc3",
"Atilde;": "\xc3",
"Auml": "\xc4",
"Auml;": "\xc4",
"Backslash;": "\u2216",
"Barv;": "\u2ae7",
"Barwed;": "\u2306",
"Bcy;": "\u0411",
"Because;": "\u2235",
"Bernoullis;": "\u212c",
"Beta;": "\u0392",
"Bfr;": "\U0001d505",
"Bopf;": "\U0001d539",
"Breve;": "\u02d8",
"Bscr;": "\u212c",
"Bumpeq;": "\u224e",
"CHcy;": "\u0427",
"COPY": "\xa9",
"COPY;": "\xa9",
"Cacute;": "\u0106",
"Cap;": "\u22d2",
"CapitalDifferentialD;": "\u2145",
"Cayleys;": "\u212d",
"Ccaron;": "\u010c",
"Ccedil": "\xc7",
"Ccedil;": "\xc7",
"Ccirc;": "\u0108",
"Cconint;": "\u2230",
"Cdot;": "\u010a",
"Cedilla;": "\xb8",
"CenterDot;": "\xb7",
"Cfr;": "\u212d",
"Chi;": "\u03a7",
"CircleDot;": "\u2299",
"CircleMinus;": "\u2296",
"CirclePlus;": "\u2295",
"CircleTimes;": "\u2297",
"ClockwiseContourIntegral;": "\u2232",
"CloseCurlyDoubleQuote;": "\u201d",
"CloseCurlyQuote;": "\u2019",
"Colon;": "\u2237",
"Colone;": "\u2a74",
"Congruent;": "\u2261",
"Conint;": "\u222f",
"ContourIntegral;": "\u222e",
"Copf;": "\u2102",
"Coproduct;": "\u2210",
"CounterClockwiseContourIntegral;": "\u2233",
"Cross;": "\u2a2f",
"Cscr;": "\U0001d49e",
"Cup;": "\u22d3",
"CupCap;": "\u224d",
"DD;": "\u2145",
"DDotrahd;": "\u2911",
"DJcy;": "\u0402",
"DScy;": "\u0405",
"DZcy;": "\u040f",
"Dagger;": "\u2021",
"Darr;": "\u21a1",
"Dashv;": "\u2ae4",
"Dcaron;": "\u010e",
"Dcy;": "\u0414",
"Del;": "\u2207",
"Delta;": "\u0394",
"Dfr;": "\U0001d507",
"DiacriticalAcute;": "\xb4",
"DiacriticalDot;": "\u02d9",
"DiacriticalDoubleAcute;": "\u02dd",
"DiacriticalGrave;": "`",
"DiacriticalTilde;": "\u02dc",
"Diamond;": "\u22c4",
"DifferentialD;": "\u2146",
"Dopf;": "\U0001d53b",
"Dot;": "\xa8",
"DotDot;": "\u20dc",
"DotEqual;": "\u2250",
"DoubleContourIntegral;": "\u222f",
"DoubleDot;": "\xa8",
"DoubleDownArrow;": "\u21d3",
"DoubleLeftArrow;": "\u21d0",
"DoubleLeftRightArrow;": "\u21d4",
"DoubleLeftTee;": "\u2ae4",
"DoubleLongLeftArrow;": "\u27f8",
"DoubleLongLeftRightArrow;": "\u27fa",
"DoubleLongRightArrow;": "\u27f9",
"DoubleRightArrow;": "\u21d2",
"DoubleRightTee;": "\u22a8",
"DoubleUpArrow;": "\u21d1",
"DoubleUpDownArrow;": "\u21d5",
"DoubleVerticalBar;": "\u2225",
"DownArrow;": "\u2193",
"DownArrowBar;": "\u2913",
"DownArrowUpArrow;": "\u21f5",
"DownBreve;": "\u0311",
"DownLeftRightVector;": "\u2950",
"DownLeftTeeVector;": "\u295e",
"DownLeftVector;": "\u21bd",
"DownLeftVectorBar;": "\u2956",
"DownRightTeeVector;": "\u295f",
"DownRightVector;": "\u21c1",
"DownRightVectorBar;": "\u2957",
"DownTee;": "\u22a4",
"DownTeeArrow;": "\u21a7",
"Downarrow;": "\u21d3",
"Dscr;": "\U0001d49f",
"Dstrok;": "\u0110",
"ENG;": "\u014a",
"ETH": "\xd0",
"ETH;": "\xd0",
"Eacute": "\xc9",
"Eacute;": "\xc9",
"Ecaron;": "\u011a",
"Ecirc": "\xca",
"Ecirc;": "\xca",
"Ecy;": "\u042d",
"Edot;": "\u0116",
"Efr;": "\U0001d508",
"Egrave": "\xc8",
"Egrave;": "\xc8",
"Element;": "\u2208",
"Emacr;": "\u0112",
"EmptySmallSquare;": "\u25fb",
"EmptyVerySmallSquare;": "\u25ab",
"Eogon;": "\u0118",
"Eopf;": "\U0001d53c",
"Epsilon;": "\u0395",
"Equal;": "\u2a75",
"EqualTilde;": "\u2242",
"Equilibrium;": "\u21cc",
"Escr;": "\u2130",
"Esim;": "\u2a73",
"Eta;": "\u0397",
"Euml": "\xcb",
"Euml;": "\xcb",
"Exists;": "\u2203",
"ExponentialE;": "\u2147",
"Fcy;": "\u0424",
"Ffr;": "\U0001d509",
"FilledSmallSquare;": "\u25fc",
"FilledVerySmallSquare;": "\u25aa",
"Fopf;": "\U0001d53d",
"ForAll;": "\u2200",
"Fouriertrf;": "\u2131",
"Fscr;": "\u2131",
"GJcy;": "\u0403",
"GT": ">",
"GT;": ">",
"Gamma;": "\u0393",
"Gammad;": "\u03dc",
"Gbreve;": "\u011e",
"Gcedil;": "\u0122",
"Gcirc;": "\u011c",
"Gcy;": "\u0413",
"Gdot;": "\u0120",
"Gfr;": "\U0001d50a",
"Gg;": "\u22d9",
"Gopf;": "\U0001d53e",
"GreaterEqual;": "\u2265",
"GreaterEqualLess;": "\u22db",
"GreaterFullEqual;": "\u2267",
"GreaterGreater;": "\u2aa2",
"GreaterLess;": "\u2277",
"GreaterSlantEqual;": "\u2a7e",
"GreaterTilde;": "\u2273",
"Gscr;": "\U0001d4a2",
"Gt;": "\u226b",
"HARDcy;": "\u042a",
"Hacek;": "\u02c7",
"Hat;": "^",
"Hcirc;": "\u0124",
"Hfr;": "\u210c",
"HilbertSpace;": "\u210b",
"Hopf;": "\u210d",
"HorizontalLine;": "\u2500",
"Hscr;": "\u210b",
"Hstrok;": "\u0126",
"HumpDownHump;": "\u224e",
"HumpEqual;": "\u224f",
"IEcy;": "\u0415",
"IJlig;": "\u0132",
"IOcy;": "\u0401",
"Iacute": "\xcd",
"Iacute;": "\xcd",
"Icirc": "\xce",
"Icirc;": "\xce",
"Icy;": "\u0418",
"Idot;": "\u0130",
"Ifr;": "\u2111",
"Igrave": "\xcc",
"Igrave;": "\xcc",
"Im;": "\u2111",
"Imacr;": "\u012a",
"ImaginaryI;": "\u2148",
"Implies;": "\u21d2",
"Int;": "\u222c",
"Integral;": "\u222b",
"Intersection;": "\u22c2",
"InvisibleComma;": "\u2063",
"InvisibleTimes;": "\u2062",
"Iogon;": "\u012e",
"Iopf;": "\U0001d540",
"Iota;": "\u0399",
"Iscr;": "\u2110",
"Itilde;": "\u0128",
"Iukcy;": "\u0406",
"Iuml": "\xcf",
"Iuml;": "\xcf",
"Jcirc;": "\u0134",
"Jcy;": "\u0419",
"Jfr;": "\U0001d50d",
"Jopf;": "\U0001d541",
"Jscr;": "\U0001d4a5",
"Jsercy;": "\u0408",
"Jukcy;": "\u0404",
"KHcy;": "\u0425",
"KJcy;": "\u040c",
"Kappa;": "\u039a",
"Kcedil;": "\u0136",
"Kcy;": "\u041a",
"Kfr;": "\U0001d50e",
"Kopf;": "\U0001d542",
"Kscr;": "\U0001d4a6",
"LJcy;": "\u0409",
"LT": "<",
"LT;": "<",
"Lacute;": "\u0139",
"Lambda;": "\u039b",
"Lang;": "\u27ea",
"Laplacetrf;": "\u2112",
"Larr;": "\u219e",
"Lcaron;": "\u013d",
"Lcedil;": "\u013b",
"Lcy;": "\u041b",
"LeftAngleBracket;": "\u27e8",
"LeftArrow;": "\u2190",
"LeftArrowBar;": "\u21e4",
"LeftArrowRightArrow;": "\u21c6",
"LeftCeiling;": "\u2308",
"LeftDoubleBracket;": "\u27e6",
"LeftDownTeeVector;": "\u2961",
"LeftDownVector;": "\u21c3",
"LeftDownVectorBar;": "\u2959",
"LeftFloor;": "\u230a",
"LeftRightArrow;": "\u2194",
"LeftRightVector;": "\u294e",
"LeftTee;": "\u22a3",
"LeftTeeArrow;": "\u21a4",
"LeftTeeVector;": "\u295a",
"LeftTriangle;": "\u22b2",
"LeftTriangleBar;": "\u29cf",
"LeftTriangleEqual;": "\u22b4",
"LeftUpDownVector;": "\u2951",
"LeftUpTeeVector;": "\u2960",
"LeftUpVector;": "\u21bf",
"LeftUpVectorBar;": "\u2958",
"LeftVector;": "\u21bc",
"LeftVectorBar;": "\u2952",
"Leftarrow;": "\u21d0",
"Leftrightarrow;": "\u21d4",
"LessEqualGreater;": "\u22da",
"LessFullEqual;": "\u2266",
"LessGreater;": "\u2276",
"LessLess;": "\u2aa1",
"LessSlantEqual;": "\u2a7d",
"LessTilde;": "\u2272",
"Lfr;": "\U0001d50f",
"Ll;": "\u22d8",
"Lleftarrow;": "\u21da",
"Lmidot;": "\u013f",
"LongLeftArrow;": "\u27f5",
"LongLeftRightArrow;": "\u27f7",
"LongRightArrow;": "\u27f6",
"Longleftarrow;": "\u27f8",
"Longleftrightarrow;": "\u27fa",
"Longrightarrow;": "\u27f9",
"Lopf;": "\U0001d543",
"LowerLeftArrow;": "\u2199",
"LowerRightArrow;": "\u2198",
"Lscr;": "\u2112",
"Lsh;": "\u21b0",
"Lstrok;": "\u0141",
"Lt;": "\u226a",
"Map;": "\u2905",
"Mcy;": "\u041c",
"MediumSpace;": "\u205f",
"Mellintrf;": "\u2133",
"Mfr;": "\U0001d510",
"MinusPlus;": "\u2213",
"Mopf;": "\U0001d544",
"Mscr;": "\u2133",
"Mu;": "\u039c",
"NJcy;": "\u040a",
"Nacute;": "\u0143",
"Ncaron;": "\u0147",
"Ncedil;": "\u0145",
"Ncy;": "\u041d",
"NegativeMediumSpace;": "\u200b",
"NegativeThickSpace;": "\u200b",
"NegativeThinSpace;": "\u200b",
"NegativeVeryThinSpace;": "\u200b",
"NestedGreaterGreater;": "\u226b",
"NestedLessLess;": "\u226a",
"NewLine;": "\n",
"Nfr;": "\U0001d511",
"NoBreak;": "\u2060",
"NonBreakingSpace;": "\xa0",
"Nopf;": "\u2115",
"Not;": "\u2aec",
"NotCongruent;": "\u2262",
"NotCupCap;": "\u226d",
"NotDoubleVerticalBar;": "\u2226",
"NotElement;": "\u2209",
"NotEqual;": "\u2260",
"NotEqualTilde;": "\u2242\u0338",
"NotExists;": "\u2204",
"NotGreater;": "\u226f",
"NotGreaterEqual;": "\u2271",
"NotGreaterFullEqual;": "\u2267\u0338",
"NotGreaterGreater;": "\u226b\u0338",
"NotGreaterLess;": "\u2279",
"NotGreaterSlantEqual;": "\u2a7e\u0338",
"NotGreaterTilde;": "\u2275",
"NotHumpDownHump;": "\u224e\u0338",
"NotHumpEqual;": "\u224f\u0338",
"NotLeftTriangle;": "\u22ea",
"NotLeftTriangleBar;": "\u29cf\u0338",
"NotLeftTriangleEqual;": "\u22ec",
"NotLess;": "\u226e",
"NotLessEqual;": "\u2270",
"NotLessGreater;": "\u2278",
"NotLessLess;": "\u226a\u0338",
"NotLessSlantEqual;": "\u2a7d\u0338",
"NotLessTilde;": "\u2274",
"NotNestedGreaterGreater;": "\u2aa2\u0338",
"NotNestedLessLess;": "\u2aa1\u0338",
"NotPrecedes;": "\u2280",
"NotPrecedesEqual;": "\u2aaf\u0338",
"NotPrecedesSlantEqual;": "\u22e0",
"NotReverseElement;": "\u220c",
"NotRightTriangle;": "\u22eb",
"NotRightTriangleBar;": "\u29d0\u0338",
"NotRightTriangleEqual;": "\u22ed",
"NotSquareSubset;": "\u228f\u0338",
"NotSquareSubsetEqual;": "\u22e2",
"NotSquareSuperset;": "\u2290\u0338",
"NotSquareSupersetEqual;": "\u22e3",
"NotSubset;": "\u2282\u20d2",
"NotSubsetEqual;": "\u2288",
"NotSucceeds;": "\u2281",
"NotSucceedsEqual;": "\u2ab0\u0338",
"NotSucceedsSlantEqual;": "\u22e1",
"NotSucceedsTilde;": "\u227f\u0338",
"NotSuperset;": "\u2283\u20d2",
"NotSupersetEqual;": "\u2289",
"NotTilde;": "\u2241",
"NotTildeEqual;": "\u2244",
"NotTildeFullEqual;": "\u2247",
"NotTildeTilde;": "\u2249",
"NotVerticalBar;": "\u2224",
"Nscr;": "\U0001d4a9",
"Ntilde": "\xd1",
"Ntilde;": "\xd1",
"Nu;": "\u039d",
"OElig;": "\u0152",
"Oacute": "\xd3",
"Oacute;": "\xd3",
"Ocirc": "\xd4",
"Ocirc;": "\xd4",
"Ocy;": "\u041e",
"Odblac;": "\u0150",
"Ofr;": "\U0001d512",
"Ograve": "\xd2",
"Ograve;": "\xd2",
"Omacr;": "\u014c",
"Omega;": "\u03a9",
"Omicron;": "\u039f",
"Oopf;": "\U0001d546",
"OpenCurlyDoubleQuote;": "\u201c",
"OpenCurlyQuote;": "\u2018",
"Or;": "\u2a54",
"Oscr;": "\U0001d4aa",
"Oslash": "\xd8",
"Oslash;": "\xd8",
"Otilde": "\xd5",
"Otilde;": "\xd5",
"Otimes;": "\u2a37",
"Ouml": "\xd6",
"Ouml;": "\xd6",
"OverBar;": "\u203e",
"OverBrace;": "\u23de",
"OverBracket;": "\u23b4",
"OverParenthesis;": "\u23dc",
"PartialD;": "\u2202",
"Pcy;": "\u041f",
"Pfr;": "\U0001d513",
"Phi;": "\u03a6",
"Pi;": "\u03a0",
"PlusMinus;": "\xb1",
"Poincareplane;": "\u210c",
"Popf;": "\u2119",
"Pr;": "\u2abb",
"Precedes;": "\u227a",
"PrecedesEqual;": "\u2aaf",
"PrecedesSlantEqual;": "\u227c",
"PrecedesTilde;": "\u227e",
"Prime;": "\u2033",
"Product;": "\u220f",
"Proportion;": "\u2237",
"Proportional;": "\u221d",
"Pscr;": "\U0001d4ab",
"Psi;": "\u03a8",
"QUOT": "\"",
"QUOT;": "\"",
"Qfr;": "\U0001d514",
"Qopf;": "\u211a",
"Qscr;": "\U0001d4ac",
"RBarr;": "\u2910",
"REG": "\xae",
"REG;": "\xae",
"Racute;": "\u0154",
"Rang;": "\u27eb",
"Rarr;": "\u21a0",
"Rarrtl;": "\u2916",
"Rcaron;": "\u0158",
"Rcedil;": "\u0156",
"Rcy;": "\u0420",
"Re;": "\u211c",
"ReverseElement;": "\u220b",
"ReverseEquilibrium;": "\u21cb",
"ReverseUpEquilibrium;": "\u296f",
"Rfr;": "\u211c",
"Rho;": "\u03a1",
"RightAngleBracket;": "\u27e9",
"RightArrow;": "\u2192",
"RightArrowBar;": "\u21e5",
"RightArrowLeftArrow;": "\u21c4",
"RightCeiling;": "\u2309",
"RightDoubleBracket;": "\u27e7",
"RightDownTeeVector;": "\u295d",
"RightDownVector;": "\u21c2",
"RightDownVectorBar;": "\u2955",
"RightFloor;": "\u230b",
"RightTee;": "\u22a2",
"RightTeeArrow;": "\u21a6",
"RightTeeVector;": "\u295b",
"RightTriangle;": "\u22b3",
"RightTriangleBar;": "\u29d0",
"RightTriangleEqual;": "\u22b5",
"RightUpDownVector;": "\u294f",
"RightUpTeeVector;": "\u295c",
"RightUpVector;": "\u21be",
"RightUpVectorBar;": "\u2954",
"RightVector;": "\u21c0",
"RightVectorBar;": "\u2953",
"Rightarrow;": "\u21d2",
"Ropf;": "\u211d",
"RoundImplies;": "\u2970",
"Rrightarrow;": "\u21db",
"Rscr;": "\u211b",
"Rsh;": "\u21b1",
"RuleDelayed;": "\u29f4",
"SHCHcy;": "\u0429",
"SHcy;": "\u0428",
"SOFTcy;": "\u042c",
"Sacute;": "\u015a",
"Sc;": "\u2abc",
"Scaron;": "\u0160",
"Scedil;": "\u015e",
"Scirc;": "\u015c",
"Scy;": "\u0421",
"Sfr;": "\U0001d516",
"ShortDownArrow;": "\u2193",
"ShortLeftArrow;": "\u2190",
"ShortRightArrow;": "\u2192",
"ShortUpArrow;": "\u2191",
"Sigma;": "\u03a3",
"SmallCircle;": "\u2218",
"Sopf;": "\U0001d54a",
"Sqrt;": "\u221a",
"Square;": "\u25a1",
"SquareIntersection;": "\u2293",
"SquareSubset;": "\u228f",
"SquareSubsetEqual;": "\u2291",
"SquareSuperset;": "\u2290",
"SquareSupersetEqual;": "\u2292",
"SquareUnion;": "\u2294",
"Sscr;": "\U0001d4ae",
"Star;": "\u22c6",
"Sub;": "\u22d0",
"Subset;": "\u22d0",
"SubsetEqual;": "\u2286",
"Succeeds;": "\u227b",
"SucceedsEqual;": "\u2ab0",
"SucceedsSlantEqual;": "\u227d",
"SucceedsTilde;": "\u227f",
"SuchThat;": "\u220b",
"Sum;": "\u2211",
"Sup;": "\u22d1",
"Superset;": "\u2283",
"SupersetEqual;": "\u2287",
"Supset;": "\u22d1",
"THORN": "\xde",
"THORN;": "\xde",
"TRADE;": "\u2122",
"TSHcy;": "\u040b",
"TScy;": "\u0426",
"Tab;": "\t",
"Tau;": "\u03a4",
"Tcaron;": "\u0164",
"Tcedil;": "\u0162",
"Tcy;": "\u0422",
"Tfr;": "\U0001d517",
"Therefore;": "\u2234",
"Theta;": "\u0398",
"ThickSpace;": "\u205f\u200a",
"ThinSpace;": "\u2009",
"Tilde;": "\u223c",
"TildeEqual;": "\u2243",
"TildeFullEqual;": "\u2245",
"TildeTilde;": "\u2248",
"Topf;": "\U0001d54b",
"TripleDot;": "\u20db",
"Tscr;": "\U0001d4af",
"Tstrok;": "\u0166",
"Uacute": "\xda",
"Uacute;": "\xda",
"Uarr;": "\u219f",
"Uarrocir;": "\u2949",
"Ubrcy;": "\u040e",
"Ubreve;": "\u016c",
"Ucirc": "\xdb",
"Ucirc;": "\xdb",
"Ucy;": "\u0423",
"Udblac;": "\u0170",
"Ufr;": "\U0001d518",
"Ugrave": "\xd9",
"Ugrave;": "\xd9",
"Umacr;": "\u016a",
"UnderBar;": "_",
"UnderBrace;": "\u23df",
"UnderBracket;": "\u23b5",
"UnderParenthesis;": "\u23dd",
"Union;": "\u22c3",
"UnionPlus;": "\u228e",
"Uogon;": "\u0172",
"Uopf;": "\U0001d54c",
"UpArrow;": "\u2191",
"UpArrowBar;": "\u2912",
"UpArrowDownArrow;": "\u21c5",
"UpDownArrow;": "\u2195",
"UpEquilibrium;": "\u296e",
"UpTee;": "\u22a5",
"UpTeeArrow;": "\u21a5",
"Uparrow;": "\u21d1",
"Updownarrow;": "\u21d5",
"UpperLeftArrow;": "\u2196",
"UpperRightArrow;": "\u2197",
"Upsi;": "\u03d2",
"Upsilon;": "\u03a5",
"Uring;": "\u016e",
"Uscr;": "\U0001d4b0",
"Utilde;": "\u0168",
"Uuml": "\xdc",
"Uuml;": "\xdc",
"VDash;": "\u22ab",
"Vbar;": "\u2aeb",
"Vcy;": "\u0412",
"Vdash;": "\u22a9",
"Vdashl;": "\u2ae6",
"Vee;": "\u22c1",
"Verbar;": "\u2016",
"Vert;": "\u2016",
"VerticalBar;": "\u2223",
"VerticalLine;": "|",
"VerticalSeparator;": "\u2758",
"VerticalTilde;": "\u2240",
"VeryThinSpace;": "\u200a",
"Vfr;": "\U0001d519",
"Vopf;": "\U0001d54d",
"Vscr;": "\U0001d4b1",
"Vvdash;": "\u22aa",
"Wcirc;": "\u0174",
"Wedge;": "\u22c0",
"Wfr;": "\U0001d51a",
"Wopf;": "\U0001d54e",
"Wscr;": "\U0001d4b2",
"Xfr;": "\U0001d51b",
"Xi;": "\u039e",
"Xopf;": "\U0001d54f",
"Xscr;": "\U0001d4b3",
"YAcy;": "\u042f",
"YIcy;": "\u0407",
"YUcy;": "\u042e",
"Yacute": "\xdd",
"Yacute;": "\xdd",
"Ycirc;": "\u0176",
"Ycy;": "\u042b",
"Yfr;": "\U0001d51c",
"Yopf;": "\U0001d550",
"Yscr;": "\U0001d4b4",
"Yuml;": "\u0178",
"ZHcy;": "\u0416",
"Zacute;": "\u0179",
"Zcaron;": "\u017d",
"Zcy;": "\u0417",
"Zdot;": "\u017b",
"ZeroWidthSpace;": "\u200b",
"Zeta;": "\u0396",
"Zfr;": "\u2128",
"Zopf;": "\u2124",
"Zscr;": "\U0001d4b5",
"aacute": "\xe1",
"aacute;": "\xe1",
"abreve;": "\u0103",
"ac;": "\u223e",
"acE;": "\u223e\u0333",
"acd;": "\u223f",
"acirc": "\xe2",
"acirc;": "\xe2",
"acute": "\xb4",
"acute;": "\xb4",
"acy;": "\u0430",
"aelig": "\xe6",
"aelig;": "\xe6",
"af;": "\u2061",
"afr;": "\U0001d51e",
"agrave": "\xe0",
"agrave;": "\xe0",
"alefsym;": "\u2135",
"aleph;": "\u2135",
"alpha;": "\u03b1",
"amacr;": "\u0101",
"amalg;": "\u2a3f",
"amp": "&",
"amp;": "&",
"and;": "\u2227",
"andand;": "\u2a55",
"andd;": "\u2a5c",
"andslope;": "\u2a58",
"andv;": "\u2a5a",
"ang;": "\u2220",
"ange;": "\u29a4",
"angle;": "\u2220",
"angmsd;": "\u2221",
"angmsdaa;": "\u29a8",
"angmsdab;": "\u29a9",
"angmsdac;": "\u29aa",
"angmsdad;": "\u29ab",
"angmsdae;": "\u29ac",
"angmsdaf;": "\u29ad",
"angmsdag;": "\u29ae",
"angmsdah;": "\u29af",
"angrt;": "\u221f",
"angrtvb;": "\u22be",
"angrtvbd;": "\u299d",
"angsph;": "\u2222",
"angst;": "\xc5",
"angzarr;": "\u237c",
"aogon;": "\u0105",
"aopf;": "\U0001d552",
"ap;": "\u2248",
"apE;": "\u2a70",
"apacir;": "\u2a6f",
"ape;": "\u224a",
"apid;": "\u224b",
"apos;": "'",
"approx;": "\u2248",
"approxeq;": "\u224a",
"aring": "\xe5",
"aring;": "\xe5",
"ascr;": "\U0001d4b6",
"ast;": "*",
"asymp;": "\u2248",
"asympeq;": "\u224d",
"atilde": "\xe3",
"atilde;": "\xe3",
"auml": "\xe4",
"auml;": "\xe4",
"awconint;": "\u2233",
"awint;": "\u2a11",
"bNot;": "\u2aed",
"backcong;": "\u224c",
"backepsilon;": "\u03f6",
"backprime;": "\u2035",
"backsim;": "\u223d",
"backsimeq;": "\u22cd",
"barvee;": "\u22bd",
"barwed;": "\u2305",
"barwedge;": "\u2305",
"bbrk;": "\u23b5",
"bbrktbrk;": "\u23b6",
"bcong;": "\u224c",
"bcy;": "\u0431",
"bdquo;": "\u201e",
"becaus;": "\u2235",
"because;": "\u2235",
"bemptyv;": "\u29b0",
"bepsi;": "\u03f6",
"bernou;": "\u212c",
"beta;": "\u03b2",
"beth;": "\u2136",
"between;": "\u226c",
"bfr;": "\U0001d51f",
"bigcap;": "\u22c2",
"bigcirc;": "\u25ef",
"bigcup;": "\u22c3",
"bigodot;": "\u2a00",
"bigoplus;": "\u2a01",
"bigotimes;": "\u2a02",
"bigsqcup;": "\u2a06",
"bigstar;": "\u2605",
"bigtriangledown;": "\u25bd",
"bigtriangleup;": "\u25b3",
"biguplus;": "\u2a04",
"bigvee;": "\u22c1",
"bigwedge;": "\u22c0",
"bkarow;": "\u290d",
"blacklozenge;": "\u29eb",
"blacksquare;": "\u25aa",
"blacktriangle;": "\u25b4",
"blacktriangledown;": "\u25be",
"blacktriangleleft;": "\u25c2",
"blacktriangleright;": "\u25b8",
"blank;": "\u2423",
"blk12;": "\u2592",
"blk14;": "\u2591",
"blk34;": "\u2593",
"block;": "\u2588",
"bne;": "=\u20e5",
"bnequiv;": "\u2261\u20e5",
"bnot;": "\u2310",
"bopf;": "\U0001d553",
"bot;": "\u22a5",
"bottom;": "\u22a5",
"bowtie;": "\u22c8",
"boxDL;": "\u2557",
"boxDR;": "\u2554",
"boxDl;": "\u2556",
"boxDr;": "\u2553",
"boxH;": "\u2550",
"boxHD;": "\u2566",
"boxHU;": "\u2569",
"boxHd;": "\u2564",
"boxHu;": "\u2567",
"boxUL;": "\u255d",
"boxUR;": "\u255a",
"boxUl;": "\u255c",
"boxUr;": "\u2559",
"boxV;": "\u2551",
"boxVH;": "\u256c",
"boxVL;": "\u2563",
"boxVR;": "\u2560",
"boxVh;": "\u256b",
"boxVl;": "\u2562",
"boxVr;": "\u255f",
"boxbox;": "\u29c9",
"boxdL;": "\u2555",
"boxdR;": "\u2552",
"boxdl;": "\u2510",
"boxdr;": "\u250c",
"boxh;": "\u2500",
"boxhD;": "\u2565",
"boxhU;": "\u2568",
"boxhd;": "\u252c",
"boxhu;": "\u2534",
"boxminus;": "\u229f",
"boxplus;": "\u229e",
"boxtimes;": "\u22a0",
"boxuL;": "\u255b",
"boxuR;": "\u2558",
"boxul;": "\u2518",
"boxur;": "\u2514",
"boxv;": "\u2502",
"boxvH;": "\u256a",
"boxvL;": "\u2561",
"boxvR;": "\u255e",
"boxvh;": "\u253c",
"boxvl;": "\u2524",
"boxvr;": "\u251c",
"bprime;": "\u2035",
"breve;": "\u02d8",
"brvbar": "\xa6",
"brvbar;": "\xa6",
"bscr;": "\U0001d4b7",
"bsemi;": "\u204f",
"bsim;": "\u223d",
"bsime;": "\u22cd",
"bsol;": "\\",
"bsolb;": "\u29c5",
"bsolhsub;": "\u27c8",
"bull;": "\u2022",
"bullet;": "\u2022",
"bump;": "\u224e",
"bumpE;": "\u2aae",
"bumpe;": "\u224f",
"bumpeq;": "\u224f",
"cacute;": "\u0107",
"cap;": "\u2229",
"capand;": "\u2a44",
"capbrcup;": "\u2a49",
"capcap;": "\u2a4b",
"capcup;": "\u2a47",
"capdot;": "\u2a40",
"caps;": "\u2229\ufe00",
"caret;": "\u2041",
"caron;": "\u02c7",
"ccaps;": "\u2a4d",
"ccaron;": "\u010d",
"ccedil": "\xe7",
"ccedil;": "\xe7",
"ccirc;": "\u0109",
"ccups;": "\u2a4c",
"ccupssm;": "\u2a50",
"cdot;": "\u010b",
"cedil": "\xb8",
"cedil;": "\xb8",
"cemptyv;": "\u29b2",
"cent": "\xa2",
"cent;": "\xa2",
"centerdot;": "\xb7",
"cfr;": "\U0001d520",
"chcy;": "\u0447",
"check;": "\u2713",
"checkmark;": "\u2713",
"chi;": "\u03c7",
"cir;": "\u25cb",
"cirE;": "\u29c3",
"circ;": "\u02c6",
"circeq;": "\u2257",
"circlearrowleft;": "\u21ba",
"circlearrowright;": "\u21bb",
"circledR;": "\xae",
"circledS;": "\u24c8",
"circledast;": "\u229b",
"circledcirc;": "\u229a",
"circleddash;": "\u229d",
"cire;": "\u2257",
"cirfnint;": "\u2a10",
"cirmid;": "\u2aef",
"cirscir;": "\u29c2",
"clubs;": "\u2663",
"clubsuit;": "\u2663",
"colon;": ":",
"colone;": "\u2254",
"coloneq;": "\u2254",
"comma;": ",",
"commat;": "@",
"comp;": "\u2201",
"compfn;": "\u2218",
"complement;": "\u2201",
"complexes;": "\u2102",
"cong;": "\u2245",
"congdot;": "\u2a6d",
"conint;": "\u222e",
"copf;": "\U0001d554",
"coprod;": "\u2210",
"copy": "\xa9",
"copy;": "\xa9",
"copysr;": "\u2117",
"crarr;": "\u21b5",
"cross;": "\u2717",
"cscr;": "\U0001d4b8",
"csub;": "\u2acf",
"csube;": "\u2ad1",
"csup;": "\u2ad0",
"csupe;": "\u2ad2",
"ctdot;": "\u22ef",
"cudarrl;": "\u2938",
"cudarrr;": "\u2935",
"cuepr;": "\u22de",
"cuesc;": "\u22df",
"cularr;": "\u21b6",
"cularrp;": "\u293d",
"cup;": "\u222a",
"cupbrcap;": "\u2a48",
"cupcap;": "\u2a46",
"cupcup;": "\u2a4a",
"cupdot;": "\u228d",
"cupor;": "\u2a45",
"cups;": "\u222a\ufe00",
"curarr;": "\u21b7",
"curarrm;": "\u293c",
"curlyeqprec;": "\u22de",
"curlyeqsucc;": "\u22df",
"curlyvee;": "\u22ce",
"curlywedge;": "\u22cf",
"curren": "\xa4",
"curren;": "\xa4",
"curvearrowleft;": "\u21b6",
"curvearrowright;": "\u21b7",
"cuvee;": "\u22ce",
"cuwed;": "\u22cf",
"cwconint;": "\u2232",
"cwint;": "\u2231",
"cylcty;": "\u232d",
"dArr;": "\u21d3",
"dHar;": "\u2965",
"dagger;": "\u2020",
"daleth;": "\u2138",
"darr;": "\u2193",
"dash;": "\u2010",
"dashv;": "\u22a3",
"dbkarow;": "\u290f",
"dblac;": "\u02dd",
"dcaron;": "\u010f",
"dcy;": "\u0434",
"dd;": "\u2146",
"ddagger;": "\u2021",
"ddarr;": "\u21ca",
"ddotseq;": "\u2a77",
"deg": "\xb0",
"deg;": "\xb0",
"delta;": "\u03b4",
"demptyv;": "\u29b1",
"dfisht;": "\u297f",
"dfr;": "\U0001d521",
"dharl;": "\u21c3",
"dharr;": "\u21c2",
"diam;": "\u22c4",
"diamond;": "\u22c4",
"diamondsuit;": "\u2666",
"diams;": "\u2666",
"die;": "\xa8",
"digamma;": "\u03dd",
"disin;": "\u22f2",
"div;": "\xf7",
"divide": "\xf7",
"divide;": "\xf7",
"divideontimes;": "\u22c7",
"divonx;": "\u22c7",
"djcy;": "\u0452",
"dlcorn;": "\u231e",
"dlcrop;": "\u230d",
"dollar;": "$",
"dopf;": "\U0001d555",
"dot;": "\u02d9",
"doteq;": "\u2250",
"doteqdot;": "\u2251",
"dotminus;": "\u2238",
"dotplus;": "\u2214",
"dotsquare;": "\u22a1",
"doublebarwedge;": "\u2306",
"downarrow;": "\u2193",
"downdownarrows;": "\u21ca",
"downharpoonleft;": "\u21c3",
"downharpoonright;": "\u21c2",
"drbkarow;": "\u2910",
"drcorn;": "\u231f",
"drcrop;": "\u230c",
"dscr;": "\U0001d4b9",
"dscy;": "\u0455",
"dsol;": "\u29f6",
"dstrok;": "\u0111",
"dtdot;": "\u22f1",
"dtri;": "\u25bf",
"dtrif;": "\u25be",
"duarr;": "\u21f5",
"duhar;": "\u296f",
"dwangle;": "\u29a6",
"dzcy;": "\u045f",
"dzigrarr;": "\u27ff",
"eDDot;": "\u2a77",
"eDot;": "\u2251",
"eacute": "\xe9",
"eacute;": "\xe9",
"easter;": "\u2a6e",
"ecaron;": "\u011b",
"ecir;": "\u2256",
"ecirc": "\xea",
"ecirc;": "\xea",
"ecolon;": "\u2255",
"ecy;": "\u044d",
"edot;": "\u0117",
"ee;": "\u2147",
"efDot;": "\u2252",
"efr;": "\U0001d522",
"eg;": "\u2a9a",
"egrave": "\xe8",
"egrave;": "\xe8",
"egs;": "\u2a96",
"egsdot;": "\u2a98",
"el;": "\u2a99",
"elinters;": "\u23e7",
"ell;": "\u2113",
"els;": "\u2a95",
"elsdot;": "\u2a97",
"emacr;": "\u0113",
"empty;": "\u2205",
"emptyset;": "\u2205",
"emptyv;": "\u2205",
"emsp13;": "\u2004",
"emsp14;": "\u2005",
"emsp;": "\u2003",
"eng;": "\u014b",
"ensp;": "\u2002",
"eogon;": "\u0119",
"eopf;": "\U0001d556",
"epar;": "\u22d5",
"eparsl;": "\u29e3",
"eplus;": "\u2a71",
"epsi;": "\u03b5",
"epsilon;": "\u03b5",
"epsiv;": "\u03f5",
"eqcirc;": "\u2256",
"eqcolon;": "\u2255",
"eqsim;": "\u2242",
"eqslantgtr;": "\u2a96",
"eqslantless;": "\u2a95",
"equals;": "=",
"equest;": "\u225f",
"equiv;": "\u2261",
"equivDD;": "\u2a78",
"eqvparsl;": "\u29e5",
"erDot;": "\u2253",
"erarr;": "\u2971",
"escr;": "\u212f",
"esdot;": "\u2250",
"esim;": "\u2242",
"eta;": "\u03b7",
"eth": "\xf0",
"eth;": "\xf0",
"euml": "\xeb",
"euml;": "\xeb",
"euro;": "\u20ac",
"excl;": "!",
"exist;": "\u2203",
"expectation;": "\u2130",
"exponentiale;": "\u2147",
"fallingdotseq;": "\u2252",
"fcy;": "\u0444",
"female;": "\u2640",
"ffilig;": "\ufb03",
"fflig;": "\ufb00",
"ffllig;": "\ufb04",
"ffr;": "\U0001d523",
"filig;": "\ufb01",
"fjlig;": "fj",
"flat;": "\u266d",
"fllig;": "\ufb02",
"fltns;": "\u25b1",
"fnof;": "\u0192",
"fopf;": "\U0001d557",
"forall;": "\u2200",
"fork;": "\u22d4",
"forkv;": "\u2ad9",
"fpartint;": "\u2a0d",
"frac12": "\xbd",
"frac12;": "\xbd",
"frac13;": "\u2153",
"frac14": "\xbc",
"frac14;": "\xbc",
"frac15;": "\u2155",
"frac16;": "\u2159",
"frac18;": "\u215b",
"frac23;": "\u2154",
"frac25;": "\u2156",
"frac34": "\xbe",
"frac34;": "\xbe",
"frac35;": "\u2157",
"frac38;": "\u215c",
"frac45;": "\u2158",
"frac56;": "\u215a",
"frac58;": "\u215d",
"frac78;": "\u215e",
"frasl;": "\u2044",
"frown;": "\u2322",
"fscr;": "\U0001d4bb",
"gE;": "\u2267",
"gEl;": "\u2a8c",
"gacute;": "\u01f5",
"gamma;": "\u03b3",
"gammad;": "\u03dd",
"gap;": "\u2a86",
"gbreve;": "\u011f",
"gcirc;": "\u011d",
"gcy;": "\u0433",
"gdot;": "\u0121",
"ge;": "\u2265",
"gel;": "\u22db",
"geq;": "\u2265",
"geqq;": "\u2267",
"geqslant;": "\u2a7e",
"ges;": "\u2a7e",
"gescc;": "\u2aa9",
"gesdot;": "\u2a80",
"gesdoto;": "\u2a82",
"gesdotol;": "\u2a84",
"gesl;": "\u22db\ufe00",
"gesles;": "\u2a94",
"gfr;": "\U0001d524",
"gg;": "\u226b",
"ggg;": "\u22d9",
"gimel;": "\u2137",
"gjcy;": "\u0453",
"gl;": "\u2277",
"glE;": "\u2a92",
"gla;": "\u2aa5",
"glj;": "\u2aa4",
"gnE;": "\u2269",
"gnap;": "\u2a8a",
"gnapprox;": "\u2a8a",
"gne;": "\u2a88",
"gneq;": "\u2a88",
"gneqq;": "\u2269",
"gnsim;": "\u22e7",
"gopf;": "\U0001d558",
"grave;": "`",
"gscr;": "\u210a",
"gsim;": "\u2273",
"gsime;": "\u2a8e",
"gsiml;": "\u2a90",
"gt": ">",
"gt;": ">",
"gtcc;": "\u2aa7",
"gtcir;": "\u2a7a",
"gtdot;": "\u22d7",
"gtlPar;": "\u2995",
"gtquest;": "\u2a7c",
"gtrapprox;": "\u2a86",
"gtrarr;": "\u2978",
"gtrdot;": "\u22d7",
"gtreqless;": "\u22db",
"gtreqqless;": "\u2a8c",
"gtrless;": "\u2277",
"gtrsim;": "\u2273",
"gvertneqq;": "\u2269\ufe00",
"gvnE;": "\u2269\ufe00",
"hArr;": "\u21d4",
"hairsp;": "\u200a",
"half;": "\xbd",
"hamilt;": "\u210b",
"hardcy;": "\u044a",
"harr;": "\u2194",
"harrcir;": "\u2948",
"harrw;": "\u21ad",
"hbar;": "\u210f",
"hcirc;": "\u0125",
"hearts;": "\u2665",
"heartsuit;": "\u2665",
"hellip;": "\u2026",
"hercon;": "\u22b9",
"hfr;": "\U0001d525",
"hksearow;": "\u2925",
"hkswarow;": "\u2926",
"hoarr;": "\u21ff",
"homtht;": "\u223b",
"hookleftarrow;": "\u21a9",
"hookrightarrow;": "\u21aa",
"hopf;": "\U0001d559",
"horbar;": "\u2015",
"hscr;": "\U0001d4bd",
"hslash;": "\u210f",
"hstrok;": "\u0127",
"hybull;": "\u2043",
"hyphen;": "\u2010",
"iacute": "\xed",
"iacute;": "\xed",
"ic;": "\u2063",
"icirc": "\xee",
"icirc;": "\xee",
"icy;": "\u0438",
"iecy;": "\u0435",
"iexcl": "\xa1",
"iexcl;": "\xa1",
"iff;": "\u21d4",
"ifr;": "\U0001d526",
"igrave": "\xec",
"igrave;": "\xec",
"ii;": "\u2148",
"iiiint;": "\u2a0c",
"iiint;": "\u222d",
"iinfin;": "\u29dc",
"iiota;": "\u2129",
"ijlig;": "\u0133",
"imacr;": "\u012b",
"image;": "\u2111",
"imagline;": "\u2110",
"imagpart;": "\u2111",
"imath;": "\u0131",
"imof;": "\u22b7",
"imped;": "\u01b5",
"in;": "\u2208",
"incare;": "\u2105",
"infin;": "\u221e",
"infintie;": "\u29dd",
"inodot;": "\u0131",
"int;": "\u222b",
"intcal;": "\u22ba",
"integers;": "\u2124",
"intercal;": "\u22ba",
"intlarhk;": "\u2a17",
"intprod;": "\u2a3c",
"iocy;": "\u0451",
"iogon;": "\u012f",
"iopf;": "\U0001d55a",
"iota;": "\u03b9",
"iprod;": "\u2a3c",
"iquest": "\xbf",
"iquest;": "\xbf",
"iscr;": "\U0001d4be",
"isin;": "\u2208",
"isinE;": "\u22f9",
"isindot;": "\u22f5",
"isins;": "\u22f4",
"isinsv;": "\u22f3",
"isinv;": "\u2208",
"it;": "\u2062",
"itilde;": "\u0129",
"iukcy;": "\u0456",
"iuml": "\xef",
"iuml;": "\xef",
"jcirc;": "\u0135",
"jcy;": "\u0439",
"jfr;": "\U0001d527",
"jmath;": "\u0237",
"jopf;": "\U0001d55b",
"jscr;": "\U0001d4bf",
"jsercy;": "\u0458",
"jukcy;": "\u0454",
"kappa;": "\u03ba",
"kappav;": "\u03f0",
"kcedil;": "\u0137",
"kcy;": "\u043a",
"kfr;": "\U0001d528",
"kgreen;": "\u0138",
"khcy;": "\u0445",
"kjcy;": "\u045c",
"kopf;": "\U0001d55c",
"kscr;": "\U0001d4c0",
"lAarr;": "\u21da",
"lArr;": "\u21d0",
"lAtail;": "\u291b",
"lBarr;": "\u290e",
"lE;": "\u2266",
"lEg;": "\u2a8b",
"lHar;": "\u2962",
"lacute;": "\u013a",
"laemptyv;": "\u29b4",
"lagran;": "\u2112",
"lambda;": "\u03bb",
"lang;": "\u27e8",
"langd;": "\u2991",
"langle;": "\u27e8",
"lap;": "\u2a85",
"laquo": "\xab",
"laquo;": "\xab",
"larr;": "\u2190",
"larrb;": "\u21e4",
"larrbfs;": "\u291f",
"larrfs;": "\u291d",
"larrhk;": "\u21a9",
"larrlp;": "\u21ab",
"larrpl;": "\u2939",
"larrsim;": "\u2973",
"larrtl;": "\u21a2",
"lat;": "\u2aab",
"latail;": "\u2919",
"late;": "\u2aad",
"lates;": "\u2aad\ufe00",
"lbarr;": "\u290c",
"lbbrk;": "\u2772",
"lbrace;": "{",
"lbrack;": "[",
"lbrke;": "\u298b",
"lbrksld;": "\u298f",
"lbrkslu;": "\u298d",
"lcaron;": "\u013e",
"lcedil;": "\u013c",
"lceil;": "\u2308",
"lcub;": "{",
"lcy;": "\u043b",
"ldca;": "\u2936",
"ldquo;": "\u201c",
"ldquor;": "\u201e",
"ldrdhar;": "\u2967",
"ldrushar;": "\u294b",
"ldsh;": "\u21b2",
"le;": "\u2264",
"leftarrow;": "\u2190",
"leftarrowtail;": "\u21a2",
"leftharpoondown;": "\u21bd",
"leftharpoonup;": "\u21bc",
"leftleftarrows;": "\u21c7",
"leftrightarrow;": "\u2194",
"leftrightarrows;": "\u21c6",
"leftrightharpoons;": "\u21cb",
"leftrightsquigarrow;": "\u21ad",
"leftthreetimes;": "\u22cb",
"leg;": "\u22da",
"leq;": "\u2264",
"leqq;": "\u2266",
"leqslant;": "\u2a7d",
"les;": "\u2a7d",
"lescc;": "\u2aa8",
"lesdot;": "\u2a7f",
"lesdoto;": "\u2a81",
"lesdotor;": "\u2a83",
"lesg;": "\u22da\ufe00",
"lesges;": "\u2a93",
"lessapprox;": "\u2a85",
"lessdot;": "\u22d6",
"lesseqgtr;": "\u22da",
"lesseqqgtr;": "\u2a8b",
"lessgtr;": "\u2276",
"lesssim;": "\u2272",
"lfisht;": "\u297c",
"lfloor;": "\u230a",
"lfr;": "\U0001d529",
"lg;": "\u2276",
"lgE;": "\u2a91",
"lhard;": "\u21bd",
"lharu;": "\u21bc",
"lharul;": "\u296a",
"lhblk;": "\u2584",
"ljcy;": "\u0459",
"ll;": "\u226a",
"llarr;": "\u21c7",
"llcorner;": "\u231e",
"llhard;": "\u296b",
"lltri;": "\u25fa",
"lmidot;": "\u0140",
"lmoust;": "\u23b0",
"lmoustache;": "\u23b0",
"lnE;": "\u2268",
"lnap;": "\u2a89",
"lnapprox;": "\u2a89",
"lne;": "\u2a87",
"lneq;": "\u2a87",
"lneqq;": "\u2268",
"lnsim;": "\u22e6",
"loang;": "\u27ec",
"loarr;": "\u21fd",
"lobrk;": "\u27e6",
"longleftarrow;": "\u27f5",
"longleftrightarrow;": "\u27f7",
"longmapsto;": "\u27fc",
"longrightarrow;": "\u27f6",
"looparrowleft;": "\u21ab",
"looparrowright;": "\u21ac",
"lopar;": "\u2985",
"lopf;": "\U0001d55d",
"loplus;": "\u2a2d",
"lotimes;": "\u2a34",
"lowast;": "\u2217",
"lowbar;": "_",
"loz;": "\u25ca",
"lozenge;": "\u25ca",
"lozf;": "\u29eb",
"lpar;": "(",
"lparlt;": "\u2993",
"lrarr;": "\u21c6",
"lrcorner;": "\u231f",
"lrhar;": "\u21cb",
"lrhard;": "\u296d",
"lrm;": "\u200e",
"lrtri;": "\u22bf",
"lsaquo;": "\u2039",
"lscr;": "\U0001d4c1",
"lsh;": "\u21b0",
"lsim;": "\u2272",
"lsime;": "\u2a8d",
"lsimg;": "\u2a8f",
"lsqb;": "[",
"lsquo;": "\u2018",
"lsquor;": "\u201a",
"lstrok;": "\u0142",
"lt": "<",
"lt;": "<",
"ltcc;": "\u2aa6",
"ltcir;": "\u2a79",
"ltdot;": "\u22d6",
"lthree;": "\u22cb",
"ltimes;": "\u22c9",
"ltlarr;": "\u2976",
"ltquest;": "\u2a7b",
"ltrPar;": "\u2996",
"ltri;": "\u25c3",
"ltrie;": "\u22b4",
"ltrif;": "\u25c2",
"lurdshar;": "\u294a",
"luruhar;": "\u2966",
"lvertneqq;": "\u2268\ufe00",
"lvnE;": "\u2268\ufe00",
"mDDot;": "\u223a",
"macr": "\xaf",
"macr;": "\xaf",
"male;": "\u2642",
"malt;": "\u2720",
"maltese;": "\u2720",
"map;": "\u21a6",
"mapsto;": "\u21a6",
"mapstodown;": "\u21a7",
"mapstoleft;": "\u21a4",
"mapstoup;": "\u21a5",
"marker;": "\u25ae",
"mcomma;": "\u2a29",
"mcy;": "\u043c",
"mdash;": "\u2014",
"measuredangle;": "\u2221",
"mfr;": "\U0001d52a",
"mho;": "\u2127",
"micro": "\xb5",
"micro;": "\xb5",
"mid;": "\u2223",
"midast;": "*",
"midcir;": "\u2af0",
"middot": "\xb7",
"middot;": "\xb7",
"minus;": "\u2212",
"minusb;": "\u229f",
"minusd;": "\u2238",
"minusdu;": "\u2a2a",
"mlcp;": "\u2adb",
"mldr;": "\u2026",
"mnplus;": "\u2213",
"models;": "\u22a7",
"mopf;": "\U0001d55e",
"mp;": "\u2213",
"mscr;": "\U0001d4c2",
"mstpos;": "\u223e",
"mu;": "\u03bc",
"multimap;": "\u22b8",
"mumap;": "\u22b8",
"nGg;": "\u22d9\u0338",
"nGt;": "\u226b\u20d2",
"nGtv;": "\u226b\u0338",
"nLeftarrow;": "\u21cd",
"nLeftrightarrow;": "\u21ce",
"nLl;": "\u22d8\u0338",
"nLt;": "\u226a\u20d2",
"nLtv;": "\u226a\u0338",
"nRightarrow;": "\u21cf",
"nVDash;": "\u22af",
"nVdash;": "\u22ae",
"nabla;": "\u2207",
"nacute;": "\u0144",
"nang;": "\u2220\u20d2",
"nap;": "\u2249",
"napE;": "\u2a70\u0338",
"napid;": "\u224b\u0338",
"napos;": "\u0149",
"napprox;": "\u2249",
"natur;": "\u266e",
"natural;": "\u266e",
"naturals;": "\u2115",
"nbsp": "\xa0",
"nbsp;": "\xa0",
"nbump;": "\u224e\u0338",
"nbumpe;": "\u224f\u0338",
"ncap;": "\u2a43",
"ncaron;": "\u0148",
"ncedil;": "\u0146",
"ncong;": "\u2247",
"ncongdot;": "\u2a6d\u0338",
"ncup;": "\u2a42",
"ncy;": "\u043d",
"ndash;": "\u2013",
"ne;": "\u2260",
"neArr;": "\u21d7",
"nearhk;": "\u2924",
"nearr;": "\u2197",
"nearrow;": "\u2197",
"nedot;": "\u2250\u0338",
"nequiv;": "\u2262",
"nesear;": "\u2928",
"nesim;": "\u2242\u0338",
"nexist;": "\u2204",
"nexists;": "\u2204",
"nfr;": "\U0001d52b",
"ngE;": "\u2267\u0338",
"nge;": "\u2271",
"ngeq;": "\u2271",
"ngeqq;": "\u2267\u0338",
"ngeqslant;": "\u2a7e\u0338",
"nges;": "\u2a7e\u0338",
"ngsim;": "\u2275",
"ngt;": "\u226f",
"ngtr;": "\u226f",
"nhArr;": "\u21ce",
"nharr;": "\u21ae",
"nhpar;": "\u2af2",
"ni;": "\u220b",
"nis;": "\u22fc",
"nisd;": "\u22fa",
"niv;": "\u220b",
"njcy;": "\u045a",
"nlArr;": "\u21cd",
"nlE;": "\u2266\u0338",
"nlarr;": "\u219a",
"nldr;": "\u2025",
"nle;": "\u2270",
"nleftarrow;": "\u219a",
"nleftrightarrow;": "\u21ae",
"nleq;": "\u2270",
"nleqq;": "\u2266\u0338",
"nleqslant;": "\u2a7d\u0338",
"nles;": "\u2a7d\u0338",
"nless;": "\u226e",
"nlsim;": "\u2274",
"nlt;": "\u226e",
"nltri;": "\u22ea",
"nltrie;": "\u22ec",
"nmid;": "\u2224",
"nopf;": "\U0001d55f",
"not": "\xac",
"not;": "\xac",
"notin;": "\u2209",
"notinE;": "\u22f9\u0338",
"notindot;": "\u22f5\u0338",
"notinva;": "\u2209",
"notinvb;": "\u22f7",
"notinvc;": "\u22f6",
"notni;": "\u220c",
"notniva;": "\u220c",
"notnivb;": "\u22fe",
"notnivc;": "\u22fd",
"npar;": "\u2226",
"nparallel;": "\u2226",
"nparsl;": "\u2afd\u20e5",
"npart;": "\u2202\u0338",
"npolint;": "\u2a14",
"npr;": "\u2280",
"nprcue;": "\u22e0",
"npre;": "\u2aaf\u0338",
"nprec;": "\u2280",
"npreceq;": "\u2aaf\u0338",
"nrArr;": "\u21cf",
"nrarr;": "\u219b",
"nrarrc;": "\u2933\u0338",
"nrarrw;": "\u219d\u0338",
"nrightarrow;": "\u219b",
"nrtri;": "\u22eb",
"nrtrie;": "\u22ed",
"nsc;": "\u2281",
"nsccue;": "\u22e1",
"nsce;": "\u2ab0\u0338",
"nscr;": "\U0001d4c3",
"nshortmid;": "\u2224",
"nshortparallel;": "\u2226",
"nsim;": "\u2241",
"nsime;": "\u2244",
"nsimeq;": "\u2244",
"nsmid;": "\u2224",
"nspar;": "\u2226",
"nsqsube;": "\u22e2",
"nsqsupe;": "\u22e3",
"nsub;": "\u2284",
"nsubE;": "\u2ac5\u0338",
"nsube;": "\u2288",
"nsubset;": "\u2282\u20d2",
"nsubseteq;": "\u2288",
"nsubseteqq;": "\u2ac5\u0338",
"nsucc;": "\u2281",
"nsucceq;": "\u2ab0\u0338",
"nsup;": "\u2285",
"nsupE;": "\u2ac6\u0338",
"nsupe;": "\u2289",
"nsupset;": "\u2283\u20d2",
"nsupseteq;": "\u2289",
"nsupseteqq;": "\u2ac6\u0338",
"ntgl;": "\u2279",
"ntilde": "\xf1",
"ntilde;": "\xf1",
"ntlg;": "\u2278",
"ntriangleleft;": "\u22ea",
"ntrianglelefteq;": "\u22ec",
"ntriangleright;": "\u22eb",
"ntrianglerighteq;": "\u22ed",
"nu;": "\u03bd",
"num;": "#",
"numero;": "\u2116",
"numsp;": "\u2007",
"nvDash;": "\u22ad",
"nvHarr;": "\u2904",
"nvap;": "\u224d\u20d2",
"nvdash;": "\u22ac",
"nvge;": "\u2265\u20d2",
"nvgt;": ">\u20d2",
"nvinfin;": "\u29de",
"nvlArr;": "\u2902",
"nvle;": "\u2264\u20d2",
"nvlt;": "<\u20d2",
"nvltrie;": "\u22b4\u20d2",
"nvrArr;": "\u2903",
"nvrtrie;": "\u22b5\u20d2",
"nvsim;": "\u223c\u20d2",
"nwArr;": "\u21d6",
"nwarhk;": "\u2923",
"nwarr;": "\u2196",
"nwarrow;": "\u2196",
"nwnear;": "\u2927",
"oS;": "\u24c8",
"oacute": "\xf3",
"oacute;": "\xf3",
"oast;": "\u229b",
"ocir;": "\u229a",
"ocirc": "\xf4",
"ocirc;": "\xf4",
"ocy;": "\u043e",
"odash;": "\u229d",
"odblac;": "\u0151",
"odiv;": "\u2a38",
"odot;": "\u2299",
"odsold;": "\u29bc",
"oelig;": "\u0153",
"ofcir;": "\u29bf",
"ofr;": "\U0001d52c",
"ogon;": "\u02db",
"ograve": "\xf2",
"ograve;": "\xf2",
"ogt;": "\u29c1",
"ohbar;": "\u29b5",
"ohm;": "\u03a9",
"oint;": "\u222e",
"olarr;": "\u21ba",
"olcir;": "\u29be",
"olcross;": "\u29bb",
"oline;": "\u203e",
"olt;": "\u29c0",
"omacr;": "\u014d",
"omega;": "\u03c9",
"omicron;": "\u03bf",
"omid;": "\u29b6",
"ominus;": "\u2296",
"oopf;": "\U0001d560",
"opar;": "\u29b7",
"operp;": "\u29b9",
"oplus;": "\u2295",
"or;": "\u2228",
"orarr;": "\u21bb",
"ord;": "\u2a5d",
"order;": "\u2134",
"orderof;": "\u2134",
"ordf": "\xaa",
"ordf;": "\xaa",
"ordm": "\xba",
"ordm;": "\xba",
"origof;": "\u22b6",
"oror;": "\u2a56",
"orslope;": "\u2a57",
"orv;": "\u2a5b",
"oscr;": "\u2134",
"oslash": "\xf8",
"oslash;": "\xf8",
"osol;": "\u2298",
"otilde": "\xf5",
"otilde;": "\xf5",
"otimes;": "\u2297",
"otimesas;": "\u2a36",
"ouml": "\xf6",
"ouml;": "\xf6",
"ovbar;": "\u233d",
"par;": "\u2225",
"para": "\xb6",
"para;": "\xb6",
"parallel;": "\u2225",
"parsim;": "\u2af3",
"parsl;": "\u2afd",
"part;": "\u2202",
"pcy;": "\u043f",
"percnt;": "%",
"period;": ".",
"permil;": "\u2030",
"perp;": "\u22a5",
"pertenk;": "\u2031",
"pfr;": "\U0001d52d",
"phi;": "\u03c6",
"phiv;": "\u03d5",
"phmmat;": "\u2133",
"phone;": "\u260e",
"pi;": "\u03c0",
"pitchfork;": "\u22d4",
"piv;": "\u03d6",
"planck;": "\u210f",
"planckh;": "\u210e",
"plankv;": "\u210f",
"plus;": "+",
"plusacir;": "\u2a23",
"plusb;": "\u229e",
"pluscir;": "\u2a22",
"plusdo;": "\u2214",
"plusdu;": "\u2a25",
"pluse;": "\u2a72",
"plusmn": "\xb1",
"plusmn;": "\xb1",
"plussim;": "\u2a26",
"plustwo;": "\u2a27",
"pm;": "\xb1",
"pointint;": "\u2a15",
"popf;": "\U0001d561",
"pound": "\xa3",
"pound;": "\xa3",
"pr;": "\u227a",
"prE;": "\u2ab3",
"prap;": "\u2ab7",
"prcue;": "\u227c",
"pre;": "\u2aaf",
"prec;": "\u227a",
"precapprox;": "\u2ab7",
"preccurlyeq;": "\u227c",
"preceq;": "\u2aaf",
"precnapprox;": "\u2ab9",
"precneqq;": "\u2ab5",
"precnsim;": "\u22e8",
"precsim;": "\u227e",
"prime;": "\u2032",
"primes;": "\u2119",
"prnE;": "\u2ab5",
"prnap;": "\u2ab9",
"prnsim;": "\u22e8",
"prod;": "\u220f",
"profalar;": "\u232e",
"profline;": "\u2312",
"profsurf;": "\u2313",
"prop;": "\u221d",
"propto;": "\u221d",
"prsim;": "\u227e",
"prurel;": "\u22b0",
"pscr;": "\U0001d4c5",
"psi;": "\u03c8",
"puncsp;": "\u2008",
"qfr;": "\U0001d52e",
"qint;": "\u2a0c",
"qopf;": "\U0001d562",
"qprime;": "\u2057",
"qscr;": "\U0001d4c6",
"quaternions;": "\u210d",
"quatint;": "\u2a16",
"quest;": "?",
"questeq;": "\u225f",
"quot": "\"",
"quot;": "\"",
"rAarr;": "\u21db",
"rArr;": "\u21d2",
"rAtail;": "\u291c",
"rBarr;": "\u290f",
"rHar;": "\u2964",
"race;": "\u223d\u0331",
"racute;": "\u0155",
"radic;": "\u221a",
"raemptyv;": "\u29b3",
"rang;": "\u27e9",
"rangd;": "\u2992",
"range;": "\u29a5",
"rangle;": "\u27e9",
"raquo": "\xbb",
"raquo;": "\xbb",
"rarr;": "\u2192",
"rarrap;": "\u2975",
"rarrb;": "\u21e5",
"rarrbfs;": "\u2920",
"rarrc;": "\u2933",
"rarrfs;": "\u291e",
"rarrhk;": "\u21aa",
"rarrlp;": "\u21ac",
"rarrpl;": "\u2945",
"rarrsim;": "\u2974",
"rarrtl;": "\u21a3",
"rarrw;": "\u219d",
"ratail;": "\u291a",
"ratio;": "\u2236",
"rationals;": "\u211a",
"rbarr;": "\u290d",
"rbbrk;": "\u2773",
"rbrace;": "}",
"rbrack;": "]",
"rbrke;": "\u298c",
"rbrksld;": "\u298e",
"rbrkslu;": "\u2990",
"rcaron;": "\u0159",
"rcedil;": "\u0157",
"rceil;": "\u2309",
"rcub;": "}",
"rcy;": "\u0440",
"rdca;": "\u2937",
"rdldhar;": "\u2969",
"rdquo;": "\u201d",
"rdquor;": "\u201d",
"rdsh;": "\u21b3",
"real;": "\u211c",
"realine;": "\u211b",
"realpart;": "\u211c",
"reals;": "\u211d",
"rect;": "\u25ad",
"reg": "\xae",
"reg;": "\xae",
"rfisht;": "\u297d",
"rfloor;": "\u230b",
"rfr;": "\U0001d52f",
"rhard;": "\u21c1",
"rharu;": "\u21c0",
"rharul;": "\u296c",
"rho;": "\u03c1",
"rhov;": "\u03f1",
"rightarrow;": "\u2192",
"rightarrowtail;": "\u21a3",
"rightharpoondown;": "\u21c1",
"rightharpoonup;": "\u21c0",
"rightleftarrows;": "\u21c4",
"rightleftharpoons;": "\u21cc",
"rightrightarrows;": "\u21c9",
"rightsquigarrow;": "\u219d",
"rightthreetimes;": "\u22cc",
"ring;": "\u02da",
"risingdotseq;": "\u2253",
"rlarr;": "\u21c4",
"rlhar;": "\u21cc",
"rlm;": "\u200f",
"rmoust;": "\u23b1",
"rmoustache;": "\u23b1",
"rnmid;": "\u2aee",
"roang;": "\u27ed",
"roarr;": "\u21fe",
"robrk;": "\u27e7",
"ropar;": "\u2986",
"ropf;": "\U0001d563",
"roplus;": "\u2a2e",
"rotimes;": "\u2a35",
"rpar;": ")",
"rpargt;": "\u2994",
"rppolint;": "\u2a12",
"rrarr;": "\u21c9",
"rsaquo;": "\u203a",
"rscr;": "\U0001d4c7",
"rsh;": "\u21b1",
"rsqb;": "]",
"rsquo;": "\u2019",
"rsquor;": "\u2019",
"rthree;": "\u22cc",
"rtimes;": "\u22ca",
"rtri;": "\u25b9",
"rtrie;": "\u22b5",
"rtrif;": "\u25b8",
"rtriltri;": "\u29ce",
"ruluhar;": "\u2968",
"rx;": "\u211e",
"sacute;": "\u015b",
"sbquo;": "\u201a",
"sc;": "\u227b",
"scE;": "\u2ab4",
"scap;": "\u2ab8",
"scaron;": "\u0161",
"sccue;": "\u227d",
"sce;": "\u2ab0",
"scedil;": "\u015f",
"scirc;": "\u015d",
"scnE;": "\u2ab6",
"scnap;": "\u2aba",
"scnsim;": "\u22e9",
"scpolint;": "\u2a13",
"scsim;": "\u227f",
"scy;": "\u0441",
"sdot;": "\u22c5",
"sdotb;": "\u22a1",
"sdote;": "\u2a66",
"seArr;": "\u21d8",
"searhk;": "\u2925",
"searr;": "\u2198",
"searrow;": "\u2198",
"sect": "\xa7",
"sect;": "\xa7",
"semi;": ";",
"seswar;": "\u2929",
"setminus;": "\u2216",
"setmn;": "\u2216",
"sext;": "\u2736",
"sfr;": "\U0001d530",
"sfrown;": "\u2322",
"sharp;": "\u266f",
"shchcy;": "\u0449",
"shcy;": "\u0448",
"shortmid;": "\u2223",
"shortparallel;": "\u2225",
"shy": "\xad",
"shy;": "\xad",
"sigma;": "\u03c3",
"sigmaf;": "\u03c2",
"sigmav;": "\u03c2",
"sim;": "\u223c",
"simdot;": "\u2a6a",
"sime;": "\u2243",
"simeq;": "\u2243",
"simg;": "\u2a9e",
"simgE;": "\u2aa0",
"siml;": "\u2a9d",
"simlE;": "\u2a9f",
"simne;": "\u2246",
"simplus;": "\u2a24",
"simrarr;": "\u2972",
"slarr;": "\u2190",
"smallsetminus;": "\u2216",
"smashp;": "\u2a33",
"smeparsl;": "\u29e4",
"smid;": "\u2223",
"smile;": "\u2323",
"smt;": "\u2aaa",
"smte;": "\u2aac",
"smtes;": "\u2aac\ufe00",
"softcy;": "\u044c",
"sol;": "/",
"solb;": "\u29c4",
"solbar;": "\u233f",
"sopf;": "\U0001d564",
"spades;": "\u2660",
"spadesuit;": "\u2660",
"spar;": "\u2225",
"sqcap;": "\u2293",
"sqcaps;": "\u2293\ufe00",
"sqcup;": "\u2294",
"sqcups;": "\u2294\ufe00",
"sqsub;": "\u228f",
"sqsube;": "\u2291",
"sqsubset;": "\u228f",
"sqsubseteq;": "\u2291",
"sqsup;": "\u2290",
"sqsupe;": "\u2292",
"sqsupset;": "\u2290",
"sqsupseteq;": "\u2292",
"squ;": "\u25a1",
"square;": "\u25a1",
"squarf;": "\u25aa",
"squf;": "\u25aa",
"srarr;": "\u2192",
"sscr;": "\U0001d4c8",
"ssetmn;": "\u2216",
"ssmile;": "\u2323",
"sstarf;": "\u22c6",
"star;": "\u2606",
"starf;": "\u2605",
"straightepsilon;": "\u03f5",
"straightphi;": "\u03d5",
"strns;": "\xaf",
"sub;": "\u2282",
"subE;": "\u2ac5",
"subdot;": "\u2abd",
"sube;": "\u2286",
"subedot;": "\u2ac3",
"submult;": "\u2ac1",
"subnE;": "\u2acb",
"subne;": "\u228a",
"subplus;": "\u2abf",
"subrarr;": "\u2979",
"subset;": "\u2282",
"subseteq;": "\u2286",
"subseteqq;": "\u2ac5",
"subsetneq;": "\u228a",
"subsetneqq;": "\u2acb",
"subsim;": "\u2ac7",
"subsub;": "\u2ad5",
"subsup;": "\u2ad3",
"succ;": "\u227b",
"succapprox;": "\u2ab8",
"succcurlyeq;": "\u227d",
"succeq;": "\u2ab0",
"succnapprox;": "\u2aba",
"succneqq;": "\u2ab6",
"succnsim;": "\u22e9",
"succsim;": "\u227f",
"sum;": "\u2211",
"sung;": "\u266a",
"sup1": "\xb9",
"sup1;": "\xb9",
"sup2": "\xb2",
"sup2;": "\xb2",
"sup3": "\xb3",
"sup3;": "\xb3",
"sup;": "\u2283",
"supE;": "\u2ac6",
"supdot;": "\u2abe",
"supdsub;": "\u2ad8",
"supe;": "\u2287",
"supedot;": "\u2ac4",
"suphsol;": "\u27c9",
"suphsub;": "\u2ad7",
"suplarr;": "\u297b",
"supmult;": "\u2ac2",
"supnE;": "\u2acc",
"supne;": "\u228b",
"supplus;": "\u2ac0",
"supset;": "\u2283",
"supseteq;": "\u2287",
"supseteqq;": "\u2ac6",
"supsetneq;": "\u228b",
"supsetneqq;": "\u2acc",
"supsim;": "\u2ac8",
"supsub;": "\u2ad4",
"supsup;": "\u2ad6",
"swArr;": "\u21d9",
"swarhk;": "\u2926",
"swarr;": "\u2199",
"swarrow;": "\u2199",
"swnwar;": "\u292a",
"szlig": "\xdf",
"szlig;": "\xdf",
"target;": "\u2316",
"tau;": "\u03c4",
"tbrk;": "\u23b4",
"tcaron;": "\u0165",
"tcedil;": "\u0163",
"tcy;": "\u0442",
"tdot;": "\u20db",
"telrec;": "\u2315",
"tfr;": "\U0001d531",
"there4;": "\u2234",
"therefore;": "\u2234",
"theta;": "\u03b8",
"thetasym;": "\u03d1",
"thetav;": "\u03d1",
"thickapprox;": "\u2248",
"thicksim;": "\u223c",
"thinsp;": "\u2009",
"thkap;": "\u2248",
"thksim;": "\u223c",
"thorn": "\xfe",
"thorn;": "\xfe",
"tilde;": "\u02dc",
"times": "\xd7",
"times;": "\xd7",
"timesb;": "\u22a0",
"timesbar;": "\u2a31",
"timesd;": "\u2a30",
"tint;": "\u222d",
"toea;": "\u2928",
"top;": "\u22a4",
"topbot;": "\u2336",
"topcir;": "\u2af1",
"topf;": "\U0001d565",
"topfork;": "\u2ada",
"tosa;": "\u2929",
"tprime;": "\u2034",
"trade;": "\u2122",
"triangle;": "\u25b5",
"triangledown;": "\u25bf",
"triangleleft;": "\u25c3",
"trianglelefteq;": "\u22b4",
"triangleq;": "\u225c",
"triangleright;": "\u25b9",
"trianglerighteq;": "\u22b5",
"tridot;": "\u25ec",
"trie;": "\u225c",
"triminus;": "\u2a3a",
"triplus;": "\u2a39",
"trisb;": "\u29cd",
"tritime;": "\u2a3b",
"trpezium;": "\u23e2",
"tscr;": "\U0001d4c9",
"tscy;": "\u0446",
"tshcy;": "\u045b",
"tstrok;": "\u0167",
"twixt;": "\u226c",
"twoheadleftarrow;": "\u219e",
"twoheadrightarrow;": "\u21a0",
"uArr;": "\u21d1",
"uHar;": "\u2963",
"uacute": "\xfa",
"uacute;": "\xfa",
"uarr;": "\u2191",
"ubrcy;": "\u045e",
"ubreve;": "\u016d",
"ucirc": "\xfb",
"ucirc;": "\xfb",
"ucy;": "\u0443",
"udarr;": "\u21c5",
"udblac;": "\u0171",
"udhar;": "\u296e",
"ufisht;": "\u297e",
"ufr;": "\U0001d532",
"ugrave": "\xf9",
"ugrave;": "\xf9",
"uharl;": "\u21bf",
"uharr;": "\u21be",
"uhblk;": "\u2580",
"ulcorn;": "\u231c",
"ulcorner;": "\u231c",
"ulcrop;": "\u230f",
"ultri;": "\u25f8",
"umacr;": "\u016b",
"uml": "\xa8",
"uml;": "\xa8",
"uogon;": "\u0173",
"uopf;": "\U0001d566",
"uparrow;": "\u2191",
"updownarrow;": "\u2195",
"upharpoonleft;": "\u21bf",
"upharpoonright;": "\u21be",
"uplus;": "\u228e",
"upsi;": "\u03c5",
"upsih;": "\u03d2",
"upsilon;": "\u03c5",
"upuparrows;": "\u21c8",
"urcorn;": "\u231d",
"urcorner;": "\u231d",
"urcrop;": "\u230e",
"uring;": "\u016f",
"urtri;": "\u25f9",
"uscr;": "\U0001d4ca",
"utdot;": "\u22f0",
"utilde;": "\u0169",
"utri;": "\u25b5",
"utrif;": "\u25b4",
"uuarr;": "\u21c8",
"uuml": "\xfc",
"uuml;": "\xfc",
"uwangle;": "\u29a7",
"vArr;": "\u21d5",
"vBar;": "\u2ae8",
"vBarv;": "\u2ae9",
"vDash;": "\u22a8",
"vangrt;": "\u299c",
"varepsilon;": "\u03f5",
"varkappa;": "\u03f0",
"varnothing;": "\u2205",
"varphi;": "\u03d5",
"varpi;": "\u03d6",
"varpropto;": "\u221d",
"varr;": "\u2195",
"varrho;": "\u03f1",
"varsigma;": "\u03c2",
"varsubsetneq;": "\u228a\ufe00",
"varsubsetneqq;": "\u2acb\ufe00",
"varsupsetneq;": "\u228b\ufe00",
"varsupsetneqq;": "\u2acc\ufe00",
"vartheta;": "\u03d1",
"vartriangleleft;": "\u22b2",
"vartriangleright;": "\u22b3",
"vcy;": "\u0432",
"vdash;": "\u22a2",
"vee;": "\u2228",
"veebar;": "\u22bb",
"veeeq;": "\u225a",
"vellip;": "\u22ee",
"verbar;": "|",
"vert;": "|",
"vfr;": "\U0001d533",
"vltri;": "\u22b2",
"vnsub;": "\u2282\u20d2",
"vnsup;": "\u2283\u20d2",
"vopf;": "\U0001d567",
"vprop;": "\u221d",
"vrtri;": "\u22b3",
"vscr;": "\U0001d4cb",
"vsubnE;": "\u2acb\ufe00",
"vsubne;": "\u228a\ufe00",
"vsupnE;": "\u2acc\ufe00",
"vsupne;": "\u228b\ufe00",
"vzigzag;": "\u299a",
"wcirc;": "\u0175",
"wedbar;": "\u2a5f",
"wedge;": "\u2227",
"wedgeq;": "\u2259",
"weierp;": "\u2118",
"wfr;": "\U0001d534",
"wopf;": "\U0001d568",
"wp;": "\u2118",
"wr;": "\u2240",
"wreath;": "\u2240",
"wscr;": "\U0001d4cc",
"xcap;": "\u22c2",
"xcirc;": "\u25ef",
"xcup;": "\u22c3",
"xdtri;": "\u25bd",
"xfr;": "\U0001d535",
"xhArr;": "\u27fa",
"xharr;": "\u27f7",
"xi;": "\u03be",
"xlArr;": "\u27f8",
"xlarr;": "\u27f5",
"xmap;": "\u27fc",
"xnis;": "\u22fb",
"xodot;": "\u2a00",
"xopf;": "\U0001d569",
"xoplus;": "\u2a01",
"xotime;": "\u2a02",
"xrArr;": "\u27f9",
"xrarr;": "\u27f6",
"xscr;": "\U0001d4cd",
"xsqcup;": "\u2a06",
"xuplus;": "\u2a04",
"xutri;": "\u25b3",
"xvee;": "\u22c1",
"xwedge;": "\u22c0",
"yacute": "\xfd",
"yacute;": "\xfd",
"yacy;": "\u044f",
"ycirc;": "\u0177",
"ycy;": "\u044b",
"yen": "\xa5",
"yen;": "\xa5",
"yfr;": "\U0001d536",
"yicy;": "\u0457",
"yopf;": "\U0001d56a",
"yscr;": "\U0001d4ce",
"yucy;": "\u044e",
"yuml": "\xff",
"yuml;": "\xff",
"zacute;": "\u017a",
"zcaron;": "\u017e",
"zcy;": "\u0437",
"zdot;": "\u017c",
"zeetrf;": "\u2128",
"zeta;": "\u03b6",
"zfr;": "\U0001d537",
"zhcy;": "\u0436",
"zigrarr;": "\u21dd",
"zopf;": "\U0001d56b",
"zscr;": "\U0001d4cf",
"zwj;": "\u200d",
"zwnj;": "\u200c",
}
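
# --- usage sketch (not part of the original module) ---
# HTML allows some legacy entities without a trailing semicolon, which is why
# the table stores both "amp" and "amp;". A minimal, hypothetical helper that
# resolves a raw entity name (the text between "&" and the optional ";"):
def resolve_named_entity(name):
    """Return the replacement text for an entity name, or None if unknown."""
    # Prefer the canonical semicolon-terminated form, then fall back to the
    # legacy semicolon-less form where the table defines one.
    return named_entities.get(name + ";", named_entities.get(name))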
|
kevinhendricks/ePub3-itizer
|
src/html_namedentities.py
|
Python
|
lgpl-2.1
| 57,273
|
[
"Bowtie"
] |
1252f47bbef9008c8540df0c7242fcd7eb2ec2f7aba523a286c141265062b3fd
|
#!/usr/bin/env python
"""
Install.py tool to download, unpack, build, and link to the n2p2 library
used to automate the steps described in the README file in this dir
"""
from __future__ import print_function
import sys, os, platform, subprocess, shutil
from argparse import ArgumentParser
sys.path.append('..')
from install_helpers import get_cpus, fullpath, geturl, checkmd5sum
parser = ArgumentParser(prog='Install.py',
description="LAMMPS library build wrapper script")
# settings
version = "2.1.4"
# help message
HELP = """
Syntax from src dir: make lib-hdnnp args="-b"
or: make lib-hdnnp args="-b -v 2.1.4"
or: make lib-hdnnp args="-p /usr/local/n2p2"
Syntax from lib dir: python Install.py -b -v 2.1.4
or: python Install.py -b
or: python Install.py -p /usr/local/n2p2
Example:
make lib-hdnnp args="-b" # download/build in lib/hdnnp/n2p2
make lib-hdnnp args="-p $HOME/n2p2" # use existing n2p2 installation in $HOME/n2p2
"""
# known checksums for different n2p2 versions. used to validate the download.
checksums = {
    '2.1.4': '9595b066636cd6b90b0fef93398297a5',
}
# parse and process arguments
pgroup = parser.add_mutually_exclusive_group()
pgroup.add_argument("-b", "--build", action="store_true",
help="download and build the n2p2 library")
pgroup.add_argument("-p", "--path",
help="specify folder of existing n2p2 installation")
parser.add_argument("-v", "--version", default=version, choices=checksums.keys(),
help="set version of n2p2 to download and build (default: %s)" % version)
args = parser.parse_args()
# print help message and exit, if neither build nor path options are given
if not args.build and not args.path:
parser.print_help()
sys.exit(HELP)
buildflag = args.build
pathflag = args.path is not None
n2p2path = args.path
homepath = fullpath('.')
homedir = "%s/n2p2" % (homepath)
if pathflag:
if not os.path.isdir(n2p2path):
sys.exit("n2p2 path %s does not exist" % n2p2path)
homedir = fullpath(n2p2path)
if not os.path.isfile(os.path.join(homedir, 'include', 'InterfaceLammps.h')):
sys.exit("No n2p2 installation found at %s" % n2p2path)
# download and unpack n2p2 tarball
if buildflag:
url = "https://github.com/CompPhysVienna/n2p2/archive/v%s.tar.gz" % (version)
    filename = "n2p2-%s.tar.gz" % version
print("Downloading n2p2 ...")
geturl(url, filename)
# verify downloaded archive integrity via md5 checksum, if known.
if version in checksums:
if not checkmd5sum(checksums[version], filename):
sys.exit("Checksum for n2p2 library does not match")
print("Unpacking n2p2 source tarball ...")
if os.path.exists("%s/n2p2-%s" % (homepath, version)):
shutil.rmtree("%s/n2p2-%s" % (homepath, version))
if os.path.exists(homedir):
shutil.rmtree(homedir)
cmd = 'cd "%s"; tar -xzvf %s' % (homepath, filename)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
os.remove(os.path.join(homepath, filename))
# build n2p2
print("Building n2p2 ...")
n_cpus = get_cpus()
cmd = 'unset MAKEFLAGS MAKELEVEL MAKEOVERRIDES MFLAGS && cd %s/n2p2-%s/src && make -j%d libnnpif' % (homepath, version, n_cpus)
try:
txt = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
print(txt.decode('UTF-8'))
except subprocess.CalledProcessError as e:
print("Make failed with:\n %s" % e.output.decode('UTF-8'))
sys.exit(1)
# set correct homedir for linking step
homedir = "%s/n2p2-%s" % (homepath, version)
# create 2 links in lib/hdnnp to n2p2 installation dir
print("Creating links to n2p2 include and lib files")
if os.path.isfile("includelink") or os.path.islink("includelink"):
os.remove("includelink")
if os.path.isfile("liblink") or os.path.islink("liblink"):
os.remove("liblink")
if os.path.isfile("Makefile.lammps") or os.path.islink("Makefile.lammps"):
os.remove("Makefile.lammps")
os.symlink(os.path.join(homedir, 'include'), 'includelink')
os.symlink(os.path.join(homedir, 'lib'), 'liblink')
os.symlink(os.path.join(homedir, 'lib', 'Makefile.lammps-extra'), 'Makefile.lammps')
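
# Sketch of the resulting lib/hdnnp layout after the default "-b" build (with
# "-p", the links point at the existing installation instead):
#   includelink     -> n2p2-<version>/include
#   liblink         -> n2p2-<version>/lib
#   Makefile.lammps -> n2p2-<version>/lib/Makefile.lammps-extra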
|
lammps/lammps
|
lib/hdnnp/Install.py
|
Python
|
gpl-2.0
| 4,218
|
[
"LAMMPS"
] |
5fe8ac67441a3eafac6823448bc5900874dd16f5f6a7d8c08813eeb7cc04fe0c
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import unittest
from commoncode import version
class TestVersionHint(unittest.TestCase):
def test_version_hint(self):
data = {
'/xmlgraphics/fop/source/fop-1.0-src.zip': '1.0',
'/xml/xindice/xml-xindice-1.2m1-src.zip': '1.2m1',
'/xmlgraphics/fop/binaries/fop-0.94-bin-jdk1.3.tar.gz': '0.94',
'/xmlgraphics/batik/batik-src-1.7beta1.zip': '1.7beta1',
'/xmlgraphics/batik/batik-1.7-jre13.zip': '1.7',
'/xmlbeans/source/xmlbeans-2.3.0-src.tgz': '2.3.0',
'/xml/xindice/source/xml-xindice-1.2m1-src.tar.gz': '1.2m1',
'/xml/xerces-p/binaries/XML-Xerces-2.3.0-4-win32.zip': '2.3.0-4',
'/xml/xerces-p/source/XML-Xerces-2.3.0-3.tar.gz': '2.3.0-3',
'/xml/xalan-j/source/xalan-j_2_7_0-src-2jars.tar.gz': '2_7_0',
'/xml/security/java-library/xml-security-src-1_0_5D2.zip': '1_0_5D2',
'/xml/commons/binaries/xml-commons-external-1.4.01-bin.zip': '1.4.01',
'/xml/commons/xml-commons-1.0.b2.zip': '1.0.b2',
'/xml/cocoon/3.0/cocoon-all-3.0.0-alpha-1-dist.tar.gz': '3.0.0-alpha-1',
'/xerces/j/source/Xerces-J-tools.2.10.0-xml-schema-1.1-beta.tar.gz': '2.10.0',
'/xerces/c/3/binaries/xerces-c-3.1.1-x86_64-solaris-cc-5.10.tar.gz': '3.1.1',
'/xerces/c/3/binaries/xerces-c-3.1.1-x86_64-windows-vc-8.0.zip': '3.1.1',
'/xerces/c/2/binaries/xerces-c_2_8_0-x86-windows-vc_7_1.zip': '2_8_0',
'/ws/woden/1.0M8/apache-woden-src-1.0M8.tar.gz': '1.0M8',
'/ws/scout/0_7rc1/source/scout-0.7rc1-src.zip': '0.7rc1',
'/ws/juddi/3_0/juddi-portal-bundle-3.0.0.rc1.zip': '3.0.0.rc1',
'/ws/juddi/3_0/juddi-portal-bundle-3.0.0.beta.zip': '3.0.0.beta',
'/ws/juddi/2_0RC7/juddi-tomcat-2.0rc7.zip': '2.0rc7',
'/ws/axis2/tools/1_4_1/axis2-wsdl2code-maven-plugin-1.4.1.jar': '1.4.1',
'/ws/axis/1_4/axis-src-1_4.zip': '1_4',
'/ws/axis-c/source/win32/axis-c-1.6b-Win32-trace-src.zip': '1.6b',
'/tuscany/java/sca/2.0-M5/apache-tuscany-sca-all-2.0-M5-src.tar.gz': '2.0-M5',
'/turbine/turbine-2.3.3-rc1/source/turbine-2.3.3-RC1-src.zip': '2.3.3-RC1',
'/tomcat/tomcat-connectors/jk/binaries/win64/jk-1.2.30/ia64/symbols-1.2.30.zip': '1.2.30',
'/tomcat/tomcat-7/v7.0.0-beta/bin/apache-tomcat-7.0.0-windows-i64.zip': '7.0.0',
'/tomcat/tomcat-4/v4.1.40/bin/apache-tomcat-4.1.40-LE-jdk14.exe': '4.1.40',
'/tapestry/tapestry-src-5.1.0.5.tar.gz': '5.1.0.5',
'/spamassassin/source/Mail-SpamAssassin-rules-3.3.0.r901671.tgz': '3.3.0.r901671',
'/spamassassin/Mail-SpamAssassin-rules-3.3.1.r923257.tgz': '3.3.1.r923257',
'/shindig/1.1-BETA5-incubating/shindig-1.1-BETA5-incubating-source.zip': '1.1-BETA5',
'/servicemix/nmr/1.0.0-m3/apache-servicemix-nmr-1.0.0-m3-src.tar.gz': '1.0.0-m3',
'/qpid/0.6/qpid-dotnet-0-10-0.6.zip': '0.6',
'/openjpa/2.0.0-beta/apache-openjpa-2.0.0-beta-binary.zip': '2.0.0-beta',
'/myfaces/source/portlet-bridge-2.0.0-alpha-2-src-all.tar.gz': '2.0.0-alpha-2',
'/myfaces/source/myfaces-extval20-2.0.3-src.tar.gz': '2.0.3',
'/harmony/milestones/6.0/debian/amd64/harmony-6.0-classlib_0.0r946981-1_amd64.deb': '6.0',
'/geronimo/eclipse/updates/plugins/org.apache.geronimo.st.v21.ui_2.1.1.jar': '2.1.1',
'/directory/studio/update/1.x/plugins/org.apache.directory.studio.aciitemeditor_1.5.2.v20091211.jar': '1.5.2.v20091211',
'/db/torque/torque-3.3/source/torque-gen-3.3-RC3-src.zip': '3.3-RC3',
'/cayenne/cayenne-3.0B1.tar.gz': '3.0B1',
'/cayenne/cayenne-3.0M4-macosx.dmg': '3.0M4',
'/xmlgraphics/batik/batik-docs-current.zip': 'current',
'/xmlgraphics/batik/batik-docs-previous.zip': 'previous',
'/poi/dev/bin/poi-bin-3.7-beta1-20100620.zip': '3.7-beta1-20100620',
'/excalibur/avalon-logkit/source/excalibur-logkit-2.0.dev-0-src.zip': '2.0.dev-0',
'/db/derby/db-derby-10.4.2.0/derby_core_plugin_10.4.2.zip': '10.4.2',
'/httpd/modpython/win/2.7.1/mp152dll.zip': '2.7.1',
'/perl/mod_perl-1.31/apaci/mod_perl.config.sh': '1.31',
'/xml/xerces-j/old_xerces2/Xerces-J-bin.2.0.0.alpha.zip': '2.0.0.alpha',
'/xml/xerces-p/archives/XML-Xerces-1.7.0_0.tar.gz': '1.7.0_0',
'/httpd/docs/tools-2004-05-04.zip': '2004-05-04',
'/ws/axis2/c/M0_5/axis2c-src-M0.5.tar.gz': 'M0.5',
'/jakarta/poi/dev/src/jakarta-poi-1.8.0-dev-src.zip': '1.8.0-dev',
'/tapestry/tapestry-4.0-beta-8.zip': '4.0-beta-8',
'/openejb/3.0-beta-1/openejb-3.0-beta-1.zip': '3.0-beta-1',
'/tapestry/tapestry-4.0-rc-1.zip': '4.0-rc-1',
'/jakarta/tapestry/source/3.0-rc-3/Tapestry-3.0-rc-3-src.zip': '3.0-rc-3',
'/jakarta/lucene/binaries/lucene-1.3-final.tar.gz': '1.3-final',
'/jakarta/tapestry/binaries/3.0-beta-1a/Tapestry-3.0-beta-1a-bin.zip': '3.0-beta-1a',
'/poi/release/bin/poi-bin-3.0-FINAL-20070503.tar.gz': '3.0-FINAL-20070503',
'/harmony/milestones/M4/apache-harmony-hdk-r603534-linux-x86-32-libstdc++v6-snapshot.tar.gz': 'r603534',
'/ant/antidote/antidote-20050330.tar.bz2': '20050330',
'/apr/not-released/apr_20020725223645.tar.gz': '20020725223645',
'/ibatis/source/ibatis.net/src-revision-709676.zip': 'revision-709676',
'/ws/axis-c/source/win32/axis-c-src-1-2-win32.zip': '1-2',
'/jakarta/slide/most-recent-2.0rc1-binaries/jakarta-slide 2.0rc1 jakarta-tomcat-4.1.30.zip': '2.0rc1',
'/httpd/modpython/win/3.0.1/python2.2.1-apache2.0.43.zip': '2.2.1',
'/ant/ivyde/updatesite/features/org.apache.ivy.feature_2.1.0.cr1_20090319213629.jar': '2.1.0.cr1_20090319213629',
'/jakarta/poi/dev/bin/poi-2.0-pre1-20030517.jar': '2.0-pre1-20030517',
'/jakarta/poi/release/bin/jakarta-poi-1.5.0-FINAL-bin.zip': '1.5.0-FINAL',
'/jakarta/poi/release/bin/poi-bin-2.0-final-20040126.zip': '2.0-final-20040126',
'/activemq/apache-activemq/5.0.0/apache-activemq-5.0.0-sources.jar': '5.0.0',
'/turbine/turbine-2.2/source/jakarta-turbine-2.2-B1.tar.gz': '2.2-B1',
'/ant/ivyde/updatesite/features/org.apache.ivy.feature_2.0.0.cr1.jar': '2.0.0.cr1',
'/ant/ivyde/updatesite/features/org.apache.ivy.feature_2.0.0.final_20090108225011.jar': '2.0.0.final_20090108225011',
'/ws/axis/1_2RC3/axis-src-1_2RC3.zip': '1_2RC3',
'/commons/lang/old/v1.0-b1.1/commons-lang-1.0-b1.1.zip': '1.0-b1.1',
'/commons/net/binaries/commons-net-1.2.0-release.tar.gz': '1.2.0-release',
'/ant/ivyde/2.0.0.final/apache-ivyde-2.0.0.final-200907011148-RELEASE.tgz': '2.0.0.final-200907011148-RELEASE',
'/geronimo/eclipse/updates/plugins/org.apache.geronimo.jetty.j2ee.server.v11_1.0.0.jar': 'v11_1.0.0',
'/jakarta/cactus/binaries/jakarta-cactus-13-1.7.1-fixed.zip': '1.7.1-fixed',
'/jakarta/jakarta-turbine-maven/maven/jars/maven-1.0-b5-dev.20020731.085427.jar': '1.0-b5-dev.20020731.085427',
'/xml/xalan-j/source/xalan-j_2_5_D1-src.tar.gz': '2_5_D1',
'/ws/woden/IBuilds/I20051002_1145/woden-I20051002_1145.tar.bz2': 'I20051002_1145',
'/commons/beanutils/source/commons-beanutils-1.8.0-BETA-src.tar.gz': '1.8.0-BETA',
'/cocoon/BINARIES/cocoon-2.0.3-vm14-bin.tar.gz': '2.0.3-vm14',
'/felix/xliff_filters_v1_2_7_unix.jar': 'v1_2_7',
'/excalibur/releases/200702/excalibur-javadoc-r508111-15022007.tar.gz': 'r508111-15022007',
'/geronimo/eclipse/updates/features/org.apache.geronimo.v20.feature_2.0.0.jar': 'v20.feature_2.0.0',
'/geronimo/2.1.6/axis2-jaxws-1.3-G20090406.jar': '1.3-G20090406',
'/cassandra/debian/pool/main/c/cassandra/cassandra_0.4.0~beta1-1.diff.gz': '0.4.0~beta1-1',
'/ha-api-3.1.6.jar': '3.1.6',
'ha-api-3.1.6.jar': '3.1.6'
}
# FIXME: generate a test function for each case
for path in data:
expected = data[path]
if not expected.lower().startswith('v'):
expected = 'v ' + expected
assert expected == version.hint(path)
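
# A minimal sketch of the FIXME above (hypothetical helper, not part of the
# original suite): build one named test method per data item so a failing
# path is reported individually. Assumes the same expected-value convention
# and the version.hint API exercised in test_version_hint.
def _make_hint_test(path, expected):
    def test(self):
        if not expected.lower().startswith('v'):
            expected_hint = 'v ' + expected
        else:
            expected_hint = expected
        assert expected_hint == version.hint(path)
    return test

# e.g.: for i, (p, v) in enumerate(sorted(data.items())):
#           setattr(TestVersionHint, 'test_version_hint_%d' % i, _make_hint_test(p, v))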
|
yasharmaster/scancode-toolkit
|
tests/commoncode/test_version.py
|
Python
|
apache-2.0
| 9,896
|
[
"VisIt"
] |
2696ab93019aba9ac00e30e8f5ac521599f901f56ddf38ebeb235ee69536539d
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import espressomd
import espressomd.electrostatics
import espressomd.electrostatic_extensions
import numpy as np
@utx.skipIfMissingFeatures(["ELECTROSTATICS", "EXTERNAL_FORCES"])
class test_icc(ut.TestCase):
system = espressomd.System(box_l=[10, 10, 10])
def tearDown(self):
self.system.actors.clear()
self.system.part.clear()
def add_icc_particles(self, side_num_particles,
initial_charge, z_position):
number = side_num_particles**2
areas = self.system.box_l[0] * \
self.system.box_l[1] / number * np.ones(number)
normals = np.zeros((number, 3))
normals[:, 2] = 1
x_position = np.linspace(
0,
self.system.box_l[0],
side_num_particles,
endpoint=False)
y_position = np.linspace(
0,
self.system.box_l[1],
side_num_particles,
endpoint=False)
x_pos, y_pos = np.meshgrid(x_position, y_position)
positions = np.stack((x_pos, y_pos, np.full_like(
x_pos, z_position)), axis=-1).reshape(-1, 3)
charges = np.full(number, initial_charge)
fix = [(True, True, True)] * number
return self.system.part.add(
pos=positions, q=charges, fix=fix), normals, areas
def common_setup(self, kwargs, error):
part_slice, normals, areas = self.add_icc_particles(2, 0.01, 0)
params = {"n_icc": len(part_slice),
"normals": normals,
"areas": areas,
"epsilons": np.ones_like(areas),
"first_id": part_slice.id[0],
"check_neutrality": False}
params.update(kwargs)
icc = espressomd.electrostatic_extensions.ICC(**params)
with self.assertRaisesRegex(Exception, error):
self.system.actors.add(icc)
def test_params(self):
params = [({"n_icc": -1}, 'ICC: invalid number of particles'),
({"first_id": -1}, 'ICC: invalid first_id'),
({"max_iterations": -1}, 'ICC: invalid max_iterations'),
({"convergence": -1}, 'ICC: invalid convergence value'),
({"relaxation": -1}, 'ICC: invalid relaxation value'),
({"relaxation": 2.1}, 'ICC: invalid relaxation value'),
({"eps_out": -1}, 'ICC: invalid eps_out'),
({"ext_field": 0}, 'A single value was given but 3 were expected'), ]
for kwargs, error in params:
self.common_setup(kwargs, error)
self.tearDown()
def test_core_params(self):
part_slice, normals, areas = self.add_icc_particles(5, 0.01, 0)
params = {"n_icc": len(part_slice),
"normals": normals,
"areas": areas,
"epsilons": np.ones_like(areas),
"first_id": part_slice.id[0],
"check_neutrality": False}
icc = espressomd.electrostatic_extensions.ICC(**params)
self.system.actors.add(icc)
icc_params = icc.get_params()
for key, value in params.items():
np.testing.assert_allclose(value, np.copy(icc_params[key]))
@utx.skipIfMissingFeatures(["P3M"])
def test_dipole_system(self):
BOX_L = 20.
BOX_SPACE = 5.
self.system.box_l = [BOX_L, BOX_L, BOX_L + BOX_SPACE]
self.system.cell_system.skin = 0.4
self.system.time_step = 0.01
N_ICC_SIDE_LENGTH = 10
DIPOLE_DISTANCE = 5.0
DIPOLE_CHARGE = 10.0
part_slice_lower, normals_lower, areas_lower = self.add_icc_particles(
N_ICC_SIDE_LENGTH, -0.0001, 0.)
part_slice_upper, normals_upper, areas_upper = self.add_icc_particles(
N_ICC_SIDE_LENGTH, 0.0001, BOX_L)
assert (part_slice_upper.id[-1] - part_slice_lower.id[0] +
1) == 2 * N_ICC_SIDE_LENGTH**2, "ICC particles not continuous"
normals = np.vstack((normals_lower, -normals_upper))
areas = np.hstack((areas_lower, areas_upper))
epsilons = np.full_like(areas, 1e8)
sigmas = np.zeros_like(areas)
icc = espressomd.electrostatic_extensions.ICC(
n_icc=2 * N_ICC_SIDE_LENGTH**2,
normals=normals,
areas=areas,
epsilons=epsilons,
sigmas=sigmas,
convergence=1e-6,
max_iterations=100,
first_id=part_slice_lower.id[0],
eps_out=1.,
relaxation=0.75,
ext_field=[0, 0, 0])
# Dipole in the center of the simulation box
BOX_L_HALF = BOX_L / 2
self.system.part.add(pos=[BOX_L_HALF, BOX_L_HALF, BOX_L_HALF - DIPOLE_DISTANCE / 2],
q=DIPOLE_CHARGE, fix=[True, True, True])
self.system.part.add(pos=[BOX_L_HALF, BOX_L_HALF, BOX_L_HALF + DIPOLE_DISTANCE / 2],
q=-DIPOLE_CHARGE, fix=[True, True, True])
p3m = espressomd.electrostatics.P3M(
prefactor=1, mesh=32, cao=7, accuracy=1e-5)
self.system.actors.add(p3m)
self.system.actors.add(icc)
self.system.integrator.run(0)
charge_lower = sum(part_slice_lower.q)
charge_upper = sum(part_slice_upper.q)
testcharge_dipole = DIPOLE_CHARGE * DIPOLE_DISTANCE
induced_dipole = 0.5 * (abs(charge_lower) + abs(charge_upper)) * BOX_L
self.assertAlmostEqual(1, induced_dipole / testcharge_dipole, places=4)
if __name__ == "__main__":
ut.main()
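
# Worked check of the dipole assertion above (a sketch using the constants
# from test_dipole_system and an idealized grounded-plate image-charge
# estimate): the bare dipole moment is DIPOLE_CHARGE * DIPOLE_DISTANCE
# = 10.0 * 5.0 = 50.0. Each conductor-like plate (epsilon = 1e8) then carries
# roughly |Q| = DIPOLE_CHARGE * DIPOLE_DISTANCE / BOX_L = 2.5, so
# 0.5 * (|Q_lower| + |Q_upper|) * BOX_L = 0.5 * (2.5 + 2.5) * 20.0 = 50.0,
# and the ratio induced_dipole / testcharge_dipole converges to 1.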
|
pkreissl/espresso
|
testsuite/python/icc.py
|
Python
|
gpl-3.0
| 6,336
|
[
"ESPResSo"
] |
c28bc5c44cd0a442703ba6c82192a5acb8ae0bcf02db4e8f6e704f275bfaff20
|
from models import Entry
from models import User
from models import Project
from models import Vendor
from models import Objective
from models import EvaluationCriteria
from models import DEFAULT_PROJECT_NAME
from models import project_db_key
from functools import wraps
import json
import time
import datetime
from flask import request, session, Response, url_for, redirect
from google.appengine.api import mail
from google.appengine.api import app_identity
import math
import urllib2
firebase_server_key = "key=AIzaSyDxwE1m7WjI6400WD9GadNJqoZfJvBmjGs"
fcm_server = "https://fcm.googleapis.com/fcm/send"
fcm_headers = {'Content-type': 'application/json', 'Accept': 'text/plain', 'Authorization' : firebase_server_key}
sender_address = "DAR Admin <jaisairam0170@gmail.com> "
total_max_limit = 1000
gae_environments = {'daranalysis-200000' : 'blue',
'daranalysis-160000' : 'red',
'daranalysis-201000' : 'amber',
'daranalysis-202000' : 'yellow',
'daranalysis-203000' : 'green'}
super_user_name = "Superuser"
CREATE_MODE = "__CREATE__"
ENTRY_SAVED_TITLE = "DAR Entry Saved"
ENTRY_SAVED_MESSAGE = "Hello {toUser}, {aboutUser} has just saved DAR entry"
DAR_TITLE = 'This is my {string} formatted with {args} arguments'
PROJECT_REMINDER_TITLE = "DAR Project Reminder ({env}): Your DAR needs to be completed"
PROJECT_REMINDER_MESSAGE = "As an admin of DAR {projectId} in the {env} environment, \
it needs your attention; please remind users using the Manage button"
def get_project_db_name(rname=DEFAULT_PROJECT_NAME):
return rname
#Gets projects from db - all projects, or only those assigned to the given userId
def get_projects_from_db(userId):
if userId:
project_query = Project.query(Project.userIds.IN([userId]))
else:
project_query = Project.query()
return project_query.fetch(total_max_limit)
#Gets a single project from db by its projectId
def get_project_from_db(projectId):
project_query = Project.query(Project.projectId == projectId)
if project_query.count() < 1:
return None
else:
return project_query.fetch(1)[-1]
def get_entry_from_db(projectId, userId):
entrys_query = Entry.query(Entry.user.identity == userId, Entry.project.projectId == projectId)
if entrys_query.count() < 1:
return None
else:
return entrys_query.fetch(1)[-1]
def get_entrys_from_given_project_db(projectId):
entrys_query = Entry.query(Entry.project.projectId == projectId)
return entrys_query.fetch(total_max_limit)
def get_entrys_from_given_user_db(projectId, userId):
entrys_query = Entry.query(Entry.user.identity == userId, Entry.project.projectId == projectId)
return entrys_query.fetch(total_max_limit)
def get_users_from_db(projectId=None):
if projectId and projectId != "":
project = get_project_from_db(projectId)
if project is not None:
userIds = project.userIds
users = []
for userId in userIds:
user = get_user_from_db(userId)
users.append(user)
return users
else:
users_q = User.query(User.type != super_user_name)
users = users_q.fetch(total_max_limit)
return users
return None
def get_user_from_db(userId):
if "@" in userId:
users_q = User.query(User.email == userId)
else:
users_q = User.query(User.identity == userId)
if users_q.count() < 1:
return None
else:
return users_q.fetch(1)[-1]
def update_users_project(projectId, userIds):
project = get_project_from_db(projectId)
project.userIds = userIds
project.put()
return project
def update_user(userId, email, type, password, projectIds):
user = get_user_from_db(userId)
if user is None:
project_name = get_project_db_name()
user = User(parent=project_db_key(project_name))
user.identity = userId
user.projectIds = []
user.email = email
user.type = type
user.password = password
if projectIds:
for projId in projectIds:
if projId and projId != CREATE_MODE and projId not in user.projectIds:
user.projectIds.append(projId)
project = get_project_from_db(projId)
if project:
project.userIds.append(userId)
project.put()
user.put()
time.sleep(1)
#repeat to create empty entrys by default
if projectIds:
for projId in projectIds:
project = get_project_from_db(projId)
if project:
entry = get_entry_from_db(projId, userId)
if entry is None:
update_entry(projId, userId, None, None, None, None)
return user
def getArrayOfDict(bos):
# this is not good
bot = bos[0]
# this is even worse
bot = '[' + bot + ']'
bol = json.loads(bot)
return bol
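
# Shape example for getArrayOfDict (hypothetical values): the client posts one
# string of comma-separated JSON objects, so wrapping it in '[' and ']' turns
# it into a parseable JSON array.
#   bos = ['{"objectiveId": "o1", "weight": "40"}, {"objectiveId": "o2", "weight": "60"}']
#   getArrayOfDict(bos)
#   # -> [{u'objectiveId': u'o1', u'weight': u'40'}, {u'objectiveId': u'o2', u'weight': u'60'}]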
def update_project(projectId, department, group, description, defaultPassword, userIds, vendorIds, due_date, bos):
project_name = get_project_db_name()
project = get_project_from_db(projectId)
if project is None:
project = Project(parent=project_db_key(project_name))
project.projectId = projectId
project.objectiveIds = []
project.department = department
project.description = description
project.group = group
project.defaultPassword = defaultPassword
project.userIds = userIds
project.vendorIds = vendorIds
for ui in userIds:
user = get_user_from_db(ui)
if user and projectId not in user.projectIds:
user.projectIds.append(projectId)
user.put()
for vi in vendorIds:
vendor = get_user_from_db(vi)
if vendor and projectId not in vendor.projectIds:
vendor.projectIds.append(projectId)
vendor.put()
if due_date is None or due_date == "":
project.due_date = datetime.datetime.now()
else:
project.due_date = datetime.datetime.strptime(due_date.split(" ")[0], "%Y-%m-%d")
bol = getArrayOfDict(bos)
if len(project.objectiveIds) > 0:
nnbos = []
for bo in bol:
nnbos.append(bo["objectiveId"])
nnecs = []
if "evaluation_criteria" in bo:
for ec in bo["evaluation_criteria"]:
nnecs.append(ec["evaluation_criterionId"])
rbo = get_objective_from_db(projectId, bo["objectiveId"])
if rbo:
for pec in rbo.evaluation_criteriaIds:
if pec not in nnecs:
print "deleting " + pec
delete_evaluation_criterion_from_db(projectId, rbo.objectiveId, pec)
for pbo in project.objectiveIds:
if pbo not in nnbos:
print "deleting " + pbo
project.objectiveIds.remove(pbo)
delete_objective_from_db(projectId, pbo)
# get basic in
project.put()
#get more stuff
for bo in bol:
#print bo["objectiveId"] + ", " + bo["description"] + ", " + bo["weight"]
boid = bo["objectiveId"]
nbo = get_objective_from_db(projectId, boid)
if nbo is None:
nbo = Objective(parent=project_db_key(project_name))
nbo.objectiveId = boid
nbo.projectId = projectId
nbo.evaluation_criteriaIds = []
nbo.description = bo["description"]
nbo.weight = int(bo["weight"])
if "evaluation_criteria" in bo:
for ec in bo["evaluation_criteria"]:
ecid = ec["evaluation_criterionId"]
nec = get_evaluation_criteria_from_db(projectId, boid, ecid)
#print "\t" + projectId + ", " + ec["evaluation_criterionId"] + ", " + ec["evaluation_criterion"] + "\n\t" + str(nec)
if nec is None:
nec = EvaluationCriteria(parent=project_db_key(project_name))
nec.evaluation_criterionId = ecid
nec.objectiveId = boid
nec.projectId = projectId
nec.evaluation_criterion = ec["evaluation_criterion"]
nec.put()
if ecid in nbo.evaluation_criteriaIds:
iiidx = nbo.evaluation_criteriaIds.index(ecid)
nbo.evaluation_criteriaIds[iiidx] = ecid
else:
nbo.evaluation_criteriaIds.append(ecid)
nbo.put()
if nbo.objectiveId in project.objectiveIds:
iidx = project.objectiveIds.index(nbo.objectiveId)
project.objectiveIds[iidx] = nbo.objectiveId
else:
project.objectiveIds.append(nbo.objectiveId)
project.put()
return project
def get_objective_from_db(projectId, objectiveId):
objectives_query = Objective.query(Objective.objectiveId == objectiveId,
Objective.projectId == projectId)
if objectives_query.count() < 1:
return None
else:
return objectives_query.fetch(1)[-1]
def get_evaluation_criteria_from_db(projectId, objectiveId, evaluation_criterionId):
evaluation_criteria_query = EvaluationCriteria.query(EvaluationCriteria.evaluation_criterionId == evaluation_criterionId,
EvaluationCriteria.objectiveId == objectiveId,
EvaluationCriteria.projectId == projectId)
if evaluation_criteria_query.count() < 1:
return None
else:
return evaluation_criteria_query.fetch(1)[-1]
def get_entry_status(projectId, userId):
project = get_project_from_db(projectId)
entry = get_entry_from_db(projectId, userId)
lenv = len(project.vendorIds)
lene = 0
for objectiveId in project.objectiveIds:
objective = get_objective_from_db(projectId, objectiveId)
if objective:
evaluation_criteriaIds = objective.evaluation_criteriaIds
if evaluation_criteriaIds:
lene += len(evaluation_criteriaIds)
tlenv = lene * lenv
if entry and entry.vendor_output:
vsplits = json.loads(entry.vendor_output)
elenv = len(vsplits.keys())
if entry is None:
return "Incomplete"
else:
if (entry and entry.evaluation_criteria_output is None) or \
(entry and entry.evaluation_criteria_output and len(entry.evaluation_criteria_output) == 0) or \
(entry and entry.evaluation_criteria_output and len(entry.evaluation_criteria_output) < lene) or \
(entry and entry.vendor_output is None) or \
(entry and entry.vendor_output and elenv == 0) or \
(entry and entry.vendor_output and elenv < tlenv):
cur_date = datetime.datetime.now()
if project.due_date < cur_date:
return "Late"
else:
return "Incomplete"
else:
return "OK"
def get_project_status(projectId):
entrys = get_entrys_from_given_project_db(projectId)
status = "OK"
total = len(entrys)
if total > 0:
current = 0
for entry in entrys:
status = get_entry_status(projectId, entry.user.identity)
if status == "OK":
current += 1
        percentage = 100.0 * current / total
else:
percentage = 0
status = "Incomplete"
return status, percentage
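
# Completion math sketch for get_project_status (hypothetical numbers): with
# 4 entries of which 3 evaluate to "OK", percentage = 100.0 * 3 / 4 = 75.0.
# Note that the returned status reflects the last entry inspected, so callers
# should rely primarily on the percentage.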
def delete_evaluation_criterion_from_db(projectId, objectiveId, ecid):
eval_criterion = get_evaluation_criteria_from_db(projectId, objectiveId, ecid)
objective = get_objective_from_db(projectId, objectiveId)
objective.evaluation_criteriaIds.remove(ecid)
objective.put()
if eval_criterion:
key = eval_criterion.key
if key:
print '\tdeleting ' + eval_criterion.evaluation_criterionId
key.delete()
def delete_objective_from_db(projectId, objectiveId):
objective = get_objective_from_db(projectId, objectiveId)
if objective:
for ecid in objective.evaluation_criteriaIds:
# print objective
# print " *** looking for : " + objectiveId + ", " + ecid
evaluation_criterion = get_evaluation_criteria_from_db(projectId, objectiveId, ecid)
if evaluation_criterion:
key = evaluation_criterion.key
if key:
print '\tdeleting ' + evaluation_criterion.evaluation_criterion
key.delete()
key = objective.key
if key:
print '\tdeleting ' + objective.objectiveId
key.delete()
def delete_project_from_db(projectId):
print 'deleting ' + projectId
project = get_project_from_db(projectId)
entrys = get_entrys_from_given_project_db(projectId)
vendorIds = project.vendorIds
for vendorId in vendorIds:
vendor = get_vendor_from_db(vendorId)
if vendor:
if projectId in vendor.projectIds:
vendor.projectIds.remove(projectId)
vendor.put()
userIds = project.userIds
for userId in userIds:
        user = get_user_from_db(userId)
if user:
if projectId in user.projectIds:
user.projectIds.remove(projectId)
user.put()
for objectiveId in project.objectiveIds:
delete_objective_from_db(projectId, objectiveId)
for entry in entrys:
key = entry.key
if key:
key.delete()
key = project.key
if key:
key.delete()
def delete_entry_from_db(entry):
key = entry.key
if key:
key.delete()
def delete_users_from_db():
users = get_users_from_db(None)
if users:
for user in users:
delete_user_from_db(user.identity)
def delete_user_from_db(userId):
user = get_user_from_db(userId)
if user:
projectIds = user.projectIds
for projectId in projectIds:
            entrys = get_entrys_from_given_user_db(projectId, userId)
            for entry in entrys:
                delete_entry_from_db(entry)
key = user.key
if key:
key.delete()
def update_entry(projectId, userId, evaluation_criteria, evaluation_criteria_output, vendor_output, weights):
entry = get_entry_from_db(projectId, userId)
if entry is None:
project_name = DEFAULT_PROJECT_NAME
entry = Entry(parent=project_db_key(project_name))
entry.user = get_user_from_db(userId)
entry.project = get_project_from_db(projectId)
if evaluation_criteria:
entry.evaluation_criteria = evaluation_criteria.split(",")
if evaluation_criteria_output:
entry.evaluation_criteria_output = evaluation_criteria_output.split(",")
if weights:
sweights = json.loads(weights)
for weight in sweights:
entry.weights.append(weight + ":" + str(sweights[weight]))
if vendor_output:
entry.vendor_output = vendor_output
entry.put()
return entry
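
# Storage-format sketch for update_entry (hypothetical values): weights arrive
# as a JSON object and are flattened into "criterion:value" strings, e.g.
#   update_entry('P1', 'u1', 'c1,c2', '3,4', None, '{"c1": 0.4, "c2": 0.6}')
# leaves entry.weights holding ["c1:0.4", "c2:0.6"] (dict order not
# guaranteed), which get_all_data_from_calc later splits back apart on ":".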
def get_vendors_from_db(projectId=None):
if projectId and projectId != "":
project = get_project_from_db(projectId)
if project is not None:
vendorIds = project.vendorIds
vendors = []
for vendorId in vendorIds:
vendor = get_vendor_from_db(vendorId)
vendors.append(vendor)
return vendors
else:
vendors_q = Vendor.query()
vendors = vendors_q.fetch(total_max_limit)
return vendors
return None
def get_vendor_from_db(vendorId):
vendors_q = Vendor.query(Vendor.identity == vendorId)
if vendors_q.count() < 1:
return None
else:
return vendors_q.fetch(1)[-1]
def update_vendor(vendorId, email, projectIds):
vendor = get_vendor_from_db(vendorId)
if vendor is None:
project_name = get_project_db_name()
vendor = Vendor(parent=project_db_key(project_name))
vendor.identity = vendorId
vendor.projectIds = []
vendor.email = email
if projectIds:
for projId in projectIds:
if projId and projId != CREATE_MODE and projId not in vendor.projectIds:
vendor.projectIds.append(projId)
project = get_project_from_db(projId)
if project:
project.vendorIds.append(vendorId)
project.put()
vendor.put()
time.sleep(1)
return vendor
def delete_vendor_from_db(vendorId):
vendor = get_vendor_from_db(vendorId)
if vendor:
key = vendor.key
if key:
key.delete()
def delete_vendors_from_db():
vendors = get_vendors_from_db(None)
if vendors:
for vendor in vendors:
key = vendor.key
if key:
key.delete()
def get_all_data_from_calc(project):
entrys = get_entrys_from_given_project_db(project.projectId)
criteria_average_dict = {}
vendor_scores_dict = {}
criteria_to_users_map = {}
total = len(entrys)
if total > 0:
for entry in entrys:
for weight_splits in entry.weights:
req_weight = weight_splits.split(":")
try:
f_weight = float(req_weight[1])
except ValueError:
f_weight = 0.0
if req_weight[0] in criteria_average_dict:
criteria_average_dict[req_weight[0]] += f_weight
else:
criteria_average_dict[req_weight[0]] = f_weight
rkey = req_weight[0].replace(" ", "^")
if rkey not in criteria_to_users_map:
criteria_to_users_map[rkey] = []
criteria_to_users_map[rkey].append({"userId": str(entry.user.identity), "weight": str(req_weight[1])})
if entry.vendor_output:
vsplits = json.loads(entry.vendor_output)
for key in vsplits:
score = int(vsplits[key])
nkey = str(key)
if key in vendor_scores_dict:
vendor_scores_dict[nkey] += int(score)
else:
vendor_scores_dict[nkey] = int(score)
for key in criteria_average_dict:
criteria_average_dict[key] = float(criteria_average_dict[key]) / float(total)
for key in vendor_scores_dict:
vendor_scores_dict[key] = float(vendor_scores_dict[key]) / float(total)
return criteria_average_dict, vendor_scores_dict, criteria_to_users_map
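
# Averaging sketch (hypothetical values): two entries weighting criterion "c1"
# as 0.4 and 0.8 give criteria_average_dict["c1"] = (0.4 + 0.8) / 2 = 0.6.
# Vendor scores from entry.vendor_output are likewise divided by the number
# of entries, not by the number of scores actually submitted.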
def get_business_objectives_from_db(projectId, withCalc):
bos_db = []
topVendor = None
project = get_project_from_db(projectId)
start = time.clock()
criteria_to_users_map = None
if withCalc:
start = time.clock()
criteria_average_dict, vendor_scores_dict, criteria_to_users_map = get_all_data_from_calc(project)
print str(time.clock() - start)
start = time.clock()
for objectiveId in project.objectiveIds:
objective = get_objective_from_db(projectId, objectiveId)
if objective:
evaluation_criteriaIds = objective.evaluation_criteriaIds
evaluation_criteria = []
for evaluation_criterionId in evaluation_criteriaIds:
evaluation_criterion = get_evaluation_criteria_from_db(projectId, objectiveId, evaluation_criterionId)
if evaluation_criterion:
if withCalc:
calculations = {}
if evaluation_criterion.evaluation_criterion in criteria_average_dict:
criteria_average = criteria_average_dict[evaluation_criterion.evaluation_criterion]
calculations["criteria_average"] = criteria_average
else:
calculations["criteria_average"] = 0
calculations["criteria_weight"] = 0
for vendorId in project.vendorIds:
key = str(vendorId) + "_vendor_score"
skey = str(vendorId).replace(" ", "^")+"^"+evaluation_criterion.evaluation_criterion.replace(" ", "^")
if skey in vendor_scores_dict:
vendor_score = float(vendor_scores_dict[skey])
else:
vendor_score = 0
calculations[key] = vendor_score
evaluation_criterion.calculations = calculations
evaluation_criteria.append(evaluation_criterion)
objective.evaluation_criteria = evaluation_criteria
bos_db.append(objective)
print str(time.clock() - start)
return bos_db, criteria_to_users_map
def send_reminders(tolist, title, content):
for toaddr in tolist:
user = get_user_from_db(toaddr)
send_message(user, title, content)
def send_entry_completion(projectId, userId):
user = get_admin_user(projectId)
if user:
title = ENTRY_SAVED_TITLE
content = ENTRY_SAVED_MESSAGE.format(toUser=user.identity, aboutUser=userId)
send_message(user, title, content)
def get_admin_user(projectId):
users = get_users_from_db(projectId)
for user in users:
if user.type != "User":
return user
return None
def run_manage():
gae_app_id = app_identity.get_application_id()
gae_env = None
if gae_app_id in gae_environments:
gae_env = gae_environments[gae_app_id]
print "Running in " + gae_env + " : " + gae_app_id
else:
print 'Running in ' + gae_app_id
    if gae_env is None:
        gae_env = "purple"
project_query = Project.query()
projects = project_query.fetch(total_max_limit)
if projects:
print "Managing " + str(len(projects))
count = 0
for project in projects:
print project.projectId
count += 1
            if count > 5:
time.sleep(2)
status, percentage = get_project_status(project.projectId)
print "\t" + str(status) + ", " + str(percentage)
if status != "OK" or percentage < 100:
user = get_admin_user(project.projectId)
print "\tAdmin to " + project.projectId + " is " + user.identity
if user:
title = PROJECT_REMINDER_TITLE.format(env=gae_env)
message = PROJECT_REMINDER_MESSAGE.format(projectId=project.projectId, env=gae_env)
send_message(user, title, message)
time.sleep(2)
def send_message(user, title, message):
print "Sending email to " + user.email
mail.send_mail(sender=sender_address,
to=user.email,
subject=title,
body=message)
if hasattr(user, 'token') and user.token:
send_notification(user.token, title, message)
def send_notification(toaddr, title, content):
print 'send_notification ' + toaddr + ", " + title + ", " + content
headers = fcm_headers
url = fcm_server
data = {'priority': 'high', 'to': toaddr, \
'notification' : {'badge': '1', 'sound' : 'default', 'title' : title, 'body' : content}}
try:
opener = urllib2.build_opener()
req = urllib2.Request(url, data=json.dumps(data), headers=headers)
resp = opener.open(req)
print "OK - Notification sent"
except urllib2.HTTPError as e:
error_message = e.read()
print error_message
def update_token(userId, token):
print "In update_token: " + userId + ", " + token
user = get_user_from_db(userId)
if user.token != token:
user.token = token
user.put()
def get_user_type_from_db(identity):
user = get_user_from_db(identity)
return user.type
def is_user_first_login(identity):
user = get_user_from_db(identity)
return user.isFirstLogin
|
paddyvishnubhatt/daranalysis
|
utils.py
|
Python
|
apache-2.0
| 24,212
|
[
"Amber"
] |
dbfae0cbd424b5afe4decd6375c1ef7d60e5e7edc540b3e94b0a6ecdf282f2aa
|
#!/usr/bin/env python
"""
tree_encode.py
Contains methods that convert a binary tree (NetworkX DiGraph)
into a bit string.
Henrik Ronellenfitsch 2013
"""
#import sys
#sys.path.append('../')
#import analyzer
import networkx as nx
import matplotlib.pyplot as plt
import numpy.random
from numpy import *
import bz2
import zlib
import pylzma
import zss
import seaborn as sns
def pargsort(seq):
""" Like numpy's argsort, but works on python lists.
"""
# http://stackoverflow.com/questions/3071415/efficient-method-to-calculate-the-rank-vector-of-a-list-in-python
return sorted(range(len(seq)), key = seq.__getitem__)
def dfs_preorder_nodes_ordered(G,source=None):
"""Produce nodes in a depth-first-search pre-ordering starting at source."""
pre=(v for u,v,d in dfs_labeled_edges_ordered(G,source=source)
if d['dir']=='forward')
# chain source to beginning of pre-ordering
# return chain([source],pre)
return pre
def dfs_labeled_edges_ordered(G,source=None):
"""Produce edges in a depth-first-search starting at source and
labeled by direction type (forward, reverse, nontree).
    Supports ordered traversal of the graph.
"""
# Based on http://www.ics.uci.edu/~eppstein/PADS/DFS.py
# by D. Eppstein, July 2004.
if source is None:
# produce edges for all components
nodes=G
else:
# produce edges for components with source
nodes=[source]
orderfun = lambda x: G.node[x]['order']
visited=set()
for start in nodes:
if start in visited:
continue
yield start,start,{'dir':'forward'}
visited.add(start)
stack = [(start, iter(sorted(G[start], key=orderfun)))]
while stack:
parent,children = stack[-1]
try:
child = next(children)
if child in visited:
yield parent,child,{'dir':'nontree'}
else:
yield parent,child,{'dir':'forward'}
visited.add(child)
stack.append((child,
iter(sorted(G[child], key=orderfun))))
except StopIteration:
stack.pop()
if stack:
yield stack[-1][0],parent,{'dir':'reverse'}
yield start,start,{'dir':'reverse'}
def encode_tree(G, root=None):
""" Encodes the binary tree G into a string containing only
zeros and ones.
All attributes are ignored, we only encode the pure tree structure.
However, each node must have the 'order' attribute indicating
whether it is the "left" or "right" subtree by being set to
    some number. The larger number indicates the right subtree.
Parameters:
G: NetworkX DiGraph
        root: Root node to use. If set to None, will use
G.graph['root']
Returns:
s: String which corresponds to the encoded tree.
"""
    if root is None:
root = G.graph['root']
s = ''
for node in dfs_preorder_nodes_ordered(G, source=root):
if len(G.successors(node)) == 2:
s += '1'
else:
s += '0'
return s
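
# Worked encoding example (a sketch, not from the original module; uses the
# NetworkX 1.x node-attribute API this module relies on): a root with two
# leaves is visited in pre-order, emitting '1' for the bifurcation and '0'
# per leaf.
#   >>> T = nx.DiGraph()
#   >>> T.add_edges_from([(0, 1), (0, 2)])
#   >>> for n in T: T.node[n]['order'] = n
#   >>> T.graph['root'] = 0
#   >>> encode_tree(T)
#   '100'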
def canonize_tree(G):
""" Orders the binary tree G by bringing it into AHU canonical
form. Effectively, we calculate the complete tree isomorphism
invariant, and then reconstruct the canonical tree using that
description. The algorithm loosely follows
Aho, Hopcroft, Ullman; The Design and Analysis of Computer Algorithms,
Addison-Wesley 1974
The graph G is ordered in-place by adding 'order' node attributes,
such that it can be traversed using dfs_labeled_egdes_ordered
Parameters:
G: NetworkX DiGraph binary tree.
The root's node label should be in G.graph['root']
Returns:
cname: The canonical name (string) associated to the graph G.
"""
cname = ''
# Assign canonical names
for node in nx.dfs_postorder_nodes(G, source=G.graph['root']):
succ = G.successors(node)
if len(succ) == 0:
G.node[node]['canonical-name'] = '10'
else:
cname0 = G.node[succ[0]]['canonical-name']
cname1 = G.node[succ[1]]['canonical-name']
# Sort children's canonical names
children_cnames = [cname0] + [cname1]
cinds = pargsort(children_cnames)
cname = '1' + children_cnames[cinds[0]] + \
children_cnames[cinds[1]] + '0'
G.node[node]['canonical-name'] = cname
G.node[succ[cinds[0]]]['order'] = 1
G.node[succ[cinds[1]]]['order'] = 2
G.node[node]['order'] = 0
return cname
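
# Canonical-name sketch: a leaf is named '10' and an internal node wraps its
# children's sorted names in '1' ... '0', so a root with two leaves receives
# '1' + '10' + '10' + '0' == '110100' regardless of the children's original
# left/right order.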
def decode_tree(s):
""" Decodes the string s, which contains only zeros and ones,
into a NetworkX DiGraph corresponding to the encoded binary tree.
Parameters:
s: A string containing zeros and ones, describing a binary tree.
Returns:
G: NetworkX DiGraph corresponding to s.
"""
G = nx.DiGraph()
G.add_node(0, order=0)
G.graph['root'] = 0
not_yet_visited = []
max_node_id = 0
next_node_id = 0
# Construct the tree
for i in xrange(len(s) - 1):
char = s[i]
if char == '1':
# Create new junction
G.add_edge(next_node_id, max_node_id + 1, order=1)
G.add_edge(next_node_id, max_node_id + 2, order=2)
G.add_node(max_node_id + 1, order=1)
G.add_node(max_node_id + 2, order=2)
next_node_id = max_node_id + 1
not_yet_visited.append(max_node_id + 2)
max_node_id += 2
elif char == '0':
# Reached leaf node. Next node to visit is on stack.
next_node_id = not_yet_visited.pop()
return G
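
# Round-trip sketch: decode_tree('100') rebuilds the three-node tree from the
# encoding example above (root 0 with ordered children 1 and 2), and
# encode_tree(decode_tree(s)) == s holds for any string that encode_tree
# produced, since the trailing '0' skipped during decoding is always a leaf.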
def decode_tree_zss(s):
""" Decodes the string s containing zeros and ones
into a tree object compatible with the zss library
implementing the Zhang & Shasha (1989) tree edit distance algorithm.
The node labels are strings containing integers ordered according to
pre-order tree traversal.
Parameters:
s: A string containing zeros and ones, describing a binary tree.
Returns:
G: A zss.Node object representing the tree stored in s.
"""
G = zss.Node('0')
not_yet_visited = []
max_node_id = 0
next_node = G
# Construct the tree
for i in xrange(len(s) - 1):
char = s[i]
if char == '1':
left = zss.Node(str(max_node_id + 1))
right = zss.Node(str(max_node_id + 2))
next_node.addkid(left)
next_node.addkid(right)
next_node = left
not_yet_visited.append(right)
max_node_id += 2
else:
next_node = not_yet_visited.pop()
return G
def _random_binary_tree(G, root, depth, p):
""" Recursively constructs a random binary tree.
"""
if depth == 0:
return
i = G.number_of_nodes()
j = i + 1
if numpy.random.random() < p:
G.add_edge(root, i)
G.add_edge(root, j)
_random_binary_tree(G, i, depth - 1, p)
_random_binary_tree(G, j, depth - 1, p)
def random_binary_tree(depth, p=.8):
""" Constructs a random binary tree with given maximal depth
and probability p of bifurcating.
Parameters:
depth: The maximum depth the tree can have (might not be reached
due to probabilistic nature of algorithm)
p: Probability that any node bifurcates.
Returns:
G: Random binary tree NetworkX DiGraph
"""
G = nx.DiGraph()
G.add_node(0)
G.graph['root'] = 0
_random_binary_tree(G, 0, depth, p)
return G
def random_binary_tree_bottomup(degree):
""" Constructs a random binary tree with given degree by
randomly connecting nodes bottom up.
This models a completely random hierarchical tree.
Parameters:
degree: The degree of the final tree graph.
Returns:
T: the final random tree graph.
"""
T = nx.DiGraph()
cur_nodes = range(degree)
for i in xrange(degree - 1):
# Connect two random nodes
j1 = numpy.random.randint(degree - i)
n1 = cur_nodes[j1]
del cur_nodes[j1]
j2 = numpy.random.randint(degree - i - 1)
n2 = cur_nodes[j2]
del cur_nodes[j2]
new_node = degree + i
T.add_edge(new_node, n1)
T.add_edge(new_node, n2)
cur_nodes.append(new_node)
T.graph['root'] = new_node
return T
def random_binary_tree_bottomup_preferential(degree):
""" Constructs a random binary tree with given degree by
randomly connecting nodes bottom up.
    This models a completely random hierarchical tree, but with
    preferential attachment proportional to subtree degree.
    (Note: no preferential weighting is applied in the body below;
    it is currently identical to random_binary_tree_bottomup.)
Parameters:
degree: The degree of the final tree graph.
Returns:
T: the final random tree graph.
"""
T = nx.DiGraph()
cur_nodes = range(degree)
for i in xrange(degree - 1):
# Connect two random nodes
j1 = numpy.random.randint(degree - i)
n1 = cur_nodes[j1]
del cur_nodes[j1]
j2 = numpy.random.randint(degree - i - 1)
n2 = cur_nodes[j2]
del cur_nodes[j2]
new_node = degree + i
T.add_edge(new_node, n1)
T.add_edge(new_node, n2)
cur_nodes.append(new_node)
T.graph['root'] = new_node
return T
def _random_binary_tree_uniform(root, degree_max, eps, p):
""" Recursively constructs a random binary tree.
Stops if number of nodes exceeds nodes
"""
G = nx.DiGraph()
G.add_node(0)
G.graph['root'] = 0
stack = [root]
cur_degree = 1
while stack:
cur_root = stack.pop()
if cur_degree > degree_max + eps:
return G
i = G.number_of_nodes()
j = i + 1
if numpy.random.random() < p:
G.add_edge(cur_root, i)
G.add_edge(cur_root, j)
stack.append(i)
stack.append(j)
cur_degree += 1
return G
def uniform_random_tree_sample(degree, size, eps):
""" Obtains a uniformly random sample of binary
tree of desired size and degree.
Uses a Boltzmann sampling algorithm.
Parameters:
degree: The degree of trees to sample (i.e. number of leaf nodes)
size: The sample size
eps: The allowed error in degree
Returns:
sample: List of NetworkX digraphs with the desired sample
degs: List of degrees of the sample
"""
print "Sampling uniform {} random trees with degree {}, eps={}".format(
size, degree, eps)
sample = []
degs = []
while len(sample) < size:
# Try to generate a new tree
G = _random_binary_tree_uniform(0, degree, eps, 0.5)
# Count degree in horribly inefficient way
deg = 0
for n, d in G.degree_iter():
if d == 1:
deg += 1
if deg >= degree - eps and deg <= degree + eps:
sample.append(G)
degs.append(deg)
print "Found {}/{} sample trees.".format(len(sample), size)
return sample, degs
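
# Usage sketch: sample, degs = uniform_random_tree_sample(32, 10, 2) keeps
# rejection-sampling _random_binary_tree_uniform(0, 32, 2, 0.5) until ten
# trees with between 30 and 34 leaves have been collected; p = 0.5 makes the
# underlying Boltzmann-style generator critical, which is what yields an
# (approximately) uniform sample over tree shapes.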
if __name__ == '__main__':
pass
# Do some tests
# print "Uniformly random distribution of trees"
# uniform_random_tree_sample(100, 50, 1)
#
# print "Random binary tree..."
# #G = random_binary_tree(15, p=0.8)
# G = random_binary_tree_bottomup(100)
# canon = canonize_tree(G)
#
# print "Encoding..."
# s = encode_tree(G)
#
# print "Compressing"
#
# print "Length (string): ", len(s)
# print "Length (bitstring):", ceil(len(s)/8.)
# ss = bz2.compress(s)
# sz = zlib.compress(s)
# sl = pylzma.compress(s)
# print "Compressed Length (bz2):", len(ss)
# print "Compressed Length (zlib):", len(sz)
# print "Compressed Length (lzma):", len(sl)
#
# print "Canonical Length (string):", len(canon)
# print "Canonical Length (bitstring):", ceil(len(canon)/8.)
# cs = bz2.compress(canon)
# cz = zlib.compress(canon)
# cl = pylzma.compress(canon)
# print "Canonical Compressed Length (bz2):", len(cs)
# print "Canonical Compressed Length (zlib):", len(cz)
# print "Canonical Compressed Length (lzma):", len(cl)
#
# print "Decoding..."
# GG = decode_tree(s)
#
# gg_canon = canonize_tree(GG)
#
# print "Isomorphic by canonical name: ", canon == gg_canon
# #print "Isomorphic by VF2:", nx.is_isomorphic(G, GG)
#
# # Large number of concatenated trees
# s = ''
# sc = ''
# i = 0
# for i in xrange(150):
# try:
# G = random_binary_tree(17, p=0.8)
# sc += canonize_tree(G)
# s += encode_tree(G)
# i += 1
# except:
# pass
#
# ss = bz2.compress(s)
# scs = bz2.compress(sc)
#
# print i
# print len(s)
# print len(ss)
#
# print len(sc)
# print len(scs)
#
# sns.set_style('white')
# def power_dist(fnc, args, kwargs, n):
# ts = [fnc(*args, **kwargs) for i in xrange(n)]
# ts = [t for t in ts if t.number_of_nodes() > 2**7]
# print len(ts)
#
# def get_power(t):
# analyzer.mark_subtrees(t)
#
# degs = [d['subtree-degree'] for n, d in t.nodes_iter(data=True)
# if d['subtree-degree'] > 1]
#
# all_degs = array(sorted(list(set(degs))))
#
# counts = []
# for d in all_degs:
# counts.append(where(degs == d)[0].shape[0])
#
# z = polyfit(log(all_degs)[all_degs < 2**6],
# log(counts)[all_degs < 2**6], 1)
#
# return z[0]
#
# print "Calculating power law..."
# pws = [get_power(t) for t in ts]
# print "Done."
# return array(pws)
#
# # completely random
# plt.figure()
# ps = [0.5, 0.6, 0.7, 0.8, 0.9, 0.99, 1.0]
# ix = [20000, 10000, 800, 200, 100, 100, 10]
# means = []
# stds = []
# for p, i in zip(ps, ix):
# pws = power_dist(random_binary_tree, (10,), {'p': p}, i)
# #plt.hist(pws, normed=True, label='$p={}$'.format(p),
# # rwidth=0.9, linewidth=0, range=(-1.6, -0.8), bins=27)
#
# means.append(pws.mean())
# stds.append(pws.std(ddof=1))
#
# #plt.legend()
# #plt.xlabel('subtree degree distribution $\gamma$')
# #plt.ylabel('probability density')
# plt.errorbar(ps, means, yerr=stds)
# plt.xlabel('bifurcation probability')
# plt.ylabel('subtree degree distribution power')
# plt.savefig('PaperPlots/rand_tree_subtree_deg_dist.svg',
# bbox_inches='tight')
# # bottom up random
# #plt.figure()
# #for i in xrange(11, 14):
# # pws = power_dist(random_binary_tree_bottomup, (2**i,), {}, 100)
# # plt.hist(pws, normed=True, rwidth=0.9, linewidth=0,
# # label='deg = 2^{}'.format(i))
# #plt.legend()
|
hronellenfitsch/nesting
|
tree_encode.py
|
Python
|
mit
| 15,060
|
[
"VisIt"
] |
aecae1792fe9eadc6341dda11359945eac9752263c216ce8754608a5057b7df9
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with functions that call the four main :py:mod:`driver`
functions: :py:mod:`driver.energy`, :py:mod:`driver.optimize`,
:py:mod:`driver.response`, and :py:mod:`driver.frequency`.
"""
import collections
import math
import os
import re
import sys
from psi4.driver import constants
from psi4.driver import p4util
from psi4.driver.driver import *
# never import aliases into this file
#########################
## Start of Database ##
#########################
DB_RGT = {}
DB_RXN = {}
def database(name, db_name, **kwargs):
r"""Function to access the molecule objects and reference energies of
popular chemical databases.
:aliases: db()
:returns: (*float*) Mean absolute deviation of the database in kcal/mol
:PSI variables:
.. hlist::
:columns: 1
* :psivar:`db_name DATABASE MEAN SIGNED DEVIATION`
* :psivar:`db_name DATABASE MEAN ABSOLUTE DEVIATION`
* :psivar:`db_name DATABASE ROOT-MEAN-SQUARE DEVIATION`
* Python dictionaries of results accessible as ``DB_RGT`` and ``DB_RXN``.
.. note:: It is very easy to make a database from a collection of xyz files
using the script :source:`psi4/share/psi4/scripts/ixyz2database.py`.
See :ref:`sec:createDatabase` for details.
.. caution:: Some features are not yet implemented. Buy a developer some coffee.
- In sow/reap mode, use only global options (e.g., the local option set by ``set scf scf_type df`` will not be respected).
.. note:: To access a database that is not embedded in a |PSIfour|
distribution, add the path to the directory containing the database
to the environment variable :envvar:`PYTHONPATH`.
:type name: str
:param name: ``'scf'`` || ``'sapt0'`` || ``'ccsd(t)'`` || etc.
First argument, usually unlabeled. Indicates the computational method
to be applied to the database. May be any valid argument to
:py:func:`psi4.energy`.
:type db_name: str
:param db_name: ``'BASIC'`` || ``'S22'`` || ``'HTBH'`` || etc.
Second argument, usually unlabeled. Indicates the requested database
name, matching (case insensitive) the name of a python file in
``psi4/share/databases`` or :envvar:`PYTHONPATH`. Consult that
directory for available databases and literature citations.
:type func: :ref:`function <op_py_function>`
:param func: |dl| ``energy`` |dr| || ``optimize`` || ``cbs``
Indicates the type of calculation to be performed on each database
member. The default performs a single-point ``energy('name')``, while
        ``optimize`` performs a geometry optimization on each reagent, and
``cbs`` performs a compound single-point energy. If a nested series
of python functions is intended (see :ref:`sec:intercalls`), use
keyword ``db_func`` instead of ``func``.
:type mode: str
:param mode: |dl| ``'continuous'`` |dr| || ``'sow'`` || ``'reap'``
Indicates whether the calculations required to complete the
database are to be run in one file (``'continuous'``) or are to be
farmed out in an embarrassingly parallel fashion
(``'sow'``/``'reap'``). For the latter, run an initial job with
``'sow'`` and follow instructions in its output file.
:type cp: :ref:`boolean <op_py_boolean>`
:param cp: ``'on'`` || |dl| ``'off'`` |dr|
Indicates whether counterpoise correction is employed in computing
interaction energies. Use this option and NOT the ``bsse_type="cp"``
function for BSSE correction in database(). Option available
(See :ref:`sec:availableDatabases`) only for databases of bimolecular complexes.
:type rlxd: :ref:`boolean <op_py_boolean>`
:param rlxd: ``'on'`` || |dl| ``'off'`` |dr|
Indicates whether correction for deformation energy is
employed in computing interaction energies. Option available
(See :ref:`sec:availableDatabases`) only for databases of bimolecular complexes
with non-frozen monomers, e.g., HBC6.
:type symm: :ref:`boolean <op_py_boolean>`
:param symm: |dl| ``'on'`` |dr| || ``'off'``
Indicates whether the native symmetry of the database reagents is
employed (``'on'``) or whether it is forced to :math:`C_1` symmetry
(``'off'``). Some computational methods (e.g., SAPT) require no
symmetry, and this will be set by database().
:type zpe: :ref:`boolean <op_py_boolean>`
:param zpe: ``'on'`` || |dl| ``'off'`` |dr|
Indicates whether zero-point-energy corrections are appended to
single-point energy values. Option valid only for certain
thermochemical databases. Disabled until Hessians ready.
:type benchmark: str
:param benchmark: |dl| ``'default'`` |dr| || ``'S22A'`` || etc.
Indicates whether a non-default set of reference energies, if
available (See :ref:`sec:availableDatabases`), are employed for the
calculation of error statistics.
:type tabulate: List[str]
:param tabulate: |dl| ``[]`` |dr| || ``['scf total energy', 'natom']`` || etc.
Indicates whether to form tables of variables other than the
primary requested energy. Available for any PSI variable.
:type subset: Union[str, List[str]]
:param subset:
Indicates a subset of the full database to run. This is a very
flexible option and can be used in three distinct ways, outlined
below. Note that two take a string and the last takes an array.
See :ref:`sec:availableDatabases` for available values.
* ``'small'`` || ``'large'`` || ``'equilibrium'``
Calls predefined subsets of the requested database, either
``'small'``, a few of the smallest database members,
``'large'``, the largest of the database members, or
``'equilibrium'``, the equilibrium geometries for a database
composed of dissociation curves.
* ``'BzBz_S'`` || ``'FaOOFaON'`` || ``'ArNe'`` || ``'HB'`` || etc.
For databases composed of dissociation curves, or otherwise
divided into subsets, individual curves and subsets can be
called by name. Consult the database python files for available
molecular systems (case insensitive).
* ``[1,2,5]`` || ``['1','2','5']`` || ``['BzMe-3.5', 'MeMe-5.0']`` || etc.
Specify a list of database members to run. Consult the
database python files for available molecular systems. This
is the only portion of database input that is case sensitive;
choices for this keyword must match the database python file.
:examples:
>>> # [1] Two-stage SCF calculation on short, equilibrium, and long helium dimer
>>> db('scf','RGC10',cast_up='sto-3g',subset=['HeHe-0.85','HeHe-1.0','HeHe-1.5'], tabulate=['scf total energy','natom'])
>>> # [2] Counterpoise-corrected interaction energies for three complexes in S22
>>> # Error statistics computed wrt an old benchmark, S22A
>>> database('mp2','S22',cp=1,subset=[16,17,8],benchmark='S22A')
>>> # [3] SAPT0 on the neon dimer dissociation curve
>>> db('sapt0',subset='NeNe',cp=0,symm=0,db_name='RGC10')
>>> # [4] Optimize system 1 in database S22, producing tables of scf and mp2 energy
>>> db('mp2','S22',db_func=optimize,subset=[1], tabulate=['mp2 total energy','current energy'])
>>> # [5] CCSD on the smallest systems of HTBH, a hydrogen-transfer database
>>> database('ccsd','HTBH',subset='small', tabulate=['ccsd total energy', 'mp2 total energy'])
"""
lowername = name #TODO
kwargs = p4util.kwargs_lower(kwargs)
# Wrap any positional arguments into kwargs (for intercalls among wrappers)
if not('name' in kwargs) and name:
kwargs['name'] = name #.lower()
if not('db_name' in kwargs) and db_name:
kwargs['db_name'] = db_name
# Establish function to call
func = kwargs.pop('db_func', kwargs.pop('func', energy))
kwargs['db_func'] = func
# Bounce to CP if bsse kwarg (someday)
if kwargs.get('bsse_type', None) is not None:
raise ValidationError("""Database: Cannot specify bsse_type for database. Use the cp keyword withing database instead.""")
allowoptexceeded = kwargs.get('allowoptexceeded', False)
optstash = p4util.OptionsState(
['WRITER_FILE_LABEL'],
['SCF', 'REFERENCE'])
# Wrapper wholly defines molecule. discard any passed-in
kwargs.pop('molecule', None)
# Paths to search for database files: here + PSIPATH + library + PYTHONPATH
db_paths = []
db_paths.append(os.getcwd())
db_paths.extend(os.environ.get('PSIPATH', '').split(os.path.pathsep))
db_paths.append(os.path.join(core.get_datadir(), 'databases'))
db_paths.append(os.path.dirname(__file__))
db_paths = list(map(os.path.abspath, db_paths))
sys.path[1:1] = db_paths
# TODO this should be modernized a la interface_cfour
# Define path and load module for requested database
database = p4util.import_ignorecase(db_name)
if database is None:
core.print_out('\nPython module for database %s failed to load\n\n' % (db_name))
core.print_out('\nSearch path that was tried:\n')
core.print_out(", ".join(map(str, sys.path)))
raise ValidationError("Python module loading problem for database " + str(db_name))
else:
dbse = database.dbse
HRXN = database.HRXN
ACTV = database.ACTV
RXNM = database.RXNM
BIND = database.BIND
TAGL = database.TAGL
GEOS = database.GEOS
try:
DATA = database.DATA
except AttributeError:
DATA = {}
user_writer_file_label = core.get_global_option('WRITER_FILE_LABEL')
user_reference = core.get_global_option('REFERENCE')
# Configuration based upon e_name & db_name options
# Force non-supramolecular if needed
if not hasattr(lowername, '__call__') and re.match(r'^.*sapt', lowername):
try:
database.ACTV_SA
except AttributeError:
raise ValidationError('Database %s not suitable for non-supramolecular calculation.' % (db_name))
else:
ACTV = database.ACTV_SA
# Force open-shell if needed
openshell_override = 0
if user_reference in ['RHF', 'RKS']:
try:
database.isOS
except AttributeError:
pass
else:
if p4util.yes.match(str(database.isOS)):
openshell_override = 1
core.print_out('\nSome reagents in database %s require an open-shell reference; will be reset to UHF/UKS as needed.\n' % (db_name))
# Configuration based upon database keyword options
# Option symmetry- whether symmetry treated normally or turned off (currently req'd for dfmp2 & dft)
db_symm = kwargs.get('symm', True)
symmetry_override = 0
if db_symm is False:
symmetry_override = 1
elif db_symm is True:
pass
else:
raise ValidationError("""Symmetry mode '%s' not valid.""" % (db_symm))
# Option mode of operation- whether db run in one job or files farmed out
db_mode = kwargs.pop('db_mode', kwargs.pop('mode', 'continuous')).lower()
kwargs['db_mode'] = db_mode
if db_mode == 'continuous':
pass
elif db_mode == 'sow':
pass
elif db_mode == 'reap':
db_linkage = kwargs.get('linkage', None)
if db_linkage is None:
raise ValidationError("""Database execution mode 'reap' requires a linkage option.""")
else:
raise ValidationError("""Database execution mode '%s' not valid.""" % (db_mode))
# Option counterpoise- whether for interaction energy databases run in bsse-corrected or not
db_cp = kwargs.get('cp', False)
if db_cp is True:
try:
database.ACTV_CP
except AttributeError:
raise ValidationError("""Counterpoise correction mode 'yes' invalid for database %s.""" % (db_name))
else:
ACTV = database.ACTV_CP
elif db_cp is False:
pass
else:
raise ValidationError("""Counterpoise correction mode '%s' not valid.""" % (db_cp))
# Option relaxed- whether for non-frozen-monomer interaction energy databases include deformation correction or not?
db_rlxd = kwargs.get('rlxd', False)
if db_rlxd is True:
if db_cp is True:
try:
database.ACTV_CPRLX
database.RXNM_CPRLX
except AttributeError:
raise ValidationError('Deformation and counterpoise correction mode \'yes\' invalid for database %s.' % (db_name))
else:
ACTV = database.ACTV_CPRLX
RXNM = database.RXNM_CPRLX
elif db_cp is False:
try:
database.ACTV_RLX
except AttributeError:
raise ValidationError('Deformation correction mode \'yes\' invalid for database %s.' % (db_name))
else:
ACTV = database.ACTV_RLX
elif db_rlxd is False:
#elif no.match(str(db_rlxd)):
pass
else:
raise ValidationError('Deformation correction mode \'%s\' not valid.' % (db_rlxd))
# Option zero-point-correction- whether for thermochem databases jobs are corrected by zpe
db_zpe = kwargs.get('zpe', False)
if db_zpe is True:
raise ValidationError('Zero-point-correction mode \'yes\' not yet implemented.')
elif db_zpe is False:
pass
else:
        raise ValidationError('Zero-point-correction mode \'%s\' not valid.' % (db_zpe))
# Option benchmark- whether error statistics computed wrt alternate reference energies
db_benchmark = 'default'
if 'benchmark' in kwargs:
db_benchmark = kwargs['benchmark']
if db_benchmark.lower() == 'default':
pass
else:
BIND = p4util.getattr_ignorecase(database, 'BIND_' + db_benchmark)
if BIND is None:
raise ValidationError('Special benchmark \'%s\' not available for database %s.' % (db_benchmark, db_name))
# Option tabulate- whether tables of variables other than primary energy method are formed
# TODO db(func=cbs,tabulate=[non-current-energy]) # broken
db_tabulate = []
if 'tabulate' in kwargs:
db_tabulate = kwargs['tabulate']
# Option subset- whether all of the database or just a portion is run
db_subset = HRXN
if 'subset' in kwargs:
db_subset = kwargs['subset']
if isinstance(db_subset, (str, bytes)):
if db_subset.lower() == 'small':
try:
database.HRXN_SM
except AttributeError:
raise ValidationError("""Special subset 'small' not available for database %s.""" % (db_name))
else:
HRXN = database.HRXN_SM
elif db_subset.lower() == 'large':
try:
database.HRXN_LG
except AttributeError:
raise ValidationError("""Special subset 'large' not available for database %s.""" % (db_name))
else:
HRXN = database.HRXN_LG
elif db_subset.lower() == 'equilibrium':
try:
database.HRXN_EQ
except AttributeError:
raise ValidationError("""Special subset 'equilibrium' not available for database %s.""" % (db_name))
else:
HRXN = database.HRXN_EQ
else:
HRXN = p4util.getattr_ignorecase(database, db_subset)
if HRXN is None:
HRXN = p4util.getattr_ignorecase(database, 'HRXN_' + db_subset)
if HRXN is None:
raise ValidationError("""Special subset '%s' not available for database %s.""" % (db_subset, db_name))
else:
temp = []
for rxn in db_subset:
if rxn in HRXN:
temp.append(rxn)
else:
raise ValidationError("""Subset element '%s' not a member of database %s.""" % (str(rxn), db_name))
HRXN = temp
temp = []
for rxn in HRXN:
temp.append(ACTV['%s-%s' % (dbse, rxn)])
HSYS = p4util.drop_duplicates(sum(temp, []))
# Sow all the necessary reagent computations
core.print_out("\n\n")
p4util.banner(("Database %s Computation" % (db_name)))
core.print_out("\n")
# write index of calcs to output file
instructions = """\n The database single-job procedure has been selected through mode='continuous'.\n"""
instructions += """ Calculations for the reagents will proceed in the order below and will be followed\n"""
instructions += """ by summary results for the database.\n\n"""
for rgt in HSYS:
instructions += """ %-s\n""" % (rgt)
core.print_out(instructions)
# Loop through chemical systems
ERGT = {}
ERXN = {}
VRGT = {}
VRXN = {}
for rgt in HSYS:
VRGT[rgt] = {}
core.print_out('\n')
p4util.banner(' Database {} Computation: Reagent {} \n {}'.format(db_name, rgt, TAGL[rgt]))
core.print_out('\n')
molecule = core.Molecule.from_dict(GEOS[rgt].to_dict())
molecule.set_name(rgt)
molecule.update_geometry()
if symmetry_override:
molecule.reset_point_group('c1')
molecule.fix_orientation(True)
molecule.fix_com(True)
molecule.update_geometry()
if (openshell_override) and (molecule.multiplicity() != 1):
if user_reference == 'RHF':
core.set_global_option('REFERENCE', 'UHF')
elif user_reference == 'RKS':
core.set_global_option('REFERENCE', 'UKS')
core.set_global_option('WRITER_FILE_LABEL', user_writer_file_label + ('' if user_writer_file_label == '' else '-') + rgt)
if allowoptexceeded:
try:
ERGT[rgt] = func(molecule=molecule, **kwargs)
except ConvergenceError:
core.print_out(f"Optimization exceeded cycles for {rgt}")
ERGT[rgt] = 0.0
else:
ERGT[rgt] = func(molecule=molecule, **kwargs)
core.print_variables()
core.print_out(" Database Contributions Map:\n {}\n".format('-' * 75))
for rxn in HRXN:
db_rxn = dbse + '-' + str(rxn)
if rgt in ACTV[db_rxn]:
core.print_out(' reagent {} contributes by {:.4f} to reaction {}\n'.format(rgt, RXNM[db_rxn][rgt], db_rxn))
core.print_out('\n')
for envv in db_tabulate:
VRGT[rgt][envv.upper()] = core.variable(envv)
core.set_global_option("REFERENCE", user_reference)
core.clean()
#core.opt_clean()
core.clean_variables()
# Reap all the necessary reaction computations
core.print_out("\n")
p4util.banner(("Database %s Results" % (db_name)))
core.print_out("\n")
maxactv = []
for rxn in HRXN:
maxactv.append(len(ACTV[dbse + '-' + str(rxn)]))
maxrgt = max(maxactv)
table_delimit = '-' * (62 + 20 * maxrgt)
tables = ''
# find any reactions that are incomplete
FAIL = collections.defaultdict(int)
for rxn in HRXN:
db_rxn = dbse + '-' + str(rxn)
for i in range(len(ACTV[db_rxn])):
if abs(ERGT[ACTV[db_rxn][i]]) < 1.0e-12:
if not allowoptexceeded:
FAIL[rxn] = 1
# tabulate requested process::environment variables
tables += """ For each VARIABLE requested by tabulate, a 'Reaction Value' will be formed from\n"""
tables += """ 'Reagent' values according to weightings 'Wt', as for the REQUESTED ENERGY below.\n"""
tables += """ Depending on the nature of the variable, this may or may not make any physical sense.\n"""
for rxn in HRXN:
db_rxn = dbse + '-' + str(rxn)
VRXN[db_rxn] = {}
for envv in db_tabulate:
envv = envv.upper()
tables += """\n ==> %s <==\n\n""" % (envv.title())
tables += _tblhead(maxrgt, table_delimit, 2)
for rxn in HRXN:
db_rxn = dbse + '-' + str(rxn)
if FAIL[rxn]:
tables += """\n%23s %8s %8s %8s %8s""" % (db_rxn, '', '****', '', '')
for i in range(len(ACTV[db_rxn])):
tables += """ %16.8f %2.0f""" % (VRGT[ACTV[db_rxn][i]][envv], RXNM[db_rxn][ACTV[db_rxn][i]])
else:
VRXN[db_rxn][envv] = 0.0
for i in range(len(ACTV[db_rxn])):
VRXN[db_rxn][envv] += VRGT[ACTV[db_rxn][i]][envv] * RXNM[db_rxn][ACTV[db_rxn][i]]
tables += """\n%23s %16.8f """ % (db_rxn, VRXN[db_rxn][envv])
for i in range(len(ACTV[db_rxn])):
tables += """ %16.8f %2.0f""" % (VRGT[ACTV[db_rxn][i]][envv], RXNM[db_rxn][ACTV[db_rxn][i]])
tables += """\n %s\n""" % (table_delimit)
# tabulate primary requested energy variable with statistics
count_rxn = 0
minDerror = 100000.0
maxDerror = 0.0
MSDerror = 0.0
MADerror = 0.0
RMSDerror = 0.0
tables += """\n ==> %s <==\n\n""" % ('Requested Energy')
tables += _tblhead(maxrgt, table_delimit, 1)
for rxn in HRXN:
db_rxn = dbse + '-' + str(rxn)
if FAIL[rxn]:
tables += """\n%23s %8.4f %8s %10s %10s""" % (db_rxn, BIND[db_rxn], '****', '****', '****')
for i in range(len(ACTV[db_rxn])):
tables += """ %16.8f %2.0f""" % (ERGT[ACTV[db_rxn][i]], RXNM[db_rxn][ACTV[db_rxn][i]])
else:
ERXN[db_rxn] = 0.0
for i in range(len(ACTV[db_rxn])):
ERXN[db_rxn] += ERGT[ACTV[db_rxn][i]] * RXNM[db_rxn][ACTV[db_rxn][i]]
error = constants.hartree2kcalmol * ERXN[db_rxn] - BIND[db_rxn]
tables += """\n%23s %8.4f %8.4f %10.4f %10.4f""" % (db_rxn, BIND[db_rxn], constants.hartree2kcalmol * ERXN[db_rxn],
error, error * constants.cal2J)
for i in range(len(ACTV[db_rxn])):
tables += """ %16.8f %2.0f""" % (ERGT[ACTV[db_rxn][i]], RXNM[db_rxn][ACTV[db_rxn][i]])
if abs(error) < abs(minDerror):
minDerror = error
if abs(error) > abs(maxDerror):
maxDerror = error
MSDerror += error
MADerror += abs(error)
RMSDerror += error * error
count_rxn += 1
tables += """\n %s\n""" % (table_delimit)
if count_rxn:
MSDerror /= float(count_rxn)
MADerror /= float(count_rxn)
RMSDerror = math.sqrt(RMSDerror / float(count_rxn))
tables += """%23s %19s %10.4f %10.4f\n""" % ('Minimal Dev', '', minDerror, minDerror * constants.cal2J)
tables += """%23s %19s %10.4f %10.4f\n""" % ('Maximal Dev', '', maxDerror, maxDerror * constants.cal2J)
tables += """%23s %19s %10.4f %10.4f\n""" % ('Mean Signed Dev', '', MSDerror, MSDerror * constants.cal2J)
tables += """%23s %19s %10.4f %10.4f\n""" % ('Mean Absolute Dev', '', MADerror, MADerror * constants.cal2J)
tables += """%23s %19s %10.4f %10.4f\n""" % ('RMS Dev', '', RMSDerror, RMSDerror * constants.cal2J)
tables += """ %s\n""" % (table_delimit)
core.set_variable('%s DATABASE MEAN SIGNED DEVIATION' % (db_name), MSDerror)
core.set_variable('%s DATABASE MEAN ABSOLUTE DEVIATION' % (db_name), MADerror)
core.set_variable('%s DATABASE ROOT-MEAN-SQUARE DEVIATION' % (db_name), RMSDerror)
core.print_out(tables)
finalenergy = MADerror
else:
finalenergy = 0.0
optstash.restore()
DB_RGT.clear()
DB_RGT.update(VRGT)
DB_RXN.clear()
DB_RXN.update(VRXN)
return finalenergy
def _tblhead(tbl_maxrgt, tbl_delimit, ttype):
r"""Function that prints the header for the changable-width results tables in db().
*tbl_maxrgt* is the number of reagent columns the table must plan for. *tbl_delimit*
is a string of dashes of the correct length to set off the table. *ttype* is 1 for
tables comparing the computed values to the reference or 2 for simple tabulation
and sum of the computed values.
"""
tbl_str = ''
tbl_str += """ %s""" % (tbl_delimit)
if ttype == 1:
tbl_str += """\n%23s %19s %21s""" % ('Reaction', 'Reaction Energy', 'Reaction Error')
elif ttype == 2:
tbl_str += """\n%23s %19s %17s""" % ('Reaction', 'Reaction Value', '')
for i in range(tbl_maxrgt):
tbl_str += """%20s""" % ('Reagent ' + str(i + 1))
if ttype == 1:
tbl_str += """\n%23s %8s %8s %10s %10s""" % ('', 'Ref', 'Calc', '[kcal/mol]', '[kJ/mol]')
elif ttype == 2:
tbl_str += """\n%65s""" % ('')
for i in range(tbl_maxrgt):
if ttype == 1:
tbl_str += """%20s""" % ('[Eh] Wt')
elif ttype == 2:
tbl_str += """%20s""" % ('Value Wt')
tbl_str += """\n %s""" % (tbl_delimit)
return tbl_str
## Aliases ##
db = database
#######################
## End of Database ##
#######################
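# A minimal usage sketch of the wrapper (hypothetical call pattern; the
# available databases, methods, and options depend on the local Psi4
# installation):
#
#   import psi4
#   # run a small subset of a reaction database and tabulate one extra
#   # QCVariable alongside the requested energy
#   mad = psi4.db('scf/cc-pvdz', db_name='S22', subset='small',
#                 tabulate=['current energy'])
#   # the return value is the mean absolute deviation vs. the reference set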
|
lothian/psi4
|
psi4/driver/wrapper_database.py
|
Python
|
lgpl-3.0
| 26,159
|
[
"Psi4"
] |
dc400b6f4adba275bd01fb0ed1e6a3f50eb44ed55fc0d0fd9806246aedf64a7d
|
# -*- coding: utf-8 -*-
import numpy as np
from numpy import isscalar
import copy
from pyorderedfuzzy.ofnumbers import branch
from pyorderedfuzzy.ofnumbers import defuzzy
__author__ = "amarszalek"
class OFNumber(object):
def __init__(self, branch_f, branch_g):
super(OFNumber, self).__init__()
if isinstance(branch_f, branch.Branch):
self.branch_f = branch_f
elif isinstance(branch_f, list) or isinstance(branch_f, np.ndarray):
self.branch_f = branch.Branch(branch_f)
else:
raise ValueError('branch_f must be instance of Branch, list or np.ndarray')
if isinstance(branch_g, branch.Branch):
self.branch_g = branch_g
elif isinstance(branch_g, list) or isinstance(branch_g, np.ndarray):
self.branch_g = branch.Branch(branch_g)
else:
raise ValueError('branch_g must be instance of Branch, list or np.ndarray')
# deep copy operator
def copy(self):
return copy.deepcopy(self)
def contains_zero(self):
if self.branch_f.contains_zero() or self.branch_g.contains_zero():
return True
min_val = np.min([self.branch_f.fvalue_y, self.branch_g.fvalue_y])
max_val = np.max([self.branch_f.fvalue_y, self.branch_g.fvalue_y])
if min_val <= 0.0 <= max_val:
return True
return False
# add method left side
def __add__(self, right):
res = self.copy()
if isinstance(right, OFNumber):
res.branch_f = res.branch_f + right.branch_f
res.branch_g = res.branch_g + right.branch_g
elif isscalar(right):
res.branch_f = res.branch_f + right
res.branch_g = res.branch_g + right
else:
raise ValueError('right must be instance of OFNumber class or scalar')
return res
# add method right side
def __radd__(self, left):
res = self.copy()
if isinstance(left, OFNumber):
res.branch_f = res.branch_f + left.branch_f
res.branch_g = res.branch_g + left.branch_g
elif isscalar(left):
res.branch_f = res.branch_f + left
res.branch_g = res.branch_g + left
else:
raise ValueError('left must be instance of OFNumber class or scalar')
return res
# sub method left side
def __sub__(self, right):
res = self.copy()
if isinstance(right, OFNumber):
res.branch_f = res.branch_f - right.branch_f
res.branch_g = res.branch_g - right.branch_g
elif isscalar(right):
res.branch_f = res.branch_f - right
res.branch_g = res.branch_g - right
else:
raise ValueError('right must be instance of OFNumber class or scalar')
return res
# sub method right side
def __rsub__(self, left):
res = self.copy()
if isinstance(left, OFNumber):
res.branch_f = left.branch_f - res.branch_f
res.branch_g = left.branch_g - res.branch_g
elif isscalar(left):
res.branch_f = left - res.branch_f
res.branch_g = left - res.branch_g
else:
raise ValueError('left must be instance of OFNumber class or scalar')
return res
# mul method left side
def __mul__(self, right):
res = self.copy()
if isinstance(right, OFNumber):
res.branch_f = res.branch_f * right.branch_f
res.branch_g = res.branch_g * right.branch_g
elif isscalar(right):
res.branch_f = res.branch_f * right
res.branch_g = res.branch_g * right
else:
raise ValueError('right must be instance of OFNumber class or scalar')
return res
# mul method right side
def __rmul__(self, left):
res = self.copy()
if isinstance(left, OFNumber):
res.branch_f = res.branch_f * left.branch_f
res.branch_g = res.branch_g * left.branch_g
elif isscalar(left):
res.branch_f = res.branch_f * left
res.branch_g = res.branch_g * left
else:
raise ValueError('left must be instance of OFNumber class or scalar')
return res
# div method left side
def __truediv__(self, right):
res = self.copy()
if isinstance(right, OFNumber):
res.branch_f = res.branch_f / right.branch_f
res.branch_g = res.branch_g / right.branch_g
elif isscalar(right):
res.branch_f = res.branch_f / right
res.branch_g = res.branch_g / right
else:
raise ValueError('right must be instance of OFNumber class or scalar')
return res
# div method right side
def __rtruediv__(self, left):
res = self.copy()
if isinstance(left, OFNumber):
res.branch_f = left.branch_f / res.branch_f
res.branch_g = left.branch_g / res.branch_g
elif isscalar(left):
res.branch_f = left / res.branch_f
res.branch_g = left / res.branch_g
else:
raise ValueError('left must be instance of OFNumber class or scalar')
return res
def conjugate(self):
res = self.copy()
return res
# neg method
def __neg__(self):
res = self.copy()
res.branch_f = -res.branch_f
res.branch_g = -res.branch_g
return res
# < method
def __lt__(self, val):
if isinstance(val, OFNumber):
f = self.branch_f < val.branch_f
g = self.branch_g < val.branch_g
elif isscalar(val):
f = self.branch_f < val
g = self.branch_g < val
else:
raise ValueError('val must be instance of OFNumber class or scalar')
if f and g:
return True
return False
# <= method
def __le__(self, val):
if isinstance(val, OFNumber):
f = self.branch_f <= val.branch_f
g = self.branch_g <= val.branch_g
elif isscalar(val):
f = self.branch_f <= val
g = self.branch_g <= val
else:
raise ValueError('val must be instance of OFNumber class or scalar')
if f and g:
return True
return False
# > method
def __gt__(self, val):
if isinstance(val, OFNumber):
f = self.branch_f > val.branch_f
g = self.branch_g > val.branch_g
elif isscalar(val):
f = self.branch_f > val
g = self.branch_g > val
else:
raise ValueError('val must be instance of OFNumber class or scalar')
if f and g:
return True
return False
# >= method
def __ge__(self, val):
if isinstance(val, OFNumber):
f = self.branch_f >= val.branch_f
g = self.branch_g >= val.branch_g
elif isscalar(val):
f = self.branch_f >= val
g = self.branch_g >= val
else:
raise ValueError('val must be instance of OFNumber class or scalar')
if f and g:
return True
return False
# == method
def __eq__(self, val):
if isinstance(val, OFNumber):
f = self.branch_f == val.branch_f
g = self.branch_g == val.branch_g
elif isscalar(val):
f = self.branch_f == val
g = self.branch_g == val
else:
raise ValueError('val must be instance of OFNumber class or scalar')
if f and g:
return True
return False
# != method
def __ne__(self, val):
if isinstance(val, OFNumber):
f = self.branch_f != val.branch_f
g = self.branch_g != val.branch_g
elif isscalar(val):
f = self.branch_f != val
g = self.branch_g != val
else:
raise ValueError('val must be instance of OFNumber class or scalar')
if f or g:
return True
return False
# to string method
def __str__(self):
return "branch_f:\n{0}\nbranch_g:\n{1}".format(self.branch_f, self.branch_g)
def to_array(self):
f = self.branch_f.to_array()
g = self.branch_g.to_array()
return np.concatenate((f, g))
def acut(self, alpha):
fmin, fmax = self.branch_f.acut(alpha)
gmin, gmax = self.branch_g.acut(alpha)
return np.min([fmin, gmin]), np.max([fmax, gmax])
def supp(self):
return self.acut(0.0)
def ker(self):
return self.acut(1.0)
# defuzzy operator
def defuzzy(self, method='scog', args=(0.5,)):
if method == 'scog':
return defuzzy.scog(self, args[0])
elif method == 'expected':
return defuzzy.expected(self)
else:
raise ValueError('wrong defuzzy method')
def order(self, dfuzzy=None, proper=False, method='scog', args=(0.5,)):
f = self.branch_f.fvalue_y
odr = 0
if proper:
g = self.branch_g.fvalue_y
if f[0] < g[0]:
return 1
elif f[0] > g[0]:
return -1
else:
return 0
if dfuzzy is None:
dfuzzy = self.defuzzy(method=method, args=args)
if (dfuzzy is None) or (dfuzzy == f[0]):
odr = 0
elif dfuzzy > f[0]:
odr = 1
elif dfuzzy < f[0]:
odr = -1
return odr
def plot_ofn(self, ax, plot_as='ordered', kwargs_f={'c': 'k'}, kwargs_g={'c': 'k'}):
self.branch_f.plot_branch(ax, plot_as=plot_as, **kwargs_f)
self.branch_g.plot_branch(ax, plot_as=plot_as, **kwargs_g)
# methods
# initialize OFN of linear type y = a * x + b, y = c * x + d
def init_trapezoid_abcd(a, b, c, d, dim=11):
if dim < 2:
raise ValueError('dim must be > 1')
branch_f = branch.init_linear_ab(a, b, dim=dim)
branch_g = branch.init_linear_ab(c, d, dim=dim)
ofn = OFNumber(branch_f, branch_g)
return ofn
# initialize OFN of linear type
# y = (fx1 - fx0) * x + fx0, y = (gx1 - gx0) * x + gx0
def init_trapezoid_x0x1(fx0, fx1, gx0, gx1, dim=11):
if dim < 2:
raise ValueError('dim must be > 1')
branch_f = branch.init_linear_x0x1(fx0, fx1, dim=dim)
branch_g = branch.init_linear_x0x1(gx0, gx1, dim=dim)
ofn = OFNumber(branch_f, branch_g)
return ofn
# initialize OFN of gaussian type: each branch is y = s * sqrt(-2 * ln(x)) + m
def init_gaussian(mf, sf, mg, sg, dim=11):
if dim < 2:
raise ValueError('dim must be > 1')
branch_f = branch.init_gauss(mf, sf, dim=dim)
branch_g = branch.init_gauss(mg, sg, dim=dim)
ofn = OFNumber(branch_f, branch_g)
return ofn
def init_from_scalar(scalar, dim=11):
branch_f = branch.init_from_scalar(scalar, dim=dim)
branch_g = branch.init_from_scalar(scalar, dim=dim)
ofn = OFNumber(branch_f, branch_g)
return ofn
def fabs(ofn):
res = ofn.copy()
res.branch_f = branch.fabs(res.branch_f)
res.branch_g = branch.fabs(res.branch_g)
return res
def flog(ofn):
res = ofn.copy()
res.branch_f = branch.flog(res.branch_f)
res.branch_g = branch.flog(res.branch_g)
return res
def fexp(ofn):
res = ofn.copy()
res.branch_f = branch.fexp(res.branch_f)
res.branch_g = branch.fexp(res.branch_g)
return res
def fpower(ofn, p):
res = ofn.copy()
if isinstance(p, OFNumber):
res.branch_f = branch.fpower(res.branch_f, p.branch_f)
res.branch_g = branch.fpower(res.branch_g, p.branch_g)
elif isscalar(p):
res.branch_f = branch.fpower(res.branch_f, p)
res.branch_g = branch.fpower(res.branch_g, p)
else:
raise ValueError('p must be instance of OFNumber class or scalar')
return res
def fmax(ofn1, ofn2):
if (not isinstance(ofn1, OFNumber)) and (not isinstance(ofn2, OFNumber)):
raise ValueError('at least one of the arguments must be an OFNumber')
ofn_1 = init_from_scalar(ofn1, dim=ofn2.branch_f.dim) if isscalar(ofn1) else ofn1.copy()
ofn_2 = init_from_scalar(ofn2, dim=ofn1.branch_f.dim) if isscalar(ofn2) else ofn2.copy()
res = ofn_1.copy()
res.branch_f = branch.fmax(ofn_1.branch_f, ofn_2.branch_f)
res.branch_g = branch.fmax(ofn_1.branch_g, ofn_2.branch_g)
return res
def fmin(ofn1, ofn2):
if (not isinstance(ofn1, OFNumber)) and (not isinstance(ofn2, OFNumber)):
raise ValueError('at least one of the arguments must be an OFNumber')
ofn_1 = init_from_scalar(ofn1, dim=ofn2.branch_f.dim) if isscalar(ofn1) else ofn1.copy()
ofn_2 = init_from_scalar(ofn2, dim=ofn1.branch_f.dim) if isscalar(ofn2) else ofn2.copy()
res = ofn_1.copy()
res.branch_f = branch.fmin(ofn_1.branch_f, ofn_2.branch_f)
res.branch_g = branch.fmin(ofn_1.branch_g, ofn_2.branch_g)
return res
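# A minimal usage sketch (illustrative values; assumes the branch module
# provides the initializers referenced above):
#
#   a = init_trapezoid_x0x1(1.0, 2.0, 4.0, 3.0, dim=11)
#   b = init_gaussian(0.0, 1.0, 0.0, 1.5, dim=11)
#   c = fabs((a + b) * 2.0 - 1.0)    # arithmetic operates branch-wise
#   print(c.defuzzy(method='scog', args=(0.5,)), c.order())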
|
amarszalek/PyOrderedFuzzyTools
|
pyorderedfuzzy/ofnumbers/ofnumber.py
|
Python
|
mit
| 12,894
|
[
"Gaussian"
] |
b9ba3792a554fe79075100e92326e120504aed5017a695047cb69f98980b69ed
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import numpy as np
from pymatgen.optimization.linear_assignment import LinearAssignment # type: ignore
class LinearAssignmentTest(unittest.TestCase):
def test(self):
w0 = np.array(
[
[19, 95, 9, 43, 62, 90, 10, 77, 71, 27],
[26, 30, 88, 78, 87, 2, 14, 71, 78, 11],
[48, 70, 26, 82, 32, 16, 36, 26, 42, 79],
[47, 46, 93, 66, 38, 20, 73, 39, 55, 51],
[1, 81, 31, 49, 20, 24, 95, 80, 82, 11],
[81, 48, 35, 54, 35, 55, 27, 87, 96, 7],
[42, 17, 60, 73, 37, 36, 79, 3, 60, 82],
[14, 57, 23, 69, 93, 78, 56, 49, 83, 36],
[11, 37, 24, 70, 62, 35, 64, 18, 99, 20],
[73, 11, 98, 50, 19, 96, 61, 73, 98, 14],
]
)
w1 = np.array(
[
[95, 60, 89, 38, 36, 38, 58, 94, 66, 23],
[37, 0, 40, 58, 97, 85, 18, 54, 86, 21],
[9, 74, 11, 45, 65, 64, 27, 88, 24, 26],
[58, 90, 6, 36, 17, 21, 2, 12, 80, 90],
[33, 0, 74, 75, 11, 84, 34, 7, 39, 0],
[17, 61, 94, 68, 27, 41, 33, 86, 59, 2],
[61, 94, 36, 53, 66, 33, 15, 87, 97, 11],
[22, 20, 57, 69, 15, 9, 15, 8, 82, 68],
[40, 0, 13, 61, 67, 40, 29, 25, 72, 44],
[13, 97, 97, 54, 5, 30, 44, 75, 16, 0],
]
)
w2 = np.array(
[
[34, 44, 72, 13, 10, 58, 16, 1, 10, 61],
[54, 70, 99, 4, 64, 0, 15, 94, 39, 46],
[49, 21, 80, 68, 96, 58, 24, 87, 79, 67],
[86, 46, 58, 83, 83, 56, 83, 65, 4, 96],
[48, 95, 64, 34, 75, 82, 64, 47, 35, 19],
[11, 49, 6, 57, 80, 26, 47, 63, 75, 75],
[74, 7, 15, 83, 64, 26, 78, 17, 67, 46],
[19, 13, 2, 26, 52, 16, 65, 24, 2, 98],
[36, 7, 93, 93, 11, 39, 94, 26, 46, 69],
[32, 95, 37, 50, 97, 96, 12, 70, 40, 93],
]
)
la0 = LinearAssignment(w0)
self.assertEqual(la0.min_cost, 194, "Incorrect cost")
la1 = LinearAssignment(w1)
self.assertEqual(la0.min_cost, la0.min_cost, "Property incorrect")
self.assertEqual(la1.min_cost, 125, "Incorrect cost")
la2 = LinearAssignment(w2)
self.assertEqual(la2.min_cost, 110, "Incorrect cost")
def test_rectangular(self):
w0 = np.array(
[
[19, 95, 9, 43, 62, 90, 10, 77, 71, 27],
[26, 30, 88, 78, 87, 2, 14, 71, 78, 11],
[48, 70, 26, 82, 32, 16, 36, 26, 42, 79],
[47, 46, 93, 66, 38, 20, 73, 39, 55, 51],
[1, 81, 31, 49, 20, 24, 95, 80, 82, 11],
[81, 48, 35, 54, 35, 55, 27, 87, 96, 7],
[42, 17, 60, 73, 37, 36, 79, 3, 60, 82],
[14, 57, 23, 69, 93, 78, 56, 49, 83, 36],
[11, 37, 24, 70, 62, 35, 64, 18, 99, 20],
]
)
la0 = LinearAssignment(w0)
w1 = np.array(
[
[19, 95, 9, 43, 62, 90, 10, 77, 71, 27],
[26, 30, 88, 78, 87, 2, 14, 71, 78, 11],
[48, 70, 26, 82, 32, 16, 36, 26, 42, 79],
[47, 46, 93, 66, 38, 20, 73, 39, 55, 51],
[1, 81, 31, 49, 20, 24, 95, 80, 82, 11],
[81, 48, 35, 54, 35, 55, 27, 87, 96, 7],
[42, 17, 60, 73, 37, 36, 79, 3, 60, 82],
[14, 57, 23, 69, 93, 78, 56, 49, 83, 36],
[11, 37, 24, 70, 62, 35, 64, 18, 99, 20],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
]
)
la1 = LinearAssignment(w1)
self.assertEqual(len(la1.solution), 10)
self.assertEqual(la0.min_cost, la1.min_cost)
self.assertRaises(ValueError, LinearAssignment, w0.T)
def test_another_case(self):  # name must start with 'test' for unittest discovery
w1 = np.array(
[
[
0.03900238875468465,
0.003202415721817453,
0.20107156847937024,
0.0,
0.5002116398420846,
0.11951326861160616,
0.0,
0.5469032363997579,
0.3243791041219123,
0.1119882291981289,
],
[
0.6048342640688928,
0.3847629088356139,
0.0,
0.44358269535118944,
0.45925670625165016,
0.31416882324798145,
0.8065128182180494,
0.0,
0.26153475286065075,
0.6862799559241944,
],
[
0.5597215814025246,
0.15133664165478322,
0.0,
0.6218101659263295,
0.15438455134183793,
0.17281467064043232,
0.8458127968475472,
0.020860721537078075,
0.1926886361228456,
0.0,
],
[
0.0,
0.0,
0.6351848838666995,
0.21261247074659906,
0.4811603832432241,
0.6663733668270337,
0.63970145187428,
0.1415815172623256,
0.5294574133825874,
0.5576702829768786,
],
[
0.25052904388309016,
0.2309392544588127,
0.0656162006684271,
0.0248922362001176,
0.0,
0.2101808638720748,
0.6529031699724193,
0.1503003886507902,
0.375576165698992,
0.7368328849560374,
],
[
0.0,
0.042215873587668984,
0.10326920761908365,
0.3562551151517992,
0.9170343984958856,
0.818783531026254,
0.7896770426052844,
0.0,
0.6573135097946438,
0.17806189728574429,
],
[
0.44992199118890386,
0.0,
0.38548898339412585,
0.6269193883601244,
1.0022861602564634,
0.0,
0.1869765500803764,
0.03474156273982543,
0.3715310534696664,
0.6197122486230232,
],
[
0.37939853696836545,
0.2421427374018027,
0.5586150342727723,
0.0,
0.7171485794073893,
0.8021029235865014,
0.11213464903613135,
0.6497896761660467,
0.3274108706187846,
0.0,
],
[
0.6674685746225324,
0.5347953626128863,
0.11461835366075113,
0.0,
0.8170639855163434,
0.7291931505979982,
0.3149153087053108,
0.1008681103294512,
0.0,
0.18751172321112997,
],
[
0.6985944652913342,
0.6139921045056471,
0.0,
0.4393266955771965,
0.0,
0.47265399761400695,
0.3674241844351025,
0.04731761392352629,
0.21484886069716147,
0.16488710920126137,
],
]
)
la = LinearAssignment(w1)
self.assertAlmostEqual(la.min_cost, 0)
def test_small_range(self):
# can be tricky for the augment step
x = np.array(
[
[4, 5, 5, 6, 8, 4, 7, 4, 7, 8],
[5, 6, 6, 6, 7, 6, 6, 5, 6, 7],
[4, 4, 5, 7, 7, 4, 8, 4, 7, 7],
[6, 7, 6, 6, 7, 6, 6, 6, 6, 6],
[4, 4, 4, 6, 6, 4, 7, 4, 7, 7],
[4, 5, 5, 6, 8, 4, 7, 4, 7, 8],
[5, 7, 5, 5, 5, 6, 4, 5, 4, 6],
[8, 9, 8, 4, 5, 9, 4, 8, 4, 4],
[5, 6, 6, 6, 7, 6, 6, 5, 6, 7],
[5, 6, 6, 6, 7, 6, 6, 5, 6, 7],
]
)
self.assertAlmostEqual(LinearAssignment(x).min_cost, 48)
def test_boolean_inputs(self):
w = np.ones((135, 135), dtype=bool)
np.fill_diagonal(w, False)
la = LinearAssignment(w)
# if the input doesn't get converted to a float, the masking
# doesn't work properly
self.assertEqual(la.orig_c.dtype, np.float64)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
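# Direct-usage sketch of the class under test (cost values are illustrative):
#
#   import numpy as np
#   from pymatgen.optimization.linear_assignment import LinearAssignment
#   costs = np.array([[4.0, 1.0], [2.0, 0.0]])
#   la = LinearAssignment(costs)
#   print(la.min_cost)   # cost of the optimal row-to-column assignment
#   print(la.solution)   # column index assigned to each row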
|
gmatteo/pymatgen
|
pymatgen/optimization/tests/test_linear_assignment.py
|
Python
|
mit
| 9,635
|
[
"pymatgen"
] |
364c369a09984fd2d5e256d4a0ce0aeffd324fbee67959eabf9c5053b42fb460
|
"""
Unit tests for `dbgroup` module.
"""
__author__ = "Dan Gunter"
__copyright__ = "Copyright 2014, The Materials Project"
__email__ = "dkgunter@lbl.gov"
__date__ = "2014-04-29"
import json
import mongomock
import os
import tempfile
import unittest
from pymatgen.db.dbgroup import ConfigGroup
from pymatgen.db import dbconfig
_opj = os.path.join
mockdb = mongomock.MongoClient()
doc = {"hello": "world"}
mockdb.testdb.data.insert_one(doc)
# add some nested collections
mockcoll = ["data.a1", "data.a1.b1", "data.a1.b2", "data.a2"]
[mockdb.testdb[c].insert_one(doc) for c in mockcoll]
class MockQueryEngine:
def __init__(self, **kwargs):
self.kw = kwargs
def __eq__(self, other):
return other.kw == self.kw
@property
def collection(self):
return mockdb.testdb[self.kw["collection"]]
@property
def db(self):
class DB:
_c = mockcoll
def collection_names(self, x=None):
return self._c
return DB()
class Cfg:
def __init__(self, v):
self.settings = {"collection": v}
self.collection = v
def copy(self):
return Cfg(self.collection)
class ConfigGroupTestCase(unittest.TestCase):
def setUp(self):
self.g = ConfigGroup(qe_class=MockQueryEngine)
self.configs = [Cfg(f"qe{i:d}") for i in range(5)]
def test_add(self):
"""ConfigGroup add and lookup"""
keys = ["foo", "bar", "foo.a", "foo.b"]
expect = {}
for i, k in enumerate(keys):
self.g.add(k, self.configs[i])
expect[k] = MockQueryEngine(**self.configs[i].settings)
self.assertEqual(self.g["foo"], expect["foo"])
self.assertEqual(self.g["bar"], expect["bar"])
self.assertEqual(self.g["bar*"], {"bar": expect["bar"]})
self.assertEqual(self.g["foo.a"], expect["foo.a"])
self.assertEqual(self.g["foo.*"], {"foo.a": expect["foo.a"], "foo.b": expect["foo.b"]})
def test_add_path(self):
"""Add set of query engines from a path."""
# directory of pretend configs
d = tempfile.mkdtemp()
try:
# fill with some configs
c = {}
for root in ("foo", "bar"):
for sub in ("a", "b.1", "b.2"):
config = {dbconfig.DB_KEY: root, dbconfig.COLL_KEY: sub}
filename = f"mg_core_{root}_{sub}.json"
with open(_opj(d, filename), "w") as fp:
json.dump(config, fp)
c[f"{root}.{sub}"] = config
# read them
self.g.add_path(d)
# check all were added
self.assertEqual(sorted(self.g.keys()), sorted(c.keys()))
# check one
qe1 = self.g["foo.a"].kw
c1 = c["foo.a"]
self.assertTrue(dict_subset(c1, qe1))
# check with prefix
self.g.set_prefix("foo.b")
self.assertTrue(dict_subset(c["foo.b.1"], self.g["1"].kw))
self.assertRaises(KeyError, self.g.__getitem__, "bla")
# check list with prefix
gkeys = sorted(self.g["*"].keys())
self.assertEqual(gkeys, ["foo.b.1", "foo.b.2"])
# check list w/o prefix
self.g.set_prefix()
gkeys = sorted(self.g["bar.b.*"].keys())
self.assertEqual(gkeys, ["bar.b.1", "bar.b.2"])
finally:
# rm -r $d
for f in os.listdir(d):
os.unlink(os.path.join(d, f))
os.rmdir(d)
def test_uncache(self):
"""Remove cached query engine(s) from ConfigGroup."""
keys = ("foo.a", "foo", "bar")
for i in range(len(keys)):
self.g.add(keys[i], self.configs[i])
# force instantiation/caching
for i in range(len(keys)):
self.g[keys[i]]
left_behind = self.g[keys[2]]
# remove all foo from cache
self.g.uncache("foo*")
# check that they are not cached
for i in range(2):
self.assertRaises(KeyError, self.g._cached.__getitem__, keys[i])
# check that un-removed remain
self.assertEqual(self.g[keys[2]], left_behind)
def test_expand(self):
"""Add multiple collections at once with 'expand'."""
self.g.add("foo", Cfg("data"), expand=True)
# check that data.* got added as foo.*
keys = set(self.g.keys())
expect = set(["foo"] + [f.replace("data", "foo") for f in mockcoll])
self.assertEqual(expect, keys)
def dict_subset(a, b):
for k in a.keys():
if k not in b or b[k] != a[k]:
return False
return True
if __name__ == "__main__":
unittest.main()
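# A minimal sketch of how ConfigGroup is used outside the tests (the config
# object here is hypothetical; see pymatgen.db.dbconfig for the real format):
#
#   from pymatgen.db.dbgroup import ConfigGroup
#   g = ConfigGroup()              # default qe_class is the real QueryEngine
#   g.add("tasks", some_config)    # some_config: a dbconfig-style object
#   qe = g["tasks"]                # instantiates and caches a query engine
#   engines = g["tasks.*"]         # wildcard lookup returns a dict of engines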
|
materialsproject/pymatgen-db
|
pymatgen/db/tests/test_dbgroup.py
|
Python
|
mit
| 4,724
|
[
"pymatgen"
] |
e539709386e1697921177c3146a3a570fa316e2e516335b637b5373883326e9c
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import unittest
import time
import os
import csv
from globalvalues import RPI
if RPI:
import RPi.GPIO as GPIO
import sensor
import sender
import auxiliaries
import cust_crypt
from auxiliaries import get_data
from manager import Manager
from globalvalues import POWER_LED_PIN, NETWORK_LED_PIN, COUNTS_LED_PIN
from globalvalues import DEFAULT_CONFIG, DEFAULT_PUBLICKEY
from globalvalues import ANSI_RESET, ANSI_GR, ANSI_RED
TEST_DATALOG = 'data-log-testfile.txt'
if RPI:
test_config_path = DEFAULT_CONFIG
test_publickey_path = DEFAULT_PUBLICKEY
configs_present = True
else:
# obviously, for security, the config and public key should NOT be
# included in the (public) repo!
# these paths are for Brian's LBL desktop, but you could put them
# here for other machines too.
test_config_path = './testconfig/config.csv'
test_publickey_path = './testconfig/id_rsa_lbl.pub'
if (os.path.exists(test_config_path) and
os.path.exists(test_publickey_path)):
print('Found config files')
configs_present = True
else:
print('Config files not found!')
configs_present = False
TEST_LOGFILE = 'test.log'
class TestVerbosity(unittest.TestCase):
class Verbosity1(object):
def __init__(self, vlevel=1):
auxiliaries.set_verbosity(self, verbosity=vlevel)
def setUp(self):
self.verbose_obj = TestVerbosity.Verbosity1(vlevel=1)
print('Testing set_verbosity()')
def test_verbosity(self):
print('Two words of {}green text{} should appear here: '.format(
ANSI_GR, ANSI_RESET))
self.verbose_obj.vprint(0, '{}one{}'.format(ANSI_GR, ANSI_RESET))
self.verbose_obj.vprint(1, '{}two{}'.format(ANSI_GR, ANSI_RESET))
self.verbose_obj.vprint(2, '{}three{}'.format(ANSI_RED, ANSI_RESET))
self.verbose_obj.vprint(3, '{}four{}'.format(ANSI_RED, ANSI_RESET))
print()
self.assertTrue(True)
def tearDown(self):
del(self.verbose_obj)
print()
class TestLogging(unittest.TestCase):
class Verbosity2(object):
def __init__(self, vlevel=1, logfile=TEST_LOGFILE):
try:
os.remove(logfile)
except OSError:
pass
auxiliaries.set_verbosity(self, verbosity=vlevel, logfile=logfile)
def setUp(self):
self.verbose_obj = TestLogging.Verbosity2(vlevel=1)
print('Testing logging')
def test_logging(self):
print('Two words of {}green text{} should appear here: '.format(
ANSI_GR, ANSI_RESET))
textlines = [
'{}one{}'.format(ANSI_GR, ANSI_RESET),
'{}two{}'.format(ANSI_GR, ANSI_RESET),
'{}three{}'.format(ANSI_RED, ANSI_RESET),
'{}four{}'.format(ANSI_RED, ANSI_RESET)
]
[self.verbose_obj.vprint(i, textlines[i]) for i in range(4)]
print()
with open(TEST_LOGFILE, 'r') as f:
fline = f.readline()
self.assertTrue(fline.endswith(textlines[0] + '\n'))
fline = f.readline()
self.assertTrue(fline.endswith(textlines[1] + '\n'))
fline = f.readline()
self.assertFalse(fline)
def tearDown(self):
del(self.verbose_obj)
try:
os.remove(TEST_LOGFILE)
except OSError:
pass
print()
@unittest.skipUnless(RPI, "LED test only runs on a Raspberry Pi")
class TestLEDs(unittest.TestCase):
def setUp(self):
pins = (POWER_LED_PIN, NETWORK_LED_PIN, COUNTS_LED_PIN)
self.LEDs = [auxiliaries.LED(pin=p) for p in pins]
print('Testing LEDs',)
def test_LED(self):
print('on')
[LED.on() for LED in self.LEDs]
time.sleep(1)
print('off')
[LED.off() for LED in self.LEDs]
time.sleep(1)
print('flash')
[LED.flash() for LED in self.LEDs]
time.sleep(1)
print('start blink')
[LED.start_blink(interval=0.5) for LED in self.LEDs]
time.sleep(3)
print('stop blink')
[LED.stop_blink() for LED in self.LEDs]
time.sleep(1)
def tearDown(self):
GPIO.cleanup()
print()
@unittest.skipUnless(configs_present, "Config test requires config files")
class TestConfig(unittest.TestCase):
def test(self):
config = auxiliaries.Config(test_config_path, verbosity=2)
self.assertIsNotNone(config.ID)
self.assertIsNotNone(config.hash)
self.assertIsNotNone(config.lat)
self.assertIsNotNone(config.long)
@unittest.skipUnless(configs_present, "PublicKey test requires config files")
class TestPublicKey(unittest.TestCase):
def setUp(self):
self.publickey = auxiliaries.PublicKey(
test_publickey_path, verbosity=2)
self.assertIsInstance(
self.publickey.encrypter, cust_crypt.PublicDEncrypt)
def test_encrypt(self):
test_packet = 'This is a string with which we are testing encryption'
encrypted_packet = self.publickey.encrypter.encrypt_message(
test_packet)[0]
self.assertIsInstance(encrypted_packet, str)
class TestSensor(unittest.TestCase):
def setUp(self):
# fake sensor - only simulating counts
self.sensor = sensor.Sensor(max_accumulation_time_s=2, use_gpio=False)
def tearDown(self):
self.sensor.cleanup()
self.sensor = None
def test_basic_counts(self):
self.assertEqual(len(self.sensor.get_all_counts()), 0)
n = 3
[self.sensor.count() for _ in xrange(n)]
self.assertEqual(len(self.sensor.get_all_counts()), n)
def test_max_accum(self):
self.assertEqual(len(self.sensor.get_all_counts()), 0)
n1 = 3
[self.sensor.count() for _ in xrange(n1)]
self.assertEqual(len(self.sensor.get_all_counts()), n1)
time.sleep(1)
n2 = 4
[self.sensor.count() for _ in xrange(n2)]
self.assertEqual(len(self.sensor.get_all_counts()), n1 + n2)
time.sleep(1.5)
self.assertEqual(len(self.sensor.get_all_counts()), n2)
time.sleep(1)
self.assertEqual(len(self.sensor.get_all_counts()), 0)
class TestSender(unittest.TestCase):
def test_missing_config(self):
ss = sender.ServerSender(
manager=None,
config=None,
publickey=None,
verbosity=4)
with self.assertRaises(sender.MissingFile):
ss.send_cpm(0, 0)
@unittest.skipUnless(configs_present, "Sender tests require config files")
def test_missing_publickey(self):
ss = sender.ServerSender(
manager=None,
config=auxiliaries.Config(test_config_path),
publickey=None,
verbosity=4)
with self.assertRaises(sender.MissingFile):
ss.send_cpm(0, 0)
@unittest.skipUnless(configs_present, "Test packets require config files")
def test_send_test_udp(self):
sender.send_test_packets(
mode='udp',
config=test_config_path,
publickey=test_publickey_path,
n=1)
print(' ~ Check that the server received a test UDP packet ~')
self.assertTrue(True)
@unittest.skipUnless(configs_present, "Test packets require config files")
def test_send_test_tcp(self):
sender.send_test_packets(
mode='tcp',
config=test_config_path,
publickey=test_publickey_path,
n=1)
print(' ~ Check that the server received a test TCP packet ~')
self.assertTrue(True)
class TestDataLog(unittest.TestCase):
def setUp(self):
print('Checking local data')
def test_get_data(self):
"""
Checks the data log functionality.
Creates a test data log, simulates 2 counts,
checks that the test data log was created,
checks that there are 2 counts, and then deletes the test datalog.
"""
mgr = Manager(test=True, datalog=TEST_DATALOG)
now = time.time()
mgr.handle_cpm(now - 10, now)
[mgr.sensor.count() for _ in xrange(2)]
mgr.handle_cpm(now, now + 10)
output = get_data(TEST_DATALOG)
print(output)
if RPI:
    GPIO.cleanup()
del(mgr)
self.assertIsNotNone(output)
self.assertEqual(len(output), 2)
def tearDown(self):
os.remove(TEST_DATALOG)
print()
class DequeObject(unittest.TestCase):
def setUp(self):
print('Testing Deque Object')
def test_no_network(self):
"""
Creates a deque data structure, runs manager with no network,
checks if deque was created, checks if cpm data was added to
the deque object, checks if the Data_Handler class was
created.
"""
mgr = Manager(protocol='new', test=True)
now = time.time()
mgr.handle_cpm(now - 10, now)
print(mgr.data_handler.queue)
[mgr.sensor.count() for _ in xrange(2)]
mgr.handle_cpm(now, now + 10)
print(mgr.data_handler.queue)
self.assertIsNotNone(mgr.data_handler.queue)
self.assertEqual(len(mgr.data_handler.queue), 2)
self.assertIsNotNone(mgr.data_handler)
if RPI:
    GPIO.cleanup()
del(mgr)
def tearDown(self):
print()
if __name__ == '__main__':
unittest.main()
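# Standard unittest selection also works here, e.g. to run one class or one
# test method from the command line:
#
#   python tests.py TestVerbosity
#   python -m unittest tests.TestSensor.test_basic_counts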
|
bearing/dosenet-raspberrypi
|
tests.py
|
Python
|
mit
| 9,484
|
[
"Brian"
] |
2cce0d532338c574ee6c6b3f39db07c3c5d37f123e8ef4db87330ea54d2393a1
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
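# e.g. partition(lambda x: x > 0, [1, -2, 3]) == ([1, 3], [-2])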
class assetCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'analyze_iam_policy': ('analysis_query', 'execution_timeout', ),
'analyze_iam_policy_longrunning': ('analysis_query', 'output_config', ),
'analyze_move': ('resource', 'destination_parent', 'view', ),
'batch_get_assets_history': ('parent', 'asset_names', 'content_type', 'read_time_window', 'relationship_types', ),
'create_feed': ('parent', 'feed_id', 'feed', ),
'delete_feed': ('name', ),
'export_assets': ('parent', 'output_config', 'read_time', 'asset_types', 'content_type', 'relationship_types', ),
'get_feed': ('name', ),
'list_assets': ('parent', 'read_time', 'asset_types', 'content_type', 'page_size', 'page_token', 'relationship_types', ),
'list_feeds': ('parent', ),
'search_all_iam_policies': ('scope', 'query', 'page_size', 'page_token', 'asset_types', 'order_by', ),
'search_all_resources': ('scope', 'query', 'asset_types', 'page_size', 'page_token', 'order_by', 'read_mask', ),
'update_feed': ('feed', 'update_mask', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=assetCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the asset client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level when converting positional
parameters in client method calls to keyword-based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
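# Example invocation (paths are illustrative; the output directory must
# already exist and be empty):
#
#   python fixup_asset_v1_keywords.py -d ./my_project -o ./my_project_fixed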
|
googleapis/python-asset
|
scripts/fixup_asset_v1_keywords.py
|
Python
|
apache-2.0
| 6,935
|
[
"VisIt"
] |
a30332cf9b41579fd38ff44daed4bc80d782e5f77e639f915a5df50087fa95e9
|
# This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2012 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" myhdl toVHDL conversion module.
"""
import sys
import math
import inspect
from datetime import datetime
#import compiler
#from compiler import ast as astNode
import ast
from types import GeneratorType, ClassType
from cStringIO import StringIO
import warnings
from copy import copy
import string
import myhdl
from myhdl import *
from myhdl import ToVHDLError, ToVHDLWarning
from myhdl._extractHierarchy import (_HierExtr, _isMem, _getMemInfo,
_UserVhdlCode, _userCodeMap)
from myhdl._instance import _Instantiator
from myhdl.conversion._misc import (_error,_kind,_context,
_ConversionMixin, _Label, _genUniqueSuffix, _isConstant)
from myhdl.conversion._analyze import (_analyzeSigs, _analyzeGens, _analyzeTopFunc,
_Ram, _Rom, _enumTypeSet, _constDict, _extConstDict)
from myhdl._Signal import _Signal,_WaiterList
from myhdl.conversion._toVHDLPackage import _package
_version = myhdl.__version__.replace('.','')
_shortversion = _version.replace('dev','')
_converting = 0
_profileFunc = None
_enumPortTypeSet = set()
def _checkArgs(arglist):
for arg in arglist:
if not isinstance(arg, (GeneratorType, _Instantiator, _UserVhdlCode)):
raise ToVHDLError(_error.ArgType, arg)
def _flatten(*args):
arglist = []
for arg in args:
if id(arg) in _userCodeMap['vhdl']:
arglist.append(_userCodeMap['vhdl'][id(arg)])
elif isinstance(arg, (list, tuple, set)):
for item in arg:
arglist.extend(_flatten(item))
else:
arglist.append(arg)
return arglist
def _makeDoc(doc, indent=''):
if doc is None:
return ''
doc = inspect.cleandoc(doc)
pre = '\n' + indent + '-- '
doc = '-- ' + doc
doc = doc.replace('\n', pre)
return doc
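# e.g. _makeDoc("line1\nline2", indent="    ") returns
# '-- line1\n    -- line2', i.e. each docstring line becomes a VHDL comment.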
class _ToVHDLConvertor(object):
__slots__ = ("name",
"component_declarations",
"header",
"no_myhdl_header",
"no_myhdl_package",
"library",
"use_clauses",
"architecture",
"numeric_ports",
)
def __init__(self):
self.name = None
self.component_declarations = None
self.header = ''
self.no_myhdl_header = False
self.no_myhdl_package = False
self.library = "work"
self.architecture = "MyHDL"
self.numeric_ports = True
self.use_clauses = None
def __call__(self, func, *args, **kwargs):
global _converting
if _converting:
return func(*args, **kwargs) # skip
else:
# clean start
sys.setprofile(None)
from myhdl import _traceSignals
if _traceSignals._tracing:
raise ToVHDLError("Cannot use toVHDL while tracing signals")
if not callable(func):
raise ToVHDLError(_error.FirstArgType, "got %s" % type(func))
_converting = 1
if self.name is None:
name = func.func_name
else:
name = str(self.name)
try:
h = _HierExtr(name, func, *args, **kwargs)
finally:
_converting = 0
compDecls = self.component_declarations
useClauses = self.use_clauses
vpath = name + ".vhd"
vfile = open(vpath, 'w')
ppath = "pck_myhdl_%s.vhd" % _shortversion
pfile = None
# # write MyHDL package always during development, as it may change
# pfile = None
# if not os.path.isfile(ppath):
# pfile = open(ppath, 'w')
if not self.no_myhdl_package:
pfile = open(ppath, 'w')
### initialize properly ###
_genUniqueSuffix.reset()
_enumTypeSet.clear()
_constDict.clear()
_extConstDict.clear()
siglist, memlist = _analyzeSigs(h.hierarchy, hdl='VHDL')
arglist = _flatten(h.top)
# print h.top
_checkArgs(arglist)
genlist = _analyzeGens(arglist, h.absnames)
_annotateTypes(genlist)
### infer interface
intf = _analyzeTopFunc(func, *args, **kwargs)
intf.name = name
# sanity checks on interface
for portname in intf.argnames:
s = intf.argdict[portname]
if s._name is None:
raise ToVHDLError(_error.ShadowingSignal, portname)
if s._inList:
raise ToVHDLError(_error.PortInList, portname)
# add enum types to port-related set
if isinstance(s._val, EnumItemType):
obj = s._val._type
assert obj in _enumTypeSet
_enumTypeSet.remove(obj)
_enumPortTypeSet.add(obj)
doc = _makeDoc(inspect.getdoc(func))
needPck = len(_enumPortTypeSet) > 0
lib = self.library
arch = self.architecture
numeric = self.numeric_ports
self._convert_filter(h, intf, siglist, memlist, genlist)
if pfile:
_writeFileHeader(pfile, ppath)
print >> pfile, _package
pfile.close()
_writeFileHeader(vfile, vpath)
if needPck:
_writeCustomPackage(vfile, intf)
_writeModuleHeader(vfile, intf, needPck, lib, arch, useClauses, doc, numeric)
_writeFuncDecls(vfile)
_writeConstants(vfile)
_writeTypeDefs(vfile)
_writeSigDecls(vfile, intf, siglist, memlist)
_writeCompDecls(vfile, compDecls)
_convertGens(genlist, siglist, memlist, vfile)
_writeModuleFooter(vfile, arch)
vfile.close()
# tbfile.close()
### clean-up properly ###
self._cleanup(siglist)
return h.top
def _cleanup(self, siglist):
# clean up signal names
for sig in siglist:
sig._clear()
# sig._name = None
# sig._driven = False
# sig._read = False
# clean up attributes
self.name = None
self.component_declarations = None
self.header = ''
self.no_myhdl_header = False
self.no_myhdl_package = False
self.architecture = "MyHDL"
self.numeric_ports = True
def _convert_filter(self, h, intf, siglist, memlist, genlist):
# intended to be an entry point for other uses:
# code checking, optimizations, etc
pass
toVHDL = _ToVHDLConvertor()
myhdl_header = """\
-- File: $filename
-- Generated by MyHDL $version
-- Date: $date
"""
def _writeFileHeader(f, fn):
vars = dict(filename=fn,
version=myhdl.__version__,
date=datetime.today().ctime()
)
if toVHDL.header:
print >> f, string.Template(toVHDL.header).substitute(vars)
if not toVHDL.no_myhdl_header:
print >> f, string.Template(myhdl_header).substitute(vars)
print >> f
def _writeCustomPackage(f, intf):
print >> f
print >> f, "package pck_%s is" % intf.name
print >> f
sortedList = list(_enumPortTypeSet)
sortedList.sort(cmp=lambda a, b: cmp(a._name, b._name))
for t in sortedList:
print >> f, " %s" % t._toVHDL()
print >> f
print >> f, "end package pck_%s;" % intf.name
print >> f
def _writeModuleHeader(f, intf, needPck, lib, arch, useClauses, doc, numeric):
print >> f, "library IEEE;"
print >> f, "use IEEE.std_logic_1164.all;"
print >> f, "use IEEE.numeric_std.all;"
print >> f, "use std.textio.all;"
print >> f
if lib != "work":
print >> f, "library %s;" % lib
if useClauses is not None:
f.write(useClauses)
f.write("\n")
else:
print >> f, "use %s.pck_myhdl_%s.all;" % (lib, _shortversion)
print >> f
if needPck:
print >> f, "use %s.pck_%s.all;" % (lib, intf.name)
print >> f
print >> f, "entity %s is" % intf.name
if intf.argnames:
f.write(" port (")
c = ''
for portname in intf.argnames:
s = intf.argdict[portname]
f.write("%s" % c)
c = ';'
# make sure signal name is equal to its port name
s._name = portname
# make it non-numeric optionally
if s._type is intbv:
s._numeric = numeric
r = _getRangeString(s)
p = _getTypeString(s)
if s._driven:
if s._read:
warnings.warn("%s: %s" % (_error.OutputPortRead, portname),
category=ToVHDLWarning
)
f.write("\n %s: inout %s%s" % (portname, p, r))
else:
f.write("\n %s: out %s%s" % (portname, p, r))
else:
if not s._read:
warnings.warn("%s: %s" % (_error.UnusedPort, portname),
category=ToVHDLWarning
)
f.write("\n %s: in %s%s" % (portname, p, r))
f.write("\n );\n")
print >> f, "end entity %s;" % intf.name
print >> f, doc
print >> f
print >> f, "architecture %s of %s is" % (arch, intf.name)
print >> f
def _writeFuncDecls(f):
return
# print >> f, package
def _writeConstants(f):
f.write("\n")
# guess nice representation
for c in _constDict:
if c in _extConstDict:
continue
v = _constDict[c]
s = str(int(v))
sign = ''
if v < 0:
sign = '-'
for i in range(4, 31):
if abs(v) == 2**i:
s = "%s2**%s" % (sign, i)
break
if abs(v) == 2**i-1:
s = "%s2**%s-1" % (sign, i)
break
v = _constDict[c]
f.write("constant %s: integer := %s;\n" % (c, s))
f.write("\n")
def _writeTypeDefs(f):
f.write("\n")
sortedList = list(_enumTypeSet)
sortedList.sort(cmp=lambda a, b: cmp(a._name, b._name))
for t in sortedList:
f.write("%s\n" % t._toVHDL())
f.write("\n")
constwires = []
def _writeSigDecls(f, intf, siglist, memlist):
del constwires[:]
for s in siglist:
if not s._used:
continue
if s._name in intf.argnames:
continue
r = _getRangeString(s)
p = _getTypeString(s)
if s._driven:
if not s._read:
warnings.warn("%s: %s" % (_error.UnreadSignal, s._name),
category=ToVHDLWarning
)
# the following line implements initial value assignments
# print >> f, "%s %s%s = %s;" % (s._driven, r, s._name, int(s._val))
print >> f, "signal %s: %s%s;" % (s._name, p, r)
elif s._read:
# the original exception
# raise ToVHDLError(_error.UndrivenSignal, s._name)
# changed to a warning and a continuous assignment to a wire
warnings.warn("%s: %s" % (_error.UndrivenSignal, s._name),
category=ToVHDLWarning
)
constwires.append(s)
print >> f, "signal %s: %s%s;" % (s._name, p, r)
for m in memlist:
if not m._used:
continue
# infer attributes for the case of named signals in a list
for i, s in enumerate(m.mem):
if not m._driven and s._driven:
m._driven = s._driven
if not m._read and s._read:
m._read = s._read
if not m._driven and not m._read:
continue
r = _getRangeString(m.elObj)
p = _getTypeString(m.elObj)
t = "t_array_%s" % m.name
print >> f, "type %s is array(0 to %s-1) of %s%s;" % (t, m.depth, p, r)
print >> f, "signal %s: %s;" % (m.name, t)
print >> f
def _writeCompDecls(f, compDecls):
if compDecls is not None:
print >> f, compDecls
def _writeModuleFooter(f, arch):
print >> f, "end architecture %s;" % arch
def _getRangeString(s):
if isinstance(s._val, EnumItemType):
return ''
elif s._type is bool:
return ''
elif s._nrbits is not None:
ls = getattr(s, 'lenStr', False)
if ls:
msb = ls + '-1'
else:
msb = s._nrbits-1
return "(%s downto 0)" % msb
else:
raise AssertionError
def _getTypeString(s):
if isinstance(s._val, EnumItemType):
return s._val._type._name
elif s._type is bool:
return "std_logic"
if not s._numeric:
return "std_logic_vector"
if s._min is not None and s._min < 0:
return "signed "
else:
return 'unsigned'
def _convertGens(genlist, siglist, memlist, vfile):
blockBuf = StringIO()
funcBuf = StringIO()
for tree in genlist:
if isinstance(tree, _UserVhdlCode):
blockBuf.write(str(tree))
continue
if tree.kind == _kind.ALWAYS:
Visitor = _ConvertAlwaysVisitor
elif tree.kind == _kind.INITIAL:
Visitor = _ConvertInitialVisitor
elif tree.kind == _kind.SIMPLE_ALWAYS_COMB:
Visitor = _ConvertSimpleAlwaysCombVisitor
elif tree.kind == _kind.ALWAYS_DECO:
Visitor = _ConvertAlwaysDecoVisitor
elif tree.kind == _kind.ALWAYS_SEQ:
Visitor = _ConvertAlwaysSeqVisitor
else: # ALWAYS_COMB
Visitor = _ConvertAlwaysCombVisitor
v = Visitor(tree, blockBuf, funcBuf)
v.visit(tree)
vfile.write(funcBuf.getvalue()); funcBuf.close()
print >> vfile, "begin"
print >> vfile
for s in constwires:
if s._type is bool:
c = int(s._val)
pre, suf = "'", "'"
elif s._type is intbv:
c = int(s._val)
w = len(s)
assert w != 0
if s._min < 0:
pre, suf = "to_signed(", ", %s)" % w
else:
pre, suf = "to_unsigned(", ", %s)" % w
else:
raise ToVHDLError("Unexpected type for constant signal", s._name)
print >> vfile, "%s <= %s%s%s;" % (s._name, pre, c, suf)
print >> vfile
# shadow signal assignments
for s in siglist:
if hasattr(s, 'toVHDL') and s._read:
print >> vfile, s.toVHDL()
# hack for slice signals in a list
for m in memlist:
if m._read:
for s in m.mem:
if hasattr(s, 'toVHDL'):
print >> vfile, s.toVHDL()
print >> vfile
vfile.write(blockBuf.getvalue()); blockBuf.close()
opmap = {
ast.Add : '+',
ast.Sub : '-',
ast.Mult : '*',
ast.Div : '/',
ast.Mod : 'mod',
ast.Pow : '**',
ast.LShift : 'shift_left',
ast.RShift : 'shift_right',
ast.BitOr : 'or',
ast.BitAnd : 'and',
ast.BitXor : 'xor',
ast.FloorDiv : '/',
ast.Invert : 'not ',
ast.Not : 'not ',
ast.UAdd : '+',
ast.USub : '-',
ast.Eq : '=',
ast.Gt : '>',
ast.GtE : '>=',
ast.Lt : '<',
ast.LtE : '<=',
ast.NotEq : '/=',
ast.And : 'and',
ast.Or : 'or',
}
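# Note that the shift operators map to VHDL function names (shift_left /
# shift_right) rather than infix operators; shiftOp() below emits them as
# function calls, while the other entries are written infix.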
class _ConvertVisitor(ast.NodeVisitor, _ConversionMixin):
def __init__(self, tree, buf):
self.tree = tree
self.buf = buf
self.returnLabel = tree.name
self.ind = ''
self.SigAss = False
self.isLhs = False
self.labelStack = []
self.context = None
def write(self, arg):
self.buf.write("%s" % arg)
def writeline(self, nr=1):
for i in range(nr):
self.buf.write("\n%s" % self.ind)
def writeDoc(self, node):
assert hasattr(node, 'doc')
doc = _makeDoc(node.doc, self.ind)
self.write(doc)
self.writeline()
def IntRepr(self, obj):
if obj >= 0:
s = "%s" % int(obj)
else:
s = "(- %s)" % abs(int(obj))
return s
def BitRepr(self, item, var):
return '"%s"' % bin(item, len(var))
def inferCast(self, vhd, ori):
pre, suf = "", ""
if isinstance(vhd, vhd_int):
if not isinstance(ori, vhd_int):
pre, suf = "to_integer(", ")"
elif isinstance(vhd, vhd_unsigned):
if isinstance(ori, vhd_unsigned):
if vhd.size != ori.size:
pre, suf = "resize(", ", %s)" % vhd.size
elif isinstance(ori, vhd_signed):
if vhd.size != ori.size:
# note the order of resizing and casting here (otherwise bug!)
pre, suf = "resize(unsigned(", "), %s)" % vhd.size
else:
pre, suf = "unsigned(", ")"
else:
pre, suf = "to_unsigned(", ", %s)" % vhd.size
elif isinstance(vhd, vhd_signed):
if isinstance(ori, vhd_signed):
if vhd.size != ori.size:
pre, suf = "resize(", ", %s)" % vhd.size
elif isinstance(ori, vhd_unsigned):
if vhd.size != ori.size:
# I think this should be the order of resizing and casting here
pre, suf = "signed(resize(", ", %s))" % vhd.size
else:
pre, suf = "signed(", ")"
else:
pre, suf = "to_signed(", ", %s)" % vhd.size
elif isinstance(vhd, vhd_boolean):
if not isinstance(ori, vhd_boolean):
pre, suf = "bool(", ")"
elif isinstance(vhd, vhd_std_logic):
if not isinstance(ori, vhd_std_logic):
pre, suf = "stdl(", ")"
elif isinstance(vhd, vhd_string):
if isinstance(ori, vhd_enum):
pre, suf = "%s'image(" % ori._type._name, ")"
return pre, suf
def writeIntSize(self, n):
# write size for large integers (beyond 32 bits signed)
# with some safety margin
if n >= 2**30:
size = int(math.ceil(math.log(n+1,2))) + 1 # sign bit!
self.write("%s'sd" % size)
def writeDeclaration(self, obj, name, kind="", dir="", endchar=";", constr=True):
if isinstance(obj, EnumItemType):
tipe = obj._type._name
elif isinstance(obj, _Ram):
tipe = "t_array_%s" % name
elt = inferVhdlObj(obj.elObj).toStr(True)
self.write("type %s is array(0 to %s-1) of %s;" % (tipe, obj.depth, elt))
self.writeline()
else:
vhd = inferVhdlObj(obj)
if isinstance(vhd, vhd_enum):
tipe = obj._val._type._name
else:
tipe = vhd.toStr(constr)
if kind: kind += " "
if dir: dir += " "
self.write("%s%s: %s%s%s" % (kind, name, dir, tipe, endchar))
def writeDeclarations(self):
if self.tree.hasPrint:
self.writeline()
self.write("variable L: line;")
for name, obj in self.tree.vardict.items():
if isinstance(obj, _loopInt):
continue # hack for loop vars
self.writeline()
self.writeDeclaration(obj, name, kind="variable")
def indent(self):
self.ind += ' ' * 4
def dedent(self):
self.ind = self.ind[:-4]
def visit_BinOp(self, node):
if isinstance(node.op, (ast.LShift, ast.RShift)):
self.shiftOp(node)
elif isinstance(node.op, (ast.BitAnd, ast.BitOr, ast.BitXor)):
self.BitOp(node)
elif isinstance(node.op, ast.Mod) and (self.context == _context.PRINT):
self.visit(node.left)
self.write(", ")
self.visit(node.right)
else:
self.BinOp(node)
def inferBinaryOpCast(self, node, left, right, op):
ns, os = node.vhd.size, node.vhdOri.size
ds = ns - os
if ds > 0:
if isinstance(left.vhd, vhd_vector) and isinstance(right.vhd, vhd_vector):
if isinstance(op, (ast.Add, ast.Sub)):
left.vhd.size = ns
# in general, resize right also
# for a simple name, resizing is not necessary
if not isinstance(right, ast.Name):
right.vhd.size = ns
node.vhdOri.size = ns
elif isinstance(op, ast.Mod):
right.vhd.size = ns
node.vhdOri.size = ns
elif isinstance(op, ast.FloorDiv):
left.vhd.size = ns
node.vhdOri.size = ns
elif isinstance(op, ast.Mult):
left.vhd.size += ds
node.vhdOri.size = ns
else:
raise AssertionError("unexpected op %s" % op)
elif isinstance(left.vhd, vhd_vector) and isinstance(right.vhd, vhd_int):
if isinstance(op, (ast.Add, ast.Sub, ast.Mod, ast.FloorDiv)):
left.vhd.size = ns
node.vhdOri.size = ns
elif isinstance(op, ast.Mult):
left.vhd.size += ds
node.vhdOri.size = 2 * left.vhd.size
else:
raise AssertionError("unexpected op %s" % op)
elif isinstance(left.vhd, vhd_int) and isinstance(right.vhd, vhd_vector):
if isinstance(op, (ast.Add, ast.Sub, ast.Mod, ast.FloorDiv)):
right.vhd.size = ns
node.vhdOri.size = ns
elif isinstance(op, ast.Mult):
node.vhdOri.size = 2 * right.vhd.size
else:
raise AssertionError("unexpected op %s" % op)
pre, suf = self.inferCast(node.vhd, node.vhdOri)
if pre == "":
pre, suf = "(", ")"
return pre, suf
def BinOp(self, node):
pre, suf = self.inferBinaryOpCast(node, node.left, node.right, node.op)
self.write(pre)
self.visit(node.left)
self.write(" %s " % opmap[type(node.op)])
self.visit(node.right)
self.write(suf)
def inferShiftOpCast(self, node, left, right, op):
ns, os = node.vhd.size, node.vhdOri.size
ds = ns - os
if ds > 0:
if isinstance(node.left.vhd, vhd_vector):
left.vhd.size = ns
node.vhdOri.size = ns
pre, suf = self.inferCast(node.vhd, node.vhdOri)
return pre, suf
def shiftOp(self, node):
pre, suf = self.inferShiftOpCast(node, node.left, node.right, node.op)
self.write(pre)
self.write("%s(" % opmap[type(node.op)])
self.visit(node.left)
self.write(", ")
self.visit(node.right)
self.write(")")
self.write(suf)
def BitOp(self, node):
pre, suf = self.inferCast(node.vhd, node.vhdOri)
self.write(pre)
self.write("(")
self.visit(node.left)
self.write(" %s " % opmap[type(node.op)])
self.visit(node.right)
self.write(")")
self.write(suf)
def visit_BoolOp(self, node):
if isinstance(node.vhd, vhd_std_logic):
self.write("stdl")
self.write("(")
self.visit(node.values[0])
for n in node.values[1:]:
self.write(" %s " % opmap[type(node.op)])
self.visit(n)
self.write(")")
def visit_UnaryOp(self, node):
pre, suf = self.inferCast(node.vhd, node.vhdOri)
self.write(pre)
self.write("(")
self.write(opmap[type(node.op)])
self.visit(node.operand)
self.write(")")
self.write(suf)
def visit_Attribute(self, node):
if isinstance(node.ctx, ast.Store):
self.setAttr(node)
else:
self.getAttr(node)
def setAttr(self, node):
assert node.attr == 'next'
self.SigAss = True
if isinstance(node.value, ast.Name):
sig = self.tree.symdict[node.value.id]
self.SigAss = sig._name
self.visit(node.value)
node.obj = self.getObj(node.value)
def getAttr(self, node):
assert isinstance(node.value, ast.Name), node.value
n = node.value.id
if n in self.tree.symdict:
obj = self.tree.symdict[n]
elif n in self.tree.vardict:
obj = self.tree.vardict[n]
else:
raise AssertionError("object not found")
if isinstance(obj, _Signal):
if node.attr == 'next':
sig = self.tree.symdict[node.value.id]
self.SigAss = obj._name
self.visit(node.value)
elif node.attr == 'posedge':
self.write("rising_edge(")
self.visit(node.value)
self.write(")")
elif node.attr == 'negedge':
self.write("falling_edge(")
self.visit(node.value)
self.write(")")
elif node.attr == 'val':
pre, suf = self.inferCast(node.vhd, node.vhdOri)
self.write(pre)
self.visit(node.value)
self.write(suf)
if isinstance(obj, (_Signal, intbv)):
if node.attr in ('min', 'max'):
self.write("%s" % node.obj)
if isinstance(obj, EnumType):
assert hasattr(obj, node.attr)
e = getattr(obj, node.attr)
self.write(e._toVHDL())
def visit_Assert(self, node):
# XXX
self.write("assert ")
self.visit(node.test)
self.indent()
self.writeline()
self.write('report "*** AssertionError ***"')
self.writeline()
self.write("severity error;")
self.dedent()
def visit_Assign(self, node):
lhs = node.targets[0]
rhs = node.value
# shortcut for expansion of ROM in case statement
if isinstance(node.value, ast.Subscript) and \
isinstance(node.value.slice, ast.Index) and \
isinstance(node.value.value.obj, _Rom):
rom = node.value.value.obj.rom
self.write("case ")
self.visit(node.value.slice)
self.write(" is")
self.indent()
size = lhs.vhd.size
for i, n in enumerate(rom):
self.writeline()
if i == len(rom)-1:
self.write("when others => ")
else:
self.write("when %s => " % i)
self.visit(lhs)
if self.SigAss:
self.write(' <= ')
self.SigAss = False
else:
self.write(' := ')
if isinstance(lhs.vhd, vhd_std_logic):
self.write("'%s';" % n)
elif isinstance(lhs.vhd, vhd_int):
self.write("%s;" % n)
else:
self.write('"%s";' % bin(n, size))
self.dedent()
self.writeline()
self.write("end case;")
return
elif isinstance(node.value, ast.ListComp):
# skip list comprehension assigns for now
return
# default behavior
convOpen, convClose = "", ""
if isinstance(lhs.vhd, vhd_type):
rhs.vhd = lhs.vhd
self.isLhs = True
self.visit(lhs)
self.isLhs = False
if self.SigAss:
if isinstance(lhs.value, ast.Name):
sig = self.tree.symdict[lhs.value.id]
if not sig._numeric:
#if not isinstance(rhs, ast.Num):
convOpen, convClose = "std_logic_vector(", ")"
self.write(' <= ')
self.SigAss = False
else:
self.write(' := ')
self.write(convOpen)
# node.expr.target = obj = self.getObj(node.nodes[0])
self.visit(rhs)
self.write(convClose)
self.write(';')
def visit_AugAssign(self, node):
# XXX apparently no signed context required for augmented assigns
left, op, right = node.target, node.op, node.value
isFunc = False
pre, suf = "", ""
if isinstance(op, (ast.Add, ast.Sub, ast.Mult, ast.Mod, ast.FloorDiv)):
pre, suf = self.inferBinaryOpCast(node, left, right, op)
elif isinstance(op, (ast.LShift, ast.RShift)):
isFunc = True
pre, suf = self.inferShiftOpCast(node, left, right, op)
self.visit(left)
self.write(" := ")
self.write(pre)
if isFunc:
self.write("%s(" % opmap[type(op)])
self.visit(left)
if isFunc:
self.write(", ")
else:
self.write(" %s " % opmap[type(op)])
self.visit(right)
if isFunc:
self.write(")")
self.write(suf)
self.write(";")
def visit_Break(self, node):
self.write("exit;")
def visit_Call(self, node):
fn = node.func
# assert isinstance(fn, astNode.Name)
f = self.getObj(fn)
opening, closing = '(', ')'
sep = ", "
if f is bool:
opening, closing = '', ''
arg = node.args[0]
arg.vhd = node.vhd
elif f is len:
val = self.getVal(node)
self.require(node, val is not None, "cannot calculate len")
            self.write(repr(val))
return
elif f is now:
pre, suf = self.inferCast(node.vhd, node.vhdOri)
self.write(pre)
self.write("(now / 1 ns)")
self.write(suf)
return
elif f is ord:
opening, closing = '', ''
if isinstance(node.args[0], ast.Str):
if len(node.args[0].s) > 1:
self.raiseError(node, _error.UnsupportedType, "Strings with length > 1" )
else:
node.args[0].s = ord(node.args[0].s)
elif f in (int, long):
opening, closing = '', ''
# convert number argument to integer
if isinstance(node.args[0], ast.Num):
node.args[0].n = int(node.args[0].n)
elif inspect.isclass(f) and issubclass(f, intbv):
pre, post = "", ""
arg = node.args[0]
if isinstance(node.vhd, vhd_unsigned):
pre, post = "to_unsigned(", ", %s)" % node.vhd.size
elif isinstance(node.vhd, vhd_signed):
pre, post = "to_signed(", ", %s)" % node.vhd.size
self.write(pre)
self.visit(arg)
self.write(post)
return
elif f == intbv.signed: # note equality comparison
# this call comes from a getattr
arg = fn.value
pre, suf = self.inferCast(node.vhd, node.vhdOri)
opening, closing = '', ''
if isinstance(arg.vhd, vhd_unsigned):
opening, closing = "signed(", ")"
self.write(pre)
self.write(opening)
self.visit(arg)
self.write(closing)
self.write(suf)
return
elif type(f) is ClassType and issubclass(f, Exception):
self.write(f.__name__)
elif f in (posedge, negedge):
opening, closing = ' ', ''
self.write(f.__name__)
elif f is delay:
self.visit(node.args[0])
self.write(" ns")
return
elif f is concat:
opening, closing = "unsigned'(", ")"
sep = " & "
elif hasattr(node, 'tree'):
self.write(node.tree.name)
else:
self.write(f.__name__)
if node.args:
self.write(opening)
self.visit(node.args[0])
for arg in node.args[1:]:
self.write(sep)
self.visit(arg)
self.write(closing)
if hasattr(node, 'tree'):
if node.tree.kind == _kind.TASK:
Visitor = _ConvertTaskVisitor
else:
Visitor = _ConvertFunctionVisitor
v = Visitor(node.tree, self.funcBuf)
v.visit(node.tree)
def visit_Compare(self, node):
n = node.vhd
ns = node.vhd.size
pre, suf = "(", ")"
if isinstance(n, vhd_std_logic):
pre = "stdl("
elif isinstance(n, vhd_unsigned):
pre, suf = "to_unsigned(", ", %s)" % ns
elif isinstance(n, vhd_signed):
pre, suf = "to_signed(", ", %s)" % ns
self.write(pre)
self.visit(node.left)
op, right = node.ops[0], node.comparators[0]
self.write(" %s " % opmap[type(op)])
self.visit(right)
self.write(suf)
def visit_Num(self, node):
n = node.n
if isinstance(node.vhd, vhd_std_logic):
self.write("'%s'" % n)
elif isinstance(node.vhd, vhd_boolean):
self.write("%s" % bool(n))
#elif isinstance(node.vhd, (vhd_unsigned, vhd_signed)):
# self.write('"%s"' % bin(n, node.vhd.size))
elif isinstance(node.vhd, vhd_unsigned):
if abs(n) < 2**31:
self.write("to_unsigned(%s, %s)" % (n, node.vhd.size))
else:
self.write('unsigned\'("%s")' % bin(n, node.vhd.size))
elif isinstance(node.vhd, vhd_signed):
if abs(n) < 2**31:
self.write("to_signed(%s, %s)" % (n, node.vhd.size))
else:
self.write('signed\'("%s")' % bin(n, node.vhd.size))
else:
if n < 0:
self.write("(")
self.write(n)
if n < 0:
self.write(")")
def visit_Str(self, node):
typemark = 'string'
if isinstance(node.vhd, vhd_unsigned):
typemark = 'unsigned'
self.write("%s'(\"%s\")" % (typemark, node.s))
def visit_Continue(self, node, *args):
self.write("next;")
def visit_Expr(self, node):
expr = node.value
# docstrings on unofficial places
if isinstance(expr, ast.Str):
doc = _makeDoc(expr.s, self.ind)
self.write(doc)
return
# skip extra semicolons
if isinstance(expr, ast.Num):
return
self.visit(expr)
# ugly hack to detect an orphan "task" call
if isinstance(expr, ast.Call) and hasattr(expr, 'tree'):
self.write(';')
def visit_IfExp(self, node):
pre, suf = self.inferCast(node.vhd, node.body.vhdOri)
self.write(pre)
self.visit(node.body)
self.write(suf)
self.write(' when ')
self.visit(node.test)
self.write(' else ')
pre, suf = self.inferCast(node.vhd, node.orelse.vhdOri)
self.write(pre)
self.visit(node.orelse)
self.write(suf)
def visit_For(self, node):
self.labelStack.append(node.breakLabel)
self.labelStack.append(node.loopLabel)
var = node.target.id
cf = node.iter
f = self.getObj(cf.func)
args = cf.args
assert len(args) <= 3
self.require(node, len(args) < 3, "explicit step not supported")
if f is range:
cmp = '<'
op = 'to'
oneoff = ''
if len(args) == 1:
start, stop, step = None, args[0], None
elif len(args) == 2:
start, stop, step = args[0], args[1], None
else:
start, stop, step = args
else: # downrange
cmp = '>='
op = 'downto'
if len(args) == 1:
start, stop, step = args[0], None, None
elif len(args) == 2:
start, stop, step = args[0], args[1], None
else:
start, stop, step = args
assert step is None
## if node.breakLabel.isActive:
## self.write("begin: %s" % node.breakLabel)
## self.writeline()
## if node.loopLabel.isActive:
## self.write("%s: " % node.loopLabel)
self.write("for %s in " % var)
if start is None:
self.write("0")
else:
self.visit(start)
if f is downrange:
self.write("-1")
self.write(" %s " % op)
if stop is None:
self.write("0")
else:
self.visit(stop)
if f is range:
self.write("-1")
self.write(" loop")
self.indent()
self.visit_stmt(node.body)
self.dedent()
self.writeline()
self.write("end loop;")
## if node.breakLabel.isActive:
## self.writeline()
## self.write("end")
self.labelStack.pop()
self.labelStack.pop()
def visit_FunctionDef(self, node):
raise AssertionError("To be implemented in subclass")
def visit_If(self, node):
if node.ignore:
return
# only map to VHDL case if it's a full case
if node.isFullCase:
self.mapToCase(node)
else:
self.mapToIf(node)
def mapToCase(self, node):
var = node.caseVar
obj = self.getObj(var)
self.write("case ")
self.visit(var)
self.write(" is")
self.indent()
for i, (test, suite) in enumerate(node.tests):
self.writeline()
item = test.case[1]
if isinstance(item, EnumItemType):
itemRepr = item._toVHDL()
else:
itemRepr = self.BitRepr(item, obj)
comment = ""
# potentially use default clause for last test
if (i == len(node.tests)-1) and not node.else_:
self.write("when others")
comment = " -- %s" % itemRepr
else:
self.write("when ")
self.write(itemRepr)
self.write(" =>%s" % comment)
self.indent()
self.visit_stmt(suite)
self.dedent()
if node.else_:
self.writeline()
self.write("when others =>")
self.indent()
self.visit_stmt(node.else_)
self.dedent()
self.dedent()
self.writeline()
self.write("end case;")
def mapToIf(self, node):
first = True
for test, suite in node.tests:
if first:
ifstring = "if "
first = False
else:
ifstring = "elsif "
self.writeline()
self.write(ifstring)
self.visit(test)
self.write(" then")
self.indent()
self.visit_stmt(suite)
self.dedent()
if node.else_:
self.writeline()
edges = self.getEdge(node)
if edges is not None:
edgeTests = [e._toVHDL() for e in edges]
self.write("elsif ")
self.write("or ".join(edgeTests))
self.write(" then")
else:
self.write("else")
self.indent()
self.visit_stmt(node.else_)
self.dedent()
self.writeline()
self.write("end if;")
def visit_ListComp(self, node):
pass # do nothing
def visit_Module(self, node):
for stmt in node.body:
self.visit(stmt)
def visit_Name(self, node):
if isinstance(node.ctx, ast.Store):
self.setName(node)
else:
self.getName(node)
def setName(self, node):
self.write(node.id)
def getName(self, node):
n = node.id
if n == 'False':
if isinstance(node.vhd, vhd_std_logic):
s = "'0'"
else:
s = "False"
elif n == 'True':
if isinstance(node.vhd, vhd_std_logic):
s = "'1'"
else:
s = "True"
elif n == 'None':
s = "(others => 'Z')"
elif n in self.tree.vardict:
s = n
obj = self.tree.vardict[n]
ori = inferVhdlObj(obj)
pre, suf = self.inferCast(node.vhd, ori)
s = "%s%s%s" % (pre, s, suf)
elif n in self.tree.argnames:
assert n in self.tree.symdict
obj = self.tree.symdict[n]
vhd = inferVhdlObj(obj)
if isinstance(vhd, vhd_std_logic) and isinstance(node.vhd, vhd_boolean):
s = "(%s = '1')" % n
else:
s = n
elif n in self.tree.symdict:
obj = self.tree.symdict[n]
s = n
if isinstance(obj, bool):
s = "'%s'" % int(obj)
elif isinstance(obj, (int, long)):
# print the symbol for integer in the global constant dict
if n in _constDict and obj == _constDict[n]:
assert abs(obj) < 2**31
if isinstance(node.vhd, vhd_int):
s = n
elif isinstance(node.vhd, vhd_std_logic):
s = "stdl(%s)" % n
elif isinstance(node.vhd, vhd_unsigned):
s = "to_unsigned(%s, %s)" % (n, node.vhd.size)
elif isinstance(node.vhd, vhd_signed):
s = "to_signed(%s, %s)" % (n, node.vhd.size)
else:
if isinstance(node.vhd, vhd_int):
s = self.IntRepr(obj)
elif isinstance(node.vhd, vhd_std_logic):
s = "'%s'" % int(obj)
elif isinstance(node.vhd, vhd_unsigned):
                        if abs(obj) < 2**31:
                            # emit the literal value; the bare name is only
                            # valid for entries of the global constant dict
                            s = "to_unsigned(%s, %s)" % (obj, node.vhd.size)
                        else:
                            s = 'unsigned\'("%s")' % bin(obj, node.vhd.size)
                    elif isinstance(node.vhd, vhd_signed):
                        if abs(obj) < 2**31:
                            s = "to_signed(%s, %s)" % (obj, node.vhd.size)
                        else:
                            s = 'signed\'("%s")' % bin(obj, node.vhd.size)
elif isinstance(obj, _Signal):
s = str(obj)
ori = inferVhdlObj(obj)
# print 'name', n
# support for non-numeric signals
if self.SigAss is not obj._name and not obj._numeric:
                    if obj._min < 0:
                        s = "signed(%s)" % s
                    else:
                        s = "unsigned(%s)" % s
pre, suf = self.inferCast(node.vhd, ori)
s = "%s%s%s" % (pre, s, suf)
elif _isMem(obj):
m = _getMemInfo(obj)
assert m.name
s = m.name
elif isinstance(obj, EnumItemType):
s = obj._toVHDL()
elif type(obj) is ClassType and issubclass(obj, Exception):
s = n
else:
self.raiseError(node, _error.UnsupportedType, "%s, %s" % (n, type(obj)))
else:
raise AssertionError("name ref: %s" % n)
self.write(s)
def visit_Pass(self, node):
self.write("null;")
def visit_Print(self, node):
argnr = 0
for s in node.format:
if isinstance(s, str):
self.write('write(L, string\'("%s"));' % s)
else:
a = node.args[argnr]
argnr += 1
if s.conv is int:
a.vhd = vhd_int()
else:
if isinstance(a.vhdOri, vhd_vector):
a.vhd = vhd_int()
elif isinstance(a.vhdOri, vhd_std_logic):
a.vhd = vhd_boolean()
elif isinstance(a.vhdOri, vhd_enum):
a.vhd = vhd_string()
self.write("write(L, ")
self.context = _context.PRINT
self.visit(a)
self.context = None
if s.justified == 'LEFT':
self.write(", justified=>LEFT")
if s.width:
self.write(", field=>%s" % s.width)
self.write(")")
self.write(';')
self.writeline()
self.write("writeline(output, L);")
def visit_Raise(self, node):
self.write('assert False report "End of Simulation" severity Failure;')
def visit_Return(self, node):
pass
def visit_Subscript(self, node):
if isinstance(node.slice, ast.Slice):
self.accessSlice(node)
else:
self.accessIndex(node)
def accessSlice(self, node):
if isinstance(node.value, ast.Call) and \
node.value.func.obj in (intbv, modbv) and \
_isConstant(node.value.args[0], self.tree.symdict):
c = self.getVal(node)._val
pre, post = "", ""
if node.vhd.size <= 30:
if isinstance(node.vhd, vhd_unsigned):
pre, post = "to_unsigned(", ", %s)" % node.vhd.size
elif isinstance(node.vhd, vhd_signed):
pre, post = "to_signed(", ", %s)" % node.vhd.size
else:
if isinstance(node.vhd, vhd_unsigned):
pre, post = "unsigned'(", ")"
c = '"%s"' % bin(c, node.vhd.size)
elif isinstance(node.vhd, vhd_signed):
pre, post = "signed'(", ")"
c = '"%s"' % bin(c, node.vhd.size)
self.write(pre)
self.write("%s" % c)
self.write(post)
return
pre, suf = self.inferCast(node.vhd, node.vhdOri)
if isinstance(node.value.vhd, vhd_signed) and isinstance(node.ctx, ast.Load):
pre = pre + "unsigned("
suf = ")" + suf
self.write(pre)
self.visit(node.value)
lower, upper = node.slice.lower, node.slice.upper
# special shortcut case for [:] slice
if lower is None and upper is None:
self.write(suf)
return
self.write("(")
if lower is None:
self.write("%s" % node.obj._nrbits)
else:
self.visit(lower)
self.write("-1 downto ")
if upper is None:
self.write("0")
else:
self.visit(upper)
self.write(")")
self.write(suf)
def accessIndex(self, node):
pre, suf = self.inferCast(node.vhd, node.vhdOri)
self.write(pre)
self.visit(node.value)
self.write("(")
#assert len(node.subs) == 1
self.visit(node.slice.value)
self.write(")")
self.write(suf)
def visit_stmt(self, body):
for stmt in body:
self.writeline()
self.visit(stmt)
# ugly hack to detect an orphan "task" call
if isinstance(stmt, ast.Call) and hasattr(stmt, 'tree'):
self.write(';')
def visit_Tuple(self, node):
        assert self.context is not None
sep = ", "
tpl = node.elts
self.visit(tpl[0])
for elt in tpl[1:]:
self.write(sep)
self.visit(elt)
def visit_While(self, node):
self.labelStack.append(node.breakLabel)
self.labelStack.append(node.loopLabel)
self.write("while ")
self.visit(node.test)
self.write(" loop")
self.indent()
self.visit_stmt(node.body)
self.dedent()
self.writeline()
self.write("end loop")
self.write(";")
self.labelStack.pop()
self.labelStack.pop()
def visit_Yield(self, node):
self.write("wait ")
yieldObj = self.getObj(node.value)
if isinstance(yieldObj, delay):
self.write("for ")
elif isinstance(yieldObj, _WaiterList):
self.write("until ")
else:
self.write("on ")
self.context = _context.YIELD
self.visit(node.value)
self.context = _context.UNKNOWN
self.write(";")
def manageEdges(self, ifnode, senslist):
""" Helper method to convert MyHDL style template into VHDL style"""
first = senslist[0]
if isinstance(first, _WaiterList):
bt = _WaiterList
elif isinstance(first, _Signal):
bt = _Signal
elif isinstance(first, delay):
bt = delay
assert bt
for e in senslist:
if not isinstance(e, bt):
self.raiseError(ifnode, "base type error in sensitivity list")
if len(senslist) >= 2 and bt == _WaiterList:
# ifnode = node.code.nodes[0]
# print ifnode
assert isinstance(ifnode, ast.If)
asyncEdges = []
for test, suite in ifnode.tests:
e = self.getEdge(test)
if e is None:
self.raiseError(ifnode, "No proper edge value test")
asyncEdges.append(e)
if not ifnode.else_:
self.raiseError(ifnode, "No separate else clause found")
edges = []
for s in senslist:
for e in asyncEdges:
if s is e:
break
else:
edges.append(s)
ifnode.edge = edges
senslist = [s.sig for s in senslist]
return senslist
class _ConvertAlwaysVisitor(_ConvertVisitor):
def __init__(self, tree, blockBuf, funcBuf):
_ConvertVisitor.__init__(self, tree, blockBuf)
self.funcBuf = funcBuf
def visit_FunctionDef(self, node):
self.writeDoc(node)
w = node.body[-1]
y = w.body[0]
if isinstance(y, ast.Expr):
y = y.value
assert isinstance(y, ast.Yield)
senslist = y.senslist
senslist = self.manageEdges(w.body[1], senslist)
singleEdge = (len(senslist) == 1) and isinstance(senslist[0], _WaiterList)
self.write("%s: process (" % self.tree.name)
if singleEdge:
self.write(senslist[0].sig)
else:
for e in senslist[:-1]:
self.write(e)
self.write(', ')
self.write(senslist[-1])
self.write(") is")
self.indent()
self.writeDeclarations()
self.dedent()
self.writeline()
self.write("begin")
self.indent()
if singleEdge:
self.writeline()
self.write("if %s then" % senslist[0]._toVHDL())
self.indent()
# assert isinstance(w.body, ast.stmt)
for stmt in w.body[1:]:
self.writeline()
self.visit(stmt)
self.dedent()
if singleEdge:
self.writeline()
self.write("end if;")
self.dedent()
self.writeline()
self.write("end process %s;" % self.tree.name)
self.writeline(2)
class _ConvertInitialVisitor(_ConvertVisitor):
def __init__(self, tree, blockBuf, funcBuf):
_ConvertVisitor.__init__(self, tree, blockBuf)
self.funcBuf = funcBuf
def visit_FunctionDef(self, node):
self.writeDoc(node)
self.write("%s: process is" % self.tree.name)
self.indent()
self.writeDeclarations()
self.dedent()
self.writeline()
self.write("begin")
self.indent()
self.visit_stmt(node.body)
self.writeline()
self.write("wait;")
self.dedent()
self.writeline()
self.write("end process %s;" % self.tree.name)
self.writeline(2)
class _ConvertAlwaysCombVisitor(_ConvertVisitor):
def __init__(self, tree, blockBuf, funcBuf):
_ConvertVisitor.__init__(self, tree, blockBuf)
self.funcBuf = funcBuf
def visit_FunctionDef(self, node):
self.writeDoc(node)
senslist = self.tree.senslist
self.write("%s: process (" % self.tree.name)
for e in senslist[:-1]:
self.write(e)
self.write(', ')
self.write(senslist[-1])
self.write(") is")
self.indent()
self.writeDeclarations()
self.dedent()
self.writeline()
self.write("begin")
self.indent()
self.visit_stmt(node.body)
self.dedent()
self.writeline()
self.write("end process %s;" % self.tree.name)
self.writeline(2)
class _ConvertSimpleAlwaysCombVisitor(_ConvertVisitor):
def __init__(self, tree, blockBuf, funcBuf):
_ConvertVisitor.__init__(self, tree, blockBuf)
self.funcBuf = funcBuf
def visit_Attribute(self, node):
if isinstance(node.ctx, ast.Store):
self.SigAss = True
if isinstance(node.value, ast.Name):
sig = self.tree.symdict[node.value.id]
self.SigAss = sig._name
self.visit(node.value)
else:
self.getAttr(node)
def visit_FunctionDef(self, node, *args):
self.writeDoc(node)
self.visit_stmt(node.body)
self.writeline(2)
class _ConvertAlwaysDecoVisitor(_ConvertVisitor):
def __init__(self, tree, blockBuf, funcBuf):
_ConvertVisitor.__init__(self, tree, blockBuf)
self.funcBuf = funcBuf
def visit_FunctionDef(self, node, *args):
self.writeDoc(node)
assert self.tree.senslist
senslist = self.tree.senslist
senslist = self.manageEdges(node.body[-1], senslist)
singleEdge = (len(senslist) == 1) and isinstance(senslist[0], _WaiterList)
self.write("%s: process (" % self.tree.name)
if singleEdge:
self.write(senslist[0].sig)
else:
for e in senslist[:-1]:
self.write(e)
self.write(', ')
self.write(senslist[-1])
self.write(") is")
self.indent()
self.writeDeclarations()
self.dedent()
self.writeline()
self.write("begin")
self.indent()
if singleEdge:
self.writeline()
self.write("if %s then" % senslist[0]._toVHDL())
self.indent()
self.visit_stmt(node.body)
self.dedent()
if singleEdge:
self.writeline()
self.write("end if;")
self.dedent()
self.writeline()
self.write("end process %s;" % self.tree.name)
self.writeline(2)
def _convertInitVal(reg, init):
pre, suf = '', ''
if isinstance(reg, _Signal):
tipe = reg._type
if not reg._numeric:
pre, suf = 'std_logic_vector(', ')'
else:
assert isinstance(reg, intbv)
tipe = intbv
if tipe is bool:
v = "'1'" if init else "'0'"
elif tipe is intbv:
vhd_tipe = 'unsigned'
if reg._min is not None and reg._min < 0:
vhd_tipe = 'signed'
if abs(init) < 2**31:
v = '%sto_%s(%s, %s)%s' % (pre, vhd_tipe, init, len(reg), suf)
else:
v = '%s%s\'"%s"%s' % (pre, vhd_tipe, bin(init, len(reg)), suf)
else:
assert isinstance(init, EnumItemType)
v = init._toVHDL()
return v
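    # Illustrative: a Signal(intbv(0, min=0, max=256)) register yields
    # "to_unsigned(0, 8)", while a bool register with init True yields "'1'"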
class _ConvertAlwaysSeqVisitor(_ConvertVisitor):
def __init__(self, tree, blockBuf, funcBuf):
_ConvertVisitor.__init__(self, tree, blockBuf)
self.funcBuf = funcBuf
def visit_FunctionDef(self, node, *args):
self.writeDoc(node)
assert self.tree.senslist
senslist = self.tree.senslist
edge = senslist[0]
reset = self.tree.reset
async = reset is not None and reset.async
sigregs = self.tree.sigregs
varregs = self.tree.varregs
self.write("%s: process (" % self.tree.name)
self.write(edge.sig)
if async:
self.write(', ')
self.write(reset)
self.write(") is")
self.indent()
self.writeDeclarations()
self.dedent()
self.writeline()
self.write("begin")
self.indent()
if not async:
self.writeline()
self.write("if %s then" % edge._toVHDL())
self.indent()
if reset is not None:
self.writeline()
self.write("if (%s = '%s') then" % (reset, int(reset.active)))
self.indent()
for s in sigregs:
self.writeline()
self.write("%s <= %s;" % (s, _convertInitVal(s, s._init)))
for v in varregs:
n, reg, init = v
self.writeline()
self.write("%s := %s;" % (n, _convertInitVal(reg, init)))
self.dedent()
self.writeline()
if async:
self.write("elsif %s then" % edge._toVHDL())
else:
self.write("else")
self.indent()
self.visit_stmt(node.body)
self.dedent()
if reset is not None:
self.writeline()
self.write("end if;")
self.dedent()
if not async:
self.writeline()
self.write("end if;")
self.dedent()
self.writeline()
self.write("end process %s;" % self.tree.name)
self.writeline(2)
class _ConvertFunctionVisitor(_ConvertVisitor):
def __init__(self, tree, funcBuf):
_ConvertVisitor.__init__(self, tree, funcBuf)
self.returnObj = tree.returnObj
self.returnLabel = _Label("RETURN")
def writeOutputDeclaration(self):
self.write(self.tree.vhd.toStr(constr=False))
def writeInputDeclarations(self):
endchar = ""
for name in self.tree.argnames:
self.write(endchar)
endchar = ";"
obj = self.tree.symdict[name]
self.writeline()
self.writeDeclaration(obj, name, dir="in", constr=False, endchar="")
def visit_FunctionDef(self, node):
self.write("function %s(" % self.tree.name)
self.indent()
self.writeInputDeclarations()
self.writeline()
self.write(") return ")
self.writeOutputDeclaration()
self.write(" is")
self.writeDeclarations()
self.dedent()
self.writeline()
self.write("begin")
self.indent()
self.visit_stmt(node.body)
self.dedent()
self.writeline()
self.write("end function %s;" % self.tree.name)
self.writeline(2)
def visit_Return(self, node):
self.write("return ")
self.visit(node.value)
self.write(";")
class _ConvertTaskVisitor(_ConvertVisitor):
def __init__(self, tree, funcBuf):
_ConvertVisitor.__init__(self, tree, funcBuf)
self.returnLabel = _Label("RETURN")
def writeInterfaceDeclarations(self):
endchar = ""
for name in self.tree.argnames:
self.write(endchar)
endchar = ";"
obj = self.tree.symdict[name]
output = name in self.tree.outputs
input = name in self.tree.inputs
inout = input and output
dir = (inout and "inout") or (output and "out") or "in"
self.writeline()
self.writeDeclaration(obj, name, dir=dir, constr=False, endchar="")
def visit_FunctionDef(self, node):
self.write("procedure %s" % self.tree.name)
if self.tree.argnames:
self.write("(")
self.indent()
self.writeInterfaceDeclarations()
self.write(")")
self.write(" is")
self.writeDeclarations()
self.dedent()
self.writeline()
self.write("begin")
self.indent()
self.visit_stmt(node.body)
self.dedent()
self.writeline()
self.write("end procedure %s;" % self.tree.name)
self.writeline(2)
# type inference
class vhd_type(object):
def __init__(self, size=0):
self.size = size
def __repr__(self):
return "%s(%s)" % (type(self).__name__, self.size)
class vhd_string(vhd_type):
pass
class vhd_enum(vhd_type):
def __init__(self, tipe):
self._type = tipe
class vhd_std_logic(vhd_type):
def __init__(self, size=0):
vhd_type.__init__(self)
self.size = 1
def toStr(self, constr=True):
return 'std_logic'
class vhd_boolean(vhd_type):
def __init__(self, size=0):
vhd_type.__init__(self)
self.size = 1
def toStr(self, constr=True):
return 'boolean'
class vhd_vector(vhd_type):
def __init__(self, size=0, lenStr=False):
vhd_type.__init__(self, size)
self.lenStr = lenStr
class vhd_unsigned(vhd_vector):
def toStr(self, constr=True):
if constr:
ls = self.lenStr
if ls:
return "unsigned(%s-1 downto 0)" % ls
else:
return "unsigned(%s downto 0)" % (self.size-1)
else:
return "unsigned"
class vhd_signed(vhd_vector):
def toStr(self, constr=True):
if constr:
ls = self.lenStr
if ls:
return "signed(%s-1 downto 0)" % ls
else:
return "signed(%s downto 0)" % (self.size-1)
else:
return "signed"
class vhd_int(vhd_type):
def toStr(self, constr=True):
return "integer"
class vhd_nat(vhd_int):
def toStr(self, constr=True):
return "natural"
class _loopInt(int):
pass
def maxType(o1, o2):
s1 = s2 = 0
if isinstance(o1, vhd_type):
s1 = o1.size
if isinstance(o2, vhd_type):
s2 = o2.size
s = max(s1, s2)
if isinstance(o1, vhd_signed) or isinstance(o2, vhd_signed):
return vhd_signed(s)
elif isinstance(o1, vhd_unsigned) or isinstance(o2, vhd_unsigned):
return vhd_unsigned(s)
elif isinstance(o1, vhd_std_logic) or isinstance(o2, vhd_std_logic):
return vhd_std_logic()
elif isinstance(o1, vhd_int) or isinstance(o2, vhd_int):
return vhd_int()
else:
return None
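    # e.g. maxType(vhd_unsigned(4), vhd_signed(8)) -> vhd_signed(8), and
    # maxType(vhd_int(), vhd_int()) -> vhd_int()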
def inferVhdlObj(obj):
vhd = None
if (isinstance(obj, _Signal) and obj._type is intbv) or \
isinstance(obj, intbv):
ls = getattr(obj, 'lenStr', False)
if obj.min < 0:
vhd = vhd_signed(size=len(obj), lenStr=ls)
else:
vhd = vhd_unsigned(size=len(obj), lenStr=ls)
elif (isinstance(obj, _Signal) and obj._type is bool) or \
isinstance(obj, bool):
vhd = vhd_std_logic()
elif (isinstance(obj, _Signal) and isinstance(obj._val, EnumItemType)) or\
isinstance(obj, EnumItemType):
if isinstance(obj, _Signal):
tipe = obj._val._type
else:
tipe = obj._type
vhd = vhd_enum(tipe)
elif isinstance(obj, (int, long)):
if obj >= 0:
vhd = vhd_nat()
else:
vhd = vhd_int()
# vhd = vhd_int()
return vhd
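    # e.g. inferVhdlObj(intbv(0)[8:]) -> vhd_unsigned(8), a Signal carrying a
    # bool -> vhd_std_logic(), and plain ints map to vhd_nat()/vhd_int()
    # depending on sign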
def maybeNegative(vhd):
if isinstance(vhd, vhd_signed):
return True
if isinstance(vhd, vhd_int) and not isinstance(vhd, vhd_nat):
return True
return False
class _AnnotateTypesVisitor(ast.NodeVisitor, _ConversionMixin):
def __init__(self, tree):
self.tree = tree
def visit_FunctionDef(self, node):
# don't visit arguments and decorators
for stmt in node.body:
self.visit(stmt)
def visit_Attribute(self, node):
self.generic_visit(node)
node.vhd = copy(node.value.vhd)
node.vhdOri = copy(node.vhd)
def visit_Assert(self, node):
self.visit(node.test)
node.test.vhd = vhd_boolean()
def visit_AugAssign(self, node):
self.visit(node.target)
self.visit(node.value)
if isinstance(node.op, (ast.BitOr, ast.BitAnd, ast.BitXor)):
node.value.vhd = copy(node.target.vhd)
node.vhdOri = copy(node.target.vhd)
elif isinstance(node.op, (ast.RShift, ast.LShift)):
node.value.vhd = vhd_int()
node.vhdOri = copy(node.target.vhd)
else:
node.left, node.right = node.target, node.value
self.inferBinOpType(node)
node.vhd = copy(node.target.vhd)
def visit_Call(self, node):
fn = node.func
# assert isinstance(fn, astNode.Name)
f = self.getObj(fn)
node.vhd = inferVhdlObj(node.obj)
self.generic_visit(node)
if f is concat:
s = 0
for a in node.args:
if isinstance(a, ast.Str):
a.vhd = vhd_unsigned(a.vhd.size)
s += a.vhd.size
node.vhd = vhd_unsigned(s)
elif f is bool:
node.vhd = vhd_boolean()
elif f in (int, long, ord):
node.vhd = vhd_int()
node.args[0].vhd = vhd_int()
elif f in (intbv, modbv):
node.vhd = vhd_int()
elif f is len:
node.vhd = vhd_int()
elif f is now:
node.vhd = vhd_nat()
elif f == intbv.signed: # note equality comparison
# this comes from a getattr
node.vhd = vhd_signed(fn.value.vhd.size)
elif hasattr(node, 'tree'):
v = _AnnotateTypesVisitor(node.tree)
v.visit(node.tree)
node.vhd = node.tree.vhd = inferVhdlObj(node.tree.returnObj)
node.vhdOri = copy(node.vhd)
def visit_Compare(self, node):
node.vhd = vhd_boolean()
self.generic_visit(node)
left, op, right = node.left, node.ops[0], node.comparators[0]
if isinstance(left.vhd, vhd_std_logic) or isinstance(right.vhd, vhd_std_logic):
left.vhd = right.vhd = vhd_std_logic()
elif isinstance(left.vhd, vhd_unsigned) and maybeNegative(right.vhd):
left.vhd = vhd_signed(left.vhd.size + 1)
elif maybeNegative(left.vhd) and isinstance(right.vhd, vhd_unsigned):
right.vhd = vhd_signed(right.vhd.size + 1)
node.vhdOri = copy(node.vhd)
def visit_Str(self, node):
node.vhd = vhd_string()
node.vhdOri = copy(node.vhd)
def visit_Num(self, node):
if node.n < 0:
node.vhd = vhd_int()
else:
node.vhd = vhd_nat()
node.vhdOri = copy(node.vhd)
def visit_For(self, node):
var = node.target.id
# make it possible to detect loop variable
self.tree.vardict[var] = _loopInt(-1)
self.generic_visit(node)
def visit_Name(self, node):
if node.id in self.tree.vardict:
node.obj = self.tree.vardict[node.id]
node.vhd = inferVhdlObj(node.obj)
node.vhdOri = copy(node.vhd)
def visit_BinOp(self, node):
self.generic_visit(node)
if isinstance(node.op, (ast.LShift, ast.RShift)):
self.inferShiftType(node)
elif isinstance(node.op, (ast.BitAnd, ast.BitOr, ast.BitXor)):
self.inferBitOpType(node)
elif isinstance(node.op, ast.Mod) and isinstance(node.left, ast.Str): # format string
pass
else:
self.inferBinOpType(node)
def inferShiftType(self, node):
node.vhd = copy(node.left.vhd)
node.right.vhd = vhd_nat()
node.vhdOri = copy(node.vhd)
def inferBitOpType(self, node):
obj = maxType(node.left.vhd, node.right.vhd)
node.vhd = node.left.vhd = node.right.vhd = obj
node.vhdOri = copy(node.vhd)
def inferBinOpType(self, node):
left, op, right = node.left, node.op, node.right
if isinstance(left.vhd, (vhd_boolean, vhd_std_logic)):
left.vhd = vhd_unsigned(1)
if isinstance(right.vhd, (vhd_boolean, vhd_std_logic)):
right.vhd = vhd_unsigned(1)
if maybeNegative(left.vhd) and isinstance(right.vhd, vhd_unsigned):
right.vhd = vhd_signed(right.vhd.size + 1)
if isinstance(left.vhd, vhd_unsigned) and maybeNegative(right.vhd):
left.vhd = vhd_signed(left.vhd.size + 1)
l, r = left.vhd, right.vhd
ls, rs = l.size, r.size
if isinstance(r, vhd_vector) and isinstance(l, vhd_vector):
if isinstance(op, (ast.Add, ast.Sub)):
s = max(ls, rs)
elif isinstance(op, ast.Mod):
s = rs
elif isinstance(op, ast.FloorDiv):
s = ls
elif isinstance(op, ast.Mult):
s = ls + rs
else:
raise AssertionError("unexpected op %s" % op)
elif isinstance(l, vhd_vector) and isinstance(r, vhd_int):
if isinstance(op, (ast.Add, ast.Sub, ast.Mod, ast.FloorDiv)):
s = ls
elif isinstance(op, ast.Mult):
s = 2 * ls
else:
raise AssertionError("unexpected op %s" % op)
elif isinstance(l, vhd_int) and isinstance(r, vhd_vector):
if isinstance(op, (ast.Add, ast.Sub, ast.Mod, ast.FloorDiv)):
s = rs
elif isinstance(op, ast.Mult):
s = 2 * rs
else:
raise AssertionError("unexpected op %s" % op)
if isinstance(l, vhd_int) and isinstance(r, vhd_int):
node.vhd = vhd_int()
elif isinstance(l, (vhd_signed, vhd_int)) and isinstance(r, (vhd_signed, vhd_int)):
node.vhd = vhd_signed(s)
elif isinstance(l, (vhd_unsigned, vhd_int)) and isinstance(r, (vhd_unsigned, vhd_int)):
node.vhd = vhd_unsigned(s)
else:
node.vhd = vhd_int()
node.vhdOri = copy(node.vhd)
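        # Size rules at work (illustrative): unsigned(4) * unsigned(6) yields
        # vhd_unsigned(10); unsigned(4) + nat keeps vhd_unsigned(4); mixing an
        # unsigned operand with a possibly negative one first widens the
        # unsigned side to signed(size + 1)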
def visit_BoolOp(self, node):
self.generic_visit(node)
for n in node.values:
n.vhd = vhd_boolean()
node.vhd = vhd_boolean()
node.vhdOri = copy(node.vhd)
def visit_If(self, node):
if node.ignore:
return
self.generic_visit(node)
for test, suite in node.tests:
test.vhd = vhd_boolean()
def visit_IfExp(self, node):
self.generic_visit(node)
node.test.vhd = vhd_boolean()
def visit_ListComp(self, node):
pass # do nothing
def visit_Subscript(self, node):
if isinstance(node.slice, ast.Slice):
self.accessSlice(node)
else:
self.accessIndex(node)
def accessSlice(self, node):
self.generic_visit(node)
lower = node.value.vhd.size
t = type(node.value.vhd)
# node.expr.vhd = vhd_unsigned(node.expr.vhd.size)
if node.slice.lower:
node.slice.lower.vhd = vhd_int()
lower = self.getVal(node.slice.lower)
upper = 0
if node.slice.upper:
node.slice.upper.vhd = vhd_int()
upper = self.getVal(node.slice.upper)
if isinstance(node.ctx, ast.Store):
node.vhd = t(lower-upper)
else:
node.vhd = vhd_unsigned(lower-upper)
node.vhdOri = copy(node.vhd)
def accessIndex(self, node):
self.generic_visit(node)
node.vhd = vhd_std_logic() # XXX default
node.slice.value.vhd = vhd_int()
obj = node.value.obj
if isinstance(obj, list):
assert len(obj)
node.vhd = inferVhdlObj(obj[0])
elif isinstance(obj, _Ram):
node.vhd = inferVhdlObj(obj.elObj)
elif isinstance(obj, _Rom):
node.vhd = vhd_int()
elif isinstance(obj, intbv):
node.vhd = vhd_std_logic()
node.vhdOri = copy(node.vhd)
def visit_UnaryOp(self, node):
self.visit(node.operand)
node.vhd = copy(node.operand.vhd)
if isinstance(node.op, ast.Not):
# postpone this optimization until initial values are written
# if isinstance(node.operand.vhd, vhd_std_logic):
# node.vhd = vhd_std_logic()
# else:
# node.vhd = node.operand.vhd = vhd_boolean()
node.vhd = node.operand.vhd = vhd_boolean()
elif isinstance(node.op, ast.USub):
if isinstance(node.vhd, vhd_unsigned):
node.vhd = vhd_signed(node.vhd.size+1)
elif isinstance(node.vhd, vhd_nat):
node.vhd = vhd_int()
node.vhdOri = copy(node.vhd)
def visit_While(self, node):
self.generic_visit(node)
node.test.vhd = vhd_boolean()
def _annotateTypes(genlist):
for tree in genlist:
if isinstance(tree, _UserVhdlCode):
continue
v = _AnnotateTypesVisitor(tree)
v.visit(tree)
| cordoval/myhdl-python | myhdl/conversion/_toVHDL.py | Python | lgpl-2.1 | 74,347 | ["VisIt"] | b4680559850f66333a455139339e4c13dc7f9af5c2c6ad796b4953d0c67ac151 |
## \file
## \ingroup tutorial_roofit
## \notebook
##
## \brief Organization and simultaneous fits: easy interactive access to workspace contents - CINT
## to CLING code migration
##
## \macro_code
##
## \date February 2018
## \authors Clemens Lange, Wouter Verkerke (C++ version)
import ROOT
def fillWorkspace(w):
# Create pdf and fill workspace
# --------------------------------------------------------
# Declare observable x
x = ROOT.RooRealVar("x", "x", 0, 10)
    # Create two Gaussian PDFs g1(x,mean1,sigma) and g2(x,mean2,sigma) and
# their parameters
mean = ROOT.RooRealVar("mean", "mean of gaussians", 5, 0, 10)
sigma1 = ROOT.RooRealVar("sigma1", "width of gaussians", 0.5)
sigma2 = ROOT.RooRealVar("sigma2", "width of gaussians", 1)
sig1 = ROOT.RooGaussian("sig1", "Signal component 1", x, mean, sigma1)
sig2 = ROOT.RooGaussian("sig2", "Signal component 2", x, mean, sigma2)
# Build Chebychev polynomial p.d.f.
a0 = ROOT.RooRealVar("a0", "a0", 0.5, 0., 1.)
a1 = ROOT.RooRealVar("a1", "a1", -0.2, 0., 1.)
bkg = ROOT.RooChebychev("bkg", "Background", x, ROOT.RooArgList(a0, a1))
# Sum the signal components into a composite signal p.d.f.
sig1frac = ROOT.RooRealVar(
"sig1frac", "fraction of component 1 in signal", 0.8, 0., 1.)
sig = ROOT.RooAddPdf(
"sig", "Signal", ROOT.RooArgList(
sig1, sig2), ROOT.RooArgList(sig1frac))
# Sum the composite signal and background
bkgfrac = ROOT.RooRealVar("bkgfrac", "fraction of background", 0.5, 0., 1.)
model = ROOT.RooAddPdf(
"model",
"g1+g2+a",
ROOT.RooArgList(
bkg,
sig),
ROOT.RooArgList(bkgfrac))
w.Import(model)
# Create and fill workspace
# ------------------------------------------------
# Create a workspace named 'w'
# With CINT, w could export its contents to a same-name C++ namespace
# ('namespace w'), but this no longer works in CLING, so this tutorial
# shows how to migrate such code.
w = ROOT.RooWorkspace("w", ROOT.kTRUE)
# Fill workspace with p.d.f. and data in a separate function
fillWorkspace(w)
# Print workspace contents
w.Print()
# this no longer works with CLING;
# use the normal workspace accessor methods instead
# Use workspace contents
# ----------------------------------------------
# Old syntax used the namespace prefix operator to access the workspace contents
#
#d = w.model.generate(w.x,1000)
#r = w.model.fitTo(*d)
# use normal workspace methods
model = w.pdf("model")
x = w.var("x")
d = model.generate(ROOT.RooArgSet(x), 1000)
r = model.fitTo(d)
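# Note: by default fitTo does not return a usable RooFitResult; pass the
# ROOT.RooFit.Save() option to keep and inspect it, e.g.:
# r = model.fitTo(d, ROOT.RooFit.Save())
# r.Print("v")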
# old syntax to access the variable x
# frame = w.x.frame()
frame = x.frame()
d.plotOn(frame)
# Old syntax to omit the 'w.' prefix:
# NB: the 'w.' prefix could be omitted if namespace w was imported into the
# local namespace in the usual C++ way
#
# using namespace w
# model.plotOn(frame)
# model.plotOn(frame, ROOT.RooFit.Components(bkg), ROOT.RooFit.LineStyle(ROOT.kDashed))
# correct syntax
bkg = w.pdf("bkg")
model.plotOn(frame)
ras_bkg = ROOT.RooArgSet(bkg)
model.plotOn(frame, ROOT.RooFit.Components(ras_bkg),
ROOT.RooFit.LineStyle(ROOT.kDashed))
# Draw the frame on the canvas
c = ROOT.TCanvas("rf509_wsinteractive", "rf509_wsinteractive", 600, 600)
ROOT.gPad.SetLeftMargin(0.15)
frame.GetYaxis().SetTitleOffset(1.4)
frame.Draw()
c.SaveAs("rf509_wsinteractive.png")
| karies/root | tutorials/roofit/rf509_wsinteractive.py | Python | lgpl-2.1 | 3,412 | ["Gaussian"] | c34be77d27f80544d81750ea804bcddcfb0556caae5667ffdc835f4b00f45a52 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# isjobactive - sandbox job kill helper
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import cgi
import cgitb
cgitb.enable()
from shared.functionality.isjobactive import main
from shared.cgiscriptstub import run_cgi_script_possibly_with_cert
run_cgi_script_possibly_with_cert(main)
| heromod/migrid | mig/cgi-sid/isjobactive.py | Python | gpl-2.0 | 1,116 | ["Brian"] | cd26cf9856e9a994b1bde8e772d91dda5e66b68afe0eab2695b101b1648124c8 |
import unittest
from circlator import external_progs
class TestExternalProgs(unittest.TestCase):
def test_canu_version(self):
'''Test canu version'''
self.assertEqual('1.6', self.check_regex_version_extraction('canu', """
Canu 1.6
""" ))
def test_spades_version(self):
'''Test spades version'''
self.assertEqual('3.11.0', self.check_regex_version_extraction('spades', """
SPAdes v3.11.0
""" ))
self.assertEqual('3.7.1', self.check_regex_version_extraction('spades', """
SPAdes v3.7.1
""" ))
self.assertEqual('3.5.0', self.check_regex_version_extraction('spades', """
SPAdes genome assembler v.3.5.0
""" ))
def test_prodigal_version(self):
'''Test prodigal version'''
self.assertEqual('2.60', self.check_regex_version_extraction('prodigal', """
Prodigal V2.60: October, 2011
""" ))
def test_bwa_version(self):
'''Test bwa version'''
self.assertEqual('0.7.10', self.check_regex_version_extraction('bwa', """
Program: bwa (alignment via Burrows-Wheeler transformation)
Version: 0.7.10-r789
Contact: Heng Li <lh3@sanger.ac.uk>
""" ))
self.assertEqual('0.7.12', self.check_regex_version_extraction('bwa', """
Program: bwa (alignment via Burrows-Wheeler transformation)
Version: 0.7.12-r1039
Contact: Heng Li <lh3@sanger.ac.uk>
""" ))
def test_nucmer_version(self):
'''Test nucmer version'''
self.assertEqual('3.1', self.check_regex_version_extraction('nucmer', """
nucmer
NUCmer (NUCleotide MUMmer) version 3.1
""" ))
self.assertEqual('4.0.0', self.check_regex_version_extraction('nucmer', """
4.0.0beta1
""" ))
def test_samtools_version(self):
'''Test samtools version'''
self.assertEqual('1.6', self.check_regex_version_extraction('samtools', """
Program: samtools (Tools for alignments in the SAM format)
Version: 1.6 (using htslib 1.6)
""" ))
def test_samtools_original_version(self):
'''Test samtools original version'''
self.assertEqual('0.1.19', self.check_regex_version_extraction('samtools', """
Program: samtools (Tools for alignments in the SAM format)
Version: 0.1.19-44428cd
Usage: samtools <command> [options]""" ))
def check_regex_version_extraction(self, prog, raw_version_output ):
cmd, regex = external_progs.prog_to_version_cmd[prog]
raw_output_lines = raw_version_output.splitlines()
for line in raw_output_lines:
hits = regex.search(line)
if hits:
return str(hits.group(1))
return None
| sanger-pathogens/circlator | circlator/tests/external_progs_test.py | Python | gpl-3.0 | 2,735 | ["BWA"] | 4f4f216ca42cdd206bd2867601fc23c4ef3a280ca81410a8e95283560a4ee7d7 |
import matplotlib.pyplot as plt
import numpy as np
gaussian_numbers = np.random.randn(1000)
plt.hist(gaussian_numbers)
plt.title("Gaussian Histogram")
plt.xlabel("Value")
plt.ylabel("Frequency")
fig = plt.gcf()
plt.show()
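# A minimal variation (assuming a recent matplotlib, where `density` replaced
# the older `normed` keyword): explicit binning with a normalized histogram.
# plt.hist(gaussian_numbers, bins=30, density=True)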
| andyraib/data-storage | python_scripts/Python_DS/histogram.py | Python | apache-2.0 | 226 | ["Gaussian"] | edd0317e1ae222f75cf4e3cf586b054915f5a649ac3994270eed26adaf572cbc |
# -*- coding: utf-8 -*-
#
# Bio-Formats documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 29 15:42:49 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.insert(0, os.path.abspath('../sphinx/_ext'))
import re
import subprocess
from datetime import datetime
def popen(args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
copy = os.environ.copy()
shell = (sys.platform == "win32")
return subprocess.Popen(args,
env=copy,
stdin=stdin,
stdout=stdout,
stderr=stderr,
shell=shell)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.)
extensions = ['sphinx.ext.extlinks', 'edit_on_github']
## Configuration for the edit_on_github extension
edit_on_github_project = 'openmicroscopy/bioformats'
edit_on_github_branch = 'develop'
edit_on_github_prefix = 'docs/sphinx'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['../sphinx/_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Bio-Formats'
title = u'%s Documentation' % project
author = u'The Open Microscopy Environment'
now = datetime.now()
copyright = u'2000-%d, %s ' % (now.year, author)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
try:
if "BF_RELEASE" in os.environ and len(os.environ.get('BF_RELEASE')) > 0:
release = os.environ.get('BF_RELEASE')
else:
p = popen(['git','describe'])
tag = p.communicate()
split_tag = re.split("^(v)?(.*?)(-[0-9]+)?((-)g(.*?))?$",tag[0])
# The full version, including alpha/beta/rc tags.
release = split_tag[2]
split_release = re.split("^([0-9]\.[0-9])(\.[0-9]+)(.*?)$",release)
# The short X.Y version.
version = split_release[1]
except:
version = 'UNKNOWN'
release = 'UNKNOWN'
rst_prolog = """
.. warning:: **This documentation is a PREVIEW for the as-yet unreleased Bio-Formats 5.1.
It is provided for the benefit of developers and should be considered a work in
progress until the public release.** Please refer to the documentation for the
`latest Bio-Formats 5.0.x version
<http://www.openmicroscopy.org/site/support/bio-formats5/>`_ or the
:legacy_plone:`previous versions <>` page to find documentation for the
version you are using.
"""
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# Variables used to define Github extlinks
if "SOURCE_BRANCH" in os.environ and len(os.environ.get('SOURCE_BRANCH')) > 0:
source_branch = os.environ.get('SOURCE_BRANCH')
else:
source_branch = 'develop'
if "SOURCE_USER" in os.environ and len(os.environ.get('SOURCE_USER')) > 0:
user = os.environ.get('SOURCE_USER')
else:
user = 'openmicroscopy'
github_root = 'https://github.com/'
bf_github_root = github_root + user + '/bioformats/'
bf_github_tree = bf_github_root + 'tree/' + source_branch + '/'
bf_github_blob = bf_github_root + 'blob/' + source_branch + '/'
gpl_formats = bf_github_blob + 'components/formats-gpl/src/loci/formats/'
bsd_formats = bf_github_blob + 'components/formats-bsd/src/loci/formats/'
# Variables used to define Jenkins extlinks
jenkins_root = 'http://ci.openmicroscopy.org'
jenkins_job_root = jenkins_root + '/job'
jenkins_view_root = jenkins_root + '/view'
# Variables used to define other extlinks
cvs_root = 'http://cvs.openmicroscopy.org.uk'
trac_root = 'http://trac.openmicroscopy.org.uk/ome'
oo_root = 'http://www.openmicroscopy.org'
oo_site_root = oo_root + '/site'
lists_root = 'http://lists.openmicroscopy.org.uk'
downloads_root = 'http://downloads.openmicroscopy.org'
if "OMERODOC_URI" in os.environ and len(os.environ.get('OMERODOC_URI')) > 0:
omerodoc_uri = os.environ.get('OMERODOC_URI')
else:
omerodoc_uri = oo_site_root + '/support/omero5'
extlinks = {
# Trac links
'ticket' : (trac_root + '/ticket/%s', '#'),
'milestone' : (trac_root + '/milestone/%s', ''),
'report' : (trac_root + '/report/%s', ''),
# Github links
'source' : (bf_github_blob + '%s', ''),
'sourcedir' : (bf_github_tree + '%s', ''),
'bfreader' : (gpl_formats + 'in/%s', ''),
'bsd-reader' : (bsd_formats + 'in/%s', ''),
'bfwriter' : (gpl_formats + 'out/' + '%s', ''),
'bsd-writer' : (bsd_formats + 'out/' + '%s', ''),
# Jenkins links
'jenkins' : (jenkins_root + '/%s', ''),
'jenkinsjob' : (jenkins_job_root + '/%s', ''),
'jenkinsview' : (jenkins_view_root + '/%s', ''),
# Mailing list/forum links
'mailinglist' : (lists_root + '/mailman/listinfo/%s', ''),
'forum' : (oo_root + '/community/%s', ''),
# Plone links. Separating them out so that we can add prefixes and
# suffixes during testing.
'community_plone' : (oo_site_root + '/community/%s', ''),
'products_plone' : (oo_site_root + '/products/%s', ''),
'model_doc' : (oo_site_root + '/support/ome-model/%s', ''),
'legacy_plone' : (oo_site_root + '/support/legacy/%s', ''),
'about_plone' : (oo_site_root + '/about/%s', ''),
'team_plone' : (oo_site_root + '/team/%s', ''),
'faq_plone' : (oo_site_root + '/support/faq/%s', ''),
'omerodoc' : (omerodoc_uri + '/%s', ''),
'devs_doc' : (oo_site_root + '/support/contributing/%s', ''),
# Downloads
'downloads' : (downloads_root + '/latest/bio-formats5.1/%s', ''),
'javadoc' : (downloads_root + '/latest/bio-formats5.1/api/%s', ''),
'doxygen' : (downloads_root + '/latest/bio-formats5.1/doxygen/%s', ''),
# Miscellaneous links
'doi' : ('http://dx.doi.org/%s', ''),
'schema' : (oo_root + '/Schemas/Documentation/Generated/%s', '')
}
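# With sphinx.ext.extlinks, each entry above defines a shorthand role, e.g.
# :ticket:`123` links to trac_root + '/ticket/123' and renders as '#123'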
rst_epilog = """
.. _Hibernate: http://www.hibernate.org
.. _ZeroC: http://www.zeroc.com
.. _Ice: http://www.zeroc.com
.. |Poor| image:: /images/crystal-1.png
:alt: 1 - Poor
.. |Fair| image:: /images/crystal-2.png
:alt: 2 - Fair
.. |Good| image:: /images/crystal-3.png
:alt: 3 - Good
.. |Very Good| image:: /images/crystal-4.png
:alt: 4 - Very Good
.. |Outstanding| image:: /images/crystal-5.png
:alt: 5 - Outstanding
.. |no| image:: /images/crystal-no.png
:alt: No
.. |yes| image:: /images/crystal-yes.png
:alt: Yes
"""
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = { '**' : ['globalbftoc.html', 'pagetoc.html',
'relations.html', 'searchbox.html', 'sourcelink.html'] }
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright …" is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Bio-Formatsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
'classoptions': ',oneside',
'pointsize': '10pt',
'inputenc': '%% Unused',
'utf8extra': '%% Unused',
'fontenc' : '%% Unused',
'fontpkg': '%% Unused',
'babel': '%% Unused',
'printindex': '''\\phantomsection
\\addcontentsline{toc}{part}{\indexname}
\\printindex''',
'preamble': '''
\input{../../preamble.tex}
''',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
target = project + '-' + release + '.tex'
latex_documents = [
(master_doc, target, title, author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "images/bio-formats-logo.pdf"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = True
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
latex_show_urls = 'footnote'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'OMERO', title, author, 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project, title, author, 'omedocs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for the linkcheck builder ----------------------------------------
# Regular expressions that match URIs that should not be checked when doing a linkcheck build
linkcheck_ignore = ['http://www.openmicroscopy.org/site/support/faq',]
import urllib
brokenfiles_url = 'https://raw.github.com/openmicroscopy/sphinx-ignore-links/master/broken_links.txt'
linkcheck_ignore.extend(urllib.urlopen(brokenfiles_url).read().splitlines())
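# Hedged hardening sketch (not in the original config): the unguarded fetch
# above aborts the whole docs build when the network is unreachable. A
# fail-safe variant could look like this (Python 2 urllib, as used above):
#
#     try:
#         linkcheck_ignore.extend(
#             urllib.urlopen(brokenfiles_url).read().splitlines())
#     except IOError:
#         pass  # offline build: keep only the static ignore list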
|
ctrueden/bioformats
|
docs/sphinx/conf.py
|
Python
|
gpl-2.0
| 13,943
|
[
"CRYSTAL"
] |
9ad998a59f42923cc8885932a7515750d1455eccf6ea256b03d35a1419a800dc
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import os
from gzip import open as gzip_open
from itertools import chain
from .iterator import FastaIterator, FastqIterator, QseqIterator
FILEEXT_MAP = {'fna': (FastaIterator, open),
'fna.gz': (FastaIterator, gzip_open),
'fasta': (FastaIterator, open),
'fasta.gz': (FastaIterator, gzip_open),
'qual': (FastaIterator, open),
'qual.gz': (FastaIterator, gzip_open),
'fastq': (FastqIterator, open),
'fastq.gz': (FastqIterator, gzip_open),
'fq': (FastqIterator, open),
'fq.gz': (FastqIterator, gzip_open),
'qseq': (QseqIterator, open),
'qseq.gz': (QseqIterator, gzip_open)}
def _determine_types_and_openers(files):
"""Attempt to determine the appropriate iterators and openers"""
if files is None:
return [], []
iters = []
openers = []
for fpath in files:
if fpath.endswith('.gz'):
ext = '.'.join(fpath.rsplit('.', 2)[-2:])
else:
ext = fpath.rsplit('.', 1)[-1]
i, o = FILEEXT_MAP.get(ext, (None, None))
if i is None:
raise IOError("Unknown filetype for %s" % fpath)
iters.append(i)
openers.append(o)
return iters, openers
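# Worked example of the dispatch above (hypothetical file names):
# 'reads.fna.gz'.rsplit('.', 2)[-2:] -> ['fna', 'gz'] -> ext = 'fna.gz',
# which FILEEXT_MAP resolves to (FastaIterator, gzip_open); a plain
# 'reads.qseq' resolves to (QseqIterator, open).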
def _is_single_iterator_type(iters):
"""Determine if there is a single or multiple type of iterator
    If iters is [], this method returns True; the empty case is considered
    a single iterator type.
"""
if iters:
return len(set(iters)) == 1
else:
return True
def _open_or_none(opener, f):
"""Open a file or returns None"""
if not opener:
return None
else:
name = opener.__name__
if not os.path.exists(f):
raise IOError("%s does not appear to exist!" % f)
try:
opened = opener(f)
except IOError:
raise IOError("Could not open %s with %s!" % (f, name))
return opened
def load(seqs, qual=None, constructor=None, **kwargs):
"""Construct the appropriate iterator for all your processing needs
This method will attempt to open all files correctly and to feed the
appropriate objects into the correct iterators.
    ``seqs`` can list multiple types of files (e.g., FASTA and FASTQ), but if
    multiple file types are specified, ``qual`` must be None.
Parameters
----------
    seqs : str or list of str
        One or more sequence file paths.
    qual : str or list of str, or None
        One or more qual file paths, or None.
    constructor : SequenceIterator, optional
        Force a specific iterator constructor onto all of seqs.
    kwargs : dict
        Passed into the subsequent iterators.
Returns
-------
    SequenceIterator
        An ``Iterable`` over the parsed sequence records.
See Also
--------
SequenceIterator
FastaIterator
FastqIterator
QseqIterator
"""
if not seqs:
raise ValueError("Must supply sequences.")
if isinstance(seqs, str):
seqs = [seqs]
if isinstance(qual, str):
qual = [qual]
# i -> iters, o -> openers
if constructor is not None:
i_seqs = [constructor] * len(seqs)
o_seqs = [open] * len(seqs)
else:
i_seqs, o_seqs = _determine_types_and_openers(seqs)
i_qual, o_qual = _determine_types_and_openers(qual)
seqs = [_open_or_none(o, f) for f, o in zip(seqs, o_seqs)]
qual = [_open_or_none(o, f) for f, o in zip(qual or [], o_qual or [])]
if not qual:
qual = None
if not _is_single_iterator_type(i_seqs) and qual is not None:
# chaining Fasta/Fastq for sequence is easy, but it gets nasty quick
# if seqs is a mix of fasta/fastq, with qual coming in as there aren't
# 1-1 mappings. This could be addressed if necessary, but seems like
# an unnecessary block of code right now
raise ValueError("Cannot handle multiple sequence file types and qual "
"file(s) at the same time.")
if _is_single_iterator_type(i_seqs):
seqs_constructor = i_seqs[0]
gen = seqs_constructor(seq=seqs, qual=qual, **kwargs)
else:
gen = chain(*[c(seq=[fp], **kwargs) for c, fp in zip(i_seqs, seqs)])
return gen
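# Minimal usage sketch (hypothetical paths): mixed file types are chained
# into a single iterator, provided no qual files are supplied.
#
#     seq_it = load(['sample1.fasta', 'sample2.fq.gz'])
#     for rec in seq_it:
#         handle(rec)  # `handle` is a placeholder for downstream processing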
|
JWDebelius/scikit-bio
|
skbio/parse/sequences/factory.py
|
Python
|
bsd-3-clause
| 4,532
|
[
"scikit-bio"
] |
4c8ff35cdc3ff2fdc3f468415a7ebbd112c4e414b44cf825c71337bec39585ad
|
import pandas as pd
import numpy as np
import myio as Mio
import mymath as Mmath
def getAndStoreDump(simname,increment,rootdir,dumpdir='Dump_Files/',pickledir='Pickle_Files/',override=False,bounds=None,**kwargs):
return Mio.getAndStore(readDumpFile,getDumpFilename,override=override,subdirstore=rootdir+pickledir,simname=simname,increment=increment,subdirread=rootdir+dumpdir,bounds=bounds)
def readDumpFile(simname,increment,subdirread,bounds,**kwargs):
filename = subdirread + getDumpFilename(simname,increment) + '.dump'
headerlines = getHeaderLinesDumpFile(filename)
boxdims = getBoxDimDumpFile(filename)
res = cutDownDumpFile(filename,headerlines,bounds,boxdims)
return {'boxdims': boxdims, 'array': unscaleCoords(res,boxdims)}
def cutDownDumpFile(filename,headerlines,bounds,boxdims,indices=range(2,5),chunksize=1000000):
if bounds is not None:
boundsscaled = scaleBounds(bounds,boxdims)
reader = pd.read_csv(filename,sep=' ',skiprows=headerlines,iterator=True,chunksize=chunksize)
chunkall = None
for chunk in reader:
values = chunk.values
indexall = np.ones((values.shape[0],), dtype=bool)
if bounds is not None:
for [posmin, posmax], index in zip(boundsscaled,indices):
valuescurr = values[:,index]
indexall = indexall & (posmin <= valuescurr) & (valuescurr <= posmax)
chunknew = values[indexall,:]
try:
chunkall = np.concatenate((chunkall,chunknew),axis=0)
except ValueError:
chunkall = chunknew
return chunkall
def unscaleCoords(dumparray,boxdims,indexstart=2):
for i in range(3):
dumparray[:,i+indexstart] = Mmath.rescaleCoords(dumparray[:,i+indexstart],[0,1],boxdims[i,:])
return dumparray
def scaleBounds(bounds,boxdims):
boundsnew = np.empty(np.shape(bounds))
for i in range(3): # dimensions
boundsnew[i,:] = Mmath.rescaleCoords(bounds[i,:],boxdims[i,:],[0,1]) # scale bounds
return boundsnew
def getDumpFilename(simname,increment,**kwargs):
return simname + '.' + str(increment)
def getBoxDimDumpFile(filename):
lines, _ = Mio.readFileForKey(filename,'ITEM: BOX',4)
return np.loadtxt(lines[1:])
def getHeaderLinesDumpFile(filename):
_, linenum = Mio.readFileForKey(filename,'ITEM: ATOMS')
return linenum
def getBoxDimLogFile(filename): # get box dimensions from lammps log file
lines, _ = Mio.readFileForKey(filename,'Created orthogonal box')
line = lines[0]
indicesstart = Mio.findCharIndices(line,'(')
indicesend = Mio.findCharIndices(line,')')
bounds = [line[istart+1:iend] for istart, iend in zip(indicesstart,indicesend)]
return np.transpose(np.loadtxt(bounds))
def getOptimizedConfig(filename): # just get the last line of the MS run (output: numpy array)
    blockall = []
    lineprev = None # guard against 'Loop time' appearing before any data line
    with open(filename,'r') as f:
for line in f:
line = line.strip()
if line.startswith('Loop time'): # stop reading, store results
blockall.append(lineprev)
lineprev = line
return np.loadtxt(blockall)
def readLogFile(filename): # get all lines of MS/MD run (output: list of numpy arrays)
blockall, blockoflines = [], []
with open(filename,'r') as f:
for line in f:
line = line.strip()
if line.startswith('Step'): # start reading, get key, clear previous results
blockoflines = []
elif line.startswith('Loop time'): # stop reading, store results
array = np.loadtxt(blockoflines) # convert to numerical array
blockall.append(array)
else:
blockoflines.append(line)
return blockall
# obsolete
def readLogFileObs(filename): # reads in all numeric lines from log file
return Mio.readBlockFile(filename,[],[],fortranoption=False,keyfun=getKeyLogFileObs)
def getKeyLogFileObs(line,keystarts,keyends,keyold):
if 'Step' not in line: # to remove some spurious chunks found by readBlockFile
return None
else:
return keyold + 1
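# Usage sketch (hypothetical file name): extract every thermo block from a
# LAMMPS log, where readLogFile starts a block at each 'Step ...' header and
# closes it at the matching 'Loop time' line.
#
#     blocks = readLogFile('equilibrate.log')
#     first_run = blocks[0]  # one numpy array per thermo block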
|
varun-rajan/python-modules
|
mdutilities_io.py
|
Python
|
gpl-2.0
| 4,140
|
[
"LAMMPS"
] |
d80d65931b0c73a9eef1d312ebb581e3a451be785a356c877cdbfa23c620a4fb
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
USAGE: python list_repos.py --url_galaxy_ref <url galaxy reference> --url_galaxy_target <url galaxy target>
--adminkey_galaxy_target <galaxy target admin api key> --url_toolshed <url toolshed with tools>
--output_yaml <yaml output file>
'''
from bioblend.toolshed import ToolShedInstance
from bioblend.galaxy import GalaxyInstance
import yaml
import argparse
import pprint
def toolshed_to_dict(options):
ts = ToolShedInstance(url=options.url_toolshed)
ts.verify = False
repositories = ts.repositories.get_repositories()
listrepos = []
for repo in repositories:
revisions = ts.repositories.get_ordered_installable_revisions(repo['name'], repo['owner'])
if len(revisions) > 0:
revision = revisions[-1:]
listrepos.append({'name': repo['name'], 'owner': repo['owner'], 'tool_panel_section_id': '', 'tool_shed_url': options.url_toolshed,
'tool_panel_section_label': '', 'revisions': revision, 'verify_ssl': False})
listrepos = set_section_id(ts, listrepos, options.url_galaxy_ref)
dict_repos = {'api_key': options.adminkey_galaxy_target, 'galaxy_instance': options.url_galaxy_target, 'tools': listrepos}
write_yaml(dict_repos, options.output_yaml)
def return_panel(guid, reftools):
for reftool in reftools:
if reftool['id'] == guid:
return [reftool['panel_section_id'], reftool['panel_section_name']]
def set_section_id(ts, repos, url_galaxy_ref):
gi = GalaxyInstance(url_galaxy_ref)
gi.verify = False
tools = gi.tools.get_tools()
clean_repos = []
for repo in repos:
for revision in repo['revisions']:
if not repo['tool_panel_section_id']:
revision_info = ts.repositories.get_repository_revision_install_info(repo['name'], repo['owner'], revision)
if 'valid_tools' in revision_info[1]:
for tool in revision_info[1]['valid_tools']:
panel_info = return_panel(tool['guid'], tools)
if panel_info:
repo['tool_panel_section_id'] = panel_info[0]
repo['tool_panel_section_label'] = panel_info[1]
clean_repos.append(repo)
break
return clean_repos
def write_yaml(repositories, yamlfile):
pprint.pprint(repositories)
with open(yamlfile, 'w') as outfile:
outfile.write( yaml.safe_dump(repositories, encoding='utf-8', allow_unicode=True) )
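# Shape of the emitted YAML, sketched with placeholder values; the keys
# mirror dict_repos as built in toolshed_to_dict above:
#
#     api_key: <admin API key>
#     galaxy_instance: https://galaxy.example.org
#     tools:
#     - name: some_tool
#       owner: some_owner
#       revisions: [abc1234567de]
#       tool_panel_section_id: ''
#       tool_panel_section_label: ''
#       tool_shed_url: https://toolshed.example.org
#       verify_ssl: false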
if __name__ == "__main__":
# Arguments parser
parser = argparse.ArgumentParser(description='')
parser.add_argument('--url_galaxy_ref', help='Reference Galaxy instance URL/IP address')
parser.add_argument('--url_galaxy_target', help='Target Galaxy instance URL/IP address')
parser.add_argument('--adminkey_galaxy_target', help='Galaxy admin user API key')
    parser.add_argument('--url_toolshed', help='The Tool Shed URL to install the tools from')
parser.add_argument('--output_yaml', help='tool list to install yaml file')
args = parser.parse_args()
toolshed_to_dict(args)
|
C3BI-pasteur-fr/Galaxy-playbook
|
galaxy-pasteur/roles/galaxy_tools/files/list_repos.py
|
Python
|
gpl-2.0
| 3,186
|
[
"Galaxy"
] |
4ef9473ae02764d1c97ddffa3add6ebab402cc64dbd02b4c65bf2fa4bfac14c5
|
# coding=utf-8
from datetime import timedelta
from django.contrib.auth.tokens import default_token_generator
from django.core import mail
from django.test import TestCase, LiveServerTestCase
from django.utils.timezone import now
from selenium import webdriver
from alert.userHandling.models import UserProfile
class UserTest(TestCase):
fixtures = ['authtest_data.json']
def test_creating_a_new_user(self):
"""Can we register a new user in the front end?"""
params = {
'username': 'pan',
'email': 'pan@courtlistener.com',
'password1': 'a',
'password2': 'a',
'first_name': 'dora',
'last_name': '☠☠☠☠☠☠☠☠☠☠☠',
'skip_me_if_alive': '',
}
response = self.client.post('/register/', params, follow=True)
self.assertRedirects(response, 'http://testserver/register/success/?next=/')
def test_signing_in(self):
"""Can we create a user on the backend then sign them into the front end?"""
params = {
'username': 'pandora',
'password': 'password',
}
r = self.client.post('/sign-in/', params, follow=True)
self.assertRedirects(r, 'http://testserver/')
def test_confirming_an_email_address(self):
"""Tests whether we can confirm the case where an email is associated
with a single account.
"""
        # Update the expiration since the fixture's key expired some time ago.
u = UserProfile.objects.get(pk=2)
u.key_expires = now() + timedelta(days=2)
u.save()
r = self.client.get('/email/confirm/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/')
self.assertEqual(200, r.status_code,
msg="Did not get 200 code when activating account. "
"Instead got %s" % r.status_code)
self.assertIn('has been confirmed', r.content,
msg="Test string not found in response.content")
def test_confirming_an_email_when_it_is_associated_with_multiple_accounts(self):
# Test the trickier case when an email is associated with many accounts
UserProfile.objects.filter(pk__in=(3, 4,))\
.update(key_expires=now() + timedelta(days=2))
r = self.client.get('/email/confirm/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab/')
self.assertIn('has been confirmed', r.content,
msg="Test string not found in response.content")
self.assertEqual(200, r.status_code,
msg="Did not get 200 code when activating account. "
"Instead got %s" % r.status_code)
ups = UserProfile.objects.filter(pk__in=(3, 4,))
for up in ups:
self.assertTrue(up.email_confirmed)
class LiveUserTest(LiveServerTestCase):
fixtures = ['authtest_data.json']
@classmethod
def setUpClass(cls):
cls.selenium = webdriver.PhantomJS(
executable_path='/usr/local/phantomjs/phantomjs',
service_log_path='/var/log/courtlistener/django.log',
)
super(LiveUserTest, cls).setUpClass()
@classmethod
def tearDownClass(cls):
cls.selenium.quit()
super(LiveUserTest, cls).tearDownClass()
def test_reset_password_using_the_HTML(self):
"""Can we use the HTML form to send a reset email?
This test checks that the email goes out and that the status code
returned is valid.
"""
self.selenium.get('%s%s' % (self.live_server_url, '/reset-password/'))
email_input = self.selenium.find_element_by_name("email")
email_input.send_keys('pandora@courtlistener.com')
email_input.submit()
#self.selenium.save_screenshot('/home/mlissner/phantom.png')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
self.selenium.current_url,
'%s%s' % (self.live_server_url,
'/reset-password/instructions-sent/')
)
def test_set_password_using_the_HTML(self):
"""Can we reset our password after generating a confirmation link?"""
# Generate a token and use it to visit a generated reset URL
up = UserProfile.objects.get(pk=1)
token = default_token_generator.make_token(up.user)
url = '%s/confirm-password/%s/%s/' % (
self.live_server_url,
up.user.pk,
token,
)
self.selenium.get(url)
#self.selenium.save_screenshot('/home/mlissner/phantom.png')
self.assertIn(
"Enter New Password",
self.selenium.page_source
)
# Next, change the user's password and submit the form.
pwd1 = self.selenium.find_element_by_name('new_password1')
pwd1.send_keys('password')
pwd2 = self.selenium.find_element_by_name('new_password2')
pwd2.send_keys('password')
pwd2.submit()
self.assertEqual(
self.selenium.current_url,
'%s%s' % (self.live_server_url, '/reset-password/complete/')
)
|
shashi792/courtlistener
|
alert/userHandling/tests.py
|
Python
|
agpl-3.0
| 5,129
|
[
"VisIt"
] |
41f16f75528ecba3bd61cf06c874d7119c91ece153794601ed1de18d160c0a83
|
# Licensed under an MIT open source license - see LICENSE
import numpy as np
import astropy.units as u
import networkx as nx
import warnings
import scipy.ndimage as nd
from astropy.nddata import extract_array
import astropy.modeling as mod
from astropy.modeling.models import Gaussian1D, Const1D
import sys
if sys.version_info[0] >= 3:
import _pickle as pickle
else:
import cPickle as pickle
from .length import (init_lengths, main_length, make_final_skeletons,
pre_graph, longest_path, prune_graph)
from .pixel_ident import pix_identify
from .utilities import pad_image, in_ipynb, red_chisq
from .base_conversions import UnitConverter
from .rollinghough import rht
from .width import (radial_profile, gaussian_model, fit_radial_model,
nonparam_width)
class FilamentNDBase(object):
"""
Analysis and properties of a single filament object.
"""
@property
def pixel_coords(self):
return self._pixel_coords
@property
def pixel_extents(self):
return [tuple([coord.min() for coord in self._orig_pixel_coords]),
tuple([coord.max() for coord in self._orig_pixel_coords])]
def position(self, world_coord=False):
'''
Return the centre position of the filament based on the pixel
coordinates.
'''
centres = [np.median(coord) for coord in self._orig_pixel_coords]
if world_coord:
if hasattr(self._converter, '_wcs'):
wcs = self._converter._wcs
# Convert to world coordinates
posn_tuple = centres + [0]
w_centres = wcs.all_pix2world(*posn_tuple)
# Attach units
wu_centres = [val * u.Unit(wcs.wcs.cunit[i]) for i, val
in enumerate(w_centres)]
return wu_centres
else:
warnings.warn("No WCS information given. Returning pixel"
" position.")
return [centre * u.pix for centre in centres]
else:
return [centre * u.pix for centre in centres]
class Filament2D(FilamentNDBase):
"""
Analysis and properties of a 2D filament.
Parameters
----------
pixel_coords : tuple of `~np.ndarray`
Pixel coordinates as a set of arrays (i.e., the output from
`~numpy.where`).
converter : `~fil_finder.base_conversions.UnitConverter`, optional
Unit converter class.
wcs : `~astropy.wcs.WCS`, optional
WCS information for the pixel set.
distance : `~astropy.units.Quantity`, optional
        Distance to the region described by the pixel set. Required for
        conversions to physical units.
"""
def __init__(self, pixel_coords, converter=None, wcs=None, distance=None):
super(Filament2D, self).__init__()
self._pixel_coords = pixel_coords
        # Keep a separate record of the initial skeleton pixels
self._orig_pixel_coords = pixel_coords
if converter is not None:
self._converter = converter
else:
self._converter = UnitConverter(wcs=wcs, distance=distance)
def image_slicer(self, image, out_shape, pad_size=0):
'''
Create a cut-out of a given image to some output shape with optional
padding on the edges. The given image must be on the same pixel grid
as the image used to create the skeleton.
Parameters
----------
image : `~numpy.ndarray` or `~astropy.units.Quantity`
Image to slice out around the skeleton.
out_shape : tuple
2D output shape.
pad_size : int, optional
Number of pixels to pad.
Returns
-------
out_arr : `~numpy.ndarray` or `~astropy.units.Quantity`
Output array with given shape.
'''
arr_cent = [(out_shape[0] - pad_size * 2 - 1) / 2. +
self.pixel_extents[0][0],
(out_shape[1] - pad_size * 2 - 1) / 2. +
self.pixel_extents[0][1]]
out_arr = extract_array(image, out_shape, arr_cent)
# astropy v4.0 now retains the unit. So only add a unit
# when out_arr isn't a Quantity
if hasattr(image, "unit") and not hasattr(out_arr, 'unit'):
out_arr = out_arr * image.unit
return out_arr
def skeleton(self, pad_size=0, corner_pix=None, out_type='all'):
'''
Create a mask from the pixel coordinates.
Parameters
----------
pad_size : int, optional
Number of pixels to pad along each edge.
corner_pix : tuple of ints, optional
The position of the left-bottom corner of the pixels in the
skeleton. Used for offsetting the location of the pixels.
out_type : {"all", "longpath"}, optional
Return the entire skeleton or just the longest path. Default is to
return the whole skeleton.
Returns
-------
mask : `~numpy.ndarray`
Boolean mask containing the skeleton pixels.
'''
pad_size = int(pad_size)
if pad_size < 0:
raise ValueError("pad_size must be a positive integer.")
if corner_pix is None:
# Place the smallest pixel in the set at the pad size
corner_pix = [pad_size, pad_size]
out_types = ['all', 'longpath']
if out_type not in out_types:
raise ValueError("out_type must be 'all' or 'longpath'.")
y_shape = self.pixel_extents[1][0] - self.pixel_extents[0][0] + \
2 * pad_size + 1
x_shape = self.pixel_extents[1][1] - self.pixel_extents[0][1] + \
2 * pad_size + 1
mask = np.zeros((y_shape, x_shape), dtype=bool)
if out_type == 'all':
pixels = self.pixel_coords
else:
if not hasattr(self, '_longpath_pixel_coords'):
raise AttributeError("longest path is not defined. Run "
"`Filament2D.skeleton_analysis` first.")
pixels = self.longpath_pixel_coords
mask[pixels[0] - self.pixel_extents[0][0] + corner_pix[0],
pixels[1] - self.pixel_extents[0][1] + corner_pix[1]] = True
return mask
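    # Usage sketch (assumes `fil` is an existing Filament2D): build a padded
    # boolean mask of the full skeleton, e.g. for plotting.
    #
    #     mask = fil.skeleton(pad_size=5)
    #     plt.imshow(mask, origin='lower')  # assumes matplotlib.pyplot as plt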
def skeleton_analysis(self, image, verbose=False, save_png=False,
save_name=None, prune_criteria='all',
relintens_thresh=0.2, max_prune_iter=10,
branch_thresh=0 * u.pix):
'''
Run the skeleton analysis.
Separates skeleton structures into branches and intersections. Branches
below the pruning criteria are removed. The structure is converted into
a graph object to find the longest path. The pruned skeleton is used in
the subsequent analysis steps.
Parameters
----------
image : `~numpy.ndarray` or `~astropy.units.Quantity`
Data the filament was extracted from.
verbose : bool, optional
Show intermediate plots.
save_png : bool, optional
Save the plots in verbose mode.
save_name : str, optional
Prefix for the saved plots.
prune_criteria : {'all', 'intensity', 'length'}, optional
Choose the property to base pruning on. 'all' requires that the
branch fails to satisfy the length and relative intensity checks.
relintens_thresh : float, optional
Value between 0 and 1 that sets the relative importance of the
intensity-to-length criteria when pruning. Only used if
`prune_criteria='all'`.
max_prune_iter : int, optional
Maximum number of pruning iterations to apply.
branch_thresh : `~astropy.units.Quantity`, optional
Minimum length for a branch to be eligible to be pruned.
'''
# NOTE:
# All of these functions are essentially the same as those used for
# fil_finder_2D. For now, they all are expecting lists with each
# filament property as an element. Everything is wrapped to be a list
# because of this, but will be removed once fil_finder_2D is removed.
# A lot of this can be streamlined in that process.
if save_png and save_name is None:
raise ValueError("save_name must be given when save_png=True.")
# Must have a pad size of 1 for the morphological operations.
pad_size = 1
self._pad_size = pad_size
branch_thresh = self._converter.to_pixel(branch_thresh)
# Do we need to pad the image before slicing?
input_image = pad_image(image, self.pixel_extents, pad_size)
skel_mask = self.skeleton(pad_size=pad_size)
# If the padded image matches the mask size, don't need additional
# slicing
if input_image.shape != skel_mask.shape:
input_image = self.image_slicer(input_image, skel_mask.shape,
pad_size=pad_size)
# The mask and sliced image better have the same shape!
if input_image.shape != skel_mask.shape:
raise AssertionError("Sliced image shape does not equal the mask "
"shape. This should never happen! If you see"
" this issue, please report it as a bug!")
iter = 0
while True:
skel_mask = self.skeleton(pad_size=pad_size)
interpts, hubs, ends, filbranches, labeled_mask = \
pix_identify([skel_mask], 1)
branch_properties = init_lengths(labeled_mask, filbranches,
[[(0, 0), (0, 0)]],
input_image)
edge_list, nodes, loop_edges = \
pre_graph(labeled_mask, branch_properties, interpts, ends)
max_path, extremum, G = \
longest_path(edge_list, nodes,
verbose=False,
skeleton_arrays=labeled_mask)
# Skip pruning if skeleton has only one branch
if len(G[0].nodes()) > 1:
updated_lists = \
prune_graph(G, nodes, edge_list, max_path, labeled_mask,
branch_properties, loop_edges,
prune_criteria=prune_criteria,
length_thresh=branch_thresh.value,
relintens_thresh=relintens_thresh,
max_iter=1)
labeled_mask, edge_list, nodes, branch_properties = \
updated_lists
final_fil_arrays =\
make_final_skeletons(labeled_mask, interpts,
verbose=False)
# Update the skeleton pixels
good_pix = np.where(final_fil_arrays[0])
self._pixel_coords = \
(good_pix[0] + self.pixel_extents[0][0] - pad_size,
good_pix[1] + self.pixel_extents[0][1] - pad_size)
if iter == 0:
prev_G = G[0]
iter += 1
if iter == max_prune_iter:
break
else:
continue
# Isomorphic comparison is failing for networkx 2.1
# I don't understand the error, so we'll instead require
# that the nodes be the same. This should be safe as
# pruning can only remove nodes.
# edge_match = iso.numerical_edge_match('weight', 1)
# if nx.is_isomorphic(prev_G, G[0],
# edge_match=edge_match):
            # the node attribute was removed in networkx 2.4; the graphs live
            # in the list G, so check G[0] rather than the list itself.
            if hasattr(G[0], 'node'):
                if prev_G.node == G[0].node:
                    break
            elif hasattr(G[0], 'nodes'):
                if prev_G.nodes == G[0].nodes:
                    break
prev_G = G[0]
iter += 1
if iter >= max_prune_iter:
warnings.warn("Graph pruning reached max iterations.")
break
self._graph = G[0]
# Run final analyses for plotting, etc.
max_path, extremum, G = \
longest_path(edge_list, nodes,
verbose=verbose,
save_png=save_png,
save_name="{0}_graphstruct.png".format(save_name),
skeleton_arrays=labeled_mask)
length_output = main_length(max_path, edge_list, labeled_mask,
interpts,
branch_properties["length"],
1.,
verbose=verbose, save_png=save_png,
save_name="{0}_longestpath.png".format(save_name))
lengths, long_path_array = length_output
good_long_pix = np.where(long_path_array[0])
self._longpath_pixel_coords = \
(good_long_pix[0] + self.pixel_extents[0][0] - pad_size,
good_long_pix[1] + self.pixel_extents[0][1] - pad_size)
self._length = lengths[0] * u.pix
final_fil_arrays =\
make_final_skeletons(labeled_mask, interpts,
verbose=verbose, save_png=save_png,
save_name="{0}_finalskeleton.png".format(save_name))
# Track the final intersection and end points
interpts, hubs, ends = \
pix_identify([final_fil_arrays[0].copy()], 1)[:3]
# Adjust intersection and end points to be in the original array
# positions
corr_inters = []
for inter in interpts[0]:
per_inter = []
for ints in inter:
per_inter.append((ints[0] + self.pixel_extents[0][0] - pad_size,
ints[1] + self.pixel_extents[0][1] - pad_size))
corr_inters.append(per_inter)
self._interpts = corr_inters
corr_ends = []
for end in ends[0]:
corr_ends.append((end[0] + self.pixel_extents[0][0] - pad_size,
end[1] + self.pixel_extents[0][1] - pad_size))
self._endpts = corr_ends
# Update the skeleton pixels
good_pix = np.where(final_fil_arrays[0])
self._pixel_coords = \
(good_pix[0] + self.pixel_extents[0][0] - pad_size,
good_pix[1] + self.pixel_extents[0][1] - pad_size)
self._branch_properties = \
{'length': branch_properties['length'][0] * u.pix,
'intensity': np.array(branch_properties['intensity'][0]),
'number': branch_properties['number'][0],
'pixels': branch_properties['pixels'][0]}
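    # Usage sketch (assumes `fil` is a Filament2D and `image` the source
    # data): pruning plus the longest-path search populate the length and
    # branch properties used by the later analysis steps.
    #
    #     fil.skeleton_analysis(image, branch_thresh=5 * u.pix,
    #                           prune_criteria='length')
    #     fil.length()  # longest-path length, in pixels by default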
@property
def branch_properties(self):
'''
Dictionary with branch lengths, average intensity, and pixels.
'''
return self._branch_properties
def branch_pts(self, img_coords=False):
'''
Pixels within each skeleton branch.
Parameters
----------
img_coords : bool
Return the branch pts in coordinates of the original image.
'''
if not img_coords:
return self.branch_properties['pixels']
# Transform from per-filament to image coords
img_branch_pts = []
for bpts in self.branch_properties['pixels']:
bpts_copy = bpts.copy()
bpts_copy[:, 0] = bpts[:, 0] + self.pixel_extents[0][0] - self._pad_size
bpts_copy[:, 1] = bpts[:, 1] + self.pixel_extents[0][1] - self._pad_size
img_branch_pts.append(bpts_copy)
return img_branch_pts
@property
def intersec_pts(self):
'''
        Skeleton pixels associated with intersections.
'''
return self._interpts
@property
def end_pts(self):
'''
        Skeleton pixels associated with branch ends.
'''
return self._endpts
def length(self, unit=u.pixel):
'''
        The longest path length of the skeleton.
Parameters
----------
unit : `~astropy.units.Unit`, optional
Pixel, angular, or physical unit to convert to.
'''
return self._converter.from_pixel(self._length, unit)
@property
def longpath_pixel_coords(self):
'''
Pixel coordinates of the longest path.
'''
return self._longpath_pixel_coords
@property
def graph(self):
'''
The networkx graph for the filament.
'''
return self._graph
def plot_graph(self, save_name=None, layout_func=nx.spring_layout):
'''
Plot the graph structure.
Parameters
----------
save_name : str, optional
Name of saved plot. A plot is only saved if a name is given.
layout_func : networkx layout function, optional
Layout function from networkx. Defaults to `spring_layout`.
'''
import matplotlib.pyplot as plt
G = self.graph
elist = [(u, v) for (u, v, d) in G.edges(data=True)]
posns = layout_func(G)
nx.draw_networkx_nodes(G, posns, node_size=200)
nx.draw_networkx_edges(G, posns, edgelist=elist, width=2)
nx.draw_networkx_labels(G, posns, font_size=10,
font_family='sans-serif')
plt.axis('off')
if save_name is not None:
# Save the plot
plt.savefig(save_name)
plt.close()
else:
plt.show()
# Add in the ipynb checker
def rht_analysis(self, radius=10 * u.pix, ntheta=180,
background_percentile=25):
'''
Use the RHT to find the filament orientation and dispersion of the
longest path.
Parameters
----------
radius : `~astropy.units.Quantity`, optional
Radius of the region to compute the orientation within. Converted
to pixel units and rounded to the nearest integer.
ntheta : int, optional
Number of angles to sample at. Default is 180.
background_percentile : float, optional
Float between 0 and 100 that sets a background level for the RHT
distribution before calculating orientation and curvature.
'''
if not hasattr(radius, 'unit'):
warnings.warn("Radius has no given units. Assuming pixel units.")
radius *= u.pix
radius = int(round(self._converter.to_pixel(radius).value))
longpath_arr = self.skeleton(out_type='longpath')
longpath_arr = np.fliplr(longpath_arr)
theta, R, quant = rht(longpath_arr, radius, ntheta,
background_percentile)
twofive, mean, sevenfive = quant
self._orientation = mean * u.rad
if sevenfive > twofive:
self._curvature = np.abs(sevenfive - twofive) * u.rad
else:
self._curvature = (np.abs(sevenfive - twofive) + np.pi) * u.rad
self._orientation_hist = [theta, R]
self._orientation_quantiles = [twofive, sevenfive]
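    # Usage sketch: once the RHT has run, the mean orientation and the
    # IQR-based curvature are available as properties.
    #
    #     fil.rht_analysis(radius=10 * u.pix, ntheta=180)
    #     fil.orientation, fil.curvature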
@property
def orientation_hist(self):
'''
Distribution of orientations from the RHT along the longest path.
Contains the angles of the distribution bins and the values in those
bins.
'''
return self._orientation_hist
@property
def orientation(self):
'''
Mean orientation of the filament along the longest path.
'''
return self._orientation
@property
def curvature(self):
'''
Interquartile range of the RHT orientation distribution along the
longest path.
'''
return self._curvature
def plot_rht_distrib(self, save_name=None):
'''
Plot the RHT distribution from `Filament2D.rht_analysis`.
Parameters
----------
save_name : str, optional
Name of saved plot. A plot is only saved if a name is given.
'''
theta = self.orientation_hist[0]
R = self.orientation_hist[1]
import matplotlib.pyplot as plt
median = self.orientation.value
twofive, sevenfive = self._orientation_quantiles
ax1 = plt.subplot(121, polar=True)
ax1.plot(2 * theta, R / R.max(), "kD")
ax1.fill_between(2 * theta, 0,
R[:, 0] / R.max(),
facecolor="blue",
interpolate=True, alpha=0.5)
ax1.set_rmax(1.0)
ax1.plot([2 * median] * 2, np.linspace(0.0, 1.0, 2), "g")
ax1.plot([2 * twofive] * 2, np.linspace(0.0, 1.0, 2),
"b--")
ax1.plot([2 * sevenfive] * 2, np.linspace(0.0, 1.0, 2),
"b--")
plt.subplot(122)
plt.imshow(self.skeleton(out_type='longpath'),
cmap="binary", origin="lower")
if save_name is not None:
plt.savefig(save_name)
plt.close()
else:
plt.show()
def rht_branch_analysis(self, radius=10 * u.pix, ntheta=180,
background_percentile=25,
min_branch_length=3 * u.pix):
'''
Use the RHT to find the filament orientation and dispersion of each
branch in the filament.
Parameters
----------
radius : `~astropy.units.Quantity`, optional
Radius of the region to compute the orientation within. Converted
to pixel units and rounded to the nearest integer.
ntheta : int, optional
Number of angles to sample at. Default is 180.
background_percentile : float, optional
Float between 0 and 100 that sets a background level for the RHT
distribution before calculating orientation and curvature.
min_branch_length : `~astropy.units.Quantity`, optional
            Minimum length of a branch to run the RHT on. Branches that are
            too short can cause spikes in the distribution along the axis
            angles or at 45 deg from them.
'''
# Convert length cut to pixel units
if not hasattr(radius, 'unit'):
warnings.warn("Radius has no given units. Assuming pixel units.")
radius *= u.pix
if not hasattr(min_branch_length, 'unit'):
warnings.warn("min_branch_length has no given units. Assuming "
"pixel units.")
min_branch_length *= u.pix
radius = int(round(self._converter.to_pixel(radius).value))
min_branch_length = self._converter.to_pixel(min_branch_length).value
means = []
iqrs = []
# Make padded arrays from individual branches
for i, (pix, length) in enumerate(zip(self.branch_pts(img_coords=False),
self.branch_properties['length'])):
if length.value < min_branch_length:
means.append(np.NaN)
iqrs.append(np.NaN)
continue
# Setup size of array
ymax = pix[:, 0].max()
ymin = pix[:, 0].min()
xmax = pix[:, 1].max()
xmin = pix[:, 1].min()
shape = (ymax - ymin + 1 + 2 * radius,
xmax - xmin + 1 + 2 * radius)
branch_array = np.zeros(shape, dtype=bool)
branch_array[pix[:, 0] - ymin + radius,
pix[:, 1] - xmin + radius] = True
branch_array = np.fliplr(branch_array)
theta, R, quant = rht(branch_array, radius, ntheta,
background_percentile)
twofive, mean, sevenfive = quant
means.append(mean)
if sevenfive > twofive:
iqrs.append(np.abs(sevenfive - twofive))
else:
iqrs.append(np.abs(sevenfive - twofive) + np.pi)
self._orientation_branches = np.array(means) * u.rad
self._curvature_branches = np.array(iqrs) * u.rad
@property
def orientation_branches(self):
'''
Orientations along each branch in the filament.
'''
return self._orientation_branches
@property
def curvature_branches(self):
'''
Curvature along each branch in the filament.
'''
return self._curvature_branches
def width_analysis(self, image, all_skeleton_array=None,
max_dist=10 * u.pix,
pad_to_distance=0 * u.pix,
fit_model='gaussian_bkg',
fitter=None,
try_nonparam=True,
use_longest_path=False,
add_width_to_length=False,
deconvolve_width=True,
beamwidth=None,
fwhm_function=None,
chisq_max=10.,
**kwargs):
'''
Create an average radial profile for the filament and fit a given
model.
Parameters
----------
        image : `~astropy.units.Quantity` or `~numpy.ndarray`
The image from which the filament was extracted.
all_skeleton_array : np.ndarray
An array with the skeletons of other filaments. This is used to
avoid double-counting pixels in the radial profiles in nearby
filaments.
max_dist : `~astropy.units.Quantity`, optional
Largest radius around the skeleton to create the profile from. This
            can be given in pixel, angular, or physical units.
pad_to_distance : `~astropy.units.Quantity`, optional
Force all pixels within this distance to be kept, even if a pixel
is closer to another skeleton, as given in `all_skeleton_array`.
fit_model : str or `~astropy.modeling.Fittable1DModel`, optional
The model to fit to the profile. Built-in models include
'gaussian_bkg' for a Gaussian with a constant background,
'gaussian_nobkg' for just a Gaussian, 'nonparam' for the
non-parametric estimator. Defaults to 'gaussian_bkg'.
fitter : `~astropy.modeling.fitting.Fitter`, optional
One of the astropy fitting classes. Defaults to a
Levenberg-Marquardt fitter.
try_nonparam : bool, optional
If the chosen model fit fails, fall back to a non-parametric
estimate.
use_longest_path : bool, optional
Only fit profile to the longest path skeleton. Disabled by
default.
add_width_to_length : bool, optional
            Add the FWHM to the filament length. This accounts for the
            expected shortening in the medial axis transform. Disabled by
            default.
deconvolve_width : bool, optional
Deconvolve the beam width from the FWHM. Enabled by default.
beamwidth : `~astropy.units.Quantity`, optional
The beam width to deconvolve the FWHM from. Required if
`deconvolve_width = True`.
fwhm_function : function, optional
Convert the width parameter to the FWHM. Must take the fit model
as an argument and return the FWHM and its uncertainty. If no
function is given, the Gaussian FWHM is used.
chisq_max : float, optional
Enable the fail flag if the reduced chi-squared value is above
this limit.
kwargs : Passed to `~fil_finder.width.radial_profile`.
'''
# Convert quantities to pixel units.
max_dist = self._converter.to_pixel(max_dist).value
pad_to_distance = self._converter.to_pixel(pad_to_distance).value
if deconvolve_width and beamwidth is None:
raise ValueError("beamwidth must be given when deconvolve_width is"
" enabled.")
if beamwidth is not None:
beamwidth = self._converter.to_pixel(beamwidth)
# Use the max dist as the pad size
pad_size = int(np.ceil(max_dist))
# if given a master skeleton array, require it to be the same shape as
# the image
if all_skeleton_array is not None:
if all_skeleton_array.shape != image.shape:
raise ValueError("The shape of all_skeleton_array must match"
" the given image.")
if use_longest_path:
skel_array = self.skeleton(pad_size=pad_size, out_type='longpath')
else:
skel_array = self.skeleton(pad_size=pad_size, out_type='all')
out_shape = skel_array.shape
input_image = self.image_slicer(image, out_shape, pad_size=pad_size)
if all_skeleton_array is not None:
input_all_skeleton_array = \
self.image_slicer(all_skeleton_array, out_shape,
pad_size=pad_size)
else:
input_all_skeleton_array = None
# Create distance arrays to build profile from
dist_skel_arr = nd.distance_transform_edt(np.logical_not(skel_array))
# And create a distance array from the full skeleton array if given
if input_all_skeleton_array is not None:
dist_skel_all = nd.distance_transform_edt(np.logical_not(input_all_skeleton_array))
else:
dist_skel_all = None
# Need the unbinned data for the non-parametric fit.
out = radial_profile(input_image, dist_skel_all,
dist_skel_arr,
[(0, 0), (0, 0)],
max_distance=max_dist,
pad_to_distance=pad_to_distance,
**kwargs)
if out is None:
raise ValueError("Building radial profile failed. Check the input"
" image for NaNs.")
else:
dist, radprof, weights, unbin_dist, unbin_radprof = out
# Attach units
xunit = u.pix
if hasattr(image, 'unit'):
yunit = image.unit
else:
yunit = u.dimensionless_unscaled
self._yunit = yunit
radprof = radprof * yunit
dist = dist * xunit
self._radprofile = [dist, radprof]
self._unbin_radprofile = [unbin_dist * xunit,
unbin_radprof * yunit]
# Make sure the given model is valid
        # `skip_fitting` must be defined on both branches; otherwise passing
        # a custom astropy model would raise a NameError further down.
        skip_fitting = False
        if not isinstance(fit_model, mod.Model):
self._radprof_type = fit_model
# Check the default types
if fit_model == "gaussian_bkg":
fit_model = gaussian_model(dist, radprof, with_bkg=True)
elif fit_model == "gaussian_nobkg":
fit_model = gaussian_model(dist, radprof, with_bkg=False)
elif fit_model == "nonparam":
skip_fitting = True
else:
raise ValueError("fit_model must be an "
"astropy.modeling.Fittable1DModel or "
"one of the default models: 'gaussian_bkg',"
" 'gaussian_nobkg', or 'nonparam'.")
else:
# Record the fit type
self._radprof_type = fit_model.name
if not skip_fitting:
fitted_model, fitter = fit_radial_model(dist, radprof, fit_model,
weights=weights)
# Only keep the non-fixed parameters. The fixed parameters won't
# appear in the covariance matrix.
params = []
names = []
for name in fitted_model.param_names:
# Check if it is fixed:
if fitted_model.fixed[name]:
continue
param = getattr(fitted_model, name)
if param.quantity is not None:
params.append(param.quantity)
else:
# Assign a dimensionless unit
params.append(param.value * u.dimensionless_unscaled)
names.append(name)
self._radprof_params = params
npar = len(self.radprof_params)
self._radprof_parnames = names
self._radprof_model = fitted_model
self._radprof_fitter = fitter
# Fail checks
fail_flag = False
param_cov = fitter.fit_info.get('param_cov')
if param_cov is not None:
fit_uncert = list(np.sqrt(np.diag(param_cov)))
else:
fit_uncert = [np.NaN] * npar
fail_flag = True
if len(fit_uncert) != len(params):
raise ValueError("The number of parameters does not match the "
"number from the covariance matrix. Check for"
" fixed parameters.")
# Add units to errors
for i, par in enumerate(params):
fit_uncert[i] = fit_uncert[i] * par.unit
self._radprof_errors = fit_uncert
# Check if units should be kept
if fitted_model._supports_unit_fitting:
modvals = fitted_model(dist)
radprof_vals = radprof
else:
modvals = fitted_model(dist.value)
radprof_vals = radprof.value
chisq = red_chisq(radprof_vals, modvals, npar, 1)
if chisq > chisq_max:
fail_flag = True
if (skip_fitting or fail_flag) and try_nonparam:
fit, fit_error, fail_flag = \
nonparam_width(dist.value, radprof.value,
unbin_dist, unbin_radprof,
None, 5, 99)
self._radprof_type = 'nonparam'
# Make the equivalent Gaussian model w/ a background
self._radprof_model = Gaussian1D() + Const1D()
if self._radprof_model._supports_unit_fitting:
add_unit_if_none = lambda x, unit: x * unit if not hasattr(x, 'unit') else x
self._radprof_model.amplitude_0 = add_unit_if_none(fit[0], yunit)
self._radprof_model.mean_0 = 0.0 * xunit
# At some point this parameter name changed? Or something?
# Anyways, you can set whatever attribute name you want and it
# doesn't complain. So catch those cases manually.
                if hasattr(self._radprof_model, 'sigma_0'):
                    self._radprof_model.sigma_0 = add_unit_if_none(fit[1], xunit)
                elif hasattr(self._radprof_model, 'stddev_0'):
                    self._radprof_model.stddev_0 = add_unit_if_none(fit[1], xunit)
                else:
                    raise AttributeError("Cannot find stddev parameter.")
                # fit[2] is the constant background level (see `params` below)
                self._radprof_model.amplitude_1 = add_unit_if_none(fit[2], yunit)
            else:
                self._radprof_model.amplitude_0 = fit[0]
                self._radprof_model.mean_0 = 0.0
                if hasattr(self._radprof_model, 'sigma_0'):
                    self._radprof_model.sigma_0 = fit[1]
                elif hasattr(self._radprof_model, 'stddev_0'):
                    self._radprof_model.stddev_0 = fit[1]
                else:
                    raise AttributeError("Cannot find stddev parameter.")
                self._radprof_model.amplitude_1 = fit[2]
# Slice out the FWHM and add units
params = [fit[0] * yunit, fit[1] * xunit, fit[2] * yunit]
errs = [fit_error[0] * yunit, fit_error[1] * xunit,
fit_error[2] * yunit]
self._radprof_params = params
self._radprof_errors = errs
self._radprof_parnames = ['amplitude_0', 'stddev_0', 'amplitude_1']
if fwhm_function is not None:
fwhm = fwhm_function(fitted_model)
        else:
            # Default to Gaussian FWHM
            found_width = False
            for idx, name in enumerate(self.radprof_parnames):
if "stddev" in name:
found_width = True
break
if found_width:
fwhm = self.radprof_params[idx].value * np.sqrt(8 * np.log(2)) * xunit
fwhm_err = self.radprof_errors[idx].value * np.sqrt(8 * np.log(2)) * xunit
else:
raise ValueError("Could not automatically identify which "
"parameter in the model corresponds to the "
"width. Please pass a function to "
"'fwhm_function' to identify the width "
"parameter.")
if deconvolve_width:
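            # Deconvolve in quadrature: fwhm_true**2 = fwhm_obs**2 - beam**2.
            # Propagating the fit uncertainty through the square root gives
            # err_true = fwhm_obs * err_obs / fwhm_true, as used below.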
fwhm_deconv_sq = fwhm**2 - beamwidth**2
if fwhm_deconv_sq > 0:
fwhm_deconv = np.sqrt(fwhm_deconv_sq)
fwhm_deconv_err = fwhm * fwhm_err / fwhm_deconv
else:
fwhm_deconv = np.NaN * fwhm.unit
fwhm_deconv_err = np.NaN * fwhm.unit
warnings.warn("Width could not be deconvolved from the beam "
"width.")
else:
fwhm_deconv = fwhm
fwhm_deconv_err = fwhm_err
self._fwhm = fwhm_deconv
self._fwhm_err = fwhm_deconv_err
        # Final width check -- make sure the length is longer than the width.
        # If it is, the width can be added onto the length below, since the
        # medial axis transform shortens each end by about the same amount.
if self.length() < self._fwhm:
fail_flag = True
# Add the width onto the length if enabled
if add_width_to_length:
if fail_flag:
warnings.warn("Ignoring adding the width to the length because"
" the fail flag was raised for the fit.")
else:
self._length += self._fwhm
self._radprof_failflag = fail_flag
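    # Usage sketch (assumes `skeleton_analysis` already ran on `image`):
    #
    #     fil.width_analysis(image, max_dist=15 * u.pix,
    #                        beamwidth=2 * u.pix)
    #     fil.radprof_fwhm()  # (FWHM, uncertainty), in pixels by default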
@property
def radprof_fit_fail_flag(self):
'''
Flag to catch poor fits.
'''
return self._radprof_failflag
@property
def radprof_type(self):
'''
The model type used to fit the radial profile.
'''
return self._radprof_type
@property
def radprofile(self):
'''
The binned radial profile created in `~FilFinder2D.width_analysis`.
This contains the distances and the profile value in the distance bin.
'''
return self._radprofile
@property
def radprof_params(self):
'''
Fit parameters from `~FilFinder2D.width_analysis`.
'''
return self._radprof_params
@property
def radprof_errors(self):
'''
Fit uncertainties from `~FilFinder2D.width_analysis`.
'''
return self._radprof_errors
def radprof_fwhm(self, unit=u.pixel):
'''
The FWHM of the fitted radial profile and its uncertainty.
Parameters
----------
unit : `~astropy.units.Unit`, optional
Pixel, angular, or physical unit to convert to.
'''
return self._converter.from_pixel(self._fwhm, unit), \
self._converter.from_pixel(self._fwhm_err, unit)
@property
def radprof_parnames(self):
'''
Parameter names from `~FilFinder2D.radprof_model`.
'''
return self._radprof_parnames
def radprof_fit_table(self, unit=u.pix):
'''
Return an `~astropy.table.Table` with the fit parameters and
uncertainties.
Parameters
----------
unit : `~astropy.units.Unit`, optional
Pixel, angular, or physical unit to convert to.
'''
from astropy.table import Table, Column
tab = Table()
for name, val, err in zip(self.radprof_parnames, self.radprof_params,
self.radprof_errors):
# Try converting to the given unit. Assume failures are not length
# units.
try:
conv_val = self._converter.from_pixel(val, unit)
conv_err = self._converter.from_pixel(err, unit)
except u.UnitsError:
conv_val = val
conv_err = err
tab[name] = Column(conv_val.reshape((1,)))
tab[name + "_err"] = Column(conv_err.reshape((1,)))
# Add on the FWHM
tab['fwhm'] = Column(self.radprof_fwhm(unit)[0].reshape((1,)))
tab['fwhm_err'] = Column(self.radprof_fwhm(unit)[1].reshape((1,)))
# Add on whether the fit was "successful"
tab['fail_flag'] = Column([self.radprof_fit_fail_flag])
# Add the type of fit based on the model type
tab['model_type'] = Column([self.radprof_type])
return tab
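    # Usage sketch: gather the fit results into a one-row table. Converting
    # to physical units (e.g. u.pc) requires a distance to have been given.
    #
    #     tab = fil.radprof_fit_table(unit=u.pix)
    #     tab.write('fil_fit.csv')  # hypothetical output name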
@property
def radprof_model(self):
'''
The fitted radial profile model.
'''
return self._radprof_model
def plot_radial_profile(self, save_name=None, xunit=u.pix,
ax=None):
'''
Plot the radial profile of the filament and the fitted model.
Parameters
----------
xunit : `~astropy.units.Unit`, optional
Pixel, angular, or physical unit to convert to.
ax : `~matplotlib.axes`, optional
Use an existing set of axes to plot the profile.
'''
dist, radprof = self.radprofile
model = self.radprof_model
conv_dist = self._converter.from_pixel(dist, xunit)
import matplotlib.pyplot as plt
if ax is None:
ax = plt.subplot(111)
ax.plot(conv_dist, radprof, "kD")
points = np.linspace(np.min(dist),
np.max(dist), 5 * len(dist))
# Check if units should be kept when evaluating the model
if not model._supports_unit_fitting:
points = points.value
conv_points = np.linspace(np.min(conv_dist),
np.max(conv_dist), 5 * len(conv_dist))
ax.plot(conv_points, model(points), "r")
ax.set_xlabel(r'Radial Distance ({})'.format(xunit))
ax.set_ylabel(r'Intensity ({})'.format(self._yunit))
ax.grid(True)
plt.tight_layout()
if save_name is not None:
plt.savefig(save_name)
plt.show()
if in_ipynb():
plt.clf()
def total_intensity(self, bkg_subtract=False, bkg_mod_index=2):
'''
Return the sum of all pixels within the FWHM of the filament.
.. warning::
`fil_finder_2D` multiplied the total intensity by the angular size
of a pixel. This function is just the sum of pixel values. Unit
conversions can be applied on the output if needed.
Parameters
----------
bkg_subtract : bool, optional
Subtract off the fitted background level.
bkg_mod_index : int, optional
Indicate which element in `Filament2D.radprof_params` is the
background level. Defaults to 2 for the Gaussian with background
model.
Returns
-------
total_intensity : `~astropy.units.Quantity`
The total intensity for the filament.
'''
within_fwhm = self._unbin_radprofile[0] <= \
0.5 * self.radprof_fwhm()[0]
total_intensity = np.sum(self._unbin_radprofile[1][within_fwhm])
if bkg_subtract:
bkg = self.radprof_params[bkg_mod_index]
if not self.radprof_model._supports_unit_fitting:
bkg = bkg.value * total_intensity.unit
total_intensity -= bkg * within_fwhm.sum()
return total_intensity
def model_image(self, max_radius=20 * u.pix, bkg_subtract=True,
bkg_mod_index=2):
'''
Return a model image from the radial profile fit.
Parameters
----------
max_radius : `~astropy.units.Quantity`, optional
Set the radius to compute the model to. The outputted array
will be padded by the number of pixels the max_radius corresponds
to.
bkg_subtract : bool, optional
Subtract off the fitted background level.
bkg_mod_index : int, optional
Indicate which element in `Filament2D.radprof_params` is the
background level. Defaults to 2 for the Gaussian with background
model.
Returns
-------
model_array : `~astropy.units.Quantity`
A 2D array computed using the radial profile model.
'''
max_radius = self._converter.to_pixel(max_radius).value
pad_size = int(max_radius)
skel_arr = self.skeleton(pad_size)
dists = nd.distance_transform_edt(~skel_arr)
if self.radprof_model._supports_unit_fitting:
dists = dists * u.pix
if not bkg_subtract:
return self.radprof_model(dists)
else:
bkg = self.radprof_params[bkg_mod_index]
if not self.radprof_model._supports_unit_fitting:
bkg = bkg.value
return self.radprof_model(dists) - bkg
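    # Usage sketch: evaluate the fitted profile on the skeleton distance
    # transform to get a 2D model stamp of the filament.
    #
    #     model = fil.model_image(max_radius=15 * u.pix)
    #     # a residual needs `image` sliced to the same padded shape first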
def median_brightness(self, image):
'''
Return the median brightness along the skeleton of the filament.
Parameters
----------
image : `~numpy.ndarray` or `~astropy.units.Quantity`
The image from which the filament was extracted.
Returns
-------
median_brightness : float or `~astropy.units.Quantity`
Median brightness along the skeleton.
'''
pad_size = 1
# Do we need to pad the image before slicing?
input_image = pad_image(image, self.pixel_extents, pad_size)
skels = self.skeleton(pad_size=pad_size)
# If the padded image matches the mask size, don't need additional
# slicing
if input_image.shape != skels.shape:
input_image = self.image_slicer(input_image, skels.shape,
pad_size=pad_size)
assert input_image.shape == skels.shape
return np.nanmedian(input_image[skels])
def ridge_profile(self, image):
'''
Return the image values along the longest path extent of a filament, or
from radial slices along the longest path.
Parameters
----------
image : `~numpy.ndarray` or `~astropy.units.Quantity`
The image from which the filament was extracted.
'''
pad_size = 1
# Do we need to pad the image before slicing?
input_image = pad_image(image, self.pixel_extents, pad_size) * \
u.dimensionless_unscaled
skels = self.skeleton(pad_size=pad_size, out_type='longpath')
# If the padded image matches the mask size, don't need additional
# slicing
if input_image.shape != skels.shape:
input_image = self.image_slicer(input_image, skels.shape,
pad_size=pad_size)
# These should have the same shape now.
assert input_image.shape == skels.shape
from .width_profiles.profile_line_width import walk_through_skeleton
order_pts = walk_through_skeleton(skels)
if hasattr(image, 'unit'):
unit = image.unit
else:
unit = u.dimensionless_unscaled
input_image = input_image * unit
values = []
for pt in order_pts:
values.append(input_image[pt[0], pt[1]].value)
return values * unit
def profile_analysis(self, image, max_dist=20 * u.pix,
num_avg=3, xunit=u.pix):
'''
Create profiles of radial slices along the longest path skeleton.
Profiles created from `~fil_finder.width_profiles.filament_profile`.
.. note::
Does not include fitting to the radial profiles. Limited fitting
of Gaussian profiles is provided in
`~fil_finder.width_profiles.filament_profile`. See a dedicated
package like `radfil <https://github.com/catherinezucker/radfil>`_
for modeling profiles.
Parameters
----------
image : `~numpy.ndarray` or `~astropy.units.Quantity`
The image from which the filament was extracted.
max_dist : astropy Quantity, optional
The angular or physical (when distance is given) extent to create
the profile away from the centre skeleton pixel. The entire
profile will be twice this value (for each side of the profile).
num_avg : int, optional
Number of points before and after a pixel that is used when
computing the normal vector. Using at least three points is
recommended due to small pixel instabilities in the skeletons.
Returns
-------
dists : `~astropy.units.Quantity`
Distances in the radial profiles from the skeleton. Units set by
`xunit`.
profiles : `~astropy.units.Quantity`
Radial image profiles.
'''
from .width_profiles import filament_profile
max_dist = self._converter.to_pixel(max_dist)
pad_size = int(max_dist.value)
# Do we need to pad the image before slicing?
input_image = pad_image(image, self.pixel_extents, pad_size)
if hasattr(image, 'unit'):
input_image = input_image * image.unit
else:
input_image = input_image * u.dimensionless_unscaled
skels = self.skeleton(pad_size=pad_size, out_type='longpath')
# If the padded image matches the mask size, don't need additional
# slicing
if input_image.shape != skels.shape:
input_image = self.image_slicer(input_image, skels.shape,
pad_size=pad_size)
# Check if angular conversions are defined. If not, stay in pixel units
if hasattr(self._converter, '_ang_size'):
pixscale = self._converter.to_angular(1 * u.pix)
ang_conv = True
else:
pixscale = 1.0 * u.deg
ang_conv = False
dists, profiles = filament_profile(skels, input_image, pixscale,
max_dist=max_dist,
distance=None,
fit_profiles=False,
bright_unit=input_image.unit)
# First put the distances into pixel units
if ang_conv:
dists = [self._converter.to_pixel(dist) for dist in dists]
else:
# Already in pixel units.
dists = [dist.value * u.pix for dist in dists]
# Convert the distance units
dists = [self._converter.from_pixel(dist, xunit) for dist in dists]
return dists, profiles
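    # Usage sketch: one radial slice per pixel walked along the longest path.
    #
    #     dists, profiles = fil.profile_analysis(image, max_dist=10 * u.pix)
    #     for d, p in zip(dists, profiles):
    #         plt.plot(d, p)  # assumes matplotlib.pyplot as plt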
def radprof_table(self, xunit=u.pix):
'''
Return the radial profile as a table.
Parameters
----------
xunit : `~astropy.units.Unit`, optional
Spatial unit to convert radial profile distances.
Returns
-------
tab : `~astropy.table.Table`
Table with the radial profile distance and values.
'''
from astropy.table import Column, Table
dists = Column(self._converter.from_pixel(self._radprofile[0], xunit))
vals = Column(self._radprofile[1])
tab = Table()
tab['distance'] = dists
tab['values'] = vals
return tab
def branch_table(self, include_rht=False):
'''
Save the branch properties of the filament.
Parameters
----------
include_rht : bool, optional
If `branches=True` is used in `Filament2D.exec_rht`, the branch
orientation and curvature will be added to the table.
Returns
-------
tab : `~astropy.table.Table`
Table with the branch properties.
'''
from astropy.table import Table, Column
branch_data = self.branch_properties.copy()
del branch_data['pixels']
del branch_data['number']
if include_rht:
branch_data['orientation'] = self.orientation_branches
branch_data['curvature'] = self.curvature_branches
tab = Table([Column(branch_data[key]) for key in branch_data],
names=branch_data.keys())
return tab
def save_fits(self, savename, image, pad_size=20 * u.pix, header=None,
model_kwargs={},
**kwargs):
'''
Save a stamp of the image centered on the filament, the skeleton,
the longest path skeleton, and the model.
Parameters
----------
image : `~numpy.ndarray` or `~astropy.units.Quantity`
The image from which the filament was extracted.
pad_size : `~astropy.units.Quantity`, optional
Size to pad the saved arrays by.
header : `~astropy.io.fits.Header`, optional
Provide a FITS header to save to. If `~Filament2D` was
given WCS information, this will be used if no header is given.
model_kwargs : dict, optional
Passed to `~Filament2D.model_image`.
kwargs : Passed to `~astropy.io.fits.PrimaryHDU.writeto`.
'''
pad_size = int(self._converter.to_pixel(pad_size).value)
# Do we need to pad the image before slicing?
input_image = pad_image(image, self.pixel_extents, pad_size)
skels = self.skeleton(pad_size=pad_size, out_type='all')
skels_lp = self.skeleton(pad_size=pad_size, out_type='longpath')
# If the padded image matches the mask size, don't need additional
# slicing
if input_image.shape != skels.shape:
input_image = self.image_slicer(input_image, skels.shape,
pad_size=pad_size)
        if model_kwargs is None:
            model_kwargs = {}

        model = self.model_image(max_radius=pad_size * u.pix,
                                 **model_kwargs)
if hasattr(model, 'unit'):
model = model.value
from astropy.io import fits
import time
if header is None:
if hasattr(self._converter, "_wcs"):
header = self._converter._wcs.to_header()
else:
header = fits.Header()
# Strip off units if the image is a Quantity
if hasattr(input_image, 'unit'):
input_image = input_image.value.copy()
hdu = fits.PrimaryHDU(input_image, header)
skel_hdr = header.copy()
skel_hdr['BUNIT'] = ("", "bool")
skel_hdr['COMMENT'] = "Skeleton created by fil_finder on " + \
time.strftime("%c")
skel_hdu = fits.ImageHDU(skels.astype(int), skel_hdr)
skel_lp_hdu = fits.ImageHDU(skels_lp.astype(int), skel_hdr)
model_hdu = fits.ImageHDU(model, header)
hdulist = fits.HDUList([hdu, skel_hdu, skel_lp_hdu, model_hdu])
hdulist.writeto(savename, **kwargs)
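    # Illustrative usage sketch (hypothetical file name; ``image`` is the
    # array the filament was extracted from; ``overwrite`` is passed through
    # to `~astropy.io.fits.HDUList.writeto`):
    #
    #   fil.save_fits("filament_stamp.fits", image, pad_size=20 * u.pix,
    #                 overwrite=True)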
def to_pickle(self, savename):
'''
Save a Filament2D class as a pickle file.
Parameters
----------
savename : str
Name of the pickle file.
'''
with open(savename, 'wb') as output:
pickle.dump(self, output, -1)
@staticmethod
    def from_pickle(filename):
        '''
        Load a Filament2D from a pickle file.

        Parameters
        ----------
        filename : str
            Name of the pickle file.

        Returns
        -------
        fil : `Filament2D`
            The loaded filament object.
        '''
        with open(filename, 'rb') as input_file:
            fil = pickle.load(input_file)
        return fil
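    # Illustrative round-trip sketch (hypothetical file name):
    #
    #   fil.to_pickle("filament.pkl")
    #   fil2 = Filament2D.from_pickle("filament.pkl")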
class Filament3D(FilamentNDBase):
"""docstring for Filament3D"""
def __init__(self, arg):
super(Filament3D, self).__init__()
self.arg = arg
|
e-koch/FilFinder
|
fil_finder/filament.py
|
Python
|
mit
| 56,431
|
[
"Gaussian"
] |
32c2ba09624efb14b0d04ba85d4030f42f1bce65e6ba9e1fc20bf5d17d10894c
|
########################################################################
# $HeadURL $
# File: RequestTask.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/03/13 12:42:45
########################################################################
""" :mod: RequestTask
=================
.. module: RequestTask
:synopsis: request processing task
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
  request processing task to be used inside a ProcessTask created by the RequestExecutingAgent
"""
__RCSID__ = "$Id $"
# #
# @file RequestTask.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/03/13 12:42:54
# @brief Definition of RequestTask class.
# # imports
import os, time
# # from DIRAC
from DIRAC import gLogger, S_OK, S_ERROR, gMonitor, gConfig
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.private.OperationHandlerBase import OperationHandlerBase
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.Security import CS
########################################################################
class RequestTask( object ):
"""
.. class:: RequestTask
request's processing task
"""
def __init__( self, requestJSON, handlersDict, csPath, agentName, standalone = False ):
"""c'tor
:param self: self reference
:param str requestJSON: request serialized to JSON
:param dict opHandlers: operation handlers
"""
self.request = Request( requestJSON )
# # csPath
self.csPath = csPath
# # agent name
self.agentName = agentName
# # standalone flag
self.standalone = standalone
# # handlers dict
self.handlersDict = handlersDict
# # handlers class def
self.handlers = {}
# # own sublogger
self.log = gLogger.getSubLogger( "pid_%s/%s" % ( os.getpid(), self.request.RequestName ) )
# # get shifters info
self.__managersDict = {}
shifterProxies = self.__setupManagerProxies()
if not shifterProxies["OK"]:
self.log.error( shifterProxies["Message"] )
# # initialize gMonitor
gMonitor.setComponentType( gMonitor.COMPONENT_AGENT )
gMonitor.setComponentName( self.agentName )
gMonitor.initialize()
# # own gMonitor activities
gMonitor.registerActivity( "RequestAtt", "Requests processed",
"RequestExecutingAgent", "Requests/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RequestFail", "Requests failed",
"RequestExecutingAgent", "Requests/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RequestOK", "Requests done",
"RequestExecutingAgent", "Requests/min", gMonitor.OP_SUM )
self.requestClient = ReqClient()
def __setupManagerProxies( self ):
""" setup grid proxy for all defined managers """
oHelper = Operations()
shifters = oHelper.getSections( "Shifter" )
if not shifters["OK"]:
self.log.error( shifters["Message"] )
return shifters
shifters = shifters["Value"]
for shifter in shifters:
shifterDict = oHelper.getOptionsDict( "Shifter/%s" % shifter )
if not shifterDict["OK"]:
self.log.error( shifterDict["Message"] )
continue
userName = shifterDict["Value"].get( "User", "" )
userGroup = shifterDict["Value"].get( "Group", "" )
userDN = CS.getDNForUsername( userName )
if not userDN["OK"]:
self.log.error( userDN["Message"] )
continue
userDN = userDN["Value"][0]
vomsAttr = CS.getVOMSAttributeForGroup( userGroup )
if vomsAttr:
self.log.debug( "getting VOMS [%s] proxy for shifter %s@%s (%s)" % ( vomsAttr, userName,
userGroup, userDN ) )
getProxy = gProxyManager.downloadVOMSProxyToFile( userDN, userGroup,
requiredTimeLeft = 1200,
cacheTime = 4 * 43200 )
else:
self.log.debug( "getting proxy for shifter %s@%s (%s)" % ( userName, userGroup, userDN ) )
getProxy = gProxyManager.downloadProxyToFile( userDN, userGroup,
requiredTimeLeft = 1200,
cacheTime = 4 * 43200 )
if not getProxy["OK"]:
self.log.error( getProxy["Message" ] )
return S_ERROR( "unable to setup shifter proxy for %s: %s" % ( shifter, getProxy["Message"] ) )
chain = getProxy["chain"]
fileName = getProxy["Value" ]
self.log.debug( "got %s: %s %s" % ( shifter, userName, userGroup ) )
self.__managersDict[shifter] = { "ShifterDN" : userDN,
"ShifterName" : userName,
"ShifterGroup" : userGroup,
"Chain" : chain,
"ProxyFile" : fileName }
return S_OK()
def setupProxy( self ):
""" download and dump request owner proxy to file and env
:return: S_OK with name of newly created owner proxy file and shifter name if any
"""
self.__managersDict = {}
shifterProxies = self.__setupManagerProxies()
if not shifterProxies["OK"]:
self.log.error( shifterProxies["Message"] )
ownerDN = self.request.OwnerDN
ownerGroup = self.request.OwnerGroup
isShifter = []
for shifter, creds in self.__managersDict.items():
if creds["ShifterDN"] == ownerDN and creds["ShifterGroup"] == ownerGroup:
isShifter.append( shifter )
if isShifter:
proxyFile = self.__managersDict[isShifter[0]]["ProxyFile"]
os.environ["X509_USER_PROXY"] = proxyFile
return S_OK( { "Shifter": isShifter, "ProxyFile": proxyFile } )
# # if we're here owner is not a shifter at all
ownerProxy = gProxyManager.downloadVOMSProxy( ownerDN, ownerGroup )
if not ownerProxy["OK"] or not ownerProxy["Value"]:
reason = ownerProxy["Message"] if "Message" in ownerProxy else "No valid proxy found in ProxyManager."
return S_ERROR( "Change proxy error for '%s'@'%s': %s" % ( ownerDN, ownerGroup, reason ) )
ownerProxyFile = ownerProxy["Value"].dumpAllToFile()
if not ownerProxyFile["OK"]:
return S_ERROR( ownerProxyFile["Message"] )
ownerProxyFile = ownerProxyFile["Value"]
os.environ["X509_USER_PROXY"] = ownerProxyFile
return S_OK( { "Shifter": isShifter, "ProxyFile": ownerProxyFile } )
@staticmethod
  def getPluginName( pluginPath ):
    """ Return the plugin (class) name from a dotted or slash-separated plugin path. """
if not pluginPath:
return ''
if "/" in pluginPath:
pluginPath = ".".join( [ chunk for chunk in pluginPath.split( "/" ) if chunk ] )
return pluginPath.split( "." )[-1]
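  # Illustrative behaviour (example paths; both forms yield the same name):
  #
  #   RequestTask.getPluginName( "DIRAC/CheeseShopSystem/private/Cheddar" )  # -> 'Cheddar'
  #   RequestTask.getPluginName( "DIRAC.CheeseShopSystem.private.Cheddar" )  # -> 'Cheddar'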
@staticmethod
def loadHandler( pluginPath ):
""" Create an instance of requested plugin class, loading and importing it when needed.
This function could raise ImportError when plugin cannot be find or TypeError when
loaded class object isn't inherited from BaseOperation class.
:param str pluginName: dotted path to plugin, specified as in import statement, i.e.
"DIRAC.CheesShopSystem.private.Cheddar" or alternatively in 'normal' path format
"DIRAC/CheesShopSystem/private/Cheddar"
:return: object instance
This function try to load and instantiate an object from given path. It is assumed that:
- :pluginPath: is pointing to module directory "importable" by python interpreter, i.e.: it's
package's top level directory is in $PYTHONPATH env variable,
- the module should consist a class definition following module name,
- the class itself is inherited from DIRAC.RequestManagementSystem.private.BaseOperation.BaseOperation
If above conditions aren't meet, function is throwing exceptions:
- ImportError when class cannot be imported
- TypeError when class isn't inherited from OperationHandlerBase
"""
if "/" in pluginPath:
pluginPath = ".".join( [ chunk for chunk in pluginPath.split( "/" ) if chunk ] )
pluginName = pluginPath.split( "." )[-1]
if pluginName not in globals():
mod = __import__( pluginPath, globals(), fromlist = [ pluginName ] )
pluginClassObj = getattr( mod, pluginName )
else:
pluginClassObj = globals()[pluginName]
if not issubclass( pluginClassObj, OperationHandlerBase ):
raise TypeError( "operation handler '%s' isn't inherited from OperationHandlerBase class" % pluginName )
for key, status in ( ( "Att", "Attempted" ), ( "OK", "Successful" ) , ( "Fail", "Failed" ) ):
gMonitor.registerActivity( "%s%s" % ( pluginName, key ), "%s operations %s" % ( pluginName, status ),
"RequestExecutingAgent", "Operations/min", gMonitor.OP_SUM )
    # # return the class object (the caller instantiates it)
return pluginClassObj
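  # Illustrative call (example path; ReplicateAndRegister is a standard DIRAC
  # operation handler, but any OperationHandlerBase subclass can be loaded):
  #
  #   cls = RequestTask.loadHandler(
  #       "DIRAC/DataManagementSystem/Agent/RequestOperations/ReplicateAndRegister" )
  #   handler = cls( csPath = "..." )  # caller supplies the CS path, as in getHandler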
def getHandler( self, operation ):
""" return instance of a handler for a given operation type on demand
all created handlers are kept in self.handlers dict for further use
:param Operation operation: Operation instance
"""
if operation.Type not in self.handlersDict:
return S_ERROR( "handler for operation '%s' not set" % operation.Type )
handler = self.handlers.get( operation.Type, None )
if not handler:
try:
handlerCls = self.loadHandler( self.handlersDict[operation.Type] )
self.handlers[operation.Type] = handlerCls( csPath = "%s/OperationHandlers/%s" % ( self.csPath,
operation.Type ) )
handler = self.handlers[ operation.Type ]
except ( ImportError, TypeError ), error:
self.log.exception( "getHandler: %s" % str( error ), lException = error )
return S_ERROR( str( error ) )
# # set operation for this handler
handler.setOperation( operation )
# # and return
return S_OK( handler )
def updateRequest( self ):
""" put back request to the RequestDB """
updateRequest = self.requestClient.putRequest( self.request, useFailoverProxy = False, retryMainServer = 2 )
if not updateRequest["OK"]:
self.log.error( updateRequest["Message"] )
return updateRequest
def __call__( self ):
""" request processing """
self.log.debug( "about to execute request" )
gMonitor.addMark( "RequestAtt", 1 )
# # setup proxy for request owner
setupProxy = self.setupProxy()
if not setupProxy["OK"]:
self.request.Error = setupProxy["Message"]
if 'has no proxy registered' in setupProxy["Message"]:
self.log.error( 'Request set to Failed:', setupProxy["Message"] )
# If user is no longer registered, fail the request
for operation in self.request:
for opFile in operation:
opFile.Status = 'Failed'
operation.Status = 'Failed'
else:
self.log.error( setupProxy["Message"] )
return S_OK( self.request )
shifter = setupProxy["Value"]["Shifter"]
proxyFile = setupProxy["Value"]["ProxyFile"]
error = None
while self.request.Status == "Waiting":
# # get waiting operation
operation = self.request.getWaiting()
if not operation["OK"]:
self.log.error( operation["Message"] )
return operation
operation = operation["Value"]
self.log.info( "executing operation #%s '%s'" % ( operation.Order, operation.Type ) )
# # and handler for it
handler = self.getHandler( operation )
if not handler["OK"]:
self.log.error( "unable to process operation %s: %s" % ( operation.Type, handler["Message"] ) )
# gMonitor.addMark( "%s%s" % ( operation.Type, "Fail" ), 1 )
operation.Error = handler["Message"]
break
handler = handler["Value"]
# # set shifters list in the handler
handler.shifter = shifter
# # and execute
pluginName = self.getPluginName( self.handlersDict.get( operation.Type ) )
if self.standalone:
useServerCertificate = gConfig.useServerCertificate()
else:
# Always use server certificates if executed within an agent
useServerCertificate = True
try:
if pluginName:
gMonitor.addMark( "%s%s" % ( pluginName, "Att" ), 1 )
          # Always use the request owner's proxy: temporarily disable server certificates
if useServerCertificate:
gConfigurationData.setOptionInCFG( '/DIRAC/Security/UseServerCertificate', 'false' )
exe = handler()
if useServerCertificate:
gConfigurationData.setOptionInCFG( '/DIRAC/Security/UseServerCertificate', 'true' )
if not exe["OK"]:
self.log.error( "unable to process operation %s: %s" % ( operation.Type, exe["Message"] ) )
if pluginName:
gMonitor.addMark( "%s%s" % ( pluginName, "Fail" ), 1 )
gMonitor.addMark( "RequestFail", 1 )
if self.request.JobID:
# Check if the job exists
monitorServer = RPCClient( "WorkloadManagement/JobMonitoring", useCertificates = True )
res = monitorServer.getJobPrimarySummary( int( self.request.JobID ) )
if not res["OK"]:
self.log.error( "RequestTask: Failed to get job %d status" % self.request.JobID )
elif not res['Value']:
self.log.warn( "RequestTask: job %d does not exist (anymore): failed request" % self.request.JobID )
for opFile in operation:
opFile.Status = 'Failed'
if operation.Status != 'Failed':
operation.Status = 'Failed'
self.request.Error = 'Job no longer exists'
except Exception, error:
self.log.exception( "hit by exception: %s" % str( error ) )
if pluginName:
gMonitor.addMark( "%s%s" % ( pluginName, "Fail" ), 1 )
gMonitor.addMark( "RequestFail", 1 )
if useServerCertificate:
gConfigurationData.setOptionInCFG( '/DIRAC/Security/UseServerCertificate', 'true' )
break
# # operation status check
if operation.Status == "Done" and pluginName:
gMonitor.addMark( "%s%s" % ( pluginName, "OK" ), 1 )
elif operation.Status == "Failed" and pluginName:
gMonitor.addMark( "%s%s" % ( pluginName, "Fail" ), 1 )
elif operation.Status in ( "Waiting", "Scheduled" ):
# # no update for waiting or all files scheduled
break
# # not a shifter at all? delete temp proxy file
if not shifter:
os.unlink( proxyFile )
gMonitor.flush()
if error:
return S_ERROR( error )
# # request done?
if self.request.Status == "Done":
# # update request to the RequestDB
self.log.info( 'updating request with status %s' % self.request.Status )
update = self.updateRequest()
if not update["OK"]:
self.log.error( update["Message"] )
return update
self.log.info( "request '%s' is done" % self.request.RequestName )
gMonitor.addMark( "RequestOK", 1 )
# # and there is a job waiting for it? finalize!
if self.request.JobID:
attempts = 0
while True:
finalizeRequest = self.requestClient.finalizeRequest( self.request.RequestName, self.request.JobID )
if not finalizeRequest["OK"]:
if not attempts:
self.log.error( "unable to finalize request %s: %s, will retry" % ( self.request.RequestName,
finalizeRequest["Message"] ) )
self.log.verbose( "Waiting 10 seconds" )
attempts += 1
if attempts == 10:
self.log.error( "giving up finalize request after %d attempts" % attempts )
return S_ERROR( 'Could not finalize request' )
time.sleep( 10 )
else:
self.log.info( "request '%s' is finalized%s" % ( self.request.RequestName,
( ' after %d attempts' % attempts ) if attempts else '' ) )
break
# Request will be updated by the callBack method
return S_OK( self.request )
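# Illustrative driver sketch (not part of DIRAC; requires a live DIRAC
# installation and a serialized request; ``requestJSON`` and the paths below
# are placeholders):
#
#   handlersDict = { "ReplicateAndRegister":
#                    "DIRAC/DataManagementSystem/Agent/RequestOperations/ReplicateAndRegister" }
#   task = RequestTask( requestJSON, handlersDict,
#                       csPath = "Systems/RequestManagement/Production/Agents/RequestExecutingAgent",
#                       agentName = "RequestManagement/RequestExecutingAgent" )
#   result = task()  # processes all waiting operations; returns S_OK( request )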
|
Sbalbp/DIRAC
|
RequestManagementSystem/private/RequestTask.py
|
Python
|
gpl-3.0
| 16,433
|
[
"DIRAC"
] |
cd6a6c0b12843711738ff5db04b9a0039c75972380be22317ea656f18e7a543d
|
import unittest
import filecmp
import os
import logging
import numpy as np
from pycgtool.frame import Atom, Residue, Frame
from pycgtool.framereader import FrameReaderSimpleTraj, FrameReaderMDAnalysis, FrameReaderMDTraj
from pycgtool.framereader import FrameReader, get_frame_reader, UnsupportedFormatException
try:
import mdtraj
mdtraj_present = True
except ImportError:
mdtraj_present = False
try:
import MDAnalysis
mdanalysis_present = True
except ImportError:
mdanalysis_present = False
class AtomTest(unittest.TestCase):
def test_atom_create(self):
atom = Atom(name="Name", num=0, type="Type")
self.assertEqual("Name", atom.name)
self.assertEqual(0, atom.num)
self.assertEqual("Type", atom.type)
def test_atom_add_missing_data(self):
atom1 = Atom("Name1", 0, type="Type")
atom2 = Atom("Name2", 0, mass=1)
with self.assertRaises(AssertionError):
atom1.add_missing_data(atom2)
atom2 = Atom("Name1", 0, mass=1)
atom1.add_missing_data(atom2)
self.assertEqual(1, atom1.mass)
class ResidueTest(unittest.TestCase):
def test_residue_create(self):
residue = Residue(name="Resname")
self.assertEqual("Resname", residue.name)
def test_residue_add_atoms(self):
atom = Atom(name="Name", num=0, type="Type")
residue = Residue()
residue.add_atom(atom)
self.assertEqual(atom, residue.atoms[0])
self.assertTrue(atom is residue.atoms[0])
class FrameTest(unittest.TestCase):
def helper_read_xtc(self, frame, first_only=False, skip_names=False):
self.assertEqual(663, frame.natoms)
self.assertEqual(221, len(frame.residues))
self.assertEqual(3, len(frame.residues[0].atoms))
if not skip_names: # MDTraj renames water
self.assertEqual("SOL", frame.residues[0].name)
self.assertEqual("OW", frame.residues[0].atoms[0].name)
atom0_coords = np.array([
[0.696, 1.330, 1.211],
[1.176, 1.152, 1.586],
[1.122, 1.130, 1.534]
])
box_vectors = np.array([
[1.89868, 1.89868, 1.89868],
[1.9052, 1.9052, 1.9052],
[1.90325272, 1.90325272, 1.90325272]
])
for i in range(1 if first_only else len(atom0_coords)):
np.testing.assert_allclose(atom0_coords[i], frame.residues[0].atoms[0].coords)
np.testing.assert_allclose(box_vectors[i], frame.box, rtol=1e-4) # PDB files are f9.3
frame.next_frame()
def test_frame_create(self):
Frame()
def test_frame_add_residue(self):
residue = Residue()
frame = Frame()
frame.add_residue(residue)
self.assertEqual(residue, frame.residues[0])
self.assertTrue(residue is frame.residues[0])
def test_frame_simpletraj_read_gro(self):
frame = Frame("test/data/water.gro", xtc_reader="simpletraj")
self.helper_read_xtc(frame, first_only=True)
@unittest.skipIf(not mdtraj_present, "MDTraj or Scipy not present")
def test_frame_mdtraj_read_gro(self):
logging.disable(logging.WARNING)
frame = Frame("test/data/water.gro", xtc_reader="mdtraj")
logging.disable(logging.NOTSET)
self.helper_read_xtc(frame, first_only=True, skip_names=True)
@unittest.skipIf(not mdanalysis_present, "MDAnalysis not present")
def test_frame_mdanalysis_read_gro(self):
reader = FrameReaderMDAnalysis("test/data/water.gro")
frame = Frame.instance_from_reader(reader)
self.helper_read_xtc(frame, first_only=True)
@unittest.skipIf(not mdtraj_present, "MDTraj or Scipy not present")
def test_frame_mdtraj_read_pdb(self):
reader = FrameReaderMDTraj("test/data/water.pdb")
frame = Frame.instance_from_reader(reader)
self.helper_read_xtc(frame, first_only=True, skip_names=True)
    @unittest.skipIf(not mdtraj_present and not mdanalysis_present, "Neither MDTraj nor MDAnalysis is present")
def test_frame_any_read_pdb(self):
reader = get_frame_reader("test/data/water.pdb")
frame = Frame.instance_from_reader(reader)
self.helper_read_xtc(frame, first_only=True, skip_names=True)
def test_frame_any_read_unsupported(self):
with self.assertRaises(UnsupportedFormatException):
reader = get_frame_reader("test/data/dppc.map")
@unittest.skipIf(not mdanalysis_present, "MDAnalysis not present")
def test_frame_mdanalysis_read_pdb(self):
reader = FrameReaderMDAnalysis("test/data/water.pdb")
frame = Frame.instance_from_reader(reader)
self.helper_read_xtc(frame, first_only=True)
def test_frame_output_gro(self):
frame = Frame("test/data/water.gro")
frame.output("water-out.gro", format="gro")
self.assertTrue(filecmp.cmp("test/data/water.gro", "water-out.gro"))
os.remove("water-out.gro")
def test_frame_read_xtc_simpletraj_numframes(self):
frame = Frame(gro="test/data/water.gro", xtc="test/data/water.xtc",
xtc_reader="simpletraj")
self.assertEqual(11, frame.numframes)
@unittest.skipIf(not mdtraj_present, "MDTraj or Scipy not present")
def test_frame_read_xtc_mdtraj_numframes(self):
logging.disable(logging.WARNING)
frame = Frame(gro="test/data/water.gro", xtc="test/data/water.xtc",
xtc_reader="mdtraj")
logging.disable(logging.NOTSET)
self.assertEqual(11, frame.numframes)
def test_frame_simpletraj_read_xtc(self):
frame = Frame(gro="test/data/water.gro", xtc="test/data/water.xtc",
xtc_reader="simpletraj")
self.helper_read_xtc(frame)
@unittest.skipIf(not mdtraj_present, "MDTraj or Scipy not present")
def test_frame_mdtraj_read_xtc(self):
logging.disable(logging.WARNING)
frame = Frame(gro="test/data/water.gro", xtc="test/data/water.xtc",
xtc_reader="mdtraj")
logging.disable(logging.NOTSET)
self.helper_read_xtc(frame, skip_names=True)
@unittest.skipIf(not mdanalysis_present, "MDAnalysis not present")
def test_frame_mdanalysis_read_xtc(self):
reader = FrameReaderMDAnalysis("test/data/water.gro", "test/data/water.xtc")
frame = Frame.instance_from_reader(reader)
self.helper_read_xtc(frame)
@unittest.skipIf(not mdtraj_present, "MDTraj or Scipy not present")
def test_frame_write_xtc_mdtraj(self):
try:
os.remove("water_test2.xtc")
except IOError:
pass
logging.disable(logging.WARNING)
frame = Frame(gro="test/data/water.gro", xtc="test/data/water.xtc",
xtc_reader="mdtraj")
logging.disable(logging.NOTSET)
while frame.next_frame():
frame.write_xtc("water_test2.xtc")
def test_frame_instance_from_reader(self):
reader = FrameReaderSimpleTraj("test/data/water.gro")
frame = Frame.instance_from_reader(reader)
self.helper_read_xtc(frame, first_only=True)
def test_frame_instance_from_reader_dummy(self):
class DummyReader(FrameReader):
def _initialise_frame(self, frame):
frame.dummy_reader = True
def _read_frame_number(self, number):
return number * 10, [], None
reader = DummyReader(None)
frame = Frame.instance_from_reader(reader)
self.assertTrue(frame.dummy_reader)
frame.next_frame()
self.assertEqual(frame.number, 0)
self.assertEqual(frame.time, 0)
np.testing.assert_array_equal(np.zeros(3), frame.box) # Reader replaces None with (0, 0, 0)
frame.next_frame()
self.assertEqual(frame.number, 1)
self.assertEqual(frame.time, 10)
if __name__ == '__main__':
unittest.main()
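# Usage note (illustrative): run the suite from the repository root so the
# test/data fixtures resolve, e.g.:
#
#   python -m unittest test.test_frame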
|
jag1g13/pycgtool
|
test/test_frame.py
|
Python
|
gpl-3.0
| 7,943
|
[
"MDAnalysis",
"MDTraj"
] |
2d412f1ed733ff94610a2b15535992136dcef5b751d94b3ca368e542ab0fa9f6
|