Dataset schema (per-record fields):

    column     type           values
    text       stringlengths  12 .. 1.05M
    repo_name  stringlengths  5 .. 86
    path       stringlengths  4 .. 191
    language   stringclasses  1 value
    license    stringclasses  15 values
    size       int32          12 .. 1.05M
    keyword    listlengths    1 .. 23
    text_hash  stringlengths  64 .. 64
"""Gaussian processes regression. """ # Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # # License: BSD 3 clause import warnings from operator import itemgetter import numpy as np from scipy.linalg import cholesky, cho_solve, solve_triangular from scipy.optimize import fmin_l_bfgs_b from sklearn.base import BaseEstimator, RegressorMixin, clone from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C from sklearn.utils import check_random_state from sklearn.utils.validation import check_X_y, check_array from sklearn.utils.deprecation import deprecated from sklearn.exceptions import ConvergenceWarning class GaussianProcessRegressor(BaseEstimator, RegressorMixin): """Gaussian process regression (GPR). The implementation is based on Algorithm 2.1 of Gaussian Processes for Machine Learning (GPML) by Rasmussen and Williams. In addition to standard scikit-learn estimator API, GaussianProcessRegressor: * allows prediction without prior fitting (based on the GP prior) * provides an additional method sample_y(X), which evaluates samples drawn from the GPR (prior or posterior) at given inputs * exposes a method log_marginal_likelihood(theta), which can be used externally for other ways of selecting hyperparameters, e.g., via Markov chain Monte Carlo. Read more in the :ref:`User Guide <gaussian_process>`. .. versionadded:: 0.18 Parameters ---------- kernel : kernel object The kernel specifying the covariance function of the GP. If None is passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that the kernel's hyperparameters are optimized during fitting. alpha : float or array-like, optional (default: 1e-10) Value added to the diagonal of the kernel matrix during fitting. Larger values correspond to increased noise level in the observations. This can also prevent a potential numerical issue during fitting, by ensuring that the calculated values form a positive definite matrix. If an array is passed, it must have the same number of entries as the data used for fitting and is used as datapoint-dependent noise level. Note that this is equivalent to adding a WhiteKernel with c=alpha. Allowing to specify the noise level directly as a parameter is mainly for convenience and for consistency with Ridge. optimizer : string or callable, optional (default: "fmin_l_bfgs_b") Can either be one of the internally supported optimizers for optimizing the kernel's parameters, specified by a string, or an externally defined optimizer passed as a callable. If a callable is passed, it must have the signature:: def optimizer(obj_func, initial_theta, bounds): # * 'obj_func' is the objective function to be minimized, which # takes the hyperparameters theta as parameter and an # optional flag eval_gradient, which determines if the # gradient is returned additionally to the function value # * 'initial_theta': the initial value for theta, which can be # used by local optimizers # * 'bounds': the bounds on the values of theta .... # Returned are the best found hyperparameters theta and # the corresponding value of the target function. return theta_opt, func_min Per default, the 'fmin_l_bfgs_b' algorithm from scipy.optimize is used. If None is passed, the kernel's parameters are kept fixed. Available internal optimizers are:: 'fmin_l_bfgs_b' n_restarts_optimizer : int, optional (default: 0) The number of restarts of the optimizer for finding the kernel's parameters which maximize the log-marginal likelihood. 
The first run of the optimizer is performed from the kernel's initial parameters, the remaining ones (if any) from thetas sampled log-uniform randomly from the space of allowed theta-values. If greater than 0, all bounds must be finite. Note that n_restarts_optimizer == 0 implies that one run is performed. normalize_y : boolean, optional (default: False) Whether the target values y are normalized, i.e., the mean of the observed target values become zero. This parameter should be set to True if the target values' mean is expected to differ considerable from zero. When enabled, the normalization effectively modifies the GP's prior based on the data, which contradicts the likelihood principle; normalization is thus disabled per default. copy_X_train : bool, optional (default: True) If True, a persistent copy of the training data is stored in the object. Otherwise, just a reference to the training data is stored, which might cause predictions to change if the data is modified externally. random_state : int, RandomState instance or None, optional (default: None) The generator used to initialize the centers. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- X_train_ : array-like, shape = (n_samples, n_features) Feature values in training data (also required for prediction) y_train_ : array-like, shape = (n_samples, [n_output_dims]) Target values in training data (also required for prediction) kernel_ : kernel object The kernel used for prediction. The structure of the kernel is the same as the one passed as parameter but with optimized hyperparameters L_ : array-like, shape = (n_samples, n_samples) Lower-triangular Cholesky decomposition of the kernel in ``X_train_`` alpha_ : array-like, shape = (n_samples,) Dual coefficients of training data points in kernel space log_marginal_likelihood_value_ : float The log-marginal-likelihood of ``self.kernel_.theta`` Examples -------- >>> from sklearn.datasets import make_friedman2 >>> from sklearn.gaussian_process import GaussianProcessRegressor >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) >>> kernel = DotProduct() + WhiteKernel() >>> gpr = GaussianProcessRegressor(kernel=kernel, ... random_state=0).fit(X, y) >>> gpr.score(X, y) # doctest: +ELLIPSIS 0.3680... >>> gpr.predict(X[:2,:], return_std=True) # doctest: +ELLIPSIS (array([653.0..., 592.1...]), array([316.6..., 316.6...])) """ def __init__(self, kernel=None, alpha=1e-10, optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0, normalize_y=False, copy_X_train=True, random_state=None): self.kernel = kernel self.alpha = alpha self.optimizer = optimizer self.n_restarts_optimizer = n_restarts_optimizer self.normalize_y = normalize_y self.copy_X_train = copy_X_train self.random_state = random_state @property @deprecated("Attribute rng was deprecated in version 0.19 and " "will be removed in 0.21.") def rng(self): return self._rng @property @deprecated("Attribute y_train_mean was deprecated in version 0.19 and " "will be removed in 0.21.") def y_train_mean(self): return self._y_train_mean def fit(self, X, y): """Fit Gaussian process regression model. 
Parameters ---------- X : array-like, shape = (n_samples, n_features) Training data y : array-like, shape = (n_samples, [n_output_dims]) Target values Returns ------- self : returns an instance of self. """ if self.kernel is None: # Use an RBF kernel as default self.kernel_ = C(1.0, constant_value_bounds="fixed") \ * RBF(1.0, length_scale_bounds="fixed") else: self.kernel_ = clone(self.kernel) self._rng = check_random_state(self.random_state) X, y = check_X_y(X, y, multi_output=True, y_numeric=True) # Normalize target value if self.normalize_y: self._y_train_mean = np.mean(y, axis=0) # demean y y = y - self._y_train_mean else: self._y_train_mean = np.zeros(1) if np.iterable(self.alpha) \ and self.alpha.shape[0] != y.shape[0]: if self.alpha.shape[0] == 1: self.alpha = self.alpha[0] else: raise ValueError("alpha must be a scalar or an array" " with same number of entries as y.(%d != %d)" % (self.alpha.shape[0], y.shape[0])) self.X_train_ = np.copy(X) if self.copy_X_train else X self.y_train_ = np.copy(y) if self.copy_X_train else y if self.optimizer is not None and self.kernel_.n_dims > 0: # Choose hyperparameters based on maximizing the log-marginal # likelihood (potentially starting from several initial values) def obj_func(theta, eval_gradient=True): if eval_gradient: lml, grad = self.log_marginal_likelihood( theta, eval_gradient=True) return -lml, -grad else: return -self.log_marginal_likelihood(theta) # First optimize starting from theta specified in kernel optima = [(self._constrained_optimization(obj_func, self.kernel_.theta, self.kernel_.bounds))] # Additional runs are performed from log-uniform chosen initial # theta if self.n_restarts_optimizer > 0: if not np.isfinite(self.kernel_.bounds).all(): raise ValueError( "Multiple optimizer restarts (n_restarts_optimizer>0) " "requires that all bounds are finite.") bounds = self.kernel_.bounds for iteration in range(self.n_restarts_optimizer): theta_initial = \ self._rng.uniform(bounds[:, 0], bounds[:, 1]) optima.append( self._constrained_optimization(obj_func, theta_initial, bounds)) # Select result from run with minimal (negative) log-marginal # likelihood lml_values = list(map(itemgetter(1), optima)) self.kernel_.theta = optima[np.argmin(lml_values)][0] self.log_marginal_likelihood_value_ = -np.min(lml_values) else: self.log_marginal_likelihood_value_ = \ self.log_marginal_likelihood(self.kernel_.theta) # Precompute quantities required for predictions which are independent # of actual query points K = self.kernel_(self.X_train_) K[np.diag_indices_from(K)] += self.alpha try: self.L_ = cholesky(K, lower=True) # Line 2 # self.L_ changed, self._K_inv needs to be recomputed self._K_inv = None except np.linalg.LinAlgError as exc: exc.args = ("The kernel, %s, is not returning a " "positive definite matrix. Try gradually " "increasing the 'alpha' parameter of your " "GaussianProcessRegressor estimator." % self.kernel_,) + exc.args raise self.alpha_ = cho_solve((self.L_, True), self.y_train_) # Line 3 return self def predict(self, X, return_std=False, return_cov=False): """Predict using the Gaussian process regression model We can also predict based on an unfitted model by using the GP prior. In addition to the mean of the predictive distribution, also its standard deviation (return_std=True) or covariance (return_cov=True). Note that at most one of the two can be requested. 
Parameters ---------- X : array-like, shape = (n_samples, n_features) Query points where the GP is evaluated return_std : bool, default: False If True, the standard-deviation of the predictive distribution at the query points is returned along with the mean. return_cov : bool, default: False If True, the covariance of the joint predictive distribution at the query points is returned along with the mean Returns ------- y_mean : array, shape = (n_samples, [n_output_dims]) Mean of predictive distribution a query points y_std : array, shape = (n_samples,), optional Standard deviation of predictive distribution at query points. Only returned when return_std is True. y_cov : array, shape = (n_samples, n_samples), optional Covariance of joint predictive distribution a query points. Only returned when return_cov is True. """ if return_std and return_cov: raise RuntimeError( "Not returning standard deviation of predictions when " "returning full covariance.") X = check_array(X) if not hasattr(self, "X_train_"): # Unfitted;predict based on GP prior if self.kernel is None: kernel = (C(1.0, constant_value_bounds="fixed") * RBF(1.0, length_scale_bounds="fixed")) else: kernel = self.kernel y_mean = np.zeros(X.shape[0]) if return_cov: y_cov = kernel(X) return y_mean, y_cov elif return_std: y_var = kernel.diag(X) return y_mean, np.sqrt(y_var) else: return y_mean else: # Predict based on GP posterior K_trans = self.kernel_(X, self.X_train_) y_mean = K_trans.dot(self.alpha_) # Line 4 (y_mean = f_star) y_mean = self._y_train_mean + y_mean # undo normal. if return_cov: v = cho_solve((self.L_, True), K_trans.T) # Line 5 y_cov = self.kernel_(X) - K_trans.dot(v) # Line 6 return y_mean, y_cov elif return_std: # cache result of K_inv computation if self._K_inv is None: # compute inverse K_inv of K based on its Cholesky # decomposition L and its inverse L_inv L_inv = solve_triangular(self.L_.T, np.eye(self.L_.shape[0])) self._K_inv = L_inv.dot(L_inv.T) # Compute variance of predictive distribution y_var = self.kernel_.diag(X) y_var -= np.einsum("ij,ij->i", np.dot(K_trans, self._K_inv), K_trans) # Check if any of the variances is negative because of # numerical issues. If yes: set the variance to 0. y_var_negative = y_var < 0 if np.any(y_var_negative): warnings.warn("Predicted variances smaller than 0. " "Setting those variances to 0.") y_var[y_var_negative] = 0.0 return y_mean, np.sqrt(y_var) else: return y_mean def sample_y(self, X, n_samples=1, random_state=0): """Draw samples from Gaussian process and evaluate at X. Parameters ---------- X : array-like, shape = (n_samples_X, n_features) Query points where the GP samples are evaluated n_samples : int, default: 1 The number of samples drawn from the Gaussian process random_state : int, RandomState instance or None, optional (default=0) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- y_samples : array, shape = (n_samples_X, [n_output_dims], n_samples) Values of n_samples samples drawn from Gaussian process and evaluated at query points. 
""" rng = check_random_state(random_state) y_mean, y_cov = self.predict(X, return_cov=True) if y_mean.ndim == 1: y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T else: y_samples = \ [rng.multivariate_normal(y_mean[:, i], y_cov, n_samples).T[:, np.newaxis] for i in range(y_mean.shape[1])] y_samples = np.hstack(y_samples) return y_samples def log_marginal_likelihood(self, theta=None, eval_gradient=False): """Returns log-marginal likelihood of theta for training data. Parameters ---------- theta : array-like, shape = (n_kernel_params,) or None Kernel hyperparameters for which the log-marginal likelihood is evaluated. If None, the precomputed log_marginal_likelihood of ``self.kernel_.theta`` is returned. eval_gradient : bool, default: False If True, the gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta is returned additionally. If True, theta must not be None. Returns ------- log_likelihood : float Log-marginal likelihood of theta for training data. log_likelihood_gradient : array, shape = (n_kernel_params,), optional Gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta. Only returned when eval_gradient is True. """ if theta is None: if eval_gradient: raise ValueError( "Gradient can only be evaluated for theta!=None") return self.log_marginal_likelihood_value_ kernel = self.kernel_.clone_with_theta(theta) if eval_gradient: K, K_gradient = kernel(self.X_train_, eval_gradient=True) else: K = kernel(self.X_train_) K[np.diag_indices_from(K)] += self.alpha try: L = cholesky(K, lower=True) # Line 2 except np.linalg.LinAlgError: return (-np.inf, np.zeros_like(theta)) \ if eval_gradient else -np.inf # Support multi-dimensional output of self.y_train_ y_train = self.y_train_ if y_train.ndim == 1: y_train = y_train[:, np.newaxis] alpha = cho_solve((L, True), y_train) # Line 3 # Compute log-likelihood (compare line 7) log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha) log_likelihood_dims -= np.log(np.diag(L)).sum() log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi) log_likelihood = log_likelihood_dims.sum(-1) # sum over dimensions if eval_gradient: # compare Equation 5.9 from GPML tmp = np.einsum("ik,jk->ijk", alpha, alpha) # k: output-dimension tmp -= cho_solve((L, True), np.eye(K.shape[0]))[:, :, np.newaxis] # Compute "0.5 * trace(tmp.dot(K_gradient))" without # constructing the full matrix tmp.dot(K_gradient) since only # its diagonal is required log_likelihood_gradient_dims = \ 0.5 * np.einsum("ijl,ijk->kl", tmp, K_gradient) log_likelihood_gradient = log_likelihood_gradient_dims.sum(-1) if eval_gradient: return log_likelihood, log_likelihood_gradient else: return log_likelihood def _constrained_optimization(self, obj_func, initial_theta, bounds): if self.optimizer == "fmin_l_bfgs_b": theta_opt, func_min, convergence_dict = \ fmin_l_bfgs_b(obj_func, initial_theta, bounds=bounds) if convergence_dict["warnflag"] != 0: warnings.warn("fmin_l_bfgs_b terminated abnormally with the " " state: %s" % convergence_dict, ConvergenceWarning) elif callable(self.optimizer): theta_opt, func_min = \ self.optimizer(obj_func, initial_theta, bounds=bounds) else: raise ValueError("Unknown optimizer %s." % self.optimizer) return theta_opt, func_min
repo_name: vortex-ape/scikit-learn
path: sklearn/gaussian_process/gpr.py
language: Python
license: bsd-3-clause
size: 21,298
keyword: [ "Gaussian" ]
text_hash: 77951f6504ac6b518f254d28ede923d9acfd433d9d2cec25f15027db1fe3c6b3
# $Id$
#
#  Copyright (C) 2003-2006 Rational Discovery LLC
#
#   @@ All Rights Reserved @@
#  This file is part of the RDKit.
#  The contents are covered by the terms of the BSD license
#  which is included in the file license.txt, found at the root
#  of the RDKit source tree.
#
import bisect


class TopNContainer(object):
  """ maintains a sorted list of a particular number of data elements. """

  def __init__(self, size, mostNeg=-1e99):
    self._size = size
    self.best = [mostNeg] * self._size
    self.extras = [None] * self._size

  def Insert(self, val, extra=None):
    """ only does the insertion if val fits """
    if val > self.best[0]:
      idx = bisect.bisect(self.best, val)
      # insert the new element
      if idx == self._size:
        self.best.append(val)
        self.extras.append(extra)
      else:
        self.best.insert(idx, val)
        self.extras.insert(idx, extra)
      # and pop off the head
      self.best.pop(0)
      self.extras.pop(0)

  def GetPts(self):
    """ returns our set of points """
    return self.best

  def GetExtras(self):
    """ returns our set of extras """
    return self.extras

  def __len__(self):
    return self._size

  def __getitem__(self, which):
    return self.best[which], self.extras[which]

  def reverse(self):
    self.best.reverse()
    self.extras.reverse()


if __name__ == '__main__':
  import random
  pts = [int(100 * random.random()) for x in range(10)]
  c = TopNContainer(4)
  for pt in pts:
    c.Insert(pt, extra=str(pt))
  print c.GetPts()
  print c.GetExtras()
repo_name: rdkit/rdkit-orig
path: rdkit/DataStructs/TopNContainer.py
language: Python
license: bsd-3-clause
size: 1,553
keyword: [ "RDKit" ]
text_hash: a4116eca4c1cd4258fbb5393221456a097ea51be39bf283e0884c920ff0d37ea
import numpy as np
import tensorflow as tf

from elbow import Gaussian, Model
from elbow.models.factorizations import NoisyGaussianMatrixProduct, NoisySparseGaussianMatrixProduct

####################################################################
# methods to sample from a Gaussian matrix prior with uniform random
# sparsity pattern.

def sample_sparsity(n, m, p=0.1):
    # sample a sparsity mask for an nxm matrix
    if p == 1:
        unique_pairs = [(i,j) for i in range(n) for j in range(m)]
    else:
        nnz = int(n*m*p)
        rows = np.random.randint(0,n,nnz)
        cols = np.random.randint(0,m,nnz)
        unique_pairs = set(zip(rows, cols))
    nzr, nzc = zip(*unique_pairs)
    return np.asarray(nzr), np.asarray(nzc)

def construct_R(n, m, nzr, nzc, z):
    R = np.ones((n, m)) * np.nan
    nnz = len(nzr)
    for i in range(nnz):
        R[nzr[i], nzc[i]] = z[i]
    return R

################################################################

def sparse_model(row_idxs, col_idxs, n=10, m=9, prior_std=1.0, noise_std=0.1):
    A = Gaussian(mean=0.0, std=prior_std, shape=(n, 3), name="A")
    B = Gaussian(mean=0.0, std=prior_std, shape=(m, 3), name="B")
    C = NoisySparseGaussianMatrixProduct(A=A, B=B, std=noise_std,
                                         row_idxs=row_idxs, col_idxs=col_idxs,
                                         name="C")
    jm = Model(C)
    return jm

def main():

    """
    Sample data from the prior
    """
    n = 100
    m = 50
    k = 3
    sparsity = 0.2

    nzr, nzc = sample_sparsity(n,m, sparsity)
    print "sampled sparsity pattern has %d of %d entries nonzero" % (len(nzr), n*m)

    jm = sparse_model(nzr, nzc, n=n, m=m)
    sampled = jm.sample()
    jm["C"].observe(sampled["C"])

    mean_abs_err = np.mean(np.abs(sampled["C"]))
    print "baseline (all zeroes) reconstructs observations with mean deviation %.3f" % (mean_abs_err)

    """
    Reconstruction err for true latent traits.
    """
    sA = sampled["A"]
    sB = sampled["B"]
    sC = np.dot(sA, sB.T)
    mean_abs_err = np.mean(np.abs(sC[nzr,nzc] - sampled["C"]))
    print "true latent values reconstruct observations with mean deviation %.3f" % (mean_abs_err)

    """
    Run inference and compute reconstruction err on the inferred traits.
    Note we consider reconstruction error on C, rather than direct
    recovery of the latents A and B, because due to model symmetries the
    latter will only be recovered up to a linear transformation.
    """
    jm.train(avg_decay=0.995)
    posterior = jm.posterior()
    qA = posterior["q_A"]["mean"]
    qB = posterior["q_B"]["mean"]
    qC = np.dot(qA, qB.T)
    mean_abs_err_inferred = np.mean(np.abs(qC[nzr,nzc] - sampled["C"]))
    print "inferred latent values reconstruct observations with mean deviation %.3f" % (mean_abs_err_inferred)

if __name__ == "__main__":
    main()
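A small numpy-only sanity check (a sketch, not from the original file) of the two sparsity helpers above; the sizes and seed are arbitrary:

    import numpy as np

    np.random.seed(0)
    nzr, nzc = sample_sparsity(20, 10, p=0.3)
    R = construct_R(20, 10, nzr, nzc, np.ones(len(nzr)))
    # Duplicated (row, col) draws are collapsed into a set, so the realized
    # number of nonzeros is at most int(n*m*p).
    assert len(nzr) <= int(20 * 10 * 0.3)
    # Entries outside the sampled mask stay NaN.
    assert np.isnan(R).sum() == 20 * 10 - len(nzr)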
repo_name: davmre/elbow
path: examples/matrix_factorization.py
language: Python
license: bsd-3-clause
size: 2,980
keyword: [ "Gaussian" ]
text_hash: aaa3b563d71f25e60ed5f2501601ad6e7d531981aea29fbc446fd36a9c8ab794
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests many modules to compute energy of LiH."""
from __future__ import absolute_import

import os
import numpy
import scipy.sparse
import unittest

from fermilib.config import *
from fermilib.ops import *
from fermilib.transforms import *
from fermilib.utils import *


class LiHIntegrationTest(unittest.TestCase):

    def setUp(self):
        # Set up molecule.
        geometry = [('Li', (0., 0., 0.)), ('H', (0., 0., 1.45))]
        basis = 'sto-3g'
        multiplicity = 1
        filename = os.path.join(THIS_DIRECTORY, 'data',
                                'H1-Li1_sto-3g_singlet_1.45')
        self.molecule = MolecularData(
            geometry, basis, multiplicity, filename=filename)
        self.molecule.load()

        # Get molecular Hamiltonian.
        self.molecular_hamiltonian = self.molecule.get_molecular_hamiltonian()
        self.molecular_hamiltonian_no_core = (
            self.molecule.
            get_molecular_hamiltonian(occupied_indices=[0],
                                      active_indices=range(1,
                                                           self.molecule.
                                                           n_orbitals)))

        # Get FCI RDM.
        self.fci_rdm = self.molecule.get_molecular_rdm(use_fci=1)

        # Get explicit coefficients.
        self.nuclear_repulsion = self.molecular_hamiltonian.constant
        self.one_body = self.molecular_hamiltonian.one_body_tensor
        self.two_body = self.molecular_hamiltonian.two_body_tensor

        # Get fermion Hamiltonian.
        self.fermion_hamiltonian = normal_ordered(get_fermion_operator(
            self.molecular_hamiltonian))

        # Get qubit Hamiltonian.
        self.qubit_hamiltonian = jordan_wigner(self.fermion_hamiltonian)

        # Get explicit coefficients.
        self.nuclear_repulsion = self.molecular_hamiltonian.constant
        self.one_body = self.molecular_hamiltonian.one_body_tensor
        self.two_body = self.molecular_hamiltonian.two_body_tensor

        # Get matrix form.
        self.hamiltonian_matrix = get_sparse_operator(
            self.molecular_hamiltonian)
        self.hamiltonian_matrix_no_core = get_sparse_operator(
            self.molecular_hamiltonian_no_core)

    def test_all(self):
        # Test reverse Jordan-Wigner.
        fermion_hamiltonian = reverse_jordan_wigner(self.qubit_hamiltonian)
        fermion_hamiltonian = normal_ordered(fermion_hamiltonian)
        self.assertTrue(self.fermion_hamiltonian.isclose(fermion_hamiltonian))

        # Test mapping to interaction operator.
        fermion_hamiltonian = get_fermion_operator(self.molecular_hamiltonian)
        fermion_hamiltonian = normal_ordered(fermion_hamiltonian)
        self.assertTrue(self.fermion_hamiltonian.isclose(fermion_hamiltonian))

        # Test RDM energy.
        fci_rdm_energy = self.nuclear_repulsion
        fci_rdm_energy += numpy.sum(self.fci_rdm.one_body_tensor *
                                    self.one_body)
        fci_rdm_energy += numpy.sum(self.fci_rdm.two_body_tensor *
                                    self.two_body)
        self.assertAlmostEqual(fci_rdm_energy, self.molecule.fci_energy)

        # Confirm expectation on qubit Hamiltonian using reverse JW matches.
        qubit_rdm = self.fci_rdm.get_qubit_expectations(self.qubit_hamiltonian)
        qubit_energy = 0.0
        for term, coefficient in qubit_rdm.terms.items():
            qubit_energy += coefficient * self.qubit_hamiltonian.terms[term]
        self.assertAlmostEqual(qubit_energy, self.molecule.fci_energy)

        # Confirm fermionic RDMs can be built from measured qubit RDMs.
        new_fermi_rdm = get_interaction_rdm(qubit_rdm)
        fermi_rdm_energy = new_fermi_rdm.expectation(
            self.molecular_hamiltonian)
        self.assertAlmostEqual(fci_rdm_energy, self.molecule.fci_energy)

        # Test sparse matrices.
        energy, wavefunction = get_ground_state(self.hamiltonian_matrix)
        self.assertAlmostEqual(energy, self.molecule.fci_energy)
        expected_energy = expectation(self.hamiltonian_matrix, wavefunction)
        self.assertAlmostEqual(expected_energy, energy)

        # Make sure you can reproduce Hartree-Fock energy.
        hf_state = jw_hartree_fock_state(
            self.molecule.n_electrons, count_qubits(self.qubit_hamiltonian))
        hf_density = get_density_matrix([hf_state], [1.])
        expected_hf_density_energy = expectation(self.hamiltonian_matrix,
                                                 hf_density)
        expected_hf_energy = expectation(self.hamiltonian_matrix, hf_state)
        self.assertAlmostEqual(expected_hf_energy, self.molecule.hf_energy)
        self.assertAlmostEqual(expected_hf_density_energy,
                               self.molecule.hf_energy)

        # Check that frozen core result matches frozen core FCI from psi4.
        # Record frozen core result from external calculation.
        self.frozen_core_fci_energy = -7.8807607374168
        no_core_fci_energy = scipy.linalg.eigh(
            self.hamiltonian_matrix_no_core.todense())[0][0]
        self.assertAlmostEqual(no_core_fci_energy,
                               self.frozen_core_fci_energy)
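A minimal, self-contained sketch (not from the test above) of the jordan_wigner transform this test exercises, assuming the FermiLib API imported above; FermionOperator uses the 'p^ q' ladder-operator string syntax:

    from fermilib.ops import FermionOperator
    from fermilib.transforms import jordan_wigner

    # A single hermitian hopping term between modes 0 and 1.
    hopping = FermionOperator('1^ 0') + FermionOperator('0^ 1')
    qubit_operator = jordan_wigner(hopping)
    # Expected result: 0.5 [X0 X1] + 0.5 [Y0 Y1]
    print(qubit_operator)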
repo_name: ProjectQ-Framework/FermiLib
path: src/fermilib/tests/_lih_integration_test.py
language: Python
license: apache-2.0
size: 5,859
keyword: [ "Psi4" ]
text_hash: a3c441f669f45a29abf3669ce7709c4d98c3b0e386128fba43338e102efaecf1
#!/usr/bin/python
import i3ipc
import rofi
import sys
import time
from time import sleep

i3 = i3ipc.Connection()
menu = rofi.Rofi()

def scratchpad():
    for con in i3.get_tree():
        if(con.name == "__i3_scratch"):
            return con

def get_windows():
    wins = []
    names = []
    numEntries = 0
    for con in scratchpad():
        wins += con
    for con in wins:
        names.append(con.name)
        numEntries += 1
    return reversed(wins),reversed(names), numEntries

wins, names, numEntries = get_windows()

index, key = menu.select("Scratchpad",names)#,select="0 -theme /home/brian/.config/rofi/i3scratch.rasi")

if index < 0:
    sys.exit()

for i in range((numEntries - index)*2-1):
    i3.command("scratchpad show")

i3.command("resize set 1800 1012")
i3.command("move position center")
repo_name: AnonymFox/dotfiles
path: i3-gaps/scratchpad.py
language: Python
license: mit
size: 817
keyword: [ "Brian" ]
text_hash: 8cabe76210848087a9c9efa4d77dfd85467734c470e84b8dca9eb4da33a595b2
#!/usr/bin/env python

'''
V2015-05-03
'''

import os
import csv
from itertools import ifilter
import sys

"""
COMMON FUNCTIONS
"""

def output_header_file_old(infile,skip,output_handle,sup_list=[],eliminate=0):
    ##Version1.0
    ##write header into files
    reader=csv.reader(open(infile,"rU"),delimiter="\t")
    for i in xrange(skip):
        if len(sup_list)!=0:
            initial_rows=reader.next()
            rows=initial_rows+sup_list
        else:
            rows=reader.next()
    if skip==0:
        pass
    else:
        output_row(output_handle,rows,eliminate)

def output_column_descriptor(infile):
    reader=csv.reader(open(infile,"rU"),delimiter="\t")
    column_descriptor_list = []
    for row in reader:
        test2=row[0][0:2]
        test1=row[0][0:1]
        if test2=="##":
            pass
        elif test1=="#":
            column_descriptor_list=row[:]
        else:
            break
    return column_descriptor_list

def output_header_file(infile,output_handle,sup_list=[],eliminate=0):
    ##Version1.0
    ##write header into files
    reader=csv.reader(open(infile,"rU"),delimiter="\t")
    for row in reader:
        test2=row[0][0:2]
        test1=row[0][0:1]
        if test2=="##":
            output_row(output_handle,row)
        elif test1=="#":
            output_row(output_handle,row+sup_list)
        else:
            break

def output_header_VCF_file(infile,output_handle,cmd_record,sup_list=[],eliminate=0):
    ##Version1.0
    ##write header into files
    reader=csv.reader(open(infile,"rU"),delimiter="\t")
    first_line=True
    description_output=False
    for row in reader:
        #if first_line!=True and (row[0][0:17]=="##fileformat=VCFv"):
        #    continue
        if row[0][0:2]=="##":
            if first_line and row[0][0:18]!="##fileformat=VCFv4":
                output_handle.write("##fileformat=VCFv4.0\n")
                output_row(output_handle,row)
                if cmd_record!="":
                    output_handle.write(cmd_record)
            elif first_line and row[0][0:18]=="##fileformat=VCFv4":
                output_row(output_handle,row)
                if cmd_record!="":
                    output_handle.write(cmd_record)
            else:
                #output_handle.write("##fileformat=VCFv4.0\n")
                output_row(output_handle,row)
        elif row[0][0]=="#" and row[0][1]!="#":
            if first_line==True:
                output_handle.write("##fileformat=VCFv4.0\n")
                if cmd_record!="":
                    output_handle.write(cmd_record)
            if eliminate==0:
                combined_row=row+sup_list
            else:
                combined_row=row[:(-1)*eliminate]+sup_list
            output_row(output_handle,combined_row)
            description_output=True
        else:
            if first_line==True:
                output_handle.write("##fileformat=VCFv4.0\n")
                if cmd_record!="":
                    output_handle.write(cmd_record)
                print "quit early"
            break
        first_line=False
    if description_output==False:
        description_list=["#CHRO","COOR","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT"]
        description_list=description_list+sup_list
        output_row(output_handle,description_list)

def output_header_VCF_file_replace(infile,output_handle,cmd_record,sup_list=[],eliminate=0):
    ##Version1.0
    ##write header into files
    reader=csv.reader(open(infile,"rU"),delimiter="\t")
    first_line=True
    for row in reader:
        #if first_line!=True and (row[0][0:17]=="##fileformat=VCFv"):
        #    continue
        if row[0][0:2]=="##":
            if first_line and row[0][0:18]!="##fileformat=VCFv4":
                output_handle.write("##fileformat=VCFv4.0\n")
                output_row(output_handle,row,eliminate)
                if cmd_record!="":
                    output_handle.write(cmd_record)
            elif first_line and row[0][0:18]=="##fileformat=VCFv4":
                output_row(output_handle,row,eliminate)
                if cmd_record!="":
                    output_handle.write(cmd_record)
            else:
                #output_handle.write("##fileformat=VCFv4.0\n")
                output_row(output_handle,row,eliminate)
        elif row[0][0]=="#" and row[0][1]!="#":
            if first_line==True:
                output_handle.write("##fileformat=VCFv4.0\n")
                if cmd_record!="":
                    output_handle.write(cmd_record)
            #combined_row=row+sup_list
            #output_row(output_handle,combined_row,eliminate)
        else:
            if first_line==True:
                output_handle.write("##fileformat=VCFv4.0\n")
                if cmd_record!="":
                    output_handle.write(cmd_record)
                print "quit early"
            break
        first_line=False

def output_row(handle,row,eliminate=0):
    ##write row into files
    len_row=len(row)-eliminate
    for i in xrange(len_row):
        if i==(len_row-1):
            handle.write(str(row[i])+'\n')
        else:
            handle.write(str(row[i])+'\t')

def get_file_name(full_name):
    if full_name.count("/")==0:
        return full_name
    else:
        full_name_list=full_name.split("/")
        return full_name_list[-1]

def get_path(full_name):
    full_name_list=full_name.split("/")
    full_name_len=len(full_name_list)
    path=""
    for index in range(1,full_name_len-1):
        path=path+"/"+full_name_list[index]
    return path

"""
COMMON FUNCTIONS
"""

class GeneralFile_class:
    def __init__(self,name):
        self.filename=name
        self.name_only=get_file_name(name)
        self.path_only=get_path(name)
        self.SEP_CHAR='\t'
        self.SKIP_HEADER=0
        self.SAMPLE_ID_LEN=1  #this will determin how many section will be considered to be the unique ID
        self.SAMPLE_ID_POS=0
        self.UNIQUE_ID_COLUMN=0
        self.FILENAME_SPLIT_CHAR='_'
        self.RECORD=""
        self.AUTOSKIP_HEADER=True
        self.OUTPUT_PATH=os.getcwd()
        #self.count_column_number()

    def count_column_number(self):
        reader=csv.reader(open(self.filename,'rU'),delimiter=self.SEP_CHAR)
        rows=reader.next()
        self.COLUMN_COUNT=len(rows)

    def ID_frequency_dict_gen(self,COLUMN=2,FILEPATH=os.getcwd()):
        if '/' in self.filename:
            complete_path=self.filename
        else:
            complete_path=FILEPATH + '/' + self.filename
        reader=csv.reader(open(complete_path,'r'),delimiter=self.SEP_CHAR)
        for i in range(self.SKIP_HEADER):
            reader.next()
        ID_dict=dict()
        for rows in reader:
            ID=rows[COLUMN]
            ID_dict[ID]=0
        reader=csv.reader(open(complete_path,'r'),delimiter=self.SEP_CHAR)
        for i in range(self.SKIP_HEADER):
            reader.next()
        for rows in reader:
            ID=rows[COLUMN]
            ID_dict[ID]+=1
        return ID_dict

    def generate_sample_id(self):
        POS=self.SAMPLE_ID_POS
        if '/' not in self.filename:
            filename_list=(self.name_only).split(self.FILENAME_SPLIT_CHAR)
        else:
            infile_path_list=self.filename.split('/')
            infile_name=infile_path_list[-1]
            filename_list=infile_name.split(self.FILENAME_SPLIT_CHAR)
        if self.SAMPLE_ID_LEN==1:
            sample_id=filename_list[POS]
        else:
            filename_list_len=len(filename_list)
            if self.SAMPLE_ID_LEN>filename_list_len:
                self.SAMPLE_ID_LEN=filename_list_len
            for i in range(self.SAMPLE_ID_LEN):
                if i == 0 :
                    sample_id=filename_list[POS]
                else:
                    sample_id+=self.FILENAME_SPLIT_CHAR+filename_list[POS+i]
        return sample_id

    def outputfilename_gen(self,name_fragment="std_out",suffix="txt",POS=0):
        ##version2.0
        if '/' not in self.filename:
            #infile_name_list=(self.filename).split(self.FILENAME_SPLIT_CHAR)
            #sample_id=infile_name_list[POS]
            sample_id=self.generate_sample_id()
            output_filename=sample_id+"_"+name_fragment+'.'+suffix
            return output_filename
        else:
            infile_path_list=self.filename.split('/')
            infile_name=infile_path_list[-1]
            print "infile_name",infile_name
            #infile_name_list=(infile_name).split(self.FILENAME_SPLIT_CHAR)
            #sample_id=infile_name_list[POS]
            self.name_only=infile_name
            sample_id=self.generate_sample_id()
            output_filename=sample_id+"_"+name_fragment+'.'+suffix
            return output_filename

    def sampleID_gen(self):
        ##version2.0
        if self.name_only.count("/")>0:
            tmp_list=self.name_only.split("/")
            self.name_only=tmp_list[-1]
        tmp_list=(self.name_only).split(self.FILENAME_SPLIT_CHAR)
        sampleID=tmp_list[self.SAMPLE_ID_POS]
        return sampleID

    def reader_gen(self,FILEPATH=os.getcwd()):
        ## this section solve the potential problem running on the PC, Need to implement more
        if FILEPATH.count('\\')>0:
            FILEPATH=FILEPATH.replace('\\','/')
        if '/' in self.filename:
            complete_path=self.filename
        else:
            complete_path=FILEPATH + '/' + self.filename
        reader=csv.reader(open(complete_path,'rU'),delimiter=self.SEP_CHAR,quoting=csv.QUOTE_NONE)
        if self.AUTOSKIP_HEADER==True and self.SKIP_HEADER==0:
            ## this will over-write provided default
            skip_number=0
            row=reader.next()
            while row[0][0]=="#":
                skip_number+=1
                row=reader.next()
            self.SKIP_HEADER=skip_number
            #print "current skip header value is", self.SKIP_HEADER
        reader=csv.reader(open(complete_path,'rU'),delimiter=self.SEP_CHAR,quoting=csv.QUOTE_NONE)
        for i in range(self.SKIP_HEADER):
            reader.next()
        return reader

    def unique_ID_list_gen(self,reader,unique_ID_column):
        unique_ID_list=[]
        for row in reader:
            unique_ID=row[unique_ID_column]
            unique_ID_list.append(unique_ID)
        return unique_ID_list

    def unique_ID_list_gen_v2(self,reader,unique_ID_column):
        ## under development
        unique_ID_list=[]
        for row in reader:
            unique_ID=row[unique_ID_column]
            unique_ID_list.append(unique_ID)
        return unique_ID_list

    def output_handle_gen(self,header_file=None,FILEPATH=os.getcwd(),sup_list=[],HEAD_LINE=1):
        ##Version2.0
        ##Updated 2012-10-31
        '''
        header_file is the file contains the header information
        FILEPATH is the path for the output file
        sup_list is the additional annotations added to the output file header
        HEAD_LINE is the number of header lines extracted from header file and writen into output file
        '''
        if '/' in self.filename:
            complete_path=self.filename
        else:
            complete_path=FILEPATH + '/' + self.filename
        self.handle=open(complete_path,'w')
        if self.RECORD=="":
            pass
        else:
            self.handle.write(self.RECORD)
        if header_file==None:
            pass
        else:
            output_header_file(header_file,self.handle,sup_list,eliminate=0)

class SVDetectFile_class(GeneralFile_class):
    ##This file is SVDetect Subtype
    SVDetect_CHRO1_COLUMN=0
    SVDetect_START1_COLUMN=1
    SVDetect_END1_COLUMN=2
    SVDetect_CHRO2_COLUMN=3
    SVDetect_START2_COLUMN=4
    SVDetect_END2_COLUMN=5
    SVDetect_TYPE_COLUMN=16
    SEP_CHAR='\t'
    SKIP_HEADER=0
    DIST_THRESHOLD=1000

    def __init__(self,name):
        GeneralFile_class.__init__(self,name)
        self.SKIP_HEADER=0
        self.CHRO2_COLUMN=3
        self.CHRO1_COLUMN=0
        self.START1_COLUMN=1
        self.END1_COLUMN=2
        self.START2_COLUMN=4
        self.END2_COLUMN=5
        self.ID_COLUMN=-1

    def region1_output(self):
        return None

    def reader_gen(self,FILEPATH=os.getcwd()):
        complete_path=FILEPATH + '/' + self.filename
        #print "complete_path",complete_path
        reader=csv.reader(open(complete_path,'r'),delimiter=self.SEP_CHAR)
        for i in range(self.SKIP_HEADER):
            reader.next()
        return reader

    def filter_chro(self,infile_reader,filter_chro,eliminate_ID):
        data_dict=dict()
        data_list=[]
        previous_point=0
        for rows in infile_reader:
            chro=rows[self.CHRO2_COLUMN]
            ID=rows[self.ID_COLUMN]
            if filter_chro==chro and ID!=eliminate_ID:
                start1=rows[self.START1_COLUMN]
                end1=rows[self.END1_COLUMN]
                middle_point=int((int(start1)+int(end1))/2)
                if previous_point==middle_point:
                    middle_point+=0.1
                previous_point=middle_point
                data_dict[middle_point]=rows
                data_list.append(middle_point)
        #print "data_list,",data_list
        '''
        for data in data_list:
            if data_list.count(data) > 1:
                print "Same cooridnate ocurrs, Fix needed"
                sys.exit()
        '''
        data_list.sort()
        #print "sorted_data", sorted_data
        #print "data_dict", data_dict
        return data_list,data_dict

class BEDFile_class(GeneralFile_class):
    SEP_CHAR='\t'
    # this class can be used for BedGraph format as well
    def __init__(self,name):
        GeneralFile_class.__init__(self,name)
        self.SKIP_HEADER=0
        self.CHRO_COLUMN=0
        self.START_COLUMN=1
        self.END_COLUMN=2
        self.ID_COLUMN=3
        self.SCORE_COLUMN=4
        self.STRAND_COLUMN=5

class Indel_GATK_File_class(BEDFile_class):
    def __init__(self,name):
        BEDFile_class.__init__(self,name)
        self.TUMOR_STRAND_COLUMN=17
        self.TUMOR_STRAND_COLUMN_SEP=':'
        self.TUMOR_STRAND_COLUMN_INFO=1
        self.INDEL_COLUMN=3
        self.GENE_COLUMN=-1
        self.FREQ_COLUMN=21
        self.SKIP_HEADER=1  ##Not sure why there is a blank column there in the data

class VCF_File_class(GeneralFile_class):
    def __init__(self,name):
        GeneralFile_class.__init__(self,name)
        self.QUAL_COLUMN=5
        self.ALT_COLUMN=4
        self.REF_COLUMN=3
        self.ID_COLUMN=2
        self.COOR_COLUMN=1
        self.CHRO_COLUMN=0
        self.FILTER_COLUMN=6
        self.INFO_COLUMN=7
        self.FORMAT_COLUMN=8
        self.SKIP_HEADER=0
        self.SEP_INFO_START_COLUMN=9
        self.ALT_SEP_CHAR=','
        self.REPLACE_DESCRIPTION=0
        self.DESCRIPTION_COLUMN_REMOVAL=0

    def check_header(self,FILEPATH=os.getcwd()):
        ##check the description column and first column
        result=0
        if '/' in self.filename:
            complete_path=self.filename
        else:
            complete_path=FILEPATH + '/' + self.filename
        reader=csv.reader(open(complete_path,'r'),delimiter=self.SEP_CHAR)
        for row in reader:
            if row[0][0]!="#":
                break
            else:
                if row[0][1:6].upper()=="CHROM":
                    result=1
        return result

    def sample_list_gen(self,FILEPATH=os.getcwd()):
        if '/' in self.filename:
            complete_path=self.filename
        else:
            complete_path=FILEPATH + '/' + self.filename
        reader=csv.reader(open(complete_path,'r'),delimiter=self.SEP_CHAR)
        for rows in reader:
            first_item=rows[0]
            if first_item[0]=='#' and first_item[1]!='#':
                sample_list=rows[self.SEP_INFO_START_COLUMN:]
                break
            else:
                pass
        return sample_list

    def reader_gen(self,FILEPATH=os.getcwd()):
        if '/' in self.filename:
            complete_path=self.filename
        else:
            complete_path=FILEPATH + '/' + self.filename
        reader=csv.reader(open(complete_path,'rU'),delimiter=self.SEP_CHAR)
        skip_count=0
        try:
            rows=reader.next()
            while rows[0][0]=="#":
                skip_count+=1
                rows=reader.next()
            reader=csv.reader(open(complete_path,'rU'),delimiter=self.SEP_CHAR)
            for i in range(skip_count):
                reader.next()
        except:
            pass
        return reader

    def output_sample_info(self,FILEPATH=os.getcwd()):
        if '/' in self.filename:
            complete_path=self.filename
        else:
            complete_path=FILEPATH + '/' + self.filename
        reader=csv.reader(open(complete_path,'rU'),delimiter=self.SEP_CHAR)
        for rows in reader:
            if rows[0][0:2]=="##":
                pass
            else:
                sample_info=rows[9:]
                break
        return sample_info

    def output_handle_gen(self,header_file=None,FILEPATH=os.getcwd(),sup_list=[],HEAD_LINE=1):
        ##Version2.0
        ##Updated 2012-10-31
        '''
        header_file is the file contains the header information
        FILEPATH is the path for the output file
        sup_list is the additional annotations added to the output file header
        HEAD_LINE is the number of header lines extracted from header file and writen into output file
        '''
        if '/' in self.filename:
            complete_path=self.filename
        else:
            complete_path=FILEPATH + '/' + self.filename
        self.handle=open(complete_path,'w')
        if header_file==None:
            self.handle.write("##fileformat=VCFv4\n")
            if self.RECORD=="":
                pass
            else:
                self.handle.write(self.RECORD)
            pass
        else:
            #output_header_file(header_file,self.handle,sup_list,eliminate=0)
            if self.REPLACE_DESCRIPTION==0:  ##
                output_header_VCF_file(header_file,self.handle,self.RECORD,sup_list,eliminate=self.DESCRIPTION_COLUMN_REMOVAL)
            else:
                output_header_VCF_file_replace(header_file,self.handle,self.RECORD,sup_list,eliminate=self.DESCRIPTION_COLUMN_REMOVAL)

    def sample_count(self,FILEPATH=os.getcwd()):
        if '/' in self.filename:
            complete_path=self.filename
        else:
            complete_path=FILEPATH + '/' + self.filename
        reader=csv.reader(open(complete_path,'rU'),delimiter=self.SEP_CHAR)
        sample_count=0
        for rows in reader:
            if rows[0][0]=="#":
                pass
            else:
                sample_info=rows[9:]
                for sample_data in sample_info:
                    if sample_data.count(":")==2:
                        sample_count+=1
                    elif sample_data=="./.":
                        sample_count+=1
                    else:
                        pass
                break
        return sample_count

    def variant_list_gen(self):
        variant_dict=dict()
        infile_reader=self.reader_gen()
        for row in infile_reader:
            chro=row[self.CHRO_COLUMN]
            coor=row[self.COOR_COLUMN]
            ref=row[self.REF_COLUMN]
            alt=row[self.ALT_COLUMN]
            unique_ID=chro+"_"+coor+"_"+ref+"_"+alt
            variant_dict[unique_ID]=[]
        return variant_dict

    def add_to_filter_column(self,vcf_row,additional_filter):
        current_filter=vcf_row[self.FILTER_COLUMN]
        new_filter=current_filter+";" + additional_filter
        new_vcf_row=vcf_row[:]
        new_vcf_row[self.FILTER_COLUMN]=new_filter
        return new_vcf_row

class PBS_File_class(GeneralFile_class):
    def __init__(self,name,path=os.getcwd()):
        GeneralFile_class.__init__(self,name)
        self.email="zhangliy@bu.edu"
        self.memory="2g"
        self.suffix='pbs'
        self.PROJECT="montilab-p"
        self.MACHINE="scc"
        self.RUNTIME_LIMIT="96:00:00"
        #self.
        #GeneralFile_class.output_handle_gen(self,FILEPATH=path)

    def output_pbs(self,command_line_list):
        self.output_handle_gen()
        if self.MACHINE=="scc":
            self.handle.write("source ~/.bashrc\n")
            self.handle.write("#!bin/bash\n")
            self.handle.write("#$ -l h_rt="+self.RUNTIME_LIMIT+'\n')
            self.handle.write("\n")
        else:
            self.handle.write("#!bin/bash\n")
            self.handle.write("#\n")
            self.handle.write("\n")
        self.handle.write("#Specify which shell to use\n")
        self.handle.write("#$ -S /bin/bash\n")
        self.handle.write("\n")
        self.handle.write("#Run on the current working folder\n")
        self.handle.write("#$ -cwd\n")
        self.handle.write("\n")
        self.handle.write("#Given this job a name\n")
        if self.filename.count("/")>=1:
            filename_info_list=self.filename.split("/")
            filename_info=filename_info_list[-1]
        else:
            filename_info=self.filename
        self.handle.write("#$ -N S"+filename_info+'\n')
        self.handle.write("\n")
        self.handle.write("#Join standard output and error to a single file\n")
        self.handle.write("#$ -j y\n")
        self.handle.write("\n")
        self.handle.write("# Name the file where to redict standard output and error\n")
        if self.filename.count("/")>=1:
            filename_info_list=self.filename.split("/")
            filename_info=filename_info_list[-1]
        else:
            filename_info=self.filename
        self.handle.write("#$ -o "+ filename_info +".qlog\n")
        self.handle.write("\n")
        self.handle.write("# Project this job belongs to \n")
        self.handle.write("#$ -P " + self.PROJECT+ " \n")
        self.handle.write("\n")
        self.handle.write("# Send an email when the job begins and when it ends running\n")
        self.handle.write("#$ -m be\n")
        self.handle.write("\n")
        if (self.email).lower()!="no":
            self.handle.write("# Whom to send the email to\n")
            self.handle.write("#$ -M "+self.email+ "\n")
            self.handle.write("\n")
        self.handle.write("# memory usage\n")
        self.handle.write("#$ -l mem_free="+self.memory+ "\n")
        self.handle.write("\n")
        self.handle.write("# Now let's Keep track of some information just in case anything go wrong\n")
        self.handle.write("echo "+'"'+"========================================" + '"'+'\n')
        self.handle.write("echo "+'"'+"Starting on : $(date)"+'"'+ "\n")
        self.handle.write("echo "+'"'+"Running on node : $(hostname)"+'"'+"\n")
        self.handle.write("echo "+'"'+"Current directory : $(pwd)"+'"'+"\n")
        self.handle.write("echo "+'"'+"Current job ID : $JOB_ID"+'"'+"\n")
        self.handle.write("echo "+'"'+"Current job name : $JOB_NAME"+'"'+"\n")
        self.handle.write("echo "+'"'+"Task index number : $TASK_ID"+'"'+"\n")
        self.handle.write("echo "+'"'+"========================================" + '"'+'\n')
        self.handle.write("\n")
        for command_line in command_line_list:
            self.handle.write(command_line)
            self.handle.write('\n')
        self.handle.write("\n")
        self.handle.write("echo "+'"'+"========================================" + '"'+'\n')
        self.handle.write("echo "+'"'+"Finished on : $(date)"+'"'+ "\n")
        self.handle.write("echo "+'"'+"========================================" + '"'+'\n')
        self.handle.close()

class PSL_File_class(GeneralFile_class):
    def __init__(self,name):
        GeneralFile_class.__init__(self,name)
        self.MATCH_COLUMN=0
        self.MISMATCH_COLUMN=1
        self.QUERY_INSERTION_COUNT_COLUMN=4
        self.QUERY_INSERTION_LEN_COLUMN=5
        self.REF_INSERTION_COUNT_COLUMN=6
        self.REF_INSERTION_LEN_COLUMN=7
        self.STRAND_COLUMN=8
        self.QUERY_ID_COLUMN=9
        self.QUERY_LEN_COLUMN=10
        self.QUERY_START_COLUMN=11
        self.QUERY_END_COLUMN=12
        self.REF_ID_COLUMN=13
        self.REF_LEN_COLUMN=14
        self.REF_START_COLUMN=15
        self.REF_END_COLUMN=16
        self.BLOCK_COUNT_COLUMN=17
        self.BLOCK_SIZE_COLUMN=18
        self.QUERY_STARTS_COLUMN=19
        self.REF_STARTS_COLUMN=20
        self.AUTOSKIP_HEADER=False
        self.SKIP_HEADER=5

    def output_handle_gen(self,header_file=None,FILEPATH=os.getcwd(),sup_list=[],HEAD_LINE=1):
        ##Version2.0
        ##Updated 2012-10-31
        if '/' in self.filename:
            complete_path=self.filename
        else:
            complete_path=FILEPATH + '/' + self.filename
        self.handle=open(complete_path,'w')
        if self.RECORD=="":
            pass
        else:
            self.handle.write(self.RECORD)
        reader=csv.reader(open(header_file,"rU"),delimiter="\t")
        for index in range(self.SKIP_HEADER):
            row=reader.next()
            output_row(self.handle,row)
            if index==0:
                self.handle.write('\n')

class SAM_File_class(GeneralFile_class):
    def __init__(self,name):
        GeneralFile_class.__init__(self,name)
        self.QNAME_COLUMN=0  #ID of fastq
        self.FLG_COLUMN=1  #flag
        self.CHRO_COLUMN=2  #chromosome
        self.COOR_COLUMN=3
        self.MAPQ_COLUMN=4
        self.CIGAR_COLUMN=5
        self.RNEXT_COLUMN=6
        self.PNEXT_COLUMN=7
        self.TLEN_COLUMN=8
        self.SEQ_COLUMN=9
        self.QUAL_COLUMN=10
        self.READGROUP_COLUMN=12
        self.MULTI_ALIGNMENT_COLUMN=12  ## for the new bwa result version at least
        self.SKIP_HEADER=0

    def reader_gen(self,FILEPATH=os.getcwd()):
        if '/' in self.filename:
            complete_path=self.filename
        else:
            complete_path=FILEPATH + '/' + self.filename
        reader=csv.reader(open(complete_path,'rU'),delimiter=self.SEP_CHAR,quoting=csv.QUOTE_NONE)
        if self.AUTOSKIP_HEADER==True:
            ## this will over-write provided default
            skip_number=0
            row=reader.next()
            while row[0][0]=="@":
                skip_number+=1
                row=reader.next()
            self.SKIP_HEADER=skip_number
        reader=csv.reader(open(complete_path,'rU'),delimiter=self.SEP_CHAR,quoting=csv.QUOTE_NONE)
        for i in range(self.SKIP_HEADER):
            reader.next()
        return reader

    def ref_dict_gen(self,FILEPATH=os.getcwd()):
        ## this function output the reference chromosome into a dict
        if '/' in self.filename:
            complete_path=self.filename
        else:
            complete_path=FILEPATH + '/' + self.filename
        reader=csv.reader(open(complete_path,'rU'),delimiter=self.SEP_CHAR,quoting=csv.QUOTE_NONE)
        row=reader.next()
        ref_dict= dict()
        while row[0][0]=="@":
            if row[0][1:3]=="SQ":
                ref_name=row[1][3:]
                ref_length=int(row[2][3:])
                ref_dict[ref_name]=ref_length
            row=reader.next()
        return ref_dict

class GTF_File_class(GeneralFile_class):
    def __init__(self,name):
        GeneralFile_class.__init__(self,name)
        self.CHRO_COLUMN=0
        self.SOURCE_COLUMN=1
        self.FEATURE_COLUMN=2
        self.START_COLUMN=3
        self.END_COLUMN=4
        self.SCORE_COLUMN=5
        self.STRAND_COLUMN=6
        self.FRAME_COLUMN=7
        self.ATTRIBUTE_COLUMN=8
        self.SKIP_HEADER=0

class MPILEUP_SINGLE_File_class(GeneralFile_class):
    def __init__(self,name):
        GeneralFile_class.__init__(self,name)
        self.CHRO_COLUMN=0
        self.COOR_COLUMN=1
        self.REF_COLUMN=2
        self.COUNT_COLUMN=3
        self.INFO_COLUMN=4
        self.QUALITY_COLUMN=5
        self.SKIP_HEADER=0

class GZ_File_class(GeneralFile_class):
    def __init__(self,name):
        GeneralFile_class.__init__(self,name)

    def reader_gen(self,FILEPATH=os.getcwd()):
        import gzip
        if '/' in self.filename:
            complete_path=self.filename
        else:
            complete_path=FILEPATH + '/' + self.filename
        reader=csv.reader(gzip.open(complete_path),delimiter=self.SEP_CHAR)
        return reader

class BLASTN6_File_class(GeneralFile_class):
    def __init__(self,name):
        GeneralFile_class.__init__(self,name)
        self.QUERY_ID_COLUMN=0
        self.REF_ID_COLUMN=1
        self.OVERLAP_PERCENTAGE_COLUMN=2
        self.OVERLAP_LENGTH_COLUMN=3
        self.MISMATCH_COLUMN=4
        self.GAP_COLUMN=5
        self.QUERY_START_COLUMN=6
        self.QUERY_END_COLUMN=7
        self.REF_START_COLUMN=8
        self.REF_END_COLUMN=9
        self.EVALUE_COLUMN=10
        self.BITSCORE_COLUMN=11
        self.SKIP_HEADER=0
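A short usage sketch for the file classes above (Python 2, matching the module; the file name "sample01_variants.vcf" is hypothetical):

    vcf = VCF_File_class("sample01_variants.vcf")
    reader = vcf.reader_gen()  # auto-skips '#' header lines
    out_name = vcf.outputfilename_gen("filtered", "vcf")  # -> sample01_filtered.vcf
    out = VCF_File_class(out_name)
    out.output_handle_gen(header_file=vcf.filename)  # copies the VCF header
    for row in reader:
        out.handle.write('\t'.join(row) + '\n')
    out.handle.close()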
repo_name: bioliyezhang/KPAF_codes
path: shared_Python_library/File_Class.py
language: Python
license: mit
size: 37,124
keyword: [ "BWA" ]
text_hash: d2e7680ce307523ec6a425ec353d8e7e75f6323b4b3761622c13e85cb1fee5aa
"""Configuration for the documentation scripts.""" import os import glob import logging import argparse from pprint import pformat from configparser import ConfigParser LOG = logging.getLogger(__name__) K_DOCS = "Docs" K_CFG = "CFG" DEF_CFG_BASEFILE = "../dirac.cfg" DEF_CFG_TARGETFILE = "source/ExampleConfig.rst" K_CODE = "Code" DEF_CODE_TARGETPATH = "source/CodeDocumentation" DEF_CODE_CUSTOMDOCS = "diracdoctools/CustomizedDocs" DEF_CODE_COMMANDSECTION = "false" K_COMMANDS = "Commands" DEF_COMMANDS_FILENAME = "index.rst" DEF_COMMANDS_SECTIONPATH = "source/Commands" def listify(values: str) -> list: """Listify string""" return [entry.strip() for entry in values.split(",") if entry] class Configuration(object): """Provide configuration to the scripts.""" def __init__(self, confFile, sections=None): if sections is None: sections = [K_CODE, K_COMMANDS, K_CFG] LOG.info("Reading configFile %r", os.path.join(os.getcwd(), confFile)) self.config = ConfigParser(dict_type=dict) self.config.read(confFile) # self.config.optionxform = str # do not transform options to lowercase self.docsPath = os.path.dirname(os.path.abspath(confFile)) # Read Docs section self.moduleName = self.getOption(K_DOCS, "module_name", mandatory=True) self.sourcePath = self._fullPath(self.getOption(K_DOCS, "source_folder", f"../src/{self.moduleName}")) # Read Code section if K_CODE in sections: self.code_customDocsPath = self._fullPath(self.getOption(K_CODE, "customdocs_folder", DEF_CODE_CUSTOMDOCS)) self.code_targetPath = self._fullPath(self.getOption(K_CODE, "docs_target_path", DEF_CODE_TARGETPATH)) self.code_privateMembers = listify(self.getOption(K_CODE, "document_private_members")) self.code_noInherited = listify(self.getOption(K_CODE, "no_inherited_members")) self.code_dummyFiles = listify(self.getOption(K_CODE, "create_dummy_files")) self.code_ignoreFolders = listify(self.getOption(K_CODE, "ignore_folders")) self.code_ignoreFiles = listify(self.getOption(K_CODE, "ignore_files")) self.code_add_commands_section = self.getOption( K_CODE, "add_commands_section", DEF_CODE_COMMANDSECTION ).lower() in ["true", "yes", "y"] # Read Commands section if K_COMMANDS in sections: self.com_rst_path = os.path.join( self.getOption(K_COMMANDS, "sectionpath", DEF_COMMANDS_SECTIONPATH), self.getOption(K_COMMANDS, "filename", DEF_COMMANDS_FILENAME), ) self.com_ignore_commands = listify(self.getOption(K_COMMANDS, "ignore_commands")) # List all scripts paths self.allScripts = glob.glob(os.path.join(self.sourcePath, "*", "scripts", "[!_]*.py")) self.allScripts += glob.glob(os.path.join(self.sourcePath, "*", "scripts", "[!_]*.sh")) self.allScripts.sort() self.scripts = {} # Sorted by group/subgroup for section in [s for s in sorted(self.config.sections()) if s.startswith("commands.")]: # Identify group/subgroup names from the section name sp = section.split(".") group, subgroup = (sp[-1], None) if len(sp) == 2 else (sp[-2], sp[-1]) LOG.info("Parsing config section: %r", section) # Read general group/subgroup settings title = self.getOption(section, "title", mandatory=True) pattern = listify(self.getOption(section, "pattern", mandatory=True)) exclude = listify(self.getOption(section, "exclude")) prefix = self.getOption(section, "prefix") # Search scripts for group/subgroup pattern _scripts = [] for sPath in self.allScripts: path = sPath[len(self.sourcePath) :].replace("_", "-") if any(p in path for p in pattern) and not any(p in path for p in exclude): name = os.path.basename(sPath) _scripts.append(name[:-3].replace("_", "-") if 
name.endswith(".py") else name) # Sort scripts _scripts.sort() # Grouping/subgrouping if not subgroup: # group case # Path to RST file fileName = self.getOption(section, "filename", "index.rst").strip() sectionPath = self._fullPath(self.getOption(section, "sectionpath").replace(" ", "")) # Collect scripts paths and metadata for group self.scripts[group] = dict( scripts=_scripts, title=title, prefix=prefix, rstPath=os.path.join(sectionPath, fileName), subgroups=[], ) else: # subgroup case # Collect scripts paths and metadata for subgroup self.scripts[group]["subgroups"].append(subgroup) # Sub group scripts is a subset of the group scripts subgroupScripts = [s for s in _scripts if s in self.scripts[group]["scripts"]] self.scripts[group][subgroup] = dict(title=title, prefix=prefix, scripts=subgroupScripts) # Remove subgroup scripts from group self.scripts[group]["scripts"] = [s for s in self.scripts[group]["scripts"] if s not in _scripts] # Read CFG section if K_CFG in sections: self.cfg_targetFile = self._fullPath(self.getOption(K_CFG, "target_file", DEF_CFG_TARGETFILE)) self.cfg_baseFile = self._fullPath(self.getOption(K_CFG, "base_file", DEF_CFG_BASEFILE)) for var, val in sorted(vars(self).items()): LOG.debug("Parsed options: %s = %s", var, pformat(val)) def _fullPath(self, path): """Return absolute path based on docsPath.""" return os.path.abspath(os.path.join(self.docsPath, path)) def getOption(self, section: str, option: str, default="", mandatory: bool = False) -> "option value": """Get option from TOML configuration :param section: section name :param option: option name :param default: default value :param mandatory: if option is mandatory""" if mandatory: return self.config.get(section, option) value = self.config.get(section, option) if self.config.has_option(section, option) else "" if not value and default: LOG.debug(f"Since the '{section}.{option}' is not specified, use default: {default}") return default return value def __str__(self): """Return string containing options and values.""" theStr = "" for var, val in vars(self).items(): theStr += "%s = %s\n" % (var, val) return theStr class CLParser(object): def __init__(self): self.log = LOG.getChild("CLParser") self.parsed = None self.debug = False self.parser = argparse.ArgumentParser("DiracDocTool", formatter_class=argparse.RawTextHelpFormatter) self.parser.add_argument( "--configFile", action="store", default="docs.conf", dest="configFile", help="Name of the config file" ) self.parser.add_argument("-d", "--debug", action="count", dest="debug", help="d, dd, ddd", default=0) def parse(self): self.log.info("Parsing common options") self.parsed = self.parser.parse_args() self.logLevel = self._parsePrintLevel(self.parsed.debug) self.configFile = self.parsed.configFile def optionDict(self): """Return dictionary of options.""" if not self.parsed: self.parse() return dict( configFile=self.configFile, logLevel=self.logLevel, debug=self.debug, ) def _parsePrintLevel(self, level): """Translate debug count to logging level.""" level = level if level <= 2 else 2 self.debug = level == 2 return [ logging.INFO, logging.INFO, logging.DEBUG, ][level]
repo_name: DIRACGrid/DIRAC
path: docs/diracdoctools/Config.py
language: Python
license: gpl-3.0
size: 8,377
keyword: [ "DIRAC" ]
text_hash: 866ceda6a18a5d82b407f68ff32c1177bde7451f260646ce40fe177432c9ca1f
# -*- coding: utf8
#pylint: disable-msg=C0301
#pylint: disable-msg=C0111
#pylint: disable-msg=C0103

from __future__ import print_function, division

from tagassess import data_parser
from tagassess import test

import StringIO
import time
import unittest

DELICIOUS_LINE1 = '2003-01-01 01:00:00 2384 125497 tinker'
DELICIOUS_LINE2 = '2011-02-17 11:10:20 2384 674518 hardware'
DELICIOUS_LINE3 = '2003-01-01 01:00:00 1 674518 hardware pc'
DELICIOUS_LINE4 = '2003-01-01 01:00:00 3 674518 tinker'
DELICIOUS_LINE5 = '2005-03-03 01:00:00 8718 111111 bala'
DELICIOUS_LINE6 = '2005-03-03 01:00:00 125497 2384 bala'
DELICIOUS_LINE7 = '2005-03-03 01:00:00 4 2384 125497'

CONNOTEA_LINE1 = '286ebecbce9fe3d99432c349fb2851c3|timo|2004-12-09T18:37:12Z|review'
CONNOTEA_LINE2 = '1234555asd122d432c349fb285aas1c3|sand|2004-12-09T18:37:12Z|long-term potentiation'

BIBSONOMY_LINE1 = '0 boomerang 7 1 2005-12-15 19:31:50'
BIBSONOMY_LINE2 = '2 shop 6 1 2005-12-15 19:31:50'

CITEUL_LINE1 = '4184140|aeb5429a4c20c7360579f53366633144|2009-03-16 17:58:33.792331+00|flavivirus'
CITEUL_LINE2 = '2820125|e4fc89df8b47cf4eaede9b9f1620c57f|2009-03-16 18:00:04.165438+00|partial-order-reduction'

LT_LINE1 = '1 20110 10 50s'

def convert_time(t):
    return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(t))

class TestParseFuncs(unittest.TestCase):
    '''
    Basic tests for line parser functions
    '''

    def test_lt(self):
        pre_date = time.mktime(time.localtime())
        user, item, tag, date = data_parser.library_thing_parser(LT_LINE1)
        post_date = time.mktime(time.localtime())

        self.assertEqual(1, user)
        self.assertEqual(20110, item)
        self.assertEqual('50s', tag)
        self.assertEqual('50s', tag)
        self.assertTrue(date >= pre_date)
        self.assertTrue(date <= post_date)

    def test_delicious_flickr(self):
        user, item, tag, date = \
            data_parser.delicious_flickr_parser(DELICIOUS_LINE1)
        self.assertEqual(2384, user)
        self.assertEqual(125497, item)
        self.assertEqual('tinker', tag)
        self.assertEqual('2003-01-01 01:00:00', convert_time(date))

        user, item, tag, date = \
            data_parser.delicious_flickr_parser(DELICIOUS_LINE2)
        self.assertEqual(2384, user)
        self.assertEqual(674518, item)
        self.assertEqual('hardware', tag)
        self.assertEqual('2011-02-17 11:10:20', convert_time(date))

        user, item, tag, date = \
            data_parser.delicious_flickr_parser(DELICIOUS_LINE3)
        self.assertEqual(1, user)
        self.assertEqual(674518, item)
        self.assertEqual('hardware pc', tag)
        self.assertEqual('2003-01-01 01:00:00', convert_time(date))

    def test_connotea(self):
        user, item, tag, date = data_parser.connotea_parser(CONNOTEA_LINE1)
        self.assertEqual('timo', user)
        self.assertEqual('286ebecbce9fe3d99432c349fb2851c3', item)
        self.assertEqual('review', tag)
        self.assertEqual('2004-12-09 18:37:12', convert_time(date))

        user, item, tag, date = data_parser.connotea_parser(CONNOTEA_LINE2)
        self.assertEqual('sand', user)
        self.assertEqual('1234555asd122d432c349fb285aas1c3', item)
        self.assertEqual('long-term potentiation', tag)
        self.assertEqual('2004-12-09 18:37:12', convert_time(date))

    def test_bibsonomy(self):
        user, item, tag, date = data_parser.bibsonomy_parser(BIBSONOMY_LINE1)
        self.assertEqual(0, user)
        self.assertEqual(7, item)
        self.assertEqual('boomerang', tag)
        self.assertEqual('2005-12-15 19:31:50', convert_time(date))

        user, item, tag, date = data_parser.bibsonomy_parser(BIBSONOMY_LINE2)
        self.assertEqual(2, user)
        self.assertEqual(6, item)
        self.assertEqual('shop', tag)
        self.assertEqual('2005-12-15 19:31:50', convert_time(date))

    def test_citeul(self):
        user, item, tag, date = data_parser.citeulike_parser(CITEUL_LINE1)
        self.assertEqual('aeb5429a4c20c7360579f53366633144', user)
        self.assertEqual(4184140, item)
        self.assertEqual('flavivirus', tag)
        self.assertEqual('2009-03-16 17:58:33', convert_time(date))

        user, item, tag, date = data_parser.citeulike_parser(CITEUL_LINE2)
        self.assertEqual('e4fc89df8b47cf4eaede9b9f1620c57f', user)
        self.assertEqual(2820125, item)
        self.assertEqual('partial-order-reduction', tag)
        self.assertEqual('2009-03-16 18:00:04', convert_time(date))

class TestIParse(unittest.TestCase):

    def test_iparse(self):
        fakef = StringIO.StringIO()
        fakef.writelines([DELICIOUS_LINE1 + '\n',
                          DELICIOUS_LINE2 + '\n',
                          DELICIOUS_LINE3 + '\n',
                          DELICIOUS_LINE4 + '\n',
                          DELICIOUS_LINE5])
        fakef.seek(0)

        p = data_parser.Parser()
        annots = [a for a in p.iparse(fakef,
                                      data_parser.delicious_flickr_parser)]

        self.assertEqual(0, annots[0]['user'])
        self.assertEqual(0, annots[0]['item'])
        self.assertEqual(0, annots[0]['tag'])
        self.assertEqual('2003-01-01 01:00:00',
                         convert_time(annots[0]['date']))

        self.assertEqual(0, annots[1]['user'])
        self.assertEqual(1, annots[1]['item'])
        self.assertEqual(1, annots[1]['tag'])
        self.assertEqual('2011-02-17 11:10:20',
                         convert_time(annots[1]['date']))

        self.assertEqual(1, annots[2]['user'])
        self.assertEqual(1, annots[2]['item'])
        self.assertEqual(2, annots[2]['tag'])

        self.assertEqual(2, annots[3]['user'])
        self.assertEqual(1, annots[3]['item'])
        self.assertEqual(0, annots[3]['tag'])

        self.assertEqual(3, annots[4]['user'])
        self.assertEqual(2, annots[4]['item'])
        self.assertEqual(3, annots[4]['tag'])

    def test_with_file(self):
        p = data_parser.Parser()
        with open(test.BIBSONOMY_FILE) as f:
            annots = [a for a in p.iparse(f, data_parser.bibsonomy_parser)]
        self.assertEquals(10000, len(annots))

if __name__ == "__main__":
    unittest.main()
flaviovdf/tag_assess
src/tagassess/test/test_data_parser.py
Python
bsd-3-clause
6,291
[ "TINKER" ]
05dcaf3e1ad42e93e8952ca03a54cf69477861981966eee4b747b2f59d9e81c2
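The tagassess.data_parser module exercised by the tests above is not part of this record, so the following is a hypothetical sketch, inferred only from the fixtures and assertions in those tests, of what a delicious/flickr-style line parser could look like (field layout assumed: '<date> <time> <user> <item> <tag words...>'):

import time

def delicious_flickr_parser_sketch(line):
    '''Returns (user, item, tag, epoch_seconds) for one annotation line.
    Sketch only; not the actual tagassess implementation.'''
    fields = line.split()
    date_str = fields[0] + ' ' + fields[1]   # e.g. '2003-01-01 01:00:00'
    user = int(fields[2])
    item = int(fields[3])
    tag = ' '.join(fields[4:])               # tags may contain spaces ('hardware pc')
    epoch = time.mktime(time.strptime(date_str, '%Y-%m-%d %H:%M:%S'))
    return user, item, tag, epoch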
#! /usr/bin/python

###
###
###
### Script written by Ivan Sovic, August 2015.
### All rights reserved.
###
###
###

# Some comments on DALIGNER.
#
# Important about DB generation: DALIGNER splits the input reference fasta into chunks of non-'N' bases, as well as into chromosomes.
# E.g. let's say we have hg19_chr3 as a reference. That is only one chromosome (i.e., only one sequence). However, fasta2DAM will generate 21 reference sequences.
# This corresponds to the number of breakpoints present in chr3.
# As another example, if you convert S. cerevisiae S288c, you will get exactly 16 sequences, as expected.

import re;
import os
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__));
import sys;
sys.path.append(SCRIPT_PATH + '/../src');
import subprocess;
import multiprocessing;

try:
    import basicdefines;
    USE_BASICDEFINES_ = True;
    ALIGNERS_PATH_ROOT_ABS_ = basicdefines.ALIGNERS_PATH_ROOT_ABS;
except:
    USE_BASICDEFINES_ = False;
    ALIGNERS_PATH_ROOT_ABS_ = SCRIPT_PATH;

ALIGNER_URL = 'https://github.com/thegenemyers/DALIGNER.git';
ALIGNER_DB_URL = 'https://github.com/thegenemyers/DAZZ_DB.git';
ALIGNER_PATH = ALIGNERS_PATH_ROOT_ABS_ + '/DALIGNER/DALIGNER/';
ALIGNER_DB_PATH = ALIGNERS_PATH_ROOT_ABS_ + '/DALIGNER/DAZZ_DB/';
BIN = 'daligner';
MAPPER_NAME = 'DALIGNER';

RUNNING_PATH = os.path.dirname(sys.argv[0]);

def peek(fp, num_chars):
    data = fp.read(num_chars);
    if len(data) == 0:
        return '';
    fp.seek(num_chars * -1, 1);
    return data;

# Returns a single read from the given FASTA/FASTQ file.
# Parameter header contains only the header of the read.
# Parameter lines contains all lines of the read, which include:
# - header
# - seq
# - '+' if FASTQ
# - quals if FASTQ
# Parameter lines is an array of strings, each for one component.
# Please note that multiline FASTA/FASTQ entries (e.g. sequence line)
# will be truncated into one single line.
def get_single_read(fp):
    lines = [];

    line = fp.readline();
    header = line.rstrip();
    header_leading_char = '';
    if (len(header) > 0):
        sequence_separator = header[0];
        header_leading_char = header[0];
        header = header[1:];    # Strip the '>' or '@' sign from the beginning.
    else:
        return ['', []];

    next_char = peek(fp, 1);

    line_string = '';
    lines.append(header_leading_char + header);

    num_lines = 1;
    #while len(next_char) > 0 and next_char != sequence_separator or (next_char == '@' and num_lines < 4):
    while (len(next_char) > 0 and (next_char != sequence_separator or (next_char == '@' and num_lines < 4))):
        line = fp.readline();
        if (line.rstrip() == '+' or line.rstrip() == ('+' + header)):
        #if (line.rstrip()[0] == '+'):
            lines.append(line_string);
            lines.append(line.rstrip());
            line_string = '';
        else:
            line_string += line.rstrip();
        next_char = peek(fp, 1);
        num_lines += 1;

    lines.append(line_string);

    return [header, lines];

def get_fastq_headers_and_lengths(fastq_path):
    headers = [];
    lengths = [];
    ### DALIGNER splits each read/sequence into 'subreads' at every N position. That's why the bread index does not necessarily correspond to the actual sequence ID.
    daligner_seq_id = [];    ### Contains an array of tuples (seq_id, seq_header, start_offset).
fp_in = None; try: fp_in = open(fastq_path, 'r'); except IOError: sys.stderr.write('ERROR: Could not open file "%s" for reading!\n' % fastq_path); exit(1); seq_id = 0; while True: [header, read] = get_single_read(fp_in); if (len(header) == 0): break; headers.append(header); lengths.append(len(read[1])); seq = read[1]; i = 0; while (i < len(seq)): if (seq[i] != 'N' and (i == 0 or (i > 0 and seq[i-1] == 'N'))): daligner_seq_id.append( (seq_id, header, i) ); i += 1; # i = 0; # while (i < len(seq)): # if (i == 0 or (i > 0 and seq[i] == 'N' and seq[i-1] != 'N')): # daligner_seq_id.append( (seq_id, header, i) ); # i += 1; seq_id += 1; fp_in.close(); # print 'len(daligner_seq_id) = %d' % (len(daligner_seq_id)); # exit(1); return [headers, lengths, daligner_seq_id]; def measure_command_wrapper(out_filename): if (USE_BASICDEFINES_ == True): return basicdefines.measure_command(out_filename); else: return '/usr/bin/time --format "Command line: %%C\\nReal time: %%e s\\nCPU time: -1.0 s\\nUser time: %%U s\\nSystem time: %%S s\\nMaximum RSS: %%M kB\\nExit status: %%x" --quiet -o %s ' % out_filename; def parse_memtime(memtime_path): cmdline = ''; realtime = 0; cputime = 0; usertime = 0; systemtime = 0; maxrss = 0; rsscache = 0; time_unit = ''; mem_unit = ''; try: fp = open(memtime_path, 'r'); lines = [line.strip() for line in fp.readlines() if (len(line.strip()) > 0)]; fp.close(); except Exception, e: sys.stderr.write('Could not find memory and time statistics in file "%s".\n' % (memtime_path)); return [cmdline, realtime, cputime, usertime, systemtime, maxrss, time_unit, mem_unit]; for line in lines: if (line.startswith('Command line:')): cmdline = line.split(':')[1].strip(); elif (line.startswith('Real time:')): split_line = line.split(':')[1].strip().split(' '); realtime = float(split_line[0].strip()); time_unit = split_line[1].strip(); elif (line.startswith('CPU time:')): split_line = line.split(':')[1].strip().split(' '); cputime = float(split_line[0].strip()); time_unit = split_line[1].strip(); elif (line.startswith('User time:')): split_line = line.split(':')[1].strip().split(' '); usertime = float(split_line[0].strip()); time_unit = split_line[1].strip(); elif (line.startswith('System time:')): split_line = line.split(':')[1].strip().split(' '); systemtime = float(split_line[0].strip()); time_unit = split_line[1].strip(); elif (line.startswith('Maximum RSS:')): split_line = line.split(':')[1].strip().split(' '); maxrss = float(split_line[0].strip()); mem_unit = split_line[1].strip(); # elif (line.startswith('')): # split_line = line.split(':')[1].strip().split(' '); # rsscache = float(split_line[0].strip()); # mem_unit = split_line[1].strip(); return [cmdline, realtime, cputime, usertime, systemtime, maxrss, time_unit, mem_unit]; def parse_memtime_files_and_accumulate(memtime_files, final_memtime_file): final_command_line = ''; final_real_time = 0.0; final_cpu_time = 0.0; final_user_time = 0.0; final_system_time = 0.0; final_time_unit = ''; final_max_rss = 0; final_mem_unit = ''; i = 0; for memtime_file in memtime_files: i += 1; sys.stderr.write('Parsing memtime file "%s"...\n' % (memtime_file)); [cmdline, realtime, cputime, usertime, systemtime, maxrss, time_unit, mem_unit] = parse_memtime(memtime_file); if (i == 1): final_command_line = cmdline; final_real_time = realtime; final_cpu_time = cputime; final_user_time = usertime; final_system_time = systemtime; final_max_rss += maxrss; final_time_unit = time_unit; final_mem_unit = mem_unit; else: if (time_unit == final_time_unit and mem_unit == 
final_mem_unit): final_command_line += '; ' + cmdline; final_real_time += realtime; final_cpu_time += cputime; final_user_time += usertime; final_system_time += systemtime; final_max_rss += maxrss; else: sys.stderr.write('Memory or time units not the same in all files! Instead of handling this, we decided to be lazy and just give up.\n'); break; try: fp = open(final_memtime_file, 'w'); except Exception, e: sys.stderr.write('ERROR: Could not open file "%s" for writing!\n' % (final_memtime_file)); return; if (final_cpu_time <= 0.0): final_cpu_time = final_user_time + final_system_time; fp.write('Command line: %s\n' % (final_command_line)); fp.write('Real time: %f %s\n' % (final_real_time, final_time_unit)); fp.write('CPU time: %f %s\n' % (final_cpu_time, final_time_unit)); fp.write('User time: %f %s\n' % (final_user_time, final_time_unit)); fp.write('System time: %f %s\n' % (final_system_time, final_time_unit)); fp.write('Maximum RSS: %f %s\n' % (final_max_rss, final_mem_unit)); fp.close(); def read_fastq(fastq_path): headers = []; seqs = []; quals = []; fp_in = None; try: fp_in = open(fastq_path, 'r'); except IOError: print 'ERROR: Could not open file "%s" for reading!' % fastq_path; return; while True: [header, read] = get_single_read(fp_in); if (len(header) == 0): break; seq = read[1]; qual = ''; if (len(read) == 4): qual = read[3]; headers.append(header); seqs.append(seq); quals.append(qual); fp_in.close(); return [headers, seqs, quals]; def convert_to_fasta(fastq_path, out_fasta_path): headers = []; seqs = []; quals = []; fp_in = None; fp_out = None; try: fp_in = open(fastq_path, 'r'); except IOError: print 'ERROR: Could not open file "%s" for reading!' % fastq_path; return; try: fp_out = open(out_fasta_path, 'w'); except IOError: print 'ERROR: Could not open file "%s" for writing!' % out_fasta_path; fp_in.close(); return; while True: [header, read] = get_single_read(fp_in); if (len(header) == 0): break; seq = read[1]; fp_out.write('>' + header + '\n'); fp_out.write(seq + '\n'); fp_in.close(); fp_out.close(); def wrap_fasta_file(fasta_file, daligner_fasta_file): try: fp_in = open(fasta_file, 'r'); except: sys.stderr.write('ERROR: Could not open file "%s" for reading! Exiting.\n' % fasta_file); exit(0); try: fp_out = open(daligner_fasta_file, 'w'); except: sys.stderr.write('ERROR: Could not open file "%s" for writing! Exiting.\n' % daligner_fasta_file); exit(0); current_read = 0; while True: [header, read] = get_single_read(fp_in); if (len(read) == 0): break; current_read += 1; if (len(read[1]) <= 20): ### DALIGNER has a lower length limit of 10bp. continue; read[1] = re.sub("(.{500})", "\\1\n", read[1], 0, re.DOTALL); ### Wrap the sequence line, because DALIGNER has a 9998bp line len limit. if (len(read) == 4): read[3] = re.sub("(.{500})", "\\1\n", read[3], 0, re.DOTALL); ### Wrap the qual line, because DALIGNER has a 9998bp line len limit. fp_out.write('\n'.join(read) + '\n'); sys.stderr.write('\n'); fp_in.close(); fp_out.close(); def convert_reads_to_pacbio_format(reads_file, daligner_reads_file): try: fp_in = open(reads_file, 'r'); except: sys.stderr.write('ERROR: Could not open file "%s" for reading! Exiting.\n' % reads_file); exit(0); try: fp_out = open(daligner_reads_file, 'w'); except: sys.stderr.write('ERROR: Could not open file "%s" for writing! 
Exiting.\n' % daligner_reads_file); exit(0); current_read = 0; header_conversion_hash = {}; while True: [header, read] = get_single_read(fp_in); if (len(read) == 0): break; current_read += 1; if (len(read[1]) <= 10): ### DALIGNER has a lower length limit of 10bp. sys.stderr.write('Found a read shorter than 10bp. Removing from the output.\n'); continue; ### Check if the read is already formatted like PacBio. if (header.count('/') == 2 and 'RQ' in header): fp_out.write('\n'.join(read) + '\n'); continue; trimmed_header = header.replace('_', ' ').split()[0]; # pacbio_header = '%s/%d/0_%d RQ=0.850' % (trimmed_header, current_read, len(read[1])); pacbio_header = 'S1/%d/0_%d RQ=0.850' % (current_read, len(read[1])); header_conversion_hash[pacbio_header] = header; read[0] = '%s%s' % (read[0][0], pacbio_header); ### Keep the first char of the header line. read[1] = re.sub("(.{500})", "\\1\n", read[1], 0, re.DOTALL); ### Wrap the sequence line, because DALIGNER has a 9998bp line len limit. if (len(read) == 4): read[3] = re.sub("(.{500})", "\\1\n", read[3], 0, re.DOTALL); ### Wrap the qual line, because DALIGNER has a 9998bp line len limit. fp_out.write('\n'.join(read) + '\n'); sys.stderr.write('\n'); fp_in.close(); fp_out.close(); return header_conversion_hash; def execute_command(command): sys.stderr.write('[%s wrapper] %s\n' % (MAPPER_NAME, command)); sys.stderr.flush(); subprocess.call(command, shell=True); sys.stderr.write('\n'); sys.stderr.flush(); def execute_command_get_stdout(command): sys.stderr.write('[%s wrapper] %s\n' % (MAPPER_NAME, command)); sys.stderr.flush(); p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) [out, err] = p.communicate() sys.stderr.write('\n'); sys.stderr.flush(); return [out, err]; class Overlap: def __init__(self, original_line=None, match_obj=None): self.bread = 0; # For alignments this is the ID of the reference hit. It's 1-based. self.aread = 0; # ID of the read that is aligned/overlapped. It's 1-based. self.orient = 0; # 'n' or 'c'. self.bstart = 0; # B-read/reference start. self.bend = 0; # B-read/reference end. self.astart = 0; # A-read start. self.aend = 0; # A-read end. self.diffs = 0; # Number of diffs. self.tracepts = 0; # Number of trace points. 
self.aln_ref = ''; self.aln_query = ''; self.aln_matching = ''; self.original_line = ''; if (match_obj != None): self.assign(match_obj, original_line); def assign(self, match_obj, original_line=None): [self.bread, self.aread, self.orient, self.bstart, self.bend, self.astart, self.aend, self.diffs, self.tracepts] = match_obj.groups(); self.bread = int(''.join(self.bread.split(','))); self.aread = int(''.join(self.aread.split(','))); self.bstart = int(''.join(self.bstart.split(','))); self.bend = int(''.join(self.bend.split(','))); self.astart = int(''.join(self.astart.split(','))); self.aend = int(''.join(self.aend.split(','))); self.diffs = int(''.join(self.diffs.split(','))); self.tracepts = int(''.join(self.tracepts.split(','))); self.aln_ref = ''; self.aln_query = ''; self.aln_matching = ''; self.original_line = ''; if (original_line != None): self.original_line = original_line; def verbose_as_string(self): ret = ''; ret += 'bread = %d\n' % (self.bread); ### Reference hit ID ret += 'aread = %d\n' % (self.aread); ### Read ID ret += 'orient = %s\n' % (self.orient); ### 'n' or 'c' ret += 'bstart = %d\n' % (self.bstart); ret += 'bend = %d\n' % (self.bend); ret += 'astart = %d\n' % (self.astart); ret += 'aend = %d\n' % (self.aend); ret += 'diffs = %d\n' % (self.diffs); ret += 'tracepts = %d\n' % (self.tracepts); ret += 'aln_ref = %s\n' % (self.aln_ref); ret += 'aln_matching = %s\n' % (self.aln_matching); ret += 'aln_query = %s\n' % (self.aln_query); return ret; def add_ref_alignment(self, ref_aln): self.aln_ref += ref_aln + '\n'; def add_matching_alignment(self, matching_aln): self.aln_matching += matching_aln + '\n'; def add_query_alignment(self, query_aln): self.aln_query += query_aln + '\n'; def calc_cigar_string(self, read_length, cigar_format='basic'): if (cigar_format == 'basic'): TB_MATCH = 'M'; # 0; TB_MISMATCH = 'M'; # 1; TB_INSERTION = 'I'; # 2; TB_DELETION = 'D'; # 3; else: TB_MATCH = '='; # 0; TB_MISMATCH = 'X'; # 1; TB_INSERTION = 'I'; # 2; TB_DELETION = 'D'; # 3; traceback = []; ### Convert the visual alignment to a traceback array. i = 0; while (i < len(self.aln_matching)): if (self.aln_matching[i] == '|'): traceback.append(TB_MATCH); elif (self.aln_matching[i] == '*'): if (self.aln_ref[i] == '-' and self.aln_query[i] != '-'): traceback.append(TB_INSERTION); elif (self.aln_ref[i] != '-' and self.aln_query[i] == '-'): traceback.append(TB_DELETION); elif (self.aln_ref[i] != self.aln_query[i] and self.aln_ref[i] != '-' and self.aln_query[i] != '-'): traceback.append(TB_MISMATCH); i += 1; ### Summarize the traceback array into a CIGAR string. cigar = []; last = '' results = [] for op in traceback: if op == last: cigar[-1] = (op, cigar[-1][1] + 1); else: cigar.append((op, 1)); last = op; cigar_string = '%dS' % (self.astart) if (self.astart > 0) else ''; cigar_string += ''.join(['%d%s' % (op[1], op[0]) for op in cigar]); cigar_string += '%dS' % (read_length - self.aend) if (read_length > (self.aend + 1)) else ''; return cigar_string; def convert_to_sam(self, ref_headers, ref_daligner_seq_id, read_headers, read_seqs, read_quals, header_conversion_hash): # num_clip_front = int(qstart) - 1; # num_clip_back = int(qlen) - (int(qend)); # sam_cigar = convert_btop_to_cigar(btop, num_clip_front, num_clip_back, sstrand); try: qname = header_conversion_hash[read_headers[self.aread-1]].split()[0]; # .split()[0]; except: sys.stderr.write('ERROR: Read "%s" cannot be found in the reads file! 
Faulty alignment in DALIGNER output?\n'); exit(1); # qname = header_conversion_hash[qname]; # try: daligner_seq_id = ref_daligner_seq_id[self.bread-1]; ### Contains an array of tuples (seq_id, seq_header, start_offset). # except Exception, e: # print e; # print self.bread; # print len(ref_daligner_seq_id); # exit(1); flag = 0 if (self.orient == 'n') else 16; rname = daligner_seq_id[1].split()[0]; pos = daligner_seq_id[2] + self.bstart + 1; mapq = 255; sam_seq = read_seqs[self.aread-1] if (self.orient == 'n') else revcomp_seq(read_seqs[self.aread-1]); sam_qual = read_quals[self.aread-1] if (self.orient == 'n') else read_quals[self.aread-1][::-1]; sam_cigar = self.calc_cigar_string(len(sam_seq)); if (len(sam_qual) == 0): sam_qual = '*'; sam_NM = self.diffs; sam_line = ''; sam_line += '%s\t' % (qname); # 1. qname sam_line += '%d\t' % (flag); # 2. flag sam_line += '%s\t' % (rname); # 3. rname sam_line += '%d\t' % (pos); # 4. pos sam_line += '%d\t' % (mapq); # 5. mapq sam_line += '%s\t' % (sam_cigar); # 6. CIGAR sam_line += '*\t'; # 7. rnext sam_line += '0\t'; # 8. pnext sam_line += '0\t'; # 9. tlen sam_line += '%s\t' % (sam_seq); # 10. seq sam_line += '%s\t' % (sam_qual); # 11. qual sam_line += 'AS:i:%d\t' % (self.aend - self.astart + 1 - sam_NM); # NM, custom sam_line += 'NM:i:%d' % (sam_NM); # NM, custom # sam_line += 'AS:i:%s\t' % (score.strip()); # AS, custom return sam_line; def get_line(fp): line = fp.readline(); if (len(line) == 0): return None; line = line.replace('\n', ''); return line; def complement_base(base): if (base == 'A'): return 'T'; if (base == 'C'): return 'G'; if (base == 'T'): return 'A'; if (base == 'G'): return 'C'; return 'N'; def revcomp_seq(sequence): ret_seq = ''; i = 0; while (i < len(sequence)): ret_seq += complement_base(sequence[len(sequence)-i-1]); i += 1; return ret_seq; def filter_daligner_refs_by_length(min_seq_length, ref_daligner_seq_id): ret_daligner_seq_ids = []; i = 0; while (i < len(ref_daligner_seq_id)): # print 'ref_daligner_seq_id[i] = %s' % (str(ref_daligner_seq_id[i])); # if ((i == 0 and ref_daligner_seq_id[i][2] < min_seq_length) or (i > 0 and (ref_daligner_seq_id[i][2] - ref_daligner_seq_id[i-1][2]) < min_seq_length)): # if ((i > 0 and (ref_daligner_seq_id[i][2] - ref_daligner_seq_id[i-1][2]) < min_seq_length)): # i += 1; # continue; # [seq_id, header, pos] = ref_daligner_seq_id[i] if (i == 0 or (i > 0 and ref_daligner_seq_id[i][1] != ref_daligner_seq_id[i-1][1]) or (i > 0 and ref_daligner_seq_id[i][1] == ref_daligner_seq_id[i-1][1] and (ref_daligner_seq_id[i][2] - ref_daligner_seq_id[i-1][2]) >= min_seq_length)): ret_daligner_seq_ids.append(ref_daligner_seq_id[i]); # print 'ret_daligner_seq_ids[-1] = %s' % (str(ret_daligner_seq_ids[-1])); i += 1; return ret_daligner_seq_ids; def convert_to_sam(alignment_file, daligner_reference, daligner_reads, header_conversion_hash, out_sam_file): try: fp_in = open(alignment_file, 'r'); except: sys.stderr.write('ERROR: Could not open file "%s" for reading!\n' % alignment_file); exit(1); try: fp_out = open(out_sam_file, 'w'); except: sys.stderr.write('ERROR: Could not open file "%s" for writing!\n' % out_sam_file); exit(1); [ref_headers, ref_lengths, ref_daligner_seq_id] = get_fastq_headers_and_lengths(daligner_reference); [read_headers, read_seqs, read_quals] = read_fastq(daligner_reads); # [read_headers, read_seqs, read_quals] = read_fastq(daligner_reads); ref_daligner_seq_id = filter_daligner_refs_by_length(100, ref_daligner_seq_id); STATE_INIT = 0; STATE_HEADER = 1; STATE_OVERLAP_LINE = 9; STATE_ALIGNMENT 
= 6; STATE_MATCHING_LINE = 7; STATE_QUERY_LINE = 8; STATE_CHECK_IF_NEXT_OVERLAP = 10; current_state = STATE_INIT; next_state = STATE_INIT; # escherichia_coli.fa-dalignerreference.fasta.reads-pacbio-dalignerreads.fasta: 988 records p_init = re.compile("\s*%s.%s: ([\d,]+) records\s*" % (os.path.basename(daligner_reference), os.path.basename(daligner_reads))); # 1 1 n [1,356,122..1,358,765] x [ 817.. 3,601] : = 432 diffs ( 27 trace pts) # p_overlap_line = re.compile("(\d+)\s+(\d+)\s+([cn])\s+\[\s*(.*?)\s*\.\.\s*(.*?)\s*\]\s+x\s+\[\s*(.*?)\s*\.\.\s*(.*?)\s*\]\s+:\s+=\s+(\d+)\s+diffs\s*\(\s*(\d+)\s*trace pts\)"); p_overlap_line = re.compile("\s*([\d,]+)\s+([\d,]+)\s+([cn])\s+\[\s*(.*?)\s*\.\.\s*(.*?)\s*\]\s*x\s*\[\s*(.*?)\s*\.\.\s*(.*?)\s*\]\s*:\s*=\s*([\d,]+)\s*diffs\s*\(\s*([\d,]+)\s*trace pts\)\s*"); # 1356113 taattaacgc[tgtggcgg-taactaaatcgaagaacagcgccgacaacgcgacaatcccgaccataatga-cgttgag-tgccggagtcc--gccattt # p_ref_line = re.compile("(\d+)\s+([actgn\-\[\]\.]+)"); p_ref_line = re.compile("\s*(\d*)\s*([actgn\-\[\]\. ]+)\s*"); p_matching_line = re.compile("\s*([:\[\|\]\* ]+)\s*"); # p_query_line = re.compile("(\d+)\s+([actgn\-\[\]\.]+)\s+(.*?%)"); p_query_line = re.compile("\s*(\d*)\s*([actgn\-\[\]\.]+)\s*(.*?%){0,1}\s*"); # print "\s*%s.%s: (\d+) records\s*" % (os.path.basename(daligner_reference), os.path.basename(daligner_reads)); # Write the SAM header. i = 0; while i < len(ref_headers): line = '@SQ\tSN:%s\tLN:%d\n' % (ref_headers[i].split()[0], ref_lengths[i]); fp_out.write(line); i += 1; ovl = None; num_sam_lines = 0; # for line in fp_in: while (True): if (current_state == STATE_INIT): line = get_line(fp_in); if (line == None): break; m = p_init.match(line); if (m): next_state = STATE_OVERLAP_LINE; else: next_state = current_state; elif (current_state == STATE_OVERLAP_LINE): ### Empty line. line = get_line(fp_in); if (line == None): break; ### The actual overlap line. line = get_line(fp_in); if (line == None): break; # if ('1 362 n [1,075,548..1,079,360]' in line.strip()): # print 'Tu sam 2!'; m = p_overlap_line.match(line); if (m): ovl = Overlap(line, m); next_state = STATE_ALIGNMENT; # if ('1 362 n [1,075,548..1,079,360]' in line.strip()): # print 'Tu sam 2.1!'; # print ovl.original_line; else: next_state = current_state; sys.stderr.write('ERROR: Daligner line not formatted properly (STATE_OVERLAP_LINE)! Maybe the format changed?\n'); sys.stderr.write('Line: "%s".\n' % line); sys.stderr.write('Exiting.\n'); exit(1); elif (current_state == STATE_ALIGNMENT): ### Empty line. line = get_line(fp_in); if (line == None): break; ### Reference line. line = get_line(fp_in); if (line == None): break; m = p_ref_line.match(line); # if ('1 362 n [1,075,548..1,079,360]' in ovl.original_line.strip()): # print 'Tu sam 3!'; # print ovl.verbose_as_string(); if (m): ovl.add_ref_alignment(m.groups()[1]); else: next_state = current_state; sys.stderr.write('ERROR: Daligner line not formatted properly (STATE_ALIGNMENT)! Maybe the format changed?\n'); sys.stderr.write('Line: "%s".\n' % line); sys.stderr.write('Exiting.\n'); exit(1); offset = line.index(m.groups()[1]); offset_len = len(m.groups()[1]); ### Matching line. line = get_line(fp_in); if (line == None): break; ovl.add_matching_alignment(line[offset:(offset+offset_len)]); # m = p_matching_line.match(line); # if (m): # ovl.add_matching_alignment(m.groups()[0]); # else: # next_state = current_state; # sys.stderr.write('ERROR: Daligner line not formatted properly (STATE_MATCHING_LINE)! 
Maybe the format changed?\n'); # sys.stderr.write('Line: "%s".\n' % line); # sys.stderr.write('Exiting.\n'); # exit(1); ### Query line. line = get_line(fp_in); if (line == None): break; ovl.add_query_alignment(line[offset:(offset+offset_len)]); # m = p_query_line.match(line); # if (m): # ovl.add_query_alignment(m.groups()[1]); # else: # next_state = current_state; # sys.stderr.write('ERROR: Daligner line not formatted properly (STATE_QUERY_LINE)! Maybe the format changed?\n'); # sys.stderr.write('Line: "%s".\n' % line); # sys.stderr.write('Exiting.\n'); # exit(1); next_state = STATE_CHECK_IF_NEXT_OVERLAP; elif (current_state == STATE_CHECK_IF_NEXT_OVERLAP): fp_tell = fp_in.tell(); ### Empty line. line = get_line(fp_in); if (line == None): break; ### Either a new overlap or a continuation to the previous alignment. line = get_line(fp_in); if (line == None): break; ### Return to the previous position, because it needs to be re-read. fp_in.seek(fp_tell, 0); # 1 362 n [1,075,548..1,079,360] x [ 0.. 4,045] : = 670 diffs ( 39 trace pts) # if ('1 362 n [1,075,548..1,079,360]' in ovl.original_line.strip()): # print 'Tu sam 1!'; m = p_overlap_line.match(line); if (m): sam_line = ovl.convert_to_sam(ref_headers, ref_daligner_seq_id, read_headers, read_seqs, read_quals, header_conversion_hash); num_sam_lines += 1; fp_out.write(sam_line + '\n'); # if ('1 362 n [1,075,548..1,079,360]' in ovl.original_line.strip()): # print 'Tu sam 4!'; # print ovl.verbose_as_string(); next_state = STATE_OVERLAP_LINE; else: next_state = STATE_ALIGNMENT; current_state = next_state; if (ovl): sam_line = ovl.convert_to_sam(ref_headers, ref_daligner_seq_id, read_headers, read_seqs, read_quals, header_conversion_hash); num_sam_lines += 1; fp_out.write(sam_line + '\n'); fp_in.close(); fp_out.close(); sys.stderr.write('Number of outputted SAM lines: %d\n' % (num_sam_lines)); # Function 'run' should provide a standard interface for running a mapper. Given input parameters, it should run the # alignment process, and convert any custom output results to the SAM format. Function should return a string with the # path to the output file. # reads_file Path to a FASTA/FASTQ file containing reads. # reference_file Path to a reference genome FASTA file. # machine_name A symbolic name to specify a set of parameters for a specific sequencing platform. # output_path Folder to which the output will be placed to. Filename will be automatically generated according to the name of the mapper being run. # output_suffix A custom suffix that can be added to the output filename. def run(run_type, reads_file, reference_file, machine_name, output_path, output_suffix=''): parameters = ''; num_threads = multiprocessing.cpu_count() / 2; if ((machine_name.lower() == 'illumina') or (machine_name.lower() == 'roche')): # parameters = '-v -s1 -h10 -e.9'; ### I get poor results on Illumina data (simulated), concretely DALIGNER mapps 0 reads. I think the problem is 'alignment but ### simply a set of trace points, typically every 100bp or so, that allow the', and reads that I simulated were 150bp in length. 
parameters = '-v'; elif ((machine_name.lower() == 'pacbio')): # parameters = '-t %s -x pacbio' % str(num_threads); parameters = '-v'; elif ((machine_name.lower() == 'nanopore')): parameters = '-v -e.7 -k10'; elif ((machine_name.lower() == 'k9')): parameters = '-v -e.7 -k9'; elif ((machine_name.lower() == 'k10')): parameters = '-v -e.7 -k10'; # elif ((machine_name.lower() == 'debug')): # parameters = '-t %s' % str(num_threads); else: # default parameters = '-vd'; if (output_suffix != ''): if (output_suffix.lower().endswith('.sam')): output_filename = os.path.splitext(output_suffix)[0]; else: output_filename = '%s-%s' % (MAPPER_NAME, output_suffix); else: output_filename = MAPPER_NAME; # Check if the given input file is a FASTA or FASTQ, and convert to FASTA if necessary. if (reads_file[-1] == 'q'): sys.stderr.write('[%s wrapper] Converting FASTQ to FASTA...\n' % (MAPPER_NAME)); reads_fasta = reads_file[0:-1] + 'a'; convert_to_fasta(reads_file, reads_fasta); reads_file = reads_fasta; sys.stderr.write('\n'); reads_basename = os.path.splitext(os.path.basename(reads_file))[0]; sam_file = '%s/%s.sam' % (output_path, output_filename); memtime_file = '%s/%s.memtime' % (output_path, output_filename); memtime_file_hpcmapper = '%s/%s-hpcmapper.memtime' % (output_path, output_filename); memtime_file_index = '%s/%s-index.memtime' % (output_path, output_filename); ### Convert the input files to absolute paths. if (os.path.isabs(reads_file) == False): reads_file = os.path.abspath(reads_file); if (os.path.isabs(reference_file) == False): reference_file = os.path.abspath(reference_file); daligner_reference_file = reference_file + '-dalignerreference.fasta'; if (os.path.exists(daligner_reference_file)): sys.stderr.write('[%s wrapper] DALIGNER reference already exists. Removing.\n' % (MAPPER_NAME)); os.remove(daligner_reference_file); if (run_type == 'align' or run_type == 'run'): index_file = daligner_reference_file + '.dam'; if (os.path.exists(index_file)): sys.stderr.write('[%s wrapper] The DALIGNER index file already exists ("%s"), removing.\n' % (MAPPER_NAME, index_file)); os.remove(index_file); # Run the indexing process, and measure execution time and memory. # daligner_reference_file = reference_file if (reference_file.lower().endswith('fasta')) else (reference_file + '.fasta'); sys.stderr.write('[%s wrapper] Wrapping the sequences in the reference FASTA file. DALIGNER has a line length limit of 9998 chars.\n' % (MAPPER_NAME)); wrap_fasta_file(reference_file, daligner_reference_file); if (True or (not os.path.exists(index_file))): if (not os.path.exists(daligner_reference_file)): sys.stderr.write('[%s wrapper] Copying reference to satisfy the extension requirements...\n' % (MAPPER_NAME)); command = 'cp %s %s.fasta' % (reference_file, reference_file); execute_command(command); sys.stderr.write('[%s wrapper] Generating index...\n' % (MAPPER_NAME)); command = '%s %s/fasta2DAM %s %s' % (measure_command_wrapper(memtime_file_index), ALIGNER_DB_PATH, index_file, daligner_reference_file); execute_command(command); command = '%s %s/DBsplit -x100 %s' % (measure_command_wrapper(memtime_file_index), ALIGNER_DB_PATH, daligner_reference_file); execute_command(command); sys.stderr.write('\n'); else: sys.stderr.write('[%s wrapper] Reference index already exists. Continuing.\n' % (MAPPER_NAME)); sys.stderr.flush(); daligner_reads_file = '%s-dalignerreads.fasta' % (os.path.splitext(reads_file)[0]); if (os.path.exists(daligner_reads_file)): sys.stderr.write('[%s wrapper] DALIGNER reads file already exists. 
Removing.\n' % (MAPPER_NAME)); os.remove(daligner_reads_file); if (True or (not os.path.exists(daligner_reads_file))): sys.stderr.write('[%s wrapper] Modifying the reads file to have PacBio headers...\n' % (MAPPER_NAME)); # command = 'cp %s %s.fasta' % (reads_file, reads_file); # subprocess.call(command, shell=True); header_conversion_hash = convert_reads_to_pacbio_format(reads_file, daligner_reads_file); sys.stderr.write('\n'); sys.stderr.write('[%s wrapper] Converting the reads file into a DB file...\n' % (MAPPER_NAME)); daligner_reads_file_db = '%s.db' % (daligner_reads_file); if (os.path.exists(daligner_reads_file_db)): sys.stderr.write('[%s wrapper] The DALIGNER reads DB file already exists ("%s"), removing.\n' % (MAPPER_NAME, daligner_reads_file_db)); os.remove(daligner_reads_file_db); command = '%s %s/fasta2DB %s.db %s' % (measure_command_wrapper(memtime_file_index), ALIGNER_DB_PATH, daligner_reads_file, daligner_reads_file); execute_command(command); sys.stderr.write('\n'); ### DALIGNER's HPCmapper script basically just generates a shell script with commands that need to be run to generate alignments in parallel and then join them into one LAS file. ### Instead of outputting this script to a file, we intercept the STDOUT and modify it a bit. ### Modifications are needed because DALIGNER's generated script expects that it's binaries are in PATH, and also it generates intermediate files in the current folder. ### That's why we modify the PATH variable first, and change the execution folder to the output folder. ### Please note that reads_file and reference_file then need to be absolute paths, so that's why we performed the conversion above. # Run the alignment process, and measure execution time and memory. # # # ### Extracting alignments is not documented very well, or at least there is not any examples out there on how to do that. ### From the DALIGNER's README on GitHub, it says: ### "If the -c option is given then a cartoon rendering is displayed, and if -a or -r option is set then an alignment of the local alignment is displayed." ### ### (1) The -c on its own doesn't really show much useful info, e.g.: ### 1 1 n [1,356,122..1,358,765] x [ 817.. 3,601] ( 27 trace pts) ### 1356122 3280910 ### A ==========+------------+=========> dif/(len1+len2) = 432/(2643+2784) = 15.92% ### B ======+------------> ### 817 ### ### (2) The -a option produces alignments in the BLAST like format, that is hard to parse. E.g.: ### 1 1 n [1,356,122..1,358,765] x [ 817.. 3,601] : = 432 diffs ( 27 trace pts) ### ### 1356113 taattaacgc[tgtggcgg-taactaaatcgaagaacagcgccgacaacgcgacaatcccgaccataatga-cgttgag-tgccggagtcc--gccattt ### ::::::::::[||||||||*|||||||||||*|| *|||||||||||||||*|*|||||||||||||*||||*|||||||*|||||||||||**||||||| ### 808 tagttcacct[tgtggcgggtaactaaatcgtag-acagcgccgacaacgtg-caatcccgaccattatgaacgttgaggtgccggagtccctgccattt 11.2% ### ### (3) The -r option - causes slightly different row width. ### 1 1 n [1,356,122..1,358,765] x [ 817.. 
3,601] : = 432 diffs ( 27 trace pts) ### ### 1356113 taattaacgc[tgtggcgg-taactaaatcgaagaacagcgccgacaacgcgacaatcccgaccataatga-cgttgag-tgccggagtcc--g ### ::::::::::[||||||||*|||||||||||*||*|||||||||||||||*|*|||||||||||||*||||*|||||||*|||||||||||**| ### 808 tagttcacct[tgtggcgggtaactaaatcgtag-acagcgccgacaacgtg-caatcccgaccattatgaacgttgaggtgccggagtccctg 12.0% sys.stderr.write('[%s wrapper] Running %s...\n' % (MAPPER_NAME, MAPPER_NAME)); if (run_type == 'align' or run_type == 'run'): command = '%s %s/HPCmapper %s %s %s' % (measure_command_wrapper(memtime_file_hpcmapper), ALIGNER_PATH, parameters, daligner_reference_file, daligner_reads_file); [out, err] = execute_command_get_stdout(command); # print out; sys.stderr.write(out + '\n\n'); sys.stderr.flush(); ### Replace ampersands with '\n' so it's easier to split commands and add measurement calls to the commands. out = out.replace('&&', '\n'); ### LAshow should extract the overlaps/alignments from the LAS file. las_file = '%s.%s.las' % (os.path.basename(daligner_reference_file), os.path.basename(daligner_reads_file)); out += '\nLAshow -a %s %s %s > %s.txt' % (daligner_reference_file, daligner_reads_file, las_file, las_file); # commands_daligner = 'PATH="$PATH:%s"\necho $PATH\ncd %s\n%s %s\nLAshow -a %s %s %s > %s.txt' % (ALIGNER_PATH, output_path, measure_command_wrapper(memtime_file), out, daligner_reference_file, daligner_reads_file, las_file, las_file); # for line in out.split('\n'): # print '%s\n' % line; # print '---------------'; ### Prepare measurement command for each line of the generated script. daligner_out_formatted = [command.split('#')[0].strip() for command in out.split('\n') if (len(command.split('#')[0].strip()) > 0)] ### This addresses a DALIGNER bug and attempts to pass around it. The bug was: on a larger reference (concretely, hg19 chr6+chr22), DALIGNER split the reference into chunks ### and aligned into separate LAS files, namely: L1.1.1.las and L1.2.1.las, but the final LAmerge tried merging L1.1.1 and L1.1.2 which did not exist. ### For this reason, the output LAS file was empty. all_intermediate_las = []; i = 0; while (i < len(daligner_out_formatted)): line = daligner_out_formatted[i]; line = line.strip(); if ('LAmerge' in line): split_line = line.split('LAmerge -v '); intermediate_las = split_line[-1].split()[0]; if (intermediate_las == os.path.splitext(las_file)[0]): # print all_intermediate_las; joined_intermediate_las = ''; for las in all_intermediate_las: joined_intermediate_las += ' %s' % (las); fixed_command = ''; if (len(joined_intermediate_las) > 0): fixed_command = '%s LAmerge -v %s %s' % (split_line[0], las_file, joined_intermediate_las); daligner_out_formatted[i] = fixed_command; # print fixed_command; break; all_intermediate_las.append(intermediate_las); i += 1; ### This part adds time measurements to the DALIGNER script lines. 
memtime_files = []; i = 0; while (i < len(daligner_out_formatted)): if (daligner_out_formatted[i].strip().startswith('rm')): daligner_out_formatted[i] = 'echo "Skipping a rm command."'; i += 1; continue; # if (daligner_out_formatted[i].strip().startswith('daligner')): # daligner_out_formatted[i] = 'echo "Skipping a daligner command."'; # i += 1; # continue; memtime_file_i = '%s/%s-%d.memtime' % (output_path, output_filename, i); memtime_files.append(memtime_file_i); measure_command_i = measure_command_wrapper(memtime_file_i); daligner_out_formatted[i] = '%s %s' % (measure_command_i, daligner_out_formatted[i]); i += 1; joined_commands = '; '.join(daligner_out_formatted); sys.stderr.write('\n\n'); sys.stderr.flush(); ### Add the paths so the commands can be executed properly. commands_daligner = 'PATH="$PATH:%s"; echo $PATH; cd %s; %s' % (ALIGNER_PATH, output_path, joined_commands); execute_command(commands_daligner); sys.stderr.write('\n'); final_memtime_file = memtime_file; parse_memtime_files_and_accumulate(memtime_files, final_memtime_file); for memtime_file in memtime_files: try: os.remove(memtime_file); except OSError: pass; sys.stderr.write('[%s wrapper] Converting the output to SAM format at path "%s"...\n' % (MAPPER_NAME, sam_file)); convert_to_sam('%s/%s.txt' % (output_path, las_file), daligner_reference_file, daligner_reads_file, header_conversion_hash, sam_file); elif (run_type == 'overlap'): command = '%s %s/HPCdaligner %s %s' % (measure_command_wrapper(memtime_file_hpcmapper), ALIGNER_PATH, parameters, daligner_reads_file); [out, err] = execute_command_get_stdout(command); ### LAshow should extract the overlaps/alignments from the LAS file. las_file = '%s.1.las' % (daligner_reads_file); commands_daligner = 'PATH="$PATH:%s"\necho $PATH\ncd %s\n%s\nLAshow -a %s %s %s > %s.txt' % (ALIGNER_PATH, output_path, out, daligner_reference_file, daligner_reads_file, las_file, las_file); commands_daligner = '; '.join([command for command in commands_daligner.split('\n') if (len(command) > 0 and command[0] != '#')]); execute_command(commands_daligner); sys.stderr.write('\n'); elif (run_type == 'onlyconvert'): sys.stderr.write('[%s wrapper] Converting the output to SAM format at path "%s"...\n' % (MAPPER_NAME, sam_file)); las_file = '%s.%s.las' % (os.path.basename(daligner_reference_file), os.path.basename(daligner_reads_file)); convert_to_sam('%s/%s.txt' % (output_path, las_file), daligner_reference_file, daligner_reads_file, header_conversion_hash, sam_file); sys.stderr.write('[%s wrapper] %s wrapper script finished processing.\n' % (MAPPER_NAME, MAPPER_NAME)); return sam_file # This is a standard interface for setting up the aligner. It should assume that the aligner # is not present localy, but needs to be retrieved, unpacked, compiled and set-up, without requireing # root privileges. 
def download_and_install():
    sys.stderr.write('[%s wrapper] Started installation of %s.\n' % (MAPPER_NAME, MAPPER_NAME));

    sys.stderr.write('[%s wrapper] Creating a folder for all %s repos...\n' % (MAPPER_NAME, MAPPER_NAME));
    command = 'mkdir -p %s/%s' % (ALIGNERS_PATH_ROOT_ABS_, MAPPER_NAME);
    execute_command(command);

    sys.stderr.write('[%s wrapper] Cloning git repository.\n' % (MAPPER_NAME));
    command = 'cd %s/%s; git clone %s' % (ALIGNERS_PATH_ROOT_ABS_, MAPPER_NAME, ALIGNER_URL);
    execute_command(command);

    sys.stderr.write('[%s wrapper] Checking out commit d4aa487 used in the GraphMap paper.\n' % (MAPPER_NAME));
    command = 'cd %s/%s/%s; git checkout d4aa487' % (ALIGNERS_PATH_ROOT_ABS_, MAPPER_NAME, MAPPER_NAME);
    execute_command(command);

    sys.stderr.write('[%s wrapper] Running make.\n' % (MAPPER_NAME));
    command = 'cd %s; make' % (ALIGNER_PATH);
    execute_command(command);

    sys.stderr.write('[%s wrapper] Cloning git repository.\n' % (MAPPER_NAME));
    command = 'cd %s/%s; git clone %s' % (ALIGNERS_PATH_ROOT_ABS_, MAPPER_NAME, ALIGNER_DB_URL);
    execute_command(command);

    sys.stderr.write('[%s wrapper] Running make.\n' % (MAPPER_NAME));
    command = 'cd %s; make' % (ALIGNER_DB_PATH);
    execute_command(command);

    # sys.stderr.write('[%s wrapper] Checking out commit "eb428d7d31ced059ad39af2701a22ebe6d175657" for reproducibility purposes.\n' % (MAPPER_NAME));
    # command = 'cd %s; git checkout eb428d7d31ced059ad39af2701a22ebe6d175657' % (ALIGNER_PATH);
    # subprocess.call(command, shell='True');
    # sys.stderr.write('\n');

    sys.stderr.write('[%s wrapper] All installation steps finished.\n' % (MAPPER_NAME));
    sys.stderr.write('\n');

def verbose_usage_and_exit():
    sys.stderr.write('Usage:\n');
    sys.stderr.write('\t%s mode [<reads_file> <reference_file> <machine_name> <output_path> [<output_suffix>]]\n' % sys.argv[0]);
    sys.stderr.write('\n');
    sys.stderr.write('\t- mode - either "align", "overlap" or "install". If "install", other parameters can be omitted.\n');
    sys.stderr.write('\t- machine_name - "illumina", "roche", "pacbio", "nanopore" or "default".\n');
    sys.stderr.write('\t- output_suffix - suffix for the output filename. If this parameter ends with ".sam", the value will be used as the full output filename.\n');

    exit(0);

if __name__ == "__main__":
    if (len(sys.argv) < 2 or len(sys.argv) > 7):
        verbose_usage_and_exit();

    if (sys.argv[1] == 'install'):
        download_and_install();
        exit(0);

    elif (sys.argv[1] == 'align' or sys.argv[1] == 'run' or sys.argv[1] == 'onlyconvert'):
        if (len(sys.argv) < 6):
            verbose_usage_and_exit();

        reads_file = sys.argv[2];
        reference_file = sys.argv[3];
        machine_name = sys.argv[4];
        output_path = os.path.abspath(sys.argv[5]);
        output_suffix = '';
        if (len(sys.argv) == 7):
            output_suffix = sys.argv[6];
        run(sys.argv[1], reads_file, reference_file, machine_name, output_path, output_suffix);

    elif (sys.argv[1] == 'overlap'):
        if (len(sys.argv) < 6):
            verbose_usage_and_exit();

        reads_file = sys.argv[2];
        reference_file = sys.argv[3];
        machine_name = sys.argv[4];
        output_path = os.path.abspath(sys.argv[5]);
        output_suffix = '';
        if (len(sys.argv) == 7):
            output_suffix = sys.argv[6];
        run(sys.argv[1], reads_file, reference_file, machine_name, output_path, output_suffix);

    else:
        verbose_usage_and_exit();
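The wrapper above times and profiles each DALIGNER step by prefixing shell commands with /usr/bin/time (see measure_command_wrapper and parse_memtime). Below is a minimal, self-contained sketch of that same pattern, assuming GNU time is installed at /usr/bin/time; the output path and command in the example are hypothetical, not taken from the wrapper:

import subprocess

def run_measured(command, memtime_path):
    '''Run a shell command under GNU time, writing a small stats file,
    then read the stats back into a dict. Sketch only.'''
    fmt = 'Real time: %e s\\nMaximum RSS: %M kB'
    measured = '/usr/bin/time --format "%s" -o %s %s' % (fmt, memtime_path, command)
    subprocess.call(measured, shell=True)
    stats = {}
    with open(memtime_path) as f:
        for line in f:
            key, value = line.split(':', 1)
            stats[key.strip()] = value.strip()
    return stats

# e.g. run_measured('sleep 1', '/tmp/demo.memtime')
#      -> {'Real time': '1.00 s', 'Maximum RSS': '... kB'}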
isovic/aligneval
wrappers/wrapper_daligner.py
Python
mit
43,692
[ "BLAST" ]
ff69170819555f1fa58a3c0d6ca2c1237b41ed4d74f73882e1a3071b55da9b22
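Overlap.calc_cigar_string in the wrapper above converts the per-column traceback of a DALIGNER alignment into a CIGAR string by run-length encoding consecutive operations. A minimal sketch of just that encoding step (the helper name is mine, not part of the wrapper):

import itertools

def traceback_to_cigar(traceback):
    '''Collapse a per-column traceback such as ['M','M','I','M','D','D']
    into a run-length encoded CIGAR string such as '2M1I1M2D'.'''
    return ''.join('%d%s' % (len(list(group)), op)
                   for op, group in itertools.groupby(traceback))

# Example: traceback_to_cigar(list('MMIMDD')) -> '2M1I1M2D'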
""" Support for managing apps (as created with "0install add"). @since: 1.9 """ # Copyright (C) 2012, Thomas Leonard # See the README file for details, or visit http://0install.net. from zeroinstall import _, SafeException, logger from zeroinstall.support import basedir, portable_rename from zeroinstall.injector import namespaces, selections, qdom, model import re, os, time, tempfile, errno # Avoid characters that are likely to cause problems (reject : and ; everywhere # so that apps can be portable between POSIX and Windows). valid_name = re.compile(r'''^[^./\\:=;'"][^/\\:=;'"]*$''') def validate_name(name): """@type name: str""" if name == '0install': raise SafeException("Creating an app called '0install' would cause trouble; try e.g. '00install' instead") if valid_name.match(name): return raise SafeException("Invalid application name '{name}'".format(name = name)) def _export(name, value): """Try to guess the command to set an environment variable.""" shell = os.environ.get('SHELL', '?') if 'csh' in shell: return "setenv %s %s" % (name, value) return "export %s=%s" % (name, value) def find_bin_dir(paths = None): """Find the first writable path in the list (default $PATH), skipping /bin, /sbin and everything under /usr except /usr/local/bin @type paths: [str] | None @rtype: str""" if paths is None: paths = os.environ['PATH'].split(os.pathsep) for path in paths: if path.startswith('/usr/') and not path.startswith('/usr/local/bin'): # (/usr/local/bin is OK if we're running as root) pass elif path.startswith('/bin') or path.startswith('/sbin'): pass elif os.path.realpath(path).startswith(basedir.xdg_cache_home): pass # print "Skipping cache", first_path elif not os.access(path, os.W_OK): pass # print "No access", first_path else: break else: path = os.path.expanduser('~/bin/') logger.warning('%s is not in $PATH. Add it with:\n%s' % (path, _export('PATH', path + ':$PATH'))) if not os.path.isdir(path): os.makedirs(path) return path _command_template = """#!/bin/sh exec 0install run {app} "$@" """ class AppScriptInfo(object): """@since: 1.12""" name = None command = None def parse_script_header(stream): """If stream is a shell script for an application, return the app details. @param stream: the executable file's stream (will seek) @type stream: file-like object @return: the app details, if any @rtype: L{AppScriptInfo} | None @since: 1.12""" try: stream.seek(0) template_header = _command_template[:_command_template.index("{app}")] actual_header = stream.read(len(template_header)) stream.seek(0) if template_header == actual_header: # If it's a launcher script, it should be quite short! rest = stream.read() line = rest.split('\n')[1] else: return None except UnicodeDecodeError as ex: logger.info("Not an app script '%s': %s", stream, ex) return None info = AppScriptInfo() info.name = line.split()[3] return info class App(object): def __init__(self, config, path): """@type path: str""" self.config = config self.path = path def set_selections(self, sels, set_last_checked = True): """Store a new set of selections. We include today's date in the filename so that we keep a history of previous selections (max one per day), in case we want to to roll back later. 
@type sels: L{zeroinstall.injector.selections.Selections} @type set_last_checked: bool""" date = time.strftime('%Y-%m-%d') sels_file = os.path.join(self.path, 'selections-{date}.xml'.format(date = date)) dom = sels.toDOM() if self.config.handler.dry_run: print(_("[dry-run] would write selections to {file}").format(file = sels_file)) else: tmp = tempfile.NamedTemporaryFile(prefix = 'selections.xml-', dir = self.path, delete = False, mode = 'wt') try: dom.writexml(tmp, addindent=" ", newl="\n", encoding = 'utf-8') except: tmp.close() os.unlink(tmp.name) raise tmp.close() portable_rename(tmp.name, sels_file) sels_latest = os.path.join(self.path, 'selections.xml') if self.config.handler.dry_run: print(_("[dry-run] would update {link} to point to new selections file").format(link = sels_latest)) else: if os.path.exists(sels_latest): os.unlink(sels_latest) if os.name == "nt": import shutil shutil.copyfile(sels_file, sels_latest) else: os.symlink(os.path.basename(sels_file), sels_latest) if set_last_checked: self.set_last_checked() def get_selections(self, snapshot_date = None, may_update = False, use_gui = None): """Load the selections. If may_update is True then the returned selections will be cached and available. @param snapshot_date: get a historical snapshot @type snapshot_date: (as returned by L{get_history}) | None @param may_update: whether to check for updates @type may_update: bool @param use_gui: whether to use the GUI for foreground updates @type use_gui: bool | None (never/always/if possible) @return: the selections @rtype: L{selections.Selections}""" if snapshot_date: assert may_update is False, "Can't update a snapshot!" sels_file = os.path.join(self.path, 'selections-' + snapshot_date + '.xml') else: sels_file = os.path.join(self.path, 'selections.xml') try: with open(sels_file, 'rb') as stream: sels = selections.Selections(qdom.parse(stream)) except IOError as ex: if may_update and ex.errno == errno.ENOENT: logger.info("App selections missing: %s", ex) sels = None else: raise if may_update: sels = self._check_for_updates(sels, use_gui) return sels def get_history(self): """Get the dates of the available snapshots, starting with the most recent. @rtype: [str]""" date_re = re.compile('selections-(\d\d\d\d-\d\d-\d\d).xml') snapshots = [] for f in os.listdir(self.path): match = date_re.match(f) if match: snapshots.append(match.group(1)) snapshots.sort(reverse = True) return snapshots def download_selections(self, sels): """Download any missing implementations. @type sels: L{zeroinstall.injector.selections.Selections} @return: a blocker which resolves when all needed implementations are available @rtype: L{tasks.Blocker} | None""" return sels.download_missing(self.config) # TODO: package impls def _check_for_updates(self, sels, use_gui): """Check whether the selections need to be updated. If any input feeds have changed, we re-run the solver. If the new selections require a download, we schedule one in the background and return the old selections. Otherwise, we return the new selections. If we can select better versions without downloading, we update the app's selections and return the new selections. If we can't use the current selections, we update in the foreground. We also schedule a background update from time-to-time anyway. 
@type sels: L{zeroinstall.injector.selections.Selections} @type use_gui: bool @return: the selections to use @rtype: L{selections.Selections}""" need_solve = False # Rerun solver (cached feeds have changed) need_update = False # Update over the network if sels: utime = self._get_mtime('last-checked', warn_if_missing = True) last_solve = max(self._get_mtime('last-solve', warn_if_missing = False), utime) # Ideally, this would return all the files which were inputs into the solver's # decision. Currently, we approximate with: # - the previously selected feed files (local or cached) # - configuration files for the selected interfaces # - the global configuration # We currently ignore feeds and interfaces which were # considered but not selected. # Can yield None (ignored), paths or (path, mtime) tuples. # If this throws an exception, we will log it and resolve anyway. def get_inputs(): for sel in sels.selections.values(): logger.info("Checking %s", sel.feed) if sel.feed.startswith('distribution:'): # If the package has changed version, we'll detect that below # with get_unavailable_selections. pass elif os.path.isabs(sel.feed): # Local feed yield sel.feed else: # Cached feed cached = basedir.load_first_cache(namespaces.config_site, 'interfaces', model.escape(sel.feed)) if cached: yield cached else: raise IOError("Input %s missing; update" % sel.feed) # Per-feed configuration yield basedir.load_first_config(namespaces.config_site, namespaces.config_prog, 'interfaces', model._pretty_escape(sel.interface)) # Global configuration yield basedir.load_first_config(namespaces.config_site, namespaces.config_prog, 'global') # If any of the feeds we used have been updated since the last check, do a quick re-solve iface_cache = self.config.iface_cache try: for item in get_inputs(): if not item: continue if isinstance(item, tuple): path, mtime = item else: path = item try: mtime = os.stat(path).st_mtime except OSError as ex: logger.info("Triggering update to {app} due to error: {ex}".format( app = self, path = path, ex = ex)) need_solve = True break if mtime and mtime > last_solve: logger.info("Triggering update to %s because %s has changed", self, path) need_solve = True break except Exception as ex: logger.info("Error checking modification times: %s", ex) need_solve = True need_update = True # Is it time for a background update anyway? if not need_update: staleness = time.time() - utime logger.info("Staleness of app %s is %d hours", self, staleness / (60 * 60)) freshness_threshold = self.config.freshness if freshness_threshold > 0 and staleness >= freshness_threshold: need_update = True # If any of the saved selections aren't available then we need # to download right now, not later in the background. 
unavailable_selections = sels.get_unavailable_selections(config = self.config, include_packages = True) if unavailable_selections: logger.info("Saved selections are unusable (missing %s)", ', '.join(str(s) for s in unavailable_selections)) need_solve = True else: # No current selections need_solve = True unavailable_selections = True if need_solve: from zeroinstall.injector.driver import Driver driver = Driver(config = self.config, requirements = self.get_requirements()) if driver.need_download(): if unavailable_selections: return self._foreground_update(driver, use_gui) else: # Continue with the current (cached) selections while we download need_update = True else: old_sels = sels sels = driver.solver.selections from zeroinstall.support import xmltools if old_sels is None or not xmltools.nodes_equal(sels.toDOM(), old_sels.toDOM()): self.set_selections(sels, set_last_checked = False) try: self._touch('last-solve') except OSError as ex: logger.warning("Error checking for updates: %s", ex) # If we tried to check within the last hour, don't try again. if need_update: last_check_attempt = self._get_mtime('last-check-attempt', warn_if_missing = False) if last_check_attempt and last_check_attempt + 60 * 60 > time.time(): logger.info("Tried to check within last hour; not trying again now") need_update = False if need_update: try: self.set_last_check_attempt() except OSError as ex: logger.warning("Error checking for updates: %s", ex) else: from zeroinstall.injector import background r = self.get_requirements() background.spawn_background_update2(r, False, self) return sels def _foreground_update(self, driver, use_gui): """We can't run with saved selections or solved selections without downloading. Try to open the GUI for a blocking download. If we can't do that, download without the GUI. 
@type driver: L{zeroinstall.injector.driver.Driver} @rtype: L{zeroinstall.injector.selections.Selections}""" from zeroinstall import helpers from zeroinstall.support import tasks gui_args = driver.requirements.get_as_options() + ['--download-only', '--refresh'] sels = helpers.get_selections_gui(driver.requirements.interface_uri, gui_args, test_callback = None, use_gui = use_gui) if sels is None: raise SafeException("Aborted by user") if sels is helpers.DontUseGUI: downloaded = driver.solve_and_download_impls(refresh = True) if downloaded: tasks.wait_for_blocker(downloaded) sels = driver.solver.selections self.set_selections(sels, set_last_checked = True) return sels def set_requirements(self, requirements): """@type requirements: L{zeroinstall.injector.requirements.Requirements}""" reqs_file = os.path.join(self.path, 'requirements.json') if self.config.handler.dry_run: print(_("[dry-run] would write {file}").format(file = reqs_file)) else: import json tmp = tempfile.NamedTemporaryFile(prefix = 'tmp-requirements-', dir = self.path, delete = False, mode = 'wt') try: json.dump(dict((key, getattr(requirements, key)) for key in requirements.__slots__), tmp) except: tmp.close() os.unlink(tmp.name) raise tmp.close() portable_rename(tmp.name, reqs_file) def get_requirements(self): """@rtype: L{zeroinstall.injector.requirements.Requirements}""" import json from zeroinstall.injector import requirements r = requirements.Requirements(None) reqs_file = os.path.join(self.path, 'requirements.json') with open(reqs_file, 'rt') as stream: values = json.load(stream) # Update old before/not-before values before = values.pop('before', None) not_before = values.pop('not_before', None) if before or not_before: assert 'extra_restrictions' not in values, values expr = (not_before or '') + '..' if before: expr += '!' + before values['extra_restrictions'] = {values['interface_uri']: expr} for k, v in values.items(): setattr(r, k, v) return r def set_last_check_attempt(self): self._touch('last-check-attempt') def set_last_checked(self): self._touch('last-checked') def _touch(self, name): """@type name: str""" timestamp_path = os.path.join(self.path, name) if self.config.handler.dry_run: pass #print(_("[dry-run] would update timestamp file {file}").format(file = timestamp_path)) else: fd = os.open(timestamp_path, os.O_WRONLY | os.O_CREAT, 0o644) os.close(fd) os.utime(timestamp_path, None) # In case file already exists def _get_mtime(self, name, warn_if_missing = True): """@type name: str @type warn_if_missing: bool @rtype: int""" timestamp_path = os.path.join(self.path, name) try: return os.stat(timestamp_path).st_mtime except Exception as ex: if warn_if_missing: logger.warning("Failed to get time-stamp of %s: %s", timestamp_path, ex) return 0 def get_last_checked(self): """Get the time of the last successful check for updates. @return: the timestamp (or None on error) @rtype: float | None""" return self._get_mtime('last-checked', warn_if_missing = True) def get_last_check_attempt(self): """Get the time of the last attempted check. @return: the timestamp, or None if we updated successfully. 
@rtype: float | None""" last_check_attempt = self._get_mtime('last-check-attempt', warn_if_missing = False) if last_check_attempt: last_checked = self.get_last_checked() if last_checked < last_check_attempt: return last_check_attempt return None def destroy(self): # Check for shell command # TODO: remember which commands we own instead of guessing name = self.get_name() bin_dir = find_bin_dir() launcher = os.path.join(bin_dir, name) expanded_template = _command_template.format(app = name) if os.path.exists(launcher) and os.path.getsize(launcher) == len(expanded_template): with open(launcher, 'r') as stream: contents = stream.read() if contents == expanded_template: if self.config.handler.dry_run: print(_("[dry-run] would delete launcher script {file}").format(file = launcher)) else: os.unlink(launcher) if self.config.handler.dry_run: print(_("[dry-run] would delete directory {path}").format(path = self.path)) else: # Remove the app itself import shutil shutil.rmtree(self.path) def integrate_shell(self, name): # TODO: remember which commands we create """@type name: str""" if not valid_name.match(name): raise SafeException("Invalid shell command name '{name}'".format(name = name)) bin_dir = find_bin_dir() launcher = os.path.join(bin_dir, name) if os.path.exists(launcher): raise SafeException("Command already exists: {path}".format(path = launcher)) if self.config.handler.dry_run: print(_("[dry-run] would write launcher script {path}").format(path = launcher)) else: with open(launcher, 'w') as stream: stream.write(_command_template.format(app = self.get_name())) # Make new script executable os.chmod(launcher, 0o111 | os.fstat(stream.fileno()).st_mode) def get_name(self): """@rtype: str""" return os.path.basename(self.path) def __str__(self): """@rtype: str""" return '<app ' + self.get_name() + '>' class AppManager(object): def __init__(self, config): """@type config: L{zeroinstall.injector.config.Config}""" self.config = config def create_app(self, name, requirements): """@type name: str @type requirements: L{zeroinstall.injector.requirements.Requirements} @rtype: L{App}""" validate_name(name) apps_dir = basedir.save_config_path(namespaces.config_site, "apps") app_dir = os.path.join(apps_dir, name) if os.path.isdir(app_dir): raise SafeException(_("Application '{name}' already exists: {path}").format(name = name, path = app_dir)) if self.config.handler.dry_run: print(_("[dry-run] would create directory {path}").format(path = app_dir)) else: os.mkdir(app_dir) app = App(self.config, app_dir) app.set_requirements(requirements) app.set_last_checked() return app def lookup_app(self, name, missing_ok = False): """Get the App for name. Returns None if name is not an application (doesn't exist or is not a valid name). Since / and : are not valid name characters, it is generally safe to try this before calling L{injector.model.canonical_iface_uri}. @type name: str @type missing_ok: bool @rtype: L{App}""" if not valid_name.match(name): if missing_ok: return None else: raise SafeException("Invalid application name '{name}'".format(name = name)) app_dir = basedir.load_first_config(namespaces.config_site, "apps", name) if app_dir: return App(self.config, app_dir) if missing_ok: return None else: raise SafeException("No such application '{name}'".format(name = name)) def iterate_apps(self): seen = set() for apps_dir in basedir.load_config_paths(namespaces.config_site, "apps"): for name in os.listdir(apps_dir): if valid_name.match(name): if name in seen: continue seen.add(name) yield name
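# Illustrative sketch (not part of the original module): get_requirements()
# above migrates legacy 'before'/'not_before' version bounds into the single
# range expression "<not_before>..!<before>". The helper name below is
# hypothetical; it just restates that conversion so it can be read and tested
# in isolation.

def _legacy_bounds_to_range(before, not_before):
    """E.g. ('2.0', '1.0') -> '1.0..!2.0'; (None, '1.0') -> '1.0..'."""
    expr = (not_before or '') + '..'
    if before:
        expr += '!' + before
    return expr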
slovenwd/0install
zeroinstall/apps.py
Python
lgpl-2.1
19,041
[ "VisIt" ]
47f2cff32de0a18d61e11806eb95a376ae283935b4e161b1b58e2d4eb8fdfc41
import csv
import io
import unittest

from nacc.uds3 import filters


class TestFilters(unittest.TestCase):

    def test_filter_clean_ptid_removes_visits_in_nacc_current(self):
        '''
        `filter_clean_ptid` should remove visit data that is already in
        NACC's Current database.
        '''
        subjects = '''
Patient ID,Packet type,Visit Num,Status
110001,I,001,Current
110002,I,001,Working
110003,F,002,Current
110004,F,002,Working
110005,I,001,Certified
'''.strip()

        redcap_data = '''
ptid,redcap_event_name,formver,adcid,visitmo,visitday,visityr,visitnum,initials,header_complete
110001,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2
110002,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2
110003,followup_visit_yea_arm_1,3,99,1,1,2019,002,ABC,2
110004,followup_visit_yea_arm_1,3,99,1,1,2019,002,ABC,2
110005,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2
'''.strip()

        actual = []
        with io.StringIO(redcap_data) as data, \
                io.StringIO("") as results, \
                io.StringIO(subjects) as nacc_packet_file:
            filters.filter_clean_ptid_do(data, nacc_packet_file, results)

            # Reset the file position indicator so DictReader reads from the
            # beginning of the results "file".
            results.seek(0)
            reader = csv.DictReader(results)
            for row in reader:
                actual.append(row['ptid'])

        expected = ['110002', '110004']
        self.assertListEqual(actual, expected)

    def test_filter_eliminate_empty_date(self):
        '''
        `filter_eliminate_empty_date` should remove data with no visit date.
        '''
        redcap_data = '''
ptid,redcap_event_name,formver,adcid,visitmo,visitday,visityr,visitnum,initials,header_complete
110001,initial_visit_year_arm_1,3,99,1,1,,001,ABC,2
110002,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2
110003,followup_visit_yea_arm_1,3,99,,1,2019,002,ABC,2
110004,followup_visit_yea_arm_1,3,99,1,1,2019,002,ABC,2
'''.strip()

        actual = []
        with io.StringIO(redcap_data) as data, \
                io.StringIO("") as results:
            filters.filter_eliminate_empty_date_do(data, results)

            results.seek(0)
            reader = csv.DictReader(results)
            for row in reader:
                actual.append(row['ptid'])

        expected = ['110002', '110004']
        self.assertListEqual(actual, expected)

    def test_filter_remove_ptid(self):
        '''
        `filter_remove_ptid` should remove the bad ptids and keep the good
        ptids listed in the meta file (nacculator_cfg.ini).
        '''
        filter_diction = {
            'ptid_format': '11\\d.*',
            'bad_ptid': '110002,110004',
            'good_ptid': '1600-A'
        }

        redcap_data = '''
ptid,redcap_event_name,formver,adcid,visitmo,visitday,visityr,visitnum,initials,header_complete
1600-A,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2
110001,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2
110002,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2
110003,followup_visit_yea_arm_1,3,99,1,1,2019,002,ABC,2
110004,followup_visit_yea_arm_1,3,99,1,1,2019,002,ABC,2
'''.strip()

        actual = []
        with io.StringIO(redcap_data) as data, \
                io.StringIO("") as results:
            filters.filter_remove_ptid_do(data, filter_diction, results)

            results.seek(0)
            reader = csv.DictReader(results)
            for row in reader:
                actual.append(row['ptid'])

        expected = ['1600-A', '110001', '110003']
        self.assertListEqual(actual, expected)

    def test_filter_fix_visitdate(self):
        '''
        `filter_fix_visitdate` should turn the visit number string into an
        int when it is filled and do nothing when it is blank.
        '''
        redcap_data = '''
ptid,redcap_event_name,formver,adcid,visitmo,visitday,visityr,visitnum,initials,header_complete
110001,initial_visit_year_arm_1,3,99,1,1,2019,,ABC,2
110002,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2
110003,followup_visit_yea_arm_1,3,99,1,1,2019,,ABC,2
110004,followup_visit_yea_arm_1,3,99,1,1,2019,002,ABC,2
'''.strip()

        actual = []
        with io.StringIO(redcap_data) as data, \
                io.StringIO("") as results:
            filters.filter_fix_visitdate_do(data, results)

            results.seek(0)
            reader = csv.DictReader(results)
            for row in reader:
                actual.append(row['visitnum'])

        expected = ['', '1', '', '2']
        self.assertListEqual(actual, expected)

    def test_filter_fill_default(self):
        '''
        `filter_fill_default` should fill blanks in specific columns with
        their default values.
        '''
        fill_default_values = {'adcid': 41, 'formver': 3}  # in filters.py

        redcap_data = '''
ptid,redcap_event_name,formver,adcid,visitmo,visitday,visityr,visitnum,initials,header_complete
110001,initial_visit_year_arm_1,,,1,1,2019,001,ABC,2
110002,initial_visit_year_arm_1,2,99,1,1,2019,001,ABC,2
110001,followup_visit_yea_arm_1,2,,1,1,2019,002,ABC,2
110002,followup_visit_yea_arm_1,,99,1,1,2019,002,ABC,2
'''.strip()

        actual_adcid = []
        actual_formver = []
        with io.StringIO(redcap_data) as data, \
                io.StringIO("") as results:
            filters.fill_value_of_fields(data, results, fill_default_values,
                                         defaultCheck=True)

            results.seek(0)
            reader = csv.DictReader(results)
            for row in reader:
                actual_adcid.append(row['adcid'])
                actual_formver.append(row['formver'])

        expected_adcid = ['41', '99', '41', '99']
        expected_formver = ['3', '2', '2', '3']
        self.assertEqual(actual_adcid, expected_adcid)
        self.assertEqual(actual_formver, expected_formver)

    def test_filter_update_field(self):
        '''
        `filter_fill_default` should update the field value if the field
        already has a value and leave blanks blank.
        '''
        fill_non_blank_values = {'adcid': '41'}

        redcap_data = '''
ptid,redcap_event_name,formver,adcid,visitmo,visitday,visityr,visitnum,initials,header_complete
110001,initial_visit_year_arm_1,3,,1,1,2019,001,ABC,2
110002,initial_visit_year_arm_1,3,2,1,1,2019,001,ABC,2
110001,followup_visit_yea_arm_1,3,,1,1,2019,002,ABC,2
110002,followup_visit_yea_arm_1,3,99,1,1,2019,002,ABC,2
'''.strip()

        actual = []
        with io.StringIO(redcap_data) as data, \
                io.StringIO("") as results:
            filters.fill_value_of_fields(data, results, fill_non_blank_values,
                                         blankCheck=True)

            results.seek(0)
            reader = csv.DictReader(results)
            for row in reader:
                actual.append(row['adcid'])

        expected = ['', '41', '', '41']
        self.assertEqual(actual, expected)

    # Next 4 filters are sub filters of filter_extract_ptid

    def test_filter_csv_vnum(self):
        '''
        `filter_csv_vnum` should return records matching the input ptid and
        visit number.
        '''
        Ptid = '110001'
        visit_num = '1'

        redcap_data = '''
ptid,redcap_event_name,formver,adcid,visitmo,visitday,visityr,visitnum,initials,header_complete
110001,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2
110002,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2
110001,followup_visit_yea_arm_1,3,99,1,1,2019,002,ABC,2
110002,followup_visit_yea_arm_1,3,99,1,1,2019,002,ABC,2
110001,followup_visit_yea_arm_1,3,99,1,1,2019,001,ABC,2
110001,followup_visit_yea_arm_1,3,99,1,1,2019,002,ABC,2
'''.strip()

        actual = []
        with io.StringIO(redcap_data) as data, \
                io.StringIO("") as results:
            reader = csv.DictReader(data)
            output = csv.DictWriter(results, None)
            filters.write_headers(reader, output)
            filtered = filter(lambda row: filters.filter_csv_vnum(
                Ptid, visit_num, row), reader)
            output.writerows(filtered)

            results.seek(0)
            reader = csv.DictReader(results)
            for row in reader:
                actual.append(row['ptid'])

        expected = ['110001', '110001']
        self.assertListEqual(actual, expected)

    def test_filter_csv_all(self):
        '''
        `filter_csv_all` should return records matching the ptid, visit
        number, and visit type.
        '''
        Ptid = '110001'
        visit_num = '1'
        visit_type = 'initial_visit_year_arm_1'

        redcap_data = '''
ptid,redcap_event_name,formver,adcid,visitmo,visitday,visityr,visitnum,initials,header_complete
110001,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2
110002,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2
110001,followup_visit_yea_arm_1,3,99,1,1,2019,002,ABC,2
110002,followup_visit_yea_arm_1,3,99,1,1,2019,002,ABC,2
110001,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2
'''.strip()

        actual = []
        with io.StringIO(redcap_data) as data, \
                io.StringIO("") as results:
            reader = csv.DictReader(data)
            output = csv.DictWriter(results, None)
            filters.write_headers(reader, output)
            filtered = filter(lambda row: filters.filter_csv_all(
                Ptid, visit_num, visit_type, row), reader)
            output.writerows(filtered)

            results.seek(0)
            reader = csv.DictReader(results)
            for row in reader:
                actual.append(row['ptid'])

        expected = ['110001', '110001']
        self.assertListEqual(actual, expected)

    def test_filter_vtype(self):
        '''
        `filter_csv_vtype` should return records matching the ptid and visit
        type.
        '''
        Ptid = '110002'
        visit_type = 'initial_visit_year_arm_1'

        redcap_data = '''
ptid,redcap_event_name,formver,adcid,visitmo,visitday,visityr,visitnum,initials,header_complete
110001,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2
110002,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2
110001,followup_visit_yea_arm_1,3,99,1,1,2019,002,ABC,2
110002,followup_visit_yea_arm_1,3,99,1,1,2019,002,ABC,2
110002,initial_visit_year_arm_1,3,99,1,1,2019,002,ABC,2
'''.strip()

        actual = []
        with io.StringIO(redcap_data) as data, \
                io.StringIO("") as results:
            reader = csv.DictReader(data)
            output = csv.DictWriter(results, None)
            filters.write_headers(reader, output)
            filtered = filter(lambda row: filters.filter_csv_vtype(
                Ptid, visit_type, row), reader)
            output.writerows(filtered)

            results.seek(0)
            reader = csv.DictReader(results)
            for row in reader:
                actual.append(row['ptid'])

        expected = ['110002', '110002']
        self.assertListEqual(actual, expected)

    def test_filter_ptid(self):
        '''
        `filter_csv_ptid` should return records matching the ptid.
        '''
        Ptid = '110001'

        redcap_data = '''
ptid,redcap_event_name,formver,adcid,visitmo,visitday,visityr,visitnum,initials,header_complete
110001,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2
110002,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2
110001,followup_visit_yea_arm_1,3,99,1,1,2019,002,ABC,2
110002,followup_visit_yea_arm_1,3,99,1,1,2019,002,ABC,2
110001,in_person_home_visit,3,99,1,1,2019,002,ABC,2
'''.strip()

        actual = []
        with io.StringIO(redcap_data) as data, \
                io.StringIO("") as results:
            reader = csv.DictReader(data)
            output = csv.DictWriter(results, None)
            filters.write_headers(reader, output)
            filtered = filter(lambda row: filters.filter_csv_ptid(Ptid, row),
                              reader)
            output.writerows(filtered)

            results.seek(0)
            reader = csv.DictReader(results)
            for row in reader:
                actual.append(row['ptid'])

        expected = ['110001', '110001', '110001']
        self.assertListEqual(actual, expected)

    def test_filter_fix_headers(self):
        '''
        `filter_fix_headers` should change REDCap headers to NACC headers.
        '''
        redcap_data = '''
ptid,redcap_event_name,formver,adcid,visitmo,visitday,visityr,visitnum,initials,header_complete
110001,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2
110002,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2
110003,followup_visit_yea_arm_1,3,99,1,1,2019,002,ABC,2
110004,followup_visit_yea_arm_1,3,99,1,1,2019,002,ABC,2
'''.strip()

        fix_header_dict = {
            'ptid': 'PTID',
            'visitmo': 'VisitMo',
            'adcid': 'ADCid',
            'initials': 'Initials'
        }

        actual = []
        with io.StringIO(redcap_data) as data, \
                io.StringIO("") as results:
            filters.filter_fix_headers_do(data, fix_header_dict, results)

            # Reset the file position indicator so DictReader reads from the
            # beginning of the results "file".
            results.seek(0)
            reader = csv.reader(results)
            actual = next(reader)

        expected = ['PTID', 'redcap_event_name', 'formver', 'ADCid',
                    'VisitMo', 'visitday', 'visityr', 'visitnum', 'Initials',
                    'header_complete']
        self.assertListEqual(actual, expected)

    def test_filter_replace_drug_id(self):
        '''
        `test_filter_replace_drug_id` should replace the drug id in the
        record, and print the processed ptid and number of updated fields.
        '''
        redcap_data = '''
ptid,redcap_event_name,formver,adcid,visitmo,visitday,visityr,visitnum,initials,header_complete,fu_drugid_4,drugid_3
110001,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2,000002,111111
110002,initial_visit_year_arm_1,3,99,1,1,2019,001,ABC,2,,222222
110003,followup_visit_yea_arm_1,3,99,1,1,2019,002,ABC,2,,222222
110004,followup_visit_yea_arm_1,3,99,1,1,2019,002,ABC,2,000001,
'''.strip()

        filter_out_1 = []
        filter_out_2 = []
        with io.StringIO(redcap_data) as data, \
                io.StringIO("") as results:
            filters.filter_replace_drug_id_do(data, results)

            # Reset the file position indicator so DictReader reads from the
            # beginning of the results "file".
            results.seek(0)
            reader = csv.DictReader(results)
            for row in reader:
                filter_out_1.append(row['fu_drugid_4'])
                filter_out_2.append(row['drugid_3'])

        expected_1 = ['d00002', '', '', 'd00001']
        self.assertListEqual(filter_out_1, expected_1)
        expected_2 = ['d11111', 'd22222', 'd22222', '']
        self.assertListEqual(filter_out_2, expected_2)


if __name__ == "__main__":
    unittest.main()
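
# Illustrative sketch (not part of the test suite): the tests above all share
# the same in-memory round-trip pattern -- write CSV text into io.StringIO,
# run a filter over it, seek(0), and read the result back with DictReader.
# The generic `run_filter_text` helper below is hypothetical and shows that
# pattern for any filter of the form f(input_fp, output_fp).

def run_filter_text(filter_func, csv_text, column='ptid'):
    """Run `filter_func` over `csv_text`; return the values left in `column`."""
    with io.StringIO(csv_text) as data, io.StringIO("") as results:
        filter_func(data, results)
        results.seek(0)
        return [row[column] for row in csv.DictReader(results)]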
ctsit/nacculator
tests/test_filters.py
Python
bsd-2-clause
14,750
[ "VisIt" ]
23d81ea7a3bb627471602f042245492b62684f2b8c279539487627520c7dfbd2
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk


class vtkContourGrid(SimpleVTKClassModuleBase):
    def __init__(self, module_manager):
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkContourGrid(), 'Processing.',
            ('vtkUnstructuredGrid',), ('vtkPolyData',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
chrisidefix/devide
modules/vtk_basic/vtkContourGrid.py
Python
bsd-3-clause
491
[ "VTK" ]
158e95112497d32503c401165b48c789fe26ae81d3dc862a687caa00b82c77b9
from qepy import * from yambopy import * from schedulerpy import * import os from copy import deepcopy class YamboGkkpCompute(): """ Class to obtain qe s.dbph* and yambo ndb.elph* databases starting from scratch. It runs the necessary pw.x and ph.x simulations, optionally followed by the yambo setup. Inputs needed: <required> - scf_input: pw scf input file [NOTE: dvscf and gkkp inputs are automatically generated, check parameters if interested] <optional> - work_dir: directory where flow is run and yambo SAVE will appear - pw_exec_path: path to executables - qe_scheduler: optional scheduler for cluster submission - with_SAVE: if True, workflow will generate yambo SAVE at the end (the python master process will remain active). The workflow can be called a second time switching with_SAVE to True to immediately generate the SAVE. [SUBOPTIONS for with_SAVE] -- expand: whether to expand the matrix elements -- yambo_exec_path: path to executables TODO: allow for random qpoints in ph calculations """ def __init__(self,scf_input,work_dir='.',pw_exec_path='',qe_scheduler=None,with_SAVE=False,yambo_exec_path='',expand=False): if not os.path.isdir(work_dir): os.mkdir(work_dir) self.RUN_path = os.path.abspath(work_dir) self.wait_up = with_SAVE #Slightly restructure dependencies and waith for job completions if SAVE is to be created #Configuring schedulers if qe_scheduler is not None: self.qejobrun= qe_scheduler # Here we use, e.g., slurm else: self.qejobrun = Scheduler.factory(scheduler="bash") # Run without submission #Executables if yambo_exec_path != '': yambo_exec_path+='/' self.yambo = yambo_exec_path + 'yambo' self.yambo_ph = yambo_exec_path + 'yambo_ph' self.p2y = yambo_exec_path + 'p2y' self.ypp_ph = yambo_exec_path + 'ypp_ph' if pw_exec_path != '': pw_exec_path+='/' self.pw = pw_exec_path + 'pw.x' self.ph = pw_exec_path + 'ph.x' self.dynmat = pw_exec_path + 'dynmat.x' # Inputs self.scf_input = scf_input self.prefix = scf_input.prefix # Output names self.out_scf = 'scf.out' self.out_nscf = 'nscf.out' self.out_dvscf = 'dvscf.out' self.out_gkkp = 'gkkp.out' #Start IO self.yf = YamboIO(out_name='YAMBOPY_gkkp_calculation.log',out_path=self.RUN_path,print_to_shell=True) self.yf.IO_start() self.yf.msg('#### GKKP WORKFLOW ####') # Create folder structure self.setup_calculations() # Run jobs if not self.scf_status: self.yf.msg('Running scf.') self.run_scf() if not self.dvscf_status: self.yf.msg('Running dvscf.') self.run_dvscf() if not self.gkkp_status: self.yf.msg('Running gkkp.') self.run_gkkp() if not self.nscf_status: self.yf.msg('Running nscf.') self.run_nscf() # [OPTIONAL] Create SAVE if with_SAVE: self.setup_SAVE() if not self.are_gkkp_there: if self.is_SAVE_there: import shutil shutil.rmtree('%s/SAVE'%self.RUN_path) if expand: save_type = 'expanded_elph' else: save_type = 'elph' self.yf.msg('---- Generating SAVE folder: ----') CreateYamboSave(self.prefix,save_type=save_type,nscf=self.nscf_dir,elph_path=self.gkkp_dir,database=self.RUN_path,\ yambo_exec_path=yambo_exec_path,printIO=True) self.clean_rubbish() #End IO self.yf.IO_close() def setup_calculations(self): """ Generate workflow tree """ # Directory names (hardcoded) dft_dir = '%s/dft'%self.RUN_path self.scf_dir = '%s/scf'%dft_dir self.gkkp_dir = '%s/gkkp'%dft_dir self.nscf_dir = '%s/nscf'%dft_dir # Logicals self.gkkp_status = False self.dvscf_status = False self.scf_status = False self.nscf_status = False if not os.path.isdir(dft_dir): os.mkdir(dft_dir) if not os.path.isdir(self.scf_dir): os.mkdir(self.scf_dir) if not 
os.path.isdir(self.gkkp_dir): os.mkdir(self.gkkp_dir) if not os.path.isdir(self.nscf_dir): os.mkdir(self.nscf_dir) # Check if any gkkp->dvscf->scf calculations have been already done self.gkkp_status = check_qe_completed(self.gkkp_dir,self.prefix,self.out_gkkp,calc_type='gkkp') if self.gkkp_status: self.yf.msg('gkkp calculation found!') self.dvscf_status = True else: self.dvscf_status = check_qe_completed(self.gkkp_dir,self.prefix,self.out_dvscf,calc_type='ph') if self.dvscf_status: self.yf.msg('dvscf calculation found!') else: self.scf_status = check_qe_completed(self.scf_dir,self.prefix,self.out_scf,calc_type='pw') if self.scf_status: self.yf.msg('scf calculation found!') # Check if any nscf->scf calculations have been already done self.nscf_status = check_qe_completed(self.nscf_dir,self.prefix,self.out_nscf,calc_type='pw') if self.nscf_status: self.yf.msg('nscf calculation found') self.scf_status = True else: if not self.scf_status: self.scf_status = check_qe_completed(self.scf_dir,self.prefix,self.out_scf,calc_type='pw') if self.scf_status: self.yf.msg('scf calculation found!') def setup_SAVE(self): """ Expand the workflow tree to include yambo SAVE """ # Check if SAVE and/or gkkp dbs are there already save_dir = '%s/SAVE'%self.RUN_path if os.path.isdir(save_dir): self.yf.msg('SAVE folder found!') self.is_SAVE_there = True if os.path.isfile('%s/ndb.elph_gkkp'%save_dir) or os.path.isfile('%s/ndb.elph_gkkp_expanded'%save_dir): self.yf.msg('ndb.elph databases already found!') self.are_gkkp_there = True else: self.are_gkkp_there = False else: self.is_SAVE_there = False self.are_gkkp_there = False def run_scf(self): """ Run scf calculation """ if self.scf_input.system['nbnd'] is None: raise ValueError('Please specify nbnd in the scf input in order to be able to compute the gkkp elements.') # Write down input inp_name = self.prefix + '.scf' self.scf_input.write('%s/%s'%(self.scf_dir,inp_name)) # Submit calculation jname = 'scf' self.scf_id = shell_qe_run(jname,inp_name,self.out_scf,self.scf_dir,exec=self.pw,scheduler=self.qejobrun) def run_dvscf(self): """ Run dvscf calculation """ # Generate and write down input dvscf_input = self.generate_ph_input('dvscf') inp_name = self.prefix + '.dvscf' dvscf_input.write('%s/%s'%(self.gkkp_dir,inp_name)) # Generate and write down dynmat input dynmat_input = self.generate_dynmat_input() dynp_name = self.prefix + '.dynmat' dynmat_input.write('%s/%s'%(self.gkkp_dir,dynp_name)) # Set dynmat run after completion of main task dyn_run = ["mpirun -np 1 %s -inp %s > dynmat.out"%(self.dynmat,dynp_name)] # Create symlink to qe save if needed commands = [] if not os.path.islink('%s/%s.save'%(self.gkkp_dir,self.prefix)): commands.append('ln -s %s/%s.save %s/'%(self.scf_dir,self.prefix,self.gkkp_dir)) # Manage dependency if self.scf_status: depend = None # No dependency if scf was found else: depend = self.scf_id # Submit calculation jname = 'dvscf' self.dvscf_id = shell_qe_run(jname,inp_name,self.out_dvscf,self.gkkp_dir,exec=self.ph,shell_name='dvscf',\ scheduler=self.qejobrun,pre_run=commands,pos_run=dyn_run,depend_on_JOBID=depend) def run_gkkp(self): """ Run gkkp calculation """ # Generate and write down input gkkp_input = self.generate_ph_input('gkkp') inp_name = self.prefix + '.gkkp' gkkp_input.write('%s/%s'%(self.gkkp_dir,inp_name)) # Create symlink to qe save if needed commands = [] if not os.path.islink('%s/%s.save'%(self.gkkp_dir,self.prefix)): commands.append('ln -s %s/%s.save %s/'%(self.scf_dir,self.prefix,self.gkkp_dir)) # Manage dependency if 
self.dvscf_status: depend = None # No dependency if dvscf was found else: depend = self.dvscf_id # Submit calculation jname = 'gkkp' self.gkkp_id = shell_qe_run(jname,inp_name,self.out_gkkp,self.gkkp_dir,exec=self.ph,shell_name='gkkp',\ scheduler=self.qejobrun,pre_run=commands,depend_on_JOBID=depend) def run_nscf(self): """ Run nscf calculation """ # Generate and write down input nscf_input = self.generate_nscf_input() inp_name = self.prefix + '.nscf' nscf_input.write('%s/%s'%(self.nscf_dir,inp_name)) # Create symlink to qe save if needed commands = [] if not os.path.isdir('%s/%s.save'%(self.nscf_dir,self.prefix)): commands.append('cp -r %s/%s.save %s/'%(self.scf_dir,self.prefix,self.nscf_dir)) # Dependency here may include gkkp job to ensure that this is the last job to be completed if SAVE is to be generated if self.wait_up: if self.gkkp_status and self.scf_status: depend = None # No dependency if scf and gkkp were found elif self.gkkp_status and not self.scf_status: depend = self.gkkp_id # scf was found, not gkkp elif not self.gkkp_status and self.scf_status: depend = self.scf_id # gkkp was found, not scf else: depend = '%s:%s'%(self.scf_id,self.gkkp_id) # double dependency else: if self.scf_status: depend = None # No dependency if scf was found else: depend = self.scf_id # Submit calculation jname = 'nscf' self.nscf_id = shell_qe_run(jname,inp_name,self.out_nscf,self.nscf_dir,exec=self.pw,scheduler=self.qejobrun,\ pre_run=commands,depend_on_JOBID=depend,hang_python=self.wait_up) def generate_nscf_input(self): """ Create nscf input for yambo SAVE starting from scf input """ nscf_input = deepcopy(self.scf_input) nscf_input.control['calculation']="'nscf'" nscf_input.electrons['diago_full_acc'] = ".true." nscf_input.electrons['conv_thr'] = 1e-8 nscf_input.system['force_symmorphic'] = ".true." return nscf_input def generate_ph_input(self,mode): """ Create dvscf or gkkp input starting from scf input - mode: either 'dvscf' or 'gkkp' """ from qepy import PhIn ph_input = PhIn() # Common to dvscf and gkkp ph_input['prefix'] = "'%s'"%self.prefix ph_input['fildyn'] = "'%s'"%(self.prefix+'.dyn') nq1,nq2,nq3 = [ int(nk) for nk in self.scf_input.kpoints ] ph_input.set_nq(nq1,nq2,nq3) ph_input['tr2_ph'] = 1e-14 ph_input['fildvscf']="'dvscf'" ph_input['ldisp']='.true.' ph_input['qplot']='.false.' # Only dvscf if mode=='dvscf': # Add effective charges if dealing with a non-metal is_insulator = 'occupations' not in self.scf_input.system or self.scf_input.system['occupations'] != "'smearing'" if is_insulator: ph_input['epsil']='.true.' self.is_insulator = is_insulator ph_input['electron_phonon']="'dvscf'" ph_input['recover']='.true.' ph_input['trans']='.true.' elif mode=='gkkp': ph_input['electron_phonon']="'yambo'" ph_input['trans']='.false.' 
else: raise ValueError("ph input mode not recognized (either 'dvscf' or 'gkkp')") return ph_input def generate_dynmat_input(self): """ Create dynmat input file in order to treat issues with the frequencies at the Gamma point: -- Apply the acoustic sum rule -- Correct the LO mode (if non-metal) Outputs are saved in the gkkp folder as prefix.GAMMA_eigs_eivs and prefix.GAMMA_eigs_norm_eivs """ from qepy import DynmatIn dm_input = DynmatIn() dm_input['asr']="'crystal'" dm_input['fildyn']="'%s.dyn1'"%self.prefix dm_input['fileig']="'%s.GAMMA_eigs_eivs'"%self.prefix dm_input['filout']="'%s.GAMMA_eigs_norm_eivs'"%self.prefix # Add LO correction along first cartesian axis if dealing with non-metal if self.is_insulator: dm_input['q(1)']=1 dm_input['q(2)']=0 dm_input['q(3)']=0 return dm_input def clean_rubbish(self): """ Remove logs, reports and inputs generated during SAVE creation """ from glob import glob run_dir = self.RUN_path+'/' logs1 = glob(run_dir+'l-*') logs2 = glob(run_dir+'l_*') reports1 = glob(run_dir+'r-*') reports2 = glob(run_dir+'r_*') setups = glob(run_dir+'setup.in*') for log in logs1: os.remove(log) for log in logs2: os.remove(log) for report in reports1: os.remove(report) for report in reports2: os.remove(report) for setup in setups: os.remove(setup) os.remove(run_dir+'gkkp.in')
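
# Illustrative sketch (not part of the class): run_nscf() above resolves its
# scheduler dependency from which previous steps were already found on disk.
# The hypothetical helper below restates that decision as a pure function,
# following the intent of the in-line comments in run_nscf (the nscf job
# waits on whichever jobs were actually submitted in this run).

def _resolve_nscf_dependency(scf_found, gkkp_found, scf_id, gkkp_id, wait_up):
    """Return the job-ID string the nscf job should wait for (None = no wait)."""
    if not wait_up:
        return None if scf_found else scf_id
    if scf_found and gkkp_found:
        return None                         # nothing was submitted
    if scf_found:
        return gkkp_id                      # scf found, gkkp submitted
    if gkkp_found:
        return scf_id                       # gkkp found, scf submitted
    return '%s:%s' % (scf_id, gkkp_id)      # wait on both jobs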
alexmoratalla/yambopy
yambopy/gkkp/compute_gkkp.py
Python
bsd-3-clause
14,445
[ "CRYSTAL", "Yambo" ]
d8f4d0dc98a07697e9c88107d06ba7101a96c62e269392f002886969ab29b0ef
#! /usr/bin/env python
########################################################################
# $HeadURL$
# File :    dirac-wms-jobs-select-output-search
# Author :  Vladimir Romanovsky
########################################################################
"""
  Retrieve output sandbox for DIRAC Jobs for the given selection and search
  for a string in their std.out
"""
__RCSID__ = "$Id$"

import os
from shutil import rmtree

import DIRAC
from DIRAC.Core.Base import Script

Script.registerSwitch( "", "Status=", "Primary status" )
Script.registerSwitch( "", "MinorStatus=", "Secondary status" )
Script.registerSwitch( "", "ApplicationStatus=", "Application status" )
Script.registerSwitch( "", "Site=", "Execution site" )
Script.registerSwitch( "", "Owner=", "Owner (DIRAC nickname)" )
Script.registerSwitch( "", "JobGroup=", "Select jobs for specified job group" )
Script.registerSwitch( "", "Date=", "Date in YYYY-MM-DD format, if not specified default is today" )
Script.registerSwitch( "", "File=", "File name, if not specified default is std.out" )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
                                     'Usage:',
                                     '  %s [option|cfgfile] ... String ...' % Script.scriptName,
                                     'Arguments:',
                                     '  String: string to search for' ] ) )

Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()

# Default values
status = None
minorStatus = None
appStatus = None
site = None
owner = None
jobGroup = None
date = None
filename = 'std.out'

if len( args ) != 1:
  Script.showHelp()

searchstring = str( args[0] )

for switch in Script.getUnprocessedSwitches():
  if switch[0].lower() == "status":
    status = switch[1]
  elif switch[0].lower() == "minorstatus":
    minorStatus = switch[1]
  elif switch[0].lower() == "applicationstatus":
    appStatus = switch[1]
  elif switch[0].lower() == "site":
    site = switch[1]
  elif switch[0].lower() == "owner":
    owner = switch[1]
  elif switch[0].lower() == "jobgroup":
    jobGroup = switch[1]
  elif switch[0].lower() == "date":
    date = switch[1]
  elif switch[0].lower() == "file":
    filename = switch[1]

selDate = date
if not date:
  selDate = 'Today'

from DIRAC.Interfaces.API.Dirac import Dirac

dirac = Dirac()
exitCode = 0
errorList = []
resultDict = {}

result = dirac.selectJobs( status = status, minorStatus = minorStatus,
                           applicationStatus = appStatus, site = site,
                           owner = owner, jobGroup = jobGroup, date = date )
if result['OK']:
  jobs = result['Value']
else:
  print "Error in selectJob", result['Message']
  DIRAC.exit( 2 )

for job in jobs:
  result = dirac.getOutputSandbox( job )
  if result['OK']:
    if os.path.exists( '%s' % job ):
      lines = []
      try:
        lines = open( os.path.join( job, filename ) ).readlines()
      except Exception as x:
        errorList.append( ( job, x ) )
      for line in lines:
        if line.count( searchstring ):
          resultDict[job] = line
      rmtree( "%s" % ( job ) )
  else:
    errorList.append( ( job, result['Message'] ) )
    exitCode = 2

for result in resultDict.iteritems():
  print result

DIRAC.exit( exitCode )
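
# Example invocations (illustrative; the switch names are the ones registered
# above, while the site/status/search values are placeholders):
#
#   dirac-wms-jobs-select-output-search --Status=Failed "segmentation fault"
#   dirac-wms-jobs-select-output-search --Site=LCG.CERN.ch --Date=2014-01-31 \
#       --File=std.err "No such file"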
andresailer/DIRAC
Interfaces/scripts/dirac-wms-jobs-select-output-search.py
Python
gpl-3.0
3,268
[ "DIRAC" ]
8060c767420ef9b2a0dc1b96cabfe803a6706f40ce3c694980196ee8e57861a7
__RCSID__ = "$Id$" from DIRAC import S_OK, S_ERROR, gLogger from DIRAC.Core.Utilities import ThreadScheduler from DIRAC.Core.Base.ExecutorMindHandler import ExecutorMindHandler from DIRAC.WorkloadManagementSystem.Client.JobState.JobState import JobState from DIRAC.WorkloadManagementSystem.Client.JobState.CachedJobState import CachedJobState from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB from DIRAC.WorkloadManagementSystem.DB.JobLoggingDB import JobLoggingDB from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import TaskQueueDB def cleanTaskQueues(): tqDB = TaskQueueDB() jobDB = JobDB() logDB = JobLoggingDB() result = tqDB.enableAllTaskQueues() if not result['OK']: return result result = tqDB.findOrphanJobs() if not result['OK']: return result for jid in result['Value']: result = tqDB.deleteJob(jid) if not result['OK']: gLogger.error("Cannot delete from TQ job %s" % jid, result['Message']) continue result = jobDB.rescheduleJob(jid) if not result['OK']: gLogger.error("Cannot reschedule in JobDB job %s" % jid, result['Message']) continue result = logDB.addLoggingRecord(jid, "Received", "", "", source="JobState") if not result['OK']: gLogger.error("Cannot add logging record in JobLoggingDB %s" % jid, result['Message']) continue return S_OK() class OptimizationMindHandler(ExecutorMindHandler): __jobDB = False __optimizationStates = ['Received', 'Checking'] __loadTaskId = False MSG_DEFINITIONS = {'OptimizeJobs': {'jids': (list, tuple)}} auth_msg_OptimizeJobs = ['all'] def msg_OptimizeJobs(self, msgObj): jids = msgObj.jids for jid in jids: try: jid = int(jid) except ValueError: self.log.error("Job ID %s has to be an integer" % jid) continue # Forget and add task to ensure state is reset self.forgetTask(jid) result = self.executeTask(jid, CachedJobState(jid)) if not result['OK']: self.log.error("Could not add job %s to optimization: %s" % (jid, result['Value'])) else: self.log.info("Received new job %s" % jid) return S_OK() @classmethod def __loadJobs(cls, eTypes=None): log = cls.log if cls.__loadTaskId: period = cls.srv_getCSOption("LoadJobPeriod", 300) ThreadScheduler.gThreadScheduler.setTaskPeriod(cls.__loadTaskId, period) if not eTypes: eConn = cls.getExecutorsConnected() eTypes = [eType for eType in eConn if eConn[eType] > 0] if not eTypes: log.info("No optimizer connected. Skipping load") return S_OK() log.info("Getting jobs for %s" % ",".join(eTypes)) checkingMinors = [eType.split("/")[1] for eType in eTypes if eType != "WorkloadManagement/JobPath"] for opState in cls.__optimizationStates: # For Received states if opState == "Received": if 'WorkloadManagement/JobPath' not in eTypes: continue jobCond = {'Status': opState} # For checking states if opState == "Checking": if not checkingMinors: continue jobCond = {'Status': opState, 'MinorStatus': checkingMinors} # Do the magic jobTypeCondition = cls.srv_getCSOption("JobTypeRestriction", []) if jobTypeCondition: jobCond['JobType'] = jobTypeCondition result = cls.__jobDB.selectJobs(jobCond, limit=cls.srv_getCSOption("JobQueryLimit", 10000)) if not result['OK']: return result jidList = result['Value'] knownJids = cls.getTaskIds() added = 0 for jid in jidList: jid = long(jid) if jid not in knownJids: # Same as before. Check that the state is ok. 
cls.executeTask(jid, CachedJobState(jid)) added += 1 log.info("Added %s/%s jobs for %s state" % (added, len(jidList), opState)) return S_OK() @classmethod def initializeHandler(cls, serviceInfoDict): try: from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB cls.__jobDB = JobDB() except Exception as excp: return S_ERROR("Could not connect to JobDB: %s" % str(excp)) cls.setFailedOnTooFrozen(False) cls.setFreezeOnFailedDispatch(False) cls.setFreezeOnUnknownExecutor(False) cls.setAllowedClients("JobManager") cleanTaskQueues() period = cls.srv_getCSOption("LoadJobPeriod", 60) result = ThreadScheduler.gThreadScheduler.addPeriodicTask(period, cls.__loadJobs) if not result['OK']: return result cls.__loadTaskId = result['Value'] return cls.__loadJobs() @classmethod def exec_executorConnected(cls, trid, eTypes): return cls.__loadJobs(eTypes) @classmethod def exec_taskProcessed(cls, jid, jobState, eType): cls.log.info("Saving changes for job %s after %s" % (jid, eType)) result = jobState.commitChanges() if not result['OK']: cls.log.error("Could not save changes for job", "%s: %s" % (jid, result['Message'])) return result @classmethod def exec_taskFreeze(cls, jid, jobState, eType): cls.log.info("Saving changes for job %s before freezing from %s" % (jid, eType)) result = jobState.commitChanges() if not result['OK']: cls.log.error("Could not save changes for job", "%s: %s" % (jid, result['Message'])) return result @classmethod def exec_dispatch(cls, jid, jobState, pathExecuted): result = jobState.getStatus() if not result['OK']: cls.log.error("Could not get status for job", "%s: %s" % (jid, result['Message'])) return S_ERROR("Could not retrieve status: %s" % result['Message']) status, minorStatus = result['Value'] # If not in proper state then end chain if status not in cls.__optimizationStates: cls.log.info("Dispatching job %s out of optimization" % jid) return S_OK() # If received send to JobPath if status == "Received": cls.log.info("Dispatching job %s to JobPath" % jid) return S_OK("WorkloadManagement/JobPath") result = jobState.getOptParameter('OptimizerChain') if not result['OK']: cls.log.error("Could not get optimizer chain for job, auto resetting job", "%s: %s" % (jid, result['Message'])) result = jobState.resetJob() if not result['OK']: cls.log.error("Could not reset job", "%s: %s" % (jid, result['Message'])) return S_ERROR("Cound not get OptimizationChain or reset job %s" % jid) return S_OK("WorkloadManagement/JobPath") optChain = result['Value'] if minorStatus not in optChain: cls.log.error("Next optimizer is not in the chain for job", "%s: %s not in %s" % (jid, minorStatus, optChain)) return S_ERROR("Next optimizer %s not in chain %s" % (minorStatus, optChain)) cls.log.info("Dispatching job %s to %s" % (jid, minorStatus)) return S_OK("WorkloadManagement/%s" % minorStatus) @classmethod def exec_prepareToSend(cls, jid, jobState, eId): return jobState.recheckValidity() @classmethod def exec_serializeTask(cls, jobState): return S_OK(jobState.serialize()) @classmethod def exec_deserializeTask(cls, taskStub): return CachedJobState.deserialize(taskStub) @classmethod def exec_taskError(cls, jid, cachedJobState, errorMsg): result = cachedJobState.commitChanges() if not result['OK']: cls.log.error("Cannot write changes to job %s: %s" % (jid, result['Message'])) jobState = JobState(jid) result = jobState.getStatus() if result['OK']: if result['Value'][0].lower() == "failed": return S_OK() else: cls.log.error("Could not get status of job %s: %s" % (jid, result['Message '])) cls.log.notice("Job %s: 
Setting to Failed|%s" % (jid, errorMsg)) return jobState.setStatus("Failed", errorMsg, source='OptimizationMindHandler')
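
# Illustrative sketch (not part of the handler): exec_dispatch() above picks
# the next executor from the job status. The hypothetical helper below
# restates that routing table for readability; it returns None when the job
# leaves optimization.

def _next_executor(status, minorStatus, optChain, optimizationStates=('Received', 'Checking')):
  if status not in optimizationStates:
    return None                                 # job is out of optimization
  if status == "Received":
    return "WorkloadManagement/JobPath"         # entry point of the chain
  if minorStatus not in optChain:
    raise ValueError("Next optimizer %s not in chain %s" % (minorStatus, optChain))
  return "WorkloadManagement/%s" % minorStatus  # next optimizer in the chain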
fstagni/DIRAC
WorkloadManagementSystem/Service/OptimizationMindHandler.py
Python
gpl-3.0
7,781
[ "DIRAC" ]
5781453d9d0ae6a07668e17d52100c5221e5e7cfd70f5c9298011bd7c92784fb
import suspect
import numpy as np

import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")


def test_sift_preserves_dtype():
    time_axis = np.arange(0, 1.024, 1e-3)
    input_signal = suspect.basis.gaussian(time_axis, 0, 0, 35)
    input_signal += np.random.randn(1024) * 0.00001
    complex_denoise = suspect.processing.denoising.sift(input_signal, 0.001)
    assert complex_denoise.dtype == np.complex128
    real_denoise = suspect.processing.denoising.sift(np.real(input_signal), 0.001)
    assert real_denoise.dtype == np.float64
    np.testing.assert_allclose(np.real(complex_denoise), real_denoise)


def test_spline():
    # we need to check if this runs correctly when number of splines is not a
    # factor of length of signal, so that padding is required.

    # generate a sample signal
    input_signal = np.random.randn(295) + 10

    # denoise the signal with splines
    output_signal = suspect.processing.denoising.spline(input_signal, 32, 2)

    # main thing is that the test runs without errors, but we can also check
    # for reduced std in the result
    assert np.std(output_signal) < np.std(input_signal)


def test_wavelet():
    # this is to check if the code runs without throwing double -> integer
    # conversion issues

    # generate a sample signal
    input_signal = np.random.randn(295) + 10

    # denoise the signal with wavelets
    output_signal = suspect.processing.denoising.wavelet(input_signal, "db8", 1e-2)

    # main thing is that the test runs without errors, but we can also check
    # for reduced std in the result
    assert np.std(output_signal) < np.std(input_signal)
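
# Illustrative usage sketch (not a test): the same denoising entry points the
# tests exercise, applied to a synthetic noisy Gaussian signal. The numeric
# arguments here are arbitrary demo choices, not recommended defaults.

def demo_denoise():
    time_axis = np.arange(0, 1.024, 1e-3)
    clean = suspect.basis.gaussian(time_axis, 0, 0, 35)
    noisy = clean + np.random.randn(len(time_axis)) * 0.001
    # threshold/width arguments mirror the calls in the tests above
    sifted = suspect.processing.denoising.sift(noisy, 0.001)
    splined = suspect.processing.denoising.spline(np.real(noisy), 32, 2)
    return sifted, splined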
openmrslab/suspect
tests/test_mrs/test_processing/test_denoising.py
Python
mit
1,648
[ "Gaussian" ]
de65ceb689f17506e684967de6d5beb9381d82f52cce78f59427297b50a9bda0
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 19 08:35:55 2016

@author: nn31
"""
import pandas as pd
import pickle
import matplotlib.pyplot as plt
from robocomp import Model
import matplotlib.dates as dt
import numpy as np
from datetime import datetime

# read in cmmi data
cmmi = pd.read_csv("/Volumes/DCCRP_projects/CMMI/data/QDACT 05-03-2016.csv",
                   parse_dates=['AssessmentDate', 'AdmissionDate',
                                'DischargeDate', 'PalliativeDischargeDate'])
cmmi.sort_values(by=['internalid', 'AssessmentDate'], ascending=[1, 1], inplace=True)

dd = pickle.load(open("/Users/nn31/Dropbox/40-githubRrepos/qdact-basic-analysis/notebooks/python_scripts/02_data_dictionary_dict.p", "rb"))

# Looking for a plot to help with visualizing the repeated measures data.
# For this analysis we are only interested in the time to first visit, so
# we'll need to subset to that using a pandas groupby.

# Getting rid of potential data entries (i.e. those before 2014-01-01)
cmmi = cmmi[cmmi.AssessmentDate > '2014-01-01']
internalIDGroup = cmmi.groupby('internalid')
# fv = internalIDGroup.first()
fv = cmmi

missings = Model('set_missing_codes')
fv.PrimaryDiagnosis.value_counts(dropna=False)
fv.PrimaryDiagnosis = missings.scoreIt(fv.PrimaryDiagnosis.tolist())
fv.PrimaryDiagnosis.value_counts(dropna=False)

# All my row variables set to missing
rowVars = ['ESASAnxiety', 'ESASAppetite', 'ESASConstipation', 'ESASDepression',
           'ESASDrowsiness', 'ESASNausea', 'ESASPain', 'ESASShortnessOfBreath',
           'ESASTiredness', 'ESASWellBeing', 'PPSScore']
for x in rowVars:
    fv[x] = missings.scoreIt(fv[x].tolist())

fv['x_orig'] = fv.AssessmentDate.apply(lambda x: str(x))

# for each variable:
fvv = fv.groupby('internalid')

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.xaxis_date()
hfmt = dt.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(hfmt)
ax.set_title('Anxiety scatterplot - Overall')
ax.set_xlabel('Assessment Date')
ax.set_ylabel('Score - Continuous')
plt.setp(ax.get_xticklabels(), size=8)


# for each group, we'll need to do data stuff
def dataChurnAxPlot(z):
    zz = z[['x_orig', 'ESASAnxiety']].dropna()
    x = [datetime.strptime(d, '%Y-%m-%d %H:%M:%S') for d in zz.x_orig]
    xs = dt.date2num(x)
    ax.plot(xs, zz.ESASAnxiety, linewidth=2)
    ax.scatter(xs, zz.ESASAnxiety, s=10, alpha=0.5)
    print(pd.DataFrame({"xs": xs, "y": zz.ESASAnxiety}))


fvv.apply(lambda x: dataChurnAxPlot(x) if x.shape[0] > 10 else None)
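
# Illustrative sketch: the same groupby/plot-per-group ("spaghetti") pattern
# as above, driven by synthetic data so it runs without access to the QDACT
# csv. All values below are made up for demonstration.

def demo_spaghetti_plot():
    rng = pd.date_range('2014-01-01', periods=8, freq='30D')
    demo = pd.DataFrame({
        'internalid': [1] * 8 + [2] * 8,
        'AssessmentDate': list(rng) * 2,
        'ESASAnxiety': np.random.randint(0, 10, 16),
    })
    fig2, ax2 = plt.subplots()
    for _, grp in demo.groupby('internalid'):
        ax2.plot(grp.AssessmentDate, grp.ESASAnxiety, linewidth=2)
        ax2.scatter(grp.AssessmentDate, grp.ESASAnxiety, s=10, alpha=0.5)
    ax2.set_xlabel('Assessment Date')
    ax2.set_ylabel('Score - Continuous')
    return fig2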
benneely/qdact-basic-analysis
notebooks/python_scripts/08_repeated_measures_viz.py
Python
gpl-3.0
2,451
[ "VisIt" ]
54df192b8037eb5aba1a3da0d8c8e7043ac3bc298d6528675aef50fda0286031
# Copyright (C) 2012,2013 # Max Planck Institute for Polymer Research # Copyright (C) 2008,2009,2010,2011 # Max-Planck-Institute for Polymer Research & Fraunhofer SCAI # # This file is part of ESPResSo++. # # ESPResSo++ is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo++ is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. r""" **************************************************** espressopp.interaction.StillingerWeberPairTermCapped **************************************************** This class provides methods to compute forces and energies of 2 body term of Stillinger-Weber potential. If the distance is smaller than the cap-radius: .. math:: U = A [ d_{12}^{-p} (B - 1) ] e^{ \frac{1}{d_{12}-r_c}} where :math:`r_c` is the cutoff-radius. .. function:: espressopp.interaction.StillingerWeberPairTermCapped(A, B, p, q, epsilon, sigma, cutoff, caprad) :param A: :param B: :param p: :param q: :param epsilon: (default: 1.0) :param sigma: (default: 1.0) :param cutoff: (default: infinity) :param caprad: (default: 0.0) :type A: :type B: :type p: :type q: :type epsilon: real :type sigma: real :type cutoff: :type caprad: real .. function:: espressopp.interaction.VerletListStillingerWeberPairTermCapped(vl) :param vl: :type vl: .. function:: espressopp.interaction.VerletListStillingerWeberPairTermCapped.getCaprad() :rtype: .. function:: espressopp.interaction.VerletListStillingerWeberPairTermCapped.getPotential(type1, type2) :param type1: :param type2: :type type1: :type type2: :rtype: .. function:: espressopp.interaction.VerletListStillingerWeberPairTermCapped.getVerletList() :rtype: A Python list of lists. .. function:: espressopp.interaction.VerletListStillingerWeberPairTermCapped.setPotential(type1, type2, potential) :param type1: :param type2: :param potential: :type type1: :type type2: :type potential: .. function:: espressopp.interaction.VerletListAdressStillingerWeberPairTermCapped(vl, fixedtupleList) :param vl: :param fixedtupleList: :type vl: :type fixedtupleList: .. function:: espressopp.interaction.VerletListAdressStillingerWeberPairTermCapped.setPotentialAT(type1, type2, potential) :param type1: :param type2: :param potential: :type type1: :type type2: :type potential: .. function:: espressopp.interaction.VerletListAdressStillingerWeberPairTermCapped.setPotentialCG(type1, type2, potential) :param type1: :param type2: :param potential: :type type1: :type type2: :type potential: .. function:: espressopp.interaction.VerletListHadressStillingerWeberPairTermCapped(vl, fixedtupleList) :param vl: :param fixedtupleList: :type vl: :type fixedtupleList: .. function:: espressopp.interaction.VerletListHadressStillingerWeberPairTermCapped.setPotentialAT(type1, type2, potential) :param type1: :param type2: :param potential: :type type1: :type type2: :type potential: .. function:: espressopp.interaction.VerletListHadressStillingerWeberPairTermCapped.setPotentialCG(type1, type2, potential) :param type1: :param type2: :param potential: :type type1: :type type2: :type potential: .. 
function:: espressopp.interaction.CellListStillingerWeberPairTermCapped(stor) :param stor: :type stor: .. function:: espressopp.interaction.CellListStillingerWeberPairTermCapped.setPotential(type1, type2, potential) :param type1: :param type2: :param potential: :type type1: :type type2: :type potential: .. function:: espressopp.interaction.FixedPairListStillingerWeberPairTermCapped(system, vl, potential) :param system: :param vl: :param potential: :type system: :type vl: :type potential: .. function:: espressopp.interaction.FixedPairListStillingerWeberPairTermCapped.setPotential(potential) :param potential: :type potential: """ from espressopp import pmi, infinity from espressopp.esutil import * from espressopp.interaction.Potential import * from espressopp.interaction.Interaction import * from _espressopp import interaction_StillingerWeberPairTermCapped, \ interaction_VerletListStillingerWeberPairTermCapped, \ interaction_VerletListAdressStillingerWeberPairTermCapped, \ interaction_VerletListHadressStillingerWeberPairTermCapped, \ interaction_CellListStillingerWeberPairTermCapped, \ interaction_FixedPairListStillingerWeberPairTermCapped class StillingerWeberPairTermCappedLocal(PotentialLocal, interaction_StillingerWeberPairTermCapped): def __init__(self, A, B, p, q, epsilon=1.0, sigma=1.0, cutoff=infinity, caprad = 0.0): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, interaction_StillingerWeberPairTermCapped, A, B, p, q, epsilon, sigma, cutoff, caprad) class VerletListStillingerWeberPairTermCappedLocal(InteractionLocal, interaction_VerletListStillingerWeberPairTermCapped): def __init__(self, vl): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, interaction_VerletListStillingerWeberPairTermCapped, vl) def setPotential(self, type1, type2, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): self.cxxclass.setPotential(self, type1, type2, potential) def getPotential(self, type1, type2): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): return self.cxxclass.getPotential(self, type1, type2) def getVerletListLocal(self): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): return self.cxxclass.getVerletList(self) def getCaprad(self): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): return self.cxxclass.getCaprad(self) class VerletListAdressStillingerWeberPairTermCappedLocal(InteractionLocal, interaction_VerletListAdressStillingerWeberPairTermCapped): def __init__(self, vl, fixedtupleList): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, interaction_VerletListAdressStillingerWeberPairTermCapped, vl, fixedtupleList) def setPotentialAT(self, type1, type2, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): self.cxxclass.setPotentialAT(self, type1, type2, potential) def setPotentialCG(self, type1, type2, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): self.cxxclass.setPotentialCG(self, type1, type2, potential) class VerletListHadressStillingerWeberPairTermCappedLocal(InteractionLocal, 
interaction_VerletListHadressStillingerWeberPairTermCapped): def __init__(self, vl, fixedtupleList): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, interaction_VerletListHadressStillingerWeberPairTermCapped, vl, fixedtupleList) def setPotentialAT(self, type1, type2, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): self.cxxclass.setPotentialAT(self, type1, type2, potential) def setPotentialCG(self, type1, type2, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): self.cxxclass.setPotentialCG(self, type1, type2, potential) class CellListStillingerWeberPairTermCappedLocal(InteractionLocal, interaction_CellListStillingerWeberPairTermCapped): def __init__(self, stor): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, interaction_CellListStillingerWeberPairTermCapped, stor) def setPotential(self, type1, type2, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): self.cxxclass.setPotential(self, type1, type2, potential) class FixedPairListStillingerWeberPairTermCappedLocal(InteractionLocal, interaction_FixedPairListStillingerWeberPairTermCapped): def __init__(self, system, vl, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, interaction_FixedPairListStillingerWeberPairTermCapped, system, vl, potential) def setPotential(self, potential): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): self.cxxclass.setPotential(self, potential) if pmi.isController: class StillingerWeberPairTermCapped(Potential): 'The Lennard-Jones potential.' pmiproxydefs = dict( cls = 'espressopp.interaction.StillingerWeberPairTermCappedLocal', pmiproperty = ['A', 'B', 'p', 'q', 'epsilon', 'sigma', 'caprad'], pmiinvoke = ['getCaprad'] ) class VerletListStillingerWeberPairTermCapped(Interaction): __metaclass__ = pmi.Proxy pmiproxydefs = dict( cls = 'espressopp.interaction.VerletListStillingerWeberPairTermCappedLocal', pmicall = ['setPotential', 'getPotential', 'getVerletList'] ) class VerletListAdressStillingerWeberPairTermCapped(Interaction): __metaclass__ = pmi.Proxy pmiproxydefs = dict( cls = 'espressopp.interaction.VerletListAdressStillingerWeberPairTermCappedLocal', pmicall = ['setPotentialAT', 'setPotentialCG'] ) class VerletListHadressStillingerWeberPairTermCapped(Interaction): __metaclass__ = pmi.Proxy pmiproxydefs = dict( cls = 'espressopp.interaction.VerletListHadressStillingerWeberPairTermCappedLocal', pmicall = ['setPotentialAT', 'setPotentialCG'] ) class CellListStillingerWeberPairTermCapped(Interaction): __metaclass__ = pmi.Proxy pmiproxydefs = dict( cls = 'espressopp.interaction.CellListStillingerWeberPairTermCappedLocal', pmicall = ['setPotential'] ) class FixedPairListStillingerWeberPairTermCapped(Interaction): __metaclass__ = pmi.Proxy pmiproxydefs = dict( cls = 'espressopp.interaction.FixedPairListStillingerWeberPairTermCappedLocal', pmicall = ['setPotential'] )
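
# Illustrative usage sketch (not part of the module): wiring the capped
# two-body Stillinger-Weber term into a simulation. `system` and `vl` are
# assumed to be an existing espressopp System and VerletList built elsewhere;
# the numerical values are placeholders, not validated parameters.
#
#   pot = espressopp.interaction.StillingerWeberPairTermCapped(
#       A=7.05, B=0.60, p=4, q=0, epsilon=1.0, sigma=1.0,
#       cutoff=2.5, caprad=0.1)
#   interaction = espressopp.interaction.VerletListStillingerWeberPairTermCapped(vl)
#   interaction.setPotential(type1=0, type2=0, potential=pot)
#   system.addInteraction(interaction)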
kkreis/espressopp
src/interaction/StillingerWeberPairTermCapped.py
Python
gpl-3.0
11,269
[ "ESPResSo" ]
1665d12cef307dd99f8f49a06e59c09f9ae6b19ae138ed080b432cb216fe57dd
 
#!/usr/bin/env python

# This tests vtkCompositeCutter

import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()


class TestCompositeCutter(Testing.vtkTest):

    def testAMR(self):
        filename = VTK_DATA_ROOT + "/Data/AMR/Enzo/DD0010/moving7_0010.hierarchy"
        reader = vtk.vtkAMREnzoReader()
        reader.SetFileName(filename)
        reader.SetMaxLevel(10)
        reader.SetCellArrayStatus("TotalEnergy", 1)

        plane = vtk.vtkPlane()
        plane.SetOrigin(0.5, 0.5, 0.5)
        plane.SetNormal(1, 0, 0)

        cutter = vtk.vtkCompositeCutter()
        cutter.SetCutFunction(plane)
        cutter.SetInputConnection(reader.GetOutputPort())
        cutter.Update()

        slice = cutter.GetOutputDataObject(0)
        self.assertEqual(slice.GetNumberOfCells(), 662)


if __name__ == "__main__":
    Testing.main([(TestCompositeCutter, 'test')])
hlzz/dotfiles
graphics/VTK-7.0.0/Filters/Core/Testing/Python/TestCompositeCutter.py
Python
bsd-3-clause
903
[ "VTK" ]
3aeb1946f0d78bac86ec9123b76c9ceee420ce61f286137c3eefe875cacf75da
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Corinne Maufrais
# Institut Pasteur, Centre d'informatique pour les biologistes
# corinne.maufrais@pasteur.fr
#
# version 2.1

import os
import sys
import argparse

try:
    LIB = os.environ['RANKOPTIMIZERLIB']
except:
    LIB = '/usr/local/bin'

if LIB not in sys.path:
    sys.path.append(LIB)

try:
    LIB2 = os.environ['BLAST2TC_LIB']
except:
    LIB2 = '/usr/local/bin'

if LIB2 not in sys.path:
    sys.path.append(LIB2)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        prog='taxoextract.py',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Extract a list of entries from a taxoptimizer output file")

    usage = "taxoextract [options] -i <file>/-j <file> -b <file>"

    general_options = parser.add_argument_group(title="Options", description=None)
    general_options.add_argument("-i", "--one_column_file", dest="offsetfh",
                                 metavar="File", type=file, required=True,
                                 help="List of taxoptimizer's lines to parse (offset numbers given by kronaextract).")
    general_options.add_argument("-b", "--taxofile", dest="taxofh",
                                 help="taxoptimizer output file (format: blast m8 + OC + taxonomy +/- DE).",
                                 metavar="FILE", type=file, required=True)
    general_options.add_argument("-a", "--append", dest="ap_mode",
                                 help="append mode. Result will be appended to the output file "
                                      "instead of creating a new one (Default: write mode)",
                                 action="store_true", default=False)
    general_options.add_argument("-o", "--out_file", dest="outfile",
                                 help="Output file", metavar="FILE", required=True)

    args = parser.parse_args()

    if args.ap_mode is True:
        extractfh = open(args.outfile, 'a')
    else:
        extractfh = open(args.outfile, 'w')

    offline = args.offsetfh.readline()
    while offline:
        off_field = offline.strip()
        try:
            position = int(off_field)
        except:
            print >>sys.stderr, "format error: one column file is expected (int)"
            sys.exit(1)
        args.taxofh.seek(position)
        blLine = args.taxofh.readline()
        print >>extractfh, blLine[:-1]
        offline = args.offsetfh.readline()
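
# Illustrative sketch (not part of the script): the extraction above relies on
# byte offsets -- seek() to an offset recorded earlier (here produced by
# kronaextract) and a single readline() returns exactly one taxoptimizer line.
# The self-contained demo below shows the same offset round-trip on a
# throwaway file.
#
#     import tempfile
#
#     with tempfile.NamedTemporaryFile(mode='w+', suffix='.txt') as fh:
#         offsets = []
#         for line in ("first line\n", "second line\n", "third line\n"):
#             offsets.append(fh.tell())   # remember where each line starts
#             fh.write(line)
#         fh.seek(offsets[1])
#         assert fh.readline() == "second line\n"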
C3BI-pasteur-fr/taxo_pack
src/taxoextract.py
Python
gpl-3.0
2,816
[ "BLAST" ]
8551fd0894003d60ecd229b4932f9e19e20d56d18294727d396706590bc223aa
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

import os
import tempfile
import unittest

import numpy as np

from pymatgen.core.structure import Structure
from pymatgen.io.abinit.inputs import (
    BasicAbinitInput,
    BasicMultiDataset,
    ShiftMode,
    calc_shiftk,
    ebands_input,
    gs_input,
    ion_ioncell_relax_input,
    num_valence_electrons,
)
from pymatgen.util.testing import PymatgenTest

_test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "test_files", "abinit")


def abiref_file(filename):
    """Return absolute path to filename in ~pymatgen/test_files/abinit"""
    return os.path.join(_test_dir, filename)


def abiref_files(*filenames):
    """Return list of absolute paths to filenames in ~pymatgen/test_files/abinit"""
    return [os.path.join(_test_dir, f) for f in filenames]


class AbinitInputTestCase(PymatgenTest):
    """Unit tests for BasicAbinitInput."""

    def test_api(self):
        """Testing BasicAbinitInput API."""
        # Build simple input with structure and pseudos
        unit_cell = {
            "acell": 3 * [10.217],
            "rprim": [[0.0, 0.5, 0.5], [0.5, 0.0, 0.5], [0.5, 0.5, 0.0]],
            "ntypat": 1,
            "znucl": [14],
            "natom": 2,
            "typat": [1, 1],
            "xred": [[0.0, 0.0, 0.0], [0.25, 0.25, 0.25]],
        }
        inp = BasicAbinitInput(structure=unit_cell, pseudos=abiref_file("14si.pspnc"))

        shiftk = [[0.5, 0.5, 0.5], [0.5, 0.0, 0.0], [0.0, 0.5, 0.0], [0.0, 0.0, 0.5]]
        self.assertArrayEqual(calc_shiftk(inp.structure), shiftk)
        assert num_valence_electrons(inp.structure, inp.pseudos) == 8

        repr(inp), str(inp)
        assert len(inp) == 0 and not inp
        assert inp.get("foo", "bar") == "bar" and inp.pop("foo", "bar") == "bar"
        assert inp.comment is None
        inp.set_comment("This is a comment")
        assert inp.comment == "This is a comment"
        assert inp.isnc and not inp.ispaw

        inp["ecut"] = 1
        assert inp.get("ecut") == 1 and len(inp) == 1 and "ecut" in inp.keys() and "foo" not in inp

        # Test to_string
        assert inp.to_string(with_structure=True, with_pseudos=True)
        assert inp.to_string(with_structure=False, with_pseudos=False)

        inp.set_vars(ecut=5, toldfe=1e-6)
        assert inp["ecut"] == 5
        inp.set_vars_ifnotin(ecut=-10)
        assert inp["ecut"] == 5

        _, tmpname = tempfile.mkstemp(text=True)
        inp.write(filepath=tmpname)

        # Cannot change structure variables directly.
        with self.assertRaises(inp.Error):
            inp.set_vars(unit_cell)

        with self.assertRaises(TypeError):
            inp.add_abiobjects({})

        with self.assertRaises(KeyError):
            inp.remove_vars("foo", strict=True)
        assert not inp.remove_vars("foo", strict=False)

        # Test deepcopy and remove_vars.
        inp["bdgw"] = [1, 2]
        inp_copy = inp.deepcopy()
        inp_copy["bdgw"][1] = 3
        assert inp["bdgw"] == [1, 2]
        assert inp.remove_vars("bdgw") and "bdgw" not in inp

        removed = inp.pop_tolerances()
        assert len(removed) == 1 and removed["toldfe"] == 1e-6

        # Test set_spin_mode
        old_vars = inp.set_spin_mode("polarized")
        assert "nsppol" in inp and inp["nspden"] == 2 and inp["nspinor"] == 1
        inp.set_vars(old_vars)

        # Test set_structure
        new_structure = inp.structure.copy()
        new_structure.perturb(distance=0.1)
        inp.set_structure(new_structure)
        assert inp.structure == new_structure

        # Compatible with Pickle and MSONable?
        self.serialize_with_pickle(inp, test_eq=False)

    def test_input_errors(self):
        """Testing typical BasicAbinitInput Error"""
        si_structure = Structure.from_file(abiref_file("si.cif"))

        # Ambiguous list of pseudos.
        with self.assertRaises(BasicAbinitInput.Error):
            BasicAbinitInput(si_structure, pseudos=abiref_files("14si.pspnc", "14si.4.hgh"))

        # Pseudos do not match structure.
        with self.assertRaises(BasicAbinitInput.Error):
            BasicAbinitInput(si_structure, pseudos=abiref_file("H-wdr.oncvpsp"))

        si1_negative_volume = dict(
            ntypat=1,
            natom=1,
            typat=[1],
            znucl=14,
            acell=3 * [7.60],
            rprim=[[0.0, 0.5, 0.5], [-0.5, -0.0, -0.5], [0.5, 0.5, 0.0]],
            xred=[[0.0, 0.0, 0.0]],
        )

        # Negative triple product.
        with self.assertRaises(BasicAbinitInput.Error):
            BasicAbinitInput(si1_negative_volume, pseudos=abiref_files("14si.pspnc"))

    def test_helper_functions(self):
        """Testing BasicAbinitInput helper functions."""
        inp = BasicAbinitInput(structure=abiref_file("si.cif"), pseudos="14si.pspnc", pseudo_dir=_test_dir)

        inp.set_kmesh(ngkpt=(1, 2, 3), shiftk=(1, 2, 3, 4, 5, 6))
        assert inp["kptopt"] == 1 and inp["nshiftk"] == 2

        inp.set_gamma_sampling()
        assert inp["kptopt"] == 1 and inp["nshiftk"] == 1
        assert np.all(inp["shiftk"] == 0)

        inp.set_kpath(ndivsm=3, kptbounds=None)
        assert inp["ndivsm"] == 3 and inp["iscf"] == -2 and len(inp["kptbounds"]) == 12


class TestMultiDataset(PymatgenTest):
    """Unit tests for BasicMultiDataset."""

    def test_api(self):
        """Testing BasicMultiDataset API."""
        structure = Structure.from_file(abiref_file("si.cif"))
        pseudo = abiref_file("14si.pspnc")
        pseudo_dir = os.path.dirname(pseudo)

        multi = BasicMultiDataset(structure=structure, pseudos=pseudo)
        with self.assertRaises(ValueError):
            BasicMultiDataset(structure=structure, pseudos=pseudo, ndtset=-1)

        multi = BasicMultiDataset(structure=structure, pseudos=pseudo, pseudo_dir=pseudo_dir)

        assert len(multi) == 1 and multi.ndtset == 1
        assert multi.isnc
        for i, inp in enumerate(multi):
            assert list(inp.keys()) == list(multi[i].keys())

        multi.addnew_from(0)
        assert multi.ndtset == 2 and multi[0] is not multi[1]
        assert multi[0].structure == multi[1].structure
        assert multi[0].structure is not multi[1].structure

        multi.set_vars(ecut=2)
        assert all(inp["ecut"] == 2 for inp in multi)
        self.assertEqual(multi.get("ecut"), [2, 2])

        multi[1].set_vars(ecut=1)
        assert multi[0]["ecut"] == 2 and multi[1]["ecut"] == 1
        self.assertEqual(multi.get("ecut"), [2, 1])
        self.assertEqual(multi.get("foo", "default"), ["default", "default"])

        multi[1].set_vars(paral_kgb=1)
        assert "paral_kgb" not in multi[0]
        self.assertEqual(multi.get("paral_kgb"), [None, 1])

        pert_structure = structure.copy()
        pert_structure.perturb(distance=0.1)
        assert structure != pert_structure

        assert multi.set_structure(structure) == multi.ndtset * [structure]
        assert all(s == structure for s in multi.structure)
        assert multi.has_same_structures

        multi[1].set_structure(pert_structure)
        assert multi[0].structure != multi[1].structure and multi[1].structure == pert_structure
        assert not multi.has_same_structures

        split = multi.split_datasets()
        assert len(split) == 2 and all(split[i] == multi[i] for i in range(multi.ndtset))

        repr(multi)
        str(multi)
        assert multi.to_string(with_pseudos=False)

        tmpdir = tempfile.mkdtemp()
        filepath = os.path.join(tmpdir, "run.abi")
        inp.write(filepath=filepath)
        multi.write(filepath=filepath)

        new_multi = BasicMultiDataset.from_inputs([inp for inp in multi])
        assert new_multi.ndtset == multi.ndtset
        assert new_multi.structure == multi.structure

        for old_inp, new_inp in zip(multi, new_multi):
            assert old_inp is not new_inp
            self.assertDictEqual(old_inp.as_dict(), new_inp.as_dict())

        ref_input = multi[0]
        new_multi = BasicMultiDataset.replicate_input(input=ref_input, ndtset=4)
        assert new_multi.ndtset == 4
        for inp in new_multi:
            assert ref_input is not inp
            self.assertDictEqual(ref_input.as_dict(), inp.as_dict())

        # Compatible with Pickle and MSONable?
        self.serialize_with_pickle(multi, test_eq=False)


class ShiftModeTest(PymatgenTest):
    def test_shiftmode(self):
        """Testing shiftmode"""
        gamma = ShiftMode.GammaCentered
        assert ShiftMode.from_object("G") == gamma
        assert ShiftMode.from_object(gamma) == gamma
        with self.assertRaises(TypeError):
            ShiftMode.from_object({})


class FactoryTest(PymatgenTest):
    def setUp(self):
        # Si ebands
        self.si_structure = Structure.from_file(abiref_file("si.cif"))
        self.si_pseudo = abiref_file("14si.pspnc")

    def test_gs_input(self):
        """Testing gs_input factory."""
        inp = gs_input(self.si_structure, self.si_pseudo, kppa=10, ecut=10, spin_mode="polarized")
        str(inp)
        assert inp["nsppol"] == 2
        assert inp["nband"] == 14
        self.assertArrayEqual(inp["ngkpt"], [2, 2, 2])

    def test_ebands_input(self):
        """Testing ebands_input factory."""
        multi = ebands_input(self.si_structure, self.si_pseudo, kppa=10, ecut=2)
        str(multi)
        scf_inp, nscf_inp = multi.split_datasets()

        # Test dos_kppa and other options.
        multi_dos = ebands_input(
            self.si_structure,
            self.si_pseudo,
            nscf_nband=10,
            kppa=10,
            ecut=2,
            spin_mode="unpolarized",
            smearing=None,
            charge=2.0,
            dos_kppa=50,
        )
        assert len(multi_dos) == 3
        assert all(i["charge"] == 2 for i in multi_dos)
        self.assertEqual(multi_dos.get("nsppol"), [1, 1, 1])
        self.assertEqual(multi_dos.get("iscf"), [None, -2, -2])

        multi_dos = ebands_input(
            self.si_structure,
            self.si_pseudo,
            nscf_nband=10,
            kppa=10,
            ecut=2,
            spin_mode="unpolarized",
            smearing=None,
            charge=2.0,
            dos_kppa=[50, 100],
        )
        assert len(multi_dos) == 4
        self.assertEqual(multi_dos.get("iscf"), [None, -2, -2, -2])
        str(multi_dos)

    def test_ion_ioncell_relax_input(self):
        """Testing ion_ioncell_relax_input factory."""
        multi = ion_ioncell_relax_input(self.si_structure, self.si_pseudo, kppa=10, ecut=2)
        str(multi)
        ion_inp, ioncell_inp = multi.split_datasets()

        assert ion_inp["chksymbreak"] == 0
        assert ion_inp["ionmov"] == 3 and ion_inp["optcell"] == 0
        assert ioncell_inp["ionmov"] == 3 and ioncell_inp["optcell"] == 2
gmatteo/pymatgen
pymatgen/io/abinit/tests/test_inputs.py
Python
mit
10,923
[ "ABINIT", "pymatgen" ]
6f54ae94afa1aae665aae0a564075fe29aa35fdcd5f5832ce5ffeb7d5f01a9e2
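The record above tests pymatgen's basic ABINIT input API. For orientation, here is a minimal usage sketch of the workflow those tests exercise — Structure.from_file, the gs_input factory, and write — assuming the same si.cif and 14si.pspnc reference files are available locally (the test_dir path is hypothetical):

# Minimal sketch of the API exercised by the tests above; not part of the test file.
import os

from pymatgen.core.structure import Structure
from pymatgen.io.abinit.inputs import gs_input

test_dir = os.path.join("test_files", "abinit")  # hypothetical local path

structure = Structure.from_file(os.path.join(test_dir, "si.cif"))
pseudos = os.path.join(test_dir, "14si.pspnc")

# Build a ground-state input: kppa controls the k-point density, ecut the cutoff.
inp = gs_input(structure, pseudos, kppa=10, ecut=10, spin_mode="polarized")

# The input renders to an ABINIT input file and to a plain string.
inp.write(filepath="run.abi")
print(inp.to_string(with_pseudos=False))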
import logging, os, sys from PyQt5.QtWidgets import (QVBoxLayout, QHBoxLayout, QListWidget, QWidget, QListWidgetItem, QStackedLayout, QPushButton, QLabel, QTabWidget, QLineEdit, QGroupBox, QFormLayout, QCheckBox, QRadioButton, QSpinBox, QSizePolicy, QScrollArea, QFontDialog) from PyQt5.QtCore import pyqtSignal, Qt from PyQt5.QtGui import QPalette, QPixmapCache from misc import FlowLayout, Spacer, PathLineEdit, ApplicationPopup import settings import gui_constants import misc_db import gallerydb log = logging.getLogger(__name__) log_i = log.info log_d = log.debug log_w = log.warning log_e = log.error log_c = log.critical class SettingsDialog(QWidget): "A settings dialog" scroll_speed_changed = pyqtSignal() init_gallery_rebuild = pyqtSignal() def __init__(self, parent=None): super().__init__(parent, flags=Qt.Window) self.init_gallery_rebuild.connect(self.accept) self.parent_widget = parent self.setAttribute(Qt.WA_DeleteOnClose) self.resize(700, 500) self.restore_values() self.initUI() self.setWindowTitle('Settings') self.show() def initUI(self): main_layout = QVBoxLayout(self) sub_layout = QHBoxLayout() # Left Panel left_panel = QListWidget() left_panel.setViewMode(left_panel.ListMode) #left_panel.setIconSize(QSize(40,40)) left_panel.setTextElideMode(Qt.ElideRight) left_panel.setMaximumWidth(200) left_panel.itemClicked.connect(self.change) #web.setText('Web') self.application = QListWidgetItem() self.application.setText('Application') self.web = QListWidgetItem() self.web.setText('Web') self.visual = QListWidgetItem() self.visual.setText('Visual') self.advanced = QListWidgetItem() self.advanced.setText('Advanced') self.about = QListWidgetItem() self.about.setText('About') #main.setIcon(QIcon(os.path.join(gui_constants.static_dir, 'plus2.png'))) left_panel.addItem(self.application) left_panel.addItem(self.web) left_panel.addItem(self.visual) left_panel.addItem(self.advanced) left_panel.addItem(self.about) left_panel.setMaximumWidth(100) # right panel self.right_panel = QStackedLayout() self.init_right_panel() # bottom bottom_layout = QHBoxLayout() ok_btn = QPushButton('Ok') ok_btn.clicked.connect(self.accept) cancel_btn = QPushButton('Cancel') cancel_btn.clicked.connect(self.close) info_lbl = QLabel() info_lbl.setText('<a href="https://github.com/Pewpews/happypanda">'+ 'Visit GitHub Repo</a> | Options marked with * requires application restart.') info_lbl.setTextFormat(Qt.RichText) info_lbl.setTextInteractionFlags(Qt.TextBrowserInteraction) info_lbl.setOpenExternalLinks(True) self.spacer = QWidget() self.spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred) bottom_layout.addWidget(info_lbl, 0, Qt.AlignLeft) bottom_layout.addWidget(self.spacer) bottom_layout.addWidget(ok_btn, 0, Qt.AlignRight) bottom_layout.addWidget(cancel_btn, 0, Qt.AlignRight) sub_layout.addWidget(left_panel) sub_layout.addLayout(self.right_panel) main_layout.addLayout(sub_layout) main_layout.addLayout(bottom_layout) self.restore_options() def change(self, item): def curr_index(index): if index != self.right_panel.currentIndex(): self.right_panel.setCurrentIndex(index) if item == self.application: curr_index(self.application_index) elif item == self.web: curr_index(self.web_index) elif item == self.visual: curr_index(self.visual_index) elif item == self.advanced: curr_index(self.advanced_index) elif item == self.about: curr_index(self.about_index) def restore_values(self): #Web self.exprops = settings.ExProperties() # Visual self.high_quality_thumbs = gui_constants.HIGH_QUALITY_THUMBS self.popup_width = 
gui_constants.POPUP_WIDTH self.popup_height = gui_constants.POPUP_HEIGHT self.style_sheet = gui_constants.user_stylesheet_path # Advanced self.scroll_speed = gui_constants.SCROLL_SPEED self.cache_size = gui_constants.THUMBNAIL_CACHE_SIZE self.prefetch_item_amnt = gui_constants.PREFETCH_ITEM_AMOUNT def restore_options(self): # App / General self.subfolder_as_chapters.setChecked(gui_constants.SUBFOLDER_AS_GALLERY) self.extract_gallery_before_opening.setChecked(gui_constants.EXTRACT_CHAPTER_BEFORE_OPENING) self.open_galleries_sequentially.setChecked(gui_constants.OPEN_GALLERIES_SEQUENTIALLY) self.scroll_to_new_gallery.setChecked(gui_constants.SCROLL_TO_NEW_GALLERIES) self.move_imported_gs.setChecked(gui_constants.MOVE_IMPORTED_GALLERIES) self.move_imported_def_path.setText(gui_constants.IMPORTED_GALLERY_DEF_PATH) self.open_random_g_chapters.setChecked(gui_constants.OPEN_RANDOM_GALLERY_CHAPTERS) self.rename_g_source_group.setChecked(gui_constants.RENAME_GALLERY_SOURCE) self.path_to_unrar.setText(gui_constants.unrar_tool_path) # App / Monitor / Misc self.enable_monitor.setChecked(gui_constants.ENABLE_MONITOR) self.look_new_gallery_startup.setChecked(gui_constants.LOOK_NEW_GALLERY_STARTUP) self.auto_add_new_galleries.setChecked(gui_constants.LOOK_NEW_GALLERY_AUTOADD) # App / Monitor / Folders for path in gui_constants.MONITOR_PATHS: self.add_folder_monitor(path) # App / Monitor / Ignore list for path in gui_constants.IGNORE_PATHS: self.add_ignore_path(path) # Web / General if 'g.e-hentai' in gui_constants.DEFAULT_EHEN_URL: self.default_ehen_url.setChecked(True) else: self.exhentai_ehen_url.setChecked(True) self.replace_metadata.setChecked(gui_constants.REPLACE_METADATA) self.always_first_hit.setChecked(gui_constants.ALWAYS_CHOOSE_FIRST_HIT) self.web_time_offset.setValue(gui_constants.GLOBAL_EHEN_TIME) self.continue_a_metadata_fetcher.setChecked(gui_constants.CONTINUE_AUTO_METADATA_FETCHER) self.use_jpn_title.setChecked(gui_constants.USE_JPN_TITLE) self.use_gallery_link.setChecked(gui_constants.USE_GALLERY_LINK) # Web / Download if gui_constants.HEN_DOWNLOAD_TYPE == 0: self.archive_download.setChecked(True) else: self.torrent_download.setChecked(True) self.download_directory.setText(gui_constants.DOWNLOAD_DIRECTORY) self.torrent_client.setText(gui_constants.TORRENT_CLIENT) # Web / Exhentai self.ipbid_edit.setText(self.exprops.ipb_id) self.ipbpass_edit.setText(self.exprops.ipb_pass) # Visual / Grid View / Tooltip self.grid_tooltip_group.setChecked(gui_constants.GRID_TOOLTIP) self.visual_grid_tooltip_title.setChecked(gui_constants.TOOLTIP_TITLE) self.visual_grid_tooltip_author.setChecked(gui_constants.TOOLTIP_AUTHOR) self.visual_grid_tooltip_chapters.setChecked(gui_constants.TOOLTIP_CHAPTERS) self.visual_grid_tooltip_status.setChecked(gui_constants.TOOLTIP_STATUS) self.visual_grid_tooltip_type.setChecked(gui_constants.TOOLTIP_TYPE) self.visual_grid_tooltip_lang.setChecked(gui_constants.TOOLTIP_LANG) self.visual_grid_tooltip_descr.setChecked(gui_constants.TOOLTIP_DESCR) self.visual_grid_tooltip_tags.setChecked(gui_constants.TOOLTIP_TAGS) self.visual_grid_tooltip_last_read.setChecked(gui_constants.TOOLTIP_LAST_READ) self.visual_grid_tooltip_times_read.setChecked(gui_constants.TOOLTIP_TIMES_READ) self.visual_grid_tooltip_pub_date.setChecked(gui_constants.TOOLTIP_PUB_DATE) self.visual_grid_tooltip_date_added.setChecked(gui_constants.TOOLTIP_DATE_ADDED) # Visual / Grid View / Gallery self.external_viewer_ico.setChecked(gui_constants.USE_EXTERNAL_PROG_ICO) 
self.gallery_type_ico.setChecked(gui_constants.DISPLAY_GALLERY_TYPE) if gui_constants.GALLERY_FONT_ELIDE: self.gallery_text_elide.setChecked(True) else: self.gallery_text_fit.setChecked(True) self.font_lbl.setText(gui_constants.GALLERY_FONT[0]) self.font_size_lbl.setValue(gui_constants.GALLERY_FONT[1]) def re_enforce(s): if s: self.search_on_enter.setChecked(True) self.search_allow_regex.clicked.connect(re_enforce) if gui_constants.SEARCH_ON_ENTER: self.search_on_enter.setChecked(True) else: self.search_every_keystroke.setChecked(True) # Visual / Grid View / Colors self.grid_label_color.setText(gui_constants.GRID_VIEW_LABEL_COLOR) self.grid_title_color.setText(gui_constants.GRID_VIEW_TITLE_COLOR) self.grid_artist_color.setText(gui_constants.GRID_VIEW_ARTIST_COLOR) # Advanced / Misc / External Viewer self.external_viewer_path.setText(gui_constants.EXTERNAL_VIEWER_PATH) # Advanced / Gallery / Gallery Text Fixer self.g_data_regex_fix_edit.setText(gui_constants.GALLERY_DATA_FIX_REGEX) self.g_data_replace_fix_edit.setText(gui_constants.GALLERY_DATA_FIX_REPLACE) self.g_data_fixer_title.setChecked(gui_constants.GALLERY_DATA_FIX_TITLE) self.g_data_fixer_artist.setChecked(gui_constants.GALLERY_DATA_FIX_ARTIST) # About / DB Overview self.tags_treeview_on_start.setChecked(gui_constants.TAGS_TREEVIEW_ON_START) def accept(self): set = settings.set # App / General gui_constants.SUBFOLDER_AS_GALLERY = self.subfolder_as_chapters.isChecked() set(gui_constants.SUBFOLDER_AS_GALLERY, 'Application', 'subfolder as gallery') gui_constants.EXTRACT_CHAPTER_BEFORE_OPENING = self.extract_gallery_before_opening.isChecked() set(gui_constants.EXTRACT_CHAPTER_BEFORE_OPENING, 'Application', 'extract chapter before opening') gui_constants.OPEN_GALLERIES_SEQUENTIALLY = self.open_galleries_sequentially.isChecked() set(gui_constants.OPEN_GALLERIES_SEQUENTIALLY, 'Application', 'open galleries sequentially') gui_constants.SCROLL_TO_NEW_GALLERIES = self.scroll_to_new_gallery.isChecked() set(gui_constants.SCROLL_TO_NEW_GALLERIES, 'Application', 'scroll to new galleries') gui_constants.MOVE_IMPORTED_GALLERIES = self.move_imported_gs.isChecked() set(gui_constants.MOVE_IMPORTED_GALLERIES, 'Application', 'move imported galleries') if not self.move_imported_def_path.text() or os.path.exists(self.move_imported_def_path.text()): gui_constants.IMPORTED_GALLERY_DEF_PATH = self.move_imported_def_path.text() set(gui_constants.IMPORTED_GALLERY_DEF_PATH, 'Application', 'imported gallery def path') gui_constants.OPEN_RANDOM_GALLERY_CHAPTERS = self.open_random_g_chapters.isChecked() set(gui_constants.OPEN_RANDOM_GALLERY_CHAPTERS, 'Application', 'open random gallery chapters') gui_constants.RENAME_GALLERY_SOURCE = self.rename_g_source_group.isChecked() set(gui_constants.RENAME_GALLERY_SOURCE, 'Application', 'rename gallery source') gui_constants.unrar_tool_path = self.path_to_unrar.text() set(gui_constants.unrar_tool_path, 'Application', 'unrar tool path') # App / Monitor / misc gui_constants.ENABLE_MONITOR = self.enable_monitor.isChecked() set(gui_constants.ENABLE_MONITOR, 'Application', 'enable monitor') gui_constants.LOOK_NEW_GALLERY_STARTUP = self.look_new_gallery_startup.isChecked() set(gui_constants.LOOK_NEW_GALLERY_STARTUP, 'Application', 'look new gallery startup') gui_constants.LOOK_NEW_GALLERY_AUTOADD = self.auto_add_new_galleries.isChecked() set(gui_constants.LOOK_NEW_GALLERY_AUTOADD, 'Application', 'look new gallery autoadd') # App / Monitor / folders paths = [] folder_p_widgets = self.take_all_layout_widgets(self.folders_layout) 
for x, l_edit in enumerate(folder_p_widgets): p = l_edit.text() if p: paths.append(p) set(paths, 'Application', 'monitor paths') gui_constants.MONITOR_PATHS = paths # App / Monitor / ignore list paths = [] ignore_p_widgets = self.take_all_layout_widgets(self.ignore_path_l) for x, l_edit in enumerate(ignore_p_widgets): p = l_edit.text() if p: paths.append(p) set(paths, 'Application', 'ignore paths') gui_constants.IGNORE_PATHS = paths # Web / Downloader if self.archive_download.isChecked(): gui_constants.HEN_DOWNLOAD_TYPE = 0 else: gui_constants.HEN_DOWNLOAD_TYPE = 1 set(gui_constants.HEN_DOWNLOAD_TYPE, 'Web', 'hen download type') gui_constants.DOWNLOAD_DIRECTORY = self.download_directory.text() set(gui_constants.DOWNLOAD_DIRECTORY, 'Web', 'download directory') gui_constants.TORRENT_CLIENT = self.torrent_client.text() set(gui_constants.TORRENT_CLIENT, 'Web', 'torrent client') # Web / Metdata if self.default_ehen_url.isChecked(): gui_constants.DEFAULT_EHEN_URL = 'http://g.e-hentai.org/' else: gui_constants.DEFAULT_EHEN_URL = 'http://exhentai.org/' set(gui_constants.DEFAULT_EHEN_URL, 'Web', 'default ehen url') gui_constants.REPLACE_METADATA = self.replace_metadata.isChecked() set(gui_constants.REPLACE_METADATA, 'Web', 'replace metadata') gui_constants.ALWAYS_CHOOSE_FIRST_HIT = self.always_first_hit.isChecked() set(gui_constants.ALWAYS_CHOOSE_FIRST_HIT, 'Web', 'always choose first hit') gui_constants.GLOBAL_EHEN_TIME = self.web_time_offset.value() set(gui_constants.GLOBAL_EHEN_TIME, 'Web', 'global ehen time offset') gui_constants.CONTINUE_AUTO_METADATA_FETCHER = self.continue_a_metadata_fetcher.isChecked() set(gui_constants.CONTINUE_AUTO_METADATA_FETCHER, 'Web', 'continue auto metadata fetcher') gui_constants.USE_JPN_TITLE = self.use_jpn_title.isChecked() set(gui_constants.USE_JPN_TITLE, 'Web', 'use jpn title') gui_constants.USE_GALLERY_LINK = self.use_gallery_link.isChecked() set(gui_constants.USE_GALLERY_LINK, 'Web', 'use gallery link') # Web / ExHentai self.exprops.ipb_id = self.ipbid_edit.text() self.exprops.ipb_pass = self.ipbpass_edit.text() # Visual / Grid View / Tooltip gui_constants.GRID_TOOLTIP = self.grid_tooltip_group.isChecked() set(gui_constants.GRID_TOOLTIP, 'Visual', 'grid tooltip') gui_constants.TOOLTIP_TITLE = self.visual_grid_tooltip_title.isChecked() set(gui_constants.TOOLTIP_TITLE, 'Visual', 'tooltip title') gui_constants.TOOLTIP_AUTHOR = self.visual_grid_tooltip_author.isChecked() set(gui_constants.TOOLTIP_AUTHOR, 'Visual', 'tooltip author') gui_constants.TOOLTIP_CHAPTERS = self.visual_grid_tooltip_chapters.isChecked() set(gui_constants.TOOLTIP_CHAPTERS, 'Visual', 'tooltip chapters') gui_constants.TOOLTIP_STATUS = self.visual_grid_tooltip_status.isChecked() set(gui_constants.TOOLTIP_STATUS, 'Visual', 'tooltip status') gui_constants.TOOLTIP_TYPE = self.visual_grid_tooltip_type.isChecked() set(gui_constants.TOOLTIP_TYPE, 'Visual', 'tooltip type') gui_constants.TOOLTIP_LANG = self.visual_grid_tooltip_lang.isChecked() set(gui_constants.TOOLTIP_LANG, 'Visual', 'tooltip lang') gui_constants.TOOLTIP_DESCR = self.visual_grid_tooltip_descr.isChecked() set(gui_constants.TOOLTIP_DESCR, 'Visual', 'tooltip descr') gui_constants.TOOLTIP_TAGS = self.visual_grid_tooltip_tags.isChecked() set(gui_constants.TOOLTIP_TAGS, 'Visual', 'tooltip tags') gui_constants.TOOLTIP_LAST_READ = self.visual_grid_tooltip_last_read.isChecked() set(gui_constants.TOOLTIP_LAST_READ, 'Visual', 'tooltip last read') gui_constants.TOOLTIP_TIMES_READ = self.visual_grid_tooltip_times_read.isChecked() 
set(gui_constants.TOOLTIP_TIMES_READ, 'Visual', 'tooltip times read') gui_constants.TOOLTIP_PUB_DATE = self.visual_grid_tooltip_pub_date.isChecked() set(gui_constants.TOOLTIP_PUB_DATE, 'Visual', 'tooltip pub date') gui_constants.TOOLTIP_DATE_ADDED = self.visual_grid_tooltip_date_added.isChecked() set(gui_constants.TOOLTIP_DATE_ADDED, 'Visual', 'tooltip date added') # Visual / Grid View / Gallery gui_constants.USE_EXTERNAL_PROG_ICO = self.external_viewer_ico.isChecked() set(gui_constants.USE_EXTERNAL_PROG_ICO, 'Visual', 'use external prog ico') gui_constants.DISPLAY_GALLERY_TYPE = self.gallery_type_ico.isChecked() set(gui_constants.DISPLAY_GALLERY_TYPE, 'Visual', 'display gallery type') if self.gallery_text_elide.isChecked(): gui_constants.GALLERY_FONT_ELIDE = True else: gui_constants.GALLERY_FONT_ELIDE = False set(gui_constants.GALLERY_FONT_ELIDE, 'Visual', 'gallery font elide') gui_constants.GALLERY_FONT = (self.font_lbl.text(), self.font_size_lbl.value()) set(gui_constants.GALLERY_FONT[0], 'Visual', 'gallery font family') set(gui_constants.GALLERY_FONT[1], 'Visual', 'gallery font size') # Visual / Grid View / Colors if self.color_checker(self.grid_title_color.text()): gui_constants.GRID_VIEW_TITLE_COLOR = self.grid_title_color.text() set(gui_constants.GRID_VIEW_TITLE_COLOR, 'Visual', 'grid view title color') if self.color_checker(self.grid_artist_color.text()): gui_constants.GRID_VIEW_ARTIST_COLOR = self.grid_artist_color.text() set(gui_constants.GRID_VIEW_ARTIST_COLOR, 'Visual', 'grid view artist color') if self.color_checker(self.grid_label_color.text()): gui_constants.GRID_VIEW_LABEL_COLOR = self.grid_label_color.text() set(gui_constants.GRID_VIEW_LABEL_COLOR, 'Visual', 'grid view label color') # Advanced / Misc # Advanced / Misc / Grid View gui_constants.SCROLL_SPEED = self.scroll_speed set(self.scroll_speed, 'Advanced', 'scroll speed') self.scroll_speed_changed.emit() gui_constants.THUMBNAIL_CACHE_SIZE = self.cache_size set(self.cache_size[1], 'Advanced', 'cache size') QPixmapCache.setCacheLimit(self.cache_size[0]* self.cache_size[1]) # Advanced / Misc / Search gui_constants.ALLOW_SEARCH_REGEX = self.search_allow_regex.isChecked() set(gui_constants.ALLOW_SEARCH_REGEX, 'Advanced', 'allow search regex') gui_constants.SEARCH_AUTOCOMPLETE = self.search_autocomplete.isChecked() set(gui_constants.SEARCH_AUTOCOMPLETE, 'Advanced', 'search autocomplete') if self.search_on_enter.isChecked(): gui_constants.SEARCH_ON_ENTER = True else: gui_constants.SEARCH_ON_ENTER = False set(gui_constants.SEARCH_ON_ENTER, 'Advanced', 'search on enter') # Advanced / Misc / External Viewer if not self.external_viewer_path.text(): gui_constants.USE_EXTERNAL_VIEWER = False set(False, 'Advanced', 'use external viewer') else: gui_constants.USE_EXTERNAL_VIEWER = True set(True, 'Advanced', 'use external viewer') gui_constants._REFRESH_EXTERNAL_VIEWER = True gui_constants.EXTERNAL_VIEWER_PATH = self.external_viewer_path.text() set(gui_constants.EXTERNAL_VIEWER_PATH,'Advanced', 'external viewer path') # Advanced / General / Gallery Text Fixer gui_constants.GALLERY_DATA_FIX_REGEX = self.g_data_regex_fix_edit.text() set(gui_constants.GALLERY_DATA_FIX_REGEX, 'Advanced', 'gallery data fix regex') gui_constants.GALLERY_DATA_FIX_TITLE = self.g_data_fixer_title.isChecked() set(gui_constants.GALLERY_DATA_FIX_TITLE, 'Advanced', 'gallery data fix title') gui_constants.GALLERY_DATA_FIX_ARTIST = self.g_data_fixer_artist.isChecked() set(gui_constants.GALLERY_DATA_FIX_ARTIST, 'Advanced', 'gallery data fix artist') 
gui_constants.GALLERY_DATA_FIX_REPLACE = self.g_data_replace_fix_edit.text() set(gui_constants.GALLERY_DATA_FIX_REPLACE, 'Advanced', 'gallery data fix replace') # About / DB Overview gui_constants.TAGS_TREEVIEW_ON_START = self.tags_treeview_on_start.isChecked() set(gui_constants.TAGS_TREEVIEW_ON_START, 'Application', 'tags treeview on start') settings.save() self.close() def init_right_panel(self): #def title_def(title): # title_lbl = QLabel(title) # f = QFont() # f.setPixelSize(16) # title_lbl.setFont(f) # return title_lbl def groupbox(name, layout, parent): """ Makes a groupbox and a layout for you Returns groupbox and layout """ g = QGroupBox(name, parent) l = layout(g) return g, l def option_lbl_checkbox(text, optiontext, parent=None): l = QLabel(text) c = QCheckBox(text, parent) return l, c def new_tab(name, parent, scroll=False): """ Creates a new tab. Returns new tab page widget and it's layout """ new_t = QWidget(parent) new_l = QFormLayout(new_t) if scroll: scr = QScrollArea(parent) scr.setBackgroundRole(QPalette.Base) scr.setWidget(new_t) scr.setWidgetResizable(True) parent.addTab(scr, name) return new_t, new_l else: parent.addTab(new_t, name) return new_t, new_l # App application = QTabWidget(self) self.application_index = self.right_panel.addWidget(application) application_general, app_general_m_l = new_tab('General', application, True) # App / General / gallery app_gallery_group, app_gallery_l = groupbox('Gallery', QFormLayout, self) app_general_m_l.addRow(app_gallery_group) self.subfolder_as_chapters = QCheckBox("Treat subfolders as galleries (applies in archives too)") self.subfolder_as_chapters.setToolTip("This option will treat subfolders as standalone galleries when scanning for galleries") extract_gallery_info = QLabel("Note: This option has no effect when turned off if path to viewer is not specified.") self.extract_gallery_before_opening = QCheckBox("Extract archive before opening (only turn off if your viewer supports it)") self.open_galleries_sequentially = QCheckBox("Open chapters sequentially (Note: has no effect if path to viewer is not specified)") subf_info = QLabel("Behaviour of 'Scan for new galleries on startup' option will be affected.") subf_info.setWordWrap(True) app_gallery_l.addRow('Note:', subf_info) app_gallery_l.addRow(self.subfolder_as_chapters) app_gallery_l.addRow(extract_gallery_info) app_gallery_l.addRow(self.extract_gallery_before_opening) app_gallery_l.addRow(self.open_galleries_sequentially) self.scroll_to_new_gallery = QCheckBox("Scroll to newly added gallery") self.scroll_to_new_gallery.setDisabled(True) app_gallery_l.addRow(self.scroll_to_new_gallery) self.move_imported_gs, move_imported_gs_l = groupbox('Move imported galleries', QFormLayout, app_gallery_group) self.move_imported_gs.setCheckable(True) self.move_imported_gs.setToolTip("Move imported galleries to specified folder.") self.move_imported_def_path = PathLineEdit() move_imported_gs_l.addRow('Directory:', self.move_imported_def_path) app_gallery_l.addRow(self.move_imported_gs) self.rename_g_source_group, rename_g_source_l = groupbox('Rename gallery source', QFormLayout, app_gallery_group) self.rename_g_source_group.setCheckable(True) self.rename_g_source_group.setDisabled(True) app_gallery_l.addRow(self.rename_g_source_group) rename_g_source_l.addRow(QLabel("Check what to include when renaming gallery source. 
(Same order)")) rename_g_source_flow_l = FlowLayout() rename_g_source_l.addRow(rename_g_source_flow_l) self.rename_artist = QCheckBox("Artist") self.rename_title = QCheckBox("Title") self.rename_lang = QCheckBox("Language") self.rename_title.setChecked(True) self.rename_title.setDisabled(True) rename_g_source_flow_l.addWidget(self.rename_artist) rename_g_source_flow_l.addWidget(self.rename_title) rename_g_source_flow_l.addWidget(self.rename_lang) random_gallery_opener, random_g_opener_l = groupbox('Random Gallery Opener', QFormLayout, app_gallery_group) app_gallery_l.addRow(random_gallery_opener) self.open_random_g_chapters = QCheckBox("Open random gallery chapters") random_g_opener_l.addRow(self.open_random_g_chapters) # App / General / Rar Support app_rar_group, app_rar_layout = groupbox('RAR Support *', QFormLayout, self) app_general_m_l.addRow(app_rar_group) rar_info = QLabel('Specify the path to the unrar tool to enable rar support.\n'+ 'Windows: "unrar.exe" should be in the "bin" directory if you installed from the'+ ' self-extracting archive provided on github.\nOSX: You can install this via HomeBrew.'+ ' Path should be something like: "/usr/local/bin/unrar".\nLinux: Should already be'+ ' installed. You can just type "unrar". If it\'s not installed, use your package manager: pacman -S unrar') rar_info.setWordWrap(True) app_rar_layout.addRow(rar_info) self.path_to_unrar = PathLineEdit(self, False, filters='') app_rar_layout.addRow('UnRAR tool path:', self.path_to_unrar) # App / Monitor app_monitor_page = QScrollArea() app_monitor_page.setBackgroundRole(QPalette.Base) app_monitor_dummy = QWidget() app_monitor_page.setWidgetResizable(True) app_monitor_page.setWidget(app_monitor_dummy) application.addTab(app_monitor_page, 'Monitoring') app_monitor_m_l = QVBoxLayout(app_monitor_dummy) # App / Monitor / misc app_monitor_misc_group = QGroupBox('General *', self) app_monitor_m_l.addWidget(app_monitor_misc_group) app_monitor_misc_m_l = QFormLayout(app_monitor_misc_group) monitor_info = QLabel('Directory monitoring will monitor the specified directories for any'+ ' filesystem events. 
For example if you delete a gallery source in one of your'+ ' monitored directories the application will inform you and ask if'+ ' you want to delete the gallery from the application as well.') monitor_info.setWordWrap(True) app_monitor_misc_m_l.addRow(monitor_info) self.enable_monitor = QCheckBox('Enable directory monitoring') app_monitor_misc_m_l.addRow(self.enable_monitor) self.look_new_gallery_startup = QGroupBox('Scan for new galleries on startup', self) app_monitor_misc_m_l.addRow(self.look_new_gallery_startup) self.look_new_gallery_startup.setCheckable(True) look_new_gallery_startup_m_l = QVBoxLayout(self.look_new_gallery_startup) self.auto_add_new_galleries = QCheckBox('Automatically add found galleries') look_new_gallery_startup_m_l.addWidget(self.auto_add_new_galleries) # App / Monitor / folders app_monitor_group = QGroupBox('Directories *', self) app_monitor_m_l.addWidget(app_monitor_group, 1) app_monitor_folders_m_l = QVBoxLayout(app_monitor_group) app_monitor_folders_add = QPushButton('+') app_monitor_folders_add.clicked.connect(self.add_folder_monitor) app_monitor_folders_add.setMaximumWidth(20) app_monitor_folders_add.setMaximumHeight(20) app_monitor_folders_m_l.addWidget(app_monitor_folders_add, 0, Qt.AlignRight) self.folders_layout = QFormLayout() app_monitor_folders_m_l.addLayout(self.folders_layout) # App / Ignore app_ignore, app_ignore_m_l = new_tab('Ignore', application, True) app_ignore_group, app_ignore_list_l = groupbox('List', QVBoxLayout, app_monitor_dummy) app_ignore_m_l.addRow(app_ignore_group) add_buttons_l = QHBoxLayout() app_ignore_add_a = QPushButton('Add archive') app_ignore_add_a.clicked.connect(lambda: self.add_ignore_path(dir=False)) app_ignore_add_f = QPushButton('Add directory') app_ignore_add_f.clicked.connect(self.add_ignore_path) add_buttons_l.addWidget(app_ignore_add_a, 0, Qt.AlignRight) add_buttons_l.addWidget(app_ignore_add_f, 1, Qt.AlignRight) app_ignore_list_l.addLayout(add_buttons_l) self.ignore_path_l = QFormLayout() app_ignore_list_l.addLayout(self.ignore_path_l) # Web web = QTabWidget(self) self.web_index = self.right_panel.addWidget(web) # Web / Downloader web_downloader, web_downloader_l = new_tab('Downloader', web) hen_download_group, hen_download_group_l = groupbox('g.e-hentai/exhentai', QFormLayout, web_downloader) web_downloader_l.addRow(hen_download_group) self.archive_download = QRadioButton('Archive', hen_download_group) self.torrent_download = QRadioButton('Torrent', hen_download_group) download_type_l = QHBoxLayout() download_type_l.addWidget(self.archive_download) download_type_l.addWidget(self.torrent_download, 1) hen_download_group_l.addRow('Download Type:', download_type_l) self.download_directory = PathLineEdit(web_downloader) web_downloader_l.addRow('Destination:', self.download_directory) self.torrent_client = PathLineEdit(web_downloader, False, '') web_downloader_l.addRow(QLabel("Leave empty to use default torrent client."+ "\nIt is NOT recommended to import a file while it's still downloading.")) web_downloader_l.addRow('Torrent client:', self.torrent_client) # Web / Metadata web_metadata_page = QScrollArea() web_metadata_page.setBackgroundRole(QPalette.Base) web_metadata_page.setWidgetResizable(True) web.addTab(web_metadata_page, 'Metadata') web_metadata_dummy = QWidget() web_metadata_page.setWidget(web_metadata_dummy) web_metadata_m_l = QFormLayout(web_metadata_dummy) self.default_ehen_url = QRadioButton('g.e-hentai.org', web_metadata_page) self.exhentai_ehen_url = QRadioButton('exhentai.org', web_metadata_page) 
ehen_url_l = QHBoxLayout() ehen_url_l.addWidget(self.default_ehen_url) ehen_url_l.addWidget(self.exhentai_ehen_url, 1) web_metadata_m_l.addRow('Default URL:', ehen_url_l) self.continue_a_metadata_fetcher = QCheckBox('Continue from where auto metadata fetcher left off') web_metadata_m_l.addRow(self.continue_a_metadata_fetcher) self.use_jpn_title = QCheckBox('Use japanese title') self.use_jpn_title.setToolTip('Choose the japenese title over the english one') web_metadata_m_l.addRow(self.use_jpn_title) time_offset_info = QLabel('We need to impose a delay between our requests to avoid getting banned.'+ ' I have made it so you cannot set the delay lower than the recommended (I don\'t'+ ' want you to get banned, anon!).\nSpecify the delay between requests in seconds.') time_offset_info.setWordWrap(True) self.web_time_offset = QSpinBox() self.web_time_offset.setMaximumWidth(40) self.web_time_offset.setMinimum(4) self.web_time_offset.setMaximum(99) web_metadata_m_l.addRow(time_offset_info) web_metadata_m_l.addRow('Requests delay in seconds', self.web_time_offset) replace_metadata_info = QLabel('When fetching for metadata the new metadata will be appended'+ ' to the gallery by default. This means that new data will only be added if'+ ' the field was empty. There is however a special case for namespace & tags.'+ ' We go through all the new namespace & tags to only add those that'+ ' do not already exists.\n\nEnabling this option makes it so that a gallery\'s old data'+ ' are deleted and replaced with the new data.') replace_metadata_info.setWordWrap(True) self.replace_metadata = QCheckBox('Replace old metadata with new metadata') web_metadata_m_l.addRow(replace_metadata_info) web_metadata_m_l.addRow(self.replace_metadata) first_hit_info = QLabel('By default, you get to choose which gallery to extract metadata from when'+ ' there is more than one gallery found when searching.\n'+ 'Enabling this option makes it choose the first hit, saving you from moving your mouse.') first_hit_info.setWordWrap(True) self.always_first_hit = QCheckBox('Always choose first hit') web_metadata_m_l.addRow(first_hit_info) web_metadata_m_l.addRow(self.always_first_hit) self.use_gallery_link = QCheckBox('Use current gallery link') self.use_gallery_link.setToolTip("Metadata will be fetched from the current gallery link"+ " if it's a valid ex/g.e gallery url") web_metadata_m_l.addRow(self.use_gallery_link) # Web / Exhentai exhentai_page = QWidget(self) web.addTab(exhentai_page, 'ExHentai') ipb_layout = QFormLayout() exhentai_page.setLayout(ipb_layout) self.ipbid_edit = QLineEdit() self.ipbpass_edit = QLineEdit() exh_tutorial = QLabel(gui_constants.EXHEN_COOKIE_TUTORIAL) exh_tutorial.setTextFormat(Qt.RichText) ipb_layout.addRow('IPB Member ID:', self.ipbid_edit) ipb_layout.addRow('IPB Pass Hash:', self.ipbpass_edit) ipb_layout.addRow(exh_tutorial) # Visual visual = QTabWidget(self) self.visual_index = self.right_panel.addWidget(visual) visual_general_page = QWidget() visual.addTab(visual_general_page, 'General') grid_view_general_page = QWidget() visual.addTab(grid_view_general_page, 'Grid View') grid_view_layout = QVBoxLayout() grid_view_layout.addWidget(QLabel('Options marked with * requires application restart'), 0, Qt.AlignTop) grid_view_general_page.setLayout(grid_view_layout) # grid view # grid view / tooltip self.grid_tooltip_group = QGroupBox('Tooltip', grid_view_general_page) self.grid_tooltip_group.setCheckable(True) grid_view_layout.addWidget(self.grid_tooltip_group, 0, Qt.AlignTop) grid_tooltip_layout = 
QFormLayout() self.grid_tooltip_group.setLayout(grid_tooltip_layout) grid_tooltip_layout.addRow(QLabel('Control what is'+ ' displayed in the tooltip')) grid_tooltips_hlayout = FlowLayout() grid_tooltip_layout.addRow(grid_tooltips_hlayout) self.visual_grid_tooltip_title = QCheckBox('Title') grid_tooltips_hlayout.addWidget(self.visual_grid_tooltip_title) self.visual_grid_tooltip_author = QCheckBox('Author') grid_tooltips_hlayout.addWidget(self.visual_grid_tooltip_author) self.visual_grid_tooltip_chapters = QCheckBox('Chapters') grid_tooltips_hlayout.addWidget(self.visual_grid_tooltip_chapters) self.visual_grid_tooltip_status = QCheckBox('Status') grid_tooltips_hlayout.addWidget(self.visual_grid_tooltip_status) self.visual_grid_tooltip_type = QCheckBox('Type') grid_tooltips_hlayout.addWidget(self.visual_grid_tooltip_type) self.visual_grid_tooltip_lang = QCheckBox('Language') grid_tooltips_hlayout.addWidget(self.visual_grid_tooltip_lang) self.visual_grid_tooltip_descr = QCheckBox('Description') grid_tooltips_hlayout.addWidget(self.visual_grid_tooltip_descr) self.visual_grid_tooltip_tags = QCheckBox('Tags') grid_tooltips_hlayout.addWidget(self.visual_grid_tooltip_tags) self.visual_grid_tooltip_last_read = QCheckBox('Last read') grid_tooltips_hlayout.addWidget(self.visual_grid_tooltip_last_read) self.visual_grid_tooltip_times_read = QCheckBox('Times read') grid_tooltips_hlayout.addWidget(self.visual_grid_tooltip_times_read) self.visual_grid_tooltip_pub_date = QCheckBox('Publication Date') grid_tooltips_hlayout.addWidget(self.visual_grid_tooltip_pub_date) self.visual_grid_tooltip_date_added = QCheckBox('Date added') grid_tooltips_hlayout.addWidget(self.visual_grid_tooltip_date_added) # grid view / gallery grid_gallery_group = QGroupBox('Gallery', grid_view_general_page) grid_view_layout.addWidget(grid_gallery_group, 0, Qt.AlignTop) grid_gallery_main_l = QFormLayout() grid_gallery_main_l.setFormAlignment(Qt.AlignLeft) grid_gallery_group.setLayout(grid_gallery_main_l) grid_gallery_display = FlowLayout() grid_gallery_main_l.addRow('Display on gallery:', grid_gallery_display) self.external_viewer_ico = QCheckBox('External Viewer') grid_gallery_display.addWidget(self.external_viewer_ico) self.gallery_type_ico = QCheckBox('File Type') grid_gallery_display.addWidget(self.gallery_type_ico) if sys.platform.startswith('darwin'): grid_gallery_group.setEnabled(False) gallery_text_mode = QWidget() grid_gallery_main_l.addRow('Text Mode:', gallery_text_mode) gallery_text_mode_l = QHBoxLayout() gallery_text_mode.setLayout(gallery_text_mode_l) self.gallery_text_elide = QRadioButton('Elide text', gallery_text_mode) self.gallery_text_fit = QRadioButton('Fit text', gallery_text_mode) gallery_text_mode_l.addWidget(self.gallery_text_elide, 0, Qt.AlignLeft) gallery_text_mode_l.addWidget(self.gallery_text_fit, 0, Qt.AlignLeft) gallery_text_mode_l.addWidget(Spacer('h'), 1, Qt.AlignLeft) gallery_font = QHBoxLayout() grid_gallery_main_l.addRow('Font:*', gallery_font) self.font_lbl = QLabel() self.font_size_lbl = QSpinBox() self.font_size_lbl.setMaximum(100) self.font_size_lbl.setMinimum(1) self.font_size_lbl.setToolTip('Font size in pixels') choose_font = QPushButton('Choose font') choose_font.clicked.connect(self.choose_font) gallery_font.addWidget(self.font_lbl, 0, Qt.AlignLeft) gallery_font.addWidget(self.font_size_lbl, 0, Qt.AlignLeft) gallery_font.addWidget(choose_font, 0, Qt.AlignLeft) gallery_font.addWidget(Spacer('h'), 1, Qt.AlignLeft) # grid view / colors grid_colors_group = QGroupBox('Colors', 
grid_view_general_page) grid_view_layout.addWidget(grid_colors_group, 1, Qt.AlignTop) grid_colors_l = QFormLayout() grid_colors_group.setLayout(grid_colors_l) def color_lineedit(): l = QLineEdit() l.setPlaceholderText('Hex colors. Eg.: #323232') l.setMaximumWidth(200) return l self.grid_label_color = color_lineedit() self.grid_title_color = color_lineedit() self.grid_artist_color = color_lineedit() grid_colors_l.addRow('Label color:', self.grid_label_color) grid_colors_l.addRow('Title color:', self.grid_title_color) grid_colors_l.addRow('Artist color:', self.grid_artist_color) style_page = QWidget(self) visual.addTab(style_page, 'Style') visual.setTabEnabled(0, False) visual.setTabEnabled(2, False) visual.setCurrentIndex(1) # Advanced advanced = QTabWidget(self) self.advanced_index = self.right_panel.addWidget(advanced) advanced_misc_scroll = QScrollArea(self) advanced_misc_scroll.setBackgroundRole(QPalette.Base) advanced_misc_scroll.setWidgetResizable(True) advanced_misc = QWidget() advanced_misc_scroll.setWidget(advanced_misc) advanced.addTab(advanced_misc_scroll, 'Misc') advanced_misc_main_layout = QVBoxLayout() advanced_misc.setLayout(advanced_misc_main_layout) misc_controls_layout = QFormLayout() advanced_misc_main_layout.addLayout(misc_controls_layout) # Advanced / Misc / Grid View misc_gridview = QGroupBox('Grid View') misc_controls_layout.addWidget(misc_gridview) misc_gridview_layout = QFormLayout() misc_gridview.setLayout(misc_gridview_layout) # Advanced / Misc / Grid View / scroll speed scroll_speed_spin_box = QSpinBox() scroll_speed_spin_box.setFixedWidth(60) scroll_speed_spin_box.setToolTip('Control the speed when scrolling in'+ ' grid view. DEFAULT: 7') scroll_speed_spin_box.setValue(self.scroll_speed) def scroll_speed(v): self.scroll_speed = v scroll_speed_spin_box.valueChanged[int].connect(scroll_speed) misc_gridview_layout.addRow('Scroll speed:', scroll_speed_spin_box) # Advanced / Misc / Grid View / cache size cache_size_spin_box = QSpinBox() cache_size_spin_box.setFixedWidth(120) cache_size_spin_box.setMaximum(999999999) cache_size_spin_box.setToolTip('This can greatly reduce lags/freezes in the grid view.' + ' Increase the value if you experience lag when scrolling'+ ' through galleries. 
DEFAULT: 200 MiB') def cache_size(c): self.cache_size = (self.cache_size[0], c) cache_size_spin_box.setValue(self.cache_size[1]) cache_size_spin_box.valueChanged[int].connect(cache_size) misc_gridview_layout.addRow('Cache Size (MiB):', cache_size_spin_box) # Advanced / Misc / Regex misc_search = QGroupBox('Search') misc_controls_layout.addWidget(misc_search) misc_search_layout = QFormLayout() misc_search.setLayout(misc_search_layout) search_allow_regex_l = QHBoxLayout() self.search_allow_regex = QCheckBox() self.search_allow_regex.setChecked(gui_constants.ALLOW_SEARCH_REGEX) self.search_allow_regex.adjustSize() self.search_allow_regex.setToolTip('A regex cheatsheet is located at About->Regex Cheatsheet') search_allow_regex_l.addWidget(self.search_allow_regex) search_allow_regex_l.addWidget(QLabel('A regex cheatsheet is located at About->Regex Cheatsheet')) search_allow_regex_l.addWidget(Spacer('h')) misc_search_layout.addRow('Regex:', search_allow_regex_l) # Advanced / Misc / Regex / autocomplete self.search_autocomplete = QCheckBox('*') self.search_autocomplete.setChecked(gui_constants.SEARCH_AUTOCOMPLETE) self.search_autocomplete.setToolTip('Turn autocomplete on/off') misc_search_layout.addRow('Autocomplete', self.search_autocomplete) # Advanced / Misc / Regex / search behaviour self.search_every_keystroke = QRadioButton('Search on every keystroke *', misc_search) misc_search_layout.addRow(self.search_every_keystroke) self.search_on_enter = QRadioButton('Search on return-key *', misc_search) misc_search_layout.addRow(self.search_on_enter) # Advanced / Misc / External Viewer misc_external_viewer = QGroupBox('External Viewer') misc_controls_layout.addWidget(misc_external_viewer) misc_external_viewer_l = QFormLayout() misc_external_viewer.setLayout(misc_external_viewer_l) misc_external_viewer_l.addRow(QLabel(gui_constants.SUPPORTED_EXTERNAL_VIEWER_LBL)) self.external_viewer_path = PathLineEdit(misc_external_viewer, False, '') self.external_viewer_path.setPlaceholderText('Right/Left-click to open folder explorer.'+ ' Leave empty to use default viewer') self.external_viewer_path.setToolTip('Right/Left-click to open folder explorer.'+ ' Leave empty to use default viewer') self.external_viewer_path.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred) misc_external_viewer_l.addRow('Path:', self.external_viewer_path) # Advanced / Gallery advanced_gallery, advanced_gallery_m_l = new_tab('Gallery', advanced) def rebuild_thumbs(): gallerydb.DatabaseEmitter.RUN = False def start_db_activity(): gallerydb.DatabaseEmitter.RUN = True app_popup = ApplicationPopup(self.parent_widget) app_popup.info_lbl.setText("Recreating thumbnail cache... When done, please restart to use new thumbnails.") app_popup.admin_db = gallerydb.AdminDB() app_popup.admin_db.moveToThread(gui_constants.GENERAL_THREAD) app_popup.admin_db.DONE.connect(app_popup.admin_db.deleteLater) app_popup.admin_db.DONE.connect(start_db_activity) app_popup.admin_db.DATA_COUNT.connect(app_popup.prog.setMaximum) app_popup.admin_db.PROGRESS.connect(app_popup.prog.setValue) self.init_gallery_rebuild.connect(app_popup.admin_db.rebuild_thumbs) app_popup.adjustSize() self.init_gallery_rebuild.emit() app_popup.show() rebuild_thumbs_info = QLabel("Clears thumbnail cache and rebuilds it, which can take a while. 
Tip: Useful when you change thumbnail size.") rebuild_thumbs_btn = QPushButton('Rebuild Thumbnail Cache') rebuild_thumbs_btn.adjustSize() rebuild_thumbs_btn.setFixedWidth(rebuild_thumbs_btn.width()) rebuild_thumbs_btn.clicked.connect(rebuild_thumbs) advanced_gallery_m_l.addRow(rebuild_thumbs_info) advanced_gallery_m_l.addRow(rebuild_thumbs_btn) g_data_fixer_group, g_data_fixer_l = groupbox('Gallery Renamer', QFormLayout, advanced_gallery) g_data_fixer_group.setEnabled(False) advanced_gallery_m_l.addRow(g_data_fixer_group) g_data_regex_fix_lbl = QLabel("Rename a gallery through regular expression."+ " A regex cheatsheet is located at About -> Regex Cheatsheet.") g_data_regex_fix_lbl.setWordWrap(True) g_data_fixer_l.addRow(g_data_regex_fix_lbl) self.g_data_regex_fix_edit = QLineEdit() self.g_data_regex_fix_edit.setPlaceholderText("Valid regex") g_data_fixer_l.addRow('Regex:', self.g_data_regex_fix_edit) self.g_data_replace_fix_edit = QLineEdit() self.g_data_replace_fix_edit.setPlaceholderText("Leave empty to delete matches") g_data_fixer_l.addRow('Replace with:', self.g_data_replace_fix_edit) g_data_fixer_options = FlowLayout() g_data_fixer_l.addRow(g_data_fixer_options) self.g_data_fixer_title = QCheckBox("Title", g_data_fixer_group) self.g_data_fixer_artist = QCheckBox("Artist", g_data_fixer_group) g_data_fixer_options.addWidget(self.g_data_fixer_title) g_data_fixer_options.addWidget(self.g_data_fixer_artist) # Advanced / Database advanced_db_page, advanced_db_page_l = new_tab('Database', advanced) advanced.setTabEnabled(2, False) # About about = QTabWidget(self) self.about_index = self.right_panel.addWidget(about) about_happypanda_page = QWidget() about_troubleshoot_page = QWidget() about.addTab(about_happypanda_page, 'About Happypanda') about_layout = QVBoxLayout() about_happypanda_page.setLayout(about_layout) info_lbl = QLabel('<b>Author:</b> <a href=\'https://github.com/Pewpews\'>'+ 'Pewpews</a><br/>'+ 'Chat: <a href=\'https://gitter.im/Pewpews/happypanda\'>'+ 'Gitter chat</a><br/>'+ 'Email: happypandabugs@gmail.com<br/>'+ '<b>Current version {}</b><br/>'.format(gui_constants.vs)+ 'Happypanda was created using:<br/>'+ '- Python 3.4<br/>'+ '- The Qt5 Framework') info_lbl.setOpenExternalLinks(True) about_layout.addWidget(info_lbl, 0, Qt.AlignTop) gpl_lbl = QLabel(gui_constants.GPL) gpl_lbl.setOpenExternalLinks(True) gpl_lbl.setWordWrap(True) about_layout.addWidget(gpl_lbl, 0, Qt.AlignTop) about_layout.addWidget(Spacer('v')) # About / DB Overview about_db_overview, about_db_overview_m_l = new_tab('DB Overview', about) about_stats_tab_widget = misc_db.DBOverview(self.parent_widget) about_db_overview_options = QHBoxLayout() self.tags_treeview_on_start = QCheckBox('Start with application', about_db_overview) make_window_btn = QPushButton('Open in window', about_db_overview) make_window_btn.adjustSize() make_window_btn.setFixedWidth(make_window_btn.width()) about_db_overview_options.addWidget(self.tags_treeview_on_start) about_db_overview_options.addWidget(make_window_btn) def mk_btn_false(): try: make_window_btn.setDisabled(False) except RuntimeError: pass def make_tags_treeview_window(): self.parent_widget.tags_treeview = misc_db.DBOverview(self.parent_widget, True) self.parent_widget.tags_treeview.about_to_close.connect(mk_btn_false) make_window_btn.setDisabled(True) self.parent_widget.tags_treeview.show() if self.parent_widget.tags_treeview: self.parent_widget.tags_treeview.about_to_close.connect(mk_btn_false) make_window_btn.setDisabled(True) 
make_window_btn.clicked.connect(make_tags_treeview_window) about_db_overview_m_l.addRow(about_db_overview_options) about_db_overview_m_l.addRow(about_stats_tab_widget) # About / Troubleshooting about.addTab(about_troubleshoot_page, 'Troubleshooting Guide') troubleshoot_layout = QVBoxLayout() about_troubleshoot_page.setLayout(troubleshoot_layout) guide_lbl = QLabel(gui_constants.TROUBLE_GUIDE) guide_lbl.setTextFormat(Qt.RichText) guide_lbl.setOpenExternalLinks(True) troubleshoot_layout.addWidget(guide_lbl, 0, Qt.AlignTop) troubleshoot_layout.addWidget(Spacer('v')) # About / Regex Cheatsheet about_s_regex = QGroupBox('Regex') about.addTab(about_s_regex, 'Regex Cheatsheet') about_s_regex_l = QFormLayout() about_s_regex.setLayout(about_s_regex_l) about_s_regex_l.addRow('\\\\\\\\', QLabel('Match literally \\')) about_s_regex_l.addRow('.', QLabel('Match any single character')) about_s_regex_l.addRow('^', QLabel('Start of string')) about_s_regex_l.addRow('$', QLabel('End of string')) about_s_regex_l.addRow('\\d', QLabel('Match any decimal digit')) about_s_regex_l.addRow('\\D', QLabel('Match any non-digit character')) about_s_regex_l.addRow('\\s', QLabel('Match any whitespace character')) about_s_regex_l.addRow('\\S', QLabel('Match any non-whitespace character')) about_s_regex_l.addRow('\\w', QLabel('Match any alphanumeric character')) about_s_regex_l.addRow('\\W', QLabel('Match any non-alphanumeric character')) about_s_regex_l.addRow('*', QLabel('Repeat previous character zero or more times')) about_s_regex_l.addRow('+', QLabel('Repeat previous character one or more times')) about_s_regex_l.addRow('?', QLabel('Repeat previous character one or zero times')) about_s_regex_l.addRow('{m, n}', QLabel('Repeat previous character atleast <i>m</i> times but no more than <i>n</i> times')) about_s_regex_l.addRow('(...)', QLabel('Match everything enclosed')) about_s_regex_l.addRow('(a|b)', QLabel('Match either a or b')) about_s_regex_l.addRow('[abc]', QLabel('Match a single character of: a, b or c')) about_s_regex_l.addRow('[^abc]', QLabel('Match a character except: a, b or c')) about_s_regex_l.addRow('[a-z]', QLabel('Match a character in the range')) about_s_regex_l.addRow('[^a-z]', QLabel('Match a character not in the range')) # About / Search tutorial about_search_scroll = QScrollArea() about_search_scroll.setBackgroundRole(QPalette.Base) about_search_scroll.setWidgetResizable(True) about_search_tut = QWidget() about.addTab(about_search_scroll, 'Search Guide') about_search_tut_l = QVBoxLayout() about_search_tut.setLayout(about_search_tut_l) # General about_search_general = QGroupBox('General') about_search_tut_l.addWidget(about_search_general) about_search_general_l = QFormLayout() about_search_general.setLayout(about_search_general_l) about_search_general_l.addRow(QLabel(gui_constants.SEARCH_TUTORIAL_GENERAL)) # Title & Author about_search_tit_aut = QGroupBox('Title and Author') about_search_tut_l.addWidget(about_search_tit_aut) about_search_tit_l = QFormLayout() about_search_tit_aut.setLayout(about_search_tit_l) about_search_tit_l.addRow(QLabel(gui_constants.SEARCH_TUTORIAL_TIT_AUT)) # Namespace & Tags about_search_tags = QGroupBox('Namespace and Tags') about_search_tut_l.addWidget(about_search_tags) about_search_tags_l = QFormLayout() about_search_tags.setLayout(about_search_tags_l) about_search_tags_l.addRow(QLabel(gui_constants.SEARCH_TUTORIAL_TAGS)) about_search_scroll.setWidget(about_search_tut) def add_folder_monitor(self, path=''): if not isinstance(path, str): path = '' l_edit = PathLineEdit() 
l_edit.setText(path) n = self.folders_layout.rowCount() + 1 self.folders_layout.addRow('{}'.format(n), l_edit) def add_ignore_path(self, path='', dir=True): if not isinstance(path, str): path = '' l_edit = PathLineEdit(dir=dir) l_edit.setText(path) n = self.ignore_path_l.rowCount() + 1 self.ignore_path_l.addRow('{}'.format(n), l_edit) def color_checker(self, txt): allow = False if len(txt) == 7: if txt[0] == '#': allow = True return allow def take_all_layout_widgets(self, l): n = l.rowCount() items = [] for x in range(n): item = l.takeAt(x+1) items.append(item.widget()) return items def choose_font(self): tup = QFontDialog.getFont(self) font = tup[0] if tup[1]: self.font_lbl.setText(font.family()) self.font_size_lbl.setValue(font.pointSize()) def reject(self): self.close()
peaceandpizza/happypanda
version/settingsdialog.py
Python
gpl-3.0
49,568
[ "VisIt" ]
848ad41fb51a25387ca1242c1ca8fe52d789de65d7e387253cd38a220b7508ee
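The dialog above funnels every option through the same round-trip: restore_options copies the module-level gui_constants into widgets, and accept copies widget state back into gui_constants and persists it via settings.set and settings.save. A minimal sketch of that pattern, reduced to a single checkbox and assuming the app's own gui_constants and settings modules are importable:

# Minimal sketch of the restore/accept round-trip used by SettingsDialog above.
from PyQt5.QtWidgets import QCheckBox, QWidget

import gui_constants
import settings

class MiniSettings(QWidget):
    def __init__(self, parent=None):
        super().__init__(parent)
        self.enable_monitor = QCheckBox('Enable directory monitoring', self)
        self.restore_options()

    def restore_options(self):
        # widgets are populated from the module-level constants...
        self.enable_monitor.setChecked(gui_constants.ENABLE_MONITOR)

    def accept(self):
        # ...and on accept the constants are updated and persisted
        gui_constants.ENABLE_MONITOR = self.enable_monitor.isChecked()
        settings.set(gui_constants.ENABLE_MONITOR, 'Application', 'enable monitor')
        settings.save()
        self.close()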
# Copyright (C) 2017 Antoine Fourmy <antoine dot fourmy at gmail dot com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from .graph import Graph from autonomous_system.AS import AS_class from objects import objects import random import re import warnings from copy import copy from ip_networks.configuration import RouterConfiguration from objects.objects import * from miscellaneous.network_functions import * from math import cos, sin, asin, radians, sqrt, ceil, log from collections import defaultdict, deque, OrderedDict from heapq import heappop, heappush, nsmallest from operator import getitem, itemgetter from itertools import combinations from miscellaneous.union_find import UnionFind try: import numpy as np from cvxopt import matrix, glpk, solvers except ImportError: warnings.warn('Package missing: linear programming functions will fail') class Network(Graph): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.nodes = {} self.plinks = {} self.l2links = {} self.l3links = {} self.traffics = {} self.interfaces = set() # pn for 'pool network' self.pn = { 'node': self.nodes, 'plink': self.plinks, 'l2link': self.l2links, 'l3link': self.l3links, 'traffic': self.traffics, 'interface': self.interfaces } self.pnAS = {} # useful for tests and listbox when we want to retrieve an object # based on its name. The only object that needs changing when a object # is renamed by the user. self.name_to_id = {} # dicts used for IP networks # - finds all layer-n segments networks, i.e all layer-n-capable # interfaces that communicate via a layer-(n-1) device self.ma_segments = defaultdict(set) # string IP <-> IP mapping for I/E + parameters saving self.ip_to_oip = {} # osi layer to devices self.osi_layers = { 3: ('router', 'host', 'cloud'), 2: ('switch', 'optical switch'), 1: ('regenerator', 'splitter', 'antenna') } # function filtering AS either per layer or per subtype def ASftr(self, filtering_mode, *sts): if filtering_mode == 'layer': keep = lambda r: r.layer in sts else: keep = lambda r: r.AS_type in sts return filter(keep, self.pnAS.values()) # function that retrieves all IP addresses attached to a node, including # it's loopback IP. 
def attached_ips(self, src): for _, plink in self.graph[src.id]['plink']: yield plink('ip_address', src) yield src.ip_address # function that retrieves all next-hop IP addresses attached to a node, # including the loopback addresses of its neighbors def nh_ips(self, src): for nh, plink in self.graph[src.id]['plink']: yield plink('ip_address', nh) yield nh.ip_address def OIPf(self, str_ip, interface=None): # creates or retrieves an OIP based on a string IP ('IP/subnet' format) # the interface should always be specified at creation if str_ip in self.ip_to_oip: return self.ip_to_oip[str_ip] if interface: try: ip_addr, subnet = str_ip.split('/') OIP = IPAddress(ip_addr, int(subnet), interface) except ValueError: # wrong IP address format OIP = None self.ip_to_oip[str_ip] = OIP return OIP def AS_factory( self, AS_type = 'RIP', name = None, id = 0, plinks = set(), nodes = set(), imp = False ): if not name: name = 'AS' + str(self.cpt_AS) if name not in self.pnAS: # creation of the AS self.pnAS[name] = AS_class[AS_type]( self.view, name, id, plinks, nodes, imp ) # increase the AS counter by one self.cpt_AS += 1 return self.pnAS[name] ## Retrieve the credentials def get_credentials(self, node): credentials = self.view.controller.credentials_window.get_credentials() for property in ('username', 'password', 'enable_password', 'ip_address'): value = getattr(node, property) if value: credentials[property] = value return credentials ## Conversion methods and property -> type mapping # methods used to convert a string to an object # convert an AS name to an AS def convert_AS(self, AS_name): return self.AS_factory(name=AS_name) # convert a string IP ('IP/subnet') to an 'Object IP' def convert_IP(self, ip): return self.OIPf(ip) def find_edge_nodes(self, AS): AS.pAS['edge'].clear() for node in AS.nodes: if any( n not in AS.nodes for n, _ in self.graph[node.id]['plink'] ): AS.pAS['edge'].add(node) yield node # site management def add_to_site(self, site, *objects): for obj in objects: site.ps[obj.class_type].add(obj) def remove_from_site(self, site, *objects): for obj in objects: site.ps[obj.class_type].remove(obj) def update_AS_topology(self): for AS in self.ASftr('subtype', 'ISIS', 'OSPF', 'BGP'): # for all OSPF, IS-IS and BGP AS, fill the ABR/L1L2/nodes/links # sets based on nodes area (ISIS, BGP) and vice-versa (OSPF) AS.update_AS_topology() def segment_finder(self, layer): # we associate a set of physical links to each layer-n segment. # at this point, there isn't any IP allocated yet: we cannot assign # IP addresses until we know the network layer-n segment topology. # we use that topology to create layer-n virtual connection # we keep the set of all physical links we've already visited visited_plinks = set() # we loop through all the layer-n-networks boundaries for router in self.ftr('node', *self.osi_layers[layer]): # we start by looking at all attached physical links, and when we find one # that hasn't been visited yet, we don't stop until we've discovered # all network's physical links (i.e until we've reached all boundaries # of that networks: routers or host). 
            for neighbor, plink in self.graph[router.id]['plink']:
                if plink in visited_plinks:
                    continue
                visited_plinks.add(plink)
                # we update the set of physical links of the network
                # as we discover them
                current_network = {(plink, router)}
                if any(neighbor.subtype in self.osi_layers[l]
                                                for l in range(1, layer)):
                    # we add the neighbor of the router in the stack: we'll fill
                    # the stack with nodes as we discover them, provided that
                    # these nodes are not boundaries, i.e not router or host
                    stack_network = [neighbor]
                    visited_nodes = {router}
                    while stack_network:
                        curr_node = stack_network.pop()
                        for node, adj_plink in self.graph[curr_node.id]['plink']:
                            if node in visited_nodes:
                                continue
                            visited_plinks.add(adj_plink)
                            visited_nodes.add(node)
                            if any(node.subtype in self.osi_layers[l]
                                                for l in range(1, layer)):
                                stack_network.append(node)
                            else:
                                current_network.add((adj_plink, node))
                else:
                    current_network.add((plink, neighbor))
                self.ma_segments[layer].add(frozenset(current_network))

    def multi_access_network(self, layer):
        # we create the virtual connections at layer 2 and 3, that is the
        # links between adjacent Ln devices (L2-L2, L3-L3).
        link_type = 'l{layer}link'.format(layer = layer)
        vc_type = 'l{layer}vc'.format(layer = layer)
        for ma_network in self.ma_segments[layer]:
            for source_plink, node in ma_network:
                allowed_neighbors = ma_network - {(source_plink, node)}
                for destination_plink, neighbor in allowed_neighbors:
                    if not self.is_connected(node, neighbor, link_type, vc_type):
                        vc = self.lf(
                                     source = node,
                                     destination = neighbor,
                                     subtype = vc_type
                                     )
                        vc('link', node, source_plink)
                        vc('link', neighbor, destination_plink)

    def vc_creation(self):
        # clear all existing multi-access segments
        self.ma_segments.clear()
        for i in (2, 3):
            type, subtype = 'l{}link'.format(i), 'l{}vc'.format(i)
            self.segment_finder(i)
            self.multi_access_network(i)

    def clear_ip(self):
        # remove all existing IP addresses
        self.ip_to_oip.clear()
        # reset all traffic links source and destination IP as new IP will
        # be assigned
        for traffic in self.traffics.values():
            traffic.source_IP = traffic.destination_IP = None

    def ip_allocation(self):
        self.clear_ip()
        # we will perform the IP addressing of all subnetworks with VLSM
        # we first sort all subnetworks in increasing order of size, then
        # compute which subnet is needed
        subnetworks = sorted(list(self.ma_segments[3]), key=len)
        subnetwork_ip = '10.0.0.0'
        while subnetworks:
            # we retrieve the biggest subnetwork not yet treated
            subnetwork = subnetworks.pop()
            # both network and broadcast addresses are excluded:
            # we add 2 to the size of the subnetwork
            size = ceil(log(len(subnetwork) + 2, 2))
            subnet = 32 - size
            for idx, (plink, node) in enumerate(subnetwork, 1):
                curr_ip = ip_incrementer(subnetwork_ip, idx)
                ip_addr = IPAddress(curr_ip, subnet, plink('interface', node))
                self.ip_to_oip[str(ip_addr)] = ip_addr
                plink('ip_address', node, ip_addr)
                plink.subnetwork = ip_addr.network
            subnetwork_ip = ip_incrementer(subnetwork_ip, 2**size)
        # allocate loopback addresses using the 192.168.0.0/16 private
        # address space
        for idx, router in enumerate(self.ftr('node', 'router'), 1):
            router.ip_address = '192.168.{}.{}'.format(idx // 255, idx % 255)

    def mac_allocation(self):
        # ranges of private MAC addresses
        # x2:xx:xx:xx:xx:xx
        # x6:xx:xx:xx:xx:xx
        # xA:xx:xx:xx:xx:xx
        # xE:xx:xx:xx:xx:xx
        # allocation of mac_x2 and mac_x6 for interfaces MAC address
        mac_x2, mac_x6 = '020000000000', '060000000000'
        for id, plink in enumerate(self.plinks.values(), 1):
            macS, macD = mac_incrementer(mac_x2, id), mac_incrementer(mac_x6, id)
            source_mac = ':'.join(macS[i:i+2] for i in range(0, 12, 2))
            destination_mac = ':'.join(macD[i:i+2] for i in range(0, 12, 2))
            plink.interfaceS.mac_address = source_mac
            plink.interfaceD.mac_address = destination_mac
        # allocation of mac_xA for switches base (hardware) MAC address
        mac_xA = '0A0000000000'
        for id, switch in enumerate(self.ftr('node', 'switch'), 1):
            switch.base_mac_address = mac_incrementer(mac_xA, id)

    def interface_allocation(self):
        for node in self.nodes.values():
            for idx, (_, adj_plink) in enumerate(self.graph[node.id]['plink']):
                adj_plink('name', node, 'FastEthernet 0/{}'.format(idx))

    def interface_configuration(self):
        self.mac_allocation()
        self.ip_allocation()
        self.interface_allocation()

    def mininet_configuration(self):
        first_letter = {'host': 'h', 'sdn_switch': 's', 'sdn_controller': 'c'}
        for subtype in ('host', 'sdn_switch', 'sdn_controller'):
            letter = first_letter[subtype]
            for idx, node in enumerate(self.ftr('node', subtype), 1):
                node.mininet_name = letter + str(idx)

    # WC physical link dimensioning: this computes the maximum traffic a
    # physical link may have to carry considering all possible physical link
    # failures. NetDim fails all physical links of the network one by one,
    # and evaluates the impact in terms of bandwidth for each physical link.
    # The highest value is kept in memory, as well as the physical link whose
    # failure induces this value.
    def plink_dimensioning(self):
        # we need to remove all failures before dimensioning the physical links:
        # the set of failed physical links will be redefined, but we also need
        # the icons to be cleaned from the canvas
        self.view.remove_failures()
        # we consider each physical link in the network to be failed, one by one
        for failed_plink in self.plinks.values():
            self.failed_obj = {failed_plink}
            # the physical link being failed, we will recreate all routing tables
            # then use the path finding procedure to map the traffic flows
            self.routing_table_creation()
            self.path_finder()
            for plink in self.plinks.values():
                for dir in ('SD', 'DS'):
                    curr_traffic = getattr(plink, 'traffic' + dir)
                    if curr_traffic > getattr(plink, 'wctraffic' + dir):
                        setattr(plink, 'wctraffic' + dir, curr_traffic)
                        setattr(plink, 'wcfailure', str(failed_plink))
        self.failed_obj.clear()

    # this function creates the ARP table of every router
    def arpt_creation(self):
        # clear the existing ARP tables
        for router in self.ftr('node', 'router'):
            router.arpt.clear()
        for l3_segments in self.ma_segments[3]:
            for (plinkA, routerA) in l3_segments:
                for (plinkB, routerB) in l3_segments:
                    remote_ip = plinkB('ip_address', routerB)
                    remote_mac = plinkB('mac_address', routerB)
                    outgoing_if = plinkA('name', routerA)
                    routerA.arpt[remote_ip] = (remote_mac, outgoing_if)

    def STP_update(self):
        for AS in self.ASftr('subtype', 'STP'):
            AS.root_election()
            AS.build_SPT()

    def st_creation(self):
        # clear the existing switching tables
        for switch in self.ftr('node', 'switch'):
            switch.st.clear()
        for AS in self.ASftr('subtype', 'STP'):
            for switch in AS.nodes:
                self.ST_builder(switch, AS.pAS['link'] - AS.SPT_links)
        # if the switch isn't part of an STP AS, we build its switching table
        # without excluding any physical link
        for switch in self.ftr('node', 'switch'):
            if not switch.st:
                self.ST_builder(switch)

    def reset_traffic(self):
        # reset the traffic for all physical links
        for plink in self.plinks.values():
            plink.trafficSD = plink.trafficDS = 0.
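
    # A small illustrative helper (a sketch, not part of the original API)
    # showing the VLSM arithmetic used by ip_allocation above: the network
    # and broadcast addresses are excluded, so a segment with k interfaces
    # needs the smallest power of two >= k + 2 addresses.
    @staticmethod
    def vlsm_prefix_length(nb_interfaces):
        # e.g. 6 interfaces -> ceil(log2(8)) = 3 host bits -> a /29 subnet
        size = ceil(log(nb_interfaces + 2, 2))
        return 32 - size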
def path_finder(self): self.reset_traffic() for traffic in self.traffics.values(): src, dest = traffic.source, traffic.destination if all(node.subtype == 'router' for node in (src, dest)): self.RFT_path_finder(traffic) else: _, traffic.path = self.A_star(src, dest) if not traffic.path: print('no path found for {}'.format(traffic)) ## A) Ethernet switching table def ST_builder(self, source, excluded_plinks=None): if not excluded_plinks: excluded_plinks = set() visited = set() heap = [(source, [], [], None)] while heap: node, path_node, path_plink, ex_int = heappop(heap) if node not in visited: visited.add(node) for neighbor, l2vc in self.gftr(node, 'l2link', 'l2vc'): adj_plink = l2vc('link', node) remote_plink = l2vc('link', neighbor) if adj_plink in path_plink: continue if adj_plink in excluded_plinks: continue if node == source: ex_int = adj_plink('interface', source) mac = remote_plink('mac_address', neighbor) source.st[mac] = ex_int heappush(heap, (neighbor, path_node + [neighbor], path_plink + [adj_plink], ex_int)) if path_plink: plink, ex_tk = path_plink[-1], path_plink[0] source.st[plink.interfaceS.mac_address] = ex_tk('interface', source) source.st[plink.interfaceD.mac_address] = ex_tk('interface', source) ## 1) RFT-based routing and dimensioning def RFT_path_finder(self, traffic): source, destination = traffic.source, traffic.destination src_ip, dst_ip = traffic.source_IP, traffic.destination_IP valid = bool(src_ip) & bool(dst_ip) if valid: dst_ntw = dst_ip.network # (current node, physical link from which the data flow comes, dataflow) heap = [(source, None, None)] path = set() path_str = [] while heap and valid: curr_node, curr_plink, dataflow = heap.pop() path.add(curr_node) # data flow creation if not dataflow: dataflow = DataFlow(src_ip, dst_ip) dataflow.throughput = traffic.throughput if curr_node == destination: continue if curr_node.subtype == 'router': if dst_ntw in curr_node.rt: routes = curr_node.rt[dst_ntw] # if we cannot find the destination address in the routing table, # and there is a default route, we use it. 
                elif '0.0.0.0' in curr_node.rt:
                    routes = curr_node.rt['0.0.0.0']
                else:
                    warnings.warn('Path not found for {}'.format(traffic))
                    break
                # we count the number of physical links in failure
                failed_plinks = sum(r[-1] in self.failed_obj for r in routes)
                # and remove them from the traffic share so that they are
                # ignored for physical link dimensioning
                for idx, route in enumerate(routes):
                    _, nh_ip, ex_int, _, router, ex_tk = route
                    # we create a new dataflow based on the old one
                    new_dataflow = copy(dataflow)
                    # the throughput depends on the number of ECMP routes
                    new_dataflow.throughput /= len(routes) - failed_plinks
                    # the source MAC address is the MAC address of the interface
                    # used to exit the current node
                    new_dataflow.src_mac = ex_int.mac_address
                    # the destination MAC address is the MAC address
                    # corresponding to the next-hop IP address in the ARP table
                    # we take the first element as the ARP table is built as
                    # a mapping IP <-> (MAC, outgoing interface)
                    new_dataflow.dst_mac = curr_node.arpt[nh_ip][0]
                    sd = (curr_node == ex_tk.source)*'SD' or 'DS'
                    ex_tk.__dict__['traffic' + sd] += new_dataflow.throughput
                    # add the exit physical link to the path
                    path.add(ex_tk)
                    # the next-hop is the node at the end of the exit physical link
                    next_hop = ex_tk.source if sd == 'DS' else ex_tk.destination
                    heap.append((next_hop, ex_tk, new_dataflow))
                    if not idx:
                        path_str.append('''
        Current node: {curr_node}
        Next-hop: {next_hop}
        Next-hop IP address: {nh_ip}
        Destination MAC address: {dst_mac}
        Outgoing physical link: {ex_tk}
        Outgoing interface: {ex_int}'''.format(
                            curr_node = curr_node,
                            next_hop = next_hop,
                            nh_ip = nh_ip,
                            dst_mac = new_dataflow.dst_mac,
                            ex_tk = ex_tk,
                            ex_int = ex_int
                        ))

            if curr_node.subtype == 'switch':
                # we find the exit interface based on the destination MAC
                # address in the switching table, the dataflow itself remains
                # unaltered
                ex_int = curr_node.st[dataflow.dst_mac]
                ex_tk = ex_int.link
                path.add(ex_tk)
                # we append the next hop to the heap
                if ex_tk.source == curr_node:
                    next_hop = ex_tk.destination
                else:
                    next_hop = ex_tk.source
                heap.append((next_hop, ex_tk, dataflow))
                path_str.append('''
        Current node: {curr_node}
        Next-hop: {next_hop}
        Outgoing physical link: {ex_tk}
        Outgoing interface: {ex_int}'''.format(
                    curr_node = curr_node,
                    next_hop = next_hop,
                    ex_tk = ex_tk,
                    ex_int = ex_int
                ))

        print(path)
        traffic.path = path
        return path, path_str

    ## 2) Add connected interfaces to the RFT
    def static_RFT_builder(self, source):

        for nh_node, sr in self.gftr(source, 'l3link', 'static route', False):
            source.rt[sr.dst_sntw] = {('S', sr.nh_ip, None, 0, nh_node, None)}

        for neighbor, adj_l3vc in self.gftr(source, 'l3link', 'l3vc'):
            # if adj_plink in self.failed_obj:
            #     continue
            ex_ip = adj_l3vc('ip_address', neighbor)
            ex_int = adj_l3vc('interface', source)
            adj_plink = adj_l3vc('link', source)
            # we compute the subnetwork of the attached
            # interface: it is a directly connected interface
            source.rt[adj_plink.subnetwork] = {('C', ex_ip, ex_int,
                                                    0, neighbor, adj_plink)}

    def switching_table_creation(self):
        self.arpt_creation()
        self.STP_update()
        self.st_creation()

    def subnetwork_update(self):
        for ip in self.ip_to_oip.values():
            ip.interface.link.subnetwork = ip.network

    def routing_table_creation(self):
        self.subnetwork_update()
        # clear the existing routing tables
        for node in self.ftr('node', 'router', 'host'):
            node.rt.clear()
        # we compute the routing table of all routers
        for AS in self.ASftr('subtype', 'RIP', 'ISIS', 'OSPF'):
            AS.build_RFT()
        for router in self.ftr('node', 'router', 'host'):
            self.static_RFT_builder(router)

    def route(self):
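        # entry point used by the dimensioning and tabu-search code above:
        # rebuild every routing table, then re-map all traffic flows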
self.routing_table_creation() self.path_finder() ## Shortest path(s) algorithms ## 1) Dijkstra algorithm def dijkstra( self, source, target, allowed_plinks = None, allowed_nodes = None ): if allowed_plinks is None: allowed_plinks = set(self.plinks.values()) if allowed_nodes is None: allowed_nodes = set(self.nodes.values()) prec_node = {i: None for i in allowed_nodes} prec_plink = {i: None for i in allowed_nodes} visited = set() dist = {i: float('inf') for i in allowed_nodes} dist[source] = 0 heap = [(0, source)] while heap: dist_node, node = heappop(heap) if node not in visited: visited.add(node) for neighbor, adj_plink in self.graph[node.id]['plink']: # we ignore what's not allowed (not in the AS or in failure) if neighbor not in allowed_nodes: continue if adj_plink not in allowed_plinks: continue dist_neighbor = dist_node + adj_plink('cost', node) if dist_neighbor < dist[neighbor]: dist[neighbor] = dist_neighbor prec_node[neighbor] = node prec_plink[neighbor] = adj_plink heappush(heap, (dist_neighbor, neighbor)) # traceback the path from target to source curr, path_plink = target, [prec_plink[target]] while curr != source: curr = prec_node[curr] path_plink.append(prec_plink[curr]) # we return: # - the dist dictionnary, that contains the distance from the source # to any other node in the tree # - the shortest path from source to target # - all edges that belong to the Shortest Path Tree # we need all three variables for Suurbale algorithm below return dist, path_plink[:-1][::-1], filter(None, prec_plink.values()) ## 2) A* algorithm for CSPF modelization def A_star( self, source, target, excluded_plinks = None, excluded_nodes = None, path_constraints = None, allowed_plinks = None, allowed_nodes = None ): # initialize parameters if excluded_nodes is None: excluded_nodes = set() if excluded_plinks is None: excluded_plinks = set() if path_constraints is None: path_constraints = [] if allowed_plinks is None: allowed_plinks = set(self.plinks.values()) if allowed_nodes is None: allowed_nodes = set(self.nodes.values()) pc = [target] + path_constraints[::-1] visited = set() heap = [(0, source, [source], [], pc)] while heap: dist, node, nodes, plinks, pc = heappop(heap) if node not in visited: visited.add(node) if node == pc[-1]: visited.clear() heap.clear() pc.pop() if not pc: return nodes, plinks for neighbor, adj_plink in self.graph[node.id]['plink']: # excluded and allowed nodes if neighbor not in allowed_nodes - excluded_nodes: continue # excluded and allowed physical links if adj_plink not in allowed_plinks - excluded_plinks: continue heappush(heap, ( dist + adj_plink('cost', node), neighbor, nodes + [neighbor], plinks + [adj_plink], pc ) ) return [], [] ## 3) Bellman-Ford algorithm def bellman_ford( self, source, target, cycle = False, excluded_plinks = None, excluded_nodes = None, allowed_plinks = None, allowed_nodes = None ): # initialize parameters if excluded_nodes is None: excluded_nodes = set() if excluded_plinks is None: excluded_plinks = set() if allowed_plinks is None: allowed_plinks = set(self.plinks.values()) if allowed_nodes is None: allowed_nodes = set(self.nodes.values()) n = len(allowed_nodes) prec_node = {i: None for i in allowed_nodes} prec_plink = {i: None for i in allowed_nodes} dist = {i: float('inf') for i in allowed_nodes} dist[source] = 0 for i in range(n+2): negative_cycle = False for node in allowed_nodes: for neighbor, adj_plink in self.graph[node.id]['plink']: sd = (node == adj_plink.source)*'SD' or 'DS' # excluded and allowed nodes if neighbor not in 
allowed_nodes - excluded_nodes: continue # excluded and allowed physical links if adj_plink not in allowed_plinks - excluded_plinks: continue dist_neighbor = dist[node] + getattr(adj_plink, 'cost' + sd) if dist_neighbor < dist[neighbor]: dist[neighbor] = dist_neighbor prec_node[neighbor] = node prec_plink[neighbor] = adj_plink negative_cycle = True # traceback the path from target to source if dist[target] != float('inf') and not cycle: curr, path_node, path_plink = target, [target], [prec_plink[target]] while curr != source: curr = prec_node[curr] path_plink.append(prec_plink[curr]) path_node.append(curr) return path_node[::-1], path_plink[:-1][::-1] # if we want a cycle, and one exists, we find it if cycle and negative_cycle: curr, path_node, path_plink = target, [target], [prec_plink[target]] # return the cycle itself (for the cycle cancelling algorithm) # starting from the target, we go through the predecessors # we find any cycle (we don't necessarily have to come back to # the target). while curr not in path_node: curr = prec_node[curr] path_plink.append(prec_plink[curr]) path_node.append(curr) return path_node[::-1], path_plink[:-1][::-1] # if we didn't find a path, and were not looking for a cycle, # we return empty lists return [], [] ## 4) Floyd-Warshall algorithm def floyd_warshall(self): nodes = list(self.nodes.values()) n = len(nodes) W = [[0]*n for _ in range(n)] for id1, n1 in enumerate(nodes): for id2, n2 in enumerate(nodes): if id1 != id2: for neighbor, plink in self.graph[n1.id]['plink']: if neighbor == n2: W[id1][id2] = plink.costSD break else: W[id1][id2] = float('inf') for k in range(n): for u in range(n): for v in range(n): W[u][v] = min(W[u][v], W[u][k] + W[k][v]) if any(W[v][v] < 0 for v in range(n)): return False else: all_length = defaultdict(dict) for id1, n1 in enumerate(nodes): for id2, n2 in enumerate(nodes): all_length[n1][n2] = W[id1][id2] return all_length ## 5) DFS (all loop-free paths) def all_paths(self, source, target=None): # generates all loop-free paths from source to optional target path = [source] seen = {source} def find_all_paths(): dead_end = True node = path[-1] if node == target: yield list(path) else: for neighbor, adj_plink in self.graph[node.id]['plink']: if neighbor not in seen: dead_end = False seen.add(neighbor) path.append(neighbor) yield from find_all_paths() path.pop() seen.remove(neighbor) if not target and dead_end: yield list(path) yield from find_all_paths() ## Link-disjoint / link-and-node-disjoint shortest pair algorithms ## 1) A* link-disjoint pair search def A_star_shortest_pair(self, source, target, a_n=None, a_t=None): # To find the shortest pair from the source to the target, we look # for the shortest path going from the source to the source, with # the target as a 'path constraint'. # Each path is stored with sets of allowed nodes and physical links that will # contains what belongs to the first path, once we've reached the target. # if a_n is None: # a_n = AS.nodes # if a_t is None: # a_t = AS.pAS['link'] if a_t is None: a_t = set(self.plinks.values()) if a_n is None: a_n = set(self.nodes.values()) visited = set() # in the heap, we store e_o, the list of excluded objects, which is # empty until we reach the target. 
        heap = [(0, source, [], set())]
        while heap:
            dist, node, path_plink, e_o = heappop(heap)
            if (node, tuple(path_plink)) not in visited:
                visited.add((node, tuple(path_plink)))
                if node == target:
                    e_o = set(path_plink)
                if node == source and e_o:
                    return [], path_plink
                for neighbor, adj_plink in self.graph[node.id]['plink']:
                    sd = (node == adj_plink.source)*'SD' or 'DS'
                    # we ignore what's not allowed (not in the AS or in failure
                    # or in the path we've used to reach the target)
                    if neighbor not in a_n or adj_plink not in a_t-e_o:
                        continue
                    cost = getattr(adj_plink, 'cost' + sd)
                    heappush(heap, (dist + cost, neighbor,
                                            path_plink + [adj_plink], e_o))
        return [], []

    ## 2) Bhandari algorithm for link-disjoint shortest pair
    def bhandari(self, source, target, a_n=None, a_t=None):
        # - we find the shortest path from source to target using A* algorithm
        # - we replace the bidirectional physical links of the shortest path
        # with unidirectional physical links with a negative cost
        # - we run the Bellman-Ford algorithm to find the new
        # shortest path from source to target
        # - we remove all overlapping physical links
        if a_t is None:
            a_t = set(self.plinks.values())
        if a_n is None:
            a_n = set(self.nodes.values())

        # we store the cost value in the flow parameters, since Bhandari's
        # algorithm relies on graph transformation, and the costs of the edges
        # will be modified.
        # at the end, we will revert the costs to their original values
        for plink in a_t:
            plink.flowSD = plink.costSD
            plink.flowDS = plink.costDS

        _, first_path = self.A_star(
                                    source,
                                    target,
                                    allowed_plinks = a_t,
                                    allowed_nodes = a_n
                                    )

        # we set the cost of the shortest path physical links to float('inf'),
        # which is equivalent to just removing them. In the reverse direction,
        # we set the cost to -1.
        current_node = source
        for plink in first_path:
            dir = 'SD' * (current_node == plink.source) or 'DS'
            reverse_dir = 'SD' if dir == 'DS' else 'DS'
            setattr(plink, 'cost' + dir, float('inf'))
            setattr(plink, 'cost' + reverse_dir, -1)
            current_node = plink.destination if dir == 'SD' else plink.source

        _, second_path = self.bellman_ford(
                                           source,
                                           target,
                                           allowed_plinks = a_t,
                                           allowed_nodes = a_n
                                           )

        for plink in a_t:
            plink.costSD = plink.flowSD
            plink.costDS = plink.flowDS

        return set(first_path) ^ set(second_path)

    def suurbale(self, source, target, a_n=None, a_t=None):
        # - we find the shortest path tree from the source with the dijkstra
        # algorithm
        # - we change the cost of all edges (a, b) such that
        # new_c(a, b) = c(a, b) - d(s, b) + d(s, a): all tree edges have a
        # resulting cost of 0 with that formula, since for a tree edge
        # c(a, b) = d(s, b) - d(s, a)
        # - we run the A* algorithm to find the new
        # shortest path from source to target
        # - we remove all overlapping physical links
        if a_t is None:
            a_t = set(self.plinks.values())
        if a_n is None:
            a_n = set(self.nodes.values())

        # we store the cost value in the flow parameters, since Suurballe's
        # algorithm relies on graph transformation, and the costs of the edges
        # will be modified.
        # at the end, we will revert the costs to their original values
        for plink in a_t:
            plink.flowSD = plink.costSD
            plink.flowDS = plink.costDS

        dist, first_path, tree = self.dijkstra(
                                               source,
                                               target,
                                               allowed_plinks = a_t,
                                               allowed_nodes = a_n
                                               )

        # we change the physical links' cost with the formula described above
        for plink in tree:
            # new_c(a, b) = c(a, b) - D(b) + D(a) where D(x) is the
            # distance from the source to x.
            src, dest = plink.source, plink.destination
            plink.costSD += dist[src] - dist[dest]
            plink.costDS += dist[dest] - dist[src]

        # we exclude the edges of the shortest path (infinite cost)
        current_node = source
        for plink in first_path:
            dir = 'SD' * (current_node == plink.source) or 'DS'
            setattr(plink, 'cost' + dir, float('inf'))
            current_node = plink.destination if dir == 'SD' else plink.source

        _, second_path = self.A_star(
                                     source,
                                     target,
                                     allowed_plinks = a_t,
                                     allowed_nodes = a_n
                                     )

        # revert the costs to their original values, as announced above
        for plink in a_t:
            plink.costSD = plink.flowSD
            plink.costDS = plink.flowDS

        return set(first_path) ^ set(second_path)

    ## Flow algorithms

    def reset_flow(self):
        for plink in self.plinks.values():
            plink.flowSD = plink.flowDS = 0

    ## 1) Ford-Fulkerson algorithm

    def augment_ff(self, val, curr_node, target, visit):
        visit[curr_node] = True
        if curr_node == target:
            return val
        for neighbor, adj_plink in self.graph[curr_node.id]['plink']:
            direction = curr_node == adj_plink.source
            sd, ds = direction*'SD' or 'DS', direction*'DS' or 'SD'
            cap = getattr(adj_plink, 'capacity' + sd)
            current_flow = getattr(adj_plink, 'flow' + sd)
            if cap > current_flow and not visit[neighbor]:
                residual_capacity = min(val, cap - current_flow)
                global_flow = self.augment_ff(
                                              residual_capacity,
                                              neighbor,
                                              target,
                                              visit
                                              )
                if global_flow > 0:
                    adj_plink.__dict__['flow' + sd] += global_flow
                    adj_plink.__dict__['flow' + ds] -= global_flow
                    return global_flow
        return False

    def ford_fulkerson(self, s, d):
        self.reset_flow()
        while self.augment_ff(float('inf'), s, d,
                              {n: 0 for n in self.pn['node'].values()}):
            pass
        # flow leaving from the source
        return sum(
                   getattr(adj, 'flow' + ((s == adj.source)*'SD' or 'DS'))
                   for _, adj in self.graph[s.id]['plink']
                   )

    ## 2) Edmonds-Karp algorithm

    def augment_ek(self, source, destination):
        res_cap = {n: 0 for n in self.pn['node'].values()}
        augmenting_path = {n: None for n in self.pn['node'].values()}
        Q = deque()
        Q.append(source)
        augmenting_path[source] = source
        res_cap[source] = float('inf')
        while Q:
            curr_node = Q.popleft()
            for neighbor, adj_plink in self.graph[curr_node.id]['plink']:
                direction = curr_node == adj_plink.source
                sd, ds = direction*'SD' or 'DS', direction*'DS' or 'SD'
                cap = getattr(adj_plink, 'capacity' + sd)
                flow = getattr(adj_plink, 'flow' + sd)
                residual = cap - flow
                if residual and augmenting_path[neighbor] is None:
                    augmenting_path[neighbor] = curr_node
                    res_cap[neighbor] = min(res_cap[curr_node], residual)
                    if neighbor == destination:
                        break
                    else:
                        Q.append(neighbor)
        return augmenting_path, res_cap[destination]

    def edmonds_karp(self, source, destination):
        self.reset_flow()
        while True:
            augmenting_path, global_flow = self.augment_ek(source, destination)
            if not global_flow:
                break
            curr_node = destination
            while curr_node != source:
                # find the physical link between the two nodes
                prec_node = augmenting_path[curr_node]
                find_plink = lambda p: getitem(p, 0) == prec_node
                (_, plink) ,= filter(find_plink, self.graph[curr_node.id]['plink'])
                # define sd and ds depending on how the physical link is defined
                direction = curr_node == plink.source
                sd, ds = direction*'SD' or 'DS', direction*'DS' or 'SD'
                plink.__dict__['flow' + ds] += global_flow
                plink.__dict__['flow' + sd] -= global_flow
                curr_node = prec_node
        return sum(
                   getattr(adj, 'flow' + ((source == adj.source)*'SD' or 'DS'))
                   for _, adj in self.graph[source.id]['plink']
                   )

    ## 3) Dinic algorithm

    def augment_di(self, level, flow, curr_node, dest, limit):
        if limit <= 0:
            return 0
        if curr_node == dest:
            return limit
        val = 0
        for neighbor, adj_plink in self.graph[curr_node.id]['plink']:
            direction = curr_node == adj_plink.source
            sd, ds = direction*'SD' or 'DS', direction*'DS' or 'SD'
cap = getattr(adj_plink, 'capacity' + sd) flow = getattr(adj_plink, 'flow' + sd) residual = cap - flow if level[neighbor] == level[curr_node] + 1 and residual > 0: z = min(limit, residual) aug = self.augment_di(level, flow, neighbor, dest, z) adj_plink.__dict__['flow' + sd] += aug adj_plink.__dict__['flow' + ds] -= aug val += aug limit -= aug if not val: level[curr_node] = None return val def dinic(self, source, destination): self.reset_flow() Q = deque() total = 0 while True: Q.appendleft(source) level = {node: None for node in self.nodes.values()} level[source] = 0 while Q: curr_node = Q.pop() for neighbor, adj_plink in self.graph[curr_node.id]['plink']: direction = curr_node == adj_plink.source sd = direction*'SD' or 'DS' cap = getattr(adj_plink, 'capacity' + sd) flow = getattr(adj_plink, 'flow' + sd) if level[neighbor] is None and cap > flow: level[neighbor] = level[curr_node] + 1 Q.appendleft(neighbor) if level[destination] is None: return flow, total limit = sum( getattr(adj_plink, 'capacity' + ((source == adj_plink.source)*'SD' or 'DS')) for _, adj_plink in self.graph[source.id]['plink'] ) total += self.augment_di(level, flow, source, destination, limit) ## Minimum spanning tree algorithms ## 1) Kruskal algorithm def kruskal(self, allowed_nodes): uf = UnionFind(allowed_nodes) edges = [] for node in allowed_nodes: for neighbor, adj_plink in self.graph[node.id]['plink']: if neighbor in allowed_nodes: edges.append((adj_plink.costSD, adj_plink, node, neighbor)) for w, t, u, v in sorted(edges, key=itemgetter(0)): if uf.union(u, v): yield t ## Linear programming algorithms ## 1) Shortest path def LP_SP_formulation(self, s, t): # Solves the MILP: minimize c'*x # subject to G*x + s = h # A*x = b # s >= 0 # xi integer, forall i in I self.reset_flow() new_graph = {node: {} for node in self.nodes.values()} for node in self.nodes.values(): for neighbor, plink in self.graph[node.id]['plink']: sd = (node == plink.source)*'SD' or 'DS' new_graph[node][neighbor] = getattr(plink, 'cost' + sd) n = 2*len(self.plinks) c = [] for node in new_graph: for neighbor, cost in new_graph[node].items(): # the float conversion is ESSENTIAL ! # I first forgot it, then spent hours trying to understand # what was wrong. If 'c' is not made of float, no explicit # error is raised, but the result is sort of random ! c.append(float(cost)) # for the condition 0 < x_ij < 1 h = np.concatenate([np.ones(n), np.zeros(n)]) id = np.eye(n, n) G = np.concatenate((id, -1*id), axis=0).tolist() # flow conservation: Ax = b A, b = [], [] for node_r in new_graph: if node_r != t: b.append(float(node_r == s)) row = [] for node in new_graph: for neighbor in new_graph[node]: row.append( -1. if neighbor == node_r else 1. if node == node_r else 0. 
) A.append(row) A, G, b, c, h = map(matrix, (A, G, b, c, h)) solsta, x = glpk.ilp(c, G.T, h, A.T, b) # update the resulting flow for each node cpt = 0 for node in new_graph: for neighbor in new_graph[node]: new_graph[node][neighbor] = x[cpt] cpt += 1 # update the network physical links with the new flow value for plink in self.plinks.values(): src, dest = plink.source, plink.destination plink.flowSD = new_graph[src][dest] plink.flowDS = new_graph[dest][src] # traceback the shortest path with the flow curr_node, path_plink = s, [] while curr_node != t: for neighbor, adj_plink in self.graph[curr_node.id]['plink']: # if the flow leaving the current node is 1, we move # forward and replace the current node with its neighbor if adj_plink('flow', curr_node) == 1: path_plink.append(adj_plink) curr_node = neighbor return path_plink ## 2) Single-source single-destination maximum flow def LP_MF_formulation(self, s, t): # Solves the MILP: minimize c'*x # subject to G*x + s = h # A*x = b # s >= 0 # xi integer, forall i in I new_graph = {node: {} for node in self.nodes.values()} for node in self.nodes.values(): for neighbor, plink in self.graph[node.id]['plink']: sd = (node == plink.source)*'SD' or 'DS' new_graph[node][neighbor] = getattr(plink, 'capacity' + sd) n = 2*len(self.plinks) v = len(new_graph) c, h = [], [] for node in new_graph: for neighbor, capacity in new_graph[node].items(): c.append(float(node == s)) h.append(float(capacity)) # flow conservation: Ax = b A = [] for node_r in new_graph: if node_r not in (s, t): row = [] for node in new_graph: for neighbor in new_graph[node]: row.append( 1. if neighbor == node_r else -1. if node == node_r else 0. ) A.append(row) b = np.zeros(v - 2) h = np.concatenate([h, np.zeros(n)]) x = np.eye(n, n) G = np.concatenate((x, -1*x), axis=0).tolist() A, G, b, c, h = map(matrix, (A, G, b, c, h)) solsta, x = glpk.ilp(-c, G.T, h, A.T, b) # update the resulting flow for each node cpt = 0 for node in new_graph: for neighbor in new_graph[node]: new_graph[node][neighbor] = x[cpt] cpt += 1 # update the network physical links with the new flow value for plink in self.plinks.values(): src, dest = plink.source, plink.destination plink.flowSD = new_graph[src][dest] plink.flowDS = new_graph[dest][src] return sum( getattr(adj, 'flow' + ((s==adj.source)*'SD' or 'DS')) for _, adj in self.graph[s.id]['plink'] ) ## 3) Single-source single-destination minimum-cost flow def LP_MCF_formulation(self, s, t, flow): # Solves the MILP: minimize c'*x # subject to G*x + s = h # A*x = b # s >= 0 # xi integer, forall i in I new_graph = {node: {} for node in self.nodes.values()} for node in self.nodes.values(): for neighbor, plink in self.graph[node.id]['plink']: new_graph[node][neighbor] = (plink('capacity', node), plink('cost', node)) n = 2*len(self.plinks) v = len(new_graph) c, h = [], [] for node in new_graph: for neighbor, (capacity, cost) in new_graph[node].items(): c.append(float(cost)) h.append(float(capacity)) # flow conservation: Ax = b A, b = [], [] for node_r in new_graph: if node_r != t: b.append(flow * float(node_r == s)) row = [] for node in new_graph: for neighbor in new_graph[node]: row.append( -1. if neighbor == node_r else 1. if node == node_r else 0. 
) A.append(row) h = np.concatenate([h, np.zeros(n)]) x = np.eye(n, n) G = np.concatenate((x, -1*x), axis=0).tolist() A, G, b, c, h = map(matrix, (A, G, b, c, h)) solsta, x = glpk.ilp(c, G.T, h, A.T, b) # update the resulting flow for each node cpt = 0 for node in new_graph: for neighbor in new_graph[node]: new_graph[node][neighbor] = x[cpt] cpt += 1 # update the network physical links with the new flow value for plink in self.plinks.values(): src, dest = plink.source, plink.destination plink.flowSD = new_graph[src][dest] plink.flowDS = new_graph[dest][src] return sum( getattr(adj, 'flow' + ((s==adj.source)*'SD' or 'DS')) for _, adj in self.graph[s.id]['plink'] ) ## 4) K Link-disjoint shortest pair def LP_LDSP_formulation(self, s, t, K): # Solves the MILP: minimize c'*x # subject to G*x + s = h # A*x = b # s >= 0 # xi integer, forall i in I self.reset_flow() all_graph = [] for i in range(K): graph_K = {node: {} for node in self.nodes.values()} for node in graph_K: for neighbor, plink in self.graph[node.id]['plink']: sd = (node == plink.source)*'SD' or 'DS' graph_K[node][neighbor] = getattr(plink, 'cost' + sd) all_graph.append(graph_K) n = 2*len(self.plinks) c = [] for graph_K in all_graph: for node in graph_K: for neighbor, cost in graph_K[node].items(): c.append(float(cost)) # for the condition 0 < x_ij < 1 h = np.concatenate([np.ones(K * n), np.zeros(K * n), np.ones(K * (K - 1) * n)]) G2 = [] for i in range(K): for j in range(K): if i != j: for nodeA in all_graph[j]: for neighborA in all_graph[j][nodeA]: row = [] for k in range(K): for nodeB in all_graph[k]: for neighborB in all_graph[k][nodeB]: row.append(float(k in (i, j) and nodeA == nodeB and neighborA == neighborB )) G2.append(row) id = np.eye(K * n, K * n) G = np.concatenate((id, -1*id, G2), axis=0).tolist() # flow conservation: Ax = b A, b = [], [] for i in range(K): for node_r in self.nodes.values(): if node_r != t: row = [] b.append(float(node_r == s)) for j in range(K): for node in all_graph[j]: for neighbor in all_graph[j][node]: row.append( -1. if neighbor == node_r and i == j else 1. if node == node_r and i == j else 0. ) A.append(row) A, G, b, c, h = map(matrix, (A, G, b, c, h)) binvar = set(range(n)) solsta, x = glpk.ilp(c, G.T, h, A.T, b, B=binvar) print(x) # update the resulting flow for each node cpt = 0 for graph_K in all_graph: for node in graph_K: for neighbor in graph_K[node]: graph_K[node][neighbor] = x[cpt] cpt += 1 # update the network physical links with the new flow value for plink in self.plinks.values(): src, dest = plink.source, plink.destination plink.flowSD = max(graph_K[src][dest] for graph_K in all_graph) plink.flowDS = max(graph_K[dest][src] for graph_K in all_graph) return sum(x) ## IP network cost optimization: Weight Setting Problem # compute the network congestion ratio of an autonomous system # it is defined as max( link bw / link capacity for all links): # it is the maximum utilization ratio among all AS links. # we also use this function to retrieve the argmax, that is, # the physical link with the highlight bandwidth / capacity ratio. 
    def ncr_computation(self, AS_links):
        # ct_id is the index, in AS_links, of the most congested plink
        # cd indicates which is the congested direction: SD or DS
        ncr, ct_id, cd = 0, None, None
        for idx, plink in enumerate(AS_links):
            for direction in ('SD', 'DS'):
                tf, cap = 'traffic' + direction, 'capacity' + direction
                curr_ncr = getattr(plink, tf) / getattr(plink, cap)
                if curr_ncr > ncr:
                    ncr = curr_ncr
                    ct_id = idx
                    cd = direction
        return ncr, ct_id, cd

    # 2) Tabu search heuristic
    def WSP_TS(self, AS):
        AS_links = list(AS.pAS['link'])
        # a cost assignment solution is a vector of 2*n values where n is
        # the number of physical links in the AS, because each physical link
        # has two costs: one per direction (SD and DS).
        n = 2*len(AS_links)
        iteration_nb = 50
        # the tabu list is initially empty: it will contain all the evaluated
        # solutions, so that we don't evaluate a solution more than once
        # (we don't go 'backward')
        tabu_list = []
        # the current optimal solution found
        best_solution = None
        # for each solution, we compute the 'network congestion ratio':
        # best_ncr is the best network congestion ratio that has been found
        # so far, i.e the network congestion ratio of the best solution.
        best_ncr = float('inf')

        # we store the cost value in the flow parameters, since we'll change
        # the links' costs to evaluate each solution
        # at the end, we will revert the costs to their original values
        for plink in AS.pAS['link']:
            plink.flowSD = plink.costSD
            plink.flowDS = plink.costDS

        generation_size = 10
        best_candidates = []
        for i in range(generation_size):
            print(i)
            curr_solution = [random.randint(1, n) for _ in range(n)]
            # we assign the costs to the physical links
            for id, cost in enumerate(curr_solution):
                setattr(AS_links[id//2], 'cost' + ('DS'*(id%2) or 'SD'), cost)
            # create the routing tables with the newly allocated costs,
            # route all traffic flows and find the network congestion ratio
            self.routing_table_creation()
            self.path_finder()
            curr_ncr, *_ = self.ncr_computation(AS_links)
            best_candidates.append((curr_ncr, curr_solution))
        best_candidates = nsmallest(5, best_candidates)

        for i, (_, curr_solution) in enumerate(best_candidates):
            print(i)
            if curr_solution in tabu_list:
                continue
            # we create a cost assignment and add it to the tabu list
            tabu_list.append(curr_solution)
            # we assign the costs to the physical links
            for id, cost in enumerate(curr_solution):
                setattr(AS_links[id//2], 'cost' + ('DS'*(id%2) or 'SD'), cost)
            self.route()
            # if we have to look for the most congested physical link more than
            # C_max times, and still can't have a network congestion
            # ratio lower than best_ncr, we stop
            C_max, C = 10, 0
            local_best_ncr = float('inf')
            while True:
                self.route()
                curr_ncr, ct_id, cd = self.ncr_computation(AS_links)
                # update the best solution found if the network congestion ratio
                # is the lowest one found so far
                if curr_ncr < local_best_ncr:
                    print(curr_ncr)
                    C = 0
                    local_best_ncr = curr_ncr
                    if curr_ncr < best_ncr:
                        best_ncr = curr_ncr
                        best_solution = curr_solution[:]
                else:
                    C += 1
                    if C == C_max:
                        print(best_ncr)
                        break
                # we store the bandwidth of the physical link with the highest
                # congestion (in the congested direction)
                initial_bw = getattr(AS_links[ct_id], 'traffic' + cd)
                # we'll increase the cost of the congested physical link, until
                # at least one traffic flow is rerouted (in such a way that it
                # will no longer use the congested physical link)
                for k in range(5):
                    #print(k)
                    AS_links[ct_id].__dict__['cost' + cd] += n // 5
                    # we update the solution being evaluated and append
                    # it to the tabu list
                    curr_solution[ct_id*2 + (cd == 'DS')] += n // 5
tabu_list.append(curr_solution) self.route() new_bw = getattr(AS_links[ct_id], 'traffic' + cd) if new_bw != initial_bw: break else: C = C_max - 1 for id, cost in enumerate(best_solution): setattr(AS_links[id//2], 'cost' + ('DS'*(id%2) or 'SD'), cost) self.route() ncr, ct_id, cd = self.ncr_computation(AS_links) print(ncr) ## Optical networks: routing and wavelength assignment def RWA_graph_transformation(self, name=None): # we compute the path of all traffic physical links self.path_finder() graph_project = self.view.controller.add_project(name) # in the new graph, each node corresponds to a traffic path # we create one node per traffic physical link in the new view visited = set() # tl stands for traffic physical link for tlA in self.traffics.values(): for tlB in self.traffics.values(): if tlB not in visited and tlA != tlB: if set(tlA.path) & set(tlB.path): nA, nB = tlA.name, tlB.name name = '{} - {}'.format(nA, nB) graph_project.network.lf( source = graph_project.network.nf( name = nA, subtype = 'optical switch' ), destination = graph_project.network.nf( name = nB, subtype = 'optical switch' ), name = name ) visited.add(tlA) graph_project.current_view.refresh_display() return graph_project def largest_degree_first(self): # we color the transformed graph by allocating colors to largest # degree nodes: # 1) we select the largest degree uncolored optical switch # 2) we look at the adjacent vertices and select the minimum indexed # color not yet used by adjacent vertices # 3) when everything is colored, we stop # we will use a dictionary that binds optical switch to the color it uses. optical_switch_color = dict.fromkeys(self.ftr('node', 'optical switch'), None) # and a list that contains all vertices that we have yet to color uncolored_nodes = list(optical_switch_color) # we will use a function that returns the degree of a node to sort # the list in ascending order uncolored_nodes.sort(key = lambda node: len(self.graph[node.id]['plink'])) # and pop nodes one by one while uncolored_nodes: largest_degree = uncolored_nodes.pop() # we compute the set of colors used by adjacent vertices colors = set(optical_switch_color[neighbor] for neighbor, _ in self.graph[largest_degree.id]['plink']) # we find the minimum indexed color which is available min_index = [i in colors for i in range(len(colors) + 1)].index(0) # and assign it to the current optical switch optical_switch_color[largest_degree] = min_index number_lambda = max(optical_switch_color.values()) + 1 return number_lambda def LP_RWA_formulation(self, K=10): # Solves the MILP: minimize c'*x # subject to G*x + s = h # A*x = b # s >= 0 # xi integer, forall i in I # we note x_v_wl the variable that defines whether wl is used for # the path v (x_v_wl = 1) or not (x_v_wl = 0) # we construct the vector of variable the following way: # x = [x_1_0, x_2_0, ..., x_V_0, x_1_1, ... x_V-1_K-1, x_V_K-1] # that is, [(x_v_0) for v in V, ..., (x_v_K) for wl in K] # V is the total number of path (i.e the total number of physical links # in the transformed graph) V, T = len(self.nodes), len(self.plinks) # for the objective function, which must minimize the sum of y_wl, # that is, the number of wavelength used c = np.concatenate([np.zeros(V * K), np.ones(K)]) # for a given path v, we must have sum(x_v_wl for wl in K) = 1 # which ensures that each optical path uses only one wavelength # for each path v, we must create a vector with all x_v_wl set to 1 # for the path v, and the rest of it set to 0. 
A = [] for path in range(V): row = [float(K * path <= i < K * (path + 1)) for i in range(V * K)] row += [0.] * K A.append(row) b = np.ones(V) G2 = [] for i in range(K): for plink in self.plinks.values(): p_src, p_dest = plink.source, plink.destination # we want to ensure that paths that have at least one physical link in # common are not assigned the same wavelength. # this means that x_v_src_i + x_v_dest_i <= y_i row = [] # vector of x_v_wl: we set x_v_src_i and x_v_dest_i to 1 for path in self.nodes.values(): for j in range(K): row.append(float( (path == p_src or path == p_dest) and i == j ) ) # we continue filling the vector with the y_wl # we want to have x_v_src_i + x_v_dest_i - y_i <= 0 # hence the 'minus' sign instead of float for j in range(K): row.append(-float(i == j)) G2.append(row) # G2 size should be K * T (rows) x K * (V + 1) (columns) # finally, we want to ensure that wavelength are used in # ascending order, meaning that y_wl >= y_(wl + 1) for wl # in [0, K-1]. We can rewrite it y_(wl + 1) - y_wl <= 0 G3 = [] for i in range(1, K): row_wl = [float( (i == wl) or -(i == wl + 1) ) for wl in range(K) ] final_row = np.concatenate([np.zeros(V * K), row_wl]) G3.append(final_row) # G3 size should be K - 1 (rows) x K * (V + 1) (columns) h = np.concatenate([ # x_v_src_i + x_v_dest_i - y_i <= 0 np.zeros(K * T), # y_(wl + 1) - y_wl <= 0 np.zeros(K - 1) ]) G = np.concatenate((G2, G3), axis=0).tolist() A, G, b, c, h = map(matrix, (A, G, b, c, h)) binvar = set(range(K * (V + 1))) solsta, x = glpk.ilp(c, G.T, h, A.T, b, B=binvar) warnings.warn(str(int(sum(x[-K:])))) return int(sum(x[-K:])) ## Graph generation functions ## 1) Tree generation def tree(self, n, subtype): for i in range(2**n-1): n1, n2, n3 = str(i), str(2*i+1), str(2*i+2) source = self.nf(name = n1, subtype = subtype) destination = self.nf(name = n2, subtype = subtype) yield source yield destination yield self.lf(source=source, destination=destination) source = self.nf(name = n1, subtype = subtype) destination = self.nf(name = n3, subtype = subtype) yield source yield destination yield self.lf(source=source, destination=destination) ## 2) Star generation def star(self, n, subtype): nb_node = self.cpt_node + 1 for i in range(n): n1, n2 = str(nb_node), str(nb_node+1+i) source = self.nf(name = n1, subtype = subtype) destination = self.nf(name = n2, subtype = subtype) yield source yield destination yield self.lf(source=source, destination=destination) ## 3) Full-meshed network generation def full_mesh(self, n, subtype): nb_node = self.cpt_node + 1 for i in range(n): for j in range(i): n1, n2 = str(nb_node+j), str(nb_node+i) source = self.nf(name = n1, subtype = subtype) destination = self.nf(name = n2, subtype = subtype) yield source yield destination yield self.lf(source=source, destination=destination) ## 4) Ring generation def ring(self, n, subtype): nb_node = self.cpt_node + 1 for i in range(n): n1, n2 = str(nb_node+i), str(nb_node+(1+i)%n) source = self.nf(name = n1, subtype = subtype) destination = self.nf(name = n2, subtype = subtype) yield source yield destination yield self.lf(source=source, destination=destination) ## 5) Square tiling generation def square_tiling(self, n, subtype): for i in range(n**2): n1, n2, n3 = str(i), str(i-1), str(i+n) if i-1 > -1 and i%n: source = self.nf(name = n1, subtype = subtype) destination = self.nf(name = n2, subtype = subtype) yield source yield destination yield self.lf(source=source, destination=destination) if i+n < n**2: source = self.nf(name = n1, subtype = subtype) destination = 
self.nf(name = n3, subtype = subtype) yield source yield destination yield self.lf(source=source, destination=destination) ## 6) Hypercube generation def hypercube(self, n, subtype): # we create a n-dim hypercube by connecting two (n-1)-dim hypercubes i = 0 graph_nodes = [self.nf(name=str(0), subtype=subtype)] graph_plinks = [] while i < n+1: for k in range(len(graph_nodes)): # creation of the nodes of the second hypercube graph_nodes.append( self.nf( name = str(k+2**i), subtype = subtype ) ) for plink in graph_plinks[:]: # connection of the two hypercubes source, destination = plink.source, plink.destination n1 = str(int(source.name) + 2**i) n2 = str(int(destination.name) + 2**i) graph_plinks.append( self.lf( source = self.nf(name = n1), destination = self.nf(name = n2) ) ) for k in range(len(graph_nodes)//2): # creation of the physical links of the second hypercube graph_plinks.append( self.lf( source = graph_nodes[k], destination = graph_nodes[k+2**i] ) ) i += 1 yield from graph_nodes yield from graph_plinks ## 7) Generalized Kneser graph def kneser(self, n, k, subtype): # we keep track of what set we've seen to avoid having # duplicated edges in the graph, with the 'already_done' set already_done = set() for setA in map(set, combinations(range(1, n), k)): already_done.add(frozenset(setA)) for setB in map(set, combinations(range(1, n), k)): if setB not in already_done and not setA & setB: source = self.nf(name = str(setA), subtype = subtype) destination = self.nf(name = str(setB), subtype = subtype) yield source yield destination yield self.lf(source=source, destination=destination) ## 8) Generalized Petersen graph def petersen(self, n, k, subtype): # the petersen graph is made of the vertices (u_i) and (v_i) for # i in [0, n-1] and the edges (u_i, u_i+1), (u_i, v_i) and (v_i, v_i+k). # to build it, we consider that v_i = u_(i+n). 
        for i in range(n):
            # (u_i, u_i+1) edges
            source = self.nf(name = str(i), subtype = subtype)
            destination = self.nf(name = str((i + 1)%n), subtype = subtype)
            yield source
            yield destination
            yield self.lf(source=source, destination=destination)
            # (u_i, v_i) edges
            source = self.nf(name = str(i), subtype = subtype)
            destination = self.nf(name = str(i+n), subtype = subtype)
            yield source
            yield destination
            yield self.lf(source=source, destination=destination)
            # (v_i, v_i+k) edges
            source = self.nf(name = str(i+n), subtype = subtype)
            destination = self.nf(name = str((i+n+k)%n + n), subtype = subtype)
            yield source
            yield destination
            yield self.lf(source=source, destination=destination)

    ## Multiple object creation

    def multiple_nodes(self, n, subtype):
        nb_nodes = self.cpt_node + 1
        for k in range(n):
            yield self.nf(name = str(k + nb_nodes), subtype = subtype)

    def multiple_links(self, source_nodes, destination_nodes):
        # create a link between every source node and every destination node
        for src_node in source_nodes:
            for dest_node in destination_nodes:
                if src_node != dest_node:
                    yield self.lf(source=src_node, destination=dest_node)

    ## Configuration

    def build_router_configuration(self, node):
        # initialization
        # yield 'enable'
        yield 'configure terminal'

        # configuration of the loopback interface
        # yield 'interface Loopback0'
        # yield 'ip address {ip} 255.255.255.255'.format(ip=node.ip_address)
        # yield 'exit'

        for _, sr in self.gftr(node, 'route', 'static route', False):
            subnetwork, mask = sr.dst_sntw.split('/')
            mask = tomask(int(mask))
            yield ' '.join(('ip route ', subnetwork, mask, sr.nh_ip))

        for neighbor, adj_plink in self.graph[node.id]['plink']:
            interface = adj_plink('interface', node)
            ip = interface.ip_address
            mask = interface.subnet_mask

            yield 'interface ' + str(interface)
            yield 'ip address {ip} {mask}'.format(ip=ip.ip_addr, mask=ip.mask)
            yield 'no shutdown'
            yield 'exit'

            if any(AS.AS_type == 'OSPF' for AS in adj_plink.AS):
                cost = adj_plink('cost', node)
                if cost != 1:
                    yield 'ip ospf cost ' + str(cost)

            # IS-IS is configured both in 'config-router' mode and on the
            # interface itself: the code is set here so that the user doesn't
            # have to exit the interface, then come back to it for IS-IS.
            for AS in node.AS:
                # we configure isis only if the neighbor
                # belongs to the same AS.
                if AS in neighbor.AS and AS.AS_type == 'ISIS':
                    node_area ,= node.AS[AS]
                    in_backbone = node_area.name == 'Backbone'
                    # activate IS-IS on the interface
                    yield 'ip router isis'
                    # we need to check what area the neighbor belongs to.
                    # If it belongs to the node's area, the interface is
                    # configured as L1 with circuit-type, else as L2.
                    neighbor_area ,= neighbor.AS[AS]
                    # we configure circuit-type as level 2 if the routers
                    # belong to different areas, or they both belong to
                    # the backbone
                    l2 = node_area != neighbor_area or in_backbone
                    cct_type = 'level-2' if l2 else 'level-1'
                    yield 'isis circuit-type ' + cct_type

        for AS in node.AS:
            if AS.AS_type == 'RIP':
                yield 'router rip'
                for _, adj_plink in self.graph[node.id]['plink']:
                    interface = adj_plink('interface', node)
                    if adj_plink in AS.pAS['link']:
                        ip = interface.ip_address
                        yield 'network ' + ip.ip_addr
                    else:
                        yield 'passive-interface ' + interface.name

            elif AS.AS_type == 'OSPF':
                yield 'router ospf 1'
                for _, adj_plink in self.graph[node.id]['plink']:
                    interface = adj_plink('interface', node)
                    if adj_plink in AS.pAS['link']:
                        ip = interface.ip_address
                        plink_area ,= adj_plink.AS[AS]
                        yield ' '.join((
                                        'network',
                                        ip.ip_addr,
                                        '0.0.0.3',
                                        'area',
                                        str(plink_area.id)
                                        ))
                    else:
                        if_name = interface.name
                        yield 'passive-interface ' + if_name
                if AS.exit_point == node:
                    yield 'default-information originate'

            elif AS.AS_type == 'ISIS':
                # we need to know:
                # - whether the node is in the backbone area (L1/L2 or L2)
                # or a L1 area
                # - whether the node is at the edge of its area (L1/L2)
                node_area ,= node.AS[AS]
                in_backbone = node_area.name == 'Backbone'
                level = 'level-1-2' if node in AS.border_routers else (
                        'level-2' if in_backbone else 'level-1')

                # An IS-IS NET (Network Entity Title) is made up of:
                # - AFI: must be 1 byte
                # - Area ID: can be 0 to 12 bytes long
                # - System ID: must be 6 bytes long
                # - SEL: must be 1 byte
                # The AFI is the Authority & Format Identifier.
                # In an IP-only environment, this number has no meaning
                # separate from the Area ID. Most vendors and operators
                # tend to stay compliant with the defunct ISO protocols by
                # specifying an AFI of '49'.
                # We will stick to this convention.
                # Area IDs function just as they do in OSPF.
                # The System ID can be anything chosen by the administrator,
                # similarly to an OSPF Router ID. However, best practice
                # with NETs is to keep the configuration as simple as
                # humanly possible.
                # We will derive it from the router's loopback address.
                AFI = '49.' + str(format(node_area.id, '04d'))
                sid = '.'.join((format(int(n), '03d')
                                for n in node.ip_address.split('.')))
                net = '.'.join((AFI, sid, '00'))

                yield 'router isis'
                yield 'net ' + net
                yield 'is-type ' + level
                yield 'passive-interface Loopback0'
                yield 'exit'

    def build_switch_configuration(self, node):
        # initialization
        yield 'enable'
        yield 'configure terminal'

        # create all VLANs on the switch
        for AS in node.AS:
            if AS.AS_type == 'VLAN':
                for VLAN in node.AS[AS]:
                    yield 'vlan ' + str(VLAN.id)
                    yield 'name ' + VLAN.name
                    yield 'exit'

        for _, adj_plink in self.graph[node.id]['plink']:
            interface = adj_plink('interface', node)
            yield 'interface ' + str(interface)

            for AS in adj_plink.AS:
                # VLAN configuration
                if AS.AS_type == 'VLAN':
                    # if there is a single VLAN, the link is an access link
                    if len(adj_plink.AS[AS]) == 1:
                        # retrieve the unique VLAN the link belongs to
                        unique_VLAN ,= adj_plink.AS[AS]
                        yield 'switchport mode access'
                        yield 'switchport access vlan ' + str(unique_VLAN.id)
                    else:
                        # there is more than one VLAN: the link is a trunk
                        yield 'switchport mode trunk'
                        # find all VLAN IDs
                        VLAN_IDs = map(lambda vlan: str(vlan.id), adj_plink.AS[AS])
                        # allow them on the trunk
                        yield 'switchport trunk allowed vlan add ' + ','.join(VLAN_IDs)
        yield 'end'
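
# A minimal, self-contained sketch (not part of the original module) of the
# link-disjoint pair idea used by Network.bhandari above, replayed on a toy
# directed graph stored as plain dicts: remove the arcs of the first shortest
# path (cost +inf), give their reverse arcs a cost of -1 as bhandari() does,
# rerun a negative-cost-tolerant search, and XOR the two edge sets.
def _toy_bellman_ford(graph, source, target):
    # graph: {u: {v: cost}}; returns the edges (u, v) of a shortest path,
    # tolerating negative arc costs (assumes no negative cycle)
    dist = {n: float('inf') for n in graph}
    prec = {n: None for n in graph}
    dist[source] = 0
    for _ in range(len(graph) - 1):
        for u in graph:
            for v, cost in graph[u].items():
                if dist[u] + cost < dist[v]:
                    dist[v] = dist[u] + cost
                    prec[v] = u
    path, curr = [], target
    while curr != source:
        path.append((prec[curr], curr))
        curr = prec[curr]
    return path[::-1]

def _toy_bhandari(graph, source, target):
    first = _toy_bellman_ford(graph, source, target)
    for u, v in first:
        graph[u][v] = float('inf')  # exclude the forward arc
        graph[v][u] = -1            # negative reverse arc, as in bhandari()
    second = _toy_bellman_ford(graph, source, target)
    # arcs shared by both paths (one of them reversed) cancel out,
    # exactly like the set XOR at the end of bhandari()
    return {frozenset(e) for e in first} ^ {frozenset(e) for e in second}

if __name__ == '__main__':
    # two link-disjoint paths around a 4-node ring: a-b-c and a-d-c
    ring = {'a': {'b': 1, 'd': 1}, 'b': {'a': 1, 'c': 1},
            'c': {'b': 1, 'd': 1}, 'd': {'a': 1, 'c': 1}}
    print(_toy_bhandari(ring, 'a', 'c'))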
afourmy/pyNMS
pyNMS/networks/network.py
Python
gpl-3.0
88,769
[ "VisIt" ]
10632d3c75e91481eb204f061387160ecd648ae87c5f21ca2110f7da0ff1c835
from netCDF4 import Dataset
import json
# example showing how python objects (lists, dicts, None, True)
# can be serialized as strings, saved as netCDF attributes,
# and then converted back to python objects using json.
ds = Dataset('json.nc', 'w')
ds.pythonatt1 = json.dumps(['foo', {'bar': ['baz', None, 1.0, 2]}])
ds.pythonatt2 = "true" # converted to bool
ds.pythonatt3 = "null" # converted to None
print(ds)
ds.close()
ds = Dataset('json.nc')
def convert_json(s):
    # json.loads raises a ValueError (JSONDecodeError) for strings
    # that are not valid JSON: return those unchanged
    try:
        a = json.loads(s)
        return a
    except ValueError:
        return s
x = convert_json(ds.pythonatt1)
print(type(x))
print(x)
print(convert_json(ds.pythonatt2))
print(convert_json(ds.pythonatt3))
ds.close()
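
# A small extra round-trip check (same pattern; 'pythonatt4' is a
# hypothetical attribute name added for illustration): any JSON-encodable
# python object survives the string round trip.
ds = Dataset('json.nc', 'a')
ds.pythonatt4 = json.dumps({'steps': [1, 2, 3], 'done': False})
ds.close()
ds = Dataset('json.nc')
assert convert_json(ds.pythonatt4) == {'steps': [1, 2, 3], 'done': False}
ds.close()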
Unidata/netcdf4-python
examples/json_att.py
Python
mit
688
[ "NetCDF" ]
106932fe09083a1a87126cd124cc69149e68f028415b1021b23f70a06d5d8a32
# Copyright (C) 2012 Aalto University # Author: Lauri Leukkunen <lauri.leukkunen@aalto.fi> from ase import Atoms from ase.constraints import FixBondLengths from ase.data.molecules import molecule from ase.md.langevin import Langevin from ase.md.verlet import VelocityVerlet from ase.md.velocitydistribution import MaxwellBoltzmannDistribution from ase.io.trajectory import PickleTrajectory from ase import units from gpaw.mpi import rank import os import sys from gpaw import GPAW from multiasecalc.mixer.mixer import Mixer from multiasecalc.mixer.mixer import EnergyCalculation, ForceCalculation from multiasecalc.lammps.reaxff import ReaxFF from multiasecalc.utils import get_datafile, DynTesting from multiasecalc.mixer.selector import CalcBox import numpy as np import cPickle as pickle import getopt def usage(): print(""" python mixer_box.py [options] options: -h, --help help -c, --classical plain classical simulation -q, --quantum plain quantum simulation -d, --debug=N debug level -i, --input=input.traj input trajectory file -o, --output=output.traj output trajectory file -B, --box=NxNxN full system cell -Q, --quantum-box=NxNxN quantum cell -T, --transition-buffer=N transition buffer in angstroms -C, --cutoff=N molecule bond detection cutoff in angstroms -S, --steps=N number of simulation steps to run -t, --time-step=N time step in femtoseconds -M, --molecular-dynamics=name choose from "Langevin", "Verlet", "TESTING" -P, --position=N,N,N quantum box center position, the full system box is always centered at (0.0, 0.0, 0.0) -L, --log-interval=N write system state to output and energy log every Nth step -H, --langevin-temp=N temperature target in Kelvins for Langevin molecular dynamics -b, --bands=N number of electron bands for GPAW calculation N is float (1.234) for all except --debug, --steps, --log-interval and --bands where it is an integer. 
""") try: opts, args = getopt.getopt(sys.argv[1:], "H:L:B:Q:T:C:S:t:hcqo:d:i:M:b:",[ "langevin-temp=", "log-interval=", "box=", "quantum-box=", "transition-buffer=", "cutoff=", "steps=", "time-step=", "help", "classical", "quantum", "output=", "debug=", "input=", "molecular-dynamics=", "bands"]) except getopt.GetoptError, err: print(str(err)) usage() sys.exit(2) # PARAMETERS s = 10.0 cell = (100.0, 100.0, 100.0) q_cell = (10.0, 10.0, 10.0) qbox_pos = (0.0, 0.0, 0.0) transition_buffer = 2.0 cutoff = 2.0 calc_style = "combined" # "combined", "classical", "quantum" md_style = "Verlet" # "Verlet", "Langevin" timestep = 0.1*units.fs output_file = "mixer_box.traj" input_file = None set_debug = 0 steps = 10 log_interval = 25 langevin_temp = 300 # Kelvins bands = -2 # END OF PARAMETERS # process command line options for o, a in opts: if o in ["-d", "--debug"]: set_debug = int(a) if o in ["-h", "--help"]: usage() sys.exit(0) if o in ["-c", "--classical"]: calc_style = "classical" if o in ["-q", "--quantum"]: calc_style = "quantum" if o in ["-o", "--output"]: output_file = a if o in ["-i", "--input"]: input_file = a if o in ["-M", "--molecular-dynamics"]: md_style = a if o in ["-t", "--time-step"]: timestep = float(a) * units.fs if o in ["-B", "--box"]: cell = tuple([float(f) for f in a.split("x")]) if o in ["-Q", "--quantum-box"]: q_cell = tuple([float(f) for f in a.split("x")]) if o in ["-T", "--transition-buffer"]: transition_buffer = float(a) if o in ["-C", "--cutoff"]: cutoff = float(a) if o in ["-S", "--steps"]: steps = int(a) if o in ["-P", "--position"]: qbox_pos = tuple([float(f) for f in a.split(",")]) if o in ["-L", "--log-interval"]: log_interval = int(a) if o in ["-H", "--langevin-temp"]: langevin_temp = float(a) if o in ["-b", "--bands"]: bands = int(a) # verify that MPI is actually working print("rank: %i" % rank) pt = PickleTrajectory(input_file, "r") atoms = pt[-1] # get the last step Mixer.set_atom_ids(atoms) # this one is important! calc_gpaw = GPAW(nbands=bands, txt="mixer_box_gpaw.log") calc_reaxff_full = ReaxFF(ff_file_path=get_datafile("ffield.reax.new"), implementation="C") calc_reaxff_qbox = ReaxFF(ff_file_path=get_datafile("ffield.reax.new"), implementation="C") # debug disabled for non-master nodes, this is so on purpose! 
debug = 0
if rank == 0:
    debug = set_debug

filter_full_sys = CalcBox(name="full_sys", pos=(0, 0, 0), dim=cell,
                          cutoff=cutoff, pbc=(1, 1, 1), debug=debug)
filter_qbox = CalcBox(name="qbox", pos=qbox_pos, dim=q_cell, cutoff=cutoff,
                      inner_dim=(q_cell[0] - transition_buffer*2.0,
                                 q_cell[1] - transition_buffer*2.0,
                                 q_cell[2] - transition_buffer*2.0),
                      debug=debug)

# full system classical is taken as positive
forces_full_system = ForceCalculation("force_full",
                                      selector=filter_full_sys,
                                      calculator=calc_reaxff_full,
                                      cell=cell, debug=debug)

# quantum box classical is subtracted using the qbox weights
forces_qbox_reaxff = ForceCalculation("force_qbox_reax",
                                      selector=filter_qbox,
                                      calculator=calc_reaxff_qbox,
                                      coeff=-1.0, cell=cell, debug=debug)

# quantum box quantum is added using qbox weights
forces_qbox_gpaw = ForceCalculation("force_qbox_gpaw",
                                    selector=filter_qbox,
                                    calculator=calc_gpaw,
                                    cell=(q_cell[0] + 3,
                                          q_cell[1] + 3,
                                          q_cell[2] + 3),
                                    debug=debug)

# energies are based on H = H_c + H_q' - H_c'
energy_full_system = EnergyCalculation("energy_full",
                                       selector=filter_full_sys,
                                       calculator=calc_reaxff_full,
                                       cell=cell)
energy_qbox_reaxff = EnergyCalculation("energy_qbox_reax",
                                       selector=filter_qbox,
                                       calculator=calc_reaxff_qbox,
                                       cell=cell, coeff=-1.0)
energy_qbox_gpaw = EnergyCalculation("energy_qbox_gpaw",
                                     selector=filter_qbox,
                                     calculator=calc_gpaw,
                                     cell=(q_cell[0] + 3,
                                           q_cell[1] + 3,
                                           q_cell[2] + 3))

mixer_forces = []
mixer_energies = []

if calc_style == "combined":
    mixer_forces = [forces_full_system, forces_qbox_reaxff, forces_qbox_gpaw]
    mixer_energies = [energy_full_system, energy_qbox_reaxff,
                      energy_qbox_gpaw]
elif calc_style == "classical":
    mixer_forces = [forces_full_system]
    mixer_energies = [energy_full_system]
elif calc_style == "quantum":
    mixer_forces = [forces_qbox_gpaw]
    mixer_energies = [energy_qbox_gpaw]

mixer = Mixer(name="mixer_box", forces=mixer_forces,
              energies=mixer_energies, debug=debug)
atoms.set_calculator(mixer)

dyn = None
if md_style == "Langevin":
    # target temperature comes from -H/--langevin-temp (the original code
    # referenced an undefined name "T" here)
    dyn = Langevin(atoms, timestep, langevin_temp*units.kB, 0.002)
elif md_style == "Verlet":
    dyn = VelocityVerlet(atoms, timestep)
elif md_style == "TESTING":
    dyn = DynTesting(atoms, timestep=0.1*units.fs, offset=(0.1, 0., 0.))

energy_file = None

def printenergy(a=atoms):
    epot = a.get_potential_energy() / len(a)
    ekin = a.get_kinetic_energy() / len(a)
    if energy_file:  # only the master node has the log file open
        energy_file.write("%.5e,%.5e,%.5e,%.5e" % (epot + ekin, epot, ekin,
                          ekin/(1.5*units.kB)) + "\n")

# only enable logging for master node where rank == 0
if rank == 0:
    energy_file = open("mixer_box_energy.csv", "w+b", buffering=0)
    energy_file.write("total,potential,kinetic,temperature\n")
    dyn.attach(printenergy, interval=log_interval)
    traj = PickleTrajectory(output_file, 'w', atoms)
    dyn.attach(traj.write, interval=log_interval)

printenergy()
dyn.run(steps)
csmm/multiase
tests/mixer/mixer_box.py
Python
gpl-2.0
9,168
[ "ASE", "GPAW", "LAMMPS" ]
3c52c361af15d6e12664ea86bb22f302e2933959b565f504c0ef7c1c119defc6
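The force and energy lists above implement a subtractive (ONIOM-style) combination, H = H_c + H_q' - H_c': full-system classical, minus classical on the quantum box (coeff=-1.0), plus quantum on the quantum box. A minimal sketch of that arithmetic with plain floats standing in for the EnergyCalculation objects; the function name is illustrative, not part of the multiasecalc API.

# Minimal sketch of the subtractive scheme H = H_c + H_q' - H_c'.
def combined_energy(e_classical_full, e_classical_qbox, e_quantum_qbox):
    # Full-system classical enters with +1, the classical energy of the
    # quantum region is subtracted, and its quantum energy is added back.
    return e_classical_full - e_classical_qbox + e_quantum_qbox

# Example: -120.0 eV classical total, quantum box contributing -30.0 eV
# classically and -31.5 eV quantum-mechanically.
print(combined_energy(-120.0, -30.0, -31.5))  # -121.5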
from ase.parallel import paropen
from ase.units import Hartree
from gpaw.xc.rpa import RPACorrelation

f = paropen('con_freq.dat', 'w')
for N in [4, 6, 8, 12, 16, 24, 32]:
    rpa = RPACorrelation('N2.gpw', txt='rpa_N2_frequencies.txt',
                         nfrequencies=N)
    E = rpa.calculate(ecut=[50])
    print(N, E[0], file=f)
    if N == 16:
        f16 = paropen('frequency_gauss16.dat', 'w')
        for w, e in zip(rpa.omega_w, rpa.E_w):
            print(w * Hartree, e, file=f16)
        f16.close()
f.close()
robwarm/gpaw-symm
doc/tutorials/rpa/con_freq.py
Python
gpl-3.0
496
[ "ASE", "GPAW" ]
da329d0ff162d2dee1f72c13526e2b7c243ec643b08a221d4a80ee7e6aa2d46e
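The loop above writes one (N, E) pair per frequency grid to con_freq.dat. A small follow-up sketch, assuming that file exists with exactly those two columns, for inspecting convergence against the densest grid computed (N=32 is last in the loop):

import numpy as np

data = np.loadtxt('con_freq.dat')            # columns: N, E(N) at ecut=50 eV
n_points, energies = data[:, 0], data[:, 1]
# Deviation of each frequency grid from the densest one computed (N=32).
for n, de in zip(n_points, energies - energies[-1]):
    print('N = %2d  dE = %+.4f eV' % (n, de))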
# ---------------------------------------------------------------------------- # Copyright (c) 2016-2022, QIIME 2 development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- import types import unittest import qiime2.plugin import qiime2.sdk from qiime2.core.testing.type import (IntSequence1, IntSequence2, Mapping, FourInts, Kennel, Dog, Cat, SingleInt) from qiime2.core.testing.util import get_dummy_plugin class TestPlugin(unittest.TestCase): def setUp(self): self.plugin = get_dummy_plugin() def test_name(self): self.assertEqual(self.plugin.name, 'dummy-plugin') def test_version(self): self.assertEqual(self.plugin.version, '0.0.0-dev') def test_website(self): self.assertEqual(self.plugin.website, 'https://github.com/qiime2/qiime2') def test_package(self): self.assertEqual(self.plugin.package, 'qiime2.core.testing') def test_citations(self): self.assertEqual(self.plugin.citations[0].type, 'article') def test_user_support_text(self): self.assertEqual(self.plugin.user_support_text, 'For help, see https://qiime2.org') def test_short_description_text(self): self.assertEqual(self.plugin.short_description, 'Dummy plugin for testing.') def test_description_text(self): self.assertEqual(self.plugin.description, 'Description of dummy plugin.') def test_citations_default(self): plugin = qiime2.plugin.Plugin( name='local-dummy-plugin', version='0.0.0-dev', website='https://github.com/qiime2/qiime2', package='qiime2.core.testing') self.assertEqual(plugin.citations, ()) def test_user_support_text_default(self): plugin = qiime2.plugin.Plugin( name='local-dummy-plugin', version='0.0.0-dev', website='https://github.com/qiime2/qiime2', package='qiime2.core.testing') self.assertTrue(plugin.user_support_text.startswith('Please post')) self.assertTrue(plugin.user_support_text.endswith( 'https://forum.qiime2.org')) def test_actions(self): actions = self.plugin.actions self.assertIsInstance(actions, types.MappingProxyType) self.assertEqual(actions.keys(), {'merge_mappings', 'concatenate_ints', 'split_ints', 'most_common_viz', 'mapping_viz', 'identity_with_metadata', 'identity_with_metadata_column', 'identity_with_categorical_metadata_column', 'identity_with_numeric_metadata_column', 'identity_with_optional_metadata', 'identity_with_optional_metadata_column', 'params_only_method', 'no_input_method', 'optional_artifacts_method', 'variadic_input_method', 'params_only_viz', 'no_input_viz', 'long_description_method', 'parameter_only_pipeline', 'typical_pipeline', 'optional_artifact_pipeline', 'pointless_pipeline', 'visualizer_only_pipeline', 'pipelines_in_pipeline', 'failing_pipeline', 'docstring_order_method', 'constrained_input_visualization', 'combinatorically_mapped_method', 'double_bound_variable_method', 'bool_flag_swaps_output_method', 'predicates_preserved_method', 'deprecated_method', 'unioned_primitives', 'type_match_list_and_set', }) for action in actions.values(): self.assertIsInstance(action, qiime2.sdk.Action) # Read-only dict. 
with self.assertRaises(TypeError): actions["i-shouldn't-do-this"] = "my-action" with self.assertRaises(TypeError): actions["merge_mappings"] = "my-action" def test_methods(self): methods = self.plugin.methods self.assertEqual(methods.keys(), {'merge_mappings', 'concatenate_ints', 'split_ints', 'identity_with_metadata', 'identity_with_metadata_column', 'identity_with_categorical_metadata_column', 'identity_with_numeric_metadata_column', 'identity_with_optional_metadata', 'identity_with_optional_metadata_column', 'params_only_method', 'no_input_method', 'optional_artifacts_method', 'long_description_method', 'variadic_input_method', 'docstring_order_method', 'combinatorically_mapped_method', 'double_bound_variable_method', 'bool_flag_swaps_output_method', 'predicates_preserved_method', 'deprecated_method', 'unioned_primitives', 'type_match_list_and_set', }) for method in methods.values(): self.assertIsInstance(method, qiime2.sdk.Method) def test_visualizers(self): visualizers = self.plugin.visualizers self.assertEqual(visualizers.keys(), {'most_common_viz', 'mapping_viz', 'params_only_viz', 'no_input_viz', 'constrained_input_visualization'}) for viz in visualizers.values(): self.assertIsInstance(viz, qiime2.sdk.Visualizer) def test_pipelines(self): pipelines = self.plugin.pipelines self.assertEqual(pipelines.keys(), {'parameter_only_pipeline', 'typical_pipeline', 'optional_artifact_pipeline', 'pointless_pipeline', 'visualizer_only_pipeline', 'pipelines_in_pipeline', 'failing_pipeline'}) for pipeline in pipelines.values(): self.assertIsInstance(pipeline, qiime2.sdk.Pipeline) # TODO test registration of directory formats. def test_type_fragments(self): types = self.plugin.type_fragments.keys() self.assertEqual( set(types), set(['IntSequence1', 'IntSequence2', 'IntSequence3', 'Mapping', 'FourInts', 'Kennel', 'Dog', 'Cat', 'SingleInt', 'C1', 'C2', 'C3', 'Foo', 'Bar', 'Baz', 'AscIntSequence', 'Squid', 'Octopus', 'Cuttlefish'])) def test_types(self): types = self.plugin.types # Get just the SemanticTypeRecords out of the types dictionary, then # get just the types out of the SemanticTypeRecord namedtuples types = {type_.semantic_type for type_ in types.values()} exp = {IntSequence1, IntSequence2, FourInts, Mapping, Kennel[Dog], Kennel[Cat], SingleInt} self.assertLessEqual(exp, types) self.assertNotIn(Cat, types) self.assertNotIn(Dog, types) self.assertNotIn(Kennel, types) if __name__ == '__main__': unittest.main()
qiime2/qiime2
qiime2/plugin/tests/test_plugin.py
Python
bsd-3-clause
7,747
[ "Octopus" ]
8e06125473b7835b85401b3de24ef2971bc199bf4b71167a925d26fc5a726898
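test_citations_default and test_user_support_text_default above show the minimal Plugin constructor. A hedged sketch of the same call outside the test suite, assuming a working qiime2 installation; with nothing registered, the read-only actions mapping is empty and citations falls back to the default tested above.

import qiime2.plugin

plugin = qiime2.plugin.Plugin(
    name='local-dummy-plugin',
    version='0.0.0-dev',
    website='https://github.com/qiime2/qiime2',
    package='qiime2.core.testing')

print(dict(plugin.actions))  # {} -- nothing registered yet
print(plugin.citations)      # () -- the default checked in the tests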
#! /usr/bin/env python """ Module with routines allowing for the estimation of the uncertainty on the parameters of an imaged companion associated to residual speckle noise. """ __author__ = 'O. Wertz, C. A. Gomez Gonzalez, V. Christiaens' __all__ = ['speckle_noise_uncertainty'] #import itertools as itt from multiprocessing import cpu_count import numpy as np import matplotlib.pyplot as plt from ..conf.utils_conf import pool_map, iterable #eval_func_tuple from ..metrics import cube_inject_companions from .simplex_optim import firstguess_simplex from .simplex_fmerit import get_mu_and_sigma from .utils_negfc import cube_planet_free from .mcmc_sampling import confidence def speckle_noise_uncertainty(cube, p_true, angle_range, derot_angles, algo, psfn, plsc, fwhm, aperture_radius, cube_ref=None, fmerit='sum', algo_options={}, transmission=None, mu_sigma=None, wedge=None, weights=None, force_rPA=False, nproc=None, simplex_options=None, bins=None, save=False, output=None, verbose=True, full_output=True, plot=False): """ Step-by-step procedure used to determine the speckle noise uncertainty associated to the parameters of a companion candidate. __ | The steps 1 to 3 need to be performed for each angle. | | - 1 - At the true planet radial distance and for a given angle, we | inject a fake companion in our planet-free cube. | | - 2 - Then, using the negative fake companion method, we determine the | position and flux of the fake companion thanks to a Simplex | Nelder-Mead minimization. | | - 3 - We calculate the offset between the true values of the position | and the flux of the fake companion, and those obtained from the | minimization. The results will be dependent on the angular | position of the fake companion. |__ The resulting distribution of deviations is then used to infer the 1-sigma uncertainty on each parameter by fitting a 1d-gaussian. Parameters ---------- cube: numpy array The original ADI cube. p_true: tuple or numpy array with 3 elements The radial separation, position angle (from x=0 axis) and flux associated to a given companion candidate for which the speckle uncertainty is to be evaluated. The planet will first be subtracted from the cube, then used for test injections. angle_range: 1d numpy array Range of angles (counted from x=0 axis, counter-clockwise) at which the fake companions will be injected, in [0,360[. derot_angles: 1d numpy array Derotation angles for ADI. Length should match input cube. algo: python routine Routine to be used to model and subtract the stellar PSF. From an input cube, derotation angles, and optional arguments, it should return a post-processed frame. psfn: 2d numpy array 2d array with the normalized PSF template. The PSF image must be centered wrt to the array. Therefore, it is recommended to run the function ``metrics/normalize_psf()`` to generate a centered and flux-normalized PSF template. plsc : float Value of the plsc in arcsec/px. Only used for printing debug output when ``verbose=True``. algo_options: dict Options for algo. To be provided as a dictionary. Can include ncomp (for PCA), svd_mode, collapse, imlib, interpolation, scaling, delta_rot transmission: numpy array, optional Array with 2 columns. First column is the radial separation in pixels. Second column is the off-axis transmission (between 0 and 1) at the radial separation given in column 1. mu_sigma: tuple of 2 floats, bool or None, opt If set to None: not used, and falls back to original version of the algorithm, using fmerit. 
        If a tuple of 2 elements: should be the mean and standard deviation
        of pixel intensities in an annulus centered on the location of the
        companion candidate, excluding the area directly adjacent to the CC.
        If set to anything else but None/False/tuple: will compute said mean
        and standard deviation automatically.
    force_rPA: bool, optional
        Whether to only search for the optimal flux, provided (r, PA).
    fmerit: None
        Figure of merit to use, if mu_sigma is None.
    simplex_options: dict
        All the required simplex parameters, for instance
        {'tol': 1e-08, 'max_iter': 200}
    bins: int or None, opt
        Number of bins for the histogram of parameter deviations. If None,
        will be determined automatically based on the number of injected
        fake companions.
    full_output: bool, optional
        Whether to return more outputs.
    output: str, optional
        The name of the output file (if save is True).
    save: bool, optional
        If True, the results are pickled.
    verbose: bool, optional
        If True, information is displayed in the shell.
    plot: bool, optional
        Whether to plot the gaussian fit to the distributions of parameter
        deviations (between retrieved and injected).

    Returns
    -------
    sp_unc: numpy ndarray of 3 elements
        Uncertainties on the radius, position angle and flux of the
        companion, respectively, associated with residual speckle noise.
        Only 1 element if force_rPA is set to True.

    If full_output, also returns:

    mean_dev: numpy ndarray of 3 elements
        Mean deviation for each of the 3 parameters.
    p_simplex: numpy ndarray n_fc x 3
        Parameters retrieved by the simplex for the injected fake
        companions; n_fc is the number of injected fake companions.
    offset: numpy ndarray n_fc x 3
        Deviations with respect to the values used for injection of the
        fake companions.
    chi2, nit, success: numpy ndarray of length n_fc
        Outputs from the simplex function for the retrieval of the
        parameters of each injected companion: chi square value, number of
        iterations and whether the simplex converged, respectively.
""" if not nproc: # Hyper-threading "duplicates" the cores -> cpu_count/2 nproc = (cpu_count()/2) if verbose: print('') print('#######################################################') print('### SPECKLE NOISE DETERMINATION ###') print('#######################################################') print('') r_true, theta_true, f_true = p_true if angle_range[0]%360 == angle_range[-1]%360: angle_range = angle_range[:-1] if verbose: print('Number of steps: {}'.format(angle_range.shape[0])) print('') imlib = algo_options.get('imlib','opencv') interpolation = algo_options.get('interpolation','lanczos4') # FIRST SUBTRACT THE TRUE COMPANION CANDIDATE planet_parameter = np.array([[r_true, theta_true, f_true]]) cube_pf = cube_planet_free(planet_parameter, cube, derot_angles, psfn, plsc, imlib=imlib, interpolation=interpolation, transmission=transmission) # Measure mu and sigma once in the annulus (instead of each MCMC step) if isinstance(mu_sigma,tuple): if len(mu_sigma)!=2: raise TypeError("If a tuple, mu_sigma must have 2 elements") elif mu_sigma is not None: ncomp = algo_options.get('ncomp', None) annulus_width = algo_options.get('annulus_width', int(fwhm)) if weights is not None: if not len(weights)==cube.shape[0]: raise TypeError("Weights should have same length as cube axis 0") norm_weights = weights/np.sum(weights) else: norm_weights=weights mu_sigma = get_mu_and_sigma(cube, derot_angles, ncomp, annulus_width, aperture_radius, fwhm, r_true, theta_true, cube_ref=cube_ref, wedge=wedge, algo=algo, weights=norm_weights, algo_options=algo_options) res = pool_map(nproc, _estimate_speckle_one_angle, iterable(angle_range), cube_pf, psfn, derot_angles, r_true, f_true, plsc, fwhm, aperture_radius, cube_ref, fmerit, algo, algo_options, transmission, mu_sigma, weights, force_rPA, simplex_options, verbose=verbose) residuals = np.array(res) if verbose: print("residuals (offsets): ", residuals[:,3],residuals[:,4], residuals[:,5]) p_simplex = np.transpose(np.vstack((residuals[:,0],residuals[:,1], residuals[:,2]))) offset = np.transpose(np.vstack((residuals[:,3],residuals[:,4], residuals[:,5]))) print(offset) chi2 = residuals[:,6] nit = residuals[:,7] success = residuals[:,8] if save: speckles = {'r_true':r_true, 'angle_range': angle_range, 'f_true':f_true, 'r_simplex':residuals[:,0], 'theta_simplex':residuals[:,1], 'f_simplex':residuals[:,2], 'offset': offset, 'chi2': chi2, 'nit': nit, 'success': success} if output is None: output = 'speckles_noise_result' from pickle import Pickler with open(output,'wb') as fileSave: myPickler = Pickler(fileSave) myPickler.dump(speckles) # Calculate 1 sigma of distribution of deviations print(offset.shape) if force_rPA: offset = offset[:,2] print(offset.shape) if bins is None: bins = offset.shape[0] mean_dev, sp_unc = confidence(offset, cfd=68.27, bins=bins, gaussian_fit=True, verbose=True, save=False, output_dir='', force=True) if plot: plt.show() if full_output: return sp_unc, mean_dev, p_simplex, offset, chi2, nit, success else: return sp_unc def _estimate_speckle_one_angle(angle, cube_pf, psfn, angs, r_true, f_true, plsc, fwhm, aperture_radius, cube_ref, fmerit, algo, algo_options, transmission, mu_sigma, weights, force_rPA, simplex_options, verbose=True): if verbose: print('Process is running for angle: {:.2f}'.format(angle)) cube_fc = cube_inject_companions(cube_pf, psfn, angs, flevel=f_true, plsc=plsc, rad_dists=[r_true], n_branches=1, theta=angle, transmission=transmission, verbose=False) ncomp = algo_options.get('ncomp', None) annulus_width = 
    annulus_width = algo_options.get('annulus_width', int(fwhm))
    res_simplex = firstguess_simplex((r_true, angle, f_true), cube_fc, angs,
                                     psfn, plsc, ncomp, fwhm, annulus_width,
                                     aperture_radius, cube_ref=cube_ref,
                                     fmerit=fmerit, algo=algo,
                                     algo_options=algo_options,
                                     transmission=transmission,
                                     mu_sigma=mu_sigma, weights=weights,
                                     force_rPA=force_rPA,
                                     options=simplex_options, verbose=False)

    if force_rPA:
        simplex_res_f, = res_simplex.x
        simplex_res_r, simplex_res_PA = r_true, angle
    else:
        simplex_res_r, simplex_res_PA, simplex_res_f = res_simplex.x
    offset_r = simplex_res_r - r_true
    offset_PA = simplex_res_PA - angle
    offset_f = simplex_res_f - f_true
    chi2 = res_simplex.fun
    nit = res_simplex.nit
    success = res_simplex.success

    return (simplex_res_r, simplex_res_PA, simplex_res_f, offset_r,
            offset_PA, offset_f, chi2, nit, success)
carlgogo/vip_exoplanets
vip_hci/negfc/speckle_noise.py
Python
bsd-3-clause
12,649
[ "Gaussian" ]
3e289df5debf8f802a6c93ce4e5dcee83cb38ea666978a0904be7e57f8429f3f
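A hedged usage sketch for speckle_noise_uncertainty. The input arrays and file names are hypothetical placeholders, the numeric values are illustrative only, and pca_annulus (used internally by VIP's negfc code) is assumed to be importable from vip_hci.pca as the algo routine; substitute any routine matching the contract in the docstring.

import numpy as np
from vip_hci.pca import pca_annulus                     # assumed import path
from vip_hci.negfc.speckle_noise import speckle_noise_uncertainty

# Hypothetical inputs: an ADI cube, its derotation angles and a PSF template
# normalized with metrics/normalize_psf(), loaded from placeholder files.
cube = np.load('adi_cube.npy')
angs = np.load('derot_angles.npy')
psfn = np.load('psf_norm.npy')

sp_unc = speckle_noise_uncertainty(
    cube,
    p_true=(30.5, 112.0, 230.0),        # (r [px], PA [deg], flux) of the CC
    angle_range=np.linspace(0, 360, 12, endpoint=False),
    derot_angles=angs,
    algo=pca_annulus,                   # any routine matching the docstring
    psfn=psfn,
    plsc=0.0272, fwhm=4.8, aperture_radius=2,
    algo_options={'ncomp': 10},
    full_output=False)
print(sp_unc)                           # 1-sigma on (r, theta, f)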
#!/usr/bin/env python # # Appcelerator Titanium Module Packager # # import os, subprocess, sys, glob, string import zipfile from datetime import date cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename)) os.chdir(cwd) required_module_keys = ['architectures', 'name','version','moduleid','description','copyright','license','copyright','platform','minsdk'] module_defaults = { 'description':'My module', 'author': 'Your Name', 'license' : 'Specify your license', 'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year), } module_license_default = "TODO: place your license here and we'll include it in the module distribution" def find_sdk(config): sdk = config['TITANIUM_SDK'] return os.path.expandvars(os.path.expanduser(sdk)) def replace_vars(config,token): idx = token.find('$(') while idx != -1: idx2 = token.find(')',idx+2) if idx2 == -1: break key = token[idx+2:idx2] if not config.has_key(key): break token = token.replace('$(%s)' % key, config[key]) idx = token.find('$(') return token def read_ti_xcconfig(): contents = open(os.path.join(cwd,'titanium.xcconfig')).read() config = {} for line in contents.splitlines(False): line = line.strip() if line[0:2]=='//': continue idx = line.find('=') if idx > 0: key = line[0:idx].strip() value = line[idx+1:].strip() config[key] = replace_vars(config,value) return config def generate_doc(config): docdir = os.path.join(cwd,'documentation') if not os.path.exists(docdir): docdir = os.path.join(cwd,'..','documentation') if not os.path.exists(docdir): print "Couldn't find documentation file at: %s" % docdir return None try: import markdown2 as markdown except ImportError: import markdown documentation = [] for file in os.listdir(docdir): if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)): continue md = open(os.path.join(docdir,file)).read() html = markdown.markdown(md) documentation.append({file:html}); return documentation def compile_js(manifest,config): js_file = os.path.join(cwd,'assets','com.enouvo.tigif.js') if not os.path.exists(js_file): js_file = os.path.join(cwd,'..','assets','com.enouvo.tigif.js') if not os.path.exists(js_file): return from compiler import Compiler try: import json except: import simplejson as json compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs') root_asset, module_assets = compiler.compile_module() root_asset_content = """ %s return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]); """ % root_asset module_asset_content = """ %s NSNumber *index = [map objectForKey:path]; if (index == nil) { return nil; } return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]); """ % module_assets from tools import splice_code assets_router = os.path.join(cwd,'Classes','ComEnouvoTigifModuleAssets.m') splice_code(assets_router, 'asset', root_asset_content) splice_code(assets_router, 'resolve_asset', module_asset_content) # Generate the exports after crawling all of the available JS source exports = open('metadata.json','w') json.dump({'exports':compiler.exports }, exports) exports.close() def die(msg): print msg sys.exit(1) def warn(msg): print "[WARN] %s" % msg def validate_license(): license_file = os.path.join(cwd,'LICENSE') if not os.path.exists(license_file): license_file = os.path.join(cwd,'..','LICENSE') if os.path.exists(license_file): c = open(license_file).read() if c.find(module_license_default)!=-1: warn('please update the LICENSE file with 
your license text before distributing')

def validate_manifest():
    path = os.path.join(cwd,'manifest')
    if not os.path.exists(path): die("missing %s" % path)
    f = open(path)
    manifest = {}
    for line in f.readlines():
        line = line.strip()
        if line[0:1]=='#': continue
        if line.find(':') < 0: continue
        key,value = line.split(':', 1)  # split once so values may contain ':'
        manifest[key.strip()]=value.strip()
    for key in required_module_keys:
        if not manifest.has_key(key): die("missing required manifest key '%s'" % key)
        if manifest[key].strip() == '': die("manifest key '%s' missing required value" % key)
        if module_defaults.has_key(key):
            defvalue = module_defaults[key]
            curvalue = manifest[key]
            if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
    return manifest,path

ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']

def zip_dir(zf,dir,basepath,ignore=[],includeJSFiles=False):
    for root, dirs, files in os.walk(dir):
        for name in ignoreDirs:
            if name in dirs:
                dirs.remove(name)  # don't visit ignored directories
        for file in files:
            if file in ignoreFiles: continue
            e = os.path.splitext(file)
            if len(e) == 2 and e[1] == '.pyc': continue
            if not includeJSFiles and len(e) == 2 and e[1] == '.js': continue
            from_ = os.path.join(root, file)
            to_ = from_.replace(dir, basepath, 1)
            zf.write(from_, to_)

def glob_libfiles():
    files = []
    for libfile in glob.glob('build/**/*.a'):
        if libfile.find('Release-')!=-1:
            files.append(libfile)
    return files

def build_module(manifest,config):
    from tools import ensure_dev_path
    ensure_dev_path()

    rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
    if rc != 0:
        die("xcodebuild failed")

    # build the merged library using lipo
    moduleid = manifest['moduleid']
    libpaths = ''
    for libfile in glob_libfiles():
        libpaths+='%s ' % libfile

    os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))

def verify_build_arch(manifest, config):
    binaryname = 'lib%s.a' % manifest['moduleid']
    binarypath = os.path.join('build', binaryname)
    manifestarch = set(manifest['architectures'].split(' '))

    output = subprocess.check_output('xcrun lipo -info %s' % binarypath, shell=True)
    builtarch = set(output.split(':')[-1].strip().split(' '))

    if ('arm64' not in builtarch):
        warn('built module is missing 64-bit support.')

    if (manifestarch != builtarch):
        warn('there is a discrepancy between the architectures specified in the module manifest and the compiled binary.')
        warn('architectures in manifest: %s' % ', '.join(manifestarch))
        warn('compiled binary architectures: %s' % ', '.join(builtarch))
        die('please update manifest to match module binary architectures.')

def package_module(manifest,mf,config):
    name = manifest['name'].lower()
    moduleid = manifest['moduleid'].lower()
    version = manifest['version']
    modulezip = '%s-iphone-%s.zip' % (moduleid,version)
    if os.path.exists(modulezip): os.remove(modulezip)
    zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
    modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
    zf.write(mf,'%s/manifest' % modulepath)
    libname = 'lib%s.a' % moduleid
    zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
    docs = generate_doc(config)
    if docs!=None:
        for doc in docs:
            for file, html in doc.iteritems():
                filename = string.replace(file,'.md','.html')
                zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
    p = os.path.join(cwd, 'assets')
    if not os.path.exists(p):
        p = os.path.join(cwd, '..', 'assets')
    if os.path.exists(p):
        zip_dir(zf,p,'%s/%s' % (modulepath,'assets'),['README'])
    for dn in ('example','platform'):
        p = os.path.join(cwd, dn)
        if not os.path.exists(p): p = os.path.join(cwd, '..', dn)
        if os.path.exists(p):
            zip_dir(zf,p,'%s/%s' % (modulepath,dn),['README'],True)
    license_file = os.path.join(cwd,'LICENSE')
    if not os.path.exists(license_file):
        license_file = os.path.join(cwd,'..','LICENSE')
    if os.path.exists(license_file):
        zf.write(license_file,'%s/LICENSE' % modulepath)
    zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
    exports_file = 'metadata.json'
    if os.path.exists(exports_file):
        zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
    zf.close()

if __name__ == '__main__':
    manifest,mf = validate_manifest()
    validate_license()
    config = read_ti_xcconfig()

    sdk = find_sdk(config)
    sys.path.insert(0,os.path.join(sdk,'iphone'))
    sys.path.append(os.path.join(sdk, "common"))

    compile_js(manifest,config)
    build_module(manifest,config)
    verify_build_arch(manifest, config)
    package_module(manifest,mf,config)
    sys.exit(0)
Enouvo/TiGIF
iphone/build.py
Python
apache-2.0
8,495
[ "VisIt" ]
5d922cacab84d23b272cb925ac2bdd977e855fc898e9cad95fa758d3d2c496b1
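read_ti_xcconfig() above expands $(KEY) tokens inside titanium.xcconfig values via replace_vars(). A standalone sketch of that expansion on a made-up two-line config; the function and values here are illustrative, and the code runs under Python 2 (matching the build script) as well as Python 3.

# Standalone sketch of the $(KEY) token expansion done by replace_vars().
def expand(config, token):
    idx = token.find('$(')
    while idx != -1:
        idx2 = token.find(')', idx + 2)
        if idx2 == -1: break
        key = token[idx + 2:idx2]
        if key not in config: break          # unknown key: stop expanding
        token = token.replace('$(%s)' % key, config[key])
        idx = token.find('$(')
    return token

config = {'TITANIUM_SDK': '/Library/titanium/3.1.0'}
print(expand(config, '$(TITANIUM_SDK)/iphone'))  # /Library/titanium/3.1.0/iphone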
#!/usr/bin/env python SITES = { '163' : 'netease', '56' : 'w56', 'acfun' : 'acfun', 'archive' : 'archive', 'baidu' : 'baidu', 'bandcamp' : 'bandcamp', 'baomihua' : 'baomihua', 'bigthink' : 'bigthink', 'bilibili' : 'bilibili', 'cctv' : 'cntv', 'cntv' : 'cntv', 'cbs' : 'cbs', 'dailymotion' : 'dailymotion', 'dilidili' : 'dilidili', 'douban' : 'douban', 'douyu' : 'douyutv', 'ehow' : 'ehow', 'facebook' : 'facebook', 'fantasy' : 'fantasy', 'fc2' : 'fc2video', 'flickr' : 'flickr', 'freesound' : 'freesound', 'fun' : 'funshion', 'google' : 'google', 'giphy' : 'giphy', 'heavy-music' : 'heavymusic', 'huaban' : 'huaban', 'huomao' : 'huomaotv', 'iask' : 'sina', 'icourses' : 'icourses', 'ifeng' : 'ifeng', 'imgur' : 'imgur', 'in' : 'alive', 'infoq' : 'infoq', 'instagram' : 'instagram', 'interest' : 'interest', 'iqilu' : 'iqilu', 'iqiyi' : 'iqiyi', 'isuntv' : 'suntv', 'joy' : 'joy', 'kankanews' : 'bilibili', 'khanacademy' : 'khan', 'ku6' : 'ku6', 'kugou' : 'kugou', 'kuwo' : 'kuwo', 'le' : 'le', 'letv' : 'le', 'lizhi' : 'lizhi', 'magisto' : 'magisto', 'metacafe' : 'metacafe', 'mgtv' : 'mgtv', 'miomio' : 'miomio', 'mixcloud' : 'mixcloud', 'mtv81' : 'mtv81', 'musicplayon' : 'musicplayon', 'naver' : 'naver', '7gogo' : 'nanagogo', 'nicovideo' : 'nicovideo', 'panda' : 'panda', 'pinterest' : 'pinterest', 'pixnet' : 'pixnet', 'pptv' : 'pptv', 'qingting' : 'qingting', 'qq' : 'qq', 'quanmin' : 'quanmin', 'showroom-live' : 'showroom', 'sina' : 'sina', 'smgbb' : 'bilibili', 'sohu' : 'sohu', 'soundcloud' : 'soundcloud', 'ted' : 'ted', 'theplatform' : 'theplatform', 'tucao' : 'tucao', 'tudou' : 'tudou', 'tumblr' : 'tumblr', 'twimg' : 'twitter', 'twitter' : 'twitter', 'ucas' : 'ucas', 'videomega' : 'videomega', 'vidto' : 'vidto', 'vimeo' : 'vimeo', 'wanmen' : 'wanmen', 'weibo' : 'miaopai', 'veoh' : 'veoh', 'vine' : 'vine', 'vk' : 'vk', 'xiami' : 'xiami', 'xiaokaxiu' : 'yixia', 'xiaojiadianvideo' : 'fc2video', 'ximalaya' : 'ximalaya', 'yinyuetai' : 'yinyuetai', 'miaopai' : 'yixia', 'yizhibo' : 'yizhibo', 'youku' : 'youku', 'iwara' : 'iwara', 'youtu' : 'youtube', 'youtube' : 'youtube', 'zhanqi' : 'zhanqi', } import json import locale import logging import os import re import socket import sys import time from urllib import request, parse, error from http import cookiejar from importlib import import_module import argparse from .version import __version__ from .util import log, term from .util.git import get_version from .util.strings import get_filename, unescape_html from . 
import json_output as json_output_ dry_run = False json_output = False force = False player = None extractor_proxy = None cookies = None output_filename = None fake_headers = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Charset': 'UTF-8,*;q=0.5', 'Accept-Encoding': 'gzip,deflate,sdch', 'Accept-Language': 'en-US,en;q=0.8', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0' } if sys.stdout.isatty(): default_encoding = sys.stdout.encoding.lower() else: default_encoding = locale.getpreferredencoding().lower() def rc4(key, data): #all encryption algo should work on bytes assert type(key)==type(data) and type(key) == type(b'') state = list(range(256)) j = 0 for i in range(256): j += state[i] + key[i % len(key)] j &= 0xff state[i], state[j] = state[j], state[i] i = 0 j = 0 out_list = [] for char in data: i += 1 i &= 0xff j += state[i] j &= 0xff state[i], state[j] = state[j], state[i] prn = state[(state[i] + state[j]) & 0xff] out_list.append(char ^ prn) return bytes(out_list) def general_m3u8_extractor(url, headers={}): m3u8_list = get_content(url, headers=headers).split('\n') urls = [] for line in m3u8_list: line = line.strip() if line and not line.startswith('#'): if line.startswith('http'): urls.append(line) else: seg_url = parse.urljoin(url, line) urls.append(seg_url) return urls def maybe_print(*s): try: print(*s) except: pass def tr(s): if default_encoding == 'utf-8': return s else: return s #return str(s.encode('utf-8'))[2:-1] # DEPRECATED in favor of match1() def r1(pattern, text): m = re.search(pattern, text) if m: return m.group(1) # DEPRECATED in favor of match1() def r1_of(patterns, text): for p in patterns: x = r1(p, text) if x: return x def match1(text, *patterns): """Scans through a string for substrings matched some patterns (first-subgroups only). Args: text: A string to be scanned. patterns: Arbitrary number of regex patterns. Returns: When only one pattern is given, returns a string (None if no match found). When more than one pattern are given, returns a list of strings ([] if no match found). """ if len(patterns) == 1: pattern = patterns[0] match = re.search(pattern, text) if match: return match.group(1) else: return None else: ret = [] for pattern in patterns: match = re.search(pattern, text) if match: ret.append(match.group(1)) return ret def matchall(text, patterns): """Scans through a string for substrings matched some patterns. Args: text: A string to be scanned. patterns: a list of regex pattern. Returns: a list if matched. empty if not. """ ret = [] for pattern in patterns: match = re.findall(pattern, text) ret += match return ret def launch_player(player, urls): import subprocess import shlex subprocess.call(shlex.split(player) + list(urls)) def parse_query_param(url, param): """Parses the query string of a URL and returns the value of a parameter. Args: url: A URL. param: A string representing the name of the parameter. Returns: The value of the parameter. """ try: return parse.parse_qs(parse.urlparse(url).query)[param][0] except: return None def unicodize(text): return re.sub(r'\\u([0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f])', lambda x: chr(int(x.group(0)[2:], 16)), text) # DEPRECATED in favor of util.legitimize() def escape_file_path(path): path = path.replace('/', '-') path = path.replace('\\', '-') path = path.replace('*', '-') path = path.replace('?', '-') return path def ungzip(data): """Decompresses data for Content-Encoding: gzip. 
""" from io import BytesIO import gzip buffer = BytesIO(data) f = gzip.GzipFile(fileobj=buffer) return f.read() def undeflate(data): """Decompresses data for Content-Encoding: deflate. (the zlib compression is used.) """ import zlib decompressobj = zlib.decompressobj(-zlib.MAX_WBITS) return decompressobj.decompress(data)+decompressobj.flush() # DEPRECATED in favor of get_content() def get_response(url, faker = False): logging.debug('get_response: %s' % url) # install cookies if cookies: opener = request.build_opener(request.HTTPCookieProcessor(cookies)) request.install_opener(opener) if faker: response = request.urlopen(request.Request(url, headers = fake_headers), None) else: response = request.urlopen(url) data = response.read() if response.info().get('Content-Encoding') == 'gzip': data = ungzip(data) elif response.info().get('Content-Encoding') == 'deflate': data = undeflate(data) response.data = data return response # DEPRECATED in favor of get_content() def get_html(url, encoding = None, faker = False): content = get_response(url, faker).data return str(content, 'utf-8', 'ignore') # DEPRECATED in favor of get_content() def get_decoded_html(url, faker = False): response = get_response(url, faker) data = response.data charset = r1(r'charset=([\w-]+)', response.headers['content-type']) if charset: return data.decode(charset, 'ignore') else: return data def get_location(url): logging.debug('get_location: %s' % url) response = request.urlopen(url) # urllib will follow redirections and it's too much code to tell urllib # not to do that return response.geturl() def urlopen_with_retry(*args, **kwargs): retry_time = 3 for i in range(retry_time): try: return request.urlopen(*args, **kwargs) except socket.timeout as e: logging.debug('request attempt %s timeout' % str(i + 1)) if i + 1 == retry_time: raise e # try to tackle youku CDN fails except error.HTTPError as http_error: logging.debug('HTTP Error with code{}'.format(http_error.code)) if i + 1 == retry_time: raise http_error def get_content(url, headers={}, decoded=True): """Gets the content of a URL via sending a HTTP GET request. Args: url: A URL. headers: Request headers used by the client. decoded: Whether decode the response body using UTF-8 or the charset specified in Content-Type. Returns: The content as a string. """ logging.debug('get_content: %s' % url) req = request.Request(url, headers=headers) if cookies: cookies.add_cookie_header(req) req.headers.update(req.unredirected_hdrs) response = urlopen_with_retry(req) data = response.read() # Handle HTTP compression for gzip and deflate (zlib) content_encoding = response.getheader('Content-Encoding') if content_encoding == 'gzip': data = ungzip(data) elif content_encoding == 'deflate': data = undeflate(data) # Decode the response body if decoded: charset = match1(response.getheader('Content-Type'), r'charset=([\w-]+)') if charset is not None: data = data.decode(charset) else: data = data.decode('utf-8', 'ignore') return data def post_content(url, headers={}, post_data={}, decoded=True): """Post the content of a URL via sending a HTTP POST request. Args: url: A URL. headers: Request headers used by the client. decoded: Whether decode the response body using UTF-8 or the charset specified in Content-Type. Returns: The content as a string. 
""" logging.debug('post_content: %s \n post_data: %s' % (url, post_data)) req = request.Request(url, headers=headers) if cookies: cookies.add_cookie_header(req) req.headers.update(req.unredirected_hdrs) post_data_enc = bytes(parse.urlencode(post_data), 'utf-8') response = urlopen_with_retry(req, data=post_data_enc) data = response.read() # Handle HTTP compression for gzip and deflate (zlib) content_encoding = response.getheader('Content-Encoding') if content_encoding == 'gzip': data = ungzip(data) elif content_encoding == 'deflate': data = undeflate(data) # Decode the response body if decoded: charset = match1(response.getheader('Content-Type'), r'charset=([\w-]+)') if charset is not None: data = data.decode(charset) else: data = data.decode('utf-8') return data def url_size(url, faker = False, headers = {}): if faker: response = urlopen_with_retry(request.Request(url, headers=fake_headers)) elif headers: response = urlopen_with_retry(request.Request(url, headers=headers)) else: response = urlopen_with_retry(url) size = response.headers['content-length'] return int(size) if size!=None else float('inf') def urls_size(urls, faker = False, headers = {}): return sum([url_size(url, faker=faker, headers=headers) for url in urls]) def get_head(url, headers = {}, get_method = 'HEAD'): logging.debug('get_head: %s' % url) if headers: req = request.Request(url, headers=headers) else: req = request.Request(url) req.get_method = lambda: get_method res = urlopen_with_retry(req) return dict(res.headers) def url_info(url, faker = False, headers = {}): logging.debug('url_info: %s' % url) if faker: response = urlopen_with_retry(request.Request(url, headers=fake_headers)) elif headers: response = urlopen_with_retry(request.Request(url, headers=headers)) else: response = urlopen_with_retry(request.Request(url)) headers = response.headers type = headers['content-type'] if type == 'image/jpg; charset=UTF-8' or type == 'image/jpg' : type = 'audio/mpeg' #fix for netease mapping = { 'video/3gpp': '3gp', 'video/f4v': 'flv', 'video/mp4': 'mp4', 'video/MP2T': 'ts', 'video/quicktime': 'mov', 'video/webm': 'webm', 'video/x-flv': 'flv', 'video/x-ms-asf': 'asf', 'audio/mp4': 'mp4', 'audio/mpeg': 'mp3', 'audio/wav': 'wav', 'audio/x-wav': 'wav', 'audio/wave': 'wav', 'image/jpeg': 'jpg', 'image/png': 'png', 'image/gif': 'gif', 'application/pdf': 'pdf', } if type in mapping: ext = mapping[type] else: type = None if headers['content-disposition']: try: filename = parse.unquote(r1(r'filename="?([^"]+)"?', headers['content-disposition'])) if len(filename.split('.')) > 1: ext = filename.split('.')[-1] else: ext = None except: ext = None else: ext = None if headers['transfer-encoding'] != 'chunked': size = headers['content-length'] and int(headers['content-length']) else: size = None return type, ext, size def url_locations(urls, faker = False, headers = {}): locations = [] for url in urls: logging.debug('url_locations: %s' % url) if faker: response = urlopen_with_retry(request.Request(url, headers=fake_headers)) elif headers: response = urlopen_with_retry(request.Request(url, headers=headers)) else: response = urlopen_with_retry(request.Request(url)) locations.append(response.url) return locations def url_save(url, filepath, bar, refer=None, is_part=False, faker=False, headers=None, timeout=None, **kwargs): tmp_headers = headers.copy() if headers is not None else {} # When a referer specified with param refer, the key must be 'Referer' for the hack here if refer is not None: tmp_headers['Referer'] = refer file_size = 
url_size(url, faker=faker, headers=tmp_headers) if os.path.exists(filepath): if not force and file_size == os.path.getsize(filepath): if not is_part: if bar: bar.done() print('Skipping %s: file already exists' % tr(os.path.basename(filepath))) else: if bar: bar.update_received(file_size) return else: if not is_part: if bar: bar.done() print('Overwriting %s' % tr(os.path.basename(filepath)), '...') elif not os.path.exists(os.path.dirname(filepath)): os.mkdir(os.path.dirname(filepath)) temp_filepath = filepath + '.download' if file_size!=float('inf') else filepath received = 0 if not force: open_mode = 'ab' if os.path.exists(temp_filepath): received += os.path.getsize(temp_filepath) if bar: bar.update_received(os.path.getsize(temp_filepath)) else: open_mode = 'wb' if received < file_size: if faker: tmp_headers = fake_headers ''' if parameter headers passed in, we have it copied as tmp_header elif headers: headers = headers else: headers = {} ''' if received: tmp_headers['Range'] = 'bytes=' + str(received) + '-' if refer: tmp_headers['Referer'] = refer if timeout: response = urlopen_with_retry(request.Request(url, headers=tmp_headers), timeout=timeout) else: response = urlopen_with_retry(request.Request(url, headers=tmp_headers)) try: range_start = int(response.headers['content-range'][6:].split('/')[0].split('-')[0]) end_length = int(response.headers['content-range'][6:].split('/')[1]) range_length = end_length - range_start except: content_length = response.headers['content-length'] range_length = int(content_length) if content_length!=None else float('inf') if file_size != received + range_length: received = 0 if bar: bar.received = 0 open_mode = 'wb' with open(temp_filepath, open_mode) as output: while True: buffer = None try: buffer = response.read(1024 * 256) except socket.timeout: pass if not buffer: if received == file_size: # Download finished break # Unexpected termination. 
Retry request tmp_headers['Range'] = 'bytes=' + str(received) + '-' response = urlopen_with_retry(request.Request(url, headers=tmp_headers)) continue output.write(buffer) received += len(buffer) if bar: bar.update_received(len(buffer)) assert received == os.path.getsize(temp_filepath), '%s == %s == %s' % (received, os.path.getsize(temp_filepath), temp_filepath) if os.access(filepath, os.W_OK): os.remove(filepath) # on Windows rename could fail if destination filepath exists os.rename(temp_filepath, filepath) class SimpleProgressBar: term_size = term.get_terminal_size()[1] def __init__(self, total_size, total_pieces = 1): self.displayed = False self.total_size = total_size self.total_pieces = total_pieces self.current_piece = 1 self.received = 0 self.speed = '' self.last_updated = time.time() total_pieces_len = len(str(total_pieces)) # 38 is the size of all statically known size in self.bar total_str = '%5s' % round(self.total_size / 1048576, 1) total_str_width = max(len(total_str), 5) self.bar_size = self.term_size - 28 - 2*total_pieces_len - 2*total_str_width self.bar = '{:>4}%% ({:>%s}/%sMB) ├{:─<%s}┤[{:>%s}/{:>%s}] {}' % ( total_str_width, total_str, self.bar_size, total_pieces_len, total_pieces_len) def update(self): self.displayed = True bar_size = self.bar_size percent = round(self.received * 100 / self.total_size, 1) if percent >= 100: percent = 100 dots = bar_size * int(percent) // 100 plus = int(percent) - dots // bar_size * 100 if plus > 0.8: plus = '█' elif plus > 0.4: plus = '>' else: plus = '' bar = '█' * dots + plus bar = self.bar.format(percent, round(self.received / 1048576, 1), bar, self.current_piece, self.total_pieces, self.speed) sys.stdout.write('\r' + bar) sys.stdout.flush() def update_received(self, n): self.received += n time_diff = time.time() - self.last_updated bytes_ps = n / time_diff if time_diff else 0 if bytes_ps >= 1024 ** 3: self.speed = '{:4.0f} GB/s'.format(bytes_ps / 1024 ** 3) elif bytes_ps >= 1024 ** 2: self.speed = '{:4.0f} MB/s'.format(bytes_ps / 1024 ** 2) elif bytes_ps >= 1024: self.speed = '{:4.0f} kB/s'.format(bytes_ps / 1024) else: self.speed = '{:4.0f} B/s'.format(bytes_ps) self.last_updated = time.time() self.update() def update_piece(self, n): self.current_piece = n def done(self): if self.displayed: print() self.displayed = False class PiecesProgressBar: def __init__(self, total_size, total_pieces = 1): self.displayed = False self.total_size = total_size self.total_pieces = total_pieces self.current_piece = 1 self.received = 0 def update(self): self.displayed = True bar = '{0:>5}%[{1:<40}] {2}/{3}'.format('', '=' * 40, self.current_piece, self.total_pieces) sys.stdout.write('\r' + bar) sys.stdout.flush() def update_received(self, n): self.received += n self.update() def update_piece(self, n): self.current_piece = n def done(self): if self.displayed: print() self.displayed = False class DummyProgressBar: def __init__(self, *args): pass def update_received(self, n): pass def update_piece(self, n): pass def done(self): pass def get_output_filename(urls, title, ext, output_dir, merge): # lame hack for the --output-filename option global output_filename if output_filename: if ext: return output_filename + '.' 
+ ext return output_filename merged_ext = ext if (len(urls) > 1) and merge: from .processor.ffmpeg import has_ffmpeg_installed if ext in ['flv', 'f4v']: if has_ffmpeg_installed(): merged_ext = 'mp4' else: merged_ext = 'flv' elif ext == 'mp4': merged_ext = 'mp4' elif ext == 'ts': if has_ffmpeg_installed(): merged_ext = 'mkv' else: merged_ext = 'ts' return '%s.%s' % (title, merged_ext) def download_urls(urls, title, ext, total_size, output_dir='.', refer=None, merge=True, faker=False, headers = {}, **kwargs): assert urls if json_output: json_output_.download_urls(urls=urls, title=title, ext=ext, total_size=total_size, refer=refer) return if dry_run: print('Real URLs:\n%s' % '\n'.join(urls)) return if player: launch_player(player, urls) return if not total_size: try: total_size = urls_size(urls, faker=faker, headers=headers) except: import traceback traceback.print_exc(file=sys.stdout) pass title = tr(get_filename(title)) output_filename = get_output_filename(urls, title, ext, output_dir, merge) output_filepath = os.path.join(output_dir, output_filename) if total_size: if not force and os.path.exists(output_filepath) and os.path.getsize(output_filepath) >= total_size * 0.9: print('Skipping %s: file already exists' % output_filepath) print() return bar = SimpleProgressBar(total_size, len(urls)) else: bar = PiecesProgressBar(total_size, len(urls)) if len(urls) == 1: url = urls[0] print('Downloading %s ...' % tr(output_filename)) bar.update() url_save(url, output_filepath, bar, refer = refer, faker = faker, headers = headers, **kwargs) bar.done() else: parts = [] print('Downloading %s.%s ...' % (tr(title), ext)) bar.update() for i, url in enumerate(urls): filename = '%s[%02d].%s' % (title, i, ext) filepath = os.path.join(output_dir, filename) parts.append(filepath) #print 'Downloading %s [%s/%s]...' 
% (tr(filename), i + 1, len(urls)) bar.update_piece(i + 1) url_save(url, filepath, bar, refer = refer, is_part = True, faker = faker, headers = headers, **kwargs) bar.done() if not merge: print() return if 'av' in kwargs and kwargs['av']: from .processor.ffmpeg import has_ffmpeg_installed if has_ffmpeg_installed(): from .processor.ffmpeg import ffmpeg_concat_av ret = ffmpeg_concat_av(parts, output_filepath, ext) print('Merged into %s' % output_filename) if ret == 0: for part in parts: os.remove(part) elif ext in ['flv', 'f4v']: try: from .processor.ffmpeg import has_ffmpeg_installed if has_ffmpeg_installed(): from .processor.ffmpeg import ffmpeg_concat_flv_to_mp4 ffmpeg_concat_flv_to_mp4(parts, output_filepath) else: from .processor.join_flv import concat_flv concat_flv(parts, output_filepath) print('Merged into %s' % output_filename) except: raise else: for part in parts: os.remove(part) elif ext == 'mp4': try: from .processor.ffmpeg import has_ffmpeg_installed if has_ffmpeg_installed(): from .processor.ffmpeg import ffmpeg_concat_mp4_to_mp4 ffmpeg_concat_mp4_to_mp4(parts, output_filepath) else: from .processor.join_mp4 import concat_mp4 concat_mp4(parts, output_filepath) print('Merged into %s' % output_filename) except: raise else: for part in parts: os.remove(part) elif ext == "ts": try: from .processor.ffmpeg import has_ffmpeg_installed if has_ffmpeg_installed(): from .processor.ffmpeg import ffmpeg_concat_ts_to_mkv ffmpeg_concat_ts_to_mkv(parts, output_filepath) else: from .processor.join_ts import concat_ts concat_ts(parts, output_filepath) print('Merged into %s' % output_filename) except: raise else: for part in parts: os.remove(part) else: print("Can't merge %s files" % ext) print() def download_rtmp_url(url,title, ext,params={}, total_size=0, output_dir='.', refer=None, merge=True, faker=False): assert url if dry_run: print('Real URL:\n%s\n' % [url]) if params.get("-y",False): #None or unset ->False print('Real Playpath:\n%s\n' % [params.get("-y")]) return if player: from .processor.rtmpdump import play_rtmpdump_stream play_rtmpdump_stream(player, url, params) return from .processor.rtmpdump import has_rtmpdump_installed, download_rtmpdump_stream assert has_rtmpdump_installed(), "RTMPDump not installed." download_rtmpdump_stream(url, title, ext,params, output_dir) def download_url_ffmpeg(url,title, ext,params={}, total_size=0, output_dir='.', refer=None, merge=True, faker=False, stream=True): assert url if dry_run: print('Real URL:\n%s\n' % [url]) if params.get("-y",False): #None or unset ->False print('Real Playpath:\n%s\n' % [params.get("-y")]) return if player: launch_player(player, [url]) return from .processor.ffmpeg import has_ffmpeg_installed, ffmpeg_download_stream assert has_ffmpeg_installed(), "FFmpeg not installed." 
global output_filename if output_filename: dotPos = output_filename.rfind(".") title = output_filename[:dotPos] ext = output_filename[dotPos+1:] title = tr(get_filename(title)) ffmpeg_download_stream(url, title, ext, params, output_dir, stream=stream) def playlist_not_supported(name): def f(*args, **kwargs): raise NotImplementedError('Playlist is not supported for ' + name) return f def print_info(site_info, title, type, size, **kwargs): if json_output: json_output_.print_info(site_info=site_info, title=title, type=type, size=size) return if type: type = type.lower() if type in ['3gp']: type = 'video/3gpp' elif type in ['asf', 'wmv']: type = 'video/x-ms-asf' elif type in ['flv', 'f4v']: type = 'video/x-flv' elif type in ['mkv']: type = 'video/x-matroska' elif type in ['mp3']: type = 'audio/mpeg' elif type in ['mp4']: type = 'video/mp4' elif type in ['mov']: type = 'video/quicktime' elif type in ['ts']: type = 'video/MP2T' elif type in ['webm']: type = 'video/webm' elif type in ['jpg']: type = 'image/jpeg' elif type in ['png']: type = 'image/png' elif type in ['gif']: type = 'image/gif' if type in ['video/3gpp']: type_info = "3GPP multimedia file (%s)" % type elif type in ['video/x-flv', 'video/f4v']: type_info = "Flash video (%s)" % type elif type in ['video/mp4', 'video/x-m4v']: type_info = "MPEG-4 video (%s)" % type elif type in ['video/MP2T']: type_info = "MPEG-2 transport stream (%s)" % type elif type in ['video/webm']: type_info = "WebM video (%s)" % type #elif type in ['video/ogg']: # type_info = "Ogg video (%s)" % type elif type in ['video/quicktime']: type_info = "QuickTime video (%s)" % type elif type in ['video/x-matroska']: type_info = "Matroska video (%s)" % type #elif type in ['video/x-ms-wmv']: # type_info = "Windows Media video (%s)" % type elif type in ['video/x-ms-asf']: type_info = "Advanced Systems Format (%s)" % type #elif type in ['video/mpeg']: # type_info = "MPEG video (%s)" % type elif type in ['audio/mp4', 'audio/m4a']: type_info = "MPEG-4 audio (%s)" % type elif type in ['audio/mpeg']: type_info = "MP3 (%s)" % type elif type in ['audio/wav', 'audio/wave', 'audio/x-wav']: type_info = 'Waveform Audio File Format ({})'.format(type) elif type in ['image/jpeg']: type_info = "JPEG Image (%s)" % type elif type in ['image/png']: type_info = "Portable Network Graphics (%s)" % type elif type in ['image/gif']: type_info = "Graphics Interchange Format (%s)" % type elif type in ['m3u8']: if 'm3u8_type' in kwargs: if kwargs['m3u8_type'] == 'master': type_info = 'M3U8 Master {}'.format(type) else: type_info = 'M3U8 Playlist {}'.format(type) else: type_info = "Unknown type (%s)" % type maybe_print("Site: ", site_info) maybe_print("Title: ", unescape_html(tr(title))) print("Type: ", type_info) if type != 'm3u8': print("Size: ", round(size / 1048576, 2), "MiB (" + str(size) + " Bytes)") if type == 'm3u8' and 'm3u8_url' in kwargs: print('M3U8 Url: {}'.format(kwargs['m3u8_url'])) print() def mime_to_container(mime): mapping = { 'video/3gpp': '3gp', 'video/mp4': 'mp4', 'video/webm': 'webm', 'video/x-flv': 'flv', } if mime in mapping: return mapping[mime] else: return mime.split('/')[1] def parse_host(host): """Parses host name and port number from a string. 
""" if re.match(r'^(\d+)$', host) is not None: return ("0.0.0.0", int(host)) if re.match(r'^(\w+)://', host) is None: host = "//" + host o = parse.urlparse(host) hostname = o.hostname or "0.0.0.0" port = o.port or 0 return (hostname, port) def set_proxy(proxy): proxy_handler = request.ProxyHandler({ 'http': '%s:%s' % proxy, 'https': '%s:%s' % proxy, }) opener = request.build_opener(proxy_handler) request.install_opener(opener) def unset_proxy(): proxy_handler = request.ProxyHandler({}) opener = request.build_opener(proxy_handler) request.install_opener(opener) # DEPRECATED in favor of set_proxy() and unset_proxy() def set_http_proxy(proxy): if proxy == None: # Use system default setting proxy_support = request.ProxyHandler() elif proxy == '': # Don't use any proxy proxy_support = request.ProxyHandler({}) else: # Use proxy proxy_support = request.ProxyHandler({'http': '%s' % proxy, 'https': '%s' % proxy}) opener = request.build_opener(proxy_support) request.install_opener(opener) def print_more_compatible(*args, **kwargs): import builtins as __builtin__ """Overload default print function as py (<3.3) does not support 'flush' keyword. Although the function name can be same as print to get itself overloaded automatically, I'd rather leave it with a different name and only overload it when importing to make less confusion. """ # nothing happens on py3.3 and later if sys.version_info[:2] >= (3, 3): return __builtin__.print(*args, **kwargs) # in lower pyver (e.g. 3.2.x), remove 'flush' keyword and flush it as requested doFlush = kwargs.pop('flush', False) ret = __builtin__.print(*args, **kwargs) if doFlush: kwargs.get('file', sys.stdout).flush() return ret def download_main(download, download_playlist, urls, playlist, **kwargs): for url in urls: if re.match(r'https?://', url) is None: url = 'http://' + url if playlist: download_playlist(url, **kwargs) else: download(url, **kwargs) def load_cookies(cookiefile): global cookies try: cookies = cookiejar.MozillaCookieJar(cookiefile) cookies.load() except Exception: import sqlite3 cookies = cookiejar.MozillaCookieJar() con = sqlite3.connect(cookiefile) cur = con.cursor() try: cur.execute("""SELECT host, path, isSecure, expiry, name, value FROM moz_cookies""") for item in cur.fetchall(): c = cookiejar.Cookie( 0, item[4], item[5], None, False, item[0], item[0].startswith('.'), item[0].startswith('.'), item[1], False, item[2], item[3], item[3]=="", None, None, {}, ) cookies.set_cookie(c) except Exception: pass # TODO: Chromium Cookies # SELECT host_key, path, secure, expires_utc, name, encrypted_value # FROM cookies # http://n8henrie.com/2013/11/use-chromes-cookies-for-easier-downloading-with-python-requests/ def set_socks_proxy(proxy): try: import socks socks_proxy_addrs = proxy.split(':') socks.set_default_proxy(socks.SOCKS5, socks_proxy_addrs[0], int(socks_proxy_addrs[1])) socket.socket = socks.socksocket def getaddrinfo(*args): return [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))] socket.getaddrinfo = getaddrinfo except ImportError: log.w('Error importing PySocks library, socks proxy ignored.' 'In order to use use socks proxy, please install PySocks.') def script_main(download, download_playlist, **kwargs): logging.basicConfig(format='[%(levelname)s] %(message)s') def print_version(): log.i('version %s, a tiny downloader that scrapes the web.' % get_version(kwargs['repo_path'] if 'repo_path' in kwargs else __version__)) parser = argparse.ArgumentParser( prog='you-get', usage='you-get [OPTION]... 
URL...', description='A tiny downloader that scrapes the web', add_help=False, ) parser.add_argument('-V', '--version', action='store_true', help='Print version and exit') parser.add_argument('-h', '--help', action='store_true', help='Print this help message and exit') dry_run_grp = parser.add_argument_group('Dry-run options', '(no actual downloading)') dry_run_grp = dry_run_grp.add_mutually_exclusive_group() dry_run_grp.add_argument('-i', '--info', action='store_true', help='Print extracted information') dry_run_grp.add_argument('-u', '--url', action='store_true', help='Print extracted information with URLs') dry_run_grp.add_argument('--json', action='store_true', help='Print extracted URLs in JSON format') download_grp = parser.add_argument_group('Download options') download_grp.add_argument('-n', '--no-merge', action='store_true', default=False, help='Do not merge video parts') download_grp.add_argument('--no-caption', action='store_true', help='Do not download captions (subtitles, lyrics, danmaku, ...)') download_grp.add_argument('-f', '--force', action='store_true', default=False, help='Force overwriting existing files') download_grp.add_argument('-F', '--format', metavar='STREAM_ID', help='Set video format to STREAM_ID') download_grp.add_argument('-O', '--output-filename', metavar='FILE', help='Set output filename') download_grp.add_argument('-o', '--output-dir', metavar='DIR', default='.', help='Set output directory') download_grp.add_argument('-p', '--player', metavar='PLAYER', help='Stream extracted URL to a PLAYER') download_grp.add_argument('-c', '--cookies', metavar='COOKIES_FILE', help='Load cookies.txt or cookies.sqlite') download_grp.add_argument('-t', '--timeout', metavar='SECONDS', type=int, default=600, help='Set socket timeout') download_grp.add_argument('-d', '--debug', action='store_true', help='Show traceback and other debug info') download_grp.add_argument('-I', '--input-file', metavar='FILE', type=argparse.FileType('r'), help='Read non-playlist URLs from FILE') download_grp.add_argument('-P', '--password', help='Set video visit password to PASSWORD') download_grp.add_argument('-l', '--playlist', action='store_true', help='Prefer to download a playlist') proxy_grp = parser.add_argument_group('Proxy options') proxy_grp = proxy_grp.add_mutually_exclusive_group() proxy_grp.add_argument('-x', '--http-proxy', metavar='HOST:PORT', help='Use an HTTP proxy for downloading') proxy_grp.add_argument('-y', '--extractor-proxy', metavar='HOST:PORT', help='Use an HTTP proxy for extracting only') proxy_grp.add_argument('--no-proxy', action='store_true', help='Never use a proxy') proxy_grp.add_argument('-s', '--socks-proxy', metavar='HOST:PORT', help='Use an SOCKS5 proxy for downloading') download_grp.add_argument('--stream', help=argparse.SUPPRESS) download_grp.add_argument('--itag', help=argparse.SUPPRESS) parser.add_argument('URL', nargs='*', help=argparse.SUPPRESS) args = parser.parse_args() if args.help: print_version() parser.print_help() sys.exit() if args.version: print_version() sys.exit() if args.debug: # Set level of root logger to DEBUG logging.getLogger().setLevel(logging.DEBUG) global force global dry_run global json_output global player global extractor_proxy global output_filename output_filename = args.output_filename extractor_proxy = args.extractor_proxy info_only = args.info if args.url: dry_run = True if args.json: json_output = True # to fix extractors not use VideoExtractor dry_run = True info_only = False if args.cookies: load_cookies(args.cookies) caption = 
True stream_id = args.format or args.stream or args.itag if args.no_caption: caption = False if args.player: player = args.player caption = False if args.no_proxy: set_http_proxy('') else: set_http_proxy(args.http_proxy) if args.socks_proxy: set_socks_proxy(args.socks_proxy) URLs = [] if args.input_file: logging.debug('you are trying to load urls from %s', args.input_file) if args.playlist: log.e("reading playlist from a file is unsupported and won't make your life easier") sys.exit(2) URLs.extend(args.input_file.read().splitlines()) args.input_file.close() URLs.extend(args.URL) if not URLs: parser.print_help() sys.exit() socket.setdefaulttimeout(args.timeout) try: extra = {} if extractor_proxy: extra['extractor_proxy'] = extractor_proxy if stream_id: extra['stream_id'] = stream_id download_main( download, download_playlist, URLs, args.playlist, output_dir=args.output_dir, merge=not args.no_merge, info_only=info_only, json_output=json_output, caption=caption, password=args.password, **extra ) except KeyboardInterrupt: if args.debug: raise else: sys.exit(1) except UnicodeEncodeError: if args.debug: raise log.e('[error] oops, the current environment does not seem to support Unicode.') log.e('please set it to a UTF-8-aware locale first,') log.e('so as to save the video (with some Unicode characters) correctly.') log.e('you can do it like this:') log.e(' (Windows) % chcp 65001 ') log.e(' (Linux) $ LC_CTYPE=en_US.UTF-8') sys.exit(1) except Exception: if not args.debug: log.e('[error] oops, something went wrong.') log.e('don\'t panic, c\'est la vie. please try the following steps:') log.e(' (1) Rule out any network problem.') log.e(' (2) Make sure you-get is up-to-date.') log.e(' (3) Check if the issue is already known, on') log.e(' https://github.com/soimort/you-get/wiki/Known-Bugs') log.e(' https://github.com/soimort/you-get/issues') log.e(' (4) Run the command with \'--debug\' option,') log.e(' and report this issue with the full output.') else: print_version() log.i(args) raise sys.exit(1) def google_search(url): keywords = r1(r'https?://(.*)', url) url = 'https://www.google.com/search?tbm=vid&q=%s' % parse.quote(keywords) page = get_content(url, headers=fake_headers) videos = re.findall(r'<a href="(https?://[^"]+)" onmousedown="[^"]+">([^<]+)<', page) vdurs = re.findall(r'<span class="vdur _dwc">([^<]+)<', page) durs = [r1(r'(\d+:\d+)', unescape_html(dur)) for dur in vdurs] print("Google Videos search:") for v in zip(videos, durs): print("- video: %s [%s]" % (unescape_html(v[0][1]), v[1] if v[1] else '?')) print("# you-get %s" % log.sprint(v[0][0], log.UNDERLINE)) print() print("Best matched result:") return(videos[0][0]) def url_to_module(url): try: video_host = r1(r'https?://([^/]+)/', url) video_url = r1(r'https?://[^/]+(.*)', url) assert video_host and video_url except AssertionError: url = google_search(url) video_host = r1(r'https?://([^/]+)/', url) video_url = r1(r'https?://[^/]+(.*)', url) if video_host.endswith('.com.cn') or video_host.endswith('.ac.cn'): video_host = video_host[:-3] domain = r1(r'(\.[^.]+\.[^.]+)$', video_host) or video_host assert domain, 'unsupported url: ' + url k = r1(r'([^.]+)', domain) if k in SITES: return import_module('.'.join(['you_get', 'extractors', SITES[k]])), url else: import http.client video_host = r1(r'https?://([^/]+)/', url) # .cn could be removed if url.startswith('https://'): conn = http.client.HTTPSConnection(video_host) else: conn = http.client.HTTPConnection(video_host) conn.request("HEAD", video_url, headers=fake_headers) res = 
conn.getresponse()
        location = res.getheader('location')
        if location and location != url and not location.startswith('/'):
            return url_to_module(location)
        else:
            return import_module('you_get.extractors.universal'), url

def any_download(url, **kwargs):
    m, url = url_to_module(url)
    m.download(url, **kwargs)

def any_download_playlist(url, **kwargs):
    m, url = url_to_module(url)
    m.download_playlist(url, **kwargs)

def main(**kwargs):
    script_main(any_download, any_download_playlist, **kwargs)
zmwangx/you-get
src/you_get/common.py
Python
mit
46,899
[ "VisIt" ]
94dd5d8ab223a51dbc8c2c1b8b0d533dcfa7f3ca29ab26869a51e1fffc7d239a
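The `parse_host` helper at the top of this file hinges on a quirk of `urllib.parse.urlparse`: a bare `HOST:PORT` string only yields a hostname and port once it is prefixed with `//` (or carries a scheme). Below is a minimal, self-contained sketch of that trick; the regexes and the `0.0.0.0`/`0` defaults mirror the source above, while the asserts are illustrative inputs, not part of the you-get API.

import re
from urllib import parse

def parse_host(host):
    'Return (hostname, port) from "PORT", "HOST:PORT", or "scheme://HOST:PORT".'
    if re.match(r'^(\d+)$', host) is not None:   # a bare port number
        return ('0.0.0.0', int(host))
    if re.match(r'^(\w+)://', host) is None:     # no scheme: prepend '//' so that
        host = '//' + host                       # urlparse reads the netloc part
    o = parse.urlparse(host)
    return (o.hostname or '0.0.0.0', o.port or 0)

assert parse_host('8080') == ('0.0.0.0', 8080)
assert parse_host('127.0.0.1:1080') == ('127.0.0.1', 1080)
assert parse_host('socks5://localhost:9050') == ('localhost', 9050)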
"""Utilities for building response surface approximations.""" import numpy as np from scipy.optimize import fminbound from scipy.misc import comb from misc import process_inputs, process_inputs_outputs class ResponseSurface(): """An abstract class for response surfaces. Attributes ---------- N : int maximum degree of global polynomial in the response surface Rsqr : float the R-squared coefficient for the response surface X : ndarray an ndarray of training points for the response surface. The shape is M-by-m, where m is the number of dimensions. f : ndarray an ndarray of function values used to train the response surface. The shape of `f` is M-by-1. See Also -------- utils.response_surfaces.PolynomialApproximation utils.response_surfaces.RadialBasisApproximation """ N = None Rsqr = None X, f = None, None def __init__(self, N=2): self.N = N def train(self, X, f): raise NotImplementedError() def predict(self, X, compgrad=False): raise NotImplementedError() def gradient(self, X): return self.predict(X, compgrad=True)[1] def __call__(self, X): return self.predict(X)[0] class PolynomialApproximation(ResponseSurface): """Least-squares-fit, global, multivariate polynomial approximation. Attributes ---------- poly_weights : ndarray an ndarray of coefficients for the polynomial approximation in the monomial basis g : ndarray contains the m coefficients corresponding to the degree 1 monomials in the polynomial approximation H : ndarray an ndarray of shape m-by-m that contains the coefficients of the degree 2 monomials in the approximation See Also -------- utils.response_surfaces.RadialBasisApproximation Notes ----- All attributes besides the degree `N` are set when the class's `train` method is called. """ poly_weights = None g, H = None, None def train(self, X, f, weights=None): """Train the least-squares-fit polynomial approximation. Parameters ---------- X : ndarray an ndarray of training points for the polynomial approximation. The shape is M-by-m, where m is the number of dimensions. f : ndarray an ndarray of function values used to train the polynomial approximation. The shape of `f` is M-by-1. weights : ndarray, optional an ndarray of weights for the least-squares. (default is None, which means uniform weights) Notes ----- This method sets all the attributes of the class for use in the `predict` method. """ X, f, M, m = process_inputs_outputs(X, f) # check that there are enough points to train the polynomial if M < comb(self.N + m, m): raise Exception('Not enough points to fit response surface of order {:d}'.format(self.N)) B, indices = polynomial_bases(X, self.N) p = B.shape[1] if weights is not None: B, f = weights*B, weights*f poly_weights = np.linalg.lstsq(B, f)[0] Rsqr = 1.0 - ( np.linalg.norm(np.dot(B, poly_weights) - f)**2 / (M*np.var(f)) ) # store data self.X, self.f = X, f self.poly_weights = poly_weights.reshape((p,1)) self.Rsqr = Rsqr # organize linear and quadratic coefficients self.g = poly_weights[1:m+1].copy().reshape((m,1)) if self.N > 1: H = np.zeros((m, m)) for i in range(m+1, int(m+1+comb(m+1,2))): ind = indices[i,:] loc = np.nonzero(ind!=0)[0] if loc.size==1: H[loc,loc] = 2.0*poly_weights[i] elif loc.size==2: H[loc[0],loc[1]] = poly_weights[i] H[loc[1],loc[0]] = poly_weights[i] else: raise Exception('Error creating quadratic coefficients.') self.H = H def predict(self, X, compgrad=False): """Evaluate least-squares-fit polynomial approximation at new points. Parameters ---------- X : ndarray an ndarray of points to evaluate the polynomial approximation. 
The shape is M-by-m, where m is the number of dimensions. compgrad : bool, optional a flag to decide whether or not to compute the gradient of the polynomial approximation at the points `X`. (default False) Returns ------- f : ndarray an ndarray of predictions from the polynomial approximation. The shape of `f` is M-by-1. df : ndarray an ndarray of gradient predictions from the polynomial approximation. The shape of `df` is M-by-m. """ X, M, m = process_inputs(X) B = polynomial_bases(X, self.N)[0] f = np.dot(B, self.poly_weights).reshape((M, 1)) if compgrad: dB = grad_polynomial_bases(X, self.N) df = np.zeros((M, m)) for i in range(m): df[:,i] = np.dot(dB[:,:,i], self.poly_weights).reshape((M)) df = df.reshape((M, m)) else: df = None return f, df class RadialBasisApproximation(ResponseSurface): """Approximate a multivariate function with a radial basis. A class for global, multivariate radial basis approximation with anisotropic squared-exponential radial basis and a weighted-least-squares-fit monomial basis. Attributes ---------- radial_weights : ndarray an ndarray of coefficients radial basis functions in the model poly_weights : poly_weights an ndarray of coefficients for the polynomial approximation in the monomial basis K : ndarray an ndarray of shape M-by-M that contains the matrix of radial basis functions evaluated at the training points ell : ndarray an ndarray of shape m-by-1 that contains the characteristic length scales along each of the inputs See Also -------- utils.response_surfaces.PolynomialApproximation Notes ----- All attributes besides the degree `N` are set when the class's `train` method is called. """ K, ell = None, None radial_weights, poly_weights = None, None def train(self, X, f, v=None, e=None): """Train the radial basis approximation. Parameters ---------- X : ndarray an ndarray of training points for the polynomial approximation. The shape is M-by-m, where m is the number of dimensions. f : ndarray an ndarray of function values used to train the polynomial approximation. The shape of `f` is M-by-1. v : ndarray, optional contains the regularization parameters that model error in the function values (default None) e : ndarray, optional an ndarray containing the eigenvalues from the active subspace analysis. If present, the radial basis uses it to determine the appropriate anisotropy in the length scales. (default None) Notes ----- The approximation uses an multivariate, squared exponential radial basis. If `e` is not None, then the radial basis is anisotropic with length scales determined by `e`. Otherwise, the basis is isotropic. The length scale parameters (i.e., the rbf shape parameters) are determined with a maximum likelihood heuristic inspired by techniques for fitting a Gaussian process model. The approximation also includes a monomial basis with monomials of total degree up to order `N`. These are fit with weighted least-squares, where the weight matrix is the inverse of the matrix of radial basis functions evaluated at the training points. This method sets all the attributes of the class for use in the `predict` method. 
""" X, f, M, m = process_inputs_outputs(X, f) # check that there are enough points to train the polynomial if M < comb(self.N + m, m): raise Exception('Not enough points to fit response surface of order {:d}'.format(self.N)) # use maximum likelihood to tune parameters log10g = fminbound(_rbf_objective, -10.0, 1.0, args=(X, f, v, self.N, e, )) g = 10**(log10g) if e is None: ell = g*np.ones((m,1)) if v is None: v = 1e-6*np.ones(f.shape) else: ell = g*np.sum(e)/e[:m] if v is None: v = g*np.sum(e[m:])*np.ones(f.shape) # ensure conditioning v = np.amax([v.reshape(f.shape), 1e-6*np.ones(f.shape)], axis=0) # covariance matrix of observations K = exponential_squared(X, X, 1.0, ell) K += np.diag(v.reshape((M,))) B = polynomial_bases(X, self.N)[0] p = B.shape[1] C = np.hstack(( np.vstack(( K, B.T )), np.vstack(( B, np.zeros((p, p)) )) )) weights = np.linalg.solve(C, np.vstack(( f, np.zeros((p, 1)) )) ) radial_weights, poly_weights = weights[:M], weights[M:] res = f - np.dot(B, poly_weights) Rsqr = 1.0 - (np.dot( res.T, np.linalg.solve(K, res)) / np.dot( f.T, np.linalg.solve(K, f) )) # store parameters self.X, self.f = X, f self.ell, self.K = ell, K self.Rsqr = Rsqr[0,0] self.radial_weights, self.poly_weights = radial_weights, poly_weights def predict(self, X, compgrad=False): """Evaluate the radial basis approximation at new points. Parameters ---------- X : ndarray an ndarray of points to evaluate the polynomial approximation. The shape is M-by-m, where m is the number of dimensions. compgrad : bool, optional a flag to decide whether or not to compute the gradient of the polynomial approximation at the points `X`. (default False) Returns ------- f : ndarray an ndarray of predictions from the polynomial approximation. The shape of `f` is M-by-1. df : ndarray an ndarray of gradient predictions from the polynomial approximation. The shape of `df` is M-by-m. Notes ----- I'll tell you what. I just refactored this code to use terminology from radial basis functions instead of Gaussian processes, and I feel so much better about it. Now I don't have to compute that silly prediction variance and try to pretend that it has anything to do with the actual error in the approximation. Also, computing that variance requires another system solve, which might be expensive. So it's both expensive and of dubious value. So I got rid of it. Sorry, Gaussian processes. """ X, M, m = process_inputs(X) # K = exponential_squared(X, self.X, 1.0, self.ell) B = polynomial_bases(X, self.N)[0] f = np.dot(K, self.radial_weights) + np.dot(B, self.poly_weights) f = f.reshape((M, 1)) if compgrad: dK = grad_exponential_squared(self.X, X, 1.0, self.ell) dB = grad_polynomial_bases(X, self.N) df = np.zeros((M, m)) for i in range(m): df[:,i] = (np.dot(dK[:,:,i].T, self.radial_weights) + \ np.dot(dB[:,:,i], self.poly_weights)).reshape((M, )) df = df.reshape((M, m)) else: df = None return f, df def _rbf_objective(log10g, X, f, v, N, e): """Objective function for choosing the RBF shape parameters. Parameters ---------- log10g : float the log of the scaling factor for the rbf shape parameters X : ndarray the ndarray of training points f : ndarray the ndarray of training data v : ndarray contains the regularization parameters for the training data N : int the order of polynomial approximation e : ndarray contains the eigenvalues from the active subspace analysis Returns ------- r : float objective function value. If you were training a Gaussian process, it would be the negative log likelihood. In this context, it's just a heuristic. 
""" # TODO: I can probably make this implementation more efficient, but as of # now, I don't need to. g = 10**(log10g) M, m = X.shape if e is None: ell = g*np.ones((m,1)) if v is None: v = 1e-6*np.ones(f.shape) else: ell = g*np.sum(e)/e[:m] if v is None: v = g*np.sum(e[m:])*np.ones(f.shape) # covariance matrix K = exponential_squared(X, X, 1.0, ell) K += np.diag(v.reshape((M,))) L = np.linalg.cholesky(K) # polynomial basis B = polynomial_bases(X, N)[0] A = np.dot(B.T, np.linalg.solve(K, B)) z = np.dot(B.T, np.linalg.solve(K, f)) beta = np.linalg.solve(A, z) # residual res = f - np.dot(B, beta) # variance sig2 = np.max([np.dot(res.T, np.linalg.solve(K, res))/M, 5*np.finfo(float).eps]) r = np.sum(np.log(np.diag(L))) + M*np.log(sig2) return r def exponential_squared(X1, X2, sigma, ell): """Compute the matrix of radial basis functions. Parameters ---------- X1 : ndarray contains the centers of the radial functions X2 : ndarray the evaluation points of the radial functions sigma : float scales the radial functions ell : ndarray contains the length scales of each dimension Returns ------- C : ndarray the matrix of radial functions centered at `X1` and evaluated at `X2`. The shape of `C` is `X1.shape[0]`-by-`X2.shape[0]`. """ m = X1.shape[0] n = X2.shape[0] c = -1.0 / ell.flatten() C = np.zeros((m, n)) for i in range(n): x2 = X2[i,:] B = X1 - x2 C[:,i] = sigma*np.exp(np.dot(B*B, c)) return C def grad_exponential_squared(X1, X2, sigma, ell): """Compute the matrices of radial basis function gradients. Parameters ---------- X1 : ndarray contains the centers of the radial functions X2 : ndarray the evaluation points of the radial functions sigma : float scales the radial functions ell : ndarray contains the length scales of each dimension Returns ------- dC : ndarray the matrix of radial function gradients centered at `X1` and evaluated at `X2`. The shape of `dC` is `X1.shape[0]`-by-`X2.shape[0]`-by-m. `dC` is a three-dimensional ndarray. The third dimension indexes the partial derivatives in each gradient. """ m, d = X1.shape n = X2.shape[0] c = -1.0 / ell.flatten() C = np.zeros((m, n, d)) for k in range(d): for i in range(n): x2 = X2[i,:] B = X1 - x2 C[:,i,k] = sigma*(-2.0*c[k]*B[:,k])*np.exp(np.dot(B*B, c)) return C def polynomial_bases(X, N): """Compute the monomial bases. Parameters ---------- X : ndarray contains the points to evaluate the monomials N : int the maximum degree of the monomial basis Returns ------- B : ndarray contains the monomial evaluations I : ndarray contains the multi-indices that tell the degree of each univariate monomial term in the multivariate monomial """ M, m = X.shape I = index_set(N, m) n = I.shape[0] B = np.zeros((M, n)) for i in range(n): ind = I[i,:] B[:,i] = np.prod(np.power(X, ind), axis=1) return B, I def grad_polynomial_bases(X, N): """ Compute the gradients of the monomial bases. Parameters ---------- X : ndarray contains the points to evaluate the monomials N : int the maximum degree of the monomial basis Returns ------- dB : ndarray contains the gradients of the monomials evaluate at `X`. `dB` is a three-dimensional ndarray. The third dimension indexes the partial derivatives in each gradient. """ M, m = X.shape I = index_set(N, m) n = I.shape[0] B = np.zeros((M, n, m)) for k in range(m): for i in range(n): ind = I[i,:].copy() indk = ind[k] if indk==0: B[:,i,k] = np.zeros(M) else: ind[k] -= 1 B[:,i,k] = indk*np.prod(np.power(X, ind), axis=1) return B def _full_index_set(n, d): """ A helper function for index_set. 
""" if d == 1: I = np.array([[n]]) else: II = _full_index_set(n, d-1) m = II.shape[0] I = np.hstack((np.zeros((m, 1)), II)) for i in range(1, n+1): II = _full_index_set(n-i, d-1) m = II.shape[0] T = np.hstack((i*np.ones((m, 1)), II)) I = np.vstack((I, T)) return I def index_set(n, d): """Enumerate multi-indices for a total degree of order `n` in `d` variables. Parameters ---------- n : int degree of polynomial d : int number of variables, dimension Returns ------- I : ndarray multi-indices ordered as columns """ I = np.zeros((1, d)) for i in range(1, n+1): II = _full_index_set(i, d) I = np.vstack((I, II)) return I[:,::-1]
paulcon/active_subspaces
active_subspaces/utils/response_surfaces.py
Python
mit
17,856
[ "Gaussian" ]
c2c10678ebe87d1f18f5ff9d2b95fa0cffb634b66f9d2b68e37b3b1d3ee2c340
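`PolynomialApproximation.train` above is, at its core, a least-squares solve against the monomial basis that `polynomial_bases` builds from the multi-indices of `index_set`. The sketch below reproduces that fit end to end, assuming only numpy; `monomial_basis` is a hypothetical stand-in whose column ordering (enumerated with itertools) differs from the source's `index_set` ordering but spans the same total-degree space.

import numpy as np
from itertools import combinations_with_replacement

def monomial_basis(X, N):
    'All monomials of total degree <= N, evaluated at the rows of X.'
    M, m = X.shape
    cols = [np.ones(M)]                       # the degree-0 term
    for deg in range(1, N + 1):
        for idx in combinations_with_replacement(range(m), deg):
            cols.append(np.prod(X[:, idx], axis=1))
    return np.column_stack(cols)

# Recover f(x, y) = 1 + 2*x + 3*y**2 from noisy samples, as train() does via lstsq.
rng = np.random.default_rng(0)
X = rng.uniform(-1, 1, size=(50, 2))
f = 1 + 2*X[:, 0] + 3*X[:, 1]**2 + 0.01*rng.standard_normal(50)
B = monomial_basis(X, N=2)
w, *_ = np.linalg.lstsq(B, f, rcond=None)
print(np.round(w, 2))   # ~[1, 2, 0, 0, 0, 3] against basis [1, x, y, x^2, x*y, y^2]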
import unittest from test import support from itertools import * from weakref import proxy from decimal import Decimal from fractions import Fraction import sys import operator import random import copy import pickle from functools import reduce maxsize = support.MAX_Py_ssize_t minsize = -maxsize-1 def lzip(*args): return list(zip(*args)) def onearg(x): 'Test function of one argument' return 2*x def errfunc(*args): 'Test function that raises an error' raise ValueError def gen3(): 'Non-restartable source sequence' for i in (0, 1, 2): yield i def isEven(x): 'Test predicate' return x%2==0 def isOdd(x): 'Test predicate' return x%2==1 class StopNow: 'Class emulating an empty iterable.' def __iter__(self): return self def __next__(self): raise StopIteration def take(n, seq): 'Convenience function for partially consuming a long of infinite iterable' return list(islice(seq, n)) def prod(iterable): return reduce(operator.mul, iterable, 1) def fact(n): 'Factorial' return prod(range(1, n+1)) class TestBasicOps(unittest.TestCase): def test_accumulate(self): self.assertEqual(list(accumulate(range(10))), # one positional arg [0, 1, 3, 6, 10, 15, 21, 28, 36, 45]) self.assertEqual(list(accumulate(iterable=range(10))), # kw arg [0, 1, 3, 6, 10, 15, 21, 28, 36, 45]) for typ in int, complex, Decimal, Fraction: # multiple types self.assertEqual( list(accumulate(map(typ, range(10)))), list(map(typ, [0, 1, 3, 6, 10, 15, 21, 28, 36, 45]))) self.assertEqual(list(accumulate('abc')), ['a', 'ab', 'abc']) # works with non-numeric self.assertEqual(list(accumulate([])), []) # empty iterable self.assertEqual(list(accumulate([7])), [7]) # iterable of length one self.assertRaises(TypeError, accumulate, range(10), 5) # too many args self.assertRaises(TypeError, accumulate) # too few args self.assertRaises(TypeError, accumulate, x=range(10)) # unexpected kwd arg self.assertRaises(TypeError, list, accumulate([1, []])) # args that don't add def test_chain(self): def chain2(*iterables): 'Pure python version in the docs' for it in iterables: for element in it: yield element for c in (chain, chain2): self.assertEqual(list(c('abc', 'def')), list('abcdef')) self.assertEqual(list(c('abc')), list('abc')) self.assertEqual(list(c('')), []) self.assertEqual(take(4, c('abc', 'def')), list('abcd')) self.assertRaises(TypeError, list,c(2, 3)) def test_chain_from_iterable(self): self.assertEqual(list(chain.from_iterable(['abc', 'def'])), list('abcdef')) self.assertEqual(list(chain.from_iterable(['abc'])), list('abc')) self.assertEqual(list(chain.from_iterable([''])), []) self.assertEqual(take(4, chain.from_iterable(['abc', 'def'])), list('abcd')) self.assertRaises(TypeError, list, chain.from_iterable([2, 3])) def test_combinations(self): self.assertRaises(TypeError, combinations, 'abc') # missing r argument self.assertRaises(TypeError, combinations, 'abc', 2, 1) # too many arguments self.assertRaises(TypeError, combinations, None) # pool is not iterable self.assertRaises(ValueError, combinations, 'abc', -2) # r is negative self.assertEqual(list(combinations('abc', 32)), []) # r > n self.assertEqual(list(combinations(range(4), 3)), [(0,1,2), (0,1,3), (0,2,3), (1,2,3)]) def combinations1(iterable, r): 'Pure python version shown in the docs' pool = tuple(iterable) n = len(pool) if r > n: return indices = list(range(r)) yield tuple(pool[i] for i in indices) while 1: for i in reversed(range(r)): if indices[i] != i + n - r: break else: return indices[i] += 1 for j in range(i+1, r): indices[j] = indices[j-1] + 1 yield tuple(pool[i] for i in 
indices) def combinations2(iterable, r): 'Pure python version shown in the docs' pool = tuple(iterable) n = len(pool) for indices in permutations(range(n), r): if sorted(indices) == list(indices): yield tuple(pool[i] for i in indices) def combinations3(iterable, r): 'Pure python version from cwr()' pool = tuple(iterable) n = len(pool) for indices in combinations_with_replacement(range(n), r): if len(set(indices)) == r: yield tuple(pool[i] for i in indices) for n in range(7): values = [5*x-12 for x in range(n)] for r in range(n+2): result = list(combinations(values, r)) self.assertEqual(len(result), 0 if r>n else fact(n) / fact(r) / fact(n-r)) # right number of combs self.assertEqual(len(result), len(set(result))) # no repeats self.assertEqual(result, sorted(result)) # lexicographic order for c in result: self.assertEqual(len(c), r) # r-length combinations self.assertEqual(len(set(c)), r) # no duplicate elements self.assertEqual(list(c), sorted(c)) # keep original ordering self.assertTrue(all(e in values for e in c)) # elements taken from input iterable self.assertEqual(list(c), [e for e in values if e in c]) # comb is a subsequence of the input iterable self.assertEqual(result, list(combinations1(values, r))) # matches first pure python version self.assertEqual(result, list(combinations2(values, r))) # matches second pure python version self.assertEqual(result, list(combinations3(values, r))) # matches second pure python version # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, combinations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(combinations('abcde', 3))))), 1) def test_combinations_with_replacement(self): cwr = combinations_with_replacement self.assertRaises(TypeError, cwr, 'abc') # missing r argument self.assertRaises(TypeError, cwr, 'abc', 2, 1) # too many arguments self.assertRaises(TypeError, cwr, None) # pool is not iterable self.assertRaises(ValueError, cwr, 'abc', -2) # r is negative self.assertEqual(list(cwr('ABC', 2)), [('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')]) def cwr1(iterable, r): 'Pure python version shown in the docs' # number items returned: (n+r-1)! / r! / (n-1)! 
when n>0 pool = tuple(iterable) n = len(pool) if not n and r: return indices = [0] * r yield tuple(pool[i] for i in indices) while 1: for i in reversed(range(r)): if indices[i] != n - 1: break else: return indices[i:] = [indices[i] + 1] * (r - i) yield tuple(pool[i] for i in indices) def cwr2(iterable, r): 'Pure python version shown in the docs' pool = tuple(iterable) n = len(pool) for indices in product(range(n), repeat=r): if sorted(indices) == list(indices): yield tuple(pool[i] for i in indices) def numcombs(n, r): if not n: return 0 if r else 1 return fact(n+r-1) / fact(r)/ fact(n-1) for n in range(7): values = [5*x-12 for x in range(n)] for r in range(n+2): result = list(cwr(values, r)) self.assertEqual(len(result), numcombs(n, r)) # right number of combs self.assertEqual(len(result), len(set(result))) # no repeats self.assertEqual(result, sorted(result)) # lexicographic order regular_combs = list(combinations(values, r)) # compare to combs without replacement if n == 0 or r <= 1: self.assertEqual(result, regular_combs) # cases that should be identical else: self.assertTrue(set(result) >= set(regular_combs)) # rest should be supersets of regular combs for c in result: self.assertEqual(len(c), r) # r-length combinations noruns = [k for k,v in groupby(c)] # combo without consecutive repeats self.assertEqual(len(noruns), len(set(noruns))) # no repeats other than consecutive self.assertEqual(list(c), sorted(c)) # keep original ordering self.assertTrue(all(e in values for e in c)) # elements taken from input iterable self.assertEqual(noruns, [e for e in values if e in c]) # comb is a subsequence of the input iterable self.assertEqual(result, list(cwr1(values, r))) # matches first pure python version self.assertEqual(result, list(cwr2(values, r))) # matches second pure python version # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, cwr('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(cwr('abcde', 3))))), 1) def test_permutations(self): self.assertRaises(TypeError, permutations) # too few arguments self.assertRaises(TypeError, permutations, 'abc', 2, 1) # too many arguments self.assertRaises(TypeError, permutations, None) # pool is not iterable self.assertRaises(ValueError, permutations, 'abc', -2) # r is negative self.assertEqual(list(permutations('abc', 32)), []) # r > n self.assertRaises(TypeError, permutations, 'abc', 's') # r is not an int or None self.assertEqual(list(permutations(range(3), 2)), [(0,1), (0,2), (1,0), (1,2), (2,0), (2,1)]) def permutations1(iterable, r=None): 'Pure python version shown in the docs' pool = tuple(iterable) n = len(pool) r = n if r is None else r if r > n: return indices = list(range(n)) cycles = list(range(n-r+1, n+1))[::-1] yield tuple(pool[i] for i in indices[:r]) while n: for i in reversed(range(r)): cycles[i] -= 1 if cycles[i] == 0: indices[i:] = indices[i+1:] + indices[i:i+1] cycles[i] = n - i else: j = cycles[i] indices[i], indices[-j] = indices[-j], indices[i] yield tuple(pool[i] for i in indices[:r]) break else: return def permutations2(iterable, r=None): 'Pure python version shown in the docs' pool = tuple(iterable) n = len(pool) r = n if r is None else r for indices in product(range(n), repeat=r): if len(set(indices)) == r: yield tuple(pool[i] for i in indices) for n in range(7): values = [5*x-12 for x in range(n)] for r in range(n+2): result = list(permutations(values, r)) self.assertEqual(len(result), 0 if r>n else fact(n) / fact(n-r)) # right number of perms self.assertEqual(len(result), len(set(result))) 
# no repeats self.assertEqual(result, sorted(result)) # lexicographic order for p in result: self.assertEqual(len(p), r) # r-length permutations self.assertEqual(len(set(p)), r) # no duplicate elements self.assertTrue(all(e in values for e in p)) # elements taken from input iterable self.assertEqual(result, list(permutations1(values, r))) # matches first pure python version self.assertEqual(result, list(permutations2(values, r))) # matches second pure python version if r == n: self.assertEqual(result, list(permutations(values, None))) # test r as None self.assertEqual(result, list(permutations(values))) # test default r # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, permutations('abcde', 3)))), 1) self.assertNotEqual(len(set(map(id, list(permutations('abcde', 3))))), 1) def test_combinatorics(self): # Test relationships between product(), permutations(), # combinations() and combinations_with_replacement(). for n in range(6): s = 'ABCDEFG'[:n] for r in range(8): prod = list(product(s, repeat=r)) cwr = list(combinations_with_replacement(s, r)) perm = list(permutations(s, r)) comb = list(combinations(s, r)) # Check size self.assertEqual(len(prod), n**r) self.assertEqual(len(cwr), (fact(n+r-1) / fact(r)/ fact(n-1)) if n else (not r)) self.assertEqual(len(perm), 0 if r>n else fact(n) / fact(n-r)) self.assertEqual(len(comb), 0 if r>n else fact(n) / fact(r) / fact(n-r)) # Check lexicographic order without repeated tuples self.assertEqual(prod, sorted(set(prod))) self.assertEqual(cwr, sorted(set(cwr))) self.assertEqual(perm, sorted(set(perm))) self.assertEqual(comb, sorted(set(comb))) # Check interrelationships self.assertEqual(cwr, [t for t in prod if sorted(t)==list(t)]) # cwr: prods which are sorted self.assertEqual(perm, [t for t in prod if len(set(t))==r]) # perm: prods with no dups self.assertEqual(comb, [t for t in perm if sorted(t)==list(t)]) # comb: perms that are sorted self.assertEqual(comb, [t for t in cwr if len(set(t))==r]) # comb: cwrs without dups self.assertEqual(comb, list(filter(set(cwr).__contains__, perm))) # comb: perm that is a cwr self.assertEqual(comb, list(filter(set(perm).__contains__, cwr))) # comb: cwr that is a perm self.assertEqual(comb, sorted(set(cwr) & set(perm))) # comb: both a cwr and a perm def test_compress(self): self.assertEqual(list(compress(data='ABCDEF', selectors=[1,0,1,0,1,1])), list('ACEF')) self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF')) self.assertEqual(list(compress('ABCDEF', [0,0,0,0,0,0])), list('')) self.assertEqual(list(compress('ABCDEF', [1,1,1,1,1,1])), list('ABCDEF')) self.assertEqual(list(compress('ABCDEF', [1,0,1])), list('AC')) self.assertEqual(list(compress('ABC', [0,1,1,1,1,1])), list('BC')) n = 10000 data = chain.from_iterable(repeat(range(6), n)) selectors = chain.from_iterable(repeat((0, 1))) self.assertEqual(list(compress(data, selectors)), [1,3,5] * n) self.assertRaises(TypeError, compress, None, range(6)) # 1st arg not iterable self.assertRaises(TypeError, compress, range(6), None) # 2nd arg not iterable self.assertRaises(TypeError, compress, range(6)) # too few args self.assertRaises(TypeError, compress, range(6), None) # too many args def test_count(self): self.assertEqual(lzip('abc',count()), [('a', 0), ('b', 1), ('c', 2)]) self.assertEqual(lzip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)]) self.assertEqual(take(2, lzip('abc',count(3))), [('a', 3), ('b', 4)]) self.assertEqual(take(2, zip('abc',count(-1))), [('a', -1), ('b', 0)]) self.assertEqual(take(2, 
zip('abc',count(-3))), [('a', -3), ('b', -2)]) self.assertRaises(TypeError, count, 2, 3, 4) self.assertRaises(TypeError, count, 'a') self.assertEqual(list(islice(count(maxsize-5), 10)), list(range(maxsize-5, maxsize+5))) self.assertEqual(list(islice(count(-maxsize-5), 10)), list(range(-maxsize-5, -maxsize+5))) c = count(3) self.assertEqual(repr(c), 'count(3)') next(c) self.assertEqual(repr(c), 'count(4)') c = count(-9) self.assertEqual(repr(c), 'count(-9)') next(c) self.assertEqual(repr(count(10.25)), 'count(10.25)') self.assertEqual(next(c), -8) for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5): # Test repr (ignoring the L in longs) r1 = repr(count(i)).replace('L', '') r2 = 'count(%r)'.__mod__(i).replace('L', '') self.assertEqual(r1, r2) # check copy, deepcopy, pickle for value in -3, 3, maxsize-5, maxsize+5: c = count(value) self.assertEqual(next(copy.copy(c)), value) self.assertEqual(next(copy.deepcopy(c)), value) self.assertEqual(next(pickle.loads(pickle.dumps(c))), value) def test_count_with_stride(self): self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)]) self.assertEqual(lzip('abc',count(start=2,step=3)), [('a', 2), ('b', 5), ('c', 8)]) self.assertEqual(lzip('abc',count(step=-1)), [('a', 0), ('b', -1), ('c', -2)]) self.assertEqual(lzip('abc',count(2,0)), [('a', 2), ('b', 2), ('c', 2)]) self.assertEqual(lzip('abc',count(2,1)), [('a', 2), ('b', 3), ('c', 4)]) self.assertEqual(lzip('abc',count(2,3)), [('a', 2), ('b', 5), ('c', 8)]) self.assertEqual(take(20, count(maxsize-15, 3)), take(20, range(maxsize-15, maxsize+100, 3))) self.assertEqual(take(20, count(-maxsize-15, 3)), take(20, range(-maxsize-15,-maxsize+100, 3))) self.assertEqual(take(3, count(2, 3.25-4j)), [2, 5.25-4j, 8.5-8j]) self.assertEqual(take(3, count(Decimal('1.1'), Decimal('.1'))), [Decimal('1.1'), Decimal('1.2'), Decimal('1.3')]) self.assertEqual(take(3, count(Fraction(2,3), Fraction(1,7))), [Fraction(2,3), Fraction(17,21), Fraction(20,21)]) self.assertEqual(repr(take(3, count(10, 2.5))), repr([10, 12.5, 15.0])) c = count(3, 5) self.assertEqual(repr(c), 'count(3, 5)') next(c) self.assertEqual(repr(c), 'count(8, 5)') c = count(-9, 0) self.assertEqual(repr(c), 'count(-9, 0)') next(c) self.assertEqual(repr(c), 'count(-9, 0)') c = count(-9, -3) self.assertEqual(repr(c), 'count(-9, -3)') next(c) self.assertEqual(repr(c), 'count(-12, -3)') self.assertEqual(repr(c), 'count(-12, -3)') self.assertEqual(repr(count(10.5, 1.25)), 'count(10.5, 1.25)') self.assertEqual(repr(count(10.5, 1)), 'count(10.5)') # suppress step=1 when it's an int self.assertEqual(repr(count(10.5, 1.00)), 'count(10.5, 1.0)') # do show float values lilke 1.0 for i in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 10, sys.maxsize-5, sys.maxsize+5): for j in (-sys.maxsize-5, -sys.maxsize+5 ,-10, -1, 0, 1, 10, sys.maxsize-5, sys.maxsize+5): # Test repr (ignoring the L in longs) r1 = repr(count(i, j)).replace('L', '') if j == 1: r2 = ('count(%r)' % i).replace('L', '') else: r2 = ('count(%r, %r)' % (i, j)).replace('L', '') self.assertEqual(r1, r2) def test_cycle(self): self.assertEqual(take(10, cycle('abc')), list('abcabcabca')) self.assertEqual(list(cycle('')), []) self.assertRaises(TypeError, cycle) self.assertRaises(TypeError, cycle, 5) self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0]) def test_groupby(self): # Check whether it accepts arguments correctly self.assertEqual([], list(groupby([]))) self.assertEqual([], list(groupby([], key=id))) self.assertRaises(TypeError, list, 
groupby('abc', [])) self.assertRaises(TypeError, groupby, None) self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10) # Check normal input s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22), (2,15,22), (3,16,23), (3,17,23)] dup = [] for k, g in groupby(s, lambda r:r[0]): for elem in g: self.assertEqual(k, elem[0]) dup.append(elem) self.assertEqual(s, dup) # Check nested case dup = [] for k, g in groupby(s, lambda r:r[0]): for ik, ig in groupby(g, lambda r:r[2]): for elem in ig: self.assertEqual(k, elem[0]) self.assertEqual(ik, elem[2]) dup.append(elem) self.assertEqual(s, dup) # Check case where inner iterator is not used keys = [k for k, g in groupby(s, lambda r:r[0])] expectedkeys = set([r[0] for r in s]) self.assertEqual(set(keys), expectedkeys) self.assertEqual(len(keys), len(expectedkeys)) # Exercise pipes and filters style s = 'abracadabra' # sort s | uniq r = [k for k, g in groupby(sorted(s))] self.assertEqual(r, ['a', 'b', 'c', 'd', 'r']) # sort s | uniq -d r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))] self.assertEqual(r, ['a', 'b', 'r']) # sort s | uniq -c r = [(len(list(g)), k) for k, g in groupby(sorted(s))] self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')]) # sort s | uniq -c | sort -rn | head -3 r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3] self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')]) # iter.__next__ failure class ExpectedError(Exception): pass def delayed_raise(n=0): for i in range(n): yield 'yo' raise ExpectedError def gulp(iterable, keyp=None, func=list): return [func(g) for k, g in groupby(iterable, keyp)] # iter.__next__ failure on outer object self.assertRaises(ExpectedError, gulp, delayed_raise(0)) # iter.__next__ failure on inner object self.assertRaises(ExpectedError, gulp, delayed_raise(1)) # __cmp__ failure class DummyCmp: def __eq__(self, dst): raise ExpectedError s = [DummyCmp(), DummyCmp(), None] # __eq__ failure on outer object self.assertRaises(ExpectedError, gulp, s, func=id) # __eq__ failure on inner object self.assertRaises(ExpectedError, gulp, s) # keyfunc failure def keyfunc(obj): if keyfunc.skip > 0: keyfunc.skip -= 1 return obj else: raise ExpectedError # keyfunc failure on outer object keyfunc.skip = 0 self.assertRaises(ExpectedError, gulp, [None], keyfunc) keyfunc.skip = 1 self.assertRaises(ExpectedError, gulp, [None, None], keyfunc) def test_filter(self): self.assertEqual(list(filter(isEven, range(6))), [0,2,4]) self.assertEqual(list(filter(None, [0,1,0,2,0])), [1,2]) self.assertEqual(list(filter(bool, [0,1,0,2,0])), [1,2]) self.assertEqual(take(4, filter(isEven, count())), [0,2,4,6]) self.assertRaises(TypeError, filter) self.assertRaises(TypeError, filter, lambda x:x) self.assertRaises(TypeError, filter, lambda x:x, range(6), 7) self.assertRaises(TypeError, filter, isEven, 3) self.assertRaises(TypeError, next, filter(range(6), range(6))) def test_filterfalse(self): self.assertEqual(list(filterfalse(isEven, range(6))), [1,3,5]) self.assertEqual(list(filterfalse(None, [0,1,0,2,0])), [0,0,0]) self.assertEqual(list(filterfalse(bool, [0,1,0,2,0])), [0,0,0]) self.assertEqual(take(4, filterfalse(isEven, count())), [1,3,5,7]) self.assertRaises(TypeError, filterfalse) self.assertRaises(TypeError, filterfalse, lambda x:x) self.assertRaises(TypeError, filterfalse, lambda x:x, range(6), 7) self.assertRaises(TypeError, filterfalse, isEven, 3) self.assertRaises(TypeError, next, filterfalse(range(6), range(6))) def test_zip(self): # XXX This is rather silly now that 
builtin zip() calls zip()... ans = [(x,y) for x, y in zip('abc',count())] self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)]) self.assertEqual(list(zip('abc', range(6))), lzip('abc', range(6))) self.assertEqual(list(zip('abcdef', range(3))), lzip('abcdef', range(3))) self.assertEqual(take(3,zip('abcdef', count())), lzip('abcdef', range(3))) self.assertEqual(list(zip('abcdef')), lzip('abcdef')) self.assertEqual(list(zip()), lzip()) self.assertRaises(TypeError, zip, 3) self.assertRaises(TypeError, zip, range(3), 3) # Check tuple re-use (implementation detail) self.assertEqual([tuple(list(pair)) for pair in zip('abc', 'def')], lzip('abc', 'def')) self.assertEqual([pair for pair in zip('abc', 'def')], lzip('abc', 'def')) ids = list(map(id, zip('abc', 'def'))) self.assertEqual(min(ids), max(ids)) ids = list(map(id, list(zip('abc', 'def')))) self.assertEqual(len(dict.fromkeys(ids)), len(ids)) def test_ziplongest(self): for args in [ ['abc', range(6)], [range(6), 'abc'], [range(1000), range(2000,2100), range(3000,3050)], [range(1000), range(0), range(3000,3050), range(1200), range(1500)], [range(1000), range(0), range(3000,3050), range(1200), range(1500), range(0)], ]: target = [tuple([arg[i] if i < len(arg) else None for arg in args]) for i in range(max(map(len, args)))] self.assertEqual(list(zip_longest(*args)), target) self.assertEqual(list(zip_longest(*args, **{})), target) target = [tuple((e is None and 'X' or e) for e in t) for t in target] # Replace None fills with 'X' self.assertEqual(list(zip_longest(*args, **dict(fillvalue='X'))), target) self.assertEqual(take(3,zip_longest('abcdef', count())), list(zip('abcdef', range(3)))) # take 3 from infinite input self.assertEqual(list(zip_longest()), list(zip())) self.assertEqual(list(zip_longest([])), list(zip([]))) self.assertEqual(list(zip_longest('abcdef')), list(zip('abcdef'))) self.assertEqual(list(zip_longest('abc', 'defg', **{})), list(zip(list('abc')+[None], 'defg'))) # empty keyword dict self.assertRaises(TypeError, zip_longest, 3) self.assertRaises(TypeError, zip_longest, range(3), 3) for stmt in [ "zip_longest('abc', fv=1)", "zip_longest('abc', fillvalue=1, bogus_keyword=None)", ]: try: eval(stmt, globals(), locals()) except TypeError: pass else: self.fail('Did not raise Type in: ' + stmt) # Check tuple re-use (implementation detail) self.assertEqual([tuple(list(pair)) for pair in zip_longest('abc', 'def')], list(zip('abc', 'def'))) self.assertEqual([pair for pair in zip_longest('abc', 'def')], list(zip('abc', 'def'))) ids = list(map(id, zip_longest('abc', 'def'))) self.assertEqual(min(ids), max(ids)) ids = list(map(id, list(zip_longest('abc', 'def')))) self.assertEqual(len(dict.fromkeys(ids)), len(ids)) def test_bug_7244(self): class Repeater: # this class is similar to itertools.repeat def __init__(self, o, t, e): self.o = o self.t = int(t) self.e = e def __iter__(self): # its iterator is itself return self def __next__(self): if self.t > 0: self.t -= 1 return self.o else: raise self.e # Formerly this code in would fail in debug mode # with Undetected Error and Stop Iteration r1 = Repeater(1, 3, StopIteration) r2 = Repeater(2, 4, StopIteration) def run(r1, r2): result = [] for i, j in zip_longest(r1, r2, fillvalue=0): with support.captured_output('stdout'): print((i, j)) result.append((i, j)) return result self.assertEqual(run(r1, r2), [(1,2), (1,2), (1,2), (0,2)]) # Formerly, the RuntimeError would be lost # and StopIteration would stop as expected r1 = Repeater(1, 3, RuntimeError) r2 = Repeater(2, 4, StopIteration) it = 
zip_longest(r1, r2, fillvalue=0) self.assertEqual(next(it), (1, 2)) self.assertEqual(next(it), (1, 2)) self.assertEqual(next(it), (1, 2)) self.assertRaises(RuntimeError, next, it) def test_product(self): for args, result in [ ([], [()]), # zero iterables (['ab'], [('a',), ('b',)]), # one iterable ([range(2), range(3)], [(0,0), (0,1), (0,2), (1,0), (1,1), (1,2)]), # two iterables ([range(0), range(2), range(3)], []), # first iterable with zero length ([range(2), range(0), range(3)], []), # middle iterable with zero length ([range(2), range(3), range(0)], []), # last iterable with zero length ]: self.assertEqual(list(product(*args)), result) for r in range(4): self.assertEqual(list(product(*(args*r))), list(product(*args, **dict(repeat=r)))) self.assertEqual(len(list(product(*[range(7)]*6))), 7**6) self.assertRaises(TypeError, product, range(6), None) def product1(*args, **kwds): pools = list(map(tuple, args)) * kwds.get('repeat', 1) n = len(pools) if n == 0: yield () return if any(len(pool) == 0 for pool in pools): return indices = [0] * n yield tuple(pool[i] for pool, i in zip(pools, indices)) while 1: for i in reversed(range(n)): # right to left if indices[i] == len(pools[i]) - 1: continue indices[i] += 1 for j in range(i+1, n): indices[j] = 0 yield tuple(pool[i] for pool, i in zip(pools, indices)) break else: return def product2(*args, **kwds): 'Pure python version used in docs' pools = list(map(tuple, args)) * kwds.get('repeat', 1) result = [[]] for pool in pools: result = [x+[y] for x in result for y in pool] for prod in result: yield tuple(prod) argtypes = ['', 'abc', '', range(0), range(4), dict(a=1, b=2, c=3), set('abcdefg'), range(11), tuple(range(13))] for i in range(100): args = [random.choice(argtypes) for j in range(random.randrange(5))] expected_len = prod(map(len, args)) self.assertEqual(len(list(product(*args))), expected_len) self.assertEqual(list(product(*args)), list(product1(*args))) self.assertEqual(list(product(*args)), list(product2(*args))) args = map(iter, args) self.assertEqual(len(list(product(*args))), expected_len) # Test implementation detail: tuple re-use self.assertEqual(len(set(map(id, product('abc', 'def')))), 1) self.assertNotEqual(len(set(map(id, list(product('abc', 'def'))))), 1) def test_repeat(self): self.assertEqual(list(repeat(object='a', times=3)), ['a', 'a', 'a']) self.assertEqual(lzip(range(3),repeat('a')), [(0, 'a'), (1, 'a'), (2, 'a')]) self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a']) self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a']) self.assertEqual(list(repeat('a', 0)), []) self.assertEqual(list(repeat('a', -3)), []) self.assertRaises(TypeError, repeat) self.assertRaises(TypeError, repeat, None, 3, 4) self.assertRaises(TypeError, repeat, None, 'a') r = repeat(1+0j) self.assertEqual(repr(r), 'repeat((1+0j))') r = repeat(1+0j, 5) self.assertEqual(repr(r), 'repeat((1+0j), 5)') list(r) self.assertEqual(repr(r), 'repeat((1+0j), 0)') def test_map(self): self.assertEqual(list(map(operator.pow, range(3), range(1,7))), [0**1, 1**2, 2**3]) def tupleize(*args): return args self.assertEqual(list(map(tupleize, 'abc', range(5))), [('a',0),('b',1),('c',2)]) self.assertEqual(list(map(tupleize, 'abc', count())), [('a',0),('b',1),('c',2)]) self.assertEqual(take(2,map(tupleize, 'abc', count())), [('a',0),('b',1)]) self.assertEqual(list(map(operator.pow, [])), []) self.assertRaises(TypeError, map) self.assertRaises(TypeError, list, map(None, range(3), range(3))) self.assertRaises(TypeError, map, operator.neg) self.assertRaises(TypeError, next, 
map(10, range(5))) self.assertRaises(ValueError, next, map(errfunc, [4], [5])) self.assertRaises(TypeError, next, map(onearg, [4], [5])) def test_starmap(self): self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))), [0**1, 1**2, 2**3]) self.assertEqual(take(3, starmap(operator.pow, zip(count(), count(1)))), [0**1, 1**2, 2**3]) self.assertEqual(list(starmap(operator.pow, [])), []) self.assertEqual(list(starmap(operator.pow, [iter([4,5])])), [4**5]) self.assertRaises(TypeError, list, starmap(operator.pow, [None])) self.assertRaises(TypeError, starmap) self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra') self.assertRaises(TypeError, next, starmap(10, [(4,5)])) self.assertRaises(ValueError, next, starmap(errfunc, [(4,5)])) self.assertRaises(TypeError, next, starmap(onearg, [(4,5)])) def test_islice(self): for args in [ # islice(args) should agree with range(args) (10, 20, 3), (10, 3, 20), (10, 20), (10, 3), (20,) ]: self.assertEqual(list(islice(range(100), *args)), list(range(*args))) for args, tgtargs in [ # Stop when seqn is exhausted ((10, 110, 3), ((10, 100, 3))), ((10, 110), ((10, 100))), ((110,), (100,)) ]: self.assertEqual(list(islice(range(100), *args)), list(range(*tgtargs))) # Test stop=None self.assertEqual(list(islice(range(10), None)), list(range(10))) self.assertEqual(list(islice(range(10), None, None)), list(range(10))) self.assertEqual(list(islice(range(10), None, None, None)), list(range(10))) self.assertEqual(list(islice(range(10), 2, None)), list(range(2, 10))) self.assertEqual(list(islice(range(10), 1, None, 2)), list(range(1, 10, 2))) # Test number of items consumed SF #1171417 it = iter(range(10)) self.assertEqual(list(islice(it, 3)), list(range(3))) self.assertEqual(list(it), list(range(3, 10))) # Test invalid arguments self.assertRaises(TypeError, islice, range(10)) self.assertRaises(TypeError, islice, range(10), 1, 2, 3, 4) self.assertRaises(ValueError, islice, range(10), -5, 10, 1) self.assertRaises(ValueError, islice, range(10), 1, -5, -1) self.assertRaises(ValueError, islice, range(10), 1, 10, -1) self.assertRaises(ValueError, islice, range(10), 1, 10, 0) self.assertRaises(ValueError, islice, range(10), 'a') self.assertRaises(ValueError, islice, range(10), 'a', 1) self.assertRaises(ValueError, islice, range(10), 1, 'a') self.assertRaises(ValueError, islice, range(10), 'a', 1, 1) self.assertRaises(ValueError, islice, range(10), 1, 'a', 1) self.assertEqual(len(list(islice(count(), 1, 10, maxsize))), 1) # Issue #10323: Less islice in a predictable state c = count() self.assertEqual(list(islice(c, 1, 3, 50)), [1]) self.assertEqual(next(c), 3) def test_takewhile(self): data = [1, 3, 5, 20, 2, 4, 6, 8] underten = lambda x: x<10 self.assertEqual(list(takewhile(underten, data)), [1, 3, 5]) self.assertEqual(list(takewhile(underten, [])), []) self.assertRaises(TypeError, takewhile) self.assertRaises(TypeError, takewhile, operator.pow) self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra') self.assertRaises(TypeError, next, takewhile(10, [(4,5)])) self.assertRaises(ValueError, next, takewhile(errfunc, [(4,5)])) t = takewhile(bool, [1, 1, 1, 0, 0, 0]) self.assertEqual(list(t), [1, 1, 1]) self.assertRaises(StopIteration, next, t) def test_dropwhile(self): data = [1, 3, 5, 20, 2, 4, 6, 8] underten = lambda x: x<10 self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8]) self.assertEqual(list(dropwhile(underten, [])), []) self.assertRaises(TypeError, dropwhile) self.assertRaises(TypeError, dropwhile, operator.pow) 
self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra') self.assertRaises(TypeError, next, dropwhile(10, [(4,5)])) self.assertRaises(ValueError, next, dropwhile(errfunc, [(4,5)])) def test_tee(self): n = 200 def irange(n): for i in range(n): yield i a, b = tee([]) # test empty iterator self.assertEqual(list(a), []) self.assertEqual(list(b), []) a, b = tee(irange(n)) # test 100% interleaved self.assertEqual(lzip(a,b), lzip(range(n), range(n))) a, b = tee(irange(n)) # test 0% interleaved self.assertEqual(list(a), list(range(n))) self.assertEqual(list(b), list(range(n))) a, b = tee(irange(n)) # test dealloc of leading iterator for i in range(100): self.assertEqual(next(a), i) del a self.assertEqual(list(b), list(range(n))) a, b = tee(irange(n)) # test dealloc of trailing iterator for i in range(100): self.assertEqual(next(a), i) del b self.assertEqual(list(a), list(range(100, n))) for j in range(5): # test randomly interleaved order = [0]*n + [1]*n random.shuffle(order) lists = ([], []) its = tee(irange(n)) for i in order: value = next(its[i]) lists[i].append(value) self.assertEqual(lists[0], list(range(n))) self.assertEqual(lists[1], list(range(n))) # test argument format checking self.assertRaises(TypeError, tee) self.assertRaises(TypeError, tee, 3) self.assertRaises(TypeError, tee, [1,2], 'x') self.assertRaises(TypeError, tee, [1,2], 3, 'x') # tee object should be instantiable a, b = tee('abc') c = type(a)('def') self.assertEqual(list(c), list('def')) # test long-lagged and multi-way split a, b, c = tee(range(2000), 3) for i in range(100): self.assertEqual(next(a), i) self.assertEqual(list(b), list(range(2000))) self.assertEqual([next(c), next(c)], list(range(2))) self.assertEqual(list(a), list(range(100,2000))) self.assertEqual(list(c), list(range(2,2000))) # test values of n self.assertRaises(TypeError, tee, 'abc', 'invalid') self.assertRaises(ValueError, tee, [], -1) for n in range(5): result = tee('abc', n) self.assertEqual(type(result), tuple) self.assertEqual(len(result), n) self.assertEqual([list(x) for x in result], [list('abc')]*n) # tee pass-through to copyable iterator a, b = tee('abc') c, d = tee(a) self.assertTrue(a is c) # test tee_new t1, t2 = tee('abc') tnew = type(t1) self.assertRaises(TypeError, tnew) self.assertRaises(TypeError, tnew, 10) t3 = tnew(t1) self.assertTrue(list(t1) == list(t2) == list(t3) == list('abc')) # test that tee objects are weak referencable a, b = tee(range(10)) p = proxy(a) self.assertEqual(getattr(p, '__class__'), type(b)) del a self.assertRaises(ReferenceError, getattr, p, '__class__') def test_StopIteration(self): self.assertRaises(StopIteration, next, zip()) for f in (chain, cycle, zip, groupby): self.assertRaises(StopIteration, next, f([])) self.assertRaises(StopIteration, next, f(StopNow())) self.assertRaises(StopIteration, next, islice([], None)) self.assertRaises(StopIteration, next, islice(StopNow(), None)) p, q = tee([]) self.assertRaises(StopIteration, next, p) self.assertRaises(StopIteration, next, q) p, q = tee(StopNow()) self.assertRaises(StopIteration, next, p) self.assertRaises(StopIteration, next, q) self.assertRaises(StopIteration, next, repeat(None, 0)) for f in (filter, filterfalse, map, takewhile, dropwhile, starmap): self.assertRaises(StopIteration, next, f(lambda x:x, [])) self.assertRaises(StopIteration, next, f(lambda x:x, StopNow())) class TestExamples(unittest.TestCase): def test_accumlate(self): self.assertEqual(list(accumulate([1,2,3,4,5])), [1, 3, 6, 10, 15]) def test_chain(self): 
self.assertEqual(''.join(chain('ABC', 'DEF')), 'ABCDEF') def test_chain_from_iterable(self): self.assertEqual(''.join(chain.from_iterable(['ABC', 'DEF'])), 'ABCDEF') def test_combinations(self): self.assertEqual(list(combinations('ABCD', 2)), [('A','B'), ('A','C'), ('A','D'), ('B','C'), ('B','D'), ('C','D')]) self.assertEqual(list(combinations(range(4), 3)), [(0,1,2), (0,1,3), (0,2,3), (1,2,3)]) def test_combinations_with_replacement(self): self.assertEqual(list(combinations_with_replacement('ABC', 2)), [('A','A'), ('A','B'), ('A','C'), ('B','B'), ('B','C'), ('C','C')]) def test_compress(self): self.assertEqual(list(compress('ABCDEF', [1,0,1,0,1,1])), list('ACEF')) def test_count(self): self.assertEqual(list(islice(count(10), 5)), [10, 11, 12, 13, 14]) def test_cycle(self): self.assertEqual(list(islice(cycle('ABCD'), 12)), list('ABCDABCDABCD')) def test_dropwhile(self): self.assertEqual(list(dropwhile(lambda x: x<5, [1,4,6,4,1])), [6,4,1]) def test_groupby(self): self.assertEqual([k for k, g in groupby('AAAABBBCCDAABBB')], list('ABCDAB')) self.assertEqual([(list(g)) for k, g in groupby('AAAABBBCCD')], [list('AAAA'), list('BBB'), list('CC'), list('D')]) def test_filter(self): self.assertEqual(list(filter(lambda x: x%2, range(10))), [1,3,5,7,9]) def test_filterfalse(self): self.assertEqual(list(filterfalse(lambda x: x%2, range(10))), [0,2,4,6,8]) def test_map(self): self.assertEqual(list(map(pow, (2,3,10), (5,2,3))), [32, 9, 1000]) def test_islice(self): self.assertEqual(list(islice('ABCDEFG', 2)), list('AB')) self.assertEqual(list(islice('ABCDEFG', 2, 4)), list('CD')) self.assertEqual(list(islice('ABCDEFG', 2, None)), list('CDEFG')) self.assertEqual(list(islice('ABCDEFG', 0, None, 2)), list('ACEG')) def test_zip(self): self.assertEqual(list(zip('ABCD', 'xy')), [('A', 'x'), ('B', 'y')]) def test_zip_longest(self): self.assertEqual(list(zip_longest('ABCD', 'xy', fillvalue='-')), [('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')]) def test_permutations(self): self.assertEqual(list(permutations('ABCD', 2)), list(map(tuple, 'AB AC AD BA BC BD CA CB CD DA DB DC'.split()))) self.assertEqual(list(permutations(range(3))), [(0,1,2), (0,2,1), (1,0,2), (1,2,0), (2,0,1), (2,1,0)]) def test_product(self): self.assertEqual(list(product('ABCD', 'xy')), list(map(tuple, 'Ax Ay Bx By Cx Cy Dx Dy'.split()))) self.assertEqual(list(product(range(2), repeat=3)), [(0,0,0), (0,0,1), (0,1,0), (0,1,1), (1,0,0), (1,0,1), (1,1,0), (1,1,1)]) def test_repeat(self): self.assertEqual(list(repeat(10, 3)), [10, 10, 10]) def test_stapmap(self): self.assertEqual(list(starmap(pow, [(2,5), (3,2), (10,3)])), [32, 9, 1000]) def test_takewhile(self): self.assertEqual(list(takewhile(lambda x: x<5, [1,4,6,4,1])), [1,4]) class TestGC(unittest.TestCase): def makecycle(self, iterator, container): container.append(iterator) next(iterator) del container, iterator def test_accumulate(self): a = [] self.makecycle(accumulate([1,2,a,3]), a) def test_chain(self): a = [] self.makecycle(chain(a), a) def test_chain_from_iterable(self): a = [] self.makecycle(chain.from_iterable([a]), a) def test_combinations(self): a = [] self.makecycle(combinations([1,2,a,3], 3), a) def test_combinations_with_replacement(self): a = [] self.makecycle(combinations_with_replacement([1,2,a,3], 3), a) def test_compress(self): a = [] self.makecycle(compress('ABCDEF', [1,0,1,0,1,0]), a) def test_count(self): a = [] Int = type('Int', (int,), dict(x=a)) self.makecycle(count(Int(0), Int(1)), a) def test_cycle(self): a = [] self.makecycle(cycle([a]*2), a) def 
test_dropwhile(self): a = [] self.makecycle(dropwhile(bool, [0, a, a]), a) def test_groupby(self): a = [] self.makecycle(groupby([a]*2, lambda x:x), a) def test_issue2246(self): # Issue 2246 -- the _grouper iterator was not included in GC n = 10 keyfunc = lambda x: x for i, j in groupby(range(n), key=keyfunc): keyfunc.__dict__.setdefault('x',[]).append(j) def test_filter(self): a = [] self.makecycle(filter(lambda x:True, [a]*2), a) def test_filterfalse(self): a = [] self.makecycle(filterfalse(lambda x:False, a), a) def test_zip(self): a = [] self.makecycle(zip([a]*2, [a]*3), a) def test_zip_longest(self): a = [] self.makecycle(zip_longest([a]*2, [a]*3), a) b = [a, None] self.makecycle(zip_longest([a]*2, [a]*3, fillvalue=b), a) def test_map(self): a = [] self.makecycle(map(lambda x:x, [a]*2), a) def test_islice(self): a = [] self.makecycle(islice([a]*2, None), a) def test_permutations(self): a = [] self.makecycle(permutations([1,2,a,3], 3), a) def test_product(self): a = [] self.makecycle(product([1,2,a,3], repeat=3), a) def test_repeat(self): a = [] self.makecycle(repeat(a), a) def test_starmap(self): a = [] self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a) def test_takewhile(self): a = [] self.makecycle(takewhile(bool, [1, 0, a, a]), a) def R(seqn): 'Regular generator' for i in seqn: yield i class G: 'Sequence using __getitem__' def __init__(self, seqn): self.seqn = seqn def __getitem__(self, i): return self.seqn[i] class I: 'Sequence using iterator protocol' def __init__(self, seqn): self.seqn = seqn self.i = 0 def __iter__(self): return self def __next__(self): if self.i >= len(self.seqn): raise StopIteration v = self.seqn[self.i] self.i += 1 return v class Ig: 'Sequence using iterator protocol defined with a generator' def __init__(self, seqn): self.seqn = seqn self.i = 0 def __iter__(self): for val in self.seqn: yield val class X: 'Missing __getitem__ and __iter__' def __init__(self, seqn): self.seqn = seqn self.i = 0 def __next__(self): if self.i >= len(self.seqn): raise StopIteration v = self.seqn[self.i] self.i += 1 return v class N: 'Iterator missing __next__()' def __init__(self, seqn): self.seqn = seqn self.i = 0 def __iter__(self): return self class E: 'Test propagation of exceptions' def __init__(self, seqn): self.seqn = seqn self.i = 0 def __iter__(self): return self def __next__(self): 3 // 0 class S: 'Test immediate stop' def __init__(self, seqn): pass def __iter__(self): return self def __next__(self): raise StopIteration def L(seqn): 'Test multiple tiers of iterators' return chain(map(lambda x:x, R(Ig(G(seqn))))) class TestVariousIteratorArgs(unittest.TestCase): def test_accumulate(self): s = [1,2,3,4,5] r = [1,3,6,10,15] n = len(s) for g in (G, I, Ig, L, R): self.assertEqual(list(accumulate(g(s))), r) self.assertEqual(list(accumulate(S(s))), []) self.assertRaises(TypeError, accumulate, X(s)) self.assertRaises(TypeError, accumulate, N(s)) self.assertRaises(ZeroDivisionError, list, accumulate(E(s))) def test_chain(self): for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): self.assertEqual(list(chain(g(s))), list(g(s))) self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s))) self.assertRaises(TypeError, list, chain(X(s))) self.assertRaises(TypeError, list, chain(N(s))) self.assertRaises(ZeroDivisionError, list, chain(E(s))) def test_compress(self): for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)): n = len(s) for g in (G, I, Ig, S, L, R): self.assertEqual(list(compress(g(s), repeat(1))), list(g(s))) 
self.assertRaises(TypeError, compress, X(s), repeat(1)) self.assertRaises(TypeError, compress, N(s), repeat(1)) self.assertRaises(ZeroDivisionError, list, compress(E(s), repeat(1))) def test_product(self): for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)): self.assertRaises(TypeError, product, X(s)) self.assertRaises(TypeError, product, N(s)) self.assertRaises(ZeroDivisionError, product, E(s)) def test_cycle(self): for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): tgtlen = len(s) * 3 expected = list(g(s))*3 actual = list(islice(cycle(g(s)), tgtlen)) self.assertEqual(actual, expected) self.assertRaises(TypeError, cycle, X(s)) self.assertRaises(TypeError, cycle, N(s)) self.assertRaises(ZeroDivisionError, list, cycle(E(s))) def test_groupby(self): for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): self.assertEqual([k for k, sb in groupby(g(s))], list(g(s))) self.assertRaises(TypeError, groupby, X(s)) self.assertRaises(TypeError, groupby, N(s)) self.assertRaises(ZeroDivisionError, list, groupby(E(s))) def test_filter(self): for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): self.assertEqual(list(filter(isEven, g(s))), [x for x in g(s) if isEven(x)]) self.assertRaises(TypeError, filter, isEven, X(s)) self.assertRaises(TypeError, filter, isEven, N(s)) self.assertRaises(ZeroDivisionError, list, filter(isEven, E(s))) def test_filterfalse(self): for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): self.assertEqual(list(filterfalse(isEven, g(s))), [x for x in g(s) if isOdd(x)]) self.assertRaises(TypeError, filterfalse, isEven, X(s)) self.assertRaises(TypeError, filterfalse, isEven, N(s)) self.assertRaises(ZeroDivisionError, list, filterfalse(isEven, E(s))) def test_zip(self): for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): self.assertEqual(list(zip(g(s))), lzip(g(s))) self.assertEqual(list(zip(g(s), g(s))), lzip(g(s), g(s))) self.assertRaises(TypeError, zip, X(s)) self.assertRaises(TypeError, zip, N(s)) self.assertRaises(ZeroDivisionError, list, zip(E(s))) def test_ziplongest(self): for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): self.assertEqual(list(zip_longest(g(s))), list(zip(g(s)))) self.assertEqual(list(zip_longest(g(s), g(s))), list(zip(g(s), g(s)))) self.assertRaises(TypeError, zip_longest, X(s)) self.assertRaises(TypeError, zip_longest, N(s)) self.assertRaises(ZeroDivisionError, list, zip_longest(E(s))) def test_map(self): for s in (range(10), range(0), range(100), (7,11), range(20,50,5)): for g in (G, I, Ig, S, L, R): self.assertEqual(list(map(onearg, g(s))), [onearg(x) for x in g(s)]) self.assertEqual(list(map(operator.pow, g(s), g(s))), [x**x for x in g(s)]) self.assertRaises(TypeError, map, onearg, X(s)) self.assertRaises(TypeError, map, onearg, N(s)) self.assertRaises(ZeroDivisionError, list, map(onearg, E(s))) def test_islice(self): for s in ("12345", "", range(1000), ('do', 1.2), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2]) self.assertRaises(TypeError, islice, X(s), 10) self.assertRaises(TypeError, islice, N(s), 10) self.assertRaises(ZeroDivisionError, list, islice(E(s), 10)) def test_starmap(self): for s in (range(10), range(0), range(100), (7,11), range(20,50,5)): for g in (G, I, Ig, S, L, R): ss = 
lzip(s, s) self.assertEqual(list(starmap(operator.pow, g(ss))), [x**x for x in g(s)]) self.assertRaises(TypeError, starmap, operator.pow, X(ss)) self.assertRaises(TypeError, starmap, operator.pow, N(ss)) self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss))) def test_takewhile(self): for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): tgt = [] for elem in g(s): if not isEven(elem): break tgt.append(elem) self.assertEqual(list(takewhile(isEven, g(s))), tgt) self.assertRaises(TypeError, takewhile, isEven, X(s)) self.assertRaises(TypeError, takewhile, isEven, N(s)) self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s))) def test_dropwhile(self): for s in (range(10), range(0), range(1000), (7,11), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): tgt = [] for elem in g(s): if not tgt and isOdd(elem): continue tgt.append(elem) self.assertEqual(list(dropwhile(isOdd, g(s))), tgt) self.assertRaises(TypeError, dropwhile, isOdd, X(s)) self.assertRaises(TypeError, dropwhile, isOdd, N(s)) self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s))) def test_tee(self): for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)): for g in (G, I, Ig, S, L, R): it1, it2 = tee(g(s)) self.assertEqual(list(it1), list(g(s))) self.assertEqual(list(it2), list(g(s))) self.assertRaises(TypeError, tee, X(s)) self.assertRaises(TypeError, tee, N(s)) self.assertRaises(ZeroDivisionError, list, tee(E(s))[0]) class LengthTransparency(unittest.TestCase): def test_repeat(self): from test.test_iterlen import len self.assertEqual(len(repeat(None, 50)), 50) self.assertRaises(TypeError, len, repeat(None)) class RegressionTests(unittest.TestCase): def test_sf_793826(self): # Fix Armin Rigo's successful efforts to wreak havoc def mutatingtuple(tuple1, f, tuple2): # this builds a tuple t which is a copy of tuple1, # then calls f(t), then mutates t to be equal to tuple2 # (needs len(tuple1) == len(tuple2)). def g(value, first=[1]): if first: del first[:] f(next(z)) return value items = list(tuple2) items[1:1] = list(tuple1) gen = map(g, items) z = zip(*[gen]*len(tuple1)) next(z) def f(t): global T T = t first[:] = list(T) first = [] mutatingtuple((1,2,3), f, (4,5,6)) second = list(T) self.assertEqual(first, second) def test_sf_950057(self): # Make sure that chain() and cycle() catch exceptions immediately # rather than when shifting between input sources def gen1(): hist.append(0) yield 1 hist.append(1) raise AssertionError hist.append(2) def gen2(x): hist.append(3) yield 2 hist.append(4) if x: raise StopIteration hist = [] self.assertRaises(AssertionError, list, chain(gen1(), gen2(False))) self.assertEqual(hist, [0,1]) hist = [] self.assertRaises(AssertionError, list, chain(gen1(), gen2(True))) self.assertEqual(hist, [0,1]) hist = [] self.assertRaises(AssertionError, list, cycle(gen1())) self.assertEqual(hist, [0,1]) class SubclassWithKwargsTest(unittest.TestCase): def test_keywords_in_subclass(self): # count is not subclassable... 
        for cls in (repeat, zip, filter, filterfalse, chain, map, starmap,
                    islice, takewhile, dropwhile, cycle, compress):
            class Subclass(cls):
                def __init__(self, newarg=None, *args):
                    cls.__init__(self, *args)
            try:
                Subclass(newarg=1)
            except TypeError as err:
                # we expect type errors because of wrong argument count
                self.assertNotIn("does not take keyword arguments", err.args[0])


libreftest = """ Doctest for examples in the library reference: libitertools.tex


>>> amounts = [120.15, 764.05, 823.14]
>>> for checknum, amount in zip(count(1200), amounts):
...     print('Check %d is for $%.2f' % (checknum, amount))
...
Check 1200 is for $120.15
Check 1201 is for $764.05
Check 1202 is for $823.14

>>> import operator
>>> for cube in map(operator.pow, range(1,4), repeat(3)):
...     print(cube)
...
1
8
27

>>> reportlines = ['EuroPython', 'Roster', '', 'alex', '', 'laura', '', 'martin', '', 'walter', '', 'samuele']
>>> for name in islice(reportlines, 3, None, 2):
...     print(name.title())
...
Alex
Laura
Martin
Walter
Samuele

>>> from operator import itemgetter
>>> d = dict(a=1, b=2, c=1, d=2, e=1, f=2, g=3)
>>> di = sorted(sorted(d.items()), key=itemgetter(1))
>>> for k, g in groupby(di, itemgetter(1)):
...     print(k, list(map(itemgetter(0), g)))
...
1 ['a', 'c', 'e']
2 ['b', 'd', 'f']
3 ['g']

# Find runs of consecutive numbers using groupby.  The key to the solution
# is differencing with a range so that consecutive numbers all appear in
# the same group.
>>> data = [ 1,  4,5,6, 10, 15,16,17,18, 22, 25,26,27,28]
>>> for k, g in groupby(enumerate(data), lambda t:t[0]-t[1]):
...     print(list(map(operator.itemgetter(1), g)))
...
[1]
[4, 5, 6]
[10]
[15, 16, 17, 18]
[22]
[25, 26, 27, 28]

>>> def take(n, iterable):
...     "Return first n items of the iterable as a list"
...     return list(islice(iterable, n))

>>> def enumerate(iterable, start=0):
...     return zip(count(start), iterable)

>>> def tabulate(function, start=0):
...     "Return function(0), function(1), ..."
...     return map(function, count(start))

>>> def nth(iterable, n, default=None):
...     "Returns the nth item or a default value"
...     return next(islice(iterable, n, None), default)

>>> def quantify(iterable, pred=bool):
...     "Count how many times the predicate is true"
...     return sum(map(pred, iterable))

>>> def padnone(iterable):
...     "Returns the sequence elements and then returns None indefinitely"
...     return chain(iterable, repeat(None))

>>> def ncycles(iterable, n):
...     "Returns the sequence elements n times"
...     return chain(*repeat(iterable, n))

>>> def dotproduct(vec1, vec2):
...     return sum(map(operator.mul, vec1, vec2))

>>> def flatten(listOfLists):
...     return list(chain.from_iterable(listOfLists))

>>> def repeatfunc(func, times=None, *args):
...     "Repeat calls to func with specified arguments."
...     "   Example:  repeatfunc(random.random)"
...     if times is None:
...         return starmap(func, repeat(args))
...     else:
...         return starmap(func, repeat(args, times))

>>> def pairwise(iterable):
...     "s -> (s0,s1), (s1,s2), (s2, s3), ..."
...     a, b = tee(iterable)
...     try:
...         next(b)
...     except StopIteration:
...         pass
...     return zip(a, b)

>>> def grouper(n, iterable, fillvalue=None):
...     "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
...     args = [iter(iterable)] * n
...     return zip_longest(*args, fillvalue=fillvalue)

>>> def roundrobin(*iterables):
...     "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
...     # Recipe credited to George Sakkis
...     pending = len(iterables)
...     nexts = cycle(iter(it).__next__ for it in iterables)
...     while pending:
...         try:
...             for next in nexts:
...                 yield next()
...
except StopIteration: ... pending -= 1 ... nexts = cycle(islice(nexts, pending)) >>> def powerset(iterable): ... "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)" ... s = list(iterable) ... return chain.from_iterable(combinations(s, r) for r in range(len(s)+1)) >>> def unique_everseen(iterable, key=None): ... "List unique elements, preserving order. Remember all elements ever seen." ... # unique_everseen('AAAABBBCCDAABBB') --> A B C D ... # unique_everseen('ABBCcAD', str.lower) --> A B C D ... seen = set() ... seen_add = seen.add ... if key is None: ... for element in iterable: ... if element not in seen: ... seen_add(element) ... yield element ... else: ... for element in iterable: ... k = key(element) ... if k not in seen: ... seen_add(k) ... yield element >>> def unique_justseen(iterable, key=None): ... "List unique elements, preserving order. Remember only the element just seen." ... # unique_justseen('AAAABBBCCDAABBB') --> A B C D A B ... # unique_justseen('ABBCcAD', str.lower) --> A B C A D ... return map(next, map(itemgetter(1), groupby(iterable, key))) This is not part of the examples but it tests to make sure the definitions perform as purported. >>> take(10, count()) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] >>> list(enumerate('abc')) [(0, 'a'), (1, 'b'), (2, 'c')] >>> list(islice(tabulate(lambda x: 2*x), 4)) [0, 2, 4, 6] >>> nth('abcde', 3) 'd' >>> nth('abcde', 9) is None True >>> quantify(range(99), lambda x: x%2==0) 50 >>> a = [[1, 2, 3], [4, 5, 6]] >>> flatten(a) [1, 2, 3, 4, 5, 6] >>> list(repeatfunc(pow, 5, 2, 3)) [8, 8, 8, 8, 8] >>> import random >>> take(5, map(int, repeatfunc(random.random))) [0, 0, 0, 0, 0] >>> list(pairwise('abcd')) [('a', 'b'), ('b', 'c'), ('c', 'd')] >>> list(pairwise([])) [] >>> list(pairwise('a')) [] >>> list(islice(padnone('abc'), 0, 6)) ['a', 'b', 'c', None, None, None] >>> list(ncycles('abc', 3)) ['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c'] >>> dotproduct([1,2,3], [4,5,6]) 32 >>> list(grouper(3, 'abcdefg', 'x')) [('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'x', 'x')] >>> list(roundrobin('abc', 'd', 'ef')) ['a', 'd', 'e', 'b', 'f', 'c'] >>> list(powerset([1,2,3])) [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)] >>> all(len(list(powerset(range(n)))) == 2**n for n in range(18)) True >>> list(powerset('abcde')) == sorted(sorted(set(powerset('abcde'))), key=len) True >>> list(unique_everseen('AAAABBBCCDAABBB')) ['A', 'B', 'C', 'D'] >>> list(unique_everseen('ABBCcAD', str.lower)) ['A', 'B', 'C', 'D'] >>> list(unique_justseen('AAAABBBCCDAABBB')) ['A', 'B', 'C', 'D', 'A', 'B'] >>> list(unique_justseen('ABBCcAD', str.lower)) ['A', 'B', 'C', 'A', 'D'] """ __test__ = {'libreftest' : libreftest} def test_main(verbose=None): test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC, RegressionTests, LengthTransparency, SubclassWithKwargsTest, TestExamples) support.run_unittest(*test_classes) # verify reference counting if verbose and hasattr(sys, "gettotalrefcount"): import gc counts = [None] * 5 for i in range(len(counts)): support.run_unittest(*test_classes) gc.collect() counts[i] = sys.gettotalrefcount() print(counts) # doctest the examples in the library reference support.run_doctest(sys.modules[__name__], verbose) if __name__ == "__main__": test_main(verbose=True)
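# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file): the
# TestVariousIteratorArgs pattern above feeds one sequence through several
# iterator "personalities" and checks that an itertools function either
# reproduces the plain result or propagates the wrapper's error.  The
# BrokenIter name below is hypothetical; the suite itself uses the
# G/I/Ig/X/N/E/S classes defined earlier.
from itertools import accumulate

class BrokenIter:
    "Iterator whose __next__ always fails, mirroring class E above."
    def __init__(self, seqn):
        self.seqn = seqn
    def __iter__(self):
        return self
    def __next__(self):
        raise ZeroDivisionError  # simulate a failing data source

def _demo():
    data = [1, 2, 3]
    # a well-behaved iterator matches the plain result
    assert list(accumulate(iter(data))) == [1, 3, 6]
    # a broken iterator must raise as soon as the result is consumed
    try:
        list(accumulate(BrokenIter(data)))
    except ZeroDivisionError:
        pass
    else:
        raise AssertionError("expected ZeroDivisionError to propagate")

if __name__ == "__main__":
    _demo()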
MalloyPower/parsing-python
front-end/testsuite-python-lib/Python-3.2/Lib/test/test_itertools.py
Python
mit
69,134
[ "GULP" ]
6e3883f83ef19af4d497d29ee32e0e5107e5bec080d8e736c3bb70e4901890c4
################################################################################ # # RMG - Reaction Mechanism Generator # # Copyright (c) 2002-2017 Prof. William H. Green (whgreen@mit.edu), # Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the 'Software'), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # ################################################################################ import os import re import external.cclib as cclib import logging from subprocess import Popen, PIPE import distutils.spawn import tempfile import shutil from rmgpy.molecule import Molecule from molecule import QMMolecule from rmgpy.exceptions import DependencyError class Mopac: """ A base class for all QM calculations that use MOPAC. Classes such as :class:`MopacMol` will inherit from this class. """ inputFileExtension = '.mop' outputFileExtension = '.out' executablesToTry = ('MOPAC2016.exe', 'MOPAC2012.exe', 'MOPAC2009.exe', 'mopac') for exe in executablesToTry: try: executablePath = distutils.spawn.find_executable(exe) except: executablePath = None if executablePath is not None: break else: # didn't break logging.debug("Did not find MOPAC on path, checking if it exists in a declared MOPAC_DIR...") mopacEnv = os.getenv('MOPAC_DIR', default="/opt/mopac") for exe in executablesToTry: executablePath = os.path.join(mopacEnv, exe) if os.path.exists(executablePath): break else: # didn't break executablePath = os.path.join(mopacEnv , '(MOPAC 2009 or 2012 or 2016)') usePolar = False #use polar keyword in MOPAC "Keywords for the multiplicity" multiplicityKeywords = { 1: '', 2: 'uhf doublet', 3: 'uhf triplet', 4: 'uhf quartet', 5: 'uhf quintet', 6: 'uhf sextet', 7: 'uhf septet', 8: 'uhf octet', 9: 'uhf nonet', } #: List of phrases that indicate failure #: NONE of these must be present in a succesful job. failureKeys = [ 'IMAGINARY FREQUENCIES', 'EXCESS NUMBER OF OPTIMIZATION CYCLES', 'NOT ENOUGH TIME FOR ANOTHER CYCLE', ] #: List of phrases to indicate success. #: ALL of these must be present in a successful job. successKeys = [ 'DESCRIPTION OF VIBRATIONS', 'MOPAC DONE' ] def testReady(self): if not os.path.exists(self.executablePath): raise DependencyError("Couldn't find MOPAC executable at {0}. 
Try setting your MOPAC_DIR environment variable.".format(self.executablePath))

        # Check if MOPAC executable works properly
        process = Popen(self.executablePath, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        stdout, stderr = process.communicate()

        self.expired = False
        if 'has expired' in stderr:
            # The MOPAC executable is expired
            logging.warning('\n'.join(stderr.split('\n')[2:7]))
            self.expired = True
        elif 'To install the MOPAC license' in stderr:
            # The MOPAC executable exists, but the license has not been installed
            raise DependencyError('\n'.join(stderr.split('\n')[0:9]))
        elif 'MOPAC_LICENSE' in stderr:
            # The MOPAC executable is in the wrong location on Windows; MOPAC_LICENSE must be set
            raise DependencyError('\n'.join(stderr.split('\n')[0:11]))

    def run(self):
        self.testReady()
        # submits the input file to mopac
        dirpath = tempfile.mkdtemp()
        # copy input file to temp dir:
        tempInpFile = os.path.join(dirpath, os.path.basename(self.inputFilePath))
        shutil.copy(self.inputFilePath, dirpath)
        process = Popen([self.executablePath, tempInpFile], stdin=PIPE, stdout=PIPE, stderr=PIPE)
        command = '\n' if self.expired else None  # press enter to pass expiration notice
        stdout, stderr = process.communicate(input=command)  # necessary to wait for executable termination!
        if "ended normally" not in stderr.strip():
            logging.warning("Mopac error message:" + stderr)
        # copy output file from temp dir to output dir:
        tempOutFile = os.path.join(dirpath, os.path.basename(self.outputFilePath))
        shutil.copy(tempOutFile, self.outputFilePath)
        # delete temp folder:
        shutil.rmtree(dirpath)
        return self.verifyOutputFile()

    def verifyOutputFile(self):
        """
        Checks that an output file exists and was successful.

        Returns a boolean flag that states whether a successful MOPAC simulation
        already exists for the molecule with the given (augmented) InChI Key.

        The definition of finding a successful simulation is based on these criteria:

        1) finding an output file with the file name equal to the InChI Key
        2) NOT finding any of the keywords that denote a calculation failure
        3) finding all the keywords that denote a calculation success.
        4) finding a match between the InChI of the given molecule and the InChI
           found in the calculation files
        5) checking that the optimized geometry, when connected by single bonds,
           is isomorphic with self.molecule (converted to single bonds)

        If any of the above criteria is not met, False will be returned.
        If all succeed, then it will return True.
""" if not os.path.exists(self.outputFilePath): logging.debug("Output file {0} does not (yet) exist.".format(self.outputFilePath)) return False InChIFound=False #flag (1 or 0) indicating whether an InChI was found in the log file # Initialize dictionary with "False"s successKeysFound = dict([(key, False) for key in self.successKeys]) with open(self.outputFilePath) as outputFile: for line in outputFile: line = line.strip() for element in self.failureKeys: #search for failure keywords if element in line: logging.error("MOPAC output file contains the following error: {0}".format(element) ) return False for element in self.successKeys: #search for success keywords if element in line: successKeysFound[element] = True if "InChI=" in line: logFileInChI = line #output files should take up to 240 characters of the name in the input file InChIFound = True if self.uniqueIDlong in logFileInChI: pass elif self.uniqueIDlong.startswith(logFileInChI): logging.info("InChI too long to check, but beginning matches so assuming OK.") else: logging.warning("InChI in log file ({0}) didn't match that in geometry ({1}).".format(logFileInChI, self.uniqueIDlong)) # Use only up to first 80 characters to match due to MOPAC bug which deletes 81st character of InChI string if self.uniqueIDlong.startswith(logFileInChI[:80]): logging.warning("but the beginning matches so it's probably just a truncation problem.") # Check that ALL 'success' keywords were found in the file. if not all( successKeysFound.values() ): logging.error('Not all of the required keywords for success were found in the output file!') return False if not InChIFound: logging.error("No InChI was found in the MOPAC output file {0}".format(self.outputFilePath)) return False # Compare the optimized geometry to the original molecule qmData = self.parse() cclibMol = Molecule() cclibMol.fromXYZ(qmData.atomicNumbers, qmData.atomCoords.value) testMol = self.molecule.toSingleBonds() if not cclibMol.isIsomorphic(testMol): logging.info("Incorrect connectivity for optimized geometry in file {0}".format(self.outputFilePath)) return False logging.info("Successful {1} quantum result in {0}".format(self.outputFilePath, self.__class__.__name__)) return True def getParser(self, outputFile): """ Returns the appropriate cclib parser. """ return cclib.parser.Mopac(outputFile) class MopacMol(QMMolecule, Mopac): """ A base Class for calculations of molecules using MOPAC. Inherits from both :class:`QMMolecule` and :class:`Mopac`. """ #: Keywords that will be added at the top and bottom of the qm input file keywords = [ {'top':"precise nosym THREADS=1", 'bottom':"oldgeo thermo nosym precise THREADS=1 "}, {'top':"precise nosym gnorm=0.0 nonr THREADS=1", 'bottom':"oldgeo thermo nosym precise THREADS=1 "}, {'top':"precise nosym gnorm=0.0 THREADS=1", 'bottom':"oldgeo thermo nosym precise THREADS=1 "}, {'top':"precise nosym gnorm=0.0 bfgs THREADS=1", 'bottom':"oldgeo thermo nosym precise THREADS=1 "}, {'top':"precise nosym recalc=10 dmax=0.10 nonr cycles=2000 t=2000 THREADS=1", 'bottom':"oldgeo thermo nosym precise THREADS=1 "}, ] def writeInputFile(self, attempt): """ Using the :class:`Geometry` object, write the input file for the `attempt`. 
""" molfile = self.getMolFilePathForCalculation(attempt) atomline = re.compile('\s*([\- ][0-9.]+)\s+([\- ][0-9.]+)+\s+([\- ][0-9.]+)\s+([A-Za-z]+)') output = [ self.geometry.uniqueIDlong, '' ] atomCount = 0 with open(molfile) as molinput: for line in molinput: match = atomline.match(line) if match: output.append("{0:4s} {1} 1 {2} 1 {3} 1".format(match.group(4), match.group(1), match.group(2), match.group(3))) atomCount += 1 assert atomCount == len(self.molecule.atoms) output.append('') input_string = '\n'.join(output) top_keys, bottom_keys, polar_keys = self.inputFileKeywords(attempt) with open(self.inputFilePath, 'w') as mopacFile: mopacFile.write(top_keys) mopacFile.write('\n') mopacFile.write(input_string) mopacFile.write('\n') mopacFile.write(bottom_keys) if self.usePolar: mopacFile.write('\n\n\n') mopacFile.write(polar_keys) def inputFileKeywords(self, attempt): """ Return the top, bottom, and polar keywords. """ raise NotImplementedError("Should be defined by subclass, eg. MopacMolPM3") def generateQMData(self): """ Calculate the QM data and return a QMData object, or None if it fails. """ for atom in self.molecule.vertices: if atom.atomType.label in ('N5s', 'N5d', 'N5dd', 'N5t', 'N5b'): return None if self.verifyOutputFile(): logging.info("Found a successful output file already; using that.") source = "QM {0} calculation found from previous run.".format(self.__class__.__name__) else: self.createGeometry() success = False for attempt in range(1, self.maxAttempts+1): self.writeInputFile(attempt) logging.info('Trying {3} attempt {0} of {1} on molecule {2}.'.format(attempt, self.maxAttempts, self.molecule.toSMILES(), self.__class__.__name__)) success = self.run() if success: logging.info('Attempt {0} of {1} on species {2} succeeded.'.format(attempt, self.maxAttempts, self.molecule.toAugmentedInChI())) source = "QM {0} calculation attempt {1}".format(self.__class__.__name__, attempt ) break else: logging.error('QM thermo calculation failed for {0}.'.format(self.molecule.toAugmentedInChI())) return None result = self.parse() # parsed in cclib result.source = source return result class MopacMolPMn(MopacMol): """ Mopac PMn calculations for molecules (n undefined here) This is a parent class for MOPAC PMn calculations. Inherit it, and define the pm_method, then redefine anything you wish to do differently. """ pm_method = '(should be defined by sub class)' def inputFileKeywords(self, attempt): """ Return the top, bottom, and polar keywords for attempt number `attempt`. NB. `attempt` begins at 1, not 0. 
""" assert attempt <= self.maxAttempts if attempt > self.scriptAttempts: attempt -= self.scriptAttempts multiplicity_keys = self.multiplicityKeywords[self.geometry.molecule.multiplicity] top_keys = "{method} {mult} {top}".format( method = self.pm_method, mult = multiplicity_keys, top = self.keywords[attempt-1]['top'], ) bottom_keys = "{bottom} {method} {mult}".format( method = self.pm_method, bottom = self.keywords[attempt-1]['bottom'], mult = multiplicity_keys, ) polar_keys = "oldgeo {polar} nosym precise {method} {mult}".format( method = self.pm_method, polar = ('polar' if self.geometry.molecule.multiplicity == 1 else 'static'), mult = multiplicity_keys, ) return top_keys, bottom_keys, polar_keys class MopacMolPM3(MopacMolPMn): """ Mopac PM3 calculations for molecules This is a class of its own in case you wish to do anything differently, but for now it's the same as all the MOPAC PMn calculations, only pm3 """ pm_method = 'pm3' class MopacMolPM6(MopacMolPMn): """ Mopac PM6 calculations for molecules This is a class of its own in case you wish to do anything differently, but for now it's the same as all the MOPAC PMn calculations, only pm6 """ pm_method = 'pm6' class MopacMolPM7(MopacMolPMn): """ Mopac PM7 calculations for molecules This is a class of its own in case you wish to do anything differently, but for now it's the same as all the MOPAC PMn calculations, only pm7 """ pm_method = 'pm7'
Molecular-Image-Recognition/Molecular-Image-Recognition
code/rmgpy/qm/mopac.py
Python
mit
16,212
[ "MOPAC", "cclib" ]
be0e2961b6b33f41a68b182585a3c1744b89f594eefec0fbe265d8be142f9fe4
# $Id$
#
#  Copyright (C) 2000-2008  Greg Landrum and Rational Discovery LLC
#
#   @@ All Rights Reserved @@
#  This file is part of the RDKit.
#  The contents are covered by the terms of the BSD license
#  which is included in the file license.txt, found at the root
#  of the RDKit source tree.
#
""" command line utility for screening composite models

**Usage**

  _ScreenComposite [optional args] modelfile(s) datafile_

Unless indicated otherwise (via command line arguments), _modelfile_ is
a file containing a pickled composite model and _datafile_ is a QDAT file.

**Command Line Arguments**

  - -t *threshold value(s)*: use high-confidence predictions for the
    final analysis of the hold-out data.  The threshold value can be either
    a single float or a list/tuple of floats.  All thresholds should be
    between 0.0 and 1.0

  - -D: do a detailed screen.

  - -d *database name*: instead of reading the data from a QDAT file,
    pull it from a database.  In this case, the _datafile_ argument
    provides the name of the database table containing the data set.

  - -N *note*: use all models from the database which have this note.
    The modelfile argument should contain the name of the table with
    the models.

  - -H: screen only the hold out set (works only if a version of
    BuildComposite more recent than 1.2.2 was used).

  - -T: screen only the training set (works only if a version of
    BuildComposite more recent than 1.2.2 was used).

  - -E: do a detailed Error analysis.  This shows each misclassified
    point and the number of times it was missed across all screened
    composites.  If the --enrich argument is also provided, only compounds
    that have true activity value equal to the enrichment value will be
    used.

  - --enrich *enrichVal*: target "active" value to be used in calculating
    enrichments.

  - -A: show All predictions.

  - -S: shuffle activity values before screening

  - -R: randomize activity values before screening

  - -F *filter frac*: filters the data before training to change the
    distribution of activity values in the training set.  *filter frac*
    is the fraction of the training set that should have the target value.
    **See note in BuildComposite help about data filtering**

  - -v *filter value*: filters the data before training to change the
    distribution of activity values in the training set. *filter value*
    is the target value to use in filtering.
    **See note in BuildComposite help about data filtering**

  - -V: be verbose when screening multiple models

  - -h: show this message and exit

  - --OOB: do an "out-of-bag" generalization error estimate.  This only
    makes sense when applied to the original data set.

  - --pickleCol *colId*: index of the column containing a pickled value
    (used primarily for cases where fingerprints are used as descriptors)

  *** Options for making Prediction (Hanneke) Plots ***

  - --predPlot=<fileName>: triggers the generation of a Hanneke plot and
    sets the name of the .txt file which will hold the output data.
    A Gnuplot control file, <fileName>.gnu, will also be generated.

  - --predActTable=<name> (optional): name of the database table
    containing activity values.  If this is not provided, activities
    will be read from the same table containing the screening data

  - --predActCol=<name> (optional): name of the activity column. If not
    provided, the name of the last column in the activity table will
    be used.
- --predLogScale (optional): If provided, the x axis of the prediction plot (the activity axis) will be plotted using a log scale - --predShow: launch a gnuplot instance and display the prediction plot (the plot will still be written to disk). *** The following options are likely obsolete *** - -P: read pickled data. The datafile argument should contain a pickled data set. *relevant only to qdat files* - -q: data are not quantized (the composite should take care of quantization itself if it requires quantized data). *relevant only to qdat files* """ import os import sys import numpy from rdkit import DataStructs from rdkit.Dbase import DbModule from rdkit.Dbase.DbConnection import DbConnect from rdkit.ML import CompositeRun from rdkit.ML.Data import DataUtils, SplitData import pickle try: from PIL import Image, ImageDraw except ImportError: hasPil = 0 else: hasPil = 1 _details = CompositeRun.CompositeRun() __VERSION_STRING = "3.3.0" def message(msg, noRet=0): """ emits messages to _sys.stdout_ override this in modules which import this one to redirect output **Arguments** - msg: the string to be displayed """ if noRet: sys.stdout.write('%s ' % (msg)) else: sys.stdout.write('%s\n' % (msg)) def error(msg): """ emits messages to _sys.stderr_ override this in modules which import this one to redirect output **Arguments** - msg: the string to be displayed """ sys.stderr.write('ERROR: %s\n' % (msg)) def CalcEnrichment(mat, tgt=1): if tgt < 0 or tgt >= mat.shape[0]: return 0 nPts = float(sum(sum(mat))) nTgtPred = float(sum(mat[:, tgt])) if nTgtPred: pctCorrect = mat[tgt, tgt] / nTgtPred nTgtReal = float(sum(mat[tgt, :])) pctOverall = nTgtReal / nPts else: return 0.0 return pctCorrect / pctOverall def CollectResults(indices, dataSet, composite, callback=None, appendExamples=0, errorEstimate=0): """ screens a set of examples through a composite and returns the results #DOC **Arguments** - examples: the examples to be screened (a sequence of sequences) it's assumed that the last element in each example is it's "value" - composite: the composite model to be used - callback: (optional) if provided, this should be a function taking a single argument that is called after each example is screened with the number of examples screened so far as the argument. - appendExamples: (optional) this value is passed on to the composite's _ClassifyExample()_ method. - errorEstimate: (optional) calculate the "out of bag" error estimate for the composite using Breiman's definition. This only makes sense when screening the original data set! [L. 
Breiman "Out-of-bag Estimation", UC Berkeley Dept of Statistics Technical Report (1996)] **Returns** a list of 3-tuples _nExamples_ long: 1) answer: the value from the example 2) pred: the composite model's prediction 3) conf: the confidence of the composite """ # for i in range(len(composite)): # print(' ',i,'TRAIN:',composite[i][0]._trainIndices) for j in range(len(composite)): tmp = composite.GetModel(j) if hasattr(tmp, '_trainIndices') and type(tmp._trainIndices) != dict: tis = {} if hasattr(tmp, '_trainIndices'): for v in tmp._trainIndices: tis[v] = 1 tmp._trainIndices = tis nPts = len(indices) res = [None] * nPts for i in range(nPts): idx = indices[i] example = dataSet[idx] if errorEstimate: use = [] for j in range(len(composite)): mdl = composite.GetModel(j) if not mdl._trainIndices.get(idx, 0): use.append(j) else: use = None # print('IDX:',idx,'use:',use ) pred, conf = composite.ClassifyExample(example, appendExample=appendExamples, onlyModels=use) if composite.GetActivityQuantBounds(): answer = composite.QuantizeActivity(example)[-1] else: answer = example[-1] res[i] = answer, pred, conf if callback: callback(i) return res def DetailedScreen(indices, data, composite, threshold=0, screenResults=None, goodVotes=None, badVotes=None, noVotes=None, callback=None, appendExamples=0, errorEstimate=0): """ screens a set of examples cross a composite and breaks the predictions into *correct*,*incorrect* and *unclassified* sets. #DOC **Arguments** - examples: the examples to be screened (a sequence of sequences) it's assumed that the last element in each example is its "value" - composite: the composite model to be used - threshold: (optional) the threshold to be used to decide whether or not a given prediction should be kept - screenResults: (optional) the results of screening the results (a sequence of 3-tuples in the format returned by _CollectResults()_). If this is provided, the examples will not be screened again. - goodVotes,badVotes,noVotes: (optional) if provided these should be lists (or anything supporting an _append()_ method) which will be used to pass the screening results back. - callback: (optional) if provided, this should be a function taking a single argument that is called after each example is screened with the number of examples screened so far as the argument. - appendExamples: (optional) this value is passed on to the composite's _ClassifyExample()_ method. - errorEstimate: (optional) calculate the "out of bag" error estimate for the composite using Breiman's definition. This only makes sense when screening the original data set! [L. 
Breiman "Out-of-bag Estimation", UC Berkeley Dept of Statistics Technical Report (1996)] **Notes** - since this function doesn't return anything, if one or more of the arguments _goodVotes_, _badVotes_, and _noVotes_ is not provided, there's not much reason to call it """ if screenResults is None: screenResults = CollectResults(indices, data, composite, callback=callback, appendExamples=appendExamples, errorEstimate=errorEstimate) if goodVotes is None: goodVotes = [] if badVotes is None: badVotes = [] if noVotes is None: noVotes = [] for i in range(len(screenResults)): answer, pred, conf = screenResults[i] if conf > threshold: if pred != answer: badVotes.append((answer, pred, conf, i)) else: goodVotes.append((answer, pred, conf, i)) else: noVotes.append((answer, pred, conf, i)) def ShowVoteResults(indices, data, composite, nResultCodes, threshold, verbose=1, screenResults=None, callback=None, appendExamples=0, goodVotes=None, badVotes=None, noVotes=None, errorEstimate=0): """ screens the results and shows a detailed workup The work of doing the screening and processing the results is handled by _DetailedScreen()_ #DOC **Arguments** - examples: the examples to be screened (a sequence of sequences) it's assumed that the last element in each example is its "value" - composite: the composite model to be used - nResultCodes: the number of possible results the composite can return - threshold: the threshold to be used to decide whether or not a given prediction should be kept - screenResults: (optional) the results of screening the results (a sequence of 3-tuples in the format returned by _CollectResults()_). If this is provided, the examples will not be screened again. - callback: (optional) if provided, this should be a function taking a single argument that is called after each example is screened with the number of examples screened so far as the argument. - appendExamples: (optional) this value is passed on to the composite's _ClassifyExample()_ method. - goodVotes,badVotes,noVotes: (optional) if provided these should be lists (or anything supporting an _append()_ method) which will be used to pass the screening results back. - errorEstimate: (optional) calculate the "out of bag" error estimate for the composite using Breiman's definition. This only makes sense when screening the original data set! [L. Breiman "Out-of-bag Estimation", UC Berkeley Dept of Statistics Technical Report (1996)] **Returns** a 7-tuple: 1) the number of good (correct) predictions 2) the number of bad (incorrect) predictions 3) the number of predictions skipped due to the _threshold_ 4) the average confidence in the good predictions 5) the average confidence in the bad predictions 6) the average confidence in the skipped predictions 7) the results table """ nExamples = len(indices) if goodVotes is None: goodVotes = [] if badVotes is None: badVotes = [] if noVotes is None: noVotes = [] DetailedScreen(indices, data, composite, threshold, screenResults=screenResults, goodVotes=goodVotes, badVotes=badVotes, noVotes=noVotes, callback=callback, appendExamples=appendExamples, errorEstimate=errorEstimate) nBad = len(badVotes) nGood = len(goodVotes) nClassified = nGood + nBad if verbose: print('\n\t*** Vote Results ***') print('misclassified: %d/%d (%%%4.2f)\t%d/%d (%%%4.2f)' % (nBad, nExamples, 100. * float(nBad) / nExamples, nBad, nClassified, 100. * float(nBad) / nClassified)) nSkip = len(noVotes) if nSkip > 0: if verbose: print('skipped: %d/%d (%%% 4.2f)' % (nSkip, nExamples, 100. 
* float(nSkip) / nExamples)) noConf = numpy.array([x[2] for x in noVotes]) avgSkip = sum(noConf) / float(nSkip) else: avgSkip = 0. if nBad > 0: badConf = numpy.array([x[2] for x in badVotes]) avgBad = sum(badConf) / float(nBad) else: avgBad = 0. if nGood > 0: goodRes = [x[1] for x in goodVotes] goodConf = numpy.array([x[2] for x in goodVotes]) avgGood = sum(goodConf) / float(nGood) else: goodRes = [] goodConf = [] avgGood = 0. if verbose: print() print('average correct confidence: % 6.4f' % avgGood) print('average incorrect confidence: % 6.4f' % avgBad) voteTab = numpy.zeros((nResultCodes, nResultCodes), numpy.int) for res in goodRes: voteTab[res, res] += 1 for ans, res, conf, idx in badVotes: voteTab[ans, res] += 1 if verbose: print() print('\tResults Table:') vTab = voteTab.transpose() colCounts = numpy.sum(vTab, 0) rowCounts = numpy.sum(vTab, 1) message('') for i in range(nResultCodes): if rowCounts[i] == 0: rowCounts[i] = 1 row = vTab[i] message(' ', noRet=1) for j in range(nResultCodes): entry = row[j] message(' % 6d' % entry, noRet=1) message(' | % 4.2f' % (100. * vTab[i, i] / rowCounts[i])) message(' ', noRet=1) for i in range(nResultCodes): message('-------', noRet=1) message('') message(' ', noRet=1) for i in range(nResultCodes): if colCounts[i] == 0: colCounts[i] = 1 message(' % 6.2f' % (100. * vTab[i, i] / colCounts[i]), noRet=1) message('') return nGood, nBad, nSkip, avgGood, avgBad, avgSkip, voteTab def ScreenIt(composite, indices, data, partialVote=0, voteTol=0.0, verbose=1, screenResults=None, goodVotes=None, badVotes=None, noVotes=None): """ screens a set of data using a composite model and prints out statistics about the screen. #DOC The work of doing the screening and processing the results is handled by _DetailedScreen()_ **Arguments** - composite: the composite model to be used - data: the examples to be screened (a sequence of sequences) it's assumed that the last element in each example is its "value" - partialVote: (optional) toggles use of the threshold value in the screnning. - voteTol: (optional) the threshold to be used to decide whether or not a given prediction should be kept - verbose: (optional) sets degree of verbosity of the screening - screenResults: (optional) the results of screening the results (a sequence of 3-tuples in the format returned by _CollectResults()_). If this is provided, the examples will not be screened again. - goodVotes,badVotes,noVotes: (optional) if provided these should be lists (or anything supporting an _append()_ method) which will be used to pass the screening results back. **Returns** a 7-tuple: 1) the number of good (correct) predictions 2) the number of bad (incorrect) predictions 3) the number of predictions skipped due to the _threshold_ 4) the average confidence in the good predictions 5) the average confidence in the bad predictions 6) the average confidence in the skipped predictions 7) None """ if goodVotes is None: goodVotes = [] if badVotes is None: badVotes = [] if noVotes is None: noVotes = [] if not partialVote: voteTol = 0.0 DetailedScreen(indices, data, composite, voteTol, screenResults=screenResults, goodVotes=goodVotes, badVotes=badVotes, noVotes=noVotes) nGood = len(goodVotes) goodAccum = 0. for res, pred, conf, idx in goodVotes: goodAccum += conf misCount = len(badVotes) badAccum = 0. for res, pred, conf, idx in badVotes: badAccum += conf nSkipped = len(noVotes) goodSkipped = 0 badSkipped = 0 skipAccum = 0. 
for ans, pred, conf, idx in noVotes: skipAccum += conf if ans != pred: badSkipped += 1 else: goodSkipped += 1 nData = nGood + misCount + nSkipped if verbose: print('Total N Points:', nData) if partialVote: nCounted = nData - nSkipped if verbose: print('Misclassifications: %d (%%%4.2f)' % (misCount, 100. * float(misCount) / nCounted)) print('N Skipped: %d (%%%4.2f)' % (nSkipped, 100. * float(nSkipped) / nData)) print('\tGood Votes Skipped: %d (%%%4.2f)' % (goodSkipped, 100. * float(goodSkipped) / nSkipped)) print('\tBad Votes Skipped: %d (%%%4.2f)' % (badSkipped, 100. * float(badSkipped) / nSkipped)) else: if verbose: print('Misclassifications: %d (%%%4.2f)' % (misCount, 100. * float(misCount) / nData)) print('Average Correct Vote Confidence: % 6.4f' % (goodAccum / (nData - misCount))) print('Average InCorrect Vote Confidence: % 6.4f' % (badAccum / misCount)) avgGood = 0 avgBad = 0 avgSkip = 0 if nGood: avgGood = goodAccum / nGood if misCount: avgBad = badAccum / misCount if nSkipped: avgSkip = skipAccum / nSkipped return nGood, misCount, nSkipped, avgGood, avgBad, avgSkip, None def _processVoteList(votes, data): """ *Internal Use Only* converts a list of 4 tuples: (answer,prediction,confidence,idx) into an alternate list: (answer,prediction,confidence,data point) **Arguments** - votes: a list of 4 tuples: (answer, prediction, confidence, index) - data: a _DataUtils.MLData.MLDataSet_ **Note**: alterations are done in place in the _votes_ list """ for i in range(len(votes)): ans, pred, conf, idx = votes[i] votes[i] = (ans, pred, conf, data[idx]) def PrepareDataFromDetails(model, details, data, verbose=0): if (hasattr(details, 'doHoldout') and details.doHoldout) or \ (hasattr(details, 'doTraining') and details.doTraining): try: splitF = model._splitFrac except AttributeError: pass else: if verbose: message('s', noRet=1) if hasattr(details, 'errorEstimate') and details.errorEstimate and \ hasattr(details, 'doHoldout') and details.doHoldout: message('*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*') message('****** WARNING: OOB screening should not be combined with doHoldout option.') message('*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*') trainIdx, testIdx = SplitData.SplitIndices(data.GetNPts(), splitF, silent=1) if hasattr(details, 'filterFrac') and details.filterFrac != 0.0: if verbose: message('f', noRet=1) trainFilt, temp = DataUtils.FilterData(data, details.filterVal, details.filterFrac, -1, indicesToUse=trainIdx, indicesOnly=1) testIdx += temp trainIdx = trainFilt elif hasattr(details, 'errorEstimate') and details.errorEstimate: # the OOB screening works by checking to see if a given index # is in the if hasattr(details, 'filterFrac') and details.filterFrac != 0.0: if verbose: message('f', noRet=1) testIdx, trainIdx = DataUtils.FilterData(data, details.filterVal, details.filterFrac, -1, indicesToUse=range(data.GetNPts()), indicesOnly=1) testIdx.extend(trainIdx) else: testIdx = list(range(data.GetNPts())) trainIdx = [] else: testIdx = list(range(data.GetNPts())) trainIdx = [] if hasattr(details, 'doTraining') and details.doTraining: testIdx, trainIdx = trainIdx, testIdx return trainIdx, testIdx def ScreenFromDetails(models, details, callback=None, setup=None, appendExamples=0, goodVotes=None, badVotes=None, noVotes=None, data=None, enrichments=None): """ Screens a set of data using a a _CompositeRun.CompositeRun_ instance to provide parameters # DOC The actual data to be used are extracted from the database and table specified in _details_ Aside from dataset construction, _ShowVoteResults()_ does 
most of the heavy lifting here. **Arguments** - model: a composite model - details: a _CompositeRun.CompositeRun_ object containing details (options, parameters, etc.) about the run - callback: (optional) if provided, this should be a function taking a single argument that is called after each example is screened with the number of examples screened so far as the argument. - setup: (optional) a function taking a single argument which is called at the start of screening with the number of points to be screened as the argument. - appendExamples: (optional) this value is passed on to the composite's _ClassifyExample()_ method. - goodVotes,badVotes,noVotes: (optional) if provided these should be lists (or anything supporting an _append()_ method) which will be used to pass the screening results back. **Returns** a 7-tuple: 1) the number of good (correct) predictions 2) the number of bad (incorrect) predictions 3) the number of predictions skipped due to the _threshold_ 4) the average confidence in the good predictions 5) the average confidence in the bad predictions 6) the average confidence in the skipped predictions 7) the results table """ if data is None: if hasattr(details, 'pickleCol'): data = details.GetDataSet(pickleCol=details.pickleCol, pickleClass=DataStructs.ExplicitBitVect) else: data = details.GetDataSet() if details.threshold > 0.0: details.partialVote = 1 else: details.partialVote = 0 if type(models) not in [list, tuple]: models = (models, ) nModels = len(models) if setup is not None: setup(nModels * data.GetNPts()) nGood = numpy.zeros(nModels, numpy.float) nBad = numpy.zeros(nModels, numpy.float) nSkip = numpy.zeros(nModels, numpy.float) confGood = numpy.zeros(nModels, numpy.float) confBad = numpy.zeros(nModels, numpy.float) confSkip = numpy.zeros(nModels, numpy.float) voteTab = None if goodVotes is None: goodVotes = [] if badVotes is None: badVotes = [] if noVotes is None: noVotes = [] if enrichments is None: enrichments = [0.0] * nModels badVoteDict = {} noVoteDict = {} for i in range(nModels): if nModels > 1: goodVotes = [] badVotes = [] noVotes = [] model = models[i] try: seed = model._randomSeed except AttributeError: pass else: DataUtils.InitRandomNumbers(seed) if (hasattr(details, 'shuffleActivities') and details.shuffleActivities) or \ (hasattr(details, 'randomActivities') and details.randomActivities): if hasattr(details, 'shuffleActivities') and details.shuffleActivities: shuffle = True else: shuffle = False randomize = True DataUtils.RandomizeActivities(data, shuffle=shuffle, runDetails=details) else: randomize = False shuffle = False if hasattr(model, '_shuffleActivities') and \ model._shuffleActivities and \ not shuffle: message('*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*') message('****** WARNING: Shuffled model being screened with unshuffled data.') message('*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*') if hasattr(model, '_randomizeActivities') and \ model._randomizeActivities and \ not randomize: message('*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*') message('****** WARNING: Random model being screened with non-random data.') message('*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*') trainIdx, testIdx = PrepareDataFromDetails(model, details, data) nPossible = model.GetQuantBounds()[1] if callback: cb = lambda x, y=callback, z=i * data.GetNPts(): y(x + z) else: cb = None if not hasattr(details, 'errorEstimate') or not details.errorEstimate: errorEstimate = 0 else: errorEstimate = 1 g, b, s, aG, aB, aS, vT = ShowVoteResults( testIdx, data, model, nPossible[-1], details.threshold, 
verbose=0, callback=cb, appendExamples=appendExamples, goodVotes=goodVotes, badVotes=badVotes, noVotes=noVotes, errorEstimate=errorEstimate) if voteTab is None: voteTab = numpy.zeros(vT.shape, numpy.float) if hasattr(details, 'errorAnalysis') and details.errorAnalysis: for a, p, c, idx in badVotes: label = testIdx[idx] if hasattr(details, 'enrichTgt') and details.enrichTgt >= 0: if a == details.enrichTgt: badVoteDict[label] = badVoteDict.get(label, 0) + 1 else: badVoteDict[label] = badVoteDict.get(label, 0) + 1 for a, p, c, idx in noVotes: label = testIdx[idx] if hasattr(details, 'enrichTgt') and details.enrichTgt >= 0: if a == details.enrichTgt: noVoteDict[label] = noVoteDict.get(label, 0) + 1 else: noVoteDict[label] = noVoteDict.get(label, 0) + 1 voteTab += vT nGood[i] = g nBad[i] = b nSkip[i] = s confGood[i] = aG confBad[i] = aB confSkip[i] = aS if hasattr(details, 'enrichTgt') and details.enrichTgt >= 0: enrichments[i] = CalcEnrichment(vT, tgt=details.enrichTgt) if nModels == 1: return g, b, s, aG, aB, aS, vT else: voteTab /= nModels avgNBad = sum(nBad) / nModels devNBad = numpy.sqrt(sum((nBad - avgNBad)**2) / (nModels - 1)) # bestIdx = numpy.argsort(nBad)[0] avgNGood = sum(nGood) / nModels devNGood = numpy.sqrt(sum((nGood - avgNGood)**2) / (nModels - 1)) avgNSkip = sum(nSkip) / nModels devNSkip = numpy.sqrt(sum((nSkip - avgNSkip)**2) / (nModels - 1)) avgConfBad = sum(confBad) / nModels devConfBad = numpy.sqrt(sum((confBad - avgConfBad)**2) / (nModels - 1)) avgConfGood = sum(confGood) / nModels devConfGood = numpy.sqrt(sum((confGood - avgConfGood)**2) / (nModels - 1)) avgConfSkip = sum(confSkip) / nModels devConfSkip = numpy.sqrt(sum((confSkip - avgConfSkip)**2) / (nModels - 1)) return ((avgNGood, devNGood), (avgNBad, devNBad), (avgNSkip, devNSkip), (avgConfGood, devConfGood), (avgConfBad, devConfBad), (avgConfSkip, devConfSkip), voteTab) def GetScreenImage(nGood, nBad, nRej, size=None): if not hasPil: return None try: nTot = float(nGood) + float(nBad) + float(nRej) except TypeError: nGood = nGood[0] nBad = nBad[0] nRej = nRej[0] nTot = float(nGood) + float(nBad) + float(nRej) if not nTot: return None goodColor = (100, 100, 255) badColor = (255, 100, 100) rejColor = (255, 255, 100) pctGood = float(nGood) / nTot pctBad = float(nBad) / nTot pctRej = float(nRej) / nTot if size is None: size = (100, 100) img = Image.new('RGB', size, (255, 255, 255)) draw = ImageDraw.Draw(img) box = (0, 0, size[0] - 1, size[1] - 1) startP = -90 endP = int(startP + pctGood * 360) draw.pieslice(box, startP, endP, fill=goodColor) startP = endP endP = int(startP + pctBad * 360) draw.pieslice(box, startP, endP, fill=badColor) startP = endP endP = int(startP + pctRej * 360) draw.pieslice(box, startP, endP, fill=rejColor) return img def ScreenToHtml(nGood, nBad, nRej, avgGood, avgBad, avgSkip, voteTable, imgDir='.', fullPage=1, skipImg=0, includeDefs=1): """ returns the text of a web page showing the screening details #DOC **Arguments** - nGood: number of correct predictions - nBad: number of incorrect predictions - nRej: number of rejected predictions - avgGood: average correct confidence - avgBad: average incorrect confidence - avgSkip: average rejected confidence - voteTable: vote table - imgDir: (optional) the directory to be used to hold the vote image (if constructed) **Returns** a string containing HTML """ if type(nGood) == tuple: multModels = 1 else: multModels = 0 if fullPage: outTxt = ["""<html><body>"""] outTxt.append('<center><h2>VOTE DETAILS</h2></center>') else: outTxt = [] outTxt.append('<font>') # 
Get the image if not skipImg: img = GetScreenImage(nGood, nBad, nRej) if img: if imgDir: imgFileName = '/'.join((imgDir, 'votes.png')) else: imgFileName = 'votes.png' img.save(imgFileName) outTxt.append('<center><img src="%s"></center>' % (imgFileName)) nPoss = len(voteTable) pureCounts = numpy.sum(voteTable, 1) accCounts = numpy.sum(voteTable, 0) pureVect = numpy.zeros(nPoss, numpy.float) accVect = numpy.zeros(nPoss, numpy.float) for i in range(nPoss): if pureCounts[i]: pureVect[i] = float(voteTable[i, i]) / pureCounts[i] if accCounts[i]: accVect[i] = float(voteTable[i, i]) / accCounts[i] outTxt.append('<center><table border=1>') outTxt.append('<tr><td></td>') for i in range(nPoss): outTxt.append('<th>%d</th>' % i) outTxt.append('<th>% Accurate</th>') outTxt.append('</tr>') # outTxt.append('<th rowspan=%d>Predicted</th></tr>'%(nPoss+1)) for i in range(nPoss): outTxt.append('<tr><th>%d</th>' % (i)) for j in range(nPoss): if i == j: if not multModels: outTxt.append('<td bgcolor="#A0A0FF">%d</td>' % (voteTable[j, i])) else: outTxt.append('<td bgcolor="#A0A0FF">%.2f</td>' % (voteTable[j, i])) else: if not multModels: outTxt.append('<td>%d</td>' % (voteTable[j, i])) else: outTxt.append('<td>%.2f</td>' % (voteTable[j, i])) outTxt.append('<td>%4.2f</td</tr>' % (100.0 * accVect[i])) if i == 0: outTxt.append('<th rowspan=%d>Predicted</th></tr>' % (nPoss)) else: outTxt.append('</tr>') outTxt.append('<tr><th>% Pure</th>') for i in range(nPoss): outTxt.append('<td>%4.2f</td>' % (100.0 * pureVect[i])) outTxt.append('</tr>') outTxt.append('<tr><td></td><th colspan=%d>Original</th>' % (nPoss)) outTxt.append('</table></center>') if not multModels: nTotal = nBad + nGood + nRej nClass = nBad + nGood if nClass: pctErr = 100. * float(nBad) / nClass else: pctErr = 0.0 outTxt.append('<p>%d of %d examples were misclassified (%%%4.2f)' % (nBad, nGood + nBad, pctErr)) if nRej > 0: pctErr = 100. * float(nBad) / (nGood + nBad + nRej) outTxt.append('<p> %d of %d overall: (%%%4.2f)' % (nBad, nTotal, pctErr)) pctRej = 100. * float(nRej) / nTotal outTxt.append('<p>%d of %d examples were rejected (%%%4.2f)' % (nRej, nTotal, pctRej)) if nGood != 0: outTxt.append('<p>The correctly classified examples had an average confidence of %6.4f' % avgGood) if nBad != 0: outTxt.append('<p>The incorrectly classified examples had an average confidence of %6.4f' % avgBad) if nRej != 0: outTxt.append('<p>The rejected examples had an average confidence of %6.4f' % avgSkip) else: nTotal = nBad[0] + nGood[0] + nRej[0] nClass = nBad[0] + nGood[0] devClass = nBad[1] + nGood[1] if nClass: pctErr = 100. * float(nBad[0]) / nClass devPctErr = 100. * float(nBad[1]) / nClass else: pctErr = 0.0 devPctErr = 0.0 outTxt.append('<p>%.2f(%.2f) of %.2f(%.2f) examples were misclassified (%%%4.2f(%4.2f))' % (nBad[0], nBad[1], nClass, devClass, pctErr, devPctErr)) if nRej > 0: pctErr = 100. * float(nBad[0]) / nTotal devPctErr = 100. * float(nBad[1]) / nTotal outTxt.append('<p> %.2f(%.2f) of %d overall: (%%%4.2f(%4.2f))' % (nBad[0], nBad[1], nTotal, pctErr, devPctErr)) pctRej = 100. * float(nRej[0]) / nTotal devPctRej = 100. 
* float(nRej[1]) / nTotal outTxt.append('<p>%.2f(%.2f) of %d examples were rejected (%%%4.2f(%4.2f))' % (nRej[0], nRej[1], nTotal, pctRej, devPctRej)) if nGood != 0: outTxt.append( '<p>The correctly classified examples had an average confidence of %6.4f(%.4f)' % avgGood) if nBad != 0: outTxt.append( '<p>The incorrectly classified examples had an average confidence of %6.4f(%.4f)' % avgBad) if nRej != 0: outTxt.append('<p>The rejected examples had an average confidence of %6.4f(%.4f)' % avgSkip) outTxt.append('</font>') if includeDefs: txt = """ <p><b>Definitions:</b> <ul> <li> <i>% Pure:</i> The percentage of, for example, known positives predicted to be positive. <li> <i>% Accurate:</i> The percentage of, for example, predicted positives that actually are positive. </ul> """ outTxt.append(txt) if fullPage: outTxt.append("""</body></html>""") return '\n'.join(outTxt) def MakePredPlot(details, indices, data, goodVotes, badVotes, nRes, idCol=0, verbose=0): """ **Arguments** - details: a CompositeRun.RunDetails object - indices: a sequence of integer indices into _data_ - data: the data set in question. We assume that the ids for the data points are in the _idCol_ column - goodVotes/badVotes: predictions where the model was correct/incorrect. These are sequences of 4-tuples: (answer,prediction,confidence,index into _indices_) """ if not hasattr(details, 'predPlot') or not details.predPlot: return if verbose: message('\n-> Constructing Prediction (Hanneke) Plot') outF = open(details.predPlot, 'w+') gnuF = open('%s.gnu' % details.predPlot, 'w+') # first get the ids of the data points we screened: ptIds = [data[x][idCol] for x in indices] # get a connection to the database we'll use to grab the continuous # activity values: origConn = DbConnect(details.dbName, details.tableName, user=details.dbUser, password=details.dbPassword) colNames = origConn.GetColumnNames() idName = colNames[idCol] if not hasattr(details, 'predActTable') or \ not details.predActTable or \ details.predActTable == details.tableName: actConn = origConn else: actConn = DbConnect(details.dbName, details.predActTable, user=details.dbUser, password=details.dbPassword) if verbose: message('\t-> Pulling Activity Data') if type(ptIds[0]) not in [type(''), type(u'')]: ptIds = [str(x) for x in ptIds] whereL = [DbModule.placeHolder] * len(ptIds) if hasattr(details, 'predActCol') and details.predActCol: actColName = details.predActCol else: actColName = actConn.GetColumnNames()[-1] whereTxt = "%s in (%s)" % (idName, ','.join(whereL)) rawD = actConn.GetData(fields='%s,%s' % (idName, actColName), where=whereTxt, extras=ptIds) # order the data returned: if verbose: message('\t-> Creating Plot') acts = [None] * len(ptIds) for entry in rawD: ID, act = entry idx = ptIds.index(ID) acts[idx] = act outF.write('#ID Pred Conf %s\n' % (actColName)) for ans, pred, conf, idx in goodVotes: act = acts[idx] if act != 'None': act = float(act) else: act = 0 outF.write('%s %d %.4f %f\n' % (ptIds[idx], pred, conf, act)) for ans, pred, conf, idx in badVotes: act = acts[idx] if act != 'None': act = float(act) else: act = 0 outF.write('%s %d %.4f %f\n' % (ptIds[idx], pred, conf, act)) outF.close() if not hasattr(details, 'predLogScale') or not details.predLogScale: actLabel = actColName else: actLabel = 'log(%s)' % (actColName) actLabel = actLabel.replace('_', ' ') gnuHdr = """# Generated by ScreenComposite.py version: %s set size square 0.7 set yrange [:1] set data styl points set ylab 'confidence' set xlab '%s' set grid set nokey set term postscript enh 
color solid "Helvetica" 16 set term X """ % (__VERSION_STRING, actLabel) gnuF.write(gnuHdr) plots = [] for i in range(nRes): if not hasattr(details, 'predLogScale') or not details.predLogScale: plots.append("'%s' us 4:($2==%d?$3:0/0)" % (details.predPlot, i)) else: plots.append("'%s' us (log10($4)):($2==%d?$3:0/0)" % (details.predPlot, i)) gnuF.write("plot %s\n" % (','.join(plots))) gnuTail = """ # EOF """ gnuF.write(gnuTail) gnuF.close() if hasattr(details, 'predShow') and details.predShow: try: try: from Gnuplot import Gnuplot except ImportError: raise ImportError('Functionality requires the Gnuplot module') p = Gnuplot() p('cd "%s"' % (os.getcwd())) p('load "%s.gnu"' % (details.predPlot)) input('press return to continue...\n') except Exception: import traceback traceback.print_exc() def Go(details): pass def SetDefaults(details=None): global _details if details is None: details = _details CompositeRun.SetDefaults(details) details.screenVoteTol = [0.] details.detailedScreen = 0 details.doHoldout = 0 details.doTraining = 0 details.errorAnalysis = 0 details.verbose = 0 details.partialVote = 0 return details def Usage(): """ prints a list of arguments for when this is used from the command line and then exits """ print(__doc__) sys.exit(-1) def ShowVersion(includeArgs=0): """ prints the version number of the program """ print('This is ScreenComposite.py version %s' % (__VERSION_STRING)) if includeArgs: print('command line was:') print(' '.join(sys.argv)) def ParseArgs(details): import getopt try: args, extras = getopt.getopt(sys.argv[1:], 'EDd:t:VN:HThSRF:v:AX', ['predPlot=', 'predActCol=', 'predActTable=', 'predLogScale', 'predShow', 'OOB', 'pickleCol=', 'enrich=', ]) except Exception: import traceback traceback.print_exc() Usage() details.predPlot = '' details.predActCol = '' details.predActTable = '' details.predLogScale = '' details.predShow = 0 details.errorEstimate = 0 details.pickleCol = -1 details.enrichTgt = -1 for arg, val in args: if arg == '-d': details.dbName = val elif arg == '-D': details.detailedScreen = 1 elif arg == '-t': details.partialVote = 1 voteTol = eval(val) if type(voteTol) not in [type([]), type((1, 1))]: voteTol = [voteTol] for tol in voteTol: if tol > 1 or tol < 0: error('Voting threshold must be between 0 and 1') sys.exit(-2) details.screenVoteTol = voteTol elif arg == '-N': details.note = val elif arg == '-H': details.doTraining = 0 details.doHoldout = 1 elif arg == '-T': details.doHoldout = 0 details.doTraining = 1 elif arg == '-E': details.errorAnalysis = 1 details.detailedScreen = 1 elif arg == '-A': details.showAll = 1 details.detailedScreen = 1 elif arg == '-S': details.shuffleActivities = 1 elif arg == '-R': details.randomActivities = 1 elif arg == '-h': Usage() elif arg == '-F': details.filterFrac = float(val) elif arg == '-v': details.filterVal = float(val) elif arg == '-V': verbose = 1 elif arg == '--predPlot': details.detailedScreen = 1 details.predPlot = val elif arg == '--predActCol': details.predActCol = val elif arg == '--predActTable': details.predActTable = val elif arg == '--predLogScale': details.predLogScale = 1 elif arg == '--predShow': details.predShow = 1 elif arg == '--predShow': details.predShow = 1 elif arg == '--OOB': details.errorEstimate = 1 elif arg == '--pickleCol': details.pickleCol = int(val) - 1 elif arg == '--enrich': details.enrichTgt = int(val) else: Usage() if len(extras) < 1: Usage() return extras if __name__ == '__main__': details = SetDefaults() extras = ParseArgs(details) ShowVersion(includeArgs=1) models = [] if 
details.note and details.dbName: tblName = extras[0] message('-> Retrieving models from database') conn = DbConnect(details.dbName, tblName) blobs = conn.GetData(fields='model', where="where note='%s'" % (details.note)) for blob in blobs: blob = blob[0] try: models.append(pickle.loads(str(blob))) except Exception: import traceback traceback.print_exc() message('Model load failed') else: message('-> Loading model') modelFile = open(extras[0], 'rb') models.append(pickle.load(modelFile)) if not len(models): error('No composite models found') sys.exit(-1) else: message('-> Working with %d models.' % len(models)) extras = extras[1:] for fName in extras: if details.dbName != '': details.tableName = fName data = details.GetDataSet(pickleCol=details.pickleCol, pickleClass=DataStructs.ExplicitBitVect) else: data = DataUtils.BuildDataSet(fName) descNames = data.GetVarNames() nModels = len(models) screenResults = [None] * nModels dataSets = [None] * nModels message('-> Constructing and screening data sets') testIdx = list(range(data.GetNPts())) trainIdx = testIdx for modelIdx in range(nModels): # tmpD = copy.deepcopy(data) tmpD = data model = models[modelIdx] message('.', noRet=1) try: seed = model._randomSeed except AttributeError: pass else: DataUtils.InitRandomNumbers(seed) if details.shuffleActivities or details.randomActivities: shuffle = details.shuffleActivities randomize = 1 DataUtils.RandomizeActivities(tmpD, shuffle=details.shuffleActivities, runDetails=details) else: randomize = False shuffle = False if hasattr(model, '_shuffleActivities') and \ model._shuffleActivities and \ not shuffle: message('*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*') message('****** WARNING: Shuffled model being screened with unshuffled data.') message('*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*') if hasattr(model, '_randomizeActivities') and \ model._randomizeActivities and \ not randomize: message('*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*') message('****** WARNING: Random model being screened with non-random data.') message('*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*') trainIdx, testIdx = PrepareDataFromDetails(model, details, tmpD, verbose=1) screenResults[modelIdx] = CollectResults(testIdx, tmpD, model, errorEstimate=details.errorEstimate) dataSets[modelIdx] = testIdx for tol in details.screenVoteTol: if len(details.screenVoteTol) > 1: message('\n-----*****-----*****-----*****-----*****-----*****-----*****-----\n') message('Tolerance: %f' % tol) nGood = numpy.zeros(nModels, numpy.float) nBad = numpy.zeros(nModels, numpy.float) nSkip = numpy.zeros(nModels, numpy.float) confGood = numpy.zeros(nModels, numpy.float) confBad = numpy.zeros(nModels, numpy.float) confSkip = numpy.zeros(nModels, numpy.float) if details.enrichTgt >= 0: enrichments = numpy.zeros(nModels, numpy.float) goodVoteDict = {} badVoteDict = {} noVoteDict = {} voteTab = None for modelIdx in range(nModels): model = models[modelIdx] model.SetInputOrder(descNames) testIdx = dataSets[modelIdx] screenRes = screenResults[modelIdx] if not details.detailedScreen: g, b, s, aG, aB, aS, vT = ScreenIt(model, testIdx, tmpD, details.partialVote, tol, verbose=details.verbose, screenResults=screenRes) else: if model.GetActivityQuantBounds(): nRes = len(model.GetActivityQuantBounds()) + 1 else: nRes = model.GetQuantBounds()[1][-1] badVotes = [] noVotes = [] if (hasattr(details, 'showAll') and details.showAll) or \ (hasattr(details, 'predPlot') and details.predPlot): goodVotes = [] else: goodVotes = None g, b, s, aG, aB, aS, vT = ShowVoteResults( testIdx, tmpD, model, nRes, 
tol, verbose=details.verbose, screenResults=screenRes, badVotes=badVotes, noVotes=noVotes, goodVotes=goodVotes, errorEstimate=details.errorEstimate) if voteTab is None: voteTab = numpy.zeros(vT.shape, numpy.float) if details.errorAnalysis: for a, p, c, idx in badVotes: label = testIdx[idx] if hasattr(details, 'enrichTgt') and details.enrichTgt >= 0: if a == details.enrichTgt: badVoteDict[label] = badVoteDict.get(label, 0) + 1 else: badVoteDict[label] = badVoteDict.get(label, 0) + 1 for a, p, c, idx in noVotes: label = testIdx[idx] if hasattr(details, 'enrichTgt') and details.enrichTgt >= 0: if a == details.enrichTgt: noVoteDict[label] = noVoteDict.get(label, 0) + 1 else: noVoteDict[label] = noVoteDict.get(label, 0) + 1 if hasattr(details, 'showAll') and details.showAll: for a, p, c, idx in goodVotes: label = testIdx[idx] if details.enrichTgt >= 0: if a == details.enrichTgt: goodVoteDict[label] = goodVoteDict.get(label, 0) + 1 else: goodVoteDict[label] = goodVoteDict.get(label, 0) + 1 if details.enrichTgt > -1: enrichments[modelIdx] = CalcEnrichment(vT, tgt=details.enrichTgt) voteTab += vT if details.detailedScreen and hasattr(details, 'predPlot') and details.predPlot: MakePredPlot(details, testIdx, tmpD, goodVotes, badVotes, nRes, verbose=1) if hasattr(details, 'showAll') and details.showAll: print('-v-v-v-v-v-v-v- All Votes -v-v-v-v-v-v-v-') print('id, prediction, confidence, flag(-1=skipped,0=wrong,1=correct)') for ans, pred, conf, idx in goodVotes: pt = tmpD[testIdx[idx]] assert model.GetActivityQuantBounds() or pt[-1] == ans, 'bad point?: %s != %s' % ( str(pt[-1]), str(ans)) print('%s, %d, %.4f, 1' % (str(pt[0]), pred, conf)) for ans, pred, conf, idx in badVotes: pt = tmpD[testIdx[idx]] assert model.GetActivityQuantBounds() or pt[-1] == ans, 'bad point?: %s != %s' % ( str(pt[-1]), str(ans)) print('%s, %d, %.4f, 0' % (str(pt[0]), pred, conf)) for ans, pred, conf, idx in noVotes: pt = tmpD[testIdx[idx]] assert model.GetActivityQuantBounds() or pt[-1] == ans, 'bad point?: %s != %s' % ( str(pt[-1]), str(ans)) print('%s, %d, %.4f, -1' % (str(pt[0]), pred, conf)) print('-^-^-^-^-^-^-^- -^-^-^-^-^-^-^-') nGood[modelIdx] = g nBad[modelIdx] = b nSkip[modelIdx] = s confGood[modelIdx] = aG confBad[modelIdx] = aB confSkip[modelIdx] = aS print() if nModels > 1: print('-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*') print('AVERAGES:') avgNBad = sum(nBad) / nModels devNBad = numpy.sqrt(sum((nBad - avgNBad)**2) / (nModels - 1)) bestIdx = numpy.argsort(nBad)[0] avgNGood = sum(nGood) / nModels devNGood = numpy.sqrt(sum((nGood - avgNGood)**2) / (nModels - 1)) avgNSkip = sum(nSkip) / nModels devNSkip = numpy.sqrt(sum((nSkip - avgNSkip)**2) / (nModels - 1)) avgConfBad = sum(confBad) / nModels devConfBad = numpy.sqrt(sum((confBad - avgConfBad)**2) / (nModels - 1)) avgConfGood = sum(confGood) / nModels devConfGood = numpy.sqrt(sum((confGood - avgConfGood)**2) / (nModels - 1)) avgConfSkip = sum(confSkip) / nModels devConfSkip = numpy.sqrt(sum((confSkip - avgConfSkip)**2) / (nModels - 1)) nClassified = avgNGood + avgNBad nExamples = nClassified + avgNSkip print('Misclassifications: \t%%%5.2f(%%%5.2f) %4.1f(%4.1f) / %d' % (100 * avgNBad / nExamples, 100 * devNBad / nExamples, avgNBad, devNBad, nExamples)) if avgNSkip > 0: print('\tthreshold: \t%%%5.2f(%%%5.2f) %4.1f(%4.1f) / %d' % (100 * avgNBad / nClassified, 100 * devNBad / nClassified, avgNBad, devNBad, nClassified)) print() print('Number Skipped: %%%4.2f(%%%4.2f) %4.2f(%4.2f)' % (100 * avgNSkip / nExamples, 100 * devNSkip / nExamples, avgNSkip, 
devNSkip)) print() print('Confidences:') print('\tCorrect: \t%4.2f(%4.2f)' % (100 * avgConfGood, 100 * devConfGood)) print('\tIncorrect: \t%4.2f(%4.2f)' % (100 * avgConfBad, 100 * devConfBad)) if avgNSkip > 0: print('\tSkipped: \t%4.2f(%4.2f)' % (100 * avgConfSkip, 100 * devConfSkip)) if details.detailedScreen: message('Results Table:') voteTab = numpy.transpose(voteTab) / nModels nResultCodes = len(voteTab) colCounts = numpy.sum(voteTab, 0) rowCounts = numpy.sum(voteTab, 1) print() for i in range(nResultCodes): if rowCounts[i] == 0: rowCounts[i] = 1 row = voteTab[i] message(' ', noRet=1) for j in range(nResultCodes): entry = row[j] message(' % 6.2f' % entry, noRet=1) message(' | % 4.2f' % (100. * voteTab[i, i] / rowCounts[i])) message(' ', noRet=1) for i in range(nResultCodes): message('-------', noRet=1) message('') message(' ', noRet=1) for i in range(nResultCodes): if colCounts[i] == 0: colCounts[i] = 1 message(' % 6.2f' % (100. * voteTab[i, i] / colCounts[i]), noRet=1) message('') if details.enrichTgt > -1: mean = sum(enrichments) / nModels enrichments -= mean dev = numpy.sqrt(sum(enrichments * enrichments)) / (nModels - 1) message(' Enrichment of value %d: %.4f (%.4f)' % (details.enrichTgt, mean, dev)) else: bestIdx = 0 print('------------------------------------------------') print('Best Model: ', bestIdx + 1) bestBad = nBad[bestIdx] bestGood = nGood[bestIdx] bestSkip = nSkip[bestIdx] nClassified = bestGood + bestBad nExamples = nClassified + bestSkip print('Misclassifications: \t%%%5.2f %d / %d' % (100 * bestBad / nExamples, bestBad, nExamples)) if bestSkip > 0: print('\tthreshold: \t%%%5.2f %d / %d' % (100 * bestBad / nClassified, bestBad, nClassified)) print() print('Number Skipped: %%%4.2f %d' % (100 * bestSkip / nExamples, bestSkip)) print() print('Confidences:') print('\tCorrect: \t%4.2f' % (100 * confGood[bestIdx])) print('\tIncorrect: \t%4.2f' % (100 * confBad[bestIdx])) if bestSkip > 0: print('\tSkipped: \t%4.2f' % (100 * confSkip[bestIdx])) if nModels == 1 and details.detailedScreen: message('') message('Results Table:') voteTab = numpy.transpose(vT) nResultCodes = len(vT) colCounts = numpy.sum(voteTab, 0) rowCounts = numpy.sum(voteTab, 1) message('') for i in range(nResultCodes): if rowCounts[i] == 0: rowCounts[i] = 1 row = voteTab[i] message(' ', noRet=1) for j in range(nResultCodes): entry = row[j] message(' % 6.2f' % entry, noRet=1) message(' | % 4.2f' % (100. * voteTab[i, i] / rowCounts[i])) message(' ', noRet=1) for i in range(nResultCodes): message('-------', noRet=1) message('') message(' ', noRet=1) for i in range(nResultCodes): if colCounts[i] == 0: colCounts[i] = 1 message(' % 6.2f' % (100. * voteTab[i, i] / colCounts[i]), noRet=1) message('') if details.errorAnalysis: message('\n*-*-*-*-*-*-*-*- ERROR ANALYSIS -*-*-*-*-*-*-*-*\n') ks = badVoteDict.keys() if len(ks): message(' ---> Bad Vote Counts') ks = noVoteDict.keys() if len(ks): message(' ---> Skipped Compound Counts') for k in ks: pt = data[k] message('%s,%d' % (str(pt[0]), noVoteDict[k])) if hasattr(details, 'showAll') and details.showAll: ks = goodVoteDict.keys() if len(ks): message(' ---> Good Vote Counts') for k in ks: pt = data[k] message('%s,%d' % (str(pt[0]), goodVoteDict[k]))
rdkit/rdkit
rdkit/ML/ScreenComposite.py
Python
bsd-3-clause
55,093
[ "RDKit" ]
9c9705c6b11a2f0265d2bfe5d321eceb2dd6deec53e0e30145a8467951b9a084
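The '% Pure' and '% Accurate' columns emitted by the HTML-report code above are the row- and column-normalized diagonal of the vote (confusion) table, with zero-count rows and columns guarded exactly as the `if pureCounts[i]:` checks do. A minimal standalone sketch of that arithmetic, assuming only numpy; the 3x3 counts below are invented for illustration:

import numpy as np

def purity_accuracy(vote_table):
    """Diagonal / row sums -> '% Pure'; diagonal / column sums -> '% Accurate'.

    vote_table[i, j] counts examples with true class i predicted as class j;
    zero rows/columns are guarded to avoid division by zero.
    """
    vote_table = np.asarray(vote_table, dtype=float)
    row_sums = vote_table.sum(axis=1)
    col_sums = vote_table.sum(axis=0)
    diag = np.diag(vote_table)
    pure = np.divide(diag, row_sums, out=np.zeros_like(diag), where=row_sums > 0)
    acc = np.divide(diag, col_sums, out=np.zeros_like(diag), where=col_sums > 0)
    return 100.0 * pure, 100.0 * acc

if __name__ == '__main__':
    table = np.array([[40, 5, 0],
                      [3, 30, 2],
                      [1, 4, 15]])
    pure, acc = purity_accuracy(table)
    print('%% Pure:     %s' % np.round(pure, 2))   # [88.89 85.71 75.  ]
    print('%% Accurate: %s' % np.round(acc, 2))    # [90.91 76.92 88.24]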
#!/usr/bin/python #SBATCH --job-name=mmp_tel #SBATCH --output=../log/%j.txt #SBATCH --error=../log/%j.out #SBATCH --partition=compute #SBATCH --nodes=1 #SBATCH --ntasks=1 #SBATCH --cpus-per-task=4 #SBATCH --nodes=1 #SBATCH --mem=16384 #SBATCH --mail-user=dec@u.northwestern.edu #SBATCH --workdir=/lscr2/andersenlab/dec211/mmp_telseq/sra import os, sys import glob import re import subprocess from subprocess import PIPE, Popen from datetime import datetime def file_exists(filename): if os.path.isfile(filename) and os.path.getsize(filename) > 0: return True else: return False class EAV: """ Very simple Entity-Attribute-Value Object """ def __init__(self): self.entity = "" self.sub_entity = "" self.attribute = "" self.sub_attribute = "" self.value = "" self.timestamp = datetime.now() self.comment = "" self.file = None def __repr__(self): return "\nEntity:{self.entity}\n\ Entity:{self.sub_entity}\n\ Attribute:{self.attribute}\n\ Sub-Attribute:{self.sub_attribute}\n\ Value:{self.value}\n\ timestamp:{self.timestamp}\n".format(**locals()) def save(self): if self.file is None: raise Exception("No Log File Set") if not file_exists(self.file): write_header = True else: write_header = False with(open(self.file, "a")) as f: if write_header is True: f.write("entity\tsub_entity\tattribute\tsub_attribute\tvalue\tcomment\ttimestamp\n") line = '\t'.join(map(str,[self.entity, self.sub_entity, self.attribute, self.sub_attribute, self.value, self.comment, self.timestamp])) f.write(line + "\n") def get_contigs(bam): header, err = subprocess.Popen(["samtools","view","-H",bam], stdout=PIPE, stderr=PIPE).communicate() if err != "": raise Exception(err) # Extract contigs from header and convert contigs to integers contigs = {} for x in re.findall("@SQ\WSN:(?P<chrom>[A-Za-z0-9_]*)\WLN:(?P<length>[0-9]+)", header): contigs[x[0]] = int(x[1]) return contigs def coverage(bam, mtchr = None): # Check to see if file exists if os.path.isfile(bam) == False: raise Exception("Bam file does not exist") contigs = get_contigs(bam) # Guess mitochondrial chromosome mtchr = [x for x in contigs if x.lower().find("m") == 0] if len(mtchr) != 1: mtchr = None else: mtchr = mtchr[0] coverage_dict = {} for c in contigs.keys(): command = "samtools depth -r %s %s | awk '{sum+=$3;cnt++}END{print cnt \"\t\" sum}'" % (c, bam) coverage_dict[c] = {} coverage_dict[c]["Bases Mapped"], coverage_dict[c]["Sum of Depths"] = map(int,subprocess.Popen(command, stdout=PIPE, shell = True).communicate()[0].strip().split("\t")) coverage_dict[c]["Breadth of Coverage"] = coverage_dict[c]["Bases Mapped"] / float(contigs[c]) coverage_dict[c]["Depth of Coverage"] = coverage_dict[c]["Sum of Depths"] / float(contigs[c]) coverage_dict[c]["Length"] = int(contigs[c]) # Calculate Genome Wide Breadth of Coverage and Depth of Coverage genome_length = float(sum(contigs.values())) coverage_dict["genome"] = {} coverage_dict["genome"]["Length"] = int(genome_length) coverage_dict["genome"]["Bases Mapped"] = sum([x["Bases Mapped"] for k, x in coverage_dict.iteritems() if k != "genome"]) coverage_dict["genome"]["Sum of Depths"] = sum([x["Sum of Depths"] for k, x in coverage_dict.iteritems() if k != "genome"]) coverage_dict["genome"]["Breadth of Coverage"] = sum([x["Bases Mapped"] for k, x in coverage_dict.iteritems() if k != "genome"]) / float(genome_length) coverage_dict["genome"]["Depth of Coverage"] = sum([x["Sum of Depths"] for k, x in coverage_dict.iteritems() if k != "genome"]) / float(genome_length) if mtchr != None: # Calculate nuclear breadth of coverage and depth of 
coverage ignore_contigs = [mtchr, "genome", "nuclear"] coverage_dict["nuclear"] = {} coverage_dict["nuclear"]["Length"] = sum([x["Length"] for k,x in coverage_dict.iteritems() if k not in ignore_contigs ]) coverage_dict["nuclear"]["Bases Mapped"] = sum([x["Bases Mapped"] for k, x in coverage_dict.iteritems() if k not in ignore_contigs]) coverage_dict["nuclear"]["Sum of Depths"] = sum([x["Sum of Depths"] for k, x in coverage_dict.iteritems() if k not in ignore_contigs]) coverage_dict["nuclear"]["Breadth of Coverage"] = sum([x["Bases Mapped"] for k, x in coverage_dict.iteritems() if k not in ignore_contigs]) / float(coverage_dict["nuclear"]["Length"]) coverage_dict["nuclear"]["Depth of Coverage"] = sum([x["Sum of Depths"] for k, x in coverage_dict.iteritems() if k not in ignore_contigs]) / float(coverage_dict["nuclear"]["Length"]) # Calculate the ratio of mtDNA depth to nuclear depth coverage_dict["genome"]["mt_ratio"] = coverage_dict[mtchr]["Depth of Coverage"] / float(coverage_dict["nuclear"]["Depth of Coverage"]) # Flatten Dictionary coverage = [] for k,v in coverage_dict.items(): for x in v.items(): coverage += [(k,x[0], x[1])] return coverage line_num = int(sys.argv[1]) - 1 f=open('../strain_info.txt') lines = f.readlines() lines = [x.strip().split("\t") for x in lines] line = lines[line_num] strain_name = line[0].split(" ")[2] strain_bp = line[0].split(" ")[4] reference = "/lscr2/andersenlab/dec211/pyPipeline/genomes/WS245/c_elegans.PRJNA13758.WS245.genomic.fa.gz" # Download sra files """for line in lines: for i in line[1:]: strain = line[0].split(" ")[2] length = line[0].split(" ")[4] if len(glob.glob("../telseq/{strain}.{length}*".format(**locals()))) == 0: print strain, length i06 = i[0:6] i09 = i[0:9] loc_string = "ftp://ftp-trace.ncbi.nih.gov/sra/sra-instant/reads/ByRun/sra/SRR/{i06}/{i09}/{i}.sra" loc_string = loc_string.format(**locals()) print "downloading " + loc_string.format(**locals()) print "curl {loc_string}".format(**locals()) os.system("curl {loc_string} > {i}.sra".format(**locals())) """ # Process SRA Files for i in line[1:]: os.system("fastq-dump --split-files --gzip {i}.sra ".format(i=i)) #os.system("rm {i}".format(**locals())) # Generate read group RG = r'@RG\tID:{i}\tSM:{strain_name}'.format(**locals()) # Align os.system(r"bwa mem -R '{RG}' -t 4 {reference} {i}_1.fastq.gz {i}_2.fastq.gz > ../bam/{i}.tmp.bam".format(i=i.replace(".sra",""), RG=RG, reference=reference)) # Sort os.system("samtools sort -O bam -T ../bam/{i}.TEMP.bam -@ 4 ../bam/{i}.tmp.bam > ../bam/{i}.sorted.bam && samtools index ../bam/{i}.sorted.bam".format(**locals())) # Remove temporary BAM and fastq os.system("rm {i}_1.fastq.gz && rm {i}_2.fastq.gz".format(i=i)) os.system("rm ../bam/{i}.tmp.bam".format(i=i)) # Combine processed BAM Files. 
SRA_files = ' '.join(["../bam/" + x.replace(".sra","") + ".sorted.bam" for x in line[1:]]) if len(["../bam/" + x.replace(".sra","") + ".sorted.bam" for x in line[1:]]) > 1: merge_command = "samtools merge -f -@ 4 ../bam/{strain_name}.{strain_bp}.bam {SRA_files} && samtools index ../bam/{strain_name}.{strain_bp}.bam".format(**locals()) os.system(merge_command) else: os.system("mv {SRA_files} ../bam/{strain_name}.{strain_bp}.bam".format(**locals())) os.system("samtools index ../bam/{strain_name}.{strain_bp}.bam".format(**locals())) for i in line[1:]: os.system("rm ../bam/{i}.sorted.bam && rm ../bam/{i}.sorted.bam.bai".format(i=i)) # Produce Coverage Statistics Here bam = "../bam/{strain_name}.{strain_bp}.bam".format(**locals()) eav = EAV() eav.file = "../eav.txt" eav.entity = strain_name eav.sub_entity = strain_bp for contig, k,v in coverage(bam, "MtDNA"): eav.sub_attribute = contig + " (" + k + ")" eav.value = v eav.save() # Run Telseq Here #telseq -z 'AATCCG' -u $file.bam -o $file.telseq_elegans.AATCCG.noreadgroup.txt os.system("telseq -m -z 'TTAGGC' -u ../bam/{strain_name}.{strain_bp}.bam -o ../telseq/{strain_name}.{strain_bp}.telseq_elegans.TTAGGC.noreadgroup.txt".format(**locals())) os.system("telseq -m -z 'GTATGC' -u ../bam/{strain_name}.{strain_bp}.bam -o ../telseq/{strain_name}.{strain_bp}.telseq_elegans.GTATGC.noreadgroup.txt".format(**locals())) #telseq -z 'GTCTAG' -u $file.bam -o $file.telseq_elegans.GTCTAG.noreadgroup.txt # Delete sra file for i in line[1:]: #os.system("rm {i}.sra ".format(i=i)) pass # Delete bam file os.system("rm ../bam/{strain_name}.{strain_bp}.bam".format(**locals()))
AndersenLab/mmp-telseq
mmp_telseq.py
Python
mit
9,002
[ "BWA" ]
4426c0cf3f32918566db42f1b2661df11cf614c6abb4c7141b095f05ef41dbd6
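coverage() in the row above derives "Breadth of Coverage" and "Depth of Coverage" from just two sums per contig, which it gets by piping `samtools depth` through awk. A sketch of the same arithmetic on an in-memory depth list; the contig length and per-base depths here are made-up numbers:

def contig_coverage(depths, contig_length):
    """Return (bases mapped, sum of depths, breadth, depth) for one contig.

    depths: per-base read depths at covered positions, one value per row
    of `samtools depth` output (positions with zero depth are absent).
    """
    bases_mapped = sum(1 for d in depths if d > 0)
    sum_of_depths = sum(depths)
    breadth = bases_mapped / float(contig_length)   # fraction of contig covered
    depth = sum_of_depths / float(contig_length)    # mean depth over the contig
    return bases_mapped, sum_of_depths, breadth, depth

# Example with invented numbers: 5 covered bases on a 10 bp contig.
print(contig_coverage([3, 4, 4, 2, 1], 10))  # (5, 14, 0.5, 1.4)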
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=no-else-return, unidiomatic-typecheck, invalid-name """The expression functor of Relay.""" from tvm.ir import Op from .function import Function from .expr import Call, Let, Var, GlobalVar from .expr import If, Tuple, TupleGetItem, Constant from .expr import RefCreate, RefRead, RefWrite from .adt import Constructor, Match, Clause class ExprFunctor: """ An abstract visitor defined over Expr. Defines the default dispatch over expressions, and implements memoization. """ def __init__(self): self.memo_map = {} # pylint: disable=no-else-return def visit(self, expr): """Apply the visitor to an expression.""" if expr in self.memo_map: return self.memo_map[expr] if isinstance(expr, Function): res = self.visit_function(expr) elif isinstance(expr, Call): res = self.visit_call(expr) elif isinstance(expr, Let): res = self.visit_let(expr) elif isinstance(expr, Var): res = self.visit_var(expr) elif isinstance(expr, GlobalVar): res = self.visit_global_var(expr) elif isinstance(expr, If): res = self.visit_if(expr) elif isinstance(expr, Tuple): res = self.visit_tuple(expr) elif isinstance(expr, TupleGetItem): res = self.visit_tuple_getitem(expr) elif isinstance(expr, Constant): res = self.visit_constant(expr) elif isinstance(expr, Op): res = self.visit_op(expr) elif isinstance(expr, RefCreate): res = self.visit_ref_create(expr) elif isinstance(expr, RefRead): res = self.visit_ref_read(expr) elif isinstance(expr, RefWrite): res = self.visit_ref_write(expr) elif isinstance(expr, Constructor): res = self.visit_constructor(expr) elif isinstance(expr, Match): res = self.visit_match(expr) else: raise Exception("warning unhandled case: {0}".format(type(expr))) self.memo_map[expr] = res return res def visit_function(self, _): raise NotImplementedError() def visit_let(self, _): raise NotImplementedError() def visit_call(self, _): raise NotImplementedError() def visit_var(self, _): raise NotImplementedError() def visit_type(self, typ): return typ def visit_if(self, _): raise NotImplementedError() def visit_tuple(self, _): raise NotImplementedError() def visit_tuple_getitem(self, _): raise NotImplementedError() def visit_global_var(self, _): raise NotImplementedError() def visit_op(self, _): raise NotImplementedError() def visit_constant(self, _): raise NotImplementedError() def visit_ref_create(self, _): raise NotImplementedError() def visit_ref_write(self, _): raise NotImplementedError() def visit_ref_read(self, _): raise NotImplementedError() def visit_constructor(self, _): raise NotImplementedError() def visit_match(self, _): raise NotImplementedError() class ExprVisitor(ExprFunctor): """ A visitor over Expr. The default behavior recursively traverses the AST. 
""" def visit_tuple(self, tup): for x in tup.fields: self.visit(x) def visit_call(self, call): self.visit(call.op) for a in call.args: self.visit(a) def visit_var(self, var): pass def visit_let(self, let): self.visit(let.var) self.visit(let.value) self.visit(let.body) def visit_function(self, f): self.visit(f.body) def visit_if(self, i): self.visit(i.cond) self.visit(i.true_branch) self.visit(i.false_branch) def visit_global_var(self, gv): pass def visit_constructor(self, c): pass def visit_op(self, op): pass def visit_constant(self, const): pass def visit_ref_create(self, r): self.visit(r.value) def visit_ref_read(self, r): self.visit(r.ref) def visit_ref_write(self, r): self.visit(r.ref) self.visit(r.value) def visit_tuple_getitem(self, t): self.visit(t.tuple_value) def visit_match(self, m): self.visit(m.data) for c in m.clauses: self.visit(c.rhs) class ExprMutator(ExprFunctor): """ A functional visitor over Expr. The default behavior recursively traverses the AST and reconstructs the AST. """ def visit_function(self, fn): new_params = [self.visit(x) for x in fn.params] new_body = self.visit(fn.body) return Function(list(new_params), new_body, fn.ret_type, fn.type_params, fn.attrs) def visit_let(self, let): new_var = self.visit(let.var) new_val = self.visit(let.value) new_body = self.visit(let.body) return Let(new_var, new_val, new_body) def visit_call(self, call): new_fn = self.visit(call.op) new_args = [self.visit(arg) for arg in call.args] return Call(new_fn, new_args, call.attrs) def visit_var(self, var): return var def visit_global_id(self, global_var): return global_var def visit_if(self, ite): return If(self.visit(ite.cond), self.visit(ite.true_branch), self.visit(ite.false_branch)) def visit_tuple(self, tup): return Tuple([self.visit(field) for field in tup.fields]) def visit_tuple_getitem(self, op): tuple_value = self.visit(op.tuple_value) if not tuple_value.same_as(op.tuple_value): return TupleGetItem(tuple_value, op.index) return op def visit_global_var(self, gvar): return gvar def visit_op(self, op): return op def visit_constant(self, const): return const def visit_constructor(self, con): return con def visit_match(self, m): return Match( self.visit(m.data), [Clause(c.lhs, self.visit(c.rhs)) for c in m.clauses], complete=m.complete, ) def visit_ref_create(self, r): return RefCreate(self.visit(r.value)) def visit_ref_write(self, r): return RefWrite(self.visit(r.ref), self.visit(r.value)) def visit_ref_read(self, r): return RefRead(self.visit(r.ref))
tqchen/tvm
python/tvm/relay/expr_functor.py
Python
apache-2.0
7,280
[ "VisIt" ]
d895b9cdd8432e9dbfa420749175ffcea78efe98a868473029fca318c87cd1bc
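ExprFunctor's visit() combines isinstance-based dispatch with a memo_map so each shared subexpression is processed only once. A minimal sketch of the same pattern over a toy expression type; the Num/Add classes are invented for illustration and are not part of Relay:

class Num:
    def __init__(self, value):
        self.value = value

class Add:
    def __init__(self, lhs, rhs):
        self.lhs, self.rhs = lhs, rhs

class Evaluator:
    """Dispatch on node type and cache results per node identity,
    mirroring ExprFunctor's memo_map."""
    def __init__(self):
        self.memo = {}

    def visit(self, e):
        if id(e) in self.memo:
            return self.memo[id(e)]
        if isinstance(e, Num):
            res = e.value
        elif isinstance(e, Add):
            res = self.visit(e.lhs) + self.visit(e.rhs)
        else:
            raise Exception("unhandled case: {0}".format(type(e)))
        self.memo[id(e)] = res
        return res

shared = Num(2)
tree = Add(shared, Add(shared, Num(3)))  # `shared` is visited only once
print(Evaluator().visit(tree))  # 7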
#################################################
## rotate_mol_vasp.py                           #
## Description: Rotate a molecule from POSCAR   #
## Developer: feihoom82@gmail.com               #
## Date: 2018-05-02                             #
#################################################

### Start of Setting ###
center = 65                     # Index of the center atom
ids_mol = [65, 66, 67, 68, 69]  # The center atom must be the first index.
rotate1 = 42                    # Angle(degree) or initial direction vector
rotate2 = [0, 0, 1]             # Rotational axis vector
# Example1) rotate1 = 45; rotate2 = [0,0,1]
# Example2) rotate1 = [1,0,0]; rotate2 = [1,1,0]
### End of Setting ###

from ase import Atoms
from ase.build import molecule
import sys

# Shift to 0-based indexing
center = center - 1
for i in range(len(ids_mol)):
    ids_mol[i] = ids_mol[i] - 1

### Read POSCAR
if len(sys.argv) < 2:
    print('Usage: python rotate_mol_vasp.py POSCAR')
    print('Notice - You should set parameters in the source code !')
    sys.exit(-1)
POSCAR = open(sys.argv[1], 'r').readlines()

### Read structure and element positions from POSCAR
elem = POSCAR[5].split()
nelem = []
for i in range(len(elem)):
    nelem[i:] = [int(POSCAR[6].split()[i])]
n_tot = sum(nelem)

# Line 8 is "Selective dynamics" when present; coordinates then start one line later.
if POSCAR[7].split()[0][0] in ['S', 's']:
    start = 9
else:
    start = 8
if POSCAR[start - 1].split()[0][0] in ['C', 'c']:
    coord_type = 'Cartesian'
elif POSCAR[start - 1].split()[0][0] in ['D', 'd']:
    coord_type = 'Direct'

mag = float(POSCAR[1].strip())
a = [float(x) * mag for x in POSCAR[2].split()]
b = [float(x) * mag for x in POSCAR[3].split()]
c = [float(x) * mag for x in POSCAR[4].split()]

xyz = POSCAR[start:start + n_tot]
cnt = 0
for i in range(len(elem)):
    for j in range(nelem[i]):
        if coord_type == 'Cartesian':
            xyz[cnt] = [float(x) for x in xyz[cnt].split()[0:3]] + [elem[i]]
        elif coord_type == 'Direct':
            vec = [float(x) for x in xyz[cnt].split()[0:3]]
            xyz[cnt] = [a[0] * vec[0] + b[0] * vec[1] + c[0] * vec[2],
                        a[1] * vec[0] + b[1] * vec[1] + c[1] * vec[2],
                        a[2] * vec[0] + b[2] * vec[1] + c[2] * vec[2],
                        elem[i]]
        cnt += 1

### Generate a molecule from POSCAR, positioned relative to the center atom
chem_mol = []
pos_mol = []
for idx in ids_mol:
    chem_mol.append(xyz[idx][3])
    pos_mol.append([xyz[idx][0] - xyz[center][0],
                    xyz[idx][1] - xyz[center][1],
                    xyz[idx][2] - xyz[center][2]])
MOL = Atoms(''.join(chem_mol), pos_mol)
#ooh = Atoms("OOH", [[0, 0, 0], [-1.067, -0.403, 0.796], [-0.696, -0.272, 1.706]])
#h2o = molecule('H2O')
print('Molecule : ' + MOL.get_chemical_formula())
print('\nBefore rotation:')
print(MOL.get_positions())
MOL.rotate(rotate1, rotate2, center=(0, 0, 0))
print('\nAfter rotation:')
print(MOL.get_positions())

### Write the rotated positions of the molecule back into the full coordinate list
for i in range(len(ids_mol)):
    xyz[ids_mol[i]][0] = MOL.get_positions()[i][0] + xyz[center][0]
    xyz[ids_mol[i]][1] = MOL.get_positions()[i][1] + xyz[center][1]
    xyz[ids_mol[i]][2] = MOL.get_positions()[i][2] + xyz[center][2]

### Write POSCAR_out
out = open(sys.argv[1] + '_out', 'w')
out.write(''.join(POSCAR[0:start - 1]))
out.write('Cartesian' + '\n')
SelDyn = ''
if start == 9:
    SelDyn = '\tT T T'
for i in range(len(xyz)):
    out.write(' %13.9F %13.9F %13.9F' % (xyz[i][0], xyz[i][1], xyz[i][2]) + SelDyn + '\n')
out.close()
print('\nWriting ' + sys.argv[1] + '_out file is done.')
cwandtj/A2P2
tools/rotate_mol_vasp.py
Python
mit
3,145
[ "ASE" ]
6c4af9b44d8b01c48d7827e9ceaecbe3e45d16712a0ca5204e39290c8e5f89c4
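The Direct branch of the POSCAR reader above is a change of basis: each Cartesian component is the fractional coordinate dotted with the matching components of the scale-factor-adjusted lattice vectors a, b, c. A standalone sketch of that conversion; the lattice and fractional coordinate are made-up numbers:

def direct_to_cartesian(frac, a, b, c):
    """Convert one fractional (Direct) coordinate to Cartesian, given
    lattice vectors a, b, c already multiplied by the POSCAR scale factor."""
    return [a[k] * frac[0] + b[k] * frac[1] + c[k] * frac[2] for k in range(3)]

a = [10.0, 0.0, 0.0]
b = [0.0, 10.0, 0.0]
c = [0.0, 0.0, 12.0]
print(direct_to_cartesian([0.5, 0.5, 0.25], a, b, c))  # [5.0, 5.0, 3.0]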
from __future__ import unicode_literals import base64 import datetime import hashlib import json import netrc import os import re import socket import sys import time import xml.etree.ElementTree from ..utils import ( compat_http_client, compat_urllib_error, compat_urllib_parse_urlparse, compat_urlparse, compat_str, clean_html, compiled_regex_type, ExtractorError, float_or_none, int_or_none, RegexNotFoundError, sanitize_filename, unescapeHTML, ) _NO_DEFAULT = object() class InfoExtractor(object): """Information Extractor class. Information extractors are the classes that, given a URL, extract information about the video (or videos) the URL refers to. This information includes the real video URL, the video title, author and others. The information is stored in a dictionary which is then passed to the FileDownloader. The FileDownloader processes this information possibly downloading the video to the file system, among other possible outcomes. The dictionaries must include the following fields: id: Video identifier. title: Video title, unescaped. Additionally, it must contain either a formats entry or a url one: formats: A list of dictionaries for each format available, ordered from worst to best quality. Potential fields: * url Mandatory. The URL of the video file * ext Will be calculated from url if missing * format A human-readable description of the format ("mp4 container with h264/opus"). Calculated from the format_id, width, height. and format_note fields if missing. * format_id A short description of the format ("mp4_h264_opus" or "19"). Technically optional, but strongly recommended. * format_note Additional info about the format ("3D" or "DASH video") * width Width of the video, if known * height Height of the video, if known * resolution Textual description of width and height * tbr Average bitrate of audio and video in KBit/s * abr Average audio bitrate in KBit/s * acodec Name of the audio codec in use * asr Audio sampling rate in Hertz * vbr Average video bitrate in KBit/s * vcodec Name of the video codec in use * container Name of the container format * filesize The number of bytes, if known in advance * filesize_approx An estimate for the number of bytes * player_url SWF Player URL (used for rtmpdump). * protocol The protocol that will be used for the actual download, lower-case. "http", "https", "rtsp", "rtmp", "m3u8" or so. * preference Order number of this format. If this field is present and not None, the formats get sorted by this field, regardless of all other values. -1 for default (order by other properties), -2 or smaller for less than default. * quality Order number of the video quality of this format, irrespective of the file format. -1 for default (order by other properties), -2 or smaller for less than default. * http_referer HTTP Referer header value to set. * http_method HTTP method to use for the download. * http_headers A dictionary of additional HTTP headers to add to the request. * http_post_data Additional data to send with a POST request. url: Final video URL. ext: Video filename extension. format: The video format, defaults to ext (used for --get-format) player_url: SWF Player URL (used for rtmpdump). The following fields are optional: display_id An alternative identifier for the video, not necessarily unique, but available before title. 
Typically, id is something like "4234987", title "Dancing naked mole rats", and display_id "dancing-naked-mole-rats" thumbnails: A list of dictionaries, with the following entries: * "url" * "width" (optional, int) * "height" (optional, int) * "resolution" (optional, string "{width}x{height"}, deprecated) thumbnail: Full URL to a video thumbnail image. description: One-line video description. uploader: Full name of the video uploader. timestamp: UNIX timestamp of the moment the video became available. upload_date: Video upload date (YYYYMMDD). If not explicitly set, calculated from timestamp. uploader_id: Nickname or id of the video uploader. location: Physical location where the video was filmed. subtitles: The subtitle file contents as a dictionary in the format {language: subtitles}. duration: Length of the video in seconds, as an integer. view_count: How many users have watched the video on the platform. like_count: Number of positive ratings of the video dislike_count: Number of negative ratings of the video comment_count: Number of comments on the video age_limit: Age restriction for the video, as an integer (years) webpage_url: The url to the video webpage, if given to youtube-dl it should allow to get the same result again. (It will be set by YoutubeDL if it's missing) categories: A list of categories that the video falls in, for example ["Sports", "Berlin"] is_live: True, False, or None (=unknown). Whether this video is a live stream that goes on instead of a fixed-length video. Unless mentioned otherwise, the fields should be Unicode strings. Unless mentioned otherwise, None is equivalent to absence of information. Subclasses of this one should re-define the _real_initialize() and _real_extract() methods and define a _VALID_URL regexp. Probably, they should also be added to the list of extractors. Finally, the _WORKING attribute should be set to False for broken IEs in order to warn the users and skip the tests. """ _ready = False _downloader = None _WORKING = True def __init__(self, downloader=None): """Constructor. Receives an optional downloader.""" self._ready = False self.set_downloader(downloader) @classmethod def suitable(cls, url): """Receives a URL and returns True if suitable for this IE.""" # This does not use has/getattr intentionally - we want to know whether # we have cached the regexp for *this* class, whereas getattr would also # match the superclass if '_VALID_URL_RE' not in cls.__dict__: cls._VALID_URL_RE = re.compile(cls._VALID_URL) return cls._VALID_URL_RE.match(url) is not None @classmethod def _match_id(cls, url): if '_VALID_URL_RE' not in cls.__dict__: cls._VALID_URL_RE = re.compile(cls._VALID_URL) m = cls._VALID_URL_RE.match(url) assert m return m.group('id') @classmethod def working(cls): """Getter method for _WORKING.""" return cls._WORKING def initialize(self): """Initializes an instance (authentication, etc).""" if not self._ready: self._real_initialize() self._ready = True def extract(self, url): """Extracts URL information and returns it in list of dicts.""" self.initialize() return self._real_extract(url) def set_downloader(self, downloader): """Sets the downloader for this IE.""" self._downloader = downloader def _real_initialize(self): """Real initialization process. Redefine in subclasses.""" pass def _real_extract(self, url): """Real extraction process. 
Redefine in subclasses.""" pass @classmethod def ie_key(cls): """A string for getting the InfoExtractor with get_info_extractor""" return cls.__name__[:-2] @property def IE_NAME(self): return type(self).__name__[:-2] def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True): """ Returns the response handle """ if note is None: self.report_download_webpage(video_id) elif note is not False: if video_id is None: self.to_screen('%s' % (note,)) else: self.to_screen('%s: %s' % (video_id, note)) try: return self._downloader.urlopen(url_or_request) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: if errnote is False: return False if errnote is None: errnote = 'Unable to download webpage' errmsg = '%s: %s' % (errnote, compat_str(err)) if fatal: raise ExtractorError(errmsg, sys.exc_info()[2], cause=err) else: self._downloader.report_warning(errmsg) return False def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True): """ Returns a tuple (page content as string, URL handle) """ # Strip hashes from the URL (#1038) if isinstance(url_or_request, (compat_str, str)): url_or_request = url_or_request.partition('#')[0] urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal) if urlh is False: assert not fatal return False content_type = urlh.headers.get('Content-Type', '') webpage_bytes = urlh.read() m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type) if m: encoding = m.group(1) else: m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]', webpage_bytes[:1024]) if m: encoding = m.group(1).decode('ascii') elif webpage_bytes.startswith(b'\xff\xfe'): encoding = 'utf-16' else: encoding = 'utf-8' if self._downloader.params.get('dump_intermediate_pages', False): try: url = url_or_request.get_full_url() except AttributeError: url = url_or_request self.to_screen('Dumping request to ' + url) dump = base64.b64encode(webpage_bytes).decode('ascii') self._downloader.to_screen(dump) if self._downloader.params.get('write_pages', False): try: url = url_or_request.get_full_url() except AttributeError: url = url_or_request basen = '%s_%s' % (video_id, url) if len(basen) > 240: h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest() basen = basen[:240 - len(h)] + h raw_filename = basen + '.dump' filename = sanitize_filename(raw_filename, restricted=True) self.to_screen('Saving request to ' + filename) with open(filename, 'wb') as outf: outf.write(webpage_bytes) try: content = webpage_bytes.decode(encoding, 'replace') except LookupError: content = webpage_bytes.decode('utf-8', 'replace') if ('<title>Access to this site is blocked</title>' in content and 'Websense' in content[:512]): msg = 'Access to this webpage has been blocked by Websense filtering software in your network.' 
blocked_iframe = self._html_search_regex( r'<iframe src="([^"]+)"', content, 'Websense information URL', default=None) if blocked_iframe: msg += ' Visit %s for more details' % blocked_iframe raise ExtractorError(msg, expected=True) return (content, urlh) def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True): """ Returns the data of the page as a string """ res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal) if res is False: return res else: content, _ = res return content def _download_xml(self, url_or_request, video_id, note='Downloading XML', errnote='Unable to download XML', transform_source=None, fatal=True): """Return the xml as an xml.etree.ElementTree.Element""" xml_string = self._download_webpage( url_or_request, video_id, note, errnote, fatal=fatal) if xml_string is False: return xml_string if transform_source: xml_string = transform_source(xml_string) return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8')) def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata', errnote='Unable to download JSON metadata', transform_source=None, fatal=True): json_string = self._download_webpage( url_or_request, video_id, note, errnote, fatal=fatal) if (not fatal) and json_string is False: return None if transform_source: json_string = transform_source(json_string) try: return json.loads(json_string) except ValueError as ve: errmsg = '%s: Failed to parse JSON ' % video_id if fatal: raise ExtractorError(errmsg, cause=ve) else: self.report_warning(errmsg + str(ve)) def report_warning(self, msg, video_id=None): idstr = '' if video_id is None else '%s: ' % video_id self._downloader.report_warning( '[%s] %s%s' % (self.IE_NAME, idstr, msg)) def to_screen(self, msg): """Print msg to screen, prefixing it with '[ie_name]'""" self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg)) def report_extraction(self, id_or_name): """Report information extraction.""" self.to_screen('%s: Extracting information' % id_or_name) def report_download_webpage(self, video_id): """Report webpage download.""" self.to_screen('%s: Downloading webpage' % video_id) def report_age_confirmation(self): """Report attempt to confirm age.""" self.to_screen('Confirming age') def report_login(self): """Report attempt to log in.""" self.to_screen('Logging in') #Methods for following #608 @staticmethod def url_result(url, ie=None, video_id=None): """Returns a url that points to a page that should be processed""" #TODO: ie should be the class used for getting the info video_info = {'_type': 'url', 'url': url, 'ie_key': ie} if video_id is not None: video_info['id'] = video_id return video_info @staticmethod def playlist_result(entries, playlist_id=None, playlist_title=None): """Returns a playlist""" video_info = {'_type': 'playlist', 'entries': entries} if playlist_id: video_info['id'] = playlist_id if playlist_title: video_info['title'] = playlist_title return video_info def _search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0): """ Perform a regex search on the given string, using a single or a list of patterns returning the first matching group. In case of failure return a default value or raise a WARNING or a RegexNotFoundError, depending on fatal, specifying the field name. 
""" if isinstance(pattern, (str, compat_str, compiled_regex_type)): mobj = re.search(pattern, string, flags) else: for p in pattern: mobj = re.search(p, string, flags) if mobj: break if os.name != 'nt' and sys.stderr.isatty(): _name = '\033[0;34m%s\033[0m' % name else: _name = name if mobj: # return the first matching group return next(g for g in mobj.groups() if g is not None) elif default is not _NO_DEFAULT: return default elif fatal: raise RegexNotFoundError('Unable to extract %s' % _name) else: self._downloader.report_warning('unable to extract %s; ' 'please report this issue on http://yt-dl.org/bug' % _name) return None def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0): """ Like _search_regex, but strips HTML tags and unescapes entities. """ res = self._search_regex(pattern, string, name, default, fatal, flags) if res: return clean_html(res).strip() else: return res def _get_login_info(self): """ Get the the login info as (username, password) It will look in the netrc file using the _NETRC_MACHINE value If there's no info available, return (None, None) """ if self._downloader is None: return (None, None) username = None password = None downloader_params = self._downloader.params # Attempt to use provided username and password or .netrc data if downloader_params.get('username', None) is not None: username = downloader_params['username'] password = downloader_params['password'] elif downloader_params.get('usenetrc', False): try: info = netrc.netrc().authenticators(self._NETRC_MACHINE) if info is not None: username = info[0] password = info[2] else: raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE) except (IOError, netrc.NetrcParseError) as err: self._downloader.report_warning('parsing .netrc: %s' % compat_str(err)) return (username, password) def _get_tfa_info(self): """ Get the two-factor authentication info TODO - asking the user will be required for sms/phone verify currently just uses the command line option If there's no info available, return None """ if self._downloader is None: return None downloader_params = self._downloader.params if downloader_params.get('twofactor', None) is not None: return downloader_params['twofactor'] return None # Helper functions for extracting OpenGraph info @staticmethod def _og_regexes(prop): content_re = r'content=(?:"([^>]+?)"|\'([^>]+?)\')' property_re = r'(?:name|property)=[\'"]og:%s[\'"]' % re.escape(prop) template = r'<meta[^>]+?%s[^>]+?%s' return [ template % (property_re, content_re), template % (content_re, property_re), ] def _og_search_property(self, prop, html, name=None, **kargs): if name is None: name = 'OpenGraph %s' % prop escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs) if escaped is None: return None return unescapeHTML(escaped) def _og_search_thumbnail(self, html, **kargs): return self._og_search_property('image', html, 'thumbnail url', fatal=False, **kargs) def _og_search_description(self, html, **kargs): return self._og_search_property('description', html, fatal=False, **kargs) def _og_search_title(self, html, **kargs): return self._og_search_property('title', html, **kargs) def _og_search_video_url(self, html, name='video url', secure=True, **kargs): regexes = self._og_regexes('video') + self._og_regexes('video:url') if secure: regexes = self._og_regexes('video:secure_url') + regexes return self._html_search_regex(regexes, html, name, **kargs) def _og_search_url(self, html, **kargs): return 
self._og_search_property('url', html, **kargs) def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs): if display_name is None: display_name = name return self._html_search_regex( r'''(?ix)<meta (?=[^>]+(?:itemprop|name|property)=["\']?%s["\']?) [^>]+content=["\']([^"\']+)["\']''' % re.escape(name), html, display_name, fatal=fatal, **kwargs) def _dc_search_uploader(self, html): return self._html_search_meta('dc.creator', html, 'uploader') def _rta_search(self, html): # See http://www.rtalabel.org/index.php?content=howtofaq#single if re.search(r'(?ix)<meta\s+name="rating"\s+' r' content="RTA-5042-1996-1400-1577-RTA"', html): return 18 return 0 def _media_rating_search(self, html): # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/ rating = self._html_search_meta('rating', html) if not rating: return None RATING_TABLE = { 'safe for kids': 0, 'general': 8, '14 years': 14, 'mature': 17, 'restricted': 19, } return RATING_TABLE.get(rating.lower(), None) def _twitter_search_player(self, html): return self._html_search_meta('twitter:player', html, 'twitter card player') def _sort_formats(self, formats): if not formats: raise ExtractorError('No video formats found') def _formats_key(f): # TODO remove the following workaround from ..utils import determine_ext if not f.get('ext') and 'url' in f: f['ext'] = determine_ext(f['url']) preference = f.get('preference') if preference is None: proto = f.get('protocol') if proto is None: proto = compat_urllib_parse_urlparse(f.get('url', '')).scheme preference = 0 if proto in ['http', 'https'] else -0.1 if f.get('ext') in ['f4f', 'f4m']: # Not yet supported preference -= 0.5 if f.get('vcodec') == 'none': # audio only if self._downloader.params.get('prefer_free_formats'): ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus'] else: ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a'] ext_preference = 0 try: audio_ext_preference = ORDER.index(f['ext']) except ValueError: audio_ext_preference = -1 else: if self._downloader.params.get('prefer_free_formats'): ORDER = ['flv', 'mp4', 'webm'] else: ORDER = ['webm', 'flv', 'mp4'] try: ext_preference = ORDER.index(f['ext']) except ValueError: ext_preference = -1 audio_ext_preference = 0 return ( preference, f.get('quality') if f.get('quality') is not None else -1, f.get('height') if f.get('height') is not None else -1, f.get('width') if f.get('width') is not None else -1, ext_preference, f.get('tbr') if f.get('tbr') is not None else -1, f.get('vbr') if f.get('vbr') is not None else -1, f.get('abr') if f.get('abr') is not None else -1, audio_ext_preference, f.get('filesize') if f.get('filesize') is not None else -1, f.get('filesize_approx') if f.get('filesize_approx') is not None else -1, f.get('format_id'), ) formats.sort(key=_formats_key) def http_scheme(self): """ Either "https:" or "https:", depending on the user's preferences """ return ( 'http:' if self._downloader.params.get('prefer_insecure', False) else 'https:') def _proto_relative_url(self, url, scheme=None): if url is None: return url if url.startswith('//'): if scheme is None: scheme = self.http_scheme() return scheme + url else: return url def _sleep(self, timeout, video_id, msg_template=None): if msg_template is None: msg_template = '%(video_id)s: Waiting for %(timeout)s seconds' msg = msg_template % {'video_id': video_id, 'timeout': timeout} self.to_screen(msg) time.sleep(timeout) def _extract_f4m_formats(self, manifest_url, video_id): manifest = self._download_xml( manifest_url, video_id, 
'Downloading f4m manifest', 'Unable to download f4m manifest') formats = [] media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media') for i, media_el in enumerate(media_nodes): tbr = int_or_none(media_el.attrib.get('bitrate')) format_id = 'f4m-%d' % (i if tbr is None else tbr) formats.append({ 'format_id': format_id, 'url': manifest_url, 'ext': 'flv', 'tbr': tbr, 'width': int_or_none(media_el.attrib.get('width')), 'height': int_or_none(media_el.attrib.get('height')), }) self._sort_formats(formats) return formats def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None, entry_protocol='m3u8', preference=None): formats = [{ 'format_id': 'm3u8-meta', 'url': m3u8_url, 'ext': ext, 'protocol': 'm3u8', 'preference': -1, 'resolution': 'multiple', 'format_note': 'Quality selection URL', }] format_url = lambda u: ( u if re.match(r'^https?://', u) else compat_urlparse.urljoin(m3u8_url, u)) m3u8_doc = self._download_webpage(m3u8_url, video_id) last_info = None kv_rex = re.compile( r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)') for line in m3u8_doc.splitlines(): if line.startswith('#EXT-X-STREAM-INF:'): last_info = {} for m in kv_rex.finditer(line): v = m.group('val') if v.startswith('"'): v = v[1:-1] last_info[m.group('key')] = v elif line.startswith('#') or not line.strip(): continue else: if last_info is None: formats.append({'url': format_url(line)}) continue tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000) f = { 'format_id': 'm3u8-%d' % (tbr if tbr else len(formats)), 'url': format_url(line.strip()), 'tbr': tbr, 'ext': ext, 'protocol': entry_protocol, 'preference': preference, } codecs = last_info.get('CODECS') if codecs: # TODO: looks like video codec is not always necessarily goes first va_codecs = codecs.split(',') if va_codecs[0]: f['vcodec'] = va_codecs[0].partition('.')[0] if len(va_codecs) > 1 and va_codecs[1]: f['acodec'] = va_codecs[1].partition('.')[0] resolution = last_info.get('RESOLUTION') if resolution: width_str, height_str = resolution.split('x') f['width'] = int(width_str) f['height'] = int(height_str) formats.append(f) last_info = {} self._sort_formats(formats) return formats def _live_title(self, name): """ Generate the title for a live video """ now = datetime.datetime.now() now_str = now.strftime("%Y-%m-%d %H:%M") return name + ' ' + now_str def _int(self, v, name, fatal=False, **kwargs): res = int_or_none(v, **kwargs) if 'get_attr' in kwargs: print(getattr(v, kwargs['get_attr'])) if res is None: msg = 'Failed to extract %s: Could not parse value %r' % (name, v) if fatal: raise ExtractorError(msg) else: self._downloader.report_warning(msg) return res def _float(self, v, name, fatal=False, **kwargs): res = float_or_none(v, **kwargs) if res is None: msg = 'Failed to extract %s: Could not parse value %r' % (name, v) if fatal: raise ExtractorError(msg) else: self._downloader.report_warning(msg) return res class SearchInfoExtractor(InfoExtractor): """ Base class for paged search queries extractors. They accept urls in the format _SEARCH_KEY(|all|[0-9]):{query} Instances should define _SEARCH_KEY and _MAX_RESULTS. 
""" @classmethod def _make_valid_url(cls): return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY @classmethod def suitable(cls, url): return re.match(cls._make_valid_url(), url) is not None def _real_extract(self, query): mobj = re.match(self._make_valid_url(), query) if mobj is None: raise ExtractorError('Invalid search query "%s"' % query) prefix = mobj.group('prefix') query = mobj.group('query') if prefix == '': return self._get_n_results(query, 1) elif prefix == 'all': return self._get_n_results(query, self._MAX_RESULTS) else: n = int(prefix) if n <= 0: raise ExtractorError('invalid download number %s for query "%s"' % (n, query)) elif n > self._MAX_RESULTS: self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n)) n = self._MAX_RESULTS return self._get_n_results(query, n) def _get_n_results(self, query, n): """Get a specified number of results for a query""" raise NotImplementedError("This method must be implemented by subclasses") @property def SEARCH_KEY(self): return self._SEARCH_KEY
xu-cheng/youtube-dl
youtube_dl/extractor/common.py
Python
unlicense
32,240
[ "VisIt" ]
47a0ee592f3d29fe619ebf01842f168db198400ae1fce72770b84b3dbeb0e0da
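_sort_formats() in the row above ranks formats worst-to-best by building one comparison tuple per format and mapping missing (None) fields to -1 so they sort below any known value; later tuple entries only break ties among earlier ones. A heavily simplified sketch of that idea — the sample format dicts are invented, and the real key includes many more fields (protocol, codecs, extension preferences, filesize, ...):

def format_key(f):
    """Sort key in the spirit of _formats_key: preference dominates,
    then quality, height, and bitrate break ties; None becomes -1."""
    rank = lambda v: v if v is not None else -1
    return (
        rank(f.get('preference')),
        rank(f.get('quality')),
        rank(f.get('height')),
        rank(f.get('tbr')),
    )

formats = [
    {'format_id': 'low', 'height': 360, 'tbr': 700},
    {'format_id': 'hd', 'height': 1080, 'tbr': 4500},
    {'format_id': 'audio', 'height': None, 'tbr': 128},
]
formats.sort(key=format_key)  # worst first, best last
print([f['format_id'] for f in formats])  # ['audio', 'low', 'hd']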
# -*- coding: latin-1 -*- import os import sys import shutil import string import makerEditorWxView import makerCopyright import wx.lib.buttons import wx.gizmos import wx.lib.imagebrowser as ib from wx.lib.anchors import LayoutAnchors import wx.lib.flatnotebook as nb import wx.py as pyShell import math # Used on OSX to get access to carbon api constants if wx.Platform == '__WXMAC__': import Carbon.Appearance def create(app): return wxPythonGUI(app) class wxPythonGUI(wx.Frame): def _init_coll_boxSizer1_Items(self, parent): #this is actually a wxWindow #parent.Add(self.topPanel, 0, border=0, flag=wx.FIXED_MINSIZE | wx.EXPAND) parent.Add(self.splitter, 1, border=0, flag=wx.EXPAND | wx.GROW) def _init_coll_boxSizer2_Items(self, parent): pass #parent.Add(self.notebook1, 0, border=0, flag=wx.EXPAND) #parent.Add(self.styledTextCtrl1, 1, border=0, flag=wx.EXPAND) #this is some menu items def _init_coll_code_Items(self, parent): self.MenuItemHTML = parent.AppendMenu(help='(X)HTML Tags', id=-1, submenu = self.subMenuHTML, text=u'HTML' ) parent.AppendSeparator() self.MenuItemCSS = parent.AppendMenu(help='CSS', id=-1, submenu = self.subMenuCSS, text=u'CSS' ) parent.AppendSeparator() self.MenuItemMarkers = parent.AppendMenu(help='insert a marker', id=-1, submenu = self.SubMenuMarkers, text=u'Markers' ) parent.AppendSeparator() self.MenuItemComment = parent.Append(help='Insert Comment !', id=-1, kind=wx.ITEM_NORMAL, text=u'Comment\tCtrl+Shift+c' ) parent.AppendSeparator() self.MenuItemMarkdown = parent.Append(help='Markdown !', id=-1, kind=wx.ITEM_NORMAL, text=u'Markdown\tCtrl+Shift+m' ) def _init_coll_ftp_Items(self, parent): self.MenuItemEditDist = parent.Append(help='edit the distribution table', id=-1, kind=wx.ITEM_NORMAL, text=u'Edit Distribution Table' ) parent.AppendSeparator() self.MenuItemPublish = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'Publish\tCtrl+u' ) self.MenuItemFullUpload = parent.Append(help='upload everything', id=-1, kind=wx.ITEM_NORMAL, text=u'Upload Everything' ) self.MenuItemBrowseFtp = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'Browse Server' ) parent.AppendSeparator() self.MenuItemSetupFTP = parent.Append(help='setup your project', id=-1, kind=wx.ITEM_NORMAL, text=u'Setup FTP Connection' ) self.Bind(wx.EVT_MENU, self.OnFtpDistributiontableMenu, self.MenuItemEditDist ) # self.Bind(wx.EVT_MENU, # self.OnFtpUploadMenu, # self.MenuItemUploadFile # ) self.Bind(wx.EVT_MENU, self.CallController, self.MenuItemFullUpload ) def _init_coll_edit_Items(self, parent): self.MenuItemUndo = parent.Append(help='undo', id=-1, kind=wx.ITEM_NORMAL, text=u'Undo\tCtrl+z' ) self.MenuItemRedo = parent.Append(help=u'copy selection', id=-1, kind=wx.ITEM_NORMAL, text=u'Redo\tCtrl+y' ) parent.AppendSeparator() self.MenuItemCut = parent.Append(help='cut', id=-1, kind=wx.ITEM_NORMAL, text=u'Cut\tCtrl+x' ) self.MenuItemCopy = parent.Append(help=u'copy selection', id=-1, kind=wx.ITEM_NORMAL, text=u'Copy\tCtrl+c' ) self.MenuItemPaste = parent.Append(help='paste selection', id=-1, kind=wx.ITEM_NORMAL, text=u'Paste\tCtrl+v' ) parent.AppendSeparator() self.MenuItemReplace = parent.Append(help='Replace', id=-1, kind=wx.ITEM_NORMAL, text=u'Replace\tCtrl+r' ) self.MenuItemFind = parent.Append(help='Find', id=-1, kind=wx.ITEM_NORMAL, text=u'Find\tCtrl+f' ) self.MenuItemFindNext = parent.Append(help='Find Next', id=-1, kind=wx.ITEM_NORMAL, text=u'Find Next\tCtrl+g' ) # # self.Bind(wx.EVT_MENU, # self.CallController, # self.MenuItemFind # ) # # self.Bind(wx.EVT_MENU, # 
self.CallController, # self.MenuItemFindNext # ) # # # self.Bind(wx.EVT_MENU, # self.OnEditEnlargeMenu, # self.MenuItemFontInc # ) # # self.Bind(wx.EVT_MENU, # self.OnEditReduceMenu, # self.MenuItemFontDec # ) # def _init_coll_view_Items(self, parent): self.MenuItemWrapWord = parent.Append(help='Wrap Words In Editor', id=-1, kind=wx.ITEM_CHECK, text=u'Wrap Words In Editor\tCtrl+Shift+w') parent.AppendSeparator() self.MenuItemEditorStyles = parent.AppendMenu(help='Editor Styles', id=-1, submenu = self.subMenuEditorStyles, text=u'Editor Styles' ) parent.AppendSeparator() self.MenuItemFontInc = parent.Append(help='Increase Font Size', id=-1, kind=wx.ITEM_NORMAL, text=u'Increase Font Size\tCtrl+=' ) self.MenuItemFontDec = parent.Append(help='reduce Font Size', id=-1, kind=wx.ITEM_NORMAL, text=u'Decrease Font Size\tCtrl+-' ) self.MenuItemFontNormal = parent.Append(help='Font Size to default', id=-1, kind=wx.ITEM_NORMAL, text=u'Font Size To Normal\tCtrl+0' ) def _init_coll_filetypes_Items(self, parent): self.MenuItemEditHead = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'Edit .head') self.MenuItemEditRssHead = parent.Append(help='edit title for RSS feed', id=-1, kind=wx.ITEM_NORMAL, text=u'Edit RSS Title' ) self.MenuItemLanguages = parent.AppendMenu(help='manage project languages', id=-1, submenu = self.subMenuLanguages, text=u'Project Languages' ) parent.AppendSeparator() self.MenuItemSelectColor = parent.Append(help='Choose a color', id=-1, kind=wx.ITEM_NORMAL, text=u'Select Color') parent.AppendSeparator() self.MenuItemUnderline = parent.Append(help='Underline', id=-1, kind=wx.ITEM_NORMAL, text=u'Underline') self.MenuItemOblique = parent.Append(help='Oblique', id=-1, kind=wx.ITEM_NORMAL, text=u'Oblique') self.MenuItemBold = parent.Append(help='Bold', id=-1, kind=wx.ITEM_NORMAL, text=u'Bold') self.MenuItemLine_through = parent.Append(help='Line Through', id=-1, kind=wx.ITEM_NORMAL, text=u'Line Through') def _init_coll_sub_menu_markers(self, parent): self.MenuItemMarkerTodaysDate = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'Todays date - !todaysDate!') self.MenuItemMarkerProjectName = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'Project Name - !projectName!') self.MenuItemMarkerPageName = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'Page Name - !pageName!') self.MenuItemMarkerCreationDate = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'Creation Date - !creationDate!') def _init_coll_sub_menu_languages(self, parent): self.MenuItemAddLanguage = parent.Append(help='add a language to this project', id=-1, kind=wx.ITEM_NORMAL, text=u'Add Language') self.MenuItemRemoveLanguage = parent.Append(help='remove language from project', id=-1, kind=wx.ITEM_NORMAL, text=u'Remove Language') def _init_coll_new_files_Items(self, parent): self.MenuItemNewContentFile = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'.content\tCtrl-N') self.MenuItemNewCssFile = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'.css' ) self.MenuItemNewCgiFile = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'.cgi' ) self.MenuItemNewJsFile = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'.js' ) self.MenuItemNewTxtFile = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'.txt' ) # added by Gerald July 7th 07 self.MenuItemNewHtmlFile = parent.Append(help='add new .html file', id=-1, kind=wx.ITEM_NORMAL, text=u'.html' ) self.MenuItemNewXmlFile = parent.Append(help='add new .xml file', id=-1, kind=wx.ITEM_NORMAL, text=u'.xml' ) 
self.MenuItemNewPhpFile = parent.Append(help='add new .php file', id=-1, kind=wx.ITEM_NORMAL, text=u'.php' ) self.MenuItemNewDynamicFile = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'.dynamic' ) parent.AppendSeparator() self.MenuItemNewOtherFile = parent.Append(help='add any file', id=-1, kind=wx.ITEM_NORMAL, text=u'other...' ) def _init_coll_pages_Items(self, parent): self.MenuItemAddProject = parent.Append(help='create new project', id=-1, kind=wx.ITEM_NORMAL, text=u'New Project\tCtrl+Shift+N' ) self.MenuItemOpenProject = parent.Append(help='Open A Maker Project', id=-1, kind=wx.ITEM_NORMAL, text=u'Open Project\tCtrl+O') parent.AppendSeparator() self.MenuItemNewFiles = parent.AppendMenu(help='Add New File', id=-1, submenu = self.new_files, text=u'New File' ) self.MenuItemSaveFile = parent.Append(help=u'save File', id=-1, kind=wx.ITEM_NORMAL, text=u'Save File\tCtrl+S' ) self.MenuItemDeleteFile = parent.Append(help='delete file', id=-1, kind=wx.ITEM_NORMAL, text=u'Delete File' ) self.MenuItemRenameFile = parent.Append(help='rename file', id=-1, kind=wx.ITEM_NORMAL, text=u'Rename File' ) self.MenuItemSaveAsTemplate = parent.Append(help='Save As Template', id=-1, kind=wx.ITEM_NORMAL, text=u'Save As Template' ) self.MenuItemAddToFTPQueue = parent.Append(help='Add To FTP Queue', id=-1, kind=wx.ITEM_NORMAL, text=u'Add To FTP Queue' ) self.MenuItemCloseFile = parent.Append(help='close file', id=-1, kind=wx.ITEM_NORMAL, text=u'Close File\tCtrl+W' ) parent.AppendSeparator() self.MenuItemPreview = parent.Append(help=u'preview', id=-1, kind=wx.ITEM_NORMAL, text=u'Preview File\tF5' ) parent.AppendSeparator() self.MenuItemImportFile = parent.Append(help=u'import File', id=-1, kind=wx.ITEM_NORMAL, text=u'Import File(s)' ) parent.AppendSeparator() self.MenuItemManageProjects = parent.Append(help='Manage Projects', id=-1, kind=wx.ITEM_NORMAL, text=u'Manage Projects') self.MenuItemImportProject = parent.Append(help="Import 'Classic' Maker Project", id=-1, kind=wx.ITEM_NORMAL, text=u"Import 'Classic' Project") # # self.MenuItemDeleteProject = parent.Append(help='Delete a maker project', # id=-1, # kind=wx.ITEM_NORMAL, # text=u'Delete Project') parent.AppendSeparator() self.MenuItemSaveProjectAsTemplate = parent.Append(help='Save project as template', id=-1, kind=wx.ITEM_NORMAL, text=u'Save Project As Template') parent.AppendSeparator() self.MenuItemPrint = parent.Append(help='Print file', id=-1, kind=wx.ITEM_NORMAL, text=u'Print ' ) parent.AppendSeparator() self.MenuItemQuit = parent.Append(help=u'leave program', id=wx.ID_EXIT, kind=wx.ITEM_NORMAL, text=u'Exit' ) # # self.Bind(wx.EVT_MENU, # self.OnDeleteFile, # self.MenuItemDeleteFile # ) # # self.Bind(wx.EVT_MENU, # self.OnPagesAddprojectMenu, # self.MenuItemAddProject # ) # # self.Bind(wx.EVT_MENU, # self.OnPagesImportProject, # self.MenuItemImportProject # ) # # # # def _init_coll_languages_Items(self, parent): # # # self.MenuItemDeutsch = parent.Append(help='', # id=-1, # kind=wx.ITEM_NORMAL, # text=u'German (de)' # ) # # self.MenuItemEnglish = parent.Append(help='', # id=-1, # kind=wx.ITEM_NORMAL, # text=u'English (en)' # ) # # def _init_coll_mainMenuBar_Menus(self, parent): parent.Append(menu=self.pages, title=u'Files' ) parent.Append(menu=self.edit, title=u'Edit' ) parent.Append(menu=self.view, title=u'View' ) # parent.Append(menu=self.parts, # title=u'Parts' # ) parent.Append(menu=self.images, title=u'Images' ) parent.Append(menu=self.ftp, title=u'FTP' ) parent.Append(menu=self.filetypes, title=u'Tools' ) 
parent.Append(menu=self.insert, title=u'Insert' ) parent.Append(menu=self.help, title=u'Help' ) # def _init_coll_parts_Items(self, parent): # # # self.MenuItemEditNav = parent.Append(help='', # id=-1, # kind=wx.ITEM_NORMAL, # text=u'Edit Navigation' # ) # # self.MenuItemEditBody = parent.Append(help='', # id=-1, # kind=wx.ITEM_NORMAL, # text=u'Edit Top of Page' # ) # # self.MenuItemEditFoot = parent.Append(help='', # id=-1, # kind=wx.ITEM_NORMAL, # text=u'Edit Foot' # ) # parent.AppendSeparator() # def _init_coll_help_Items(self, parent): parent.AppendSeparator() self.MenuItemTutorial = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'Tutorial' ) parent.AppendSeparator() self.MenuItemLearnHTMLandCSS = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'HTML and CSS Resources' ) parent.AppendSeparator() self.MenuItemFeedback = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'Feedback' ) self.MenuItemBugReport = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'Bugreport' ) self.MenuItemWebsite = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'Visit Project Website' ) parent.AppendSeparator() self.MenuItemLicense = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'License') # this part is for the Mac App About Dialog is putting the About on Win in the # Help menu #------------------------------------------ parent.Append(wx.ID_ABOUT, "&About") # self.Bind(wx.EVT_MENU, # self.OnHelpItemsAbout, # id=wx.ID_ABOUT # ) # ------------------------------------------ # self.MenuItemAbout = parent.Append(help='',id = -1, kind=wx.ITEM_NORMAL, text=u'About') # # self.Bind(wx.EVT_MENU, # self.OnHelpItemsAbout, # self.MenuItemAbout # ) # self.Bind(wx.EVT_MENU, # self.OnHelpItemsTutorial, # self.MenuItemTutorial # ) # self.Bind(wx.EVT_MENU, # self.OnCheckForUpdate, # self.MenuItemUpdate # ) # self.Bind(wx.EVT_MENU, # self.OnFeedbackMenu, # self.MenuItemFeedback # ) # # self.Bind(wx.EVT_MENU, # self.OnHelpWebsiteMenu, # self.MenuItemWebsite # ) # self.Bind(wx.EVT_MENU, # self.OnHelpReport_a_bugMenu, # self.MenuItemBugReport # ) # self.Bind(wx.EVT_MENU, # self.OnHelpDonateMenu, # self.MenuItemDonation # ) def _init_coll_images_Items(self, parent): self.MenuItemImportImage = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'Import Image' ) self.MenuItemDeleteImage = parent.Append(help='', id=-1, kind=wx.ITEM_NORMAL, text=u'Delete Image' ) parent.AppendSeparator() self.MenuItemSyncImages = parent.Append(help='sync images', id=-1, kind=wx.ITEM_NORMAL, text=u'Sync Images With Server' ) #status bar def _init_coll_statusBar1_Fields(self, parent): parent.SetFieldsCount(5) parent.SetStatusText(number=0, text=u'Status') parent.SetStatusText(number=1, text=u'Current Project') parent.SetStatusText(number=2, text=u'Language') parent.SetStatusText(number=3, text=u'Current File') parent.SetStatusText(number=4, text=u'Files in queue ') parent.SetStatusWidths([-1, -1, -1, -1, 200]) def _init_utils(self): self.mainMenuBar = wx.MenuBar() self.pages = wx.Menu(title=u'') self.edit = wx.Menu(title=u'') self.view = wx.Menu(title=u'') self.ftp = wx.Menu(title=u'') self.insert = wx.Menu(title=u'') self.images = wx.Menu(title=u'') self.filetypes = wx.Menu(title=u'') self.help = wx.Menu(title=u'') self.additional_projects = wx.Menu(title=u'') self.editorStyles = wx.Menu(title=u'') self.new_files = wx.Menu(title=u'') self.SubMenuMarkers = wx.Menu(title='') self.subMenuLanguages = wx.Menu(title='') self.subMenuEditorStyles = wx.Menu(title='') 
self._init_coll_mainMenuBar_Menus(self.mainMenuBar) self._init_coll_pages_Items(self.pages) self._init_coll_edit_Items(self.edit) self._init_coll_view_Items(self.view) self._init_coll_ftp_Items(self.ftp) self._init_coll_images_Items(self.images) self._init_coll_filetypes_Items(self.filetypes) self._init_coll_help_Items(self.help) self._init_coll_new_files_Items(self.new_files) self._init_coll_sub_menu_markers(self.SubMenuMarkers) self._init_coll_sub_menu_languages(self.subMenuLanguages) # the submenus #--------- #--------- #--------- self.subMenuStructure = wx.Menu(title=u'') #--------- #--------- #--------- self.subMenuMeta_Information = wx.Menu(title=u'') #--------- #--------- #--------- self.subMenuText = wx.Menu(title=u'') #--------- #--------- #--------- self.subMenuLinks = wx.Menu(title=u'') #--------- #--------- #--------- self.subMenuImages_and_Objects = wx.Menu(title=u'') #--------- #--------- #--------- self.subMenuLists = wx.Menu(title=u'') #--------- #--------- #--------- self.subMenuTables = wx.Menu(title=u'') #--------- #--------- #--------- self.subMenuForms = wx.Menu(title=u'') #--------- #--------- #--------- self.subMenuScripting = wx.Menu(title=u'') #--------- #--------- #--------- self.subMenuPresentational = wx.Menu(title=u'') self.subMenuHTML = wx.Menu(title=u'') self.subMenuCSS = wx.Menu(title=u'') #-------- #-------- self.subMenuHTML.AppendMenu(help='Structure', id=-1, submenu = self.subMenuStructure, text=u'Structure') self.subMenuHTML.AppendMenu(help='Meta_Information', id=-1, submenu = self.subMenuMeta_Information, text=u'Meta_Information') self.subMenuHTML.AppendMenu(help='Text', id=-1, submenu = self.subMenuText, text=u'Text') self.subMenuHTML.AppendMenu(help='Links', id=-1, submenu = self.subMenuLinks, text=u'Links') self.subMenuHTML.AppendMenu(help='Images_and_Objects', id=-1, submenu = self.subMenuImages_and_Objects, text=u'Images_and_Objects') self.subMenuHTML.AppendMenu(help='Lists', id=-1, submenu = self.subMenuLists, text=u'Lists') self.subMenuHTML.AppendMenu(help='Tables', id=-1, submenu = self.subMenuTables, text=u'Tables') self.subMenuHTML.AppendMenu(help='Forms', id=-1, submenu = self.subMenuForms, text=u'Forms') self.subMenuHTML.AppendMenu(help='Scripting', id=-1, submenu = self.subMenuScripting, text=u'Scripting') self.subMenuHTML.AppendMenu(help='Presentational', id=-1, submenu = self.subMenuPresentational, text=u'Presentational') # Menu Items self.MenuItemHTML_body = self.subMenuStructure.Append(help='The main body of an HTML document', id=-1, kind=wx.ITEM_NORMAL, text=u'<body> The main body of an HTML document') self.MenuItemHTML_div = self.subMenuStructure.Append(help='Division. Defines a block of HTML', id=-1, kind=wx.ITEM_NORMAL, text=u'<div> Division. 
Defines a block of HTML\tCtrl+Shift+D') self.MenuItemHTML_head = self.subMenuStructure.Append(help='The header of an HTML document', id=-1, kind=wx.ITEM_NORMAL, text=u'<head> The header of an HTML document') self.MenuItemHTML_html = self.subMenuStructure.Append(help='The root element of the (X)HTML document', id=-1, kind=wx.ITEM_NORMAL, text=u'<html> The root element of the (X)HTML document') self.MenuItemHTML_span = self.subMenuStructure.Append(help='Used to group in-line HTML', id=-1, kind=wx.ITEM_NORMAL, text=u'<span> Used to group in-line HTML') # menu items self.MenuItemHTML_DOCTYPE = self.subMenuMeta_Information.Append(help='Document type declaration', id=-1, kind=wx.ITEM_NORMAL, text=u'<DOCTYPE> Document type declaration') self.MenuItemHTML_link = self.subMenuMeta_Information.Append(help='Defines a link to an external resource', id=-1, kind=wx.ITEM_NORMAL, text=u'<link> Defines a link to an external resource') self.MenuItemHTML_meta = self.subMenuMeta_Information.Append(help='Meta information', id=-1, kind=wx.ITEM_NORMAL, text=u'<meta> Meta information') self.MenuItemHTML_style = self.subMenuMeta_Information.Append(help='Used to define CSS at a page-level ', id=-1, kind=wx.ITEM_NORMAL, text=u'<style> Used to define CSS at a page-level ') self.MenuItemHTML_title = self.subMenuMeta_Information.Append(help='The title of a page', id=-1, kind=wx.ITEM_NORMAL, text=u'<title> The title of a page') # menu items self.MenuItemHTML_abbr = self.subMenuText.Append(help='Abbreviation', id=-1, kind=wx.ITEM_NORMAL, text=u'<abbr> Abbreviation') self.MenuItemHTML_acronym = self.subMenuText.Append(help='Acronym', id=-1, kind=wx.ITEM_NORMAL, text=u'<acronym> Acronym') self.MenuItemHTML_address = self.subMenuText.Append(help='Address', id=-1, kind=wx.ITEM_NORMAL, text=u'<address> Address') self.MenuItemHTML_bdo = self.subMenuText.Append(help='Bi-directional text', id=-1, kind=wx.ITEM_NORMAL, text=u'<bdo> Bi-directional text') self.MenuItemHTML_blockquote = self.subMenuText.Append(help='A large quotation', id=-1, kind=wx.ITEM_NORMAL, text=u'<blockquote> A large quotation') self.MenuItemHTML_br = self.subMenuText.Append(help='A line break', id=-1, kind=wx.ITEM_NORMAL, text=u'<br /> A line break\tCtrl+Return') self.MenuItemHTML_cite = self.subMenuText.Append(help='in-line citation', id=-1, kind=wx.ITEM_NORMAL, text=u'<cite> in-line citation') self.MenuItemHTML_code = self.subMenuText.Append(help='Computer code', id=-1, kind=wx.ITEM_NORMAL, text=u'<code> Computer code') self.MenuItemHTML_del = self.subMenuText.Append(help='Deletion', id=-1, kind=wx.ITEM_NORMAL, text=u'<del> Deletion') self.MenuItemHTML_dfn = self.subMenuText.Append(help='Definition term', id=-1, kind=wx.ITEM_NORMAL, text=u'<dfn> Definition term') self.MenuItemHTML_em = self.subMenuText.Append(help='Emphasis', id=-1, kind=wx.ITEM_NORMAL, text=u'<em> Emphasis') self.MenuItemHTML_h1 = self.subMenuText.Append(help='Heading size 1', id=-1, kind=wx.ITEM_NORMAL, text=u'<h1> Heading size 1\tCtrl+Shift+h') self.MenuItemHTML_h2 = self.subMenuText.Append(help='Heading size 2', id=-1, kind=wx.ITEM_NORMAL, text=u'<h2> Heading size 2') self.MenuItemHTML_h3 = self.subMenuText.Append(help='Heading size 3', id=-1, kind=wx.ITEM_NORMAL, text=u'<h3> Heading size 3') self.MenuItemHTML_h4 = self.subMenuText.Append(help='Heading size 4', id=-1, kind=wx.ITEM_NORMAL, text=u'<h4> Heading size 4') self.MenuItemHTML_h5 = self.subMenuText.Append(help='Heading size 5', id=-1, kind=wx.ITEM_NORMAL, text=u'<h5> Heading size 5') self.MenuItemHTML_h6 = 
self.subMenuText.Append(help='Heading size 6', id=-1, kind=wx.ITEM_NORMAL, text=u'<h6> Heading size 6') self.MenuItemHTML_ins = self.subMenuText.Append(help='Insertion', id=-1, kind=wx.ITEM_NORMAL, text=u'<ins> Insertion') self.MenuItemHTML_kbd = self.subMenuText.Append(help='text that should be typed in by the user', id=-1, kind=wx.ITEM_NORMAL, text=u'<kbd> text that should be typed in by the user') self.MenuItemHTML_p = self.subMenuText.Append(help='Paragraph', id=-1, kind=wx.ITEM_NORMAL, text=u'<p> Paragraph\tCtrl+Shift+p') self.MenuItemHTML_pre = self.subMenuText.Append(help='Preformatted text', id=-1, kind=wx.ITEM_NORMAL, text=u'<pre> Preformatted text') self.MenuItemHTML_q = self.subMenuText.Append(help='An in-line quote', id=-1, kind=wx.ITEM_NORMAL, text=u'<q> An in-line quote') self.MenuItemHTML_samp = self.subMenuText.Append(help='Sample', id=-1, kind=wx.ITEM_NORMAL, text=u'<samp> Sample') self.MenuItemHTML_strong = self.subMenuText.Append(help='Strong emphasis.', id=-1, kind=wx.ITEM_NORMAL, text=u'<strong> Strong emphasis.') self.MenuItemHTML_var = self.subMenuText.Append(help='Variable', id=-1, kind=wx.ITEM_NORMAL, text=u'<var> Variable') # menu items self.MenuItemHTML_a = self.subMenuLinks.Append(help='Anchor. Primarily used as a hypertext link.', id=-1, kind=wx.ITEM_NORMAL, text=u'<a> Anchor. Primarily used as a hypertext link.\tCtrl+Shift+a') self.MenuItemHTML_base = self.subMenuLinks.Append(help='base location for links on a page', id=-1, kind=wx.ITEM_NORMAL, text=u'<base> base location for links on a page') # menu items self.MenuItemHTML_area = self.subMenuImages_and_Objects.Append(help='A region of a client-side image map', id=-1, kind=wx.ITEM_NORMAL, text=u'<area> A region of a client-side image map') self.MenuItemHTML_img = self.subMenuImages_and_Objects.Append(help='Image', id=-1, kind=wx.ITEM_NORMAL, text=u'<img> Image') self.MenuItemHTML_map = self.subMenuImages_and_Objects.Append(help='client-side image map', id=-1, kind=wx.ITEM_NORMAL, text=u'<map> client-side image map') self.MenuItemHTML_object = self.subMenuImages_and_Objects.Append(help='An embedded multimedia object', id=-1, kind=wx.ITEM_NORMAL, text=u'<object> An embedded multimedia object') self.MenuItemHTML_param = self.subMenuImages_and_Objects.Append(help='Parameter of an object', id=-1, kind=wx.ITEM_NORMAL, text=u'<param> Parameter of an object') # menu items self.MenuItemHTML_dd = self.subMenuLists.Append(help='Definition description', id=-1, kind=wx.ITEM_NORMAL, text=u'<dd> Definition description') self.MenuItemHTML_dl = self.subMenuLists.Append(help='Definition list', id=-1, kind=wx.ITEM_NORMAL, text=u'<dl> Definition list') self.MenuItemHTML_dt = self.subMenuLists.Append(help='Definition term', id=-1, kind=wx.ITEM_NORMAL, text=u'<dt> Definition term') self.MenuItemHTML_li = self.subMenuLists.Append(help='List item', id=-1, kind=wx.ITEM_NORMAL, text=u'<li> List item') self.MenuItemHTML_ol = self.subMenuLists.Append(help='Ordered list', id=-1, kind=wx.ITEM_NORMAL, text=u'<ol> Ordered list') self.MenuItemHTML_ul = self.subMenuLists.Append(help='Unordered list', id=-1, kind=wx.ITEM_NORMAL, text=u'<ul> Unordered list') # menu items self.MenuItemHTML_caption = self.subMenuTables.Append(help='caption for a table', id=-1, kind=wx.ITEM_NORMAL, text=u'<caption> caption for a table') self.MenuItemHTML_col = self.subMenuTables.Append(help='Table column', id=-1, kind=wx.ITEM_NORMAL, text=u'<col> Table column') self.MenuItemHTML_colgroup = self.subMenuTables.Append(help='Column group', id=-1, kind=wx.ITEM_NORMAL, 
text=u'<colgroup> Column group') self.MenuItemHTML_table = self.subMenuTables.Append(help='Table used for tabular data', id=-1, kind=wx.ITEM_NORMAL, text=u'<table> Table used for tabular data') self.MenuItemHTML_tbody = self.subMenuTables.Append(help='Table body', id=-1, kind=wx.ITEM_NORMAL, text=u'<tbody> Table body') self.MenuItemHTML_td = self.subMenuTables.Append(help='Table data cell', id=-1, kind=wx.ITEM_NORMAL, text=u'<td> Table data cell') self.MenuItemHTML_tfoot = self.subMenuTables.Append(help='Table foot', id=-1, kind=wx.ITEM_NORMAL, text=u'<tfoot> Table foot') self.MenuItemHTML_th = self.subMenuTables.Append(help='Table header cell', id=-1, kind=wx.ITEM_NORMAL, text=u'<th> Table header cell') self.MenuItemHTML_thead = self.subMenuTables.Append(help='Table header', id=-1, kind=wx.ITEM_NORMAL, text=u'<thead> Table header') self.MenuItemHTML_tr = self.subMenuTables.Append(help='Table row', id=-1, kind=wx.ITEM_NORMAL, text=u'<tr> Table row') # menu items self.MenuItemHTML_button = self.subMenuForms.Append(help='Defines a form button', id=-1, kind=wx.ITEM_NORMAL, text=u'<button> Defines a form button') self.MenuItemHTML_fieldset = self.subMenuForms.Append(help='Defines a group of related form items', id=-1, kind=wx.ITEM_NORMAL, text=u'<fieldset> Defines a group of related form items') self.MenuItemHTML_form = self.subMenuForms.Append(help='Defines a form', id=-1, kind=wx.ITEM_NORMAL, text=u'<form> Defines a form') self.MenuItemHTML_input = self.subMenuForms.Append(help='Form field for user input', id=-1, kind=wx.ITEM_NORMAL, text=u'<input> Form field for user input') self.MenuItemHTML_label = self.subMenuForms.Append(help='Label for a form element', id=-1, kind=wx.ITEM_NORMAL, text=u'<label> Label for a form element') self.MenuItemHTML_legend = self.subMenuForms.Append(help='Defines a caption for a fieldset', id=-1, kind=wx.ITEM_NORMAL, text=u'<legend> Defines a caption for a fieldset') self.MenuItemHTML_optgroup = self.subMenuForms.Append(help='Option group', id=-1, kind=wx.ITEM_NORMAL, text=u'<optgroup> Option group') self.MenuItemHTML_option = self.subMenuForms.Append(help='Defines an option of a <select> form field', id=-1, kind=wx.ITEM_NORMAL, text=u'<option> Defines an option of a <select> form field') self.MenuItemHTML_select = self.subMenuForms.Append(help='A drop-down list form element', id=-1, kind=wx.ITEM_NORMAL, text=u'<select> A drop-down list form element') self.MenuItemHTML_textarea = self.subMenuForms.Append(help='A multi-row text area form element', id=-1, kind=wx.ITEM_NORMAL, text=u'<textarea> A multi-row text area form element') # menu items self.MenuItemHTML_noscript = self.subMenuScripting.Append(help='content to be used when a script can not be used', id=-1, kind=wx.ITEM_NORMAL, text=u'<noscript> content to be used when a script can not be used') self.MenuItemHTML_script = self.subMenuScripting.Append(help='Defines a scripting language, eg. JavaScript', id=-1, kind=wx.ITEM_NORMAL, text=u'<script> Defines a scripting language, eg. 
JavaScript')

        # menu items - presentational
        self.MenuItemHTML_b = self.subMenuPresentational.Append(help='Bold', id=-1, kind=wx.ITEM_NORMAL, text=u'<b> Bold')
        self.MenuItemHTML_big = self.subMenuPresentational.Append(help='Big', id=-1, kind=wx.ITEM_NORMAL, text=u'<big> Big')
        self.MenuItemHTML_hr = self.subMenuPresentational.Append(help='Horizontal ruler', id=-1, kind=wx.ITEM_NORMAL, text=u'<hr> Horizontal ruler')
        self.MenuItemHTML_i = self.subMenuPresentational.Append(help='Italic', id=-1, kind=wx.ITEM_NORMAL, text=u'<i> Italic')
        self.MenuItemHTML_small = self.subMenuPresentational.Append(help='Small', id=-1, kind=wx.ITEM_NORMAL, text=u'<small> Small')
        self.MenuItemHTML_sub = self.subMenuPresentational.Append(help='Subscript', id=-1, kind=wx.ITEM_NORMAL, text=u'<sub> Subscript')
        self.MenuItemHTML_sup = self.subMenuPresentational.Append(help='Superscript', id=-1, kind=wx.ITEM_NORMAL, text=u'<sup> Superscript')
        self.MenuItemHTML_tt = self.subMenuPresentational.Append(help='Teletype', id=-1, kind=wx.ITEM_NORMAL, text=u'<tt> Teletype')

        # CSS Menu Items
        # The original spelled out one Append()/Bind() pair per property;
        # building them in a loop keeps the identical menu order, help
        # strings, item labels and MenuItemCSS_* attribute names.
        cssProperties = [
            'background', 'background-attachment', 'background-color',
            'background-image', 'background-position', 'background-repeat',
            'border', 'border-collapse', 'border-color', 'border-spacing',
            'border-style', 'border-width', 'bottom', 'caption-side',
            'clear', 'clip', 'color', 'content', 'counter-increment',
            'counter-reset', 'cursor', 'direction', 'display', 'empty-cells',
            'float', 'font', 'font-family', 'font-size', 'font-style',
            'font-variant', 'font-weight', 'height', 'left', 'letter-spacing',
            'line-height', 'list-style', 'list-style-image',
            'list-style-position', 'list-style-type', 'margin', 'max-height',
            'max-width', 'min-height', 'min-width', 'orphans', 'outline',
            'outline-color', 'outline-style', 'outline-width', 'overflow',
            'padding', 'page-break-after', 'page-break-before',
            'page-break-inside', 'position', 'quotes', 'right',
            'table-layout', 'text-align', 'text-decoration', 'text-indent',
            'text-transform', 'top', 'unicode-bidi', 'vertical-align',
            'visibility', 'white-space', 'widows', 'width', 'word-spacing',
            'z-index',
        ]
        for prop in cssProperties:
            attr = prop.replace('-', '_')
            item = self.subMenuCSS.Append(help=prop, id=-1,
                                          kind=wx.ITEM_NORMAL,
                                          text=unicode(attr))
            setattr(self, 'MenuItemCSS_' + attr, item)
            self.Bind(wx.EVT_MENU, self.CallController, item)
        # end of css menu items

        self._init_coll_code_Items(self.insert)

    # the sizers
    def _init_sizers(self):
        # generated method, don't edit
        self.boxSizer1 = wx.BoxSizer(orient=wx.VERTICAL)
        #self.boxSizer2 = wx.BoxSizer(orient=wx.VERTICAL)
        self._init_coll_boxSizer1_Items(self.boxSizer1)
        #self._init_coll_boxSizer2_Items(self.boxSizer2)
        # here the sizer for the window is set
        self.SetSizer(self.boxSizer1)
        # deprecated:
        #self.flexGridSizer1 = wx.FlexGridSizer(cols=2, hgap=0, rows=1, vgap=0)
        #self._init_coll_flexGridSizer1_Items(self.flexGridSizer1)
        #self._init_coll_flexGridSizer1_Growables(self.flexGridSizer1)

    #def partArt(self, il, image_size):
    #    # called in _init_ctrls; moved out here to make testing easier
    #    # wx.ArtProvider.Push(MyArtProvider())
    #    self.part = il.Add(wx.ArtProvider_GetBitmap(wx.ART_NORMAL_FILE, wx.ART_OTHER, image_size))
    #    wx.ArtProvider.Pop()

    def _init_ctrls(self, prnt):
        wx.Frame.__init__(self, id=-1, name='', parent=prnt, pos=wx.Point(0, 0),
                          size=wx.Size(1200, 700), style=wx.DEFAULT_FRAME_STYLE,
                          title=u'the maker')
        try:
            self.SetIcon(wx.Icon(os.path.join(os.path.dirname(sys.argv[0]),
                                              "system/tags.ico"),
                                 wx.BITMAP_TYPE_ICO))
        except Exception:
            # don't sweat it if the icon doesn't load (the original used
            # 'finally: pass' here, which would not have swallowed the error)
            pass

        self._init_utils()
        self.SetMenuBar(self.mainMenuBar)
        self.SetStatusBarPane(0)

        #the other splitter
        #self.splitter2 = MySplitter(self, -1, None)

        #the top splitter
        self.splitter = MySplitter(self, -1, None)

        # and the stc is added to it
        #
        # it is very important to keep the NODRAG style:
        # if dragging is added at some point, the
        # makerProjectController.py method noteBookPageClosed has to be
        # changed where the noteBookPages dict is updated
        #
        #self.noteBook = nb.FlatNotebook(self.splitter, -1,
        #                                agwStyle=wx.lib.flatnotebook.FNB_FF2,
        #                                style=wx.lib.flatnotebook.FNB_NODRAG
        #                                      | wx.lib.flatnotebook.FNB_X_ON_TAB)
        self.noteBook = MyCustomNoteBook(self.splitter, -1, None, None)
        self.noteBook.SetPadding(wx.Size(20))

        # add a welcome message to the noteBook
        self.styledTextCtrl1 = (makerEditorWxView.editorView(self, "default")).editor
        self.welcomeId = self.styledTextCtrl1.GetId()
        self.noteBook.AddPage(self.styledTextCtrl1, "Thank you for using The Maker.")
        self.styledTextCtrl1.SetText(self.BoilerPlate)

        #switch off popup
        #self.styledTextCtrl1.Bind(wx.EVT_RIGHT_DOWN, self.OnSTCRightDown)

        #add widgets to the first splitter
        self.listWindow = wx.Panel(self.splitter, -1, style=wx.NO_BORDER)
        #self.listWindow.SetBackgroundColour(wx.RED)
        self.listSizer = wx.BoxSizer(orient=wx.VERTICAL)

        # the tree is added to the splitter too
        self.tree = wx.TreeCtrl(self.listWindow, -1,
                                style=wx.TR_HAS_BUTTONS
                                      | wx.TR_LINES_AT_ROOT
                                      | wx.TR_DEFAULT_STYLE)

        def drawAfterPaint(evt):
            Size = self.tree.GetClientSizeTuple()
            dc = 
wx.ClientDC(self.tree) dc.SetPen(self.treePen) dc.DrawLine(Size[0]-1, 0, Size[0]-1, Size[1]) def onTreePaint(evt): wx.CallAfter(drawAfterPaint, evt) evt.Skip() self.treePen = wx.Pen('#666666', 1) self.tree.Bind(wx.EVT_PAINT, onTreePaint) image_size = (16,16) projectArt = wx.Image(os.path.join(os.path.dirname(sys.argv[0]), "./system/ToolBarIcons/114.png"), wx.BITMAP_TYPE_PNG).Scale(16,16).ConvertToBitmap() folderArt = wx.Image(os.path.join(os.path.dirname(sys.argv[0]), "./system/ToolBarIcons/99.png"), wx.BITMAP_TYPE_PNG).Scale(16,16).ConvertToBitmap() folderOpenArt = wx.Image(os.path.join(os.path.dirname(sys.argv[0]), "./system/ToolBarIcons/107.png"), wx.BITMAP_TYPE_PNG).Scale(16,16).ConvertToBitmap() fileArt = wx.Image(os.path.join(os.path.dirname(sys.argv[0]), "./system/ToolBarIcons/93.png"), wx.BITMAP_TYPE_PNG).Scale(16,16).ConvertToBitmap() fileChangeArt = wx.Image(os.path.join(os.path.dirname(sys.argv[0]), "./system/ToolBarIcons/118.png"), wx.BITMAP_TYPE_PNG).Scale(16,16).ConvertToBitmap() partArt = wx.Image(os.path.join(os.path.dirname(sys.argv[0]), "./system/ToolBarIcons/24-16.png"), wx.BITMAP_TYPE_PNG).Scale(16,16).ConvertToBitmap() il = wx.ImageList(image_size[0], image_size[1]) self.projidx = il.Add(projectArt) self.fldridx = il.Add(folderArt) self.fldropenidx = il.Add(folderOpenArt) self.fileidx = il.Add(fileArt) self.filechange = il.Add(fileChangeArt) self.part = il.Add(partArt) #self.partArt(il, image_size) self.tree.SetImageList(il) self.il = il self.listSizer.Add(self.tree, 1, border=0, flag=wx.EXPAND) self.listWindow.SetAutoLayout(True) self.listWindow.SetSizer(self.listSizer) self.listSizer.Fit(self.listWindow) self.splitter.SetMinimumPaneSize(200) self.splitter.SplitVertically(self.listWindow, self.noteBook, 180) self.toolBar = self.CreateToolBar( style = wx.TB_HORIZONTAL | wx.NO_BORDER #| wx.TB_FLAT | wx.TB_TEXT ) self.search = wx.SearchCtrl(self.toolBar, id= -1, pos=(750,-1), size=(180,25), style=wx.TE_PROCESS_ENTER) #extract the searchCtrl's textCtrl self.searchStatus = wx.StaticText(self.toolBar, -1, size=wx.DefaultSize, pos=wx.DefaultPosition, style=0) self.searchStatus.SetLabel(" ") saveArt = wx.Bitmap(os.path.join(os.path.dirname(sys.argv[0]), "./system/ToolBarIcons/23.png")) publishArt = wx.Bitmap(os.path.join(os.path.dirname(sys.argv[0]), "./system/ToolBarIcons/53.png")) previewArt = wx.Bitmap(os.path.join(os.path.dirname(sys.argv[0]), "./system/ToolBarIcons/25.png")) makeAllArt = wx.Bitmap(os.path.join(os.path.dirname(sys.argv[0]), "./system/ToolBarIcons/24.png")) self.toolBar.AddSeparator() self.toolBar.AddLabelTool(10, "Save", saveArt) self.toolBar.AddLabelTool(20, "Publish", publishArt) self.toolBar.AddLabelTool(30, "Preview", previewArt) self.toolBar.AddLabelTool(40, "Make All", makeAllArt) self.toolBar.AddStretchableSpace() self.toolBar.AddControl(self.searchStatus) self.toolBar.AddControl(self.search) self.toolBar.Realize() self.statusBar1 = wx.StatusBar(id=-1, name='statusBar1', parent=self, style=wx.ST_SIZEGRIP) self.statusBar1.SetConstraints(LayoutAnchors(self.statusBar1, True, True, False, False)) self._init_coll_statusBar1_Fields(self.statusBar1) self.SetStatusBar(self.statusBar1) self.styledTextCtrl1.Bind(wx.EVT_PAINT, self.OnStyledTextCtrl1Paint) self.styledTextCtrl1.Bind(wx.EVT_ERASE_BACKGROUND, self.OnStyledTextCtrl1EraseBackground) def __init__(self, app): self.BoilerPlate = makerCopyright.getCopyright() self.ProgressBars = [] # this is a stack for progress bars self.ModifierBind = False self.application = app self._init_ctrls(None) 
self._init_sizers() self.wx = wx self.saved= True self.selection= None # set interface fonts if wx.Platform == '__WXMSW__': self.interfaceSetFonts(10,-2) elif wx.Platform == '__WXMAC__': self.interfaceSetFonts(12,0) # Linux and others else: self.interfaceSetFonts(10,0, special = True) self.createPopUpMenus() def interfaceSetFonts(self,value, zoom, special = False): """ value is the size for Fonts zoom is the zoom for the editor """ if special: theFont = wx.SystemSettings.GetFont(0) else: theFont = wx.Font(value, wx.SWISS, wx.NORMAL, wx.NORMAL, False, u'Arial') self.styledTextCtrl1.SetZoom(zoom) self.statusBar1.SetFont(theFont) self.SetFont(theFont) #self.makeAllButton.SetFont(theFont) #self.previewButton.SetFont(theFont) #self.publishButton.SetFont(theFont) #self.saveButton.SetFont(theFont) self.tree.SetFont(theFont) self.tree.SetBackgroundColour('#e2e6ec') self.tree.SetIndent(20) def CallController(self, event): """ This is a universal method sending the event to the controller and the controller method findActionForEvent(event) will trigger the right action """ self.controller.findActionForEvent(event) def createPopUpMenus(self): self.treePopUp = self.wx.Menu() # Bind this in Maker File Controller self.treePopUpMenuItemDeleteFile = self.treePopUp.Append(help='Delete Current File', id=-1, kind=self.wx.ITEM_NORMAL, text=u'Delete File' ) self.treePopUpMenuItemRenameFile = self.treePopUp.Append(help='Rename Current File', id=-1, kind=self.wx.ITEM_NORMAL, text=u'Rename File' ) self.treePopUpMenuItemCloseFile = self.treePopUp.Append(help='Close Current File', id=-1, kind=self.wx.ITEM_NORMAL, text=u'Close File' ) self.treePopUp.AppendSeparator() self.treePopUpMenuItemPreview = self.treePopUp.Append(help='Preview File', id=-1, kind=self.wx.ITEM_NORMAL, text=u'Preview' ) self.treePopUp.AppendSeparator() self.treePopUpMenuItemExpandAll = self.treePopUp.Append(help='Expand All Items', id=-1, kind=self.wx.ITEM_NORMAL, text=u'Expand All Items' ) self.treePopUpMenuItemCollapseAll = self.treePopUp.Append(help='Collapse All Items', id=-1, kind=self.wx.ITEM_NORMAL, text=u'Collapse All Items' ) self.treePopUpMenuItemCollapseOther = self.treePopUp.Append(help='Collapse Other Projects', id=-1, kind=self.wx.ITEM_NORMAL, text=u'Collapse Other Projects' ) self.treePopUp.AppendSeparator() self.treePopUpMenuItemPrint = self.treePopUp.Append(help='Print File', id=-1, kind=self.wx.ITEM_NORMAL, text=u'Print' ) # Editor self.editorPopUp = self.wx.Menu() self.editorPopUpMenuItemUndo = self.editorPopUp.Append(help='undo', id=-1, kind=self.wx.ITEM_NORMAL, text=u'Undo\tCtrl+z' ) self.editorPopUpMenuItemRedo = self.editorPopUp.Append(help=u'copy selection', id=-1, kind=self.wx.ITEM_NORMAL, text=u'Redo\tCtrl+y' ) self.editorPopUp.AppendSeparator() self.editorPopUpMenuItemCut = self.editorPopUp.Append(help='cut', id=-1, kind=self.wx.ITEM_NORMAL, text=u'Cut\tCtrl+x' ) self.editorPopUpMenuItemCopy = self.editorPopUp.Append(help=u'copy selection', id=-1, kind=self.wx.ITEM_NORMAL, text=u'Copy\tCtrl+c' ) self.editorPopUpMenuItemPaste = self.editorPopUp.Append(help='paste selection', id=-1, kind=self.wx.ITEM_NORMAL, text=u'Paste\tCtrl+v' ) self.editorPopUp.AppendSeparator() self.editorPopUpMenuItemReplace = self.editorPopUp.Append(help='Replace', id=-1, kind=self.wx.ITEM_NORMAL, text=u'Replace\tCtrl+r' ) self.editorPopUpMenuItemFind = self.editorPopUp.Append(help='Find', id=-1, kind=self.wx.ITEM_NORMAL, text=u'Find\tCtrl+f' ) self.editorPopUpMenuItemFindNext = self.editorPopUp.Append(help='Find Next', id=-1, 
kind=self.wx.ITEM_NORMAL, text=u'Find Next\tCtrl+g')

        self.editorPopUp.AppendSeparator()
        self.editorPopUpMenuItemSelectColor = self.editorPopUp.Append(help='Choose a color', id=-1, kind=self.wx.ITEM_NORMAL, text=u'Select Color')
        self.editorPopUp.AppendSeparator()
        self.editorPopUpMenuItemUnderline = self.editorPopUp.Append(help='Underline', id=-1, kind=self.wx.ITEM_NORMAL, text=u'Underline')
        self.editorPopUpMenuItemOblique = self.editorPopUp.Append(help='Oblique', id=-1, kind=self.wx.ITEM_NORMAL, text=u'Oblique')
        self.editorPopUpMenuItemBold = self.editorPopUp.Append(help='Bold', id=-1, kind=self.wx.ITEM_NORMAL, text=u'Bold')
        self.editorPopUpMenuItemLine_through = self.editorPopUp.Append(help='Line Through', id=-1, kind=self.wx.ITEM_NORMAL, text=u'Line Through')

    def OnFrameSize(self, event):
        # splitter2 is currently commented out in _init_ctrls, so guard
        # against the attribute being missing before touching it
        if hasattr(self, 'splitter2'):
            self.splitter2.SetSashPosition(self.Splitter2SashStart)
        event.Skip()

    def OnStyledTextCtrl1Paint(self, event):
        event.Skip()

    def OnStyledTextCtrl1EraseBackground(self, event):
        event.Skip()

    def OnPagesExitMenu(self, event):
        self.controller.actionGUIClose()

    #def OnHelpItemsAbout(self, event):
    #    self.controller.actionShowAbout()

    def openSpecialEditor(self, Filename):
        self.controller.actionSpecialEdit(Filename)

    def Password(self, Question):
        """
        user input dialog for a password
        returns a string or None
        """
        value = None
        dlg = wx.TextEntryDialog(self, Question, '', '',
                                 wx.TE_PASSWORD | wx.CENTER | wx.CANCEL | wx.OK)
        if dlg.ShowModal() == wx.ID_OK:
            value = dlg.GetValue()
        dlg.Destroy()
        #print "PASSW VALUE ", value
        return value

    def InputWithValue(self, question="?", value=""):
        """
        user input dialog
        returns a string or None
        """
        dlg = wx.TextEntryDialog(self, question, 'Question', value)
        value = None
        if dlg.ShowModal() == wx.ID_OK:
            value = dlg.GetValue()
        dlg.Destroy()
        return value

    def SingleChoiceDialog(self, choices, title="", message="Please choose..."):
        value = None
        dlg = wx.SingleChoiceDialog(self, message, title, choices,
                                    wx.CHOICEDLG_STYLE)
        if dlg.ShowModal() == wx.ID_OK:
            value = dlg.GetStringSelection()
        dlg.Destroy()
        return value

    def Input(self, Question="?", title=""):
        """
        user input dialog
        returns a string or None
        """
        value = None
        dlg = wx.TextEntryDialog(self, Question, title, "")
        if dlg.ShowModal() == wx.ID_OK:
            value = dlg.GetValue()
        dlg.Destroy()
        return value

    def OnFtpDistributiontableMenu(self, event):
        self.controller.actionEditDistributionTable()

    #===========================================================================
    # all kinds of dialogs
    #===========================================================================

    def ImageDialogWithDir(self, dir):
        """
        Display an image select dialog restricted to 'dir'.
        Returns the filename of the image, or None if no image was selected.
        """
        dlg = ib.ImageDialog(self, dir)
        dlg.Centre()
        if dlg.ShowModal() == wx.ID_OK:
            if dlg.GetDirectory() == dir:
                Image = dlg.GetFile()
            else:
                self.Error("Image is not in the project folder ! Please import first...")
                Image = None
        else:
            Image = None
        dlg.Destroy()
        return Image
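    # ------------------------------------------------------------------
    # Usage sketch for ImageDialogWithDir above (illustrative only, not
    # bound to any menu; 'OnPickImage' and 'projectGfxDir' are
    # hypothetical names):
    #
    #     def OnPickImage(self, event):
    #         projectGfxDir = os.path.join(os.getcwd(), 'gfx')
    #         image = self.ImageDialogWithDir(projectGfxDir)
    #         if image is not None:
    #             name = os.path.split(image)[1]
    #             self.styledTextCtrl1.AddText('<img src="%s" alt="%s" />' % (name, name))
    # ------------------------------------------------------------------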
    def ImageDialog(self):
        """
        Display an image select dialog.
        Returns the filename of the image, or None if no image was selected.
        """
        dlg = ib.ImageDialog(self)
        dlg.Centre()
        if dlg.ShowModal() == wx.ID_OK:
            Image = dlg.GetFile()
        else:
            Image = None
        dlg.Destroy()
        return Image

    def Ask_YesOrNo(self, Message):
        """Returns 'Yes' or 'No'."""
        dlg = wx.MessageDialog(self, Message, 'Question',
                               wx.YES_NO | wx.ICON_QUESTION)
        try:
            ret = dlg.ShowModal()
        finally:
            dlg.Destroy()
        # treat anything but an explicit Yes as No, so the answer is
        # always bound even if the dialog is dismissed some other way
        if ret == wx.ID_YES:
            Answer = 'Yes'
        else:
            Answer = 'No'
        return Answer

    def doShell(self, frame, nb=None, log=None):
        # note: the 'frame' argument is ignored; a new shell frame is
        # created each time (ShellFrame comes from wx.py.shell - the
        # original referenced the bare, unimported name)
        from wx.py.shell import ShellFrame
        shellFrame = ShellFrame(self, -1, "Python Shell", size=(700, 400),
                                pos=(150, 150), style=wx.DEFAULT_FRAME_STYLE)
        shellFrame.Show(True)
        return shellFrame.getShell()

    def Ask(self, Message):
        """
        returns 'Ok' or 'Cancel'
        """
        dlg = wx.MessageDialog(self, Message, 'Question',
                               wx.OK | wx.CANCEL | wx.ICON_WARNING)
        try:
            ret = dlg.ShowModal()
        finally:
            dlg.Destroy()
        if ret == wx.ID_OK:
            Answer = 'Ok'
        else:
            Answer = 'Cancel'
        return Answer

    def MessageNotModal(self, Message):
        dlg = wx.MessageDialog(self, Message, 'Info', wx.OK | wx.ICON_INFORMATION)
        dlg.Show()

    def Message(self, Message):
        dlg = wx.MessageDialog(self, Message, 'Info', wx.OK | wx.ICON_INFORMATION)
        try:
            dlg.ShowModal()
        finally:
            dlg.Destroy()

    def Warning(self, Message):
        #self.errorhandler.write(Message)
        dlg = wx.MessageDialog(self, Message, 'Alert', wx.OK | wx.ICON_EXCLAMATION)
        try:
            dlg.ShowModal()
        finally:
            dlg.Destroy()

    def Error(self, Message):
        #sys.stderr.write(Message)
        dlg = wx.MessageDialog(self, Message, 'Error', wx.OK | wx.ICON_ERROR)
        try:
            dlg.ShowModal()
        finally:
            dlg.Destroy()

    # -------
    # some functions for a pulsing progress bar

    def PulseProgress(self, Message=""):
        self.PulseBar = self.GetLastProgressBar()
        self.PulseBar.Pulse(Message)
        self.fitRefreshAndCenter(self.PulseBar)

    # ------- Pulse stuff end -----------------------------------------------

    #def ShowProgress(self, max, Message):
    #    self.Progress = wx.ProgressDialog("Progress...", Message,
    #                                      maximum=max, parent=self,
    #                                      style=wx.PD_APP_MODAL | wx.PD_AUTO_HIDE)
    #    self.AddProgressBar(self.Progress)
    #
    #def UpdateProgress(self, count):
    #    Bar = self.GetLastProgressBar()
    #    Bar.Update(count)
    #    self.fitRefreshAndCenter(Bar)
    #
    #def UpdateProgressMessage(self, count, message):
    #    Bar = self.GetLastProgressBar()
    #    Bar.Update(count, message)
    #    self.fitRefreshAndCenter(Bar)

    def fitRefreshAndCenter(self, widget):
        """
        calls .Fit(), .Refresh() and .CenterOnScreen() on the widget
        """
        widget.Fit()
        widget.Refresh()
        widget.CenterOnScreen()

    #def KillProgress(self):
    #    try:
    #        Bar = self.GetLastProgressBar()
    #        self.DeleteProgressBar(Bar)
    #        Bar.Destroy()
    #        if self.keepGoing:
    #            self.StopPulse()
    #    except:
    #        print "unable to Kill Progress Bar"
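    # ------------------------------------------------------------------
    # Sketch of how the progress-bar stack is meant to be used
    # (illustrative; it assumes the commented-out ShowProgress /
    # UpdateProgress / KillProgress methods above and below are
    # re-enabled, and 'upload' is a hypothetical worker function):
    #
    #     self.ShowProgress(len(files), "Uploading...")  # pushes a new bar
    #     for i, f in enumerate(files):
    #         upload(f)
    #         self.UpdateProgress(i + 1)                 # updates the top bar
    #     self.KillProgress()                            # pops and destroys it
    # ------------------------------------------------------------------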
    #def DeleteProgressBar(self, ThisOne):
    #    """
    #    deletes the instance from the stack
    #    """
    #    self.ProgressBars.remove(ThisOne)
    #
    #def AddProgressBar(self, ThisOne):
    #    """
    #    ThisOne is an instance of a wxProgressbar
    #    """
    #    self.ProgressBars.append(ThisOne)
    #
    #def GetLastProgressBar(self):
    #    """
    #    returns the progressbar that was last added to the stack
    #    """
    #    return self.ProgressBars[-1]
    #
    #def GetAllProgressBars(self):
    #    return self.ProgressBars

    def OnDeleteFile(self, event):
        self.controller.actionDeleteCurrentFile()

    # here are the tag tools

    def OnP_buttonButton(self, event):
        self.insert_xhtml_tag(['<p>', '</p>'])

    def OnBr_buttonButton(self, event):
        self.insert_xhtml_tag(['<br />'])

    def OnH1_buttonButton(self, event):
        self.insert_xhtml_tag(['<h1>', '</h1>'])

    def OnHr_buttonButton(self, event):
        self.insert_xhtml_tag(['<hr />'])

    def OnA_buttonButton(self, event):
        self.insert_xhtml_tag(['<a href=" ">', '</a>'])

    def Onul_buttonButton(self, event):
        self.insert_xhtml_tag(['<ul>', '</ul>'])

    def Onli_buttonButton(self, event):
        self.insert_xhtml_tag(['<li>', '</li>'])

    def Onol_buttonButton(self, event):
        self.insert_xhtml_tag(['<ol>', '</ol>'])

    def OnImageButton(self, event):
        dir = self.cms.path_parts + 'gfx/'
        # open the image browser dialog in the project's gfx folder
        dlg = ib.ImageDialog(self, dir)
        dlg.Centre()
        if dlg.ShowModal() == wx.ID_OK:
            Image = dlg.GetFile()
        else:
            Image = None
        dlg.Destroy()
        try:
            Image = os.path.split(Image)[1]
            self.styledTextCtrl1.AddText('<img src="' + self.cms.url
                                         + self.cms.gfxFolder + Image
                                         + '" align="left" alt="' + Image + ' " />')
            self.saved = False
            #self.saveButton.Enable()
            #self.publishButton.Enable()
        except Exception:
            # nothing was selected - leave the document untouched
            pass

    def OnProjectEdittemplatesMenu(self, event):
        self.controller.actionEditTemplate()

    def OnImagesDelete_imageMenu(self, event):
        self.controller.actionDeleteImage()

    def OnPagesAddprojectMenu(self, event):
        self.controller.addNewProject()

    def OnFtpBrowseServerMenu(self, event):
        self.look_busy()
        path = self.controller.actionBrowseServer()
        #print "this is the returned path", path
        self.relax()

    def look_busy(self):
        cursor = wx.StockCursor(wx.CURSOR_WATCH)
        wx.BeginBusyCursor(cursor)

    # duplicate of look_busy, kept for compatibility with existing callers
    def busy(self):
        cursor = wx.StockCursor(wx.CURSOR_WATCH)
        wx.BeginBusyCursor(cursor)

    def relax(self):
        if wx.IsBusy():
            wx.EndBusyCursor()
        print 'done...'

    def OnEditReduceMenu(self, event):
        """
        reduce the editor font
        """
        zoom = self.styledTextCtrl1.GetZoom()
        newZoom = int(zoom) - 1
        try:
            self.styledTextCtrl1.SetZoom(newZoom)
        except Exception:
            self.Error("You cannot reduce the Font size any further...")

    def OnaddToListButton(self, event):
        self.Message("this is not working")

    def Version(self):
        return "0.5.9"

    def ColorDialog(self):
        dlg = wx.ColourDialog(self)
        # Ensure the full colour dialog is displayed,
        # not the abbreviated version.
        dlg.GetColourData().SetChooseFull(True)
        if dlg.ShowModal() == wx.ID_OK:
            # The dialog's wx.ColourData contains valid information only
            # after OK; fetch it before the dialog is destroyed, because
            # the ColourData must not be used after that. The colour is
            # stored as a "(r, g, b)" string.
            data = dlg.GetColourData()
            self.color_data = str(data.GetColour().Get())
        dlg.Destroy()
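    # ------------------------------------------------------------------
    # Usage sketch for ColorDialog above (illustrative): the chosen
    # colour ends up on self.color_data as an "(r, g, b)" string, so a
    # caller could paste it into the current stylesheet like this:
    #
    #     self.ColorDialog()
    #     if hasattr(self, 'color_data'):
    #         self.styledTextCtrl1.AddText('color: rgb%s;' % self.color_data)
    # ------------------------------------------------------------------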
dlg.Destroy() def SelectProject(self): dlg = wx.DirDialog(self, "Select a project to import:", style=wx.DD_DEFAULT_STYLE) # If the user selects OK, then we process the dialog's data. # This is done by getting the path data from the dialog - BEFORE # we destroy it. if dlg.ShowModal() == wx.ID_OK: return dlg.GetPath() else: return None # -------------------------------------------------------------------------- def getFileFromUser(self, dir=None, prompt = None): if not dir: dir = os.getcwd() if not prompt: prompt = "Choose a file..." wildcard = "all files (*.*)|*.*|" \ "content Files (*.content)|*.content|" \ "CSS Files (*.css)|*.css|" \ "CGI scripts (*.cgi)|*.cgi|" \ "Javascript file (*.js)|*.js|" \ "plain Text file (*.txt)|*.txt|" \ "maker Dynamic (*.dynamic)|*.dynamic" \ "Quicktime movie (*.mov) | *.mov" dlg = wx.FileDialog( self, message = prompt, style=wx.OPEN | wx.CHANGE_DIR ) # If the user selects OK, then we process the dialog's data. # This is done by getting the path data from the dialog - BEFORE # we destroy it. try: dlg.ShowModal() finally: paths = dlg.GetPaths() dlg.Destroy() return paths # ------------------------------------------------------------------------ def getDirFromUser(self, dialogMessage = None): self.dirDialog = wx.DirDialog(self, message = dialogMessage, style=wx.DD_DEFAULT_STYLE| wx.DD_NEW_DIR_BUTTON) # ------------------------------------------------------------------------ #=============================================================================== # Custom FlatNoteBook #=============================================================================== class MyCustomNoteBook(nb.FlatNotebook): def __init__(self, parent, ID, pos=wx.DefaultPosition, size=wx.DefaultSize, agwStyle = wx.lib.flatnotebook.FNB_FF2, style= wx.lib.flatnotebook.FNB_NODRAG | wx.lib.flatnotebook.FNB_X_ON_TAB): self._bForceSelection = False self._nPadding = 60 self._nFrom = 0 #style |= wx.TAB_TRAVERSAL self._pages = None self._windows = [] self._popupWin = None self._naviIcon = None self._agwStyle = agwStyle self._orientation = None self._customPanel = None wx.PyPanel.__init__(self, parent, ID, pos, size, style) attr = self.GetDefaultAttributes() self.SetOwnForegroundColour(attr.colFg) self.SetOwnBackgroundColour(attr.colBg) self._pages = MyPageContainer(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, style) self.Bind(wx.EVT_NAVIGATION_KEY, self.OnNavigationKey) self.Init() class MyPageContainer(nb.PageContainer): """ This class acts as a container for the pages you add to :class:`FlatNotebook`. """ def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0): """ Default class constructor. Used internally, do not call it in your code! :param `parent`: the :class:`PageContainer` parent; :param `id`: an identifier for the control: a value of -1 is taken to mean a default; :param `pos`: the control position. A value of (-1, -1) indicates a default position, chosen by either the windowing system or wxPython, depending on platform; :param `size`: the control size. A value of (-1, -1) indicates a default size, chosen by either the windowing system or wxPython, depending on platform; :param `style`: the window style. 
""" self._ImageList = None self._iActivePage = -1 self._pDropTarget = None self._nLeftClickZone = nb.FNB_NOWHERE self._iPreviousActivePage = -1 self._pRightClickMenu = None self._nXButtonStatus = nb.FNB_BTN_NONE self._nArrowDownButtonStatus = nb.FNB_BTN_NONE self._pParent = parent self._nRightButtonStatus = nb.FNB_BTN_NONE self._nLeftButtonStatus = nb.FNB_BTN_NONE self._nTabXButtonStatus = nb.FNB_BTN_NONE self._nHoveringOverTabIndex = -1 self._nHoveringOverLastTabIndex = -1 self._setCursor = False self._pagesInfoVec = [] self._colourTo = wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVECAPTION) self._colourFrom = wx.WHITE self._activeTabColour = wx.WHITE self._activeTextColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNTEXT) self._nonActiveTextColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNTEXT) self._tabAreaColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNFACE) self._nFrom = 0 self._isdragging = False # Set default page height, this is done according to the system font memDc = wx.MemoryDC() memDc.SelectObject(wx.EmptyBitmap(1,1)) if "__WXGTK__" in wx.PlatformInfo: boldFont = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT) boldFont.SetWeight(wx.BOLD) memDc.SetFont(boldFont) height = memDc.GetCharHeight() tabHeight = height + nb.FNB_HEIGHT_SPACER # We use 10 pixels as padding wx.PyPanel.__init__(self, parent, id, pos, wx.Size(size.x, tabHeight), style|wx.NO_BORDER|wx.NO_FULL_REPAINT_ON_RESIZE|wx.WANTS_CHARS) attr = self.GetDefaultAttributes() self.SetOwnForegroundColour(attr.colFg) self.SetOwnBackgroundColour(attr.colBg) self._pDropTarget = nb.FNBDropTarget(self) self.SetDropTarget(self._pDropTarget) #======================================================================= # Here we plug in our custom Renderer Manager #======================================================================= self._mgr = MyRendererMgr() self.Bind(wx.EVT_PAINT, self.OnPaint) self.Bind(wx.EVT_SIZE, self.OnSize) self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown) self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp) self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown) self.Bind(wx.EVT_MIDDLE_DOWN, self.OnMiddleDown) self.Bind(wx.EVT_MOTION, self.OnMouseMove) self.Bind(wx.EVT_MOUSEWHEEL, self.OnMouseWheel) self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground) self.Bind(wx.EVT_LEAVE_WINDOW, self.OnMouseLeave) self.Bind(wx.EVT_ENTER_WINDOW, self.OnMouseEnterWindow) self.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDClick) self.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus) self.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus) self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown) def IsTabVisible(self, page): """ Returns whether a tab is visible or not. :param `page`: an integer specifying the page index. """ iLastVisiblePage = self.GetLastVisibleTab() return page <= iLastVisiblePage and page >= self._nFrom def GetLastVisibleTab(self): """ Returns the last visible tab in the tab area. """ if self._nFrom < 0: return -1 ii = 0 for ii in xrange(self._nFrom, len(self._pagesInfoVec)): if self._pagesInfoVec[ii].GetPosition() == wx.Point(-1, -1): break return ii-1 def OnSetFocus(self, event=None): """ Handles the ``wx.EVT_SET_FOCUS`` event for :class:`PageContainer`. :param `event`: a :class:`FocusEvent` event to be processed. 
""" if self._iActivePage < 0: if event: event.Skip() return self.SetFocusedPage(self._iActivePage) # make selection advance try: # don't complain if this does not work self.SetSelection(self._iActivePage) except: pass # ---------------------------------------------------------------------------- # # Class FNBRendererMgr # A manager that handles all the renderers defined below and calls the # appropriate one when drawing is needed # ---------------------------------------------------------------------------- # class MyRendererMgr(nb.FNBRendererMgr): """ This class represents a manager that handles all the 6 renderers defined and calls the appropriate one when drawing is needed. """ def __init__(self): """ Default class constructor. """ # overridden # does nothing def GetRenderer(self, style): # Here we push our custom renderers return MakerRenderer() class MakerRenderer(nb.FNBRenderer): def __init__(self): """ Default class constructor. """ self._tabHeight = None self.renderPen = wx.Pen("#444444",1 ) if wx.Platform == "__WXMAC__": # Get proper highlight colour for focus rectangle from the # current Mac theme. kThemeBrushFocusHighlight is # available on Mac OS 8.5 and higher if hasattr(wx, 'MacThemeColour'): c = wx.MacThemeColour(Carbon.Appearance.kThemeBrushFocusHighlight) else: brush = wx.Brush(wx.BLACK) brush.MacSetTheme(Carbon.Appearance.kThemeBrushFocusHighlight) c = brush.GetColour() self._focusPen = wx.Pen(c, 3) def DrawTabs(self, pageContainer, dc): """ Actually draws the tabs in :class:`FlatNotebook`. :param `pageContainer`: an instance of :class:`FlatNotebook`; :param `dc`: an instance of :class:`DC`. """ pc = pageContainer if "__WXMAC__" in wx.PlatformInfo: # Works well on MSW & GTK, however this lines should be skipped on MAC if not pc._pagesInfoVec or pc._nFrom >= len(pc._pagesInfoVec): pc.Hide() return # Get the text hight tabHeight = self.CalcTabHeight(pageContainer) agwStyle = pc.GetParent().GetAGWWindowStyleFlag() # Calculate the number of rows required for drawing the tabs rect = pc.GetClientRect() clientWidth = rect.width # Set the maximum client size pc.SetSizeHints(self.GetButtonsAreaLength(pc), tabHeight) borderPen = wx.Pen(wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNSHADOW)) backBrush = wx.Brush(pc._tabAreaColour) noselBrush = wx.Brush(wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNFACE)) selBrush = wx.Brush(pc._activeTabColour) size = pc.GetSize() # Background dc.SetTextBackground(pc.GetBackgroundColour()) dc.SetTextForeground(pc._activeTextColour) dc.SetBrush(backBrush) # If border style is set, set the pen to be border pen colr = pc.GetBackgroundColour() dc.SetPen(wx.Pen(colr)) # if pc.HasAGWFlag(FNB_FF2): # lightFactor = (pc.HasAGWFlag(FNB_BACKGROUND_GRADIENT) and [70] or [0])[0] # PaintStraightGradientBox(dc, pc.GetClientRect(), pc._tabAreaColour, LightColour(pc._tabAreaColour, lightFactor)) # dc.SetBrush(wx.TRANSPARENT_BRUSH) dc.DrawRectangle(0, 0, size.x, size.y) # We always draw the bottom/upper line of the tabs # regradless the style dc.SetPen(borderPen) # if not pc.HasAGWFlag(FNB_FF2): # self.DrawTabsLine(pc, dc) # Restore the pen dc.SetPen(borderPen) # if pc.HasAGWFlag(FNB_VC71): # # greyLineYVal = (pc.HasAGWFlag(FNB_BOTTOM) and [0] or [size.y - 2])[0] # whiteLineYVal = (pc.HasAGWFlag(FNB_BOTTOM) and [3] or [size.y - 3])[0] # # pen = wx.Pen(wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DFACE)) # dc.SetPen(pen) # # # Draw thik grey line between the windows area and # # the tab area # for num in xrange(3): # dc.DrawLine(0, greyLineYVal + num, size.x, greyLineYVal 
+ num) # # wbPen = (pc.HasAGWFlag(FNB_BOTTOM) and [wx.BLACK_PEN] or [wx.WHITE_PEN])[0] # dc.SetPen(wbPen) # dc.DrawLine(1, whiteLineYVal, size.x - 1, whiteLineYVal) # # # Restore the pen # dc.SetPen(borderPen) # Draw labels normalFont = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT) boldFont = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT) boldFont.SetWeight(wx.FONTWEIGHT_BOLD) dc.SetFont(boldFont) posx = pc._pParent.GetPadding() # Update all the tabs from 0 to 'pc._nFrom' to be non visible for i in xrange(pc._nFrom): pc._pagesInfoVec[i].SetPosition(wx.Point(-1, -1)) pc._pagesInfoVec[i].GetRegion().Clear() count = pc._nFrom #---------------------------------------------------------- # Go over and draw the visible tabs #---------------------------------------------------------- x1 = x2 = -1 for i in xrange(pc._nFrom, len(pc._pagesInfoVec)): dc.SetPen(borderPen) # if not pc.HasAGWFlag(FNB_FF2): # dc.SetBrush((i==pc.GetSelection() and [selBrush] or [noselBrush])[0]) # Now set the font to the correct font dc.SetFont((i==pc.GetSelection() and [boldFont] or [normalFont])[0]) # Add the padding to the tab width # Tab width: # +-----------------------------------------------------------+ # | PADDING | IMG | IMG_PADDING | TEXT | PADDING | x |PADDING | # +-----------------------------------------------------------+ tabWidth = self.CalcTabWidth(pageContainer, i, tabHeight) # Check if we can draw more if posx + tabWidth + self.GetButtonsAreaLength(pc) >= clientWidth: break count = count + 1 # By default we clean the tab region pc._pagesInfoVec[i].GetRegion().Clear() # Clean the 'x' buttn on the tab. # A 'Clean' rectangle, is a rectangle with width or height # with values lower than or equal to 0 pc._pagesInfoVec[i].GetXRect().SetSize(wx.Size(-1, -1)) # Draw the tab (border, text, image & 'x' on tab) self.DrawTab(pc, dc, posx, i, tabWidth, tabHeight, pc._nTabXButtonStatus) if pc.GetSelection() == i: x1 = posx x2 = posx + tabWidth + 2 # Restore the text forground dc.SetTextForeground(pc._activeTextColour) # Update the tab position & size posy = (pc.HasAGWFlag(wx.lib.flatnotebook.FNB_BOTTOM) and [0] or [wx.lib.flatnotebook.VERTICAL_BORDER_PADDING])[0] pc._pagesInfoVec[i].SetPosition(wx.Point(posx, posy)) pc._pagesInfoVec[i].SetSize(wx.Size(tabWidth, tabHeight)) self.DrawFocusRectangle(dc, pc, pc._pagesInfoVec[i]) posx += tabWidth # Update all tabs that can not fit into the screen as non-visible for i in xrange(count, len(pc._pagesInfoVec)): pc._pagesInfoVec[i].SetPosition(wx.Point(-1, -1)) pc._pagesInfoVec[i].GetRegion().Clear() # Draw the left/right/close buttons # Left arrow self.DrawLeftArrow(pc, dc) self.DrawRightArrow(pc, dc) self.DrawX(pc, dc) self.DrawDropDownArrow(pc, dc) if pc.HasAGWFlag(wx.lib.flatnotebook.FNB_FF2): self.DrawTabsLine(pc, dc, x1, x2) def DrawFocusRectangle(self, dc, pageContainer, page): """ Draws a focus rectangle like the native :class:`Notebook`. :param `dc`: an instance of :class:`DC`; :param `pageContainer`: an instance of :class:`FlatNotebook`; :param `page`: an instance of :class:`PageInfo`, representing a page in the notebook. """ return def DrawTabsLine(self, pageContainer, dc, selTabX1=-1, selTabX2=-1): """ Draws a line over the tabs. :param `pageContainer`: an instance of :class:`FlatNotebook`; :param `dc`: an instance of :class:`DC`; :param `selTabX1`: first x coordinate of the tab line; :param `selTabX2`: second x coordinate of the tab line. 
""" pc = pageContainer clntRect = pc.GetClientRect() dc.SetPen(self.renderPen) dc.DrawLine(1, clntRect.height, clntRect.width-1, clntRect.height) def DrawTab(self, pageContainer, dc, posx, tabIdx, tabWidth, tabHeight, btnStatus): """ Draws a tab using the `Firefox 2` style. :param `pageContainer`: an instance of :class:`FlatNotebook`; :param `dc`: an instance of :class:`DC`; :param `posx`: the x position of the tab; :param `tabIdx`: the index of the tab; :param `tabWidth`: the tab's width; :param `tabHeight`: the tab's height; :param `btnStatus`: the status of the 'X' button inside this tab. """ pc = pageContainer if tabIdx == pc.GetSelection(): # borderPen = wx.Pen("#111111",1) borderPen = self._focusPen else: borderPen = wx.Pen("#888888",1) #------------------------------------ # Paint the tab with gradient #------------------------------------ rr = wx.RectPP((posx + 4, nb.VERTICAL_BORDER_PADDING), (posx + tabWidth ,tabHeight)) nb.DrawButton(dc, rr, pc.GetSelection() == tabIdx , not pc.HasAGWFlag(nb.FNB_BOTTOM)) #dc.SetBrush(wx.TRANSPARENT_BRUSH) dc.SetPen(borderPen) # Draw the tab as rounded rectangle dc.DrawRoundedRectangle(posx + 2, nb.VERTICAL_BORDER_PADDING, tabWidth-2, tabHeight, 4) # ----------------------------------- # Text and image drawing # ----------------------------------- # The width of the images are 16 pixels padding = pc.GetParent().GetPadding() shapePoints = int(tabHeight*math.tan(float(pc._pagesInfoVec[tabIdx].GetTabAngle())/180.0*math.pi)) hasImage = pc._pagesInfoVec[tabIdx].GetImageIndex() != -1 imageYCoord = (pc.HasAGWFlag(nb.FNB_BOTTOM) and [6] or [8])[0] if hasImage: textOffset = 2*padding + 16 + shapePoints/2 else: textOffset = padding + shapePoints/2 textOffset += 2 if tabIdx != pc.GetSelection(): # Set the text background to be like the vertical lines dc.SetTextForeground("#666666") if hasImage: imageXOffset = textOffset - 16 - padding pc._ImageList.Draw(pc._pagesInfoVec[tabIdx].GetImageIndex(), dc, posx + imageXOffset, imageYCoord, wx.IMAGELIST_DRAW_TRANSPARENT, True) pageTextColour = pc._pParent.GetPageTextColour(tabIdx) if pageTextColour is not None: dc.SetTextForeground(pageTextColour) dc.DrawText(pc.GetPageText(tabIdx), posx + textOffset, imageYCoord) # draw 'x' on tab (if enabled) if pc.HasAGWFlag(nb.FNB_X_ON_TAB) and tabIdx == pc.GetSelection(): textWidth, textHeight = dc.GetTextExtent(pc.GetPageText(tabIdx)) tabCloseButtonXCoord = posx + textOffset + textWidth + 1 # take a bitmap from the position of the 'x' button (the x on tab button) # this bitmap will be used later to delete old buttons tabCloseButtonYCoord = imageYCoord x_rect = wx.Rect(tabCloseButtonXCoord, tabCloseButtonYCoord, 16, 16) # Draw the tab self.DrawTabX(pc, dc, x_rect, tabIdx, btnStatus) #=============================================================================== # # End Custom Flat Notebook # #=============================================================================== class MySplitter(wx.SplitterWindow): def __init__(self, parent, ID, log): wx.SplitterWindow.__init__(self, parent, ID, style = wx.SP_LIVE_UPDATE | wx.SP_3DSASH | wx.SP_THIN_SASH) # def paintSplitter(evt): # # pass #self.Bind(wx.EVT_PAINT, paintSplitter) class MyArtProvider(wx.ArtProvider): def __init__(self): wx.ArtProvider.__init__(self) def CreateBitmap(self, artid, client, size): bmp = wx.NullBitmap bmp = makeCustomArt() return bmp class ShellFrame(wx.Frame): def __init__( self, parent, ID, title, pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE ): self.parent = parent 
wx.Frame.__init__(self, parent, ID, title, pos, size, style) self.shell = pyShell.shell.Shell(self, -1, introText="") self.shell.zoom(-1) self.Bind(wx.EVT_CLOSE, self.onClose) def onClose(self, event): w = "Your script is still running! " w += "Wait for it to finish or kill it..." if not self.shell.GetText().endswith(">>> "): self.parent.Warning(w) return self.Destroy() def getShell(self): return self.shell def makeCustomArt(): return wx.Image(os.path.join(os.path.dirname(sys.argv[0]), "./system/makerPart.png"), wx.BITMAP_TYPE_PNG).ConvertToBitmap()
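# ------------------------------------------------------------------------------
# Editorial usage sketch, not part of the original module: it assumes the
# custom notebook above constructs like a stock wx.lib.flatnotebook.FlatNotebook
# (the frame title and page names here are hypothetical).
if __name__ == "__main__":
    app = wx.App(False)
    demo_frame = wx.Frame(None, wx.ID_ANY, "maker notebook demo", size=(600, 400))
    # MyCustomNoteBook plugs MyPageContainer in as its tab area, and the
    # container's MyRendererMgr always hands back MakerRenderer, so every tab
    # is painted by the Firefox-2 style DrawTab() defined above.
    notebook = MyCustomNoteBook(demo_frame, wx.ID_ANY)
    notebook.AddPage(wx.Panel(notebook), "page 1")
    notebook.AddPage(wx.Panel(notebook), "page 2")
    demo_frame.Show()
    app.MainLoop()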
geraldspreer/the-maker
makerWxGUI.py
Python
gpl-3.0
128,335
[ "VisIt" ]
9af54592ed685b8aebb3a255d049474dcbfe2e1a0b8bdde8451f31e0d5e389ee
#! /usr/bin/env python
"""
Author: Robert A Petit III.
Date: 03/28/2016

Read BLAST output in JSON format and determine the hamming distance
between each k-mer and its best hit.
"""
import argparse as ap
import json

if __name__ == '__main__':
    parser = ap.ArgumentParser(
        prog='parse-hamming-distance.py',
        conflict_handler='resolve',
        description=("Read BLAST JSON output and determine the hamming "
                     "distance between a k-mer and its best hit.")
    )
    parser.add_argument('json', type=str, metavar="BLAST_JSON",
                        help=('BLAST output in JSON format.'))
    args = parser.parse_args()

    json_data = []
    with open(args.json) as fh:
        record = []
        first_line = True
        for line in fh:
            # A '{' at column 0 starts a new JSON document; flush the
            # previous record before collecting the next one.
            if line.startswith('{') and not first_line:
                json_data.append(json.loads(''.join(record)))
                record = []
            if first_line:
                first_line = False
            record.append(line)
        json_data.append(json.loads(''.join(record)))

    for entry in json_data:
        hit = entry['BlastOutput2']['report']['results']['search']
        hsp = hit['hits'][0]['hsps'][0]

        # Includes mismatches and gaps
        mismatch = hsp['align_len'] - hsp['identity']

        # Hamming distance
        hd = mismatch
        if hit['query_len'] > hsp['align_len']:
            # Include those bases that weren't aligned
            hd = hit['query_len'] - hsp['align_len'] + mismatch

        print('{0}\t{1}'.format(
            hit['query_title'],
            hd
        ))
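# ------------------------------------------------------------------------------
# Editorial sketch, not part of the original script: the distance computed in
# the loop above, factored into a helper with a hypothetical name so the
# arithmetic can be checked in isolation.
def _hamming_from_hsp(query_len, align_len, identity):
    """Mismatches and gaps within the alignment, plus unaligned query bases."""
    mismatch = align_len - identity
    if query_len > align_len:
        return query_len - align_len + mismatch
    return mismatch

# e.g. a 31-mer aligned over 29 bases with 27 identities -> distance 4
assert _hamming_from_hsp(31, 29, 27) == 4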
Read-Lab-Confederation/nyc-subway-anthrax-study
data/05-custom-bcg-assay/bin/parse-hamming-distance.py
Python
mit
1,642
[ "BLAST" ]
a8f294d90f03006ddd3a7dd11e936faa95e067969e2a56436cb0217e0ffe8459
import abc import math import numpy as np import scipy.stats UNDEFINED = - np.inf class Prior(object): __metaclass__ = abc.ABCMeta def __init__(self, prior_name): self._prior_name = prior_name @property def name(self): """Return the name of the function""" return self._prior_name # This is used only by children def _set_name(self, new_name): self._prior_name = new_name @abc.abstractmethod def __call__(self, value): """ Return the logarithm of the prior pdf at the given value """ pass class UniformPrior(Prior): def __init__(self, min_value, max_value): """ A uniform (constant) prior. It is constant between min_value and max_value, and undefined outside. :param min_value: lower bound for the range where the prior is defined :param max_value: upper bound for the range where the prior is defined :return: """ # Verify that min_value < max_value assert min_value < max_value, "Minimum must be smaller than maximum" # Store boundaries self._min_value = min_value self._max_value = max_value # Init parent class super(UniformPrior, self).__init__("UniformPrior") def _get_min_value(self): """ :return: the lower bound of the range where the prior is defined """ return self._min_value def _set_min_value(self, new_minimum): """ Set the lower bound of the range where the prior is defined :return: (none) """ assert new_minimum < self._max_value, ("Minimum must be smaller than maximum. You can change both " "at the same time by using the set_bounds() method.") self._min_value = new_minimum min_value = property(_get_min_value, _set_min_value, doc='Set or get the lower bound of the range where the prior is defined') def _get_max_value(self): """ :return: the upper bound of the range where the prior is defined """ return self._max_value def _set_max_value(self, new_maximum): """ Set the upper bound of the range where the prior is defined :return: (none) """ assert new_maximum > self._min_value, ("Maximum must be larger than minimum. You can change both " "at the same time by using the set_bounds() method.") self._max_value = new_maximum max_value = property(_get_max_value, _set_max_value, doc='Set or get the upper bound of the range where the prior is defined') def set_bounds(self, new_min_value, new_max_value): """ Set the upper and lower bound of the range where the prior is defined :param new_min_value: lower bound :param new_max_value: upper bound :return: (none) """ self._min_value = new_min_value self._max_value = new_max_value def __call__(self, value): if self.min_value < value < self.max_value: return 0.0 else: return UNDEFINED def multinest_call(self, cube): return cube * (self._max_value - self._min_value) + self._min_value class LogUniformPrior(UniformPrior): def __init__(self, min_value, max_value): """ A log-uniform prior: f(x) = log(1 / x) It is defined between min_value and max_value, and undefined outside. 
:param min_value: lower bound for the range where the prior is defined :param max_value: upper bound for the range where the prior is defined :return: """ # Init parent class super(LogUniformPrior, self).__init__(min_value, max_value) # Update the name self._set_name("LogUniformPrior") # Override the __call__ method of the UniformPrior class def __call__(self, value): if self._min_value < value < self._max_value and value > 0: # This is = log(1/value) return -math.log10(value) else: return UNDEFINED def multinest_call(self, cube): decades = math.log10(self._max_value) - math.log10(self._min_value) start_decade = math.log10(self._min_value) return 10 ** ((cube * decades) + start_decade) class GaussianPrior(Prior): def __init__(self, mu, sigma): """ A Gaussian prior centered in mu with standard deviation sigma. Note that the value of the prior too far from the center will be a special value which means "undefined", to avoid numerical errors in Bayesian samplers. :param mu: center of the Gaussian :param sigma: standard deviation of the Gaussian :return: """ self._mu = float(mu) self._sigma = abs(float(sigma)) self._update_pdf() # Init parent class super(GaussianPrior, self).__init__("GaussianPrior") def _update_pdf(self): # Update the PDF with the current sigma and mu self._norm = scipy.stats.norm(loc=self._mu, scale=self._sigma) def _set_mu(self, new_mu): """ Set a new center for the Gaussian :param new_mu: new center :return: (none) """ self._mu = new_mu self._update_pdf() def _get_mu(self): """ :return: get the center of the Gaussian """ return self._mu mu = property(_get_mu, _set_mu, doc="Get or set the center of the Gaussian") def _set_sigma(self, new_sigma): """ Set a new sigma for the Gaussian :param new_sigma: new sigma :return: (none) """ self._sigma = new_sigma self._update_pdf() def _get_sigma(self): """ :return: get the standard deviation of the Gaussian """ return self._sigma sigma = property(_get_sigma, _set_sigma, doc="Get or set the standard deviation of the Gaussian") def __call__(self, x): # Here I multiply by sigma because of scipy "scale" behavior # (see scipy documentation) value = self._norm.pdf(x) * self._sigma # Truncate the value of the Gaussian, to avoid numerical errors, i.e., return UNDEFINED if the value of # the prior is too small if value < 1e-15: return UNDEFINED else: return math.log10(value)
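# ------------------------------------------------------------------------------
# Editorial usage sketch, not part of the original module; the numeric
# comments are what the classes above return for these inputs.
if __name__ == "__main__":
    uniform = UniformPrior(1.0, 100.0)
    log_uniform = LogUniformPrior(1.0, 100.0)
    gauss = GaussianPrior(mu=0.0, sigma=1.0)

    print(uniform(50.0))        # 0.0   (flat inside the bounds)
    print(uniform(200.0))       # -inf  (UNDEFINED outside the bounds)
    print(log_uniform(10.0))    # -1.0  (= -log10(10))
    print(gauss(0.0))           # ~ -0.399 (log10 of the peak density)

    # multinest_call maps a unit-cube coordinate onto the parameter range:
    # the midpoint for the uniform prior, the middle decade for the
    # log-uniform prior.
    print(uniform.multinest_call(0.5))      # 50.5
    print(log_uniform.multinest_call(0.5))  # 10.0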
sybenzvi/3ML
threeML/bayesian/priors.py
Python
bsd-3-clause
6,566
[ "Gaussian" ]
f7e3ded7977bca9613fa99a6e2524536d8f6b0c516e3c47ab10eb0fe881c9ed7
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals from gratipay.testing import BrowserHarness class Tests(BrowserHarness): def check(self, status, has_request_button, has_check_button): self.make_participant('alice', claimed_time='now', status_of_1_0_payout=status) self.sign_in('alice') self.visit('/~alice/settings/') self.css('.account-details a button') assert self.has_text('Request 1.0 Payout') is has_request_button self.css('.account-details a button') assert self.has_text('Check 1.0 Payout') is has_check_button def test_too_little_has_neither(self): self.check('too-little', False, False) def test_pending_application_has_request_button(self): self.check('pending-application', True, False) def test_pending_review_has_check_button(self): self.check('pending-review', False, True) def test_rejected_has_neither(self): self.check('rejected', False, False) def test_pending_payout_has_check_button(self): self.check('pending-payout', False, True) def test_pending_completed_has_neither(self): self.check('completed', False, False)
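# Editorial summary, not part of the original tests: the button matrix the
# checks above exercise, keyed by status_of_1_0_payout.
#
#   status               Request button   Check button
#   too-little           no               no
#   pending-application  yes              no
#   pending-review       no               yes
#   rejected             no               no
#   pending-payout       no               yes
#   completed            no               no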
gratipay/gratipay.com
tests/ttw/test_1_0_payout.py
Python
mit
1,241
[ "VisIt" ]
f9016469af0dee6cd26b4c08c933ddff9c981786533910d57708c09f9d439768
"""Quality control and summary metrics for next-gen alignments and analysis. """ import collections import contextlib import csv import os import glob import shutil import subprocess import pandas as pd import lxml.html import yaml from datetime import datetime from collections import defaultdict # allow graceful during upgrades try: import matplotlib matplotlib.use('Agg', force=True) import matplotlib.pyplot as plt plt.ioff() except ImportError: plt = None try: from fadapa import Fadapa except ImportError: Fadapa = None import pybedtools import pysam import toolz as tz import toolz.dicttoolz as dtz from bcbio import bam, utils from bcbio.distributed.transaction import file_transaction, tx_tmpdir from bcbio.log import logger from bcbio.pipeline import config_utils, run_info from bcbio.install import _get_data_dir from bcbio.provenance import do import bcbio.rnaseq.qc import bcbio.pipeline.datadict as dd from bcbio.variation import bedutils from bcbio.variation import coverage as cov from bcbio.ngsalign.postalign import dedup_bam from bcbio.rnaseq import gtf # ## High level functions to generate summary def generate_parallel(samples, run_parallel): """Provide parallel preparation of summary information for alignment and variant calling. """ samples = run_parallel("pipeline_summary", samples) samples = run_parallel("coverage_report", samples) samples = run_parallel("qc_report_summary", [samples]) qsign_info = run_parallel("qsignature_summary", [samples]) summary_file = write_project_summary(samples, qsign_info) out = [] for data in samples: if "summary" not in data[0]: data[0]["summary"] = {} data[0]["summary"]["project"] = summary_file if qsign_info: data[0]["summary"]["mixup_check"] = qsign_info[0]["out_dir"] out.append(data) out = _add_researcher_summary(out, summary_file) return out def pipeline_summary(data): """Provide summary information on processing sample. """ work_bam = data.get("align_bam") if data["analysis"].lower().startswith("smallrna-seq"): work_bam = data["clean_fastq"] data["summary"] = _run_qc_tools(work_bam, data) elif data["analysis"].lower().startswith("chip-seq"): work_bam = data["raw_bam"] data["summary"] = _run_qc_tools(work_bam, data) elif dd.get_ref_file(data) is not None and work_bam and work_bam.endswith(".bam"): logger.info("Generating summary files: %s" % dd.get_sample_name(data)) data["summary"] = _run_qc_tools(work_bam, data) return [[data]] def prep_pdf(qc_dir, config): """Create PDF from HTML summary outputs in QC directory. Requires wkhtmltopdf installed: http://www.msweet.org/projects.php?Z1 Thanks to: https://www.biostars.org/p/16991/ Works around issues with CSS conversion on CentOS by adjusting CSS. """ html_file = os.path.join(qc_dir, "fastqc", "fastqc_report.html") html_fixed = "%s-fixed%s" % os.path.splitext(html_file) try: topdf = config_utils.get_program("wkhtmltopdf", config) except config_utils.CmdNotFound: topdf = None if topdf and utils.file_exists(html_file): out_file = "%s.pdf" % os.path.splitext(html_file)[0] if not utils.file_exists(out_file): cmd = ("sed 's/div.summary/div.summary-no/' %s | sed 's/div.main/div.main-no/' > %s" % (html_file, html_fixed)) do.run(cmd, "Fix fastqc CSS to be compatible with wkhtmltopdf") cmd = [topdf, html_fixed, out_file] do.run(cmd, "Convert QC HTML to PDF") return out_file def _run_qc_tools(bam_file, data): """Run a set of third party quality control tools, returning QC directory and metrics. 
:param bam_file: alignments in bam format :param data: dict with all configuration information :returns: dict with output of different tools """ metrics = {} to_run = [] if "fastqc" not in tz.get_in(("config", "algorithm", "tools_off"), data, []): to_run.append(("fastqc", _run_fastqc)) if data["analysis"].lower().startswith("rna-seq"): to_run.append(("bamtools", _run_bamtools_stats)) if gtf.is_qualimap_compatible(dd.get_gtf_file(data)): to_run.append(("qualimap", _rnaseq_qualimap)) elif data["analysis"].lower().startswith("chip-seq"): to_run.append(["bamtools", _run_bamtools_stats]) elif not data["analysis"].lower().startswith("smallrna-seq"): to_run += [("bamtools", _run_bamtools_stats), ("gemini", _run_gemini_stats)] if data["analysis"].lower().startswith(("standard", "variant2")): to_run.append(["qsignature", _run_qsignature_generator]) if any([tool in tz.get_in(("config", "algorithm", "tools_on"), data, []) for tool in ["qualimap", "qualimap_full"]]): to_run.append(("qualimap", _run_qualimap)) qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"])) metrics = {} for program_name, qc_fn in to_run: cur_qc_dir = os.path.join(qc_dir, program_name) cur_metrics = qc_fn(bam_file, data, cur_qc_dir) metrics.update(cur_metrics) if data['config']["algorithm"].get("kraken", None): if data["analysis"].lower().startswith("smallrna-seq"): logger.info("Kraken is not compatible with srnaseq pipeline yet.") else: ratio = bam.get_aligned_reads(bam_file, data) cur_metrics = _run_kraken(data, ratio) metrics.update(cur_metrics) bam.remove("%s-downsample%s" % os.path.splitext(bam_file)) metrics["Name"] = dd.get_sample_name(data) metrics["Quality format"] = utils.get_in(data, ("config", "algorithm", "quality_format"), "standard").lower() return {"qc": qc_dir, "metrics": metrics} # ## Generate project level QC summary for quickly assessing large projects def write_project_summary(samples, qsign_info=None): """Write project summary information on the provided samples. write out dirs, genome resources, """ work_dir = samples[0][0]["dirs"]["work"] out_file = os.path.join(work_dir, "project-summary.yaml") upload_dir = (os.path.join(work_dir, samples[0][0]["upload"]["dir"]) if "dir" in samples[0][0]["upload"] else "") date = str(datetime.now()) prev_samples = _other_pipeline_samples(out_file, samples) with open(out_file, "w") as out_handle: yaml.safe_dump({"date": date}, out_handle, default_flow_style=False, allow_unicode=False) if qsign_info: qsign_out = utils.deepish_copy(qsign_info[0]) qsign_out.pop("out_dir", None) yaml.safe_dump({"qsignature": qsign_out}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"upload": upload_dir}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"bcbio_system": samples[0][0]["config"].get("bcbio_system", "")}, out_handle, default_flow_style=False, allow_unicode=False) yaml.safe_dump({"samples": prev_samples + [_save_fields(sample[0]) for sample in samples]}, out_handle, default_flow_style=False, allow_unicode=False) return out_file def _other_pipeline_samples(summary_file, cur_samples): """Retrieve samples produced previously by another pipeline in the summary output. 
""" cur_descriptions = set([s[0]["description"] for s in cur_samples]) out = [] if os.path.exists(summary_file): with open(summary_file) as in_handle: for s in yaml.load(in_handle).get("samples", []): if s["description"] not in cur_descriptions: out.append(s) return out def _save_fields(sample): to_save = ["dirs", "genome_resources", "genome_build", "sam_ref", "metadata", "description"] saved = {k: sample[k] for k in to_save if k in sample} if "summary" in sample: saved["summary"] = {"metrics": sample["summary"]["metrics"]} # check if disambiguation was run if "disambiguate" in sample: if utils.file_exists(sample["disambiguate"]["summary"]): disambigStats = _parse_disambiguate(sample["disambiguate"]["summary"]) saved["summary"]["metrics"]["Disambiguated %s reads" % str(sample["genome_build"])] = disambigStats[0] disambigGenome = (sample["config"]["algorithm"]["disambiguate"][0] if isinstance(sample["config"]["algorithm"]["disambiguate"], (list, tuple)) else sample["config"]["algorithm"]["disambiguate"]) saved["summary"]["metrics"]["Disambiguated %s reads" % disambigGenome] = disambigStats[1] saved["summary"]["metrics"]["Disambiguated ambiguous reads"] = disambigStats[2] return saved def _parse_disambiguate(disambiguatestatsfilename): """Parse disambiguation stats from given file. """ disambig_stats = [0, 0, 0] with open(disambiguatestatsfilename, "r") as in_handle: for i, line in enumerate(in_handle): fields = line.strip().split("\t") if i == 0: assert fields == ['sample', 'unique species A pairs', 'unique species B pairs', 'ambiguous pairs'] else: disambig_stats = [x + int(y) for x, y in zip(disambig_stats, fields[1:])] return disambig_stats # ## Generate researcher specific summaries def _add_researcher_summary(samples, summary_yaml): """Generate summary files per researcher if organized via a LIMS. """ by_researcher = collections.defaultdict(list) for data in (x[0] for x in samples): researcher = utils.get_in(data, ("upload", "researcher")) if researcher: by_researcher[researcher].append(data["description"]) out_by_researcher = {} for researcher, descrs in by_researcher.items(): out_by_researcher[researcher] = _summary_csv_by_researcher(summary_yaml, researcher, set(descrs), samples[0][0]) out = [] for data in (x[0] for x in samples): researcher = utils.get_in(data, ("upload", "researcher")) if researcher: data["summary"]["researcher"] = out_by_researcher[researcher] out.append([data]) return out def _summary_csv_by_researcher(summary_yaml, researcher, descrs, data): """Generate a CSV file with summary information for a researcher on this project. 
""" out_file = os.path.join(utils.safe_makedir(os.path.join(data["dirs"]["work"], "researcher")), "%s-summary.tsv" % run_info.clean_name(researcher)) metrics = ["Total reads", "Mapped reads", "Mapped reads pct", "Duplicates", "Duplicates pct"] with open(summary_yaml) as in_handle: with open(out_file, "w") as out_handle: writer = csv.writer(out_handle, dialect="excel-tab") writer.writerow(["Name"] + metrics) for sample in yaml.safe_load(in_handle)["samples"]: if sample["description"] in descrs: row = [sample["description"]] + [utils.get_in(sample, ("summary", "metrics", x), "") for x in metrics] writer.writerow(row) return out_file # ## Run and parse read information from FastQC class FastQCParser: def __init__(self, base_dir, sample=None): self._dir = base_dir self.sample = sample def get_fastqc_summary(self): ignore = set(["Total Sequences", "Filtered Sequences", "Filename", "File type", "Encoding"]) stats = {} for stat_line in self._fastqc_data_section("Basic Statistics")[1:]: k, v = stat_line.split("\t")[:2] if k not in ignore: stats[k] = v return stats def _fastqc_data_section(self, section_name): out = [] in_section = False data_file = os.path.join(self._dir, "fastqc_data.txt") if os.path.exists(data_file): with open(data_file) as in_handle: for line in in_handle: if line.startswith(">>%s" % section_name): in_section = True elif in_section: if line.startswith(">>END"): break out.append(line.rstrip("\r\n")) return out def save_sections_into_file(self): data_file = os.path.join(self._dir, "fastqc_data.txt") if os.path.exists(data_file) and Fadapa: parser = Fadapa(data_file) module = [m[1] for m in parser.summary()][2:9] for m in module: out_file = os.path.join(self._dir, m.replace(" ", "_") + ".tsv") dt = self._get_module(parser, m) dt.to_csv(out_file, sep="\t", index=False) def _get_module(self, parser, module): """ Get module using fadapa package """ dt = [] lines = parser.clean_data(module) header = lines[0] for data in lines[1:]: if data[0].startswith("#"): #some modules have two headers header = data continue if data[0].find("-") > -1: # expand positions 1-3 to 1, 2, 3 f, s = map(int, data[0].split("-")) for pos in range(f, s): dt.append([str(pos)] + data[1:]) else: dt.append(data) dt = pd.DataFrame(dt) dt.columns = [h.replace(" ", "_") for h in header] dt['sample'] = self.sample return dt def _run_gene_coverage(bam_file, data, out_dir): out_file = os.path.join(out_dir, "gene_coverage.pdf") ref_file = utils.get_in(data, ("genome_resources", "rnaseq", "transcripts")) count_file = data["count_file"] if utils.file_exists(out_file): return out_file with file_transaction(data, out_file) as tx_out_file: plot_gene_coverage(bam_file, ref_file, count_file, tx_out_file) return {"gene_coverage": out_file} def _run_kraken(data, ratio): """Run kraken, generating report in specified directory and parsing metrics. Using only first paired reads. 
""" # logger.info("Number of aligned reads < than 0.60 in %s: %s" % (dd.get_sample_name(data), ratio)) logger.info("Running kraken to determine contaminant: %s" % dd.get_sample_name(data)) qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"])) kraken_out = os.path.join(qc_dir, "kraken") out = out_stats = None db = data['config']["algorithm"]["kraken"] kraken_cmd = config_utils.get_program("kraken", data["config"]) if db == "minikraken": db = os.path.join(_get_data_dir(), "genomes", "kraken", "minikraken") if not os.path.exists(db): logger.info("kraken: no database found %s, skipping" % db) return {"kraken_report": "null"} if not os.path.exists(os.path.join(kraken_out, "kraken_out")): work_dir = os.path.dirname(kraken_out) utils.safe_makedir(work_dir) num_cores = data["config"]["algorithm"].get("num_cores", 1) fn_file = data["files"][0] if fn_file.endswith("bam"): logger.info("kraken: need fasta files as input") return {"kraken_report": "null"} with tx_tmpdir(data, work_dir) as tx_tmp_dir: with utils.chdir(tx_tmp_dir): out = os.path.join(tx_tmp_dir, "kraken_out") out_stats = os.path.join(tx_tmp_dir, "kraken_stats") cat = "zcat" if fn_file.endswith(".gz") else "cat" cl = ("{cat} {fn_file} | {kraken_cmd} --db {db} --quick " "--preload --min-hits 2 " "--threads {num_cores} " "--out {out} --fastq-input /dev/stdin 2> {out_stats}").format(**locals()) do.run(cl, "kraken: %s" % dd.get_sample_name(data)) if os.path.exists(kraken_out): shutil.rmtree(kraken_out) shutil.move(tx_tmp_dir, kraken_out) metrics = _parse_kraken_output(kraken_out, db, data) return metrics def _parse_kraken_output(out_dir, db, data): """Parse kraken stat info comming from stderr, generating report with kraken-report """ in_file = os.path.join(out_dir, "kraken_out") stat_file = os.path.join(out_dir, "kraken_stats") out_file = os.path.join(out_dir, "kraken_summary") kraken_cmd = config_utils.get_program("kraken-report", data["config"]) classify = unclassify = None with open(stat_file, 'r') as handle: for line in handle: if line.find(" classified") > -1: classify = line[line.find("(") + 1:line.find(")")] if line.find(" unclassified") > -1: unclassify = line[line.find("(") + 1:line.find(")")] if os.path.getsize(in_file) > 0 and not os.path.exists(out_file): with file_transaction(data, out_file) as tx_out_file: cl = ("{kraken_cmd} --db {db} {in_file} > {tx_out_file}").format(**locals()) do.run(cl, "kraken report: %s" % dd.get_sample_name(data)) kraken = {"kraken_clas": classify, "kraken_unclas": unclassify} kraken_sum = _summarize_kraken(out_file) kraken.update(kraken_sum) return kraken def _summarize_kraken(fn): """get the value at species level""" kraken = {} list_sp, list_value = [], [] with open(fn) as handle: for line in handle: cols = line.strip().split("\t") sp = cols[5].strip() if len(sp.split(" ")) > 1 and not sp.startswith("cellular"): list_sp.append(sp) list_value.append(cols[0]) kraken = {"kraken_sp": list_sp, "kraken_value": list_value} return kraken def _run_fastqc(bam_file, data, fastqc_out): """Run fastqc, generating report in specified directory and parsing metrics. Downsamples to 10 million reads to avoid excessive processing times with large files, unless we're running a Standard/smallRNA-seq/QC pipeline. Handles fastqc 0.11+, which use a single HTML file and older versions that use a directory of files + images. 
The goal is to eventually move to only 0.11+ """ sentry_file = os.path.join(fastqc_out, "fastqc_report.html") if not os.path.exists(sentry_file): work_dir = os.path.dirname(fastqc_out) utils.safe_makedir(work_dir) ds_bam = (bam.downsample(bam_file, data, 1e7) if data.get("analysis", "").lower() not in ["standard", "smallrna-seq"] else None) bam_file = ds_bam if ds_bam else bam_file frmt = "bam" if bam_file.endswith("bam") else "fastq" fastqc_name = utils.splitext_plus(os.path.basename(bam_file))[0] fastqc_clean_name = dd.get_sample_name(data) num_cores = data["config"]["algorithm"].get("num_cores", 1) with tx_tmpdir(data, work_dir) as tx_tmp_dir: with utils.chdir(tx_tmp_dir): cl = [config_utils.get_program("fastqc", data["config"]), "-d", tx_tmp_dir, "-t", str(num_cores), "--extract", "-o", tx_tmp_dir, "-f", frmt, bam_file] do.run(cl, "FastQC: %s" % dd.get_sample_name(data)) tx_fastqc_out = os.path.join(tx_tmp_dir, "%s_fastqc" % fastqc_name) tx_combo_file = os.path.join(tx_tmp_dir, "%s_fastqc.html" % fastqc_name) if not os.path.exists(sentry_file) and os.path.exists(tx_combo_file): utils.safe_makedir(fastqc_out) # Use sample name for reports instead of bam file name with open(os.path.join(tx_fastqc_out, "fastqc_data.txt"), 'r') as fastqc_bam_name, \ open(os.path.join(tx_fastqc_out, "_fastqc_data.txt"), 'w') as fastqc_sample_name: for line in fastqc_bam_name: fastqc_sample_name.write(line.replace(os.path.basename(bam_file), fastqc_clean_name)) shutil.move(os.path.join(tx_fastqc_out, "_fastqc_data.txt"), os.path.join(fastqc_out, 'fastqc_data.txt')) shutil.move(tx_combo_file, sentry_file) if os.path.exists("%s.zip" % tx_fastqc_out): shutil.move("%s.zip" % tx_fastqc_out, os.path.join(fastqc_out, "%s.zip" % fastqc_clean_name)) elif not os.path.exists(sentry_file): if os.path.exists(fastqc_out): shutil.rmtree(fastqc_out) shutil.move(tx_fastqc_out, fastqc_out) parser = FastQCParser(fastqc_out, dd.get_sample_name(data)) stats = parser.get_fastqc_summary() parser.save_sections_into_file() return stats def _run_complexity(bam_file, data, out_dir): try: import pandas as pd import statsmodels.formula.api as sm except ImportError: return {"Unique Starts Per Read": "NA"} SAMPLE_SIZE = 1000000 base, _ = os.path.splitext(os.path.basename(bam_file)) utils.safe_makedir(out_dir) out_file = os.path.join(out_dir, base + ".pdf") df = bcbio.rnaseq.qc.starts_by_depth(bam_file, data["config"], SAMPLE_SIZE) if not utils.file_exists(out_file): with file_transaction(data, out_file) as tmp_out_file: df.plot(x='reads', y='starts', title=bam_file + " complexity") fig = plt.gcf() fig.savefig(tmp_out_file) print "file saved as", out_file print "out_dir is", out_dir return bcbio.rnaseq.qc.estimate_library_complexity(df) # ## Qualimap def _parse_num_pct(k, v): num, pct = v.split(" / ") return {k: num.replace(",", "").strip(), "%s pct" % k: pct.strip()} def _parse_qualimap_globals(table): """Retrieve metrics of interest from globals table. """ out = {} want = {"Mapped reads": _parse_num_pct, "Duplication rate": lambda k, v: {k: v}} for row in table.xpath("table/tr"): col, val = [x.text for x in row.xpath("td")] if col in want: out.update(want[col](col, val)) return out def _parse_qualimap_globals_inregion(table): """Retrieve metrics from the global targeted region table. 
""" out = {} for row in table.xpath("table/tr"): col, val = [x.text for x in row.xpath("td")] if col == "Mapped reads": out.update(_parse_num_pct("%s (in regions)" % col, val)) return out def _parse_qualimap_coverage(table): """Parse summary qualimap coverage metrics. """ out = {} for row in table.xpath("table/tr"): col, val = [x.text for x in row.xpath("td")] if col == "Mean": out["Coverage (Mean)"] = val return out def _parse_qualimap_insertsize(table): """Parse insert size metrics. """ out = {} for row in table.xpath("table/tr"): col, val = [x.text for x in row.xpath("td")] if col == "Median": out["Insert size (Median)"] = val return out def _parse_qualimap_metrics(report_file): """Extract useful metrics from the qualimap HTML report file. """ if not utils.file_exists(report_file): return {} out = {} parsers = {"Globals": _parse_qualimap_globals, "Globals (inside of regions)": _parse_qualimap_globals_inregion, "Coverage": _parse_qualimap_coverage, "Coverage (inside of regions)": _parse_qualimap_coverage, "Insert size": _parse_qualimap_insertsize, "Insert size (inside of regions)": _parse_qualimap_insertsize} root = lxml.html.parse(report_file).getroot() for table in root.xpath("//div[@class='table-summary']"): header = table.xpath("h3")[0].text if header in parsers: out.update(parsers[header](table)) new_names = [] for metric in out: new_names.append(metric + "_qualimap_1e7reads_est") out = dict(zip(new_names, out.values())) return out def _bed_to_bed6(orig_file, out_dir): """Convert bed to required bed6 inputs. """ bed6_file = os.path.join(out_dir, "%s-bed6%s" % os.path.splitext(os.path.basename(orig_file))) if not utils.file_exists(bed6_file): with open(bed6_file, "w") as out_handle: for i, region in enumerate(list(x) for x in pybedtools.BedTool(orig_file)): region = [x for x in list(region) if x] fillers = [str(i), "1.0", "+"] full = region + fillers[:6 - len(region)] out_handle.write("\t".join(full) + "\n") return bed6_file def _run_qualimap(bam_file, data, out_dir): """Run qualimap to assess alignment quality metrics. """ resources = config_utils.get_resources("qualimap", data["config"]) options = " ".join(resources.get("options", "")) report_file = os.path.join(out_dir, "qualimapReport.html") pdf_file = "qualimapReport.pdf" if not utils.file_exists(report_file) and not utils.file_exists(os.path.join(out_dir, pdf_file)): if "qualimap_full" in tz.get_in(("config", "algorithm", "tools_on"), data, []): logger.info("Full qualimap analysis for %s may be slow." 
% bam_file) ds_bam = bam_file else: ds_bam = bam.downsample(bam_file, data, 1e7) bam_file = ds_bam if ds_bam else bam_file if options.find("PDF") > -1: options = "%s -outfile %s" % (options, pdf_file) utils.safe_makedir(out_dir) num_cores = data["config"]["algorithm"].get("num_cores", 1) qualimap = config_utils.get_program("qualimap", data["config"]) max_mem = config_utils.adjust_memory(resources.get("memory", "1G"), num_cores) cmd = ("unset DISPLAY && {qualimap} bamqc -bam {bam_file} -outdir {out_dir} " "-nt {num_cores} --java-mem-size={max_mem} {options}") species = tz.get_in(("genome_resources", "aliases", "ensembl"), data, "") if species in ["HUMAN", "MOUSE"]: cmd += " -gd {species}" regions = bedutils.merge_overlaps(dd.get_variant_regions(data), data) if regions: bed6_regions = _bed_to_bed6(regions, out_dir) cmd += " -gff {bed6_regions}" do.run(cmd.format(**locals()), "Qualimap: %s" % dd.get_sample_name(data)) return _parse_qualimap_metrics(report_file) # ## RNAseq Qualimap def _parse_metrics(metrics): # skipped metrics can sometimes be in unicode, replace unicode with NA if it exists metrics = dtz.valmap(lambda x: 'nan' if isinstance(x, unicode) else x, metrics) missing = set(["Genes Detected", "Transcripts Detected", "Mean Per Base Cov."]) correct = set(["rRNA", "rRNA_rate"]) percentages = set(["Intergenic pct", "Intronic pct", "Exonic pct"]) to_change = dict({"5'-3' bias": 1, "Intergenic pct": "Intergenic Rate", "Intronic pct": "Intronic Rate", "Exonic pct": "Exonic Rate", "Not aligned": 0, 'Aligned to genes': 0, 'Non-unique alignment': 0, "No feature assigned": 0, "Duplication Rate of Mapped": 1, "Fragment Length Mean": 1, "Ambiguou alignment": 0}) total = ["Not aligned", "Aligned to genes", "No feature assigned"] out = {} total_reads = sum([int(metrics[name]) for name in total]) out['Mapped'] = sum([int(metrics[name]) for name in total[1:]]) out['Mapping Rate'] = 1.0 * int(out['Mapped']) / total_reads [out.update({name: 0}) for name in missing] out.update({key: val for key, val in metrics.iteritems() if key in correct}) [metrics.update({name: 1.0 * float(metrics[name]) / 100}) for name in percentages] for name in to_change: if not to_change[name]: continue try: if to_change[name] == 1: out.update({name: float(metrics[name])}) else: out.update({to_change[name]: float(metrics[name])}) # if we can't convert metrics[name] to float (?'s or other non-floats) except ValueError: continue return out def _detect_duplicates(bam_file, out_dir, data): """ count duplicate percentage """ out_file = os.path.join(out_dir, "dup_metrics.txt") if not utils.file_exists(out_file): dup_align_bam = dedup_bam(bam_file, data) num_cores = dd.get_num_cores(data) with file_transaction(out_file) as tx_out_file: sambamba = config_utils.get_program("sambamba", data, default="sambamba") dup_count = ("{sambamba} view --nthreads {num_cores} --count " "-F 'duplicate and not unmapped' " "{bam_file} >> {tx_out_file}") message = "Counting duplicates in {bam_file}.".format(bam_file=bam_file) do.run(dup_count.format(**locals()), message) tot_count = ("{sambamba} view --nthreads {num_cores} --count " "-F 'not unmapped' " "{bam_file} >> {tx_out_file}") message = "Counting reads in {bam_file}.".format(bam_file=bam_file) do.run(tot_count.format(**locals()), message) with open(out_file) as in_handle: dupes = float(in_handle.next().strip()) total = float(in_handle.next().strip()) return {"Duplication Rate of Mapped": dupes / total} def _transform_browser_coor(rRNA_interval, rRNA_coor): """ transform interval format to browser 
coord: chr:start-end """ with open(rRNA_coor, 'w') as out_handle: with open(rRNA_interval, 'r') as in_handle: for line in in_handle: c, bio, source, s, e = line.split("\t")[:5] if bio.startswith("rRNA"): out_handle.write(("{0}:{1}-{2}\n").format(c, s, e)) def _detect_rRNA(data): gtf_file = dd.get_gtf_file(data) count_file = dd.get_count_file(data) rrna_features = gtf.get_rRNA(gtf_file) genes = [x[0] for x in rrna_features if x] if not genes: return {'rRNA': "NA", "rRNA_rate": "NA"} count_table = pd.read_csv(count_file, sep="\t", names=["id", "counts"]) rrna = sum(count_table[count_table["id"].isin(genes)]["counts"]) rrna_rate = float(rrna) / sum(count_table["counts"]) return {'rRNA': str(rrna), 'rRNA_rate': str(rrna_rate)} def _parse_qualimap_rnaseq(table): """ Retrieve metrics of interest from globals table. """ out = {} for row in table.xpath("table/tr"): col, val = [x.text for x in row.xpath("td")] col = col.replace(":", "").strip() val = val.replace(",", "") m = {col: val} if val.find("/") > -1: m = _parse_num_pct(col, val.replace("%", "")) out.update(m) return out def _parse_rnaseq_qualimap_metrics(report_file): """Extract useful metrics from the qualimap HTML report file. """ out = {} parsers = ["Reads alignment", "Reads genomic origin", "Transcript coverage profile"] root = lxml.html.parse(report_file).getroot() for table in root.xpath("//div[@class='table-summary']"): header = table.xpath("h3")[0].text if header in parsers: out.update(_parse_qualimap_rnaseq(table)) return out def _rnaseq_qualimap(bam_file, data, out_dir): """ Run qualimap for a rnaseq bam file and parse results """ report_file = os.path.join(out_dir, "qualimapReport.html") config = data["config"] gtf_file = dd.get_gtf_file(data) ref_file = dd.get_ref_file(data) single_end = not bam.is_paired(bam_file) if not utils.file_exists(report_file): utils.safe_makedir(out_dir) bam.index(bam_file, config) cmd = _rnaseq_qualimap_cmd(config, bam_file, out_dir, gtf_file, single_end) do.run(cmd, "Qualimap for {}".format(dd.get_sample_name(data))) metrics = _parse_rnaseq_qualimap_metrics(report_file) metrics.update(_detect_duplicates(bam_file, out_dir, data)) metrics.update(_detect_rRNA(data)) metrics.update({"Fragment Length Mean": bam.estimate_fragment_size(bam_file)}) metrics = _parse_metrics(metrics) return metrics def _rnaseq_qualimap_cmd(config, bam_file, out_dir, gtf_file=None, single_end=None): """ Create command lines for qualimap """ qualimap = config_utils.get_program("qualimap", config) resources = config_utils.get_resources("qualimap", config) num_cores = resources.get("cores", 1) max_mem = config_utils.adjust_memory(resources.get("memory", "4G"), num_cores) cmd = ("unset DISPLAY && {qualimap} rnaseq -outdir {out_dir} -a proportional -bam {bam_file} " "-gtf {gtf_file} --java-mem-size={max_mem}").format(**locals()) return cmd # ## Lightweight QC approaches def _parse_bamtools_stats(stats_file): out = {} want = set(["Total reads", "Mapped reads", "Duplicates", "Median insert size"]) with open(stats_file) as in_handle: for line in in_handle: parts = line.split(":") if len(parts) == 2: metric, stat_str = parts metric = metric.split("(")[0].strip() if metric in want: stat_parts = stat_str.split() if len(stat_parts) == 2: stat, pct = stat_parts pct = pct.replace("(", "").replace(")", "") else: stat = stat_parts[0] pct = None out[metric] = stat if pct: out["%s pct" % metric] = pct return out def _parse_offtargets(bam_file): """ Add to metrics off-targets reads if it exitst """ off_target = bam_file.replace(".bam", 
"-offtarget-stats.yaml") if os.path.exists(off_target): res = yaml.load(open(off_target)) res['offtarget_pct'] = "%.3f" % (float(res['offtarget']) / float(res['mapped'])) return res return {} def _run_bamtools_stats(bam_file, data, out_dir): """Run bamtools stats with reports on mapped reads, duplicates and insert sizes. """ stats_file = os.path.join(out_dir, "bamtools_stats.txt") if not utils.file_exists(stats_file): utils.safe_makedir(out_dir) bamtools = config_utils.get_program("bamtools", data["config"]) with file_transaction(data, stats_file) as tx_out_file: cmd = "{bamtools} stats -in {bam_file}" if bam.is_paired(bam_file): cmd += " -insert" cmd += " > {tx_out_file}" do.run(cmd.format(**locals()), "bamtools stats", data) out = _parse_bamtools_stats(stats_file) out.update(_parse_offtargets(bam_file)) return out ## Variant statistics from gemini def _run_gemini_stats(bam_file, data, out_dir): """Retrieve high level variant statistics from Gemini. """ out = {} gemini_dbs = [d for d in [tz.get_in(["population", "db"], x) for x in data.get("variants", [])] if d] if len(gemini_dbs) > 0: gemini_db = gemini_dbs[0] gemini_stat_file = "%s-stats.yaml" % os.path.splitext(gemini_db)[0] if not utils.file_uptodate(gemini_stat_file, gemini_db): gemini = config_utils.get_program("gemini", data["config"]) tstv = subprocess.check_output([gemini, "stats", "--tstv", gemini_db]) gt_counts = subprocess.check_output([gemini, "stats", "--gts-by-sample", gemini_db]) dbsnp_count = subprocess.check_output([gemini, "query", gemini_db, "-q", "SELECT count(*) FROM variants WHERE in_dbsnp==1"]) out["Transition/Transversion"] = tstv.split("\n")[1].split()[-1] for line in gt_counts.split("\n"): parts = line.rstrip().split() if len(parts) > 0 and parts[0] != "sample": name, hom_ref, het, hom_var, _, total = parts out[name] = {} out[name]["Variations (heterozygous)"] = int(het) out[name]["Variations (homozygous)"] = int(hom_var) # same total variations for all samples, keep that top level as well. 
out["Variations (total)"] = int(total) out["Variations (in dbSNP)"] = int(dbsnp_count.strip()) if out.get("Variations (total)") > 0: out["Variations (in dbSNP) pct"] = "%.1f%%" % (out["Variations (in dbSNP)"] / float(out["Variations (total)"]) * 100.0) with open(gemini_stat_file, "w") as out_handle: yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False) else: with open(gemini_stat_file) as in_handle: out = yaml.safe_load(in_handle) else: vcf_file = dd.get_vrn_file(data) if isinstance(vcf_file, list): vcf_file = vcf_file[0] if vcf_file: out_file = "%s-bcfstats.tsv" % utils.splitext_plus(vcf_file)[0] bcftools = config_utils.get_program("bcftools", data["config"]) if not utils.file_exists(out_file): cmd = ("{bcftools} stats -f PASS {vcf_file} > {out_file}") do.run(cmd.format(**locals()), "basic vcf stats %s" % dd.get_sample_name(data)) with open(out_file) as in_handle: for line in in_handle: if line.startswith("SN") and line.find("records") > -1: cols = line.split() print line out["Variations (total)"] = cols[-1] res = {} for k, v in out.iteritems(): if not isinstance(v, dict): res.update({k: v}) if k == dd.get_sample_name(data): res.update(v) return res ## qsignature def _run_qsignature_generator(bam_file, data, out_dir): """ Run SignatureGenerator to create normalize vcf that later will be input of qsignature_summary :param bam_file: (str) path of the bam_file :param data: (list) list containing the all the dictionary for this sample :param out_dir: (str) path of the output :returns: (dict) dict with the normalize vcf file """ qsig = config_utils.get_program("qsignature", data["config"]) if not qsig: logger.info("There is no qsignature tool. Skipping...") return {} utils.safe_makedir(out_dir) position = dd.get_qsig_file(data) mixup_check = dd.get_mixup_check(data) if mixup_check and mixup_check.startswith("qsignature"): if not position: logger.info("There is no qsignature for this species: %s" % tz.get_in(['genome_build'], data)) return {} jvm_opts = "-Xms750m -Xmx2g" limit_reads = 20000000 if mixup_check == "qsignature_full": slice_bam = bam_file jvm_opts = "-Xms750m -Xmx8g" limit_reads = 100000000 down_file = bam.downsample(slice_bam, data, limit_reads) if not down_file: down_file = slice_bam else: down_bam = _slice_bam_chr21(bam_file, data) position = _slice_vcf_chr21(position, out_dir) out_name = os.path.basename(down_bam).replace("bam", "qsig.vcf") out_file = os.path.join(out_dir, out_name) log_file = os.path.join(out_dir, "qsig.log") cores = dd.get_cores(data) base_cmd = ("{qsig} {jvm_opts} " "org.qcmg.sig.SignatureGenerator " "--noOfThreads {cores} " "-log {log_file} -i {position} " "-i {down_bam} ") if not os.path.exists(out_file): file_qsign_out = "{0}.qsig.vcf".format(down_bam) do.run(base_cmd.format(**locals()), "qsignature vcf generation: %s" % dd.get_sample_name(data)) if os.path.exists(file_qsign_out): with file_transaction(data, out_file) as file_txt_out: shutil.move(file_qsign_out, file_txt_out) else: raise IOError("File doesn't exist %s" % file_qsign_out) return {'qsig_vcf': out_file} return {} def qsignature_summary(*samples): """Run SignatureCompareRelatedSimple module from qsignature tool. Creates a matrix of pairwise comparison among samples. 
    The function will not run if the output already exists.

    :param samples: list with only one element containing all samples information
    :returns: (dict) with the path of the output to be joined to summary
    """
    warnings, similar = [], []
    qsig = config_utils.get_program("qsignature", samples[0][0]["config"])
    if not qsig:
        return [[]]
    jvm_opts = "-Xms750m -Xmx8g"
    work_dir = samples[0][0]["dirs"]["work"]
    count = 0
    for data in samples:
        data = data[0]
        vcf = tz.get_in(["summary", "metrics", "qsig_vcf"], data)
        if vcf:
            count += 1
            vcf_name = dd.get_sample_name(data) + ".qsig.vcf"
            out_dir = utils.safe_makedir(os.path.join(work_dir, "qsignature"))
            if not os.path.lexists(os.path.join(out_dir, vcf_name)):
                os.symlink(vcf, os.path.join(out_dir, vcf_name))
    if count > 0:
        qc_out_dir = utils.safe_makedir(os.path.join(work_dir, "qc", "qsignature"))
        out_file = os.path.join(qc_out_dir, "qsignature.xml")
        out_ma_file = os.path.join(qc_out_dir, "qsignature.ma")
        out_warn_file = os.path.join(qc_out_dir, "qsignature.warnings")
        log = os.path.join(work_dir, "qsignature", "qsig-summary.log")
        if not os.path.exists(out_file):
            with file_transaction(samples[0][0], out_file) as file_txt_out:
                base_cmd = ("{qsig} {jvm_opts} "
                            "org.qcmg.sig.SignatureCompareRelatedSimple "
                            "-log {log} -dir {out_dir} "
                            "-o {file_txt_out} ")
                do.run(base_cmd.format(**locals()), "qsignature score calculation")
        error, warnings, similar = _parse_qsignature_output(out_file, out_ma_file,
                                                            out_warn_file, samples[0][0])
        return [{'total samples': count,
                 'similar samples pairs': len(similar),
                 'warnings samples pairs': len(warnings),
                 'error samples': list(error),
                 'out_dir': qc_out_dir}]
    else:
        return []

def _parse_qsignature_output(in_file, out_file, warning_file, data):
    """Parse the XML file produced by qsignature.

    :param in_file: (str) with the path to the xml file
    :param out_file: (str) with the path to output file
    :param warning_file: (str) with the path to warning file

    :returns: (list) with samples that could be duplicated
    """
    name = {}
    error, warnings, similar = set(), set(), set()
    same, replicate, related = 0, 0.1, 0.18
    mixup_check = dd.get_mixup_check(data)
    if mixup_check == "qsignature_full":
        same, replicate, related = 0, 0.01, 0.061
    with open(in_file, 'r') as in_handle:
        with file_transaction(data, out_file) as out_tx_file:
            with file_transaction(data, warning_file) as warn_tx_file:
                with open(out_tx_file, 'w') as out_handle:
                    with open(warn_tx_file, 'w') as warn_handle:
                        et = lxml.etree.parse(in_handle)
                        for i in list(et.iter('file')):
                            name[i.attrib['id']] = os.path.basename(i.attrib['name']).replace(".qsig.vcf", "")
                        for i in list(et.iter('comparison')):
                            msg = None
                            pair = "-".join([name[i.attrib['file1']], name[i.attrib['file2']]])
                            out_handle.write("%s\t%s\t%s\n"
                                             % (name[i.attrib['file1']], name[i.attrib['file2']],
                                                i.attrib['score']))
                            if float(i.attrib['score']) == same:
                                msg = 'qsignature ERROR: read same samples:%s\n'
                                error.add(pair)
                            elif float(i.attrib['score']) < replicate:
                                msg = 'qsignature WARNING: read similar/replicate samples:%s\n'
                                warnings.add(pair)
                            elif float(i.attrib['score']) < related:
                                msg = 'qsignature NOTE: read relative samples:%s\n'
                                similar.add(pair)
                            if msg:
                                logger.info(msg % pair)
                                warn_handle.write(msg % pair)
    return error, warnings, similar

def _slice_bam_chr21(in_bam, data):
    """Return a BAM file containing only the chromosome 21 reads.
    """
    sambamba = config_utils.get_program("sambamba", data["config"])
    out_file = "%s-chr%s" % os.path.splitext(in_bam)
    if not utils.file_exists(out_file):
        bam.index(in_bam, data['config'])
        with contextlib.closing(pysam.Samfile(in_bam, "rb")) as bamfile:
            bam_contigs = [c["SN"] for c in bamfile.header["SQ"]]
        chromosome = "21"
        if "chr21" in bam_contigs:
            chromosome = "chr21"
        with file_transaction(data, out_file) as tx_out_file:
            cmd = ("{sambamba} slice -o {tx_out_file} {in_bam} {chromosome}").format(**locals())
            out = subprocess.check_output(cmd, shell=True)
    return out_file

def _slice_vcf_chr21(vcf_file, out_dir):
    """Slice chr21 out of the qsignature SNPs to reduce computation time.
    """
    tmp_file = os.path.join(out_dir, "chr21_qsignature.vcf")
    if not utils.file_exists(tmp_file):
        cmd = ("grep chr21 {vcf_file} > {tmp_file}").format(**locals())
        out = subprocess.check_output(cmd, shell=True)
    return tmp_file

## report and coverage

def report_summary(*samples):
    """Run the coverage report with the bcbreport package.
    """
    try:
        import bcbreport.prepare as bcbreport
    except ImportError:
        logger.info("skipping report. No bcbreport installed.")
        return samples
    samples = utils.unpack_worlds(samples)
    work_dir = dd.get_work_dir(samples[0])
    parent_dir = utils.safe_makedir(os.path.join(work_dir, "report"))
    with utils.chdir(parent_dir):
        logger.info("copy qsignature")
        qsignature_fn = os.path.join(work_dir, "qc", "qsignature", "qsignature.ma")
        if qsignature_fn:
            if utils.file_exists(qsignature_fn) and not utils.file_exists("qsignature.ma"):
                shutil.copy(qsignature_fn, "qsignature.ma")
        out_dir = utils.safe_makedir("fastqc")
        logger.info("summarize fastqc")
        with utils.chdir(out_dir):
            _merge_fastqc(samples)
        out_dir = utils.safe_makedir("variants")
        bcbreport.report(parent_dir)
        out_report = os.path.join(parent_dir, "qc-coverage-report.html")
        if not utils.file_exists(out_report):
            rmd_file = os.path.join(parent_dir, "report-ready.Rmd")
            run_file = "%s-run.R" % (os.path.splitext(out_report)[0])
            with open(run_file, "w") as out_handle:
                out_handle.write("""library(rmarkdown)\nrender("%s")\n""" % rmd_file)
            cmd = "%s %s" % (utils.Rscript_cmd(), run_file)
            try:
                do.run(cmd, "Prepare coverage summary", log_error=False)
            except subprocess.CalledProcessError, msg:
                logger.info("Skipping generation of coverage report: %s" % (str(msg)))
            if utils.file_exists("report-ready.html"):
                shutil.move("report-ready.html", out_report)
        out = []
        for d in samples:
            if "coverage" not in d:
                d["coverage"] = {}
            if utils.file_exists(out_report):
                d["coverage"]["report"] = out_report
            out.append(d)
        samples = out
    logger.info("summarize metrics")
    samples = _merge_metrics(samples)
    return [[d] for d in samples]

def coverage_report(data):
    """Run heavyweight coverage and variant processing in parallel.
    """
    data = cov.coverage(data)
    data = cov.variants(data)
    data = cov.priority_coverage(data)
    data = cov.priority_total_coverage(data)
    problem_regions = dd.get_problem_region_dir(data)
    if "coverage" in data:
        coverage = data['coverage']
        annotated = None
        if problem_regions and coverage:
            annotated = cov.decorate_problem_regions(coverage, problem_regions)
        data['coverage'] = {'all': coverage, 'problems': annotated}
    else:
        data["coverage"] = {}
    return [[data]]

def _get_coverage_per_region(name):
    """Parse coverage file if it exists to get average value.
""" fn = os.path.join("coverage", name + "_coverage.bed") if utils.file_exists(fn): try: dt = pd.read_csv(fn, sep="\t", index_col=False) if len(dt["meanCoverage"]) > 0: return "%.3f" % (sum(map(float, dt['meanCoverage'])) / len(dt['meanCoverage'])) except TypeError: logger.debug("%s has no lines in coverage.bed" % name) return "NA" def _merge_metrics(samples): """ parse project.yaml file to get metrics for each bam """ out_file = os.path.join("metrics", "metrics.tsv") dt_together = [] cov = {} with file_transaction(out_file) as out_tx: for s in samples: if s['description'] in cov: continue m = tz.get_in(['summary', 'metrics'], s) if m: for me in m: if isinstance(m[me], list): m[me] = ":".join(m[me]) dt = pd.DataFrame(m, index=['1']) dt['avg_coverage_per_region'] = _get_coverage_per_region(s['description']) cov[s['description']] = dt['avg_coverage_per_region'][0] # dt = pd.DataFrame.from_dict(m) dt.columns = [k.replace(" ", "_").replace("(", "").replace(")", "") for k in dt.columns] dt['sample'] = s['description'] dt_together.append(dt) if len(dt_together) > 0: dt_together = utils.rbind(dt_together) dt_together.to_csv(out_tx, index=False, sep="\t") out = [] for s in samples: if s['description'] in cov: s['summary']['metrics']['avg_coverage_per_region'] = cov[s['description']] out.append(s) return samples def _merge_fastqc(samples): """ merge all fastqc samples into one by module """ fastqc_list = defaultdict(list) seen = set() for data in samples: name = dd.get_sample_name(data) if name in seen: continue seen.add(name) fns = glob.glob(os.path.join(dd.get_work_dir(data), "qc", dd.get_sample_name(data), "fastqc") + "/*") for fn in fns: if fn.endswith("tsv"): metric = os.path.basename(fn) fastqc_list[metric].append([name, fn]) for metric in fastqc_list: dt_by_sample = [] for fn in fastqc_list[metric]: dt = pd.read_csv(fn[1], sep="\t") dt['sample'] = fn[0] dt_by_sample.append(dt) dt = utils.rbind(dt_by_sample) dt.to_csv(metric, sep="\t", index=False, mode ='w') return samples
gifford-lab/bcbio-nextgen
bcbio/pipeline/qcsummary.py
Python
mit
52,291
[ "pysam" ]
0a0ac64f12c7f2dc31bc6dc0e3212c42c29b2c8eadd9686bc89297ec68e32d46
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# This example demonstrates how to use a matrix in place of a transform
# via vtkMatrixToLinearTransform and vtkMatrixToHomogeneousTransform.

# create a rendering window
renWin = vtk.vtkRenderWindow()
renWin.SetSize(600,300)

# set up first set of polydata
p1 = vtk.vtkPlaneSource()
p1.SetOrigin(0.5,0.508,-0.5)
p1.SetPoint1(-0.5,0.508,-0.5)
p1.SetPoint2(0.5,0.508,0.5)
p1.SetXResolution(5)
p1.SetYResolution(5)
p2 = vtk.vtkPlaneSource()
p2.SetOrigin(-0.508,0.5,-0.5)
p2.SetPoint1(-0.508,-0.5,-0.5)
p2.SetPoint2(-0.508,0.5,0.5)
p2.SetXResolution(5)
p2.SetYResolution(5)
p3 = vtk.vtkPlaneSource()
p3.SetOrigin(-0.5,-0.508,-0.5)
p3.SetPoint1(0.5,-0.508,-0.5)
p3.SetPoint2(-0.5,-0.508,0.5)
p3.SetXResolution(5)
p3.SetYResolution(5)
p4 = vtk.vtkPlaneSource()
p4.SetOrigin(0.508,-0.5,-0.5)
p4.SetPoint1(0.508,0.5,-0.5)
p4.SetPoint2(0.508,-0.5,0.5)
p4.SetXResolution(5)
p4.SetYResolution(5)
p5 = vtk.vtkPlaneSource()
p5.SetOrigin(0.5,0.5,-0.508)
p5.SetPoint1(0.5,-0.5,-0.508)
p5.SetPoint2(-0.5,0.5,-0.508)
p5.SetXResolution(5)
p5.SetYResolution(5)
p6 = vtk.vtkPlaneSource()
p6.SetOrigin(0.5,0.5,0.508)
p6.SetPoint1(-0.5,0.5,0.508)
p6.SetPoint2(0.5,-0.5,0.508)
p6.SetXResolution(5)
p6.SetYResolution(5)
# append together
ap = vtk.vtkAppendPolyData()
ap.AddInputConnection(p1.GetOutputPort())
ap.AddInputConnection(p2.GetOutputPort())
ap.AddInputConnection(p3.GetOutputPort())
ap.AddInputConnection(p4.GetOutputPort())
ap.AddInputConnection(p5.GetOutputPort())
ap.AddInputConnection(p6.GetOutputPort())
#--------------------------
# linear transform matrix
t1 = vtk.vtkMatrixToLinearTransform()
m1 = vtk.vtkMatrix4x4()
t1.SetInput(m1)
m1.SetElement(0,0,1.127631)
m1.SetElement(0,1,0.205212)
m1.SetElement(0,2,-0.355438)
m1.SetElement(1,0,0.000000)
m1.SetElement(1,1,0.692820)
m1.SetElement(1,2,0.400000)
m1.SetElement(2,0,0.200000)
m1.SetElement(2,1,-0.469846)
m1.SetElement(2,2,0.813798)
f11 = vtk.vtkTransformPolyDataFilter()
f11.SetInputConnection(ap.GetOutputPort())
f11.SetTransform(t1)
m11 = vtk.vtkDataSetMapper()
m11.SetInputConnection(f11.GetOutputPort())
a11 = vtk.vtkActor()
a11.SetMapper(m11)
a11.GetProperty().SetColor(1,0,0)
a11.GetProperty().SetRepresentationToWireframe()
ren11 = vtk.vtkRenderer()
ren11.SetViewport(0.0,0.5,0.25,1.0)
ren11.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren11.AddActor(a11)
renWin.AddRenderer(ren11)
# inverse linear transform
f12 = vtk.vtkTransformPolyDataFilter()
f12.SetInputConnection(ap.GetOutputPort())
f12.SetTransform(t1.GetInverse())
m12 = vtk.vtkDataSetMapper()
m12.SetInputConnection(f12.GetOutputPort())
a12 = vtk.vtkActor()
a12.SetMapper(m12)
a12.GetProperty().SetColor(0.9,0.9,0)
a12.GetProperty().SetRepresentationToWireframe()
ren12 = vtk.vtkRenderer()
ren12.SetViewport(0.0,0.0,0.25,0.5)
ren12.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren12.AddActor(a12)
renWin.AddRenderer(ren12)
#--------------------------
# perspective transform matrix
m2 = vtk.vtkMatrix4x4()
m2.SetElement(3,0,-0.11)
m2.SetElement(3,1,0.3)
m2.SetElement(3,2,0.2)
t2 = vtk.vtkMatrixToHomogeneousTransform()
t2.SetInput(m2)
f21 = vtk.vtkTransformPolyDataFilter()
f21.SetInputConnection(ap.GetOutputPort())
f21.SetTransform(t2)
m21 = vtk.vtkDataSetMapper()
m21.SetInputConnection(f21.GetOutputPort())
a21 = vtk.vtkActor()
a21.SetMapper(m21)
a21.GetProperty().SetColor(1,0,0)
a21.GetProperty().SetRepresentationToWireframe()
ren21 = vtk.vtkRenderer()
ren21.SetViewport(0.25,0.5,0.50,1.0)
ren21.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren21.AddActor(a21)
renWin.AddRenderer(ren21)
# inverse perspective transform
f22 = vtk.vtkTransformPolyDataFilter()
f22.SetInputConnection(ap.GetOutputPort())
f22.SetTransform(t2.GetInverse())
m22 = vtk.vtkDataSetMapper()
m22.SetInputConnection(f22.GetOutputPort())
a22 = vtk.vtkActor()
a22.SetMapper(m22)
a22.GetProperty().SetColor(0.9,0.9,0)
a22.GetProperty().SetRepresentationToWireframe()
ren22 = vtk.vtkRenderer()
ren22.SetViewport(0.25,0.0,0.50,0.5)
ren22.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren22.AddActor(a22)
renWin.AddRenderer(ren22)
#--------------------------
# linear concatenation - should end up with identity here
t3 = vtk.vtkTransform()
t3.Concatenate(t1)
t3.Concatenate(t1.GetInverse())
f31 = vtk.vtkTransformPolyDataFilter()
f31.SetInputConnection(ap.GetOutputPort())
f31.SetTransform(t3)
m31 = vtk.vtkDataSetMapper()
m31.SetInputConnection(f31.GetOutputPort())
a31 = vtk.vtkActor()
a31.SetMapper(m31)
a31.GetProperty().SetColor(1,0,0)
a31.GetProperty().SetRepresentationToWireframe()
ren31 = vtk.vtkRenderer()
ren31.SetViewport(0.50,0.5,0.75,1.0)
ren31.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren31.AddActor(a31)
renWin.AddRenderer(ren31)
# inverse linear transform
f32 = vtk.vtkTransformPolyDataFilter()
f32.SetInputConnection(ap.GetOutputPort())
f32.SetTransform(t3.GetInverse())
m32 = vtk.vtkDataSetMapper()
m32.SetInputConnection(f32.GetOutputPort())
a32 = vtk.vtkActor()
a32.SetMapper(m32)
a32.GetProperty().SetColor(0.9,0.9,0)
a32.GetProperty().SetRepresentationToWireframe()
ren32 = vtk.vtkRenderer()
ren32.SetViewport(0.5,0.0,0.75,0.5)
ren32.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren32.AddActor(a32)
renWin.AddRenderer(ren32)
#--------------------------
# perspective transform concatenation
t4 = vtk.vtkPerspectiveTransform()
t4.Concatenate(t1)
t4.Concatenate(t2)
t4.Concatenate(t3)
f41 = vtk.vtkTransformPolyDataFilter()
f41.SetInputConnection(ap.GetOutputPort())
f41.SetTransform(t4)
m41 = vtk.vtkDataSetMapper()
m41.SetInputConnection(f41.GetOutputPort())
a41 = vtk.vtkActor()
a41.SetMapper(m41)
a41.GetProperty().SetColor(1,0,0)
a41.GetProperty().SetRepresentationToWireframe()
ren41 = vtk.vtkRenderer()
ren41.SetViewport(0.75,0.5,1.0,1.0)
ren41.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren41.AddActor(a41)
renWin.AddRenderer(ren41)
# inverse of transform concatenation
f42 = vtk.vtkTransformPolyDataFilter()
f42.SetInputConnection(ap.GetOutputPort())
f42.SetTransform(t4.GetInverse())
m42 = vtk.vtkDataSetMapper()
m42.SetInputConnection(f42.GetOutputPort())
a42 = vtk.vtkActor()
a42.SetMapper(m42)
a42.GetProperty().SetColor(0.9,0.9,0)
a42.GetProperty().SetRepresentationToWireframe()
ren42 = vtk.vtkRenderer()
ren42.SetViewport(0.75,0.0,1.0,0.5)
ren42.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren42.AddActor(a42)
renWin.AddRenderer(ren42)
renWin.SetMultiSamples(0)
renWin.Render()
# --- end of script --
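# --- Illustrative sketch (not part of the original test): the point of
# vtkMatrixToLinearTransform is that it keeps a live reference to its input
# vtkMatrix4x4, so editing the matrix re-parameterizes the transform without
# rebuilding the pipeline.  A minimal demonstration, assuming only that the
# vtk module imported above is available:
m = vtk.vtkMatrix4x4()
t = vtk.vtkMatrixToLinearTransform()
t.SetInput(m)
print(t.TransformPoint(1.0, 0.0, 0.0))  # identity: (1.0, 0.0, 0.0)
m.SetElement(0, 3, 5.0)                 # add a +5 translation in x
print(t.TransformPoint(1.0, 0.0, 0.0))  # now (6.0, 0.0, 0.0)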
hlzz/dotfiles
graphics/VTK-7.0.0/Common/Transforms/Testing/Python/MatrixToTransform.py
Python
bsd-3-clause
6,605
[ "VTK" ]
e4315771e04c912a5e2df7775c9519eb8bba90fb81f5b2dea103c3e4ac4402df
import os, sys
import unittest
from __main__ import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging

# Add the CIP common library to the path if it has not been loaded yet
# try:
#     from CIP.logic.SlicerUtil import SlicerUtil
# except Exception as ex:
#     import inspect
#     path = os.path.dirname(inspect.getfile(inspect.currentframe()))
#     if os.path.exists(os.path.normpath(path + '/../CIP_Common')):
#         path = os.path.normpath(path + '/../CIP_Common')    # We assume that CIP_Common is a sibling folder of the one that contains this module
#     elif os.path.exists(os.path.normpath(path + '/CIP')):
#         path = os.path.normpath(path + '/CIP')    # We assume that CIP is a subfolder (Slicer behaviour)
#     sys.path.append(path)
#     from CIP.logic.SlicerUtil import SlicerUtil
#     print("CIP was added to the python path manually in CIP_Blank")
#
# from CIP.logic import Util

# SlicerUtil is referenced below (module category, dependencies and
# acknowledgements), so the import must be active for the module to load.
from CIP.logic.SlicerUtil import SlicerUtil

#
# CIP_Blank
#
class CIP_Blank(ScriptedLoadableModule):
    """Uses ScriptedLoadableModule base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """
    def __init__(self, parent):
        ScriptedLoadableModule.__init__(self, parent)
        self.parent.title = "CIP_Blank"
        self.parent.categories = SlicerUtil.CIP_ModulesCategory
        self.parent.dependencies = [SlicerUtil.CIP_ModuleName]
        self.parent.contributors = ["Jorge Onieva (jonieva@bwh.harvard.edu)",
                                    "Applied Chest Imaging Laboratory",
                                    "Brigham and Women's Hospital"]
        self.parent.helpText = """Write here the description of your module"""
        self.parent.acknowledgementText = SlicerUtil.ACIL_AcknowledgementText

#
# CIP_BlankWidget
#
class CIP_BlankWidget(ScriptedLoadableModuleWidget):
    """Uses ScriptedLoadableModuleWidget base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """
    def __init__(self, parent):
        ScriptedLoadableModuleWidget.__init__(self, parent)

    def setup(self):
        """This is called one time when the module GUI is initialized
        """
        ScriptedLoadableModuleWidget.setup(self)

        # Create objects that can be used anywhere in the module. Example: in most cases there should be just one
        # object of the logic class
        self.logic = CIP_BlankLogic()

        # Create all the widgets. Main Area
        mainAreaCollapsibleButton = ctk.ctkCollapsibleButton()
        mainAreaCollapsibleButton.text = "Main parameters"
        self.layout.addWidget(mainAreaCollapsibleButton)
        # Layout within the dummy collapsible button. See http://doc.qt.io/qt-4.8/layout.html for more info about layouts
        self.mainAreaLayout = qt.QFormLayout(mainAreaCollapsibleButton)

        # Example button with some common properties
        self.exampleButton = ctk.ctkPushButton()
        self.exampleButton.text = "Push me!"
        self.exampleButton.toolTip = "This is the button tooltip"
        self.exampleButton.setIcon(qt.QIcon("{0}/Reload.png".format(SlicerUtil.CIP_ICON_DIR)))
        self.exampleButton.setIconSize(qt.QSize(20,20))
        self.exampleButton.setStyleSheet("font-weight:bold; font-size:12px")
        self.exampleButton.setFixedWidth(200)
        self.mainAreaLayout.addWidget(self.exampleButton)

        # Connections
        self.exampleButton.connect('clicked()', self.onApplyButton)

    def enter(self):
        """This is invoked every time that we select this module as the active module in Slicer (not only the first time)"""
        pass

    def exit(self):
        """This is invoked every time that we switch to another module (not only when Slicer is closed)."""
        pass

    def cleanup(self):
        """This is invoked as a destructor of the GUI when the module is no longer going to be used"""
        pass

    def onApplyButton(self):
        message = self.logic.printMessage("This is the message that I want to print")
        qt.QMessageBox.information(slicer.util.mainWindow(), 'OK!',
                                   'The test was ok. Review the console for details')

#
# CIP_BlankLogic
#
class CIP_BlankLogic(ScriptedLoadableModuleLogic):
    """This class should implement all the actual computation done by your module. The interface
    should be such that other python code can import this class and make use of the functionality
    without requiring an instance of the Widget.
    Uses ScriptedLoadableModuleLogic base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """
    def __init__(self):
        """Constructor. """
        ScriptedLoadableModuleLogic.__init__(self)

    def printMessage(self, message):
        print("This is your message: {0}".format(message))
        return "I have printed this message: " + message

class CIP_BlankTest(ScriptedLoadableModuleTest):
    """
    This is the test case for your scripted module.
    Uses ScriptedLoadableModuleTest base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """
    def setUp(self):
        """ Do whatever is needed to reset the state - typically a scene clear will be enough.
        """
        slicer.mrmlScene.Clear(0)

    def runTest(self):
        """Run as few or as many tests as needed here.
        """
        self.setUp()
        self.test_CIP_Blank_PrintMessage()

    def test_CIP_Blank_PrintMessage(self):
        self.delayDisplay("Starting the test")
        logic = CIP_BlankLogic()

        myMessage = "Print this test message in console"
        logging.info("Starting the test with this message: " + myMessage)
        expectedMessage = "I have printed this message: " + myMessage
        logging.info("The expected message would be: " + expectedMessage)
        responseMessage = logic.printMessage(myMessage)
        logging.info("The response message was: " + responseMessage)
        self.assertTrue(responseMessage == expectedMessage)
        self.delayDisplay('Test passed!')
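# --- Illustrative usage note (not part of the module): because the computation
# lives in CIP_BlankLogic rather than in the widget, it can be exercised without
# any GUI, e.g. from the Slicer Python interactor.  The import path below is an
# assumption (it requires this file to be on Slicer's module search path):
#
#   from CIP_Blank import CIP_BlankLogic
#   logic = CIP_BlankLogic()
#   result = logic.printMessage("hello")
#   assert result == "I have printed this message: hello"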
acil-bwh/SlicerCIP
Scripted/CIP_Blank/CIP_Blank.py
Python
bsd-3-clause
6,109
[ "VTK" ]
850b50de14297afc3fc7d527cbda2a4a5d01c68c18125cd1d734a0dc0f86fc7f
""" Load Region from NeuroHDF and apply measures to retrieved trees """ from unidesign.spatial.group import Region, Treelines, Connectors from neurohdf import File f = File('mydataset.nh5', 'r') # wrap a region myregion = Region.from_neurohdf(f, 'neuropile', memmapped = False) # fetch cells # level defines how many indirections: connectors, trees etc. to fetch into memory # creating objects for trees and connectors myregion.fetch( neuron_id, level = 0 ) # or fetch all myregion.fetch_all() # retrieve particular kinds of trees myregion.retrieve( class = 'tree', where = 'type = sensory neuron', fetch = False ) myregion.retrieve( class = 'connector', fetch = False ) # produce a connectivity diagram from the currently fetched trees/connectors # for multiple connectors between, a nx.MultiDiGraph might be used connection_diagram = myregion.get_graph() # produce a connectivity diagram for neuron classes myregion.get_class_graph( include_classes = ['all'] ) # display the diagram # usecase: color tree based on the centrality of treenodes # 1) convert to graph # 2) compute centrality # 3) feed centrality into colormap # 4) create fos actor with information
unidesigner/unidesign
unidesign/spatial/tests/test_spatial2.py
Python
bsd-3-clause
1,173
[ "NEURON" ]
9f75e97cfc588c30ff3f3d9c7ed26a941c61b3e5f841b436c80b45c3f49b10fd
# -*- coding: utf-8 -*- { "'Sounds-like' name search allowing search even the spelling of the name is not known exactly": "'Sounds-like'名稱搜尋或搜尋,即使名稱的拼字不完全", "A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": '一个位置指定地理區域的這个區域。 這可以是位置的位置階層,或"群組位置",或位置有界限的區域。', "Acronym of the organization's name, eg. IFRC.": '縮寫的組織的名稱,例如: IFRC。', "Authenticate system's Twitter account": '系统的鉴別Twitter账户', "Can't import tweepy": '無法匯入tweepy', "Caution: doesn't respect the framework rules!": '警告:不符合架构規則!', "Click on 'Pledge' button in the left-hand column to make a Pledge to match a request for aid.": '按一下"抵押"按鈕左手邊的直欄來進行抵押以符合要求的协助。', "Detailed address of the site for informational/logistics purpose. Please note that you can add GIS/Mapping data about this site in the 'Location' field mentioned below.": '詳細地址的站點的参考/后勤的用途。 請注意,您可以新增GIS/對映資料中的關于此站台"位置"欄位下面的說明。', "Facilitate uploading of missing person's photograph": '促進上传失蹤人口的照片', "Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": '清單格式屬性的值" & RGB值用于為JSON物件,例如: {0}紅色: \'#FF0000,綠色: \'#00FF00,黃色: \'#FFFF00的', "Grouping by 'Family Unit' or other group category": '分組\'系列單元"或"其他"群組種類', "If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": '如果選取,則此資產的位置將會被更新時,人員的位置已更新。', "If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": '如果此配置代表一个區域的區域功能表上,請提供一个名稱,以使用在功能表中。 名稱的个人對映配置將會設為使用者的名稱。', "If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": '如果這个欄位會移入,則使用者指定此組織時,註冊將指定為一个人員的組織,除非它們的網域不符合網域欄位。', "If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": '如果這是起來,則這會成為使用者的基本位置和因此使用者在地圖上顯示', "If this setting is enabled then all deleted records are just flagged as deleted instead of being really deleted. They will appear in the raw database access but won't be visible to normal users.": '如果啟用了這項設定,則所有刪除的記錄只是標示為刪除而確定刪除。 它們會顯示在原始資料庫存取,但不會看到一般使用者。', "If you cannot find the record of the person you want to report missing, you can add it by clicking 'Add Person' below:": '如果您無法找到該記錄的人員您要報告丟失了,您可以將它新增至按一下"新增人員"如下:', "If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": '如果您沒有看到"以在清單中,您可以新增一个新的按一下鏈結新增醫院。', "If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'.": '如果您沒有看到"辦事處清單中,您可以新增一个新的按一下鏈結新增Out of Office。', "If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": '如果您沒有看到"組織清單中,您可以新增一个新的按一下鏈結新增組織。', "Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. 
Click the link on the right to go to this page.": '而自動從其他同步對等網路上,您也可以同步檔案,這是必需的,沒有網路。 您可以利用這个頁面來匯入同步檔案資料,匯出資料要同步化的檔案。 上的鏈結,按一下滑鼠右鍵,前往這个頁面。', "Level is higher than parent's": '母項的層次高于', "NB SMS requests are filtered to just those which are 'actionable', whilst the Tweet requests are unfiltered, so that is likely to be a good place to start Searching.": "Nb SMS要求過濾,只是一个actionable',時, Tweet要求過濾,因此可能會是一个好開始搜尋。", "Need a 'url' argument!": "需要一个'URL'引數!", "Note that the dropdowns won't refresh automatically. Refresh the page if you wish to verify that the locations have gone.": '請注意,清單不會自動重新整理。 如果您想要重新整理頁面,以驗證"位置不存在。', "Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "選用。 幾何形狀的名稱直欄。 在PostGIS預設為'the_geom'.", "Parent level should be higher than this record's level. Parent level is": '母項層次應該高于此記錄的層次。 母項層次是', "Password fields don't match": '密碼欄位不符', "Phone number to donate to this organization's relief efforts.": '捐贈撥打電話號碼這个組織的釋放工作。', "Please come back after sometime if that doesn't help.": '請回到之后,如果該時間不說明。', "Press the 'Delete Old' button to have all records which reference this one be repointed at the new one & then the old record will be deleted.": '按下"刪除舊的按鈕,使所有記錄参照此一个被repointed在新的一个,則舊記錄將被刪除。', "Quantity in %s's Inventory": '以百分比的庫存數量', "Search here for a person's record in order to:": '搜尋這裡的人員的記錄,以便:', "Select a Room from the list or click 'Create Room'": '選取會議室從清單,或按一下新增空間"', "Select a person in charge for status 'assigned'": "選取一个人員負責的狀態'指定的'", "Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": '選取這个如果所有特定位置需要母項在最深層次的位置階層。 例如,如果"地區"的最小部門階層中,則所有特定位置所需的要區域作為母項。', "Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": '選取這个如果所有特定位置需要一个母項位置階層。 這可协助在設定"地區"代表一个受影响的區域。', "Sorry, things didn't get done on time.": '抱歉,項目沒有取得完成的時間。', "Sorry, we couldn't find that page.": '很抱歉,我們找不到該頁面。', "System's Twitter account updated": '系统的Twitter更新账户', "The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": '在Donor(S)適用于這个專案。 可以選取多个值,請按住控制鍵。', "The Sector(s) this organization works in. Multiple values can be selected by holding down the 'Control' key.": '部門(s)此組織運作中。 可以選取多个值,請按住控制鍵。', "The URL of the image file. If you don't upload an image file, then you must specify its location here.": '映像檔的URL。 如果您不上传影像檔案,則您必须指定其位置在這裡。', "The person's manager within this Office/Project.": '人員的管理員在這个辦事處/專案。', "To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '搜尋人員名稱,輸入任何的第一个,中間或最后一个名稱,以空格區隔。 您可以使用%作為通配符。 按一下"搜尋"不需要輸入,會列出所有的人。', "To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": '要搜尋的主体,請輸入ID標籤的主体。 您可以使用%作為通配符。 按一下"搜尋"不需要輸入,會列出所有的主体。', "To search for a hospital, enter any of the names or IDs of the hospital, or the organisation name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": '要搜尋的醫院,輸入的任何名稱或ID的醫院,或組織名稱或縮寫,以空格區隔。 您可以使用%作為通配符。 按一下"搜尋"不需要輸入,以列出所有醫院。', "To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. 
Press 'Search' without input to list all hospitals.": '要搜尋的醫院,輸入的任何名稱或ID的醫院,以空格區隔。 您可以使用%作為通配符。 按一下"搜尋"不需要輸入,以列出所有醫院。', "To search for a hospital, enter any part of the name or ID. You may use % as wildcard. Press 'Search' without input to list all hospitals.": '要搜尋的醫院,輸入的任何部分的名稱或ID。 您可以使用%作為通配符。 按一下"搜尋"不需要輸入,以列出所有醫院。', "To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": '要搜尋的位置,輸入該名稱。 您可以使用%作為通配符。 按一下"搜尋"不需要輸入,會列出所有的位置。', "To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '搜尋人員,輸入任何的第一个,中間或最后一个名稱和/或ID號碼的人員,以空格區隔。 您可以使用%作為通配符。 按一下"搜尋"不需要輸入,會列出所有的人。', "To search for a person, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '搜尋人員,輸入任何的第一个,中間或最后一个名稱,以空格區隔。 您可以使用%作為通配符。 按一下"搜尋"不需要輸入,會列出所有的人。', "To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.": '搜尋要求時,輸入您要尋找的部分文字。 您可以使用%作為通配符。 按一下"搜尋"不需要輸入,會列出所有的要求。', "To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": '要搜尋的評估,輸入任何部分的票据號碼的評估。 您可以使用%作為通配符。 按一下"搜尋"不需要輸入,以列出所有評估。', "Type the first few characters of one of the Person's names.": '輸入前幾个字元的其中一个人員的名稱。', "Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": '上传影像檔案在這裡。 如果您不上传影像檔案,則您必须指定其位置在URL欄位中。', "View and/or update details of the person's record": '檢視及/或更新詳細資料的人員的記錄', "View/Edit the Database directly (caution: doesn't respect the framework rules!)": '檢視/編輯資料庫直接(警告:不符合架构規則! )', "What are the people's normal ways to obtain food in this area?": '在這個地區通常大家怎麼取得食物?', "What should be done to reduce women and children's vulnerability to violence?": '如何做才能減少婦女和小孩遭受暴力?', "When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": '當與他人同步數據 ,兩個(或多方)要同步的資料都已經修改的情況下衝突發生,即信息相互矛盾。 同步模組嘗試解析這類冲突自動,但是在某些情湟下不能。 在這些情湟下,您有來解决這些冲突,請手動按一下上的鏈結,才能進入這个頁面。', "You have personalised settings, so changes made here won't be visible to you. To change your personalised settings, click": '您已經設定密碼,因此在這裡進行的變更不會顯示給您。 若要變更您的設定,請按一下个人化', "You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": "您有未儲存的變更。 現在按一下'取消',然后'儲存',以儲存它們。 按一下確定以立即舍棄它們。", "You haven't made any calculations": '您尚未進行任何計算', "couldn't be parsed so NetworkLinks not followed.": '無法剖析,因此NetworkLinks不遵循。', "includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": '包括一个GroundOverlay或ScreenOverlay都不支援在OpenLayers尚未,因此可能無法正常運作。', '"update" is an optional expression like "field1=\'newvalue\'". 
You cannot update or delete the results of a JOIN': '"update"是選用的表示式類似"field1=\'newvalue\'"。您不能更新或刪除結果的結合', '# of Houses Damaged': '損壞的房屋數', '# of Houses Destroyed': '損毀的房屋數', '# of International Staff': '国際人員的人數', '# of National Staff': '#的国家人員', '# of People Affected': '#的人員分配', '# of People Deceased': '#的人員死亡', '# of People Injured': '#的人員受傷', '# of Vehicles': '#的媒介', '%(count)s rows deleted': '%(count)s已刪除的橫列', '%(count)s rows updated': '%(count)s已更新的橫列', '%(msg)s\nIf the request type is "%(type)s", please enter the %(type)s on the next screen.': '%(msg)s\nIf的要求類型是"%(type)s",請輸入 %(type)s 在下一个畫面。', '%(system_name)s - Verify Email': '%(system_name)s - 驗證電子郵件', '%.1f km': '%.1f公里', '& then click on the map below to adjust the Lat/Lon fields': '&,然后按一下"對映"下面的調整平面/長欄位', '* Required Fields': '* 必填欄位', '0-15 minutes': '〇-15分鐘', '1 Assessment': '一評量', '1 location, shorter time, can contain multiple Tasks': '一位置,較短的時間,可以包含多个作業', '1-3 days': '1-3 天', '1. Fill the necessary fields in BLOCK letters.': '一,填入必要的欄位區塊字母。', '15-30 minutes': '15-30分鐘', '2 different options are provided here currently:': '二个不同的選項此處提供目前:', '2. Always use one box per letter and leave one box space to seperate words.': '二一律使用有一个方框依字母,并保留空間有一个方框來分隔文字。', '2x4 Car': '2x4車', '30-60 minutes': '30-60分鐘', '4-7 days': '四-七天', '4x4 Car': '電腦(4x4)車', '8-14 days': '八-14天', 'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': '一个標記,指派給个別位置設定時需要置換的記號指派給功能類別。', 'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': '一个参考文件,如:檔案, URL或聯絡人,以驗證這項資料。 您可以鍵入第1幾个字元的文件名稱,以鏈結至現有的文件。', 'A Warehouse/Site is a physical location with an address and GIS data where Items are Stored. It can be a Building, a particular area in a city or anything similar.': '倉庫/網站是一个實体位置的地址与GIS資料位置的項目會儲存。 它可以是建置,一个特定區域中的城市或任何類似。', 'A brief description of the group (optional)': '群組的簡要說明(選用)', 'A file downloaded from a GPS containing a series of geographic points in XML format.': '從全球定位系統下載的文件包含了一系列XML格式的地理點。', 'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': '在來自全球定位系統GPX格式的文件,其時間戳可與照片的時間戳關聯以在地圖上找到它們。', 'A library of digital resources, such as Photos, signed contracts and Office documents.': '一个庫的數位資源,如照片,已簽署的合約和Office文件。', 'A library of digital resources, such as photos, documents and reports': '一个庫的數位資源,如照片,文檔和報告', 'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.': '位置群組可用來定义的范圍的受影响的區域,如果它未落在一个管理區域。', 'A location group is a set of locations (often, a set of administrative regions representing a combined area). Member locations are added to a location group here. Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group. A location group can be used to define the extent of an affected area, if it does not fall within one administrative region. 
Location groups can be used in the Regions menu.': '位置群組是一組的位置(通常是一个管理區域表示結合"地區")。 成員位置會新增至位置群組在這裡。 位置群組可能用來過濾顯示的內容在地圖上和在搜尋結果中只能實体所涵蓋的位置群組。 位置群組可用來定义的范圍的受影响的區域,如果它未落在一个管理區域。 位置群組可用于區域的功能表。', 'A location group is a set of locations (often, a set of administrative regions representing a combined area).': '位置群組是一組的位置(通常是一个管理區域表示結合"地區")。', 'A location group must have at least one member.': '位置群組必须至少有一个成員。', 'A place within a Site like a Shelf, room, bin number etc.': '一个位置網站內的類似層板,房間,貯存箱號碼等等。', 'A practical example can be of a report of lost person. Now if one machine register him to be found on 16th August and another machine registers him to found on 17th August, then e.g. Newer timestamp will replace data entry of your machine with that of foriegn machine because that is newer one.': '一个實用的范例可以是一个報告的遺失人員。 現在如果一台機器註冊該上找到第16 8月及另一部機器登錄該使用者上找第17 8月,例如,然后新的時間戳記會取代資料項目的機器的外部機器的原因是新的。', 'A snapshot of the bin or additional documents that contain supplementary information about it can be uploaded here.': 'Snapshot的bin或其他文件包含有關的增補資訊可以上传這裡。', 'A snapshot of the location or additional documents that contain supplementary information about the Site Location can be uploaded here.': '一个Snapshot的位置或其他文件包含有關的增補資訊位置可以上传在這裡。', 'A snapshot of the location or additional documents that contain supplementary information about the Site can be uploaded here.': '一个Snapshot的位置或其他文件包含有關的增補資訊的網站可以上传在這裡。', 'A survey series with id %s does not exist. Please go back and create one.': '一个調查系列ID為%s不存在。 請回上頁,并建立一个。', 'ABOUT THIS MODULE': '關於此模組', 'ABOUT': '關於', 'ACCESS DATA': '存取資料', 'ANY': '任何', 'API is documented here': 'API是記載在這裡', 'ATC-20 Rapid Evaluation modified for New Zealand': 'ATC-20快速評估修改新西蘭', 'Abbreviation': '縮寫', 'Ability to Fill Out Surveys': '能够填寫調查', 'Ability to customize the list of details tracked at a Shelter': '能够自訂清單的詳細追蹤, Shelter', 'Ability to customize the list of human resource tracked at a Shelter': '能够自訂清單的人力資源上追蹤一个Shelter', 'Ability to customize the list of important facilities needed at a Shelter': '能够自訂清單的重要設備需要在一个Shelter', 'Ability to track partial fulfillment of the request': '能够追蹤部分履行的要求', 'Ability to view Results of Completed and/or partially filled out Surveys': '可用來檢視結果的完成和/或部分填寫調查', 'About Sahana Eden': '關于Sahana Eden', 'About Sahana': '關于Sahana', 'About this module': '關於此模組', 'About': '關於', 'Access denied': '拒絕存取', 'Access to Shelter': '若要存取Shelter', 'Access to education services': '若要存取教育服務', 'Accessibility of Affected Location': '协助工具的受影响的位置', 'Account Registered - Please Check Your Email': '帳戶已註冊-請檢查您的電子郵件', 'Account registered, however registration is still pending approval - please wait until confirmation received.': '账户登錄,但是登錄仍在擱置核准-請稍候直到收到確認。', 'Acronym': '字首語', 'Actionable by all targeted recipients': '所有可執行目標收件者', 'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': '僅可由指定的練習"参与者"練習應該出現在ID<note>', 'Actionable': '可行', 'Actioned?': '大通?', 'Actions taken as a result of this request.': '採取的動作的結果,這个要求。', 'Actions': '動作', 'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).': '從方案模板激活活動以分配適當的資源(人力,資產及設施)。', 'Active Problems': '作用中問題', 'Active': '作用中', 'Activities matching Assessments:': '相符的活動評量:', 'Activities of boys 13-17yrs before disaster': '活動的男女13-17yrs前災難', 'Activities of boys 13-17yrs now': '活動的男女13-17yrs現在', 'Activities of boys <12yrs before disaster': '活動的男孩<12yrs之前災難', 'Activities of boys <12yrs now': '活動的<12yrs現在男孩', 'Activities of children': '活動的子項', 'Activities of 
girls 13-17yrs before disaster': '活動的女孩13 17yrs之前災難', 'Activities of girls 13-17yrs now': '活動的女孩13 17yrs現在', 'Activities of girls <12yrs before disaster': '活動的女孩<12yrs之前災難', 'Activities of girls <12yrs now': '活動的女孩<12yrs現在', 'Activities': '活動', 'Activities:': '活動:', 'Activity Added': '新增活動', 'Activity Deleted': '刪除活動', 'Activity Details': '活動明細', 'Activity Report': '活動報告', 'Activity Reports': '活動報告', 'Activity Type': '活動類型', 'Activity Updated': '更新活動', 'Activity': '活動', 'Add Activity Type': '新增活動類型', 'Add Address': '新增地址', 'Add Aid Request': '新增輔助請求', 'Add Alternative Item': '新增替代項目', 'Add Assessment Summary': '新增評量摘要', 'Add Assessment': '新增評量', 'Add Asset Log Entry - Change Label': '新增資產日誌項目-變更標籤', 'Add Availability': '新增可用性', 'Add Baseline Type': '新增基準线類型', 'Add Baseline': '新增基準线', 'Add Bin Type': '新增bin類型', 'Add Bins': '新增圖表匣', 'Add Bundle': '新增軟體組', 'Add Camp Service': 'Camp新增服務', 'Add Camp Type': 'Camp新增類型', 'Add Camp': '新增Camp', 'Add Catalog.': '新增型錄。', 'Add Category': '新增種類', 'Add Category<>Sub-Category<>Catalog Relation': '新增Category<>Sub-Category<>Catalog關系', 'Add Certification': '新增認證', 'Add Competency': '新增能力', 'Add Config': '新增配置', 'Add Contact': '新增聯絡人', 'Add Contact Information': '新增聯絡資訊', 'Add Course Certicate': '新增進程凭證', 'Add Credential': '新增認證', 'Add Credentials': '新增認證', 'Add Disaster Victims': '新增災難受害者', 'Add Distribution': '新增配送', 'Add Donor': '新增Donor', 'Add Flood Report': '新增水災報告', 'Add GIS Feature': '新增GIS功能', 'Add Group Member': '新增群組成員', 'Add Human Resource': '新增人力資源', 'Add Identity': '新增新的身分', 'Add Identity': '新增身分', 'Add Image': '新增影像', 'Add Impact Type': '新增影响類型', 'Add Impact': '新增影响', 'Add Inventory Item': '新增庫存項目', 'Add Inventory Location': '新增庫存位置', 'Add Inventory Store': '新增資產儲存庫', 'Add Item (s)': '新增項目(S)', 'Add Item Catalog Category': '新增項目型錄種類', 'Add Item Catalog': '新增項目型錄', 'Add Item Sub-Category': '添加子類別', 'Add Item to Catalog': '新增項目到型錄', 'Add Item to Commitment': '新增項目至承諾', 'Add Item to Inventory': '新增項目至庫存', 'Add Item to Request': '新增項目至要求', 'Add Item to Shipment': '新增項目至出貨', 'Add Item': '新增項目', 'Add Job Role': '新增工作角色', 'Add Key': '新增金鑰', 'Add Kit': '新增套件', 'Add Landmark': '新增里程碑', 'Add Level 1 Assessment': '新增層次一評量', 'Add Level 2 Assessment': '新增層次二評量', 'Add Line': '新增一行', 'Add Locations': '新增位置', 'Add Log Entry': '新增日誌項目', 'Add Member': '新增成員', 'Add Membership': '新增成員資格', 'Add Message': '新增訊息', 'Add Metadata': '新增元數據', 'Add Mission': '新增任務', 'Add Need Type': '新增需要類型', 'Add Need': '新增需要', 'Add New Aid Request': '新增輔助請求', 'Add New Assessment Summary': '新增評量摘要', 'Add New Baseline Type': '新增基準线類型', 'Add New Baseline': '新增基準線', 'Add New Bin Type': '新增bin類型', 'Add New Bin': '新增新貯存箱', 'Add New Budget': '新增新預算', 'Add New Bundle': '新增軟体組', 'Add New Camp Service': '新增Camp服務', 'Add New Camp Type': '新增Camp類型', 'Add New Camp': '新增Camp', 'Add New Cluster Subsector': '新增叢集Subsector', 'Add New Cluster': '新增叢集', 'Add New Commitment Item': '新增承諾書項目', 'Add New Config': '新增配置', 'Add New Distribution Item': '新增分配項目', 'Add New Distribution': '新增分配', 'Add New Document': '新增文件', 'Add New Donor': '新增Donor', 'Add New Entry': '新增項目', 'Add New Event': '新增事件', 'Add New Flood Report': '新增水災報告', 'Add New Human Resource': '新增人力資源', 'Add New Image': '新增影像', 'Add New Impact Type': '新增影响類型', 'Add New Impact': '新增新影响', 'Add New Inventory Item': '新增庫存項目', 'Add New Inventory Location': '新增庫存位置', 'Add New Inventory Store': '新增至資產儲存庫', 'Add New Item Catalog Category': '新增項目型錄種類', 'Add New Item Catalog': '新增項目型錄', 'Add New Item Sub-Category': '新增項目子類別', 'Add New Item to Kit': '新增項目至套件', 
'Add New Key': '新增金鑰', 'Add New Landmark': '新增里程碑', 'Add New Level 1 Assessment': '新增層次一評量', 'Add New Level 2 Assessment': '新增層次二評量', 'Add New Member': '新增成員', 'Add New Membership': '新增組員', 'Add New Metadata': '新增meta資料', 'Add New Need Type': '新增需要類型', 'Add New Need': '新增需要', 'Add New Note': '新增附註', 'Add New Partner': '新增夥伴', 'Add New Patient': '新增病人', 'Add New Peer': '新增同層級', 'Add New Population Statistic': '新增人口统計資料', 'Add New Position': '新增位置', 'Add New Problem': '新增問題', 'Add New Rapid Assessment': '新增快速評量', 'Add New Received Item': '新增接收項目', 'Add New Record': '新增記錄', 'Add New Request Item': '新增要求項目', 'Add New Request': '新增要求', 'Add New Response': '新增回應', 'Add New River': '新增金水河', 'Add New Role to User': '新增角色至使用者', 'Add New Scenario': '新增實務', 'Add New School District': '新增學校特區', 'Add New School Report': '新增學校報告', 'Add New Section': '新增區段', 'Add New Sent Item': '新增传送的項目', 'Add New Setting': '新增設定', 'Add New Shipment to Send': '新增出貨以传送', 'Add New Site': '新增網站', 'Add New Solution': '新增解决方案', 'Add New Source': '新增來源', 'Add New Staff Type': '新增工作人員類型', 'Add New Staff': '新增人員', 'Add New Storage Location': '新增儲存位置', 'Add New Subsector': '新增Subsector', 'Add New Survey Answer': '新增問卷調查回答', 'Add New Survey Question': '新增問卷調查問題', 'Add New Survey Section': '新增問卷調查部分', 'Add New Survey Series': '新增問卷調查系列', 'Add New Survey Template': '新增調查范本', 'Add New Team': '新增團隊', 'Add New Ticket': '新增問題單', 'Add New Track': '新增追蹤', 'Add New Unit': '新增單位', 'Add New Update': '新增更新', 'Add New User to Role': '新增使用者至角色', 'Add New': '新增', 'Add Note': '新增附註', 'Add Partner': '新增夥伴', 'Add Peer': '新增同層級', 'Add Person': '新增人員', 'Add Photo': '新增照片', 'Add Point': '新增點', 'Add Polygon': '新增多邊形', 'Add Population Statistic': '新增人口统計資料', 'Add Position': '新增位置', 'Add Problem': '新增問題', 'Add Projections': '新增估算', 'Add Question': '新增問題', 'Add Rapid Assessment': '新增快速評量', 'Add Recipient Site': '新增收件者網站', 'Add Recipient': '新增接收者', 'Add Record': '新增記錄', 'Add Recovery Report': '新增回复報告', 'Add Reference Document': '新增参照文件', 'Add Relief Item': '新增浮雕項目', 'Add Report': '新增報告', 'Add Request Detail': '新增要求詳細資料', 'Add Request Item': '新增要求項目', 'Add Request': '新增要求', 'Add Response': '新增回應', 'Add School District': '新增學校特區', 'Add School Report': '新增學校報告', 'Add Section': '新增區段', 'Add Sender Organization': '新增寄件者組織', 'Add Sender Site': '新增寄件者網站', 'Add Setting': '新增設定', 'Add Shipment Transit Log': '新增出貨传輸日誌', 'Add Shipment/Way Bills': '新增出貨/方式账單', 'Add Site': '新增站台', 'Add Skill Equivalence': '新增等值技能', 'Add Skill Provision': '新增供應技能', 'Add Skill Types': '技能新增類型', 'Add Solution': '新增解決方案', 'Add Source': '新增來源', 'Add Staff Type': '新增人員類型', 'Add Staff': '新增人員', 'Add Storage Bin Type': '新增儲存体bin類型', 'Add Storage Bin': '新增儲存体bin', 'Add Storage Location': '新增儲存體位置', 'Add Sub-Category': '新增子種類', 'Add Subscription': '新增訂閱', 'Add Subsector': '新增Subsector', 'Add Survey Answer': '新增調查回答', 'Add Survey Question': '新增調查問題', 'Add Survey Section': '新增調查區段', 'Add Survey Series': '新增調查系列', 'Add Survey Template': '新增調查范本', 'Add Team Member': '新增成員', 'Add Team': '新增團隊', 'Add Ticket': '新增問題單', 'Add Training': '新增訓練', 'Add Unit': '新增單位', 'Add Update': '新增更新', 'Add Volunteer Availability': '新增自愿可用性', 'Add Volunteer Registration': '新增自愿登錄', 'Add a New Inventory Location': '新增一个新庫存位置', 'Add a New Relief Item': '新增一个新的項目', 'Add a Reference Document such as a file, URL or contact person to verify this data. 
If you do not enter a Reference Document, your email will be displayed instead.': '新增一个参考文件,如:檔案, URL或聯絡人,以驗證這項資料。 如果您不輸入一个参照文件,您的電子郵件將不顯示。', 'Add a Reference Document such as a file, URL or contact person to verify this data.': '新增一个参考文件,如:檔案, URL或聯絡人,以驗證這項資料。', 'Add a Volunteer': '新增一个主動', 'Add a new Relief Item.': '新增一个新的項目。', 'Add a new Site from where the Item is being sent.': '新增一个新的站點的項目被送出。', 'Add a new Site where the Item is being sent to.': '新增一个新的項目的场所传送。', 'Add a new certificate to the catalog.': '於目錄添加新的證書。', 'Add a new competency rating to the catalog.': '新增一个新的能力的分級目錄。', 'Add a new course to the catalog.': '新增一个新的進程至型錄。', 'Add a new job role to the catalog.': '新增一个新的工作角色至型錄。', 'Add a new skill provision to the catalog.': '新增一个新技術供應至型錄。', 'Add a new skill to the catalog.': '新增一个新技術的型錄。', 'Add a new skill type to the catalog.': '新增一个新的技術類型至型錄。', 'Add an Photo.': '新增一个照片。', 'Add main Item Category.': '新增主要項目種類。', 'Add main Item Sub-Category.': '新增主要項目子類別。', 'Add new Group': '新增群組', 'Add new Individual': '新增个別', 'Add new person.': '新增人員。', 'Add new position.': '新增位置。', 'Add new project.': '新增專案。', 'Add new staff role.': '新增工作人員角色。', 'Add new staff.': '新增人員。', 'Add or Update': '新增或更新', 'Add staff members': '新增人員成員', 'Add the Storage Bin Type.': '新增儲存体bin類型。', 'Add the Storage Location where this bin is located.': '新增儲存体位置這位的位置。', 'Add the Storage Location where this this Bin belongs to.': '新增儲存体位置這个紙匣所屬。', 'Add the main Warehouse/Site information where this Bin belongs to.': '新增主要倉儲/站台資訊在此Bin屬于。', 'Add the main Warehouse/Site information where this Item is to be added.': '新增主要倉儲/站台資訊在這个項目是要新增。', 'Add the main Warehouse/Site information where this Storage location is.': '新增主要倉儲/資訊為的儲存体位置。', 'Add the unit of measure if it doesnt exists already.': '新增測量單位如果不存在。', 'Add to Bundle': '新增至軟體組', 'Add to Catalog': '新增至型錄', 'Add to budget': '新增至預算', 'Add volunteers': '新增志愿者', 'Add': '新增', 'Add/Edit/Remove Layers': '新增/編輯/移除層', 'Added to Group': '組員已新增', 'Added to Team': '組員已新增', 'Additional Beds / 24hrs': '其他Beds / 24hrs', 'Additional Comments': '其他註解', 'Additional quantity quantifier – i.e. “4x5”.': '其他數量限量元-也就是"4x5"。', 'Address Details': '位址詳細資料', 'Address Type': '位址類型', 'Address added': '新增位址', 'Address deleted': '刪除地址', 'Address updated': '更新地址', 'Address': '地址', 'Addresses': '地址', 'Adequate food and water available': '足够的食物和水可用', 'Adequate': '足够', 'Adjust Item(s) Quantity': '調整項目(s)的數量', 'Adjust Items due to Theft/Loss': '調整項目由于遭竊/遺失', 'Admin Email': '管理電子郵件', 'Admin Name': 'Admin 名稱', 'Admin Tel': '管理TEL', 'Admin': '管理權', 'Administration': '管理模組', 'Administrator': '管理者', 'Adolescent (12-20)': '青少年 (13-17)', 'Adolescent participating in coping activities': 'Adolescent参与复制活動', 'Adult (21-50)': '成人 (16-64)', 'Adult ICU': '成人ICU', 'Adult Psychiatric': '成人Psychiatric', 'Adult female': '成人女性', 'Adult male': '成人男性', 'Adults in prisons': 'prisons中的成人', 'Advanced Bin Search': '進階搜尋bin', 'Advanced Catalog Search': '進階搜尋型錄', 'Advanced Category Search': '進階搜尋種類', 'Advanced Item Search': '進階搜尋項目', 'Advanced Location Search': '進階搜尋位置', 'Advanced Site Search': '進階網站搜尋', 'Advanced Sub-Category Search': '先進的子分類搜索', 'Advanced Unit Search': '進階單位搜索', 'Advanced:': '進階:', 'Advisory': '諮詢', 'Affectees Families settled in the school belong to district': '受影響家庭定居在區內學校', 'After clicking on the button, a set of paired items will be shown one by one. 
Please select the one solution from each pair that you prefer over the other.': '之后,按一下按鈕時,一个組成對的項目會顯示一个。 請選取一个解决方案中每一對您喜好的"其他"。', 'Age Group': '年齡層', 'Age group does not match actual age.': '群組不符合實際經歷時間。', 'Age group': '年齡層', 'Aggravating factors': 'Aggravating因素', 'Aggregate Items': '聚集項目', 'Agriculture': '農業', 'Aid Request Details': '輔助要求詳細資料', 'Aid Request added': '輔助請求添加', 'Aid Request deleted': '輔助刪除要求', 'Aid Request updated': '要求更新輔助', 'Aid Request': '輔助請求', 'Aid Requests': '輔助要求', 'Air Transport Service': '空氣传輸服務', 'Aircraft Crash': '墜機', 'Aircraft Hijacking': '飛機强制存取', 'Airport Closure': '機场關閉', 'Airport': '機場', 'Airspace Closure': 'Airspace關閉', 'Alcohol': '酒精', 'Alert': '警示', 'All Inbound & Outbound Messages are stored here': '所有入埠及出埠訊息儲存在這裡', 'All Locations': '所有位置', 'All Pledges': '所有抵押', 'All Requested Items': '所有要求的項目', 'All Resources': '所有資源', 'All data is able to be shared with other sites in real time.': '所有資料可以共用其他站台的實際時間。', 'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': '所有資料所提供的Sahana Software Foundation從這个網站授權下的創意Commons聲明授權。 然而,并非所有資料產生在這裡。 請参閱來源欄位的每一个項目。', 'All': '所有', 'Allowed to push': '允許推送', 'Allows a Budget to be drawn up': '容許預算要繪制設置', 'Allows authorized users to control which layers are available to the situation map.': '可讓授權使用者來控制層可用的狀湟對映。', 'Allows authorized users to upload multiple features into the situation map.': '容許授權的使用者上传多个特性的狀湟對映。', 'Alternative Item Details': '替代項目詳細資料', 'Alternative Item added': '新增替代項目', 'Alternative Item deleted': '替代項目刪除', 'Alternative Item updated': '替代更新項目', 'Alternative Item': '替代項目', 'Alternative Items': '替代項目', 'Alternative infant nutrition in use': '替代嬰兒營養使用中', 'Alternative places for studying available': '替代的工作區研究可用', 'Alternative places for studying': '替代工作區的研究', 'Ambulance Service': '救護車服務', 'An Inventory Store is a physical place which contains Relief Items available to be Distributed.': '一个資產儲存庫是一个實体位置包含的項目可用的。', 'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': '一个進氣區系统,倉儲管理系统,商品追蹤,供應鏈管理,採購,及其他資產和資源管理功能。', 'An interactive map of the situation.': '互動式對映的狀湟。', 'An item which can be used in place of another item': '一个項目可用于代替另一个項目', 'Analysis of Completed Surveys': '分析完成的調查', 'Animal Die Off': '動物骰子關閉', 'Animal Feed': '動物饋送', 'Animals': '動物', 'Answer Choices (One Per Line)': '答案選項(每行一)', 'Antibiotics available': 'Antibiotics可用', 'Antibiotics needed per 24h': 'Antibiotics需要每小時', 'Any available Metadata in the files will be read automatically, such as Timestamp, Author, Latitude & Longitude.': '任何可用的meta資料中,將檔案自動讀取,例如時間戳記,作者,緯度和經度。', 'Any comments about this sync partner.': '任何相關註解這个同步伙伴。', 'Apparent Age': '明顯經歷時間', 'Apparent Gender': '明顯性別', 'Application Deadline': '應用程式截止時間', 'Appropriate clothing available': '適當的衣服可用', 'Appropriate cooking equipment/materials in HH': '烹飪適當設備/材料hh', 'Approve': '核准', 'Approved': '已核准', 'Approver': '核准者', 'Approx. 
number of cases/48h': '大約案例數/48小時', 'Approximately how many children under 5 with diarrhea in the past 48 hours?': '過去48小時內大約有多少五歲以下兒童腹瀉?', 'Archive not Delete': '封存而非刪除', 'Arctic Outflow': '北極寒流', 'Are basic medical supplies available for health services since the disaster?': '災後基本的醫療用品是否可用於衛生服務?', 'Are breast milk substitutes being used here since the disaster?': '在災難發生後母乳代用品是否被用在這裡?', 'Are the areas that children, older people, and people with disabilities live in, play in and walk through on a daily basis physically safe?': '兒童,老人,和殘疾人士每天生活,嬉戲和走過的地區是否實際安全?', 'Are the chronically ill receiving sufficient care and assistance?': '長期病患者是否得到足夠的關心和幫助?', 'Are there adults living in prisons in this area?': '此區域是否有成人住在監獄中?', 'Are there alternative places for studying?': '是否有替代的學習場所?', 'Are there cases of diarrhea among children under the age of 5?': '五歲以下兒童是否有腹瀉案例?', 'Are there children living in adult prisons in this area?': '此區域是否有兒童住在成人監獄中?', 'Are there children living in boarding schools in this area?': '此區域是否有兒童住在寄宿學校中?', 'Are there children living in homes for disabled children in this area?': '此區域是否有兒童住在殘障兒童之家中?', 'Are there children living in juvenile detention in this area?': '此區域是否有兒童被收容在少年拘留所中?', 'Are there children living in orphanages in this area?': '此區域是否有兒童住在孤兒院中?', 'Are there children with chronical illnesses in your community?': '您的社區中是否有患慢性病的兒童?', 'Are there health services functioning for the community since the disaster?': '災後社區是否有運作中的衛生服務?', 'Are there older people living in care homes in this area?': '此區域是否有老人住在安養院中?', 'Are there older people with chronical illnesses in your community?': '您的社區中是否有患慢性病的老人?', 'Are there people with chronical illnesses in your community?': '您的社區中是否有患慢性病的人?', 'Are there separate latrines for women and men available?': '是否有男女分開的廁所可用?', 'Are there staff present and caring for the residents in these institutions?': '這些機構中是否有工作人員在場照顧住民?', 'Area': '區域 (area)', 'Areas inspected': '已視察地區', 'Assessment Details': '評量詳細資料', 'Assessment Reported': '評量報告', 'Assessment Summaries': '評量摘要', 'Assessment Summary Details': '評量摘要詳細資料', 'Assessment Summary added': '新增評量摘要', 'Assessment Summary deleted': '刪除評量摘要', 'Assessment Summary updated': '評量摘要更新', 'Assessment Type': '評量類型', 'Assessment Type:': '評量類型:', 'Assessment added': '新增評量', 'Assessment admin level': '評量管理層次', 'Assessment deleted': '評量刪除', 'Assessment timeline': '評量時間表', 'Assessment updated': '評量更新', 'Assessment': '評量', 'Assessments Needs vs. Activities': '評估需求與活動對照', 'Assessments and Activities': '評量及活動', 'Assessments are structured reports done by Professional Organizations - data includes WFP Assessments': '評估是由專業組織完成的結構化報告-資料包括WFP評量', 'Assessments are structured reports done by Professional Organizations': '評估是由專業組織完成的結構化報告', 'Assessments': '評量', 'Assessments:': '評量:', 'Assessor': '評量者', 'Asset Assigned': '指派資產', 'Asset Assignment Details': '資產分派明細', 'Asset Assignments deleted': '資產分派刪除', 'Asset Assignments updated': '資產分派更新', 'Asset Assignments': '資產分派', 'Asset Details': '資產明細', 'Asset Log Details': '資產日誌詳細資料', 'Asset Log Empty': '資產日誌為空', 'Asset Log Entry Added - Change Label': '資產日誌項目新增-變更標籤', 'Asset Log Entry deleted': '資產日誌項目刪除', 'Asset Log Entry updated': '資產日誌項目更新', 'Asset Log': '資產日誌', 'Asset Management': '資產管理', 'Asset Number': '資產編號', 'Asset added': '已新增資產', 'Asset deleted': '已刪除資產', 'Asset removed': '移除資產', 'Asset updated': '已更新資產', 'Asset': '資產', 'Assets are resources which are not consumable but are expected back, so they need tracking.': '資產是非消耗性、預期會歸還的資源,因此需要追蹤。', 'Assets': '資產', 'Assign Asset': '指派資產', 'Assign Group': '指派給群組', 'Assign Staff': '指派人員', 'Assign Storage Location': '指定儲存位置', 'Assign to Org.': '指派給組織', 'Assign to Organization': '指派給組織', 'Assign to Person': '指派給人員', 'Assign to Site': '指派給站點', 'Assign': '指派', 'Assigned By': '指派者', 'Assigned To': '指派給', 'Assigned to Organization': '指派給組織', 'Assigned to Person': '指派給人員', 'Assigned to Site': '指派給站點', 'Assigned to': '指派給', 'Assigned': '已指派', 'Assignments': '指派', 'Assistance for immediate repair/reconstruction of houses': '房屋立即修復/重建的協助', 'Assistant': '助理', 'Assisted Family Care': '輔助式家庭照護', 'Assisted Self-care': '輔助式自我照護', 'At/Visited Location (not virtual)': '所在/造訪地點(非虛擬)', 'Attend to information sources as described in <instruction>': '依<instruction>所述留意資訊來源', 'Attribution': '來源標註', 'Audit Read': '審核讀取', 'Audit Write': '審核寫入', 'Authentication failed!': '鑑別失敗!', 'Authentication information of foreign server.': '外部伺服器的鑑別資訊。', 'Author': '作者', 'Author:': '作者:', 'Automatic Database Synchronization History': '自動資料庫同步化歷程', 'Automotive': '汽車', 'Availability': '可用性', 'Available Alternative Inventories': '可用的替代庫存', 'Available Beds': '可用床位', 'Available Inventories': '可用的庫存', 'Available Messages': '可用的訊息', 'Available Records': '可用的記錄', 'Available databases and tables': '可用的資料庫及表格', 'Available for Location': '可用的位置', 'Available from': '可用開始時間', 'Available in Viewer?': '可在檢視器中使用?', 'Available until': '截止有效期', 'Availablity': '可用性', 'Avoid the subject event as per the <instruction>': '依<instruction>避開該事件', 'Babies who are not being breastfed, what are they being fed on?': '不接受母乳餵養的嬰兒吃什麼?', 'Baby And Child Care': '嬰兒及幼兒護理', 'Background Colour for Text blocks': '文字區塊的背景顏色', 'Background Colour': '背景顏色', 'Baldness': '禿頭', 'Banana': '香蕉', 'Bank/micro finance': '銀行/微型金融', 'Barricades are needed': '需要路障', 'Base Layer?': '基本層?', 'Base Layers': '基本層', 'Base Location': '基本位置', 'Base Site Set': '基本站點設定', 'Base Unit': '基本單位', 'Baseline Data': '基準線資料', 'Baseline Number of Beds': '基線床位數', 'Baseline Type Details': '基準線類型詳細資料', 'Baseline Type added': '新增基準線類型', 'Baseline Type deleted': '刪除基準線類型', 'Baseline Type updated': '更新基準線類型', 'Baseline Type': '基準線類型', 'Baseline Types': '基準線類型', 'Baseline added': '新增基準線', 'Baseline deleted': '刪除基準線', 'Baseline number of beds of that type in this unit.': '在本單位這種類型病床的基線數目。', 'Baseline updated': '更新基準線', 'Baselines Details': '基準線詳細資料', 'Baselines': '基準線', 'Basic Assessment Reported': '基本評量報告', 'Basic Assessment': '基本評量', 'Basic Details': '基本詳細資料', 'Basic information on the requests and donations, such as category, the units, contact details and the status.': '要求與捐贈的基本資訊,如種類、單位、聯絡詳細資料和狀態。', 'Basic medical supplies available prior to disaster': '災前可用的基本醫療用品', 'Basic medical supplies available since disaster': '災後可用的基本醫療用品', 'Basic reports on the Shelter and drill-down by region': '收容所的基本報告及依區域往下探查', 'Baud rate to use for your modem - The default is safe for most cases': '數據機使用的傳輸速率-預設值適用於大多數情況', 'Baud': '傳輸速率', 'Beacon Service URL': '引標服務URL', 'Beam': '樑', 'Bed Capacity per Unit': '每單位床容量', 'Bed Capacity': '床容量', 'Bed Type': '床型', 'Bed type already registered': '床型已註冊', 'Bedding materials available': '可用的床上用品', 'Below ground level': '地面以下', 'Beneficiary Type': '受益人類型', 'Biological Hazard': '生物危害', 'Blood Type (AB0)': '血型(ABO)', 'Blowing Snow': '高吹雪', 'Boat': '船班', 'Bodies found': '尋獲的遺體', 'Bodies recovered': '尋回的遺體', 'Body Recovery Reports': '遺體尋回報告', 'Body Recovery Request': '遺體尋回要求', 'Body Recovery Requests': '遺體尋回要求', 'Body': '遺體', 'Bomb Explosion': '炸彈爆炸', 'Bomb Threat': '炸彈威脅', 'Border Colour for Text blocks': '文字區塊的邊框顏色', 'Bounding Box Insets': '外框內縮', 'Bounding Box Size': '外框大小', 'Boys 13-18 yrs in affected area': '受影響區域中的13-18歲男孩', 'Boys 13-18 yrs not attending school': '未上學的13-18歲男孩', 'Boys 6-12 yrs in affected area': '受影響區域中的6-12歲男孩', 'Boys 6-12 yrs not attending school': '未上學的6-12歲男孩', 'Brand Details': '品牌詳細資料', 'Brand added': '品牌新增', 'Brand deleted': '品牌刪除', 'Brand updated': '品牌更新', 'Brand': '品牌', 'Brands': '品牌', 'Breast milk substitutes in use since disaster': '災後使用中的母乳代用品', 'Breast milk substitutes used prior to disaster': '災前使用的母乳代用品', 'Bricks': '磚', 'Bridge Closed': '橋樑封閉', 'Bridge': '橋樑', 'Bucket': '水桶 (bucket)', 'Buddhist': '佛教徒', 'Budget Details': '預算明細', 'Budget Updated': '更新預算', 'Budget added': '新增預算', 'Budget deleted': '刪除預算', 'Budget updated': '更新預算', 'Budget': '預算', 'Budgeting Module': '預算模組', 'Budgets': '預算', 'Buffer': '緩衝區', 'Bug': '錯誤', 'Building Aide': '建築助理', 'Building Assessments': '建築物評量', 'Building Collapsed': '建築物倒塌', 'Building Name': '大樓名稱', 'Building Safety Assessments': '建築物安全評量', 'Building Short Name/Business Name': '建築物簡稱/商業名稱', 'Building or storey leaning': '建築物或樓層傾斜', 'Built using the Template agreed by a group of NGOs working together as the': '使用由一群共同合作的非政府組織所議定的範本建置,該群組即', 'Bulk Uploader': '大量上傳工具', 'Bundle Contents': '組合內容', 'Bundle Details': '組合詳細資料', 'Bundle Updated': '更新組合', 'Bundle added': '新增組合', 'Bundle deleted': '刪除組合', 'Bundle updated': '更新組合', 'Bundle': '組合 (bundle)', 'Bundles': '組合', 'Burn ICU': '燒燙傷加護病房', 'Burn': '燒燙傷', 'Burned/charred': '燒焦/炭化', 'Business damaged': '商業受損', 'By Facility': '依設施', 'By Inventory': '依庫存', 'By Site': '依站點', 'By Warehouse': '依倉儲', 'CBA Women': '育齡婦女', 'CSS file %s not writable - unable to apply theme!': 'CSS檔 %s 無法寫入-無法套用佈景主題!', 'Calculate': '計算', 'Camp Coordination/Management': '營地協調/管理', 'Camp Details': '營地詳細資料', 'Camp Service Details': '營地服務詳細資料', 'Camp Service added': '新增營地服務', 'Camp Service deleted': '刪除營地服務', 'Camp Service updated': '更新營地服務', 'Camp Service': '營地服務', 'Camp Services': '營地服務', 'Camp Type Details': '營地類型詳細資料', 'Camp Type added': '新增營地類型', 'Camp Type deleted': '刪除營地類型', 'Camp Type updated': '更新營地類型', 'Camp Type': '營地類型', 'Camp Types and Services': '營地類型和服務', 'Camp Types': '營地類型', 'Camp added': '新增營地', 'Camp deleted': '刪除營地', 'Camp updated': '更新營地', 'Can only disable 1 record at a time!': '一次只能停用一筆記錄!', 'Can users register themselves for authenticated login access?': '使用者可以自行註冊以取得鑑別登入存取權嗎?', 'Cancel Log Entry': '取消日誌項目', 'Cancel Shipment': '取消出貨', 'Cancel': '取消', 'Canceled': '已取消', 'Cancelled': '已取消', 'Candidate Matches for Body %s': '遺體 %s 的候選相符項', 'Canned Fish': '魚罐頭', 'Cannot be empty': '不能是空的', 'Cannot delete whilst there are linked records. Please delete linked records first.': '尚有鏈結的記錄時無法刪除。請先刪除鏈結的記錄。', 'Cannot disable your own account!': '無法停用您自己的帳戶!', 'Capacity (Max Persons)': '容量(最大人數)', 'Capacity (W x D X H)': '容量(寬x深x高)', 'Capacity': '容量', 'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': '擷取災民群體(遊客、乘客、家庭等)的資訊', 'Capture Information on each disaster victim': '擷取每位災民的資訊', 'Capturing organizational information of a relief organization and all the projects they have in the region': '擷取救援組織的組織資訊及其在該區域的所有專案', 'Capturing the essential services each Volunteer is providing and where': '擷取每位志工提供的基本服務及其地點', 'Capturing the projects each organization is providing and where': '擷取每個組織提供的專案及其地點', 'Care Report': '照護報告', 'Care Strategy': '照護策略', 'Cash available to restart business': '可用於重新開業的現金', 'Casual Labor': '臨時工', 'Casualties': '傷亡', 'Catalog Details': '型錄詳細資料', 'Catalog Item added': '型錄項目新增', 'Catalog Item deleted': '型錄項目已刪除', 'Catalog Item updated': '型錄項目更新', 'Catalog Item': '型錄項目', 'Catalog Items': '型錄商品項目', 'Catalog Name': '型錄名稱', 'Catalog added': '新增型錄', 'Catalog deleted': '已刪除型錄', 'Catalog updated': '型錄更新', 'Catalog': '型錄 (catalog)', 'Catalogs': '型錄', 'Categories': '種類', 'Category': '類別', 'Category<>Sub-Category<>Catalog Relation added': 'Category<>Sub-Category<>Catalog關係已新增', 'Category<>Sub-Category<>Catalog Relation deleted': 'Category<>Sub-Category<>Catalog關係已刪除', 'Category<>Sub-Category<>Catalog Relation updated': 'Category<>Sub-Category<>Catalog關係已更新', 'Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog關係', 'Ceilings, light fixtures': '天花板、燈具', 'Central point to record details on People': '記錄人員詳細資料的中心點', 'Certificate Catalog': '憑證型錄', 'Certificate Details': '憑證明細', 'Certificate Status': '憑證狀態', 'Certificate added': '新增憑證', 'Certificate deleted': '已刪除憑證', 'Certificate updated': '已更新憑證', 'Certificate': '憑證', 'Certificates': '憑證', 'Certification Details': '認證詳細資料', 'Certification added': '新增認證', 'Certification deleted': '刪除認證', 'Certification updated': '更新認證', 'Certification': '認證', 'Certifications': '認證', 'Certifying Organization': '認證組織', 'Change Password': '變更密碼', 'Check Request': '檢查要求', 'Check for errors in the URL, maybe the address was mistyped.': '檢查URL中的錯誤,地址可能輸入錯誤。', 'Check if the URL is pointing to a directory instead of a webpage.': '請檢查URL是否指向目錄而非網頁。', 'Check outbox for the message status': '檢查寄件匣的訊息狀態', 'Check to delete': '勾選以刪除', 'Check to delete:': '勾選以刪除:', 'Check': '檢查', 'Check-in': '報到', 'Check-out': '退出', 'Checklist created': '已建立核對清單', 'Checklist deleted': '刪除清單', 'Checklist of Operations': '作業核對清單', 'Checklist updated': '更新清單', 'Checklist': '核對清單', 'Chemical Hazard': '化學危害', 'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': '化學、生物、輻射、核子或高爆性威脅或攻擊', 'Chicken': '雞肉', 'Child (2-11)': '兒童 (2-11)', 'Child (< 18 yrs)': '兒童(未滿18歲)', 'Child Abduction Emergency': '兒童綁架緊急事件', 'Child headed households (<18 yrs)': '兒童當家的家庭(未滿18歲)', 'Child': '兒童', 'Children (2-5 years)': '小孩(二到五歲)', 'Children (5-15 years)': '小孩(五到十五歲)', 'Children (< 2 years)': '小孩(不到兩歲)', 'Children in adult prisons': '小孩關在成人的監牢裏', 'Children in boarding schools': '小孩在住宿學校裏', 'Children in homes for disabled children': '殘障兒童之家中的兒童', 'Children in juvenile detention': '小孩在青少年監獄裏', 'Children in orphanages': '孤兒院中的兒童', 'Children living on their own (without adults)': '獨自生活的兒童(無成人陪伴)', 'Children not enrolled in new school': '未在新學校註冊的兒童', 'Children orphaned by the disaster': '因災難成為孤兒的兒童', 'Children separated from their parents/caregivers': '與父母/照顧者分離的兒童', 'Children that have been sent to safe places': '已被送往安全地點的兒童', 'Children who have disappeared since the disaster': '災後失蹤的兒童', 'Children with chronical illnesses': '患慢性病的兒童', 'Chinese (Taiwan)': '中文(台灣)', 'Chinese': '中文', 'Cholera Treatment Capability': '霍亂治療能力', 'Cholera Treatment Center': '霍亂治療中心', 'Cholera Treatment': '霍亂治療', 'Cholera-Treatment-Center': '霍亂治療中心', 'Choose Manually': '手動選擇', 'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': '根據新的評估和團隊判斷選擇新的張貼。影響整棟建築物的嚴重狀況是張貼「不安全」的依據。局部嚴重及整體中度的狀況可能需要「限制使用」。將「已檢查」告示貼於主要入口,其他所有告示貼於每個重要入口。', 'Choose from one of the following options': '選擇下列其中一個選項', 'Choosing Skill and Resources of Volunteers': '選擇志工的技能和資源', 'Christian': '基督徒', 'Church': '教堂', 'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.': '失蹤的情況,以及最後見到失蹤者存活的其他受害者/目擊者。', 'City': '城市', 'Civil Emergency': '民事緊急', 'Clear Selection': '取消選擇', 'Click here to open log': '請按一下這裡來開啟日誌', 'Click on a Map': '按一下地圖', 'Click on an ID in the left-hand column to make a Pledge to match a request for aid.': '按一下左欄中的ID以進行認捐,以符合援助要求。', 'Click on the link %(url)s to reset your password': '按一下鏈結 %(url)s 以重設您的密碼', 'Click on the link %(url)s to verify your email': '按一下鏈結 %(url)s 以驗證您的電子郵件', 'Client IP': '用戶端 IP', 'Clinical Laboratory': '臨床實驗室', 'Clinical Operations': '臨床作業', 'Clinical Status': '臨床狀態', 'Closed': '結案', 'Closure': '結束', 'Clothing': '衣服', 'Cluster Details': '叢集詳細資料', 'Cluster Distance': '叢集距離', 'Cluster Subsector Details': '集群界別分組詳細資料', 'Cluster Subsector added': '新增集群界別分組', 'Cluster Subsector deleted': '刪除集群界別分組', 'Cluster Subsector updated': '集群界別分組更新', 'Cluster Subsector': '集群界別分組', 'Cluster Subsectors': '集群界別分組', 'Cluster Threshold': '集群臨界值', 'Cluster added': '新增叢集', 'Cluster deleted': '刪除叢集', 'Cluster updated': '更新集群', 'Cluster': '叢集', 'Cluster(s)': '叢集', 'Clusters': '叢集', 'Code': '代碼', 'Cold Wave': '寒流', 'Collapse, partial collapse, off foundation': '倒塌、局部倒塌、偏離地基', 'Collective center': '集體中心', 'Colour for Underline of Subheadings': '子標題底線的顏色', 'Colour of Buttons when hovering': '游標暫留時按鈕的顏色', 'Colour of bottom of Buttons when not pressed': '按鈕未按下時底部的顏色', 'Colour of bottom of Buttons when pressed': '按鈕按下時底部的顏色', 'Colour of dropdown menus': '下拉功能表的顏色', 'Colour of selected Input fields': '選取的輸入欄位的顏色', 'Colour of selected menu items': '選取的功能表項目的顏色', 'Column Choices (One Per Line': '直欄選項(每行一個', 'Columns, pilasters, corbels': '柱、壁柱、托架', 'Combined Method': '合併方法', 'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': '稍後回來。造訪此網站的每個人可能都遇到和您一樣的問題。', 'Come back later.': '稍後回來。', 'Comments': '備註', 'Commercial/Offices': '商業/辦公室', 'Commit Date': '確定日期', 'Commit from %s': '從 %s 確定', 'Commit': '確定', 'Commit. 
Status': '確定。 狀態', 'Commiting a changed spreadsheet to the database': '正在確定變更資料庫,試算表', 'Commitment Added': '新增的承諾書', 'Commitment Canceled': '取消承諾', 'Commitment Details': '承諾書細節', 'Commitment Item Details': '承諾項目細節', 'Commitment Item added': '新增承諾項目', 'Commitment Item deleted': '已刪除之承諾項目', 'Commitment Item updated': '承諾項目更新', 'Commitment Item': '承諾項目', 'Commitment Items': '承諾項目', 'Commitment Status': '承諾狀態', 'Commitment Updated': '更新承諾', 'Commitment': '確定', 'Commitments': '承諾', 'Committed By': '確定由', 'Committed': '已確定', 'Committing Inventory': '確定庫存', 'Communication problems': '通訊問題', 'Community Centre': '中心社群', 'Community Health Center': '健康中心社群', 'Community Member': '社群成員', 'Competencies': '競爭力', 'Competency Details': '能力詳細資料', 'Competency Rating Catalog': '能力分級目錄', 'Competency Rating Details': '能力詳細分級', 'Competency Rating added': '能力新增分級', 'Competency Rating deleted': '能力刪除分級', 'Competency Rating updated': '能力更新評比', 'Competency Ratings': '能力等級', 'Competency added': '新增能力', 'Competency deleted': '刪除能力', 'Competency updated': '更新能力', 'Competency': '能力', 'Complete Database Synchronized': '完成資料庫同步', 'Complete Unit Label for e.g. meter for m.': '完成單元的標籤(如的計量M。', 'Complete': '完成', 'Completed': '已完成', 'Compose': '傳訊', 'Compromised': '受損', 'Concrete frame': '具体框架', 'Concrete shear wall': '具体銳角牆面', 'Condition': '條件', 'Config added': '新增配置', 'Config deleted': '刪除配置', 'Config updated': '更新配置', 'Config': '配置', 'Configs': 'configs', 'Configurations': '配置', 'Configure Run-time Settings': '配置執行時期設定', 'Confirm Shipment Received': '確認出貨接收', 'Confirmed Incidents': '確認事件', 'Confirmed': '已確認', 'Confirming Organization': '確認組織', 'Conflict Details': '衝突明細', 'Conflict Resolution': '衝突解決', 'Consignment Note': '寄售附註', 'Constraints Only': '僅限制', 'Consumable': '消耗品', 'Contact Data': '聯絡資料', 'Contact Details': '聯絡人詳細資料', 'Contact Info': '聯絡資訊', 'Contact Information Added': '新增聯絡資訊', 'Contact Information Deleted': '刪除聯絡資訊', 'Contact Information Updated': '更新聯絡資訊', 'Contact Information': '聯絡資訊', 'Contact Method': '聯絡方式', 'Contact Name': '聯絡人名稱', 'Contact Person': '聯絡人', 'Contact Phone': '聯絡電話', 'Contact details': '聯絡人詳細資料', 'Contact information added': '新增聯絡資訊', 'Contact information deleted': '刪除聯絡資訊', 'Contact information updated': '更新聯絡資訊', 'Contact person in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': '聯絡人的情湟下新聞或其他問題(如果不同報告人員)。 包括電話號碼,地址和電子郵件作為可用。', 'Contact person(s) in case of news or further questions (if different from reporting person). 
Include telephone number, address and email as available.': '聯絡人(s)的情湟下新聞或其他問題(如果与報告人員)。 包括電話號碼,地址和電子郵件作為可用。', 'Contact us': '聯絡我們', 'Contact': '聯絡人', 'Contacts': '聯絡人', 'Contents': '目錄', 'Contradictory values!': '互相矛盾的值!', 'Contributor': '提供者', 'Conversion Tool': '轉換工具', 'Cooking NFIs': 'NFIs調理油', 'Cooking Oil': '食用油', 'Coordinate Conversion': '座標轉換', 'Coping Activities': '复制活動', 'Copy any data from the one to be deleted into the one to keep': '复制任何資料從一个要刪除的一个保留', 'Copy': '複製', 'Corn': '玉米粉', 'Cost Type': '成本類型', 'Cost per Megabyte': '每MB成本', 'Cost per Minute': '成本每分鐘', 'Country of Residence': '居住國家', 'Country': '國家', 'County': '州政府', 'Course Catalog': '課程型錄', 'Course Certicate Details': '課程凭證詳細資料', 'Course Certicate added': '課程凭證新增至', 'Course Certicate deleted': '課程刪除凭證', 'Course Certicate updated': '課程更新凭證', 'Course Certicates': '課程凭證', 'Course Certificates': '課程凭證', 'Course Details': '課程詳細資料', 'Course added': '課程新增', 'Course deleted': '刪除進程', 'Course updated': '課程更新', 'Course': '課程', 'Courses': '課程', 'Create & manage Distribution groups to receive Alerts': '建立並管理發送通知的群組', 'Create Activity Report': '新增活動報告', 'Create Activity Type': '新增活動類型', 'Create Activity': '新增活動', 'Create Assessment': '新增評量', 'Create Asset': '新增資產', 'Create Bed Type': '新增平台類型', 'Create Brand': '新增品牌', 'Create Budget': '新增預算', 'Create Catalog Item': '新增型錄項目', 'Create Catalog': '新增型錄', 'Create Certificate': '新增憑證', 'Create Checklist': '建立核對清單', 'Create Cholera Treatment Capability Information': '新增Cholera處理功能資訊', 'Create Cluster Subsector': '新增叢集Subsector', 'Create Cluster': '新增叢集', 'Create Competency Rating': '新增能力分級', 'Create Contact': '新增聯絡人', 'Create Course': '新增課程', 'Create Dead Body Report': '新增停用主体報告', 'Create Event': '建立新的事件', 'Create Facility': '新增機能', 'Create Feature Layer': '新增功能層', 'Create Group Entry': '新增群組', 'Create Group': '新增群組', 'Create Hospital': '新增醫院', 'Create Identification Report': '新增識別報告', 'Create Impact Assessment': '建立影响評估', 'Create Import Job': '建立匯入工作', 'Create Incident Report': '新增事件報告', 'Create Incident': '新增事件', 'Create Item Category': '新增項目種類', 'Create Item Pack': '新增項目套件', 'Create Item': '新增項目', 'Create Kit': '新增套件', 'Create Layer': '新增層', 'Create Location': '新增位置', 'Create Map Configuration': '新增對映配置', 'Create Marker': '新增標記', 'Create Member': '新增成員', 'Create Mobile Impact Assessment': '建立行動式影响評估', 'Create Office': '新增辦公室', 'Create Organization': '新增組織', 'Create Personal Effects': '新增个人效果', 'Create Project': '新增專案', 'Create Projection': '新增投射', 'Create Rapid Assessment': '建立快速評量', 'Create Report': '新增新報告', 'Create Request': '建立要求', 'Create Resource': '新增資源', 'Create River': '新增金水河', 'Create Role': '新增角色', 'Create Room': '新增室', 'Create Scenario': '建立新情境', 'Create Sector': '新增行業', 'Create Service Profile': '新增服務設定檔', 'Create Shelter Service': '新增Shelter服務', 'Create Shelter Type': '新增Shelter類型', 'Create Shelter': '新增Shelter', 'Create Skill Type': '新增技術類型', 'Create Skill': '新增技能', 'Create Staff Member': '新增人員', 'Create Status': '新增狀態', 'Create Task': '新增作業', 'Create Theme': '新增佈景主題', 'Create User': '新增使用者', 'Create Volunteer': '新增志工', 'Create Warehouse': '新增倉儲', 'Create a Person': '新增人員', 'Create a group entry in the registry.': '在登錄表中建立群組.', 'Create, enter, and manage surveys.': '建立,進入,以及管理調查。', 'Creation of Surveys': '建立的調查', 'Credential Details': '認證詳細資料', 'Credential added': '新增認證', 'Credential deleted': '刪除認證', 'Credential updated': '更新認證', 'Credentialling Organization': 'Credentialling組織', 'Credentials': '認證', 'Credit Card': '信用卡', 'Crime': '犯罪', 'Criteria': '準則', 'Currency': '貨幣', 
'Current Entries': '現行項目', 'Current Group Members': '現有組員', 'Current Identities': '現行身分', 'Current Location': '目前地點', 'Current Log Entries': '現行日誌項目', 'Current Memberships': '現行的成員資格', 'Current Notes': '現行附註', 'Current Records': '現行記錄', 'Current Registrations': '目前登錄', 'Current Status': '現行狀態', 'Current Team Members': '現行團隊成員', 'Current Twitter account': '現行Twitter帳戶', 'Current community priorities': '現行社群優先順序', 'Current general needs': '目前的一般需求', 'Current greatest needs of vulnerable groups': '弱勢群體目前最大的需求', 'Current health problems': '現行健康問題', 'Current main income sources': '目前主要收入來源', 'Current major expenses': '目前主要費用', 'Current number of patients': '病患的現行數目', 'Current problems, categories': '現行問題,種類', 'Current problems, details': '現行問題,詳細資料', 'Current request': '現行要求', 'Current response': '現行回應', 'Current session': '現行階段作業', 'Current type of health problems, adults': '現行健康問題類型,成人', 'Current type of health problems, children': '現行健康問題類型,兒童', 'Current type of source for drinking water': '目前飲用水的來源類型', 'Current type of source for sanitary water': '目前衛生用水的來源類型', 'Currently no Certifications registered': '目前沒有認證登錄', 'Currently no Competencies registered': '目前沒有登錄能力', 'Currently no Course Certicates registered': '目前沒有課程憑證登錄', 'Currently no Credentials registered': '目前沒有認證登錄', 'Currently no Missions registered': '目前沒有任務註冊', 'Currently no Skill Equivalences registered': '目前沒有技能等值登錄', 'Currently no Trainings registered': '目前沒有訓練登錄', 'Currently no entries in the catalog': '在型錄中目前沒有項目', 'Currently your system has default username and password. Username and Password are required by foriegn machines to sync data with your computer. You may set a username and password so that only those machines can fetch and submit data to your machines which your grant access by sharing your password.': '目前您的系統使用預設的使用者名稱及密碼。外部機器需要使用者名稱及密碼才能與您的電腦同步資料。您可以設定使用者名稱和密碼,如此只有透過共用密碼獲得您授權存取的機器,才能向您的機器提取及提交資料。', 'Custom Database Resource (e.g., anything defined as a resource in Sahana)': '自訂資料庫資源(例如,任何在Sahana中定義為資源的項目)', 'Customisable category of aid': '可自訂的援助種類', 'DECISION': '決策', 'DNA Profile': 'DNA圖譜', 'DNA Profiling': 'DNA鑑定', 'DVI Navigator': 'DVI導覽器', 'Daily': '每日', 'Dam Overflow': '水壩溢流', 'Damage': '損壞', 'Dangerous Person': '危險人物', 'Dashboard': '儀表板', 'Data import policy': '資料匯入原則', 'Data uploaded': '資料已上傳', 'Data': '資料', 'Database': '資料庫', 'Date & Time': '日期和時間', 'Date Avaialble': '可用日期', 'Date Available': '可出貨日期', 'Date Received': '收到的日期', 'Date Requested': '要求日期', 'Date Required': '需要的日期', 'Date Sent': '傳送日期', 'Date Until': '日期之前', 'Date and Time of Goods receipt. By default shows the current time but can be modified by editing in the drop down list.': '貨品收據的日期和時間。依預設顯示目前的時間,但可透過下拉清單編輯修改。', 'Date and Time': '日期與時間', 'Date and time this report relates to.': '此報告所涉及的日期與時間。', 'Date of Birth': '出生日期', 'Date of Latest Information on Beneficiaries Reached': '已觸及受益人最新資訊的日期', 'Date of Report': '報告的日期', 'Date': '日期', 'Date/Time of Find': '尋獲的日期/時間', 'Date/Time of disappearance': '失蹤的日期/時間', 'Date/Time when found': '找到日期/時間', 'Date/Time when last seen': '最後目擊的日期/時間', 'Date/Time': '日期/時間', 'De-duplicator': '重複資料排除工具', 'Dead Body Details': '遺體詳細資料', 'Dead Body Reports': '遺體報告', 'Dead Body': '遺體', 'Dead body report added': '新增遺體報告', 'Dead body report deleted': '刪除遺體報告', 'Dead body report updated': '更新遺體報告', 'Deaths in the past 24h': '過去24小時的死亡人數', 'Debug': '除錯', 'Decimal Degrees': '十進位度', 'Decision': '決策', 'Decomposed': '已腐化', 'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': '地圖視窗的預設高度。在視窗佈局中,地圖會放大填滿視窗,因此這裡不需要設定較大的值。', 'Default Height of the map window.': '地圖視窗的預設高度。', 'Default Map': '預設地圖', 'Default Marker': '預設標記', 'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': '地圖視窗的預設寬度。在視窗佈局中,地圖會放大填滿視窗,因此這裡不需要設定較大的值。', 'Default Width of the map window.': '地圖視窗的預設寬度。', 'Default synchronization policy': '預設同步化原則', 'Defaults updated': '預設更新', 'Defaults': '預設值', 'Defecation area for animals': '動物的排便區', 'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).': '定義情境以配置適當的資源(人力、資產和設施)。', 'Defines the icon used for display of features on handheld GPS.': '定義用於在掌上型GPS上顯示特性的圖示。', 'Defines the icon used for display of features on interactive map & KML exports. A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class. If neither are defined, then the Default Marker is used.': '定義用於在互動式地圖和KML匯出中顯示特性的圖示。當需要置換指派給特性類別的標記時,可對個別位置設定標記。如果兩者皆未定義,則使用預設標記。', 'Defines the icon used for display of features on interactive map & KML exports.': '定義用於在互動式地圖和KML匯出中顯示特性的圖示。', 'Defines the marker used for display & the attributes visible in the popup.': '定義用於顯示的標記以及蹦現式畫面中可見的屬性。', 'Degrees must be a number between -180 and 180': '度必須是介於-180和180之間的數字', 'Degrees must be between -180 and 180': '度必須介於-180和180之間', 'Degrees should be greater than 0 and less than 180': '度應該大於0且小於180', 'Delete Aid Request': '刪除輔助請求', 'Delete Alternative Item': '刪除替代項目', 'Delete Assessment Summary': '刪除評量摘要', 'Delete Assessment': '刪除評量', 'Delete Asset Assignments': '刪除資產分派', 'Delete Asset Log Entry': '刪除資產日誌項目', 'Delete Asset': '刪除資產', 'Delete Baseline Type': '刪除基準線類型', 'Delete Baseline': '刪除基準線', 'Delete Brand': '刪除品牌', 'Delete Budget': '刪除預算', 'Delete Bundle': '刪除組合', 'Delete Catalog Item': '刪除型錄項目', 'Delete Catalog': '刪除型錄', 'Delete Certificate': '刪除憑證', 'Delete Certification': '刪除認證', 'Delete Cluster Subsector': '刪除集群界別分組', 'Delete Cluster': '刪除叢集', 'Delete Commitment Item': '刪除承諾項目', 'Delete Commitment': '刪除承諾', 'Delete Competency Rating': '刪除能力分級', 'Delete Competency': '刪除能力', 'Delete Config': '刪除配置', 'Delete Contact Information': '刪除聯絡人資訊', 'Delete Course Certicate': '刪除課程憑證', 'Delete Course': '刪除課程', 'Delete Credential': '刪除認證', 'Delete Distribution Item': '刪除分配項目', 'Delete Distribution': '刪除配送', 'Delete Document': '刪除文件', 'Delete Donor': '刪除捐贈者', 'Delete Entry': '刪除項目', 'Delete Event': '刪除事件', 'Delete Feature Layer': '刪除功能層', 'Delete Group': '刪除群組', 'Delete Hospital': '刪除醫院', 'Delete Image': '刪除影像', 'Delete Impact Type': '刪除影響類型', 'Delete Impact': '刪除影響', 'Delete Incident Report': '刪除事故報告', 'Delete Incident': '刪除事件', 'Delete Inventory Item': '刪除庫存項目', 'Delete Inventory Store': '刪除庫存倉庫', 'Delete Item Category': '刪除項目種類', 'Delete Item Pack': '刪除項目套件', 'Delete Item': '刪除項目', 'Delete Job Role': '刪除工作角色', 'Delete Key': '刪除金鑰', 'Delete Kit': '刪除套件', 'Delete Landmark': '刪除地標', 'Delete Layer': '刪除層', 'Delete Level 1 Assessment': '刪除層次一評量', 'Delete Level 2 Assessment': '刪除層次二評量', 'Delete Location': '刪除位置', 'Delete Map Configuration': '刪除地圖配置', 'Delete Marker': '刪除標記', 'Delete Membership': '刪除成員資格', 'Delete Message': '刪除訊息', 'Delete Metadata': '刪除 Meta 資料', 'Delete Mission': '刪除任務', 'Delete Need Type': '刪除需求類型', 'Delete Need': '刪除需求', 'Delete Office': '刪除辦公室', 'Delete Old': '刪除舊', 'Delete Organization': '刪除組織', 'Delete Peer': '刪除同層級', 
'Delete Person': '刪除人員', 'Delete Photo': '刪除照片', 'Delete Population Statistic': '刪除人口统計資料', 'Delete Position': '刪除位置', 'Delete Project': '刪除專案', 'Delete Projection': '刪除投射', 'Delete Rapid Assessment': '刪除快速評量', 'Delete Received Item': '刪除接收項目', 'Delete Received Shipment': '刪除接收出貨', 'Delete Record': '刪除記錄', 'Delete Recovery Report': '刪除回复報告', 'Delete Report': '刪除報告', 'Delete Request Item': '刪除要求項目', 'Delete Request': '刪除要求', 'Delete Resource': '刪除資源', 'Delete Room': '刪除會議室', 'Delete Scenario': '刪除實務範例', 'Delete Section': '刪除區段', 'Delete Sector': '刪除磁區', 'Delete Sent Item': '刪除传送項目', 'Delete Sent Shipment': '刪除传送出貨', 'Delete Service Profile': '刪除服務設定檔', 'Delete Setting': '刪除設定', 'Delete Skill Equivalence': '刪除技術等值', 'Delete Skill Provision': '刪除技術供應', 'Delete Skill Type': '刪除技術類型', 'Delete Skill': '刪除技術', 'Delete Staff Type': '刪除人員類型', 'Delete Status': '刪除狀態', 'Delete Subscription': '刪除訂閱', 'Delete Subsector': '刪除Subsector', 'Delete Survey Answer': '刪除調查回答', 'Delete Survey Question': '刪除調查問題', 'Delete Survey Section': '刪除調查區段', 'Delete Survey Series': '刪除調查系列', 'Delete Survey Template': '刪除調查范本', 'Delete Training': '刪除訓練', 'Delete Unit': '刪除單元', 'Delete User': '刪除使用者', 'Delete Volunteer': '刪除志願者', 'Delete Warehouse': '刪除倉庫', 'Delete from Server?': '刪除從伺服器嗎?', 'Delete': '刪除', 'Delivered': '已遞送', 'Delphi Decision Maker': '專案群組決策', 'Demographic': '人口統計學', 'Demonstrations': '示範', 'Dental Examination': '牙齒檢查', 'Dental Profile': '牙齒設定檔', 'Department/Unit Name': '部門/單元名稱', 'Deployment': '部署', 'Describe the condition of the roads to your hospital.': '描述條件的街道的醫院。', 'Describe the procedure which this record relates to (e.g. "medical examination")': '描述程序此記錄的關系(例如, "醫學examination")', 'Description of Bin Type': '說明bin的類型', 'Description of Contacts': '說明的聯絡人', 'Description of defecation area': '說明的defecation區域', 'Description of drinking water source': '說明的"Drinking Water Protection"臨界值來源', 'Description of sanitary water source': '說明的卫生臨界值來源', 'Description of water source before the disaster': '說明的水來源前的災難', 'Description': '說明', 'Descriptive Text (e.g., Prose, etc)': '說明文字(例如, Prose等)', 'Designated for': '指定的', 'Desire to remain with family': '希望與家人同處', 'Destination': '目的地', 'Destroyed': '已毀損', 'Details field is required!': '詳細資料欄位是必要的!', 'Details': '詳細資料', 'Diaphragms, horizontal bracing': '膜片,水平支撐', 'Diarrhea among children under 5': '5歲以下兒童腹瀉', 'Dignitary Visit': '要人訪問', 'Dimensions of the storage bin. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': '維度的儲存体bin。 輸入下列格式的一x二x三的寬度x深度x高度,然后選擇單位下拉清單。', 'Dimensions of the storage location. 
Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': '維度的儲存体位置。 輸入下列格式的一x二x三的寬度x深度x高度,然后選擇單位下拉清單。', 'Direction': '方向', 'Disabilities': '殘障人士', 'Disable': '停用', 'Disabled participating in coping activities': '殘疾人士參與應對活動', 'Disabled': '已停用', 'Disabled?': '殘疾人士?', 'Disaster Victim Identification': '災民身份識別', 'Disaster Victim Registry': '災民登錄', 'Disaster clean-up/repairs': '災難up/repairs清除', 'Discharge (cusecs)': '放電(cusecs)', 'Discharges/24hrs': '放電/24hrs', 'Discussion Forum on item': '討論論壇上項目', 'Discussion Forum': '討論區', 'Disease vectors': '疾病向量', 'Diseases': '疾病', 'Dispatch Items': '分派項目', 'Dispatch': '分派', 'Displaced Populations': '移離个体群', 'Displaced': '移離', 'Display Polygons?': '顯示多邊形?', 'Display Routes?': '顯示路由?', 'Display Tracks?': '顯示追蹤?', 'Display Waypoints?': '顯示路迳點?', 'Dispose Expired/Unusable Items': '處置過期/無法使用的項目', 'Dispose': '處置', 'Distance between defecation area and water source': '距離defecation區域和臨界值來源', 'Distance between latrines and temporary shelter in meters': '距離latrines及暫時shelter以公尺為單位', 'Distance between shelter and latrines': '距離shelter和latrines', 'Distance from %s:': '距離%s:', 'Distance(Kms)': '距離(Kms)', 'Distribution Details': '配送明細', 'Distribution Item Details': '分配項目詳細資料', 'Distribution Item added': '分配項目新增', 'Distribution Item deleted': '分配項目刪除', 'Distribution Item updated': '配送更新項目', 'Distribution Item': '項目分配', 'Distribution Items': '項目分配', 'Distribution added': '配送新增', 'Distribution deleted': '刪除分配', 'Distribution groups': '收件群組', 'Distribution updated': '配送更新', 'Distribution': '發行套件', 'Distributions': '分配', 'District': 'district', 'Do adolescent and youth in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '請adolescent和泉您社群中参与活動,协助他們處理災難? (例如, 會議,宗教活動,主動社群中的清除,等等)', 'Do households each have at least 2 containers (10-20 litres each) to hold water?': '每個家庭至少有兩個儲水器(每個10-20公升)儲水嗎?', 'Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?': '每個家庭是否有適當的烹調設備和材料來煮食(爐,壺,盤,碟,杯等)?', 'Do households have bedding materials available (tarps, plastic mats, blankets)?': '每個家庭是否有被鋪(防水布,塑料墊子,毯子)?', 'Do households have household water storage containers?': '每個家庭是否有儲水器?', 'Do minority members in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '做生意您社群中成員参与活動,协助他們處理災難? (例如, 會議,宗教活動,主動社群中的清除,等等)', 'Do older people in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '舊做您社群中的人員参与活動,协助他們處理災難? (例如, 會議,宗教活動,主動社群中的清除,等等)', 'Do people have at least 2 full sets of clothing (shirts, pants/sarong, underwear)?': '人們是否有至少2套完整的服裝(襯衫,褲子/紗籠,內衣)?', 'Do people have reliable access to sufficient sanitation/hygiene items (bathing soap, laundry soap, shampoo, toothpaste and toothbrush)?': '人們是否能可靠地獲得足夠的衛生/衛生用品(沐浴香皂,洗衣皂,洗髮水,牙膏和牙刷)?', 'Do people with disabilities in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '請殘障人士您社群中参与活動,协助他們處理災難? 
(例如, 會議,宗教活動,主動社群中的清除,等等)', 'Do women and girls have easy access to sanitary materials?': '婦女做和女孩輕松存取卫生資料嗎?', 'Do women in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '婦女做您社群中参与活動,协助他們處理災難? (例如, 會議,宗教活動,主動社群中的清除,等等)', 'Do you have access to cash to restart your business?': '您有權存取現金重新啟動您的業務?', 'Do you know of any incidents of violence?': '您知道的任何事件的暴力嗎?', 'Do you know of children living on their own (without adults)?': '您知道子項使用者在自己的(不含成人)?', 'Do you know of children separated from their parents or caregivers?': '您知道子項分開母項或caregivers嗎?', 'Do you know of children that have been orphaned by the disaster?': '您知道子項已遺留的災難?', 'Do you know of children that have been sent to safe places?': '您知道子項的已传送至安全工作區嗎?', 'Do you know of children that have disappeared without explanation in the period since the disaster?': '您知道子項的消失而無任何說明在此期間,因為災難?', 'Do you know of older people who are primary caregivers of children?': '您知道舊的人員是主要caregivers的子項嗎?', 'Do you know of parents/caregivers missing children?': '您知道的母項/caregivers遺漏子項嗎?', 'Do you really want to delete these records?': '您確定要刪除這些記錄嗎?', 'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': '您要取消此接收出貨? 項目將從庫存。 這个動作無法复原!', 'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': '您要取消此传送出貨? 項目將回到庫存。 這个動作無法复原!', 'Do you want to over-write the file metadata with new default values?': '您要改寫檔案寫入meta資料与新的預設值嗎?', 'Do you want to receive this shipment?': '您要接收此出貨?', 'Do you want to send these Committed items?': '您要传送這些已確定的項目嗎?', 'Do you want to send this shipment?': '您要传送此出貨?', 'Document Details': '文件詳細資料', 'Document Library': '文件庫', 'Document Scan': '文件掃描', 'Document added': '新增文件', 'Document deleted': '文件已刪除', 'Document updated': '已更新的文件', 'Document': '文件', 'Documents and Photos': '文件和照片', 'Documents': '文件', 'Does this facility provide a cholera treatment center?': '該設施是否提供霍亂治療中心?', 'Doing nothing (no structured activity)': '什麼都不做(沒有結構化的活動)', 'Dollars': '美元', 'Domain': '網域', 'Domestic chores': '家務', 'Donated': '已捐贈', 'Donation Certificate': '捐贈證書', 'Donation Phone #': '捐贈電話號碼', 'Donor Details': 'Donor詳細資料', 'Donor added': '新增Donor', 'Donor deleted': '刪除Donor', 'Donor updated': '已更新捐贈者', 'Donors Report': 'Donors報告', 'Door frame': '門框架', 'Download PDF': '下載 PDF', 'Draft Features': '草稿功能', 'Draft': '草稿', 'Drainage': '排水', 'Drawing up a Budget for Staff & Equipment across various Locations.': '一个繪圖預算人員和設備各位置。', 'Drill Down by Group': '展開依群組', 'Drill Down by Incident': '展開事件', 'Drill Down by Shelter': '往下探查來Shelter', 'Driving License': '駕照', 'Drugs': '藥物', 'Dug Well': '挖出以及', 'Duplicate?': '重複?', 'Duration': '持續時間', 'Dust Storm': '暴雨灰塵', 'Dwelling': '住宅', 'EMS Reason': 'EMS原因', 'EMS Status Reason': 'EMS狀態原因', 'EMS Status': 'EMS狀態', 'EMS Traffic Status': 'EMS狀態传輸', 'ER Status Reason': 'ER狀態原因', 'ER Status': 'ER狀態', 'Early Recovery': '早期回复', 'Earthquake': '地震', 'Easy access to sanitation items for women/girls': '容易存取設施的項目婦女/女孩', 'Edit Activity': '編輯活動', 'Edit Address': '編輯地址', 'Edit Aid Request': '編輯輔助請求', 'Edit Alternative Item': '編輯替代項目', 'Edit Application': '編輯應用程式', 'Edit Assessment Summary': '編輯評量摘要', 'Edit Assessment': '編輯評量', 'Edit Asset Assignment': '編輯資產分派', 'Edit Asset Log Entry': '編輯資產日誌項目', 'Edit Asset': '編輯資產', 'Edit Baseline Type': '編輯基準线類型', 'Edit Baseline': '編輯基準线', 'Edit Brand': '編輯品牌', 'Edit Budget': '編輯預算', 
'Edit Bundle': '編輯軟体組', 'Edit Camp Service': 'Camp編輯服務', 'Edit Camp Type': '編輯營式', 'Edit Camp': '編輯Camp', 'Edit Catalog Item': '編輯型錄項目', 'Edit Catalog': '編輯目錄', 'Edit Category<>Sub-Category<>Catalog Relation': '編輯Category<>Sub-Category<>Catalog關系', 'Edit Certificate': '編輯證書', 'Edit Certification': '編輯認證', 'Edit Cluster Subsector': '編輯叢集Subsector', 'Edit Cluster': '編輯叢集', 'Edit Commitment Item': '編輯承諾項目', 'Edit Commitment': '編輯承諾', 'Edit Competency Rating': '編輯能力分級', 'Edit Competency': '編輯能力', 'Edit Config': '編輯配置', 'Edit Contact Information': '編輯聯絡資訊', 'Edit Contact': '編輯聯絡人', 'Edit Contents': '編輯內容', 'Edit Course Certicate': '編輯課程證書', 'Edit Course': '編輯課程', 'Edit Credential': '編輯認證', 'Edit Dead Body Details': '編輯停用主体詳細資料', 'Edit Defaults': '編輯預設值', 'Edit Description': '編輯說明', 'Edit Details': '編輯詳細資料', 'Edit Disaster Victims': '編輯災難受害者', 'Edit Distribution Item': '編輯項目分配', 'Edit Distribution': '編輯配送', 'Edit Document': '編輯文件', 'Edit Donor': '編輯Donor', 'Edit Email Settings': '編輯電子郵件設定', 'Edit Entry': '編輯條目', 'Edit Event': '編輯事件', 'Edit Facility': '編輯設備', 'Edit Feature Layer': '編輯功能層', 'Edit Flood Report': '水災編輯報告', 'Edit Gateway Settings': '編輯設定閘道', 'Edit Group': '編輯群組', 'Edit Hospital': '編輯醫院', 'Edit Human Resource': '編輯人力資源', 'Edit Identification Report': '編輯識別報告', 'Edit Identity': '編輯身分', 'Edit Image Details': '編輯映像檔詳細資料', 'Edit Image': '編輯影像', 'Edit Impact Type': '編輯影响類型', 'Edit Impact': '編輯影响', 'Edit Incident Report': '編輯事件報告', 'Edit Incident': '編輯事件', 'Edit Inventory Item': '編輯庫存項目', 'Edit Inventory Location': '編輯庫存位置', 'Edit Inventory Store': '編輯配備盤點儲存', 'Edit Item Catalog Categories': '編輯型錄種類項目', 'Edit Item Catalog': '編輯項目型錄', 'Edit Item Category': '編輯項目種類', 'Edit Item Pack': '編輯項目套件', 'Edit Item Sub-Categories': '編輯項目子種類', 'Edit Item': '編輯項目', 'Edit Job Role': '編輯工作角色', 'Edit Key': '編輯索引鍵', 'Edit Kit': '編輯套件', 'Edit Landmark': '編輯里程碑', 'Edit Layer': '編輯層', 'Edit Level %d Locations?': '編輯層次%d位置?', 'Edit Level 1 Assessment': '編輯層次一評量', 'Edit Level 2 Assessment': '編輯層次二評量', 'Edit Location': '編輯位置', 'Edit Log Entry': '編輯日誌項目', 'Edit Map Configuration': '編輯對映配置', 'Edit Map Services': '編輯對映服務', 'Edit Marker': '編輯標記', 'Edit Membership': '編輯成員資格', 'Edit Message': '編輯訊息', 'Edit Messaging Settings': '編輯传訊設定', 'Edit Metadata': '編輯 meta 資料', 'Edit Mission': '編輯任務', 'Edit Modem Settings': '編輯數据機設定', 'Edit Need Type': '需要編輯類型', 'Edit Need': '需要編輯', 'Edit Note': '編輯附註', 'Edit Office': '編輯辦公室', 'Edit Options': '編輯選項', 'Edit Organization': '編輯組織', 'Edit Parameters': '編輯參數', 'Edit Partner': '編輯伙伴', 'Edit Peer Details': '編輯層級詳細資料', 'Edit Peer': '編輯同層級', 'Edit Person Details': '編輯人員詳細資料', 'Edit Personal Effects Details': '編輯个人效果詳細資料', 'Edit Photo': '編輯照片', 'Edit Pledge': '編輯質押', 'Edit Population Statistic': '編輯人口统計資料', 'Edit Position': '編輯位置', 'Edit Problem': '編輯問題', 'Edit Project': '編輯專案', 'Edit Projection': '編輯投射', 'Edit Rapid Assessment': '編輯快速評量', 'Edit Received Item': '編輯接收項目', 'Edit Received Shipment': '編輯收到出貨', 'Edit Record': '編輯記錄', 'Edit Recovery Details': '編輯回复明細', 'Edit Registration Details': '編輯登錄詳細資料', 'Edit Registration': '編輯登錄', 'Edit Report': '編輯報告', 'Edit Request Item': '編輯要求項目', 'Edit Request': '編輯要求', 'Edit Resource': '編輯資源', 'Edit Response': '編輯回應', 'Edit River': '編輯金水河', 'Edit Role': '編輯角色', 'Edit Room': '編輯室', 'Edit Scenario': '編輯範例情節', 'Edit School District': '編輯學校特區', 'Edit School Report': '編輯學校報告', 'Edit Section': '編輯區段', 'Edit Sector': '編輯磁區', 'Edit Sent Item': '传送編輯項目', 'Edit Setting': '編輯設定', 'Edit Settings': '編輯設定', 'Edit Shelter Service': '編輯Shelter服務', 'Edit Shelter 
Type': '編輯Shelter類型', 'Edit Shelter': '編輯Shelter', 'Edit Shipment Transit Log': '編輯出貨傳輸日誌', 'Edit Shipment to Send': '編輯要傳送的出貨', 'Edit Shipment/Way Bills': '編輯出貨/提貨單', 'Edit Shipment<>Item Relation': '編輯Shipment<>Item關係', 'Edit Site': '編輯站點', 'Edit Skill Equivalence': '編輯等值技能', 'Edit Skill Provision': '編輯技術供應', 'Edit Skill Type': '編輯技術類型', 'Edit Skill': '編輯技術', 'Edit Solution': '編輯解決方案', 'Edit Source': '編輯來源', 'Edit Staff Type': '編輯人員類型', 'Edit Staff': '編輯人員', 'Edit Storage Bin Type(s)': '編輯儲存箱類型', 'Edit Storage Bins': '編輯儲存箱', 'Edit Storage Location': '編輯儲存位置', 'Edit Subscription': '編輯訂閱', 'Edit Subsector': '編輯Subsector', 'Edit Survey Answer': '編輯調查回答', 'Edit Survey Question': '編輯調查問題', 'Edit Survey Section': '編輯調查區段', 'Edit Survey Series': '編輯調查系列', 'Edit Survey Template': '編輯調查範本', 'Edit Sync Settings': '編輯同步設定', 'Edit Task': '編輯作業', 'Edit Team': '編輯團隊', 'Edit Theme': '編輯佈景主題', 'Edit Themes': '編輯佈景主題', 'Edit Ticket': '編輯問題單', 'Edit Track': '編輯追蹤', 'Edit Training': '編輯培訓', 'Edit Tropo Settings': '編輯Tropo設定', 'Edit Unit': '編輯單元', 'Edit Update': '編輯更新', 'Edit User': '編輯使用者', 'Edit Volunteer Availability': '編輯志工可用性', 'Edit Volunteer Details': '編輯志工詳細資料', 'Edit Volunteer Registration': '編輯志工登錄', 'Edit Warehouse': '編輯倉儲', 'Edit current record': '編輯現行記錄', 'Edit message': '編輯訊息', 'Edit the Application': '編輯應用程式', 'Edit': '編輯', 'Editable?': '可編輯?', 'Education materials received': '收到的教育材料', 'Education materials, source': '教育材料,來源', 'Education': '教育', 'Effects Inventory': '個人物品清冊', 'Either a shelter or a location must be specified': '必須指定收容所或位置', 'Either file upload or document URL required.': '需要上傳檔案或提供文件URL。', 'Either file upload or image URL required.': '需要上傳檔案或提供影像URL。', 'Elderly person headed households (>60 yrs)': '老人當家的家庭(60歲以上)', 'Electrical': '電氣', 'Electrical, gas, sewerage, water, hazmats': '電力、瓦斯、污水、供水、危險物質', 'Electricity': '電力', 'Elevated': '高架', 'Elevators': '升降機', 'Email Address': '電子郵件位址', 'Email Settings': '電子郵件設定', 'Email address verified, however registration is still pending approval - please wait until confirmation received.': '電子郵件位址已驗證,但登錄仍在等待核准-請等候確認通知。', 'Email settings updated': '更新電子郵件設定', 'Email': '電子郵件', 'Embassy': '大使館', 'Emergency Capacity Building project': '緊急容量建置專案', 'Emergency Department': '緊急部門', 'Emergency Shelter': '緊急收容所', 'Emergency Support Facility': '緊急支援設施', 'Emergency Support Service': '緊急支援服務', 'Emergency Telecommunications': '緊急電信', 'Enable/Disable Layers': '啟用/停用層', 'Enabled': '已啟用', 'Enabled?': '已啟用?', 'End Date': '結束日期', 'End date should be after start date': '結束日期應該晚於開始日期', 'End date': '結束日期', 'End of Period': '期間結束', 'English': '英文', 'Enter Coordinates in Deg Min Sec': '以度分秒的格式輸入座標值', 'Enter Coordinates:': '輸入座標:', 'Enter a GPS Coord': '輸入GPS座標', 'Enter a date before': '輸入早於此的日期', 'Enter a name for the spreadsheet you are uploading (mandatory).': '輸入一個您上傳的電子表格的名稱(強制)。', 'Enter a new support request.': '輸入一個新的援助申請。', 'Enter a summary of the request here.': '在這裡輸入申請摘要。', 'Enter a unique label!': '輸入獨一無二的標籤!', 'Enter a valid date before': '輸入早於此的有效日期', 'Enter a valid email': '輸入一個有效的電子郵件地址', 'Enter a valid future date': '輸入一個有效的未來日期', 'Enter some characters to bring up a list of possible matches': '輸入部分字元以顯示可能相符項的清單', 'Enter some characters to bring up a list of possible matches.': '輸入部分字元以顯示可能相符項的清單。', 'Enter tags separated by commas.': '輸入以逗點區隔的標籤。', 'Enter the same password as above': '輸入與上面相同的密碼', 'Enter your firstname': '輸入你的名字', 'Entered': '已輸入', 'Entering a phone number is optional, but doing so allows 
you to subscribe to receive SMS messages.': '輸入電話號碼是選用的,但這樣做可讓您訂閱以接收SMS訊息。', 'Entry deleted': '已刪除項目', 'Environment': '環境', 'Equipment': '設備', 'Error encountered while applying the theme.': '套用佈景主題時發生錯誤。', 'Error in message': '訊息中有錯誤', 'Error logs for "%(app)s"': '"%(app)s" 的錯誤日誌', 'Errors': '錯誤', 'Est. Delivery Date': '預計交付日期', 'Estimated # of households who are affected by the emergency': '受緊急事件影響的估計家庭數', 'Estimated # of people who are affected by the emergency': '受緊急事件影響的估計人數', 'Estimated Overall Building Damage': '估計整體建築物損壞', 'Estimated total number of people in institutions': '機構中的估計總人數', 'Euros': '歐元', 'Evacuation': '撤離', 'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': '評估此訊息中的資訊。(這個值不應用於公開警告應用程式。)', 'Event Details': '事件詳細資料', 'Event Time': '事件時間', 'Event Type': '事件類型', 'Event added': '新增事件', 'Event deleted': '刪除事件', 'Event type': '事件類型', 'Event updated': '更新事件', 'Event': '事件', 'Events': '事件', 'Example': '範例', 'Exceeded': '已超出', 'Excellent': '絕佳', 'Exclude contents': '排除內容', 'Excreta disposal': '排泄物處置', 'Execute a pre-planned activity identified in <instruction>': '執行<instruction>中所識別的預先計畫活動', 'Exercise': '練習', 'Exercise?': '練習?', 'Exercises mean all screens have a watermark & all notifications have a prefix.': '練習表示所有畫面都有浮水印且所有通知都有字首。', 'Existing Placard Type': '現有告示類型', 'Existing food stocks': '現有糧食存量', 'Existing food stocks, main dishes': '現有糧食存量,主食', 'Existing food stocks, side dishes': '現有糧食存量,副食', 'Existing location cannot be converted into a group.': '現有的位置無法轉換成群組。', 'Exits': '出口', 'Expected In': '預期入庫', 'Expected Out': '預期出庫', 'Experience': '經驗', 'Expiry Date': '到期日期', 'Expiry Time': '到期時間', 'Explosive Hazard': '爆炸性危害', 'Export Data': '匯出資料', 'Export Database as CSV': '資料庫匯出成CSV', 'Export in GPX format': '以GPX格式匯出', 'Export in KML format': '以KML格式匯出', 'Export in OSM format': '以OSM格式匯出', 'Export in PDF format': '匯出為PDF檔', 'Export in RSS format': '匯出為RSS格式', 'Export in XLS format': '匯出為XLS檔', 'Export': '匯出', 'Exterior Only': '僅外部', 'Exterior and Interior': '外部和內部', 'External Features': '外部特性', 'Eye Color': '眼睛顏色', 'Facebook': '臉書', 'Facial hair, color': '鬍鬚,顏色', 'Facial hair, type': '鬍鬚,類型', 'Facial hear, length': '鬍鬚,長度', 'Facilities': '設施', 'Facility Details': '設施詳細資料', 'Facility Operations': '設施營運', 'Facility Status': '設施狀態', 'Facility Type': '設施類型', 'Facility added': '新增設施', 'Facility or Location': '設施或位置', 'Facility removed': '移除設施', 'Facility updated': '設施更新', 'Facility': '設施', 'Factors affecting school attendance': '影響就學出席率的因素', 'Fail': '失敗', 'Failed!': '失敗!', 'Fair': '普通', 'Falling Object Hazard': '落下物危害', 'Families/HH': '家庭/戶', 'Family Care': '家庭照護', 'Family tarpaulins received': '收到的家庭防水帆布', 'Family tarpaulins, source': '家庭防水帆布,來源', 'Family': '家庭', 'Family/friends': '家人/朋友', 'Farmland/fishing material assistance, Rank': '農地/漁業物資協助,等級', 'Fax': '傳真', 'Feature Layer Details': '功能層詳細資料', 'Feature Layer added': '功能層新增', 'Feature Layer deleted': '功能層刪除', 'Feature Layer updated': '功能層更新', 'Feature Layers': '功能層', 'Feature Namespace': '特性名稱空間', 'Feature Request': '功能要求', 'Feature Type': '功能類型', 'Feature': '特性 (feature)', 'Features Include': '功能包括', 'Female headed households': '女性當家的家庭', 'Female': '女性', 'Few': '少數', 'Field Hospital': '野戰醫院', 'Field': '欄位', 'Fields tagged with a star': '標有星號的欄位', 'File': '檔案', 'Fill in Latitude': '填寫緯度', 'Fill in Longitude': '填寫經度', 'Filter Field': '過濾欄位', 'Filter Value': '過濾器值', 'Filter': '過濾器', 'Filtered search of aid pledges and requests': '援助認捐與要求的過濾搜尋', 'Find Dead Body Report': '尋找遺體報告', 'Find Hospital': '尋找醫院', 'Find Person Record': '尋找人員記錄', 'Find Recovery Report': '尋找恢復報告', 'Find Volunteers': '尋找志願者', 'Find a Person Record': '尋找人員記錄', 'Find by Name': '依名稱搜尋', 'Find': '尋找', 'Finder': '搜尋器', 'Fingerprint': '指紋', 'Fingerprinting': '採集指紋', 'Fingerprints': '指紋', 'Finish': '完成', 'Finished Jobs': '已完成的工作', 'Fire suppression and rescue': '滅火和救援', 'Fire': '火災', 'First Name': '名', 'First name': '名', 'Fishing': '漁業', 'Flash Flood': '暴洪', 'Flash Freeze': '驟凍', 'Fleet Management': '車隊管理', 'Flexible Impact Assessments': '彈性影響評量', 'Flood Alerts show water levels in various parts of the country': '水災警示顯示全國各地的水位', 'Flood Alerts': '水災警示', 'Flood Report Details': '水災報告詳細資料', 'Flood Report added': '新增水災報告', 'Flood Report deleted': '水災報告刪除', 'Flood Report updated': '水災報告更新', 'Flood Report': '水災報告', 'Flood Reports': '水災報告', 'Flood': '水災', 'Flow Status': '水流狀態', 'Focal Point': '聯絡窗口', 'Food Supply': '食品供應', 'Food assistance available/expected': '預期可提供食品援助', 'Food assistance': '食品援助', 'Food': '食物', 'Footer file %s missing!': '缺少頁腳文件 %s!', 'Footer': '頁腳', 'For Eden instances enter the application base URL, e.g. http://sync.sahanfoundation.org/eden, for other peers the URL of the synchronization interface.': 'Eden實例請輸入應用程式基本URL,例如 http://sync.sahanfoundation.org/eden;其他同層級則輸入同步化介面的URL。', 'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': 'POP-3通常是110(SSL為995),IMAP通常是143(993為IMAP)。', 'For Warehouse': '倉儲', 'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': '若為國家,這是ISO2代碼;若為城鎮,則是機場Locode。', 'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': '每個同步夥伴都有一個在指定時間間隔後執行的預設同步工作。您也可以依需求設定更多自訂的同步工作。按一下右側的鏈結開始。', 'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': '為加強安全,建議您輸入使用者名稱和密碼,並通知組織中其他機器的管理者,在「同步化 -> 同步夥伴」中將此使用者名稱和密碼加到您的UUID之下', 'For live help from the Sahana community on using this application, go to': '想要從 Sahana 社群取得使用方面的線上幫助,請前往', 'For messages that support alert network internal functions': '用於支援警示網路內部功能的訊息', 'For more details on the Sahana Eden system, see the': '更多關於 Sahana Eden 系統的資訊,請見', 'For more information, see': '想要了解更多資訊,請見', 'For': '適用於', 'Forest Fire': '森林火災', 'Formal camp': '正式營地', 'Format': '格式', 'Forms': '表單', 'Found': '找到', 'Freezing Drizzle': '凍毛毛雨', 'Freezing Rain': '凍雨', 'Freezing Spray': '凍飛沫', 'French': '法文', 'Friday': '星期五', 'From Inventory': '從庫存', 'From Location': '起點位置', 'From Organization': '來源組織', 'From Person': '從人員', 'From Warehouse': '從倉儲', 'From': '開始', 'Frost': '霜', 'Fuel': '燃料', 'Fulfil. Status': '滿足狀態', 'Fulfillment Status': '供貨狀態', 'Full beard': '落腮鬍', 'Full': '滿載', 'Fullscreen Map': '全螢幕地圖', 'Functional Tests': '功能測試', 'Functions available': '可用的函數', 'Funding Organization': '資金組織', 'Funeral': '喪葬', 'Further Action Recommended': '建議進一步的動作', 'GIS Reports of Shelter': '收容所的GIS報告', 'GIS integration to view location details of the Shelter': '整合GIS以檢視收容所的詳細位置', 'GPS Marker': 'GPS標記', 'GPS Track File': 'GPS追蹤檔案', 'GPS Track': 'GPS追蹤', 'GPX Track': 'GPX追蹤', 'GRN Status': 'GRN狀態', 'Gale Wind': '大風', 'Gap Analysis Map': '差距分析地圖', 'Gap Analysis Report': '差異分析報告', 'Gap Analysis': '差距分析', 'Gap Map': '差距地圖', 'Gap Report': '差距報告', 'Gateway Settings': '閘道設定', 'Gateway settings updated': '閘道設定更新', 'Gateway': '閘道', 'Gender': '性別', 'General Comment': '一般評論', 'General Medical/Surgical': '一般內科/外科', 'General emergency and public safety': '一般緊急事件和公共安全', 'General information on demographics': '人口統計的一般資訊', 'General': '一般', 'Generator': '發電機', 'Geocode': '地理編碼', 'Geocoder Selection': '選擇地理編碼程式', 'Geometry Name': '幾何形狀名稱', 'Geonames.org search requires Internet connectivity!': 'Geonames.org搜尋需要網際網路連線功能!', 'Geophysical (inc. landslide)': '地球物理(含山崩)', 'Geotechnical Hazards': '大地工程危害', 'Geraldo module not available within the running Python - this needs installing for PDF output!': '執行中的Python內沒有Geraldo模組-PDF輸出需要安裝它!', 'Get incoming recovery requests as RSS feed': '以RSS訊息饋送取得送入的尋回要求', 'Girls 13-18 yrs in affected area': '在受影響地區的13-18歲女孩', 'Girls 13-18 yrs not attending school': '不上學的13-18歲女孩', 'Girls 6-12 yrs in affected area': '在受影響地區的6-12歲女童', 'Girls 6-12 yrs not attending school': '不上學的6-12歲女童', 'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': '提供圖像的簡要描述,例如圖片的什麼地方可以看到什麼(可選)。', 'Give information about where and when you have seen the person': '提供您見到該人員的時間和地點資訊', 'Give information about where and when you have seen them': '提供您見到他們的時間和地點資訊', 'Global Messaging Settings': '全域傳訊設定', 'Go to Request': '跳至要求', 'Go': '執行', 'Good Condition': '狀況良好', 'Good': '良好', 'Goods Received Note': '收貨單', 'Government UID': '政府UID', 'Government building': '政府建築物', 'Government': '政府機關', 'Grade': '等級', 'Greek': '希臘文', 'Green': '綠色', 'Ground movement, fissures': '地面移動、裂縫', 'Ground movement, settlement, slips': '地面移動、沉陷、滑動', 'Group %(group_id)s created': '群組 %(group_id)s 已建立', 'Group Description': '群組說明', 'Group Details': '群組詳細資料', 'Group ID': '群組編號', 'Group Member added': '群組成員已新增', 'Group Members': '群組成員', 'Group Memberships': '群組成員資格', 'Group Name': '群組名稱', 'Group Title': '群組標題', 'Group Type': '群組類別', 'Group added': '群組已新增', 'Group deleted': '群組已刪除', 'Group description': '群組說明', 'Group name': '群組名稱', 'Group type': '群組類別', 'Group updated': '群組已更新', 'Group': '群組', 'Groups removed': '群組已刪除', 'Groups': '群組', 'Guest': '訪客', 'HR Manager': 'HR管理員', 'Hail': '冰雹', 'Hair Color': '頭髮顏色', 'Hair Length': '頭髮長度', 'Hair Style': '髮型', 'Has additional rights to modify records relating to this Organization or Site.': '具有修改與此組織或站點相關記錄的額外權限。', 'Has data from this Reference Document been entered into Sahana?': '此參考文件的資料是否已輸入Sahana?', 'Has only read-only access to records relating to this Organization or Site.': '只有唯讀存取與此組織或站點相關的記錄。', 'Has the Certificate for receipt of the shipment been given to the sender?': '出貨收據憑證是否已交給寄件者?', 'Has the GRN (Goods Received Note) been completed?': 'GRN(收貨單)是否已完成?', 'Has the safety and security of women and children in your community changed since the emergency?': '緊急事件後,您社區中婦女和兒童的安全狀況是否改變?', 'Has your business been damaged in the course of the disaster?': '您的業務在災難過程中受損了嗎?', 'Have households received any shelter/NFI assistance or is assistance expected in the coming days?': '各家庭是否已收到任何收容所/非糧食物資(NFI)協助,或預期未來幾天會有協助?', 'Have normal food sources been disrupted?': '正常的糧食來源是否已中斷?', 'Have schools received or are expecting to receive any assistance?': '學校有接收或預期接收任何幫助嗎?', 'Have the people received or are you expecting any medical or food assistance in the coming days?': '民眾是否已收到,或您是否預期未來幾天會有任何醫療或糧食協助?', 'Hazard Pay': '危險津貼', 'Hazard': '危害', 'Hazardous Material': '危害性物料', 'Hazardous Road Conditions': '危險路況', 'Header Background': '標頭背景', 'Header background file %s missing!': '標頭背景檔案 %s 遺失!', 'Headquarters': '總部', 'Health care assistance, Rank': '醫療協助,等級', 'Health center with beds': '有床位的衛生中心', 'Health center without beds': '無床位的衛生中心', 'Health center': '衛生中心', 'Health services functioning prior to disaster': '災前運作的衛生服務', 'Health services functioning since disaster': '災後運作的衛生服務', 'Health services status': '衛生服務狀態', 'Health': '衛生', 'Healthcare Worker': '醫療保健工作者', 'Heat Wave': '熱浪', 'Heat and Humidity': '高溫與濕度', 'Height (cm)': '高度(公分)', 'Height (m)': '高度(公尺)', 'Height': '高度', 'Help': '說明', 'Helps to monitor status of hospitals': '有助於監視醫院的狀態', 'Helps to report and search for Missing Persons': '有助於通報及搜尋失蹤人員', 'Helps to report and search for missing persons': '有助於通報及搜尋失蹤人員', 'Here are the solution items related to the problem.': '以下是與該問題相關的解決方案項目。', 'Here you will find all synchronization attempts made by either your machine or foreign machines for data exchange. This also lists data exchanges made using Sahana API.': '在這裡您會看到您的機器或外部機器為交換資料所做的所有同步化嘗試。這也會列出使用Sahana API進行的資料交換。', 'Heritage Listed': '列為古蹟', 'Hierarchy Level 0 Name (i.e. Country)': '階層層次0名稱(即國家)', 'Hierarchy Level 1 Name (e.g. State or Province)': '階層層次1名稱(例如,州或省)', 'Hierarchy Level 2 Name (e.g. District or County)': '階層層次2名稱(例如,地區或縣)', 'Hierarchy Level 3 Name (e.g. City / Town / Village)': '階層層次3名稱(例如,城市/鄉鎮/村落)', 'Hierarchy Level 4 Name (e.g. Neighbourhood)': '階層層次4名稱(例如,鄰里)', 'Hierarchy Level 5 Name': '階層層次5名稱', 'High Water': '高水位', 'High': '高', 'Hindi': '北印度文', 'Hindu': '印度教', 'History': '歷程', 'Hit the back button on your browser to try again.': '按一下瀏覽器的「上一頁」按鈕再試一次。', 'Holiday Address': '假日位址', 'Home Address': '住家地址', 'Home Country': '住家所在國家或地區', 'Home Crime': '家庭犯罪', 'Home': '首頁', 'Hospital Details': '醫院詳細資料', 'Hospital Status Report': '醫院狀態報告', 'Hospital information added': '醫院資訊新增', 'Hospital information deleted': '醫院資訊刪除', 'Hospital information updated': '醫院資訊更新', 'Hospital status assessment.': '醫院狀態評量。', 'Hospital': '醫院', 'Hospitals': '醫院', 'Hot Spot': '熱點', 'Hour': '小時', 'Hourly': '每小時', 'Hours': '時數', 'Household kits received': '收到的家庭用品包', 'Household kits, source': '家庭用品包,來源', 'How did boys 13-17yrs spend most of their time prior to the disaster?': '災難之前13到17歲的男孩是如何花費大部分的時間?', 'How did boys <12yrs spend most of their time prior to the disaster?': '災難之前小於12歲的男孩是如何花費大部分的時間?', 'How did boys girls 13-17yrs spend most of their time prior to the disaster?': '災難之前13到17歲的男孩女孩是如何花費大部分的時間?', 'How did girls <12yrs spend most of their time prior to the disaster?': '災難之前小於12歲的女孩是如何花費大部分的時間?', 'How do boys 13-17yrs spend most of their time now?': '現在13-17歲的男孩如何利用他們大部分的時間?', 'How do boys <12yrs spend most of their time now?': '現在小於12歲的男孩是如何花費大部分的時間?', 'How do girls 13-17yrs spend most of their time now?': '現在13-17歲的女孩如何利用她們大部分的時間?', 'How do girls <12yrs spend most of their time now?': '現在小於12歲的女孩如何利用她們大部分的時間?', 'How does it work?': '運作方式?', 'How is this person affected by the disaster? (Select all that apply)': '此人如何受到災難影響?(請選取所有適用項目)', 'How long does it take you to reach the available water resources? Specify the time required to go there and back, including queuing time, by foot.': '您需要多久才能到達可用的水資源?請指定步行往返所需的時間,包括排隊時間。', 'How long does it take you to walk to the health service?': '您需要多久才能走到衛生服務機構?', 'How long will the food last?': '糧食能維持多久?', 'How long will this water resource last?': '這個水資源能維持多久?', 'How many Boys (0-17 yrs) are Dead due to the crisis': '有多少男孩(0-17歲)因危機死亡', 'How many Boys (0-17 yrs) are Injured due to the crisis': '有多少男孩(0-17歲)因危機受傷', 'How many Boys (0-17 yrs) are Missing due to the crisis': '有多少男孩(0-17歲)因危機失蹤', 'How many Girls (0-17 yrs) are Dead due to the crisis': '有多少女孩(0-17歲)因危機死亡', 'How many Girls (0-17 yrs) are Injured due to the crisis': '有多少女孩(0-17歲)因危機受傷', 'How many Girls (0-17 yrs) are Missing due to the crisis': '有多少女孩(0-17歲)因危機失蹤', 'How many Men (18 yrs+) are Dead due to the crisis': '有多少男性(18歲以上)因危機死亡', 'How many Men (18 yrs+) are Injured due to the crisis': '有多少男性(18歲以上)因危機受傷', 'How many Men (18 yrs+) are Missing due to the crisis': '有多少男性(18歲以上)因危機失蹤', 'How many Women (18 yrs+) are Dead due to the crisis': '有多少女性(18歲以上)因危機死亡', 'How many Women (18 yrs+) are Injured due to the crisis': '有多少女性(18歲以上)因危機受傷', 'How many Women (18 yrs+) are Missing due to the crisis': '有多少女性(18歲以上)因危機失蹤', 'How many days will the supplies last?': '物資能維持多少天?', 'How many doctors in the health centers are still actively working?': '衛生中心有多少醫師仍在執業工作?', 'How many houses are uninhabitable (uninhabitable = foundation and structure destroyed)?': '有多少房屋無法居住(無法居住=地基和結構損毀)?', 'How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?': '有多少房屋受損但仍可使用(可使用=窗戶破損、牆面龜裂、屋頂輕微受損)?', 'How many latrines are available in the village/IDP centre/Camp?': '村落/國內流離失所者中心/營地有多少可用的廁所?', 'How many midwives in the health centers are still actively working?': '衛生中心有多少助產士仍在執業工作?', 'How many new cases have been admitted to this facility in the past 24h?': '過去24小時有多少新病例送入此設施?', 'How many nurses in the health centers are still actively working?': '衛生中心有多少護士仍在執業工作?', 'How many of the patients with the disease died in the past 24h at this facility?': '過去24小時此設施有多少該疾病的病患死亡?', 'How many of the primary school age boys (6-12) in the area are not attending school?': '該區域有多少小學學齡男孩(6-12歲)未上學?', 'How many of the primary school age girls (6-12) in the area are not attending school?': '該區域有多少小學學齡女孩(6-12歲)未上學?', 'How many of the primary/secondary schools are now open and running a regular schedule of class?': '有多少中小學現在開放並按正常課表上課?', 'How many of the secondary school age boys (13-18) in the area are not attending school?': '該區域有多少中學學齡男孩(13-18歲)未上學?', 'How many of the secondary school age girls (13-18) in the area are not attending school?': '該區域有多少中學學齡女孩(13-18歲)未上學?', 'How many patients with the disease are currently hospitalized at this facility?': '目前此設施有多少該疾病的病患住院?', 'How many primary school age boys (6-12) are in the affected area?': '受影響區域有多少小學學齡男孩(6-12歲)?', 'How many primary school age girls (6-12) are in the affected area?': '受影響區域有多少小學學齡女孩(6-12歲)?', 'How many primary/secondary schools were opening prior to the disaster?': '災前有多少中小學開課?', 'How many secondary school age boys (13-18) are in the affected area?': '受影響區域有多少中學學齡男孩(13-18歲)?', 'How many secondary school age girls (13-18) are in the affected area?': '受影響區域有多少中學學齡女孩(13-18歲)?', 'How many teachers have been affected by the disaster (affected = unable to work)?': '有多少教師受災難影響(受影響=無法工作)?', 'How many teachers worked in the schools prior to the disaster?': 
'多少教師已經在學校之前,災難?', 'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': '多少明細會出現。 高的縮放比例表示很多的詳細程度,而不是整个區域。 較低的縮放比例表示看到整个區域,但不是一个高層次的詳細資料。', 'Human Resource Details': '人力資源詳細資料', 'Human Resource Management': '人力資源管理', 'Human Resource added': '新增人力資源', 'Human Resource removed': '移除人力資源', 'Human Resource updated': '人力資源更新', 'Human Resource': '人力資源', 'Human Resources Management': '人力資源管理', 'Human Resources': '人力資源部', 'Hurricane Force Wind': '颶風强制wind', 'Hurricane': '台風', 'Hygiene kits received': 'Hygiene收到的套件', 'Hygiene kits, source': 'Hygiene套件,來源', 'Hygiene practice': 'Hygiene實務', 'Hygiene problems': 'Hygiene問題', 'I am available in the following area(s)': '我可以在下列區域(S)', 'ID Label': '識別編號或符號', 'ID Label:': '識別編號或符號:', 'ID Tag Number': 'ID標籤號碼', 'ID Tag': 'ID標籤', 'ID type': 'Id 類型', 'Ice Pressure': 'ICE壓力', 'Iceberg': '冰', 'Ideally a full URL to the source file, otherwise just a note on where data came from.': '理想的完整URL的原始檔,否則只注意事項在資料來源。', 'Identification Report': '識別報告', 'Identification Reports': '識別報告', 'Identification Status': '識別狀態', 'Identification label of the Storage bin.': '識別標籤的儲存体bin。', 'Identification': '識別', 'Identified as': '識別為', 'Identified by': '識別由', 'Identity Details': '身分詳細資料', 'Identity added': '新增身分', 'Identity deleted': '刪除身分', 'Identity updated': '更新身分', 'Identity': '身分', 'If Staff have login accounts then they are given access to edit the details of the': '如果人員已登入账户,然后它們會被授予存取編輯的詳細資料', 'If Unit = m, Base Unit = Km, then multiplicator is 0.0001 since 1m = 0.001 km.': '如果單元= M,基本單元=里,然后multiplicator是0.0001,因為" 1 = 0.001公里。', 'If a ticket was issued then please provide the Ticket ID.': '如果一个單發出,則請提供的摘記卷ID。', 'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': '如果使用者驗證它們自己的電子郵件位址与此網域,核准者欄位是用來判定是否由誰進一步核准是必要的。', 'If enabled then a log is maintained of all records a user accesses. If disabled then it can still be enabled on a per-module basis.': '如果啟用,則日誌是維護所有記錄的使用者存取。 如果停用,則仍然可以啟用每一个模組的基準。', 'If enabled then a log is maintained of all records a user edits. 
If disabled then it can still be enabled on a per-module basis.': '如果啟用,則日誌是維護所有記錄的使用者編輯。 如果停用,則仍然可以啟用每一个模組的基準。', 'If it is a URL leading to HTML, then this will downloaded.': '如果它是一个URL產生HTML,則會下載。', 'If neither are defined, then the Default Marker is used.': '如果未定义,則預設記號使用。', 'If no marker defined then the system default marker is used': '如果沒有標記定义則系统預設標記使用', 'If no, specify why': '如果沒有,請指定原因', 'If none are selected, then all are searched.': '如果沒有選取,則所有搜尋。', 'If the location is a geographic area, then state at what level here.': '如果位置是地理區域,然后狀態層次為何在這裡。', 'If the request type is "Other", please enter request details here.': '若要求的類型為"其他",請輸入此要求的詳細資料。', 'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': '如果這个欄位會移入,則的使用者指定的網域會自動被指派為人員的組織', 'If this is set to True then mails will be deleted from the server after downloading.': '如果這是設為true,那么郵件將從伺服器中刪除下載之后。', 'If this record should be restricted then select which role is required to access the record here.': '如果此記錄應限制,然后選取的角色需要存取記錄在這裡。', 'If this record should be restricted then select which role(s) are permitted to access the record here.': '如果此記錄應限制,然后選取的角色允許存取記錄在這裡。', 'If yes, specify what and by whom': '如果為"是",指定什么和由誰', 'If yes, which and how': '如果為"是",以及如何', 'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': '如果您不輸入一个参照文件,您的電子郵件會顯示允許此資料將被驗證。', 'If you know what the Geonames ID of this location is then you can enter it here.': '如果您知道什么的GeoNames的ID這个位置之后,您可以在這裡輸入它。', 'If you know what the OSM ID of this location is then you can enter it here.': '如果您知道什么的系统ID的這个位置之后,您可以在這裡輸入它。', 'If you need to add a new document then you can click here to attach one.': '如果您需要新增一个新的文件,然后您可以按一下這裡,以連接一个。', 'If you run multiple servers in a network, you would probably see this place listing some other machines. Sahana can automatically pick servers in your organization (if they have sync username and password of your machine or if it is set to default) and add them to your list of machines to perform synchronization with. You can modify individual sync policy for each server. You can also add username and password of that server to retrieve and send data to that server. 
You can also manually add other servers.': '如果您在網路中執行多個伺服器,您可能會在此處看到列出的其他機器。Sahana可以自動選取您組織中的伺服器(如果它們有您機器的同步使用者名稱和密碼,或設為預設值),並將它們新增至要執行同步化的機器清單。您可以為每一個伺服器修改個別的同步化原則。您也可以新增該伺服器的使用者名稱和密碼,以便從該伺服器擷取並傳送資料。您也可以手動新增其他伺服器。', 'If you want several values, then separate with': '如果您需要數個值,請用以下符號分隔', 'If you would like to help, then please': '如果您想要幫助,請', 'Illegal Immigrant': '非法移民', 'Image Details': '影像詳細資料', 'Image Tags': '影像標籤', 'Image Type': '影像類型', 'Image Upload': '上載影像', 'Image added': '新增影像', 'Image deleted': '刪除影像', 'Image updated': '更新影像', 'Image': '影像', 'Image/Attachment': '影像/附件', 'Image/Other Attachment': '影像/其他附件', 'Imagery': '影像', 'Images': '影像', 'Immediate reconstruction assistance, Rank': '立即重建協助,等級', 'Impact Assessments': '影響評量', 'Impact Details': '影響詳細資料', 'Impact Type Details': '影響類型詳細資料', 'Impact Type added': '新增影響類型', 'Impact Type deleted': '刪除影響類型', 'Impact Type updated': '更新影響類型', 'Impact Type': '影響類型', 'Impact Types': '影響類型', 'Impact added': '新增影響', 'Impact deleted': '刪除影響', 'Impact updated': '更新影響', 'Impacts': '影響', 'Import & Export Data': '匯入及匯出資料', 'Import Data': '匯入資料', 'Import Job': '匯入工作', 'Import Jobs': '匯入工作', 'Import and Export': '匯入及匯出', 'Import from Ushahidi Instance': '從Ushahidi實例匯入', 'Import if Master': '如果為主要端則匯入', 'Import job created': '匯入工作已建立', 'Import multiple tables as CSV': '以CSV匯入多個表格', 'Import': '匯入', 'Import/Export': '匯入/匯出', 'Import/Master': '匯入/主要', 'Important': '重要', 'Importantly where there are no aid services being provided': '尤其是尚未提供任何援助服務的地方', 'Imported': '已匯入', 'Importing data from spreadsheets': '從試算表匯入資料', 'Improper decontamination': '除污不當', 'Improper handling of dead bodies': '屍體處理不當', 'In Catalogs': '型錄中', 'In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': '在GeoServer中,這是圖層名稱。在WFS getCapabilities中,這是FeatureType名稱中冒號(:)之後的部分。', 'In GeoServer, this is the Workspace Name. 
Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': '在GeoServer,這是工作區名稱。 在WFS getCapabilities,這是FeatureType名稱組件之前的冒號(:)。', 'In Inventories': '在庫存', 'In Process': '正在處理', 'In Progress': '進行中', 'In Transit': '轉移中', 'In Window layout the map maximises to fill the window, so no need to set a large value here.': '視窗布置對映maximises來填入視窗,讓您不需要設定較大的值在這裡。', 'In general, what are the greatest needs of older people, people with disabilities, children, youth and women in your community?': '一般而言,什么是最需要舊的人員,殘障人士,子項,泉和婦女的社群嗎?', 'Inbound Mail Settings': '入埠郵件設定', 'Incident Categories': '事件種類', 'Incident Details': '事件明細', 'Incident Report Details': '事故報告詳細資料', 'Incident Report added': '新增事件報告', 'Incident Report deleted': '刪除事故報告', 'Incident Report updated': '更新事故報告', 'Incident Report': '事件報告', 'Incident Reporting System': '事件報告系统', 'Incident Reporting': '事件報告', 'Incident Reports': '事件報告', 'Incident added': '新增事件', 'Incident deleted': '刪除事件', 'Incident updated': '更新事件', 'Incident': '發生事件', 'Incidents': '發生事件', 'Incoming Shipment canceled': '進入出貨取消', 'Incoming Shipment updated': '進入出貨更新', 'Incoming': '送入的', 'Incomplete': '未完成', 'Individuals': '個人', 'Industrial Crime': '工業犯罪', 'Industrial': '製造業', 'Industry Fire': '產業發動', 'Industry close to village/camp': '產業關閉村落/camp', 'Infant (0-1)': '嬰兒 (0-1)', 'Infectious Disease (Hazardous Material)': 'Infectious疾病(危險物料)', 'Infectious Disease': 'Infectious疾病', 'Infestation': '影响', 'Informal Leader': '非正式領導者', 'Informal camp': '非正式camp', 'Information gaps': '資訊間隙', 'Infusion catheters available': 'catheters可用注入', 'Infusion catheters need per 24h': 'catheters需要注入每小時', 'Infusion catheters needed per 24h': 'catheters需要注入每小時', 'Infusions available': 'Infusions可用', 'Infusions needed per 24h': 'Infusions需要每小時', 'Injuries': '傷害', 'Input Job': '輸入工作', 'Inspected': '已檢驗', 'Inspection Date': '檢驗日期', 'Inspection date and time': '檢驗日期和時間', 'Inspection time': '檢驗時間', 'Inspector ID': '視察者 ID', 'Instance Type': '實例類型', 'Instance URL': '實例URL', 'Instant Porridge': '即時Porridge', 'Institution': '機構', 'Insufficient vars: Need module, resource, jresource, instance': '不足變數:需要模組,資源, jresource,實例', 'Insufficient': '不足', 'Intake Items': '進氣區項目', 'Intergovernmental Organization': 'Intergovernmental組織', 'Interior walls, partitions': '內部牆壁,分割區', 'Internal Features': '內部功能', 'Internal State': '內部狀態', 'International NGO': '国際NGO', 'International Organization': '国際組織', 'International Staff': '国際人員', 'Intervention': '介入', 'Interview taking place at': '進行訪談在', 'Invalid Organization ID!': '無效組織標識!', 'Invalid Query': '無效的查詢', 'Invalid Request': '無效要求', 'Invalid UUID!': 'UUID無效!', 'Invalid email': '無效的電子郵件', 'Invalid request!': '要求無效!', 'Invalid ticket': '無效的票据', 'Invalid': '無效', 'Inventories with Item': '与庫存項目', 'Inventories with Items': '与庫存項目', 'Inventories': '庫存', 'Inventory Item Details': '庫存項目詳細資料', 'Inventory Item added': '添加庫存項目', 'Inventory Item deleted': '庫存項目已刪除', 'Inventory Item updated': '庫存項目更新', 'Inventory Item': '資產項目', 'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': '庫存項目包括耗材与那些會變成資產在其目的地。', 'Inventory Items': '配備盤點項目', 'Inventory Location Details': '庫存位置詳細資料', 'Inventory Location added': '新增庫存位置', 'Inventory Location updated': '庫存位置更新', 'Inventory Location': '庫存位置', 'Inventory Locations': '庫存位置', 'Inventory Management': '庫存管理', 'Inventory Stock Position': '庫存位置', 'Inventory Store Details': '資產儲存庫明細', 'Inventory Store added': '資產新增至儲存庫', 'Inventory Store deleted': '庫存刪除儲存庫', 'Inventory 
Store updated': '庫存更新儲存庫', 'Inventory Store': '資產儲存庫', 'Inventory Stores': '儲存庫存', 'Inventory functionality is available for:': '資產功能可用于:', 'Inventory of Effects': '庫存的效果', 'Inventory': '庫存', 'Inventory/Ledger': '庫存/分類账', 'Is adequate food and water available for these institutions?': '足够食物和水用于這些機构嗎?', 'Is editing level L%d locations allowed?': '正在編輯層次L%d位置容許嗎?', 'Is it safe to collect water?': '它是安全來收集臨界值嗎?', 'Is there any industrial or agro-chemical production close to the affected area/village?': '是否有任何工業或agro-化學生產關閉至受影响的區域/村落?', 'Is this a strict hierarchy?': '這是一个嚴格階層?', 'Issuing Authority': '發出單位', 'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '它不只會擷取工作區所作用中,但也會擷取的相關資訊范圍的專案會提供每一个區域。', 'It gives four options: No Sync, Newer Timestamp, Keep All, Replace All': '它提供四个選項:沒有同步,新的時間戳記,保留所有,請更換所有', 'It is built using the Template agreed by a group of NGOs working together as the': '它建置于使用范本所認可群組的迫切合作的', 'It is suggested to open the 2 locations into new tabs so that it can be decided which is the best one to keep out of the 2.': '建議開啟這二个位置中新的標籤,以决定保留最好的一个。', 'Item Added to Shipment': '新增項目至出貨', 'Item Catalog Categories': '項目型錄種類', 'Item Catalog Category Details': '項目型錄種類詳細資料', 'Item Catalog Category added': '型錄項目新增種類', 'Item Catalog Category deleted': '項目刪除型錄種類', 'Item Catalog Category updated': '項目型錄種類更新', 'Item Catalog Category': '項目型錄種類', 'Item Catalog Details': '型錄項目詳細資料', 'Item Catalog added': '型錄項目新增', 'Item Catalog deleted': '型錄項目刪除', 'Item Catalog updated': '型錄項目更新', 'Item Catalogs': '型錄項目', 'Item Categories': '項目種類', 'Item Category Details': '項目種類明細', 'Item Category added': '項目新增種類', 'Item Category deleted': '刪除項目種類', 'Item Category updated': '更新項目種類', 'Item Category': '項目種類', 'Item Details': '項目明細', 'Item Pack Details': '項目套件詳細資料', 'Item Pack added': '項目套件新增', 'Item Pack deleted': '項目套件刪除', 'Item Pack updated': '項目更新套件', 'Item Packs': '項目套件', 'Item Sub-Categories': '項目子種類', 'Item Sub-Category Details': '項目子種類明細', 'Item Sub-Category added': '項目子新增種類', 'Item Sub-Category deleted': '項目子類別刪除', 'Item Sub-Category updated': '項目子類別更新', 'Item Sub-Category': '項目子種類', 'Item added to Inventory': '項目新增至庫存', 'Item added to shipment': '新增項目至出貨', 'Item added': '已新增項目', 'Item already in Bundle!': '項目已軟体組中!', 'Item already in Kit!': '項目已在套件!', 'Item already in budget!': '項目已在預算!', 'Item deleted': '已刪除項目', 'Item removed from Inventory': '從庫存移除的項目', 'Item updated': '更新項目', 'Item': '項目', 'Items in Category can be Assets': '項目中"類別"可以資產', 'Items': '項目', 'Japanese': '日文', 'Jerry can': 'Jerry可以', 'Jew': '猶太教', 'Job Role Catalog': '工作角色型錄', 'Job Role Details': '工作角色詳細資料', 'Job Role added': '工作角色新增', 'Job Role deleted': '工作角色刪除', 'Job Role updated': '工作角色更新', 'Job Role': '職位', 'Job Roles': '職務', 'Job Title': '工作職稱', 'Jobs': '工作', 'Journal Entry Details': '日誌項目詳細資料', 'Journal entry added': '新增日誌項目', 'Journal entry deleted': '日誌項目刪除', 'Journal entry updated': '日誌項目更新', 'Journal': '日誌', 'Just Once': '只要一次', 'KPIs': 'KPI', 'Keep All': '全部保留', 'Keep Local': '保持局部', 'Key Details': '索引鍵詳細資料', 'Key added': '新增金鑰', 'Key deleted': '鍵刪除', 'Key updated': '更新金鑰', 'Key': '注意', 'Keys': '索引鍵', 'Kit Contents': '套件內容', 'Kit Details': '套件明細', 'Kit Updated': '更新套件', 'Kit added': '新增套件', 'Kit deleted': '刪除套件', 'Kit updated': '更新套件', 'Kit': '套件', 'Kits': '配套', 'Known Identities': '已知身分', 'Known incidents of violence against women/girls': '已知事件的暴力對婦女/女孩', 'Known incidents of violence since disaster': '已知事件的暴力因為災難', 'LICENCE': '授權', 
'LICENSE': '授權', 'LMS Administration': 'LMS管理', 'Label': '標籤(Label)', 'Lack of material': '缺乏物資', 'Lack of school uniform': '缺乏學校制服', 'Lack of supplies at school': '學校缺乏用品', 'Lack of transport to school': '缺乏上學的交通工具', 'Lactating women': '哺乳期婦女', 'Landmark Details': '地標詳細資料', 'Landmark added': '新增地標', 'Landmark deleted': '刪除地標', 'Landmark updated': '更新地標', 'Landmarks': '地標', 'Language': '語言', 'Last Name': '姓', 'Last known location': '前次已知位置', 'Last name': '姓', 'Last synchronization on': '上次同步化於', 'Last synchronization time': '上次同步化時間', 'Last updated by': '上次更新的人', 'Last updated on': '上次更新於', 'Last updated': '前次更新', 'Latitude & Longitude': '緯度和經度', 'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': '緯度是南北方向(上下)。緯度在赤道為零,在北半球為正,在南半球為負。', 'Latitude is North-South (Up-Down).': '緯度是南北方向(上下)。', 'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': '緯度在赤道為零,在北半球為正,在南半球為負。', 'Latitude of Map Center': '地圖中心的緯度', 'Latitude of far northern end of the region of interest.': '相關區域最北端的緯度。', 'Latitude of far southern end of the region of interest.': '相關區域最南端的緯度。', 'Latitude should be between': '緯度必須介於', 'Latitude': '緯度', 'Law enforcement, military, homeland and local/private security': '執法、軍事、國土與地方/私人保全', 'Layer Details': '圖層詳細資料', 'Layer added': '新增圖層', 'Layer deleted': '刪除圖層', 'Layer updated': '更新圖層', 'Layer': '圖層', 'Layers updated': '圖層已更新', 'Layers': '圖層', 'Layout': '配置', 'Leader': '領導人', 'Left-to-Right': '由左至右', 'Legend Format': '圖例格式', 'Length (m)': '長度(公尺)', 'Length': '長度', 'Level 1 Assessment Details': '層次一評量詳細資料', 'Level 1 Assessment added': '新增層次一評量', 'Level 1 Assessment deleted': '刪除層次一評量', 'Level 1 Assessment updated': '更新層次一評量', 'Level 1 Assessments': '層次一評量', 'Level 1': '層次 1', 'Level 2 Assessment Details': '層次二評量詳細資料', 'Level 2 Assessment added': '新增層次二評量', 'Level 2 Assessment deleted': '刪除層次二評量', 'Level 2 Assessment updated': '更新層次二評量', 'Level 2 Assessments': '層次二評量', 'Level 2 or detailed engineering evaluation recommended': '建議進行層次二或詳細工程評估', 'Level 2': '層次 2', 'Level': '層次', 'Library support not available for OpenID': 'OpenID所需的程式庫支援不可用', 'Line': '線', 'LineString': '線串', 'Link Item & Shipment': '鏈結項目與出貨', 'Link an Item & Shipment': '鏈結項目與出貨', 'Linked Records': '鏈結記錄', 'Linked records': '鏈結記錄', 'List / Add Baseline Types': '清單/新增基準線類型', 'List / Add Impact Types': '清單/新增影響類型', 'List / Add Services': '清單/新增服務', 'List / Add Types': '清單/新增類型', 'List Activities': '列出活動', 'List Aid Requests': '列出援助要求', 'List All Assets': '列出所有資產', 'List All Catalog Items': '列出所有型錄項目', 'List All Commitments': '列出所有承諾', 'List All Entries': '列出所有項目', 'List All Item Categories': '列出所有項目種類', 'List All Memberships': '顯示所有組員', 'List All Received Shipments': '列出所有已接收出貨', 'List All Records': '列出所有記錄', 'List All Reports': '列示全部報告', 'List All Requested Items': '列出所有要求的項目', 'List All Requests': '列出所有要求', 'List All Sent Shipments': '列出所有已傳送出貨', 'List All': '列示全部', 'List Alternative Items': '列出替代項目', 'List Assessment Summaries': '列出評量摘要', 'List Assessments': '列出評量', 'List Asset Assignments': '列示資產分派', 'List Assets': '列出資產', 'List Availability': '列出可用性', 'List Baseline Types': '列出基準線類型', 'List Baselines': '列出基準線', 'List Brands': '列出品牌', 'List Budgets': '列出預算', 'List Bundles': '列出捆綁', 'List Camp Services': '列出營地服務', 'List Camp Types': '列出營地類型', 'List Camps': '列出營地', 'List Catalog Items': '列出型錄項目', 'List Catalogs': '列出型錄', 'List 
Category<>Sub-Category<>Catalog Relation': '清單Category<>Sub-Category<>Catalog關系', 'List Certificates': '凭證清單', 'List Certifications': '認證清單', 'List Checklists': '核對清單', 'List Cluster Subsectors': '叢集清單Subsectors', 'List Clusters': '叢集清單', 'List Commitment Items': '清單項目承諾', 'List Commitments': '清單Commitments', 'List Competencies': '清單能力', 'List Competency Ratings': '清單能力等級', 'List Configs': '配置清單', 'List Conflicts': '冲突清單', 'List Contact Information': '聯絡資訊清單', 'List Contacts': '列出聯絡人', 'List Course Certicates': '清單進程凭證', 'List Courses': '課程清單', 'List Credentials': '認證清單', 'List Current': '現行清單', 'List Distribution Items': '配送清單項目', 'List Distributions': '配送清單', 'List Documents': '清單文件', 'List Donors': '清單Donors', 'List Events': '事件清單', 'List Facilities': '設備清單', 'List Feature Layers': '清單功能層', 'List Flood Reports': '水災清單報告', 'List Groups': '顯示群組', 'List Groups/View Members': '列示群組成員/檢視', 'List Hospitals': '醫院清單', 'List Human Resources': '人力資源清單', 'List Identities': '身分清單', 'List Images': '影像清單', 'List Impact Assessments': '清單影响評量', 'List Impact Types': '影响清單類型', 'List Impacts': '影响清單', 'List Incident Reports': '事件報告清單', 'List Incidents': '事件清單', 'List Inventory Items': '庫存項目清單', 'List Inventory Locations': '清單庫存位置', 'List Inventory Stores': '清單儲存庫存', 'List Item Catalog Categories': '型錄種類清單項目', 'List Item Catalogs': '清單項目型錄', 'List Item Categories': '項目種類清單', 'List Item Packs': '清單項目套件', 'List Item Sub-Categories': '清單項目子種類', 'List Items in Inventory': '清單中項目的庫存', 'List Items': '清單項目', 'List Job Roles': '列出工作角色', 'List Keys': '列出金鑰', 'List Kits': '套件清單', 'List Landmarks': '清單里程碑', 'List Layers': '層清單', 'List Level 1 Assessments': '清單層次一評量', 'List Level 1 assessments': '清單層次一評量', 'List Level 2 Assessments': '清單層次二評量', 'List Level 2 assessments': '清單層次二評量', 'List Locations': '列示位置', 'List Log Entries': '日誌項目清單', 'List Map Configurations': '對映清單配置', 'List Markers': '標記清單', 'List Members': '列示成員', 'List Memberships': '成員資格清單', 'List Messages': '列出訊息', 'List Metadata': 'meta資料清單', 'List Missing Persons': '失蹤災民列表', 'List Missions': '列出任務清單', 'List Need Types': '清單需要類型', 'List Needs': '需求清單', 'List Notes': '清單附註', 'List Offices': '辦公室清單', 'List Organizations': '組織清單', 'List Partners': '伙伴清單', 'List Peers': '對等清單', 'List Personal Effects': '列出个人效果', 'List Persons': '人員清單', 'List Photos': '清單照片', 'List Population Statistics': '列出人口統計資料', 'List Positions': '位置清單', 'List Problems': '問題清單', 'List Projections': '預測清單', 'List Projects': '專案清單', 'List Rapid Assessments': '快速清單評量', 'List Received Items': '清單接收項目', 'List Received Shipments': '清單收到出貨', 'List Records': '記錄清單', 'List Registrations': '登錄清單', 'List Relatives': '關係列表', 'List Relief Items': '浮雕清單項目', 'List Reports': '清單報告', 'List Request Items': '要求清單項目', 'List Requested Skills': '需求技能列表', 'List Requests': '要求清單', 'List Resources': '列出資源', 'List Responses': '清單回應', 'List Rivers': '清單Rivers', 'List Roles': '列出角色', 'List Rooms': '列出會談室清單', 'List Scenarios': '清單實務', 'List School Districts': '清單學校行政區', 'List School Reports': '學校清單報告', 'List Sections': '清單區段', 'List Sectors': '磁區清單', 'List Sent Items': '传送的項目清單', 'List Sent Shipments': '列出貨清單', 'List Service Profiles': '服務設定檔清單', 'List Settings': '清單設定', 'List Shelter Services': '列表庇護服務', 'List Shelter Types': '列表收容所類型', 'List Shelters': '列表收容所', 'List Shipment Transit Logs': '列表過境貨物日誌', 'List Shipment/Way Bills': '清單出貨/方式账單', 'List Shipment<>Item Relation': '列表運費<>物品關係', 'List Shipments': '列表裝運', 'List Sites': '站點清單', 'List Skill Equivalences': '技能清單同等', 'List Skill Provisions': '技能清單條款', 'List Skill 
Types': '技能清單類型', 'List Skill': '技能清單', 'List Skills': '技能清單', 'List Solutions': '解决方案清單', 'List Sources': '來源清單', 'List Staff Types': '人員清單類型', 'List Staff': '列出人員清單', 'List Status': '清單狀態', 'List Storage Bin Type(s)': 'bin清單儲存体類型(S)', 'List Storage Bins': '清單存儲Bin', 'List Storage Location': '儲存体位置清單', 'List Subscriptions': '清單訂閱', 'List Subsectors': '清單Subsectors', 'List Support Requests': '清單支援要求', 'List Survey Answers': '清單調查答案', 'List Survey Questions': '調查問題清單', 'List Survey Sections': '清單調查區段', 'List Survey Series': '清單調查系列', 'List Survey Templates': '清單調查范本', 'List Tasks': '列出作業', 'List Teams': '小組清單', 'List Themes': '布景主題清單', 'List Tickets': '問題單清單', 'List Tracks': '追蹤清單', 'List Trainings': '清單撰文', 'List Units': '單位清單', 'List Updates': '更新清單', 'List Users': '列出使用者', 'List Vehicle Details': '交通工具資料', 'List Vehicles': '交通工劇烈表', 'List Volunteers': '志愿者清單', 'List Warehouses': '倉庫清單', 'List all': '列示全部', 'List available Scenarios': '列出可用的實務范例', 'List of CSV files uploaded': '已上傳CSV 檔案列表', 'List of CSV files': 'CSV 檔案列表', 'List of Items': '項目清單', 'List of Missing Persons': '清單遺漏的人員', 'List of Peers': '清單的對等', 'List of Reports': '報告清單', 'List of Requests': '要求清單', 'List of Spreadsheets uploaded': '清單的試算表上传', 'List of Spreadsheets': '清單的試算表', 'List of Volunteers for this skill set': '清單的主動参与者的這个職位技能設定', 'List of Volunteers': '志愿者清單', 'List of addresses': '清單的位址', 'List unidentified': '未定义的清單', 'List': '清單', 'List/Add': '顯示/新增群組', 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': '列出"誰正在做什么& "where"。可釋放機构來协調它們的活動', 'Live Help': '即時說明', 'Livelihood': '生計', 'Load Cleaned Data into Database': '加載已清理的數據到數據庫', 'Load Details': '載入詳細資料', 'Load Raw File into Grid': '載入原始檔案到網格', 'Load the details to help decide which is the best one to keep out of the 2.': '載入的詳細資料來协助判定哪个是最好的一个保留的二。', 'Loading Locations...': '正在載入位置...', 'Loading': '載入中', 'Local Language': '本地語言', 'Local Name': '綽號', 'Local Names': '綽號', 'Location 1': '位置一', 'Location 2': '位置二', 'Location De-duplicated': '位置取消重复', 'Location Details': '地點明細', 'Location Hierarchy Level 0 Name': '位置階層層次〇名稱', 'Location Hierarchy Level 1 Name': '位置階層層次一名稱', 'Location Hierarchy Level 2 Name': '位置階層層次二名稱', 'Location Hierarchy Level 3 Name': '位置階層層次三名稱', 'Location Hierarchy Level 4 Name': '位置階層層次四名稱', 'Location Hierarchy Level 5 Name': '位置階層層次五名稱', 'Location added': '新增位置', 'Location cannot be converted into a group.': '位置不能轉換成一个群組。', 'Location deleted': '位置已移除', 'Location details': '地點明細', 'Location group cannot be a parent.': '位置群組不能是母項。', 'Location group cannot have a parent.': '位置群組不能有一个母項。', 'Location groups can be used in the Regions menu.': '位置群組可用于區域的功能表。', 'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': '位置群組可能用來過濾顯示的內容在地圖上和在搜尋結果中只能實体所涵蓋的位置群組。', 'Location updated': '更新位置', 'Location': '地點', 'Location: ': '地點: ', 'Location:': '位置:', 'Locations De-duplicator': '取消位置duplicator', 'Locations of this level need to have a parent of level': '這个層次的位置需要有母項的層次', 'Locations should be different!': '應該是不同的位置!', 'Locations': '位置', 'Lockdown': '鎖定', 'Log Entry Details': '日誌項目詳細資料', 'Log entry added': '新增日誌項目', 'Log entry deleted': '刪除日誌', 'Log entry updated': '日誌項目更新', 'Log': '日誌', 'Logged in': '已登入', 'Logged out': '登出', 'Login': '登入', 'Logistics Management System': '物流管理系统', 'Logistics Management': '管理物流', 'Logistics': '物流', 'Logo file %s missing!': 'Logo file %s 失蹤!', 'Logo': 'logo', 'Logout': '登出', 'Long Text': '長文字', 'Longitude 
is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. These need to be added in Decimal Degrees.': '經度是西-East (短)。 緯度是北美-南-(上下)。 緯度是〇equator与正在北部地區部分和負數在南部部分。 經度是〇本初子午线(格林威治標準時間表示),正向,東,歐洲和亞洲"。 經度為負數,西,越過大西洋和"美洲"。 這些需要新增以小數度。', 'Longitude is West - East (sideways). Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '經度是西-East (短)。 經度是〇本初子午线(格林威治標準時間表示),正向,東,歐洲和亞洲"。 經度為負數,西,越過大西洋和"美洲"。', 'Longitude is West - East (sideways).': '經度是西-East (短)。', 'Longitude is West-East (sideways).': '經度是西-East (短)。', 'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '經度是〇本初子午线(格林威治標準時間表示),正向,東,歐洲和亞洲"。 經度為負數,西,越過大西洋和"美洲"。', 'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '經度是〇本初子午线(透過格林威治,英国),正向,東,歐洲和亞洲"。 經度為負數,西,越過大西洋和"美洲"。', 'Longitude of Map Center': '經度的對映中心', 'Longitude of far eastern end of the region of interest.': '經度的最東部結束的區域相關的。', 'Longitude of far western end of the region of interest.': '經度的最西方結束的區域相關的。', 'Longitude should be between': '經度必须介于', 'Longitude': '經度', 'Lost Password': '忘記密碼', 'Lost': '遺失', 'Low': '低', 'Machine with which data was exchanged.': '機器資料交換。', 'Magnetic Storm': '磁性暴雨', 'Main cash source': '主要現金來源', 'Main income sources before disaster': '主收入來源前災難', 'Major Damage': '主要損壞', 'Major expenses': '主要費用', 'Major outward damage': '主要往外損壞', 'Make Commitment': '使承諾', 'Make New Commitment': '使新承諾', 'Make Pledge': '使質押', 'Make Request': '使要求', 'Make a Request for Aid': '提出請求的輔助', 'Make a Request': '使一个要求', 'Make a request': '使一个要求', 'Make preparations per the <instruction>': '準備讓每个<instruction>', 'Male': '男性', 'Malnutrition present prior to disaster': 'Malnutrition存在之前災難', 'Manage Category': '管理種類', 'Manage Events': '管理事件', 'Manage Item catalog': '管理項目型錄', 'Manage Kits': '管理套件', 'Manage Relief Item Catalogue': '管理浮雕項目型錄', 'Manage Sub-Category': '管理子種類', 'Manage Users & Roles': '管理使用者角色', 'Manage Warehouses/Sites': '管理倉庫/站點', 'Manage Your Facilities': '管理您的設備', 'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': '管理要求提供,資產,人員或其他資源。 比對庫存位置提供要求。', 'Manage requests of hospitals for assistance.': '管理要求的醫院的协助。', 'Manage volunteers by capturing their skills, availability and allocation': '管理参与者擷取其技能,可用性和配置', 'Manage': '管理', 'Manager': '管理程式', 'Managing Office': '辦公室管理', 'Managing, Storing and Distributing Relief Items': '管理,儲存和散發的項目', 'Managing, Storing and Distributing Relief Items.': '管理,儲存和散發的項目。', 'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': '必要的。 在GeoServer,這是層的名稱。 在WFS getCapabilities,這是FeatureType名稱部分之后,冒號(:)。', 'Mandatory. 
The URL to access the service.': '必要的。 的URL來存取服務。', 'Manual Synchronization': '手動同步化', 'Manual': '手動', 'Many': '許多', 'Map Center Latitude': '對映中心緯度', 'Map Center Longitude': '對映中心經度', 'Map Configuration Details': '對映配置詳細資料', 'Map Configuration added': '新增對映配置', 'Map Configuration deleted': '刪除對映配置', 'Map Configuration removed': '移除對映配置', 'Map Configuration updated': '對映配置更新', 'Map Configuration': '對映配置', 'Map Configurations': '對映配置', 'Map Height': '對映高度', 'Map Service Catalogue': '對服務型錄', 'Map Settings': '對映設定', 'Map Viewing Client': '檢視用户端對映', 'Map Width': '地圖寬度', 'Map Zoom': '對映縮放', 'Map of Hospitals': '對映的醫院', 'Map': '地圖', 'Mapping': '地圖模組', 'Marine Security': '海運安全', 'Marital Status': '婚姻狀況', 'Marker Details': '標記詳細資料', 'Marker added': '新增標記', 'Marker deleted': '標記刪除', 'Marker updated': '更新標記', 'Marker': '標記', 'Markers': '標記', 'Master Message Log to process incoming reports & requests': '主要訊息日誌以處理進入的報告和要求', 'Master Message Log': '主要訊息日誌', 'Match Percentage': '符合百分比', 'Match Requests': '符合要求', 'Match percentage indicates the % match between these two records': '相符百分比表示的百分比之間符合這二个記錄', 'Match?': '相符?', 'Matching Catalog Items': '相符的型錄項目', 'Matching Items': '相符的項目', 'Matching Records': '相符記錄', 'Matrix of Choices (Multiple Answers)': '矩陣的選項(多个答案)', 'Matrix of Choices (Only one answer)': '矩陣的選項(只能有一个答案)', 'Matrix of Text Fields': '矩陣的文字欄位', 'Max Persons per Dwelling': '每住宅最大人數', 'Maximum Location Latitude': '最大位置緯度', 'Maximum Location Longitude': '最大位置經度', 'Maximum Weight': '最大重量', 'Maximum weight capacity of the Storage Location followed by choosing the unit from the drop down list.': '存儲位置的最大承重能力然後從下拉列表中選擇單位。', 'Maximum weight capacity of the items the storage bin can contain. followed by choosing the unit from the drop down list.': '最大重量的項目儲存在可以包含。 接着選擇裝置中的下拉列表。', 'Measure Area: Click the points around the polygon & end with a double-click': '測量面積:點擊多邊形周圍的點和雙擊結束', 'Measure Length: Click the points along the path & end with a double-click': '測量長度:按一下點,并沿着路迳&下一个按一下', 'Measures': '測量', 'Medical Attention': '醫學注意', 'Medical Staff': '醫療人員', 'Medical Supplies': '醫療補給品', 'Medical and public health': '醫療及公共健康', 'Medicine': '醫藥', 'Medium': '中', 'Megabytes per Month': '每月(MB)', 'Member removed from Group': '組員已刪除', 'Members': '成員', 'Membership Details': '組員內容', 'Membership updated': '組員已更新', 'Membership': '成員資格', 'Memberships': '群組設定', 'Mental': '養心殿', 'Message Details': '訊息詳細資料', 'Message Variable': '訊息變數', 'Message added': '新增訊息', 'Message deleted': '訊息已刪除', 'Message sent to outbox': '訊息传送至寄件匣', 'Message updated': '更新訊息', 'Message variable': '訊息變數', 'Message': '訊息', 'Messages': '訊息', 'Messaging settings updated': '传訊設定更新', 'Messaging': '傳訊模組', 'Metadata Details': 'Meta 資料的詳細資料', 'Metadata added': '新增meta資料', 'Metadata can be supplied here to be applied to all uploaded photos, if desired.': 'meta資料可以提供來套用至所有上传的照片, (如果想要)。', 'Metadata deleted': '刪除meta資料', 'Metadata updated': '更新meta資料', 'Metadata': 'meta 資料 (metadata)', 'Meteorological (inc. 
flood)': 'Meteorological (收入。 水災)', 'Method used': '使用方法', 'Micronutrient malnutrition prior to disaster': '災前微量營養素營養不良', 'Middle Name': '中間名', 'Migrants or ethnic minorities': 'Migrants或鬥爭', 'Military': '軍事', 'Minimum Bounding Box': '最小外框', 'Minimum Location Latitude': '最小位置緯度', 'Minimum Location Longitude': '最小位置經度', 'Minimum shift time is 6 hours': '最小時間為六小時移位', 'Minor Damage': '次要損壞', 'Minor/None': '次要/無', 'Minorities participating in coping activities': '勝出参与复制活動', 'Minute': '分鐘', 'Minutes must be a number between 0 and 60': '分鐘必须是〇和60之間的數字', 'Minutes must be between 0 and 60': '分鐘必须在〇和60之間', 'Minutes per Month': '分鐘每月', 'Minutes should be a number greater than 0 and less than 60': '分鐘應該是一个數字大于〇且小于60', 'Minutes should be greater than 0 and less than 60': '分鐘應該大于〇且小于60', 'Miscellaneous': '雜項', 'Missing Person Details': '遺漏人員詳細資料', 'Missing Person Registry': '遺漏人員登錄', 'Missing Person Reports': '失蹤人員報表', 'Missing Person': '遺漏人員', 'Missing Persons Registry': '遺漏人員登錄', 'Missing Persons Report': '失蹤人員報表', 'Missing Persons': '失蹤人員', 'Missing Report': '遺漏報告', 'Missing Senior Citizen': '遺漏資深市民', 'Missing Vulnerable Person': '遺漏漏洞人員', 'Missing': '遺漏', 'Mission Details': '任務詳細資料', 'Mission Record': '任務記錄', 'Mission added': '添加任務', 'Mission deleted': '刪除任務', 'Mission updated': '更新任務', 'Missions': '展望', 'Mobile Assess.': '行動評定。', 'Mobile Basic Assessment': '行動基本評量', 'Mobile Phone': '行動電話', 'Mobile': '行動電話', 'Mode': '模式', 'Model/Type': '型號/類型', 'Modem Settings': '數據機設定', 'Modem settings updated': '數据機設定更新', 'Modem': '數據機', 'Moderate': '普通', 'Moderator': '主持人', 'Modify Feature: Select the feature you wish to deform & then Drag one of the dots to deform the feature in your chosen manner': '修改功能:選取的功能時,您要deform和拖放的其中一个點deform的功能,您選擇的方式', 'Modify Information on groups and individuals': '修改群組的相關資訊及个人', 'Modifying data in spreadsheet before importing it to the database': '修改資料在試算表中匯入之前,將它的資料庫', 'Module Administration': '模組管理', 'Module disabled!': '模組已停用!', 'Module provides access to information on current Flood Levels.': '模組可讓您存取資訊目前正層次。', 'Module stores structured reports done by Professional Organizations - currently data includes WFP Assessments.': '模組儲存結构化報告來完成專業組織-目前資料包括WFP評估。', 'Module': '模組', 'Monday': '星期一', 'Monthly Cost': '每月成本', 'Monthly Salary': '每月薪資', 'Months': '月數', 'More about OpenID': '更多關於 OpenID', 'Morgue Status': 'Morgue狀態', 'Morgue Units Available': 'Morgue可用的單位', 'Motorcycle': '摩托車', 'Move Feature: Drag feature to desired location': '移動功能:拖曳功能所需的位置', 'Movements (Filter In/Out/Lost)': '移動過濾器(輸入/輸出/遺失)', 'MultiPolygon': '多個多邊形', 'Multiple Choice (Multiple Answers)': '多个選項(多个答案)', 'Multiple Choice (Only One Answer)': '多个選項(僅一回答)', 'Multiple Matches': '多個相符的項目', 'Multiple Text Fields': '多个文字欄位', 'Multiple': '多個', 'Muslim': '回教', 'Must a location have a parent location?': '必须有一个位置有一个母項位置?', 'My Current function': '我的現行函數', 'My Tasks': '我的任務', 'My Volunteering': '我的志工任務', 'N/A': '不適用', 'NO': '無影響', 'NZSEE Level 1': 'NZSEE層次一', 'NZSEE Level 2': 'NZSEE層次二', 'Name and/or ID Label': '名稱和/或識別號碼標籤', 'Name and/or ID': '名稱和/或識別號碼', 'Name of School': '學校的名稱', 'Name of Storage Bin Type.': '存儲箱類型名稱。', 'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': '文件的名稱(&可選的子路徑)位於靜態應該用於背景的頭。', 'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': '檔案的名稱(&選用子路迳)位于靜態應該使用的最左影像。', 'Name of the file (& optional sub-path) located in views which should be used for footer.': '檔案的名稱(&選用子路迳)位于視圖中應該使用的頁尾。', 
'Name of the person in local language and script (optional).': '的人員名稱以當地語言及Script (選用)。', 'Name of the unit or department this report refers to. Leave empty if your hospital has no subdivisions.': '名稱的單位或部門這份報告的参照。 如果您保留空白醫院沒有子分區。', 'Name': '名稱', 'Name, Org and/or ID': '名稱,組織及/或ID', 'Name/Model/Type': '名稱/模型/類型', 'Names can be added in multiple languages': '名稱可以添加多語言', 'National ID Card': '国家ID卡', 'National NGO': 'NGO国家', 'National Staff': '国家人員', 'National': 'NATIONAL', 'Nationality of the person.': '聯絡人的國籍.', 'Nationality': '國籍', 'Nautical Accident': 'Nautical意外', 'Nautical Hijacking': 'Nautical强制存取', 'Need Type Details': '需要類型詳細資料', 'Need Type added': '需要新增類型', 'Need Type deleted': '需要刪除類型', 'Need Type updated': '需要更新類型', 'Need Type': '需要類型', 'Need Types': '需要類型', 'Need added': '需要新增', 'Need deleted': '需要刪除', 'Need to be logged-in to be able to submit assessments': '需要登入,才能提交評量', 'Need to configure Twitter Authentication': '需要配置Twitter鉴別', 'Need to select 2 Locations': '需要選取二个位置', 'Need to specify a Budget!': '需要指定的預算!', 'Need to specify a Kit!': '需要指定一个套件!', 'Need to specify a Resource!': '必须指定資源!', 'Need to specify a bundle!': '需要指定軟体組!', 'Need to specify a feature group!': '需要指定一項特性群組!', 'Need to specify a group!': '需要指定"群組"!', 'Need to specify a location to search for.': '需要指定一个位置來搜尋。', 'Need to specify a role!': '需要指定一个角色!', 'Need to specify a table!': '需要指定一个表格!', 'Need to specify a user!': '需要指定一个使用者!', 'Need updated': '需要更新', 'Needs Details': '需求詳細資料', 'Needs Maintenance': '需要維護', 'Needs to reduce vulnerability to violence': '需要减少漏洞暴力', 'Needs': '需要', 'Negative Flow Isolation': '負數流程隔離', 'Neighborhood': '鄰居', 'Neighbourhood': '鄰居', 'Neighbouring building hazard': '鄰近建置危害', 'Network': '網路', 'Neurology': '神經內科', 'New Assessment reported from': '新的評量報告從', 'New Certificate': '新建憑證', 'New Checklist': '新核對清單', 'New Entry': '新建文章', 'New Event': '新建事件', 'New Item Category': '新項目種類', 'New Job Role': '新工作角色', 'New Location Group': '新位置群組', 'New Location': '新位置', 'New Patient': '新病患', 'New Peer': '新的同層級', 'New Record': '新建記錄', 'New Report': '新建報告', 'New Request': '我的要求', 'New Scenario': '新方案', 'New Skill': '新技能', 'New Solution Choice': '新解决方案選項', 'New Staff Member': '新人員成員', 'New Support Request': '新申請', 'New Synchronization Peer': '新的同層級同步化', 'New Team': '新團隊', 'New Training Course': '新的培訓課程', 'New Volunteer': '新志工', 'New cases in the past 24h': '新案例在過去小時', 'New': '新建', 'Newer Timestamp': '更新時間戳記', 'News': '新聞', 'Next View': '下一頁', 'Next': '下一頁(N)', 'No Activities Found': '沒有找到的活動', 'No Activities currently registered in this event': '在本事件中,並無已登錄的活動', 'No Addresses currently registered': '沒有位址目前登錄', 'No Aid Requests currently registered': '目前沒有任何已登錄的需求', 'No Alternative Items currently registered': '沒有替代項目目前登錄', 'No Assessment Summaries currently registered': '沒有評估目前已登錄摘要', 'No Assessments currently registered': '沒有評估目前登錄', 'No Asset Assignments currently registered': '沒有資產指派目前已登錄', 'No Assets currently registered in this event': '沒有資產目前登錄在此事件', 'No Assets currently registered in this scenario': '沒有資產目前已登錄在這个實務中', 'No Assets currently registered': '沒有資產目前已登錄', 'No Baseline Types currently registered': '沒有基準线類型目前登錄', 'No Baselines currently registered': '沒有基準线目前登錄', 'No Brands currently registered': '沒有品牌目前登錄', 'No Budgets currently registered': '目前沒有預算登錄', 'No Bundles currently registered': '目前沒有軟体組登錄', 'No Camp Services currently registered': '沒有Camp服務目前登錄', 'No Camp Types currently registered': 'Camp沒有類型目前登錄', 'No Camps currently registered': 'Camps沒有目前登錄', 'No Catalog Items currently 
registered': '沒有型錄項目目前已登錄', 'No Catalogs currently registered': '任何型錄目前已登錄', 'No Category<>Sub-Category<>Catalog Relation currently registered': 'Category<>Sub-沒有Category<>Catalog關系目前已登錄', 'No Checklist available': '沒有可用的清單', 'No Cluster Subsectors currently registered': '沒有Subsectors叢集目前登錄', 'No Clusters currently registered': '沒有"叢集目前登錄', 'No Commitment Items currently registered': '不確定項目目前登錄', 'No Commitments': '沒有Commitments', 'No Configs currently defined': '沒有配置目前定义', 'No Credentials currently set': '目前沒有認證設定', 'No Details currently registered': '沒有詳細資料目前已登錄', 'No Distribution Items currently registered': '沒有分配項目目前登錄', 'No Distributions currently registered': '沒有當前發行版註冊', 'No Documents found': '找不到文件', 'No Donors currently registered': 'Donors目前沒有登錄', 'No Events currently registered': '目前登錄任何事件', 'No Facilities currently registered in this event': '沒有設備目前登錄在此事件', 'No Facilities currently registered in this scenario': '沒有設備目前登錄在這个實務中', 'No Feature Layers currently defined': '沒有功能層目前定义', 'No Flood Reports currently registered': '沒有溢出報告目前正在登錄', 'No GPS data currently registered': '目前無GPS資料被登錄', 'No Groups currently defined': '目前沒有群組定义', 'No Groups currently registered': '目前沒有群組', 'No Hospitals currently registered': '沒有醫院目前登錄', 'No Human Resources currently registered in this event': '沒有人力資源目前已在這个事件', 'No Human Resources currently registered in this scenario': '沒有人力資源目前已登錄在這个實務中', 'No Identification Report Available': '識別沒有報告可用', 'No Identities currently registered': '沒有目前登錄身分', 'No Image currently defined': '沒有映像檔目前定义', 'No Image': '沒有影像', 'No Images currently registered': '沒有影像目前登錄', 'No Impact Types currently registered': '沒有目前有登記的影響類型', 'No Impacts currently registered': '沒有影响目前已登錄', 'No Import Files currently uploaded': '目前無影響檔案被上傳', 'No Incident Reports currently registered': '目前没有事件報告記錄', 'No Incidents currently registered': '目前没有事件記錄', 'No Incoming Shipments': '沒有進貨', 'No Inventories currently have suitable alternative items in stock': '目前沒有合適的替代品尚有庫存', 'No Inventories currently have this item in stock': '目前沒有合適的替代品尚有庫存', 'No Inventory Items currently registered': '目前沒有登記的庫存項目', 'No Inventory Locations currently registered': '目前沒有登記的庫存地點', 'No Inventory Stores currently registered': '無庫存儲存目前已登錄', 'No Item Catalog Category currently registered': '沒有項目型錄種類目前已登錄', 'No Item Catalog currently registered': '沒有項目型錄目前已登錄', 'No Item Categories currently registered': '沒有項目種類目前登錄', 'No Item Packs currently registered': '沒有項目套件目前已登錄', 'No Item Sub-Category currently registered': '項目沒有子類別目前已登錄', 'No Item currently registered': '沒有項目目前已登錄', 'No Items currently registered in this Inventory': '沒有項目目前登錄在此資產', 'No Items currently registered': '沒有項目目前登錄', 'No Items currently requested': '沒有項目目前要求', 'No Keys currently defined': '目前未定义任何金鑰', 'No Kits currently registered': '套件沒有目前登錄', 'No Landmarks currently defined': '沒有目前定义里程碑', 'No Level 1 Assessments currently registered': '沒有層次一評量目前已登錄', 'No Level 2 Assessments currently registered': '沒有層次二評量目前已登錄', 'No Locations currently available': '目前可用的任何位置', 'No Locations currently registered': '任何位置目前登錄', 'No Map Configurations currently defined': '沒有對映配置目前定义', 'No Map Configurations currently registered in this event': '沒有對映配置目前登錄在此事件', 'No Map Configurations currently registered in this scenario': '沒有對映配置目前登錄在這个實務中', 'No Markers currently available': '沒有當前可用標記', 'No Match': '沒有相符的項目', 'No Matching Catalog Items': '沒有相符的型錄項目', 'No Matching Items': '沒有相符的項目', 'No Matching Records': '沒有相符的記錄', 'No Members currently registered': '沒有成員目前登錄', 'No Memberships 
currently defined': '沒有資格目前定义', 'No Messages currently in Outbox': '沒有訊息目前在寄件匣', 'No Metadata currently defined': '目前沒有meta資料定义', 'No Need Types currently registered': '不需要目前登錄類型', 'No Needs currently registered': '目前沒有登錄需要', 'No Offices currently registered': '沒有辦公室目前登錄', 'No Offices found!': '沒有辦公室找到!', 'No Organizations currently registered': '目前登錄任何組織', 'No Organizations registered!': '沒有組織登錄!', 'No Packs for Item': '此品項無包裝', 'No Partners currently registered': '沒有伙伴目前登錄', 'No Patients currently registered': '目前沒有病人登錄', 'No Peers currently registered': '沒有同層級目前登錄', 'No People currently committed': '目前無人承諾', 'No People currently registered in this camp': '沒有人員目前登錄在此camp', 'No People currently registered in this shelter': '沒有人員目前登錄在此shelter', 'No Persons currently registered': '沒有人員目前已登錄', 'No Persons currently reported missing': '沒有人員目前報告遺漏', 'No Persons found': '沒有找到人員', 'No Photos found': '沒有找到照片', 'No Picture': '沒有圖片', 'No Population Statistics currently registered': '沒有移入目前已登錄统計', 'No Presence Log Entries currently registered': '不存在日誌目前已登錄', 'No Problems currently defined': '目前沒有問題定义', 'No Projections currently defined': '沒有估算目前定义', 'No Projects currently registered': '沒有專案目前已登錄', 'No Rapid Assessments currently registered': '沒有快速評估目前登錄', 'No Received Items currently registered': '沒有收到項目目前登錄', 'No Received Shipments': '沒有收到出貨', 'No Records currently available': '沒有記錄當前可用', 'No Records matching the query': '沒有符合查詢的記錄', 'No Reports currently registered': '沒有報告目前登錄', 'No Request Items currently registered': '項目目前沒有要求註冊', 'No Requests have been made yet': '沒有要求尚未完成', 'No Requests match this criteria': '沒有要求符合此準則', 'No Requests': '沒有要求', 'No Responses currently registered': '沒有回應目前已登錄', 'No Rivers currently registered': 'Rivers目前沒有登錄', 'No Roles currently defined': '目前未定义任何角色', 'No Rooms currently registered': '沒有會談室目前登錄', 'No Scenarios currently registered': '沒有目前登錄實務', 'No School Districts currently registered': '沒有學校行政區目前登錄', 'No School Reports currently registered': '沒有學校報告目前登錄', 'No Sections currently registered': '沒有區段目前登錄', 'No Sectors currently registered': '目前沒有已註冊部門', 'No Sent Items currently registered': '目前沒有已發送項目', 'No Sent Shipments': '沒有已發送貨物', 'No Settings currently defined': '目前沒有定義設置', 'No Shelter Services currently registered': '目前沒有註冊住房服務', 'No Shelter Types currently registered': '沒有Shelter類型目前登錄', 'No Shelters currently registered': '目前沒有註冊住房', 'No Shipment Transit Logs currently registered': '沒有出貨日誌传送目前已登錄', 'No Shipment/Way Bills currently registered': '沒有出貨/方式账單目前已登錄', 'No Shipment<>Item Relation currently registered': '沒有Shipment<>Item關系目前登錄', 'No Sites currently registered': '沒有站點目前登錄', 'No Skill Types currently set': '技能沒有類型目前設定', 'No Solutions currently defined': '沒有解决方案目前定义', 'No Sources currently registered': '沒有來源目前登錄', 'No Staff Types currently registered': '沒有人員類型目前登錄', 'No Staff currently registered': '沒有人員目前已登錄', 'No Storage Bin Type currently registered': '沒有儲存体bin目前登錄類型', 'No Storage Bins currently registered': '沒有儲存目前登錄紙匣', 'No Storage Locations currently registered': '沒有儲存体位置目前登錄', 'No Subscription available': '沒有可用的訂閱', 'No Subsectors currently registered': '目前沒有已登錄界別分組', 'No Support Requests currently registered': '不支援要求目前已登錄', 'No Survey Answers currently entered.': '目前沒有已輸入調查答案。', 'No Survey Answers currently registered': '沒有意見調查答案目前登錄', 'No Survey Questions currently registered': '沒有調查問題目前登錄', 'No Survey Sections currently registered': '沒有調查區段目前登錄', 'No Survey Series currently registered': '沒有調查系列目前登錄', 'No Survey Template currently 
registered': '沒有調查范本目前已登錄', 'No Sync': '沒有同步', 'No Tasks currently registered in this event': '在此事件中,無任何登錄的任務', 'No Tasks currently registered in this scenario': '在此情境中,無任何登錄的任務', 'No Tasks with Location Data': '沒有作業位置資料', 'No Teams currently registered': '目前沒有已登錄團隊', 'No Themes currently defined': '沒有主題目前定义', 'No Tickets currently registered': '沒有單目前登錄', 'No Tracks currently available': '沒有追蹤目前可用', 'No Units currently registered': '單元沒有目前登錄', 'No Updates currently registered': '沒有更新目前已登錄', 'No Users currently registered': '沒有使用者目前已登錄', 'No Volunteers currently registered': '沒有目前志愿者註冊', 'No Warehouses currently registered': '沒有目前登錄倉庫', 'No access at all': '沒有存取在所有', 'No access to this record!': '無法存取這个記錄!', 'No action recommended': '沒有建議的動作', 'No conflicts logged': '沒有冲突登入', 'No contact information available': '沒有可用的聯絡資訊', 'No contacts currently registered': '沒有聯絡人目前登錄', 'No data in this table - cannot create PDF!': '沒有此表格中的資料-無法建立PDF!', 'No databases in this application': '這个應用程式中任何資料庫', 'No dead body reports available': '沒有传送主体可用報告', 'No entries found': '找不到項目', 'No entries matching the query': '沒有符合查詢的項目', 'No entry available': '沒有可用的項目', 'No import jobs': '沒有匯入工作', 'No linked records': '沒有鏈結記錄', 'No location known for this person': '任何位置的識別此人員', 'No location known of this person.': '位置沒有已知的這位人員。', 'No locations found for members of this team': '任何位置找到的此小組的成員', 'No locations registered at this level': '任何位置登錄在這个層次', 'No log entries matching the query': '沒有符合查詢的日誌項目', 'No matching records found.': '沒有找到相符的記錄。', 'No messages in the system': '系统中沒有訊息', 'No notes available': '沒有可用的筆記', 'No of Families Settled in the Schools': '沒有的系列已在學校', 'No of Families to whom Food Items are Available': '沒有的系列對象食品可用的項目', 'No of Families to whom Hygiene is Available': '沒有的系列對象Hygiene是可用的', 'No of Families to whom Non-Food Items are Available': '沒有的系列對象的非食品可用的項目', 'No of Female Students (Primary To Higher Secondary) in the Total Affectees': '沒有的女性學員(主要較次要)總數中Affectees', 'No of Female Teachers & Other Govt Servants in the Total Affectees': '沒有的女性教師和其他政府服務者總數中Affectees', 'No of Male Students (Primary To Higher Secondary) in the Total Affectees': '沒有的男性學員(主要較次要)總數中Affectees', 'No of Male Teachers & Other Govt Servants in the Total Affectees': '沒有的男性教師和其他政府服務者總數中Affectees', 'No of Rooms Occupied By Flood Affectees': '沒有的檔案室被水災Affectees', 'No peers currently registered': '沒有同層級目前登錄', 'No pending registrations found': '沒有擱置的登錄找到', 'No pending registrations matching the query': '沒有擱置符合查詢的登錄', 'No person record found for current user.': '沒有人員記錄找到的現行使用者。', 'No positions currently registered': '沒有位置目前登錄', 'No problem group defined yet': '沒有問題群組尚未定义', 'No records matching the query': '沒有符合查詢的記錄', 'No records to delete': '沒有要刪除的記錄', 'No recovery reports available': '回复沒有可用的報告', 'No report available.': '沒有可用的報告。', 'No reports available.': '沒有可用的報告。', 'No reports currently available': '目前沒有可用的報告', 'No requests currently registered': '沒有要求目前已登錄', 'No requests found': '找不到要求', 'No resources currently registered': '沒有資源目前已登錄', 'No resources currently reported': '沒有資源目前報告', 'No service profile available': '沒有可用的服務配置', 'No skills currently set': '任何技術目前設定', 'No staff or volunteers currently registered': '目前沒有已登錄人員或志願者', 'No status information available': '沒有可用的狀態資訊', 'No sync permitted!': '不允許同步!', 'No synchronization': '沒有同步化', 'No tasks currently assigned': '沒有任務被指派', 'No tasks currently registered': '沒有作業目前已登錄', 'No template found!': '沒有找到范本!', 'No units currently registered': '單元沒有目前登錄', 'No volunteer availability 
registered': '目前沒有已登錄的志工可用性', 'No volunteer information registered': '目前沒有已登錄的志工資訊', 'No': '否', 'Non-medical Staff': '非醫療人員', 'Non-structural Hazards': '非結構性危害', 'None (no such record)': '無(無此記錄)', 'None': '無', 'Normal food sources disrupted': '正常食物來源中斷', 'Normal': '正常', 'Not Applicable': '不適用', 'Not Authorised!': '未獲授權!', 'Not Possible': '不可能', 'Not Set': '未設定', 'Not Authorized': '未獲授權', 'Not installed or incorrectly configured.': '未安裝或配置不正確。', 'Not supported': '不支援', 'Not yet a Member of any Group': '尚未加入任何群組', 'Note Details': '附註詳細資料', 'Note Status': '附註狀態', 'Note Type': '附註類型', 'Note added': '已新增附註', 'Note deleted': '刪除附註', 'Note that this list only shows active volunteers. To see all people registered in the system, do a search from the home screen instead': '請注意,這份清單只顯示活躍的志工。若要查看系統中登錄的所有人員,請改從首頁畫面搜尋', 'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': '請注意,這份清單只顯示活躍的志工。若要查看系統中登錄的所有人員,請改從這個畫面搜尋', 'Note updated': '附註更新', 'Note': '附註', 'Notes': '附註', 'Notice to Airmen': '飛行員通告', 'Number of Columns': '直欄數', 'Number of Patients': '病患人數', 'Number of People Affected': '受影響人數', 'Number of People Deceased': '死亡人數', 'Number of People Injured': '受傷人數', 'Number of Rows': '橫列數', 'Number of Vehicles': '車輛數目', 'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': '預計未來24小時內本單位將新增可用的該類型病床數。', 'Number of alternative places for studying': '替代學習場所數', 'Number of available/vacant beds of that type in this unit at the time of reporting.': '報告時本單位該類型可用/空置的病床數。', 'Number of deaths during the past 24 hours.': '過去24小時內的死亡人數。', 'Number of discharged patients during the past 24 hours.': '過去24小時內出院的病患人數。', 'Number of doctors actively working': '仍在工作的醫師人數', 'Number of doctors': '醫師人數', 'Number of houses damaged, but usable': '受損但可用的房屋數', 'Number of houses destroyed/uninhabitable': '損毀/無法居住的房屋數', 'Number of in-patients at the time of reporting.': '報告時的住院病患人數。', 'Number of latrines': '廁所數量', 'Number of midwives actively working': '仍在工作的助產士人數', 'Number of newly admitted patients during the past 24 hours.': '過去24小時內新收治的病患人數。', 'Number of non-medical staff': '非醫療人員人數', 'Number of nurses actively working': '仍在工作的護士人數', 'Number of nurses': '護士人數', 'Number of private schools': '私立學校數', 'Number of public schools': '公立學校數', 'Number of religious schools': '宗教學校數', 'Number of residential units not habitable': '無法居住的住宅單位數', 'Number of residential units': '住宅單位數', 'Number of schools damaged but usable': '受損但可用的學校數', 'Number of schools destroyed/uninhabitable': '損毀/無法使用的學校數', 'Number of schools open before disaster': '災前開課的學校數', 'Number of schools open now': '目前開課的學校數', 'Number of teachers affected by disaster': '受災害影響的教師人數', 'Number of teachers before disaster': '災前教師人數', 'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': '這家醫院空置/可用的病床數。自動從每日報告更新。', 'Number of vacant/available units to which victims can be transported immediately.': '可立即運送受害者前往的空置/可用單位數。', 'Number or Label on the identification tag this person is wearing (if any).': '此人配戴的識別標籤上的編號或符號(如果有)。', 'Number or code used to mark the place of find, e.g. 
flag code, grid coordinates, site reference number or similar (if available)': '用以標記要搜尋的地方的號碼或代碼,如標誌代碼,網格坐標,場地參考號碼或類似(如果有)', 'Number': '號碼', 'Number/Percentage of affected population that is Female & Aged 0-5': '婦女和0-5歲數量 /占受影響的人口百分比', 'Number/Percentage of affected population that is Female & Aged 13-17': '婦女和13至17歲數量 /占受影響的人口百分比', 'Number/Percentage of affected population that is Female & Aged 18-25': '號碼/百分比的受影响的移入的女性值(18到25', 'Number/Percentage of affected population that is Female & Aged 26-60': '號碼/百分比的受影响的移入的女性值(26到60', 'Number/Percentage of affected population that is Female & Aged 6-12': '號碼/百分比的受影响的移入的女性值(六到12', 'Number/Percentage of affected population that is Female & Aged 61+': '號碼/百分比的受影响的移入的女性值(61+', 'Number/Percentage of affected population that is Male & Aged 0-5': '號碼/百分比的受影响的移入的男性值(〇到五', 'Number/Percentage of affected population that is Male & Aged 13-17': '號碼/百分比的受影响的移入的男性值(13到17', 'Number/Percentage of affected population that is Male & Aged 18-25': '號碼/百分比的受影响的移入的男性值(18到25', 'Number/Percentage of affected population that is Male & Aged 26-60': '受影響人口中男性26到60歲的人數/百分比', 'Number/Percentage of affected population that is Male & Aged 6-12': '受影響人口中男性6到12歲的人數/百分比', 'Number/Percentage of affected population that is Male & Aged 61+': '受影響人口中男性61歲以上的人數/百分比', 'Numbers Only': '只能填數字', 'Nurse': '護士', 'Nursing Information Manager': '看護資訊管理程式', 'Nutrition problems': '營養問題', 'Nutrition': '營養', 'OK': '確定', 'OR Reason': '或原因', 'OR Status Reason': '或狀態原因', 'OR Status': '或狀態', 'Observer': '觀察程式 (observer)', 'Obsolete': '已作廢', 'Office Address': '辦公室地址', 'Office Details': '辦公室詳細資料', 'Office Phone': '辦公室電話', 'Office added': '辦公室新增', 'Office deleted': '辦公室刪除', 'Office updated': '辦公室更新', 'Office': '辦公室', 'Offices & Warehouses': '辦公室与倉庫', 'Offices': '辦公室', 'Offline Sync (from USB/File Backup)': '離线同步(從USB/檔案備份)', 'Offline Sync': '離線同步', 'Old': '舊', 'Older people as primary caregivers of children': '舊的人員為主要caregivers的子項', 'Older people in care homes': '舊的人員在管理Home', 'Older people participating in coping activities': '舊的人員参与复制活動', 'Older people with chronical illnesses': '舊的人chronical疾病', 'Older person (>60 yrs)': '舊的人員(>60年期)', 'On by default? (only applicable to Overlays)': '依預設值嗎? (僅適用于覆蓋)', 'On by default?': '依預設值嗎?', 'On-site Hospitalization': '現场住院', 'One Time Cost': '單次成本', 'One time cost': '單次成本', 'One-time costs': '一-時間成本', 'One-time': '一次', 'Oops! Something went wrong...': '糟糕! 發生錯誤。', 'Oops! something went wrong on our side.': '糟糕! 發生錯誤的面。', 'Opacity (1 for opaque, 0 for fully-transparent)': '透明(一為透明,〇為完全透明)', 'Open Map': '開啟地圖', 'Open area': '開啟區域', 'Open in New Tab': '在新標籤中開啟', 'Open recent': '開啟最近文件', 'Open': '開啟', 'OpenID Login': 'OpenID 登入', 'OpenID authenticated successfully.': 'OpenID順利鉴別。', 'Operating Rooms': '作業房間', 'Operation': '作業', 'Optional link to an Incident which this Assessment was triggered by.': '選用鏈結到此事件評估所觸發。', 'Optional': '選用', 'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': '選用。 如果您想要的樣式功能為基礎的值屬性,請選取屬性以使用在這裡。', 'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': '選用。 在GeoServer,這是工作區名稱空間URI (名稱不!)。在WFS getCapabilities,這是FeatureType名稱部分之前,冒號(:)。', 'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': '選用。 在GeoServer,這是工作區名稱空間URI。 在WFS getCapabilities,這是FeatureType名稱組件之前的冒號(:)。', 'Optional. 
The name of an element whose contents should be a URL of an Image file put into Popups.': '選用。 名稱元素的內容應該是一个URL的映像檔放入蹦現畫面。', 'Optional. The name of an element whose contents should be put into Popups.': '選用。 名稱元素的內容應該進入蹦現畫面。', 'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': '選用。 綱目的名稱。 在Geoserver這个格式為http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name。', 'Options': '選項', 'Organization Details': '組織明細', 'Organization Registry': '組織登錄', 'Organization added': '新增組織', 'Organization deleted': '刪除組織', 'Organization updated': '更新組織', 'Organization': '組織', 'Organizations': '組織', 'Origin of the separated children': '原始的分隔的子項', 'Origin': '源點', 'Other (describe)': '其他(說明)', 'Other (specify)': '其他(請說明)', 'Other Evidence': '其他證据', 'Other Faucet/Piped Water': '其他Faucet/管道式臨界值', 'Other Isolation': '其他隔離', 'Other Name': '其他名稱', 'Other activities of boys 13-17yrs before disaster': '其他活動的提升13 17yrs之前災難', 'Other activities of boys 13-17yrs': '其他活動的男女13-17yrs', 'Other activities of boys <12yrs before disaster': '其他活動的男孩<12yrs之前災難', 'Other activities of boys <12yrs': '其他活動的<12yrs男女', 'Other activities of girls 13-17yrs before disaster': '其他活動的女孩13 17yrs之前災難', 'Other activities of girls 13-17yrs': '其他活動的女孩13-17yrs', 'Other activities of girls<12yrs before disaster': '其他活動的girls<12yrs之前災難', 'Other activities of girls<12yrs': '其他活動的girls<12yrs', 'Other alternative infant nutrition in use': '其他使用中的嬰兒營養品', 'Other alternative places for study': '其他研究區', 'Other assistance needed': '其他需要的恊助', 'Other assistance, Rank': '其他协助,等級', 'Other current health problems, adults': '其他目前健康問題,成人', 'Other current health problems, children': '其他目前健康問題,小孩', 'Other events': '其他事件', 'Other factors affecting school attendance': '其他因素影响學校与會者', 'Other major expenses': '其他主要費用', 'Other non-food items': '其他非食品項目', 'Other recommendations': '其他建議', 'Other residential': '其他居住地', 'Other school assistance received': '其他协助學校接收', 'Other school assistance, details': '其他协助學校,詳細資料', 'Other school assistance, source': '其他协助學校,來源', 'Other settings can only be set by editing a file on the server': '其他設定才能設定來編輯一个伺服器上的檔案', 'Other side dishes in stock': '其他端餐盤庫存', 'Other types of water storage containers': '其他類型的臨界值儲存体儲存區', 'Other ways to obtain food': '其他方式來取得食品', 'Other': '其他', 'Outbound Mail settings are configured in models/000_config.py.': '外寄郵件設定中配置模型/000_config.. py。', 'Outbox': '寄件匣', 'Outgoing SMS Handler': 'SMS送出的處理常式', 'Outgoing SMS handler': 'SMS送出的處理常式', 'Overall Hazards': '整体危害', 'Overhead falling hazard': '額外落在危害', 'Overland Flow Flood': '歐弗蘭流程水災', 'Overlays': '重疊', 'Owned Resources': '擁有的資源', 'PF Number': 'PF號碼', 'PIN number': '密碼', 'PIN': '密碼', 'PL Women': 'PL/I婦女', 'Pack': '包', 'Packs': '套件', 'Pan Map: keep the left mouse button pressed and drag the map': '平移對映:請將左滑鼠按鈕并拖曳的對映', 'Parameters': '參數', 'Parent Office': '母項辦公室', 'Parent needs to be of the correct level': '母項必须是正確的層次', 'Parent needs to be set for locations of level': '母項需要設定的位置的層次', 'Parent needs to be set': '母項需要設定', 'Parent': '母項', 'Parents/Caregivers missing children': '母項/Caregivers遺漏子項', 'Partial Database Synchronization': '部分資料庫同步化', 'Partial': '局部', 'Participant': '參與者', 'Partner Details': '夥伴詳細資料', 'Partner added': '新增伙伴', 'Partner deleted': '已刪除夥伴', 'Partner updated': '已更新夥伴', 'Partners': '夥伴', 'Pashto': '普什圖文', 'Pass': '通過 (pass)', 'Passport': '護照', 'Password for authentication at the peer. 
Note that only HTTP Basic authentication is supported.': '鉴別的密碼在同層級。 注意,只支援HTTP基本鉴別。', 'Password': '密碼', 'Path': '路徑', 'Patients': '病患', 'Peer Details': '同層級詳細資料', 'Peer Registration Details': '同層級註冊詳細資料', 'Peer Registration Request': '同層級註冊申請', 'Peer Registration': '同層級註冊', 'Peer Type': '等式類型', 'Peer UID': '同層級UID', 'Peer added': '新增同層級', 'Peer deleted': '刪除同層級', 'Peer not allowed to push': '同層級不允許推送', 'Peer registration request added': '已加入之同層級註冊申請', 'Peer registration request deleted': '已刪除之同層級註冊申請', 'Peer registration request updated': '已更新之同層級註冊申請', 'Peer updated': '更新同層級', 'Peer': '對等', 'Peers': '同層級', 'Pending Requests': '擱置要求', 'Pending': '擱置中', 'People Needing Food': '人員需要食品', 'People Needing Shelter': '人員需要Shelter', 'People Needing Water': '人員需要水', 'People Trapped': '人員再遷就', 'People with chronical illnesses': '与人員chronical疾病', 'People': '個人', 'Performance Rating': '效能等級', 'Person 1': '人員 1', 'Person 1, Person 2 are the potentially duplicate records': '一人,人員二是潛在的重复記錄', 'Person 2': '人員 2', 'Person Data': '人員資料', 'Person De-duplicator': 'DE人員-duplicator', 'Person Details': '人員明細', 'Person Finder': '人員搜尋器', 'Person Registry': '人員登錄', 'Person added to Group': '群組成員已新增', 'Person added to Team': '群組成員已新增', 'Person added': '已新增人員', 'Person deleted': '人員刪除', 'Person details updated': '人員詳細資料更新', 'Person found': '找到人員', 'Person interviewed': '人員受訪', 'Person missing': '遺漏人員', 'Person reporting': '人員報告', 'Person who has actually seen the person/group.': '人員實際上就是人員/群組。', 'Person who is reporting about the presence.': '人員報告的参与。', 'Person who observed the presence (if different from reporter).': '觀察人員的狀態(如果不同報告)。', 'Person': '聯絡人', 'Person/Group': '人員/群組', 'Personal Data': '個人資料', 'Personal Effects Details': '个人效果詳細資料', 'Personal Effects': '个人效果', 'Personal Map': '个人對映', 'Personal Profile': '個人設定檔', 'Personal impact of disaster': '个人的影响災難', 'Persons in institutions': '人員在機构', 'Persons per Dwelling': '每个人員住宅', 'Persons with disability (mental)': '与人員殘障人士(內部)', 'Persons with disability (physical)': '与人員殘障人士(實体)', 'Persons': '人員', 'Phone 1': '電話一', 'Phone 2': '電話二', 'Phone': '電話', 'Phone/Business': '電話/商業', 'Phone/Emergency': '電話/緊急', 'Phone/Exchange (Switchboard)': '電話/交換(switchboard)', 'Phone/Exchange': '電話/交換', 'Photo Details': '照片詳細資料', 'Photo Taken?': '照片採取?', 'Photo added': '新增照片', 'Photo deleted': '刪除照片', 'Photo updated': '更新照片', 'Photo': '照片', 'Photograph': '照片', 'Photos': '照片', 'Physical Description': '實體說明', 'Physical Safety': '實体安全', 'Physical': '實體', 'Picture upload and finger print upload facility': '圖像上传和指紋上載機能', 'Picture': '圖片', 'Place for solid waste disposal': '位置的實心廢棄物', 'Place of Recovery': '位置的回复', 'Place': '街 (Place)', 'Places for defecation': '工作區的defecation', 'Places the children have been sent to': '工作區的子項已传送至', 'Planning': '規劃', 'Playing': '播放', 'Please correct all errors.': '請更正所有錯誤。', 'Please enter a First Name': '請輸入名字', 'Please enter a Google Key if you wish to use Google Layers': '如果你想使用谷歌層,請輸入谷歌的關鍵鑰', 'Please enter a Yahoo Key if you wish to use Yahoo Layers': '如果你想使用雅虎層,請輸入雅虎的關鍵鑰', 'Please enter a first name': '請輸入名字', 'Please enter a site OR a location': '請輸入一个站點或一个位置', 'Please enter a valid email address': '請輸入一個有效的電子郵件地址', 'Please enter the first few letters of the Person/Group for the autocomplete.': '請輸入的前幾个字母的人員/群組使用。', 'Please enter the recipient': '請輸入收件者', 'Please fill this!': '請填寫這个!', 'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened. 
If a ticket was issued then please provide the Ticket ID.': '請提供您所指頁面的URL,並說明您預期發生什麼與實際發生什麼。如果已開立案件單,請提供案件單ID。', 'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': '請提供您所指頁面的URL,並說明您預期發生什麼與實際發生什麼。', 'Please report here where you are:': '請在此報告您所在的位置:', 'Please select another level': '請選取另一個層級', 'Please select': '請選取', 'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': '請註冊您的行動電話,因為這可讓我們向您傳送簡訊。請包含完整區碼。', 'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': '請詳細說明妥善處理該疾病時遇到的任何問題與障礙(適當時請以數字表示)。您也可以補充改善現況的建議。', 'Please use this field to record any additional information, including a history of the record if it is updated.': '請使用這個欄位記錄任何其他資訊,包括記錄更新時的歷史紀錄。', 'Please use this field to record any additional information, including any Special Needs.': '請使用這個欄位記錄任何其他資訊,包括任何特殊需求。', 'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': '請使用這個欄位記錄任何其他資訊,例如Ushahidi實例ID;記錄更新時請包含歷史紀錄。', 'Pledge Aid to match these Requests': '認捐援助以符合這些要求', 'Pledge Aid': '認捐援助', 'Pledge Status': '認捐狀態', 'Pledge Support': '認捐支援', 'Pledge': '認捐', 'Pledged': '已認捐', 'Pledges': '認捐', 'Poisonous Gas': '有毒氣體', 'Police': '警察', 'Policy': '政策', 'Pollution and other environmental': '污染和其他環境', 'Polygon reference of the rating unit': '評等單元的多邊形參照', 'Polygon': '多邊形', 'Poor': '差', 'Population Statistic Details': '人口統計資料明細', 'Population Statistic added': '人口統計資料新增', 'Population Statistic deleted': '人口統計資料刪除', 'Population Statistic updated': '人口統計資料更新', 'Population Statistics': '人口統計', 'Population and number of households': '人口與戶數', 'Population': '人口', 'Popup Fields': '蹦現欄位', 'Popup Label': '蹦現標籤', 'Porridge': '稀飯', 'Port Closure': '港口關閉', 'Port': '埠', 'Position Catalog': '職位型錄', 'Position Details': '職位詳細資料', 'Position added': '新增職位', 'Position deleted': '刪除職位', 'Position type': '職位類型', 'Position updated': '更新職位', 'Position': '職位', 'Positions': '職位', 'Postcode': '郵遞區號', 'Poultry restocking, Rank': '家禽補充,等級', 'Pounds': '英鎊', 'Power Failure': '電源故障', 'Powered by Sahana Eden': '採用Sahana Eden', 'Pre-cast connections': '預鑄接頭', 'Preferred Name': '暱稱', 'Pregnant women': '孕婦', 'Preliminary': '初步的', 'Presence Condition': '存在條件', 'Presence Log': '存在日誌', 'Presence': '存在', 'Previous View': '上一個檢視', 'Previous': '上一頁', 'Primary Name': '主要名稱', 'Primary Occupancy': '主要佔用', 'Priority Level': '優先順序層級', 'Priority from 1 to 9. 
1 is most preferred.': '優先順序從1到9,1為最優先。', 'Priority': '優先順序', 'Private': '專用', 'Problem Administration': '問題管理', 'Problem Details': '問題明細', 'Problem Group': '問題群組', 'Problem Title': '問題標題', 'Problem added': '新增問題', 'Problem connecting to twitter.com - please refresh': '連線twitter.com時發生問題-請重新整理', 'Problem deleted': '問題已刪除', 'Problem updated': '問題已更新', 'Problem': '問題', 'Problems': '問題', 'Procedure': '程序', 'Process Received Shipment': '處理已接收的出貨', 'Process Shipment to Send': '處理待傳送的出貨', 'Procurements': '採購', 'Product Description': '產品說明', 'Product Name': '產品名稱', 'Profile updated': '更新設定檔', 'Profile': '設定檔', 'Project Activities': '專案活動', 'Project Details': '專案詳細資料', 'Project Management': '專案管理', 'Project Status': '專案狀態', 'Project Tracking': '專案追蹤', 'Project added': '新增專案', 'Project deleted': '已刪除專案', 'Project has no Lat/Lon': '專案沒有緯度/經度', 'Project updated': '專案更新', 'Project': '專案 (project)', 'Projection Details': '投影詳細資料', 'Projection added': '新增投影', 'Projection deleted': '投影刪除', 'Projection updated': '投影更新', 'Projection': '投影', 'Projections': '投影', 'Projects': '專案', 'Property reference in the council system': '議會制度中的物業參考', 'Protected resource': '受保護的資源', 'Protection': '保護', 'Provide Metadata for your media files': '為您的媒體檔案提供中繼資料', 'Provide a password': '提供一個密碼', 'Provide an optional sketch of the entire building or damage points. Indicate damage points.': '提供整棟建築物或損壞點的選用草圖。請標示損壞點。', 'Province': '省', 'Proxy-server': 'Proxy伺服器', 'Psychiatrics/Adult': '精神科/成人', 'Public Event': '公開事件', 'Public and private transportation': '公共和私有運輸', 'Public assembly': '公眾集會', 'Public': '公用', 'Pull tickets from external feed': '從外部來源拉入案件單', 'Punjabi': '旁遮普文', 'Purchase Date': '採購日期', 'Push tickets to external system': '推送案件單至外部系統', 'Put a choice in the box': '在方框中填入選項', 'Pyroclastic Flow': '火山碎屑流', 'Pyroclastic Surge': '火山碎屑湧浪', 'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Python序列(Serial)模組在執行中的Python內無法使用-需要安裝才能啟用數據機', 'Python needs the ReportLab module installed for PDF export': 'Python需要安裝ReportLab模組才能匯出PDF', 'Quantity Committed': '承諾數量', 'Quantity Fulfilled': '履行數量', 'Quantity in Transit': '在途數量', 'Quantity': '數量', 'Quarantine': '隔離', 'Queries': '查詢', 'Query Feature': '查詢圖徵', 'Query': '查詢 (query)', 'Queryable?': '可查詢?', 'RC frame with masonry infill': '具砌體填充的RC框架', 'RECORD A': '記錄 A', 'RECORD B': '記錄 B', 'RESPONSE': '回應', 'RPC Service URL': 'RPC服務URL', 'Race': '種族', 'Radio Callsign': '電台呼號', 'Radiological Hazard': '輻射危害', 'Railway Accident': '鐵路事故', 'Railway Hijacking': '鐵路劫持', 'Rain Fall': '降雨', 'Rapid Assessment Details': '快速評估詳細資料', 'Rapid Assessment added': '快速評估新增', 'Rapid Assessment deleted': '快速評估刪除', 'Rapid Assessment updated': '快速評估更新', 'Rapid Assessment': '快速評估', 'Rapid Assessments & Flexible Impact Assessments': '快速評估與彈性影響評估', 'Rapid Assessments': '快速評估', 'Rapid Close Lead': '快速關閉商機', 'Rapid Data Entry': '快速數據輸入', 'Rating Scale': '評分量表', 'Raw Database access': '原始資料庫存取', 'Read-Only': '唯讀', 'Read-only': '唯讀', 'Real World Arbitrary Units': '真實世界任意單位', 'Receive Items': '接收項目', 'Receive New Shipment': '接收新出貨', 'Receive Shipment': '接收貨物', 'Receive this shipment?': '接收此出貨?', 'Receive': '接收', 'Received By Person': '接收人', 'Received By': '接收', 'Received Item Details': '接收項目詳細資料', 'Received Item deleted': '接收項目刪除', 'Received Item updated': '接收項目更新', 'Received Shipment Details': '收到出貨詳細資料', 'Received Shipment canceled and items removed from Inventory': '接收貨物已取消,項目已從庫存移除', 
'Received Shipment canceled': '接收貨物取消', 'Received Shipment updated': '接收貨物更新', 'Received Shipments': '收到出貨', 'Received': '已接收', 'Receiving and Sending Items': '接收和發送項目', 'Recipient': '收件者', 'Recipients': '收件人', 'Recommendations for Repair and Reconstruction or Demolition': '維修和重建或拆除的建議', 'Record %(id)s created': '記錄 %(id)s 已建立', 'Record %(id)s updated': '記錄 %(id)s 已更新', 'Record Details': '記錄詳細資料', 'Record ID': '記錄 ID', 'Record Saved': '儲存記錄', 'Record added': '已新增記錄', 'Record any restriction on use or entry': '記錄任何使用或進入的限制', 'Record deleted': '刪除記錄', 'Record last updated': '記錄前次更新', 'Record not found!': '找不到記錄!', 'Record not found': '找不到記錄', 'Record updated': '更新記錄', 'Record': '記錄', 'Recording and Assigning Assets': '記錄及指派資產', 'Records': '記錄', 'Recovery Reports': '回復報告', 'Recovery Request added': '新增回復要求', 'Recovery Request deleted': '回復要求刪除', 'Recovery Request updated': '回復要求更新', 'Recovery Request': '回復要求', 'Recovery Requests': '回復要求', 'Recovery report added': '復原報告新增', 'Recovery report deleted': '復原報告刪除', 'Recovery report updated': '復原報告更新', 'Recovery': '回復', 'Recurring Cost': '循環成本', 'Recurring cost': '循環成本', 'Recurring costs': '循環成本', 'Recurring': '重複出現', 'Red Cross / Red Crescent': '紅十字會/紅新月會', 'Red': '紅色', 'Reference Document': '參考文件', 'Refers to default syncronization policy adopted if data entry recieved from other machine is already present in your machine.': '指從其他機器接收的資料項目已存在於您的機器時,所採用的預設同步化原則。', 'Refresh Rate (seconds)': '更新頻率(秒)', 'Region Location': '區域位置', 'Region': '區域', 'Regional': '地區', 'Regions': '地區', 'Register Person into this Camp': '登錄人員到這個Camp', 'Register Person into this Shelter': '登錄人員到這個Shelter', 'Register Person': '登錄人員', 'Register them as a volunteer': '將他們登錄為志願者', 'Register': '註冊', 'Registered People': '註冊的人員', 'Registered users can': '註冊使用者可以', 'Registering ad-hoc volunteers willing to contribute': '登錄願意貢獻的臨時志願者', 'Registration Details': '註冊詳細資料', 'Registration added': '新增登錄', 'Registration entry deleted': '刪除登錄項目', 'Registration is pending approval': '申請等候核准中', 'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': '登錄仍在等候核准者 (%s) 核准-請稍候直到收到確認。', 'Registration key': '登錄索引鍵', 'Registration successful': '登錄成功', 'Registration updated': '更新登錄', 'Registration': '登錄', 'Registry keeps track of all the relief organizations working in the disaster region. 
It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '登錄追蹤所有的組織工作在災難區域。 它不只會擷取工作區所作用中,但也會擷取的相關資訊范圍的專案會提供每一个區域。', 'Rehabilitation/Long Term Care': '复健/長期照護', 'Reinforced masonry': 'masonry强化', 'Rejected': '已拒絕', 'Reliable access to sanitation/hygiene items': '可靠地存取設施/hygiene項目', 'Relief Item Catalog': '浮雕項目型錄', 'Relief Item Details': '浮雕項目詳細資料', 'Relief Item': '浮雕項目', 'Relief Items stored in Inventories in different locations': '項目是儲存在位于不同地點的庫存', 'Relief Items': '浮雕項目', 'Relief Team': '救難隊', 'Relief': '浮雕', 'Religion': '宗教', 'Religious Leader': '宗教領導者', 'Religious': '教歷', 'Relocate as instructed in the <instruction>': '重新定位為中的指示<instruction>', 'Remove Activity from this event': '從這個事件中移除活動', 'Remove Asset from this event': '移除資產從這个事件', 'Remove Asset from this scenario': '移除資產從這个實務', 'Remove Document from this request': '從這個需求中移除文件', 'Remove Facility from this event': '移除機能從這个事件', 'Remove Facility from this scenario': '移除機能從這个實務', 'Remove Feature: Select the feature you wish to remove & press the delete key': '移除特性:選取的功能時,您要移除按下"刪除"鍵', 'Remove Human Resource from this event': '移除人力資源從這个事件', 'Remove Human Resource from this scenario': '移除人力資源從這个實務', 'Remove Item from Inventory': '從庫存移除項目', 'Remove Map Configuration from this event': '移除對映配置從這个事件', 'Remove Map Configuration from this scenario': '移除對映配置從這个實務', 'Remove Person from Group': '刪除組員', 'Remove Person from Team': '刪除組員', 'Remove Skill from Request': '從需求中移除技能', 'Remove Skill': '移除技能', 'Remove Task from this event': '從事件中移除任務', 'Remove Task from this scenario': '從情境中移除此任務', 'Remove this asset from this event': '移除這个資產從這个事件', 'Remove this asset from this scenario': '移除這个資產從這个實務', 'Remove this facility from this event': '從這個活動中移除此設備', 'Remove this facility from this scenario': '從這個情境中移除此設備', 'Remove this human resource from this event': '從這個活動中移除此人力資源', 'Remove this human resource from this scenario': '從這個情境中移除此人力資源', 'Remove this task from this event': '從這個活動中移除此任務', 'Remove this task from this scenario': '從這個情境中移除此任務', 'Remove': '移除', 'Removed from Group': '組員已刪除', 'Removed from Team': '組員已刪除', 'Repair': '修復', 'Repaired': '修复', 'Repeat your password': '重复您的密碼', 'Replace All': '全部取代', 'Replace if Master': '如果主要取代', 'Replace if Newer': '若較新,則取代', 'Replace with Remote': '以遠端取代', 'Replace': '取代', 'Replace/Master': '取代/主要', 'Replace/Newer': '取代/更新', 'Report Another Assessment...': '報告另一個評估...', 'Report Details': '報告詳細資料', 'Report Resource': '報告資源', 'Report Type': '報告類型', 'Report Types Include': '報告類型包括', 'Report a Problem with the Software': '回報軟體問題', 'Report added': '已新增報告', 'Report deleted': '已刪除報告', 'Report my location': '報告我的位置', 'Report that person missing': '報告的人員遺漏', 'Report the contributing factors for the current EMS status.': '報告的附加因素的現行EMS狀態。', 'Report the contributing factors for the current OR status.': '報告的附加因素的現行或狀態。', 'Report the person as found': '報告的人員發現', 'Report them as found': '它們報告發現', 'Report them missing': '它們報告遺漏', 'Report updated': '報告已更新', 'Report': '報告', 'Reported By': '報告者', 'Reporter Name': '報告名稱', 'Reporter': '報告者', 'Reporter:': '報告:', 'Reporting on the projects in the region': '報告中的專案區域', 'Reports': '報告', 'Request Added': '新增要求', 'Request Aid': '輔助請求', 'Request Canceled': '已取消申請', 'Request Detail': '要求詳細資料', 'Request Details': '要求的詳細資料', 'Request From': '要求來源', 'Request Item Details': '要求項目詳細資料', 'Request Item added': '要求新增項目', 'Request Item deleted': '要求刪除項目', 'Request Item from Available Inventory': '要求項目從可用庫存', 
'Request Item updated': '要求更新項目', 'Request Item': '申請項目', 'Request Items': '申請項目', 'Request Status': '要求狀態', 'Request Type': '要求類型', 'Request Updated': '要求已更新項目', 'Request added': '新增要求', 'Request deleted': '已刪除要求', 'Request for Role Upgrade': '請求的角色升級', 'Request updated': '要求已更新項目', 'Request': '要求', 'Request, Response & Session': '要求,回應及階段作業', 'Requested By Facility': '所要求的機能', 'Requested By Site': '所要求的網站', 'Requested By Warehouse': '所要求的倉儲', 'Requested By': '申請者', 'Requested From': '要求從', 'Requested Items': '所要求的項目', 'Requested Skill Details': '所需技能細節', 'Requested Skill updated': '所需技能更新', 'Requested Skill': '所需技能', 'Requested Skills': '所需技能', 'Requested by': '要求者', 'Requested on': '要求上', 'Requested': '已要求', 'Requester': '要求者', 'Requestor': '要求者', 'Requests Management': '要求管理', 'Requests for Item': '要求的項目', 'Requests': '需求', 'Required Skill': '所需技能', 'Required by other servers.': '所需的其他伺服器。', 'Requires Login!': '需要登入!', 'Requires login': '需要登入', 'Rescue and recovery': '應急與恢復系統 (Rescue and Recovery)', 'Reset Password': '重設密碼', 'Reset form': '重設表單', 'Reset': '重設', 'Resize Feature: Select the feature you wish to resize & then Drag the associated dot to your desired size': '調整功能:選擇您希望調整的功能,然後拖動相關的點到你想要的大小', 'Resolve Conflict': '解決衝突', 'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': '解決鏈接啟動一個新的畫面,有助於解決這些重複記錄和更新數據庫', 'Resolve': '解決', 'Resource Details': '資源詳細資料', 'Resource added': '新增資源', 'Resource deleted': '資源已刪除', 'Resource updated': '資源已更新', 'Resource': '資源', 'Resources': '資源', 'Respiratory Infections': '呼吸感染', 'Response Details': '回應明細', 'Response added': '新增回應', 'Response deleted': '刪除回應', 'Response updated': '回應已更新', 'Response': '回應', 'Responses': '回應', 'Restricted Access': '受限存取權', 'Restricted Use': '使用限制', 'Restrictions': '限制', 'Results': '結果', 'Resume Sync': '恢复同步', 'Retail Crime': '零售犯罪', 'Retrieve Password': '擷取密碼', 'Return to Request': '回到要求', 'Return': '返回', 'Returned From': '传回從', 'Returned': '已返回', 'Review Incoming Shipment to Receive': '檢閱送入出貨以接收', 'Rice': '每', 'Right now, your system is set default synchronization scheme. 
You are currently able to synchronize your server with other servers.': '目前您的系統設定為預設同步方案。您現在可以與其他伺服器同步您的伺服器。', 'Right-hand headline': '右側標題', 'Right-to-Left': '由右至左', 'Riot': '暴動', 'River Details': '河流詳細資料', 'River added': '河流新增', 'River deleted': '河流刪除', 'River updated': '河流更新', 'River': '河流', 'Rivers': '河流', 'Road Accident': '道路事故', 'Road Closed': '道路關閉', 'Road Conditions': '道路條件', 'Road Delay': '道路延遲', 'Road Hijacking': '道路劫持', 'Road Usage Condition': '道路使用條件', 'Role Details': '角色詳細資料', 'Role Required': '需要角色', 'Role Updated': '更新角色', 'Role added': '已新增角色', 'Role deleted': '已刪除角色', 'Role updated': '更新角色', 'Role': '角色', 'Role-based': '角色型', 'Roles Permitted': '允許的角色', 'Roles': '角色', 'Roof tile': '屋瓦', 'Roofs, floors (vertical load)': '屋頂、樓板(垂直載重)', 'Room Details': '房間詳細資料', 'Room added': '新增房間', 'Room deleted': '房間已刪除', 'Room updated': '房間已更新', 'Rooms': '房間', 'Rotate Feature: Select the feature you wish to rotate & then Drag the associated dot to rotate to your desired location': '旋轉圖徵:選取您要旋轉的圖徵,然後拖曳相關聯的點旋轉至您想要的位置', 'Row Choices (One Per Line)': '列選項(每行一個)', 'Rows in table': '表格中的橫列', 'Rows selected': '已選取的列數', 'Run Functional Tests': '執行功能測試', 'Run Interval': '執行間隔', 'Running Cost': '營運成本', 'Russian': '俄文', 'SITUATION': '狀況', 'Safe environment for vulnerable groups': '弱勢群體的安全環境', 'Safety Assessment Form': '安全評量表單', 'Safety of children and women affected by disaster': '受災兒童與婦女的安全', 'Safety of children and women affected by disaster?': '受災兒童與婦女的安全?', 'Sahana Administrator': 'Sahana管理者', 'Sahana Blue': 'Sahana藍色', 'Sahana Community Chat': 'Sahana 社群聊天室', 'Sahana Eden <= Other sync (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <= 其他同步(Sahana Agasti、Ushahidi等)', 'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=> 其他(Sahana Agasti、Ushahidi等)', 'Sahana Eden <=> Other sync (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=> 其他同步(Sahana Agasti、Ushahidi等)', 'Sahana Eden <=> Other': 'Sahana Eden <=> 其他', 'Sahana Eden <=> Sahana Eden sync': 'Sahana Eden <=> Sahana Eden同步', 'Sahana Eden Disaster Management Platform': 'Sahana Eden災難管理平台', 'Sahana Eden Humanitarian Management Platform': 'Sahana Eden人道救援管理平台', 'Sahana Eden Website': 'Sahana Eden 網站', 'Sahana Eden is a family of applications that provide solutions to coordination and collaboration for organisations working in disaster management. The following modules are available': 'Sahana Eden 是一套救災管理網站系統,可協助救援單位進行災難管理的分工合作。 下列模組可用', 'Sahana Eden is a family of applications that provide solutions to coordination and collaboration for organisations working in disaster management.': 'Sahana Eden 是一套救災管理網站系統,可協助救援單位進行災難管理的分工合作。', 'Sahana FOSS Disaster Management System': 'Sahana FOSS災難管理系統', 'Sahana Green': 'Sahana綠色', 'Sahana Login Approval Pending': 'Sahana登入核准擱置中', 'Sahana Steel': 'Sahana鋼', 'Sahana access granted': '已授予Sahana存取權', 'Sahana has to hook to a network port other than port being used by website (normally port 80). If your firewall blocks this port you have change it to any other free port. For information on eligible ports, see': 'Sahana必須連接到網站所用連接埠(通常是80埠)以外的網路連接埠。如果您的防火牆封鎖了這個連接埠,請將它改為任何其他可用的連接埠。合適連接埠的相關資訊,請參閱', 'Sahana: new request has been made. 
Please login to see if you can fulfil the request.': 'Sahana:已有新的要求。請登入查看您是否能滿足該要求。', 'Salted Fish': '鹹魚', 'Salvage material usable from destroyed houses': '毀損房屋可回收使用的物料', 'Salvage material usable from destroyed schools': '毀損學校可回收使用的物料', 'Sanitation problems': '衛生問題', 'Satellite Layer': '衛星層', 'Satellite Office': '衛星辦公室', 'Satellite': '衛星', 'Saturday': '星期六', 'Save any Changes in the one you wish to keep': '將任何變更儲存在您要保留的那一筆', 'Save': '儲存', 'Save: Default Lat, Lon & Zoom for the Viewport': '儲存:檢視區的預設緯度、經度與縮放', 'Saved.': '已儲存。', 'Saving...': '正在儲存...', 'Scale of Results': '結果量表', 'Scanned File': '掃描檔案', 'Scenario Details': '情境詳細資料', 'Scenario added': '新增情境', 'Scenario deleted': '刪除情境', 'Scenario updated': '更新情境', 'Scenario': '情境', 'Scenarios': '情境', 'Schedule': '排程', 'Schema': '綱目', 'School Closure': '學校關閉', 'School Code': '學校代碼', 'School District Details': '學區詳細資料', 'School District added': '學區新增', 'School District deleted': '學區刪除', 'School District updated': '學區更新', 'School District': '學區', 'School Districts': '學區', 'School Lockdown': '學校封鎖', 'School Report Details': '學校報告詳細資料', 'School Report added': '學校報告新增', 'School Report deleted': '學校報告刪除', 'School Report updated': '學校報告更新', 'School Reports will be moved to Shelter Registry as this is what they are. Rapid Assessments will be added here.': '學校報告將移至Shelter登錄,因為它們本質上就是如此。快速評估將在此新增。', 'School Reports': '學校報告', 'School Teacher': '學校老師', 'School activities': '學校活動', 'School assistance received/expected': '學校協助已接收/預期', 'School assistance': '學校協助', 'School attendance': '學校出席率', 'School destroyed': '學校損毀', 'School heavily damaged': '學校嚴重損壞', 'School tents received': '已收到學校帳篷', 'School tents, source': '學校帳篷,來源', 'School used for other purpose': '學校用於其他用途', 'School': '學校', 'School/studying': '學校/就學', 'Schools': '學校', 'Search & List Bin Types': '搜尋及列出儲存箱類型', 'Search & List Bins': '搜尋及列出儲存箱', 'Search & List Catalog': '搜尋及列出型錄', 'Search & List Category': '搜尋及列出種類', 'Search & List Items': '搜尋及列出項目', 'Search & List Locations': '搜尋及列出位置', 'Search & List Site': '搜尋及列出網站', 'Search & List Sub-Category': '搜尋及列出子類別', 'Search & List Unit': '搜尋及列出單位', 'Search Activities': '搜尋活動', 'Search Activity Report': '搜尋活動報告', 'Search Addresses': '搜尋位址', 'Search Aid Requests': '搜尋輔助要求', 'Search Alternative Items': '搜尋替代項目', 'Search Assessment Summaries': '搜尋評量摘要', 'Search Assessments': '搜尋評量', 'Search Asset Assignments': '搜尋資產分派', 'Search Asset Log': '搜尋資產日誌', 'Search Assets': '搜尋資產', 'Search Baseline Type': '搜尋基準線類型', 'Search Baselines': '搜尋基準線', 'Search Brands': '搜尋品牌', 'Search Budgets': '搜尋預算', 'Search Bundles': '搜尋套組', 'Search Camp Services': '搜尋Camp服務', 'Search Camp Types': '搜尋Camp類型', 'Search Camps': '搜尋Camps', 'Search Catalog Items': '搜尋型錄項目', 'Search Catalogs': '搜尋型錄', 'Search Category<>Sub-Category<>Catalog Relation': '搜尋Category<>Sub-Category<>Catalog關係', 'Search Certificates': '搜尋憑證', 'Search Certifications': '搜尋認證', 'Search Checklists': '搜尋檢核表', 'Search Cluster Subsectors': '搜尋叢集Subsectors', 'Search Clusters': '搜尋叢集', 'Search Commitment Items': '搜尋承諾項目', 'Search Commitments': '搜尋承諾', 'Search Competencies': '搜尋能力', 'Search Competency Ratings': '搜尋能力等級', 'Search Configs': '搜尋配置', 'Search Contact Information': '搜尋聯絡資訊', 'Search Contacts': '搜尋聯絡人', 'Search Course Certicates': '搜尋課程憑證', 'Search Courses': '搜尋課程', 'Search Credentials': '搜尋資格認證', 'Search Distribution Items': '搜尋配送項目', 'Search Distributions': '搜尋配送', 'Search Documents': '搜尋文件', 'Search Donors': '搜尋捐贈者', 'Search Entries': '搜尋項目', 'Search Events': '搜尋事件', 'Search 
Facilities': '搜尋機能', 'Search Feature Layers': '搜尋功能層', 'Search Flood Reports': '搜尋水災報告', 'Search Geonames': '搜尋GeoNames', 'Search Groups': '搜尋群組', 'Search Homes': '搜尋家庭', 'Search Hospitals': '搜尋醫院', 'Search Human Resources': '搜尋人力資源', 'Search Identity': '搜尋身分', 'Search Images': '搜尋影像', 'Search Impact Type': '搜尋影响類型', 'Search Impacts': '搜尋影响', 'Search Import Files': '搜尋匯入檔案', 'Search Incident Reports': '搜尋事件報告', 'Search Incidents': '搜尋事件', 'Search Inventory Items': '搜尋庫存項目', 'Search Inventory Stores': '儲存搜尋庫存', 'Search Inventory items': '搜尋庫存項目', 'Search Item Catalog Category(s)': '搜尋項目型錄分類', 'Search Item Catalog(s)': '搜尋項目目錄', 'Search Item Categories': '搜尋項目類別', 'Search Item Packs': '搜尋項目套件', 'Search Item Sub-Category(s)': '搜尋項目子類別(S)', 'Search Items': '搜尋項目', 'Search Job Roles': '搜尋工作角色', 'Search Keys': '搜尋關鍵字', 'Search Kits': '搜尋套件', 'Search Landmarks': '搜尋里程碑', 'Search Layers': '搜尋層', 'Search Level 1 Assessments': '搜尋層次一評量', 'Search Level 2 Assessments': '搜尋層次二評量', 'Search Level': '搜尋層級', 'Search Locations': '搜尋位置', 'Search Log Entry': '搜尋日誌項目', 'Search Map Configurations': '搜尋對映配置', 'Search Markers': '搜尋標記', 'Search Members': '搜尋成員', 'Search Membership': '搜尋成員資格', 'Search Memberships': '搜尋成員資格', 'Search Metadata': '搜尋meta資料', 'Search Missions': '搜尋任務', 'Search Need Type': '搜尋需要類型', 'Search Needs': '搜尋需求', 'Search Notes': '搜尋Notes', 'Search Offices': '搜尋辦公室', 'Search Organizations': '搜尋組織', 'Search Partners': '搜尋伙伴', 'Search Patients': '查詢病人', 'Search Peer': '搜尋同層級', 'Search Peers': '搜尋對等', 'Search Personal Effects': '搜尋个人效果', 'Search Persons': '搜尋人員', 'Search Photos': '搜尋照片', 'Search Population Statistics': '搜尋人口统計資料', 'Search Positions': '搜尋位置', 'Search Problems': '搜尋問題', 'Search Projections': '搜尋估算', 'Search Projects': '搜尋專案', 'Search Rapid Assessments': '快速搜尋評量', 'Search Received Items': '搜尋接收項目', 'Search Received Shipments': '搜尋收到出貨', 'Search Records': '搜尋記錄', 'Search Recovery Reports': '搜尋回复報告', 'Search Registations': '搜尋Registations', 'Search Registration Request': '搜尋登錄要求', 'Search Report': '搜尋報告', 'Search Reports': '搜尋報告', 'Search Request Items': '搜尋要求項目', 'Search Request': '搜尋要求', 'Search Requested Items': '搜尋所要求的項目', 'Search Requested Skills': '查詢要求技能', 'Search Requests': '搜尋需求', 'Search Resources': '搜尋資源', 'Search Responses': '搜尋回應', 'Search Rivers': '搜尋Rivers', 'Search Roles': '搜尋角色', 'Search Rooms': '搜尋檔案室', 'Search Scenarios': '搜尋實務', 'Search School Districts': '搜尋學校行政區', 'Search School Reports': '搜尋學校報告', 'Search Sections': '搜尋區段', 'Search Sectors': '搜尋煽形', 'Search Sent Items': '传送搜尋項目', 'Search Sent Shipments': '传送搜尋出貨', 'Search Service Profiles': '搜尋服務設定檔', 'Search Settings': '搜尋設定', 'Search Shelter Services': '搜尋Shelter服務', 'Search Shelter Types': '搜尋Shelter類型', 'Search Shelters': '搜尋Shelters', 'Search Shipment Transit Logs': '搜尋出貨传輸日誌', 'Search Shipment/Way Bills': '出貨/搜尋方式清單', 'Search Shipment<>Item Relation': '搜尋Shipment<>Item關系', 'Search Site(s)': '搜尋(S)', 'Search Skill Equivalences': '搜尋技能同等', 'Search Skill Provisions': '搜尋技能條款', 'Search Skill Type': '搜尋技能類型', 'Search Skill Types': '搜尋技能類型', 'Search Skill': '搜尋技能', 'Search Skills': '搜尋技能', 'Search Solutions': '搜尋解決方案', 'Search Sources': '搜尋來源', 'Search Staff Types': '搜尋人員類型', 'Search Staff or Volunteer': '搜尋人員或主動参与者', 'Search Staff': '搜尋人員', 'Search Status': '搜尋狀態', 'Search Storage Bin Type(s)': '搜尋儲存bin類型(S)', 'Search Storage Bin(s)': '搜尋儲存BIN(S)', 'Search Storage Location(s)': '搜尋儲存位置(S)', 'Search Subscriptions': '搜尋訂閱', 'Search Subsectors': '搜尋Subsectors', 'Search Support Requests': '搜尋支援要求', 'Search Tasks': 
'搜尋作業', 'Search Teams': '搜尋團隊', 'Search Themes': '搜尋主題', 'Search Tickets': '搜尋摘記卷', 'Search Tracks': '搜尋追蹤', 'Search Trainings': '搜尋撰文', 'Search Twitter Tags': '搜尋Twitter標籤', 'Search Units': '搜尋單位', 'Search Updates': '搜尋更新', 'Search Users': '搜尋使用者', 'Search Vehicle Details': '查詢交通工具細節', 'Search Vehicles': '查詢交通工具', 'Search Volunteer Availability': '搜尋自愿可用性', 'Search Volunteer Registrations': '搜尋自愿登錄', 'Search Volunteers': '搜尋志愿者', 'Search Warehouses': '搜尋倉庫', 'Search and Edit Group': '搜尋及編輯群組', 'Search and Edit Individual': '搜尋及編輯个別', 'Search by ID Tag': '搜尋依ID標籤', 'Search for Items': '搜尋項目', 'Search for Staff or Volunteers': '搜尋人員或志愿者', 'Search for a Hospital': '搜尋一个醫院', 'Search for a Location by name, including local names.': '搜尋位置名稱,包括本端名稱。', 'Search for a Location': '搜尋位置', 'Search for a Person': '人員查詢', 'Search for a Project': '搜尋一个專案', 'Search for a Request': '搜尋要求', 'Search for a shipment by looking for text in any field.': '搜尋一个出貨尋找文字中的任何欄位。', 'Search for a shipment received between these dates': '搜尋的貨物接收在這些日期之間', 'Search for a vehicle by text.': '以文字查詢交通工具.', 'Search for an Organization by name or acronym': '搜尋的組織名稱或縮寫', 'Search for an Organization by name or acronym.': '搜尋的組織名稱或縮寫。', 'Search for an asset by text.': '搜尋資產的文字。', 'Search for an item by category.': '搜尋項目類別。', 'Search for an item by Year of Manufacture.': '以製造日期查詢項目.', 'Search for an item by text.': '搜尋項目的文字。', 'Search for asset by country.': '搜尋資產的国家。', 'Search for office by country.': '搜尋的辦公室国家/地區。', 'Search for office by organization.': '搜尋的辦公室組織。', 'Search for office by text.': '搜尋的辦公室文字。', 'Search for warehouse by country.': '搜尋倉儲的国家/地區。', 'Search for warehouse by organization.': '搜尋倉儲的組織。', 'Search for warehouse by text.': '搜尋倉儲的文字。', 'Search here for a person record in order to:': '搜尋這裡的一个个人記錄中,以便:', 'Search messages': '搜尋訊息', 'Search': '搜尋', 'Searching for different groups and individuals': '搜尋不同的群組及个体', 'Secondary Server (Optional)': '次要伺服器(選用)', 'Seconds must be a number between 0 and 60': '秒必须是〇和60之間的數字', 'Seconds must be between 0 and 60': '秒必须在〇和60之間', 'Section Details': '區段詳細資料', 'Section added': '新增區段', 'Section deleted': '刪除區段', 'Section updated': '更新區段', 'Sections': '區段', 'Sector Details': '行業詳細資料', 'Sector added': '新增行業', 'Sector deleted': '刪除磁區', 'Sector updated': '行業更新', 'Sector': '區塊', 'Sector(s)': '磁區(S)', 'Sector(s):': '類別(秒):', 'Sectors': '磁區', 'Security Policy': '安全原則 (security policy)', 'Security Status': '安全狀態', 'Security problems': '安全問題', 'Security': '安全', 'See All Entries': '查看所有項目', 'See all': '請參閱全部', 'See unassigned recovery requests': '請参閱未回复要求', 'Seen': '看到', 'Select 2 potential locations from the dropdowns.': '選取二个可能位置的清單。', 'Select Items from the Request': '選取項目從要求', 'Select Items from this Inventory': '選取項目從這个資產', 'Select Language': '選取語言', 'Select Photos': '選取照片', 'Select a location': '選取位置', 'Select a question from the list': '從清單中選取一个問題', 'Select a range for the number of total beds': '選取一个范圍的總數beds', 'Select all that apply': '選取所有適用的', 'Select an Organization to see a list of offices': '選取組織才能見到一份清單的辦公室', 'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': '選取重疊的評估和活動相關的每一个需要識別填補空白區域。', 'Select the person assigned to this role for this project.': '選取人員指派給這个角色適用于這个專案。', 'Select the person associated with this scenario.': '選擇相關的聯絡人.', 'Select to show this configuration in the Regions menu.': '選取以顯示此配置中的功能表。', 'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': '選取是否要使用數据機, Tropo或其他的閘道传送SMS', 'Selects whether 
to use the gateway or the Modem for sending out SMS': '選取傳送SMS時要使用閘道還是數據機', 'Self Registration': '自行註冊', 'Self-care': '自我照護', 'Self-registration': '自我登記', 'Send Alerts using Email &/or SMS': '透過電子郵件和/或簡訊發送通知', 'Send Commitment as Shipment': '將承諾作為出貨傳送', 'Send Items': '傳送項目', 'Send Mail': '傳送郵件', 'Send Message': '傳送訊息', 'Send New Shipment': '傳送新出貨', 'Send Notification': '傳送通知', 'Send Shipment': '傳送出貨', 'Send a message to this person': '傳送訊息給這個人', 'Send a message to this team': '傳送訊息至這個團隊', 'Send from %s': '從%s傳送', 'Send message': '傳送訊息', 'Send new message': '傳送新訊息', 'Send': '傳送', 'Sends & Receives Alerts via Email & SMS': '透過電子郵件和簡訊收發通知', 'Senior (50+)': '年長者 (50+)', 'Sensitivity': '敏感度', 'Sent By Person': '傳送人員', 'Sent By': '寄件者', 'Sent Item Details': '傳送項目詳細資料', 'Sent Item deleted': '傳送項目刪除', 'Sent Item updated': '傳送項目更新', 'Sent Shipment Details': '傳送出貨詳細資料', 'Sent Shipment canceled and items returned to Inventory': '傳送出貨已取消,項目已退回庫存', 'Sent Shipment canceled': '傳送出貨取消', 'Sent Shipment updated': '傳送出貨更新', 'Sent Shipments': '傳送出貨', 'Sent': '已送出', 'Separate latrines for women and men': '男女分開的廁所', 'Separated children, caregiving arrangements': '失散兒童,照顧安排', 'Serial Number': '序號', 'Series': '系列', 'Server': '伺服器 (server)', 'Service Catalogue': '服務型錄', 'Service or Facility': '服務或機能', 'Service profile added': '新增服務設定檔', 'Service profile deleted': '服務設定檔刪除', 'Service profile updated': '服務設定檔更新', 'Service': '服務', 'Services Available': '可用服務', 'Services': '服務', 'Set Base Site': '設定基本網站', 'Set By': '設定者', 'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': '設為True可允許非MapAdmin的使用者編輯位置階層的這個層級。', 'Setting Details': '設定明細', 'Setting added': '新增設定', 'Setting deleted': '設定已經刪除', 'Setting updated': '更新設定', 'Settings updated': '已更新設定', 'Settings were reset because authenticating with Twitter failed': '由於Twitter驗證失敗,設定已重設', 'Settings which can be configured through the web interface are available here.': '可透過網頁介面配置的設定可在這裡取得。', 'Settings': '設定', 'Severe': '嚴重', 'Severity': '嚴重性', 'Share a common Marker (unless over-ridden at the Feature level)': '共用同一個標記(除非在圖徵層級被置換)', 'Shelter & Essential NFIs': 'Shelter與重要NFIs', 'Shelter Details': 'Shelter詳細資料', 'Shelter Name': 'Shelter名稱', 'Shelter Registry': '庇護所登錄', 'Shelter Service Details': 'Shelter服務詳細資料', 'Shelter Service added': 'Shelter服務新增', 'Shelter Service deleted': 'Shelter服務刪除', 'Shelter Service updated': 'Shelter服務更新', 'Shelter Service': 'Shelter服務', 'Shelter Services': 'Shelter服務', 'Shelter Type Details': 'Shelter類型詳細資料', 'Shelter Type added': 'Shelter類型新增', 'Shelter Type deleted': 'Shelter類型刪除', 'Shelter Type updated': 'Shelter類型更新', 'Shelter Type': 'Shelter類型', 'Shelter Types and Services': 'Shelter類型和服務', 'Shelter Types': 'Shelter類型', 'Shelter added': 'Shelter新增', 'Shelter deleted': 'Shelter刪除', 'Shelter updated': 'Shelter更新', 'Shelter': '庇護所', 'Shelter/NFI Assistance': 'Shelter/NFI協助', 'Shelter/NFI assistance received/expected': 'Shelter/NFI協助已接收/預期', 'Shelters': '庇護所', 'Shipment Created': '出貨建立', 'Shipment Details': '貨運詳細資料', 'Shipment Items received by Inventory': '庫存已接收出貨項目', 'Shipment Items sent from Inventory': '已從庫存傳送出貨項目', 'Shipment Items': '貨運項目', 'Shipment Transit Log Details': '出貨運送日誌詳細資料', 'Shipment Transit Log added': '出貨運送日誌新增', 'Shipment Transit Log deleted': '出貨運送日誌刪除', 'Shipment Transit Log updated': '出貨運送日誌更新', 'Shipment Transit Logs': '出貨運送日誌', 'Shipment to Send': '待傳送的出貨', 'Shipment/Way Bill added': '已新增出貨/提單', 'Shipment/Way Bills Details': '出貨/提單詳細資料', 'Shipment/Way 
Bills deleted': '已刪除出貨/提單', 'Shipment/Way Bills updated': '已更新出貨/提單', 'Shipment/Way Bills': '出貨/提單', 'Shipment<>Item Relation added': '貨運<>項目關係新增', 'Shipment<>Item Relation deleted': '貨運<>項目關係刪除', 'Shipment<>Item Relation updated': '貨運<>項目關係更新', 'Shipment<>Item Relations Details': '貨運<>項目關係詳細資料', 'Shipment<>Item Relations': '貨運<>項目關係', 'Shipments To': '出貨至', 'Shipments': '貨物', 'Short Assessment': '簡短評估', 'Short Description': '簡要說明', 'Show Checklist': '顯示檢核表', 'Show Details': '顯示詳細資料', 'Show Map': '顯示地圖', 'Show Region in Menu?': '在功能表中顯示區域?', 'Show on Map': '顯示在地圖上', 'Show on map': '顯示在地圖上', 'Sign in': '登入', 'Sign-in with OpenID:': '以 OpenID 登入:', 'Sign-up as a volunteer': '註冊成為志願者', 'Sign-up for Account': '註冊帳號', 'Sign-up succesful - you should hear from us soon!': '註冊成功-我們很快會與你聯絡!', 'Sindhi': '信德文', 'Single PDF File': '單一 PDF 檔案', 'Site Address': '站台位址', 'Site Administration': '網站管理', 'Site Description': '場所說明', 'Site Details': '網站詳細資料', 'Site ID': '網站 ID', 'Site Location Description': '網站位置說明', 'Site Location Name': '站台位置名稱', 'Site Manager': '網站管理者', 'Site Name': '網站名稱', 'Site added': '新增網站', 'Site deleted': '刪除網站', 'Site updated': '更新網站', 'Site': '網站', 'Site/Warehouse': '網站/倉儲', 'Sites': '場所', 'Situation Awareness & Geospatial Analysis': '狀況覺察與地理空間分析', 'Situation Report': '狀況報告', 'Situation': '狀況', 'Sketch': '概略圖', 'Skill Catalog': '技能型錄', 'Skill Details': '技能詳細資料', 'Skill Equivalence Details': '技能等值詳細資料', 'Skill Equivalence added': '技能等值新增', 'Skill Equivalence deleted': '技能等值刪除', 'Skill Equivalence updated': '技能等值更新', 'Skill Equivalence': '等值技能', 'Skill Equivalences': '同等技能', 'Skill Provision Catalog': '技能供應型錄', 'Skill Provision Details': '技能供應詳細資料', 'Skill Provision added': '技能供應新增', 'Skill Provision deleted': '技能供應刪除', 'Skill Provision updated': '技能供應更新', 'Skill Provision': '供應技能', 'Skill Provisions': '技術條款', 'Skill Status': '技能狀態', 'Skill TYpe': '技術類型', 'Skill Type Catalog': '技術類型型錄', 'Skill Type Details': '技術類型詳細資料', 'Skill Type added': '新增技術類型', 'Skill Type deleted': '刪除技術類型', 'Skill Type updated': '更新技術類型', 'Skill Type': '技術類型', 'Skill Types': '技能類型', 'Skill added to Request': '技能已新增至需求', 'Skill added': '新增技能', 'Skill deleted': '刪除技能', 'Skill removed from Request': '技能已從需求中移除', 'Skill removed': '技能已移除', 'Skill updated': '技術更新', 'Skill': '技能', 'Skills Catalog': '技能型錄', 'Skills Management': '技能管理', 'Skills': '技術', 'Slope failure, debris': '邊坡破壞、土石', 'Small Trade': '小型交易', 'Smoke': '煙霧', 'Snapshot Report': 'Snapshot 報告', 'Snapshot': '快照', 'Snow Fall': '降雪', 'Social': '社會', 'Soil bulging, liquefaction': '土壤隆起、液化', 'Solid waste': '固體廢棄物', 'Solution Details': '解決方案明細', 'Solution Item': '解決方案項目', 'Solution added': '新增解決方案', 'Solution deleted': '刪除解決方案', 'Solution updated': '更新解決方案', 'Solution': '解決方案', 'Solutions': '解決方案', 'Some': '部分', 'Sorry - the server has a problem, please try again later.': '抱歉-伺服器發生問題,請稍後再試一次。', 'Sorry that location appears to be outside the area of the Parent.': '抱歉,該位置似乎在母項的區域之外。', 'Sorry that location appears to be outside the area supported by this deployment.': '抱歉,該位置似乎在此部署支援的區域之外。', 'Sorry, I could not understand your request': '抱歉,我不瞭解您的請求', 'Sorry, only users with the MapAdmin role are allowed to create location groups.': '抱歉,只有具MapAdmin角色的使用者才能建立位置群組。', 'Sorry, only users with the MapAdmin role are allowed to edit these locations': '抱歉,只有具MapAdmin角色的使用者才能編輯這些位置', 'Sorry, something went wrong.': '很抱歉,發生錯誤。', 'Sorry, that page is forbidden for some reason.': '抱歉,該頁面因故禁止存取。', 'Sorry, that service is temporary unavailable.': 
'抱歉,該服務暫時無法使用。', 'Sorry, there are no addresses to display': '抱歉,沒有可顯示的位址', 'Source Details': '來源詳細資料', 'Source ID': '來源 ID', 'Source Time': '來源時間', 'Source Type': '來源類型', 'Source added': '新增來源', 'Source deleted': '刪除來源', 'Source of Information': '資訊來源', 'Source updated': '更新來源', 'Source': '來源', 'Sources of income': '收入來源', 'Sources': '來源', 'Space Debris': '太空碎片', 'Spanish': '西班牙文', 'Special Ice': '特殊冰況', 'Special Marine': '特殊海象', 'Special needs': '特殊需求', 'Specialized Hospital': '專科醫院', 'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': '在該位置內看到此人員/群組的特定區域(例如建築物/房間)。', 'Specific locations need to have a parent of level': '特定位置需要有此層級的母項', 'Specify a descriptive title for the image.': '為影像指定描述性標題。', 'Specify the bed type of this unit.': '指定此單位的床位類型。', 'Specify the minimum sustainability in weeks or days.': '以週或日為單位指定最低可維持時間。', 'Specify the number of available sets': '指定可用的組數', 'Specify the number of available units (adult doses)': '指定可用的單位數(成人劑量)', 'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': '指定乳酸林格氏液或同等溶液的可用單位數(公升)', 'Specify the number of sets needed per 24h': '指定每24小時所需的組數', 'Specify the number of units (adult doses) needed per 24h': '指定每24小時所需的單位數(成人劑量)', 'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': '指定每24小時所需的乳酸林格氏液或同等溶液單位數(公升)', 'Spherical Mercator?': '球面麥卡托?', 'Spreadsheet Importer': '試算表匯入器', 'Spreadsheet uploaded': '試算表已上傳', 'Staff & Volunteers': '人員和志願者', 'Staff 2': '員工 2', 'Staff Details': '人員明細', 'Staff ID': '人員ID', 'Staff Member Details': '人員成員詳細資料', 'Staff Members': '人員成員', 'Staff Record': '人員記錄', 'Staff Type Details': '員工類型詳細資料', 'Staff Type added': '員工類型新增', 'Staff Type deleted': '員工類型刪除', 'Staff Type updated': '員工類型更新', 'Staff Types': '員工類型', 'Staff added': '新增人員', 'Staff and Volunteers': '人員和志願者', 'Staff deleted': '人員刪除', 'Staff member added': '新增人員', 'Staff present and caring for residents': '在場照顧居民的人員', 'Staff updated': '更新人員', 'Staff': '人員', 'Staff2': '員工 2', 'Staffing': '人員配置', 'Stairs': '階梯', 'Start Date': '開始日期', 'Start date': '開始日期 (start date)', 'Start of Period': '期間開始', 'Start using your OpenID': '開始使用您的 OpenID', 'State': '省 (縣)', 'Stationery': '文具', 'Status Report': '狀態報告', 'Status Update': '狀態更新', 'Status Updated': '狀態更新', 'Status added': '新增狀態', 'Status deleted': '刪除狀態', 'Status of clinical operation of the facility.': '該設施臨床作業的狀態。', 'Status of general operation of the facility.': '該設施一般作業的狀態。', 'Status of morgue capacity.': '太平間容量的狀態。', 'Status of operations of the emergency department of this hospital.': '本醫院急診部門作業的狀態。', 'Status of security procedures/access restrictions in the hospital.': '醫院內安全程序/進出限制的狀態。', 'Status of the operating rooms of this hospital.': '本醫院手術室的狀態。', 'Status updated': '狀態更新', 'Status': '狀態', 'Steel frame': '鋼框架', 'Stolen': '已遭竊', 'Storage Bin Details': '儲存箱詳細資料', 'Storage Bin Number': '儲存箱號碼', 'Storage Bin Type Details': '儲存箱類型詳細資料', 'Storage Bin Type added': '儲存箱類型新增', 'Storage Bin Type deleted': '儲存箱類型刪除', 'Storage Bin Type updated': '儲存箱類型更新', 'Storage Bin Type': '儲存箱類型', 'Storage Bin Types': '儲存箱類型', 'Storage Bin added': '儲存箱新增', 'Storage Bin deleted': '儲存箱刪除', 'Storage Bin updated': '儲存箱更新', 'Storage Bin': '儲存箱', 'Storage Bins': '儲存箱', 'Storage Location Details': '儲存體位置詳細資料', 'Storage Location ID': '儲存體位置ID', 'Storage Location Name': '儲存體位置名稱', 'Storage Location added': '儲存體位置新增', 'Storage Location deleted': '儲存體位置刪除', 
'Storage Location updated': '儲存体位置更新', 'Storage Location': '儲存體位置', 'Storage Locations': '儲存體位置', 'Store spreadsheets in the Eden database': '儲存試算表中的Eden資料庫', 'Storeys at and above ground level': 'Storeys在及以上接地層次', 'Storm Force Wind': '暴雨强制wind', 'Storm Surge': '暴雨突波', 'Street (continued)': '街道(續)', 'Street Address': '地址', 'Street': '街道', 'Strong Wind': 'strong wind', 'Structural Hazards': '結构性危害', 'Structural': '結構性', 'Style Field': '樣式欄位', 'Style Values': '樣式值', 'Sub Category': '子種類', 'Sub-type': '子類型', 'SubType': '子類型', 'Subject': '主旨', 'Submission successful - please wait': '提交成功-請稍候', 'Submission successful - please wait...': '提交成功-請稍候。', 'Submit New (full form)': '提交新的(完整形式)', 'Submit New (triage)': '提交新(分類)', 'Submit New': '提交新的', 'Submit a request for recovery': '提交要求的回复', 'Submit new Level 1 assessment (full form)': '提交新的層次一Assessment完整形式)', 'Submit new Level 1 assessment (triage)': '提交新層次一評量(分類)', 'Submit new Level 2 assessment': '提交新層次二評量', 'Submit': '確認送出', 'Submitting information about the individual such as identification numbers, physical appearance, last seen location, status, etc': '提交个人的相關資訊(例如識別號碼),實体外觀,前次看到的位置,狀態等', 'Subscription Details': '訂閱詳細資料', 'Subscription added': '已新增訂閱', 'Subscription deleted': '刪除訂閱', 'Subscription updated': '更新訂閱', 'Subscriptions': '訂閱', 'Subsector Details': 'Subsector詳細資料', 'Subsector added': '界別分組已新增', 'Subsector deleted': '界別分組已刪除', 'Subsector updated': 'Subsector更新', 'Subsector': '界別分組', 'Subsistence Cost': '補貼成本', 'Suburb': '西郊', 'Sufficient care/assistance for chronically ill': '足够的管理/的幫助chronically不正確', 'Suggest not changing this field unless you know what you are doing.': '建議不變更這个欄位,除非您知道您要做。', 'Summary by Administration Level': '摘要(依管理層次', 'Summary': '摘要', 'Sunday': '星期日', 'Supervisor': '監督者', 'Supply Chain Management': '供應鏈管理', 'Support Request': '支援要求', 'Support Requests': '支援要求', 'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': '支援决策的大型群組的危機管理專家來幫助群組建立排序清單。', 'Sure you want to delete this object?': '確定要刪除嗎?', 'Survey Answer Details': '調查回答詳細資料', 'Survey Answer added': '調查回答新增', 'Survey Answer deleted': '調查回答刪除', 'Survey Answer updated': '調查回答更新', 'Survey Answer': '調查回答', 'Survey Module': '調查模組', 'Survey Name': '意見調查名稱', 'Survey Question Details': '調查問題詳細資料', 'Survey Question Display Name': '調查問題顯示名稱', 'Survey Question added': '調查問題添加', 'Survey Question deleted': '調查問題刪除', 'Survey Question updated': '調查問題更新', 'Survey Question': '調查問題', 'Survey Section Details': '調查區段詳細資料', 'Survey Section Display Name': '調查區段顯示名稱', 'Survey Section added': '調查區段新增', 'Survey Section deleted': '調查刪除區段', 'Survey Section updated': '調查區段更新', 'Survey Section': '意見調查區段', 'Survey Series Details': '調查系列詳細資料', 'Survey Series Name': '調查系列名稱', 'Survey Series added': '調查系列新增', 'Survey Series deleted': '調查系列刪除', 'Survey Series updated': '調查系列更新', 'Survey Series': '調查系列', 'Survey Template Details': '調查范本詳細資料', 'Survey Template added': '調查范本新增', 'Survey Template deleted': '調查刪除范本', 'Survey Template updated': '調查更新范本', 'Survey Template': '意見調查範本', 'Survey Templates': '意見調查範本', 'Switch this on to use individual CSS/Javascript files for diagnostics during development.': '這个交換器上使用个別CSS/JavaScript檔在開發期間進行診斷。', 'Symbology': '符號學', 'Sync Conflicts': '同步冲突', 'Sync History': '同步歷程', 'Sync Now': '立即同步', 'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. 
Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': '同步伙伴的實例或同層級(SahanaEden, SahanaAgasti, Ushahidi,等等。 您要同步的資訊。 按一下鏈結,右邊的移至"頁面,您可以在其中新增同步伙伴,搜尋同步伙伴并對其進行修改。', 'Sync Partners': '同步伙伴', 'Sync Password': '同步密碼', 'Sync Policy': '同步原則', 'Sync Pools are groups of peers (SahanaEden & SahanaAgasti instances) willing to sync with each other. You can subscribe to different groups, define new groups and dicsover the existing ones. Click the link on the right to go to Sync Pools page.': '同步儲存區群組的對等(SahanaEden和SahanaAgasti實例)想要同步的每一个"其他"。 您可以訂閱不同的群組,定义新的群組及dicsover現有的范本。 上的鏈結,按一下滑鼠右鍵以跳至同步儲存區頁面。', 'Sync Pools': '同步儲存區', 'Sync Schedule': '同步排程', 'Sync Schedules': '同步排程', 'Sync Settings updated': '同步更新設定', 'Sync Settings': '同步設定', 'Sync Username': '同步使用者名稱', 'Sync process already started on': '同步程序已啟動', 'Synchronisation - Sync Now': '同步化-立即同步', 'Synchronisation History': '同步化歷程', 'Synchronisation': '同步化', 'Synchronization Conflicts': '同步化冲突', 'Synchronization Details': '同步化詳細資料', 'Synchronization History': '同步化歷程', 'Synchronization Peers': '同步化對等', 'Synchronization Settings': '同步化設定', 'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the automatic synchronization feature of SahanaEden': '可讓您同步化共用資料,您可以与其他更新您自己的資料庫与最新資料來自其他對等。 這个頁面提供您的相關資訊,請使用"自動同步處理功能的SahanaEden', 'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': '可讓您同步化共用資料,您可以与其他更新您自己的資料庫与最新資料來自其他對等。 這个頁面提供您的相關資訊,請使用"同步化的功能Sahana Eden', 'Synchronization not configured': '未配置同步化', 'Synchronization not configured.': '未配置同步化。', 'Synchronization settings updated': '同步化設定更新', 'Synchronization': '同步化', 'Syncronisation History': '同步歷程', 'Syncronisation Schedules': '同步排程', 'System allows the General Public to Report Incidents & have these Tracked.': '系统容許的一般公用報告事件和這些追蹤。', 'System allows the tracking & discovery of Items stored in Locations.': '可讓系统追蹤及探索項目中所儲存的位置。', 'System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': '系统是一个中央线上資料庫所有的組織,釋放工作,政府代理和camp站點,以取代人員可以將提供的輔助的需求。 它可讓使用者配置的可用資源以滿足需求效益与效率。', 'System keeps track of all Volunteers working in the disaster region. 
It captures not only the places where they are active, but also captures information on the range of services they are providing in each area.': '系統追蹤所有在災區工作的志願者。它不僅記錄他們活動的地點,也記錄他們在各地區提供的服務範圍等資訊。', 'Table name': '表格名稱', 'Tags': '標籤', 'Take shelter in place or per <instruction>': '就地避難或依照<instruction>避難', 'Task Details': '作業詳細資料', 'Task List': '作業清單', 'Task Status': '作業狀態', 'Task added': '新增作業', 'Task deleted': '作業已刪除', 'Task status': '作業狀態', 'Task updated': '作業已更新', 'Tasks': '作業', 'Team Description': '團隊說明', 'Team Details': '團隊詳細資料', 'Team Head': '團隊負責人', 'Team ID': '團隊 ID', 'Team Id': '團隊 ID', 'Team Leader': '團隊領導人', 'Team Member added': '新增團隊成員', 'Team Members': '團隊成員', 'Team Name': '團隊名稱', 'Team Type': '團隊類型', 'Team added': '新增團隊', 'Team deleted': '已刪除團隊', 'Team updated': '更新團隊', 'Team': '團隊', 'Teams': '團隊', 'Technical testing only, all recipients disregard': '僅為技術測試,所有收件者請忽略', 'Telecommunications': '電信', 'Telephone': '電話', 'Telephony': '電話系統', 'Temp folder %s not writable - unable to apply theme!': '暫存資料夾%s無法寫入-無法套用佈景主題!', 'Template file %s not readable - unable to apply theme!': '範本檔案%s無法讀取-無法套用佈景主題!', 'Templates': '範本', 'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': '國家內第五級行政區劃的稱呼(例如投票或郵遞區號分區)。這個層級不常使用。', 'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': '國家內第四級行政區劃的稱呼(例如村、鄰里或選區)。', 'Term for the primary within-country administrative division (e.g. State or Province).': '國家內第一級行政區劃的稱呼(例如州或省)。', 'Term for the secondary within-country administrative division (e.g. District or County).': '國家內第二級行政區劃的稱呼(例如區或縣)。', 'Term for the third-level within-country administrative division (e.g. City or Town).': '國家內第三級行政區劃的稱呼(例如市或鎮)。', 'Term for the top-level administrative division (i.e. Country).': '最上層行政區劃的稱呼(即國家)。', 'Territorial Authority': '地方政府', 'Terrorism': '恐怖主義', 'Tertiary Server (Optional)': '第三伺服器(選用)', 'Test Results': '測試結果', 'Text Colour for Text blocks': '文字區塊的文字顏色', 'Text Direction': '文字方向', 'Text before each Text Field (One per line)': '每個文字欄位前的文字(每行一個)', 'Text in Message': '訊息中的文字', 'Text in Message:': '訊息文字:', 'Text': '文字', 'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': '感謝您驗證您的電子郵件。您的使用者帳號仍待系統管理者 (%s) 核准。帳號啟用時,您將收到電子郵件通知。', 'Thanks for your assistance': '感謝您的協助', 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': "The query is a condition like db.table1.field1=='value'. Something like db.table1.field1 == db.table2.field2 results in a SQL JOIN.", 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': "The query is a condition like db.table1.field1=='value'. Something like db.table1.field1==db.table2.field2 results in a SQL JOIN.", 'The Area which this Site is located within.': '此場所所在的區域。', 'The Assessments module allows field workers to send in assessments.': '評估模組可讓現場工作人員回傳評估。', 'The Author of this Document (optional)': '這份文件的作者(選用)', 'The Building Asssesments module allows building safety to be assessed, e.g. 
after an Earthquake.': '建置Asssesments模組容許建置安全要評估,例如,在一个諸如。', 'The Camp this Request is from': 'Camp在這个要求中', 'The Camp this person is checking into.': 'Camp在此人員檢查。', 'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': '現行位置的人員/群組,它可以是一般(報告)或精確(用于顯示上一个)。 輸入幾个字元搜尋可用的位置。', 'The District for this Report.': '特區的這份報告。', 'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': '電子郵件位址所核准要求传送(通常,這將是一个郵件,而不是个人)。 如果此欄位空白,則要求會自動核准網域是否相符。', 'The Group whose members can edit data in this record.': '的群組成員可以編輯此記錄中的數据。', 'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': '事件報告系统可讓一般公用報告事件和這些追蹤。', 'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).': '位置的站點,它可以是一般的報告)或精確(用于顯示上一个)。', 'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': '位置的人員有來自,它可以是一般的報告)或精確(用于顯示上一个)。 輸入幾个字元搜尋可用的位置。', 'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': '位置的人員進入,可為一般(報告)或精確(用于顯示上一个)。 輸入幾个字元搜尋可用的位置。', 'The Media Library provides a catalogue of digital media.': '媒体庫提供一个型錄的數位媒体。', 'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': '传訊模組是主要的通訊中心的Sahana系统。 它是用來传送警示及/或訊息使用SMS和電子郵件給不同的群組及个人之前,期間和之后發生。', 'The Office this record is associated with.': '辦公室的此記錄的關聯。', 'The Organization Registry keeps track of all the relief organizations working in the area.': '組織登錄記錄的所有釋放組織工作的范圍內。', 'The Organization Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '組織登錄記錄的所有釋放組織工作在災難區域。 它不只會擷取工作區所作用中,但也會擷取的相關資訊范圍的專案會提供每一个區域。', 'The Organization this record is associated with.': '組織此記錄的關聯。', 'The Organization which is funding this Activity.': '組織是資金此活動。', 'The Person currently filling this Role.': '人員目前填寫這个角色。', 'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': '專案追蹤模組可建立的活動,以滿足間隙需要評估。', 'The Rapid Assessments Module stores structured reports done by Professional Organizations.': '快速評估模組儲存結构化報告來完成專業組織。', 'The Request this record is associated with.': '要求此記錄的關聯。', 'The Role this person plays within this Office/Project.': '此人員的角色內扮演這个辦事處/專案。', 'The Role this person plays within this hospital.': '此人員的角色內扮演這个醫院。', 'The Role to which this Role reports.': '這个角色的角色的報告。', 'The Shelter Registry tracks all shelters and stores basic details regarding them. 
It collaborates with other modules to track people associated with a shelter, the services available etc.': 'Shelter登錄的shelters及儲存追蹤所有的基本相關資訊。 它与其他模組追蹤人員相關聯的shelter,可用的服務等等。', 'The Shelter this Request is from (optional).': '在Shelter這个要求(選用)。', 'The Shelter this Request is from': '在Shelter這个要求從', 'The Shelter this person is checking into.': 'Shelter此人員的檢查。', 'The Source this information came from.': '來源這个資訊來源。', 'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.': '的URL GetCapabilities的WMS服務層您要存取透過對映。', 'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'URL的GetCapabilities頁面的一个網路對映服務(WMS)層您要使用透過瀏览器"畫面的"對映。', 'The URL of your web gateway without the post parameters': 'Web閘道的URL不POST参數', 'The URL to access the service.': '的URL來存取服務。', 'The Unique Identifier (UUID) as assigned to this facility by the government.': '唯一ID (UUID)作為指派給這个機能的政府。', 'The area is': '區域是', 'The attribute which is used for the title of popups.': '屬性用于標題的蹦現畫面。', 'The attribute within the KML which is used for the title of popups.': '屬性在KML用于標題的蹦現畫面。', 'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': '屬性(S)在KML用于檢索的蹦現畫面。 (屬性)之間使用空格', 'The body height (crown to heel) in cm.': '主体高度(繼位者的傾斜)在CM中。', 'The category of the Item.': '種類的項目。', 'The contact person for this organization.': '聯絡人的此組織。', 'The country the person usually lives in.': '聯絡人日常居住的國家.', 'The default Organization for whom this person is acting.': '預設組織給誰此人員是處理。', 'The default Organization for whom you are acting.': '預設組織您正為其處理。', 'The default policy for data import from this peer.': '預設原則的資料從這个同層級。', 'The descriptive name of the peer.': '同層級的叙述名稱。', 'The duplicate record will be deleted': '重复的記錄會被刪除', 'The entered unit links to this unit. For e.g. 
if you are entering m for meter then choose kilometer(if it exists) and enter the value 0.001 as multiplicator.': '輸入的單元鏈結至此單位。 例如:如果您輸入的M計量,然后選擇公里(如果有的話),然后輸入值0.001作為multiplicator。', 'The first or only name of the person (mandatory).': '聯絡人的名字 (必填).', 'The following modules are available': '下列模組可用', 'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'URL的格式http://your/web/map/service?service=WMS&request=GetCapabilities的位置/web/對映/服務代表的URL路迳WMS。', 'The hospital this record is associated with.': '醫院此記錄的關聯。', 'The item is designated to be sent for specific project, population, village or other earmarking of the donation such as a Grant Code.': '項目是指定要传送給特定專案", "人口",村落或其他earmarking的"捐献"之類的授權代碼。', 'The language to use for notifications.': '使用的語言的通知。', 'The language you wish the site to be displayed in.': '語言,您希望將網站顯示于中。', 'The last known location of the missing person before disappearance.': '在最后一个已知位置遺漏的人,才disappearance。', 'The last known location of the missing person.': '在最后一个已知位置遺漏的人員。', 'The length is': '的長度是', 'The list of Brands are maintained by the Administrators.': '清單的品牌所維護的管理者。', 'The list of Catalogs are maintained by the Administrators.': '型錄清單所維護的管理者。', 'The list of Item categories are maintained by the Administrators.': '清單中的項目類別所維護的管理者。', 'The map will be displayed initially with this latitude at the center.': '地圖會顯示最初与此緯度在中心。', 'The map will be displayed initially with this longitude at the center.': '圖表會被最初顯示与此經度中心。', 'The minimum number of features to form a cluster.': '最小數目的功能,以形成叢集。', 'The name to be used when calling for or directly addressing the person (optional).': '致電或直呼聯絡人時所用的名字 (非必填).', 'The next screen will allow you to detail the number of people here & their needs.': '下一個畫面可讓你詳細記載這裡的人數以及他們的需要', 'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...': '下一个畫面可讓您輸入的詳細清單項目和數量,如果適當的話,在。', 'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': '測量單位的數量的替代項目等于一的測量單位的項目', 'The number of pixels apart that features need to be before they are clustered.': '像素數目以外的功能需要之前形成叢集。', 'The number of tiles around the visible map to download. 
Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': '數目并排的周圍顯示對映至下載。 〇,表示載入頁面第1更快速,數字越高,表示后續分割窗格會較快。', 'The person at the location who is reporting this incident (optional)': '人員的位置上使用報告此事件(可選)', 'The person reporting about the missing person.': '人員報告關于遺漏的人員。', 'The person reporting the missing person.': '人員報告遺漏的人員。', 'The post variable containing the phone number': 'POST變數含有的電話號碼', 'The post variable on the URL used for sending messages': 'POST變數的URL用于传送訊息', 'The post variables other than the ones containing the message and the phone number': '變數POST以外的項目包含的訊息及電話號碼', 'The request this record is associated with.': '要求此記錄的關聯。', 'The scanned copy of this document.': '這份文件的掃描。', 'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': '序列埠的數据機巳連接到/ dev/ttyUSB0等在Linux和COM1, COM2,等等Windows', 'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': '伺服器未收到即時回應從另一个伺服器的存取,以填滿所要求的瀏览器。', 'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': '伺服器收到不正確的回應從另一个伺服器的存取,以填滿所要求的瀏览器。', 'The simple policy allows anonymous users to Read & registered users to Edit. The full security policy allows the administrator to set permissions on individual tables or records - see models/zzz.py.': '在"簡易"原則允許匿名使用者讀取和註冊用户進行編輯。 ', 'The site where this position is based.': '此位置的场所的基礎。', 'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': '人員responsibile的設備可以提出要求,以取得协助。 承諾可以Zh這些要求,但要求會保持開啟狀態,直到要求者確認的要求已完成。', 'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': '主題的事件不再對一个威胁或問題,并為任何后續動作的說明<instruction>', 'The time at which the Event started.': '的時間啟動事件。', 'The title of the WMS Browser panel in the Tools panel.': '標題的WMS瀏览器"中的"在"工具"畫面。', 'The token associated with this application on': '相關聯的記號与這个應用程式上', 'The unique identifier of the peer. Leave blank if the peer is no Sahana Eden instance, it will be auto-assigned in that case.': '唯一ID的同層級。 保留為空白(如果沒有同層級Sahana Eden實例,它將會被自動指派的案例。', 'The unique identifier which identifies this instance to other instances.': '唯一ID識別此實例到其他實例。', 'The way in which an item is normally distributed': '方式的一个項目通常是分散式', 'The weight in kg.': '加權中公斤。', 'The': '此', 'Theme Details': '佈景主題詳細資料', 'Theme added': '新增布景主題', 'Theme deleted': '刪除布景主題', 'Theme updated': '更新布景主題', 'Theme': '佈景主題', 'Themes': '佈景主題', 'There are errors': '有錯誤', 'There are insufficient items in the Inventory to send this shipment': '沒有足够的項目中資產传送至此出貨', 'There are multiple records at this location': '有多个記錄在這个位置', 'There are not sufficient items in the Inventory to send this shipment': '沒有足够的項目庫存中传送至此出貨', 'There are several ways which you can use to select the Location.': '有幾種方法可用來選取位置。', 'There is no Sahana account associated with that OpenID. Would you like to create one?': '沒有Sahana账户相關的OpenID。 您要建立一個嗎?', 'There is no address for this person yet. Add new address.': '沒有針對這个人員尚未。 新增地址。', 'There was a problem, sorry, please try again later.': '有問題,很抱歉,請稍后再試一次。', 'These are settings for Inbound Mail.': '這些設定的入埠郵件。', 'These are the Incident Categories visible to normal End-Users': '這些事件類別可見正常結束-使用者', 'These are the default settings for all users. 
To change settings just for you, click': '這些是預設的所有使用者的設定。 若要變更設定為您量身打造的,按一下', 'These need to be added in Decimal Degrees.': '這些需要新增以小數度。', 'They': '他們', 'This Group has no Members yet': '沒有成員目前登錄', 'This Team has no Members yet': '沒有成員目前登錄', 'This appears to be a duplicate of': '這顯然是一个重复的', 'This email address is already in use': '這個email已經被使用', 'This file already exists on the server as': '這个檔案已存在于伺服器上為', 'This form allows the administrator to remove a duplicate location by 1st updating all references to it by a different location.': '此表單可讓管理者來移除重复的位置第1更新所有参照另一个位置。', 'This form allows the administrator to remove a duplicate location.': '此表單可讓管理者來移除重复的位置。', 'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': '這是才適用這个層次在建构。 以防止意外修改之后,這个層次后,可以將其設為false。', 'This is the way to transfer data between machines as it maintains referential integrity.': '這是向之間传送資料的機器,因為它維護参照完整性。', 'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': '這是向之間传送資料的機器,因為它維護参照完整性。。應該手動移除重复的資料第1!', 'This level is not open for editing.': '這个層次不開啟進行編輯。', 'This might be due to a temporary overloading or maintenance of the server.': '這可能是由于暫時超載或維護的伺服器。', 'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.': '此模組容許庫存項目要求及發布之間的庫存的機能。', 'This module allows you to manage Events - whether pre-planned (e.g. exercises) or Live Incidents. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': '這個單元可管理事件,不論事前計畫(如預演)或事件發生時。你可以安排適當的資源,如人力、物資、設備等,使其能更容易被動員', 'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': '此模組可讓您計划的實務練習和事件。 您可以配置適當的資源(人力,資產和設備),以便它們可以mobilized簡單。', 'This module assists the management of fatalities and the identification of the deceased.': '這个模組會协助管理的fatalities和識別的死亡。', 'This page provides you with information about how to use the automatic synchronization feature of Sahana': '這个頁面提供您的相關資訊,請使用"自動同步處理功能的Sahana', 'This page shows you logs of past syncs. 
Click on the link below to go to this page.': '這个頁面顯示您的日誌之前的同步。 按一下下面的鏈結,以跳至此頁面。', 'This screen allows you to upload a collection of photos to the server.': '這个畫面可讓您上传的集合,照片至伺服器。', 'This setting can only be controlled by the Administrator.': '這項設定只能由"管理者"。', 'This shipment has already been received.': '貨物已收到。', 'This shipment has already been sent.': '貨物已送出。', 'This shipment has not been received - it has NOT been canceled because can still be edited.': '未收到貨物-尚未取消,因為仍然可以編輯。', 'This shipment has not been sent - it has NOT been canceled because can still be edited.': '此出貨尚未传送其尚未取消,因為仍然可以編輯。', 'This shipment will be confirmed as received.': '這會在確認出貨為已接收。', 'Thursday': '星期四', 'Ticket Details': '問題單詳細內容', 'Ticket ID': '通行證 ID', 'Ticket added': '新增問題單', 'Ticket deleted': '單刪除', 'Ticket updated': '票据更新', 'Ticket': '通行證', 'Ticketing Module': '待辦事項模組', 'Tickets': '通行證', 'Tilt-up concrete': '傾斜的具体', 'Timber frame': 'Timber訊框', 'Time Stamp': '時間戳記', 'Time at which data was exchanged.': '時間資料交換。', 'Time needed to collect water': '需要時間來收集臨界值', 'Time of Request': '要求時間', 'Timeline Report': '報告時間表', 'Timeline': '時間表', 'Timestamp': '時間戳記', 'Title to show for the Web Map Service panel in the Tools panel.': '標題顯示的網頁對映服務"中的"在"工具"畫面。', 'Title': '標題', 'To Location': '終點位置', 'To Organization': '目標組織', 'To Person': '將人員', 'To Site': '目標場所', 'To begin the sync process, click the button on the right =>': '開始同步程序,請按一下右邊的按鈕=>', 'To begin the sync process, click this button =>': '開始同步程序,請按一下這个按鈕=>', 'To create a personal map configuration, click ': '若要建立個人化地圖設定,請點選 ', 'To create a personal map configuration, click': '若要建立个人配置,請按一下對映', 'To delete': '要刪除', 'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': '若要編輯OpenStreetMap,您必须編輯OpenStreetMap設定模型/000_config.. py', 'To search by job title, enter any portion of the title. You may use % as wildcard.': '搜尋工作職稱,輸入任何部分的標題。 您可以使用%作為通配符。', 'To submit a new job, use the': '提交一个新的工作,使用', 'To variable': '到變數', 'To': '起飛', 'Tools': '工具', 'Total # of Beneficiaries Reached': '總數的受益人達到', 'Total # of Target Beneficiaries': '總數的目標受益人', 'Total # of households of site visited': '總數的家庭场所的訪問', 'Total Beds': '總計Beds', 'Total Beneficiaries': '總受益人', 'Total Cost per Megabyte': '每MB成本總計', 'Total Cost per Minute': '每分鐘的總成本', 'Total Households': '總家庭', 'Total Monthly Cost': '每月成本總計', 'Total Monthly Cost:': '每月成本總計:', 'Total Monthly': '每月總計', 'Total No of Affectees (Including Students, Teachers & Others)': '總沒有的Affectees (包括學員,教師及其他)', 'Total No of Female Affectees (Including Students, Teachers & Others)': '總沒有的女性Affectees (包括學員,教師及其他)', 'Total No of Male Affectees (Including Students, Teachers & Others)': '總沒有的男性Affectees (包括學員,教師及其他)', 'Total No of Students (Primary To Higher Secondary) in the Total Affectees': '總沒有的學員(主要較次要)總數中Affectees', 'Total No of Teachers & Other Govt Servants in the Total Affectees': '總沒有的教職員和其他政府服務者總數中Affectees', 'Total One-time Costs': '總計一-時間成本', 'Total Persons': '總人員', 'Total Recurring Costs': '總循環成本', 'Total Unit Cost': '總單位成本', 'Total Unit Cost:': '總單位成本:', 'Total Units': '裝置總計', 'Total gross floor area (square meters)': '總毛利底板區域(平方公尺)', 'Total number of beds in this hospital. 
Automatically updated from daily reports.': 'beds總數在此醫院。 自動更新從每日報告。', 'Total number of houses in the area': '總數可容納的范圍內', 'Total number of schools in affected area': '總數學校中受影响的區域', 'Total population of site visited': '總体的網站瀏览', 'Total': '總計', 'Totals for Budget:': '預算的總計:', 'Totals for Bundle:': '總額的軟体組:', 'Totals for Kit:': '總額的套件:', 'Tourist Group': '觀光團', 'Town': '鄉鎮', 'Traces internally displaced people (IDPs) and their needs': '跟踪內部移動人員(IDP)及其需求', 'Tracing': '追蹤', 'Track Details': '追蹤詳細資料', 'Track deleted': '刪除跟踪', 'Track updated': '更新跟踪', 'Track uploaded': '跟踪上传', 'Track with this Person?': '跟踪与此人員嗎?', 'Track': '追蹤', 'Tracking of Projects, Activities and Tasks': '追蹤的專案,活動和任務', 'Tracking of basic information on the location, facilities and size of the Shelters': '追蹤的基本資訊的位置,設備和大小Shelters', 'Tracks requests for aid and matches them against donors who have pledged aid': '追蹤要求的輔助和符合那些對donors擁有抵押輔助', 'Tracks the location, distibution, capacity and breakdown of victims in Shelters': '追蹤位置,分佈值,容量和分解的受害者中Shelters', 'Tracks': '磁軌', 'Traffic Report': '資料流量報告', 'Training Course Catalog': '訓練課程型錄', 'Training Details': '訓練詳細資料', 'Training added': '新增訓練', 'Training deleted': '刪除訓練', 'Training updated': '訓練更新', 'Training': '訓練', 'Trainings': '撰文', 'Transit Status': '传輸狀態', 'Transit': '運輸', 'Transit. Status': '運輸。 狀態', 'Transition Effect': '轉變的效果', 'Transparent?': '透明?', 'Transport': '傳輸', 'Transportation assistance, Rank': '交通工具輔助,等級', 'Trauma Center': 'Trauma中心', 'Travel Cost': '旅行成本', 'Treatments': '離群值', 'Tree': '樹狀結構', 'Tropical Storm': '暴雨熱帶', 'Tropo Messaging Token': 'Tropo記號传訊', 'Tropo Settings': 'Tropo設定', 'Tropo Voice Token': 'Tropo語音記號', 'Tropo settings updated': 'Tropo更新設定', 'Truck': '卡車', 'Try checking the URL for errors, maybe it was mistyped.': '嘗試檢查的URL錯誤,可能是輸入錯誤。', 'Try hitting refresh/reload button or trying the URL from the address bar again.': "嘗試按重新整理/載入按鈕或試着的URL從'網址'列。", 'Try refreshing the page or hitting the back button on your browser.': '請嘗試重新整理頁面,或按"上一頁"按鈕的瀏览器。', 'Tsunami': '海嘯', 'Tuesday': '星期二', 'Twitter ID or #hashtag': 'Twitter ID或#hashtag', 'Twitter Settings': 'Twitter設定', 'Twitter': '推特', 'Type of Construction': '類型的建构', 'Type of cause': '原因類型', 'Type of place for defecation': '的工作區類型的defecation', 'Type of water source before the disaster': '類型的臨界值來源前的災難', 'Type': '類型', 'Type:': '類型:', 'Types of health services available': '類型的健康服務可用', 'Types of water storage containers available': '類型的臨界值儲存体儲存區可用', 'Types': '類型', 'URL of the Ushahidi instance': 'Ushahidi實例的網址', 'URL': '網址', 'UTC Offset': '世界標準時間時差', 'UUID of foreign Sahana server': 'UUID的外部Sahana伺服器', 'Un-Repairable': '取消-修复', 'Unable to parse CSV file!': '無法剖析CSV檔!', 'Unidentified': '識別', 'Union Council': '聯集委員會', 'Unit Bed Capacity': '單元容量平台', 'Unit Cost': '單位成本', 'Unit Details': '單元詳細資料', 'Unit Name': '單元名稱', 'Unit Set': '單元設定', 'Unit Short Code for e.g. 
m for meter.': '空頭代碼單元例如M的計量。', 'Unit added': '新增單元', 'Unit deleted': '刪除單元', 'Unit of Measure': '測量單位', 'Unit updated': '單元更新', 'Unit': '裝置', 'Units of Measure': '測量單位', 'Units': '單元', 'Unknown Peer': '不明的同層級', 'Unknown type of facility': '不明類型的機能', 'Unknown': '不明', 'Unresolved Conflicts': '尚未解决的冲突', 'Unselect to disable the modem': '若要停用取消數据機', 'Unsent': '未傳送', 'Unsupported data format!': '不受支援的資料格式!', 'Unsupported method!': '不受支援的方法!', 'Update Activity Report': '更新活動報告', 'Update Cholera Treatment Capability Information': '更新Cholera處理功能資訊', 'Update Details': '更新詳細資料', 'Update Import Job': '更新匯入工作', 'Update Request': '更新要求', 'Update Service Profile': '更新服務設定檔', 'Update Status': '更新狀態', 'Update Task Status': '更新作業狀態', 'Update Unit': '更新單元', 'Update added': '新增更新', 'Update deleted': '刪除更新', 'Update if Master': '如果更新主要', 'Update if Newer': '若較新則更新', 'Update updated': '更新更新', 'Update your current ordered list': '更新您現行排序清單', 'Update': '更新項目', 'Update/Master': '更新/主要', 'Update/Newer': '更新/新', 'Updated By': '更新者', 'Updates': '更新項目', 'Upload Photos': '上传照片', 'Upload Spreadsheet': '上传試算表', 'Upload Track': '上传跟踪', 'Upload a Spreadsheet': '上传一个試算表', 'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': '上传影像檔案(BMP, GIF, JPEG或PNG),最大 300x300像素!', 'Upload an image file here.': '上传影像檔案在這裡。', 'Upload an image, such as a photo': '上传影像,例如圖片', 'Upload': '上傳', 'Urban Fire': '都市發動', 'Urban area': '都市區域', 'Urdu': '烏都文', 'Urgent': '緊急', 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': '使用(...)&(...)的, (...)|(...)的或,且~(...)的建置更复雜的查詢。', 'Use Geocoder for address lookups?': '使用地理編碼程式的位址查閱嗎?', 'Use default from feature class': '使用從特性預設類別', 'Use default': '使用預設值', 'Use these links to download data that is currently in the database.': '使用這些鏈結來下載資料中的現行資料庫。', 'Use this link to review the situation.': '請利用這个鏈結來檢視狀湟。', 'Use this space to add a description about the Bin Type.': '使用此空間來新增說明的bin類型。', 'Use this space to add a description about the site location.': '使用此空間來新增說明的站點位置。', 'Use this space to add a description about the warehouse/site.': '使用此空間來新增說明倉儲或網站。', 'Use this space to add additional comments and notes about the Site/Warehouse.': '使用此空間來新增其他註解和附註的相關站點/WAREHOUSE。', 'Use this to indicate that the person has been found.': '使用此項來表示此人已被找到。', 'Used by IRS & Assess': '使用已送交IRS及評估', 'Used in onHover Tooltip & Cluster Popups to differentiate between types.': '在使用onHover工具提示和叢集蹦現以區分類型。', 'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': '用來建置onHover和工具提示第1欄位也用于叢集蹦現以區分記錄。', 'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': '用來檢查的緯度輸入的位置是否合理。 可用來過濾列出的資源的位置。', 'Used to check that longitude of entered locations is reasonable. 
May be used to filter lists of resources that have locations.': '用來檢查輸入的經度位置是否合理。 可用來過濾列出的資源的位置。', 'Used to import data from spreadsheets into the database': '用來匯入資料到工作表的資料庫', 'Used within Inventory Management, Request Management and Asset Management': '使用"庫存"內的管理要求管理"和"資產管理', 'User %(id)s Logged-in': '使用者 %(id)s 的登入', 'User %(id)s Logged-out': '使用者 %(id)s 登入登出', 'User %(id)s Profile updated': '使用者 %(id)s 設定檔更新', 'User %(id)s Registered': '使用者 %(id)s 登錄', 'User Account has been Disabled': '使用者账户已停用', 'User Details': '使用者詳細資料', 'User ID': '使用者 ID', 'User Management': '使用者管理', 'User Profile': '使用者設定檔', 'User Requests': '使用者要求', 'User Updated': '已更新使用者', 'User added': '已新增使用者', 'User already has this role': '使用者已具有此角色', 'User deleted': '已刪除使用者', 'User updated': '已更新使用者', 'User': '使用者', 'Username & Password': '使用者密碼(&P)', 'Username for authentication at the peer. Note that only HTTP Basic authentication is supported.': '使用者名稱的鉴別的同層級。 注意,只支援HTTP基本鉴別。', 'Username': '使用者名稱', 'Users can collaboratively add markers of what is occuring.': '使用者可以新增合作的標記是什么發生。', 'Users removed': '移除使用者', 'Users': '使用者', 'Uses the REST Query Format defined in': '使用其他查詢中定义的格式', 'Usual food sources in the area': '一般來源食品的范圍內', 'Utilities': '公用程式', 'Utility, telecommunication, other non-transport infrastructure': '公用程式,電信,其他非传輸基礎架构', 'Value': '值', 'Various Reporting functionalities': '各種報告功能', 'Vehicle Crime': '車輛犯罪', 'Vehicle Types': '車輛類型', 'Vehicle': '車輛', 'Vendor': '供應商', 'Verification Status': '驗證狀態', 'Verified': '已驗證', 'Verified?': '驗證?', 'Verify Password': '驗證密碼', 'Verify password': '驗證密碼', 'Version': '版本', 'Very Good': '非常良好', 'Very High': '非常高', 'View & Edit Pledges': '檢視和編輯抵押', 'View Alerts received using either Email or SMS': '檢視透過電子郵件或簡訊收到的通知', 'View All': '全部檢視', 'View Error Tickets': '檢視摘記卷錯誤', 'View Fullscreen Map': '檢視全螢幕對映', 'View Image': '檢視影像', 'View Items': '檢視項目', 'View Map': '檢視對映', 'View On Map': '檢視上對映', 'View Outbox': '檢視寄件匣', 'View Picture': '檢視圖片', 'View Requests & Pledge Aid': '檢視要求与抵押輔助', 'View Requests for Aid': '檢視要求的輔助', 'View Settings': '視圖設定', 'View Situation Map': '檢視狀湟對映', 'View Tickets': '檢視摘記卷', 'View and/or update their details': '檢視及/或更新其詳細資料', 'View or update the status of a hospital.': '檢視或更新狀態的醫院。', 'View pending requests and pledge support.': '檢視擱置要求和抵押支援。', 'View the hospitals on a map.': '檢視醫院圖上。', 'View/Edit the Database directly': '檢視/編輯資料庫直接', 'Village Leader': '村落領導者', 'Village': '村落', 'Visible?': '可見嗎?', 'Visual Recognition': '視覺化辨識', 'Volcanic Ash Cloud': 'Volcanic灰燼雲端', 'Volcanic Event': 'Volcanic事件', 'Volume (m3)': '磁區(M3)', 'Volume - Fluids': '磁區-液体', 'Volume - Solids': '磁區的固体', 'Volume Capacity': '容體容量', 'Volume/Dimensions': '磁碟區/維度', 'Volunteer Availability': '自愿可用性', 'Volunteer Data': '自愿資料', 'Volunteer Details': '自愿詳細資料', 'Volunteer Information': '志工資訊', 'Volunteer Management': '主動管理', 'Volunteer Project': '志願者專案', 'Volunteer Record': '志工記錄', 'Volunteer Registration': '志願者登錄', 'Volunteer Registrations': '自愿登錄', 'Volunteer Request': '自愿要求', 'Volunteer added': '新增志工', 'Volunteer availability added': '自愿可用性新增', 'Volunteer availability deleted': '自愿可用性刪除', 'Volunteer availability updated': '自愿可用性更新', 'Volunteer deleted': '志工刪除', 'Volunteer details updated': '更新志願者詳細資料', 'Volunteer location': '志願者位置', 'Volunteer registration added': '已新增志願者登記', 'Volunteer registration deleted': '已刪除志願者登記', 'Volunteer registration updated': '自愿登錄更新', 'Volunteers were notified!': '已主動通知!', 'Volunteers': '志工', 'Vote': '表決', 'Votes': '表決', 'WASH': '清洗', 'WFP Assessments': '世界糧食計劃組織的評估', 
'WMS Browser Name': 'WMS瀏览器名稱', 'WMS Browser URL': 'WMS瀏览器URL', 'Walking Only': '僅查訪', 'Walking time to the health service': '遍訪時間,健康狀態"服務程式', 'Wall or other structural damage': '牆面或其他結构損壞', 'Warehouse Details': '詳細資料倉儲', 'Warehouse Management': '倉儲管理', 'Warehouse added': '新增倉儲', 'Warehouse deleted': '刪除倉庫', 'Warehouse updated': '更新倉儲', 'Warehouse': '倉儲', 'Warehouse/Sites Registry': '倉庫/站點登錄', 'Warehouses': '倉庫', 'Water Sanitation Hygiene': '水Hygiene設施', 'Water collection': '水集合', 'Water gallon': '水加侖', 'Water storage containers available for HH': '水儲存体儲存區可用的hh', 'Water storage containers in households': '水儲存体儲存區中家庭', 'Water storage containers sufficient per HH': '水儲存体儲存區足够每hh', 'Water supply': '水供應', 'Water': '水', 'Way Bill(s)': '方式账單(S)', 'We have tried': '我們已經嘗試', 'Web Map Service Browser Name': 'Web瀏览器名稱對映服務', 'Web Map Service Browser URL': 'Web瀏览器URL對映服務', 'Website': '網站', 'Wednesday': '星期三', 'Weekly': '每週', 'Weight (kg)': '重量 (公斤)', 'Weight': '重量', 'Welcome to the Sahana Eden Disaster Management System': '歡迎使用 Sahana Eden 救災管理系統', 'Welcome to the Sahana Portal at': '歡迎使用Sahana入口網站', 'Well-Known Text': '常用文字', 'Were basic medical supplies available for health services prior to the disaster?': '在基本醫療用品可用的健康服務之前,災難?', 'Were breast milk substitutes used prior to the disaster?': 'breast到espresso使用替換之前,災難?', 'Were there cases of malnutrition in this area prior to the disaster?': '在這個災難之前,這個地區有營養不良的個案嗎?', 'Were there health services functioning for the community prior to the disaster?': '在這個災難之前,這個地區有正常運作的健康醫療服務嗎?', 'Were there reports or evidence of outbreaks of any micronutrient malnutrition disorders before the emergency?': '有報告或的迹象爆發的任何micronutrient malnutrition disorders之前的緊急嗎?', 'What are the factors affecting school attendance?': '有哪些因素會影響到學校上課的出席率?', 'What are your main sources of cash to restart your business?': '什麼是你重新創業的主要現金資本來源?', 'What are your main sources of income now?': '什麼是你現階段主要收入來源?', 'What do you spend most of your income on now?': '現階段你大部份收入花用在什麼地方?', 'What food stocks exist? (main dishes)': '什么食品股票存在嗎? (主要餐盤)', 'What food stocks exist? (side dishes)': '什么食品股票存在嗎? (側面餐盤)', 'What is the estimated total number of people in all of these institutions?': '把這些機構的人全部加起來, 大約總共是多少人?', 'What is your major source of clean water for daily use (ex: washing, cooking, bathing)?': '什麼是你每天主要的清潔用水來源 (例如: 清洗, 煮飯, 洗澡)?', 'What is your major source of drinking water?': '你的飲用水主要是來自於那裡?', 'What type of latrines are available in the village/IDP centre/Camp?': '什麼類型的廁所在村/ IDP中心/營可用?', 'What type of salvage material can be used from destroyed houses?': '什麼類型的打撈材料從被摧毀的房屋可用?', 'What type of salvage material can be used from destroyed schools?': '什麼類型的打撈材料從被摧毀的學校可用?', 'What types of health problems do children currently have?': '孩子目前有什麼類型的衛生問題?', 'What types of health problems do people currently have?': '人目前有什麼類型的衛生問題?', 'What types of health services are still functioning in the affected area?': '在受影響的地區什麼類型的衛生服務仍然運作?', 'What types of household water storage containers are available?': '有什麼類型的家庭儲水容器可用?', 'What were your main sources of income before the disaster?': '災難之前什麼是你的主要收入來源?', 'Wheat': '小麥', 'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value adds a small mount of distance outside the points. 
Without this, the outermost points would be on the bounding box, and might not be visible.': '當這項對映會出現,旨在點的集合,這項對映會縮放,以僅顯示區域外框的點。 這个值新增小裝載的距離外的點。 無此項,最外側的點上外框,且可能不可見。', 'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. After the map is displayed, it can be zoomed as desired.': '當這項對映會出現,旨在點的集合,這項對映會縮放,以僅顯示區域外框的點。 此值提供一个最小寬度和高度度的區域顯示。 否則,一个顯示一个不會顯示任何范圍在該點。 之后,這項對映會出現,它可以放大需求。', 'When reports were entered': '當報告已輸入', 'Where are the alternative places for studying?': '另類的學習地方在哪裡?', 'Where are the separated children originally from?': '位置是分隔的子項最初的?', 'Where do the majority of people defecate?': '在執行大部分的人defecate嗎?', 'Where have the children been sent?': '在具有子項已传送?', 'Where is solid waste disposed in the village/camp?': '位置是實心廢棄物丟棄在村落/camp?', 'Whereabouts': '下落', 'Whether this is a Sahana Eden, Sahana Agasti, Ushahidi or Other instance.': '這是否為Sahana Eden, Sahana Agasti, Ushahidi或其他實例。', 'Which API function was called, it can only have two values: getdata refers to data export operation and putdata refers to data import operation.': '該API函數呼叫,它只能有二个值: getData参照資料匯出作業, putdata是指資料匯入作業。', 'Who is doing what and where': '誰正在做什么和位置', 'Who usually collects water for the family?': '誰通常收集水的系列?', 'Width (m)': '寬度(M)', 'Width': '寬度', 'Wild Fire': '萬用字元發動', 'Wind Chill': '風硬化', 'Window frame': '視窗框', 'Winter Storm': '冬季暴雨', 'Without mentioning any names or indicating anyone, do you know of any incidents of violence against women or girls occuring since the disaster?': '在不提出任何人名或暗示任何人的情況下,你是否知道自從災難發生後, 有沒有任何對婦女或女孩暴力的事件?', 'Women of Child Bearing Age': '婦女的子項軸承經歷時間', 'Women participating in coping activities': '婦女参与复制活動', 'Women who are Pregnant or in Labour': '女性是Pregnant或在人工', 'Womens Focus Groups': 'Womens專題', 'Wooden plank': '木制plank', 'Wooden poles': '木制poles', 'Working hours end': '結束工作時數', 'Working hours start': '工作小時開始', 'Working or other to provide money/food': '工作或其他提供金錢/餐飲', 'Would you like to display the photos on the map?': '您想顯示的照片上的對映?', 'X-Ray': 'X光', 'YES': '是', 'Year built': '建置年份', 'Year of Manufacture': '年的制造', 'Yellow': '黃色', 'Yes': '是', 'You are a recovery team?': '您的回复團隊?', 'You are attempting to delete your own account - are you sure you want to proceed?': '您正在嘗試刪除您自己的账户-您確定要繼續進行嗎?', 'You are currently reported missing!': '您目前報告遺漏!', 'You can add information about your organization here. It is the information which other servers can read about you.': '您可以新增組織的相關資訊在這裡。 這是資訊的其他伺服器可以閱讀您。', 'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': '您可以變更的配置同步化模組設定"區段中。 此配置包括您的UUID (唯一識別號碼),同步化排程, Beacon服務等等。 按一下下列鏈結以跳至"同步設定"頁面。', 'You can click on the map below to select the Lat/Lon fields': '您可以按一下"對映"下面選擇的平面/長欄位', 'You can click on the map below to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. 
This needs to be added in Decimal Degrees.': '您可以按一下"對映"下面選擇的平面/長欄位。 經度是西-East (短)。 緯度是北美-南-(上下)。 緯度是〇equator与正在北部地區部分和負數在南部部分。 經度是〇本初子午线(格林威治標準時間表示),正向,東,歐洲和亞洲"。 經度為負數,西,越過大西洋和"美洲"。 需要將此新增以小數度。', 'You can click on the map below to select the Lat/Lon fields:': '您可以按一下"對映"下面選擇的平面/長欄位:', 'You can click on the map to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.': '您可以按一下在對映至選取的平面/長欄位。 經度是西-East (短)。 緯度是北美-南-(上下)。 緯度是〇equator与正在北部地區部分和負數在南部部分。 經度是〇本初子午线(格林威治標準時間表示),正向,東,歐洲和亞洲"。 經度為負數,西,越過大西洋和"美洲"。 需要將此新增以小數度。', 'You can select the Draw tool (': '您可以選取繪制工具', 'You can select the Draw tool': '您可以選取繪制工具', 'You can set the modem settings for SMS here.': '您可以設定數据機設定的SMS這裡。', 'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': '您可以使用轉換工具來轉換或GPS座標或度/分鐘/秒。', 'You do no have permission to cancel this received shipment.': '您沒有許可權來取消此接收出貨。', 'You do no have permission to cancel this sent shipment.': '您沒有許可權來取消此传送出貨。', 'You do no have permission to make this commitment.': '您沒有許可權來進行此承諾。', 'You do no have permission to receive this shipment.': '您沒有許可權來接收此出貨。', 'You do no have permission to send this shipment.': '您沒有許可權來传送這份出貨。', 'You do not have permission for any facility to make a commitment.': '您沒有許可權的任何機能來使承諾。', 'You do not have permission for any facility to make a request.': '您沒有許可權的任何機能來提出請求。', 'You do not have permission for any site to add an inventory item.': '您沒有許可權的任何網站,以新增一个庫存項目。', 'You do not have permission for any site to receive a shipment.': '您沒有許可權的任何網站接收出貨。', 'You do not have permission for any site to send a shipment.': '您沒有許可權的任何網站传送出貨。', 'You do not have permission to cancel this received shipment.': '您沒有許可權來取消此接收出貨。', 'You do not have permission to cancel this sent shipment.': '您沒有許可權來取消此传送出貨。', 'You do not have permission to make this commitment.': '您沒有權限可讓此承諾。', 'You do not have permission to receive this shipment.': '您沒有許可權來接收這个出貨。', 'You do not have permission to send a shipment from this site.': '您沒有許可權來传送運送產品這个網站。', 'You do not have permission to send this shipment.': '您沒有許可權來传送這份出貨。', 'You have a personal map configuration. To change your personal configuration, click': '您有一个个人對映配置。 若要變更您的个人配置,請按一下', 'You have found a dead body?': '您找到一个停用身体?', 'You must be logged in to register volunteers.': '您必须登入,才能登錄参与者。', 'You must be logged in to report persons missing or found.': '您必须登入,才能報告人員遺漏或找到。', 'You must provide a series id to proceed.': '您必须提供一个系列ID來繼續。', 'You should edit Twitter settings in models/000_config.py': '您應該編輯Twitter中的設定模型/000_config.. py', 'Your action is required. Please approve user %s asap:': '您的動作是必要的。 請核准使用者%s ASAP:', 'Your action is required. Please approve user': '您的動作是必要的。 請核准使用者', 'Your current ordered list of solution items is shown below. You can change it by voting again.': '您的現行排序清單的解决方案項目如下所示。 您可以變更它的表决。', 'Your post was added successfully.': '已順利新增您的文章。', 'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. 
You can also see other settings on that page.': '您的系统已被指派一个唯一的識別碼(UUID),它在其他電腦可讓您用來識別您。 若要檢視您的UUID,您可以跳至同步化->同步設定。 您也可以查看其他設定這个頁面上。', 'Your unique identification key. It is a 16 character word (aka string). Other servers in your organization will recognize you from this.': '您的唯一識別金鑰。 它是一个16字元字組(亦稱為字串)。 其他伺服器的組織會識別您。', 'ZIP/Postcode': 'ZIP/郵遞區號', 'Zero Hour': '〇小時', 'Zeroconf Description': 'ZeroConf說明', 'Zeroconf Port': 'ZeroConf埠', 'Zinc roof': '鋅安設', 'Zoom In: click in the map or use the left mouse button and drag to create a rectangle': '放大:按一下在對映中,或使用滑鼠左鍵并拖動滑鼠來建立一个矩形', 'Zoom Levels': '縮放級別', 'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle': '縮小:按一下在對映中,或使用滑鼠左鍵并拖動滑鼠來建立一个矩形', 'Zoom to maximum map extent': '縮放至對映上限范圍', 'Zoom': '縮放', 'accepted': '接受', 'act': '行動', 'active': '作用中', 'added': '已新增', 'all records': '所有記錄', 'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': '允許的預算來開發基于員工和設備成本,包括任何管理成本。', 'allows for creation and management of surveys to assess the damage following a natural disaster.': '可讓您建立和管理的調查來評估損壞之后,自然災難。', 'an individual/team to do in 1-2 days': '个別團隊/一中執行的二天', 'approved': '已核准', 'are mandatory and must be filled': '的欄位為必填', 'assigned': '已指派', 'average': '平均值', 'black': '黑色', 'blond': '金色', 'blue': '藍色', 'brown': '棕色', 'by': '根據', 'c/o Name': 'C I/O名稱', 'can be used to extract data from spreadsheets and put them into database tables.': '可用來擷取資料的試算表和放置到資料庫表格。', 'cancelled': '已取消', 'check all': '勾選「全部」', 'click for more details': '按一下以取得更多詳細資料', 'collateral event': '抵押品事件', 'completed': '已完成', 'confirmed': '已確認', 'consider': '考量', 'criminal intent': '犯罪目的', 'critical': '重要', 'crud': 'CRUD', 'curly': '大括弧', 'currently registered': '目前登錄', 'daily': '每日', 'data uploaded': '上传資料', 'database %s select': '資料庫%選取', 'database': '資料庫', 'db': 'DB', 'deceased': '死亡', 'deferred': '延遲', 'delete all checked': '所有已刪除', 'delete': '刪除', 'deleted': '已刪除', 'denied': '已拒絕', 'description': '說明', 'design': '設計', 'diseased': '死者', 'displaced': '移離', 'divorced': '離婚', 'done!': '完成!', 'duplicate': '重複', 'edit': '編輯', 'editor': '編輯者', 'eg. gas, electricity, water': '例如: 瓦斯,電力,水', 'embedded': '內嵌的', 'enclosed area': '括住區域', 'export as csv file': '匯出為CSV檔案', 'fat': 'FAT', 'feedback': '讀者意見', 'female': '女性', 'final report': '最終報告', 'flush latrine with septic tank': 'latrine清除与septic油槽', 'follow-up assessment': '后續評量', 'food_sources': '食物來源', 'form data': '表單資料', 'found': '找到', 'from Twitter': '從Twitter', 'full': '滿載', 'getting': '取得', 'green': '綠色', 'grey': '灰色', 'here': '這裡', 'high': '高', 'highly critical': '高度重要', 'hourly': '每小時', 'households': '家庭', 'how to deal with duplicate data found between your machine and that particular sahana instance.': '如何處理重复資料之間找到在您的機器与該特定sahana實例。', 'http://openid.net/get-an-openid/start-using-your-openid/': 'https://myid.tw/profile/help', 'human error': '人為錯誤', 'identified': '識別', 'ignore': '忽略', 'immediately': '立即', 'improvement': '改進', 'in Deg Min Sec format': '在度最小秒格式', 'in GPS format': '在GPS格式', 'inactive': '非作用中', 'initial assessment': '起始評量', 'injured': '受傷', 'insert new %s': '插入新的%', 'insert new': '插入新建項目', 'invalid request': '無效要求', 'invalid': '無效', 'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. 
Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': '是一个中央线上資料庫位置資訊的所有意外受害者和系列,特別是識別意外, evacuees和移動人員可以儲存。 資訊,如姓名,年齡,聯絡人編號,身分證號碼,取代位置,以及其他詳細資料擷取。 圖片和指紋詳細資料的人可以上传至系统中。 人員也可以擷取群組的效率和方便。', 'is an online bulletin board of missing and found people. It captures information about the people missing and found, as well as information of the person seeking them, increasing the chances of people finding each other. For example if two members of a family unit is looking for the head of the family, we can use this data at least to connect those two family members.': '是一个线上公布欄的遺失及找到的人員。 它會擷取資訊的人遺失及找到的,以及資訊的人員辨認,增加機會的人的其他人。 例如,如果二个成員的一系列單元正在尋找的標頭,系列,我們可以使用這个資料至少連接二个系列的成員。', 'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': '可想而知是要由數个子模組合力提供复雜的功能,用于管理釋放和專案項目來組織。 這包括進氣系统,倉儲管理系统,商品追蹤,供應鏈管理,車隊管理,採購,財務追蹤和其他資產和資源管理功能', 'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': '跟踪記錄所有傳入門票,讓他們進行分類和路由到適當的地方行動。', 'keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '追蹤所有的組織工作在災難區域。 它不只會擷取工作區所作用中,但也會擷取的相關資訊范圍的專案會提供每一个區域。', 'leave empty to detach account': '保留空白以分離账户', 'legend URL': '圖註URL', 'light': '光亮', 'locations': '位置', 'login': '登入', 'long': 'Long', 'long>12cm': '超過12cm', 'low': '低', 'male': '男性', 'manual': '手動', 'married': '已婚', 'medium': '中', 'menu item': '功能表項目', 'meters': '米', 'missing': '遺漏', 'module allows the site administrator to configure various options.': '模組可讓網站管理者配置的各種選項。', 'module helps monitoring the status of hospitals.': '模組有助于監視狀態的醫院。', 'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': '模組提供一種機制來提供合作開發的概述,意外,使用連线對映(GIS)。', 'more': '更多模組', 'n/a': '不適用', 'natural hazard': '自然危害', 'never': '絕不', 'new record inserted': '插入新記錄', 'new': '新建', 'next 100 rows': '下100个橫列', 'no': '無影響', 'non-critical': '非重要', 'none': '無', 'normal': '正常', 'not accessible - no cached version available!': '無法存取-無快取可用版本!', 'not accessible - using cached version from': '無法存取-使用快取的版本。', 'not specified': '未指定', 'not writable - unable to cache GeoRSS layers!': '無法寫入-無法快取GeoRSS層!', 'not writable - unable to cache KML layers!': '無法寫入-無法快取KML層!', 'num Zoom Levels': 'num个縮放級別', 'obsolete': '已作廢', 'on': '開啟', 'once': '一次', 'open defecation': '開啟defecation', 'operational intent': '目的作業', 'optional': '選用', 'or import from csv file': '或從CSV檔案', 'other': '其他', 'over one hour': '上一个小時', 'people': '個人', 'piece': '片段', 'pit latrine': 'PIT latrine', 'pit': 'PIT', 'postponed': '延遲', 'preliminary template or draft, not actionable in its current form': '初步范本或初稿,不可在其現行表單', 'previous 100 rows': '前100个橫列', 'primary incident': '主要事件', 'provides a catalogue of digital media.': '提供一个型錄的數位媒体。', 'record does not exist': '記錄不存在', 'record id': '記錄 ID', 'records deleted': '已刪除的記錄', 'red': '紅色', 'refresh': '重新整理', 'reported': '已報告', 'reports successfully imported.': '報告已順利匯入。', 'representation of the Polygon/Line.': '表示法的多邊形/行。', 
'retired': '已撤回', 'retry': '重試', 'review': '檢閱', 'river': '金水河', 'secondary effect': '次要效果', 'see comment': '請参閱註解', 'selected': '已選取', 'separated from family': '分開系列', 'separated': '分居', 'short': 'Short', 'sides': '側邊', 'sign-up now': '現在註冊', 'simple': '簡單', 'single': '單身', 'skills': '技術', 'slim': 'Slim', 'specify': '指定', 'staff members': '人員成員', 'staff': '人員', 'state location': '位置狀態', 'state': '狀況', 'status': '狀態', 'straight': '直線', 'suffered financial losses': '艱辛的財務損失', 'supports nurses in the field to assess the situation, report on their activities and keep oversight.': '支援nurses,以將欄位中評估狀湟,報告其活動和保留監督。', 'table': '表格', 'tall': '頁高', 'technical failure': '技術失敗', 'times and it is still not working. We give in. Sorry.': '時間,它仍無法運作。 我們提供。 抱歉。', 'to access the system': '來使用系統功能', 'total': '總計', 'tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': '追蹤所有shelters和儲存基本相關資訊。 它与其他模組追蹤人員相關聯的shelter,可用的服務等等。', 'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'tweepy模組內無法使用執行中的Python-這需要安裝非Tropo Twitter支援中心!', 'unable to parse csv file': '無法剖析CSV檔', 'unapproved': '核准', 'uncheck all': '取消全選', 'unidentified': '識別', 'uninhabitable = foundation and structure destroyed': 'uninhabitable = Foundation及結构損毀', 'unknown': '不明', 'unspecified': '未指定的', 'unverified': '未驗證', 'updated': '已更新', 'updates only': '僅更新', 'urgent': '緊急', 'verified': '已驗證', 'view': '視圖', 'volunteer': '志工', 'volunteers': '志工', 'wavy': '波浪形', 'weekly': '每週', 'white': '白色', 'wider area, longer term, usually contain multiple Activities': '寬區域,長期的,通常包含多个活動', 'widowed': '鰥居', 'window': '視窗', 'windows broken, cracks in walls, roof slightly damaged': 'Windows中斷,是否在牆面,屋脊略有損壞', 'wish': '希望', 'within human habitat': '在人類居住的範圍', 'xlwt module not available within the running Python - this needs installing for XLS output!': 'xlwt模組內無法使用執行中的Python-這需要安裝XLS輸出!', 'yes': '是', }
gnarula/eden_deployment
languages/zh-tw.py
Python
mit
338,613
[ "VisIt" ]
17461ed23fc22fbfdbd441c8ba2565277fe34d7ba7aac1da4e2d9f2f5c42add2
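The record above is a web2py-style language file from Sahana Eden: the module body is a single Python dict literal mapping English source strings (msgids) to Traditional Chinese translations, which is why the keys must stay byte-identical to the strings used in the application code. As a hedged illustration only (the helper below is a minimal sketch and is not part of web2py, Sahana Eden, or this record), such a file can be loaded with ast.literal_eval and queried with a fallback to the untranslated source string:

# Hypothetical helpers, not part of web2py or of the file above: load a
# web2py-style languages/*.py file, whose body is one dict literal, and
# look messages up with a fallback to the untranslated source string.
import ast
import io

def load_language(path):
    # The file body is "{ 'msgid': 'translation', ... }", so evaluating
    # it as a literal is enough; no import machinery is needed.
    with io.open(path, encoding="utf-8") as handle:
        return ast.literal_eval(handle.read())

def translate(table, msgid):
    # Unknown msgids fall back to the English source string, which is
    # why the dict keys above must match the application strings exactly.
    return table.get(msgid, msgid)

# Usage sketch, assuming the record above is saved as languages/zh-tw.py:
# zh_tw = load_language("languages/zh-tw.py")
# translate(zh_tw, "Volunteers")      # -> '志工'
# translate(zh_tw, "Not in the file") # -> 'Not in the file'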
############################################################################### ## ## Copyright (C) 2014-2016, New York University. ## Copyright (C) 2011-2014, NYU-Poly. ## Copyright (C) 2006-2011, University of Utah. ## All rights reserved. ## Contact: contact@vistrails.org ## ## This file is part of VisTrails. ## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are met: ## ## - Redistributions of source code must retain the above copyright notice, ## this list of conditions and the following disclaimer. ## - Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in the ## documentation and/or other materials provided with the distribution. ## - Neither the name of the New York University nor the names of its ## contributors may be used to endorse or promote products derived from ## this software without specific prior written permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR ## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; ## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, ## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR ## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." ## ############################################################################### """The Visualization ToolKit (VTK) is an open source, freely available software system for 3D computer graphics, image processing, and visualization used by thousands of researchers and developers around the world. http://www.vtk.org""" from __future__ import division from identifiers import * from vistrails.core import debug from vistrails.core.requirements import MissingRequirement def package_dependencies(): import vistrails.core.packagemanager manager = vistrails.core.packagemanager.get_package_manager() if manager.has_package('org.vistrails.vistrails.spreadsheet'): return ['org.vistrails.vistrails.spreadsheet'] else: return [] def package_requirements(): from vistrails.core.requirements import require_python_module, \ python_module_exists require_python_module('vtk', { 'linux-debian': 'python-vtk', 'linux-ubuntu': 'python-vtk', 'linux-fedora': 'vtk-python'}) from vistrails.gui.requirements import require_pyqt4_api2 try: require_pyqt4_api2() except MissingRequirement: debug.warning('PyQt4 is not available. There will be no interaction ' 'between VTK and the spreadsheet.')
VisTrails/VisTrails
vistrails/packages/vtk/__init__.py
Python
bsd-3-clause
3,146
[ "VTK" ]
84507dfae34d7864132af3e74891a75d7bdbc75e8d215579be12162c77994473
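The VisTrails package __init__ above illustrates a common plugin-hook pattern: a hard requirement (vtk, with per-distro package hints) aborts loading if missing, while an optional one (PyQt4) merely logs a warning and degrades functionality. The sketch below restates that probe-and-degrade pattern in generic form; all names here are illustrative and are not part of the VisTrails API:

# Illustrative sketch of the probe-and-degrade import pattern used by
# the package hooks above; importlib and warnings are stdlib, and the
# two helper functions are hypothetical, not VisTrails functions.
import importlib
import warnings

def require(module_name, hint=""):
    # Hard requirement: re-raise with an installation hint if missing.
    try:
        return importlib.import_module(module_name)
    except ImportError:
        msg = "%s is required" % module_name
        if hint:
            msg += " (%s)" % hint
        raise ImportError(msg)

def optional(module_name, feature):
    # Soft requirement: warn and return None so callers can disable the
    # dependent feature instead of failing the whole package load.
    try:
        return importlib.import_module(module_name)
    except ImportError:
        warnings.warn("%s not available; %s disabled" % (module_name, feature))
        return None

# vtk = require("vtk", hint="e.g. python-vtk on Debian/Ubuntu")
# qt = optional("PyQt4", "VTK/spreadsheet interaction")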
""" Principal Component Analysis """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Olivier Grisel <olivier.grisel@ensta.org> # Mathieu Blondel <mathieu@mblondel.org> # License: BSD Style. import numpy as np import warnings from scipy import linalg from math import log from ..base import BaseEstimator, TransformerMixin from ..utils import array2d, check_random_state, as_float_array from ..utils import atleast2d_or_csr from ..utils.extmath import fast_logdet from ..utils.extmath import safe_sparse_dot from ..utils.extmath import randomized_svd def _assess_dimension_(spectrum, rank, n_samples, n_features): """Compute the likelihood of a rank ``rank`` dataset The dataset is assumed to be embedded in gaussian noise of shape(n, dimf) having spectrum ``spectrum``. Parameters ---------- spectrum: array of shape (n) data spectrum rank: int, tested rank value n_samples: int, number of samples dim: int, embedding/empirical dimension Returns ------- ll: float, The log-likelihood Notes ----- This implements the method of `Thomas P. Minka: Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604` """ if rank > len(spectrum): raise ValueError("The tested rank cannot exceed the rank of the" " dataset") from scipy.special import gammaln pu = -rank * np.log(2) for i in range(rank): pu += (gammaln((n_features - i) / 2) - np.log(np.pi) * (n_features - i) / 2) pl = np.sum(np.log(spectrum[:rank])) pl = -pl * n_samples / 2 if rank == n_features: pv = 0 v = 1 else: v = np.sum(spectrum[rank:]) / (n_features - rank) pv = -np.log(v) * n_samples * (n_features - rank) / 2 m = n_features * rank - rank * (rank + 1) / 2 pp = np.log(2 * np.pi) * (m + rank + 1) / 2 pa = 0 spectrum_ = spectrum.copy() spectrum_[rank:n_features] = v for i in range(rank): for j in range(i + 1, len(spectrum)): pa += (np.log((spectrum[i] - spectrum[j]) * (1. / spectrum_[j] - 1. / spectrum_[i])) + np.log(n_samples)) ll = pu + pl + pv + pp - pa / 2 - rank * np.log(n_samples) / 2 return ll def _infer_dimension_(spectrum, n_samples, n_features): """Infers the dimension of a dataset of shape (n_samples, n_features) The dataset is described by its spectrum `spectrum`. """ ll = [] for rank in range(len(spectrum)): ll.append(_assess_dimension_(spectrum, rank, n_samples, n_features)) ll = np.array(ll) return ll.argmax() class PCA(BaseEstimator, TransformerMixin): """Principal component analysis (PCA) Linear dimensionality reduction using Singular Value Decomposition of the data and keeping only the most significant singular vectors to project the data to a lower dimensional space. This implementation uses the scipy.linalg implementation of the singular value decomposition. It only works for dense arrays and is not scalable to large dimensional data. The time complexity of this implementation is ``O(n ** 3)`` assuming n ~ n_samples ~ n_features. Parameters ---------- n_components : int, None or string Number of components to keep. if n_components is not set all components are kept:: n_components == min(n_samples, n_features) if n_components == 'mle', Minka\'s MLE is used to guess the dimension if ``0 < n_components < 1``, select the number of components such that the amount of variance that needs to be explained is greater than the percentage specified by n_components copy : bool If False, data passed to fit are overwritten whiten : bool, optional When True (False by default) the `components_` vectors are divided by n_samples times singular values to ensure uncorrelated outputs with unit component-wise variances. 
Whitening will remove some information from the transformed signal (the relative variance scales of the components) but can sometimes improve the predictive accuracy of the downstream estimators by making their data respect some hard-wired assumptions. Attributes ---------- `components_` : array, [n_components, n_features] Components with maximum variance. `explained_variance_ratio_` : array, [n_components] Percentage of variance explained by each of the selected components. If n_components is not set then all components are stored and the sum of explained variances is equal to 1.0 Notes ----- For n_components='mle', this class uses the method of `Thomas P. Minka: Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604` Due to implementation subtleties of the Singular Value Decomposition (SVD), which is used in this implementation, running fit twice on the same matrix can lead to principal components with signs flipped (change in direction). For this reason, it is important to always use the same estimator object to transform data in a consistent fashion. Examples -------- >>> import numpy as np >>> from sklearn.decomposition import PCA >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> pca = PCA(n_components=2) >>> pca.fit(X) PCA(copy=True, n_components=2, whiten=False) >>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS [ 0.99244... 0.00755...] See also -------- ProbabilisticPCA RandomizedPCA KernelPCA SparsePCA """ def __init__(self, n_components=None, copy=True, whiten=False): self.n_components = n_components self.copy = copy self.whiten = whiten def fit(self, X, y=None, **params): """Fit the model with X. Parameters ---------- X: array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. Returns ------- self : object Returns the instance itself. """ self._fit(X, **params) return self def fit_transform(self, X, y=None, **params): """Fit the model with X and apply the dimensionality reduction on X. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features.
Returns ------- X_new : array-like, shape (n_samples, n_components) """ U, S, V = self._fit(X, **params) U = U[:, :self.n_components] if self.whiten: # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples) U *= np.sqrt(X.shape[0]) else: # X_new = X * V = U * S * V^T * V = U * S U *= S[:self.n_components] return U def _fit(self, X): X = array2d(X) n_samples, n_features = X.shape X = as_float_array(X, copy=self.copy) # Center data self.mean_ = np.mean(X, axis=0) X -= self.mean_ U, S, V = linalg.svd(X, full_matrices=False) self.explained_variance_ = (S ** 2) / n_samples self.explained_variance_ratio_ = (self.explained_variance_ / self.explained_variance_.sum()) if self.whiten: self.components_ = V / S[:, np.newaxis] * np.sqrt(n_samples) else: self.components_ = V if self.n_components == 'mle': if n_samples < n_features: raise ValueError("n_components='mle' is only supported " "if n_samples >= n_features") self.n_components = _infer_dimension_(self.explained_variance_, n_samples, n_features) elif (self.n_components is not None and 0 < self.n_components and self.n_components < 1.0): # number of components for which the cumulated explained variance # percentage is superior to the desired threshold ratio_cumsum = self.explained_variance_ratio_.cumsum() self.n_components = np.sum(ratio_cumsum < self.n_components) + 1 if self.n_components is not None: self.components_ = self.components_[:self.n_components, :] self.explained_variance_ = \ self.explained_variance_[:self.n_components] self.explained_variance_ratio_ = \ self.explained_variance_ratio_[:self.n_components] return (U, S, V) def transform(self, X): """Apply the dimensionality reduction on X. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where n_samples in the number of samples and n_features is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) """ X = array2d(X) X_transformed = X - self.mean_ X_transformed = np.dot(X_transformed, self.components_.T) return X_transformed def inverse_transform(self, X): """Transform data back to its original space, i.e., return an input X_original whose transform would be X Parameters ---------- X : array-like, shape (n_samples, n_components) New data, where n_samples in the number of samples and n_components is the number of components. Returns ------- X_original array-like, shape (n_samples, n_features) Notes ----- If whitening is enabled, inverse_transform does not compute the exact inverse operation as transform. 
""" return np.dot(X, self.components_) + self.mean_ class ProbabilisticPCA(PCA): """Additional layer on top of PCA that adds a probabilistic evaluation""" __doc__ += PCA.__doc__ def fit(self, X, y=None, homoscedastic=True): """Additionally to PCA.fit, learns a covariance model Parameters ---------- X : array of shape(n_samples, n_features) The data to fit homoscedastic : bool, optional, If True, average variance across remaining dimensions """ PCA.fit(self, X) n_features = X.shape[1] self._dim = n_features Xr = X - self.mean_ Xr -= np.dot(np.dot(Xr, self.components_.T), self.components_) n_samples = X.shape[0] if n_features <= self.n_components: delta = np.zeros(n_features) elif homoscedastic: delta = ((Xr ** 2).sum() * np.ones(n_features) / (n_samples * n_features)) else: delta = (Xr ** 2).mean(0) / (n_features - self.n_components) self.covariance_ = np.diag(delta) n_components = self.n_components if n_components is None: n_components = n_features for k in range(n_components): add_cov = np.outer(self.components_[k], self.components_[k]) self.covariance_ += self.explained_variance_[k] * add_cov return self def score(self, X, y=None): """Return a score associated to new data Parameters ---------- X: array of shape(n_samples, n_features) The data to test Returns ------- ll: array of shape (n_samples), log-likelihood of each row of X under the current model """ Xr = X - self.mean_ n_features = X.shape[1] log_like = np.zeros(X.shape[0]) self.precision_ = linalg.inv(self.covariance_) log_like = -.5 * (Xr * (np.dot(Xr, self.precision_))).sum(axis=1) log_like -= .5 * (fast_logdet(self.covariance_) + n_features * log(2 * np.pi)) return log_like class RandomizedPCA(BaseEstimator, TransformerMixin): """Principal component analysis (PCA) using randomized SVD Linear dimensionality reduction using approximated Singular Value Decomposition of the data and keeping only the most significant singular vectors to project the data to a lower dimensional space. This implementation uses a randomized SVD implementation and can handle both scipy.sparse and numpy dense arrays as input. Parameters ---------- n_components : int Maximum number of components to keep: default is 50. copy : bool If False, data passed to fit are overwritten iterated_power : int, optional Number of iteration for the power method. 3 by default. whiten : bool, optional When True (False by default) the `components_` vectors are divided by the singular values to ensure uncorrelated outputs with unit component-wise variances. Whitening will remove some information from the transformed signal (the relative variance scales of the components) but can sometime improve the predictive accuracy of the downstream estimators by making their data respect some hard-wired assumptions. random_state : int or RandomState instance or None (default) Pseudo Random Number generator seed control. If None, use the numpy.random singleton. Attributes ---------- `components_` : array, [n_components, n_features] Components with maximum variance. `explained_variance_ratio_` : array, [n_components] Percentage of variance explained by each of the selected components. 
If n_components is not set then all components are stored and the sum of explained variances is equal to 1.0 Examples -------- >>> import numpy as np >>> from sklearn.decomposition import RandomizedPCA >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> pca = RandomizedPCA(n_components=2) >>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE RandomizedPCA(copy=True, iterated_power=3, n_components=2, random_state=None, whiten=False) >>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS [ 0.99244... 0.00755...] See also -------- PCA ProbabilisticPCA References ---------- .. [Halko2009] `Finding structure with randomness: Stochastic algorithms for constructing approximate matrix decompositions Halko, et al., 2009 (arXiv:909)` .. [MRT] `A randomized algorithm for the decomposition of matrices Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert` """ def __init__(self, n_components=None, copy=True, iterated_power=3, whiten=False, random_state=None): self.n_components = n_components self.copy = copy self.iterated_power = iterated_power self.whiten = whiten self.mean_ = None self.random_state = random_state def fit(self, X, y=None): """Fit the model to the data X. Parameters ---------- X: array-like or scipy.sparse matrix, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. Returns ------- self : object Returns the instance itself. """ random_state = check_random_state(self.random_state) if not hasattr(X, 'todense'): # not a sparse matrix, ensure this is a 2D array X = np.atleast_2d(as_float_array(X, copy=self.copy)) n_samples = X.shape[0] if not hasattr(X, 'todense'): # Center data self.mean_ = np.mean(X, axis=0) X -= self.mean_ if self.n_components is None: n_components = X.shape[1] else: n_components = self.n_components U, S, V = randomized_svd(X, n_components, n_iter=self.iterated_power, random_state=random_state) self.explained_variance_ = exp_var = (S ** 2) / n_samples self.explained_variance_ratio_ = exp_var / exp_var.sum() if self.whiten: n = X.shape[0] self.components_ = V / S[:, np.newaxis] * np.sqrt(n) else: self.components_ = V return self def transform(self, X): """Apply dimensionality reduction on X. Parameters ---------- X : array-like or scipy.sparse matrix, shape (n_samples, n_features) New data, where n_samples is the number of samples and n_features is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) """ X = atleast2d_or_csr(X) if self.mean_ is not None: X = X - self.mean_ X = safe_sparse_dot(X, self.components_.T) return X def inverse_transform(self, X): """Transform data back to its original space. Returns an array X_original whose transform would be X. Parameters ---------- X : array-like or scipy.sparse matrix, shape (n_samples, n_components) New data, where n_samples is the number of samples and n_components is the number of components. Returns ------- X_original array-like, shape (n_samples, n_features) Notes ----- If whitening is enabled, inverse_transform does not compute the exact inverse operation of transform. """ X_original = safe_sparse_dot(X, self.components_) if self.mean_ is not None: X_original = X_original + self.mean_ return X_original
lucidfrontier45/scikit-learn
sklearn/decomposition/pca.py
Python
bsd-3-clause
18,088
[ "Gaussian" ]
ff41b7cf69a9c126686ee5427de850a7a74b7fc81785bf17aa68313291698184
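The randomized SVD that `RandomizedPCA` relies on can be illustrated with a minimal NumPy sketch. This is not the library's `randomized_svd` (which adds oversampling and re-orthonormalization between power iterations for numerical stability); it only shows the core idea behind the `iterated_power` parameter:

import numpy as np

def randomized_svd_sketch(X, n_components, n_iter=3, random_state=0):
    # Project onto a random subspace that approximately captures range(X).
    rng = np.random.RandomState(random_state)
    Q = rng.normal(size=(X.shape[1], n_components))
    Y = np.dot(X, Q)
    # Power iterations sharpen the separation of the leading singular values.
    for _ in range(n_iter):
        Y = np.dot(X, np.dot(X.T, Y))
    Q, _ = np.linalg.qr(Y)
    # An exact SVD of the small projected matrix B recovers approximate factors:
    # X ~= (Q @ Uhat) * S @ V
    B = np.dot(Q.T, X)
    Uhat, S, V = np.linalg.svd(B, full_matrices=False)
    return np.dot(Q, Uhat), S, V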
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#############################################################################
#   Copyright 2016 Konrad Sakowski
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#############################################################################
#

import os
import collections

from sqlalchemy import create_engine, Column, Boolean, Integer, String, Float, event, ForeignKey, UniqueConstraint, MetaData, Table, select, func
from sqlalchemy.orm import sessionmaker, mapper, relationship, backref, column_property
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.serializer import loads, dumps
from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method
from sqlalchemy import inspect

Base = declarative_base()

from logging import info, warning, debug, error, exception, critical

import xmltodict
import bunch

import xmlbublus
from xmlbublus import ensure_list

definicje = xmlbublus.Definicje();
definicje.load('definicje.xml');

gatunek = xmltodict.parse(open('gatunek.xml','r').read());

rules = bunch.bunchify(xmltodict.parse(open('rules.xml','r').read())['rules']); # bunch - access as dict or as class attributes - both supported

class General(object):
	pass;

class Player(object):
	pass;

class StarSystem(object):
	pass;

class Planet(object):
	def __init__(self):
		self.exists = False;

class Race(object):
	pass;

class Civilization(object):
	pass;

class Colony(object):
	pass;

class ColonySlot(object):
	pass;

class Building(object):
	pass;

class CivFeature(object):
	pass;

class CivTech(object):
	pass;

class Gatunek(object):
	pass;

class WartCechy(object):
	pass;

class Technology(object):
	pass;

engine = create_engine('sqlite:///test.db', echo=False)
Session = sessionmaker(bind=engine)
session = Session()

metadata = MetaData()

starsystem_columns = [
	Column('id', Integer, primary_key=True),
	Column('name', String(50)),
	Column('x', Integer),
	Column('y', Integer),
	Column('color', String(20)),
	Column('blockade', Integer ), # blockade by an enemy fleet
	]

starsystem_columns += definicje['UKL'].dbcolumn();

starsystemtab = Table('starsystemtab', metadata,*starsystem_columns)

mapper(StarSystem, starsystemtab)

planet_columns = [
	Column('id', Integer, primary_key=True),
	Column('starsystem_id', Integer, ForeignKey('starsystemtab.id')),
	]

#for code in listykodow['PLK']+listykodow['PLI']:
#	if(definicje[code].typ() == "integer"):
#		planet_columns.append(Column(code, Integer));
#	elif(definicje[code].typ() == "bool"):
#		planet_columns.append(Column(code, Boolean));
#	else:
#		warning("Code %s: unknown type: %s"%((code, definicje[code].typ())));

planet_columns += definicje['PLA'].dbcolumn();

for cl in definicje['PLA']:
	planet_columns += [ Column('V'+cl, Integer), ];

planettab = Table('planettab', metadata,*planet_columns)

mapper(Planet, planettab, properties={
	'star' : relationship(StarSystem, backref='planets', order_by=planettab.c.OR),
	}
	);

# Table with single row for storing variables
generaltab = Table('generaltab', metadata,*[
	Column('id', Integer, primary_key=True),
	Column('max_x', Integer), # map dimensions
	Column('max_y', Integer), # map dimensions
	])

mapper(General, generaltab)

# Human players list
playertab = Table('playertab', metadata,*[
	Column('player_id', Integer, primary_key=True),
	Column('frame_id', Integer), # Map type: 0: Galaxy, 1: Starsystem 2: Planet
	Column('val1', Integer),
	Column('val2', Integer),
	Column('race_id', Integer),
	Column('civ_id', Integer),
	Column('EOTmenu', Integer),
	Column('curx', Integer),
	Column('cury', Integer),
	])

mapper(Player, playertab)

gatunek_columns = [
	Column('gat_id', Integer, primary_key=True),
	Column('name', String(50))
	]

gatunkitab = Table('gatunkitab', metadata, *gatunek_columns)

mapper(Gatunek,gatunkitab);

wartcech_columns = [
	Column('cech_id', Integer),
	Column('cecha', String(2), primary_key=True),
	Column('val1', Integer, primary_key=True),
	Column('val2', Integer)
	]

wartcechtab = Table('wartcechtab', metadata, *wartcech_columns)

mapper(WartCechy,wartcechtab);

races_columns = [
	Column('race_id', Integer, primary_key=True),
	Column('civ_id', Integer, ForeignKey("civilizationtab.civ_id")),
	Column('gat_id', Integer, ForeignKey("gatunkitab.gat_id")),
	Column('name', String(50)),
	]

for zm in gatunek['Istoty']['Gatunki']['Gatunek'][0].keys()+gatunek['Istoty']['Gatunki']['Gatunek'][0]['Utrzymanie'].keys():
	if not zm in ['Utrzymanie', 'Kod', 'Nazwa', 'Cechy_Gat', 'Odpornosci', 'Typ', 'Rozmiar']:
		races_columns.append(Column('G_'+zm, Integer));

for zm in gatunek['Istoty']['Cywilizacje']['Cywilizacja'][0].keys():
	if not zm in ['Przyjaciel', 'Wrog', 'Kod', 'Nazwa', 'Morale', 'Produkcja', 'Rozmnazanie', 'Grywalna', 'Natives']:
		races_columns.append(Column('R_'+zm, Integer));

for zm in gatunek['Istoty']['Cywilizacje']['Cywilizacja'][0]['Morale'].keys():
	races_columns.append(Column('M_'+zm, Integer));

for zm in gatunek['Istoty']['Cywilizacje']['Cywilizacja'][0]['Produkcja'].keys():
	races_columns.append(Column('P_'+zm, Integer));

for zm in gatunek['Istoty']['Cywilizacje']['Cywilizacja'][0]['Rozmnazanie'].keys():
	races_columns.append(Column('R_'+zm, Integer));

racetab = Table('racetab', metadata,*races_columns)

mapper(Race,racetab, # AAS
	properties={
	'feature' : relationship(CivFeature, backref='races')
	}
	);

civfeatures_columns = [
	Column('id', Integer, primary_key=True),
	Column('race_id', Integer, ForeignKey("racetab.race_id")),
	Column('feature',String(2)),
	Column('czynnik',String(2)),
	Column('val1',Integer),
	Column('val2',Integer),
	]

civfeaturetab = Table('civfeaturetab', metadata,*civfeatures_columns)

mapper(CivFeature,civfeaturetab);

colonies_columns = [
	Column('col_id', Integer, primary_key=True),
	Column('civ_id', Integer, ForeignKey("civilizationtab.civ_id")),
	Column('race_id', Integer, ForeignKey("racetab.race_id")),
	Column('planet_id', Integer, ForeignKey("planettab.id")),
	Column('star_id', Integer, ForeignKey("starsystemtab.id")),
	Column('enable_pula', Integer), # AAS on food shortage: 1: replenishing from the pool allowed, 0: not allowed, -1: replenishing from the pool allowed but blocked
	Column('capitol',Integer), # AAS 0: no, 1: local gvt, 2: empire gvt
	Column('Ekonomia', Integer), # resource economy type, as in gatunki.Ekonomia
	Column('sur_limit', Integer), # maximum amount of a resource stored locally on the planet; should not exceed limitzas
	Column('enable_upgr',Integer), # AAS if nonzero, the computer may upgrade buildings on its own
	Column('enable_bld',Integer), # AAS if nonzero, the computer may construct buildings on its own
	Column('enable_supp',Integer), # AAS if nonzero, the computer may replenish resources for constructing buildings from the pool
	#Column('POP',Float),
	#Column('PPO',Float),
	]

#for code in listykodow['PLKI']:
#	colonies_columns.append(Column(code, Integer));

#colonies_columns += xmlbublus.listakodow_to_dbcolumns(listykodow['PLK'], definicje);
colonies_columns += definicje['PLK'].dbcolumn();

# AAS for my economy
for cl in definicje['PLK']:
	if 'Z'==cl[:1]:
		colonies_columns += [ Column('I'+cl[1:], Integer), ];
		colonies_columns += [ Column('PN'+cl[1:], Integer), ];
		colonies_columns += [ Column('ZB'+cl[1:], Integer), ];

colonytab = Table('colonytab', metadata,*colonies_columns)

mapper(Colony, colonytab, properties={
	'planet' : relationship(Planet, backref=backref("colony", uselist=False)), # 1 to 1
	'star' : relationship(StarSystem, backref=backref("colonies")),
	'race' : relationship(Race, backref='colonies'),
	'civ' : relationship(Civilization, backref='colonies'),
	}
	);

# AAS technology tree
Tech_columns = [
	Column('tech_id', String(3), primary_key=True),
	Column('req_tech_1', String(3), ForeignKey("technologtab.tech_id")),
	Column('req_tech_2', String(3), ForeignKey("technologtab.tech_id")),
	Column('req_tech_3', String(3), ForeignKey("technologtab.tech_id")),
	Column('name', String(32)),
	Column('typ', Integer),
	Column('SC', Integer),
	Column('TC', Integer),
	]

technologtab = Table('technologtab', metadata,*Tech_columns)

mapper(Technology, technologtab);

# AAS civilization technologies
civ_tech_columns = [
	Column('id', Integer, primary_key=True),
	Column('civ_id', Integer, ForeignKey("civilizationtab.civ_id")),
	Column('tech', String(3), ForeignKey("technologtab.tech_id")),
	Column('lvl',Integer), # AAS 0: no, 1: local gvt, 2: empire gvt
	]

civ_techtab = Table('civ_techtab', metadata,*civ_tech_columns)

mapper(CivTech, civ_techtab, properties={
	'civ' : relationship(Civilization, backref='civtech'),
	'techdesc' : relationship(Technology, backref='techciv'),
	}
	);

# AAS building descriptions
buildings_columns = [
	Column('bld_id', Integer, primary_key=True),
	Column('kod', String(3)),
	Column('typ', String(1)),
	Column('slot', String(3)),
	Column('name', String(50)),
	Column('czas_konstr', Integer),
	Column('bld_nr', Integer),
	Column('lvl', Integer),
	Column('count', Integer),
	Column('capitol', Integer),
	Column('tech1', String(3)),
	Column('tech2', String(3)),
	Column('tech3', String(3)),
	Column('tech4', String(3)),
	Column('tech5', String(3)),
	Column('typ1', String(12)), # Note: the feature type is the table name (race, planet, colony etc.)
	Column('cecha1', String(6)), #
	Column('wart1', Integer),
	Column('typ2', String(12)),
	Column('cecha2', String(6)),
	Column('wart2', Integer),
	Column('typ3', String(12)),
	Column('cecha3', String(6)),
	Column('wart3', Integer),
	Column('typ4', String(12)),
	Column('cecha4', String(6)),
	Column('wart4', Integer),
	Column('typ5', String(12)),
	Column('cecha5', String(6)),
	Column('wart5', Integer),
#	Column('typ6', String(12)),
#	Column('cecha6', String(6)),
#	Column('wart6', Integer),
#	Column('typ7', String(12)),
#	Column('cecha7', String(6)),
#	Column('wart7', Integer),
	]

for cl in definicje['PLK']:
	if 'Z'==cl[:1]:
		buildings_columns += [ Column(cl, Integer), ];

for cl in definicje['PLK']:
	if 'P'==cl[:1]:
		buildings_columns += [ Column(cl, Integer), ];
		buildings_columns += [ Column(cl+'_t', String(12)), ];
		buildings_columns += [ Column(cl+'_c', String(6)), ];

buildingtab = Table('buildingtab', metadata,*buildings_columns)

mapper(Building,buildingtab);

# AAS building slots in colonies
col_slot_columns = [
	Column('slot_id', Integer, primary_key=True),
	Column('planet_id', Integer, ForeignKey("planettab.id")),
	Column('bld_id', Integer, ForeignKey("buildingtab.bld_id")),
	Column('race_id', Integer, ForeignKey("racetab.race_id")),
	Column('slot_type', Integer), # 0: land, 1: water
	Column('lvl', Integer ),
	Column('readytime', Integer ), # time until ready
	Column('deliverywait', Integer ) # waiting for a delivery from the capital
	]

colonyslottab = Table('colonyslottab', metadata,*col_slot_columns)

mapper(ColonySlot,colonyslottab, properties={
	'building' : relationship(Building, backref=backref("buildings", uselist=False)),
	'planet' : relationship(Planet, backref=backref("slots"))
	}
	);

civilizations_columns = [
	Column('civ_id', Integer, primary_key=True),
	Column('gat_id', Integer, ForeignKey("gatunkitab.gat_id")),
	Column('color', String(20)),
	Column('colony_count',Integer),
	Column('colony_opt',Integer),
	Column('Ekonomia',Integer),
	Column('cap_x',Integer),
	Column('cap_y',Integer),
	Column('typ',Integer),
	Column('player',Integer),
	]

civilizations_columns += definicje['CIV'].dbcolumn();

civilizationtab = Table('civilizationtab', metadata,*civilizations_columns)

mapper(Civilization,civilizationtab, properties={
	'race' : relationship(Race, backref='civilization', uselist=False),
	'IPO' : column_property(select([func.sum(Colony.POP)]).where(Colony.civ_id==civilizationtab.c.civ_id)),
	'MPO' : column_property(select([func.sum(Colony.MPO)]).where(Colony.civ_id==civilizationtab.c.civ_id)),
	'PPO' : column_property(select([func.sum(Colony.PPO)]).where(Colony.civ_id==civilizationtab.c.civ_id)),
	'PSC' : column_property(select([func.sum(Colony.PSC)]).where(Colony.civ_id==civilizationtab.c.civ_id)),
	'PTC' : column_property(select([func.sum(Colony.PTC)]).where(Colony.civ_id==civilizationtab.c.civ_id)),
	'MSE' : column_property(select([100000 * func.count(Colony.col_id)]).where(Colony.civ_id==civilizationtab.c.civ_id)),
	'PSE' : column_property(select([func.sum(Colony.PSE)]).where(Colony.civ_id==civilizationtab.c.civ_id)),
	'ZSE' : column_property(select([func.sum(Colony.ZSE)]).where(Colony.civ_id==civilizationtab.c.civ_id)),
	'MSM' : column_property(select([100000 * func.count(Colony.col_id)]).where(Colony.civ_id==civilizationtab.c.civ_id)),
	'PSM' : column_property(select([func.sum(Colony.PSM)]).where(Colony.civ_id==civilizationtab.c.civ_id)),
	'ZSM' : column_property(select([func.sum(Colony.ZSM)]).where(Colony.civ_id==civilizationtab.c.civ_id)),
	'MSR' : column_property(select([100000 *
func.count(Colony.col_id)]).where(Colony.civ_id==civilizationtab.c.civ_id)), 'PSR' : column_property(select([func.sum(Colony.PSR)]).where(Colony.civ_id==civilizationtab.c.civ_id)), 'ZSR' : column_property(select([func.sum(Colony.ZSR)]).where(Colony.civ_id==civilizationtab.c.civ_id)), 'MSW' : column_property(select([100000 * func.count(Colony.col_id)]).where(Colony.civ_id==civilizationtab.c.civ_id)), 'PSW' : column_property(select([func.sum(Colony.PSW)]).where(Colony.civ_id==civilizationtab.c.civ_id)), 'ZSW' : column_property(select([func.sum(Colony.ZSW)]).where(Colony.civ_id==civilizationtab.c.civ_id)), 'MZG' : column_property(select([2 * func.sum(Colony.POP)]).where(Colony.civ_id==civilizationtab.c.civ_id)), 'PZG' : column_property(select([func.sum(Colony.PZG)]).where(Colony.civ_id==civilizationtab.c.civ_id)), 'ZZG' : column_property(select([func.sum(Colony.ZZG)]).where(Colony.civ_id==civilizationtab.c.civ_id)), } ); def create_tables(): metadata.drop_all(engine) metadata.create_all(engine) session.commit(); def table_overwrite(srcsess, destsess, desteng, table): table.drop(desteng, checkfirst=True); table.create(desteng); for row in srcsess.execute(table.select()).fetchall(): print row destsess.execute(table.insert(), row); destsess.commit(); def save_or_load(srcsess, destsess, desteng): table_overwrite(srcsess, destsess, desteng, generaltab); table_overwrite(srcsess, destsess, desteng, playertab); table_overwrite(srcsess, destsess, desteng, starsystemtab); table_overwrite(srcsess, destsess, desteng, planettab); table_overwrite(srcsess, destsess, desteng, civilizationtab); table_overwrite(srcsess, destsess, desteng, racetab); table_overwrite(srcsess, destsess, desteng, civfeaturetab); table_overwrite(srcsess, destsess, desteng, colonytab); table_overwrite(srcsess, destsess, desteng, buildingtab); table_overwrite(srcsess, destsess, desteng, colonyslottab); def load_game(filename): engine_from = create_engine('sqlite:///'+filename, echo=True) Session_from = sessionmaker(bind=engine_from) session_from = Session_from() save_or_load(session_from, session, engine); def save_game(filename): engine_to = create_engine('sqlite:///'+filename, echo=True) Session_to = sessionmaker(bind=engine_to) session_to = Session_to() save_or_load(session, session_to, engine_to); save_dir = os.path.expanduser("~")+"/.bublus"; if not os.path.exists(save_dir): os.makedirs(save_dir); def update_starsystem_colors(): stars = session.query(StarSystem).all() for star in stars: star.color = "WHITE" session.add(star); stars = session.query(StarSystem).join(Planet).join(Colony).all() for star in stars: debug(star.name); color = set(colony.civ.color for colony in star.colonies); if(len(color) == 1): star.color = color.pop(); else: star.color = "RED"; debug(star.color); session.add(star); session.commit();
ghkonrad/bublus
libbublus.py
Python
apache-2.0
16,357
[ "Galaxy" ]
5317000cf9a80936973403266e4ba516fb53844fe0dbf0390cb1543d07e795b9
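A minimal, self-contained sketch of the aggregate `column_property` pattern used for the civilization totals above, with simplified stand-in tables (not the game's real schema):

from sqlalchemy import Column, ForeignKey, Integer, MetaData, Table, select, func
from sqlalchemy.orm import column_property, mapper

metadata = MetaData()

civ = Table('civ', metadata, Column('id', Integer, primary_key=True))
colony = Table('colony', metadata,
	Column('id', Integer, primary_key=True),
	Column('civ_id', Integer, ForeignKey('civ.id')),
	Column('pop', Integer))

class Civ(object):
	pass

class Colony(object):
	pass

mapper(Colony, colony)
mapper(Civ, civ, properties={
	# Correlated subquery; the database recomputes it whenever a Civ row loads.
	'total_pop' : column_property(
		select([func.coalesce(func.sum(colony.c.pop), 0)]).where(colony.c.civ_id==civ.c.id)),
	})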
import mdtraj
import numpy as np
from subprocess import call, PIPE


def write_cpptraj_script(traj, top, frame1=1, frame2=1, outfile=None,
                         write=True, run=False):
    """
    Create a cpptraj script to load a specific range of frames from a
    trajectory and write them out to a file

    :param traj: str, Location on disk of the trajectory to load
    :param top: str, Location on disk of the topology file
    :param frame1: int, The first frame to load
    :param frame2: int, The last frame to load
    :param outfile: str, Name (with file format extension) of the output trajectory
    :param write: bool, Whether to write the script to a file on disk
    :param run: bool, Whether to run the script after writing it to disk
    :return cmds: str, the string representing the cpptraj script
    """
    if run and not write:
        raise ValueError('Cannot call the script without writing it to disk')
    if outfile is None:
        outfile = 'pdbs/' + traj.split('.')[0] + '.pdb'
    commands = [
        'parm {}'.format(top),
        'trajin {} {} {}'.format(traj, frame1, frame2),
        'trajout {}'.format(outfile),
        'run'
    ]
    cmds = '\n'.join(commands)
    if write:
        with open('script.cpptraj', 'w') as f:
            f.write(cmds)
    if run:
        call(['cpptraj', '-i', 'script.cpptraj'], stdout=PIPE)
    return cmds


def load_Trajs(trajfiles_list, prmtop_file, stride=1, chunk=1000):
    """
    Iteratively loads a list of NetCDF files and returns them
    as a list of mdtraj.Trajectory objects

    Parameters
    ----------
    trajfiles_list: list of str
            List with the names of trajectory files
    prmtop_file: str
            Name of the prmtop file
    stride: int
            Read every stride-th frame when loading the trajectories
    chunk: int
            Number of frames to load at once from disk per iteration.
            If 0, load all.

    Returns
    -------
    list_chunks: list
            List of mdtraj.Trajectory objects, each of 'chunk' length
    """
    list_chunks = []
    for traj in trajfiles_list:
        for frag in mdtraj.iterload(traj, chunk=chunk, top=prmtop_file,
                                    stride=stride):
            list_chunks.append(frag)
    return(list_chunks)


def load_Trajs_generator(trajfiles_list, prmtop_file, stride, chunk):
    """
    Iteratively loads a list of NetCDF files and returns them
    as an iterable of mdtraj.Trajectory objects

    Parameters
    ----------
    trajfiles_list: list of str
            List with the names of trajectory files
    prmtop_file: str
            Name of the prmtop file
    stride: int
            Read every stride-th frame when loading the trajectories
    chunk: int
            Number of frames to load at once from disk per iteration.
            If 0, load all.

    Yields
    ------
    frag: mdtraj.Trajectory
    """
    try:
        for traj in trajfiles_list:
            for frag in mdtraj.iterload(traj, chunk=chunk, top=prmtop_file,
                                        stride=stride):
                yield frag
    except OSError:
        # User passed a single long trajectory as a string,
        # so there's no need to iterate through it.
        for frag in mdtraj.iterload(trajfiles_list, chunk=chunk,
                                    top=prmtop_file, stride=stride):
            yield frag


def traj_list_to_dict(trajfiles_list, prmtop_file, stride=1):
    """
    Loads a list of trajs passed as a list of strings into a dictionary
    whose keys are integers starting at 0
    """
    trajs_dict = {}
    for i, traj in enumerate(trajfiles_list):
        trajs_dict[i] = mdtraj.load(traj, top=prmtop_file, stride=stride)
    return trajs_dict


def split_trajs_by_type(traj_dict, meta):
    """
    Find the simulation types recorded in the meta object and build a
    dictionary that has them as keys. Then, build a dictionary of the
    trajs inside traj_dict that belong to each type.
""" if len(traj_dict) != len(meta): raise ValueError('Lengths of traj_dict and meta do not match.') type_set = set(meta['type']) # dict which stores each subtype dict of trajs type_dict = dict.fromkeys(type_set) for t in type_set: new_dict = {} for i, row in meta.iterrows(): if row['type'] == t: new_dict[i] = traj_dict[i] type_dict[t] = new_dict return type_dict def trim_centers_by_region(clusterer, x1=None, x2=None, y1=None, y2=None, obs=(0, 1)): """ Find the cluster centers that fall within a user-defined region. :param clusterer: an msmbuilder cluster object :param x1: float The low limit of the x axis :param x2: float The high limit of the x axis :param y1: float The low limit of the y axis :param y2: float The high limit of the y axis :param obs: tuple, the dimensions to sample :return trimmed: np.array, Cluster centers that are within the region """ if not hasattr(clusterer, 'cluster_centers_'): raise AttributeError('The provided clusterer object has no cluster_centers_ property.') centers = clusterer.cluster_centers_ pruned = centers[:, obs] if x1 is None: x1 = np.min(pruned[:, 0]) if y1 is None: y1 = np.min(pruned[:, 1]) if x2 is None: x2 = np.max(pruned[:, 0]) if y2 is None: y2 = np.max(pruned[:, 1]) trimmed = centers[ ((pruned[:, 0] > x1) & (pruned[:, 0] < x2)) & ((pruned[:, 1] > y1) & (pruned[:, 1] < y2)) ] return trimmed def cartesian_product(x, y): return np.transpose([np.tile(x, len(y)), np.repeat(y, len(x))]) def generate_traj_from_stateinds(inds, meta, atom_selection='all'): """ Concatenate several frames from different trajectories to create a new one. Parameters ---------- inds: list of tuples, Each element of the list has to be a 2D tuple of ints (traj_index, frame_index) meta: a metadata object atom_selection: str, Which atoms to load Returns ------- traj: mdtraj.Trajectory """ frame_list = [] for traj_i, frame_i in inds: top = mdtraj.load_prmtop(meta.loc[traj_i]['top_fn']) atoms = top.select(atom_selection) frame_list.append( mdtraj.load_frame(meta.loc[traj_i]['traj_fn'], atom_indices=atoms, index=frame_i, top=meta.loc[traj_i]['top_fn']) ) traj = mdtraj.join(frame_list, check_topology=False) traj.center_coordinates() traj.superpose(traj, 0) return traj def load_in_vmd(dirname, inds): k = len(inds[0]) templ = [ '# Defaults', 'mol default material AOChalky', 'mol default representation NewCartoon', 'color Display {Background} white', 'axes location off', ] for i in range(k): templ += [ '# State {}'.format(i), 'mol new {}/{:03d}.pdb'.format(dirname, i), 'mol rename top State-{}'.format(i), 'mol modcolor 0 top ColorID {}'.format(i), 'mol drawframes top 0 0:{k}'.format(k=k), 'mol modselect 0 top resid 1 to 161', 'mol modcolor 0 top ColorID 0', 'mol addrep top', 'mol modselect 1 top resid 162 to 248', 'mol modcolor 1 top ColorID 7', 'mol addrep top', 'mol modselect 2 top resid 249 to 419', 'mol modcolor 2 top ColorID 1', 'mol addrep top', 'mol modselect 3 top not protein and not resname CAL', 'mol modstyle 3 top Licorice', 'mol addrep top', 'mol modselect 4 top resname CAL', 'mol modstyle 4 top VDW', 'mol modcolor 4 top ColorID 6' '', ] return '\n'.join(templ) def get_source_sink(msm, clusterer, eigenvector, out_naming='msm'): """ Get the source and sink of a given eigenvector, in cluster naming of clusterer object :param msm: :param clusterer: :param eigenvector: :return: """ source_msm_naming = np.argmin(msm.left_eigenvectors_[:, eigenvector]) sink_msm_naming = np.argmax(msm.left_eigenvectors_[:, eigenvector]) source_clusterer_naming = msm.state_labels_[source_msm_naming] 
sink_clusterer_naming = msm.state_labels_[sink_msm_naming] assert msm.mapping_[source_clusterer_naming] == source_msm_naming assert msm.mapping_[sink_clusterer_naming] == sink_msm_naming if out_naming == 'msm': return source_msm_naming, sink_msm_naming elif out_naming == 'clusterer': return source_clusterer_naming, sink_clusterer_naming else: raise ValueError('out_naming is not valid')
jeiros/Scripts
AnalysisMDTraj/traj_utils.py
Python
mit
8,855
[ "MDTraj", "NetCDF" ]
134940efef3755013a11eaa7db23ac40e081351faf2c24a1482ae77957be9f38
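A hedged usage sketch for `load_Trajs_generator` above: stream chunks so a long trajectory never sits in memory, accumulating a frame count and a mean radius of gyration. The file names are placeholders, not paths from this repository:

import mdtraj

traj_files = ['run1.nc', 'run2.nc']  # placeholder trajectory paths
topology = 'system.prmtop'           # placeholder topology path

n_frames = 0
rg_sum = 0.0
for chunk in load_Trajs_generator(traj_files, topology, stride=10, chunk=500):
    rg = mdtraj.compute_rg(chunk)    # one value per frame in this chunk
    n_frames += chunk.n_frames
    rg_sum += rg.sum()

print('mean Rg over %d frames: %.3f nm' % (n_frames, rg_sum / n_frames))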
from .base import Message
from .decoder import register_to_decoder


@register_to_decoder()
class ControlConstantsMessage(Message):
    """
    Receive Control Constants changes
    """
    cmd = 'S'

    mode = None
    beer_setpoint = None
    fridge_setpoint = None

    data_mapping = {
        'mode': 'mode',
        'beerSet': 'beer_setpoint',
        'fridgeSet': 'fridge_setpoint'
    }

    def __init__(self, mode, beer_setpoint, fridge_setpoint):
        self.mode = mode
        self.beer_setpoint = beer_setpoint
        self.fridge_setpoint = fridge_setpoint

    def visit(self, aMessageHandler):
        aMessageHandler.control_constants(self)

    def __str__(self):
        return "Control Constants <mode:{0}, beer setpoint:{1}, fridge setpoint:{2}>".format(
            self.mode, self.beer_setpoint, self.fridge_setpoint)


@register_to_decoder()
class ControlSettingsMessage(Message):
    """
    Receive Control Settings changes
    """
    cmd = 'C'

    data_mapping = {
        'tempFormat': 'temp_format',

        'heater1_kp': 'heater1_kp',
        'heater1_ti': 'heater1_ti',
        'heater1_td': 'heater1_td',
        'heater1_infilt': 'heater1_infilter',
        'heater1_dfilt': 'heater1_dfilter',
        'heater1PwmPeriod': 'heater1_pwm_period',

        'heater2_kp': 'heater2_kp',
        'heater2_ti': 'heater2_ti',
        'heater2_td': 'heater2_td',
        'heater2_infilt': 'heater2_infilter',
        'heater2_dfilt': 'heater2_dfilter',
        'heater2PwmPeriod': 'heater2_pwm_period',

        'cooler_kp': 'cooler_kp',
        'cooler_ti': 'cooler_ti',
        'cooler_td': 'cooler_td',
        'cooler_infilt': 'cooler_infilter',
        'cooler_dfilt': 'cooler_dfilter',
        'coolerPwmPeriod': 'cooler_pwm_period',

        'beer2fridge_kp': 'beer2fridge_kp',
        'beer2fridge_ti': 'beer2fridge_ti',
        'beer2fridge_td': 'beer2fridge_td',
        'beer2fridge_infilt': 'beer2fridge_infilter',
        'beer2fridge_dfilt': 'beer2fridge_dfilter',

        'minCoolTime': 'min_cool_time',
        'minCoolIdleTime': 'min_cool_idle_time',
        'beer2fridge_pidMax': 'beer2fridge_pid_max',
        'deadTime': 'deadtime'
    }

    def __init__(self, temp_format,
                 heater1_kp, heater1_ti, heater1_td, heater1_infilter,
                 heater1_dfilter, heater1_pwm_period,
                 heater2_kp, heater2_ti, heater2_td, heater2_infilter,
                 heater2_dfilter, heater2_pwm_period,
                 cooler_kp, cooler_ti, cooler_td, cooler_infilter,
                 cooler_dfilter, cooler_pwm_period,
                 min_cool_time, min_cool_idle_time,
                 beer2fridge_kp, beer2fridge_ti, beer2fridge_td,
                 beer2fridge_infilter, beer2fridge_dfilter,
                 beer2fridge_pid_max, deadtime):
        self.temp_format = temp_format

        # heater 1
        self.heater1_kp = heater1_kp
        self.heater1_ti = heater1_ti
        self.heater1_td = heater1_td
        self.heater1_infilter = heater1_infilter
        self.heater1_dfilter = heater1_dfilter
        self.heater1_pwm_period = heater1_pwm_period

        # heater 2
        self.heater2_kp = heater2_kp
        self.heater2_ti = heater2_ti
        self.heater2_td = heater2_td
        self.heater2_infilter = heater2_infilter
        self.heater2_dfilter = heater2_dfilter
        self.heater2_pwm_period = heater2_pwm_period

        # cooler 1
        self.cooler_kp = cooler_kp
        self.cooler_ti = cooler_ti
        self.cooler_td = cooler_td
        self.cooler_infilter = cooler_infilter
        self.cooler_dfilter = cooler_dfilter
        self.cooler_pwm_period = cooler_pwm_period

        self.min_cool_time = min_cool_time
        self.min_cool_idle_time = min_cool_idle_time

        # beer2fridge
        self.beer2fridge_kp = beer2fridge_kp
        self.beer2fridge_ti = beer2fridge_ti
        self.beer2fridge_td = beer2fridge_td
        self.beer2fridge_infilter = beer2fridge_infilter
        self.beer2fridge_dfilter = beer2fridge_dfilter
        self.beer2fridge_pid_max = beer2fridge_pid_max

        self.deadtime = deadtime

    def visit(self, aMessageHandler):
        aMessageHandler.control_settings(self)

    def __str__(self):
        return "Control Settings <...>"
glibersat/brewpiv2
brewpiv2/messages/control.py
Python
agpl-3.0
4,405
[ "VisIt" ]
3b8c6f5b4506dec1ec9207a37a37a4aebe3b910370d1581f921ef62444bc63e2
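The decoder referenced by `register_to_decoder` is not shown in this file; the following is only a plausible sketch of how `data_mapping` could translate a raw payload's wire keys into constructor arguments, not brewpiv2's actual decoder:

def build_message(msg_cls, raw):
    # Map raw wire keys (e.g. 'beerSet') to constructor kwargs (e.g. 'beer_setpoint').
    kwargs = dict((attr, raw[key]) for key, attr in msg_cls.data_mapping.items())
    return msg_cls(**kwargs)

msg = build_message(ControlConstantsMessage,
                    {'mode': 'b', 'beerSet': 20.0, 'fridgeSet': 18.5})
print(msg)  # Control Constants <mode:b, beer setpoint:20.0, fridge setpoint:18.5>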
# Copyright 2014 Google Inc. All Rights Reserved. """Command for creating images.""" from googlecloudsdk.calliope import exceptions from googlecloudsdk.compute.lib import base_classes from googlecloudsdk.compute.lib import constants from googlecloudsdk.compute.lib import utils class Create(base_classes.BaseAsyncCreator): """Create Google Compute Engine images.""" @staticmethod def Args(parser): parser.add_argument( '--description', help=('An optional, textual description for the image being created.')) source_group = parser.add_mutually_exclusive_group(required=True) source_uri = source_group.add_argument( '--source-uri', help=('The full Google Cloud Storage URI where the disk image is ' 'stored.')) source_uri.detailed_help = """\ The full Google Cloud Storage URI where the disk image is stored. This file must be a gzip-compressed tarball whose name ends in ``.tar.gz''. This flag is mutually exclusive with ``--source-disk''. """ source_disk = source_group.add_argument( '--source-disk', help='A source disk to create the image from.') source_disk.detailed_help = """\ A source disk to create the image from. The value for this option can be the name of a disk with the zone specified via ``--source-disk-zone'' flag. This flag is mutually exclusive with ``--source-uri''. """ source_disk_zone = parser.add_argument( '--source-disk-zone', help='The zone of the disk specified by --source-disk.') source_disk_zone.detailed_help = ("""\ The zone of the disk specified by --source-disk. """ + constants.ZONE_PROPERTY_EXPLANATION) parser.add_argument( 'name', metavar='NAME', help='The name of the image to create.') @property def service(self): return self.compute.images @property def method(self): return 'Insert' @property def resource_type(self): return 'images' def CreateRequests(self, args): """Returns a list of requests necessary for adding images.""" image = self.messages.Image( name=args.name, description=args.description, sourceType=self.messages.Image.SourceTypeValueValuesEnum.RAW) # Validate parameters. if args.source_disk_zone and not args.source_disk: raise exceptions.ToolException( 'You cannot specify [--source-disk-zone] unless you are specifying ' '[--source-disk].') if args.source_uri: source_uri = utils.NormalizeGoogleStorageUri(args.source_uri) image.rawDisk = self.messages.Image.RawDiskValue(source=source_uri) else: source_disk_ref = self.CreateZonalReference( args.source_disk, args.source_disk_zone, flag_names=['--source-disk-zone'], resource_type='disks') image.sourceDisk = source_disk_ref.SelfLink() request = self.messages.ComputeImagesInsertRequest( image=image, project=self.project) return [request] Create.detailed_help = { 'brief': 'Create Google Compute Engine images', 'DESCRIPTION': """\ *{command}* is used to create custom disk images. The resulting image can be provided during instance or disk creation so that the instance attached to the resulting disks has access to a known set of software or files from the image. Images can be created from gzipped compressed tarball containing raw disk data or from existing disks in any zone. Images are global resources, so they can be used across zones and projects. To learn more about creating image tarballs, visit link:https://developers.google.com/compute/docs/images[]. """, }
harshilasu/LinkurApp
y/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/images/create.py
Python
gpl-3.0
3,836
[ "VisIt" ]
f6df4be3cb37306e375625e46f0a9b1f36f6031ec81ed4c396d3b2f9c6f8d9d0
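The mutually exclusive `--source-uri`/`--source-disk` arrangement above can be reproduced with plain argparse; the real command goes through gcloud's calliope wrappers, so this is a behavioral sketch only:

import argparse

parser = argparse.ArgumentParser(prog='images create')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--source-uri', help='Cloud Storage URI of a .tar.gz disk image')
group.add_argument('--source-disk', help='Name of a source disk')
parser.add_argument('--source-disk-zone', help='Zone of the disk named by --source-disk')
parser.add_argument('name', help='Name of the image to create')

args = parser.parse_args(['my-image', '--source-disk', 'disk-1',
                          '--source-disk-zone', 'us-central1-a'])
# The same cross-flag validation CreateRequests performs:
if args.source_disk_zone and not args.source_disk:
    parser.error('--source-disk-zone requires --source-disk')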
import os
import fileinput
import sys
from os.path import join, getsize

licenseFile = open( "COPYING" )
licenseLines = licenseFile.readlines( 100000 )

total = 0

for root, dirs, files in os.walk(sys.argv[1]):
    for name in files:
        f = open( join(root, name), "r+" )
        fileLines = f.readlines(100000)
        total += len( fileLines )
        f.seek( 0, os.SEEK_SET )
        for line in licenseLines:
            f.write( line )
        for line in fileLines:
            f.write( line )
        f.close()
    if '.svn' in dirs:
        dirs.remove('.svn')  # don't visit .svn directories

print "Total lines: " + str(total)
2666hz/Cinder
tools/scripts/prependLicense.py
Python
bsd-2-clause
622
[ "VisIt" ]
63cc77504fdfff0bbd2f56d6e2c3b9ab3aa6339646aed03a1ccef523782aa4da
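The loop above prepends the license unconditionally, so running the script twice duplicates the header. A small sketch of an idempotent variant of the per-file step, in the same Python 2 style (assumes `os` is imported as in the script):

def prepend_once(path, license_text):
    f = open(path, 'r+')
    body = f.read()
    if body.startswith(license_text):  # already licensed; leave untouched
        f.close()
        return False
    f.seek(0, os.SEEK_SET)
    f.write(license_text + body)
    f.close()
    return True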
import pylab as pyl import numpy as np import matplotlib.pyplot as pp #from enthought.mayavi import mlab import scipy as scp import scipy.ndimage as ni import scipy.io import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3') import rospy #import hrl_lib.mayavi2_util as mu import hrl_lib.viz as hv import hrl_lib.util as ut import hrl_lib.matplotlib_util as mpu import pickle import ghmm import random import sys sys.path.insert(0, '/home/tapo/git/hrl_haptic_manipulation_in_clutter/sandbox_tapo_darpa_m3/src/skin_related/Classification/Classification_with_HMM/Single_Contact_Classification/multivariate_gaussian_emissions') from test_crossvalidation_force_motion_10_states import cov_rf from test_crossvalidation_force_motion_10_states import cov_rm from test_crossvalidation_force_motion_10_states import cov_sf from test_crossvalidation_force_motion_10_states import cov_sm #print cov_rf def scaling(Fvec_a,Fvec_c): # With Scaling max_a = np.max(abs(Fvec_a)) min_a = np.min(abs(Fvec_a)) mean_a = np.mean(Fvec_a) std_a = np.std(Fvec_a) #Fvec_a = (Fvec_a)/max_a #Fvec_a = (Fvec_a-mean_a) #Fvec_a = (Fvec_a-mean_a)/max_a Fvec_a = (Fvec_a-mean_a)/std_a max_c = np.max(abs(Fvec_c)) min_c = np.min(abs(Fvec_c)) mean_c = np.mean(Fvec_c) std_c = np.std(Fvec_c) #Fvec_c = (Fvec_c)/max_c #Fvec_c = (Fvec_c-mean_c) #Fvec_c = (Fvec_c-mean_c)/max_c Fvec_c = (Fvec_c-mean_c)/std_c #Fvec_c = Fvec_c*np.max((max_a,max_c))/max_c data = np.concatenate((Fvec_a,Fvec_c),axis=0) #print np.shape(data) return data # Returns mu,sigma for 10 hidden-states from feature-vectors(123,35) for RF,SF,RM,SM models def feature_to_mu_cov(fvec1,fvec2): index = 0 m,n = np.shape(fvec1) #print m,n mu_1 = np.zeros((10,1)) mu_2 = np.zeros((10,1)) cov = np.zeros((10,2,2)) DIVS = m/10 while (index < 10): m_init = index*DIVS temp_fvec1 = fvec1[(m_init):(m_init+DIVS),0:] #print temp_fvec1 temp_fvec2 = fvec2[(m_init):(m_init+DIVS),0:] temp_fvec1 = np.reshape(temp_fvec1,DIVS*n) #print temp_fvec1 temp_fvec2 = np.reshape(temp_fvec2,DIVS*n) mu_1[index] = np.mean(temp_fvec1) mu_2[index] = np.mean(temp_fvec2) cov[index,:,:] = np.cov(np.concatenate((temp_fvec1,temp_fvec2),axis=0)) if index == 0: print 'mean = ', mu_2[index] #print 'mean = ', scp.mean(fvec2[(m_init):(m_init+DIVS),0:]) #print np.shape(np.concatenate((temp_fvec1,temp_fvec2),axis=0)) #print cov[index,:,:] #print scp.std(fvec2[(m_init):(m_init+DIVS),0:]) #print scp.std(temp_fvec2) index = index+1 return mu_1,mu_2,cov if __name__ == '__main__': ### Simulation Data tSamples = 121 data_rf_training = scipy.io.loadmat('rigid_fixed_object_training.mat') data_sf_training = scipy.io.loadmat('soft_fixed_object_training.mat') data_rm_training = scipy.io.loadmat('rigid_movable_object_training.mat') data_sm_training = scipy.io.loadmat('soft_movable_object_training.mat') simuldata_training = np.zeros((tSamples,400)) datatime = np.arange(0,1.21,0.01) dataforce_rf_training = np.transpose(data_rf_training['sensed_force_rf']) dataforce_sf_training = np.transpose(data_sf_training['sensed_force_sf']) dataforce_rm_training = np.transpose(data_rm_training['sensed_force_rm']) dataforce_sm_training = np.transpose(data_sm_training['sensed_force_sm']) datamotion_rf_training = np.transpose(data_rf_training['robot_pos_rf']) datamotion_sf_training = np.transpose(data_sf_training['robot_pos_sf']) datamotion_rm_training = np.transpose(data_rm_training['robot_pos_rm']) datamotion_sm_training = np.transpose(data_sm_training['robot_pos_sm']) data_RF_training = scaling(dataforce_rf_training,datamotion_rf_training) data_SF_training 
= scaling(dataforce_sf_training,datamotion_sf_training)
    data_RM_training = scaling(dataforce_rm_training,datamotion_rm_training)
    data_SM_training = scaling(dataforce_sm_training,datamotion_sm_training)

    #print np.shape(data_RF_training)

    simuldata_training = np.concatenate((data_RF_training, data_RM_training, data_SF_training, data_SM_training), axis = 1)

    Fmat_training = np.matrix(simuldata_training)
    #print np.shape(Fmat_training[0])

    # Checking the Data-Matrix
    m_tot, n_tot = np.shape(Fmat_training)
    #print " "
    #print 'Total_Matrix_Shape:',m_tot,n_tot

    mu_rf_force,mu_rf_motion,cov_rf_sim = feature_to_mu_cov(Fmat_training[0:121,0:100],Fmat_training[121:242,0:100])
    mu_rm_force,mu_rm_motion,cov_rm_sim = feature_to_mu_cov(Fmat_training[0:121,100:200],Fmat_training[121:242,100:200])
    mu_sf_force,mu_sf_motion,cov_sf_sim = feature_to_mu_cov(Fmat_training[0:121,200:300],Fmat_training[121:242,200:300])
    mu_sm_force,mu_sm_motion,cov_sm_sim = feature_to_mu_cov(Fmat_training[0:121,300:400],Fmat_training[121:242,300:400])

    #print [mu_rf, sigma_rf]

    # HMM - Implementation:
    # 10 hidden states
    # Max. force (for now), contact area (not now), and contact motion (not now) as continuous Gaussian observations from each hidden state
    # Four HMM models: Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable
    # Transition probabilities obtained as an upper-triangular matrix (to be trained using Baum-Welch)
    # A new object is classified according to the model it matches most closely.

    F = ghmm.Float()   # emission domain of this model

    # A - Transition Matrix
    A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
         [0.0, 0.1, 0.25, 0.25, 0.1, 0.1, 0.05, 0.05, 0.05, 0.05],
         [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
         [0.0, 0.0, 0.0, 0.1, 0.3, 0.20, 0.20, 0.1, 0.05, 0.05],
         [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
         [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]

    # B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
    B_rf = [0.0]*10
    B_rm = [0.0]*10
    B_sf = [0.0]*10
    B_sm = [0.0]*10

    #for num_states in range(10):
        #B_rf[num_states] = [[mu_rf_force[num_states][0],mu_rf_motion[num_states][0]],[cov_rf_sim[num_states][0][0],cov_rf_sim[num_states][0][1],cov_rf_sim[num_states][1][0],cov_rf_sim[num_states][1][1]]]
        #B_rm[num_states] = [[mu_rm_force[num_states][0],mu_rm_motion[num_states][0]],[cov_rm_sim[num_states][0][0],cov_rm_sim[num_states][0][1],cov_rm_sim[num_states][1][0],cov_rm_sim[num_states][1][1]]]
        #B_sf[num_states] = [[mu_sf_force[num_states][0],mu_sf_motion[num_states][0]],[cov_sf_sim[num_states][0][0],cov_sf_sim[num_states][0][1],cov_sf_sim[num_states][1][0],cov_sf_sim[num_states][1][1]]]
        #B_sm[num_states] = [[mu_sm_force[num_states][0],mu_sm_motion[num_states][0]],[cov_sm_sim[num_states][0][0],cov_sm_sim[num_states][0][1],cov_sm_sim[num_states][1][0],cov_sm_sim[num_states][1][1]]]
        #print cov_rf_sim[num_states][0][0],cov_rf_sim[num_states][0][1],cov_rf_sim[num_states][1][0],cov_rf_sim[num_states][1][1]
        #print "----"

    for num_states in range(10):
        B_rf[num_states] = [[mu_rf_force[num_states][0],mu_rf_motion[num_states][0]],[cov_rf[num_states][0][0],cov_rf[num_states][0][1],cov_rf[num_states][1][0],cov_rf[num_states][1][1]]]
        B_rm[num_states] =
[[mu_rm_force[num_states][0],mu_rm_motion[num_states][0]],[cov_rm[num_states][0][0],cov_rm[num_states][0][1],cov_rm[num_states][1][0],cov_rm[num_states][1][1]]] B_sf[num_states] = [[mu_sf_force[num_states][0],mu_sf_motion[num_states][0]],[cov_sf[num_states][0][0],cov_sf[num_states][0][1],cov_sf[num_states][1][0],cov_sf[num_states][1][1]]] B_sm[num_states] = [[mu_sm_force[num_states][0],mu_sm_motion[num_states][0]],[cov_sm[num_states][0][0],cov_sm[num_states][0][1],cov_sm[num_states][1][0],cov_sm[num_states][1][1]]] #print cov_rf[num_states][0][0],cov_rf[num_states][0][1],cov_rf[num_states][1][0],cov_rf[num_states][1][1] #print "----" #for num_states in range(10): #B_rf[num_states] = [[mu_rf_force[num_states][0],mu_rf_motion[num_states][0]],[1.3,0.3,0.9,1.7]] #B_rm[num_states] = [[mu_rm_force[num_states][0],mu_rm_motion[num_states][0]],[1.4,0.4,0.6,1.8]] #B_sf[num_states] = [[mu_sf_force[num_states][0],mu_sf_motion[num_states][0]],[1.3,0.5,0.9,1.7]] #B_sm[num_states] = [[mu_sm_force[num_states][0],mu_sm_motion[num_states][0]],[1.5,0.3,0.7,0.5]] #print cov_rf[num_states][0][0],cov_rf[num_states][0][1],cov_rf[num_states][1][0],cov_rf[num_states][1][1] #print "----" #print B_sm #print mu_sm_motion # pi - initial probabilities per state pi = [0.1] * 10 # generate RF, RM, SF, SM models from parameters model_rf = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rf, pi) # Will be Trained model_rm = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_rm, pi) # Will be Trained model_sf = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sf, pi) # Will be Trained model_sm = ghmm.HMMFromMatrices(F,ghmm.MultivariateGaussianDistribution(F), A, B_sm, pi) # Will be Trained # For Training total_seq = np.zeros((242,400)) temp_seq1 = Fmat_training[0:121,:] temp_seq2 = Fmat_training[121:242,:] i = 0 j = 0 while i < (np.size(Fmat_training,0)): total_seq[i] = temp_seq1[j] total_seq[i+1] = temp_seq2[j] j=j+1 i=i+2 m_total, n_total = np.shape(total_seq) #print 'Total_Sequence_Shape:', m_total, n_total total_seq_rf = total_seq[:,0:100] total_seq_rm = total_seq[:,100:200] total_seq_sf = total_seq[:,200:300] total_seq_sm = total_seq[:,300:400] train_seq_rf = (np.array(total_seq_rf).T).tolist() train_seq_rm = (np.array(total_seq_rm).T).tolist() train_seq_sf = (np.array(total_seq_sf).T).tolist() train_seq_sm = (np.array(total_seq_sm).T).tolist() #print train_seq_rf final_ts_rf = ghmm.SequenceSet(F,train_seq_rf) final_ts_rm = ghmm.SequenceSet(F,train_seq_rm) final_ts_sf = ghmm.SequenceSet(F,train_seq_sf) final_ts_sm = ghmm.SequenceSet(F,train_seq_sm) model_rf.baumWelch(final_ts_rf) model_rm.baumWelch(final_ts_rm) model_sf.baumWelch(final_ts_sf) model_sm.baumWelch(final_ts_sm) # For Testing ### Simulation Data from All Variation data_rf = scipy.io.loadmat('rigid_fixed.mat') data_sf = scipy.io.loadmat('soft_fixed.mat') data_rm = scipy.io.loadmat('rigid_movable.mat') data_sm = scipy.io.loadmat('soft_movable.mat') simuldata = np.zeros((tSamples,8000)) dataforce_rf = np.transpose(data_rf['sensed_force_rf']) dataforce_sf = np.transpose(data_sf['sensed_force_sf']) dataforce_rm = np.transpose(data_rm['sensed_force_rm']) dataforce_sm = np.transpose(data_sm['sensed_force_sm']) datamotion_rf = np.transpose(data_rf['robot_pos_rf']) datamotion_sf = np.transpose(data_sf['robot_pos_sf']) datamotion_rm = np.transpose(data_rm['robot_pos_rm']) datamotion_sm = np.transpose(data_sm['robot_pos_sm']) data_RF = scaling(dataforce_rf,datamotion_rf) data_SF = 
scaling(dataforce_sf,datamotion_sf) data_RM = scaling(dataforce_rm,datamotion_rm) data_SM = scaling(dataforce_sm,datamotion_sm) #print np.shape(data_RF) simuldata = np.concatenate((data_RF, data_RM, data_SF, data_SM), axis = 1) Fmat = np.matrix(simuldata) #print np.shape(Fmat[0]) # Checking the Data-Matrix m_tot, n_tot = np.shape(Fmat) #print " " #print 'Total_Matrix_Shape:',m_tot,n_tot total_seq = np.zeros((242,8000)) temp_seq1 = Fmat[0:121,:] temp_seq2 = Fmat[121:242,:] i = 0 j = 0 while i < (np.size(Fmat,0)): total_seq[i] = temp_seq1[j] total_seq[i+1] = temp_seq2[j] j=j+1 i=i+2 m_total, n_total = np.shape(total_seq) #print 'Total_Sequence_Shape:', m_total, n_total rf_final = np.matrix(np.zeros((8000,1))) rm_final = np.matrix(np.zeros((8000,1))) sf_final = np.matrix(np.zeros((8000,1))) sm_final = np.matrix(np.zeros((8000,1))) total_seq_rf = total_seq[:,0:2000] total_seq_rm = total_seq[:,2000:4000] total_seq_sf = total_seq[:,4000:6000] total_seq_sm = total_seq[:,6000:8000] total_seq_obj = np.matrix(np.column_stack((total_seq_rf,total_seq_rm,total_seq_sf,total_seq_sm))) #print np.shape(total_seq_obj) rf = np.matrix(np.zeros(np.size(total_seq_obj,1))) rm = np.matrix(np.zeros(np.size(total_seq_obj,1))) sf = np.matrix(np.zeros(np.size(total_seq_obj,1))) sm = np.matrix(np.zeros(np.size(total_seq_obj,1))) #print np.shape(rf) #print np.size(total_seq_obj,1) k = 0 while (k < np.size(total_seq_obj,1)): test_seq_obj = (np.array(total_seq_obj[:,k]).T).tolist() new_test_seq_obj = np.array(sum(test_seq_obj,[])) ts_obj = new_test_seq_obj final_ts_obj = ghmm.EmissionSequence(F,ts_obj.tolist()) # Find Viterbi Path path_rf_obj = model_rf.viterbi(final_ts_obj) print "Rigid_Fixed_Model_Path" print path_rf_obj #print np.shape(path_rf_obj[0]) path_rm_obj = model_rm.viterbi(final_ts_obj) print "Rigid_Movable_Model_Path" print path_rm_obj #print np.shape(path_rm_obj[0]) path_sf_obj = model_sf.viterbi(final_ts_obj) print "Soft_Fixed_Model_Path" print path_sf_obj #print np.shape(path_sf_obj[0]) path_sm_obj = model_sm.viterbi(final_ts_obj) print "Soft_Movable_Model_Path" print path_sm_obj #print np.shape(path_sm_obj[0]) obj = max(path_rf_obj[1],path_rm_obj[1],path_sf_obj[1],path_sm_obj[1]) #print obj if obj == path_rf_obj[1]: rf[0,k] = 1 elif obj == path_rm_obj[1]: rm[0,k] = 1 elif obj == path_sf_obj[1]: sf[0,k] = 1 else: sm[0,k] = 1 k = k+1 #print rf.T rf_final = rf_final + rf.T rm_final = rm_final + rm.T sf_final = sf_final + sf.T sm_final = sm_final + sm.T #print rf_final #print rm_final #print sf_final #print sm_final # Confusion Matrix cmat = np.zeros((4,4)) arrsum_rf = np.zeros((4,1)) arrsum_rm = np.zeros((4,1)) arrsum_sf = np.zeros((4,1)) arrsum_sm = np.zeros((4,1)) k = 2000 i = 0 while (k < 8001): arrsum_rf[i] = np.sum(rf_final[k-2000:k,0]) arrsum_rm[i] = np.sum(rm_final[k-2000:k,0]) arrsum_sf[i] = np.sum(sf_final[k-2000:k,0]) arrsum_sm[i] = np.sum(sm_final[k-2000:k,0]) i = i+1 k = k+2000 i=0 while (i < 4): j=0 while (j < 4): if (i == 0): cmat[i][j] = arrsum_rf[j] elif (i == 1): cmat[i][j] = arrsum_rm[j] elif (i == 2): cmat[i][j] = arrsum_sf[j] else: cmat[i][j] = arrsum_sm[j] j = j+1 i = i+1 #print cmat # Plot Confusion Matrix Nlabels = 4 fig = pp.figure() ax = fig.add_subplot(111) figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels], cmap='gray_r') ax.set_title('Performance of HMM Models') pp.xlabel("Targets") pp.ylabel("Predictions") ax.set_xticks([0.5,1.5,2.5,3.5]) ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable']) 
ax.set_yticks([3.5,2.5,1.5,0.5]) ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable']) figbar = fig.colorbar(figplot) i = 0 while (i < 4): j = 0 while (j < 4): pp.text(j+0.5,3.5-i,cmat[i][j],color='k') if cmat[i][j] > 1600: pp.text(j+0.5,3.5-i,cmat[i][j],color='w') j = j+1 i = i+1 pp.show()
tapomayukh/projects_in_python
classification/Classification_with_HMM/Single_Contact_Classification/simulation_results/Combined/object_training/hmm_crossvalidation_force_motion_10_states_object_training_all_testing.py
Python
mit
16,524
[ "Gaussian", "Mayavi" ]
73de233246dbe7ac84263911f087ee8c1f89d8de9c4bca98e568ba7687794a08
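The per-sequence classification in the testing loop above reduces to picking the model whose Viterbi decoding scores highest. A distilled sketch using only the `ghmm` calls already present in the script; `models` is a hypothetical dict of the four trained HMMs:

def classify(final_ts_obj, models):
    # model.viterbi returns (state_path, log_likelihood); keep the score.
    scores = {}
    for label, model in models.items():
        path, loglik = model.viterbi(final_ts_obj)
        scores[label] = loglik
    return max(scores, key=scores.get)

# e.g. classify(final_ts_obj, {'RF': model_rf, 'RM': model_rm,
#                              'SF': model_sf, 'SM': model_sm})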
# -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""

# Author: Gael Varoquaux gael.varoquaux@normalesup.org
#         Brian Cheung
#         Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings

import numpy as np

from sklearn.utils import check_random_state
from sklearn.utils.validation import check_array
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.neighbors import kneighbors_graph
from sklearn.manifold import spectral_embedding
from sklearn.cluster.k_means_ import k_means
from sklearn.cluster import SpectralClustering
from sklearn.cluster.spectral import discretize


def spectral_clustering(affinity, n_clusters=8, n_components=None,
                        eigen_solver=None, random_state=None, n_init=10,
                        eigen_tol=0.0, assign_labels='kmeans',
                        norm_laplacian=True):
    """Apply clustering to a projection of the normalized Laplacian.

    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.

    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.

    Read more in the :ref:`User Guide <spectral_clustering>`.

    Parameters
    -----------
    affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
        The affinity matrix describing the relationship of the samples to
        embed. **Must be symmetric**.

        Possible examples:
          - adjacency matrix of a graph,
          - heat kernel of the pairwise distance matrix of the samples,
          - symmetric k-nearest neighbours connectivity matrix of the samples.

    n_clusters : integer, optional
        Number of clusters to extract.

    n_components : integer, optional, default is n_clusters
        Number of eigen vectors to use for the spectral embedding

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigen vectors decomposition when eigen_solver == 'amg' and by
        the K-Means initialization.

    n_init : int, optional, default: 10
        Number of times the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        The strategy to use to assign labels in the embedding
        space. There are two ways to assign labels after the laplacian
        embedding. k-means can be applied and is a popular choice. But it can
        also be sensitive to initialization. Discretization is another
        approach which is less sensitive to random initialization. See
        the 'Multiclass spectral clustering' paper referenced below for
        more details on the discretization approach.

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.
    References
    ----------

    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324

    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    ------
    The graph should contain only one connected component; otherwise
    the results make little sense.

    This algorithm solves the normalized cut for k=2: it is a
    normalized spectral clustering.
    """
    if assign_labels not in ('kmeans', 'discretize'):
        raise ValueError("The 'assign_labels' parameter should be "
                         "'kmeans' or 'discretize', but '%s' was given"
                         % assign_labels)

    random_state = check_random_state(random_state)
    n_components = n_clusters if n_components is None else n_components
    maps = spectral_embedding(affinity, n_components=n_components,
                              eigen_solver=eigen_solver,
                              random_state=random_state,
                              eigen_tol=eigen_tol, drop_first=False,
                              norm_laplacian=norm_laplacian)

    if assign_labels == 'kmeans':
        _, labels, _ = k_means(maps, n_clusters, random_state=random_state,
                               n_init=n_init)
    else:
        labels = discretize(maps, random_state=random_state)

    return labels


class SpectralClustering(SpectralClustering):
    """Apply clustering to a projection of the normalized Laplacian.

    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.

    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.

    When calling ``fit``, an affinity matrix is constructed using either a
    kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::

            np.exp(-gamma * d(X,X) ** 2)

    or a k-nearest neighbors connectivity matrix.

    Alternatively, using ``precomputed``, a user-provided affinity
    matrix can be used.

    Read more in the :ref:`User Guide <spectral_clustering>`.

    Parameters
    -----------
    n_clusters : integer, optional
        The dimension of the projection subspace.

    affinity : string, array-like or callable, default 'rbf'
        If a string, this may be one of 'nearest_neighbors', 'precomputed',
        'rbf' or one of the kernels supported by
        `sklearn.metrics.pairwise_kernels`.

        Only kernels that produce similarity scores (non-negative values that
        increase with similarity) should be used. This property is not checked
        by the clustering algorithm.

    gamma : float
        Scaling factor of RBF, polynomial, exponential chi^2 and sigmoid
        affinity kernel. Ignored for ``affinity='nearest_neighbors'``.

    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.

    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
        Ignored by other kernels.

    n_neighbors : integer
        Number of neighbors to use when constructing the affinity matrix using
        the nearest neighbors method. Ignored for ``affinity='rbf'``.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed.
        It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
        and by the K-Means initialization.

    n_init : int, optional, default: 10
        Number of times the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        The strategy to use to assign labels in the embedding
        space. There are two ways to assign labels after the laplacian
        embedding. k-means can be applied and is a popular choice. But it can
        also be sensitive to initialization. Discretization is another
        approach which is less sensitive to random initialization.

    kernel_params : dictionary of string to any, optional
        Parameters (keyword arguments) and values for kernel passed as
        callable object. Ignored by other kernels.

    Attributes
    ----------
    affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
        ``fit``.

    labels_ :
        Labels of each point

    Notes
    -----
    If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values means
    very dissimilar elements, it can be transformed in a
    similarity matrix that is well suited for the algorithm by
    applying the Gaussian (RBF, heat) kernel::

        np.exp(- X ** 2 / (2. * delta ** 2))

    Another alternative is to take a symmetric version of the k
    nearest neighbors connectivity matrix of the points.

    If the pyamg package is installed, it is used: this greatly
    speeds up computation.

    References
    ----------

    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324

    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
    """

    def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
                 n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
                 eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
                 kernel_params=None, norm_laplacian=True):
        super(SpectralClustering, self).__init__(
            n_clusters=n_clusters, eigen_solver=eigen_solver,
            random_state=random_state, n_init=n_init, gamma=gamma,
            affinity=affinity, n_neighbors=n_neighbors, eigen_tol=eigen_tol,
            assign_labels=assign_labels, degree=degree, coef0=coef0,
            kernel_params=kernel_params)
        self.norm_laplacian = norm_laplacian

    def fit(self, X, y=None):
        """Creates an affinity matrix for X using the selected affinity,
        then applies spectral clustering to this affinity matrix.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            OR, if affinity==`precomputed`, a precomputed affinity
            matrix of shape (n_samples, n_samples)
        """
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=np.float64)
        if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            warnings.warn("The spectral clustering API has changed. ``fit`` "
                          "now constructs an affinity matrix from data.
To use" " a custom affinity matrix, " "set ``affinity=precomputed``.") if self.affinity == 'nearest_neighbors': connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True) self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T) elif self.affinity == 'precomputed': self.affinity_matrix_ = X else: params = self.kernel_params if params is None: params = {} if not callable(self.affinity): params['gamma'] = self.gamma params['degree'] = self.degree params['coef0'] = self.coef0 self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity, filter_params=True, **params) random_state = check_random_state(self.random_state) self.labels_ = spectral_clustering(self.affinity_matrix_, n_clusters=self.n_clusters, eigen_solver=self.eigen_solver, random_state=random_state, n_init=self.n_init, eigen_tol=self.eigen_tol, assign_labels=self.assign_labels, norm_laplacian=self.norm_laplacian) return self
slipguru/ignet
icing/externals/spectral.py
Python
bsd-2-clause
13,176
[ "Brian", "Gaussian" ]
3363f58de07e83d2d650d9e9a53e1ab1fec62a8cc2a32e10ea1aa7dac0990966
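Usage sketch for the record above: the vendored class mirrors scikit-learn's public SpectralClustering, so a minimal run on toy data (assuming scikit-learn is installed) looks like this:

# Minimal usage sketch; the vendored class above mirrors
# sklearn.cluster.SpectralClustering.
from sklearn.cluster import SpectralClustering
from sklearn.datasets import make_moons

X, _ = make_moons(n_samples=200, noise=0.05, random_state=0)

# 'nearest_neighbors' builds the symmetrized kNN connectivity matrix,
# exactly as fit() does above; 'rbf' would use a Gaussian kernel instead.
model = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                           n_neighbors=10, assign_labels='kmeans',
                           random_state=0)
labels = model.fit_predict(X)
print(labels[:10])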
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) Pootle contributors. # # This file is a part of the Pootle project. It is distributed under the GPL3 # or later license. See the LICENSE file for a copy of the license and the # AUTHORS file for copyright and authorship information. import logging from django.contrib.auth import get_user_model from django.contrib.auth.models import Permission from django.contrib.contenttypes.models import ContentType from django.utils.translation import ugettext_noop as _ from pootle.core.models import Revision from pootle_app.models import Directory from pootle_app.models.permissions import PermissionSet, get_pootle_permission from pootle_language.models import Language from pootle_project.models import Project from staticpages.models import StaticPage as Announcement logger = logging.getLogger(__name__) def initdb(create_projects=True): """Populate the database with default initial data. This creates the default database to get a working Pootle installation. """ create_revision() create_essential_users() create_root_directories() create_template_languages() if create_projects: create_terminology_project() create_pootle_permissions() create_pootle_permission_sets() if create_projects: create_default_projects() create_default_languages() def _create_object(model_klass, **criteria): instance, created = model_klass.objects.get_or_create(**criteria) if created: logger.debug( "Created %s: '%s'" % (instance.__class__.__name__, instance)) else: logger.debug( "%s already exists - skipping: '%s'" % (instance.__class__.__name__, instance)) return instance, created def _create_pootle_user(**criteria): user, created = _create_object(get_user_model(), **criteria) if created: user.set_unusable_password() user.save() return user def _create_pootle_permission_set(permissions, **criteria): permission_set, created = _create_object(PermissionSet, **criteria) if created: permission_set.positive_permissions = permissions permission_set.save() return permission_set def create_revision(): Revision.initialize() def create_essential_users(): """Create the 'default' and 'nobody' User instances. These users are required for Pootle's permission system. """ # The nobody user is used to represent an anonymous user in cases where # we need to associate model information with such a user. An example is # in the permission system: we need a way to store rights for anonymous # users; thus we use the nobody user. criteria = { 'username': u"nobody", 'full_name': u"any anonymous user", 'is_active': True, } _create_pootle_user(**criteria) # The 'default' user represents any valid, non-anonymous user and is used # to associate information any such user. An example is in the permission # system: we need a way to store default rights for users. We use the # 'default' user for this. # # In a future version of Pootle we should think about using Django's # groups to do better permissions handling. criteria = { 'username': u"default", 'full_name': u"any authenticated user", 'is_active': True, } _create_pootle_user(**criteria) # The system user represents a system, and is used to # associate updates done by bulk commands as update_stores. 
criteria = { 'username': u"system", 'full_name': u"system user", 'is_active': True, } _create_pootle_user(**criteria) def create_pootle_permissions(): """Create Pootle's directory level permissions.""" args = { 'app_label': "pootle_app", 'model': "directory", } pootle_content_type, created = _create_object(ContentType, **args) pootle_content_type.name = 'pootle' pootle_content_type.save() # Create the permissions. permissions = [ { 'name': _("Can access a project"), 'codename': "view", }, { 'name': _("Cannot access a project"), 'codename': "hide", }, { 'name': _("Can make a suggestion for a translation"), 'codename': "suggest", }, { 'name': _("Can submit a translation"), 'codename': "translate", }, { 'name': _("Can review suggestions"), 'codename': "review", }, { 'name': _("Can administrate a translation project"), 'codename': "administrate", }, ] criteria = { 'content_type': pootle_content_type, } for permission in permissions: criteria.update(permission) _create_object(Permission, **criteria) def create_pootle_permission_sets(): """Create the default permission set for the 'nobody' and 'default' users. 'nobody' is the anonymous (non-logged in) user, and 'default' is the logged in user. """ User = get_user_model() nobody = User.objects.get(username='nobody') default = User.objects.get(username='default') view = get_pootle_permission('view') suggest = get_pootle_permission('suggest') translate = get_pootle_permission('translate') # Default permissions for tree root. criteria = { 'user': nobody, 'directory': Directory.objects.root, } _create_pootle_permission_set([view, suggest], **criteria) criteria['user'] = default _create_pootle_permission_set([view, suggest, translate], **criteria) # Default permissions for templates language. # Override with no permissions for templates language. criteria = { 'user': nobody, 'directory': Directory.objects.get(pootle_path="/templates/"), } _create_pootle_permission_set([], **criteria) criteria['user'] = default _create_pootle_permission_set([], **criteria) def require_english(): """Create the English Language item.""" criteria = { 'code': "en", 'fullname': u"English", 'nplurals': 2, 'pluralequation': "(n != 1)", } en, created = _create_object(Language, **criteria) return en def create_root_directories(): """Create the root Directory items.""" root, created = _create_object(Directory, **dict(name="")) _create_object(Directory, **dict(name="projects", parent=root)) def create_template_languages(): """Create the 'templates' and English languages. The 'templates' language is used to give users access to the untranslated template files. """ _create_object(Language, **dict(code="templates", fullname="Templates")) require_english() def create_terminology_project(): """Create the terminology project. The terminology project is used to display terminology suggestions while translating. """ criteria = { 'code': "terminology", 'fullname': u"Terminology", 'source_language': require_english(), 'checkstyle': "terminology", } _create_object(Project, **criteria) def create_default_projects(): """Create the default projects that we host. You might want to add your projects here, although you can also add things through the web interface later. 
""" from pootle_project.models import Project en = require_english() criteria = { 'code': u"tutorial", 'source_language': en, 'fullname': u"Tutorial", 'checkstyle': "standard", 'localfiletype': "po", 'treestyle': "auto", } tutorial, created = _create_object(Project, **criteria) criteria = { 'active': True, 'title': "Project instructions", 'body': ('<div dir="ltr" lang="en">Tutorial project where users can ' 'play with Pootle and learn more about translation and ' 'localisation.<br />For more help on localisation, visit the ' '<a href="http://docs.translatehouse.org/projects/' 'localization-guide/en/latest/guide/start.html">localisation ' 'guide</a>.</div>'), 'virtual_path': "announcements/projects/"+tutorial.code, } _create_object(Announcement, **criteria) def create_default_languages(): """Create the default languages.""" from translate.lang import data, factory from pootle_language.models import Language # import languages from toolkit for code in data.languages.keys(): try: tk_lang = factory.getlanguage(code) criteria = { 'code': code, 'fullname': tk_lang.fullname, 'nplurals': tk_lang.nplurals, 'pluralequation': tk_lang.pluralequation, } try: criteria['specialchars'] = tk_lang.specialchars except AttributeError: pass _create_object(Language, **criteria) except: pass
Yelp/pootle
pootle/core/initdb.py
Python
gpl-3.0
9,174
[ "VisIt" ]
2145d91ccbf546eab9b08b98106a5ae69a7b80b07a0dc45c2c7252cc01e8a271
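The _create_object helper in the record above wraps Django's get_or_create to make seeding idempotent; a standalone sketch of the same pattern (the model and field names in the usage comment are hypothetical):

# Idempotent seeding sketch using Django's get_or_create, the same pattern
# as _create_object above. Works for any Django model class.
import logging

logger = logging.getLogger(__name__)


def create_or_skip(model_klass, **criteria):
    instance, created = model_klass.objects.get_or_create(**criteria)
    if created:
        logger.debug("Created %s: '%s'", instance.__class__.__name__, instance)
    else:
        logger.debug("%s already exists - skipping: '%s'",
                     instance.__class__.__name__, instance)
    return instance, created

# e.g. create_or_skip(Language, code="en", fullname=u"English")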
############################################################################## # # Copyright (c) 2009-2013 by University of Queensland # http://www.uq.edu.au # # Primary Business: Queensland, Australia # Licensed under the Open Software License version 3.0 # http://www.opensource.org/licenses/osl-3.0.php # # Development until 2012 by Earth Systems Science Computational Center (ESSCC) # Development since 2012 by School of Earth Sciences # ############################################################################## """3D gravity/magnetic joint inversion using netCDF data""" # Set parameters MAGNETIC_DATASET = '${magnetic-file}' GRAVITY_DATASET = '${gravity-file}' # background magnetic flux density (B_north, B_east, B_vertical) in nano Tesla. B_b = [${bb-north}, ${bb-east}, ${bb-vertical}] # amount of horizontal padding (this affects end result, about 20% recommended) PAD_X = ${x-padding} PAD_Y = ${y-padding} # maximum depth (in meters) DEPTH = ${max-depth} # buffer zone above data (in meters; 6-10km recommended) AIR = ${air-buffer} # number of mesh elements in vertical direction (~1 element per 2km recommended) NE_Z = ${vertical-mesh-elements} # trade-off factors mu_gravity = ${mu-gravity} mu_magnetic = ${mu-magnetic} N_THREADS = ${n-threads} ####### Do not change anything below this line ####### import os import subprocess import sys try: from esys.downunder import * from esys.escript import unitsSI as U from esys.weipa import * except ImportError: line=["/opt/escript/bin/run-escript","-t" + str(N_THREADS)]+sys.argv ret=subprocess.call(line) sys.exit(ret) def saveAndUpload(fn, **args): saveSilo(fn, **args) subprocess.call(["cloud", "upload", fn, fn, "--set-acl=public-read"]) def statusCallback(k, x, Jx, g_Jx, norm_dx): print("Iteration %s complete. Error=%s" % (k, norm_dx)) B_b=[b*U.Nano*U.Tesla for b in B_b] MAG_UNITS = U.Nano * U.Tesla GRAV_UNITS = 1e-6 * U.m/(U.sec**2) # Setup and run the inversion grav_source=NetCdfData(NetCdfData.GRAVITY, GRAVITY_DATASET, scale_factor=GRAV_UNITS) mag_source=NetCdfData(NetCdfData.MAGNETIC, MAGNETIC_DATASET, scale_factor=MAG_UNITS) db=DomainBuilder(dim=3) db.addSource(grav_source) db.addSource(mag_source) db.setVerticalExtents(depth=DEPTH, air_layer=AIR, num_cells=NE_Z) db.setFractionalPadding(pad_x=PAD_X, pad_y=PAD_Y) db.setBackgroundMagneticFluxDensity(B_b) db.fixDensityBelow(depth=DEPTH) db.fixSusceptibilityBelow(depth=DEPTH) inv=JointGravityMagneticInversion() inv.setup(db) inv.setSolverCallback(statusCallback) inv.getCostFunction().setTradeOffFactorsModels([mu_gravity, mu_magnetic]) inv.getCostFunction().setTradeOffFactorsRegularization(mu = [1.,1.], mu_c=1.) 
density, susceptibility = inv.run()
print("density = %s" % density)
print("susceptibility = %s" % susceptibility)

g, wg = db.getGravitySurveys()[0]
B, wB = db.getMagneticSurveys()[0]
saveAndUpload("result.silo", density=density, gravity_anomaly=g,
              gravity_weight=wg, susceptibility=susceptibility,
              magnetic_anomaly=B, magnetic_weight=wB)
print("Results saved in result.silo")

# Visualise result.silo using VisIt
import visit
visit.LaunchNowin()
saveatts = visit.SaveWindowAttributes()
saveatts.family = 0
saveatts.width = 1024
saveatts.height = 768
saveatts.resConstraint = saveatts.NoConstraint
saveatts.outputToCurrentDirectory = 1
saveatts.fileName = 'result-susceptibility.png'
visit.SetSaveWindowAttributes(saveatts)
visit.OpenDatabase('result.silo')
visit.AddPlot('Contour', 'susceptibility')
c = visit.ContourAttributes()
c.colorType = c.ColorByColorTable
c.colorTableName = "hot"
visit.SetPlotOptions(c)
visit.DrawPlots()
visit.SaveWindow()  # save susceptibility image

visit.ChangeActivePlotsVar('density')
saveatts.fileName = 'result-density.png'
visit.SetSaveWindowAttributes(saveatts)
v = visit.GetView3D()
v.viewNormal = (-0.554924, 0.703901, 0.443377)
v.viewUp = (0.272066, -0.3501, 0.896331)
visit.SetView3D(v)
visit.SaveWindow()  # save density image
visit.DeleteAllPlots()
visit.CloseDatabase('result.silo')

subprocess.call(["cloud", "upload", "result-density.png",
                 "result-density.png", "--set-acl=public-read"])
subprocess.call(["cloud", "upload", "result-susceptibility.png",
                 "result-susceptibility.png", "--set-acl=public-read"])
squireg/ANVGL-Portal
src/main/resources/org/auscope/portal/server/scriptbuilder/templates/escript-joint.py
Python
lgpl-3.0
4,365
[ "NetCDF", "VisIt" ]
79ea526a6d42b9a234eff7ca33965771e7cad6449f04bdd6539a995e93a3c3e8
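The ${...} tokens in the record above are filled in by the portal before the script runs. A sketch of that substitution using string.Template, whose default identifier pattern must be widened because these parameter names contain hyphens; file names and values are illustrative only:

# Substitution sketch for the ${...} placeholders above. string.Template
# uses the same ${name} syntax, but its default idpattern rejects hyphens,
# so we widen it. All values shown are examples, not portal defaults.
from string import Template


class PortalTemplate(Template):
    # allow hyphenated parameter names such as ${gravity-file}
    idpattern = r'[a-z][a-z0-9-]*'


with open('escript-joint.py') as f:   # a local copy of the template above
    template = PortalTemplate(f.read())

script = template.safe_substitute({
    'gravity-file': 'gravity.nc',
    'magnetic-file': 'magnetic.nc',
    'bb-north': '0', 'bb-east': '0', 'bb-vertical': '-45000',
    'x-padding': '0.2', 'y-padding': '0.2',
    'max-depth': '40000', 'air-buffer': '6000',
    'vertical-mesh-elements': '25',
    'mu-gravity': '1.0', 'mu-magnetic': '1.0',
    'n-threads': '4',
})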
""" Communicability and centrality measures. """ # Copyright (C) 2011 by # Aric Hagberg <hagberg@lanl.gov> # Dan Schult <dschult@colgate.edu> # Pieter Swart <swart@lanl.gov> # All rights reserved. # BSD license. import networkx as nx __author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)', 'Franck Kalala (franckkalala@yahoo.fr']) __all__ = ['communicability_centrality_exp', 'communicability_centrality', 'communicability_betweenness_centrality', 'communicability', 'communicability_exp', 'estrada_index', ] def communicability_centrality_exp(G): r"""Return the communicability centrality for each node of G Communicability centrality, also called subgraph centrality, of a node `n` is the sum of closed walks of all lengths starting and ending at node `n`. Parameters ---------- G: graph Returns ------- nodes:dictionary Dictionary of nodes with communicability centrality as the value. Raises ------ NetworkXError If the graph is not undirected and simple. See Also -------- communicability: Communicability between all pairs of nodes in G. communicability_centrality: Communicability centrality for each node of G. Notes ----- This version of the algorithm exponentiates the adjacency matrix. The communicability centrality of a node `u` in G can be found using the matrix exponential of the adjacency matrix of G [1]_ [2]_, .. math:: SC(u)=(e^A)_{uu} . References ---------- .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez, "Subgraph centrality in complex networks", Physical Review E 71, 056103 (2005). http://arxiv.org/abs/cond-mat/0504730 .. [2] Ernesto Estrada, Naomichi Hatano, "Communicability in complex networks", Phys. Rev. E 77, 036111 (2008). http://arxiv.org/abs/0707.0756 Examples -------- >>> G = nx.Graph([(0,1),(1,2),(1,5),(5,4),(2,4),(2,3),(4,3),(3,6)]) >>> sc = nx.communicability_centrality_exp(G) """ # alternative implementation that calculates the matrix exponential try: import scipy.linalg except ImportError: raise ImportError('subgraph_centrality_exp() requires SciPy: ', 'http://scipy.org/') if G.is_directed(): raise nx.NetworkXError(\ "subgraph_centrality() not defined for digraphs.") if G.is_multigraph(): raise nx.NetworkXError(\ "subgraph_centrality() not defined for multigraphs.") nodelist = G.nodes() # ordering of nodes in matrix A = nx.to_numpy_matrix(G,nodelist) # convert to 0-1 matrix A[A!=0.0] = 1 expA = scipy.linalg.expm(A) # convert diagonal to dictionary keyed by node sc = dict(zip(nodelist,expA.diagonal())) return sc def communicability_centrality(G): r"""Return communicability centrality for each node in G. Communicability centrality, also called subgraph centrality, of a node `n` is the sum of closed walks of all lengths starting and ending at node `n`. Parameters ---------- G: graph Returns ------- nodes: dictionary Dictionary of nodes with communicability centrality as the value. Raises ------ NetworkXError If the graph is not undirected and simple. See Also -------- communicability: Communicability between all pairs of nodes in G. communicability_centrality: Communicability centrality for each node of G. Notes ----- This version of the algorithm computes eigenvalues and eigenvectors of the adjacency matrix. Communicability centrality of a node `u` in G can be found using a spectral decomposition of the adjacency matrix [1]_ [2]_, .. math:: SC(u)=\sum_{j=1}^{N}(v_{j}^{u})^2 e^{\lambda_{j}}, where `v_j` is an eigenvector of the adjacency matrix `A` of G corresponding corresponding to the eigenvalue `\lambda_j`. 
Examples -------- >>> G = nx.Graph([(0,1),(1,2),(1,5),(5,4),(2,4),(2,3),(4,3),(3,6)]) >>> sc = nx.communicability_centrality(G) References ---------- .. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez, "Subgraph centrality in complex networks", Physical Review E 71, 056103 (2005). http://arxiv.org/abs/cond-mat/0504730 .. [2] Ernesto Estrada, Naomichi Hatano, "Communicability in complex networks", Phys. Rev. E 77, 036111 (2008). http://arxiv.org/abs/0707.0756 """ try: import numpy import numpy.linalg except ImportError: raise ImportError('subgraph_centrality() requires NumPy: ', 'http://scipy.org/') if G.is_directed(): raise nx.NetworkXError(\ "communicability_centrality() not defined for digraphs.") if G.is_multigraph(): raise nx.NetworkXError(\ "communicability_centrality() not defined for multigraphs.") nodelist = G.nodes() # ordering of nodes in matrix A = nx.to_numpy_matrix(G,nodelist) # convert to 0-1 matrix A[A!=0.0] = 1 w,v = numpy.linalg.eigh(A) vsquare = numpy.array(v)**2 expw = numpy.exp(w) xg = numpy.dot(vsquare,expw) # convert vector dictionary keyed by node sc = dict(zip(nodelist,xg)) return sc def communicability_betweenness_centrality(G, normalized=True): r"""Return communicability betweenness for all pairs of nodes in G. Communicability betweenness measure makes use of the number of walks connecting every pair of nodes as the basis of a betweenness centrality measure. Parameters ---------- G: graph Returns ------- nodes:dictionary Dictionary of nodes with communicability betweenness as the value. Raises ------ NetworkXError If the graph is not undirected and simple. See Also -------- communicability: Communicability between all pairs of nodes in G. communicability_centrality: Communicability centrality for each node of G using matrix exponential. communicability_centrality_exp: Communicability centrality for each node in G using spectral decomposition. Notes ----- Let `G=(V,E)` be a simple undirected graph with `n` nodes and `m` edges, and `A` denote the adjacency matrix of `G`. Let `G(r)=(V,E(r))` be the graph resulting from removing all edges connected to node `r` but not the node itself. The adjacency matrix for `G(r)` is `A+E(r)`, where `E(r)` has nonzeros only in row and column `r`. The communicability betweenness of a node `r` is [1]_ .. math:: \omega_{r} = \frac{1}{C}\sum_{p}\sum_{q}\frac{G_{prq}}{G_{pq}}, p\neq q, q\neq r, where `G_{prq}=(e^{A}_{pq} - (e^{A+E(r)})_{pq}` is the number of walks involving node r, `G_{pq}=(e^{A})_{pq}` is the number of closed walks starting at node `p` and ending at node `q`, and `C=(n-1)^{2}-(n-1)` is a normalization factor equal to the number of terms in the sum. The resulting `\omega_{r}` takes values between zero and one. The lower bound cannot be attained for a connected graph, and the upper bound is attained in the star graph. References ---------- .. [1] Ernesto Estrada, Desmond J. Higham, Naomichi Hatano, "Communicability Betweenness in Complex Networks" Physica A 388 (2009) 764-774. 
http://arxiv.org/abs/0905.4102 Examples -------- >>> G = nx.Graph([(0,1),(1,2),(1,5),(5,4),(2,4),(2,3),(4,3),(3,6)]) >>> cbc = nx.communicability_betweenness_centrality(G) """ try: import scipy import scipy.linalg except ImportError: raise ImportError('comunicability_betweenness() requires SciPy: ', 'http://scipy.org/') if G.is_directed(): raise nx.NetworkXError(\ "communicability_betweenness() not defined for digraphs.") if G.is_multigraph(): raise nx.NetworkXError(\ "communicability_betweenness() not defined for multigraphs.") nodelist = G.nodes() # ordering of nodes in matrix n = len(nodelist) A = nx.to_numpy_matrix(G,nodelist) # convert to 0-1 matrix A[A!=0.0] = 1 expA = scipy.linalg.expm(A) mapping = dict(zip(nodelist,range(n))) sc = {} for v in G: # remove row and col of node v i = mapping[v] row = A[i,:].copy() col = A[:,i].copy() A[i,:] = 0 A[:,i] = 0 B = (expA - scipy.linalg.expm(A)) / expA # sum with row/col of node v and diag set to zero B[i,:] = 0 B[:,i] = 0 B -= scipy.diag(scipy.diag(B)) sc[v] = B.sum() # put row and col back A[i,:] = row A[:,i] = col # rescaling sc = _rescale(sc,normalized=normalized) return sc def _rescale(sc,normalized): # helper to rescale betweenness centrality if normalized is True: order=len(sc) if order <=2: scale=None else: scale=1.0/((order-1.0)**2-(order-1.0)) if scale is not None: for v in sc: sc[v] *= scale return sc def communicability(G): r"""Return communicability between all pairs of nodes in G. The communicability between pairs of nodes in G is the sum of closed walks of different lengths starting at node u and ending at node v. Parameters ---------- G: graph Returns ------- comm: dictionary of dictionaries Dictionary of dictionaries keyed by nodes with communicability as the value. Raises ------ NetworkXError If the graph is not undirected and simple. See Also -------- communicability_centrality_exp: Communicability centrality for each node of G using matrix exponential. communicability_centrality: Communicability centrality for each node in G using spectral decomposition. communicability: Communicability between pairs of nodes in G. Notes ----- This algorithm uses a spectral decomposition of the adjacency matrix. Let G=(V,E) be a simple undirected graph. Using the connection between the powers of the adjacency matrix and the number of walks in the graph, the communicability between nodes `u` and `v` based on the graph spectrum is [1]_ .. math:: C(u,v)=\sum_{j=1}^{n}\phi_{j}(u)\phi_{j}(v)e^{\lambda_{j}}, where `\phi_{j}(u)` is the `u\rm{th}` element of the `j\rm{th}` orthonormal eigenvector of the adjacency matrix associated with the eigenvalue `\lambda_{j}`. References ---------- .. [1] Ernesto Estrada, Naomichi Hatano, "Communicability in complex networks", Phys. Rev. E 77, 036111 (2008). 
http://arxiv.org/abs/0707.0756 Examples -------- >>> G = nx.Graph([(0,1),(1,2),(1,5),(5,4),(2,4),(2,3),(4,3),(3,6)]) >>> c = nx.communicability(G) """ try: import numpy import scipy.linalg except ImportError: raise ImportError('communicability() requires SciPy: ', 'http://scipy.org/') if G.is_directed(): raise nx.NetworkXError(\ "communicability() not defined for digraphs.") if G.is_multigraph(): raise nx.NetworkXError(\ "communicability() not defined for multigraphs.") nodelist = G.nodes() # ordering of nodes in matrix A = nx.to_numpy_matrix(G,nodelist) # convert to 0-1 matrix A[A!=0.0] = 1 w,vec = numpy.linalg.eigh(A) expw = numpy.exp(w) mapping = dict(zip(nodelist,range(len(nodelist)))) sc={} # computing communicabilities for u in G: sc[u]={} for v in G: s = 0 p = mapping[u] q = mapping[v] for j in range(len(nodelist)): s += vec[:,j][p,0]*vec[:,j][q,0]*expw[j] sc[u][v] = s return sc def communicability_exp(G): r"""Return communicability between all pairs of nodes in G. Communicability between pair of node (u,v) of node in G is the sum of closed walks of different lengths starting at node u and ending at node v. Parameters ---------- G: graph Returns ------- comm: dictionary of dictionaries Dictionary of dictionaries keyed by nodes with communicability as the value. Raises ------ NetworkXError If the graph is not undirected and simple. See Also -------- communicability_centrality_exp: Communicability centrality for each node of G using matrix exponential. communicability_centrality: Communicability centrality for each node in G using spectral decomposition. communicability_exp: Communicability between all pairs of nodes in G using spectral decomposition. Notes ----- This algorithm uses matrix exponentiation of the adjacency matrix. Let G=(V,E) be a simple undirected graph. Using the connection between the powers of the adjacency matrix and the number of walks in the graph, the communicability between nodes u and v is [1]_, .. math:: C(u,v) = (e^A)_{uv}, where `A` is the adjacency matrix of G. References ---------- .. [1] Ernesto Estrada, Naomichi Hatano, "Communicability in complex networks", Phys. Rev. E 77, 036111 (2008). http://arxiv.org/abs/0707.0756 Examples -------- >>> G = nx.Graph([(0,1),(1,2),(1,5),(5,4),(2,4),(2,3),(4,3),(3,6)]) >>> c = nx.communicability_exp(G) """ try: import scipy.linalg except ImportError: raise ImportError('communicability() requires SciPy: ', 'http://scipy.org/') if G.is_directed(): raise nx.NetworkXError(\ "communicability() not defined for digraphs.") if G.is_multigraph(): raise nx.NetworkXError(\ "communicability() not defined for multigraphs.") nodelist = G.nodes() # ordering of nodes in matrix A = nx.to_numpy_matrix(G,nodelist) # convert to 0-1 matrix A[A!=0.0] = 1 # communicability matrix expA = scipy.linalg.expm(A) mapping = dict(zip(nodelist,range(len(nodelist)))) sc = {} for u in G: sc[u]={} for v in G: sc[u][v] = expA[mapping[u],mapping[v]] return sc def estrada_index(G): r"""Return the Estrada index of a the graph G. Parameters ---------- G: graph Returns ------- estrada index: float Raises ------ NetworkXError If the graph is not undirected and simple. See also -------- estrada_index_exp Notes ----- Let `G=(V,E)` be a simple undirected graph with `n` nodes and let `\lambda_{1}\leq\lambda_{2}\leq\cdots\lambda_{n}` be a non-increasing ordering of the eigenvalues of its adjacency matrix `A`. The Estrada index is .. math:: EE(G)=\sum_{j=1}^n e^{\lambda _j}. References ---------- .. [1] E. Estrada, Characterization of 3D molecular structure, Chem. 
       Phys. Lett. 319, 713 (2000).

    Examples
    --------
    >>> G = nx.Graph([(0,1),(1,2),(1,5),(5,4),(2,4),(2,3),(4,3),(3,6)])
    >>> ei = nx.estrada_index(G)
    """
    try:
        import numpy
        import numpy.linalg
    except ImportError:
        raise ImportError('estrada_index() requires NumPy: ',
                          'http://scipy.org/')
    return sum(communicability_centrality(G).values())


# fixture for nose tests
def setup_module(module):
    from nose import SkipTest
    try:
        import numpy
    except:
        raise SkipTest("NumPy not available")
    try:
        import scipy
    except:
        raise SkipTest("SciPy not available")
ChristianKniep/QNIB
serverfiles/usr/local/lib/networkx-1.6/networkx/algorithms/centrality/communicability_alg.py
Python
gpl-2.0
16,015
[ "Desmond" ]
200b2aecba87cb33a3f09b7f594440d64f5ceabe82e317f6fb97c9d250f4e77c
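As a cross-check of the record above, the communicability matrix is simply the matrix exponential of the 0-1 adjacency matrix; a sketch using the current NetworkX API (to_numpy_array instead of the deprecated to_numpy_matrix used in the file):

# Cross-check sketch: communicability is the matrix exponential of the
# 0-1 adjacency matrix, C(u, v) = (e^A)_{uv}.
import networkx as nx
import numpy as np
import scipy.linalg

G = nx.Graph([(0, 1), (1, 2), (1, 5), (5, 4), (2, 4), (2, 3), (4, 3), (3, 6)])
A = nx.to_numpy_array(G, nodelist=sorted(G))
A[A != 0.0] = 1
expA = scipy.linalg.expm(A)

# diagonal = subgraph (communicability) centrality, as computed by
# communicability_centrality_exp; off-diagonal = pairwise communicability
print(np.diag(expA))
print(expA[0, 1])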
# Copyright (c) 2014, James Hensman, Alan Saul
# Licensed under the BSD 3-clause license (see LICENSE.txt)

import numpy as np

from ..core import GP
from ..core.parameterization import ObsAr
from .. import kern
from ..core.parameterization.param import Param
from ..inference.latent_function_inference import VarGauss

log_2_pi = np.log(2*np.pi)


class GPVariationalGaussianApproximation(GP):
    """
    The Variational Gaussian Approximation revisited

    @article{Opper:2009,
        title = {The Variational Gaussian Approximation Revisited},
        author = {Opper, Manfred and Archambeau, C{\'e}dric},
        journal = {Neural Comput.},
        year = {2009},
        pages = {786--792},
    }
    """
    def __init__(self, X, Y, kernel, likelihood, Y_metadata=None):
        num_data = Y.shape[0]
        self.alpha = Param('alpha', np.zeros((num_data, 1)))  # only one latent fn for now.
        self.beta = Param('beta', np.ones(num_data))
        inf = VarGauss(self.alpha, self.beta)
        super(GPVariationalGaussianApproximation, self).__init__(X, Y, kernel, likelihood, name='VarGP', inference_method=inf, Y_metadata=Y_metadata)

        self.link_parameter(self.alpha)
        self.link_parameter(self.beta)
jameshensman/GPy
GPy/models/gp_var_gauss.py
Python
bsd-3-clause
1,230
[ "Gaussian" ]
7b7db0a333d9b28ee399bfd1c428200ba49c3629eb9e3af0184eca06780e379b
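A construction sketch based on the __init__ signature in the record above; the export path GPy.models, the Bernoulli likelihood, and the toy data are assumptions, not part of the file:

# Construction sketch from the __init__ signature above. Data, kernel and
# likelihood choices are illustrative assumptions.
import numpy as np
import GPy

X = np.random.rand(50, 1)
Y = (np.sin(6 * X) > 0).astype(float)   # binary targets in {0, 1}, shape (50, 1)

kernel = GPy.kern.RBF(input_dim=1)
likelihood = GPy.likelihoods.Bernoulli()

# Assumed export path; the file lives at GPy/models/gp_var_gauss.py.
m = GPy.models.GPVariationalGaussianApproximation(X, Y, kernel, likelihood)
m.optimize()   # fits kernel hyperparameters together with alpha and beta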
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Li Yao
# @File: prepare-data-for-giremi.py
# @License: MIT
# @Bitbucket: https://bitbucket.org/li_yao/
# @Github: https://github.com/liyao001
import pysam
import getopt
import sys


def is_snp(table, chrom, pos):
    """Return '1' if the position overlaps a record in the SNP table, else '0'."""
    flag = 0
    try:
        for hit in table.fetch(reference=chrom, start=int(pos) - 1, end=int(pos),
                               parser=pysam.asGTF()):
            flag = 1
            break
    except:
        pass
    return str(flag)


def get_gene_annotation(table, chrom, pos):
    """Return (gene_symbol, strand), or ('Inte', '#') for intergenic positions."""
    gene_symbol = 'Inte'
    gene_strand = '#'
    try:
        for hit in table.fetch(reference=chrom, start=int(pos) - 1, end=int(pos),
                               parser=pysam.asGTF()):
            gene_symbol = hit.gene_id
            if hit.strand != '.':
                gene_strand = hit.strand
    except:
        pass
    return gene_symbol, gene_strand


def helper(input_file, snp_file, gene_annotation_file):
    snpFile = pysam.Tabixfile(snp_file)
    refSeqFile = pysam.Tabixfile(gene_annotation_file)
    rawInput = open(input_file, mode='r')
    newFile = open(input_file + ".giremi", mode='w')
    new_lines = []
    first_line = 1
    for line in rawInput.readlines():
        if first_line == 1:  # skip the header line
            first_line = 0
            continue
        cols = line.replace('\n', '').replace('\r', '').split('\t')
        coordinate = cols[0]
        position = cols[1]
        gene_symbol, gene_strand = get_gene_annotation(refSeqFile, coordinate, position)
        snp_mark = is_snp(snpFile, coordinate, position)
        record = (coordinate, str(int(position) - 1), position, gene_symbol,
                  snp_mark, gene_strand + '\n')
        new_lines.append('\t'.join(record))
    newFile.writelines(new_lines)
    newFile.close()
    rawInput.close()


def usage():
    print 'Usage: prepare-data-for-giremi.py -i <input_file> -s <snp_file> -g <gene_file>'


def main():
    try:
        opts, args = getopt.getopt(sys.argv[1:], "i:s:g:",
                                   ["input_file=", "snp_file=", "gene_file="])
    except getopt.GetoptError, err:
        print str(err)
        sys.exit()
    input_file = None
    snp_file = None
    gene_annot = None
    if len(opts) == 0:
        usage()
        sys.exit()
    for o, a in opts:
        if o in ("-i", "--input_file"):
            input_file = a
        elif o in ("-s", "--snp_file"):
            snp_file = a
        elif o in ("-g", "--gene_file"):
            gene_annot = a
    print 'Please wait......'
    helper(input_file, snp_file, gene_annot)
    print 'Done!'


if __name__ == '__main__':
    main()
yaoli95/biotoolset
convert/prepare-data-for-giremi.py
Python
gpl-3.0
2,454
[ "pysam" ]
f19d1f0161fe9d9edc9ea9004671a7c4c70e8a44e2ee6e420d92edc21fc6d757
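The core operation in the record above is a point-overlap query against a Tabix-indexed annotation; a sketch with the modern pysam spelling (TabixFile rather than the legacy Tabixfile), using example file and contig names:

# Point-overlap query sketch with modern pysam. File and contig names
# are examples; any bgzipped, Tabix-indexed GTF works.
import pysam

tbx = pysam.TabixFile("annotation.gtf.gz")   # requires a .tbi index
pos = 12345   # 1-based position of interest
# fetch() takes a half-open, 0-based interval, hence (pos - 1, pos)
for row in tbx.fetch("chr1", pos - 1, pos, parser=pysam.asGTF()):
    print(row.gene_id, row.strand)
tbx.close()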
######################################################################## # $HeadURL$ # File : CREAMComputingElement.py # Author : A.T. ######################################################################## """ CREAM Computing Element """ __RCSID__ = "$Id$" from DIRAC.Resources.Computing.ComputingElement import ComputingElement from DIRAC.Core.Utilities.Grid import executeGridCommand from DIRAC.Core.Utilities.File import makeGuid from DIRAC import S_OK, S_ERROR import os, re, tempfile from types import StringTypes CE_NAME = 'CREAM' MANDATORY_PARAMETERS = [ 'Queue' ] class CREAMComputingElement( ComputingElement ): ############################################################################# def __init__( self, ceUniqueID ): """ Standard constructor. """ ComputingElement.__init__( self, ceUniqueID ) self.ceType = CE_NAME self.submittedJobs = 0 self.mandatoryParameters = MANDATORY_PARAMETERS self.pilotProxy = '' self.queue = '' self.outputURL = 'gsiftp://localhost' self.gridEnv = '' self.proxyRenewal = 0 ############################################################################# def _addCEConfigDefaults( self ): """Method to make sure all necessary Configuration Parameters are defined """ # First assure that any global parameters are loaded ComputingElement._addCEConfigDefaults( self ) def __writeJDL( self, executableFile ): """ Create the JDL for submission """ workingDirectory = self.ceParameters['WorkingDirectory'] fd, name = tempfile.mkstemp( suffix = '.jdl', prefix = 'CREAM_', dir = workingDirectory ) diracStamp = os.path.basename( name ).replace( '.jdl', '' ).replace( 'CREAM_', '' ) jdlFile = os.fdopen( fd, 'w' ) jdl = """ [ JobType = "Normal"; Executable = "%(executable)s"; StdOutput="%(diracStamp)s.out"; StdError="%(diracStamp)s.err"; InputSandbox={"%(executableFile)s"}; OutputSandbox={"%(diracStamp)s.out", "%(diracStamp)s.err"}; OutputSandboxBaseDestUri="%(outputURL)s"; ] """ % { 'executableFile':executableFile, 'executable':os.path.basename( executableFile ), 'outputURL':self.outputURL, 'diracStamp':diracStamp } jdlFile.write( jdl ) jdlFile.close() return name, diracStamp def _reset( self ): self.queue = self.ceParameters['Queue'] self.outputURL = self.ceParameters.get( 'OutputURL', 'gsiftp://localhost' ) self.gridEnv = self.ceParameters['GridEnv'] ############################################################################# def submitJob( self, executableFile, proxy, numberOfJobs = 1 ): """ Method to submit job """ self.log.verbose( "Executable file path: %s" % executableFile ) if not os.access( executableFile, 5 ): os.chmod( executableFile, 0755 ) batchIDList = [] stampDict = {} if numberOfJobs == 1: jdlName, diracStamp = self.__writeJDL( executableFile ) cmd = ['glite-ce-job-submit', '-n', '-a', '-N', '-r', '%s/%s' % ( self.ceName, self.queue ), '%s' % jdlName ] result = executeGridCommand( self.proxy, cmd, self.gridEnv ) os.unlink( jdlName ) if result['OK']: if result['Value'][0]: # We have got a non-zero status code return S_ERROR( 'Pilot submission failed with error: %s ' % result['Value'][2].strip() ) pilotJobReference = result['Value'][1].strip() if not pilotJobReference: return S_ERROR( 'No pilot reference returned from the glite job submission command' ) if not pilotJobReference.startswith( 'https' ): return S_ERROR( 'Invalid pilot reference %s' % pilotJobReference ) batchIDList.append( pilotJobReference ) stampDict[pilotJobReference] = diracStamp else: delegationID = makeGuid() cmd = [ 'glite-ce-delegate-proxy', '-e', '%s' % self.ceName, '%s' % delegationID ] result = 
executeGridCommand( self.proxy, cmd, self.gridEnv ) if not result['OK']: self.log.error( 'Failed to delegate proxy: %s' % result['Message'] ) return result for _i in range( numberOfJobs ): jdlName, diracStamp = self.__writeJDL( executableFile ) cmd = ['glite-ce-job-submit', '-n', '-N', '-r', '%s/%s' % ( self.ceName, self.queue ), '-D', '%s' % delegationID, '%s' % jdlName ] result = executeGridCommand( self.proxy, cmd, self.gridEnv ) os.unlink( jdlName ) if not result['OK']: break if result['Value'][0] != 0: break pilotJobReference = result['Value'][1].strip() if pilotJobReference and pilotJobReference.startswith( 'https' ): batchIDList.append( pilotJobReference ) stampDict[pilotJobReference] = diracStamp else: break if batchIDList: result = S_OK( batchIDList ) result['PilotStampDict'] = stampDict else: result = S_ERROR( 'No pilot references obtained from the glite job submission' ) return result def killJob( self, jobIDList ): """ Kill the specified jobs """ jobList = list( jobIDList ) if type( jobIDList ) in StringTypes: jobList = [ jobIDList ] cmd = ['glite-ce-job-cancel', '-n', '-N'] + jobList result = executeGridCommand( self.proxy, cmd, self.gridEnv ) if not result['OK']: return result if result['Value'][0] != 0: return S_ERROR( 'Failed kill job: %s' % result['Value'][0][1] ) return S_OK() ############################################################################# def getCEStatus( self, jobIDList = None ): """ Method to return information on running and pending jobs. :param list jobIDList: list of job IDs to be considered """ statusList = ['REGISTERED', 'PENDING', 'IDLE', 'RUNNING', 'REALLY-RUNNING'] cmd = ['glite-ce-job-status', '-n', '-a', '-e', '%s' % self.ceName, '-s', '%s' % ':'.join( statusList ) ] result = executeGridCommand( self.proxy, cmd, self.gridEnv ) resultDict = {} if not result['OK']: return result if result['Value'][0]: if result['Value'][0] == 11: return S_ERROR( 'Segmentation fault while calling glite-ce-job-status' ) elif result['Value'][2]: return S_ERROR( result['Value'][2] ) elif "Authorization error" in result['Value'][1]: return S_ERROR( "Authorization error" ) elif "FaultString" in result['Value'][1]: res = re.search( 'FaultString=\[([\w\s]+)\]', result['Value'][1] ) fault = '' if res: fault = res.group( 1 ) detail = '' res = re.search( 'FaultDetail=\[([\w\s]+)\]', result['Value'][1] ) if res: detail = res.group( 1 ) return S_ERROR( "Error: %s:%s" % (fault,detail) ) else: return S_ERROR( 'Error while interrogating CE status' ) if result['Value'][1]: resultDict = self.__parseJobStatus( result['Value'][1] ) running = 0 waiting = 0 statusDict = {} for ref, status in resultDict.items(): if jobIDList is not None and not ref in jobIDList: continue if status == 'Scheduled': waiting += 1 if status == 'Running': running += 1 statusDict[ref] = status result = S_OK() result['RunningJobs'] = running result['WaitingJobs'] = waiting result['SubmittedJobs'] = 0 result['JobStatusDict'] = statusDict return result def getJobStatus( self, jobIDList ): """ Get the status information for the given list of jobs """ if self.proxyRenewal % 60 == 0: self.proxyRenewal += 1 statusList = ['REGISTERED', 'PENDING', 'IDLE', 'RUNNING', 'REALLY-RUNNING'] cmd = ['glite-ce-job-status', '-L', '2', '--all', '-e', '%s' % self.ceName, '-s', '%s' % ':'.join( statusList ) ] result = executeGridCommand( self.proxy, cmd, self.gridEnv ) if result['OK']: delegationIDs = [] for line in result['Value'][1].split( '\n' ): if line.find( 'Deleg Proxy ID' ) != -1: delegationID = line.split()[-1].replace( 
'[', '' ).replace( ']', '' ) if delegationID not in delegationIDs: delegationIDs.append( delegationID ) if delegationIDs: cmd = ['glite-ce-proxy-renew', '-e', self.ceName ] cmd.extend( delegationIDs ) self.log.info( 'Refreshing proxy for:', ' '.join( delegationIDs ) ) result = executeGridCommand( self.proxy, cmd, self.gridEnv ) workingDirectory = self.ceParameters['WorkingDirectory'] fd, idFileName = tempfile.mkstemp( suffix = '.ids', prefix = 'CREAM_', dir = workingDirectory ) idFile = os.fdopen( fd, 'w' ) idFile.write( '##CREAMJOBS##' ) for id_ in jobIDList: if ":::" in id_: ref, stamp = id_.split( ':::' ) else: ref = id_ idFile.write( '\n' + ref ) idFile.close() cmd = ['glite-ce-job-status', '-n', '-i', '%s' % idFileName ] result = executeGridCommand( self.proxy, cmd, self.gridEnv ) os.unlink( idFileName ) resultDict = {} if not result['OK']: self.log.error( 'Failed to get job status', result['Message'] ) return result if result['Value'][0]: if result['Value'][2]: return S_ERROR( result['Value'][2] ) else: return S_ERROR( 'Error while interrogating job statuses' ) if result['Value'][1]: resultDict = self.__parseJobStatus( result['Value'][1] ) if not resultDict: return S_ERROR( 'No job statuses returned' ) # If CE does not know about a job, set the status to Unknown for job in jobIDList: if not resultDict.has_key( job ): resultDict[job] = 'Unknown' return S_OK( resultDict ) def __parseJobStatus( self, output ): """ Parse the output of the glite-ce-job-status """ resultDict = {} ref = '' for line in output.split( '\n' ): if not line: continue match = re.search( 'JobID=\[(.*)\]', line ) if match and len( match.groups() ) == 1: ref = match.group( 1 ) match = re.search( 'Status.*\[(.*)\]', line ) if match and len( match.groups() ) == 1: creamStatus = match.group( 1 ) if creamStatus in ['DONE-OK']: resultDict[ref] = 'Done' elif creamStatus in ['DONE-FAILED']: resultDict[ref] = 'Failed' elif creamStatus in ['REGISTERED', 'PENDING', 'IDLE']: resultDict[ref] = 'Scheduled' elif creamStatus in ['ABORTED']: resultDict[ref] = 'Aborted' elif creamStatus in ['CANCELLED']: resultDict[ref] = 'Killed' elif creamStatus in ['RUNNING', 'REALLY-RUNNING']: resultDict[ref] = 'Running' elif creamStatus == 'N/A': resultDict[ref] = 'Unknown' else: resultDict[ref] = creamStatus.capitalize() return resultDict def getJobOutput( self, jobID, localDir = None ): """ Get the specified job standard output and error files. If the localDir is provided, the output is returned as file in this directory. Otherwise, the output is returned as strings. 
""" if jobID.find( ':::' ) != -1: pilotRef, stamp = jobID.split( ':::' ) else: pilotRef = jobID stamp = '' if not stamp: return S_ERROR( 'Pilot stamp not defined for %s' % pilotRef ) outURL = self.ceParameters.get( 'OutputURL', 'gsiftp://localhost' ) if outURL == 'gsiftp://localhost': result = self.__resolveOutputURL( pilotRef ) if not result['OK']: return result outURL = result['Value'] outputURL = os.path.join( outURL, '%s.out' % stamp ) errorURL = os.path.join( outURL, '%s.err' % stamp ) workingDirectory = self.ceParameters['WorkingDirectory'] outFileName = os.path.join( workingDirectory, os.path.basename( outputURL ) ) errFileName = os.path.join( workingDirectory, os.path.basename( errorURL ) ) cmd = ['globus-url-copy', '%s' % outputURL, 'file://%s' % outFileName ] result = executeGridCommand( self.proxy, cmd, self.gridEnv ) output = '' if result['OK']: if not result['Value'][0]: outFile = open( outFileName, 'r' ) output = outFile.read() outFile.close() os.unlink( outFileName ) elif result['Value'][0] == 1 and "No such file or directory" in result['Value'][2]: output = "Standard Output is not available on the CREAM service" if os.path.exists( outFileName ): os.unlink( outFileName ) else: error = '\n'.join( result['Value'][1:] ) return S_ERROR( error ) else: return S_ERROR( 'Failed to retrieve output for %s' % jobID ) cmd = ['globus-url-copy', '%s' % errorURL, '%s' % errFileName ] result = executeGridCommand( self.proxy, cmd, self.gridEnv ) error = '' if result['OK']: if not result['Value'][0]: errFile = open( errFileName, 'r' ) error = errFile.read() errFile.close() os.unlink( errFileName ) elif result['Value'][0] == 1 and "No such file or directory" in result['Value'][2]: error = "Standard Error is not available on the CREAM service" if os.path.exists( errFileName ): os.unlink( errFileName ) else: return S_ERROR( 'Failed to retrieve error for %s' % jobID ) return S_OK( ( output, error ) ) def __resolveOutputURL( self, pilotRef ): """ Resolve the URL of the pilot output files """ cmd = [ 'glite-ce-job-status', '-L', '2', '%s' % pilotRef, '| grep -i osb' ] result = executeGridCommand( self.proxy, cmd, self.gridEnv ) url = '' if result['OK']: if not result['Value'][0]: output = result['Value'][1] for line in output.split( '\n' ): line = line.strip() if line.find( 'OSB' ) != -1: match = re.search( '\[(.*)\]', line ) if match: url = match.group( 1 ) if url: return S_OK( url ) else: return S_ERROR( 'output URL not found for %s' % pilotRef ) else: return S_ERROR( 'Failed to retrieve long status for %s' % pilotRef ) #EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
calancha/DIRAC
Resources/Computing/CREAMComputingElement.py
Python
gpl-3.0
14,506
[ "DIRAC" ]
c54394b084eaa692bf5fa33051ecccd9e90d0c7bd09d25b2b7d6965c71a4fc7f
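The status handling in the record above is plain regex work over CLI output; a standalone sketch of the __parseJobStatus mapping, run on a fabricated sample of glite-ce-job-status output:

# Standalone sketch of the __parseJobStatus logic above; SAMPLE is a
# fabricated approximation of glite-ce-job-status output.
import re

SAMPLE = """
******  JobID=[https://cream.example.org:8443/CREAM123]
        Status        = [RUNNING]
******  JobID=[https://cream.example.org:8443/CREAM124]
        Status        = [DONE-OK]
"""

STATUS_MAP = {'DONE-OK': 'Done', 'DONE-FAILED': 'Failed',
              'REGISTERED': 'Scheduled', 'PENDING': 'Scheduled',
              'IDLE': 'Scheduled', 'ABORTED': 'Aborted',
              'CANCELLED': 'Killed', 'RUNNING': 'Running',
              'REALLY-RUNNING': 'Running', 'N/A': 'Unknown'}


def parse_job_status(output):
    result, ref = {}, ''
    for line in output.split('\n'):
        m = re.search(r'JobID=\[(.*)\]', line)
        if m:
            ref = m.group(1)
        m = re.search(r'Status.*\[(.*)\]', line)
        if m:
            status = m.group(1)
            result[ref] = STATUS_MAP.get(status, status.capitalize())
    return result

print(parse_job_status(SAMPLE))  # {...CREAM123: 'Running', ...CREAM124: 'Done'}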
''' FreeDiskSpaceCommand The Command gets the free space that is left in a Storage Element Note: there are, still, many references to "space tokens", for example ResourceManagementClient().selectSpaceTokenOccupancyCache(token=elementName) This is for historical reasons, and shoud be fixed one day. For the moment, when you see "token" or "space token" here, just read "StorageElement". ''' __RCSID__ = '$Id$' import sys import errno from datetime import datetime from DIRAC import S_OK, S_ERROR, gLogger from DIRAC.Core.Utilities.File import convertSizeUnits from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers from DIRAC.ResourceStatusSystem.Command.Command import Command from DIRAC.ResourceStatusSystem.Utilities import CSHelpers from DIRAC.Resources.Storage.StorageElement import StorageElement from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient class FreeDiskSpaceCommand(Command): ''' Uses diskSpace method to get the free space ''' def __init__(self, args=None, clients=None): super(FreeDiskSpaceCommand, self).__init__(args, clients=clients) self.rmClient = ResourceManagementClient() def _prepareCommand(self): ''' FreeDiskSpaceCommand requires one argument: - name : <str> ''' if 'name' not in self.args: return S_ERROR('"name" not found in self.args') elementName = self.args['name'] # We keep TB as default as this is what was used (and will still be used) # in the policy for "space tokens" ("real", "data" SEs) unit = self.args.get('unit', 'TB') return S_OK((elementName, unit)) def doNew(self, masterParams=None): """ Gets the parameters to run, either from the master method or from its own arguments. Gets the total and the free disk space of a storage element and inserts the results in the SpaceTokenOccupancyCache table of ResourceManagementDB database. The result is also returned to the caller, not only inserted. What is inserted in the DB will normally be in MB, what is returned will be in the specified unit. """ if masterParams is not None: elementName, unit = masterParams else: params = self._prepareCommand() if not params['OK']: return params elementName, unit = params['Value'] endpointResult = CSHelpers.getStorageElementEndpoint(elementName) if not endpointResult['OK']: return endpointResult se = StorageElement(elementName) occupancyResult = se.getOccupancy(unit=unit) if not occupancyResult['OK']: return occupancyResult occupancy = occupancyResult['Value'] free = occupancy['Free'] total = occupancy['Total'] spaceReservation = occupancy.get('SpaceReservation', '') # We only take the first one, in case there are severals. # Most probably not ideal, because it would be nice to stay # consistent, but well... endpoint = endpointResult['Value'][0] results = {'Endpoint': endpoint, 'Free': free, 'Total': total, 'SpaceReservation': spaceReservation, 'ElementName': elementName} result = self._storeCommand(results) if not result['OK']: return result return S_OK({'Free': free, 'Total': total}) def _storeCommand(self, results): """ Here purely for extensibility """ return self.rmClient.addOrModifySpaceTokenOccupancyCache(endpoint=results['Endpoint'], lastCheckTime=datetime.utcnow(), free=results['Free'], total=results['Total'], token=results['ElementName']) def doCache(self): """ This is a method that gets the element's details from the spaceTokenOccupancyCache DB table. It will return a dictionary with th results, converted to "correct" unit. 
""" params = self._prepareCommand() if not params['OK']: return params elementName, unit = params['Value'] result = self.rmClient.selectSpaceTokenOccupancyCache(token=elementName) if not result['OK']: return result if not result['Value']: return S_ERROR(errno.ENODATA, "No occupancy recorded") # results are normally in 'MB' free = result['Value'][0][3] total = result['Value'][0][4] free = convertSizeUnits(free, 'MB', unit) total = convertSizeUnits(total, 'MB', unit) if free == -sys.maxsize or total == -sys.maxsize: return S_ERROR("No valid unit specified") return S_OK({'Free': free, 'Total': total}) def doMaster(self): """ This method calls the doNew method for each storage element that exists in the CS. """ for name in DMSHelpers().getStorageElements(): # keeping TB as default diskSpace = self.doNew((name, 'MB')) if not diskSpace['OK']: gLogger.warn("Unable to calculate free/total disk space", "name: %s" % name) gLogger.warn(diskSpace['Message']) continue return S_OK()
andresailer/DIRAC
ResourceStatusSystem/Command/FreeDiskSpaceCommand.py
Python
gpl-3.0
5,209
[ "DIRAC" ]
b18d539b7a106e8d947dfed618f07029dfa5d0570f9a527ac911997a49424346
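doCache in the record above stores occupancy in MB and rescales on the way out with DIRAC's convertSizeUnits; the rescaling reduces to a power-of-1000 shift, sketched here as a standalone helper (assuming decimal units; the real DIRAC helper may differ in detail):

# Standalone sketch of MB -> TB style conversions, analogous to the
# convertSizeUnits helper imported above. Assumes decimal (SI) units;
# the actual DIRAC implementation may differ in detail.
UNITS = {'B': 0, 'KB': 1, 'MB': 2, 'GB': 3, 'TB': 4, 'PB': 5}


def convert_size(value, from_unit, to_unit):
    """Scale `value` between byte units; raises KeyError on unknown units."""
    return value * 1000.0 ** (UNITS[from_unit.upper()] - UNITS[to_unit.upper()])

print(convert_size(2500000, 'MB', 'TB'))   # 2.5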
"""ropemacs, an emacs mode for using rope refactoring library""" import sys import ropemode.decorators import ropemode.environment import ropemode.interface from Pymacs import lisp from rope.base import utils class LispUtils(ropemode.environment.Environment): def ask(self, prompt, default=None, starting=None): if default is not None: prompt = prompt + ('[%s] ' % default) result = lisp.read_from_minibuffer(prompt, starting, None, None, None, default, None) if result == '' and default is not None: return default return result def ask_values(self, prompt, values, default=None, starting=None, exact=True): if self._emacs_version() < 22: values = [[value, value] for value in values] if exact and default is not None: prompt = prompt + ('[%s] ' % default) reader = lisp['ropemacs-completing-read-function'].value() result = reader(prompt, values, None, exact, starting) if result == '' and exact: return default return result def ask_completion(self, prompt, values, starting=None): return self.ask_values(prompt, values, starting=starting, exact=None) def ask_directory(self, prompt, default=None, starting=None): location = starting or default if location is not None: prompt = prompt + ('[%s] ' % location) if lisp.fboundp(lisp['read-directory-name']): # returns default when starting is entered result = lisp.read_directory_name(prompt, location, location) else: result = lisp.read_file_name(prompt, location, location) if result == '' and location is not None: return location return result def message(self, msg): message(msg) def yes_or_no(self, prompt): return lisp.yes_or_no_p(prompt) def y_or_n(self, prompt): return lisp.y_or_n_p(prompt) def get(self, name, default=None): lispname = 'ropemacs-' + name.replace('_', '-') if lisp.boundp(lisp[lispname]): return lisp[lispname].value() return default def get_offset(self): return lisp.point() - 1 def get_text(self): end = lisp.buffer_size() + 1 old_min = lisp.point_min() old_max = lisp.point_max() narrowed = (old_min != 1 or old_max != end) if narrowed: lisp.narrow_to_region(1, lisp.buffer_size() + 1) try: return lisp.buffer_string() finally: if narrowed: lisp.narrow_to_region(old_min, old_max) def get_region(self): offset1 = self.get_offset() lisp.exchange_point_and_mark() offset2 = self.get_offset() lisp.exchange_point_and_mark() return min(offset1, offset2), max(offset1, offset2) def filename(self): return lisp.buffer_file_name() def is_modified(self): return lisp.buffer_modified_p() def goto_line(self, lineno): lisp.goto_line(lineno) def insert_line(self, line, lineno): current = lisp.point() lisp.goto_line(lineno) lisp.insert(line + '\n') lisp.goto_char(current + len(line) + 1) def insert(self, text): lisp.insert(text) def delete(self, start, end): lisp.delete_region(start, end) def filenames(self): result = [] for buffer in lisp.buffer_list(): filename = lisp.buffer_file_name(buffer) if filename: result.append(filename) return result def save_files(self, filenames): ask = self.get('confirm_saving') initial = lisp.current_buffer() for filename in filenames: buffer = lisp.find_buffer_visiting(filename) if buffer: if lisp.buffer_modified_p(buffer): if not ask or lisp.y_or_n_p('Save %s buffer?' 
% filename): lisp.set_buffer(buffer) lisp.save_buffer() lisp.set_buffer(initial) def reload_files(self, filenames, moves={}): if self.filename() in moves: initial = None else: initial = lisp.current_buffer() for filename in filenames: buffer = lisp.find_buffer_visiting(filename) if buffer: if filename in moves: lisp.kill_buffer(buffer) lisp.find_file(moves[filename]) else: lisp.set_buffer(buffer) lisp.revert_buffer(False, True) if initial is not None: lisp.set_buffer(initial) def find_file(self, filename, readonly=False, other=False): if other: lisp.find_file_other_window(filename) elif readonly: lisp.find_file_read_only(filename) else: lisp.find_file(filename) def _make_buffer(self, name, contents, empty_goto=True, switch=False, window='other', modes=[], fit_lines=None): """Make an emacs buffer `window` can be one of `None`, 'current' or 'other'. """ new_buffer = lisp.get_buffer_create(name) lisp.set_buffer(new_buffer) lisp.toggle_read_only(-1) lisp.erase_buffer() if contents or empty_goto: lisp.insert(contents) for mode in modes: lisp[mode + '-mode']() lisp.buffer_disable_undo(new_buffer) lisp.toggle_read_only(1) if switch: if window == 'current': lisp.switch_to_buffer(new_buffer) else: lisp.switch_to_buffer_other_window(new_buffer) lisp.goto_char(lisp.point_min()) elif window == 'other': if self.get("use_pop_to_buffer"): lisp.pop_to_buffer(new_buffer) lisp.goto_char(lisp.point_min()) else: new_window = lisp.display_buffer(new_buffer) lisp.set_window_point(new_window, lisp.point_min()) if (fit_lines and lisp.fboundp(lisp['fit-window-to-buffer'])): lisp.fit_window_to_buffer(new_window, fit_lines) lisp.bury_buffer(new_buffer) return new_buffer def _hide_buffer(self, name, delete=True): buffer = lisp.get_buffer(name) if buffer is not None: window = lisp.get_buffer_window(buffer) if window is not None: lisp.bury_buffer(buffer) if delete: lisp.delete_window(window) else: if lisp.buffer_name(lisp.current_buffer()) == name: lisp.switch_to_buffer(None) def _emacs_version(self): return int(lisp['emacs-version'].value().split('.')[0]) def create_progress(self, name): if lisp.fboundp(lisp['make-progress-reporter']): progress = _LispProgress(name) else: progress = _OldProgress(name) return progress def current_word(self): return lisp.current_word() def push_mark(self): marker_ring = self.get('marker_ring') marker = lisp.point_marker() lisp.ring_insert(marker_ring, marker) def pop_mark(self): marker_ring = self.get('marker_ring') if lisp.ring_empty_p(marker_ring): self.message("There are no more marked buffers in \ the rope-marker-ring") else: oldbuf = lisp.current_buffer() marker = lisp.ring_remove(marker_ring, 0) marker_buffer = lisp.marker_buffer(marker) if marker_buffer is None: lisp.message("The marked buffer has been deleted") return marker_point = lisp.marker_position(marker) lisp.set_buffer(marker_buffer) lisp.goto_char(marker_point) #Kill that marker so it doesn't slow down editing. 
lisp.set_marker(marker, None, None) if not lisp.eq(oldbuf, marker_buffer): lisp.switch_to_buffer(marker_buffer) def prefix_value(self, prefix): return lisp.prefix_numeric_value(prefix) def read_line_from_file(self, filename, lineno): with open(filename) as f: for i, line in enumerate(f): if i+1 == lineno: return line return "" # If lineno goes beyond the end of the file def show_occurrences(self, locations): buffer = self._make_buffer('*rope-occurrences*', "", switch=False) lisp.set_buffer(buffer) lisp.toggle_read_only(0) trunc_length = len(lisp.rope_get_project_root()) lisp.insert('List of occurrences:\n') for location in locations: code_line = self.read_line_from_file(location.filename, location.lineno).rstrip() filename = location.filename[trunc_length:] lineno = str(location.lineno) offset = str(location.offset) lisp.insert(filename + ":" + lineno + ":" + code_line + " " + offset) beginning = lisp.line_beginning_position() end = beginning + len(filename) lisp.add_text_properties(beginning, end, [lisp.face, lisp.button]) lisp.add_text_properties(beginning, end, [lisp.mouse_face, lisp.highlight, lisp.help_echo, "mouse-2: visit this file in other window"]) lisp.insert("\n") lisp.toggle_read_only(1) lisp.set(lisp["next-error-function"], lisp.rope_occurrences_next) lisp.local_set_key('\r', lisp.rope_occurrences_goto) lisp.local_set_key((lisp.mouse_1,), lisp.rope_occurrences_goto) lisp.local_set_key('q', lisp.delete_window) def show_doc(self, docs, altview=False): use_minibuffer = not altview if self.get('separate_doc_buffer'): use_minibuffer = not use_minibuffer if not use_minibuffer: fit_lines = self.get('max_doc_buffer_height') buffer = self._make_buffer('*rope-pydoc*', docs, empty_goto=False, fit_lines=fit_lines) lisp.local_set_key('q', lisp.bury_buffer) elif docs: docs = '\n'.join(docs.split('\n')[:7]) self.message(docs) def preview_changes(self, diffs): self._make_buffer('*rope-preview*', diffs, switch=True, modes=['diff'], window='current') try: return self.yes_or_no('Do the changes? 
') finally: self._hide_buffer('*rope-preview*', delete=False) def local_command(self, name, callback, key=None, prefix=False): globals()[name] = callback self._set_interaction(callback, prefix) if self.local_prefix and key: key = self._key_sequence(self.local_prefix + ' ' + key) self._bind_local(_lisp_name(name), key) def _bind_local(self, name, key): lisp('(define-key ropemacs-local-keymap "%s" \'%s)' % (self._key_sequence(key), name)) def global_command(self, name, callback, key=None, prefix=False): globals()[name] = callback self._set_interaction(callback, prefix) if self.global_prefix and key: key = self._key_sequence(self.global_prefix + ' ' + key) lisp.global_set_key(key, lisp[_lisp_name(name)]) def _key_sequence(self, sequence): result = [] for key in sequence.split(): if key.startswith('C-'): number = ord(key[-1].upper()) - ord('A') + 1 result.append(chr(number)) elif key.startswith('M-'): number = ord(key[-1].upper()) + 0x80 result.append(chr(number)) else: result.append(key) return ''.join(result) def _set_interaction(self, callback, prefix): if hasattr(callback, 'im_func'): callback = callback.im_func if prefix: callback_interaction = 'P' else: callback_interaction = '' try: callback.interaction = callback_interaction except AttributeError: callback.__func__.interaction = callback_interaction def add_hook(self, name, callback, hook): mapping = {'before_save': 'before-save-hook', 'after_save': 'after-save-hook', 'exit': 'kill-emacs-hook'} globals()[name] = callback lisp.add_hook(lisp[mapping[hook]], lisp[_lisp_name(name)]) @property @utils.saveit def global_prefix(self): return self.get('global_prefix') @property @utils.saveit def local_prefix(self): return self.get('local_prefix') def _lisp_name(name): return 'rope-' + name.replace('_', '-') class _LispProgress(object): def __init__(self, name): self.progress = lisp.make_progress_reporter('%s ... ' % name, 0, 100) def update(self, percent): lisp.progress_reporter_update(self.progress, percent) def done(self): lisp.progress_reporter_done(self.progress) class _OldProgress(object): def __init__(self, name): self.name = name self.update(0) def update(self, percent): if percent != 0: message('%s ... %s%%%%' % (self.name, percent)) else: message('%s ... ' % self.name) def done(self): message('%s ... 
done' % self.name) def message(message): lisp.message(message.replace('%', '%%')) def occurrences_goto(): if lisp.line_number_at_pos() < 1: lisp.forward_line(1 - lisp.line_number_at_pos()) lisp.end_of_line() end = lisp.point() lisp.beginning_of_line() line = lisp.buffer_substring_no_properties(lisp.point(), end) tokens = line.split() colon_tokens = line.split(":") project_root = lisp.rope_get_project_root() if tokens and colon_tokens: # Mark this line with an arrow lisp(''' (remove-overlays (point-min) (point-max)) (overlay-put (make-overlay (line-beginning-position) (line-end-position)) 'before-string (propertize "A" 'display '(left-fringe right-triangle))) ''') filename = project_root + "/" + colon_tokens[0] offset = int(tokens[-1]) resource = _interface._get_resource(filename) LispUtils().find_file(resource.real_path, other=True) lisp.goto_char(offset + 1) occurrences_goto.interaction = '' def occurrences_next(arg, reset): lisp.switch_to_buffer_other_window('*rope-occurrences*', True) if reset: lisp.goto_char(lisp.point_min()) lisp.forward_line(arg) if lisp.eobp(): lisp.message("Cycling rope occurrences") lisp.goto_char(lisp.point_min()) occurrences_goto() occurrences_next.interaction = '' DEFVARS = """\ (defgroup ropemacs nil "ropemacs, an emacs plugin for rope." :link '(url-link "http://rope.sourceforge.net/ropemacs.html") :prefix "rope-") (defcustom ropemacs-confirm-saving t "Specifies whether to confirm saving modified buffers before refactorings. If non-nil, you have to confirm saving all modified python files before refactorings; otherwise they are saved automatically.") (defcustom ropemacs-codeassist-maxfixes 1 "The number of errors to fix before code-assist. How many errors to fix, at most, when proposing code completions.") (defcustom ropemacs-separate-doc-buffer t "Whether `rope-show-doc' should use a separate buffer or the minibuffer.") (defcustom ropemacs-max-doc-buffer-height 22 "The maximum buffer height for `rope-show-doc'.") (defcustom ropemacs-use-pop-to-buffer nil "Use native `pop-to-buffer' to show a new buffer. This affects all ropemacs functions, including `rope-show-doc'.") (defcustom ropemacs-enable-autoimport 'nil "Specifies whether autoimport should be enabled.") (defcustom ropemacs-autoimport-modules nil "The names of modules whose global names should be cached. The `rope-generate-autoimport-cache' reads this list and fills its cache.") (defcustom ropemacs-autoimport-underlineds 'nil "If set, autoimport will cache names starting with underscores, too.") (defcustom ropemacs-completing-read-function (if (and (boundp 'ido-mode) ido-mode) 'ido-completing-read 'completing-read) "Function to call when prompting the user to choose between a list of options. This should take the same arguments as `completing-read'. Possible values are `completing-read' and `ido-completing-read'. Note that you must set `ido-mode' if using `ido-completing-read'." :type 'function) (make-obsolete-variable 'rope-confirm-saving 'ropemacs-confirm-saving) (make-obsolete-variable 'rope-code-assist-max-fixes 'ropemacs-codeassist-maxfixes) (defcustom ropemacs-local-prefix "C-c r" "The prefix for ropemacs refactorings. Use nil to prevent binding keys.") (defcustom ropemacs-global-prefix "C-x p" "The prefix for ropemacs project commands. Use nil to prevent binding keys.") (defcustom ropemacs-marker-ring-length 16 "Length of the rope marker ring.") (defcustom ropemacs-marker-ring (make-ring ropemacs-marker-ring-length) "Ring of markers recording the locations from which goto-definition was invoked.") (defcustom ropemacs-enable-shortcuts 't "Specifies whether to bind the ropemacs shortcut keys. If non-nil it binds: ================ ============================ Key Command ================ ============================ M-/ rope-code-assist C-c g rope-goto-definition C-c u rope-pop-mark C-c d rope-show-doc C-c f rope-find-occurrences M-? rope-lucky-assist ================ ============================ ") (defvar ropemacs-local-keymap (make-sparse-keymap)) (easy-menu-define ropemacs-mode-menu ropemacs-local-keymap "`ropemacs' menu" '("Rope" ["Code assist" rope-code-assist t] ["Lucky assist" rope-lucky-assist t] ["Goto definition" rope-goto-definition t] ["Pop mark" rope-pop-mark t] ["Jump to global" rope-jump-to-global t] ["Show documentation" rope-show-doc t] ["Find Occurrences" rope-find-occurrences t] ["Analyze module" rope-analyze-module t] ("Refactor" ["Inline" rope-inline t] ["Extract Variable" rope-extract-variable t] ["Extract Method" rope-extract-method t] ["Organize Imports" rope-organize-imports t] ["Rename" rope-rename t] ["Move" rope-move t] ["Restructure" rope-restructure t] ["Use Function" rope-use-function t] ["Introduce Factory" rope-introduce-factory t] ("Generate" ["Class" rope-generate-class t] ["Function" rope-generate-function t] ["Module" rope-generate-module t] ["Package" rope-generate-package t] ["Variable" rope-generate-variable t] ) ("Module" ["Module to Package" rope-module-to-package t] ["Rename Module" rope-rename-current-module t] ["Move Module" rope-move-current-module t] ) "--" ["Undo" rope-undo t] ["Redo" rope-redo t] ) ("Project" ["Open project" rope-open-project t] ["Close project" rope-close-project t] ["Find file" rope-find-file t] ["Open project config" rope-project-config t] ) ("Create" ["Module" rope-create-module t] ["Package" rope-create-package t] ["File" rope-create-file t] ["Directory" rope-create-directory t] ) )) (defcustom ropemacs-guess-project 'nil "Try to guess the project when needed. If non-nil, ropemacs tries to guess and open the project that contains the file on which a rope command is performed when no project is already open.") (provide 'ropemacs) """ MINOR_MODE = """\ (require 'thingatpt) (define-minor-mode ropemacs-mode "ropemacs, rope in emacs!"
nil " Rope" ropemacs-local-keymap (if ropemacs-mode (add-hook 'completion-at-point-functions 'ropemacs-completion-at-point nil t) (remove-hook 'completion-at-point-functions 'ropemacs-completion-at-point t))) (defun ropemacs-completion-at-point () (unless (nth 8 (syntax-ppss)) (let ((bounds (or (bounds-of-thing-at-point 'symbol) (cons (point) (point))))) (list (car bounds) (cdr bounds) 'ropemacs--completion-table :company-doc-buffer 'ropemacs--completion-doc-buffer :company-location 'ropemacs--completion-location)))) (defalias 'ropemacs--completion-table (if (fboundp 'completion-table-with-cache) (completion-table-with-cache #'ropemacs--completion-candidates) (completion-table-dynamic #'ropemacs--completion-candidates))) (defun ropemacs--completion-candidates (prefix) (mapcar (lambda (element) (concat prefix element)) (rope-completions))) (defun ropemacs--with-inserted (candidate fn) (let ((inhibit-modification-hooks t) (inhibit-point-motion-hooks t) (modified-p (buffer-modified-p)) (beg (or (car (bounds-of-thing-at-point 'symbol)) (point))) (pt (point))) (insert (substring candidate (- pt beg))) (unwind-protect (funcall fn) (delete-region pt (point)) (set-buffer-modified-p modified-p)))) (defun ropemacs--completion-doc-buffer (candidate) (let ((doc (ropemacs--with-inserted candidate #'rope-get-doc))) (when doc (with-current-buffer (get-buffer-create "*ropemacs-completion-doc*") (erase-buffer) (insert doc) (goto-char (point-min)) (current-buffer))))) (defun ropemacs--completion-location (candidate) (let ((location (ropemacs--with-inserted candidate #'rope-definition-location))) (when location (cons (elt location 0) (elt location 1))))) """ shortcuts = [('M-/', 'rope-code-assist'), ('M-?', 'rope-lucky-assist'), ('C-c g', 'rope-goto-definition'), ('C-c u', 'rope-pop-mark'), ('C-c d', 'rope-show-doc'), ('C-c f', 'rope-find-occurrences')] _interface = None def _load_ropemacs(): global _interface ropemode.decorators.logger.message = message lisp(DEFVARS) _interface = ropemode.interface.RopeMode(env=LispUtils()) _interface.init() lisp(MINOR_MODE) if LispUtils().get('enable_shortcuts'): for key, command in shortcuts: LispUtils()._bind_local(command, key) lisp.add_hook(lisp['python-mode-hook'], lisp['ropemacs-mode']) def _started_from_pymacs(): import inspect frame = sys._getframe() while frame: # checking frame.f_code.co_name == 'pymacs_load_helper' might # be very fragile. filename = inspect.getfile(frame).rstrip('c') if filename.endswith(('Pymacs.py', 'pymacs.py')): return True frame = frame.f_back if _started_from_pymacs(): _load_ropemacs()
mcepl/ropemacs
ropemacs/__init__.py
Python
gpl-2.0
24,177
[ "VisIt" ]
426078099fdb042d3d5d0012b7b11395bc0f91eb6d41c58fb53bb333da672327
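The `_key_sequence` helper in the ropemacs file above translates Emacs-style key descriptions such as "C-c r" into the raw characters Emacs expects. Below is a minimal standalone sketch of that same translation; the function name `emacs_key_sequence` and the `__main__` checks are ours for illustration, not part of ropemacs.

def emacs_key_sequence(sequence):
    # "C-x" maps to the ASCII control character (C-a == 0x01 ... C-z == 0x1a);
    # "M-x" is encoded by setting the eighth bit; anything else passes through.
    result = []
    for key in sequence.split():
        if key.startswith('C-'):
            result.append(chr(ord(key[-1].upper()) - ord('A') + 1))
        elif key.startswith('M-'):
            result.append(chr(ord(key[-1].upper()) + 0x80))
        else:
            result.append(key)
    return ''.join(result)

if __name__ == '__main__':
    assert emacs_key_sequence('C-c r') == '\x03r'   # the default local prefix
    assert emacs_key_sequence('C-x p') == '\x18p'   # the default global prefix
    assert emacs_key_sequence('M-/') == '\xaf'      # meta is the high bit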
import os, re
import subprocess
from os import listdir
from os.path import isfile, join
from datetime import datetime
from tqdm import tqdm_notebook

'''
main
1. find R1,R2 "good" pairs in the input directory (*.fastq)
2. run bwa mem (mapping) on them
'''

def main(inputdir, outputdir, refway, bwaline):
    before = datetime.now()
    inputdir = os.path.abspath(inputdir) + '/'
    outputdir = os.path.abspath(outputdir) + '/'

    # Read files in folder
    onlyfiles = [f for f in listdir(inputdir) if isfile(join(inputdir, f))]
    r1_files = {}
    r2_files = {}
    for filename in onlyfiles:
        filename = filename.rstrip()
        if re.search('good', filename):
            if re.search('R1', filename):
                key_filename = filename.split('R1')[0]
                r1_files[key_filename] = filename
            elif re.search('R2', filename):
                key_filename = filename.split('R2')[0]
                r2_files[key_filename] = filename

    # Keep only complete R1/R2 pairs; everything else is reported at the end.
    conform_files = []
    nonconform_files = []
    for key in r1_files:
        if key in r2_files:
            conform_files.append([r1_files[key], r2_files[key]])
            del r2_files[key]
        else:
            nonconform_files.append(r1_files[key])
    nonconform_files = nonconform_files + list(r2_files.values())

    if not os.path.exists(outputdir):
        os.makedirs(outputdir)

    stat_name = ''.join([str(before.year), str(before.month), str(before.day),
                         str(before.hour), str(before.minute), str(before.second)])
    logfile = open(outputdir + 'logfile_' + stat_name + '.log', 'w')

    for filename1, filename2 in tqdm_notebook(conform_files, desc=''):
        readsname = filename1.split('R1')[0]
        readsname = readsname.rsplit('.', 1)[0]
        bwamem = ' '.join([bwaline, refway, inputdir + filename1,
                           inputdir + filename2, '>',
                           outputdir + readsname + '.sam'])
        print(bwamem)
        # shell=True is needed so the '>' redirection into the .sam file works.
        p = subprocess.Popen(bwamem, stderr=subprocess.PIPE, shell=True)
        logline = p.stderr.read().decode()
        logfile.write(logline)
        logfile.write('\n#################################\n')

    logfile.close()
    if len(nonconform_files) != 0:
        print("I can't read these files: " + str(nonconform_files))
labcfg/retroparty
bwamem.py
Python
apache-2.0
2,401
[ "BWA" ]
7e4b23f94febbc776ee43ae4316b18145412fe238e2c7b68ba00343cef416b83
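The first half of main() in bwamem.py above pairs R1/R2 fastq files by the part of the filename that precedes the "R1"/"R2" marker, keeping only names containing "good". Here is a filesystem-free sketch of that pairing rule; the helper name `pair_reads` and the sample filenames are illustrative, not part of the script.

def pair_reads(filenames):
    # Bucket "good" files by the prefix before the R1/R2 marker.
    r1, r2 = {}, {}
    for name in filenames:
        if 'good' not in name:
            continue
        if 'R1' in name:
            r1[name.split('R1')[0]] = name
        elif 'R2' in name:
            r2[name.split('R2')[0]] = name
    # A key present in both buckets yields a conforming pair; the rest are orphans.
    pairs, unpaired = [], []
    for key, name in r1.items():
        if key in r2:
            pairs.append((name, r2.pop(key)))
        else:
            unpaired.append(name)
    unpaired.extend(r2.values())
    return pairs, unpaired

pairs, unpaired = pair_reads([
    'sampleA_R1.good.fastq', 'sampleA_R2.good.fastq',  # matching pair
    'sampleB_R1.good.fastq',                           # orphan R1
    'sampleC_R2.bad.fastq',                            # filtered out: no "good"
])
assert pairs == [('sampleA_R1.good.fastq', 'sampleA_R2.good.fastq')]
assert unpaired == ['sampleB_R1.good.fastq']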
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import sys
import os

def welcome():
    """
    Perform a bunch of sanity tests to make sure the Add-on SDK
    environment is sane, and then display a welcome message.
    """
    try:
        if sys.version_info[0] > 2:
            print ("Error: You appear to be using Python %d, but "
                   "the Add-on SDK only supports the Python 2.x line." %
                   (sys.version_info[0]))
            return

        import mozrunner

        if 'CUDDLEFISH_ROOT' not in os.environ:
            print ("Error: CUDDLEFISH_ROOT environment variable does "
                   "not exist! It should point to the root of the "
                   "Add-on SDK repository.")
            return

        env_root = os.environ['CUDDLEFISH_ROOT']

        bin_dir = os.path.join(env_root, 'bin')
        python_lib_dir = os.path.join(env_root, 'python-lib')
        path = os.environ['PATH'].split(os.path.pathsep)

        if bin_dir not in path:
            print ("Warning: the Add-on SDK binary directory %s "
                   "does not appear to be in your PATH. You may "
                   "not be able to run 'cfx' or other SDK tools." %
                   bin_dir)

        if python_lib_dir not in sys.path:
            print ("Warning: the Add-on SDK python-lib directory %s "
                   "does not appear to be in your sys.path, which "
                   "is odd because I'm running from it." %
                   python_lib_dir)

        if not mozrunner.__path__[0].startswith(env_root):
            print ("Warning: your mozrunner package is installed at %s, "
                   "which does not seem to be located inside the Jetpack "
                   "SDK. This may cause problems, and you may want to "
                   "uninstall the other version. See bug 556562 for "
                   "more information." % mozrunner.__path__[0])
    except Exception:
        # Apparently we can't get the actual exception object in the
        # 'except' clause in a way that's syntax-compatible for both
        # Python 2.x and 3.x, so we'll have to use the traceback module.
        import traceback
        _, e, _ = sys.exc_info()
        print ("Verification of Add-on SDK environment failed (%s)." % e)
        print ("Your SDK may not work properly.")
        return

    print ("Welcome to the Add-on SDK. For the docs, visit https://developer.mozilla.org/en-US/Add-ons/SDK")

if __name__ == '__main__':
    welcome()
Yukarumya/Yukarum-Redfoxes
addon-sdk/source/python-lib/jetpack_sdk_env.py
Python
mpl-2.0
2,637
[ "VisIt" ]
02cf3b15fc41b73fe3ea46fc351c493d751bf3910ec51efbc173d8d5f9a7304f
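The PATH check in welcome() above is a plain string-membership test on the entries of os.environ['PATH']. A small self-contained sketch of the same pattern follows; `sdk_bin_dir` is a hypothetical path, not one the SDK defines.

import os

def dir_on_path(directory):
    # PATH entries are separated by os.path.pathsep (':' on POSIX, ';' on Windows).
    return directory in os.environ.get('PATH', '').split(os.path.pathsep)

sdk_bin_dir = os.path.join('/opt', 'addon-sdk', 'bin')  # hypothetical location
if not dir_on_path(sdk_bin_dir):
    print("Warning: %s does not appear to be in your PATH." % sdk_bin_dir)

Note the limitation this inherits from the original: exact string comparison misses entries that differ only by a trailing slash or by case; normalizing both sides with os.path.normcase(os.path.normpath(...)) would be more robust.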
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import logging import random from odoo import api, models, fields, tools, _ from odoo.http import request from odoo.exceptions import UserError, ValidationError _logger = logging.getLogger(__name__) class SaleOrder(models.Model): _inherit = "sale.order" website_order_line = fields.One2many( 'sale.order.line', 'order_id', string='Order Lines displayed on Website', readonly=True, help='Order Lines to be displayed on the website. They should not be used for computation purpose.', ) cart_quantity = fields.Integer(compute='_compute_cart_info', string='Cart Quantity') payment_acquirer_id = fields.Many2one('payment.acquirer', string='Payment Acquirer', copy=False) payment_tx_id = fields.Many2one('payment.transaction', string='Transaction', copy=False) only_services = fields.Boolean(compute='_compute_cart_info', string='Only Services') @api.multi @api.depends('website_order_line.product_uom_qty', 'website_order_line.product_id') def _compute_cart_info(self): for order in self: order.cart_quantity = int(sum(order.mapped('website_order_line.product_uom_qty'))) order.only_services = all(l.product_id.type in ('service', 'digital') for l in order.website_order_line) @api.model def _get_errors(self, order): return [] @api.model def _get_website_data(self, order): return { 'partner': order.partner_id.id, 'order': order } @api.multi def _cart_find_product_line(self, product_id=None, line_id=None, **kwargs): self.ensure_one() domain = [('order_id', '=', self.id), ('product_id', '=', product_id)] if line_id: domain += [('id', '=', line_id)] return self.env['sale.order.line'].sudo().search(domain) @api.multi def _website_product_id_change(self, order_id, product_id, qty=0): order = self.sudo().browse(order_id) product_context = dict(self.env.context) product_context.setdefault('lang', order.partner_id.lang) product_context.update({ 'partner': order.partner_id.id, 'quantity': qty, 'date': order.date_order, 'pricelist': order.pricelist_id.id, }) product = self.env['product.product'].with_context(product_context).browse(product_id) values = { 'product_id': product_id, 'name': product.display_name, 'product_uom_qty': qty, 'order_id': order_id, 'product_uom': product.uom_id.id, 'price_unit': product.price, } if product.description_sale: values['name'] += '\n %s' % (product.description_sale) return values @api.multi def _cart_update(self, product_id=None, line_id=None, add_qty=0, set_qty=0, **kwargs): """ Add or set product quantity, add_qty can be negative """ self.ensure_one() SaleOrderLineSudo = self.env['sale.order.line'].sudo() quantity = 0 order_line = False if self.state != 'draft': request.session['sale_order_id'] = None raise UserError(_('It is forbidden to modify a sale order which is not in draft status')) if line_id is not False: order_lines = self._cart_find_product_line(product_id, line_id, **kwargs) order_line = order_lines and order_lines[0] # Create line if no line with product_id can be located if not order_line: values = self._website_product_id_change(self.id, product_id, qty=1) order_line = SaleOrderLineSudo.create(values) try: order_line._compute_tax_id() except ValidationError as e: # The validation may occur in backend (eg: taxcloud) but should fail silently in frontend _logger.debug("ValidationError occurs during tax compute. 
%s" % (e)) if add_qty: add_qty -= 1 # compute new quantity if set_qty: quantity = set_qty elif add_qty is not None: quantity = order_line.product_uom_qty + (add_qty or 0) # Remove zero of negative lines if quantity <= 0: order_line.unlink() else: # update line values = self._website_product_id_change(self.id, product_id, qty=quantity) order_line.write(values) return {'line_id': order_line.id, 'quantity': quantity} def _cart_accessories(self): """ Suggest accessories based on 'Accessory Products' of products in cart """ for order in self: accessory_products = order.website_order_line.mapped('product_id.accessory_product_ids').filtered(lambda product: product.website_published) accessory_products -= order.website_order_line.mapped('product_id') return random.sample(accessory_products, len(accessory_products)) class Website(models.Model): _inherit = 'website' pricelist_id = fields.Many2one('product.pricelist', compute='_compute_pricelist_id', string='Default Pricelist') currency_id = fields.Many2one('res.currency', related='pricelist_id.currency_id', string='Default Currency') salesperson_id = fields.Many2one('res.users', string='Salesperson') salesteam_id = fields.Many2one('crm.team', string='Sales Team') website_pricelist_ids = fields.One2many('website_pricelist', 'website_id', string='Price list available for this Ecommerce/Website') @api.multi def _compute_pricelist_id(self): for website in self: website.pricelist_id = website.with_context(website_id=website.id).get_current_pricelist() # This method is cached, must not return records! See also #8795 @tools.ormcache('self.env.uid', 'country_code', 'show_visible', 'website_pl', 'current_pl', 'all_pl', 'partner_pl', 'order_pl') def _get_pl_partner_order(self, country_code, show_visible, website_pl, current_pl, all_pl, partner_pl=False, order_pl=False): """ Return the list of pricelists that can be used on website for the current user. :param str country_code: code iso or False, If set, we search only price list available for this country :param bool show_visible: if True, we don't display pricelist where selectable is False (Eg: Code promo) :param int website_pl: The default pricelist used on this website :param int current_pl: The current pricelist used on the website (If not selectable but the current pricelist we had this pricelist anyway) :param list all_pl: List of all pricelist available for this website :param int partner_pl: the partner pricelist :param int order_pl: the current cart pricelist :returns: list of pricelist ids """ pricelists = self.env['product.pricelist'] if country_code: for cgroup in self.env['res.country.group'].search([('country_ids.code', '=', country_code)]): for group_pricelists in cgroup.website_pricelist_ids: if not show_visible or group_pricelists.selectable or group_pricelists.pricelist_id.id in (current_pl, order_pl): pricelists |= group_pricelists.pricelist_id if not pricelists: # no pricelist for this country, or no GeoIP pricelists |= all_pl.filtered(lambda pl: not show_visible or pl.selectable or pl.pricelist_id.id in (current_pl, order_pl)).mapped('pricelist_id') partner = self.env.user.partner_id if not pricelists or (partner_pl or partner.property_product_pricelist.id) != website_pl: pricelists |= partner.property_product_pricelist # This method is cached, must not return records! 
See also #8795 return pricelists.sorted(lambda pl: pl.name).ids def _get_pl(self, country_code, show_visible, website_pl, current_pl, all_pl): pl_ids = self._get_pl_partner_order(country_code, show_visible, website_pl, current_pl, all_pl) return self.env['product.pricelist'].browse(pl_ids) def get_pricelist_available(self, show_visible=False): """ Return the list of pricelists that can be used on the website for the current user. Country restrictions will be detected with GeoIP (if installed). :param bool show_visible: if True, we don't display pricelists where selectable is False (e.g. promo codes) :returns: pricelist recordset """ website = request.website if not request.website: if self.env.context.get('website_id'): website = self.browse(self.env.context['website_id']) else: website = self.search([], limit=1) isocountry = request.session.geoip and request.session.geoip.get('country_code') or False partner = self.env.user.partner_id order_pl = partner.last_website_so_id and partner.last_website_so_id.state == 'draft' and partner.last_website_so_id.pricelist_id partner_pl = partner.property_product_pricelist pricelists = website._get_pl_partner_order(isocountry, show_visible, website.user_id.sudo().partner_id.property_product_pricelist.id, request.session.get('website_sale_current_pl'), website.website_pricelist_ids, partner_pl=partner_pl and partner_pl.id or None, order_pl=order_pl and order_pl.id or None) return self.env['product.pricelist'].browse(pricelists) def is_pricelist_available(self, pl_id): """ Return a boolean to specify if a specific pricelist can be manually set on the website. Warning: it checks only whether the pricelist is among the 'selectable' pricelists or is the current pricelist. :param int pl_id: The pricelist id to check :returns: Boolean, True if valid / available """ return pl_id in self.get_pricelist_available(show_visible=False).ids def get_current_pricelist(self): """ :returns: The current pricelist record """ # The list of available pricelists for this user. # If the user is signed in and has a pricelist different from the public user pricelist, # then that pricelist will always be considered as available available_pricelists = self.get_pricelist_available() pl = None partner = self.env.user.partner_id if request.session.get('website_sale_current_pl'): # `website_sale_current_pl` is set only if the user specifically chose it: # - either they chose it from the pricelist selection # - or they entered a coupon code pl = self.env['product.pricelist'].browse(request.session['website_sale_current_pl']) if pl not in available_pricelists: pl = None request.session.pop('website_sale_current_pl') if not pl: # If the user has a saved cart, it takes the pricelist of that cart, except if # the order is no longer draft (it has already been confirmed, or cancelled, ...) pl = partner.last_website_so_id.state == 'draft' and partner.last_website_so_id.pricelist_id if not pl: # The pricelist of the user set on their partner form. # If the user is not signed in, it's the public user pricelist pl = partner.property_product_pricelist if available_pricelists and pl not in available_pricelists: # If there is at least one available pricelist # and the chosen pricelist is not among them, # it then chooses the first available pricelist. # This can only happen when the pricelist is the public user pricelist and that pricelist is not in the available pricelists for this localization. # If the user is signed in and has a special pricelist (different from the public user pricelist), # then this special pricelist is among the available pricelists, and therefore it won't fall into this case. pl = available_pricelists[0] if not pl: _logger.error('Failed to find pricelist for partner "%s" (id %s)', partner.name, partner.id) return pl @api.multi def sale_product_domain(self): return [("sale_ok", "=", True)] @api.multi def sale_get_order(self, force_create=False, code=None, update_pricelist=False, force_pricelist=False): """ Return the current sale order after modifications specified by params. :param bool force_create: Create sale order if not already existing :param str code: Code to force a pricelist (promo code). If empty, it's a special case to reset the pricelist with the first available, else the default. :param bool update_pricelist: Force to recompute all the lines of the sale order to adapt the prices to the current pricelist. :param int force_pricelist: pricelist_id - if set, we change the pricelist to this one :returns: browse record for the current sale order """ self.ensure_one() partner = self.env.user.partner_id sale_order_id = request.session.get('sale_order_id') if not sale_order_id: last_order = partner.last_website_so_id available_pricelists = self.get_pricelist_available() # Do not reload the cart from the user's last visit if the cart is no longer draft or uses a pricelist that is no longer available. sale_order_id = last_order.state == 'draft' and last_order.pricelist_id in available_pricelists and last_order.id # Test validity of the sale_order_id sale_order = self.env['sale.order'].sudo().browse(sale_order_id).exists() if sale_order_id else None pricelist_id = request.session.get('website_sale_current_pl') or self.get_current_pricelist().id if self.env['product.pricelist'].browse(force_pricelist).exists(): pricelist_id = force_pricelist request.session['website_sale_current_pl'] = pricelist_id update_pricelist = True # create the SO if needed if not sale_order and (force_create or code): # TODO cache partner_id session affiliate_id = request.session.get('affiliate_id') if self.env['res.users'].sudo().browse(affiliate_id).exists(): salesperson_id = affiliate_id else: salesperson_id = request.website.salesperson_id.id addr = partner.address_get(['delivery', 'invoice']) sale_order = self.env['sale.order'].sudo().create({ 'partner_id': partner.id, 'pricelist_id': pricelist_id, 'payment_term_id': partner.property_payment_term_id.id, 'team_id': self.salesteam_id.id, 'partner_invoice_id': addr['invoice'], 'partner_shipping_id': addr['delivery'], 'user_id': salesperson_id or self.salesperson_id.id, }) request.session['sale_order_id'] = sale_order.id if request.website.partner_id.id != partner.id: # remember this cart as the partner's last website order partner.write({'last_website_so_id': sale_order.id}) if sale_order: # check for change of pricelist with a coupon pricelist_id = pricelist_id or partner.property_product_pricelist.id # check for change of partner_id, i.e. after signup if sale_order.partner_id.id != partner.id and request.website.partner_id.id != partner.id: flag_pricelist = False if pricelist_id != sale_order.pricelist_id.id: flag_pricelist = True fiscal_position = sale_order.fiscal_position_id.id # change the partner, and trigger the onchange sale_order.write({'partner_id': partner.id}) sale_order.onchange_partner_id() # check the pricelist: update it if the pricelist is not
the 'forced' one values = {} if sale_order.pricelist_id: if sale_order.pricelist_id.id != pricelist_id: values['pricelist_id'] = pricelist_id update_pricelist = True # if fiscal position, update the order lines taxes if sale_order.fiscal_position_id: sale_order._compute_tax_id() # if values, then make the SO update if values: sale_order.write(values) # check if the fiscal position has changed with the partner_id update recent_fiscal_position = sale_order.fiscal_position_id.id if flag_pricelist or recent_fiscal_position != fiscal_position: update_pricelist = True if code and code != sale_order.pricelist_id.code: code_pricelist = self.env['product.pricelist'].search([('code', '=', code)], limit=1) if code_pricelist: pricelist_id = code_pricelist.id update_pricelist = True elif code is not None and sale_order.pricelist_id.code: # code is not None when user removes code and click on "Apply" pricelist_id = partner.property_product_pricelist.id update_pricelist = True # update the pricelist if update_pricelist: request.session['website_sale_current_pl'] = pricelist_id values = {'pricelist_id': pricelist_id} sale_order.write(values) for line in sale_order.order_line: if line.exists(): sale_order._cart_update(product_id=line.product_id.id, line_id=line.id, add_qty=0) else: request.session['sale_order_id'] = None return None return sale_order def sale_get_transaction(self): tx_id = request.session.get('sale_transaction_id') if tx_id: transaction = self.env['payment.transaction'].sudo().browse(tx_id) if transaction.state != 'cancel': return transaction else: request.session['sale_transaction_id'] = False return False def sale_reset(self): request.session.update({ 'sale_order_id': False, 'sale_transaction_id': False, 'website_sale_current_pl': False, }) @api.model def get_product_price(self, product, qty=1, public=False, **kw): pricelist = request.website.get_current_pricelist() return product.display_price(pricelist, qty=qty, public=public) class WebsitePricelist(models.Model): _name = 'website_pricelist' _description = 'Website Pricelist' name = fields.Char('Pricelist Name', compute='_get_display_name', required=True) website_id = fields.Many2one('website', string="Website", required=True) selectable = fields.Boolean(help="Allow the end user to choose this price list") pricelist_id = fields.Many2one('product.pricelist', string='Pricelist') country_group_ids = fields.Many2many('res.country.group', 'res_country_group_website_pricelist_rel', 'website_pricelist_id', 'res_country_group_id', string='Country Groups') def clear_cache(self): # website._get_pl() is cached to avoid to recompute at each request the # list of available pricelists. So, we need to invalidate the cache when # we change the config of website price list to force to recompute. 
website = self.env['website'] website._get_pl_partner_order.clear_cache(website) @api.multi def _get_display_name(self): for website_pl in self: website_pl.name = _("Website Pricelist for %s") % website_pl.pricelist_id.name @api.model def create(self, data): res = super(WebsitePricelist, self).create(data) self.clear_cache() return res @api.multi def write(self, data): res = super(WebsitePricelist, self).write(data) self.clear_cache() return res @api.multi def unlink(self): res = super(WebsitePricelist, self).unlink() self.clear_cache() return res class ResCountry(models.Model): _inherit = 'res.country' def get_website_sale_countries(self, mode='billing'): return self.sudo().search([]) def get_website_sale_states(self, mode='billing'): return self.sudo().state_ids class ResCountryGroup(models.Model): _inherit = 'res.country.group' website_pricelist_ids = fields.Many2many('website_pricelist', 'res_country_group_website_pricelist_rel', 'res_country_group_id', 'website_pricelist_id', string='Website Price Lists') class ResPartner(models.Model): _inherit = 'res.partner' last_website_so_id = fields.Many2one('sale.order', string='Last Online Sale Order')
ayepezv/GAD_ERP
addons/website_sale/models/sale_order.py
Python
gpl-3.0
21,571
[ "VisIt" ]
1cf2c05e565278f7282d87e3bf010d3cc8293d7d2ac7f02a8dc9dd8c68966f2e
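SaleOrder._cart_update above applies a compact quantity rule: an explicit set_qty wins over add_qty, a freshly created line already holds one unit (hence the add_qty -= 1), and a result of zero or less means the line is unlinked. Below is a dependency-free model of just that rule, under illustrative names (`new_cart_quantity` is not an Odoo API).

def new_cart_quantity(current_qty, add_qty=0, set_qty=0, line_created=False):
    if line_created:
        # A new line is created with product_uom_qty == 1, so one unit of the
        # requested increment is already accounted for.
        current_qty = 1
        if add_qty:
            add_qty -= 1
    if set_qty:
        return set_qty
    # The caller unlinks the order line when this result is <= 0.
    return current_qty + (add_qty or 0)

assert new_cart_quantity(2, add_qty=3) == 5                     # add to a line
assert new_cart_quantity(2, set_qty=1) == 1                     # explicit override
assert new_cart_quantity(0, add_qty=1, line_created=True) == 1  # fresh line
assert new_cart_quantity(2, add_qty=-2) <= 0                    # line removed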
#! /usr/bin/python3 -B # *************************************************** * # This file is a part of ccROJ project * # distributed under GNU General Public License v3.0. * # Please visit the webpage: github.com/dsp-box/ccROJ * # for more information. * # contact: Krzysztof Czarnecki * # email: czarnecki.krzysiek@gmail.com * # *************************************************** */ # *************************************************** */ # import import sys, re # *************************************************** */ # parser definition class line_generator: def __init__(self, file_list): self.flist = file_list self.flags = {} self.flags["open"] = False self.flags["close"] = False self.flags["close2"] = False self.flags["block"] = False self.flags["sign"] = False self.flags["struct"] = False def get_internal_generator(self): for fname in self.flist: with open(fname, "r") as fd: for line in fd: yield line def __iter__(self): g = self.get_internal_generator() for line in g: if self.check_empty_line(line): continue self.check_type_line(line) self.check_block_line(line) self.check_struct_line(line) # sign line modification if self.flags["sign"]: line = re.sub("\s*[{;]\s*$", "", line) line = line.replace("struct", " <span class='range'>struct</span>") line = line.replace("#define", "<span class='func'>define</span>") line = re.sub("([a-zA-Z0-9_]*)(?=\s*[(])", "<span class='func'>\\1</span>", line) line = re.sub("([a-zA-Z0-9_]*)(?=\s*[:]{2})", "<span class='range'>\\1</span>", line) line = re.sub("(?<=[:]{2})([~\sa-zA-Z0-9_]*)(?=[(])", "<span class='func'>\\1</span>", line) line = "<span class='bold'>%s</span>" % line line = "<span class='key'>@sign</span>: %s" % line # remove initial * if self.flags["block"]: line = re.sub("^\s*[*]\s*", "", line) line = re.sub("([@][a-z]*)", "<span class='key'>\\1</span>", line) # struct line modification if self.flags["struct"]: line = re.sub("\s*[;]\s*$", "", line) line = re.sub("\s*([a-zA-Z0-9_]*)\s*$", " <span class='arg'>\\1</span>", line) line = "<span class='bold'>%s</span>" % line line = line.replace("\n", " ") if self.flags["sign"] or self.flags["block"] or self.flags["struct"]: line = re.sub("(\sa_[a-zA-Z0-9_]*)", "<span class='arg'>\\1</span>", line) line = re.sub("(\sm_[a-zA-Z0-9_]*)", "<span class='field'>\\1</span>", line) yield line self.flags["close2"] = self.flags["close"] def check_struct_line(self, line): ret = re.match("^\s*struct", line) if ret and self.flags["sign"]: self.flags["struct"] = True ret = re.match("^\s*};", line) if ret: self.flags["struct"] = False self.flags["close2"] = True def check_empty_line(self, line): ret = re.match("^\s*$", line) if ret: self.flags["sign"] = False self.flags["close"] = False self.flags["block"] = False self.flags["open"] = False if re.match("^\s*[*]\s*$", line): ret = True return ret def check_type_line(self, line): ret = re.match("^\s*[*]\s*@type: private.*", line) if ret: return ret = re.match("^\s*[*]\s*@type.*", line) if ret: self.flags["open"] = True self.flags["block"] = True self.block_type = re.split('^\s*[*]\s*@type:\s*', line[0:-1])[1] else: self.flags["open"] = False def check_block_line(self, line): self.flags["sign"] = False ret = re.match("^\s*[*].*", line) if not ret: if self.flags["block"]: self.flags["sign"] = True self.flags["block"] = False if re.match("\s*[*][/]\s*", line): self.flags["close"] = True else: self.flags["close"] = False # *************************************************** */ # some additional functions def print_line(line): print(line + "<br>") def print_head(): 
print("<html>") print("<head>") print(" <title>ccROJ</title>") print(" <link rel='stylesheet' href='style.css'>") print(" <script src='head.js'></script>") print("</head>") print("<body>") print("<br><br>") def print_tail(): print("<br>") print("</body>") print("</html>") def print_files(file_list): print("<div class=\"file\">") print("<span class=\"key\">@files</span>:<br>") for i in file_list: print(i, "<br>") print("</div>") # *************************************************** */ # generate print_head() flist = sys.argv[1:] print_files(flist) g = line_generator(flist) for line in g: if not g.flags["sign"] and g.flags["close2"]: print("</div>") if g.flags["open"]: print("<br><div class='%s'>" % g.block_type) if not g.flags["open"] and not g.flags["close"]: print_line(line) if g.flags["sign"] and not g.flags["struct"]: print("</div>") print_tail()
dsp-box/ccROJ
scripts/doc-gener.py
Python
gpl-3.0
5,650
[ "VisIt" ]
90f4f91d5b035e43e90d7ee927f715d44e83653c4a4ca5f2a32ea49be6618cca
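line_generator in doc-gener.py above rewrites documentation-block lines with a chain of regex substitutions. A tiny standalone sketch of the two rewrites applied to ordinary doc-block lines follows; the helper name and the sample @param keyword are illustrative (the script itself keys on tags like @type and @sign).

import re

def render_doc_line(line):
    # Drop the leading "* " of a C-style comment line, then wrap @keywords
    # in the span class used by the generated HTML.
    line = re.sub(r"^\s*[*]\s*", "", line)
    return re.sub(r"([@][a-z]*)", r"<span class='key'>\1</span>", line)

assert render_doc_line(" * @param: window length") == \
    "<span class='key'>@param</span>: window length"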
import sys PY3 = (sys.version_info[0] >= 3) if PY3: string_types = str, else: string_types = basestring, from pybindgen.utils import any, mangle_name import warnings import traceback from pybindgen.typehandlers.base import Parameter, ReturnValue, \ join_ctype_and_name, CodeGenerationError, \ param_type_matcher, return_type_matcher, CodegenErrorBase, \ DeclarationsScope, CodeBlock, NotSupportedError, ForwardWrapperBase, ReverseWrapperBase, \ TypeConfigurationError from pybindgen.typehandlers.codesink import NullCodeSink, MemoryCodeSink from pybindgen.cppattribute import CppInstanceAttributeGetter, CppInstanceAttributeSetter, \ CppStaticAttributeGetter, CppStaticAttributeSetter, \ PyGetSetDef, PyMetaclass from pybindgen.pytypeobject import PyTypeObject, PyNumberMethods, PySequenceMethods from pybindgen.cppcustomattribute import CppCustomInstanceAttributeGetter, CppCustomInstanceAttributeSetter from pybindgen import settings from pybindgen import utils from pybindgen.cppclass_container import CppClassContainerTraits from . import function import collections # Prepare for python 3.9 try: collectionsCallable = collections.Callable except AttributeError: import collections.abc collectionsCallable = collections.abc.Callable try: set except NameError: from sets import Set as set def _type_no_ref(value_type): if value_type.type_traits.type_is_reference: return str(value_type.type_traits.target) else: return str(value_type.type_traits.ctype_no_modifiers) def get_python_to_c_converter(value, root_module, code_sink): if isinstance(value, CppClass): val_converter = root_module.generate_python_to_c_type_converter(value.ThisClassReturn(value.full_name), code_sink) val_name = value.full_name elif isinstance(value, ReturnValue): val_name = _type_no_ref(value) if val_name != value.ctype: value = ReturnValue.new(val_name) val_converter = root_module.generate_python_to_c_type_converter(value, code_sink) elif isinstance(value, Parameter): val_name = _type_no_ref(value) val_return_type = ReturnValue.new(val_name) val_converter = root_module.generate_python_to_c_type_converter(val_return_type, code_sink) else: raise ValueError("Don't know how to convert %r" % (value,)) return val_converter, val_name def get_c_to_python_converter(value, root_module, code_sink): if isinstance(value, CppClass): val_converter = root_module.generate_c_to_python_type_converter(value.ThisClassReturn(value.full_name), code_sink) val_name = value.full_name elif isinstance(value, ReturnValue): val_converter = root_module.generate_c_to_python_type_converter(value, code_sink) val_name = _type_no_ref(value) elif isinstance(value, Parameter): val_return_type = ReturnValue.new(value.ctype) val_converter = root_module.generate_c_to_python_type_converter(val_return_type, code_sink) val_name = _type_no_ref(value) else: raise ValueError("Don't know how to convert %s" % str(value)) return val_converter, val_name class MemoryPolicy(object): """memory management policy for a C++ class or C/C++ struct""" def __init__(self): if type(self) is MemoryPolicy: raise NotImplementedError("class is abstract") def get_free_code(self, object_expression): """ Return a code statement to free an underlying C/C++ object. 
""" raise NotImplementedError def get_pointer_type(self, class_full_name): return "%s *" % (class_full_name,) def get_pointer_to_void_name(self, object_name): return "%s" % object_name def get_instance_creation_function(self): return default_instance_creation_function def get_delete_code(self, cpp_class): raise NotImplementedError def get_pystruct_init_code(self, cpp_class, obj): return '' def register_ptr_parameter_and_return(self, cls, name): class ThisClassPtrParameter(CppClassPtrParameter): """Register this C++ class as pass-by-pointer parameter""" CTYPES = [] cpp_class = cls cls.ThisClassPtrParameter = ThisClassPtrParameter try: param_type_matcher.register(name+'*', cls.ThisClassPtrParameter) except ValueError: pass class ThisClassPtrReturn(CppClassPtrReturnValue): """Register this C++ class as pointer return""" CTYPES = [] cpp_class = cls cls.ThisClassPtrReturn = ThisClassPtrReturn try: return_type_matcher.register(name+'*', cls.ThisClassPtrReturn) except ValueError: pass def register_ptr_alias_parameter_and_return(self, cls, alias): cls.ThisClassPtrParameter.CTYPES.append(alias+'*') try: param_type_matcher.register(alias+'*', cls.ThisClassPtrParameter) except ValueError: pass cls.ThisClassPtrReturn.CTYPES.append(alias+'*') try: return_type_matcher.register(alias+'*', cls.ThisClassPtrReturn) except ValueError: pass class ReferenceCountingPolicy(MemoryPolicy): def write_incref(self, code_block, obj_expr): """ Write code to increase the reference code of an object of this class (the real C++ class, not the wrapper). Should only be called if the class supports reference counting, as reported by the attribute `CppClass.has_reference_counting`. """ raise NotImplementedError def write_decref(self, code_block, obj_expr): """ Write code to decrease the reference code of an object of this class (the real C++ class, not the wrapper). Should only be called if the class supports reference counting, as reported by the attribute `CppClass.has_reference_counting`. 
""" raise NotImplementedError class ReferenceCountingMethodsPolicy(ReferenceCountingPolicy): def __init__(self, incref_method, decref_method, peekref_method=None): super(ReferenceCountingMethodsPolicy, self).__init__() self.incref_method = incref_method self.decref_method = decref_method self.peekref_method = peekref_method def write_incref(self, code_block, obj_expr): code_block.write_code('%s->%s();' % (obj_expr, self.incref_method)) def write_decref(self, code_block, obj_expr): code_block.write_code('%s->%s();' % (obj_expr, self.decref_method)) def get_delete_code(self, cpp_class): delete_code = ("if (self->obj) {\n" " %s *tmp = self->obj;\n" " self->obj = NULL;\n" " tmp->%s();\n" "}" % (cpp_class.full_name, self.decref_method)) return delete_code def __repr__(self): return 'cppclass.ReferenceCountingMethodsPolicy(incref_method=%r, decref_method=%r, peekref_method=%r)' \ % (self.incref_method, self.decref_method, self.peekref_method) class ReferenceCountingFunctionsPolicy(ReferenceCountingPolicy): def __init__(self, incref_function, decref_function, peekref_function=None): super(ReferenceCountingFunctionsPolicy, self).__init__() self.incref_function = incref_function self.decref_function = decref_function self.peekref_function = peekref_function def write_incref(self, code_block, obj_expr): code_block.write_code('%s(%s);' % (self.incref_function, obj_expr)) def write_decref(self, code_block, obj_expr): code_block.write_code('%s(%s);' % (self.decref_function, obj_expr)) def get_delete_code(self, cpp_class): delete_code = ("if (self->obj) {\n" " %s *tmp = self->obj;\n" " self->obj = NULL;\n" " %s(tmp);\n" "}" % (cpp_class.full_name, self.decref_function)) return delete_code def __repr__(self): return 'cppclass.ReferenceCountingFunctionsPolicy(incref_function=%r, decref_function=%r, peekref_function=%r)' \ % (self.incref_function, self.decref_function, self.peekref_function) class FreeFunctionPolicy(MemoryPolicy): def __init__(self, free_function): super(FreeFunctionPolicy, self).__init__() self.free_function = free_function def get_delete_code(self, cpp_class): delete_code = ("if (self->obj) {\n" " %s *tmp = self->obj;\n" " self->obj = NULL;\n" " %s(tmp);\n" "}" % (cpp_class.full_name, self.free_function)) return delete_code def __repr__(self): return 'cppclass.FreeFunctionPolicy(%r)' % self.free_function class SmartPointerPolicy(MemoryPolicy): pointer_template = None # class should fill this or create descriptor/getter def default_instance_creation_function(cpp_class, code_block, lvalue, parameters, construct_type_name): """ Default "instance creation function"; it is called whenever a new C++ class instance needs to be created; this default implementation uses a standard C++ new allocator. 
:param cpp_class: the CppClass object whose instance is to be created :param code_block: CodeBlock object on which the instance creation code should be generated :param lvalue: lvalue expression that should hold the result in the end :param parameters: stringified list of parameters :param construct_type_name: actual name of type to be constructed (it is not always the class name, sometimes it's the python helper class) """ assert lvalue assert not lvalue.startswith('None') if cpp_class.incomplete_type: raise CodeGenerationError("%s cannot be constructed (incomplete type)" % cpp_class.full_name) code_block.write_code( "%s = new %s(%s);" % (lvalue, construct_type_name, parameters)) class CppHelperClass(object): """ Generates code for a C++ proxy subclass that takes care of forwarding virtual methods from C++ to Python. """ def __init__(self, class_): """ :param class_: original CppClass wrapper object """ self.class_ = class_ self.name = class_.pystruct + "__PythonHelper" self.virtual_parent_callers = {} self.virtual_proxies = [] self.cannot_be_constructed = False self.custom_methods = [] self.post_generation_code = [] self.virtual_methods = [] def add_virtual_method(self, method): assert method.is_virtual assert method.class_ is not None for existing in self.virtual_methods: if method.matches_signature(existing): return # don't re-add already existing method if isinstance(method, CppDummyMethod): if method.is_pure_virtual: self.cannot_be_constructed = True else: self.virtual_methods.append(method) if not method.is_pure_virtual: if settings._get_deprecated_virtuals(): vis = ['public', 'protected'] else: vis = ['protected'] if method.visibility in vis: parent_caller = CppVirtualMethodParentCaller(method) #parent_caller.class_ = method.class_ parent_caller.helper_class = self parent_caller.main_wrapper = method # XXX: need to explain this self.add_virtual_parent_caller(parent_caller) proxy = CppVirtualMethodProxy(method) proxy.main_wrapper = method # XXX: need to explain this self.add_virtual_proxy(proxy) def add_virtual_parent_caller(self, parent_caller): """Add a new CppVirtualMethodParentCaller object to this helper class""" assert isinstance(parent_caller, CppVirtualMethodParentCaller) name = parent_caller.method_name try: overload = self.virtual_parent_callers[name] except KeyError: overload = CppOverloadedMethod(name) ## implicit conversions + virtual methods disabled ## temporarily until I can figure out how to fix the unit ## tests. overload.enable_implicit_conversions = False #overload.static_decl = False overload.pystruct = self.class_.pystruct self.virtual_parent_callers[name] = overload assert self.class_ is not None for existing in overload.wrappers: if parent_caller.matches_signature(existing): break # don't re-add already existing method else: overload.add(parent_caller) def add_custom_method(self, declaration, body=None): """ Add a custom method to the helper class, given by a declaration line and a body. The body can be None, in case the whole method definition is included in the declaration itself. """ self.custom_methods.append((declaration, body)) def add_post_generation_code(self, code): """ Add custom code to be included right after the helper class is generated. 
""" self.post_generation_code.append(code) def add_virtual_proxy(self, virtual_proxy): """Add a new CppVirtualMethodProxy object to this class""" assert isinstance(virtual_proxy, CppVirtualMethodProxy) self.virtual_proxies.append(virtual_proxy) def generate_forward_declarations(self, code_sink_param): """ Generate the proxy class (declaration only) to a given code sink """ code_sink = MemoryCodeSink() if self._generate_forward_declarations(code_sink): code_sink.flush_to(code_sink_param) else: self.cannot_be_constructed = True def _generate_forward_declarations(self, code_sink): """ Generate the proxy class (declaration only) to a given code sink. Returns True if all is well, False if a pure virtual method was found that could not be generated. """ code_sink.writeln("class %s : public %s\n{\npublic:" % (self.name, self.class_.full_name)) code_sink.indent() code_sink.writeln("PyObject *m_pyself;") if not self.class_.import_from_module: ## replicate the parent constructors in the helper class implemented_constructor_signatures = [] for cons in self.class_.constructors: ## filter out duplicated constructors signature = [param.ctype for param in cons.parameters] if signature in implemented_constructor_signatures: continue implemented_constructor_signatures.append(signature) params = [join_ctype_and_name(param.ctype, param.name) for param in cons.parameters] code_sink.writeln("%s(%s)" % (self.name, ', '.join(params))) code_sink.indent() code_sink.writeln(": %s(%s), m_pyself(NULL)\n{}" % (self.class_.full_name, ', '.join([param.name for param in cons.parameters]))) code_sink.unindent() code_sink.writeln() ## add the set_pyobj method code_sink.writeln(""" void set_pyobj(PyObject *pyobj) { Py_XDECREF(m_pyself); Py_INCREF(pyobj); m_pyself = pyobj; } """) ## write a destructor code_sink.writeln("virtual ~%s()\n{" % self.name) code_sink.indent() code_sink.writeln("Py_CLEAR(m_pyself);") code_sink.unindent() code_sink.writeln("}\n") if not self.class_.import_from_module: ## write the parent callers (_name) for parent_caller in self.virtual_parent_callers.values(): #parent_caller.class_ = self.class_ parent_caller.helper_class = self parent_caller.reset_code_generation_state() ## test code generation try: try: utils.call_with_error_handling(parent_caller.generate, (NullCodeSink(),), {}, parent_caller) except utils.SkipWrapper: continue finally: parent_caller.reset_code_generation_state() code_sink.writeln() parent_caller.generate_class_declaration(code_sink) for parent_caller_wrapper in parent_caller.wrappers: parent_caller_wrapper.generate_parent_caller_method(code_sink) ## write the virtual proxies for virtual_proxy in self.virtual_proxies: #virtual_proxy.class_ = self.class_ virtual_proxy.helper_class = self ## test code generation #virtual_proxy.class_ = self.class_ #virtual_proxy.helper_class = self virtual_proxy.reset_code_generation_state() try: try: utils.call_with_error_handling(virtual_proxy.generate, (NullCodeSink(),), {}, virtual_proxy) except utils.SkipWrapper: if virtual_proxy.method.is_pure_virtual: return False continue finally: virtual_proxy.reset_code_generation_state() code_sink.writeln() virtual_proxy.generate_declaration(code_sink) for custom_declaration, dummy in self.custom_methods: code_sink.writeln(custom_declaration) code_sink.unindent() code_sink.writeln("};\n") if not self.class_.import_from_module: for code in self.post_generation_code: code_sink.writeln(code) code_sink.writeln() return True def generate(self, code_sink): """ Generate the proxy class (virtual method bodies 
only) to a given code sink. returns pymethodef list of parent callers """ if self.class_.import_from_module: return ## write the parent callers (_name) method_defs = [] for name, parent_caller in self.virtual_parent_callers.items(): #parent_caller.class_ = self.class_ parent_caller.helper_class = self code_sink.writeln() ## parent_caller.generate(code_sink) try: utils.call_with_error_handling(parent_caller.generate, (code_sink,), {}, parent_caller) except utils.SkipWrapper: continue if settings._get_deprecated_virtuals(): parent_caller_name = '_'+name else: parent_caller_name = name method_defs.append(parent_caller.get_py_method_def(parent_caller_name)) ## write the virtual proxies for virtual_proxy in self.virtual_proxies: #virtual_proxy.class_ = self.class_ virtual_proxy.helper_class = self code_sink.writeln() ## virtual_proxy.generate(code_sink) try: utils.call_with_error_handling(virtual_proxy.generate, (code_sink,), {}, virtual_proxy) except utils.SkipWrapper: assert not virtual_proxy.method.is_pure_virtual continue for dummy, custom_body in self.custom_methods: if custom_body: code_sink.writeln(custom_body) return method_defs class CppClass(object): """ A CppClass object takes care of generating the code for wrapping a C++ class """ def __init__(self, name, parent=None, incref_method=None, decref_method=None, automatic_type_narrowing=None, allow_subclassing=None, is_singleton=False, outer_class=None, peekref_method=None, template_parameters=(), custom_template_class_name=None, incomplete_type=False, free_function=None, incref_function=None, decref_function=None, python_name=None, memory_policy=None, foreign_cpp_namespace=None, docstring=None, custom_name=None, import_from_module=None, destructor_visibility='public' ): """ :param name: class name :param parent: optional parent class wrapper, or list of parents. Valid values are None, a CppClass instance, or a list of CppClass instances. :param incref_method: (deprecated in favour of memory_policy) if the class supports reference counting, the name of the method that increments the reference count (may be inherited from parent if not given) :param decref_method: (deprecated in favour of memory_policy) if the class supports reference counting, the name of the method that decrements the reference count (may be inherited from parent if not given) :param automatic_type_narrowing: if True, automatic return type narrowing will be done on objects of this class and its descendants when returned by pointer from a function or method. :param allow_subclassing: if True, generated class wrappers will allow subclassing in Python. :param is_singleton: if True, the class is considered a singleton, and so the python wrapper will never call the C++ class destructor to free the value. :param peekref_method: (deprecated in favour of memory_policy) if the class supports reference counting, the name of the method that returns the current reference count. :param free_function: (deprecated in favour of memory_policy) name of C function used to deallocate class instances :param incref_function: (deprecated in favour of memory_policy) same as incref_method, but as a function instead of method :param decref_function: (deprecated in favour of memory_policy) same as decref_method, but as a function instead of method :param python_name: name of the class as it will appear from Python side. This parameter is DEPRECATED in favour of custom_name. :param memory_policy: memory management policy; if None, it inherits from the parent class. 
Only root classes can have a memory policy defined. :type memory_policy: L{MemoryPolicy} :param foreign_cpp_namespace: if set, the class is assumed to belong to the given C++ namespace, regardless of the C++ namespace of the python module it will be added to. For instance, this can be useful to wrap std classes, like std::ofstream, without having to create an extra python submodule. :param docstring: None or a string containing the docstring that will be generated for the class :param custom_name: an alternative name to give to this class at python-side; if omitted, the name of the class in the python module will be the same name as the class in C++ (minus namespace). :param import_from_module: if not None, the type is imported from a foreign Python module with the given name. """ assert outer_class is None or isinstance(outer_class, CppClass) self.incomplete_type = incomplete_type self.outer_class = outer_class self._module = None self.name = name self.docstring = docstring self.mangled_name = None self.mangled_full_name = None self.template_parameters = template_parameters self.container_traits = None self.import_from_module = import_from_module assert destructor_visibility in ['public', 'private', 'protected'] self.destructor_visibility = destructor_visibility self.custom_name = custom_name if custom_template_class_name: warnings.warn("Use the custom_name parameter.", DeprecationWarning, stacklevel=2) self.custom_name = custom_template_class_name if python_name: warnings.warn("Use the custom_name parameter.", DeprecationWarning, stacklevel=2) self.custom_name = python_name self.is_singleton = is_singleton self.foreign_cpp_namespace = foreign_cpp_namespace self.full_name = None # full name with C++ namespaces attached and template parameters self.methods = collections.OrderedDict() # name => OverloadedMethod self._dummy_methods = [] # methods that have parameter/retval binding problems self.nonpublic_methods = [] self.constructors = [] # (name, wrapper) pairs self.pytype = PyTypeObject() self.slots = self.pytype.slots self.helper_class = None self.instance_creation_function = None self.post_instance_creation_function = None ## set to True when we become aware generating the helper ## class is not going to be possible self.helper_class_disabled = False self.cannot_be_constructed = '' # reason self.has_trivial_constructor = False self.has_copy_constructor = False self.has_output_stream_operator = False self._have_pure_virtual_methods = None self._wrapper_registry = None self.binary_comparison_operators = set() self.binary_numeric_operators = dict() self.inplace_numeric_operators = dict() self.unary_numeric_operators = dict() self.valid_sequence_methods = {"__len__" : "sq_length", "__add__" : "sq_concat", "__mul__" : "sq_repeat", "__getitem__" : "sq_item", "__getslice__" : "sq_slice", "__setitem__" : "sq_ass_item", "__setslice__" : "sq_ass_slice", "__contains__" : "sq_contains", "__iadd__" : "sq_inplace_concat", "__imul__" : "sq_inplace_repeat"} ## list of CppClasses from which a value of this class can be ## implicitly generated; corresponds to a ## operator ThisClass(); in the other class. self.implicitly_converts_from = [] ## list of hook functions to call just prior to helper class ## code generation. 
self.helper_class_hooks = [] self._pystruct = None #"***GIVE ME A NAME***" self.metaclass_name = "***GIVE ME A NAME***" self.pytypestruct = "***GIVE ME A NAME***" self.instance_attributes = PyGetSetDef("%s__getsets" % self._pystruct) self.static_attributes = PyGetSetDef("%s__getsets" % self.metaclass_name) if isinstance(parent, list): self.bases = list(parent) self.parent = self.bases[0] elif isinstance(parent, CppClass): self.parent = parent self.bases = [parent] elif parent is None: self.parent = None self.bases = [] else: raise TypeError("'parent' must be None, CppClass instance, or a list of CppClass instances") if free_function: warnings.warn("Use FreeFunctionPolicy and memory_policy parameter.", DeprecationWarning) assert memory_policy is None memory_policy = FreeFunctionPolicy(free_function) elif incref_method: warnings.warn("Use ReferenceCountingMethodsPolicy and memory_policy parameter.", DeprecationWarning) assert memory_policy is None memory_policy = ReferenceCountingMethodsPolicy(incref_method, decref_method, peekref_method) elif incref_function: warnings.warn("Use ReferenceCountingFunctionsPolicy and memory_policy parameter.", DeprecationWarning) assert memory_policy is None memory_policy = ReferenceCountingFunctionsPolicy(incref_function, decref_function) if not self.bases: assert memory_policy is None or isinstance(memory_policy, MemoryPolicy) self.memory_policy = memory_policy else: for base in self.bases: if base.memory_policy is not None: self.memory_policy = base.memory_policy assert memory_policy is None, \ "changing memory policy from parent (%s) to child (%s) class not permitted" \ % (base.name, self.name) break else: self.memory_policy = memory_policy if automatic_type_narrowing is None: if not self.bases: self.automatic_type_narrowing = settings.automatic_type_narrowing else: self.automatic_type_narrowing = self.parent.automatic_type_narrowing else: self.automatic_type_narrowing = automatic_type_narrowing if allow_subclassing is None: if self.parent is None: self.allow_subclassing = settings.allow_subclassing else: self.allow_subclassing = self.parent.allow_subclassing else: if any([p.allow_subclassing for p in self.bases]) and not allow_subclassing: raise ValueError("Cannot disable subclassing if a parent class allows it") else: self.allow_subclassing = allow_subclassing if self.destructor_visibility not in ['public', 'protected']: self.allow_subclassing = False self.typeid_map_name = None if name != 'dummy': ## register type handlers class ThisClassParameter(CppClassParameter): """Register this C++ class as pass-by-value parameter""" CTYPES = [] cpp_class = self self.ThisClassParameter = ThisClassParameter try: param_type_matcher.register(name, self.ThisClassParameter) except ValueError: pass class ThisClassRefParameter(CppClassRefParameter): """Register this C++ class as pass-by-reference parameter""" CTYPES = [] cpp_class = self self.ThisClassRefParameter = ThisClassRefParameter try: param_type_matcher.register(name+'&', self.ThisClassRefParameter) except ValueError: pass class ThisClassReturn(CppClassReturnValue): """Register this C++ class as value return""" CTYPES = [] cpp_class = self self.ThisClassReturn = ThisClassReturn self.ThisClassRefReturn = ThisClassReturn try: return_type_matcher.register(name, self.ThisClassReturn) return_type_matcher.register(name, self.ThisClassRefReturn) except ValueError: pass if self.memory_policy is not None: self.memory_policy.register_ptr_parameter_and_return(self, name) else: # Regular pointer class 
                class ThisClassPtrParameter(CppClassPtrParameter):
                    """Register this C++ class as pass-by-pointer parameter"""
                    CTYPES = []
                    cpp_class = self
                self.ThisClassPtrParameter = ThisClassPtrParameter
                try:
                    param_type_matcher.register(name+'*', self.ThisClassPtrParameter)
                except ValueError:
                    pass

                class ThisClassPtrReturn(CppClassPtrReturnValue):
                    """Register this C++ class as pointer return"""
                    CTYPES = []
                    cpp_class = self
                self.ThisClassPtrReturn = ThisClassPtrReturn
                try:
                    return_type_matcher.register(name+'*', self.ThisClassPtrReturn)
                except ValueError:
                    pass

                class ThisClassRefReturn(CppClassRefReturnValue):
                    """Register this C++ class as reference return"""
                    CTYPES = []
                    cpp_class = self
                self.ThisClassRefReturn = ThisClassRefReturn
                try:
                    return_type_matcher.register(name+'&', self.ThisClassRefReturn)
                except ValueError:
                    pass

    def __repr__(self):
        return "<pybindgen.CppClass %r>" % self.full_name

    def add_container_traits(self, *args, **kwargs):
        assert self.container_traits is None
        self.container_traits = CppClassContainerTraits(self, *args, **kwargs)

    def add_binary_comparison_operator(self, operator):
        """
        Add support for a C++ binary comparison operator, such as == or <.

        The binary operator is assumed to operate with both operands
        of the type of the class, either by reference or by value.

        :param operator: string indicating the name of the operator to
            support, e.g. '=='
        """
        operator = utils.ascii(operator)
        if not isinstance(operator, string_types):
            raise TypeError("expected operator name as string")
        if operator not in ['==', '!=', '<', '<=', '>', '>=']:
            raise ValueError("The operator %r is invalid or not yet supported by PyBindGen" % (operator,))
        self.binary_comparison_operators.add(operator)

    def add_binary_numeric_operator(self, operator, result_cppclass=None,
                                    left_cppclass=None, right=None):
        """
        Add support for a C++ binary numeric operator, such as +, -, \\*, or /.

        :param operator: string indicating the name of the operator to
            support, e.g. '+'
        :param result_cppclass: the CppClass object of the result type,
            assumed to be this class if omitted
        :param left_cppclass: the CppClass object of the left operand type,
            assumed to be this class if omitted
        :param right: the type of the right parameter.  Can be a CppClass,
            Parameter, or param spec.  Assumed to be this class if omitted
        """
        operator = utils.ascii(operator)
        if not isinstance(operator, string_types):
            raise TypeError("expected operator name as string")
        if operator not in ['+', '-', '*', '/']:
            raise ValueError("The operator %r is invalid or not yet supported by PyBindGen" % (operator,))
        try:
            l = self.binary_numeric_operators[operator]
        except KeyError:
            l = []
            self.binary_numeric_operators[operator] = l
        if result_cppclass is None:
            result_cppclass = self
        if left_cppclass is None:
            left_cppclass = self
        if right is None:
            right = self
        elif isinstance(right, CppClass):
            pass
        else:
            if isinstance(right, string_types):
                right = utils.param(right, 'right')
            try:
                right = utils.eval_param(right, None)
            except utils.SkipWrapper:
                return
        op = (result_cppclass, left_cppclass, right)
        if op not in l:
            l.append(op)

    def add_inplace_numeric_operator(self, operator, right=None):
        """
        Add support for a C++ inplace numeric operator, such as +=, -=, \\*=, or /=.

        :param operator: string indicating the name of the operator to
            support, e.g. '+='
        :param right: the type of the right parameter.  Can be a CppClass,
            Parameter, or param spec.  Assumed to be this class if omitted
        """
        operator = utils.ascii(operator)
        if not isinstance(operator, string_types):
            raise TypeError("expected operator name as string")
        if operator not in ['+=', '-=', '*=', '/=']:
            raise ValueError("The operator %r is invalid or not yet supported by PyBindGen" % (operator,))
        try:
            l = self.inplace_numeric_operators[operator]
        except KeyError:
            l = []
            self.inplace_numeric_operators[operator] = l
        if right is None:
            right = self
        else:
            if isinstance(right, string_types):
                right = utils.param(right, 'right')
            try:
                right = utils.eval_param(right, None)
            except utils.SkipWrapper:
                return
        op = (self, self, right)
        if op not in l:
            l.append(op)

    def add_unary_numeric_operator(self, operator, result_cppclass=None, left_cppclass=None):
        """
        Add support for a C++ unary numeric operator; currently only - is supported.

        :param operator: string indicating the name of the operator to
            support, e.g. '-'
        :param result_cppclass: the CppClass object of the result type,
            assumed to be this class if omitted
        :param left_cppclass: the CppClass object of the left operand type,
            assumed to be this class if omitted
        """
        operator = utils.ascii(operator)
        if not isinstance(operator, string_types):
            raise TypeError("expected operator name as string")
        if operator not in ['-']:
            raise ValueError("The operator %r is invalid or not yet supported by PyBindGen" % (operator,))
        try:
            l = self.unary_numeric_operators[operator]
        except KeyError:
            l = []
            self.unary_numeric_operators[operator] = l
        if result_cppclass is None:
            result_cppclass = self
        if left_cppclass is None:
            left_cppclass = self
        op = (result_cppclass, left_cppclass)
        if op not in l:
            l.append(op)

    def add_class(self, *args, **kwargs):
        """
        Add a nested class.  See L{CppClass} for information about
        accepted parameters.
        """
        assert 'outer_class' not in kwargs
        kwargs['outer_class'] = self
        return self.module.add_class(*args, **kwargs)

    def add_enum(self, *args, **kwargs):
        """
        Add a nested enum.  See L{Enum} for information about
        accepted parameters.
        """
        assert 'outer_class' not in kwargs
        kwargs['outer_class'] = self
        return self.module.add_enum(*args, **kwargs)

    def get_mro(self):
        """
        Get the method resolution order (MRO) of this class.

        :return: an iterator that gives CppClass objects, from leaf to
            root class
        """
        to_visit = [self]
        visited = set()
        while to_visit:
            cls = to_visit.pop(0)
            visited.add(cls)
            yield cls
            for base in cls.bases:
                if base not in visited:
                    to_visit.append(base)

    def get_all_methods(self):
        """Returns an iterator to iterate over all methods of the class"""
        for overload in self.methods.values():
            for method in overload.wrappers:
                yield method
        for method in self.nonpublic_methods:
            yield method

    def get_have_pure_virtual_methods(self):
        """
        Returns True if the class has pure virtual methods with no
        implementation (which would mean the type is not instantiable
        directly, only through a helper class).
        """
        if self._have_pure_virtual_methods is not None:
            return self._have_pure_virtual_methods
        mro = list(self.get_mro())
        mro_reversed = list(mro)
        mro_reversed.reverse()
        self._have_pure_virtual_methods = False
        for pos, cls in enumerate(mro_reversed):
            for method in list(cls.get_all_methods()) + cls._dummy_methods:
                if not isinstance(method, CppMethod):
                    continue
                if method.is_pure_virtual:
                    ## found a pure virtual method; now go see in the
                    ## child classes, check if any of them implements
                    ## this pure virtual method.
                    implemented = False
                    for child_cls in mro_reversed[pos+1:]:
                        for child_method in list(child_cls.get_all_methods()) + child_cls._dummy_methods:
                            if not isinstance(child_method, CppMethod):
                                continue
                            if not child_method.is_virtual:
                                continue
                            if not child_method.matches_signature(method):
                                continue
                            if not child_method.is_pure_virtual:
                                implemented = True
                                break
                        if implemented:
                            break
                    if not implemented:
                        self._have_pure_virtual_methods = True
        return self._have_pure_virtual_methods

    have_pure_virtual_methods = property(get_have_pure_virtual_methods)

    def is_subclass(self, other):
        """Return True if this CppClass instance represents a class that is
        a subclass of another class represented by the CppClass object
        \\`other\\'."""
        if not isinstance(other, CppClass):
            raise TypeError
        return other in self.get_mro()

    def add_helper_class_hook(self, hook):
        """
        Add a hook function to be called just prior to a helper class
        being generated.  The hook function applies to this class and
        all subclasses.  The hook function is called like this::

            hook_function(helper_class)
        """
        if not isinstance(hook, collectionsCallable):
            raise TypeError("hook function must be callable")
        self.helper_class_hooks.append(hook)

    def _get_all_helper_class_hooks(self):
        """
        Returns a list of all helper class hook functions, including
        the ones registered with parent classes.  Parent hooks will
        appear first in the list.
        """
        l = []
        for cls in self.get_mro():
            l = cls.helper_class_hooks + l
        return l

    def set_instance_creation_function(self, instance_creation_function):
        """Set a custom function to be called to create instances of this
        class and its subclasses.

        :param instance_creation_function: instance creation function; see
            default_instance_creation_function() for signature and example.
        """
        self.instance_creation_function = instance_creation_function

    def set_post_instance_creation_function(self, post_instance_creation_function):
        """Set a custom function to be called to add code after an
        instance is created (usually by the "instance creation
        function") and registered with the Python runtime.
:param post_instance_creation_function: post instance creation function """ self.post_instance_creation_function = post_instance_creation_function def get_instance_creation_function(self): for cls in self.get_mro(): if cls.instance_creation_function is not None: return cls.instance_creation_function if cls.memory_policy is not None: return cls.memory_policy.get_instance_creation_function() return default_instance_creation_function def get_post_instance_creation_function(self): for cls in self.get_mro(): if cls.post_instance_creation_function is not None: return cls.post_instance_creation_function return None def write_create_instance(self, code_block, lvalue, parameters, construct_type_name=None): instance_creation_func = self.get_instance_creation_function() if construct_type_name is None: construct_type_name = self.get_construct_name() instance_creation_func(self, code_block, lvalue, parameters, construct_type_name) def write_post_instance_creation_code(self, code_block, lvalue, parameters, construct_type_name=None): post_instance_creation_func = self.get_post_instance_creation_function() if post_instance_creation_func is None: return if construct_type_name is None: construct_type_name = self.get_construct_name() post_instance_creation_func(self, code_block, lvalue, parameters, construct_type_name) def get_pystruct(self): if self._pystruct is None: raise ValueError return self._pystruct pystruct = property(get_pystruct) def get_construct_name(self): """Get a name usable for new %s construction, or raise CodeGenerationError if none found""" if self.cannot_be_constructed: raise CodeGenerationError("%s cannot be constructed (%s)" % (self.full_name, self.cannot_be_constructed)) if self.have_pure_virtual_methods: raise CodeGenerationError("%s cannot be constructed (class has pure virtual methods)" % self.full_name) else: return self.full_name def implicitly_converts_to(self, other): """ Declares that values of this class can be implicitly converted to another class; corresponds to a operator AnotherClass(); special method. """ assert isinstance(other, CppClass) other.implicitly_converts_from.append(self) def get_all_implicit_conversions(self): """ Gets a new list of all other classes whose value can be implicitly converted to a value of this class. 
>>> Foo = CppClass("Foo") >>> Bar = CppClass("Bar") >>> Zbr = CppClass("Zbr") >>> Bar.implicitly_converts_to(Foo) >>> Zbr.implicitly_converts_to(Bar) >>> l = Foo.get_all_implicit_conversions() >>> l.sort(lambda cls1, cls2: cmp(cls1.name, cls2.name)) >>> [cls.name for cls in l] ['Bar'] """ return list(self.implicitly_converts_from) # classes = [] # to_visit = list(self.implicitly_converts_from) # while to_visit: # source = to_visit.pop(0) # if source in classes or source is self: # continue # classes.append(source) # to_visit.extend(source.implicitly_converts_from) # return classes def _update_names(self): prefix = settings.name_prefix.capitalize() if self.outer_class is None: if self.foreign_cpp_namespace: self.full_name = self.foreign_cpp_namespace + '::' + self.name else: if self._module.cpp_namespace_prefix: if self._module.cpp_namespace_prefix == '::': self.full_name = '::' + self.name else: self.full_name = self._module.cpp_namespace_prefix + '::' + self.name else: self.full_name = self.name else: assert not self.foreign_cpp_namespace self.full_name = '::'.join([self.outer_class.full_name, self.name]) def make_upper(s): if s and s[0].islower(): return s[0].upper()+s[1:] else: return s def mangle(name): return mangle_name(name) def flatten(name): "make a name like::This look LikeThis" return ''.join([make_upper(mangle(s)) for s in name.split('::')]) self.mangled_name = flatten(self.name) self.mangled_full_name = flatten(self.full_name) if self.template_parameters: self.full_name += "< %s >" % (', '.join(self.template_parameters)) mangled_template_params = '__' + '_'.join([flatten(s) for s in self.template_parameters]) self.mangled_name += mangled_template_params self.mangled_full_name += mangled_template_params self._pystruct = "Py%s%s" % (prefix, self.mangled_full_name) self.metaclass_name = "%sMeta" % self.mangled_full_name self.pytypestruct = "Py%s%s_Type" % (prefix, self.mangled_full_name) self.instance_attributes.cname = "%s__getsets" % self._pystruct self.static_attributes.cname = "%s__getsets" % self.metaclass_name ## re-register the class type handlers, now with class full name self.register_alias(self.full_name) if self.get_type_narrowing_root() is self: self.typeid_map_name = "%s__typeid_map" % self.pystruct else: self.typeid_map_name = None def register_alias(self, alias): """Re-register the class with another base name, in addition to any registrations that might have already been done.""" self.module.register_type(None, alias, self) self.ThisClassParameter.CTYPES.append(alias) try: param_type_matcher.register(alias, self.ThisClassParameter) except ValueError: pass self.ThisClassRefParameter.CTYPES.append(alias+'&') try: param_type_matcher.register(alias+'&', self.ThisClassRefParameter) except ValueError: pass self.ThisClassReturn.CTYPES.append(alias) try: return_type_matcher.register(alias, self.ThisClassReturn) except ValueError: pass if self.memory_policy is not None: self.memory_policy.register_ptr_alias_parameter_and_return(self, alias) else: self.ThisClassPtrParameter.CTYPES.append(alias+'*') try: param_type_matcher.register(alias+'*', self.ThisClassPtrParameter) except ValueError: pass self.ThisClassPtrReturn.CTYPES.append(alias+'*') try: return_type_matcher.register(alias+'*', self.ThisClassPtrReturn) except ValueError: pass self.ThisClassRefReturn.CTYPES.append(alias) try: return_type_matcher.register(alias+'&', self.ThisClassRefReturn) except ValueError: pass def get_module(self): """Get the Module object this class belongs to""" return self._module def 
set_module(self, module): """Set the Module object this class belongs to""" self._module = module self._update_names() module = property(get_module, set_module) def inherit_default_constructors(self): """inherit the default constructors from the parentclass according to C++ language rules""" for base in self.bases: for cons in base.constructors: if len(cons.parameters) == 0: self.add_constructor([], visibility=cons.visibility) elif (len(cons.parameters) == 1 and isinstance(cons.parameters[0], self.parent.ThisClassRefParameter)): self.add_constructor([self.ThisClassRefParameter( self.full_name + "&", "obj", cons.parameters[0].direction)], visibility=cons.visibility) def get_helper_class(self): """gets the "helper class" for this class wrapper, creating it if necessary""" for cls in self.get_mro(): if cls.helper_class_disabled: return None if not self.allow_subclassing: return None if self.helper_class is None: if not self.is_singleton: self.helper_class = CppHelperClass(self) self.module.add_include('<typeinfo>') return self.helper_class def get_type_narrowing_root(self): """Find the root CppClass along the subtree of all parent classes that have automatic_type_narrowing=True Note: multiple inheritance not implemented""" if not self.automatic_type_narrowing: return None root = self while (root.parent is not None and root.parent.automatic_type_narrowing): root = root.parent return root def _register_typeid(self, module): """register this class with the typeid map root class""" root = self.get_type_narrowing_root() module.after_init.write_code("%s.register_wrapper(typeid(%s), &%s);" % (root.typeid_map_name, self.full_name, self.pytypestruct)) def _generate_typeid_map(self, code_sink, module): """generate the typeid map and fill it with values""" try: module.declare_one_time_definition("TypeIDMap") except KeyError: pass else: code_sink.writeln(''' #include <map> #include <string> #include <typeinfo> #if defined(__GNUC__) && __GNUC__ >= 3 && !defined(__clang__) # include <cxxabi.h> #endif #define PBG_TYPEMAP_DEBUG 0 namespace pybindgen { class TypeMap { std::map<std::string, PyTypeObject *> m_map; public: TypeMap() {} void register_wrapper(const std::type_info &cpp_type_info, PyTypeObject *python_wrapper) { #if PBG_TYPEMAP_DEBUG std::cerr << "register_wrapper(this=" << this << ", type_name=" << cpp_type_info.name() << ", python_wrapper=" << python_wrapper->tp_name << ")" << std::endl; #endif m_map[std::string(cpp_type_info.name())] = python_wrapper; } ''') if settings.gcc_rtti_abi_complete: code_sink.writeln(''' PyTypeObject * lookup_wrapper(const std::type_info &cpp_type_info, PyTypeObject *fallback_wrapper) { #if PBG_TYPEMAP_DEBUG std::cerr << "lookup_wrapper(this=" << this << ", type_name=" << cpp_type_info.name() << ")" << std::endl; #endif PyTypeObject *python_wrapper = m_map[cpp_type_info.name()]; if (python_wrapper) return python_wrapper; else { #if defined(__GNUC__) && __GNUC__ >= 3 && !defined(__clang__) // Get closest (in the single inheritance tree provided by cxxabi.h) // registered python wrapper. 
const abi::__si_class_type_info *_typeinfo = dynamic_cast<const abi::__si_class_type_info*> (&cpp_type_info); #if PBG_TYPEMAP_DEBUG std::cerr << " -> looking at C++ type " << _typeinfo->name() << std::endl; #endif while (_typeinfo && (python_wrapper = m_map[std::string(_typeinfo->name())]) == 0) { _typeinfo = dynamic_cast<const abi::__si_class_type_info*> (_typeinfo->__base_type); #if PBG_TYPEMAP_DEBUG std::cerr << " -> looking at C++ type " << _typeinfo->name() << std::endl; #endif } #if PBG_TYPEMAP_DEBUG if (python_wrapper) { std::cerr << " -> found match " << std::endl; } else { std::cerr << " -> return fallback wrapper" << std::endl; } #endif return python_wrapper? python_wrapper : fallback_wrapper; #else // non gcc 3+ compilers can only match against explicitly registered classes, not hidden subclasses return fallback_wrapper; #endif } } }; } ''') else: code_sink.writeln(''' PyTypeObject * lookup_wrapper(const std::type_info &cpp_type_info, PyTypeObject *fallback_wrapper) { #if PBG_TYPEMAP_DEBUG std::cerr << "lookup_wrapper(this=" << this << ", type_name=" << cpp_type_info.name() << ")" << std::endl; #endif PyTypeObject *python_wrapper = m_map[cpp_type_info.name()]; return python_wrapper? python_wrapper : fallback_wrapper; } }; } ''') if self.import_from_module: code_sink.writeln("\nextern pybindgen::TypeMap *_%s;\n" % self.typeid_map_name) code_sink.writeln("#define %s (*_%s)\n" % (self.typeid_map_name, self.typeid_map_name)) else: code_sink.writeln("\nextern pybindgen::TypeMap %s;\n" % self.typeid_map_name) def _add_method_obj(self, method): """ Add a method object to the class. For internal use. :param method: a L{CppMethod} or L{Function} instance that can generate the method wrapper """ if isinstance(method, CppMethod): name = method.mangled_name elif isinstance(method, function.Function): name = method.custom_name assert isinstance(method.parameters[0], CppClassParameterBase) assert method.parameters[0].cpp_class is self, \ "expected first parameter to be of class %s, but it is of class %s" % \ (self.full_name, method.parameters[0].cpp_class.full_name) method.parameters[0].take_value_from_python_self = True method.module = self.module method.is_virtual = False method.is_pure_virtual = False method.self_parameter_pystruct = self.pystruct method.visibility = 'public' method.force_parse = method.PARSE_TUPLE_AND_KEYWORDS else: raise TypeError method.class_ = self if method.visibility == 'protected' and not method.is_virtual: helper_class = self.get_helper_class() if helper_class is not None: parent_caller = CppVirtualMethodParentCaller(method) parent_caller.helper_class = helper_class parent_caller.main_wrapper = method helper_class.add_virtual_parent_caller(parent_caller) elif method.visibility == 'public': if name == '__call__': # needs special handling method.force_parse = method.PARSE_TUPLE_AND_KEYWORDS try: overload = self.methods[name] except KeyError: overload = CppOverloadedMethod(name) overload.pystruct = self.pystruct self.methods[name] = overload ## add it.... try: utils.call_with_error_handling(overload.add, (method,), {}, method) except utils.SkipWrapper: return # Grr! I hate C++. Overloading + inheritance = disaster! # So I ended up coding something which C++ does not in # fact support, but I feel bad to just throw away my good # code due to a C++ fault, so I am leaving here the code # disabled. 
Maybe some future C++ version will come along # and fix this problem, who knows :P if 0: # due to a limitation of the pybindgen overloading # strategy, we need to re-wrap for this class all # methods with the same name and different signature # from parent classes. overload._compute_all_wrappers() if isinstance(method, CppMethod): mro = self.get_mro() next(mro) # skip 'self' for cls in mro: try: parent_overload = cls.methods[name] except KeyError: continue parent_overload._compute_all_wrappers() for parent_method in parent_overload.all_wrappers: already_exists = False for existing_method in overload.all_wrappers: if existing_method.matches_signature(parent_method): already_exists = True break if not already_exists: new_method = parent_method.clone() new_method.class_ = self overload.add(new_method) else: self.nonpublic_methods.append(method) if method.is_virtual: self._have_pure_virtual_methods = None helper_class = self.get_helper_class() if helper_class is not None: helper_class.add_virtual_method(method) def add_method(self, *args, **kwargs): """ Add a method to the class. See the documentation for L{CppMethod.__init__} for information on accepted parameters. """ ## <compat> if len(args) >= 1 and isinstance(args[0], CppMethod): meth = args[0] warnings.warn("add_method has changed API; see the API documentation", DeprecationWarning, stacklevel=2) if len(args) == 2: meth.custom_name = args[1] elif 'name' in kwargs: assert len(args) == 1 meth.custom_name = kwargs['name'] else: assert len(args) == 1 assert len(kwargs) == 0 elif len(args) >= 1 and isinstance(args[0], function.Function): meth = args[0] warnings.warn("add_method has changed API; see the API documentation", DeprecationWarning, stacklevel=2) if len(args) == 2: meth.custom_name = args[1] elif 'name' in kwargs: assert len(args) == 1 meth.custom_name = kwargs['name'] else: assert len(args) == 1 assert len(kwargs) == 0 ## </compat> else: try: meth = CppMethod(*args, **kwargs) except utils.SkipWrapper: if kwargs.get('is_virtual', False): ## if the method was supposed to be virtual, this ## is a very important fact that needs to be ## recorded in the class, even if the method is ## not wrapped. method = CppDummyMethod(*args, **kwargs) method.class_ = self self._dummy_methods.append(method) self._have_pure_virtual_methods = None helper_class = self.get_helper_class() if helper_class is not None: helper_class.add_virtual_method(method) if helper_class.cannot_be_constructed: self.helper_class = None self.helper_class_disabled = True return None self._add_method_obj(meth) return meth def add_function_as_method(self, *args, **kwargs): """ Add a function as method of the class. See the documentation for L{Function.__init__} for information on accepted parameters. TODO: explain the implicit first function parameter """ try: meth = function.Function(*args, **kwargs) except utils.SkipWrapper: return None self._add_method_obj(meth) return meth def add_custom_method_wrapper(self, *args, **kwargs): """ Adds a custom method wrapper. See L{CustomCppMethodWrapper} for more information. """ try: meth = CustomCppMethodWrapper(*args, **kwargs) except utils.SkipWrapper: return None self._add_method_obj(meth) return meth def set_helper_class_disabled(self, flag=True): self.helper_class_disabled = flag if flag: self.helper_class = None def set_cannot_be_constructed(self, reason): assert isinstance(reason, string_types) self.cannot_be_constructed = reason def _add_constructor_obj(self, wrapper): """ Add a constructor to the class. 
:param wrapper: a CppConstructor instance """ assert isinstance(wrapper, CppConstructor) wrapper.set_class(self) self.constructors.append(wrapper) if not wrapper.parameters: self.has_trivial_constructor = True # FIXME: I don't remember what is this used for anymore, maybe remove if len(wrapper.parameters) == 1 and isinstance(wrapper.parameters[0], (CppClassRefParameter, CppClassParameter)) \ and wrapper.parameters[0].cpp_class is self and wrapper.visibility == 'public': self.has_copy_constructor = True def add_output_stream_operator(self): """ Add str() support based on C++ output stream operator. Calling this method enables wrapping of an assumed to be defined operator function:: std::ostream & operator << (std::ostream &, MyClass const &); The wrapper will be registered as an str() python operator, and will call the C++ operator function to convert the value to a string. """ self.has_output_stream_operator = True self.module.add_include("<ostream>") self.module.add_include("<sstream>") def add_constructor(self, *args, **kwargs): """ Add a constructor to the class. See the documentation for L{CppConstructor.__init__} for information on accepted parameters. """ ## <compat> if len(args) == 1 and isinstance(args[0], CppConstructor): warnings.warn("add_constructor has changed API; see the API documentation", DeprecationWarning, stacklevel=2) constructor = args[0] elif len(args) == 1 and isinstance(args[0], function.Function): warnings.warn("add_constructor has changed API; see the API documentation", DeprecationWarning, stacklevel=2) func = args[0] constructor = CppFunctionAsConstructor(func.function_name, func.parameters) constructor.module = self.module ## </compat> else: try: constructor = CppConstructor(*args, **kwargs) except utils.SkipWrapper: return None self._add_constructor_obj(constructor) return constructor def add_copy_constructor(self): """ Utility method to add a 'copy constructor' method to this class. """ try: constructor = CppConstructor([self.ThisClassRefParameter("const %s &" % self.full_name, 'ctor_arg')]) except utils.SkipWrapper: return None self._add_constructor_obj(constructor) return constructor def add_function_as_constructor(self, *args, **kwargs): """ Wrap a function that behaves as a constructor to the class. See the documentation for L{CppFunctionAsConstructor.__init__} for information on accepted parameters. """ try: constructor = CppFunctionAsConstructor(*args, **kwargs) except utils.SkipWrapper: return None self._add_constructor_obj(constructor) return constructor def add_static_attribute(self, name, value_type, is_const=False): """ :param value_type: a ReturnValue object :param name: attribute name (i.e. the name of the class member variable) :param is_const: True if the attribute is const, i.e. 
cannot be modified """ ## backward compatibility check if isinstance(value_type, string_types) and isinstance(name, ReturnValue): warnings.warn("add_static_attribute has changed API; see the API documentation (but trying to correct...)", DeprecationWarning, stacklevel=2) value_type, name = name, value_type try: value_type = utils.eval_retval(value_type, None) except utils.SkipWrapper: return assert isinstance(value_type, ReturnValue) getter = CppStaticAttributeGetter(value_type, self, name) getter.stack_where_defined = traceback.extract_stack() if is_const: setter = None else: setter = CppStaticAttributeSetter(value_type, self, name) setter.stack_where_defined = traceback.extract_stack() self.static_attributes.add_attribute(name, getter, setter) def add_custom_instance_attribute(self, name, value_type, getter, is_const=False, setter=None, custom_name=None, getter_template_parameters=[], setter_template_parameters=[]): """ :param value_type: a ReturnValue object :param name: attribute name (i.e. the name of the class member variable) :param is_const: True if the attribute is const, i.e. cannot be modified :param getter: None, or name of a method of this class used to get the value :param setter: None, or name of a method of this class used to set the value :param getter_template_parameters: optional list of template parameters for getter function :param setter_template_parameters: optional list of template parameters for setter function """ ## backward compatibility check if isinstance(value_type, string_types) and isinstance(name, ReturnValue): warnings.warn("add_custom_instance_attribute has changed API; see the API documentation (but trying to correct...)", DeprecationWarning, stacklevel=2) value_type, name = name, value_type try: value_type = utils.eval_retval(value_type, None) except utils.SkipWrapper: return assert isinstance(value_type, ReturnValue) getter_wrapper = CppCustomInstanceAttributeGetter(value_type, self, name, getter=getter, template_parameters = getter_template_parameters) getter_wrapper.stack_where_defined = traceback.extract_stack() if is_const: setter_wrapper = None assert setter is None else: setter_wrapper = CppCustomInstanceAttributeSetter(value_type, self, name, setter=setter, template_parameters = setter_template_parameters) setter_wrapper.stack_where_defined = traceback.extract_stack() self.instance_attributes.add_attribute(name, getter_wrapper, setter_wrapper, custom_name) def add_instance_attribute(self, name, value_type, is_const=False, getter=None, setter=None, custom_name=None): """ :param value_type: a ReturnValue object :param name: attribute name (i.e. the name of the class member variable) :param is_const: True if the attribute is const, i.e. 
cannot be modified :param getter: None, or name of a method of this class used to get the value :param setter: None, or name of a method of this class used to set the value """ ## backward compatibility check if isinstance(value_type, string_types) and isinstance(name, ReturnValue): warnings.warn("add_static_attribute has changed API; see the API documentation (but trying to correct...)", DeprecationWarning, stacklevel=2) value_type, name = name, value_type try: value_type = utils.eval_retval(value_type, None) except utils.SkipWrapper: return assert isinstance(value_type, ReturnValue) getter_wrapper = CppInstanceAttributeGetter(value_type, self, name, getter=getter) getter_wrapper.stack_where_defined = traceback.extract_stack() if is_const: setter_wrapper = None assert setter is None else: setter_wrapper = CppInstanceAttributeSetter(value_type, self, name, setter=setter) setter_wrapper.stack_where_defined = traceback.extract_stack() self.instance_attributes.add_attribute(name, getter_wrapper, setter_wrapper, custom_name) def _inherit_helper_class_parent_virtuals(self): """ Given a class containing a helper class, add all virtual methods from the all parent classes of this class. """ mro = self.get_mro() next(mro) # skip 'self' for cls in mro: for method in cls.get_all_methods(): if not method.is_virtual: continue method = method.clone() self.helper_class.add_virtual_method(method) def _get_wrapper_registry(self): # there is one wrapper registry object per root class only, # which is used for all subclasses. if self.parent is None: if self._wrapper_registry is None: self._wrapper_registry = settings.wrapper_registry(self.pystruct) return self._wrapper_registry else: return self.parent._get_wrapper_registry() wrapper_registry = property(_get_wrapper_registry) def generate_forward_declarations(self, code_sink, module): """ Generates forward declarations for the instance and type structures. 
""" if self.memory_policy is not None: pointer_type = self.memory_policy.get_pointer_type(self.full_name) else: pointer_type = self.full_name + " *" if self.allow_subclassing: code_sink.writeln(''' typedef struct { PyObject_HEAD %sobj; PyObject *inst_dict; PyBindGenWrapperFlags flags:8; } %s; ''' % (pointer_type, self.pystruct)) else: code_sink.writeln(''' typedef struct { PyObject_HEAD %sobj; PyBindGenWrapperFlags flags:8; } %s; ''' % (pointer_type, self.pystruct)) code_sink.writeln() if self.import_from_module: code_sink.writeln('extern PyTypeObject *_%s;' % (self.pytypestruct,)) code_sink.writeln('#define %s (*_%s)' % (self.pytypestruct, self.pytypestruct)) else: code_sink.writeln('extern PyTypeObject %s;' % (self.pytypestruct,)) if not self.static_attributes.empty(): code_sink.writeln('extern PyTypeObject Py%s_Type;' % (self.metaclass_name,)) code_sink.writeln() if self.helper_class is not None: self._inherit_helper_class_parent_virtuals() for hook in self._get_all_helper_class_hooks(): hook(self.helper_class) self.helper_class.generate_forward_declarations(code_sink) if self.helper_class.cannot_be_constructed: self.helper_class = None self.helper_class_disabled = True if self.have_pure_virtual_methods and self.helper_class is None: self.cannot_be_constructed = "have pure virtual methods but no helper class" if self.typeid_map_name is not None: self._generate_typeid_map(code_sink, module) if self.container_traits is not None: self.container_traits.generate_forward_declarations(code_sink, module) if self.parent is None: self.wrapper_registry.generate_forward_declarations(code_sink, module, self.import_from_module) def get_python_name(self): if self.template_parameters: if self.custom_name is None: class_python_name = self.mangled_name else: class_python_name = self.custom_name else: if self.custom_name is None: class_python_name = self.name else: class_python_name = self.custom_name return class_python_name def _generate_import_from_module(self, code_sink, module): if module.parent is None: error_retcode = "MOD_ERROR" else: error_retcode = "NULL" # TODO: skip this step if the requested typestructure is never used if ' named ' in self.import_from_module: module_name, type_name = self.import_from_module.split(" named ") else: module_name, type_name = self.import_from_module, self.name code_sink.writeln("PyTypeObject *_%s;" % self.pytypestruct) module.after_init.write_code("/* Import the %r class from module %r */" % (self.full_name, self.import_from_module)) module.after_init.write_code("{"); module.after_init.indent() module.after_init.write_code("PyObject *module = PyImport_ImportModule((char*) \"%s\");" % module_name) module.after_init.write_code( "if (module == NULL) {\n" " return %s;\n" "}" % (error_retcode,)) module.after_init.write_code("_%s = (PyTypeObject*) PyObject_GetAttrString(module, (char*) \"%s\");\n" % (self.pytypestruct, self.get_python_name())) module.after_init.write_code("if (PyErr_Occurred()) PyErr_Clear();") if self.typeid_map_name is not None: code_sink.writeln("pybindgen::TypeMap *_%s;" % self.typeid_map_name) module.after_init.write_code("/* Import the %r class type map from module %r */" % (self.full_name, self.import_from_module)) module.after_init.write_code("PyObject *_cobj = PyObject_GetAttrString(module, (char*) \"_%s\");" % (self.typeid_map_name)) module.after_init.write_code("if (_cobj == NULL) {\n" " _%s = new pybindgen::TypeMap;\n" " PyErr_Clear();\n" "} else {\n" " _%s = reinterpret_cast<pybindgen::TypeMap*> (PyCObject_AsVoidPtr (_cobj));\n" " 
Py_DECREF(_cobj);\n" "}" % (self.typeid_map_name, self.typeid_map_name)) if self.parent is None: self.wrapper_registry.generate_import(code_sink, module.after_init, "module") module.after_init.unindent(); module.after_init.write_code("}") if self.helper_class is not None: self.helper_class.generate(code_sink) def generate(self, code_sink, module): """Generates the class to a code sink""" if self.import_from_module: self._generate_import_from_module(code_sink, module) return # .......................... RETURN if self.typeid_map_name is not None: code_sink.writeln("\npybindgen::TypeMap %s;\n" % self.typeid_map_name) module.after_init.write_code("PyModule_AddObject(m, (char *) \"_%s\", PyCObject_FromVoidPtr(&%s, NULL));" % (self.typeid_map_name, self.typeid_map_name)) if self.automatic_type_narrowing: self._register_typeid(module) if self.parent is None: self.wrapper_registry.generate(code_sink, module) if self.helper_class is not None: parent_caller_methods = self.helper_class.generate(code_sink) else: parent_caller_methods = [] ## generate getsets instance_getsets = self.instance_attributes.generate(code_sink) self.slots.setdefault("tp_getset", instance_getsets) static_getsets = self.static_attributes.generate(code_sink) ## --- register the class type in the module --- module.after_init.write_code("/* Register the '%s' class */" % self.full_name) ## generate a metaclass if needed if static_getsets == '0': metaclass = None else: if self.parent is None: parent_typestruct = 'PyBaseObject_Type' else: parent_typestruct = self.parent.pytypestruct metaclass = PyMetaclass(self.metaclass_name, "Py_TYPE(&%s)" % parent_typestruct, self.static_attributes) metaclass.generate(code_sink, module) if self.parent is not None: assert isinstance(self.parent, CppClass) module.after_init.write_code('%s.tp_base = &%s;' % (self.pytypestruct, self.parent.pytypestruct)) if len(self.bases) > 1: module.after_init.write_code('%s.tp_bases = PyTuple_New(%i);' % (self.pytypestruct, len(self.bases),)) for basenum, base in enumerate(self.bases): module.after_init.write_code(' Py_INCREF((PyObject *) &%s);' % (base.pytypestruct,)) module.after_init.write_code(' PyTuple_SET_ITEM(%s.tp_bases, %i, (PyObject *) &%s);' % (self.pytypestruct, basenum, base.pytypestruct)) if metaclass is not None: module.after_init.write_code('Py_TYPE(&%s) = &%s;' % (self.pytypestruct, metaclass.pytypestruct)) module.after_init.write_error_check('PyType_Ready(&%s)' % (self.pytypestruct,)) class_python_name = self.get_python_name() if self.outer_class is None: module.after_init.write_code( 'PyModule_AddObject(m, (char *) \"%s\", (PyObject *) &%s);' % ( class_python_name, self.pytypestruct)) else: module.after_init.write_code( 'PyDict_SetItemString((PyObject*) %s.tp_dict, (char *) \"%s\", (PyObject *) &%s);' % ( self.outer_class.pytypestruct, class_python_name, self.pytypestruct)) have_constructor = self._generate_constructor(code_sink) self._generate_methods(code_sink, parent_caller_methods) if self.allow_subclassing: self._generate_gc_methods(code_sink) self._generate_destructor(code_sink, have_constructor) if self.has_output_stream_operator: self._generate_str(code_sink) #self._generate_tp_hash(code_sink) #self._generate_tp_compare(code_sink) #if self.slots.get("tp_hash", "NULL") == "NULL": # self.slots["tp_hash"] = self._generate_tp_hash(code_sink) if self.slots.get("tp_richcompare", "NULL") == "NULL" and self.binary_comparison_operators: self.slots["tp_richcompare"] = self._generate_tp_richcompare(code_sink) if self.binary_numeric_operators or 
self.inplace_numeric_operators: self.slots["tp_as_number"] = self._generate_number_methods(code_sink) if self.have_sequence_methods(): self.slots["tp_as_sequence"] = self._generate_sequence_methods(code_sink) if self.container_traits is not None: self.container_traits.generate(code_sink, module) self._generate_type_structure(code_sink, self.docstring) def _generate_number_methods(self, code_sink): number_methods_var_name = "%s__py_number_methods" % (self.mangled_full_name,) pynumbermethods = PyNumberMethods() pynumbermethods.slots['variable'] = number_methods_var_name # iterate over all types and request generation of the # convertion functions for that type (so that those functions # are not generated in the middle of one of the wrappers we # are about to generate) root_module = self.module.get_root() for dummy_op_symbol, op_types in self.binary_numeric_operators.items(): for (retval, left, right) in op_types: get_c_to_python_converter(retval, root_module, code_sink) get_python_to_c_converter(left, root_module, code_sink) get_python_to_c_converter(right, root_module, code_sink) for dummy_op_symbol, op_types in self.inplace_numeric_operators.items(): for (retval, left, right) in op_types: get_python_to_c_converter(left, root_module, code_sink) get_python_to_c_converter(right, root_module, code_sink) get_c_to_python_converter(retval, root_module, code_sink) for dummy_op_symbol, op_types in self.unary_numeric_operators.items(): for (retval, left) in op_types: get_c_to_python_converter(retval, root_module, code_sink) get_python_to_c_converter(left, root_module, code_sink) def try_wrap_operator(op_symbol, slot_name): if op_symbol in self.binary_numeric_operators: op_types = self.binary_numeric_operators[op_symbol] elif op_symbol in self.inplace_numeric_operators: op_types = self.inplace_numeric_operators[op_symbol] else: return wrapper_name = "%s__%s" % (self.mangled_full_name, slot_name) pynumbermethods.slots[slot_name] = wrapper_name code_sink.writeln(("static PyObject*\n" "%s (PyObject *py_left, PyObject *py_right)\n" "{") % wrapper_name) code_sink.indent() for (retval, left, right) in op_types: retval_converter, retval_name = get_c_to_python_converter(retval, root_module, code_sink) left_converter, left_name = get_python_to_c_converter(left, root_module, code_sink) right_converter, right_name = get_python_to_c_converter(right, root_module, code_sink) code_sink.writeln("{") code_sink.indent() code_sink.writeln("%s left;" % left_name) code_sink.writeln("%s right;" % right_name) code_sink.writeln("if (%s(py_left, &left) && %s(py_right, &right)) {" % (left_converter, right_converter)) code_sink.indent() code_sink.writeln("%s result = (left %s right);" % (retval_name, op_symbol)) code_sink.writeln("return %s(&result);" % retval_converter) code_sink.unindent() code_sink.writeln("}") code_sink.writeln("PyErr_Clear();") code_sink.unindent() code_sink.writeln("}") code_sink.writeln("Py_INCREF(Py_NotImplemented);") code_sink.writeln("return Py_NotImplemented;") code_sink.unindent() code_sink.writeln("}") def try_wrap_unary_operator(op_symbol, slot_name): if op_symbol in self.unary_numeric_operators: op_types = self.unary_numeric_operators[op_symbol] else: return wrapper_name = "%s__%s" % (self.mangled_full_name, slot_name) pynumbermethods.slots[slot_name] = wrapper_name code_sink.writeln(("static PyObject*\n" "%s (PyObject *py_self)\n" "{") % wrapper_name) code_sink.indent() for (retval, left) in op_types: retval_converter, retval_name = get_c_to_python_converter(retval, root_module, code_sink) 
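                ## Editor's note: each overload emits a block that attempts
                ## the Python->C conversions; on failure it calls
                ## PyErr_Clear() and falls through to the next block, and if
                ## no overload matches, the wrapper returns Py_NotImplemented
                ## so Python can try the reflected operation.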
left_converter, left_name = get_python_to_c_converter(left, root_module, code_sink) code_sink.writeln("{") code_sink.indent() code_sink.writeln("%s self;" % left_name) code_sink.writeln("if (%s(py_self, &self)) {" % (left_converter)) code_sink.indent() code_sink.writeln("%s result = %s(self);" % (retval_name, op_symbol)) code_sink.writeln("return %s(&result);" % retval_converter) code_sink.unindent() code_sink.writeln("}") code_sink.writeln("PyErr_Clear();") code_sink.unindent() code_sink.writeln("}") code_sink.writeln("Py_INCREF(Py_NotImplemented);") code_sink.writeln("return Py_NotImplemented;") code_sink.unindent() code_sink.writeln("}") try_wrap_operator('+', 'nb_add') try_wrap_operator('-', 'nb_subtract') try_wrap_operator('*', 'nb_multiply') try_wrap_operator('/', 'nb_divide') try_wrap_operator('+=', 'nb_inplace_add') try_wrap_operator('-=', 'nb_inplace_subtract') try_wrap_operator('*=', 'nb_inplace_multiply') try_wrap_operator('/=', 'nb_inplace_divide') try_wrap_unary_operator('-', 'nb_negative') pynumbermethods.generate(code_sink) return '&' + number_methods_var_name def _generate_sequence_methods(self, code_sink): sequence_methods_var_name = "%s__py_sequence_methods" % (self.mangled_full_name,) pysequencemethods = PySequenceMethods() pysequencemethods.slots['variable'] = sequence_methods_var_name root_module = self.module.get_root() self_converter = root_module.generate_python_to_c_type_converter(self.ThisClassReturn(self.full_name), code_sink) def try_wrap_sequence_method(py_name, slot_name): if py_name in self.methods: numwraps = len(self.methods[py_name].wrappers) some_wrapper_is_function = max([isinstance(x, function.Function) for x in self.methods[py_name].wrappers]) meth_wrapper_actual_name = self.methods[py_name].wrapper_actual_name wrapper_name = "%s__%s" % (self.mangled_full_name, slot_name) pysequencemethods.slots[slot_name] = wrapper_name if py_name == "__len__" and (numwraps > 1 or some_wrapper_is_function): template = pysequencemethods.FUNCTION_TEMPLATES[slot_name + "_ARGS"] else: template = pysequencemethods.FUNCTION_TEMPLATES[slot_name] code_sink.writeln(template % {'wrapper_name' : wrapper_name, 'py_struct' : self._pystruct, 'method_name' : meth_wrapper_actual_name}) return for py_name in self.valid_sequence_methods: slot_name = self.valid_sequence_methods[py_name] try_wrap_sequence_method(py_name, slot_name) pysequencemethods.generate(code_sink) return '&' + sequence_methods_var_name def have_sequence_methods(self): """Determine if this object has sequence methods registered.""" for x in self.valid_sequence_methods: if x in self.methods: return True return False def _generate_type_structure(self, code_sink, docstring): """generate the type structure""" self.slots.setdefault("tp_basicsize", "sizeof(%s)" % (self.pystruct,)) tp_flags = set(['Py_TPFLAGS_DEFAULT']) if self.allow_subclassing: tp_flags.add("Py_TPFLAGS_HAVE_GC") tp_flags.add("Py_TPFLAGS_BASETYPE") self.slots.setdefault("tp_dictoffset", "offsetof(%s, inst_dict)" % self.pystruct) else: self.slots.setdefault("tp_dictoffset", "0") if self.binary_numeric_operators: tp_flags.add("Py_TPFLAGS_CHECKTYPES") self.slots.setdefault("tp_flags", '|'.join(sorted(tp_flags))) if docstring is None: docstring = self.generate_docstring() self.slots.setdefault("tp_doc", (docstring is None and 'NULL' or "\"%s\"" % (docstring,))) dict_ = self.slots dict_.setdefault("typestruct", self.pytypestruct) if self.outer_class is None: mod_path = self._module.get_module_path() mod_path.append(self.mangled_name) dict_.setdefault("tp_name", 
'.'.join(mod_path)) else: dict_.setdefault("tp_name", '%s.%s' % (self.outer_class.slots['tp_name'], self.name)) ## tp_call support try: call_method = self.methods['__call__'] except KeyError: pass else: if call_method.wrapper_actual_name: dict_.setdefault("tp_call", call_method.wrapper_actual_name) self.pytype.generate(code_sink) def generate_docstring(self): name = self.get_python_name() return "\\n".join(sorted([c.generate_docstring(name) for c in self.constructors], key=len, reverse=True)) def _generate_constructor(self, code_sink): """generate the constructor, if any""" have_constructor = True if self.constructors and ((not self.cannot_be_constructed) or self.helper_class is not None and not self.helper_class.cannot_be_constructed): code_sink.writeln() overload = CppOverloadedConstructor(None) self.constructors_overload = overload overload.pystruct = self.pystruct for constructor in self.constructors: try: overload.add(constructor) except CodegenErrorBase: continue if overload.wrappers: try: overload.generate(code_sink) except utils.SkipWrapper: constructor = None have_constructor = False else: constructor = overload.wrapper_actual_name code_sink.writeln() else: constructor = None have_constructor = False else: ## In C++, and unlike Python, constructors with ## parameters are not automatically inheritted by ## subclasses. We must generate a 'no constructor' ## tp_init to prevent this type from inheriting a ## tp_init that will allocate an instance of the ## parent class instead of this class. code_sink.writeln() wrapper = CppNoConstructor(self.cannot_be_constructed) wrapper.generate(code_sink, self) constructor = wrapper.wrapper_actual_name have_constructor = False code_sink.writeln() self.slots.setdefault("tp_init", (constructor is None and "NULL" or constructor)) return have_constructor def _generate_copy_method(self, code_sink): construct_name = self.get_construct_name() copy_wrapper_name = '_wrap_%s__copy__' % self.pystruct code_sink.writeln(''' static PyObject*\n%s(%s *self, PyObject *PYBINDGEN_UNUSED(_args)) { ''' % (copy_wrapper_name, self.pystruct)) code_sink.indent() declarations = DeclarationsScope() code_block = CodeBlock("return NULL;", declarations) py_copy = declarations.declare_variable("%s*" % self.pystruct, "py_copy") self.write_allocate_pystruct(code_block, py_copy) code_block.write_code("%s->obj = new %s(*self->obj);" % (py_copy, construct_name)) if self.allow_subclassing: code_block.write_code("%s->inst_dict = NULL;" % py_copy) code_block.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % py_copy) self.wrapper_registry.write_register_new_wrapper(code_block, py_copy, "%s->obj" % py_copy) code_block.write_code("return (PyObject*) %s;" % py_copy) declarations.get_code_sink().flush_to(code_sink) code_block.write_cleanup() code_block.sink.flush_to(code_sink) code_sink.unindent() code_sink.writeln("}") code_sink.writeln() return copy_wrapper_name def _generate_MI_parent_methods(self, code_sink): methods = {} mro = self.get_mro() next(mro) for base in mro: for method_name, parent_overload in base.methods.items(): # skip methods registered via special type slots, not method table if method_name in (['__call__'] + list(self.valid_sequence_methods)): continue try: overload = methods[method_name] except KeyError: overload = CppOverloadedMethod(method_name) overload.pystruct = self.pystruct methods[method_name] = overload for parent_wrapper in parent_overload.wrappers: if parent_wrapper.visibility != 'public': continue # the method may have been re-defined as private in 
our class private = False for leaf_wrapper in self.nonpublic_methods: if leaf_wrapper.matches_signature(parent_wrapper): private = True break if private: continue # the method may have already been wrapped in our class already_wrapped = False try: overload = self.methods[method_name] except KeyError: pass else: for leaf_wrapper in overload.wrappers: if leaf_wrapper.matches_signature(parent_wrapper): already_wrapped = True break if already_wrapped: continue wrapper = parent_wrapper.clone() wrapper.original_class = base wrapper.class_ = self overload.add(wrapper) method_defs = [] for method_name, overload in methods.items(): if not overload.wrappers: continue classes = [] for wrapper in overload.wrappers: if wrapper.original_class not in classes: classes.append(wrapper.original_class) if len(classes) > 1: continue # overloading with multiple base classes is just too confusing try: utils.call_with_error_handling(overload.generate, (code_sink,), {}, overload) except utils.SkipWrapper: continue code_sink.writeln() method_defs.append(overload.get_py_method_def(method_name)) return method_defs def _generate_methods(self, code_sink, parent_caller_methods): """generate the method wrappers""" method_defs = [] for meth_name, overload in self.methods.items(): code_sink.writeln() #overload.generate(code_sink) try: utils.call_with_error_handling(overload.generate, (code_sink,), {}, overload) except utils.SkipWrapper: continue # skip methods registered via special type slots, not method table if meth_name not in (['__call__'] + list(self.valid_sequence_methods)): method_defs.append(overload.get_py_method_def(meth_name)) code_sink.writeln() method_defs.extend(parent_caller_methods) if len(self.bases) > 1: # https://bugs.launchpad.net/pybindgen/+bug/563786 method_defs.extend(self._generate_MI_parent_methods(code_sink)) if self.has_copy_constructor: try: copy_wrapper_name = utils.call_with_error_handling(self._generate_copy_method, (code_sink,), {}, self) except utils.SkipWrapper: pass else: method_defs.append('{(char *) "__copy__", (PyCFunction) %s, METH_NOARGS, NULL},' % copy_wrapper_name) ## generate the method table code_sink.writeln("static PyMethodDef %s_methods[] = {" % (self.pystruct,)) code_sink.indent() for methdef in method_defs: code_sink.writeln(methdef) code_sink.writeln("{NULL, NULL, 0, NULL}") code_sink.unindent() code_sink.writeln("};") self.slots.setdefault("tp_methods", "%s_methods" % (self.pystruct,)) def _get_delete_code(self): if self.is_singleton: delete_code = '' else: if self.memory_policy is not None: delete_code = self.memory_policy.get_delete_code(self) else: if self.incomplete_type: raise CodeGenerationError("Cannot finish generating class %s: " "type is incomplete, but no free/unref_function defined" % self.full_name) if self.destructor_visibility == 'public': delete_code = (" %s *tmp = self->obj;\n" " self->obj = NULL;\n" " if (!(self->flags&PYBINDGEN_WRAPPER_FLAG_OBJECT_NOT_OWNED)) {\n" " delete tmp;\n" " }" % (self.full_name,)) else: delete_code = (" self->obj = NULL;\n") return delete_code def _generate_gc_methods(self, code_sink): """Generate tp_clear and tp_traverse""" ## --- tp_clear --- tp_clear_function_name = "%s__tp_clear" % (self.pystruct,) self.slots.setdefault("tp_clear", tp_clear_function_name ) delete_code = self._get_delete_code() code_sink.writeln(r''' static void %s(%s *self) { Py_CLEAR(self->inst_dict); %s } ''' % (tp_clear_function_name, self.pystruct, delete_code)) ## --- tp_traverse --- tp_traverse_function_name = "%s__tp_traverse" % (self.pystruct,) 
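        ## Editor's note: tp_clear (above) drops the references the wrapper
        ## holds (the instance dict and, unless flagged OBJECT_NOT_OWNED, the
        ## C++ object), while tp_traverse (below) reports those references to
        ## CPython's cycle collector; together they let the garbage collector
        ## break reference cycles that pass through wrapped C++ objects.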
self.slots.setdefault("tp_traverse", tp_traverse_function_name ) if self.helper_class is None: visit_self = '' else: if not isinstance(self.memory_policy, ReferenceCountingMethodsPolicy) or self.memory_policy.peekref_method is None: peekref_code = '' else: peekref_code = " && self->obj->%s() == 1" % self.memory_policy.peekref_method visit_self = ''' if (self->obj && typeid(*self->obj).name() == typeid(%s).name() %s) Py_VISIT((PyObject *) self); ''' % (self.helper_class.name, peekref_code) code_sink.writeln(r''' static int %s(%s *self, visitproc visit, void *arg) { Py_VISIT(self->inst_dict); %s return 0; } ''' % (tp_traverse_function_name, self.pystruct, visit_self)) def _generate_str(self, code_sink): """Generate a tp_str function and register it in the type""" tp_str_function_name = "_wrap_%s__tp_str" % (self.pystruct,) self.slots.setdefault("tp_str", tp_str_function_name ) code_sink.writeln(''' static PyObject * %s(%s *self) { std::ostringstream oss; oss << *self->obj; return PyUnicode_FromString(oss.str ().c_str ()); } ''' % (tp_str_function_name, self.pystruct)) def _generate_tp_hash(self, code_sink): """generates a tp_hash function, which returns a hash of the self->obj pointer""" tp_hash_function_name = "_wrap_%s__tp_hash" % (self.pystruct,) self.slots.setdefault("tp_hash", tp_hash_function_name ) code_sink.writeln(''' static long %s(%s *self) { return (long) self->obj; } ''' % (tp_hash_function_name, self.pystruct)) return tp_hash_function_name def _generate_tp_compare(self, code_sink): """generates a tp_compare function, which compares the ->obj pointers""" tp_compare_function_name = "_wrap_%s__tp_compare" % (self.pystruct,) self.slots.setdefault("tp_compare", tp_compare_function_name ) code_sink.writeln(''' static int %s(%s *self, %s *other) { if (self->obj == other->obj) return 0; if (self->obj > other->obj) return -1; return 1; } ''' % (tp_compare_function_name, self.pystruct, self.pystruct)) def _generate_destructor(self, code_sink, have_constructor): """Generate a tp_dealloc function and register it in the type""" ## don't generate destructor if overridden by user if "tp_dealloc" in self.slots: return tp_dealloc_function_name = "_wrap_%s__tp_dealloc" % (self.pystruct,) code_sink.writeln(r''' static void %s(%s *self) {''' % (tp_dealloc_function_name, self.pystruct)) code_sink.indent() code_block = CodeBlock("PyErr_Print(); return;", DeclarationsScope()) if self.memory_policy is not None: self.wrapper_registry.write_unregister_wrapper(code_block, 'self', self.memory_policy.get_pointer_to_void_name('self->obj')) else: self.wrapper_registry.write_unregister_wrapper(code_block, 'self', 'self->obj') if self.allow_subclassing: code_block.write_code("%s(self);" % self.slots["tp_clear"]) else: code_block.write_code(self._get_delete_code()) code_block.write_code('Py_TYPE(self)->tp_free((PyObject*)self);') code_block.write_cleanup() code_block.declarations.get_code_sink().flush_to(code_sink) code_block.sink.flush_to(code_sink) code_sink.unindent() code_sink.writeln('}\n') self.slots.setdefault("tp_dealloc", tp_dealloc_function_name ) def _generate_tp_richcompare(self, code_sink): tp_richcompare_function_name = "_wrap_%s__tp_richcompare" % (self.pystruct,) code_sink.writeln("static PyObject*\n%s (%s *self, %s *other, int opid)" % (tp_richcompare_function_name, self.pystruct, self.pystruct)) code_sink.writeln("{") code_sink.indent() code_sink.writeln(""" if (!PyObject_IsInstance((PyObject*) other, (PyObject*) &%s)) { Py_INCREF(Py_NotImplemented); return Py_NotImplemented; }""" % 
self.pytypestruct) code_sink.writeln("switch (opid)\n{") def wrap_operator(name, opid_code): code_sink.writeln("case %s:" % opid_code) code_sink.indent() if name in self.binary_comparison_operators: code_sink.writeln("if (*self->obj %(OP)s *other->obj) {\n" " Py_INCREF(Py_True);\n" " return Py_True;\n" "} else {\n" " Py_INCREF(Py_False);\n" " return Py_False;\n" "}" % dict(OP=name)) else: code_sink.writeln("Py_INCREF(Py_NotImplemented);\n" "return Py_NotImplemented;") code_sink.unindent() wrap_operator('<', 'Py_LT') wrap_operator('<=', 'Py_LE') wrap_operator('==', 'Py_EQ') wrap_operator('!=', 'Py_NE') wrap_operator('>=', 'Py_GE') wrap_operator('>', 'Py_GT') code_sink.writeln("} /* closes switch (opid) */") code_sink.writeln("Py_INCREF(Py_NotImplemented);\n" "return Py_NotImplemented;") code_sink.unindent() code_sink.writeln("}\n") return tp_richcompare_function_name def generate_typedef(self, module, alias): """ Generates the appropriate Module code to register the class with a new name in that module (typedef alias). """ module.after_init.write_code( 'PyModule_AddObject(m, (char *) \"%s\", (PyObject *) &%s);' % ( alias, self.pytypestruct)) def write_allocate_pystruct(self, code_block, lvalue, wrapper_type=None): """ Generates code to allocate a python wrapper structure, using PyObject_New or PyObject_GC_New, plus some additional strcture initialization that may be needed. """ if self.allow_subclassing: new_func = 'PyObject_GC_New' else: new_func = 'PyObject_New' if wrapper_type is None: wrapper_type = '&'+self.pytypestruct code_block.write_code("%s = %s(%s, %s);" % (lvalue, new_func, self.pystruct, wrapper_type)) if self.allow_subclassing: code_block.write_code( "%s->inst_dict = NULL;" % (lvalue,)) if self.memory_policy is not None: code_block.write_code(self.memory_policy.get_pystruct_init_code(self, lvalue)) # from pybindgen.cppclass_typehandlers import CppClassParameter, CppClassRefParameter, \ # CppClassReturnValue, CppClassRefReturnValue, CppClassPtrParameter, CppClassPtrReturnValue, CppClassParameterBase, \ # CppClassSharedPtrParameter, CppClassSharedPtrReturnValue #from pybindgen.function import function from pybindgen.cppmethod import CppMethod, CppConstructor, CppNoConstructor, CppFunctionAsConstructor, \ CppOverloadedMethod, CppOverloadedConstructor, \ CppVirtualMethodParentCaller, CppVirtualMethodProxy, CustomCppMethodWrapper, \ CppDummyMethod def common_shared_object_return(value, py_name, cpp_class, code_block, type_traits, caller_owns_return, reference_existing_object, type_is_pointer, caller_manages_return=True, free_after_copy=False): if type_is_pointer: value_value = '(*%s)' % value value_ptr = value else: value_ptr = '(&%s)' % value value_value = value def write_create_new_wrapper(): """Code path that creates a new wrapper for the returned object""" ## Find out what Python wrapper to use, in case ## automatic_type_narrowing is active and we are not forced to ## make a copy of the object if (cpp_class.automatic_type_narrowing and (caller_owns_return or isinstance(cpp_class.memory_policy, ReferenceCountingPolicy))): typeid_map_name = cpp_class.get_type_narrowing_root().typeid_map_name wrapper_type = code_block.declare_variable( 'PyTypeObject*', 'wrapper_type', '0') code_block.write_code( '%s = %s.lookup_wrapper(typeid(%s), &%s);' % (wrapper_type, typeid_map_name, value_value, cpp_class.pytypestruct)) else: wrapper_type = '&'+cpp_class.pytypestruct ## Create the Python wrapper object cpp_class.write_allocate_pystruct(code_block, py_name, wrapper_type) if 
cpp_class.allow_subclassing: code_block.write_code( "%s->inst_dict = NULL;" % (py_name,)) ## Assign the C++ value to the Python wrapper if caller_owns_return: if type_traits.target_is_const: code_block.write_code("%s->obj = (%s *) (%s);" % (py_name, cpp_class.full_name, value_ptr)) else: code_block.write_code("%s->obj = %s;" % (py_name, value_ptr)) code_block.write_code( "%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (py_name,)) else: if not isinstance(cpp_class.memory_policy, ReferenceCountingPolicy): if reference_existing_object: if type_traits.target_is_const: code_block.write_code("%s->obj = (%s *) (%s);" % (py_name, cpp_class.full_name, value_ptr)) else: code_block.write_code("%s->obj = %s;" % (py_name, value_ptr)) code_block.write_code( "%s->flags = PYBINDGEN_WRAPPER_FLAG_OBJECT_NOT_OWNED;" % (py_name,)) else: if caller_manages_return: # The PyObject creates its own copy if not cpp_class.has_copy_constructor: raise CodeGenerationError("Class {0} cannot be copied".format(cpp_class.full_name)) cpp_class.write_create_instance(code_block, "%s->obj" % py_name, value_value) code_block.write_code( "%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (py_name,)) cpp_class.write_post_instance_creation_code(code_block, "%s->obj" % py_name, value_value) else: if type_traits.target_is_const: code_block.write_code("%s->obj = (%s *) (%s);" % (py_name, cpp_class.full_name, value_ptr)) else: code_block.write_code("%s->obj = %s;" % (py_name, value_ptr)) code_block.write_code( "%s->flags = PYBINDGEN_WRAPPER_FLAG_OBJECT_NOT_OWNED;" % (py_name,)) else: if caller_manages_return: ## The PyObject gets a new reference to the same obj code_block.write_code( "%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (py_name,)) cpp_class.memory_policy.write_incref(code_block, value_ptr) if type_traits.target_is_const: code_block.write_code("%s->obj = (%s*) (%s);" % (py_name, cpp_class.full_name, value_ptr)) else: code_block.write_code("%s->obj = %s;" % (py_name, value_ptr)) else: if type_traits.target_is_const: code_block.write_code("%s->obj = (%s *) (%s);" % (py_name, cpp_class.full_name, value_ptr)) else: code_block.write_code("%s->obj = %s;" % (py_name, value_ptr)) code_block.write_code( "%s->flags = PYBINDGEN_WRAPPER_FLAG_OBJECT_NOT_OWNED;" % (py_name,)) ## closes def write_create_new_wrapper(): if cpp_class.helper_class is None: try: if cpp_class.memory_policy is not None: cpp_class.wrapper_registry.write_lookup_wrapper( code_block, cpp_class.pystruct, py_name, cpp_class.memory_policy.get_pointer_to_void_name(value_ptr)) else: cpp_class.wrapper_registry.write_lookup_wrapper( code_block, cpp_class.pystruct, py_name, value_ptr) except NotSupportedError: write_create_new_wrapper() if cpp_class.memory_policy is not None: cpp_class.wrapper_registry.write_register_new_wrapper( code_block, py_name, cpp_class.memory_policy.get_pointer_to_void_name("%s->obj" % py_name)) else: cpp_class.wrapper_registry.write_register_new_wrapper( code_block, py_name, "%s->obj" % py_name) else: code_block.write_code("if (%s == NULL) {" % py_name) code_block.indent() write_create_new_wrapper() if cpp_class.memory_policy is not None: cpp_class.wrapper_registry.write_register_new_wrapper( code_block, py_name, cpp_class.memory_policy.get_pointer_to_void_name("%s->obj" % py_name)) else: cpp_class.wrapper_registry.write_register_new_wrapper( code_block, py_name, "%s->obj" % py_name) code_block.unindent() # If we are already referencing the existing python wrapper, # we do not need a reference to the C++ object as well. 
if caller_owns_return and \ isinstance(cpp_class.memory_policy, ReferenceCountingPolicy): code_block.write_code("} else {") code_block.indent() cpp_class.memory_policy.write_decref(code_block, value_ptr) code_block.unindent() code_block.write_code("}") else: code_block.write_code("}") else: # since there is a helper class, check if this C++ object is an instance of that class # http://stackoverflow.com/questions/579887/how-expensive-is-rtti/1468564#1468564 code_block.write_code("if (typeid(%s).name() == typeid(%s).name())\n{" % (value_value, cpp_class.helper_class.name)) code_block.indent() # yes, this is an instance of the helper class; we can get # the existing python wrapper directly from the helper # class... if type_traits.target_is_const: const_cast_value = "const_cast<%s *>(%s) " % (cpp_class.full_name, value_ptr) else: const_cast_value = value_ptr code_block.write_code( "%s = reinterpret_cast< %s* >(reinterpret_cast< %s* >(%s)->m_pyself);" % (py_name, cpp_class.pystruct, cpp_class.helper_class.name, const_cast_value)) code_block.write_code("%s->obj = %s;" % (py_name, const_cast_value)) # We are already referencing the existing python wrapper, # so we do not need a reference to the C++ object as well. if caller_owns_return and \ isinstance(cpp_class.memory_policy, ReferenceCountingPolicy): cpp_class.memory_policy.write_decref(code_block, value_ptr) code_block.write_code("Py_INCREF(%s);" % py_name) code_block.unindent() code_block.write_code("} else {") # if (typeid(*(%s)) == typeid(%s)) { ... code_block.indent() # no, this is not an instance of the helper class, we may # need to create a new wrapper, or reference existing one # if the wrapper registry tells us there is one already. # first check in the wrapper registry... try: if cpp_class.memory_policy is not None: cpp_class.wrapper_registry.write_lookup_wrapper( code_block, cpp_class.pystruct, py_name, cpp_class.memory_policy.get_pointer_to_void_name(value_ptr)) else: cpp_class.wrapper_registry.write_lookup_wrapper( code_block, cpp_class.pystruct, py_name, value_ptr) except NotSupportedError: write_create_new_wrapper() cpp_class.wrapper_registry.write_register_new_wrapper( code_block, py_name, "%s->obj" % py_name) else: code_block.write_code("if (%s == NULL) {" % py_name) code_block.indent() # wrapper registry told us there is no wrapper for # this instance => need to create new one write_create_new_wrapper() cpp_class.wrapper_registry.write_register_new_wrapper( code_block, py_name, "%s->obj" % py_name) code_block.unindent() # handle ownership rules... if caller_owns_return and \ isinstance(cpp_class.memory_policy, ReferenceCountingPolicy): code_block.write_code("} else {") code_block.indent() # If we are already referencing the existing python wrapper, # we do not need a reference to the C++ object as well. cpp_class.memory_policy.write_decref(code_block, value_ptr) code_block.unindent() code_block.write_code("}") else: code_block.write_code("}") code_block.unindent() code_block.write_code("}") # closes: if (typeid(*(%s)) == typeid(%s)) { ... } else { ... 
class CppClassParameterBase(Parameter): "Base class for all C++ Class parameter handlers" CTYPES = [] cpp_class = None #cppclass.CppClass('dummy') # CppClass instance DIRECTIONS = [Parameter.DIRECTION_IN] def __init__(self, ctype, name, direction=Parameter.DIRECTION_IN, is_const=False, default_value=None): """ :param ctype: C type, normally 'MyClass*' :param name: parameter name """ if ctype == self.cpp_class.name: ctype = self.cpp_class.full_name super(CppClassParameterBase, self).__init__( ctype, name, direction, is_const, default_value) ## name of the PyFoo * variable used in parameter parsing self.py_name = None ## it True, this parameter is 'fake', and instead of being ## passed a parameter from python it is assumed to be the ## 'self' parameter of a method wrapper self.take_value_from_python_self = False class CppClassReturnValueBase(ReturnValue): "Class return handlers -- base class" CTYPES = [] cpp_class = None #cppclass.CppClass('dummy') # CppClass instance def __init__(self, ctype, is_const=False): super(CppClassReturnValueBase, self).__init__(ctype, is_const=is_const) ## name of the PyFoo * variable used in return value building self.py_name = None class CppClassParameter(CppClassParameterBase): """ Class parameter "by-value" handler """ CTYPES = [] cpp_class = None #cppclass.CppClass('dummy') # CppClass instance DIRECTIONS = [Parameter.DIRECTION_IN] def convert_python_to_c(self, wrapper): "parses python args to get C++ value" #assert isinstance(wrapper, ForwardWrapperBase) #assert isinstance(self.cpp_class, cppclass.CppClass) if self.take_value_from_python_self: self.py_name = 'self' wrapper.call_params.append( '*((%s *) %s)->obj' % (self.cpp_class.pystruct, self.py_name)) else: implicit_conversion_sources = self.cpp_class.get_all_implicit_conversions() if not implicit_conversion_sources: if self.default_value is not None: self.cpp_class.get_construct_name() # raises an exception if the class cannot be constructed self.py_name = wrapper.declarations.declare_variable( self.cpp_class.pystruct+'*', self.name, 'NULL') wrapper.parse_params.add_parameter( 'O!', ['&'+self.cpp_class.pytypestruct, '&'+self.py_name], self.name, optional=True) wrapper.call_params.append( '(%s ? 
(*((%s *) %s)->obj) : %s)' % (self.py_name, self.cpp_class.pystruct, self.py_name, self.default_value)) else: self.py_name = wrapper.declarations.declare_variable( self.cpp_class.pystruct+'*', self.name) wrapper.parse_params.add_parameter( 'O!', ['&'+self.cpp_class.pytypestruct, '&'+self.py_name], self.name) wrapper.call_params.append( '*((%s *) %s)->obj' % (self.cpp_class.pystruct, self.py_name)) else: if self.default_value is None: self.py_name = wrapper.declarations.declare_variable( 'PyObject*', self.name) tmp_value_variable = wrapper.declarations.declare_variable( self.cpp_class.full_name, self.name) wrapper.parse_params.add_parameter('O', ['&'+self.py_name], self.name) else: self.py_name = wrapper.declarations.declare_variable( 'PyObject*', self.name, 'NULL') tmp_value_variable = wrapper.declarations.declare_variable( self.cpp_class.full_name, self.name) wrapper.parse_params.add_parameter('O', ['&'+self.py_name], self.name, optional=True) if self.default_value is None: wrapper.before_call.write_code("if (PyObject_IsInstance(%s, (PyObject*) &%s)) {\n" " %s = *((%s *) %s)->obj;" % (self.py_name, self.cpp_class.pytypestruct, tmp_value_variable, self.cpp_class.pystruct, self.py_name)) else: wrapper.before_call.write_code( "if (%s == NULL) {\n" " %s = %s;" % (self.py_name, tmp_value_variable, self.default_value)) wrapper.before_call.write_code( "} else if (PyObject_IsInstance(%s, (PyObject*) &%s)) {\n" " %s = *((%s *) %s)->obj;" % (self.py_name, self.cpp_class.pytypestruct, tmp_value_variable, self.cpp_class.pystruct, self.py_name)) for conversion_source in implicit_conversion_sources: wrapper.before_call.write_code("} else if (PyObject_IsInstance(%s, (PyObject*) &%s)) {\n" " %s = *((%s *) %s)->obj;" % (self.py_name, conversion_source.pytypestruct, tmp_value_variable, conversion_source.pystruct, self.py_name)) wrapper.before_call.write_code("} else {\n") wrapper.before_call.indent() possible_type_names = ", ".join([cls.name for cls in [self.cpp_class] + implicit_conversion_sources]) wrapper.before_call.write_code("PyErr_Format(PyExc_TypeError, \"parameter must an instance of one of the types (%s), not %%s\", Py_TYPE(%s)->tp_name);" % (possible_type_names, self.py_name)) wrapper.before_call.write_error_return() wrapper.before_call.unindent() wrapper.before_call.write_code("}") wrapper.call_params.append(tmp_value_variable) def convert_c_to_python(self, wrapper): '''Write some code before calling the Python method.''' assert isinstance(wrapper, ReverseWrapperBase) self.py_name = wrapper.declarations.declare_variable( self.cpp_class.pystruct+'*', 'py_'+self.cpp_class.name) self.cpp_class.write_allocate_pystruct(wrapper.before_call, self.py_name) if self.cpp_class.allow_subclassing: wrapper.before_call.write_code( "%s->inst_dict = NULL;" % (self.py_name,)) wrapper.before_call.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (self.py_name,)) self.cpp_class.write_create_instance(wrapper.before_call, "%s->obj" % self.py_name, self.value) self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, self.py_name, "%s->obj" % self.py_name) self.cpp_class.write_post_instance_creation_code(wrapper.before_call, "%s->obj" % self.py_name, self.value) wrapper.build_params.add_parameter("N", [self.py_name]) class CppClassRefParameter(CppClassParameterBase): "Class& handlers" CTYPES = [] cpp_class = None #cppclass.CppClass('dummy') # CppClass instance DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_OUT, Parameter.DIRECTION_INOUT] def __init__(self, ctype, name, 
direction=Parameter.DIRECTION_IN, is_const=False, default_value=None, default_value_type=None): """ :param ctype: C type, normally 'MyClass*' :param name: parameter name """ if ctype == self.cpp_class.name: ctype = self.cpp_class.full_name super(CppClassRefParameter, self).__init__( ctype, name, direction, is_const, default_value) self.default_value_type = default_value_type def convert_python_to_c(self, wrapper): "parses python args to get C++ value" #assert isinstance(wrapper, ForwardWrapperBase) #assert isinstance(self.cpp_class, cppclass.CppClass) if self.direction == Parameter.DIRECTION_IN: if self.take_value_from_python_self: self.py_name = 'self' wrapper.call_params.append( '*((%s *) %s)->obj' % (self.cpp_class.pystruct, self.py_name)) else: implicit_conversion_sources = self.cpp_class.get_all_implicit_conversions() if not (implicit_conversion_sources and self.type_traits.target_is_const): if self.default_value is not None: self.py_name = wrapper.declarations.declare_variable( self.cpp_class.pystruct+'*', self.name, 'NULL') wrapper.parse_params.add_parameter( 'O!', ['&'+self.cpp_class.pytypestruct, '&'+self.py_name], self.name, optional=True) if self.default_value_type is not None: default_value_name = wrapper.declarations.declare_variable( self.default_value_type, "%s_default" % self.name, self.default_value) wrapper.call_params.append( '(%s ? (*((%s *) %s)->obj) : %s)' % (self.py_name, self.cpp_class.pystruct, self.py_name, default_value_name)) else: self.cpp_class.get_construct_name() # raises an exception if the class cannot be constructed wrapper.call_params.append( '(%s ? (*((%s *) %s)->obj) : %s)' % (self.py_name, self.cpp_class.pystruct, self.py_name, self.default_value)) else: self.py_name = wrapper.declarations.declare_variable( self.cpp_class.pystruct+'*', self.name) wrapper.parse_params.add_parameter( 'O!', ['&'+self.cpp_class.pytypestruct, '&'+self.py_name], self.name) wrapper.call_params.append( '*((%s *) %s)->obj' % (self.cpp_class.pystruct, self.py_name)) else: if self.default_value is not None: warnings.warn("with implicit conversions, default value " "in C++ class reference parameters is ignored.") self.py_name = wrapper.declarations.declare_variable( 'PyObject*', self.name) tmp_value_variable = wrapper.declarations.declare_variable( self.cpp_class.full_name, self.name) wrapper.parse_params.add_parameter('O', ['&'+self.py_name], self.name) wrapper.before_call.write_code("if (PyObject_IsInstance(%s, (PyObject*) &%s)) {\n" " %s = *((%s *) %s)->obj;" % (self.py_name, self.cpp_class.pytypestruct, tmp_value_variable, self.cpp_class.pystruct, self.py_name)) for conversion_source in implicit_conversion_sources: wrapper.before_call.write_code("} else if (PyObject_IsInstance(%s, (PyObject*) &%s)) {\n" " %s = *((%s *) %s)->obj;" % (self.py_name, conversion_source.pytypestruct, tmp_value_variable, conversion_source.pystruct, self.py_name)) wrapper.before_call.write_code("} else {\n") wrapper.before_call.indent() possible_type_names = ", ".join([cls.name for cls in [self.cpp_class] + implicit_conversion_sources]) wrapper.before_call.write_code("PyErr_Format(PyExc_TypeError, \"parameter must an instance of one of the types (%s), not %%s\", Py_TYPE(%s)->tp_name);" % (possible_type_names, self.py_name)) wrapper.before_call.write_error_return() wrapper.before_call.unindent() wrapper.before_call.write_code("}") wrapper.call_params.append(tmp_value_variable) elif self.direction == Parameter.DIRECTION_OUT: assert not self.take_value_from_python_self self.py_name = 
wrapper.declarations.declare_variable( self.cpp_class.pystruct+'*', self.name) self.cpp_class.write_allocate_pystruct(wrapper.before_call, self.py_name) if self.cpp_class.allow_subclassing: wrapper.after_call.write_code( "%s->inst_dict = NULL;" % (self.py_name,)) wrapper.after_call.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (self.py_name,)) self.cpp_class.write_create_instance(wrapper.before_call, "%s->obj" % self.py_name, '') self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, self.py_name, "%s->obj" % self.py_name) self.cpp_class.write_post_instance_creation_code(wrapper.before_call, "%s->obj" % self.py_name, '') wrapper.call_params.append('*%s->obj' % (self.py_name,)) wrapper.build_params.add_parameter("N", [self.py_name]) ## well, personally I think inout here doesn't make much sense ## (it's just plain confusing), but might as well support it.. ## C++ class reference inout parameters allow "inplace" ## modifications, i.e. the object is not explicitly returned ## but is instead modified by the callee. elif self.direction == Parameter.DIRECTION_INOUT: assert not self.take_value_from_python_self self.py_name = wrapper.declarations.declare_variable( self.cpp_class.pystruct+'*', self.name) wrapper.parse_params.add_parameter( 'O!', ['&'+self.cpp_class.pytypestruct, '&'+self.py_name], self.name) wrapper.call_params.append( '*%s->obj' % (self.py_name)) def convert_c_to_python(self, wrapper): '''Write some code before calling the Python method.''' assert isinstance(wrapper, ReverseWrapperBase) self.py_name = wrapper.declarations.declare_variable( self.cpp_class.pystruct+'*', 'py_'+self.cpp_class.name) self.cpp_class.write_allocate_pystruct(wrapper.before_call, self.py_name) if self.cpp_class.allow_subclassing: wrapper.before_call.write_code( "%s->inst_dict = NULL;" % (self.py_name,)) wrapper.before_call.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (self.py_name,)) if self.direction == Parameter.DIRECTION_IN: if not self.cpp_class.has_copy_constructor: raise CodeGenerationError("Class {0} cannot be copied".format(self.cpp_class.full_name)) self.cpp_class.write_create_instance(wrapper.before_call, "%s->obj" % self.py_name, self.value) self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, self.py_name, "%s->obj" % self.py_name) self.cpp_class.write_post_instance_creation_code(wrapper.before_call, "%s->obj" % self.py_name, self.value) wrapper.build_params.add_parameter("N", [self.py_name]) else: ## out/inout case: ## the callee receives a "temporary wrapper", which loses ## the ->obj pointer after the python call; this is so ## that the python code directly manipulates the object ## received as parameter, instead of a copy. if self.type_traits.target_is_const: value = "(%s*) (&(%s))" % (self.cpp_class.full_name, self.value) else: value = "&(%s)" % self.value wrapper.before_call.write_code( "%s->obj = %s;" % (self.py_name, value)) wrapper.build_params.add_parameter("O", [self.py_name]) wrapper.before_call.add_cleanup_code("Py_DECREF(%s);" % self.py_name) if self.cpp_class.has_copy_constructor: ## if after the call we notice the callee kept a reference ## to the pyobject, we then swap pywrapper->obj for a copy ## of the original object. Else the ->obj pointer is ## simply erased (we never owned this object in the first ## place). 
wrapper.after_call.write_code( "if (Py_REFCNT(%s) == 1)\n" " %s->obj = NULL;\n" "else{\n" % (self.py_name, self.py_name)) wrapper.after_call.indent() self.cpp_class.write_create_instance(wrapper.after_call, "%s->obj" % self.py_name, self.value) self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.after_call, self.py_name, "%s->obj" % self.py_name) self.cpp_class.write_post_instance_creation_code(wrapper.after_call, "%s->obj" % self.py_name, self.value) wrapper.after_call.unindent() wrapper.after_call.write_code('}') else: ## it's not safe for the python wrapper to keep a ## pointer to the object anymore; just set it to NULL. wrapper.after_call.write_code("%s->obj = NULL;" % (self.py_name,)) class CppClassReturnValue(CppClassReturnValueBase): "Class return handlers" CTYPES = [] cpp_class = None #cppclass.CppClass('dummy') # CppClass instance REQUIRES_ASSIGNMENT_CONSTRUCTOR = True def __init__(self, ctype, is_const=False): """override to fix the ctype parameter with namespace information""" if ctype == self.cpp_class.name: ctype = self.cpp_class.full_name super(CppClassReturnValue, self).__init__(ctype, is_const=is_const) def get_c_error_return(self): # only used in reverse wrappers """See ReturnValue.get_c_error_return""" if self.type_traits.type_is_reference: raise NotSupportedError return "return %s();" % (self.cpp_class.full_name,) def convert_c_to_python(self, wrapper): """see ReturnValue.convert_c_to_python""" py_name = wrapper.declarations.declare_variable( self.cpp_class.pystruct+'*', 'py_'+self.cpp_class.name) self.py_name = py_name self.cpp_class.write_allocate_pystruct(wrapper.after_call, self.py_name) if self.cpp_class.allow_subclassing: wrapper.after_call.write_code( "%s->inst_dict = NULL;" % (py_name,)) wrapper.after_call.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (py_name,)) if not self.cpp_class.has_copy_constructor: raise CodeGenerationError("Class {0} cannot be copied".format(self.cpp_class.full_name)) self.cpp_class.write_create_instance(wrapper.after_call, "%s->obj" % py_name, self.value) self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.after_call, py_name, "%s->obj" % py_name) self.cpp_class.write_post_instance_creation_code(wrapper.after_call, "%s->obj" % py_name, self.value) #... 
wrapper.build_params.add_parameter("N", [py_name], prepend=True) def convert_python_to_c(self, wrapper): """see ReturnValue.convert_python_to_c""" if self.type_traits.type_is_reference: raise NotSupportedError name = wrapper.declarations.declare_variable( self.cpp_class.pystruct+'*', "tmp_%s" % self.cpp_class.name) wrapper.parse_params.add_parameter( 'O!', ['&'+self.cpp_class.pytypestruct, '&'+name]) if self.REQUIRES_ASSIGNMENT_CONSTRUCTOR: wrapper.after_call.write_code('%s %s = *%s->obj;' % (self.cpp_class.full_name, self.value, name)) else: wrapper.after_call.write_code('%s = *%s->obj;' % (self.value, name)) class CppClassRefReturnValue(CppClassReturnValueBase): "Class return handlers" CTYPES = [] cpp_class = None #cppclass.CppClass('dummy') # CppClass instance REQUIRES_ASSIGNMENT_CONSTRUCTOR = True def __init__(self, ctype, is_const=False, caller_owns_return=False, reference_existing_object=None, return_internal_reference=None, caller_manages_return=True): #override to fix the ctype parameter with namespace information if ctype == self.cpp_class.name: ctype = self.cpp_class.full_name super(CppClassRefReturnValue, self).__init__(ctype, is_const=is_const) self.reference_existing_object = reference_existing_object self.return_internal_reference = return_internal_reference if self.return_internal_reference: assert self.reference_existing_object is None self.reference_existing_object = True self.caller_owns_return = caller_owns_return self.caller_manages_return = caller_manages_return def get_c_error_return(self): # only used in reverse wrappers """See ReturnValue.get_c_error_return""" if ( self.type_traits.type_is_reference and not self.type_traits.target_is_const ): raise NotSupportedError("non-const reference return not supported") return "{static %s __err; return __err;}" % (self.cpp_class.full_name,) def convert_c_to_python(self, wrapper): """see ReturnValue.convert_c_to_python""" py_name = wrapper.declarations.declare_variable( self.cpp_class.pystruct+'*', 'py_'+self.cpp_class.name) self.py_name = py_name if self.reference_existing_object or self.caller_owns_return or not self.caller_manages_return: common_shared_object_return(self.value, py_name, self.cpp_class, wrapper.after_call, self.type_traits, self.caller_owns_return, self.reference_existing_object, type_is_pointer=False, caller_manages_return=self.caller_manages_return) else: self.cpp_class.write_allocate_pystruct(wrapper.after_call, py_name) wrapper.after_call.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % (py_name,)) if not self.cpp_class.has_copy_constructor: raise CodeGenerationError("Class {0} cannot be copied".format(self.cpp_class.full_name)) self.cpp_class.write_create_instance(wrapper.after_call, "%s->obj" % py_name, self.value) self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.after_call, py_name, "%s->obj" % py_name) self.cpp_class.write_post_instance_creation_code(wrapper.after_call, "%s->obj" % py_name, self.value) #... 
wrapper.build_params.add_parameter("N", [py_name], prepend=True) def convert_python_to_c(self, wrapper): """see ReturnValue.convert_python_to_c""" if ( self.type_traits.type_is_reference and not self.type_traits.target_is_const ): raise NotSupportedError("non-const reference return not supported") name = wrapper.declarations.declare_variable( self.cpp_class.pystruct+'*', "tmp_%s" % self.cpp_class.name) wrapper.parse_params.add_parameter( 'O!', ['&'+self.cpp_class.pytypestruct, '&'+name]) if self.REQUIRES_ASSIGNMENT_CONSTRUCTOR: wrapper.after_call.write_code('%s %s = *%s->obj;' % (self.cpp_class.full_name, self.value, name)) else: wrapper.after_call.write_code('%s = *%s->obj;' % (self.value, name)) class CppClassPtrParameter(CppClassParameterBase): "Class* handlers" CTYPES = [] cpp_class = None #cppclass.CppClass('dummy') # CppClass instance DIRECTIONS = [Parameter.DIRECTION_IN, Parameter.DIRECTION_OUT, Parameter.DIRECTION_INOUT] SUPPORTS_TRANSFORMATIONS = True def __init__(self, ctype, name, direction=Parameter.DIRECTION_IN, transfer_ownership=None, custodian=None, is_const=False, null_ok=False, default_value=None): """ Type handler for a pointer-to-class parameter (MyClass*) :param ctype: C type, normally 'MyClass*' :param name: parameter name :param transfer_ownership: if True, the callee becomes responsible for freeing the object. If False, the caller remains responsible for the object. In either case, the original object pointer is passed, not a copy. In case transfer_ownership=True, it is invalid to perform operations on the object after the call (calling any method will cause a null pointer dereference and crash the program). :param custodian: if given, points to an object (custodian) that keeps the python wrapper for the parameter alive. Possible values are: - None: no object is custodian; - -1: the return value object; - 0: the instance of the method in which the ReturnValue is being used will become the custodian; - integer > 0: parameter number, starting at 1 (i.e. not counting the self/this parameter), whose object will be used as custodian. :param is_const: if true, the parameter has a const attached to the leftmost :param null_ok: if true, None is accepted and mapped into a C NULL pointer :param default_value: default parameter value (as C expression string); probably, the only default value that makes sense here is probably 'NULL'. .. note:: Only arguments which are instances of C++ classes wrapped by PyBindGen can be used as custodians. 
""" if ctype == self.cpp_class.name: ctype = self.cpp_class.full_name super(CppClassPtrParameter, self).__init__( ctype, name, direction, is_const, default_value) if transfer_ownership is None and self.type_traits.target_is_const: transfer_ownership = False self.custodian = custodian self.transfer_ownership = transfer_ownership self.null_ok = null_ok if transfer_ownership is None: raise TypeConfigurationError("Missing transfer_ownership option") def convert_python_to_c(self, wrapper): "parses python args to get C++ value" #assert isinstance(wrapper, ForwardWrapperBase) #assert isinstance(self.cpp_class, cppclass.CppClass) if self.take_value_from_python_self: self.py_name = 'self' value_ptr = 'self->obj' else: self.py_name = wrapper.declarations.declare_variable( self.cpp_class.pystruct+'*', self.name, initializer=(self.default_value and 'NULL' or None)) value_ptr = wrapper.declarations.declare_variable("%s*" % self.cpp_class.full_name, "%s_ptr" % self.name) if self.null_ok: num = wrapper.parse_params.add_parameter('O', ['&'+self.py_name], self.name, optional=bool(self.default_value)) wrapper.before_call.write_error_check( "%s && ((PyObject *) %s != Py_None) && !PyObject_IsInstance((PyObject *) %s, (PyObject *) &%s)" % (self.py_name, self.py_name, self.py_name, self.cpp_class.pytypestruct), 'PyErr_SetString(PyExc_TypeError, "Parameter %i must be of type %s");' % (num, self.cpp_class.name)) wrapper.before_call.write_code("if (%(PYNAME)s) {\n" " if ((PyObject *) %(PYNAME)s == Py_None)\n" " %(VALUE)s = NULL;\n" " else\n" " %(VALUE)s = %(PYNAME)s->obj;\n" "} else {\n" " %(VALUE)s = NULL;\n" "}" % dict(PYNAME=self.py_name, VALUE=value_ptr)) else: wrapper.parse_params.add_parameter( 'O!', ['&'+self.cpp_class.pytypestruct, '&'+self.py_name], self.name, optional=bool(self.default_value)) wrapper.before_call.write_code("%s = (%s ? 
%s->obj : NULL);" % (value_ptr, self.py_name, self.py_name)) value = self.transformation.transform(self, wrapper.declarations, wrapper.before_call, value_ptr) wrapper.call_params.append(value) if self.transfer_ownership: if not isinstance(self.cpp_class.memory_policy, ReferenceCountingPolicy): # if we transfer ownership, in the end we no longer own the object, so clear our pointer wrapper.after_call.write_code('if (%s) {' % self.py_name) wrapper.after_call.indent() if self.cpp_class.memory_policy is not None: self.cpp_class.wrapper_registry.write_unregister_wrapper(wrapper.after_call, '%s' % self.py_name, self.cpp_class.memory_policy.get_pointer_to_void_name('%s->obj' % self.py_name)) else: self.cpp_class.wrapper_registry.write_unregister_wrapper(wrapper.after_call, '%s' % self.py_name, '%s->obj' % self.py_name) wrapper.after_call.write_code('%s->obj = NULL;' % self.py_name) wrapper.after_call.unindent() wrapper.after_call.write_code('}') else: wrapper.before_call.write_code("if (%s) {" % self.py_name) wrapper.before_call.indent() self.cpp_class.memory_policy.write_incref(wrapper.before_call, "%s->obj" % self.py_name) wrapper.before_call.unindent() wrapper.before_call.write_code("}") def convert_c_to_python(self, wrapper): """foo""" ## Value transformations value = self.transformation.untransform( self, wrapper.declarations, wrapper.after_call, self.value) ## declare wrapper variable py_name = wrapper.declarations.declare_variable( self.cpp_class.pystruct+'*', 'py_'+self.cpp_class.name) self.py_name = py_name def write_create_new_wrapper(): """Code path that creates a new wrapper for the parameter""" ## Find out what Python wrapper to use, in case ## automatic_type_narrowing is active and we are not forced to ## make a copy of the object if (self.cpp_class.automatic_type_narrowing and (self.transfer_ownership or isinstance(self.cpp_class.memory_policy, ReferenceCountingPolicy))): typeid_map_name = self.cpp_class.get_type_narrowing_root().typeid_map_name wrapper_type = wrapper.declarations.declare_variable( 'PyTypeObject*', 'wrapper_type', '0') wrapper.before_call.write_code( '%s = %s.lookup_wrapper(typeid(*%s), &%s);' % (wrapper_type, typeid_map_name, value, self.cpp_class.pytypestruct)) else: wrapper_type = '&'+self.cpp_class.pytypestruct ## Create the Python wrapper object self.cpp_class.write_allocate_pystruct(wrapper.before_call, py_name, wrapper_type) wrapper.before_call.write_code("%s->flags = PYBINDGEN_WRAPPER_FLAG_NONE;" % py_name) self.py_name = py_name ## Assign the C++ value to the Python wrapper if self.transfer_ownership: wrapper.before_call.write_code("%s->obj = %s;" % (py_name, value)) else: if not isinstance(self.cpp_class.memory_policy, ReferenceCountingPolicy): ## The PyObject gets a temporary pointer to the ## original value; the pointer is converted to a ## copy in case the callee retains a reference to ## the object after the call. if self.direction == Parameter.DIRECTION_IN: if not self.cpp_class.has_copy_constructor: raise CodeGenerationError("Class {0} cannot be copied".format(self.cpp_class.full_name)) self.cpp_class.write_create_instance(wrapper.before_call, "%s->obj" % self.py_name, '*'+self.value) self.cpp_class.write_post_instance_creation_code(wrapper.before_call, "%s->obj" % self.py_name, '*'+self.value) else: ## out/inout case: ## the callee receives a "temporary wrapper", which loses ## the ->obj pointer after the python call; this is so ## that the python code directly manipulates the object ## received as parameter, instead of a copy. 
if self.type_traits.target_is_const: unconst_value = "(%s*) (%s)" % (self.cpp_class.full_name, value) else: unconst_value = value wrapper.before_call.write_code( "%s->obj = %s;" % (self.py_name, unconst_value)) wrapper.build_params.add_parameter("O", [self.py_name]) wrapper.before_call.add_cleanup_code("Py_DECREF(%s);" % self.py_name) if self.cpp_class.has_copy_constructor: ## if after the call we notice the callee kept a reference ## to the pyobject, we then swap pywrapper->obj for a copy ## of the original object. Else the ->obj pointer is ## simply erased (we never owned this object in the first ## place). wrapper.after_call.write_code( "if (Py_REFCNT(%s) == 1)\n" " %s->obj = NULL;\n" "else {\n" % (self.py_name, self.py_name)) wrapper.after_call.indent() self.cpp_class.write_create_instance(wrapper.after_call, "%s->obj" % self.py_name, '*'+value) self.cpp_class.write_post_instance_creation_code(wrapper.after_call, "%s->obj" % self.py_name, '*'+value) wrapper.after_call.unindent() wrapper.after_call.write_code('}') else: ## it's not safe for the python wrapper to keep a ## pointer to the object anymore; just set it to NULL. wrapper.after_call.write_code("%s->obj = NULL;" % (self.py_name,)) else: ## The PyObject gets a new reference to the same obj self.cpp_class.memory_policy.write_incref(wrapper.before_call, value) if self.type_traits.target_is_const: wrapper.before_call.write_code("%s->obj = (%s*) (%s);" % (py_name, self.cpp_class.full_name, value)) else: wrapper.before_call.write_code("%s->obj = %s;" % (py_name, value)) ## closes def write_create_new_wrapper(): if self.cpp_class.helper_class is None: try: if self.cpp_class.memory_policy is not None: self.cpp_class.wrapper_registry.write_lookup_wrapper( wrapper.before_call, self.cpp_class.pystruct, py_name, self.cpp_class.memory_policy.get_pointer_to_void_name(value)) else: self.cpp_class.wrapper_registry.write_lookup_wrapper( wrapper.before_call, self.cpp_class.pystruct, py_name, value) except NotSupportedError: write_create_new_wrapper() self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, py_name, "%s->obj" % py_name) else: wrapper.before_call.write_code("if (%s == NULL)\n{" % py_name) wrapper.before_call.indent() write_create_new_wrapper() self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, py_name, "%s->obj" % py_name) wrapper.before_call.unindent() wrapper.before_call.write_code('}') wrapper.build_params.add_parameter("N", [py_name]) else: wrapper.before_call.write_code("if (typeid(*(%s)).name() == typeid(%s).name())\n{" % (value, self.cpp_class.helper_class.name)) wrapper.before_call.indent() if self.type_traits.target_is_const: wrapper.before_call.write_code( "%s = (%s*) (((%s*) ((%s*) %s))->m_pyself);" % (py_name, self.cpp_class.pystruct, self.cpp_class.helper_class.name, self.cpp_class.full_name, value)) wrapper.before_call.write_code("%s->obj = (%s*) (%s);" % (py_name, self.cpp_class.full_name, value)) else: wrapper.before_call.write_code( "%s = (%s*) (((%s*) %s)->m_pyself);" % (py_name, self.cpp_class.pystruct, self.cpp_class.helper_class.name, value)) wrapper.before_call.write_code("%s->obj = %s;" % (py_name, value)) wrapper.before_call.write_code("Py_INCREF(%s);" % py_name) wrapper.before_call.unindent() wrapper.before_call.write_code("} else {") wrapper.before_call.indent() try: if self.cpp_class.memory_policy is not None: self.cpp_class.wrapper_registry.write_lookup_wrapper( wrapper.before_call, self.cpp_class.pystruct, py_name, 
self.cpp_class.memory_policy.get_pointer_to_void_name(value)) else: self.cpp_class.wrapper_registry.write_lookup_wrapper( wrapper.before_call, self.cpp_class.pystruct, py_name, value) except NotSupportedError: write_create_new_wrapper() self.cpp_class.wrapper_registry.write_register_new_wrapper( wrapper.before_call, py_name, "%s->obj" % py_name) else: wrapper.before_call.write_code("if (%s == NULL)\n{" % py_name) wrapper.before_call.indent() write_create_new_wrapper() self.cpp_class.wrapper_registry.write_register_new_wrapper(wrapper.before_call, py_name, "%s->obj" % py_name) wrapper.before_call.unindent() wrapper.before_call.write_code('}') # closes if (%s == NULL) wrapper.before_call.unindent() wrapper.before_call.write_code("}") # closes if (typeid(*(%s)) == typeid(%s))\n{ wrapper.build_params.add_parameter("N", [py_name]) class CppClassPtrReturnValue(CppClassReturnValueBase): "Class* return handler" CTYPES = [] SUPPORTS_TRANSFORMATIONS = True cpp_class = None #cppclass.CppClass('dummy') # CppClass instance def __init__(self, ctype, caller_owns_return=None, custodian=None, is_const=False, reference_existing_object=None, return_internal_reference=None, caller_manages_return=True, free_after_copy=False): """ :param ctype: C type, normally 'MyClass*' :param caller_owns_return: if true, ownership of the object pointer is transferred to the caller :param free_after_copy: if true, the python wrapper must call delete on the returned pointer once it has taken a copy. :param custodian: bind the life cycle of the python wrapper for the return value object (ward) to that of the object indicated by this parameter (custodian). Possible values are: - None: no object is custodian; - 0: the instance of the method in which the ReturnValue is being used will become the custodian; - integer > 0: parameter number, starting at 1 (i.e. not counting the self/this parameter), whose object will be used as custodian. :param reference_existing_object: if true, ownership of the pointed-to object remains to be the caller's, but we do not make a copy. The callee gets a reference to the existing object, but is not responsible for freeing it. Note that using this memory management style is dangerous, as it exposes the Python programmer to the possibility of keeping a reference to an object that may have been deallocated in the mean time. Calling methods on such an object would lead to a memory error. :param return_internal_reference: like reference_existing_object, but additionally adds custodian/ward to bind the lifetime of the 'self' object (instance the method is bound to) to the lifetime of the return value. .. note:: Only arguments which are instances of C++ classes wrapped by PyBindGen can be used as custodians. 
""" if ctype == self.cpp_class.name: ctype = self.cpp_class.full_name super(CppClassPtrReturnValue, self).__init__(ctype, is_const=is_const) if caller_owns_return is None: # For "const Foo*", we assume caller_owns_return=False by default if self.type_traits.target_is_const: caller_owns_return = False self.caller_owns_return = caller_owns_return self.caller_manages_return = caller_manages_return self.free_after_copy = free_after_copy self.reference_existing_object = reference_existing_object self.return_internal_reference = return_internal_reference if self.return_internal_reference: assert self.reference_existing_object is None self.reference_existing_object = True self.custodian = custodian if self.caller_owns_return and self.free_after_copy: raise TypeConfigurationError("only one of caller_owns_return or free_after_copy can be given") if self.caller_owns_return is None\ and self.free_after_copy is None \ and self.reference_existing_object is None: raise TypeConfigurationError("Either caller_owns_return or self.reference_existing_object must be given") def get_c_error_return(self): # only used in reverse wrappers """See ReturnValue.get_c_error_return""" return "return NULL;" def convert_c_to_python(self, wrapper): """See ReturnValue.convert_c_to_python""" ## Value transformations value = self.transformation.untransform( self, wrapper.declarations, wrapper.after_call, self.value) # if value is NULL, return None wrapper.after_call.write_code("if (!(%s)) {\n" " Py_INCREF(Py_None);\n" " return Py_None;\n" "}" % value) ## declare wrapper variable py_name = wrapper.declarations.declare_variable( self.cpp_class.pystruct+'*', 'py_'+self.cpp_class.name) self.py_name = py_name common_shared_object_return(value, py_name, self.cpp_class, wrapper.after_call, self.type_traits, self.caller_owns_return, self.reference_existing_object, type_is_pointer=True, caller_manages_return=self.caller_manages_return, free_after_copy=self.free_after_copy) # return the value wrapper.build_params.add_parameter("N", [py_name], prepend=True) if self.free_after_copy: wrapper.after_call.add_cleanup_code("delete retval;") wrapper.after_call.add_cleanup_code("// free_after_copy for %s* %ss" % (self.cpp_class.name,wrapper.function_name)) def convert_python_to_c(self, wrapper): """See ReturnValue.convert_python_to_c""" name = wrapper.declarations.declare_variable( self.cpp_class.pystruct+'*', "tmp_%s" % self.cpp_class.name) wrapper.parse_params.add_parameter( 'O!', ['&'+self.cpp_class.pytypestruct, '&'+name]) value = self.transformation.transform( self, wrapper.declarations, wrapper.after_call, "%s->obj" % name) ## now the hairy part :) if self.caller_owns_return: if not isinstance(self.cpp_class.memory_policy, ReferenceCountingPolicy): ## the caller receives a copy, if possible try: if not self.cpp_class.has_copy_constructor: raise CodeGenerationError("Class {0} cannot be copied".format(self.cpp_class.full_name)) self.cpp_class.write_create_instance(wrapper.after_call, "%s" % self.value, '*'+value) except CodeGenerationError: copy_possible = False else: copy_possible = True if copy_possible: self.cpp_class.write_post_instance_creation_code(wrapper.after_call, "%s" % self.value, '*'+value) else: # value = pyobj->obj; pyobj->obj = NULL; wrapper.after_call.write_code( "%s = %s;" % (self.value, value)) wrapper.after_call.write_code( "%s = NULL;" % (value,)) else: ## the caller gets a new reference to the same obj self.cpp_class.memory_policy.write_incref(wrapper.after_call, value) if self.type_traits.target_is_const: 
wrapper.after_call.write_code( "%s = const_cast< %s* >(%s);" % (self.value, self.cpp_class.full_name, value)) else: wrapper.after_call.write_code( "%s = %s;" % (self.value, value)) else: ## caller gets a shared pointer ## but this is dangerous, avoid at all cost!!! wrapper.after_call.write_code( "// dangerous!\n%s = %s;" % (self.value, value)) warnings.warn("Returning shared pointers is dangerous!" " The C++ API should be redesigned " "to avoid this situation.") ## ## Core of the custodians-and-wards implementation ## def scan_custodians_and_wards(wrapper): """ Scans the return value and parameters for custodian/ward options, converts them to add_custodian_and_ward API calls. Wrappers that implement custodian_and_ward are: CppMethod, Function, and CppConstructor. """ assert hasattr(wrapper, "add_custodian_and_ward") for num, param in enumerate(wrapper.parameters): custodian = getattr(param, 'custodian', None) if custodian is not None: wrapper.add_custodian_and_ward(custodian, num+1) custodian = getattr(wrapper.return_value, 'custodian', None) if custodian is not None: wrapper.add_custodian_and_ward(custodian, -1) if getattr(wrapper.return_value, "return_internal_reference", False): wrapper.add_custodian_and_ward(-1, 0) def _add_ward(code_block, custodian, ward): wards = code_block.declare_variable( 'PyObject*', 'wards') code_block.write_code( "%(wards)s = PyObject_GetAttrString(%(custodian)s, (char *) \"__wards__\");" % vars()) code_block.write_code( "if (%(wards)s == NULL) {\n" " PyErr_Clear();\n" " %(wards)s = PyList_New(0);\n" " PyObject_SetAttrString(%(custodian)s, (char *) \"__wards__\", %(wards)s);\n" "}" % vars()) code_block.write_code( "if (%(ward)s && !PySequence_Contains(%(wards)s, %(ward)s))\n" " PyList_Append(%(wards)s, %(ward)s);" % dict(wards=wards, ward=ward)) code_block.add_cleanup_code("Py_DECREF(%s);" % wards) def _get_custodian_or_ward(wrapper, num): if num == -1: assert wrapper.return_value.py_name is not None return "((PyObject *) %s)" % wrapper.return_value.py_name elif num == 0: return "((PyObject *) self)" else: assert wrapper.parameters[num-1].py_name is not None return "((PyObject *) %s)" % wrapper.parameters[num-1].py_name def implement_parameter_custodians_precall(wrapper): for custodian, ward, postcall in wrapper.custodians_and_wards: if not postcall: _add_ward(wrapper.before_call, _get_custodian_or_ward(wrapper, custodian), _get_custodian_or_ward(wrapper, ward)) def implement_parameter_custodians_postcall(wrapper): for custodian, ward, postcall in wrapper.custodians_and_wards: if postcall: _add_ward(wrapper.after_call, _get_custodian_or_ward(wrapper, custodian), _get_custodian_or_ward(wrapper, ward))
repo_name: gjcarneiro/pybindgen
path: pybindgen/cppclass.py
language: Python
license: lgpl-2.1
size: 175,750
keyword: [ "VisIt" ]
text_hash: 1f5007262875ae849cf2e710e96117bcda2cd102868d11eaa9f09315077efff9
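The parameter and return-value handlers defined in the record above (CppClassParameter, CppClassPtrParameter, CppClassPtrReturnValue, and friends) are instantiated indirectly through pybindgen's module-description API. Below is a minimal sketch of how the ownership options they document get exercised; `mylib.h` and `MyClass` are hypothetical names used only for illustration, and exact signatures may vary between pybindgen versions:

import sys
from pybindgen import Module, FileCodeSink, retval, param

mod = Module('mylib')
mod.add_include('"mylib.h"')  # hypothetical C++ header declaring MyClass

klass = mod.add_class('MyClass')
klass.add_constructor([])
# by-value parameter -> handled by a CppClassParameter subclass
klass.add_method('Distance', retval('double'), [param('MyClass', 'other')])
# pointer return, ownership transferred to Python -> CppClassPtrReturnValue
klass.add_method('Clone', retval('MyClass *', caller_owns_return=True), [])
# pointer parameter where the callee must not free it -> CppClassPtrParameter
klass.add_method('Inspect', None,
                 [param('MyClass *', 'obj', transfer_ownership=False)])

mod.generate(FileCodeSink(sys.stdout))  # writes the generated C wrapper code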
#!/usr/bin/python
#
# Simple script which converts the FITS headers associated with the Brian McLean
# DSS images into simplified JSON files
# Fabien Chereau fchereau@eso.org
#

import math
import os
import sys
import Image
from astLib import astWCS
import skyTile

levels = ["x64", "x32", "x16", "x8", "x4", "x2", "x1"]

# Define the invalid zones in the plates corners for N and S plates
removeBoxN = [64 * 300 - 2000, 1199]
removeBoxS = [480, 624]


def getIntersectPoly(baseFileName, curLevel, i, j):
    """Return the convex polygons in pixel space defining the valid area of the
    tile, or None if the poly is fully in the invalid area"""
    scale = 2 ** (6 - curLevel) * 300
    if baseFileName[0] == 'N':
        box = removeBoxN
        x = float(box[0] - i * scale) / scale * 300.
        y = float(box[1] - j * scale) / scale * 300.
        # x,y is the position of the box top left corner in pixel wrt lower
        # left corner of current tile
        if x > 300. or y <= 0.:
            # Tile fully valid
            return [[[0, 0], [300, 0], [300, 300], [0, 300]]]
        if x <= 0. and y >= 300.:
            # Tile fully invalid
            return None
        if x <= 0.:
            # assert y > 0  # (always true, tested above)
            assert y <= 300.
            return [[[0, y], [300, y], [300, 300], [0, 300]]]
        if y >= 300.:
            assert x > 0
            # assert x <= 300.  # (always true, tested above)
            return [[[0, 0], [x, 0], [x, 300], [0, 300]]]
        return [[[0, 0], [x, 0], [x, 300], [0, 300]],
                [[x, y], [300, y], [300, 300], [x, 300]]]
    else:
        box = removeBoxS
        x = float(i * scale - box[0]) / scale * 300.
        y = float(box[1] - j * scale) / scale * 300.
        # x,y is the position of the box top right corner in pixel wrt lower
        # left corner of current tile
        if x > 0. or y <= 0.:
            # Tile fully valid
            return [[[0, 0], [300, 0], [300, 300], [0, 300]]]
        if x <= -300. and y >= 300.:
            # Tile fully invalid
            return None
        if x <= -300.:
            # assert y > 0  # (always true, tested above)
            assert y <= 300.
            return [[[0, y], [300, y], [300, 300], [0, 300]]]
        if y >= 300.:
            # assert x <= 0  # (always true, tested above)
            assert x > -300.
            return [[[-x, 0], [300, 0], [300, 300], [-x, 300]]]
        return [[[-x, 0], [300, 0], [300, 300], [-x, 300]],
                [[0, y], [-x, y], [-x, 300], [0, 300]]]


def createTile(currentLevel, maxLevel, i, j, outDirectory, plateName, special=False):
    # Create the associated tile description
    t = skyTile.SkyImageTile()
    t.level = currentLevel
    t.i = i
    t.j = j
    t.imageUrl = "x%.2d/" % (2 ** currentLevel) + "x%.2d_%.2d_%.2d.jpg" % (2 ** currentLevel, i, j)
    if currentLevel == 0:
        t.credits = "Copyright (C) 2008, STScI Digitized Sky Survey"
        t.infoUrl = "http://stdatu.stsci.edu/cgi-bin/dss_form"
        # t.maxBrightness = 10

    # Create the matching sky polygons, return if there is no relevant polygon
    if special is True:
        pl = [[[0, 0], [300, 0], [300, 300], [0, 300]]]
    else:
        pl = getIntersectPoly(plateName, currentLevel, i, j)
    if pl is None or not pl:
        return None

    # Get the WCS from the input FITS header file for the tile
    wcs = astWCS.WCS(plateName + "/" + levels[currentLevel] + "/" + plateName
                     + "_%.2d_%.2d_" % (i, j) + levels[currentLevel] + ".hhh")
    naxis1 = wcs.header.get('NAXIS1')
    naxis2 = wcs.header.get('NAXIS2')

    t.skyConvexPolygons = []
    for idx, poly in enumerate(pl):
        p = [wcs.pix2wcs(v[0] + 0.5, v[1] + 0.5) for iv, v in enumerate(poly)]
        t.skyConvexPolygons.append(p)

    t.textureCoords = []
    for idx, poly in enumerate(pl):
        p = [(float(v[0]) / naxis1, float(v[1]) / naxis2) for iv, v in enumerate(poly)]
        t.textureCoords.append(p)

    v10 = wcs.pix2wcs(1, 0)
    v01 = wcs.pix2wcs(0, 1)
    v00 = wcs.pix2wcs(0, 0)
    t.minResolution = max(abs(v10[0] - v00[0]) * math.cos(v00[1] * math.pi / 180.),
                          abs(v01[1] - v00[1]))

    if currentLevel >= maxLevel:
        return t

    # Recursively creates the 4 sub-tiles
    sub = createTile(currentLevel + 1, maxLevel, i * 2, j * 2, outDirectory, plateName)
    if sub is not None:
        t.subTiles.append(sub)
    sub = createTile(currentLevel + 1, maxLevel, i * 2 + 1, j * 2, outDirectory, plateName)
    if sub is not None:
        t.subTiles.append(sub)
    sub = createTile(currentLevel + 1, maxLevel, i * 2 + 1, j * 2 + 1, outDirectory, plateName)
    if sub is not None:
        t.subTiles.append(sub)
    sub = createTile(currentLevel + 1, maxLevel, i * 2, j * 2 + 1, outDirectory, plateName)
    if sub is not None:
        t.subTiles.append(sub)
    return t


def generateJpgTiles(inDirectory, outDirectory):
    # Create a reduced 256x256 version of all the jpeg
    for curLevel in range(0, len(levels)):
        fullOutDir = outDirectory + "/x%.2d" % (2 ** curLevel)
        if not os.path.exists(fullOutDir):
            os.makedirs(fullOutDir)
            print "Create directory " + fullOutDir
        for i in range(0, 2 ** curLevel):
            for j in range(0, 2 ** curLevel):
                baseFileName = "x%.2d_%.2d_%.2d" % (2 ** curLevel, i, j)
                im = Image.open(inDirectory + "/" + levels[curLevel] + "/" + inDirectory
                                + '_' + "%.2d_%.2d_" % (i, j) + levels[curLevel] + ".jpg")
                # Enhance darker part of the image
                im3 = im.point(lambda t: 2. * t - 256. * (t / 256.) ** 1.6)
                im2 = im3.transform((256, 256), Image.EXTENT, (0, 0, 300, 300), Image.BILINEAR)
                im2.save(fullOutDir + '/' + baseFileName + ".jpg")


def plateRange():
    if len(sys.argv) != 4:
        print "Usage: " + sys.argv[0] + " prefix startPlate stopPlate"
        exit(-1)
    prefix = sys.argv[1]
    outDir = "/tmp/tmpPlate"
    nRange = range(int(sys.argv[2]), int(sys.argv[3]))
    for i in nRange:
        if os.path.exists(outDir):
            os.system("rm -r " + outDir)
        os.makedirs(outDir)
        plateName = prefix + "%.3i" % i
        generateJpgTiles(plateName, outDir)

        # Create all the JSON files
        masterTile = createTile(0, 6, 0, 0, outDir, plateName)
        masterTile.outputJSON(qCompress=True, maxLevelPerFile=2, outDir=outDir + '/')

        command = ("cd /tmp && mv tmpPlate " + plateName + " && tar -cf " + plateName
                   + ".tar " + plateName + " && rm -rf " + plateName)
        print command
        os.system(command)
        command = ("cd /tmp && scp " + plateName
                   + ".tar vosw@voint1.hq.eso.org:/work/fabienDSS2/" + plateName + ".tar")
        print command
        os.system(command)
        command = "rm /tmp/" + plateName + ".tar"
        print command
        os.system(command)


def mainHeader():
    # Generate the top level file containing pointers on all plates
    outDir = "/tmp/tmpPlate"
    with open('/tmp/allDSS.json', 'w') as f:
        f.write('{\n')
        f.write('"minResolution" : 0.1,\n')
        f.write('"maxBrightness" : 13,\n')
        f.write('"subTiles" : \n[\n')
        for prefix in ['N', 'S']:
            if prefix == 'N':
                nRange = range(2, 898)
            if prefix == 'S':
                nRange = range(1, 894)
            for i in nRange:
                plateName = prefix + "%.3i" % i
                ti = createTile(0, 0, 0, 0, outDir, plateName, True)
                assert ti is not None
                f.write('\t{\n')
                f.write('\t\t"minResolution" : %.8f,\n' % ti.minResolution)
                f.write('\t\t"worldCoords" : ')
                skyTile.writePolys(ti.skyConvexPolygons, f)
                f.write(',\n')
                f.write('\t\t"subTiles" : ["' + plateName + "/x01_00_00.json.qZ" + '"]\n')
                f.write('\t},\n')
        f.seek(-2, os.SEEK_CUR)
        f.write('\n]}\n')


if __name__ == "__main__":
    import psyco
    psyco.full()
    plateRange()
repo_name: Stellarium/stellarium
path: util/dssheaderToJSON.py
language: Python
license: gpl-2.0
size: 8,050
keyword: [ "Brian" ]
text_hash: 13f2ddd646556ea9929dbae2051e7c4d171cfcd6a48a06bc1e1c2fe28b227480
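To make the tile addressing in the script above concrete: level k of the quadtree holds 2**k x 2**k tiles of 300x300 source pixels, and createTile recurses into four children per tile. A small self-contained sketch of the naming and recursion scheme (the helper names are ours, not the script's):

def tile_image_url(level, i, j):
    # mirrors t.imageUrl in createTile above
    n = 2 ** level
    return "x%.2d/" % n + "x%.2d_%.2d_%.2d.jpg" % (n, i, j)

def children(level, i, j):
    # the four sub-tiles, in the order createTile recurses into them
    return [(level + 1, 2 * i + di, 2 * j + dj)
            for di, dj in ((0, 0), (1, 0), (1, 1), (0, 1))]

print(tile_image_url(0, 0, 0))  # -> x01/x01_00_00.jpg
print(children(2, 1, 1))        # -> [(3, 2, 2), (3, 3, 2), (3, 3, 3), (3, 2, 3)]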
#!/bin/python
import vtk
import vtk.util.colors

# Four points: the origin plus one unit step along each axis
points = vtk.vtkPoints()
points.InsertNextPoint(0, 0, 0)
points.InsertNextPoint(0, 1, 0)
points.InsertNextPoint(1, 0, 0)
points.InsertNextPoint(0, 0, 1)

pointsPolyData = vtk.vtkPolyData()
pointsPolyData.SetPoints(points)

# vtkVertexGlyphFilter creates a vertex cell for every point, so the
# otherwise cell-less polydata becomes renderable
vertexFilter = vtk.vtkVertexGlyphFilter()
vertexFilter.SetInputData(pointsPolyData)
vertexFilter.Update()

mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(vertexFilter.GetOutput())

actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(vtk.util.colors.banana)

renderer = vtk.vtkRenderer()
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)

renderer.AddActor(actor)
renderer.SetBackground(0., 0., 0.)

renderWindowInteractor.Initialize()
#renderer.ResetCamera()
#renderer.GetActiveCamera().Zoom(1.5)
renderWindow.Render()
renderWindowInteractor.Start()
repo_name: trianam/tests
path: python/vtkPoints.py
language: Python
license: gpl-2.0
size: 1,012
keyword: [ "VTK" ]
text_hash: 1d297a7d8e1221da609434a07bf3ee4a90cfa9dd92ec138d6d9dcf9b87384b44
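One practical caveat with the vtkPoints script above: each vertex renders as a single pixel, which is nearly invisible against the black background. A variation of the same pipeline (standard VTK calls only) that enlarges the points and frames the camera:

import vtk

points = vtk.vtkPoints()
for xyz in [(0, 0, 0), (0, 1, 0), (1, 0, 0), (0, 0, 1)]:
    points.InsertNextPoint(*xyz)

polydata = vtk.vtkPolyData()
polydata.SetPoints(points)

vertexFilter = vtk.vtkVertexGlyphFilter()
vertexFilter.SetInputData(polydata)

mapper = vtk.vtkPolyDataMapper()
# pipeline connection instead of a one-shot Update()/GetOutput()
mapper.SetInputConnection(vertexFilter.GetOutputPort())

actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetPointSize(8)  # default is 1 pixel

renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
renderer.ResetCamera()

renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(renderWindow)
renderWindow.Render()
interactor.Start()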
# Licensed under GPL version 3 - see LICENSE.rst import inspect from functools import wraps import collections from copy import copy import warnings import numpy as np from transforms3d import affines, euler from astropy.table import Table from astropy import table import astropy.units as u from ..simulator import Parallel from .uncertainties import generate_facet_uncertainty as genfacun from ..analysis.gratings import (resolvingpower_from_photonlist, effectivearea_from_photonlist) from ..analysis.gratings import AnalysisError __all__ = ['oneormoreelements', 'wiggle', 'moveglobal', 'moveindividual', 'varyperiod', 'varyorderselector', 'varyattribute', 'run_tolerances', 'run_tolerances_for_energies', 'CaptureResAeff', 'generate_6d_wigglelist', 'select_1dof_changed', 'plot_wiggle', 'load_and_plot', ] def oneormoreelements(func): '''Decorator for functions that modify optical elements. The functions in this module are written to work on a single optical element. This decorator allows them to accept a list of elements or a single element. ''' @wraps(func) def func_wrapper(elements, *args, **kwargs): if isinstance(elements, collections.Iterable): for e in elements: func(e, *args, **kwargs) else: func(elements, *args, **kwargs) return func_wrapper @oneormoreelements def wiggle(e, dx=0, dy=0, dz=0, rx=0., ry=0., rz=0.): '''Move and rotate elements around principal axes. Parameters ---------- e : `marxs.simulator.Parallel` or list of those elements Elements where uncertainties will be set dx, dy, dz : float accuracy of grating positioning in x, y, z (in mm) - Gaussian sigma, not FWHM! rx, ry, rz : float accuracy of grating positioning. Rotation around x, y, z (in rad) - Gaussian sigma, not FWHM! ''' e.elem_uncertainty = genfacun(len(e.elements), [dx, dy, dz], [rx, ry, rz]) e.generate_elements() @oneormoreelements def moveglobal(e, dx=0, dy=0, dz=0, rx=0., ry=0., rz=0.): '''Move and rotate origin of the whole `marxs.simulator.Parallel` object. Parameters ---------- e :`marxs.simulator.Parallel` or list of those elements Elements where uncertainties will be set dx, dy, dz : float translation in x, y, z (in mm) rx, ry, rz : float Rotation around x, y, z (in rad) ''' e.uncertainty = affines.compose([dx, dy, dz], euler.euler2mat(rx, ry, rz, 'sxyz'), np.ones(3)) e.generate_elements() @oneormoreelements def moveindividual(e, dx=0, dy=0, dz=0, rx=0, ry=0, rz=0): '''Move and rotate all elements of `marxs.simulator.Parallel` object. Unlike `~marxs.design.tolerancing.moveglobal` this does not move to center of the `~marxs.simulator.Parallel` object, instead it moves all elements individually. Unlike `~marxs.design.tolerancing.wiggle`, each element is moved by the same amount, not a number drawn from from distribution. Parameters ---------- e :`marxs.simulator.Parallel` or list of those elements Elements where uncertainties will be set dx, dy, dz : float translation in x, y, z (in mm) rx, ry, rz : float Rotation around x, y, z (in rad) ''' e.elem_uncertainty = [affines.compose((dx, dy, dz), euler.euler2mat(rx, ry, rz, 'sxyz'), np.ones(3))] * len(e.elements) e.generate_elements() @oneormoreelements def varyattribute(element, **kwargs): '''Modify the attributes of an element. This function modifies the attributes of an object. The keyword arguments are name and value of the attributes to be changed. This can be used for a wide variety of MARXS objects where the value of a parameter is stored as an instance attribute. Parameters ---------- element : object Some optical component. 
**kwargs : Name/value pairs of the attributes to be set. Examples -------- In a `marxs.optics.RadialMirrorScatter` object, the width of the scattering can be modified like this: >>> from marxs.optics import RadialMirrorScatter >>> from marxs.design.tolerancing import varyattribute >>> rms = RadialMirrorScatter(inplanescatter=1e-5, perpplanescatter=0) >>> varyattribute(rms, inplanescatter=2e-5) It is usually easy to see which attributes of a `marxs.simulator.SimulationSequenceElement` are relevant to be changed in tolerancing simulations when looking at the attributes or the implementation of those elements. ''' for key, val in kwargs.items(): # This check is needed because a misspelled key would otherwise set an attribute # that did not exist before and that is never used if not hasattr(element, key): raise ValueError(f'Object {element} does not have {key} attribute.') setattr(element, key, val) @oneormoreelements def varyperiod(element, period_mean, period_sigma): '''Randomly draw different grating periods for different gratings This function needs to be called with gratings as parameters, e.g. >>> from marxs.optics import CATGrating >>> from marxs.design.tolerancing import varyperiod >>> grating = CATGrating(order_selector=None, d=0.001) >>> varyperiod(grating, 2e-4, 1e-5) and the parameters are expected to have two components: center and sigma of a Gaussian distribution for the grating constant d. Parameters ---------- element : `marxs.optics.FlatGrating` or similar (or list of those elements) Elements where uncertainties will be set period_mean : float Center of Gaussian (in mm) period_sigma : float Sigma of Gaussian (in mm) ''' if not hasattr(element, '_d'): raise ValueError(f'Object {element} does not have grating period `_d` attribute.') element._d = np.random.normal(period_mean, period_sigma) @oneormoreelements def varyorderselector(element, order_selector, *args, **kwargs): '''Modify the OrderSelector for a grating Parameters ---------- element : `marxs.optics.FlatGrating` or similar (or list of those elements) Elements where the OrderSelector will be changed order_selector : class This should be a subclass of `InterpolateRalfTable` which determines how the order will be selected. In the case of the default class, the blaze angle of incoming photons will be modified randomly to represent small-scale deviations from the flatness of the gratings. args, kwargs : All other parameters are used to initialize the OrderSelector ''' if not hasattr(element, 'order_selector'): raise ValueError(f'Object {element} does not have an order_selector attribute.') element.order_selector = order_selector(*args, **kwargs) def run_tolerances(photons_in, instrum, wigglefunc, wiggleparts, parameters, analyzefunc): '''Run tolerancing calculations for a range of parameters This function takes an instrument configuration and a function to change one aspect of it. For every change it runs a simulation and calculates a figure of merit. As the name indicates, this function is designed to derive alignment tolerances for certain instrument parts but it might be general enough for other parameter studies in instrument design. Parameters ---------- photons_in : `astropy.table.Table` Input photon list. To speed up the computation, it is useful if the photon list has been run through all elements of the instrument that are located before the first element that is toleranced here. instrum : `marxs.simulator.Sequence` object An instance of the instrument which contains all elements that `photons_in` still have to pass. 
This can include elements that are toleranced in this run and those that are located behind them. wigglefunc : callable Function that modifies `instrum` with the following calling signature: ``wigglefunc(wiggleparts, pars)`` where ``pars`` is one dict from `parameters`. Note that this function is called with `wiggleparts`, which can be the same as `instrum` or just a subset. wiggleparts : `marxs.base.SimulationSequenceElement` instance Element which is modified by `wigglefunc`. Typically this is a subset of `instrum`. For example, to tolerance the mirror alignments, `wiggleparts` would just be the mirror objects, while `instrum` would contain all the parts of the instrument that the photons need to run through up to the detector. parameters : list of dicts List of parameter values for calls to `wigglefunc`. analyzefunc : callable function or object This is called with a photon table and should return a dictionary of results. This could be, e.g., a `marxs.design.tolerancing.CaptureResAeff` instance. Returns ------- result : list of dicts Each dict contains parameters and results for one run. Notes ----- The format of input and output as lists of dicts is chosen because this would work well for a parallel version of this function, which could have the same interface when it is implemented. ''' out = [] for i, pars in enumerate(parameters): print(f'Working on simulation {i}/{len(parameters)}') wigglefunc(wiggleparts, **pars) photons = instrum(photons_in.copy()) cpars = copy(pars) cpars.update(analyzefunc(photons)) out.append(cpars) return out class CaptureResAeff(): '''Capture resolving power and effective area for a tolerancing simulation. Instances of this class can be called with a photon list for a tolerancing simulation. The photon list will be analysed for resolving power and effective area in a number of relevant orders. This is implemented as a class and not a simple function because, when the class is initialized, a number of parameters that hold for every analysis (e.g. the names of certain columns) are set and saved in the class instance. The implementation of this class is geared towards instruments with gratings but can also serve as an example of how a complex analysis that derives several different parameters can be implemented. Results for effective area and resolving power are reported on a per-order basis and also summarized for all grating orders combined and the zeroth order separately. Parameters ---------- A_geom : number Geometric area of aperture for the simulations that this instance will analyze. order_col : string Column name for grating orders orders : array Order numbers to consider in the analysis dispersion_coord : string Dispersion coordinate for `marxs.analysis.gratings.resolvingpower_from_photonlist` ''' def __init__(self, A_geom=1, order_col='order', orders=np.arange(-10, 11), dispersion_coord='det_x'): self.A_geom = A_geom self.order_col = order_col self.orders = np.asanyarray(orders) self.dispersion_coord = dispersion_coord def __call__(self, photons): '''Calculate Aeff and R for an input photon list. Parameters ---------- photons : `astropy.table.Table` Photon list. Returns ------- result : dict Dictionary with per-order Aeff and R, as well as values summed over all grating orders. 
''' aeff = effectivearea_from_photonlist(photons, self.orders, len(photons), self.A_geom, self.order_col) try: ind = (np.isfinite(photons[self.dispersion_coord]) & (photons['probability'] > 0)) res, pos, std = resolvingpower_from_photonlist(photons[ind], self.orders, col=self.dispersion_coord, zeropos=None, ordercol=self.order_col) except AnalysisError: # Something did not work, e.g. too few photons to find zeroth order res = np.nan * np.ones(len(self.orders)) disporders = self.orders != 0 # The following lines work for an empty photon list, too. aeffgrat = np.sum(aeff[disporders]) aeff0 = np.sum(aeff[~disporders]) # Division by 0 causes more nans, so filter those out # Also, res is nan if less than 20 photons are detected # so we need to filter those out, too. ind = disporders & (aeff > 0) & np.isfinite(res) if ind.sum() == 0: # Dispersed spectrum misses detector avggratres = np.nan else: avggratres = np.average(res[ind], weights=aeff[ind] / aeff[ind].sum()) return {'Aeff0': aeff0, 'Aeffgrat': aeffgrat, 'Aeff': aeff, 'Rgrat': avggratres, 'R': res} def run_tolerances_for_energies(source, energies, instrum_before, instrum_remaining, wigglefunc, wiggleparts, parameters, analyzefunc, reset=None, t_source=1): '''Run tolerancing for a grid of parameters and energies This function loops over `~marxs.design.tolerancing.run_tolerances` for different energies. It takes an instrument configuration and a function to change one aspect of it. For every change it runs a simulation and calculates a figure of merit. As the name indicates, this function is designed to derive alignment tolerances for certain instrument parts but it might be general enough for other parameter studies in instrument design. There are two nested loops here, over energies and over the tolerancing parameters. The outer loop is over energies; for every energy, `~marxs.design.tolerancing.run_tolerances` loops over the parameters. This works well when running the ``wigglefunc`` is relatively fast but propagating the photons through ``instrum_before`` is slow, and it minimizes the memory footprint because only one photon list is in memory at any one time. It also implies that runs for the same wiggle parameters but different energies are run on different realizations of any random draws that are performed by ``wigglefunc``. Parameters ---------- source : `marxs.source.Source` Source used to generate the photons for every energy. This function changes the energy of the source, so the source should be set for monoenergetic emission with a constant flux of 1. energies : `astropy.units.quantity.Quantity` An array of energy values. instrum_before : `marxs.simulator.Sequence` An instance of the instrument which contains all elements **before** the first elements in ``wiggleparts``. The first element should be a `marxs.source.pointing.PointingModel`. In principle, ``instrum_before`` can be an empty sequence and the entire instrument can be defined in ``instrum_remaining``; however, ``instrum_before`` is run only once per energy, so setting it can greatly speed up the simulation. instrum_remaining : `marxs.simulator.Sequence` An instance of the instrument which contains all elements not included in ``instrum_before``. ``instrum_remaining`` should include elements that are toleranced in this run and those that are located behind them. wigglefunc, wiggleparts, parameters, analyzefunc : These parameters are passed to `marxs.design.tolerancing.run_tolerances`. 
See that function for a description of these parameters. reset : dict or ``None`` A dictionary of values for the ``wigglefunc`` function that resets the positions or properties of the wiggled elements to their default. If ``reset=None``, then the elements affected by ``wigglefunc`` will be in the state corresponding to the last entry in ``parameters`` when this function exits. t_source : int Parameter for ``source.generate_photons()``. If the source flux is set to one, then ``t_source`` determines the number of photons used in the tolerancing simulation. Returns ------- result : `astropy.table.Table` Each row in the table contains energy, wave, parameter values, and results from ``analyzefunc`` for a single run. ''' wave = energies.to(u.Angstrom, equivalencies=u.spectral()) outtabs = [] for i, e in enumerate(energies): source.energy = e.to(u.keV).value photons_in = source.generate_photons(t_source) photons_in = instrum_before(photons_in) data = run_tolerances(photons_in, instrum_remaining, wigglefunc, wiggleparts, parameters, analyzefunc) # convert data into a table. # astropy.tables has problems with Quantities as input tab = Table([{d: data[i][d].value if isinstance(data[i][d], u.Quantity) else data[i][d] for d in data[i]} for i in range(len(data))]) tab['energy'] = e tab['wave'] = wave[i] outtabs.append(tab) # Reset so that the same instance of instrum can be used again if reset is not None: wigglefunc(wiggleparts, **reset) return table.vstack(outtabs) def generate_6d_wigglelist(trans, rot, names=['dx', 'dy', 'dz', 'rx', 'ry', 'rz']): '''Generate a list of parameters for the wiggle functions in this module. This module contains several wiggle functions such as `~marxs.design.tolerancing.moveglobal` or `~marxs.design.tolerancing.wiggle` that expect input for 6 degrees of freedom, three translations and three rotations. Commonly, we want to study one dof at a time. This function helps with generating lists of dicts for that purpose. Parameters ---------- trans : `astropy.units.quantity.Quantity` Steps for translation. The first element should be 0. rot : `astropy.units.quantity.Quantity` Steps for rotation. The first element should be 0. Returns ------- changeglobal : list of dicts This list contains changes in positive and negative directions. Use this as input for e.g. `~marxs.design.tolerancing.moveglobal`. changeindividual : list of dicts This list contains only one side, so use this as input for e.g. `~marxs.design.tolerancing.wiggle` where the actual misalignment is drawn from a distribution. 
Examples -------- In this example, we take small steps close to 0 and then increase the step size for larger distances, going up to a misalignment of 7 mm in linear translation and 20 arcmin in rotation:: >>> import astropy.units as u >>> from marxs.design.tolerancing import generate_6d_wigglelist >>> trans = [0., .1, .2, .4, .7] * u.cm >>> rot = [0., 2., 5., 10., 20] * u.arcmin >>> lglob, lind = generate_6d_wigglelist(trans, rot) ''' if (trans.value[0] != 0) or (rot.value[0] != 0): warnings.warn('First element of trans and rot should be 0.') n_trans = len(trans) n_rot = len(rot) n = 3 * n_trans changeglobal = np.zeros((n_trans * 2 * 3 + n_rot * 2 * 3, 6)) for i in range(3): changeglobal[i * n_trans: (i + 1) * n_trans, i] = trans.to(u.mm).value changeglobal[n + i * n_rot: n + (i + 1) * n_rot, i + 3] = rot.to(u.rad).value half = changeglobal.shape[0] // 2 changeglobal[half:, :] = - changeglobal[:half, :] changeindividual = changeglobal[:half, :] # Remove multiple entries with [0,0,0,0, ...] changeglobal = np.unique(changeglobal, axis=0) changeindividual = np.unique(changeindividual, axis=0) # numpy array to list of dicts changeglobal = [dict(zip(names, row)) for row in changeglobal] changeindividual = [dict(zip(names, row)) for row in changeindividual] return changeglobal, changeindividual def select_1dof_changed(table, par, parlist=['dx', 'dy', 'dz', 'rx', 'ry', 'rz']): '''Select subset of tolerancing table with changes in 1 dof only This function selects a subset of a table where all parameters other than the parameter selected right now are zero. This is a helper function for analyzing and plotting results. Parameters ---------- table : `astropy.table.Table` Table with wiggle results par : string Name of parameter to be selected parlist : list of strings Names of all parameters in ``table`` Returns ------- tab : `astropy.table.Table` Filtered table ''' pars = set(parlist) ind = np.ones(len(table), dtype=bool) for p in pars - set([par]): ind &= table[p] == 0 return table[ind] def plot_wiggle(tab, par, parlist, ax, axt=None, R_col='Rgrat', Aeff_col='Aeffgrat', axes_facecolor='w'): '''Plotting function for an overview plot wiggling 1 dof at a time. For parameters starting with "d" (e.g. "dx", "dy", "dz"), the plot axes will be labeled as a shift, for parameters starting with "r" as a rotation. Parameters ---------- tab : `astropy.table.Table` Table with wiggle results par : string Name of parameter to be plotted parlist : list of strings Names of all parameters in ``tab`` ax : `matplotlib.axes.Axes` Axis object to plot into. axt : ``None`` or `matplotlib.axes.Axes` If this is ``None``, a twin axis is created to show resolving power and effective area in one plot. Alternatively, a second axes instance can be given here. R_col : string Column name in ``tab`` that holds the resolving power to be plotted. The default is set to work with `marxs.design.tolerancing.CaptureResAeff`. Aeff_col : string Column name in ``tab`` that holds the effective area to be plotted. The default is set to work with `marxs.design.tolerancing.CaptureResAeff`. axes_facecolor : any matplotlib color specification Color for the background in the plot. ''' import matplotlib.pyplot as plt t = select_1dof_changed(tab, par, parlist) t.sort(par) t_wave = t.group_by('wave') if axt is None: axt = ax.twinx() for key, g in zip(t_wave.groups.keys, t_wave.groups): if par[0] == 'd': x = g[par] elif par[0] == 'r': x = np.rad2deg(g[par].data) else: raise ValueError("Don't know how to plot {}. 
Parameter names should start with 'd' for shifts and 'r' for rotations.".format(par)) ax.plot(x, g[R_col], label=r'{:3.1f} $\AA$'.format(key[0]), lw=1.5) axt.plot(x, g[Aeff_col], ':', label=r'{:2.0f} $\AA$'.format(key[0]), lw=2) ax.set_ylabel('Resolving power (solid lines)') axt.set_ylabel('$A_{eff}$ [cm$^2$] (dotted lines)') if par[0] == 'd': ax.set_xlabel('shift [mm]') ax.set_title('Shift along {}'.format(par[1])) elif par[0] == 'r': ax.set_xlabel('Rotation [degree]') ax.set_title('Rotation around {}'.format(par[1])) for a in [ax, axt]: a.set_facecolor(axes_facecolor) a.set_axisbelow(True) a.grid(axis='x', c='1.0', lw=2, ls='solid') wiggle_plot_facecolors = {'global': '0.9', 'individual': (1.0, 0.9, 0.9)} '''Default background colors for wiggle overview plots. If a key of this dict matches part of the filename, the color listed in the dict is applied. ''' def load_and_plot(filename, parlist=['dx', 'dy', 'dz', 'rx', 'ry', 'rz'], **kwargs): '''Load a table with wiggle results and make a default plot This is a function to generate a quicklook image with many hardcoded defaults for figure size, colors etc. In particular, this function is written for the display of 6d plots which vary 6 degrees of freedom, one at a time. The color for the background in the plot is set depending on the filename using the ``string : color`` assignments in `~marxs.design.tolerancing.wiggle_plot_facecolors`. No fancy regexp-based match is applied; this is simply a check with ``in``. Parameters ---------- filename : string Path to a file with data that can be plotted by `~marxs.design.tolerancing.plot_wiggle`. parlist : list of strings Names of all parameters in the table. This function only plots six of them. kwargs : All other parameters are passed to `~marxs.design.tolerancing.plot_wiggle`. Returns ------- tab : `astropy.table.Table` Table of data read from ``filename`` fig : `matplotlib.figure.Figure` Figure with plot. ''' import matplotlib.pyplot as plt tab = Table.read(filename) if 'axes_facecolor' not in kwargs: for n, c in wiggle_plot_facecolors.items(): if n in filename: kwargs['axes_facecolor'] = c fig = plt.figure(figsize=(12, 8)) fig.subplots_adjust(wspace=.6, hspace=.3) for i, par in enumerate(parlist): ax = fig.add_subplot(2, 3, i + 1) plot_wiggle(tab, par, parlist, ax, **kwargs) return tab, fig
hamogu/marxs
marxs/design/tolerancing.py
Python
gpl-3.0
26,010
[ "Gaussian" ]
93364b723ac96f2d27a49d28d857e5fa268fb23bfe7c356e3854a32a65efaf02
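A minimal end-to-end sketch of how the pieces in the module above are meant to combine. `my_source`, `instrum_before`, `instrum_remaining`, and `my_mirror` are hypothetical placeholders for objects built with the usual MARXS source/instrument machinery; only the tolerancing calls come from the module itself.

import numpy as np
import astropy.units as u
from marxs.design.tolerancing import (CaptureResAeff, generate_6d_wigglelist,
                                      moveglobal, run_tolerances_for_energies)

# One-dof-at-a-time misalignment steps: 6 dof, positive and negative side.
trans = [0., 0.1, 0.2, 0.4] * u.mm
rot = [0., 2., 5., 10.] * u.arcmin
changeglobal, changeindividual = generate_6d_wigglelist(trans, rot)

# Figure of merit: per-order effective area and resolving power.
analyzer = CaptureResAeff(A_geom=1000., orders=np.arange(-5, 6),
                          dispersion_coord='det_x')

tab = run_tolerances_for_energies(
    my_source, [0.5, 1.0] * u.keV,
    instrum_before, instrum_remaining,
    moveglobal, my_mirror,           # rigid-body shift/rotation of the mirror
    changeglobal, analyzer,
    reset={'dx': 0., 'dy': 0., 'dz': 0., 'rx': 0., 'ry': 0., 'rz': 0.},
    t_source=10000)
tab.write('mirror_moveglobal.fits')

Because the filename contains 'global', `load_and_plot('mirror_moveglobal.fits')` would then pick the matching background color from `wiggle_plot_facecolors` and produce the six-panel overview plot.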
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8 # # MDAnalysis --- http://www.mdanalysis.org # Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors # (see the file AUTHORS for the full list of names) # # Released under the GNU Public Licence, v2 or any higher version # # Please cite your use of MDAnalysis in published work: # # R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler, # D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein. # MDAnalysis: A Python package for the rapid analysis of molecular dynamics # simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th # Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy. # # N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein. # MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations. # J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787 # # test deprecated code # - in particular stubs introduced in 0.11.0 (and which # will be removed in 1.0) class TestImports(object): def test_core_units(self): try: import MDAnalysis.core.units except ImportError: raise AssertionError("MDAnalysis.core.units not available") def test_core_util(self): try: import MDAnalysis.core.util except ImportError: raise AssertionError("MDAnalysis.core.util not available") def test_core_log(self): try: import MDAnalysis.core.log except ImportError: raise AssertionError("MDAnalysis.core.log not available") def test_core_distances(self): try: import MDAnalysis.core.distances except ImportError: raise AssertionError("MDAnalysis.core.distances not available") def test_core_transformations(self): try: import MDAnalysis.core.transformations except ImportError: raise AssertionError("MDAnalysis.core.transformations not available") def test_core_qcprot(self): try: import MDAnalysis.core.qcprot except ImportError: raise AssertionError("MDAnalysis.core.qcprot not available") def test_KDTree(self): try: import MDAnalysis.KDTree except ImportError: raise AssertionError("MDAnalysis.KDTree not available") def test_analysis_x3dna(self): try: import MDAnalysis.analysis.x3dna from MDAnalysis.analysis.x3dna import X3DNA except ImportError: raise AssertionError("MDAnalysis.analysis.x3dna not available")
alejob/mdanalysis
testsuite/MDAnalysisTests/test_deprecated.py
Python
gpl-2.0
2,753
[ "MDAnalysis" ]
3287311019df34fba7daa0a3e0c22ea7640f9bc87361b520a582311fed094283
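The eight test methods above all follow the same import-or-fail pattern. A parametrized equivalent is sketched below, assuming pytest is available; this is an illustration, not the original test code.

import importlib
import pytest

# Stub modules scheduled for removal in 1.0; same list as the tests above.
DEPRECATED_MODULES = [
    "MDAnalysis.core.units",
    "MDAnalysis.core.util",
    "MDAnalysis.core.log",
    "MDAnalysis.core.distances",
    "MDAnalysis.core.transformations",
    "MDAnalysis.core.qcprot",
    "MDAnalysis.KDTree",
    "MDAnalysis.analysis.x3dna",
]

@pytest.mark.parametrize("modname", DEPRECATED_MODULES)
def test_deprecated_import(modname):
    # Each deprecated stub must remain importable until it is removed in 1.0.
    try:
        importlib.import_module(modname)
    except ImportError:
        pytest.fail("{} not available".format(modname))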
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. from __future__ import unicode_literals import unittest from pymatgen.electronic_structure.core import Orbital, Spin class SpinTest(unittest.TestCase): def test_init(self): self.assertEqual(int(Spin.up), 1) self.assertEqual(int(Spin.down), -1) def test_from_int(self): self.assertEqual(Spin.from_int(1), Spin.up) self.assertEqual(Spin.from_int(-1), Spin.down) self.assertRaises(ValueError, Spin.from_int, 0) def test_cached(self): self.assertEqual(id(Spin.from_int(1)), id(Spin.up)) class OrbitalTest(unittest.TestCase): def test_init(self): for i, orb in enumerate(Orbital.all_orbitals): self.assertEqual(Orbital.from_vasp_index(i), orb) self.assertRaises(IndexError, Orbital.from_vasp_index, 100) def test_cached(self): self.assertEqual(id(Orbital.from_vasp_index(0)), id(Orbital.s)) if __name__ == '__main__': unittest.main()
migueldiascosta/pymatgen
pymatgen/electronic_structure/tests/test_core.py
Python
mit
1,059
[ "pymatgen" ]
4b8d509346e2cd2e7133fae43f135bf89ccf388ddd331e737f56e40492eaeb2e
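For reference, the behaviour these tests pin down can be seen directly in an interpreter; the lines below simply restate the assertions from the test case as plain usage.

from pymatgen.electronic_structure.core import Orbital, Spin

print(int(Spin.up), int(Spin.down))             # 1 -1
print(Spin.from_int(-1) is Spin.down)           # True: from_int returns cached members
print(Orbital.from_vasp_index(0) is Orbital.s)  # True: VASP index 0 is the s orbital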
#!/usr/bin/env python3 import argparse import json import os import subprocess import sys import requests from tnseq_api import authenticateByEnv # Number of reads to use for approximating average read length. READ_LEN_SAMPLE = 10 # Approximate length of barcode to remove from read 2 with paired end reads. # This is used to approximate read length. READ2_BARCODE = 30 # If a contig has MIN_GENES number of genes or fewer, skip this contig. MIN_GENES = 5 def get_genome(parameters): target_file = os.path.join(parameters["output_path"], parameters["gid"] + ".fna") if not os.path.exists(target_file): # .replace("data_url",parameters["data_url"]).replace("gid",parameters["gid"]) genome_url = ( "{data_url}/genome_sequence/?eq(genome_id,{gid})&limit(25000)".format( data_url=parameters["data_url"], gid=parameters["gid"] ) ) print(genome_url) headers = {"accept": "application/sralign+dna+fasta"} # print "switch THE HEADER BACK!" # headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'} req = requests.Request("GET", genome_url, headers=headers) authenticateByEnv(req) prepared = req.prepare() # pretty_print_POST(prepared) s = requests.Session() response = s.send(prepared) handle = open(target_file, "wb") if not response.ok: sys.stderr.write("API not responding. Please try again later.\n") sys.exit(2) for block in response.iter_content(1024): handle.write(block) handle.close() # copy_file = os.path.join(parameters["output_path"],parameters["gid"]+"_2"+".fna") # with open(copy_file, "w") as sink_file: # sink_file.write(line) # os.remove(target_file) # os.rename(copy_file, target_file) contig_ids = [] with open(target_file, "r") as source_file: for line in source_file: if line.startswith(">"): line = line.split()[0] + "\n" contig_ids.append(line[1:].strip()) if len(contig_ids) == 0: sys.stderr.write("Genome appears empty!\n") raise Exception("Genome appears empty!") return target_file, contig_ids def get_annotation(parameters): target_file = os.path.join(parameters["output_path"], parameters["gid"] + ".gff") if not os.path.exists(target_file): annotation_url = "data_url/genome_feature/?and(eq(genome_id,gid),eq(annotation,PATRIC),or(eq(feature_type,CDS),eq(feature_type,tRNA),eq(feature_type,rRNA)))&limit(25000)".replace( "data_url", parameters["data_url"] ).replace( "gid", parameters["gid"] ) print(annotation_url) headers = {"accept": "application/gff"} # headers = {"accept":"application/cufflinks+gff"} # print "switch THE HEADER BACK!" # headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'} req = requests.Request("GET", annotation_url, headers=headers) authenticateByEnv(req) prepared = req.prepare() # pretty_print_POST(prepared) s = requests.Session() response = s.send(prepared) handle = open(target_file, "wb") if not response.ok: sys.stderr.write("API not responding. 
Please try again later.\n") sys.exit(2) else: for block in response.iter_content(1024): handle.write(block) handle.close() return target_file def get_files(job_data, server_data): genome_dirs = [job_data["output_path"]] job_data["data_url"] = server_data["data_url"] job_data["gid"] = job_data["reference_genome_id"] contig_ids = [] for _ in [job_data["reference_genome_id"]]: get_annotation(job_data) _, iterm_ids = get_genome(job_data) contig_ids.append(iterm_ids) return genome_dirs, contig_ids # contrasts are always defined as either [control,treatment] or [control] # run transit per contrast # when we allow UI definition of conditions and contrasts, we will need to clean up names for file friendliness def run_transit(genome_list, library_dict, parameters): contrasts = parameters["contrasts"] output_path = parameters["output_path"] # # Verify recipe is valid. From transit command line help. # # grep CZJJ01000038 197.5220.gff | sort -nk4 > CZJJ01000038.gff valid_recipes = { "griffin": 1, "tn5gaps": 1, "rankproduct": 1, "hmm": 1, "binomial": 1, "gumbel": 1, "resampling": 1, } recipe = parameters["recipe"] if recipe not in valid_recipes: sys.stderr.write("Invalid recipe " + recipe + "\n") sys.exit(2) for genome in genome_list: contig_ids = genome["contig_ids"] for contig in contig_ids: if len(contig_ids) > 1: annotation = os.path.join(output_path, contig + ".sorted.gff") with open( os.path.join(output_path, contig + ".gff"), "w" ) as contig_gff: try: subprocess.check_call( ["grep", contig, genome["annotation"]], stdout=contig_gff ) except subprocess.CalledProcessError: pass with open(annotation, "w") as contig_sorted_gff: subprocess.check_call( ["sort", "-nk4", os.path.join(output_path, contig + ".gff")], stdout=contig_sorted_gff, ) os.remove(os.path.join(output_path, contig + ".gff")) else: annotation = genome["annotation"] with open(annotation, "r") as contig_gff: gene_count = 0 for line in contig_gff: if not line.startswith("#"): gene_count += 1 if gene_count <= MIN_GENES: print( "Contig {} has {} genes in the annotation. 
Skipping.".format( contig, gene_count ) ) continue cmd = ["transit", recipe] for contrast in contrasts: if len(contig_ids) > 1: output_file = os.path.join( output_path, "_".join([recipe] + contrast) + "_" + contig + "_transit.txt", ) else: output_file = os.path.join( output_path, "_".join([recipe] + contrast) + "_transit.txt" ) cur_cmd = list(cmd) # make a copy control_files = [] exp_files = [] condition = contrast[0] for r in library_dict[condition]["replicates"]: control_files.append(r[genome["genome"]]["wig"][contig]) # control_files.append(r[genome["genome"]]["wig"]) if len(contrast) == 2: condition = contrast[1] for r in library_dict[condition]["replicates"]: exp_files.append(r[genome["genome"]]["wig"][contig]) # exp_files.append(r[genome["genome"]]["wig"]) if len(control_files) > 0: cur_cmd.append(",".join(control_files)) else: sys.stderr.write("Missing control files for " + recipe + "\n") sys.exit(2) if recipe == "resampling": if len(exp_files) > 0: cur_cmd.append(",".join(exp_files)) else: sys.stderr.write("Missing exp files for " + recipe + "\n") sys.exit(2) # cur_cmd.append(genome["annotation"]) cur_cmd.append(annotation) cur_cmd.append(output_file) print(" ".join(cur_cmd)) sys.stdout.flush() subprocess.check_call(cur_cmd) # call transit def handle_gzip(file_path): if file_path.endswith(".gz"): if os.path.isfile(file_path): subprocess.check_call(["gunzip", file_path]) file_path = file_path[0 : len(file_path) - 3] sys.stderr.write( "File {} has size {}.\n".format(file_path, os.path.getsize(file_path)) ) sys.stderr.flush() return file_path def read_length_estimate(file_path, primer, f_factor=0): """ Gives an estimate of average read length after primer trimming. """ get_next = False counter = 0 reads = [] with open(file_path) as fd: for line in fd: if counter >= READ_LEN_SAMPLE: break if get_next: reads.append(line.strip()) get_next = False counter += 1 elif line.startswith("@") or line.startswith(">"): get_next = True try: return max( 10, sum([len(r) if primer not in r else len(r) - len(primer) for r in reads]) / len(reads) - f_factor, ) except ZeroDivisionError: sys.stderr.write(str(reads) + "\n") raise def run_alignment(genome_list, library_dict, parameters): # modifies library_dict sub replicates to include 'bowtie' dict recording output files output_path = parameters["output_path"] key_handle = open(os.path.join(parameters["output_path"], "output_keys.txt"), "w") for genome in genome_list: contig_ids = genome["contig_ids"] contig_ids_str = ",".join(contig_ids) genome_link = os.path.join(output_path, os.path.basename(genome["genome"])) final_cleanup = [] if not os.path.exists(genome_link): subprocess.check_call(["ln", "-s", genome["genome"], genome_link]) bwa_loc = str( subprocess.run(["which", "bwa"], capture_output=True).stdout, "utf-8" ).strip() if not bwa_loc or bwa_loc.startswith("which"): raise LookupError("bwa not found on PATH") sys.stderr.write("Using BWA located at: {}\n".format(bwa_loc)) sys.stderr.flush() # bwa_loc = "/opt/patric-common/runtime/bin/bwa" cmd = [ "tpp", "-bwa", bwa_loc, "-ref", genome["genome"], ] # thread_count=multiprocessing.cpu_count() # cmd+=["-p",str(thread_count)] # if genome["dir"].endswith('/'): # genome["dir"]=genome["dir"][:-1] # genome["dir"]=os.path.abspath(genome["dir"]) # genome["output"]=os.path.join(output_path,os.path.basename(genome["dir"])) for library in library_dict: rcount = 0 for r in library_dict[library]["replicates"]: cur_cleanup = [] rcount += 1 result_name = library + str(rcount) target_dir = output_path # 
target_dir=os.path.join(genome["output"],library,"replicate"+str(rcount)) # target_dir=os.path.abspath(target_dir) # subprocess.call(["mkdir","-p",target_dir]) cur_cmd = list(cmd) r["read1"] = handle_gzip(r["read1"]) r1_len = read_length_estimate(r["read1"], parameters["primer"]) r2_len = None if "read2" in r: r["read2"] = handle_gzip(r["read2"]) r2_len = read_length_estimate( r["read2"], parameters["primer"], READ2_BARCODE ) sys.stdout.write( "Approximate average reads 1 length: {}; reads 2 length: {}.\n".format( r1_len, r2_len ) ) sys.stdout.flush() if r2_len is not None: r_est = (r1_len + r2_len) / 2.0 else: r_est = r1_len alg = "mem" if r_est <= 70: alg = "aln" if "read2" in r: read_link1 = os.path.join(output_path, os.path.basename(r["read1"])) read_link2 = os.path.join(output_path, os.path.basename(r["read2"])) if not os.path.exists(read_link1): subprocess.check_call(["ln", "-s", r["read1"], read_link1]) if not os.path.exists(read_link2): subprocess.check_call(["ln", "-s", r["read2"], read_link2]) cur_cmd += ["-reads1", read_link1, "-reads2", read_link2] name1 = os.path.splitext(os.path.basename(r["read1"]))[0] name2 = os.path.splitext(os.path.basename(r["read2"]))[0] key_handle.write("\t".join([name1, name2, result_name]) + "\n") base_name = os.path.join(target_dir, result_name) else: read_link1 = os.path.join(output_path, os.path.basename(r["read1"])) if not os.path.exists(read_link1): subprocess.check_call(["ln", "-s", r["read1"], read_link1]) cur_cmd += ["-reads1", read_link1] name1 = os.path.splitext(os.path.basename(r["read1"]))[0] key_handle.write("\t".join([name1, result_name]) + "\n") base_name = os.path.join(target_dir, result_name) sam_file = base_name + ".sam" wig_file = base_name + ".wig" cur_cleanup.append(sam_file) bam_file = sam_file[:-4] + ".bam" r[genome["genome"]] = {} r[genome["genome"]]["bam"] = bam_file r[genome["genome"]]["wig"] = ( {contig_ids[0]: wig_file} if len(contig_ids) <= 1 else { contig_id: "{}_{}.wig".format(base_name, contig_id) for contig_id in contig_ids } ) cur_cmd += ["-output", base_name] cur_cmd += [ "-protocol", parameters["protocol"], "-bwa-alg", alg, "-primer", parameters["primer"], "-replicon-ids", contig_ids_str, ] if os.path.exists(bam_file): sys.stderr.write( bam_file + " alignments file already exists. 
skipping\n" ) else: print(" ".join(cur_cmd)) sys.stdout.flush() subprocess.check_call(cur_cmd) # if len(contig_ids) > 1: # cat_cmd = ["cat"] + [r[genome["genome"]]["wig"][contig] for contig in contig_ids] # # cat_cmd += [">", wig_file] # with open(wig_file, "w") as wig_fd: # subprocess.run(cat_cmd, stdout=wig_fd) # r[genome["genome"]]["wig"] = wig_file if not os.path.exists(bam_file): # subprocess.check_call("samtools view -Su "+sam_file+" | samtools sort -o - - > "+bam_file, shell=True)#convert to bam bam_out = open(bam_file, "w") p1 = subprocess.Popen( ["samtools", "view", "-Su", sam_file], stdout=subprocess.PIPE ) p2 = subprocess.Popen( ["samtools", "sort", "-o", "-", "-"], stdout=bam_out, stdin=p1.stdout, ) p1.stdout.close() output = p2.communicate() bam_out.close() print("pipeline done " + str(output)) p1_stat = p1.wait() p2_stat = p2.wait() print("view status = %d" % p1_stat) print("sort status = %d" % p2_stat) subprocess.check_call(["samtools", "index", bam_file]) # subprocess.check_call('samtools view -S -b %s > %s' % (sam_file, bam_file+".tmp"), shell=True) # subprocess.check_call('samtools sort %s %s' % (bam_file+".tmp", bam_file), shell=True) for garbage in cur_cleanup: if os.path.exists(garbage): subprocess.call(["rm", garbage]) key_handle.close() for garbage in final_cleanup: subprocess.call(["rm", garbage]) def main(server_setup, job_data): required_data = [ "experimental_conditions", "read_files", "reference_genome_id", "recipe", "contrasts", "protocol", ] fail = False for data in required_data: if not data in job_data or len(job_data[data]) == 0: sys.stderr.write("Missing " + data + "\n") fail = True if "primer" not in job_data: sys.stderr.write( "The primer string was not found. Using default: ACTTATCAGCCAACCTGTTA.\n" ) primer_string = "ACTTATCAGCCAACCTGTTA" else: primer_string = job_data["primer"].upper() job_data["primer"] = primer_string for char in primer_string: if char not in ["A", "C", "T", "G", "N"]: sys.stderr.write("The primer is not a DNA string.\n") fail = True # Could validate that the recipe and protocol make sense. 
print(job_data) if fail: sys.exit(2) # library_list=job_data["experimental_conditions"] output_path = job_data["output_path"] = os.path.abspath(job_data["output_path"]) # for lib in library_list: # library_dict[lib]={"library":lib} # job_data["read_files"]=job_data["read_files"].split() # count=0 # add read/replicate structure to library dict # for read in job_data["read_files"]: # replicates=read.split(',') # rep_store=library_dict[library_list[count]]["replicates"]=[] # for rep in replicates: # pair=rep.split('%') # pair_dict={"read1":pair[0]} # if len(pair) == 2: # pair_dict["read2"]=pair[1] # rep_store.append(pair_dict) # count+=1 library_dict = job_data["read_files"] genome_dirs, contig_ids = get_files(job_data, server_setup) genome_list = [] for i, g in enumerate(genome_dirs): cur_genome = { "genome": [], "annotation": [], "dir": g, "contig_ids": contig_ids[i], "hisat_index": [], } for f in os.listdir(g): if f.endswith(".fna") or f.endswith(".fa") or f.endswith(".fasta"): cur_genome["genome"].append(os.path.abspath(os.path.join(g, f))) elif f.endswith(".gff"): cur_genome["annotation"].append(os.path.abspath(os.path.join(g, f))) if len(cur_genome["genome"]) != 1: sys.stderr.write("Too many or too few fasta files present in " + g + "\n") sys.exit(2) else: cur_genome["genome"] = cur_genome["genome"][0] if len(cur_genome["annotation"]) != 1: sys.stderr.write("Too many or too few gff files present in " + g + "\n") sys.exit(2) else: cur_genome["annotation"] = cur_genome["annotation"][0] # if args.index: # if len(cur_genome["hisat_index"]) != 1: # sys.stderr.write("Missing hisat index tar file for "+g+"\n") # sys.exit(2) # else: # cur_genome["hisat_index"]=cur_genome["hisat_index"][0] genome_list.append(cur_genome) output_path = os.path.abspath(output_path) if not os.path.exists(output_path): subprocess.call(["mkdir", "-p", output_path]) run_alignment(genome_list, library_dict, job_data) sys.stderr.write("TPP is finished.\n") sys.stderr.flush() run_transit(genome_list, library_dict, job_data) # cleanup(genome_list, library_dict, parameters, output_path) if __name__ == "__main__": parser = argparse.ArgumentParser() jobinfo = parser.add_mutually_exclusive_group(required=True) jobinfo.add_argument( "--jfile", help='json file for job {"reference_genome_id":[x],"experimental_conditions":[x], "transit_params":{key:value}, "output_path":x, "read_files":x', ) jobinfo.add_argument("--jstring", help="json string from user input") serverinfo = parser.add_mutually_exclusive_group(required=True) serverinfo.add_argument("--sfile", help="server setup JSON file") serverinfo.add_argument("--sstring", help="server setup JSON string") parser.add_argument( "-o", help="output directory. defaults to current directory.", required=False ) # parser.add_argument('-g', help='genome ids to get *.fna and annotation *.gff', required=True) # parser.add_argument('-L', help='csv list of library names for comparison', required=True) # parser.add_argument('-p', help='JSON formatted parameter list for TRANSIT keyed to program', required=True) # parser.add_argument('-o', help='output directory. defaults to current directory.', required=False) # parser.add_argument('readfiles', nargs='+', help="whitespace sep list of read files. 
should be \ # ws separates control (first) from experiment files (second),\ # a comma separates replicates, and a percent separates pairs.") if len(sys.argv) == 1: parser.print_help() sys.exit(2) args = parser.parse_args() try: job_data = ( json.loads(args.jstring) if args.jstring else json.load(open(args.jfile, "r")) ) except Exception: sys.stderr.write("Failed to parse user provided form data\n") raise # parse setup data try: server_setup = ( json.loads(args.sstring) if args.sstring else json.load(open(args.sfile, "r")) ) except Exception: sys.stderr.write("Failed to parse server data\n") raise if "data_api" in server_setup and ("data_url" not in server_setup): server_setup["data_url"] = server_setup["data_api"] job_data["output_path"] = args.o main(server_setup, job_data)
PATRIC3/p3_tnseq
service-scripts/p3_tnseq.py
Python
mit
22,831
[ "BWA", "Bowtie" ]
4134c4b193a13bb866687ba70324508a4816a1678fc9aa5374b3bc086ec4240c
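The choice between `bwa aln` and `bwa mem` in `run_alignment` above hinges on the estimated post-trim read length. A condensed sketch of that decision, using the module's own helpers and assuming the hypothetical FASTQ paths below exist:

# Hypothetical inputs; READ2_BARCODE shortens the read-2 estimate to account
# for the barcode that gets trimmed from read 2 of paired-end data.
primer = "ACTTATCAGCCAACCTGTTA"
r1_len = read_length_estimate("sample_R1.fastq", primer)
r2_len = read_length_estimate("sample_R2.fastq", primer, f_factor=READ2_BARCODE)
r_est = (r1_len + r2_len) / 2.0
alg = "aln" if r_est <= 70 else "mem"   # short reads align better with bwa aln
print("estimated read length {} -> bwa {}".format(r_est, alg))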
import cart import math import gui import random import brain import copy import pool def randomSeed(): """ Random number between -0.5 and 0.5 """ return (0.5 - random.random()) def randomWeights(sizes,fact): weight=[] for i in range(len(sizes)-1): layer=[] for _ in range(sizes[i+1]): w=[] for _ in range(sizes[i]): w.append(randomSeed()*fact) w.append(randomSeed()*fact) layer.append(w) weight.append(layer) return weight # example showing how you might mutate the weights def mutate(weights_orig,mutate_amount): # copy original into a new array w=copy.deepcopy(weights_orig) for layer in xrange(len(w)): for neuron in xrange(len(w[layer])): for i in xrange(len(w[layer][neuron])): w[layer][neuron][i] += randomSeed()*mutate_amount return w class Factory: def seed(self): return randomWeights(sizes,1.0) def mate(self,a,b): print " Mating not implemented " return None def mutate(self,a): # randomly replace a character scale=random.random() return mutate(a,scale) def draw(screen,cart,force): """ Called by the simulation to display the current state of the cart. """ # clear screen screen.fill((0,0,0)) # get the size of the window wid=gui.dim_window[0] hei=gui.dim_window[1] # pendulum length length=cart.l scale=hei/length/3.0 # map position onto the screen x1=wid/2.0+cart.getX()*scale # if too big/small limit the position if x1 > wid*.8: x1 =wid*.8 if x1 < wid*.2: x1 = wid*.2 # base line for the cart y1=hei*.6 # angle of pendulum ang=cart.getAngle() # x,y of the end of pendulum x2=x1+scale*math.sin(ang)*length y2=y1+scale*math.cos(ang)*length # draw pendulum col=(255,0,0) thick=3 gui.draw_line(screen,x1,y1,x2,y2,col,thick) col=(0,255,0) gui.fill_circle(screen,x2,y2,12,col) # draw cart col=(0,0,255) thick=20 gui.draw_line(screen,x1-20,y1,x1+20,y1,col,thick) # display the state of the cart col=(0,255,255) state=cart.state str2="" str2+= "Phi: %5.2f " % (state[0]-math.pi) str2+= "dphidt: %5.2f " % state[1] str2+= "x: %5.2f " % state[2] str2+= "dxdt: %5.2f " %state[3] str2+= " force: %5.2f " % force gui.draw_string(screen,str2,(20,10),col,16) # copy screen onto the display gui.blit(screen) #### MAIN CODE ######################### # create an inverted pendulum cart=cart.Cart() dt=.1 factory=Factory() pool=pool.Pool(size=10,factory=factory,breed_prob=.0,seed_prob=0.1) frameRate=10 # slow down to 10 steps per second. 
screen = gui.init_surface((800,200)," CART + IP demo" ) graphics=False best_fit=-1e32 tot_time=0.0 best_guess=None inc=False force_scale=10.0 sizes=[2,1] net=None INIT_ANG=0.3 GOAL=500.0 TOTAL_TIME_MAX=10000 guess=pool.create() net=brain.FeedForwardBrain(weight=guess) # set the initial angle cart.setAngle(math.pi+INIT_ANG) time=0.0 tot_time=0.0 while not gui.check_for_quit(): # loop until user hits escape # Test for falling over # if fallen then reset with a random angle if abs(math.pi-cart.getAngle()) > INIT_ANG: fit=time pool.add(guess,fit) print pool cart.setAngle(math.pi+INIT_ANG) guess=pool.create() net.setWeights(guess) time=0; input = copy.deepcopy(cart.getState()) input[0]=input[0]-math.pi out=net.ffwd(input) force=(out[0]-0.5)*force_scale # step the car for a single GUI frame cart.step(force,dt) time+=dt tot_time+=dt if time > GOAL or tot_time > TOTAL_TIME_MAX: brain.saveBrain(net) break ################ RESET CODE ############################## ############################################################ if graphics: # draw the cart and display info draw(screen,cart,force) # slow the gui down to the given frameRate gui.tick(frameRate) if time > GOAL: print " SUCCESS ",time,tot_time else: print " best =",best_fit
pauljohnleonard/pod-world
CI_2014/ATTIC/PJL/03_ga_main.py
Python
gpl-2.0
4,830
[ "NEURON" ]
b579128c9e47268091a7ef660d35111c422dfd80e5ccab221926c8b958949eb4
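The genetic-algorithm loop above rests on two small helpers, `randomWeights` and `mutate`; the snippet below (Python 2, matching the file) shows their effect on a 2-input, 1-output network in isolation.

w0 = randomWeights([2, 1], 1.0)  # one output neuron: 2 weights plus a bias term
w1 = mutate(w0, 0.1)             # deep copy perturbed by up to +/-0.05 per weight
print(w0[0][0])                  # e.g. [0.21, -0.43, 0.05]
print(w1[0][0])                  # same shape, slightly shifted values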
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class RKernlab(RPackage): """Kernel-Based Machine Learning Lab Kernel-based machine learning methods for classification, regression, clustering, novelty detection, quantile regression and dimensionality reduction. Among other methods 'kernlab' includes Support Vector Machines, Spectral Clustering, Kernel PCA, Gaussian Processes and a QP solver.""" homepage = "https://cloud.r-project.org/package=kernlab" url = "https://cloud.r-project.org/src/contrib/kernlab_0.9-25.tar.gz" list_url = "https://cloud.r-project.org/src/contrib/Archive/kernlab" version('0.9-29', sha256='c3da693a0041dd34f869e7b63a8d8cf7d4bc588ac601bcdddcf7d44f68b3106f') version('0.9-27', sha256='f6add50ed4097f04d09411491625f8d46eafc4f003b1c1cff78a6fff8cc31dd4') version('0.9-26', sha256='954940478c6fcf60433e50e43cf10d70bcb0a809848ca8b9d683bf371cd56077') version('0.9-25', sha256='b9de072754bb03c02c4d6a5ca20f2290fd090de328b55ab334ac0b397ac2ca62') depends_on('r@2.10:', type=('build', 'run'))
LLNL/spack
var/spack/repos/builtin/packages/r-kernlab/package.py
Python
lgpl-2.1
1,244
[ "Gaussian" ]
5bae81d3e85cdd5e9ba08fe3b6fc1f41fe5f75af28d0db3c5c17b522d511bd15
#!/usr/bin/python #Audio Tools, a module and set of tools for manipulating audio data #Copyright (C) 2007-2012 Brian Langenberger #This program is free software; you can redistribute it and/or modify #it under the terms of the GNU General Public License as published by #the Free Software Foundation; either version 2 of the License, or #(at your option) any later version. #This program is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #GNU General Public License for more details. #You should have received a copy of the GNU General Public License #along with this program; if not, write to the Free Software #Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA from audiotools.bitstream import BitstreamReader from audiotools.pcm import from_channels, from_list from math import log from hashlib import md5 def sub_blocks(reader, sub_blocks_size): while (sub_blocks_size > 0): sub_block = Sub_Block.read(reader) yield sub_block sub_blocks_size -= sub_block.total_size() class WavPackDecoder: def __init__(self, filename): self.reader = BitstreamReader(open(filename, "rb"), 1) #read initial block to populate #sample_rate, bits_per_sample, channels, and channel_mask self.reader.mark() block_header = Block_Header.read(self.reader) sub_blocks_size = block_header.block_size - 24 sub_blocks_data = self.reader.substream(sub_blocks_size) if (block_header.sample_rate != 15): self.sample_rate = [6000, 8000, 9600, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 64000, 88200, 96000, 192000][block_header.sample_rate] else: sub_blocks_data.mark() try: for sub_block in sub_blocks(sub_blocks_data, sub_blocks_size): if (((sub_block.metadata_function == 7) and (sub_block.nondecoder_data == 1))): self.sample_rate = sub_block.data.read( sub_block.data_size() * 8) break else: raise ValueError("invalid sample rate") finally: sub_blocks_data.rewind() sub_blocks_data.unmark() self.bits_per_sample = [8, 16, 24, 32][block_header.bits_per_sample] if (block_header.initial_block and block_header.final_block): if (((block_header.mono_output == 0) or (block_header.false_stereo == 1))): self.channels = 2 self.channel_mask = 0x3 else: self.channels = 1 self.channel_mask = 0x4 else: #look for channel mask sub block sub_blocks_data.mark() for sub_block in sub_blocks(sub_blocks_data, sub_blocks_size): if (((sub_block.metadata_function == 13) and (sub_block.nondecoder_data == 0))): self.channels = sub_block.data.read(8) self.channel_mask = sub_block.data.read( (sub_block.data_size() - 1) * 8) break else: #FIXME - handle case of no channel mask sub block raise NotImplementedError() sub_blocks_data.rewind() sub_blocks_data.unmark() self.reader.rewind() self.reader.unmark() self.pcm_finished = False self.md5_checked = False self.md5sum = md5() def read(self, pcm_frames): if (self.pcm_finished): if (not self.md5_checked): self.reader.mark() try: try: header = Block_Header.read(self.reader) sub_blocks_size = header.block_size - 24 sub_blocks_data = \ self.reader.substream(sub_blocks_size) for sub_block in sub_blocks(sub_blocks_data, sub_blocks_size): if (((sub_block.metadata_function == 6) and (sub_block.nondecoder_data == 1))): if ((sub_block.data.read_bytes(16) != self.md5sum.digest())): raise ValueError("invalid stream MD5 sum") except (IOError, ValueError): #no error if a block isn't found pass finally: self.reader.rewind() self.reader.unmark() return from_list([], self.channels, 
self.bits_per_sample, True) channels = [] while (True): # in place of a do-while loop try: block_header = Block_Header.read(self.reader) except (ValueError, IOError): self.pcm_finished = True return from_list([], self.channels, self.bits_per_sample, True) sub_blocks_size = block_header.block_size - 24 sub_blocks_data = self.reader.substream(sub_blocks_size) channels.extend(read_block(block_header, sub_blocks_size, sub_blocks_data)) if (block_header.final_block == 1): break if ((block_header.block_index + block_header.block_samples) >= block_header.total_samples): self.pcm_finished = True #combine channels of audio data into single block block = from_channels([from_list(ch, 1, self.bits_per_sample, True) for ch in channels]) #update MD5 sum self.md5sum.update(block.to_bytes(False, self.bits_per_sample > 8)) #return single block of audio data return block def close(self): self.reader.close() class Block_Header: def __init__(self, block_id, block_size, version, track_number, index_number, total_samples, block_index, block_samples, bits_per_sample, mono_output, hybrid_mode, joint_stereo, channel_decorrelation, hybrid_noise_shaping, floating_point_data, extended_size_integers, hybrid_controls_bitrate, hybrid_noise_balanced, initial_block, final_block, left_shift_data, maximum_magnitude, sample_rate, use_IIR, false_stereo, CRC): if (block_id != "wvpk"): raise ValueError("invalid WavPack block ID") self.block_size = block_size self.version = version self.track_number = track_number self.index_number = index_number self.total_samples = total_samples self.block_index = block_index self.block_samples = block_samples self.bits_per_sample = bits_per_sample self.mono_output = mono_output self.hybrid_mode = hybrid_mode self.joint_stereo = joint_stereo self.channel_decorrelation = channel_decorrelation self.hybrid_noise_shaping = hybrid_noise_shaping self.floating_point_data = floating_point_data self.extended_size_integers = extended_size_integers self.hybrid_controls_bitrate = hybrid_controls_bitrate self.hybrid_noise_balanced = hybrid_noise_balanced self.initial_block = initial_block self.final_block = final_block self.left_shift_data = left_shift_data self.maximum_magnitude = maximum_magnitude self.sample_rate = sample_rate self.use_IIR = use_IIR self.false_stereo = false_stereo self.CRC = CRC def __repr__(self): return "Block_Header(%s)" % \ ", ".join(["%s=%s" % (attr, getattr(self, attr)) for attr in ["block_size", "version", "track_number", "index_number", "total_samples", "block_index", "block_samples", "bits_per_sample", "mono_output", "hybrid_mode", "joint_stereo", "channel_decorrelation", "hybrid_noise_shaping", "floating_point_data", "extended_size_integers", "hybrid_controls_bitrate", "hybrid_noise_balanced", "initial_block", "final_block", "left_shift_data", "maximum_magnitude", "sample_rate", "use_IIR", "false_stereo", "CRC"]]) @classmethod def read(cls, reader): return cls(*reader.parse("4b 32u 16u 8u 8u 32u 32u 32u" + "2u 1u 1u 1u 1u 1u 1u 1u " + "1u 1u 1u 1u 5u 5u 4u 2p 1u 1u 1p" + "32u")) class Sub_Block: def __init__(self, metadata_function, nondecoder_data, actual_size_1_less, large_block, sub_block_size, data): self.metadata_function = metadata_function self.nondecoder_data = nondecoder_data self.actual_size_1_less = actual_size_1_less self.large_block = large_block self.sub_block_size = sub_block_size self.data = data def __repr__(self): return "Sub_Block(%s)" % \ ", ".join(["%s=%s" % (attr, getattr(self, attr)) for attr in ["metadata_function", "nondecoder_data", 
"actual_size_1_less", "large_block", "sub_block_size", "data"]]) def total_size(self): if (self.large_block): return 1 + 3 + (self.sub_block_size * 2) else: return 1 + 1 + (self.sub_block_size * 2) def data_size(self): if (self.actual_size_1_less): return self.sub_block_size * 2 - 1 else: return self.sub_block_size * 2 @classmethod def read(cls, reader): (metadata_function, nondecoder_data, actual_size_1_less, large_block) = reader.parse("5u 1u 1u 1u") if (large_block == 0): sub_block_size = reader.read(8) else: sub_block_size = reader.read(24) if (actual_size_1_less == 0): data = reader.substream(sub_block_size * 2) else: data = reader.substream(sub_block_size * 2 - 1) reader.skip(8) return cls(metadata_function, nondecoder_data, actual_size_1_less, large_block, sub_block_size, data) def read_block(block_header, sub_blocks_size, sub_blocks_data): """returns 1 or 2 channels of PCM data integers""" decorrelation_terms_read = False decorrelation_weights_read = False decorrelation_samples_read = False entropies_read = False residuals_read = False extended_integers_read = False while (sub_blocks_size > 0): (metadata_function, nondecoder_data, actual_size_1_less, large_sub_block) = sub_blocks_data.parse("5u 1u 1u 1u") if (large_sub_block == 0): sub_block_size = sub_blocks_data.read(8) else: sub_block_size = sub_blocks_data.read(24) if (actual_size_1_less == 0): sub_block_data = sub_blocks_data.substream(sub_block_size * 2) else: sub_block_data = sub_blocks_data.substream(sub_block_size * 2 - 1) sub_blocks_data.skip(8) if (nondecoder_data == 0): if (metadata_function == 2): (decorrelation_terms, decorrelation_deltas) = read_decorrelation_terms( sub_block_size, actual_size_1_less, sub_block_data) decorrelation_terms_read = True if (metadata_function == 3): if (not decorrelation_terms_read): raise ValueError( "weights sub block found before terms sub block") decorrelation_weights = read_decorrelation_weights( block_header, len(decorrelation_terms), sub_block_size, actual_size_1_less, sub_block_data) decorrelation_weights_read = True if (metadata_function == 4): if (not decorrelation_terms_read): raise ValueError( "samples sub block found before terms sub block") if (actual_size_1_less): raise ValueError( "decorrelation samples must have an even byte count") decorrelation_samples = read_decorrelation_samples( block_header, decorrelation_terms, sub_block_size, sub_block_data) decorrelation_samples_read = True if (metadata_function == 5): entropies = read_entropy_variables(block_header, sub_block_data) entropies_read = True if (metadata_function == 9): (zero_bits, one_bits, duplicate_bits) = read_extended_integers(sub_block_data) extended_integers_read = True if (metadata_function == 10): if (not entropies_read): raise ValueError("bitstream sub block before " + "entropy variables sub block") residuals = read_bitstream(block_header, entropies, sub_block_data) residuals_read = True if (large_sub_block == 0): sub_blocks_size -= (2 + 2 * sub_block_size) else: sub_blocks_size -= (4 + 2 * sub_block_size) if (decorrelation_terms_read): if (not decorrelation_weights_read): raise ValueError("decorrelation weights sub block not found") if (not decorrelation_samples_read): raise ValueError("decorrelation samples sub block not found") if (not residuals_read): raise ValueError("bitstream sub block not found") if ((block_header.mono_output == 0) and (block_header.false_stereo == 0)): if (decorrelation_terms_read and len(decorrelation_terms) > 0): decorrelated = decorrelate_channels(residuals, decorrelation_terms, 
decorrelation_deltas, decorrelation_weights, decorrelation_samples) else: decorrelated = residuals if (block_header.joint_stereo == 1): left_right = undo_joint_stereo(decorrelated) else: left_right = decorrelated channels_crc = calculate_crc(left_right) if (channels_crc != block_header.CRC): raise ValueError("CRC mismatch (0x%8.8X != 0x%8.8X)" % (channels_crc, block_header.CRC)) if (block_header.extended_size_integers == 1): un_shifted = undo_extended_integers(zero_bits, one_bits, duplicate_bits, left_right) else: un_shifted = left_right return un_shifted else: if (decorrelation_terms_read and len(decorrelation_terms) > 0): decorrelated = decorrelate_channels(residuals, decorrelation_terms, decorrelation_deltas, decorrelation_weights, decorrelation_samples) else: decorrelated = residuals channels_crc = calculate_crc(decorrelated) if (channels_crc != block_header.CRC): raise ValueError("CRC mismatch (0x%8.8X != 0x%8.8X)" % (channels_crc, block_header.CRC)) if (block_header.extended_size_integers == 1): un_shifted = undo_extended_integers(zero_bits, one_bits, duplicate_bits, decorrelated) else: un_shifted = decorrelated if (block_header.false_stereo == 0): return un_shifted else: return (un_shifted[0], un_shifted[0]) def read_decorrelation_terms(sub_block_size, actual_size_1_less, sub_block_data): """returns a list of decorrelation terms and a list of decorrelation deltas per decorrelation pass term[pass] , delta[pass]""" if (actual_size_1_less == 0): passes = sub_block_size * 2 else: passes = sub_block_size * 2 - 1 if (passes > 16): raise ValueError("invalid decorrelation passes count") decorrelation_terms = [] decorrelation_deltas = [] for i in xrange(passes): decorrelation_terms.append(sub_block_data.read(5) - 5) if (not (((1 <= decorrelation_terms[-1]) and (decorrelation_terms[-1] <= 18)) or ((-3 <= decorrelation_terms[-1]) and (decorrelation_terms[-1] <= -1)))): raise ValueError("invalid decorrelation term") decorrelation_deltas.append(sub_block_data.read(3)) decorrelation_terms.reverse() decorrelation_deltas.reverse() return (decorrelation_terms, decorrelation_deltas) def read_decorrelation_weights(block_header, decorrelation_terms_count, sub_block_size, actual_size_1_less, sub_block_data): """returns one tuple of decorrelation weights per decorrelation pass the number of weights in each tuple equals the number of channels weight[pass][channel] """ if (actual_size_1_less == 0): weight_count = sub_block_size * 2 else: weight_count = sub_block_size * 2 - 1 weight_values = [] for i in xrange(weight_count): value_i = sub_block_data.read_signed(8) if (value_i > 0): weight_values.append((value_i * 2 ** 3) + ((value_i * 2 ** 3 + 2 ** 6) / 2 ** 7)) elif(value_i == 0): weight_values.append(0) else: weight_values.append(value_i * 2 ** 3) weights = [] if ((block_header.mono_output == 0) and (block_header.false_stereo == 0)): #two channels if ((weight_count / 2) > decorrelation_terms_count): raise ValueError("invalid number of decorrelation weights") for i in xrange(weight_count / 2): weights.append((weight_values[i * 2], weight_values[i * 2 + 1])) for i in xrange(weight_count / 2, decorrelation_terms_count): weights.append((0, 0)) weights.reverse() else: #one channel if (weight_count > decorrelation_terms_count): raise ValueError("invalid number of decorrelation weights") for i in xrange(weight_count): weights.append((weight_values[i], )) for i in xrange(weight_count, decorrelation_terms_count): weights.append((0, 0)) weights.reverse() return weights def read_decorrelation_samples(block_header, 
                               decorrelation_terms, sub_block_size,
                               sub_block_data):
    """returns one tuple of decorrelation samples lists
    per decorrelation pass

    sample[pass][channel][s]"""

    sub_block_bytes = sub_block_size * 2

    samples = []
    if ((block_header.mono_output == 0) and (block_header.false_stereo == 0)):
        # two channels
        for term in reversed(decorrelation_terms):
            if ((17 <= term) and (term <= 18)):
                if (sub_block_bytes >= 8):
                    samples.append(([read_exp2(sub_block_data),
                                     read_exp2(sub_block_data)],
                                    [read_exp2(sub_block_data),
                                     read_exp2(sub_block_data)]))
                    sub_block_bytes -= 8
                else:
                    samples.append(([0, 0], [0, 0]))
                    sub_block_bytes = 0
            elif ((1 <= term) and (term <= 8)):
                term_samples = ([], [])
                if (sub_block_bytes >= (term * 4)):
                    for s in xrange(term):
                        term_samples[0].append(read_exp2(sub_block_data))
                        term_samples[1].append(read_exp2(sub_block_data))
                    sub_block_bytes -= (term * 4)
                else:
                    for s in xrange(term):
                        term_samples[0].append(0)
                        term_samples[1].append(0)
                    sub_block_bytes = 0
                samples.append(term_samples)
            elif ((-3 <= term) and (term <= -1)):
                if (sub_block_bytes >= 4):
                    samples.append(([read_exp2(sub_block_data)],
                                    [read_exp2(sub_block_data)]))
                    sub_block_bytes -= 4
                else:
                    samples.append(([0], [0]))
                    sub_block_bytes = 0
            else:
                raise ValueError("invalid decorrelation term")

        samples.reverse()
        return samples
    else:
        # one channel
        for term in reversed(decorrelation_terms):
            if ((17 <= term) and (term <= 18)):
                if (sub_block_bytes >= 4):
                    samples.append(([read_exp2(sub_block_data),
                                     read_exp2(sub_block_data)],))
                    sub_block_bytes -= 4
                else:
                    samples.append(([0, 0],))
                    sub_block_bytes = 0
            elif ((1 <= term) and (term <= 8)):
                term_samples = ([],)
                if (sub_block_bytes >= (term * 2)):
                    for s in xrange(term):
                        term_samples[0].append(read_exp2(sub_block_data))
                    sub_block_bytes -= (term * 2)
                else:
                    for s in xrange(term):
                        term_samples[0].append(0)
                    sub_block_bytes = 0
                samples.append(term_samples)
            else:
                raise ValueError("invalid decorrelation term")

        samples.reverse()
        return samples


def read_entropy_variables(block_header, sub_block_data):
    entropies = ([], [])
    for i in xrange(3):
        entropies[0].append(read_exp2(sub_block_data))

    if ((block_header.mono_output == 0) and (block_header.false_stereo == 0)):
        for i in xrange(3):
            entropies[1].append(read_exp2(sub_block_data))
    else:
        entropies[1].extend([0, 0, 0])

    return entropies


def read_bitstream(block_header, entropies, sub_block_data):
    if ((block_header.mono_output == 0) and (block_header.false_stereo == 0)):
        channel_count = 2
        residuals = ([], [])
    else:
        channel_count = 1
        residuals = ([], )

    u = None
    i = 0
    while (i < (block_header.block_samples * channel_count)):
        if ((u is None) and
            (entropies[0][0] < 2) and
            (entropies[1][0] < 2)):
            # handle long run of 0 residuals
            zeroes = read_egc(sub_block_data)
            if (zeroes > 0):
                for j in xrange(zeroes):
                    residuals[i % channel_count].append(0)
                    i += 1
                entropies[0][0] = entropies[0][1] = entropies[0][2] = 0
                entropies[1][0] = entropies[1][1] = entropies[1][2] = 0
            if (i < (block_header.block_samples * channel_count)):
                (residual, u) = read_residual(
                    sub_block_data, u, entropies[i % channel_count])
                residuals[i % channel_count].append(residual)
                i += 1
        else:
            (residual, u) = read_residual(
                sub_block_data, u, entropies[i % channel_count])
            residuals[i % channel_count].append(residual)
            i += 1

    return residuals


def read_egc(reader):
    t = reader.unary(0)
    if (t > 0):
        p = reader.read(t - 1)
        return 2 ** (t - 1) + p
    else:
        return t


def read_residual(reader, last_u, entropies):
    if (last_u is None):
        u = reader.unary(0)
        if (u == 16):
            u += read_egc(reader)
        m = u / 2
    elif ((last_u % 2) == 1):
        u =
reader.unary(0) if (u == 16): u += read_egc(reader) m = (u / 2) + 1 else: u = None m = 0 if (m == 0): base = 0 add = entropies[0] >> 4 entropies[0] -= ((entropies[0] + 126) >> 7) * 2 elif (m == 1): base = (entropies[0] >> 4) + 1 add = entropies[1] >> 4 entropies[0] += ((entropies[0] + 128) >> 7) * 5 entropies[1] -= ((entropies[1] + 62) >> 6) * 2 elif (m == 2): base = ((entropies[0] >> 4) + 1) + ((entropies[1] >> 4) + 1) add = entropies[2] >> 4 entropies[0] += ((entropies[0] + 128) >> 7) * 5 entropies[1] += ((entropies[1] + 64) >> 6) * 5 entropies[2] -= ((entropies[2] + 30) >> 5) * 2 else: base = (((entropies[0] >> 4) + 1) + ((entropies[1] >> 4) + 1) + (((entropies[2] >> 4) + 1) * (m - 2))) add = entropies[2] >> 4 entropies[0] += ((entropies[0] + 128) >> 7) * 5 entropies[1] += ((entropies[1] + 64) >> 6) * 5 entropies[2] += ((entropies[2] + 32) >> 5) * 5 if (add == 0): unsigned = base else: p = int(log(add) / log(2)) e = 2 ** (p + 1) - add - 1 r = reader.read(p) if (r >= e): b = reader.read(1) unsigned = base + (r * 2) - e + b else: unsigned = base + r sign = reader.read(1) if (sign == 1): return (-unsigned - 1, u) else: return (unsigned, u) def undo_joint_stereo(samples): assert(len(samples) == 2) assert(len(samples[0]) == len(samples[1])) stereo = [[], []] for (mid, side) in zip(*samples): right = side - (mid >> 1) left = mid + right stereo[0].append(left) stereo[1].append(right) return stereo def read_extended_integers(sub_block_data): (sent_bits, zero_bits, one_bits, duplicate_bits) = sub_block_data.parse("8u 8u 8u 8u") return (zero_bits, one_bits, duplicate_bits) def undo_extended_integers(zero_bits, one_bits, duplicate_bits, channels): un_shifted = [] for channel in channels: if (zero_bits > 0): un_shifted.append([s << zero_bits for s in channel]) elif (one_bits > 0): ones = (1 << one_bits) - 1 un_shifted.append([(s << one_bits) + ones for s in channel]) elif (duplicate_bits > 0): dupes = [] ones = (1 << duplicate_bits) - 1 for s in channel: if ((s % 2) == 0): dupes.append(s << duplicate_bits) else: dupes.append((s << duplicate_bits) + ones) un_shifted.append(dupes) else: un_shifted.append(channel) return tuple(un_shifted) EXP2 = [0x100, 0x101, 0x101, 0x102, 0x103, 0x103, 0x104, 0x105, 0x106, 0x106, 0x107, 0x108, 0x108, 0x109, 0x10a, 0x10b, 0x10b, 0x10c, 0x10d, 0x10e, 0x10e, 0x10f, 0x110, 0x110, 0x111, 0x112, 0x113, 0x113, 0x114, 0x115, 0x116, 0x116, 0x117, 0x118, 0x119, 0x119, 0x11a, 0x11b, 0x11c, 0x11d, 0x11d, 0x11e, 0x11f, 0x120, 0x120, 0x121, 0x122, 0x123, 0x124, 0x124, 0x125, 0x126, 0x127, 0x128, 0x128, 0x129, 0x12a, 0x12b, 0x12c, 0x12c, 0x12d, 0x12e, 0x12f, 0x130, 0x130, 0x131, 0x132, 0x133, 0x134, 0x135, 0x135, 0x136, 0x137, 0x138, 0x139, 0x13a, 0x13a, 0x13b, 0x13c, 0x13d, 0x13e, 0x13f, 0x140, 0x141, 0x141, 0x142, 0x143, 0x144, 0x145, 0x146, 0x147, 0x148, 0x148, 0x149, 0x14a, 0x14b, 0x14c, 0x14d, 0x14e, 0x14f, 0x150, 0x151, 0x151, 0x152, 0x153, 0x154, 0x155, 0x156, 0x157, 0x158, 0x159, 0x15a, 0x15b, 0x15c, 0x15d, 0x15e, 0x15e, 0x15f, 0x160, 0x161, 0x162, 0x163, 0x164, 0x165, 0x166, 0x167, 0x168, 0x169, 0x16a, 0x16b, 0x16c, 0x16d, 0x16e, 0x16f, 0x170, 0x171, 0x172, 0x173, 0x174, 0x175, 0x176, 0x177, 0x178, 0x179, 0x17a, 0x17b, 0x17c, 0x17d, 0x17e, 0x17f, 0x180, 0x181, 0x182, 0x183, 0x184, 0x185, 0x187, 0x188, 0x189, 0x18a, 0x18b, 0x18c, 0x18d, 0x18e, 0x18f, 0x190, 0x191, 0x192, 0x193, 0x195, 0x196, 0x197, 0x198, 0x199, 0x19a, 0x19b, 0x19c, 0x19d, 0x19f, 0x1a0, 0x1a1, 0x1a2, 0x1a3, 0x1a4, 0x1a5, 0x1a6, 0x1a8, 0x1a9, 0x1aa, 0x1ab, 0x1ac, 0x1ad, 0x1af, 0x1b0, 0x1b1, 0x1b2, 0x1b3, 
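       # (Illustrative note on the table above: EXP2 holds 256 entries of
       #  roughly 2 ** (i / 256) in 8.8 fixed point, running from 0x100
       #  (1.0) up to 0x1ff (just under 2.0).  read_exp2() below indexes it
       #  with the low byte of a signed 16 bit value and uses the high byte
       #  as a shift, turning a log2-encoded value back into a linear one.)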
0x1b4, 0x1b6, 0x1b7, 0x1b8, 0x1b9, 0x1ba, 0x1bc, 0x1bd, 0x1be, 0x1bf, 0x1c0, 0x1c2, 0x1c3, 0x1c4, 0x1c5, 0x1c6, 0x1c8, 0x1c9, 0x1ca, 0x1cb, 0x1cd, 0x1ce, 0x1cf, 0x1d0, 0x1d2, 0x1d3, 0x1d4, 0x1d6, 0x1d7, 0x1d8, 0x1d9, 0x1db, 0x1dc, 0x1dd, 0x1de, 0x1e0, 0x1e1, 0x1e2, 0x1e4, 0x1e5, 0x1e6, 0x1e8, 0x1e9, 0x1ea, 0x1ec, 0x1ed, 0x1ee, 0x1f0, 0x1f1, 0x1f2, 0x1f4, 0x1f5, 0x1f6, 0x1f8, 0x1f9, 0x1fa, 0x1fc, 0x1fd, 0x1ff] def read_exp2(reader): value = reader.read_signed(16) if ((-32768 <= value) and (value < -2304)): return -(EXP2[-value & 0xFF] << ((-value >> 8) - 9)) elif ((-2304 <= value) and (value < 0)): return -(EXP2[-value & 0xFF] >> (9 - (-value >> 8))) elif ((0 <= value) and (value <= 2304)): return EXP2[value & 0xFF] >> (9 - (value >> 8)) elif ((2304 < value) and (value <= 32767)): return EXP2[value & 0xFF] << ((value >> 8) - 9) def decorrelate_channels(residuals, decorrelation_terms, decorrelation_deltas, decorrelation_weights, decorrelation_samples): """returns a tuple of 1 or 2 lists of decorrelated channel data""" if (len(residuals) == 2): latest_pass = [r[:] for r in residuals] for (term, delta, weights, samples) in zip(decorrelation_terms, decorrelation_deltas, decorrelation_weights, decorrelation_samples): latest_pass = decorrelation_pass_2ch(latest_pass, term, delta, weights, samples) return latest_pass else: latest_pass = residuals[0][:] for (term, delta, weight, samples) in zip(decorrelation_terms, decorrelation_deltas, decorrelation_weights, decorrelation_samples): latest_pass = decorrelation_pass_1ch(latest_pass, term, delta, weight[0], samples[0]) return (latest_pass, ) def decorrelation_pass_1ch(correlated_samples, term, delta, weight, decorrelation_samples): if (term == 18): assert(len(decorrelation_samples) == 2) decorrelated = decorrelation_samples[:] decorrelated.reverse() for i in xrange(len(correlated_samples)): temp = (3 * decorrelated[i + 1] - decorrelated[i]) / 2 decorrelated.append(apply_weight(weight, temp) + correlated_samples[i]) weight += update_weight(temp, correlated_samples[i], delta) return decorrelated[2:] elif (term == 17): assert(len(decorrelation_samples) == 2) decorrelated = decorrelation_samples[:] decorrelated.reverse() for i in xrange(len(correlated_samples)): temp = 2 * decorrelated[i + 1] - decorrelated[i] decorrelated.append(apply_weight(weight, temp) + correlated_samples[i]) weight += update_weight(temp, correlated_samples[i], delta) return decorrelated[2:] elif ((1 <= term) and (term <= 8)): assert(len(decorrelation_samples) == term) decorrelated = decorrelation_samples[:] for i in xrange(len(correlated_samples)): decorrelated.append(apply_weight(weight, decorrelated[i]) + correlated_samples[i]) weight += update_weight(decorrelated[i], correlated_samples[i], delta) return decorrelated[term:] else: raise ValueError("unsupported term") def decorrelation_pass_2ch(correlated, term, delta, weights, decorrelation_samples): assert(len(correlated) == 2) assert(len(correlated[0]) == len(correlated[1])) assert(len(weights) == 2) if (((17 <= term) and (term <= 18)) or ((1 <= term) and (term <= 8))): return (decorrelation_pass_1ch(correlated[0], term, delta, weights[0], decorrelation_samples[0]), decorrelation_pass_1ch(correlated[1], term, delta, weights[1], decorrelation_samples[1])) elif ((-3 <= term) and (term <= -1)): assert(len(decorrelation_samples[0]) == 1) decorrelated = ([decorrelation_samples[1][0]], [decorrelation_samples[0][0]]) weights = list(weights) if (term == -1): for i in xrange(len(correlated[0])): 
decorrelated[0].append(apply_weight(weights[0], decorrelated[1][i]) + correlated[0][i]) decorrelated[1].append(apply_weight(weights[1], decorrelated[0][i + 1]) + correlated[1][i]) weights[0] += update_weight(decorrelated[1][i], correlated[0][i], delta) weights[1] += update_weight(decorrelated[0][i + 1], correlated[1][i], delta) weights[0] = max(min(weights[0], 1024), -1024) weights[1] = max(min(weights[1], 1024), -1024) elif (term == -2): for i in xrange(len(correlated[0])): decorrelated[1].append(apply_weight(weights[1], decorrelated[0][i]) + correlated[1][i]) decorrelated[0].append(apply_weight(weights[0], decorrelated[1][i + 1]) + correlated[0][i]) weights[1] += update_weight(decorrelated[0][i], correlated[1][i], delta) weights[0] += update_weight(decorrelated[1][i + 1], correlated[0][i], delta) weights[1] = max(min(weights[1], 1024), -1024) weights[0] = max(min(weights[0], 1024), -1024) elif (term == -3): for i in xrange(len(correlated[0])): decorrelated[0].append(apply_weight(weights[0], decorrelated[1][i]) + correlated[0][i]) decorrelated[1].append(apply_weight(weights[1], decorrelated[0][i]) + correlated[1][i]) weights[0] += update_weight(decorrelated[1][i], correlated[0][i], delta) weights[1] += update_weight(decorrelated[0][i], correlated[1][i], delta) weights[0] = max(min(weights[0], 1024), -1024) weights[1] = max(min(weights[1], 1024), -1024) assert(len(decorrelated[0]) == len(decorrelated[1])) return (decorrelated[0][1:], decorrelated[1][1:]) else: raise ValueError("unsupported term") def apply_weight(weight, sample): return ((weight * sample) + 512) >> 10 def update_weight(source, result, delta): if ((source == 0) or (result == 0)): return 0 elif ((source ^ result) >= 0): return delta else: return -delta def calculate_crc(samples): crc = 0xFFFFFFFF for frame in zip(*samples): for s in frame: crc = 3 * crc + s if (crc >= 0): return crc % 0x100000000 else: return (2 ** 32 - (-crc)) % 0x100000000
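
# A minimal smoke test of the helpers above (an editor's sketch, not part
# of the original decoder; the sample values are made up).  It exercises
# the mid/side inverse, one weighted prediction step and the running CRC.
if __name__ == "__main__":
    mids = [2, 4, 6]
    sides = [1, 1, 1]
    (left, right) = undo_joint_stereo([mids, sides])
    # right = side - (mid >> 1) and left = mid + right, so:
    assert left == [2, 3, 4]
    assert right == [0, -1, -2]

    weight = 16
    predicted = apply_weight(weight, 1000)       # (16 * 1000 + 512) >> 10 == 16
    weight += update_weight(1000, predicted, 2)  # matching signs -> +delta
    assert (predicted, weight) == (16, 18)

    print("crc = 0x%8.8X" % (calculate_crc((left, right)),))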
R-a-dio/python-audio-tools
audiotools/py_decoders/wavpack.py
Python
gpl-2.0
38,340
[ "Brian" ]
c96418e9f675acdf43103b09d566b1d0c9ebb16608c10b6b2174df75d4e2043a
# This file is part of the Minecraft Overviewer. # # Minecraft Overviewer is free software: you can redistribute it and/or # modify it under the terms of the GNU General Public License as published # by the Free Software Foundation, either version 3 of the License, or (at # your option) any later version. # # Minecraft Overviewer is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with the Overviewer. If not, see <http://www.gnu.org/licenses/>. from collections import OrderedDict import sys import imp import os import os.path import zipfile from io import BytesIO import math from random import randint import numpy from PIL import Image, ImageEnhance, ImageOps, ImageDraw import logging import functools from . import util BLOCKTEX = "assets/minecraft/textures/block/" # global variables to collate information in @material decorators blockmap_generators = {} known_blocks = set() used_datas = set() max_blockid = 0 max_data = 0 transparent_blocks = set() solid_blocks = set() fluid_blocks = set() nospawn_blocks = set() nodata_blocks = set() # This is here for circular import reasons. # Please don't ask, I choose to repress these memories. # ... okay fine I'll tell you. # Initialising the C extension requires access to the globals above. # Due to the circular import, this wouldn't work, unless we reload the # module in the C extension or just move the import below its dependencies. from .c_overviewer import alpha_over class TextureException(Exception): "To be thrown when a texture is not found." pass color_map = ["white", "orange", "magenta", "light_blue", "yellow", "lime", "pink", "gray", "light_gray", "cyan", "purple", "blue", "brown", "green", "red", "black"] ## ## Textures object ## class Textures(object): """An object that generates a set of block sprites to use while rendering. It accepts a background color, north direction, and local textures path. 
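
    A minimal usage sketch (editor's illustration; the resource-pack path
    is an assumption):

        t = Textures(texturepath="my_resource_pack.zip")
        t.generate()
        img, mask = t.blockmap[1 * max_data + 0]    # stone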
""" def __init__(self, texturepath=None, bgcolor=(26, 26, 26, 0), northdirection=0): self.bgcolor = bgcolor self.rotation = northdirection self.find_file_local_path = texturepath # not yet configurable self.texture_size = 24 self.texture_dimensions = (self.texture_size, self.texture_size) # this is set in in generate() self.generated = False # see load_image_texture() self.texture_cache = {} # once we find a jarfile that contains a texture, we cache the ZipFile object here self.jars = OrderedDict() ## ## pickle support ## def __getstate__(self): # we must get rid of the huge image lists, and other images attributes = self.__dict__.copy() for attr in ['blockmap', 'biome_grass_texture', 'watertexture', 'lavatexture', 'firetexture', 'portaltexture', 'lightcolor', 'grasscolor', 'foliagecolor', 'watercolor', 'texture_cache']: try: del attributes[attr] except KeyError: pass attributes['jars'] = OrderedDict() return attributes def __setstate__(self, attrs): # regenerate textures, if needed for attr, val in list(attrs.items()): setattr(self, attr, val) self.texture_cache = {} if self.generated: self.generate() ## ## The big one: generate() ## def generate(self): # Make sure we have the foliage/grasscolor images available try: self.load_foliage_color() self.load_grass_color() except TextureException as e: logging.error( "Your system is missing either assets/minecraft/textures/colormap/foliage.png " "or assets/minecraft/textures/colormap/grass.png. Either complement your " "resource pack with these texture files, or install the vanilla Minecraft " "client to use as a fallback.") raise e # generate biome grass mask self.biome_grass_texture = self.build_block(self.load_image_texture("assets/minecraft/textures/block/grass_block_top.png"), self.load_image_texture("assets/minecraft/textures/block/grass_block_side_overlay.png")) # generate the blocks global blockmap_generators global known_blocks, used_datas self.blockmap = [None] * max_blockid * max_data for (blockid, data), texgen in list(blockmap_generators.items()): tex = texgen(self, blockid, data) self.blockmap[blockid * max_data + data] = self.generate_texture_tuple(tex) if self.texture_size != 24: # rescale biome grass self.biome_grass_texture = self.biome_grass_texture.resize(self.texture_dimensions, Image.ANTIALIAS) # rescale the rest for i, tex in enumerate(blockmap): if tex is None: continue block = tex[0] scaled_block = block.resize(self.texture_dimensions, Image.ANTIALIAS) blockmap[i] = self.generate_texture_tuple(scaled_block) self.generated = True ## ## Helpers for opening textures ## def find_file(self, filename, mode="rb", verbose=False): """Searches for the given file and returns an open handle to it. This searches the following locations in this order: * In the directory textures_path given in the initializer if not already open * In an already open resource pack or client jar file * In the resource pack given by textures_path * The program dir (same dir as overviewer.py) for extracted textures * On Darwin, in /Applications/Minecraft for extracted textures * Inside a minecraft client jar. 
          Client jars are searched for in the following locations, depending
          on platform:

          * On Windows, at %APPDATA%/.minecraft/versions/
          * On Darwin, at
            $HOME/Library/Application Support/minecraft/versions
          * at $HOME/.minecraft/versions/

          Only the latest non-snapshot version >= 1.8 is used

        * The overviewer_core/data/textures dir

        """
        if verbose: logging.info("Starting search for {0}".format(filename))

        # Look for the file stored with the overviewer installation. We
        # include a few files that aren't included with Minecraft
        # textures. This used to be for things such as water and lava, since
        # they were generated by the game and not stored as images. Nowadays I
        # believe that's not true, but we still have a few files distributed
        # with overviewer.
        # Do this first so we don't try all .jar files for stuff like
        # "water.png"
        programdir = util.get_program_path()
        if verbose: logging.info("Looking for texture in overviewer_core/data/textures")
        path = os.path.join(programdir, "overviewer_core", "data", "textures", filename)
        if os.path.isfile(path):
            if verbose: logging.info("Found %s in '%s'", filename, path)
            return open(path, mode)
        elif hasattr(sys, "frozen") or imp.is_frozen("__main__"):
            # windows special case, when the package dir doesn't exist
            path = os.path.join(programdir, "textures", filename)
            if os.path.isfile(path):
                if verbose: logging.info("Found %s in '%s'", filename, path)
                return open(path, mode)

        # A texture path was given on the command line. Search this location
        # for the file first.
        if self.find_file_local_path:
            if (self.find_file_local_path not in self.jars
                    and os.path.isfile(self.find_file_local_path)):
                # Must be a resource pack. Look for the requested file within
                # it.
                try:
                    pack = zipfile.ZipFile(self.find_file_local_path)
                    # pack.getinfo() will raise KeyError if the file is
                    # not found.
                    pack.getinfo(filename)
                    if verbose: logging.info("Found %s in '%s'", filename,
                                             self.find_file_local_path)
                    self.jars[self.find_file_local_path] = pack
                    # ok cool now move this to the start so we pick it first
                    self.jars.move_to_end(self.find_file_local_path, last=False)
                    return pack.open(filename)
                except (zipfile.BadZipfile, KeyError, IOError):
                    pass
            elif os.path.isdir(self.find_file_local_path):
                full_path = os.path.join(self.find_file_local_path, filename)
                if os.path.isfile(full_path):
                    if verbose: logging.info("Found %s in '%s'", filename, full_path)
                    return open(full_path, mode)

        # We already have some jars open, better use them.
        if len(self.jars) > 0:
            for jarpath in self.jars:
                try:
                    jar = self.jars[jarpath]
                    jar.getinfo(filename)
                    if verbose: logging.info("Found (cached) %s in '%s'", filename,
                                             jarpath)
                    return jar.open(filename)
                except (KeyError, IOError) as e:
                    pass

        # If we haven't returned at this point, then the requested file was NOT
        # found in the user-specified texture path or resource pack.
if verbose: logging.info("Did not find the file in specified texture path") # Look in the location of the overviewer executable for the given path path = os.path.join(programdir, filename) if os.path.isfile(path): if verbose: logging.info("Found %s in '%s'", filename, path) return open(path, mode) if sys.platform.startswith("darwin"): path = os.path.join("/Applications/Minecraft", filename) if os.path.isfile(path): if verbose: logging.info("Found %s in '%s'", filename, path) return open(path, mode) if verbose: logging.info("Did not find the file in overviewer executable directory") if verbose: logging.info("Looking for installed minecraft jar files...") # Find an installed minecraft client jar and look in it for the texture # file we need. versiondir = "" if "APPDATA" in os.environ and sys.platform.startswith("win"): versiondir = os.path.join(os.environ['APPDATA'], ".minecraft", "versions") elif "HOME" in os.environ: # For linux: versiondir = os.path.join(os.environ['HOME'], ".minecraft", "versions") if not os.path.exists(versiondir) and sys.platform.startswith("darwin"): # For Mac: versiondir = os.path.join(os.environ['HOME'], "Library", "Application Support", "minecraft", "versions") try: if verbose: logging.info("Looking in the following directory: \"%s\"" % versiondir) versions = os.listdir(versiondir) if verbose: logging.info("Found these versions: {0}".format(versions)) except OSError: # Directory doesn't exist? Ignore it. It will find no versions and # fall through the checks below to the error at the bottom of the # method. versions = [] available_versions = [] for version in versions: # Look for the latest non-snapshot that is at least 1.8. This # version is only compatible with >=1.8, and we cannot in general # tell if a snapshot is more or less recent than a release. # Allow two component names such as "1.8" and three component names # such as "1.8.1" if version.count(".") not in (1,2): continue try: versionparts = [int(x) for x in version.split(".")] except ValueError: continue if versionparts < [1,8]: continue available_versions.append(versionparts) available_versions.sort(reverse=True) if not available_versions: if verbose: logging.info("Did not find any non-snapshot minecraft jars >=1.8.0") while(available_versions): most_recent_version = available_versions.pop(0) if verbose: logging.info("Trying {0}. Searching it for the file...".format(".".join(str(x) for x in most_recent_version))) jarname = ".".join(str(x) for x in most_recent_version) jarpath = os.path.join(versiondir, jarname, jarname + ".jar") if os.path.isfile(jarpath): try: jar = zipfile.ZipFile(jarpath) jar.getinfo(filename) if verbose: logging.info("Found %s in '%s'", filename, jarpath) self.jars[jarpath] = jar return jar.open(filename) except (KeyError, IOError) as e: pass except (zipfile.BadZipFile) as e: logging.warning("Your jar {0} is corrupted, I'll be skipping it, but you " "should probably look into that.".format(jarpath)) if verbose: logging.info("Did not find file {0} in jar {1}".format(filename, jarpath)) raise TextureException("Could not find the textures while searching for '{0}'. 
Try specifying the 'texturepath' option in your config file.\nSet it to the path to a Minecraft Resource pack.\nAlternately, install the Minecraft client (which includes textures)\nAlso see <http://docs.overviewer.org/en/latest/running/#installing-the-textures>\n(Remember, this version of Overviewer requires a 1.17-compatible resource pack)\n(Also note that I won't automatically use snapshots; you'll have to use the texturepath option to use a snapshot jar)".format(filename)) def load_image_texture(self, filename): # Textures may be animated or in a different resolution than 16x16. # This method will always return a 16x16 image img = self.load_image(filename) w,h = img.size if w != h: img = img.crop((0,0,w,w)) if w != 16: img = img.resize((16, 16), Image.ANTIALIAS) self.texture_cache[filename] = img return img def load_image(self, filename): """Returns an image object""" try: img = self.texture_cache[filename] if isinstance(img, Exception): # Did we cache an exception? raise img # Okay then, raise it. return img except KeyError: pass try: fileobj = self.find_file(filename, verbose=logging.getLogger().isEnabledFor(logging.DEBUG)) except (TextureException, IOError) as e: # We cache when our good friend find_file can't find # a texture, so that we do not repeatedly search for it. self.texture_cache[filename] = e raise e buffer = BytesIO(fileobj.read()) try: img = Image.open(buffer).convert("RGBA") except IOError: raise TextureException("The texture {} appears to be corrupted. Please fix it. Run " "Overviewer in verbose mode (-v) to find out where I loaded " "that file from.".format(filename)) self.texture_cache[filename] = img return img def load_water(self): """Special-case function for loading water.""" watertexture = getattr(self, "watertexture", None) if watertexture: return watertexture watertexture = self.load_image_texture("assets/minecraft/textures/block/water_still.png") self.watertexture = watertexture return watertexture def load_lava(self): """Special-case function for loading lava.""" lavatexture = getattr(self, "lavatexture", None) if lavatexture: return lavatexture lavatexture = self.load_image_texture("assets/minecraft/textures/block/lava_still.png") self.lavatexture = lavatexture return lavatexture def load_portal(self): """Special-case function for loading portal.""" portaltexture = getattr(self, "portaltexture", None) if portaltexture: return portaltexture portaltexture = self.load_image_texture("assets/minecraft/textures/block/nether_portal.png") self.portaltexture = portaltexture return portaltexture def load_light_color(self): """Helper function to load the light color texture.""" if hasattr(self, "lightcolor"): return self.lightcolor try: lightcolor = list(self.load_image("light_normal.png").getdata()) except Exception: logging.warning("Light color image could not be found.") lightcolor = None self.lightcolor = lightcolor return lightcolor def load_grass_color(self): """Helper function to load the grass color texture.""" if not hasattr(self, "grasscolor"): self.grasscolor = list(self.load_image("assets/minecraft/textures/colormap/grass.png").getdata()) return self.grasscolor def load_foliage_color(self): """Helper function to load the foliage color texture.""" if not hasattr(self, "foliagecolor"): self.foliagecolor = list(self.load_image("assets/minecraft/textures/colormap/foliage.png").getdata()) return self.foliagecolor #I guess "watercolor" is wrong. But I can't correct as my texture pack don't define water color. 
    def load_water_color(self):
        """Helper function to load the water color texture."""
        if not hasattr(self, "watercolor"):
            self.watercolor = list(self.load_image("watercolor.png").getdata())
        return self.watercolor

    def _split_terrain(self, terrain):
        """Builds and returns a length 256 array of each 16x16 chunk
        of texture.
        """
        textures = []
        (terrain_width, terrain_height) = terrain.size
        texture_resolution = terrain_width / 16
        for y in range(16):
            for x in range(16):
                left = x * texture_resolution
                upper = y * texture_resolution
                right = left + texture_resolution
                lower = upper + texture_resolution
                region = terrain.transform(
                    (16, 16), Image.EXTENT, (left, upper, right, lower),
                    Image.BICUBIC)
                textures.append(region)

        return textures

    ##
    ## Image Transformation Functions
    ##

    @staticmethod
    def transform_image_top(img):
        """Takes a PIL image and rotates it left 45 degrees and shrinks the y
        axis by a factor of 2. Returns the resulting image, which will be
        24x12 pixels

        """
        # Resize to 17x17, since the diagonal is approximately 24 pixels, a
        # nice even number that can be split in half twice
        img = img.resize((17, 17), Image.ANTIALIAS)

        # Build the Affine transformation matrix for this perspective
        transform = numpy.matrix(numpy.identity(3))
        # Translate up and left, since rotations are about the origin
        transform *= numpy.matrix([[1,0,8.5],[0,1,8.5],[0,0,1]])
        # Rotate 45 degrees
        ratio = math.cos(math.pi/4)
        #transform *= numpy.matrix("[0.707,-0.707,0;0.707,0.707,0;0,0,1]")
        transform *= numpy.matrix([[ratio,-ratio,0],[ratio,ratio,0],[0,0,1]])
        # Translate back down and right
        transform *= numpy.matrix([[1,0,-12],[0,1,-12],[0,0,1]])
        # scale the image down by a factor of 2
        transform *= numpy.matrix("[1,0,0;0,2,0;0,0,1]")

        transform = numpy.array(transform)[:2,:].ravel().tolist()

        newimg = img.transform((24,12), Image.AFFINE, transform)
        return newimg

    @staticmethod
    def transform_image_side(img):
        """Takes an image and shears it for the left side of the cube
        (reflect for the right side)"""
        # Size of the cube side before shear
        img = img.resize((12,12), Image.ANTIALIAS)

        # Apply shear
        transform = numpy.matrix(numpy.identity(3))
        transform *= numpy.matrix("[1,0,0;-0.5,1,0;0,0,1]")

        transform = numpy.array(transform)[:2,:].ravel().tolist()

        newimg = img.transform((12,18), Image.AFFINE, transform)
        return newimg

    @staticmethod
    def transform_image_slope(img):
        """Takes an image and shears it in the shape of a slope going up
        in the -y direction (reflect for +x direction). Used for minetracks"""
        # Take the same size as transform_image_side
        img = img.resize((12,12), Image.ANTIALIAS)

        # Apply shear
        transform = numpy.matrix(numpy.identity(3))
        transform *= numpy.matrix("[0.75,-0.5,3;0.25,0.5,-3;0,0,1]")

        transform = numpy.array(transform)[:2,:].ravel().tolist()

        newimg = img.transform((24,24), Image.AFFINE, transform)
        return newimg

    @staticmethod
    def transform_image_angle(img, angle):
        """Takes an image and shears it at an arbitrary angle with the axis
        of rotation being vertical.

        WARNING! Don't use angle = pi/2 (or multiples of it), it will return
        a blank image (or maybe garbage).

        NOTE: angle is in the image not in game, so for the left side of a
        block angle = 30 degrees.
        """
        # Take the same size as transform_image_side
        img = img.resize((12,12), Image.ANTIALIAS)

        # some values
        cos_angle = math.cos(angle)
        sin_angle = math.sin(angle)

        # function_x and function_y are used to keep the result image in the
        # same position, and constant_x and constant_y are the coordinates
        # for the center for angle = 0.
        constant_x = 6.
        constant_y = 6.
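        # (Editor's aside on the transform family above: transform_image_top
        #  resizes to 17x17 because the 45-degree diagonal of a 17px square
        #  is about 24px (17 * sqrt(2) ~= 24.04), which is why the projected
        #  top face lands exactly in a 24x12 box once the y axis is halved.)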
function_x = 6.*(1-cos_angle) function_y = -6*sin_angle big_term = ( (sin_angle * (function_x + constant_x)) - cos_angle* (function_y + constant_y))/cos_angle # The numpy array is not really used, but is helpful to # see the matrix used for the transformation. transform = numpy.array([[1./cos_angle, 0, -(function_x + constant_x)/cos_angle], [-sin_angle/(cos_angle), 1., big_term ], [0, 0, 1.]]) transform = tuple(transform[0]) + tuple(transform[1]) newimg = img.transform((24,24), Image.AFFINE, transform) return newimg def build_block(self, top, side): """From a top texture and a side texture, build a block image. top and side should be 16x16 image objects. Returns a 24x24 image """ img = Image.new("RGBA", (24,24), self.bgcolor) original_texture = top.copy() top = self.transform_image_top(top) if not side: alpha_over(img, top, (0,0), top) return img side = self.transform_image_side(side) otherside = side.transpose(Image.FLIP_LEFT_RIGHT) # Darken the sides slightly. These methods also affect the alpha layer, # so save them first (we don't want to "darken" the alpha layer making # the block transparent) sidealpha = side.split()[3] side = ImageEnhance.Brightness(side).enhance(0.9) side.putalpha(sidealpha) othersidealpha = otherside.split()[3] otherside = ImageEnhance.Brightness(otherside).enhance(0.8) otherside.putalpha(othersidealpha) alpha_over(img, top, (0,0), top) alpha_over(img, side, (0,6), side) alpha_over(img, otherside, (12,6), otherside) # Manually touch up 6 pixels that leave a gap because of how the # shearing works out. This makes the blocks perfectly tessellate-able for x,y in [(13,23), (17,21), (21,19)]: # Copy a pixel to x,y from x-1,y img.putpixel((x,y), img.getpixel((x-1,y))) for x,y in [(3,4), (7,2), (11,0)]: # Copy a pixel to x,y from x+1,y img.putpixel((x,y), img.getpixel((x+1,y))) return img def build_slab_block(self, top, side, upper): """From a top texture and a side texture, build a slab block image. top and side should be 16x16 image objects. Returns a 24x24 image """ # cut the side texture in half mask = side.crop((0,8,16,16)) side = Image.new(side.mode, side.size, self.bgcolor) alpha_over(side, mask,(0,0,16,8), mask) # plain slab top = self.transform_image_top(top) side = self.transform_image_side(side) otherside = side.transpose(Image.FLIP_LEFT_RIGHT) sidealpha = side.split()[3] side = ImageEnhance.Brightness(side).enhance(0.9) side.putalpha(sidealpha) othersidealpha = otherside.split()[3] otherside = ImageEnhance.Brightness(otherside).enhance(0.8) otherside.putalpha(othersidealpha) # upside down slab delta = 0 if upper: delta = 6 img = Image.new("RGBA", (24,24), self.bgcolor) alpha_over(img, side, (0,12 - delta), side) alpha_over(img, otherside, (12,12 - delta), otherside) alpha_over(img, top, (0,6 - delta), top) # Manually touch up 6 pixels that leave a gap because of how the # shearing works out. 
This makes the blocks perfectly tessellate-able if upper: for x,y in [(3,4), (7,2), (11,0)]: # Copy a pixel to x,y from x+1,y img.putpixel((x,y), img.getpixel((x+1,y))) for x,y in [(13,17), (17,15), (21,13)]: # Copy a pixel to x,y from x-1,y img.putpixel((x,y), img.getpixel((x-1,y))) else: for x,y in [(3,10), (7,8), (11,6)]: # Copy a pixel to x,y from x+1,y img.putpixel((x,y), img.getpixel((x+1,y))) for x,y in [(13,23), (17,21), (21,19)]: # Copy a pixel to x,y from x-1,y img.putpixel((x,y), img.getpixel((x-1,y))) return img def build_full_block(self, top, side1, side2, side3, side4, bottom=None): """From a top texture, a bottom texture and 4 different side textures, build a full block with four differnts faces. All images should be 16x16 image objects. Returns a 24x24 image. Can be used to render any block. side1 is in the -y face of the cube (top left, east) side2 is in the +x (top right, south) side3 is in the -x (bottom left, north) side4 is in the +y (bottom right, west) A non transparent block uses top, side 3 and side 4. If top is a tuple then first item is the top image and the second item is an increment (integer) from 0 to 16 (pixels in the original minecraft texture). This increment will be used to crop the side images and to paste the top image increment pixels lower, so if you use an increment of 8, it will draw a half-block. NOTE: this method uses the bottom of the texture image (as done in minecraft with beds and cakes) """ increment = 0 if isinstance(top, tuple): increment = int(round((top[1] / 16.)*12.)) # range increment in the block height in pixels (half texture size) crop_height = increment top = top[0] if side1 is not None: side1 = side1.copy() ImageDraw.Draw(side1).rectangle((0, 0,16,crop_height),outline=(0,0,0,0),fill=(0,0,0,0)) if side2 is not None: side2 = side2.copy() ImageDraw.Draw(side2).rectangle((0, 0,16,crop_height),outline=(0,0,0,0),fill=(0,0,0,0)) if side3 is not None: side3 = side3.copy() ImageDraw.Draw(side3).rectangle((0, 0,16,crop_height),outline=(0,0,0,0),fill=(0,0,0,0)) if side4 is not None: side4 = side4.copy() ImageDraw.Draw(side4).rectangle((0, 0,16,crop_height),outline=(0,0,0,0),fill=(0,0,0,0)) img = Image.new("RGBA", (24,24), self.bgcolor) # first back sides if side1 is not None : side1 = self.transform_image_side(side1) side1 = side1.transpose(Image.FLIP_LEFT_RIGHT) # Darken this side. sidealpha = side1.split()[3] side1 = ImageEnhance.Brightness(side1).enhance(0.9) side1.putalpha(sidealpha) alpha_over(img, side1, (0,0), side1) if side2 is not None : side2 = self.transform_image_side(side2) # Darken this side. sidealpha2 = side2.split()[3] side2 = ImageEnhance.Brightness(side2).enhance(0.8) side2.putalpha(sidealpha2) alpha_over(img, side2, (12,0), side2) if bottom is not None : bottom = self.transform_image_top(bottom) alpha_over(img, bottom, (0,12), bottom) # front sides if side3 is not None : side3 = self.transform_image_side(side3) # Darken this side sidealpha = side3.split()[3] side3 = ImageEnhance.Brightness(side3).enhance(0.9) side3.putalpha(sidealpha) alpha_over(img, side3, (0,6), side3) if side4 is not None : side4 = self.transform_image_side(side4) side4 = side4.transpose(Image.FLIP_LEFT_RIGHT) # Darken this side sidealpha = side4.split()[3] side4 = ImageEnhance.Brightness(side4).enhance(0.8) side4.putalpha(sidealpha) alpha_over(img, side4, (12,6), side4) if top is not None : top = self.transform_image_top(top) alpha_over(img, top, (0, increment), top) # Manually touch up 6 pixels that leave a gap because of how the # shearing works out. 
This makes the blocks perfectly tessellate-able for x,y in [(13,23), (17,21), (21,19)]: # Copy a pixel to x,y from x-1,y img.putpixel((x,y), img.getpixel((x-1,y))) for x,y in [(3,4), (7,2), (11,0)]: # Copy a pixel to x,y from x+1,y img.putpixel((x,y), img.getpixel((x+1,y))) return img def build_sprite(self, side): """From a side texture, create a sprite-like texture such as those used for spiderwebs or flowers.""" img = Image.new("RGBA", (24,24), self.bgcolor) side = self.transform_image_side(side) otherside = side.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, side, (6,3), side) alpha_over(img, otherside, (6,3), otherside) return img def build_billboard(self, tex): """From a texture, create a billboard-like texture such as those used for tall grass or melon stems. """ img = Image.new("RGBA", (24,24), self.bgcolor) front = tex.resize((14, 12), Image.ANTIALIAS) alpha_over(img, front, (5,9)) return img def generate_opaque_mask(self, img): """ Takes the alpha channel of the image and generates a mask (used for lighting the block) that deprecates values of alpha smallers than 50, and sets every other value to 255. """ alpha = img.split()[3] return alpha.point(lambda a: int(min(a, 25.5) * 10)) def tint_texture(self, im, c): # apparently converting to grayscale drops the alpha channel? i = ImageOps.colorize(ImageOps.grayscale(im), (0,0,0), c) i.putalpha(im.split()[3]); # copy the alpha band back in. assuming RGBA return i def generate_texture_tuple(self, img): """ This takes an image and returns the needed tuple for the blockmap array.""" if img is None: return None return (img, self.generate_opaque_mask(img)) ## ## The other big one: @material and associated framework ## # the material registration decorator def material(blockid=[], data=[0], **kwargs): # mapping from property name to the set to store them in properties = {"transparent" : transparent_blocks, "solid" : solid_blocks, "fluid" : fluid_blocks, "nospawn" : nospawn_blocks, "nodata" : nodata_blocks} # make sure blockid and data are iterable try: iter(blockid) except Exception: blockid = [blockid,] try: iter(data) except Exception: data = [data,] def inner_material(func): global blockmap_generators global max_data, max_blockid # create a wrapper function with a known signature @functools.wraps(func) def func_wrapper(texobj, blockid, data): return func(texobj, blockid, data) used_datas.update(data) if max(data) >= max_data: max_data = max(data) + 1 for block in blockid: # set the property sets appropriately known_blocks.update([block]) if block >= max_blockid: max_blockid = block + 1 for prop in properties: try: if block in kwargs.get(prop, []): properties[prop].update([block]) except TypeError: if kwargs.get(prop, False): properties[prop].update([block]) # populate blockmap_generators with our function for d in data: blockmap_generators[(block, d)] = func_wrapper return func_wrapper return inner_material # shortcut function for pure blocks, default to solid, nodata def block(blockid=[], top_image=None, side_image=None, **kwargs): new_kwargs = {'solid' : True, 'nodata' : True} new_kwargs.update(kwargs) if top_image is None: raise ValueError("top_image was not provided") if side_image is None: side_image = top_image @material(blockid=blockid, **new_kwargs) def inner_block(self, unused_id, unused_data): return self.build_block(self.load_image_texture(top_image), self.load_image_texture(side_image)) return inner_block # shortcut function for sprite blocks, defaults to transparent, nodata def sprite(blockid=[], imagename=None, **kwargs): 
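    # (Illustrative registration example, not in the original file: the
    #  @material framework above makes a hand-written block definition like
    #
    #      @material(blockid=999, data=[0], solid=True, nodata=True)
    #      def my_block(self, blockid, data):
    #          tex = self.load_image_texture(BLOCKTEX + "stone.png")
    #          return self.build_block(tex, tex)
    #
    #  equivalent to the block()/sprite()/billboard() shortcuts defined here;
    #  blockid 999 is a made-up example id.)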
new_kwargs = {'transparent' : True, 'nodata' : True} new_kwargs.update(kwargs) if imagename is None: raise ValueError("imagename was not provided") @material(blockid=blockid, **new_kwargs) def inner_sprite(self, unused_id, unused_data): return self.build_sprite(self.load_image_texture(imagename)) return inner_sprite # shortcut function for billboard blocks, defaults to transparent, nodata def billboard(blockid=[], imagename=None, **kwargs): new_kwargs = {'transparent' : True, 'nodata' : True} new_kwargs.update(kwargs) if imagename is None: raise ValueError("imagename was not provided") @material(blockid=blockid, **new_kwargs) def inner_billboard(self, unused_id, unused_data): return self.build_billboard(self.load_image_texture(imagename)) return inner_billboard ## ## and finally: actual texture definitions ## # stone @material(blockid=1, data=list(range(7)), solid=True) def stone(self, blockid, data): if data == 0: # regular old-school stone img = self.load_image_texture("assets/minecraft/textures/block/stone.png") elif data == 1: # granite img = self.load_image_texture("assets/minecraft/textures/block/granite.png") elif data == 2: # polished granite img = self.load_image_texture("assets/minecraft/textures/block/polished_granite.png") elif data == 3: # diorite img = self.load_image_texture("assets/minecraft/textures/block/diorite.png") elif data == 4: # polished diorite img = self.load_image_texture("assets/minecraft/textures/block/polished_diorite.png") elif data == 5: # andesite img = self.load_image_texture("assets/minecraft/textures/block/andesite.png") elif data == 6: # polished andesite img = self.load_image_texture("assets/minecraft/textures/block/polished_andesite.png") return self.build_block(img, img) @material(blockid=2, data=list(range(11))+[0x10,], solid=True) def grass(self, blockid, data): # 0x10 bit means SNOW side_img = self.load_image_texture("assets/minecraft/textures/block/grass_block_side.png") if data & 0x10: side_img = self.load_image_texture("assets/minecraft/textures/block/grass_block_snow.png") img = self.build_block(self.load_image_texture("assets/minecraft/textures/block/grass_block_top.png"), side_img) if not data & 0x10: alpha_over(img, self.biome_grass_texture, (0, 0), self.biome_grass_texture) return img # dirt @material(blockid=3, data=list(range(3)), solid=True) def dirt_blocks(self, blockid, data): texture_map = [{"top": "dirt", "side": "dirt"}, # Normal {"top": "coarse_dirt", "side": "coarse_dirt"}, # Coarse {"top": "podzol_top", "side": "podzol_side"}] # Podzol top_img = self.load_image_texture("assets/minecraft/textures/block/%s.png" % texture_map[data]["top"]) side_img = self.load_image_texture("assets/minecraft/textures/block/%s.png" % texture_map[data]["side"]) return self.build_block(top_img, side_img) # cobblestone block(blockid=4, top_image="assets/minecraft/textures/block/cobblestone.png") # wooden planks @material(blockid=5, data=list(range(8)), solid=True) def wooden_planks(self, blockid, data): if data == 0: # normal return self.build_block(self.load_image_texture("assets/minecraft/textures/block/oak_planks.png"), self.load_image_texture("assets/minecraft/textures/block/oak_planks.png")) if data == 1: # pine return self.build_block(self.load_image_texture("assets/minecraft/textures/block/spruce_planks.png"),self.load_image_texture("assets/minecraft/textures/block/spruce_planks.png")) if data == 2: # birch return 
self.build_block(self.load_image_texture("assets/minecraft/textures/block/birch_planks.png"),self.load_image_texture("assets/minecraft/textures/block/birch_planks.png")) if data == 3: # jungle wood return self.build_block(self.load_image_texture("assets/minecraft/textures/block/jungle_planks.png"),self.load_image_texture("assets/minecraft/textures/block/jungle_planks.png")) if data == 4: # acacia return self.build_block(self.load_image_texture("assets/minecraft/textures/block/acacia_planks.png"),self.load_image_texture("assets/minecraft/textures/block/acacia_planks.png")) if data == 5: # dark oak return self.build_block(self.load_image_texture("assets/minecraft/textures/block/dark_oak_planks.png"),self.load_image_texture("assets/minecraft/textures/block/dark_oak_planks.png")) if data == 6: # crimson return self.build_block(self.load_image_texture("assets/minecraft/textures/block/crimson_planks.png"),self.load_image_texture("assets/minecraft/textures/block/crimson_planks.png")) if data == 7: # warped return self.build_block(self.load_image_texture("assets/minecraft/textures/block/warped_planks.png"),self.load_image_texture("assets/minecraft/textures/block/warped_planks.png")) @material(blockid=6, data=list(range(16)), transparent=True) def saplings(self, blockid, data): # usual saplings tex = self.load_image_texture("assets/minecraft/textures/block/oak_sapling.png") if data & 0x3 == 1: # spruce sapling tex = self.load_image_texture("assets/minecraft/textures/block/spruce_sapling.png") elif data & 0x3 == 2: # birch sapling tex = self.load_image_texture("assets/minecraft/textures/block/birch_sapling.png") elif data & 0x3 == 3: # jungle sapling tex = self.load_image_texture("assets/minecraft/textures/block/jungle_sapling.png") elif data & 0x3 == 4: # acacia sapling tex = self.load_image_texture("assets/minecraft/textures/block/acacia_sapling.png") elif data & 0x3 == 5: # dark oak/roofed oak/big oak sapling tex = self.load_image_texture("assets/minecraft/textures/block/dark_oak_sapling.png") return self.build_sprite(tex) sprite(blockid=11385, imagename="assets/minecraft/textures/block/oak_sapling.png") sprite(blockid=11386, imagename="assets/minecraft/textures/block/spruce_sapling.png") sprite(blockid=11387, imagename="assets/minecraft/textures/block/birch_sapling.png") sprite(blockid=11388, imagename="assets/minecraft/textures/block/jungle_sapling.png") sprite(blockid=11389, imagename="assets/minecraft/textures/block/acacia_sapling.png") sprite(blockid=11390, imagename="assets/minecraft/textures/block/dark_oak_sapling.png") sprite(blockid=11413, imagename="assets/minecraft/textures/block/bamboo_stage0.png") # bedrock block(blockid=7, top_image="assets/minecraft/textures/block/bedrock.png") # water, glass, and ice (no inner surfaces) # uses pseudo-ancildata found in iterate.c @material(blockid=[8, 9, 20, 79, 95], data=list(range(512)), fluid=(8, 9), transparent=True, nospawn=True, solid=(79, 20, 95)) def no_inner_surfaces(self, blockid, data): if blockid == 8 or blockid == 9: texture = self.load_water() elif blockid == 20: texture = self.load_image_texture("assets/minecraft/textures/block/glass.png") elif blockid == 95: texture = self.load_image_texture("assets/minecraft/textures/block/%s_stained_glass.png" % color_map[data & 0x0f]) else: texture = self.load_image_texture("assets/minecraft/textures/block/ice.png") # now that we've used the lower 4 bits to get color, shift down to get the 5 bits that encode face hiding if not (blockid == 8 or blockid == 9): # water doesn't have a shifted 
pseudodata data = data >> 4 if (data & 0b10000) == 16: top = texture else: top = None if (data & 0b0001) == 1: side1 = texture # top left else: side1 = None if (data & 0b1000) == 8: side2 = texture # top right else: side2 = None if (data & 0b0010) == 2: side3 = texture # bottom left else: side3 = None if (data & 0b0100) == 4: side4 = texture # bottom right else: side4 = None # if nothing shown do not draw at all if top is None and side3 is None and side4 is None: return None img = self.build_full_block(top,None,None,side3,side4) return img @material(blockid=[10, 11], data=list(range(16)), fluid=True, transparent=False, nospawn=True) def lava(self, blockid, data): lavatex = self.load_lava() return self.build_block(lavatex, lavatex) # sand @material(blockid=12, data=list(range(2)), solid=True) def sand_blocks(self, blockid, data): if data == 0: # normal img = self.build_block(self.load_image_texture("assets/minecraft/textures/block/sand.png"), self.load_image_texture("assets/minecraft/textures/block/sand.png")) if data == 1: # red img = self.build_block(self.load_image_texture("assets/minecraft/textures/block/red_sand.png"), self.load_image_texture("assets/minecraft/textures/block/red_sand.png")) return img # gravel block(blockid=13, top_image="assets/minecraft/textures/block/gravel.png") # gold ore block(blockid=14, top_image="assets/minecraft/textures/block/gold_ore.png") # iron ore block(blockid=15, top_image="assets/minecraft/textures/block/iron_ore.png") # coal ore block(blockid=16, top_image="assets/minecraft/textures/block/coal_ore.png") @material(blockid=[17, 162, 11306, 11307, 11308, 11309, 11310, 11311, 1008, 1009], data=list(range(12)), solid=True) def wood(self, blockid, data): # extract orientation and wood type frorm data bits wood_type = data & 3 wood_orientation = data & 12 if self.rotation == 1: if wood_orientation == 4: wood_orientation = 8 elif wood_orientation == 8: wood_orientation = 4 elif self.rotation == 3: if wood_orientation == 4: wood_orientation = 8 elif wood_orientation == 8: wood_orientation = 4 # dictionary of blockid : { wood_type : (top, side) } wood_tex = { 17: { 0: ("oak_log_top.png", "oak_log.png"), 1: ("spruce_log_top.png", "spruce_log.png"), 2: ("birch_log_top.png", "birch_log.png"), 3: ("jungle_log_top.png", "jungle_log.png"), }, 162: { 0: ("acacia_log_top.png", "acacia_log.png"), 1: ("dark_oak_log_top.png", "dark_oak_log.png"), }, 11306: { 0: ("stripped_oak_log_top.png", "stripped_oak_log.png"), 1: ("stripped_spruce_log_top.png", "stripped_spruce_log.png"), 2: ("stripped_birch_log_top.png", "stripped_birch_log.png"), 3: ("stripped_jungle_log_top.png", "stripped_jungle_log.png"), }, 11307: { 0: ("stripped_acacia_log_top.png", "stripped_acacia_log.png"), 1: ("stripped_dark_oak_log_top.png", "stripped_dark_oak_log.png"), }, 11308: { 0: ("oak_log.png", None), 1: ("spruce_log.png", None), 2: ("birch_log.png", None), 3: ("jungle_log.png", None), }, 11309: { 0: ("acacia_log.png", None), 1: ("dark_oak_log.png", None), }, 11310: { 0: ("stripped_oak_log.png", None), 1: ("stripped_spruce_log.png", None), 2: ("stripped_birch_log.png", None), 3: ("stripped_jungle_log.png", None), }, 11311: { 0: ("stripped_acacia_log.png", None), 1: ("stripped_dark_oak_log.png", None), }, 1008: { 0: ("warped_stem_top.png", "warped_stem.png"), 1: ("warped_stem_top.png", "stripped_warped_stem.png"), 2: ("crimson_stem_top.png", "crimson_stem.png"), 3: ("crimson_stem_top.png", "stripped_crimson_stem.png"), }, 1009: { 0: ("warped_stem.png", None), 1: ("stripped_warped_stem.png", None), 
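                     # (Worked example of the bit layout above: for
                     #  blockid 17 with data == 5, wood_type == 5 & 3 == 1
                     #  (spruce) and wood_orientation == 5 & 12 == 4,
                     #  i.e. an east-west log rendered with rotated side
                     #  textures below.)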
2: ("crimson_stem.png", None), 3: ("stripped_crimson_stem.png", None), } } top_f, side_f = wood_tex[blockid].get(wood_type, wood_tex[blockid][0]) if not side_f: side_f = top_f top = self.load_image_texture(BLOCKTEX + top_f) side = self.load_image_texture(BLOCKTEX + side_f) # choose orientation and paste textures if wood_orientation == 0: return self.build_block(top, side) elif wood_orientation == 4: # east-west orientation return self.build_full_block(side.rotate(90), None, None, top, side.rotate(90)) elif wood_orientation == 8: # north-south orientation return self.build_full_block(side, None, None, side.rotate(270), top) @material(blockid=[18, 161], data=list(range(16)), transparent=True, solid=True) def leaves(self, blockid, data): # mask out the bits 4 and 8 # they are used for player placed and check-for-decay blocks data = data & 0x7 t = self.load_image_texture("assets/minecraft/textures/block/oak_leaves.png") if (blockid, data) == (18, 1): # pine! t = self.load_image_texture("assets/minecraft/textures/block/spruce_leaves.png") elif (blockid, data) == (18, 2): # birth tree t = self.load_image_texture("assets/minecraft/textures/block/birch_leaves.png") elif (blockid, data) == (18, 3): # jungle tree t = self.load_image_texture("assets/minecraft/textures/block/jungle_leaves.png") elif (blockid, data) == (161, 4): # acacia tree t = self.load_image_texture("assets/minecraft/textures/block/acacia_leaves.png") elif (blockid, data) == (161, 5): t = self.load_image_texture("assets/minecraft/textures/block/dark_oak_leaves.png") return self.build_block(t, t) # sponge block(blockid=19, top_image="assets/minecraft/textures/block/sponge.png") # lapis lazuli ore block(blockid=21, top_image="assets/minecraft/textures/block/lapis_ore.png") # lapis lazuli block block(blockid=22, top_image="assets/minecraft/textures/block/lapis_block.png") # dispenser, dropper, furnace, blast furnace, and smoker @material(blockid=[23, 61, 158, 11362, 11364], data=list(range(14)), solid=True) def furnaces(self, blockid, data): # first, do the rotation if needed # Masked as bit 4 indicates whether the block is lit/triggered or not if self.rotation in [1, 2, 3] and data & 0b111 in [2, 3, 4, 5]: rotation_map = {1: {2: 5, 3: 4, 4: 2, 5: 3}, 2: {2: 3, 3: 2, 4: 5, 5: 4}, 3: {2: 4, 3: 5, 4: 3, 5: 2}} data = data & 0b1000 | rotation_map[self.rotation][data & 0b111] # Rotation angles for top texture using data & 0b111 as an index top_rotation_map = [0, 0, 180, 0, 270, 90, 0, 0] # Dispenser texture_map = {23: {'top': 'furnace_top', 'side': 'furnace_side', 'front': 'dispenser_front', 'top_vert': 'dispenser_front_vertical'}, # Furnace 61: {'top': 'furnace_top', 'side': 'furnace_side', 'front': 'furnace_front', 'front_on': 'furnace_front_on'}, # Dropper 158: {'top': 'furnace_top', 'side': 'furnace_side', 'front': 'dropper_front', 'top_vert': 'dropper_front_vertical'}, # Blast furance 11362: {'top': 'blast_furnace_top', 'side': 'blast_furnace_side', 'front': 'blast_furnace_front', 'front_on': 'blast_furnace_front_on'}, # Smoker 11364: {'top': 'smoker_top', 'side': 'smoker_side', 'front': 'smoker_front', 'front_on': 'smoker_front_on'}} if data & 0b111 in [0, 1] and 'top_vert' in texture_map[blockid]: # Block has a special top texture when it faces up/down # This also affects which texture is used for the sides/front top_name = 'top_vert' if data & 0b111 == 1 else 'top' side_name = 'top' front_name = 'top' else: top_name = 'top' side_name = 'side' # Use block's lit/on front texture if it is defined & bit 4 is set # Note: Some front_on 
texture images have multiple frames, # but load_image_texture() crops this appropriately # as long as the image width is 16px if data & 0b1000 == 8 and 'front_on' in texture_map[blockid]: front_name = 'front_on' else: front_name = 'front' top = self.load_image_texture("assets/minecraft/textures/block/%s.png" % texture_map[blockid][top_name]).copy() top = top.rotate(top_rotation_map[data & 0b111]) side = self.load_image_texture("assets/minecraft/textures/block/%s.png" % texture_map[blockid][side_name]) front = self.load_image_texture("assets/minecraft/textures/block/%s.png" % texture_map[blockid][front_name]) if data & 0b111 == 3: # pointing west return self.build_full_block(top, None, None, side, front) elif data & 0b111 == 4: # pointing north return self.build_full_block(top, None, None, front, side) else: # in any other direction the front can't be seen return self.build_full_block(top, None, None, side, side) # sandstone @material(blockid=24, data=list(range(3)), solid=True) def sandstone(self, blockid, data): top = self.load_image_texture("assets/minecraft/textures/block/sandstone_top.png") if data == 0: # normal return self.build_block(top, self.load_image_texture("assets/minecraft/textures/block/sandstone.png")) if data == 1: # hieroglyphic return self.build_block(top, self.load_image_texture("assets/minecraft/textures/block/chiseled_sandstone.png")) if data == 2: # soft return self.build_block(top, self.load_image_texture("assets/minecraft/textures/block/cut_sandstone.png")) # red sandstone @material(blockid=179, data=list(range(3)), solid=True) def sandstone(self, blockid, data): top = self.load_image_texture("assets/minecraft/textures/block/red_sandstone_top.png") if data == 0: # normal side = self.load_image_texture("assets/minecraft/textures/block/red_sandstone.png") return self.build_full_block(top, None, None, side, side, self.load_image_texture("assets/minecraft/textures/block/red_sandstone_bottom.png") ) if data == 1: # hieroglyphic return self.build_block(top, self.load_image_texture("assets/minecraft/textures/block/chiseled_red_sandstone.png")) if data == 2: # soft return self.build_block(top, self.load_image_texture("assets/minecraft/textures/block/cut_red_sandstone.png")) # note block block(blockid=25, top_image="assets/minecraft/textures/block/note_block.png") # Bed @material(blockid=26, data=list(range(256)), transparent=True, nospawn=True) def bed(self, blockid, data): # Bits 1-2 Rotation # Bit 3 Occupancy, no impact on appearance # Bit 4 Foot/Head of bed (0 = foot, 1 = head) # Bits 5-8 Color # first get rotation done # Masked to not clobber block head/foot & color info data = data & 0b11111100 | ((self.rotation + (data & 0b11)) % 4) bed_texture = self.load_image("assets/minecraft/textures/entity/bed/%s.png" % color_map[data >> 4]) increment = 8 left_face = None right_face = None top_face = None if data & 0x8 == 0x8: # head of the bed top = bed_texture.copy().crop((6, 6, 22, 22)) # Composing the side side = Image.new("RGBA", (16, 16), self.bgcolor) side_part1 = bed_texture.copy().crop((0, 6, 6, 22)).rotate(90, expand=True) # foot of the bed side_part2 = bed_texture.copy().crop((53, 3, 56, 6)) side_part2_f = side_part2.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(side, side_part1, (0, 7), side_part1) alpha_over(side, side_part2, (0, 13), side_part2) end = Image.new("RGBA", (16, 16), self.bgcolor) end_part = bed_texture.copy().crop((6, 0, 22, 6)).rotate(180) alpha_over(end, end_part, (0, 7), end_part) alpha_over(end, side_part2, (0, 13), side_part2) alpha_over(end, 
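                    # (Worked example of the bed bits documented above:
                    #  data == 0b00011011 -> color index 1 (orange),
                    #  head-of-bed (bit 4 set), facing East (low bits 0b11).)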
side_part2_f, (13, 13), side_part2_f) if data & 0x03 == 0x00: # South top_face = top.rotate(180) left_face = side.transpose(Image.FLIP_LEFT_RIGHT) right_face = end elif data & 0x03 == 0x01: # West top_face = top.rotate(90) left_face = end right_face = side.transpose(Image.FLIP_LEFT_RIGHT) elif data & 0x03 == 0x02: # North top_face = top left_face = side elif data & 0x03 == 0x03: # East top_face = top.rotate(270) right_face = side else: # foot of the bed top = bed_texture.copy().crop((6, 28, 22, 44)) side = Image.new("RGBA", (16, 16), self.bgcolor) side_part1 = bed_texture.copy().crop((0, 28, 6, 44)).rotate(90, expand=True) side_part2 = bed_texture.copy().crop((53, 3, 56, 6)) side_part2_f = side_part2.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(side, side_part1, (0, 7), side_part1) alpha_over(side, side_part2, (13, 13), side_part2) end = Image.new("RGBA", (16, 16), self.bgcolor) end_part = bed_texture.copy().crop((22, 22, 38, 28)).rotate(180) alpha_over(end, end_part, (0, 7), end_part) alpha_over(end, side_part2, (0, 13), side_part2) alpha_over(end, side_part2_f, (13, 13), side_part2_f) if data & 0x03 == 0x00: # South top_face = top.rotate(180) left_face = side.transpose(Image.FLIP_LEFT_RIGHT) elif data & 0x03 == 0x01: # West top_face = top.rotate(90) right_face = side.transpose(Image.FLIP_LEFT_RIGHT) elif data & 0x03 == 0x02: # North top_face = top left_face = side right_face = end elif data & 0x03 == 0x03: # East top_face = top.rotate(270) left_face = end right_face = side top_face = (top_face, increment) return self.build_full_block(top_face, None, None, left_face, right_face) # powered, detector, activator and normal rails @material(blockid=[27, 28, 66, 157], data=list(range(14)), transparent=True) def rails(self, blockid, data): # first, do rotation # Masked to not clobber powered rail on/off info # Ascending and flat straight if self.rotation == 1: if (data & 0b0111) == 0: data = data & 0b1000 | 1 elif (data & 0b0111) == 1: data = data & 0b1000 | 0 elif (data & 0b0111) == 2: data = data & 0b1000 | 5 elif (data & 0b0111) == 3: data = data & 0b1000 | 4 elif (data & 0b0111) == 4: data = data & 0b1000 | 2 elif (data & 0b0111) == 5: data = data & 0b1000 | 3 elif self.rotation == 2: if (data & 0b0111) == 2: data = data & 0b1000 | 3 elif (data & 0b0111) == 3: data = data & 0b1000 | 2 elif (data & 0b0111) == 4: data = data & 0b1000 | 5 elif (data & 0b0111) == 5: data = data & 0b1000 | 4 elif self.rotation == 3: if (data & 0b0111) == 0: data = data & 0b1000 | 1 elif (data & 0b0111) == 1: data = data & 0b1000 | 0 elif (data & 0b0111) == 2: data = data & 0b1000 | 4 elif (data & 0b0111) == 3: data = data & 0b1000 | 5 elif (data & 0b0111) == 4: data = data & 0b1000 | 3 elif (data & 0b0111) == 5: data = data & 0b1000 | 2 if blockid == 66: # normal minetrack only # corners if self.rotation == 1: if data == 6: data = 7 elif data == 7: data = 8 elif data == 8: data = 6 elif data == 9: data = 9 elif self.rotation == 2: if data == 6: data = 8 elif data == 7: data = 9 elif data == 8: data = 6 elif data == 9: data = 7 elif self.rotation == 3: if data == 6: data = 9 elif data == 7: data = 6 elif data == 8: data = 8 elif data == 9: data = 7 img = Image.new("RGBA", (24,24), self.bgcolor) if blockid == 27: # powered rail if data & 0x8 == 0: # unpowered raw_straight = self.load_image_texture("assets/minecraft/textures/block/powered_rail.png") raw_corner = self.load_image_texture("assets/minecraft/textures/block/rail_corner.png") # they don't exist but make the code # much simpler elif data & 0x8 == 0x8: #
powered raw_straight = self.load_image_texture("assets/minecraft/textures/block/powered_rail_on.png") raw_corner = self.load_image_texture("assets/minecraft/textures/block/rail_corner.png") # leave corners for code simplicity # filter the 'powered' bit data = data & 0x7 elif blockid == 28: # detector rail raw_straight = self.load_image_texture("assets/minecraft/textures/block/detector_rail.png") raw_corner = self.load_image_texture("assets/minecraft/textures/block/rail_corner.png") # leave corners for code simplicity elif blockid == 66: # normal rail raw_straight = self.load_image_texture("assets/minecraft/textures/block/rail.png") raw_corner = self.load_image_texture("assets/minecraft/textures/block/rail_corner.png") elif blockid == 157: # activator rail if data & 0x8 == 0: # unpowered raw_straight = self.load_image_texture("assets/minecraft/textures/block/activator_rail.png") raw_corner = self.load_image_texture("assets/minecraft/textures/block/rail_corner.png") # they don't exist but make the code # much simpler elif data & 0x8 == 0x8: # powered raw_straight = self.load_image_texture("assets/minecraft/textures/block/activator_rail_on.png") raw_corner = self.load_image_texture("assets/minecraft/textures/block/rail_corner.png") # leave corners for code simplicity # filter the 'powered' bit data = data & 0x7 ## use transform_image to scale and shear if data == 0: track = self.transform_image_top(raw_straight) alpha_over(img, track, (0,12), track) elif data == 6: track = self.transform_image_top(raw_corner) alpha_over(img, track, (0,12), track) elif data == 7: track = self.transform_image_top(raw_corner.rotate(270)) alpha_over(img, track, (0,12), track) elif data == 8: # flip track = self.transform_image_top(raw_corner.transpose(Image.FLIP_TOP_BOTTOM).rotate(90)) alpha_over(img, track, (0,12), track) elif data == 9: track = self.transform_image_top(raw_corner.transpose(Image.FLIP_TOP_BOTTOM)) alpha_over(img, track, (0,12), track) elif data == 1: track = self.transform_image_top(raw_straight.rotate(90)) alpha_over(img, track, (0,12), track) # slopes elif data == 2: # slope going up in +x direction track = self.transform_image_slope(raw_straight) track = track.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, track, (2,0), track) # the 2-pixel offset is needed to line up with the adjacent tracks elif data == 3: # slope going up in -x direction # tracks are sprites, in this case we are seeing the "side" of # the sprite, so draw a line to make it beautiful. ImageDraw.Draw(img).line([(11,11),(23,17)],fill=(164,164,164)) # grey from track texture (exterior grey). # the track doesn't start from image corners, be careful drawing the line!
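# A worked decoding example (assuming the vanilla 4-bit rail layout used above):
# an activator rail with data == 0b1001 has its powered bit set (0b1000), so the
# powered textures are chosen, and after masking with 0x7 it renders as
# data == 1, a straight track rotated 90 degrees.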
elif data == 4: # slope going up in -y direction track = self.transform_image_slope(raw_straight) alpha_over(img, track, (0,0), track) elif data == 5: # slope going up in +y direction # same as "data == 3" ImageDraw.Draw(img).line([(1,17),(12,11)],fill=(164,164,164)) return img # sticky and normal piston body @material(blockid=[29, 33], data=[0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 13], transparent=True, solid=True, nospawn=True) def piston(self, blockid, data): # first, rotation # Masked to not clobber the extended-state bit if self.rotation in [1, 2, 3] and (data & 0b111) in [2, 3, 4, 5]: rotation_map = {1: {2: 5, 3: 4, 4: 2, 5: 3}, 2: {2: 3, 3: 2, 4: 5, 5: 4}, 3: {2: 4, 3: 5, 4: 3, 5: 2}} data = (data & 0b1000) | rotation_map[self.rotation][data & 0b111] if blockid == 29: # sticky piston_t = self.load_image_texture("assets/minecraft/textures/block/piston_top_sticky.png").copy() else: # normal piston_t = self.load_image_texture("assets/minecraft/textures/block/piston_top.png").copy() # other textures side_t = self.load_image_texture("assets/minecraft/textures/block/piston_side.png").copy() back_t = self.load_image_texture("assets/minecraft/textures/block/piston_bottom.png").copy() interior_t = self.load_image_texture("assets/minecraft/textures/block/piston_inner.png").copy() if data & 0x08 == 0x08: # pushed out, non full blocks, tricky stuff # remove piston texture from piston body ImageDraw.Draw(side_t).rectangle((0, 0, 16, 3), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0)) if data & 0x07 == 0x0: # down side_t = side_t.rotate(180) img = self.build_full_block(back_t, None, None, side_t, side_t) elif data & 0x07 == 0x1: # up img = self.build_full_block((interior_t, 4), None, None, side_t, side_t) elif data & 0x07 == 0x2: # north img = self.build_full_block(side_t, None, None, side_t.rotate(90), back_t) elif data & 0x07 == 0x3: # south img = self.build_full_block(side_t.rotate(180), None, None, side_t.rotate(270), None) temp = self.transform_image_side(interior_t) temp = temp.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, temp, (9, 4), temp) elif data & 0x07 == 0x4: # west img = self.build_full_block(side_t.rotate(90), None, None, None, side_t.rotate(270)) temp = self.transform_image_side(interior_t) alpha_over(img, temp, (3, 4), temp) elif data & 0x07 == 0x5: # east img = self.build_full_block(side_t.rotate(270), None, None, back_t, side_t.rotate(90)) else: # pushed in, normal full blocks, easy stuff if data & 0x07 == 0x0: # down side_t = side_t.rotate(180) img = self.build_full_block(back_t, None, None, side_t, side_t) elif data & 0x07 == 0x1: # up img = self.build_full_block(piston_t, None, None, side_t, side_t) elif data & 0x07 == 0x2: # north img = self.build_full_block(side_t, None, None, side_t.rotate(90), back_t) elif data & 0x07 == 0x3: # south img = self.build_full_block(side_t.rotate(180), None, None, side_t.rotate(270), piston_t) elif data & 0x07 == 0x4: # west img = self.build_full_block(side_t.rotate(90), None, None, piston_t, side_t.rotate(270)) elif data & 0x07 == 0x5: # east img = self.build_full_block(side_t.rotate(270), None, None, back_t, side_t.rotate(90)) return img # sticky and normal piston shaft @material(blockid=34, data=[0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 13], transparent=True, nospawn=True) def piston_extension(self, blockid, data): # first, rotation # Masked to not clobber the sticky bit if self.rotation in [1, 2, 3] and (data & 0b111) in [2, 3, 4, 5]: rotation_map = {1: {2: 5, 3: 4, 4: 2, 5: 3}, 2: {2: 3, 3: 2, 4: 5, 5: 4}, 3: {2: 4, 3: 5, 4: 3, 5: 2}} data =
(data & 0b1000) | rotation_map[self.rotation][data & 0b111] if data & 0x8 == 0x8: # sticky piston_t = self.load_image_texture("assets/minecraft/textures/block/piston_top_sticky.png").copy() else: # normal piston_t = self.load_image_texture("assets/minecraft/textures/block/piston_top.png").copy() # other textures side_t = self.load_image_texture("assets/minecraft/textures/block/piston_side.png").copy() back_t = self.load_image_texture("assets/minecraft/textures/block/piston_top.png").copy() # crop piston body ImageDraw.Draw(side_t).rectangle((0, 4, 16, 16), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0)) # generate the horizontal piston extension stick h_stick = Image.new("RGBA", (24, 24), self.bgcolor) temp = self.transform_image_side(side_t) alpha_over(h_stick, temp, (1, 7), temp) temp = self.transform_image_top(side_t.rotate(90)) alpha_over(h_stick, temp, (1, 1), temp) # Darken it sidealpha = h_stick.split()[3] h_stick = ImageEnhance.Brightness(h_stick).enhance(0.85) h_stick.putalpha(sidealpha) # generate the vertical piston extension stick v_stick = Image.new("RGBA", (24, 24), self.bgcolor) temp = self.transform_image_side(side_t.rotate(90)) alpha_over(v_stick, temp, (12, 6), temp) temp = temp.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(v_stick, temp, (1, 6), temp) # Darken it sidealpha = v_stick.split()[3] v_stick = ImageEnhance.Brightness(v_stick).enhance(0.85) v_stick.putalpha(sidealpha) # Piston orientation is stored in the first 3 bits if data & 0x07 == 0x0: # down side_t = side_t.rotate(180) img = self.build_full_block((back_t, 12), None, None, side_t, side_t) alpha_over(img, v_stick, (0, -3), v_stick) elif data & 0x07 == 0x1: # up img = Image.new("RGBA", (24, 24), self.bgcolor) img2 = self.build_full_block(piston_t, None, None, side_t, side_t) alpha_over(img, v_stick, (0, 4), v_stick) alpha_over(img, img2, (0, 0), img2) elif data & 0x07 == 0x2: # north img = self.build_full_block(side_t, None, None, side_t.rotate(90), None) temp = self.transform_image_side(back_t).transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, temp, (2, 2), temp) alpha_over(img, h_stick, (6, 3), h_stick) elif data & 0x07 == 0x3: # south img = Image.new("RGBA", (24, 24), self.bgcolor) img2 = self.build_full_block(side_t.rotate(180), None, None, side_t.rotate(270), piston_t) alpha_over(img, h_stick, (0, 0), h_stick) alpha_over(img, img2, (0, 0), img2) elif data & 0x07 == 0x4: # west img = self.build_full_block(side_t.rotate(90), None, None, piston_t, side_t.rotate(270)) h_stick = h_stick.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, h_stick, (0, 0), h_stick) elif data & 0x07 == 0x5: # east img = Image.new("RGBA", (24, 24), self.bgcolor) img2 = self.build_full_block(side_t.rotate(270), None, None, None, side_t.rotate(90)) h_stick = h_stick.transpose(Image.FLIP_LEFT_RIGHT) temp = self.transform_image_side(back_t) alpha_over(img2, temp, (10, 2), temp) alpha_over(img, img2, (0, 0), img2) alpha_over(img, h_stick, (-3, 2), h_stick) return img # cobweb sprite(blockid=30, imagename="assets/minecraft/textures/block/cobweb.png", nospawn=True) @material(blockid=31, data=list(range(3)), transparent=True) def tall_grass(self, blockid, data): if data == 0: # dead shrub texture = self.load_image_texture("assets/minecraft/textures/block/dead_bush.png") elif data == 1: # tall grass texture = self.load_image_texture("assets/minecraft/textures/block/grass.png") elif data == 2: # fern texture = self.load_image_texture("assets/minecraft/textures/block/fern.png") return self.build_billboard(texture) # dead bush billboard(blockid=32,
imagename="assets/minecraft/textures/block/dead_bush.png") @material(blockid=35, data=list(range(16)), solid=True) def wool(self, blockid, data): texture = self.load_image_texture("assets/minecraft/textures/block/%s_wool.png" % color_map[data]) return self.build_block(texture, texture) # dandelion sprite(blockid=37, imagename="assets/minecraft/textures/block/dandelion.png") # flowers @material(blockid=38, data=list(range(13)), transparent=True) def flower(self, blockid, data): flower_map = ["poppy", "blue_orchid", "allium", "azure_bluet", "red_tulip", "orange_tulip", "white_tulip", "pink_tulip", "oxeye_daisy", "dandelion", "wither_rose", "cornflower", "lily_of_the_valley"] texture = self.load_image_texture("assets/minecraft/textures/block/%s.png" % flower_map[data]) return self.build_billboard(texture) # brown mushroom sprite(blockid=39, imagename="assets/minecraft/textures/block/brown_mushroom.png") # red mushroom sprite(blockid=40, imagename="assets/minecraft/textures/block/red_mushroom.png") # warped fungus sprite(blockid=1016, imagename="assets/minecraft/textures/block/warped_fungus.png") # crimson fungus sprite(blockid=1017, imagename="assets/minecraft/textures/block/crimson_fungus.png") # warped roots sprite(blockid=1018, imagename="assets/minecraft/textures/block/warped_roots.png") # crimson roots sprite(blockid=1019, imagename="assets/minecraft/textures/block/crimson_roots.png") # block of gold block(blockid=41, top_image="assets/minecraft/textures/block/gold_block.png") # block of iron block(blockid=42, top_image="assets/minecraft/textures/block/iron_block.png") # double slabs and slabs # these wooden slabs are unobtainable without cheating, they are still # here because lots of pre-1.3 worlds use this blocks, add prismarine slabs @material(blockid=[43, 44, 181, 182, 204, 205] + list(range(11340, 11359)) + list(range(1027, 1030)) + list(range(1072, 1080)), data=list(range(16)), transparent=[44, 182, 205] + list(range(11340, 11359)) + list(range(1027, 1030)) + list(range(1072, 1080)), solid=True) def slabs(self, blockid, data): if blockid == 44 or blockid == 182: texture = data & 7 else: # data > 8 are special double slabs texture = data if blockid == 44 or blockid == 43: if texture== 0: # stone slab top = self.load_image_texture("assets/minecraft/textures/block/stone.png") side = self.load_image_texture("assets/minecraft/textures/block/stone.png") elif texture== 1: # sandstone slab top = self.load_image_texture("assets/minecraft/textures/block/sandstone_top.png") side = self.load_image_texture("assets/minecraft/textures/block/sandstone.png") elif texture== 2: # wooden slab top = side = self.load_image_texture("assets/minecraft/textures/block/oak_planks.png") elif texture== 3: # cobblestone slab top = side = self.load_image_texture("assets/minecraft/textures/block/cobblestone.png") elif texture== 4: # brick top = side = self.load_image_texture("assets/minecraft/textures/block/bricks.png") elif texture== 5: # stone brick top = side = self.load_image_texture("assets/minecraft/textures/block/stone_bricks.png") elif texture== 6: # nether brick slab top = side = self.load_image_texture("assets/minecraft/textures/block/nether_bricks.png") elif texture== 7: #quartz top = side = self.load_image_texture("assets/minecraft/textures/block/quartz_block_side.png") elif texture== 8: # special stone double slab with top texture only top = side = self.load_image_texture("assets/minecraft/textures/block/smooth_stone.png") elif texture== 9: # special sandstone double slab with top texture only top = 
side = self.load_image_texture("assets/minecraft/textures/block/sandstone_top.png") else: return None elif blockid == 182: # single red sandstone slab if texture == 0: top = self.load_image_texture("assets/minecraft/textures/block/red_sandstone_top.png") side = self.load_image_texture("assets/minecraft/textures/block/red_sandstone.png") else: return None elif blockid == 181: # double red sandstone slab if texture == 0: # red sandstone top = self.load_image_texture("assets/minecraft/textures/block/red_sandstone_top.png") side = self.load_image_texture("assets/minecraft/textures/block/red_sandstone.png") elif texture == 8: # 'full' red sandstone (smooth) top = side = self.load_image_texture("assets/minecraft/textures/block/red_sandstone_top.png"); else: return None elif blockid == 204 or blockid == 205: # purpur slab (single=205 double=204) top = side = self.load_image_texture("assets/minecraft/textures/block/purpur_block.png"); elif blockid == 11340: # prismarine slabs top = side = self.load_image_texture("assets/minecraft/textures/block/prismarine.png").copy() elif blockid == 11341: # dark prismarine slabs top = side = self.load_image_texture("assets/minecraft/textures/block/dark_prismarine.png").copy() elif blockid == 11342: # prismarine brick slabs top = side = self.load_image_texture("assets/minecraft/textures/block/prismarine_bricks.png").copy() elif blockid == 11343: # andesite slabs top = side = self.load_image_texture("assets/minecraft/textures/block/andesite.png").copy() elif blockid == 11344: # diorite slabs top = side = self.load_image_texture("assets/minecraft/textures/block/diorite.png").copy() elif blockid == 11345: # granite slabs top = side = self.load_image_texture("assets/minecraft/textures/block/granite.png").copy() elif blockid == 11346: # polished andesite slabs top = side = self.load_image_texture("assets/minecraft/textures/block/polished_andesite.png").copy() elif blockid == 11347: # polished diorite slabs top = side = self.load_image_texture("assets/minecraft/textures/block/polished_diorite.png").copy() elif blockid == 11348: # polished granite slabs top = side = self.load_image_texture("assets/minecraft/textures/block/polished_granite.png").copy() elif blockid == 11349: # red nether brick slab top = side = self.load_image_texture("assets/minecraft/textures/block/red_nether_bricks.png").copy() elif blockid == 11350: # smooth sandstone slab top = side = self.load_image_texture("assets/minecraft/textures/block/sandstone_top.png").copy() elif blockid == 11351: # cut sandstone slab top = side = self.load_image_texture("assets/minecraft/textures/block/cut_sandstone.png").copy() elif blockid == 11352: # smooth red sandstone slab top = side = self.load_image_texture("assets/minecraft/textures/block/red_sandstone_top.png").copy() elif blockid == 11353: # cut red sandstone slab top = side = self.load_image_texture("assets/minecraft/textures/block/cut_red_sandstone.png").copy() elif blockid == 11354: # end_stone_brick_slab top = side = self.load_image_texture("assets/minecraft/textures/block/end_stone_bricks.png").copy() elif blockid == 11355: # mossy_cobblestone_slab top = side = self.load_image_texture("assets/minecraft/textures/block/mossy_cobblestone.png").copy() elif blockid == 11356: # mossy_stone_brick_slab top = side = self.load_image_texture("assets/minecraft/textures/block/mossy_stone_bricks.png").copy() elif blockid == 11357: # smooth_quartz_slab top = side = self.load_image_texture("assets/minecraft/textures/block/quartz_block_bottom.png").copy() elif blockid == 
11358: # smooth_stone_slab top = self.load_image_texture("assets/minecraft/textures/block/smooth_stone.png").copy() side = self.load_image_texture("assets/minecraft/textures/block/smooth_stone_slab_side.png").copy() elif blockid == 1027: # blackstone_slab top = side = self.load_image_texture("assets/minecraft/textures/block/blackstone.png").copy() elif blockid == 1028: # polished_blackstone_slab top = side = self.load_image_texture("assets/minecraft/textures/block/polished_blackstone.png").copy() elif blockid == 1029: # polished_blackstone_brick_slab top = side = self.load_image_texture("assets/minecraft/textures/block/polished_blackstone_bricks.png").copy() elif blockid in range(1072, 1080): copper_tex = { 1072: "assets/minecraft/textures/block/cut_copper.png", 1076: "assets/minecraft/textures/block/cut_copper.png", 1073: "assets/minecraft/textures/block/exposed_cut_copper.png", 1077: "assets/minecraft/textures/block/exposed_cut_copper.png", 1074: "assets/minecraft/textures/block/weathered_cut_copper.png", 1078: "assets/minecraft/textures/block/weathered_cut_copper.png", 1075: "assets/minecraft/textures/block/oxidized_cut_copper.png", 1079: "assets/minecraft/textures/block/oxidized_cut_copper.png", } top = side = self.load_image_texture(copper_tex[blockid]).copy() if blockid == 43 or blockid == 181 or blockid == 204: # double slab return self.build_block(top, side) return self.build_slab_block(top, side, data & 8 == 8) # brick block block(blockid=45, top_image="assets/minecraft/textures/block/bricks.png") # TNT block(blockid=46, top_image="assets/minecraft/textures/block/tnt_top.png", side_image="assets/minecraft/textures/block/tnt_side.png", nospawn=True) # bookshelf block(blockid=47, top_image="assets/minecraft/textures/block/oak_planks.png", side_image="assets/minecraft/textures/block/bookshelf.png") # moss stone block(blockid=48, top_image="assets/minecraft/textures/block/mossy_cobblestone.png") # obsidian block(blockid=49, top_image="assets/minecraft/textures/block/obsidian.png") # torch, redstone torch (off), redstone torch(on), soul_torch @material(blockid=[50, 75, 76, 1039], data=[1, 2, 3, 4, 5], transparent=True) def torches(self, blockid, data): # first, rotations if self.rotation == 1: if data == 1: data = 3 elif data == 2: data = 4 elif data == 3: data = 2 elif data == 4: data = 1 elif self.rotation == 2: if data == 1: data = 2 elif data == 2: data = 1 elif data == 3: data = 4 elif data == 4: data = 3 elif self.rotation == 3: if data == 1: data = 4 elif data == 2: data = 3 elif data == 3: data = 1 elif data == 4: data = 2 # choose the proper texture if blockid == 50: # torch small = self.load_image_texture("assets/minecraft/textures/block/torch.png") elif blockid == 75: # off redstone torch small = self.load_image_texture("assets/minecraft/textures/block/redstone_torch_off.png") elif blockid == 76: # on redstone torch small = self.load_image_texture("assets/minecraft/textures/block/redstone_torch.png") elif blockid == 1039: # soul torch small = self.load_image_texture("assets/minecraft/textures/block/soul_torch.png") # compose a torch bigger than the normal one # (better for doing transformations) torch = Image.new("RGBA", (16,16), self.bgcolor) alpha_over(torch,small,(-4,-3)) alpha_over(torch,small,(-5,-2)) alpha_over(torch,small,(-3,-2)) # angle of inclination of the texture rotation = 15 if data == 1: # pointing south torch = torch.rotate(-rotation, Image.NEAREST) # the nearest filter keeps the sprite crisp.
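# A worked example, assuming the wall-torch convention used here: data == 3
# (pointing west) rotates the enlarged texture by +15 degrees and passes it as
# side1 of an otherwise empty block, while data == 5 skips the rotation
# entirely and composes the free-standing "3d torch" below.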
img = self.build_full_block(None, None, None, torch, None, None) elif data == 2: # pointing north torch = torch.rotate(rotation, Image.NEAREST) img = self.build_full_block(None, None, torch, None, None, None) elif data == 3: # pointing west torch = torch.rotate(rotation, Image.NEAREST) img = self.build_full_block(None, torch, None, None, None, None) elif data == 4: # pointing east torch = torch.rotate(-rotation, Image.NEAREST) img = self.build_full_block(None, None, None, None, torch, None) elif data == 5: # standing on the floor # compose a "3d torch". img = Image.new("RGBA", (24,24), self.bgcolor) small_crop = small.crop((2,2,14,14)) slice = small_crop.copy() ImageDraw.Draw(slice).rectangle((6,0,12,12),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(slice).rectangle((0,0,4,12),outline=(0,0,0,0),fill=(0,0,0,0)) alpha_over(img, slice, (7,5)) alpha_over(img, small_crop, (6,6)) alpha_over(img, small_crop, (7,6)) alpha_over(img, slice, (7,7)) return img # lantern @material(blockid=[11373, 1038], data=[0, 1], transparent=True) def lantern(self, blockid, data): # get the multipart texture of the lantern if blockid == 11373: inputtexture = self.load_image_texture("assets/minecraft/textures/block/lantern.png") if blockid == 1038: inputtexture = self.load_image_texture("assets/minecraft/textures/block/soul_lantern.png") # # now create the textures, using the parts defined in lantern.json # JSON data for sides # "from": [ 5, 1, 5 ], # "to": [11, 8, 11 ], # { "uv": [ 0, 2, 6, 9 ], "texture": "#all" } side_crop = inputtexture.crop((0, 2, 6, 9)) side_slice = side_crop.copy() side_texture = Image.new("RGBA", (16, 16), self.bgcolor) side_texture.paste(side_slice,(5, 8)) # JSON data for top # { "uv": [ 0, 9, 6, 15 ], "texture": "#all" } top_crop = inputtexture.crop((0, 9, 6, 15)) top_slice = top_crop.copy() top_texture = Image.new("RGBA", (16, 16), self.bgcolor) top_texture.paste(top_slice,(5, 5)) # mimic parts of build_full_block, to get an object smaller than a block # build_full_block(self, top, side1, side2, side3, side4, bottom=None): # a non transparent block uses top, side 3 and side 4.
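# A worked uv-to-crop example (the mapping is inferred from the model JSON
# quoted above): the side face uv [0, 2, 6, 9] becomes
# inputtexture.crop((0, 2, 6, 9)), a 6x7 px region, and pasting it at x == 5
# centres the 6 px wide cage inside its own 16x16 tile.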
img = Image.new("RGBA", (24, 24), self.bgcolor) # prepare the side textures # side3 side3 = self.transform_image_side(side_texture) # Darken this side sidealpha = side3.split()[3] side3 = ImageEnhance.Brightness(side3).enhance(0.9) side3.putalpha(sidealpha) # place the transformed texture hangoff = 0 if data == 1: hangoff = 8 xoff = 4 yoff =- hangoff alpha_over(img, side3, (xoff+0, yoff+6), side3) # side4 side4 = self.transform_image_side(side_texture) side4 = side4.transpose(Image.FLIP_LEFT_RIGHT) # Darken this side sidealpha = side4.split()[3] side4 = ImageEnhance.Brightness(side4).enhance(0.8) side4.putalpha(sidealpha) alpha_over(img, side4, (12-xoff, yoff+6), side4) # top top = self.transform_image_top(top_texture) alpha_over(img, top, (0, 8-hangoff), top) return img # bamboo @material(blockid=11416, transparent=True) def bamboo(self, blockid, data): # get the multipart texture of the lantern inputtexture = self.load_image_texture("assets/minecraft/textures/block/bamboo_stalk.png") # # now create a textures, using the parts defined in bamboo1_age0.json # { "from": [ 7, 0, 7 ], # "to": [ 9, 16, 9 ], # "faces": { # "down": { "uv": [ 13, 4, 15, 6 ], "texture": "#all", "cullface": "down" }, # "up": { "uv": [ 13, 0, 15, 2], "texture": "#all", "cullface": "up" }, # "north": { "uv": [ 0, 0, 2, 16 ], "texture": "#all" }, # "south": { "uv": [ 0, 0, 2, 16 ], "texture": "#all" }, # "west": { "uv": [ 0, 0, 2, 16 ], "texture": "#all" }, # "east": { "uv": [ 0, 0, 2, 16 ], "texture": "#all" } # } # } side_crop = inputtexture.crop((0, 0, 3, 16)) side_slice = side_crop.copy() side_texture = Image.new("RGBA", (16, 16), self.bgcolor) side_texture.paste(side_slice,(0, 0)) # JSON data for top # "up": { "uv": [ 13, 0, 15, 2], "texture": "#all", "cullface": "up" }, top_crop = inputtexture.crop((13, 0, 16, 3)) top_slice = top_crop.copy() top_texture = Image.new("RGBA", (16, 16), self.bgcolor) top_texture.paste(top_slice,(5, 5)) # mimic parts of build_full_block, to get an object smaller than a block # build_full_block(self, top, side1, side2, side3, side4, bottom=None): # a non transparent block uses top, side 3 and side 4. 
img = Image.new("RGBA", (24, 24), self.bgcolor) # prepare the side textures # side3 side3 = self.transform_image_side(side_texture) # Darken this side sidealpha = side3.split()[3] side3 = ImageEnhance.Brightness(side3).enhance(0.9) side3.putalpha(sidealpha) # place the transformed texture xoff = 3 yoff = 0 alpha_over(img, side3, (4+xoff, yoff), side3) # side4 side4 = self.transform_image_side(side_texture) side4 = side4.transpose(Image.FLIP_LEFT_RIGHT) # Darken this side sidealpha = side4.split()[3] side4 = ImageEnhance.Brightness(side4).enhance(0.8) side4.putalpha(sidealpha) alpha_over(img, side4, (-4+xoff, yoff), side4) # top top = self.transform_image_top(top_texture) alpha_over(img, top, (-4+xoff, -5), top) return img # composter @material(blockid=11417, data=list(range(9)), transparent=True) def composter(self, blockid, data): side = self.load_image_texture("assets/minecraft/textures/block/composter_side.png") top = self.load_image_texture("assets/minecraft/textures/block/composter_top.png") # bottom = self.load_image_texture("assets/minecraft/textures/block/composter_bottom.png") if data == 0: # empty return self.build_full_block(top, side, side, side, side) if data == 8: compost = self.transform_image_top( self.load_image_texture("assets/minecraft/textures/block/composter_ready.png")) else: compost = self.transform_image_top( self.load_image_texture("assets/minecraft/textures/block/composter_compost.png")) nudge = {1: (0, 9), 2: (0, 8), 3: (0, 7), 4: (0, 6), 5: (0, 4), 6: (0, 2), 7: (0, 0), 8: (0, 0)} img = self.build_full_block(None, side, side, None, None) alpha_over(img, compost, nudge[data], compost) img2 = self.build_full_block(top, None, None, side, side) alpha_over(img, img2, (0, 0), img2) return img # fire and soul_fire @material(blockid=[51, 1040], transparent=True) def fire(self, blockid, data): if blockid == 51: textureNS = self.load_image_texture("assets/minecraft/textures/block/fire_0.png") textureEW = self.load_image_texture("assets/minecraft/textures/block/fire_1.png") elif blockid == 1040: textureNS = self.load_image_texture("assets/minecraft/textures/block/soul_fire_0.png") textureEW = self.load_image_texture("assets/minecraft/textures/block/soul_fire_1.png") side1 = self.transform_image_side(textureNS) side2 = self.transform_image_side(textureEW).transpose(Image.FLIP_LEFT_RIGHT) img = Image.new("RGBA", (24,24), self.bgcolor) alpha_over(img, side1, (12,0), side1) alpha_over(img, side2, (0,0), side2) alpha_over(img, side1, (0,6), side1) alpha_over(img, side2, (12,6), side2) return img # monster spawner block(blockid=52, top_image="assets/minecraft/textures/block/spawner.png", transparent=True) # wooden, cobblestone, red brick, stone brick, netherbrick, sandstone, spruce, birch, # jungle, quartz, red sandstone, purper_stairs, crimson_stairs, warped_stairs, (dark) prismarine, # mossy brick and mossy cobblestone, stone smooth_quartz # polished_granite polished_andesite polished_diorite granite diorite andesite end_stone_bricks red_nether_brick stairs # smooth_red_sandstone blackstone polished_blackstone polished_blackstone_brick # also all the copper variants @material(blockid=[53, 67, 108, 109, 114, 128, 134, 135, 136, 156, 163, 164, 180, 203, 509, 510, 11337, 11338, 11339, 11370, 11371, 11374, 11375, 11376, 11377, 11378, 11379, 11380, 11381, 11382, 11383, 11384, 11415, 1030, 1031, 1032, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071], data=list(range(128)), transparent=True, solid=True, nospawn=True) def stairs(self, blockid, data): # preserve the upside-down bit 
upside_down = data & 0x4 # find solid quarters within the top or bottom half of the block # NW NE SE SW quarters = [data & 0x8, data & 0x10, data & 0x20, data & 0x40] # rotate the quarters so we can pretend north direction is always upper-left # (numpy.roll returns a rotated copy, so the result must be assigned) quarters = numpy.roll(quarters, [0,1,3,2][self.rotation]) nw,ne,se,sw = quarters stair_id_to_tex = { 53: "assets/minecraft/textures/block/oak_planks.png", 67: "assets/minecraft/textures/block/cobblestone.png", 108: "assets/minecraft/textures/block/bricks.png", 109: "assets/minecraft/textures/block/stone_bricks.png", 114: "assets/minecraft/textures/block/nether_bricks.png", 128: "assets/minecraft/textures/block/sandstone.png", 134: "assets/minecraft/textures/block/spruce_planks.png", 135: "assets/minecraft/textures/block/birch_planks.png", 136: "assets/minecraft/textures/block/jungle_planks.png", 156: "assets/minecraft/textures/block/quartz_block_side.png", 163: "assets/minecraft/textures/block/acacia_planks.png", 164: "assets/minecraft/textures/block/dark_oak_planks.png", 180: "assets/minecraft/textures/block/red_sandstone.png", 203: "assets/minecraft/textures/block/purpur_block.png", 509: "assets/minecraft/textures/block/crimson_planks.png", 510: "assets/minecraft/textures/block/warped_planks.png", 11337: "assets/minecraft/textures/block/prismarine.png", 11338: "assets/minecraft/textures/block/dark_prismarine.png", 11339: "assets/minecraft/textures/block/prismarine_bricks.png", 11370: "assets/minecraft/textures/block/mossy_stone_bricks.png", 11371: "assets/minecraft/textures/block/mossy_cobblestone.png", 11374: "assets/minecraft/textures/block/sandstone_top.png", 11375: "assets/minecraft/textures/block/quartz_block_side.png", 11376: "assets/minecraft/textures/block/polished_granite.png", 11377: "assets/minecraft/textures/block/polished_diorite.png", 11378: "assets/minecraft/textures/block/polished_andesite.png", 11379: "assets/minecraft/textures/block/stone.png", 11380: "assets/minecraft/textures/block/granite.png", 11381: "assets/minecraft/textures/block/diorite.png", 11382: "assets/minecraft/textures/block/andesite.png", 11383: "assets/minecraft/textures/block/end_stone_bricks.png", 11384: "assets/minecraft/textures/block/red_nether_bricks.png", 11415: "assets/minecraft/textures/block/red_sandstone_top.png", 1030: "assets/minecraft/textures/block/blackstone.png", 1031: "assets/minecraft/textures/block/polished_blackstone.png", 1032: "assets/minecraft/textures/block/polished_blackstone_bricks.png", # Cut copper stairs 1064: "assets/minecraft/textures/block/cut_copper.png", 1065: "assets/minecraft/textures/block/exposed_cut_copper.png", 1066: "assets/minecraft/textures/block/weathered_cut_copper.png", 1067: "assets/minecraft/textures/block/oxidized_cut_copper.png", # Waxed cut copper stairs 1068: "assets/minecraft/textures/block/cut_copper.png", 1069: "assets/minecraft/textures/block/exposed_cut_copper.png", 1070: "assets/minecraft/textures/block/weathered_cut_copper.png", 1071: "assets/minecraft/textures/block/oxidized_cut_copper.png", } texture = self.load_image_texture(stair_id_to_tex[blockid]).copy() outside_l = texture.copy() outside_r = texture.copy() inside_l = texture.copy() inside_r = texture.copy() # sandstone, red sandstone, and quartz stairs have special top texture special_tops = { 128: "assets/minecraft/textures/block/sandstone_top.png", 156: "assets/minecraft/textures/block/quartz_block_top.png", 180: "assets/minecraft/textures/block/red_sandstone_top.png", 11375: "assets/minecraft/textures/block/quartz_block_top.png", } if blockid in
special_tops: texture = self.load_image_texture(special_tops[blockid]).copy() slab_top = texture.copy() push = 8 if upside_down else 0 def rect(tex,coords): ImageDraw.Draw(tex).rectangle(coords,outline=(0,0,0,0),fill=(0,0,0,0)) # cut out top or bottom half from inner surfaces rect(inside_l, (0,8-push,15,15-push)) rect(inside_r, (0,8-push,15,15-push)) # cut out missing or obstructed quarters from each surface if not nw: rect(outside_l, (0,push,7,7+push)) rect(texture, (0,0,7,7)) if not nw or sw: rect(inside_r, (8,push,15,7+push)) # will be flipped if not ne: rect(texture, (8,0,15,7)) if not ne or nw: rect(inside_l, (0,push,7,7+push)) if not ne or se: rect(inside_r, (0,push,7,7+push)) # will be flipped if not se: rect(outside_r, (0,push,7,7+push)) # will be flipped rect(texture, (8,8,15,15)) if not se or sw: rect(inside_l, (8,push,15,7+push)) if not sw: rect(outside_l, (8,push,15,7+push)) rect(outside_r, (8,push,15,7+push)) # will be flipped rect(texture, (0,8,7,15)) img = Image.new("RGBA", (24,24), self.bgcolor) if upside_down: # top should have no cut-outs after all texture = slab_top else: # render the slab-level surface slab_top = self.transform_image_top(slab_top) alpha_over(img, slab_top, (0,6)) # render inner left surface inside_l = self.transform_image_side(inside_l) # Darken the vertical part of the second step sidealpha = inside_l.split()[3] # darken it a bit more than usual, looks better inside_l = ImageEnhance.Brightness(inside_l).enhance(0.8) inside_l.putalpha(sidealpha) alpha_over(img, inside_l, (6,3)) # render inner right surface inside_r = self.transform_image_side(inside_r).transpose(Image.FLIP_LEFT_RIGHT) # Darken the vertical part of the second step sidealpha = inside_r.split()[3] # darken it a bit more than usual, looks better inside_r = ImageEnhance.Brightness(inside_r).enhance(0.7) inside_r.putalpha(sidealpha) alpha_over(img, inside_r, (6,3)) # render outer surfaces alpha_over(img, self.build_full_block(texture, None, None, outside_l, outside_r)) return img # normal, locked (used on April Fools' Day), ender and trapped chest # NOTE: locked chest used to be id95 (which is now stained glass) @material(blockid=[54, 130, 146], data=list(range(30)), transparent=True) def chests(self, blockid, data): # the first 3 bits are the orientation as stored in minecraft, # bits 0x8 and 0x10 indicate which half of the double chest it is.
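# A worked example of this encoding: data == 0b01011 gives data & 7 == 3
# (facing south) and data & 24 == 8, which the checks below classify as the
# left half of a double chest; data & 24 == 0 means a single chest.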
# first, do the rotation if needed orientation_data = data & 7 if self.rotation == 1: if orientation_data == 2: data = 5 | (data & 24) elif orientation_data == 3: data = 4 | (data & 24) elif orientation_data == 4: data = 2 | (data & 24) elif orientation_data == 5: data = 3 | (data & 24) elif self.rotation == 2: if orientation_data == 2: data = 3 | (data & 24) elif orientation_data == 3: data = 2 | (data & 24) elif orientation_data == 4: data = 5 | (data & 24) elif orientation_data == 5: data = 4 | (data & 24) elif self.rotation == 3: if orientation_data == 2: data = 4 | (data & 24) elif orientation_data == 3: data = 5 | (data & 24) elif orientation_data == 4: data = 3 | (data & 24) elif orientation_data == 5: data = 2 | (data & 24) if blockid == 130 and data not in [2, 3, 4, 5]: return None # iterate.c will only return the ancil data (without pseudo # ancil data) for locked and ender chests, so only # ancilData = 2,3,4,5 are used for these blockids if data & 24 == 0: if blockid == 130: t = self.load_image("assets/minecraft/textures/entity/chest/ender.png") else: try: t = self.load_image("assets/minecraft/textures/entity/chest/normal.png") except (TextureException, IOError): t = self.load_image("assets/minecraft/textures/entity/chest/chest.png") t = ImageOps.flip(t) # for some reason the 1.15 images are upside down # the texture is no longer in terrain.png; get it from the # entity chest image and crop all the needed parts if t.size != (64, 64): t = t.resize((64, 64), Image.ANTIALIAS) # top top = t.crop((28, 50, 42, 64)) top.load() # every crop needs a load; crop is a lazy operation # (see the PIL manual) img = Image.new("RGBA", (16, 16), self.bgcolor) alpha_over(img, top, (1, 1)) top = img # front front_top = t.crop((42, 45, 56, 50)) front_top.load() front_bottom = t.crop((42, 21, 56, 31)) front_bottom.load() front_lock = t.crop((1, 59, 3, 63)) front_lock.load() front = Image.new("RGBA", (16, 16), self.bgcolor) alpha_over(front, front_top, (1, 1)) alpha_over(front, front_bottom, (1, 5)) alpha_over(front, front_lock, (7, 3)) # left side # left side, right side, and back are essentially the same for # the default texture; we take it anyway just in case other # textures make use of it.
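# The crops below follow the 64x64 entity-chest atlas layout: each face is
# assembled from a 14x5 lid strip plus a 14x10 body strip, e.g. the left side
# pairs (14, 45, 28, 50) with (14, 21, 28, 31), after the whole image has
# been flipped vertically to undo the 1.15+ orientation.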
side_l_top = t.crop((14, 45, 28, 50)) side_l_top.load() side_l_bottom = t.crop((14, 21, 28, 31)) side_l_bottom.load() side_l = Image.new("RGBA", (16, 16), self.bgcolor) alpha_over(side_l, side_l_top, (1, 1)) alpha_over(side_l, side_l_bottom, (1, 5)) # right side side_r_top = t.crop((28, 45, 42, 50)) side_r_top.load() side_r_bottom = t.crop((28, 21, 42, 31)) side_r_bottom.load() side_r = Image.new("RGBA", (16, 16), self.bgcolor) alpha_over(side_r, side_r_top, (1, 1)) alpha_over(side_r, side_r_bottom, (1, 5)) # back back_top = t.crop((0, 45, 14, 50)) back_top.load() back_bottom = t.crop((0, 21, 14, 31)) back_bottom.load() back = Image.new("RGBA", (16, 16), self.bgcolor) alpha_over(back, back_top, (1, 1)) alpha_over(back, back_bottom, (1, 5)) else: # large chest # the textures are no longer in terrain.png; get them from the # left/right entity chest images and crop all the needed parts t_left = self.load_image("assets/minecraft/textures/entity/chest/normal_left.png") t_right = self.load_image("assets/minecraft/textures/entity/chest/normal_right.png") # for some reason the 1.15 images are upside down t_left = ImageOps.flip(t_left) t_right = ImageOps.flip(t_right) # Top top_left = t_right.crop((29, 50, 44, 64)) top_left.load() top_right = t_left.crop((29, 50, 44, 64)) top_right.load() top = Image.new("RGBA", (32, 16), self.bgcolor) alpha_over(top,top_left, (1, 1)) alpha_over(top,top_right, (16, 1)) # Front front_top_left = t_left.crop((43, 45, 58, 50)) front_top_left.load() front_top_right = t_right.crop((43, 45, 58, 50)) front_top_right.load() front_bottom_left = t_left.crop((43, 21, 58, 31)) front_bottom_left.load() front_bottom_right = t_right.crop((43, 21, 58, 31)) front_bottom_right.load() front_lock = t_left.crop((1, 59, 3, 63)) front_lock.load() front = Image.new("RGBA", (32, 16), self.bgcolor) alpha_over(front, front_top_left, (1, 1)) alpha_over(front, front_top_right, (16, 1)) alpha_over(front, front_bottom_left, (1, 5)) alpha_over(front, front_bottom_right, (16, 5)) alpha_over(front, front_lock, (15, 3)) # Back back_top_left = t_right.crop((14, 45, 29, 50)) back_top_left.load() back_top_right = t_left.crop((14, 45, 29, 50)) back_top_right.load() back_bottom_left = t_right.crop((14, 21, 29, 31)) back_bottom_left.load() back_bottom_right = t_left.crop((14, 21, 29, 31)) back_bottom_right.load() back = Image.new("RGBA", (32, 16), self.bgcolor) alpha_over(back, back_top_left, (1, 1)) alpha_over(back, back_top_right, (16, 1)) alpha_over(back, back_bottom_left, (1, 5)) alpha_over(back, back_bottom_right, (16, 5)) # left side side_l_top = t_left.crop((29, 45, 43, 50)) side_l_top.load() side_l_bottom = t_left.crop((29, 21, 43, 31)) side_l_bottom.load() side_l = Image.new("RGBA", (16, 16), self.bgcolor) alpha_over(side_l, side_l_top, (1, 1)) alpha_over(side_l, side_l_bottom, (1, 5)) # right side side_r_top = t_right.crop((0, 45, 14, 50)) side_r_top.load() side_r_bottom = t_right.crop((0, 21, 14, 31)) side_r_bottom.load() side_r = Image.new("RGBA", (16, 16), self.bgcolor) alpha_over(side_r, side_r_top, (1, 1)) alpha_over(side_r, side_r_bottom, (1, 5)) # double chest, left half if ((data & 24 == 8 and data & 7 in [3, 5]) or (data & 24 == 16 and data & 7 in [2, 4])): top = top.crop((0, 0, 16, 16)) top.load() front = front.crop((0, 0, 16, 16)) front.load() back = back.crop((0, 0, 16, 16)) back.load() #~ side = side_l # double chest, right half elif ((data & 24 == 16 and data & 7 in [3, 5]) or (data & 24 == 8 and data & 7 in [2, 4])): top = top.crop((16, 0, 32, 16)) top.load() front = front.crop((16, 0, 32, 16))
front.load() back = back.crop((16, 0, 32, 16)) back.load() #~ side = side_r else: # just in case return None # compose the final block img = Image.new("RGBA", (24, 24), self.bgcolor) if data & 7 == 2: # north side = self.transform_image_side(side_r) alpha_over(img, side, (1, 7)) back = self.transform_image_side(back) alpha_over(img, back.transpose(Image.FLIP_LEFT_RIGHT), (11, 7)) front = self.transform_image_side(front) top = self.transform_image_top(top.rotate(180)) alpha_over(img, top, (0, 2)) elif data & 7 == 3: # south side = self.transform_image_side(side_l) alpha_over(img, side, (1, 7)) front = self.transform_image_side(front).transpose(Image.FLIP_LEFT_RIGHT) top = self.transform_image_top(top.rotate(180)) alpha_over(img, top, (0, 2)) alpha_over(img, front, (11, 7)) elif data & 7 == 4: # west side = self.transform_image_side(side_r) alpha_over(img, side.transpose(Image.FLIP_LEFT_RIGHT), (11, 7)) front = self.transform_image_side(front) alpha_over(img, front, (1, 7)) top = self.transform_image_top(top.rotate(270)) alpha_over(img, top, (0, 2)) elif data & 7 == 5: # east back = self.transform_image_side(back) side = self.transform_image_side(side_l).transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, side, (11, 7)) alpha_over(img, back, (1, 7)) top = self.transform_image_top(top.rotate(270)) alpha_over(img, top, (0, 2)) else: # just in case img = None return img # redstone wire # uses pseudo-ancildata found in iterate.c @material(blockid=55, data=list(range(128)), transparent=True) def wire(self, blockid, data): if data & 0b1000000 == 64: # powered redstone wire redstone_wire_t = self.load_image_texture("assets/minecraft/textures/block/redstone_dust_line0.png").rotate(90) redstone_wire_t = self.tint_texture(redstone_wire_t,(255,0,0)) redstone_cross_t = self.load_image_texture("assets/minecraft/textures/block/redstone_dust_dot.png") redstone_cross_t = self.tint_texture(redstone_cross_t,(255,0,0)) else: # unpowered redstone wire redstone_wire_t = self.load_image_texture("assets/minecraft/textures/block/redstone_dust_line0.png").rotate(90) redstone_wire_t = self.tint_texture(redstone_wire_t,(48,0,0)) redstone_cross_t = self.load_image_texture("assets/minecraft/textures/block/redstone_dust_dot.png") redstone_cross_t = self.tint_texture(redstone_cross_t,(48,0,0)) # generate an image per redstone direction branch_top_left = redstone_cross_t.copy() ImageDraw.Draw(branch_top_left).rectangle((0,0,4,15),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(branch_top_left).rectangle((11,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(branch_top_left).rectangle((0,11,15,15),outline=(0,0,0,0),fill=(0,0,0,0)) branch_top_right = redstone_cross_t.copy() ImageDraw.Draw(branch_top_right).rectangle((0,0,15,4),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(branch_top_right).rectangle((0,0,4,15),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(branch_top_right).rectangle((0,11,15,15),outline=(0,0,0,0),fill=(0,0,0,0)) branch_bottom_right = redstone_cross_t.copy() ImageDraw.Draw(branch_bottom_right).rectangle((0,0,15,4),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(branch_bottom_right).rectangle((0,0,4,15),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(branch_bottom_right).rectangle((11,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0)) branch_bottom_left = redstone_cross_t.copy() ImageDraw.Draw(branch_bottom_left).rectangle((0,0,15,4),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(branch_bottom_left).rectangle((11,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0)) 
ImageDraw.Draw(branch_bottom_left).rectangle((0,11,15,15),outline=(0,0,0,0),fill=(0,0,0,0)) # generate the bottom texture if data & 0b111111 == 0: bottom = redstone_cross_t.copy() # see iterate.c for where these masks come from has_x = (data & 0b1010) > 0 has_z = (data & 0b0101) > 0 if has_x and has_z: bottom = redstone_cross_t.copy() if has_x: alpha_over(bottom, redstone_wire_t.copy()) if has_z: alpha_over(bottom, redstone_wire_t.copy().rotate(90)) else: if has_x: bottom = redstone_wire_t.copy() elif has_z: bottom = redstone_wire_t.copy().rotate(90) elif data & 0b1111 == 0: bottom = redstone_cross_t.copy() # check for going up redstone wire if data & 0b100000 == 32: side1 = redstone_wire_t.rotate(90) else: side1 = None if data & 0b010000 == 16: side2 = redstone_wire_t.rotate(90) else: side2 = None img = self.build_full_block(None,side1,side2,None,None,bottom) return img # diamond ore block(blockid=56, top_image="assets/minecraft/textures/block/diamond_ore.png") # diamond block block(blockid=57, top_image="assets/minecraft/textures/block/diamond_block.png") # Table blocks with no facing or other properties where sides are not all the same # Includes: Crafting table, fletching table, cartography table, smithing table @material(blockid=[58, 11359, 11360, 11361], solid=True, nodata=True) def block_table(self, blockid, data): block_name = {58: "crafting_table", 11359: "fletching_table", 11360: "cartography_table", 11361: "smithing_table"}[blockid] # Top texture doesn't vary with self.rotation, but texture rotation does top_tex = block_name + "_top" top_rot = [0, 270, 180, 90][self.rotation] # List of side textures from side 1 to 4 for each blockid side_tex_map = {58: ["front", "side", "front", "side"], 11359: ["front", "side", "side", "front"], 11360: ["side3", "side3", "side2", "side1"], 11361: ["front", "side", "side", "front"]}[blockid] # Determine which side textures to use side3_id = [2, 3, 1, 0][self.rotation] side4_id = [3, 1, 0, 2][self.rotation] side3_tex = block_name + "_" + side_tex_map[side3_id] side4_tex = block_name + "_" + side_tex_map[side4_id] tex_path = "assets/minecraft/textures/block" top = self.load_image_texture("{}/{}.png".format(tex_path, top_tex)).copy() side3 = self.load_image_texture("{}/{}.png".format(tex_path, side3_tex)) side4 = self.load_image_texture("{}/{}.png".format(tex_path, side4_tex)).copy() top = top.rotate(top_rot) side4 = side4.transpose(Image.FLIP_LEFT_RIGHT) return self.build_full_block(top, None, None, side3, side4, None) @material(blockid=11366, data=list(range(8)), transparent=True, solid=True, nospawn=True) def lectern(self, blockid, data): # Do rotation, mask to not clobber book data data = data & 0b100 | ((self.rotation + (data & 0b11)) % 4) # Load textures base_raw_t = self.load_image_texture("assets/minecraft/textures/block/lectern_base.png") front_raw_t = self.load_image_texture("assets/minecraft/textures/block/lectern_front.png") side_raw_t = self.load_image_texture("assets/minecraft/textures/block/lectern_sides.png") top_raw_t = self.load_image_texture("assets/minecraft/textures/block/lectern_top.png") def create_tile(img_src, coord_crop, coord_paste, rot): # Takes an image, crops a region, optionally rotates the # texture, then finally pastes it onto a 16x16 image img_out = Image.new("RGBA", (16, 16), self.bgcolor) img_in = img_src.crop(coord_crop) if rot != 0: img_in = img_in.rotate(rot, expand=True) img_out.paste(img_in, coord_paste) return img_out def darken_image(img_src, darken_value): # Takes an image & alters the brightness, leaving 
alpha intact alpha = img_src.split()[3] img_out = ImageEnhance.Brightness(img_src).enhance(darken_value) img_out.putalpha(alpha) return img_out # Generate base base_top_t = base_raw_t.rotate([0, 270, 180, 90][data & 0b11]) # Front & side textures are one pixel taller than they should be # pre-transformation as otherwise the topmost row of pixels # post-transformation are rather transparent, which results in # a visible gap between the base's sides & top base_front_t = create_tile(base_raw_t, (0, 13, 16, 16), (0, 13), 0) base_side_t = create_tile(base_raw_t, (0, 5, 16, 8), (0, 13), 0) base_side3_t = base_front_t if data & 0b11 == 1 else base_side_t base_side4_t = base_front_t if data & 0b11 == 0 else base_side_t img = self.build_full_block((base_top_t, 14), None, None, base_side3_t, base_side4_t, None) # Generate central pillar side_flip_t = side_raw_t.transpose(Image.FLIP_LEFT_RIGHT) # Define parameters used to obtain the texture for each side pillar_param = [{'img': front_raw_t, 'crop': (8, 4, 16, 16), 'paste': (4, 2), 'rot': 0}, # South {'img': side_raw_t, 'crop': (2, 8, 15, 16), 'paste': (4, 1), 'rot': 270}, # West {'img': front_raw_t, 'crop': (0, 4, 8, 13), 'paste': (4, 5), 'rot': 0}, # North {'img': side_flip_t, 'crop': (2, 8, 15, 16), 'paste': (4, 1), 'rot': 90}] # East # Determine which sides are rendered pillar_side = [pillar_param[(3 - (data & 0b11)) % 4], pillar_param[(2 - (data & 0b11)) % 4]] pillar_side3_t = create_tile(pillar_side[0]['img'], pillar_side[0]['crop'], pillar_side[0]['paste'], pillar_side[0]['rot']) pillar_side4_t = create_tile(pillar_side[1]['img'], pillar_side[1]['crop'], pillar_side[1]['paste'], pillar_side[1]['rot']) pillar_side4_t = pillar_side4_t.transpose(Image.FLIP_LEFT_RIGHT) pillar_side3_t = self.transform_image_side(pillar_side3_t) pillar_side3_t = darken_image(pillar_side3_t, 0.9) pillar_side4_t = self.transform_image_side(pillar_side4_t).transpose(Image.FLIP_LEFT_RIGHT) pillar_side4_t = darken_image(pillar_side4_t, 0.8) alpha_over(img, pillar_side3_t, (3, 4), pillar_side3_t) alpha_over(img, pillar_side4_t, (9, 4), pillar_side4_t) # Generate stand if (data & 0b11) in [0, 1]: # South, West stand_side3_t = create_tile(side_raw_t, (0, 0, 16, 4), (0, 4), 0) stand_side4_t = create_tile(side_raw_t, (0, 4, 13, 8), (0, 0), -22.5) else: # North, East stand_side3_t = create_tile(side_raw_t, (0, 4, 16, 8), (0, 0), 0) stand_side4_t = create_tile(side_raw_t, (0, 4, 13, 8), (0, 0), 22.5) stand_side3_t = self.transform_image_angle(stand_side3_t, math.radians(22.5)) stand_side3_t = darken_image(stand_side3_t, 0.9) stand_side4_t = self.transform_image_side(stand_side4_t).transpose(Image.FLIP_LEFT_RIGHT) stand_side4_t = darken_image(stand_side4_t, 0.8) stand_top_t = create_tile(top_raw_t, (0, 1, 16, 14), (0, 1), 0) if data & 0b100: # Lectern has a book, modify the stand top texture book_raw_t = self.load_image("assets/minecraft/textures/entity/enchanting_table_book.png") book_t = Image.new("RGBA", (14, 10), self.bgcolor) book_part_t = book_raw_t.crop((0, 0, 7, 10)) # Left cover alpha_over(stand_top_t, book_part_t, (1, 3), book_part_t) book_part_t = book_raw_t.crop((15, 0, 22, 10)) # Right cover alpha_over(stand_top_t, book_part_t, (8, 3)) book_part_t = book_raw_t.crop((24, 10, 29, 18)).rotate(180) # Left page alpha_over(stand_top_t, book_part_t, (3, 4), book_part_t) book_part_t = book_raw_t.crop((29, 10, 34, 18)).rotate(180) # Right page alpha_over(stand_top_t, book_part_t, (8, 4), book_part_t) # Perform affine transformation transform_matrix = 
numpy.matrix(numpy.identity(3)) if (data & 0b11) in [0, 1]: # South, West # Translate: 8 -X, 8 -Y transform_matrix *= numpy.matrix([[1, 0, 8], [0, 1, 8], [0, 0, 1]]) # Rotate 40 degrees clockwise tc = math.cos(math.radians(40)) ts = math.sin(math.radians(40)) transform_matrix *= numpy.matrix([[tc, ts, 0], [-ts, tc, 0], [0, 0, 1]]) # Shear in the Y direction tt = math.tan(math.radians(10)) transform_matrix *= numpy.matrix([[1, 0, 0], [tt, 1, 0], [0, 0, 1]]) # Scale to 70% height & 110% width transform_matrix *= numpy.matrix([[1 / 1.1, 0, 0], [0, 1 / 0.7, 0], [0, 0, 1]]) # Translate: 12 +X, 8 +Y transform_matrix *= numpy.matrix([[1, 0, -12], [0, 1, -8], [0, 0, 1]]) else: # North, East # Translate: 8 -X, 8 -Y transform_matrix *= numpy.matrix([[1, 0, 8], [0, 1, 8], [0, 0, 1]]) # Shear in the X direction tt = math.tan(math.radians(25)) transform_matrix *= numpy.matrix([[1, tt, 0], [0, 1, 0], [0, 0, 1]]) # Scale to 80% height transform_matrix *= numpy.matrix([[1, 0, 0], [0, 1 / 0.8, 0], [0, 0, 1]]) # Rotate 220 degrees clockwise tc = math.cos(math.radians(40 + 180)) ts = math.sin(math.radians(40 + 180)) transform_matrix *= numpy.matrix([[tc, ts, 0], [-ts, tc, 0], [0, 0, 1]]) # Scale to 60% height transform_matrix *= numpy.matrix([[1, 0, 0], [0, 1 / 0.6, 0], [0, 0, 1]]) # Translate: +13 X, +7 Y transform_matrix *= numpy.matrix([[1, 0, -13], [0, 1, -7], [0, 0, 1]]) transform_matrix = numpy.array(transform_matrix)[:2, :].ravel().tolist() stand_top_t = stand_top_t.transform((24, 24), Image.AFFINE, transform_matrix) img_stand = Image.new("RGBA", (24, 24), self.bgcolor) alpha_over(img_stand, stand_side3_t, (-4, 2), stand_side3_t) # Fix some holes alpha_over(img_stand, stand_side3_t, (-3, 3), stand_side3_t) alpha_over(img_stand, stand_side4_t, (12, 5), stand_side4_t) alpha_over(img_stand, stand_top_t, (0, 0), stand_top_t) # Flip the stand if North or South facing if (data & 0b11) in [0, 2]: img_stand = img_stand.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, img_stand, (0, -2), img_stand) return img @material(blockid=11367, data=list(range(4)), solid=True) def loom(self, blockid, data): # Do rotation data = (self.rotation + data) % 4 top_rot = [180, 90, 0, 270][data] side3_tex = "front" if data == 1 else "side" side4_tex = "front" if data == 0 else "side" tex_path = "assets/minecraft/textures/block" top = self.load_image_texture("{}/loom_top.png".format(tex_path)).copy() side3 = self.load_image_texture("{}/loom_{}.png".format(tex_path, side3_tex)) side4 = self.load_image_texture("{}/loom_{}.png".format(tex_path, side4_tex)).copy() top = top.rotate(top_rot) side4 = side4.transpose(Image.FLIP_LEFT_RIGHT) return self.build_full_block(top, None, None, side3, side4, None) @material(blockid=11368, data=list(range(4)), transparent=True, solid=True, nospawn=True) def stonecutter(self, blockid, data): # Do rotation data = (self.rotation + data) % 4 top_t = self.load_image_texture("assets/minecraft/textures/block/stonecutter_top.png").copy() side_t = self.load_image_texture("assets/minecraft/textures/block/stonecutter_side.png") # Stonecutter saw texture contains multiple tiles, since it's # 16px wide rely on load_image_texture() to crop appropriately blade_t = self.load_image_texture("assets/minecraft/textures/block/stonecutter_saw.png").copy() top_t = top_t.rotate([180, 90, 0, 270][data]) img = self.build_full_block((top_t, 7), None, None, side_t, side_t, None) # Add saw blade if data in [0, 2]: blade_t = blade_t.transpose(Image.FLIP_LEFT_RIGHT) blade_t = self.transform_image_side(blade_t) if data in [0, 2]: 
blade_t = blade_t.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, blade_t, (6, -4), blade_t) return img @material(blockid=11369, data=list(range(12)), transparent=True, solid=True, nospawn=True) def grindstone(self, blockid, data): # Do rotation, mask to not clobber mounting info data = data & 0b1100 | ((self.rotation + (data & 0b11)) % 4) # Load textures side_raw_t = self.load_image_texture("assets/minecraft/textures/block/grindstone_side.png").copy() round_raw_t = self.load_image_texture("assets/minecraft/textures/block/grindstone_round.png").copy() pivot_raw_t = self.load_image_texture("assets/minecraft/textures/block/grindstone_pivot.png").copy() leg_raw_t = self.load_image_texture("assets/minecraft/textures/block/dark_oak_log.png").copy() def create_tile(img_src, coord_crop, coord_paste, scale): # Takes an image, crops a region, optionally scales the # texture, then finally pastes it onto a 16x16 image img_out = Image.new("RGBA", (16, 16), self.bgcolor) img_in = img_src.crop(coord_crop) if scale >= 0 and scale != 1: w, h = img_in.size img_in = img_in.resize((int(w * scale), int(h * scale)), Image.NEAREST) img_out.paste(img_in, coord_paste) return img_out # Set variables defining positions of various parts wall_mounted = bool(data & 0b0100) rot_leg = [0, 270, 0][data >> 2] if wall_mounted: pos_leg = (32, 28) if data & 0b11 in [2, 3] else (10, 18) coord_leg = [(0, 0), (-10, -1), (2, 3)] offset_final = [(2, 1), (-2, 1), (-2, -1), (2, -1)][data & 0b11] else: pos_leg = [(22, 31), (22, 9)][data >> 3] coord_leg = [(0, 0), (-1, 2), (-2, -3)] offset_final = (0, 2 * (data >> 2) - 1) # Create parts # Scale up small parts like pivot & leg to avoid ugly results # when shearing & combining parts, then scale down to original # size just before final image composition scale_factor = 2 side_t = create_tile(side_raw_t, (0, 0, 12, 12), (2, 0), 1) round_ud_t = create_tile(round_raw_t, (0, 0, 8, 12), (4, 2), 1) round_lr_t = create_tile(round_raw_t, (0, 0, 8, 12), (4, 0), 1) pivot_outer_t = create_tile(pivot_raw_t, (0, 0, 6, 6), (2, 2), scale_factor) pivot_lr_t = create_tile(pivot_raw_t, (6, 0, 8, 6), (2, 2), scale_factor) pivot_ud_t = create_tile(pivot_raw_t, (8, 0, 10, 6), (2, 2), scale_factor) leg_outer_t = create_tile(leg_raw_t, (6, 9, 10, 16), (2, 2), scale_factor).rotate(rot_leg) leg_lr_t = create_tile(leg_raw_t, (12, 9, 14, 16), (2, 2), scale_factor).rotate(rot_leg) leg_ud_t = create_tile(leg_raw_t, (2, 6, 4, 10), (2, 2), scale_factor) # Transform to block sides & tops side_t = self.transform_image_side(side_t) round_ud_t = self.transform_image_top(round_ud_t) round_lr_t = self.transform_image_side(round_lr_t).transpose(Image.FLIP_LEFT_RIGHT) pivot_outer_t = self.transform_image_side(pivot_outer_t) pivot_lr_t = self.transform_image_side(pivot_lr_t).transpose(Image.FLIP_LEFT_RIGHT) pivot_ud_t = self.transform_image_top(pivot_ud_t) leg_outer_t = self.transform_image_side(leg_outer_t) if wall_mounted: leg_lr_t = self.transform_image_top(leg_lr_t).transpose(Image.FLIP_LEFT_RIGHT) leg_ud_t = self.transform_image_side(leg_ud_t).transpose(Image.FLIP_LEFT_RIGHT) else: leg_lr_t = self.transform_image_side(leg_lr_t).transpose(Image.FLIP_LEFT_RIGHT) leg_ud_t = self.transform_image_top(leg_ud_t) # Compose leg texture img_leg = Image.new("RGBA", (24 * scale_factor, 24 * scale_factor), self.bgcolor) alpha_over(img_leg, leg_outer_t, coord_leg[0], leg_outer_t) alpha_over(img_leg, leg_lr_t, coord_leg[1], leg_lr_t) alpha_over(img_leg, leg_ud_t, coord_leg[2], leg_ud_t) # Compose pivot texture (& combine with leg) 
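        # Note on the 2x supersampling used above: the small parts (pivot, leg)
        # are composed on an enlarged canvas and only scaled back down at the
        # very end, which hides shearing artifacts. A sketch of the sizes
        # involved (derived from scale_factor = 2 above):
        #
        #   >>> scale_factor = 2
        #   >>> (24 * scale_factor, 24 * scale_factor)   # working canvas
        #   (48, 48)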
img_pivot = Image.new("RGBA", (24 * scale_factor, 24 * scale_factor), self.bgcolor) alpha_over(img_pivot, pivot_ud_t, (20, 18), pivot_ud_t) alpha_over(img_pivot, pivot_lr_t, (23, 24), pivot_lr_t) # Fix gaps between face edges alpha_over(img_pivot, pivot_lr_t, (24, 24), pivot_lr_t) alpha_over(img_pivot, img_leg, pos_leg, img_leg) alpha_over(img_pivot, pivot_outer_t, (21, 21), pivot_outer_t) if hasattr(Image, "LANCZOS"): # workaround for older Pillow img_pivot = img_pivot.resize((24, 24), Image.LANCZOS) else: img_pivot = img_pivot.resize((24, 24)) # Combine leg, side, round & pivot img = Image.new("RGBA", (24, 24), self.bgcolor) img_final = img.copy() alpha_over(img, img_pivot, (1, -5), img_pivot) alpha_over(img, round_ud_t, (0, 2), round_ud_t) # Fix gaps between face edges alpha_over(img, side_t, (3, 6), side_t) alpha_over(img, round_ud_t, (0, 1), round_ud_t) alpha_over(img, round_lr_t, (10, 6), round_lr_t) alpha_over(img, img_pivot, (-5, -1), img_pivot) if (data & 0b11) in [1, 3]: img = img.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img_final, img, offset_final, img) return img_final # crops with 8 data values (like wheat) @material(blockid=59, data=list(range(8)), transparent=True, nospawn=True) def crops8(self, blockid, data): raw_crop = self.load_image_texture("assets/minecraft/textures/block/wheat_stage%d.png" % data) crop1 = self.transform_image_top(raw_crop) crop2 = self.transform_image_side(raw_crop) crop3 = crop2.transpose(Image.FLIP_LEFT_RIGHT) img = Image.new("RGBA", (24,24), self.bgcolor) alpha_over(img, crop1, (0,12), crop1) alpha_over(img, crop2, (6,3), crop2) alpha_over(img, crop3, (6,3), crop3) return img # farmland and grass path (15/16 blocks) @material(blockid=[60, 208], data=list(range(2)), solid=True, transparent=True, nospawn=True) def farmland(self, blockid, data): if blockid == 60: side = self.load_image_texture("assets/minecraft/textures/block/dirt.png").copy() if data == 0: top = self.load_image_texture("assets/minecraft/textures/block/farmland.png") else: top = self.load_image_texture("assets/minecraft/textures/block/farmland_moist.png") # dirt.png is 16 pixels tall, so we need to crop it before building full block side = side.crop((0, 1, 16, 16)) else: top = self.load_image_texture("assets/minecraft/textures/block/dirt_path_top.png") side = self.load_image_texture("assets/minecraft/textures/block/dirt_path_side.png") # side already has 1 transparent pixel at the top, so it doesn't need to be modified # just shift the top image down 1 pixel return self.build_full_block((top, 1), side, side, side, side) # signposts @material(blockid=[63,11401,11402,11403,11404,11405,11406,12505,12506], data=list(range(16)), transparent=True) def signpost(self, blockid, data): # first rotations if self.rotation == 1: data = (data + 4) % 16 elif self.rotation == 2: data = (data + 8) % 16 elif self.rotation == 3: data = (data + 12) % 16 sign_texture = { # (texture on sign, texture on stick) 63: ("oak_planks.png", "oak_log.png"), 11401: ("oak_planks.png", "oak_log.png"), 11402: ("spruce_planks.png", "spruce_log.png"), 11403: ("birch_planks.png", "birch_log.png"), 11404: ("jungle_planks.png", "jungle_log.png"), 11405: ("acacia_planks.png", "acacia_log.png"), 11406: ("dark_oak_planks.png", "dark_oak_log.png"), 12505: ("crimson_planks.png", "crimson_stem.png"), 12506: ("warped_planks.png", "warped_stem.png"), } texture_path, texture_stick_path = ["assets/minecraft/textures/block/" + x for x in sign_texture[blockid]] texture = self.load_image_texture(texture_path).copy() # cut the planks 
# to the size of a signpost
        ImageDraw.Draw(texture).rectangle((0,12,15,15),outline=(0,0,0,0),fill=(0,0,0,0))

        # If the signpost is facing the viewer directly, draw some
        # random dots; they will look like text.
        if data in (0,1,2,3,4,5,15):
            for i in range(15):
                x = randint(4,11)
                y = randint(3,7)
                texture.putpixel((x,y),(0,0,0,255))

        # Minecraft uses the wood texture for the signpost stick
        texture_stick = self.load_image_texture(texture_stick_path)
        texture_stick = texture_stick.resize((12,12), Image.ANTIALIAS)
        ImageDraw.Draw(texture_stick).rectangle((2,0,12,12),outline=(0,0,0,0),fill=(0,0,0,0))

        img = Image.new("RGBA", (24,24), self.bgcolor)

        # W N ~90 E S ~270
        angles = (330.,345.,0.,15.,30.,55.,95.,120.,150.,165.,180.,195.,210.,230.,265.,310.)
        angle = math.radians(angles[data])
        post = self.transform_image_angle(texture, angle)

        # choose the position of the "3D effect"
        incrementx = 0
        if data in (1,6,7,8,9,14):
            incrementx = -1
        elif data in (3,4,5,11,12,13):
            incrementx = +1

        alpha_over(img, texture_stick,(11, 8),texture_stick)
        # post2 is a brighter signpost pasted with a small shift,
        # giving the signpost some 3D effect.
        post2 = ImageEnhance.Brightness(post).enhance(1.2)
        alpha_over(img, post2,(incrementx, -3),post2)
        alpha_over(img, post, (0,-2), post)
        return img
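    # Note: the `angles` tuple above maps each of the 16 sign-rotation data
    # values to a hand-tuned screen-space angle for transform_image_angle();
    # the values are deliberately not a uniform 22.5-degree grid. A quick
    # illustration of the lookup:
    #
    #   >>> angles = (330., 345., 0., 15., 30., 55., 95., 120.,
    #   ...           150., 165., 180., 195., 210., 230., 265., 310.)
    #   >>> angles[4]       # data value 4
    #   30.0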
    # wooden and iron door
    # uses pseudo-ancildata found in iterate.c
    @material(blockid=[64,71,193,194,195,196,197, 499, 500], data=list(range(32)), transparent=True)
    def door(self, blockid, data):
        # Masked to not clobber block top/bottom & swung info
        if self.rotation == 1:
            if (data & 0b00011) == 0: data = data & 0b11100 | 1
            elif (data & 0b00011) == 1: data = data & 0b11100 | 2
            elif (data & 0b00011) == 2: data = data & 0b11100 | 3
            elif (data & 0b00011) == 3: data = data & 0b11100 | 0
        elif self.rotation == 2:
            if (data & 0b00011) == 0: data = data & 0b11100 | 2
            elif (data & 0b00011) == 1: data = data & 0b11100 | 3
            elif (data & 0b00011) == 2: data = data & 0b11100 | 0
            elif (data & 0b00011) == 3: data = data & 0b11100 | 1
        elif self.rotation == 3:
            if (data & 0b00011) == 0: data = data & 0b11100 | 3
            elif (data & 0b00011) == 1: data = data & 0b11100 | 0
            elif (data & 0b00011) == 2: data = data & 0b11100 | 1
            elif (data & 0b00011) == 3: data = data & 0b11100 | 2

        if data & 0x8 == 0x8:  # top of the door
            if blockid == 64:  # classic wood door
                raw_door = self.load_image_texture("assets/minecraft/textures/block/oak_door_top.png")
            elif blockid == 71:  # iron door
                raw_door = self.load_image_texture("assets/minecraft/textures/block/iron_door_top.png")
            elif blockid == 193:  # spruce door
                raw_door = self.load_image_texture("assets/minecraft/textures/block/spruce_door_top.png")
            elif blockid == 194:  # birch door
                raw_door = self.load_image_texture("assets/minecraft/textures/block/birch_door_top.png")
            elif blockid == 195:  # jungle door
                raw_door = self.load_image_texture("assets/minecraft/textures/block/jungle_door_top.png")
            elif blockid == 196:  # acacia door
                raw_door = self.load_image_texture("assets/minecraft/textures/block/acacia_door_top.png")
            elif blockid == 197:  # dark_oak door
                raw_door = self.load_image_texture("assets/minecraft/textures/block/dark_oak_door_top.png")
            elif blockid == 499:  # crimson door
                raw_door = self.load_image_texture("assets/minecraft/textures/block/crimson_door_top.png")
            elif blockid == 500:  # warped door
                raw_door = self.load_image_texture("assets/minecraft/textures/block/warped_door_top.png")
        else:  # bottom of the door
            if blockid == 64:
                raw_door = self.load_image_texture("assets/minecraft/textures/block/oak_door_bottom.png")
            elif blockid == 71:  # iron door
                raw_door = self.load_image_texture("assets/minecraft/textures/block/iron_door_bottom.png")
            elif blockid == 193:  # spruce door
                raw_door = self.load_image_texture("assets/minecraft/textures/block/spruce_door_bottom.png")
            elif blockid == 194:  # birch door
                raw_door = self.load_image_texture("assets/minecraft/textures/block/birch_door_bottom.png")
            elif blockid == 195:  # jungle door
                raw_door = self.load_image_texture("assets/minecraft/textures/block/jungle_door_bottom.png")
            elif blockid == 196:  # acacia door
                raw_door = self.load_image_texture("assets/minecraft/textures/block/acacia_door_bottom.png")
            elif blockid == 197:  # dark_oak door
                raw_door = self.load_image_texture("assets/minecraft/textures/block/dark_oak_door_bottom.png")
            elif blockid == 499:  # crimson door
                raw_door = self.load_image_texture("assets/minecraft/textures/block/crimson_door_bottom.png")
            elif blockid == 500:  # warped door
                raw_door = self.load_image_texture("assets/minecraft/textures/block/warped_door_bottom.png")

        # if you want to render all doors as closed, force closed to be True
        if data & 0x4 == 0x4:
            closed = False
        else:
            closed = True

        if data & 0x10 == 0x10:
            # hinge on the left (facing same door direction)
            hinge_on_left = True
        else:
            # hinge on the right (default single door)
            hinge_on_left = False

        # mask out the high bits to figure out the orientation
        img = Image.new("RGBA", (24,24), self.bgcolor)
        if (data & 0x03) == 0:  # facing west when closed
            if hinge_on_left:
                if closed:
                    tex = self.transform_image_side(raw_door.transpose(Image.FLIP_LEFT_RIGHT))
                    alpha_over(img, tex, (0,6), tex)
                else:
                    # flip first to set the doorknob on the correct side
                    tex = self.transform_image_side(raw_door.transpose(Image.FLIP_LEFT_RIGHT))
                    tex = tex.transpose(Image.FLIP_LEFT_RIGHT)
                    alpha_over(img, tex, (12,6), tex)
            else:
                if closed:
                    tex = self.transform_image_side(raw_door)
                    alpha_over(img, tex, (0,6), tex)
                else:
                    # flip first to set the doorknob on the correct side
                    tex = self.transform_image_side(raw_door.transpose(Image.FLIP_LEFT_RIGHT))
                    tex = tex.transpose(Image.FLIP_LEFT_RIGHT)
                    alpha_over(img, tex, (0,0), tex)

        if (data & 0x03) == 1:  # facing north when closed
            if hinge_on_left:
                if closed:
                    tex = self.transform_image_side(raw_door).transpose(Image.FLIP_LEFT_RIGHT)
                    alpha_over(img, tex, (0,0), tex)
                else:
                    # flip first to set the doorknob on the correct side
                    tex = self.transform_image_side(raw_door)
                    alpha_over(img, tex, (0,6), tex)
            else:
                if closed:
                    tex = self.transform_image_side(raw_door).transpose(Image.FLIP_LEFT_RIGHT)
                    alpha_over(img, tex, (0,0), tex)
                else:
                    # flip first to set the doorknob on the correct side
                    tex = self.transform_image_side(raw_door)
                    alpha_over(img, tex, (12,0), tex)

        if (data & 0x03) == 2:  # facing east when closed
            if hinge_on_left:
                if closed:
                    tex = self.transform_image_side(raw_door)
                    alpha_over(img, tex, (12,0), tex)
                else:
                    # flip first to set the doorknob on the correct side
                    tex = self.transform_image_side(raw_door)
                    tex = tex.transpose(Image.FLIP_LEFT_RIGHT)
                    alpha_over(img, tex, (0,0), tex)
            else:
                if closed:
                    tex = self.transform_image_side(raw_door.transpose(Image.FLIP_LEFT_RIGHT))
                    alpha_over(img, tex, (12,0), tex)
                else:
                    # flip first to set the doorknob on the correct side
                    tex = self.transform_image_side(raw_door).transpose(Image.FLIP_LEFT_RIGHT)
                    alpha_over(img, tex, (12,6), tex)

        if (data & 0x03) == 3:  # facing south when closed
            if hinge_on_left:
                if closed:
                    tex = self.transform_image_side(raw_door).transpose(Image.FLIP_LEFT_RIGHT)
                    alpha_over(img, tex, (12,6), tex)
                else:
                    # flip first to set the doorknob on the correct side
                    tex = self.transform_image_side(raw_door.transpose(Image.FLIP_LEFT_RIGHT))
                    alpha_over(img, tex, (12,0), tex)
            else:
                if closed:
                    tex = self.transform_image_side(raw_door.transpose(Image.FLIP_LEFT_RIGHT))
                    tex = tex.transpose(Image.FLIP_LEFT_RIGHT)
                    alpha_over(img, tex, (12,6), tex)
                else:
                    # flip first to set the doorknob on the correct side
                    tex = self.transform_image_side(raw_door.transpose(Image.FLIP_LEFT_RIGHT))
                    alpha_over(img, tex, (0,6), tex)

        return img
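    # A rough guide to the door pseudo-data consumed above (inferred from the
    # masks used in door()): bits 0-1 give the facing, 0x4 means the door is
    # swung open, 0x8 selects the top half, and 0x10 puts the hinge on the
    # left. For a hypothetical value:
    #
    #   >>> data = 0b01001      # top half, facing north, hinge right, closed
    #   >>> bool(data & 0x8), data & 0x03
    #   (True, 1)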
    # ladder
    @material(blockid=65, data=[2, 3, 4, 5], transparent=True)
    def ladder(self, blockid, data):
        # first rotations
        if self.rotation == 1:
            if data == 2: data = 5
            elif data == 3: data = 4
            elif data == 4: data = 2
            elif data == 5: data = 3
        elif self.rotation == 2:
            if data == 2: data = 3
            elif data == 3: data = 2
            elif data == 4: data = 5
            elif data == 5: data = 4
        elif self.rotation == 3:
            if data == 2: data = 4
            elif data == 3: data = 5
            elif data == 4: data = 3
            elif data == 5: data = 2

        img = Image.new("RGBA", (24,24), self.bgcolor)
        raw_texture = self.load_image_texture("assets/minecraft/textures/block/ladder.png")

        if data == 5:
            # normally this ladder would be obscured by the block it's attached
            # to, but since ladders can apparently be placed on transparent
            # blocks, we have to render this thing anyway. same for data == 2
            tex = self.transform_image_side(raw_texture)
            alpha_over(img, tex, (0,6), tex)
            return img
        if data == 2:
            tex = self.transform_image_side(raw_texture).transpose(Image.FLIP_LEFT_RIGHT)
            alpha_over(img, tex, (12,6), tex)
            return img
        if data == 3:
            tex = self.transform_image_side(raw_texture).transpose(Image.FLIP_LEFT_RIGHT)
            alpha_over(img, tex, (0,0), tex)
            return img
        if data == 4:
            tex = self.transform_image_side(raw_texture)
            alpha_over(img, tex, (12,0), tex)
            return img

    # wall signs
    @material(blockid=[68,11407,11408,11409,11410,11411,11412,12507,12508], data=[2, 3, 4, 5], transparent=True)
    def wall_sign(self, blockid, data):  # wall sign
        # first rotations
        if self.rotation == 1:
            if data == 2: data = 5
            elif data == 3: data = 4
            elif data == 4: data = 2
            elif data == 5: data = 3
        elif self.rotation == 2:
            if data == 2: data = 3
            elif data == 3: data = 2
            elif data == 4: data = 5
            elif data == 5: data = 4
        elif self.rotation == 3:
            if data == 2: data = 4
            elif data == 3: data = 5
            elif data == 4: data = 3
            elif data == 5: data = 2

        sign_texture = {
            68: "oak_planks.png",
            11407: "oak_planks.png",
            11408: "spruce_planks.png",
            11409: "birch_planks.png",
            11410: "jungle_planks.png",
            11411: "acacia_planks.png",
            11412: "dark_oak_planks.png",
            12507: "crimson_planks.png",
            12508: "warped_planks.png",
        }
        texture_path = "assets/minecraft/textures/block/" + sign_texture[blockid]
        texture = self.load_image_texture(texture_path).copy()
        # cut the planks to the size of a signpost
        ImageDraw.Draw(texture).rectangle((0,12,15,15),outline=(0,0,0,0),fill=(0,0,0,0))

        # draw some random black dots, they will look like text
        """ don't draw text at the moment; these signs are used blank, for decoration

        if data in (3,4):
            for i in range(15):
                x = randint(4,11)
                y = randint(3,7)
                texture.putpixel((x,y),(0,0,0,255))
        """

        img = Image.new("RGBA", (24,24), self.bgcolor)

        incrementx = 0
        if data == 2:  # east
            incrementx = +1
            sign = self.build_full_block(None, None, None, None, texture)
        elif data == 3:  # west
            incrementx = -1
            sign = self.build_full_block(None, texture, None, None, None)
        elif data == 4:  # north
            incrementx = +1
            sign = self.build_full_block(None, None, texture, None, None)
        elif data == 5:  # south
            incrementx = -1
            sign = self.build_full_block(None, None, None, texture, None)

        sign2 = \
ImageEnhance.Brightness(sign).enhance(1.2) alpha_over(img, sign2,(incrementx, 2),sign2) alpha_over(img, sign, (0,3), sign) return img # levers @material(blockid=69, data=list(range(16)), transparent=True) def levers(self, blockid, data): if data & 8 == 8: powered = True else: powered = False data = data & 7 # first rotations if self.rotation == 1: # on wall levers if data == 1: data = 3 elif data == 2: data = 4 elif data == 3: data = 2 elif data == 4: data = 1 # on floor levers elif data == 5: data = 6 elif data == 6: data = 5 elif self.rotation == 2: if data == 1: data = 2 elif data == 2: data = 1 elif data == 3: data = 4 elif data == 4: data = 3 elif data == 5: data = 5 elif data == 6: data = 6 elif self.rotation == 3: if data == 1: data = 4 elif data == 2: data = 3 elif data == 3: data = 1 elif data == 4: data = 2 elif data == 5: data = 6 elif data == 6: data = 5 # generate the texture for the base of the lever t_base = self.load_image_texture("assets/minecraft/textures/block/stone.png").copy() ImageDraw.Draw(t_base).rectangle((0,0,15,3),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(t_base).rectangle((0,12,15,15),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(t_base).rectangle((0,0,4,15),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(t_base).rectangle((11,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0)) # generate the texture for the stick stick = self.load_image_texture("assets/minecraft/textures/block/lever.png").copy() c_stick = Image.new("RGBA", (16,16), self.bgcolor) tmp = ImageEnhance.Brightness(stick).enhance(0.8) alpha_over(c_stick, tmp, (1,0), tmp) alpha_over(c_stick, stick, (0,0), stick) t_stick = self.transform_image_side(c_stick.rotate(45, Image.NEAREST)) # where the lever will be composed img = Image.new("RGBA", (24,24), self.bgcolor) # wall levers if data == 1: # facing SOUTH # levers can't be placed in transparent blocks, so this # direction is almost invisible return None elif data == 2: # facing NORTH base = self.transform_image_side(t_base) # paste it twice with different brightness to make a fake 3D effect alpha_over(img, base, (12,-1), base) alpha = base.split()[3] base = ImageEnhance.Brightness(base).enhance(0.9) base.putalpha(alpha) alpha_over(img, base, (11,0), base) # paste the lever stick pos = (7,-7) if powered: t_stick = t_stick.transpose(Image.FLIP_TOP_BOTTOM) pos = (7,6) alpha_over(img, t_stick, pos, t_stick) elif data == 3: # facing WEST base = self.transform_image_side(t_base) # paste it twice with different brightness to make a fake 3D effect base = base.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, base, (0,-1), base) alpha = base.split()[3] base = ImageEnhance.Brightness(base).enhance(0.9) base.putalpha(alpha) alpha_over(img, base, (1,0), base) # paste the lever stick t_stick = t_stick.transpose(Image.FLIP_LEFT_RIGHT) pos = (5,-7) if powered: t_stick = t_stick.transpose(Image.FLIP_TOP_BOTTOM) pos = (6,6) alpha_over(img, t_stick, pos, t_stick) elif data == 4: # facing EAST # levers can't be placed in transparent blocks, so this # direction is almost invisible return None # floor levers elif data == 5: # pointing south when off # lever base, fake 3d again base = self.transform_image_top(t_base) alpha = base.split()[3] tmp = ImageEnhance.Brightness(base).enhance(0.8) tmp.putalpha(alpha) alpha_over(img, tmp, (0,12), tmp) alpha_over(img, base, (0,11), base) # lever stick pos = (3,2) if not powered: t_stick = t_stick.transpose(Image.FLIP_LEFT_RIGHT) pos = (11,2) alpha_over(img, t_stick, pos, t_stick) elif data == 6: # pointing east when off # lever base, 
            # fake 3d again
            base = self.transform_image_top(t_base.rotate(90))
            alpha = base.split()[3]
            tmp = ImageEnhance.Brightness(base).enhance(0.8)
            tmp.putalpha(alpha)
            alpha_over(img, tmp, (0,12), tmp)
            alpha_over(img, base, (0,11), base)

            # lever stick
            pos = (2,3)
            if not powered:
                t_stick = t_stick.transpose(Image.FLIP_LEFT_RIGHT)
                pos = (10,2)
            alpha_over(img, t_stick, pos, t_stick)

        return img

    # wooden and stone pressure plates, and weighted pressure plates
    @material(blockid=[70, 72,147,148,11301,11302,11303,11304,11305, 1033,11517,11518], data=[0,1], transparent=True)
    def pressure_plate(self, blockid, data):
        texture_name = {70:"assets/minecraft/textures/block/stone.png",              # stone
                        72:"assets/minecraft/textures/block/oak_planks.png",         # oak
                        11301:"assets/minecraft/textures/block/spruce_planks.png",   # spruce
                        11302:"assets/minecraft/textures/block/birch_planks.png",    # birch
                        11303:"assets/minecraft/textures/block/jungle_planks.png",   # jungle
                        11304:"assets/minecraft/textures/block/acacia_planks.png",   # acacia
                        11305:"assets/minecraft/textures/block/dark_oak_planks.png", # dark oak
                        11517:"assets/minecraft/textures/block/crimson_planks.png",  # crimson
                        11518:"assets/minecraft/textures/block/warped_planks.png",   # warped
                        147:"assets/minecraft/textures/block/gold_block.png",        # light golden
                        148:"assets/minecraft/textures/block/iron_block.png",        # heavy iron
                        1033:"assets/minecraft/textures/block/polished_blackstone.png"
                        }[blockid]
        t = self.load_image_texture(texture_name).copy()

        # cut out the outside border; pressure plates are smaller
        # than a normal block
        ImageDraw.Draw(t).rectangle((0,0,15,15),outline=(0,0,0,0))

        # create the texture and a darker version to make a 3D effect
        # by pasting them with an offset of 1 pixel
        img = Image.new("RGBA", (24,24), self.bgcolor)
        top = self.transform_image_top(t)

        alpha = top.split()[3]
        topd = ImageEnhance.Brightness(top).enhance(0.8)
        topd.putalpha(alpha)

        # show it 3D if unpressed, 2D if pressed
        if data == 0:
            alpha_over(img,topd, (0,12),topd)
            alpha_over(img,top, (0,11),top)
        elif data == 1:
            alpha_over(img,top, (0,12),top)

        return img

    # normal and glowing redstone ore
    block(blockid=[73, 74], top_image="assets/minecraft/textures/block/redstone_ore.png")

    # stone and wood buttons
    @material(blockid=(77,143,11326,11327,11328,11329,11330,1034,11515,11516), data=list(range(16)), transparent=True)
    def buttons(self, blockid, data):
        # 0x8 is set if the button is pressed; mask this info and render
        # it as unpressed
        data = data & 0x7

        if self.rotation == 1:
            if data == 1: data = 3
            elif data == 2: data = 4
            elif data == 3: data = 2
            elif data == 4: data = 1
            elif data == 5: data = 6
            elif data == 6: data = 5
        elif self.rotation == 2:
            if data == 1: data = 2
            elif data == 2: data = 1
            elif data == 3: data = 4
            elif data == 4: data = 3
        elif self.rotation == 3:
            if data == 1: data = 4
            elif data == 2: data = 3
            elif data == 3: data = 1
            elif data == 4: data = 2
            elif data == 5: data = 6
            elif data == 6: data = 5

        texturepath = {77:"assets/minecraft/textures/block/stone.png",
                       143:"assets/minecraft/textures/block/oak_planks.png",
                       11326:"assets/minecraft/textures/block/spruce_planks.png",
                       11327:"assets/minecraft/textures/block/birch_planks.png",
                       11328:"assets/minecraft/textures/block/jungle_planks.png",
                       11329:"assets/minecraft/textures/block/acacia_planks.png",
                       11330:"assets/minecraft/textures/block/dark_oak_planks.png",
                       1034:"assets/minecraft/textures/block/polished_blackstone.png",
                       11515:"assets/minecraft/textures/block/crimson_planks.png",
                       11516:"assets/minecraft/textures/block/warped_planks.png"
                       }[blockid]
        t = \
self.load_image_texture(texturepath).copy() # generate the texture for the button ImageDraw.Draw(t).rectangle((0,0,15,5),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(t).rectangle((0,10,15,15),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(t).rectangle((0,0,4,15),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(t).rectangle((11,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0)) img = Image.new("RGBA", (24,24), self.bgcolor) if data < 5: button = self.transform_image_side(t) if data == 1: # facing SOUTH # buttons can't be placed in transparent blocks, so this # direction can't be seen return None elif data == 2: # facing NORTH # paste it twice with different brightness to make a 3D effect alpha_over(img, button, (12,-1), button) alpha = button.split()[3] button = ImageEnhance.Brightness(button).enhance(0.9) button.putalpha(alpha) alpha_over(img, button, (11,0), button) elif data == 3: # facing WEST # paste it twice with different brightness to make a 3D effect button = button.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, button, (0,-1), button) alpha = button.split()[3] button = ImageEnhance.Brightness(button).enhance(0.9) button.putalpha(alpha) alpha_over(img, button, (1,0), button) elif data == 4: # facing EAST # buttons can't be placed in transparent blocks, so this # direction can't be seen return None else: if data == 5: # long axis east-west button = self.transform_image_top(t) else: # long axis north-south button = self.transform_image_top(t.rotate(90)) # paste it twice with different brightness to make a 3D effect alpha_over(img, button, (0,12), button) alpha = button.split()[3] button = ImageEnhance.Brightness(button).enhance(0.9) button.putalpha(alpha) alpha_over(img, button, (0,11), button) return img # end rod @material(blockid=198, data=list(range(6)), transparent=True, solid=True) def end_rod(self, blockid, data): tex = self.load_image_texture("assets/minecraft/textures/block/end_rod.png") img = Image.new("RGBA", (24, 24), self.bgcolor) mask = tex.crop((0, 0, 2, 15)) sidetex = Image.new(tex.mode, tex.size, self.bgcolor) alpha_over(sidetex, mask, (14, 0), mask) mask = tex.crop((2, 3, 6, 7)) bottom = Image.new(tex.mode, tex.size, self.bgcolor) alpha_over(bottom, mask, (5, 6), mask) if data == 1 or data == 0: side = self.transform_image_side(sidetex) otherside = side.transpose(Image.FLIP_LEFT_RIGHT) bottom = self.transform_image_top(bottom) if data == 1: # up mask = tex.crop((2, 0, 4, 2)) top = Image.new(tex.mode, tex.size, self.bgcolor) alpha_over(top, mask, (7, 2), mask) top = self.transform_image_top(top) alpha_over(img, bottom, (0, 11), bottom) alpha_over(img, side, (0, 0), side) alpha_over(img, otherside, (11, 0), otherside) alpha_over(img, top, (3, 1), top) elif data == 0: # down alpha_over(img, side, (0, 0), side) alpha_over(img, otherside, (11, 0), otherside) alpha_over(img, bottom, (0, 0), bottom) else: otherside = self.transform_image_top(sidetex) sidetex = sidetex.rotate(90) side = self.transform_image_side(sidetex) bottom = self.transform_image_side(bottom) bottom = bottom.transpose(Image.FLIP_LEFT_RIGHT) def draw_south(): alpha_over(img, bottom, (0, 0), bottom) alpha_over(img, side, (7, 8), side) alpha_over(img, otherside, (-3, 9), otherside) def draw_north(): alpha_over(img, side, (7, 8), side) alpha_over(img, otherside, (-3, 9), otherside) alpha_over(img, bottom, (12, 6), bottom) def draw_west(): _bottom = bottom.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, _bottom, (13, 0), _bottom) _side = side.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, _side, (7, 8), 
_side) _otherside = otherside.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, _otherside, (4, 9), _otherside) def draw_east(): _side = side.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, _side, (7, 8), _side) _otherside = otherside.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, _otherside, (4, 9), _otherside) _bottom = bottom.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, _bottom, (0, 6), _bottom) draw_funcs = [ draw_south, draw_west, draw_north, draw_east ] if data == 3: # south draw_funcs[self.rotation]() elif data == 2: # north draw_funcs[(self.rotation + 2) % len(draw_funcs)]() elif data == 4: # west draw_funcs[(self.rotation + 1) % len(draw_funcs)]() elif data == 5: # east draw_funcs[(self.rotation + 3) % len(draw_funcs)]() return img # snow @material(blockid=78, data=list(range(1, 9)), transparent=True, solid=True) def snow(self, blockid, data): # still not rendered correctly: data other than 0 tex = self.load_image_texture("assets/minecraft/textures/block/snow.png") y = 16 - (data * 2) mask = tex.crop((0, y, 16, 16)) sidetex = Image.new(tex.mode, tex.size, self.bgcolor) alpha_over(sidetex, mask, (0,y,16,16), mask) img = Image.new("RGBA", (24,24), self.bgcolor) top = self.transform_image_top(tex) side = self.transform_image_side(sidetex) otherside = side.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, side, (0, 6), side) alpha_over(img, otherside, (12, 6), otherside) alpha_over(img, top, (0, 12 - int(12 / 8 * data)), top) return img # snow block block(blockid=80, top_image="assets/minecraft/textures/block/snow.png") # cactus @material(blockid=81, data=list(range(15)), transparent=True, solid=True, nospawn=True) def cactus(self, blockid, data): top = self.load_image_texture("assets/minecraft/textures/block/cactus_top.png") side = self.load_image_texture("assets/minecraft/textures/block/cactus_side.png") img = Image.new("RGBA", (24,24), self.bgcolor) top = self.transform_image_top(top) side = self.transform_image_side(side) otherside = side.transpose(Image.FLIP_LEFT_RIGHT) sidealpha = side.split()[3] side = ImageEnhance.Brightness(side).enhance(0.9) side.putalpha(sidealpha) othersidealpha = otherside.split()[3] otherside = ImageEnhance.Brightness(otherside).enhance(0.8) otherside.putalpha(othersidealpha) alpha_over(img, side, (1,6), side) alpha_over(img, otherside, (11,6), otherside) alpha_over(img, top, (0,0), top) return img # clay block block(blockid=82, top_image="assets/minecraft/textures/block/clay.png") # sugar cane @material(blockid=83, data=list(range(16)), transparent=True) def sugar_cane(self, blockid, data): tex = self.load_image_texture("assets/minecraft/textures/block/sugar_cane.png") return self.build_sprite(tex) # jukebox @material(blockid=84, data=list(range(16)), solid=True) def jukebox(self, blockid, data): return self.build_block(self.load_image_texture("assets/minecraft/textures/block/jukebox_top.png"), self.load_image_texture("assets/minecraft/textures/block/note_block.png")) # nether and normal fences # uses pseudo-ancildata found in iterate.c @material(blockid=[85, 188, 189, 190, 191, 192, 113, 511, 512], data=list(range(16)), transparent=True, nospawn=True) def fence(self, blockid, data): # no need for rotations, it uses pseudo data. 
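        # A sketch of the fence pseudo-data layout, inferred from the bit
        # tests further down (0b0001/0b0010/0b0100/0b1000): each bit flags a
        # connection to one neighbouring block, pre-rotated by iterate.c, e.g.:
        #
        #   >>> data = 0b0101   # hypothetical: two connections on opposite corners
        #   >>> bool(data & 0b0001), bool(data & 0b0100)
        #   (True, True)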
# create needed images for Big stick fence if blockid == 85: # normal fence fence_top = self.load_image_texture("assets/minecraft/textures/block/oak_planks.png").copy() fence_side = self.load_image_texture("assets/minecraft/textures/block/oak_planks.png").copy() fence_small_side = self.load_image_texture("assets/minecraft/textures/block/oak_planks.png").copy() elif blockid == 188: # spruce fence fence_top = self.load_image_texture("assets/minecraft/textures/block/spruce_planks.png").copy() fence_side = self.load_image_texture("assets/minecraft/textures/block/spruce_planks.png").copy() fence_small_side = self.load_image_texture("assets/minecraft/textures/block/spruce_planks.png").copy() elif blockid == 189: # birch fence fence_top = self.load_image_texture("assets/minecraft/textures/block/birch_planks.png").copy() fence_side = self.load_image_texture("assets/minecraft/textures/block/birch_planks.png").copy() fence_small_side = self.load_image_texture("assets/minecraft/textures/block/birch_planks.png").copy() elif blockid == 190: # jungle fence fence_top = self.load_image_texture("assets/minecraft/textures/block/jungle_planks.png").copy() fence_side = self.load_image_texture("assets/minecraft/textures/block/jungle_planks.png").copy() fence_small_side = self.load_image_texture("assets/minecraft/textures/block/jungle_planks.png").copy() elif blockid == 191: # big/dark oak fence fence_top = self.load_image_texture("assets/minecraft/textures/block/dark_oak_planks.png").copy() fence_side = self.load_image_texture("assets/minecraft/textures/block/dark_oak_planks.png").copy() fence_small_side = self.load_image_texture("assets/minecraft/textures/block/dark_oak_planks.png").copy() elif blockid == 192: # acacia fence fence_top = self.load_image_texture("assets/minecraft/textures/block/acacia_planks.png").copy() fence_side = self.load_image_texture("assets/minecraft/textures/block/acacia_planks.png").copy() fence_small_side = self.load_image_texture("assets/minecraft/textures/block/acacia_planks.png").copy() elif blockid == 511: # crimson_fence fence_top = self.load_image_texture("assets/minecraft/textures/block/crimson_planks.png").copy() fence_side = self.load_image_texture("assets/minecraft/textures/block/crimson_planks.png").copy() fence_small_side = self.load_image_texture("assets/minecraft/textures/block/crimson_planks.png").copy() elif blockid == 512: # warped fence fence_top = self.load_image_texture("assets/minecraft/textures/block/warped_planks.png").copy() fence_side = self.load_image_texture("assets/minecraft/textures/block/warped_planks.png").copy() fence_small_side = self.load_image_texture("assets/minecraft/textures/block/warped_planks.png").copy() else: # netherbrick fence fence_top = self.load_image_texture("assets/minecraft/textures/block/nether_bricks.png").copy() fence_side = self.load_image_texture("assets/minecraft/textures/block/nether_bricks.png").copy() fence_small_side = self.load_image_texture("assets/minecraft/textures/block/nether_bricks.png").copy() # generate the textures of the fence ImageDraw.Draw(fence_top).rectangle((0,0,5,15),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(fence_top).rectangle((10,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(fence_top).rectangle((0,0,15,5),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(fence_top).rectangle((0,10,15,15),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(fence_side).rectangle((0,0,5,15),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(fence_side).rectangle((10,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0)) # 
        # Create the sides and the top of the big stick
        fence_side = self.transform_image_side(fence_side)
        fence_other_side = fence_side.transpose(Image.FLIP_LEFT_RIGHT)
        fence_top = self.transform_image_top(fence_top)

        # Darken the sides slightly. These methods also affect the alpha layer,
        # so save them first (we don't want to "darken" the alpha layer making
        # the block transparent)
        sidealpha = fence_side.split()[3]
        fence_side = ImageEnhance.Brightness(fence_side).enhance(0.9)
        fence_side.putalpha(sidealpha)
        othersidealpha = fence_other_side.split()[3]
        fence_other_side = ImageEnhance.Brightness(fence_other_side).enhance(0.8)
        fence_other_side.putalpha(othersidealpha)

        # Compose the fence big stick
        fence_big = Image.new("RGBA", (24,24), self.bgcolor)
        alpha_over(fence_big,fence_side, (5,4),fence_side)
        alpha_over(fence_big,fence_other_side, (7,4),fence_other_side)
        alpha_over(fence_big,fence_top, (0,0),fence_top)

        # Now render the small sticks.
        # Create needed images
        ImageDraw.Draw(fence_small_side).rectangle((0,0,15,0),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(fence_small_side).rectangle((0,4,15,6),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(fence_small_side).rectangle((0,10,15,16),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(fence_small_side).rectangle((0,0,4,15),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(fence_small_side).rectangle((11,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0))

        # Create the sides and the top of the small sticks
        fence_small_side = self.transform_image_side(fence_small_side)
        fence_small_other_side = fence_small_side.transpose(Image.FLIP_LEFT_RIGHT)

        # Darken the sides slightly. These methods also affect the alpha layer,
        # so save them first (we don't want to "darken" the alpha layer making
        # the block transparent)
        sidealpha = fence_small_other_side.split()[3]
        fence_small_other_side = ImageEnhance.Brightness(fence_small_other_side).enhance(0.9)
        fence_small_other_side.putalpha(sidealpha)
        sidealpha = fence_small_side.split()[3]
        fence_small_side = ImageEnhance.Brightness(fence_small_side).enhance(0.9)
        fence_small_side.putalpha(sidealpha)

        # Create img to compose the fence
        img = Image.new("RGBA", (24,24), self.bgcolor)

        # Position of fence small sticks in img.
        # These positions are strange because the small sticks of the
        # fence are at the very left and at the very right of the 16x16 images
        pos_top_left = (2,3)
        pos_top_right = (10,3)
        pos_bottom_right = (10,7)
        pos_bottom_left = (2,7)

        # +x axis points top right direction
        # +y axis points bottom right direction
        # First compose the small sticks in the back of the image,
        # then the big stick, and then the small sticks in the front.
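        # The pastes below follow a simple painter's algorithm: the two sticks
        # that sit behind the centre post (bits 0b0001 and 0b1000, under the
        # bit meanings assumed here) are drawn first, then the big post, then
        # the two front sticks (0b0010 and 0b0100), so nearer parts correctly
        # overlap farther ones.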
if (data & 0b0001) == 1: alpha_over(img,fence_small_side, pos_top_left,fence_small_side) # top left if (data & 0b1000) == 8: alpha_over(img,fence_small_other_side, pos_top_right,fence_small_other_side) # top right alpha_over(img,fence_big,(0,0),fence_big) if (data & 0b0010) == 2: alpha_over(img,fence_small_other_side, pos_bottom_left,fence_small_other_side) # bottom left if (data & 0b0100) == 4: alpha_over(img,fence_small_side, pos_bottom_right,fence_small_side) # bottom right return img # pumpkin @material(blockid=[86, 91,11300], data=list(range(4)), solid=True) def pumpkin(self, blockid, data): # pumpkins, jack-o-lantern # rotation if self.rotation == 1: if data == 0: data = 1 elif data == 1: data = 2 elif data == 2: data = 3 elif data == 3: data = 0 elif self.rotation == 2: if data == 0: data = 2 elif data == 1: data = 3 elif data == 2: data = 0 elif data == 3: data = 1 elif self.rotation == 3: if data == 0: data = 3 elif data == 1: data = 0 elif data == 2: data = 1 elif data == 3: data = 2 # texture generation top = self.load_image_texture("assets/minecraft/textures/block/pumpkin_top.png") frontName = {86: "assets/minecraft/textures/block/pumpkin_side.png", 91: "assets/minecraft/textures/block/jack_o_lantern.png", 11300: "assets/minecraft/textures/block/carved_pumpkin.png" }[blockid] front = self.load_image_texture(frontName) side = self.load_image_texture("assets/minecraft/textures/block/pumpkin_side.png") if data == 0: # pointing west img = self.build_full_block(top, None, None, side, front) elif data == 1: # pointing north img = self.build_full_block(top, None, None, front, side) else: # in any other direction the front can't be seen img = self.build_full_block(top, None, None, side, side) return img # netherrack block(blockid=87, top_image="assets/minecraft/textures/block/netherrack.png") # soul sand block(blockid=88, top_image="assets/minecraft/textures/block/soul_sand.png") # glowstone block(blockid=89, top_image="assets/minecraft/textures/block/glowstone.png") # shroomlight block(blockid=1011, top_image="assets/minecraft/textures/block/shroomlight.png") # portal @material(blockid=90, data=[1, 2, 4, 5, 8, 10], transparent=True) def portal(self, blockid, data): # no rotations, uses pseudo data portaltexture = self.load_portal() img = Image.new("RGBA", (24,24), self.bgcolor) side = self.transform_image_side(portaltexture) otherside = side.transpose(Image.FLIP_TOP_BOTTOM) if data in (1,4,5): alpha_over(img, side, (5,4), side) if data in (2,8,10): alpha_over(img, otherside, (5,4), otherside) return img # cake! 
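    # For the cake below, the data value counts bites taken (0-6); each bite
    # removes one 2px slice of the 14px-wide cake, matching the crop maths in
    # the function:
    #
    #   >>> bite_width = int(14 / 7)
    #   >>> [1 + bite_width * d for d in range(7)]   # crop x-coordinates
    #   [1, 3, 5, 7, 9, 11, 13]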
@material(blockid=92, data=list(range(7)), transparent=True, nospawn=True) def cake(self, blockid, data): # cake textures top = self.load_image_texture("assets/minecraft/textures/block/cake_top.png").copy() side = self.load_image_texture("assets/minecraft/textures/block/cake_side.png").copy() fullside = side.copy() inside = self.load_image_texture("assets/minecraft/textures/block/cake_inner.png") img = Image.new("RGBA", (24, 24), self.bgcolor) if data == 0: # unbitten cake top = self.transform_image_top(top) side = self.transform_image_side(side) otherside = side.transpose(Image.FLIP_LEFT_RIGHT) # darken sides slightly sidealpha = side.split()[3] side = ImageEnhance.Brightness(side).enhance(0.9) side.putalpha(sidealpha) othersidealpha = otherside.split()[3] otherside = ImageEnhance.Brightness(otherside).enhance(0.8) otherside.putalpha(othersidealpha) # composite the cake alpha_over(img, side, (1, 6), side) alpha_over(img, otherside, (11, 5), otherside) # workaround, fixes a hole alpha_over(img, otherside, (12, 6), otherside) alpha_over(img, top, (0, 6), top) else: # cut the textures for a bitten cake bite_width = int(14 / 7) # Cake is 14px wide with 7 slices coord = 1 + bite_width * data ImageDraw.Draw(side).rectangle((16 - coord, 0, 16, 16), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0)) ImageDraw.Draw(top).rectangle((0, 0, coord - 1, 16), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0)) # the bitten part of the cake always points to the west # composite the cake for every north orientation if self.rotation == 0: # north top-left # create right side rs = self.transform_image_side(side).transpose(Image.FLIP_LEFT_RIGHT) # create bitten side and its coords deltax = bite_width * data deltay = -1 * data if data in [3, 4, 5, 6]: deltax -= 1 ls = self.transform_image_side(inside) # create top side t = self.transform_image_top(top) # darken sides slightly sidealpha = ls.split()[3] ls = ImageEnhance.Brightness(ls).enhance(0.9) ls.putalpha(sidealpha) othersidealpha = rs.split()[3] rs = ImageEnhance.Brightness(rs).enhance(0.8) rs.putalpha(othersidealpha) # compose the cake alpha_over(img, rs, (12, 6), rs) alpha_over(img, ls, (1 + deltax, 6 + deltay), ls) alpha_over(img, t, (1, 6), t) elif self.rotation == 1: # north top-right # bitten side not shown # create left side ls = self.transform_image_side(side.transpose(Image.FLIP_LEFT_RIGHT)) # create top t = self.transform_image_top(top.rotate(-90)) # create right side rs = self.transform_image_side(fullside).transpose(Image.FLIP_LEFT_RIGHT) # darken sides slightly sidealpha = ls.split()[3] ls = ImageEnhance.Brightness(ls).enhance(0.9) ls.putalpha(sidealpha) othersidealpha = rs.split()[3] rs = ImageEnhance.Brightness(rs).enhance(0.8) rs.putalpha(othersidealpha) # compose the cake alpha_over(img, ls, (2, 6), ls) alpha_over(img, t, (1, 6), t) alpha_over(img, rs, (12, 6), rs) elif self.rotation == 2: # north bottom-right # bitten side not shown # left side ls = self.transform_image_side(fullside) # top t = self.transform_image_top(top.rotate(180)) # right side rs = self.transform_image_side(side.transpose(Image.FLIP_LEFT_RIGHT)) rs = rs.transpose(Image.FLIP_LEFT_RIGHT) # darken sides slightly sidealpha = ls.split()[3] ls = ImageEnhance.Brightness(ls).enhance(0.9) ls.putalpha(sidealpha) othersidealpha = rs.split()[3] rs = ImageEnhance.Brightness(rs).enhance(0.8) rs.putalpha(othersidealpha) # compose the cake alpha_over(img, ls, (2, 6), ls) alpha_over(img, t, (1, 6), t) alpha_over(img, rs, (12, 6), rs) elif self.rotation == 3: # north bottom-left # create left side ls = 
self.transform_image_side(side) # create top t = self.transform_image_top(top.rotate(90)) # create right side and its coords deltax = 12 - bite_width * data deltay = -1 * data if data in [3, 4, 5, 6]: deltax += 1 rs = self.transform_image_side(inside).transpose(Image.FLIP_LEFT_RIGHT) # darken sides slightly sidealpha = ls.split()[3] ls = ImageEnhance.Brightness(ls).enhance(0.9) ls.putalpha(sidealpha) othersidealpha = rs.split()[3] rs = ImageEnhance.Brightness(rs).enhance(0.8) rs.putalpha(othersidealpha) # compose the cake alpha_over(img, ls, (2, 6), ls) alpha_over(img, t, (1, 6), t) alpha_over(img, rs, (1 + deltax, 6 + deltay), rs) return img # redstone repeaters ON and OFF @material(blockid=[93,94], data=list(range(16)), transparent=True, nospawn=True) def repeater(self, blockid, data): # rotation # Masked to not clobber delay info if self.rotation == 1: if (data & 0b0011) == 0: data = data & 0b1100 | 1 elif (data & 0b0011) == 1: data = data & 0b1100 | 2 elif (data & 0b0011) == 2: data = data & 0b1100 | 3 elif (data & 0b0011) == 3: data = data & 0b1100 | 0 elif self.rotation == 2: if (data & 0b0011) == 0: data = data & 0b1100 | 2 elif (data & 0b0011) == 1: data = data & 0b1100 | 3 elif (data & 0b0011) == 2: data = data & 0b1100 | 0 elif (data & 0b0011) == 3: data = data & 0b1100 | 1 elif self.rotation == 3: if (data & 0b0011) == 0: data = data & 0b1100 | 3 elif (data & 0b0011) == 1: data = data & 0b1100 | 0 elif (data & 0b0011) == 2: data = data & 0b1100 | 1 elif (data & 0b0011) == 3: data = data & 0b1100 | 2 # generate the diode top = self.load_image_texture("assets/minecraft/textures/block/repeater.png") if blockid == 93 else self.load_image_texture("assets/minecraft/textures/block/repeater_on.png") side = self.load_image_texture("assets/minecraft/textures/block/smooth_stone_slab_side.png") increment = 13 if (data & 0x3) == 0: # pointing east pass if (data & 0x3) == 1: # pointing south top = top.rotate(270) if (data & 0x3) == 2: # pointing west top = top.rotate(180) if (data & 0x3) == 3: # pointing north top = top.rotate(90) img = self.build_full_block( (top, increment), None, None, side, side) # compose a "3d" redstone torch t = self.load_image_texture("assets/minecraft/textures/block/redstone_torch_off.png").copy() if blockid == 93 else self.load_image_texture("assets/minecraft/textures/block/redstone_torch.png").copy() torch = Image.new("RGBA", (24,24), self.bgcolor) t_crop = t.crop((2,2,14,14)) slice = t_crop.copy() ImageDraw.Draw(slice).rectangle((6,0,12,12),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(slice).rectangle((0,0,4,12),outline=(0,0,0,0),fill=(0,0,0,0)) alpha_over(torch, slice, (6,4)) alpha_over(torch, t_crop, (5,5)) alpha_over(torch, t_crop, (6,5)) alpha_over(torch, slice, (6,6)) # paste redstone torches everywhere! # the torch is too tall for the repeater, crop the bottom. ImageDraw.Draw(torch).rectangle((0,16,24,24),outline=(0,0,0,0),fill=(0,0,0,0)) # touch up the 3d effect with big rectangles, just in case, for other texture packs ImageDraw.Draw(torch).rectangle((0,24,10,15),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(torch).rectangle((12,15,24,24),outline=(0,0,0,0),fill=(0,0,0,0)) # torch positions for every redstone torch orientation. # # This is a horrible list of torch orientations. I tried to # obtain these orientations by rotating the positions for one # orientation, but pixel rounding is horrible and messes the # torches. 
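        # The branches below decode the repeater pseudo-data: the lower two
        # bits (data & 0x3) give the facing, the upper two (data & 0xC) the
        # delay setting, e.g.:
        #
        #   >>> [(d & 0xC) // 4 + 1 for d in (0, 4, 8, 12)]   # delay in ticks
        #   [1, 2, 3, 4]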
        if (data & 0x3) == 0:  # pointing east
            if (data & 0xC) == 0:  # one tick delay
                moving_torch = (1,1)
                static_torch = (-3,-1)
            elif (data & 0xC) == 4:  # two ticks delay
                moving_torch = (2,2)
                static_torch = (-3,-1)
            elif (data & 0xC) == 8:  # three ticks delay
                moving_torch = (3,2)
                static_torch = (-3,-1)
            elif (data & 0xC) == 12:  # four ticks delay
                moving_torch = (4,3)
                static_torch = (-3,-1)
        elif (data & 0x3) == 1:  # pointing south
            if (data & 0xC) == 0:  # one tick delay
                moving_torch = (1,1)
                static_torch = (5,-1)
            elif (data & 0xC) == 4:  # two ticks delay
                moving_torch = (0,2)
                static_torch = (5,-1)
            elif (data & 0xC) == 8:  # three ticks delay
                moving_torch = (-1,2)
                static_torch = (5,-1)
            elif (data & 0xC) == 12:  # four ticks delay
                moving_torch = (-2,3)
                static_torch = (5,-1)
        elif (data & 0x3) == 2:  # pointing west
            if (data & 0xC) == 0:  # one tick delay
                moving_torch = (1,1)
                static_torch = (5,3)
            elif (data & 0xC) == 4:  # two ticks delay
                moving_torch = (0,0)
                static_torch = (5,3)
            elif (data & 0xC) == 8:  # three ticks delay
                moving_torch = (-1,0)
                static_torch = (5,3)
            elif (data & 0xC) == 12:  # four ticks delay
                moving_torch = (-2,-1)
                static_torch = (5,3)
        elif (data & 0x3) == 3:  # pointing north
            if (data & 0xC) == 0:  # one tick delay
                moving_torch = (1,1)
                static_torch = (-3,3)
            elif (data & 0xC) == 4:  # two ticks delay
                moving_torch = (2,0)
                static_torch = (-3,3)
            elif (data & 0xC) == 8:  # three ticks delay
                moving_torch = (3,0)
                static_torch = (-3,3)
            elif (data & 0xC) == 12:  # four ticks delay
                moving_torch = (4,-1)
                static_torch = (-3,3)

        # this paste order is fine for the east and south orientations,
        # but wrong for north and west. Using the default texture pack,
        # though, the torches are small enough not to overlap.
        alpha_over(img, torch, static_torch, torch)
        alpha_over(img, torch, moving_torch, torch)
        return img
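    # Comparator pseudo-data, as suggested by the masks used below: the lower
    # two bits are the facing (rotated in place), 0b100 marks subtract mode
    # (the front torch lit) and 0b1000 a powered output. For example:
    #
    #   >>> data = 0b1101       # hypothetical: powered, subtract, facing 1
    #   >>> data & 0b11, bool(data & 0b100), bool(data & 0b1000)
    #   (1, True, True)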
    # redstone comparator (149 is inactive, 150 is active)
    @material(blockid=[149,150], data=list(range(16)), transparent=True, nospawn=True)
    def comparator(self, blockid, data):
        # rotation: add self.rotation to the lower 2 bits, mod 4
        data = data & 0b1100 | (((data & 0b11) + self.rotation) % 4)

        top = self.load_image_texture("assets/minecraft/textures/block/comparator.png") if blockid == 149 else self.load_image_texture("assets/minecraft/textures/block/comparator_on.png")
        side = self.load_image_texture("assets/minecraft/textures/block/smooth_stone_slab_side.png")
        increment = 13

        if (data & 0x3) == 0:  # pointing north
            pass
            static_torch = (-3,-1)
            torch = ((0,2),(6,-1))
        if (data & 0x3) == 1:  # pointing east
            top = top.rotate(270)
            static_torch = (5,-1)
            torch = ((-4,-1),(0,2))
        if (data & 0x3) == 2:  # pointing south
            top = top.rotate(180)
            static_torch = (5,3)
            torch = ((0,-4),(-4,-1))
        if (data & 0x3) == 3:  # pointing west
            top = top.rotate(90)
            static_torch = (-3,3)
            torch = ((1,-4),(6,-1))

        def build_torch(active):
            # compose a "3d" redstone torch
            t = self.load_image_texture("assets/minecraft/textures/block/redstone_torch_off.png").copy() if not active else self.load_image_texture("assets/minecraft/textures/block/redstone_torch.png").copy()
            torch = Image.new("RGBA", (24,24), self.bgcolor)
            t_crop = t.crop((2,2,14,14))
            slice = t_crop.copy()
            ImageDraw.Draw(slice).rectangle((6,0,12,12),outline=(0,0,0,0),fill=(0,0,0,0))
            ImageDraw.Draw(slice).rectangle((0,0,4,12),outline=(0,0,0,0),fill=(0,0,0,0))
            alpha_over(torch, slice, (6,4))
            alpha_over(torch, t_crop, (5,5))
            alpha_over(torch, t_crop, (6,5))
            alpha_over(torch, slice, (6,6))
            return torch

        active_torch = build_torch(True)
        inactive_torch = build_torch(False)
        back_torch = active_torch if (blockid == 150 or data & 0b1000 == 0b1000) else inactive_torch
        static_torch_img = active_torch if (data & 0b100 == 0b100) else inactive_torch

        img = self.build_full_block((top, increment), None, None, side, side)
        alpha_over(img, static_torch_img, static_torch, static_torch_img)
        alpha_over(img, back_torch, torch[0], back_torch)
        alpha_over(img, back_torch, torch[1], back_torch)
        return img

    # trapdoor
    # the trapdoor looks like a sprite when opened; that's not good
    @material(blockid=[96,167,11332,11333,11334,11335,11336,12501,12502], data=list(range(16)), transparent=True, nospawn=True)
    def trapdoor(self, blockid, data):
        # rotation: masked to not clobber opened/closed info
        if self.rotation == 1:
            if (data & 0b0011) == 0: data = data & 0b1100 | 3
            elif (data & 0b0011) == 1: data = data & 0b1100 | 2
            elif (data & 0b0011) == 2: data = data & 0b1100 | 0
            elif (data & 0b0011) == 3: data = data & 0b1100 | 1
        elif self.rotation == 2:
            if (data & 0b0011) == 0: data = data & 0b1100 | 1
            elif (data & 0b0011) == 1: data = data & 0b1100 | 0
            elif (data & 0b0011) == 2: data = data & 0b1100 | 3
            elif (data & 0b0011) == 3: data = data & 0b1100 | 2
        elif self.rotation == 3:
            if (data & 0b0011) == 0: data = data & 0b1100 | 2
            elif (data & 0b0011) == 1: data = data & 0b1100 | 3
            elif (data & 0b0011) == 2: data = data & 0b1100 | 1
            elif (data & 0b0011) == 3: data = data & 0b1100 | 0

        # texture generation
        texturepath = {96:"assets/minecraft/textures/block/oak_trapdoor.png",
                       167:"assets/minecraft/textures/block/iron_trapdoor.png",
                       11332:"assets/minecraft/textures/block/spruce_trapdoor.png",
                       11333:"assets/minecraft/textures/block/birch_trapdoor.png",
                       11334:"assets/minecraft/textures/block/jungle_trapdoor.png",
                       11335:"assets/minecraft/textures/block/acacia_trapdoor.png",
                       11336:"assets/minecraft/textures/block/dark_oak_trapdoor.png",
                       12501:"assets/minecraft/textures/block/crimson_trapdoor.png",
                       12502:"assets/minecraft/textures/block/warped_trapdoor.png",
                       }[blockid]
        if data & 0x4 == 0x4:  # opened trapdoor
            if data & 0x08 == 0x08:
                texture = self.load_image_texture(texturepath).transpose(Image.FLIP_TOP_BOTTOM)
            else:
                texture = self.load_image_texture(texturepath)
            if data & 0x3 == 0:  # west
                img = self.build_full_block(None, None, None, None, texture)
            if data & 0x3 == 1:  # east
                img = self.build_full_block(None, texture, None, None, None)
            if data & 0x3 == 2:  # south
                img = self.build_full_block(None, None, texture, None, None)
            if data & 0x3 == 3:  # north
                img = self.build_full_block(None, None, None, texture, None)
        elif data & 0x4 == 0:  # closed trapdoor
            texture = self.load_image_texture(texturepath)
            if data & 0x8 == 0x8:  # is a top trapdoor
                img = Image.new("RGBA", (24,24), self.bgcolor)
                t = self.build_full_block((texture, 12), None, None, texture, texture)
                alpha_over(img, t, (0,-9), t)
            else:  # is a bottom trapdoor
                img = self.build_full_block((texture, 12), None, None, texture, texture)
        return img
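    # Trapdoor bit layout, as used in trapdoor() above: data & 0x3 is the
    # facing, 0x4 marks an opened trapdoor and 0x8 a top-half one, e.g.:
    #
    #   >>> data = 0b1100       # hypothetical: top half, opened, facing west
    #   >>> bool(data & 0x8), bool(data & 0x4), data & 0x3
    #   (True, True, 0)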
    # block with hidden silverfish (stone, cobblestone and stone brick)
    @material(blockid=97, data=list(range(3)), solid=True)
    def hidden_silverfish(self, blockid, data):
        if data == 0:  # stone
            t = self.load_image_texture("assets/minecraft/textures/block/stone.png")
        elif data == 1:  # cobblestone
            t = self.load_image_texture("assets/minecraft/textures/block/cobblestone.png")
        elif data == 2:  # stone brick
            t = self.load_image_texture("assets/minecraft/textures/block/stone_bricks.png")
        img = self.build_block(t, t)
        return img

    # stone brick
    @material(blockid=98, data=list(range(4)), solid=True)
    def stone_brick(self, blockid, data):
        if data == 0:  # normal
            t = self.load_image_texture("assets/minecraft/textures/block/stone_bricks.png")
        elif data == 1:  # mossy
            t = self.load_image_texture("assets/minecraft/textures/block/mossy_stone_bricks.png")
        elif data == 2:  # cracked
            t = self.load_image_texture("assets/minecraft/textures/block/cracked_stone_bricks.png")
        elif data == 3:  # "circle" stone brick
            t = self.load_image_texture("assets/minecraft/textures/block/chiseled_stone_bricks.png")
        img = self.build_full_block(t, None, None, t, t)
        return img

    # huge brown/red mushrooms, and mushroom stems
    @material(blockid=[99, 100, 139], data=list(range(64)), solid=True)
    def huge_mushroom(self, blockid, data):
        # Re-arrange the bits in data based on self.rotation
        # rotation bit: 654321
        #            0  DUENWS
        #            1  DUNWSE
        #            2  DUWSEN
        #            3  DUSENW
        if self.rotation in [1, 2, 3]:
            bit_map = {1: [6, 5, 3, 2, 1, 4],
                       2: [6, 5, 2, 1, 4, 3],
                       3: [6, 5, 1, 4, 3, 2]}
            new_data = 0
            # Add the ith bit to new_data then shift left one at a time,
            # re-ordering data's bits in the order specified in bit_map
            for i in bit_map[self.rotation]:
                new_data = new_data << 1
                new_data |= (data >> (i - 1)) & 1
            data = new_data

        # texture generation
        texture_map = {99: "brown_mushroom_block", 100: "red_mushroom_block", 139: "mushroom_stem"}
        cap = self.load_image_texture("assets/minecraft/textures/block/%s.png" % texture_map[blockid])
        porous = self.load_image_texture("assets/minecraft/textures/block/mushroom_block_inside.png")

        # Faces visible after amending data for rotation are: Up, West, and South
        side_up = cap if data & 0b010000 else porous     # Up
        side_west = cap if data & 0b000010 else porous   # West
        side_south = cap if data & 0b000001 else porous  # South
        side_south = side_south.transpose(Image.FLIP_LEFT_RIGHT)

        return self.build_full_block(side_up, None, None, side_west, side_south)

    # iron bars and glass pane
    # TODO glass pane is not a sprite; it has a texture for the side,
    # but at the moment it is not used
    @material(blockid=[101,102, 160], data=list(range(256)), transparent=True, nospawn=True)
    def panes(self, blockid, data):
        # no rotation, uses pseudo data
        if blockid == 101:  # iron bars
            t = self.load_image_texture("assets/minecraft/textures/block/iron_bars.png")
        elif blockid == 160:  # stained glass panes
            t = self.load_image_texture("assets/minecraft/textures/block/%s_stained_glass.png" % color_map[data & 0xf])
        else:  # glass panes
            t = self.load_image_texture("assets/minecraft/textures/block/glass.png")
        left = t.copy()
        right = t.copy()
        center = t.copy()

        # generate the four small pieces of the glass pane
        ImageDraw.Draw(right).rectangle((0,0,7,15),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(left).rectangle((8,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(center).rectangle((0,0,6,15),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(center).rectangle((9,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0))

        up_center = self.transform_image_side(center)
        up_left = self.transform_image_side(left)
        up_right = self.transform_image_side(right).transpose(Image.FLIP_TOP_BOTTOM)
        dw_right = self.transform_image_side(right)
        dw_left = self.transform_image_side(left).transpose(Image.FLIP_TOP_BOTTOM)

        # Create img to compose the texture
        img = Image.new("RGBA", (24,24), self.bgcolor)

        # +x axis points top right direction
        # +y axis points bottom right direction
        # First compose things in the back of the image,
        # then things in the front.
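        # Illustrative split of the pane pseudo-data handled next (lower
        # nibble = stained-glass colour, upper nibble = adjacency bits):
        #
        #   >>> data = 0b10100011   # hypothetical pseudo value
        #   >>> data & 0xf, data >> 4
        #   (3, 10)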
# the lower 4 bits encode color, the upper 4 encode adjencies data = data >> 4 if data == 0: alpha_over(img, up_center, (6, 3), up_center) # center else: def draw_top_left(): alpha_over(img, up_left, (6, 3), up_left) # top left def draw_top_right(): alpha_over(img, up_right, (6, 3), up_right) # top right def draw_bottom_right(): alpha_over(img, dw_right, (6, 3), dw_right) # bottom right def draw_bottom_left(): alpha_over(img, dw_left, (6, 3), dw_left) # bottom left draw_funcs = [draw_top_left, draw_top_right, draw_bottom_right, draw_bottom_left] if (data & 0b0001) == 1: draw_funcs[(self.rotation + 0) % len(draw_funcs)]() if (data & 0b0010) == 2: draw_funcs[(self.rotation + 1) % len(draw_funcs)]() if (data & 0b0100) == 4: draw_funcs[(self.rotation + 2) % len(draw_funcs)]() if (data & 0b1000) == 8: draw_funcs[(self.rotation + 3) % len(draw_funcs)]() return img # melon block(blockid=103, top_image="assets/minecraft/textures/block/melon_top.png", side_image="assets/minecraft/textures/block/melon_side.png", solid=True) # pumpkin and melon stem # TODO To render it as in game needs from pseudo data and ancil data: # once fully grown the stem bends to the melon/pumpkin block, # at the moment only render the growing stem @material(blockid=[104,105], data=list(range(8)), transparent=True) def stem(self, blockid, data): # the ancildata value indicates how much of the texture # is shown. # not fully grown stem or no pumpkin/melon touching it, # straight up stem t = self.load_image_texture("assets/minecraft/textures/block/melon_stem.png").copy() img = Image.new("RGBA", (16,16), self.bgcolor) alpha_over(img, t, (0, int(16 - 16*((data + 1)/8.))), t) img = self.build_sprite(t) if data & 7 == 7: # fully grown stem gets brown color! # there is a conditional in rendermode-normal.c to not # tint the data value 7 img = self.tint_texture(img, (211,169,116)) return img # nether vines billboard(blockid=1012, imagename="assets/minecraft/textures/block/twisting_vines.png") billboard(blockid=1013, imagename="assets/minecraft/textures/block/twisting_vines_plant.png") billboard(blockid=1014, imagename="assets/minecraft/textures/block/weeping_vines.png") billboard(blockid=1015, imagename="assets/minecraft/textures/block/weeping_vines_plant.png") # vines @material(blockid=106, data=list(range(32)), transparent=True, solid=False, nospawn=True) def vines(self, blockid, data): # Re-arrange the bits in data based on self.rotation # rotation bit: 54321 # 0 UENWS # 1 UNWSE # 2 UWSEN # 3 USENW if self.rotation in [1, 2, 3]: bit_map = {1: [5, 3, 2, 1, 4], 2: [5, 2, 1, 4, 3], 3: [5, 1, 4, 3, 2]} new_data = 0 # Add the ith bit to new_data then shift left one at a time, # re-ordering data's bits in the order specified in bit_map for i in bit_map[self.rotation]: new_data = new_data << 1 new_data |= (data >> (i - 1)) & 1 data = new_data # decode data and prepare textures raw_texture = self.load_image_texture("assets/minecraft/textures/block/vine.png") side_up = raw_texture if data & 0b10000 else None # Up side_east = raw_texture if data & 0b01000 else None # East side_north = raw_texture if data & 0b00100 else None # North side_west = raw_texture if data & 0b00010 else None # West side_south = raw_texture if data & 0b00001 else None # South return self.build_full_block(side_up, side_north, side_east, side_west, side_south) # fence gates @material(blockid=[107, 183, 184, 185, 186, 187, 513, 514], data=list(range(8)), transparent=True, nospawn=True) def fence_gate(self, blockid, data): # rotation opened = False if data & 0x4: data = 
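# Pane pseudo data packs two fields: the stained glass color in the low
# nibble and the four neighbor flags in the high nibble (consumed above by
# the `data = data >> 4` shift). A hypothetical decode sketch with
# illustrative names, not part of the renderer:
def decode_pane_data(data):
    """Split pane pseudo data into (color index, [4 adjacency flags])."""
    color = data & 0x0F        # low nibble: stained glass color index
    adj = (data >> 4) & 0x0F   # high nibble: one bit per neighbor
    flags = [bool(adj & (1 << i)) for i in range(4)]
    return color, flags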
data & 0x3 opened = True if self.rotation == 1: if data == 0: data = 1 elif data == 1: data = 2 elif data == 2: data = 3 elif data == 3: data = 0 elif self.rotation == 2: if data == 0: data = 2 elif data == 1: data = 3 elif data == 2: data = 0 elif data == 3: data = 1 elif self.rotation == 3: if data == 0: data = 3 elif data == 1: data = 0 elif data == 2: data = 1 elif data == 3: data = 2 if opened: data = data | 0x4 # create the closed gate side if blockid == 107: # Oak gate_side = self.load_image_texture("assets/minecraft/textures/block/oak_planks.png").copy() elif blockid == 183: # Spruce gate_side = self.load_image_texture("assets/minecraft/textures/block/spruce_planks.png").copy() elif blockid == 184: # Birch gate_side = self.load_image_texture("assets/minecraft/textures/block/birch_planks.png").copy() elif blockid == 185: # Jungle gate_side = self.load_image_texture("assets/minecraft/textures/block/jungle_planks.png").copy() elif blockid == 186: # Dark Oak gate_side = self.load_image_texture("assets/minecraft/textures/block/dark_oak_planks.png").copy() elif blockid == 187: # Acacia gate_side = self.load_image_texture("assets/minecraft/textures/block/acacia_planks.png").copy() elif blockid == 513: # Crimson gate_side = self.load_image_texture("assets/minecraft/textures/block/crimson_planks.png").copy() elif blockid == 514: # Warped gate_side = self.load_image_texture("assets/minecraft/textures/block/warped_planks.png").copy() else: return None gate_side_draw = ImageDraw.Draw(gate_side) gate_side_draw.rectangle((7,0,15,0),outline=(0,0,0,0),fill=(0,0,0,0)) gate_side_draw.rectangle((7,4,9,6),outline=(0,0,0,0),fill=(0,0,0,0)) gate_side_draw.rectangle((7,10,15,16),outline=(0,0,0,0),fill=(0,0,0,0)) gate_side_draw.rectangle((0,12,15,16),outline=(0,0,0,0),fill=(0,0,0,0)) gate_side_draw.rectangle((0,0,4,15),outline=(0,0,0,0),fill=(0,0,0,0)) gate_side_draw.rectangle((14,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0)) # darken the sides slightly, as with the fences sidealpha = gate_side.split()[3] gate_side = ImageEnhance.Brightness(gate_side).enhance(0.9) gate_side.putalpha(sidealpha) # create the other sides mirror_gate_side = self.transform_image_side(gate_side.transpose(Image.FLIP_LEFT_RIGHT)) gate_side = self.transform_image_side(gate_side) gate_other_side = gate_side.transpose(Image.FLIP_LEFT_RIGHT) mirror_gate_other_side = mirror_gate_side.transpose(Image.FLIP_LEFT_RIGHT) # Create img to compose the fence gate img = Image.new("RGBA", (24,24), self.bgcolor) if data & 0x4: # opened data = data & 0x3 if data == 0: alpha_over(img, gate_side, (2,8), gate_side) alpha_over(img, gate_side, (13,3), gate_side) elif data == 1: alpha_over(img, gate_other_side, (-1,3), gate_other_side) alpha_over(img, gate_other_side, (10,8), gate_other_side) elif data == 2: alpha_over(img, mirror_gate_side, (-1,7), mirror_gate_side) alpha_over(img, mirror_gate_side, (10,2), mirror_gate_side) elif data == 3: alpha_over(img, mirror_gate_other_side, (2,1), mirror_gate_other_side) alpha_over(img, mirror_gate_other_side, (13,7), mirror_gate_other_side) else: # closed # positions for pasting the fence sides, as with fences pos_top_left = (2,3) pos_top_right = (10,3) pos_bottom_right = (10,7) pos_bottom_left = (2,7) if data == 0 or data == 2: alpha_over(img, gate_other_side, pos_top_right, gate_other_side) alpha_over(img, mirror_gate_other_side, pos_bottom_left, mirror_gate_other_side) elif data == 1 or data == 3: alpha_over(img, gate_side, pos_top_left, gate_side) alpha_over(img, mirror_gate_side, pos_bottom_right, 
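# The gate_side_draw.rectangle(...) calls above carve the gate shape out of
# a plank tile by clearing rectangles to transparent; the pane and wall code
# does the same. That cut-out step as a stand-alone sketch (hypothetical
# helper; the file drives ImageDraw directly):
from PIL import ImageDraw

def cut_out(tile, *boxes):
    """Return a copy of tile with each (x0, y0, x1, y1) box cleared to
    fully transparent pixels."""
    out = tile.copy()
    draw = ImageDraw.Draw(out)
    for box in boxes:
        draw.rectangle(box, outline=(0, 0, 0, 0), fill=(0, 0, 0, 0))
    return out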
mirror_gate_side)

    return img

# mycelium
block(blockid=110, top_image="assets/minecraft/textures/block/mycelium_top.png", side_image="assets/minecraft/textures/block/mycelium_side.png")

# warped_nylium & crimson_nylium
block(blockid=1006, top_image="assets/minecraft/textures/block/warped_nylium.png", side_image="assets/minecraft/textures/block/warped_nylium_side.png")
block(blockid=1007, top_image="assets/minecraft/textures/block/crimson_nylium.png", side_image="assets/minecraft/textures/block/crimson_nylium_side.png")

# lilypad
# At the moment of writing this, lilypads have no ancil data and their
# orientation depends on their position on the map. So it uses pseudo
# ancildata.
@material(blockid=111, data=list(range(4)), transparent=True)
def lilypad(self, blockid, data):
    t = self.load_image_texture("assets/minecraft/textures/block/lily_pad.png").copy()
    if data == 0:
        t = t.rotate(180)
    elif data == 1:
        t = t.rotate(270)
    elif data == 3:
        t = t.rotate(90)
    return self.build_full_block(None, None, None, None, None, t)

# nether bricks
@material(blockid=112, data=list(range(3)), solid=True)
def nether_bricks(self, blockid, data):
    if data == 0:  # normal
        t = self.load_image_texture("assets/minecraft/textures/block/nether_bricks.png")
    elif data == 1:  # cracked
        t = self.load_image_texture("assets/minecraft/textures/block/cracked_nether_bricks.png")
    elif data == 2:  # chiseled
        t = self.load_image_texture("assets/minecraft/textures/block/chiseled_nether_bricks.png")
    img = self.build_full_block(t, None, None, t, t)
    return img

# nether wart
@material(blockid=115, data=list(range(4)), transparent=True)
def nether_wart(self, blockid, data):
    if data == 0:  # just sprouted
        t = self.load_image_texture("assets/minecraft/textures/block/nether_wart_stage0.png")
    elif data in (1, 2):
        t = self.load_image_texture("assets/minecraft/textures/block/nether_wart_stage1.png")
    else:  # fully grown
        t = self.load_image_texture("assets/minecraft/textures/block/nether_wart_stage2.png")
    # use the same technique as tall grass
    img = self.build_billboard(t)
    return img

# enchantment table
# TODO there's no book at the moment
@material(blockid=116, transparent=True, nodata=True)
def enchantment_table(self, blockid, data):
    # no book at the moment
    top = self.load_image_texture("assets/minecraft/textures/block/enchanting_table_top.png")
    side = self.load_image_texture("assets/minecraft/textures/block/enchanting_table_side.png")
    img = self.build_full_block((top, 4), None, None, side, side)
    return img

# brewing stand
# TODO this is a placeholder: a 2D image is pasted
@material(blockid=117, data=list(range(5)), transparent=True)
def brewing_stand(self, blockid, data):
    base = self.load_image_texture("assets/minecraft/textures/block/brewing_stand_base.png")
    img = self.build_full_block(None, None, None, None, None, base)
    t = self.load_image_texture("assets/minecraft/textures/block/brewing_stand.png")
    stand = self.build_billboard(t)
    alpha_over(img, stand, (0, -2))
    return img

# cauldron
@material(blockid=118, data=list(range(4)), transparent=True, solid=True, nospawn=True)
def cauldron(self, blockid, data):
    side = self.load_image_texture("assets/minecraft/textures/block/cauldron_side.png").copy()
    top = self.load_image_texture("assets/minecraft/textures/block/cauldron_top.png")
    bottom = self.load_image_texture("assets/minecraft/textures/block/cauldron_inner.png")
    water = self.transform_image_top(self.load_image_texture("water.png"))
    # Side texture isn't transparent between the feet, so adjust the texture
ImageDraw.Draw(side).rectangle((5, 14, 11, 16), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0)) if data == 0: # Empty img = self.build_full_block(top, side, side, side, side) else: # Part or fully filled # Is filled in increments of a third, with data indicating how many thirds are filled img = self.build_full_block(None, side, side, None, None) alpha_over(img, water, (0, 12 - data * 4), water) img2 = self.build_full_block(top, None, None, side, side) alpha_over(img, img2, (0, 0), img2) return img # end portal and end_gateway @material(blockid=[119,209], transparent=True, nodata=True) def end_portal(self, blockid, data): img = Image.new("RGBA", (24,24), self.bgcolor) # generate a black texure with white, blue and grey dots resembling stars t = Image.new("RGBA", (16,16), (0,0,0,255)) for color in [(155,155,155,255), (100,255,100,255), (255,255,255,255)]: for i in range(6): x = randint(0,15) y = randint(0,15) t.putpixel((x,y),color) if blockid == 209: # end_gateway return self.build_block(t, t) t = self.transform_image_top(t) alpha_over(img, t, (0,0), t) return img # end portal frame (data range 8 to get all orientations of filled) @material(blockid=120, data=list(range(8)), transparent=True, solid=True, nospawn=True) def end_portal_frame(self, blockid, data): # Do rotation, only seems to affect ender eye & top of frame data = data & 0b100 | ((self.rotation + (data & 0b11)) % 4) top = self.load_image_texture("assets/minecraft/textures/block/end_portal_frame_top.png").copy() top = top.rotate((data % 4) * 90) side = self.load_image_texture("assets/minecraft/textures/block/end_portal_frame_side.png") img = self.build_full_block((top, 4), None, None, side, side) if data & 0x4 == 0x4: # ender eye on it # generate the eye eye_t = self.load_image_texture("assets/minecraft/textures/block/end_portal_frame_eye.png").copy() eye_t_s = eye_t.copy() # cut out from the texture the side and the top of the eye ImageDraw.Draw(eye_t).rectangle((0, 0, 15, 4), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0)) ImageDraw.Draw(eye_t_s).rectangle((0, 4, 15, 15), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0)) # transform images and paste eye = self.transform_image_top(eye_t.rotate((data % 4) * 90)) eye_s = self.transform_image_side(eye_t_s) eye_os = eye_s.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, eye_s, (5, 5), eye_s) alpha_over(img, eye_os, (9, 5), eye_os) alpha_over(img, eye, (0, 0), eye) return img # end stone block(blockid=121, top_image="assets/minecraft/textures/block/end_stone.png") # dragon egg # NOTE: this isn't a block, but I think it's better than nothing block(blockid=122, top_image="assets/minecraft/textures/block/dragon_egg.png") # inactive redstone lamp block(blockid=123, top_image="assets/minecraft/textures/block/redstone_lamp.png") # active redstone lamp block(blockid=124, top_image="assets/minecraft/textures/block/redstone_lamp_on.png") # daylight sensor. 
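# The end portal star field above calls randint() on the module-level
# random state, so the generated texture varies from run to run. If a
# reproducible star field were ever wanted, a locally seeded generator
# would do it -- a hypothetical variant for illustration, not what the
# renderer does:
from random import Random
from PIL import Image

def star_texture(seed=0):
    """Deterministic 16x16 black tile speckled with star-like pixels."""
    rng = Random(seed)
    t = Image.new("RGBA", (16, 16), (0, 0, 0, 255))
    for color in [(155, 155, 155, 255), (100, 255, 100, 255), (255, 255, 255, 255)]:
        for _ in range(6):
            t.putpixel((rng.randint(0, 15), rng.randint(0, 15)), color)
    return t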
@material(blockid=[151,178], transparent=True) def daylight_sensor(self, blockid, data): if blockid == 151: # daylight sensor top = self.load_image_texture("assets/minecraft/textures/block/daylight_detector_top.png") else: # inverted daylight sensor top = self.load_image_texture("assets/minecraft/textures/block/daylight_detector_inverted_top.png") side = self.load_image_texture("assets/minecraft/textures/block/daylight_detector_side.png") # cut the side texture in half mask = side.crop((0,8,16,16)) side = Image.new(side.mode, side.size, self.bgcolor) alpha_over(side, mask,(0,0,16,8), mask) # plain slab top = self.transform_image_top(top) side = self.transform_image_side(side) otherside = side.transpose(Image.FLIP_LEFT_RIGHT) sidealpha = side.split()[3] side = ImageEnhance.Brightness(side).enhance(0.9) side.putalpha(sidealpha) othersidealpha = otherside.split()[3] otherside = ImageEnhance.Brightness(otherside).enhance(0.8) otherside.putalpha(othersidealpha) img = Image.new("RGBA", (24,24), self.bgcolor) alpha_over(img, side, (0,12), side) alpha_over(img, otherside, (12,12), otherside) alpha_over(img, top, (0,6), top) return img # wooden double and normal slabs # these are the new wooden slabs, blockids 43 44 still have wooden # slabs, but those are unobtainable without cheating @material(blockid=[125, 126], data=list(range(16)), transparent=(44,), solid=True) def wooden_slabs(self, blockid, data): texture = data & 7 if texture== 0: # oak top = side = self.load_image_texture("assets/minecraft/textures/block/oak_planks.png") elif texture== 1: # spruce top = side = self.load_image_texture("assets/minecraft/textures/block/spruce_planks.png") elif texture== 2: # birch top = side = self.load_image_texture("assets/minecraft/textures/block/birch_planks.png") elif texture== 3: # jungle top = side = self.load_image_texture("assets/minecraft/textures/block/jungle_planks.png") elif texture== 4: # acacia top = side = self.load_image_texture("assets/minecraft/textures/block/acacia_planks.png") elif texture== 5: # dark wood top = side = self.load_image_texture("assets/minecraft/textures/block/dark_oak_planks.png") elif texture== 6: # crimson top = side = self.load_image_texture("assets/minecraft/textures/block/crimson_planks.png") elif texture== 7: # warped top = side = self.load_image_texture("assets/minecraft/textures/block/warped_planks.png") else: return None if blockid == 125: # double slab return self.build_block(top, side) return self.build_slab_block(top, side, data & 8 == 8); # emerald ore block(blockid=129, top_image="assets/minecraft/textures/block/emerald_ore.png") # emerald block block(blockid=133, top_image="assets/minecraft/textures/block/emerald_block.png") # cocoa plant @material(blockid=127, data=list(range(12)), transparent=True) def cocoa_plant(self, blockid, data): orientation = data & 3 # rotation if self.rotation == 1: if orientation == 0: orientation = 1 elif orientation == 1: orientation = 2 elif orientation == 2: orientation = 3 elif orientation == 3: orientation = 0 elif self.rotation == 2: if orientation == 0: orientation = 2 elif orientation == 1: orientation = 3 elif orientation == 2: orientation = 0 elif orientation == 3: orientation = 1 elif self.rotation == 3: if orientation == 0: orientation = 3 elif orientation == 1: orientation = 0 elif orientation == 2: orientation = 1 elif orientation == 3: orientation = 2 size = data & 12 if size == 8: # big t = self.load_image_texture("assets/minecraft/textures/block/cocoa_stage2.png") c_left = (0,3) c_right = (8,3) c_top = (5,2) 
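# The texture== chain in wooden_slabs above is a plain index-to-plank-name
# lookup. The same selection sketched as a table (hypothetical helper; the
# order mirrors the branches above):
_PLANKS = ["oak", "spruce", "birch", "jungle", "acacia", "dark_oak",
           "crimson", "warped"]

def plank_texture_path(index):
    """Map a 3-bit slab texture index to its plank texture path, or None
    for out-of-range values (as the function above returns None)."""
    if not 0 <= index < len(_PLANKS):
        return None
    return "assets/minecraft/textures/block/%s_planks.png" % _PLANKS[index]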
elif size == 4: # normal t = self.load_image_texture("assets/minecraft/textures/block/cocoa_stage1.png") c_left = (-2,2) c_right = (8,2) c_top = (5,2) elif size == 0: # small t = self.load_image_texture("assets/minecraft/textures/block/cocoa_stage0.png") c_left = (-3,2) c_right = (6,2) c_top = (5,2) # let's get every texture piece necessary to do this stalk = t.copy() ImageDraw.Draw(stalk).rectangle((0,0,11,16),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(stalk).rectangle((12,4,16,16),outline=(0,0,0,0),fill=(0,0,0,0)) top = t.copy() # warning! changes with plant size ImageDraw.Draw(top).rectangle((0,7,16,16),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(top).rectangle((7,0,16,6),outline=(0,0,0,0),fill=(0,0,0,0)) side = t.copy() # warning! changes with plant size ImageDraw.Draw(side).rectangle((0,0,6,16),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(side).rectangle((0,0,16,3),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(side).rectangle((0,14,16,16),outline=(0,0,0,0),fill=(0,0,0,0)) # first compose the block of the cocoa plant block = Image.new("RGBA", (24,24), self.bgcolor) tmp = self.transform_image_side(side).transpose(Image.FLIP_LEFT_RIGHT) alpha_over (block, tmp, c_right,tmp) # right side tmp = tmp.transpose(Image.FLIP_LEFT_RIGHT) alpha_over (block, tmp, c_left,tmp) # left side tmp = self.transform_image_top(top) alpha_over(block, tmp, c_top,tmp) if size == 0: # fix a pixel hole block.putpixel((6,9), block.getpixel((6,10))) # compose the cocoa plant img = Image.new("RGBA", (24,24), self.bgcolor) if orientation in (2,3): # south and west tmp = self.transform_image_side(stalk).transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, block,(-1,-2), block) alpha_over(img, tmp, (4,-2), tmp) if orientation == 3: img = img.transpose(Image.FLIP_LEFT_RIGHT) elif orientation in (0,1): # north and east tmp = self.transform_image_side(stalk.transpose(Image.FLIP_LEFT_RIGHT)) alpha_over(img, block,(-1,5), block) alpha_over(img, tmp, (2,12), tmp) if orientation == 0: img = img.transpose(Image.FLIP_LEFT_RIGHT) return img # command block @material(blockid=[137,210,211], solid=True, nodata=True) def command_block(self, blockid, data): if blockid == 210: front = self.load_image_texture("assets/minecraft/textures/block/repeating_command_block_front.png") side = self.load_image_texture("assets/minecraft/textures/block/repeating_command_block_side.png") back = self.load_image_texture("assets/minecraft/textures/block/repeating_command_block_back.png") elif blockid == 211: front = self.load_image_texture("assets/minecraft/textures/block/chain_command_block_front.png") side = self.load_image_texture("assets/minecraft/textures/block/chain_command_block_side.png") back = self.load_image_texture("assets/minecraft/textures/block/chain_command_block_back.png") else: front = self.load_image_texture("assets/minecraft/textures/block/command_block_front.png") side = self.load_image_texture("assets/minecraft/textures/block/command_block_side.png") back = self.load_image_texture("assets/minecraft/textures/block/command_block_back.png") return self.build_full_block(side, side, back, front, side) # beacon block # at the moment of writing this, it seems the beacon block doens't use # the data values @material(blockid=138, transparent=True, nodata = True) def beacon(self, blockid, data): # generate the three pieces of the block t = self.load_image_texture("assets/minecraft/textures/block/glass.png") glass = self.build_block(t,t) t = self.load_image_texture("assets/minecraft/textures/block/obsidian.png") obsidian = 
self.build_full_block((t,12),None, None, t, t) obsidian = obsidian.resize((20,20), Image.ANTIALIAS) t = self.load_image_texture("assets/minecraft/textures/block/beacon.png") crystal = self.build_block(t,t) crystal = crystal.resize((16,16),Image.ANTIALIAS) # compose the block img = Image.new("RGBA", (24,24), self.bgcolor) alpha_over(img, obsidian, (2, 4), obsidian) alpha_over(img, crystal, (4,3), crystal) alpha_over(img, glass, (0,0), glass) return img # cobblestone and mossy cobblestone walls, chorus plants, mossy stone brick walls # one additional bit of data value added for mossy and cobblestone @material(blockid=[199]+list(range(1792, 1808 + 1)), data=list(range(32)), transparent=True, nospawn=True) def cobblestone_wall(self, blockid, data): walls_id_to_tex = { 199: "assets/minecraft/textures/block/chorus_plant.png", # chorus plants 1792: "assets/minecraft/textures/block/andesite.png", 1793: "assets/minecraft/textures/block/bricks.png", 1794: "assets/minecraft/textures/block/cobblestone.png", 1795: "assets/minecraft/textures/block/diorite.png", 1796: "assets/minecraft/textures/block/end_stone_bricks.png", 1797: "assets/minecraft/textures/block/granite.png", 1798: "assets/minecraft/textures/block/mossy_cobblestone.png", 1799: "assets/minecraft/textures/block/mossy_stone_bricks.png", 1800: "assets/minecraft/textures/block/nether_bricks.png", 1801: "assets/minecraft/textures/block/prismarine.png", 1802: "assets/minecraft/textures/block/red_nether_bricks.png", 1803: "assets/minecraft/textures/block/red_sandstone.png", 1804: "assets/minecraft/textures/block/sandstone.png", 1805: "assets/minecraft/textures/block/stone_bricks.png", 1806: "assets/minecraft/textures/block/blackstone.png", 1807: "assets/minecraft/textures/block/polished_blackstone.png", 1808: "assets/minecraft/textures/block/polished_blackstone_bricks.png" } t = self.load_image_texture(walls_id_to_tex[blockid]).copy() wall_pole_top = t.copy() wall_pole_side = t.copy() wall_side_top = t.copy() wall_side = t.copy() # _full is used for walls without pole wall_side_top_full = t.copy() wall_side_full = t.copy() # generate the textures of the wall ImageDraw.Draw(wall_pole_top).rectangle((0,0,3,15),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(wall_pole_top).rectangle((12,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(wall_pole_top).rectangle((0,0,15,3),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(wall_pole_top).rectangle((0,12,15,15),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(wall_pole_side).rectangle((0,0,3,15),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(wall_pole_side).rectangle((12,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0)) # Create the sides and the top of the pole wall_pole_side = self.transform_image_side(wall_pole_side) wall_pole_other_side = wall_pole_side.transpose(Image.FLIP_LEFT_RIGHT) wall_pole_top = self.transform_image_top(wall_pole_top) # Darken the sides slightly. 
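# "Darken the sides slightly" is a recurring three-line pattern in this
# file: ImageEnhance.Brightness dims every band including alpha, so the
# alpha channel is split off first and restored afterwards. The pattern as
# a stand-alone helper (a sketch; the renderer inlines it each time):
from PIL import ImageEnhance

def darken_keep_alpha(img, factor):
    """Return img with its RGB dimmed by factor, alpha left untouched."""
    alpha = img.split()[3]
    out = ImageEnhance.Brightness(img).enhance(factor)
    out.putalpha(alpha)
    return out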
These methods also affect the alpha layer, # so save them first (we don't want to "darken" the alpha layer making # the block transparent) sidealpha = wall_pole_side.split()[3] wall_pole_side = ImageEnhance.Brightness(wall_pole_side).enhance(0.8) wall_pole_side.putalpha(sidealpha) othersidealpha = wall_pole_other_side.split()[3] wall_pole_other_side = ImageEnhance.Brightness(wall_pole_other_side).enhance(0.7) wall_pole_other_side.putalpha(othersidealpha) # Compose the wall pole wall_pole = Image.new("RGBA", (24,24), self.bgcolor) alpha_over(wall_pole,wall_pole_side, (3,4),wall_pole_side) alpha_over(wall_pole,wall_pole_other_side, (9,4),wall_pole_other_side) alpha_over(wall_pole,wall_pole_top, (0,0),wall_pole_top) # create the sides and the top of a wall attached to a pole ImageDraw.Draw(wall_side).rectangle((0,0,15,2),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(wall_side).rectangle((0,0,11,15),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(wall_side_top).rectangle((0,0,11,15),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(wall_side_top).rectangle((0,0,15,4),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(wall_side_top).rectangle((0,11,15,15),outline=(0,0,0,0),fill=(0,0,0,0)) # full version, without pole ImageDraw.Draw(wall_side_full).rectangle((0,0,15,2),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(wall_side_top_full).rectangle((0,4,15,15),outline=(0,0,0,0),fill=(0,0,0,0)) ImageDraw.Draw(wall_side_top_full).rectangle((0,4,15,15),outline=(0,0,0,0),fill=(0,0,0,0)) # compose the sides of a wall atached to a pole tmp = Image.new("RGBA", (24,24), self.bgcolor) wall_side = self.transform_image_side(wall_side) wall_side_top = self.transform_image_top(wall_side_top) # Darken the sides slightly. These methods also affect the alpha layer, # so save them first (we don't want to "darken" the alpha layer making # the block transparent) sidealpha = wall_side.split()[3] wall_side = ImageEnhance.Brightness(wall_side).enhance(0.7) wall_side.putalpha(sidealpha) alpha_over(tmp,wall_side, (0,0),wall_side) alpha_over(tmp,wall_side_top, (-5,3),wall_side_top) wall_side = tmp wall_other_side = wall_side.transpose(Image.FLIP_LEFT_RIGHT) # compose the sides of the full wall tmp = Image.new("RGBA", (24,24), self.bgcolor) wall_side_full = self.transform_image_side(wall_side_full) wall_side_top_full = self.transform_image_top(wall_side_top_full.rotate(90)) # Darken the sides slightly. These methods also affect the alpha layer, # so save them first (we don't want to "darken" the alpha layer making # the block transparent) sidealpha = wall_side_full.split()[3] wall_side_full = ImageEnhance.Brightness(wall_side_full).enhance(0.7) wall_side_full.putalpha(sidealpha) alpha_over(tmp,wall_side_full, (4,0),wall_side_full) alpha_over(tmp,wall_side_top_full, (3,-4),wall_side_top_full) wall_side_full = tmp wall_other_side_full = wall_side_full.transpose(Image.FLIP_LEFT_RIGHT) # Create img to compose the wall img = Image.new("RGBA", (24,24), self.bgcolor) # Position wall imgs around the wall bit stick pos_top_left = (-5,-2) pos_bottom_left = (-8,4) pos_top_right = (5,-3) pos_bottom_right = (7,4) # +x axis points top right direction # +y axis points bottom right direction # There are two special cases for wall without pole. # Normal case: # First compose the walls in the back of the image, # then the pole and then the walls in the front. 
if (data == 0b1010) or (data == 0b11010): alpha_over(img, wall_other_side_full,(0,2), wall_other_side_full) elif (data == 0b0101) or (data == 0b10101): alpha_over(img, wall_side_full,(0,2), wall_side_full) else: if (data & 0b0001) == 1: alpha_over(img,wall_side, pos_top_left,wall_side) # top left if (data & 0b1000) == 8: alpha_over(img,wall_other_side, pos_top_right,wall_other_side) # top right alpha_over(img,wall_pole,(0,0),wall_pole) if (data & 0b0010) == 2: alpha_over(img,wall_other_side, pos_bottom_left,wall_other_side) # bottom left if (data & 0b0100) == 4: alpha_over(img,wall_side, pos_bottom_right,wall_side) # bottom right return img # carrots, potatoes @material(blockid=[141,142], data=list(range(8)), transparent=True, nospawn=True) def crops4(self, blockid, data): # carrots and potatoes have 8 data, but only 4 visual stages stage = {0:0, 1:0, 2:1, 3:1, 4:2, 5:2, 6:2, 7:3}[data] if blockid == 141: # carrots raw_crop = self.load_image_texture("assets/minecraft/textures/block/carrots_stage%d.png" % stage) else: # potatoes raw_crop = self.load_image_texture("assets/minecraft/textures/block/potatoes_stage%d.png" % stage) crop1 = self.transform_image_top(raw_crop) crop2 = self.transform_image_side(raw_crop) crop3 = crop2.transpose(Image.FLIP_LEFT_RIGHT) img = Image.new("RGBA", (24,24), self.bgcolor) alpha_over(img, crop1, (0,12), crop1) alpha_over(img, crop2, (6,3), crop2) alpha_over(img, crop3, (6,3), crop3) return img # anvils @material(blockid=145, data=list(range(12)), transparent=True, nospawn=True) def anvil(self, blockid, data): # anvils only have two orientations, invert it for rotations 1 and 3 orientation = data & 0x1 if self.rotation in (1, 3): if orientation == 1: orientation = 0 else: orientation = 1 # get the correct textures # the bits 0x4 and 0x8 determine how damaged is the anvil if (data & 0xc) == 0: # non damaged anvil top = self.load_image_texture("assets/minecraft/textures/block/anvil_top.png") elif (data & 0xc) == 0x4: # slightly damaged top = self.load_image_texture("assets/minecraft/textures/block/chipped_anvil_top.png") elif (data & 0xc) == 0x8: # very damaged top = self.load_image_texture("assets/minecraft/textures/block/damaged_anvil_top.png") # everything else use this texture big_side = self.load_image_texture("assets/minecraft/textures/block/anvil.png").copy() small_side = self.load_image_texture("assets/minecraft/textures/block/anvil.png").copy() base = self.load_image_texture("assets/minecraft/textures/block/anvil.png").copy() small_base = self.load_image_texture("assets/minecraft/textures/block/anvil.png").copy() # cut needed patterns ImageDraw.Draw(big_side).rectangle((0, 8, 15, 15), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0)) ImageDraw.Draw(small_side).rectangle((0, 0, 2, 15), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0)) ImageDraw.Draw(small_side).rectangle((13, 0, 15, 15), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0)) ImageDraw.Draw(small_side).rectangle((0, 8, 15, 15), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0)) ImageDraw.Draw(base).rectangle((0, 0, 15, 15), outline=(0, 0, 0, 0)) ImageDraw.Draw(base).rectangle((1, 1, 14, 14), outline=(0, 0, 0, 0)) ImageDraw.Draw(small_base).rectangle((0, 0, 15, 15), outline=(0, 0, 0, 0)) ImageDraw.Draw(small_base).rectangle((1, 1, 14, 14), outline=(0, 0, 0, 0)) ImageDraw.Draw(small_base).rectangle((2, 2, 13, 13), outline=(0, 0, 0, 0)) ImageDraw.Draw(small_base).rectangle((3, 3, 12, 12), outline=(0, 0, 0, 0)) # check orientation and compose the anvil if orientation == 1: # bottom-left top-right top = top.rotate(90) left_side = 
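# The anvil's damage state lives in bits 0x4/0x8 of its data value
# (0 = pristine, 4 = chipped, 8 = damaged; 12 cannot occur because the
# data range above stops at 11). The same decode as a table -- an
# illustrative sketch, not part of the renderer:
_ANVIL_TOPS = {0x0: "anvil_top", 0x4: "chipped_anvil_top", 0x8: "damaged_anvil_top"}

def anvil_top_texture_path(data):
    return "assets/minecraft/textures/block/%s.png" % _ANVIL_TOPS[data & 0xC]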
small_side left_pos = (1, 6) right_side = big_side right_pos = (10, 5) else: # top-left bottom-right right_side = small_side right_pos = (12, 6) left_side = big_side left_pos = (3, 5) img = Image.new("RGBA", (24, 24), self.bgcolor) # darken sides alpha = big_side.split()[3] big_side = ImageEnhance.Brightness(big_side).enhance(0.8) big_side.putalpha(alpha) alpha = small_side.split()[3] small_side = ImageEnhance.Brightness(small_side).enhance(0.9) small_side.putalpha(alpha) alpha = base.split()[3] base_d = ImageEnhance.Brightness(base).enhance(0.8) base_d.putalpha(alpha) # compose base = self.transform_image_top(base) base_d = self.transform_image_top(base_d) small_base = self.transform_image_top(small_base) top = self.transform_image_top(top) alpha_over(img, base_d, (0, 12), base_d) alpha_over(img, base_d, (0, 11), base_d) alpha_over(img, base_d, (0, 10), base_d) alpha_over(img, small_base, (0, 10), small_base) alpha_over(img, top, (0, 1), top) # Fix gap between block edges alpha_over(img, top, (0, 0), top) left_side = self.transform_image_side(left_side) right_side = self.transform_image_side(right_side).transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, left_side, left_pos, left_side) alpha_over(img, right_side, right_pos, right_side) return img # block of redstone block(blockid=152, top_image="assets/minecraft/textures/block/redstone_block.png") # nether quartz ore block(blockid=153, top_image="assets/minecraft/textures/block/nether_quartz_ore.png") # block of quartz @material(blockid=155, data=list(range(5)), solid=True) def quartz_block(self, blockid, data): if data in (0,1): # normal and chiseled quartz block if data == 0: top = self.load_image_texture("assets/minecraft/textures/block/quartz_block_top.png") side = self.load_image_texture("assets/minecraft/textures/block/quartz_block_side.png") else: top = self.load_image_texture("assets/minecraft/textures/block/chiseled_quartz_block_top.png") side = self.load_image_texture("assets/minecraft/textures/block/chiseled_quartz_block.png") return self.build_block(top, side) # pillar quartz block with orientation top = self.load_image_texture("assets/minecraft/textures/block/quartz_pillar_top.png") side = self.load_image_texture("assets/minecraft/textures/block/quartz_pillar.png").copy() if data == 2: # vertical return self.build_block(top, side) elif data == 3: # north-south oriented if self.rotation in (0,2): return self.build_full_block(side.rotate(90), None, None, top, side.rotate(90)) return self.build_full_block(side, None, None, side.rotate(90), top) elif data == 4: # east-west oriented if self.rotation in (0,2): return self.build_full_block(side, None, None, side.rotate(90), top) return self.build_full_block(side.rotate(90), None, None, top, side.rotate(90)) # hopper @material(blockid=154, data=list(range(4)), transparent=True) def hopper(self, blockid, data): #build the top side = self.load_image_texture("assets/minecraft/textures/block/hopper_outside.png") top = self.load_image_texture("assets/minecraft/textures/block/hopper_top.png") bottom = self.load_image_texture("assets/minecraft/textures/block/hopper_inside.png") hop_top = self.build_full_block((top,10), side, side, side, side, side) #build a solid block for mid/top hop_mid = self.build_full_block((top,5), side, side, side, side, side) hop_bot = self.build_block(side,side) hop_mid = hop_mid.resize((17,17),Image.ANTIALIAS) hop_bot = hop_bot.resize((10,10),Image.ANTIALIAS) #compose the final block img = Image.new("RGBA", (24,24), self.bgcolor) alpha_over(img, hop_bot, (7,14), 
hop_bot) alpha_over(img, hop_mid, (3,3), hop_mid) alpha_over(img, hop_top, (0,-6), hop_top) return img # slime block block(blockid=165, top_image="assets/minecraft/textures/block/slime_block.png") # prismarine block @material(blockid=168, data=list(range(3)), solid=True) def prismarine_block(self, blockid, data): if data == 0: # prismarine t = self.load_image_texture("assets/minecraft/textures/block/prismarine.png") elif data == 1: # prismarine bricks t = self.load_image_texture("assets/minecraft/textures/block/prismarine_bricks.png") elif data == 2: # dark prismarine t = self.load_image_texture("assets/minecraft/textures/block/dark_prismarine.png") img = self.build_block(t, t) return img # sea lantern block(blockid=169, top_image="assets/minecraft/textures/block/sea_lantern.png") # hay block @material(blockid=170, data=list(range(9)), solid=True) def hayblock(self, blockid, data): top = self.load_image_texture("assets/minecraft/textures/block/hay_block_top.png") side = self.load_image_texture("assets/minecraft/textures/block/hay_block_side.png") if self.rotation == 1: if data == 4: data = 8 elif data == 8: data = 4 elif self.rotation == 3: if data == 4: data = 8 elif data == 8: data = 4 # choose orientation and paste textures if data == 4: # east-west orientation return self.build_full_block(side.rotate(90), None, None, top, side.rotate(90)) elif data == 8: # north-south orientation return self.build_full_block(side, None, None, side.rotate(90), top) else: return self.build_block(top, side) # carpet - wool block that's small? @material(blockid=171, data=list(range(16)), transparent=True) def carpet(self, blockid, data): texture = self.load_image_texture("assets/minecraft/textures/block/%s_wool.png" % color_map[data]) return self.build_full_block((texture,15),texture,texture,texture,texture) #clay block block(blockid=172, top_image="assets/minecraft/textures/block/terracotta.png") #stained hardened clay @material(blockid=159, data=list(range(16)), solid=True) def stained_clay(self, blockid, data): texture = self.load_image_texture("assets/minecraft/textures/block/%s_terracotta.png" % color_map[data]) return self.build_block(texture,texture) #coal block block(blockid=173, top_image="assets/minecraft/textures/block/coal_block.png") # packed ice block block(blockid=174, top_image="assets/minecraft/textures/block/packed_ice.png") #blue ice block(blockid=11312, top_image="assets/minecraft/textures/block/blue_ice.png") #smooth stones block(blockid=11313, top_image="assets/minecraft/textures/block/smooth_stone.png") # stone block(blockid=11314, top_image="assets/minecraft/textures/block/sandstone_top.png") # sandstone block(blockid=11315, top_image="assets/minecraft/textures/block/red_sandstone_top.png") # red sandstone #coral blocks block(blockid=11316, top_image="assets/minecraft/textures/block/brain_coral_block.png") block(blockid=11317, top_image="assets/minecraft/textures/block/bubble_coral_block.png") block(blockid=11318, top_image="assets/minecraft/textures/block/fire_coral_block.png") block(blockid=11319, top_image="assets/minecraft/textures/block/horn_coral_block.png") block(blockid=11320, top_image="assets/minecraft/textures/block/tube_coral_block.png") #dead coral blocks block(blockid=11321, top_image="assets/minecraft/textures/block/dead_brain_coral_block.png") block(blockid=11322, top_image="assets/minecraft/textures/block/dead_bubble_coral_block.png") block(blockid=11323, top_image="assets/minecraft/textures/block/dead_fire_coral_block.png") block(blockid=11324, 
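# hayblock above swaps its two horizontal axis flags (4 = east-west,
# 8 = north-south) when the map is rotated 90 or 270 degrees; boneblock
# below repeats the same swap. The swap in isolation (hypothetical helper):
def swap_axis_bits(data, rotation):
    """Exchange the 4/8 horizontal-axis flags for rotations 1 and 3;
    vertical blocks and other rotations pass through unchanged."""
    if rotation in (1, 3) and (data & 12) in (4, 8):
        return (data & ~12) | ((data & 12) ^ 12)
    return data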
top_image="assets/minecraft/textures/block/dead_horn_coral_block.png")
block(blockid=11325, top_image="assets/minecraft/textures/block/dead_tube_coral_block.png")

# double plants (sunflower, lilac, tall grass, large fern, rose bush, peony)
@material(blockid=175, data=list(range(16)), transparent=True)
def flower(self, blockid, data):
    double_plant_map = ["sunflower", "lilac", "tall_grass", "large_fern", "rose_bush", "peony", "peony", "peony"]
    plant = double_plant_map[data & 0x7]

    if data & 0x8:
        part = "top"
    else:
        part = "bottom"

    png = "assets/minecraft/textures/block/%s_%s.png" % (plant, part)
    texture = self.load_image_texture(png)
    img = self.build_billboard(texture)

    # sunflower top
    if data == 8:
        bloom_tex = self.load_image_texture("assets/minecraft/textures/block/sunflower_front.png")
        alpha_over(img, bloom_tex.resize((14, 11), Image.ANTIALIAS), (5, 5))
    return img

# chorus flower
@material(blockid=200, data=list(range(6)), solid=True)
def chorus_flower(self, blockid, data):
    # aged 5, dead
    if data == 5:
        texture = self.load_image_texture("assets/minecraft/textures/block/chorus_flower_dead.png")
    else:
        texture = self.load_image_texture("assets/minecraft/textures/block/chorus_flower.png")
    return self.build_block(texture, texture)

# purpur block
block(blockid=201, top_image="assets/minecraft/textures/block/purpur_block.png")

# purpur pillar
@material(blockid=202, data=list(range(12)), solid=True)
def purpur_pillar(self, blockid, data):
    pillar_orientation = data & 12
    top = self.load_image_texture("assets/minecraft/textures/block/purpur_pillar_top.png")
    side = self.load_image_texture("assets/minecraft/textures/block/purpur_pillar.png")
    if pillar_orientation == 0:  # vertical
        return self.build_block(top, side)
    elif pillar_orientation == 4:  # east-west orientation
        return self.build_full_block(side.rotate(90), None, None, top, side.rotate(90))
    elif pillar_orientation == 8:  # north-south orientation
        return self.build_full_block(side, None, None, side.rotate(270), top)

# end brick
block(blockid=206, top_image="assets/minecraft/textures/block/end_stone_bricks.png")

# frosted ice
@material(blockid=212, data=list(range(4)), solid=True)
def frosted_ice(self, blockid, data):
    img = self.load_image_texture("assets/minecraft/textures/block/frosted_ice_%d.png" % data)
    return self.build_block(img, img)

# magma block
block(blockid=213, top_image="assets/minecraft/textures/block/magma.png")

# nether wart block
block(blockid=214, top_image="assets/minecraft/textures/block/nether_wart_block.png")

# warped wart block
block(blockid=1010, top_image="assets/minecraft/textures/block/warped_wart_block.png")

# red nether brick
block(blockid=215, top_image="assets/minecraft/textures/block/red_nether_bricks.png")

# bone block
@material(blockid=216, data=list(range(12)), solid=True)
def boneblock(self, blockid, data):
    # extract orientation
    boneblock_orientation = data & 12
    if self.rotation == 1:
        if boneblock_orientation == 4:
            boneblock_orientation = 8
        elif boneblock_orientation == 8:
            boneblock_orientation = 4
    elif self.rotation == 3:
        if boneblock_orientation == 4:
            boneblock_orientation = 8
        elif boneblock_orientation == 8:
            boneblock_orientation = 4

    top = self.load_image_texture("assets/minecraft/textures/block/bone_block_top.png")
    side = self.load_image_texture("assets/minecraft/textures/block/bone_block_side.png")

    # choose orientation and paste textures
    if boneblock_orientation == 0:
        return self.build_block(top, side)
    elif boneblock_orientation == 4:  # east-west orientation
        return self.build_full_block(side.rotate(90), None, None, top, side.rotate(90))
    elif boneblock_orientation == 8:  # north-south orientation
return self.build_full_block(side, None, None, side.rotate(270), top) # observer @material(blockid=218, data=[0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 13], solid=True, nospawn=True) def observer(self, blockid, data): # Do rotation if self.rotation in [1, 2, 3] and (data & 0b111) in [2, 3, 4, 5]: rotation_map = {1: {2: 5, 3: 4, 4: 2, 5: 3}, 2: {2: 3, 3: 2, 4: 5, 5: 4}, 3: {2: 4, 3: 5, 4: 3, 5: 2}} data = (data & 0b1000) | rotation_map[self.rotation][data & 0b111] front = self.load_image_texture("assets/minecraft/textures/block/observer_front.png") side = self.load_image_texture("assets/minecraft/textures/block/observer_side.png") top = self.load_image_texture("assets/minecraft/textures/block/observer_top.png") file_name_back = "observer_back_on" if data & 0b1000 else "observer_back" back = self.load_image_texture("assets/minecraft/textures/block/%s.png" % file_name_back) if data & 0b0111 == 0: # Down img = self.build_full_block(back, None, None, side.rotate(90), top) elif data & 0b0111 == 1: # Up img = self.build_full_block(front.rotate(180), None, None, side.rotate(90), top.rotate(180)) elif data & 0b0111 == 2: # East img = self.build_full_block(top.rotate(180), None, None, side, back) elif data & 0b0111 == 3: # West img = self.build_full_block(top, None, None, side, front) elif data & 0b0111 == 4: # North img = self.build_full_block(top.rotate(270), None, None, front, side) elif data & 0b0111 == 5: # South img = self.build_full_block(top.rotate(90), None, None, back, side) return img # shulker box @material(blockid=list(range(219, 235)) + [257], data=list(range(6)), solid=True, nospawn=True) def shulker_box(self, blockid, data): # Do rotation if self.rotation in [1, 2, 3] and data in [2, 3, 4, 5]: rotation_map = {1: {2: 5, 3: 4, 4: 2, 5: 3}, 2: {2: 3, 3: 2, 4: 5, 5: 4}, 3: {2: 4, 3: 5, 4: 3, 5: 2}} data = rotation_map[self.rotation][data] if blockid == 257: # Uncolored shulker box file_name = "shulker.png" else: file_name = "shulker_%s.png" % color_map[blockid - 219] shulker_t = self.load_image("assets/minecraft/textures/entity/shulker/%s" % file_name).copy() w, h = shulker_t.size res = w // 4 # Cut out the parts of the shulker texture we need for the box top = shulker_t.crop((res, 0, res * 2, res)) bottom = shulker_t.crop((res * 2, int(res * 1.75), res * 3, int(res * 2.75))) side_top = shulker_t.crop((0, res, res, int(res * 1.75))) side_bottom = shulker_t.crop((0, int(res * 2.75), res, int(res * 3.25))) side = Image.new('RGBA', (res, res)) side.paste(side_top, (0, 0), side_top) side.paste(side_bottom, (0, res // 2), side_bottom) if data == 0: # down side = side.rotate(180) img = self.build_full_block(bottom, None, None, side, side) elif data == 1: # up img = self.build_full_block(top, None, None, side, side) elif data == 2: # east img = self.build_full_block(side, None, None, side.rotate(90), bottom) elif data == 3: # west img = self.build_full_block(side.rotate(180), None, None, side.rotate(270), top) elif data == 4: # north img = self.build_full_block(side.rotate(90), None, None, top, side.rotate(270)) elif data == 5: # south img = self.build_full_block(side.rotate(270), None, None, bottom, side.rotate(90)) return img # structure block @material(blockid=255, data=list(range(4)), solid=True) def structure_block(self, blockid, data): if data == 0: img = self.load_image_texture("assets/minecraft/textures/block/structure_block_save.png") elif data == 1: img = self.load_image_texture("assets/minecraft/textures/block/structure_block_load.png") elif data == 2: img = 
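# observer, shulker_box and jigsaw_block all rotate the horizontal facing
# values 2..5 with an identical table. A sketch of hoisting it into one
# shared helper (hypothetical refactor; the file repeats the dict inline):
_FACING_ROTATION = {1: {2: 5, 3: 4, 4: 2, 5: 3},
                    2: {2: 3, 3: 2, 4: 5, 5: 4},
                    3: {2: 4, 3: 5, 4: 3, 5: 2}}

def rotate_facing6(facing, rotation):
    """Rotate a six-direction facing value; up/down (0/1) and rotation 0
    fall through unchanged."""
    return _FACING_ROTATION.get(rotation, {}).get(facing, facing)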
self.load_image_texture("assets/minecraft/textures/block/structure_block_corner.png")
    elif data == 3:
        img = self.load_image_texture("assets/minecraft/textures/block/structure_block_data.png")
    return self.build_block(img, img)

# Jigsaw block
@material(blockid=256, data=list(range(6)), solid=True)
def jigsaw_block(self, blockid, data):
    # Do rotation
    if self.rotation in [1, 2, 3] and data in [2, 3, 4, 5]:
        rotation_map = {1: {2: 5, 3: 4, 4: 2, 5: 3},
                        2: {2: 3, 3: 2, 4: 5, 5: 4},
                        3: {2: 4, 3: 5, 4: 3, 5: 2}}
        data = rotation_map[self.rotation][data]

    top = self.load_image_texture("assets/minecraft/textures/block/jigsaw_top.png")
    bottom = self.load_image_texture("assets/minecraft/textures/block/jigsaw_bottom.png")
    side = self.load_image_texture("assets/minecraft/textures/block/jigsaw_side.png")

    if data == 0:  # Down
        img = self.build_full_block(bottom.rotate(self.rotation * 90), None, None,
                                    side.rotate(180), side.rotate(180))
    elif data == 1:  # Up
        img = self.build_full_block(top.rotate(self.rotation * 90), None, None, side, side)
    elif data == 2:  # North
        img = self.build_full_block(side, None, None, side.rotate(90), bottom.rotate(180))
    elif data == 3:  # South
        img = self.build_full_block(side.rotate(180), None, None, side.rotate(270), top.rotate(270))
    elif data == 4:  # West
        img = self.build_full_block(side.rotate(90), None, None, top.rotate(180), side.rotate(270))
    elif data == 5:  # East
        img = self.build_full_block(side.rotate(270), None, None, bottom.rotate(180), side.rotate(90))
    return img

# beetroots(207), berry bushes (11505)
@material(blockid=[207, 11505], data=list(range(4)), transparent=True, nospawn=True)
def crops(self, blockid, data):
    crops_id_to_tex = {
        207: "assets/minecraft/textures/block/beetroots_stage%d.png",
        11505: "assets/minecraft/textures/block/sweet_berry_bush_stage%d.png",
    }
    raw_crop = self.load_image_texture(crops_id_to_tex[blockid] % data)
    crop1 = self.transform_image_top(raw_crop)
    crop2 = self.transform_image_side(raw_crop)
    crop3 = crop2.transpose(Image.FLIP_LEFT_RIGHT)

    img = Image.new("RGBA", (24, 24), self.bgcolor)
    alpha_over(img, crop1, (0, 12), crop1)
    alpha_over(img, crop2, (6, 3), crop2)
    alpha_over(img, crop3, (6, 3), crop3)
    return img

# Concrete
@material(blockid=251, data=list(range(16)), solid=True)
def concrete(self, blockid, data):
    texture = self.load_image_texture("assets/minecraft/textures/block/%s_concrete.png" % color_map[data])
    return self.build_block(texture, texture)

# Concrete Powder
@material(blockid=252, data=list(range(16)), solid=True)
def concrete_powder(self, blockid, data):
    texture = self.load_image_texture("assets/minecraft/textures/block/%s_concrete_powder.png" % color_map[data])
    return self.build_block(texture, texture)

# Glazed Terracotta
@material(blockid=list(range(235, 251)), data=list(range(4)), solid=True)
def glazed_terracotta(self, blockid, data):
    # Do rotation
    data = (self.rotation + data) % 4
    texture = self.load_image_texture("assets/minecraft/textures/block/%s_glazed_terracotta.png" % color_map[blockid - 235]).copy()
    texture_side4 = texture.transpose(Image.FLIP_LEFT_RIGHT)

    if data == 0:  # South
        return self.build_full_block(texture, None, None, texture, texture_side4.rotate(270))
    elif data == 1:  # West
        return self.build_full_block(texture.rotate(270), None, None, texture.rotate(90), texture_side4.rotate(180))
    elif data == 2:  # North
        return self.build_full_block(texture.rotate(180), None, None, texture.rotate(180), texture_side4.rotate(90))
    elif data == 3:  # East
        return self.build_full_block(texture.rotate(90), None, None, texture.rotate(270),
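# concrete and concrete_powder above differ only in the texture stem; both
# index color_map (defined earlier in this file) with the data value. The
# shared lookup as a sketch (hypothetical helper):
def dyed_texture_path(stem, data):
    """Build a texture path for one of the 16 dye colors."""
    return "assets/minecraft/textures/block/%s_%s.png" % (color_map[data & 0x0F], stem)

# e.g. dyed_texture_path("concrete", 0) gives the white concrete texture,
# assuming color_map[0] == "white" as used elsewhere in this file.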
texture_side4) # dried kelp block @material(blockid=11331, data=[0], solid=True) def sandstone(self, blockid, data): top = self.load_image_texture("assets/minecraft/textures/block/dried_kelp_top.png") return self.build_block(top, self.load_image_texture("assets/minecraft/textures/block/dried_kelp_side.png")) # scaffolding block(blockid=11414, top_image="assets/minecraft/textures/block/scaffolding_top.png", side_image="assets/minecraft/textures/block/scaffolding_side.png", solid=False, transparent=True) # beehive and bee_nest @material(blockid=[11501, 11502], data=list(range(8)), solid=True) def beehivenest(self, blockid, data): if blockid == 11501: #beehive t_top = self.load_image("assets/minecraft/textures/block/beehive_end.png") t_side = self.load_image("assets/minecraft/textures/block/beehive_side.png") t_front = self.load_image("assets/minecraft/textures/block/beehive_front.png") t_front_honey = self.load_image("assets/minecraft/textures/block/beehive_front_honey.png") elif blockid == 11502: #bee_nest t_top = self.load_image("assets/minecraft/textures/block/bee_nest_top.png") t_side = self.load_image("assets/minecraft/textures/block/bee_nest_side.png") t_front = self.load_image("assets/minecraft/textures/block/bee_nest_front.png") t_front_honey = self.load_image("assets/minecraft/textures/block/bee_nest_front_honey.png") if data >= 4: front = t_front_honey else: front = t_front if self.rotation == 0: # rendering north upper-left if data == 0 or data == 4: # south return self.build_full_block(t_top, t_side, t_side, t_side, front) elif data == 1 or data == 5: # west return self.build_full_block(t_top, t_side, t_side, front, t_side) elif data == 2 or data == 6: # north return self.build_full_block(t_top, t_side, front, t_side, t_side) elif data == 3 or data == 7: # east return self.build_full_block(t_top, front, t_side, t_side, t_side) elif self.rotation == 1: # north upper-right if data == 0 or data == 4: # south return self.build_full_block(t_top, t_side, t_side, front, t_side) elif data == 1 or data == 5: # west return self.build_full_block(t_top, t_side, front, t_side, t_side) elif data == 2 or data == 6: # north return self.build_full_block(t_top, front, t_side, t_side, t_side) elif data == 3 or data == 7: # east return self.build_full_block(t_top, t_side, t_side, t_side, front) elif self.rotation == 2: # north lower-right if data == 0 or data == 4: # south return self.build_full_block(t_top, t_side, front, t_side, t_side) elif data == 1 or data == 5: # west return self.build_full_block(t_top, front, t_side, t_side, t_side) elif data == 2 or data == 6: # north return self.build_full_block(t_top, t_side, t_side, t_side, front) elif data == 3 or data == 7: # east return self.build_full_block(t_top, t_side, t_side, front, t_side) elif self.rotation == 3: # north lower-left if data == 0 or data == 4: # south return self.build_full_block(t_top, front, t_side, t_side, t_side) elif data == 1 or data == 5: # west return self.build_full_block(t_top, t_side, t_side, t_side, front) elif data == 2 or data == 6: # north return self.build_full_block(t_top, t_side, t_side, front, t_side) elif data == 3 or data == 7: # east return self.build_full_block(t_top, t_side, front, t_side, t_side) # honeycomb_block block(blockid=11503, top_image="assets/minecraft/textures/block/honeycomb_block.png") # honey_block block(blockid=11504, top_image="assets/minecraft/textures/block/honey_block_top.png", side_image="assets/minecraft/textures/block/honey_block_side.png") # Barrel @material(blockid=11418, 
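# beehivenest above enumerates every rotation/facing pair by hand; the four
# rotation tables are cyclic shifts of one another, so the wall that gets
# the front texture can also be picked by rotating the facing index first.
# A sketch under that observation (hypothetical; the renderer keeps the
# explicit branches):
def beehive_front_slot(data, rotation):
    """Index of the wall (0=S, 1=W, 2=N, 3=E at rotation 0) that receives
    the front texture; data bit 0x4 (honey level) does not affect it."""
    return ((data & 0b11) + rotation) % 4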
data=list(range(12)), solid=True) def barrel(self, blockid, data): t_bottom = self.load_image("assets/minecraft/textures/block/barrel_bottom.png") t_side = self.load_image("assets/minecraft/textures/block/barrel_side.png") if data & 0x01: t_top = self.load_image("assets/minecraft/textures/block/barrel_top_open.png") else: t_top = self.load_image("assets/minecraft/textures/block/barrel_top.png") data = data >> 1 if data == 0: # up return self.build_full_block(t_top, None, None, t_side, t_side) elif data == 1: # down t_side = t_side.rotate(180) return self.build_full_block(t_bottom, None, None, t_side, t_side) elif data == 2: # south return self.build_full_block(t_side.rotate(180), None, None, t_side.rotate(270), t_top) elif data == 3: # east return self.build_full_block(t_side.rotate(270), None, None, t_bottom, t_side.rotate(90)) elif data == 4: # north return self.build_full_block(t_side, None, None, t_side.rotate(90), t_bottom) else: # west return self.build_full_block(t_side.rotate(90), None, None, t_top, t_side.rotate(270)) # Campfire (11506) and soul campfire (1003) @material(blockid=[11506, 1003], data=list(range(8)), solid=True, transparent=True, nospawn=True) def campfire(self, blockid, data): # Do rotation, mask to not clobber lit data data = data & 0b100 | ((self.rotation + (data & 0b11)) % 4) block_name = "campfire" if blockid == 11506 else "soul_campfire" # Load textures # Fire & lit log textures contain multiple tiles, since both are # 16px wide rely on load_image_texture() to crop appropriately fire_raw_t = self.load_image_texture("assets/minecraft/textures/block/" + block_name + "_fire.png") log_raw_t = self.load_image_texture("assets/minecraft/textures/block/campfire_log.png") log_lit_raw_t = self.load_image_texture("assets/minecraft/textures/block/" + block_name + "_log_lit.png") def create_tile(img_src, coord_crop, coord_paste, rot): # Takes an image, crops a region, optionally rotates the # texture, then finally pastes it onto a 16x16 image img_out = Image.new("RGBA", (16, 16), self.bgcolor) img_in = img_src.crop(coord_crop) if rot != 0: img_in = img_in.rotate(rot, expand=True) img_out.paste(img_in, coord_paste) return img_out # Generate bottom bottom_t = log_lit_raw_t if data & 0b100 else log_raw_t bottom_t = create_tile(bottom_t, (0, 8, 16, 14), (0, 5), 0) bottom_t = self.transform_image_top(bottom_t) # Generate two variants of a log: one with a lit side, one without log_t = Image.new("RGBA", (24, 24), self.bgcolor) log_end_t = create_tile(log_raw_t, (0, 4, 4, 8), (12, 6), 0) log_side_t = create_tile(log_raw_t, (0, 0, 16, 4), (0, 6), 0) log_side_lit_t = create_tile(log_lit_raw_t, (0, 0, 16, 4), (0, 6), 0) log_end_t = self.transform_image_side(log_end_t) log_top_t = self.transform_image_top(log_side_t) log_side_t = self.transform_image_side(log_side_t).transpose(Image.FLIP_LEFT_RIGHT) log_side_lit_t = self.transform_image_side(log_side_lit_t).transpose(Image.FLIP_LEFT_RIGHT) alpha_over(log_t, log_top_t, (-2, 2), log_top_t) # Fix some holes at the edges alpha_over(log_t, log_top_t, (-2, 1), log_top_t) log_lit_t = log_t.copy() # Unlit log alpha_over(log_t, log_side_t, (5, 0), log_side_t) alpha_over(log_t, log_end_t, (-7, 0), log_end_t) # Lit log. For unlit fires, just reference the unlit log texture if data & 0b100: alpha_over(log_lit_t, log_side_lit_t, (5, 0), log_side_lit_t) alpha_over(log_lit_t, log_end_t, (-7, 0), log_end_t) else: log_lit_t = log_t # Log parts. 
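# Barrel data above packs the lid state in bit 0 and the facing in the
# remaining bits (consumed by the data >> 1 shift). Decode sketch with
# illustrative names, not part of the renderer:
def decode_barrel(data):
    is_open = bool(data & 0x01)
    facing = data >> 1  # 0=up, 1=down, 2=south, 3=east, 4=north, 5=west
    return is_open, facing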
Because fire needs to be in the middle of the logs, # split the logs into two parts: Those appearing behind the fire # and those appearing in front of the fire logs_back_t = Image.new("RGBA", (24, 24), self.bgcolor) logs_front_t = Image.new("RGBA", (24, 24), self.bgcolor) # Back logs alpha_over(logs_back_t, log_lit_t, (-1, 7), log_lit_t) log_tmp_t = logs_back_t.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(logs_back_t, log_tmp_t, (1, -3), log_tmp_t) # Front logs alpha_over(logs_front_t, log_t, (7, 10), log_t) # Due to the awkward drawing order, take a small part of the back # logs that need to be drawn on top of the front logs despite # the front logs being drawn last ImageDraw.Draw(log_tmp_t).rectangle((0, 0, 18, 24), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0)) alpha_over(logs_front_t, log_tmp_t, (1, -3), log_tmp_t) log_tmp_t = Image.new("RGBA", (24, 24), self.bgcolor) alpha_over(log_tmp_t, log_lit_t, (7, 10), log_lit_t) log_tmp_t = log_tmp_t.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(logs_front_t, log_tmp_t, (1, -3), log_tmp_t) # Compose final image img = Image.new("RGBA", (24, 24), self.bgcolor) alpha_over(img, bottom_t, (0, 12), bottom_t) alpha_over(img, logs_back_t, (0, 0), logs_back_t) if data & 0b100: fire_t = fire_raw_t.copy() if data & 0b11 in [0, 2]: # North, South fire_t = fire_t.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, fire_t, (4, 4), fire_t) alpha_over(img, logs_front_t, (0, 0), logs_front_t) if data & 0b11 in [0, 2]: # North, South img = img.transpose(Image.FLIP_LEFT_RIGHT) return img # Bell @material(blockid=11507, data=list(range(16)), solid=True, transparent=True, nospawn=True) def bell(self, blockid, data): # Do rotation, mask to not clobber attachment data data = data & 0b1100 | ((self.rotation + (data & 0b11)) % 4) # Load textures bell_raw_t = self.load_image("assets/minecraft/textures/entity/bell/bell_body.png") bar_raw_t = self.load_image_texture("assets/minecraft/textures/block/dark_oak_planks.png") post_raw_t = self.load_image_texture("assets/minecraft/textures/block/stone.png") def create_tile(img_src, coord_crop, coord_paste, rot): # Takes an image, crops a region, optionally rotates the # texture, then finally pastes it onto a 16x16 image img_out = Image.new("RGBA", (16, 16), self.bgcolor) img_in = img_src.crop(coord_crop) if rot != 0: img_in = img_in.rotate(rot, expand=True) img_out.paste(img_in, coord_paste) return img_out # 0 = floor, 1 = ceiling, 2 = single wall, 3 = double wall bell_type = (data & 0b1100) >> 2 # Should the bar/post texture be flipped? 
Yes if either: # - Attached to floor and East or West facing # - Not attached to floor and North or South facing flip_part = ((bell_type == 0 and data & 0b11 in [1, 3]) or (bell_type != 0 and data & 0b11 in [0, 2])) # Generate bell # Bell side textures varies based on self.rotation bell_sides_idx = [(0 - self.rotation) % 4, (3 - self.rotation) % 4] # Upper sides bell_coord = [x * 6 for x in bell_sides_idx] bell_ul_t = create_tile(bell_raw_t, (bell_coord[0], 6, bell_coord[0] + 6, 13), (5, 4), 180) bell_ur_t = create_tile(bell_raw_t, (bell_coord[1], 6, bell_coord[1] + 6, 13), (5, 4), 180) bell_ul_t = self.transform_image_side(bell_ul_t) bell_ur_t = self.transform_image_side(bell_ur_t.transpose(Image.FLIP_LEFT_RIGHT)) bell_ur_t = bell_ur_t.transpose(Image.FLIP_LEFT_RIGHT) # Lower sides bell_coord = [x * 8 for x in bell_sides_idx] bell_ll_t = create_tile(bell_raw_t, (bell_coord[0], 21, bell_coord[0] + 8, 23), (4, 11), 180) bell_lr_t = create_tile(bell_raw_t, (bell_coord[1], 21, bell_coord[1] + 8, 23), (4, 11), 180) bell_ll_t = self.transform_image_side(bell_ll_t) bell_lr_t = self.transform_image_side(bell_lr_t.transpose(Image.FLIP_LEFT_RIGHT)) bell_lr_t = bell_lr_t.transpose(Image.FLIP_LEFT_RIGHT) # Upper top top_rot = (180 + self.rotation * 90) % 360 bell_ut_t = create_tile(bell_raw_t, (6, 0, 12, 6), (5, 5), top_rot) bell_ut_t = self.transform_image_top(bell_ut_t) # Lower top bell_lt_t = create_tile(bell_raw_t, (8, 13, 16, 21), (4, 4), top_rot) bell_lt_t = self.transform_image_top(bell_lt_t) bell_t = Image.new("RGBA", (24, 24), self.bgcolor) alpha_over(bell_t, bell_lt_t, (0, 8), bell_lt_t) alpha_over(bell_t, bell_ll_t, (3, 4), bell_ll_t) alpha_over(bell_t, bell_lr_t, (9, 4), bell_lr_t) alpha_over(bell_t, bell_ut_t, (0, 3), bell_ut_t) alpha_over(bell_t, bell_ul_t, (4, 4), bell_ul_t) alpha_over(bell_t, bell_ur_t, (8, 4), bell_ur_t) # Generate bar if bell_type == 1: # Ceiling # bar_coord: Left Right Top bar_coord = [(4, 2, 6, 5), (6, 2, 8, 5), (1, 3, 3, 5)] bar_tile_pos = [(7, 1), (7, 1), (7, 7)] bar_over_pos = [(6, 3), (7, 2), (0, 0)] else: # Floor, single wall, double wall # Note: For a single wall bell, the position of the bar # varies based on facing if bell_type == 2 and data & 0b11 in [2, 3]: # Single wall, North/East facing bar_x_sw = 3 bar_l_pos_sw = (6, 7) else: bar_x_sw = 0 bar_l_pos_sw = (4, 8) bar_x = [2, None, bar_x_sw, 0][bell_type] bar_len = [12, None, 13, 16][bell_type] bar_l_pos = [(6, 7), None, bar_l_pos_sw, (4, 8)][bell_type] bar_long_coord = (bar_x, 3, bar_x + bar_len, 5) bar_coord = [(5, 4, 7, 6), bar_long_coord, bar_long_coord] bar_tile_pos = [(2, 1), (bar_x, 1), (bar_x, 7)] bar_over_pos = [bar_l_pos, (7, 3), (0, 1)] bar_l_t = create_tile(bar_raw_t, bar_coord[0], bar_tile_pos[0], 0) bar_r_t = create_tile(bar_raw_t, bar_coord[1], bar_tile_pos[1], 0) bar_t_t = create_tile(bar_raw_t, bar_coord[2], bar_tile_pos[2], 0) bar_l_t = self.transform_image_side(bar_l_t) bar_r_t = self.transform_image_side(bar_r_t.transpose(Image.FLIP_LEFT_RIGHT)) bar_r_t = bar_r_t.transpose(Image.FLIP_LEFT_RIGHT) bar_t_t = self.transform_image_top(bar_t_t) bar_t = Image.new("RGBA", (24, 24), self.bgcolor) alpha_over(bar_t, bar_t_t, bar_over_pos[2], bar_t_t) alpha_over(bar_t, bar_l_t, bar_over_pos[0], bar_l_t) alpha_over(bar_t, bar_r_t, bar_over_pos[1], bar_r_t) if flip_part: bar_t = bar_t.transpose(Image.FLIP_LEFT_RIGHT) # Generate post, only applies to floor attached bell if bell_type == 0: post_l_t = create_tile(post_raw_t, (0, 1, 4, 16), (6, 1), 0) post_r_t = create_tile(post_raw_t, (0, 1, 2, 16), 
(14, 1), 0) post_t_t = create_tile(post_raw_t, (0, 0, 2, 4), (14, 6), 0) post_l_t = self.transform_image_side(post_l_t) post_r_t = self.transform_image_side(post_r_t.transpose(Image.FLIP_LEFT_RIGHT)) post_r_t = post_r_t.transpose(Image.FLIP_LEFT_RIGHT) post_t_t = self.transform_image_top(post_t_t) post_back_t = Image.new("RGBA", (24, 24), self.bgcolor) post_front_t = Image.new("RGBA", (24, 24), self.bgcolor) alpha_over(post_back_t, post_t_t, (0, 1), post_t_t) alpha_over(post_back_t, post_l_t, (10, 0), post_l_t) alpha_over(post_back_t, post_r_t, (7, 3), post_r_t) alpha_over(post_back_t, post_r_t, (6, 3), post_r_t) # Fix some holes alpha_over(post_front_t, post_back_t, (-10, 5), post_back_t) if flip_part: post_back_t = post_back_t.transpose(Image.FLIP_LEFT_RIGHT) post_front_t = post_front_t.transpose(Image.FLIP_LEFT_RIGHT) img = Image.new("RGBA", (24, 24), self.bgcolor) if bell_type == 0: alpha_over(img, post_back_t, (0, 0), post_back_t) alpha_over(img, bell_t, (0, 0), bell_t) alpha_over(img, bar_t, (0, 0), bar_t) if bell_type == 0: alpha_over(img, post_front_t, (0, 0), post_front_t) return img # Ancient Debris block(blockid=[1000], top_image="assets/minecraft/textures/block/ancient_debris_top.png", side_image="assets/minecraft/textures/block/ancient_debris_side.png") # Basalt @material(blockid=[1001, 1002], data=list(range(3)), solid=True) def basalt(self, blockid, data): block_name = "polished_basalt" if blockid == 1002 else "basalt" top = self.load_image_texture("assets/minecraft/textures/block/" + block_name + "_top.png") side = self.load_image_texture("assets/minecraft/textures/block/" + block_name + "_side.png") if data == 0: return self.build_block(top, side) elif data == 1: # east-west orientation return self.build_full_block(side.rotate(90), None, None, top, side.rotate(90)) elif data == 2: # north-south orientation return self.build_full_block(side, None, None, side.rotate(270), top) # Blackstone block block(blockid=[1004], top_image="assets/minecraft/textures/block/blackstone_top.png", side_image="assets/minecraft/textures/block/blackstone.png") # Chain @material(blockid=11419, data=list(range(3)), solid=True, transparent=True, nospawn=True) def chain(self, blockid, data): tex = self.load_image_texture("assets/minecraft/textures/block/chain.png") sidetex = Image.new(tex.mode, tex.size, self.bgcolor) mask = tex.crop((0, 0, 6, 16)) alpha_over(sidetex, mask, (5, 0), mask) if data == 0: # y return self.build_sprite(sidetex) else: img = Image.new("RGBA", (24, 24), self.bgcolor) sidetex = sidetex.rotate(90) side = self.transform_image_side(sidetex) otherside = self.transform_image_top(sidetex) def draw_x(): _side = side.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, _side, (6,3), _side) alpha_over(img, otherside, (3,3), otherside) def draw_z(): _otherside = otherside.transpose(Image.FLIP_LEFT_RIGHT) alpha_over(img, side, (6,3), side) alpha_over(img, _otherside, (0,6), _otherside) draw_funcs = [draw_x, draw_z] if data == 1: # x draw_funcs[self.rotation % len(draw_funcs)]() elif data == 2: # z draw_funcs[(self.rotation + 1) % len(draw_funcs)]() return img # Respawn anchor @material(blockid=1037, data=list(range(5)), solid=True) def respawn_anchor(self, blockid, data): top = self.load_image_texture("assets/minecraft/textures/block/respawn_anchor_top_off.png" if data == 0 else "assets/minecraft/textures/block/respawn_anchor_top.png") side = self.load_image_texture( "assets/minecraft/textures/block/respawn_anchor_side%s.png" % (data)) return self.build_block(top, side) # Netherite 
block(blockid=[1005], top_image="assets/minecraft/textures/block/netherite_block.png") # soul soil block(blockid=1020, top_image="assets/minecraft/textures/block/soul_soil.png") # nether gold ore block(blockid=1021, top_image="assets/minecraft/textures/block/nether_gold_ore.png") # Solid Nether stone blocks block(blockid=1022, top_image="assets/minecraft/textures/block/polished_blackstone.png") block(blockid=1023, top_image="assets/minecraft/textures/block/chiseled_polished_blackstone.png") block(blockid=1024, top_image="assets/minecraft/textures/block/gilded_blackstone.png") block(blockid=1025, top_image="assets/minecraft/textures/block/cracked_polished_blackstone_bricks.png") block(blockid=1026, top_image="assets/minecraft/textures/block/polished_blackstone_bricks.png") block(blockid=1035, top_image="assets/minecraft/textures/block/crying_obsidian.png") block(blockid=1036, top_image="assets/minecraft/textures/block/lodestone_top.png", side_image="assets/minecraft/textures/block/lodestone_side.png") block(blockid=1041, top_image="assets/minecraft/textures/block/quartz_bricks.png") block(blockid=1042, top_image="assets/minecraft/textures/block/amethyst_block.png") block(blockid=1043, top_image="assets/minecraft/textures/block/raw_iron_block.png") block(blockid=1044, top_image="assets/minecraft/textures/block/raw_gold_block.png") block(blockid=1045, top_image="assets/minecraft/textures/block/budding_amethyst.png") # You have entered the COPPER ZONE block(blockid=[1046, 1050], top_image="assets/minecraft/textures/block/copper_block.png") block(blockid=[1047, 1051], top_image="assets/minecraft/textures/block/exposed_copper.png") block(blockid=[1048, 1052], top_image="assets/minecraft/textures/block/weathered_copper.png") block(blockid=[1049, 1053], top_image="assets/minecraft/textures/block/oxidized_copper.png") # Cut variant block(blockid=[1054, 1058], top_image="assets/minecraft/textures/block/cut_copper.png") block(blockid=[1055, 1059], top_image="assets/minecraft/textures/block/exposed_cut_copper.png") block(blockid=[1056, 1060], top_image="assets/minecraft/textures/block/weathered_cut_copper.png") block(blockid=[1057, 1061], top_image="assets/minecraft/textures/block/oxidized_cut_copper.png") block(blockid=1062, top_image="assets/minecraft/textures/block/raw_copper_block.png") block(blockid=1063, top_image="assets/minecraft/textures/block/copper_ore.png") # You are now leaving the COPPER ZONE
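The campfire and bell renderers above share one helper idea: crop a small region out of a source texture, optionally rotate it, and paste it onto a fresh transparent 16x16 tile before handing it to transform_image_side()/transform_image_top(). A minimal standalone sketch of that create_tile pattern using only Pillow; the solid-colour source texture and the crop box are illustrative stand-ins, not real assets:

from PIL import Image

def create_tile(img_src, coord_crop, coord_paste, rot):
    # Crop a region from the source, optionally rotate it, then paste
    # it onto a transparent 16x16 tile ready for projection.
    img_out = Image.new("RGBA", (16, 16), (0, 0, 0, 0))
    img_in = img_src.crop(coord_crop)
    if rot != 0:
        img_in = img_in.rotate(rot, expand=True)
    img_out.paste(img_in, coord_paste)
    return img_out

# Illustrative usage: take the top 16x4 strip of a texture, flip it
# with a 180-degree rotation, and centre it vertically on the tile.
texture = Image.new("RGBA", (16, 16), (128, 64, 32, 255))
tile = create_tile(texture, (0, 0, 16, 4), (0, 6), 180)
assert tile.size == (16, 16)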
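Both renderers also rotate only the low two facing bits of the pseudo-data value while masking off the higher flag bits (lit state for campfires, attachment type for bells), as in data = data & 0b100 | ((self.rotation + (data & 0b11)) % 4). The same bit-twiddling as a small pure function, with hypothetical names:

def rotate_facing(data, rotation, flag_mask=0b100):
    # Preserve the flag bits, rotate only the 2-bit facing value.
    return (data & flag_mask) | ((rotation + (data & 0b11)) % 4)

# A lit campfire (0b100 set) facing east (0b01), rendered at the next
# map rotation, stays lit and now faces south (0b10).
assert rotate_facing(0b101, 1) == 0b110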
enaut/Minecraft-Overviewer
overviewer_core/textures.py
Python
gpl-3.0
265,312
[ "BLAST", "CRYSTAL" ]
7d5effb9846602f32a849855b871097d166bda3fd9a0469c02e069619727e732
from sqlalchemy import extract
from sqlalchemy import select
from sqlalchemy import sql
from sqlalchemy.dialects import sybase
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import fixtures


class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
    __dialect__ = sybase.dialect()

    def test_extract(self):
        t = sql.table("t", sql.column("col1"))
        mapping = {
            "day": "day",
            "doy": "dayofyear",
            "dow": "weekday",
            "milliseconds": "millisecond",
            "millisecond": "millisecond",
            "year": "year",
        }
        for field, subst in list(mapping.items()):
            self.assert_compile(
                select([extract(field, t.c.col1)]),
                'SELECT DATEPART("%s", t.col1) AS anon_1 FROM t' % subst,
            )

    def test_offset_not_supported(self):
        stmt = select([1]).offset(10)
        assert_raises_message(
            NotImplementedError,
            "Sybase ASE does not support OFFSET",
            stmt.compile,
            dialect=self.__dialect__,
        )

    def test_delete_extra_froms(self):
        t1 = sql.table("t1", sql.column("c1"))
        t2 = sql.table("t2", sql.column("c1"))
        q = sql.delete(t1).where(t1.c.c1 == t2.c.c1)
        self.assert_compile(
            q, "DELETE FROM t1 FROM t1, t2 WHERE t1.c1 = t2.c1"
        )

    def test_delete_extra_froms_alias(self):
        a1 = sql.table("t1", sql.column("c1")).alias("a1")
        t2 = sql.table("t2", sql.column("c1"))
        q = sql.delete(a1).where(a1.c.c1 == t2.c.c1)
        self.assert_compile(
            q, "DELETE FROM a1 FROM t1 AS a1, t2 WHERE a1.c1 = t2.c1"
        )
        self.assert_compile(sql.delete(a1), "DELETE FROM t1 AS a1")
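test_extract above pins down how the Sybase dialect renders SQL EXTRACT fields as DATEPART names. The same compilation can be checked interactively; this assumes a SQLAlchemy version that still bundles the sybase dialect (it was removed from the core distribution in 1.4):

from sqlalchemy import sql, select, extract
from sqlalchemy.dialects import sybase

t = sql.table("t", sql.column("col1"))
stmt = select([extract("dow", t.c.col1)])
# "dow" maps to Sybase's "weekday" DATEPART field.
print(stmt.compile(dialect=sybase.dialect()))
# SELECT DATEPART("weekday", t.col1) AS anon_1 FROM t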
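test_offset_not_supported documents that the dialect has no rendering for OFFSET at all. Outside the test harness the same failure looks like this (same SQLAlchemy-version assumption as above):

from sqlalchemy import select
from sqlalchemy.dialects import sybase

stmt = select([1]).offset(10)
try:
    stmt.compile(dialect=sybase.dialect())
except NotImplementedError as exc:
    print(exc)  # Sybase ASE does not support OFFSET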
graingert/sqlalchemy
test/dialect/test_sybase.py
Python
mit
1,838
[ "ASE" ]
0025d8987321a038a889186e9019c1c79b46a99703695a492086bcfe8d822ed3
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Integration tests for loading and saving netcdf files."""

# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests

from os.path import join as path_join, dirname, sep as os_sep
import shutil
from subprocess import check_call
import tempfile

import iris
from iris.tests import stock


class TestClimatology(iris.tests.IrisTest):
    reference_cdl_path = os_sep.join(
        [
            dirname(tests.__file__),
            (
                "results/integration/climatology/TestClimatology/"
                "reference_simpledata.cdl"
            ),
        ]
    )

    @classmethod
    def _simple_cdl_string(cls):
        with open(cls.reference_cdl_path, "r") as f:
            cdl_content = f.read()
        # Add the expected CDL first line since this is removed from the
        # stored results file.
        cdl_content = "netcdf {\n" + cdl_content
        return cdl_content

    @staticmethod
    def _load_sanitised_cube(filepath):
        cube = iris.load_cube(filepath)
        # Remove attributes convention, if any.
        cube.attributes.pop("Conventions", None)
        # Remove any var-names.
        for coord in cube.coords():
            coord.var_name = None
        cube.var_name = None
        return cube

    @classmethod
    def setUpClass(cls):
        # Create a temp directory for temp files.
        cls.temp_dir = tempfile.mkdtemp()
        cls.path_ref_cdl = path_join(cls.temp_dir, "standard.cdl")
        cls.path_ref_nc = path_join(cls.temp_dir, "standard.nc")
        # Create reference CDL file.
        with open(cls.path_ref_cdl, "w") as f_out:
            f_out.write(cls._simple_cdl_string())
        # Create reference netCDF file from reference CDL.
        command = "ncgen -o {} {}".format(cls.path_ref_nc, cls.path_ref_cdl)
        check_call(command, shell=True)
        cls.path_temp_nc = path_join(cls.temp_dir, "tmp.nc")
        # Create reference cube.
        cls.cube_ref = stock.climatology_3d()

    @classmethod
    def tearDownClass(cls):
        # Destroy a temp directory for temp files.
        shutil.rmtree(cls.temp_dir)

    ###########################################################################
    # Round-trip tests

    def test_cube_to_cube(self):
        # Save reference cube to file, load cube from same file, test against
        # reference cube.
        iris.save(self.cube_ref, self.path_temp_nc)
        cube = self._load_sanitised_cube(self.path_temp_nc)
        self.assertEqual(cube, self.cube_ref)

    def test_file_to_file(self):
        # Load cube from reference file, save same cube to file, test against
        # reference CDL.
        cube = iris.load_cube(self.path_ref_nc)
        iris.save(cube, self.path_temp_nc)
        self.assertCDL(
            self.path_temp_nc,
            reference_filename=self.reference_cdl_path,
            flags="",
        )

    # NOTE:
    # The saving half of the round-trip tests is tested in the
    # appropriate dedicated test class:
    # unit.fileformats.netcdf.test_Saver.Test_write.test_with_climatology .
    # The loading half has no equivalent dedicated location, so is tested
    # here as test_load_from_file.

    def test_load_from_file(self):
        # Create cube from file, test against reference cube.
        cube = self._load_sanitised_cube(self.path_ref_nc)
        self.assertEqual(cube, self.cube_ref)


if __name__ == "__main__":
    tests.main()
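The three round-trip tests above all reduce to a save/load/compare cycle. A condensed sketch of the cube-to-cube direction, assuming an Iris checkout where stock.climatology_3d() is available; as in _load_sanitised_cube, the comparison only holds after stripping the Conventions attribute and var-names that saving introduces:

import tempfile
from os.path import join

import iris
from iris.tests import stock

cube_ref = stock.climatology_3d()
path = join(tempfile.mkdtemp(), "tmp.nc")
iris.save(cube_ref, path)

cube = iris.load_cube(path)
cube.attributes.pop("Conventions", None)  # added by the netCDF saver
for coord in cube.coords():
    coord.var_name = None
cube.var_name = None
assert cube == cube_ref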
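setUpClass materialises its reference netCDF file by shelling out to ncgen, the netCDF utility that compiles a textual CDL description into a binary file. That step in isolation, with illustrative paths and assuming ncgen is on PATH:

from subprocess import check_call

# Equivalent to running: ncgen -o standard.nc standard.cdl
check_call("ncgen -o {} {}".format("standard.nc", "standard.cdl"), shell=True)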
pp-mo/iris
lib/iris/tests/integration/test_climatology.py
Python
lgpl-3.0
3,683
[ "NetCDF" ]
e39f445cbbad07559e68edf1720b47686506531dcd99fe402f39838a27103d4e
from __future__ import unicode_literals import base64 import datetime import hashlib import json import netrc import os import re import socket import sys import time from ..compat import ( compat_cookiejar, compat_cookies, compat_getpass, compat_http_client, compat_urllib_error, compat_urllib_parse, compat_urlparse, compat_str, compat_etree_fromstring, ) from ..utils import ( NO_DEFAULT, age_restricted, bug_reports_message, clean_html, compiled_regex_type, determine_ext, error_to_compat_str, ExtractorError, fix_xml_ampersands, float_or_none, int_or_none, parse_iso8601, RegexNotFoundError, sanitize_filename, sanitized_Request, unescapeHTML, unified_strdate, url_basename, xpath_text, xpath_with_ns, determine_protocol, ) class InfoExtractor(object): """Information Extractor class. Information extractors are the classes that, given a URL, extract information about the video (or videos) the URL refers to. This information includes the real video URL, the video title, author and others. The information is stored in a dictionary which is then passed to the YoutubeDL. The YoutubeDL processes this information possibly downloading the video to the file system, among other possible outcomes. The type field determines the type of the result. By far the most common value (and the default if _type is missing) is "video", which indicates a single video. For a video, the dictionaries must include the following fields: id: Video identifier. title: Video title, unescaped. Additionally, it must contain either a formats entry or a url one: formats: A list of dictionaries for each format available, ordered from worst to best quality. Potential fields: * url Mandatory. The URL of the video file * ext Will be calculated from URL if missing * format A human-readable description of the format ("mp4 container with h264/opus"). Calculated from the format_id, width, height. and format_note fields if missing. * format_id A short description of the format ("mp4_h264_opus" or "19"). Technically optional, but strongly recommended. * format_note Additional info about the format ("3D" or "DASH video") * width Width of the video, if known * height Height of the video, if known * resolution Textual description of width and height * tbr Average bitrate of audio and video in KBit/s * abr Average audio bitrate in KBit/s * acodec Name of the audio codec in use * asr Audio sampling rate in Hertz * vbr Average video bitrate in KBit/s * fps Frame rate * vcodec Name of the video codec in use * container Name of the container format * filesize The number of bytes, if known in advance * filesize_approx An estimate for the number of bytes * player_url SWF Player URL (used for rtmpdump). * protocol The protocol that will be used for the actual download, lower-case. "http", "https", "rtsp", "rtmp", "rtmpe", "m3u8", or "m3u8_native". * preference Order number of this format. If this field is present and not None, the formats get sorted by this field, regardless of all other values. -1 for default (order by other properties), -2 or smaller for less than default. < -1000 to hide the format (if there is another one which is strictly better) * language Language code, e.g. "de" or "en-US". * language_preference Is this in the language mentioned in the URL? 10 if it's what the URL is about, -1 for default (don't know), -10 otherwise, other values reserved for now. * quality Order number of the video quality of this format, irrespective of the file format. -1 for default (order by other properties), -2 or smaller for less than default. 
* source_preference Order number for this video source (quality takes higher priority) -1 for default (order by other properties), -2 or smaller for less than default. * http_headers A dictionary of additional HTTP headers to add to the request. * stretched_ratio If given and not 1, indicates that the video's pixels are not square. width : height ratio as float. * no_resume The server does not support resuming the (HTTP or RTMP) download. Boolean. url: Final video URL. ext: Video filename extension. format: The video format, defaults to ext (used for --get-format) player_url: SWF Player URL (used for rtmpdump). The following fields are optional: alt_title: A secondary title of the video. display_id An alternative identifier for the video, not necessarily unique, but available before title. Typically, id is something like "4234987", title "Dancing naked mole rats", and display_id "dancing-naked-mole-rats" thumbnails: A list of dictionaries, with the following entries: * "id" (optional, string) - Thumbnail format ID * "url" * "preference" (optional, int) - quality of the image * "width" (optional, int) * "height" (optional, int) * "resolution" (optional, string "{width}x{height"}, deprecated) thumbnail: Full URL to a video thumbnail image. description: Full video description. uploader: Full name of the video uploader. creator: The main artist who created the video. release_date: The date (YYYYMMDD) when the video was released. timestamp: UNIX timestamp of the moment the video became available. upload_date: Video upload date (YYYYMMDD). If not explicitly set, calculated from timestamp. uploader_id: Nickname or id of the video uploader. location: Physical location where the video was filmed. subtitles: The available subtitles as a dictionary in the format {language: subformats}. "subformats" is a list sorted from lower to higher preference, each element is a dictionary with the "ext" entry and one of: * "data": The subtitles file contents * "url": A URL pointing to the subtitles file "ext" will be calculated from URL if missing automatic_captions: Like 'subtitles', used by the YoutubeIE for automatically generated captions duration: Length of the video in seconds, as an integer or float. view_count: How many users have watched the video on the platform. like_count: Number of positive ratings of the video dislike_count: Number of negative ratings of the video repost_count: Number of reposts of the video average_rating: Average rating give by users, the scale used depends on the webpage comment_count: Number of comments on the video comments: A list of comments, each with one or more of the following properties (all but one of text or html optional): * "author" - human-readable name of the comment author * "author_id" - user ID of the comment author * "id" - Comment ID * "html" - Comment as HTML * "text" - Plain text of the comment * "timestamp" - UNIX timestamp of comment * "parent" - ID of the comment this one is replying to. Set to "root" to indicate that this is a comment to the original video. age_limit: Age restriction for the video, as an integer (years) webpage_url: The URL to the video webpage, if given to youtube-dl it should allow to get the same result again. (It will be set by YoutubeDL if it's missing) categories: A list of categories that the video falls in, for example ["Sports", "Berlin"] tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"] is_live: True, False, or None (=unknown). 
Whether this video is a live stream that goes on instead of a fixed-length video. start_time: Time in seconds where the reproduction should start, as specified in the URL. end_time: Time in seconds where the reproduction should end, as specified in the URL. The following fields should only be used when the video belongs to some logical chapter or section: chapter: Name or title of the chapter the video belongs to. chapter_number: Number of the chapter the video belongs to, as an integer. chapter_id: Id of the chapter the video belongs to, as a unicode string. The following fields should only be used when the video is an episode of some series or programme: series: Title of the series or programme the video episode belongs to. season: Title of the season the video episode belongs to. season_number: Number of the season the video episode belongs to, as an integer. season_id: Id of the season the video episode belongs to, as a unicode string. episode: Title of the video episode. Unlike mandatory video title field, this field should denote the exact title of the video episode without any kind of decoration. episode_number: Number of the video episode within a season, as an integer. episode_id: Id of the video episode, as a unicode string. Unless mentioned otherwise, the fields should be Unicode strings. Unless mentioned otherwise, None is equivalent to absence of information. _type "playlist" indicates multiple videos. There must be a key "entries", which is a list, an iterable, or a PagedList object, each element of which is a valid dictionary by this specification. Additionally, playlists can have "title", "description" and "id" attributes with the same semantics as videos (see above). _type "multi_video" indicates that there are multiple videos that form a single show, for examples multiple acts of an opera or TV episode. It must have an entries key like a playlist and contain all the keys required for a video at the same time. _type "url" indicates that the video must be extracted from another location, possibly by a different extractor. Its only required key is: "url" - the next URL to extract. The key "ie_key" can be set to the class name (minus the trailing "IE", e.g. "Youtube") if the extractor class is known in advance. Additionally, the dictionary may have any properties of the resolved entity known in advance, for example "title" if the title of the referred video is known ahead of time. _type "url_transparent" entities have the same specification as "url", but indicate that the given additional information is more precise than the one associated with the resolved URL. This is useful when a site employs a video service that hosts the video and its technical metadata, but that video service does not embed a useful title, description etc. Subclasses of this one should re-define the _real_initialize() and _real_extract() methods and define a _VALID_URL regexp. Probably, they should also be added to the list of extractors. Finally, the _WORKING attribute should be set to False for broken IEs in order to warn the users and skip the tests. """ _ready = False _downloader = None _WORKING = True def __init__(self, downloader=None): """Constructor. 
Receives an optional downloader.""" self._ready = False self.set_downloader(downloader) @classmethod def suitable(cls, url): """Receives a URL and returns True if suitable for this IE.""" # This does not use has/getattr intentionally - we want to know whether # we have cached the regexp for *this* class, whereas getattr would also # match the superclass if '_VALID_URL_RE' not in cls.__dict__: cls._VALID_URL_RE = re.compile(cls._VALID_URL) return cls._VALID_URL_RE.match(url) is not None @classmethod def _match_id(cls, url): if '_VALID_URL_RE' not in cls.__dict__: cls._VALID_URL_RE = re.compile(cls._VALID_URL) m = cls._VALID_URL_RE.match(url) assert m return m.group('id') @classmethod def working(cls): """Getter method for _WORKING.""" return cls._WORKING def initialize(self): """Initializes an instance (authentication, etc).""" if not self._ready: self._real_initialize() self._ready = True def extract(self, url): """Extracts URL information and returns it in list of dicts.""" try: self.initialize() return self._real_extract(url) except ExtractorError: raise except compat_http_client.IncompleteRead as e: raise ExtractorError('A network error has occurred.', cause=e, expected=True) except (KeyError, StopIteration) as e: raise ExtractorError('An extractor error has occurred.', cause=e) def set_downloader(self, downloader): """Sets the downloader for this IE.""" self._downloader = downloader def _real_initialize(self): """Real initialization process. Redefine in subclasses.""" pass def _real_extract(self, url): """Real extraction process. Redefine in subclasses.""" pass @classmethod def ie_key(cls): """A string for getting the InfoExtractor with get_info_extractor""" return compat_str(cls.__name__[:-2]) @property def IE_NAME(self): return compat_str(type(self).__name__[:-2]) def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True): """ Returns the response handle """ if note is None: self.report_download_webpage(video_id) elif note is not False: if video_id is None: self.to_screen('%s' % (note,)) else: self.to_screen('%s: %s' % (video_id, note)) try: return self._downloader.urlopen(url_or_request) except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: if errnote is False: return False if errnote is None: errnote = 'Unable to download webpage' errmsg = '%s: %s' % (errnote, error_to_compat_str(err)) if fatal: raise ExtractorError(errmsg, sys.exc_info()[2], cause=err) else: self._downloader.report_warning(errmsg) return False def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None): """ Returns a tuple (page content as string, URL handle) """ # Strip hashes from the URL (#1038) if isinstance(url_or_request, (compat_str, str)): url_or_request = url_or_request.partition('#')[0] urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal) if urlh is False: assert not fatal return False content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding) return (content, urlh) @staticmethod def _guess_encoding_from_content(content_type, webpage_bytes): m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type) if m: encoding = m.group(1) else: m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]', webpage_bytes[:1024]) if m: encoding = m.group(1).decode('ascii') elif webpage_bytes.startswith(b'\xff\xfe'): encoding = 'utf-16' else: encoding = 'utf-8' return encoding def 
_webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None): content_type = urlh.headers.get('Content-Type', '') webpage_bytes = urlh.read() if prefix is not None: webpage_bytes = prefix + webpage_bytes if not encoding: encoding = self._guess_encoding_from_content(content_type, webpage_bytes) if self._downloader.params.get('dump_intermediate_pages', False): try: url = url_or_request.get_full_url() except AttributeError: url = url_or_request self.to_screen('Dumping request to ' + url) dump = base64.b64encode(webpage_bytes).decode('ascii') self._downloader.to_screen(dump) if self._downloader.params.get('write_pages', False): try: url = url_or_request.get_full_url() except AttributeError: url = url_or_request basen = '%s_%s' % (video_id, url) if len(basen) > 240: h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest() basen = basen[:240 - len(h)] + h raw_filename = basen + '.dump' filename = sanitize_filename(raw_filename, restricted=True) self.to_screen('Saving request to ' + filename) # Working around MAX_PATH limitation on Windows (see # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) if os.name == 'nt': absfilepath = os.path.abspath(filename) if len(absfilepath) > 259: filename = '\\\\?\\' + absfilepath with open(filename, 'wb') as outf: outf.write(webpage_bytes) try: content = webpage_bytes.decode(encoding, 'replace') except LookupError: content = webpage_bytes.decode('utf-8', 'replace') if ('<title>Access to this site is blocked</title>' in content and 'Websense' in content[:512]): msg = 'Access to this webpage has been blocked by Websense filtering software in your network.' blocked_iframe = self._html_search_regex( r'<iframe src="([^"]+)"', content, 'Websense information URL', default=None) if blocked_iframe: msg += ' Visit %s for more details' % blocked_iframe raise ExtractorError(msg, expected=True) if '<title>The URL you requested has been blocked</title>' in content[:512]: msg = ( 'Access to this webpage has been blocked by Indian censorship. 
' 'Use a VPN or proxy server (with --proxy) to route around it.') block_msg = self._html_search_regex( r'</h1><p>(.*?)</p>', content, 'block message', default=None) if block_msg: msg += ' (Message: "%s")' % block_msg.replace('\n', ' ') raise ExtractorError(msg, expected=True) return content def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None): """ Returns the data of the page as a string """ success = False try_count = 0 while success is False: try: res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding) success = True except compat_http_client.IncompleteRead as e: try_count += 1 if try_count >= tries: raise e self._sleep(timeout, video_id) if res is False: return res else: content, _ = res return content def _download_xml(self, url_or_request, video_id, note='Downloading XML', errnote='Unable to download XML', transform_source=None, fatal=True, encoding=None): """Return the xml as an xml.etree.ElementTree.Element""" xml_string = self._download_webpage( url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding) if xml_string is False: return xml_string if transform_source: xml_string = transform_source(xml_string) return compat_etree_fromstring(xml_string.encode('utf-8')) def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata', errnote='Unable to download JSON metadata', transform_source=None, fatal=True, encoding=None): json_string = self._download_webpage( url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding) if (not fatal) and json_string is False: return None return self._parse_json( json_string, video_id, transform_source=transform_source, fatal=fatal) def _parse_json(self, json_string, video_id, transform_source=None, fatal=True): if transform_source: json_string = transform_source(json_string) try: return json.loads(json_string) except ValueError as ve: errmsg = '%s: Failed to parse JSON ' % video_id if fatal: raise ExtractorError(errmsg, cause=ve) else: self.report_warning(errmsg + str(ve)) def report_warning(self, msg, video_id=None): idstr = '' if video_id is None else '%s: ' % video_id self._downloader.report_warning( '[%s] %s%s' % (self.IE_NAME, idstr, msg)) def to_screen(self, msg): """Print msg to screen, prefixing it with '[ie_name]'""" self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg)) def report_extraction(self, id_or_name): """Report information extraction.""" self.to_screen('%s: Extracting information' % id_or_name) def report_download_webpage(self, video_id): """Report webpage download.""" self.to_screen('%s: Downloading webpage' % video_id) def report_age_confirmation(self): """Report attempt to confirm age.""" self.to_screen('Confirming age') def report_login(self): """Report attempt to log in.""" self.to_screen('Logging in') @staticmethod def raise_login_required(msg='This video is only available for registered users'): raise ExtractorError( '%s. Use --username and --password or --netrc to provide account credentials.' % msg, expected=True) @staticmethod def raise_geo_restricted(msg='This video is not available from your location due to geo restriction'): raise ExtractorError( '%s. You might want to use --proxy to workaround.' 
% msg, expected=True) # Methods for following #608 @staticmethod def url_result(url, ie=None, video_id=None, video_title=None): """Returns a URL that points to a page that should be processed""" # TODO: ie should be the class used for getting the info video_info = {'_type': 'url', 'url': url, 'ie_key': ie} if video_id is not None: video_info['id'] = video_id if video_title is not None: video_info['title'] = video_title return video_info @staticmethod def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None): """Returns a playlist""" video_info = {'_type': 'playlist', 'entries': entries} if playlist_id: video_info['id'] = playlist_id if playlist_title: video_info['title'] = playlist_title if playlist_description: video_info['description'] = playlist_description return video_info def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None): """ Perform a regex search on the given string, using a single or a list of patterns returning the first matching group. In case of failure return a default value or raise a WARNING or a RegexNotFoundError, depending on fatal, specifying the field name. """ if isinstance(pattern, (str, compat_str, compiled_regex_type)): mobj = re.search(pattern, string, flags) else: for p in pattern: mobj = re.search(p, string, flags) if mobj: break if not self._downloader.params.get('no_color') and os.name != 'nt' and sys.stderr.isatty(): _name = '\033[0;34m%s\033[0m' % name else: _name = name if mobj: if group is None: # return the first matching group return next(g for g in mobj.groups() if g is not None) else: return mobj.group(group) elif default is not NO_DEFAULT: return default elif fatal: raise RegexNotFoundError('Unable to extract %s' % _name) else: self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message()) return None def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None): """ Like _search_regex, but strips HTML tags and unescapes entities. 
""" res = self._search_regex(pattern, string, name, default, fatal, flags, group) if res: return clean_html(res).strip() else: return res def _get_login_info(self): """ Get the login info as (username, password) It will look in the netrc file using the _NETRC_MACHINE value If there's no info available, return (None, None) """ if self._downloader is None: return (None, None) username = None password = None downloader_params = self._downloader.params # Attempt to use provided username and password or .netrc data if downloader_params.get('username', None) is not None: username = downloader_params['username'] password = downloader_params['password'] elif downloader_params.get('usenetrc', False): try: info = netrc.netrc().authenticators(self._NETRC_MACHINE) if info is not None: username = info[0] password = info[2] else: raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE) except (IOError, netrc.NetrcParseError) as err: self._downloader.report_warning('parsing .netrc: %s' % error_to_compat_str(err)) return (username, password) def _get_tfa_info(self, note='two-factor verification code'): """ Get the two-factor authentication info TODO - asking the user will be required for sms/phone verify currently just uses the command line option If there's no info available, return None """ if self._downloader is None: return None downloader_params = self._downloader.params if downloader_params.get('twofactor', None) is not None: return downloader_params['twofactor'] return compat_getpass('Type %s and press [Return]: ' % note) # Helper functions for extracting OpenGraph info @staticmethod def _og_regexes(prop): content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))' property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)' % {'prop': re.escape(prop)}) template = r'<meta[^>]+?%s[^>]+?%s' return [ template % (property_re, content_re), template % (content_re, property_re), ] @staticmethod def _meta_regex(prop): return r'''(?isx)<meta (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1) [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop) def _og_search_property(self, prop, html, name=None, **kargs): if name is None: name = 'OpenGraph %s' % prop escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs) if escaped is None: return None return unescapeHTML(escaped) def _og_search_thumbnail(self, html, **kargs): return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs) def _og_search_description(self, html, **kargs): return self._og_search_property('description', html, fatal=False, **kargs) def _og_search_title(self, html, **kargs): return self._og_search_property('title', html, **kargs) def _og_search_video_url(self, html, name='video url', secure=True, **kargs): regexes = self._og_regexes('video') + self._og_regexes('video:url') if secure: regexes = self._og_regexes('video:secure_url') + regexes return self._html_search_regex(regexes, html, name, **kargs) def _og_search_url(self, html, **kargs): return self._og_search_property('url', html, **kargs) def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs): if display_name is None: display_name = name return self._html_search_regex( self._meta_regex(name), html, display_name, fatal=fatal, group='content', **kwargs) def _dc_search_uploader(self, html): return self._html_search_meta('dc.creator', html, 'uploader') def _rta_search(self, html): # See 
http://www.rtalabel.org/index.php?content=howtofaq#single if re.search(r'(?ix)<meta\s+name="rating"\s+' r' content="RTA-5042-1996-1400-1577-RTA"', html): return 18 return 0 def _media_rating_search(self, html): # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/ rating = self._html_search_meta('rating', html) if not rating: return None RATING_TABLE = { 'safe for kids': 0, 'general': 8, '14 years': 14, 'mature': 17, 'restricted': 19, } return RATING_TABLE.get(rating.lower(), None) def _family_friendly_search(self, html): # See http://schema.org/VideoObject family_friendly = self._html_search_meta('isFamilyFriendly', html) if not family_friendly: return None RATING_TABLE = { '1': 0, 'true': 0, '0': 18, 'false': 18, } return RATING_TABLE.get(family_friendly.lower(), None) def _twitter_search_player(self, html): return self._html_search_meta('twitter:player', html, 'twitter card player') def _search_json_ld(self, html, video_id, **kwargs): json_ld = self._search_regex( r'(?s)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>', html, 'JSON-LD', group='json_ld', **kwargs) if not json_ld: return {} return self._json_ld(json_ld, video_id, fatal=kwargs.get('fatal', True)) def _json_ld(self, json_ld, video_id, fatal=True): if isinstance(json_ld, compat_str): json_ld = self._parse_json(json_ld, video_id, fatal=fatal) if not json_ld: return {} info = {} if json_ld.get('@context') == 'http://schema.org': item_type = json_ld.get('@type') if item_type == 'TVEpisode': info.update({ 'episode': unescapeHTML(json_ld.get('name')), 'episode_number': int_or_none(json_ld.get('episodeNumber')), 'description': unescapeHTML(json_ld.get('description')), }) part_of_season = json_ld.get('partOfSeason') if isinstance(part_of_season, dict) and part_of_season.get('@type') == 'TVSeason': info['season_number'] = int_or_none(part_of_season.get('seasonNumber')) part_of_series = json_ld.get('partOfSeries') if isinstance(part_of_series, dict) and part_of_series.get('@type') == 'TVSeries': info['series'] = unescapeHTML(part_of_series.get('name')) elif item_type == 'Article': info.update({ 'timestamp': parse_iso8601(json_ld.get('datePublished')), 'title': unescapeHTML(json_ld.get('headline')), 'description': unescapeHTML(json_ld.get('articleBody')), }) return dict((k, v) for k, v in info.items() if v is not None) @staticmethod def _hidden_inputs(html): html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html) hidden_inputs = {} for input in re.findall(r'(?i)<input([^>]+)>', html): if not re.search(r'type=(["\'])(?:hidden|submit)\1', input): continue name = re.search(r'name=(["\'])(?P<value>.+?)\1', input) if not name: continue value = re.search(r'value=(["\'])(?P<value>.*?)\1', input) if not value: continue hidden_inputs[name.group('value')] = value.group('value') return hidden_inputs def _form_hidden_inputs(self, form_id, html): form = self._search_regex( r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id, html, '%s form' % form_id, group='form') return self._hidden_inputs(form) def _sort_formats(self, formats, field_preference=None): if not formats: raise ExtractorError('No video formats found') for f in formats: # Automatically determine tbr when missing based on abr and vbr (improves # formats sorting in some cases) if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None: f['tbr'] = f['abr'] + f['vbr'] def _formats_key(f): # TODO remove the following workaround from ..utils import determine_ext if not f.get('ext') and 'url' in 
f: f['ext'] = determine_ext(f['url']) if isinstance(field_preference, (list, tuple)): return tuple(f.get(field) if f.get(field) is not None else -1 for field in field_preference) preference = f.get('preference') if preference is None: preference = 0 if f.get('ext') in ['f4f', 'f4m']: # Not yet supported preference -= 0.5 proto_preference = 0 if determine_protocol(f) in ['http', 'https'] else -0.1 if f.get('vcodec') == 'none': # audio only if self._downloader.params.get('prefer_free_formats'): ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus'] else: ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a'] ext_preference = 0 try: audio_ext_preference = ORDER.index(f['ext']) except ValueError: audio_ext_preference = -1 else: if self._downloader.params.get('prefer_free_formats'): ORDER = ['flv', 'mp4', 'webm'] else: ORDER = ['webm', 'flv', 'mp4'] try: ext_preference = ORDER.index(f['ext']) except ValueError: ext_preference = -1 audio_ext_preference = 0 return ( preference, f.get('language_preference') if f.get('language_preference') is not None else -1, f.get('quality') if f.get('quality') is not None else -1, f.get('tbr') if f.get('tbr') is not None else -1, f.get('filesize') if f.get('filesize') is not None else -1, f.get('vbr') if f.get('vbr') is not None else -1, f.get('height') if f.get('height') is not None else -1, f.get('width') if f.get('width') is not None else -1, proto_preference, ext_preference, f.get('abr') if f.get('abr') is not None else -1, audio_ext_preference, f.get('fps') if f.get('fps') is not None else -1, f.get('filesize_approx') if f.get('filesize_approx') is not None else -1, f.get('source_preference') if f.get('source_preference') is not None else -1, f.get('format_id') if f.get('format_id') is not None else '', ) formats.sort(key=_formats_key) def _check_formats(self, formats, video_id): if formats: formats[:] = filter( lambda f: self._is_valid_url( f['url'], video_id, item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'), formats) def _is_valid_url(self, url, video_id, item='video'): url = self._proto_relative_url(url, scheme='http:') # For now assume non HTTP(S) URLs always valid if not (url.startswith('http://') or url.startswith('https://')): return True try: self._request_webpage(url, video_id, 'Checking %s URL' % item) return True except ExtractorError as e: if isinstance(e.cause, compat_urllib_error.URLError): self.to_screen( '%s: %s URL is invalid, skipping' % (video_id, item)) return False raise def http_scheme(self): """ Either "http:" or "https:", depending on the user's preferences """ return ( 'http:' if self._downloader.params.get('prefer_insecure', False) else 'https:') def _proto_relative_url(self, url, scheme=None): if url is None: return url if url.startswith('//'): if scheme is None: scheme = self.http_scheme() return scheme + url else: return url def _sleep(self, timeout, video_id, msg_template=None): if msg_template is None: msg_template = '%(video_id)s: Waiting for %(timeout)s seconds' msg = msg_template % {'video_id': video_id, 'timeout': timeout} self.to_screen(msg) time.sleep(timeout) def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None, transform_source=lambda s: fix_xml_ampersands(s).strip(), fatal=True): manifest = self._download_xml( manifest_url, video_id, 'Downloading f4m manifest', 'Unable to download f4m manifest', # Some manifests may be malformed, e.g. 
prosiebensat1 generated manifests # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244) transform_source=transform_source, fatal=fatal) if manifest is False: return [] formats = [] manifest_version = '1.0' media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media') if not media_nodes: manifest_version = '2.0' media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media') base_url = xpath_text( manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'], 'base URL', default=None) if base_url: base_url = base_url.strip() for i, media_el in enumerate(media_nodes): if manifest_version == '2.0': media_url = media_el.attrib.get('href') or media_el.attrib.get('url') if not media_url: continue manifest_url = ( media_url if media_url.startswith('http://') or media_url.startswith('https://') else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url)) # If media_url is itself a f4m manifest do the recursive extraction # since bitrates in parent manifest (this one) and media_url manifest # may differ leading to inability to resolve the format by requested # bitrate in f4m downloader if determine_ext(manifest_url) == 'f4m': formats.extend(self._extract_f4m_formats( manifest_url, video_id, preference, f4m_id, fatal=fatal)) continue tbr = int_or_none(media_el.attrib.get('bitrate')) formats.append({ 'format_id': '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)])), 'url': manifest_url, 'ext': 'flv', 'tbr': tbr, 'width': int_or_none(media_el.attrib.get('width')), 'height': int_or_none(media_el.attrib.get('height')), 'preference': preference, }) self._sort_formats(formats) return formats def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None, entry_protocol='m3u8', preference=None, m3u8_id=None, note=None, errnote=None, fatal=True): formats = [{ 'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])), 'url': m3u8_url, 'ext': ext, 'protocol': 'm3u8', 'preference': preference - 1 if preference else -1, 'resolution': 'multiple', 'format_note': 'Quality selection URL', }] format_url = lambda u: ( u if re.match(r'^https?://', u) else compat_urlparse.urljoin(m3u8_url, u)) res = self._download_webpage_handle( m3u8_url, video_id, note=note or 'Downloading m3u8 information', errnote=errnote or 'Failed to download m3u8 information', fatal=fatal) if res is False: return [] m3u8_doc, urlh = res m3u8_url = urlh.geturl() # A Media Playlist Tag MUST NOT appear in a Master Playlist # https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3 # The EXT-X-TARGETDURATION tag is REQUIRED for every M3U8 Media Playlists # https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.1 if '#EXT-X-TARGETDURATION' in m3u8_doc: return [{ 'url': m3u8_url, 'format_id': m3u8_id, 'ext': ext, 'protocol': entry_protocol, 'preference': preference, }] last_info = None last_media = None kv_rex = re.compile( r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)') for line in m3u8_doc.splitlines(): if line.startswith('#EXT-X-STREAM-INF:'): last_info = {} for m in kv_rex.finditer(line): v = m.group('val') if v.startswith('"'): v = v[1:-1] last_info[m.group('key')] = v elif line.startswith('#EXT-X-MEDIA:'): last_media = {} for m in kv_rex.finditer(line): v = m.group('val') if v.startswith('"'): v = v[1:-1] last_media[m.group('key')] = v elif line.startswith('#') or not line.strip(): continue else: if last_info is None: formats.append({'url': format_url(line)}) continue tbr = 
int_or_none(last_info.get('BANDWIDTH'), scale=1000) format_id = [] if m3u8_id: format_id.append(m3u8_id) last_media_name = last_media.get('NAME') if last_media and last_media.get('TYPE') != 'SUBTITLES' else None format_id.append(last_media_name if last_media_name else '%d' % (tbr if tbr else len(formats))) f = { 'format_id': '-'.join(format_id), 'url': format_url(line.strip()), 'tbr': tbr, 'ext': ext, 'protocol': entry_protocol, 'preference': preference, } codecs = last_info.get('CODECS') if codecs: # TODO: looks like video codec is not always necessarily goes first va_codecs = codecs.split(',') if va_codecs[0]: f['vcodec'] = va_codecs[0] if len(va_codecs) > 1 and va_codecs[1]: f['acodec'] = va_codecs[1] resolution = last_info.get('RESOLUTION') if resolution: width_str, height_str = resolution.split('x') f['width'] = int(width_str) f['height'] = int(height_str) if last_media is not None: f['m3u8_media'] = last_media last_media = None formats.append(f) last_info = {} self._sort_formats(formats) return formats @staticmethod def _xpath_ns(path, namespace=None): if not namespace: return path out = [] for c in path.split('/'): if not c or c == '.': out.append(c) else: out.append('{%s}%s' % (namespace, c)) return '/'.join(out) def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None): smil = self._download_smil(smil_url, video_id, fatal=fatal) if smil is False: assert not fatal return [] namespace = self._parse_smil_namespace(smil) return self._parse_smil_formats( smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params) def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None): smil = self._download_smil(smil_url, video_id, fatal=fatal) if smil is False: return {} return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params) def _download_smil(self, smil_url, video_id, fatal=True): return self._download_xml( smil_url, video_id, 'Downloading SMIL file', 'Unable to download SMIL file', fatal=fatal) def _parse_smil(self, smil, smil_url, video_id, f4m_params=None): namespace = self._parse_smil_namespace(smil) formats = self._parse_smil_formats( smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params) subtitles = self._parse_smil_subtitles(smil, namespace=namespace) video_id = os.path.splitext(url_basename(smil_url))[0] title = None description = None upload_date = None for meta in smil.findall(self._xpath_ns('./head/meta', namespace)): name = meta.attrib.get('name') content = meta.attrib.get('content') if not name or not content: continue if not title and name == 'title': title = content elif not description and name in ('description', 'abstract'): description = content elif not upload_date and name == 'date': upload_date = unified_strdate(content) thumbnails = [{ 'id': image.get('type'), 'url': image.get('src'), 'width': int_or_none(image.get('width')), 'height': int_or_none(image.get('height')), } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')] return { 'id': video_id, 'title': title or video_id, 'description': description, 'upload_date': upload_date, 'thumbnails': thumbnails, 'formats': formats, 'subtitles': subtitles, } def _parse_smil_namespace(self, smil): return self._search_regex( r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None) def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None): base = smil_url for meta in smil.findall(self._xpath_ns('./head/meta', namespace)): b = meta.get('base') or 
meta.get('httpBase') if b: base = b break formats = [] rtmp_count = 0 http_count = 0 m3u8_count = 0 videos = smil.findall(self._xpath_ns('.//video', namespace)) for video in videos: src = video.get('src') if not src: continue bitrate = float_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000) filesize = int_or_none(video.get('size') or video.get('fileSize')) width = int_or_none(video.get('width')) height = int_or_none(video.get('height')) proto = video.get('proto') ext = video.get('ext') src_ext = determine_ext(src) streamer = video.get('streamer') or base if proto == 'rtmp' or streamer.startswith('rtmp'): rtmp_count += 1 formats.append({ 'url': streamer, 'play_path': src, 'ext': 'flv', 'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate), 'tbr': bitrate, 'filesize': filesize, 'width': width, 'height': height, }) if transform_rtmp_url: streamer, src = transform_rtmp_url(streamer, src) formats[-1].update({ 'url': streamer, 'play_path': src, }) continue src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src) if proto == 'm3u8' or src_ext == 'm3u8': m3u8_formats = self._extract_m3u8_formats( src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False) if len(m3u8_formats) == 1: m3u8_count += 1 m3u8_formats[0].update({ 'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate), 'tbr': bitrate, 'width': width, 'height': height, }) formats.extend(m3u8_formats) continue if src_ext == 'f4m': f4m_url = src_url if not f4m_params: f4m_params = { 'hdcore': '3.2.0', 'plugin': 'flowplayer-3.2.0.1', } f4m_url += '&' if '?' in f4m_url else '?' f4m_url += compat_urllib_parse.urlencode(f4m_params) formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False)) continue if src_url.startswith('http') and self._is_valid_url(src, video_id): http_count += 1 formats.append({ 'url': src_url, 'ext': ext or src_ext or 'flv', 'format_id': 'http-%d' % (bitrate or http_count), 'tbr': bitrate, 'filesize': filesize, 'width': width, 'height': height, }) continue self._sort_formats(formats) return formats def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'): subtitles = {} for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))): src = textstream.get('src') if not src: continue ext = textstream.get('ext') or determine_ext(src) if not ext: type_ = textstream.get('type') SUBTITLES_TYPES = { 'text/vtt': 'vtt', 'text/srt': 'srt', 'application/smptett+xml': 'tt', } if type_ in SUBTITLES_TYPES: ext = SUBTITLES_TYPES[type_] lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang subtitles.setdefault(lang, []).append({ 'url': src, 'ext': ext, }) return subtitles def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True): xspf = self._download_xml( playlist_url, playlist_id, 'Downloading xpsf playlist', 'Unable to download xspf manifest', fatal=fatal) if xspf is False: return [] return self._parse_xspf(xspf, playlist_id) def _parse_xspf(self, playlist, playlist_id): NS_MAP = { 'xspf': 'http://xspf.org/ns/0/', 's1': 'http://static.streamone.nl/player/ns/0', } entries = [] for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)): title = xpath_text( track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id) description = xpath_text( track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description') thumbnail = xpath_text( track, xpath_with_ns('./xspf:image', 
NS_MAP), 'thumbnail') duration = float_or_none( xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000) formats = [{ 'url': location.text, 'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)), 'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))), 'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))), } for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))] self._sort_formats(formats) entries.append({ 'id': playlist_id, 'title': title, 'description': description, 'thumbnail': thumbnail, 'duration': duration, 'formats': formats, }) return entries def _download_dash_manifest(self, dash_manifest_url, video_id, fatal=True): return self._download_xml( dash_manifest_url, video_id, note='Downloading DASH manifest', errnote='Could not download DASH manifest', fatal=fatal) def _extract_dash_manifest_formats(self, dash_manifest_url, video_id, fatal=True, namespace=None, formats_dict={}): dash_doc = self._download_dash_manifest(dash_manifest_url, video_id, fatal) if dash_doc is False: return [] return self._parse_dash_manifest( dash_doc, namespace=namespace, formats_dict=formats_dict) def _parse_dash_manifest(self, dash_doc, namespace=None, formats_dict={}): def _add_ns(path): return self._xpath_ns(path, namespace) formats = [] for a in dash_doc.findall('.//' + _add_ns('AdaptationSet')): mime_type = a.attrib.get('mimeType') for r in a.findall(_add_ns('Representation')): mime_type = r.attrib.get('mimeType') or mime_type url_el = r.find(_add_ns('BaseURL')) if mime_type == 'text/vtt': # TODO implement WebVTT downloading pass elif mime_type.startswith('audio/') or mime_type.startswith('video/'): segment_list = r.find(_add_ns('SegmentList')) format_id = r.attrib['id'] video_url = url_el.text if url_el is not None else None filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None) f = { 'format_id': format_id, 'url': video_url, 'width': int_or_none(r.attrib.get('width')), 'height': int_or_none(r.attrib.get('height')), 'tbr': int_or_none(r.attrib.get('bandwidth'), 1000), 'asr': int_or_none(r.attrib.get('audioSamplingRate')), 'filesize': filesize, 'fps': int_or_none(r.attrib.get('frameRate')), } if segment_list is not None: initialization_url = segment_list.find(_add_ns('Initialization')).attrib['sourceURL'] f.update({ 'initialization_url': initialization_url, 'segment_urls': [segment.attrib.get('media') for segment in segment_list.findall(_add_ns('SegmentURL'))], 'protocol': 'http_dash_segments', }) if not f.get('url'): f['url'] = initialization_url try: existing_format = next( fo for fo in formats if fo['format_id'] == format_id) except StopIteration: full_info = formats_dict.get(format_id, {}).copy() full_info.update(f) codecs = r.attrib.get('codecs') if codecs: if mime_type.startswith('video/'): vcodec, acodec = codecs, 'none' else: # mime_type.startswith('audio/') vcodec, acodec = 'none', codecs full_info.update({ 'vcodec': vcodec, 'acodec': acodec, }) formats.append(full_info) else: existing_format.update(f) else: self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type) return formats def _live_title(self, name): """ Generate the title for a live video """ now = datetime.datetime.now() now_str = now.strftime("%Y-%m-%d %H:%M") return name + ' ' + now_str def _int(self, v, name, fatal=False, **kwargs): res = int_or_none(v, **kwargs) if 'get_attr' in kwargs: print(getattr(v, kwargs['get_attr'])) if res is None: msg = 'Failed to extract %s: 
Could not parse value %r' % (name, v) if fatal: raise ExtractorError(msg) else: self._downloader.report_warning(msg) return res def _float(self, v, name, fatal=False, **kwargs): res = float_or_none(v, **kwargs) if res is None: msg = 'Failed to extract %s: Could not parse value %r' % (name, v) if fatal: raise ExtractorError(msg) else: self._downloader.report_warning(msg) return res def _set_cookie(self, domain, name, value, expire_time=None): cookie = compat_cookiejar.Cookie( 0, name, value, None, None, domain, None, None, '/', True, False, expire_time, '', None, None, None) self._downloader.cookiejar.set_cookie(cookie) def _get_cookies(self, url): """ Return a compat_cookies.SimpleCookie with the cookies for the url """ req = sanitized_Request(url) self._downloader.cookiejar.add_cookie_header(req) return compat_cookies.SimpleCookie(req.get_header('Cookie')) def get_testcases(self, include_onlymatching=False): t = getattr(self, '_TEST', None) if t: assert not hasattr(self, '_TESTS'), \ '%s has _TEST and _TESTS' % type(self).__name__ tests = [t] else: tests = getattr(self, '_TESTS', []) for t in tests: if not include_onlymatching and t.get('only_matching', False): continue t['name'] = type(self).__name__[:-len('IE')] yield t def is_suitable(self, age_limit): """ Test whether the extractor is generally suitable for the given age limit (i.e. pornographic sites are not, all others usually are) """ any_restricted = False for tc in self.get_testcases(include_onlymatching=False): if 'playlist' in tc: tc = tc['playlist'][0] is_restricted = age_restricted( tc.get('info_dict', {}).get('age_limit'), age_limit) if not is_restricted: return True any_restricted = any_restricted or is_restricted return not any_restricted def extract_subtitles(self, *args, **kwargs): if (self._downloader.params.get('writesubtitles', False) or self._downloader.params.get('listsubtitles')): return self._get_subtitles(*args, **kwargs) return {} def _get_subtitles(self, *args, **kwargs): raise NotImplementedError("This method must be implemented by subclasses") @staticmethod def _merge_subtitle_items(subtitle_list1, subtitle_list2): """ Merge subtitle items for one language. Items with duplicated URLs will be dropped. """ list1_urls = set([item['url'] for item in subtitle_list1]) ret = list(subtitle_list1) ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls]) return ret @classmethod def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2): """ Merge two subtitle dictionaries, language by language. """ ret = dict(subtitle_dict1) for lang in subtitle_dict2: ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang]) return ret def extract_automatic_captions(self, *args, **kwargs): if (self._downloader.params.get('writeautomaticsub', False) or self._downloader.params.get('listsubtitles')): return self._get_automatic_captions(*args, **kwargs) return {} def _get_automatic_captions(self, *args, **kwargs): raise NotImplementedError("This method must be implemented by subclasses") class SearchInfoExtractor(InfoExtractor): """ Base class for paged search queries extractors. They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query} Instances should define _SEARCH_KEY and _MAX_RESULTS. 
""" @classmethod def _make_valid_url(cls): return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY @classmethod def suitable(cls, url): return re.match(cls._make_valid_url(), url) is not None def _real_extract(self, query): mobj = re.match(self._make_valid_url(), query) if mobj is None: raise ExtractorError('Invalid search query "%s"' % query) prefix = mobj.group('prefix') query = mobj.group('query') if prefix == '': return self._get_n_results(query, 1) elif prefix == 'all': return self._get_n_results(query, self._MAX_RESULTS) else: n = int(prefix) if n <= 0: raise ExtractorError('invalid download number %s for query "%s"' % (n, query)) elif n > self._MAX_RESULTS: self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n)) n = self._MAX_RESULTS return self._get_n_results(query, n) def _get_n_results(self, query, n): """Get a specified number of results for a query""" raise NotImplementedError("This method must be implemented by subclasses") @property def SEARCH_KEY(self): return self._SEARCH_KEY
lzambella/Qyoutube-dl
youtube_dl/extractor/common.py
Python
gpl-3.0
67,363
[ "VisIt" ]
6895605ae4fb868ecdd6038782fea805c8523fe89d9d9aae2acfc530562610dc
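# For reference, the _parse_dash_manifest walk in the extractor above boils
# down to iterating AdaptationSet/Representation nodes and mapping a few
# attributes into format dicts.  A stripped-down, runnable sketch of that
# walk follows; the inline manifest and the field selection are illustrative
# only, and the namespace handling done by _xpath_ns is omitted here.
import xml.etree.ElementTree as ET

MANIFEST = '''<MPD><Period><AdaptationSet mimeType="video/mp4">
<Representation id="v1" bandwidth="1200000" width="1280" height="720">
<BaseURL>http://example.com/v1.mp4</BaseURL></Representation>
</AdaptationSet></Period></MPD>'''

def parse_representations(doc):
    formats = []
    for aset in doc.iter('AdaptationSet'):
        mime_type = aset.get('mimeType')
        for rep in aset.iter('Representation'):
            base_url = rep.find('BaseURL')
            formats.append({
                'format_id': rep.get('id'),
                'url': base_url.text if base_url is not None else None,
                'mime_type': rep.get('mimeType') or mime_type,
                'tbr': int(rep.get('bandwidth', 0)) // 1000,  # bit/s -> kbit/s
                'width': int(rep.get('width', 0)) or None,
                'height': int(rep.get('height', 0)) or None,
            })
    return formats

print(parse_representations(ET.fromstring(MANIFEST)))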
"""Low-level interface to NCBI's EUtils for Entrez search and retrieval. For higher-level interfaces, see DBIdsClient (which works with a set of database identifiers) and HistoryClient (which does a much better job of handling history). There are five classes of services: ESearch - search a database EPost - upload a list of indicies for further use ESummary - get document summaries for a given set of records EFetch - get the records translated to a given format ELink - find related records in other databases You can find more information about them at http://www.ncbi.nlm.nih.gov/entrez/query/static/eutils_help.html but that document isn't very useful. Perhaps the following is better. EUtils offers a structured way to query Entrez, get the results in various formats, and get information about related documents. The way to start off is create an EUtils object. >>> from Bio import EUtils >>> from Bio.EUtils.ThinClient import ThinClient >>> eutils = ThinClient.ThinClient() >>> You can search Entrez with the "esearch" method. This does a query on the server, which generates a list of identifiers for records that matched the query. However, not all the identifiers are returned. You can request only a subset of the matches (using the 'retstart' and 'retmax') terms. This is useful because searches like 'cancer' can have over 1.4 million matches. Most people would rather change the query or look at more details about the first few hits than wait to download all the identifiers before doing anything else. The esearch method, and indeed all these methods, returns a 'urllib.addinfourl' which is an HTTP socket connection that has already parsed the HTTP header and is ready to read the data from the server. For example, here's a query and how to use it Search in PubMed for the term cancer for the entrez date from the last 60 days and retrieve the first 10 IDs and translations using the history parameter. >>> infile = eutils.esearch("cancer", ... daterange = EUtils.WithinNDays(60, "edat"), ... retmax = 10) >>> >>> print infile.read() <?xml version="1.0"?> <!DOCTYPE eSearchResult PUBLIC "-//NLM//DTD eSearchResult, 11 May 2002//EN" "http://www.ncbi.nlm.nih.gov/entrez/query/DTD/eSearch_020511.dtd"> <eSearchResult> <Count>7228</Count> <RetMax>10</RetMax> <RetStart>0</RetStart> <IdList> <Id>12503096</Id> <Id>12503075</Id> <Id>12503073</Id> <Id>12503033</Id> <Id>12503030</Id> <Id>12503028</Id> <Id>12502932</Id> <Id>12502925</Id> <Id>12502881</Id> <Id>12502872</Id> </IdList> <TranslationSet> <Translation> <From>cancer%5BAll+Fields%5D</From> <To>(%22neoplasms%22%5BMeSH+Terms%5D+OR+cancer%5BText+Word%5D)</To> </Translation> </TranslationSet> <TranslationStack> <TermSet> <Term>"neoplasms"[MeSH Terms]</Term> <Field>MeSH Terms</Field> <Count>1407151</Count> <Explode>Y</Explode> </TermSet> <TermSet> <Term>cancer[Text Word]</Term> <Field>Text Word</Field> <Count>382919</Count> <Explode>Y</Explode> </TermSet> <OP>OR</OP> <TermSet> <Term>2002/10/30[edat]</Term> <Field>edat</Field> <Count>-1</Count> <Explode>Y</Explode> </TermSet> <TermSet> <Term>2002/12/29[edat]</Term> <Field>edat</Field> <Count>-1</Count> <Explode>Y</Explode> </TermSet> <OP>RANGE</OP> <OP>AND</OP> </TranslationStack> </eSearchResult> >>> You get a raw XML input stream which you can process in many ways. (The appropriate DTDs are included in the subdirectory "DTDs" and see also the included POM reading code.) WARNING! 
As of this writing (2002/12/3) NCBI returns their XML encoded as Latin-1 but their processing instruction says it is UTF-8 because they leave out the "encoding" attribute. Until they fix it you will need to recode the input stream before processing it with XML tools, like this

    import codecs
    infile = codecs.EncodedFile(infile, "utf-8", "iso-8859-1")

The XML fields are mostly understandable:
  Count -- the total number of matches from this search
  RetMax -- the number of <ID> values returned in this subset
  RetStart -- the start position of this subset in the list of all matches
  IDList and ID -- the identifiers in this subset
  TranslationSet / Translation -- if the search field is not explicitly specified ("qualified"), then the server will apply a set of heuristics to improve the query. Eg, in this case "cancer" is first parsed as
     cancer[All Fields]
then turned into the query
     "neoplasms"[MeSH Terms] OR cancer[Text Word]
Note that these terms are URL escaped. For details on how the translation is done, see
http://www.ncbi.nlm.nih.gov/entrez/query/static/help/pmhelp.html#AutomaticTermMapping
  TranslationStack -- The (possibly 'improved') query fully parsed out and converted into a postfix (RPN) notation. The above example is written in the Entrez query language as
    ("neoplasms"[MeSH Terms] OR cancer[Text Word]) AND 2002/10/30:2002/12/29[edat]
Note that these terms are *not* URL escaped. Nothing like a bit of inconsistency for the soul. The "Count" field shows how many matches were found for each term of the expression. I don't know what "Explode" does.

Let's get more information about the first record, which has an id of 12503096. There are two ways to query for information: one uses a set of identifiers and the other uses the history. I'll talk about the history one in a bit. To use a set of identifiers you need to make a DBIds object containing that list.
>>> dbids = EUtils.DBIds("pubmed", ["12503096"]) >>> Now get the summary using dbids >>> infile = eutils.esummary_using_dbids(dbids) >>> print infile.read() <?xml version="1.0"?> <!DOCTYPE eSummaryResult PUBLIC "-//NLM//DTD eSummaryResult, 11 May 2002//EN" "http://www.ncbi.nlm.nih.gov/entrez/query/DTD/eSummary_020511.dtd"> <eSummaryResult> <DocSum> <Id>12503096</Id> <Item Name="PubDate" Type="Date">2003 Jan 30</Item> <Item Name="Source" Type="String">Am J Med Genet</Item> <Item Name="Authors" Type="String">Coyne JC, Kruus L, Racioppo M, Calzone KA, Armstrong K</Item> <Item Name="Title" Type="String">What do ratings of cancer-specific distress mean among women at high risk of breast and ovarian cancer?</Item> <Item Name="Volume" Type="String">116</Item> <Item Name="Pages" Type="String">222-8</Item> <Item Name="EntrezDate" Type="Date">2002/12/28 04:00</Item> <Item Name="PubMedId" Type="Integer">12503096</Item> <Item Name="MedlineId" Type="Integer">22390532</Item> <Item Name="Lang" Type="String">English</Item> <Item Name="PubType" Type="String"></Item> <Item Name="RecordStatus" Type="String">PubMed - in process</Item> <Item Name="Issue" Type="String">3</Item> <Item Name="SO" Type="String">2003 Jan 30;116(3):222-8</Item> <Item Name="DOI" Type="String">10.1002/ajmg.a.10844</Item> <Item Name="JTA" Type="String">3L4</Item> <Item Name="ISSN" Type="String">0148-7299</Item> <Item Name="PubId" Type="String"></Item> <Item Name="PubStatus" Type="Integer">4</Item> <Item Name="Status" Type="Integer">5</Item> <Item Name="HasAbstract" Type="Integer">1</Item> <Item Name="ArticleIds" Type="List"> <Item Name="PubMedId" Type="String">12503096</Item> <Item Name="DOI" Type="String">10.1002/ajmg.a.10844</Item> <Item Name="MedlineUID" Type="String">22390532</Item> </Item> </DocSum> </eSummaryResult> >>> This is just a summary. To get the full details, including an abstract (if available) use the 'efetch' method. I'll only print a bit to convince you it has an abstract. >>> s = eutils.efetch_using_dbids(dbids).read() >>> print s[587:860] <ArticleTitle>What do ratings of cancer-specific distress mean among women at high risk of breast and ovarian cancer?</ArticleTitle> <Pagination> <MedlinePgn>222-8</MedlinePgn> </Pagination> <Abstract> <AbstractText>Women recruited from a hereditary cancer registry provided >>> Suppose instead you want the data in a text format. Different databases have different text formats. For example, PubMed has a "docsum" format which gives just the summary of a document and "medline" format as needed for a citation database. To get these, use a "text" "retmode" ("return mode") and select the appropriate "rettype" ("return type"). Here are examples of those two return types >>> print eutils.efetch_using_dbids(dbids, "text", "docsum").read()[:497] 1: Coyne JC, Kruus L, Racioppo M, Calzone KA, Armstrong K. What do ratings of cancer-specific distress mean among women at high risk of breast and ovarian cancer? Am J Med Genet. 2003 Jan 30;116(3):222-8. PMID: 12503096 [PubMed - in process] >>> print eutils.efetch_using_dbids(dbids, "text", "medline").read()[:369] UI - 22390532 PMID- 12503096 DA - 20021227 IS - 0148-7299 VI - 116 IP - 3 DP - 2003 Jan 30 TI - What do ratings of cancer-specific distress mean among women at high risk of breast and ovarian cancer? PG - 222-8 AB - Women recruited from a hereditary cancer registry provided ratings of distress associated with different aspects of high-risk status >>> It's also possible to get a list of records related to a given article. 
This is done through the "elink" method. For example, here's how to get the list of PubMed articles related to the above PubMed record. (Again, truncated because otherwise there is a lot of data.) >>> print eutils.elink_using_dbids(dbids).read()[:590] <?xml version="1.0"?> <!DOCTYPE eLinkResult PUBLIC "-//NLM//DTD eLinkResult, 11 May 2002//EN" "http://www.ncbi.nlm.nih.gov/entrez/query/DTD/eLink_020511.dtd"> <eLinkResult> <LinkSet> <DbFrom>pubmed</DbFrom> <IdList> <Id>12503096</Id> </IdList> <LinkSetDb> <DbTo>pubmed</DbTo> <LinkName>pubmed_pubmed</LinkName> <Link> <Id>12503096</Id> <Score>2147483647</Score> </Link> <Link> <Id>11536413</Id> <Score>30817790</Score> </Link> <Link> <Id>11340606</Id> <Score>29939219</Score> </Link> <Link> <Id>10805955</Id> <Score>29584451</Score> </Link> >>> For a change of pace, let's work with the protein database to learn how to work with history. Suppose I want to do a multiple sequene alignment of bacteriorhodopsin with all of its neighbors, where "neighbors" is defined by NCBI. There are good programs for this -- I just need to get the records in the right format, like FASTA. The bacteriorhodopsin I'm interested in is BAA75200, which is GI:4579714, so I'll start by asking for its neighbors. >>> results = eutils.elink_using_dbids( ... EUtils.DBIds("protein", ["4579714"]), ... db = "protein").read() >>> print results[:454] <?xml version="1.0"?> <!DOCTYPE eLinkResult PUBLIC "-//NLM//DTD eLinkResult, 11 May 2002//EN" "http://www.ncbi.nlm.nih.gov/entrez/query/DTD/eLink_020511.dtd"> <eLinkResult> <LinkSet> <DbFrom>protein</DbFrom> <IdList> <Id>4579714</Id> </IdList> <LinkSetDb> <DbTo>protein</DbTo> <LinkName>protein_protein</LinkName> <Link> <Id>4579714</Id> <Score>2147483647</Score> </Link> <Link> <Id>11277596</Id> <Score>1279</Score> </Link> >>> Let's get all the <Id> fields. (While the following isn't a good way to parse XML, it is easy to understand and works well enough for this example.) Note that I remove the first <Id> because that's from the query and not from the results. >>> import re >>> ids = re.findall(r"<Id>(\d+)</Id>", results) >>> ids = ids[1:] >>> len(ids) 222 >>> dbids = EUtils.DBIds("protein", ids) >>> That's a lot of records. I could use 'efetch_using_dbids' but there's a problem with that. Efetch uses the HTTP GET protocol to pass information to the EUtils server. ("GET" is what's used when you type a URL in the browser.) Each id takes about 9 characters, so the URL would be over 2,000 characters long. This may not work on some systems, for example, some proxies do not support long URLs. (Search for "very long URLs" for examples.) Instead, we'll upload the list to the server then fetch the FASTA version using the history. The first step is to upload the data. We want to put that into the history so we set 'usehistory' to true. There's no existing history so the webenv string is None. >>> print eutils.epost(dbids, usehistory = 1, webenv = None).read() <?xml version="1.0"?> <!DOCTYPE ePostResult PUBLIC "-//NLM//DTD ePostResult, 11 May 2002//EN" "http://www.ncbi.nlm.nih.gov/entrez/query/DTD/ePost_020511.dtd"> <ePostResult> <QueryKey>1</QueryKey> <WebEnv>%7BPgTHRHFBsJfC%3C%5C%5C%5B%3EAfJCKQ%5Ey%60%3CGkH%5DH%5E%3DJHGBKAJ%3F%40CbCiG%3FE%3C</WebEnv> </ePostResult> >>> This says that the identifiers were saved as query #1, which will be used later on as the "query_key" field. The WebEnv is a cookie (or token) used to tell the server where to find that query. 
The WebEnv changes after every history-enabled ESearch or EPost so you'll need to parse the output from those to get the new WebEnv field. You'll also need to unquote it since it is URL-escaped. Also, you will need to pass in the name of the database used for the query in order to access the history. Why? I don't know -- I figure the WebEnv and query_key should be enough to get the database name. >>> import urllib >>> webenv = urllib.unquote("%7BPgTHRHFBsJfC%3C%5C%5C%5B%3EAfJCKQ%5Ey%60%3CGkH%5DH%5E%3DJHGBKAJ%3F%40CbCiG%3FE%3C") >>> print webenv {PgTHRHFBsJfC<\\[>AfJCKQ^y`<GkH]H^=JHGBKAJ?@CbCiG?E< >>> Okay, now to get the data in FASTA format. Notice that I need the 'retmax' in order to include all the records in the result. (The default is 20 records.) >>> fasta = eutils.efetch_using_history("protein", webenv, query_key = "1", ... retmode = "text", rettype = "fasta", ... retmax = len(dbids)).read() >>> fasta.count(">") 222 >>> print fasta[:694] >gi|14194475|sp|O93742|BACH_HALSD Halorhodopsin (HR) MMETAADALASGTVPLEMTQTQIFEAIQGDTLLASSLWINIALAGLSILLFVYMGRNLEDPRAQLIFVAT LMVPLVSISSYTGLVSGLTVSFLEMPAGHALAGQEVLTPWGRYLTWALSTPMILVALGLLAGSNATKLFT AVTADIGMCVTGLAAALTTSSYLLRWVWYVISCAFFVVVLYVLLAEWAEDAEVAGTAEIFNTLKLLTVVL WLGYPIFWALGAEGLAVLDVAVTSWAYSGMDIVAKYLFAFLLLRWVVDNERTVAGMAAGLGAPLARCAPA DD >gi|14194474|sp|O93741|BACH_HALS4 Halorhodopsin (HR) MRSRTYHDQSVCGPYGSQRTDCDRDTDAGSDTDVHGAQVATQIRTDTLLHSSLWVNIALAGLSILVFLYM ARTVRANRARLIVGATLMIPLVSLSSYLGLVTGLTAGPIEMPAAHALAGEDVLSQWGRYLTWTLSTPMIL LALGWLAEVDTADLFVVIAADIGMCLTGLAAALTTSSYAFRWAFYLVSTAFFVVVLYALLAKWPTNAEAA GTGDIFGTLRWLTVILWLGYPILWALGVEGFALVDSVGLTSWGYSLLDIGAKYLFAALLLRWVANNERTI AVGQRSGRGAIGDPVED >>> To round things out, here's a query which refines the previous query. I want to get all records from the first search which also have the word "Structure" in them. (My background was originally structural biophysics, whaddya expect? :) >>> print eutils.search("#1 AND structure", db = "protein", usehistory = 1, ... webenv = webenv).read() <?xml version="1.0"?> <!DOCTYPE eSearchResult PUBLIC "-//NLM//DTD eSearchResult, 11 May 2002//EN" "http://www.ncbi.nlm.nih.gov/entrez/query/DTD/eSearch_020511.dtd"> <eSearchResult> <Count>67</Count> <RetMax>20</RetMax> <RetStart>0</RetStart> <QueryKey>2</QueryKey> <WebEnv>UdvMf%3F%60G%3DIE%60bG%3DGec%3E%3D%3Cbc_%5DgBAf%3EAi_e%5EAJcHgDi%3CIqGdE%7BmC%3C</WebEnv> <IdList> <Id>461608</Id> <Id>114808</Id> <Id>1364150</Id> <Id>1363466</Id> <Id>1083906</Id> <Id>99232</Id> <Id>99212</Id> <Id>81076</Id> <Id>114811</Id> <Id>24158915</Id> <Id>24158914</Id> <Id>24158913</Id> <Id>1168615</Id> <Id>114812</Id> <Id>114809</Id> <Id>17942995</Id> <Id>17942994</Id> <Id>17942993</Id> <Id>20151159</Id> <Id>20150922</Id> </IdList> <TranslationSet> </TranslationSet> <TranslationStack> <TermSet> <Term>#1</Term> <Field>All Fields</Field> <Count>222</Count> <Explode>Y</Explode> </TermSet> <TermSet> <Term>structure[All Fields]</Term> <Field>All Fields</Field> <Count>142002</Count> <Explode>Y</Explode> </TermSet> <OP>AND</OP> </TranslationStack> </eSearchResult> >>> One last thing about history. It doesn't last very long -- perhaps an hour or so. (Untested.) You may be able to toss it some keep-alive signal every once in a while. 
Or you may want to keep a local copy of the identifiers and re-post them once the history expires.

The known 'db' fields and primary IDs (if known) are
  genome -- GI number
  nucleotide -- GI number
  omim -- MIM number
  popset -- GI number
  protein -- GI number
  pubmed -- PMID
  sequences (not available; this will combine all sequence databases)
  structure -- MMDB ID
  taxonomy -- TAXID

The 'field' parameter is different for different databases. The fields for PubMed are listed at
http://www.ncbi.nlm.nih.gov/entrez/query/static/help/pmhelp.html#SearchFieldDescriptionsandTags
  Affiliation -- AD
  All Fields -- All
  Author -- AU
  EC/RN Number -- RN
  Entrez Date -- EDAT (also valid for 'datetype')
  Filter -- FILTER
  Issue -- IP
  Journal Title -- TA
  Language -- LA
  MeSH Date -- MHDA (also valid for 'datetype')
  MeSH Major Topic -- MAJR
  MeSH Subheadings -- SH
  MeSH Terms -- MH
  Pagination -- PG
  Personal Name as Subject -- PS
  Publication Date -- DP (also valid for 'datetype')
  Publication Type -- PT
  Secondary Source ID -- SI
  Subset -- SB
  Substance Name -- NM
  Text Words -- TW
  Title -- TI
  Title/Abstract -- TIAB
  Unique Identifiers -- UID
  Volume -- VI

The fields marked as 'datetype' can also be used for date searches. Date searches can be done in the query (for example, as 1990/01/01:1999/12/31[edat]) or by passing a WithinNDays or DateRange field to the 'date' parameter of the search.

Please pay attention to the usage limits! They are listed at
http://www.ncbi.nlm.nih.gov/entrez/query/static/eutils_help.html
At the time of this writing they are:
  * Run retrieval scripts on weekends or between 9 PM and 5 AM ET weekdays for any series of more than 100 requests.
  * Make no more than one request every 3 seconds.
  * Only 5000 PubMed records may be retrieved in a single day.
  * NCBI's Disclaimer and Copyright notice must be evident to users of your service. NLM does not hold the copyright on the PubMed abstracts; the journal publishers do. NLM provides no legal advice concerning distribution of copyrighted materials; consult your legal counsel. (Their disclaimer is at http://www.ncbi.nlm.nih.gov/About/disclaimer.html )
"""
# "   # Emacs cruft

import urllib, urllib2, cStringIO

DUMP_URL = 0
DUMP_RESULT = 0

# These tell NCBI who is using the tool.  They are meant to provide
# hints to NCBI about how their service is being used and provide a
# means of getting ahold of the author.
#
# To use your own values, pass them in to the EUtils constructor.
#
TOOL = "EUtils_Python_client"
EMAIL = "biopython-dev@biopython.org"
assert " " not in TOOL
assert " " not in EMAIL

def _dbids_to_id_string(dbids):
    """Internal function: convert a list of ids to a comma-separated string"""
    # NOTE: the server strips out non-numeric characters
    # Eg, "-1" is treated as "1".  So do some sanity checking.
    # XXX Should I check for non-digits?
    # Are any of the IDs non-integers?
    if not dbids:
        raise TypeError("dbids list must have at least one term")
    for x in dbids.ids:
        if "," in x:
            raise TypeError("identifiers cannot contain a comma: %r " % (x,))
    id_string = ",".join(dbids.ids)
    assert id_string.count(",") == len(dbids.ids)-1, "double checking"
    return id_string

class ThinClient:
    """Client-side interface to the EUtils services

    See the module docstring for much more complete information.
    """
    def __init__(self, opener = None, tool = TOOL, email = EMAIL,
                 baseurl = "http://www.ncbi.nlm.nih.gov/entrez/eutils/"):
        """opener = None, tool = TOOL, email = EMAIL, baseurl = ".../eutils/"

        'opener' -- an object which implements the 'open' method like a
               urllib2.OpenerDirector.
Defaults to urllib2.build_opener() 'tool' -- the term to use for the 'tool' field, used by NCBI to track which programs use their services. If you write your own tool based on this package, use your own tool name. 'email' -- a way for NCBI to contact you (the developer, not the user!) if there are problems and to tell you about updates or changes to their system. 'baseurl' -- location of NCBI's EUtils directory. Shouldn't need to change this at all. """ if tool is not None and " " in tool: raise TypeError("No spaces allowed in 'tool'") if email is not None and " " in email: raise TypeError("No spaces allowed in 'email'") if opener is None: opener = urllib2.build_opener() self.opener = opener self.tool = tool self.email = email self.baseurl = baseurl def _fixup_query(self, query): """Internal function to add and remove fields from a query""" q = query.copy() # Set the 'tool' and 'email' fields q["tool"] = self.tool q["email"] = self.email # Kinda cheesy -- shouldn't really do this here. # If 'usehistory' is true, use the value of 'Y' instead. # Otherwise, don't use history if "usehistory" in q: if q["usehistory"]: q["usehistory"] = "y" else: q["usehistory"] = None # This will also remove the history, email, etc. fields # if they are set to None. for k, v in q.items(): if v is None: del q[k] # Convert the query into the form needed for a GET. return urllib.urlencode(q) def _get(self, program, query): """Internal function: send the query string to the program as GET""" # NOTE: epost uses a different interface q = self._fixup_query(query) url = self.baseurl + program + "?" + q if DUMP_URL: print "Opening with GET:", url if DUMP_RESULT: print " ================== Results ============= " s = self.opener.open(url).read() print s print " ================== Finished ============ " return cStringIO.StringIO(s) return self.opener.open(url) def esearch(self, term, # In Entrez query language db = "pubmed", # Required field, default to PubMed field = None, # Field to use for unqualified words daterange = None, # Date restriction retstart = 0, retmax = 20, # Default from NCBI is 20, so I'll use that usehistory = 0, # Enable history tracking webenv = None, # If given, add to an existing history ): """term, db="pubmed", field=None, daterange=None, retstart=0, retmax=20, usehistory=0, webenv=none Search the given database for records matching the query given in the 'term'. See the module docstring for examples. 'term' -- the query string in the Entrez query language; see http://www.ncbi.nlm.nih.gov/entrez/query/static/help/pmhelp.html 'db' -- the database to search 'field' -- the field to use for unqualified words Eg, "dalke[au] AND gene" with field==None becomes dalke[au] AND (genes[MeSH Terms] OR gene[Text Word] and "dalke[au] AND gene" with field=="au" becomes dalke[au] AND genes[Author] (Yes, I think the first "au" should be "Author" too) 'daterange' -- a date restriction; either WithinNDays or DateRange 'retstart' -- include identifiers in the output, starting with position 'retstart' (normally starts with 0) 'retmax' -- return at most 'retmax' identifiers in the output (if not specified, NCBI returns 20 identifiers) 'usehistory' -- flag to enable history tracking 'webenv' -- if this string is given, add the search results to an existing history. (WARNING: the history disappers after about an hour of non-use.) You will need to parse the output XML to get the new QueryKey and WebEnv fields. Returns an input stream from an HTTP request. The stream contents are in XML. 
""" query = {"term": term, "db": db, "field": field, "retstart": retstart, "retmax": retmax, "usehistory": usehistory, "WebEnv": webenv, } if daterange is not None: query.update(daterange.get_query_params()) return self._get(program = "esearch.fcgi", query = query) def epost(self, dbids, webenv = None, # If given, add to an existing history ): """dbids, webenv = None Create a new collection in the history containing the given list of identifiers for a database. 'dbids' -- a DBIds, which contains the database name and a list of identifiers in that database 'webenv' -- if this string is given, add the collection to an existing history. (WARNING: the history disappers after about an hour of non-use.) You will need to parse the output XML to get the new QueryKey and WebEnv fields. NOTE: The order of the IDs on the server is NOT NECESSARILY the same as the upload order. Returns an input stream from an HTTP request. The stream contents are in XML. """ id_string = _dbids_to_id_string(dbids) # Looks like it will accept *any* ids. Wonder what that means. program = "epost.fcgi" query = {"id": id_string, "db": dbids.db, "WebEnv": webenv, } q = self._fixup_query(query) # Need to use a POST since the data set can be *very* long; # even too long for GET. if DUMP_URL: print "Opening with POST:", self.baseurl + program + "?" + q if DUMP_RESULT: print " ================== Results ============= " s = self.opener.open(self.baseurl + program, q).read() print s print " ================== Finished ============ " return cStringIO.StringIO(s) return self.opener.open(self.baseurl + program, q) def esummary_using_history(self, db, # This is required. Don't use a # default here because it must match # that of the webenv webenv, query_key, retstart = 0, retmax = 20, retmode = "xml", # any other modes? ): """db, webenv, query_key, retstart = 0, retmax = 20, retmode = "xml" Get the summary for a collection of records in the history 'db' -- the database containing the history/collection 'webenv' -- the WebEnv cookie for the history 'query_key' -- the collection in the history 'retstart' -- get the summaries starting with this position 'retmax' -- get at most this many summaries 'retmode' -- can only be 'xml'. (Are there others?) Returns an input stream from an HTTP request. The stream contents are in 'retmode' format. """ return self._get(program = "esummary.fcgi", query = {"db": db, "WebEnv": webenv, "query_key": query_key, "retstart": retstart, "retmax": retmax, "retmode": retmode, }) def esummary_using_dbids(self, dbids, retmode = "xml", # any other modes? ): """dbids, retmode = "xml" Get the summary for records specified by identifier 'dbids' -- a DBIds containing the database name and list of record identifiers 'retmode' -- can only be 'xml' Returns an input stream from an HTTP request. The stream contents are in 'retmode' format. """ id_string = _dbids_to_id_string(dbids) return self._get(program = "esummary.fcgi", query = {"id": id_string, "db": dbids.db, # "retmax": len(dbids.ids), # needed? "retmode": retmode, }) def efetch_using_history(self, db, webenv, query_key, retstart = 0, retmax = 20, retmode = None, rettype = None, # sequence only seq_start = None, seq_stop = None, strand = None, complexity = None, ): """db, webenv, query_key, retstart=0, retmax=20, retmode=None, rettype=None, seq_start=None, seq_stop=None, strand=None, complexity=None Fetch information for a collection of records in the history, in a variety of formats. 
'db' -- the database containing the history/collection 'webenv' -- the WebEnv cookie for the history 'query_key' -- the collection in the history 'retstart' -- get the formatted data starting with this position 'retmax' -- get data for at most this many records These options work for sequence databases 'seq_start' -- return the sequence starting at this position. The first position is numbered 1 'seq_stop' -- return the sequence ending at this position Includes the stop position, so seq_start = 1 and seq_stop = 5 returns the first 5 bases/residues. 'strand' -- strand. Use EUtils.PLUS_STRAND (== 1) for plus strand and EUtils.MINUS_STRAND (== 2) for negative 'complexity' -- regulates the level of display. Options are 0 - get the whole blob 1 - get the bioseq for gi of interest (default in Entrez) 2 - get the minimal bioseq-set containing the gi of interest 3 - get the minimal nuc-prot containing the gi of interest 4 - get the minimal pub-set containing the gi of interest http://www.ncbi.nlm.nih.gov/entrez/query/static/efetchseq_help.html The valid retmode and rettype values are For publication databases (omim, pubmed, journals) the retmodes are 'xml', 'asn.1', 'text', and 'html'. If retmode == xml ---> XML (default) if retmode == asn.1 ---> ASN.1 The following rettype values work for retmode == 'text'. docsum ----> author / title / cite / PMID brief ----> a one-liner up to about 66 chars abstract ----> cite / title / author / dept / full abstract / PMID citation ----> cite / title / author / dept / full abstract / MeSH terms / substances / PMID medline ----> full record in medline format asn.1 ----> full record in one ASN.1 format mlasn1 ----> full record in another ASN.1 format uilist ----> list of uids, one per line sgml ----> same as retmode="xml" Sequence databases (genome, protein, nucleotide, popset) also have retmode values of 'xml', 'asn.1', 'text', and 'html'. If retmode == 'xml' ---> XML (default; only supports rettype == 'native') If retmode == 'asn.1' ---> ASN.1 text (only works for rettype of 'native' and 'sequin') The following work with a retmode of 'text' or 'html' native ----> Default format for viewing sequences fasta ----> FASTA view of a sequence gb ----> GenBank view for sequences, constructed sequences will be shown as contigs (by pointing to its parts). Valid for nucleotides. gbwithparts --> GenBank view for sequences, the sequence will always be shown. Valid for nucleotides. est ----> EST Report. Valid for sequences from dbEST database. gss ----> GSS Report. Valid for sequences from dbGSS database. gp ----> GenPept view. Valid for proteins. seqid ----> To convert list of gis into list of seqids acc ----> To convert list of gis into list of accessions # XXX TRY THESE fasta_xml gb_xml gi (same as uilist?) A retmode of 'file' is the same as 'text' except the data is sent with a Content-Type of application/octet-stream, which tells the browser to save the data to a file. A retmode of 'html' is the same as 'text' except a HTML header and footer are added and special character are properly escaped. Returns an input stream from an HTTP request. The stream contents are in the requested format. """ # NOTE: found the list of possible values by sending illegal # parameters, to see which comes up as an error message. Used # that to supplement the information from the documentation. # Looks like efetch is based on pmfetch code and uses the same # types. # if retmax is specified and larger than 500, NCBI only returns # 500 sequences. 
Removing it from the URL relieves this constraint. # To get around this, if retstart is 0 and retmax is greater than 500, # we set retmax to be None. if retstart == 0 and retmax > 500: retmax = None return self._get(program = "efetch.fcgi", query = {"db": db, "WebEnv": webenv, "query_key": query_key, "retstart": retstart, "retmax": retmax, "retmode": retmode, "rettype": rettype, "seq_start": seq_start, "seq_stop": seq_stop, "strand": strand, "complexity": complexity, }) def efetch_using_dbids(self, dbids, retmode = None, rettype = None, # sequence only seq_start = None, seq_stop = None, strand = None, complexity = None, ): """dbids, retmode = None, rettype = None, seq_start = None, seq_stop = None, strand = None, complexity = None Fetch information for records specified by identifier 'dbids' -- a DBIds containing the database name and list of record identifiers 'retmode' -- See the docstring for 'efetch_using_history' 'rettype' -- See the docstring for 'efetch_using_history' These options work for sequence databases 'seq_start' -- return the sequence starting at this position. The first position is numbered 1 'seq_stop' -- return the sequence ending at this position Includes the stop position, so seq_start = 1 and seq_stop = 5 returns the first 5 bases/residues. 'strand' -- strand. Use EUtils.PLUS_STRAND (== 1) for plus strand and EUtils.MINUS_STRAND (== 2) for negative 'complexity' -- regulates the level of display. Options are 0 - get the whole blob 1 - get the bioseq for gi of interest (default in Entrez) 2 - get the minimal bioseq-set containing the gi of interest 3 - get the minimal nuc-prot containing the gi of interest 4 - get the minimal pub-set containing the gi of interest Returns an input stream from an HTTP request. The stream contents are in the requested format. """ id_string = _dbids_to_id_string(dbids) return self._get(program = "efetch.fcgi", query = {"id": id_string, "db": dbids.db, # "retmax": len(dbids.ids), # needed? "retmode": retmode, "rettype": rettype, "seq_start": seq_start, "seq_stop": seq_stop, "strand": strand, "complexity": complexity, }) def elink_using_history(self, dbfrom, webenv, query_key, db = "pubmed", retstart = 0, retmax = 20, cmd = "neighbor", retmode = None, term = None, field = None, # "Date limits not valid for link commands" daterange = None, ): """dbfrom, webenv, query_key, db="pubmed", retstart=0, retmax=20, cmd="neighbor", retmode=None, term=None, field=None, daterange=None, Find records related (in various ways) to a collection of records in the history. 'dbfrom' -- this is the name of the database containing the collection of record. NOTE! For the other methods this is named 'db'. But I'm keeping NCBI's notation. This is where the records come FROM. 'webenv' -- the WebEnv cookie for the history 'query_key' -- the collection in the history 'db' -- Where the records link TO. This is where you want to find the new records. For example, if you want to find PubMed records related to a protein then 'dbfrom' is 'protein' and 'db' is 'pubmed' 'cmd'-- one of the following (unless specified, retmode is the default value, which returns data in XML) neighbor: Display neighbors and their scores by database and ID. (This is the default 'cmd'.) prlinks: List the hyperlink to the primary LinkOut provider for multiple IDs and database. When retmode == 'ref' this URL redirects the browser to the primary LinkOut provider for a single ID and database. llinks: List LinkOut URLs and Attributes for multiple IDs and database. 
lcheck: Check for the existence (Y or N) of an external link in for multiple IDs and database. ncheck: Check for the existence of a neighbor link for each ID, e.g., Related Articles in PubMed. 'retstart' -- get the formatted data starting with this position 'retmax' -- get data for at most this many records 'retmode' -- only used with 'prlinks' 'term' -- restrict results to records which also match this Entrez search 'field' -- the field to use for unqualified words 'daterange' -- restrict results to records which also match this date criteria; either WithinNDays or DateRange NOTE: DateRange must have both mindate and maxdate Some examples: In PubMed, to get a list of "Related Articles" dbfrom = pubmed cmd = neighbor To get MEDLINE index only related article dbfrom = pubmed db = pubmed term = medline[sb] cmd = neighbor Given a PubMed record, find the related nucleotide records dbfrom = pubmed db = nucleotide (or "protein" for related protein records) cmd = neighbor To get "LinkOuts" (external links) for a PubMed record set dbfrom = pubmed cmd = llinks Get the primary link information for a PubMed document; includes various hyperlinks, image URL for the provider, etc. dbfrom = pubmed cmd = prlinks (optional) retmode = "ref" (causes a redirect to the privder) Returns an input stream from an HTTP request. The stream contents are in XML unless 'retmode' is 'ref'. """ query = {"WebEnv": webenv, "query_key": query_key, "db": db, "dbfrom": dbfrom, "cmd": cmd, "retstart": retstart, "retmax": retmax, "retmode": retmode, "term": term, "field": field, } if daterange is not None: if daterange.mindate is None or daterange.maxdate is None: raise TypeError("Both mindate and maxdate must be set for eLink") query.update(daterange.get_query_params()) return self._get(program = "elink.fcgi", query = query) def elink_using_dbids(self, dbids, db = "pubmed", cmd = "neighbor", retmode = None, term = None, field = None, daterange = None, ): """dbids, db="pubmed", cmd="neighbor", retmode=None, term=None, daterange=None Find records related (in various ways) to a set of records specified by identifier. 'dbids' -- a DBIds containing the database name and list of record identifiers 'db' -- Where the records link TO. This is where you want to find the new records. For example, if you want to find PubMed records related to a protein then 'db' is 'pubmed'. (The database they are from is part of the DBIds object.) 'cmd' -- see the docstring for 'elink_using_history' 'retmode' -- see 'elink_using_history' 'term' -- see 'elink_using_history' 'daterange' -- see 'elink_using_history' Returns an input stream from an HTTP request. The stream contents are in XML unless 'retmode' is 'ref'. """ id_string = _dbids_to_id_string(dbids) query = {"id": id_string, "db": db, "dbfrom": dbids.db, "cmd": cmd, "retmode": retmode, "field" : field, "term": term, } if daterange is not None: import Datatypes if isinstance(daterange, Datatypes.DateRange) and \ (daterange.mindate is None or daterange.maxdate is None): raise TypeError("Both mindate and maxdate must be set for eLink") query.update(daterange.get_query_params()) return self._get(program = "elink.fcgi", query = query)
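# A compact way to see what _fixup_query above actually sends over the wire,
# re-implemented standalone so it can be run without the Bio package.  The
# parameter values in the call at the bottom are invented for the
# demonstration; the tool/email injection, the usehistory normalization,
# and the dropping of None-valued fields mirror the method above.
import urllib   # Python 2, as used by this module

def fixup_query(query, tool, email):
    q = dict(query)
    q["tool"] = tool
    q["email"] = email
    if "usehistory" in q:
        q["usehistory"] = "y" if q["usehistory"] else None
    # None-valued fields are dropped entirely rather than sent empty
    q = dict((k, v) for k, v in q.items() if v is not None)
    return urllib.urlencode(q)

print(fixup_query({"term": "cancer", "db": "pubmed", "field": None,
                   "usehistory": 1, "WebEnv": None},
                  "EUtils_Python_client", "biopython-dev@biopython.org"))
# -> e.g. usehistory=y&db=pubmed&term=cancer&tool=...&email=... (order varies)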
dbmi-pitt/DIKB-Micropublication
scripts/mp-scripts/Bio/EUtils/ThinClient.py
Python
apache-2.0
47,176
[ "Biopython" ]
f78353031eb28935b0699675b8a0fcf2d1de23478dd88b574174844eb85bb441
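# The module docstring above pulls identifiers out of ESearch/ELink XML with
# a bare regular expression; here is that idiom as a runnable snippet.  The
# sample XML fragment is made up, shaped like the eLinkResult output shown.
import re

results = """<LinkSet><IdList><Id>4579714</Id></IdList>
<LinkSetDb><Link><Id>4579714</Id></Link><Link><Id>11277596</Id></Link>
</LinkSetDb></LinkSet>"""

ids = re.findall(r"<Id>(\d+)</Id>", results)
ids = ids[1:]   # the first <Id> echoes the query record, not a result
print(ids)      # ['4579714', '11277596']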
""" RHEAS module for retrieving meteorological forecasts/hindcasts from the IRI FD Seasonal Forecast Tercile Probabilities. .. module:: iri :synopsis: Retrieve IRI meteorological forecast data .. moduleauthor:: Kostas Andreadis <kandread@jpl.nasa.gov> """ import datasets import dbio import netCDF4 as netcdf import os import random import string import numpy as np from datetime import date from dateutil.relativedelta import relativedelta import logging def dates(dbname): dts = datasets.dates(dbname, "precip.iri") return dts def ingest(dbname, filename, dt, lt, cname, stname): """Imports Geotif *filename* into database *dbname*.""" db = dbio.connect(dbname) cur = db.cursor() schemaname, tablename = stname.split(".") cur.execute( "select * from information_schema.tables where table_schema='{0}' and table_name='{1}'".format(schemaname, tablename)) if not bool(cur.rowcount): cur.execute("create table {0}.{1} (rid serial not null primary key, fdate date, tercile text, leadtime int, rast raster)".format( schemaname, tablename)) db.commit() cur.execute("select * from {0} where fdate='{1}' and tercile = '{2}' and leadtime = {3}".format(stname, dt.strftime("%Y-%m-%d"), cname, lt)) if bool(cur.rowcount): cur.execute("delete from {0} where fdate='{1}' and tercile = '{2}' and leadtime = {3}".format(stname, dt.strftime("%Y-%m-%d"), cname, lt)) db.commit() dbio.ingest(dbname, filename, dt, stname, False, False) sql = "update {0} set tercile = '{1}' where tercile is null".format( stname, cname) cur.execute(sql) sql = "update {0} set leadtime = '{1}' where leadtime is null".format( stname, lt) cur.execute(sql) db.commit() cur.close() def download(dbname, dts, bbox=None): """Downloads IRI forecast tercile probability data from the IRI data server, and imports them into the database *dbname*. 
Optionally uses a bounding box to limit the region with [minlon, minlat, maxlon, maxlat].""" leadtime = 3 res = 2.5 baseurl = "http://iridl.ldeo.columbia.edu/SOURCES/.IRI/.FD/.Seasonal_Forecast/.{0}/.prob/dods" table = {"Precipitation": "precip.iri", "Temperature": "tmax.iri"} for varname in ["Precipitation", "Temperature"]: purl = baseurl.format(varname) pds = netcdf.Dataset(purl) lat = pds.variables["Y"][:] lon = pds.variables["X"][:] lon[lon > 180] -= 360.0 i1, i2, j1, j2 = datasets.spatialSubset(np.sort(lat)[::-1], np.sort(lon), res, bbox) lati = np.argsort(lat)[::-1][i1:i2] loni = np.argsort(lon)[j1:j2] lat = np.sort(lat)[::-1][i1:i2] lon = np.sort(lon)[j1:j2] t = pds.variables["F"][:] ti = [tt for tt in range(len(t)) if t[tt] >= ((dts[0].year - 1960) * 12 + dts[0].month - 0.5) and t[tt] <= ((dts[1].year - 1960) * 12 + dts[1].month - 0.5)] for tt in ti: dt = date(1960, 1, 1) + relativedelta(months=int(t[tt])) for m in range(leadtime): for ci, c in enumerate(["below", "normal", "above"]): data = pds.variables["prob"][tt, m, lati, loni, ci] filename = dbio.writeGeotif(lat, lon, res, data) ingest(dbname, filename, dt, m + 1, c, table[varname]) os.remove(filename) def _getResampledTables(dbname, options, res): """Find names of resampled raster tables.""" rtables = {} db = dbio.connect(dbname) cur = db.cursor() for v in ['precip', 'tmax', 'tmin', 'wind']: tname = options['vic'][v] cur.execute( "select * from raster_resampled where sname='{0}' and tname like '{1}%' and resolution={2}".format(v, tname, res)) rtables[v] = cur.fetchone()[1] cur.close() db.close() return rtables def _deleteTableIfExists(dbname, sname, tname): """Check if table exists and delete it.""" db = dbio.connect(dbname) cur = db.cursor() cur.execute( "select * from information_schema.tables where table_schema='{0}' and table_name='{1}'".format(sname, tname)) if bool(cur.rowcount): cur.execute("drop table {0}.{1}".format(sname, tname)) db.commit() cur.close() db.close() def _resampleClimatology(dbname, ptable, name, dt0): """Resample finer scale climatology to IRI spatial resolution.""" tilesize = 10 res = 2.5 db = dbio.connect(dbname) cur = db.cursor() cur.execute( "select * from pg_catalog.pg_class c inner join pg_catalog.pg_namespace n on c.relnamespace=n.oid where n.nspname='precip' and c.relname='{0}_iri'".format(ptable)) if not bool(cur.rowcount): sql = "create table precip.{1}_iri as (with f as (select fdate,st_tile(st_rescale(rast,{0},'average'),{2},{2}) as rast from precip.{1}) select fdate,rast,dense_rank() over (order by st_upperleftx(rast),st_upperlefty(rast)) as rid from f)".format( res, ptable, tilesize) cur.execute(sql) cur.execute( "create index {0}_iri_r on precip.{0}_iri(rid)".format(ptable)) cur.execute( "create index {0}_iri_t on precip.{0}_iri(fdate)".format(ptable)) db.commit() _deleteTableIfExists(dbname, 'precip', "{0}_iri_xy".format(ptable)) sql = "create table precip.{0}_iri_xy as (select gid,st_worldtorastercoordx(rast,geom) as x,st_worldtorastercoordy(rast,geom) as y,rid as tile from precip.{0}_iri,{1}.basin where fdate=date'{2}-{3}-{4}' and st_intersects(rast,geom))".format( ptable, name, dt0.year, dt0.month, dt0.day) cur.execute(sql) db.commit() cur.execute( "create index {0}_iri_xy_r on precip.{0}_iri_xy(tile)".format(ptable)) db.commit() cur.close() db.close() def _getForcings(e, dbname, ptable, rtables, name, dt0, dt1): """Extract meteorological forcings for ensemble member.""" db = dbio.connect(dbname) cur = db.cursor() data = {} for v in ['precip', 'tmax', 'tmin', 'wind']: temptable = 
''.join(random.SystemRandom().choice( string.ascii_letters) for _ in range(8)) sql = "create table {7} as (with f as (select gid,st_worldtorastercoordx(rast,geom) as xf,st_worldtorastercoordy(rast,geom) as yf,rid as ftile from {6}.{0},{1}.basin where fdate=date'{2}-{3}-{4}' and st_intersects(rast,geom)) select c.gid,xf,yf,x,y,ftile as tile from f inner join precip.{5}_iri_xy as c on c.gid=f.gid)".format( rtables[v], name, dt0.year, dt0.month, dt0.day, ptable, v, temptable) cur.execute(sql) db.commit() cur.execute("create index {0}_r on {0}(tile)".format(temptable)) db.commit() sql = "select gid,fdate,st_value(rast,xf,yf) from {6}.{0},{7} as xy inner join iri_years as i on xy.x=i.x and xy.y=i.y where ens={2} and rid=tile and fdate>=date(concat_ws('-',yr,'{3}-{4}')) and fdate<=(date(concat_ws('-',yr,'{3}-{4}'))+interval'{5} days') order by gid,fdate".format( rtables[v], ptable, e + 1, dt0.month, dt0.day, (dt1 - dt0).days, v, temptable) cur.execute(sql) data[v] = cur.fetchall() cur.execute("drop table {0}".format(temptable)) db.commit() cur.close() db.close() return data def generate(options, models): """Generate meteorological forecast forcings by resampling fine-scale climatology.""" log = logging.getLogger(__name__) options['vic']['tmax'] = options['vic']['temperature'] options['vic']['tmin'] = options['vic']['temperature'] leadtime = 3 db = dbio.connect(models.dbname) cur = db.cursor() name = models.name dt0 = date(models.startyear, models.startmonth, models.startday) dt1 = date(models.endyear, models.endmonth, models.endday) dtf = dt0 - relativedelta(months=1) # forecast initialization date months = [(dt0 + relativedelta(months=t)).month for t in range(leadtime)] # check if forecast date exists in IRI data sql = "select count(*) from precip.iri where fdate=date '{0}-{1}-{2}'".format( dtf.year, dtf.month, dtf.day) cur.execute(sql) if bool(cur.rowcount): ptable = options['vic']['precip'] # find resampled raster tables rtables = _getResampledTables(models.dbname, options, models.res) # resample climatology to IRI spatial resolution as a table _resampleClimatology(models.dbname, ptable, name, dt0) # calculate the annual accumulated precipitation using only the months # within the forecast period _deleteTableIfExists(models.dbname, 'public', 'iri_psum') sql = "create table iri_psum as (with f as (select distinct x,y,tile from precip.{0}_iri_xy) select x,y,date_part('year',fdate) as yr,sum(st_value(rast,x,y)) as psum,row_number() over (partition by x,y) as rid from f,precip.{0}_iri where rid=tile and ({1}) group by x,y,yr order by x,y,psum)".format( ptable, " or ".join(["date_part('month',fdate)={0}".format(m) for m in months])) cur.execute(sql) db.commit() # retrieve probabilities from IRI seasonal forecast _deleteTableIfExists(models.dbname, 'public', 'iri_probs') sql = "create table iri_probs as (with f as (select x,y,st_pixelaspoint(rast,x,y) as geom from precip.{0}_iri_xy,precip.{0}_iri where rid=tile and fdate=date'{1}-{2}-{3}') select x,y,st_value(rast,geom) as prob,tercile,leadtime from f,precip.iri where fdate=date'{1}-{2}-{3}')".format( ptable, dt0.year, dt0.month, dt0.day) # ptable, dt0.year, dt0.month, dt0.day, dtf.year, dtf.month, dtf.day) cur.execute(sql) cur.execute("alter table iri_probs add column pg int") for ti, t in enumerate(['below', 'normal', 'above']): cur.execute( "update iri_probs set pg={0} where tercile='{1}'".format(ti + 1, t)) db.commit() # get number of years in climatology cur.execute("select count(distinct(yr)) from iri_psum") nyears = int(cur.fetchone()[0]) # 
assign probability weights to each year # FIXME: It seems like the IRI NetCDFs have null values for lead times # > 1 month. Just using lead time of 1 month for now _deleteTableIfExists(models.dbname, 'public', 'iri_pw') sql = "create table iri_pw as (with s as (select x,y,yr,psum,rid/({0}/3+1)+1 as pg from iri_psum) select s.x,s.y,s.yr,psum,1.0/{1}*prob/100.0 as weight from s inner join iri_probs as p on p.x=s.x and p.y=s.y and s.pg=p.pg where leadtime=1)".format( nyears, nyears / 3.0) cur.execute(sql) db.commit() # sample years based on probability weights _deleteTableIfExists(models.dbname, 'public', 'iri_years') sql = "create table iri_years as (with f as (select x,y,yr,sum(weight) over (partition by x,y order by psum) as w1, sum(weight) over (partition by x,y order by psum) - weight as w2 from iri_pw), r as (select n as ens,random() as s from generate_series(1,{0}) as x(n)) select x,y,yr,ens from f,r where s>=w2 and s<w1)".format( models.nens) cur.execute(sql) db.commit() # retrieve and write forcing data for e in range(models.nens): data = _getForcings(e, models.dbname, ptable, rtables, name, dt0, dt1) models[e].writeForcings(data['precip'], data['tmax'], data[ 'tmin'], data['wind']) else: log.warning("IRI forecast was not issued for requested date {0}.".format(dt0)) # Clean-up temporary tables cur.execute("drop table precip.{0}_iri_xy".format(ptable)) cur.execute("drop table iri_psum") cur.execute("drop table iri_probs") cur.execute("drop table iri_years") cur.close() db.close()
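# The SQL in generate() implements, per grid cell, a weighted resampling of
# climatology years: years are ranked by accumulated precipitation, split
# into terciles, each year weighted by its tercile's forecast probability,
# and ensemble members drawn from the cumulative weights.  A NumPy sketch of
# the same idea for a single cell (all numbers here are illustrative, not
# taken from the IRI data):
import numpy as np

years = np.arange(1981, 2011)            # 30 climatology years
psum = np.random.rand(len(years))        # accumulated precip per year
probs = [0.5, 0.3, 0.2]                  # below/normal/above tercile forecast

order = np.argsort(psum)                 # driest -> wettest
nper = len(years) // 3
weights = np.empty(len(years))
for tercile, p in enumerate(probs):
    # each year in a tercile shares that tercile's probability equally
    weights[order[tercile * nper:(tercile + 1) * nper]] = p / nper

nens = 10
ensemble_years = np.random.choice(years, size=nens, p=weights / weights.sum())
print(ensemble_years)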
nasa/RHEAS
src/datasets/iri.py
Python
mit
11,845
[ "NetCDF" ]
b43295adac57da3f8aa6ff3804e8d088c8a0de6182859c8973b7c4516aa792e1
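# The IRI server indexes forecast issue dates as fractional "months since
# January 1960" (the F axis read in download() above).  A small round-trip
# check of that convention, matching the arithmetic used there; the sample
# date is arbitrary.
from datetime import date
from dateutil.relativedelta import relativedelta

def f_index(dt):
    # the -0.5 centers the index on the month, as in download()'s filter
    return (dt.year - 1960) * 12 + dt.month - 0.5

def f_to_date(f):
    return date(1960, 1, 1) + relativedelta(months=int(f))

dt = date(2015, 6, 1)
assert f_to_date(f_index(dt)) == dt
print(f_index(dt), f_to_date(f_index(dt)))   # 665.5 2015-06-01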
import os import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import ImageGrid import random import logging logger = logging.getLogger() import time import pprint from astroML.stats import binned_statistic_2d import brewer2mpl import ResolvedStellarPops as rsp import ResolvedStellarPops.convertz as convertz from TPAGBparams import table_src, snap_src import galaxy_tests import model_plots angst_data = rsp.angst_tables.AngstTables() import itertools import tables color_scheme = ['#d73027', '#fc8d59', '#fee090', '#669966', '#e0f3f8', '#4575b4'] def get_color_cut(filter1): ''' see snap_src + tables/color_cuts.dat ''' color_dict = {'F606W': 0.2, 'F475W': 0.3, 'F110W': 0.1} return color_dict[filter1] def get_filter1(target, fits_src=None): '''get optical filter1''' fits_src = fits_src or snap_src + '/data/angst_no_trim' fitsfile, = rsp.fileIO.get_files(fits_src, '*%s*.fits' % target.lower()) filter1 = fitsfile.split(target)[1].split('_')[1].split('-')[0].upper() return filter1 def default_output_location(target, extra_directory=None, mc=False): outfile_loc = os.path.join(snap_src, 'models', 'varysfh', target) if extra_directory is not None: outfile_loc = os.path.join(outfile_loc, extra_directory) if mc is True: outfile_loc += '/mc' outfile_loc += '/' rsp.fileIO.ensure_dir(outfile_loc) return outfile_loc def default_agb_filepath(cmd_input_file, extra_directory='default'): agb_mod = None agb_model = cmd_input_file.replace('cmd_input_', '').lower() if extra_directory == 'default': agb_mod = agb_model.split('.')[0] return agb_mod def setup_files(cmd_input_file, target, outfile_loc='default', extra_str='', mc=True): agb_mod = default_agb_filepath(cmd_input_file) if outfile_loc == 'default': outfile_loc = \ default_output_location(target, extra_directory=agb_mod, mc=mc) names = ['opt_lf', 'ir_lf', 'narratio', 'opt_mass_met', 'ir_mass_met', 'contam'] name_fmt = '%s_%s_%s%s.dat' fnames = [os.path.join(outfile_loc, name_fmt % (agb_mod, target, f, extra_str)) for f in names] return outfile_loc, fnames, agb_mod def load_default_ancient_galaxies(table_file='default'): if table_file == 'default': table_file = os.path.join(table_src, 'ancients_0.1_0.2_galaxies.dat') elif table_file == 'comp_corr': table_file = os.path.join(table_src, 'ancients_0.1_0.2_comp_corr_galaxies.dat') else: logger.info('reading from table %s' % table_file) # Reads in the data as well as the mag offsets and factor in the # exclude region. ags = AncientGalaxies() ags.read_trgb_table(table_file) return ags def combine_list_of_dictionaries(dlist): result_dict = {} for dic in dlist: for key in dic.keys(): if not key in result_dict.keys(): result_dict[key] = [] result_dict[key].append(dic[key]) return result_dict def default_values_for_vsfh(target, cmd_input, vsfh_kw=None, match_sfh_src=None, match_sfh_file='default', galaxy_input_src='default', galaxy_input_search_fmt='*%s*dat'): vsfh_kw = vsfh_kw or {} if match_sfh_file == 'default': match_sfh_src = snap_src + '/data/sfh_parsec/' elif match_sfh_file == 'grid': # oldest version!! 
match_sfh_src = snap_src + '/data/sfh_parsec/match_grid/match_files/' if galaxy_input_src == 'default': galaxy_input_src = snap_src + '/input/' galaxy_input, = rsp.fileIO.get_files(galaxy_input_src, galaxy_input_search_fmt % target.upper()) vsfh_kw['sfh_file'], = rsp.fileIO.get_files(match_sfh_src, '%s*sfh' % target.replace('-deep', '')) vsfh_kw['galaxy_input'] = galaxy_input vsfh_kw['target'] = target vsfh_kw['cmd_input_file'] = cmd_input return vsfh_kw def prepare_vsfh_run(targets, cmd_input_files, nsfhs, mk_tri_sfh_kw=None, vary_sfh_kw=None, make_many_kw=None, vsfh_kw=None, table_file='default', dry_run=False, default_kw=None): ''' Run a number of SFH variations on a galaxy. If passed default to the args, will attempt to find the file based on the galaxy name. ARGS: galaxy_name: target name, ex: ddo71 (case doesn't matter) cmd_input_file: filename, ex: 'cmd_input_CAF09_S_OCT13.dat' match_sfh_file: 'default', the sfh file from match. match_fileorigin: 'match-grid', which type of sfh file 'match' or 'match-grid' galaxy_input_file: 'default', base input file for trilegal, will be copied. mk_tri_sfh_kw: A dict to be passed to VarySFH.make_trilegal_sfh default: random_sfh = True, random_z = False make_many_kw: A dict to be passed to VarySFH.prepare_trilegal_sfr default: nsfhs = 50, mk_tri_sfh_kw dict. vary_sfh_kw: A dict to be passed to VarySFH.vary_the_sfh default: diag_plots = True, make_many_kw dict RETURNS: VarySFHs class ''' # load and possibly overwrite make_many_kw from defaults # start the stream logger. The file logger is done target by target. global logger logger = logging.getLogger() logger.info('start of run: %s' % time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())) # create formatter and add it to the handlers formatter = \ '%(asctime)-15s %(levelname)s %(funcName)s %(lineno)d %(message)s' ch = logging.StreamHandler() ch.setLevel(logging.ERROR) ch.setFormatter(formatter) logger.addHandler(ch) logger.info('start of run: %s' % time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())) make_many_kw = make_many_kw or {} vary_sfh_kw = vary_sfh_kw or {} mk_tri_sfh_kw = mk_tri_sfh_kw or {} default_kw = default_kw or {} mk_tri_sfh_kw = dict({'dry_run': dry_run}.items() + mk_tri_sfh_kw.items()) make_many_kw = dict({'mk_tri_sfh_kw': mk_tri_sfh_kw}.items() + make_many_kw.items()) vary_sfh_kw = dict({'make_many_kw': make_many_kw}.items() + vary_sfh_kw.items()) vsfh_kw = vsfh_kw or {} vsfh_kw = dict({'file_origin': 'match-hmc', 'table_file': table_file, 'outfile_loc': 'default', 'nsfhs': nsfhs}.items() \ + vsfh_kw.items()) vsfh_kws = [] vSFHs = [] for target, cmd_input in itertools.product(targets, cmd_input_files): target = target.lower() #print target, cmd_input vsfh_kws.append(default_values_for_vsfh(target, cmd_input, vsfh_kw=vsfh_kw, **default_kw)) vSFHs.append(VarySFHs(**vsfh_kw)) return vSFHs, vary_sfh_kw def number_of_stars(gal=None, exclude_region='default', mag_below_trgb=2., galaxy_information=None, factor=2., comp_frac=False, indices=False, completeness_correction=False): ''' Count the number of rgb and agb stars (in F814W or F160W) ARGS: gal: rsp.Galaxies.galaxy instance galaxy_information: if no gal, dictonary with target, mag2, filter2, filter1 exclude_region: mags above and below the trgb to exclude from star counts [default: factor times the trgb mag error] mag_below_trgb: mags below the trgb to call rgb stars [2] factor: factor times the trgb mag error to exclude RETURNS: nrgb: int number of rgb stars nagb: int number of agb stars exclude_dict: dict of trgb, trgb_err, 
and factor Use galaxy_information if its a trilegal cmd or galaxy instance for data. There are two reasons to use exclude_region. 1) the TRGB mag is uncertain, and when counting agb stars, there should be no chance of scattering up rgb stars. 2) TPAGB stars can be below the TRGB. TO DO: Adapt for recent star formation: Add color information or verts to the stars in region call. Will need a robust contamination code. ''' # use with either data or simulations galaxy_information = galaxy_information or {} if gal is None: target = galaxy_information.get('target') mag2 = galaxy_information.get('mag2') mag1 = galaxy_information.get('mag1') filter2 = galaxy_information.get('filter2') filter1 = galaxy_information.get('filter1') else: target = gal.target mag2 = gal.mag2 mag1 = gal.mag1 filter2 = gal.filter2 filter1 = gal.filter1 if filter2 == 'F814W': extra_key = '%s,%s' % (filter1, filter2) trgb = angst_data.get_item(target, 'mTRGB', extra_key) trgb_err = angst_data.get_item(target, 'mTRGB_err', extra_key=extra_key) band = 'opt' if gal.filter2 == 'F160W': trgb = rsp.fileIO.item_from_row(angst_data.snap_tab3, 'target', target.upper(), 'mTRGB_F160W') trgb_err = rsp.fileIO.item_from_row(angst_data.snap_tab3, 'target', target.upper(), 'mTRGB_F160W_err') band = 'ir' if exclude_region == 'default': exclude_region = trgb_err * factor else: # overwrite factor and use the exclude region as the mag above # and below. Then trgb_err must be set to 1 because it is always # multiplied by factor when used. trgb_err = 1. factor = exclude_region if comp_frac is False: offset = trgb + mag_below_trgb else: offset = mag_below_trgb # rgb will go from mag_below_trgb to trgb + exclude_region color_cut, = np.nonzero((mag1-mag2) > get_color_cut(filter1)) nrgb = rsp.math_utils.between(mag2[color_cut], offset, trgb + exclude_region) # agb will go from trgb - exclude_region to very bright nagb = rsp.math_utils.between(mag2[color_cut], trgb - exclude_region, -99.) if completeness_correction is True: ast_dict = tables.read_completeness_corrections(filename='high_res_completeness_corrections.dat') key = '%s_bins' % band rgb_low = offset rgb_high = trgb + exclude_region irgb_low = np.argmin(abs(ast_dict[target][key] - rgb_low)) irgb_high = np.argmin(abs(ast_dict[target][key] - rgb_high)) agb_low = trgb - exclude_region iagb_low = np.argmin(abs(ast_dict[target][key] - agb_low)) iagb_high = 0 hist, bins = np.histogram(gal.mag2, bins=ast_dict[target][key]) check = np.sum(hist[irgb_high:irgb_low]) if check != len(nrgb): print 'hist method is off for %s rgb by %i' % (target, check-len(nrgb)) check = np.sum(hist[iagb_high:iagb_low]) if check != len(nagb): print 'hist method is off for %s agb by %i' % (target, check-len(nagb)) corr_hist = hist * 1./ast_dict[target]['%s_correction' % band][:-1] nrgb = np.round(np.sum(corr_hist[irgb_high:irgb_low])) nagb = np.round(np.sum(corr_hist[iagb_high:iagb_low])) # add trgb_err to galaxy instance, could also add factor.. if gal is not None: gal.trgb_err = trgb_err exclude_dict = {'trgb': trgb, 'trgb_err': trgb_err, 'factor': factor} if indices is False and completeness_correction is False: nrgb = len(nrgb) nagb = len(nagb) return nrgb, nagb, exclude_dict def convert_match_to_trilegal_sfh(sfr_dir, fileorigin='match-hmc', search_str='*.sfh', make_trilegal_sfh_kw=None): ''' call StarFormationHistories.make_trilegal_sfh for a number of files in a directory. 
ARGS: sfr_dir base location of match sfr files fileorigin 'match' sfr fileorigin, match, match-grid, match-hmc search_str '*.sfh' extension of match sfh file make_trilegal_sfh_kw {'random_sfr': False, 'random_z': False} kwargs to send to make_trilegal_sfh RETURNS: list of trilegal SFR files written ''' # make_trilegal_sfh kwargs will set both randoms to False by default make_trilegal_sfh_kw = make_trilegal_sfh_kw or {} make_trilegal_sfh_kw = dict({'random_sfr': False, 'random_z': False}.items() + make_trilegal_sfh_kw.items()) # find match sfh files sfhs = rsp.fileIO.get_files(sfr_dir, search_str) # load SFHs SFHs = [StarFormationHistories(s, fileorigin) for s in sfhs] # write trilegal sfh tri_sfhs = [S.make_trilegal_sfh(**make_trilegal_sfh_kw) for S in SFHs] return tri_sfhs class AncientGalaxies(object): ''' This class is to set up a run for VarySFHs. write_trgb_table: writes a table with trgb and the number of agb stars and rgb stars (using number_of_stars). number_of_stars: finds the number of rgb and agb stars in the data. read_trgb_table: reads the table write_trgb_table writes into recarray ''' def __init__(self): pass def write_trgb_table(self, targets='ancients', mag_below_trgb=[2, 1.5], exclude_region='default', comp_table='default', completeness_correction=False): ''' write the trgb, trgb_err, and the number of stars (opt, ir) rgb and agb ''' if targets == 'ancients': if type(exclude_region) is list: ex_str = '_'.join((str(f) for f in exclude_region)) else: ex_str = str(exclude_region) if completeness_correction is True: ex_str += '_comp_corr' tstring = '%s_%s' % (targets, ex_str) targets = galaxy_tests.ancients() else: logger.error('need to write how to read other targets') comp_frac = False if mag_below_trgb == 'comp_frac': logger.info('using completeness limit for rgb norm mag.') comp_frac = True comp_data = tables.read_completeness_table(table=comp_table) fits_srcs = [snap_src + '/data/angst_no_trim', 'default'] factors = [] gal_dict = {} for i, band in enumerate(['opt', 'ir']): gals = rsp.Galaxies.galaxies([galaxy_tests.load_galaxy(t, band=band, fits_src=fits_srcs[i]) for t in targets]) if type(exclude_region) is list: exreg = exclude_region[i] else: exreg = exclude_region for gal in gals.galaxies: if 'ngc404-deep' == gal.target: gal.target = 'ngc404' else: gal.target = gal.target.lower() # use a local name so the mag_below_trgb list is not clobbered; # it is indexed again when writing the header below if comp_frac is True: mbt = rsp.fileIO.item_from_row(comp_data, 'target', gal.target, '%s_filter2' % band) else: mbt = mag_below_trgb[i] nrgb, nagb, ex_dict = number_of_stars(gal, comp_frac=comp_frac, mag_below_trgb=mbt, exclude_region=exreg, completeness_correction=completeness_correction) mag_max = gal.mag2.max() mag_min = gal.mag2.min() nbins = np.sqrt(len(gal.mag2)) gdict = {'%s_trgb' % band: gal.trgb, '%s_trgb_err' % band: ex_dict['trgb_err'], 'n%s_rgb' % band: nrgb, 'n%s_agb' % band: nagb, 'n%s_bins' % band: nbins, '%s_max' % band: mag_max, '%s_min' % band: mag_min} try: gal_dict[gal.target].update(gdict) except KeyError: gal_dict[gal.target] = gdict factors.append(ex_dict['factor']) fmt = '%(target)s ' fmt += '%(opt_trgb).2f %(opt_trgb_err).2f %(nopt_rgb)i %(nopt_agb)i ' fmt += '%(ir_trgb).2f %(ir_trgb_err).2f %(nir_rgb)i %(nir_agb)i ' fmt += '%(opt_max).2f %(ir_max).2f %(opt_min).2f %(ir_min).2f %(nopt_bins)i %(nir_bins)i \n' outfile = os.path.join(table_src, '%s_galaxies.dat' % tstring) if comp_frac is False: header = '# mags below trgb: optical=%.2f nir=%.2f \n' % (mag_below_trgb[0], mag_below_trgb[1]) else: header = '# mags below trgb set by 
completeness fraction. \n' if len(np.unique(factors)) > 1: header += '# excluded trgb region: %.2f * opt trgb mag_err \n' % factors[0] header += '# excluded trgb region: %.2f * ir trgb mag_err \n' % factors[1] else: header += '# excluded trgb region: %.2f * trgb mag_err \n' % factors[0] header += '# '+ fmt.replace('%(', '').replace(').2f', '').replace(')i', '').replace(')s', '') with open(outfile, 'w') as out: out.write(header) for k, v in gal_dict.items(): gal_dict[k]['target'] = k out.write(fmt % gal_dict[k]) logger.info('wrote %s' % outfile) return outfile def read_trgb_table(self, table_name): with open(table_name, 'r') as tf: col_keys = tf.readlines()[3].replace('#','').strip().split() dtype = [(c, '<f8') for c in col_keys] dtype[0] = ('target', '|S16') data = np.genfromtxt(table_name, dtype=dtype) self.data = data.view(np.recarray) # read the exclude factor around trgb_err with open(table_name, 'r') as f: lines = f.readlines() eline = [l for l in lines if 'excluded' in l] mline, = [l for l in lines if 'mags' in l] if len(eline) > 1: self.factor = np.array([el.split(':')[1].split('*')[0] for el in eline], dtype=float) else: # eline is a one-element list here factor = eline[0].split(':')[1].split('*')[0] self.factor = np.array([factor, factor], dtype=float) # read the mag_below offsets for each filter if 'completeness' in mline: self.offsets = [] else: opt_off = float(mline.split('=')[1].split('nir')[0]) nir_off = float(mline.strip().split('=')[-1]) self.offsets = [opt_off, nir_off] class StarFormationHistories(object): '''Make TRILEGAL star formation history files from MATCH''' def __init__(self, sfh_file, file_origin, sfr_files=None, sfr_file_loc=None, sfr_file_search_fmt=None): self.base, self.name = os.path.split(sfh_file) self.data = tables.parse_sfh_data(sfh_file, file_origin) self.file_origin = file_origin self.sfr_files = sfr_files if sfr_file_loc is not None: if sfr_file_search_fmt is not None: self.sfr_files = rsp.fileIO.get_files(sfr_file_loc, sfr_file_search_fmt) def random_draw_within_uncertainty(self, attr, npoints=2e5): ''' randomly draw values within uncertainty for an array ARGS: attr: string name of the array that also has attr_errm and attr_errp (p and m are important due to the sign). attr_errm: - err associated with each point on attr array attr_errp: same as attr_errm but the + err npoints: number of points to populate gaussian to sample RETURNS: array of values randomly picked within the uncertainties If errm and errp are equal, just returns a randomly chosen point (of npoints) of a gaussian with mean attr and sigma=attr_errm If not, will stitch two gaussians together at attr using sigma=attr_errm and sigma=attr_errp and return a random value from there. If one of the err values is zero, will just use the other half of the gaussian. If they are both zero, well, just returns attr. ''' if attr == 'mh': logger.warning('this method was designed for sfr, not [M/H]') # load in values this way in case I want to move this to its own # function if hasattr(self.data, attr): val_arr = self.data.__getattribute__(attr) errm_arr = self.data.__getattribute__('%s_errm' % attr) errp_arr = self.data.__getattribute__('%s_errp' % attr) else: val_arr = self.__getattribute__(attr) errm_arr = self.__getattribute__('%s_errm' % attr) errp_arr = self.__getattribute__('%s_errp' % attr) rand_arr = np.array([]) # don't want negative sfr values. If not sfr, don't care.
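# Illustration of the stitching below: for val=1.0, errm=0.1, errp=0.3
# (illustrative numbers), draws come from the upper half of N(1.0, 0.3)
# and the lower half of N(1.0, 0.1), so the sampled distribution is
# asymmetric about 1.0.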
if attr == 'sfr': lowlim = 0 else: lowlim = -np.inf for val, errm, errp in zip(val_arr, errm_arr, errp_arr): if errp == errm and errp > 0: # even uncertainties, easy. new_arr = np.random.normal(val, errp, npoints) elif errp != 0 and errm != 0: # stitch two gaussians together pos_gauss = np.random.normal(val, errp, npoints) neg_gauss = np.random.normal(val, errm, npoints) new_arr = np.concatenate([pos_gauss[pos_gauss >= val], neg_gauss[neg_gauss <= val]]) elif errp == 0 and errm != 0: # no positive uncertainties neg_gauss = np.random.normal(val, errm, npoints) new_arr = neg_gauss[neg_gauss <= val] elif errp != 0 and errm == 0: # no negative uncertainties pos_gauss = np.random.normal(val, errp, npoints) new_arr = pos_gauss[pos_gauss >= val] else: # um.. no errors, why was this called logger.warning('no uncertainties') new_arr = np.ones(4) * val new_arr = new_arr[new_arr > lowlim] rand_arr = np.append(rand_arr, random.choice(new_arr)) return rand_arr def interp_null_values(self): ''' If there is no SF, there is still some +err in SF. However, M/H is not constrained so is set to 0. Here we fill in values of M/H by interpolating the entire M/H vs age, this should be used as mean value with vdisp to the be the sigma in the gaussian distribution. I think it's reasonable since this is really just finding the -zinc law that MATCH assumes. ''' somesf, = np.nonzero(self.data.sfr != 0) #ff = interp1d(self.data.lagei[somesf], self.data.mh[somesf], # bounds_error=False) _, mh_interp = rsp.math_utils.extrap1d(self.data.lagei[somesf], self.data.mh[somesf], self.data.lagei) self.mh_interp = mh_interp return mh_interp def make_trilegal_sfh(self, random_sfr=False, random_z=False, zdisp=True, outfile='default', dry_run=False): ''' turn binned sfh in to trilegal sfh random_sfr: calls random_draw_within_uncertainty random_z: ''' # In MATCH [M/H] = log(Z/Zsun) with Zsun = 0.02 (see MATCH's makemod.cpp) # It doesn't matter if this is "correct". Stellar models have absolute Z. # Zsun is just a scaling that needs to be undone from MATCH to here. zsun = 0.02 if self.file_origin == 'match-old': random_sfr = False random_z = False zdisp = False if outfile == 'default': outfile = os.path.join(self.base, self.name.replace('.sfh', '.tri.dat')) if dry_run is True: return outfile age1a = 10 ** (self.data.lagei) age1p = 1.0 * 10 ** (self.data.lagei + 0.0001) age2a = 1.0 * 10 ** self.data.lagef age2p = 1.0 * 10 ** (self.data.lagef + 0.0001) if random_sfr is False: sfr = self.data.sfr else: sfr = self.random_draw_within_uncertainty('sfr') if random_z is False: mh = self.data.mh else: # HACK. Not using mh errs from MATCH. Untrustworthy. # Shifting instead from within dispersion. self.interp_null_values() disp = np.median(self.data.mh_disp[np.nonzero(self.data.mh_disp)])/2. 
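# np.random.normal(0, disp) with no size argument is a single scalar
# draw, so the whole interpolated [M/H] curve is shifted coherently
# (one offset per realization) rather than point by point.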
mh = self.mh_interp + np.random.normal(0, disp) metalicity = zsun * 10 ** mh if zdisp is True: zdisp = metalicity * np.median(self.data.mh_disp[np.nonzero(self.data.mh_disp)]) #zdisp = self.data.mh_disp fmt = '%.4e %.3e %.4f %.4f \n' else: zdisp = [''] * len(mh) fmt = '%.4e %.3e %.4f %s\n' with open(outfile, 'w') as out: for i in range(len(sfr)): if sfr[i] == 0: # this is just a waste of lines in TRILEGAL continue out.write(fmt % (age1a[i], 0.0, metalicity[i], zdisp[i])) out.write(fmt % (age1p[i], sfr[i], metalicity[i], zdisp[i])) out.write(fmt % (age2a[i], sfr[i], metalicity[i], zdisp[i])) out.write(fmt % (age2p[i], 0.0, metalicity[i], zdisp[i])) return outfile def load_random_arrays(self, attr_str): if 'sfr' in attr_str: col = 1 if 'mh' in attr_str or 'feh' in attr_str: col = 2 val_arrs = [np.genfromtxt(s, usecols=(col))[1::4] for s in self.sfr_files] if attr_str == 'mh': val_arrs = np.array([10**(val_arr/.2) for val_arr in val_arrs]) if attr_str == 'feh': val_arrs = np.array([convertz.convertz(feh=feh)[1] for feh in val_arrs]) return val_arrs def plot_sfh(self, attr_str, ax=None, outfile=None, yscale='linear', plot_random_arrays_kw=None, errorbar_kw=None, twoplots=False): ''' plot the data from the sfh file. ''' plot_random_arrays_kw = plot_random_arrays_kw or {} # set up errorbar plot errorbar_kw = errorbar_kw or {} errorbar_default = {'linestyle': 'steps-mid', 'lw': 3, 'color': 'darkred'} errorbar_kw = dict(errorbar_default.items() + errorbar_kw.items()) # load the plotting values and their errors, this could be generalized # and passed ... val_arr = self.data.__getattribute__(attr_str) errm_arr = self.data.__getattribute__('%s_errm' % attr_str) errp_arr = self.data.__getattribute__('%s_errp' % attr_str) if 'sfr' in attr_str: ylab = '${\\rm SFR\ (10^3\ M_\odot/yr)}$' val_arr *= 1e3 errm_arr *= 1e3 errp_arr *= 1e3 plot_random_arrays_kw['moffset'] = 1e3 elif 'm' in attr_str: ylab = '${\\rm [M/H]}$' elif 'fe' in attr_str: ylab = '${\\rm [Fe/H]}$' # mask 0 values so there is a vertical line on the plot val_arr[val_arr==0] = 1e-15 errm_arr[errm_arr==0] = 1e-15 errp_arr[errp_arr==0] = 1e-15 if twoplots is True: fig, axs = plt.subplots(figsize=(8, 8), nrows=2, sharex=True) else: fig, ax = plt.subplots(figsize=(8, 5)) axs = [ax] for ax in axs: if not 'm' in attr_str: ax.errorbar(self.data.lagei, val_arr, [errm_arr, errp_arr], zorder=100, **errorbar_kw) if len(plot_random_arrays_kw) > 0: # if loading the random arrays from files, need to give the # attribute to load. if plot_random_arrays_kw['from_files'] is True: plot_random_arrays_kw['attr_str'] = attr_str self.plot_random_arrays(ax=ax, **plot_random_arrays_kw) ax.set_ylabel(ylab, fontsize=20) ax.set_xlim(7.9, 10.13) target = self.name.split('.')[0].upper().replace('-', '\!-\!') if twoplots is True: ax = axs[1] # lower plot limit doesn't need to be 1e-15... axs[0].set_ylim(1e-7, axs[0].get_ylim()[1]) axs[0].set_yscale('log') fig.subplots_adjust(hspace=0.09) else: fig.subplots_adjust(bottom=0.15) ax.annotate('$%s$' % target, (0.02, 0.97), va='top', xycoords='axes fraction', fontsize=16) ax.set_xlabel('$\log {\\rm Age (yr)}$', fontsize=20) plt.tick_params(labelsize=16) if outfile is not None: plt.savefig(outfile, dpi=150) return ax def plot_random_arrays(self, ax=None, val_arrs=None, from_files=False, attr_str=None, yscale='linear', moffset=1.): ''' val_arrs are random after making a bunch of arrays that sample the sfr or mh uncertainties plot up where they are. 
''' if from_files is True: val_arrs = self.load_random_arrays(attr_str) assert val_arrs is not None, 'either specify val_arrs or set from_files' if ax is None: fig, ax = plt.subplots(figsize=(12,12)) [ax.errorbar(self.data.lagei, val_arrs[i] * moffset, linestyle='steps-mid', color='k', alpha=0.2) for i in range(len(val_arrs))] ax.set_yscale(yscale) return ax def make_many_trilegal_sfhs(self, nsfhs=100, mk_tri_sfh_kw=None): ''' make nsfhs number of trilegal sfh input files. ''' mk_tri_sfh_kw = mk_tri_sfh_kw or {} if nsfhs > 1: self.mc = True outfile_fmt = mk_tri_sfh_kw.get('outfile_fmt', 'default') if outfile_fmt == 'default': outfile_fmt = self.name.replace('.sfh', '%03d.tri.dat') # need to iterate outside of dict.. see below. try: del mk_tri_sfh_kw['outfile_fmt'] except KeyError: pass # update any passed kw to make_trilegal_sfh with defaults. mk_tri_sfh_kw = dict({'random_sfr': True, 'random_z': False}.items() + mk_tri_sfh_kw.items()) outfiles = [self.make_trilegal_sfh(outfile=outfile_fmt % i, **mk_tri_sfh_kw) for i in range(nsfhs)] return outfiles def compare_tri_match(self, trilegal_catalog, filter1, filter2, outfig=None): ''' Two plots, one M/H vs Age for match and trilegal, the other sfr for match vs age and number of stars of a given age for trilegal. ''' sgal = rsp.Galaxies.simgalaxy(trilegal_catalog, filter1=filter1, filter2=filter2) sgal.lage = sgal.data.get_col('logAge') sgal.mh = sgal.data.get_col('[M/H]') issfr, = np.nonzero(self.sfr > 0) age_bins = np.digitize(sgal.lage, self.lagef[issfr]) mean_mh = [np.mean(sgal.mh[age_bins==i]) for i in range(len(issfr))] bins = self.lagei sfr = np.array(np.histogram(sgal.lage, bins=bins)[0], dtype=float) fig, (ax1, ax2) = plt.subplots(figsize=(8,8), ncols=2, sharex=True) # should be density, weighted by number anyway.. 
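# TRILEGAL [M/H] is plotted per star below, so well-populated ages
# dominate the scatter; the binned means reduce that to one value per
# MATCH age bin for a direct comparison with the MATCH [M/H] run.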
ax1.plot(sgal.lage, sgal.mh, '.', color='grey') ax1.plot(self.lagei[issfr], mean_mh, linestyle='steps', color='navy', lw=3, label='TRILEGAL') ax1.plot(self.lagei[issfr], self.mh[issfr], linestyle='steps', lw=3, color='k', label='MATCH') ax1.fill_between(self.lagei[issfr], self.mh[issfr] + self.mh_disp[issfr], self.mh[issfr] - self.mh_disp[issfr], lw=2, color='red', alpha=0.2) ax1.set_ylabel('$[M/H]$', fontsize=20) ax1.set_xlabel('$\log {\\rm Age (yr)}$', fontsize=20) ax1.legend(loc=0, frameon=False) ax2.plot(bins[:-1], sfr/(np.sum(sfr)), linestyle='steps', color='navy', lw=3, label='TRILEGAL') ax2.plot(self.lagei, self.sfr/np.sum(self.sfr), linestyle='steps', lw=2, color='k', label='MATCH') ax2.set_ylabel('$ {\propto \\rm SFR}$', fontsize=20) ax2.set_xlabel('$\log {\\rm Age (yr)}$', fontsize=20) ax2.legend(loc=0, frameon=False) ax2.set_xlim(8, 10.5) if outfig is not None: fig.savefig(outfig, dpi=150) class FileIO(object): def __init__(self): pass def check_target(self, target): if target is None: assert hasattr(self, 'target'), \ 'need to pass target or have attribute' else: self.target = target return def load_data_for_normalization(self, target=None, ags=None): self.check_target(target) '''load the numbers of data rgb and agb stars from self.ags''' if ags is None: ags = self.ags if len(ags.offsets) == 0: self.comp_data = tables.read_completeness_table() column_names = ags.data.dtype.names if '404' in self.target: target = self.target.replace('-deep', '') else: target = self.target row, = np.nonzero(ags.data['target'] == target) try: [self.__setattr__('%s' % c, ags.data[row]['%s' % c][0]) for c in column_names if c != 'target'] except IndexError: '%s not found in data table' % target if len(ags.offsets) == 0: self.ir_offset = rsp.fileIO.item_from_row(self.comp_data, 'target', target.upper(), 'ir_filter2') self.opt_offset = rsp.fileIO.item_from_row(self.comp_data, 'target', target.upper(), 'opt_filter2') else: self.ir_offset = self.ir_trgb + ags.offsets[1] self.opt_offset = self.opt_trgb + ags.offsets[0] self.opt_bins = np.arange(self.opt_min, self.opt_max, 0.1) self.ir_bins = np.arange(self.ir_min, self.ir_max, 0.1) return self.nopt_rgb, self.nir_rgb def load_lf_file(self, lf_file): with open(lf_file, 'r') as lff: lines = [l.strip() for l in lff.readlines() if not l.startswith('#')] hists = [np.array(l.split(), dtype=float) for l in lines[0::2]] binss = [np.array(l.split(), dtype=float) for l in lines[1::2]] return hists, binss def load_galaxies(self, hist_it_up=True, target=None, ags=None, color_cut=False, completeness_correction=False): self.check_target(target) if not hasattr(self, 'opt_bins'): self.load_data_for_normalization(ags=ags) ir_gal = galaxy_tests.load_galaxy(self.target, band='ir') fits_src = snap_src + '/data/angst_no_trim' opt_gal = galaxy_tests.load_galaxy(self.target, band='opt', fits_src=fits_src) # make galaxy histograms if color_cut is True: filter1 = get_filter1(target) opt_color_inds = np.nonzero(opt_gal.color > get_color_cut(filter1)) ir_color_inds = np.nonzero(ir_gal.color > get_color_cut('F110W')) opt_gal.color_cut = opt_color_inds ir_gal.color_cut = ir_color_inds if hist_it_up is True: opt_gal.hist, opt_gal.bins = galaxy_tests.hist_it_up(opt_gal.mag2, threash=5) ir_gal.hist, ir_gal.bins = galaxy_tests.hist_it_up(ir_gal.mag2, threash=5) else: #nbins = (np.max(opt_gal.mag2) - np.min(opt_gal.mag2)) / 0.1 if color_cut is False: opt_gal.hist, opt_gal.bins = np.histogram(opt_gal.mag2, bins=self.opt_bins) ir_gal.hist, ir_gal.bins = np.histogram(ir_gal.mag2, 
bins=self.ir_bins) else: opt_gal.hist, opt_gal.bins = \ np.histogram(opt_gal.mag2[opt_color_inds], bins=self.opt_bins) ir_gal.hist, ir_gal.bins = \ np.histogram(ir_gal.mag2[ir_color_inds], bins=self.ir_bins) if completeness_correction is True: ast_dict = tables.read_completeness_corrections() opt_correction = ast_dict[target]['opt_correction'][:-1] ir_correction = ast_dict[target]['ir_correction'][:-1] opt_gal.hist *= 1. / opt_correction ir_gal.hist *= 1. / ir_correction return opt_gal, ir_gal def load_trilegal_data(self): '''load trilegal F814W and F160W mags''' if hasattr(self, 'target'): filter1 = get_filter1(self.target) else: logger.error('need filter1, but no target is set') opt_mag = self.sgal.data.get_col('F814W') ir_mag = self.sgal.data.get_col('F160W') opt_mag1 = self.sgal.mag1 ir_mag1 = self.sgal.data.get_col('F110W') opt_color_cut, = \ np.nonzero((opt_mag1 - opt_mag) > get_color_cut(filter1)) ir_color_cut, = \ np.nonzero((ir_mag1 - ir_mag) > get_color_cut('F110W')) self.opt_color_cut = opt_color_cut self.ir_color_cut = ir_color_cut self.shift_mags(opt_inds=opt_color_cut, ir_inds=ir_color_cut) return def read_trilegal_catalog(self, trilegal_output, filter1='F606W'): '''read the trilegal cat; mag1 and mag2 are optical.''' self.sgal = rsp.Galaxies.simgalaxy(trilegal_output, filter1=filter1, filter2='F814W') return def shift_mags(self, opt_inds=None, ir_inds=None): '''shift mags so the simulated RGB edge agrees with the data trgb''' opt_mag = self.sgal.data.get_col('F814W') ir_mag = self.sgal.data.get_col('F160W') if opt_inds is None: opt_inds = np.arange(len(opt_mag)) if ir_inds is None: ir_inds = np.arange(len(ir_mag)) # The simulated RGB edge is the first bin with more than rgb_thresh stars. rgb_thresh = 5. self.sgal.all_stages('RGB') rgb_inds = np.intersect1d(self.sgal.irgb, opt_inds) rgb_bins = np.arange(10, 30, 0.01) rgb_hist, _ = np.histogram(opt_mag[rgb_inds], bins=rgb_bins) rgb_bin_edge = np.nonzero(rgb_hist > rgb_thresh)[0][0] - 1 opt_offset = rgb_bins[rgb_bin_edge] - self.opt_trgb rgb_inds = np.intersect1d(self.sgal.irgb, ir_inds) rgb_hist, _ = np.histogram(ir_mag[rgb_inds], bins=rgb_bins) rgb_bin_edge = np.nonzero(rgb_hist > rgb_thresh)[0][0] - 1 ir_offset = rgb_bins[rgb_bin_edge] - self.ir_trgb # HERE's A HACK FOR NO OFFSETS!!! #ir_offset = 0. #opt_offset = 0. logger.debug('IR OFFSET: %f' % ir_offset) logger.debug('OPT OFFSET: %f' % opt_offset) self.ir_moffset = ir_offset self.opt_moffset = opt_offset self.opt_mag = opt_mag[opt_inds] - opt_offset self.ir_mag = ir_mag[ir_inds] - ir_offset return class VarySFHs(StarFormationHistories, AncientGalaxies, FileIO): ''' run several variations of the age sfr z from MATCH SFH to produce simulated CMDs, LFs, and nagb/nrgb ratios. can make plots, and save summary files. First, use AncientGalaxies to create a table of trgb and nrgb and nagb stars from data and pass this as the table_file. NOTE: AncientGalaxies just takes all the stars within some mag below trgb. Something better is needed for recent SF. The main method is vary_the_SFH, which sets up the files, calls StarFormationHistories and calls the other methods to write out the LFs and ratios. ''' def __init__(self, galaxy_input, sfh_file, file_origin, cmd_input_file, nsfhs, outfile_loc='default', filter1=None, extra_str='', target=None, table_file='default', just_once=False): ''' galaxy_input is a template. 
''' # load SFH instance to make lots of trilegal runs if just_once is False: StarFormationHistories.__init__(self, sfh_file, file_origin) # add information to self, in other words, save # to use in plotting and stats later if target is None: gname = os.path.split(galaxy_input)[1] target = gname.split('_')[1].replace('.dat', '').lower() self.target = target self.filter1 = get_filter1(self.target) self.galaxy_input = galaxy_input self.cmd_input_file = cmd_input_file self.nsfhs = nsfhs self.mc = False if self.nsfhs > 1: self.mc = True self.extra_str = extra_str # exclude regions and the number of data rgb and agb stars self.ags = load_default_ancient_galaxies(table_file=table_file) self.load_data_for_normalization() # 90% (or whatever) completeness magnitudes if len(self.ags.offsets) == 0: self.comp_data = tables.read_completeness_table() # setup the locations all the files to write and read from self.outfile_loc, self.fnames, self.agb_mod = \ setup_files(cmd_input_file, outfile_loc=outfile_loc, extra_str=extra_str, mc=self.mc, target=self.target) # header files are needed in two cases # nagb/nrgb ratio file self.narratio_header = '# target nopt_rgb nopt_agb nir_rgb nir_agb ' self.narratio_header += 'opt_ar_ratio ir_ar_ratio opt_ar_ratio_err ' self.narratio_header += 'ir_ar_ratio_err \n' # contamination of phases in rgb and agb region file # (changing the self.regions will disrupt calculation of # rheb_eagb_contamination -- see contamination_by_phases code) self.regions = ['MS', 'RGB', 'HEB', 'BHEB', 'RHEB', 'EAGB', 'TPAGB'] self.contam_header = '# %s %s \n' % (' '.join(self.regions),'Total') def prepare_trilegal_sfr(self, make_many_kw=None): '''call make_many_trilegal_sfhs''' make_many_kw = make_many_kw or {} self.sfr_files = self.make_many_trilegal_sfhs(**make_many_kw) def prepare_galaxy_input(self, object_mass=None, dry_run=False): ''' write the galaxy input file from a previously written template. simply overwrites the filename line to link to the new sfr file. ''' self.galaxy_inputs = [] lines = open(self.galaxy_input).readlines() # line that links to sfr file. 
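# (template convention assumed from the indexing below: the 3rd-to-last
# line of the galaxy input holds the SFR file name plus extra columns,
# and the 6th-to-last holds the object mass)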
extra = ' '.join(lines[-3].split(' ')[1:]) if object_mass is not None: extra2 = ' '.join(lines[-6].split()[1:]) for i in range(len(self.sfr_files)): lines[-3] = ' '.join([self.sfr_files[i], extra]) if object_mass is not None: lines[-6] = ' '.join(['%.4e' % object_mass, extra2]) new_name = \ os.path.split(self.galaxy_input)[1].replace('.dat', '_%003i.dat' % i) new_out = os.path.join(self.outfile_loc, new_name) if dry_run is False: with open(new_out, 'w') as f: f.write(''.join(lines)) logger.info('wrote %s' % new_out) self.galaxy_inputs.append(new_out) def gather_results(self, mass_met=True, tpagb_lf=True, narratio_dict=None): '''gather results into strings or lists of strings for writing.''' result_dict = {} if tpagb_lf is True: # load mags if not hasattr(self, 'opt_mag'): self.load_trilegal_data() opt_hist, opt_bins = np.histogram(self.opt_mag, bins=self.opt_bins) ir_hist, ir_bins = np.histogram(self.ir_mag, bins=self.ir_bins) # scale the simulated LF to match the data LF opt_hist = np.array(opt_hist, dtype=float) * self.opt_norm ir_hist = np.array(ir_hist, dtype=float) * self.ir_norm result_dict['opt_lf_line'] = \ '\n'.join([' '.join(['%g' % t for t in opt_hist]), ' '.join(['%g' % t for t in opt_bins[1:]])]) result_dict['ir_lf_line'] = \ '\n'.join([' '.join(['%g' % t for t in ir_hist]), ' '.join(['%g' % t for t in ir_bins[1:]])]) if mass_met is True: self.sgal.all_stages('TPAGB') opt_inds = self.sgal.itpagb ir_inds = self.sgal.itpagb mag2 = self.sgal.mag2 mag4 = self.sgal.data.get_col('F160W') opt_key = 'opt_mass_met_line' ir_key = 'ir_mass_met_line' for mag, inds, key in zip([mag2, mag4], [opt_inds, ir_inds], [opt_key, ir_key]): mass = self.sgal.data.get_col('m_ini')[inds] mag = mag[inds] mh = self.sgal.data.get_col('[M/H]')[inds] result_dict[key] = \ '\n'.join([' '.join(['%g' % t for t in mag]), ' '.join(['%g' % t for t in mass]), ' '.join(['%.3f' % t for t in mh])]) # N agb/rgb ratio file narratio_dict = narratio_dict or {} if len(narratio_dict) > 0: self.narratio_header = '# target nopt_rgb nopt_agb nir_rgb nir_agb ' self.narratio_header += 'opt_ar_ratio ir_ar_ratio opt_ar_ratio_err ' self.narratio_header += 'ir_ar_ratio_err \n' narratio_fmt = '%(target)s %(nopt_rgb)i %(nopt_agb)i %(nir_rgb)i ' narratio_fmt += '%(nir_agb)i %(opt_ar_ratio).3f %(ir_ar_ratio).3f ' narratio_fmt += '%(opt_ar_ratio_err).3f %(ir_ar_ratio_err).3f' opt_rgb = narratio_dict['opt_rgb'] opt_agb = narratio_dict['opt_agb'] ir_rgb = narratio_dict['ir_rgb'] ir_agb = narratio_dict['ir_agb'] nopt_rgb = float(len(opt_rgb)) nopt_agb = float(len(opt_agb)) nir_rgb = float(len(ir_rgb)) nir_agb = float(len(ir_agb)) out_dict = {'target': self.target, 'opt_ar_ratio': nopt_agb / nopt_rgb, 'ir_ar_ratio': nir_agb / nir_rgb, 'opt_ar_ratio_err': galaxy_tests.count_uncert_ratio(nopt_agb, nopt_rgb), 'ir_ar_ratio_err': galaxy_tests.count_uncert_ratio(nir_agb, nir_rgb), 'nopt_rgb': nopt_rgb, 'nopt_agb': nopt_agb, 'nir_rgb': nir_rgb, 'nir_agb': nir_agb} result_dict['narratio_line'] = narratio_fmt % out_dict return result_dict def do_normalization(self, filter1=None, trilegal_output=None, hist_it_up=False, debug=False): '''Do the normalization and save small part of outputs.''' if not hasattr(self, 'sgal') or self.mc is True: assert trilegal_output is not None, \ 'need sgal loaded or pass trilegal catalog file name' if filter1 is None: filter1 = self.filter1 self.read_trilegal_catalog(trilegal_output, filter1=filter1) self.load_trilegal_data() # select rgb and agb regions sopt_rgb, sir_rgb, sopt_agb, sir_agb = \ rgb_agb_regions(self.sgal, 
self.opt_offset, self.opt_trgb, self.opt_trgb_err, self.ags, self.ir_offset, self.ir_trgb, self.ir_trgb_err, self.opt_mag, self.ir_mag) # normalization self.opt_norm, self.ir_norm, opt_rgb, ir_rgb, opt_agb, ir_agb = \ normalize_simulation(self.opt_mag, self.ir_mag, self.nopt_rgb, self.nir_rgb, sopt_rgb, sir_rgb, sopt_agb, sir_agb) narratio_dict = {'opt_rgb': opt_rgb, 'opt_agb': opt_agb, 'ir_rgb': ir_rgb, 'ir_agb': ir_agb} result_dict = self.gather_results(narratio_dict=narratio_dict) if debug is True: return narratio_dict, (sopt_rgb, sopt_agb, sir_rgb, sir_agb), result_dict else: return (sopt_rgb, sopt_agb, sir_rgb, sir_agb), result_dict def vary_the_SFH(self, make_many_kw=None, hist_it_up=False): ''' make the sfhs, make the galaxy inputs, run trilegal. For no trilegal runs, set dry_run True. ''' make_many_kw = make_many_kw or {} if not 'mk_tri_sfh_kw' in make_many_kw.keys(): make_many_kw['mk_tri_sfh_kw'] = {} dry_run = make_many_kw['mk_tri_sfh_kw'].get('dry_run', False) new_fmt = self.target + '_tri_%003i.sfr' sfr_outfilefmt = os.path.join(self.outfile_loc, new_fmt) make_many_kw = dict({'nsfhs': self.nsfhs}.items() + \ make_many_kw.items()) make_many_kw['mk_tri_sfh_kw'] = \ dict({'outfile_fmt': sfr_outfilefmt}.items() + \ make_many_kw['mk_tri_sfh_kw'].items()) self.prepare_trilegal_sfr(make_many_kw=make_many_kw) self.prepare_galaxy_input(dry_run=dry_run) tname = os.path.join(self.outfile_loc, 'output_%s_%s' % (self.target, self.agb_mod)) trilegal_output_fmt = tname + '_%003i.dat' result_dicts = [] for galaxy_input in self.galaxy_inputs: num = int(galaxy_input.split('_')[-1].replace('.dat', '')) trilegal_output = trilegal_output_fmt % num rsp.TrilegalUtils.run_trilegal(self.cmd_input_file, galaxy_input, trilegal_output, rmfiles=False, dry_run=dry_run) norm_out, result_dict = \ self.do_normalization(filter1=self.filter1, trilegal_output=trilegal_output, hist_it_up=hist_it_up) #self.binary_contamination(opt_agb, ir_agb) result_dict['contam_line'] = self.contamination_by_phases(*norm_out) result_dicts.append(result_dict) if dry_run is False: lastnum = int(num) - 1 if os.path.isfile(trilegal_output_fmt % lastnum) is True: os.remove(trilegal_output_fmt % lastnum) result = combine_list_of_dictionaries(result_dicts) return result def write_results(self, res_dict): '''writes out the results to self.fnames (see __init__)''' for fname in self.fnames: with open(fname, 'a') as fh: fshort = fname.split(self.target)[-1][1:].replace('.dat', '').replace(self.extra_str, '') if 'narratio' in fname: fh.write(self.narratio_header) if 'contam' in fname: fh.write(self.contam_header) [fh.write('%s \n' % l)for l in res_dict['%s_line' % fshort]] return def contamination_by_phases(self, sopt_rgb, sopt_agb, sir_rgb, sir_agb, diag_plot=False): self.sgal.all_stages() indss = [self.sgal.__getattribute__('i%s' % r.lower()) for r in self.regions] line = '' contam_line = [] if diag_plot is True: fig, (axs) = plt.subplots(ncols=2) for i, (rgb, agb, inds) in enumerate(zip([sopt_rgb, sir_rgb], [sopt_agb, sir_agb], [self.opt_color_cut, self.ir_color_cut])): if i == 1: band = 'ir' mag = self.sgal.data.get_col('F160W')[inds] else: band = 'opt' mag = self.sgal.data.get_col('F814W')[inds] ncontam_rgb = [list(set(s) & set(inds) & set(rgb)) for s in indss] ncontam_agb = [list(set(s) & set(inds) & set(agb)) for s in indss] rheb_eagb_contam = len(ncontam_rgb[4]) + len(ncontam_rgb[5]) frac_rheb_eagb = float(rheb_eagb_contam) / \ float(np.sum([len(n) for n in ncontam_rgb])) heb_rgb_contam = len(ncontam_rgb[2]) frac_heb_rgb_contam = 
float(heb_rgb_contam) / \ float(np.sum([len(n) for n in ncontam_rgb])) mags = [mag[n] if len(n) > 0 else np.zeros(10) for n in ncontam_rgb] mms = np.concatenate(mags) ms, = np.nonzero(mms > 0) bins = np.linspace(np.min(mms[ms]), np.max(mms[ms]), 10) if diag_plot is True: [axs[i].hist(mags, bins=bins, alpha=0.5, stacked=True, label=self.regions)] nrgb_cont = np.array([len(n) for n in ncontam_rgb], dtype=int) nagb_cont = np.array([len(n) for n in ncontam_agb], dtype=int) line += 'rgb %s %s %i \n' % (band, ' '.join(map(str, nrgb_cont)), np.sum(nrgb_cont)) line += 'agb %s %s %i \n' % (band, ' '.join(map(str, nagb_cont)), np.sum(nagb_cont)) line += '# rgb eagb contamination: %i \n' % rheb_eagb_contam line += '# frac of total in rgb region: %.3f \n' % frac_rheb_eagb line += '# rc contamination: %i \n' % heb_rgb_contam line += '# frac of total in rgb region: %.3f \n' % \ frac_heb_rgb_contam logger.info(line) contam_line.append(line) if diag_plot is True: axs[0].legend(numpoints=1, loc=0) axs[0].set_title(self.target) plt.savefig('contamination_%s.png' % self.target, dpi=150) return line def binary_contamination(self, sopt_agb, sir_agb): '''fraction of binaries in trgb (not implemented)''' binaries, = np.nonzero(self.sgal.data.get_col('m2/m1') > 0) self.sgal.all_stages() rgb_binaries = np.intersect1d(self.sgal.irgb, binaries) nopt_binaries_above_trgb = float(len(np.intersect1d(sopt_agb, rgb_binaries))) nir_binaries_above_trgb = float(len(np.intersect1d(sir_agb, rgb_binaries))) tpagb_opt_contamination = nopt_binaries_above_trgb/float(len(sopt_agb)) tpagb_ir_contamination = nir_binaries_above_trgb/float(len(sir_agb)) logger.info('number of binaries: %i' % len(binaries)) logger.info('number of rgb binaries in tpagb region opt, ir: %i, %i' % \ (nopt_binaries_above_trgb, nir_binaries_above_trgb)) logger.info('frac rgb binaries in tpagb, region: %.3f, %.3f' % \ (tpagb_opt_contamination, tpagb_ir_contamination)) return tpagb_opt_contamination, tpagb_ir_contamination def rgb_agb_regions(sgal, opt_offset, opt_trgb, opt_trgb_err, ags, ir_offset, ir_trgb, ir_trgb_err, opt_mag, ir_mag): # define RGB regions opt_low = opt_offset opt_mid = opt_trgb + opt_trgb_err * ags.factor[0] ir_low = ir_offset ir_mid = ir_trgb + ir_trgb_err * ags.factor[1] # Recovered stars in simulated RGB region. sopt_rgb = sgal.stars_in_region(opt_mag, opt_low, opt_mid) sir_rgb = sgal.stars_in_region(ir_mag, ir_low, ir_mid) # define AGB regions opt_mid = opt_trgb - opt_trgb_err * ags.factor[0] opt_high = 10. ir_mid = ir_trgb - ir_trgb_err * ags.factor[1] ir_high = 10. # Recovered stars in simulated AGB region. 
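# e.g. with opt_trgb=24.0, opt_trgb_err=0.1 and factor=2 (illustrative
# numbers), RGB stars are counted between the faint offset and mag 24.2,
# AGB stars between mag 23.8 and 10., leaving a +/-0.2 mag exclusion
# band around the TRGB.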
sopt_agb = sgal.stars_in_region(opt_mag, opt_mid, opt_high) sir_agb = sgal.stars_in_region(ir_mag, ir_mid, ir_high) return sopt_rgb, sir_rgb, sopt_agb, sir_agb def normalize_simulation(opt_mag, ir_mag, nopt_rgb, nir_rgb, sopt_rgb, sir_rgb, sopt_agb, sir_agb): opt_norm = nopt_rgb / float(len(sopt_rgb)) ir_norm = nir_rgb / float(len(sir_rgb)) logger.info('OPT Normalization: %f' % opt_norm) logger.info('IR Normalization: %f' % ir_norm) # random sample the data distribution rands = np.random.random(len(opt_mag)) opt_ind, = np.nonzero(rands < opt_norm) rands = np.random.random(len(ir_mag)) ir_ind, = np.nonzero(rands < ir_norm) # scaled rgb: norm + in rgb opt_rgb = list(set(opt_ind) & set(sopt_rgb)) ir_rgb = list(set(ir_ind) & set(sir_rgb)) # scaled agb opt_agb = list(set(opt_ind) & set(sopt_agb)) ir_agb = list(set(ir_ind) & set(sir_agb)) return opt_norm, ir_norm, opt_rgb, ir_rgb, opt_agb, ir_agb class Plotting(object): def __init__(self, vSFH): self.files = FileIO() bands = ['opt', 'ir'] keys = ['trgb', 'trgb_err', 'offset'] key = ['%s_%s' % (b, k) for b, k in itertools.product(bands, keys)] key += ['target', 'agb_mod', 'ags', 'fnames'] [self.__setattr__(k, vSFH.__getattribute__(k)) for k in key] self.opt_lf_file, = [f for f in self.fnames if 'opt_lf' in f] self.ir_lf_file, = [f for f in self.fnames if 'ir_lf' in f] self.narratio_file, = [f for f in self.fnames if 'narratio' in f] def plot_lf_file(self, opt_lf_file, ir_lf_file, axs=None, plt_kw=None, opt_limit=None, ir_limit=None): '''needs work, but: plot the lf files.''' # set up the plot plt_kw = plt_kw or {} plt_kw = dict({'linestyle': 'steps-mid', 'color': 'black', 'alpha': 0.2}.items() + plt_kw.items()) label = '%s' % model_plots.translate_model_name(os.path.split(opt_lf_file)[1].split('_')[2].upper()) plt_kw_lab = dict(plt_kw.items() + {'label': label}.items()) if axs is None: fig, (axs) = plt.subplots(ncols=2, figsize=(12, 6)) plt.subplots_adjust(right=0.95, left=0.05, wspace=0.1) # these have like 50 histograms each opt_hists, opt_binss = self.files.load_lf_file(self.opt_lf_file) ir_hists, ir_binss = self.files.load_lf_file(self.ir_lf_file) for i, (hists, binss, limit) in enumerate(zip([opt_hists, ir_hists], [opt_binss, ir_binss], [opt_limit, ir_limit])): for j, (hist, bins) in enumerate(zip(hists, binss)): if j != 0: kw = plt_kw else: kw = plt_kw_lab if limit is not None: inds, = np.nonzero(bins <= limit) axs[i].plot(bins[inds], hist[inds], **kw) else: axs[i].plot(bins, hist, **kw) return axs def count_stars_from_hist(self, opt_hist, opt_bins, ir_hist, ir_bins): ratio_data = {} for i, (hist, bins, band) in enumerate(zip([opt_hist, ir_hist], [opt_bins, ir_bins], ['opt', 'ir'])): trgb = self.__getattribute__('%s_trgb' % band) trgb_err = self.__getattribute__('%s_trgb_err' % band) norm = self.__getattribute__('%s_offset' % band) irgb = rsp.math_utils.between(bins, norm, trgb + trgb_err * self.ags.factor[i]) iagb = rsp.math_utils.between(bins, trgb - trgb_err * self.ags.factor[i], 10.) 
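# these bin selections mirror rgb_agb_regions: RGB between the
# normalization offset and trgb + trgb_err * factor, AGB brighter than
# trgb - trgb_err * factor.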
nrgb = np.sum(hist[irgb]) nagb = np.sum(hist[iagb]) ratio_data['%s_ar_ratio' % band] = nagb / nrgb ratio_data['%s_ar_ratio_err' % band] = \ galaxy_tests.count_uncert_ratio(nagb, nrgb) ratio_data['n%s_rgb' % band] = nrgb ratio_data['n%s_agb'% band] = nagb return ratio_data def add_narratio_to_plot(self, ax, band, ratio_data): stext_kw = dict({'color': 'black', 'fontsize': 14, 'ha': 'center'}.items() + rsp.graphics.GraphicsUtils.ann_kwargs.items()) dtext_kw = dict(stext_kw.items() + {'color': 'darkred'}.items()) nrgb = rsp.fileIO.item_from_row(self.ags.data, 'target', self.target, 'n%s_rgb' % band) nagb = rsp.fileIO.item_from_row(self.ags.data, 'target', self.target, 'n%s_agb' % band) dratio = nagb / nrgb dratio_err = galaxy_tests.count_uncert_ratio(nagb, nrgb) #yval = 1.2 # text yloc found by eye, depends on fontsize stext_kw['transform'] = ax.transAxes dtext_kw['transform'] = ax.transAxes yval = 0.95 xagb_val = 0.17 xrgb_val = 0.5 xratio_val = 0.83 xvals = [xagb_val, xrgb_val, xratio_val] # simulated nrgb and nagb are the mean values srgb_text = '$\langle N_{\\rm RGB}\\rangle =%i$' % \ np.mean(ratio_data['n%s_rgb' % band]) sagb_text = '$\langle N_{\\rm TP-AGB}\\rangle=%i$' % \ np.mean(ratio_data['n%s_agb' % band]) # one could argue taking the mean isn't the best idea for # the ratio errors. sratio_text = '$f=%.3f\pm%.3f$' % \ (np.mean(ratio_data['%s_ar_ratio' % band]), np.mean(ratio_data['%s_ar_ratio_err' % band])) drgb_text = '$N_{\\rm RGB}=%i$' % nrgb dagb_text = '$N_{\\rm TP-AGB}=%i$' % nagb dratio_text = '$f = %.3f\pm%.3f$' % (dratio, dratio_err) textss = [[sagb_text, srgb_text, sratio_text], [dagb_text, drgb_text, dratio_text]] kws = [stext_kw, dtext_kw] for kw, texts in zip(kws, textss): for xval, text in zip(xvals, texts): ax.text(xval, yval, text, **kw) yval -= .05 # stack the text return ax def plot_by_stage(self, ax1, ax2, add_stage_lfs='default', stage_lf_kw=None, cols=None, trilegal_output=None, hist_it_up=False, narratio=True): if add_stage_lfs == 'all': add_stage_lfs = ['MS', 'RGB', 'HEB', 'RHEB', 'BHEB', 'EAGB', 'TPAGB'] if add_stage_lfs == 'default': add_stage_lfs = ['RGB', 'EAGB', 'TPAGB'] nstages = len(add_stage_lfs) stage_lf_kw = stage_lf_kw or {} stage_lf_kw = dict({'linestyle': 'steps', 'lw': 2}.items() + stage_lf_kw.items()) if hasattr(stage_lf_kw, 'label'): stage_lf_kw['olabel'] = stage_lf_kw['label'] if cols is None: if nstages < 3: cmap = brewer2mpl.get_map('Paired', 'Qualitative', 3) cols = cmap.mpl_colors[0::2] else: cols = color_scheme # load the trilegal catalog if it is given, if it is given, # no LF scaling... need to save this info better. Currently only # in log files. if trilegal_output is not None: self.files.read_trilegal_catalog(trilegal_output, filter1=get_filter1(self.target)) self.files.load_trilegal_data() self.opt_norm = 1. self.ir_norm = 1. 
self.files.load_data_for_normalization(target=self.target, ags=self.ags) assert hasattr(self.files, 'opt_mag'), \ 'Need opt_mag or trilegal_output' for ax, mag, norm, sinds, bins in \ zip([ax1, ax2], [self.files.sgal.data.get_col('F814W')-self.files.opt_moffset, self.files.sgal.data.get_col('F160W')-self.files.ir_moffset], [self.opt_norm, self.ir_norm], [self.files.opt_color_cut, self.files.ir_color_cut], [self.files.opt_bins, self.files.ir_bins]): #self.files.sgal.make_lf(mag, stages=add_stage_lfs, bins=bins, # inds=sinds, hist_it_up=hist_it_up) self.files.sgal.make_lf(mag, stages=add_stage_lfs, bins=bins, hist_it_up=hist_it_up) #import pdb #pdb.set_trace() k = 0 for i in range(nstages): istage = add_stage_lfs[i].lower() try: hist = self.files.sgal.__getattribute__('i%s_lfhist' % istage) except AttributeError: continue # combine all HeB stages into one for a cleaner plot. if istage == 'heb': hist = \ np.sum([self.files.sgal.__getattribute__('i%s_lfhist' % jstage.lower()) for jstage in add_stage_lfs if 'heb' in jstage.lower()], axis=0) elif 'heb' in istage: continue bins = \ self.files.sgal.__getattribute__('i%s_lfbins' % istage) stage_lf_kw['color'] = cols[k] k += 1 stage_lf_kw['label'] = '$%s$' % istage.upper().replace('PA', 'P\!-\!A') norm_hist = hist # * norm norm_hist[norm_hist < 3] = 3 ax.plot(bins[:-1], norm_hist, **stage_lf_kw) sopt_hist, sopt_bins = self.files.sgal.make_lf(self.files.opt_mag, bins=self.files.opt_bins, hist_it_up=hist_it_up) sir_hist, sir_bins = self.files.sgal.make_lf(self.files.ir_mag, bins=self.files.ir_bins, hist_it_up=hist_it_up) # 3 is the plot limit... sopt_hist[sopt_hist < 3] = 3 sir_hist[sir_hist < 3] = 3 stage_lf_kw['color'] = 'black' if narratio is False: if not hasattr(stage_lf_kw, 'olabel'): lab = '$Total$' #if hasattr(self, 'agb_mod'): # model = self.agb_mod.split('_')[-1] # lab = model_plots.translate_model_name(model, small=True) stage_lf_kw['label'] = lab else: stage_lf_kw['label'] = stage_lf_kw['olabel'] ax1.plot(sopt_bins[:-1], sopt_hist, **stage_lf_kw) ax2.plot(sir_bins[:-1], sir_hist, **stage_lf_kw) return ax1, ax2 def compare_to_gal(self, hist_it_up=False, narratio=True, no_agb=False, add_stage_lfs=None, extra_str='', trilegal_output=None, plot_data=True, cols=None, stage_lf_kw=None, axs=None, plt_kw=None, plot_models=True, completeness_correction=False): ''' Plot the LFs and galaxy LF. ARGS: hist_it_up: Use hist_it_up or bayseyn blocks narratio: overlay NRGB, NAGB, and NAGB/NRGB +/- err no_agb: plot the LF without AGB stars add_stage_lfs: (list) add LF of specific stages RETURNS: ax1, ax2: axes instances created for the plot. 
''' # load plot limits: plims = model_plots.load_plot_limits() # load ast_table for annotations ast_table = tables.read_completeness_table() # load galaxy data opt_gal, ir_gal = self.files.load_galaxies(hist_it_up=hist_it_up, target=self.target, ags=self.ags, color_cut=True, completeness_correction=completeness_correction) if plot_models is True: # plot lfs from simulations (and initialize figure) plt_kw = plt_kw or {} (ax1, ax2) = \ self.plot_lf_file(self.opt_lf_file, self.ir_lf_file, opt_limit=opt_gal.comp50mag2, ir_limit=ir_gal.comp50mag2, axs=axs, plt_kw=plt_kw) else: fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 6), sharey=False) plt.subplots_adjust(right=0.95, left=0.1, wspace=0.1) if add_stage_lfs is not None and plot_models is False: (ax1, ax2) = self.plot_by_stage(ax1, ax2, add_stage_lfs=add_stage_lfs, narratio=narratio, stage_lf_kw=stage_lf_kw, cols=cols, trilegal_output=trilegal_output) # plot galaxy data if plot_data is True: dplot_kw = \ {'drawstyle': 'steps-mid', 'color': 'darkred', 'lw': 2, 'label': '$%s$' % self.target.upper().replace('-DEEP', '')} # HACK to mask low values #opt_gal.hist[opt_gal.hist < 0.1] = 0.1 #ir_gal.hist[ir_gal.hist < 0.1] = 0.1 opt_err = np.sqrt(opt_gal.hist) ir_err = np.sqrt(ir_gal.hist) ax1.errorbar(opt_gal.bins[1:], opt_gal.hist, yerr=opt_err, **dplot_kw) ax2.errorbar(ir_gal.bins[1:], ir_gal.hist, yerr=ir_err, **dplot_kw) # initialize add numbers to the plot if narratio is True and plot_models is True: # count stars from the saved file ratio_data = rsp.fileIO.readfile(self.narratio_file, string_column=0) # get the number ratios for the annotations mean_ratio = {} for key in ratio_data.dtype.names: if key == 'target': continue mean_ratio[key] = np.mean(ratio_data[:][key]) for i, (ax, gal, trgb_err, band) in enumerate(zip([ax1, ax2], [opt_gal, ir_gal], [self.opt_trgb_err, self.ir_trgb_err], ['opt', 'ir'])): index = list(plims['target']).index(opt_gal.target.replace('4-deep', '4')) ax.set_yscale('log') ax.set_ylim(plims[index]['%s_lfmin' % band], plims[index]['%s_lfmax' % band]) xmax = plims[index]['%s_cmdmin' % band] ax.set_xlim(xmax, gal.comp50mag2) yarr = np.linspace(*ax.get_ylim()) # vertical lines around the trgb exclude region ax.fill_betweenx(yarr, gal.trgb - trgb_err * self.ags.factor[i], gal.trgb + trgb_err * self.ags.factor[i], color='black', alpha=0.1) ax.vlines(gal.trgb, *ax.get_ylim(), color='black', linestyle='--') # % completeness limit ast_frac = rsp.fileIO.item_from_row(ast_table, 'target', self.target.upper(), '%s_filter2' % band) # line at dim limit for rgb normalization ax.vlines(ast_frac, *ax.get_ylim(), linestyle='--', color='black') ax.fill_betweenx(yarr, ast_frac, ax.get_xlim()[1], color='black', alpha=0.1) loc = 4 if narratio is False: loc = 0 ax.legend(loc=loc) ax.set_xlabel('$%s$' % gal.filter2, fontsize=20) if narratio is True: # need to load the data nrgb and nagb, calculate the ratio # and error. 
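# (add_narratio_to_plot below pulls the data N_RGB and N_AGB from
# self.ags and annotates both the simulated means and the data ratio)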
self.add_narratio_to_plot(ax, band, mean_ratio) ax1.set_ylabel(r'${\rm Number\ of\ Stars}$', fontsize=20) plt.tick_params(labelsize=16) outfile = '%s%s_lfs.png' % (self.opt_lf_file.split('opt_lf')[0][:-1], extra_str) plt.savefig(outfile, dpi=150) logger.info('wrote %s' % outfile) return ax1, ax2 def plot_mass_met_table(self, opt_mass_met_file, ir_mass_met_file, extra_str=''): fig = plt.figure(figsize=(8, 8)) grid = ImageGrid(fig, 111, nrows_ncols=(2, 2), axes_pad=.5, add_all=True, label_mode="all", cbar_location="top", cbar_mode="each", cbar_size="7%", cbar_pad="2%", aspect=0) cmaps = [plt.cm.get_cmap('jet', 9), plt.cm.gray_r] #cmap = #cmap.set_bad('w', 1.) #fig, (axs) = plt.subplots(ncols=2, figsize=(8, 8), sharey=True) types = ['mean', 'count'] k =-1 for j in range(len(types)): for i, mass_met in enumerate([opt_mass_met_file, ir_mass_met_file]): k += 1 with open(mass_met, 'r') as mmf: lines = [l.strip() for l in mmf.readlines() if not l.startswith('#')] mag = np.concatenate([np.array(l.split(), dtype=float) for l in lines[0::3]]) mass = np.concatenate([np.array(l.split(), dtype=float) for l in lines[1::3]]) mh = np.concatenate([np.array(l.split(), dtype=float) for l in lines[2::3]]) N, xedges, yedges = binned_statistic_2d(mag, mass, mh, types[j], bins=50) im = grid[k].imshow(N.T, origin='lower', extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]], aspect='auto', interpolation='nearest', cmap=cmaps[j]) grid[k].cax.colorbar(im) #grid[i].cax.set_label('$[M/H]$') grid.axes_all[0].set_ylabel('${\\rm Mass}\ (M_\odot)$', fontsize=20) grid.axes_all[2].set_ylabel('${\\rm Mass}\ (M_\odot)$', fontsize=20) grid.axes_all[2].set_xlabel('$F814W$', fontsize=20) grid.axes_all[3].set_xlabel('$F160W$', fontsize=20) target = '_'.join(os.path.split(opt_mass_met_file)[1].split('_')[0:4]) fig.suptitle('$%s$' % target.replace('_', '\ '), fontsize=20) plt.savefig('%s_mass_met%s.png' % (target, extra_str), dpi=150) return grid
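# --- Illustrative sketch (not from the original module) ---
# A minimal, self-contained version of the asymmetric-uncertainty draw used
# by StarFormationHistories.random_draw_within_uncertainty, assuming only
# numpy. The function name draw_asymmetric and its defaults are
# hypothetical, added here for illustration.
import numpy as np

def draw_asymmetric(val, errm, errp, npoints=200000):
    """Draw one sample of val with -errm/+errp uncertainties by stitching
    the upper half of N(val, errp) to the lower half of N(val, errm)."""
    if errp == errm and errp > 0:
        # symmetric uncertainties: a plain gaussian draw
        return np.random.normal(val, errp)
    parts = []
    if errp > 0:
        pos = np.random.normal(val, errp, npoints)
        parts.append(pos[pos >= val])  # keep the upper half
    if errm > 0:
        neg = np.random.normal(val, errm, npoints)
        parts.append(neg[neg <= val])  # keep the lower half
    if not parts:
        return val  # no uncertainties: nothing to draw
    return np.random.choice(np.concatenate(parts))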
philrosenfield/TPAGB-calib
sfh_tests.py
Python
bsd-3-clause
76,643
[ "Galaxy", "Gaussian" ]
099a3393b5104f327ce26e9a066104f4c99874862877c0bb1ca20ced0cc344ba
# # Copyright (C) 2013-2019 The ESPResSo project # # This file is part of ESPResSo. # # ESPResSo is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import unittest as ut import unittest_decorators as utx import espressomd @utx.skipIfMissingFeatures("HAT") class HatTest(ut.TestCase): def force(self, F_max, r_cut, r): if r > 0 and r < r_cut: return F_max * (1. - r / r_cut) else: return 0. def pot(self, F_max, r_cut, r): if r < r_cut: return F_max * (r - r_cut) * ((r + r_cut) / (2. * r_cut) - 1.) else: return 0. def test(self): system = espressomd.System(box_l=3 * [10]) system.time_step = 0.01 system.cell_system.skin = 0.4 p0 = system.part.add(pos=0.5 * system.box_l, type=0) p1 = system.part.add(pos=0.5 * system.box_l, type=0) F_max = 3.145 cutoff = 1.3 system.non_bonded_inter[0, 0].hat.set_params( F_max=F_max, cutoff=cutoff) dx = cutoff / 90. r0 = 0.5 * system.box_l[0] for i in range(100): r = r0 - i * dx p1.pos = [r, 0.5 * system.box_l[1], 0.5 * system.box_l[2]] system.integrator.run(0) self.assertAlmostEqual( self.force(F_max, cutoff, i * dx), p0.f[0], places=7) self.assertAlmostEqual( self.pot(F_max, cutoff, i * dx), system.analysis.energy()['total'], places=7) if __name__ == "__main__": ut.main()
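# --- Illustrative sketch (not part of the ESPResSo test suite) ---
# A standalone numpy-only check that the closed forms in the test above are
# consistent: inside the cutoff the force should equal -d(pot)/dr. The
# values of F_max and r_cut below just mirror the test; nothing here
# touches espressomd.
import numpy as np

F_max, r_cut = 3.145, 1.3

def force(r):
    return F_max * (1. - r / r_cut) if 0. < r < r_cut else 0.

def pot(r):
    return F_max * (r - r_cut) * ((r + r_cut) / (2. * r_cut) - 1.) if r < r_cut else 0.

r = np.linspace(0.05, r_cut - 0.05, 201)
num_force = -np.gradient(np.array([pot(x) for x in r]), r[1] - r[0])
# pot(r) is quadratic in r, so interior central differences are exact
assert np.allclose(num_force[1:-1], [force(x) for x in r][1:-1], atol=1e-8)
print("force/potential consistency OK")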
pkreissl/espresso
testsuite/python/hat.py
Python
gpl-3.0
2,084
[ "ESPResSo" ]
e1e519f3a2557baf3b4447a1011e94da3dd00bae4c6192a1b5dfe764dc88864a
# Copyright (c) 2011, Jimmy Cao # All rights reserved. # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: # Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. # Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from oyoyo.parse import parse_nick import settings.wolfgame as var import botconfig from tools.wolfgamelogger import WolfgameLogger from tools import decorators from datetime import datetime, timedelta from operator import itemgetter from collections import defaultdict import threading import random import copy import time import re import logging import sys import os import imp import math import fnmatch import random import subprocess from imp import reload BOLD = "\u0002" COMMANDS = {} PM_COMMANDS = {} HOOKS = {} cmd = decorators.generate(COMMANDS) pmcmd = decorators.generate(PM_COMMANDS) hook = decorators.generate(HOOKS, raw_nick=True, permissions=False) # Game Logic Begins: var.LAST_PING = None # time of last ping var.LAST_STATS = None var.LAST_VOTES = None var.LAST_ADMINS = None var.LAST_GSTATS = None var.LAST_PSTATS = None var.LAST_TIME = None var.IS_ADMIN = {} var.IS_OWNER = {} var.IS_OP = [] var.WAS_OP = [] var.USERS = {} var.PINGING = False var.ADMIN_PINGING = False var.ROLES = {"person" : []} var.ORIGINAL_ROLES = {} var.SPECIAL_ROLES = {} var.PLAYERS = {} var.DCED_PLAYERS = {} var.ADMIN_TO_PING = None var.AFTER_FLASTGAME = None var.PHASE = "none" # "join", "day", or "night" var.TIMERS = {} var.DEAD = [] var.NO_LYNCH = [] var.TO_PING = [] var.CONNECT_OK = False var.DCED_GRACE = [] var.GIT_UPDATE = False var.MASS_MODES_CONNECT = False var.ORIGINAL_SETTINGS = {} var.LAST_SAID_TIME = {} var.GAME_START_TIME = datetime.now() # for idle checker only var.CAN_START_TIME = 0 var.GRAVEYARD_LOCK = threading.RLock() var.GAME_ID = 0 var.STARTED_DAY_PLAYERS = 0 var.DISCONNECTED = {} # players who got disconnected var.illegal_joins = defaultdict(int) var.LOGGER = WolfgameLogger(var.LOG_FILENAME, var.BARE_LOG_FILENAME) var.GOT_IT = False var.OPS_PING = 0 var.OPS_TO_PING = [] var.PINGING_OPS = [] var.PING_CHAN = None var.WHO_HOST = {} var.IDLE_HOST = {} var.AUTO_LOG_TOGGLED = False var.NO_PING = False var.AUTO_TOGGLED_LOG = False var.BURNED_HOUSES = [] var.BURNED = [] if botconfig.DEBUG_MODE: var.NIGHT_LIMIT_WARN = 0 var.NIGHT_TIME_WARN = 0 var.NIGHT_TIME_LIMIT = 0 # 90 var.DAY_TIME_LIMIT_WARN = 0 var.DAY_TIME_LIMIT_CHANGE = 0 var.KILL_IDLE_TIME = 0 #300 var.WARN_IDLE_TIME = 0 #180 var.JOIN_TIME_LIMIT = 0 def connect_callback(cli): 
to_be_devoiced = [] cmodes = [] if var.CONNECT_OK == True: return if botconfig.ADMIN_CHAN == "": var.LOG_CHAN = False @hook("quietlist", hookid=294) def on_quietlist(cli, server, botnick, channel, q, quieted, by, something): if re.match(".+\!\*@\*", quieted) and by == var.FULL_ADDRESS and channel == botconfig.CHANNEL: # only unquiet people quieted by bot cmodes.append(("-q", quieted)) @hook("whospcrpl", hookid=294) def on_whoreply(cli, server, you, chan, ident, cloak, user, status, acc): if user in var.USERS: return # Don't add someone who is already there if user == botconfig.NICK: cli.nickname = user cli.ident = ident cli.hostmask = cloak var.FULL_ADDRESS = "{0}!{1}@{2}".format(user, ident, cloak) if acc == "0": acc = "*" if "+" in status and user not in to_be_devoiced and chan == botconfig.CHANNEL: to_be_devoiced.append(user) var.USERS[user] = dict(cloak=cloak,account=acc) var.IS_OWNER[user] = False var.IS_ADMIN[user] = False if cloak in botconfig.ADMINS or cloak in botconfig.OWNERS or acc in botconfig.ADMINS_ACCOUNTS or acc in botconfig.OWNERS_ACCOUNTS: var.IS_ADMIN[user] = True if cloak in botconfig.OWNERS or acc in botconfig.OWNERS_ACCOUNTS: var.IS_OWNER[user] = True if "@" in status and user not in var.IS_OP and chan == botconfig.CHANNEL: var.IS_OP.append(user) @hook("endofwho", hookid=294) def afterwho(*args): if var.CONNECT_OK == False: for nick in to_be_devoiced: cmodes.append(("-v", nick)) var.CONNECT_OK = True # devoice all on connect @hook("mode", hookid=294) def on_give_me_ops(cli, modeapplier, chan, modeaction, target="", *other): if modeaction == "+o" and target == botconfig.NICK and var.PHASE == "none" and chan == botconfig.CHANNEL: @hook("quietlistend", 294) def on_quietlist_end(cli, svr, nick, chan, *etc): if chan == botconfig.CHANNEL and var.MASS_MODES_CONNECT == False: decorators.unhook(HOOKS, 294) mass_mode(cli, cmodes) var.MASS_MODES_CONNECT = True cli.mode(botconfig.CHANNEL, "q") # unquiet all cli.mode(botconfig.CHANNEL, "-m") # remove -m mode from channel elif modeaction == "+o" and target == botconfig.NICK and var.PHASE != "none": decorators.unhook(HOOKS, 294) # forget about it cli.who(botconfig.CHANNEL, "%nuchaf") def mass_mode(cli, md): """ Example: mass_mode(cli, (('+v', 'asdf'), ('-v','wobosd'))) """ lmd = len(md) # store how many mode changes to do args = ["", ""] for j in range(0, lmd): for i in range(0, len(md[j])): args[i] += md[j][i] + " " if ((j+1) % 4) == 0: cli.mode(botconfig.CHANNEL, args[0].replace(" ", ""), args[1]) args = ["", ""] if args[0] != "": cli.mode(botconfig.CHANNEL, args[0].replace(" ", ""), args[1]) def reset_modes_timers(cli): # Reset game timers for x, timr in var.TIMERS.items(): timr.cancel() var.TIMERS = {} # Reset modes cli.mode(botconfig.CHANNEL, "-m") cmodes = [] for plr in var.list_players(): cmodes.append(("-v", plr)) for deadguy in var.DEAD: cmodes.append(("-q", deadguy+"!*@*")) mass_mode(cli, cmodes) def pm(cli, target, message): # message either privmsg or notice, depending on user settings if target in var.USERS and var.USERS[target]["cloak"] in var.SIMPLE_NOTIFY: # still need to make it work with the damn ident cli.notice(target, message) else: cli.msg(target, message) def reset_settings(): for attr in list(var.ORIGINAL_SETTINGS.keys()): setattr(var, attr, var.ORIGINAL_SETTINGS[attr]) dict.clear(var.ORIGINAL_SETTINGS) def reset(cli): chan = botconfig.CHANNEL var.PHASE = "none" for x, timr in var.TIMERS.items(): timr.cancel() var.TIMERS = {} var.GAME_ID = 0 cmodes = [] cmodes.append(("-m", )) for plr in var.list_players(): 
if plr not in cmodes: cmodes.append(("-v", plr)) for deadguy in var.DEAD: if deadguy not in cmodes: cmodes.append(("-q", deadguy+"!*@*")) for aop in var.list_players(): if aop in var.WAS_OP and aop not in var.IS_OP: var.WAS_OP.remove(aop) var.IS_OP.append(aop) cmodes.append(("+o", aop)) for dop in var.DEAD: if dop in var.WAS_OP and dop not in var.IS_OP: var.WAS_OP.remove(dop) var.IS_OP.append(dop) cmodes.append(("+o", dop)) mass_mode(cli, cmodes) var.DEAD = [] var.NO_LYNCH = [] var.BURNED_HOUSES = [] var.BURNED = [] var.ROLES = {"person" : []} reset_settings() dict.clear(var.LAST_SAID_TIME) dict.clear(var.PLAYERS) dict.clear(var.DCED_PLAYERS) dict.clear(var.DISCONNECTED) if var.AFTER_FLASTGAME: var.AFTER_FLASTGAME() var.AFTER_FLASTGAME = None if var.ADMIN_TO_PING: # It was an flastgame cli.msg(chan, "PING! " + var.ADMIN_TO_PING) var.ADMIN_TO_PING = None def make_stasis(nick, penalty): try: cloak = var.USERS[nick]['cloak'] if cloak is not None: var.illegal_joins[cloak] += penalty except KeyError: pass def chan_log(cli, nick, action): cli.msg(botconfig.ADMIN_CHAN, "processCommand (b'{0}')action(\"{1}\")".format(nick, action)) @pmcmd("fdie", "fbye", raw_nick=True) @cmd("fdie", "fbye", raw_nick=True) def forced_exit(cli, rnick, *rest): # Admin Only """Forces the bot to close""" nick, mode, user, host = parse_nick(rnick) if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True: if var.PHASE in ("day", "night"): stop_game(cli) else: reset(cli) if var.LOG_CHAN == True: chan_log(cli, rnick, "forced_exit") cli.quit("Forced quit from "+nick) @cmd("logging", "log", "toggle", raw_nick=True) def toggle_logging(cli, rnick, chan, rest): """Toggles the logging option""" nick, mode, user, host = parse_nick(rnick) if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True: if var.LOG_CHAN == True: var.LOG_CHAN = False chan_log(cli, rnick, "disable_logging") cli.msg(chan, "Logging has now been disabled by \u0002{0}\u0002".format(nick)) cli.msg(botconfig.ADMIN_CHAN, "Logging is now \u0002off\u0002") if var.AUTO_LOG_TOGGLE == True: var.AUTO_LOG_TOGGLE == False chan_log(cli, rnick, "disable_auto_toggle") cli.msg(chan, "Automatic logging toggle has been disabled.") cli.msg(botconfig.ADMIN_CHAN, "Automatic logging toggle is now \u0002off\u0002.") var.AUTO_TOGGLED_LOG = True return if var.LOG_CHAN == False: var.LOG_CHAN = True chan_log(cli, rnick, "enable_logging") cli.msg(chan, "Logging has now been enabled by \u0002{0}\u0002".format(nick)) cli.msg(botconfig.ADMIN_CHAN, "Logging is now \u0002on\u0002") if var.AUTO_LOG_TOGGLE == False and var.AUTO_TOGGLED_LOG == True: var.AUTO_LOG_TOGGLE == True chan_log(cli, rnick, "enable_auto_toggle") cli.msg(chan, "Automatic logging toggle has been enabled.") cli.msg(botconfig.ADMIN_CHAN, "Automatic logging toggle is now \u0002on\u0002.") return else: cli.notice(nick, "You are not an admin.") @pmcmd("frestart", raw_nick=True) @cmd("frestart", raw_nick=True) def restart_program(cli, rnick, *rest): """Restarts the bot.""" nick, mode, user, host = parse_nick(rnick) if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True: try: if var.PHASE in ("day", "night"): stop_game(cli) else: reset(cli) if var.LOG_CHAN == True: chan_log(cli, rnick, "restart") cli.quit("Forced restart from "+nick) raise SystemExit finally: print("RESTARTING") python = sys.executable if rest[-1].strip().lower() == "debugmode": os.execl(python, python, sys.argv[0], "--debug") elif rest[-1].strip().lower() == "normalmode": os.execl(python, python, sys.argv[0]) else: os.execl(python, python, *sys.argv) @pmcmd("ping") def 
pm_ping(cli, nick, rest): pm(cli, nick, 'Pong!') @cmd("ping", "p", raw_nick=True) def pinger(cli, rnick, chan, rest): """Pings the channel to get people's attention. Rate-Limited.""" nick, ident, mode, host = parse_nick(rnick) if (var.LAST_PING and var.LAST_PING + timedelta(seconds=var.PING_WAIT) > datetime.now()): cli.notice(nick, ("This command is rate-limited. " + "Please wait a while before using it again.")) return if var.PHASE in ('night','day'): cli.notice(nick, "Pong!") return if chan != botconfig.CHANNEL: return var.LAST_PING = datetime.now() if var.PINGING: return var.PINGING = True var.TO_PING = [] if botconfig.PING_NOTICE == True: cli.notice(chan, "A game of Werewolf is starting in "+ "{1} : type {0}join to join!".format(botconfig.CMD_CHAR, chan)) var.PINGING = False return @hook("whoreply", hookid=800) def ping_whoreply(cli, server, you, chan, ident, cloak, user, status, acc): if not var.PINGING: return if user in (botconfig.NICK, you): return # Don't ping self. if (all((not botconfig.REVERSE_PING, 'G' not in status, # not /away '+' not in status, # not already joined (voiced) cloak not in var.AWAY)) or all((botconfig.REVERSE_PING, '+' not in status, cloak in var.PING_IN))): var.TO_PING.append(user) @hook("endofwho", hookid=800) def do_ping(*args): if not var.PINGING: return var.TO_PING.sort(key=lambda x: x.lower()) cli.msg(botconfig.CHANNEL, "PING! "+" ".join(var.TO_PING)) if var.LOG_CHAN == True: chan_log(cli, rnick, "ping") var.PINGING = False minimum = datetime.now() + timedelta(seconds=var.PING_MIN_WAIT) if not var.CAN_START_TIME or var.CAN_START_TIME < minimum: var.CAN_START_TIME = minimum decorators.unhook(HOOKS, 800) cli.who(botconfig.CHANNEL, "%nuchaf") @cmd("in", raw_nick=True) @pmcmd("in", raw_nick=True) def get_in(cli, rnick, *rest): """Get yourself in the ping list""" nick, mode, ident, cloak = parse_nick(rnick) if botconfig.REVERSE_PING == False: cli.notice(nick, "Invalid syntax. Use {0}away and {0}back instead".format(botconfig.CMD_CHAR)) return if cloak in var.PING_IN and cloak not in botconfig.COMMON_HOSTS: cli.notice(nick, "You are already on the list") return if ident+"@"+cloak in var.PING_IN and cloak in botconfig.COMMON_HOSTS: cli.notice(nick, "You are already on the list") return if cloak not in botconfig.COMMON_HOSTS: var.PING_IN.append(cloak) if cloak in botconfig.COMMON_HOSTS: var.PING_IN.append(ident+"@"+cloak) if var.LOG_CHAN == True: chan_log(cli, rnick, "in") cli.notice(nick, "You are now on the list.") @cmd("out", raw_nick=True) @pmcmd("out", raw_nick=True) def get_out(cli, rnick, *rest): """Removes yourself from the ping list""" nick, mode, ident, cloak = parse_nick(rnick) if botconfig.REVERSE_PING == False: cli.notice(nick, "Invalid syntax. Use {0}away and {0}back instead".format(botconfig.CMD_CHAR)) return if cloak in var.PING_IN and cloak not in botconfig.COMMON_HOSTS: var.PING_IN.remove(cloak) if var.LOG_CHAN == True: chan_log(cli, rnick, "out") cli.notice(nick, "You are no longer in the list.") return if ident+"@"+cloak in var.PING_IN and cloak in botconfig.COMMON_HOSTS: var.PING_IN.remove(ident+"@"+cloak) if var.LOG_CHAN == True: chan_log(cli, rnick, "out") cli.notice(nick, "You are no longer in the list.") return cli.notice(nick, "You are not in the list.") @cmd("away", raw_nick=True) @pmcmd("away", raw_nick=True) def away(cli, rnick, *rest): """Use this to activate your away status (so you aren't pinged).""" nick, mode, ident, cloak = parse_nick(rnick) if botconfig.REVERSE_PING == True: cli.notice(nick, "Invalid syntax. 
Use {0}in and {0}out instead".format(botconfig.CMD_CHAR)) return if cloak in var.AWAY and cloak not in botconfig.COMMON_HOSTS: var.AWAY.remove(cloak) if var.LOG_CHAN == True: chan_log(cli, rnick, "away_back") cli.notice(nick, "You are no longer marked as away.") return if ident+"@"+cloak in var.AWAY and cloak in botconfig.COMMON_HOSTS: var.AWAY.remove(ident+"@"+cloak) if var.LOG_CHAN == True: chan_log(cli, rnick, "away_back") cli.notice(nick, "You are no longer marked as away.") return if cloak not in var.AWAY and cloak not in botconfig.COMMON_HOSTS: var.AWAY.append(cloak) if var.LOG_CHAN == True: chan_log(cli, rnick, "away") cli.notice(nick, "You are now marked as away.") return if ident+"@"+cloak not in var.AWAY and cloak in botconfig.COMMON_HOSTS: var.AWAY.append(ident+"@"+cloak) if var.LOG_CHAN == True: chan_log(cli, rnick, "away") cli.notice(nick, "You are now marked as away.") @cmd("back", raw_nick=True) @pmcmd("back", raw_nick=True) def back_from_away(cli, rnick, *rest): """Unmarks away status""" nick, mode, ident, cloak = parse_nick(rnick) if botconfig.REVERSE_PING == True: cli.notice(nick, "Invalid syntax. Use {0}in and {0}out instead".format(botconfig.CMD_CHAR)) return if cloak not in var.AWAY and cloak not in botconfig.COMMON_HOSTS: cli.notice(nick, "You are not marked as away.") return if ident+"@"+cloak not in var.AWAY and cloak in botconfig.COMMON_HOSTS: cli.notice(nick, "You are not marked as away.") return if cloak in var.AWAY and cloak not in botconfig.COMMON_HOSTS: var.AWAY.remove(cloak) if ident+"@"+cloak in var.AWAY and cloak in botconfig.COMMON_HOSTS: var.AWAY.remove(ident+"@"+cloak) if var.LOG_CHAN == True: chan_log(cli, rnick, "back") cli.notice(nick, "You are no longer marked as away.") @cmd("simple", raw_nick = True) @pmcmd("simple", raw_nick = True) def mark_simple_notify(cli, rnick, *rest): """If you don't want to bot to send you role instructions""" nick, mode, ident, cloak = parse_nick(rnick) if cloak in var.SIMPLE_NOTIFY and cloak not in botconfig.COMMON_HOSTS: var.SIMPLE_NOTIFY.remove(cloak) if var.LOG_CHAN == True: chan_log(cli, rnick, "simple_remove") cli.notice(nick, "You will no longer receive simple role instructions.") return if ident+"@"+cloak in var.SIMPLE_NOTIFY and cloak in botconfig.COMMON_HOSTS: var.SIMPLE_NOTIFY.remove(ident+"@"+cloak) if var.LOG_CHAN == True: chan_log(cli, rnick, "simple_remove") cli.notice(nick, "You will no longer receive simple role instructions.") return if cloak not in var.SIMPLE_NOTIFY and cloak not in botconfig.COMMON_HOSTS: var.SIMPLE_NOTIFY.append(cloak) if ident+"@"+cloak not in var.SIMPLE_NOTIFY and cloak in botconfig.COMMON_HOSTS: var.SIMPLE_NOTIFY.append(ident+"@"+cloak) if var.LOG_CHAN == True: chan_log(cli, rnick, "simple") cli.notice(nick, "You will now receive simple role instructions.") @cmd("fping", raw_nick=True) def fpinger(cli, rnick, chan, rest): nick, mode, user, host = parse_nick(rnick) if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True: var.LAST_PING = None pinger(cli, rnick, chan, rest) if var.LOG_CHAN == True: chan_log(cli, rnick, "force_ping") @hook("mode") # unsets any ban set by ChanServ (AKICK) def unset_bans_akick(cli, rnick, chan, mode, *action): nick, mode, user, host = parse_nick(rnick) action = list(action) if mode == "+b" and "services." 
in host: if nick == "ChanServ" and len(action) == 1: ban = action.pop(0) cli.who(chan, "%nuchaf") @hook("whospcrpl", hookid=126) def am_i_op_now(cli, server, you, chan, ident, host, nick, status, account): if you == nick and '@' in status: cli.mode(chan, "-b", ban) cli.msg(chan, "\u0001ACTION resets the trap...\u0001") @hook("endofwho", hookid=126) def unhook_after_ban(cli, server, you, chan, output): decorators.unhook(HOOKS, 126) @cmd("join", "j", raw_nick=True) def join(cli, rnick, chan, rest): """Either starts a new game of Werewolf or joins an existing game that has not started yet.""" nick, mode, user, cloak = parse_nick(rnick) if chan == botconfig.CHANNEL: cmodes = [] pl = var.list_players() try: cloak = var.USERS[nick]['cloak'] if cloak is not None and var.illegal_joins[cloak] > 0: cli.notice(nick, "Sorry, but you are in stasis for {0} games.".format(var.illegal_joins[cloak])) return except KeyError: cloak = None if var.PHASE == "none": if var.LOG_CHAN == True and var.GOT_IT != True: chan_log(cli, rnick, "join_start") var.GOT_IT = False cmodes.append(("+v", nick)) var.ROLES["person"].append(nick) var.PHASE = "join" var.WAITED = 0 var.GAME_ID = time.time() var.CAN_START_TIME = datetime.now() + timedelta(seconds=var.MINIMUM_WAIT) cli.msg(chan, ('\u0002{0}\u0002 has started a game of Werewolf. '+ 'Type "{1}join" to join. Type "{1}start" to start the game. '+ 'Type "{1}wait" to increase join wait time.').format(nick, botconfig.CMD_CHAR)) var.GOT_IT = False # reset that variable (used in different places) # Set join timer if var.JOIN_TIME_LIMIT: t = threading.Timer(var.JOIN_TIME_LIMIT, kill_join, [cli, chan]) var.TIMERS['join'] = t t.daemon = True t.start() elif nick in pl: cli.notice(nick, "You're already playing!") return elif len(pl) >= var.MAX_PLAYERS: cli.notice(nick, "Too many players! Try again next time.") return elif var.PHASE != "join": cli.notice(nick, "Sorry but the game is already running. Try again next time.") return else: clones = [] if cloak is not None: for nck in pl: if nck in var.USERS and "cloak" in var.USERS[nck] and var.USERS[nck]["cloak"] == cloak: clones.append(nck) if len(clones) >= 1: cli.msg(botconfig.SPECIAL_CHAN, "Clones detected joining in {}: {}, {}".format(chan, nick, ', '.join(clones))) if var.LOG_CHAN == True: chan_log(cli, rnick, "join") cmodes.append(("+v", nick)) var.ROLES["person"].append(nick) cli.msg(chan, '\u0002{0}\u0002 has joined the game. New player count: \u0002{1}\u0002'.format(nick, len(pl)+1)) var.LAST_STATS = None # reset if nick in var.IS_OP and var.AUTO_OP_DEOP == True and nick not in var.WAS_OP: cmodes.append(("-o", nick)) var.IS_OP.remove(nick) var.WAS_OP.append(nick) mass_mode(cli, cmodes) def kill_join(cli, chan): pl = var.list_players() pl.sort(key=lambda x: x.lower()) msg = 'PING! {0}'.format(", ".join(pl)) reset(cli) cli.msg(chan, msg) cli.msg(chan, 'The current game took too long to start and ' + 'has been canceled. 
If you are still active, ' + 'please join again to start a new game.') if var.LOG_CHAN == True: chan_log(cli, var.FULL_ADDRESS, "cancel_game") var.LOGGER.logMessage('Game canceled.') for nick in pl: if nick in var.WAS_OP and nick not in var.IS_OP: var.WAS_OP.remove(nick) var.IS_OP.append(nick) cli.mode(botconfig.CHANNEL, "+o {0}".format(nick)) @cmd("fjoin", raw_nick=True) def fjoin(cli, rnick, chan, rest): nick, mode, user, host = parse_nick(rnick) if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True: noticed = False if chan == botconfig.CHANNEL: if not rest.strip(): if var.LOG_CHAN == True: chan_log(cli, rnick, "forced_join") var.GOT_IT = True join(cli, nick, chan, "") for a in re.split(" +",rest): a = a.strip() if not a: continue ul = list(var.USERS.keys()) ull = [u.lower() for u in ul] if a.lower() not in ull: if not is_fake_nick(a) or not botconfig.DEBUG_MODE: if not noticed: # important cli.msg(chan, nick+(": You may only fjoin "+ "people who are in this channel.")) noticed = True continue if not is_fake_nick(a): a = ul[ull.index(a.lower())] if a != botconfig.NICK: join(cli, a.strip(), chan, "") else: cli.notice(nick, "No, that won't be allowed.") @cmd("fleave","fquit","fdel", raw_nick=True) def fleave(cli, rnick, chan, rest): nick, mode, user, host = parse_nick(rnick) if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True: if chan == botconfig.CHANNEL: if var.PHASE == "none": cli.notice(nick, "No game is running.") for a in re.split(" +",rest): a = a.strip() if not a: continue pl = var.list_players() _pl = len(pl) - 1 pll = [x.lower() for x in pl] if a.lower() in pll: a = pl[pll.index(a.lower())] else: cli.msg(chan, nick+": That person is not playing.") return if var.LOG_CHAN == True: chan_log(cli, rnick, "forced_leave") var.GOT_IT = True cli.msg(chan, ("\u0002{0}\u0002 is forcing"+ " \u0002{1}\u0002 to leave.").format(nick, a)) cli.msg(chan, "Appears (s)he was a \02{0}\02.".format(var.get_role(a))) cli.msg(chan, "New player count: {0}".format(_pl)) if var.PHASE in ("day", "night"): var.LOGGER.logMessage("{0} is forcing {1} to leave.".format(nick, a)) var.LOGGER.logMessage("Appears (s)he was a {0}.".format(var.get_role(a))) del_player(cli, a) @cmd("fstart", raw_nick=True) def fstart(cli, rnick, chan, rest): nick, mode, user, host = parse_nick(rnick) if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True: if chan == botconfig.CHANNEL: var.CAN_START_TIME = datetime.now() cli.msg(botconfig.CHANNEL, "\u0002{0}\u0002 has forced the game to start.".format(nick)) start(cli, chan, chan, rest) if var.LOG_CHAN == True: chan_log(cli, rnick, "forced_start") var.GOT_IT = True @pmcmd("", raw_nick=True) def version_reply(cli, rnick, rest): nick, mode, user, host = parse_nick(rnick) if rest == "\01VERSION\01": cli.notice(nick, "\u0001VERSION Wolfbot by jcao219 modified by Vgr using python 3.2\u0001") if var.LOG_CHAN == True: chan_log(cli, rnick, "version") '''@pmcmd("", raw_nick=True) def bot_uptime(cli, rnick, rest): nick, mode, user, host = parse_nick(rnick) if rest == "\01UPTIME\01": cli.notice(nick, "\01UPTIME Up for \02{0}\02 hours, \02{1}\02 minutes and \02{2}\02 seconds, or \02{3}\02 seconds.\01".format''' @cmd("update", "upd", raw_nick=True) @pmcmd("update", "upd", raw_nick=True) def update(cli, rnick, *rest): """Restart for an update.""" nick, mode, user, host = parse_nick(rnick) if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True: rest = list(rest) try: if var.PHASE in ("day", "night"): stop_game(cli) else: reset(cli) if var.LOG_CHAN == True: chan_log(cli, rnick, "update") 
cli.quit("Updating database . . .") raise SystemExit finally: print("RESTARTING") python = sys.executable os.execl(python, python, sys.argv[0]) @hook("kick") def on_kicked(cli, nick, chan, victim, reason): if chan == botconfig.CHANNEL: if victim == botconfig.NICK: cli.join(botconfig.CHANNEL) if var.AUTO_OP_FLAG == False: cli.msg("ChanServ", "op "+botconfig.CHANNEL) elif victim in var.IS_ADMIN and var.IS_ADMIN[victim] == True: var.IS_ADMIN[victim] = False # make sure no abuse can be made (it will be set back on join anyway) var.IS_OWNER[victim] = False # no need to check if True or False, as all owners are admins if victim in var.IS_OP: var.IS_OP.remove(victim) if victim in var.WAS_OP: var.WAS_OP.remove(victim) @hook("account") def on_account(cli, nick, acc): nick = parse_nick(nick)[0] var.IS_ADMIN[nick] = False var.IS_OWNER[nick] = False # default all of them to False, then set to True if they're admins (later) if nick in var.USERS.keys(): var.USERS[nick]["account"] = acc if acc in botconfig.ADMINS_ACCOUNTS or acc in botconfig.OWNERS_ACCOUNTS: var.IS_ADMIN[nick] = True if acc in botconfig.OWNERS_ACCOUNTS: var.IS_OWNER[nick] = True @cmd("stats", "s", "players", raw_nick=True) def stats(cli, rnick, chan, rest): """Display the player statistics""" nick, mode, user, host = parse_nick(rnick) if chan == botconfig.CHANNEL: if var.PHASE == "none": cli.notice(nick, "No game is currently running.") return pl = var.list_players() if nick in pl or var.PHASE == "join": # only do this rate-limiting stuff if the person is in game if (var.LAST_STATS and var.LAST_STATS + timedelta(seconds=var.STATS_RATE_LIMIT) > datetime.now()): cli.msg(chan, nick+": This command is rate-limited.") return var.LAST_STATS = datetime.now() pl.sort(key=lambda x: x.lower()) if len(pl) > 1: msg = '{0}: \u0002{1}\u0002 players: {2}'.format(nick, len(pl), ", ".join(pl)) else: msg = '{0}: \u00021\u0002 player: {1}'.format(nick, pl[0]) if nick in pl or var.PHASE == "join": cli.msg(chan, msg) if var.LOG_CHAN == True: chan_log(cli, rnick, "stats") var.LOGGER.logMessage(msg.replace("\02", "")) else: cli.notice(nick, msg) if var.LOG_CHAN == True: chan_log(cli, rnick, "stats_notice") if var.PHASE == "join": return message = [] f = False # set to true after the is/are verb is decided l1 = [k for k in var.ROLES.keys() if var.ROLES[k]] l2 = [k for k in var.ORIGINAL_ROLES.keys() if var.ORIGINAL_ROLES[k]] rs = list(set(l1+l2)) # Due to popular demand, picky ordering if "wolf" in rs: rs.remove("wolf") rs.insert(0, "wolf") if "seer" in rs: rs.remove("seer") rs.insert(1, "seer") if "villager" in rs: rs.remove("villager") rs.append("villager") firstcount = len(var.ROLES[rs[0]]) if firstcount > 1 or not firstcount: vb = "are" else: vb = "is" for role in rs: count = len(var.ROLES[role]) if role == "traitor" and var.HIDDEN_TRAITOR: continue elif role == "villager" and var.HIDDEN_TRAITOR: count += len(var.ROLES["traitor"]) if count > 1 or count == 0: message.append("\u0002{0}\u0002 {1}".format(count if count else "\u0002no\u0002", var.plural(role))) else: message.append("\u0002{0}\u0002 {1}".format(count, role)) stats_mssg = "{0}: It is currently {4}. 
There {3} {1}, and {2}.".format(nick, ", ".join(message[0:-1]), message[-1], vb, var.PHASE) if nick in pl or var.PHASE == "join": cli.msg(chan, stats_mssg) var.LOGGER.logMessage(stats_mssg.replace("\02", "")) else: cli.notice(nick, stats_mssg) def hurry_up(cli, gameid, change): if var.PHASE != "day": return if gameid: if gameid != var.DAY_ID: return chan = botconfig.CHANNEL if not change: cli.msg(chan, ("\02As the sun sinks inexorably toward the horizon, turning the lanky pine " + "trees into fire-edged silhouettes, the villagers are reminded that very little " + "time remains for them to reach a decision; if darkness falls before they have done " + "so, the majority will win the vote. No one will be lynched if there " + "are no votes or an even split.\02")) if not var.DAY_TIME_LIMIT_CHANGE: return if var.LOG_CHAN == True: chan_log(cli, var.FULL_ADDRESS, "day_warn") if (len(var.list_players()) <= var.SHORT_DAY_PLAYERS): tmr = threading.Timer(var.SHORT_DAY_LIMIT_CHANGE, hurry_up, [cli, var.DAY_ID, True]) else: tmr = threading.Timer(var.DAY_TIME_LIMIT_CHANGE, hurry_up, [cli, var.DAY_ID, True]) tmr.daemon = True var.TIMERS["day"] = tmr tmr.start() return var.DAY_ID = 0 pl = var.list_players() avail = len(pl) - len(var.WOUNDED) votesneeded = avail // 2 + 1 found_dup = False maxfound = (0, "") for votee, voters in iter(var.VOTES.items()): if len(voters) > maxfound[0]: maxfound = (len(voters), votee) found_dup = False elif len(voters) == maxfound[0]: found_dup = True if maxfound[0] > 0 and not found_dup: if var.LOG_CHAN == True: chan_log(cli, var.FULL_ADDRESS, "forced_lynch") cli.msg(chan, "The sun sets.") var.LOGGER.logMessage("The sun sets.") var.VOTES[maxfound[1]] = [None] * votesneeded chk_decision(cli) # Induce a lynch else: if var.LOG_CHAN == True: chan_log(cli, var.FULL_ADDRESS, "no_lynch") cli.msg(chan, ("As the sun sets, the villagers agree to "+ "retire to their beds and wait for morning.")) var.LOGGER.logMessage(("As the sun sets, the villagers agree to "+ "retire to their beds and wait for morning.")) transition_night(cli) @cmd("fnight", raw_nick=True) def fnight(cli, rnick, chan, rest): nick, mode, user, host = parse_nick(rnick) if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True: if var.PHASE != "day": cli.notice(nick, "It is not daytime.") else: if var.LOG_CHAN == True: chan_log(cli, rnick, "forced_night") hurry_up(cli, 0, True) @cmd("fday", raw_nick=True) def fday(cli, rnick, chan, rest): nick, mode, user, host = parse_nick(rnick) if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True: if var.PHASE != "night": cli.notice(nick, "It is not nighttime.") else: if var.LOG_CHAN == True: chan_log(cli, rnick, "forced_day") transition_day(cli) def chk_decision(cli): chan = botconfig.CHANNEL pl = var.list_players() avail = len(pl) - len(var.WOUNDED) votesneeded = avail // 2 + 1 not_lynching = len(var.NO_LYNCH) if not_lynching >= votesneeded: cli.msg(botconfig.CHANNEL, "Too many players refrained from voting. 
No lynching occuring.") transition_night(cli) return for votee, voters in iter(var.VOTES.items()): if len(voters) >= votesneeded: lmsg = random.choice(var.LYNCH_MESSAGES).format(votee, var.get_role(votee)) cli.msg(botconfig.CHANNEL, lmsg) var.LOGGER.logMessage(lmsg.replace("\02", "")) var.LOGGER.logBare(votee, "LYNCHED") if del_player(cli, votee, True): transition_night(cli) @pmcmd("retract") def wolfretract(cli, nick, rest): if var.PHASE in ("none", "join"): cli.notice(nick, "No game is currently running.") return elif nick not in var.list_players() or nick in var.DISCONNECTED.keys(): cli.notice(nick, "You're not currently playing.") return role = var.get_role(nick) if role not in ('wolf', 'werecrow'): return if var.PHASE != "night": pm(cli, nick, "You may only retract at night.") return if role == "werecrow": # Check if already observed if var.OBSERVED.get(nick): pm(cli, nick, ("You have already transformed into a crow, and "+ "cannot turn back until day.")) return if nick in var.KILLS.keys(): del var.KILLS[nick] pm(cli, nick, "You have retracted your vote.") #var.LOGGER.logBare(nick, "RETRACT", nick) @cmd("votes", raw_nick=True) def show_votes(cli, rnick, chan, rest): """Displays the voting statistics.""" nick, mode, user, host = parse_nick(rnick) if chan == botconfig.CHANNEL: if var.PHASE in ("none", "join"): cli.notice(nick, "No game is currently running.") return if var.PHASE != "day": cli.notice(nick, "Voting is only during the day.") return if (var.LAST_VOTES and var.LAST_VOTES + timedelta(seconds=var.VOTES_RATE_LIMIT) > datetime.now()): cli.msg(chan, nick+": This command is rate-limited.") return pl = var.list_players() if nick in pl: var.LAST_VOTES = datetime.now() if not var.VOTES.values(): msg = nick+": No votes yet." if nick in pl: var.LAST_VOTES = None # reset else: if var.LOG_CHAN == True: chan_log(cli, rnick, "votes") votelist = ["{0}: {1} ({2})".format(votee, len(var.VOTES[votee]), " ".join(var.VOTES[votee])) for votee in var.VOTES.keys()] msg = "{0}: {1}".format(nick, ", ".join(votelist)) if nick in pl: cli.msg(chan, msg) else: cli.notice(nick, msg) pl = var.list_players() avail = len(pl) - len(var.WOUNDED) - len(var.NO_LYNCH) votesneeded = avail // 2 + 1 not_voting = len(var.NO_LYNCH) if not_voting == 1: plural = " isn't" else: plural = "s aren't" the_message = ("{0}: \u0002{1}\u0002 players, \u0002{2}\u0002 votes "+ "required to lynch, \u0002{3}\u0002 players available " + "to vote. \u0002{4}\u0002 player{5} voting.").format(nick, len(pl), votesneeded, avail, not_voting, plural) if nick in pl: cli.msg(chan, the_message) else: cli.notice(nick, the_message) def chk_traitor(cli): for tt in var.ROLES["traitor"]: var.ROLES["wolf"].append(tt) var.ROLES["traitor"].remove(tt) if var.LOG_CHAN == True: chan_log(cli, var.FULL_ADDRESS, "traitor_wolf") pm(cli, tt, ('HOOOOOOOOOWL. You have become... 
a wolf!\n'+ 'It is up to you to avenge your fallen leaders!')) def stop_game(cli, winner = ""): chan = botconfig.CHANNEL if var.DAY_START_TIME: now = datetime.now() td = now - var.DAY_START_TIME var.DAY_TIMEDELTA += td if var.NIGHT_START_TIME: now = datetime.now() td = now - var.NIGHT_START_TIME var.NIGHT_TIMEDELTA += td daymin, daysec = var.DAY_TIMEDELTA.seconds // 60, var.DAY_TIMEDELTA.seconds % 60 nitemin, nitesec = var.NIGHT_TIMEDELTA.seconds // 60, var.NIGHT_TIMEDELTA.seconds % 60 total = var.DAY_TIMEDELTA + var.NIGHT_TIMEDELTA tmin, tsec = total.seconds // 60, total.seconds % 60 if var.LOG_CHAN == True: chan_log(cli, var.FULL_ADDRESS, "stop_game") gameend_msg = ("Game lasted \u0002{0:0>2}:{1:0>2}\u0002. " + "\u0002{2:0>2}:{3:0>2}\u0002 was day. " + "\u0002{4:0>2}:{5:0>2}\u0002 was night. ").format(tmin, tsec, daymin, daysec, nitemin, nitesec) cli.msg(chan, gameend_msg) var.LOGGER.logMessage(gameend_msg.replace("\02", "")+"\n") var.LOGGER.logBare("DAY", "TIME", str(var.DAY_TIMEDELTA.seconds)) var.LOGGER.logBare("NIGHT", "TIME", str(var.NIGHT_TIMEDELTA.seconds)) var.LOGGER.logBare("GAME", "TIME", str(total.seconds)) roles_msg = [] var.ORIGINAL_ROLES["cursed villager"] = var.CURSED # A hack var.ORIGINAL_ROLES["gunner"] = list(var.GUNNERS.keys()) var.ORIGINAL_ROLES["arsonist"] = list(var.PYROS.keys()) lroles = list(var.ORIGINAL_ROLES.keys()) lroles.remove("wolf") lroles.insert(0, "wolf") # picky, howl consistency for role in lroles: if len(var.ORIGINAL_ROLES[role]) == 0 or role == "villager": continue playersinrole = list(var.ORIGINAL_ROLES[role]) for i,plr in enumerate(playersinrole): if plr.startswith("(dced)"): # don't care about it here playersinrole[i] = plr[6:] if len(playersinrole) == 2: msg = "The {1} were \u0002{0[0]}\u0002 and \u0002{0[1]}\u0002." roles_msg.append(msg.format(playersinrole, var.plural(role))) elif len(playersinrole) == 1: roles_msg.append("The {1} was \u0002{0[0]}\u0002.".format(playersinrole, role)) else: msg = "The {2} were {0}, and \u0002{1}\u0002." 
nickslist = ["\u0002"+x+"\u0002" for x in playersinrole[0:-1]] roles_msg.append(msg.format(", ".join(nickslist), playersinrole[-1], var.plural(role))) cli.msg(chan, " ".join(roles_msg)) if var.AUTO_LOG_TOGGLED == True: var.LOG_CHAN = True var.AUTO_LOG_TOGGLED = False cli.msg(botconfig.CHANNEL, "Logging has now been re-enabled.") cli.msg(botconfig.ADMIN_CHAN, "Game has ended, logging is now \u0002on\u0002.") plrl = [] for role,ppl in var.ORIGINAL_ROLES.items(): for x in ppl: plrl.append((x, role)) var.LOGGER.saveToFile() for plr, rol in plrl: #if plr not in var.USERS.keys(): # he died TODO: when a player leaves, count the game as lost for him # if plr in var.DEAD_USERS.keys(): # acc = var.DEAD_USERS[plr]["account"] # else: # continue # something wrong happened #else: if plr.startswith("(dced)") and plr[6:] in var.DCED_PLAYERS.keys(): acc = var.DCED_PLAYERS[plr[6:]]["account"] elif plr in var.PLAYERS.keys(): acc = var.PLAYERS[plr]["account"] else: continue #probably fjoin'd fake if acc == "*": continue # not logged in during game start # determine if this player's team won if plr in (var.ORIGINAL_ROLES["wolf"] + var.ORIGINAL_ROLES["traitor"] + var.ORIGINAL_ROLES["werecrow"]): # the player was wolf-aligned if winner == "wolves": won = True elif winner == "villagers": won = False else: break # abnormal game stop else: if winner == "wolves": won = False elif winner == "villagers": won = True else: break iwon = won and plr in var.list_players() # survived, team won = individual win var.update_role_stats(acc, rol, won, iwon) size = len(var.list_players()) + len(var.DEAD) if winner != "": # Only update if not an abnormal game stop var.update_game_stats(size, winner) reset(cli) return True def chk_win(cli): """ Returns True if someone won """ chan = botconfig.CHANNEL lpl = len(var.list_players()) if lpl == 0: cli.msg(chan, "No more players remaining. Game ended.") reset(cli) return True if var.PHASE == "join": return False lwolves = (len(var.ROLES["wolf"])+ len(var.ROLES["traitor"])+ len(var.ROLES["werecrow"])) if var.PHASE == "day": lpl -= len([x for x in var.WOUNDED if x not in var.ROLES["traitor"]]) lwolves -= len([x for x in var.WOUNDED if x in var.ROLES["traitor"]]) if lwolves == lpl / 2: cli.msg(chan, ("Game over! There are the same number of wolves as "+ "villagers. The wolves eat everyone and win.")) var.LOGGER.logMessage(("Game over! There are the same number of wolves as "+ "villagers. The wolves eat everyone, and win.")) village_win = False var.LOGGER.logBare("WOLVES", "WIN") elif lwolves > lpl / 2: cli.msg(chan, ("Game over! There are more wolves than "+ "villagers. The wolves eat everyone, and win.")) var.LOGGER.logMessage(("Game over! There are more wolves than "+ "villagers. The wolves eat everyone, and win.")) village_win = False var.LOGGER.logBare("WOLVES", "WIN") elif (not var.ROLES["wolf"] and not var.ROLES["traitor"] and not var.ROLES["werecrow"]): cli.msg(chan, ("Game over! All the wolves are dead! The villagers "+ "chop them up, BBQ them, and have a hearty meal.")) var.LOGGER.logMessage(("Game over! All the wolves are dead! The villagers "+ "chop them up, BBQ them, and have a hearty meal.")) village_win = True var.LOGGER.logBare("VILLAGERS", "WIN") elif (not var.ROLES["wolf"] and not var.ROLES["werecrow"] and var.ROLES["traitor"]): for t in var.ROLES["traitor"]: var.LOGGER.logBare(t, "TRANSFORM") chk_traitor(cli) cli.msg(chan, ('\u0002The villagers, during their celebrations, are '+ 'frightened as they hear a loud howl. 
The wolves are '+ 'not gone!\u0002')) var.LOGGER.logMessage(('The villagers, during their celebrations, are '+ 'frightened as they hear a loud howl. The wolves are '+ 'not gone!')) return chk_win(cli) else: return False stop_game(cli, "villagers" if village_win else "wolves") return True def del_player(cli, nick, forced_death = False, devoice = True): """ Returns: False if one side won. arg: forced_death = True when lynched or when the seer/wolf both don't act """ t = time.time() # time var.LAST_STATS = None # reset var.LAST_VOTES = None with var.GRAVEYARD_LOCK: if not var.GAME_ID or var.GAME_ID > t: # either game ended, or a new game has started. return False cmode = [] if devoice: cmode.append(("-v", nick)) var.del_player(nick) ret = True if var.PHASE == "join": # Died during the joining process as a person if nick in var.WAS_OP and nick not in var.IS_OP: var.WAS_OP.remove(nick) var.IS_OP.append(nick) cmode.append(("+o", nick)) mass_mode(cli, cmode) return not chk_win(cli) if var.PHASE != "join" and ret: # Died during the game, so quiet! if not is_fake_nick(nick): cmode.append(("+q", nick+"!*@*")) mass_mode(cli, cmode) if nick not in var.DEAD: var.DEAD.append(nick) ret = not chk_win(cli) if var.PHASE in ("night", "day") and ret: # remove him from variables if he is in there for a,b in list(var.KILLS.items()): if b == nick: del var.KILLS[a] elif a == nick: del var.KILLS[a] for x in (var.OBSERVED, var.HVISITED, var.GUARDED, var.BURN): keys = list(x.keys()) for k in keys: if k == nick: del x[k] elif x[k] == nick: del x[k] if nick in var.DISCONNECTED: del var.DISCONNECTED[nick] if var.PHASE == "day" and not forced_death and ret: # didn't die from lynching if nick in var.VOTES.keys(): del var.VOTES[nick] # Delete other people's votes on him for k in list(var.VOTES.keys()): if nick in var.VOTES[k]: var.VOTES[k].remove(nick) if not var.VOTES[k]: # no more votes on that guy del var.VOTES[k] break # can only vote once if nick in var.NO_LYNCH: var.NO_LYNCH.remove(nick) if nick in var.WOUNDED: var.WOUNDED.remove(nick) chk_decision(cli) elif var.PHASE == "night" and ret: chk_nightdone(cli) return ret if nick in var.WAS_OP and nick not in var.IS_OP: var.WAS_OP.remove(nick) var.IS_OP.append(nick) cli.mode(botconfig.CHANNEL, "+o {0}".format(nick)) def reaper(cli, gameid): # check to see if idlers need to be killed. 
var.IDLE_WARNED = [] chan = botconfig.CHANNEL while gameid == var.GAME_ID: with var.GRAVEYARD_LOCK: if var.WARN_IDLE_TIME or var.KILL_IDLE_TIME: # only if enabled to_warn = [] to_kill = [] for nick in var.list_players(): lst = var.LAST_SAID_TIME.get(nick, var.GAME_START_TIME) tdiff = datetime.now() - lst if (tdiff > timedelta(seconds=var.WARN_IDLE_TIME) and nick not in var.IDLE_WARNED): if var.WARN_IDLE_TIME: to_warn.append(nick) var.IDLE_WARNED.append(nick) var.LAST_SAID_TIME[nick] = (datetime.now() - timedelta(seconds=var.WARN_IDLE_TIME)) # Give him a chance elif (tdiff > timedelta(seconds=var.KILL_IDLE_TIME) and nick in var.IDLE_WARNED): if var.KILL_IDLE_TIME: to_kill.append(nick) elif (tdiff < timedelta(seconds=var.WARN_IDLE_TIME) and nick in var.IDLE_WARNED): var.IDLE_WARNED.remove(nick) # he saved himself from death for nck in to_kill: if nck not in var.list_players(): continue if var.LOG_CHAN == True: cli.send("whois", nck) @hook("whoisuser", hookid=451) def idle_fetch_host(cli, server, you, nick, ident, host, something, realname): var.IDLE_HOST[nick] = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, var.IDLE_HOST[nick], "idle_die") decorators.unhook(HOOKS, 451) cli.msg(chan, ("\u0002{0}\u0002 didn't get out of bed "+ "for a very long time. S/He is declared dead. Appears "+ "(s)he was a \u0002{1}\u0002.").format(nck, var.get_role(nck))) make_stasis(nck, var.IDLE_STASIS_PENALTY) if not del_player(cli, nck): return pl = var.list_players() x = [a for a in to_warn if a in pl] if x: if var.LOG_CHAN == True: cli.who(botconfig.CHANNEL, "%nuchaf") @hook("whospcrpl", hookid=451) def who_fetch_host(cli, server, you, chan, ident, host, nick, status, account): if nick in x: var.WHO_HOST[nick] = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, var.WHO_HOST[nick], "idle_warn") cli.msg(chan, ("{0}: \u0002You have been idling for a while. "+ "Please say something soon or you "+ "might be declared dead.\u0002").format(", ".join(x))) for dcedplayer in list(var.DISCONNECTED.keys()): _, timeofdc, what = var.DISCONNECTED[dcedplayer] if what == "quit" and (datetime.now() - timeofdc) > timedelta(seconds=var.QUIT_GRACE_TIME) and dcedplayer not in var.DCED_GRACE: if var.LOG_CHAN == True: cli.send("whowas", dcedplayer) @hook("whowasuser", hookid=154) def whowas_host(cli, server, you, nick, ident, host, dunno, realname): var.WHOWAS_HOST = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, var.WHOWAS_HOST, "die_quit_wait") decorators.unhook(HOOKS, 154) cli.msg(chan, ("\02{0}\02 died due to a fatal attack by wild animals. Appears (s)he "+ "was a \02{1}\02.").format(dcedplayer, var.get_role(dcedplayer))) make_stasis(dcedplayer, var.PART_STASIS_PENALTY) if not del_player(cli, dcedplayer, devoice = False): return elif what == "quit" and (datetime.now() - timeofdc) > timedelta(seconds=var.QUIT_GRACE_TIME * 2) and dcedplayer in var.DCED_GRACE: if var.LOG_CHAN == True: cli.send("whowas", dcedplayer) @hook("whowasuser", hookid=154) def whowas_host(cli, server, you, nick, ident, host, dunno, realname): var.WHOWAS_HOST = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, var.WHOWAS_HOST, "die_quit_wait") decorators.unhook(HOOKS, 154) cli.msg(chan, ("\02{0}\02 died due to a fatal attack by wild animals. 
Appears (s)he "+ "was a \02{1}\02.").format(dcedplayer, var.get_role(dcedplayer))) make_stasis(dcedplayer, var.PART_STASIS_PENALTY) var.DCED_GRACE.remove(dcedplayer) if not del_player(cli, dcedplayer, devoice = False): return elif what == "part" and (datetime.now() - timeofdc) > timedelta(seconds=var.PART_GRACE_TIME) and dcedplayer not in var.DCED_GRACE: if var.LOG_CHAN == True: cli.send("whois", dcedplayer) @hook("whoisuser", hookid=451) def part_is_host(cli, server, you, nick, ident, host, dunno, realname): if nick == dcedplayer: var.PART = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, var.PART, "die_part_on") decorators.unhook(HOOKS, 451) @hook("nosuchnick", hookid=451) def try_whowas(cli, server, you, action, output): cli.send("whowas", dcedplayer) @hook("whowasuser", hookid=451) def part_was_host(cli, server, you, nick, ident, host, dunno, realname): var.PART = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, var.PART, "die_part_off") decorators.unhook(HOOKS, 451) cli.msg(chan, ("\02{0}\02 died due to eating poisonous berries. Appears (s)he was "+ "a \02{1}\02.").format(dcedplayer, var.get_role(dcedplayer))) make_stasis(dcedplayer, var.PART_STASIS_PENALTY) if not del_player(cli, dcedplayer, devoice = False): return elif what == "part" and (datetime.now() - timeofdc) > timedelta(seconds=var.PART_GRACE_TIME * 2) and dcedplayer in var.DCED_GRACE: if var.LOG_CHAN == True: cli.send("whois", dcedplayer) @hook("whoisuser", hookid=451) def part_is_host(cli, server, you, nick, ident, host, dunno, realname): if nick == dcedplayer: var.PART = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, var.PART, "die_part_on") decorators.unhook(HOOKS, 451) @hook("nosuchnick", hookid=451) def try_whowas(cli, server, you, action, output): cli.send("whowas", dcedplayer) @hook("whowasuser", hookid=451) def part_was_host(cli, server, you, nick, ident, host, dunno, realname): var.PART = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, var.PART, "die_part_off") decorators.unhook(HOOKS, 451) cli.msg(chan, ("\02{0}\02 died due to eating poisonous berries. Appears (s)he was "+ "a \02{1}\02.").format(dcedplayer, var.get_role(dcedplayer))) make_stasis(dcedplayer, var.PART_STASIS_PENALTY) var.DCED_GRACE.remove(dcedplayer) if not del_player(cli, dcedplayer, devoice = False): return time.sleep(10) @cmd("") # update last said + git check def update_last_said(cli, nick, chan, rest): if chan == botconfig.CHANNEL: if var.PHASE not in ("join", "none"): var.LAST_SAID_TIME[nick] = datetime.now() if var.PHASE not in ("none", "join"): var.LOGGER.logChannelMessage(nick, rest) fullstring = "".join(rest) if var.CARE_BOLD and BOLD in fullstring: if var.KILL_BOLD: if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True: if var.EXEMPT_ADMINS == True: cli.msg(nick, "Remember, using bold is not allowed! (Exempted from kick)") return cli.send("KICK {0} {1} :Using bold is not allowed".format(botconfig.CHANNEL, nick)) else: cli.msg(botconfig.CHANNEL, nick + ": Using bold in the channel is not allowed.") if var.CARE_COLOR and any(code in fullstring for code in ["\x03", "\x16", "\x1f" ]): if var.KILL_COLOR: if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True: if var.EXEMPT_ADMINS == True: cli.msg(nick, "Remember, using colors is not allowed! 
(Exempted from kick)") return cli.send("KICK {0} {1} :Using colors is not allowed".format(botconfig.CHANNEL, nick)) else: cli.msg(botconfig.CHANNEL, nick + ": Using colors in the channel is not allowed.") if var.CARE_ADVERTISING and '#' in fullstring and botconfig.CHANNEL not in fullstring: # don't kick if they mention the channel if var.KILL_ADVERTISING: if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True: if var.EXEMPT_ADMINS == True: cli.msg(nick, "Remember, advertising is not allowed! (Exempted from kick)") return cli.send("KICK {0} {1} :Advertising is not allowed".format(botconfig.CHANNEL, nick)) else: cli.msg(botconfig.CHANNEL, nick + ": Advertising is not allowed.") if chan == botconfig.DEV_CHAN and nick == botconfig.DEV_BOT: args = ['git', 'pull'] if botconfig.BRANCH_NAME in rest and " pushed " in rest and botconfig.PROJECT_NAME in rest: args += ["http://github.com/{0}/{1}.git".format(botconfig.GIT_OWNER, botconfig.PROJECT_NAME), botconfig.BRANCH_NAME] cli.msg(chan, "Pulling commit from Git . . .") child = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = child.communicate() ret = child.returncode for line in (out + err).splitlines(): cli.msg(chan, line.decode('utf-8')) if ret != 0: if ret < 0: cause = 'signal' else: cause = 'status' cli.msg(chan, 'Process {} exited with {} {}'.format(args, cause, abs(ret))) if ret == 0: if var.PHASE == "none": cli.msg(chan, "Code successfully updated. Restarting.") cli.quit("Updating database") if var.PHASE == "join": cli.msg(chan, "Code successfully updated. Stopping current game and restarting.") reset(cli) cli.quit("Updating database") if var.PHASE in ["day", "night"]: cli.msg(chan, "Code successfully updated. Restarting once current game is over.") var.GIT_UPDATE = True aftergame(cli, var.FULL_ADDRESS, "update") @cmd('fpull', raw_nick=True) def fpull(cli, rnick, chan, rest): nick, mode, user, host = parse_nick(rnick) if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True: args = ['git', 'pull'] if rest: args += rest.split(' ') else: args += ["http://github.com/{0}/{1}.git".format(botconfig.GIT_OWNER, botconfig.PROJECT_NAME), botconfig.BRANCH_NAME] cli.msg(chan, "Pulling commit from Git . . 
.") child = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = child.communicate() ret = child.returncode for line in (out + err).splitlines(): cli.msg(chan, line.decode('utf-8')) if ret != 0: if ret < 0: cause = 'signal' else: cause = 'status' cli.msg(chan, 'Process {} exited with {} {}'.format(args, cause, abs(ret))) @hook("join") def on_join(cli, raw_nick, chan, acc="*", rname=""): nick,m,u,cloak = parse_nick(raw_nick) if nick != botconfig.NICK: var.IS_ADMIN[nick] = False var.IS_OWNER[nick] = False # have everyone in there to avoid errors if nick == botconfig.NICK: cli.who(chan, "%nuchaf") @hook("whospcrpl", hookid=121) def put_the_admins(cli, server, nick, chan, ident, host, user, status, acc): # user is the nickname var.IS_ADMIN[user] = False var.IS_OWNER[user] = False if host in botconfig.ADMINS or acc in botconfig.ADMINS_ACCOUNTS: var.IS_ADMIN[user] = True if host in botconfig.OWNERS or acc in botconfig.OWNERS_ACCOUNTS: var.IS_ADMIN[user] = True var.IS_OWNER[user] = True @hook("endofwho", hookid=121) def unhook_admins(*stuff): # not important decorators.unhook(HOOKS, 121) cli.who(botconfig.CHANNEL, "%nuchaf") @hook("whospcrpl", hookid=121) def put_ops(cli, server, nick, chan, ident, host, user, status, acc): if '@' in status and user not in var.IS_OP and chan == botconfig.CHANNEL: var.IS_OP.append(user) @hook("endofwho", hookid=121) def unhook_ops(*stuff): decorators.unhook(HOOKS, 121) if cloak in botconfig.ADMINS or cloak in botconfig.OWNERS or acc in botconfig.ADMINS_ACCOUNTS or acc in botconfig.OWNERS_ACCOUNTS: var.IS_ADMIN[nick] = True if cloak in botconfig.OWNERS or acc in botconfig.OWNERS_ACCOUNTS: var.IS_OWNER[nick] = True if nick not in var.USERS.keys() and nick != botconfig.NICK and chan == botconfig.CHANNEL: var.USERS[nick] = dict(cloak=cloak,account=acc) with var.GRAVEYARD_LOCK: if nick in var.DISCONNECTED.keys(): clk = var.DISCONNECTED[nick][0] if cloak == clk: cli.mode(chan, "+v", nick, nick+"!*@*") del var.DISCONNECTED[nick] cli.msg(chan, "\02{0}\02 has returned to the village.".format(nick)) for r,rlist in var.ORIGINAL_ROLES.items(): if "(dced)"+nick in rlist: rlist.remove("(dced)"+nick) rlist.append(nick) break if nick in var.DCED_PLAYERS.keys(): var.PLAYERS[nick] = var.DCED_PLAYERS.pop(nick) if nick == botconfig.NICK or nick == botconfig.NICK+"_": return @cmd("goat", "g", raw_nick=True) def goat(cli, rnick, chan, rest): """Use a goat to interact with anyone in the channel during the day""" nick, mode, user, host = parse_nick(rnick) if chan == botconfig.CHANNEL: if var.PHASE in ("none", "join"): cli.notice(nick, "No game is currently running.") return elif nick not in var.list_players() or nick in var.DISCONNECTED.keys(): cli.notice(nick, "You're not currently playing.") return if var.PHASE != "day": cli.notice(nick, "You can only do that in the day.") return if var.GOATED and nick not in var.SPECIAL_ROLES["goat herder"]: cli.notice(nick, "You can only do that once per day.") return ul = list(var.USERS.keys()) ull = [x.lower() for x in ul] rest = re.split(" +",rest)[0].strip().lower() if not rest: cli.notice(nick, "Not enough parameters.") return matches = 0 for player in ull: if rest == player: victim = player break if player.startswith(rest): victim = player matches += 1 else: if matches != 1: pm(cli, nick,"\u0002{0}\u0002 is not in this channel.".format(rest)) return victim = ul[ull.index(victim)] if var.LOG_CHAN == True: chan_log(cli, rnick, "goat") goatact = random.choice(["kicks", "headbutts"]) cli.msg(botconfig.CHANNEL, 
("\u0002{0}\u0002's goat walks by "+ "and {1} \u0002{2}\u0002.").format(nick, goatact, victim)) var.LOGGER.logMessage("{0}'s goat walks by and {1} {2}.".format(nick, goatact, victim)) var.GOATED = True @cmd("fgoat", raw_nick=True) def fgoat(cli, rnick, chan, rest): nick, mode, user, host = parse_nick(rnick) if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True: var.GOATED = False goat(cli, rnick, chan, rest) @hook("nick") def on_nick(cli, rnick, nick): prefix,u,m,cloak = parse_nick(rnick) chan = botconfig.CHANNEL var.IS_ADMIN[nick] = False var.IS_OWNER[nick] = False if prefix in var.IS_ADMIN and var.IS_ADMIN[prefix] == True: var.IS_ADMIN[prefix] = False var.IS_ADMIN[nick] = True if prefix in var.IS_OWNER and var.IS_OWNER[prefix] == True: var.IS_OWNER[prefix] = False var.IS_OWNER[nick] = True if prefix in var.IS_OP: var.IS_OP.remove(prefix) if nick not in var.IS_OP: var.IS_OP.append(nick) if prefix in var.USERS: var.USERS[nick] = var.USERS.pop(prefix) if prefix == var.ADMIN_TO_PING: var.ADMIN_TO_PING = nick if prefix in var.USERS and nick in var.DISCONNECTED.keys(): var.DCED_GRACE.append(nick) leave(cli, "nick", prefix) return # for k,v in list(var.DEAD_USERS.items()): # if prefix == k: # var.DEAD_USERS[nick] = var.DEAD_USERS[k] # del var.DEAD_USERS[k] if prefix in var.NO_LYNCH: var.NO_LYNCH.remove(prefix) var.NO_LYNCH.append(nick) if prefix in var.list_players() and prefix not in var.DISCONNECTED.keys(): r = var.ROLES[var.get_role(prefix)] r.append(nick) r.remove(prefix) if var.PHASE in ("night", "day"): for k,v in var.ORIGINAL_ROLES.items(): if prefix in v: var.ORIGINAL_ROLES[k].remove(prefix) var.ORIGINAL_ROLES[k].append(nick) break for k,v in list(var.PLAYERS.items()): if prefix == k: var.PLAYERS[nick] = var.PLAYERS[k] del var.PLAYERS[k] if prefix in var.GUNNERS.keys(): var.GUNNERS[nick] = var.GUNNERS.pop(prefix) if prefix in var.CURSED: var.CURSED.append(nick) var.CURSED.remove(prefix) for dictvar in (var.HVISITED, var.OBSERVED, var.GUARDED, var.KILLS): kvp = [] for a,b in dictvar.items(): if a == prefix: a = nick if b == prefix: b = nick kvp.append((a,b)) dictvar.update(kvp) if prefix in dictvar.keys(): del dictvar[prefix] if prefix in var.SEEN: var.SEEN.remove(prefix) var.SEEN.append(nick) with var.GRAVEYARD_LOCK: # to be safe if prefix in var.LAST_SAID_TIME.keys(): var.LAST_SAID_TIME[nick] = var.LAST_SAID_TIME.pop(prefix) if prefix in var.IDLE_WARNED: var.IDLE_WARNED.remove(prefix) var.IDLE_WARNED.append(nick) if var.PHASE == "day": if prefix in var.WOUNDED: var.WOUNDED.remove(prefix) var.WOUNDED.append(nick) if prefix in var.INVESTIGATED: var.INVESTIGATED.remove(prefix) var.INVESTIGATED.append(prefix) if prefix in var.VOTES: var.VOTES[nick] = var.VOTES.pop(prefix) for v in var.VOTES.values(): if prefix in v: v.remove(prefix) v.append(nick) # Check if he was DC'ed if var.PHASE in ("night", "day"): with var.GRAVEYARD_LOCK: if nick in var.DISCONNECTED.keys(): clk = var.DISCONNECTED[nick][0] if cloak == clk: cli.mode(chan, "+v", nick, nick+"!*@*") del var.DISCONNECTED[nick] if var.LOG_CHAN == True: chan_log(cli, rnick, "return") cli.msg(chan, ("\02{0}\02 has returned to "+ "the village.").format(nick)) def leave(cli, what, rnick, why=""): nick, _, _, cloak = parse_nick(rnick) if what == "part" and why != botconfig.CHANNEL: return if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True: var.IS_ADMIN[nick] = False if nick in var.IS_OWNER and var.IS_OWNER[nick] == True: var.IS_OWNER[nick] = False if nick not in var.IS_ADMIN: var.IS_ADMIN[nick] = False # just in case var.IS_OWNER[nick] = 
False # owner is admin anyway if why and why == botconfig.CHANGING_HOST_QUIT_MESSAGE: return if var.PHASE == "none": return if nick in var.PLAYERS: # must prevent double entry in var.ORIGINAL_ROLES for r,rlist in var.ORIGINAL_ROLES.items(): if nick in rlist: var.ORIGINAL_ROLES[r].remove(nick) var.ORIGINAL_ROLES[r].append("(dced)"+nick) break var.DCED_PLAYERS[nick] = var.PLAYERS.pop(nick) if nick not in var.list_players() or nick in var.DISCONNECTED.keys(): return # the player who just quit was in the game killhim = True if what == "part" and (not var.PART_GRACE_TIME or var.PHASE == "join"): if var.LOG_CHAN == True: chan_log(cli, rnick, "die_part") msg = ("\02{0}\02 died due to eating poisonous berries. Appears "+ "(s)he was a \02{1}\02.").format(nick, var.get_role(nick)) elif what == "quit" and (not var.QUIT_GRACE_TIME or var.PHASE == "join"): if var.LOG_CHAN == True: chan_log(cli, rnick, "die_quit") msg = ("\02{0}\02 died due to a fatal attack by wild animals. Appears "+ "(s)he was a \02{1}\02.").format(nick, var.get_role(nick)) elif what == "nick": if var.LOG_CHAN == True: chan_log(cli, rnick, "nick") msg = "\02{0}\02 drowned in the lake. Appears (s)he was a \02{1}\02.".format(nick, var.get_role(nick)) elif what != "kick": if var.LOG_CHAN == True: chan_log(cli, rnick, "leave_wait") msg = "\u0002{0}\u0002 has gone missing.".format(nick) killhim = False else: if var.LOG_CHAN == True: chan_log(cli, rnick, "die_kick") msg = ("\02{0}\02 died due to falling off a cliff. Appears "+ "(s)he was a \02{1}\02.").format(nick, var.get_role(nick)) cli.msg(botconfig.CHANNEL, msg) var.LOGGER.logMessage(msg.replace("\02", "")) make_stasis(nick, var.PART_STASIS_PENALTY) if killhim: del_player(cli, nick) else: var.DISCONNECTED[nick] = (cloak, datetime.now(), what) #Functions decorated with hook do not parse the nick by default hook("part")(lambda cli, nick, *rest: leave(cli, "part", nick, rest[0])) hook("quit")(lambda cli, nick, *rest: leave(cli, "quit", nick, rest[0])) hook("kick")(lambda cli, nick, *rest: leave(cli, "kick", rest[1])) @cmd("quit", "leave", "q", raw_nick=True) def leave_game(cli, rnick, chan, rest): """Quits the game.""" nick, mode, user, host = parse_nick(rnick) if chan == botconfig.CHANNEL: if var.PHASE == "none": cli.notice(nick, "No game is currently running.") return if nick not in var.list_players() or nick in var.DISCONNECTED.keys(): # not playing cli.notice(nick, "You're not currently playing.") return if var.LOG_CHAN == True and var.GOT_IT != True: chan_log(cli, rnick, "leave") var.GOT_IT = False _pl = len(var.list_players()) pl = _pl - 1 if pl == 0: cli.msg(chan, "\02{0}\02 died of an unknown disease. S/He was a \02{1}\02.".format(nick, var.get_role(nick))) if not pl == 0: cli.msg(chan, "\02{0}\02 died of an unknown disease. S/He was a \02{1}\02. New player count: \02{2}\02".format(nick, var.get_role(nick), pl)) var.LOGGER.logMessage(("{0} died of an unknown disease. 
"+ "S/He was a {1}.").format(nick, var.get_role(nick))) make_stasis(nick, var.LEAVE_STASIS_PENALTY) del_player(cli, nick) @hook("mode") def mode(cli, nick, chan, mode, *params): params = list(params) if '-' in mode and 'o' in mode and chan == botconfig.CHANNEL: cli.send('whois', botconfig.NICK) @hook("whoischannels", hookid=267) def is_not_op_or_just_me(cli, server, you, user, chans): # check if the bot is op in the channel if user == you: # just make sure it's the right one if botconfig.CHANNEL in chans: if "@{0}".format(botconfig.CHANNEL) in chans: return elif "{0}".format(botconfig.CHANNEL) in chans and botconfig.OP_NEEDED == True: cli.msg(chan, "Error: OP Status is needed for the game to work") @hook("cannotsendtochan", hookid=267) def game_must_end(cli, server, you, action, output): # not op, end the game stop_game(cli) reset(cli) cli.quit("An error has been encountered") cli.who(botconfig.CHANNEL, "%nuchaf") @hook("whospcrpl", hookid=267) def check_for_ops(cli, server, you, chanw, ident, host, user, status, account): # user = nick if user in var.IS_OP and '@' not in status and chanw == botconfig.CHANNEL: var.IS_OP.remove(user) if nick == you and user not in var.WAS_OP and "@" not in status and chanw == botconfig.CHANNEL: var.WAS_OP.append(user) if '+' in mode and 'o' in mode and chan == botconfig.CHANNEL: cli.who(botconfig.CHANNEL, "%nuchaf") @hook("whospcrpl", hookid=267) def check_new_ops(cli, server, you, chany, ident, host, user, status, account): # user = nick if user in var.WAS_OP and "@" in status and chany == botconfig.CHANNEL: var.WAS_OP.remove(user) if user not in var.IS_OP and "@" in status and chany == botconfig.CHANNEL: var.IS_OP.append(user) decorators.unhook(HOOKS, 267) def begin_day(cli): chan = botconfig.CHANNEL # Reset nighttime variables var.KILLS = {} # nicknames of kill victim var.GUARDED = "" var.KILLER = "" # nickname of who chose the victim var.SEEN = [] # list of seers that have had visions var.OBSERVED = {} # those whom werecrows have observed var.HVISITED = {} var.GUARDED = {} var.BURN = {} var.BURNED_HOUSES = [] msg = ("The villagers must now vote for whom to lynch. "+ 'Use "{0}lynch <nick>" to cast your vote. {1} votes '+ 'are required to lynch.').format(botconfig.CMD_CHAR, len(var.list_players()) // 2 + 1) cli.msg(chan, msg) var.LOGGER.logMessage(msg) var.LOGGER.logBare("DAY", "BEGIN") if var.DAY_TIME_LIMIT_WARN > 0: # Time limit enabled var.DAY_ID = time.time() if len(var.list_players()) <= var.SHORT_DAY_PLAYERS: t = threading.Timer(var.SHORT_DAY_LIMIT_WARN, hurry_up, [cli, var.DAY_ID, False]) else: t = threading.Timer(var.DAY_TIME_LIMIT_WARN, hurry_up, [cli, var.DAY_ID, False]) var.TIMERS["day_warn"] = t t.daemon = True t.start() def night_warn(cli, gameid): if gameid != var.NIGHT_ID: return if var.PHASE == "day": return if var.LOG_CHAN == True: chan_log(cli, var.FULL_ADDRESS, "night_warn") cli.msg(botconfig.CHANNEL, ("\02A few villagers awake early and notice it " + "is still dark outside. 
" + "The night is almost over and there are " + "still whispers heard in the village.\02")) def transition_day(cli, gameid=0): if gameid: if gameid != var.NIGHT_ID: return var.NIGHT_ID = 0 if var.PHASE == "day": return var.PHASE = "day" var.GOATED = False chan = botconfig.CHANNEL # Reset daytime variables var.NO_LYNCH = [] var.VOTES = {} var.INVESTIGATED = [] var.WOUNDED = [] var.DAY_START_TIME = datetime.now() if (not len(var.SEEN)+len(var.KILLS)+len(var.OBSERVED) # neither seer nor wolf acted and var.FIRST_NIGHT and var.ROLES["seer"] and not botconfig.DEBUG_MODE): if var.LOG_CHAN == True: chan_log(cli, var.FULL_ADDRESS, "wolf_die") cli.msg(botconfig.CHANNEL, "\02The wolves all die of a mysterious plague.\02") for x in var.ROLES["wolf"]+var.ROLES["werecrow"]+var.ROLES["traitor"]: if not del_player(cli, x, True): return var.FIRST_NIGHT = False td = var.DAY_START_TIME - var.NIGHT_START_TIME var.NIGHT_START_TIME = None var.NIGHT_TIMEDELTA += td min, sec = td.seconds // 60, td.seconds % 60 found = {} for v in var.KILLS.values(): if v in found: found[v] += 1 else: found[v] = 1 maxc = 0 victim = "" dups = [] for v, c in found.items(): if c > maxc: maxc = c victim = v dups = [] elif c == maxc: dups.append(v) if maxc: if dups: dups.append(victim) victim = random.choice(dups) message = [("Night lasted \u0002{0:0>2}:{1:0>2}\u0002. It is now daytime. "+ "The villagers awake, thankful for surviving the night, "+ "and search the village... ").format(min, sec)] dead = [] crowonly = var.ROLES["werecrow"] and not var.ROLES["wolf"] survived_fire = [] if victim: var.LOGGER.logBare(victim, "WOLVESVICTIM", *[y for x,y in var.KILLS.items() if x == victim]) for crow, target in iter(var.OBSERVED.items()): if ((target in list(var.HVISITED.keys()) and var.HVISITED[target]) or # if var.HVISITED[target] is None, harlot visited self target in var.SEEN+list(var.GUARDED.keys())): if var.LOG_CHAN == True: cli.send("whois", crow) @hook("whoisuser", hookid=942) def crow_host(cli, server, you, nick, ident, host, dunno, realname): if nick == crow: crow_ = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, crow_, "crow_away_{0}".format(get_role(target))) decorators.unhook(HOOKS, 942) pm(cli, crow, ("As the sun rises, you conclude that \u0002{0}\u0002 was not in "+ "bed all night, and you fly back to your house.").format(target)) else: if var.LOG_CHAN == True: cli.send("whois", crow) @hook("whoisuser", hookid=942) def crow_host2(cli, server, you, nick, ident, host, dunno, realname): if nick == crow: crow_ = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, nick, "crow_bed_{0}".format(get_role(target))) decorators.unhook(HOOKS, 942) pm(cli, crow, ("As the sun rises, you conclude that \u0002{0}\u0002 was sleeping "+ "all night long, and you fly back to your house.").format(target)) if victim in var.GUARDED.values(): if var.LOG_CHAN == True: cli.send("whois", victim) @hook("whoisuser", hookid=942) def angel_victim_host(cli, server, you, nick, ident, host, dunno, realname): if nick == victim: victim_ = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, victim_, "saved_angel") decorators.unhook(HOOKS, 942) message.append(("\u0002{0}\u0002 was attacked by the wolves last night, but luckily, the "+ "guardian angel protected him/her.").format(victim)) victim = "" elif not victim: if var.LOG_CHAN == True: chan_log(cli, var.FULL_ADDRESS, "no_victim") message.append(random.choice(var.NO_VICTIMS_MESSAGES) + " All villagers, however, have survived.") elif victim in var.ROLES["harlot"]: # Attacked harlot, yay no kill if 
var.HVISITED.get(victim):
            if var.LOG_CHAN == True:
                cli.send("whois", victim)

                @hook("whoisuser", hookid=942)
                def harlot_victim_host(cli, server, you, nick, ident, host, dunno, realname):
                    if nick == victim:
                        victim_ = "{0}!{1}@{2}".format(nick, ident, host)
                        chan_log(cli, victim_, "saved_harlot")
                        decorators.unhook(HOOKS, 942)
            message.append("The wolves' selected victim was a harlot, "+
                           "but she wasn't home.")
    if victim and (victim not in var.ROLES["harlot"] or   # not a harlot
                   not var.HVISITED.get(victim) and       # harlot stayed home
                   (victim not in var.BURNED and victim in var.list_players())):  # victim wasn't burned to ashes
        if var.LOG_CHAN == True:
            cli.send("whois", victim)

            @hook("whoisuser", hookid=942)
            def wolf_victim_host(cli, server, you, nick, ident, host, dunno, realname):
                if nick == victim:
                    victim_ = "{0}!{1}@{2}".format(nick, ident, host)
                    chan_log(cli, victim_, "death")
                    decorators.unhook(HOOKS, 942)
        message.append(("The dead body of \u0002{0}\u0002, a "+
                        "\u0002{1}\u0002, is found. Those remaining mourn his/her "+
                        "death.").format(victim, var.get_role(victim)))
        dead.append(victim)
        var.LOGGER.logBare(victim, "KILLED")
        if victim in var.GUNNERS.keys() and var.GUNNERS[victim]:  # victim had bullets!
            if random.random() < var.GUNNER_KILLS_WOLF_AT_NIGHT_CHANCE:
                # don't kill off werecrows that observed (filter a copy so the
                # real role list is not modified while choosing a target)
                wc = [crow for crow in var.ROLES["werecrow"] if crow not in var.OBSERVED]
                deadwolf = random.choice(var.ROLES["wolf"]+wc)
                if var.LOG_CHAN == True:
                    cli.send("whois", deadwolf)

                    @hook("whoisuser", hookid=942)
                    def wolf_killed_host(cli, server, you, nick, ident, host, dunno, realname):
                        if nick == deadwolf:
                            deadwolf_ = "{0}!{1}@{2}".format(nick, ident, host)
                            chan_log(cli, deadwolf_, "night_shot")
                            decorators.unhook(HOOKS, 942)
                message.append(("Fortunately, the victim, \02{0}\02, had bullets, and "+
                                "\02{1}\02, a \02{2}\02, was shot dead.").format(victim, deadwolf, var.get_role(deadwolf)))
                var.LOGGER.logBare(deadwolf, "KILLEDBYGUNNER")
                dead.append(deadwolf)
        if victim in var.PYROS.keys() and var.PYROS[victim]:  # victim had molotovs!
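            # The molotov retaliation below mirrors the gunner retaliation
            # above: one random.random() draw is checked against a per-role
            # chance constant, and werecrows who flew off to observe are left
            # out of the pool of wolves who can die. The shared pattern, as a
            # minimal illustrative sketch (names here are hypothetical, not
            # part of this file):
            #
            #     eligible = [w for w in wolves if w not in observers]
            #     if random.random() < RETALIATION_CHANCE and eligible:
            #         dead.append(random.choice(eligible))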
            if random.random() < var.PYRO_KILLS_WOLF_AT_NIGHT_CHANCE:
                # don't kill off werecrows that observed (again, work on a copy)
                wc = [crow for crow in var.ROLES["werecrow"] if crow not in var.OBSERVED]
                deadwolf = random.choice(var.ROLES["wolf"]+wc)
                if var.LOG_CHAN == True:
                    cli.send("whois", deadwolf)

                    @hook("whoisuser", hookid=942)
                    def pyro_killed_host(cli, server, you, nick, ident, host, dunno, realname):
                        if nick == deadwolf:
                            deadwolf_ = "{0}!{1}@{2}".format(nick, ident, host)
                            chan_log(cli, deadwolf_, "night_burnt")
                            decorators.unhook(HOOKS, 942)
                message.append(("Fortunately, the victim, \02{0}\02, had molotovs, and "+
                                "decided to burn his/her house down, killing \02{1}\02 "+
                                "in the process, most likely a \02wolf\02.").format(victim, deadwolf))
                var.LOGGER.logBare(deadwolf, "KILLEDBYPYRO")
                var.BURNED.append(deadwolf)
                dead.append(deadwolf)
    if victim in var.HVISITED.values():  # victim was visited by some harlot
        for hlt in var.HVISITED.keys():
            if var.HVISITED[hlt] == victim:
                if var.LOG_CHAN == True:
                    cli.send("whois", hlt)

                    @hook("whoisuser", hookid=942)
                    def double_victim_host(cli, server, you, nick, ident, host, dunno, realname):
                        if nick == hlt:
                            harlot_victim = "{0}!{1}@{2}".format(nick, ident, host)
                            chan_log(cli, harlot_victim, "dead_victim")
                            decorators.unhook(HOOKS, 942)
                message.append(("\02{0}\02, a \02harlot\02, made the unfortunate mistake of "+
                                "visiting the victim's house last night and is "+
                                "now dead.").format(hlt))
                dead.append(hlt)
    for harlot in var.ROLES["harlot"]:
        if var.HVISITED.get(harlot) in var.ROLES["wolf"]+var.ROLES["werecrow"]:
            if var.LOG_CHAN == True:
                cli.send("whois", harlot)

                @hook("whoisuser", hookid=942)
                def harlot_dead_wolf(cli, server, you, nick, ident, host, dunno, realname):
                    if nick == harlot:
                        harlot_ = "{0}!{1}@{2}".format(nick, ident, host)
                        chan_log(cli, harlot_, "visit_wolf")
                        decorators.unhook(HOOKS, 942)
            message.append(("\02{0}\02, a \02harlot\02, made the unfortunate mistake of "+
                            "visiting a wolf's house last night and is "+
                            "now dead.").format(harlot))
            dead.append(harlot)
    for gangel in var.ROLES["guardian angel"]:
        if var.GUARDED.get(gangel) in var.ROLES["wolf"]+var.ROLES["werecrow"]:
            if victim == gangel:
                continue  # already dead.
            r = random.random()
            if r < var.GUARDIAN_ANGEL_DIES_CHANCE:
                if var.LOG_CHAN == True:
                    cli.send("whois", gangel)

                    @hook("whoisuser", hookid=942)
                    def dead_guarding(cli, server, you, nick, ident, host, dunno, realname):
                        if nick == gangel:
                            _angel = "{0}!{1}@{2}".format(nick, ident, host)
                            chan_log(cli, _angel, "dead_angel")
                            decorators.unhook(HOOKS, 942)
                message.append(("\02{0}\02, a \02guardian angel\02, "+
                                "made the unfortunate mistake of guarding a wolf "+
                                "last night, attempted to escape, but failed "+
                                "and is now dead.").format(gangel))
                var.LOGGER.logBare(gangel, "KILLEDWHENGUARDINGWOLF")
                dead.append(gangel)
    for burned in var.BURNED:
        if burned == victim:
            # don't kill off crows that observed (copy, don't mutate the role list)
            wc = [crow for crow in var.ROLES["werecrow"] if crow not in var.OBSERVED]
            deadwolves = var.ROLES["wolf"]+wc
            dw = random.choice(deadwolves)
            dead.append(dw)
            message.append(("Last night, the wolves attacked \02{0}\02. "+
                            "However, an arsonist decided to burn the house "+
                            "down as well. \02{1}\02 was also found dead inside. 
"+ "Because of the fire, it is impossible to determine "+ "what roles they had when they were alive.").format(victim, dw)) elif burned in var.list_players(): pos_surv = var.ROLES["seer"]+var.ROLES["harlot"]+var.ROLES["wolf"]+var.ROLES["werecrow"]+var.ROLES["guardian angel"] surv_pos = var.SEEN+list(var.HVISITED)+list(var.GUARDED)+list(var.KILLS)+list(var.OBSERVED) for roleplay in pos_surv: # Esper used to have a lot of roleplayers, you know if roleplay in var.SEEN or roleplay in var.HVISITED.keys() or roleplay in var.KILLS.keys() or roleplay in var.OBSERVED.keys() or roleplay in var.GUARDED.keys(): survived_fire.append(roleplay) if not burned in survived_fire: message.append(("An \02arsonist\02 threw a molotov at \02{0}\02's house! "+ "They have burned down to ashes and it is impossible to "+ "determine the role they had when alive.").format(burned)) dead.append(burned) for burnh in var.BURNED_HOUSES: if burnh in dead or (burnh in var.BURNED and burnh not in survived_fire): continue # their house is burned down anyway message.append(("An \02arsonist\02 threw a molotov at \02{0}\02's house! "+ "However, s/he survived and will now sleep in the middle "+ "of the remainings.").format(burnh)) cli.msg(chan, "\n".join(message)) for msg in message: var.LOGGER.logMessage(msg.replace("\02", "")) for deadperson in dead: if not del_player(cli, deadperson): return if (var.WOLF_STEALS_GUN and victim in dead and victim in var.GUNNERS.keys() and var.GUNNERS[victim] > 0): # victim has bullets guntaker = random.choice(var.ROLES["wolf"] + var.ROLES["werecrow"] + var.ROLES["traitor"]) # random looter numbullets = var.GUNNERS[victim] var.WOLF_GUNNERS[guntaker] = numbullets # transfer bullets to him/her if var.LOG_CHAN == True: cli.send("whois", guntaker) @hook("whoisuser", hookid=942) def guntaker_host(cli, server, you, nick, ident, host, dunno, realname): if nick == guntaker: wolfgun = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, wolfgun, "wolf_gun") decorators.unhook(HOOKS, 942) mmsg = ("While searching {2}'s belongings, You found " + "a gun loaded with {0} silver bullet{1}! " + "You may only use it during the day. " + "If you shoot at a wolf, you will intentionally miss. " + "If you shoot a villager, it is likely that they will be injured.") if numbullets == 1: mmsg = mmsg.format(numbullets, "", victim) else: mmsg = mmsg.format(numbullets, "s", victim) pm(cli, guntaker, mmsg) var.GUNNERS[victim] = 0 # just in case if (var.WOLF_STEALS_FIRE and victim in dead and victim in var.PYROS.keys() and var.PYROS[victim] > 0): # victim has molotovs firetaker = random.choice(var.ROLES["wolf"] + var.ROLES["werecrow"] + var.ROLES["traitor"]) # random looter numfire = var.PYROS[victim] var.PYROS[firetaker] = numfire # transfer molotovs to him/her if var.LOG_CHAN == True: cli.send("whois", guntaker) @hook("whoisuser", hookid=942) def firetaker_host(cli, server, you, nick, ident, host, dunno, realname): if nick == firetaker: wolfpyro = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, wolfpyro, "wolf_fire") decorators.unhook(HOOKS, 942) nmsg = ("While searching {2}'s belongings, You found " + "{0} molotov{1}! Use \"burn nick\" in PM to "+ "burn a house down.") if numfire == 1: nmsg = nmsg.format(numfire, "", victim) else: nmsg = nmsg.format(numfire, "s", victim) pm(cli, firetaker, nmsg) var.PYROS[victim] = 0 # just in case begin_day(cli) def chk_nightdone(cli): if (len(var.SEEN) == len(var.ROLES["seer"]) and # Seers have seen. len(var.HVISITED.keys()) == len(var.ROLES["harlot"]) and # harlots have harlotted. 
len(var.GUARDED.keys()) == len(var.ROLES["guardian angel"]) and # guardians have guarded len(var.ROLES["werecrow"]+var.ROLES["wolf"]) == len(var.KILLS)+len(var.OBSERVED) and # wolves have wolved len(var.BURN.keys()) == len(var.PYROS) and # arsonists have arsoned var.PHASE == "night"): # check if wolves are actually agreeing if len(set(var.KILLS.values())) > 1: return for x, t in var.TIMERS.items(): t.cancel() var.TIMERS = {} if var.PHASE == "night": # Double check transition_day(cli) @cmd("nolynch", "no_lynch", "nl", "novote", raw_nick=True) def no_lynch(cli, rnick, chan, rest): """Allow someone to refrain from voting for the day""" nick, mode, user, host = parse_nick(rnick) if chan == botconfig.CHANNEL: if var.PHASE in ("none", "join"): cli.notice(nick, "No game is currently running.") return elif nick not in var.list_players() or nick in var.DISCONNECTED.keys(): cli.notice(nick, "You're not currently playing.") return if var.PHASE != "day": cli.notice(nick, "Lynching is only during the day. Please wait patiently for morning.") return if nick in var.WOUNDED: cli.msg(chan, "{0}: You are wounded and resting, thus you are unable to vote for the day.".format(nick)) return if nick in var.NO_LYNCH: var.NO_LYNCH.remove(nick) cli.msg(chan, "{0}: You chose to vote for today.".format(nick)) if var.LOG_CHAN == True: chan_log(cli, rnick, "voting") return candidates = var.VOTES.keys() for voter in list(candidates): if nick in var.VOTES[voter]: var.VOTES[voter].remove(nick) if not var.VOTES[voter]: del var.VOTES[voter] var.NO_LYNCH.append(nick) cli.msg(chan, "{0}: You chose to refrain from voting.".format(nick)) if var.LOG_CHAN == True: chan_log(cli, rnick, "not_lynching") chk_decision(cli) return @cmd("lynch", "vote", "l", "v", raw_nick=True) def vote(cli, rnick, chan, rest): """Use this to vote for a candidate to be lynched""" nick, mode, user, host = parse_nick(rnick) if chan == botconfig.CHANNEL: if var.PHASE in ("none", "join"): cli.notice(nick, "No game is currently running.") return elif nick not in var.list_players() or nick in var.DISCONNECTED.keys(): cli.notice(nick, "You're not currently playing.") return if var.PHASE != "day": cli.notice(nick, ("Lynching is only allowed during the day. 
"+ "Please wait patiently for morning.")) return if nick in var.WOUNDED: cli.msg(chan, ("{0}: You are wounded and resting, "+ "thus you are unable to vote for the day.").format(nick)) return pl = var.list_players() pl_l = [x.strip().lower() for x in pl] rest = re.split(" +",rest)[0].strip().lower() if not rest: cli.notice(nick, "Not enough parameters.") return matches = 0 for player in pl_l: if rest == player: target = player break if player.startswith(rest): target = player matches += 1 else: if matches != 1: pm(cli, nick, "\u0002{0}\u0002 is currently not playing.".format(rest)) return voted = pl[pl_l.index(target)] if not var.SELF_LYNCH_ALLOWED: if nick == voted: cli.notice(nick, "Please try to save yourself.") return lcandidates = list(var.VOTES.keys()) for voters in lcandidates: # remove previous vote if nick in var.VOTES[voters]: var.VOTES[voters].remove(nick) if not var.VOTES.get(voters) and voters != voted: del var.VOTES[voters] break if voted not in var.VOTES.keys(): var.VOTES[voted] = [nick] else: var.VOTES[voted].append(nick) if nick in var.NO_LYNCH: var.NO_LYNCH.remove(nick) if var.LOG_CHAN == True: chan_log(cli, rnick, "vote") cli.msg(chan, ("\u0002{0}\u0002 votes for "+ "\u0002{1}\u0002.").format(nick, voted)) var.LOGGER.logMessage("{0} votes for {1}.".format(nick, voted)) var.LOGGER.logBare(voted, "VOTED", nick) var.LAST_VOTES = None # reset chk_decision(cli) @cmd("retract", "r", raw_nick=True) def retract(cli, rnick, chan, rest): """Takes back your vote during the day (for whom to lynch)""" nick, mode, user, host = parse_nick(rnick) if chan == botconfig.CHANNEL: if var.PHASE in ("none", "join"): cli.notice(nick, "No game is currently running.") return elif nick not in var.list_players() or nick in var.DISCONNECTED.keys(): cli.notice(nick, "You're not currently playing.") return if var.PHASE != "day": cli.notice(nick, ("Lynching is only allowed during the day. "+ "Please wait patiently for morning.")) return candidates = var.VOTES.keys() for voter in list(candidates): if nick in var.VOTES[voter]: var.VOTES[voter].remove(nick) if not var.VOTES[voter]: del var.VOTES[voter] if var.LOG_CHAN == True: chan_log(cli, rnick, "retract") cli.msg(chan, "\u0002{0}\u0002 retracted his/her vote.".format(nick)) var.LOGGER.logBare(voter, "RETRACT", nick) var.LOGGER.logMessage("{0} retracted his/her vote.".format(nick)) var.LAST_VOTES = None # reset break else: cli.notice(nick, "You haven't voted yet.") @cmd("shoot", "sh", raw_nick=True) def shoot(cli, rnick, chan, rest): """Use this to fire off a bullet at someone in the day if you have bullets""" nick, mode, user, host = parse_nick(rnick) if chan == botconfig.CHANNEL: if var.PHASE in ("none", "join"): cli.notice(nick, "No game is currently running.") return elif nick not in var.list_players() or nick in var.DISCONNECTED.keys(): cli.notice(nick, "You're not currently playing.") return if var.PHASE != "day": cli.notice(nick, ("Shooting is only allowed during the day. 
"+ "Please wait patiently for morning.")) return if not (nick in var.GUNNERS.keys() or nick in var.WOLF_GUNNERS.keys()): pm(cli, nick, "You don't have a gun.") return elif ((nick in var.GUNNERS.keys() and not var.GUNNERS[nick]) or (nick in var.WOLF_GUNNERS.keys() and not var.WOLF_GUNNERS[nick])): pm(cli, nick, "You don't have any more bullets.") return victim = re.split(" +",rest)[0].strip().lower() if not victim: cli.notice(nick, "Not enough parameters") return pl = var.list_players() pll = [x.lower() for x in pl] matches = 0 for player in pll: if victim == player: target = player break if player.startswith(victim): target = player matches += 1 else: if matches != 1: pm(cli, nick, "\u0002{0}\u0002 is currently not playing.".format(victim)) return victim = pl[pll.index(target)] if victim == nick: cli.notice(nick, "You are holding it the wrong way.") return wolfshooter = nick in var.ROLES["wolf"]+var.ROLES["werecrow"]+var.ROLES["traitor"] if wolfshooter and nick in var.WOLF_GUNNERS: var.WOLF_GUNNERS[nick] -= 1 else: var.GUNNERS[nick] -= 1 rand = random.random() if nick in var.ROLES["village drunk"]: chances = var.DRUNK_GUN_CHANCES else: chances = var.GUN_CHANCES wolfvictim = victim in var.ROLES["wolf"]+var.ROLES["werecrow"] if rand <= chances[0] and not (wolfshooter and wolfvictim): # didn't miss or suicide # and it's not a wolf shooting another wolf if var.LOG_CHAN == True: chan_log(cli, rnick, "gun_shoot") cli.msg(chan, ("\u0002{0}\u0002 shoots \u0002{1}\u0002 with "+ "a silver bullet!").format(nick, victim)) var.LOGGER.logMessage("{0} shoots {1} with a silver bullet!".format(nick, victim)) victimrole = var.get_role(victim) if victimrole in ("wolf", "werecrow"): if var.LOG_CHAN == True: chan_log(cli, rnick, "gun_wolf") cli.msg(chan, ("\u0002{0}\u0002 is a {1}, and is dying from "+ "the silver bullet.").format(victim, var.get_role(victim))) var.LOGGER.logMessage(("{0} is a {1}, and is dying from the "+ "silver bullet.").format(victim, var.get_role(victim))) if not del_player(cli, victim): return elif random.random() <= var.MANSLAUGHTER_CHANCE: if var.LOG_CHAN == True: chan_log(cli, rnick, "gun_fatal") cli.msg(chan, ("\u0002{0}\u0002 is a not a wolf "+ "but was accidentally fatally injured.").format(victim)) cli.msg(chan, "Appears (s)he was a \u0002{0}\u0002.".format(victimrole)) var.LOGGER.logMessage("{0} is not a wolf but was accidentally fatally injured.".format(victim)) var.LOGGER.logMessage("Appears (s)he was a {0}.".format(victimrole)) if not del_player(cli, victim): return else: if var.LOG_CHAN == True: chan_log(cli, rnick, "gun_vill") cli.msg(chan, ("\u0002{0}\u0002 is a villager and is injured but "+ "will have a full recovery. S/He will be resting "+ "for the day.").format(victim)) var.LOGGER.logMessage(("{0} is a villager and is injured but "+ "will have a full recovery. S/He will be resting "+ "for the day").format(victim)) if victim not in var.WOUNDED: var.WOUNDED.append(victim) lcandidates = list(var.VOTES.keys()) for cand in lcandidates: # remove previous vote if victim in var.VOTES[cand]: var.VOTES[cand].remove(victim) if not var.VOTES.get(cand): del var.VOTES[cand] break chk_decision(cli) chk_win(cli) elif rand <= chances[0] + chances[1]: if var.LOG_CHAN == True: chan_log(cli, rnick, "gun_miss") cli.msg(chan, "\u0002{0}\u0002 is a lousy shooter. S/He missed!".format(nick)) var.LOGGER.logMessage("{0} is a lousy shooter. 
S/He missed!".format(nick)) else: if var.LOG_CHAN == True: chan_log(cli, rnick, "gun_death") cli.msg(chan, ("\u0002{0}\u0002 should clean his/her weapons more often. "+ "The gun exploded and killed him/her!").format(nick)) cli.msg(chan, "Appears that (s)he was a \u0002{0}\u0002.".format(var.get_role(nick))) var.LOGGER.logMessage(("{0} should clean his/her weapers more often. "+ "The gun exploded and killed him/her!").format(nick)) var.LOGGER.logMessage("Appears that (s)he was a {0}.".format(var.get_role(nick))) if not del_player(cli, nick): return # Someone won. @pmcmd("kill", raw_nick=True) def kill(cli, rnick, rest): nick, mode, user, host = parse_nick(rnick) if var.PHASE in ("none", "join"): cli.notice(nick, "No game is currently running.") return elif nick not in var.list_players() or nick in var.DISCONNECTED.keys(): cli.notice(nick, "You're not currently playing.") return role = var.get_role(nick) if role == "traitor": return # they do this a lot. if role not in ('wolf', 'werecrow'): pm(cli, nick, "Only a wolf may use this command.") return if var.PHASE != "night": pm(cli, nick, "You may only kill people at night.") return victim = re.split(" +",rest)[0].strip().lower() if not victim: pm(cli, nick, "Not enough parameters") return if role == "werecrow": # Check if flying to observe if var.OBSERVED.get(nick): pm(cli, nick, ("You have already transformed into a crow; therefore, "+ "you are physically unable to kill a villager.")) return pl = var.list_players() pll = [x.lower() for x in pl] matches = 0 for player in pll: if victim == player: target = player break if player.startswith(victim): target = player matches += 1 else: if matches != 1: pm(cli, nick, "\u0002{0}\u0002 is currently not playing.".format(victim)) return victim = pl[pll.index(target)] if victim == nick: pm(cli, nick, "Suicide is bad. Don't do it.") return cantkillroles = var.ROLES["wolf"]+var.ROLES["werecrow"] # The roles you cannot kill if var.CANT_KILL_TRAITOR: cantkillroles += var.ROLES["traitor"] if victim in cantkillroles: pm(cli, nick, "You may only kill villagers, not other wolves{}." 
.format(" or traitors" if var.CANT_KILL_TRAITOR else "")) return var.KILLS[nick] = victim if var.LOG_CHAN == True: chan_log(cli, rnick, "kill") pm(cli, nick, "You have selected \u0002{0}\u0002 to be killed.".format(victim)) var.KILLED = victim var.LOGGER.logBare(nick, "SELECT", victim) chk_nightdone(cli) @pmcmd("guard", "protect", "save", raw_nick=True) def guard(cli, rnick, rest): nick, mode, user, host = parse_nick(rnick) if var.PHASE in ("none", "join"): cli.notice(nick, "No game is currently running.") return elif nick not in var.list_players() or nick in var.DISCONNECTED.keys(): cli.notice(nick, "You're not currently playing.") return role = var.get_role(nick) if role != 'guardian angel': pm(cli, nick, "Only a guardian angel may use this command.") return if var.PHASE != "night": pm(cli, nick, "You may only protect people at night.") return victim = re.split(" +",rest)[0].strip().lower() if not victim: pm(cli, nick, "Not enough parameters") return if var.GUARDED.get(nick): pm(cli, nick, ("You are already protecting "+ "\u0002{0}\u0002.").format(var.GUARDED[nick])) return pl = var.list_players() pll = [x.lower() for x in pl] matches = 0 for player in pll: if victim == player: target = player break if player.startswith(victim): target = player matches += 1 else: if matches != 1: pm(cli, nick, "\u0002{0}\u0002 is currently not playing.".format(victim)) return victim = pl[pll.index(target)] if victim == nick: pm(cli, nick, "You may not guard yourself.") return var.GUARDED[nick] = victim if var.LOG_CHAN == True: cli.send("whois", victim) @hook("whoisuser", hookid=777) def guarding(cli, server, you, nick, ident, host, dunno, realname): if nick == victim: victim_ = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, rnick, "guarding") chan_log(cli, victim_, "guarded") decorators.unhook(HOOKS, 777) pm(cli, nick, "You are protecting \u0002{0}\u0002 tonight. 
Farewell!".format(var.GUARDED[nick])) pm(cli, var.GUARDED[nick], "You can sleep well tonight, for a guardian angel is protecting you.") var.LOGGER.logBare(var.GUARDED[nick], "GUARDED", nick) chk_nightdone(cli) @pmcmd("observe", raw_nick=True) def observe(cli, rnick, rest): nick, mode, user, host = parse_nick(rnick) if var.PHASE in ("none", "join"): cli.notice(nick, "No game is currently running.") return elif nick not in var.list_players() or nick in var.DISCONNECTED.keys(): cli.notice(nick, "You're not currently playing.") return if not var.is_role(nick, "werecrow"): pm(cli, nick, "Only a werecrow may use this command.") return if var.PHASE != "night": pm(cli, nick, "You may only transform into a crow at night.") return victim = re.split(" +", rest)[0].strip().lower() if not victim: pm(cli, nick, "Not enough parameters") return pl = var.list_players() pll = [x.lower() for x in pl] matches = 0 for player in pll: if victim == player: target = player break if player.startswith(victim): target = player matches += 1 else: if matches != 1: pm(cli, nick,"\u0002{0}\u0002 is currently not playing.".format(victim)) return victim = pl[pll.index(target)] if victim == nick.lower(): pm(cli, nick, "Instead of doing that, you should probably go kill someone.") return if nick in var.OBSERVED.keys(): pm(cli, nick, "You are already flying to \02{0}\02's house.".format(var.OBSERVED[nick])) return if var.get_role(victim) in ("werecrow", "traitor", "wolf"): pm(cli, nick, "Flying to another wolf's house is a waste of time.") return var.OBSERVED[nick] = victim if nick in var.KILLS.keys(): del var.KILLS[nick] if var.LOG_CHAN == True: cli.send("whois", victim) @hook("whoisuser", hookid=824) def crow_victim_host(cli, server, you, nick, ident, host, dunno, realname): if nick == victim: _victim = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, rnick, "observe") chan_log(cli, _victim, "observed") decorators.unhook(HOOKS, 824) pm(cli, nick, ("You transform into a large crow and start your flight "+ "to \u0002{0}'s\u0002 house. 
You will return after "+ "collecting your observations when day begins.").format(victim)) var.LOGGER.logBare(victim, "OBSERVED", nick) @pmcmd("id", raw_nick=True) def investigate(cli, rnick, rest): nick, mode, user, host = parse_nick(rnick) if var.PHASE in ("none", "join"): cli.notice(nick, "No game is currently running.") return elif nick not in var.list_players() or nick in var.DISCONNECTED.keys(): cli.notice(nick, "You're not currently playing.") return if not var.is_role(nick, "detective"): pm(cli, nick, "Only a detective may use this command.") return if var.PHASE != "day": pm(cli, nick, "You may only investigate people during the day.") return if nick in var.INVESTIGATED: pm(cli, nick, "You may only investigate one person per round.") return victim = re.split(" +", rest)[0].strip().lower() if not victim: pm(cli, nick, "Not enough parameters") return pl = var.list_players() pll = [x.lower() for x in pl] matches = 0 for player in pll: if victim == player: target = player break if player.startswith(victim): target = player matches += 1 else: if matches != 1: pm(cli, nick,"\u0002{0}\u0002 is currently not playing.".format(victim)) return victim = pl[pll.index(target)] var.INVESTIGATED.append(nick) if var.LOG_CHAN == True: cli.send("whois", victim) @hook("whoisuser", hookid=556) def det_vic_host(cli, server, you, nick, ident, host, dunno, realname): _check = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, rnick, "checking") chan_log(cli, _check, "checked") decorators.unhook(HOOKS, 556) pm(cli, nick, ("The results of your investigation have returned. \u0002{0}\u0002"+ " is a... \u0002{1}\u0002!").format(victim, var.get_role(victim))) var.LOGGER.logBare(victim, "INVESTIGATED", nick) if random.random() < var.DETECTIVE_REVEALED_CHANCE: # a 2/5 chance (should be changeable in settings) # Reveal his role! for badguy in var.ROLES["wolf"] + var.ROLES["werecrow"] + var.ROLES["traitor"]: if var.LOG_CHAN == True: chan_log(cli, rnick, "revealed") pm(cli, badguy, ("\u0002{0}\u0002 accidentally drops a paper. 
The paper reveals "+ "that (s)he is the detective!").format(nick)) var.LOGGER.logBare(nick, "PAPERDROP") @pmcmd("visit", raw_nick=True) def hvisit(cli, rnick, rest): nick, mode, user, host = parse_nick(rnick) if var.PHASE in ("none", "join"): cli.notice(nick, "No game is currently running.") return elif nick not in var.list_players() or nick in var.DISCONNECTED.keys(): cli.notice(nick, "You're not currently playing.") return if not var.is_role(nick, "harlot"): pm(cli, nick, "Only a harlot may use this command.") return if var.PHASE != "night": pm(cli, nick, "You may only visit someone at night.") return if var.HVISITED.get(nick): pm(cli, nick, ("You are already spending the night "+ "with \u0002{0}\u0002.").format(var.HVISITED[nick])) return victim = re.split(" +",rest)[0].strip().lower() if not victim: pm(cli, nick, "Not enough parameters") return pll = [x.lower() for x in var.list_players()] matches = 0 for player in pll: if victim == player: target = player break if player.startswith(victim): target = player matches += 1 else: if matches != 1: pm(cli, nick,"\u0002{0}\u0002 is currently not playing.".format(victim)) return victim = var.list_players()[pll.index(target)] if nick == victim: # Staying home var.HVISITED[nick] = None if var.LOG_CHAN == True: chan_log(cli, rnick, "staying") pm(cli, nick, "You have chosen to stay home for the night.") else: var.HVISITED[nick] = victim if var.LOG_CHAN == True: cli.send("whois", victim) @hook("whoisuser", hookid=69) def hvisited_(cli, server, you, nick, ident, host, dunno, realname): if nick == victim: hvisit_ = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, rnick, "visit") chan_log(cli, hvisit_, "visited") decorators.unhook(HOOKS, 69) pm(cli, nick, ("You are spending the night with \u0002{0}\u0002. "+ "Have a good time!").format(var.HVISITED[nick])) pm(cli, var.HVISITED[nick], ("You are spending the night with \u0002{0}"+ "\u0002. 
Have a good time!").format(nick)) var.LOGGER.logBare(var.HVISITED[nick], "VISITED", nick) chk_nightdone(cli) def is_fake_nick(who): return not(re.search("^[a-zA-Z\\\_\]\[`]([a-zA-Z0-9\\\_\]\[`]+)?", who)) or who.lower().endswith("serv") @pmcmd("see", raw_nick=True) def see(cli, rnick, rest): nick, mode, user, host = parse_nick(rnick) if var.PHASE in ("none", "join"): cli.notice(nick, "No game is currently running.") return elif nick not in var.list_players() or nick in var.DISCONNECTED.keys(): cli.notice(nick, "You're not currently playing.") return if not var.is_role(nick, "seer"): pm(cli, nick, "Only a seer may use this command") return if var.PHASE != "night": pm(cli, nick, "You may only have visions at night.") return if nick in var.SEEN: pm(cli, nick, "You may only have one vision per round.") return victim = re.split(" +",rest)[0].strip().lower() pl = var.list_players() pll = [x.lower() for x in pl] if not victim: pm(cli, nick, "Not enough parameters") return matches = 0 for player in pll: if victim == player: target = player break if player.startswith(victim): target = player matches += 1 else: if matches != 1: pm(cli, nick,"\u0002{0}\u0002 is currently not playing.".format(victim)) return victim = var.list_players()[pll.index(target)] if victim in var.CURSED: role = "wolf" elif var.get_role(victim) == "traitor": role = "villager" else: role = var.get_role(victim) if var.LOG_CHAN == True: cli.send("whois", victim) @hook("whoisuser", hookid=820) def seen_host(cli, server, you, nick, ident, host, dunno, realname): if nick == victim: seen = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, rnick, "see") chan_log(cli, seen, "seen") decorators.unhook(HOOKS, 820) pm(cli, nick, ("You have a vision; in this vision, "+ "you see that \u0002{0}\u0002 is a "+ "\u0002{1}\u0002!").format(victim, role)) var.SEEN.append(nick) var.LOGGER.logBare(victim, "SEEN", nick) chk_nightdone(cli) @pmcmd("burn", raw_nick=True) def burn_house(cli, rnick, rest): nick, mode, user, host = parse_nick(rnick) change_mind = False if var.PHASE in ("none", "join"): cli.notice(nick, "No game is currently running.") return elif nick not in var.list_players() or nick in var.DISCONNECTED.keys(): cli.notice(nick, "You're not currently playing.") return if nick not in var.PYROS: pm(cli, nick, "Only an arsonist may use this command") return if var.PHASE != "night": pm(cli, nick, "You may only burn houses at night.") return if nick in var.BURN and not var.BURN[nick] == None: change_mind = True victim = re.split(" +",rest)[0].strip().lower() pl = var.list_players() pll = [x.lower() for x in pl] if not victim: pm(cli, nick, "Not enough parameters") return matches = 0 for player in pll: if victim == player: target = player break if player.startswith(victim): target = player matches += 1 else: if matches != 1: pm(cli, nick,"\u0002{0}\u0002 is currently not playing.".format(victim)) return victim = pl[pll.index(target)] if victim in var.BURNED and victim in pl: pm(cli, nick, "That house is already burnt down.") return if var.LOG_CHAN == True: cli.send("whois", victim) @hook("whoisuser", hookid=820) def burned_host(cli, server, you, nick, ident, host, dunno, realname): if nick == victim: burned = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, rnick, "burn") chan_log(cli, burned, "burned") decorators.unhook(HOOKS, 820) if change_mind == False: pm(cli, nick, "You have thrown a molotov at \u0002{0}\u0002's house. 
You will see the results of your deeds on the morning.".format(victim)) else: pm(cli, nick, "You take back your previously thrown molotov and throw it towards \u0002{0}\u0002's house instead.".format(victim)) rand = random.random() chances = var.FIRE_CHANCES if nick in var.ROLES["village drunk"]: chances = var.DRUNK_FIRE_CHANCES if rand <= chances[0]: # molotov hit the house, it'll burn down var.BURNED.append(victim) var.BURNED_HOUSES.append(victim) elif rand <= chances[0] + chances[1]: # miss var.MOLOTOVS_MISSED.append(nick) var.BURNED_HOUSES.append(victim) else: # suicide var.BURNED.append(nick) var.BURN[nick] = victim var.LOGGER.logBare(victim, "BURN", nick) chk_nightdone(cli) @pmcmd("noburn", "no-burn", "notburn", "not-burn", raw_nick=True) def no_burning_pyro(cli, rnick, rest): nick, mode, user, host = parse_nick(rnick) if nick in var.PYROS.keys() and nick not in var.BURN.keys(): var.BURN[nick] = None pm(cli, nick, "You chose to keep your molotov(s) to yourself for this night.") if var.LOG_CHAN == True: chan_log(cli, rnick, "no_burn") elif nick in var.PYROS.keys() and nick in var.BURN.keys(): var.BURN[nick] = None pm(cli, nick, "You decide to get back your molotov and keep it for this night.") else: pm(cli, nick, "Only an arsonist may use this command.") @cmd("kill", "guard", "protect", "save", "visit", "see", "id", "burn") def wrong_window(cli, nick, chan, rest): if chan == botconfig.CHANNEL: if var.PHASE in ("night", "day"): cli.msg(chan, "{0}: Do you have a role? In any case, you should type that in a PM".format(nick)) @cmd("msg", raw_nick=True) def msg_through_bot(cli, rnick, chan, rest): nick, mode, user, host = parse_nick(rnick) if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True: params = rest.split() cli.msg(params[0], " ".join(params[1:])) if var.LOG_CHAN == True: chan_log(cli, rnick, "msg_bot") @cmd("say", raw_nick=True) def say_through_bot(cli, rnick, chan, rest): nick, mode, user, host = parse_nick(rnick) if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True: cli.msg(botconfig.CHANNEL, rest) if var.LOG_CHAN == True: chan_log(cli, rnick, "say_bot") @cmd("me", raw_nick=True) def me_through_bot(cli, rnick, chan, rest): nick, mode, user, host = parse_nick(rnick) if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True: cli.msg(botconfig.CHANNEL, "\u0001ACTION {0}\u0001".format(rest)) if var.LOG_CHAN == True: chan_log(cli, rnick, "me_bot") @cmd("act", raw_nick=True) def act_through_bot(cli, rnick, chan, rest): nick, mode, user, host = parse_nick(rnick) if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True: params = rest.split() cli.msg(params[0], "\u0001ACTION {0}\u0001".format(" ".join(params[1:]))) if var.LOG_CHAN == True: chan_log(cli, rnick, "act_bot") @hook("featurelist") # For multiple targets with PRIVMSG def getfeatures(cli, nick, *rest): for r in rest: if r.startswith("TARGMAX="): x = r[r.index("PRIVMSG:"):] if "," in x: l = x[x.index(":")+1:x.index(",")] else: l = x[x.index(":")+1:] l = l.strip() if not l or not l.isdigit(): continue else: var.MAX_PRIVMSG_TARGETS = int(l) break def mass_privmsg(cli, targets, msg, notice = False): while targets: if len(targets) <= var.MAX_PRIVMSG_TARGETS: bgs = ",".join(targets) targets = () else: bgs = ",".join(targets[0:var.MAX_PRIVMSG_TARGETS]) targets = targets[var.MAX_PRIVMSG_TARGETS:] if not notice: cli.msg(bgs, msg) else: cli.notice(bgs, msg) @pmcmd("") def relay(cli, nick, rest): """Let the wolves talk to each other through the bot""" if var.PHASE not in ("night", "day"): return badguys = var.ROLES["wolf"] + 
var.ROLES["traitor"] + var.ROLES["werecrow"] if len(badguys) > 1: if nick in badguys: badguys.remove(nick) # remove self from list if rest.startswith("\01ACTION"): rest = rest[7:-1] mass_privmsg(cli, [guy for guy in badguys if (guy in var.PLAYERS and var.PLAYERS[guy]["cloak"] not in var.SIMPLE_NOTIFY)], nick+rest) mass_privmsg(cli, [guy for guy in badguys if (guy in var.PLAYERS and var.PLAYERS[guy]["cloak"] in var.SIMPLE_NOTIFY)], nick+rest, True) else: mass_privmsg(cli, [guy for guy in badguys if (guy in var.PLAYERS and var.PLAYERS[guy]["cloak"] not in var.SIMPLE_NOTIFY)], "\02{0}\02 says: {1}".format(nick, rest)) mass_privmsg(cli, [guy for guy in badguys if (guy in var.PLAYERS and var.PLAYERS[guy]["cloak"] in var.SIMPLE_NOTIFY)], "\02{0}\02 says: {1}".format(nick, rest), True) @pmcmd("tellchan", raw_nick=True) def chan_tell(cli, rnick, rest): """Allow wolves to send a message to the channel""" nick, mode, user, host = parse_nick(rnick) if var.PHASE not in ("night", "day"): return chan = botconfig.CHANNEL wolf = var.ROLES["wolf"] traitor = var.ROLES["traitor"] crow = var.ROLES["werecrow"] if nick not in wolf and nick not in traitor and nick not in crow: pm(cli, nick, "Only a wolf can do that") return if rest.startswith("/me "): rest = rest.replace("/me ", "") if nick in wolf: if var.LOG_CHAN == True: chan_log(cli, rnick, "wolf_tell") if len(wolf) > 1: cli.msg(chan, ("* \02A wolf\02 {0}".format(rest))) elif len(wolf) == 1: cli.msg(chan, ("* \02The wolf\02 {0}".format(rest))) return if nick in traitor: if var.LOG_CHAN == True: chan_log(cli, rnick, "traitor_tell") if len(traitor) > 1: cli.msg(chan, ("* \02A traitor\02 {0}".format(rest))) elif len(traitor) == 1: cli.msg(chan, ("* \02The traitor\02 {0}".format(rest))) return if nick in crow: if var.LOG_CHAN == True: chan_log(cli, rnick, "crow_tell") if len(crow) > 1: cli.msg(chan, ("* \02A werecrow\02 {0}".format(rest))) elif len(crow) == 1: cli.msg(chan, ("* \02The werecrow\02 {0}".format(rest))) return else: if nick in wolf: if var.LOG_CHAN == True: chan_log(cli, rnick, "wolf_tell") if len(wolf) > 1: cli.msg(chan, ("\02A wolf\02 says: {0}".format(rest))) elif len(wolf) == 1: cli.msg(chan, ("\02The wolf\02 says: {0}".format(rest))) return if nick in traitor: if var.LOG_CHAN == True: chan_log(cli, rnick, "traitor_tell") if len(traitor) > 1: cli.msg(chan, ("\02A traitor\02 says: {0}".format(rest))) elif len(traitor) == 1: cli.msg(chan, ("\02The traitor\02 says: {0}".format(rest))) return if nick in crow: if var.LOG_CHAN == True: chan_log(cli, rnick, "crow_tell") if len(crow) > 1: cli.msg(chan, ("\02A werecrow\02 says: {0}".format(rest))) elif len(crow) == 1: cli.msg(chan, ("\02The werecrow\02 says: {0}".format(rest))) return def transition_night(cli): if var.PHASE == "night": return var.PHASE = "night" for x, tmr in var.TIMERS.items(): # cancel daytime timer tmr.cancel() var.TIMERS = {} # Reset nighttime variables var.KILLS = {} var.GUARDED = {} # key = by whom, value = the person that is visited var.KILLER = "" # nickname of who chose the victim var.SEEN = [] # list of seers that have had visions var.OBSERVED = {} # those whom werecrows have observed var.HVISITED = {} var.BURN = {} var.MOLOTOVS_MISSED = [] var.NIGHT_START_TIME = datetime.now() daydur_msg = "" if var.NIGHT_TIMEDELTA or var.START_WITH_DAY: # transition from day td = var.NIGHT_START_TIME - var.DAY_START_TIME var.DAY_START_TIME = None var.DAY_TIMEDELTA += td min, sec = td.seconds // 60, td.seconds % 60 daydur_msg = "Day lasted \u0002{0:0>2}:{1:0>2}\u0002. 
".format(min,sec) chan = botconfig.CHANNEL if var.NIGHT_TIME_LIMIT > 0: var.NIGHT_ID = time.time() t = threading.Timer(var.NIGHT_TIME_LIMIT, transition_day, [cli, var.NIGHT_ID]) var.TIMERS["night"] = t var.TIMERS["night"].daemon = True t.start() if var.NIGHT_TIME_WARN > 0: t2 = threading.Timer(var.NIGHT_TIME_WARN, night_warn, [cli, var.NIGHT_ID]) var.TIMERS["night_warn"] = t2 var.TIMERS["night_warn"].daemon = True t2.start() # send PMs ps = var.list_players() wolves = var.ROLES["wolf"]+var.ROLES["traitor"]+var.ROLES["werecrow"] for wolf in wolves: normal_notify = wolf in var.PLAYERS and var.PLAYERS[wolf]["cloak"] not in var.SIMPLE_NOTIFY if normal_notify: if wolf in var.ROLES["wolf"]: if var.LOG_CHAN == True: cli.send("whois", wolf) @hook("whoisuser", hookid=646) def call_wolf(cli, server, you, nick, ident, host, dunno, realname): if nick == wolf: # prevent multiple entries in logging (/whois result will only occur for wolf) wolfy = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, wolfy, "call_wolf") pm(cli, wolf, ('You are a \u0002wolf\u0002. It is your job to kill all the '+ 'villagers. Use "kill <nick>" to kill a villager.')) elif wolf in var.ROLES["traitor"]: if var.LOG_CHAN == True: cli.send("whois", wolf) @hook("whoisuser", hookid=646) def call_traitor(cli, server, you, nick, ident, host, dunno, realname): if nick == wolf: traitor = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, traitor, "call_traitor") pm(cli, wolf, ('You are a \u0002traitor\u0002. You are exactly like a '+ 'villager and not even a seer can see your true identity. '+ 'Only detectives can. ')) else: if var.LOG_CHAN == True: cli.send("whois", wolf) @hook("whoisuser", hookid=646) def call_crow(cli, server, you, nick, ident, host, dunno, realname): if nick == wolf: werecrow = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, werecrow, "call_crow") pm(cli, wolf, ('You are a \u0002werecrow\u0002. You are able to fly at night. '+ 'Use "kill <nick>" to kill a a villager. Alternatively, you can '+ 'use "observe <nick>" to check if someone is in bed or not. 
'+ 'Observing will prevent you from participating in a killing.')) pm(cli, wolf, 'You can use "tellchan message" to send a message to the channel, anonymously') if len(wolves) > 1: pm(cli, wolf, 'Also, if you PM me, your message will be relayed to other wolves.') else: if var.LOG_CHAN == True: cli.who(botconfig.CHANNEL, "%nuchaf") @hook("whospcrpl", hookid=646) def simple_call(cli, server, you, chan, ident, host, nick, status, account): if nick == wolf: woffle = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, woffle, "simple_{0}".format(var.get_role(wolf))) pm(cli, wolf, "You are a \02{0}\02.".format(var.get_role(wolf))) # !simple pl = ps[:] pl.sort(key=lambda x: x.lower()) pl.remove(wolf) # remove self from list for i, player in enumerate(pl): if player in var.ROLES["wolf"]: pl[i] = player + " (wolf)" elif player in var.ROLES["traitor"]: pl[i] = player + " (traitor)" elif player in var.ROLES["werecrow"]: pl[i] = player + " (werecrow)" pm(cli, wolf, "\u0002Players:\u0002 "+", ".join(pl)) for seer in var.ROLES["seer"]: pl = ps[:] pl.sort(key=lambda x: x.lower()) pl.remove(seer) # remove self from list if seer in var.PLAYERS and var.PLAYERS[seer]["cloak"] not in var.SIMPLE_NOTIFY: len_seer = len(seer) if var.LOG_CHAN == True: cli.send("whois", seer) @hook("whoisuser", hookid=646) def call_seer(cli, server, you, nick, ident, host, dunno, realname): if nick == seer: seer_ = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, seer_, "call_seer") pm(cli, seer, ('You are a \u0002seer\u0002. '+ 'It is your job to detect the wolves, you '+ 'may have a vision once per night. '+ 'Use "see <nick>" to see the role of a player.')) else: if var.LOG_CHAN == True: cli.send("whois", seer) @hook("whoisuser", hookid=646) def simple_seer(cli, server, you, nick, ident, host, dunno, realname): if nick == seer: seers = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, seers, "simple_seer") pm(cli, seer, "You are a \02seer\02.") # !simple pm(cli, seer, "Players: "+", ".join(pl)) for harlot in var.ROLES["harlot"]: pl = ps[:] pl.sort(key=lambda x: x.lower()) pl.remove(harlot) if harlot in var.PLAYERS and var.PLAYERS[harlot]["cloak"] not in var.SIMPLE_NOTIFY: if var.LOG_CHAN == True: cli.send("whois", harlot) @hook("whoisuser", hookid=646) def call_harlot(cli, server, you, nick, ident, host, dunno, realname): if nick == harlot: harl = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, harl, "call_harlot") cli.msg(harlot, ('You are a \u0002harlot\u0002. '+ 'You may spend the night with one person per round. '+ 'If you visit a victim of a wolf, or visit a wolf, '+ 'you will die. Use "visit <nick>" to visit a player. 
'+ 'Visiting yourself makes you stay home.')) else: if var.LOG_CHAN == True: cli.send("whois", harlot) @hook("whoisuser", hookid=646) def simple_harlot(cli, server, you, nick, ident, host, dunno, realname): if nick == harlot: harl = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, harl, "simple_harlot") cli.notice(harlot, "You are a \02harlot\02.") # !simple pm(cli, harlot, "Players: "+", ".join(pl)) for g_angel in var.ROLES["guardian angel"]: pl = ps[:] pl.sort(key=lambda x: x.lower()) pl.remove(g_angel) if g_angel in var.PLAYERS and var.PLAYERS[g_angel]["cloak"] not in var.SIMPLE_NOTIFY: if var.LOG_CHAN == True: cli.send("whois", g_angel) @hook("whoisuser", hookid=646) def call_angel(cli, server, you, nick, ident, host, dunno, realname): if nick == g_angel: _gangel = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, _gangel, "call_angel") cli.msg(g_angel, ('You are a \u0002guardian angel\u0002. '+ 'It is your job to protect the villagers. If you guard a'+ ' wolf, there is a 50/50 chance of you dying, if you guard '+ 'a victim, they will live. Use "guard <nick>" to guard a player.')) else: if var.LOG_CHAN == True: cli.send("whois", g_angel) @hook("whoisuser", hookid=646) def simple_angel(cli, server, you, nick, ident, host, dunno, realname): if nick == g_angel: s_angel = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, s_angel, "simple_angel") cli.notice(g_angel, "You are a \02guardian angel\02.") # !simple pm(cli, g_angel, "Players: " + ", ".join(pl)) for dttv in var.ROLES["detective"]: pl = ps[:] pl.sort(key=lambda x: x.lower()) pl.remove(dttv) if dttv in var.PLAYERS and var.PLAYERS[dttv]["cloak"] not in var.SIMPLE_NOTIFY: if var.LOG_CHAN == True: cli.send("whois", dttv) @hook("whoisuser", hookid=646) def call_det(cli, server, you, nick, ident, host, dunno, realname): if nick == dttv: det_ = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, det_, "call_det") cli.msg(dttv, ("You are a \u0002detective\u0002.\n"+ "It is your job to determine all the wolves and traitors. "+ "Your job is during the day, and you can see the true "+ "identity of all users, even traitors.\n"+ "But, each time you use your ability, you risk a 2/5 "+ "chance of having your identity revealed to the wolves. So be "+ "careful. Use \"id nick\" to identify any player during the day.")) else: if var.LOG_CHAN == True: cli.send("whois", dttv) @hook("whoisuser", hookid=646) def simple_det(cli, server, you, nick, ident, host, dunno, realname): if nick == dttv: s_det = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, s_det, "simple_det") cli.notice(dttv, "You are a \02detective\02.") # !simple pm(cli, dttv, "Players: " + ", ".join(pl)) for d in var.ROLES["village drunk"]: if var.FIRST_NIGHT: if var.LOG_CHAN == True: cli.send("whois", d) @hook("whoisuser", hookid=646) def call_drunk(cli, server, you, nick, ident, host, dunno, realname): if nick == d: drunk = "{0}!{1}@{2}".format(nick, ident, host) chan_log(cli, drunk, "call_drunk") pm(cli, d, 'You have been drinking too much! 
You are the \u0002village drunk\u0002.')
    for g in tuple(var.GUNNERS.keys()):
        if g not in ps:
            continue
        elif not var.GUNNERS[g]:
            continue
        norm_notify = g in var.PLAYERS and var.PLAYERS[g]["cloak"] not in var.SIMPLE_NOTIFY
        if norm_notify:
            if var.LOG_CHAN == True:
                cli.send("whois", g)

                @hook("whoisuser", hookid=646)
                def call_gunner(cli, server, you, nick, ident, host, dunno, realname):
                    if nick == g:
                        gunner = "{0}!{1}@{2}".format(nick, ident, host)
                        chan_log(cli, gunner, "call_gunner")
            gun_msg = ("You hold a gun that shoots special silver bullets. You may only use it "+
                       "during the day. If you shoot a wolf, (s)he will die instantly, but if you "+
                       "shoot a villager, that villager will likely survive. Use '"+botconfig.CMD_CHAR+
                       "shoot <nick>' in channel during the day to shoot a player. You get {0}.")
        else:
            if var.LOG_CHAN == True:
                cli.send("whois", g)

                @hook("whoisuser", hookid=646)
                def simple_gunner(cli, server, you, nick, ident, host, dunno, realname):
                    if nick == g:
                        gun_s = "{0}!{1}@{2}".format(nick, ident, host)
                        chan_log(cli, gun_s, "simple_gunner")
            gun_msg = ("You have a \02gun\02 with {0}.")
        if var.GUNNERS[g] == 1:
            gun_msg = gun_msg.format("1 bullet")
        elif var.GUNNERS[g] > 1:
            gun_msg = gun_msg.format(str(var.GUNNERS[g]) + " bullets")
        else:
            continue
        pm(cli, g, gun_msg)
    for pyro in tuple(var.PYROS.keys()):
        if pyro not in ps:
            continue
        elif not var.PYROS[pyro]:
            continue
        norm_notify = pyro in var.PLAYERS and var.PLAYERS[pyro]["cloak"] not in var.SIMPLE_NOTIFY
        if norm_notify:
            if var.LOG_CHAN == True:
                cli.send("whois", pyro)

                @hook("whoisuser", hookid=646)
                def call_arsonist(cli, server, you, nick, ident, host, dunno, realname):
                    if nick == pyro:
                        arse = "{0}!{1}@{2}".format(nick, ident, host)  # hehehe, he's an arse
                        chan_log(cli, arse, "call_arsonist")
            pyromsg = ("You have \02molotovs\02 that you can throw at people's houses "+
                       "during the night. Use \"burn <nick>\" to throw a molotov. "+
                       "If you burn the house of someone who is not there, they will "+
                       "survive and you will not be able to burn down their house again. "+
                       "Anyone inside the house (harlot, wolves or resident) will die. "+
                       "Use \"no-burn\" to not use any molotov. You get {0}.")
        else:
            if var.LOG_CHAN == True:
                cli.send("whois", pyro)

                @hook("whoisuser", hookid=646)
                def simple_arsonist(cli, server, you, nick, ident, host, dunno, realname):
                    if nick == pyro:
                        pyro_s = "{0}!{1}@{2}".format(nick, ident, host)
                        chan_log(cli, pyro_s, "simple_arsonist")
            pyromsg = ("You have \02{0}\02.")
        if var.PYROS[pyro] == 1:
            pyromsg = pyromsg.format("1 molotov")
        elif var.PYROS[pyro] > 1:
            pyromsg = pyromsg.format(str(var.PYROS[pyro]) + " molotovs")
        else:
            continue
        pm(cli, pyro, pyromsg)
        pl = ps[:]
        pl.sort(key=lambda x: x.lower())
        pl.remove(pyro)
        pm(cli, pyro, "Players: " + ", ".join(pl))
    if var.LOG_CHAN == True:
        chan_log(cli, var.FULL_ADDRESS, "night_begin")
    dmsg = (daydur_msg + "It is now nighttime. All players "+
            "check for PMs from me for instructions. "+
            "If you did not receive one, simply sit back, "+
            "relax, and wait patiently for morning.")
    cli.msg(chan, dmsg)
    var.LOGGER.logMessage(dmsg.replace("\02", ""))
    var.LOGGER.logBare("NIGHT", "BEGIN")
    # cli.msg(chan, "DEBUG: "+str(var.ROLES))
    if not var.ROLES["wolf"]:  # Probably something interesting going on.
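        # No wolves are left to act at night here (e.g. only a traitor
        # remains), so the night-done check can fire immediately; chk_traitor,
        # defined elsewhere in this file, presumably handles promoting a
        # traitor to wolf in that situation.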
chk_nightdone(cli) chk_traitor(cli) if var.LOG_CHAN == True: decorators.unhook(HOOKS, 646) def cgamemode(cli, *args): chan = botconfig.CHANNEL if var.ORIGINAL_SETTINGS: # needs reset reset_settings() for arg in args: modeargs = arg.split("=", 1) if len(modeargs) < 2: # no equal sign in the middle of the arg cli.msg(botconfig.CHANNEL, "Invalid syntax.") return False modeargs[0] = modeargs[0].strip() if modeargs[0] in var.GAME_MODES.keys(): md = modeargs.pop(0) modeargs[0] = modeargs[0].strip() try: gm = var.GAME_MODES[md](modeargs[0]) for attr in dir(gm): val = getattr(gm, attr) if (hasattr(var, attr) and not callable(val) and not attr.startswith("_")): var.ORIGINAL_SETTINGS[attr] = getattr(var, attr) setattr(var, attr, val) return True except var.InvalidModeException as e: cli.msg(botconfig.CHANNEL, "Invalid mode: "+str(e)) return False else: cli.msg(chan, "Mode \u0002{0}\u0002 not found.".format(modeargs[0])) @cmd("", raw_nick=True) def outside_ping(cli, rnick, chan, rest): nick, mode, user, host = parse_nick(rnick) if var.EXT_PING.lower() in rest.lower() and var.EXT_PING != "" and nick.lower() != var.EXT_PING.lower(): cli.msg(botconfig.SPECIAL_CHAN, "{3}: {0} pinged you in {1} : {2}".format(nick, chan, rest, var.EXT_PING)) chan_log(cli, rnick, "external_ping") @cmd("start", "st", "go", raw_nick=True) def start(cli, rnick, chan, rest): """Starts a game of Werewolf""" nick, mode, user, host = parse_nick(rnick) if chan == botconfig.CHANNEL: villagers = var.list_players() pl = villagers[:] if var.PHASE == "none": cli.notice(nick, "No game is currently running.") return if var.PHASE != "join": cli.notice(nick, "Werewolf is already in play.") return if nick not in villagers and nick != chan: cli.notice(nick, "You're currently not playing.") return now = datetime.now() var.GAME_START_TIME = now # Only used for the idler checker dur = int((var.CAN_START_TIME - now).total_seconds()) if dur > 0: cli.msg(chan, "Please wait at least {0} more seconds.".format(dur)) return if len(villagers) < var.MIN_PLAYERS: cli.msg(chan, "{0}: \u0002{1}\u0002 or more players are required to play.".format(nick, var.MIN_PLAYERS)) return for pcount in range(len(villagers), 3, -1): addroles = var.ROLES_GUIDE.get(pcount) if addroles: break if var.ORIGINAL_SETTINGS: # Custom settings while True: wvs = (addroles[var.INDEX_OF_ROLE["wolf"]] + addroles[var.INDEX_OF_ROLE["traitor"]]) if len(villagers) < (sum(addroles) - addroles[var.INDEX_OF_ROLE["gunner"]] - addroles[var.INDEX_OF_ROLE["cursed villager"]]): cli.msg(chan, "There are too few players in the "+ "game to use the custom roles.") elif not wvs: cli.msg(chan, "There has to be at least one wolf!") elif wvs > (len(villagers) / 2): cli.msg(chan, "Too many wolves.") else: break reset_settings() cli.msg(chan, "The default settings have been restored. 
Please !start again.") var.PHASE = "join" return if var.ADMIN_TO_PING: if "join" in COMMANDS.keys(): COMMANDS["join"] = [lambda *spam: cli.msg(chan, "This command has been disabled by an admin.")] if "start" in COMMANDS.keys(): COMMANDS["start"] = [lambda *spam: cli.msg(chan, "This command has been disabled by an admin.")] var.ROLES = {} var.CURSED = [] var.GUNNERS = {} var.WOLF_GUNNERS = {} var.PYROS = {} var.BURNED = [] # must be initialized here villager_roles = ("gunner", "cursed villager", "arsonist") for i, count in enumerate(addroles): role = var.ROLE_INDICES[i] if role in villager_roles: var.ROLES[role] = [None] * count continue # We deal with those later, see below selected = random.sample(villagers, count) var.ROLES[role] = selected for x in selected: villagers.remove(x) # Now for the villager roles # Select cursed (just a villager) if var.ROLES["cursed villager"]: possiblecursed = pl[:] for cannotbe in (var.ROLES["wolf"] + var.ROLES["werecrow"] + var.ROLES["seer"] + var.ROLES["village drunk"]): # traitor can be cursed possiblecursed.remove(cannotbe) var.CURSED = random.sample(possiblecursed, len(var.ROLES["cursed villager"])) del var.ROLES["cursed villager"] # Select gunner (also a villager) if var.ROLES["gunner"]: possible = pl[:] for cannotbe in (var.ROLES["wolf"] + var.ROLES["werecrow"] + var.ROLES["traitor"]): possible.remove(cannotbe) for csd in var.CURSED: # cursed cannot be gunner if csd in possible: possible.remove(csd) for gnr in random.sample(possible, len(var.ROLES["gunner"])): if gnr in var.ROLES["village drunk"]: var.GUNNERS[gnr] = (var.DRUNK_SHOTS_MULTIPLIER * math.ceil(var.SHOTS_MULTIPLIER * len(pl))) else: var.GUNNERS[gnr] = math.ceil(var.SHOTS_MULTIPLIER * len(pl)) del var.ROLES["gunner"] if var.ROLES["arsonist"]: possiblepyro = pl[:] for notpyro in var.GUNNERS: # anyone but gunner can be pyro possiblepyro.remove(notpyro) # I had a lot of fun typing pyro over and over (not) for drp in random.sample(possiblepyro, len(var.ROLES["arsonist"])): if drp in var.ROLES["village drunk"]: var.PYROS[drp] = var.DRUNK_FIRE_MULTIPLIER * math.ceil(var.MOLOTOV_AMOUNT * len(pl)) else: var.PYROS[drp] = math.ceil(var.MOLOTOV_AMOUNT * len(pl)) del var.ROLES["arsonist"] var.SPECIAL_ROLES["goat herder"] = [] if var.GOAT_HERDER: var.SPECIAL_ROLES["goat herder"] = [ nick ] var.ROLES["villager"] = villagers if var.LOG_CHAN == True and var.GOT_IT != True: chan_log(cli, rnick, "game_start") var.GOT_IT = False if var.LOG_CHAN == True and var.LOG_AUTO_TOGGLE == True and len(pl) >= var.MIN_LOG_PLAYERS: cli.msg(chan, "There are more than \u0002{0}\u0002 players. 
Logging was automatically disabled to reduce lag.".format(var.MIN_LOG_PLAYERS)) cli.msg(botconfig.ADMIN_CHAN, "Logging is now \u0002off\u0002 to reduce lag.") var.LOG_CHAN = False var.AUTO_LOG_TOGGLED = True cli.msg(chan, ("{0}: Welcome to Werewolf, the popular detective/social party "+ "game (a theme of Mafia).").format(", ".join(pl))) cli.mode(chan, "+m") var.ORIGINAL_ROLES = copy.deepcopy(var.ROLES) # Make a copy var.DAY_TIMEDELTA = timedelta(0) var.NIGHT_TIMEDELTA = timedelta(0) var.DAY_START_TIME = None var.NIGHT_START_TIME = None var.LAST_PING = None var.LOGGER.log("Game Start") var.LOGGER.logBare("GAME", "BEGIN", nick) var.LOGGER.logBare(str(len(pl)), "PLAYERCOUNT") var.LOGGER.log("***") var.LOGGER.log("ROLES: ") for rol in var.ROLES: r = [] for rw in var.plural(rol).split(" "): rwu = rw[0].upper() if len(rw) > 1: rwu += rw[1:] r.append(rwu) r = " ".join(r) var.LOGGER.log("{0}: {1}".format(r, ", ".join(var.ROLES[rol]))) for plr in var.ROLES[rol]: var.LOGGER.logBare(plr, "ROLE", rol) if var.CURSED: var.LOGGER.log("Cursed Villagers: "+", ".join(var.CURSED)) for plr in var.CURSED: var.LOGGER.logBare(plr+" ROLE cursed villager") if var.GUNNERS: var.LOGGER.log("Villagers With Bullets: "+", ".join([x+"("+str(y)+")" for x,y in var.GUNNERS.items()])) for plr in var.GUNNERS: var.LOGGER.logBare(plr, "ROLE gunner") if var.PYROS: var.LOGGER.log("Players With Molotovs: "+", ".join([x+"("+str(y)+")" for x,y in var.PYROS.items()])) for plr in var.PYROS: var.LOGGER.logBare(plr, "ROLE arsonist") var.LOGGER.log("***") var.PLAYERS = {plr:dict(var.USERS[plr]) for plr in pl if plr in var.USERS} if not var.START_WITH_DAY: var.FIRST_NIGHT = True transition_night(cli) else: transition_day(cli) for cloak in var.illegal_joins: if var.illegal_joins[cloak] != 0: var.illegal_joins[cloak] -= 1 # DEATH TO IDLERS! 
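    # The reaper runs on a daemon thread, so it can poll for idlers in the
    # background without preventing interpreter shutdown (daemon threads are
    # abandoned when the main thread exits). The same standard-library pattern,
    # with illustrative names:
    #
    #     t = threading.Thread(target=watchdog, args=(state,))
    #     t.daemon = True  # don't keep the process alive just for this thread
    #     t.start()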
reapertimer = threading.Thread(None, reaper, args=(cli,var.GAME_ID)) reapertimer.daemon = True reapertimer.start() @pmcmd("fstasis", raw_nick=True) def fstasis(cli, rnick, *rest): nick, mode, user, host = parse_nick(rnick) data = rest[0].split() if len(data) == 2: if data[0] in var.USERS: cloak = var.USERS[str(data[0])]['cloak'] else: cloak = None amt = data[1] if cloak is not None: var.illegal_joins[cloak] = int(amt) cli.msg(nick, "{0} is now in stasis for {1} games".format(data[0], amt)) else: cli.msg(nick, "Sorry, that user has a None cloak") else: cli.msg(nick, "current illegal joins: " + str(var.illegal_joins)) @cmd("wait", "w", raw_nick=True) def wait(cli, rnick, chan, rest): """Increase the wait time (before !start can be used)""" nick, mode, user, host = parse_nick(rnick) if chan == botconfig.CHANNEL: pl = var.list_players() if var.PHASE == "none": cli.notice(nick, "No game is currently running.") return if var.PHASE != "join": cli.notice(nick, "Werewolf is already in play.") return if nick not in pl: cli.notice(nick, "You're currently not playing.") return if var.WAITED >= var.MAXIMUM_WAITED: cli.msg(chan, "Limit has already been reached for extending the wait time.") return now = datetime.now() if now > var.CAN_START_TIME: var.CAN_START_TIME = now + timedelta(seconds=var.EXTRA_WAIT) else: var.CAN_START_TIME += timedelta(seconds=var.EXTRA_WAIT) var.WAITED += 1 if var.LOG_CHAN == True: chan_log(cli, rnick, "wait") cli.msg(chan, ("\u0002{0}\u0002 increased the wait time by "+ "{1} seconds.").format(nick, var.EXTRA_WAIT)) @cmd("roles") def listroles(cli, nick, chan, rest): """Display which roles are enabled and when""" old = var.ROLES_GUIDE.get(None) txt = "" pl = len(var.list_players()) + len(var.DEAD) if pl > 0: txt += '{0}: There are \u0002{1}\u0002 playing. '.format(nick, pl) for i,v in sorted({i:var.ROLES_GUIDE[i] for i in var.ROLES_GUIDE if i is not None}.items()): if old == v: continue; # nothing new here txt += "{1}[{0}] ".format(str(i), BOLD if i <= pl else "") for index, amt in enumerate(v): if amt - old[index] != 0: if amt > 1: txt = txt + var.ROLE_INDICES[index] + "({0}), ".format(amt) else: txt = txt + var.ROLE_INDICES[index] + ", " txt = txt[:-2] + (BOLD if i <= pl else "") + " " old = v if chan == nick: pm(cli, nick, txt) else: cli.msg(chan, txt) @cmd('gamestats', 'gstats', raw_nick=True) def game_stats(cli, rnick, chan, rest): """Gets the game stats for a given game size or lists game totals for all game sizes if no game size is given.""" nick, mode, user, host = parse_nick(rnick) if (chan != nick and var.LAST_GSTATS and var.GSTATS_RATE_LIMIT and var.LAST_GSTATS + timedelta(seconds=var.GSTATS_RATE_LIMIT) > datetime.now()): cli.notice(nick, ('This command is rate-limited. 
    if chan != nick:
        var.LAST_GSTATS = datetime.now()
    if var.PHASE not in ('none', 'join'):
        cli.notice(nick, 'Wait until the game is over to view stats.')
        return

    # List all game sizes and totals if no size is given
    if not rest:
        if chan == nick:
            pm(cli, nick, var.get_game_totals())
            if var.LOG_CHAN == True:
                chan_log(cli, rnick, "game_stats_pm")
        else:
            cli.msg(chan, var.get_game_totals())
            if var.LOG_CHAN == True:
                chan_log(cli, rnick, "game_stats")
        return

    # Check for invalid input
    rest = rest.strip()
    if not rest.isdigit() or int(rest) > var.MAX_PLAYERS or int(rest) < var.MIN_PLAYERS:
        cli.notice(nick, ('Please enter an integer between {} and '
                          '{}.').format(var.MIN_PLAYERS, var.MAX_PLAYERS))
        return

    # Attempt to find game stats for the given game size
    if chan == nick:
        pm(cli, nick, var.get_game_stats(int(rest)))
    else:
        cli.msg(chan, var.get_game_stats(int(rest)))

@pmcmd('gamestats', 'gstats', raw_nick=True)
def game_stats_pm(cli, rnick, rest):
    nick, mode, user, host = parse_nick(rnick)
    game_stats(cli, nick, rnick, rest)

@cmd('playerstats', 'pstats', 'player', raw_nick=True)
def player_stats(cli, rnick, chan, rest):
    """Gets the stats for the given player and role or a list of
    role totals if no role is given."""
    nick, mode, user, host = parse_nick(rnick)
    if (chan != nick and var.LAST_PSTATS and var.PSTATS_RATE_LIMIT and
            var.LAST_PSTATS + timedelta(seconds=var.PSTATS_RATE_LIMIT) > datetime.now()):
        cli.notice(nick, ('This command is rate-limited. Please wait a while '
                          'before using it again.'))
        return
    if chan != nick:
        var.LAST_PSTATS = datetime.now()
    if var.PHASE not in ('none', 'join'):
        cli.notice(nick, 'Wait until the game is over to view stats.')
        return

    params = rest.split()

    # Check if we have enough parameters
    if params:
        user = params[0]
    else:
        user = nick

    # Find the player's account if possible
    acc = False
    for us in var.USERS:
        if us.lower() == user.lower():
            acc = var.USERS[us]['account']
            if acc == '*':
                if us.lower() == nick.lower():
                    cli.notice(nick, 'You are not identified with NickServ.')
                else:
                    cli.notice(nick, user + ' is not identified with NickServ.')
                return
            break  # so we don't spend time going through players when we already found the account
    if not acc:
        acc = user

    # List the player's total games for all roles if no role is given
    if len(params) < 2:
        if chan == nick:
            pm(cli, nick, var.get_player_totals(acc))
            if var.LOG_CHAN == True:
                chan_log(cli, rnick, "player_stats_all_pm")
        else:
            cli.msg(chan, var.get_player_totals(acc))
            if var.LOG_CHAN == True:
                chan_log(cli, rnick, "player_stats_all")
    else:
        role = ' '.join(params[1:])
        # Attempt to find the player's stats
        if chan == nick:
            pm(cli, nick, var.get_player_stats(acc, role))
            if var.LOG_CHAN == True:
                chan_log(cli, rnick, "player_stats_{0}_pm".format(role))
        else:
            cli.msg(chan, var.get_player_stats(acc, role))
            if var.LOG_CHAN == True:
                chan_log(cli, rnick, "player_stats_{0}".format(role))

@pmcmd('playerstats', 'pstats', 'player', raw_nick=True)
def player_stats_pm(cli, rnick, rest):
    nick, mode, user, host = parse_nick(rnick)
    player_stats(cli, nick, rnick, rest)

@cmd("time", raw_nick=True)
def timeleft(cli, rnick, chan, rest):
    """Returns the time left until the next day/night transition."""
    nick, mode, user, host = parse_nick(rnick)
    if var.PHASE not in ("day", "night"):
        cli.notice(nick, "No game is currently running.")
        return
    if (chan != nick and var.LAST_TIME and
            var.LAST_TIME + timedelta(seconds=var.TIME_RATE_LIMIT) > datetime.now()):
        cli.notice(nick, ("This command is rate-limited. Please wait a while "
                          "before using it again."))
        return
    if chan != nick:
        var.LAST_TIME = datetime.now()

    if var.PHASE == "day":
        if var.STARTED_DAY_PLAYERS <= var.SHORT_DAY_PLAYERS:
            remaining = int((var.SHORT_DAY_LIMIT_WARN + var.SHORT_DAY_LIMIT_CHANGE) -
                            (datetime.now() - var.DAY_START_TIME).total_seconds())
        else:
            remaining = int((var.DAY_TIME_LIMIT_WARN + var.DAY_TIME_LIMIT_CHANGE) -
                            (datetime.now() - var.DAY_START_TIME).total_seconds())
    else:
        remaining = int(var.NIGHT_TIME_LIMIT -
                        (datetime.now() - var.NIGHT_START_TIME).total_seconds())

    # Check if timers are actually enabled
    if (var.PHASE == "day") and ((var.STARTED_DAY_PLAYERS <= var.SHORT_DAY_PLAYERS and
                                  var.SHORT_DAY_LIMIT_WARN == 0) or
                                 (var.DAY_TIME_LIMIT_WARN == 0 and
                                  var.STARTED_DAY_PLAYERS > var.SHORT_DAY_PLAYERS)):
        msg = "Day timers are currently disabled."
    elif var.PHASE == "night" and var.NIGHT_TIME_LIMIT == 0:
        msg = "Night timers are currently disabled."
    else:
        msg = "There is \x02{0[0]:0>2}:{0[1]:0>2}\x02 remaining until {1}.".format(
            divmod(remaining, 60), "sunrise" if var.PHASE == "night" else "sunset")

    if nick == chan:
        pm(cli, nick, msg)
        if var.LOG_CHAN == True:
            chan_log(cli, rnick, "time_pm")
    else:
        cli.msg(chan, msg)
        if var.LOG_CHAN == True:
            chan_log(cli, rnick, "time")

@pmcmd("time", raw_nick=True)
def timeleft_pm(cli, rnick, rest):
    nick, mode, user, host = parse_nick(rnick)
    timeleft(cli, nick, rnick, rest)

@pmcmd("roles")
def listroles_pm(cli, nick, rest):
    listroles(cli, nick, nick, rest)

@cmd("myrole", raw_nick=True)
def myrole(cli, rnick, chan, rest):
    """Reminds you of which role you have."""
    nick, mode, user, host = parse_nick(rnick)
    if var.PHASE in ("none", "join"):
        cli.notice(nick, "No game is currently running.")
        return
    ps = var.list_players()
    if nick not in ps:
        cli.notice(nick, "You're currently not playing.")
        return
    pm(cli, nick, "You are a \02{0}\02.".format(var.get_role(nick)))
    if var.LOG_CHAN == True:
        chan_log(cli, rnick, "role")

    # Check for gun/bullets
    if nick in var.GUNNERS and var.GUNNERS[nick]:
        if var.GUNNERS[nick] == 1:
            pm(cli, nick, "You have a \02gun\02 with {0} {1}.".format(var.GUNNERS[nick], "bullet"))
        else:
            pm(cli, nick, "You have a \02gun\02 with {0} {1}.".format(var.GUNNERS[nick], "bullets"))
    elif nick in var.WOLF_GUNNERS and var.WOLF_GUNNERS[nick]:
        if var.WOLF_GUNNERS[nick] == 1:
            pm(cli, nick, "You have a \02gun\02 with {0} {1}.".format(var.WOLF_GUNNERS[nick], "bullet"))
        else:
            pm(cli, nick, "You have a \02gun\02 with {0} {1}.".format(var.WOLF_GUNNERS[nick], "bullets"))
    elif nick in var.PYROS and var.PYROS[nick]:
        if var.PYROS[nick] == 1:
            pm(cli, nick, "You have {0} {1}.".format(var.PYROS[nick], "molotov"))
        else:
            pm(cli, nick, "You have {0} {1}.".format(var.PYROS[nick], "molotovs"))

@pmcmd("myrole", raw_nick=True)
def myrole_pm(cli, rnick, rest):
    myrole(cli, rnick, "", rest)

@cmd("fwait", raw_nick=True)
def fwait(cli, rnick, chan, rest):
    nick, mode, user, host = parse_nick(rnick)
    if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True:
        pl = var.list_players()
        if var.PHASE == "none":
            cli.notice(nick, "No game is currently running.")
            return
        if var.PHASE != "join":
            cli.notice(nick, "Werewolf is already in play.")
            return
        rest = re.split(" +", rest.strip(), 1)[0]
        if rest and rest.isdigit():
            if len(rest) < 4:
                extra = int(rest)
            else:
                cli.msg(botconfig.CHANNEL, "{0}: We don't have all day!".format(nick))
                return
        else:
            extra = var.EXTRA_WAIT
        now = datetime.now()
        if now > var.CAN_START_TIME:
            var.CAN_START_TIME = now + timedelta(seconds=extra)
        else:
            var.CAN_START_TIME += timedelta(seconds=extra)
        var.WAITED += 1
        if var.LOG_CHAN == True:
            chan_log(cli, rnick, "forced_wait")
        cli.msg(botconfig.CHANNEL, ("\u0002{0}\u0002 forcibly increased the wait time by "+
                                    "{1} seconds.").format(nick, extra))

@cmd("fstop", raw_nick=True)
def reset_game(cli, rnick, chan, rest):
    nick, mode, user, host = parse_nick(rnick)
    if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True:
        if var.PHASE == "none":
            cli.notice(nick, "No game is currently running.")
            return
        if var.LOG_CHAN == True:
            chan_log(cli, rnick, "forced_stop")
        cli.msg(botconfig.CHANNEL, "\u0002{0}\u0002 has forced the game to stop.".format(nick))
        var.LOGGER.logMessage("{0} has forced the game to stop.".format(nick))
        if var.PHASE != "join":
            stop_game(cli)
        else:
            reset(cli)

@pmcmd("rules", raw_nick=True)
def pm_rules(cli, rnick, rest):
    nick, mode, user, host = parse_nick(rnick)
    if var.LOG_CHAN == True:
        chan_log(cli, rnick, "pm_rules")
    cli.notice(nick, var.RULES)

@cmd("rules", raw_nick=True)
def show_rules(cli, rnick, chan, rest):
    """Displays the rules"""
    nick, mode, user, host = parse_nick(rnick)
    if var.PHASE in ("day", "night") and nick not in var.list_players() and chan == botconfig.CHANNEL:
        cli.notice(nick, var.RULES)
        return
    if var.LOG_CHAN == True:
        chan_log(cli, rnick, "rules")
    cli.msg(chan, var.RULES)
    var.LOGGER.logMessage(var.RULES)

@pmcmd("help", raw_nick=True)
def get_help(cli, rnick, rest):
    """Gets help."""
    if var.LOG_CHAN == True and var.GOT_IT != True:
        chan_log(cli, rnick, "pm_help")
    var.GOT_IT = False
    nick, mode, user, cloak = parse_nick(rnick)
    fns = []
    rest = rest.strip().replace(botconfig.CMD_CHAR, "", 1).lower()
    splitted = re.split(" +", rest, 1)
    cname = splitted.pop(0)
    rest = splitted[0] if splitted else ""
    found = False
    if cname:
        for c in (COMMANDS, PM_COMMANDS):
            if cname in c.keys():
                found = True
                for fn in c[cname]:
                    if fn.__doc__:
                        if callable(fn.__doc__):
                            pm(cli, nick, botconfig.CMD_CHAR+cname+": "+fn.__doc__(rest))
                            if nick == botconfig.CHANNEL:
                                var.LOGGER.logMessage(botconfig.CMD_CHAR+cname+": "+fn.__doc__(rest))
                        else:
                            pm(cli, nick, botconfig.CMD_CHAR+cname+": "+fn.__doc__)
                            if nick == botconfig.CHANNEL:
                                var.LOGGER.logMessage(botconfig.CMD_CHAR+cname+": "+fn.__doc__)
                        return
                    else:
                        continue
            else:
                continue
        else:
            if not found:
                pm(cli, nick, "Command not found.")
            else:
                pm(cli, nick, "Documentation for this command is not available.")
            return
    # if command was not found, or if no command was given:
    for name, fn in COMMANDS.items():
        if name not in botconfig.DISABLED_COMMANDS:
            if (name and not fn[0].admin_only and
                    not fn[0].owner_only and name not in fn[0].aliases):
                fns.append("\u0002"+name+"\u0002")
    afns = []
    if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True:  # todo - is_owner
        for name, fn in COMMANDS.items():
            if name not in botconfig.DISABLED_COMMANDS:
                if fn[0].admin_only and name not in fn[0].aliases:
                    afns.append("\u0002"+name+"\u0002")
    cli.notice(nick, "Commands: "+", ".join(fns))
    if afns:
        cli.notice(nick, "Admin Commands: "+", ".join(afns))

@cmd("help", raw_nick=True)
def help2(cli, rnick, chan, rest):
    """Gets help"""
    if chan == botconfig.CHANNEL:
        if var.LOG_CHAN == True:
            chan_log(cli, rnick, "help")
        var.GOT_IT = True
    if rest.strip():  # command was given
        get_help(cli, chan, rest)
    else:
        get_help(cli, rnick, rest)

@hook("invite", raw_nick=False)
def on_invite(cli, nick, something, chan):
    if chan == botconfig.CHANNEL:  # it'll be able to join the default if +i...
        cli.join(chan)
    if var.RAW_JOIN == True:  # ...or any other if that's True
        cli.join(chan)
    if var.IS_ADMIN.get(nick) == True:  # let's allow admins... (.get avoids a KeyError for unknown nicks)
        cli.join(chan)

def is_admin(cloak):
    return bool([ptn for ptn in botconfig.OWNERS + botconfig.ADMINS
                 if fnmatch.fnmatch(cloak.lower(), ptn.lower())])

@pmcmd("admins", "admin", raw_nick=True)
def show_admins_pm(cli, rnick, rest):
    nick, mode, user, host = parse_nick(rnick)
    show_admins(cli, rnick, nick, rest)

@cmd("admins", "admin", raw_nick=True)
def show_admins(cli, rnick, chan, rest):
    """Pings the admins that are available."""
    nick, mode, user, host = parse_nick(rnick)
    if chan == botconfig.CHANNEL:
        admins = []
        pl = var.list_players()
        if (var.LAST_ADMINS and
                var.LAST_ADMINS + timedelta(seconds=var.ADMINS_RATE_LIMIT) > datetime.now()):
            cli.notice(nick, ("This command is rate-limited. " +
                              "Please wait a while before using it again."))
            return
        if not (var.PHASE in ("day", "night") and nick not in pl):
            var.LAST_ADMINS = datetime.now()
        if var.ADMIN_PINGING:
            return
        var.ADMIN_PINGING = True
        for adm in var.IS_ADMIN:
            if var.IS_ADMIN[adm] == True:
                admins.append(adm)
        admins.sort(key=lambda x: x.lower())
        if var.PHASE in ("day", "night") and nick not in pl:
            if var.LOG_CHAN == True:
                chan_log(cli, rnick, "pm_admins")
            cli.notice(nick, "Available admins: " + " ".join(admins))
        else:
            if var.LOG_CHAN == True:
                chan_log(cli, rnick, "admins")
            cli.msg(chan, "Available admins: " + " ".join(admins))
        var.ADMIN_PINGING = False

@cmd("coin", "c", raw_nick=True)
def coin(cli, rnick, chan, rest):
    """It's a bad idea to base any decisions on this command."""
    nick, mode, user, host = parse_nick(rnick)
    if var.PHASE in ("day", "night") and nick not in var.list_players():
        cli.notice(nick, "You may not use this command right now.")
        return
    if var.LOG_CHAN == True:
        chan_log(cli, rnick, "coin")
    cli.msg(chan, "\2{0}\2 tosses a coin into the air...".format(nick))
    var.LOGGER.logMessage("{0} tosses a coin into the air...".format(nick))
    coin = random.choice(["heads", "tails"])
    if random.randrange(0, 20) == 0:
        coin = "its side"
    cmsg = "The coin lands on \2{0}\2.".format(coin)
    cli.msg(chan, cmsg)
    var.LOGGER.logMessage(cmsg)

def aftergame(cli, rawnick, rest):
    """Schedule a command to be run after the game by someone."""
    chan = botconfig.CHANNEL
    nick = parse_nick(rawnick)[0]
    rst = re.split(" +", rest)
    cmd = rst.pop(0).lower().replace(botconfig.CMD_CHAR, "", 1).strip()

    if cmd in PM_COMMANDS.keys():
        def do_action():
            for fn in PM_COMMANDS[cmd]:
                fn(cli, rawnick, " ".join(rst))
    elif cmd in COMMANDS.keys():
        def do_action():
            for fn in COMMANDS[cmd]:
                fn(cli, rawnick, botconfig.CHANNEL, " ".join(rst))
    else:
        cli.notice(nick, "That command was not found.")
        return

    if var.PHASE == "none":
        do_action()
        return

    if var.LOG_CHAN == True:
        chan_log(cli, rawnick, "aftergame")
    if var.GIT_UPDATE == False:
        cli.msg(chan, ("The command \02{0}\02 has been scheduled to run "+
                       "after this game by \02{1}\02.").format(cmd, nick))
    elif var.GIT_UPDATE == True:
        cli.msg(chan, "\u0002The bot will automatically restart once this game is over.\u0002")
    var.AFTER_FLASTGAME = do_action  # store the callable; it is invoked once the game ends

@cmd("faftergame", raw_nick=True)
def _faftergame(cli, nick, chan, rest):
    if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True:
        if not rest.strip():
            cli.notice(parse_nick(nick)[0], "Incorrect syntax for this command.")
            return
        aftergame(cli, nick, rest)

@pmcmd("faftergame", raw_nick=True)
def faftergame(cli, nick, rest):
    _faftergame(cli, nick, botconfig.CHANNEL, rest)

@pmcmd("flastgame", raw_nick=True)
def flastgame(cli, rawnick, rest):
    """This command may be used in the channel or in a PM, and it
    disables starting or joining a game.

    !flastgame <optional-command-after-game-ends>"""
    nick, mode, user, host = parse_nick(rawnick)
    if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True:
        chan = botconfig.CHANNEL
        if var.PHASE != "join":
            if "join" in COMMANDS.keys():
                COMMANDS["join"] = [lambda *spam: cli.msg(chan, "This command has been disabled by an admin.")]
            if "start" in COMMANDS.keys():
                COMMANDS["start"] = [lambda *spam: cli.msg(chan, "This command has been disabled by an admin.")]
        if var.LOG_CHAN == True:
            chan_log(cli, rawnick, "last_game")
        cli.msg(chan, "Starting a new game has now been disabled by \02{0}\02.".format(nick))
        var.ADMIN_TO_PING = nick
        if rest.strip():
            aftergame(cli, rawnick, rest)

@cmd("raw")  # allows !exec out of debug mode
# @pmcmd("raw", owner_only=True)  do NOT make this a PM command... it breaks it
def raw_irc(cli, nick, chan, rest):
    if nick in var.IS_OWNER and var.IS_OWNER[nick] == True:
        try:
            cli.send(rest)
        except Exception as e:
            cli.msg(chan, str(type(e))+":"+str(e))

@cmd("flastgame", raw_nick=True)
def _flastgame(cli, rawnick, chan, rest):
    flastgame(cli, rawnick, rest)

before_debug_mode_commands = list(COMMANDS.keys())
before_debug_mode_pmcommands = list(PM_COMMANDS.keys())

if botconfig.DEBUG_MODE or botconfig.ALLOWED_NORMAL_MODE_COMMANDS:

    @cmd("eval", raw_nick=True)
    @pmcmd("eval", raw_nick=True)
    def pyeval(cli, rnick, *rest):
        rest = list(rest)
        nick, mode, user, host = parse_nick(rnick)
        if nick in var.IS_OWNER and var.IS_OWNER[nick] == True:
            if len(rest) == 2:
                chan = rest.pop(0)
            else:
                chan = nick
            try:
                a = str(eval(rest[0]))
                cli.msg(chan, a)
            except Exception as e:
                cli.msg(chan, str(type(e))+":"+str(e))
            if var.LOG_CHAN == True or var.MINIMALIST_LOG == True:
                chan_log(cli, rnick, "eval")

    @cmd("exec", raw_nick=True)
    @pmcmd("exec", raw_nick=True)
    def py(cli, rnick, *rest):
        rest = list(rest)
        nick, mode, user, host = parse_nick(rnick)
        if nick in var.IS_OWNER and var.IS_OWNER[nick] == True:
            if len(rest) == 2:
                chan = rest.pop(0)
            else:
                chan = nick
            try:
                if var.LOG_CHAN == True:
                    chan_log(cli, rnick, "exec")
                exec(rest[0])
            except Exception as e:
                cli.msg(chan, str(type(e))+":"+str(e))
                if var.LOG_CHAN == True or var.MINIMALIST_LOG == True:
                    chan_log(cli, rnick, "exec")

if botconfig.ALLOWED_NORMAL_MODE_COMMANDS or botconfig.DEBUG_MODE:
    for comd in list(COMMANDS.keys()):
        if (comd not in before_debug_mode_commands and
                comd not in botconfig.ALLOWED_NORMAL_MODE_COMMANDS):
            del COMMANDS[comd]
    for pmcomd in list(PM_COMMANDS.keys()):
        if (pmcomd not in before_debug_mode_pmcommands and
                pmcomd not in botconfig.ALLOWED_NORMAL_MODE_COMMANDS):
            del PM_COMMANDS[pmcomd]

@cmd("revealroles", raw_nick=True)
def revroles(cli, rnick, chan, rest):
    nick, mode, user, host = parse_nick(rnick)
    if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True:
        if var.PHASE != "none":
            if var.LOG_CHAN == True:
                chan_log(cli, rnick, "reveal_roles")
            cli.msg(chan, str(var.ROLES))
            if var.PHASE in ('night', 'day'):
                cli.msg(chan, "Cursed: " + str(var.CURSED))
                cli.msg(chan, "Gunners: " + str(list(var.GUNNERS.keys())))
                cli.msg(chan, "Arsonists: " + str(list(var.PYROS.keys())))
    if var.LOG_CHAN == True or var.MINIMALIST_LOG == True:
        chan_log(cli, rnick, "revealroles")

@pmcmd("revealroles", raw_nick=True)
def pmrevroles(cli, rnick, rest):
    nick, mode, user, host = parse_nick(rnick)
    if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True:
        if var.PHASE != "none":
            if var.LOG_CHAN == True:
                chan_log(cli, rnick, "pm_reveal_roles")
            cli.notice(nick, str(var.ROLES))
            if var.PHASE in ('night', 'day'):
                cli.notice(nick, "Cursed: " + str(var.CURSED))
                cli.notice(nick, "Gunners: " + str(list(var.GUNNERS.keys())))
                cli.notice(nick, "Arsonists: " + str(list(var.PYROS.keys())))
    if var.LOG_CHAN == True or var.MINIMALIST_LOG == True:
        chan_log(cli, rnick, "revealroles")

@cmd("fgame", raw_nick=True)
def game(cli, rnick, chan, rest):
    nick, mode, user, host = parse_nick(rnick)
    if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True:
        pl = var.list_players()
        if var.PHASE == "none":
            cli.notice(nick, "No game is currently running.")
            return
        if var.PHASE != "join":
            cli.notice(nick, "Werewolf is already in play.")
            return
        if nick not in pl:
            cli.notice(nick, "You're currently not playing.")
            return
        rest = rest.strip().lower()
        if rest:
            if cgamemode(cli, *re.split(" +", rest)):
                if var.LOG_CHAN == True:
                    chan_log(cli, rnick, "fgame")
                cli.msg(chan, ("\u0002{0}\u0002 has changed the "+
                               "game settings successfully.").format(nick))

def fgame_help(args=""):
    args = args.strip()
    if not args:
        return "Available game mode setters: " + ", ".join(var.GAME_MODES.keys())
    elif args in var.GAME_MODES.keys():
        return var.GAME_MODES[args].__doc__
    else:
        return "Game mode setter {0} not found.".format(args)

game.__doc__ = fgame_help

# DO NOT MAKE THIS A PMCOMMAND ALSO
@cmd("force", raw_nick=True)
def forcepm(cli, rnick, chan, rest):
    nick, mode, user, host = parse_nick(rnick)
    if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True:
        rst = re.split(" +", rest)
        if len(rst) < 2:
            cli.msg(chan, "The syntax is incorrect.")
            return
        who = rst.pop(0).strip()
        if not who or who == botconfig.NICK:
            cli.msg(chan, "That won't work.")
            return
        if not is_fake_nick(who):
            ul = list(var.USERS.keys())
            ull = [u.lower() for u in ul]
            if who.lower() not in ull:
                cli.msg(chan, "This can only be done on fake nicks.")
                return
            else:
                who = ul[ull.index(who.lower())]
        cmd = rst.pop(0).lower().replace(botconfig.CMD_CHAR, "", 1)
        did = False
        if PM_COMMANDS.get(cmd) and not PM_COMMANDS[cmd][0].owner_only:
            if (PM_COMMANDS[cmd][0].admin_only and nick in var.USERS and
                    not is_admin(var.USERS[nick]["cloak"])):
                # Not a full admin
                cli.notice(nick, "Only full admins can force an admin-only command.")
                return
            for fn in PM_COMMANDS[cmd]:
                if fn.raw_nick:
                    continue
                fn(cli, who, " ".join(rst))
                did = True
            if did:
                if var.LOG_CHAN == True:
                    chan_log(cli, rnick, "force")
                cli.msg(chan, "Operation successful.")
            else:
                cli.msg(chan, "Not possible with this command.")
            #if var.PHASE == "night":  <- Causes problems with night starting twice.
            #    chk_nightdone(cli)
        elif cmd.lower() in COMMANDS.keys() and not COMMANDS[cmd][0].owner_only:
            if (COMMANDS[cmd][0].admin_only and nick in var.USERS and
                    not is_admin(var.USERS[nick]["cloak"])):
                # Not a full admin
                cli.notice(nick, "Only full admins can force an admin-only command.")
                return
            for fn in COMMANDS[cmd]:
                if fn.raw_nick:
                    continue
                fn(cli, who, chan, " ".join(rst))
                did = True
            if did:
                cli.msg(chan, "Operation successful.")
            else:
                cli.msg(chan, "Not possible with this command.")
        else:
            cli.msg(chan, "That command was not found.")

@cmd("rforce", raw_nick=True)
def rforcepm(cli, rnick, chan, rest):
    nick, mode, user, host = parse_nick(rnick)
    if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True:
        rst = re.split(" +", rest)
        if len(rst) < 2:
            cli.msg(chan, "The syntax is incorrect.")
            return
        who = rst.pop(0).strip().lower()
        who = who.replace("_", " ")
        if ((who not in var.ROLES or not var.ROLES[who]) and
                (who != "gunner" or var.PHASE in ("none", "join"))):
            cli.msg(chan, nick + ": invalid role")
            return
        elif who == "gunner":
            tgt = list(var.GUNNERS.keys())
        else:
            tgt = var.ROLES[who]
        cmd = rst.pop(0).lower().replace(botconfig.CMD_CHAR, "", 1)
        if PM_COMMANDS.get(cmd) and not PM_COMMANDS[cmd][0].owner_only:
            if (PM_COMMANDS[cmd][0].admin_only and nick in var.USERS and
                    not is_admin(var.USERS[nick]["cloak"])):
                # Not a full admin
                cli.notice(nick, "Only full admins can force an admin-only command.")
                return
            for fn in PM_COMMANDS[cmd]:
                for guy in tgt[:]:
                    fn(cli, guy, " ".join(rst))
            if var.LOG_CHAN == True:
                chan_log(cli, rnick, "role_force")
            cli.msg(chan, "Operation successful.")
            #if var.PHASE == "night":  <- Causes problems with night starting twice.
            #    chk_nightdone(cli)
        elif cmd.lower() in COMMANDS.keys() and not COMMANDS[cmd][0].owner_only:
            if (COMMANDS[cmd][0].admin_only and nick in var.USERS and
                    not is_admin(var.USERS[nick]["cloak"])):
                # Not a full admin
                cli.notice(nick, "Only full admins can force an admin-only command.")
                return
            for fn in COMMANDS[cmd]:
                for guy in tgt[:]:
                    fn(cli, guy, chan, " ".join(rst))
            cli.msg(chan, "Operation successful.")
        else:
            cli.msg(chan, "That command was not found.")

@cmd("frole", raw_nick=True)
def frole(cli, rnick, chan, rest):
    nick, mode, user, host = parse_nick(rnick)
    if nick in var.IS_ADMIN and var.IS_ADMIN[nick] == True:
        rst = re.split(" +", rest)
        if len(rst) < 2:
            cli.msg(chan, "The syntax is incorrect.")
            return
        who = rst.pop(0).strip()
        rol = " ".join(rst).strip()
        ul = list(var.USERS.keys())
        ull = [u.lower() for u in ul]
        if who.lower() not in ull:
            if not is_fake_nick(who):
                cli.msg(chan, "Could not be done.")
                cli.msg(chan, "The target needs to be in this channel or a fake name.")
                return
        if not is_fake_nick(who):
            who = ul[ull.index(who.lower())]
        if who == botconfig.NICK or not who:
            cli.msg(chan, "No.")
            return
        if rol not in var.ROLES.keys():
            pl = var.list_players()
            if var.PHASE not in ("night", "day"):
                cli.msg(chan, "This is only allowed in game.")
                return
            if rol.startswith("gunner"):
                rolargs = re.split(" +", rol, 1)
                if len(rolargs) == 2 and rolargs[1].isdigit():
                    if len(rolargs[1]) < 7:
                        var.GUNNERS[who] = int(rolargs[1])
                        var.WOLF_GUNNERS[who] = int(rolargs[1])
                    else:
                        var.GUNNERS[who] = 999
                        var.WOLF_GUNNERS[who] = 999
                else:
                    var.GUNNERS[who] = math.ceil(var.SHOTS_MULTIPLIER * len(pl))
                if who not in pl:
                    var.ROLES["villager"].append(who)
            elif rol == "cursed villager":
                if who not in var.CURSED:
                    var.CURSED.append(who)
                if who not in pl:
                    var.ROLES["villager"].append(who)
            else:
                cli.msg(chan, "Not a valid role.")
                return
            if var.LOG_CHAN == True:
                chan_log(cli, rnick, "force_role")
            cli.msg(chan, "Operation successful.")
            return
        if who in var.list_players():
            var.del_player(who)
        var.ROLES[rol].append(who)
        cli.msg(chan, "Operation successful.")
        if var.PHASE not in ('none', 'join'):
            chk_win(cli)
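# A hedged sketch, not from the original module: the code above reads
# fn.raw_nick, fn.admin_only, fn.owner_only and fn.aliases off each handler
# and treats COMMANDS/PM_COMMANDS values as lists of handlers, so @cmd/@pmcmd
# plausibly behave like this hypothetical factory (names here are made up,
# and it registers into a scratch table so the real dispatch dicts stay untouched).
EXAMPLE_COMMANDS = {}

def example_cmd(*aliases, **flags):
    def dec(fn):
        fn.raw_nick = flags.get("raw_nick", False)
        fn.admin_only = flags.get("admin_only", False)
        fn.owner_only = flags.get("owner_only", False)
        fn.aliases = aliases[1:]  # !help skips aliases when listing commands
        for alias in aliases:
            EXAMPLE_COMMANDS.setdefault(alias, []).append(fn)
        return fn
    return dec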
Vgr255/Wolfbot
modules/wolfgame.py
Python
bsd-2-clause
196,307
[ "VisIt" ]
b58b01639304c39c994882aa5d5a35f520529af9c3e12b1163d36e0f388c34db
""" This object is a wrapper for setting and getting job states
"""

__RCSID__ = "$Id$"

import datetime

from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.WorkloadManagementSystem.Client.JobState.JobManifest import JobManifest
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.WorkloadManagementSystem.Service.JobPolicy import RIGHT_GET_INFO, RIGHT_RESCHEDULE
from DIRAC.WorkloadManagementSystem.Service.JobPolicy import RIGHT_RESET, RIGHT_CHANGE_STATUS
from DIRAC.WorkloadManagementSystem.DB.TaskQueueDB import singleValueDefFields, multiValueDefFields


class JobState(object):

  class DBHold:

    def __init__(self):
      self.checked = False
      self.reset()

    def reset(self):
      self.job = None
      self.log = None
      self.tq = None

  __db = DBHold()
  _sDisableLocal = False

  class RemoteMethod(object):
    """ Descriptor that runs the wrapped method against the local DBs when they
        are reachable and transparently forwards the same call over RPC otherwise.
    """

    def __init__(self, functor):
      self.__functor = functor

    def __get__(self, obj, oType=None):
      return self.__class__(self.__functor.__get__(obj, oType))

    def __call__(self, *args, **kwargs):
      funcSelf = self.__functor.__self__
      if not funcSelf.localAccess:
        rpc = funcSelf._getStoreClient()
        if kwargs:
          fArgs = (args, kwargs)
        else:
          fArgs = (args, )
        return getattr(rpc, self.__functor.__name__)(funcSelf.jid, fArgs)
      return self.__functor(*args, **kwargs)

  def __init__(self, jid, forceLocal=False, getRPCFunctor=False, source="Unknown"):
    self.__jid = jid
    self.__source = str(source)
    self.__forceLocal = forceLocal
    if getRPCFunctor:
      self.__getRPCFunctor = getRPCFunctor
    else:
      self.__getRPCFunctor = RPCClient
    self.checkDBAccess()

  @classmethod
  def checkDBAccess(cls):
    # Init DB if there
    if not JobState.__db.checked:
      JobState.__db.checked = True
      for varName, dbName in (('job', 'JobDB'), ('log', 'JobLoggingDB'),
                              ('tq', 'TaskQueueDB')):
        try:
          dbImp = "DIRAC.WorkloadManagementSystem.DB.%s" % dbName
          dbMod = __import__(dbImp, fromlist=[dbImp])
          dbClass = getattr(dbMod, dbName)
          dbInstance = dbClass()
          setattr(JobState.__db, varName, dbInstance)
          result = dbInstance._getConnection()
          if not result['OK']:
            gLogger.warn("Could not connect to %s (%s). Resorting to RPC" % (dbName, result['Message']))
            JobState.__db.reset()
            break
          else:
            result['Value'].close()
        except RuntimeError:
          JobState.__db.reset()
          break
        except ImportError:
          JobState.__db.reset()
          break
  @property
  def jid(self):
    return self.__jid

  def setSource(self, source):
    self.__source = source

  @property
  def localAccess(self):
    if JobState._sDisableLocal:
      return False
    if JobState.__db.job or self.__forceLocal:
      return True
    return False

  def __getDB(self):
    return JobState.__db.job

  def _getStoreClient(self):
    return self.__getRPCFunctor("WorkloadManagement/JobStateSync")

  def getManifest(self, rawData=False):
    if self.localAccess:
      result = self.__getDB().getJobJDL(self.__jid)
    else:
      result = self._getStoreClient().getManifest(self.__jid)
    if not result['OK'] or rawData:
      return result
    if not result['Value']:
      return S_ERROR("No manifest for job %s" % self.__jid)
    manifest = JobManifest()
    result = manifest.loadJDL(result['Value'])
    if not result['OK']:
      return result
    return S_OK(manifest)

  def setManifest(self, manifest):
    if not isinstance(manifest, JobManifest):
      manifestStr = manifest
      manifest = JobManifest()
      result = manifest.load(manifestStr)
      if not result['OK']:
        return result
    manifestJDL = manifest.dumpAsJDL()
    if self.localAccess:
      return self.__retryFunction(5, self.__getDB().setJobJDL, (self.__jid, manifestJDL))
    return self._getStoreClient().setManifest(self.__jid, manifestJDL)

  # Execute traces

  def __retryFunction(self, retries, functor, args=False, kwargs=False):
    retries = max(1, retries)
    if not args:
      args = tuple()
    if not kwargs:
      kwargs = {}
    while retries:
      retries -= 1
      result = functor(*args, **kwargs)
      if result['OK']:
        return result
      if retries == 0:
        return result
    return S_ERROR("No more retries")

  right_commitCache = RIGHT_GET_INFO

  @RemoteMethod
  def commitCache(self, initialState, cache, jobLog):
    try:
      self.__checkType(initialState, dict)
      self.__checkType(cache, dict)
      self.__checkType(jobLog, (list, tuple))
    except TypeError as excp:
      return S_ERROR(str(excp))
    result = self.getAttributes(initialState.keys())
    if not result['OK']:
      return result
    if not result['Value'] == initialState:
      return S_OK(False)
    gLogger.verbose("Job %s: About to execute trace. Current state %s" % (self.__jid, initialState))

    # Cache keys are namespaced by destination: "att.<name>" goes to the job
    # attributes, "jobp.<name>" to the job parameters and "optp.<name>" to the
    # optimizer parameters.
    data = {'att': [], 'jobp': [], 'optp': []}
    for key in cache:
      for dk in data:
        if key.find("%s." % dk) == 0:
          data[dk].append((key[len(dk) + 1:], cache[key]))
    jobDB = JobState.__db.job
    if data['att']:
      attN = [t[0] for t in data['att']]
      attV = [t[1] for t in data['att']]
      result = self.__retryFunction(5, jobDB.setJobAttributes,
                                    (self.__jid, attN, attV), {'update': True})
      if not result['OK']:
        return result

    if data['jobp']:
      result = self.__retryFunction(5, jobDB.setJobParameters, (self.__jid, data['jobp']))
      if not result['OK']:
        return result

    for k, v in data['optp']:
      result = self.__retryFunction(5, jobDB.setJobOptParameter, (self.__jid, k, v))
      if not result['OK']:
        return result

    if 'inputData' in cache:
      result = self.__retryFunction(5, jobDB.setInputData, (self.__jid, cache['inputData']))
      if not result['OK']:
        return result

    logDB = JobState.__db.log
    gLogger.verbose("Adding logging records for %s" % self.__jid)
    for record, updateTime, source in jobLog:
      gLogger.verbose("Logging records for %s: %s %s %s" % (self.__jid, record, updateTime, source))
      record['date'] = updateTime
      record['source'] = source
      result = self.__retryFunction(5, logDB.addLoggingRecord, (self.__jid, ), record)
      if not result['OK']:
        return result

    gLogger.info("Job %s: Ended trace execution" % self.__jid)
    # We return a new initial state
    return self.getAttributes(initialState.keys())

  #
  # Status
  #

  def __checkType(self, value, tList, canBeNone=False):
    """ Raise TypeError if the value does not have one of the expected types

        :param value: the value to test
        :param tList: type or tuple of types
        :param canBeNone: boolean, since there is no type for None to be used with isinstance
    """
    if canBeNone:
      if value is None:
        return
    if not isinstance(value, tList):
      raise TypeError("%s has wrong type. Has to be one of %s" % (value, tList))

  right_setStatus = RIGHT_GET_INFO

  @RemoteMethod
  def setStatus(self, majorStatus, minorStatus=None, appStatus=None, source=None, updateTime=None):
    try:
      self.__checkType(majorStatus, basestring)
      self.__checkType(minorStatus, basestring, canBeNone=True)
      self.__checkType(appStatus, basestring, canBeNone=True)
      self.__checkType(source, basestring, canBeNone=True)
      self.__checkType(updateTime, datetime.datetime, canBeNone=True)
    except TypeError as excp:
      return S_ERROR(str(excp))
    result = JobState.__db.job.setJobStatus(self.__jid, majorStatus, minorStatus, appStatus)
    if not result['OK']:
      return result
    # HACK: Cause joblogging is crappy
    if not minorStatus:
      minorStatus = 'idem'
    if not source:
      source = self.__source
    return JobState.__db.log.addLoggingRecord(self.__jid, majorStatus, minorStatus, appStatus,
                                              date=updateTime, source=source)

  right_setMinorStatus = RIGHT_GET_INFO

  @RemoteMethod
  def setMinorStatus(self, minorStatus, source=None, updateTime=None):
    try:
      self.__checkType(minorStatus, basestring)
      self.__checkType(source, basestring, canBeNone=True)
    except TypeError as excp:
      return S_ERROR(str(excp))
    result = JobState.__db.job.setJobStatus(self.__jid, minor=minorStatus)
    if not result['OK']:
      return result
    if not source:
      source = self.__source
    return JobState.__db.log.addLoggingRecord(self.__jid, minor=minorStatus,
                                              date=updateTime, source=source)

  @RemoteMethod
  def getStatus(self):
    result = JobState.__db.job.getJobAttributes(self.__jid, ['Status', 'MinorStatus'])
    if not result['OK']:
      return result
    data = result['Value']
    if data:
      return S_OK((data['Status'], data['MinorStatus']))
    else:
      return S_ERROR('Job %d not found in the JobDB' % int(self.__jid))

  right_setAppStatus = RIGHT_GET_INFO

  @RemoteMethod
  def setAppStatus(self, appStatus, source=None, updateTime=None):
    try:
      self.__checkType(appStatus, basestring)
      self.__checkType(source, basestring, canBeNone=True)
    except TypeError as excp:
      return S_ERROR(str(excp))
    result = JobState.__db.job.setJobStatus(self.__jid, application=appStatus)
    if not result['OK']:
      return result
    if not source:
      source = self.__source
    return JobState.__db.log.addLoggingRecord(self.__jid, application=appStatus,
                                              date=updateTime, source=source)

  right_getAppStatus = RIGHT_GET_INFO

  @RemoteMethod
  def getAppStatus(self):
    result = JobState.__db.job.getJobAttributes(self.__jid, ['ApplicationStatus'])
    if result['OK']:
      result['Value'] = result['Value']['ApplicationStatus']
    return result

  # Attributes

  right_setAttribute = RIGHT_GET_INFO

  @RemoteMethod
  def setAttribute(self, name, value):
    try:
      self.__checkType(name, basestring)
      self.__checkType(value, basestring)
    except TypeError as excp:
      return S_ERROR(str(excp))
    return JobState.__db.job.setJobAttribute(self.__jid, name, value)

  right_setAttributes = RIGHT_GET_INFO

  @RemoteMethod
  def setAttributes(self, attDict):
    try:
      self.__checkType(attDict, dict)
    except TypeError as excp:
      return S_ERROR(str(excp))
    keys = [key for key in attDict]
    values = [attDict[key] for key in keys]
    return JobState.__db.job.setJobAttributes(self.__jid, keys, values)

  right_getAttribute = RIGHT_GET_INFO

  @RemoteMethod
  def getAttribute(self, name):
    try:
      self.__checkType(name, basestring)
    except TypeError as excp:
      return S_ERROR(str(excp))
    return JobState.__db.job.getJobAttribute(self.__jid, name)

  right_getAttributes = RIGHT_GET_INFO

  @RemoteMethod
  def getAttributes(self, nameList=None):
    try:
      self.__checkType(nameList, (list, tuple), canBeNone=True)
    except TypeError as excp:
      return S_ERROR(str(excp))
    return JobState.__db.job.getJobAttributes(self.__jid, nameList)

  # OptimizerParameters

  right_setOptParameter = RIGHT_GET_INFO

  @RemoteMethod
  def setOptParameter(self, name, value):
    try:
      self.__checkType(name, basestring)
      self.__checkType(value, basestring)
    except TypeError as excp:
      return S_ERROR(str(excp))
    return JobState.__db.job.setJobOptParameter(self.__jid, name, value)

  right_setOptParameters = RIGHT_GET_INFO

  @RemoteMethod
  def setOptParameters(self, pDict):
    try:
      self.__checkType(pDict, dict)
    except TypeError as excp:
      return S_ERROR(str(excp))
    for name in pDict:
      result = JobState.__db.job.setJobOptParameter(self.__jid, name, pDict[name])
      if not result['OK']:
        return result
    return S_OK()

  right_removeOptParameters = RIGHT_GET_INFO

  @RemoteMethod
  def removeOptParameters(self, nameList):
    if isinstance(nameList, basestring):
      nameList = [nameList]
    try:
      self.__checkType(nameList, (list, tuple))
    except TypeError as excp:
      return S_ERROR(str(excp))
    for name in nameList:
      result = JobState.__db.job.removeJobOptParameter(self.__jid, name)
      if not result['OK']:
        return result
    return S_OK()

  right_getOptParameter = RIGHT_GET_INFO

  @RemoteMethod
  def getOptParameter(self, name):
    try:
      self.__checkType(name, basestring)
    except TypeError as excp:
      return S_ERROR(str(excp))
    return JobState.__db.job.getJobOptParameter(self.__jid, name)

  right_getOptParameters = RIGHT_GET_INFO

  @RemoteMethod
  def getOptParameters(self, nameList=None):
    try:
      self.__checkType(nameList, (list, tuple), canBeNone=True)
    except TypeError as excp:
      return S_ERROR(str(excp))
    return JobState.__db.job.getJobOptParameters(self.__jid, nameList)

  # Other

  @classmethod
  def cleanTaskQueues(cls, source=''):
    result = JobState.__db.tq.enableAllTaskQueues()
    if not result['OK']:
      return result
    result = JobState.__db.tq.findOrphanJobs()
    if not result['OK']:
      return result
    for jid in result['Value']:
      result = JobState.__db.tq.deleteJob(jid)
      if not result['OK']:
        gLogger.error("Cannot delete from TQ job %s: %s" % (jid, result['Message']))
        continue
      result = JobState.__db.job.rescheduleJob(jid)
      if not result['OK']:
        gLogger.error("Cannot reschedule in JobDB job %s: %s" % (jid, result['Message']))
        continue
      JobState.__db.log.addLoggingRecord(jid, "Received", "", "", source="JobState")
    return S_OK()

  right_rescheduleJob = RIGHT_RESCHEDULE

  @RemoteMethod
  def rescheduleJob(self, source=""):
    result = JobState.__db.tq.deleteJob(self.__jid)
    if not result['OK']:
      return S_ERROR("Cannot delete from TQ job %s: %s" % (self.__jid, result['Message']))
    result = JobState.__db.job.rescheduleJob(self.__jid)
    if not result['OK']:
      return S_ERROR("Cannot reschedule in JobDB job %s: %s" % (self.__jid, result['Message']))
    JobState.__db.log.addLoggingRecord(self.__jid, "Received", "", "", source=source)
    return S_OK()

  right_resetJob = RIGHT_RESET

  @RemoteMethod
  def resetJob(self, source=""):
    result = JobState.__db.job.setJobAttribute(self.__jid, "RescheduleCounter", -1)
    if not result['OK']:
      return S_ERROR("Cannot set the RescheduleCounter for job %s: %s" % (self.__jid, result['Message']))
    result = JobState.__db.tq.deleteJob(self.__jid)
    if not result['OK']:
      return S_ERROR("Cannot delete from TQ job %s: %s" % (self.__jid, result['Message']))
    result = JobState.__db.job.rescheduleJob(self.__jid)
    if not result['OK']:
      return S_ERROR("Cannot reschedule in JobDB job %s: %s" % (self.__jid, result['Message']))
    JobState.__db.log.addLoggingRecord(self.__jid, "Received", "", "", source=source)
    return S_OK()

  right_getInputData = RIGHT_GET_INFO

  @RemoteMethod
  def getInputData(self):
    return JobState.__db.job.getInputData(self.__jid)

  @classmethod
  def checkInputDataStructure(cls, pDict):
    if not isinstance(pDict, dict):
      return S_ERROR("Input data has to be a dictionary")
    for lfn in pDict:
      if 'Replicas' not in pDict[lfn]:
        return S_ERROR("Missing replicas for lfn %s" % lfn)
      replicas = pDict[lfn]['Replicas']
      for seName in replicas:
        if 'SURL' not in replicas[seName] or 'Disk' not in replicas[seName]:
          return S_ERROR("Missing SURL or Disk for %s:%s replica" % (seName, lfn))
    return S_OK()

  right_setInputData = RIGHT_GET_INFO

  @RemoteMethod
  def setInputData(self, lfnData):
    result = self.checkInputDataStructure(lfnData)
    if not result['OK']:
      return result
    return self.__db.job.setInputData(self.__jid, lfnData)

  right_insertIntoTQ = RIGHT_CHANGE_STATUS

  @RemoteMethod
  def insertIntoTQ(self, manifest=None):
    if not manifest:
      result = self.getManifest()
      if not result['OK']:
        return result
      manifest = result['Value']

    reqSection = "JobRequirements"
    result = manifest.getSection(reqSection)
    if not result['OK']:
      return S_ERROR("No %s section in the job manifest" % reqSection)
    reqCfg = result['Value']

    jobReqDict = {}
    for name in singleValueDefFields:
      if name in reqCfg:
        if name == 'CPUTime':
          jobReqDict[name] = int(reqCfg[name])
        else:
          jobReqDict[name] = reqCfg[name]

    for name in multiValueDefFields:
      if name in reqCfg:
        jobReqDict[name] = reqCfg.getOption(name, [])

    jobPriority = reqCfg.getOption('UserPriority', 1)

    result = self.__retryFunction(2, JobState.__db.tq.insertJob,
                                  (self.__jid, jobReqDict, jobPriority))
    if not result['OK']:
      errMsg = result['Message']
      # Force removing the job from the TQ if it was actually inserted
      result = JobState.__db.tq.deleteJob(self.__jid)
      if result['OK']:
        if result['Value']:
          gLogger.info("Job %s removed from the TQ" % self.__jid)
      return S_ERROR("Cannot insert in task queue: %s" % errMsg)
    return S_OK()
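# A minimal usage sketch (an assumption, not part of the original module;
# 1234 is a placeholder job id): RemoteMethod makes the call below run against
# the local JobDB when it is reachable, and otherwise forwards the same
# invocation over RPC to the WorkloadManagement/JobStateSync service.
if __name__ == "__main__":
  jobState = JobState(1234, source="Example")
  result = jobState.getStatus()
  if result['OK']:
    majorStatus, minorStatus = result['Value']
    gLogger.notice("Job 1234: %s (%s)" % (majorStatus, minorStatus))
  else:
    gLogger.error(result['Message'])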
andresailer/DIRAC
WorkloadManagementSystem/Client/JobState/JobState.py
Python
gpl-3.0
17,258
[ "DIRAC" ]
2ce1c582dbb55f02b8aef29aecda6c14ba1d4cef50d6d7b8bfffebfa78f3310e
# coding: utf-8

# # 2016 US Bike Share Activity Snapshot
#
# ## Table of Contents
# - [Introduction](#intro)
# - [Posing Questions](#pose_questions)
# - [Data Collection and Wrangling](#wrangling)
#   - [Condensing the Trip Data](#condensing)
# - [Exploratory Data Analysis](#eda)
#   - [Statistics](#statistics)
#   - [Visualizations](#visualizations)
# - [Performing Your Own Analysis](#eda_continued)
# - [Conclusions](#conclusions)
#
# <a id='intro'></a>
# ## Introduction
#
# > **Tip**: Quoted sections like this will provide helpful instructions on how to navigate and use a Jupyter notebook.
#
# Over the past decade, bicycle-sharing systems have been growing in number and popularity in cities across the world. Bicycle-sharing systems allow users to rent bicycles for short trips, typically 30 minutes or less. Thanks to the rise in information technologies, it is easy for a user of the system to access a dock within the system to unlock or return bicycles. These technologies also provide a wealth of data that can be used to explore how these bike-sharing systems are used.
#
# In this project, you will perform an exploratory analysis on data provided by [Motivate](https://www.motivateco.com/), a bike-share system provider for many major cities in the United States. You will compare the system usage between three large cities: New York City, Chicago, and Washington, DC. You will also see if there are any differences within each system for those users that are registered, regular users and those users that are short-term, casual users.

# <a id='pose_questions'></a>
# ## Posing Questions
#
# Before looking at the bike sharing data, you should start by asking questions you might want to understand about the bike share data. Consider, for example, if you were working for Motivate. What kinds of information would you want to know about in order to make smarter business decisions? If you were a user of the bike-share service, what factors might influence how you would want to use the service?
#
# **Question 1**: Write at least two questions related to bike sharing that you think could be answered by data.
#
# **Answer**: To inform business decisions I would consider:
# - usage distribution as a function of:
#   - time of day
#   - day of year
#   - season
#   - weather patterns
# - customer demographics
# - customer segments
# - whether any service point is running out of bikes, and when (time of day, day of year)
#
# The main general questions come down to the classics:
#
# - Who uses the bike share?
# - How do they use it?
# - Why do they use it?
# - When do they use it?
# - Where do they use it (where do they pick up and return bikes)?
#
# Finally, some specific questions:
#
# - What is the most common day of usage for subscribers and for customers?
# - What is the most common time of usage for subscribers and for customers?
# - What is the most common trip duration?
#
# > **Tip**: If you double click on this cell, you will see the text change so that all of the formatting is removed. This allows you to edit this block of text. This block of text is written using [Markdown](http://daringfireball.net/projects/markdown/syntax), which is a way to format text using headers, links, italics, and many other options using a plain-text syntax. You will also use Markdown later in the Nanodegree program. Use **Shift** + **Enter** or **Shift** + **Return** to run the cell and show its rendered form.

# <a id='wrangling'></a>
# ## Data Collection and Wrangling
#
# Now it's time to collect and explore our data.
# In this project, we will focus on the record of individual trips taken in 2016 from our selected cities: New York City, Chicago, and Washington, DC. Each of these cities has a page where we can freely download the trip data:
#
# - New York City (Citi Bike): [Link](https://www.citibikenyc.com/system-data)
# - Chicago (Divvy): [Link](https://www.divvybikes.com/system-data)
# - Washington, DC (Capital Bikeshare): [Link](https://www.capitalbikeshare.com/system-data)
#
# If you visit these pages, you will notice that each city has a different way of delivering its data. Chicago updates with new data twice a year, Washington, DC is quarterly, and New York City is monthly. **However, you do not need to download the data yourself.** The data has already been collected for you in the `/data/` folder of the project files. While the original data for 2016 is spread among multiple files for each city, the files in the `/data/` folder collect all of the trip data for the year into one file per city. Some data wrangling of inconsistencies in timestamp format within each city has already been performed for you. In addition, a random 2% sample of the original data is taken to make the exploration more manageable.
#
# **Question 2**: However, there is still a lot of data for us to investigate, so it's a good idea to start off by looking at one entry from each of the cities we're going to analyze. Run the first code cell below to load some packages and functions that you'll be using in your analysis. Then, complete the second code cell to print out the first trip recorded from each of the cities (the second line of each data file).
#
# > **Tip**: You can run a code cell like you formatted Markdown cells above by clicking on the cell and using the keyboard shortcut **Shift** + **Enter** or **Shift** + **Return**. Alternatively, a code cell can be executed using the **Play** button in the toolbar after selecting it. While the cell is running, you will see an asterisk in the message to the left of the cell, i.e. `In [*]:`. The asterisk will change into a number to show that execution has completed, e.g. `In [1]`. If there is output, it will show up as `Out [1]:`, with an appropriate number to match the "In" number.

# In[23]:

## import all necessary packages and functions.
import csv  # read and write csv files
from datetime import datetime  # operations to parse dates
from datetime import time
from datetime import date
import pprint  # use to print data structures like dictionaries in
               # a nicer way than the base print function.

# In[24]:

def print_first_point(filename):
    """
    This function prints and returns the first data point (second row)
    from a csv file that includes a header row.
    """
    # print city name for reference
    city = filename.split('-')[0].split('/')[-1]
    print('\nCity: {}'.format(city))

    with open(filename, 'r') as f_in:
        ## TODO: Use the csv library to set up a DictReader object. ##
        ## see https://docs.python.org/3/library/csv.html           ##
        trip_reader = csv.DictReader(f_in)

        ## TODO: Use a function on the DictReader object to read the     ##
        ## first trip from the data file and store it in a variable.     ##
        ## see https://docs.python.org/3/library/csv.html#reader-objects ##
        first_trip = next(trip_reader)

        ## TODO: Use the pprint library to print the first trip. ##
        ## see https://docs.python.org/3/library/pprint.html      ##
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(first_trip)

    # output city name and first trip for later testing
    return (city, first_trip)

# list of files for each city
data_files = ['./data/NYC-CitiBike-2016.csv',
              './data/Chicago-Divvy-2016.csv',
              './data/Washington-CapitalBikeshare-2016.csv',]

# print the first trip from each file, store in dictionary
example_trips = {}
for data_file in data_files:
    city, first_trip = print_first_point(data_file)
    example_trips[city] = first_trip

# If everything has been filled out correctly, you should see below the printout of each city name (which has been parsed from the data file name) that the first trip has been parsed in the form of a dictionary. When you set up a `DictReader` object, the first row of the data file is normally interpreted as column names. Every other row in the data file will use those column names as keys, as a dictionary is generated for each row.
#
# This will be useful since we can refer to quantities by an easily-understandable label instead of just a numeric index. For example, if we have a trip stored in the variable `row`, then we would rather get the trip duration from `row['duration']` instead of `row[0]`.
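# > As a quick illustration of the point above (a small added example that uses only the `example_trips` dictionary built earlier; `'tripduration'` is NYC's header name for trip length, as used later in `duration_in_mins`):

# In[ ]:

# Rows behave like dictionaries keyed by the file's header names,
# so fields are fetched by column name rather than by position.
print(example_trips['NYC']['tripduration'])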
# <a id='condensing'></a>
# ### Condensing the Trip Data
#
# It should also be observable from the above printout that each city provides different information. Even where the information is the same, the column names and formats are sometimes different. To make things as simple as possible when we get to the actual exploration, we should trim and clean the data. Cleaning the data makes sure that the data formats across the cities are consistent, while trimming focuses only on the parts of the data we are most interested in to make the exploration easier to work with.
#
# You will generate new data files with five values of interest for each trip: trip duration, starting month, starting hour, day of the week, and user type. Each of these may require additional wrangling depending on the city:
#
# - **Duration**: This has been given to us in seconds (New York, Chicago) or milliseconds (Washington). A more natural unit of analysis will be if all the trip durations are given in terms of minutes.
# - **Month**, **Hour**, **Day of Week**: Ridership volume is likely to change based on the season, time of day, and whether it is a weekday or weekend. Use the start time of the trip to obtain these values. The New York City data includes the seconds in their timestamps, while Washington and Chicago do not. The [`datetime`](https://docs.python.org/3/library/datetime.html) package will be very useful here to make the needed conversions.
# - **User Type**: It is possible that users who are subscribed to a bike-share system will have different patterns of use compared to users who only have temporary passes. Washington divides its users into two types: 'Registered' for users with annual, monthly, and other longer-term subscriptions, and 'Casual', for users with 24-hour, 3-day, and other short-term passes. The New York and Chicago data uses 'Subscriber' and 'Customer' for these groups, respectively. For consistency, you will convert the Washington labels to match the other two.
#
# **Question 3a**: Complete the helper functions in the code cells below to address each of the cleaning tasks described above.
# In[25]:

def duration_in_mins(datum, city):
    """
    Takes as input a dictionary containing info about a single trip (datum) and
    its origin city (city) and returns the trip duration in units of minutes.

    Remember that Washington is in terms of milliseconds while Chicago and NYC
    are in terms of seconds.

    HINT: The csv module reads in all of the data as strings, including numeric
    values. You will need a function to convert the strings into an appropriate
    numeric type when making your transformations.
    see https://docs.python.org/3/library/functions.html
    """
    if city == 'NYC' or city == 'Chicago':
        duration = int(datum['tripduration']) / 60
    elif city == 'BayArea':
        duration = float(datum['duration'])
    else:  # Washington reports milliseconds
        duration = int(datum['Duration (ms)']) / 60000
    return duration

# Some tests to check that your code works. There should be no output if all of
# the assertions pass. The `example_trips` dictionary was obtained from when
# you printed the first trip from each of the original data files.
tests = {'NYC': 13.9833,
         'Chicago': 15.4333,
         'Washington': 7.1231}

for city in tests:
    assert abs(duration_in_mins(example_trips[city], city) - tests[city]) < .001

# In[26]:

def time_of_trip(datum, city):
    """
    Takes as input a dictionary containing info about a single trip (datum) and
    its origin city (city) and returns the month, hour, and day of the week in
    which the trip was made.

    Remember that NYC includes seconds, while Washington and Chicago do not.

    HINT: You should use the datetime module to parse the original date strings
    into a format that is useful for extracting the desired information.
    see https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
    """
    days_dict = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday',
                 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}

    # Parse the city-specific timestamp format, then read the fields off the
    # datetime object directly (slicing digit characters out of strftime
    # output breaks for two-digit months and hours).
    if city == 'NYC':
        trip_date = datetime.strptime(datum['starttime'], '%m/%d/%Y %H:%M:%S')
    elif city == 'Chicago':
        trip_date = datetime.strptime(datum['starttime'], '%m/%d/%Y %H:%M')
    else:  # Washington
        trip_date = datetime.strptime(datum['Start date'], '%m/%d/%Y %H:%M')

    month = trip_date.month
    hour = trip_date.hour
    day_of_week = days_dict[trip_date.weekday()]

    return (month, hour, day_of_week)

# Some tests to check that your code works. There should be no output if all of
# the assertions pass. The `example_trips` dictionary was obtained from when
# you printed the first trip from each of the original data files.
tests = {'NYC': (1, 0, 'Friday'),
         'Chicago': (3, 23, 'Thursday'),
         'Washington': (3, 22, 'Thursday')}

for city in tests:
    assert time_of_trip(example_trips[city], city) == tests[city]

# In[27]:

def type_of_user(datum, city):
    """
    Takes as input a dictionary containing info about a single trip (datum) and
    its origin city (city) and returns the type of system user that made the
    trip.

    Remember that Washington has different category names compared to Chicago
    and NYC.
    """
    if city == 'NYC' or city == 'Chicago':
        user_type = datum['usertype']
    elif city == 'BayArea':
        user_type = datum['user_type']
    else:
        # Washington labels users 'Registered'/'Casual'; map them onto the
        # 'Subscriber'/'Customer' labels used by the other cities.
        user_type = datum['Member Type']
        if user_type == 'Registered':
            user_type = 'Subscriber'
        else:
            user_type = 'Customer'
    return user_type

# Some tests to check that your code works. There should be no output if all of
# the assertions pass. The `example_trips` dictionary was obtained from when
# you printed the first trip from each of the original data files.
tests = {'NYC': 'Customer',
         'Chicago': 'Subscriber',
         'Washington': 'Subscriber'}

for city in tests:
    assert type_of_user(example_trips[city], city) == tests[city]

# **Question 3b**: Now, use the helper functions you wrote above to create a condensed data file for each city consisting only of the data fields indicated above. In the `/examples/` folder, you will see an example datafile from the [Bay Area Bike Share](http://www.bayareabikeshare.com/open-data) before and after conversion. Make sure that your output is formatted to be consistent with the example file.

# In[28]:

def condense_data(in_file, out_file, city):
    """
    This function takes full data from the specified input file
    and writes the condensed data to a specified output file. The city
    argument determines how the input file will be parsed.

    HINT: See the cell below to see how the arguments are structured!
    """
    with open(out_file, 'w') as f_out, open(in_file, 'r') as f_in:
        # set up csv DictWriter object - writer requires column names for the
        # first row as the "fieldnames" argument
        out_colnames = ['duration', 'month', 'hour', 'day_of_week', 'user_type']
        trip_writer = csv.DictWriter(f_out, fieldnames=out_colnames)
        trip_writer.writeheader()

        ## TODO: set up csv DictReader object ##
        trip_reader = csv.DictReader(f_in)

        # collect data from and process each row
        for row in trip_reader:
            # set up a dictionary to hold the values for the cleaned and
            # trimmed data point
            new_point = {}

            ## TODO: use the helper functions to get the cleaned data from  ##
            ## the original data dictionaries.                              ##
            ## Note that the keys for the new_point dictionary should match ##
            ## the column names set in the DictWriter object above.         ##
            month, hour, day_of_week = time_of_trip(row, city)
            new_point['duration'] = duration_in_mins(row, city)
            new_point['month'] = month
            new_point['hour'] = hour
            new_point['day_of_week'] = day_of_week
            new_point['user_type'] = type_of_user(row, city)

            ## TODO: write the processed information to the output file.     ##
            ## see https://docs.python.org/3/library/csv.html#writer-objects ##
            trip_writer.writerow(new_point)

# In[29]:

# Run this cell to check your work
city_info = {'Washington': {'in_file': './data/Washington-CapitalBikeshare-2016.csv',
                            'out_file': './data/Washington-2016-Summary.csv'},
             'Chicago': {'in_file': './data/Chicago-Divvy-2016.csv',
                         'out_file': './data/Chicago-2016-Summary.csv'},
             'NYC': {'in_file': './data/NYC-CitiBike-2016.csv',
                     'out_file': './data/NYC-2016-Summary.csv'}}

for city, filenames in city_info.items():
    condense_data(filenames['in_file'], filenames['out_file'], city)
    print_first_point(filenames['out_file'])

# > **Tip**: If you save a jupyter Notebook, the output from running code blocks will also be saved. However, the state of your workspace will be reset once a new session is started. Make sure that you run all of the necessary code blocks from your previous session to reestablish variables and functions before picking up where you last left off.
#
# <a id='eda'></a>
# ## Exploratory Data Analysis
#
# Now that you have the data collected and wrangled, you're ready to start exploring the data. In this section you will write some code to compute descriptive statistics from the data. You will also be introduced to the `matplotlib` library to create some basic histograms of the data.
#
# <a id='statistics'></a>
# ### Statistics
#
# First, let's compute some basic counts. The first cell below contains a function that uses the csv module to iterate through a provided data file, returning the number of trips made by subscribers and customers. The second cell runs this function on the example Bay Area data in the `/examples/` folder. Modify the cells to answer the question below.
#
# **Question 4a**: Which city has the highest number of trips? Which city has the highest proportion of trips made by subscribers? Which city has the highest proportion of trips made by short-term customers?
#
# **Answer**: NYC has the highest number of trips, as well as the highest counts of both subscriber trips and short-term customer trips.

# In[30]:

def number_of_trips(filename):
    """
    This function reads in a file with trip data and reports the number of
    trips made by subscribers, customers, and total overall.
    """
    city = filename.split('-')[0].split('/')[-1]
    with open(filename, 'r') as f_in:
        # set up csv reader object
        reader = csv.DictReader(f_in)

        # initialize count variables
        n_subscribers = 0
        n_customers = 0

        # tally up ride types
        for row in reader:
            if city == 'NYC' or city == 'Chicago':
                if row['usertype'] == 'Subscriber':
                    n_subscribers += 1
                else:
                    n_customers += 1
            else:
                if row['Member Type'] == 'Registered':
                    n_subscribers += 1
                else:
                    n_customers += 1

        # compute total number of rides
        n_trips = n_subscribers + n_customers

        # return tallies as a tuple
        return city, n_trips, n_subscribers, n_customers

# In[31]:

## Modify this and the previous cell to answer Question 4a. Remember to run ##
## the function on the cleaned data files you created from Question 3.      ##

data_file = ['./data/NYC-CitiBike-2016.csv',
             './data/Chicago-Divvy-2016.csv',
             './data/Washington-CapitalBikeshare-2016.csv']
output = []
for file in data_file:
    data = number_of_trips(file)
    output.append(data)
for item in output:
    print(item[0], ":", item[1], "=>'TotalTrips' ", item[2], "=>'TotalSubscriber' ", item[3], "=>'TotalCustomer'")

# > **Tip**: In order to add additional cells to a notebook, you can use the "Insert Cell Above" and "Insert Cell Below" options from the menu bar above. There is also an icon in the toolbar for adding new cells, with additional icons for moving the cells up and down the document. By default, new cells are of the code type; you can also specify the cell type (e.g. Code or Markdown) of selected cells from the Cell menu or the dropdown in the toolbar.
#
# Now, you will write your own code to continue investigating properties of the data.
#
# **Question 4b**: Bike-share systems are designed for riders to take short trips. Most of the time, users are allowed to take trips of 30 minutes or less with no additional charges, with overage charges made for trips of longer than that duration. What is the average trip length for each city? What proportion of rides made in each city are longer than 30 minutes?
#
# **Answer**:
# - **NYC**: average trip length = 15.81 minutes; proportion of trips longer than 30 minutes = 7.30%
# - **Chicago**: average trip length = 16.56 minutes; proportion of trips longer than 30 minutes = 8.33%
# - **Washington**: average trip length = 18.93 minutes; proportion of trips longer than 30 minutes = 10.83%

# In[32]:

## Use this and additional cells to answer Question 4b.                 ##
##                                                                      ##
## HINT: The csv module reads in all of the data as strings, including  ##
## numeric values. You will need a function to convert the strings      ##
## into an appropriate numeric type before you aggregate data.          ##
## TIP: For the Bay Area example, the average trip length is 14 minutes ##
## and 3.5% of trips are longer than 30 minutes.                        ##

def trip_avg(filename):
    city = filename.split('-')[0].split('/')[-1]
    with open(filename, 'r') as f_in:
        trip = csv.DictReader(f_in)
        trips = 0
        trip_exceed = 0
        trip_duration = 0
        for row in trip:
            trips += 1
            trip_time = duration_in_mins(row, city)
            if trip_time > 30:
                trip_exceed += 1
            trip_duration += trip_time
        trip_exceed_percent = trip_exceed / trips * 100
        avg_duration = trip_duration / trips
        return (city, avg_duration, trip_exceed_percent)


# In[34]:

data_file = ['./data/NYC-CitiBike-2016.csv',
             './data/Chicago-Divvy-2016.csv',
             './data/Washington-CapitalBikeshare-2016.csv']
for file in data_file:
    print(trip_avg(file))


# **Question 4c**: Dig deeper into the question of trip duration based on ridership. Choose one city. Within that city, which type of user takes longer rides on average: Subscribers or Customers?
#
# **Answer**: Choosing **NYC**: once each group's total duration is divided by that group's own trip count, Customers take noticeably longer rides on average than Subscribers.

# In[35]:

## Use this and additional cells to answer Question 4c. If you have    ##
## not done so yet, consider revising some of your previous code to    ##
## make use of functions for reusability.                              ##
##                                                                     ##
## TIP: For the Bay Area example data, you should find the average     ##
## Subscriber trip duration to be 9.5 minutes and the average Customer ##
## trip duration to be 54.6 minutes. Do the other cities have this     ##
## level of difference?                                                ##

def avg_user_type(filename):
    city = filename.split('-')[0].split('/')[-1]
    with open(filename, 'r') as f_in:
        data = csv.DictReader(f_in)
        n_subscribers = 0
        n_customers = 0
        subscriber_total = 0
        customer_total = 0
        for row in data:
            if type_of_user(row, city) == 'Subscriber':
                n_subscribers += 1
                subscriber_total += duration_in_mins(row, city)
            else:
                n_customers += 1
                customer_total += duration_in_mins(row, city)
        # divide each group's total duration by that group's own trip count;
        # dividing both totals by the overall number of trips would understate
        # both averages and can flip the comparison
        subscriber_avg = subscriber_total / n_subscribers
        customer_avg = customer_total / n_customers
        return (city, subscriber_avg, customer_avg)


# In[36]:

data_file = ['./data/NYC-CitiBike-2016.csv',
             './data/Chicago-Divvy-2016.csv',
             './data/Washington-CapitalBikeshare-2016.csv']
for file in data_file:
    print(avg_user_type(file))


# <a id='visualizations'></a>
# ### Visualizations
#
# The last set of values that you computed should have pulled up an interesting result. While the mean trip time for Subscribers is well under 30 minutes, the mean trip time for Customers is actually _above_ 30 minutes! It will be interesting for us to look at how the trip times are distributed. In order to do this, a new library will be introduced here, `matplotlib`. Run the cell below to load the library and to generate an example plot.

# In[37]:

# load library
import matplotlib.pyplot as plt

# this is a 'magic word' that allows for plots to be displayed
# inline with the notebook. If you want to know more, see:
# http://ipython.readthedocs.io/en/stable/interactive/magics.html
get_ipython().magic('matplotlib inline')

# example histogram, data taken from bay area sample
data = [ 7.65,  8.92,  7.42,  5.50, 16.17,  4.20,  8.98,  9.62, 11.48, 14.33,
        19.02, 21.53,  3.90,  7.97,  2.62,  2.67,  3.08, 14.40, 12.90,  7.83,
        25.12,  8.30,  4.93, 12.43, 10.60,  6.17, 10.88,  4.78, 15.15,  3.53,
         9.43, 13.32, 11.72,  9.85,  5.22, 15.10,  3.95,  3.17,  8.78,  1.88,
         4.55, 12.68, 12.38,  9.78,  7.63,  6.45, 17.38, 11.90, 11.52,  8.63]
plt.hist(data)
plt.title('Distribution of Trip Durations')
plt.xlabel('Duration (m)')
plt.show()


# In the above cell, we collected fifty trip times in a list, and passed this list as the first argument to the `.hist()` function. This function performs the computations and creates plotting objects for generating a histogram, but the plot is actually not rendered until the `.show()` function is executed. The `.title()` and `.xlabel()` functions provide some labeling for plot context.
#
# You will now use these functions to create a histogram of the trip times for the city you selected in question 4c. Don't separate the Subscribers and Customers for now: just collect all of the trip times and plot them.

# In[38]:

## Use this and additional cells to collect all of the trip times as a list ##
## and then use pyplot functions to generate a histogram of trip times.     ##

def trip_time(filename):
    city = filename.split('-')[0].split('/')[-1]
    with open(filename, 'r') as f_in:
        reader = csv.DictReader(f_in)
        data = []
        for row in reader:
            duration_data = duration_in_mins(row, city)
            data.append(duration_data)
        return data

file = './data/NYC-CitiBike-2016.csv'
duration_plot = trip_time(file)
plt.hist(duration_plot, bins=30)
plt.xlim(0, 3000)
plt.title('Trip Duration Of NYC')
plt.xlabel('Duration (m)')
plt.show()


# If you followed the use of the `.hist()` and `.show()` functions exactly like in the example, you're probably looking at a plot that's completely unexpected. The plot consists of one extremely tall bar on the left, maybe a very short second bar, and a whole lot of empty space in the center and right. Take a look at the duration values on the x-axis. This suggests that there are some highly infrequent outliers in the data. Instead of reprocessing the data, you will use additional parameters with the `.hist()` function to limit the range of data that is plotted. Documentation for the function can be found [[here]](https://matplotlib.org/devdocs/api/_as_gen/matplotlib.pyplot.hist.html#matplotlib.pyplot.hist).
#
# **Question 5**: Use the parameters of the `.hist()` function to plot the distribution of trip times for the Subscribers in your selected city. Do the same thing for only the Customers. Add limits to the plots so that only trips of duration less than 75 minutes are plotted. As a bonus, set the plots up so that bars are in five-minute wide intervals. For each group, where is the peak of each distribution? How would you describe the shape of each distribution?
#
# **Answer**:
#
# **SUBSCRIBER:** The distribution peaks between roughly 1 and 10 minutes of duration, covering nearly 10,000 trips; from that peak at the left, the bar heights fall away steadily toward the right of the graph, i.e. the distribution is right-skewed.
#
# **CUSTOMER:** The distribution peaks somewhere between 15 and 24 minutes of duration, covering nearly 8,000 trips; the counts rise moderately from the left side of the graph up to the peak and then fall away gradually toward the right.

# In[39]:

## Use this and additional cells to answer Question 5. ##

def trip_time(filename):
    city = filename.split('-')[0].split('/')[-1]
    with open(filename, 'r') as f_in:
        reader = csv.DictReader(f_in)
        subscriber = []
        customer = []
        for row in reader:
            duration_data = duration_in_mins(row, city)
            if duration_data >= 75:
                continue  # Question 5 asks for trips shorter than 75 minutes only
            if type_of_user(row, city) == 'Subscriber':
                subscriber.append(duration_data)
            else:
                customer.append(duration_data)
        return (subscriber, customer)


# In[40]:

file = './data/NYC-CitiBike-2016.csv'
subscriber, customer = trip_time(file)

# five-minute-wide bins over the 0-75 minute range, as asked for in the bonus
plt.hist(subscriber, bins=range(0, 80, 5))
plt.title('Trip Duration Of NYC Subscriber')
plt.xlabel('Duration (m)')
plt.show()

plt.hist(customer, bins=range(0, 80, 5))
plt.title('Trip Duration Of NYC Customer')
plt.xlabel('Duration (m)')
plt.show()


# <a id='eda_continued'></a>
# ## Performing Your Own Analysis
#
# So far, you've performed an initial exploration into the data available. You have compared the relative volume of trips made between three U.S. cities and the ratio of trips made by Subscribers and Customers. For one of these cities, you have investigated differences between Subscribers and Customers in terms of how long a typical trip lasts. Now it is your turn to continue the exploration in a direction that you choose. Here are a few suggestions for questions to explore:
#
# - How does ridership differ by month or season? Which month / season has the highest ridership? Does the ratio of Subscriber trips to Customer trips change depending on the month or season?
# - Is the pattern of ridership different on the weekends versus weekdays? On what days are Subscribers most likely to use the system? What about Customers? Does the average duration of rides change depending on the day of the week?
# - During what time of day is the system used the most? Is there a difference in usage patterns for Subscribers and Customers?
#
# If any of the questions you posed in your answer to question 1 align with the bullet points above, this is a good opportunity to investigate one of them. As part of your investigation, you will need to create a visualization. If you want to create something other than a histogram, then you might want to consult the [Pyplot documentation](https://matplotlib.org/devdocs/api/pyplot_summary.html). In particular, if you are plotting values across a categorical variable (e.g. city, user type), a bar chart will be useful. The [documentation page for `.bar()`](https://matplotlib.org/devdocs/api/_as_gen/matplotlib.pyplot.bar.html#matplotlib.pyplot.bar) includes links at the bottom of the page with examples for you to build off of for your own use.
#
# **Question 6**: Continue the investigation by exploring another question that could be answered by the data available. Document the question you want to explore below. Your investigation should involve at least two variables and should compare at least two groups. You should also use at least one visualization as part of your explorations.
#
# **Answer**:
# ***QUESTION:*** Is the pattern of ridership different on the weekends versus weekdays? On what days are Subscribers most likely to use the system? What about Customers? Does the average duration of rides change depending on the day of the week?
#
# ***ANSWER ANALYSIS:*** I analysed weekday versus weekend ridership for each user type, Subscribers and Customers. For the **1st** part of the question: yes, the ridership pattern differs between weekdays and weekends. For the **2nd** part: Subscribers ride more on weekdays than on weekends, and Customers likewise log more weekday rides than weekend rides. For the **3rd** part: yes, the average duration of rides changes depending on the day of the week.
# ***Conclusion:*** since weekday riders outnumber weekend riders in this visual analysis, allocating more bikes and maintenance to weekdays would improve service and support the business more effectively.

# In[41]:

## Use this and additional cells to continue to explore the dataset. ##
## Once you have performed your exploration, document your findings  ##
## in the Markdown cell above.                                       ##
import numpy as np

def type_user_analysis(filename):
    city = filename.split('-')[0].split('/')[-1]
    with open(filename, 'r') as f_in:
        reader = csv.DictReader(f_in)
        sub_week_day = []
        sub_weekend_days = []
        cust_week_day = []
        cust_weekend_days = []
        for rows in reader:
            week_days = time_of_trip(rows, city)[2]
            if type_of_user(rows, city) == 'Subscriber':
                if week_days == 'Saturday' or week_days == 'Sunday':
                    sub_weekend_days.append(week_days)
                else:
                    sub_week_day.append(week_days)
            else:
                if week_days == 'Saturday' or week_days == 'Sunday':
                    cust_weekend_days.append(week_days)
                else:
                    cust_week_day.append(week_days)
        return (sub_week_day, sub_weekend_days, cust_week_day, cust_weekend_days)


# In[42]:

def dayofweek_avg(filename):
    city = filename.split('-')[0].split('/')[-1]
    with open(filename, 'r') as f_in:
        trip = csv.DictReader(f_in)
        n_weekday = 0
        n_weekend = 0
        trip_weekday = 0
        trip_weekend = 0
        for row in trip:
            day = time_of_trip(row, city)[2]
            if day == 'Saturday' or day == 'Sunday':
                n_weekend += 1
                trip_weekend += duration_in_mins(row, city)
            else:
                n_weekday += 1
                trip_weekday += duration_in_mins(row, city)
        # average each group over its own trip count, not over all trips,
        # so the two averages are directly comparable
        weekday_avg = trip_weekday / n_weekday
        weekend_avg = trip_weekend / n_weekend
        return (weekday_avg, weekend_avg)


# In[43]:

file = './data/NYC-CitiBike-2016.csv'
sub_weekdays, sub_weekends, cust_weekdays, cust_weekends = type_user_analysis(file)
weekday_avg, weekend_avg = dayofweek_avg(file)

subscriber_weekdays = len(sub_weekdays)
subscriber_weekends = len(sub_weekends)
customer_weekdays = len(cust_weekdays)
customer_weekends = len(cust_weekends)

sub_object = ('weekdays', 'weekends')
subscriber = [subscriber_weekdays, subscriber_weekends]
y_pos1 = np.arange(len(sub_object))
plt.bar(y_pos1, subscriber)
plt.title("Subscriber Usage")
plt.xticks(y_pos1, sub_object)
plt.ylabel('Number of Trips')
plt.show()

cust_object = ('weekdays', 'weekends')
customer = [customer_weekdays, customer_weekends]
y_pos2 = np.arange(len(cust_object))
plt.bar(y_pos2, customer)
plt.title("Customer Usage")
plt.xticks(y_pos2, cust_object)
plt.ylabel('Number of Trips')
plt.show()

avg_object = ('weekdays', 'weekends')
avg = [weekday_avg, weekend_avg]
y_pos3 = np.arange(len(avg_object))
plt.bar(y_pos3, avg)
plt.title("Average Trip Duration")
plt.xticks(y_pos3, avg_object)
plt.ylabel('Average Duration (m)')
plt.show()


# <a id='conclusions'></a>
# ## Conclusions
#
# Congratulations on completing the project!
# This is only a sampling of the data analysis process: from generating questions, to wrangling the data, to exploring the data. Normally, at this point in the data analysis process, you might want to draw conclusions about the data by performing a statistical test or fitting the data to a model for making predictions. There are also a lot of potential analyses that could be performed on the data which are not possible with only the data provided. For example, detailed location data has not been investigated. Where are the most commonly used docks? What are the most common routes? As another example, weather has potential to have a large impact on daily ridership. How much is ridership impacted when there is rain or snow? Are subscribers or customers affected more by changes in weather?
#
# **Question 7**: Putting the bike share data aside, think of a topic or field of interest where you would like to be able to apply the techniques of data science. What would you like to be able to learn from your chosen subject?
#
# **Answer**: I would like to apply these techniques at product-based companies: analysing which products attract customers the most, and studying the marketing strategies behind those products so the same strategies can help other products reach customers in a similar way, much like a recommender system, for more effective business development.
#
# > **Tip**: If we want to share the results of our analysis with others, we aren't limited to giving them a copy of the jupyter Notebook (.ipynb) file. We can also export the Notebook output in a form that can be opened even for those without Python installed. From the **File** menu in the upper left, go to the **Download as** submenu. You can then choose a different format that can be viewed more generally, such as HTML (.html) or PDF (.pdf). You may need additional packages or software to perform these exports.

# Documents & Websites Used:
# - [Python Functions](https://docs.python.org/3/library/functions.html)
# - [Built-in Constants](https://docs.python.org/3/library/constants.html)
# - [Stack Overflow](https://stackoverflow.com/)
# - [Tutorials Point](https://www.tutorialspoint.com)
# - [Matplotlib Bar Charts](https://plot.ly/matplotlib/bar-charts/)
# - [Python Standard Library](https://docs.python.org/2/library/index.html)
#

# In[ ]:
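# The Conclusions above mention following up with a statistical test. As a
# minimal, hedged sketch of one such test (the trip counts below are
# placeholders, not values computed from the real data files), here is a
# two-proportion z-test comparing the Subscriber share of trips between two
# cities, using only the standard library:

import math

def two_proportion_z(success_a, n_a, success_b, n_b):
    """z statistic for H0: the two underlying proportions are equal."""
    p_a, p_b = success_a / n_a, success_b / n_b
    pooled = (success_a + success_b) / (n_a + n_b)      # pooled proportion under H0
    se = math.sqrt(pooled * (1 - pooled) * (1 / n_a + 1 / n_b))
    return (p_a - p_b) / se

# placeholder counts: (subscriber trips, total trips) for two hypothetical cities
z = two_proportion_z(8000, 10000, 6500, 9000)
print(round(z, 2))   # |z| > 1.96 would reject equal proportions at the 5% level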
Hiteshsaai/BikeShare_DataAnalysis
Bike_Share_Analysis.py
Python
mit
39,685
[ "VisIt" ]
585e653232a4b871d1cc3c548aee05c0e9d28be7284a9b029c9b86347496924f
import numpy as np
import numpy.linalg as la
from pysal.spreg.utils import RegressionPropsY, spdot
import pysal.spreg.user_output as USER
from utils import cache_readonly
from base import LikelihoodModelResults
import family
from iwls import iwls

__all__ = ['GLM']


class GLM(RegressionPropsY):
    """
    Generalised linear models. Can currently estimate Gaussian, Poisson and
    logistic regression coefficients. The GLM object prepares the model input
    and the fit method performs estimation, which then returns a GLMResults
    object.

    Parameters
    ----------
    y             : array
                    n*1, dependent variable.
    X             : array
                    n*k, independent variables, excluding the constant.
    family        : string
                    Model type: 'Gaussian', 'Poisson', 'Binomial'

    Attributes
    ----------
    y             : array
                    n*1, dependent variable.
    X             : array
                    n*k, independent variables, including constant.
    family        : string
                    Model type: 'Gaussian', 'Poisson', 'Logistic'
    n             : integer
                    Number of observations
    k             : integer
                    Number of independent variables
    df_model      : float
                    k-1, where k is the number of variables (including
                    intercept)
    df_residual   : float
                    observations minus variables (n-k)
    mean_y        : float
                    Mean of y
    std_y         : float
                    Standard deviation of y
    fit_params    : dict
                    Parameters passed into fit method to define estimation
                    routine.
    normalized_cov_params : array
                    k*k, approximates [X.T*X]-1
    """
    def __init__(self, y, X, family=family.Gaussian(), constant=True):
        """
        Initialize class
        """
        self.n = USER.check_arrays(y, X)
        USER.check_y(y, self.n)
        self.y = y
        if constant:
            self.X = USER.check_constant(X)
        else:
            self.X = X
        self.family = family
        self.k = self.X.shape[1]
        self.fit_params = {}

    def fit(self, ini_betas=None, tol=1.0e-6, max_iter=200, solve='iwls'):
        """
        Method that fits a model with a particular estimation routine.

        Parameters
        ----------
        ini_betas : array
                    k*1, initial coefficient values, including constant.
                    Default is None, which calculates initial values during
                    estimation.
        tol       : float
                    Tolerance for estimation convergence.
        max_iter  : integer
                    Maximum number of iterations if convergence not achieved.
        solve     : string
                    Technique to solve MLE equations.
                    'iwls' = iteratively (re)weighted least squares (default)
        """
        self.fit_params['ini_betas'] = ini_betas
        self.fit_params['tol'] = tol
        self.fit_params['max_iter'] = max_iter
        self.fit_params['solve'] = solve
        if solve.lower() == 'iwls':
            params, predy, w, n_iter = iwls(self.y, self.X, self.family,
                                            ini_betas=ini_betas, tol=tol,
                                            max_iter=max_iter)
            self.fit_params['n_iter'] = n_iter
        return GLMResults(self, params.flatten(), predy, w)

    @cache_readonly
    def df_model(self):
        return self.X.shape[1] - 1

    @cache_readonly
    def df_resid(self):
        return self.n - self.df_model - 1


class GLMResults(LikelihoodModelResults):
    """
    Results of an estimated GLM and diagnostics.

    Parameters
    ----------
    model         : GLM object
                    Pointer to GLM object with estimation parameters.
    params        : array
                    k*1, estimated coefficients
    mu            : array
                    n*1, predicted y values.
    w             : array
                    n*1, final weight used for iwls

    Attributes
    ----------
    model         : GLM Object
                    Points to GLM object for which parameters have been
                    estimated.
    y             : array
                    n*1, dependent variable.
    x             : array
                    n*k, independent variables, including constant.
    family        : string
                    Model type: 'Gaussian', 'Poisson', 'Logistic'
    n             : integer
                    Number of observations
    k             : integer
                    Number of independent variables
    df_model      : float
                    k-1, where k is the number of variables (including
                    intercept)
    df_residual   : float
                    observations minus variables (n-k)
    fit_params    : dict
                    parameters passed into fit method to define estimation
                    routine.
    scale         : float
                    sigma squared used for subsequent computations.
    params        : array
                    k*1, estimated beta coefficients
    w             : array
                    n*1, final weight values of x
    mu            : array
                    n*1, predicted value of y (i.e., fittedvalues)
    cov_params    : array
                    Variance covariance matrix (kxk) of betas which has been
                    appropriately scaled by sigma-squared
    bse           : array
                    k*1, standard errors of betas
    pvalues       : array
                    k*1, two-tailed pvalues of parameters
    tvalues       : array
                    k*1, the tvalues of the standard errors
    null          : array
                    n*1, predicted values of y for null model
    deviance      : float
                    value of the deviance function evaluated at params;
                    see family.py for distribution-specific deviance
    null_deviance : float
                    value of the deviance function for the model fit with
                    a constant as the only regressor
    llf           : float
                    value of the loglikelihood function evaluated at params;
                    see family.py for distribution-specific loglikelihoods
    llnull        : float
                    value of the log-likelihood function evaluated at the null
    aic           : float
                    AIC
    bic           : float
                    BIC
    D2            : float
                    percent deviance explained
    adj_D2        : float
                    adjusted percent deviance explained
    pseudo_R2     : float
                    McFadden's pseudo R2 (coefficient of determination)
    adj_pseudoR2  : float
                    adjusted McFadden's pseudo R2
    resid_response : array
                    response residuals; defined as y-mu
    resid_pearson  : array
                    Pearson residuals; defined as (y-mu)/sqrt(VAR(mu))
                    where VAR is the distribution-specific variance function;
                    see family.py and varfuncs.py for more information.
    resid_working  : array
                    Working residuals; the working residuals are defined as
                    resid_response/link'(mu); see links.py for the derivatives
                    of the link functions.
    resid_anscombe : array
                    Anscombe residuals; see family.py for distribution-specific
                    Anscombe residuals.
    resid_deviance : array
                    deviance residuals; see family.py for distribution-specific
                    deviance residuals.
    pearson_chi2   : float
                    chi-squared statistic, defined as the sum of the squares
                    of the Pearson residuals
    normalized_cov_params : array
                    k*k, approximates [X.T*X]-1
    """
    def __init__(self, model, params, mu, w):
        self.model = model
        self.n = model.n
        self.y = model.y.T.flatten()
        self.X = model.X
        self.k = model.k
        self.family = model.family
        self.fit_params = model.fit_params
        self.params = params
        self.w = w
        self.mu = mu.flatten()
        self._cache = {}

    @cache_readonly
    def df_model(self):
        return self.model.df_model

    @cache_readonly
    def df_resid(self):
        return self.model.df_resid

    @cache_readonly
    def normalized_cov_params(self):
        return la.inv(spdot(self.w.T, self.w))

    @cache_readonly
    def resid_response(self):
        return (self.y - self.mu)

    @cache_readonly
    def resid_pearson(self):
        return ((self.y - self.mu) /
                np.sqrt(self.family.variance(self.mu)))

    @cache_readonly
    def resid_working(self):
        return (self.resid_response / self.family.link.deriv(self.mu))

    @cache_readonly
    def resid_anscombe(self):
        return (self.family.resid_anscombe(self.y, self.mu))

    @cache_readonly
    def resid_deviance(self):
        return (self.family.resid_dev(self.y, self.mu))

    @cache_readonly
    def pearson_chi2(self):
        chisq = (self.y - self.mu)**2 / self.family.variance(self.mu)
        chisqsum = np.sum(chisq)
        return chisqsum

    @cache_readonly
    def null(self):
        y = np.reshape(self.y, (-1, 1))
        model = self.model
        X = np.ones((len(y), 1))
        null_mod = GLM(y, X, family=self.family, constant=False)
        return null_mod.fit().mu

    @cache_readonly
    def scale(self):
        if isinstance(self.family, (family.Binomial, family.Poisson)):
            return 1.
else: return (((np.power(self.resid_response, 2) / self.family.variance(self.mu))).sum() / (self.df_resid)) @cache_readonly def deviance(self): return self.family.deviance(self.y, self.mu) @cache_readonly def null_deviance(self): return self.family.deviance(self.y, self.null) @cache_readonly def llnull(self): return self.family.loglike(self.y, self.null, scale=self.scale) @cache_readonly def llf(self): return self.family.loglike(self.y, self.mu, scale=self.scale) @cache_readonly def aic(self): if isinstance(self.family, family.QuasiPoisson): return np.nan else: return -2 * self.llf + 2*(self.df_model+1) @cache_readonly def bic(self): return (self.deviance - (self.model.n - self.df_model - 1) * np.log(self.model.n)) @cache_readonly def D2(self): return 1 - (self.deviance / self.null_deviance) @cache_readonly def adj_D2(self): return 1.0 - (float(self.n) - 1.0)/(float(self.n) - float(self.k)) * (1.0-self.D2) @cache_readonly def pseudoR2(self): return 1 - (self.llf/self.llnull) @cache_readonly def adj_pseudoR2(self): return 1 - ((self.llf-self.k)/self.llnull)
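# A minimal, hedged usage sketch for the GLM class above, assuming the sibling
# family/iwls modules are importable and that this file is importable as `glm`
# (its file name in this repo); the true coefficients 1.0 and 0.5 are made up
# for the example:

import numpy as np
import family
from glm import GLM

np.random.seed(0)
X = np.random.uniform(0, 1, (200, 1))               # one covariate; GLM adds the constant
lam = np.exp(1.0 + 0.5 * X[:, 0])                   # log link: E[y] = exp(b0 + b1*x)
y = np.random.poisson(lam).reshape(-1, 1)

results = GLM(y, X, family=family.Poisson()).fit()  # IWLS estimation
print(results.params)                               # should land near [1.0, 0.5]
print(results.aic, results.D2)                      # diagnostics defined above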
CartoDB/crankshaft
release/python/0.8.2/crankshaft/crankshaft/regression/glm/glm.py
Python
bsd-3-clause
11,848
[ "Gaussian" ]
3d6f294a3da69a5ca48b623a1f27bb57827a01852bbc50258078237eb2e5f2d6
'''Tools for working with files in the samtools pileup -c format.'''

try:
    from collections import namedtuple
except:
    from namedtuple import namedtuple

import pysam

PileupSubstitution = namedtuple("PileupSubstitution",
                                " ".join((
                                    "chromosome",
                                    "pos",
                                    "reference_base",
                                    "genotype",
                                    "consensus_quality",
                                    "snp_quality",
                                    "mapping_quality",
                                    "coverage",
                                    "read_bases",
                                    "base_qualities")))

PileupIndel = namedtuple("PileupIndel",
                         " ".join((
                             "chromosome",
                             "pos",
                             "reference_base",
                             "genotype",
                             "consensus_quality",
                             "snp_quality",
                             "mapping_quality",
                             "coverage",
                             "first_allele",
                             "second_allele",
                             "reads_first",
                             "reads_second",
                             "reads_diff")))


def iterate(infile):
    '''iterate over a ``samtools pileup -c`` formatted file.

    *infile* can be any iterable of lines. The function yields named
    tuples of the type :class:`pysam.Pileup.PileupSubstitution` or
    :class:`pysam.Pileup.PileupIndel`.

    .. note::
       The parser converts to 0-based coordinates
    '''

    conv_subst = (str, lambda x: int(x) - 1, str, str, int, int, int, int, str, str)
    conv_indel = (str, lambda x: int(x) - 1, str, str, int, int, int, int, str, str, int, int, int)

    for line in infile:
        d = line[:-1].split()
        if d[2] == "*":
            try:
                yield PileupIndel(*[x(y) for x, y in zip(conv_indel, d)])
            except TypeError:
                raise pysam.SamtoolsError("parsing error in line: `%s`" % line)
        else:
            try:
                yield PileupSubstitution(*[x(y) for x, y in zip(conv_subst, d)])
            except TypeError:
                raise pysam.SamtoolsError("parsing error in line: `%s`" % line)
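# A short, hedged usage sketch for `iterate` above. The pileup line is a
# fabricated example in the 10-column ``samtools pileup -c`` substitution
# layout, not real data; the module is addressed as `pysam.Pileup`, matching
# the docstring references.

from pysam import Pileup

lines = ["chr1\t100\tA\tG\t40\t40\t60\t12\tGGggGGggGGgg\tIIIIIIIIIIII\n"]
for rec in Pileup.iterate(lines):
    # `pos` has been converted to 0-based, so 100 in the file becomes 99 here
    print(rec.chromosome, rec.pos, rec.reference_base, rec.genotype, rec.coverage)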
mikel-egana-aranguren/SADI-Galaxy-Docker
galaxy-dist/eggs/pysam-0.4.2_kanwei_b10f6e722e9a-py2.7-linux-x86_64-ucs4.egg/pysam/Pileup.py
Python
gpl-3.0
2,017
[ "pysam" ]
06093ddfb38629a1883cb6cf27e4b4278d6d178c2cea29a38e25930c77e2239a
import numpy as np
import scipy as sp
from peri import runner, util
from peri.viz.plots import generative_model
import pickle

import matplotlib.pyplot as pl
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.axes_grid1 import ImageGrid
from matplotlib.patches import Circle, Rectangle

def sample_center_particle(state):
    # the original read `state.closet_particle`; `closest_particle` is
    # presumably what was intended
    cind = state.closest_particle(np.array(state.image.shape)/2)

    blocks = state.blocks_particle(cind)
    hxy = runner.sample_state(state, blocks[1:3], N=5000, doprint=True)
    hr = runner.sample_state(state, [blocks[-1]], N=5000, doprint=True)

    z = state.state[blocks[0]]
    # the original referenced an undefined name `hh`; the xy sample `hxy`
    # computed above is presumably what was intended
    y, x = hxy.get_histogram().T
    # the original returned `r` without ever assigning it; take it from the
    # radius sample, assuming the same get_histogram interface
    r = hr.get_histogram()
    return x, y, z, r

def load():
    s,h,l = pickle.load(open('/media/scratch/bamf/crystal-fcc/crystal_fcc.tif_t001.tif-fit-gaussian-4d.pkl'))
    x,y,z,r = np.load('/media/scratch/bamf/crystal-fcc/crystal_fcc.tif_t001.tif-fit-gaussian-4d-sample-xyzr.npy').T

    x -= s.pad
    y -= s.pad
    return s,x,y,z,r

def dorun():
    generative_model(*load())
peri-source/peri
scripts/figures/generative-model.py
Python
mit
1,123
[ "CRYSTAL", "Gaussian" ]
3f4cd3e325a9c32ed6eedaae4e9c02fbc9b72ada7cbaa1754c377a2fba429911
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

"""
This module provides classes to interface with the Crystallography Open
Database. If you use data from the COD, please cite the following works (as
stipulated by the COD developers)::

    Merkys, A., Vaitkus, A., Butkus, J., Okulič-Kazarinas, M., Kairys, V. &
    Gražulis, S. (2016) "COD::CIF::Parser: an error-correcting CIF parser for
    the Perl language". Journal of Applied Crystallography 49.

    Gražulis, S., Merkys, A., Vaitkus, A. & Okulič-Kazarinas, M. (2015)
    "Computing stoichiometric molecular composition from crystal structures".
    Journal of Applied Crystallography 48, 85-91.

    Gražulis, S., Daškevič, A., Merkys, A., Chateigner, D., Lutterotti, L.,
    Quirós, M., Serebryanaya, N. R., Moeck, P., Downs, R. T. & LeBail, A.
    (2012) "Crystallography Open Database (COD): an open-access collection of
    crystal structures and platform for world-wide collaboration". Nucleic
    Acids Research 40, D420-D427.

    Grazulis, S., Chateigner, D., Downs, R. T., Yokochi, A. T., Quiros, M.,
    Lutterotti, L., Manakova, E., Butkus, J., Moeck, P. & Le Bail, A. (2009)
    "Crystallography Open Database – an open-access collection of crystal
    structures". J. Appl. Cryst. 42, 726-729.

    Downs, R. T. & Hall-Wallace, M. (2003) "The American Mineralogist Crystal
    Structure Database". American Mineralogist 88, 247-250.
"""

import requests
import subprocess
from monty.dev import requires
from monty.os.path import which
import re

from pymatgen.core.composition import Composition
from pymatgen.core.structure import Structure

__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"


class COD:
    """
    An interface to the Crystallography Open Database.
    """

    def __init__(self):
        """
        Blank __init__. No args required.
        """
        pass

    def query(self, sql: str) -> str:
        """
        Perform a query.

        :param sql: SQL string
        :return: Response from SQL query.
        """
        r = subprocess.check_output(["mysql", "-u", "cod_reader", "-h",
                                     "www.crystallography.net", "-e", sql,
                                     "cod"])
        return r.decode("utf-8")

    @requires(which("mysql"), "mysql must be installed to use this query.")
    def get_cod_ids(self, formula):
        """
        Queries the COD for all cod ids associated with a formula. Requires
        mysql executable to be in the path.

        Args:
            formula (str): Formula.

        Returns:
            List of cod ids.
        """
        # TODO: Remove dependency on external mysql call. MySQL-python package does not support Py3!

        # Standardize formula to the version used by COD.
        sql = 'select file from data where formula="- %s -"' % \
            Composition(formula).hill_formula
        text = self.query(sql).split("\n")
        cod_ids = []
        for l in text:
            m = re.search(r"(\d+)", l)
            if m:
                cod_ids.append(int(m.group(1)))
        return cod_ids

    def get_structure_by_id(self, cod_id, **kwargs):
        """
        Queries the COD for a structure by id.

        Args:
            cod_id (int): COD id.
            kwargs: All kwargs supported by
                :func:`pymatgen.core.structure.Structure.from_str`.

        Returns:
            A Structure.
        """
        r = requests.get("http://www.crystallography.net/cod/%s.cif" % cod_id)
        return Structure.from_str(r.text, fmt="cif", **kwargs)

    @requires(which("mysql"), "mysql must be installed to use this query.")
    def get_structure_by_formula(self, formula, **kwargs):
        """
        Queries the COD for structures by formula. Requires mysql executable
        to be in the path.

        Args:
            formula (str): Chemical formula.
            kwargs: All kwargs supported by
                :func:`pymatgen.core.structure.Structure.from_str`.
Returns: A list of dict of the format [{"structure": Structure, "cod_id": cod_id, "sg": "P n m a"}] """ structures = [] sql = 'select file, sg from data where formula="- %s -"' % \ Composition(formula).hill_formula text = self.query(sql).split("\n") text.pop(0) for l in text: if l.strip(): cod_id, sg = l.split("\t") r = requests.get("http://www.crystallography.net/cod/%s.cif" % cod_id.strip()) try: s = Structure.from_str(r.text, fmt="cif", **kwargs) structures.append({"structure": s, "cod_id": int(cod_id), "sg": sg}) except Exception: import warnings warnings.warn("\nStructure.from_str failed while parsing CIF file:\n%s" % r.text) raise return structures
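# A brief, hedged usage sketch for the COD interface above. It requires network
# access, and the formula query additionally needs the mysql client on PATH;
# the COD id below is a placeholder, not a verified database entry.

from pymatgen.ext.cod import COD

cod = COD()
structure = cod.get_structure_by_id(1010064)   # any valid COD id works here
print(structure.composition)

ids = cod.get_cod_ids("SiO2")                  # needs the mysql executable on PATH
print(len(ids), "COD entries found for SiO2")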
mbkumar/pymatgen
pymatgen/ext/cod.py
Python
mit
5,175
[ "CRYSTAL", "pymatgen" ]
deeb00b0bb59c855c23a766835e103d14d1133ff1a7a39c21e2f021a711220fd
"""Tests for indenter.py.""" # Author: Prabhu Ramachandran # License: BSD style # Copyright (c) 2004, Enthought, Inc. import unittest import cStringIO from tvtk import indenter class TestIndent(unittest.TestCase): def test_basic(self): """Simple tests for indenter.""" id = indenter.Indent() self.assertEqual(str(id), '') id.incr() self.assertEqual(str(id), ' ') id.incr() self.assertEqual(str(id), ' ') id.decr() self.assertEqual(str(id), ' ') id.decr() self.assertEqual(str(id), '') id.incr(); id.incr() id.reset() self.assertEqual(str(id), '') def test_format(self): """Tests if formatting works ok.""" id = indenter.Indent() id.incr() # test one liner with trailing newlines txt = """class foo:\n\n \n \n""" t1 = id.format(txt) self.assertEqual(t1, ' class foo:\n') # test one liner with no trailing newline. txt = """class foo:""" t1 = id.format(txt) self.assertEqual(t1, ' class foo:\n') # test multi-line text. txt = """print "hi!" if name == 'hi': print "hi, hi!" """ res = """ print "hi!"\n if name == 'hi':\n print "hi, hi!"\n""" self.assertEqual(id.format(txt), res) txt = """ class Foo: def __init__(self): pass def _get_a(self): return self._a""" res = """ class Foo: def __init__(self): pass def _get_a(self): return self._a""" + '\n' self.assertEqual(id.format(txt), res) class TestVTKDocMassager(unittest.TestCase): def test_doc_massage(self): """Test massage method.""" doc = "This is a test. All VTK classes and vtk classes\n"\ "are named like this: vtkActor, vtkLODProperty,\n"\ "vtkXMLDataReader, vtk3DSImporter etc. The methods \n"\ "of a VTK object are like GetData, GetOutput, \n"\ "SetRepresentationToWireframe. Ivars are named like\n"\ "SpecularColor, Write3DPropsAsRasterImage etc." ret = "This is a test. All VTK classes and vtk classes\n"\ "are named like this: Actor, LODProperty,\n"\ "XMLDataReader, ThreeDSImporter etc. The methods \n"\ "of a VTK object are like get_data, get_output, \n"\ "set_representation_to_wireframe. Ivars are named like\n"\ "specular_color, write3d_props_as_raster_image etc." 
dm = indenter.VTKDocMassager() self.assertEqual(dm.massage(doc), ret) def test_rename_class(self): """Test if VTK classes are renamed correctly.""" dm = indenter.VTKDocMassager() t = 'vtkFooBar vtkXMLDataReader vtk3DSReader vtk2000Bug' r = dm._rename_class(t) correct = 'FooBar XMLDataReader ThreeDSReader Two000Bug' self.assertEqual(r, correct) def test_remove_sig(self): """Test if function signature is removed correctly.""" dm = indenter.VTKDocMassager() t = 'V.GetOutput(int) -> vtkStructuredPoints\n'\ 'C++: vtkStructuredPoints *GetOutput (int idx);\n'\ 'V.GetOutput() -> vtkStructuredPoints\n'\ 'C++: vtkStructuredPoints *GetOutput ();\n\n'\ ' Set/Get the output of this reader.\n' r = dm._remove_sig(t) correct = ' Set/Get the output of this reader.\n' self.assertEqual(r, correct) t = 'V.GetOutput(int) -> vtkStructuredPoints\n'\ 'C++: vtkStructuredPoints *GetOutput (int idx);\n'\ 'V.GetOutput() -> vtkStructuredPoints\n'\ 'C++: vtkStructuredPoints *GetOutput ();\n\n' r = dm._remove_sig(t) correct = '' self.assertEqual(r, correct) def test_class_doc(self): """Test if class docs are generated correctly.""" dm = indenter.VTKDocMassager() indent = indenter.Indent() out = cStringIO.StringIO() doc = "vtkLODProperty, vtkXMLDataReader, vtk3DSImporter\n"\ "SetRepresentationToWireframe, Write3DPropsAsRasterImage" dm.write_class_doc(doc, out, indent) out.seek(0) ret = out.read() correct = ''' """ LODProperty, XMLDataReader, ThreeDSImporter set_representation_to_wireframe, write3d_props_as_raster_image """\n''' #print ret #print correct self.assertEqual(ret, correct) # Test empty doc out = cStringIO.StringIO() doc = "" dm.write_class_doc(doc, out, indent) out.seek(0) ret = out.read() self.assertEqual(ret, ' """\n \n """\n') def test_trait_doc(self): """Test if trait docs are generated correctly.""" dm = indenter.VTKDocMassager() indent = indenter.Indent() out = cStringIO.StringIO() doc = 'V.GetOutput(int) -> vtkStructuredPoints\n'\ 'C++: vtkStructuredPoints *GetOutput (int idx);\n'\ 'V.GetOutput() -> vtkStructuredPoints\n'\ 'C++: vtkStructuredPoints *GetOutput ();\n\n'\ 'vtkLODProperty, vtkXMLDataReader, vtk3DSImporter\n'\ 'SetRepresentationToWireframe, Write3DPropsAsRasterImage' dm.write_trait_doc(doc, out, indent) out.seek(0) ret = out.read() correct = ''' """ LODProperty, XMLDataReader, ThreeDSImporter set_representation_to_wireframe, write3d_props_as_raster_image """\n''' #print ret #print correct self.assertEqual(ret, correct) # Test empty doc. 
out = cStringIO.StringIO() doc = 'V.GetOutput(int) -> vtkStructuredPoints\n'\ 'C++: vtkStructuredPoints *GetOutput (int idx);\n'\ 'V.GetOutput() -> vtkStructuredPoints\n'\ 'C++: vtkStructuredPoints *GetOutput ();\n\n' dm.write_trait_doc(doc, out, indent) out.seek(0) ret = out.read() self.assertEqual(ret, ' """\n \n """\n') def test_method_doc(self): """Test if method docs are generated correctly.""" dm = indenter.VTKDocMassager() indent = indenter.Indent() out = cStringIO.StringIO() doc = 'V.GetOutput(int) -> vtkStructuredPoints\n'\ 'C++: vtkStructuredPoints *GetOutput (int idx);\n'\ 'V.GetOutput() -> vtkStructuredPoints\n'\ 'C++: vtkStructuredPoints *GetOutput ();\n\n'\ 'vtkLODProperty, vtkXMLDataReader, vtk3DSImporter\n'\ 'SetRepresentationToWireframe, Write3DPropsAsRasterImage' dm.write_method_doc(doc, out, indent) out.seek(0) ret = out.read() correct = ''' """ V.get_output(int) -> StructuredPoints V.get_output() -> StructuredPoints LODProperty, XMLDataReader, ThreeDSImporter set_representation_to_wireframe, write3d_props_as_raster_image """\n''' #print ret #print correct self.assertEqual(ret, correct) # Test empty doc. out = cStringIO.StringIO() doc = 'V.GetOutput(int) -> vtkStructuredPoints\n'\ 'C++: vtkStructuredPoints *GetOutput (int idx);\n'\ 'V.GetOutput() -> vtkStructuredPoints\n'\ 'C++: vtkStructuredPoints *GetOutput ();\n\n' dm.write_method_doc(doc, out, indent) out.seek(0) ret = out.read() correct = ''' """ V.get_output(int) -> StructuredPoints V.get_output() -> StructuredPoints """\n''' #print ret #print correct self.assertEqual(ret, correct) def test_get_method_doc(self): """Test if get_method_doc works correctly.""" dm = indenter.VTKDocMassager() doc = 'V.GetOutput(int) -> vtkStructuredPoints\n'\ 'C++: vtkStructuredPoints *GetOutput (int idx);\n'\ 'V.GetOutput() -> vtkStructuredPoints\n'\ 'C++: vtkStructuredPoints *GetOutput ();\n\n'\ 'vtkLODProperty, vtkXMLDataReader, vtk3DSImporter\n'\ 'SetRepresentationToWireframe, Write3DPropsAsRasterImage' ret = dm.get_method_doc(doc) correct = 'V.get_output(int) -> StructuredPoints\n'\ 'V.get_output() -> StructuredPoints\n\n'\ 'LODProperty, XMLDataReader, ThreeDSImporter\n'\ 'set_representation_to_wireframe, '\ 'write3d_props_as_raster_image' #print ret #print correct self.assertEqual(ret, correct) # Test empty doc (only signature exists). doc = 'V.GetOutput(int) -> vtkStructuredPoints\n'\ 'C++: vtkStructuredPoints *GetOutput (int idx);\n'\ 'V.GetOutput() -> vtkStructuredPoints\n'\ 'C++: vtkStructuredPoints *GetOutput ();\n\n' ret = dm.get_method_doc(doc) correct = 'V.get_output(int) -> StructuredPoints\n'\ 'V.get_output() -> StructuredPoints\n' self.assertEqual(ret, correct) if __name__ == "__main__": unittest.main()
liulion/mayavi
tvtk/tests/test_indenter.py
Python
bsd-3-clause
9,233
[ "VTK" ]
9d423ce83e2e73079b5bf613629e73809c9355af1f1b1f7af0897dc3e4c6973c
""" Opendrift module .. currentmodule:: opendrift """ import logging; logger = logging.getLogger(__name__) import unittest import importlib import platform import numpy as np import time from datetime import timedelta from .version import __version__ # For automated access to available drift classes, e.g. for GUI # Hardcoded for now _available_models = \ ['leeway.Leeway', 'openoil.OpenOil', 'larvalfish.LarvalFish', 'plastdrift.PlastDrift', 'shipdrift.ShipDrift', 'openberg.OpenBerg'] def get_model_names(): return [m.split('.')[-1] for m in _available_models] def get_model(model_name): if model_name not in get_model_names(): raise ValueError('No drift model named %s' % model_name) else: for m in _available_models: if m.split('.')[-1] == model_name: module = importlib.import_module( 'opendrift.models.' + m.split('.')[0]) model = getattr(module, model_name) return model def open(filename, times=None, elements=None, load_history=True): '''Import netCDF output file as OpenDrift object of correct class''' import os import pydoc from netCDF4 import Dataset if not os.path.exists(filename): logger.info('File does not exist, trying to retrieve from URL') import urllib try: urllib.urlretrieve(filename, 'opendrift_tmp.nc') filename = 'opendrift_tmp.nc' except: raise ValueError('%s does not exist' % filename) n = Dataset(filename) try: module_name = n.opendrift_module class_name = n.opendrift_class except: raise ValueError(filename + ' does not contain ' 'necessary global attributes ' 'opendrift_module and opendrift_class') n.close() if class_name == 'OpenOil3D': class_name = 'OpenOil' module_name = 'opendrift.models.openoil' if class_name == 'OceanDrift3D': class_name = 'OceanDrift' module_name = 'opendrift.models.oceandrift' cls = pydoc.locate(module_name + '.' + class_name) if cls is None: from opendrift.models import oceandrift cls = oceandrift.OceanDrift o = cls() o.io_import_file(filename, times=times, elements=elements, load_history=load_history) logger.info('Returning ' + str(type(o)) + ' object') return o def open_xarray(filename, chunks={'trajectory': 50000, 'time': 1000}): '''Import netCDF output file as OpenDrift object of correct class''' import os import pydoc import xarray as xr if not os.path.exists(filename): logger.info('File does not exist, trying to retrieve from URL') import urllib try: urllib.urlretrieve(filename, 'opendrift_tmp.nc') filename = 'opendrift_tmp.nc' except: raise ValueError('%s does not exist' % filename) n = xr.open_dataset(filename) try: module_name = n.opendrift_module class_name = n.opendrift_class except: raise ValueError(filename + ' does not contain ' 'necessary global attributes ' 'opendrift_module and opendrift_class') n.close() if class_name == 'OpenOil3D': class_name = 'OpenOil' module_name = 'opendrift.models.openoil' if class_name == 'OceanDrift3D': class_name = 'OceanDrift' module_name = 'opendrift.models.oceandrift' cls = pydoc.locate(module_name + '.' 
    if cls is None:
        from opendrift.models import oceandrift
        cls = oceandrift.OceanDrift
    o = cls()
    o.io_import_file_xarray(filename, chunks=chunks)
    logger.info('Returning ' + str(type(o)) + ' object')
    return o

def versions():
    import multiprocessing
    import platform
    import scipy
    import matplotlib
    import netCDF4
    import xarray
    import sys
    s = '\n------------------------------------------------------\n'
    s += 'Software and hardware:\n'
    s += '  OpenDrift version %s\n' % __version__
    s += '  Platform: %s, %s\n' % (platform.system(), platform.release())
    try:
        from psutil import virtual_memory
        ram = virtual_memory().total/(1024**3)
    except:
        ram = 'unknown'
    s += '  %s GB memory\n' % ram
    s += '  %s processors (%s)\n' % (multiprocessing.cpu_count(),
                                     platform.processor())
    s += '  NumPy version %s\n' % np.__version__
    s += '  SciPy version %s\n' % scipy.__version__
    s += '  Matplotlib version %s\n' % matplotlib.__version__
    s += '  NetCDF4 version %s\n' % netCDF4.__version__
    s += '  Xarray version %s\n' % xarray.__version__
    s += '  Python version %s\n' % sys.version.replace('\n', '')
    s += '------------------------------------------------------\n'
    return s

def import_from_ladim(ladimfile, romsfile):
    """Import Ladim output file as OpenDrift simulation object"""

    # was `from models.oceandrift import OceanDrift`, which fails as an
    # absolute import; use the package-qualified path, consistent with the
    # other imports in this file
    from opendrift.models.oceandrift import OceanDrift
    o = OceanDrift()
    from netCDF4 import Dataset, date2num, num2date
    if isinstance(romsfile, str):
        from opendrift.readers import reader_ROMS_native
        romsfile = reader_ROMS_native.Reader(romsfile)
    l = Dataset(ladimfile, 'r')
    pid = l.variables['pid'][:]
    particle_count = l.variables['particle_count'][:]
    end_index = np.cumsum(particle_count)
    start_index = np.concatenate(([0], end_index[:-1]))
    x = l.variables['X'][:]
    y = l.variables['Y'][:]
    lon, lat = romsfile.xy2lonlat(x, y)
    time = num2date(l.variables['time'][:],
                    l.variables['time'].units)

    history_dtype_fields = [
        (name, o.ElementType.variables[name]['dtype'])
        for name in o.ElementType.variables]
    # Add environment variables
    o.history_metadata = o.ElementType.variables.copy()
    history_dtype = np.dtype(history_dtype_fields)

    num_timesteps = len(time)
    num_elements = len(l.dimensions['particle'])
    o.history = np.ma.array(
        np.zeros([num_elements, num_timesteps]),
        dtype=history_dtype, mask=[True])

    for n in range(num_timesteps):
        start = start_index[n]
        active = pid[start:start+particle_count[n]]
        o.history['lon'][active, n] = \
            lon[start:start+particle_count[n]]
        o.history['lat'][active, n] = \
            lat[start:start+particle_count[n]]
        o.history['status'][active, n] = 0

    o.status_categories = ['active', 'missing_data']
    firstlast = np.ma.notmasked_edges(o.history['status'], axis=1)
    index_of_last = firstlast[1][1]
    o.history['status'][np.arange(len(index_of_last)),
                        index_of_last] = 1

    kwargs = {}
    for var in ['lon', 'lat', 'status']:
        kwargs[var] = o.history[var][
            np.arange(len(index_of_last)), index_of_last]
    kwargs['ID'] = range(num_elements)
    o.elements = o.ElementType(**kwargs)
    o.elements_deactivated = o.ElementType()
    o.remove_deactivated_elements()

    # Import time steps from metadata
    o.time_step = time[1] - time[0]
    o.time_step_output = o.time_step
    o.start_time = time[0]
    o.time = time[-1]
    o.steps_output = num_timesteps

    return o
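# A hedged usage sketch for the helpers above. The file name is a placeholder;
# any OpenDrift-produced netCDF output carrying the opendrift_module and
# opendrift_class global attributes would do.

import opendrift

print(opendrift.get_model_names())           # e.g. ['Leeway', 'OpenOil', ...]
print(opendrift.versions())                  # software/hardware version report

o = opendrift.open('simulation_output.nc')   # returns e.g. an OpenOil or OceanDrift object
print(type(o))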
OpenDrift/opendrift
opendrift/__init__.py
Python
gpl-2.0
7,293
[ "NetCDF" ]
d890f52d3a99f104e587c48d0c884bf74a09e2b68553ce810de83a997c712925
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function, division
import os, unittest, numpy as np
from pyscf.nao import nao

class KnowValues(unittest.TestCase):

  def test_openmx(self):
    """ Computation of the atomic orbitals """
    print("openmx is no longer supported, skipping ...")
    #sv = nao(openmx='water', cd=os.path.dirname(os.path.abspath(__file__)))
    #self.assertEqual(sv.natoms, 3)
    #self.assertEqual(sv.norbs, 23)

if __name__ == "__main__": unittest.main()
gkc1000/pyscf
pyscf/nao/test/test_0039_openmx_nao.py
Python
apache-2.0
1,079
[ "OpenMX", "PySCF" ]
d926fb3dd2fec969b1d5ff34a8fa90be4ea9a5deda9589bb7f2f7ea64436af0d
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Olivier Grisel <olivier.grisel@ensta.org> # Andreas Mueller <amueller@ais.uni-bonn.de> # License: BSD 3 clause import numbers import warnings import numpy as np from scipy import sparse from ..base import BaseEstimator, TransformerMixin from ..utils import check_arrays from ..utils import atleast2d_or_csc from ..utils import array2d from ..utils import atleast2d_or_csr from ..utils import safe_asarray from ..utils import warn_if_not_float from ..utils.sparsefuncs import inplace_csr_row_normalize_l1 from ..utils.sparsefuncs import inplace_csr_row_normalize_l2 from ..utils.sparsefuncs import inplace_csr_column_scale from ..utils.sparsefuncs import mean_variance_axis0 from ..externals import six zip = six.moves.zip map = six.moves.map __all__ = [ 'Binarizer', 'KernelCenterer', 'MinMaxScaler', 'Normalizer', 'OneHotEncoder', 'Scaler', 'StandardScaler', 'add_dummy_feature', 'binarize', 'normalize', 'scale', ] def _mean_and_std(X, axis=0, with_mean=True, with_std=True): """Compute mean and std deviation for centering, scaling. Zero valued std components are reset to 1.0 to avoid NaNs when scaling. """ X = np.asarray(X) Xr = np.rollaxis(X, axis) if with_mean: mean_ = Xr.mean(axis=0) else: mean_ = None if with_std: std_ = Xr.std(axis=0) if isinstance(std_, np.ndarray): std_[std_ == 0.0] = 1.0 elif std_ == 0.: std_ = 1. else: std_ = None return mean_, std_ def scale(X, axis=0, with_mean=True, with_std=True, copy=True): """Standardize a dataset along any axis Center to the mean and component wise scale to unit variance. Parameters ---------- X : array-like or CSR matrix. The data to center and scale. axis : int (0 by default) axis used to compute the means and standard deviations along. If 0, independently standardize each feature, otherwise (if 1) standardize each sample. with_mean : boolean, True by default If True, center the data before scaling. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default is True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_mean=False` (in that case, only variance scaling will be performed on the features of the CSR matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSR matrix. See also -------- :class:`sklearn.preprocessing.StandardScaler` to perform centering and scaling using the ``Transformer`` API (e.g. 
as part of a preprocessing :class:`sklearn.pipeline.Pipeline`)
    """
    if sparse.issparse(X):
        if with_mean:
            raise ValueError(
                "Cannot center sparse matrices: pass `with_mean=False` "
                "instead. See docstring for motivation and alternatives.")
        if axis != 0:
            raise ValueError("Can only scale sparse matrix on axis=0, "
                             " got axis=%d" % axis)
        warn_if_not_float(X, estimator='The scale function')
        if not sparse.isspmatrix_csr(X):
            X = X.tocsr()
            copy = False
        if copy:
            X = X.copy()
        _, var = mean_variance_axis0(X)
        var[var == 0.0] = 1.0
        inplace_csr_column_scale(X, 1 / np.sqrt(var))
    else:
        X = np.asarray(X)
        warn_if_not_float(X, estimator='The scale function')
        mean_, std_ = _mean_and_std(
            X, axis, with_mean=with_mean, with_std=with_std)
        if copy:
            X = X.copy()
        # Xr is a view on the original array that enables easy use of
        # broadcasting on the axis in which we are interested in
        Xr = np.rollaxis(X, axis)
        if with_mean:
            Xr -= mean_
        if with_std:
            Xr /= std_
    return X


class MinMaxScaler(BaseEstimator, TransformerMixin):
    """Standardizes features by scaling each feature to a given range.

    This estimator scales and translates each feature individually such
    that it is in the given range on the training set, i.e. between
    zero and one.

    The standardization is given by::

        X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
        X_scaled = X_std * (max - min) + min

    where min, max = feature_range.

    This standardization is often used as an alternative to zero mean,
    unit variance scaling.

    Parameters
    ----------
    feature_range: tuple (min, max), default=(0, 1)
        Desired range of transformed data.

    copy : boolean, optional, default is True
        Set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array).

    Attributes
    ----------
    `min_` : ndarray, shape (n_features,)
        Per feature adjustment for minimum.

    `scale_` : ndarray, shape (n_features,)
        Per feature relative scaling of the data.
    """

    def __init__(self, feature_range=(0, 1), copy=True):
        self.feature_range = feature_range
        self.copy = copy

    def fit(self, X, y=None):
        """Compute the minimum and maximum to be used for later scaling.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data used to compute the per-feature minimum and maximum
            used for later scaling along the features axis.
        """
        X = check_arrays(X, sparse_format="dense", copy=self.copy)[0]
        warn_if_not_float(X, estimator=self)
        feature_range = self.feature_range
        if feature_range[0] >= feature_range[1]:
            raise ValueError("Minimum of desired feature range must be smaller"
                             " than maximum. Got %s." % str(feature_range))
        data_min = np.min(X, axis=0)
        data_range = np.max(X, axis=0) - data_min
        # Do not scale constant features
        data_range[data_range == 0.0] = 1.0
        self.scale_ = (feature_range[1] - feature_range[0]) / data_range
        self.min_ = feature_range[0] - data_min * self.scale_
        self.data_range = data_range
        self.data_min = data_min
        return self

    def transform(self, X):
        """Scaling features of X according to feature_range.

        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            Input data that will be transformed.
        """
        X = check_arrays(X, sparse_format="dense", copy=self.copy)[0]
        X *= self.scale_
        X += self.min_
        return X

    def inverse_transform(self, X):
        """Undo the scaling of X according to feature_range.

        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            Input data that will be transformed.
""" X = check_arrays(X, sparse_format="dense", copy=self.copy)[0] X -= self.min_ X /= self.scale_ return X class StandardScaler(BaseEstimator, TransformerMixin): """Standardize features by removing the mean and scaling to unit variance Centering and scaling happen independently on each feature by computing the relevant statistics on the samples in the training set. Mean and standard deviation are then stored to be used on later data using the `transform` method. Standardization of a dataset is a common requirement for many machine learning estimators: they might behave badly if the individual feature do not more or less look like standard normally distributed data (e.g. Gaussian with 0 mean and unit variance). For instance many elements used in the objective function of a learning algorithm (such as the RBF kernel of Support Vector Machines or the L1 and L2 regularizers of linear models) assume that all features are centered around 0 and have variance in the same order. If a feature has a variance that is orders of magnitude larger that others, it might dominate the objective function and make the estimator unable to learn from other features correctly as expected. Parameters ---------- with_mean : boolean, True by default If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default is True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. Attributes ---------- `mean_` : array of floats with shape [n_features] The mean value for each feature in the training set. `std_` : array of floats with shape [n_features] The standard deviation for each feature in the training set. See also -------- :func:`sklearn.preprocessing.scale` to perform centering and scaling without using the ``Transformer`` object oriented API :class:`sklearn.decomposition.RandomizedPCA` with `whiten=True` to further remove the linear correlation across features. """ def __init__(self, copy=True, with_mean=True, with_std=True): self.with_mean = with_mean self.with_std = with_std self.copy = copy def fit(self, X, y=None): """Compute the mean and std to be used for later scaling. Parameters ---------- X : array-like or CSR matrix with shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. """ X = check_arrays(X, copy=self.copy, sparse_format="csr")[0] if warn_if_not_float(X, estimator=self): X = X.astype(np.float) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. 
            self.mean_ = None

            if self.with_std:
                var = mean_variance_axis0(X)[1]
                self.std_ = np.sqrt(var)
                self.std_[var == 0.0] = 1.0
            else:
                self.std_ = None
            return self
        else:
            self.mean_, self.std_ = _mean_and_std(
                X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
            return self

    def transform(self, X, y=None, copy=None):
        """Perform standardization by centering and scaling

        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            The data used to scale along the features axis.
        """
        copy = copy if copy is not None else self.copy
        X = check_arrays(X, copy=copy, sparse_format="csr")[0]
        if warn_if_not_float(X, estimator=self):
            X = X.astype(np.float)
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot center sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
            if self.std_ is not None:
                inplace_csr_column_scale(X, 1 / self.std_)
        else:
            if self.with_mean:
                X -= self.mean_
            if self.with_std:
                X /= self.std_
        return X

    def inverse_transform(self, X, copy=None):
        """Scale back the data to the original representation

        Parameters
        ----------
        X : array-like with shape [n_samples, n_features]
            The data used to scale along the features axis.
        """
        copy = copy if copy is not None else self.copy
        if sparse.issparse(X):
            if self.with_mean:
                raise ValueError(
                    "Cannot uncenter sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
            if not sparse.isspmatrix_csr(X):
                X = X.tocsr()
                copy = False
            if copy:
                X = X.copy()
            if self.std_ is not None:
                inplace_csr_column_scale(X, self.std_)
        else:
            X = np.asarray(X)
            if copy:
                X = X.copy()
            if self.with_std:
                X *= self.std_
            if self.with_mean:
                X += self.mean_
        return X


class Scaler(StandardScaler):
    def __init__(self, copy=True, with_mean=True, with_std=True):
        warnings.warn("Scaler was renamed to StandardScaler. The old name "
                      " will be removed in 0.15.", DeprecationWarning)
        super(Scaler, self).__init__(copy, with_mean, with_std)


def normalize(X, norm='l2', axis=1, copy=True):
    """Normalize a dataset along any axis

    Parameters
    ----------
    X : array or scipy.sparse matrix with shape [n_samples, n_features]
        The data to normalize, element by element.
        scipy.sparse matrices should be in CSR format to avoid an
        un-necessary copy.

    norm : 'l1' or 'l2', optional ('l2' by default)
        The norm to use to normalize each non zero sample (or each non-zero
        feature if axis is 0).

    axis : 0 or 1, optional (1 by default)
        axis used to normalize the data along. If 1, independently normalize
        each sample, otherwise (if 0) normalize each feature.

    copy : boolean, optional, default is True
        set to False to perform inplace row normalization and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).

    See also
    --------
    :class:`sklearn.preprocessing.Normalizer` to perform normalization
    using the ``Transformer`` API (e.g.
as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ if norm not in ('l1', 'l2'): raise ValueError("'%s' is not a supported norm" % norm) if axis == 0: sparse_format = 'csc' elif axis == 1: sparse_format = 'csr' else: raise ValueError("'%d' is not a supported axis" % axis) X = check_arrays(X, sparse_format=sparse_format, copy=copy)[0] warn_if_not_float(X, 'The normalize function') if axis == 0: X = X.T if sparse.issparse(X): if norm == 'l1': inplace_csr_row_normalize_l1(X) elif norm == 'l2': inplace_csr_row_normalize_l2(X) else: if norm == 'l1': norms = np.abs(X).sum(axis=1)[:, np.newaxis] norms[norms == 0.0] = 1.0 elif norm == 'l2': norms = np.sqrt(np.sum(X ** 2, axis=1))[:, np.newaxis] norms[norms == 0.0] = 1.0 X /= norms if axis == 0: X = X.T return X class Normalizer(BaseEstimator, TransformerMixin): """Normalize samples individually to unit norm Each sample (i.e. each row of the data matrix) with at least one non-zero component is rescaled independently of other samples so that its norm (l1 or l2) equals one. This transformer is able to work both with dense numpy arrays and scipy.sparse matrices (use the CSR format if you want to avoid the burden of a copy / conversion). Scaling inputs to unit norms is a common operation for text classification or clustering. For instance, the dot product of two l2-normalized TF-IDF vectors is the cosine similarity of the vectors, which is the base similarity metric for the Vector Space Model commonly used by the Information Retrieval community. Parameters ---------- norm : 'l1' or 'l2', optional ('l2' by default) The norm to use to normalize each non-zero sample. copy : boolean, optional, default is True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). Notes ----- This estimator is stateless (besides constructor parameters); the fit method does nothing but is useful when used in a pipeline. See also -------- :func:`sklearn.preprocessing.normalize` equivalent function without the object oriented API """ def __init__(self, norm='l2', copy=True): self.norm = norm self.copy = copy def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ atleast2d_or_csr(X) return self def transform(self, X, y=None, copy=None): """Scale each non-zero row of X to unit norm Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] The data to normalize, row by row. scipy.sparse matrices should be in CSR format to avoid an unnecessary copy. """ copy = copy if copy is not None else self.copy atleast2d_or_csr(X) return normalize(X, norm=self.norm, axis=1, copy=copy) def binarize(X, threshold=0.0, copy=True): """Boolean thresholding of array-like or scipy.sparse matrix Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR or CSC format to avoid an unnecessary copy. threshold : float, optional (0.0 by default) Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : boolean, optional, default is True set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR / CSC matrix). 
See also -------- :class:`sklearn.preprocessing.Binarizer` to perform binarization using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`) """ sparse_format = "csr" # We force sparse format to be either csr or csc. if hasattr(X, "format"): if X.format in ["csr", "csc"]: sparse_format = X.format X = check_arrays(X, sparse_format=sparse_format, copy=copy)[0] if sparse.issparse(X): if threshold < 0: raise ValueError('Cannot binarize a sparse matrix with threshold ' '< 0') cond = X.data > threshold not_cond = np.logical_not(cond) X.data[cond] = 1 X.data[not_cond] = 0 X.eliminate_zeros() else: cond = X > threshold not_cond = np.logical_not(cond) X[cond] = 1 X[not_cond] = 0 return X class Binarizer(BaseEstimator, TransformerMixin): """Binarize data (set feature values to 0 or 1) according to a threshold Values greater than the threshold map to 1, while values less than or equal to the threshold map to 0. With the default threshold of 0, only positive values map to 1. Binarization is a common operation on text count data, where the analyst can decide to consider only the presence or absence of a feature rather than a quantified number of occurrences. It can also be used as a pre-processing step for estimators that consider boolean random variables (e.g. modelled using the Bernoulli distribution in a Bayesian setting). Parameters ---------- threshold : float, optional (0.0 by default) Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : boolean, optional, default is True set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). Notes ----- If the input is a sparse matrix, only the non-zero values are subject to update by the Binarizer class. This estimator is stateless (besides constructor parameters); the fit method does nothing but is useful when used in a pipeline. """ def __init__(self, threshold=0.0, copy=True): self.threshold = threshold self.copy = copy def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ atleast2d_or_csr(X) return self def transform(self, X, y=None, copy=None): """Binarize each element of X Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR format to avoid an unnecessary copy. """ copy = copy if copy is not None else self.copy return binarize(X, threshold=self.threshold, copy=copy) class KernelCenterer(BaseEstimator, TransformerMixin): """Center a kernel matrix Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a function mapping x to a Hilbert space. KernelCenterer centers (i.e., normalizes to have zero mean) the data without explicitly computing phi(x). It is equivalent to centering phi(x) with sklearn.preprocessing.StandardScaler(with_std=False). """ def fit(self, K, y=None): """Fit KernelCenterer Parameters ---------- K : numpy array of shape [n_samples, n_samples] Kernel matrix. Returns ------- self : returns an instance of self. """ K = array2d(K) n_samples = K.shape[0] self.K_fit_rows_ = np.sum(K, axis=0) / n_samples self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples return self def transform(self, K, y=None, copy=True): """Center kernel matrix. 
Parameters ---------- K : numpy array of shape [n_samples1, n_samples2] Kernel matrix. Returns ------- K_new : numpy array of shape [n_samples1, n_samples2] """ K = array2d(K) if copy: K = K.copy() K_pred_cols = (np.sum(K, axis=1) / self.K_fit_rows_.shape[0])[:, np.newaxis] K -= self.K_fit_rows_ K -= K_pred_cols K += self.K_fit_all_ return K def add_dummy_feature(X, value=1.0): """Augment dataset with an additional dummy feature. This is useful for fitting an intercept term with implementations which cannot otherwise fit it directly. Parameters ---------- X : array or scipy.sparse matrix with shape [n_samples, n_features] Data. value : float Value to use for the dummy feature. Returns ------- X : array or scipy.sparse matrix with shape [n_samples, n_features + 1] Same data with dummy feature added as first column. Examples -------- >>> from sklearn.preprocessing import add_dummy_feature >>> add_dummy_feature([[0, 1], [1, 0]]) array([[ 1., 0., 1.], [ 1., 1., 0.]]) """ X = safe_asarray(X) n_samples, n_features = X.shape shape = (n_samples, n_features + 1) if sparse.issparse(X): if sparse.isspmatrix_coo(X): # Shift columns to the right. col = X.col + 1 # Column indices of dummy feature are 0 everywhere. col = np.concatenate((np.zeros(n_samples), col)) # Row indices of dummy feature are 0, ..., n_samples-1. row = np.concatenate((np.arange(n_samples), X.row)) # Prepend the dummy feature n_samples times. data = np.concatenate((np.ones(n_samples) * value, X.data)) return sparse.coo_matrix((data, (row, col)), shape) elif sparse.isspmatrix_csc(X): # Shift index pointers since we need to add n_samples elements. indptr = X.indptr + n_samples # indptr[0] must be 0. indptr = np.concatenate((np.array([0]), indptr)) # Row indices of dummy feature are 0, ..., n_samples-1. indices = np.concatenate((np.arange(n_samples), X.indices)) # Prepend the dummy feature n_samples times. data = np.concatenate((np.ones(n_samples) * value, X.data)) return sparse.csc_matrix((data, indices, indptr), shape) else: klass = X.__class__ return klass(add_dummy_feature(X.tocoo(), value)) else: return np.hstack((np.ones((n_samples, 1)) * value, X)) def _transform_selected(X, transform, selected="all", copy=True): """Apply a transform function to a selected subset of the features Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. transform : callable A callable transform(X) -> X_transformed copy : boolean, optional Copy X even if it could be avoided. selected : "all" or array of indices or mask Specify which features to apply the transform to. Returns ------- X : array or sparse matrix, shape=(n_samples, n_features_new) """ if selected == "all": return transform(X) X = atleast2d_or_csc(X, copy=copy) if len(selected) == 0: return X n_features = X.shape[1] ind = np.arange(n_features) sel = np.zeros(n_features, dtype=bool) sel[np.asarray(selected)] = True not_sel = np.logical_not(sel) n_selected = np.sum(sel) if n_selected == 0: # No features selected. return X elif n_selected == n_features: # All features selected. return transform(X) else: X_sel = transform(X[:, ind[sel]]) X_not_sel = X[:, ind[not_sel]] if sparse.issparse(X_sel) or sparse.issparse(X_not_sel): return sparse.hstack((X_sel, X_not_sel)) else: return np.hstack((X_sel, X_not_sel)) class OneHotEncoder(BaseEstimator, TransformerMixin): """Encode categorical integer features using a one-hot aka one-of-K scheme. 
The input to this transformer should be a matrix of integers, denoting the values taken on by categorical (discrete) features. The output will be a sparse matrix where each column corresponds to one possible value of one feature. It is assumed that input features take on values in the range [0, n_values). This encoding is needed for feeding categorical data to many scikit-learn estimators, notably linear models and SVMs with the standard kernels. Parameters ---------- n_values : 'auto', int or array of ints Number of values per feature. - 'auto' : determine value range from training data. - int : maximum value for all features. - array : maximum value per feature. categorical_features : "all" or array of indices or mask Specify what features are treated as categorical. - 'all' (default): All features are treated as categorical. - array of indices: Array of categorical feature indices. - mask: Array of length n_features and with dtype=bool. Non-categorical features are always stacked to the right of the matrix. dtype : number type, default=np.float Desired dtype of output. Attributes ---------- `active_features_` : array Indices for active features, meaning values that actually occur in the training set. Only available when n_values is ``'auto'``. `feature_indices_` : array of shape (n_features,) Indices to feature ranges. Feature ``i`` in the original data is mapped to features from ``feature_indices_[i]`` to ``feature_indices_[i+1]`` (and then potentially masked by `active_features_` afterwards) `n_values_` : array of shape (n_features,) Maximum number of values per feature. Examples -------- Given a dataset with three features and four samples, we let the encoder find the maximum value per feature and transform the data to a binary one-hot encoding. >>> from sklearn.preprocessing import OneHotEncoder >>> enc = OneHotEncoder() >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \ [1, 0, 2]]) # doctest: +ELLIPSIS OneHotEncoder(categorical_features='all', dtype=<... 'float'>, n_values='auto') >>> enc.n_values_ array([2, 3, 4]) >>> enc.feature_indices_ array([0, 2, 5, 9]) >>> enc.transform([[0, 1, 1]]).toarray() array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]]) See also -------- sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of dictionary items (also handles string-valued features). sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot encoding of dictionary items or strings. """ def __init__(self, n_values="auto", categorical_features="all", dtype=np.float): self.n_values = n_values self.categorical_features = categorical_features self.dtype = dtype def fit(self, X, y=None): """Fit OneHotEncoder to X. Parameters ---------- X : array-like, shape=(n_samples, n_features) Input array of type int. Returns ------- self """ self.fit_transform(X) return self def _fit_transform(self, X): """Assumes X contains only categorical features.""" X = check_arrays(X, sparse_format='dense', dtype=np.int)[0] if np.any(X < 0): raise ValueError("X needs to contain only non-negative integers.") n_samples, n_features = X.shape if self.n_values == 'auto': n_values = np.max(X, axis=0) + 1 elif isinstance(self.n_values, numbers.Integral): n_values = np.empty(n_features, dtype=np.int) n_values.fill(self.n_values) else: try: n_values = np.asarray(self.n_values, dtype=int) except (ValueError, TypeError): raise TypeError("Wrong type for parameter `n_values`. 
Expected" " 'auto', int or array of ints, got %r" % type(X)) if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]: raise ValueError("Shape mismatch: if n_values is an array," " it has to be of shape (n_features,).") self.n_values_ = n_values n_values = np.hstack([[0], n_values]) indices = np.cumsum(n_values) self.feature_indices_ = indices column_indices = (X + indices[:-1]).ravel() row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features) data = np.ones(n_samples * n_features) out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr() if self.n_values == 'auto': mask = np.array(out.sum(axis=0)).ravel() != 0 active_features = np.where(mask)[0] out = out[:, active_features] self.active_features_ = active_features return out def fit_transform(self, X, y=None): """Fit OneHotEncoder to X, then transform X. Equivalent to self.fit(X).transform(X), but more convenient and more efficient. See fit for the parameters, transform for the return value. """ return _transform_selected(X, self._fit_transform, self.categorical_features, copy=True) def _transform(self, X): """Asssumes X contains only categorical features.""" X = check_arrays(X, sparse_format='dense', dtype=np.int)[0] if np.any(X < 0): raise ValueError("X needs to contain only non-negative integers.") n_samples, n_features = X.shape indices = self.feature_indices_ if n_features != indices.shape[0] - 1: raise ValueError("X has different shape than during fitting." " Expected %d, got %d." % (indices.shape[0] - 1, n_features)) n_values_check = np.max(X, axis=0) + 1 if (n_values_check > self.n_values_).any(): raise ValueError("Feature out of bounds. Try setting n_values.") column_indices = (X + indices[:-1]).ravel() row_indices = np.repeat(np.arange(n_samples, dtype=np.int32), n_features) data = np.ones(n_samples * n_features) out = sparse.coo_matrix((data, (row_indices, column_indices)), shape=(n_samples, indices[-1]), dtype=self.dtype).tocsr() if self.n_values == 'auto': out = out[:, self.active_features_] return out def transform(self, X): """Transform X using one-hot encoding. Parameters ---------- X : array-like, shape=(n_samples, n_features) Input array of type int. Returns ------- X_out : sparse matrix, dtype=int Transformed input. """ return _transform_selected(X, self._transform, self.categorical_features, copy=True)
depet/scikit-learn
sklearn/preprocessing/data.py
Python
bsd-3-clause
34,765
[ "Gaussian" ]
b34a1a6ff5c45ef4417908529ef4047a6fa68b03f158b5b27e79fd61028fa5ff
# Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. import unittest from pathlib import Path import os import numpy as np from pymatgen.core.operations import SymmOp from pymatgen.core.sites import PeriodicSite from pymatgen.core.structure import Molecule, Structure from pymatgen.io.cif import CifParser from pymatgen.io.vasp.inputs import Poscar from pymatgen.io.vasp.outputs import Vasprun from pymatgen.symmetry.analyzer import ( PointGroupAnalyzer, SpacegroupAnalyzer, cluster_sites, iterative_symmetrize, ) from pymatgen.util.testing import PymatgenTest test_dir_mol = os.path.join(PymatgenTest.TEST_FILES_DIR, "molecules") class SpacegroupAnalyzerTest(PymatgenTest): def setUp(self): p = Poscar.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR")) self.structure = p.structure self.sg = SpacegroupAnalyzer(self.structure, 0.001) self.disordered_structure = self.get_structure("Li10GeP2S12") self.disordered_sg = SpacegroupAnalyzer(self.disordered_structure, 0.001) s = p.structure.copy() site = s[0] del s[0] s.append(site.species, site.frac_coords) self.sg3 = SpacegroupAnalyzer(s, 0.001) graphite = self.get_structure("Graphite") graphite.add_site_property("magmom", [0.1] * len(graphite)) self.sg4 = SpacegroupAnalyzer(graphite, 0.001) self.structure4 = graphite def test_primitive(self): s = Structure.from_spacegroup("Fm-3m", np.eye(3) * 3, ["Cu"], [[0, 0, 0]]) a = SpacegroupAnalyzer(s) self.assertEqual(len(s), 4) self.assertEqual(len(a.find_primitive()), 1) def test_is_laue(self): s = Structure.from_spacegroup("Fm-3m", np.eye(3) * 3, ["Cu"], [[0, 0, 0]]) a = SpacegroupAnalyzer(s) self.assertTrue(a.is_laue()) def test_magnetic(self): lfp = PymatgenTest.get_structure("LiFePO4") sg = SpacegroupAnalyzer(lfp, 0.1) self.assertEqual(sg.get_space_group_symbol(), "Pnma") magmoms = [0] * len(lfp) magmoms[4] = 1 magmoms[5] = -1 magmoms[6] = 1 magmoms[7] = -1 lfp.add_site_property("magmom", magmoms) sg = SpacegroupAnalyzer(lfp, 0.1) self.assertEqual(sg.get_space_group_symbol(), "Pnma") def test_get_space_symbol(self): self.assertEqual(self.sg.get_space_group_symbol(), "Pnma") self.assertEqual(self.disordered_sg.get_space_group_symbol(), "P4_2/nmc") self.assertEqual(self.sg3.get_space_group_symbol(), "Pnma") self.assertEqual(self.sg4.get_space_group_symbol(), "P6_3/mmc") def test_get_space_number(self): self.assertEqual(self.sg.get_space_group_number(), 62) self.assertEqual(self.disordered_sg.get_space_group_number(), 137) self.assertEqual(self.sg4.get_space_group_number(), 194) def test_get_hall(self): self.assertEqual(self.sg.get_hall(), "-P 2ac 2n") self.assertEqual(self.disordered_sg.get_hall(), "P 4n 2n -1n") def test_get_pointgroup(self): self.assertEqual(self.sg.get_point_group_symbol(), "mmm") self.assertEqual(self.disordered_sg.get_point_group_symbol(), "4/mmm") def test_get_symmetry_operations(self): for sg, structure in [(self.sg, self.structure), (self.sg4, self.structure4)]: pgops = sg.get_point_group_operations() fracsymmops = sg.get_symmetry_operations() symmops = sg.get_symmetry_operations(True) latt = structure.lattice for fop, op, pgop in zip(fracsymmops, symmops, pgops): # translation vector values should all be 0 or 0.5 t = fop.translation_vector * 2 self.assertArrayAlmostEqual(t - np.round(t), 0) self.assertArrayAlmostEqual(fop.rotation_matrix, pgop.rotation_matrix) for site in structure: newfrac = fop.operate(site.frac_coords) newcart = op.operate(site.coords) self.assertTrue(np.allclose(latt.get_fractional_coords(newcart), newfrac)) found 
= False newsite = PeriodicSite(site.species, newcart, latt, coords_are_cartesian=True) for testsite in structure: if newsite.is_periodic_image(testsite, 1e-3): found = True break self.assertTrue(found) # Make sure this works for any position, not just the atomic # ones. random_fcoord = np.random.uniform(size=(3)) random_ccoord = latt.get_cartesian_coords(random_fcoord) newfrac = fop.operate(random_fcoord) newcart = op.operate(random_ccoord) self.assertTrue(np.allclose(latt.get_fractional_coords(newcart), newfrac)) def test_get_symmetry_dataset(self): ds = self.sg.get_symmetry_dataset() self.assertEqual(ds["international"], "Pnma") def test_get_crystal_system(self): crystal_system = self.sg.get_crystal_system() self.assertEqual("orthorhombic", crystal_system) self.assertEqual("tetragonal", self.disordered_sg.get_crystal_system()) def test_get_refined_structure(self): for a in self.sg.get_refined_structure().lattice.angles: self.assertEqual(a, 90) refined = self.disordered_sg.get_refined_structure() for a in refined.lattice.angles: self.assertEqual(a, 90) self.assertEqual(refined.lattice.a, refined.lattice.b) s = self.get_structure("Li2O") sg = SpacegroupAnalyzer(s, 0.01) self.assertEqual(sg.get_refined_structure().num_sites, 4 * s.num_sites) def test_get_symmetrized_structure(self): symm_struct = self.sg.get_symmetrized_structure() for a in symm_struct.lattice.angles: self.assertEqual(a, 90) self.assertEqual(len(symm_struct.equivalent_sites), 5) symm_struct = self.disordered_sg.get_symmetrized_structure() self.assertEqual(len(symm_struct.equivalent_sites), 8) self.assertEqual([len(i) for i in symm_struct.equivalent_sites], [16, 4, 8, 4, 2, 8, 8, 8]) s1 = symm_struct.equivalent_sites[1][1] s2 = symm_struct[symm_struct.equivalent_indices[1][1]] self.assertEqual(s1, s2) self.assertEqual(self.sg4.get_symmetrized_structure()[0].magmom, 0.1) self.assertEqual(symm_struct.wyckoff_symbols[0], "16h") # self.assertEqual(symm_struct[0].wyckoff, "16h") # Check copying self.assertEqual(symm_struct.copy(), symm_struct) d = symm_struct.as_dict() from pymatgen.symmetry.structure import SymmetrizedStructure ss = SymmetrizedStructure.from_dict(d) self.assertEqual(ss.wyckoff_symbols[0], "16h") self.assertIn("SymmetrizedStructure", ss.__str__()) def test_find_primitive(self): """ F m -3 m Li2O testing of converting to primitive cell """ parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "Li2O.cif")) structure = parser.get_structures(False)[0] s = SpacegroupAnalyzer(structure) primitive_structure = s.find_primitive() self.assertEqual(primitive_structure.formula, "Li2 O1") # This isn't what is expected. 
All the angles should be 60 self.assertAlmostEqual(primitive_structure.lattice.alpha, 60) self.assertAlmostEqual(primitive_structure.lattice.beta, 60) self.assertAlmostEqual(primitive_structure.lattice.gamma, 60) self.assertAlmostEqual(primitive_structure.lattice.volume, structure.lattice.volume / 4.0) def test_get_ir_reciprocal_mesh(self): grid = self.sg.get_ir_reciprocal_mesh() self.assertEqual(len(grid), 216) self.assertAlmostEqual(grid[1][0][0], 0.1) self.assertAlmostEqual(grid[1][0][1], 0.0) self.assertAlmostEqual(grid[1][0][2], 0.0) self.assertAlmostEqual(grid[1][1], 2) def test_get_conventional_standard_structure(self): parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "bcc_1927.cif")) structure = parser.get_structures(False)[0] s = SpacegroupAnalyzer(structure, symprec=1e-2) conv = s.get_conventional_standard_structure() self.assertAlmostEqual(conv.lattice.alpha, 90) self.assertAlmostEqual(conv.lattice.beta, 90) self.assertAlmostEqual(conv.lattice.gamma, 90) self.assertAlmostEqual(conv.lattice.a, 9.1980270633769461) self.assertAlmostEqual(conv.lattice.b, 9.1980270633769461) self.assertAlmostEqual(conv.lattice.c, 9.1980270633769461) parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "btet_1915.cif")) structure = parser.get_structures(False)[0] s = SpacegroupAnalyzer(structure, symprec=1e-2) conv = s.get_conventional_standard_structure() self.assertAlmostEqual(conv.lattice.alpha, 90) self.assertAlmostEqual(conv.lattice.beta, 90) self.assertAlmostEqual(conv.lattice.gamma, 90) self.assertAlmostEqual(conv.lattice.a, 5.0615106678044235) self.assertAlmostEqual(conv.lattice.b, 5.0615106678044235) self.assertAlmostEqual(conv.lattice.c, 4.2327080177761687) parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "orci_1010.cif")) structure = parser.get_structures(False)[0] s = SpacegroupAnalyzer(structure, symprec=1e-2) conv = s.get_conventional_standard_structure() self.assertAlmostEqual(conv.lattice.alpha, 90) self.assertAlmostEqual(conv.lattice.beta, 90) self.assertAlmostEqual(conv.lattice.gamma, 90) self.assertAlmostEqual(conv.lattice.a, 2.9542233922299999) self.assertAlmostEqual(conv.lattice.b, 4.6330325651443296) self.assertAlmostEqual(conv.lattice.c, 5.373703587040775) parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "orcc_1003.cif")) structure = parser.get_structures(False)[0] s = SpacegroupAnalyzer(structure, symprec=1e-2) conv = s.get_conventional_standard_structure() self.assertAlmostEqual(conv.lattice.alpha, 90) self.assertAlmostEqual(conv.lattice.beta, 90) self.assertAlmostEqual(conv.lattice.gamma, 90) self.assertAlmostEqual(conv.lattice.a, 4.1430033493799998) self.assertAlmostEqual(conv.lattice.b, 31.437979757624728) self.assertAlmostEqual(conv.lattice.c, 3.99648651) parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "orac_632475.cif")) structure = parser.get_structures(False)[0] s = SpacegroupAnalyzer(structure, symprec=1e-2) conv = s.get_conventional_standard_structure() self.assertAlmostEqual(conv.lattice.alpha, 90) self.assertAlmostEqual(conv.lattice.beta, 90) self.assertAlmostEqual(conv.lattice.gamma, 90) self.assertAlmostEqual(conv.lattice.a, 3.1790663399999999) self.assertAlmostEqual(conv.lattice.b, 9.9032878699999998) self.assertAlmostEqual(conv.lattice.c, 3.5372412099999999) parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "monoc_1028.cif")) structure = parser.get_structures(False)[0] s = SpacegroupAnalyzer(structure, symprec=1e-2) conv = s.get_conventional_standard_structure() 
self.assertAlmostEqual(conv.lattice.alpha, 90) self.assertAlmostEqual(conv.lattice.beta, 117.53832420192903) self.assertAlmostEqual(conv.lattice.gamma, 90) self.assertAlmostEqual(conv.lattice.a, 14.033435583000625) self.assertAlmostEqual(conv.lattice.b, 3.96052850731) self.assertAlmostEqual(conv.lattice.c, 6.8743926325200002) parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "hex_1170.cif")) structure = parser.get_structures(False)[0] s = SpacegroupAnalyzer(structure, symprec=1e-2) conv = s.get_conventional_standard_structure() self.assertAlmostEqual(conv.lattice.alpha, 90) self.assertAlmostEqual(conv.lattice.beta, 90) self.assertAlmostEqual(conv.lattice.gamma, 120) self.assertAlmostEqual(conv.lattice.a, 3.699919902005897) self.assertAlmostEqual(conv.lattice.b, 3.699919902005897) self.assertAlmostEqual(conv.lattice.c, 6.9779585500000003) structure = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "tric_684654.json")) s = SpacegroupAnalyzer(structure, symprec=1e-2) conv = s.get_conventional_standard_structure() self.assertAlmostEqual(conv.lattice.alpha, 74.09581916308757) self.assertAlmostEqual(conv.lattice.beta, 75.72817279281173) self.assertAlmostEqual(conv.lattice.gamma, 63.63234318667333) self.assertAlmostEqual(conv.lattice.a, 3.741372924048738) self.assertAlmostEqual(conv.lattice.b, 3.9883228679270686) self.assertAlmostEqual(conv.lattice.c, 7.288495840048958) def test_get_primitive_standard_structure(self): parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "bcc_1927.cif")) structure = parser.get_structures(False)[0] s = SpacegroupAnalyzer(structure, symprec=1e-2) prim = s.get_primitive_standard_structure() self.assertAlmostEqual(prim.lattice.alpha, 109.47122063400001) self.assertAlmostEqual(prim.lattice.beta, 109.47122063400001) self.assertAlmostEqual(prim.lattice.gamma, 109.47122063400001) self.assertAlmostEqual(prim.lattice.a, 7.9657251015812145) self.assertAlmostEqual(prim.lattice.b, 7.9657251015812145) self.assertAlmostEqual(prim.lattice.c, 7.9657251015812145) parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "btet_1915.cif")) structure = parser.get_structures(False)[0] s = SpacegroupAnalyzer(structure, symprec=1e-2) prim = s.get_primitive_standard_structure() self.assertAlmostEqual(prim.lattice.alpha, 105.015053349) self.assertAlmostEqual(prim.lattice.beta, 105.015053349) self.assertAlmostEqual(prim.lattice.gamma, 118.80658411899999) self.assertAlmostEqual(prim.lattice.a, 4.1579321075608791) self.assertAlmostEqual(prim.lattice.b, 4.1579321075608791) self.assertAlmostEqual(prim.lattice.c, 4.1579321075608791) parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "orci_1010.cif")) structure = parser.get_structures(False)[0] s = SpacegroupAnalyzer(structure, symprec=1e-2) prim = s.get_primitive_standard_structure() self.assertAlmostEqual(prim.lattice.alpha, 134.78923546600001) self.assertAlmostEqual(prim.lattice.beta, 105.856239333) self.assertAlmostEqual(prim.lattice.gamma, 91.276341676000001) self.assertAlmostEqual(prim.lattice.a, 3.8428217771014852) self.assertAlmostEqual(prim.lattice.b, 3.8428217771014852) self.assertAlmostEqual(prim.lattice.c, 3.8428217771014852) parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "orcc_1003.cif")) structure = parser.get_structures(False)[0] s = SpacegroupAnalyzer(structure, symprec=1e-2) prim = s.get_primitive_standard_structure() self.assertAlmostEqual(prim.lattice.alpha, 90) self.assertAlmostEqual(prim.lattice.beta, 90) self.assertAlmostEqual(prim.lattice.gamma, 164.985257335) 
self.assertAlmostEqual(prim.lattice.a, 15.854897098324196) self.assertAlmostEqual(prim.lattice.b, 15.854897098324196) self.assertAlmostEqual(prim.lattice.c, 3.99648651) parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "orac_632475.cif")) structure = parser.get_structures(False)[0] s = SpacegroupAnalyzer(structure, symprec=1e-2) prim = s.get_primitive_standard_structure() self.assertAlmostEqual(prim.lattice.alpha, 90) self.assertAlmostEqual(prim.lattice.beta, 90) self.assertAlmostEqual(prim.lattice.gamma, 144.40557588533386) self.assertAlmostEqual(prim.lattice.a, 5.2005185662155391) self.assertAlmostEqual(prim.lattice.b, 5.2005185662155391) self.assertAlmostEqual(prim.lattice.c, 3.5372412099999999) parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "monoc_1028.cif")) structure = parser.get_structures(False)[0] s = SpacegroupAnalyzer(structure, symprec=1e-2) prim = s.get_primitive_standard_structure() self.assertAlmostEqual(prim.lattice.alpha, 63.579155761999999) self.assertAlmostEqual(prim.lattice.beta, 116.42084423747779) self.assertAlmostEqual(prim.lattice.gamma, 148.47965136208569) self.assertAlmostEqual(prim.lattice.a, 7.2908007159612325) self.assertAlmostEqual(prim.lattice.b, 7.2908007159612325) self.assertAlmostEqual(prim.lattice.c, 6.8743926325200002) parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "hex_1170.cif")) structure = parser.get_structures(False)[0] s = SpacegroupAnalyzer(structure, symprec=1e-2) prim = s.get_primitive_standard_structure() self.assertAlmostEqual(prim.lattice.alpha, 90) self.assertAlmostEqual(prim.lattice.beta, 90) self.assertAlmostEqual(prim.lattice.gamma, 120) self.assertAlmostEqual(prim.lattice.a, 3.699919902005897) self.assertAlmostEqual(prim.lattice.b, 3.699919902005897) self.assertAlmostEqual(prim.lattice.c, 6.9779585500000003) parser = CifParser(os.path.join(PymatgenTest.TEST_FILES_DIR, "rhomb_3478_conv.cif")) structure = parser.get_structures(False)[0] s = SpacegroupAnalyzer(structure, symprec=1e-2) prim = s.get_primitive_standard_structure() self.assertAlmostEqual(prim.lattice.alpha, 28.049186140546812) self.assertAlmostEqual(prim.lattice.beta, 28.049186140546812) self.assertAlmostEqual(prim.lattice.gamma, 28.049186140546812) self.assertAlmostEqual(prim.lattice.a, 5.9352627428399982) self.assertAlmostEqual(prim.lattice.b, 5.9352627428399982) self.assertAlmostEqual(prim.lattice.c, 5.9352627428399982) def test_tricky_structure(self): # for some reason this structure kills spglib1.9 # 1.7 can't find symmetry either, but at least doesn't kill python s = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR.tricky_symmetry")) sa = SpacegroupAnalyzer(s, 0.1) sa.get_space_group_symbol() sa.get_space_group_number() sa.get_point_group_symbol() sa.get_crystal_system() sa.get_hall() class SpacegroupTest(unittest.TestCase): def setUp(self): p = Poscar.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "POSCAR")) self.structure = p.structure self.sg1 = SpacegroupAnalyzer(self.structure, 0.001).get_space_group_operations() def test_are_symmetrically_equivalent(self): sites1 = [self.structure[i] for i in [0, 1]] sites2 = [self.structure[i] for i in [2, 3]] self.assertTrue(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3)) sites1 = [self.structure[i] for i in [0, 1]] sites2 = [self.structure[i] for i in [0, 2]] self.assertFalse(self.sg1.are_symmetrically_equivalent(sites1, sites2, 1e-3)) H2O2 = Molecule( ["O", "O", "H", "H"], [ [0, 0.727403, -0.050147], [0, -0.727403, -0.050147], [0.83459, 0.897642, 
0.401175], [-0.83459, -0.897642, 0.401175], ], ) C2H2F2Br2 = Molecule( ["C", "C", "F", "Br", "H", "F", "H", "Br"], [ [-0.752000, 0.001000, -0.141000], [0.752000, -0.001000, 0.141000], [-1.158000, 0.991000, 0.070000], [-1.240000, -0.737000, 0.496000], [-0.924000, -0.249000, -1.188000], [1.158000, -0.991000, -0.070000], [0.924000, 0.249000, 1.188000], [1.240000, 0.737000, -0.496000], ], ) H2O = Molecule( ["H", "O", "H"], [[0, 0.780362, -0.456316], [0, 0, 0.114079], [0, -0.780362, -0.456316]], ) C2H4 = Molecule( ["C", "C", "H", "H", "H", "H"], [ [0.0000, 0.0000, 0.6695], [0.0000, 0.0000, -0.6695], [0.0000, 0.9289, 1.2321], [0.0000, -0.9289, 1.2321], [0.0000, 0.9289, -1.2321], [0.0000, -0.9289, -1.2321], ], ) NH3 = Molecule( ["N", "H", "H", "H"], [ [0.0000, 0.0000, 0.0000], [0.0000, -0.9377, -0.3816], [0.8121, 0.4689, -0.3816], [-0.8121, 0.4689, -0.3816], ], ) BF3 = Molecule( ["B", "F", "F", "F"], [ [0.0000, 0.0000, 0.0000], [0.0000, -0.9377, 0.00], [0.8121, 0.4689, 0], [-0.8121, 0.4689, 0], ], ) CH4 = Molecule( ["C", "H", "H", "H", "H"], [ [0.000000, 0.000000, 0.000000], [0.000000, 0.000000, 1.08], [1.026719, 0.000000, -0.363000], [-0.513360, -0.889165, -0.363000], [-0.513360, 0.889165, -0.363000], ], ) PF6 = Molecule( ["P", "F", "F", "F", "F", "F", "F"], [[0, 0, 0], [0, 0, 1], [0, 0, -1], [0, 1, 0], [0, -1, 0], [1, 0, 0], [-1, 0, 0]], ) class PointGroupAnalyzerTest(PymatgenTest): def test_spherical(self): a = PointGroupAnalyzer(CH4) self.assertEqual(a.sch_symbol, "Td") self.assertEqual(len(a.get_pointgroup()), 24) a = PointGroupAnalyzer(PF6) self.assertEqual(a.sch_symbol, "Oh") self.assertEqual(len(a.get_pointgroup()), 48) m = Molecule.from_file(os.path.join(test_dir_mol, "c60.xyz")) a = PointGroupAnalyzer(m) self.assertEqual(a.sch_symbol, "Ih") cube_species = ["C", "C", "C", "C", "C", "C", "C", "C"] cube_coords = [ [0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1], ] m = Molecule(cube_species, cube_coords) a = PointGroupAnalyzer(m, 0.1) self.assertEqual(a.sch_symbol, "Oh") def test_tricky(self): m = Molecule.from_file(os.path.join(test_dir_mol, "dh.xyz")) a = PointGroupAnalyzer(m, 0.1) self.assertEqual(a.sch_symbol, "D*h") def test_linear(self): coords = [ [0.000000, 0.000000, 0.000000], [0.000000, 0.000000, 1.08], [0, 0.000000, -1.08], ] mol = Molecule(["C", "H", "H"], coords) a = PointGroupAnalyzer(mol) self.assertEqual(a.sch_symbol, "D*h") mol = Molecule(["C", "H", "N"], coords) a = PointGroupAnalyzer(mol) self.assertEqual(a.sch_symbol, "C*v") def test_asym_top(self): coords = [ [0.000000, 0.000000, 0.000000], [0.000000, 0.000000, 1.08], [1.026719, 0.000000, -0.363000], [-0.513360, -0.889165, -0.363000], [-0.513360, 0.889165, -0.363000], ] mol = Molecule(["C", "H", "F", "Br", "Cl"], coords) a = PointGroupAnalyzer(mol) self.assertEqual(a.sch_symbol, "C1") self.assertEqual(len(a.get_pointgroup()), 1) coords = [ [0.000000, 0.000000, 1.08], [1.026719, 0.000000, -0.363000], [-0.513360, -0.889165, -0.363000], [-0.513360, 0.889165, -0.363000], ] cs_mol = Molecule(["H", "F", "Cl", "Cl"], coords) a = PointGroupAnalyzer(cs_mol) self.assertEqual(a.sch_symbol, "Cs") self.assertEqual(len(a.get_pointgroup()), 2) a = PointGroupAnalyzer(C2H2F2Br2) self.assertEqual(a.sch_symbol, "Ci") self.assertEqual(len(a.get_pointgroup()), 2) def test_cyclic(self): a = PointGroupAnalyzer(H2O2) self.assertEqual(a.sch_symbol, "C2") self.assertEqual(len(a.get_pointgroup()), 2) a = PointGroupAnalyzer(H2O) self.assertEqual(a.sch_symbol, "C2v") self.assertEqual(len(a.get_pointgroup()), 
4) a = PointGroupAnalyzer(NH3) self.assertEqual(a.sch_symbol, "C3v") self.assertEqual(len(a.get_pointgroup()), 6) cs2 = Molecule.from_file(os.path.join(test_dir_mol, "Carbon_Disulfide.xyz")) a = PointGroupAnalyzer(cs2, eigen_tolerance=0.001) self.assertEqual(a.sch_symbol, "C2v") def test_dihedral(self): a = PointGroupAnalyzer(C2H4) self.assertEqual(a.sch_symbol, "D2h") self.assertEqual(len(a.get_pointgroup()), 8) a = PointGroupAnalyzer(BF3) self.assertEqual(a.sch_symbol, "D3h") self.assertEqual(len(a.get_pointgroup()), 12) m = Molecule.from_file(os.path.join(test_dir_mol, "b12h12.xyz")) a = PointGroupAnalyzer(m) self.assertEqual(a.sch_symbol, "Ih") def test_symmetrize_molecule1(self): np.random.seed(77) distortion = np.random.randn(len(C2H4), 3) / 10 dist_mol = Molecule(C2H4.species, C2H4.cart_coords + distortion) eq = iterative_symmetrize(dist_mol, max_n=100, epsilon=1e-7) sym_mol, eq_sets, ops = eq["sym_mol"], eq["eq_sets"], eq["sym_ops"] self.assertTrue({0, 1} in eq_sets.values()) self.assertTrue({2, 3, 4, 5} in eq_sets.values()) coords = sym_mol.cart_coords for i, eq_set in eq_sets.items(): for j in eq_set: rotated = np.dot(ops[i][j], coords[i]) self.assertTrue(np.allclose(rotated, coords[j])) def test_symmetrize_molecule2(self): np.random.seed(77) distortion = np.random.randn(len(C2H2F2Br2), 3) / 20 dist_mol = Molecule(C2H2F2Br2.species, C2H2F2Br2.cart_coords + distortion) PA1 = PointGroupAnalyzer(C2H2F2Br2, tolerance=0.1) self.assertTrue(PA1.get_pointgroup().sch_symbol == "Ci") PA2 = PointGroupAnalyzer(dist_mol, tolerance=0.1) self.assertTrue(PA2.get_pointgroup().sch_symbol == "C1") eq = iterative_symmetrize(dist_mol, tolerance=0.3) PA3 = PointGroupAnalyzer(eq["sym_mol"], tolerance=0.1) self.assertTrue(PA3.get_pointgroup().sch_symbol == "Ci") def test_get_kpoint_weights(self): for name in ["SrTiO3", "LiFePO4", "Graphite"]: s = PymatgenTest.get_structure(name) a = SpacegroupAnalyzer(s) ir_mesh = a.get_ir_reciprocal_mesh((4, 4, 4)) weights = [i[1] for i in ir_mesh] weights = np.array(weights) / sum(weights) for i, w in zip(weights, a.get_kpoint_weights([i[0] for i in ir_mesh])): self.assertAlmostEqual(i, w) for name in ["SrTiO3", "LiFePO4", "Graphite"]: s = PymatgenTest.get_structure(name) a = SpacegroupAnalyzer(s) ir_mesh = a.get_ir_reciprocal_mesh((1, 2, 3)) weights = [i[1] for i in ir_mesh] weights = np.array(weights) / sum(weights) for i, w in zip(weights, a.get_kpoint_weights([i[0] for i in ir_mesh])): self.assertAlmostEqual(i, w) v = Vasprun(os.path.join(PymatgenTest.TEST_FILES_DIR, "vasprun.xml")) a = SpacegroupAnalyzer(v.final_structure) wts = a.get_kpoint_weights(v.actual_kpoints) for w1, w2 in zip(v.actual_kpoints_weights, wts): self.assertAlmostEqual(w1, w2) kpts = [[0, 0, 0], [0.15, 0.15, 0.15], [0.2, 0.2, 0.2]] self.assertRaises(ValueError, a.get_kpoint_weights, kpts) class FuncTest(unittest.TestCase): def test_cluster_sites(self): o, c = cluster_sites(CH4, 0.1) self.assertEqual(o.specie.symbol, "C") self.assertEqual(len(c), 1) o, c = cluster_sites(C2H2F2Br2.get_centered_molecule(), 0.1) self.assertIsNone(o) self.assertEqual(len(c), 4) if __name__ == "__main__": unittest.main()
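A condensed sketch of the two analyzer entry points these tests exercise (not part of the test suite; the structures are built inline, mirroring the test data above, so no test files are required):

import numpy as np
from pymatgen.core.structure import Molecule, Structure
from pymatgen.symmetry.analyzer import PointGroupAnalyzer, SpacegroupAnalyzer

# Conventional cubic Cu cell; the analyzer identifies space group Fm-3m (no. 225).
cu = Structure.from_spacegroup("Fm-3m", np.eye(3) * 3, ["Cu"], [[0, 0, 0]])
sga = SpacegroupAnalyzer(cu, symprec=0.01)
print(sga.get_space_group_symbol(), sga.get_space_group_number())

# The bent H2O geometry defined above; its Schoenflies symbol is C2v.
h2o = Molecule(["H", "O", "H"],
               [[0, 0.780362, -0.456316], [0, 0, 0.114079], [0, -0.780362, -0.456316]])
print(PointGroupAnalyzer(h2o).sch_symbol)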
vorwerkc/pymatgen
pymatgen/symmetry/tests/test_analyzer.py
Python
mit
28,003
[ "VASP", "pymatgen" ]
88b327f2a9e17e2bb24c46a6c14cbae3da73ea4d17c8485e456e04b633e30b54
# -*- test-case-name: buildbot.test.test_svnpoller -*- # Based on the work of Dave Peticolas for the P4poll # Changed to svn (using xml.dom.minidom) by Niklaus Giger # Hacked beyond recognition by Brian Warner from twisted.python import log from twisted.internet import defer, reactor, utils from twisted.internet.task import LoopingCall from buildbot import util from buildbot.changes import base from buildbot.changes.changes import Change import xml.dom.minidom def _assert(condition, msg): if condition: return True raise AssertionError(msg) def dbgMsg(myString): log.msg(myString) return 1 # these split_file_* functions are available for use as values to the # split_file= argument. def split_file_alwaystrunk(path): return (None, path) def split_file_branches(path): # turn trunk/subdir/file.c into (None, "subdir/file.c") # and branches/1.5.x/subdir/file.c into ("branches/1.5.x", "subdir/file.c") pieces = path.split('/') if pieces[0] == 'trunk': return (None, '/'.join(pieces[1:])) elif pieces[0] == 'branches': return ('/'.join(pieces[0:2]), '/'.join(pieces[2:])) else: return None class SVNPoller(base.ChangeSource, util.ComparableMixin): """This source will poll a Subversion repository for changes and submit them to the change master.""" compare_attrs = ["svnurl", "split_file_function", "svnuser", "svnpasswd", "pollinterval", "histmax", "svnbin"] parent = None # filled in when we're added last_change = None loop = None working = False def __init__(self, svnurl, split_file=None, svnuser=None, svnpasswd=None, pollinterval=10*60, histmax=100, svnbin='svn'): """ @type svnurl: string @param svnurl: the SVN URL that describes the repository and subdirectory to watch. If this ChangeSource should only pay attention to a single branch, this should point at the repository for that branch, like svn://svn.twistedmatrix.com/svn/Twisted/trunk . If it should follow multiple branches, point it at the repository directory that contains all the branches like svn://svn.twistedmatrix.com/svn/Twisted and also provide a branch-determining function. Each file in the repository has a SVN URL in the form (SVNURL)/(BRANCH)/(FILEPATH), where (BRANCH) could be empty or not, depending upon your branch-determining function. Only files that start with (SVNURL)/(BRANCH) will be monitored. The Change objects that are sent to the Schedulers will see (FILEPATH) for each modified file. @type split_file: callable or None @param split_file: a function that is called with a string of the form (BRANCH)/(FILEPATH) and should return a tuple (BRANCH, FILEPATH). This function should match your repository's branch-naming policy. Each changed file has a fully-qualified URL that can be split into a prefix (which equals the value of the 'svnurl' argument) and a suffix; it is this suffix which is passed to the split_file function. If the function returns None, the file is ignored. Use this to indicate that the file is not a part of this project. For example, if your repository puts the trunk in trunk/... and branches are in places like branches/1.5/..., your split_file function could look like the following (this function is available as svnpoller.split_file_branches):: pieces = path.split('/') if pieces[0] == 'trunk': return (None, '/'.join(pieces[1:])) elif pieces[0] == 'branches': return ('/'.join(pieces[0:2]), '/'.join(pieces[2:])) else: return None If instead your repository layout puts the trunk for ProjectA in trunk/ProjectA/... 
and the 1.5 branch in branches/1.5/ProjectA/..., your split_file function could look like:: pieces = path.split('/') if pieces[0] == 'trunk': branch = None pieces.pop(0) # remove 'trunk' elif pieces[0] == 'branches': pieces.pop(0) # remove 'branches' # grab branch name branch = 'branches/' + pieces.pop(0) else: return None # something weird projectname = pieces.pop(0) if projectname != 'ProjectA': return None # wrong project return (branch, '/'.join(pieces)) The default of split_file= is None, which indicates that no splitting should be done. This is equivalent to the following function:: return (None, path) If you wish, you can override the split_file method with the same sort of function instead of passing in a split_file= argument. @type svnuser: string @param svnuser: If set, the --username option will be added to the 'svn log' command. You may need this to get access to a private repository. @type svnpasswd: string @param svnpasswd: If set, the --password option will be added. @type pollinterval: int @param pollinterval: interval in seconds between polls. The default is 600 seconds (10 minutes). Smaller values decrease the latency between the time a change is recorded and the time the buildbot notices it, but they also increase the system load. @type histmax: int @param histmax: maximum number of changes to look back through. The default is 100. Smaller values decrease system load, but if more than histmax changes are recorded between polls, the extra ones will be silently lost. @type svnbin: string @param svnbin: path to svn binary, defaults to just 'svn'. Use this if your subversion command lives in an unusual location. """ if svnurl.endswith("/"): svnurl = svnurl[:-1] # strip the trailing slash self.svnurl = svnurl self.split_file_function = split_file or split_file_alwaystrunk self.svnuser = svnuser self.svnpasswd = svnpasswd self.svnbin = svnbin self.pollinterval = pollinterval self.histmax = histmax self._prefix = None self.overrun_counter = 0 self.loop = LoopingCall(self.checksvn) def split_file(self, path): # use getattr() to avoid turning this function into a bound method, # which would require it to have an extra 'self' argument f = getattr(self, "split_file_function") return f(path) def startService(self): log.msg("SVNPoller(%s) starting" % self.svnurl) base.ChangeSource.startService(self) # Don't start the loop just yet because the reactor isn't running. # Give it a chance to go and install our SIGCHLD handler before # spawning processes. reactor.callLater(0, self.loop.start, self.pollinterval) def stopService(self): log.msg("SVNPoller(%s) shutting down" % self.svnurl) self.loop.stop() return base.ChangeSource.stopService(self) def describe(self): return "SVNPoller watching %s" % self.svnurl def checksvn(self): # Our return value is only used for unit testing. # we need to figure out the repository root, so we can figure out # repository-relative pathnames later. Each SVNURL is in the form # (ROOT)/(PROJECT)/(BRANCH)/(FILEPATH), where (ROOT) is something # like svn://svn.twistedmatrix.com/svn/Twisted (i.e. there is a # physical repository at /svn/Twisted on that host), (PROJECT) is # something like Projects/Twisted (i.e. within the repository's # internal namespace, everything under Projects/Twisted/ has # something to do with Twisted, but these directory names do not # actually appear on the repository host), (BRANCH) is something like # "trunk" or "branches/2.0.x", and (FILEPATH) is a tree-relative # filename like "twisted/internet/defer.py". 
# our self.svnurl attribute contains (ROOT)/(PROJECT) combined # together in a way that we can't separate without svn's help. If the # user is not using the split_file= argument, then self.svnurl might # be (ROOT)/(PROJECT)/(BRANCH) . In any case, the filenames we will # get back from 'svn log' will be of the form # (PROJECT)/(BRANCH)/(FILEPATH), but we want to be able to remove # that (PROJECT) prefix from them. To do this without requiring the # user to tell us how svnurl is split into ROOT and PROJECT, we do an # 'svn info --xml' command at startup. This command will include a # <root> element that tells us ROOT. We then strip this prefix from # self.svnurl to determine PROJECT, and then later we strip the # PROJECT prefix from the filenames reported by 'svn log --xml' to # get a (BRANCH)/(FILEPATH) that can be passed to split_file() to # turn into separate BRANCH and FILEPATH values. # whew. if self.working: log.msg("SVNPoller(%s) overrun: timer fired but the previous " "poll had not yet finished." % self.svnurl) self.overrun_counter += 1 return defer.succeed(None) self.working = True log.msg("SVNPoller polling") if not self._prefix: # this sets self._prefix when it finishes. It fires with # self._prefix as well, because that makes the unit tests easier # to write. d = self.get_root() d.addCallback(self.determine_prefix) else: d = defer.succeed(self._prefix) d.addCallback(self.get_logs) d.addCallback(self.parse_logs) d.addCallback(self.get_new_logentries) d.addCallback(self.create_changes) d.addCallback(self.submit_changes) d.addCallbacks(self.finished_ok, self.finished_failure) return d def getProcessOutput(self, args): # this exists so we can override it during the unit tests d = utils.getProcessOutput(self.svnbin, args, {}) return d def get_root(self): args = ["info", "--xml", "--non-interactive", self.svnurl] if self.svnuser: args.extend(["--username=%s" % self.svnuser]) if self.svnpasswd: args.extend(["--password=%s" % self.svnpasswd]) d = self.getProcessOutput(args) return d def determine_prefix(self, output): try: doc = xml.dom.minidom.parseString(output) except xml.parsers.expat.ExpatError: dbgMsg("_process_changes: ExpatError in %s" % output) log.msg("SVNPoller._determine_prefix_2: ExpatError in '%s'" % output) raise rootnodes = doc.getElementsByTagName("root") if not rootnodes: # this happens if the URL we gave was already the root. In this # case, our prefix is empty. 
self._prefix = "" return self._prefix rootnode = rootnodes[0] root = "".join([c.data for c in rootnode.childNodes]) # root will be a unicode string _assert(self.svnurl.startswith(root), "svnurl='%s' doesn't start with <root>='%s'" % (self.svnurl, root)) self._prefix = self.svnurl[len(root):] if self._prefix.startswith("/"): self._prefix = self._prefix[1:] log.msg("SVNPoller: svnurl=%s, root=%s, so prefix=%s" % (self.svnurl, root, self._prefix)) return self._prefix def get_logs(self, ignored_prefix=None): args = [] args.extend(["log", "--xml", "--verbose", "--non-interactive"]) if self.svnuser: args.extend(["--username=%s" % self.svnuser]) if self.svnpasswd: args.extend(["--password=%s" % self.svnpasswd]) args.extend(["--limit=%d" % (self.histmax), self.svnurl]) d = self.getProcessOutput(args) return d def parse_logs(self, output): # parse the XML output, return a list of <logentry> nodes try: doc = xml.dom.minidom.parseString(output) except xml.parsers.expat.ExpatError: dbgMsg("_process_changes: ExpatError in %s" % output) log.msg("SVNPoller._parse_changes: ExpatError in '%s'" % output) raise logentries = doc.getElementsByTagName("logentry") return logentries def _filter_new_logentries(self, logentries, last_change): # given a list of logentries, return a tuple of (new_last_change, # new_logentries), where new_logentries contains only the ones after # last_change if not logentries: # no entries, so last_change must stay at None return (None, []) mostRecent = int(logentries[0].getAttribute("revision")) if last_change is None: # if this is the first time we've been run, ignore any changes # that occurred before now. This prevents a build at every # startup. log.msg('svnPoller: starting at change %s' % mostRecent) return (mostRecent, []) if last_change == mostRecent: # an unmodified repository will hit this case log.msg('svnPoller: _process_changes last %s mostRecent %s' % ( last_change, mostRecent)) return (mostRecent, []) new_logentries = [] for el in logentries: if last_change == int(el.getAttribute("revision")): break new_logentries.append(el) new_logentries.reverse() # return oldest first return (mostRecent, new_logentries) def get_new_logentries(self, logentries): last_change = self.last_change (new_last_change, new_logentries) = self._filter_new_logentries(logentries, self.last_change) self.last_change = new_last_change log.msg('svnPoller: _process_changes %s .. 
%s' % (last_change, new_last_change)) return new_logentries def _get_text(self, element, tag_name): try: child_nodes = element.getElementsByTagName(tag_name)[0].childNodes text = "".join([t.data for t in child_nodes]) except Exception: text = "<unknown>" return text def _transform_path(self, path): _assert(path.startswith(self._prefix), "filepath '%s' should start with prefix '%s'" % (path, self._prefix)) relative_path = path[len(self._prefix):] if relative_path.startswith("/"): relative_path = relative_path[1:] where = self.split_file(relative_path) # 'where' is either None or (branch, final_path) return where def create_changes(self, new_logentries): changes = [] for el in new_logentries: # get oldest change first revision = str(el.getAttribute("revision")) dbgMsg("Adding change revision %s" % (revision,)) # TODO: the rest of buildbot may not be ready for unicode 'who' # values author = self._get_text(el, "author") comments = self._get_text(el, "msg") # there is a "date" field, but it provides localtime in the # repository's timezone, whereas we care about buildmaster's # localtime (since this will get used to position the boxes on # the Waterfall display, etc). So ignore the date field and use # our local clock instead. #when = self._get_text(el, "date") #when = time.mktime(time.strptime("%.19s" % when, # "%Y-%m-%dT%H:%M:%S")) branches = {} pathlist = el.getElementsByTagName("paths")[0] for p in pathlist.getElementsByTagName("path"): action = p.getAttribute("action") path = "".join([t.data for t in p.childNodes]) # the rest of buildbot is certainly not yet ready to handle # unicode filenames, because they get put in RemoteCommands # which get sent via PB to the buildslave, and PB doesn't # handle unicode. path = path.encode("ascii") if path.startswith("/"): path = path[1:] where = self._transform_path(path) # if 'where' is None, the file was outside any project that # we care about and we should ignore it if where: branch, filename = where if branch not in branches: branches[branch] = { 'files': []} branches[branch]['files'].append(filename) if 'action' not in branches[branch]: branches[branch]['action'] = action for branch in branches.keys(): action = branches[branch]['action'] files = branches[branch]['files'] number_of_files_changed = len(files) if action == u'D' and number_of_files_changed == 1 and files[0] == '': log.msg("Ignoring deletion of branch '%s'" % branch) else: c = Change(who=author, files=files, comments=comments, revision=revision, branch=branch) changes.append(c) return changes def submit_changes(self, changes): for c in changes: self.parent.addChange(c) def finished_ok(self, res): log.msg("SVNPoller finished polling") dbgMsg('_finished : %s' % res) assert self.working self.working = False return res def finished_failure(self, f): log.msg("SVNPoller failed") dbgMsg('_finished : %s' % f) assert self.working self.working = False return None # eat the failure
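A master.cfg-style sketch of wiring this poller into a buildmaster (the repository URL is hypothetical, c is the usual BuildmasterConfig dictionary, and the five-minute interval is an illustrative choice):

from buildbot.changes.svnpoller import SVNPoller, split_file_branches

c = BuildmasterConfig = {}
c['change_source'] = SVNPoller(
    svnurl="svn://svn.example.org/svn/Project",  # hypothetical repository root
    split_file=split_file_branches,  # trunk/... -> (None, path); branches/x/... -> ('branches/x', path)
    pollinterval=5 * 60,  # poll every five minutes
    histmax=100)          # inspect at most 100 revisions per poll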
gward/buildbot
buildbot/changes/svnpoller.py
Python
gpl-2.0
20,098
[ "Brian" ]
9da974bd2b141aa56c785934d4ef85e6ad18fd5644d5a82f2c722fda97783f08
# Copyright 2014 M. A. Zentile, J. Keaveney, L. Weller, D. Whiting, # C. S. Adams and I. G. Hughes. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from setuptools import setup print("\n\n NOTE: wxPython >= 2.8 needs to be installed for the GUI part of this program to work! It is not currently possible to install it automatically through setuptools / pip / easy_install. wxPython is included with Enthought Canopy, so for Windows systems this is all that's needed. For Linux systems, wxPython is not supported in Canopy and needs to be installed separately. To install wxPython, please visit the wxPython website: http://www.wxpython.org/download.php \n\n") setup( name='ElecSus', description='(Atomic Physics) Calculate the weak-probe electric susceptibility for alkali-metal vapours', author='James Keaveney et al.', author_email='james.keaveney@durham.ac.uk', url='https://github.com/jameskeaveney/ElecSus', version='3.0.7', packages=['elecsus', 'elecsus.libs', 'elecsus.tests'], package_data={'elecsus':['images/elecsus_group.ico', 'images/elecsus_t_group.ico', 'images/elecsus-logo.png', 'images/jqc-logo.png', 'docs/ElecSus_GUI_UserGuide.pdf']}, license='Apache License, Version 2.0', long_description=open('README.md').read(), install_requires=[ #'wxPython >= 2.8.11.0' ### wxPython is needed but can't be installed from PyPi. 'numpy', 'scipy >= 0.12.0', 'matplotlib >= 1.3.1', 'lmfit >= 0.9.5', 'psutil' ], classifiers=[ 'Development Status :: 1 - Planning', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 2.7', 'Topic :: Scientific/Engineering :: Physics'] )
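A hypothetical post-install smoke test (the package name comes from the packages= list above; that the top-level package imports without wxPython is an assumption, since only the GUI depends on it):

# Run after installing from the repository root, e.g. with `pip install .`
import elecsus  # hypothetical check: fails if the package was not installed
print(elecsus.__name__)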
jameskeaveney/ElecSus
setup.py
Python
apache-2.0
2,241
[ "VisIt" ]
e2b094c48c39a5d2b024a6abb5ed59bf6e28838ceafbf4f97ea3f69cbae0af13