text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
import cgt
from cgt import core
from cgt import nn
import numpy as np
import cPickle as pickle
from scipy.stats import norm
import matplotlib.pyplot as plt
from example_utils import fetch_dataset
'''
MNIST manifold demo (with 2-dimensional latent z) using variational autoencoder
'''
rng = np.random.RandomState(1234)
def kld_unit_mvn(mu, var):
    """Negative KL divergence KL(N(mu, diag(var)) || N(0, I)), one value per
    batch row. Returns the *negative* KL, matching its use in the variational
    lower bound below (cost negates it again)."""
    dim = mu.shape[1]
    logvar_term = cgt.sum(cgt.log(var), axis=1)
    mu_term = cgt.sum(cgt.square(mu), axis=1)
    var_term = cgt.sum(var, axis=1)
    return (dim + logvar_term - mu_term - var_term) / 2.0
def log_diag_mvn(mu, var):
    """Return a function computing log N(x; mu, diag(var)) for batched rows x."""
    def logpdf(x):
        # normalization constant, log-determinant and Mahalanobis terms,
        # each summed over the feature axis
        dim = mu.shape[1]
        const = (-dim / 2.0) * np.log(2 * np.pi)
        logdet = 0.5 * cgt.sum(cgt.log(var), axis=1)
        maha = cgt.sum(0.5 * (1.0 / var) * (x - mu) * (x - mu), axis=1)
        return const - logdet - maha
    return logpdf
class HiddenLayer(object):
    """One fully-connected layer: output = activation(dot(input, W) + b)."""
    # adapted from http://deeplearning.net/tutorial/mlp.html
    def __init__(self, input, n_in, n_out, W=None, b=None,
                 activation=cgt.tanh, prefix=""):
        # input      -- symbolic (batch, n_in) matrix
        # n_in/n_out -- fan-in / fan-out of the layer
        # W, b       -- optional pre-built shared parameters; created if None
        # activation -- elementwise nonlinearity, or None for a linear layer
        # prefix     -- name prefix for the created shared variables
        self.n_in = n_in
        self.n_out = n_out
        if W is None:
            # XXX replace with nn init
            # Glorot/Bengio uniform init over [-sqrt(6/(n_in+n_out)), +sqrt(6/(n_in+n_out))]
            W_values = np.asarray(
                rng.uniform(
                    low=-np.sqrt(6. / (n_in + n_out)),
                    high=np.sqrt(6. / (n_in + n_out)),
                    size=(n_in, n_out)
                ),
                dtype=cgt.floatX
            )
            # sigmoid layers use a 4x wider range (deeplearning.net tutorial)
            if activation == cgt.sigmoid:
                W_values *= 4
            W = cgt.shared(W_values, name=prefix+"_W")
        if b is None:
            b_values = np.zeros((n_out,), dtype=cgt.floatX)
            b = cgt.shared(b_values, name=prefix+"_b")
        self.W = W
        self.b = b
        # XXX broadcast api may change
        # broadcast the (n_out,) bias across the batch dimension
        lin_output = cgt.broadcast("+", cgt.dot(input, self.W),
                                   cgt.dimshuffle(self.b, ["x", 0]), "xx,1x")
        self.output = (
            lin_output if activation is None
            else activation(lin_output)
        )
        # parameters of the model
        self.params = [self.W, self.b]
class _MLP(object):
    """Stack of `nlayers` tanh HiddenLayers; building block for the MLP
    subclasses below. The final hidden representation is
    self.hidden_layers[-1].output; output layers are added by subclasses."""
    def __init__(self, x, n_in, n_hid, nlayers=1, prefix=""):
        self.nlayers = nlayers
        self.hidden_layers = list()
        inp = x
        for k in xrange(self.nlayers):
            hlayer = HiddenLayer(
                input=inp,
                n_in=n_in,
                n_out=n_hid,
                activation=cgt.tanh,
                prefix=prefix + ("_%d" % (k + 1))
            )
            # subsequent layers are n_hid -> n_hid, fed from this layer
            n_in = n_hid
            inp = hlayer.output
            self.hidden_layers.append(hlayer)
        self.params = [param for l in self.hidden_layers for param in l.params]
        # BUG FIX: the original did `self.input = input`, storing the *builtin*
        # `input` function (the parameter is named `x`); keep the symbolic input.
        self.input = x
        # NOTE output layer computed by instantations
class GaussianMLP(_MLP):
    """MLP whose output layer parameterizes a diagonal Gaussian N(mu, var).

    As encoder (pass `eps`):  self.out = mu + sigma * eps (reparameterization).
    As decoder (pass `y`):    self.out = sigmoid(mu) and self.cost is the
    negative log-likelihood of `y` under N(out, diag(var)).
    Exactly one of `eps` / `y` should be given.
    """
    def __init__(self, x, n_in, n_hid, n_out, nlayers=1, y=None, eps=None):
        super(GaussianMLP, self).__init__(x, n_in, n_hid, nlayers=nlayers, prefix="GaussianMLP_hidden")
        self.mu_layer = HiddenLayer(
            input=self.hidden_layers[-1].output,
            n_in=self.hidden_layers[-1].n_out,
            n_out=n_out,
            activation=None,
            prefix="GaussianMLP_mu"
        )
        # log(sigma^2)
        self.logvar_layer = HiddenLayer(
            input=self.hidden_layers[-1].output,
            n_in=self.hidden_layers[-1].n_out,
            n_out=n_out,
            activation=None,
            prefix="GaussianMLP_logvar"
        )
        self.mu = self.mu_layer.output
        self.var = cgt.exp(self.logvar_layer.output)
        self.sigma = cgt.sqrt(self.var)
        self.params = self.params + self.mu_layer.params + \
            self.logvar_layer.params
        # for use as encoder
        if eps is not None:
            assert y is None
            self.out = self.mu + self.sigma * eps
        # for use as decoder
        # BUG FIX: the original tested `if y:`, truth-testing a symbolic node,
        # which is unreliable and inconsistent with both the `eps` check above
        # and BernoulliMLP; use an explicit `is not None` test.
        if y is not None:
            assert eps is None
            self.out = cgt.sigmoid(self.mu)
            self.cost = -cgt.sum(log_diag_mvn(self.out, self.var)(y))
class BernoulliMLP(_MLP):
    """MLP decoder with sigmoid outputs; when a target `y` is supplied the
    cost is the summed binary cross-entropy against it."""
    def __init__(self, x, n_in, n_hid, n_out, nlayers=1, y=None):
        super(BernoulliMLP, self).__init__(x, n_in, n_hid, nlayers=nlayers, prefix="BernoulliMLP_hidden")
        last_hidden = self.hidden_layers[-1]
        self.out_layer = HiddenLayer(
            input=last_hidden.output,
            n_in=last_hidden.n_out,
            n_out=n_out,
            activation=cgt.sigmoid,
            prefix="BernoulliMLP_y_hat"
        )
        self.params = self.params + self.out_layer.params
        if y is not None:
            self.out = self.out_layer.output
            self.cost = cgt.sum(nn.binary_crossentropy(self.out, y))
class VAE(object):
    """Variational autoencoder (SGVB) with a Gaussian encoder and a Bernoulli
    or Gaussian decoder, trained with Adagrad plus L2 weight decay.

    Compiled functions:
      train(x, eps)  -- one Adagrad step, returns the minibatch cost
      test(x, eps)   -- cost only, no parameter updates
      encode(x, eps) -- sampled latent code z = mu + sigma * eps
    """
    def __init__(self, xdim, args, dec="bernoulli"):
        self.xdim = xdim
        self.hdim = args.hdim
        self.zdim = args.zdim
        self.lmbda = args.lmbda  # weight decay coefficient * 2
        self.x = cgt.matrix("x", dtype=cgt.floatX)
        self.eps = cgt.matrix("eps", dtype=cgt.floatX)
        # encoder q(z|x): Gaussian reparameterized via externally supplied eps
        self.enc_mlp = GaussianMLP(self.x, self.xdim, self.hdim, self.zdim, nlayers=args.nlayers, eps=self.eps)
        if dec == "bernoulli":
            # log p(x | z) defined as -CE(x, y) = dec_mlp.cost(y)
            self.dec_mlp = BernoulliMLP(self.enc_mlp.out, self.zdim, self.hdim, self.xdim, nlayers=args.nlayers, y=self.x)
        elif dec == "gaussian":
            self.dec_mlp = GaussianMLP(self.enc_mlp.out, self.zdim, self.hdim, self.xdim, nlayers=args.nlayers, y=self.x)
        else:
            # BUG FIX: original format string was "%" (not "%s"), which raises
            # ValueError instead of producing the intended error message.
            raise RuntimeError("unrecognized decoder %s" % dec)
        # negative ELBO averaged over the minibatch (kld_unit_mvn returns -KL)
        self.cost = (-cgt.sum(kld_unit_mvn(self.enc_mlp.mu, self.enc_mlp.var)) + self.dec_mlp.cost) / args.batch_size
        self.params = self.enc_mlp.params + self.dec_mlp.params
        # L2 regularization folded directly into the gradients
        self.gparams = [cgt.grad(self.cost, [p])[0] + self.lmbda * p for p in self.params]
        self.gaccums = [cgt.shared(np.zeros(p.op.get_value().shape, dtype=cgt.floatX)) for p in self.params]
        # XXX replace w/ adagrad update from nn
        ADAGRAD_EPS = 1e-10  # for stability
        self.updates = [
            (param, param - args.lr * gparam / cgt.sqrt(gaccum + cgt.square(gparam) + ADAGRAD_EPS))
            for param, gparam, gaccum in zip(self.params, self.gparams, self.gaccums)
        ]
        self.updates += [
            (gaccum, gaccum + cgt.square(gparam))
            for gaccum, gparam in zip(self.gaccums, self.gparams)
        ]
        self.train = cgt.function(
            [self.x, self.eps],
            self.cost,
            updates=self.updates
        )
        self.test = cgt.function(
            [self.x, self.eps],
            self.cost,
            updates=None
        )
        # can be used for semi-supervised learning for example
        self.encode = cgt.function(
            [self.x, self.eps],
            self.enc_mlp.out
        )
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", default=100)
parser.add_argument("--nlayers", default=1, type=int, help="number of hidden layers in MLP before output layers")
parser.add_argument("--hdim", default=500, type=int, help="dimension of hidden layer")
parser.add_argument("--zdim", default=2, type=int, help="dimension of continuous latent variable")
parser.add_argument("--lmbda", default=0.001, type=float, help="weight decay coefficient")
parser.add_argument("--lr", default=0.01, type=float, help="learning rate")
parser.add_argument("--epochs", default=1000, type=int, help="number of passes over dataset")
parser.add_argument("--print_every", default=100, type=int, help="how often to print cost")
parser.add_argument("--outfile", default="vae_model.pk", help="output file to save model to")
args = parser.parse_args()
print(args)
if args.epochs > 100:
print("NOTE: training might take a while. You may want to first sanity check by setting --epochs to something like 20 (manifold will be fuzzy).")
# set up dataset
mnist = fetch_dataset("http://rll.berkeley.edu/cgt-data/mnist.npz")
X = (mnist["X"]/255.).astype(cgt.floatX)
y = mnist["y"]
np.random.seed(0)
sortinds = np.random.permutation(70000)
X = X[sortinds]
y = y[sortinds]
train_x = X[0:50000]
train_y = y[0:50000]
valid_x = X[50000:60000]
valid_y = y[50000:60000]
# run SGVB algorithm
model = VAE(train_x.shape[1], args, dec="bernoulli")
expcost = None
num_train_batches = train_x.shape[0] / args.batch_size
num_valid_batches = valid_x.shape[0] / args.batch_size
valid_freq = num_train_batches
for b in xrange(args.epochs * num_train_batches):
k = b % num_train_batches
x = train_x[k * args.batch_size:(k + 1) * args.batch_size, :]
eps = np.random.randn(x.shape[0], args.zdim).astype(cgt.floatX)
cost = model.train(x, eps)
if not expcost:
expcost = cost
else:
expcost = 0.01 * cost + 0.99 * expcost
if (b + 1) % args.print_every == 0:
print("iter %d, cost %f, expcost %f" % (b + 1, cost, expcost))
if (b + 1) % valid_freq == 0:
valid_cost = 0
for l in xrange(num_valid_batches):
x_val = valid_x[l * args.batch_size:(l + 1) * args.batch_size, :]
eps_val = np.zeros((x_val.shape[0], args.zdim), dtype=cgt.floatX)
valid_cost = valid_cost + model.test(x_val, eps_val)
valid_cost = valid_cost / num_valid_batches
print("valid cost: %f" % valid_cost)
# XXX fix pickling of cgt models
#print("saving final model")
#with open(args.outfile, "wb") as f:
#pickle.dump(model, f, protocol=pickle.HIGHEST_PROTOCOL)
# XXX use this to sample, should later be able to compile f(z) = y directly (See Issue #18)
newz = cgt.matrix("newz", dtype=cgt.floatX)
newy = cgt.core.clone(model.dec_mlp.out, {model.enc_mlp.out:newz})
decode = cgt.function(
[newz],
newy
)
S = (28, 28)
M = 20
manifold = np.zeros((S[0]*M, S[1]*M), dtype=cgt.floatX)
for z1 in xrange(M):
for z2 in xrange(M):
print z1, z2
z = np.zeros((1, 2))
# pass unit square through inverse Gaussian CDF
z[0, 0] = norm.ppf(z1 * 1.0/M + 1.0/(M * 2))
z[0, 1] = norm.ppf(z2 * 1.0/M + 1.0/(M * 2))
z = np.array(z, dtype=cgt.floatX)
x_hat = decode(z)
x_hat = x_hat.reshape(S)
manifold[z1 * S[0]:(z1 + 1) * S[0],
z2 * S[1]:(z2 + 1) * S[1]] = x_hat
plt.imshow(manifold, cmap="Greys_r")
plt.axis("off")
plt.show()
if __name__ == "__main__":
main()
|
nebw/cgt
|
examples/demo_variational_autoencoder.py
|
Python
|
mit
| 10,799
|
[
"Gaussian"
] |
04c16d6edbaa7eb6a51d75c38133d904ebed0100ca273287cbf25d275e06b758
|
"""
========================
Random Number Generation
========================
Use ``default_rng()`` to create a `Generator` and call its methods.
=============== =========================================================
Generator
--------------- ---------------------------------------------------------
Generator Class implementing all of the random number distributions
default_rng Default constructor for ``Generator``
=============== =========================================================
============================================= ===
BitGenerator Streams that work with Generator
--------------------------------------------- ---
MT19937
PCG64
Philox
SFC64
============================================= ===
============================================= ===
Getting entropy to initialize a BitGenerator
--------------------------------------------- ---
SeedSequence
============================================= ===
Legacy
------
For backwards compatibility with previous versions of numpy before 1.17, the
various aliases to the global `RandomState` methods are left alone and do not
use the new `Generator` API.
==================== =========================================================
Utility functions
-------------------- ---------------------------------------------------------
random Uniformly distributed floats over ``[0, 1)``
bytes Uniformly distributed random bytes.
permutation Randomly permute a sequence / generate a random sequence.
shuffle Randomly permute a sequence in place.
choice Random sample from 1-D array.
==================== =========================================================
==================== =========================================================
Compatibility
functions - removed
in the new API
-------------------- ---------------------------------------------------------
rand Uniformly distributed values.
randn Normally distributed values.
ranf Uniformly distributed floating point numbers.
random_integers Uniformly distributed integers in a given range.
(deprecated, use ``integers(..., closed=True)`` instead)
random_sample Alias for `random_sample`
randint Uniformly distributed integers in a given range
seed Seed the legacy random number generator.
==================== =========================================================
==================== =========================================================
Univariate
distributions
-------------------- ---------------------------------------------------------
beta Beta distribution over ``[0, 1]``.
binomial Binomial distribution.
chisquare :math:`\\chi^2` distribution.
exponential Exponential distribution.
f F (Fisher-Snedecor) distribution.
gamma Gamma distribution.
geometric Geometric distribution.
gumbel Gumbel distribution.
hypergeometric Hypergeometric distribution.
laplace Laplace distribution.
logistic Logistic distribution.
lognormal Log-normal distribution.
logseries Logarithmic series distribution.
negative_binomial Negative binomial distribution.
noncentral_chisquare Non-central chi-square distribution.
noncentral_f Non-central F distribution.
normal Normal / Gaussian distribution.
pareto Pareto distribution.
poisson Poisson distribution.
power Power distribution.
rayleigh Rayleigh distribution.
triangular Triangular distribution.
uniform Uniform distribution.
vonmises Von Mises circular distribution.
wald Wald (inverse Gaussian) distribution.
weibull Weibull distribution.
zipf Zipf's distribution over ranked data.
==================== =========================================================
==================== ==========================================================
Multivariate
distributions
-------------------- ----------------------------------------------------------
dirichlet Multivariate generalization of Beta distribution.
multinomial Multivariate generalization of the binomial distribution.
multivariate_normal Multivariate generalization of the normal distribution.
==================== ==========================================================
==================== =========================================================
Standard
distributions
-------------------- ---------------------------------------------------------
standard_cauchy Standard Cauchy-Lorentz distribution.
standard_exponential Standard exponential distribution.
standard_gamma Standard Gamma distribution.
standard_normal Standard normal distribution.
standard_t Standard Student's t-distribution.
==================== =========================================================
==================== =========================================================
Internal functions
-------------------- ---------------------------------------------------------
get_state Get tuple representing internal state of generator.
set_state Set state of generator.
==================== =========================================================
"""
from __future__ import division, absolute_import, print_function
__all__ = [
'beta',
'binomial',
'bytes',
'chisquare',
'choice',
'dirichlet',
'exponential',
'f',
'gamma',
'geometric',
'get_state',
'gumbel',
'hypergeometric',
'laplace',
'logistic',
'lognormal',
'logseries',
'multinomial',
'multivariate_normal',
'negative_binomial',
'noncentral_chisquare',
'noncentral_f',
'normal',
'pareto',
'permutation',
'poisson',
'power',
'rand',
'randint',
'randn',
'random',
'random_integers',
'random_sample',
'ranf',
'rayleigh',
'sample',
'seed',
'set_state',
'shuffle',
'standard_cauchy',
'standard_exponential',
'standard_gamma',
'standard_normal',
'standard_t',
'triangular',
'uniform',
'vonmises',
'wald',
'weibull',
'zipf',
]
# add these for module-freeze analysis (like PyInstaller)
from . import _pickle
from . import common
from . import bounded_integers
from .mtrand import *
from .generator import Generator, default_rng
from .bit_generator import SeedSequence
from .mt19937 import MT19937
from .pcg64 import PCG64
from .philox import Philox
from .sfc64 import SFC64
from .mtrand import RandomState
__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937',
'Philox', 'PCG64', 'SFC64', 'default_rng']
def __RandomState_ctor():
    """Return a RandomState instance.

    This function exists solely to assist (un)pickling.

    Note that the state of the RandomState returned here is irrelevant, as this
    function's entire purpose is to return a newly allocated RandomState whose
    state the pickle machinery can then set. Consequently the RandomState
    returned by this function is a freshly allocated copy with a seed=0.

    See https://github.com/numpy/numpy/issues/4763 for a detailed discussion
    """
    return RandomState(seed=0)
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
|
MSeifert04/numpy
|
numpy/random/__init__.py
|
Python
|
bsd-3-clause
| 7,521
|
[
"Gaussian"
] |
fdd737334300f902c2afac52a4b6b57c40355fe7d9c968aa3285f4eb734a525b
|
""" Basic setuptools script for DIRAC.
    Does not contain any dependency
"""
import sys
import os
import glob

# Actual setuptools
from setuptools import setup, find_packages

# Directory containing this setup.py; it doubles as the 'DIRAC' package root.
base_dir = os.path.abspath(os.path.dirname(__file__))

# Collect every sub-package, leaving out tests and scripts.
allPackages = find_packages(where=base_dir, exclude=["*test*", "*scripts*"])

# setup.py lives *inside* the DIRAC module, so each discovered package 'X.Y'
# must be exposed as 'DIRAC.X.Y' and mapped back onto base_dir/X/Y.
package_dir = {"DIRAC.%s" % name: os.path.join(base_dir, name.replace('.', '/'))
               for name in allPackages}
allPackages = ["DIRAC.%s" % name for name in allPackages]

# The synthetic top-level 'DIRAC' package maps to the source root itself.
allPackages.insert(0, 'DIRAC')
package_dir['DIRAC'] = base_dir

# Every */scripts/*.py file is shipped as an executable script.
scripts = glob.glob('%s/*/scripts/*.py' % base_dir)

setup(
    name="DIRAC",
    version="6.19.0",
    url="https://github.com/DIRACGRID/DIRAC",
    license="GPLv3",
    package_dir=package_dir,
    packages=allPackages,
    scripts=scripts,
)
|
petricm/DIRAC
|
setup.py
|
Python
|
gpl-3.0
| 1,234
|
[
"DIRAC"
] |
e5043fc312d19cec5b4c31a704f8d760f265c5b9fb58aeea4b7acae168d46d1e
|
"""
Component-level tests for builder.
"""
__author__ = "Dan Gunter <dkgunter@lbl.gov>"
__date__ = "4/24/14"
# Stdlib
import logging
import time
import unittest
# Package
from pymatgen.db.tests.common import ComponentTest, get_component_logger
_log = get_component_logger("comp_incr")
class BuilderComponentTest(ComponentTest):
    """Component tests for the example builders, run serially and in parallel.

    BUG FIX throughout: the original inserted records with
    ``map(addrec, list(range(...)))``.  This file targets Python 3 (it uses
    f-strings), where ``map`` returns a *lazy* iterator; the unconsumed
    iterator meant no records were ever inserted.  All insertions now use
    eager ``for`` loops.
    """

    EX_MOD = "pymatgen.db.builders.examples"

    def test_copy_builder(self):
        self._test_copy_builder(1)

    def test_copy_builder_parallel(self):
        self._test_copy_builder(8)

    def _test_copy_builder(self, ncores):
        """Copy records source->target with CopyBuilder; check counts and timing."""
        # Init builder
        bld_args = [
            self.EX_MOD + ".copy_builder.CopyBuilder",
            "source=" + self.src_conf.name,
            "target=" + self.dst_conf.name,
            "crit={}",
            "-i",
            "copy:number",
            "-n",
            str(ncores),
        ]
        if _log.isEnabledFor(logging.DEBUG):
            bld_args.append("-vv")

        # Insert a new record #x
        def addrec(x):
            self.src.insert_one({"number": x, "data": [1, 2, 3], "name": f"mp-{x:d}"})

        # Add first batch of records (eagerly -- see class docstring)
        for x in range(1000):
            addrec(x)
        # Run builder
        self.mgbuild(bld_args)
        # Count records in copied-to collection
        n = self.dst.count()
        self.assertTrue(n == 1000, "Bad count after 1st copy: " "expected=1000 got={:d}".format(n))
        # do a few more copies; the single-record run measures fixed overhead
        total, m, ovhd, rectm = n, (1, 100, 500, 1000, 501, 101), 0, {}
        for i, newrec in enumerate(m):
            _log.info(f"Build, #records = {newrec:d}")
            # Add records
            for x in range(total, total + newrec):
                addrec(x)
            # Copy
            t0 = time.time()
            self.mgbuild(bld_args)
            t1 = time.time()
            if newrec == 1:
                ovhd = t1 - t0
            else:
                rectm[newrec] = t1 - t0 - ovhd
            # count records in copied-to collection
            n = self.dst.count()
            total += newrec
            self.assertEqual(
                n,
                total,
                "Bad count after copy #{:d}: " "expected={:d} got={:d}".format(i + 2, total, n),
            )
        _log.info(f"Overhead = {ovhd:.1f} seconds")
        for sz in m[1:]:
            _log.info(f"{sz:d} = {rectm[sz]:g}s, {rectm[sz] / sz * 1e6:.0f}us/rec")

    def test_maxval_builder(self):
        self._test_maxval_builder(1)

    def test_maxval_builder_parallel(self):
        self._test_maxval_builder(8)

    def _test_maxval_builder(self, ncores):
        """Incremental build with MaxValueBuilder; verify per-group maxima."""
        groups = ["A", "B", "C", "D"]
        # Init builder
        bld_args = [
            self.EX_MOD + ".maxvalue_builder.MaxValueBuilder",
            "source=" + self.src_conf.name,
            "target=" + self.dst_conf.name,
            "-i",
            "copy:recid",
            "-n",
            str(ncores),
        ]
        if _log.isEnabledFor(logging.DEBUG):
            bld_args.append("-vv")
        # Way to cycle groups
        get_group = lambda x, n: groups[:n][x % n]
        # initially use all but the last group
        ngrp = len(groups) - 1

        # Insert a new record #x (reads ngrp at call time)
        def addrec(x):
            self.src.insert_one({"recid": x, "value": x, "group": get_group(x, ngrp)})

        # Add first batch of records (eagerly -- see class docstring)
        nrec = 10
        for x in range(nrec):
            addrec(x)
        # Run builder
        self.mgbuild(bld_args)
        # Check number of records in target
        ntarget = self.dst.count()
        self.assertEqual(ntarget, ngrp)
        # Check max values for each group
        group_maxes = {get_group(x, ngrp): x for x in range(nrec - 1, nrec - ngrp - 1, -1)}
        for rec in self.dst.find({}):
            self.assertEqual(rec["value"], group_maxes[rec["group"]])
        # Add some more records, and a new group
        rmin, rmax = 20, 25
        ngrp = len(groups)
        for x in range(rmin, rmax):
            addrec(x)
        # Re-run builder
        self.mgbuild(bld_args)
        # Check number of records in target
        ntarget = self.dst.count()
        self.assertEqual(ntarget, ngrp)
        # Check max values for each group
        group_maxes = {get_group(x, ngrp): x for x in range(rmin, rmax)}
        for rec in self.dst.find({}):
            self.assertEqual(rec["value"], group_maxes[rec["group"]])
if __name__ == "__main__":
unittest.main()
|
materialsproject/pymatgen-db
|
pymatgen/db/builders/tests/comp_incr.py
|
Python
|
mit
| 4,507
|
[
"pymatgen"
] |
8a3d6afd3dfdf9d98c20ede9f18a9b61ba3a643b173eb9c5e52b31b6fd11ec47
|
# -*- coding: utf-8 -*-
#
# Markov Logic Networks
#
# (C) 2012-2015 by Daniel Nyga
# 2006-2011 by Dominik Jain
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from dnutils import logs
import optimize
import sys
from numpy.ma.core import exp
from pracmln.mln.constants import HARD
try:
import numpy
except:
pass
logger = logs.getlogger(__name__)
class AbstractLearner(object):
    '''
    Abstract base class for every MLN learning algorithm.

    Subclasses implement the objective `_f` and gradient `_grad` over the
    full per-formula weight vector; this class handles fixed-weight / hard
    formulas, the optional Gaussian prior and the optimizer plumbing.
    '''
    def __init__(self, mrf=None, **params):
        # NOTE(review): `mrf` defaults to None but `self.mrf.apply_cw()` is
        # called unconditionally -- omitting mrf raises AttributeError. Confirm
        # whether the default is ever used.
        self.mrf = mrf
        self._params = params
        self.mrf.apply_cw()
        # full weight vector (one entry per formula), populated by run()
        self._w = None
    @property
    def prior_stdev(self):
        # stddev of the Gaussian prior on weights; None disables the prior
        return self._params.get('prior_stdev')
    @property
    def verbose(self):
        return self._params.get('verbose', False)
    @property
    def use_init_weights(self):
        # start optimization from the formulas' current weights instead of zeros
        return self._params.get('use_init_weights')
    @property
    def usegrad(self):
        return True
    @property
    def usef(self):
        return True
    @property
    def multicore(self):
        return self._params.get('multicore', False)
    @property
    def weights(self):
        return self._w
    @property
    def maxrepeat(self):
        return self._params.get('maxrepeat', 1)
    def repeat(self):
        # whether another optimization round should run; subclasses may override
        return False
    def _add_fixweights(self, w):
        # Rebuild the full weight vector: fixed/hard formulas keep their stored
        # weight; variable formulas consume the next entry of the short vector `w`.
        i = 0
        w_ = []
        for f in self.mrf.formulas:
            if self.mrf.mln.fixweights[f.idx] or f.weight == HARD:
                w_.append(self._w[f.idx])
            else:
                w_.append(w[i])
                i += 1
        return w_
    def _varweights(self):
        # the variable (non-fixed, non-hard) entries of the full weight vector
        return self._filter_fixweights(self._w)
    def f(self, weights):
        # Objective seen by the optimizer: log-likelihood plus (optional) log-prior,
        # evaluated on the short variable-weight vector.
        # reconstruct full weight vector
        w = self._add_fixweights(weights)
        # compute prior
        prior = 0
        if self.prior_stdev is not None:
            for w_ in w:  # we have to use the log of the prior here
                prior -= 1. / (2. * (self.prior_stdev ** 2)) * w_ ** 2
        # compute log likelihood
        likelihood = self._f(w)
        if self.verbose:
            sys.stdout.write(' \r')
            if self.prior_stdev is not None:
                sys.stdout.write(' log P(D|w) + log P(w) = %f + %f = %f\r' % (likelihood, prior, likelihood + prior))
            else:
                sys.stdout.write(' log P(D|w) = %f\r' % likelihood)
            sys.stdout.flush()
        return likelihood + prior
    def grad(self, weights):
        # gradient of f wrt the variable weights only
        w = self._add_fixweights(weights)
        grad = self._grad(w)
        self._grad_ = grad
        # add gaussian prior (derivative of the log-prior term used in f)
        if self.prior_stdev is not None:
            for i, weight in enumerate(w):
                grad[i] -= 1. / (self.prior_stdev ** 2) * weight
        return self._filter_fixweights(grad)
    def __call__(self, weights):
        return self.likelihood(weights)
    def likelihood(self, wt):
        # likelihood = exp(objective); note the prior is included when enabled
        l = self.f(wt)
        l = exp(l)
        return l
    def _fDummy(self, wt):
        ''' a dummy target function that is used when f is disabled '''
        # NOTE(review): this guards on 'dummy_f' but only ever sets
        # 'dummyFCount', so the counter is re-initialized on every call and the
        # >150 cutoff can never trigger -- looks like a latent bug; confirm.
        if not hasattr(self, 'dummy_f'):
            self.dummyFCount = 0
        self.dummyFCount += 1
        if self.dummyFCount > 150:
            return 0
        print "self.dummyFCount", self.dummyFCount
        if not hasattr(self, 'dummyFValue'):
            self.dummyFValue = 0
        if not hasattr(self, 'lastFullGradient'):
            self.dummyFValue = 0
        else:
            self.dummyFValue += sum(abs(self.lastFullGradient))
        print "_f: self.dummyFValue = ", self.dummyFValue
        return self.dummyFValue
    def _filter_fixweights(self, v):
        '''
        Removes from the vector `v` all elements at indices that correspond to a fixed weight formula index.
        or a hard constraint formula.
        '''
        if len(v) != len(self.mrf.formulas):
            raise Exception('Vector must have same length as formula weights')
        v_ = []  # numpy.zeros(len(v), numpy.float64)
        for val in [v[i] for i in range(len(self.mrf.formulas)) if not self.mrf.mln.fixweights[i] and self.mrf.mln.weights[i] != HARD]:
            v_.append(val)
        return v_
    def run(self, **params):
        '''
        Learn the weights of the MLN given the training data previously
        loaded
        '''
        if not 'scipy' in sys.modules:
            raise Exception("Scipy was not imported! Install numpy and scipy if you want to use weight learning.")
        # initial parameter vector: all zeros or weights from formulas
        self._w = [0] * len(self.mrf.formulas)
        for f in self.mrf.formulas:
            if self.mrf.mln.fixweights[f.idx] or self.use_init_weights or f.weight == HARD:
                self._w[f.idx] = f.weight
        # run up to maxrepeat optimization rounds, stopping when repeat() is False
        runs = 0
        while runs < self.maxrepeat:
            self._prepare()
            self._optimize(**self._params)
            self._cleanup()
            runs += 1
            if not self.repeat(): break
        return self.weights
    def _prepare(self):
        # hook for subclasses: called before each optimization round
        pass
    def _cleanup(self):
        # hook for subclasses: called after each optimization round
        pass
    def _optimize(self, optimizer='bfgs', **params):
        # optimize only the variable weights, then splice the fixed ones back in
        w = self._varweights()
        if optimizer == "directDescent":
            opt = optimize.DirectDescent(w, self, **params)
        elif optimizer == "diagonalNewton":
            opt = optimize.DiagonalNewton(w, self, **params)
        else:
            # any other name is forwarded to scipy's optimizers
            opt = optimize.SciPyOpt(optimizer, w, self, **params)
        w = opt.run()
        self._w = self._add_fixweights(w)
    def hessian(self, wt):
        # NOTE(review): `_reconstructFullWeightVectorWithFixedWeights`,
        # `_fixedWeightFormulas` and `self.mln` do not exist on this class --
        # they look like pre-refactor names -- so this method (and the matrix
        # projection below) appears dead/broken. Confirm before relying on it.
        wt = self._reconstructFullWeightVectorWithFixedWeights(wt)
        wt = map(float, wt)
        fullHessian = self._hessian(wt)
        return self._projectMatrixToNonFixedWeightIndices(fullHessian)
    def _projectMatrixToNonFixedWeightIndices(self, matrix):
        # drop rows/columns belonging to fixed-weight formulas from the Hessian
        if len(self._fixedWeightFormulas) == 0:
            return matrix
        dim = len(self.mln.formulas) - len(self._fixedWeightFormulas)
        proj = numpy.zeros((dim, dim), numpy.float64)
        i2 = 0
        for i in xrange(len(self.mln.formulas)):
            if (i in self._fixedWeightFormulas):
                continue
            j2 = 0
            for j in xrange(len(self.mln.formulas)):
                if (j in self._fixedWeightFormulas):
                    continue
                proj[i2][j2] = matrix[i][j]
                j2 += 1
            i2 += 1
        return proj
    def _hessian(self, wt):
        # subclasses that support second-order optimizers override this
        raise Exception("The learner '%s' does not provide a Hessian computation; use another optimizer!" % str(type(self)))
    def _f(self, wt, **params):
        # subclasses override this with the actual objective
        raise Exception("The learner '%s' does not provide an objective function computation; use another optimizer!" % str(type(self)))
    @property
    def name(self):
        # human-readable learner description including the prior setting
        if self.prior_stdev is None:
            sigma = 'no prior'
        else:
            sigma = "sigma=%f" % self.prior_stdev
        return "%s[%s]" % (self.__class__.__name__, sigma)
class DiscriminativeLearner(AbstractLearner):
    '''
    Abstract superclass of all discriminative learning algorithms.
    Provides some convenience methods for determining the set of
    query predicates from the common parameters.
    '''
    @property
    def qpreds(self):
        '''
        Computes from the set parameters the list of query predicates
        for the discriminative learner. Either the 'qpreds' or 'epreds'
        parameters must be given, both are lists of predicate names.
        '''
        # BUG FIX: the original guarded on hasattr(self, '_preds') but cached
        # into self._qpreds, so the computation below -- which mutates the
        # caller's params list via extend() -- re-ran and re-extended it on
        # every access. Guard on the attribute that is actually set, and work
        # on a copy of the params list.
        if not hasattr(self, '_qpreds'):
            qpreds = list(self._params.get('qpreds', []))
            if 'epreds' in self._params:
                epreds = self._params['epreds']
                # everything that is not evidence is a query predicate
                qpreds.extend([p.name for p in self.mrf.predicates if p.name not in epreds])
                if not set(qpreds).isdisjoint(epreds):
                    raise Exception('Query predicates and evidence predicates must be disjoint.')
            if len(qpreds) == 0:
                raise Exception("For discriminative Learning, query or evidence predicates must be provided.")
            self._qpreds = qpreds
        return self._qpreds
    @property
    def epreds(self):
        # every predicate that is not a query predicate counts as evidence
        return [p.name for p in self.mrf.predicates if p.name not in self.qpreds]
    def _qpred(self, predname):
        # True iff `predname` is a query predicate
        return predname in self.qpreds
    @property
    def name(self):
        return self.__class__.__name__ + "[query predicates: %s]" % ",".join(self.qpreds)
class SoftEvidenceLearner(AbstractLearner):
    '''
    Superclass for all soft-evidence learners.
    '''
    def __init__(self, mrf, **params):
        AbstractLearner.__init__(self, mrf, **params)
    def _getTruthDegreeGivenEvidence(self, gf, world=None):
        # fall back to the MRF's own evidence vector when no world is given
        if world is None:
            world = self.mrf.evidence
        return gf.noisyor(world)
|
danielnyga/pracmln
|
python2/pracmln/mln/learning/common.py
|
Python
|
bsd-2-clause
| 9,913
|
[
"Gaussian"
] |
d7777abab3c7a4eff877ed129a9c6cda217ddd9c81d4b6c9eee6c0f746aa35a6
|
''' Significant lifting from https://jmetzen.github.io/2015-11-27/vae.html '''
import time
import numpy as np
import tensorflow as tf
import random
import matplotlib.pyplot as plt
import re, string
from sklearn.feature_extraction.text import CountVectorizer
from collections import defaultdict
import pickle as pkl
def load_text(n, num_samples=None):
    """Load the OED word/definition text file and one-hot encode it.

    n -- vocabulary size for the one-hot map.
    num_samples -- optional cap on the number of (word, definition) pairs;
                   None (the default) uses every pair.
    Returns (X, y, mask, rev_map): encoded headwords, encoded definitions,
    the definitions' slot mask, and the index->word reverse map.
    """
    fname = 'Oxford_English_Dictionary.txt'
    txt = []
    with open(fname, 'rb') as f:
        txt = f.readlines()
    # strip, drop non-letters, discard empty/one-char lines
    txt = [x.decode('utf-8').strip() for x in txt]
    txt = [re.sub(r'[^a-zA-Z ]+', '', x) for x in txt if len(x) > 1]
    # List of words
    word_list = [x.split(' ', 1)[0].strip() for x in txt]
    # List of definitions
    def_list = [x.split(' ', 1)[1].strip() for x in txt]
    # report the longest definition, then clamp to a fixed length
    maxlen = 0
    for defi in def_list:
        maxlen = max(maxlen, len(defi.split()))
    print(maxlen)
    maxlen = 30
    _map, rev_map = get_one_hot_map(word_list, def_list, n)
    # BUG FIX: the original tested `is not None`, which *overwrote* any
    # caller-supplied cap with the full corpus size; default only when unset.
    if num_samples is None:
        num_samples = len(word_list)
    # BUG FIX: X was computed twice in the original; encode it once.
    X = map_one_hot(word_list[:num_samples], _map, 1, n)
    y, mask = map_one_hot(def_list[:num_samples], _map, maxlen, n)
    print(np.max(y))
    return X, y, mask, rev_map
def get_one_hot_map(to_def, corpus, n):
    # Build word<->index maps: headwords from `to_def` first, padded with the
    # most frequent words of `corpus` up to a vocabulary of n entries.
    # Returns (_map, rev_map): word->index (unknown words -> n+1) and
    # index->word (unknown indices -> "<UNK>").
    words = []
    for line in to_def:
        if line:
            words.append(line.split()[0])
    counts = defaultdict(int)
    uniq = defaultdict(int)
    for line in corpus:
        for word in line.split():
            counts[word] += 1
    # unseen words map to n+1, the <UNK> index
    _map = defaultdict(lambda: n + 1)
    rev_map = defaultdict(lambda: "<UNK>")
    # words=words[:25000]
    # histogram of frequency-of-frequencies (diagnostic output only)
    for i in counts.values():
        uniq[i] += 1
    print len(words)
    # random.shuffle(words)
    # pad the headword list with corpus words, most frequent first
    words += list(map(lambda z: z[0], reversed(sorted(counts.items(), key=lambda x: x[1]))))[:n - len(words)]
    print len(words)
    i = 0
    # random.shuffle(words)
    # indices are assigned starting from 1; 0 is reserved for the start/end token
    for word in words:
        i += 1
        _map[word] = i
        rev_map[i] = word
    rev_map[n + 1] = '<UNK>'
    # NOTE(review): `zero_end_tok` is a module-level flag not defined in the
    # visible part of this file -- confirm where it is set. The else-branch
    # grouping below follows map_one_hot's end-token convention (n+2).
    if zero_end_tok:
        rev_map[0] = '.'
    else:
        rev_map[0] = 'Start'
        rev_map[n + 2] = 'End'
    print list(reversed(sorted(uniq.items())))
    print len(list(uniq.items()))
    # print rev_map
    return _map, rev_map
def map_one_hot(corpus,_map,maxlen,n):
    # Encode corpus entries via _map.
    # maxlen == 1: each entry is a single word. Returns either a one-hot matrix
    #   (len(corpus), n+3) or, when the module flag form2 is set, a
    #   (len(corpus), 16) little-endian bit encoding of the word index.
    # maxlen > 1: each entry is a sentence. Returns (encoded, mask) with
    #   positions shifted by one (slot 0 left for a start token) and an end
    #   token appended after the last word.
    # NOTE(review): the bit trick mapped/(2**np.arange(16)) relies on Python 2
    # integer division; under Python 3 `/` would yield floats — confirm runtime.
    if maxlen==1:
        if not form2:
            total_not=0
            rtn=np.zeros([len(corpus),n+3],dtype=np.float32)
            for l,line in enumerate(corpus):
                if len(line)==0:
                    # Empty entry: flag it in the last column.
                    rtn[l,-1]=1
                else:
                    mapped=_map[line]
                    # 75001 is the hard-coded <UNK> index (n+1 when n=75000) —
                    # NOTE(review): breaks silently for other vocabulary sizes.
                    if mapped==75001:
                        total_not+=1
                    rtn[l,mapped]=1
            print total_not,len(corpus)
            return rtn
        else:
            total_not=0
            rtn=np.zeros([len(corpus),16],dtype=np.float32)
            for l,line in enumerate(corpus):
                # if len(line)==0:
                # 	rtn[l]=n+2
                # else:
                # 	if line not in _map:
                # 		total_not+=1
                mapped=_map[line]
                if mapped==75001:
                    total_not+=1
                # 16-bit little-endian binary representation of the index.
                binrep=(1&(mapped/(2**np.arange(16)))).astype(np.float32)
                rtn[l]=binrep
            print total_not,len(corpus)
            return rtn
    else:
        if form2:
            rtn=np.zeros([len(corpus),maxlen+2,16],dtype=np.float32)
        else:
            rtn=np.zeros([len(corpus),maxlen+2],dtype=np.int32)
        print (rtn.shape)
        mask=np.zeros([len(corpus),maxlen+2],dtype=np.float32)
        print (mask.shape)
        # The first real slot is always counted in the mask.
        mask[:,1]=1.0
        totes=0
        nopes=0
        wtf=0
        for l,_line in enumerate(corpus):
            x=0
            line=_line.split()
            # Encode up to maxlen tokens, starting at slot 1.
            for i in range(min(len(line),maxlen)):
                # if line[i] not in _map:
                # 	nopes+=1
                mapped=_map[line[i]]
                if form2:
                    binrep=(1&(mapped/(2**np.arange(16)))).astype(np.float32)
                    rtn[l,i+1,:]=binrep
                else:
                    rtn[l,i+1]=mapped
                if mapped==75001:
                    wtf+=1
                mask[l,i+1]=1.0
                totes+=1
                x=i+1
            # Append the end-of-sequence token (index 0 when zero_end_tok, else n+2).
            to_app=n+2
            if zero_end_tok:
                to_app=0
            if form2:
                rtn[l,x+1,:]=(1&(to_app/(2**np.arange(16)))).astype(np.float32)
            else:
                rtn[l,x+1]=to_app
            mask[l,x+1]=1.0
        print nopes,totes,wtf
        return rtn,mask
def xavier_init(fan_in, fan_out, constant=1e-4):
    """Xavier (Glorot) uniform initialization of network weights.

    Returns a (fan_in, fan_out) float32 tensor drawn uniformly from
    [-scale, scale] where scale = constant * sqrt(6 / (fan_in + fan_out)).
    NOTE(review): the classic recipe uses constant=1; 1e-4 makes the
    initial weights four orders of magnitude smaller — confirm intended.
    """
    # https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
    scale = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out),
                             minval=-scale, maxval=scale,
                             dtype=tf.float32)
class VariationalAutoencoder(object):
    """ Variational Autoencoder (VAE) with an sklearn-like interface implemented using TensorFlow.
    This implementation uses probabilistic encoders and decoders using Gaussian
    distributions and realized by multi-layer perceptrons. The VAE can be learned
    end-to-end.
    See "Auto-Encoding Variational Bayes" by Kingma and Welling for more details.

    NOTE(review): relies on module-level flags and names defined in this file's
    __main__ block (form2, vanilla, same_embedding, clip_grad, model_path,
    all_the_f_one_h).
    """
    def __init__(self, network_architecture, transfer_fct=tf.nn.softplus,
                 learning_rate=0.001, batch_size=100,generative=False,ctrain=False,test=False):
        # network_architecture: dict with keys n_input, maxlen, n_lstm_input, n_z.
        self.network_architecture = network_architecture
        # transfer_fct is stored but never used by the methods below.
        self.transfer_fct = transfer_fct
        self.learning_rate = learning_rate
        print self.learning_rate
        self.batch_size = batch_size
        # tf Graph input
        self.n_words=network_architecture['n_input']
        # NOTE(review): both branches are identical — x is always a float32
        # (batch, n_input) placeholder regardless of form2.
        if not form2:
            self.x = tf.placeholder(tf.float32, [None,self.n_words],name='x_in')
        else:
            self.x = tf.placeholder(tf.float32, [None,self.n_words],name='x_in')
        self.intype=type(self.x)
        # Captions: integer word ids without form2, otherwise per-step float32
        # codes of width n_words.
        if not form2:
            self.caption_placeholder = tf.placeholder(tf.int32, [None,network_architecture["maxlen"]],name='caption_placeholder')
        else:
            self.caption_placeholder = tf.placeholder(tf.float32, [None, network_architecture["maxlen"],self.n_words],name='caption_placeholder')
        print self.caption_placeholder.shape
        # mask[b, t] == 1.0 where caption position t holds a real token.
        self.mask=tf.placeholder(tf.float32, [None, network_architecture["maxlen"]],name='mask')
        # Create autoencoder network
        if not generative:
            self._create_network()
            # Define loss function based variational upper-bound and
            # corresponding optimizer
            self._create_loss_optimizer()
            self.test=test
        else:
            # Inference-only graph; no loss/optimizer is built.
            self._build_gen()
        # Initializing the tensor flow variables
        init = tf.global_variables_initializer()
        # Launch the session
        self.sess = tf.InteractiveSession()
        self.saver = tf.train.Saver(max_to_keep=100)
        self.sess.run(init)
        if ctrain:
            # Continue training: restore the most recent checkpoint in model_path.
            self.saver.restore(self.sess, tf.train.latest_checkpoint(model_path))

    def _create_network(self):
        # Build the training graph: encode self.x into the LSTM's first input,
        # unroll the decoder with teacher forcing over the caption, and store
        # the masked reconstruction + KLD loss in self.loss.
        # Initialize autoencode network weights and biases
        network_weights = self._initialize_weights(**self.network_architecture)
        # NOTE(review): start_token_tensor is unused in this method (only
        # _build_gen feeds a start token).
        start_token_tensor=tf.constant((np.zeros([self.batch_size,16])).astype(np.float32),dtype=tf.float32)
        self.network_weights=network_weights
        # Use recognition network to determine mean and
        # (log) variance of Gaussian distribution in latent
        # space
        if not same_embedding:
            input_embedding,input_embedding_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'])
        else:
            input_embedding,input_embedding_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'])
        state = self.lstm.zero_state(self.batch_size, dtype=tf.float32)
        loss = 0
        with tf.variable_scope("RNN"):
            for i in range(self.network_architecture['maxlen']):
                if i > 0:
                    # Teacher forcing: re-embed the previous ground-truth word.
                    # current_embedding = tf.nn.embedding_lookup(self.word_embedding, caption_placeholder[:,i-1]) + self.embedding_bias
                    if form2:
                        current_embedding,KLD_loss = self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'], self.caption_placeholder[:,i-1,:],logit=True)
                    else:
                        current_embedding,KLD_loss = self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'], self.caption_placeholder[:,i-1])
                    # Only count the per-word KLD where a real token exists.
                    loss+=tf.reduce_sum(KLD_loss*self.mask[:,i])
                else:
                    # Step 0 is driven by the encoded input (the defined word).
                    current_embedding = input_embedding
                if i > 0:
                    tf.get_variable_scope().reuse_variables()
                out, state = self.lstm(current_embedding, state)
                if i > 0:
                    # Build the step-i target and accumulate its cross-entropy.
                    if not form2:
                        # Scatter integer labels into a dense one-hot matrix.
                        labels = tf.expand_dims(self.caption_placeholder[:, i], 1)
                        ix_range=tf.range(0, self.batch_size, 1)
                        ixs = tf.expand_dims(ix_range, 1)
                        concat = tf.concat([ixs, labels],1)
                        onehot = tf.sparse_to_dense(
                                concat, tf.stack([self.batch_size, self.n_words]), 1.0, 0.0)
                    else:
                        onehot=self.caption_placeholder[:,i,:]
                    logit = tf.matmul(out, network_weights['LSTM']['encoding_weight']) + network_weights['LSTM']['encoding_bias']
                    if form2:
                        # best_word=tf.nn.softmax(logit)
                        # best_word=tf.round(best_word)
                        # all_the_f_one_h.append(best_word)
                        # form2 targets are independent bits, so use a per-bit
                        # sigmoid cross-entropy summed over the code width.
                        xentropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=logit, labels=onehot)
                        print xentropy.shape
                        xentropy=tf.reduce_sum(xentropy,reduction_indices=-1)
                        print xentropy.shape
                    else:
                        xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=onehot)
                    xentropy = xentropy * self.mask[:,i]
                    loss += tf.reduce_sum(xentropy)
        # Normalize by the number of real target tokens, then add the input
        # embedding's KLD penalty.
        loss = (loss / tf.reduce_sum(self.mask[:,1:]))+tf.reduce_sum(input_embedding_KLD_loss)
        self.loss=loss

    def _initialize_weights(self, n_lstm_input, maxlen,
                            n_input, n_z):
        # Create every TF variable used by the model and return them grouped in
        # a dict; also instantiates self.lstm as a side effect.
        all_weights = dict()
        if not same_embedding:
            # Separate affine projection for the input (headword) embedding.
            all_weights['input_meaning'] = {
                'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight'),
                'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias')}
        if not vanilla:
            # Mean and log-variance heads for the reparameterized encoder.
            all_weights['biases_variational_encoding'] = {
                'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_meanb'),
                'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_log_sigmab')}
            all_weights['variational_encoding'] = {
                'out_mean': tf.Variable(xavier_init(n_input, n_z),name='out_mean'),
                'out_log_sigma': tf.Variable(xavier_init(n_input, n_z),name='out_log_sigma')}
        else:
            # Deterministic embedding: mean head only.
            all_weights['biases_variational_encoding'] = {
                'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_meanb')}
            all_weights['variational_encoding'] = {
                'out_mean': tf.Variable(xavier_init(n_input, n_z),name='out_mean')}
        self.lstm=tf.contrib.rnn.BasicLSTMCell(n_lstm_input)
        all_weights['LSTM'] = {
            'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight2'),
            'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias2'),
            'encoding_weight': tf.Variable(xavier_init(n_lstm_input,n_input),name='encoding_weight'),
            'encoding_bias': tf.Variable(tf.zeros(n_input),name='encoding_bias'),
            'lstm': self.lstm}
        return all_weights

    def _get_input_embedding(self, ve_weights, aff_weights):
        # Encode self.x into a latent sample z (with its KLD cost), then
        # project z into LSTM-input space with the given affine weights.
        z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],self.x)
        embedding=tf.matmul(z,aff_weights['affine_weight'])+aff_weights['affine_bias']
        return embedding,vae_loss

    def _get_word_embedding(self, ve_weights, lstm_weights, x,logit=False):
        # Embed one word for the decoder. With logit=True, x is already a
        # dense code; otherwise x holds integer ids that are either looked up
        # directly (non-form2) or expanded to one-hot vectors first (form2).
        if logit:
            z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x)
        else:
            if not form2:
                z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x, True)
            else:
                z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.network_architecture['n_input']))
                # Debug side channel fetched by partial_fit()/generate().
                all_the_f_one_h.append(tf.one_hot(x,depth=self.network_architecture['n_input']))
        embedding=tf.matmul(z,lstm_weights['affine_weight'])+lstm_weights['affine_bias']
        return embedding,vae_loss

    def _vae_sample(self, weights, biases, x, lookup=False):
        #TODO: consider adding a linear transform layer+relu or softplus here first
        # Reparameterization trick: z = mu + sigma * eps. With the module flag
        # `vanilla` set, the embedding is deterministic (z = mu, KLD = 0).
        if not lookup:
            mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
            if not vanilla:
                logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
        else:
            # x holds integer ids; use an embedding lookup instead of a matmul.
            mu=tf.nn.embedding_lookup(weights['out_mean'],x)+biases['out_mean']
            if not vanilla:
                logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)+biases['out_log_sigma']
        if not vanilla:
            epsilon=tf.random_normal(tf.shape(logvar),name='epsilon')
            std=tf.exp(.5*logvar)
            z=mu+tf.multiply(std,epsilon)
        else:
            z=mu
        KLD=0.0
        if not vanilla:
            # Closed-form KL divergence of N(mu, sigma^2) against N(0, I).
            KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
            print logvar.shape,epsilon.shape,std.shape,z.shape,KLD.shape
        return z,KLD

    def _create_loss_optimizer(self):
        # RMSProp with global-norm gradient clipping when clip_grad is set,
        # otherwise plain Adam on self.loss.
        if clip_grad:
            opt_func = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate)
            tvars = tf.trainable_variables()
            grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), .1)
            self.optimizer = opt_func.apply_gradients(zip(grads, tvars))
        else:
            self.optimizer = \
                tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)

    def _create_loss_test(self):
        # NOTE(review): leftover gradient-checking hook; never called in this file.
        self.test_op = \
            tf.test.compute_gradient_error(self.x,np.array([self.batch_size,self.n_words]),self.loss,[1],extra_feed_dict={})

    def partial_fit(self, X,y,mask,testify=False):
        """Train model based on mini-batch of input data.
        Return cost of mini-batch.

        When constructed with test=True and called with testify=True, runs a
        one-off gradient check and exits the process instead of training.
        """
        if self.test and testify:
            print tf.test.compute_gradient_error(self.x,np.array([self.batch_size,self.n_words]),self.loss,[self.batch_size],extra_feed_dict={self.caption_placeholder: y, self.mask: mask})
            exit()
        else:
            # Also fetch all_the_f_one_h (debug one-hots) alongside the train op.
            opt, cost,shit = self.sess.run((self.optimizer, self.loss,all_the_f_one_h),
                                  feed_dict={self.x: X, self.caption_placeholder: y, self.mask: mask})
            # print shit
            return cost

    def _build_gen(self):
        #same setup as `_create_network` function
        network_weights = self._initialize_weights(**self.network_architecture)
        # Start token is all zeros: 16 zero bits under form2, otherwise id 0.
        if form2:
            start_token_tensor=tf.constant((np.zeros([self.batch_size,16])).astype(np.float32),dtype=tf.float32)
        else:
            start_token_tensor=tf.constant((np.zeros([self.batch_size])).astype(np.int32),dtype=tf.int32)
        self.network_weights=network_weights
        if not same_embedding:
            input_embedding,_=self._get_input_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['input_meaning'])
        else:
            input_embedding,_=self._get_input_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'])
        print input_embedding.shape
        # image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias
        state = self.lstm.zero_state(self.batch_size,dtype=tf.float32)
        #declare list to hold the words of our generated captions
        all_words = []
        with tf.variable_scope("RNN"):
            # in the first iteration we have no previous word, so we directly pass in the image embedding
            # and set the `previous_word` to the embedding of the start token ([0]) for the future iterations
            output, state = self.lstm(input_embedding, state)
            print state,output.shape
            if form2:
                previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], start_token_tensor,logit=True)
            else:
                previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], start_token_tensor)
            print previous_word.shape
            # previous_word = tf.nn.embedding_lookup(self.word_embedding, [0]) + self.embedding_bias
            for i in range(self.network_architecture['maxlen']):
                tf.get_variable_scope().reuse_variables()
                print i
                out, state = self.lstm(previous_word, state)
                # get a one-hot word encoding from the output of the LSTM
                logit=tf.matmul(out, network_weights['LSTM']['encoding_weight']) + network_weights['LSTM']['encoding_bias']
                # Greedy decoding: argmax id, or rounded per-bit sigmoid code.
                if not form2:
                    best_word = tf.argmax(logit, 1)
                else:
                    best_word=tf.nn.sigmoid(logit)
                    best_word=tf.round(best_word)
                # with tf.device("/cpu:0"):
                #     # get the embedding of the best_word to use as input to the next iteration of our LSTM
                #     previous_word = tf.nn.embedding_lookup(self.word_embedding, best_word)
                #     previous_word += self.embedding_bias
                print logit.shape
                # Feed the decoded word back in as the next step's input.
                if form2:
                    previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], best_word,logit=True)
                else:
                    previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], best_word)
                print previous_word.shape
                all_words.append(best_word)
        self.generated_words=all_words

    def generate(self, _map, x):
        """ Generate data by sampling from latent space.

        Runs the greedy decoder built by _build_gen on the encoded inputs x
        and returns the generated word indices, one row per input sample.
        """
        # if z_mu is None:
        #     z_mu = np.random.normal(size=self.network_architecture["n_z"])
        # # Note: This maps to mean of distribution, we could alternatively
        # # sample from Gaussian distribution
        # return self.sess.run(self.x_reconstr_mean,
        #                      feed_dict={self.z: z_mu})
        # saver = tf.train.Saver()
        # saver.restore(self.sess, tf.train.latest_checkpoint(model_path))
        generated_word_index,f_it= self.sess.run([self.generated_words,all_the_f_one_h], feed_dict={self.x:x})
        print f_it
        print generated_word_index
        if form2:
            # Collapse the per-step 16-bit codes back into integer ids and put
            # the batch dimension first (decoder output is time-major).
            generated_word_index=np.array(bin_to_int(generated_word_index))
            generated_word_index=np.rollaxis(generated_word_index,1)
        else:
            generated_word_index=np.array(generated_word_index)
        return generated_word_index
        # generated_sentence = ixtoword(_map,generated_word_index)
        # return generated_sentence
def ixtoword(_map,ixs):
    """Translate a batch of index sequences back into words via _map."""
    sentences = []
    for row in ixs:
        sentences.append([_map[ix] for ix in row])
    return sentences
def bin_to_int(a):
    """Collapse little-endian bit arrays back into unsigned integers.

    Each element of `a` is an array whose last axis holds bits (LSB first);
    the result is a list of uint32 arrays with that axis summed out.
    """
    decoded = []
    for bits in a:
        weights = 2 ** np.arange(bits.shape[-1])
        decoded.append((bits * weights).sum(axis=-1).astype(np.uint32))
    return decoded
def train(network_architecture, learning_rate=0.001,
          batch_size=100, training_epochs=10, display_step=20,gen=False,ctrain=False,test=False):
    # Build a VariationalAutoencoder and, unless gen=True, run the training
    # loop over the module-level X/y/mask arrays (set in __main__).
    # Returns the (trained or generative) model instance.
    if should_decay and not gen:
        # Exponential learning-rate decay. NOTE(review): global_step is never
        # passed to the optimizer, so the decay schedule never advances.
        global_step=tf.Variable(0,trainable=False)
        learning_rate = tf.train.exponential_decay(learning_rate, global_step,
                                           all_samps, 0.95, staircase=True)
    vae = VariationalAutoencoder(network_architecture,
                                 learning_rate=learning_rate,
                                 batch_size=batch_size,generative=gen,ctrain=ctrain,test=test)
    # Training cycle
    # if test:
    # 	maxlen=network_architecture['maxlen']
    # 	return tf.test.compute_gradient_error([vae.x,vae.caption_placeholder,vae.mask],[np.array([batch_size,n_input]),np.array([batch_size,maxlen,n_input]),np.array([batch_size,maxlen])],vae.loss,[])
    if gen:
        return vae
    costs=[]
    indlist=np.arange(all_samps).astype(int)
    for epoch in range(training_epochs):
        avg_cost = 0.
        # NOTE(review): batches are counted with n_samples but indices come
        # from the full all_samps-long shuffle — confirm intended.
        total_batch = int(n_samples / batch_size)
        # Loop over all batches
        np.random.shuffle(indlist)
        testify=False
        for i in range(total_batch):
            batch_xs = X[indlist[i*batch_size:(i+1)*batch_size]]
            # Fit training using batch data
            # One-off gradient check at epoch 2 (only acts when test=True).
            if epoch==2 and i ==0:
                testify=True
            cost = vae.partial_fit(batch_xs,y[indlist[i*batch_size:(i+1)*batch_size]].astype(np.uint32),mask[indlist[i*batch_size:(i+1)*batch_size]],testify=testify)
            # Compute average loss
            avg_cost += np.sum(cost) / n_samples * batch_size
        costs.append(avg_cost)
        # Display logs per epoch step
        if epoch % display_step == 0 or epoch==1:
            if should_save:
                # Checkpoint the model and the loss history.
                print 'saving'
                vae.saver.save(vae.sess, './modelstemp/model')
            pkl.dump(costs,open('100_256_45000_allwords_results.pkl','wb'))
            print("Epoch:", '%04d' % (epoch+1),
                  "cost=", avg_cost)
    return vae
if __name__ == "__main__":
    # Global configuration flags: these are read as module globals by the
    # classes and helpers above.
    form2=True
    vanilla=True
    same_embedding=False
    clip_grad=False
    should_save=True
    should_train=True
    # should_train=not should_train
    should_continue=False
    should_decay=False
    zero_end_tok=True
    training_epochs=10000
    batch_size=1000
    # Debug side channel populated during graph construction.
    all_the_f_one_h=[]
    # Vocabulary sized so every index fits in 16 bits (minus reserved tokens).
    if not zero_end_tok:
        X, y, mask, _map = load_text(2**16-4)
    else:
        X, y, mask, _map = load_text(2**16-3)
    n_input =16
    n_samples = 30000
    lstm_dim=256
    model_path = './modelstemp/'
    all_samps=len(X)
    # X, y = X[:n_samples, :], y[:n_samples, :]
    network_architecture = \
        dict(maxlen=32, # 2nd layer decoder neurons
             n_input=n_input, # One hot encoding input
             n_lstm_input=lstm_dim, # LSTM cell size
             n_z=512,  # dimensionality of latent space
             )
    if should_train:
        # vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue)
        # print train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue,test=True)
        vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=False,ctrain=should_continue,learning_rate=.005)
    else:
        # Build the generative graph and restore the latest checkpoint.
        vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size,gen=True,ctrain=True)
    # # vae_2d._build_gen()
    # Sample 1000 random entries and decode the first 10 generated definitions.
    ind_list=np.arange(len(X)).astype(int)
    np.random.shuffle(ind_list)
    x_sample = X[ind_list[:1000]]
    print x_sample
    y_sample = y[ind_list[:1000]]
    print y_sample
    y_hat = vae_2d.generate(_map,x_sample)
    y_hat=y_hat[:10]
    # print y_hat
    y_hat_words=ixtoword(_map,y_hat)
    print y_hat_words
    # Decode the ground-truth definitions for comparison.
    if form2:
        y_words=ixtoword(_map,np.array(bin_to_int(y_sample[:10])))
    else:
        y_words=ixtoword(_map,y_sample)
    print(y_hat)
    print(y_hat_words)
    print(y_words)
    # # plt.figure(figsize=(8, 6))
    # plt.scatter(z_mu[:, 0], z_mu[:, 1], c=np.argmax(y_sample, 1))
    # plt.colorbar()
    # plt.grid()
    # plt.show()
|
dricciardelli/vae2vec
|
VAE_def.py
|
Python
|
mit
| 21,946
|
[
"Gaussian"
] |
694ddac1c15db5e314d8a88a33df0e7dd07f43d16463e6ead449c3f58ea737af
|
"""
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on a 2D projection of the iris
dataset. We only consider the first 2 features of this dataset:
- Sepal length
- Sepal width
This example shows how to plot the decision surface for four SVM classifiers
with different kernels.
The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly
different decision boundaries. This can be a consequence of the following
differences:
- ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the
regular hinge loss.
- ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass
reduction while ``SVC`` uses the One-vs-One multiclass reduction.
Both linear models have linear decision boundaries (intersecting hyperplanes)
while the non-linear kernel models (polynomial or Gaussian RBF) have more
flexible non-linear decision boundaries with shapes that depend on the kind of
kernel and its parameters.
.. NOTE:: while plotting the decision function of classifiers for toy 2D
datasets can help get an intuitive understanding of their respective
expressive power, be aware that those intuitions don't always generalize to
more realistic high-dimensional problems.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
def make_meshgrid(x, y, h=0.02):
    """Create a mesh of points to plot in

    Parameters
    ----------
    x: data to base x-axis meshgrid on
    y: data to base y-axis meshgrid on
    h: stepsize for meshgrid, optional

    Returns
    -------
    xx, yy : ndarray
    """
    # Pad the data range by one unit on each side so the plot has margins.
    pad = 1
    xs = np.arange(x.min() - pad, x.max() + pad, h)
    ys = np.arange(y.min() - pad, y.max() + pad, h)
    grid_x, grid_y = np.meshgrid(xs, ys)
    return grid_x, grid_y
def plot_contours(ax, clf, xx, yy, **params):
    """Plot the decision boundaries for a classifier.

    Parameters
    ----------
    ax: matplotlib axes object
    clf: a classifier
    xx: meshgrid ndarray
    yy: meshgrid ndarray
    params: dictionary of params to pass to contourf, optional
    """
    # Classify every grid point, then shape predictions back onto the grid.
    grid = np.c_[xx.ravel(), yy.ravel()]
    predictions = clf.predict(grid).reshape(xx.shape)
    return ax.contourf(xx, yy, predictions, **params)
# import some data to play with
iris = datasets.load_iris()
# Take the first two features. We could avoid this by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0  # SVM regularization parameter
models = (
    svm.SVC(kernel="linear", C=C),
    svm.LinearSVC(C=C, max_iter=10000),
    svm.SVC(kernel="rbf", gamma=0.7, C=C),
    svm.SVC(kernel="poly", degree=3, gamma="auto", C=C),
)
# Generator: each model is fitted lazily as the plotting loop consumes it.
models = (clf.fit(X, y) for clf in models)
# title for the plots
titles = (
    "SVC with linear kernel",
    "LinearSVC (linear kernel)",
    "SVC with RBF kernel",
    "SVC with polynomial (degree 3) kernel",
)
# Set-up 2x2 grid for plotting.
fig, sub = plt.subplots(2, 2)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
# One subplot per classifier: decision regions plus the raw data points.
for clf, title, ax in zip(models, titles, sub.flatten()):
    plot_contours(ax, clf, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8)
    ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors="k")
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xlabel("Sepal length")
    ax.set_ylabel("Sepal width")
    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_title(title)
plt.show()
|
scikit-learn/scikit-learn
|
examples/svm/plot_iris_svc.py
|
Python
|
bsd-3-clause
| 3,693
|
[
"Gaussian"
] |
38ad949308e1622fa0ae31de1afe1d43c5e35d484055cfaf107f875dd42e27fd
|
# Copyright (c) 2012 - N.P. de Klein
#
# This file is part of Python Mass Spec Analyzer (PyMSA).
#
# Python Mass Spec Analyzer (PyMSA) is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Python Mass Spec Analyzer (PyMSA) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Python Mass Spec Analyzer (PyMSA). If not, see <http://www.gnu.org/licenses/>.")
"""
Handling the opening of files, checking for file validity and existence.
"""
# author: ndeklein
# date:08/02/2012
# summary: Writes readerInstance (from parseFeatureXML.Reader) to output files.
import os
import warnings
class FileHandle():
"""
A handle to easily validate the correctness of a file
>>> fileHandle = FileHandle('exampleFeatureXMLfile.featureXML')
>>> fileHandle.isXML()
>>> fileHandle.isFeatureXML()
"""
def __init__(self,path):
"""
@type path: string
@param path: Path of a file
"""
#test if the file is there to get an early error if it's not
test = open(path)
test.close()
self.path = path
# Check if the file provided is a xml file
def isXML(self):
"""
Check if <?xml is in the first line of the file.
@raise IOError: File given to FileHandle is not a valid XML file.
B{Example}:
Check if a file is valid XML
>>> fileHandle = FileHandle('example_featureXML_file.featureXML')
>>> fileHandle.isXML() # returns None if it is valid, raises error if it is invalid
"""
inFile = open(self.path)
# if the first line of the file doesn't start with <?xml: close the file and raise IOError: 'Not a valid xml file'
if not inFile.readline().startswith('<?xml'):
inFile.close()
raise IOError, self.path+' is not a valid xml file'
else:
inFile.close()
return
# Check if the file provided is a featureXML file
def isFeatureXML(self):
"""
Check if <featureMap is in the second line of the file.
@raise IOError: File given to FileHandle is not a valid .featXML file.
@raise Warning: File given to FileHandle does not have 'software' in the 4th line
@raise Warning: Version of software is not of 1.9.0 or higher
B{Example}:
Checking if a featureXML file is valid
>>> fileHandle = FileHandle('example_featureXML_file.featureXML')
>>> fileHandle.isFeatureXML() # returns None if it is valid, raises error if it is invalid
"""
inFile = open(self.path)
# read the first line
inFile.readline()
# if the second line of the file doesn't start with <featureMap: Return 'Not a featureXML file'
if not inFile.readline().startswith('<featureMap'):
inFile.close()
raise IOError, self.path+' is not a valid featureXML file'
else:
# read max of 10 lines (because the software line can be in different place with different version of featureXML
for i in range(0,9): # loop 10 times
# read each line
softwareLine = inFile.readline()
# if software == in softwareLine, break
if 'software' in softwareLine:
break
# if 'software' is not in softwareLine this means that the first 10 lines did not contain software
if not 'software' in softwareLine:
# give a warning because it is not vital for the functioning of the program
warnings.warn('software information of the featureXML file: \''+str(self.path)+'\' not in the first 10 lines, software version used unknown')
inFile.close
else:
version = softwareLine.split('version="')[1].split('"')[0]
# if the version is not 1.7.0 - 1.9.0
if not int(version.replace('.','')) >= 190:
warnings.warn('pyMS is only tested on version 1.9.0 of FeatureFinder. Older versions might not work. Found version: '+str(version)+' for your file:' +str(self.path)+'')
inFile.close()
# Check if the file provided is a consensusXML file
def isConsensusXML(self):
"""
Check if <featureMap is in the second line of the file.
@raise IOError: File given to FileHandle is not a valid .featXML file.
@raise Warning: File given to FileHandle does not have 'software' in the 4th line
@raise Warning: Version of software is not of 1.9.0 or higher
B{Example}:
Checking if a featureXML file is valid
>>> fileHandle = FileHandle('example_featureXML_file.featureXML')
>>> fileHandle.isFeatureXML() # returns None if it is valid, raises error if it is invalid
"""
inFile = open(self.path)
# read the first line
line=inFile.readline()
while line.startswith('<?xml'):
line=inFile.readline()
# if the second line of the file doesn't start with <featureMap: Return 'Not a featureXML file'
if not line.startswith('<consensusXML'):
inFile.close()
raise IOError, self.path+' is not a valid consensusXML file'
else:
# read max of 10 lines (because the software line can be in different place with different version of featureXML
for i in range(0,9): # loop 10 times
# read each line
softwareLine = inFile.readline()
# if software == in softwareLine, break
if 'software' in softwareLine:
break
# if 'software' is not in softwareLine this means that the first 10 lines did not contain software
if not 'software' in softwareLine:
# give a warning because it is not vital for the functioning of the program
warnings.warn('software information of the consensusXML file: \''+str(self.path)+'\' not in the first 10 lines, software version used unknown')
inFile.close
else:
name=softwareLine.split('name="')[1].split('"')[0]
version = softwareLine.split('version="')[1].split('"')[0]
# if the version is not 1.7.0 - 1.9.0
if not int(version.replace('.','')) >= 190:
warnings.warn('pyMSA is only tested on version 1.9.0 of OpenMS. Older versions might not work. Found version: '+str(version)+' for your file:' +str(self.path)+'')
inFile.close()
# check if the file provided is a mzml or peaks.mzml file
def isMzML(self):
"""
Check if <mzML is in the second or third line of the file.
@raise IOError: File given to FileHandle is not a valid .mzML or .peaks.mzML file.
B{Example}:
Checking if an mzml file is valid
>>> fileHandle = FileHandle('exampleMzMLfile.mzML')
>>> fileHandle.isXML() # returns None if it is valid, raises error if it is invalid
Checking if a peaks.mzml file is valid
>>> fileHandle = FileHandle('example_peaks_mzML_file.peaks.mzML')
>>> fileHandle.isMzML()
"""
inFile = open(self.path)
# read the first line
inFile.readline()
# if the second line and the third line don't start with mzML: Return: 'Not a .mzML or .peaks.mzML file'
# the second .readline() after the and not reads the second line (because .readline() pops the line out of buffer)
if not 'mzML' in inFile.readline() and not 'mzML' in inFile.readline():
inFile.close()
raise IOError, self.path+' is not a valid .mzML or .peaks.mzML file'
else:
inFile.close()
def isMascot(self):
"""
Check if <mascot_search_results is in the second line of the file.
@raise IOError: File given to FileHandle is not a valid mascot result XML file.
B{Example}:
Checking if an mascot file is valid
>>> fileHandle = FileHandle('exampleMascotFile.xml')
>>> fileHandle.isMascot() # returns None if it is valid, raises error if it is invalid
"""
inFile = open(self.path)
# read the first line
inFile.readline()
# if the second line and the third line don't start with mzML: Return: 'Not a .mzML or .peaks.mzML file'
# the second .readline() after the and not reads the second line (because .readline() pops the line out of buffer)
if not 'mascot_search_results' in inFile.readline():
inFile.close()
raise IOError, self.path+' is not a valid mascot result xml file'
else:
inFile.close()
# return the absolute path of the file
def getFile(self):
"""
Get the absolute path of the file given to FileHandle
@rtype: string
@return: The absolute path of the file given to FileHandle
B{Example}:
Print the full path of a file given to FileHandle:
>>> fileHandle = FileHandle('example_featureXML_file.featureXML')
>>> print 'The full path is:',fileHandle.getFile()
The full path is: example_featureXML_file.featureXML
"""
return os.path.abspath(self.path)
|
davidmam/pyMSA
|
pyMSA/fileHandling.py
|
Python
|
gpl-3.0
| 10,017
|
[
"OpenMS"
] |
a12a149d489fee23577ba32bed8035eabe2c04dbf0d1b2a499715339f385ce89
|
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2016 Haggi Krey, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import pymel.core as pm
import logging
log = logging.getLogger("ui")
class BaseTemplate(pm.ui.AETemplate):
    """Shared base for the attribute-editor templates in this module.

    Thin forwarding wrappers over pm.ui.AETemplate that fix the default
    keyword arguments used throughout these templates.
    """
    def addControl(self, control, label=None, **kwargs):
        # Forward to AETemplate.addControl with label as an explicit keyword.
        pm.ui.AETemplate.addControl(self, control, label=label, **kwargs)
    def beginLayout(self, name, collapse=True):
        # Forward to AETemplate.beginLayout; layouts collapse by default.
        pm.ui.AETemplate.beginLayout(self, name, collapse=collapse)
class AEmtap_standInLocatorTemplate(BaseTemplate):
    """Maya attribute-editor template for the mtap_standInLocator node."""
    def __init__(self, nodeName):
        BaseTemplate.__init__(self,nodeName)
        self.thisNode = None
        self.node = pm.PyNode(self.nodeName)
        # Standard AE template skeleton: a scroll layout wrapping the body
        # plus the auto-generated "extra" controls section.
        self.beginScrollLayout()
        self.buildBody(nodeName)
        self.addExtraControls("ExtraControls")
        self.endScrollLayout()
    def buildBody(self, nodeName):
        # Lay out the stand-in controls in a single, initially-open section.
        self.thisNode = pm.PyNode(nodeName)
        self.beginLayout("StandinData" ,collapse=0)
        self.addControl("proxyFile", label="Proxy File")
        self.addControl("displayType", label="Display Type")
        self.addControl("percentDisplay", label="Density")
        self.addControl("elementSize", label="Element Size")
        self.endLayout()
|
haggi/appleseed-maya
|
module/scripts/appleseed_maya/aetemplate/aemtap_standinlocatortemplate.py
|
Python
|
mit
| 2,397
|
[
"VisIt"
] |
cf5662a20ee5c935636f80255c1d12d865a8314d8a05fd445eafa4c789dbf4bd
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import sys
import os
class Nwchem(Package):
    """High-performance computational chemistry software"""

    homepage = "http://www.nwchem-sw.org"
    url = "http://www.nwchem-sw.org/images/Nwchem-6.6.revision27746-src.2015-10-20.tar.gz"

    version('6.6', 'c581001c004ea5e5dfacb783385825e3',
            url='http://www.nwchem-sw.org/images/Nwchem-6.6.revision27746-src.2015-10-20.tar.gz')

    depends_on('blas')
    depends_on('lapack')
    depends_on('mpi')
    depends_on('scalapack')

    depends_on('python@2.7:2.8', type=('build', 'run'))

    # patches for 6.6-27746: (patch URL, md5 of the gzipped patch)
    urls_for_patches = {
        '@6.6': [
            ('http://www.nwchem-sw.org/images/Tddft_mxvec20.patch.gz', 'f91c6a04df56e228fe946291d2f38c9a'),
            ('http://www.nwchem-sw.org/images/Tools_lib64.patch.gz', 'b71e8dbad27f1c97b60a53ec34d3f6e0'),
            ('http://www.nwchem-sw.org/images/Config_libs66.patch.gz', 'cc4be792e7b5128c3f9b7b1167ade2cf'),
            ('http://www.nwchem-sw.org/images/Cosmo_meminit.patch.gz', '1d94685bf3b72d8ecd40c46334348ca7'),
            ('http://www.nwchem-sw.org/images/Sym_abelian.patch.gz', 'b19cade61c787916a73a4aaf6e2445d6'),
            ('http://www.nwchem-sw.org/images/Xccvs98.patch.gz', 'b9aecc516a3551dcf871cb2f066598cb'),
            ('http://www.nwchem-sw.org/images/Dplot_tolrho.patch.gz', '0a5bdad63d2d0ffe46b28db7ad6d9cec'),
            ('http://www.nwchem-sw.org/images/Driver_smalleig.patch.gz', 'c3f609947220c0adb524b02c316b5564'),
            ('http://www.nwchem-sw.org/images/Ga_argv.patch.gz', '7a665c981cfc17187455e1826f095f6f'),
            ('http://www.nwchem-sw.org/images/Raman_displ.patch.gz', 'ed334ca0b2fe81ce103ef8cada990c4c'),
            ('http://www.nwchem-sw.org/images/Ga_defs.patch.gz', '0c3cab4d5cbef5acac16ffc5e6f869ef'),
            ('http://www.nwchem-sw.org/images/Zgesvd.patch.gz', '8fd5a11622968ef4351bd3d5cddce8f2'),
            ('http://www.nwchem-sw.org/images/Cosmo_dftprint.patch.gz', '64dcf27f3c6ced2cadfb504fa66e9d08'),
            ('http://www.nwchem-sw.org/images/Txs_gcc6.patch.gz', '56595a7252da051da13f94edc54fe059'),
            ('http://www.nwchem-sw.org/images/Gcc6_optfix.patch.gz', 'c6642c21363c09223784b47b8636047d'),
            ('http://www.nwchem-sw.org/images/Util_gnumakefile.patch.gz', 'af74ea2e32088030137001ce5cb047c5'),
            ('http://www.nwchem-sw.org/images/Util_getppn.patch.gz', '8dec8ee198bf5ec4c3a22a6dbf31683c'),
            ('http://www.nwchem-sw.org/images/Gcc6_macs_optfix.patch.gz', 'a891a2713aac8b0423c8096461c243eb'),
            ('http://www.nwchem-sw.org/images/Notdir_fc.patch.gz', '2dc997d4ab3719ac7964201adbc6fd79')
        ]
    }
    # Iterate over patches.
    # BUG FIX: the loop variables live in the class namespace, and the
    # original loop used the name 'url', which rebound (clobbered) the
    # 'url' class attribute above with the last patch URL at class
    # definition time. Use distinct names so the tarball URL survives.
    for patch_condition, patch_list in urls_for_patches.items():
        for patch_url, patch_md5 in patch_list:
            patch(patch_url, when=patch_condition, level=0, md5=patch_md5)

    def install(self, spec, prefix):
        """Configure, build with make, and hand-install NWChem.

        NWChem has no ``make install`` target, so binaries and data are
        copied manually (layout mirrors the Ubuntu packaging).
        """
        scalapack = spec['scalapack'].libs
        lapack = spec['lapack'].libs
        blas = spec['blas'].libs
        # see http://www.nwchem-sw.org/index.php/Compiling_NWChem
        args = []
        args.extend([
            'NWCHEM_TOP=%s' % self.stage.source_path,
            # NWCHEM is picky about FC and CC. They should NOT be full path.
            # see http://www.nwchem-sw.org/index.php/Special:AWCforum/sp/id7524
            'CC=%s' % os.path.basename(spack_cc),
            'FC=%s' % os.path.basename(spack_fc),
            'USE_MPI=y',
            'MPI_LOC=%s' % spec['mpi'].prefix,
            'USE_PYTHONCONFIG=y',
            'PYTHONVERSION=%s' % spec['python'].version.up_to(2),
            'PYTHONHOME=%s' % spec['python'].home,
            'BLASOPT=%s' % ((lapack + blas).ld_flags),
            'BLAS_LIB=%s' % blas.ld_flags,
            'LAPACK_LIB=%s' % lapack.ld_flags,
            'USE_SCALAPACK=y',
            'SCALAPACK=%s' % scalapack.ld_flags,
            'NWCHEM_MODULES=all python',
            'NWCHEM_LONG_PATHS=Y'  # by default NWCHEM_TOP is 64 char max
        ])
        # TODO: query if blas/lapack/scalapack uses 64bit Ints
        # A flag to distinguish between 32bit and 64bit integers in linear
        # algebra (Blas, Lapack, Scalapack)
        use32bitLinAlg = True
        if use32bitLinAlg:
            args.extend([
                'USE_64TO32=y',
                'BLAS_SIZE=4',
                'LAPACK_SIZE=4',
                'SCALAPACK_SIZE=4'
            ])
        else:
            args.extend([
                'BLAS_SIZE=8',
                # BUG FIX: a comma was missing after 'LAPACK_SIZE=8', so
                # Python's implicit string concatenation produced the single
                # bogus arg 'LAPACK_SIZE=8SCALAPACK_SIZE=8'.
                'LAPACK_SIZE=8',
                'SCALAPACK_SIZE=8'
            ])
        if sys.platform == 'darwin':
            target = 'MACX64'
            args.extend([
                'CFLAGS_FORGA=-DMPICH_NO_ATTR_TYPE_TAGS'
            ])
        else:
            target = 'LINUX64'
        args.extend(['NWCHEM_TARGET=%s' % target])
        with working_dir('src'):
            make('nwchem_config', *args)
            if use32bitLinAlg:
                # Convert the sources to 32-bit integer interfaces.
                make('64_to_32', *args)
            make(*args)
            # need to install by hand. Follow Ubuntu:
            # http://packages.ubuntu.com/trusty/all/nwchem-data/filelist
            # http://packages.ubuntu.com/trusty/amd64/nwchem/filelist
            share_path = join_path(prefix, 'share', 'nwchem')
            mkdirp(prefix.bin)
            install_tree('data', share_path)
            install_tree(join_path('basis', 'libraries'),
                         join_path(share_path, 'libraries'))
            install_tree(join_path('nwpw', 'libraryps'),
                         join_path(share_path, 'libraryps'))
            b_path = join_path(self.stage.source_path, 'bin',
                               target, 'nwchem')
            chmod = which('chmod')
            chmod('+x', b_path)
            install(b_path, prefix.bin)
        # Finally, make user's life easier by creating a .nwchemrc file
        # to point to the required data files.
        nwchemrc = """\
   nwchem_basis_library {data}/libraries/
   nwchem_nwpw_library {data}/libraryps/
   ffield amber
   amber_1 {data}/amber_s/
   amber_2 {data}/amber_q/
   amber_3 {data}/amber_x/
   amber_4 {data}/amber_u/
   spce {data}/solvents/spce.rst
   charmm_s {data}/charmm_s/
   charmm_x {data}/charmm_x/
""".format(data=share_path)
        with open(".nwchemrc", 'w') as f:
            f.write(nwchemrc)
        install(".nwchemrc", share_path)
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/nwchem/package.py
|
Python
|
lgpl-2.1
| 7,627
|
[
"Amber",
"NWChem"
] |
35446600beca41ba4fe7909a6aa770bd7cc1b44395b5f7c79ed46b940b398899
|
#
# Copyright (C) 2001-2005 Ichiro Fujinaga, Michael Droettboom, Karl MacMillan
# 2014 Fabian Schmitt
# 2009-2014 Christoph Dalitz
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from gamera.plugin import *
import _listutilities
class permute_list(PluginFunction):
    """
    Permutes the given list (in place) one step.

    Returns ``True`` if there are more permutations to go. Returns
    ``False`` if permutations are done.

    Example usage::

      >>>from gamera.plugins import listutilities
      >>>a = [1,2,3]
      >>>while listutilities.permute_list(a):
      ...    print a
      ...
      [2, 1, 3]
      [1, 3, 2]
      [3, 1, 2]
      [2, 3, 1]
      [3, 2, 1]
    """
    # Shown under "List" in the Gamera plugin browser.
    category = "List"
    # Free function: not bound to an image type.
    self_type = None
    args = Args([Class("list")])
    # Nonzero while more permutations remain (note: the misspelled name
    # "continuaton" is part of the generated interface; left unchanged).
    return_type = Int("continuaton")
class all_subsets(PluginFunction):
    """Returns all subsets of size *size* of the given list."""
    category = "List"
    # Free function: not bound to an image type.
    self_type = None
    args = Args([Class("list"), Int("size")])
    return_type = Class("subsets")
class median(PluginFunction):
    """Compute the median from a list of values in linear time.

    This implementation works both with builtin numeric types like *int* or
    *float*, and with user defined types. For user defined type, you
    must implement the "less than" operator (`__lt__`), as in the following
    example:

    .. code:: Python

      class P:
          x = 0; y = 0
          def __init__(self, x, y):
              self.x = x; self.y = y
          def __lt__(self, other):
              return (self.x < other.x)
          def __eq__(self, other):
              return (self.x == other.x)

      a = [P(0,0), P(1,1), P(2,0)]
      p = median(a)

    When the parameter *inlist* is ``True``, the median is always a list entry,
    even for lists of even size. Otherwise, the median is for an
    even size list of *int* or *float* values the mean between the two middle
    values. So if you need the median for a pivot element, set *inlist* to
    ``True``.

    For user defined types, the returned median is always a list
    entry, because arithmetic computations do not make sense in this case.

    .. note::

      This is *not* the median image filter that replaces each pixel value
      with the median of its neighborhood. For this purpose, see the
      rank__ plugin.

    .. __: morphology.html#rank
    """
    category = "List"
    # Wrapper implemented in Python (below) rather than auto-generated.
    pure_python = 1
    self_type = None
    return_type = Class("m")
    args = Args([Class("list"),
                 Check("inlist", check_box="always from list", default=False)])
    author = "Christoph Dalitz"
    # Delegates to the C++ implementation exposed by _listutilities.
    def __call__(list, inlist=False):
        return _listutilities.median_py(list, inlist)
    __call__ = staticmethod(__call__)
class median_py(PluginFunction):
    """This is only for Gamera's Python-C++ interface."""
    # No category: hidden from the plugin browser.
    category = None
    self_type = None
    return_type = Class("m")
    args = Args([Class("list"), Check("inlist")])
    author = "Christoph Dalitz"
class kernel_density(PluginFunction):
    """Computes the kernel density for *values* at the specified
    *x*-positions. Reference: S.J. Sheather: \"Density Estimation.\"
    Statistical Science 19 pp. 588-597 (2004).

    Arguments:

    *values*
       Sample values from which the density is to be estimated.

    *x*
       For each value in *x*, the density at this position is returned
       in the returned float vector.

    *bw*
       Band width, i.e. the parameter *h* in the kernel density estimator.
       when set to zero, Silverman's rule of thumb is used, which sets the
       bandwidth to 0.9 min{sigma, iqr/1.34} n^(-1/5).

    *kernel*
       The kernel function that weights the values (0 = rectangular,
       1 = triangular, 2 = Gaussian). A Gaussian kernel produces the smoothest
       result, but is slightly slower than the other two.

       Note that the kernels are normalized to variance one, which means that
       the rectangular kernel has support [-sqrt(3), +sqrt(3)], and the
       triangular kernel has support [-sqrt(6), sqrt(6)].
    """
    category = "List"
    self_type = None
    return_type = FloatVector()
    args = Args([FloatVector("values"), FloatVector("x"), Float("bw", default=0.0), Choice("kernel", choices=["rectangular", "triangular", "gaussian"], default=0)])
    author = "Christoph Dalitz and Fabian Schmitt"
    # Delegates to the C++ implementation exposed by _listutilities.
    def __call__(values, x, bw=0.0, kernel=0):
        return _listutilities.kernel_density(values, x, bw, kernel)
    __call__ = staticmethod(__call__)
class argmax(PluginFunction):
    """Returns the index of the maximum in a list.
    """
    category = "List"
    pure_python = 1
    self_type = None
    return_type = Int()
    args = Args([FloatVector("x")])
    author = "Christoph Dalitz"
    def __call__(x):
        # Linear scan; ties keep the earliest index. An empty input
        # raises IndexError on x[0], matching the documented interface.
        best_index = 0
        best_value = x[0]
        for index in range(1, len(x)):
            if x[index] > best_value:
                best_index = index
                best_value = x[index]
        return best_index
    __call__ = staticmethod(__call__)
class argmin(PluginFunction):
    """Returns the index of the minimum in a list.
    """
    category = "List"
    pure_python = 1
    self_type = None
    return_type = Int()
    args = Args([FloatVector("x")])
    author = "Christoph Dalitz"
    def __call__(x):
        # Linear scan; ties keep the earliest index. An empty input
        # raises IndexError on x[0], matching the documented interface.
        best_index = 0
        best_value = x[0]
        for index in range(1, len(x)):
            if x[index] < best_value:
                best_index = index
                best_value = x[index]
        return best_index
    __call__ = staticmethod(__call__)
class ListUtilitiesModule(PluginModule):
    """Plugin-module registration for the list utility functions."""
    # No category: the module itself is not shown in the browser.
    category = None
    cpp_headers=["listutilities.hpp"]
    functions = [permute_list, all_subsets, median, median_py,
                 kernel_density, argmax, argmin]
    author = "Michael Droettboom and Karl MacMillan"
    url = "http://gamera.sourceforge.net/"
# Instantiate the module, then rebind each plugin class name to a callable
# instance so callers can invoke e.g. listutilities.median(...) directly.
# (median_py is intentionally not rebound; it exists only for the C++ bridge.)
module = ListUtilitiesModule()
permute_list = permute_list()
all_subsets = all_subsets()
median = median()
kernel_density = kernel_density()
argmax = argmax()
argmin = argmin()
|
hsnr-gamera/gamera
|
gamera/plugins/listutilities.py
|
Python
|
gpl-2.0
| 6,385
|
[
"Gaussian"
] |
9f780ef5a32b84f17a53617f17f21f7477c7fadfed90e57237efb17e2758c597
|
# requires scikit bio [http://scikit-bio.org]
from skbio.parse.sequences import parse_fasta
import sys
if __name__ == '__main__':
    # Convert an RNA FASTA file (argv[1]) to its DNA form by replacing
    # every U with T, writing the result to argv[2].
    # FIX: mode 'U' (universal newlines) was deprecated and removed in
    # Python 3.11; plain 'r' already gives universal-newline behaviour.
    with open(sys.argv[1], 'r') as in_file:
        with open(sys.argv[2], 'w') as out_file:
            for label, seq in parse_fasta(in_file):
                seq = seq.replace("U", "T")
                out_file.write(">%s\n%s\n" % (label, seq))
|
ekopylova/sortmerna
|
rRNA_databases/scripts/edit_U_to_T_rna.py
|
Python
|
gpl-3.0
| 380
|
[
"scikit-bio"
] |
bf2cf5e985ec4551858d3f3aa53f693216e5d757ccaf95cdd96c66fb1db8cc74
|
"""
:mod:`svbjt` -- State-Variable-Based Bipolar Transistor
-------------------------------------------------------
.. module:: svbjt
.. moduleauthor:: Carlos Christoffersen
"""
from bjt import extrinsic_bjt
import numpy as np
import cardoon.circuit as cir
from cardoon.globalVars import const, glVar
import cppaddev as ad
from diode import Junction
from svdiode import SVJunction
class SVBJTi(cir.Element):
    """
    State-variable-based Gummel-Poon intrinsic BJT model based

    This implementation based mainly on previous implementation in
    carrot and some equations from Pspice manual, with the addition of
    the state-variable definitions.

    Terminal order: 0 Collector, 1 Base, 2 Emitter, (3 Bulk, not included)::

        C (0) o----,       ,----o E (2)
                    \     /
                     \   /
                    ---------
                        |
                        o
                        B (1)

    Can be used for NPN or PNP transistors.

    Intrinsic Internal Topology
    +++++++++++++++++++++++++++

    The state variable formulation is achieved by replacing the BE and
    BC diodes (Ibf, Ibr) with state-variable based diodes. This
    requires two additional variables (nodes) but eliminates large
    positive exponentials from the model::

                          Term : x2
              +--------------------------+
              |                          |
             /|\                        /^\ 
            ( | ) gyr v2               ( | ) gyr vbc(x)
             \V/                        \|/
        tref  |                          |
         ,----+--------------------------+
         |    |                          |
         |   /^\                        /|\ 
         |  ( | ) gyr v1               ( | ) gyr vbe(x)
        ---  \|/                        \V/
         V    |                          |
              +--------------------------+
                          Term : x1

    All currents/charges in the model are functions of voltages v3
    (x2) and v4 (x1). Note that vbc and vbe are now also functions of
    x1, x2.

    In addition we may need 2 additional nodes (plus reference) if rb
    is not zero: Bi for the internal base node and tib to measure the
    internal base current and calculate Rb(ib).

    1. If RB == 0::

                        +----------------+--o 0 (C)
                    -   |                |
                       /^\               |
            v2        ( | ) ibc(x2)      |
                       \|/               |
                    +   |               /|\ 
           (B) 1 o------+              ( | ) ice(x1,x2)
                    +   |               \V/
                       /|\               |
            v1        ( | ) ibe(x1)      |
                       \V/               |
                    -   |                |
                        +----------------+--o 2 (E)

    2. If RB != 0::

                                 +----------------+--o 0 (C)
                             -   |                |
                                /^\               |
           gyr tib   v2        ( | ) ibc(x2)      |
                                \|/               |
             ,---,           +   |               /|\ 
    (B) 1 o-( --> )----------+   Term : Bi      ( | ) ice(x1,x2)
             `---`           +   |               \V/
                                /|\               |
                     v1        ( | ) ibe(x1)      |
                                \V/               |
                             -   |                |
          gyr v(1,Bi)            +----------------+--o 2 (E)
             ,---,
        +---( <-- ) -----+
        |    `---`       |
       tref              | ib/gyr
        ,--+             |
        |  |    ,---,    |  Term : ib
        |  +---( --> )---+
        |       `---`
       ---
        V       gyr ib Rb(ib)

    Charge sources are connected between internal nodes defined
    above. If xcjc is not 1 but RB is zero, xcjc is ignored.
    """
    # Device type name used by the netlist parser.
    devType = "svbjt"
    # Parameter dictionary: name -> (description, unit, type, default).
    paramDict = dict(
        cir.Element.tempItem,
        type = ('Type (npn or pnp)', '', str, 'npn'),
        isat = ('Transport saturation current', 'A', float, 1e-16),
        bf = ('Ideal maximum forward beta', '', float, 100.),
        nf = ('Forward current emission coefficient', '', float, 1.),
        vaf = ('Forward early voltage', 'V', float, 0.),
        ikf = ('Forward-beta high current roll-off knee current', 'A',
               float, 0.),
        ise = ('Base-emitter leakage saturation current', 'A', float, 0.),
        ne = ('Base-emitter leakage emission coefficient', '', float, 1.5),
        br = ('Ideal maximum reverse beta', '', float, 1.),
        nr = ('Reverse current emission coefficient', '', float, 1.),
        var = ('Reverse early voltage', 'V', float, 0.),
        ikr = ('Corner for reverse-beta high current roll off', 'A', float, 0.),
        isc = ('Base collector leakage saturation current', 'A', float, 0.),
        nc = ('Base-collector leakage emission coefficient', '', float, 2.),
        rb = ('Zero bias base resistance', 'Ohm', float, 0.),
        rbm = ('Minimum base resistance', 'Ohm', float, 0.),
        irb = ('Current at which rb falls to half of rbm', 'A', float, 0.),
        eg = ('Badgap voltage', 'eV', float, 1.11),
        cje = ('Base emitter zero bias p-n capacitance', 'F', float, 0.),
        vje = ('Base emitter built in potential', 'V', float, 0.75),
        mje = ('Base emitter p-n grading factor', '', float, 0.33),
        cjc = ('Base collector zero bias p-n capacitance', 'F', float, 0.),
        vjc = ('Base collector built in potential', 'V', float, 0.75),
        mjc = ('Base collector p-n grading factor', '', float, 0.33),
        xcjc = ('Fraction of cbc connected internal to rb', '', float, 1.),
        fc = ('Forward bias depletion capacitor coefficient', '', float, 0.5),
        tf = ('Ideal forward transit time', 's', float, 0.),
        xtf = ('Transit time bias dependence coefficient', '', float, 0.),
        vtf = ('Transit time dependency on vbc', 'V', float, 0.),
        itf = ('Transit time dependency on ic', 'A', float, 0.),
        tr = ('Ideal reverse transit time', 's', float, 0.),
        xtb = ('Forward and reverse beta temperature coefficient', '',
               float, 0.),
        xti = ('IS temperature effect exponent', '', float, 3.),
        tnom = ('Nominal temperature', 'C', float, 27.),
        area = ('Current multiplier', '', float, 1.)
        )

    def __init__(self, instanceName):
        """Create an intrinsic BJT element named *instanceName*."""
        cir.Element.__init__(self, instanceName)
        # Use junctions to model diodes and capacitors
        self.jif = SVJunction()
        self.jir = SVJunction()
        self.jile = Junction()
        self.jilc = Junction()
        # collector/emitter terminal numbers: may be re-mapped by
        # extrinsic device
        self._ct = 0
        self._et = 2

    def process_params(self):
        """
        Adjusts internal topology and makes preliminary calculations
        according to parameters.
        """
        # Define topology first. Add state variable nodes
        x2 = self.add_internal_term('x2','s.v.')
        x1 = self.add_internal_term('x1','s.v.')
        tref = self.add_reference_term()
        # Default configuration assumes rb == 0
        # ibe, vbe, ibc, vbc, ice
        self.csOutPorts = [(1, self._et), (tref, x1), (1, self._ct),
                           (tref, x2), (self._ct, self._et)]
        # Controling voltages are x1, x2
        self.controlPorts = [(x1, tref), (x2, tref)]
        # qbe, qbc
        self.qsOutPorts = [(1, self._et), (1, self._ct)]
        # Flag to signal if the extra charge Qbx is needed or not
        self._qbx = False
        # Default state-variable VCCSs
        self.linearVCCS = [((1, self._et), (x1, tref), glVar.gyr),
                           ((1, self._ct), (x2, tref), glVar.gyr)]
        if self.rb != 0.:
            # rb is not zero: add internal terminals
            tBi = self.add_internal_term('Bi', 'V')
            tib = self.add_internal_term('ib', '{0} A'.format(glVar.gyr))
            # Add Linear VCCS for gyrator(s)
            self.linearVCCS = [((tBi, self._et), (x1, tref), glVar.gyr),
                               ((tBi, 0), (x2, tref), glVar.gyr),
                               ((1, tBi), (tib, tref), glVar.gyr),
                               ((tib, tref), (1, tBi), glVar.gyr)]
            # ibe, vbe, ibc, vbc, ice, Rb(ib) * ib
            self.csOutPorts = [(tBi, self._et), (tref, x1), (tBi, self._ct),
                               (tref, x2), (self._ct, self._et), (tref, tib)]
            # Controling voltages are x1, x2 and gyrator port
            self.controlPorts = [(x1, tref), (x2, tref), (tib, tref)]
            # qbie, qbic
            self.qsOutPorts = [(tBi, self._et), (tBi, self._ct)]
            # Now check if Cjbc must be splitted (since rb != 0)
            if (self.cjc != 0.) and (self.xcjc < 1.):
                # add extra charge source
                self.qsOutPorts.append((1, self._ct))
                self._qbx = True
        # Make sure the guess is consistent
        self.vPortGuess = np.zeros(len(self.controlPorts))
        # # Try guess in active region
        # self.vPortGuess[0] = 100. # x1
        # self.vPortGuess[1] = -1. # x2
        # if self.rb != 0.:
        #     self.vPortGuess[2] = 1e-6 / glVar.gyr # ib
        # In principle we may not need any charge
        keepPorts = [ ]
        if self.cje + self.tf != 0.:
            # keep qbe
            keepPorts.append(self.qsOutPorts[0])
        if self.cjc + self.tr != 0.:
            # keep qbc, qbx (if any)
            if self._qbx:
                keepPorts += self.qsOutPorts[-2:]
            else:
                keepPorts.append(self.qsOutPorts[-1])
        self.qsOutPorts = keepPorts
        # keep track of how many output variables are needed
        self.ncurrents = len(self.csOutPorts)
        self.ncharges = len(self.qsOutPorts)
        # NPN or PNP
        if self.type == 'pnp':
            self._typef = -1.
        else:
            self._typef = 1.
        # Calculate common variables
        # Absolute nominal temperature
        self.Tnomabs = self.tnom + const.T0
        self.egapn = self.eg - .000702 * (self.Tnomabs**2) \
            / (self.Tnomabs + 1108.)
        # jif produces if, cje
        self.jif.process_params(self.isat, self.nf, self.fc, self.cje,
                                self.vje, self.mje, self.xti, self.eg,
                                self.Tnomabs)
        # jir produces ir, cjc
        self.jir.process_params(self.isat, self.nr, self.fc, self.cjc,
                                self.vjc, self.mjc, self.xti, self.eg,
                                self.Tnomabs)
        if self.ise != 0.:
            # jile produces ile
            self.jile.process_params(self.ise, self.ne, 0, 0, 0, 0,
                                     self.xti, self.eg, self.Tnomabs)
        if self.isc != 0.:
            # jilc produces ilc
            self.jilc.process_params(self.isc, self.nc, 0, 0, 0, 0,
                                     self.xti, self.eg, self.Tnomabs)
        # Constants needed for rb(ib) calculation
        if self.irb != 0.:
            self._ck1 = 144. / self.irb / self.area /np.pi/np.pi
            self._ck2 = np.pi*np.pi * np.sqrt(self.irb * self.area) / 24.

    def set_temp_vars(self, temp):
        """
        Calculate temperature-dependent variables, given temp in deg. C
        """
        # Absolute temperature (note self.temp is in deg. C)
        self.Tabs = const.T0 + temp
        # Normalized temp
        self.tnratio = self.Tabs / self.Tnomabs
        tnXTB = pow(self.tnratio, self.xtb)
        # Thermal voltage
        self.vt = const.k * self.Tabs / const.q
        # Temperature-adjusted egap
        self.egap_t = self.eg - .000702 * (self.Tabs**2) / (self.Tabs + 1108.)
        # set temperature in juctions
        self.jif.set_temp_vars(self.Tabs, self.Tnomabs, self.vt,
                               self.egapn, self.egap_t)
        self.jir.set_temp_vars(self.Tabs, self.Tnomabs, self.vt,
                               self.egapn, self.egap_t)
        # Adjust ise and isc (which have different temperature variation)
        if self.ise != 0.:
            self.jile.set_temp_vars(self.Tabs, self.Tnomabs, self.vt,
                                    self.egapn, self.egap_t)
            self.jile._t_is /= tnXTB
        if self.isc != 0.:
            self.jilc.set_temp_vars(self.Tabs, self.Tnomabs, self.vt,
                                    self.egapn, self.egap_t)
            self.jilc._t_is /= tnXTB
        # Now some BJT-only variables
        self._bf_t = self.bf * tnXTB
        self._br_t = self.br * tnXTB

    def eval_cqs(self, vPort):
        """
        Calculates currents/charges

        Input is a vector may be one of the following, depending on
        parameter values::

          vPort = [xbe, xbc]
          vPort = [xbe, xbc, v4_i] (gyrator voltage, irb != 0)

        Output also depends on parameter values. Charges only present
        if parameters make them different than 0 (i.e., cje, tf, cjc,
        etc. are set to nonzero values)::

          iVec = [ibe, vbe, ibc, vbc, ice]
          iVec = [ibe, vbe, ibc, vbc, ice, gyr*ib*Rb] (rb != 0)
          qVec = [qbe, qbc]
          qVec = [qbe, qbc, qbx] (rb != 0 and cjc != 1)
        """
        # Invert state variables if needed
        vPort1 = self._typef * vPort
        # Calculate junctions currents and voltages
        (ibf, vbe) = self.jif.get_idvd(vPort1[0])
        (ibr, vbc) = self.jir.get_idvd(vPort1[1])
        if self.ise != 0.:
            ile = self.jile.get_id(vbe)
        else:
            ile = 0.
        if self.isc != 0.:
            ilc = self.jilc.get_id(vbc)
        else:
            ilc = 0.
        # Kqb
        q1m1 = 1.
        if self.var != 0.:
            q1m1 -= vbe / self.var
        if self.vaf != 0.:
            q1m1 -= vbc / self.vaf
        kqb = 1. / q1m1
        # We need extra checking to consider the following
        # possibilities to create the AD tape:
        #
        # 1. both ikf and ikr are zero -> no tape generated
        # 2. One of them is nonzero but both ibf and ibr are zero -> want tape
        #    but only for the nonzero parameter
        if self.ikf + self.ikr != 0.:
            q2 = 0.
            if self.ikf != 0.:
                q2 += ibf / self.ikf
            if self.ikr != 0.:
                q2 += ibr / self.ikr
            kqb *= .5 * (1. + np.sqrt(1. + 4. * q2))
        # Create output vector [ibe, ibc, ice, ...]
        # dtype follows ibf so AD (tape) types propagate through the vectors.
        iVec = np.zeros(self.ncurrents, dtype = type(ibf))
        qVec = np.zeros(self.ncharges, dtype = type(ibf))
        # ibe, vbe
        iVec[0] = ibf / self._bf_t + ile
        iVec[1] = glVar.gyr * vbe
        # ibc, vbc
        iVec[2] = ibr / self._br_t + ilc
        iVec[3] = glVar.gyr * vbc
        # ice
        iVec[4] = (ibf - ibr) / kqb
        # RB
        if self.rb != 0.:
            # Using gyrator
            # vPort1[2] not defined if rb == 0
            # ib has area effect included (removed by _ck1 and _ck2)
            ib = vPort1[2] * glVar.gyr
            if self.irb != 0.:
                ib1 = np.abs(ib)
                x = np.sqrt(1. + self._ck1 * ib1) - 1.
                x *= self._ck2 / np.sqrt(ib1)
                tx = np.tan(x)
                c = self.rbm + 3. * (self.rb - self.rbm) \
                    * (tx - x) / (x * tx * tx)
                rb = ad.condassign(ib1, c, self.rb)
            else:
                rb = self.rbm + (self.rb - self.rbm) / kqb
            # Output is gyr * ib * rb. It is divided by area^2 to
            # compensate that the whole vector is multiplied by area
            # at the end
            iVec[5] = glVar.gyr * ib * rb / pow(self.area, 2)
            vbcx = ib * rb / self.area + vbc
        # Charges -----------------------------------------------
        # Note that if tf == 0 and cje == 0, nothing is calculated and
        # nothing is assigned to the output vector.
        # qbe is the first charge (0)
        if self.tf != 0.:
            # Effective tf
            tfeff = self.tf
            if self.vtf != 0.:
                x = ibf / (ibf + self.itf)
                # safe_exp() not needed since positive vbc grows
                # logarithmically
                tfeff *= (1. + self.xtf * x*x *
                          np.exp(vbc /1.44 /self.vtf))
            qVec[0] = tfeff * ibf
        if self.cje != 0.:
            qVec[0] += self.jif.get_qd(vbe)
        # qbc
        if self._qbx:
            if self.tr != 0.:
                qVec[-2] = self.tr * ibr
            if self.cjc != 0.:
                qVec[-2] += self.jir.get_qd(vbc) * self.xcjc
                # qbx
                qVec[-1] = self.jir.get_qd(vbcx) * (1. - self.xcjc)
        else:
            if self.tr != 0.:
                qVec[-1] = self.tr * ibr
            if self.cjc != 0.:
                qVec[-1] += self.jir.get_qd(vbc)
        # Consider area effect and invert currents if needed
        iVec *= self.area * self._typef
        qVec *= self.area * self._typef
        return (iVec, qVec)

    def power(self, vPort, currV):
        """
        Calculate total instantaneous power

        Input: control voltages as in eval_cqs() and currents from
        returned by eval_cqs()
        """
        # vce = vbe - vbc
        gyrvce = currV[1] - currV[3]
        if self.rb != 0.:
            # currV[5] = ib * Rb * gyr
            # vPort[2] = ib / gyr
            pRb = currV[5] * vPort[2]
        else:
            pRb = 0.
        # pout = ibe * vbie + ibc * vbic + vce * ice + pRb
        pout = (currV[0] * currV[1] + currV[2] * currV[3]
                + currV[4] * gyrvce) / glVar.gyr + pRb
        return pout

    def get_OP(self, vPort):
        """
        Calculates operating point information

        Input:  same as eval_cqs
        Output: dictionary with OP variables

        For now it is quite incomplete
        """
        # First we need the Jacobian
        (outV, jac) = self.eval_and_deriv(vPort)
        power = self.power(vPort, outV)
        # calculate gm, etc. in terms od jac for state-variable
        # formulation
        opDict = dict(
            VBE = outV[1] / glVar.gyr,
            VCE = (outV[1] - outV[3]) / glVar.gyr,
            IB = outV[0] + outV[2],
            IC = outV[4] - outV[2],
            IE = - outV[4] - outV[0],
            Temp = self.temp,
            Power = power,
            )
        return opDict

    def get_noise(self, f):
        """
        Return noise spectral density at frequency f

        Requires a previous call to get_OP()

        Not implemented yet
        """
        return None
#--------------------------------------------------------------------------
# Public device class: the intrinsic model wrapped with the extrinsic
# network provided by bjt.extrinsic_bjt.
Device = extrinsic_bjt(SVBJTi)
|
cechrist/cardoon
|
cardoon/devices/svbjt.py
|
Python
|
gpl-3.0
| 19,632
|
[
"xTB"
] |
f9ce89c06b97cfaae8fe194718f94fac01f83d998bdbe53a0ca475d143dc22d6
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012-2013 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
"""XLS exporter"""
import datetime
import tempfile
from kiwi.currency import currency
import xlwt
from stoqlib.exporters.xlsutils import (get_date_format,
get_number_format,
write_app_hyperlink,
write_app_logo)
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
class XLSExporter(object):
    """Export tabular data to an Excel (.xls) workbook via xlwt.

    Typical usage: call set_column_types() and set_column_headers()
    (or add_from_object_list()), then add_cells() and save().
    NOTE(review): this module is Python 2 code (uses ``unicode``/``long``).
    """
    def __init__(self, name=None):
        # NOTE(review): despite the name, _current_column tracks the next
        # ROW to write (it is passed as the row argument of ws.write()).
        # Row 0 is reserved for the logo/hyperlink written by add_cells().
        self._current_column = 1
        self._n_columns = -1
        self._column_styles = None
        self._headers = None
        self._wb = xlwt.Workbook(encoding='utf8')
        if not name:
            name = _('Stoq sheet')
        self._ws = self._wb.add_sheet(name)
        self._setup()

    #
    #   Private
    #

    def _setup(self):
        # Pre-build the cell styles that are reused for every row.
        self._header_font = xlwt.Font()
        self._header_font.bold = True
        self._header_style = xlwt.XFStyle()
        self._header_style.font = self._header_font
        self._style_date = xlwt.XFStyle()
        self._style_date.num_format_str = get_date_format()
        self._style_general = xlwt.XFStyle()
        self._style_general.num_format_str = 'general'
        self._style_number = xlwt.XFStyle()
        self._style_number.num_format_str = get_number_format()

    def _add_row(self, columns, style=None):
        # Write one row; rows wider than the declared column count are
        # rejected to catch caller mistakes early.
        if len(columns) - 1 > self._n_columns:
            raise ValueError(columns, self._n_columns)
        for i, column in enumerate(columns):
            self._write_one(i, column, style=style)
        self._current_column += 1

    def _write_one(self, i, data, style=None):
        # Write a single cell, normalizing dates and byte strings first.
        if style is None:
            style = self._column_styles[i]
        if data is None:
            data = ''
        else:
            if isinstance(data, datetime.date):
                data = data.strftime('%Y-%m-%d')
            elif isinstance(data, str):
                # Python 2: decode byte strings so xlwt gets unicode.
                data = unicode(data, 'utf-8')
        self._ws.write(self._current_column, i, data, style)

    #
    #   Public API
    #

    def set_column_headers(self, headers):
        """Set the header row written (bold) before the data rows."""
        self._headers = headers

    def set_column_types(self, column_types):
        """Map each column's Python type to a matching cell style."""
        css = []
        for i, column_type in enumerate(column_types):
            if column_type in (datetime.datetime, datetime.date):
                style = self._style_date
            elif column_type in [int, long, float, currency]:
                style = self._style_number
            else:
                style = self._style_general
            css.append(style)
        self._column_styles = css
        self._n_columns = len(column_types)

    def add_cells(self, cells):
        """Write logo, hyperlink, optional headers and all data rows."""
        write_app_logo(self._ws)
        write_app_hyperlink(self._ws, 0)
        if self._headers:
            self._add_row(self._headers, style=self._header_style)
        for y, line in enumerate(cells):
            self._add_row(line)

    def save(self, prefix=''):
        """Save the workbook to a temporary .xls file and return it.

        The file is created with delete=False; removal is the caller's
        responsibility.
        """
        if prefix:
            prefix = 'Stoq-%s-' % (prefix, )
        else:
            prefix = 'Stoq-'
        temporary = tempfile.NamedTemporaryFile(
            prefix=prefix,
            suffix='.xls', delete=False)
        self._wb.save(temporary.name)
        return temporary

    def add_from_object_list(self, objectlist, data=None):
        """Populate the sheet from a kiwi ObjectList's visible columns."""
        columns = objectlist.get_visible_columns()
        self.set_column_types([
            c.data_type for c in columns])
        self.set_column_headers([
            getattr(c, 'long_title', None) or c.title for c in columns])
        self.add_cells(objectlist.get_cell_contents(data))
|
tiagocardosos/stoq
|
stoqlib/exporters/xlsexporter.py
|
Python
|
gpl-2.0
| 4,485
|
[
"VisIt"
] |
de580503c26f43988d308b6bce2993139b5f6868ccb40032a0cc0e2661dddac9
|
#*************************************************************************
# Copyright (C) 2015 by Arash Bakhtiari
# You may not use this file except in compliance with the License.
# You obtain a copy of the License in the LICENSE file.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#*************************************************************************
###############################################################################
# IMPORT LOCAL LIBRARIES
###############################################################################
from math import *
from visit import *
def set_view(theta=7*pi/12):
    """Aim the VisIt 3-D camera from azimuth *theta* (polar angle pi/4),
    with full zoom and z as the up direction."""
    phi = pi/4
    view = GetView3D()
    view.imageZoom = 1.0
    # Spherical direction (theta around z, phi from the x-y plane).
    sin_theta = sin(theta)
    view.viewNormal = (cos(theta), cos(phi)*sin_theta, sin(phi)*sin_theta)
    view.viewUp = (0, 0, 1)
    SetView3D(view)
def save_images(image_dir):
    """Render every time-slider state to a 2048x2048 PNG under ``image_dir``."""
    # Hide all annotations so only the plot itself is rendered.
    annotations = AnnotationAttributes()
    annotations.userInfoFlag = 0
    annotations.databaseInfoFlag = 0
    annotations.timeInfoFlag = 0
    annotations.axes3D.visible = 0
    annotations.axes3D.triadFlag = 0
    annotations.axes3D.bboxFlag = 0
    SetAnnotationAttributes(annotations)
    # Configure the save-window: PNG files named image_XXXX in image_dir.
    window = SaveWindowAttributes()
    window.outputToCurrentDirectory = 0
    window.outputDirectory = image_dir
    window.fileName = "image_"
    window.quality = 100
    window.format = window.PNG  # other formats: BMP, JPEG, TIFF, VTK, ...
    window.width = 2048
    window.height = 2048
    window.screenCapture = 0
    # Walk every state of the active time slider and save one frame each.
    for state in range(TimeSliderGetNStates()):
        TimeSliderSetState(state)
        SetSaveWindowAttributes(window)
        SaveWindow()
def change_view_and_save(image_dir, theta_i=pi/4, theta_f=7*pi/12):
    """Sweep the camera azimuth from ``theta_i`` to ``theta_f``, saving frames.

    At each intermediate angle the zoom is ramped from 0.8 towards 1.0 and
    one pass of :func:`save_images` is run; a final pass is then rendered
    exactly at ``theta_f`` with full zoom.

    :param image_dir: directory the rendered frames are written to.
    :param theta_i: starting azimuth (radians).
    :param theta_f: final azimuth (radians).
    """
    phi = pi/4
    d_theta = 0.01
    # Number of animation steps needed to cover [theta_i, theta_f].
    steps = int(floor((theta_f - theta_i) / d_theta)) + 1
    v = GetView3D()
    theta = theta_i
    for i in range(steps):
        # Ramp the zoom towards 1.0 while rotating.
        v.imageZoom = 0.8 + i * 0.2 / steps
        v.viewNormal = (cos(theta), cos(phi)*sin(theta), sin(phi)*sin(theta))
        v.viewUp = (0, 0, 1)
        SetView3D(v)
        theta = theta + d_theta
        save_images(image_dir)
    # Final frame: land exactly on theta_f with full zoom.  The original
    # trailing "theta = theta + d_theta" after this point was dead code
    # (theta was never read again) and has been removed.
    theta = theta_f
    v.imageZoom = 1.0
    v.viewNormal = (cos(theta), cos(phi)*sin(theta), sin(phi)*sin(theta))
    v.viewUp = (0, 0, 1)
    SetView3D(v)
    save_images(image_dir)
|
arashb/tbslas
|
scripts/vis_plot_utils.py
|
Python
|
bsd-3-clause
| 2,818
|
[
"VTK",
"VisIt"
] |
405398af7facd62f1baa04e54df2eb09a89c77ee1878ddd617baf8f6109983c6
|
#!/usr/bin/env python
"""Download ERA-Interim monthly pressure-level analyses (1979-2014) from the
ECMWF data server, one NetCDF file per year and parameter."""
from ecmwfapi import ECMWFDataServer
server = ECMWFDataServer()
import os, os.path
fields_36912 = ""  # NOTE(review): unused -- looks like a leftover placeholder
# GRIB parameter codes fetched one at a time (presumably z/t/u/v/q/w/d --
# confirm against the ECMWF parameter database).
fields_0 = "129.128/130.128/131.128/132.128/133.128/135.128/155.128"
for year in range(1979, 2015):
    for f in fields_0.split('/'):
        # Target file name: <year>.<param>.an.nc
        fn = "{0}.{1}.an.nc".format(year,f)
        print(fn)
        # Skip files that already exist so an interrupted run can resume.
        if not os.path.isfile(fn):
            server.retrieve({
                "class": "ei",
                "dataset": "interim",
                "date": "{0}0101/{0}0201/{0}0301/{0}0401/{0}0501/{0}0601/{0}0701/{0}0801/{0}0901/{0}1001/{0}1101/{0}1201".format(year),
                "grid": "0.75/0.75",
                "levelist": "1/2/3/5/7/10/20/30/50/70/100/125/150/175/200/225/250/300/350/400/450/500/550/600/650/700/750/775/800/825/850/875/900/925/950/975/1000",
                "levtype": "pl",
                "param": f,
                "step": "0",
                "stream": "mnth",
                "target": fn,
                "time": "00/06/12/18",
                "type": "an",
                "format": "netcdf"
            })
print("All done")
|
r-shekhar/ReanalysisUnifier
|
eraI_monthly/fetch_MSE.py
|
Python
|
mit
| 1,147
|
[
"NetCDF"
] |
704b2bdb3c8d6430ec2386cb06adc53b5289b191f6e438c7d57917e808bf2e28
|
#!/usr/bin/env python
# encoding: utf-8
#
# bpt.py
#
# Created by José Sánchez-Gallego on 19 Jan 2017.
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import warnings
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import ImageGrid
from marvin.core.exceptions import MarvinDeprecationWarning, MarvinError
def get_snr(snr_min, emission_line, default=3):
    """Return the minimum SNR to use for ``emission_line``.

    A scalar ``snr_min`` applies to every line.  A dict is looked up by
    emission-line name, falling back to ``default`` for missing keys.
    """
    if isinstance(snr_min, dict):
        return snr_min.get(emission_line, default)
    return snr_min
def get_masked(maps, emline, snr=1):
    """Return the masked Gaussian-flux map for ``emline``.

    Extends the map's own mask so that non-positive fluxes, spaxels below
    the ``snr`` cutoff, and spaxels with zero inverse variance are all
    masked out.
    """
    flux_map = maps['emline_gflux_' + emline]
    masked_flux = flux_map.masked
    # One combined pass: bad flux OR low SNR OR unusable inverse variance.
    bad = ((masked_flux.data <= 0) |
           (flux_map.snr < snr) |
           (flux_map.ivar == 0))
    masked_flux.mask |= bad
    return masked_flux
def _get_kewley06_axes(use_oi=True):
    """Creates custom axes for displaying Kewley06 plots.

    Parameters:
        use_oi (bool): if True a third panel for the [OI]/Ha diagnostic is
            added next to the [NII]/Ha and [SII]/Ha panels.

    Returns:
        tuple: ``(fig, grid_bpt, gal_axes)`` -- the figure, the ImageGrid
        holding the 2 or 3 BPT classification panels, and the single axes
        used to display the spatial map of the galaxy.
    """
    fig = plt.figure(1, (8.5, 10))
    fig.clf()
    plt.subplots_adjust(top=0.99, bottom=0.08, hspace=0.01)
    # The axes for the three classification plots
    grid_bpt = ImageGrid(fig, 211,
                         nrows_ncols=(1, 3) if use_oi else (1, 2),
                         direction='row',
                         axes_pad=0.1,
                         add_all=True,
                         label_mode='L',
                         share_all=False)
    # The axes for the galaxy display
    gal_bpt = ImageGrid(fig, 212, nrows_ncols=(1, 1))
    # Plots the classification boundary lines.  Each x range stops short of
    # the matching curve's asymptote (see the kewley_* helper functions).
    xx_sf_nii = np.linspace(-1.281, 0.045, int(1e4))
    xx_sf_sii = np.linspace(-2, 0.315, int(1e4))
    xx_sf_oi = np.linspace(-2.5, -0.7, int(1e4))
    xx_comp_nii = np.linspace(-2, 0.4, int(1e4))
    xx_agn_sii = np.array([-0.308, 1.0])
    xx_agn_oi = np.array([-1.12, 0.5])
    grid_bpt[0].plot(xx_sf_nii, kewley_sf_nii(xx_sf_nii), 'k--', zorder=90)
    grid_bpt[1].plot(xx_sf_sii, kewley_sf_sii(xx_sf_sii), 'r-', zorder=90)
    if use_oi:
        grid_bpt[2].plot(xx_sf_oi, kewley_sf_oi(xx_sf_oi), 'r-', zorder=90)
    grid_bpt[0].plot(xx_comp_nii, kewley_comp_nii(xx_comp_nii), 'r-', zorder=90)
    grid_bpt[1].plot(xx_agn_sii, kewley_agn_sii(xx_agn_sii), 'b-', zorder=80)
    if use_oi:
        grid_bpt[2].plot(xx_agn_oi, kewley_agn_oi(xx_agn_oi), 'b-', zorder=80)
    # Adds captions
    grid_bpt[0].text(-1, -0.5, 'SF', ha='center', fontsize=12, zorder=100, color='c')
    grid_bpt[0].text(0.5, 0.5, 'AGN', ha='left', fontsize=12, zorder=100)
    grid_bpt[0].text(-0.08, -1.2, 'Comp', ha='left', fontsize=12, zorder=100, color='g')
    grid_bpt[1].text(-1.2, -0.5, 'SF', ha='center', fontsize=12, zorder=100)
    grid_bpt[1].text(-1, 1.2, 'Seyfert', ha='left', fontsize=12, zorder=100, color='r')
    grid_bpt[1].text(0.3, -1, 'LINER', ha='left', fontsize=12, zorder=100, color='m')
    if use_oi:
        grid_bpt[2].text(-2, -0.5, 'SF', ha='center', fontsize=12, zorder=100)
        grid_bpt[2].text(-1.5, 1, 'Seyfert', ha='left', fontsize=12, zorder=100)
        grid_bpt[2].text(-0.1, -1, 'LINER', ha='right', fontsize=12, zorder=100)
    # Sets the ticks, ticklabels, and other details
    xtick_limits = ((-2, 1), (-1.5, 1), (-2.5, 0.5))
    axes = [0, 1, 2] if use_oi else [0, 1]
    for ii in axes:
        grid_bpt[ii].get_xaxis().set_tick_params(direction='in')
        grid_bpt[ii].get_yaxis().set_tick_params(direction='in')
        grid_bpt[ii].set_xticks(np.arange(xtick_limits[ii][0], xtick_limits[ii][1] + 0.5, 0.5))
        grid_bpt[ii].set_xticks(np.arange(xtick_limits[ii][0],
                                          xtick_limits[ii][1] + 0.1, 0.1), minor=True)
        grid_bpt[ii].set_yticks(np.arange(-1.5, 2.0, 0.5))
        grid_bpt[ii].set_yticks(np.arange(-1.5, 1.6, 0.1), minor=True)
        grid_bpt[ii].grid(which='minor', alpha=0.2)
        grid_bpt[ii].grid(which='major', alpha=0.5)
        grid_bpt[ii].set_xlim(xtick_limits[ii][0], xtick_limits[ii][1])
        grid_bpt[ii].set_ylim(-1.5, 1.6)
        if use_oi:
            grid_bpt[ii].set_ylim(-1.5, 1.8)
            grid_bpt[ii].spines['top'].set_visible(True)
        if ii in [0, 1]:
            if not use_oi and ii == 1:
                continue
            # Hide the last x tick label so adjacent panels don't overlap.
            grid_bpt[ii].get_xticklabels()[-1].set_visible(False)
    grid_bpt[0].set_ylabel(r'log([OIII]/H$\beta$)')
    grid_bpt[0].set_xlabel(r'log([NII]/H$\alpha$)')
    grid_bpt[1].set_xlabel(r'log([SII]/H$\alpha$)')
    if use_oi:
        grid_bpt[2].set_xlabel(r'log([OI]/H$\alpha$)')
    gal_bpt[0].grid(False)
    return fig, grid_bpt, gal_bpt[0]
def kewley_sf_nii(log_nii_ha):
    """Kewley+06 star-forming boundary in the [NII]/Ha vs [OIII]/Hb plane."""
    return 1.3 + 0.61 / (log_nii_ha - 0.05)
def kewley_sf_sii(log_sii_ha):
    """Kewley+06 star-forming boundary in the [SII]/Ha vs [OIII]/Hb plane."""
    return 1.3 + 0.72 / (log_sii_ha - 0.32)
def kewley_sf_oi(log_oi_ha):
    """Kewley+06 star-forming boundary in the [OI]/Ha vs [OIII]/Hb plane."""
    return 1.33 + 0.73 / (log_oi_ha + 0.59)
def kewley_comp_nii(log_nii_ha):
    """Kewley+06 composite upper boundary for log([NII]/Ha)."""
    return 1.19 + 0.61 / (log_nii_ha - 0.47)
def kewley_agn_sii(log_sii_ha):
    """Kewley+06 Seyfert/LINER dividing line for log([SII]/Ha)."""
    return 0.76 + 1.89 * log_sii_ha
def kewley_agn_oi(log_oi_ha):
    """Kewley+06 Seyfert/LINER dividing line for log([OI]/Ha)."""
    return 1.30 + 1.18 * log_oi_ha
def bpt_kewley06(maps, snr_min=3, return_figure=True, use_oi=True, **kwargs):
    """Returns a classification of ionisation regions, as defined in Kewley+06.
    Makes use of the classification system defined by
    `Kewley et al. (2006) <https://ui.adsabs.harvard.edu/#abs/2006MNRAS.372..961K/abstract>`_
    to return classification masks for different ionisation mechanisms. If ``return_figure=True``,
    produces and returns a matplotlib figure with the classification plots (based on
    Kewley+06 Fig. 4) and the 2D spatial distribution of classified spaxels (i.e., a map of the
    galaxy in which each spaxel is colour-coded based on its emission mechanism).
    While it is possible to call this function directly, its normal use will be via the
    :func:`~marvin.tools.maps.Maps.get_bpt` method.
    Parameters:
        maps (a Marvin :class:`~marvin.tools.maps.Maps` object)
            The Marvin Maps object that contains the emission line maps to be used to determine
            the BPT classification.
        snr_min (float or dict):
            The signal-to-noise cutoff value for the emission lines used to generate the BPT
            diagram. If ``snr_min`` is a single value, that signal-to-noise will be used for all
            the lines. Alternatively, a dictionary of signal-to-noise values, with the
            emission line channels as keys, can be used.
            E.g., ``snr_min={'ha': 5, 'nii': 3, 'oi': 1}``. If some values are not provided,
            they will default to ``SNR>=3``.
        return_figure (bool):
            If ``True``, it also returns the matplotlib figure_ of the BPT diagram plot,
            which can be used to modify the style of the plot.
        use_oi (bool):
            If ``True``, uses the OI diagnostic diagram for spaxel classification.
    Returns:
        bpt_return:
            ``bpt_kewley06`` returns a dictionary of dictionaries of classification masks.
            The classification masks (not to be confused with bitmasks) are boolean arrays with the
            same shape as the Maps or Cube (without the spectral dimension) that can be used
            to select spaxels belonging to a certain excitation process (e.g., star forming).
            The returned dictionary has the following keys: ``'sf'`` (star forming), ``'comp'``
            (composite), ``'agn'``, ``'seyfert'``, ``'liner'``, ``'invalid'``
            (spaxels that are masked out at the DAP level), and ``'ambiguous'`` (good spaxels that
            do not fall in any classification or fall in more than one). Each key provides access
            to a new dictionary with keys ``'nii'`` (for the constraints in the diagram NII/Halpha
            vs OIII/Hbeta), ``'sii'`` (SII/Halpha vs OIII/Hbeta), ``'oi'`` (OI/Halpha vs
            OIII/Hbeta; only if ``use_oi=True``), and ``'global'``, which applies all the previous
            constraints at once. The ``'ambiguous'`` mask only contains the ``'global'``
            subclassification, while the ``'comp'`` dictionary only contains ``'nii'``.
            ``'nii'`` is not available for ``'seyfert'`` and ``'liner'``. All the global masks are
            unique (a spaxel can only belong to one of them) with the exception of ``'agn'``, which
            intersects with ``'seyfert'`` and ``'liner'``. Additionally, if ``return_figure=True``,
            ``bpt_kewley06`` will return a tuple, the first element of which is the dictionary of
            classification masks, and the second the matplotlib figure for the generated plot.
    Example:
        >>> maps_8485_1901 = Maps(plateifu='8485-1901')
        >>> bpt_masks = bpt_kewley06(maps_8485_1901)
        Gets the global mask for star forming spaxels
        >>> sf = bpt_masks['sf']['global']
        Gets the seyfert mask based only on the SII/Halpha vs OIII/Hbeta diagnostics
        >>> seyfert_sii = bpt_masks['seyfert']['sii']
    """
    # Backwards compatibility for the deprecated ``snr`` keyword.
    if 'snr' in kwargs:
        warnings.warn('snr is deprecated. Use snr_min instead. '
                      'snr will be removed in a future version of marvin',
                      MarvinDeprecationWarning)
        snr_min = kwargs.pop('snr')
    elif len(kwargs.keys()) > 0:
        raise MarvinError('unknown keyword {0}'.format(list(kwargs.keys())[0]))
    # Gets the necessary emission line maps
    oiii = get_masked(maps, 'oiii_5008', snr=get_snr(snr_min, 'oiii'))
    nii = get_masked(maps, 'nii_6585', snr=get_snr(snr_min, 'nii'))
    ha = get_masked(maps, 'ha_6564', snr=get_snr(snr_min, 'ha'))
    hb = get_masked(maps, 'hb_4862', snr=get_snr(snr_min, 'hb'))
    sii = get_masked(maps, 'sii_6718', snr=get_snr(snr_min, 'sii'))
    oi = get_masked(maps, 'oi_6302', snr=get_snr(snr_min, 'oi'))
    # Calculate masked logarithms
    log_oiii_hb = np.ma.log10(oiii / hb)
    log_nii_ha = np.ma.log10(nii / ha)
    log_sii_ha = np.ma.log10(sii / ha)
    log_oi_ha = np.ma.log10(oi / ha)
    # Calculates masks for each emission mechanism according to the paper boundaries.
    # The log_nii_ha < 0.05, log_sii_ha < 0.32, etc are necessary because the classification lines
    # diverge and we only want the region before the asymptota.
    # .filled(False) turns masked (invalid) comparisons into "not classified".
    sf_mask_nii = ((log_oiii_hb < kewley_sf_nii(log_nii_ha)) & (log_nii_ha < 0.05)).filled(False)
    sf_mask_sii = ((log_oiii_hb < kewley_sf_sii(log_sii_ha)) & (log_sii_ha < 0.32)).filled(False)
    sf_mask_oi = ((log_oiii_hb < kewley_sf_oi(log_oi_ha)) & (log_oi_ha < -0.59)).filled(False)
    sf_mask = sf_mask_nii & sf_mask_sii & sf_mask_oi if use_oi else sf_mask_nii & sf_mask_sii
    # Composite: above the SF line but below the upper composite boundary in
    # the NII diagram, while still star-forming in the other diagram(s).
    comp_mask = ((log_oiii_hb > kewley_sf_nii(log_nii_ha)) & (log_nii_ha < 0.05)).filled(False) & \
                ((log_oiii_hb < kewley_comp_nii(log_nii_ha)) & (log_nii_ha < 0.465)).filled(False)
    comp_mask &= (sf_mask_sii & sf_mask_oi) if use_oi else sf_mask_sii
    agn_mask_nii = ((log_oiii_hb > kewley_comp_nii(log_nii_ha)) |
                    (log_nii_ha > 0.465)).filled(False)
    agn_mask_sii = ((log_oiii_hb > kewley_sf_sii(log_sii_ha)) |
                    (log_sii_ha > 0.32)).filled(False)
    agn_mask_oi = ((log_oiii_hb > kewley_sf_oi(log_oi_ha)) |
                   (log_oi_ha > -0.59)).filled(False)
    agn_mask = agn_mask_nii & agn_mask_sii & agn_mask_oi if use_oi else agn_mask_nii & agn_mask_sii
    # AGN spaxels are split into Seyfert/LINER by the kewley_agn_* lines.
    seyfert_mask_sii = agn_mask & (kewley_agn_sii(log_sii_ha) < log_oiii_hb).filled(False)
    seyfert_mask_oi = agn_mask & (kewley_agn_oi(log_oi_ha) < log_oiii_hb).filled(False)
    seyfert_mask = seyfert_mask_sii & seyfert_mask_oi if use_oi else seyfert_mask_sii
    liner_mask_sii = agn_mask & (kewley_agn_sii(log_sii_ha) > log_oiii_hb).filled(False)
    liner_mask_oi = agn_mask & (kewley_agn_oi(log_oi_ha) > log_oiii_hb).filled(False)
    liner_mask = liner_mask_sii & liner_mask_oi if use_oi else liner_mask_sii
    # The invalid mask is the combination of spaxels that are invalid in all of the emission maps
    invalid_mask_nii = ha.mask | oiii.mask | nii.mask | hb.mask
    invalid_mask_sii = ha.mask | oiii.mask | sii.mask | hb.mask
    invalid_mask_oi = ha.mask | oiii.mask | oi.mask | hb.mask
    invalid_mask = ha.mask | oiii.mask | nii.mask | hb.mask | sii.mask
    if use_oi:
        invalid_mask |= oi.mask
    # The ambiguous mask are spaxels that are not invalid but don't fall into any of the
    # emission mechanism classifications.
    ambiguous_mask = ~(sf_mask | comp_mask | seyfert_mask | liner_mask) & ~invalid_mask
    sf_classification = {'global': sf_mask,
                         'nii': sf_mask_nii,
                         'sii': sf_mask_sii}
    comp_classification = {'global': comp_mask,
                           'nii': comp_mask}
    agn_classification = {'global': agn_mask,
                          'nii': agn_mask_nii,
                          'sii': agn_mask_sii}
    seyfert_classification = {'global': seyfert_mask,
                              'sii': seyfert_mask_sii}
    liner_classification = {'global': liner_mask,
                            'sii': liner_mask_sii}
    invalid_classification = {'global': invalid_mask,
                              'nii': invalid_mask_nii,
                              'sii': invalid_mask_sii}
    ambiguous_classification = {'global': ambiguous_mask}
    if use_oi:
        sf_classification['oi'] = sf_mask_oi
        agn_classification['oi'] = agn_mask_oi
        seyfert_classification['oi'] = seyfert_mask_oi
        liner_classification['oi'] = liner_mask_oi
        invalid_classification['oi'] = invalid_mask_oi
    bpt_return_classification = {'sf': sf_classification,
                                 'comp': comp_classification,
                                 'agn': agn_classification,
                                 'seyfert': seyfert_classification,
                                 'liner': liner_classification,
                                 'invalid': invalid_classification,
                                 'ambiguous': ambiguous_classification}
    if not return_figure:
        return bpt_return_classification
    # Does all the plotting
    with plt.style.context('seaborn-darkgrid'):
        fig, grid_bpt, gal_bpt = _get_kewley06_axes(use_oi=use_oi)
    sf_kwargs = {'marker': 's', 's': 12, 'color': 'c', 'zorder': 50, 'alpha': 0.7, 'lw': 0.0}
    sf_handler = grid_bpt[0].scatter(log_nii_ha[sf_mask], log_oiii_hb[sf_mask], **sf_kwargs)
    grid_bpt[1].scatter(log_sii_ha[sf_mask], log_oiii_hb[sf_mask], **sf_kwargs)
    comp_kwargs = {'marker': 's', 's': 12, 'color': 'g', 'zorder': 45, 'alpha': 0.7, 'lw': 0.0}
    comp_handler = grid_bpt[0].scatter(log_nii_ha[comp_mask], log_oiii_hb[comp_mask],
                                       **comp_kwargs)
    grid_bpt[1].scatter(log_sii_ha[comp_mask], log_oiii_hb[comp_mask], **comp_kwargs)
    seyfert_kwargs = {'marker': 's', 's': 12, 'color': 'r', 'zorder': 40, 'alpha': 0.7, 'lw': 0.0}
    seyfert_handler = grid_bpt[0].scatter(log_nii_ha[seyfert_mask], log_oiii_hb[seyfert_mask],
                                          **seyfert_kwargs)
    grid_bpt[1].scatter(log_sii_ha[seyfert_mask], log_oiii_hb[seyfert_mask], **seyfert_kwargs)
    liner_kwargs = {'marker': 's', 's': 12, 'color': 'm', 'zorder': 35, 'alpha': 0.7, 'lw': 0.0}
    liner_handler = grid_bpt[0].scatter(log_nii_ha[liner_mask], log_oiii_hb[liner_mask],
                                        **liner_kwargs)
    grid_bpt[1].scatter(log_sii_ha[liner_mask], log_oiii_hb[liner_mask], **liner_kwargs)
    amb_kwargs = {'marker': 's', 's': 12, 'color': '0.6', 'zorder': 30, 'alpha': 0.7, 'lw': 0.0}
    amb_handler = grid_bpt[0].scatter(log_nii_ha[ambiguous_mask], log_oiii_hb[ambiguous_mask],
                                      **amb_kwargs)
    grid_bpt[1].scatter(log_sii_ha[ambiguous_mask], log_oiii_hb[ambiguous_mask], **amb_kwargs)
    if use_oi:
        grid_bpt[2].scatter(log_oi_ha[sf_mask], log_oiii_hb[sf_mask], **sf_kwargs)
        grid_bpt[2].scatter(log_oi_ha[comp_mask], log_oiii_hb[comp_mask], **comp_kwargs)
        grid_bpt[2].scatter(log_oi_ha[seyfert_mask], log_oiii_hb[seyfert_mask], **seyfert_kwargs)
        grid_bpt[2].scatter(log_oi_ha[liner_mask], log_oiii_hb[liner_mask], **liner_kwargs)
        grid_bpt[2].scatter(log_oi_ha[ambiguous_mask], log_oiii_hb[ambiguous_mask], **amb_kwargs)
    # Creates the legend
    grid_bpt[0].legend([sf_handler, comp_handler, seyfert_handler, liner_handler, amb_handler],
                       ['Star-forming', 'Composite', 'Seyfert', 'LINER', 'Ambiguous'], ncol=2,
                       loc='upper left', frameon=True, labelspacing=0.1, columnspacing=0.1,
                       handletextpad=0.1, fontsize=9)
    # Creates a RGB image of the galaxy, and sets the colours of the spaxels to match the
    # classification masks
    gal_rgb = np.zeros((ha.shape[0], ha.shape[1], 3), dtype=np.uint8)
    for ii in [1, 2]:  # Cyan
        gal_rgb[:, :, ii][sf_mask] = 255
    gal_rgb[:, :, 1][comp_mask] = 128  # Green
    gal_rgb[:, :, 0][seyfert_mask] = 255  # Red
    # Magenta
    gal_rgb[:, :, 0][liner_mask] = 255
    gal_rgb[:, :, 2][liner_mask] = 255
    for ii in [0, 1, 2]:
        gal_rgb[:, :, ii][invalid_mask] = 255  # White
        gal_rgb[:, :, ii][ambiguous_mask] = 169  # Grey
    # Shows the image.
    gal_bpt.imshow(gal_rgb, origin='lower', aspect='auto', interpolation='nearest')
    gal_bpt.set_xlim(0, ha.shape[1] - 1)
    gal_bpt.set_ylim(0, ha.shape[0] - 1)
    gal_bpt.set_xlabel('x [spaxels]')
    gal_bpt.set_ylabel('y [spaxels]')
    return (bpt_return_classification, fig)
|
bretthandrews/marvin
|
python/marvin/utils/dap/bpt.py
|
Python
|
bsd-3-clause
| 18,520
|
[
"Galaxy"
] |
ac82b9384eaac70be2d6cb4864c3bbaadcf3bff0c4d9da89ccdc1a38f9631d41
|
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import threading
from selenium import webdriver
from lastuser_core.models import db
from lastuserapp import app
# Address the test WSGI server binds to; SERVER_NAME wins over the default.
server_name = app.config.get('SERVER_NAME') or 'localhost:7001'
base_url = 'http://%s' % server_name
# NOTE(review): assumes server_name always contains "host:port"; a value
# without a colon would make this unpacking raise ValueError -- confirm.
host, port = server_name.split(':')
port = int(port) if port else 7001
def before_all(context):
    # Serve the app in a background thread so the browser can reach it
    # over HTTP for the whole test run.
    context.server = make_server(host, port, app)
    context.thread = threading.Thread(target=context.server.serve_forever)
    context.thread.start()
    context.browser = webdriver.PhantomJS()
    # Convenience helper: visit() takes a path relative to the test server.
    context.browser.visit = lambda url: context.browser.get(base_url + url)
def after_all(context):
    # Shut down the WSGI server and wait for its thread before quitting.
    # Explicitly quits the browser, otherwise it won't once tests are done
    context.server.shutdown()
    context.thread.join()
    context.browser.quit()
def after_step(context, step):
    # Drop cookies so login/session state never leaks between steps.
    context.browser.delete_all_cookies()
def before_feature(context, feature):
    # Fresh database schema for every feature.
    db.create_all()
def after_feature(context, feature):
    # Commit any pending transaction before tearing the schema down.
    db.session.commit()
    db.drop_all()
|
hasgeek/lastuser
|
features/environment.py
|
Python
|
bsd-2-clause
| 1,055
|
[
"VisIt"
] |
de8c20d7d15c70791b0316fe0d41e3b913fc89081312d7c51284dc307c56bae9
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
These plugins modify the behavior of py.test and are meant to be imported
into conftest.py in the root directory.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import __future__
from ..extern import six
import ast
import datetime
import io
import locale
import math
import os
import re
import sys
import types
from collections import OrderedDict
import pytest
from ..config.paths import set_temp_config, set_temp_cache
from .helper import treat_deprecations_as_exceptions, ignore_warnings
from .helper import enable_deprecations_as_exceptions # pylint: disable=W0611
from ..utils.argparse import writeable_directory
from ..utils.introspection import resolve_name
try:
import importlib.machinery as importlib_machinery
except ImportError: # Python 2.7
importlib_machinery = None
pytest_plugins = ('astropy.tests.pytest_doctestplus',
'astropy.tests.pytest_openfiles',
'astropy.tests.pytest_repeat',
'astropy.tests.pytest_remotedata')
# these pytest hooks allow us to mark tests and run the marked tests with
# specific command line options.
def pytest_addoption(parser):
    """Register ``--config-dir``/``--cache-dir`` options and matching ini keys.

    The same directories can be set either on the command line or in the
    ini file; ``pytest_runtest_setup`` gives the command line priority.
    """
    parser.addoption("--config-dir", nargs='?', type=writeable_directory,
                     help="specify directory for storing and retrieving the "
                     "Astropy configuration during tests (default is "
                     "to use a temporary directory created by the test "
                     "runner); be aware that using an Astropy config "
                     "file other than the default can cause some tests "
                     "to fail unexpectedly")
    parser.addoption("--cache-dir", nargs='?', type=writeable_directory,
                     help="specify directory for storing and retrieving the "
                     "Astropy cache during tests (default is "
                     "to use a temporary directory created by the test "
                     "runner)")
    parser.addini("config_dir",
                  "specify directory for storing and retrieving the "
                  "Astropy configuration during tests (default is "
                  "to use a temporary directory created by the test "
                  "runner); be aware that using an Astropy config "
                  "file other than the default can cause some tests "
                  "to fail unexpectedly", default=None)
    parser.addini("cache_dir",
                  "specify directory for storing and retrieving the "
                  "Astropy cache during tests (default is "
                  "to use a temporary directory created by the test "
                  "runner)", default=None)
def pytest_configure(config):
    # Turn Astropy deprecation warnings into exceptions for the whole run.
    treat_deprecations_as_exceptions()
def pytest_runtest_setup(item):
    """Install temporary Astropy config/cache directories for this test.

    The directories may come from the ini file or the command line, with
    the command-line value winning.  The context managers are entered here
    and stored on ``item`` so ``pytest_runtest_teardown`` can exit them.
    """
    config_dir = item.config.getoption('config_dir') or item.config.getini('config_dir')
    cache_dir = item.config.getoption('cache_dir') or item.config.getini('cache_dir')
    # py.test (pre-2.7) has no direct context-manager support, so the
    # managers are entered manually and exited in the teardown hook.
    if config_dir:
        item.set_temp_config = set_temp_config(config_dir)
        item.set_temp_config.__enter__()
    if cache_dir:
        item.set_temp_cache = set_temp_cache(cache_dir)
        item.set_temp_cache.__enter__()
def pytest_runtest_teardown(item, nextitem):
    """Exit any temp config/cache managers installed by the setup hook."""
    # Cache first, then config -- mirroring the original teardown order.
    for attr in ('set_temp_cache', 'set_temp_config'):
        manager = getattr(item, attr, None)
        if manager is not None:
            manager.__exit__()
# Display-name -> import-name pairs whose versions pytest_report_header
# reports, in this order.
PYTEST_HEADER_MODULES = OrderedDict([('Numpy', 'numpy'),
                                     ('Scipy', 'scipy'),
                                     ('Matplotlib', 'matplotlib'),
                                     ('h5py', 'h5py'),
                                     ('Pandas', 'pandas')])
# This always returns with Astropy's version
from .. import __version__
# Package versions listed in the header; affiliated packages may add entries.
TESTED_VERSIONS = OrderedDict([('Astropy', __version__)])
def pytest_report_header(config):
    """Build the informational header pytest prints before the run.

    Reports the tested package version(s), test directories, platform,
    Python/encoding details and the versions of optional dependencies.
    Returns the header as a (possibly encoded, on Python 2) string.
    """
    try:
        stdoutencoding = sys.stdout.encoding or 'ascii'
    except AttributeError:
        stdoutencoding = 'ascii'
    if six.PY2:
        args = [x.decode('utf-8') for x in config.args]
    else:
        args = config.args
    # TESTED_VERSIONS can contain the affiliated package version, too
    if len(TESTED_VERSIONS) > 1:
        for pkg, version in TESTED_VERSIONS.items():
            # NOTE(review): `s` is overwritten on each iteration, so only the
            # last non-Astropy package ends up reported -- confirm intended.
            if pkg != 'Astropy':
                s = "\nRunning tests with {0} version {1}.\n".format(
                    pkg, version)
    else:
        s = "\nRunning tests with Astropy version {0}.\n".format(
            TESTED_VERSIONS['Astropy'])
    # Per https://github.com/astropy/astropy/pull/4204, strip the rootdir from
    # each directory argument
    if hasattr(config, 'rootdir'):
        rootdir = str(config.rootdir)
        if not rootdir.endswith(os.sep):
            rootdir += os.sep
        dirs = [arg[len(rootdir):] if arg.startswith(rootdir) else arg
                for arg in args]
    else:
        dirs = args
    s += "Running tests in {0}.\n\n".format(" ".join(dirs))
    s += "Date: {0}\n\n".format(datetime.datetime.now().isoformat()[:19])
    from platform import platform
    plat = platform()
    if isinstance(plat, bytes):
        plat = plat.decode(stdoutencoding, 'replace')
    s += "Platform: {0}\n\n".format(plat)
    s += "Executable: {0}\n\n".format(sys.executable)
    s += "Full Python Version: \n{0}\n\n".format(sys.version)
    s += "encodings: sys: {0}, locale: {1}, filesystem: {2}".format(
        sys.getdefaultencoding(),
        locale.getpreferredencoding(),
        sys.getfilesystemencoding())
    if sys.version_info < (3, 3, 0):
        s += ", unicode bits: {0}".format(
            int(math.log(sys.maxunicode, 2)))
    s += '\n'
    s += "byteorder: {0}\n".format(sys.byteorder)
    # BUG FIX: the second placeholder previously repeated {0.dig}; it now
    # reports sys.float_info.mant_dig as the label says.
    s += "float info: dig: {0.dig}, mant_dig: {0.mant_dig}\n\n".format(
        sys.float_info)
    # Report the version (or absence) of each optional dependency.
    for module_display, module_name in six.iteritems(PYTEST_HEADER_MODULES):
        try:
            with ignore_warnings(DeprecationWarning):
                module = resolve_name(module_name)
        except ImportError:
            s += "{0}: not available\n".format(module_display)
        else:
            try:
                version = module.__version__
            except AttributeError:
                version = 'unknown (no __version__ attribute)'
            s += "{0}: {1}\n".format(module_display, version)
    special_opts = ["remote_data", "pep8"]
    opts = []
    for op in special_opts:
        op_value = getattr(config.option, op, None)
        if op_value:
            if isinstance(op_value, six.string_types):
                op = ': '.join((op, op_value))
            opts.append(op)
    if opts:
        s += "Using Astropy options: {0}.\n".format(", ".join(opts))
    if six.PY2:
        s = s.encode(stdoutencoding, 'replace')
    return s
def pytest_pycollect_makemodule(path, parent):
    """Collect test modules.

    On Python 2 the Pair collector is used so each file can be tested
    both with and without ``from __future__ import unicode_literals``;
    on Python 3 the regular pytest module collection is used.
    """
    collector = Pair if six.PY2 else pytest.Module
    return collector(path, parent)
class Pair(pytest.File):
    """
    This class treats a given test .py file as a pair of .py files
    where one has __future__ unicode_literals and the other does not.
    """
    def collect(self):
        """Collect the module once or twice depending on its content.

        Returns the module collected both with and without
        ``unicode_literals`` when it contains the ``TEST_UNICODE_LITERALS``
        marker, and a single regular module otherwise.
        """
        # First, just do the regular import of the module to make
        # sure it's sane and valid. This block is copied directly
        # from py.test
        try:
            mod = self.fspath.pyimport(ensuresyspath=True)
        except SyntaxError:
            import py
            excinfo = py.code.ExceptionInfo()
            raise self.CollectError(excinfo.getrepr(style="short"))
        except self.fspath.ImportMismatchError:
            e = sys.exc_info()[1]
            # BUG FIX: format() previously received the args tuple as a
            # single positional value, which raised IndexError while
            # building this message; unpack the tuple instead.
            raise self.CollectError(
                "import file mismatch:\n"
                "imported module {!r} has this __file__ attribute:\n"
                " {}\n"
                "which is not the same as the test file we want to collect:\n"
                " {}\n"
                "HINT: remove __pycache__ / .pyc files and/or use a "
                "unique basename for your test file modules".format(*e.args))
        # Now get the file's content.
        with io.open(six.text_type(self.fspath), 'rb') as fd:
            content = fd.read()
        # If the file contains the special marker, only test it both ways.
        if b'TEST_UNICODE_LITERALS' in content:
            # Return the file in both unicode_literal-enabled and disabled forms
            return [
                UnicodeLiteralsModule(mod.__name__, content, self.fspath, self),
                NoUnicodeLiteralsModule(mod.__name__, content, self.fspath, self)
            ]
        else:
            return [pytest.Module(self.fspath, self)]
# Matches a "from __future__ import ..." statement, including the
# parenthesised multi-line form, so it can be stripped before recompiling
# test sources with explicit compiler flags (see ModifiedModule).
_RE_FUTURE_IMPORTS = re.compile(br'from __future__ import ((\(.*?\))|([^\n]+))',
                                flags=re.DOTALL)
class ModifiedModule(pytest.Module):
    """A pytest module whose source is recompiled under the explicit
    ``__future__`` compiler flags declared on subclasses (``flags``),
    instead of the ``__future__`` imports written in the file itself."""
    def __init__(self, mod_name, content, path, parent):
        # mod_name: dotted name for the synthetic module.
        # content: raw bytes of the original test file.
        self.mod_name = mod_name
        self.content = content
        super(ModifiedModule, self).__init__(path, parent)
    def _importtestmodule(self):
        """Compile ``self.content`` with ``self.flags`` and execute it
        into a fresh module object, which is returned."""
        # We have to remove the __future__ statements *before* parsing
        # with compile, otherwise the flags are ignored.
        content = re.sub(_RE_FUTURE_IMPORTS, b'\n', self.content)
        new_mod = types.ModuleType(self.mod_name)
        new_mod.__file__ = six.text_type(self.fspath)
        if hasattr(self, '_transform_ast'):
            # ast.parse doesn't let us hand-select the __future__
            # statements, but built-in compile, with the PyCF_ONLY_AST
            # flag does.
            tree = compile(
                content, six.text_type(self.fspath), 'exec',
                self.flags | ast.PyCF_ONLY_AST, True)
            tree = self._transform_ast(tree)
            # Now that we've transformed the tree, recompile it
            code = compile(
                tree, six.text_type(self.fspath), 'exec')
        else:
            # If we don't need to transform the AST, we can skip
            # parsing/compiling in two steps
            code = compile(
                content, six.text_type(self.fspath), 'exec',
                self.flags, True)
        pwd = os.getcwd()
        try:
            # Run relative to the test file's directory so relative
            # fixture paths inside the test keep working.
            os.chdir(os.path.dirname(six.text_type(self.fspath)))
            six.exec_(code, new_mod.__dict__)
        finally:
            os.chdir(pwd)
        self.config.pluginmanager.consider_module(new_mod)
        return new_mod
class UnicodeLiteralsModule(ModifiedModule):
    """Compile the test module as if it used the four common ``__future__``
    imports, including ``unicode_literals``."""
    flags = (
        __future__.absolute_import.compiler_flag |
        __future__.division.compiler_flag |
        __future__.print_function.compiler_flag |
        __future__.unicode_literals.compiler_flag
    )
class NoUnicodeLiteralsModule(ModifiedModule):
    """Compile the test module *without* ``unicode_literals``, still
    converting non-ascii byte literals to text (see ``_transform_ast``)."""
    flags = (
        __future__.absolute_import.compiler_flag |
        __future__.division.compiler_flag |
        __future__.print_function.compiler_flag
    )
    def _transform_ast(self, tree):
        """Rewrite non-ascii utf-8 byte-string literals as text literals."""
        # When unicode_literals is disabled, we still need to convert any
        # byte string containing non-ascii characters into a Unicode string.
        # If it doesn't decode as utf-8, we assume it's some other kind
        # of byte string and just ultimately leave it alone.
        # Note that once we drop support for Python 3.2, we should be
        # able to remove this transformation and just put explicit u''
        # prefixes in the test source code.
        class NonAsciiLiteral(ast.NodeTransformer):
            def visit_Str(self, node):
                s = node.s
                if isinstance(s, bytes):
                    try:
                        s.decode('ascii')
                    except UnicodeDecodeError:
                        try:
                            s = s.decode('utf-8')
                        except UnicodeDecodeError:
                            pass
                        else:
                            return ast.copy_location(ast.Str(s=s), node)
                return node
        return NonAsciiLiteral().visit(tree)
def pytest_terminal_summary(terminalreporter):
    """Output a warning to IPython users in case any tests failed."""
    try:
        get_ipython()
    except NameError:
        return  # not running inside IPython; nothing to do
    if not terminalreporter.stats.get('failed'):
        return  # warn only when something actually failed
    terminalreporter.ensure_newline()
    terminalreporter.write_line(
        'Some tests are known to fail when run from the IPython prompt; '
        'especially, but not limited to tests involving logging and warning '
        'handling. Unless you are certain as to the cause of the failure, '
        'please check that the failure occurs outside IPython as well. See '
        'http://docs.astropy.org/en/stable/known_issues.html#failing-logging-'
        'tests-when-running-the-tests-in-ipython for more information.',
        yellow=True, bold=True)
|
AustereCuriosity/astropy
|
astropy/tests/pytest_plugins.py
|
Python
|
bsd-3-clause
| 13,431
|
[
"VisIt"
] |
cd1dea25a4e4826038bc021d8b589f6322a33dfa8896a067eaec9c68daba3a55
|
# Nucleotide analysis from VCF files
# (c) 2017 Ali Rassolie
# Lumina sequencing
class annuc:
    """Nucleotide analysis helper for VCF-like files.

    Reads a "specific input" file of tab-separated (chromosome, position)
    pairs and looks each pair up in a larger VCF-like file, writing the
    matching chr/pos/ref/alt records to an output file.

    Keyword arguments (all required):
        dictinput:  path to the tab-separated chr/pos query file.
        hass:       path to the VCF-like file that is searched.
        output:     path the filtered records are written to.
        searchtype: 'hassle' for the line-by-line scan or 'chunk' for the
                    regex-over-line-slices search.
    """
    def __init__(self, **kwargs):
        # Kwarg handling; a missing key raises KeyError immediately, which
        # is clearer than failing later in the middle of a search.
        self.spec_input = kwargs["dictinput"]
        self.hass_input = kwargs["hass"]
        self.output = kwargs["output"]
        self.searchtype = kwargs["searchtype"]
        # Line index of the most recent match found by hass().
        self.pos_in_iter = 147
        # Wall-clock duration of the last filter() run (see timed_completion).
        self.endstart = list()
    def clean(self):
        """Write truncated copies of the hard-coded sample files.

        NOTE(review): input/output names are hard-coded rather than taken
        from the constructor arguments -- confirm that is intentional.
        """
        with open("malbac_4.freebayes.bwa.vcf", "rb") as file_in:
            with open("cleaned_hassle.vcf", "wb") as file_out:
                for i, line in enumerate(file_in):
                    if i < 50000:
                        file_out.write(line)
                    else:
                        break
        with open("malbac_4_vcfoutput", "rb") as file_in:
            with open("cleaned_sample", "wb") as file_out:
                for i, line in enumerate(file_in):
                    if i < 10000:
                        file_out.write(line)
                    else:
                        break
    def specific_input(self):
        """Yield (chromosome, position) string pairs from the query file."""
        with open(self.spec_input, "r") as spec_file:
            for spec_line in spec_file:
                temp_info = spec_line.split("\t")
                yield str(temp_info[0]), str(temp_info[1].replace("\n", ""))
    def hass(self):
        """Scan the search file for self.chr_/self.pos, yielding matches.

        Requires ``self.chr_`` and ``self.pos`` to be set by filter()
        before each ``next()`` call; records the matching line index in
        ``self.pos_in_iter``.
        """
        with open(self.hass_input, "r") as hass_file:
            for i, line in enumerate(hass_file):
                fields = line.split("\t")
                # BUG FIX: the original checked len >= 4 but indexed
                # fields[4], crashing on lines with exactly four columns.
                if len(fields) > 4:
                    record = [fields[0], fields[1], fields[3], fields[4]]
                    # NOTE(review): `self.chr_ and self.pos in record` only
                    # tests membership of pos; chr_ is just truthiness --
                    # confirm the chromosome should not be matched too.
                    if self.chr_ and self.pos in record:
                        self.pos_in_iter = i
                        yield record
    def chunk_search(self):
        """Search the file in slices of ``self.slicesize`` lines with a regex.

        Yields one [chr, pos, ref, alt] record per match; reads the next
        slice whenever the current one has no match for the active query.
        """
        import re, itertools
        with open(self.hass_input, "r") as handle:
            chunk = list(itertools.islice(handle, self.slicesize))
            while True:
                if not chunk:
                    break
                # Recompiled each pass because filter() updates
                # self.chr_/self.pos between yields.
                pattern = re.compile("{}\\t{}\\t.*".format(self.chr_, self.pos))
                hits = list(filter(pattern.match, chunk))
                if hits:
                    cols = hits[0].split("\t")
                    yield [cols[0], cols[1], cols[3], cols[4]]
                else:
                    chunk = list(itertools.islice(handle, self.slicesize))
    def filter(self, slicesize):
        """Run the configured search for every query pair and write results.

        Args:
            slicesize: number of lines per slice used by chunk_search().
        """
        import time
        self.slicesize = slicesize
        # The original wrapped everything in "try: ... except: raise
        # Exception", which replaced the real error (and its traceback)
        # with a bare Exception; errors now propagate unchanged.
        with open(self.output, "w") as out_file:
            spec_gen = self.specific_input()
            if self.searchtype == "hassle":
                gen = self.hass()
            elif self.searchtype == "chunk":
                gen = self.chunk_search()
            else:
                print("Please enter searchtype:")
            text = ""
            count = 0
            start = time.time()
            start2 = time.time()
            for self.chr_, self.pos in spec_gen:
                append_this = next(gen)
                text = text + "{}\t{}\t{}\t{}\n".format(*append_this)
                # Progress heartbeat: print elapsed time every ~100 queries.
                if count == 100:
                    end2 = time.time()
                    print(end2 - start2)
                    start2 = time.time()
                    count = 0
                count += 1
            out_file.write(text)
            end = time.time()
            self.endstart = end - start
    def timed_completion(self):
        """Print the wall-clock duration of the last filter() run."""
        print("Tot time: {}".format(self.endstart))
    def prod_counter(self, infile=None, outcountfile=None):
        """Tally "REF>ALT" substitution types in infile and write the counts.

        The original imported matplotlib/numpy here without using them;
        those dead imports have been removed.
        """
        counts = dict()
        for element in self.basepair_gen(infile=infile):
            counts[element] = counts.get(element, 0) + 1
        with open(outcountfile, "w") as out_handle:
            for entry in counts:
                out_handle.write("{} {}\n".format(entry, counts[entry]))
    def basepair_gen(self, infile=None):
        """Yield "REF>ALT" strings from a tab-separated results file."""
        with open(infile, "r") as handle:
            for line in handle:
                fields = line.replace("\n", "").split("\t")
                # Columns 2 and 3 hold the reference and alternate alleles.
                yield "{}>{}".format(fields[2], fields[3])
|
HypoChloremic/annuc
|
annuc.py
|
Python
|
mit
| 3,706
|
[
"BWA"
] |
28a0a669fb06c034c4bef93f93551c73cdf35c70a8228885c64f2923f87e1382
|
#
# Copyright 2014-2015, 2017, 2021 Lars Pastewka (U. Freiburg)
# 2014-2015, 2020 James Kermode (Warwick U.)
# 2020 Petr Grigorev (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
from ase.lattice.cubic import Diamond, FaceCenteredCubic, SimpleCubic, BodyCenteredCubic
###
def set_groups(a, n, skin_x, skin_y, central_x=-1./2, central_y=-1./2,
               invert_central=False):
    """Attach a per-atom 'groups' array to *a*.

    Group codes: 1 = default, 2 = inside the central x/y box (or outside it
    if invert_central), 0 = inside the outer skin of width skin_x/skin_y.
    skin_x/skin_y are given in unit-cell multiples; n = (nx, ny, nz) is the
    supercell repeat used for the conversion.
    """
    nx, ny, nz = n
    sx, sy, sz = a.cell.diagonal()
    print('skin_x = {0}*a0, skin_y = {1}*a0'.format(skin_x, skin_y))
    # Convert skins from unit-cell multiples to absolute lengths.
    skin_x = skin_x * sx / nx
    skin_y = skin_y * sy / ny
    print('skin_x = {0}, skin_y = {1}'.format(skin_x, skin_y))
    pos = a.positions
    groups = np.ones(len(a), dtype=int)
    # Fractional coordinates in x and y.
    fx = pos[:, 0] / sx
    fy = pos[:, 1] / sy
    # Atoms OUTSIDE the central box of fractional half-width central_x/y.
    outside = ((fx < (1. - central_x) / 2) | (fx > (1. + central_x) / 2) |
               (fy < (1. - central_y) / 2) | (fy > (1. + central_y) / 2))
    if invert_central:
        outside = ~outside
    # Atoms NOT outside (i.e. central) become group 2.
    groups = np.where(outside, groups, 2 * np.ones_like(groups))
    # Skin region along the x and y borders overrides everything with group 0.
    in_skin = ((pos[:, 0] < skin_x) | (pos[:, 0] > sx - skin_x) |
               (pos[:, 1] < skin_y) | (pos[:, 1] > sy - skin_y))
    groups = np.where(in_skin, np.zeros_like(groups), groups)
    a.set_array('groups', groups)
def set_regions(cryst, r_I, cutoff, r_III):
    """Label atoms with a radial 'region' code (1-4) around the cell centre
    and return the cylinder of regions I-IV, sorted by distance from the tip.

    Shell boundaries: I < r_I <= II < r_I+cutoff <= III < r_III <= IV < r_III+cutoff.
    Atoms beyond region IV keep code 0 and are dropped from the returned object.
    """
    sx, sy, sz = cryst.cell.diagonal()
    x, y = cryst.positions[:, 0], cryst.positions[:, 1]
    # Radial distance of every atom from the cell centre in the x-y plane.
    dx = x - sx / 2
    dy = y - sy / 2
    r = np.sqrt(dx ** 2 + dy ** 2)
    # Concentric shells, innermost first.
    shells = [(1, r < r_I),
              (2, (r >= r_I) & (r < (r_I + cutoff))),
              (3, (r >= (r_I + cutoff)) & (r < r_III)),
              (4, (r >= r_III) & (r < (r_III + cutoff)))]
    cryst.new_array('region', np.zeros(len(cryst), dtype=int))
    region = cryst.arrays['region']
    for label, shell in shells:
        region[shell] = label
    # Keep only the cylinder covered by regions I-IV.
    inside = shells[0][1] | shells[1][1] | shells[2][1] | shells[3][1]
    cryst = cryst[inside]
    # Order by radial distance from the tip.
    cryst = cryst[r[inside].argsort()]
    return cryst
def cluster(el, a0, n, crack_surface=[1,1,0], crack_front=[0,0,1],
            lattice=None, shift=None):
    """Build an nx*ny*nz cluster oriented so the crack surface lies along y
    and the crack front along z; the result is periodic only along z.

    el/a0 are passed to the *lattice* factory; *shift* (optional, scaled
    coordinates) translates the unit cell before centering.
    """
    nx, ny, nz = n
    # Complete a right-handed basis: third direction = surface x front.
    third_dir = np.cross(crack_surface, crack_front)
    directions = [third_dir, crack_surface, crack_front]
    if np.linalg.det(directions) < 0:
        third_dir = -third_dir
        directions = [third_dir, crack_surface, crack_front]
    unitcell = lattice(el, latticeconstant=a0, size=[1, 1, 1],
                       directions=directions)
    if shift is not None:
        unitcell.translate(np.dot(shift, unitcell.cell))
    # Centre the (wrapped) scaled positions in the unit cell, axis by axis.
    scaled = unitcell.get_scaled_positions() % 1.0
    centered = [c + (1.0 - c.max() + c.min()) / 2 - c.min() for c in scaled.T]
    unitcell.set_scaled_positions(np.transpose(centered))
    atoms = unitcell.copy()
    atoms *= (nx, ny, nz)
    atoms.set_pbc([False, False, True])
    return atoms
def diamond(*args, **kwargs):
    """Convenience wrapper: build a cluster() on a diamond lattice."""
    return cluster(*args, **dict(kwargs, lattice=Diamond))
def fcc(*args, **kwargs):
    """Convenience wrapper: build a cluster() on an fcc lattice."""
    return cluster(*args, **dict(kwargs, lattice=FaceCenteredCubic))
def bcc(*args, **kwargs):
    """Convenience wrapper: build a cluster() on a bcc lattice."""
    return cluster(*args, **dict(kwargs, lattice=BodyCenteredCubic))
def sc(*args, **kwargs):
    """Convenience wrapper: build a cluster() on a simple-cubic lattice."""
    return cluster(*args, **dict(kwargs, lattice=SimpleCubic))
|
libAtoms/matscipy
|
matscipy/fracture_mechanics/clusters.py
|
Python
|
lgpl-2.1
| 4,414
|
[
"ASE",
"Matscipy"
] |
9c3c2482404df30d61d83532cd9b94058ca0c4172104fa50baf4a4c202e7ad58
|
#!/usr/bin/env python
'''
File name: main_make_Thetainfo.py
Author: Guillaume Viejo
Date created: 16/08/2017
Python Version: 3.5.2
Theta modulation, returns angle
'''
import numpy as np
import pandas as pd
import scipy.io
from functions import *
# from pylab import *
import ipyparallel
import os, sys
import neuroseries as nts
import time
from Wavelets import MyMorlet as Morlet
from pylab import *
from scipy.ndimage.filters import gaussian_filter
import _pickle as cPickle
from pycircstat import mean as circmean
def computePlaceFields(spikes, position, ep, nb_bins = 100, frequency = 120.0):
    """Compute 2-D firing-rate maps (place fields) for a set of neurons.

    Parameters
    ----------
    spikes : dict
        neuron id -> spike train supporting ``.restrict(ep)``.
    position : 2-column time series (x in column 0, y in column 1)
        supporting ``.restrict`` / ``.realign`` and pandas-style ``.iloc``.
    ep : epoch used to restrict both positions and spikes.
    nb_bins : number of spatial bins per axis.
    frequency : position sampling rate (Hz); converts spike counts per
        position sample into rates.

    Returns
    -------
    place_fields : dict of pandas.DataFrame
        One rate map per neuron; row index is the (flipped) y bin edges,
        columns are the x bin edges.
    extent : tuple (xmin, xmax, ymin, ymax) for matplotlib imshow.
    """
    place_fields = {}
    position_tsd = position.restrict(ep)
    xpos = position_tsd.iloc[:, 0]
    ypos = position_tsd.iloc[:, 1]
    # +1e-6 so the maximum sample falls inside the last bin.
    xbins = np.linspace(xpos.min(), xpos.max() + 1e-6, nb_bins + 1)
    ybins = np.linspace(ypos.min(), ypos.max() + 1e-6, nb_bins + 1)
    # The occupancy map does not depend on the neuron -- hoisted out of the
    # loop (the original recomputed it for every neuron).
    occupancy, _, _ = np.histogram2d(ypos, xpos, [ybins, xbins])
    for n in spikes:
        # Positions at the times of this neuron's spikes.
        position_spike = position_tsd.realign(spikes[n].restrict(ep))
        spike_count, _, _ = np.histogram2d(position_spike.iloc[:, 1].values,
                                           position_spike.iloc[:, 0].values,
                                           [ybins, xbins])
        # +1 in the denominator avoids division by zero in unvisited bins.
        mean_spike_count = spike_count / (occupancy + 1)
        place_field = mean_spike_count * frequency
        place_fields[n] = pd.DataFrame(index=ybins[0:-1][::-1],
                                       columns=xbins[0:-1], data=place_field)
    extent = (xbins[0], xbins[-1], ybins[0], ybins[-1])  # USEFUL FOR MATPLOTLIB
    return place_fields, extent
# --- Session selection and raw data loading ---------------------------------
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
# Single hard-coded session; `datasets` above is loaded but not iterated here.
session = 'Mouse12/Mouse12-120809'
generalinfo = scipy.io.loadmat(data_directory+session+'/Analysis/GeneralInfo.mat')
shankStructure = loadShankStructure(generalinfo)
# Pick the HPC channel from the channel structure; the nested indexing
# handles the two observed MATLAB layouts, and the trailing -1 converts
# from MATLAB 1-based to Python 0-based indices.
if len(generalinfo['channelStructure'][0][0][1][0]) == 2:
    hpc_channel = generalinfo['channelStructure'][0][0][1][0][1][0][0] - 1
else:
    hpc_channel = generalinfo['channelStructure'][0][0][1][0][0][0][0] - 1
spikes,shank = loadSpikeData(data_directory+session+'/Analysis/SpikeData.mat', shankStructure['thalamus'])
n_channel,fs, shank_to_channel = loadXML(data_directory+session+"/"+session.split("/")[1]+'.xml')
# Behavioural / brain-state epochs for this session.
wake_ep = loadEpoch(data_directory+session, 'wake')
sleep_ep = loadEpoch(data_directory+session, 'sleep')
sws_ep = loadEpoch(data_directory+session, 'sws')
rem_ep = loadEpoch(data_directory+session, 'rem')
sleep_ep = sleep_ep.merge_close_intervals(threshold=1.e3)
# Keep only SWS/REM that fall inside the merged sleep epochs.
sws_ep = sleep_ep.intersect(sws_ep)
rem_ep = sleep_ep.intersect(rem_ep)
rip_ep,rip_tsd = loadRipples(data_directory+session)
rip_ep = sws_ep.intersect(rip_ep)
rip_tsd = rip_tsd.restrict(sws_ep)
speed = loadSpeed(data_directory+session+'/Analysis/linspeed.mat').restrict(wake_ep)
# Last column of hdCellStats flags head-direction cells (1 = HD).
hd_info = scipy.io.loadmat(data_directory+session+'/Analysis/HDCells.mat')['hdCellStats'][:,-1]
hd_info_neuron = np.array([hd_info[n] for n in spikes.keys()])
spikeshd = {k:spikes[k] for k in np.where(hd_info_neuron==1)[0] if k not in []}
neurons = np.sort(list(spikeshd.keys()))
print(session, len(neurons))
####################################################################################################################
# HEAD DIRECTION INFO
####################################################################################################################
# NOTE(review): spikeshd is rebuilt here identically to the assignment above.
spikeshd = {k:spikes[k] for k in np.where(hd_info_neuron==1)[0] if k not in []}
position = pd.read_csv(data_directory+session+"/"+session.split("/")[1] + ".csv", delimiter = ',', header = None, index_col = [0])
# Column 1 of the tracking csv is taken as the head-direction angle,
# indexed in seconds.
angle = nts.Tsd(t = position.index.values, d = position[1].values, time_units = 's')
tcurves = computeAngularTuningCurves(spikeshd, angle, wake_ep, nb_bins = 60, frequency = 1/0.0256)
# Order HD neurons by preferred direction (argmax of the tuning curve).
hdneurons = tcurves.idxmax().sort_values().index.values
####################################################################################################################
#
####################################################################################################################
# Only non-head-direction neurons are phase-analysed below.
spikesnohd = {k:spikes[k] for k in np.where(hd_info_neuron==0)[0] if k not in []}
####################################################################################################################
# PHASE SPIKE NO HD
####################################################################################################################
n_channel,fs, shank_to_channel = loadXML(data_directory+session+"/"+session.split("/")[1]+'.xml')
lfp_hpc = loadLFP(data_directory+session+"/"+session.split("/")[1]+'.eeg', n_channel, hpc_channel, float(fs), 'int16')
# Downsample the wake LFP by 5, then band-pass 5-15 Hz (theta band).
lfp_hpc = downsample(lfp_hpc.restrict(wake_ep), 1, 5)
lfp_filt_hpc = nts.Tsd(lfp_hpc.index.values, butter_bandpass_filter(lfp_hpc, 5, 15, fs/5, 2))
power = nts.Tsd(lfp_filt_hpc.index.values, np.abs(lfp_filt_hpc.values))
enveloppe,dummy = getPeaksandTroughs(power, 5)
# Candidate theta epochs: stretches where the envelope exceeds its 10th
# percentile (binary indicator; transitions mark starts/ends).
index = (enveloppe.as_series() > np.percentile(enveloppe, 10)).values*1.0
start_cand = np.where((index[1:] - index[0:-1]) == 1)[0]+1
end_cand = np.where((index[1:] - index[0:-1]) == -1)[0]
# Drop unmatched interval edges at the boundaries.
if end_cand[0] < start_cand[0]: end_cand = end_cand[1:]
if end_cand[-1] < start_cand[-1]: start_cand = start_cand[0:-1]
tmp = np.where(end_cand != start_cand)
start_cand = enveloppe.index.values[start_cand[tmp]]
end_cand = enveloppe.index.values[end_cand[tmp]]
theta_ep = nts.IntervalSet(start_cand, end_cand)
# Durations below are in the index time units -- presumably microseconds;
# TODO(review) confirm against neuroseries conventions.
theta_ep = theta_ep.drop_short_intervals(300000)
theta_ep = theta_ep.merge_close_intervals(30000).drop_short_intervals(1000000)
phase = getPhase(lfp_hpc, 5, 15, 16, fs/5.)
phase = phase.restrict(theta_ep)
phase = phase.as_series()
# Wrap phases into [0, 2*pi).
tmp = phase.values + (2*np.pi)
tmp = tmp % (2*np.pi)
phase = nts.Tsd(t = phase.index.values, d = tmp)
# Theta phase of each non-HD spike (nearest phase sample in time).
spikes_phase = {n:phase.realign(spikesnohd[n].restrict(theta_ep), align = 'closest') for n in spikesnohd.keys()}
####################################################################################################################
# PHASE ANGLE RELATION
####################################################################################################################
nb_bins = 19
bins = np.linspace(0, 2*np.pi, nb_bins)
idx = bins[0:-1]+np.diff(bins)/2  # bin centres
tuning_curves = pd.DataFrame(index = idx, columns = spikes_phase.keys())
# Remove duplicated timestamps so realign is well defined.
angle = angle[~angle.index.duplicated(keep='first')]
for k in spikes_phase:
    spk_phase = spikes_phase[k].restrict(theta_ep)
    # Head-direction angle at each spike time.
    spk_angle = angle.restrict(theta_ep).realign(spk_phase)
    idx = np.digitize(spk_angle.values, bins)-1
    angle_phase = np.zeros(nb_bins-1)
    for i in np.unique(idx):
        # angle_phase[i] = circmean(spk_phase[idx==i])
        # NOTE(review): mean of cos(phase) is used here instead of the
        # circular mean that the commented line would compute.
        angle_phase[i] = np.mean(np.cos(spk_phase[idx==i].values))
    # occupancy is computed but never used below.
    occupancy, _ = np.histogram(angle, bins)
    tuning_curves[k] = angle_phase
# One subplot per non-HD neuron showing its phase/angle curve.
figure()
for i, n in enumerate(spikesnohd.keys()):
    subplot(6,8,i+1)
    plot(tuning_curves[n])
    # tmp = tuning_curves[n].sort_values()
    # plot(tmp.values, tmp.index.values)
    # ylim(0, 2*np.pi)
show()
# angle = angle.restrict(ep)
# # # Smoothing the angle here
# # tmp = pd.Series(index = angle.index.values, data = np.unwrap(angle.values))
# # tmp2 = tmp.rolling(window=50,win_type='gaussian',center=True,min_periods=1).mean(std=10.0)
# # angle = nts.Tsd(tmp2%(2*np.pi))
# for k in spikes:
# spks = spikes[k]
# true_ep = nts.IntervalSet(start = np.maximum(angle.index[0], spks.index[0]),
# end = np.minimum(angle.index[-1], spks.index[-1]))
# spks = spks.restrict(true_ep)
# angle_spike = angle.restrict(true_ep).realign(spks)
# spike_count, bin_edges = np.histogram(angle_spike, bins)
# occupancy, _ = np.histogram(angle, bins)
# spike_count = spike_count/occupancy
# tuning_curves[k] = spike_count*frequency
# plot(lfp_hpc.restrict(theta_ep))
# plot(lfp_filt_hpc.restrict(theta_ep))
# plot(phase.restrict(theta_ep).as_series()*100)
|
gviejo/ThalamusPhysio
|
python/main_test_PHASE_THETA_HD.py
|
Python
|
gpl-3.0
| 7,839
|
[
"Gaussian"
] |
2dea91e8bd68cd7febf79fa5f285c3c7902f9656654536c3bf41badf21219743
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2017 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import os
import shutil
import numpy as np
import pytest
import mdtraj as md
from mdtraj.formats import HDF5TrajectoryFile, NetCDFTrajectoryFile
from mdtraj.reporters import HDF5Reporter, NetCDFReporter, DCDReporter, XTCReporter
from mdtraj.testing import eq
try:
from openmm.unit import nanometers, kelvin, picoseconds, femtoseconds
from openmm import LangevinIntegrator, Platform
from openmm.app import PDBFile, ForceField, Simulation, CutoffNonPeriodic, CutoffPeriodic, HBonds, CheckpointReporter
HAVE_OPENMM = True
except ImportError:
HAVE_OPENMM = False
# special pytest global to mark all tests in this module
pytestmark = pytest.mark.skipif(not HAVE_OPENMM, reason='test_reporter.py needs OpenMM.')
def test_reporter(tmpdir, get_fn):
    """Run a short implicit-solvent OpenMM simulation with HDF5, NetCDF,
    DCD and XTC reporters attached and check the four outputs agree."""
    pdb = PDBFile(get_fn('native.pdb'))
    forcefield = ForceField('amber99sbildn.xml', 'amber99_obc.xml')
    # NO PERIODIC BOUNDARY CONDITIONS
    system = forcefield.createSystem(pdb.topology, nonbondedMethod=CutoffNonPeriodic,
        nonbondedCutoff=1.0 * nanometers, constraints=HBonds, rigidWater=True)
    integrator = LangevinIntegrator(300 * kelvin, 1.0 / picoseconds, 2.0 * femtoseconds)
    integrator.setConstraintTolerance(0.00001)
    platform = Platform.getPlatformByName('Reference')
    simulation = Simulation(pdb.topology, system, integrator, platform)
    simulation.context.setPositions(pdb.positions)
    simulation.context.setVelocitiesToTemperature(300 * kelvin)
    tmpdir = str(tmpdir)
    hdf5file = os.path.join(tmpdir, 'traj.h5')
    ncfile = os.path.join(tmpdir, 'traj.nc')
    dcdfile = os.path.join(tmpdir, 'traj.dcd')
    xtcfile = os.path.join(tmpdir, 'traj.xtc')
    # All reporters write every 2 steps; 100 steps below -> 50 frames.
    reporter = HDF5Reporter(hdf5file, 2, coordinates=True, time=True,
        cell=True, potentialEnergy=True, kineticEnergy=True, temperature=True,
        velocities=True)
    reporter2 = NetCDFReporter(ncfile, 2, coordinates=True, time=True, cell=True)
    reporter3 = DCDReporter(dcdfile, 2)
    reporter4 = XTCReporter(xtcfile, 2)
    simulation.reporters.append(reporter)
    simulation.reporters.append(reporter2)
    simulation.reporters.append(reporter3)
    simulation.reporters.append(reporter4)
    simulation.step(100)
    # Close before reading back so buffers are flushed.
    reporter.close()
    reporter2.close()
    reporter3.close()
    reporter4.close()
    with HDF5TrajectoryFile(hdf5file) as f:
        got = f.read()
        eq(got.temperature.shape, (50,))
        eq(got.potentialEnergy.shape, (50,))
        eq(got.kineticEnergy.shape, (50,))
        eq(got.coordinates.shape, (50, 22, 3))
        eq(got.velocities.shape, (50, 22, 3))
        eq(got.cell_lengths.shape, (50, 3))
        eq(got.cell_angles.shape, (50, 3))
        # 2 fs timestep * report interval 2 = 0.004 ps per frame.
        eq(got.time, 0.002 * 2 * (1 + np.arange(50)))
        assert f.topology == md.load(get_fn('native.pdb')).top
    with NetCDFTrajectoryFile(ncfile) as f:
        xyz, time, cell_lengths, cell_angles = f.read()
        eq(cell_lengths.shape, (50, 3))
        eq(cell_angles.shape, (50, 3))
        eq(time, 0.002 * 2 * (1 + np.arange(50)))
    hdf5_traj = md.load(hdf5file)
    dcd_traj = md.load(dcdfile, top=get_fn('native.pdb'))
    netcdf_traj = md.load(ncfile, top=get_fn('native.pdb'))
    xtc_traj = md.load(xtcfile, top=get_fn('native.pdb'))
    # we don't have to convert units here, because md.load already
    # handles that
    eq(hdf5_traj.xyz, netcdf_traj.xyz)
    eq(hdf5_traj.unitcell_vectors, netcdf_traj.unitcell_vectors)
    eq(hdf5_traj.time, netcdf_traj.time)
    eq(xtc_traj.time, netcdf_traj.time)
    eq(dcd_traj.xyz, hdf5_traj.xyz)
    # XTC stores coordinates with reduced precision, hence the tolerance.
    eq(xtc_traj.xyz, dcd_traj.xyz, decimal=3)
    eq(dcd_traj.unitcell_vectors, hdf5_traj.unitcell_vectors)
def test_reporter_subset(tmpdir, get_fn):
    """Same as test_reporter, but each reporter records only the atoms in
    ``atomSubset``; checks shapes/topologies reflect the subset."""
    pdb = PDBFile(get_fn('native2.pdb'))
    pdb.topology.setUnitCellDimensions([2, 2, 2])
    forcefield = ForceField('amber99sbildn.xml', 'amber99_obc.xml')
    system = forcefield.createSystem(pdb.topology, nonbondedMethod=CutoffPeriodic,
        nonbondedCutoff=1 * nanometers, constraints=HBonds, rigidWater=True)
    integrator = LangevinIntegrator(300 * kelvin, 1.0 / picoseconds, 2.0 * femtoseconds)
    integrator.setConstraintTolerance(0.00001)
    platform = Platform.getPlatformByName('Reference')
    simulation = Simulation(pdb.topology, system, integrator, platform)
    simulation.context.setPositions(pdb.positions)
    simulation.context.setVelocitiesToTemperature(300 * kelvin)
    tmpdir = str(tmpdir)
    hdf5file = os.path.join(tmpdir, 'traj.h5')
    ncfile = os.path.join(tmpdir, 'traj.nc')
    dcdfile = os.path.join(tmpdir, 'traj.dcd')
    xtcfile = os.path.join(tmpdir, 'traj.xtc')
    # Only these atom indices are written by every reporter.
    atomSubset = [0, 1, 2, 4, 5]
    reporter = HDF5Reporter(hdf5file, 2, coordinates=True, time=True,
        cell=True, potentialEnergy=True, kineticEnergy=True, temperature=True,
        velocities=True, atomSubset=atomSubset)
    reporter2 = NetCDFReporter(ncfile, 2, coordinates=True, time=True,
        cell=True, atomSubset=atomSubset)
    reporter3 = DCDReporter(dcdfile, 2, atomSubset=atomSubset)
    reporter4 = XTCReporter(xtcfile, 2, atomSubset=atomSubset)
    simulation.reporters.append(reporter)
    simulation.reporters.append(reporter2)
    simulation.reporters.append(reporter3)
    simulation.reporters.append(reporter4)
    simulation.step(100)
    reporter.close()
    reporter2.close()
    reporter3.close()
    reporter4.close()
    # NOTE(review): `t` is built and restricted but never used below --
    # apparent leftover (and it loads native.pdb, not native2.pdb).
    t = md.load(get_fn('native.pdb'))
    t.restrict_atoms(atomSubset)
    with HDF5TrajectoryFile(hdf5file) as f:
        got = f.read()
        eq(got.temperature.shape, (50,))
        eq(got.potentialEnergy.shape, (50,))
        eq(got.kineticEnergy.shape, (50,))
        eq(got.coordinates.shape, (50, len(atomSubset), 3))
        eq(got.velocities.shape, (50, len(atomSubset), 3))
        eq(got.cell_lengths, 2 * np.ones((50, 3)))
        eq(got.cell_angles, 90 * np.ones((50, 3)))
        eq(got.time, 0.002 * 2 * (1 + np.arange(50)))
        assert f.topology == md.load(get_fn('native.pdb'), atom_indices=atomSubset).topology
    with NetCDFTrajectoryFile(ncfile) as f:
        xyz, time, cell_lengths, cell_angles = f.read()
        # NetCDF reports the 2-unit box as 20 -- a factor-of-10 unit
        # difference vs the HDF5 check above (presumably nm vs angstrom).
        eq(cell_lengths, 20 * np.ones((50, 3)))
        eq(cell_angles, 90 * np.ones((50, 3)))
        eq(time, 0.002 * 2 * (1 + np.arange(50)))
        eq(xyz.shape, (50, len(atomSubset), 3))
    hdf5_traj = md.load(hdf5file)
    dcd_traj = md.load(dcdfile, top=hdf5_traj)
    netcdf_traj = md.load(ncfile, top=hdf5_traj)
    xtc_traj = md.load(xtcfile, top=hdf5_traj)
    # we don't have to convert units here, because md.load already handles that
    eq(hdf5_traj.xyz, netcdf_traj.xyz)
    eq(hdf5_traj.unitcell_vectors, netcdf_traj.unitcell_vectors)
    eq(hdf5_traj.time, netcdf_traj.time)
    eq(xtc_traj.time, netcdf_traj.time)
    eq(dcd_traj.xyz, hdf5_traj.xyz)
    eq(xtc_traj.xyz, hdf5_traj.xyz)
    eq(dcd_traj.unitcell_vectors, hdf5_traj.unitcell_vectors)
def test_xtc_reporter_append(tmpdir, get_fn):
    """Check that XTCReporter(append=True) resumes a trajectory after a
    checkpoint restart: 5 frames + 5 appended frames == 10 total."""
    pdb = PDBFile(get_fn('native.pdb'))
    forcefield = ForceField('amber99sbildn.xml', 'amber99_obc.xml')
    # NO PERIODIC BOUNDARY CONDITIONS
    system = forcefield.createSystem(pdb.topology, nonbondedMethod=CutoffNonPeriodic,
        nonbondedCutoff=1.0 * nanometers, constraints=HBonds, rigidWater=True)
    integrator = LangevinIntegrator(300 * kelvin, 1.0 / picoseconds, 2.0 * femtoseconds)
    integrator.setConstraintTolerance(0.00001)
    platform = Platform.getPlatformByName('Reference')
    simulation = Simulation(pdb.topology, system, integrator, platform)
    simulation.context.setPositions(pdb.positions)
    simulation.context.setVelocitiesToTemperature(300 * kelvin)
    tmpdir = str(tmpdir)
    xtcfile = os.path.join(tmpdir, 'traj.xtc')
    xtcfile_cp = os.path.join(tmpdir, 'traj_cp.xtc')
    checkpoint = os.path.join(tmpdir, 'checkpoint.chk')
    # First leg: 10 steps, report every 2 -> 5 frames; checkpoint at step 10.
    reporter = XTCReporter(xtcfile, 2)
    simulation.reporters.append(reporter)
    simulation.reporters.append(CheckpointReporter(checkpoint, 10))
    simulation.step(10)
    reporter.close()
    # Keep a copy of the first-leg trajectory for comparison.
    shutil.copyfile(xtcfile, xtcfile_cp)
    # Rebuild the simulation from scratch and resume from the checkpoint.
    system = forcefield.createSystem(pdb.topology, nonbondedMethod=CutoffNonPeriodic,
        nonbondedCutoff=1.0 * nanometers, constraints=HBonds, rigidWater=True)
    integrator = LangevinIntegrator(300 * kelvin, 1.0 / picoseconds, 2.0 * femtoseconds)
    integrator.setConstraintTolerance(0.00001)
    platform = Platform.getPlatformByName('Reference')
    simulation = Simulation(pdb.topology, system, integrator, platform)
    simulation.loadCheckpoint(checkpoint)
    # Second leg appends 5 more frames to the same file.
    reporter = XTCReporter(xtcfile, 2, append=True)
    simulation.reporters.append(reporter)
    simulation.step(10)
    reporter.close()
    xtc_traj = md.load(xtcfile, top=get_fn('native.pdb'))
    xtc_traj_cp = md.load(xtcfile_cp, top=get_fn('native.pdb'))
    # First 5 frames of the appended file must match the pre-append copy.
    eq(xtc_traj.xyz[:5], xtc_traj_cp.xyz)
    eq(xtc_traj.n_frames, 10)
    eq(xtc_traj_cp.n_frames, 5)
    eq(xtc_traj.time[:5], xtc_traj_cp.time)
|
mdtraj/mdtraj
|
tests/test_reporter.py
|
Python
|
lgpl-2.1
| 10,137
|
[
"MDTraj",
"OpenMM"
] |
62f2dcf4474315cf3121616ffb677642aebb28e4130cef8019b54e44d3f41feb
|
__author__ = "{ ministry of promise }"
__copyright__ = "Copyright 2015, { ministry of promise }"
__license__ = "MIT"
__version__ = "0.1.0"
__maintainer__ = "Adam Nichols"
__email__ = "adam.j.nichols@gmail.com"
__status__ = "Development"
# Generic tokens excluded from keyword extraction: report boilerplate,
# month names, and common malware-report vocabulary that carries no
# discriminating information.
keyword_filterlist = [
    u'account',
    u'activity',
    u'actor',
    u'address',
    u'algorithm',
    u'analysis',
    u'appendix',
    u'application',
    u'april',
    u'attack',
    u'attacker',
    u'august',
    u'backdoor',
    u'based',
    u'between',
    u'botnet',
    u'c2',
    u'c server',
    u'c servers',
    u'campaign',
    u'checkwith',
    u'code',
    u'copy',
    u'command',
    u'communication',
    u'company',
    u'configuration',
    u'connect',
    u'contact',
    u'control',
    u'corporation',
    u'country',
    u'custom',
    u'datum',
    u'december',
    u'description',
    u'detection',
    u'detectionsystemrootkill',
    u'distribution',
    u'directory',
    u'dll',
    u'domain',
    u'dword',
    u'error',
    u'exploit',
    u'february',
    u'figure',
    u'file',
    u'from',
    u'fromversion',
    u'function',
    u'functionality',
    u'infection',
    u'information',
    u'informationfiguremalware',
    u'intelreport',
    u'inquirefile',
    u'inquire',
    u'inquiry',
    u'install',
    u'ip',
    u'into',
    u'january',
    u'july',
    u'june',
    u'kill',
    u'legitimate',
    u'list',
    u'loader',
    u'logo',
    u'main',
    u'malware',
    u'march',
    u'may',
    u'md5',
    u'micro',
    u'module',
    u'name',
    u'network',
    u'november',
    u'number',
    u'october',
    u'organization',
    u'page',
    u'payload',
    u'please',
    u'pleasecontact',
    u'port',
    u'process',
    u'product',
    u'report',
    u'sample',
    u'september',
    u'server',
    u'service',
    u'shellcode',
    u'site',
    u'software',
    u'stage',
    u'state',
    u'sysmain',
    u'system',
    u'telnet',
    u'temp',
    u'thread',
    u'threat',
    u'tlp',
    u'trademark',
    u'trojan',
    u'unique',
    u'updater',
    u'updatersoftware',
    u'user',
    u'username',
    u'value',
    u'version',
    u'versionpoint',
    u'view',
    u'victim',
    u'with',
    u'within'
]
# Indicator-of-compromise values to ignore: security-vendor domains that
# appear in report footers/branding, plus localhost, which are not real
# indicators.
ioc_filterlist = {
    'domain' : [
        u'cisco.com',
        u'fireeye.com',
        u'ipviking.com',
        u'kaspersky.com',
        u'norse-corp.com',
        u'verisign.com'
    ],
    'ip' : [
        u'127.0.0.1'
    ]
}
# top 2k as of 4/2015
alexa_filterlist = [
u'google.com',
u'facebook.com',
u'youtube.com',
u'baidu.com',
u'yahoo.com',
u'wikipedia.org',
u'amazon.com',
u'twitter.com',
u'taobao.com',
u'qq.com',
u'google.co.in',
u'live.com',
u'linkedin.com',
u'sina.com.cn',
u'weibo.com',
u'yahoo.co.jp',
u'tmall.com',
u'google.co.jp',
u'ebay.com',
u't.co',
u'blogspot.com',
u'google.de',
u'hao123.com',
u'bing.com',
u'yandex.ru',
u'reddit.com',
u'vk.com',
u'google.co.uk',
u'amazon.co.jp',
u'msn.com',
u'google.com.br',
u'instagram.com',
u'google.fr',
u'tumblr.com',
u'wordpress.com',
u'sohu.com',
u'imgur.com',
u'pinterest.com',
u'paypal.com',
u'aliexpress.com',
u'xvideos.com',
u'apple.com',
u'ask.com',
u'microsoft.com',
u'onclickads.net',
u'google.it',
u'gmail.com',
u'imdb.com',
u'mail.ru',
u'fc2.com',
u'alibaba.com',
u'google.ru',
u'google.es',
u'adcash.com',
u'amazon.de',
u'stackoverflow.com',
u'360.cn',
u'netflix.com',
u'googleadservices.com',
u'diply.com',
u'google.ca',
u'craigslist.org',
u'go.com',
u'xhamster.com',
u'google.com.hk',
u'naver.com',
u'tianya.cn',
u'gmw.cn',
u'163.com',
u'bbc.co.uk',
u'google.com.tr',
u'amazon.co.uk',
u'adobe.com',
u'kickass.to',
u'pornhub.com',
u'rakuten.co.jp',
u'ebay.de',
u'dropbox.com',
u'cnn.com',
u'amazon.cn',
u'nicovideo.jp',
u'espn.go.com',
u'google.com.mx',
u'google.pl',
u'ok.ru',
u'soso.com',
u'google.com.au',
u'dailymotion.com',
u'cntv.cn',
u'googleusercontent.com',
u'github.com',
u'jd.com',
u'directrev.com',
u'youku.com',
u'outbrain.com',
u'alipay.com',
u'people.com.cn',
u'flipkart.com',
u'pixnet.net',
u'google.co.kr',
u'google.co.id',
u'chinadaily.com.cn',
u'nytimes.com',
u'blogger.com',
u'buzzfeed.com',
u'uol.com.br',
u'huffingtonpost.com',
u'ebay.co.uk',
u'wikia.com',
u'livedoor.com',
u'indiatimes.com',
u'booking.com',
u'google.com.tw',
u'amazon.in',
u'china.com',
u'sogou.com',
u'tudou.com',
u'bycontext.com',
u'chase.com',
u'blogspot.in',
u'amazonaws.com',
u'dailymail.co.uk',
u'ettoday.net',
u'xinhuanet.com',
u'google.com.eg',
u'flickr.com',
u'xnxx.com',
u'globo.com',
u'wordpress.org',
u'coccoc.com',
u'douban.com',
u'yelp.com',
u'google.nl',
u'bankofamerica.com',
u'pconline.com.cn',
u'salesforce.com',
u'godaddy.com',
u'themeforest.net',
u'about.com',
u'ameblo.jp',
u'popads.net',
u'dmm.co.jp',
u'daum.net',
u'cnet.com',
u'slideshare.net',
u'twitch.tv',
u'google.com.pk',
u'youradexchange.com',
u'etsy.com',
u'deviantart.com',
u'bongacams.com',
u'google.com.ar',
u'redtube.com',
u'amazon.fr',
u'loading-delivery1.com',
u'quora.com',
u'bbc.com',
u'theguardian.com',
u'weather.com',
u'youporn.com',
u'adf.ly',
u'naver.jp',
u'warmportrait.com',
u'ilividnewtab.com',
u'stackexchange.com',
u'life.com.tw',
u'stamplive.com',
u'vimeo.com',
u'forbes.com',
u'espncricinfo.com',
u'indeed.com',
u'soundcloud.com',
u'snapdeal.com',
u'walmart.com',
u'aol.com',
u'microsoftonline.com',
u'google.com.sa',
u'reference.com',
u'bp.blogspot.com',
u'wellsfargo.com',
u'mozilla.org',
u'google.co.za',
u'w3schools.com',
u'google.gr',
u'zillow.com',
u'feedly.com',
u'amazon.it',
u'leboncoin.fr',
u'wikihow.com',
u'theladbible.com',
u'mailchimp.com',
u'office365.com',
u'google.co.th',
u'mystart.com',
u'china.com.cn',
u'thepiratebay.se',
u'tripadvisor.com',
u'livejasmin.com',
u'google.com.ua',
u'businessinsider.com',
u'allegro.pl',
u'livejournal.com',
u'vice.com',
u'zol.com.cn',
u'popcash.net',
u'wikimedia.org',
u'ikea.com',
u'force.com',
u'ifeng.com',
u'washingtonpost.com',
u'onet.pl',
u'pixiv.net',
u'kakaku.com',
u'gamer.com.tw',
u'files.wordpress.com',
u'google.be',
u'secureserver.net',
u'mediafire.com',
u'smzdm.com',
u'9gag.com',
u'nih.gov',
u'sourceforge.net',
u'google.com.my',
u'taboola.com',
u'google.com.co',
u'foxnews.com',
u'archive.org',
u'web.de',
u'xuite.net',
u'tubecup.com',
u'blogfa.com',
u'bestbuy.com',
u'google.com.sg',
u'ups.com',
u'trklnks.com',
u'pclady.com.cn',
u'bitauto.com',
u'zhihu.com',
u'comcast.net',
u'google.cn',
u'usps.com',
u'google.ro',
u'goo.ne.jp',
u'americanexpress.com',
u'abs-cbnnews.com',
u'gmx.net',
u'likes.com',
u'doublepimp.com',
u'wix.com',
u'intuit.com',
u'office.com',
u'shutterstock.com',
u'ppomppu.co.kr',
u'google.com.ng',
u'ndtv.com',
u'akamaihd.net',
u'doorblog.jp',
u'google.at',
u'stumbleupon.com',
u'weebly.com',
u'target.com',
u'mashable.com',
u'addthis.com',
u'skype.com',
u'steamcommunity.com',
u'gfycat.com',
u'avg.com',
u'badoo.com',
u'avito.ru',
u'dmm.com',
u'orange.fr',
u'pandora.com',
u'telegraph.co.uk',
u'amazon.es',
u'quikr.com',
u'ign.com',
u'haosou.com',
u'hootsuite.com',
u'mercadolivre.com.br',
u'groupon.com',
u'google.co.ve',
u'bild.de',
u'homedepot.com',
u'softonic.com',
u'google.pt',
u'theadgateway.com',
u'goodreads.com',
u'github.io',
u'google.se',
u'youm7.com',
u'hdfcbank.com',
u'wordreference.com',
u'bet365.com',
u'icicibank.com',
u'bomb01.com',
u'wsj.com',
u'spiegel.de',
u'adplxmd.com',
u'media.tumblr.com',
u'gome.com.cn',
u'pcbaby.com.cn',
u'bilibili.com',
u'jabong.com',
u'caijing.com.cn',
u'engadget.com',
u'pchome.net',
u'hulu.com',
u'kaskus.co.id',
u'steampowered.com',
u't-online.de',
u'trello.com',
u'webmd.com',
u'ask.fm',
u'spotify.com',
u'slickdeals.net',
u'whatsapp.com',
u'rediff.com',
u'wow.com',
u'hurriyet.com.tr',
u'google.com.pe',
u'udn.com',
u'usatoday.com',
u'google.ch',
u'seznam.cz',
u'zendesk.com',
u'hp.com',
u'fedex.com',
u'lifehacker.com',
u'webssearches.com',
u'ijreview.com',
u'tube8.com',
u'bloomberg.com',
u'fiverr.com',
u'mobile01.com',
u'wp.pl',
u'chinatimes.com',
u'bleacherreport.com',
u'cbssports.com',
u'uptodown.com',
u'samsung.com',
u'mama.cn',
u'youth.cn',
u'gameforge.com',
u'hupu.com',
u'rambler.ru',
u'answers.com',
u'torrentz.eu',
u'google.ae',
u'disqus.com',
u'teepr.com',
u'xcar.com.cn',
u'google.com.ph',
u'dell.com',
u'speedtest.net',
u'capitalone.com',
u'google.no',
u'rutracker.org',
u'evernote.com',
u'amazon.ca',
u'accuweather.com',
u'extratorrent.cc',
u'ebay.in',
u'moz.com',
u'google.com.bd',
u'39.net',
u'techcrunch.com',
u'iqiyi.com',
u'gizmodo.com',
u'newegg.com',
u'photobucket.com',
u'kompas.com',
u'ltn.com.tw',
u'gsmarena.com',
u'cloudfront.net',
u'meetup.com',
u'truemediapipe.com',
u'google.cz',
u'kickstarter.com',
u'varzesh3.com',
u'vipcpms.com',
u'2ch.net',
u'detik.com',
u'trackingclick.net',
u'thefreedictionary.com',
u'codecanyon.net',
u'google.co.hu',
u'libero.it',
u'autohome.com.cn',
u'milliyet.com.tr',
u'ce.cn',
u'infusionsoft.com',
u'tistory.com',
u'webtretho.com',
u'reuters.com',
u'bitly.com',
u'justdial.com',
u'googleapis.com',
u'onlinesbi.com',
u'google.co.il',
u'babytree.com',
u'twimg.com',
u'sahibinden.com',
u'ameba.jp',
u'eksisozluk.com',
u'google.ie',
u'mydomainadvisor.com',
u'reimageplus.com',
u'hudong.com',
u'oracle.com',
u'verizonwireless.com',
u'4shared.com',
u'scribd.com',
u'surveymonkey.com',
u'xda-developers.com',
u'time.com',
u'liputan6.com',
u'att.com',
u'staticwebdom.com',
u'battle.net',
u'paytm.com',
u'ptt01.cc',
u'irs.gov',
u'lady8844.com',
u'eastmoney.com',
u'ebay.com.au',
u'expedia.com',
u'macys.com',
u'tlbb8.com',
u'uploaded.net',
u'privatehomeclips.com',
u'ck101.com',
u'ero-advertising.com',
u'bhaskar.com',
u'free.fr',
u'yandex.ua',
u'taleo.net',
u'ebay.it',
u'olx.in',
u'list-manage.com',
u'sabah.com.tr',
u'blog.jp',
u'theverge.com',
u'lazada.co.id',
u'liveinternet.ru',
u'citibank.com',
u'repubblica.it',
u'nyaa.se',
u'csdn.net',
u'okcupid.com',
u'warofclicks.com',
u'google.com.vn',
u'mlb.com',
u'naukri.com',
u'ganji.com',
u'starsports.com',
u'fbcdn.net',
u'ink361.com',
u'32d1d3b9c.se',
u'nba.com',
u'chaturbate.com',
u'google.cl',
u'exoclick.com',
u'rt.com',
u'ci123.com',
u'retailmenot.com',
u'livedoor.biz',
u'gmarket.co.kr',
u'b5m.com',
u'trulia.com',
u'gap.com',
u'goal.com',
u'mega.co.nz',
u'elpais.com',
u'xywy.com',
u'icloud.com',
u'kayak.com',
u'npr.org',
u'hostgator.com',
u'mobile.de',
u'xe.com',
u'glassdoor.com',
u'mi.com',
u'flirchi.com',
u'kouclo.com',
u'asana.com',
u'odesk.com',
u'58.com',
u'abril.com.br',
u'blogimg.jp',
u'6pm.com',
u'php.net',
u'woot.com',
u'rbc.ru',
u'hotels.com',
u'marca.com',
u'bestadbid.com',
u'styletv.com.cn',
u'eyny.com',
u'chinaz.com',
u'wonderlandads.com',
u'constantcontact.com',
u'google.fi',
u'issuu.com',
u'impress.co.jp',
u'wetransfer.com',
u'buzzfil.net',
u'zippyshare.com',
u'ad132m.com',
u'timeanddate.com',
u'enet.com.cn',
u'gawker.com',
u'doubleclick.com',
u'sex.com',
u'nordstrom.com',
u'clien.net',
u'cnzz.com',
u'slack.com',
u'urbandictionary.com',
u'agoda.com',
u'google.sk',
u'cricbuzz.com',
u'corriere.it',
u'azlyrics.com',
u'asos.com',
u'google.dz',
u'mystartsearch.com',
u'kijiji.ca',
u'hm.com',
u'urdupoint.com',
u'fanli.com',
u'zoho.com',
u'beeg.com',
u'playstation.com',
u'gamefaqs.com',
u'elance.com',
u'tmz.com',
u'elmundo.es',
u'shopclues.com',
u'tabelog.com',
u'airbnb.com',
u'realtor.com',
u'independent.co.uk',
u'eastday.com',
u'appledaily.com.tw',
u'wunderground.com',
u'lowes.com',
u'latimes.com',
u'nike.com',
u'google.dk',
u'eventbrite.com',
u'statcounter.com',
u'drseks.com',
u'streamcloud.eu',
u'ehow.com',
u'onedio.com',
u'it168.com',
u'domaintools.com',
u'irctc.co.in',
u'shopify.com',
u'aparat.com',
u'houzz.com',
u'adsrvmedia.net',
u'savefrom.net',
u'aweber.com',
u'blog.me',
u'goo.gl',
u'vnexpress.net',
u'ucoz.ru',
u'ad120m.com',
u'hatena.ne.jp',
u'donga.com',
u'stockstar.com',
u'4dsply.com',
u'olx.pl',
u'squarespace.com',
u'sberbank.ru',
u'nbcnews.com',
u'slate.com',
u'blogspot.jp',
u'statsmobi.com',
u'11st.co.kr',
u'duckduckgo.com',
u'moneycontrol.com',
u'mercadolibre.com.ar',
u'intoday.in',
u'subito.it',
u'kohls.com',
u'southwest.com',
u'lenovo.com',
u'youtube-mp3.org',
u'ca.gov',
u'box.com',
u'jqw.com',
u'youboy.com',
u'mixi.jp',
u'rottentomatoes.com',
u'facenama.com',
u'albawabhnews.com',
u'hubspot.com',
u'java.com',
u'in.com',
u'pinimg.com',
u'myntra.com',
u'hdzog.com',
u'vcommission.com',
u'nownews.com',
u'udemy.com',
u'ancestry.com',
u'instructables.com',
u'haber7.com',
u'wired.com',
u'taringa.net',
u'hstpnetwork.com',
u'pof.com',
u'kinopoisk.ru',
u'medium.com',
u'shareba.com',
u'etao.com',
u'youdao.com',
u'fidelity.com',
u'ig.com.br',
u'siteadvisor.com',
u'gazeta.pl',
u'zing.vn',
u'neobux.com',
u'subscene.com',
u'sakura.ne.jp',
u'airtel.in',
u'jimdo.com',
u'fh21.com.cn',
u'wikiwiki.jp',
u'wiktionary.org',
u'priceline.com',
u'lenta.ru',
u'jrj.com.cn',
u'verizon.com',
u'nifty.com',
u'bodybuilding.com',
u'behance.net',
u'sabq.org',
u'xunlei.com',
u'kotaku.com',
u'entrepreneur.com',
u'semrush.com',
u'mackeeper.com',
u'lemonde.fr',
u'overstock.com',
u'drudgereport.com',
u'japanpost.jp',
u'mirror.co.uk',
u'digikala.com',
u'allrecipes.com',
u'basecamp.com',
u'youjizz.com',
u'goodgamestudios.com',
u'nydailynews.com',
u'soku.com',
u'ebay.fr',
u'interia.pl',
u'academia.edu',
u'17ok.com',
u'onlylady.com',
u'dianping.com',
u'rednet.cn',
u'elfagr.org',
u'wwwpromoter.com',
u'putlocker.is',
u'vetogate.com',
u'chip.de',
u'ad6media.fr',
u'abcnews.go.com',
u'cracked.com',
u'ero-video.net',
u'junbi-tracker.com',
u'chron.com',
u'slimspots.com',
u'asus.com',
u'prntscr.com',
u'livescore.com',
u'discovercard.com',
u'bluehost.com',
u'torcache.net',
u'ted.com',
u'omiga-plus.com',
u'pcgames.com.cn',
u'change.org',
u'rapidgator.net',
u'ashleyrnadison.com',
u'epweike.com',
u'kooora.com',
u'ticketmaster.com',
u'auction.co.kr',
u'makemytrip.com',
u'souq.com',
u'4chan.org',
u'el-balad.com',
u'freepik.com',
u'twoo.com',
u'voc.com.cn',
u'saramin.co.kr',
u'workercn.cn',
u'google.co.nz',
u'staples.com',
u'cnblogs.com',
u'blackboard.com',
u'myfitnesspal.com',
u'chinaso.com',
u'eonline.com',
u'so-net.ne.jp',
u'yoka.com',
u'seesaa.net',
u'costco.com',
u'hespress.com',
u'renren.com',
u'liveleak.com',
u'shareasale.com',
u'e-hentai.org',
u'news.com.au',
u'people.com',
u'faithtap.com',
u'ewt.cc',
u'bhphotovideo.com',
u'douyutv.com',
u'm-w.com',
u'battlefield.com',
u'kinogo.net',
u'vine.co',
u'terra.com.br',
u'lefigaro.fr',
u'united.com',
u'efix.com',
u'android.com',
u'patch.com',
u'gutefrage.net',
u'sears.com',
u'custhelp.com',
u'zulily.com',
u'vi-view.com',
u'folha.uol.com.br',
u'disney.go.com',
u'pcpop.com',
u'westernjournalism.com',
u'biglobe.ne.jp',
u'adp.com',
u'kdnet.net',
u'zappos.com',
u'milanuncios.com',
u'digg.com',
u'mint.com',
u'atwiki.jp',
u'focus.de',
u'backpage.com',
u'billdesk.com',
u'315che.com',
u'tinyurl.com',
u'babycenter.com',
u'22find.com',
u'cnbc.com',
u'primewire.ag',
u'io9.com',
u'cookpad.com',
u'yhd.com',
u'swagbucks.com',
u'r10.net',
u'coursera.org',
u'bukalapak.com',
u'leagueoflegends.com',
u'delta.com',
u'gyazo.com',
u'ibm.com',
u'momoshop.com.tw',
u'comcast.com',
u'webex.com',
u'sfgate.com',
u'atlassian.net',
u'foodnetwork.com',
u'sbnation.com',
u'nate.com',
u'nikkei.com',
u'novinky.cz',
u'pcmag.com',
u'marketwatch.com',
u'globososo.com',
u'xbox.com',
u'mihanblog.com',
u'icc-cricket.com',
u'scoop.it',
u'ruten.com.tw',
u'trovi.com',
u'mit.edu',
u'wav.tv',
u'huaban.com',
u'126.com',
u'nhl.com',
u'bookmyshow.com',
u'commentcamarche.net',
u'mynavi.jp',
u'sh.st',
u'sciencedirect.com',
u'ytimg.com',
u'rightmove.co.uk',
u'olx.com.br',
u'w3.org',
u'weblio.jp',
u'tomshardware.com',
u'www.gov.uk',
u'nhk.or.jp',
u'aa.com',
u'filehippo.com',
u'mynet.com',
u'web.tv',
u'sulekha.com',
u'exblog.jp',
u'superuser.com',
u'shaadi.com',
u'histats.com',
u'letv.com',
u'williamhill.com',
u'ncaa.com',
u'mackolik.com',
u'india.com',
u'google.rs',
u'indianexpress.com',
u'stanford.edu',
u'tagged.com',
u'pchome.com.tw',
u'usaa.com',
u'gofundme.com',
u'k618.cn',
u'blogspot.de',
u'myfreecams.com',
u'reverso.net',
u'usmagazine.com',
u'porn.com',
u'pch.com',
u'farsnews.com',
u'dreamstime.com',
u'104.com.tw',
u'torrentz.in',
u'livestrong.com',
u'tnaflix.com',
u'instructure.com',
u'gemius.pl',
u'dangdang.com',
u'sky.com',
u'match.com',
u'investopedia.com',
u'thehindu.com',
u'informer.com',
u'zhaopin.com',
u'altervista.org',
u'lequipe.fr',
u'cbslocal.com',
u'ixxx.com',
u'gamespot.com',
u'europa.eu',
u'sfr.fr',
u'movie4k.to',
u'cisco.com',
u'yodobashi.com',
u'hilton.com',
u'yandex.com.tr',
u'indiegogo.com',
u'mayoclinic.org',
u'bidvertiser.com',
u'acfun.tv',
u'scoopwhoop.com',
u'usbank.com',
u'sahadan.com',
u'tutorialspoint.com',
u'ad4game.com',
u'pagesjaunes.fr',
u'themidnightmatulas.com',
u'gamepedia.com',
u't-mobile.com',
u'safehomepage.com',
u'adme.ru',
u'vk.me',
u'ccb.com',
u'coupons.com',
u'cdiscount.com',
u'offpageads.com',
u'monster.com',
u'roblox.com',
u'xing.com',
u'cbsnews.com',
u'google.hr',
u'tsite.jp',
u'techradar.com',
u'ria.ru',
u'abplive.in',
u'forever21.com',
u'filmon-ads.com',
u'deadspin.com',
u'ampclicks.com',
u'google.bg',
u'gumtree.com',
u'lolwot.com',
u'dafont.com',
u'geocities.jp',
u'oyunskor.com',
u'inc.com',
u'padsdel.com',
u'drtuber.com',
u'motherless.com',
u'fitbit.com',
u'chexun.com',
u'tribunnews.com',
u'123rf.com',
u'sharepoint.com',
u'ew.com',
u'howtogeek.com',
u'friv.com',
u'zara.com',
u'4399.com',
u'thedailybeast.com',
u'nfl.com',
u'warriorforum.com',
u'ringring.vn',
u'suning.com',
u'asahi.com',
u'almasryalyoum.com',
u'himado.in',
u'redfin.com',
u'free-tv-video-online.info',
u'tabnak.ir',
u'hubpages.com',
u'sapo.pt',
u'indiamart.com',
u'12306.cn',
u'189.cn',
u'cbc.ca',
u'jmpdirect01.com',
u'olx.ua',
u'gismeteo.ru',
u'nypost.com',
u'istockphoto.com',
u'adultfriendfinder.com',
u'slrclub.com',
u'foursquare.com',
u'intel.com',
u'qianlong.com',
u'21cn.com',
u'2345.com',
u'immobilienscout24.de',
u'eenadu.net',
u'apache.org',
u'itmedia.co.jp',
u'searchengineland.com',
u'all2lnk.com',
u'harvard.edu',
u'as.com',
u'yellowpages.com',
u'pixabay.com',
u'researchgate.net',
u'popsugar.com',
u'imobile.com.cn',
u'androidcentral.com',
u'mercadolibre.com.mx',
u'quizlet.com',
u'allocine.fr',
u'pantip.com',
u'smh.com.au',
u'who.is',
u'linkwithin.com',
u'freelancer.com',
u'getbootstrap.com',
u'cpasbien.pw',
u'typepad.com',
u'prothom-alo.com',
u'clipconverter.cc',
u'yam.com',
u'sporx.com',
u'tutsplus.com',
u'nbcsports.com',
u'ebay.ca',
u'cbs.com',
u'caixa.gov.br',
u'distractify.com',
u'virgilio.it',
u'rakuten.ne.jp',
u'e97527f0.se',
u'lanacion.com.ar',
u'gigazine.net',
u'internethaber.com',
u'rakuten.com',
u'ultimate-guitar.com',
u'tripadvisor.co.uk',
u'rutor.org',
u'reduxmediia.com',
u'ashleymadison.com',
u'gittigidiyor.com',
u'itau.com.br',
u'nocookie.net',
u'gigacircle.com',
u'whitepages.com',
u'makeuseof.com',
u'wiley.com',
u'indianrail.gov.in',
u'theatlantic.com',
u'hh.ru',
u'food.com',
u'marktplaats.nl',
u'weather.gov',
u'marriott.com',
u'masrawy.com',
u'idnes.cz',
u'liveperson.net',
u'chiphell.com',
u'blogspot.mx',
u'51job.com',
u'deezer.com',
u'nationalgeographic.com',
u'mediaplex.com',
u'dict.cc',
u'24h.com.vn',
u'6park.com',
u'citrixonline.com',
u'ebates.com',
u'gazzetta.it',
u'iminent.com',
u'jumia.com.ng',
u'finstorieslive.com',
u'qingdaonews.com',
u'imagebam.com',
u'focus.cn',
u'jobrapido.com',
u'tahrirnews.com',
u'leadpages.net',
u'worldstarhiphop.com',
u'am15.net',
u'cnmo.com',
u'clarin.com',
u'getpocket.com',
u'what-character-are-you.com',
u'popupads.ir',
u'bt.com',
u'jquery.com',
u'delta-homes.com',
u'images-amazon.com',
u'google.com.ec',
u'today.com',
u'ccebba93.se',
u'cocolog-nifty.com',
u'emol.com',
u'salon.com',
u'ukr.net',
u'hypergames.net',
u'lightinthebox.com',
u'blackhatworld.com',
u'viralnova.com',
u'hardsextube.com',
u'blogspot.ru',
u'thenextweb.com',
u'state.gov',
u'adrotator.se',
u'thekitchn.com',
u'wayfair.com',
u'mercadolibre.com.ve',
u'wetter.com',
u'firstpost.com',
u'ensonhaber.com',
u'anitube.se',
u'sozcu.com.tr',
u'topix.com',
u'interpark.com',
u'mtv.com',
u'brainyquote.com',
u'americanas.com.br',
u'lapatilla.com',
u'barnesandnoble.com',
u'solarmovie.is',
u'macrumors.com',
u'audible.com',
u'fanfiction.net',
u'envato.com',
u'littlethings.com',
u'rarbg.com',
u'habrahabr.ru',
u'prezi.com',
u'bitbucket.org',
u'mediaset.it',
u'yts.to',
u'nikkeibp.co.jp',
u'webcrawler.com',
u'rtl.de',
u'yomiuri.co.jp',
u'dropboxusercontent.com',
u'ryanair.com',
u'usnews.com',
u'vg.no',
u'google.com.do',
u'hatenablog.com',
u'donanimhaber.com',
u'dribbble.com',
u'norton.com',
u'postimg.org',
u'investing.com',
u'hollywoodreporter.com',
u'elitedaily.com',
u'clicksvenue.com',
u'iconosquare.com',
u'jcpenney.com',
u'idealo.de',
u'xtube.com',
u'olx.co.id',
u'search-simple.com',
u'rollingstone.com',
u'videomega.tv',
u'nairaland.com',
u'mbc.net',
u'ctrip.com',
u'mapquest.com',
u'rei.com',
u'media-fire.org',
u'arstechnica.com',
u'zone-telechargement.com',
u'getresponse.com',
u'complex.com',
u'525j.com.cn',
u'nasa.gov',
u'zedo.com',
u'click4stat.com',
u'cnbeta.com',
u'zomato.com',
u'ly.net',
u'wwe.com',
u'beytoote.com',
u'jsfiddle.net',
u'discuss.com.hk',
u'persianblog.ir',
u'google.lk',
u'google.lt',
u'graphicriver.net',
u'microsoftstore.com',
u'114la.com',
u'uber.com',
u'netteller.com',
u'kinox.to',
u'jezebel.com',
u'9gag.tv',
u'linternaute.com',
u'upworthy.com',
u'ldblog.jp',
u'qualtrics.com',
u'biblegateway.com',
u'ocn.ne.jp',
u'alarabiya.net',
u'2chblog.jp',
u'narod.ru',
u'correios.com.br',
u'vox.com',
u'picmonkey.com',
u'elwatannews.com',
u'bahn.de',
u'heise.de',
u'feng.com',
u'bloglovin.com',
u'elegantthemes.com',
u'windowsphone.com',
u'lazada.vn',
u'abc.net.au',
u'elmogaz.com',
u'haberturk.com',
u'dhgate.com',
u'td.com',
u'istartsurf.com',
u'gazetaexpress.com',
u'ea.com',
u'walmart.com.br',
u'wp.com',
u'pbs.org',
u'theblaze.com',
u'vodlocker.com',
u'dubizzle.com',
u'nu.nl',
u'merdeka.com',
u'hindustantimes.com',
u'wattpad.com',
u'similarweb.com',
u'cc.com',
u'haiwainet.cn',
u'jagran.com',
u'tesco.com',
u'discogs.com',
u'spankwire.com',
u'ptt.cc',
u'360doc.com',
u'joomla.org',
u'mydrivers.com',
u'wiocha.pl',
u'juksy.com',
u'autoscout24.de',
u'citibank.co.in',
u'crunchbase.com',
u'axisbank.com',
u'todayhumor.co.kr',
u'iciba.com',
u'flightradar24.com',
u'infobae.com',
u'gaana.com',
u'clixsense.com',
u'couchtuner.eu',
u'cityadspix.com',
u'chefkoch.de',
u'video-one.com',
u'thechive.com',
u'walgreens.com',
u'popmog.com',
u'conduit.com',
u'bedbathandbeyond.com',
u'sitepoint.com',
u'list-manage1.com',
u'ebay.es',
u'katproxy.com',
u'boredpanda.com',
u'garanti.com.tr',
u'alfalfalfa.com',
u'iflscience.com',
u'brassring.com',
u'jalopnik.com',
u'biobiochile.cl',
u'amoory.com',
u'clickbank.com',
u'codeproject.com',
u'vente-privee.com',
u'3dmgame.com',
u'templatemonster.com',
u'skysports.com',
u'otto.de',
u'pole-emploi.fr',
u'askmebazaar.com',
u'lynda.com',
u'suara.com',
u'myway.com',
u'digitaltrends.com',
u'qidian.com',
u'bizjournals.com',
u'shaparak.ir',
u'ed.gov',
u'legacy.com',
u'piriform.com',
u'creditkarma.com',
u'autotrader.com',
u'finn.no',
u'vip.com',
u'blomaga.jp',
u'mbank.com.pl',
u'askubuntu.com',
u'gumtree.com.au',
u'telegraaf.nl',
u'bandcamp.com',
u'zeroredirect1.com',
u'ahrefs.com',
u'toysrus.com',
u'tripadvisor.in',
u'jalan.net',
u'seasonvar.ru',
u'super.cz',
u'websta.me',
u'oneindia.com',
u'mpnrs.com',
u'lotour.com',
u'yaplakal.com',
u'gamersky.com',
u'welt.de',
u'klikbca.com',
u'cloudflare.com',
u'babylon.com',
u'all-free-download.com',
u'o2.pl',
u'pcworld.com',
u'kuronekoyamato.co.jp',
u'mydala.com',
u'credit-agricole.fr',
u'drugs.com',
u'sephora.com',
u'blogspot.tw',
u'redirectvoluum.com',
u'weheartit.com',
u'xkcd.com',
u'doodle.com',
u'turbobit.net',
u'fang.com',
u'genius.com',
u'namecheap.com',
u'babal.net',
u'megaoferta.net',
u'friendlife.com',
u'springer.com',
u'zopim.com',
u'fool.com',
u'vesti.ru',
u'pixlr.com',
u'schwab.com',
u'buscape.com.br',
u'royalbank.com',
u'pnc.com',
u'nudevista.com',
u'dhl.de',
u'gazeta.ru',
u'capitalone360.com',
u'gilt.com',
u'staticflickr.com',
u'speedanalysis.net',
u'shutterfly.com',
u'hid.im',
u'auto.ru',
u'track300.com',
u'digitalocean.com',
u'aftonbladet.se',
u'gotowebinar.com',
u'howstuffworks.com',
u'mail.com',
u'kioskea.net',
u'niuche.com',
u'suik.info',
u'plarium.com',
u'google.com.kw',
u'crsdrz.com',
u'linkbucks.com',
u'takungpao.com',
u'moba-stream.com',
u'mgid.com',
u'canva.com',
u'tgbus.com',
u'kbb.com',
u'abc.es',
u'gamestop.com',
u'over-blog.com',
u'iherb.com',
u'codepen.io',
u'vsuch.com',
u'meituan.com',
u'lun.com',
u'jia.com',
u'yadi.sk',
u'dmv.org',
u'tomsguide.com',
u'leo.org',
u'chekb.com',
u's2d6.com',
u'homeway.com.cn',
u'city-data.com',
u'dpreview.com',
u'mangahere.co',
u'adweek.com',
u'labanquepostale.fr',
u'drom.ru',
u'gotomeeting.com',
u'fandango.com',
u'serverfault.com',
u'ibtimes.com',
u'jin115.com',
u'bankmellat.ir',
u'carview.co.jp',
u'netshoes.com.br',
u'excite.co.jp',
u'serving-sys.com',
u'jeuxvideo.com',
u'rozblog.com',
u'chicagotribune.com',
u'inquirer.net',
u'orbitz.com',
u'sprint.com',
u'echo.msk.ru',
u'n11.com',
u'line.me',
u'cars.com',
u'smallseotools.com',
u'r7.com',
u'herokuapp.com',
u'pingdom.com',
u'cheezburger.com',
u'y8.com',
u'lego.com',
u'keezmovies.com',
u'yourlust.com',
u'bankrate.com',
u'argos.co.uk',
u'20minutes.fr',
u'lockerdome.com',
u'ycombinator.com',
u'xiami.com',
u'qslpdk.com',
u'aljazeera.net',
u'lacaixa.es',
u'victoriassecret.com',
u'thisav.com',
u'flirt4free.com',
u'hellou.co.uk',
u'uploadable.ch',
u'mangafox.me',
u'ecnavi.jp',
u'shine.com',
u'zougla.gr',
u'watchseriestv.to',
u'refinery29.com',
u'sponichi.co.jp',
u'cvs.com',
u'livingsocial.com',
u'gstatic.com',
u'yandex.by',
u'berkeley.edu',
u'google.by',
u'careerbuilder.com',
u'dropbooks.tv',
u'hepsiburada.com',
u'lloydsbank.co.uk',
u'bufferapp.com',
u'metrolyrics.com',
u'kongregate.com',
u'sueddeutsche.de',
u'crunchyroll.com',
u'esuteru.com',
u'google.si',
u'uniqlo.com',
u'gtmetrix.com',
u'nuvid.com',
u'jcrew.com',
u'myanimelist.net',
u'ft.com',
u'gnavi.co.jp',
u'panasonic.jp',
u'radikal.com.tr',
u'orf.at',
u'ubuntu.com',
u'slashdot.org',
u'programme-tv.net',
u'docin.com',
u'viva.co.id',
u'anjuke.com',
u'seekingalpha.com',
u'billboard.com',
u'adxcore.com',
u'52pk.net',
u'mysql.com',
u'phonearena.com',
u'otomoto.pl',
u'alohatube.com',
u'smartshopping.com',
u'giphy.com',
u'tinypic.com',
u'metacritic.com',
u'garmin.com',
u'esporte.uol.com.br',
u'f54d6bf2b1.se',
u'ilmeteo.it',
u'hsbc.co.uk',
u'ccidnet.com',
u'newyorker.com',
u'unity3d.com',
u'tunein.com',
u'fortune.com',
u'freelotto.com',
u'tvguide.com',
u'yenisafak.com.tr',
u'cam4.com',
u'southcn.com',
u'qunar.com',
u'craigslist.ca',
u'axisbank.co.in',
u'imagefap.com',
u'commbank.com.au',
u'infoseek.co.jp',
u'nipic.com',
u'last.fm',
u'panet.co.il',
u'qvc.com',
u'ynet.co.il',
u'aliyun.com',
u'500px.com',
u'airasia.com',
u'aeriagames.com',
u'ning.com',
u'polyvore.com',
u'ngacn.cc',
u'kimiss.com',
u'discover.com',
u'hamariweb.com',
u'wpmudev.org',
u'angel.co',
u'1and1.com',
u'webs.com',
u'1und1.de',
u'ad123m.com',
u'sp.gov.br',
u'earthlink.net',
u'dlsite.com',
u'depositphotos.com',
u'danawa.com',
u'pogo.com',
u'vistaprint.com',
u'dagbladet.no',
u'zeit.de',
u'tobogo.net',
u'dx.com',
u'noticias.uol.com.br',
u'avira.com',
u'baiducontent.com',
u'mufg.jp',
u'abc.go.com',
u'grantland.com',
u'avclub.com',
u'banggood.com',
u'okwave.jp',
u'woothemes.com',
u'elcomercio.pe',
u'webmoney.ru',
u'opentable.com',
u'mangareader.net',
u'nosub.tv',
u'zozo.jp',
u'megafilmeshd.net',
u'yahoo-mbga.jp',
u'vrbo.com',
u'postbank.de',
u'chosun.com',
u'nouvelobs.com',
u'noaa.gov',
u'blogsky.com',
u'criteo.com',
u'4pda.ru',
u'kizi.com',
u'miniclip.com',
u'sakshi.com',
u'psu.edu',
u'economist.com',
u'edmunds.com',
u'kapanlagi.com',
u'pcanalysis.net',
u'mplife.com',
u'screencast.com',
u'icims.com',
u'haberler.com',
u'2ch-c.net',
u'sharelive.net',
u'onlinecreditcenter6.com',
u't411.io',
u'voyages-sncf.com',
u'zergnet.com',
u'p5w.net',
u'cloob.com',
u'elconfidencial.com',
u'bradesco.com.br',
u'7apps.me',
u'uptobox.com',
u'whois.com',
u'autotrader.co.uk',
u'wowhead.com',
u'venturebeat.com',
u'ceneo.pl',
u'fotostrana.ru',
u'bbb.org',
u'opensubtitles.org',
u'barclaycardus.com',
u'syosetu.com',
u'picofile.com',
u'geektoprofessional.com',
u'asriran.com',
u'pikabu.ru',
u'bioyun.com',
u'v9.com',
u'dostor.org',
u'hongkiat.com',
u'keepvid.com',
u'mtime.com',
u'skyscanner.net',
u'starbucks.com',
u'way2sms.com',
u'blogspot.com.ar',
u'qz.com',
u'zdnet.com',
u'oschina.net',
u'virginmedia.com',
u'internetdownloadmanager.com',
u'plurk.com',
u'cosmopolitan.com',
u'sports747.com',
u'gumtree.co.za',
u'mcafee.com',
u'sanook.com',
u'manta.com',
u'wpbeginner.com',
u'freegameszonetab.com',
u'bab.la',
u'sony.jp',
u'vgsgaming-ads.com',
u'dcinside.com',
u'tvn24.pl',
u'etorrent.co.kr',
u'cleartrip.com',
u'trgino.com',
u'teknoter.com',
u'dw.de',
u'foxsports.com',
u'xsrving.com',
u'cmbchina.com',
u'acesse.com',
u'sweet-page.com',
u'pastebin.com',
u'lg.com',
u'pcgamer.com',
u'chinabyte.com',
u'politico.com',
u'bookryanair.com',
u'wargaming.net',
u'duba.com',
u'porntube.com',
u'voyeurhit.com',
u'drive2.ru',
u'5278.cc',
u'dailykos.com',
u'metro.co.uk',
u'dantri.com.vn',
u'azet.sk',
u'jw.org',
u'premierleague.com',
u'xueqiu.com',
u'caixin.com',
u'zhibo8.cc',
u'rockstargames.com',
u'softpedia.com',
u'zazzle.com',
u'dhl.com',
u'ustream.tv',
u'sanjesh.org',
u'weather.com.cn',
u'breitbart.com',
u'fotolia.com',
u'sape.ru',
u'teamviewer.com',
u'ucoz.com',
u'affclicker.com',
u'iplt20.com',
u'uproxx.com',
u'gamme.com.tw',
u'tigerdirect.com',
u'netdna-cdn.com',
u'filmweb.pl',
u'easyjet.com',
u'humblebundle.com',
u'ecollege.com',
u'blogspot.kr',
u'ny.gov',
u'viadeo.com',
u'videodownloadconverter.com',
u'aastocks.com',
u'miniinthebox.com',
u'google.lv',
u'cj.com',
u'homeaway.com',
u'medicinenet.com',
u'peyvandha.ir',
u'myspace.com',
u'zimbio.com',
u'rackspace.com',
u'gumtree.pl',
u'nextmedia.com',
u'estadao.com.br',
u'popmyads.com',
u'theweathernetwork.com',
u'kinja.com',
u'blog.com',
u'umich.edu',
u'samsclub.com',
u'mysmartprice.com',
u'delicious.com',
u'greatandhra.com',
u'say-move.org',
u'fishki.net',
u'citi.com',
u'smi2.ru',
u'99acres.com',
u'submarino.com.br',
u'cliponyu.com',
u'junglee.com',
u'shahrekhabar.com',
u'bdnews24.com',
u'vporn.com',
u'wunderlist.com',
u'uludagsozluk.com',
u'segundamano.es',
u'euromillionairesystem.tv',
u'adultadworld.com',
u'privilegesbox.net',
u'trafficserving.com',
u'admaimai.com',
u'default-search.net',
u'sportskeeda.com',
u'trovigo.com',
u'ehowenespanol.com',
u'state.tx.us',
u'reclameaqui.com.br',
u'naij.com',
u'editor.wix.com',
u'grooveshark.com',
u'ilfattoquotidiano.it',
u'duolingo.com',
u'aljazeera.com',
u'douguo.com',
u'yiqifa.com',
u'fazenda.gov.br',
u'nature.com',
u'mic.com',
u'publishthis.com',
u'urbanoutfitters.com',
u'nymag.com',
u'v1.cn',
u'khanacademy.org',
u'greatergood.com',
u'drupal.org',
u'realestate.com.au',
u'mobtada.com',
u'delta-search.com',
u'traidnt.net',
u'nitroflare.com',
u'1337x.to',
u'brazzers.com',
u'mtsindia.in',
u'santander.co.uk',
u'huihui.cn',
u'infospace.com',
u'liveadoptimizer.com',
u'xiaomi.com',
u'ppstream.com',
u'115.com',
u'examiner.com',
u'couchtuner.eu.com',
u'duowan.com',
u'paidverts.com',
u'fanpage.gr',
u'storypick.com',
u'fatwallet.com',
u'kissanime.com',
u'linio.com.mx',
u'51cto.com',
u'zwaar.net',
u'ozon.ru',
u'traveltune.com',
u'inmotionhosting.com',
u'telekom.com',
u'poste.it',
u'wpengine.com',
u'magicbricks.com',
u'urbanspoon.com',
u'nbc.com',
u'konga.com',
u'telegraf.com.ua',
u'ulmart.ru',
u'ibtimes.co.uk',
u'n-tv.de',
u'aizhan.com',
u'familydoctor.com.cn',
u'nguoiduatin.vn',
u'tiscali.it',
u'yandex.kz',
u'jugem.jp',
u'national-lottery.co.uk',
u'patheos.com',
u'paytm.in',
u'purdue.edu',
u'cornell.edu',
u'quicksprout.com',
u'misr5.com',
u'filmibeat.com',
u'mashreghnews.ir',
u'windows.net',
u'newsmth.net',
u'fastcompany.com',
u'fanhuan.com',
u'bb.com.br',
u'dawn.com',
u'cambridge.org',
u'egrana.com.br',
u'888casino.com',
u'nrk.no',
u'francetvinfo.fr',
u'kompasiana.com',
u'kienthuc.net.vn',
u'ilbe.com',
u'archiveofourown.org',
u'profitboosterapp.com',
u'nguyentandung.org',
u'ad127m.com',
u'ranker.com',
u'vmware.com',
u'vanguard.com',
u'css-tricks.com',
u'soccerway.com',
u'shop-pro.jp',
u'4cdn.org',
u'id.net',
u'iconfinder.com',
u'tripadvisor.it',
u'tokopedia.com',
u'priceminister.com',
u'variety.com',
u'ascii.jp',
u'codecademy.com',
u'manoramaonline.com',
u'joins.com',
u'collegehumor.com',
u'autoblog.com',
u'actcorp.in',
u'eluniversal.com.mx',
u'toptenreviews.com',
u'forgeofempires.com',
u'index.hu',
u'freshbooks.com',
u'starwoodhotels.com',
u'tv.com',
u'ksl.com',
u'neogaf.com',
u'kicker.de',
u'clickadu.com',
u'korabia.com',
u'timewarnercable.com',
u'totaladperformance.com',
u'akhbarelyom.com',
u'csgolounge.com',
u'meishichina.com',
u'pornmd.com',
u'sunporno.com',
u'google.kz',
u'gozooms.com',
u'ihg.com',
u'hamusoku.com',
u'telecomitalia.it',
u'puu.sh',
u'hotpepper.jp',
u'computerbild.de',
u'cnki.net',
u'creativemarket.com',
u'khabaronline.ir',
u'officedepot.com',
u'commonfloor.com',
u'flipboard.com',
u'yjc.ir',
u'strava.com',
u'ucla.edu',
u'spanishdict.com',
u'shopstyle.com',
u'docusign.net',
u'17173.com',
u'ouedkniss.com',
u'rakuten-card.co.jp',
u'indeed.co.in',
u'lonelyplanet.com',
u'bmi.ir',
u'oneplus.net',
u'nowvideo.sx',
u'stubhub.com',
u'olx.ro',
u'addmefast.com',
u'thrillist.com',
u'flippa.com',
u'onliner.by',
u'perezhilton.com',
u'trafficfactory.biz',
u'techtudo.com.br',
u'redbox.com',
u'fnac.com',
u'about.me',
u'barclays.co.uk',
u'n-mobile.net',
u'searchenginejournal.com',
u'upsocl.com',
u'pravda.com.ua',
u'fastpic.ru',
u'fullonlinefilmizle.com',
u'businessweekly.com.tw',
u'doisongphapluat.com',
u'lindaikeji.blogspot.com',
u'rojadirecta.me',
u'yixun.com',
u'clip.vn',
u'meteofrance.com',
u'amarujala.com',
u'celebritytune.com',
u'kijiji.it',
u'boc.cn',
u'msnbc.com',
u'theglobeandmail.com',
u'reallifecam.com',
u'python.org',
u'17track.net',
u'nexusmods.com',
u'britishairways.com',
u'zoopla.co.uk',
u'travelocity.com',
u'so.com',
u'ilsole24ore.com',
u'akhbarak.net',
u'ad131m.com',
u'apartmenttherapy.com',
u'rikunabi.com',
u'wikispaces.com',
u'samanyoluhaber.com',
u'eztv.ch',
u'eroprofile.com',
u'unam.mx',
u'unicredit.it',
u'tagesschau.de',
u'nnm-club.me',
u'lumosity.com',
u'trademe.co.nz',
u'bigcartel.com',
u'yr.no',
u'columbia.edu',
u'theonion.com',
u'sankei.com',
u'face-masr.com',
u'washington.edu',
u'fatakat.com',
u'moviepilot.com',
u'ohmyzip.com',
u'83nsdjqqo1cau183xz.com',
u'miui.com',
u'enha.kr',
u'stern.de',
u'final.ir',
u'dsrlte.com',
u'ashemaletube.com',
u'uiuc.edu',
u'wemakeprice.com',
u'jiameng.com',
u'protothema.gr',
u'blocket.se',
u'rozetka.com.ua',
u'jvzoo.com',
u'mxttrf.com',
u'17k.com',
u'4tube.com',
u'dorkly.com',
u'lifehack.org',
u'wolframalpha.com',
u'gravatar.com',
u'buzzhand.com',
u'shopbop.com',
u'okezone.com',
u'hotwire.com',
u'multitran.ru',
u'dnaindia.com',
u'goibibo.com',
u'news24.com',
u'cinemablend.com',
u'alimama.com',
u'portaleducacao.com.br',
u'caisse-epargne.fr',
u'chess.com',
u'sierratradingpost.com',
u'techtarget.com',
u'kmart.com',
u'censor.net.ua',
u'hotukdeals.com',
u'ieee.org',
u'motthegioi.vn',
u'iqoption.com',
u'cdc.gov',
u'timeout.com',
u'tut.by',
u'ssisurveys.com',
u'guokr.com',
u'health.com',
u'blog.ir',
u'junkmail.co.za',
u'bgr.com',
u'diigo.com',
u'bayt.com',
u'mensxp.com',
u'2ch.sc',
u'indeed.co.uk',
u'google.com.pr',
u'xvideo-jp.com',
u'livetv.sx',
u'pitchfork.com',
u'camdolls.com',
u'dealmoon.com',
u'etrade.com',
u'inquisitr.com',
u'boston.com',
u'evite.com',
u'egou.com',
u'netsuite.com',
u'searchengines.guru',
u'privatbank.ua',
u'edx.org',
u'emirates.com',
u'1111.com.tw',
u'socialmediaexaminer.com',
u'asp.net',
u'allmyvideos.net',
u'wildberries.ru',
u'sfglobe.com',
u'wufoo.com',
u'fanpage.it',
u'divar.ir',
u'sony.com',
u'payoneer.com'
]
|
ministryofpromise/tlp
|
tlp/lib/filter_list.py
|
Python
|
mit
| 43,519
|
[
"ADF"
] |
cb57e3dcb2236a4757557d18adcaf32f0ab49ccf05dee0ee7ebd5a6586767de5
|
# $Id$
#
#  Copyright (C) 2002-2006 Greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for the EState fingerprinting
validation values are from the paper (JCICS _35_ 1039-1045 (1995))
"""
import unittest
import numpy
from rdkit import Chem
from rdkit.Chem import EState
from rdkit.Chem.EState import Fingerprinter
class TestCase(unittest.TestCase):
  """Regression tests for the EState fingerprinter.

  Expected counts and values come from the validation paper,
  JCICS 35, 1039-1045 (1995).
  """

  def setUp(self):
    # No shared fixtures needed; present for unittest symmetry.
    pass

  def _validate(self, vals, tol=1e-2, show=0):
    """Check each (smiles, expectedCounts, expectedVals) triple in *vals*.

    tol:  absolute tolerance used when comparing counts and values.
    show: when true, print the computed fingerprints for debugging.
    """
    # Loop variables renamed so they no longer shadow the *vals* parameter
    # (the original rebound `vals` inside the loop it was iterating).
    for smi, expCounts, expVals in vals:
      mol = Chem.MolFromSmiles(smi)
      counts, fpVals = Fingerprinter.FingerprintMol(mol)
      # Only the populated (non-zero) slots are compared against the paper.
      counts = counts[numpy.nonzero(counts)]
      fpVals = fpVals[numpy.nonzero(fpVals)]
      if show:
        # print() works identically for a single argument on Python 2 and 3;
        # the original Python-2-only print statements broke under Python 3.
        print(counts)
        print(fpVals)
      # unittest assertions instead of bare asserts so the checks still run
      # (and fail usefully) under `python -O`, which strips assert statements.
      self.assertEqual(len(expCounts), len(counts), 'bad count len for smiles: %s' % (smi))
      self.assertEqual(len(expVals), len(fpVals), 'bad val len for smiles: %s' % (smi))
      expCounts = numpy.array(expCounts)
      self.assertLess(max(abs(expCounts - counts)), tol, 'bad count for SMILES: %s' % (smi))
      expVals = numpy.array(expVals)
      self.assertLess(max(abs(expVals - fpVals)), tol, 'bad val for SMILES: %s' % (smi))

  def test1(self):
    """Spot-check two molecules from the validation paper."""
    data = [
      ('c1[nH]cnc1CC(N)C(O)=O', [1, 2, 1, 1, 1, 1, 1, 1, 1, 1],
       [0.26, 3.12, -0.86, -1.01, 0.67, 5.25, 2.71, 3.84, 8.42, 10.26]),
      ('NCCc1ccc(O)c(O)c1', [2, 3, 3, 1, 2],
       [1.26, 4.71, 0.75, 5.30, 17.97]),
    ]
    self._validate(data, show=0)
if __name__ == '__main__':
  # Run the test suite when this file is executed directly.
  unittest.main()
|
rdkit/rdkit-orig
|
rdkit/Chem/EState/UnitTestFingerprints.py
|
Python
|
bsd-3-clause
| 1,588
|
[
"RDKit"
] |
e10bec8db44b609b9c66bc54f5b2741e43908860d7cd4ae749c2cb726642f2e6
|
import struct, binascii,hashlib
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
## serpent.py - pure Python implementation of the Serpent algorithm.
## Bjorn Edstrom <be@bjrn.se> 13 december 2007.
##
## Copyrights
## ==========
##
## This code is a derived from an implementation by Dr Brian Gladman
## (gladman@seven77.demon.co.uk) which is subject to the following license.
## This Python implementation is not subject to any other license.
##
##/* This is an independent implementation of the encryption algorithm:
## *
## * Serpent by Ross Anderson, Eli Biham and Lars Knudsen
## *
## * which is a candidate algorithm in the Advanced Encryption Standard
## * programme of the US National Institute of Standards and Technology
## *
## * Copyright in this implementation is held by Dr B R Gladman but I
## * hereby give permission for its free direct or derivative use subject
## * to acknowledgment of its origin and compliance with any conditions
## * that the originators of the algorithm place on its exploitation.
## *
## * Dr Brian Gladman (gladman@seven77.demon.co.uk) 14th January 1999
## */
##
## The above copyright notice must not be removed.
##
## Information
## ===========
##
## Anyone thinking of using this code should reconsider. It's slow.
## Try python-mcrypt instead. In case a faster library is not installed
## on the target system, this code can be used as a portable fallback.
# psyco was a Python 2 JIT compiler; enable it when available to speed up
# this pure-Python cipher, and silently fall back to plain interpretation
# when it is not installed.
try:
    import psyco
    psyco.full()
except ImportError:
    pass
import binascii
import base64
block_size = 16  # Serpent block size in bytes (128 bits)
key_size = 32    # maximum key size in bytes (256 bits)
class Serpent:
    """Pure-Python Serpent cipher operating on raw 16-byte blocks.

    Thin object wrapper around the module-level set_key/encrypt/decrypt
    helpers; the expanded key schedule lives in self.key_context.
    NOTE(review): blocks are processed independently (ECB-style) with no
    padding or chaining — callers must handle modes themselves.
    """

    def __init__(self, key=None):
        """Create a cipher; the key may be supplied now or later via set_key()."""
        if key:
            self.set_key(key)

    def set_key(self, key):
        """Build the key schedule from *key* (a byte string of <= 32 bytes).

        The key length must be a multiple of 4 bytes; a KeyError is raised
        otherwise (NOTE(review): KeyError is an odd choice for argument
        validation, but existing callers may catch it).
        """
        key_len = len(key)
        if key_len % 4:
            # XXX: add padding?
            raise KeyError, "key not a multiple of 4"
        if key_len > 32:
            # XXX: prune?
            raise KeyError, "key_len > 32"
        # 140 32-bit words: 8 raw key words plus 132 round-schedule words.
        self.key_context = [0] * 140
        key_word32 = [0] * 32
        i = 0
        # Unpack the key into little-endian 32-bit words, 4 bytes at a time.
        while key:
            key_word32[i] = struct.unpack("<L", key[0:4])[0]
            key = key[4:]
            i += 1
        # Calls the module-level set_key() helper, not this method.
        set_key(self.key_context, key_word32, key_len)
        #print(map(hex,self.key_context))

    def decrypt(self, block):
        """Decrypt *block*; its length must be a multiple of 16 bytes."""
        if len(block) % 16:
            raise ValueError, "block size must be a multiple of 16"
        plaintext = ''
        while block:
            a, b, c, d = struct.unpack("<4L", block[:16])
            temp = [a, b, c, d]
            # Module-level decrypt() mutates temp in place.
            decrypt(self.key_context, temp)
            plaintext += struct.pack("<4L", *temp)
            block = block[16:]
        return plaintext

    def encrypt(self, block):
        """Encrypt *block*; its length must be a multiple of 16 bytes."""
        if len(block) % 16:
            raise ValueError, "block size must be a multiple of 16"
        ciphertext = ''
        while block:
            a, b, c, d = struct.unpack("<4L", block[0:16])
            temp = [a, b, c, d]
            # Module-level encrypt() mutates temp in place.
            encrypt(self.key_context, temp)
            ciphertext += struct.pack("<4L", *temp)
            block = block[16:]
        return ciphertext

    def get_name(self):
        """Return the name of the cipher."""
        return "Serpent"

    def get_block_size(self):
        """Get cipher block size in bytes."""
        return 16

    def get_key_size(self):
        """Get cipher key size in bytes."""
        return 32
#
# Private.
#
import struct
import sys
# 1 when the host is big-endian; used by set_key() to byte-swap key words so
# the schedule always operates on little-endian word values.
WORD_BIGENDIAN = 0
if sys.byteorder == 'big':
    WORD_BIGENDIAN = 1
def rotr32(x, n):
    """Rotate the 32-bit value *x* right by *n* bits (0 < n < 32)."""
    wrapped = (x << (32 - n)) & 0xFFFFFFFF
    return (x >> n) | wrapped
def rotl32(x, n):
    """Rotate the 32-bit value *x* left by *n* bits (0 < n < 32)."""
    high_part = (x << n) & 0xFFFFFFFF
    low_part = x >> (32 - n)
    return high_part | low_part
def byteswap32(x):
    """Reverse the byte order of the 32-bit value *x*."""
    b0 = (x >> 24) & 0xFF
    b1 = (x >> 16) & 0xFF
    b2 = (x >> 8) & 0xFF
    b3 = x & 0xFF
    return (b3 << 24) | (b2 << 16) | (b1 << 8) | b0
def set_key(l_key, key, key_len):
key_len *= 8
if key_len > 256:
return False
i = 0
lk = (key_len + 31) / 32
while i < lk:
l_key[i] = key[i]
if WORD_BIGENDIAN:
l_key[i] = byteswap32(key[i])
i += 1
if key_len < 256:
while i < 8:
l_key[i] = 0
i += 1
i = key_len / 32
lk = 1 << (key_len % 32)
l_key[i] = (l_key[i] & (lk - 1)) | lk
for i in xrange(132):
lk = l_key[i] ^ l_key[i + 3] ^ l_key[i + 5] ^ l_key[i + 7] ^ 0x9e3779b9 ^ i
l_key[i + 8] = ((lk << 11) & 0xFFFFFFFF) | (lk >> 21)
key = l_key
# serpent_generate.py
a = key[4 * 0 + 8]
b = key[4 * 0 + 9]
c = key[4 * 0 + 10]
d = key[4 * 0 + 11]
e = 0
f = 0
g = 0
h = 0
t1 = 0
t2 = 0
t3 = 0
t4 = 0
t5 = 0
t6 = 0
t7 = 0
t8 = 0
t9 = 0
t10 = 0
t11 = 0
t12 = 0
t13 = 0
t14 = 0
t15 = 0
t16 = 0
t1 = a ^ c;
t2 = d ^ t1;
t3 = a & t2;
t4 = d ^ t3;
t5 = b & t4;
g = t2 ^ t5;
t7 = a | g;
t8 = b | d;
t11 = a | d;
t9 = t4 & t7;
f = t8 ^ t9;
t12 = b ^ t11;
t13 = g ^ t9;
t15 = t3 ^ t8;
h = t12 ^ t13;
t16 = c & t15;
e = t12 ^ t16
key[4 * 0 + 8] = e
key[4 * 0 + 9] = f
key[4 * 0 + 10] = g
key[4 * 0 + 11] = h
a = key[4 * 1 + 8]
b = key[4 * 1 + 9]
c = key[4 * 1 + 10]
d = key[4 * 1 + 11]
t1 = (~a) % 0x100000000;
t2 = b ^ d;
t3 = c & t1;
t13 = d | t1;
e = t2 ^ t3;
t5 = c ^ t1;
t6 = c ^ e;
t7 = b & t6;
t10 = e | t5;
h = t5 ^ t7;
t9 = d | t7;
t11 = t9 & t10;
t14 = t2 ^ h;
g = a ^ t11;
t15 = g ^ t13;
f = t14 ^ t15
key[4 * 1 + 8] = e
key[4 * 1 + 9] = f
key[4 * 1 + 10] = g
key[4 * 1 + 11] = h
a = key[4 * 2 + 8]
b = key[4 * 2 + 9]
c = key[4 * 2 + 10]
d = key[4 * 2 + 11]
t1 = (~a) % 0x100000000;
t2 = b ^ t1;
t3 = a | t2;
t4 = d | t2;
t5 = c ^ t3;
g = d ^ t5;
t7 = b ^ t4;
t8 = t2 ^ g;
t9 = t5 & t7;
h = t8 ^ t9;
t11 = t5 ^ t7;
f = h ^ t11;
t13 = t8 & t11;
e = t5 ^ t13
key[4 * 2 + 8] = e
key[4 * 2 + 9] = f
key[4 * 2 + 10] = g
key[4 * 2 + 11] = h
a = key[4 * 3 + 8]
b = key[4 * 3 + 9]
c = key[4 * 3 + 10]
d = key[4 * 3 + 11]
t1 = a ^ d;
t2 = a & d;
t3 = c ^ t1;
t6 = b & t1;
t4 = b ^ t3;
t10 = (~t3) % 0x100000000;
h = t2 ^ t4;
t7 = a ^ t6;
t14 = (~t7) % 0x100000000;
t8 = c | t7;
t11 = t3 ^ t7;
g = t4 ^ t8;
t12 = h & t11;
f = t10 ^ t12;
e = t12 ^ t14
key[4 * 3 + 8] = e
key[4 * 3 + 9] = f
key[4 * 3 + 10] = g
key[4 * 3 + 11] = h
a = key[4 * 4 + 8]
b = key[4 * 4 + 9]
c = key[4 * 4 + 10]
d = key[4 * 4 + 11]
t1 = (~c) % 0x100000000;
t2 = b ^ c;
t3 = b | t1;
t4 = d ^ t3;
t5 = a & t4;
t7 = a ^ d;
h = t2 ^ t5;
t8 = b ^ t5;
t9 = t2 | t8;
t11 = d & t3;
f = t7 ^ t9;
t12 = t5 ^ f;
t15 = t1 | t4;
t13 = h & t12;
g = t11 ^ t13;
t16 = t12 ^ g;
e = t15 ^ t16
key[4 * 4 + 8] = e
key[4 * 4 + 9] = f
key[4 * 4 + 10] = g
key[4 * 4 + 11] = h
a = key[4 * 5 + 8]
b = key[4 * 5 + 9]
c = key[4 * 5 + 10]
d = key[4 * 5 + 11]
t1 = (~a) % 0x100000000;
t2 = a ^ d;
t3 = b ^ t2;
t4 = t1 | t2;
t5 = c ^ t4;
f = b ^ t5;
t13 = (~t5) % 0x100000000;
t7 = t2 | f;
t8 = d ^ t7;
t9 = t5 & t8;
g = t3 ^ t9;
t11 = t5 ^ t8;
e = g ^ t11;
t14 = t3 & t11;
h = t13 ^ t14
key[4 * 5 + 8] = e
key[4 * 5 + 9] = f
key[4 * 5 + 10] = g
key[4 * 5 + 11] = h
a = key[4 * 6 + 8]
b = key[4 * 6 + 9]
c = key[4 * 6 + 10]
d = key[4 * 6 + 11]
t1 = (~a) % 0x100000000;
t2 = a ^ b;
t3 = a ^ d;
t4 = c ^ t1;
t5 = t2 | t3;
e = t4 ^ t5;
t7 = d & e;
t8 = t2 ^ e;
t10 = t1 | e;
f = t7 ^ t8;
t11 = t2 | t7;
t12 = t3 ^ t10;
t14 = b ^ t7;
g = t11 ^ t12;
t15 = f & t12;
h = t14 ^ t15
key[4 * 6 + 8] = e
key[4 * 6 + 9] = f
key[4 * 6 + 10] = g
key[4 * 6 + 11] = h
a = key[4 * 7 + 8]
b = key[4 * 7 + 9]
c = key[4 * 7 + 10]
d = key[4 * 7 + 11]
t1 = a ^ d;
t2 = d & t1;
t3 = c ^ t2;
t4 = b | t3;
h = t1 ^ t4;
t6 = (~b) % 0x100000000;
t7 = t1 | t6;
e = t3 ^ t7;
t9 = a & e;
t10 = t1 ^ t6;
t11 = t4 & t10;
g = t9 ^ t11;
t13 = a ^ t3;
t14 = t10 & g;
f = t13 ^ t14
key[4 * 7 + 8] = e
key[4 * 7 + 9] = f
key[4 * 7 + 10] = g
key[4 * 7 + 11] = h
a = key[4 * 8 + 8]
b = key[4 * 8 + 9]
c = key[4 * 8 + 10]
d = key[4 * 8 + 11]
t1 = a ^ c;
t2 = d ^ t1;
t3 = a & t2;
t4 = d ^ t3;
t5 = b & t4;
g = t2 ^ t5;
t7 = a | g;
t8 = b | d;
t11 = a | d;
t9 = t4 & t7;
f = t8 ^ t9;
t12 = b ^ t11;
t13 = g ^ t9;
t15 = t3 ^ t8;
h = t12 ^ t13;
t16 = c & t15;
e = t12 ^ t16
key[4 * 8 + 8] = e
key[4 * 8 + 9] = f
key[4 * 8 + 10] = g
key[4 * 8 + 11] = h
a = key[4 * 9 + 8]
b = key[4 * 9 + 9]
c = key[4 * 9 + 10]
d = key[4 * 9 + 11]
t1 = (~a) % 0x100000000;
t2 = b ^ d;
t3 = c & t1;
t13 = d | t1;
e = t2 ^ t3;
t5 = c ^ t1;
t6 = c ^ e;
t7 = b & t6;
t10 = e | t5;
h = t5 ^ t7;
t9 = d | t7;
t11 = t9 & t10;
t14 = t2 ^ h;
g = a ^ t11;
t15 = g ^ t13;
f = t14 ^ t15
key[4 * 9 + 8] = e
key[4 * 9 + 9] = f
key[4 * 9 + 10] = g
key[4 * 9 + 11] = h
a = key[4 * 10 + 8]
b = key[4 * 10 + 9]
c = key[4 * 10 + 10]
d = key[4 * 10 + 11]
t1 = (~a) % 0x100000000;
t2 = b ^ t1;
t3 = a | t2;
t4 = d | t2;
t5 = c ^ t3;
g = d ^ t5;
t7 = b ^ t4;
t8 = t2 ^ g;
t9 = t5 & t7;
h = t8 ^ t9;
t11 = t5 ^ t7;
f = h ^ t11;
t13 = t8 & t11;
e = t5 ^ t13
key[4 * 10 + 8] = e
key[4 * 10 + 9] = f
key[4 * 10 + 10] = g
key[4 * 10 + 11] = h
a = key[4 * 11 + 8]
b = key[4 * 11 + 9]
c = key[4 * 11 + 10]
d = key[4 * 11 + 11]
t1 = a ^ d;
t2 = a & d;
t3 = c ^ t1;
t6 = b & t1;
t4 = b ^ t3;
t10 = (~t3) % 0x100000000;
h = t2 ^ t4;
t7 = a ^ t6;
t14 = (~t7) % 0x100000000;
t8 = c | t7;
t11 = t3 ^ t7;
g = t4 ^ t8;
t12 = h & t11;
f = t10 ^ t12;
e = t12 ^ t14
key[4 * 11 + 8] = e
key[4 * 11 + 9] = f
key[4 * 11 + 10] = g
key[4 * 11 + 11] = h
a = key[4 * 12 + 8]
b = key[4 * 12 + 9]
c = key[4 * 12 + 10]
d = key[4 * 12 + 11]
t1 = (~c) % 0x100000000;
t2 = b ^ c;
t3 = b | t1;
t4 = d ^ t3;
t5 = a & t4;
t7 = a ^ d;
h = t2 ^ t5;
t8 = b ^ t5;
t9 = t2 | t8;
t11 = d & t3;
f = t7 ^ t9;
t12 = t5 ^ f;
t15 = t1 | t4;
t13 = h & t12;
g = t11 ^ t13;
t16 = t12 ^ g;
e = t15 ^ t16
key[4 * 12 + 8] = e
key[4 * 12 + 9] = f
key[4 * 12 + 10] = g
key[4 * 12 + 11] = h
a = key[4 * 13 + 8]
b = key[4 * 13 + 9]
c = key[4 * 13 + 10]
d = key[4 * 13 + 11]
t1 = (~a) % 0x100000000;
t2 = a ^ d;
t3 = b ^ t2;
t4 = t1 | t2;
t5 = c ^ t4;
f = b ^ t5;
t13 = (~t5) % 0x100000000;
t7 = t2 | f;
t8 = d ^ t7;
t9 = t5 & t8;
g = t3 ^ t9;
t11 = t5 ^ t8;
e = g ^ t11;
t14 = t3 & t11;
h = t13 ^ t14
key[4 * 13 + 8] = e
key[4 * 13 + 9] = f
key[4 * 13 + 10] = g
key[4 * 13 + 11] = h
a = key[4 * 14 + 8]
b = key[4 * 14 + 9]
c = key[4 * 14 + 10]
d = key[4 * 14 + 11]
t1 = (~a) % 0x100000000;
t2 = a ^ b;
t3 = a ^ d;
t4 = c ^ t1;
t5 = t2 | t3;
e = t4 ^ t5;
t7 = d & e;
t8 = t2 ^ e;
t10 = t1 | e;
f = t7 ^ t8;
t11 = t2 | t7;
t12 = t3 ^ t10;
t14 = b ^ t7;
g = t11 ^ t12;
t15 = f & t12;
h = t14 ^ t15
key[4 * 14 + 8] = e
key[4 * 14 + 9] = f
key[4 * 14 + 10] = g
key[4 * 14 + 11] = h
a = key[4 * 15 + 8]
b = key[4 * 15 + 9]
c = key[4 * 15 + 10]
d = key[4 * 15 + 11]
t1 = a ^ d;
t2 = d & t1;
t3 = c ^ t2;
t4 = b | t3;
h = t1 ^ t4;
t6 = (~b) % 0x100000000;
t7 = t1 | t6;
e = t3 ^ t7;
t9 = a & e;
t10 = t1 ^ t6;
t11 = t4 & t10;
g = t9 ^ t11;
t13 = a ^ t3;
t14 = t10 & g;
f = t13 ^ t14
key[4 * 15 + 8] = e
key[4 * 15 + 9] = f
key[4 * 15 + 10] = g
key[4 * 15 + 11] = h
a = key[4 * 16 + 8]
b = key[4 * 16 + 9]
c = key[4 * 16 + 10]
d = key[4 * 16 + 11]
t1 = a ^ c;
t2 = d ^ t1;
t3 = a & t2;
t4 = d ^ t3;
t5 = b & t4;
g = t2 ^ t5;
t7 = a | g;
t8 = b | d;
t11 = a | d;
t9 = t4 & t7;
f = t8 ^ t9;
t12 = b ^ t11;
t13 = g ^ t9;
t15 = t3 ^ t8;
h = t12 ^ t13;
t16 = c & t15;
e = t12 ^ t16
key[4 * 16 + 8] = e
key[4 * 16 + 9] = f
key[4 * 16 + 10] = g
key[4 * 16 + 11] = h
a = key[4 * 17 + 8]
b = key[4 * 17 + 9]
c = key[4 * 17 + 10]
d = key[4 * 17 + 11]
t1 = (~a) % 0x100000000;
t2 = b ^ d;
t3 = c & t1;
t13 = d | t1;
e = t2 ^ t3;
t5 = c ^ t1;
t6 = c ^ e;
t7 = b & t6;
t10 = e | t5;
h = t5 ^ t7;
t9 = d | t7;
t11 = t9 & t10;
t14 = t2 ^ h;
g = a ^ t11;
t15 = g ^ t13;
f = t14 ^ t15
key[4 * 17 + 8] = e
key[4 * 17 + 9] = f
key[4 * 17 + 10] = g
key[4 * 17 + 11] = h
a = key[4 * 18 + 8]
b = key[4 * 18 + 9]
c = key[4 * 18 + 10]
d = key[4 * 18 + 11]
t1 = (~a) % 0x100000000;
t2 = b ^ t1;
t3 = a | t2;
t4 = d | t2;
t5 = c ^ t3;
g = d ^ t5;
t7 = b ^ t4;
t8 = t2 ^ g;
t9 = t5 & t7;
h = t8 ^ t9;
t11 = t5 ^ t7;
f = h ^ t11;
t13 = t8 & t11;
e = t5 ^ t13
key[4 * 18 + 8] = e
key[4 * 18 + 9] = f
key[4 * 18 + 10] = g
key[4 * 18 + 11] = h
a = key[4 * 19 + 8]
b = key[4 * 19 + 9]
c = key[4 * 19 + 10]
d = key[4 * 19 + 11]
t1 = a ^ d;
t2 = a & d;
t3 = c ^ t1;
t6 = b & t1;
t4 = b ^ t3;
t10 = (~t3) % 0x100000000;
h = t2 ^ t4;
t7 = a ^ t6;
t14 = (~t7) % 0x100000000;
t8 = c | t7;
t11 = t3 ^ t7;
g = t4 ^ t8;
t12 = h & t11;
f = t10 ^ t12;
e = t12 ^ t14
key[4 * 19 + 8] = e
key[4 * 19 + 9] = f
key[4 * 19 + 10] = g
key[4 * 19 + 11] = h
a = key[4 * 20 + 8]
b = key[4 * 20 + 9]
c = key[4 * 20 + 10]
d = key[4 * 20 + 11]
t1 = (~c) % 0x100000000;
t2 = b ^ c;
t3 = b | t1;
t4 = d ^ t3;
t5 = a & t4;
t7 = a ^ d;
h = t2 ^ t5;
t8 = b ^ t5;
t9 = t2 | t8;
t11 = d & t3;
f = t7 ^ t9;
t12 = t5 ^ f;
t15 = t1 | t4;
t13 = h & t12;
g = t11 ^ t13;
t16 = t12 ^ g;
e = t15 ^ t16
key[4 * 20 + 8] = e
key[4 * 20 + 9] = f
key[4 * 20 + 10] = g
key[4 * 20 + 11] = h
a = key[4 * 21 + 8]
b = key[4 * 21 + 9]
c = key[4 * 21 + 10]
d = key[4 * 21 + 11]
t1 = (~a) % 0x100000000;
t2 = a ^ d;
t3 = b ^ t2;
t4 = t1 | t2;
t5 = c ^ t4;
f = b ^ t5;
t13 = (~t5) % 0x100000000;
t7 = t2 | f;
t8 = d ^ t7;
t9 = t5 & t8;
g = t3 ^ t9;
t11 = t5 ^ t8;
e = g ^ t11;
t14 = t3 & t11;
h = t13 ^ t14
key[4 * 21 + 8] = e
key[4 * 21 + 9] = f
key[4 * 21 + 10] = g
key[4 * 21 + 11] = h
a = key[4 * 22 + 8]
b = key[4 * 22 + 9]
c = key[4 * 22 + 10]
d = key[4 * 22 + 11]
t1 = (~a) % 0x100000000;
t2 = a ^ b;
t3 = a ^ d;
t4 = c ^ t1;
t5 = t2 | t3;
e = t4 ^ t5;
t7 = d & e;
t8 = t2 ^ e;
t10 = t1 | e;
f = t7 ^ t8;
t11 = t2 | t7;
t12 = t3 ^ t10;
t14 = b ^ t7;
g = t11 ^ t12;
t15 = f & t12;
h = t14 ^ t15
key[4 * 22 + 8] = e
key[4 * 22 + 9] = f
key[4 * 22 + 10] = g
key[4 * 22 + 11] = h
a = key[4 * 23 + 8]
b = key[4 * 23 + 9]
c = key[4 * 23 + 10]
d = key[4 * 23 + 11]
t1 = a ^ d;
t2 = d & t1;
t3 = c ^ t2;
t4 = b | t3;
h = t1 ^ t4;
t6 = (~b) % 0x100000000;
t7 = t1 | t6;
e = t3 ^ t7;
t9 = a & e;
t10 = t1 ^ t6;
t11 = t4 & t10;
g = t9 ^ t11;
t13 = a ^ t3;
t14 = t10 & g;
f = t13 ^ t14
key[4 * 23 + 8] = e
key[4 * 23 + 9] = f
key[4 * 23 + 10] = g
key[4 * 23 + 11] = h
a = key[4 * 24 + 8]
b = key[4 * 24 + 9]
c = key[4 * 24 + 10]
d = key[4 * 24 + 11]
t1 = a ^ c;
t2 = d ^ t1;
t3 = a & t2;
t4 = d ^ t3;
t5 = b & t4;
g = t2 ^ t5;
t7 = a | g;
t8 = b | d;
t11 = a | d;
t9 = t4 & t7;
f = t8 ^ t9;
t12 = b ^ t11;
t13 = g ^ t9;
t15 = t3 ^ t8;
h = t12 ^ t13;
t16 = c & t15;
e = t12 ^ t16
key[4 * 24 + 8] = e
key[4 * 24 + 9] = f
key[4 * 24 + 10] = g
key[4 * 24 + 11] = h
a = key[4 * 25 + 8]
b = key[4 * 25 + 9]
c = key[4 * 25 + 10]
d = key[4 * 25 + 11]
t1 = (~a) % 0x100000000;
t2 = b ^ d;
t3 = c & t1;
t13 = d | t1;
e = t2 ^ t3;
t5 = c ^ t1;
t6 = c ^ e;
t7 = b & t6;
t10 = e | t5;
h = t5 ^ t7;
t9 = d | t7;
t11 = t9 & t10;
t14 = t2 ^ h;
g = a ^ t11;
t15 = g ^ t13;
f = t14 ^ t15
key[4 * 25 + 8] = e
key[4 * 25 + 9] = f
key[4 * 25 + 10] = g
key[4 * 25 + 11] = h
a = key[4 * 26 + 8]
b = key[4 * 26 + 9]
c = key[4 * 26 + 10]
d = key[4 * 26 + 11]
t1 = (~a) % 0x100000000;
t2 = b ^ t1;
t3 = a | t2;
t4 = d | t2;
t5 = c ^ t3;
g = d ^ t5;
t7 = b ^ t4;
t8 = t2 ^ g;
t9 = t5 & t7;
h = t8 ^ t9;
t11 = t5 ^ t7;
f = h ^ t11;
t13 = t8 & t11;
e = t5 ^ t13
key[4 * 26 + 8] = e
key[4 * 26 + 9] = f
key[4 * 26 + 10] = g
key[4 * 26 + 11] = h
a = key[4 * 27 + 8]
b = key[4 * 27 + 9]
c = key[4 * 27 + 10]
d = key[4 * 27 + 11]
t1 = a ^ d;
t2 = a & d;
t3 = c ^ t1;
t6 = b & t1;
t4 = b ^ t3;
t10 = (~t3) % 0x100000000;
h = t2 ^ t4;
t7 = a ^ t6;
t14 = (~t7) % 0x100000000;
t8 = c | t7;
t11 = t3 ^ t7;
g = t4 ^ t8;
t12 = h & t11;
f = t10 ^ t12;
e = t12 ^ t14
key[4 * 27 + 8] = e
key[4 * 27 + 9] = f
key[4 * 27 + 10] = g
key[4 * 27 + 11] = h
a = key[4 * 28 + 8]
b = key[4 * 28 + 9]
c = key[4 * 28 + 10]
d = key[4 * 28 + 11]
t1 = (~c) % 0x100000000;
t2 = b ^ c;
t3 = b | t1;
t4 = d ^ t3;
t5 = a & t4;
t7 = a ^ d;
h = t2 ^ t5;
t8 = b ^ t5;
t9 = t2 | t8;
t11 = d & t3;
f = t7 ^ t9;
t12 = t5 ^ f;
t15 = t1 | t4;
t13 = h & t12;
g = t11 ^ t13;
t16 = t12 ^ g;
e = t15 ^ t16
key[4 * 28 + 8] = e
key[4 * 28 + 9] = f
key[4 * 28 + 10] = g
key[4 * 28 + 11] = h
a = key[4 * 29 + 8]
b = key[4 * 29 + 9]
c = key[4 * 29 + 10]
d = key[4 * 29 + 11]
t1 = (~a) % 0x100000000;
t2 = a ^ d;
t3 = b ^ t2;
t4 = t1 | t2;
t5 = c ^ t4;
f = b ^ t5;
t13 = (~t5) % 0x100000000;
t7 = t2 | f;
t8 = d ^ t7;
t9 = t5 & t8;
g = t3 ^ t9;
t11 = t5 ^ t8;
e = g ^ t11;
t14 = t3 & t11;
h = t13 ^ t14
key[4 * 29 + 8] = e
key[4 * 29 + 9] = f
key[4 * 29 + 10] = g
key[4 * 29 + 11] = h
a = key[4 * 30 + 8]
b = key[4 * 30 + 9]
c = key[4 * 30 + 10]
d = key[4 * 30 + 11]
t1 = (~a) % 0x100000000;
t2 = a ^ b;
t3 = a ^ d;
t4 = c ^ t1;
t5 = t2 | t3;
e = t4 ^ t5;
t7 = d & e;
t8 = t2 ^ e;
t10 = t1 | e;
f = t7 ^ t8;
t11 = t2 | t7;
t12 = t3 ^ t10;
t14 = b ^ t7;
g = t11 ^ t12;
t15 = f & t12;
h = t14 ^ t15
key[4 * 30 + 8] = e
key[4 * 30 + 9] = f
key[4 * 30 + 10] = g
key[4 * 30 + 11] = h
a = key[4 * 31 + 8]
b = key[4 * 31 + 9]
c = key[4 * 31 + 10]
d = key[4 * 31 + 11]
t1 = a ^ d;
t2 = d & t1;
t3 = c ^ t2;
t4 = b | t3;
h = t1 ^ t4;
t6 = (~b) % 0x100000000;
t7 = t1 | t6;
e = t3 ^ t7;
t9 = a & e;
t10 = t1 ^ t6;
t11 = t4 & t10;
g = t9 ^ t11;
t13 = a ^ t3;
t14 = t10 & g;
f = t13 ^ t14
key[4 * 31 + 8] = e
key[4 * 31 + 9] = f
key[4 * 31 + 10] = g
key[4 * 31 + 11] = h
a = key[4 * 32 + 8]
b = key[4 * 32 + 9]
c = key[4 * 32 + 10]
d = key[4 * 32 + 11]
t1 = a ^ c;
t2 = d ^ t1;
t3 = a & t2;
t4 = d ^ t3;
t5 = b & t4;
g = t2 ^ t5;
t7 = a | g;
t8 = b | d;
t11 = a | d;
t9 = t4 & t7;
f = t8 ^ t9;
t12 = b ^ t11;
t13 = g ^ t9;
t15 = t3 ^ t8;
h = t12 ^ t13;
t16 = c & t15;
e = t12 ^ t16
key[4 * 32 + 8] = e
key[4 * 32 + 9] = f
key[4 * 32 + 10] = g
key[4 * 32 + 11] = h
def encrypt(key, in_blk):
# serpent_generate.py
a = in_blk[0]
b = in_blk[1]
c = in_blk[2]
d = in_blk[3]
if WORD_BIGENDIAN:
a = byteswap32(a)
b = byteswap32(b)
c = byteswap32(c)
d = byteswap32(d)
e = 0
f = 0
g = 0
h = 0
t1 = 0
t2 = 0
t3 = 0
t4 = 0
t5 = 0
t6 = 0
t7 = 0
t8 = 0
t9 = 0
t10 = 0
t11 = 0
t12 = 0
t13 = 0
t14 = 0
t15 = 0
t16 = 0
a ^= key[4 * 0 + 8]
b ^= key[4 * 0 + 9]
c ^= key[4 * 0 + 10]
d ^= key[4 * 0 + 11]
t1 = a ^ d;
t2 = a & d;
t3 = c ^ t1;
t6 = b & t1;
t4 = b ^ t3;
t10 = (~t3) % 0x100000000;
h = t2 ^ t4;
t7 = a ^ t6;
t14 = (~t7) % 0x100000000;
t8 = c | t7;
t11 = t3 ^ t7;
g = t4 ^ t8;
t12 = h & t11;
f = t10 ^ t12;
e = t12 ^ t14
e = rotl32(e, 13)
g = rotl32(g, 3)
h ^= g ^ ((e << 3) & 0xFFFFFFFF)
f ^= e ^ g
h = rotl32(h, 7)
f = rotl32(f, 1)
e ^= f ^ h
g ^= h ^ ((f << 7) & 0xFFFFFFFF)
e = rotl32(e, 5)
g = rotl32(g, 22)
e ^= key[4 * 1 + 8]
f ^= key[4 * 1 + 9]
g ^= key[4 * 1 + 10]
h ^= key[4 * 1 + 11]
t1 = (~e) % 0x100000000;
t2 = f ^ t1;
t3 = e | t2;
t4 = h | t2;
t5 = g ^ t3;
c = h ^ t5;
t7 = f ^ t4;
t8 = t2 ^ c;
t9 = t5 & t7;
d = t8 ^ t9;
t11 = t5 ^ t7;
b = d ^ t11;
t13 = t8 & t11;
a = t5 ^ t13
a = rotl32(a, 13)
c = rotl32(c, 3)
d ^= c ^ ((a << 3) & 0xFFFFFFFF)
b ^= a ^ c
d = rotl32(d, 7)
b = rotl32(b, 1)
a ^= b ^ d
c ^= d ^ ((b << 7) & 0xFFFFFFFF)
a = rotl32(a, 5)
c = rotl32(c, 22)
a ^= key[4 * 2 + 8]
b ^= key[4 * 2 + 9]
c ^= key[4 * 2 + 10]
d ^= key[4 * 2 + 11]
t1 = (~a) % 0x100000000;
t2 = b ^ d;
t3 = c & t1;
t13 = d | t1;
e = t2 ^ t3;
t5 = c ^ t1;
t6 = c ^ e;
t7 = b & t6;
t10 = e | t5;
h = t5 ^ t7;
t9 = d | t7;
t11 = t9 & t10;
t14 = t2 ^ h;
g = a ^ t11;
t15 = g ^ t13;
f = t14 ^ t15
e = rotl32(e, 13)
g = rotl32(g, 3)
h ^= g ^ ((e << 3) & 0xFFFFFFFF)
f ^= e ^ g
h = rotl32(h, 7)
f = rotl32(f, 1)
e ^= f ^ h
g ^= h ^ ((f << 7) & 0xFFFFFFFF)
e = rotl32(e, 5)
g = rotl32(g, 22)
e ^= key[4 * 3 + 8]
f ^= key[4 * 3 + 9]
g ^= key[4 * 3 + 10]
h ^= key[4 * 3 + 11]
t1 = e ^ g;
t2 = h ^ t1;
t3 = e & t2;
t4 = h ^ t3;
t5 = f & t4;
c = t2 ^ t5;
t7 = e | c;
t8 = f | h;
t11 = e | h;
t9 = t4 & t7;
b = t8 ^ t9;
t12 = f ^ t11;
t13 = c ^ t9;
t15 = t3 ^ t8;
d = t12 ^ t13;
t16 = g & t15;
a = t12 ^ t16
a = rotl32(a, 13)
c = rotl32(c, 3)
d ^= c ^ ((a << 3) & 0xFFFFFFFF)
b ^= a ^ c
d = rotl32(d, 7)
b = rotl32(b, 1)
a ^= b ^ d
c ^= d ^ ((b << 7) & 0xFFFFFFFF)
a = rotl32(a, 5)
c = rotl32(c, 22)
a ^= key[4 * 4 + 8]
b ^= key[4 * 4 + 9]
c ^= key[4 * 4 + 10]
d ^= key[4 * 4 + 11]
t1 = a ^ d;
t2 = d & t1;
t3 = c ^ t2;
t4 = b | t3;
h = t1 ^ t4;
t6 = (~b) % 0x100000000;
t7 = t1 | t6;
e = t3 ^ t7;
t9 = a & e;
t10 = t1 ^ t6;
t11 = t4 & t10;
g = t9 ^ t11;
t13 = a ^ t3;
t14 = t10 & g;
f = t13 ^ t14
e = rotl32(e, 13)
g = rotl32(g, 3)
h ^= g ^ ((e << 3) & 0xFFFFFFFF)
f ^= e ^ g
h = rotl32(h, 7)
f = rotl32(f, 1)
e ^= f ^ h
g ^= h ^ ((f << 7) & 0xFFFFFFFF)
e = rotl32(e, 5)
g = rotl32(g, 22)
e ^= key[4 * 5 + 8]
f ^= key[4 * 5 + 9]
g ^= key[4 * 5 + 10]
h ^= key[4 * 5 + 11]
t1 = (~e) % 0x100000000;
t2 = e ^ f;
t3 = e ^ h;
t4 = g ^ t1;
t5 = t2 | t3;
a = t4 ^ t5;
t7 = h & a;
t8 = t2 ^ a;
t10 = t1 | a;
b = t7 ^ t8;
t11 = t2 | t7;
t12 = t3 ^ t10;
t14 = f ^ t7;
c = t11 ^ t12;
t15 = b & t12;
d = t14 ^ t15
a = rotl32(a, 13)
c = rotl32(c, 3)
d ^= c ^ ((a << 3) & 0xFFFFFFFF)
b ^= a ^ c
d = rotl32(d, 7)
b = rotl32(b, 1)
a ^= b ^ d
c ^= d ^ ((b << 7) & 0xFFFFFFFF)
a = rotl32(a, 5)
c = rotl32(c, 22)
a ^= key[4 * 6 + 8]
b ^= key[4 * 6 + 9]
c ^= key[4 * 6 + 10]
d ^= key[4 * 6 + 11]
t1 = (~a) % 0x100000000;
t2 = a ^ d;
t3 = b ^ t2;
t4 = t1 | t2;
t5 = c ^ t4;
f = b ^ t5;
t13 = (~t5) % 0x100000000;
t7 = t2 | f;
t8 = d ^ t7;
t9 = t5 & t8;
g = t3 ^ t9;
t11 = t5 ^ t8;
e = g ^ t11;
t14 = t3 & t11;
h = t13 ^ t14
e = rotl32(e, 13)
g = rotl32(g, 3)
h ^= g ^ ((e << 3) & 0xFFFFFFFF)
f ^= e ^ g
h = rotl32(h, 7)
f = rotl32(f, 1)
e ^= f ^ h
g ^= h ^ ((f << 7) & 0xFFFFFFFF)
e = rotl32(e, 5)
g = rotl32(g, 22)
e ^= key[4 * 7 + 8]
f ^= key[4 * 7 + 9]
g ^= key[4 * 7 + 10]
h ^= key[4 * 7 + 11]
t1 = (~g) % 0x100000000;
t2 = f ^ g;
t3 = f | t1;
t4 = h ^ t3;
t5 = e & t4;
t7 = e ^ h;
d = t2 ^ t5;
t8 = f ^ t5;
t9 = t2 | t8;
t11 = h & t3;
b = t7 ^ t9;
t12 = t5 ^ b;
t15 = t1 | t4;
t13 = d & t12;
c = t11 ^ t13;
t16 = t12 ^ c;
a = t15 ^ t16
a = rotl32(a, 13)
c = rotl32(c, 3)
d ^= c ^ ((a << 3) & 0xFFFFFFFF)
b ^= a ^ c
d = rotl32(d, 7)
b = rotl32(b, 1)
a ^= b ^ d
c ^= d ^ ((b << 7) & 0xFFFFFFFF)
a = rotl32(a, 5)
c = rotl32(c, 22)
a ^= key[4 * 8 + 8]
b ^= key[4 * 8 + 9]
c ^= key[4 * 8 + 10]
d ^= key[4 * 8 + 11]
t1 = a ^ d;
t2 = a & d;
t3 = c ^ t1;
t6 = b & t1;
t4 = b ^ t3;
t10 = (~t3) % 0x100000000;
h = t2 ^ t4;
t7 = a ^ t6;
t14 = (~t7) % 0x100000000;
t8 = c | t7;
t11 = t3 ^ t7;
g = t4 ^ t8;
t12 = h & t11;
f = t10 ^ t12;
e = t12 ^ t14
e = rotl32(e, 13)
g = rotl32(g, 3)
h ^= g ^ ((e << 3) & 0xFFFFFFFF)
f ^= e ^ g
h = rotl32(h, 7)
f = rotl32(f, 1)
e ^= f ^ h
g ^= h ^ ((f << 7) & 0xFFFFFFFF)
e = rotl32(e, 5)
g = rotl32(g, 22)
e ^= key[4 * 9 + 8]
f ^= key[4 * 9 + 9]
g ^= key[4 * 9 + 10]
h ^= key[4 * 9 + 11]
t1 = (~e) % 0x100000000;
t2 = f ^ t1;
t3 = e | t2;
t4 = h | t2;
t5 = g ^ t3;
c = h ^ t5;
t7 = f ^ t4;
t8 = t2 ^ c;
t9 = t5 & t7;
d = t8 ^ t9;
t11 = t5 ^ t7;
b = d ^ t11;
t13 = t8 & t11;
a = t5 ^ t13
a = rotl32(a, 13)
c = rotl32(c, 3)
d ^= c ^ ((a << 3) & 0xFFFFFFFF)
b ^= a ^ c
d = rotl32(d, 7)
b = rotl32(b, 1)
a ^= b ^ d
c ^= d ^ ((b << 7) & 0xFFFFFFFF)
a = rotl32(a, 5)
c = rotl32(c, 22)
a ^= key[4 * 10 + 8]
b ^= key[4 * 10 + 9]
c ^= key[4 * 10 + 10]
d ^= key[4 * 10 + 11]
t1 = (~a) % 0x100000000;
t2 = b ^ d;
t3 = c & t1;
t13 = d | t1;
e = t2 ^ t3;
t5 = c ^ t1;
t6 = c ^ e;
t7 = b & t6;
t10 = e | t5;
h = t5 ^ t7;
t9 = d | t7;
t11 = t9 & t10;
t14 = t2 ^ h;
g = a ^ t11;
t15 = g ^ t13;
f = t14 ^ t15
e = rotl32(e, 13)
g = rotl32(g, 3)
h ^= g ^ ((e << 3) & 0xFFFFFFFF)
f ^= e ^ g
h = rotl32(h, 7)
f = rotl32(f, 1)
e ^= f ^ h
g ^= h ^ ((f << 7) & 0xFFFFFFFF)
e = rotl32(e, 5)
g = rotl32(g, 22)
e ^= key[4 * 11 + 8]
f ^= key[4 * 11 + 9]
g ^= key[4 * 11 + 10]
h ^= key[4 * 11 + 11]
t1 = e ^ g;
t2 = h ^ t1;
t3 = e & t2;
t4 = h ^ t3;
t5 = f & t4;
c = t2 ^ t5;
t7 = e | c;
t8 = f | h;
t11 = e | h;
t9 = t4 & t7;
b = t8 ^ t9;
t12 = f ^ t11;
t13 = c ^ t9;
t15 = t3 ^ t8;
d = t12 ^ t13;
t16 = g & t15;
a = t12 ^ t16
a = rotl32(a, 13)
c = rotl32(c, 3)
d ^= c ^ ((a << 3) & 0xFFFFFFFF)
b ^= a ^ c
d = rotl32(d, 7)
b = rotl32(b, 1)
a ^= b ^ d
c ^= d ^ ((b << 7) & 0xFFFFFFFF)
a = rotl32(a, 5)
c = rotl32(c, 22)
a ^= key[4 * 12 + 8]
b ^= key[4 * 12 + 9]
c ^= key[4 * 12 + 10]
d ^= key[4 * 12 + 11]
t1 = a ^ d;
t2 = d & t1;
t3 = c ^ t2;
t4 = b | t3;
h = t1 ^ t4;
t6 = (~b) % 0x100000000;
t7 = t1 | t6;
e = t3 ^ t7;
t9 = a & e;
t10 = t1 ^ t6;
t11 = t4 & t10;
g = t9 ^ t11;
t13 = a ^ t3;
t14 = t10 & g;
f = t13 ^ t14
e = rotl32(e, 13)
g = rotl32(g, 3)
h ^= g ^ ((e << 3) & 0xFFFFFFFF)
f ^= e ^ g
h = rotl32(h, 7)
f = rotl32(f, 1)
e ^= f ^ h
g ^= h ^ ((f << 7) & 0xFFFFFFFF)
e = rotl32(e, 5)
g = rotl32(g, 22)
e ^= key[4 * 13 + 8]
f ^= key[4 * 13 + 9]
g ^= key[4 * 13 + 10]
h ^= key[4 * 13 + 11]
t1 = (~e) % 0x100000000;
t2 = e ^ f;
t3 = e ^ h;
t4 = g ^ t1;
t5 = t2 | t3;
a = t4 ^ t5;
t7 = h & a;
t8 = t2 ^ a;
t10 = t1 | a;
b = t7 ^ t8;
t11 = t2 | t7;
t12 = t3 ^ t10;
t14 = f ^ t7;
c = t11 ^ t12;
t15 = b & t12;
d = t14 ^ t15
a = rotl32(a, 13)
c = rotl32(c, 3)
d ^= c ^ ((a << 3) & 0xFFFFFFFF)
b ^= a ^ c
d = rotl32(d, 7)
b = rotl32(b, 1)
a ^= b ^ d
c ^= d ^ ((b << 7) & 0xFFFFFFFF)
a = rotl32(a, 5)
c = rotl32(c, 22)
a ^= key[4 * 14 + 8]
b ^= key[4 * 14 + 9]
c ^= key[4 * 14 + 10]
d ^= key[4 * 14 + 11]
t1 = (~a) % 0x100000000;
t2 = a ^ d;
t3 = b ^ t2;
t4 = t1 | t2;
t5 = c ^ t4;
f = b ^ t5;
t13 = (~t5) % 0x100000000;
t7 = t2 | f;
t8 = d ^ t7;
t9 = t5 & t8;
g = t3 ^ t9;
t11 = t5 ^ t8;
e = g ^ t11;
t14 = t3 & t11;
h = t13 ^ t14
e = rotl32(e, 13)
g = rotl32(g, 3)
h ^= g ^ ((e << 3) & 0xFFFFFFFF)
f ^= e ^ g
h = rotl32(h, 7)
f = rotl32(f, 1)
e ^= f ^ h
g ^= h ^ ((f << 7) & 0xFFFFFFFF)
e = rotl32(e, 5)
g = rotl32(g, 22)
e ^= key[4 * 15 + 8]
f ^= key[4 * 15 + 9]
g ^= key[4 * 15 + 10]
h ^= key[4 * 15 + 11]
t1 = (~g) % 0x100000000;
t2 = f ^ g;
t3 = f | t1;
t4 = h ^ t3;
t5 = e & t4;
t7 = e ^ h;
d = t2 ^ t5;
t8 = f ^ t5;
t9 = t2 | t8;
t11 = h & t3;
b = t7 ^ t9;
t12 = t5 ^ b;
t15 = t1 | t4;
t13 = d & t12;
c = t11 ^ t13;
t16 = t12 ^ c;
a = t15 ^ t16
a = rotl32(a, 13)
c = rotl32(c, 3)
d ^= c ^ ((a << 3) & 0xFFFFFFFF)
b ^= a ^ c
d = rotl32(d, 7)
b = rotl32(b, 1)
a ^= b ^ d
c ^= d ^ ((b << 7) & 0xFFFFFFFF)
a = rotl32(a, 5)
c = rotl32(c, 22)
a ^= key[4 * 16 + 8]
b ^= key[4 * 16 + 9]
c ^= key[4 * 16 + 10]
d ^= key[4 * 16 + 11]
t1 = a ^ d;
t2 = a & d;
t3 = c ^ t1;
t6 = b & t1;
t4 = b ^ t3;
t10 = (~t3) % 0x100000000;
h = t2 ^ t4;
t7 = a ^ t6;
t14 = (~t7) % 0x100000000;
t8 = c | t7;
t11 = t3 ^ t7;
g = t4 ^ t8;
t12 = h & t11;
f = t10 ^ t12;
e = t12 ^ t14
e = rotl32(e, 13)
g = rotl32(g, 3)
h ^= g ^ ((e << 3) & 0xFFFFFFFF)
f ^= e ^ g
h = rotl32(h, 7)
f = rotl32(f, 1)
e ^= f ^ h
g ^= h ^ ((f << 7) & 0xFFFFFFFF)
e = rotl32(e, 5)
g = rotl32(g, 22)
e ^= key[4 * 17 + 8]
f ^= key[4 * 17 + 9]
g ^= key[4 * 17 + 10]
h ^= key[4 * 17 + 11]
t1 = (~e) % 0x100000000;
t2 = f ^ t1;
t3 = e | t2;
t4 = h | t2;
t5 = g ^ t3;
c = h ^ t5;
t7 = f ^ t4;
t8 = t2 ^ c;
t9 = t5 & t7;
d = t8 ^ t9;
t11 = t5 ^ t7;
b = d ^ t11;
t13 = t8 & t11;
a = t5 ^ t13
a = rotl32(a, 13)
c = rotl32(c, 3)
d ^= c ^ ((a << 3) & 0xFFFFFFFF)
b ^= a ^ c
d = rotl32(d, 7)
b = rotl32(b, 1)
a ^= b ^ d
c ^= d ^ ((b << 7) & 0xFFFFFFFF)
a = rotl32(a, 5)
c = rotl32(c, 22)
a ^= key[4 * 18 + 8]
b ^= key[4 * 18 + 9]
c ^= key[4 * 18 + 10]
d ^= key[4 * 18 + 11]
t1 = (~a) % 0x100000000;
t2 = b ^ d;
t3 = c & t1;
t13 = d | t1;
e = t2 ^ t3;
t5 = c ^ t1;
t6 = c ^ e;
t7 = b & t6;
t10 = e | t5;
h = t5 ^ t7;
t9 = d | t7;
t11 = t9 & t10;
t14 = t2 ^ h;
g = a ^ t11;
t15 = g ^ t13;
f = t14 ^ t15
e = rotl32(e, 13)
g = rotl32(g, 3)
h ^= g ^ ((e << 3) & 0xFFFFFFFF)
f ^= e ^ g
h = rotl32(h, 7)
f = rotl32(f, 1)
e ^= f ^ h
g ^= h ^ ((f << 7) & 0xFFFFFFFF)
e = rotl32(e, 5)
g = rotl32(g, 22)
e ^= key[4 * 19 + 8]
f ^= key[4 * 19 + 9]
g ^= key[4 * 19 + 10]
h ^= key[4 * 19 + 11]
t1 = e ^ g;
t2 = h ^ t1;
t3 = e & t2;
t4 = h ^ t3;
t5 = f & t4;
c = t2 ^ t5;
t7 = e | c;
t8 = f | h;
t11 = e | h;
t9 = t4 & t7;
b = t8 ^ t9;
t12 = f ^ t11;
t13 = c ^ t9;
t15 = t3 ^ t8;
d = t12 ^ t13;
t16 = g & t15;
a = t12 ^ t16
a = rotl32(a, 13)
c = rotl32(c, 3)
d ^= c ^ ((a << 3) & 0xFFFFFFFF)
b ^= a ^ c
d = rotl32(d, 7)
b = rotl32(b, 1)
a ^= b ^ d
c ^= d ^ ((b << 7) & 0xFFFFFFFF)
a = rotl32(a, 5)
c = rotl32(c, 22)
a ^= key[4 * 20 + 8]
b ^= key[4 * 20 + 9]
c ^= key[4 * 20 + 10]
d ^= key[4 * 20 + 11]
t1 = a ^ d;
t2 = d & t1;
t3 = c ^ t2;
t4 = b | t3;
h = t1 ^ t4;
t6 = (~b) % 0x100000000;
t7 = t1 | t6;
e = t3 ^ t7;
t9 = a & e;
t10 = t1 ^ t6;
t11 = t4 & t10;
g = t9 ^ t11;
t13 = a ^ t3;
t14 = t10 & g;
f = t13 ^ t14
e = rotl32(e, 13)
g = rotl32(g, 3)
h ^= g ^ ((e << 3) & 0xFFFFFFFF)
f ^= e ^ g
h = rotl32(h, 7)
f = rotl32(f, 1)
e ^= f ^ h
g ^= h ^ ((f << 7) & 0xFFFFFFFF)
e = rotl32(e, 5)
g = rotl32(g, 22)
e ^= key[4 * 21 + 8]
f ^= key[4 * 21 + 9]
g ^= key[4 * 21 + 10]
h ^= key[4 * 21 + 11]
t1 = (~e) % 0x100000000;
t2 = e ^ f;
t3 = e ^ h;
t4 = g ^ t1;
t5 = t2 | t3;
a = t4 ^ t5;
t7 = h & a;
t8 = t2 ^ a;
t10 = t1 | a;
b = t7 ^ t8;
t11 = t2 | t7;
t12 = t3 ^ t10;
t14 = f ^ t7;
c = t11 ^ t12;
t15 = b & t12;
d = t14 ^ t15
a = rotl32(a, 13)
c = rotl32(c, 3)
d ^= c ^ ((a << 3) & 0xFFFFFFFF)
b ^= a ^ c
d = rotl32(d, 7)
b = rotl32(b, 1)
a ^= b ^ d
c ^= d ^ ((b << 7) & 0xFFFFFFFF)
a = rotl32(a, 5)
c = rotl32(c, 22)
a ^= key[4 * 22 + 8]
b ^= key[4 * 22 + 9]
c ^= key[4 * 22 + 10]
d ^= key[4 * 22 + 11]
t1 = (~a) % 0x100000000;
t2 = a ^ d;
t3 = b ^ t2;
t4 = t1 | t2;
t5 = c ^ t4;
f = b ^ t5;
t13 = (~t5) % 0x100000000;
t7 = t2 | f;
t8 = d ^ t7;
t9 = t5 & t8;
g = t3 ^ t9;
t11 = t5 ^ t8;
e = g ^ t11;
t14 = t3 & t11;
h = t13 ^ t14
e = rotl32(e, 13)
g = rotl32(g, 3)
h ^= g ^ ((e << 3) & 0xFFFFFFFF)
f ^= e ^ g
h = rotl32(h, 7)
f = rotl32(f, 1)
e ^= f ^ h
g ^= h ^ ((f << 7) & 0xFFFFFFFF)
e = rotl32(e, 5)
g = rotl32(g, 22)
e ^= key[4 * 23 + 8]
f ^= key[4 * 23 + 9]
g ^= key[4 * 23 + 10]
h ^= key[4 * 23 + 11]
t1 = (~g) % 0x100000000;
t2 = f ^ g;
t3 = f | t1;
t4 = h ^ t3;
t5 = e & t4;
t7 = e ^ h;
d = t2 ^ t5;
t8 = f ^ t5;
t9 = t2 | t8;
t11 = h & t3;
b = t7 ^ t9;
t12 = t5 ^ b;
t15 = t1 | t4;
t13 = d & t12;
c = t11 ^ t13;
t16 = t12 ^ c;
a = t15 ^ t16
a = rotl32(a, 13)
c = rotl32(c, 3)
d ^= c ^ ((a << 3) & 0xFFFFFFFF)
b ^= a ^ c
d = rotl32(d, 7)
b = rotl32(b, 1)
a ^= b ^ d
c ^= d ^ ((b << 7) & 0xFFFFFFFF)
a = rotl32(a, 5)
c = rotl32(c, 22)
a ^= key[4 * 24 + 8]
b ^= key[4 * 24 + 9]
c ^= key[4 * 24 + 10]
d ^= key[4 * 24 + 11]
t1 = a ^ d;
t2 = a & d;
t3 = c ^ t1;
t6 = b & t1;
t4 = b ^ t3;
t10 = (~t3) % 0x100000000;
h = t2 ^ t4;
t7 = a ^ t6;
t14 = (~t7) % 0x100000000;
t8 = c | t7;
t11 = t3 ^ t7;
g = t4 ^ t8;
t12 = h & t11;
f = t10 ^ t12;
e = t12 ^ t14
e = rotl32(e, 13)
g = rotl32(g, 3)
h ^= g ^ ((e << 3) & 0xFFFFFFFF)
f ^= e ^ g
h = rotl32(h, 7)
f = rotl32(f, 1)
e ^= f ^ h
g ^= h ^ ((f << 7) & 0xFFFFFFFF)
e = rotl32(e, 5)
g = rotl32(g, 22)
e ^= key[4 * 25 + 8]
f ^= key[4 * 25 + 9]
g ^= key[4 * 25 + 10]
h ^= key[4 * 25 + 11]
t1 = (~e) % 0x100000000;
t2 = f ^ t1;
t3 = e | t2;
t4 = h | t2;
t5 = g ^ t3;
c = h ^ t5;
t7 = f ^ t4;
t8 = t2 ^ c;
t9 = t5 & t7;
d = t8 ^ t9;
t11 = t5 ^ t7;
b = d ^ t11;
t13 = t8 & t11;
a = t5 ^ t13
a = rotl32(a, 13)
c = rotl32(c, 3)
d ^= c ^ ((a << 3) & 0xFFFFFFFF)
b ^= a ^ c
d = rotl32(d, 7)
b = rotl32(b, 1)
a ^= b ^ d
c ^= d ^ ((b << 7) & 0xFFFFFFFF)
a = rotl32(a, 5)
c = rotl32(c, 22)
a ^= key[4 * 26 + 8]
b ^= key[4 * 26 + 9]
c ^= key[4 * 26 + 10]
d ^= key[4 * 26 + 11]
t1 = (~a) % 0x100000000;
t2 = b ^ d;
t3 = c & t1;
t13 = d | t1;
e = t2 ^ t3;
t5 = c ^ t1;
t6 = c ^ e;
t7 = b & t6;
t10 = e | t5;
h = t5 ^ t7;
t9 = d | t7;
t11 = t9 & t10;
t14 = t2 ^ h;
g = a ^ t11;
t15 = g ^ t13;
f = t14 ^ t15
e = rotl32(e, 13)
g = rotl32(g, 3)
h ^= g ^ ((e << 3) & 0xFFFFFFFF)
f ^= e ^ g
h = rotl32(h, 7)
f = rotl32(f, 1)
e ^= f ^ h
g ^= h ^ ((f << 7) & 0xFFFFFFFF)
e = rotl32(e, 5)
g = rotl32(g, 22)
e ^= key[4 * 27 + 8]
f ^= key[4 * 27 + 9]
g ^= key[4 * 27 + 10]
h ^= key[4 * 27 + 11]
t1 = e ^ g;
t2 = h ^ t1;
t3 = e & t2;
t4 = h ^ t3;
t5 = f & t4;
c = t2 ^ t5;
t7 = e | c;
t8 = f | h;
t11 = e | h;
t9 = t4 & t7;
b = t8 ^ t9;
t12 = f ^ t11;
t13 = c ^ t9;
t15 = t3 ^ t8;
d = t12 ^ t13;
t16 = g & t15;
a = t12 ^ t16
a = rotl32(a, 13)
c = rotl32(c, 3)
d ^= c ^ ((a << 3) & 0xFFFFFFFF)
b ^= a ^ c
d = rotl32(d, 7)
b = rotl32(b, 1)
a ^= b ^ d
c ^= d ^ ((b << 7) & 0xFFFFFFFF)
a = rotl32(a, 5)
c = rotl32(c, 22)
a ^= key[4 * 28 + 8]
b ^= key[4 * 28 + 9]
c ^= key[4 * 28 + 10]
d ^= key[4 * 28 + 11]
t1 = a ^ d;
t2 = d & t1;
t3 = c ^ t2;
t4 = b | t3;
h = t1 ^ t4;
t6 = (~b) % 0x100000000;
t7 = t1 | t6;
e = t3 ^ t7;
t9 = a & e;
t10 = t1 ^ t6;
t11 = t4 & t10;
g = t9 ^ t11;
t13 = a ^ t3;
t14 = t10 & g;
f = t13 ^ t14
e = rotl32(e, 13)
g = rotl32(g, 3)
h ^= g ^ ((e << 3) & 0xFFFFFFFF)
f ^= e ^ g
h = rotl32(h, 7)
f = rotl32(f, 1)
e ^= f ^ h
g ^= h ^ ((f << 7) & 0xFFFFFFFF)
e = rotl32(e, 5)
g = rotl32(g, 22)
e ^= key[4 * 29 + 8]
f ^= key[4 * 29 + 9]
g ^= key[4 * 29 + 10]
h ^= key[4 * 29 + 11]
t1 = (~e) % 0x100000000;
t2 = e ^ f;
t3 = e ^ h;
t4 = g ^ t1;
t5 = t2 | t3;
a = t4 ^ t5;
t7 = h & a;
t8 = t2 ^ a;
t10 = t1 | a;
b = t7 ^ t8;
t11 = t2 | t7;
t12 = t3 ^ t10;
t14 = f ^ t7;
c = t11 ^ t12;
t15 = b & t12;
d = t14 ^ t15
a = rotl32(a, 13)
c = rotl32(c, 3)
d ^= c ^ ((a << 3) & 0xFFFFFFFF)
b ^= a ^ c
d = rotl32(d, 7)
b = rotl32(b, 1)
a ^= b ^ d
c ^= d ^ ((b << 7) & 0xFFFFFFFF)
a = rotl32(a, 5)
c = rotl32(c, 22)
a ^= key[4 * 30 + 8]
b ^= key[4 * 30 + 9]
c ^= key[4 * 30 + 10]
d ^= key[4 * 30 + 11]
t1 = (~a) % 0x100000000;
t2 = a ^ d;
t3 = b ^ t2;
t4 = t1 | t2;
t5 = c ^ t4;
f = b ^ t5;
t13 = (~t5) % 0x100000000;
t7 = t2 | f;
t8 = d ^ t7;
t9 = t5 & t8;
g = t3 ^ t9;
t11 = t5 ^ t8;
e = g ^ t11;
t14 = t3 & t11;
h = t13 ^ t14
e = rotl32(e, 13)
g = rotl32(g, 3)
h ^= g ^ ((e << 3) & 0xFFFFFFFF)
f ^= e ^ g
h = rotl32(h, 7)
f = rotl32(f, 1)
e ^= f ^ h
g ^= h ^ ((f << 7) & 0xFFFFFFFF)
e = rotl32(e, 5)
g = rotl32(g, 22)
e ^= key[4 * 31 + 8]
f ^= key[4 * 31 + 9]
g ^= key[4 * 31 + 10]
h ^= key[4 * 31 + 11]
t1 = (~g) % 0x100000000;
t2 = f ^ g;
t3 = f | t1;
t4 = h ^ t3;
t5 = e & t4;
t7 = e ^ h;
d = t2 ^ t5;
t8 = f ^ t5;
t9 = t2 | t8;
t11 = h & t3;
b = t7 ^ t9;
t12 = t5 ^ b;
t15 = t1 | t4;
t13 = d & t12;
c = t11 ^ t13;
t16 = t12 ^ c;
a = t15 ^ t16
a ^= key[4 * 32 + 8]
b ^= key[4 * 32 + 9]
c ^= key[4 * 32 + 10]
d ^= key[4 * 32 + 11]
if WORD_BIGENDIAN:
a = byteswap32(a)
b = byteswap32(b)
c = byteswap32(c)
d = byteswap32(d)
in_blk[0] = a
in_blk[1] = b
in_blk[2] = c
in_blk[3] = d
def _serpent_inv_lt(x0, x1, x2, x3):
    # Inverse of the Serpent linear transformation, applied between rounds
    # during decryption.  All values are 32-bit words; shifts are masked
    # back to 32 bits exactly as in the generated code.
    x2 = rotr32(x2, 22)
    x0 = rotr32(x0, 5)
    x2 ^= x3 ^ ((x1 << 7) & 0xFFFFFFFF)
    x0 ^= x1 ^ x3
    x3 = rotr32(x3, 7)
    x1 = rotr32(x1, 1)
    x3 ^= x2 ^ ((x0 << 3) & 0xFFFFFFFF)
    x1 ^= x0 ^ x2
    x2 = rotr32(x2, 3)
    x0 = rotr32(x0, 13)
    return x0, x1, x2, x3


# The eight inverse S-boxes below are the bitsliced boolean formulas from
# the reference implementation (serpent_generate.py), transcribed verbatim
# from the unrolled rounds.  Each maps four 32-bit words to four words.
# Note (~x) & 0xFFFFFFFF == (~x) % 0x100000000 for Python integers.

def _serpent_is0(a, b, c, d):
    # Inverse S-box 0.
    t1 = (~a) & 0xFFFFFFFF
    t2 = a ^ b
    t3 = t1 | t2
    t4 = d ^ t3
    t7 = d & t2
    t5 = c ^ t4
    t8 = t1 ^ t7
    y2 = t2 ^ t5
    t11 = a & t4
    t9 = y2 & t8
    t14 = t5 ^ t8
    y1 = t4 ^ t9
    t12 = t5 | y1
    y3 = t11 ^ t12
    y0 = y3 ^ t14
    return y0, y1, y2, y3


def _serpent_is1(a, b, c, d):
    # Inverse S-box 1.
    t1 = a ^ d
    t2 = a & b
    t3 = b ^ c
    t4 = a ^ t3
    t5 = b | d
    t7 = c | t1
    y3 = t4 ^ t5
    t8 = b ^ t7
    t11 = (~t2) & 0xFFFFFFFF
    t9 = t4 & t8
    y1 = t1 ^ t9
    t13 = t9 ^ t11
    t12 = y3 & y1
    y2 = t12 ^ t13
    t15 = a & d
    t16 = c ^ t13
    y0 = t15 ^ t16
    return y0, y1, y2, y3


def _serpent_is2(a, b, c, d):
    # Inverse S-box 2.
    t1 = b ^ d
    t2 = (~t1) & 0xFFFFFFFF
    t3 = a ^ c
    t4 = c ^ t1
    t7 = a | t2
    t5 = b & t4
    t8 = d ^ t7
    t11 = (~t4) & 0xFFFFFFFF
    y0 = t3 ^ t5
    t9 = t3 | t8
    t14 = d & t11
    y3 = t1 ^ t9
    t12 = y0 | y3
    y1 = t11 ^ t12
    t15 = t3 ^ t12
    y2 = t14 ^ t15
    return y0, y1, y2, y3


def _serpent_is3(a, b, c, d):
    # Inverse S-box 3.
    t1 = b ^ c
    t2 = b | c
    t3 = a ^ c
    t7 = a ^ d
    t4 = t2 ^ t3
    t5 = d | t4
    t9 = t2 ^ t7
    y0 = t1 ^ t5
    t8 = t1 | t5
    t11 = a & t4
    y2 = t8 ^ t9
    t12 = y0 | t9
    y1 = t11 ^ t12
    t14 = a & y2
    t15 = t2 ^ t14
    t16 = y0 & t15
    y3 = t4 ^ t16
    return y0, y1, y2, y3


def _serpent_is4(a, b, c, d):
    # Inverse S-box 4.
    t1 = c ^ d
    t2 = c | d
    t3 = b ^ t2
    t4 = a & t3
    y1 = t1 ^ t4
    t6 = a ^ d
    t7 = b | d
    t8 = t6 & t7
    y3 = t3 ^ t8
    t10 = (~a) & 0xFFFFFFFF
    t11 = c ^ y3
    t12 = t10 | t11
    y0 = t3 ^ t12
    t14 = c | t4
    t15 = t7 ^ t14
    t16 = y3 | t10
    y2 = t15 ^ t16
    return y0, y1, y2, y3


def _serpent_is5(a, b, c, d):
    # Inverse S-box 5.
    t1 = (~c) & 0xFFFFFFFF
    t2 = b & t1
    t3 = d ^ t2
    t4 = a & t3
    t5 = b ^ t1
    y3 = t4 ^ t5
    t7 = b | y3
    t8 = a & t7
    y1 = t3 ^ t8
    t10 = a | d
    t11 = t1 ^ t7
    y0 = t10 ^ t11
    t13 = a ^ c
    t14 = b & t10
    t15 = t4 | t13
    y2 = t14 ^ t15
    return y0, y1, y2, y3


def _serpent_is6(a, b, c, d):
    # Inverse S-box 6.
    t1 = (~a) & 0xFFFFFFFF
    t2 = a ^ b
    t3 = c ^ t2
    t4 = c | t1
    t5 = d ^ t4
    t13 = d & t1
    y1 = t3 ^ t5
    t7 = t3 & t5
    t8 = t2 ^ t7
    t9 = b | t8
    y3 = t5 ^ t9
    t11 = b | y3
    y0 = t8 ^ t11
    t14 = t3 ^ t11
    y2 = t13 ^ t14
    return y0, y1, y2, y3


def _serpent_is7(a, b, c, d):
    # Inverse S-box 7.
    t1 = a & b
    t2 = a | b
    t3 = c | t1
    t4 = d & t2
    y3 = t3 ^ t4
    t6 = (~d) & 0xFFFFFFFF
    t7 = b ^ t4
    t8 = y3 ^ t6
    t11 = c ^ t7
    t9 = t7 | t8
    y1 = a ^ t9
    t12 = d | y1
    y0 = t11 ^ t12
    t14 = a & y3
    t15 = t3 ^ y1
    t16 = y0 ^ t14
    y2 = t15 ^ t16
    return y0, y1, y2, y3


# Decryption round r (counting 31 down to 0) uses inverse S-box (r mod 8),
# exactly as in the unrolled reference code.
_SERPENT_INV_SBOXES = (
    _serpent_is0, _serpent_is1, _serpent_is2, _serpent_is3,
    _serpent_is4, _serpent_is5, _serpent_is6, _serpent_is7,
)


def decrypt(key, in_blk):
    """Decrypt one 16-byte Serpent block in place.

    ``in_blk`` is a mutable sequence of four 32-bit words holding the
    ciphertext block; it is overwritten with the plaintext.  ``key`` is
    the expanded key schedule, with round key r occupying
    ``key[4*r+8 .. 4*r+11]`` (same layout the generated code indexed).

    This is a loop/table rewrite of the code emitted by
    serpent_generate.py: the 32 unrolled rounds collapse to one loop over
    the eight inverse S-boxes and the inverse linear transformation.
    """
    a = in_blk[0]
    b = in_blk[1]
    c = in_blk[2]
    d = in_blk[3]
    if WORD_BIGENDIAN:
        a = byteswap32(a)
        b = byteswap32(b)
        c = byteswap32(c)
        d = byteswap32(d)
    # Undo the final key mixing (round key 32), then run the 32 rounds in
    # reverse: each round applies its inverse S-box, XORs in that round's
    # key, and -- for every round but the last -- undoes the linear
    # transformation.
    a ^= key[4 * 32 + 8]
    b ^= key[4 * 32 + 9]
    c ^= key[4 * 32 + 10]
    d ^= key[4 * 32 + 11]
    for r in range(31, -1, -1):
        a, b, c, d = _SERPENT_INV_SBOXES[r % 8](a, b, c, d)
        a ^= key[4 * r + 8]
        b ^= key[4 * r + 9]
        c ^= key[4 * r + 10]
        d ^= key[4 * r + 11]
        if r > 0:
            a, b, c, d = _serpent_inv_lt(a, b, c, d)
    if WORD_BIGENDIAN:
        a = byteswap32(a)
        b = byteswap32(b)
        c = byteswap32(c)
        d = byteswap32(d)
    in_blk[0] = a
    in_blk[1] = b
    in_blk[2] = c
    in_blk[3] = d
#CBC Encrypt - Jason Reaves
def serpent_cbc_encrypt(key, data, iv='\x00'*16):
    """Encrypt ``data`` with Serpent in CBC mode.

    ``data`` is consumed in 16-byte blocks (a trailing partial block is
    silently ignored, matching the original behaviour); ``iv`` is the
    16-byte initialisation vector.  Returns the ciphertext as a byte
    string.
    """
    # Build the key schedule once instead of re-instantiating Serpent for
    # every block.  Assumes Serpent.encrypt is stateless per block, which
    # matches the pure block functions in this module -- TODO confirm.
    cipher = Serpent(key)
    out = []
    last = iv
    # Floor division: identical to `/` under Python 2 but also correct on
    # Python 3, where true division would yield a float.
    for i in range(len(data) // 16):
        block = data[i * 16:(i + 1) * 16]
        to_encode = ""
        # CBC: XOR the plaintext block with the previous ciphertext block
        # (or the IV), one little-endian 32-bit word at a time.
        for j in range(4):
            p = struct.unpack_from('<I', block[j * 4:])[0]
            v = struct.unpack_from('<I', last[j * 4:])[0]
            to_encode += struct.pack('<I', ((p ^ v) & 0xffffffff))
        last = cipher.encrypt(to_encode)
        out.append(last)
    # join() avoids the quadratic cost of repeated string concatenation.
    return "".join(out)
#CBC Decrypt - Jason Reaves
def serpent_cbc_decrypt(key, data, iv='\x00'*16):
    """Decrypt Serpent-CBC ``data`` and return the plaintext string.

    ``iv`` defaults to the all-zero IV that this function previously
    hard-coded, so existing callers are unaffected; the parameter makes
    the signature consistent with serpent_cbc_encrypt().
    """
    # Build the key schedule once instead of per block.  Assumes
    # Serpent.decrypt is stateless per block, matching the pure block
    # functions in this module -- TODO confirm.
    cipher = Serpent(key)
    out = []
    last = iv
    # Floor division keeps the loop correct under Python 3 as well.
    for i in range(len(data) // 16):
        decoded = cipher.decrypt(data[i * 16:(i + 1) * 16])
        plain = ""
        # CBC: XOR the decrypted block with the previous ciphertext block
        # (or the IV), one little-endian 32-bit word at a time.
        for j in range(4):
            t = struct.unpack_from('<I', decoded[j * 4:])[0]
            v = struct.unpack_from('<I', last[j * 4:])[0]
            plain += struct.pack('<I', ((t ^ v) & 0xffffffff))
        out.append(plain)
        last = data[i * 16:(i + 1) * 16]
    return "".join(out)
def RSA_Decrypt_Last_Block(key, data):
    """Recover the Serpent key, expected MD5 and payload size from the
    RSA-signed trailing block of ``data``.

    ``key`` is a serialized public key: a 32-bit bit length, followed by
    the modulus, followed by the public exponent.  Returns the tuple
    (serpent_key, md5_hex, size_bytes).

    NOTE: this is Python 2 code (print statements, `long`); `/` here is
    integer division.
    """
    # Parse the serialized public key: bit length, then modulus, then
    # exponent (big-endian byte strings converted via hex).
    bit = struct.unpack('I', key[:4])[0]
    mod = key[4:(bit/8)+4]
    exp = key[(bit/8)+4:]
    mod = int(binascii.hexlify(mod), 16)
    print("Modulo: %x" % mod)
    exp = int(binascii.hexlify(exp), 16)
    print("Exponential: %x" % exp)
    keypub = RSA.construct((mod, long(exp)))
    bits = keypub.size() + 1
    print(bits)
    # The signed block sits at the very end of the data.
    encBlock = data[-(bits/8):]
    print(binascii.hexlify(encBlock))
    # "Encrypting" with the public key applies raw (textbook) RSA, which
    # recovers the plaintext of a block signed with the private key.
    decblks = keypub.encrypt(encBlock, 0)
    print(decblks)
    decBlock = decblks[0]
    #cipherpkcs1 = PKCS1_OAEP.new(keypub)
    #decBlock = cipherpkcs1.encrypt(encBlock)
    # Fixed layout inside the recovered block: 23 bytes of padding/header,
    # then 16-byte MD5, 16-byte Serpent key, 4-byte size -- presumably the
    # ISFB signature-block format; verify against the sample if it changes.
    start_offset = 23
    print("")
    print binascii.hexlify(decBlock)
    md5 = binascii.hexlify(decBlock[start_offset:start_offset+16])
    start_offset += 16
    serpent_key = decBlock[start_offset:start_offset+16]
    start_offset += 16
    sze = decBlock[start_offset:start_offset+4]
    print "MD5: ", md5
    print "Serpent: ", binascii.hexlify(serpent_key)
    print "Size (Hex): ", binascii.hexlify(sze[::-1])
    return serpent_key, md5, sze
def Serpent_Decrypt_Packet(key, data, size, md5):
    """Decrypt the captured packet with Serpent-CBC and dump the embedded
    PE file to disk.

    ``size`` is a little-endian length prefix (reversed before hex
    conversion); ``md5`` is the expected hex digest of the plaintext.
    Nothing is written if the plaintext does not start with an 'MZ'
    header.
    """
    print "Decrypting..."
    output = "dumped_payload.bin"
    # Only the first `size` bytes are ciphertext; size arrives as a
    # little-endian byte string, hence the [::-1] before hexlify.
    result = serpent_cbc_decrypt(key, data[:int(binascii.hexlify(size[::-1]), 16)])
    # 'MZ' is the DOS/PE executable magic.
    if result[0] == "M" and result[1] == "Z":
        print "Binary File Located!"
        print "Checking Hashes Match."
        print len(result)
        print hashlib.md5(result).hexdigest()
        print md5
        # The payload is dumped either way; the hash check only affects
        # the message printed.
        if hashlib.md5(result).hexdigest() == md5:
            print "Matching Hashes. Dumping."
            with open(output, "wb") as f:
                f.write(result)
            print "Binary Dumped!"
        else:
            print "Hashes don't match! Dumping anyway."
            with open(output, "wb") as f:
                f.write(result)
            print "Dumped File!"
    return
def main():
    """Read the RSA public key and a captured MAIN export from disk, then
    recover the Serpent key from the trailing 128-byte signature block and
    decrypt the payload."""
    # Alter RSA key based on your sample of ISFB + the file name
    #key = "\x00\x02\x00\x00\xE0\x64\x63\x8D\x56\xB4\x69\x04\x16\x10\x0B\xF5\x05\x57\x54\x21\x64\xBA\x8E\x6E\xE2\x7A\xAD\x15\xF9\x7C\x1F\x79\xA8\xC8\x39\x75\xE1\x29\x1C\x37\x15\xC5\x15\x69\xB0\x20\x4F\x2B\x4D\x3C\xF8\x1F\x38\x06\x02\x8D\xD0\x1D\x15\x7C\x87\xF7\xF9\x1D\xDB\x6D\xB1\xE5\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01"
    with open("public.key", "rb") as f:
        key = f.read()
    with open("MAIN_export.bin", "rb") as f:
        packet = f.read()
    # The last 128 bytes are the RSA signature block; everything before it
    # is the Serpent-CBC ciphertext.
    data = packet[-128:]
    packet = packet[:-128]
    serpent_key, md5, sze = RSA_Decrypt_Last_Block(key, data)
    Serpent_Decrypt_Packet(serpent_key, packet, sze, md5)
    return
if __name__ == "__main__":
main()
|
gray-panda/grayrepo
|
2020_flareon/11_rabbithole/__notworking/dsunsign.py
|
Python
|
gpl-2.0
| 74,080
|
[
"Brian"
] |
e565fd9d931d2a5fef3d3b0446e97c531f4f9dfa43f2fb766e1203e0b9f5c47d
|
"""Particle filter implementation."""
import numpy as np
class ParticleFilter(object):
    """Bootstrap particle filter tracking 2-D position and heading (yaw),
    plus per-particle velocity when no IMU is available."""

    def __init__(self, num_points, init_coords, init_sigma, have_map,
                 have_imu):
        """Initialise num_points particles using an isotropic Gaussian with
        variance init_sigma and mean init_coords.  If have_imu is False, the
        filter will store velocities as well as the coordinates and yaws
        which it tracks normally."""
        self.num_points = num_points
        # Particles are initialised using an isotropic Gaussian with covariance
        # matrix init_sigma * I and mean given by init_coords. Remember that
        # the matrix is stored with one (x, y) coordinate per row and
        # num_points rows.
        self.coords = np.random.multivariate_normal(
            init_coords, init_sigma * np.eye(2), num_points
        )
        # Particle yaws are initialised randomly in [0, 2*pi]
        self.yaws = np.random.uniform(0, 2*np.pi, num_points)
        # Particle weights are initially uniform
        self.weights = np.ones(num_points) / num_points
        self.have_map = have_map
        # Store speeds if necessary
        self.have_imu = have_imu
        if not have_imu:
            self.velocities = np.random.multivariate_normal(
                [0, 0], 5 * np.eye(2), num_points
            )

    def normalise_weights(self):
        """Ensure that weights sum to one."""
        s = np.sum(self.weights)
        # Prevent overflow: if the weights have collapsed to (or below)
        # zero, reset them to uniform rather than dividing by zero.
        if s <= 0:
            self.weights = np.ones_like(self.weights) / self.weights.size
        else:
            self.weights = self.weights / s

    def effective_particles(self):
        """Return the effective sample size 1 / sum(w_i^2).  The filter
        should resample when this quantity falls below some threshold,
        which Gustaffson et al. (2002) recommend be set to 2N/3."""
        sqsum = np.sum(np.square(self.weights))
        if sqsum < 1e-15:
            # Prevent numerical issues
            return 0
        return 1.0 / sqsum

    def auto_resample(self):
        """Resample iff the number of effective particles drops below two
        thirds of ``self.num_points``"""
        if self.effective_particles() < 2.0 / 3.0 * self.num_points:
            self.resample()

    def resample(self):
        """Draw ``self.num_points`` samples from distribution given by current
        particle weights."""
        self.normalise_weights()
        # Produce a vector of indices into our coordinate, yaw and weights
        # vectors, choosing according the the probability distribution defined
        # by our current weights
        samples = np.random.choice(
            np.arange(self.num_points),
            size=self.num_points,
            replace=True,
            p=self.weights
        )
        # Now resample from our set of particles
        self.coords = self.coords[samples]
        self.yaws = self.yaws[samples]
        # Force yaws to be in [0, 2pi)
        self.yaws %= 2 * np.pi
        if not self.have_imu:
            self.velocities = self.velocities[samples]
        # Set weights to be uniform
        self.weights = np.ones(self.num_points) / self.num_points

    def state_estimate(self):
        """Give a best estimate for the current state of the vehicle,
        returned as a length-3 array (x, y, yaw)."""
        # State estimate is simply weighted sum of particle states
        self.normalise_weights()
        coords = np.dot(self.weights, self.coords).reshape((2,))
        # Angular means are tricky. Here we convert all of the yaws to unit
        # vectors, then add the unit vectors together to come up with a
        # weighted mean vector, which can then be converted to an angle.
        yaw_xs = np.cos(self.yaws)
        yaw_ys = np.sin(self.yaws)
        mean_x = np.dot(self.weights, yaw_xs)
        mean_y = np.dot(self.weights, yaw_ys)
        mean_yaw = np.arctan2(mean_y, mean_x).reshape((1,))
        return np.concatenate((coords, mean_yaw))

    def gps_update(self, mean, stddev):
        """Measure a GPS-like sensor reading with Cartesian coordinates given
        by ``mean`` and uncertainty represented by an isotropic Gaussian with
        standard deviation ``stddev``"""
        # Scatter a handful (1%) of particles around the fix so the filter
        # can recover from total localisation failure.
        num_to_scatter = max(1, int(0.01 * self.num_points))
        indices = np.random.permutation(self.num_points)[:num_to_scatter]
        self.coords[indices] = np.random.multivariate_normal(
            mean, stddev ** 2 * np.eye(2), num_to_scatter
        )
        self.yaws[indices] = np.random.uniform(0, 2 * np.pi, num_to_scatter)
        # Next, update the weights of all particles
        precision = (np.eye(2) / (stddev ** 2))
        diffs = self.coords - mean
        by_precision = np.dot(precision, diffs.T).T
        # We don't need to normalise, so these aren't exactly Gaussians
        likelihoods = np.exp(-0.5 * np.einsum('ij,ij->i', diffs, by_precision))
        assert likelihoods.shape == (self.num_points,), likelihoods.shape
        self.weights *= likelihoods

    def map_update(self, m):
        """Incorporate measurements from the Map instance m using a Cauchy-like
        PDF over distances of each particle from their nearest road
        segments."""
        dists = np.zeros((self.num_points,))
        for idx, point in enumerate(self.coords):
            dists[idx] = m.nearest_lane_dist(point)
        if np.percentile(dists, 5) > 15:
            # Don't bother incorporating weights if the 95%+ of particles are
            # more than 15m from a road. In that case, we can safely assume
            # that we're off the road.
            return
        factors = 1.0 / ((1 + dists ** 2) ** 1.1)
        self.weights *= factors

    def predict(self, dt, *args):
        """Update the particles according to the state transition model.
        Extra positional args are forwarded to predict_imu when an IMU is
        available."""
        if self.have_imu:
            return self.predict_imu(dt, *args)
        return self.predict_no_imu(dt)

    def predict_imu(self, dt, forward_speed, yaw_diff):
        """Update particles if we have IMU data (and thus do not need to keep
        velocity or other higher dimensional data around)"""
        assert self.have_imu
        # Constant. Tuning this can produce better performance in some cases.
        yaw_sigma = 0.15
        noisy_yaws = np.random.normal(
            yaw_diff, yaw_sigma, (self.num_points,)
        )
        # We're using a Gaussian because it's easy to sample from and gives
        # values in (-inf, inf)
        odom_noisy = np.random.normal(
            forward_speed, abs(forward_speed) * 0.6, size=(self.num_points,)
        )
        noisy_odom = dt * odom_noisy
        self.coords[:, 0] += noisy_odom * np.cos(self.yaws)
        self.coords[:, 1] += noisy_odom * np.sin(self.yaws)
        # Don't worry about forcing yaws into [0, 2 * pi), since we'll do that
        # when we resample
        self.yaws += dt * noisy_yaws

    def predict_no_imu(self, dt):
        """Run predict step of PF when no IMU is available. Uses a fixed,
        hand-derived distribution for transition probabilities."""
        # Run investigate_transitions.py for some insight into the choice of
        # covariance here. I've chosen a much larger covariance than the sample
        # covariance, since we need the particles to jump around a bit.
        new_velocities = self.velocities + np.random.multivariate_normal(
            [0, 0], dt * 1 * np.eye(2), len(self.velocities)
        )
        # Trapezoidal position update from the old and new velocities.
        self.coords += 0.5 * dt * (self.velocities + new_velocities)
        self.velocities = new_velocities
        # Keep yaws updated as well for the good of the visualisation code.
        # This can be disabled in "production"
        self.yaws = np.arctan2(self.velocities[:, 1], self.velocities[:, 0])
        self.yaws %= 2 * np.pi
|
qxcv/comp2550
|
project/filter.py
|
Python
|
apache-2.0
| 7,753
|
[
"Gaussian"
] |
6389a24f18fb2c0387186a6f16ff857a39d99d355a11349ac8f19ec636576c8d
|
'''
.. module:: skrf.network
========================================
network (:mod:`skrf.network`)
========================================
Provides a n-port network class and associated functions.
Most of the functionality in this module is provided as methods and
properties of the :class:`Network` Class.
Network Class
===============
.. autosummary::
:toctree: generated/
Network
Network Representations
============================
.. autosummary::
:toctree: generated/
Network.s
Network.z
Network.y
Network.a
Network.t
Connecting Networks
===============================
.. autosummary::
:toctree: generated/
connect
innerconnect
cascade
cascade_list
de_embed
flip
Interpolation and Concatenation Along Frequency Axis
=====================================================
.. autosummary::
:toctree: generated/
stitch
overlap
Network.resample
Network.interpolate
Network.interpolate_self
Network.interpolate_from_f
Combining Networks
===================================
.. autosummary::
:toctree: generated/
n_oneports_2_nport
four_oneports_2_twoport
three_twoports_2_threeport
n_twoports_2_nport
IO
====
.. autosummary::
skrf.io.general.read
skrf.io.general.write
skrf.io.general.ntwk_2_spreadsheet
Network.write
Network.write_touchstone
Network.read
Network.write_spreadsheet
Noise
============
.. autosummary::
:toctree: generated/
Network.add_noise_polar
Network.add_noise_polar_flatband
Network.multiply_noise
Supporting Functions
======================
.. autosummary::
:toctree: generated/
inv
connect_s
innerconnect_s
s2z
s2y
s2t
s2a
z2s
z2y
z2t
z2a
y2s
y2z
y2t
t2s
t2z
t2y
fix_z0_shape
renormalize_s
passivity
reciprocity
Misc Functions
=====================
.. autosummary::
:toctree: generated/
average
two_port_reflect
chopinhalf
Network.nudge
Network.renormalize
'''
from six.moves import xrange
import os
import warnings
try:
import cPickle as pickle
from cPickle import UnpicklingError
except ImportError:
import pickle as pickle
from pickle import UnpicklingError
from copy import deepcopy as copy
import re
from numbers import Number
from itertools import product
import numpy as npy
from numpy.linalg import inv as npy_inv
import pylab as plb
from scipy import stats,signal # for Network.add_noise_*, and Network.windowed
from scipy.interpolate import interp1d # for Network.interpolate()
from numpy import fft
import unittest # for unittest.skip
from . import mathFunctions as mf
from . frequency import Frequency
from . plotting import *#smith, plot_rectangular, plot_smith, plot_complex_polar
from . tlineFunctions import zl_2_Gamma0
from . util import get_fid, get_extn, find_nearest_index,slice_domain
## later imports. delayed to solve circular dependencies
#from io.general import read, write
#from io import touchstone
#from io.general import network_2_spreadsheet
from .constants import ZERO
class Network(object):
'''
A n-port electrical network [#]_.
For instructions on how to create Network see :func:`__init__`.
A n-port network may be defined by three quantities,
* network parameter matrix (s, z, or y-matrix)
* port characteristic impedance matrix
* frequency information
The :class:`Network` class stores these data structures internally
in the form of complex :class:`numpy.ndarray`'s. These arrays are not
interfaced directly but instead through the use of the properties:
===================== =============================================
Property Meaning
===================== =============================================
:attr:`s` scattering parameter matrix
:attr:`z0` characteristic impedance matrix
:attr:`f` frequency vector
===================== =============================================
Although these docs focus on s-parameters, other equivalent network
representations such as :attr:`z` and :attr:`y` are
available. Scalar projections of the complex network parameters
are accessible through properties as well. These also return
:class:`numpy.ndarray`'s.
===================== =============================================
Property Meaning
===================== =============================================
:attr:`s_re` real part of the s-matrix
:attr:`s_im` imaginary part of the s-matrix
:attr:`s_mag` magnitude of the s-matrix
:attr:`s_db` magnitude in log scale of the s-matrix
:attr:`s_deg` phase of the s-matrix in degrees
:attr:`s_gd` group delay derived from the s-matrix
===================== =============================================
The following operations act on the networks s-matrix.
===================== =============================================
Operator Function
===================== =============================================
\+ element-wise addition of the s-matrix
\- element-wise difference of the s-matrix
\* element-wise multiplication of the s-matrix
\/ element-wise division of the s-matrix
\*\* cascading (only for 2-ports)
\// de-embedding (for 2-ports, see :attr:`inv`)
===================== =============================================
Different components of the :class:`Network` can be visualized
through various plotting methods. These methods can be used to plot
individual elements of the s-matrix or all at once. For more info
about plotting see the :doc:`../../tutorials/plotting` tutorial.
========================= =============================================
Method Meaning
========================= =============================================
:func:`plot_s_smith` plot complex s-parameters on smith chart
:func:`plot_s_re` plot real part of s-parameters vs frequency
:func:`plot_s_im` plot imaginary part of s-parameters vs frequency
:func:`plot_s_mag` plot magnitude of s-parameters vs frequency
:func:`plot_s_db` plot magnitude (in dB) of s-parameters vs frequency
:func:`plot_s_deg` plot phase of s-parameters (in degrees) vs frequency
:func:`plot_s_deg_unwrap` plot phase of s-parameters (in unwrapped degrees) vs frequency
:func:`plot_s_gd` plot group delay of s-parameters (in s) vs frequency
========================= =============================================
:class:`Network` objects can be created from a touchstone or pickle
file (see :func:`__init__`), by a
:class:`~skrf.media.media.Media` object, or manually by assigning the
network properties directly. :class:`Network` objects
can be saved to disk in the form of touchstone files with the
:func:`write_touchstone` method.
An exhaustive list of :class:`Network` Methods and Properties
(Attributes) is given below
References
------------
.. [#] http://en.wikipedia.org/wiki/Two-port_network
'''
# the primary parameter representations; each one gets a full set of
# dynamically-generated secondary properties (s_db, z_mag, ...) and
# plot methods (plot_s_db, ...). declared `global` so module-level
# helpers can see them even though they are defined in the class body.
global PRIMARY_PROPERTIES
PRIMARY_PROPERTIES = [ 's','z','y','a']

# maps a component suffix -> function that projects a complex network
# parameter array onto a real scalar quantity. used by
# __generate_secondary_properties to build properties like `s_db`.
global COMPONENT_FUNC_DICT
COMPONENT_FUNC_DICT = {
    're'    : npy.real,
    'im'    : npy.imag,
    'mag'   : npy.abs,
    'db'    : mf.complex_2_db,
    'db10'  : mf.complex_2_db10,
    'rad'   : npy.angle,
    'deg'   : lambda x: npy.angle(x, deg=True),
    'arcl'  : lambda x: npy.angle(x) * npy.abs(x),
    'rad_unwrap' : lambda x: mf.unwrap_rad(npy.angle(x)),
    'deg_unwrap' : lambda x: mf.radian_2_degree(mf.unwrap_rad(\
        npy.angle(x))),
    'arcl_unwrap' : lambda x: mf.unwrap_rad(npy.angle(x)) *\
        npy.abs(x),
    # 'gd' is later re-scaled by the frequency step inside
    # __generate_secondary_properties to yield seconds
    'gd' : lambda x: -1 * npy.gradient(mf.unwrap_rad(npy.angle(x)))[0],
    'vswr' : lambda x: (1+abs(x))/(1-abs(x)),
    'time' : lambda x: fft.ifftshift(fft.ifft(x, axis=0), axes=0),
    'time_db' : lambda x: mf.complex_2_db(fft.ifftshift(fft.ifft(x, axis=0),axes=0)),
    'time_mag' : lambda x: mf.complex_2_magnitude(fft.ifftshift(fft.ifft(x, axis=0),axes=0)),
    }

# provides y-axis labels to the plotting functions
global Y_LABEL_DICT
Y_LABEL_DICT = {
    're'    : 'Real Part',
    'im'    : 'Imag Part',
    'mag'   : 'Magnitude',
    'abs'   : 'Magnitude',
    'db'    : 'Magnitude (dB)',
    'db10'  : 'Magnitude (dB)',
    'deg'   : 'Phase (deg)',
    'deg_unwrap'    : 'Phase (deg)',
    'rad'   : 'Phase (rad)',
    'rad_unwrap'    : 'Phase (rad)',
    'arcl'  : 'Arc Length',
    'arcl_unwrap'   : 'Arc Length',
    'gd' : 'Group Delay (s)',
    'vswr' : 'VSWR',
    'passivity' : 'Passivity',
    'reciprocity' : 'Reciprocity',
    'time': 'Time (real)',
    'time_db': 'Magnitude (dB)',
    'time_mag': 'Magnitude',
    }
## CONSTRUCTOR
def __init__(self, file = None, name = None , comments = None, f_unit=None, **kwargs):
    '''
    Network constructor.

    Creates an n-port microwave network from a `file` or directly
    from data. If no file or data is given, then an empty Network
    is created.

    Parameters
    ------------
    file : str or file-object
        file to load information from. supported formats are:
         * touchstone file (.s?p)
         * pickled Network (.ntwk, .p) see :func:`write`
    name : str
        Name of this Network. if None will try to use file, if
        its a str
    comments : str
        Comments associated with the Network
    f_unit : str, optional
        frequency unit ('hz', 'khz', 'mhz', 'ghz') applied to the
        loaded frequency axis, if a file was read
    \*\*kwargs :
        key word arguments can be used to assign properties of the
        Network, such as `s`, `f` and `z0`.

    Examples
    ------------
    From a touchstone

    >>> n = rf.Network('ntwk1.s2p')

    From a pickle file

    >>> n = rf.Network('ntwk1.ntwk')

    Create a blank network, then fill in values

    >>> n = rf.Network()
    >>> freq = rf.Frequency(1,3,3,'ghz')
    >>> n.frequency, n.s, n.z0 = freq,[1,2,3], [1,2,3]

    Directly from values

    >>> n = rf.Network(f=[1,2,3],s=[1,2,3],z0=[1,2,3])

    See Also
    -----------
    read : read a network from a file
    write : write a network to a file, using pickle
    write_touchstone : write a network to a touchstone file
    '''
    # allow for old kwarg for backward compatibility
    if 'touchstone_filename' in kwargs:
        file = kwargs['touchstone_filename']

    self.name = name
    self.comments = comments

    if file is not None:
        # allows user to pass filename or file obj. open in 'binary'
        # mode because we are going to try and unpickle it first
        fid = get_fid(file, 'rb')
        try:
            self.read(fid)
        except UnpicklingError:
            # if unpickling doesn't work, close fid, reopen in
            # non-binary mode and try to read it as touchstone
            fid.close()
            fid = get_fid(file)
            self.read_touchstone(fid)

        # BUG FIX: the original assigned the basename to the *local*
        # variable `name`, which was never read again, so a Network
        # loaded from a file silently kept name=None. Assign to
        # self.name (only if nothing else set it) instead.
        if self.name is None and isinstance(file, str):
            self.name = os.path.splitext(os.path.basename(file))[0]

        if self.frequency is not None and f_unit is not None:
            self.frequency.unit = f_unit

    # allow primary properties (s, z, y, a) plus frequency/z0/f to be
    # set through constructor keyword arguments
    for attr in PRIMARY_PROPERTIES + ['frequency', 'z0', 'f']:
        if attr in kwargs:
            self.__setattr__(attr, kwargs[attr])

    ##TODO: remove this as it takes up ~70% cpu time of this init
    self.__generate_plot_functions()
## OPERATORS
def __pow__(self, other):
    '''
    Cascade this network with another network (port 1 of self into
    port 0 of `other`), or raise the s-matrix to a scalar power.
    '''
    # a plain number means element-wise power of the s-matrix
    if isinstance(other, Number):
        powered = self.copy()
        powered.s = powered.s ** other
        return powered
    # otherwise cascade the two networks
    return connect(self, 1, other, 0)
def __floordiv__(self, other):
    '''
    De-embed another network (or a pair of networks) from this one.

    ``a // b`` computes ``b.inv ** a``. If ``other`` is a pair
    ``(b, c)`` of Networks, the result is ``b.inv ** a ** c.inv``.

    See Also
    ----------
    inv : inverse s-parameters
    '''
    try:
        # a sequence of two networks: de-embed from both sides,
        # i.e. A.inv * C * B.inv
        b = other[0]
        c = other[1]
        out = copy(self)
        out.s = (b.inv ** self ** c.inv).s
        return out
    except TypeError:
        # `other` is not subscriptable -> treat as a single network
        pass
    if other.number_of_ports != 2:
        raise IndexError('Incorrect number of ports.')
    out = self.copy()
    out.s = (other.inv ** self).s
    return out
def __mul__(self, other):
    '''
    Element-wise complex multiplication of the s-matrices.
    '''
    out = self.copy()
    if not isinstance(other, Network):
        # `other` may be a scalar or an array broadcastable to fxnxn
        arr = npy.array(other).reshape(-1, self.nports, self.nports)
        out.s = self.s * arr
        return out
    self.__compatable_for_scalar_operation_test(other)
    out.s = self.s * other.s
    return out
def __rmul__(self, other):
    '''
    Element-wise complex multiplication of the s-matrices
    (reflected operand; multiplication commutes).
    '''
    out = self.copy()
    if not isinstance(other, Network):
        arr = npy.array(other).reshape(-1, self.nports, self.nports)
        out.s = self.s * arr
        return out
    self.__compatable_for_scalar_operation_test(other)
    out.s = self.s * other.s
    return out
def __add__(self, other):
    '''
    Element-wise complex addition of the s-matrices.
    '''
    out = self.copy()
    if not isinstance(other, Network):
        # `other` may be a scalar or an array broadcastable to fxnxn
        arr = npy.array(other).reshape(-1, self.nports, self.nports)
        out.s = self.s + arr
        return out
    self.__compatable_for_scalar_operation_test(other)
    out.s = self.s + other.s
    return out
def __radd__(self, other):
    '''
    Element-wise complex addition of the s-matrices
    (reflected operand; addition commutes).
    '''
    out = self.copy()
    if not isinstance(other, Network):
        arr = npy.array(other).reshape(-1, self.nports, self.nports)
        out.s = self.s + arr
        return out
    self.__compatable_for_scalar_operation_test(other)
    out.s = self.s + other.s
    return out
def __sub__(self, other):
    '''
    Element-wise complex subtraction of the s-matrices
    (``self.s - other.s``).
    '''
    out = self.copy()
    if not isinstance(other, Network):
        arr = npy.array(other).reshape(-1, self.nports, self.nports)
        out.s = self.s - arr
        return out
    self.__compatable_for_scalar_operation_test(other)
    out.s = self.s - other.s
    return out
def __rsub__(self, other):
    '''
    Element-wise complex subtraction of the s-matrices, reflected
    operand order (``other.s - self.s``).
    '''
    out = self.copy()
    if not isinstance(other, Network):
        arr = npy.array(other).reshape(-1, self.nports, self.nports)
        out.s = arr - self.s
        return out
    self.__compatable_for_scalar_operation_test(other)
    out.s = other.s - self.s
    return out
def __truediv__(self, other):
    # Python 3 division protocol; defer to the Python 2-style __div__
    return self.__div__(other)
def __div__(self, other):
    '''
    Element-wise complex division of the s-matrices
    (``self.s / other.s``).
    '''
    out = self.copy()
    if not isinstance(other, Network):
        # `other` may be a scalar or an array broadcastable to fxnxn
        arr = npy.array(other).reshape(-1, self.nports, self.nports)
        out.s = self.s / arr
        return out
    self.__compatable_for_scalar_operation_test(other)
    out.s = self.s / other.s
    return out
def __eq__(self, other):
    '''
    Networks compare equal when every s-parameter differs by less
    than the global tolerance ``ZERO``.
    '''
    if other is None:
        return False
    return bool(npy.all(npy.abs(self.s - other.s) < ZERO))
def __ne__(self, other):
    '''Logical inverse of :func:`__eq__`.'''
    return not self.__eq__(other)
def __getitem__(self, key):
    '''
    Slice this Network by index or by a human-readable string.

    Parameters
    -----------
    key : str, or slice
        if slice; like [2-10] then it is interpreted as the index of
        the frequency.
        if str, then should be like '50.1-75.5ghz', or just '50'.
        If the frequency unit is omitted then self.frequency.unit is
        used.

    Examples
    -----------
    >>> from skrf.data import ring_slot
    >>> a = ring_slot['80-90ghz']
    >>> a.plot_s_db()
    '''
    self.z0  # HACK: accessing z0 forces its getter to re-shape it
    return self.interpolate(self.frequency[key])
def __str__(self):
    '''
    Short human-readable summary: port count, name, frequency span
    and port impedance.
    '''
    name = self.name if self.name is not None else ''
    # z0 may still be a bare scalar if s was never assigned
    if len(npy.shape(self.z0)) == 0:
        z0_str = str(self.z0)
    else:
        z0_str = str(self.z0[0, :])
    return '%i-Port Network: \'%s\', %s, z0=%s' % (
        self.number_of_ports, name, str(self.frequency), z0_str)
def __repr__(self):
    # reuse the human-readable summary for the interactive repr
    return self.__str__()
def __len__(self):
    '''
    length of frequency axis (number of frequency points)
    '''
    return len(self.s)
## INTERNAL CODE GENERATION METHODS
def __compatable_for_scalar_operation_test(self, other):
    '''
    Raise IndexError unless `other`'s s-matrix is element-wise
    compatible with this one (same frequency axis, same shape).
    '''
    if other.frequency != self.frequency:
        raise IndexError('Networks must have same frequency. See `Network.interpolate`')

    if other.s.shape != self.s.shape:
        raise IndexError('Networks must have same number of ports.')
def __generate_secondary_properties(self):
    '''
    Create the `secondary properties`: scalar projections
    (``s_db``, ``z_mag``, ``y_deg_unwrap``, ...) of the primary
    properties s, z, y and a, one per COMPONENT_FUNC_DICT entry.
    Properties are installed on the *class*, so this only needs to
    run once per interpreter, though it is re-run on every s-set.
    '''
    for prop_name in PRIMARY_PROPERTIES:
        for func_name in COMPONENT_FUNC_DICT:
            func = COMPONENT_FUNC_DICT[func_name]
            # NOTE: `f=func, p=prop_name` default arguments bind the
            # loop variables at definition time, avoiding the classic
            # late-binding closure bug
            if 'gd' in func_name: # scaling of gradient by frequency
                def fget(self, f=func, p = prop_name):
                    return f(getattr(self,p)) / (2 * npy.pi * self.frequency.step)
            else:
                def fget(self, f=func, p = prop_name):
                    return f(getattr(self,p))
            doc = '''
        The %s component of the %s-matrix

        See Also
        ----------
        %s
        '''%(func_name, prop_name, prop_name)

            setattr(self.__class__,'%s_%s'%(prop_name, func_name),\
                property(fget, doc = doc))
def __generate_plot_functions(self):
    '''
    Dynamically attach plotting methods to the class: for every primary
    property (s, z, y, a) a polar plot (``plot_s_polar``), a complex
    rectangular plot (``plot_s_complex``), and one rectangular plot per
    scalar component in COMPONENT_FUNC_DICT (``plot_s_db``, ...).
    '''
    for prop_name in PRIMARY_PROPERTIES:

        # --- polar plot of the complex parameter -------------------
        def plot_prop_polar(self,
            m=None, n=None, ax=None,
            show_legend=True ,prop_name=prop_name,*args, **kwargs):

            # create index lists, if not provided by user
            if m is None:
                M = range(self.number_of_ports)
            else:
                M = [m]
            if n is None:
                N = range(self.number_of_ports)
            else:
                N = [n]

            if 'label'  not in kwargs.keys():
                gen_label = True
            else:
                gen_label = False

            # NOTE(review): `plb.isinteractive` is not *called* here —
            # this is always truthy; probably meant plb.isinteractive().
            was_interactive = plb.isinteractive
            if was_interactive:
                plb.interactive(False)

            for m in M:
                for n in N:
                    # set the legend label for this trace to the networks
                    # name if it exists, and they didnt pass a name key in
                    # the kwargs
                    if gen_label:
                        if self.name is None:
                            if plb.rcParams['text.usetex']:
                                label_string = '$%s_{%i%i}$'%\
                                (prop_name[0].upper(),m+1,n+1)
                            else:
                                label_string = '%s%i%i'%\
                                (prop_name[0].upper(),m+1,n+1)
                        else:
                            if plb.rcParams['text.usetex']:
                                label_string = self.name+', $%s_{%i%i}$'%\
                                (prop_name[0].upper(),m+1,n+1)
                            else:
                                label_string = self.name+', %s%i%i'%\
                                (prop_name[0].upper(),m+1,n+1)
                        kwargs['label'] = label_string

                    # plot the desired attribute vs frequency
                    plot_complex_polar(
                        z = getattr(self, prop_name)[:,m,n],
                        show_legend = show_legend, ax = ax,
                        *args, **kwargs)

            if was_interactive:
                plb.interactive(True)
                plb.draw()
                plb.show()

        plot_prop_polar.__doc__ = '''
plot the Network attribute :attr:`%s` vs frequency.

Parameters
-----------
m : int, optional
    first index of s-parameter matrix, if None will use all
n : int, optional
    secon index of the s-parameter matrix, if None will use all
ax : :class:`matplotlib.Axes` object, optional
    An existing Axes object to plot on
show_legend : Boolean
    draw legend or not
attribute : string
    Network attribute to plot
y_label : string, optional
    the y-axis label

\*args,\\**kwargs : arguments, keyword arguments
    passed to :func:`matplotlib.plot`

Notes
-------
This function is dynamically generated upon Network
initialization. This is accomplished by calling
:func:`plot_vs_frequency_generic`

Examples
------------
>>> myntwk.plot_%s(m=1,n=0,color='r')

'''%(prop_name,prop_name)

        setattr(self.__class__,'plot_%s_polar'%(prop_name), \
            plot_prop_polar)

        # --- rectangular (re/im plane) plot of the complex parameter --
        def plot_prop_rect(self,
            m=None, n=None, ax=None,
            show_legend=True,prop_name=prop_name,*args, **kwargs):

            # create index lists, if not provided by user
            if m is None:
                M = range(self.number_of_ports)
            else:
                M = [m]
            if n is None:
                N = range(self.number_of_ports)
            else:
                N = [n]

            if 'label'  not in kwargs.keys():
                gen_label = True
            else:
                gen_label = False

            #was_interactive = plb.isinteractive
            #if was_interactive:
            #    plb.interactive(False)

            for m in M:
                for n in N:
                    # set the legend label for this trace to the networks
                    # name if it exists, and they didnt pass a name key in
                    # the kwargs
                    if gen_label:
                        if self.name is None:
                            if plb.rcParams['text.usetex']:
                                label_string = '$%s_{%i%i}$'%\
                                (prop_name[0].upper(),m+1,n+1)
                            else:
                                label_string = '%s%i%i'%\
                                (prop_name[0].upper(),m+1,n+1)
                        else:
                            if plb.rcParams['text.usetex']:
                                label_string = self.name+', $%s_{%i%i}$'%\
                                (prop_name[0].upper(),m+1,n+1)
                            else:
                                label_string = self.name+', %s%i%i'%\
                                (prop_name[0].upper(),m+1,n+1)
                        kwargs['label'] = label_string

                    # plot the desired attribute vs frequency
                    plot_complex_rectangular(
                        z = getattr(self, prop_name)[:,m,n],
                        show_legend = show_legend, ax = ax,
                        *args, **kwargs)

            #if was_interactive:
            #    plb.interactive(True)
            #    plb.draw()
            #    plb.show()

        plot_prop_rect.__doc__ = '''
plot the Network attribute :attr:`%s` vs frequency.

Parameters
-----------
m : int, optional
    first index of s-parameter matrix, if None will use all
n : int, optional
    secon index of the s-parameter matrix, if None will use all
ax : :class:`matplotlib.Axes` object, optional
    An existing Axes object to plot on
show_legend : Boolean
    draw legend or not
attribute : string
    Network attribute to plot
y_label : string, optional
    the y-axis label

\*args,\\**kwargs : arguments, keyword arguments
    passed to :func:`matplotlib.plot`

Notes
-------
This function is dynamically generated upon Network
initialization. This is accomplished by calling
:func:`plot_vs_frequency_generic`

Examples
------------
>>> myntwk.plot_%s(m=1,n=0,color='r')

'''%(prop_name,prop_name)

        setattr(self.__class__,'plot_%s_complex'%(prop_name), \
            plot_prop_rect)

        # --- one rectangular plot per scalar component -----------------
        for func_name in COMPONENT_FUNC_DICT:
            attribute = '%s_%s'%(prop_name, func_name)
            y_label = Y_LABEL_DICT[func_name]

            def plot_func(self,  m=None, n=None, ax=None,
                show_legend=True,attribute=attribute,
                y_label=y_label,*args, **kwargs):

                # create index lists, if not provided by user
                if m is None:
                    M = range(self.number_of_ports)
                else:
                    M = [m]
                if n is None:
                    N = range(self.number_of_ports)
                else:
                    N = [n]

                if 'label'  not in kwargs.keys():
                    gen_label = True
                else:
                    gen_label = False

                #TODO: turn off interactive plotting for performance
                # this didnt work because it required a show()
                # to be called, which in turn, disrupted testCases
                #
                #was_interactive = plb.isinteractive
                #if was_interactive:
                #    plb.interactive(False)

                for m in M:
                    for n in N:
                        # set the legend label for this trace to the networks
                        # name if it exists, and they didnt pass a name key in
                        # the kwargs
                        if gen_label:
                            if self.name is None:
                                if plb.rcParams['text.usetex']:
                                    label_string = '$%s_{%i%i}$'%\
                                    (attribute[0].upper(),m+1,n+1)
                                else:
                                    label_string = '%s%i%i'%\
                                    (attribute[0].upper(),m+1,n+1)
                            else:
                                if plb.rcParams['text.usetex']:
                                    label_string = self.name+', $%s_{%i%i}$'%\
                                    (attribute[0].upper(),m+1,n+1)
                                else:
                                    label_string = self.name+', %s%i%i'%\
                                    (attribute[0].upper(),m+1,n+1)
                            kwargs['label'] = label_string

                        # plot the desired attribute vs frequency;
                        # time-domain components plot against t, the
                        # rest against the scaled frequency axis
                        if 'time' in attribute:
                            xlabel = 'Time (ns)'
                            x = self.frequency.t_ns

                        else:
                            xlabel = 'Frequency (%s)'%self.frequency.unit
                            x = self.frequency.f_scaled

                        plot_rectangular(
                            x = x,
                            y = getattr(self, attribute)[:,m,n],
                            x_label = xlabel,
                            y_label = y_label,
                            show_legend = show_legend, ax = ax,
                            *args, **kwargs)

                #if was_interactive:
                #    plb.interactive(True)
                #    plb.draw()
                #    #plb.show()

            plot_func.__doc__ = '''
plot the Network attribute :attr:`%s` vs frequency.

Parameters
-----------
m : int, optional
    first index of s-parameter matrix, if None will use all
n : int, optional
    secon index of the s-parameter matrix, if None will use all
ax : :class:`matplotlib.Axes` object, optional
    An existing Axes object to plot on
show_legend : Boolean
    draw legend or not
attribute : string
    Network attribute to plot
y_label : string, optional
    the y-axis label

\*args,\\**kwargs : arguments, keyword arguments
    passed to :func:`matplotlib.plot`

Notes
-------
This function is dynamically generated upon Network
initialization. This is accomplished by calling
:func:`plot_vs_frequency_generic`

Examples
------------
>>> myntwk.plot_%s(m=1,n=0,color='r')

'''%(attribute,attribute)

            setattr(self.__class__,'plot_%s'%(attribute), \
                plot_func)
def __generate_subnetworks(self):
    '''
    generates all one-port sub-networks, installed on the class as
    properties named s11, s12, ..., snn (1-indexed).
    '''
    for m in range(self.number_of_ports):
        for n in range(self.number_of_ports):
            # default args `m=m, n=n` bind the loop variables at
            # definition time (avoids the late-binding closure bug)
            def fget(self,m=m,n=n):
                ntwk = self.copy()
                ntwk.s = self.s[:,m,n]
                ntwk.z0 = self.z0[:,m]
                return ntwk
            doc = '''
            one-port sub-network.
            '''
            setattr(self.__class__,'s%i%i'%(m+1,n+1),\
                property(fget,doc=doc))
def plot_s_db_time(self,*args,**kwargs):
    '''
    plot the time-domain magnitude (dB) of the windowed network;
    shorthand for ``self.windowed().plot_s_time_db(...)``.
    '''
    return self.windowed().plot_s_time_db(*args,**kwargs)
## PRIMARY PROPERTIES
@property
def s(self):
    '''
    Scattering parameter matrix.

    The s-matrix [#]_ is a 3-dimensional :class:`numpy.ndarray` which has shape
    `fxnxn`, where `f` is frequency axis and `n` is number of ports.
    Note that indexing starts at 0, so s11 can be accessed by
    taking the slice s[:,0,0].

    Returns
    ---------
    s : complex :class:`numpy.ndarray` of shape `fxnxn`
        the scattering parameter matrix.

    See Also
    ------------
    s
    y
    z
    t
    a

    References
    ------------
    .. [#] http://en.wikipedia.org/wiki/Scattering_parameters
    '''
    return self._s
@s.setter
def s(self, s):
    '''
    Set the s-matrix. Scalars/1-D input are promoted to shape fx1x1
    and a bare nxn matrix to 1xnxn; 3-D input is stored as given.
    '''
    given_shape = npy.shape(s)
    if len(given_shape) < 3:
        if len(given_shape) == 2:
            # a single nxn matrix: add a frequency axis of length 1
            s = npy.reshape(s, (-1, given_shape[0], given_shape[0]))
        else:
            # scalar or 1-D: treat as a one-port vs frequency
            s = npy.reshape(s, (-1, 1, 1))
    self._s = npy.array(s, dtype=complex)
    # refresh the dynamically generated properties, which depend on s
    self.__generate_secondary_properties()
    self.__generate_subnetworks()
@property
def y(self):
    '''
    Admittance parameter matrix.

    The y-matrix [#]_ is a 3-dimensional :class:`numpy.ndarray` which has shape
    `fxnxn`, where `f` is frequency axis and `n` is number of ports.
    Note that indexing starts at 0, so y11 can be accessed by
    taking the slice `y[:,0,0]`.

    Computed on the fly from the stored s-matrix and z0.

    Returns
    ---------
    y : complex :class:`numpy.ndarray` of shape `fxnxn`
        the admittance parameter matrix.

    See Also
    ------------
    s
    y
    z
    t
    a

    References
    ------------
    .. [#] http://en.wikipedia.org/wiki/Admittance_parameters
    '''
    return s2y(self._s, self.z0)

@y.setter
def y(self, value):
    # store internally as s-parameters
    self._s = y2s(value, self.z0)
@property
def z(self):
    '''
    Impedance parameter matrix.

    The z-matrix [#]_ is a 3-dimensional :class:`numpy.ndarray` which has shape
    `fxnxn`, where `f` is frequency axis and `n` is number of ports.
    Note that indexing starts at 0, so z11 can be accessed by
    taking the slice `z[:,0,0]`.

    Computed on the fly from the stored s-matrix and z0.

    Returns
    ---------
    z : complex :class:`numpy.ndarray` of shape `fxnxn`
        the Impedance parameter matrix.

    See Also
    ------------
    s
    y
    z
    t
    a

    References
    ------------
    .. [#] http://en.wikipedia.org/wiki/impedance_parameters
    '''
    return s2z(self._s, self.z0)

@z.setter
def z(self, value):
    # store internally as s-parameters
    self._s = z2s(value, self.z0)
@property
def t(self):
    '''
    Scattering transfer parameters (read-only).

    The t-matrix [#]_ is a 3-dimensional :class:`numpy.ndarray`
    which has shape `fx2x2`, where `f` is frequency axis.
    Note that indexing starts at 0, so t11 can be accessed by
    taking the slice `t[:,0,0]`.

    The t-matrix, also known as the wave cascading matrix, is
    only defined for a 2-port Network.

    Returns
    --------
    t : complex :class:`numpy.ndarray` of shape `fx2x2`
        t-parameters, aka scattering transfer parameters

    See Also
    ------------
    s
    y
    z
    t
    a

    References
    -----------
    .. [#] http://en.wikipedia.org/wiki/Scattering_parameters#Scattering_transfer_parameters
    '''
    return s2t(self.s)
@property
def sa(self):
    '''
    Active scattering parameter matrix.

    Active scattering parameters are simply inverted s-parameters,
    defined as a = 1/s. Useful in analysis of active networks.
    The a-matrix is a 3-dimensional :class:`numpy.ndarray` which has shape
    `fxnxn`, where `f` is frequency axis and `n` is number of ports.
    Note that indexing starts at 0, so a11 can be accessed by
    taking the slice a[:,0,0].

    Returns
    ---------
    a : complex :class:`numpy.ndarray` of shape `fxnxn`
        the active scattering parameter matrix.

    See Also
    ------------
    s
    y
    z
    t
    a
    '''
    return 1/self.s

@sa.setter
def sa(self, value):
    # read-only: sa is derived from s and cannot be assigned
    raise (NotImplementedError)
@property
def a(self):
    '''
    abcd parameter matrix. Used to cascade two-ports (read-only).

    The abcd-matrix [#]_ is a 3-dimensional :class:`numpy.ndarray` which has shape
    `fxnxn`, where `f` is frequency axis and `n` is number of ports.
    Note that indexing starts at 0, so abcd11 can be accessed by
    taking the slice `abcd[:,0,0]`.

    Returns
    ---------
    abcd : complex :class:`numpy.ndarray` of shape `fxnxn`
        the ABCD parameter matrix.

    See Also
    ------------
    s
    y
    z
    t
    a
    abcd

    References
    ------------
    .. [#] http://en.wikipedia.org/wiki/impedance_parameters
    '''
    return s2a(self.s, self.z0)

@a.setter
def a(self, value):
    # read-only: abcd parameters are derived from s and z0
    raise (NotImplementedError)
@property
def z0(self):
    '''
    Characteristic impedance[s] of the network ports.

    This property stores the characteristic impedance of each port
    of the network. Because it is possible that each port has
    a different characteristic impedance each varying with
    frequency, `z0` is stored internally as a `fxn` array.

    However because `z0` is frequently simple (like 50ohm), it can
    be set with just number as well.

    Returns
    --------
    z0 : :class:`numpy.ndarray` of shape fxn
        characteristic impedance for network
    '''
    # i hate this function
    # it was written this way because id like to allow the user to
    # set the z0 before the s-parameters are set. However, in this
    # case we dont know how to re-shape the z0 to fxn. to solve this
    # i attempt to do the re-shaping when z0 is accessed, not when
    # it is set. this is what makes this function confusing.
    try:
        if len(npy.shape(self._z0)) ==0:
            try:
                #try and re-shape z0 to match s
                self._z0=self._z0*npy.ones(self.s.shape[:-1])
            except(AttributeError):
                print ('Warning: Network has improper \'z0\' shape.')
                #they have yet to set s .

        elif len(npy.shape(self._z0)) ==1:
            # 1-D z0: disambiguate between per-frequency and per-port
            # by comparing its length against npoints and nports
            try:
                if len(self._z0) == self.frequency.npoints:
                    # this z0 is frequency dependent but not port dependent
                    self._z0 = \
                        npy.repeat(npy.reshape(self._z0,(-1,1)),self.number_of_ports,1)

                elif len(self._z0) == self.number_of_ports:
                    # this z0 is port dependent but not frequency dependent
                    self._z0 = self._z0*npy.ones(\
                        (self.frequency.npoints,self.number_of_ports))

                else:
                    raise(IndexError('z0 has bad shape'))

            except(AttributeError):
                # there is no self.frequency, or self.number_of_ports
                raise(AttributeError('Error: I cant reshape z0 through inspection. you must provide correctly shaped z0, or s-matrix first.'))

        return self._z0

    except(AttributeError):
        #print('Warning: z0 is undefined. Defaulting to 50.')
        self.z0=50
        return self.z0 #this is not an error, its a recursive call

@z0.setter
def z0(self, z0):
    '''z0=npy.array(z0)
    if len(z0.shape) < 2:
        try:
            #try and re-shape z0 to match s
            z0=z0*npy.ones(self.s.shape[:-1])
        except(AttributeError):
            print ('Warning: you should store a Network\'s \'s\' matrix before its \'z0\'')
            #they have yet to set s .
            pass
    '''
    # stored raw; the getter above re-shapes on access
    self._z0 = npy.array(z0,dtype=complex)
@property
def frequency(self):
    '''
    frequency information for the network.

    This property is a :class:`~skrf.frequency.Frequency` object.
    It holds the frequency vector, as well frequency unit, and
    provides other properties related to frequency information, such
    as start, stop, etc.

    Returns
    --------
    frequency :  :class:`~skrf.frequency.Frequency` object
        frequency information for the network.

    See Also
    ---------
    f : property holding frequency vector in Hz
    change_frequency : updates frequency property, and
        interpolates s-parameters if needed
    interpolate : interpolate function based on new frequency
        info
    '''
    try:
        return self._frequency
    except (AttributeError):
        # lazily create an empty Frequency if none was ever set
        self._frequency = Frequency(0,0,0)
        return self._frequency

@frequency.setter
def frequency(self, new_frequency):
    '''
    takes a Frequency object, see frequency.py
    '''
    if isinstance(new_frequency, Frequency):
        # copy so the caller's object is not shared/mutated
        self._frequency = new_frequency.copy()
    else:
        try:
            # accept a bare vector of frequencies in Hz
            self._frequency = Frequency.from_f(new_frequency)
        except (TypeError):
            raise TypeError('Could not convert argument to a frequency vector')
@property
def inv(self):
    '''
    a :class:`Network` object with 'inverse' s-parameters.

    This is used for de-embedding. It is defined so that the inverse
    of a Network cascaded with itself is unity.

    Returns
    ---------
    inv : a :class:`Network` object
        a :class:`Network` object with 'inverse' s-parameters.

    See Also
    ----------
    inv : function which implements the inverse s-matrix
    '''
    # inversion is only defined for 2+ port networks
    if self.number_of_ports < 2:
        raise(TypeError('One-Port Networks dont have inverses'))
    inverted = self.copy()
    inverted.s = inv(self.s)
    return inverted
@property
def f(self):
    '''
    the frequency vector for the network, in Hz.

    Returns
    --------
    f : :class:`numpy.ndarray`
        frequency vector in Hz

    See Also
    ---------
    frequency : frequency property that holds all frequency
        information
    '''
    return self.frequency.f

@f.setter
def f(self,f):
    # keep the current display unit while replacing the points
    tmpUnit= self.frequency.unit
    self.frequency  = Frequency.from_f(f, unit=tmpUnit)
## SECONDARY PROPERTIES
@property
def number_of_ports(self):
    '''
    the number of ports the network has.

    Returns
    --------
    number_of_ports : number
        the number of ports the network has. 0 if no s-matrix has
        been assigned yet.
    '''
    try:
        return self.s.shape[1]
    except AttributeError:
        # no s-matrix set yet
        return 0
@property
def nports(self):
    '''
    the number of ports the network has (alias of
    :attr:`number_of_ports`).

    Returns
    --------
    number_of_ports : number
        the number of ports the network has.
    '''
    return self.number_of_ports
@property
def port_tuples(self):
    '''
    Returns a list of tuples, for each port index pair

    A convenience for the common task of iterating over all
    s-parameter index pairs. Equivalent to
    `[(y,x) for x in range(self.nports) for y in range(self.nports)]`
    '''
    pairs = []
    for col in range(self.nports):
        for row in range(self.nports):
            pairs.append((row, col))
    return pairs
@property
def passivity(self):
    '''
    passivity metric for a multi-port network.

    This returns a matrix who's diagonals are equal to the total
    power received at all ports, normalized to the power at a single
    excitement port.

    mathematically, this is a test for unitary-ness of the
    s-parameter matrix [#]_.

    for two port this is

    .. math::

        ( |S_{11}|^2 + |S_{21}|^2 \, , \, |S_{22}|^2+|S_{12}|^2)

    in general it is

    .. math::

        S^H \\cdot S

    where :math:`H` is conjugate transpose of S, and :math:`\\cdot`
    is dot product.

    Returns
    ---------
    passivity : :class:`numpy.ndarray` of shape fxnxn

    References
    ------------
    .. [#] http://en.wikipedia.org/wiki/Scattering_parameters#Lossless_networks
    '''
    return passivity(self.s)

@property
def reciprocity(self):
    '''
    reciprocity metric for a multi-port network.

    This returns the difference between the s-parameter matrix
    and its transpose.

    for two port this is

    .. math::

        S - S^T

    where :math:`T` is transpose of S

    Returns
    ---------
    reciprocity : :class:`numpy.ndarray` of shape fxnxn
    '''
    return reciprocity(self.s)

@property
def reciprocity2(self):
    '''
    Reciprocity metric #2

    .. math::

        abs(1 - S/S^T )

    for the two port case, this evaluates to the distance of the
    determinant of the wave-cascading matrix from unity.
    '''
    # swapaxes(1,2) is the per-frequency transpose of the s-matrix
    return abs(1-self.s/self.s.swapaxes(1,2))
## NETWORK CLASIFIERs
def is_reciprocal(self):
    '''
    test for reciprocity

    .. note:: placeholder; not yet implemented.
    '''
    raise(NotImplementedError)

def is_symmetric(self):
    '''
    test for symmetry

    .. note:: placeholder; not yet implemented.
    '''
    raise(NotImplementedError)

def is_passive(self):
    '''
    test for passivity

    .. note:: placeholder; not yet implemented.
    '''
    raise(NotImplementedError)

def is_lossless(self):
    '''
    test for losslessness

    .. note:: placeholder; not yet implemented.
    '''
    raise(NotImplementedError)
## specific ploting functions
def plot_passivity(self, port=None, label_prefix=None, *args, **kwargs):
    '''
    Plot dB(diag(passivity metric)) vs frequency

    Notes
    -------
    This plot does not completely capture the passivity metric, which
    is a test for `unitary-ness` of the s-matrix. However, it may
    be used to display a measure of power dissipated in a network.

    See Also
    -----------
    passivity
    '''
    base = '' if self.name is None else self.name
    ports = range(self.nports) if port is None else [port]
    for k in ports:
        prefix = base if label_prefix is None else label_prefix
        self.frequency.plot(mf.complex_2_db(self.passivity[:, k, k]),
                            label=prefix + ', port %i' % (k + 1),
                            *args, **kwargs)

    plb.legend()
    plb.draw()
def plot_reciprocity(self, db=False, *args, **kwargs):
    '''
    Plot reciprocity metric

    See Also
    -----------
    reciprocity
    '''
    # only the lower triangle (m > n) carries unique information
    for m in range(self.nports):
        for n in range(self.nports):
            if m > n:
                kwargs.setdefault('label', 'ports %i%i' % (m, n))
                trace = self.reciprocity[:, m, n].flatten()
                if db:
                    trace = mf.complex_2_db(trace)
                self.frequency.plot(trace, *args, **kwargs)

    plb.legend()
    plb.draw()
def plot_reciprocity2(self, db=False, *args, **kwargs):
    '''
    Plot reciprocity metric #2

    this is distance of the determinant of the wave-cascading matrix
    from unity.

    .. math::

        abs(1 - S/S^T )

    See Also
    -----------
    reciprocity
    '''
    # only the lower triangle (m > n) carries unique information
    for m in range(self.nports):
        for n in range(self.nports):
            if m > n:
                kwargs.setdefault('label', 'ports %i%i' % (m, n))
                trace = self.reciprocity2[:, m, n].flatten()
                if db:
                    trace = mf.complex_2_db(trace)
                self.frequency.plot(trace, *args, **kwargs)

    plb.legend()
    plb.draw()
## CLASS METHODS
def copy(self):
    '''
    Return a copy of this Network.

    Needed to allow pass-by-value for a Network instead of
    pass-by-reference.
    '''
    duplicate = Network(s=self.s,
                        frequency=self.frequency.copy(),
                        z0=self.z0,
                        )
    duplicate.name = self.name
    return duplicate
def copy_from(self, other):
    '''
    Copy the contents of another Network into self.

    Uses copy, so that the data is passed-by-value, not reference.

    Parameters
    -----------
    other : Network
        the network to copy the contents of

    Examples
    -----------
    >>> a = rf.N()
    >>> b = rf.N('my_file.s2p')
    >>> a.copy_from (b)
    '''
    for attr in ['_s', 'frequency', '_z0', 'name']:
        setattr(self, attr, copy(getattr(other, attr)))
# touchstone file IO
def read_touchstone(self, filename):
    '''
    Load values from a touchstone file.

    The work of this function is done through the
    :class:`~skrf.io.touchstone` class.

    Parameters
    ----------
    filename : str or file-object
        touchstone file name.

    Notes
    ------
    only the scattering parameters format is supported at the
    moment
    '''
    from .io import touchstone
    touchstoneFile = touchstone.Touchstone(filename)

    if touchstoneFile.get_format().split()[1] != 's':
        raise NotImplementedError('only s-parameters supported for now.')

    self.comments = touchstoneFile.get_comments()

    # set z0 before s so that y and z can be computed
    self.z0 = complex(touchstoneFile.resistance)
    f, self.s = touchstoneFile.get_sparameter_arrays()  # note: freq in Hz
    self.frequency = Frequency.from_f(f, unit='hz')
    self.frequency.unit = touchstoneFile.frequency_unit

    if self.name is None:
        try:
            self.name = os.path.basename(os.path.splitext(filename)[0])
            # this may not work if filename is a file object
        except AttributeError:
            # in case they pass a file-object instead of file name,
            # get the name from the touchstone file
            try:
                self.name = os.path.basename(os.path.splitext(touchstoneFile.filename)[0])
            except Exception:
                # BUG FIX: the original wrote `except():` -- an empty
                # exception tuple catches nothing, so this fallback
                # warning path was unreachable
                print('warning: couldnt inspect network name')
                self.name = ''
def write_touchstone(self, filename=None, dir = None,
        write_z0=False,skrf_comment=True,
        form='ri'):
    '''
    Write a contents of the :class:`Network` to a touchstone file.

    Parameters
    ----------
    filename : a string, optional
        touchstone filename, without extension. if 'None', then
        will use the network's :attr:`name`.
    dir : string, optional
        the directory to save the file in.
    write_z0 : boolean
        write impedance information into touchstone as comments,
        like Ansoft HFSS does
    skrf_comment : bool, optional
        write `created by skrf` comment
    form : 'db','ma','ri'
        format to write data,
        * db = db, deg
        * ma = mag, deg
        * ri = real, imag

    Notes
    -------
    format supported at the moment are,
            [Hz/kHz/MHz/GHz] S [DB/MA/RI]
    Frequency unit can be changed by setting Network.frequency.unit property

    The functionality of this function should take place in the
    :class:`~skrf.touchstone.touchstone` class.
    '''
    # according to Touchstone 2.0 spec
    # [no tab, max. 4 coeffs per line, etc.]
    if filename is None:
        if self.name is not None:
            filename= self.name
        else:
            raise ValueError('No filename given. Network must have a name, or you must provide a filename')
    # append a '.sNp' extension when the caller gave a bare name
    if get_extn(filename) is None:
        filename = filename +'.s%ip'%self.number_of_ports
    if dir is not None:
        filename = os.path.join(dir, filename)
    # set internal variables according to form: funcA/funcB extract the
    # first/second real number written for each complex s-parameter
    form = form.upper()
    if form == "RI":
        formatDic = {"labelA":"Re", "labelB":"Im"}
        funcA = npy.real
        funcB = npy.imag
    elif form == "DB":
        formatDic = {"labelA":"dB", "labelB":"ang"}
        funcA = mf.complex_2_db
        funcB = mf.complex_2_degree
    elif form == "MA":
        formatDic = {"labelA":"mag", "labelB":"ang"}
        funcA = mf.complex_2_magnitude
        funcB = mf.complex_2_degree
    else:
        raise ValueError('`form` must be either `db`,`ma`,`ri`')
    with open(filename,"w") as outputFile:
        # Add '!' Touchstone comment delimiters to the start of every line
        # in self.comments
        commented_header = ''
        if self.comments:
            for comment_line in self.comments.split('\n'):
                commented_header += '!{}\n'.format(comment_line)
        if skrf_comment:
            commented_header +='!Created with skrf (http://scikit-rf.org).\n'
        outputFile.write(commented_header)
        # write header file.
        # the '#' line is NOT a comment it is essential and it must be
        # exactly this format, to work
        # [HZ/KHZ/MHZ/GHZ] [S/Y/Z/G/H] [MA/DB/RI] [R n]
        outputFile.write('# {} S {} R {} \n'.format(self.frequency.unit, form, str(abs(self.z0[0,0]))))
        if self.number_of_ports == 1 :
            # write comment line for users (optional)
            outputFile.write('!freq {labelA}S11 {labelB}S11\n'.format(**formatDic))
            # write out data
            for f in range(len(self.f)):
                outputFile.write(str(self.frequency.f_scaled[f])+' '\
                        + str(funcA(self.s[f,0,0])) + ' '\
                        + str(funcB(self.s[f,0,0])) +'\n')
                # write out the z0 following hfss's convention if desired
                if write_z0:
                    outputFile.write('! Port Impedance ' )
                    for n in range(self.number_of_ports):
                        outputFile.write('%.14f %.14f '%(self.z0[f,n].real, self.z0[f,n].imag))
                    outputFile.write('\n')
        elif self.number_of_ports == 2 :
            # 2-port is a special case with
            # - single line, and
            # - S21,S12 in reverse order: legacy ?
            # write comment line for users (optional)
            outputFile.write('!freq {labelA}S11 {labelB}S11 {labelA}S21 {labelB}S21 {labelA}S12 {labelB}S12 {labelA}S22 {labelB}S22\n'.format(**formatDic))
            # write out data
            for f in range(len(self.f)):
                outputFile.write(str(self.frequency.f_scaled[f])+' '\
                        + str(funcA(self.s[f,0,0])) + ' '\
                        + str(funcB(self.s[f,0,0])) + ' '\
                        + str(funcA(self.s[f,1,0])) + ' '\
                        + str(funcB(self.s[f,1,0])) + ' '\
                        + str(funcA(self.s[f,0,1])) + ' '\
                        + str(funcB(self.s[f,0,1])) + ' '\
                        + str(funcA(self.s[f,1,1])) + ' '\
                        + str(funcB(self.s[f,1,1])) +'\n')
                # write out the z0 following hfss's convention if desired
                if write_z0:
                    outputFile.write('! Port Impedance' )
                    for n in range(2):
                        outputFile.write(' %.14f %.14f'%(self.z0[f,n].real, self.z0[f,n].imag))
                    outputFile.write('\n')
        elif self.number_of_ports == 3 :
            # 3-port is written over 3 lines / matrix order
            # write comment line for users (optional)
            outputFile.write ('!freq')
            for m in range(1,4):
                for n in range(1,4):
                    outputFile.write(" {labelA}S{m}{n} {labelB}S{m}{n}".format(m=m, n=n, **formatDic))
                outputFile.write('\n!')
            outputFile.write('\n')
            # write out data
            for f in range(len(self.f)):
                outputFile.write(str(self.frequency.f_scaled[f]))
                for m in range(3):
                    for n in range(3):
                        outputFile.write( ' ' + str(funcA(self.s[f,m,n])) + ' '\
                                + str(funcB(self.s[f,m,n])))
                    outputFile.write('\n')
                # write out the z0 following hfss's convention if desired
                if write_z0:
                    outputFile.write('! Port Impedance' )
                    for n in range(3):
                        outputFile.write(' %.14f %.14f'%(self.z0[f,n].real, self.z0[f,n].imag))
                    outputFile.write('\n')
        elif self.number_of_ports >= 4 :
            # general n-port
            # - matrix is written line by line
            # - 4 complex numbers / 8 real numbers max. for a single line
            # - continuation lines (anything except first) go with indent
            #   this is not part of the spec, but many tools handle it this way
            #   -> allows to parse without knowledge of number of ports
            # write comment line for users (optional)
            outputFile.write ('!freq')
            for m in range(1,1+self.number_of_ports):
                for n in range(1,1+self.number_of_ports):
                    if (n > 0 and (n%4) == 0 ) :
                        outputFile.write('\n!')
                    outputFile.write(" {labelA}S{m}{n} {labelB}S{m}{n}".format(m=m, n=n, **formatDic))
                outputFile.write('\n!')
            outputFile.write('\n')
            # write out data
            for f in range(len(self.f)):
                outputFile.write(str(self.frequency.f_scaled[f]))
                for m in range(self.number_of_ports):
                    for n in range(self.number_of_ports):
                        if (n > 0 and (n%4) == 0 ) :
                            outputFile.write('\n')
                        outputFile.write( ' ' + str(funcA(self.s[f,m,n])) + ' '\
                                + str(funcB(self.s[f,m,n])))
                    outputFile.write('\n')
                # write out the z0 following hfss's convention if desired
                if write_z0:
                    outputFile.write('! Port Impedance' )
                    for n in range(self.number_of_ports):
                        outputFile.write(' %.14f %.14f'%(self.z0[f,n].real, self.z0[f,n].imag))
                    outputFile.write('\n')
def write(self, file=None, *args, **kwargs):
    '''
    Write the Network to disk using the :mod:`pickle` module.

    The resultant file can be read either by using the Networks
    constructor, :func:`__init__` , the read method :func:`read`, or
    the general read function :func:`skrf.io.general.read`

    Parameters
    -----------
    file : str or file-object
        filename or a file-object. If left as None then the
        filename will be set to Network.name, if its not None.
        If both are None, ValueError is raised.
    \*args, \*\*kwargs :
        passed through to :func:`~skrf.io.general.write`

    Notes
    ------
    If the self.name is not None and file is can left as None
    and the resultant file will have the `.ntwk` extension appended
    to the filename.

    Examples
    ---------
    >>> n = rf.N(f=[1,2,3],s=[1,1,1],z0=50, name = 'open')
    >>> n.write()
    >>> n2 = rf.read('open.ntwk')

    See Also
    ---------
    skrf.io.general.write : write any skrf object
    skrf.io.general.read : read any skrf object
    '''
    # this import is delayed until here because of a circular dependency.
    # BUG FIX: the original used `from io.general import write`, which
    # resolves against the stdlib `io` package and fails; the relative
    # import matches the one used by :func:`read` (`from .io.general ...`)
    from .io.general import write
    if file is None:
        if self.name is None:
            raise ValueError('No filename given. You must provide a filename, or set the name attribute')
        file = self.name
    write(file, self, *args, **kwargs)
def read(self, *args, **kwargs):
    '''
    Read a Network from a 'ntwk' file.

    A ntwk file is written with :func:`write`. It is just a pickled
    file.

    Parameters
    -------------
    \*args, \*\*kwargs : args and kwargs
        passed to :func:`skrf.io.general.write`

    Notes
    ------
    This function calls :func:`skrf.io.general.read`.

    Examples
    -----------
    >>> rf.read('myfile.ntwk')
    >>> rf.read('myfile.p')

    See Also
    ----------
    write
    skrf.io.general.write
    skrf.io.general.read
    '''
    from .io.general import read
    loaded = read(*args, **kwargs)
    self.copy_from(loaded)
def write_spreadsheet(self, *args, **kwargs):
    '''
    Write contents of network to a spreadsheet, for your boss to use.

    See Also
    ---------
    skrf.io.general.network_2_spreadsheet
    '''
    # delayed import: io.general imports this module
    from .io.general import network_2_spreadsheet
    network_2_spreadsheet(self, *args, **kwargs)
def to_dataframe(self, *args, **kwargs):
    '''
    Convert attributes of a Network to a pandas DataFrame.

    See Also
    ---------
    skrf.io.general.network_2_dataframe
    '''
    # delayed import: io.general imports this module
    from .io.general import network_2_dataframe
    return network_2_dataframe(self, *args, **kwargs)
# interpolation
def interpolate(self, new_frequency, **kwargs):
    '''
    Return an interpolated network, from a new :class:'~skrf.frequency.Frequency'.

    Interpolate the networks s-parameters linearly in real and
    imaginary components. Other interpolation types can be used
    by passing appropriate `\*\*kwargs`. This function `returns` an
    interpolated Network. Alternatively :func:`~Network.interpolate_self`
    will interpolate self.

    Parameters
    -----------
    new_frequency : :class:`~skrf.frequency.Frequency`
        frequency information to interpolate
    **kwargs : keyword arguments
        passed to :func:`scipy.interpolate.interp1d` initializer.

    Returns
    ----------
    result : :class:`Network`
        an interpolated Network

    Notes
    --------
    See :func:`scipy.interpolate.interpolate.interp1d` for useful
    kwargs. For example
        **kind** : str or int
            Specifies the kind of interpolation as a string ('linear',
            'nearest', 'zero', 'slinear', 'quadratic, 'cubic') or
            as an integer specifying the order of the spline
            interpolator to use.

    See Also
    ----------
    resample
    interpolate_self
    interpolate_from_f

    Examples
    -----------
    >>> n = rf.data.ring_slot
    >>> new_freq = rf.Frequency(75,110,501,'ghz')
    >>> n.interpolate(new_freq, kind = 'cubic')
    '''
    f_old = self.frequency.f
    # interpolate real and imaginary parts separately, for s and z0
    interp_s_re = interp1d(f_old, self.s_re, axis=0, **kwargs)
    interp_s_im = interp1d(f_old, self.s_im, axis=0, **kwargs)
    interp_z0_re = interp1d(f_old, self.z0.real, axis=0, **kwargs)
    interp_z0_im = interp1d(f_old, self.z0.imag, axis=0, **kwargs)

    # build the new network from a copy, and fill with interpolated data
    result = self.copy()
    result.frequency = new_frequency
    f_new = new_frequency.f
    result.s = interp_s_re(f_new) + 1j * interp_s_im(f_new)
    result.z0 = interp_z0_re(f_new) + 1j * interp_z0_im(f_new)
    return result
def interpolate_self_npoints(self, npoints, **kwargs):
    '''
    Interpolate network based on a new number of frequency points.

    Parameters
    -----------
    npoints : int
        number of frequency points
    **kwargs : keyword arguments
        passed to :func:`scipy.interpolate.interp1d` initializer.

    See Also
    ---------
    interpolate_self : same functionality but takes a Frequency
        object
    interpolate : same functionality but takes a Frequency
        object and returns a new Network, instead of updating
        itself.

    Notes
    -------
    The function :func:`~Network.resample` is an alias for
    :func:`~Network.interpolate_self_npoints`.

    Examples
    -----------
    >>> n = rf.data.ring_slot
    >>> n.resample(501) # resample is an alias
    '''
    freq = self.frequency.copy()
    freq.npoints = npoints
    self.interpolate_self(freq, **kwargs)

# convenience alias
resample = interpolate_self_npoints
def interpolate_self(self, new_frequency, **kwargs):
    '''
    Interpolate s-parameters given a new
    :class:'~skrf.frequency.Frequency' object, in place.

    See :func:`~Network.interpolate` for more information.

    Parameters
    -----------
    new_frequency : :class:`~skrf.frequency.Frequency`
        frequency information to interpolate at
    **kwargs : keyword arguments
        passed to :func:`scipy.interpolate.interp1d` initializer.

    See Also
    ----------
    resample
    interpolate
    interpolate_from_f
    '''
    interpolated = self.interpolate(new_frequency, **kwargs)
    self.frequency = interpolated.frequency
    self.s = interpolated.s
    self.z0 = interpolated.z0
def interpolate_from_f(self, f, interp_kwargs=None, **kwargs):
    '''
    Interpolate s-parameters from a frequency vector.

    Given a frequency vector, and optionally a `unit` (see \*\*kwargs)
    , interpolate the networks s-parameters linearly in real and
    imaginary components.

    See :func:`~Network.interpolate` for more information.

    Parameters
    -----------
    f : array-like
        frequency vector to interpolate at
    interp_kwargs : dict, optional
        dictionary of kwargs to be passed through to
        :func:`scipy.interpolate.interpolate.interp1d`
    \*\*kwargs :
        passed to :func:`skrf.frequency.Frequency.from_f` (e.g. `unit`).

    Notes
    ---------
    This creates a new :class:`~skrf.frequency.Frequency`, object
    using the method :func:`~skrf.frequency.Frequency.from_f`, and then calls
    :func:`~Network.interpolate_self`.

    See Also
    ----------
    resample
    interpolate
    interpolate_self
    '''
    # BUG FIX: the original used a mutable default `interp_kwargs={}`,
    # which is shared across calls; use None as the sentinel instead
    if interp_kwargs is None:
        interp_kwargs = {}
    freq = Frequency.from_f(f, **kwargs)
    self.interpolate_self(freq, **interp_kwargs)
def crop(self, f_start, f_stop):
    '''
    Crop Network based on start and stop frequencies, in place.

    No interpolation is done.

    Parameters
    -----------
    f_start : number
        start frequency of crop range, in units of self.frequency.unit
    f_stop : number
        stop frequency of crop range, in units of self.frequency.unit
    '''
    f_scaled = self.frequency.f_scaled
    if f_start < f_scaled.min():
        raise ValueError('`f_start` is out of range.')
    if f_stop > f_scaled.max():
        raise ValueError('`f_stop` is out of range.')

    i_start = find_nearest_index(f_scaled, f_start)
    i_stop = find_nearest_index(f_scaled, f_stop)
    sliced = self[i_start:i_stop + 1]
    self.frequency = sliced.frequency
    self.s = sliced.s
    self.z0 = sliced.z0
def cropped(self, f_start, f_stop):
    '''
    Return a cropped network, leaving self untouched.

    See Also
    ---------
    crop
    '''
    result = self.copy()
    result.crop(f_start=f_start, f_stop=f_stop)
    return result
def flip(self):
    '''
    Swap the ports of a two port Network, in place.
    '''
    # guard clause: only 2-ports can be flipped
    if self.number_of_ports != 2:
        raise ValueError('you can only flip two-port Networks')
    self.renumber([0, 1], [1, 0])
def flipped(self):
    '''
    Return a flipped network, leaving self untouched.

    See Also
    ---------
    flip
    '''
    result = self.copy()
    result.flip()
    return result
def renormalize(self, z_new, powerwave=False):
    '''
    Renormalize the s-parameter matrix given new port impedances.

    Parameters
    ---------------
    z_new : complex array of shape FxN, F, N or a scalar
        new port impedances
    powerwave : bool
        if true this calls :func:`renormalize_s_pw`, which assumes
        a powerwave formulation. Otherwise it calls
        :func:`renormalize_s` which implements the default pseudowave
        formulation. If z_new or self.z0 is complex, then these
        produce different results.

    See Also
    ----------
    renormalize_s
    renormalize_s_pw
    fix_z0_shape
    '''
    # pick the renormalization flavor, then apply it
    transform = renormalize_s_pw if powerwave else renormalize_s
    self.s = transform(self.s, self.z0, z_new)
    self.z0 = fix_z0_shape(z_new, self.frequency.npoints, self.nports)
def renumber(self, from_ports, to_ports):
    '''
    Renumber the ports of a Network, in place.

    Port `from_ports[i]` becomes port `to_ports[i]`, for all i;
    the s-matrix rows, columns and z0 entries are permuted accordingly.

    Parameters
    -----------
    from_ports : list-like
        current port indices
    to_ports: list-like
        new port indices (must be a permutation of `from_ports`)

    Examples
    ---------
    To flip the ports of a 2-port network 'foo':
    >>> foo.renumber( [0,1], [1,0] )

    To rotate the ports of a 3-port network 'bar' so that port 0 becomes port 1:
    >>> bar.renumber( [0,1,2], [1,2,0] )

    To swap the first and last ports of a network 'duck':
    >>> duck.renumber( [0,-1], [-1,0] )
    '''
    from_ports = npy.array(from_ports)
    to_ports = npy.array(to_ports)
    if len(npy.unique(from_ports)) != len(from_ports):
        raise ValueError('an index can appear at most once in from_ports or to_ports')
    if any(npy.unique(from_ports) != npy.unique(to_ports)):
        raise ValueError('from_ports and to_ports must have the same set of indices')
    # numpy fancy indexing on the right-hand side yields a *copy*, so the
    # row permutation below does not corrupt the column permutation that
    # follows it -- the order of these two statements is still significant
    self.s[:,to_ports,:] = self.s[:,from_ports,:]  # renumber rows
    self.s[:,:,to_ports] = self.s[:,:,from_ports]  # renumber columns
    self.z0[:,to_ports] = self.z0[:,from_ports]
def windowed(self, window=('kaiser', 6), normalize=True):
    '''
    Return a windowed version of the s-matrix. Used in time-domain analysis.

    When using time domain through :attr:`s_time_db`,
    or similar properties, the spectrum is usually windowed,
    before the IFFT is taken. This is done to
    compensate for the band-pass nature of a spectrum [1]_ .

    This function calls :func:`scipy.signal.get_window` which gives
    more details about the windowing.

    Parameters
    -----------
    window : string, float, or tuple
        The type of window to create. See :func:`scipy.signal.get_window`
        for details.
    normalize : bool
        Normalize the window to preserve power. ie
        sum(ntwk.s,axis=0) == sum(ntwk.windowed().s,axis=0)

    Examples
    -----------
    >>> ntwk = rf.Network('myfile.s2p')
    >>> ntwk_w = ntwk.windowed()
    >>> ntwk_w.plot_s_time_db()

    References
    -------------
    .. [1] Agilent Time Domain Analysis Using a Network Analyzer Application Note 1287-12
    '''
    n_freq = len(self)
    # 1-D taper, broadcast to every (port, port) element of the s-matrix
    taper = signal.get_window(window, n_freq)
    taper = taper.reshape(-1, 1, 1) * npy.ones((n_freq,
                                                self.nports,
                                                self.nports))
    result = self * taper

    if normalize:
        # normalize the s-parameters to account for power lost in windowing
        result.s = result.s * npy.sum(self.s_mag, axis=0) / \
                   npy.sum(result.s_mag, axis=0)

    return result
def time_gate(self, t_start, t_stop=None, window=('kaiser', 6)):
    '''
    Time-gate s-parameters.

    The gate can be defined with start/stop times, or by the gate
    width. If `t_stop` is None, the it will default to -`t_start`.
    In this case `t_start`== gate width/2

    See Warning!

    Parameters
    ------------
    t_start : number
        start of time gate, (s). Or, if t_stop==None, then it is
        1/2*gate width.
    t_stop : number
        stop of time gate (s), if None will be -t_start.
    window : string, float, or tuple
        window type, passed to :func:`scipy.signal.get_window`

    Returns
    --------
    ntwk : Network
        copy of self with time-gated s-parameters

    .. warning::
        This is not fully tested, and doesnt appear to be preserve power
        correctly
    '''
    if t_stop is None:
        t_stop = -1 * t_start

    # ensure t_start <= t_stop
    if t_start > t_stop:
        t_start *= -1
        t_stop *= -1

    # find start/stop gate indices
    t = self.frequency.t
    t_start_idx = find_nearest_index(t, t_start)
    t_stop_idx = find_nearest_index(t, t_stop)

    # create window
    window_width = abs(t_stop_idx - t_start_idx)
    window = signal.get_window(window, window_width)

    # create the gate by padding the window with zeros
    padded_window = npy.r_[npy.zeros(t_start_idx),
                           window,
                           npy.zeros(len(t) - t_stop_idx)]

    # reshape the gate array so it operates on all s-parameters
    padded_window = padded_window.reshape(-1, 1, 1) * \
                    npy.ones((len(self), self.nports, self.nports))

    # gate in the time domain, then transform back to frequency.
    # (removed a dead local from the original: it computed
    # `s_time = fft.ifftshift(fft.ifft(self.s, axis=0), axes=0)`
    # but never read it -- `self.s_time` is what is actually used)
    s_time_windowed = self.s_time * padded_window
    s_freq = fft.fft(fft.fftshift(s_time_windowed, axes=0), axis=0)

    gated = self.copy()
    gated.s = s_freq
    return gated
# plotting
def plot_s_smith(self,m=None, n=None,r=1,ax = None, show_legend=True,\
        chart_type='z', draw_labels=False, label_axes=False, *args,**kwargs):
    '''
    plots the scattering parameter on a smith chart

    plots indices `m`, `n`, where `m` and `n` can be integers or
    lists of integers.

    Parameters
    -----------
    m : int, optional
        first index
    n : int, optional
        second index
    r : number, optional
        radius of the smith chart
    ax : matplotlib.Axes object, optional
        axes to plot on. in case you want to update an existing
        plot.
    show_legend : boolean, optional
        to turn legend show legend of not, optional
    chart_type : ['z','y']
        draw impedance or admittance contours
    draw_labels : Boolean
        annotate chart with impedance values
    label_axes : Boolean
        Label axis with titles `Real` and `Imaginary`
    \*args : arguments, optional
        passed to the matplotlib.plot command
    \*\*kwargs : keyword arguments, optional
        passed to the matplotlib.plot command

    See Also
    --------
    plot_vs_frequency_generic - generic plotting function
    smith -  draws a smith chart

    Examples
    ---------
    >>> myntwk.plot_s_smith()
    >>> myntwk.plot_s_smith(m=0,n=1,color='b', marker='x')
    '''
    # TODO: prevent this from re-drawing smith chart if one alread
    # exists on current set of axes

    # get current axis if user doesnt supply and axis
    if ax is None:
        ax = plb.gca()

    # None means 'all ports' for either index
    if m is None:
        M = range(self.number_of_ports)
    else:
        M = [m]
    if n is None:
        N = range(self.number_of_ports)
    else:
        N = [n]

    # only auto-generate labels when the caller did not supply one
    if 'label' not in kwargs.keys():
        generate_label=True
    else:
        generate_label=False

    for m in M:
        for n in N:
            # set the legend label for this trace to the networks name if it
            # exists, and they didnt pass a name key in the kwargs
            if generate_label:
                if self.name is None:
                    if plb.rcParams['text.usetex']:
                        label_string = '$S_{'+repr(m+1) + repr(n+1)+'}$'
                    else:
                        label_string = 'S'+repr(m+1) + repr(n+1)
                else:
                    if plb.rcParams['text.usetex']:
                        label_string = self.name+', $S_{'+repr(m+1) + \
                                repr(n+1)+'}$'
                    else:
                        label_string = self.name+', S'+repr(m+1) + repr(n+1)
                kwargs['label'] = label_string

            # plot the desired attribute vs frequency; draw the smith
            # chart background only once per axes (no patches yet)
            if len (ax.patches) == 0:
                smith(ax=ax, smithR = r, chart_type=chart_type, draw_labels=draw_labels)
            ax.plot(self.s[:,m,n].real,  self.s[:,m,n].imag, *args,**kwargs)

    #draw legend
    if show_legend:
        ax.legend()
    ax.axis(npy.array([-1.1,1.1,-1.1,1.1])*r)

    if label_axes:
        ax.set_xlabel('Real')
        ax.set_ylabel('Imaginary')
def plot_it_all(self, *args, **kwargs):
    '''
    Plots dB, deg, smith, and complex in subplots.

    Plots the magnitude in dB in subplot 1, the phase in degrees in
    subplot 2, a smith chart in subplot 3, and a complex plot in
    subplot 4.

    Parameters
    -----------
    \*args : arguments, optional
        passed to the matplotlib.plot command
    \*\*kwargs : keyword arguments, optional
        passed to the matplotlib.plot command

    See Also
    --------
    plot_s_db - plot magnitude (in dB) of s-parameters vs frequency
    plot_s_deg - plot phase of s-parameters (in degrees) vs frequency
    plot_s_smith - plot complex s-parameters on smith chart
    plot_s_complex - plot complex s-parameters in the complex plane

    Examples
    ---------
    >>> from skrf.data import ring_slot
    >>> ring_slot.plot_it_all()
    '''
    # call the plot methods directly; the original went through
    # getattr(self, 'plot_s_db') etc., which added nothing
    plb.subplot(221)
    self.plot_s_db(*args, **kwargs)
    plb.subplot(222)
    self.plot_s_deg(*args, **kwargs)
    plb.subplot(223)
    self.plot_s_smith(*args, **kwargs)
    plb.subplot(224)
    self.plot_s_complex(*args, **kwargs)
# noise
def add_noise_polar(self, mag_dev, phase_dev, **kwargs):
    '''
    Add complex zero-mean gaussian white-noise, in place.

    Adds a complex zero-mean gaussian white-noise of a given
    standard deviation for magnitude and phase.

    Parameters
    ------------
    mag_dev : number
        standard deviation of magnitude
    phase_dev : number
        standard deviation of phase [in degrees]
    '''
    # draw independent noise for phase and magnitude (phase first, to
    # keep the original draw order against a shared RNG state)
    phase_noise = stats.norm(loc=0, scale=phase_dev).rvs(size=self.s.shape)
    mag_noise = stats.norm(loc=0, scale=mag_dev).rvs(size=self.s.shape)

    noisy_phase = self.s_deg + phase_noise
    noisy_mag = self.s_mag + mag_noise
    self.s = noisy_mag * npy.exp(1j * npy.pi / 180. * noisy_phase)
def add_noise_polar_flatband(self, mag_dev, phase_dev, **kwargs):
    '''
    Add a flatband complex zero-mean gaussian white-noise signal of
    given standard deviations for magnitude and phase, in place.

    The same noise sample is applied at every frequency.

    Parameters
    ------------
    mag_dev : number
        standard deviation of magnitude
    phase_dev : number
        standard deviation of phase [in degrees]
    '''
    # one draw per (port, port) element, shared across all frequencies
    # (phase first, to keep the original draw order)
    phase_noise = stats.norm(loc=0, scale=phase_dev).rvs(size=self.s[0].shape)
    mag_noise = stats.norm(loc=0, scale=mag_dev).rvs(size=self.s[0].shape)

    noisy_phase = self.s_deg + phase_noise
    noisy_mag = self.s_mag + mag_noise
    self.s = noisy_mag * npy.exp(1j * npy.pi / 180. * noisy_phase)
def multiply_noise(self, mag_dev, phase_dev, **kwargs):
    '''
    Multiply the s-parameters by a complex bivariate gaussian
    white-noise signal of given standard deviations for magnitude
    and phase, in place.

    Magnitude mean is 1, phase mean is 0.

    Parameters
    ------------
    mag_dev : number
        standard deviation of magnitude
    phase_dev : number
        standard deviation of phase [in degrees]
    '''
    # phase drawn first, magnitude second (original draw order preserved)
    phase_noise = stats.norm(loc=0, scale=phase_dev).rvs(
        size=self.s.shape)
    mag_noise = stats.norm(loc=1, scale=mag_dev).rvs(
        size=self.s.shape)
    self.s = mag_noise * npy.exp(1j * npy.pi / 180. * phase_noise) * self.s
def nudge(self, amount=1e-12):
    '''
    Perturb the s-parameters by a small amount, in place.

    This is useful to work-around numerical bugs.

    Parameters
    ------------
    amount : number,
        amount to add to s parameters

    Notes
    -----------
    This function is

        self.s = self.s + amount
    '''
    # deliberately rebinds (rather than mutating in place) so the
    # property setter runs
    self.s = self.s + amount
# other
def func_on_parameter(self, func, attr='s', *args, **kwargs):
    '''
    Apply a function to a parameter matrix, one frequency slice at a time.

    This is useful for functions that can only operate on 2d arrays,
    like numpy.linalg.inv. This loops over f and calls
    `func(ntwkA.s[f,:,:], *args, **kwargs)`

    Parameters
    ------------
    func : func
        function to apply to s-parameters, on a single-frequency slice.
        (ie func(ntwkA.s[0,:,:], *args, **kwargs)
    attr : str
        name of the parameter attribute to operate on (default 's')
    \*args, \*\*kwargs :
        passed to the func

    Examples
    -----------
    >>> from numpy.linalg import inv
    >>> ntwk.func_on_parameter(inv)
    '''
    result = self.copy()
    param = getattr(self, attr)
    # apply `func` per frequency slice and stack the results back up
    result.s = npy.r_[[func(param[k, :, :], *args, **kwargs)
                       for k in range(len(param))]]
    return result
def nonreciprocity(self, m, n, normalize=False):
    '''
    Non-reciprocity metric, optionally normalized.

    This is a port-by-port measure of how non-reciprocal an n-port
    network is. With `normalize=True` it is defined by,

    .. math::

            (S_{mn} - S_{nm}) / \\sqrt ( S_{mn} S_{nm} )

    otherwise only the numerator is returned.
    '''
    forward = getattr(self, 's%i%i' % (m, n))
    reverse = getattr(self, 's%i%i' % (n, m))
    difference = forward - reverse
    if not normalize:
        return difference
    denom = forward * reverse
    denom.s = npy.sqrt(denom.s)
    return difference / denom
# generalized mixed mode transformations
# XXX: experimental implementation of gmm s parameters
# TODO: automated test cases
def se2gmm(self, p, z0_mm=None):
    '''
    Transform network from single ended parameters to generalized mixed mode parameters [1]

    [1] Ferrero and Pirola; Generalized Mixed-Mode S-Parameters; IEEE Transactions on
    Microwave Theory and Techniques; Vol. 54; No. 1; Jan 2006

    Parameters
    ------------
    p : int, number of differential ports
    z0_mm: f x n x n matrix of mixed mode impedances, optional
        if input is None, 100 Ohms differential and 25 Ohms common mode reference impedance

    .. warning::
        This is not fully tested, and should be considered as experimental
    '''
    # XXX: assumes 'proper' port order (first differential ports, then
    # single ended ports)
    if z0_mm is None:
        z0_mm = self.z0.copy()
        z0_mm[:,0:p] = 100 # differential mode impedance
        z0_mm[:,p:2*p] = 25 # common mode impedance
    Xi_tilde_11, Xi_tilde_12, Xi_tilde_21, Xi_tilde_22 = self._Xi_tilde(p, self.z0, z0_mm)
    # per-frequency matrix products (einsum broadcasts over the f axis)
    A = Xi_tilde_21 + npy.einsum('...ij,...jk->...ik', Xi_tilde_22, self.s)
    B = Xi_tilde_11 + npy.einsum('...ij,...jk->...ik', Xi_tilde_12, self.s)
    # solve for the mixed-mode s-matrix via conjugate-transposed solve,
    # equation (34) of the reference
    self.s = npy.transpose(npy.linalg.solve(npy.transpose(B, (0,2,1)).conj(), npy.transpose(A, (0,2,1)).conj()), (0,2,1)).conj() # (34)
    self.z0 = z0_mm
def gmm2se(self, p, z0_se=None):
    '''
    Transform network from generalized mixed mode parameters [1] to single ended parameters

    [1] Ferrero and Pirola; Generalized Mixed-Mode S-Parameters; IEEE Transactions on
    Microwave Theory and Techniques; Vol. 54; No. 1; Jan 2006

    Parameters
    ------------
    p : int, number of differential ports
    z0_se: f x n x n matrix of single ended impedances, optional
        if input is None, assumes 50 Ohm reference impedance

    .. warning::
        This is not fully tested, and should be considered as experimental
    '''
    # TODO: testing of reverse transformation
    # XXX: assumes 'proper' port order (differential ports, single ended ports)
    if z0_se is None:
        z0_se = self.z0.copy()
        # BUG FIX: the original rebound `z0_se = 50`, discarding the
        # copied array; `_M` later indexes `z0_se[:, j]`, which requires
        # the f x n array. Fill the array in place instead.
        z0_se[:] = 50
    Xi_tilde_11, Xi_tilde_12, Xi_tilde_21, Xi_tilde_22 = self._Xi_tilde(p, z0_se, self.z0)
    # per-frequency matrix products (einsum broadcasts over the f axis)
    A = Xi_tilde_22 - npy.einsum('...ij,...jk->...ik', self.s, Xi_tilde_12)
    B = Xi_tilde_21 - npy.einsum('...ij,...jk->...ik', self.s, Xi_tilde_11)
    self.s = npy.linalg.solve(A, B)  # (35)
    self.z0 = z0_se
# generalized mixed mode supplement functions
# _T: fixed 4x4 single-ended <-> mixed-mode conversion block, equation (5)
# of Ferrero & Pirola (see se2gmm/gmm2se); applied per port pair in _X
_T = npy.array([[1, 0 , -1, 0], [0, 0.5, 0, -0.5], [0.5, 0, 0.5, 0], [0, 1, 0, 1]]) # (5)
def _m(self, z0):
scaling = npy.sqrt(z0.real) / (2 * npy.abs(z0))
Z = npy.ones((z0.shape[0], 2, 2), dtype=npy.complex128)
Z[:,0,1] = z0
Z[:,1,1] = -z0
return scaling[:,npy.newaxis,npy.newaxis] * Z
def _M(self, j, k, z0_se):  # (14)
    # block-diagonal 4x4 single-ended modal matrix for port pair (j, k)
    out = npy.zeros((self.f.shape[0], 4, 4), dtype=npy.complex128)
    out[:, :2, :2] = self._m(z0_se[:, j])
    out[:, 2:, 2:] = self._m(z0_se[:, k])
    return out
def _M_circle(self, l, p, z0_mm):  # (12)
    # block-diagonal 4x4 mixed-mode modal matrix for port pair l
    out = npy.zeros((self.f.shape[0], 4, 4), dtype=npy.complex128)
    out[:, :2, :2] = self._m(z0_mm[:, l])      # differential mode impedance of port pair
    out[:, 2:, 2:] = self._m(z0_mm[:, p + l])  # common mode impedance of port pair
    return out
def _X(self, j, k, l, p, z0_se, z0_mm):  # (15)
    # per-frequency matrix product: M_circle @ T @ inv(M)
    left = self._M_circle(l, p, z0_mm).dot(self._T)
    right = npy.linalg.inv(self._M(j, k, z0_se))
    # matrix multiplication elementwise for each frequency
    return npy.einsum('...ij,...jk->...ik', left, right)
def _P(self, p): # (27) (28)
n = self.nports
Pda = npy.zeros((p,2*n), dtype=npy.bool)
Pdb = npy.zeros((p,2*n), dtype=npy.bool)
Pca = npy.zeros((p,2*n), dtype=npy.bool)
Pcb = npy.zeros((p,2*n), dtype=npy.bool)
Pa = npy.zeros((n-2*p,2*n), dtype=npy.bool)
Pb = npy.zeros((n-2*p,2*n), dtype=npy.bool)
for l in npy.arange(p):
Pda[l,4*(l+1)-3-1] = True
Pca[l,4*(l+1)-1-1] = True
Pdb[l,4*(l+1)-2-1] = True
Pcb[l,4*(l+1)-1] = True
if Pa.shape[0] is not 0:
Pa[l,4*p+2*(l+1)-1-1] = True
Pb[l,4*p+2*(l+1)-1] = True
return npy.concatenate((Pda, Pca, Pa, Pdb, Pcb, Pb))
def _Q(self): # (29) error corrected
n = self.nports
Qa = npy.zeros((n,2*n), dtype=npy.bool)
Qb = npy.zeros((n,2*n), dtype=npy.bool)
for l in npy.arange(n):
Qa[l,2*(l+1)-1-1] = True
Qb[l,2*(l+1)-1] = True
return npy.concatenate((Qa, Qb))
def _Xi(self, p, z0_se, z0_mm):  # (24)
    n = self.nports
    nfreq = self.f.shape[0]
    # start from a per-frequency identity, then overwrite a 4x4 block
    # for each differential port pair
    Xi = npy.ones(nfreq)[:, npy.newaxis, npy.newaxis] * npy.eye(2 * n, dtype=npy.complex128)
    for l in npy.arange(p):
        Xi[:, 4 * l:4 * l + 4, 4 * l:4 * l + 4] = self._X(l * 2, l * 2 + 1, l, p, z0_se, z0_mm)
    return Xi
def _Xi_tilde(self, p, z0_se, z0_mm):  # (31)
    n = self.nports
    # broadcast the constant P and Q^T matrices across all frequencies
    ones_f = npy.ones(self.f.shape[0])[:, npy.newaxis, npy.newaxis]
    P = ones_f * self._P(p)
    QT = ones_f * self._Q().T
    Xi = self._Xi(p, z0_se, z0_mm)
    PXi = npy.einsum('...ij,...jk->...ik', P, Xi)
    Xi_tilde = npy.einsum('...ij,...jk->...ik', PXi, QT)
    # return the four n x n corner blocks
    return (Xi_tilde[:, :n, :n], Xi_tilde[:, :n, n:],
            Xi_tilde[:, n:, :n], Xi_tilde[:, n:, n:])
## Functions operating on Network[s]
def connect(ntwkA, k, ntwkB, l, num=1):
    '''
    connect two n-port networks together.
    specifically, connect ports `k` thru `k+num-1` on `ntwkA` to ports
    `l` thru `l+num-1` on `ntwkB`. The resultant network has
    (ntwkA.nports+ntwkB.nports-2*num) ports. The port indices ('k','l')
    start from 0. Port impedances **are** taken into account.
    Parameters
    -----------
    ntwkA : :class:`Network`
        network 'A'
    k : int
        starting port index on `ntwkA` ( port indices start from 0 )
    ntwkB : :class:`Network`
        network 'B'
    l : int
        starting port index on `ntwkB`
    num : int
        number of consecutive ports to connect (default 1)
    Returns
    ---------
    ntwkC : :class:`Network`
        new network of rank (ntwkA.nports + ntwkB.nports - 2*num)
    See Also
    -----------
    connect_s : actual S-parameter connection algorithm.
    innerconnect_s : actual S-parameter connection algorithm.
    Notes
    -------
    the effect of mis-matched port impedances is handled by inserting
    a 2-port 'mismatch' network between the two connected ports.
    This mismatch Network is calculated with the
    :func:`impedance_mismatch` function.
    Examples
    ---------
    To implement a *cascade* of two networks
    >>> ntwkA = rf.Network('ntwkA.s2p')
    >>> ntwkB = rf.Network('ntwkB.s2p')
    >>> ntwkC = rf.connect(ntwkA, 1, ntwkB,0)
    '''
    # some checking
    check_frequency_equal(ntwkA,ntwkB)
    if (k+num-1> ntwkA.nports-1):
        raise IndexError('Port `k` out of range')
    if (l+num-1> ntwkB.nports-1):
        raise IndexError('Port `l` out of range')
    # create output Network, from copy of input
    ntwkC = ntwkA.copy()
    # if networks' z0's are not identical, then connect a impedance
    # mismatch, which takes into account the effect of differing port
    # impedances.
    #import pdb;pdb.set_trace()
    if assert_z0_at_ports_equal(ntwkA,k,ntwkB,l) == False:
        ntwkC.s = connect_s(
            ntwkA.s, k,
            impedance_mismatch(ntwkA.z0[:,k], ntwkB.z0[:,l]), 0)
        # the connect_s() put the mismatch's output port at the end of
        # ntwkC's ports. Fix the new port's impedance, then insert it
        # at position k where it belongs.
        ntwkC.z0[:,k:] = npy.hstack((ntwkC.z0[:,k+1:], ntwkB.z0[:,[l]]))
        # cycle the last port back into position k, shifting ports k..n-2 up
        ntwkC.renumber(from_ports= [ntwkC.nports-1] + list(range(k, ntwkC.nports-1)),
                      to_ports=list(range(k, ntwkC.nports)))
    # call s-matrix connection function
    ntwkC.s = connect_s(ntwkC.s,k,ntwkB.s,l)
    # combine z0 arrays and remove ports which were `connected`
    ntwkC.z0 = npy.hstack(
        (npy.delete(ntwkA.z0, range(k,k+1), 1), npy.delete(ntwkB.z0, range(l,l+1), 1)))
    # if we're connecting more than one port, call innerconnect recursively
    # untill all connections are made to finish the job
    # (after the first join, ntwkB's port l+1 sits at index ntwkA.nports-1+l)
    if num>1:
        ntwkC = innerconnect(ntwkC, k, ntwkA.nports-1+l, num-1)
    # if ntwkB is a 2port, then keep port indices where you expect.
    # (move the surviving ntwkB port to index k so a cascade-like join
    # preserves ntwkA's original port numbering)
    if ntwkB.nports == 2 and ntwkA.nports>2:
        from_ports = list(range(ntwkC.nports))
        to_ports = list(range(ntwkC.nports))
        to_ports.pop(k);
        to_ports.append(k)
        ntwkC.renumber(from_ports=from_ports,
                       to_ports=to_ports)
    return ntwkC
def connect_fast(ntwkA, k, ntwkB, l):
    '''
    Connect two n-port networks together (using C-implementation)
    Specifically, connect ports `k` on `ntwkA` to ports
    `l` thru on `ntwkB`. The resultant network has
    (ntwkA.nports+ntwkB.nports-2) ports. The port indices ('k','l')
    start from 0. Port impedances **are** taken into account.
    Parameters
    -----------
    ntwkA : :class:`Network`
        network 'A'
    k : int
        starting port index on `ntwkA` ( port indices start from 0 )
    ntwkB : :class:`Network`
        network 'B'
    l : int
        starting port index on `ntwkB`
    Returns
    ---------
    ntwkC : :class:`Network`
        new network of rank (ntwkA.nports + ntwkB.nports - 2)
    See Also
    -----------
    :mod:`skrf.src`
    Notes
    -------
    the effect of mis-matched port impedances is handled by inserting
    a 2-port 'mismatch' network between the two connected ports.
    This mismatch Network is calculated with the
    :func:`impedance_mismatch` function.
    Examples
    ---------
    To implement a *cascade* of two networks
    >>> ntwkA = rf.Network('ntwkA.s2p')
    >>> ntwkB = rf.Network('ntwkB.s2p')
    >>> ntwkC = rf.connect(ntwkA, 1, ntwkB,0)
    '''
    num = 1
    from src import connect_s_fast
    # some checking
    check_frequency_equal(ntwkA,ntwkB)
    # create output Network, from copy of input
    ntwkC = ntwkA.copy()
    # if networks' z0's are not identical, then connect a impedance
    # mismatch, which takes into account the effect of differing port
    # impedances.
    if assert_z0_at_ports_equal(ntwkA,k,ntwkB,l) == False:
        ntwkC.s = connect_s(
            ntwkA.s, k,
            impedance_mismatch(ntwkA.z0[:,k], ntwkB.z0[:,l]), 0)
        # the connect_s() put the mismatch's output port at the end of
        # ntwkC's ports. Fix the new port's impedance, then insert it
        # at position k where it belongs.
        ntwkC.z0[:,k:] = npy.hstack((ntwkC.z0[:,k+1:], ntwkB.z0[:,[l]]))
        # wrap `range` in list() so list-concatenation also works on
        # Python 3 (range is not a list there); matches connect() above.
        ntwkC.renumber(from_ports= [ntwkC.nports-1] + list(range(k, ntwkC.nports-1)),
                       to_ports=list(range(k, ntwkC.nports)))
    # call s-matrix connection function
    ntwkC.s = connect_s_fast(ntwkC.s,k,ntwkB.s,l)
    # combine z0 arrays and remove ports which were `connected`
    ntwkC.z0 = npy.hstack(
        (npy.delete(ntwkA.z0, range(k,k+num), 1), npy.delete(ntwkB.z0, range(l,l+num), 1)))
    return ntwkC
def innerconnect(ntwkA, k, l, num=1):
    '''
    connect ports of a single n-port network.
    this results in a (n-2)-port network. remember port indices start
    from 0.
    Parameters
    -----------
    ntwkA : :class:`Network`
        network 'A'
    k,l : int
        starting port indices on ntwkA ( port indices start from 0 )
    num : int
        number of consecutive ports to connect
    Returns
    ---------
    ntwkC : :class:`Network`
        new network of rank (ntwkA.nports - 2*num)
    See Also
    -----------
    connect_s : actual S-parameter connection algorithm.
    innerconnect_s : actual S-parameter connection algorithm.
    Notes
    -------
    a 2-port 'mismatch' network is inserted between the connected ports
    if their impedances are not equal.
    Examples
    ---------
    To connect ports '0' and port '1' on ntwkA
    >>> ntwkA = rf.Network('ntwkA.s3p')
    >>> ntwkC = rf.innerconnect(ntwkA, 0,1)
    '''
    if (k+num-1> ntwkA.nports-1):
        raise IndexError('Port `k` out of range')
    if (l+num-1> ntwkA.nports-1):
        raise IndexError('Port `l` out of range')
    # create output Network, from copy of input
    ntwkC = ntwkA.copy()
    if not (ntwkA.z0[:,k] == ntwkA.z0[:,l]).all():
        # connect a impedance mismatch, which will takes into account the
        # effect of differing port impedances
        mismatch = impedance_mismatch(ntwkA.z0[:,k], ntwkA.z0[:,l])
        ntwkC.s = connect_s( ntwkA.s,k, mismatch, 0)
        #print 'mismatch %i-%i'%(k,l)
        # the connect_s() put the mismatch's output port at the end of
        # ntwkC's ports. Fix the new port's impedance, then insert it
        # at position k where it belongs.
        ntwkC.z0[:,k:] = npy.hstack((ntwkC.z0[:,k+1:], ntwkC.z0[:,[l]]))
        # cycle the appended port back into position k
        ntwkC.renumber(from_ports= [ntwkC.nports-1] + list(range(k, ntwkC.nports-1)),
                      to_ports=list(range(k, ntwkC.nports)))
    # call s-matrix connection function
    ntwkC.s = innerconnect_s(ntwkC.s,k,l)
    # update the characteristic impedance matrix
    ntwkC.z0 = npy.delete(ntwkC.z0, list(range(k,k+1)) + list(range(l,l+1)),1)
    # recur if we're connecting more than one port
    # (l-1 because the just-removed pair shifted the remaining ports down)
    if num>1:
        ntwkC = innerconnect(ntwkC, k, l-1, num-1)
    return ntwkC
def cascade(ntwkA, ntwkB):
    '''
    Cascade two 2-port Networks together.
    Joins port 1 of `ntwkA` to port 0 of `ntwkB` by delegating to the
    general-purpose :func:`connect`.
    Parameters
    -----------
    ntwkA : :class:`Network`
        first (input-side) network
    ntwkB : :class:`Network`
        second (output-side) network
    Returns
    --------
    C : Network
        `ntwkA` cascaded with `ntwkB`
    See Also
    ---------
    connect : connects two Networks together at arbitrary ports.
    '''
    # port 1 of A feeds port 0 of B
    return connect(ntwkA, 1, ntwkB, 0)
def cascade_list(l):
    '''
    cascade a list of 2-port networks
    all networks must have same frequency
    Parameters
    --------------
    l : list-like
        (ordered) list of networks
    Returns
    ----------
    out : 2-port Network
        the results of casacading all networks in the list `l`
    '''
    # `reduce` is a builtin only on Python 2; importing it from functools
    # works on both Python 2 and 3.
    from functools import reduce
    return reduce(cascade, l)
def de_embed(ntwkA, ntwkB):
    '''
    De-embed `ntwkA` from `ntwkB`.
    Equivalent to cascading the inverse of `ntwkA` with `ntwkB`
    (`ntwkA.inv ** ntwkB`). Writing the inverse-cascade explicitly is
    recommended over calling this function.
    Parameters
    -----------
    ntwkA : :class:`Network`
        the network to remove
    ntwkB : :class:`Network`
        the network to de-embed from
    Returns
    --------
    C : Network
        `ntwkB` with `ntwkA` de-embedded
    See Also
    ---------
    connect : connects two Networks together at arbitrary ports.
    '''
    inverse = ntwkA.inv
    return inverse ** ntwkB
def stitch(ntwkA, ntwkB, **kwargs):
    '''
    Stitches ntwkA and ntwkB together.
    Concatenates the frequency, s, and z0 data of two networks into a
    single network — useful for joining measurements taken over
    different frequency bands.
    Parameters
    ------------
    ntwkA, ntwkB : :class:`Network` objects
        Networks to stitch together
    \*\*kwargs : keyword args
        passed to :class:`Network` constructor, for output network
    Returns
    ---------
    ntwkC : :class:`Network`
        result of stitching the networks `ntwkA` and `ntwkB` together
    Examples
    ----------
    >>> from skrf.data import wr2p2_line, wr1p5_line
    >>> rf.stitch(wr2p2_line, wr1p5_line)
    2-Port Network: 'wr2p2,line', 330-750 GHz, 402 pts, z0=[ 50.+0.j 50.+0.j]
    '''
    # concatenate raw data along the frequency axis; ntwkA supplies the name
    combined_f = npy.r_[ntwkA.f[:], ntwkB.f[:]]
    ntwkC = Network(
        frequency=Frequency.from_f(combined_f, unit='hz'),
        s=npy.r_[ntwkA.s, ntwkB.s],
        z0=npy.r_[ntwkA.z0, ntwkB.z0],
        name=ntwkA.name,
        **kwargs
        )
    # preserve the display unit of the first network
    ntwkC.frequency.unit = ntwkA.frequency.unit
    return ntwkC
def overlap(ntwkA, ntwkB):
    '''
    Returns the overlapping parts of two Networks, interpolating if needed.
    Where the two frequency vectors do not coincide exactly, both
    networks are interpolated onto the common (overlapping) frequency
    band so the returned pair share identical frequencies.
    Parameters
    ------------
    ntwkA : :class:`Network`
        a ntwk which overlaps `ntwkB`. (the `dominant` network)
    ntwkB : :class:`Network`
        a ntwk which overlaps `ntwkA`.
    Returns
    -----------
    ntwkA_new : :class:`Network`
        part of `ntwkA` that overlapped `ntwkB`
    ntwkB_new : :class:`Network`
        part of `ntwkB` that overlapped `ntwkA`, possibly interpolated
    See Also
    ------------
    :func:`skrf.frequency.overlap_freq`
    '''
    common = ntwkA.frequency.overlap(ntwkB.frequency)
    return (ntwkA.interpolate(common), ntwkB.interpolate(common))
def average(list_of_networks, polar = False):
    '''
    Calculates the average network from a list of Networks.
    This is the complex (real/imaginary) average of the s-parameters of
    the given Networks. Averaging in polar (mag/phase) form is not yet
    implemented.
    Parameters
    -----------
    list_of_networks : list of :class:`Network` objects
        the list of networks to average
    polar : bool
        average mag/phase instead of re/im (not implemented; raises)
    Returns
    ---------
    ntwk : :class:`Network`
        the resultant averaged Network
    Notes
    ------
    This same function can be accomplished with properties of a
    :class:`~skrf.networkset.NetworkSet` class.
    Examples
    ---------
    >>> ntwk_list = [rf.Network('myntwk.s1p'), rf.Network('myntwk2.s1p')]
    >>> mean_ntwk = rf.average(ntwk_list)
    '''
    result = list_of_networks[0].copy()
    if polar:
        # averaging of mag/phase components is not implemented
        raise NotImplementedError
    # accumulate the complex s-parameters, then divide by the count
    for other in list_of_networks[1:]:
        result += other
    result.s = result.s / (len(list_of_networks))
    return result
def one_port_2_two_port(ntwk):
    '''
    calculates the two-port network given a symmetric, reciprocal and
    lossless one-port network.
    takes:
        ntwk: a symmetric, reciprocal and lossless one-port network.
    returns:
        ntwk: the resultant two-port Network
    '''
    result = ntwk.copy()
    result.s = npy.zeros((result.frequency.npoints, 2, 2), dtype=complex)
    # symmetric: both reflection coefficients equal the one-port's s11
    s11 = ntwk.s[:, 0, 0]
    result.s[:, 0, 0] = s11
    result.s[:, 1, 1] = s11
    ## HACK: TODO: verify this mathematically
    # lossless: |s21| follows from unitarity; phase offset by +/- 90 deg
    # depending on the sign of angle(s11)
    mag = npy.sqrt(1 - npy.abs(s11) ** 2)
    ang = npy.angle(s11)
    phase = ang + npy.pi / 2. * (ang < 0) - npy.pi / 2 * (ang > 0)
    result.s[:, 0, 1] = mag * npy.exp(1j * phase)
    # reciprocal: s21 == s12
    result.s[:, 1, 0] = result.s[:, 0, 1]
    return result
def chopinhalf(ntwk, *args, **kwargs):
    '''
    Chops a sandwich of identical,recicprocal 2-ports in half.
    Given two identical, reciprocal 2-ports measured in series,
    this returns one.
    Notes
    --------
    In other words, given
    .. math::
        B = A\\cdot\\cdotA
    Return A, where A port2 is connected to A port1. The result may
    be found through signal flow graph analysis and is,
    .. math::
        a_{11} = \frac{b_{11}}{1+b_{12}}
        a_{22} = \frac{b_{22}}{1+b_{12}}
        a_{12}^2 = b_{21}(1-\frac{b_{11}b_{22}}{(1+b_{12})^2}
    Parameters
    ------------
    ntwk : :class:`Network`
        a 2-port that is equal to two identical two-ports in cascade
    '''
    if ntwk.nports != 2:
        raise ValueError('Only valid on 2ports')
    # b12 is used where the formula says b21: valid because the input is
    # assumed reciprocal (b12 == b21)
    b11,b22,b12 = ntwk.s11,ntwk.s22,ntwk.s12
    kwargs['name'] = kwargs.get('name', ntwk.name)
    a11 = b11/(1+b12)
    a22 = b22/(1+b12)
    a21 = b12*(1-b11*b22/(1+b12)**2) # this is a21^2 here
    # take the square root with phase unwrapping to pick a consistent branch
    a21.s = mf.sqrt_phase_unwrap(a21.s)
    # assemble a symmetric 2-port: s12 == s21 == a21
    A = n_oneports_2_nport([a11,a21,a21,a22], *args, **kwargs)
    return A
## Building composit networks from sub-networks
def n_oneports_2_nport(ntwk_list, *args, **kwargs):
    '''
    Builds a N-port Network from list of N one-ports
    Parameters
    -----------
    ntwk_list : list of :class:`Network` objects
        must follow left-right, top-bottom order, ie, s11,s12,s21,s22
    \*args, \*\*kwargs :
        passed to :func:`Network.__init__` for the N-port
    Returns
    ----------
    nport : n-port :class:`Network`
        result
    '''
    nports = int(npy.sqrt(len(ntwk_list)))
    # stitch each row of one-ports along the column axis, then stack rows
    rows = []
    for row in range(nports):
        rows.append(npy.concatenate(
            [ntwk_list[col + row * nports].s for col in range(nports)], 2))
    s_out = npy.concatenate(rows, 1)
    # z0 comes from the diagonal entries (s11, s22, ...)
    z0 = npy.concatenate(
        [ntwk_list[idx].z0 for idx in range(0, nports ** 2, nports + 1)], 1)
    frequency = ntwk_list[0].frequency
    return Network(s=s_out, z0=z0, frequency=frequency, *args, **kwargs)
def n_twoports_2_nport(ntwk_list,nports, offby=1, **kwargs):
    '''
    Builds a N-port Network from list of two-ports
    By default all entries of result.s are filled with 0's, in case you
    dont fully specify the entire s-matrix of the resultant ntwk.
    Parameters
    -----------
    ntwk_list : list of :class:`Network` objects
        the names must contain the port index, ie 'p12' or 'p43'
    nports : int
        number of ports of the resultant Network
    offby : int
        starting value for s-parameters idecies. ie a value of `1`,
        assumes that a s21 = ntwk.s[:,1,0]
    \*args, \*\*kwargs :
        passed to :func:`Network.__init__` for the N-port
    Returns
    ----------
    nport : n-port :class:`Network`
        result
    '''
    frequency = ntwk_list[0].frequency
    # start from an all-zero s-matrix; unspecified entries stay 0
    nport = Network(frequency = frequency ,
                    s=npy.zeros(shape=(frequency.npoints,nports,nports)),
                    **kwargs)
    for subntwk in ntwk_list:
        for m,n in nport.port_tuples:
            # consider each unordered port pair (m,n) once, m > n
            if m!=n and m>n:
                if '%i%i'%(m+offby,n+offby) in subntwk.name:
                    pass
                elif '%i%i'%(n+offby,m+offby) in subntwk.name:
                    # name lists the pair in reverse order: flip the 2-port
                    subntwk = subntwk.flipped()
                else:
                    continue
                # copy the 2x2 sub-matrix of this 2-port into rows/cols (m,n)
                for mn,jk in zip(product((m,n), repeat=2),product((0,1), repeat=2)):
                    m,n,j,k = mn[0],mn[1],jk[0],jk[1]
                    nport.s[:,m,n] = subntwk.s[:,j,k]
                    nport.z0[:,m] = subntwk.z0[:,j]
    return nport
def four_oneports_2_twoport(s11, s12, s21, s22, *args, **kwargs):
    '''
    Builds a 2-port Network from list of four 1-ports
    Parameters
    -----------
    s11 : one-port :class:`Network`
        s11
    s12 : one-port :class:`Network`
        s12
    s21 : one-port :class:`Network`
        s21
    s22 : one-port :class:`Network`
        s22
    \*args, \*\*kwargs :
        passed to :func:`Network.__init__` for the twoport
    Returns
    ----------
    twoport : two-port :class:`Network`
        result
    See Also
    -----------
    n_oneports_2_nport
    three_twoports_2_threeport
    '''
    # left-right, top-bottom ordering expected by n_oneports_2_nport
    oneports = [s11, s12, s21, s22]
    return n_oneports_2_nport(oneports, *args, **kwargs)
def three_twoports_2_threeport(ntwk_triplet, auto_order = True, *args,
                               **kwargs):
    '''
    Creates 3-port from three 2-port Networks
    This function provides a convenient way to build a 3-port Network
    from a set of 2-port measurements. Which may occur when measuring
    a three port device on a 2-port VNA.
    Notes
    ---------
    if `auto_order` is False, ntwk_triplet must be of port orderings:
    [p12, p13, p23]
    else if `auto_order`is True, then the 3 Networks in ntwk_triplet must
    contain port identification in their names.
    For example, their names may be like `me12`, `me13`, `me23`
    Parameters
    --------------
    ntwk_triplet : list of 2-port Network objects
        list of three 2-ports. see notes about order.
    auto_order : bool
        if True attempt to inspect port orderings from Network names.
        Names must be like 'p12', 'p23', etc
    contains : str
        only files containing this string will be loaded.
    \*args,\*\*kwargs :
        passed to :func:`Network.__init__` for resultant network
    Returns
    ------------
    threeport : 3-port Network
    See Also
    -----------
    n_oneports_2_nport
    Examples
    -----------
    >>> rf.three_twoports_2_threeport(rf.read_all('.').values())
    '''
    # NOTE(review): this raise executes unconditionally, so everything
    # below it is unreachable; callers should use n_twoports_2_nport.
    raise DeprecationWarning('Use n_twoports_2_nport instead')
    if auto_order:
        p12,p13,p23 = None,None,None
        s11,s12,s13,s21,s22,s23,s31,s32,s33 = None,None,None,None,None,None,None,None,None
        for k in ntwk_triplet:
            if '12' in k.name:
                p12 = k
            elif '13' in k.name:
                p13 = k
            elif '23' in k.name:
                p23 = k
            elif '21' in k.name:
                p12 = k.flipped()
            elif '31' in k.name:
                # NOTE(review): assigns p31, which is never read; probably
                # intended to be `p13 = k.flipped()` -- dead code anyway
                p31 = k.flipped()
            elif '32' in k.name:
                p23 = k.flipped()
    else:
        p12,p13,p23 = ntwk_triplet
        p21= p12.flipped()
        p31= p13.flipped()
        p32= p23.flipped()
    if p12 != None:
        s11 = p12.s11
        s12 = p12.s12
        s21 = p12.s21
        s22 = p12.s22
    if p13 != None:
        s11 = p13.s11
        s13 = p13.s12
        s31 = p13.s21
        s33 = p13.s22
    if p23 != None:
        s22 = p23.s11
        s23 = p23.s12
        s32 = p23.s21
        s33 = p23.s22
    ntwk_list = [s11,s12,s13,s21,s22,s23,s31,s32,s33]
    # fill any missing entries with zero one-ports
    for k in range(len(ntwk_list)):
        if ntwk_list[k] == None:
            frequency = ntwk_triplet[0].frequency
            s = npy.zeros((len(ntwk_triplet[0]),1,1))
            ntwk_list[k] = Network(s=s, frequency=frequency)
    threeport = n_oneports_2_nport( ntwk_list, *args, **kwargs)
    return threeport
## Functions operating on s-parameter matrices
def connect_s(A, k, B, l):
    '''
    connect two n-port networks' s-matricies together.
    Joins port `k` of network `A` to port `l` of network `B`; the result
    has (A.rank + B.rank - 2) ports. Operates on raw s-matrices; use
    :func:`connect` for :class:`Network` objects.
    Parameters
    -----------
    A : :class:`numpy.ndarray`
        S-parameter matrix of `A`, shape is fxnxn
    k : int
        port index on `A` (port indices start from 0)
    B : :class:`numpy.ndarray`
        S-parameter matrix of `B`, shape is fxnxn
    l : int
        port index on `B`
    Returns
    -------
    C : :class:`numpy.ndarray`
        new S-parameter matrix
    Notes
    -------
    builds a block-diagonal composite of `A` and `B` and delegates the
    actual connection to :func:`innerconnect_s`.
    See Also
    --------
    connect : operates on :class:`Network` types
    innerconnect_s : function which implements the connection
        connection algorithm
    '''
    if k > A.shape[-1]-1 or l > B.shape[-1]-1:
        raise(ValueError('port indices are out of range'))
    nfreq = A.shape[0]   # number of frequency points
    portsA = A.shape[1]  # ports on A
    portsB = B.shape[1]  # ports on B
    # composite matrix: A and B appended diagonally
    composite = npy.zeros((nfreq, portsA + portsB, portsA + portsB), dtype='complex')
    composite[:, :portsA, :portsA] = A.copy()
    composite[:, portsA:, portsA:] = B.copy()
    # B's port l lives at index portsA + l inside the composite
    return innerconnect_s(composite, k, portsA + l)
def innerconnect_s(A, k, l):
    '''
    connect two ports of a single n-port network's s-matrix.
    Joins port `k` to port `l` on `A`, yielding an (n-2)-port s-matrix.
    Operates on raw s-matrices; use :func:`innerconnect` for
    :class:`Network` objects.
    Parameters
    -----------
    A : :class:`numpy.ndarray`
        S-parameter matrix of `A`, shape is fxnxn
    k : int
        port index on `A` (port indices start from 0)
    l : int
        port index on `A`
    Returns
    -------
    C : :class:`numpy.ndarray`
        new S-parameter matrix
    Notes
    -----
    Implements the 'sub-network growth' algorithm; see [#]_ and the
    original derivation in [#]_.
    References
    ----------
    .. [#] Compton, R.C.; , "Perspectives in microwave circuit analysis," Circuits and Systems, 1989., Proceedings of the 32nd Midwest Symposium on , vol., no., pp.716-718 vol.2, 14-16 Aug 1989. URL: http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=101955&isnumber=3167
    .. [#] Filipsson, Gunnar; , "A New General Computer Algorithm for S-Matrix Calculation of Interconnected Multiports," Microwave Conference, 1981. 11th European , vol., no., pp.700-704, 7-11 Sept. 1981. URL: http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4131699&isnumber=4131585
    '''
    if k > A.shape[-1] - 1 or l > A.shape[-1] - 1:
        raise(ValueError('port indices are out of range'))
    nports = A.shape[1]
    C = npy.zeros(shape=A.shape, dtype='complex')
    # denominator is independent of (i, j): hoist it out of the loops
    denom = (1 - A[:, k, l]) * (1 - A[:, l, k]) - A[:, k, k] * A[:, l, l]
    for i in range(nports):
        for j in range(nports):
            numer = (A[:, k, j] * A[:, i, l] * (1 - A[:, l, k]) +
                     A[:, l, j] * A[:, i, k] * (1 - A[:, k, l]) +
                     A[:, k, j] * A[:, l, l] * A[:, i, k] +
                     A[:, l, j] * A[:, k, k] * A[:, i, l])
            C[:, i, j] = A[:, i, j] + numer / denom
    # drop the rows and columns of the two connected ports
    C = npy.delete(C, (k, l), 1)
    C = npy.delete(C, (k, l), 2)
    return C
## network parameter conversion
def s2z(s,z0=50):
    '''
    Convert scattering parameters [1]_ to impedance parameters [2]_
    .. math::
        z = \\sqrt {z_0} \\cdot (I + s) (I - s)^{-1} \\cdot \\sqrt{z_0}
    Parameters
    ------------
    s : complex array-like
        scattering parameters
    z0 : complex array-like or number
        port impedances.
    Returns
    ---------
    z : complex array-like
        impedance parameters
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/S-parameters
    .. [2] http://en.wikipedia.org/wiki/impedance_parameters
    '''
    nfreqs, nports, nports = s.shape
    z0 = fix_z0_shape(z0, nfreqs, nports)
    z = npy.zeros(s.shape, dtype='complex')
    I = npy.mat(npy.identity(s.shape[1]))
    s = s.copy() # to prevent the original array from being altered
    s[s==1.] = 1. + 1e-12 # solve numerical singularity
    s[s==-1.] = -1. + 1e-12 # solve numerical singularity
    # `range` replaces the Python-2-only `xrange`; identical behavior here
    for fidx in range(s.shape[0]):
        sqrtz0 = npy.mat(npy.sqrt(npy.diagflat(z0[fidx])))
        # (I-s)^-1 and (I+s) commute (both are polynomials in s), so this
        # matches the docstring's (I+s)(I-s)^-1 form
        z[fidx] = sqrtz0 * (I-s[fidx])**-1 * (I+s[fidx]) * sqrtz0
    return z
def s2y(s,z0=50):
    '''
    convert scattering parameters [#]_ to admittance parameters [#]_
    .. math::
        y = \\sqrt {y_0} \\cdot (I - s)(I + s)^{-1} \\cdot \\sqrt{y_0}
    Parameters
    ------------
    s : complex array-like
        scattering parameters
    z0 : complex array-like or number
        port impedances
    Returns
    ---------
    y : complex array-like
        admittance parameters
    See Also
    ----------
    s2z, s2t, z2s, z2y, z2t, y2s, y2z, t2s, t2z, t2y
    Network.s, Network.y, Network.z, Network.t
    References
    ----------
    .. [#] http://en.wikipedia.org/wiki/S-parameters
    .. [#] http://en.wikipedia.org/wiki/Admittance_parameters
    '''
    nfreqs, nports, nports = s.shape
    z0 = fix_z0_shape(z0, nfreqs, nports)
    y = npy.zeros(s.shape, dtype='complex')
    I = npy.mat(npy.identity(s.shape[1]))
    s = s.copy() # to prevent the original array from being altered
    s[s==-1.] = -1. + 1e-12 # solve numerical singularity
    s[s==1.] = 1. + 1e-12 # solve numerical singularity
    # `range` replaces the Python-2-only `xrange`; identical behavior here
    for fidx in range(s.shape[0]):
        # sqrt of port admittances (1/z0) for this frequency point
        sqrty0 = npy.mat(npy.sqrt(npy.diagflat(1.0/z0[fidx])))
        y[fidx] = sqrty0*(I-s[fidx])*(I+s[fidx])**-1*sqrty0
    return y
def s2t(s):
    '''
    Converts scattering parameters [#]_ to scattering transfer parameters [#]_ .
    Transfer parameters are also referred to as the 'wave cascading
    matrix'. Only valid for 2-port networks.
    Parameters
    -----------
    s : :class:`numpy.ndarray` (shape fx2x2)
        scattering parameter matrix
    Returns
    -------
    t : numpy.ndarray
        scattering transfer parameters (aka wave cascading matrix)
    See Also
    ---------
    inv : calculates inverse s-parameters
    t2s, s2z, s2y, z2s, y2s
    Network.s, Network.t
    References
    -----------
    .. [#] http://en.wikipedia.org/wiki/S-parameters
    .. [#] http://en.wikipedia.org/wiki/Scattering_transfer_parameters#Scattering_transfer_parameters
    '''
    #TODO: check rank(s) ==2
    # determinant of each 2x2 s-matrix, vectorized over frequency
    det = s[:,0,0]*s[:,1,1] - s[:,1,0]*s[:,0,1]
    t = npy.array([
        [-1*det/s[:,1,0], -s[:,1,1]/s[:,1,0]],
        [s[:,0,0]/s[:,1,0], 1./s[:,1,0]],
        ]).transpose()
    return t
def z2s(z, z0=50):
    '''
    convert impedance parameters [1]_ to scattering parameters [2]_
    .. math::
        s = (\\sqrt{y_0} \\cdot z \\cdot \\sqrt{y_0} - I)(\\sqrt{y_0} \\cdot z \\cdot\\sqrt{y_0} + I)^{-1}
    Parameters
    ------------
    z : complex array-like
        impedance parameters
    z0 : complex array-like or number
        port impedances
    Returns
    ---------
    s : complex array-like
        scattering parameters
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/impedance_parameters
    .. [2] http://en.wikipedia.org/wiki/S-parameters
    '''
    nfreqs, nports, nports = z.shape
    z0 = fix_z0_shape(z0, nfreqs, nports)
    s = npy.zeros(z.shape, dtype='complex')
    I = npy.mat(npy.identity(z.shape[1]))
    # `range` replaces the Python-2-only `xrange`; identical behavior here
    for fidx in range(z.shape[0]):
        # sqrt of port admittances (1/z0) for this frequency point
        sqrty0 = npy.mat(npy.sqrt(npy.diagflat(1.0/z0[fidx])))
        s[fidx] = (sqrty0*z[fidx]*sqrty0 - I) * (sqrty0*z[fidx]*sqrty0 + I)**-1
    return s
def z2y(z):
    '''
    convert impedance parameters [#]_ to admittance parameters [#]_
    .. math::
        y = z^{-1}
    Parameters
    ------------
    z : complex array-like
        impedance parameters
    Returns
    ---------
    y : complex array-like
        admittance parameters
    See Also
    ----------
    s2z, s2y, z2s, y2s, y2z
    Network.y, Network.z
    References
    ----------
    .. [#] http://en.wikipedia.org/wiki/impedance_parameters
    .. [#] http://en.wikipedia.org/wiki/Admittance_parameters
    '''
    # The original per-frequency loop used Python-2-only `xrange` and the
    # deprecated `npy.mat(...)**-1`. numpy.linalg.inv broadcasts over the
    # leading (frequency) axis and produces the same (f, n, n) result.
    return npy.linalg.inv(z)
def z2t(z):
    '''
    Not Implemented yet
    convert impedance parameters [#]_ to scattering transfer parameters [#]_
    Parameters
    ------------
    z : complex array-like or number
        impedance parameters
    Returns
    ---------
    s : complex array-like or number
        scattering parameters
    Raises
    -------
    NotImplementedError
        always; this conversion has not been implemented
    References
    ----------
    .. [#] http://en.wikipedia.org/wiki/impedance_parameters
    .. [#] http://en.wikipedia.org/wiki/Scattering_transfer_parameters#Scattering_transfer_parameters
    '''
    # placeholder: conversion not implemented yet
    raise NotImplementedError
def z2a(z):
    '''
    Converts impedance parameters to abcd parameters [#]_ .
    Only valid for 2-port networks.
    Parameters
    -----------
    z : :class:`numpy.ndarray` (shape fx2x2)
        impedance parameter matrix
    Returns
    -------
    abcd : numpy.ndarray
        scattering transfer parameters (aka wave cascading matrix)
    See Also
    ---------
    s2a, z2s, z2y
    Network.z
    References
    -----------
    .. [#] https://en.wikipedia.org/wiki/Two-port_network
    '''
    # determinant of each 2x2 z-matrix, vectorized over frequency
    det = z[:,0,0]*z[:,1,1] - z[:,1,0]*z[:,0,1]
    abcd = npy.array([
        [z[:,0,0]/z[:,1,0], 1./z[:,1,0]],
        [det/z[:,1,0], z[:,1,1]/z[:,1,0]],
        ]).transpose()
    return abcd
def s2a(s, z0):
    '''
    Converts scattering parameters to abcd parameters [#]_ .
    Goes through the impedance domain: s -> z -> abcd.
    Parameters
    -----------
    s : :class:`numpy.ndarray` (shape fx2x2)
        impedance parameter matrix
    z0: number or, :class:`numpy.ndarray` (shape fx2)
        port impedance
    Returns
    -------
    abcd : numpy.ndarray
        scattering transfer parameters (aka wave cascading matrix)
    '''
    z = s2z(s, z0)
    return z2a(z)
def y2s(y, z0=50):
    '''
    convert admittance parameters [#]_ to scattering parameters [#]_
    .. math::
        s = (I - \\sqrt{z_0} \\cdot y \\cdot \\sqrt{z_0})(I + \\sqrt{z_0} \\cdot y \\cdot \\sqrt{z_0})^{-1}
    Parameters
    ------------
    y : complex array-like
        admittance parameters
    z0 : complex array-like or number
        port impedances
    Returns
    ---------
    s : complex array-like or number
        scattering parameters
    See Also
    ----------
    s2y, y2z, z2s, z2y
    Network.s, Network.y
    References
    ----------
    .. [#] http://en.wikipedia.org/wiki/Admittance_parameters
    .. [#] http://en.wikipedia.org/wiki/S-parameters
    '''
    nfreqs, nports, nports = y.shape
    z0 = fix_z0_shape(z0, nfreqs, nports)
    s = npy.zeros(y.shape, dtype='complex')
    I = npy.mat(npy.identity(s.shape[1]))
    # `range` replaces the Python-2-only `xrange`; identical behavior here
    for fidx in range(s.shape[0]):
        # sqrt of port impedances for this frequency point
        sqrtz0 = npy.mat(npy.sqrt(npy.diagflat(z0[fidx])))
        s[fidx] = (I - sqrtz0*y[fidx]*sqrtz0) * (I + sqrtz0*y[fidx]*sqrtz0)**-1
    return s
def y2z(y):
    '''
    convert admittance parameters [#]_ to impedance parameters [#]_
    .. math::
        z = y^{-1}
    Parameters
    ------------
    y : complex array-like
        admittance parameters
    Returns
    ---------
    z : complex array-like
        impedance parameters
    See Also
    ----------
    s2z, s2y, z2s, z2y, y2s
    Network.y, Network.z
    References
    ----------
    .. [#] http://en.wikipedia.org/wiki/Admittance_parameters
    .. [#] http://en.wikipedia.org/wiki/impedance_parameters
    '''
    # The original per-frequency loop used Python-2-only `xrange` and the
    # deprecated `npy.mat(...)**-1`. numpy.linalg.inv broadcasts over the
    # leading (frequency) axis and produces the same (f, n, n) result.
    return npy.linalg.inv(y)
def y2t(y):
    '''
    Not Implemented Yet
    convert admittance parameters [#]_ to scattering-transfer parameters [#]_
    Parameters
    ------------
    y : complex array-like or number
        impedance parameters
    Returns
    ---------
    t : complex array-like or number
        scattering parameters
    Raises
    -------
    NotImplementedError
        always; this conversion has not been implemented
    References
    ----------
    .. [#] http://en.wikipedia.org/wiki/Admittance_parameters
    .. [#] http://en.wikipedia.org/wiki/Scattering_transfer_parameters#Scattering_transfer_parameters
    '''
    # placeholder: conversion not implemented yet
    raise NotImplementedError
def t2s(t):
    '''
    converts scattering transfer parameters [#]_ to scattering parameters [#]_
    Transfer parameters are also referred to as the 'wave cascading
    matrix'. Only valid for 2-port networks.
    Parameters
    -----------
    t : :class:`numpy.ndarray` (shape fx2x2)
        scattering transfer parameters
    Returns
    -------
    s : :class:`numpy.ndarray`
        scattering parameter matrix.
    See Also
    ---------
    inv : calculates inverse s-parameters
    s2t, s2z, s2y, z2s, y2s
    Network.s, Network.t
    References
    -----------
    .. [#] http://en.wikipedia.org/wiki/Scattering_transfer_parameters#Scattering_transfer_parameters
    .. [#] http://en.wikipedia.org/wiki/S-parameters
    '''
    #TODO: check rank(s) ==2
    # determinant of each 2x2 t-matrix, vectorized over frequency
    det = t[:,0,0]*t[:,1,1] - t[:,1,0]*t[:,0,1]
    s = npy.array([
        [t[:,0,1]/t[:,1,1], 1/t[:,1,1]],
        [det/t[:,1,1], -1*t[:,1,0]/t[:,1,1]],
        ]).transpose()
    return s
def t2z(t):
    '''
    Not Implemented Yet
    Convert scattering transfer parameters [#]_ to impedance parameters [#]_
    Parameters
    ------------
    t : complex array-like or number
        impedance parameters
    Returns
    ---------
    z : complex array-like or number
        scattering parameters
    Raises
    -------
    NotImplementedError
        always; this conversion has not been implemented
    References
    ----------
    .. [#] http://en.wikipedia.org/wiki/Scattering_transfer_parameters#Scattering_transfer_parameters
    .. [#] http://en.wikipedia.org/wiki/impedance_parameters
    '''
    # placeholder: conversion not implemented yet
    raise NotImplementedError
def t2y(t):
    '''
    Not Implemented Yet
    Convert scattering transfer parameters to admittance parameters [#]_
    Parameters
    ------------
    t : complex array-like or number
        t-parameters
    Returns
    ---------
    y : complex array-like or number
        admittance parameters
    Raises
    -------
    NotImplementedError
        always; this conversion has not been implemented
    References
    ----------
    .. [#] http://en.wikipedia.org/wiki/Scattering_transfer_parameters#Scattering_transfer_parameters
    '''
    # placeholder: conversion not implemented yet
    raise NotImplementedError
## these methods are used in the secondary properties
def passivity(s):
    '''
    Passivity metric for a multi-port network.
    A unitary-ness test of the s-parameter matrix [#]_: returns, per
    frequency, the elementwise square root of :math:`S^H \\cdot S`
    (conjugate-transpose times itself), whose diagonal is proportional
    to the total power received at all ports for excitation at one port.
    For a two-port this reduces to
    .. math::
        \sqrt( |S_{11}|^2 + |S_{21}|^2 \, , \, |S_{22}|^2+|S_{12}|^2)
    Notes
    ---------
    The total amount of power disipated in a network depends on the
    port matches. For example, given a matched attenuator, this metric
    will yield the attenuation value. However, if the attenuator is
    cascaded with a mismatch, the power disipated will not be equivalent
    to the attenuator value, nor equal for each excitation port.
    Returns
    ---------
    passivity : :class:`numpy.ndarray` of shape fxnxn
    References
    ------------
    .. [#] http://en.wikipedia.org/wiki/Scattering_parameters#Lossless_networks
    '''
    if s.shape[-1] == 1:
        raise (ValueError('Doesn\'t exist for one ports'))
    out = s.copy()
    for fidx, smat in enumerate(s):
        out[fidx, :, :] = npy.sqrt(npy.dot(smat.conj().T, smat))
    return out
def reciprocity(s):
    '''
    Reciprocity metric for a multi-port network.
    Per frequency, returns the elementwise magnitude of the difference
    between the s-matrix and its transpose; a reciprocal network yields
    all zeros.
    .. math::
        | S - S^T |
    Returns
    ---------
    reciprocity : :class:`numpy.ndarray` of shape fxnxn
    '''
    if s.shape[-1] == 1:
        raise (ValueError('Doesn\'t exist for one ports'))
    out = s.copy()
    for fidx, smat in enumerate(s):
        out[fidx, :, :] = abs(smat - smat.T)
    return out
## renormalize
def renormalize_s(s, z_old, z_new):
    '''
    Renormalize an s-parameter matrix given old and new port impedances.

    In the Parameters descriptions, F,N,N = shape(s).

    Parameters
    ---------------
    s : complex array of shape FxNxN
        s-parameter matrix
    z_old : complex array of shape FxN, F, N or a scalar
        old (original) port impedances
    z_new : complex array of shape FxN, F, N or a scalar
        new port impedances

    Notes
    ------
    This re-normalization assumes the pseudo-wave formulation; the
    power-wave formulation is implemented by :func:`renormalize_s_pw`.
    The two are only different for complex characteristic impedances.
    See [1]_ and [2]_ for theoretical background.

    See Also
    --------
    renormalize_s_pw : renormalize using power wave formulation
    Network.renormalize : method of Network to renormalize s
    fix_z0_shape
    s2z
    z2s

    References
    -------------
    .. [1] R. B. Marks and D. F. Williams, "A general waveguide circuit theory," Journal of Research of the National Institute of Standards and Technology, vol. 97, no. 5, pp. 533-561, 1992.
    .. [2] http://www.anritsu.com/en-gb/downloads/application-notes/application-note/dwl1334.aspx

    Examples
    ------------
    >>> s = zeros(shape=(101,2,2))
    >>> renormalize_s(s, 50,25)
    '''
    # round-trip through z-parameters: interpret s under the old
    # impedance, then re-express under the new one
    return z2s(s2z(s, z0=z_old), z0=z_new)
def renormalize_s_pw(s, z_old, z_new):
    '''
    Renormalize an s-parameter matrix using the power-wave formulation.

    In the Parameters descriptions, F,N,N = shape(s).

    Parameters
    ---------------
    s : complex array of shape FxNxN
        s-parameter matrix
    z_old : complex array of shape FxN, F, N or a scalar
        old (original) port impedances
    z_new : complex array of shape FxN, F, N or a scalar
        new port impedances

    Returns
    -------
    S_pw : complex array of shape FxNxN
        renormalized s-parameter matrix

    Notes
    ------
    The pseudo-wave formulation is implemented by :func:`renormalize_s`;
    the two formulations differ only for complex characteristic
    impedances.

    References
    -------------
    .. [1] http://www.anritsu.com/en-gb/downloads/application-notes/application-note/dwl1334.aspx
        power-wave Eq 10,11,12 in page 10

    See Also
    ----------
    renormalize_s : renormalize using pseudo wave formulation
    Network.renormalize : method of Network to renormalize s
    fix_z0_shape
    z2s
    '''
    nfreqs, nports, nports = s.shape
    A = fix_z0_shape(z_old, nfreqs, nports)
    B = fix_z0_shape(z_new, nfreqs, nports)

    S_pw = npy.zeros(s.shape, dtype='complex')
    I = npy.identity(nports)
    s = s.copy()            # prevent the caller's array from being altered
    s[s == 1.] = 1. + 1e-12     # solve numerical singularity
    s[s == -1.] = -1. + 1e-12   # solve numerical singularity
    # make sure the real part of the impedance is not zero.
    # BUGFIX: the original indexed the RHS with ``.real <= 0`` while the
    # LHS used ``.real == 0``; the masks differ whenever a real part is
    # negative, raising a shape-mismatch error. Use the same mask twice.
    A[A.real == 0] = 1e-12 + 1.j * A.imag[A.real == 0]
    B[B.real == 0] = 1e-12 + 1.j * B.imag[B.real == 0]

    for fidx in range(nfreqs):  # range (not xrange): Python 3 compatible
        A_ii = A[fidx]
        B_ii = B[fidx]
        # Eq. 11 and Eq. 12 of [1]
        Q_ii = npy.sqrt(npy.absolute(B_ii.real / A_ii.real)) * (A_ii + A_ii.conj()) / (B_ii.conj() + A_ii)
        G_ii = (B_ii - A_ii) / (B_ii + A_ii.conj())
        Q = npy.diagflat(Q_ii)
        G = npy.diagflat(G_ii)
        S = s[fidx]
        # Eq. 10 of [1]; plain ndarrays + linalg.inv replace the
        # deprecated numpy.matrix ``**-1`` arithmetic
        S_pw[fidx] = npy.linalg.inv(Q).dot(S - G.conj().T) \
            .dot(npy.linalg.inv(I - npy.dot(G, S))).dot(Q.conj().T)
    return S_pw
def fix_z0_shape(z0, nfreqs, nports):
    '''
    Make a port impedance of correct shape for a given network's matrix.

    This attempts to broadcast z0 to satisfy
        npy.shape(z0) == (nfreqs, nports)

    Parameters
    --------------
    z0 : number, array-like
        z0 can be:
        * a number (same at all ports and frequencies)
        * an array-like of length == number ports
        * an array-like of length == number frequency points
        * the correct shape == (nfreqs, nports)
    nfreqs : int
        number of frequency points
    nports : int
        number of ports

    Returns
    ----------
    z0 : array of shape == (nfreqs, nports)
        z0 with the right shape for a nport Network

    Raises
    ------
    IndexError
        if z0 cannot be broadcast to (nfreqs, nports)

    Examples
    ----------
    For a two-port network with 201 frequency points, possible uses may
    be

    >>> z0 = rf.fix_z0_shape(50 , 201,2)
    >>> z0 = rf.fix_z0_shape([50,25] , 201,2)
    >>> z0 = rf.fix_z0_shape(range(201) , 201,2)
    '''
    if npy.shape(z0) == (nfreqs, nports):
        # z0 is of correct shape; return an independent copy.
        # npy.array also accepts a nested list here, which the previous
        # ``z0.copy()`` (ndarray-only) did not.
        return npy.array(z0)
    elif npy.isscalar(z0):
        # z0 is a single number: same at all ports and frequencies
        return npy.array(nfreqs * [nports * [z0]])
    elif len(z0) == nports:
        # assume z0 is a list of impedances for each port,
        # but constant with frequency.
        # NOTE: when nports == nfreqs this branch wins the ambiguity.
        return npy.array(nfreqs * [z0])
    elif len(z0) == nfreqs:
        # assume z0 is a list of impedances for each frequency,
        # but constant with respect to ports
        return npy.array(nports * [z0]).T
    else:
        raise IndexError('z0 is not an acceptable shape')
## cascading assistance functions
def inv(s):
    '''
    Calculates 'inverse' s-parameter matrix, used for de-embedding.

    This is not literally the inverse of the s-parameter matrix; it is
    defined such that ``inv(s)`` cascaded with ``s`` is unity:

    .. math::
        inv(s) = t2s({s2t(s)}^{-1})

    i.e. the inverse is taken in the scattering-transfer domain and
    mapped back to scattering parameters.

    Parameters
    -----------
    s : :class:`numpy.ndarray` (shape fx2x2)
        scattering parameter matrix.

    Returns
    -------
    s' : :class:`numpy.ndarray`
        inverse scattering parameter matrix.

    See Also
    ---------
    t2s : converts scattering transfer parameters to scattering parameters
    s2t : converts scattering parameters to scattering transfer parameters
    '''
    # invert each frequency slice in the transfer-parameter domain
    t = s2t(s)
    for fidx in range(t.shape[0]):
        t[fidx, :, :] = npy.linalg.inv(t[fidx, :, :])
    return t2s(t)
def flip(a):
    '''
    invert the ports of a networks s-matrix, 'flipping' it over

    Parameters
    -----------
    a : :class:`numpy.ndarray`
        scattering parameter matrix. shape should be 2x2, or fx2x2

    Returns
    -------
    a' : :class:`numpy.ndarray`
        flipped scattering parameter matrix, ie interchange of port 0
        and port 1

    Note
    -----
    only works for 2-ports at the moment
    '''
    flipped = a.copy()
    if a.ndim > 2:
        # recurse over the frequency axis
        for fidx in range(a.shape[0]):
            flipped[fidx, :, :] = flip(a[fidx, :, :])
    elif a.shape == (2, 2):
        # reversing both axes swaps 11<->22 and 12<->21
        flipped[:] = a[::-1, ::-1]
    else:
        raise IndexError('matrices should be 2x2, or kx2x2')
    return flipped
## COMMON CHECKS (raise exceptions)
def check_frequency_equal(ntwkA, ntwkB):
    '''
    Raise IndexError if the two Networks do not share a frequency axis.
    '''
    # idiom fix: ``not ...`` instead of ``== False``
    if not assert_frequency_equal(ntwkA, ntwkB):
        raise IndexError('Networks dont have matching frequency. See `Network.interpolate`')
def check_z0_equal(ntwkA, ntwkB):
    '''
    Raise ValueError if the two Networks have different port impedances.
    '''
    # note: you should check frequency equal before you call this
    # idiom fix: ``not ...`` instead of ``== False``
    if not assert_z0_equal(ntwkA, ntwkB):
        raise ValueError('Networks dont have matching z0.')
def check_nports_equal(ntwkA, ntwkB):
    '''
    Raise ValueError if the two Networks have different port counts.
    '''
    # idiom fix: ``not ...`` instead of ``== False``
    if not assert_nports_equal(ntwkA, ntwkB):
        raise ValueError('Networks dont have matching number of ports.')
## TESTs (return [usually boolean] values)
def assert_frequency_equal(ntwkA, ntwkB):
    '''
    True if both Networks have the same frequency axis.
    '''
    return (ntwkA.frequency == ntwkB.frequency)
def assert_z0_equal(ntwkA, ntwkB):
    '''
    True if both Networks have identical port impedances everywhere.
    '''
    return (ntwkA.z0 == ntwkB.z0).all()
def assert_z0_at_ports_equal(ntwkA, k, ntwkB, l):
    '''
    True if port k of ntwkA and port l of ntwkB have equal z0 at every
    frequency point.
    '''
    return (ntwkA.z0[:, k] == ntwkB.z0[:, l]).all()
def assert_nports_equal(ntwkA, ntwkB):
    '''
    True if both Networks have the same number of ports.
    '''
    return (ntwkA.number_of_ports == ntwkB.number_of_ports)
## Other
# dont belong here, but i needed them quickly
# this is needed for port impedance mismatches
def impedance_mismatch(z1, z2):
    '''
    creates a two-port s-matrix for a impedance mis-match

    Parameters
    -----------
    z1 : number or array-like
        complex impedance of port 1
    z2 : number or array-like
        complex impedance of port 2

    Returns
    ---------
    s' : 2-port s-matrix for the impedance mis-match
    '''
    gamma = zl_2_Gamma0(z1, z2)
    smat = npy.zeros(shape=(len(gamma), 2, 2), dtype='complex')
    # reflection at each port, opposite signs
    smat[:, 0, 0] = gamma
    smat[:, 1, 1] = -gamma
    # transmission terms scaled by the impedance ratio
    smat[:, 1, 0] = (1 + gamma) * npy.sqrt(1.0 * z1 / z2)
    smat[:, 0, 1] = (1 - gamma) * npy.sqrt(1.0 * z2 / z1)
    return smat
def two_port_reflect(ntwk1, ntwk2=None):
    '''
    Generates a two-port reflective two-port, from two one-ports.

    Parameters
    ----------
    ntwk1 : one-port Network object
        network seen from port 1
    ntwk2 : one-port Network object, or None
        network seen from port 2. if None then will use ntwk1.

    Returns
    -------
    result : Network object
        two-port reflective network

    Notes
    -------
    The resultant Network is copied from `ntwk1`, so its various
    properties(name, frequency, etc) are inherited from that Network.

    Examples
    ---------
    >>> short, open = rf.Network('short.s1p'), rf.Network('open.s1p')
    >>> rf.two_port_reflect(short, open)
    '''
    result = ntwk1.copy()
    if ntwk2 is None:
        ntwk2 =ntwk1
    # diagonal entries are the one-port reflections; off-diagonals zero
    s11 = ntwk1.s[:,0,0]
    s22 = ntwk2.s[:,0,0]
    s21 = npy.zeros(ntwk1.frequency.npoints, dtype=complex)
    # the (2,2,F) stack is transposed to (F,2,2); reshape keeps that shape
    result.s = npy.array(\
            [[s11, s21],\
            [ s21, s22]]).\
            transpose().reshape(-1,2,2)
    result.z0 = npy.hstack([ntwk1.z0, ntwk2.z0])
    try:
        result.name = ntwk1.name+'-'+ntwk2.name
    except(TypeError):
        # one of the names is None; keep the copied name
        pass
    return result
|
hohe/scikit-rf
|
skrf/network.py
|
Python
|
bsd-3-clause
| 141,218
|
[
"Gaussian"
] |
6834317bc384623c2e9e41755a566b856ceea90a905d87fe64dee70b4e46a5db
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import tests_common
import numpy as np
import espressomd
import espressomd.interactions
import espressomd.magnetostatics
import espressomd.analyze
import espressomd.galilei
@utx.skipIfMissingGPU()
@utx.skipIfMissingFeatures(["DIPOLES", "ROTATION", "LENNARD_JONES"])
class DDSGPUTest(ut.TestCase):
    """Compare the GPU dipolar direct sum against the CPU implementation
    (DAWAANR): forces, torques and total energy must agree up to the
    ratio of the two prefactors."""

    # Handle for espresso system
    es = espressomd.System(box_l=[1.0, 1.0, 1.0])

    @ut.skipIf(es.cell_system.get_state()["n_nodes"] > 1,
               "Skipping test: only runs for n_nodes == 1")
    def test(self):
        # deliberately different prefactors; results must match up to
        # their ratio
        pf_dds_gpu = 2.34
        pf_dawaanr = 3.524
        ratio_dawaanr_dds_gpu = pf_dawaanr / pf_dds_gpu
        self.es.box_l = 3 * [15]
        self.es.periodicity = [0, 0, 0]  # open boundaries
        self.es.time_step = 1E-4
        self.es.cell_system.skin = 0.1

        for n in [128, 541]:  # two different particle counts
            dipole_modulus = 1.3
            part_dip = dipole_modulus * tests_common.random_dipoles(n)
            part_pos = np.random.random((n, 3)) * self.es.box_l[0]
            self.es.part.add(pos=part_pos, dip=part_dip)

            # relax random overlaps with LJ + steepest descent
            self.es.non_bonded_inter[0, 0].lennard_jones.set_params(
                epsilon=10.0, sigma=0.5, cutoff=0.55, shift="auto")
            self.es.thermostat.turn_off()
            self.es.integrator.set_steepest_descent(
                f_max=0.0, gamma=0.1, max_displacement=0.1)
            self.es.integrator.run(500)
            g = espressomd.galilei.GalileiTransform()
            g.kill_particle_motion(rotation=True)
            self.es.integrator.set_vv()

            # disable LJ so only magnetostatics contributes
            self.es.non_bonded_inter[0, 0].lennard_jones.set_params(
                epsilon=0.0, sigma=0.0, cutoff=0.0, shift=0.0)
            self.es.cell_system.skin = 0.0
            self.es.time_step = 0.01
            self.es.thermostat.turn_off()
            # gamma should be zero in order to avoid the noise term in force
            # and torque
            self.es.thermostat.set_langevin(kT=1.297, gamma=0.0, seed=42)

            # reference: CPU dipolar direct sum (DAWAANR)
            dds_cpu = espressomd.magnetostatics.DipolarDirectSumCpu(
                prefactor=pf_dawaanr)
            self.es.actors.add(dds_cpu)
            self.es.integrator.run(steps=0, recalc_forces=True)
            dawaanr_f = np.copy(self.es.part[:].f)
            dawaanr_t = np.copy(self.es.part[:].torque_lab)
            dawaanr_e = self.es.analysis.energy()["total"]

            del dds_cpu
            # NOTE(review): removing while indexing active_actors skips
            # elements when more than one actor is active — presumably
            # only one actor is present here; confirm
            for i in range(len(self.es.actors.active_actors)):
                self.es.actors.remove(self.es.actors.active_actors[i])

            self.es.integrator.run(steps=0, recalc_forces=True)

            # system under test: GPU dipolar direct sum
            dds_gpu = espressomd.magnetostatics.DipolarDirectSumGpu(
                prefactor=pf_dds_gpu)
            self.es.actors.add(dds_gpu)
            self.es.integrator.run(steps=0, recalc_forces=True)
            ddsgpu_f = np.copy(self.es.part[:].f)
            ddsgpu_t = np.copy(self.es.part[:].torque_lab)
            ddsgpu_e = self.es.analysis.energy()["total"]

            # compare forces, torques and energy up to the prefactor ratio
            for i in range(n):
                np.testing.assert_allclose(
                    np.array(dawaanr_t[i]),
                    ratio_dawaanr_dds_gpu * np.array(ddsgpu_t[i]),
                    err_msg='Torques on particle do not match for particle {}'
                    .format(i), atol=3e-3)
                np.testing.assert_allclose(
                    np.array(dawaanr_f[i]),
                    ratio_dawaanr_dds_gpu * np.array(ddsgpu_f[i]),
                    err_msg='Forces on particle do not match for particle i={}'
                    .format(i), atol=3e-3)
            self.assertAlmostEqual(
                dawaanr_e,
                ddsgpu_e * ratio_dawaanr_dds_gpu,
                places=2,
                msg='Energies for dawaanr {0} and dds_gpu {1} do not match.'
                .format(dawaanr_e, ratio_dawaanr_dds_gpu * ddsgpu_e))
            self.es.integrator.run(steps=0, recalc_forces=True)

            # clean up for the next particle count
            del dds_gpu
            self.es.actors.clear()
            self.es.part.clear()


if __name__ == '__main__':
    ut.main()
|
fweik/espresso
|
testsuite/python/dawaanr-and-dds-gpu.py
|
Python
|
gpl-3.0
| 4,792
|
[
"ESPResSo"
] |
0d4b24eae211a3783fcb93633ad064a32117ce98070fd323f631b4b6b6971585
|
'''
synbiochem (c) University of Manchester 2015
synbiochem is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=invalid-name
# pylint: disable=wrong-import-order
import unittest
from synbiochem.utils import neo4j_utils
import pandas as pd
class Test(unittest.TestCase):
    '''Test class for neo4j_utils.'''

    def test_type_files(self):
        '''Tests type_files method.

        Builds a DataFrame whose columns hold arrays, scalars and neo4j
        bulk-import header columns, runs it through
        ``neo4j_utils.type_df`` and checks that each plain column name
        gains a neo4j type suffix (``:float[]``, ``:int`` ...) while the
        ``:START_ID``/``:END_ID``/``:LABEL`` columns are unchanged.
        '''
        # two rows of mixed-type sample data; order matches ``columns``
        data = [[[1.0, 3.7, -34.8e-17],
                 ['some', 'random', 'string'],
                 [4, -3],
                 [True, False, True],
                 32.124,
                 -34,
                 'random string',
                 False,
                 13,
                 56.3,
                 [23.5, 'Tree', False]],
                [[1.6, 3.77, -374.8e-17],
                 ['another', 'random', 'string'],
                 [47, -31],
                 [True, True, False, True],
                 132.124,
                 -314,
                 'another random string',
                 True,
                 34,
                 78.3,
                 [-123.5, 6, 'Trees', True]]]

        columns = ['float_array',
                   'string_array',
                   'int_array',
                   'boolean_array',
                   'float',
                   'int',
                   'string',
                   'boolean',
                   ':START_ID',
                   ':END_ID(Label)',
                   ':LABEL']

        df = pd.DataFrame(data, columns=columns)
        new_df = neo4j_utils.type_df(df, array_delimiter='|')

        # expected column names after typing
        expected = ['float_array:float[]',
                    'string_array:string[]',
                    'int_array:int[]',
                    'boolean_array:boolean[]',
                    'float:float',
                    'int:int',
                    'string:string',
                    'boolean:boolean',
                    ':START_ID',
                    ':END_ID(Label)',
                    ':LABEL']

        self.assertEqual(sorted((list(new_df.columns))), sorted(expected))


if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
|
synbiochem/synbiochem-py
|
synbiochem/utils/test/test_neo4j_utils.py
|
Python
|
mit
| 2,255
|
[
"VisIt"
] |
e18fb7a13a134a3ffcfe16277ea671d29c7ff876a6701f70f799a7a4e407e6a3
|
#!/usr/bin/env python
"""Copyright 2010 Phidgets Inc.
This work is licensed under the Creative Commons Attribution 2.5 Canada License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/2.5/ca/
"""
__author__ = 'Adam Stelmack'
__version__ = '2.1.8'
__date__ = 'May 17 2010'
#Basic imports
from ctypes import *
import sys
#Phidget specific imports
from Phidgets.PhidgetException import PhidgetErrorCodes, PhidgetException
from Phidgets.Events.Events import AttachEventArgs, DetachEventArgs, ErrorEventArgs, EncoderPositionChangeEventArgs, InputChangeEventArgs
from Phidgets.Devices.Encoder import Encoder
from Phidgets.Phidget import PhidgetLogLevel
#Create an encoder object (the original comment said "accelerometer",
#but the object constructed is an Encoder)
try:
    encoder = Encoder()
except RuntimeError as e:
    # NOTE(review): RuntimeError has no ``details`` attribute in
    # standard Python — presumably never reached; confirm
    print("Runtime Exception: %s" % e.details)
    print("Exiting....")
    exit(1)
#Information Display Function
def displayDeviceInfo():
    """Print a one-row table describing the attached encoder device
    (attachment state, device name, serial number, version)."""
    print("|------------|----------------------------------|--------------|------------|")
    print("|- Attached -|- Type -|- Serial No. -|- Version -|")
    print("|------------|----------------------------------|--------------|------------|")
    print("|- %8s -|- %30s -|- %10d -|- %8d -|" % (encoder.isAttached(), encoder.getDeviceName(), encoder.getSerialNum(), encoder.getDeviceVersion()))
    print("|------------|----------------------------------|--------------|------------|")
#Event Handler Callback Functions
def encoderAttached(e):
    """Attach-event callback: report the serial number of the device."""
    attached = e.device
    print("Encoder %i Attached!" % (attached.getSerialNum()))
def encoderDetached(e):
    """Detach-event callback: report the serial number of the device."""
    detached = e.device
    print("Encoder %i Detached!" % (detached.getSerialNum()))
def encoderError(e):
    """Error-event callback: print the device error code and description."""
    try:
        source = e.device
        print("Encoder %i: Phidget Error %i: %s" % (source.getSerialNum(), e.eCode, e.description))
    except PhidgetException as e:
        # ``e`` is rebound to the exception here, shadowing the event args
        print("Phidget Exception %i: %s" % (e.code, e.details))
def encoderInputChange(e):
    """Input-change callback: print the digital input index and state."""
    source = e.device
    print("Encoder %i: Input %i: %s" % (source.getSerialNum(), e.index, e.state))
def encoderPositionChange(e):
    """Position-change callback: print delta, timestamp and position."""
    source = e.device
    print("Encoder %i: Encoder %i -- Change: %i -- Time: %i -- Position: %i" % (source.getSerialNum(), e.index, e.positionChange, e.time, encoder.getPosition(e.index)))
#Main Program Code
try:
    #logging example, uncomment to generate a log file
    #encoder.enableLogging(PhidgetLogLevel.PHIDGET_LOG_VERBOSE, "phidgetlog.log")

    # register the event callbacks defined above
    encoder.setOnAttachHandler(encoderAttached)
    encoder.setOnDetachHandler(encoderDetached)
    encoder.setOnErrorhandler(encoderError)
    encoder.setOnInputChangeHandler(encoderInputChange)
    encoder.setOnPositionChangeHandler(encoderPositionChange)
except PhidgetException as e:
    print("Phidget Error %i: %s" % (e.code, e.details))
    exit(1)
print("Opening phidget object....")
try:
    encoder.openPhidget()
except PhidgetException as e:
    print("Phidget Error %i: %s" % (e.code, e.details))
    exit(1)

print("Waiting for attach....")
try:
    # block up to 10 s for the hardware to attach
    encoder.waitForAttach(10000)
except PhidgetException as e:
    print("Phidget Error %i: %s" % (e.code, e.details))
    # best-effort close before bailing out
    try:
        encoder.closePhidget()
    except PhidgetException as e:
        print("Phidget Error %i: %s" % (e.code, e.details))
        exit(1)
    exit(1)
else:
    displayDeviceInfo()

print("Press Enter to quit....")
chr = sys.stdin.read(1)  # NOTE(review): shadows the builtin ``chr``
print("Closing...")
try:
    encoder.closePhidget()
except PhidgetException as e:
    print("Phidget Error %i: %s" % (e.code, e.details))
    print("Exiting....")
    exit(1)
print("Done.")
exit(0)
|
danielsuo/mobot
|
src/move/Python/Encoder-simple.py
|
Python
|
mit
| 3,692
|
[
"VisIt"
] |
496dcf1560db122b2c1681adadb71a430f9da57f2ff10bc6eb5f246f4b17c89b
|
#
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##########################################################################
#
# Active Matter: Rectification System Setup
#
##########################################################################
from math import cos, pi, sin
import numpy as np
import os
import espressomd
espressomd.assert_features(["CUDA", "LB_BOUNDARIES_GPU"])
from espressomd import lb
from espressomd.lbboundaries import LBBoundary
from espressomd.shapes import Cylinder, Wall, HollowCone
# Setup constants
outdir = "./RESULTS_RECTIFICATION"
os.makedirs(outdir, exist_ok=True)

# Setup the box (we pad the geometry to make sure
# the LB boundaries are away from the edges of the box)
LENGTH = 100     # channel length
DIAMETER = 20    # channel diameter
PADDING = 2      # gap between geometry and box edge
TIME_STEP = 0.01

# Setup the MD parameters
BOX_L = np.array(
    [LENGTH + 2 * PADDING,
     DIAMETER + 2 * PADDING,
     DIAMETER + 2 * PADDING])
system = espressomd.System(box_l=BOX_L)
system.cell_system.skin = 0.1
system.time_step = TIME_STEP
system.min_global_cut = 0.5

# Setup LB fluid (GPU-accelerated, unit grid spacing and density)
lbf = lb.LBFluidGPU(agrid=1.0, dens=1.0, visc=1.0, tau=TIME_STEP)
system.actors.add(lbf)
##########################################################################
#
# Now we set up the three LB boundaries that form the rectifying geometry.
# The cylinder boundary/constraint is actually already capped, but we put
# in two planes for safety's sake. If you want to create a cylinder of
# 'infinite length' using the periodic boundaries, then the cylinder must
# extend over the boundary.
#
##########################################################################
# Setup cylinder (the channel; direction=-1 keeps the fluid inside)
cylinder = LBBoundary(
    shape=Cylinder(
        center=BOX_L / 2.,
        axis=[1, 0, 0], radius=DIAMETER / 2.0, length=LENGTH, direction=-1))
system.lbboundaries.add(cylinder)

# Setup walls
## Exercise 1 ##
# Set up two walls to cap the cylinder respecting the padding
# between them and the edge of the box, with a normal along the x-axis
# NOTE: ``...`` (Ellipsis) is an intentional tutorial placeholder to be
# replaced by the student.
wall = ...
# Setup cone (the rectifying obstacle inside the channel)
IRAD = 4.0                              # inner radius
ANGLE = pi / 4.0                        # opening angle
ORAD = (DIAMETER - IRAD) / sin(ANGLE)   # outer radius from geometry
SHIFT = 0.25 * ORAD * cos(ANGLE)        # offset along the channel axis

hollow_cone = LBBoundary(
    shape=HollowCone(
        # BUGFIX: the ``center`` list was missing its closing bracket,
        # which made ``axis=...`` part of the list literal (SyntaxError)
        center=[BOX_L[0] / 2. + SHIFT,
                BOX_L[1] / 2.,
                BOX_L[2] / 2.],
        axis=[-1, 0, 0],
        outer_radius=ORAD,
        inner_radius=IRAD,
        width=2.0,
        opening_angle=ANGLE,
        direction=1))
system.lbboundaries.add(hollow_cone)
##########################################################################
# Output the geometry: write the LB boundaries to a VTK file for
# visualization (e.g. with ParaView)
lbf.print_vtk_boundary("{}/boundary.vtk".format(outdir))
##########################################################################
## Exercise 2 ##
# Visualize this geometry using paraview
##########################################################################
|
psci2195/espresso-ffans
|
doc/tutorials/06-active_matter/EXERCISES/rectification_geometry.py
|
Python
|
gpl-3.0
| 3,483
|
[
"ESPResSo",
"ParaView",
"VTK"
] |
d4d7c1b9e6e085b621add023d2916cd31a5e3ac34e5c81ab64af8ca9570ad818
|
import os
from confypy import Config
from confypy import Location
# config file path, relative to a base directory
yamlConf = '.octobot/config.yaml'

ud = os.path.expanduser('~')   # user home directory
cwd = os.getcwd()              # current working directory

user_config = os.path.join(ud, yamlConf)    # per-user config file
local_config = os.path.join(cwd, yamlConf)  # per-project config file

# environment variables consulted for configuration
env_keys = [
    'OCTOBOT_INCOMING_WEBHOOK_URL',
    'OCTOBOT_USERNAME',
    'OCTOBOT_CHANNEL',
    'OCTOBOT_ICON_EMOJI',
    'OCTOBOT_ALIASES',
    'OCTOBOT_DEBUG',
]

# fallback values when no other source provides a key
# NOTE(review): OCTOBOT_DEBUG has no default entry — presumably
# intentional; confirm
defaults = {
    'OCTOBOT_INCOMING_WEBHOOK_URL': None,
    'OCTOBOT_USERNAME': 'octobot',
    'OCTOBOT_CHANNEL': '#general',
    'OCTOBOT_ICON_EMOJI': ':octopus:',
    'OCTOBOT_ALIASES': []
}
def load_config(overrides):
    """Build the chained configuration object for octobot.

    Sources, in the order they are chained: the process environment
    (``env_keys``), the per-user config file, the per-project config
    file, and finally the ``overrides`` dict supplied by the caller.
    Module-level ``defaults`` fill in any missing keys.
    """
    cfg = Config(chain=True, defaults=defaults)
    sources = [
        Location.from_env_keys(env_keys),
        Location.from_path(user_config),
        Location.from_path(local_config),
        Location.from_dict(overrides),
    ]
    cfg.locations = sources
    return cfg
|
aubricus/octobot
|
octobot/config.py
|
Python
|
mit
| 914
|
[
"Octopus"
] |
493b1bc4c498185c0d4094a8e0c34c38e69281803cd7a423ebb0cdd7065f19f4
|
TRIPLE_SCORES = {}
for index, triple in enumerate("""
ing$ ed$ es$ er$ ^co ^re ate ers$ ion$ ^de ^di ess$ atio ies$ tion ^ca ine ati
ica te$ ted ^su ive ting ions$ ^ma tes ^pa ier$ ^mi ere ^ba al$ red iest$ ove
era ^pe rat ic$ lly$ ^pro ble$ ring eri ty$ ^po est$ ent$ ^ha tions cat ity
^se ly$ lat able ^mo ina ene ^sa ^bu ness res ^be ide nat ve$ ry$ ^pre ure en$
^li ula ^la ^ho ^ra ^fo lit ned ^wa ^ov ^me age inte ^si ally ning ^fi ^ta
nce$ ses ne$ re$ ^te ^int ^pu one ^pi ite ^wi ings$ ele ore min ^mu ome ato
ali ^tra eme ely ^ga ^fa ^do ^so ita ^no ents$ ver se$ des ist$ ment ists$ ise
ete ^to ^cu ance unde ili or$ log tor tiv tic nes call cal ^und ^bi ter ics$
ors$ ied$ de$ ^sta ^bo ari ^lo ^he ora ^vi ^ti mat iti ace an$ ying$ rec tat
iona as$ ^hu sly$ per et$ ini eti ves ries ^ri ence als$ ^cha eco ets$ eli are
^le ade ^imp nal gat ^da ^ve ire ice ose olo ave ame sed eve ^un ^fu dis oli
^gra ara ile ster ling ake ole edi ant$ ale eni ^va ^ru owe man ime bly$ ner
nder stic sing ded vel on$ le$ tter ous$ oni ism$ ge$ esse ding ani ici ^tu
der omi ifie ^inc ties ^ro ^ne ute ^ind med sses ouse led enta ^rea ^hi emi
aste ction use nces isti rit ema comm ^ge osi bil ^sha enti ^in os$ les ivi
itio ^ki ^wo vers tel par ^hea ^el ima ial$ ^du ^cou ious$ ena ori ope ede ^fe
ntly$ esti ce$ sit rel ons$ ish$ ende anti ^lu ogi for ^ex nted is$ iate emo
uni itie imi ens$ atte ^fla dec ces ^ni ote oma nit fic comp ^ce ven ul$ ape
shes pol ily ifi ^chi nic me$ ^exp mon mer ega ^na ref ese ami ^cra isa ecti
cked ^sca sting ona ishe esi ^ins inge ges ar$ ngly$ eta ^go ura isi graph ery
ctiv ary ^ci uri rem nting el$ ^bla ude in$ ays$ cking ^sho tors ingly ^hy efu
^tri upe tting ants$ cons ala ^qua els$ disc ther rep oca iste ana ^ju rev iga
ible gen nding ^whi ^sto icke asse ^pri ^nu ped def ane ^acc oke itte lling
lic ke$ ase ^inv ^gu rest ono ^ass ogra ode it$ ably abi wing men ente ange
us$ sted ectio eca ^shi ^cla ^ant ret itu dit cont ^sti ^inf ^an shed lin ila
conc ^pla oti ^spa ^gi ^ev ral king iva aci ermi efi conf tur ments lled ioni
dic dem agi acke ume rer ory ^sy nded inde eci cted asti nter ^ab pers erie
elo del ced ully ow$ ming ^stu ^ja shing essi ctor ^boo ssion lar its$ conv
ama ^em ibi dly$ cul wed vat cit atu ^spi met ip$ idi sup stat list impe cting
coll ^bra pping pped indi ela ters ssed iso assi usi isco iously fin evi val
tabl oto ola mes kes ins$ arti ande alle ^bri tted sol ows$ dat ^vo ^am reg
nom avi arie sid ler car bles utte ten ssing rabl ensi acti ping nar mis ata
adi ^we rin pes ounte ked ille eva ero ecu ule tar sness ione det appe une mot
div ars$ aria ^unc ^spe wer ving ser ric pe$ ipe ike fied erse ece ashe and$
^all uti sal ously ito cond ^sla ^lea um$ ues$ ncy$ este ^cro ^clo ^ad vit umi
rted othe ot$ ocke ack$ ved una leg exe essio epo dev allo id$ icu ibe ette
ees$ ctions ^sea vis tal elli ecto aine ^swa ^sli ^cru xes fy$ ard$ ante ^ste
^qui ulle sem orte olu nger mel illa ife ian$ gged athe ^gri son ren inco fies
eate contr ans$ ^sou ^cri sters rag rad owi opo ller fying erti ease count ^jo
^dea retr mic erve epa andi abo ^tru ^bea rous pos ogy ira expe ently egi disp
dent den cker ^uns ^exc pres pat nist nders hom gy$ eno endi api acks$ ^plu
^bro uta pit mor mil ippe tics repr ker ium$ ifyi elle efe diss bit ^av uate
siv pper llow ight$ ify esta ersi edu ands$ ado ^impr ^cho ^app ulti opi onde
ians$ cing ^stra ^spo ^sno ^ski ype typ sion prec ots$ orie ompa mit ighte
icia gging ain$ ^thi ^pho ^irr test strat rting olle ips$ ead$ aze ^imm ^fra
^al utio prop odi nging mar ged equi elie dist deb corr ^scu ^adv rmin rly$
phon ota ntat ino ibu gic esce ecte ava ^swi ^flu ^brea op$ ock$ mper iatio
ging eso duc cut cid ards$ ^wea ton reb part ops$ ners lev iri exi evo dest
dep cur ^woo ^mou ^extr ^emb ^appr tters onve ny$ illi ida grat form efo eade
^ree ^ext vid sel otte orse ocu nced lism igi gist full fer apa ago ^rei ^foo
^che som port olde ocks$ nabl mal edly col cer ^tea ^fri ^cli udi orti hous
exa esu epre ect$ ches cap alo ^ty ^rou ^or ^op ^ji ^fli ^dra ^att umbe serv
rect less ipa ige iers$ bes at$ all$ aca ^the ^blo tin sur prof pred pen orma
oppe onte once omme ntal not nged eda eads$ circ alli ains$ ^mai ushe ssly$
pul ovi onti odu mod iati epi ects$ compl ^unr ^fro sis sat osse onsi oes$
mber fir ewa ege bat am$ ^glo ^enc ^cre ^coo ^bli unco ulne ros out$ mul lut
ials$ ger eto entia eed$ cy$ aying$ ap$ anne agge ^slo ^mea ^ent ^dia tric
term rac ound$ ntin ncing lter kers impo etti essly eque ense efa eba anta
angle ^squa ^sco ^id ^chu ^ac uste tches own$ lness lists ico ends$ end$ dom
ber alls$ ail$ ^twi ^stri ^gla ^free ^flo ^eq ^blu ^ann ums$ ood$ oge ntic
mped lif ishi iousne inci ieve erva eave dy$ ats$ atche asi aps$ ails$ ^unf
^hou rring rers prov ony ommu nning nking mmer liv issi imme ick$ hum ewe erma
ctur arra apo aki abu ^hoo ^coa ^ap vol vert udge uce uali tim sent pal outs$
ompe oga nim ights$ ially hyp ewo etra erna com cket bas amme ^sna ^aff uci
ual$ stor ssiv sign ontra onse omo llat lid ircu ill$ her enu eati cor cent
bbing ano anke actio ^tou ^shu umble ugge ued$ rial reh oxi oly nish mult misc
mag itti irre ince ilo ibly gur ful bbed arte aro arde aile acce aba ^tre ^ep
^aut ^adm urre ucti tom tif tiat sin ounde ork$ onfi lectr eys$ epe eathe cess
but arke aphe ampe ^tro ^moo ^je ^intr ^gro ^dri ^chee voc tut ssness rred
nists net lim insu gal fing ffic esto ervi eepe ebu bus arro antly acki ^unb
^slu ^en vent tud tit spir rded orta oria om$ obe mping iali gon ewi erne cop
con ckers cant bor ams$ acy ^unl ^stea ^scra ^fai ^expl ^er ^eff uing$ rse$
rew rding rap ova oste ortio orre olli nall nag lib lect lam ken ithe inve
ills$ hydr gists etro erou epu enda edge butt arge aga ^pea ^loo ^inh ^hai
^bou ^adj usti ucke sher sec rtic ron rmed rid rdin prot proc perf onne nker
nest lder isse ffer ew$ ests$ erri erge emu ehe bel aphi acte ^rai ^ill ^end
spect rnat ril resp ption psych pac oard$ non nif nals mus lon loc let inse
indu igni icks$ gin gger fed fac exte essne erte erso dul dness dif ders ctors
ched cad auto adve ^see ^roo ^psy ^fre ^fou ^dro ^clea ^arr xed wat verb usio
uffe tched rot rmat ram phil ounce orou orde opu oom$ ook$ onge olve oce nned
nanc mmun lanc isio igra epti ella dict cif can aw$ ash$ alte aini acco ^wri
^sni ^she ^pra ^infl ^clu ^ar vil var vac umme uma osti orn$ orme old$ num
nous nked nges ngers nge$ mpos mist ique ency ego dge$ depr crat ciat ching
bed ayed$ anda adde ^roa ^lau ^impl ^gru ^gli ^exh ^cy ^bee ze$ wers ubli terr
tent scop rson rif rent ost$ ordi onsu ommi olla ocki nem nches mming mmed
ient$ etri entio eer$ edo eat$ cip bell appro appi aili admi ^unh ^squi ^inst
^grou xing tching rtion rned ppers plac orts$ ort$ oro onfe oba nters my$ mol
micr mast ista inti imo ilia expo erme ell$ ear$ duct distr cycl ctic ast$
anci afte ^bloo ysi vag uli uffi uctio thing stin sions sens rry$ rming rish
ranc perm perc oxe orm$ oppi oon$ onta ompo oing$ oco oat$ nly$ ndic itche
inne inke infe gam espo erce ecta eake cious bar ^wai ^trea ^thu ^slee ^cau
^art work urge uous$ stim rking rked rist repl rder pap ounds$ oons$ ollo obi
oats$ nos nel mut mpet mac lous inu impa idge icki erre eria erco ells$ eal$
dir dies cov constr cle$ bul ball arre anni affi acto aciou ^whe ^off ^obs
^gui ^fea ^ed ^dou ^crea ^cle zes vin unke uga tten ssess rter rning rier prep
pref phic past ossi ooms$ ntag nsit nner neg nam mid magn iro insi imu illo
iable esso esa eru eers$ eds$ eari eali dges conn cles cem bod be$ aws$ attle
atti assa ada acu ^zi ^wee ^phi ^my ^act ut$ unne subs ssions spec sil renc
rant rall pic patr ox$ orms$ opa oose oile ocra oards$ ntim ntial nterr nching
nad lted lent hon gul extra elu easi dil bber barr asts$ ass$ arri aid$ ag$
ache ^unt ^strai ^spu ^sma ^pee ^pai ^kno ^entr ^arch ^ag ^abs zing up$ tte$
sten sic shly$ rses rian refr prom poss pher pass ormi ord$ onso nsiv ncies
nched liat itia ingi igu ictio icro hors fest espe erra egu een$ ectro eats$
diat cel cast amo allu ags$ accu abe ^unw ^spri ^poi ^loa ^dru ^boa wom viv
unda umpe ubbe uatio thers thed suppl ston rrow rig riest perv pend owns$ ooke
obli nol mest lting lenc isto issio incu icle hold hol ffect fact esca eous$
ensio embe emba ehea eare dding const comb class afe adu ^trou ^sky ^env ypo
verl ulou ube temp tect tam tab supp rries rol ris quadr prod prev pil orro
ords$ oope ooks$ oms$ ommo og$ ocia obse nor nism nform ndit na$ isms$ isca
iffe ience icie gress ffed erly erio erba eha egre egra eadi cam cabl ben axi
ardi appre anu act$ abbi abbe ^who ^stre ^goo ^doo ^aw ward uts$ uto ubi
transf trans thes press post pin phys phot pag oys$ owa otio ortu onco ntif
nies nics mak lop long lest ize ispe iscou ighti gnat forg ertai erea entu
enge edia eache din dim compr cial awe arpe archi adju ^unm ^schoo ^pha ^inn
^bio ^amb urni ups$ unre ually turn ttes rvat rus rtif rrat rnish rious rav
py$ pur prem ppos plic pert ospe ofi ody odo ntur ntil nsat mpress mers mas
lding lac lab ivo ippi iolo impre il$ ifu ias$ hab gar gall ffing ercha effe
cult cin cas bal bab ashi angi aly addle acie ^wei ^smo ^scri ^plea ^obl ^ke
^geo zed way$ wall verr usse urse uisi tul top sy$ suff sty$ scal sav rsed
rrel riv rings rge$ rals ract pport pot pet orne oote oot$ ondi offe ntion
nterm nterl nstruct nsid nef mput mous metr lleg lers istra isma ipi illu
illio iddle grad gor ghting fil ety erla equa entri ensa edne ebo dism dged
denc conj cav carp capt brat barb ad$ acts$ aco ^wre ^too ^swee ^neu ^gua ^bru
^alt yste ways vet uls$ uckle tis til thor tend summ secr sag rges recr rang
pop plat oundi orce olds$ ogue offi oed$ ntest mark llies llest llab ixe itua
ishly inki ind$ igo igne igna ids$ iciou got git gest ews$ ert$ ersa erry erni
ern$ empe eeri earte ears$ eame ddles cod cher bic axe arma ark$ andle aise
affe ^us ^stro ^stee ^spoo ^shri ^que ^outl ^ir ^gree ^esc ^coi ^brai ^ast
viol vabl uppo ug$ tir sor sex rom rar phy$ pan ompre omple ogs$ ogge nterc
ndent miss llect lem ject ispo info infi impu igge gnif far erts$ ersta ermo
erfe ello eive eeds$ ecre ecei dor diff dging desp ddle$ dder dden dded creat
cil cann atro arks$ aphy aire ^opt ^dee ^aud ^aft ^acq ^abr yte war ush$ ugs$
uffle surr succ spher ras ptiv priv plan pid path opho oone oods$ onou oid$
ofe obbe nterpr nin nduct mpan mble$ mand llers lig lier lders lad la$ iums$
inks$ ingle imple icti ibra hand etai eepi eeing$ decl clar ciousl bject ban
azi audi ank$ ancy anchi ^trai ^tha ^tai ^stru ^snu ^play ^ord ^lou ^im ^gy
^fle ^eng ^drea ^dre ^broa ^asp yna yli ycho whit vig urba unctio ttle$ tid
tan subm ssembl spac soc sim sers sen sect rtun rlin rging rged req quit oute
ouri onie ompu obo nvent nsur nstit nsion nent nderst ncer nas mplif mount
monstr mbles ma$ lesc inca impro ils$ iece idly grav gies fus fix exce ettle
etche eshe eptio epro ento elte eho edie ecra ecia easu easo depl ddling curr
congr cies cals board bing bet band awi avo atho asso art$ arrie arou arni
armi arme amma amble alla acle ^wha ^vio ^thro ^smu ^scru ^key ^ka ^ic ^fie
^aer ^add xic vot vic urde uously ultu uggi tty$ trat transp tist tap stics
stabl shad sev scent scenc rod rment riat rians regr reass rden quest publ
ptions prim pill owne otti orge onsti ong$ oney ondu nus ntar nov nnect ngles
ngle$ neer nction mov mmat mill mbled lves ltur llion lish liq lik lec irri
ink$ impli ghted fat ex$ escri errie erpe eppe emble eere eak$ eable disr dier
ddled cken char card carb bull bol bill back arts$ appea alu aggi ^uncl ^tee
^sai ^pou ^phy ^occ ^incr ^est ^enl ^ble vest utto ust$ uppe unti unk$ unge
uck$ uca transm tol tig tak subl struct ssim sper shness rrang recl read rab
prob owle ousi orns$ orbi ophe odde od$ oble ntent nsol nny$ mpers mos mmers
mbling mbers llet light las jack isce iple invi inva int$ infa immo immi ifte
har ghter fish evia etu erpo erli entra endo em$ eete eens$ eedi eck$ cord
clos ckle$ camp bon blish atta atri ansi ania air$ ads$ adia ^whee ^spee ^smi
^poo ^ple ^plai ^goa ^fee ^dy ^chro ^blue ^astr writ vern urri undi un$ umpi
ulli uati try$ tract tness techn tall ta$ spic scrib rtain rousl rness rner
rnal restr put pros osta oso osco orri orks$ oots$ onu onia oise oggi nspir
nguish new nchis mport mort mim mass lla$ lars iqui ipli into inno inni inds$
ilde ifo ickle ibbe iance head ghten fting fort fam exti expa exo erta ersio
erpre ercu eogra embo ebra ebi dol dal ctat cred cott cosm corn cock ckets
cept ccess calc cab beh beg bef awa astro asce arbi aph$ anto anno ambi achie
abli ab$ ^tho ^soa ^ske ^sau ^py ^outr ^jou ^ign ^gau ^flou ^expr ^chai ^at
^appl yri yra ydro xist wher west wak utti ushi usa unche ummi ulte uke uity
uarte uals$ tuat ttles trad tot tons thol therm tens tem tell tef ssif spons
sist shers sew seq script scar rtis rtin rten rsing rrect rman rej puls ptur
ptic priat phers pel outi ouche ouble otto oth$ orke opria ooni onstra ondo
onci ompli oids$ ogie obs$ obble ob$ ntain nship nounc neur mmod misd mics lov
los liev liest lag isso issa iogra insti im$ ients$ host hens gramm gnit gned
gaz fted fess ferr erwo erro enou ems$ empo elve efle eele ecks$ ecke eboa ebe
eam$ eals$ eale eaks$ dyn dow don diest crit cret cos ckles cians chor bserv
bric aspi arbo aphs$ apho anki ambe aft$ aby abs$ ^wra ^sty ^ob ^ly ^kni ^hoa
^by ^auth ^aq ^ang ^aggr ycle wav unma ulsi ulge uff$ ubs$ ubbi ub$ tub trib
the$ tast subst sub stly$ stion stil stig ssur spar soph sett scat scap san
sacr sabl rship rrog rmon riousl ringl ra$ quart pist pick orga oos$ ontro
onstru onju omba ol$ oiste oil$ occu oad$ nval nthrop nsibl nness ngling ngled
nav nac morph meg mant ltipl llus lett ishne ios$ ints$ instru inou inha indle
iki iggle ido hib hem gmat gent fluor fill fier fe$ eyed$ erns$ erhea erfu
erfo erbe eon$ eolo enli enie eet$ ectu eas$ eani ean$ dish discr dig destr
cter cong cky$ cian chiev ccup buff bble$ atria armo arki any anthro anks$
ancie amu amou ambu alia aide abso ^ze ^unpr ^unp ^sle ^plo ^instr ^hei ^hau
^die ^dai ^coe ^asc yme wind viat verh verc veng usts$ upli unni ulta ucki
ubsta tort tious tern tang tail syll subj stern stal ssibl sir sint scend saf
rpret rog rock rob rism resc rend rach quat prol ppy$ poll pleas perp oughe
otu ostu orru opy oof$ onto omma oki odge oate nuat nkers ngest nfid nev ndec
ndat ncil moth merg mend marr lum llows lded kiest kier ittle istri isre ishme
ipo inso impi imbe igno ig$ ideo iba iato heav gue$ gram ggles ggle$ ggers
fter flect fall explo enche eeli eede ecla disb curs ckling cet cens cac atia
asto aske arve arche arce apti ando ancho alke airs$ agra acro ackle acka ^wro
^toa ^theo ^swea ^swe ^spli ^snoo ^shee ^reu ^pie ^is ^inj ^fly ^floo ^embr
^attr ^as ^agr ^acr yma yle xpos xped wir wash verst urs$ urgi uns$ unki ung$
ummo uite uctu tum to$ tly$ tingl tcher tax styl strict stress strang stif
ssor ssier spos spit spens sked simpl sibl show shin sappr rsion rrupt rried
rip real quiv pyr progr poth pon ple$ pack ousne oura ought$ ortho ooti ontri
oncei ombi olste ollu oils$ oili oci nvest nvers nut ntiv nsist nserv no$
nners nment nings ngul nfect nderl mplic mism migr mies mier mbed max matr lod
llig lis len lcul lav land keep journ inkle ingui ift$ icy icto ict$ haz gues
graphs grams gil gibl ggling ggled get flow fit fel fath etio etee essa espi
epri epla eously enci encha cus ckiest ckier cis chem che$ ceiv cart bomb
bbles bbled batt ax$ aughte aspe arna appie ampi ammi alua alie alcu agne addi
acri abri abble ^ur ^thri ^soo ^sche ^nea ^kee ^fru ^fluo ^cree ^blea ^beau
^arb ypi xat wis wid vor verd vels ustie urne ur$ unte unta uno unks$ umu
umps$ uits$ uit$ uise uire uffs$ uddle ucks$ tus ttled ttiest ttier tox togr
tings thund thet susp stock stiest stier ssy$ sser spond sob singl sies sick
shier sep sapp rters rrit rmal rists rcharg ran qual pret pplic ppiest ppier
phen pens oze osu oshe orni opti oppo ophi oole omy oll$ oads$ oade nven nterp
nterf ntam nsul nse$ nonc nion nial nher ngrat ncomp ncid narr nant myst mpass
mmut mish mess mem mach lyt lies lax ky$ ix$ isu istle isha inhe indo inche
igs$ idde icts$ icta ickie iche ibble hall hal gning gentl fut fract fet fals
exu expre exci ertu erbu enfo encie eje eep$ eel$ ecli echa easa eans$ eams$
eaki decr crim coh ckled cism chron cert cass carr burn break bled bby$ bbling
away augu atch$ asta aple ansmi amps$ amp$ alco aggle adva acia abou ^prea
^outs ^org ^on ^nee ^lee ^it ^gue ^gou ^glu ^frui ^fau ^ec ^dei ^cry ^amp ylla
ydra xpect wning wling wast van uzzle utu ustra ump$ uishe udgi tun ttling
trop tism tag surf subc stud stle$ still stent shy$ short sham scont sass
sanct rul rtur rtak rrying rmit rling rim ress rches rcept rbit punct pted
prosp proph pric plet phras pall our$ oupe ountai ough$ orshi orche ooki ongs$
ongi ompi olls$ ollie oint$ ohe ogre ods$ odie oda obje obbi oache nty$ ntract
nterv ntend ntell ntabl nsor nier nfin nerv ndol nctur mpens mistr mbing mart
lty$ lousl lloc llar lith lics libr lessl lant kin itiou ispro ispa iptio
ipple inua inea ifts$ icte heart greg gnost gment gers fresh flatt ffus eying$
etrie estru ervie eps$ eons$ ensu enne enia eight$ eeze eels$ eane do$ disl
dger cum ctabl clin ciousn chatt centr cand bound bot bord bis bew best beaut
awle ath$ astu assie aso arne argi apse angui andie ample alve aite actua achi
^tau ^sui ^strea ^stai ^spla ^pree ^nau ^joy ^ingr ^gna ^flee ^excl ^err ^eg
^dau ^chea ^arm ^arg ^ach ^abstr ^abd ycli xec wood wned wled walk vow vill
view vens vem vas usto uspe uriou urbi unbu uggle uestio uence ucts$ ucto uct$
trav transl tisf till sund stur stup stles ssoc ssin ssiest solv smok sier set
seb seas rubb round rium rient rger refl reapp rced quent que$ purs pses princ
ppropr por pogr pett pest pepp pent pell ourse ours$ ounci otche osts$ oque
opie oome ooli oofe oodle onvi ongre ols$ octo ocka oarde numb ntry$ ntiat
ntern ntens nten nstrat nses nop nogr nnot nnies nistr nis niest ngel ndel
ncis ncert nceiv nast nan mun mpuls mprov mmon mmiss meth mens mad lved lve$
luat ltim load llop llions llas lign leas itra itle ith$ itchi ipu iola insta
inqui infla illie ifti iary iage holl hel hav gog gan fund fuln exua exhi euro
ethe estra estle eshi erru erpri erdi erbi ep$ envi entle enco enca emai eju
effi eeme edde eami eage eadie dult dress displ desc dam dag cup ctuat cow
chast cell ceas bling blig betr attie atchi assio arli ariou ario arca arbe
aqua antia anspo ang$ aing$ aha aed$ adio acqui ^vu ^upr ^unst ^squea ^spea
^shre ^shea ^scree ^scou ^reo ^quo ^pru ^outw ^outb ^opp ^oc ^incl ^imb ^hee
^frie ^enf ^encr ^emp ^broo ^bree ^bre ^anch ^accl zon zer yro xpress xper xam
with vish visc vir vict vibr verp vend urrie urli urle uria uple upi umpie
umma uita uge udde uba tton trit trim trag tos tocr tant tac syn surv surg
stry$ string stom stiv stick ste$ ssors ssent ssar slat sinf sill siest shment
shion shiest shak scul sall sad rving rved rser rpor room roll rgenc rfect
react rdly$ rcul rce$ rbat print ppreh ppear pow pois pies pict peac oyed$ oxy
owni ount$ oss$ osphe osmo osa orpo orci opla oops$ ool$ oofs$ onds$ ond$ omfo
oldi oints$ oice oggle ofte oati nunc ntrov ntangl nois nium nick nhol nforc
nfer negl ndling ndled ndenc narch nacc mpreh mpor mplet mplem mann maj lus
lster low lot lor llin lking lked lentl lang junk jud isla isci iphe ims$ illy
ign$ iffs$ iffi iff$ ief$ iatri iant$ iac$ hist hes hamm gran grac gem front
flag fig fast fan evie etta ethi estri eroi ereo erbo ept$ eple epai enso
eights$ eeps$ eckle ecki eara dvent dram dogg doc dill detr descr dents defr
dders curl crac coop cloth ckag chlor chanc cers ccus caus caps bur blim blem
bin bid berr beat atie aths$ assu arse arle aptu appoi anie ampu ampa alue
aids$ ah$ ague affo advi abra ^way ^uph ^thou ^shru ^quee ^pau ^ox ^orn ^obsc
^obj ^noi ^insp ^grea ^gho ^gai ^enr ^clou ^chlo ^bai ^anc yards$ yard$ xim
worth word will wild weath wear warr volv verpr ustle ushie urpo urna urn$
urie unsu unsa unkie unfo ull$ uer$ uent$ ubsi ubco ubbie uara tyr turb troubl
tripl transc trac tists teer tard sunb stubb strib stol stibl stes stead stag
ssert sput spers sking sings sest scov scant sauc salv rves rub rtat rstat
rsen rrows rriag rpos rough rior right ried ria$ rebr rders rcer purp pting
pron pract ppress pprais pond pod ples plast pier persp pars parr parl palm
owl$ otia oths$ oriou ooze oop$ ools$ ookie oofi onspi omplai omie oje ogno
oche oarse nur ntric ntor nton ntiq ntip nties nthus ntegr nsions north norm
nod njur nip nfus ndly$ ndles ndle$ ndiv ndemn ndabl musk mpat mpar moll mmy$
mitt misr miest merc masc manc mall mah lving lumn lumb llum llit llied llag
itch$ istre iske isfi iscu invo inue intro innie inna inie infu inee imma ilte
igma idle idea iarie iagno horr hood hil hid hend hard gov gnet ggy$ ggiest
ggier garr garn gard gab frost frig fogg fiers ffle$ fect exto eway etto ergi
erca epea eoccu enly empla elly elea ehou ehi eeke eedle eedie edra ectri eclu
echni easte easse earne dvis dvert dos dmin disg died derm dar cub cruc crack
court cot conq coff coc cling claim ckad chin bstruct bstant book ause aunte
asy asci arria arci anshi angs$ aneou andba anche ampli alti alk$ ajo aime
adge acci ^vie ^ung ^toi ^shoe ^sei ^poe ^pay ^outp ^outf ^orth ^om ^noo ^neo
^jau ^infr ^excr ^esp ^empl ^day ^croo ^ampl zzles zzled zzle$ zen ysti yco
xplic xid whol wag vul vok void vings veg vail uttle utri usta uousne unse
unrea unea uncia unca umbli ulls$ ulga uest$ ubo tual ttest tron tow tous
touch tops thous therl them tatt sum sug stroph stron straight stit stenc
stand stanc stak ssag spin sot sort sman semb search scount scor sand salt sab
rying rvic rve$ rum rtly$ rthy$ rsel rric rresp rrant rout rmer rmen rled rill
rgan redr rching rched rcat rbal rann ques quenc pull protr procr pris prer
preh pprov pott postm point pock pir pip perb pant palp oying$ owls$ oure
ounti ottle orwa oru orthie oreo ophy oode onque oniou onfo oncu olly ointe
oins$ oin$ oggie offs$ off$ oddi obstru nutr ntiall ntall nsult nsens nousl
noc nlight nia$ ngin ngen ngag nets nenc nec ndulg ndest ndef ndar ndant ncip
nciat ncem mur mpir mpil mmand mists mir mfort mbul mbin manl mabl lver lux
lud lpat loos logr lock lmin llying lle$ llabl lets leph lef laz last laps
lact kil just izzle itho ispu irds$ ird$ iny inju ilve ihi igns$ iggi ifle
ievi iently idio iani iacs$ hears hast harm guar go$ glass gibb ghters ghing
ghed ggest genc fug freel forb foot fitt ffles ffled ffin fers fend feat fault
fabr expu expli excu etho esou esie erto ershi erki epto epta entua entiou
enga endu ellu eets$ edou ecrea eauti dying dream drat dox down doubl dmir
dles dle$ dings dew dens defl deal ddy$ dabl cust curt ctif cross crem corp
clim cist ciall chrom chees cew cath catch canc bush burr budg bsol bridg brav
blic bif bbiest bbier azie ayer$ auti autho auli arba appo appa ansa annie
anna angli amba almi allia alks$ alde agni agna afts$ affle ^unn ^twee ^tria
^threa ^sme ^sku ^sce ^quie ^orch ^oct ^obstr ^kna ^joi ^inq ^ing ^hay ^et
^ench ^eas ^brie ^ax ^accr zzling zier yla yho yca xur xit xcit win wick wel
warm vulg verg verdr vals uxu utche uske usca ury urro urns$ urga unwi unla
unfi undre ummie umbi ulsio ulse ulmi uitie ucte ucce ubri ubmi ubme uadru
trust trap toil thy$ thick sweet sult sturb stul strok stiff stens stel stall
spok speed south sons sod sleep slav skat silv sib shuffl shoot sharp shap
shabl settl sept segr sconc sals rwom run rty$ rtions rsit rships rrif rpet
rped ro$ rners rmul rmost rmac rgat rfeit rfac resh rdon rcing rbon raw rants
quot quid quant puss push pun proj progn prett pprec ppoint ppings plex pigg
phor petr penn pec patt owde ourna ource ounts$ ounta ouchi oty osci orna ormu
orki orgi ooing$ ooed$ onme onfu ombo ombe omb$ olia ointi oho ofa obu oaste
nym nvert nud ntit ntill ntert nsign nky$ nkiest nkier nject nint nhal ngem
nfirm nett neff ndul ndert nderp nderc ncumb nctions ncons ncomm ncers ncat
ncarn must mpy$ mple$ mpiest mpier mpact mom mmend midw mbler mans mang lys
ltrat lters ltern lsiv lsif loq lluc llis llets lings lia$ legr labl kindl kel
izze izo ixi iths$ iscre irma ir$ ipro ioti iora iole intra inta impri iltra
ifia iewe ieva ield$ iefs$ iably hop hook hipp heat headl happ hang gyr gued
guard ground grim gnos glob germ furn found foc flex flash flam fight fiabl
ffling fes excre exco ewri etry etie etfu etba erspi erha erfi erdo erde erci
enthu engi ench$ emie elio elia ekee eite efie eeti eek$ ectua eadli eacti
each$ dvant disq dispr discl digr dibl deadl ddies dall ctly$ crav comf clear
clean ckness chill chet charg cenc cef ccept carn capp canv bust bugg buck
brown bras brand black bett bbers battl bass backb azo aza ayers$ avai aunche
attri attra assai asio ashie arta arm$ arly apsu appli anva anha angu alpa
almo allie airli ahs$ aggre aero ady adua acta accou abstra ^ut ^upp ^untr
^thra ^swo ^smoo ^ska ^prie ^outd ^ol ^nai ^moi ^mee ^may ^mau ^ko ^gre ^ens
^ej ^crou ^boy ^aug ^alb ^aggl zers yno yne xhib whipp weight weed want vind
versh vanc usu ussi uss$ urcha upti unts$ unt$ unsea unli und$ unctu unbo unbe
unba umbra ulu ultra ulce uish$ uide uddie uble uadri ttrib tterb troll triev
treat treas train tousl tok tob tier tick think thic theor text tenc teg teen
tants tals suit subt stretch stled stinct sties sterl sourc sound sorr sold
soft snak sics shimm shest shell sell secl scipl schem scen scall sar rust
runn rud rsem rrid rrev rrenc rons rless rland rket riums riot rie$ rick rhead
rfull reun reed reall rces rattl ratt rans rand rak rack quint quin putt pupp
punch pter pse$ proh pples pple$ ppend portr pont pling plen plant piec pholst
phob pharm phal perch pef pav pand owse owli ovo outra outhe oughs$ osy osie
orthy oriu oopi oomi onsta onsciou omni ombu ombs$ oite oiso oine ohi oddle
obste nvit nvis nvinc ntrif ntions ntals nons nnier nlik nless nious nions nil
niac ni$ ngy$ ngiest ngier nfess netr neous ndy$ ndustr nduc ndness ndid nderb
ncub ncorr nconv nclin nchant ncept narc nair nab mples mmitt mment misg misf
misch mig mew mell mean matt mamm maid ltiv lons lo$ llut llier llel limb lien
lessn lebr leag kick kabl jell iza iwi itta istru isru iski isho irt$ irs$
ircle ior$ intri inkie inio ingie infle incre incli inau imps$ imp$ ightly
iety ields$ iddi husk hos herb heads harp gust guil grudg group gons gnor gnom
gloss glor gistr gim ghtly$ gas furb fum forw foll fold ffirm ffers fem fell
fatt fanc eur$ etou eteo etch$ esque esma esco erlo erga encou enchi elf$ elde
ejoi egna efro eeve eeks$ eddle ecou ecko eche earra earie eape eachi eace
dual driv doll dlock dling dlin dled dium diet dget deh deem croc crob cream
cost cook consp consc ckin civ cists ciph cier chopp chol chas champ cclim
caut ca$ by$ broth brok breath bout blaz blackb bitt bird bibl bern bedr bec
bash awn$ avia athi astie ashio asca asa arsi arms$ ardly ardio arble aque
antle ansce andy ammo alpha alme alci aive airy airi ainti ainte aina actu
accli aboo abbie ^zo ^za ^vau ^upst ^unscr ^unq ^ultr ^thru ^thre ^slou ^shoo
^scre ^sci ^oppr ^omn ^od ^lie ^groo ^gee ^foa ^fei ^ess ^enj ^engr ^edg ^droo
^dio ^boi ^atr ^asph ^ash ^aph ^anthr ^alph ^abst ^abl ^abj zy$ ywo yti yphe
ymo ylo yclo xual xtrem xplor xcept worr wels wder wad vy$ vist virt vies vial
verm venl vang vamp va$ ussy urtai ursi urry uptio upte upsta upple upho upa
uori unwa unpro unnie unha unfai unclea ult$ ulpa ulldo ulla uivo uine uile
uffie uette uet$ uds$ ud$ ubu ubse ubscri uanti tup tto$ ttif ttic trump trogr
triat torm torc toast tish tiest thent tew teor tchers tarr syst surp sugg
subscr subd strain stow stling stew stepp stem stam ssist ssign ssail sque$
squal sport splac sphem spell slipp sland slack shov ship seh sculpt scrim
scour scill sched saur sandb rvis rvers rven rthod rtabl rsions rsif rrul
rrier rpris rnam rmiss rments rlies rker risk rials rhous rform rett rentl
reen reat reas rbrush rbitr rber rban quir quett quar pus puff psul prud prais
ppar pomp pok plin pled piest phant peat paint owie owboa outla outh$ othi
ossie osau oprie oppie oothe oodie onvo ont$ onni onga onclu olts$ olt$ ofo
offee octri oardi nurs ntol ntment ntes nterd nsum nstrum nsom nser nsel nsed
nscend nonr nog nobl nniest nna$ nliest nkest niv niq nhandl nfranch nfig nexp
news neq neat ndors ndings nderf ndanc nctiv nclus naut murd mumm mull mpris
mpling mplex mpen mock mmerc mmenc misst mispr mings mif merch mblers maz marg
manh lust lunch luc lubr lties loft llyh llustr llings llif linq limp liar li$
lex length left leav lden lass kidd jur juic itze itne ispla islo isgui iscri
isbe irts$ irmi iqua ippie ionee ioli intru inchi inch$ implo immu imble igeo
iere iency iefe idua icky iari iame hyph horn hex herm hat harr hands handb
greed glad gisl gib garl gans furr funn funct forc fool fol flour flat ffy$
feath fash extre exhau exclu esty espa erwri erwei erspe errea eroo erlie
eriou eppi epho epha enuou enua entie enha enfra encu emni embroi embla ein$
egs$ egrou egio egia egge eene eddi echno ecca eavi eakie eaka eague eaco dupl
dull duat drupl dot dorm djust disgr dial degr dand damp curv ctual ctroc
ctrin ctom ctness creep craz craft coup cool concl cogn cog cocc coal clam cim
ciest chnic chic check chabl ccur ccount ccin ccas cash cak bund bulld build
bsid brog brief box bouts boss blood bless blasph bits bik big bies bast bankr
bag babl ayme awls$ awl$ awfu avoi authe ausa aunti aunde aulte auce attai
astra asphe ascu ariu argo aree apple antie ansfi anse annou ankle amne allio
ait$ aisi aint$ aims$ aimi aim$ agri agree affli adhe ackbi abse abie ^zoo
^yell ^wrea ^whoo ^vou ^voi ^utt ^ups ^ulc ^twe ^squee ^sie ^shou ^scoo ^schi
^roi ^preo ^pia ^phe ^osc ^obt ^obst ^nou ^loi ^koo ^knee ^jui ^grie ^gea ^foi
^enth ^earn ^aur ^anth ^af ^adh ^absc zzes zipp zin ziest ysio ymme yba xy$
xtur xting xpans xon xhaust xer xempl xcret wy$ wriggl woodc wond wledg witt
witch wish wil wiggl whin welt weep wax wards wand wal vinc vif vast vap valr
ussio urvi urtle upu uote unny unlea unio unhi ungi unchi unch$ unce ullie
ulca uitou uiti uida uddi ubje ubby uave uance uadra twitt tumbl ttract tties
ttend trench trail track tour tough tiz things tful tempt teas tchy$ tchiest
tchier tas tart tamp symm symb swank susc surm sumpt sull sulk subtr stuff
strik strid straggl storm stopp stirr stfull stful stest steam stas ssit ssic
sse$ spong spend sord sooth snowb smat smart slugg sket silk shor shift seed
seat seam scrap scomf scit scatt saw sarr rward rview rvent rupt rudd rturb
rtiv rtes rstand rsec rror rpent rpass rook roc rnit rnic rnall rmark rint
rind rheads rgin rfer rexp reel rech rearm rderl rdain rchestr rbed rbar ramp
ramb ragg quick quer purg ptom ptim proof prej preach ppies plom plift pists
pink pig pied phing phies phed pewr pern pals oule oulde ouch$ ottie otra osh$
orsa orea optio oppre opli oor$ ooche ontu ontai onstri onre onca oist$ oinci
oida ogma odia ociou ocea occa obtru obscu oale oach$ nul ntuat ntries ntrib
ntrav ntrad ntos ntom nto$ ntid ntess ntact nslat nsing nscript nquer nonp
nnov nnounc nniv nnel nlist nlin nlier njunct nipp nhab neym nex nents neers
ndon ndisp ndig ndesc ndersh ndercl ndens ndem nda$ ncurr natt nap nants nact
muss mugg mpol mpled mplat mplac mousl mong mo$ mmetr mmens mlin missp mind
middl mi$ merr mech meas mbod mbat mbark marsh maint main mail macr lvag lur
lunt lob llot lloq llness lles llerg lion link lick lgat leth lel ldest lcer
latt lath larl larg kook kidn jump judg jor joc jest jerk jaunt izzie ixa itro
isspe isba irru irra irls$ irl$ ipti ipt$ iplo iors$ inspi ingo ilu ilts$ ilt$
ilie ild$ igre iftie ienta ielde iege idne idiou ickne ibs$ ib$ iabi humbl
hoth hir highl hell hect hawk hash halt gutt gush gull guing guid greas grant
grand glyc glow giv gious giggl gents gell gawk gant gad fuzz fuss furl fty$
fters freq foul fond flor flak film fec fasc extri evou eurs$ euni etha esho
esci ersua ersho erproo erpa erms$ erm$ ephe eoty eone entre entee ennia empti
embra elt$ ellie eja efri efre eese eena eemi eeche edgi edee ecro eclo eatio
east$ earfu eamie dvers dust dur dung drows drill drag dows doubt dock dmon
dipl dious dick diagn dern dentl deg ddress dazzl day$ dav danc ctric ctibl
ctar crust crud crossb cros crank countr cors confl coinc coars clon cled cipr
cill chos chok chis chap chant chan ceb cean ceabl ccomp ccent catt cants cag
buzz burl burg bunk bumbl bsess bscur brows brid breez brass bounc bort booz
boot bookm bogg boards block blest bler blank bind bask bark barg bail bagg
backsl azzle aywa axo axa austi ausi aura auda atty arry arka ardo ardie apy
apsi apro antry anso ansla ansfe anoi ankie ankfu angie alva alsi alse alpi
alki alga aits$ aints$ aighte afti admo adie addli acre acea ^via ^unpl ^unfr
^unch ^triu ^stree ^stoo ^snee ^snea ^rhy ^quai ^proo ^peo ^out ^ost ^old ^obv
^idl ^hie ^glea ^frau ^eth ^eight ^ecl ^eat ^earth ^drai ^crue ^clai ^bui
^blee ^amn ^alg ^airl ^air ^affl ^adr ^abn zzy$ zzing zzed zett zard ywa ysta
ypno yni ympho ympa ymna ydi yce xterm xorc xen xcav xact wspap wrinkl worm
wobbl wness winn winds wiest wier whims wheel wait volt vergr ventr velv vant
vacc uxe uve utle usty ustfu ussie uspi usia urthe urte urso uro urgeo urfa
ureau urdle uptu upplie uppli unsna unru unpre unpa unfa undu unctua uncheo
uncha umptio umna ummy ults$ ulsa uinte uice ugi uffa uets$ ueste uently uckli
uckie ubtra ubbo uary uare tuit ttis ttern ttent ttack troth trosp troph trick
triarch trem treach trapp transv transgr transcr tooth toc tless tiousl throw
thresh thogr thod thirt thful theist thal tfull teers teb tear taut tattl taph
tann tain tact tack sync swip swatt sus supr stries strial strangl strait
stness stlin stip sthet steamr start star stanch ssies ssass squar sprint
spott spoil sped speak spass southw snoop sneak snapp smol smirch small sledg
slaught skyl skew sixt sins sieg siast shuttl shments shingl shar shall sembl
seism sef seaw scuss scratch schoolm school scarr sarc sant sam sagr sadv
saddl rwrit rviv rtness rtment rtit rtil rties rthiest rthier rtgag rtens rtax
rsat rrot rrob rrig rrent rping row ror rmic rmers rliest rlier rkers rjur
rjack riousn rics rib ri$ rgy$ rgiv rgers reord reogr reocc rek reinv rcis
rchang rboard rbish rain rabb quis quiet putr pulp puck ptics ptibl propr
procl priet ppling ppled ppet ppeas pox postp pom pment ply$ plag pings pian
phyl phosph perj paym patch paq panth pair padd oyme owdie owba ovu outli ousa
ouple oupi ountry ounge ouge otho otch$ ospi ortie ortga orda orbe opte ootle
ooth$ oors$ oordi ooka oodi onvu onva onts$ onda onau olvi olti olte olio
oists$ ointme oini oings$ oide ogo ofu ocri ockie obtai obio obbie oadi oadca
nvuls nvok nvir nutt ntroll ntox nticl nstrict nstanc nsmitt nsgress nsert
nscrib nquish noth nnon nnoc nnex nlarg nkling nkles nkle$ ningl nigg ngo$
ngat nful nflat nfed nfat newsp neutr nerg neousl needl ndo$ ndet nderg ndents
ndam ndag ncreas ncompl nclud ncloth ncin ncess ncentr ncell naug myth mush
mption mpregn mplain mpest mpath moss mortg mors moon mons moist mogr mob
mmies mmest mmem mmag mly$ mispl misl milk memb meb meal mbos mbol mbig mbarr
math masq marq march malg malf madd lyr lump lug luck ltif lsion lom lobb
lming lmed llud lliest llic llianc llen littl litt lip lges lep lend lean leak
laur laugh lash lan lamb knock kings kens joll join jitt jett jap jabb ivia
ittie itou ithi istu istry iss$ isgo isdi ischa irth$ ipts$ ionai ioce invei
intui inspe inje ingra inclu impla impie imie igiou ightfu ieths$ ieth$ ierce
idwi idie ictu icho iblio iatu iasti hyst hypn hundr humm hov hor hobn hedg
headw hatch hank handl handc hair gymn gunn grouch griev grass grap gold gion
ghty$ gend gel gees furth fung fuddl ften fruit frisk fright freeb franch fram
fors flipp flabb fift field fiddl ffix ffil ffiest ffier ferm fak exploi exca
ewspa eum$ etua ettie etrea etre etchi estu essme esme esia esche ertio ersto
erno ermu ermea erle erju erhou ergy ercia erchie erboa epte eproo epli epay
eordai eople enra enla engthe emou elfi eld$ elay eki einve eighte ehu eggi
eg$ efea eeple eepie eenth$ eeni edro ecy ectly eclai ecea eath$ easie earche
eapo eamro ealthie ealou eadmi eadlo dsid drunk downs dov dors dogm dizz dirt
dign dienc diatr diar dian dgeh deq ddit dapt damn dair dact ctit ctil crypt
crunch crumbl crow cron crisp crinkl creas cours coddl cocks cobbl clunk clud
cleav cleans circl cienc chut chick chew chers cheer cheek charm chang chamb
centl ccum ccompl ccid ccel carc calm cach byt butch burgl bump bstract bsorb
bseq brut broadc broad brig braz brain bra$ bottl boom bom bog boat blund
blueb blow blam biol biogr bem bear bblers bbler bbin bath bant bals balls
bald bak bact backp awni awli awki autio aunts$ aunt$ aule aty atrio asque
asps$ asp$ asks$ ask$ asha arty artne artme artia arryi arque arpi aroo arch$
apu apri appy apprai apha anthe ansve ansgre ansfo anscri anqui annu anly anio
angea andsta andpa andma ancti alt$ alsa alou alms$ alley alka airie aintai
ainfu ahe aggie ageou ackli ackha ac$ abdu ^vei ^upt ^upl ^unv ^unsp ^unsn
^unk ^umbr ^ult ^two ^twa ^troo ^thwa ^swoo ^spra ^spoi ^splu ^splo ^slau
^scro ^rui ^rho ^rhi ^prio ^prai ^outstr ^outst ^moa ^loya ^lay ^jee ^jay ^jai
^itch ^isl ^inscr ^inch ^inb ^frea ^eyel ^ethn ^enthr ^enshr ^encl ^empt ^eav
^dwa ^dry ^chau ^brui ^bia ^angl ^ah zziest zzier zan yxia ytho yspe ysa ypa
ygo yge yer$ yello xtric xtract xtort xpurg xploit xpend xcus xampl wretch
withdr wings wheez whal weird weak warn virg vex verw verts veal vass utre
uskie ushy usci usba urpri urfe upre upo upie uota uore unwo unu unho ungle
unfu undo uncti uncou umsta umpy umo umbs$ umb$ ullfi ulki uishi uildi uiesce
uffra uers$ uera uents$ uentia uels$ uel$ udie udgeo uckra uccu ucci ubve ubju
ubble uarie uards$ uard$ uage tund tuck ttons ttin ttal ttabl troop triv trill
trigg tries trend tref trash tramp torp tment tiq tins tight tient tiabl ti$
thron threat thrash thirst thatch tess terw tep tees tee$ tedl teach tball
tars tans tank talk tach symp sweep sweat swagg sun suffr suck subv stliest
stlier stis stef steepl steel starr starch stant stamp stain ssum ssip ssil
ssib ssat ssal squish squiggl squer squatt squash spunk sprop sprinkl spous
spotl spoon splotch splint splend splash sping spindl spik spid sphyx sperm
spectr spat spars spad soap snipp snip smudg smiss smic smet smen slouch slopp
slid sleepw sleaz sky$ skin skimp skier skets sketch sitt sinc sickl shutt
shrubb shriv shortc shiv shav sets sert senc self sees seal scull scrubb
screech sconn sclos scint schol scer scents scalp scabb sband sans sanc rvant
rumm ruff rtebr rtag rspir rsal rrym rres rrac rproof rpetr rott rosc rop
rooms ronm rnment rmis rlands riw rits rithm rion riod rins rienc rias riall
rheum rgic rget rgent rful rfing rfed rests rents renth reign reev redd rdy$
rdress rdier rdens rcumst rcomp rcol rcoat rchy$ rchic rchas rcent rcen rbox
rbing rbid rambl rail quarr pying puzzl purv purl purch psy$ psing psid psed
prost prompt prior prickl prescr pprox postd popp pocr pluck plets pith phol
phist phes phas pez perk perh peev pebbl paunch pard panc pabl owsi ouths$
ousie ourte oups$ oupie oup$ oughne oude oud$ otta ostra ostma ostle ossu
orrie orpho orphi opsi opra ootie oori oonli ookma oodwi oodli onno ongra
onfro ompou olie oldie okie ojou oha ogui oeing$ octa obby oagu nwar nverg
nvel nvas nuous null nuanc ntuit ntrod ntriv ntort ntness ntitl ntis ntious
ntier ntials nterw ntenn ntempl ntains nstig nsport nspect nsif nsic nsib
nsform nseq nsent nsect nrel nquis nquest nonpr nnih nnial nnab nkled nkies
nket njug nitr nishm nisc night nienc niacs nhum nhib nhanc nguin ngreg ngrav
ngent ngar nfull nfold nflict nfiltr nen need neck neas near ndur ndist ndish
ndiscr ndingl ndiest ndier ndict nderwr ndergr ndants ncrust ncour ncont nconc
ncom nchor ncher ncern ncap nbel naus napp muls muff mudd mroll mpur mpost
mply$ mpert mpart mpal moonl monk mocr mness mned mmigr mmar mits mild menstr
meddl mead mbo$ mbit mbezzl mbass mainl lyg lvet lubl lub ltin ltan lry$ lpit
lphab lots lops look lodg loath lner lment llo$ llish llip lliat lleng llan
llad lker lights lief lgar lgam lemm leer learn leaf lead ldings lcoh laud
lark lap lants lank landl lack know kly$ kitch kingl kib kest juv jub joyr jok
jock jazz izzi ismi isks$ isk$ isclo ischie irtie irths$ irme irli ircui
ionshi iodi insa inque inkli inka inhi ingli influe infli incte impea impai
imou ilke ika ija ignme iggie iffu iffle iffie ienti ienne iena ieing$ idy
idna iddie husb hurr huff hosp holds hog hobb hill hij hick het guit guess
groov green gobbl gnant glutt glitt glig gig ghtness ghtens ghostwr gglers
ggler ggin geol geogr gentr gee$ gath garb gap gand gals gain gac gabl ga$ fur
fun fuls frizz freak fraud fox fount forms fly$ fluk fluff flopp flirt flick
flesh fleec flatf fizz fits firm ffrag ffens fens farm fair euri eupho ethro
estrai estio estie estfu essma esh$ esee erpro erpi erhoo erfa ercla epts$
eproa eploy eore enve entou entfu enstrua enno enna engra endea emia eivi eiva
eit$ eists$ eist$ eism$ einte egli egle efte eft$ efra effu effa eevi eerie
eenths$ eeki eedy eedu eeble ecrui eckli ebrea ebou eaway eatu eata eassu
earse ealo eagle eafe eady dwarf dump ducts dron droll drizzl draw dop doct
dless djud diousl diol diab dher dgers deep deaf dead dash das dark da$ cyn
cush cuddl ctics ctest croup crook crick crast crapp cran crackl crabb cour
copp coord confr coat coast coag co$ clov cloud ckon ckly$ ckbit cient chy$
chumm chnol chiv chir chim child chiest chier cheap chart cerb ccord ccol ccat
carv carj candl bys bun bullf bulk bulg bubbl bristl brisk bright brew brac
bow born bootl bond boil bmers blotch blink blind blesh blat blast bitch bia$
bert begg beds bduct bapt backw backst backs backl bac babbl aut$ auste aulti
aught$ aude auche atua atto atee asty astle assy asphy asm$ aski artoo arto
arso arla arja arga arfi arco apie anty answe ansio anny annua anli aniu ango
anga andso ambli aloo alm$ allou alge aiti aithfu aisa ainme ainie ailme aila
agma agea afa aera adre addre acs$ acquai accre ^wou ^woe ^urg ^ump ^ugl ^toe
^tie ^thy ^throa ^three ^thie ^stau ^spru ^sprea ^slea ^scho ^roya ^rio ^rheu
^rau ^phra ^outgr ^outcr ^orb ^oil ^oft ^obtr ^ny ^inl ^ink ^feu ^exch ^euph
^etch ^enh ^embl ^ell ^east ^earl ^dwe ^chie ^cea ^alc ^addr zill zil zeal
ywei yway ythi ysts$ yst$ yse yre ypto ypti yphi ynthe yms$ ymbo ym$ yja ygie
ybo xyg xult xtrav xtrap xtrad xtens xtend xtap xpropr xpost xport xplos xpir
xpens xil xies xiat xhum xhal xempt xcor xcomm xclus xcis xagg wrestl wreck
wrangl works witn wip wint windb wimp wigw whittl whistl whisk whinn wfull wes
weekl weav wcas watch washb wart warbl walls waiv wail wack voyag vouch voll
vog vils vets vesdr veh valv uzze uxta uttie utme ustria ustme usk$ ushwha
ushio urti urt$ urrea urpe urpa urmu urmou urls$ urlie url$ urio urgle urca
urbo upt$ uoro unsee unri unque unme unkno unga unds$ undle unblo unbea umscri
umple umci umbre umba ulie ulfi uiri uintu uins$ uin$ uilde uid$ uicke uici
ufle uffo uffli ufa uests$ ueri uely udia udgie uddli ucu uctua ucta ucia uch$
ubtle ubsti ubdi uatte uake uade tzes tyl turtl turm turg ttress ttos ttler
ttlem ttings ttil ttens ttag tta$ truckl troup trott trons triumph trip triot
trin trimm trifl trif trics tress tresp traff tourn tott toss torr torb tool
toll tog toddl titt tipt tippl tinn timb tibl throat thrift thought thinn
thimbl thiev thfull thern teur tents teeth tchhik tchet tanc tails tackl syr
synth switch swish swirl swindl swell swamp swall sust surpr surpl sunn suckl
subtl subh stward stutt stump stumbl strust streak stpon stort stors stok stod
stocr stment stings stigm stess steps staunch starv starl stapl stamm stalk
stagn stagg staff sta$ sstat sson ssol ssmen ssman ssid ssful sset ssers srupt
sresp srepr squot squirm squir sques squeez squeak spruc spron spring spor
spoonf spook splic splatt spiff spellb spects speck spatt spatch sparkl span
soup sopp snowm snow snork snor snobb smuggl smell slov sloc sling slight
slick slic sleet skyj skyd skipp skinn skiest siz sits sight sia$ shrink
shrill should shopl shock shipwr shinn shill sheph shen sheath shagg sgrac
sfig sfact sext seng sels seiz seg seab scurr scroung scribbl screw scrambl
scorch sconstr scing scin scient sces sced sce$ scav scarc scand scamp scalc
sburs sax savv sash sard santhr sang sandp sandbl sampl samb sailb sail sadd
rym rwork rweight rways rway$ rvesc rval rustl ruin rugg rtor rtier rticl
rtial rtest rtedl rtar rsor rsiv rsist rsimpl rsign rsid rsens rsar rrug rriv
rris rriest rrend rrels rreg rrass rrag rprod rprint rpow rport rpol rplex
rpetb rpers rpat rox rowd rov roughn roist roid robb rnet rnabl rmy$ rmut rmor
rmist rmingl rmarr rmam rlet rken risms rios riol rifl ridg riabl rhin rgon
rgo$ rfum rfor reth reit reint reinf reinc rein reimb reav reaucr rearr reappr
rean readj rdos rdness rdiogr rdiest rcycl rcumscr rcumn rcumc rconn rcom rcin
rchiv rchit rcher rcef rback raz rass rasp rapp rank ranch rais racl quirk
quil quibbl questr qued quav quac pup punt pud pstick prostr proscr probl
prepp preocc prel prav pranc pragm pproach pprent pplying pply$ pplies pplied
pplem ppin pperc pound posh porn popl poet poach pnot plut plush plur plung
plund plum plott plor pleg playb pizz pitch piq pilf pik pierc picn perst
perpl pees pee$ peddl pearl peal pboard paus partn partm pann panh oyri oxie
owwo owto owo ownie owca outsi outse outhwe outgro outfi outba ousti ourney
ourne ourie ounda oughi ouette ouchie oubte otru othou otchi ostwri ostpo osso
ospho osme osio oscri orthea orthe orpha ornie ormou ormo ordia orcy oppy oppa
opou opme ople opia opha oothi ooste oore oopie oonfu oolma oodcu onstrue
onstrai onpro onna onkey onglo ongho onfa ompro omp$ olley olk$ ogni ogly oggy
oggo oggli oeve oeho odiou ocy ocrea ockpi ocio ochi occlu occi obso obsce
obno obia obdu oaxe oathe oasts$ oast$ oans$ oane oan$ oali oadsi oachi nza$
nvolv nvass nvad nutm nund nufl nuf nues nue$ nucl ntus ntupl ntrud ntreat
ntrat ntrans ntings ntiest nthron nthol nthet nterj nterch ntemp nte$ ntas
ntan nswer nsus nsurg nsumm nstead nstat nspos nsmut nslit nsin nsfus nsfer
nsecr nsec nsan nres nprof northw noff nnet nnels nkind njoin nix nitp nisms
niousl nied nicl nibbl niat ngthen nglom ngit ngibl nghous nfront nfring
nfluenc nflect nfisc nfam nestl nentl neb neal ndscap ndress ndow ndos ndom
ndless ndir ndign ndies ndes nderw nderm nderh ndas ndal ncur ncrim ncred
ncount ncorp ncor ncontr ncompr ncod nclos ncit ncheon ncest ncens ncarc ncaps
nberr nbath naught naiv nagl mutt musc murk mumbl muffl muckr muck mpus mpted
mprom mpound mplim mow morrh moph mop moos mood mooch monthl mog moan mnif
mnav mmox mmons mmis mmiest mmier mmet mmabl mix misq mint mingl mik might
meet mec mcis mburs mbroid mbly$ mberl mbell mbast mav matz match mask mash
maids mack lym ltras ltit ltic lse$ lsat lries lping lped lowl loss lopm loop
loon loit logg llos llor lleys lley$ lldoz llars lky$ lkiest lkier lkers lkal
lisms lir lins lil lighth liais lfish lfer lents legg ledg ldren lays lay$ law
laundr laund launch larm lanthr landsc lanch ladd knuckl kiss kink kind kill
keyb kettl kend keb kar kal kab ka$ juxt jul juggl jigs jigg jayw jac izza
ixtu ivu iviou itpi ithdra itchhi issua issta issie isquo isqua ispie ispi
isme isle isgu isgra isfa isdai iscree isclai isbu irke irche ipsti iplie
iothe iote inste inscri inny inklie inia inhu ingua ingu inghou infri indlie
indli inctu inctio incti inct$ incri increa imne imbi illia ilfe ildre ildi
igwa iguou igui igue igree ighway ightne ightie ighli iggy igglie iggli igga
ifa iew$ iente ieni iefi idia iddli icuou icni ickli ickey icka icca ibbo ibbi
iar$ iants$ hyg hustl hurdl hung hug howd how hoot hoax hiv hitchh hing hind
hik hier herr help heft hed heckl health haw harv hardh haggl guzzl gum guilt
guerr guer grumbl grubb gross grin grain gout goug gos gorg goos goof gods
godl goat gnarl gnanc gloom glid gles gle$ glaz gladd ginn gien gics ghtiest
ghtier ghtest ghen ggress ggreg ggrav ggon ggly$ gglut gglom ggliest gglier
ggies ggard geom gav gastr gantl gambl gains furc funk fumbl fulm ftiest ftier
ftest frustr froth friendl fric freez frank fragm foam flutt flut flush fluct
float flight fleet ffoc fferv ffabl feud fenc fen feg feel feed fant falt fail
ezzle eymoo eyboa extru expro expi exhu exho exha ewie eute euse ettee etoo
estro estria estre estly estea essfu eshoo esea esdro ertly ersu ersti ershoo
ersha erple ernu ernme erlu erlea erlai erje ergro erfei erei erds$ erdra erda
erd$ ercoa erai epra eplo eota eopa eoni eome enue entry entrea enthro enea
endou encru encroa enclo empt$ empli empi emoi emea embryo embli elvi elts$
elti eloa eisti eismo eins$ eing$ einfo eine einca eimbu eigne eigh$ ehy egua
egoi egme efti efau eezi eethi eethe eepwa eeloa eela eech$ eeca edy ectfu
ecri ecle eciou ebau eawa eaucra eatie eaths$ eashe early earlie eapprai
eappea eande eado eadju eada eactio dwif dvanc dut duk dropl droop drink drift
dribbl dredg drear drap drain doz dowd dour dopt doodl dont dogf does dodg
dmitt dliest dlier djur ditt dits disf disd dios dioc dinn dient diam deol
dendr dels dej deic dedl deck debr ddict ddest dcast days daydr dawdl dant
dang dams dais daint dac dabbl cyt cun culm cuck cuat ctuall cryst crus crumb
cruis crosc cropp croch cresc creak cre$ crats crash cram cquir cquiesc
cquaint coz coupl counc copt coph congl coil coiff coerc coach clus clown
clinch clerg cler clapp clamm clamb ckups ckup ckslid ckrak ckler ckings ckey$
ckest ckens citr cinch cig ciar cials ciabl choos cho$ chapl chall chalk ccurr
ccomm casts castr castl cans camb cadg bushwh burd burb bungl bugl bug buckl
bstit bsent bscrib bscen bruis brak bragg brack bowl both borr books bonn bobb
blush bluff bloom bliogr blets blet bleach blackm bjur biv bitz bisc birds bip
bidd bev besm bers beq bels beef bead bdur bdiv bdic bcontr bbrev bbish bbies
baz bauch bart bann bank bamb balk badg backstr backh backf bacc ayma aydrea
aybe awns$ awnie awne auts$ auspi ausea auru aurea augme aud$ auctio atzo
attli atoo atiou athy ateu atchwo atchma atchie atca asua astri ascri arvi
artle artie arrai arps$ arpa arp$ arlo ardhea arcoa arbu arau aptio aptai
approa aphie anza ansmu ansli ansfu anlie ankru andsca andpi andli andfa andee
andca andbla anca amro ampoo ampo ampio ampai amboo ambo allyi allpa alloo
ald$ akie aje airway airbru ainsto ainly ainee ailo ailboa aic$ agoo agme
aggra agglu agglo agga afi affro aff$ adroi adle adjou adiu addo addie acua
acty acquie acque ackwa acksli ackpa ackmai ackda achu acho accha absu abstai
abru abro abne abju abho abdi abby abbre abba ^zea ^yuck ^yok ^wie ^voya ^vee
^vai ^ush ^upbr ^up ^unsh ^unkn ^ungr ^umb ^tree ^thei ^spou ^spie ^sphe ^shie
^scrou ^scie ^schle ^roe ^poa ^pli ^own ^ott ^orph ^offs ^occl ^obd ^mayo
^liai ^lei ^laye ^knu ^knea ^kne ^inp ^grue ^grai ^gloo ^gloa ^frei ^flue
^floa ^flau ^flai ^estr ^enn ^enm ^eld ^egr ^ech ^earthw ^earm ^dye ^due ^crui
^creo ^cloa ^choo ^choi ^broi ^az ^aust ^augm ^auct ^ascr ^ars ^archd ^arc
^ankl ^alm ^alk ^ald ^airw ^airm ^affr ^abh ^abbr ^abb zens zef ywri ys$ yoke
ynta yndi ynco ympto yly ylu yls$ yl$ yeste yers$ ydrau ychi yche ybe xyl
xtirp xtinct xter xten xpon xpiat xpatr xion xin xig xif xhort xhil xert xers
xecr xchang xcell xasp xag xac wrong wreath wrapp wound wors words wooll woof
woodw woods wolf wok wny$ wniest wnier wner wly$ wlers wler wins wif whoop
whirl whil well weld weights weightl week wealth we$ wbon watt warp warl wann
wan wals wainsc wabl vort vood voic vism viar verwr verwh verthr versp vercr
ventf vault vad uthfu usy ustwo ustai usque usks$ uski uscu uscle urvie ursue
urst$ ursio urrou urra uree urbe upts$ uppu uppre uppi upda untru unsto unstea
unso unsi unro unplu unpi unlo unky unhea ungra ungo uncle uncla unchie umve
ulptu ullio ulkie ulke uki uiva uitfu uini uilt$ uili uids$ uidi uibble ugu
ugna ugle uggie uffy uffu uesse uene uency ueeze udsli uco ubie uato uable tym
twork twist twirl twin twigg tweet tuall ttom ttanc ttain trunc truck truc
troc trid tread trawl traps transsh transpl tpick toz tout tong tomb toes toad
tnot tmosph tled tips tint tickl tib thwest thriv thrill thon thom thmic thlet
thless thier thfuln theol thead thankf than tet tentl teet tchmak tchen tballs
tarp tad synt synd sympt syc syb swift swelt swath suppr sunr sunl suct substr
suav sual stwrit stworth stut sturd stult stroll strip strength stream stract
stments stlers stler stink stifl steep stealth startl stark stack ssett ssem
ssect ssant ssad ssabl squint squeal spy$ spum spread sposs sportsc spleas
spinn sphor spasm sparr sow sov sour sorc somn sogg sock snuff snott snoot
smooth smil smemb smelt smal smack slumb slow slop slith slink slin slim sleek
skitt skill sink sincl simp signs sift sicc shwhack showb shodd shirk ships
shipb sheer shear shatt shant shabb sgust sguis sguid sfranch serp senfr send
sench selv segm seeth sebr sdir sdiagn scuttl scus scumm scret scred scrapp
scours scoot scomp scler sclaim schiz scert scern scad sbel sappl saff rvings
rvest rump rumbl rtoon rton rtles rtled rtle$ rtist rtim rtiest rthing rthin
rther rtheast rtex rtend rtal rtains rsuas rsom rsight rsibl rsev rset rsest
rsect rscor rsals rsak rrod rrings rriers rribl rret rreal rreact rrad rquis
rpop rphos rphan rpen root roost romp roids roast roam rnum rnov rniv rneys
rney$ rnest rne$ rmount rmall rloin rlock rlic rliam rlat rky$ rkiest rkier
rkel rkabl rject riz riors riff rhythm rhood rhet rhaps rgit rgies rgeon rgen
rfish rex rewr reus rets restf reot rels reinst reins rees reef ree$ redn reck
reap ream reach reab rdict rdest rdash rdant rcuss rcumv rcuit rciss rchies
rchief rchers rceiv rcas rcad rbor rboards rbidd rbear raud rath rash rasc
rald raid raft raff racts quor quizz quip quilt quentl quel quef queer queas
pwalk purpl pugn pug pub ptors ptor ptogr ptness ptly$ ptit ptabl prowl proofr
proach priz priss preshr pren preempt precl prearr ppur ppointm ppell powd pov
potb ports poch plump plumb plodd plies pless plent plead play$ plann plaint
piv pinn pimpl pimp piers phrod phet pherd phem phar pew pess perw perl peopl
peg peer peep pdat parq park pants pans pamp oyfu oyage owy owpo owme owhea
owdo owdi ovie outri outhwa outdi outcro ourteou ourni ournfu oundle oulti
oul$ otty othea otbe ostbi ossa oshi ortsca ortrai ortle orthi orth$ ortcu
orry orra orpu orpe orno orka orfei oquia ophie oozie oozi oosi oondo oofrea
ooch$ onste onscri onscie only onjoi onje onio oncre oncea omu ompte omps$
omfi omai oisi oirs$ oir$ oici ogu ogro ogna ogfi oft$ oexi oers$ oer$ odra
odiu odgi ocla ocee obtu obfu obbyi obbli oars$ oar$ oals$ oal$ nzas nyms
nying nxes nwash nvol nvoic nviol nvig nvict nvex nvar ntum ntrig ntrar ntour
ntors ntments ntir ntiousl nthes nterst ntercl ntempt ntedl nta$ nsvers nsuff
nstruat nstip nstant nstall nstabl nspic nspar nsort nsors nsolv nsmigr nslav
nsfig nsett nsers nsem nscious nsact nroll nread nox now nour nors noph noid
noct nocl nnobl nnings nnil nnials nnets nnest nne$ nnat nnas nmak nliv nkly$
njust niums nimbl nill nigr nightcl niggl nib nials nhapp nhand ngress ngren
ngov ngenc ngal nfur nflamm nfett nfest nferr nexc newsc nettl nesc nell necr
nebr ndstand ndsom ndrom ndoctr ndliest ndlier ndex ndern ndep nden ndbreak
ndblast ncycl nculc nctuat nctif ncret ncrem ncop nconsc ncoct ncircl ncipl
ncestr nceph ncel ncef ncas nbound naw nappr nann nalt nak nadv munch mums mum
mulg mudsl mpting mprec mpow mpov mpot mposs mphom mperf mpend mpell mpeach
mpaign mournf mourn mold mnest mmol mmin mmif mmas mma$ mless misb minc mesc
menc membr meat mbust mbrat mberm mbec mbard mbalm marm marc mantl maltr mals
mains lyth lyps lynch lymph lupt lung lun lull lues lue$ luct lthy$ lthiest
lthier ltat lptur loyal lousn lott loot longs longh loll lodr loan loaf loads
lmon lman llul llops lloon llog llingl llid llibl llev llaps llac lix lior
lions lingl lih lift liers lias lians lian liabl lgenc lge$ lert lech leath
leafl ldness ldly$ lcon lcom lches lcat laying lard lands lamp knott kness
knead klutz kitt kit kish kenn keen kas jun joyf jol joic jamb jail jagg
jackkn izzlie izi ivvie itty itly itcha isty istrea istfu issue isplea isjoi
isfra isfie isea isdia iru irti irpa irm$ irle irks$ irk$ irgi irde ipse ippli
ippa iots$ iot$ iosi iori ioma ioche inui iniu ingy infra indsi imsie imsi
impse imbo imba ilti ilme illai ilkie ilk$ ildca iing$ ihoo igme ighe if$
iews$ iewi ievou ietie iete ieta ieri iennia iends$ iendlie iend$ idu idgi
ideou ich$ iat$ iase iantly iale iagra hymn hurl hunt hunk hull howl hot hort
hoods hock hob hoars hoard hint highb hew hends helm height heath heartl
heartbr hear heal haul haught handm halv had hacks hack gyn gurg gunf gumm
guff grump gruff groom grom grogg grizzl gritt grip grill grett grees gree$
grants graft goon golf godd goal gnment gnets gners gner gments gma$ gling
glimm gled gleam glar gird gimm gidd gid ghtin ghtful ghlight ghest gher ggish
gens gaunt gaud gauch gass garc gangr ganc gaff gabb fuck fruitf fruct frostb
frol frill friend freight freeh frat forsw forn forf foots fondl fom foil
flunk floodl fling flims flies flamb find filtr filth filt fiest fierc fidg
ffor fflict ffins ffid ffet ffend ffen ffein ffal ffac few fev fetch ferv
feebl fearf falc faint fain fad eyma extro ext$ explai expia exio excha excee
ewsca eviou evea euti eums$ euma etwo ethou esy estia esqui espou espai escue
erwhe erstu erste ersee ersco erplay erke erk$ ergo erflo erdre ercei erbia
erb$ eptua epsi eppie eove eouts$ eout$ eoso eos$ eori eole enthe ensla enri
enoi enme eniu enchme emy empte employe employ emna embly elme ellbi elco elbo
eke eits$ einsta eign$ eightie eighs$ eighi eighe eice egui egru egga efts$
efla eesta eerfu eepy eems$ eem$ eekly eeha edre ectne ecce ebrie ebria ebrai
eaus$ eathi easy earni earma eard$ eappoi eappo eant$ eanse eamli eally eallo
eaky eafle eaf$ eadba eabou eabi dyb dwell dvoc dup dunn dun dumbr dumb dud
dub drudg dropp drop drog draul draft doctr dnapp dmouth dmiss dlocks djustm
disj disfr diot dimm did dians diagr dgets dgeon dethr deif dedn deathl ddings
ddiest ddier ddied daub darn dals daff dab cyb cutl cusp cuous cunn cund ctrif
cruit crotch crossw croon crof cring crif cowp coq cops coon conscr concr coin
coex clums clips clipp clink climb clic clev ckpack cknowl ckmail cklers ckeys
cings cientl cic cibl chunk christ chop chment cheerl cheerf cheat cham chal
chains chaff ceps ceous ceh ccust ccred ccentr cats catn cars capr cank caj
caff byl bvers buts bustl bung bumm budd btus btrus btract btain bsurd bstrus
bstin bstetr bstain brusq brush brood broil broads brill brib briat bred brawn
brawl brad boys bount bott bos boost boos boond boob bong bob boast bnorm bneg
bmitt bmerg blust blurr blunt blit blinds blend blen bleed bland bjug biot
bioch bias bfusc bestr besp begr beep beck bean bborn bbit bawd bars barn balm
baffl badm bad backsp ayone axio awkwa awke awdle awbo avu autiou auntle
aunchi aults$ ault$ aughtie aughti auds$ audio atue atmo athle asu astou assme
assau asms$ asmi asie ashbu arrio armle armfu arlia arkle argai arfe ardne
archy archie arboa apte applau aphro antee ansshi anspla anspi ansie angre
andwi andlo anctio amy alts$ altrui altrea alto alry alpe aloe alds$ aldi aja
airwo airme ainwa aintie ainsco ainli aidi agno agle afo affs$ affei affa
aesthe advo adri adou acra ackno ackkni ackie acha abstru abje ^yog ^yard ^xy
^xe ^whea ^voo ^urb ^upd ^unwr ^unfl ^unct ^unbl ^try ^true ^trua ^sue ^sua
^stou ^stoi ^sple ^slui ^shy ^rue ^rhe ^quea ^playa ^plau ^ow ^outpl ^outc
^outbr ^oust ^oss ^odd ^obf ^nei ^lio ^lia ^leo ^lai ^ku ^klu ^jea ^iss ^inbr
^ib ^hoe ^frai ^flie ^esch ^ensl ^enfr ^emph ^elb ^eer ^ecc ^earthl ^eag ^duo
^drie ^doe ^deu ^chri ^bie ^awkw ^awf ^ausp ^atm ^athl ^ath ^ask ^arthr ^ard
^apt ^apr ^answ ^and ^amph ^amm ^altr ^ail ^aid ^aesth zzic zvous zur zool
zons zigz zeb yxe yx$ yto ythei ythe ynchro ymi ygra yga yfoo yfi yes$ yda
ycla ychia ybu yarda yamme xus xud xuals xtrud xtrov xtemp xpung xplod xplain
xpat xpand xot xoph xists xis xiom xibl xerc xculp xclud xcess xceed xalt
wsing wses wsed wscast wrongd writh wring wrench wpok worldl worksh workm
workh workb wooz woodp wolv woef wnsiz wnscal wngrad wners wmob wmen wman
wking wked wiz withst withh wisp wingl windm windbr winch winc wimpl wildc
wigg wig whor whopp whoosh whith whisp whimp wheelb wheedl whamm wful werb
welsh welc weigh wees wee$ wedg wedd weap weakl wdy$ wbreak wbox wboat wberr
wball wayl wayf waxw waw wattl watchm wass warh wardr wangl waltz wallp walks
waistc waggl waffl waddl vvies vur vuln vouchs vorc vom volc vitr vists vip
viousl vious vingl vign viest vier vib versl verpl verns vernm verk verj verfl
verch vents vell vect vatt varn vans vanq vann vam uzzli uzzie uzzi ux$ uvu
uttre utt$ uth$ utch$ usua ustriou ustproo ustee ussa usie ushroo urveyo urvey
uru urtu urts$ urta ursts$ urryi urplu urple urpi urou urnu urnou urno urmoi
urmi urlou urloi urgla urfei urfboa urf$ urdi urcea urbs$ urb$ upy upsu
upstrea uproo uproa upri uprai uppla upgra upchu upbrai uoti unzi unwra unwie
unvei untwi untee unsta unspo unshea unscre unscra unquo unpo unmi unlu unloo
unloa unju unhoo ungs$ ungeo ungai unfro unfrie unfe uncu uncoi unclo unbi
umptuou umpki umnia umlo umbu umbo umbly umbfou uly ulso ulpi ulo ullshi ullo
ullba ulks$ ulk$ ulia ulgi ulae uive uitio uitca uists$ uisti uist$ uisha
uillo uila uietu uiete uglie uggli uffia uessti ueso uerreo uere uepri uente
uendo uele uebe uddy ucky uckste ucksa ucko uche uccee ucca ubti ubsu ubsoi
ubriou ubpoe ublea ubjoi ubhea ubblie uasi uashe uarde uapla uande uainte
uagmi tzing tzed typh twof twitch twinkl twing twiddl twent tweed twear twaddl
tvot tux tussl tush tunn tums tuals ttun ttor ttonh ttock ttlers ttlen ttlef
ttlec ttish ttir tterl tterfl ttas ttach tstretch tsourc tsid trustw truss
trus trundl trudg trud trounc tromb trol trof troch trix tris tril trik trickl
trembl trell trebl treadl treacl tranq trampl tramm trait traips tracts trach
tperf torsh torq torch toppl topl toothp toolb tonn toggl toed tnumb tments
tliv tling tlin tlight tles tler tlegg tle$ tjack tittl titl tith tiss tipp
tionsh tiol tinkl tink tinct tients throttl thos thorn thnol thlons thlon
thinks thin thiest thest thesp thesl therw therpr therh therc thenc then
theists theism thdraw tfuln tfox tfitt tfish teth tesm terpr terf terc templ
tec teasp tearg tdist tclass tcak tbal tbagg tbacks tback tawn tawdr tav tastr
tass task tarn tarm targ tapr tapp tantl tangl tailg tabb syph synchr sying
swom switchb swing swerv sweetbr sweatsh swear swash swaddl sut surt surl
surfb surch surc sunt suns sulf suds subp subb suall stym stum stucc stubbl
strump struggl strol striv stript stripp strept streaml straitj straf straddl
stov stound stos stop stoic stodg stockp sto$ stmast stitch stippl stions
stillb sticks stickl stfuln sterns sterm steop stentl stench stems steer stdat
stbit staur stast stash stars stard stants stalg stairw stad stab ssyf sswom
ssuag ssuad sstim sspell ssom ssolv ssment ssings ssev ssessm ssens sseng
ssenc ssen ssel sscross ssault ssants ssanc ssacr srul srob srep sreg squit
squelch squeeg squand squabbl sputt spur sprov spritz sprightl spot sportsm
splutt splurg spitt spis spill spel speech spectf speckl spearh spann spank
spangl spair sown sous soundpr sors soot sonn soj sog soch sobl soapb soak so$
snuggl snugg snuffl snowpl snowdr snooz snom snitch snift sniffl snid snick
sneez sneer snatch snar snaffl smugg smoth smooch smith smatch smash smantl
slush sluic sludg slosh slodg slobb sliv slimm slik sles slend sled sle$ slath
slash slant slang slamm slal slak skyr skivv skirt skirm sker sjudg sjoint
sizzl siss siol sinn sinh simm silh signp sights siev sied sidl sickn shut
shush shudd shroom shrew shredd showc shout shotg shorts shortch shopr shopp
shon shoeh shitt shirr shipp shipm shions shings shelv shelt shells sheep
sheart shangh shandl shamp shambl shackl sgruntl sgov sgorg sfir setr serm
sents seep seeml seem seek seash seaf sdropp scyth scurf scupp scuffl scub
scrupl scrunch scruff scrimsh scrimm screet screen scrabbl scourt scourg
scotch scord scond scomm scomb scold sco$ scienc schuss schoolb schmooz
schmaltz schlock scharg schanc scarl scann scald sbeh sawd saunt saun sas sarm
sandw salm saintl sack sach sa$ ryng rwhelm rweights rwards rvil rver rvedl
rvad rustpr russ rush rusc rup rums rumpl rumb ruffl rubbl rtwin rtscast
rtridg rtress rtrait rtois rtments rtig rtied rtibl rthquak rthed rterb rtent
rtchang rtaint rsuppl rsuad rstyl rstud rstit rsting rstepp rspic rspers rsons
rsiz rshoot rshad rsewh rsesh rsell rryc rrup rround rros rrors rringb rrhag
rrestr rrer rreot rrents rren rreach rpric rpois rpiec rphic rphem rounds
roughh roug rouett rost rors ropr roon rof roadw roadbl rnogr rnmost rnly$
rnings rnfull rnets rnel rneck rnacl rnac rmos rmly$ rmitt rmists rmish rmiest
rmier rmhol rmet rmeat rmatt rmand rmanc rmad rlud rlift rlets rlesq rleav
rlac rkin rkat risc rippl ripp riph rio$ rimp riffl rieg riddl ridd rich ribb
rhym rhomb rhang rhand rgum rgoes rgett rgath rgar rgain rflow rfic rfar rext
reuph retch resch rept rephr reop reol renn reiss reimp reff reex reest reent
reenl reem redc rects reaw reappl reaff readm rdur rdling rdlin rdit rdigr
rdhous rdess rcraft rcles rcle$ rclassm rcif rcial rchbish rchand rcess rcel
rceas rcast rburd rbook rbons rbol rbling rbles rbled rble$ rbin rbik rberr
rbell rbec razz rays ray$ raunch ratch rarch rapt raphr rankl ramr raj railr
ragr ragl raffl quy$ quipp quing quet quench quash quas quapl quak quagm quack
pyx pyc pump pum puk puddl ptil ptest pteas psurg pstag psiz pscotch prun
prosth pror proot prog proff proct primr priev prid prest presch preord preex
preambl prattl prat ppris ppil ppets ppes ppert ppen ppeal ppag ppabl poww
pout pounc poult pouch potsh pots potp postl postc portl porp porc pooch polk
poes pockm poc plying plumm plight plied pledg pleb platf plasm plain pix pitt
pitchf piss pisc pis pipp pion pinwh pinpr pinp pinh pinch pinc pim pilgr pigl
pif piddl pics pickl pia$ phyt phthalm phren phel pgrad pestl peon penth pends
pelv pels peek peck pecc peas pbox pboards paw paths parch parb pamphl paltr
painf padl paddl pad ozoa ozi oyer$ oyco oyance oyali oyable oxtro oxswai
owzie owth$ owroo owplo owpa ownsi ownsca ownloa owngra owmo owma owly owla
owgi owbo owbea outwi outwei outvo outstri outstre outsta outsou outsma outrea
outpu outpe outnu outloo outhea outfo outfla outfie outcla ouste ourtee
oundproo ouls$ oughts$ oughhou ouga oufla ouds$ oudi ouchsa otsho otli othie
othei othba otha otgu otchie ostru ostri osto ostlie osthe osteo ostda ossche
ossba ospo osai ortsma ortre ortoi ortme orthwe ortcha orrha orpoi orphe ornro
ormie orlo orkscre orio orgo ordo ordai orchi orch$ orca orbea oray orai opyi
opse opsco opro opri ophtha oove ootno ooths$ ootba oorste oomie oolo oolha
ookwo ookou ooge oody oodthi oodoo onze ontou ontoo ontie onqui onplu onpa
onnie onke oniu onho ongrui ongo ongea onfou onfli oncla oncho onba onai ompri
ompla omou omia omeo ollea olfe oleu olee oldfi oldbri olca oisti oire oiffu
ogtro ogia ogga ogey ofou oeti oence oefu odlie odeo octu ocoo oclai ockya
ocky ockou ockma ockle occo oc$ obvia obsle obnai obey oautho oasti oari oapbo
oani oamie oami oame oaks$ oaki oake oak$ oadblo nymph nworth nwis nwield nvoc
nveigl nuzzl nurt nuousl nuk nuit nudg ntwin ntryw ntrym ntrus ntress ntrench
ntranc ntral ntling ntless ntles ntlem ntled ntle$ ntith ntiousn ntingl ntih
ntig ntics ntib ntiar nthous ntertw ntersp nterbr nterb ntents ntenc ntel nteg
ntanc ntail nsurr nsupp nsubst nstranc nstorm nster nsplant nson nsnar nsil
nsightl nshrin nsheath nsfix nsettl nset nsest nsep nseeml nseas nscrambl
nscot nsconc nscient nsaddl nsabl nry$ nrul nrich nreas nrapt nrag nquot nquil
npoint notch nostr nost nosh noon noodl nonv nonpl nond noids noes nob nnying
nnuend nnuat nnow nnos nnonb nnin nnied nnib nnes nnerv nnects nnair nnacl
nmesh nments nluck nloos nleash nlatch nlac nknow nkness nkings nkin nkabl
nippl nins nik nih nigm nightsh nid nickn nias niall nhors nhitch nhing nheads
nhead ngual ngruit ngross ngos ngorg ngness nglers ngler ngings ngerpr ngend
ngeal ngeabl ngbon ngainl nfut nfriendl nfreq nfound nforg nflex nflam nfeebl
nfast nfantr nfant nfall neyc neyb nexpl newb neut netw neth nerd neon nels
neckl nearl ndying ndwich ndslid ndsid ndry$ ndpip ndpap ndows ndown ndoggl
ndlessl ndlers ndler ndis ndin ndil ndfath ndezv ndescr nderv ndersc nderr
nderpl nderd nderch nderbr ndeer ndear ndbagg ndang ncurs nculp ncroach ncov
ncoupl ncord ncongr nconf ncond nclav ncier nchy$ nchron nchol nchoat nchiest
nchier nchers nchar ncann ncak nbutt nburd nbuckl nbos nblock nball nass nasc
nail nactm nacl nach mvent muzzl mushr muscl murm mulch mulc muddl mstitch
mstanc mson mscrib mros mptiv mptions mprob mpract mplor mplod mplish mplaus
mplant mpin mphat mpets mperm mpecc mpair mpag mouth mouss moufl mottl motl
mothb mosq morr morn mord mopp moot moor moonsh mont momm molt mois mning
mniat mnat mmur mmort mmor mmock mmingl mmerh mlet mizz misph misn misj mirr
mipr milkm midg mfit mewh mesh merm mep meogr mek mef medl measl meand mbush
mbrell mbranc mbrag mbrac mbow mboss mboozl mbon mbold mbob mblings mblaz
mblanc mbitt mbib mbarg mbal mayfl maund matchb marbl mapp maph mangl mandr
mamb mam malt mainstr madr lyst lypt lygr lwom lvers lvan lush lurch lul luing
lued ltry$ ltil lsters lsions lsing lses lsed lpabl lox loung loud losh loph
lol lobst loam lments lmen lmat llyf llyd llyc llyb llyach llurg llur llston
llpap llous lliv llist llips llib lli$ llfight lletpr llenn lleag llbind llain
llae$ lkin lishl lipst lios lindr lind ligr lightn lienn lie$ lids liab
lhouett lhard lging lged lgebr lfunct lept lepr lemn leh leech lects leash
leapfr leach ldier lcin lcif lching lched lcan lbox lbas laws latr latch lantl
landsl landm landf lak lagg lae$ ladl lacq kowt kosh knowl knitt knight knif
kneec knav knack kipp kies keystr keyp keyn kerch kaz kang kag junct jumbl
jugg jounc jostl josh joggl jogg jobb jiv jinx jingl jing jimm jiggl jew jeer
jects jeal jawb jaund jangl jal jad jacks jackh izzy izzli ixtee iwee ivoua
ivie itzie itz$ itue itto ittee ithsta itee itchie itchfo isua istlie isste
isscro isro isrea isquie ispri isplay isphe isno ismou isli iskie isju ishtai
ishea ishbo isgru isdea iscui irtee irte irta irstie irro iroue irms$ irki
irea iquo iqueu ipwre ipto iptea ippo ipme ipla ipboa iotee iophi ionnai ionle
inxe inwhee invoi inuou intie inths$ inth$ inpu inpri inpoi innue inky inhea
ingsto ingso ingre ingrai ingoi inglie ingbo ingbi inga infie indsu indsto
indne indmi indly indfu indfo indbu indbrea inchoa inbou inai imso imsha imro
impra impou imply implie implau immie imly imle imfla imeo ilou illne illfu
illbo illbi ilks$ iliou ilhoue ilgri ildproo ihe igza igua igsa igri ignpo
ightlie ighthea ightclu ightai igh$ igee ifty iftee ifs$ ifli iesta iese iera
iene iels$ ieldi iel$ iega iduou idlo icli icku icko ickna ickly icklie ickba
icio ichi icea ibre ibo ibbli iats$ iathlo iape iano iabo hutch huss hush
hurtl hungr hunch humb hul huddl huckst hubb hound hord hopsc hopp hoor hoodw
holst hok hoed hobbl hoar hitch hiss hills highw hight hep henp henn henc
hemst heml heeh hedr heark headm headb haws haunt haunch hatt hassl has harn
harl handp handcr hamstr hamp hallm hairst hairp hairbr hailst hackn ha$ gyps
guts guss gurgl guous guill guest guesst guards guardr grung grues grown grow
grov grous grounds grop groc gristl gris gripp grind gridl griddl grid gric
gren greet greenh greem greeabl graz grappl grandst grandp grandm grandf
grandd gourm gott goss gond goldf goldbr going goggl god gobl gnments gnett
gnes gnash gnac gmir glov glott glitz glist glimps glanc glac girdl gions gins
gingl giml giar ghtless ghtfull ghostl ghness ghly$ ghhous ghastl ggyb ggriev
ggedn ggedl gett gerr geousl geous geonh geon geod geld geek geabl gauz gash
gars gargl garch garbl gaph gangl gamb gabbl futz fust fudg ftin ftil ftersh
fterb frump frizzl friv fritt fring friez freshm frenz freewh freest freckl
fread frazzl fragr foxtr foxh fowl fourt fost fortr forml forl footst footn
footl footh footb foolh folks flysp flux flust flurr flumm flound flounc flot
floss flood flint flinch flimfl fled flatb flashb flar flapp flagst fizzl fist
fisht finn filch fict fickl fibb ffur ffront fford ffick ffects fett feedb faz
fax farth farr farmh fann faithf facs ezvou eystro eypu eyno eyeli eyeba eyco
eyba expou exclai ewy ewhi ewee ewde ewai evai eutra eups$ euphe eup$ ettli
etrai etproo ethno ethni etchie eswo eswi estwa estuou estry estoo estme estee
estau esswo essu essie essee espoo espoi esmi esli esio eshu esha eschoo esau
erwea erwau ervo erty ertwi ertne erthro ersna ersau erpay ernmo ermai erloo
erloa erlay erks$ erkie erja erhau ergra erfro eree ercro ercoo erclai erbs$
erbru erboo equea eprie eplay ephra eoro eorde eophy eope eonho eona eode enza
enwa enui enty entwi entru entme enthou entai ensua ensna enshrou enshri ensco
enpe enny enni enlie enjoy enjoi engu engro engo enfee enee endro endri encry
encla emsti emptie empha emoa emmi emma emli embi elva elpfu elpe ellya ellio
ellho elds$ eldi eity einse eini eina eimpo eili eigns$ eightee eide egy egroo
egoa egla eggie egai efyi efy efrie efrau efrai efou efoo effo eezie eexi eexa
eewhee eeva eetie eesca eesa eerle eente eenli eemlie eema eelba eekie eeho
eefe eedo eedba eechi eeba eeable ectroe ecsta ecs$ ecoi ecoa eclea eckma ecau
ebui ebro ebri eboo ebee eavy eava eaty eatme eathle eastwa easts$ eassi
easpoo eary earthwo earths$ earthqua earthe earth$ earsi earns$ earn$ earms$
earme earm$ earle earke earhea earga eapfro eap$ eanu eants$ eano eanne eanlie
eamy ealthy ealthi ealthfu ealth$ eakne eakfa eaffi eadwi eadpa eadle eacha
eably dwindl dway$ dusk dund dumbf duch dthirst dryw drumm drugg drown drov
drom droit drench draggl downt downsc downpl downl downgr dowb dous dough
douch dorn dork doorst doorm dons dogtr dodd dod dmen dman dlight dlands djudg
dject divv dith ditch disch dips dipp diousn dink dimpl diddl dibbl dib diap
dhous dgy$ dgiest dgier dgep deuc desw desm derr deot deogr deod denz denh
delv deesc deej deadp deadb deact deac ddying dduc ddock ddlers ddler ddin
ddens daz dappl dapp dangl dandl daddl dad dach cutt cuss curts curdl cuousl
cumb ctyl cturn ctrosc ctropl ctron ctrom ctrol ctroenc ctress ctfull ctful
ctedl ctal ctab csim crush crumpl crouch crossch crossbr crist crisscr cris
crippl crin crims crev crep creos crann cradl cquitt cquis coxsw cowl cowh
cowb coust cous courtl couch costl cosp cort cornr corkscr conds combs cok
cockl coch coax coauth cnick clutt clutch clust clop cloist clockw clobb cliv
clench clem clect cleanl clav clatt clash clapb claims ckstrok ckster ckspac
cksaw ckpil ckouts ckout cknam ckmat cklist ckless ckleb ckknif ckhand ckhamm
ckfir cketf ckel ckdat ckax cits citl ciol cind cients churl church chuckl
chubb chsaf chrys chowd choth chot chortl choic chments chitch chirr chipp
chintz childpr chid chich chiatr chi$ chets chel checkm chauv chauff charbr
chagr chaf cest cesh ceed cec cco$ cclud cchan ccessf cced ccan ccal caw caust
cauc catc catb casc cartwh camps calv cackl cabb byw byp bviat buttr butl busb
burs burbl bunt bundl bunch bullsh bulb buk bugs bucks bucc btrud btitl bted
bsum bster bsolv bsist bscess brunch brun browb bronz bront bronch broc broach
brittl briq brin bridl brett breed breast breakf breach brar branch brais
brainw brainst bpoen boyc bov bould bouill botch booksh boog boo$ bolst boist
boggl bobsl bobbl boats bluepr bludg blubb blous blott bloss bloodth blockh
bliv blitz blith blist bliss bliq blindf blew blesp blench bleas blear blarn
blar blanch blad blackt blackl blackj blabb bish birthr birth birch biops binn
bigg bick bhorr beth berd bench belch befr beetl beel beav beastl beard beam
beak beagl beach bbyh bbly$ bbliest bblier bbet bbern bberg bayon bathr batch
barnst barm banq banj bang ballp backtr backd bach bacch azzli azzie azze azy
azoo ayfa ayday ayable axie awye awo awny awks$ awk$ awdu awbrea awai avvie
avio avie avea auvi aurs$ aureo aur$ aupe aundi aunchie aunch$ auna auls$ aul$
aughi auffeu audie aucu auci atui attu atna atla athlo athei atha atfoo atfo
atfi atchba atcha atboa astne astly assua assoo assma assle asseu aspha ashy
ashca ashboa ashba asco artwhee artu artri arshi arsha arsa arru array arpoo
arpo arnsto arns$ arn$ argu arf$ area ardy ardli archbi archai arbroi arboi
arao aqui apto apta apt$ aproo appu appri applie aplai aphra apboa anya antrie
antme antho anthei anspa anque anoe annea ankro ankne angy anguo angua angio
anee andwa andsli andou andmai andho andcu andcra anch$ anbe ampie amphle amoo
amie alvi alui alty altie aloi almie alma allma alkie algi alfu albu alba alaa
akeu akeou akeo aitja aith$ aistcoa aissa airsty airne airmai airma airdro
ainy ainstrea ainsa ainma ainle ailsto ailroa aille ailga aigne aho agua
agreea aglio agio agia aggrie agglie aggli aerie aeria adne admou adly adlo
adjoi adje adee addu acuu actre actne actme acsi acou acku acktra ackto
ackstro acksto ackspa acksa ackpo acklo ackja ackfi ackboa ackba achro achme
ach$ aceou acclai acca absco absce abno abbli ^young ^yipp ^yield ^yest ^yeast
^yearn ^yamm ^voyeu ^vea ^uv ^upstr ^upgr ^upch ^unz ^untw ^unj ^undr ^unbr
^ul ^twea ^trie ^stei ^sque ^sprou ^sprai ^sphi ^soi ^snou ^snai ^smea ^slei
^skie ^skei ^shrou ^shrie ^shoa ^shei ^scy ^screa ^schu ^schmoo ^schma ^schlo
^quoi ^pue ^psa ^prou ^plia ^pio ^phoe ^phle ^phia ^owl ^outv ^outsm ^outsh
^outn ^outg ^outfl ^outcl ^oth ^ophth ^ooz ^onl ^ogl ^nay ^mae ^lla ^lieu
^layo ^kha ^kaya ^iv ^hoi ^groa ^gly ^gia ^ghe ^gha ^gao ^flui ^fia ^eyes
^eyeb ^ew ^eur ^erg ^entw ^ensn ^ensc ^emc ^ek ^eid ^edd ^ecst ^eb ^earthq
^eagl ^dwi ^doi ^diu ^croa ^crie ^crayo ^coau ^clue ^cloi ^clau ^chry ^cai
^boui ^blou ^bloa ^bayo ^bay ^bau ^arts ^archb ^anx ^angr ^andr ^ambl ^alp
^aisl ^airf ^airdr ^airbr ^addl ^ackn zzlers zzler zzin zzer zzards zzard
zophr zoom zont zoid zod zirc zies ziers zarr zards ywhe ythmi ysy ysms$ ysm$
ysle ysfu yru yrs$ yr$ youthfu yon$ yolo ynx$ yncra ymou ymie ymbio yllo ylli
yings$ yieldi yhoo yest$ year$ yde ychoa yache xub xtraord xtern xswain xpound
xplan xpert xorb xom xiv xins xill xiest xier xia$ xclam xclaim xcerpt xabl
wwow wtow wsworth wse$ wry$ wrot wrongh wrongf writt wrest wreak wrath wrap
wplow wow wov worst worsh worn worms womb wnload wkward wiv wistf wist wintr
wink wilt wildf wight wield whizz whiff whid whew whelp wheels wheat whead
whats what whack wett wetl werl wend weir weft weeds webs wean wdust wdown
wdin wdiest wdies wdier wboard wbeat wayw watchf wardl waist waif wagg waft
vvying vvy$ vvied voyeur vows vout vour voids vix vishl viousn views vidl vich
viatr verv verspr vein veil veer veb vaunt vars vand vall vain vails vagr uzz$
uxo utze utz$ utua utty utts$ uths$ uthle ustly ustli ussle usky usciou urve
urva ursui urps$ urpli urp$ urly urki urfs$ urfi urea urdli urd$ urchi urche
urbu urble upyi upwa uptuou upse upplyi upply uppie uode unwe untrue untou
unto untle untie unthi unsui unsou unsmi unscru unpri unplea unno unna unmo
unle unia unhe ungu unfla unctuou uncoo unchba umpti umpli umpba umni umblie
ulche ulbe uizzi uitte uito uitle uism$ uino uilti uilte uilds$ uild$ uiggle
uies$ uh$ ugly ugga ufti uffoo uesti uery uencie ueing$ ueere ueegee ueami
ueale udyi udy ubte ubstra ubiou ubdue ubcu uasio uarre uaria uardia uant$
uana uabble tzy$ tziest tzier tying twos twits twit tweigh tweak twang twalk
turf tups tuous tuft tubb tuar ttoes ttingl tterns tterm tterbr ttempt ttees
tteer ttee$ ttant ttals tspok tsmart tshin tryst truthf trustf trunk truant
trous tromp trod trists trist trig traum trar trank tranc trains tracks trabl
tproof tplac towns town towh touts tosph torn topp topm toot toon took toms
told toj toing toff tod tlook tlock tlights tlessl tlaw tlast tland tiousn
tios tio$ tind timp tilt tifs tiff tienc tied tie$ tial thym thwart thwack
thump thumb thstand thsom throng throb thren threes thread thrall thoughtl
thoughtf thons thold thog thnic thmet thlessn thlessl thistl third thies therf
therb theos theocr thel theast thball thaw tharg thank tfuls tform tfoot
tflank tfield tfalls tfall teurs tests tesq terl teous tels tells teh teens
teem tects team teak tcropp tcom tchin tchfork tchets tcall tbell taunt tards
taint taill taff tactl tacl syncr symph sword swoop swoon swin swims swimm
swill swellh swashb swarth swarm swam svelt sultr suic sudd such styp stying
stunt stunn strustf strums strum strong stricts strials strew stren streetw
streat straw strand stral straightf stproof stout stoop stoon stomp stmark
stless stir stint stingl stid stib stial step stend stein steem steb steadf
stcoat stav stats staph stan stals staid stacks stabb ssues ssue$ ssos ssort
ssoon ssocks ssock sso$ ssments ssmak ssists ssingl ssfull ssets ssest sserts
ssels ssees ssee$ sscheck ssals sread squirt squiet squeam squawk squall spurt
spurn sprout sprawl sprain spout sportsw spoor spoons spool spoof spont splitt
splays splaying splayed splay$ spinst spiest spier spiel sphalt spew sperms
spear spawn spark sounds soundl soulf sough sott sos sop sooths soon soms
sombr sols soil softw sodd sobr soar snowsh snows snout snort sniff snazz
snarl snail snack smutt smount smos smogr smogg smog smock smit smirk smear
smarm smansh sma$ slurp slump slumm slough sloth slog slitt sliest slier slex
slew sleigh sleev slatt slaph skywr skunk skulk skit skiff skel skein sium
sioth silt sigh shunt shuck shtail shrunk shroud shrimp shriek shrewd showm
shorth shops shop shoal shirt shield sheet shbuckl shawl shark shaft sha$
sfying sfunct sfied serg selfl sek seen seech see$ sedl sects sear sdain scuzz
scurv scuff scroll scripts scrimp scream scrawn scrawl scraggl scowl scout
scounts scort scorn scoop scoff sciv sciss scind schism schiev schief schew
scet scens scends scell scarf scard scals scaff sbands saurs sars saps sap
santr santl sandst sak saic sagg ryll ryl ryb rworth rworks rweav rwaul rvy$
rvit rviest rvier rvert rvel rvants ruthl rut rund rtyr rtwheel rturn rtuos
rtsight rtook rtogr rto$ rtner rtling rtlessl rtless rtill rthwest rthrit
rthogr rthog rthly$ rthern rtedn rte$ rtals rtail rstock rspac rshot rshal
rsault rril rrets rrest rrants rran rrals rral rraign rquet rpus rprets rports
rpoon rplay$ rpinn royal rousn roof ront roil roff roes rocc roar roads rny$
rnums rnstorm rnrow rnments rnith rnist rnip rniest rnier rnals rmur rmouth
rmong rmoil rmint rmind rmezz rmest rmeabl rmaphr rmail rmaids rmaid rlough
rlorn rlook rloins rlocks rload rlink rlick rlessn rlessl rlers rler rldly$
rlays rlay$ rlard rlain rkscrew rkill rjacks ritz rishl rips rink rightf rift
riffs riet ricks riats riant rianc rhod rheat rhaul rgrad rgos rgoing rgling
rgles rgled rgle$ rglar rgist rgetf rgeons rgast rgans rfuck rfid rfeits
rfects rfboard reup restl resq respl reof rens reer reens reek reds redh rectn
rectl reckl rearms rear reag reaft rea$ rdsmen rdsman rdship rdrooms rdroom
rdo$ rdless rdles rdled rdle$ rdiol rdil rdial rdheart rdhead rdep rdanc
rdains rcutt rcuts rcut rcur rcup rcrowd rcours rcook rcond rcock rcoats
rclaim rcil rcials rchist rchiefs rchet rchant rchaeol rcem rby$ rbul rbugs
rbug rbroil rborn rboil rbiv rbic rball rbag rax rauc rats ratr rast rarm rapl
raph rantl rails ragt quoit quitt quins quies quicks quial queur quets quests
quen quell queenl queen queath quan quals quaint quail quaff pwreck purr punk
pung pult pudg pturn ptoc ptist pters ptain ptab pstream psticks pstart psom
psod psis psies pseud psalm prox proud proofs profl proar prints primp priestl
priest prick pregn preen preem prawn pran pprob ppoints pplaud pplant ppie$
ppest pperm ppears ppe$ ppanc ppall pour posts posth portm porr poor poop pool
pooh pons pointl poign pogl pods pneum pness pments plunk ploys ploym ploy$
plectr pleat playf playact plaus plank plait plaincl plaid pkeep pism pint
pinstr pins pigr pigm pient pids pidl picc phom phlegm phics phe$ phapp phall
peut pesk penl penc pelt peel peds pect peak pchuck pbuild pbraid pays paying
pay$ pawn paup pats pastr pang pancr pain paid pab pa$ ozo oyle oyers$ oyalty
oyally oyal$ oxiou owdy owbi ovia outwea outwa outsprea outspo outshi outru
outca outbu outbrea outbi ousy ousse ousle ourns$ ourn$ ourma ourly ourge
ourci ounse oundwo oundly ouli oulfu ould$ oughtle oughtfu oughly oudmou ouchy
oubli ottli otro otle ostmo ostly ostli ostdo ossy ossbree ossbo osce oryi
ortsi ortni ortli orthri orswea orsi orque orphou orny ornsta ornea ormle
orldly orkma orei ordu oquy oqui opto opple opoeia ophre opco oozy oozle ooty
oorma oopy oonie oollie ooky ookkee oohi oogie oodwo oodstai oodshe oodooi
oochi oobe onwa onthly onthlie onsmo onry onru onks$ onk$ ongue ongruou onghea
ongfu onflue onfla ondria ondle onchi oncha omra omplia omnia omna omey ombre
olye oloi olme olks$ olfi olf$ olea ok$ oita oit$ oism$ ois$ ointle oilie
oigna oica ogle ofts$ oftie ofti ofli offse oerce oenai oele oebe odyi oddy
oddli oddie octe ocs$ ockwi ocksu ockey ocie ochro ocho ocha obviou obsti oboi
oboe oblo obla obei obblie oastie oastfu oare oana oam$ oafe oaf$ oacti oachme
oacha nyt nyl nwork nwill nwheel nwards nward nvuln nviabl nvents nveil nveigh
nuts nup nuousn nuin nuend nu$ ntying ntwist ntual ntrust ntrit ntril ntrepr
ntrast ntrac ntous ntouch ntoon ntomb ntlessn ntlessl ntler ntists ntist ntifr
ntied nthly$ nthlies nthink nthem ntfull ntful ntends ntees nteer nteel ntee$
nteb ntasm ntant ntank ntang ntaint nsvest nsumpt nsuit nsual nstructs nstrip
nstream nstrain nstopp nstinct nstill nsters nstab nsound nsoms nsomn nsnarl
nsmok nsmiss nsinc nsill nsig nshroud nships nshin nsets nself nseat nseal
nscrup nscrew nsciousn nsciousl nsaw nsaf nsack nrush nrunn nris nrip nrestr
nrest nreg nref nreal nquir nquet nprov nprick npleas npeck npack nown nont
nonst nonsm nonm nonf nonch noint nodd nochr nnul nnual nneal nmit nmask nmann
nmanl nman nlock nload nkrupt nkroll nkindl nkie$ nkfull nkets nken nkag nits
nior ninj ninh nimp nightm nigh nift nientl nient nhook nhood nhealth nguor
ngulf nguist ngues ngue$ ngsters ngster ngsong ngrain ngoing ngoes ngodl nglor
nglet nglem ngic nghead ngeon ngents ngentl ngef ngbirds ngbird nfurl nfuls
nfrock nfort nforms nfolk nfolds nfluent nflow nfir nfight nfield nfern nfel
nfaithf nfair neys ney$ newt newsw newl nert nept nep neocl nempl neit neigh
nect necks necd nearth nears ndum ndue$ nducts ndsurf ndston ndstands ndshak
ndries ndproof ndpick ndousl ndous ndmill ndmaid ndlin ndisc ndim ndiff ndied
ndful ndfold ndentl ndcuff ndcraft ndburn ndbags ndbag ndard ndan ndals ndab
nctly$ ncter ncrypt ncos ncork nconst nconsp ncomb ncoil ncoh nco$ nclear
ncleanl nclean nclasp nciv ncils nciest ncient ncial nchants nchantm nchal
ncent nceal ncav ncand ncamp nburn nbon nbolt nays nauts nart naps nantl nalg
naing naed nae$ nachr nabs nabr myop murs murr mund muc msy$ mston mstant
msiest msier msic mshaw mrad mpugn mpud mptuous mprint mpoon mpon mpness
mploym mployed mployabl mpler mplais mpion mphon mphlet mphib mpern mpad
mpacts mouthw mounts mound moult morb moir moeb moat mniv mnit mnes mnast
mnamb mmogr mmels mmel mme$ mmal mmac mium mitr mins mindl mindf milks miff
midt midsh mids midl midd mia$ mewl menf mends melt mels meld megr meek mbus
mburg mbryol mbroil mbran mbiv mbil mbfound mbent mbas mba$ mayor maw maul
matchm mars marksm marks mank mands maind maim lysts lying lustr lusc lurk
lums lugg lucr luabl ltruist ltreat ltiest ltier lthin ltant lsor lsom lsam
lroad lphin lows lout loudm loudl lors lordl lord loons loom loing loh logn
loed lmy$ lmol lmistr lmiest lmier lmet lluv llown llov llout llots llons llon
llmark llium llipt llionths llionth lligr llick llhead llects llbacks llback
llast llants llant lken listl liss lisp lishn lips lipr liph lious lio$ lint
limps limn lilt lignm lieut liefs lied lidl liant lgrim lgor lgic lgent lful
lform lfing lfill lfheart lfed lewd leum lesm lesb leps leont leis leid leew
lees lee$ leap ldproof ldew lderm ldbrick lchem lcast lbum lbow lboard layer
layed lawl lawf lasc larv larc lamm laid labr laam kul kron knobb knell kmark
kmak klies klept kking kkeep kked kith kirk kiln kie$ ki$ kfast ketch kesp
keouts keout keeps keel kayak jumps juj jug jov joust jott jolt joint jilt
jets jaw jan jahs jah jag jab ixtie iwo iumpha itzi ittli itru itli ithme
ithie ithho istrie istie issu issfu isps$ isp$ isloya islea ishwa ishee isfyi
isee isdo iscue irtuou irtuo irsti irst$ irdle irdie irdi irco irchi irch$
iptu iptoe ipso ipsi iprea ipre ippy ipma ipie ipco ipbui ioxi iouse iosy iose
iophy ionths$ ionth$ ionme iode inx$ inwa invu invio instri inle inho ingti
ingne ingma infre indro incts$ inctly incia inbree inbo imy imwi impy imia
imbue ilki ilio ilch$ ighty ightni ightma ightli ighteou ighs$ ighbo iggly
iffy ieze ieute ieto ieti iery ierie ierci ientiou iens$ iendi iencie ien$
ieba idshi idli iddy icue icrou ibli ibia iasi iars$ iannua iaise iago iads$
iad$ iacri iaca iabe hunchb humpb hump hugg horm hoop hool hoof honk hoist
hogs hogg ho$ hitt hinds high hertz herd helpl helpf heist heir hein heel
heedl heed heats heartb heap healthf headr hays harsh harps harml harmf hark
hardw hardt hardl hardb handsh halfp halfh hairl hairdr hairc hail hagg gus
gunr gun gulp guild gueing gubr guan gsaw grunt growl grout grot grooms gron
groan grisl greeing great grasp grands granc grabb gown goons goodl good gong
godf goad gnpost gnaw gnantl gnan gnabl gmas glyph gly$ glut gloat glios glio$
glint glect glean gladl girt girl giousl gimp gild gift giant gia$ ghtmar
ghtlift ghtil ghties ghtheart ghtforw ghtedn ghtail ghost ghneck ghett ggings
ggie$ ggards ggar ggan geys gew geth gear gasp garg gaol gang galv gait gadg
furt ftness ftly$ fties fruitl frug frowz frown fronts friends fretf freew
freeth freed fraught frain frail fours fortn forthr forth fork ford fopp folds
foist foils fogs fog foal fness flout floor flogg flock flig flet fleshl fles
flects fleck fle$ flaw flaunt flank flamm flail fiv fiss fisc fink fights
fiend fields fid fics fich fib ffies ffian ffeur ffest ffaw felt feist feint
feign feast feas fearl fear fawn faun fart farc faithl faith fabl fab ezoi eze
eyors$ eyor$ eyes$ eyance extua extu exts$ extrao exta expla exie excla ewswo
ewoo ewly ewhe eure eura eupo eulo eudo euda euce euca etty etraye etoe etiou
estroye estrie estli essayi esple espea eski eshri eshoe eshme esbia esai eryi
ervou ertz$ ertie ertia erstru ersprea erslee ersia ersea erryi eroe ermou
ergs$ ergree ergie erg$ erfoo erfee ereu ercou erclo erbree erbrai erbea
equiou equie eousne eosta eopo eonto eof$ eocra eocla enzy enzie eny enviou
envia entrie enths$ enth$ ensuou ensue enro enpo enma enio enhea engths$
ength$ eneu eneou endue endle endie enchma empts$ emptio employa emne emme
emcee elry elrie elple elpi elp$ elms$ elm$ elliou ellhea ellae elfle elayi
elaye eize eiti eitfu eissue eisa eirs$ eire eirde eir$ einou eils$ eil$ eigni
eigle eightli eighti eige eido ehoo egue egro egri egma egiou eftie efs$ efly
eflie efia ef$ eeway eeto eetly eetli eetle eeth$ eest$ eera eelie eeklie
eekee eedli eedfu eece edoe ediu ediou edio edhea ecue ectru ectra ectiou
ecree ecie ecdo eccle ec$ eby ebs$ ebru ebroa eble ebble eb$ eawo eavie earwa
eartle eartie earti earthi eartbrea earmi earchi eaps$ eapi eanly eamble ealie
eafte eafa eadths$ eadth$ eadfa eacle dysp dysl dysf dyg dwork dwink dways
dverb duod dunk dulc dug duff duck dsling drubb drool drons dripp dripl dressm
dren dreaml dread drawl dras drang downw downst downh dound dought dott dosc
door doom donn doing dog doff dlessl dland djourn djoin diums dists disk dishw
diphth dip diox dioth diom diocr dio$ dimw digg dients dientl dicts dics dicr
diag diacr diac diabl dia$ dhes dgings dgin dful dextr dext desh dersh dept
deign deft deed debt dear dean ddly$ ddish ddend dcutt dblock dawn dauntl
daunt dart darkl dank dan daft cyl cuum cumm cull cuit cuing cuff cues cued
cue$ cud ctus ctuar ctrod ctments ctment ctant ctanc ctag ctac cry$ crumm
cruel crudd crown crowd croq croph crom crock croak crimp cril cries crid
cribb crew crest cres creel crayon crawl crass crap cramp cragg craftsm crafts
cquer couns cough corps cornst corns cornfl corm cork cords coot coons conk
conch comr cold cogr coed cochl coats clump cluck clout clot clomp clock cloak
clit clists clist cliq click clew clerk cleft claw clasp clank clang clamp
clairv clack ckyards ckyard ckwis cktrack ckold ckmark ckliest cklier ckjack
ckish ckil ckies ckboards ckboard ckberr ckball citn cisms cion cinct ciet
cidl churn chuck chow chords chord chondr chomp chock choan chnocr chism chirp
chink chimp childl childb chief chest chess cheep chairm chair chain cey$ cerv
cep cents cen ceitf ceipt ceans cdot ccurs ccumb ccul ccost ccles cclaim
ccinct cci$ cceed ccabl caulk cattl cask cards cantl caf cadd byr bvert bux
burp bullh buckt bstrat bsoil bscript bscond brupt brunt brow brook brom
brious brickl brick brev breathl breaks bread bratt brash braid boy$ bounds
boor bookk boodl bolt boll bold bobs boastf bo$ bnail bmiss blurt blowz
bloodst bloods blond bloat blissf blintz blight bleep blec bleat bleak blah
bjoin bjects biz bir biq bioph bins bim bilk bigh bier bienn bied bhead bets
bests berth berc bent bend belt beis bees beek bee$ bedd beans bdom bbag bawl
bashf barf bard bandw bait bags ba$ azz$ aype ayoffs$ ayoff$ ayfu ayba ayal$
awkie avy auze auxi autu aute auri auntie aundry auma aughts$ audu aucou aube
atwa atuou attooi atroo ateau atchy atchfu asui asthma assee asma ashfu aryi
arva artly arthri arthe artfu artee arshe arqui armie arls$ arley arl$ argy
argue argoe argle arfs$ arduou ardshi ardba arda arcti archaeo arbs$ arbli
arb$ apso apne apeu aordi anyo anxiou anua antu anky ankly angue andyi andu
andsto andsha andme ancrea amstri amsha amphi amoe amna ammie amia amboya alta
alnu alleye algo alfpe alfo alfhea alf$ aleo alei alche alai aky aka aize aito
aiths$ aithle aisle ais$ airvoya airly airdre aircu aipse ainty ainsai ainde
ainclo aimle aightfo aica ahi agro aggy afflue affie aeolo adoe adioi actly
actle acly acksla ackbe achy accrue abste abla abia abdo ^yumm ^yowl ^youthf
^yours ^yelp ^yearl ^yawn ^yaw ^yank ^yacht ^wry ^wooe ^urn ^upw ^unth ^unstr
^um ^ub ^trui ^trau ^throu ^throe ^thee ^sway ^sve ^spy ^spry ^spree ^spraye
^soy ^sly ^slue ^scle ^rha ^queue ^pseu ^praye ^pneu ^plie ^playe ^paye
^outspr ^outsp ^ostr ^ons ^onr ^onc ^oink ^oh ^offsh ^oar ^oak ^nua ^noe ^myo
^mue ^kro ^kle ^joyou ^irk ^ion ^inw ^inm ^iff ^ick ^iamb ^hue ^hey ^gray ^gno
^glue ^glee ^ghou ^gay ^fria ^foe ^eul ^euc ^es ^endl ^elf ^egg ^eccl ^ebb
^earw ^ear ^dua ^cui ^cue ^coy ^chao ^buoya ^brou ^boyi ^bei ^atl ^asthm ^artl
^artf ^antl ^alch ^airs ^aiml ^aim ^agn ^agl ^afl zzly$ zzliest zzlier zzlem
zzil zzies zzenm zzas zzan zza$ zyg zucch zos zoos zook zoo$ zomb zom zoids
zoa$ zo$ zith zinn zings zinck zim zestf zepp zeph zeng zeln zell zebr zapp
zal ywhee ywe yuppie yummie yule yuckie yucca ythro ythms$ ythm$ ythia ytai
yswa yssey ysma yscra yrtle yroi yria ypu yptu ypts$ ypt$ ypso ypse yps$ ypro
ypre ypla yphoo ypho yp$ yove youts$ yout$ yourse youngste younge yopi yone
yogu yogi yode ynge yndro ynchi ynche ymu ympo ympha ymph$ ymno ymba ymai yll$
ylea yke yists$ yist$ yippee yhea yhe ygua ygro ygne yglo ygla yfu yeshi yena
yeastie years$ yearni yearboo ydroe ycoo ychs$ ych$ yceu yby ybri yboo yboa
ybi ybea yati yarmu yardsti yahoo yaci xym xuall xtual xtrus xtrott xtrins
xtrat xtras xtran xtrac xtoll xtil xtends xtant xquis xpuls xpressw xplet
xperts xpenc xpell xor xol xod xious xions xings xied xics xib xhol xglov xest
xeg xe$ xcurs xcruc xcresc xcheq wzy$ wziest wzier wyers wyer wuss wtorch wsy$
wsuits wsuit wstrings wstring wslett wsiest wsier wsers wser wroom wristw
wristb wright wries wrathf wpunch wplac wpiec wpan worthl wormh workw workst
workpl workl workf workd woodsm woodsh woodm woodl woodch wont wondr wolfh
wnston wnish wnings wnhous wnbrok wmak wlyw wlspac wlessl wless wkish witz
witl wishf wishb winks winkl wingt wingspr wingsp windst windsh windscr windp
windl windj windf wildl wildfl wigs wien wicks whos whors whorl whom whod
whirr whirlw whirlp whips whipl whip whimm which whett whetst whenc when whelk
wheelwr wheelch wheads whatch wgiv wgirls wgirl wfuln wflak wfind wetb westw
werp werh werf went wench welr wellspr wells weev weekn weekd weds wedl webb
weas weakn weakf wdrops wdrop wdriv wdowns wdness wdly$ wding wdil wdest wders
wded wcom wbridg wboats wballs wbacks wback watts watchw watcht watchd watchb
wastr wasp washt washst washr washcl was warts warth warsh warns wap wanc walr
waln wallfl walkw waits waitr waistl waistb wails wags waf wadd vyw vying vulv
vult vousl vous vouack vott vots volts voltm viz vits vishn vion vintn vint
viltr vilr vilm vild viewp viewf vied vie$ vicl viath vian viall viad via$ vew
vett vetch vests vestr vestm vess vesp vertl vergl verfr verbs vep venw venths
venth venp venn velt velr velh velf vegg veaw veatt veals vats vasts vasc varm
varl vants vantl vamps valc vaingl vae$ uzzy uzzwo uzza uys$ uying$ uway uviu
uvia uumuu uum$ utzie utzi uty uttu uttli utthroa utta utsie utshe utrie utria
utou utmea utla utie utia utha utee utdo utcra utchi utcheo utba ustu ustpa
ustne ustbi usso ussbu uskra uskme usho ushka ushca useu uscri usce usboy urvy
urveyi urveye urtsie urtiu ursua ursti ursa urrs$ urrey urr$ urquoi uroy urnta
urnsti urnpi urnoo urnkey urney urnee urncoa urme urlo urks$ urkie urkey urk$
urgy urgs$ urgli urghe urg$ urfie urds$ urdie urchya urchgoe urch$ urbli uptua
uptne uptly uptia upta upswi upsi upsho uppy upplia upla upia uphi uphea upfu
upee upca upbri upboa upbea uotie uoso uosi uoru uors$ uorou uor$ uoise unway
unva untre unstu unstru unste unspea unspe unsli unski unsho unsha unscree
unsay unroo unra unpu unpai unkya unkhou unka uniou unie unhu ungue ungrie
ungie ungho unfou unflo unfli unfee undru undra undou undoi undli undia undae
uncts$ uncto unctiou unct$ uncio unchy unchti unchroo unchbo uncea unbou unboa
umsti umspe umsku umsie umsi umqua umpu umptiou umptee umpsui umou umns$ umne
umn$ umlau umia umfle umfe umdro umdi umbwai umbta umbscre umbne umbnai umbie
umbbe ulwa ulve ulty ultrie ultie ulru ulpto ulps$ ulpri ulpie ulpe ulp$ ulmo
ulme ullyi ullri ullpe ullou ullne ullho ullfro ulley ullca ulky ulkhea ulgie
ulfs$ ulfa ulf$ ulea ulcru ulci ulch$ ulbs$ ulb$ ulai uku uklu ukie uju uja
uizze uitti uitta uista uisli uishie uisa uirre uirmie uirkie uips$ uippi
uippe uipa uip$ uinou uinea uince uina uiltie uildu uil$ uigglie uife uiets$
uieti uiet$ uidly uicksa uickie uicie uiche uiali ugue ugsto ugou ugne ugli
uggy ugee ugboa ugbea uffbo uffba uezzi uetti uetry uetoa ueti uete uetba
uestrie uestria uestra uess$ ueru uerri uero uerie ueno uenci uenche uelche
ueja uefu uefie uefi uees$ ueenlie uedu uebo ueblo uebi ueasie ueasi ueaks$
ueakie ueak$ udzu udsie udra udo udiou udio udgy udgua uddyi uddlie uda uctee
ucra uclei uclea ucku ucktoo uckski uckoo uckloa uckho uckeye uckboa uckbi
uchsia uccoe ucco ucchi ubway ubto ubtee ubsy ubstru ubrou ubpro ubplo ubma
ubhu ubhou ubgrou ubdo ubca ubbu ubbly ubbli ubba ubae uavi uava uats$ uatrai
uat$ uashie uashi uash$ uasa uarto uarrie uaro uariu uardroo uardrai uardi
uardhou uanda uancy uanau uama uaki uainta uaint$ uaho uads$ uadro uadi uad$
uacy uacke uably tzers tzer tymp tyk tyd tyc twinn twill twelv tuss tusk turr
turq turp turnt turnst turns turnp turnk turnc turk tuousl tundr tumn tumm
tumbr tugg tugb tuett tuenc ttying ttur ttuc ttors ttorn ttonw ttont ttonm
ttocks ttletr ttlet ttlest ttlesn ttlesh ttlegr ttledr ttit ttied tti$ ttersw
tterh tterf tterc ttak ttainm ttacks ttachm tsy$ tsun tstripp tston tstat
tstand tspread tsiz tsies tshots tshot tsell tsam trying trunch truffl trow
trouss troub troths tross tror troopsh troops trolw trolm troik trog trodd
troact triumv tript tripp triots triol trink trigl trienn tried trickst
triathl triarchs triangl triang trials triall trial tria$ trestl tresc treous
trenchm trekk treet treats treatm treads treadm trays tray$ trashc trapd
transs transn transd tral traj trailbl trah tputt tpourr tpour tpat towp
townsm townsh townh tours toup touchst touchd touc torw torts toq topkn topc
toothbr tools too$ tomf tomc tombst tolls tollg tollb toils tof toen toeh toe$
todd tocs toasts toastm toadst tney$ tnapp tmot tmen tmegg tmeats tmeat tmast
tman tmak tlets tlet tlessn tlem tlass tlas tlant tizz titm tipst tinw tinsm
tingh timbr tild tigr tightw tightr tientl tiebr tids tidb ticl tiar tians
tian tiam tiall tia$ thyr thwash thumbt thumbscr thumbn thudd thtak thskell
thruw thrust thrush thrumm throwb through thromb throc throbb threesc thpiec
thpast thosph thosc thors thly$ thingl thighb thics thickn thhous thhold
thfuls thesm theq thees thee$ thedr thec theatr theads thbrush thart thars
thanksg thankl thals thall thach tgunn tgrow tfir texts textb tetr tert tersp
tersh terp terg terfr tercr teq teousl tenp tenf tends tendr tems temptr temps
tembl tellt tecr tech teaus teat tearj tearf teardr teap teamst teamm teac
tdowns tdown tcrack tchwords tchword tchtow tchmen tchman tchings tchbox
tchblad tchbacks tchback tchatt tcham tcast tcas tcars tcar tburst tbuild
tbrok tbridg tbreak tboil tboats tboat tbeds tbed taxp taw tats taskm tash
taps taphs tantr tankf tand tamb talp tair tains tainm tailw tailsp tailp
tailc tagg taf tadp tactf tachm syndr syl swordsm swordf swiv switchbl swigg
sweetm sweeth swearw swatch swast swapp swabb surn sunsp sunscr sunk sunfl
sunf sundr sunbl suits suitc sued subw subr subpr subpl subgr sualt suals
stypt stwatch stwards sturt sturg stuous studd stuar ststrok stsell strutt
strusts strumm strud struck strous stros stropp strongh strongb strod strob
stro$ strious strion strings strienn stridd strich strians strian stressf
strels strel streetl streetc stray$ strawb strapp strapl strants strant
strains strad stplat stows storms stopw stopl stopg stopc stoog stoms stogr
stoffs stoff stocks stockr stockh stockbr stmort stmistr stmen stman stlud
stlessn stlessl stiz stirs stionn stingr stinctl stilt stills stik stiffs
stied sticl stickp stich stiar stian sthum sthmus sthmat sthes sthen stgrad
stev steth sterw sterstr sterp sternm sterh sterf sterd stepm stepl stepf
stepd stepch stepbr stemm stels stelr steesh stees stee$ stedl stect steamsh
steamb steal steakh steads stcod stbound stbon stbands stband staw starts
starg starf stards stanz standst standp standb stalw stalls stairc stains
staffs stach stacc stac ssyc sswords ssword ssus ssums ssumpt ssuing ssued
sstepp sspiec sspend ssov ssortm ssorsh ssons ssmat ssling ssles ssled ssle$
ssiz ssionl ssigns ssignm sshopp ssfir sseus sseurs sseur sserv ssents ssedl
ssbudg ssbreed ssbarr ssays ssaying ssayed ssay$ ssav ssars ssand ssails ssafr
ssa$ squis squirr squidd squel squadr squadd squabb spying spygl spurr spun
sprock sprit sprints springb spress spreadsh spots sports spoonb sponds
spoilsp spoils splin splashd splan spitf spitb spion spins spillw spig spies
spied spiec spids sphinx sphinct sphat spept speopl spendthr spends spels
spells speedw speedst speedb specks speaks spberr spast spart spans spadr soyb
souv southp sourp sourd soundtr sought sots sorts sorb sopr sonm songwr songst
songb solst softb sof soev sobb soaps snuffb snubb snowst snowfl snowf sniv
snapsh snapdr snagg snaf smot smop smon smold smod smin smidg smatt smag slurr
sluml slowp slowd slott slothf slogg slipkn slipc slingsh slings slights
sleepl sledd slead slays slaying slay$ slaw slapp slapd slabb skyw skyscr skyc
skullc skmel skmast skirts skins skinh skinfl skimm skillf skil skies skidd
skett sketb skers skell sixp six siums sitr sitc sists sirr sirl siq sipp
siogn sinkh silkw signb siers sierr sie$ sicl sickb siacs siac shyst shwat
shwash shutd shunn shucks shtow shrugg shrooms shrinks shrin shrik shrank
shows showr showpl showp showg showd shouts shot shortw shortst shortl shortf
shopk shoots shog shoestr shoesh shoes shoem shoel shoeing shoe$ shmen shmash
shman shist shirtw shirtt shirtsl shir shipl shind shinb shik shifts shiftl
shibb shi$ shfull shful shev sherw sherr sherm sherb shellf shek sheikd
sheepsk sheepf sheepd sheb sheav shcloths shcloth shcans shcan shbowls shbowl
shbon shboards shboard shbas shas sharpsh sharm shapp shamr shamm sgiv sfy$
sfort sfitt sfies sexp seums seum setb sess sesh serr serl sepl seous sentr
sentm sentf senb sells sefl seers seeing seeds seedl seeabl sebl seasc searchl
seapl seap seamstr seacr seac sea$ sdict sdeal sdainf scutch scumb scuit
scuing scues scued scue$ scudd scrup scrot scroog scriptwr screwdr screwb
screp screetl screenwr screenpl scrapb scramm scoutm scoundr scots scoth scot
scos scorp scornf scons scon scoffl scoes scles scle$ scis scim sciat schoon
schoolt schools schoolr schoolh schoolg schoolch schnauz schlepp schlem scherz
scept scath scast scarpm scants scans scamm scab sbian sbarr saws sawm sawh
saus satr satch sastr sast sart sapr sapph sants santn santh sann sands sandm
sandl sandh sandc samp saltsh saltc sails sailf said sah sagn saffr saffl
sacks sackf sacch sac sabb rythr ryt rynx ryat rwritt rwov rwords rword rwis
rwent rvom rvish rviews rviett rveys rveyors rveyor rveying rveyed rvey$ rvanc
rvabl rutt runw runs ruef rucks ruck rubs rubr rubd rtying rtwav rtuousl
rtuous rtuit rtuar rtsy$ rtswom rtsmanl rtsleev rtos rtoir rtnersh rtmast
rtmant rtlin rtliest rtlier rtlessn rtlen rtium rtionm rtight rtieths rtieth
rtich rtiall rtia$ rthwards rthward rthston rthrow rthrop rthright rthrat
rthplac rthous rthol rthless rthest rtherm rtherl rthen rtful rtfol rterr
rterm rterl rterh rterf rterd rteousl rteous rteenths rteenth rteens rteen
rtcull rtcom rtcak rtbrok rtbreak rtast rtars rtarb rtant rtank rtan rtall
rtainm rtainl rtailm rtach rta$ rsyth rswear rsup rsty$ rstruct rstrok rstood
rston rstiest rstier rstic rsteps rstep rsted rstands rspread rsplitt rspend
rspect rsors rsonn rsold rsnip rsnapp rsmen rsman rsleep rskin rsink rsimm
rsim rsigns rsickn rsick rsial rshow rshoots rshmall rshav rsey$ rsew rsers
rselv rsells rself rseh rsefl rsef rsecr rseas rsatz rsap rsag rryw rryb
rrupts rrups rrowh rrowf rrots rron rrist rrill rrett rretr rrept rrepr rrep
rrentl rrens rrem rrects rrectn rrectl rrec rre$ rrays rraying rrayed rray$
rrassm rras rrar rrap rrall rraignm rra$ rquois rquetr rquess rpuss rpuscl
rpul rpsich rpshoot rproofs rprints rpluss rplus rplic rplays rplaying rplayed
rpins rpin rpill rpid rpi$ rphon rphol rphism rper rpend rpegg rpays rpaying
rpay$ rpaul rpan rpals rpal rpaid roys royalt roy$ rows rowb roust roundw
roundh roughs roughl roughf roughbr rots rotr rostr roots roons roomm roomf
rooft ronw roncl rolls rollb roin roic rogr rogl rofl roentg roeb roch roadst
roadr roadh roadb roach rnuc rntabl rnstil rnpip rnpik rnouts rnout rnoos rno$
rnness rnists rniq rnibl rnful rnflow rneym rneying rneyed rnerst rnels rnees
rnee$ rnecks rnar rnad rnab rmulk rmudg rmuch rmouths rmous rmopl rmom rmod
rmlessn rmlessl rmless rmits rmints rmil rmig rmid rmhous rmful rmel rmats
rmast rmarks rmar rmants rmant rmands rmals rmag rmack rma$ rlying rlords
rlord rlop rlong rlob rlishl rlish rlings rlig rlifts rlie$ rley$ rleq rlead
rlaying rlast rlapp rlap rlains rkstat rkplac rkness rkly$ rkins rkings rkingm
rkil rkhous rkhors rkey$ rkets rketpl rkest rkens rkbench rkah rjerk rithms
risks rishm risd riphr riots rions rinth ringm ringh rinds rimm rimb rils rih
rigm rights rigg rients ridl ricl ricksh ribs riatr riach rhythms rhub rhors
rhoods rhiz rhighw rheost rhear rgyw rgym rgyl rgul rgues rgue$ rguabl rgrowth
rgrow rgrounds rground rgreen rgoyl rgot rgnett rglass rglad rgists rgins rgid
rgets rgeois rgents rgem rgeabl rgaz rgass rgasm rgas rgart rgarm rfunct rfuln
rfug rfronts rfront rforms rfoot rfin rfeed rfectl revv rethr resk reosc reos
reon reo$ rends rems rempt remn reigns reentr reeing redw redsk redbr rearw
reans reads rdwatch rdships rdrops rdropp rdrop rdrob rdraw rdplay$ rdous
rdons rdom rdings rdingh rdig rdies rdid rdicts rdials rdfish rdell rdcov
rdcag rdants rdag rcus rcurr rcumsp rcuml rcumfl rcumf rctic rcrit rcot rcoph
rcon rcloth rcling rcled rcibl rchlight rchists rchism rchip rchion rchen
rchduk rchdioc rchdeac rchantm rchal rchaic rcest rceabl rcass rcarr rcar rcan
rcal rbut rbuss rburn rbur rbuncl rbugg rbreed rbrain rboys rboy$ rbos rborv
rbopr rbooks rboj rboh rboats rboat rbishm rbids rbial rbets rbet rbersh
rbents rbent rben rbells rbeds rbears rbanz rbans rbanc rbacks rbabl raying
rayed rattr rathsk raspb rapsc raps rapr rappr raok ranh ramm ralt ralls ralg
ralds rainst rainm rainf raindr rainc rainb railw railm raill raf racq rachn
racc rabbl quisl quips quinc quif quiesc quids quickl quich quents quels quatr
quand quam quall quah quabl qua$ pywr pyth pyl pyh pygm pwatch putsch pust
pushc punst punn pund pumpk pumm pulm pullb pulchr puffb puer puebl pudd pubb
ptwrit ptus ptuar ptual ptuag ptops ptop ptism ptib ptat ptarm ptanc ptak
ptainc ptag ptacl psying psuck pskat psist psil psied pshots pshot pscall psak
prur prow prosc props propp proots prongh prong prodd proachf prith prism
primm prietr priesth prics pretz pressm presc preq prech pread pratf prankst
pral prair ppurt ppucc pproch pprobr pports ppop ppoorw ppon pplianc ppler
ppish ppingst pphir ppersn pperh ppercl ppenst ppel pouts pounds potl potf
postscr postr postgr posph porth portf portc porch pops popg poos poorh poodl
ponch pomm poltr polt pollst poins pog poeia$ podg podd po$ pnos pnel pmoth
pmat pluss plus plup plugs plugg plug ploying ployed plov plopp pload plights
plifts pliers plej plech plaz playwr playth plays playr playp playm playing
playh playgr playg played plaud platt plash plaq plants plains plainc pladd
plack pknots pknot pits pitf pitchm pistl pisms pipsq piousl pious pinj pinf
pillb pilch pigt pigst pigsk pigp pieb pie$ pidg pickp piazz piat pias piar
pial phying phous phold phoen phoeb phistr philt philh phied phiall phec pheav
pheas phaz phat phants phan phag pful pfrogg pfath pex pewt pev pets pests
pestr pescr perstr perscr perq perg perfl percr pept peps pents penkn peng
pench pem pej pegg pefr peew peeph pecks pean peals peah peaf peach pdrag
pdaught pdash pcov pcak pbroth pbring paz payr payl payd paych payabl paws
pawnsh pawnbr paul pathw patchw pasts passw passp passk passb pash pas parv
parts partr parth parsn parkw parf parchm parc papp pantr palms pallb paisl
pairs pairm paints paintbr painst painl paink pailf padr pach ozzle oysti
oyste oyse oyote oyhoo oyfrie oyeuri oybea oyant$ oyaltie oyale oxo oxhou oxho
oxglo oxfo oxco oxca oxbo owzy owu owths$ owtai owsui owstri owsto owspri
owsli owsie owshoe owshi owpu owpla owpie owou ownwa ownu owntu ownswi ownsto
ownsta ownshi ownpou ownplay ownou ownhou ownhi ownfa ownbea owlie owhi owha
owgu owfu owfla owfa owee owdyi owds$ owdro owdri owda owd$ owbro owboy oway
ovoi ovey ovae ouve outwo outta outstay outski outpou outpo outplay outpa
outne outma outly outle outlay outie outhpie outhpa outhou outhi outhfu outgoi
outdoo outdo outco outbui outbro outbou outa ousts$ ousta oust$ ousseau ourtya
ourtshi ourtroo ourtlie ourtie ourthou ourso oursi ourri ourpu ournme ourme
ourgla ourgeoi ourdou ouque oupy oupo oupli oupee ounty ountie ountdo ouns$
oungi oundu oundtra oundswe oundre oundne oundhou oundho oundbrea ouncie
ouncea oun$ oula ouillo ouilla ouha ougi oughtie oughie oughfa oughbre ouffa
ouetti oueme oudspea oudoi oudly oudlie oudie oudbu oucia ouchsto ouchdo oucha
ouca oubts$ oubtle oubti oubtfu oubt$ oubloo ouba ottoe otshe otpou otpie otoe
otme otlu otlie otley othoo othfu otheo otfu oteu otei otee otea otchy otca
otboi ostscri ostro ostmi ostlu ostie osthu ostgra ostco ostca osswo osswa
ossroa osspie
""".split()):
TRIPLE_SCORES[triple] = (20000 - index) / 20000.0
|
jcrocholl/nxdom
|
languages/english.py
|
Python
|
mit
| 111,819
|
[
"ASE",
"BLAST",
"GULP",
"ORCA"
] |
95435f8316647006ae39708e08b668e9e31308a28440a883d2e20c2c468a04d7
|
#
# Loxodo -- Password Safe V3 compatible Password Vault
# Copyright (C) 2008 Christoph Sommer <mail@christoph-sommer.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import os
import wx
import wx.adv
import six
from .wxlocale import _
from ...vault import Vault
from ...config import config
from .recordframe import RecordFrame
from .mergeframe import MergeFrame
from .settings import Settings
from .paths import get_resourcedir
class VaultFrame(wx.Frame):
"""
Displays (and lets the user edit) the Vault.
"""
class VaultListCtrl(wx.ListCtrl):
"""
wx.ListCtrl that contains the contents of a Vault.
"""
def __init__(self, *args, **kwds):
wx.ListCtrl.__init__(self, *args, **kwds)
self.vault = None
self._filterstring = ""
self.displayed_entries = []
self.InsertColumn(0, _("Title"))
self.InsertColumn(1, _("Username"))
self.InsertColumn(2, _("Group"))
self.SetColumnWidth(0, 256)
self.SetColumnWidth(1, 128)
self.SetColumnWidth(2, 256)
self.sort_function = lambda e: six.text_type.lower(e.group)
self.update_fields()
def OnGetItemText(self, item, col):
"""
Return display text for entries of a virtual list
Overrides the base classes' method.
"""
# Workaround for obscure wxPython behaviour that leads to an empty wx.ListCtrl sometimes calling OnGetItemText
if (item < 0) or (item >= len(self.displayed_entries)):
return "--"
if (col == 0):
return self.displayed_entries[item].title
if (col == 1):
return self.displayed_entries[item].user
if (col == 2):
return self.displayed_entries[item].group
return "--"
def update_fields(self):
"""
Update the visual representation of list.
Extends the base classes' method.
"""
if not self.vault:
self.displayed_entries = []
return
self.displayed_entries = [record for record in self.vault.records if self.filter_record(record)]
self.displayed_entries.sort(key=self.sort_function)
self.SetItemCount(len(self.displayed_entries))
wx.ListCtrl.Refresh(self)
def filter_record(self,record):
if record.title.lower().find(self._filterstring.lower()) >= 0:
return True
if record.group.lower().find(self._filterstring.lower()) >= 0:
return True
if record.user.lower().find(self._filterstring.lower()) >= 0:
return True
if config.search_notes:
if record.notes.lower().find(self._filterstring.lower()) >= 0:
return True
if config.search_passwd:
if record.passwd.find(self._filterstring) >= 0:
return True
return False
def set_vault(self, vault):
"""
Set the Vault this control should display.
"""
self.vault = vault
self.update_fields()
self.select_first()
def set_filter(self, filterstring):
"""
Sets a filter string to limit the displayed entries
"""
self._filterstring = filterstring
self.update_fields()
self.select_first()
def deselect_all(self):
"""
De-selects all items
"""
while (self.GetFirstSelected() != -1):
self.Select(self.GetFirstSelected(), False)
def select_first(self):
"""
Selects and focuses the first item (if there is one)
"""
self.deselect_all()
if (self.GetItemCount() > 0):
self.Select(0, True)
self.Focus(0)
    def __init__(self, *args, **kwds):
        """
        Build the main Vault window: search box, virtual record list,
        Vault/Record menus and a status bar.

        *args/**kwds are passed through to wx.Frame; the frame style is
        forced to wx.DEFAULT_FRAME_STYLE.
        """
        kwds["style"] = wx.DEFAULT_FRAME_STYLE
        wx.Frame.__init__(self, *args, **kwds)
        self.Bind(wx.EVT_CLOSE, self._on_frame_close)
        self.panel = wx.Panel(self, -1)
        # Search box: typing filters the list live (see _on_search_do).
        self._searchbox = wx.SearchCtrl(self.panel, size=(200, -1))
        self._searchbox.ShowCancelButton(True)
        # Virtual list: row text is supplied on demand via OnGetItemText.
        self.list = self.VaultListCtrl(self.panel, -1, size=(640, 240), style=wx.LC_REPORT|wx.SUNKEN_BORDER|wx.LC_VIRTUAL)
        self.list.Bind(wx.EVT_COMMAND_RIGHT_CLICK, self._on_list_contextmenu)
        self.list.Bind(wx.EVT_RIGHT_UP, self._on_list_contextmenu)
        # Plain keystrokes in the list are redirected to the search box.
        self.list.Bind(wx.EVT_CHAR, self._on_list_box_char)
        self.statusbar = self.CreateStatusBar(1, wx.STB_SIZEGRIP)
        # Set up menus
        filemenu = wx.Menu()
        temp_id = wx.NewId()
        filemenu.Append(temp_id, _("Change &Password") + "...")
        self.Bind(wx.EVT_MENU, self._on_change_password, id=temp_id)
        temp_id = wx.NewId()
        filemenu.Append(temp_id, _("&Merge Records from") + "...")
        self.Bind(wx.EVT_MENU, self._on_merge_vault, id=temp_id)
        filemenu.Append(wx.ID_ABOUT, _("&About"))
        self.Bind(wx.EVT_MENU, self._on_about, id=wx.ID_ABOUT)
        filemenu.Append(wx.ID_PREFERENCES, _("&Settings"))
        self.Bind(wx.EVT_MENU, self._on_settings, id=wx.ID_PREFERENCES)
        filemenu.AppendSeparator()
        filemenu.Append(wx.ID_EXIT, _("E&xit"))
        self.Bind(wx.EVT_MENU, self._on_exit, id=wx.ID_EXIT)
        # Record menu doubles as the list's context menu (_on_list_contextmenu).
        self._recordmenu = wx.Menu()
        self._recordmenu.Append(wx.ID_ADD, _("&Add\tCtrl+Shift+A"))
        self.Bind(wx.EVT_MENU, self._on_add, id=wx.ID_ADD)
        self._recordmenu.Append(wx.ID_DELETE, _("&Delete\tCtrl+Del"))
        self.Bind(wx.EVT_MENU, self._on_delete, id=wx.ID_DELETE)
        self._recordmenu.AppendSeparator()
        self._recordmenu.Append(wx.ID_PROPERTIES, _("&Edit\tCtrl+E"))
        self.Bind(wx.EVT_MENU, self._on_edit, id=wx.ID_PROPERTIES)
        self._recordmenu.AppendSeparator()
        temp_id = wx.NewId()
        self._recordmenu.Append(temp_id, _("Copy &Username\tCtrl+Shift+C"))
        self.Bind(wx.EVT_MENU, self._on_copy_username, id=temp_id)
        temp_id = wx.NewId()
        self._recordmenu.Append(temp_id, _("Copy &Password\tCtrl+C"))
        self.Bind(wx.EVT_MENU, self._on_copy_password, id=temp_id)
        temp_id = wx.NewId()
        self._recordmenu.Append(temp_id, _("Open UR&L\tCtrl+L"))
        self.Bind(wx.EVT_MENU, self._on_open_url, id=temp_id)
        temp_id = wx.NewId()
        self._recordmenu.Append(temp_id, _("Search &For Entry\tCtrl+F"))
        self.Bind(wx.EVT_MENU, self._on_search_for_entry, id=temp_id)
        menu_bar = wx.MenuBar()
        menu_bar.Append(filemenu, _("&Vault"))
        menu_bar.Append(self._recordmenu, _("&Record"))
        self.SetMenuBar(menu_bar)
        self.SetTitle("Loxodo - " + _("Vault Contents"))
        self.statusbar.SetStatusWidths([-1])
        statusbar_fields = [""]
        for i in range(len(statusbar_fields)):
            self.statusbar.SetStatusText(statusbar_fields[i], i)
        # Layout: a right-aligned search row above the expanding list.
        sizer = wx.BoxSizer(wx.VERTICAL)
        _rowsizer = wx.BoxSizer(wx.HORIZONTAL)
        self.Bind(wx.EVT_SEARCHCTRL_CANCEL_BTN, self._on_search_cancel, self._searchbox)
        self.Bind(wx.EVT_TEXT, self._on_search_do, self._searchbox)
        self._searchbox.Bind(wx.EVT_CHAR, self._on_searchbox_char)
        _rowsizer.Add(self._searchbox, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 5)
        sizer.Add(_rowsizer, 0, wx.ALIGN_RIGHT | wx.ALL, 5)
        sizer.Add(self.list, 1, wx.EXPAND, 0)
        self.panel.SetSizer(sizer)
        _sz_frame = wx.BoxSizer()
        _sz_frame.Add(self.panel, 1, wx.EXPAND)
        self.SetSizer(_sz_frame)
        sizer.Fit(self)
        self.Layout()
        self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self._on_list_item_activated, self.list)
        self.Bind(wx.EVT_LIST_COL_CLICK, self._on_list_column_click, self.list)
        self._searchbox.SetFocus()
        # Vault state: populated by open_vault()/save_vault().
        self.vault_file_name = None
        self.vault_password = None
        self.vault = None
        self._is_modified = False
def _on_list_box_char(self, key_event):
"""
Typing in the list box doesn't do anything, redirect it to the search box
"""
if not (0 < key_event.GetKeyCode() < 256):
# Arrow keys, page up, etc -- let event propagate to default handler
key_event.Skip()
return
if key_event.HasModifiers():
# ctrl (eg Ctrl-U to copy username, Ctrl-P to copy password)
key_event.Skip()
return
self._searchbox.SetFocus()
self._searchbox.EmulateKeyPress(key_event)
def mark_modified(self):
self._is_modified = True
if ((self.vault_file_name is not None) and (self.vault_password is not None)):
self.save_vault(self.vault_file_name, self.vault_password)
self.list.update_fields()
    def open_vault(self, filename, password):
        """
        Set the Vault that this frame should display.

        Reads the Vault from `filename` using `password`. Credentials are
        reset first so that, should Vault() raise, no stale file name or
        password is left behind; they are stored again only after a
        successful read (they drive the auto-save in mark_modified()).
        """
        self.vault_file_name = None
        self.vault_password = None
        self._is_modified = False
        self.vault = Vault(password, filename=filename)
        self.list.set_vault(self.vault)
        self.vault_file_name = filename
        self.vault_password = password
        self.statusbar.SetStatusText(_("Read Vault contents from disk"), 0)
def save_vault(self, filename, password):
"""
Write Vault contents to disk.
"""
try:
self._is_modified = False
self.vault_file_name = filename
self.vault_password = password
self.vault.write_to_file(filename, password)
self.statusbar.SetStatusText(_("Wrote Vault contents to disk"), 0)
except RuntimeError:
dial = wx.MessageDialog(self,
_("Could not write Vault contents to disk"),
_("Error writing to disk"),
wx.OK | wx.ICON_ERROR
)
dial.ShowModal()
dial.Destroy()
    def _clear_clipboard(self, match_text = None):
        """
        Clear the system clipboard, optionally only if it still holds match_text.

        Scheduled by _copy_to_clipboard() via wx.CallLater so a copied
        password is wiped after a timeout -- but not if the user has copied
        something else in the meantime.

        match_text -- if given, leave the clipboard untouched unless its
                      current text equals this value.
        Raises RuntimeError if the clipboard cannot be opened for the check.
        """
        if match_text:
            if not wx.TheClipboard.Open():
                raise RuntimeError(_("Could not open clipboard"))
            try:
                clip_object = wx.TextDataObject()
                if wx.TheClipboard.GetData(clip_object):
                    if clip_object.GetText() != match_text:
                        # Clipboard now holds something else -- leave it alone.
                        return
            finally:
                wx.TheClipboard.Close()
        # NOTE(review): Clear() runs after the clipboard was closed above --
        # presumably wx.TheClipboard.Clear() manages its own open/close;
        # confirm against the wxPython clipboard docs.
        wx.TheClipboard.Clear()
        self.statusbar.SetStatusText(_('Cleared clipboard'), 0)
def _copy_to_clipboard(self, text, duration = None):
if not wx.TheClipboard.Open():
raise RuntimeError(_("Could not open clipboard"))
try:
clip_object = wx.TextDataObject(text)
wx.TheClipboard.SetData(clip_object)
if duration:
wx.CallLater(duration * 1000, self._clear_clipboard, text)
finally:
wx.TheClipboard.Close()
def _on_list_item_activated(self, event):
"""
Event handler: Fires when user double-clicks a list entry.
"""
index = event.GetIndex()
self.list.deselect_all()
self.list.Select(index, True)
self.list.Focus(index)
self._on_copy_password(None)
def _on_list_column_click(self, event):
"""
Event handler: Fires when user clicks on the list header.
"""
col = event.GetColumn()
if (col == 0):
self.list.sort_function = lambda e: six.text_type.lower(e.title)
if (col == 1):
self.list.sort_function = lambda e: six.text_type.lower(e.user)
if (col == 2):
self.list.sort_function = lambda e: six.text_type.lower(e.group)
self.list.update_fields()
def _on_list_contextmenu(self, dummy):
self.PopupMenu(self._recordmenu)
    def _on_about(self, dummy):
        """
        Event handler: Fires when user chooses this menu item.

        Fills a wx.adv.AboutDialogInfo with name, version, license and
        developer credits and shows the standard about box.
        """
        # GPL v2 notice shown verbatim in the about box; the continuation
        # lines are deliberately unindented so the string carries no
        # leading whitespace.
        gpl_v2 = """This program is free software; you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software Foundation;
either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program;
if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA."""
        developers = (
            "Christoph Sommer",
            "Bjorn Edstrom (Python Twofish)",
            "Brian Gladman (C Twofish)",
            "Tim Kuhlman",
            "David Eckhoff",
            "Nick Verbeck"
            )
        about = wx.adv.AboutDialogInfo()
        # Icon is loaded from the package resource directory.
        about.SetIcon(wx.Icon(os.path.join(get_resourcedir(), "loxodo-icon.png"), wx.BITMAP_TYPE_PNG, 128, 128))
        about.SetName("Loxodo")
        about.SetVersion("0.0-git")
        about.SetCopyright("Copyright (C) 2008 Christoph Sommer <mail@christoph-sommer.de>")
        about.SetWebSite("http://www.christoph-sommer.de/loxodo")
        about.SetLicense(gpl_v2)
        about.SetDevelopers(developers)
        wx.adv.AboutBox(about)
def _on_settings(self, dummy):
"""
Event handler: Fires when user chooses this menu item.
"""
settings = Settings(self)
settings.ShowModal()
settings.Destroy()
self.list.update_fields()
def _on_change_password(self, dummy):
# FIXME: choose new SALT, B1-B4, IV values on password change? Conflicting Specs!
dial = wx.PasswordEntryDialog(self,
_("New password"),
_("Change Vault Password")
)
retval = dial.ShowModal()
password_new = dial.Value
dial.Destroy()
if retval != wx.ID_OK:
return
dial = wx.PasswordEntryDialog(self,
_("Re-enter new password"),
_("Change Vault Password")
)
retval = dial.ShowModal()
password_new_confirm = dial.Value
dial.Destroy()
if retval != wx.ID_OK:
return
if password_new_confirm != password_new:
dial = wx.MessageDialog(self,
_('The given passwords do not match'),
_('Bad Password'),
wx.OK | wx.ICON_ERROR
)
dial.ShowModal()
dial.Destroy()
return
self.vault_password = password_new
self.statusbar.SetStatusText(_('Changed Vault password'), 0)
self.mark_modified()
def _on_merge_vault(self, dummy):
wildcard = "|".join((_("Vault") + " (*.psafe3)", "*.psafe3", _("All files") + " (*.*)", "*.*"))
dialog = wx.FileDialog(self, message = _("Open Vault..."), defaultFile = self.vault_file_name, wildcard = wildcard, style = wx.FD_OPEN)
if dialog.ShowModal() != wx.ID_OK:
return
filename = dialog.GetPath()
dialog.Destroy()
dial = wx.PasswordEntryDialog(self,
_("Password"),
_("Open Vault...")
)
retval = dial.ShowModal()
password = dial.Value
dial.Destroy()
if retval != wx.ID_OK:
return
merge_vault = None
try:
merge_vault = Vault(password, filename=filename)
except Vault.BadPasswordError:
dial = wx.MessageDialog(self,
_('The given password does not match the Vault'),
_('Bad Password'),
wx.OK | wx.ICON_ERROR
)
dial.ShowModal()
dial.Destroy()
return
except Vault.VaultVersionError:
dial = wx.MessageDialog(self,
_('This is not a PasswordSafe V3 Vault'),
_('Bad Vault'),
wx.OK | wx.ICON_ERROR
)
dial.ShowModal()
dial.Destroy()
return
except Vault.VaultFormatError:
dial = wx.MessageDialog(self,
_('Vault integrity check failed'),
_('Bad Vault'),
wx.OK | wx.ICON_ERROR
)
dial.ShowModal()
dial.Destroy()
return
oldrecord_newrecord_reason_pairs = [] # list of (oldrecord, newrecord, reason) tuples to merge
for record in merge_vault.records:
# check if corresponding record exists in current Vault
my_record = None
for record2 in self.vault.records:
if record2.is_corresponding(record):
my_record = record2
break
# record is new
if not my_record:
oldrecord_newrecord_reason_pairs.append((None, record, _("new")))
continue
# record is more recent
if record.is_newer_than(my_record):
oldrecord_newrecord_reason_pairs.append((my_record, record, _('updates "%s"') % my_record.title))
continue
dial = MergeFrame(self, oldrecord_newrecord_reason_pairs)
retval = dial.ShowModal()
oldrecord_newrecord_reason_pairs = dial.get_checked_items()
dial.Destroy()
if retval != wx.ID_OK:
return
for (oldrecord, newrecord, reason) in oldrecord_newrecord_reason_pairs:
if oldrecord:
oldrecord.merge(newrecord)
else:
self.vault.records.append(newrecord)
self.mark_modified()
def _on_exit(self, dummy):
"""
Event handler: Fires when user chooses this menu item.
"""
self.Close(True) # Close the frame.
def _on_edit(self, dummy):
"""
Event handler: Fires when user chooses this menu item.
"""
index = self.list.GetFirstSelected()
if (index is None):
return
entry = self.list.displayed_entries[index]
recordframe = RecordFrame(self)
recordframe.vault_record = entry
if recordframe.ShowModal() != wx.ID_CANCEL:
self.mark_modified()
recordframe.Destroy()
def _on_add(self, dummy):
"""
Event handler: Fires when user chooses this menu item.
"""
entry = self.vault.Record.create()
recordframe = RecordFrame(self)
recordframe.vault_record = entry
if recordframe.ShowModal() != wx.ID_CANCEL:
self.vault.records.append(entry)
self.mark_modified()
recordframe.Destroy()
def _on_delete(self, dummy):
"""
Event handler: Fires when user chooses this menu item.
"""
index = self.list.GetFirstSelected()
if (index == -1):
return
entry = self.list.displayed_entries[index]
if ((entry.user != "") or (entry.passwd != "")):
dial = wx.MessageDialog(self,
_("Are you sure you want to delete this record? It contains a username or password and there is no way to undo this action."),
_("Really delete record?"),
wx.YES_NO | wx.YES_DEFAULT | wx.ICON_QUESTION
)
retval = dial.ShowModal()
dial.Destroy()
if retval != wx.ID_YES:
return
self.vault.records.remove(entry)
self.mark_modified()
def _on_copy_username(self, dummy):
"""
Event handler: Fires when user chooses this menu item.
"""
index = self.list.GetFirstSelected()
if (index == -1):
return
entry = self.list.displayed_entries[index]
try:
self._copy_to_clipboard(entry.user)
self.statusbar.SetStatusText(_('Copied username of "%s" to clipboard') % entry.title, 0)
except RuntimeError:
self.statusbar.SetStatusText(_('Error copying username of "%s" to clipboard') % entry.title, 0)
def _on_copy_password(self, dummy):
"""
Event handler: Fires when user chooses this menu item.
"""
index = self.list.GetFirstSelected()
if (index == -1):
return
entry = self.list.displayed_entries[index]
try:
self._copy_to_clipboard(entry.passwd, duration=10)
self.statusbar.SetStatusText(_('Copied password of "%s" to clipboard') % entry.title, 0)
except RuntimeError:
self.statusbar.SetStatusText(_('Error copying password of "%s" to clipboard') % entry.title, 0)
def _on_open_url(self, dummy):
"""
Event handler: Fires when user chooses this menu item.
"""
index = self.list.GetFirstSelected()
if (index == -1):
return
entry = self.list.displayed_entries[index]
try:
import webbrowser
webbrowser.open(entry.url)
except ImportError:
self.statusbar.SetStatusText(_('Could not load python module "webbrowser" needed to open "%s"') % entry.url, 0)
    def _on_search_for_entry(self, dummy):
        """
        Event handler: Fires when user chooses this menu item.

        Moves keyboard focus to the search box and selects its contents so
        that typing replaces the previous query.
        """
        self._searchbox.SetFocus()
        self._searchbox.SelectAll()
    def _on_search_do(self, dummy):
        """
        Event handler: Fires when user interacts with search field

        Filters the record list by the current search-box text.
        """
        self.list.set_filter(self._searchbox.GetValue())
    def _on_search_cancel(self, dummy):
        """
        Event handler: Fires when user interacts with search field

        Clears the search box text.
        """
        self._searchbox.SetValue("")
    def _on_frame_close(self, dummy):
        """
        Event handler: Fires when user closes the frame

        Destroys the frame and its child windows.
        """
        self.Destroy()
def _on_searchbox_char(self, evt):
"""
Event handler: Fires when user presses a key in self._searchbox
"""
# If "Enter" was pressed, ignore key and copy password of first match
if evt.GetKeyCode() == wx.WXK_RETURN:
self._on_copy_password(None)
return
# If "Escape" was pressed, ignore key and clear the Search box
if evt.GetKeyCode() == wx.WXK_ESCAPE:
self._on_search_cancel(None)
return
# If "Up" or "Down" was pressed, ignore key and focus self.list
if evt.GetKeyCode() in (wx.WXK_UP, wx.WXK_DOWN):
self.list.SetFocus()
return
# Ignore all other keys
evt.Skip()
|
sommer/loxodo
|
src/frontends/wx/vaultframe.py
|
Python
|
gpl-2.0
| 24,282
|
[
"Brian"
] |
3d0ced28a677d428a5dc97f768109bc692cf92c0664ccc133593f90a7210fc15
|
# Copyright 2018 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for third_party.nucleus.util.vcf_constants."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# Work around a namespace-package clash: if a bare "google" module was
# imported before google.protobuf, drop it so the protobuf package can
# be imported cleanly below.
if 'google' in sys.modules and 'google.protobuf' not in sys.modules:
  del sys.modules['google']
from absl.testing import absltest
from absl.testing import parameterized
from third_party.nucleus.protos import variants_pb2
from third_party.nucleus.util import struct_utils
from third_party.nucleus.util import vcf_constants
class VcfConstantsTest(parameterized.TestCase):
  """Tests for the reserved VCF FILTER/INFO/FORMAT constants and accessors.

  Note: all assertRaisesRegexp calls were updated to assertRaisesRegex;
  the Regexp spelling is a deprecated alias that was removed in
  Python 3.12.
  """

  def test_unique_reserved_filter(self):
    """All reserved FILTER field IDs are distinct."""
    num_reserved_filter = len(vcf_constants.RESERVED_FILTER_FIELDS)
    unique_filt_ids = {filt.id for filt in vcf_constants.RESERVED_FILTER_FIELDS}
    self.assertLen(unique_filt_ids, num_reserved_filter)

  def test_unique_reserved_info(self):
    """All reserved INFO field IDs are distinct."""
    num_reserved_info = len(vcf_constants.RESERVED_INFO_FIELDS)
    unique_info_ids = {info.id for info in vcf_constants.RESERVED_INFO_FIELDS}
    self.assertLen(unique_info_ids, num_reserved_info)

  def test_unique_reserved_format(self):
    """All reserved FORMAT field IDs are distinct."""
    num_reserved_format = len(vcf_constants.RESERVED_FORMAT_FIELDS)
    unique_format_ids = {f.id for f in vcf_constants.RESERVED_FORMAT_FIELDS}
    self.assertLen(unique_format_ids, num_reserved_format)

  def test_get_reserved_filter(self):
    """reserved_filter_field returns the proto for a known FILTER id."""
    filt = vcf_constants.reserved_filter_field('PASS')
    self.assertIsInstance(filt, variants_pb2.VcfFilterInfo)
    self.assertEqual(filt.id, 'PASS')
    self.assertEqual(filt.description, 'All filters passed')

  @parameterized.parameters(
      'RefCall',
      'LowQual',
      'AD',
      'DP',
      'GT',
      'GQ',
  )
  def test_invalid_get_reserved_filter(self, field_id):
    """Non-FILTER ids (including INFO/FORMAT ids) raise ValueError."""
    with self.assertRaisesRegex(ValueError, 'No reserved field with id'):
      vcf_constants.reserved_filter_field(field_id)

  @parameterized.parameters(
      'AA',
      'AC',
      'AD',
      'ADF',
      'END',
      'H2',
  )
  def test_get_reserved_info(self, field_id):
    """reserved_info_field returns the proto for each known INFO id."""
    info = vcf_constants.reserved_info_field(field_id)
    self.assertIsInstance(info, variants_pb2.VcfInfo)
    self.assertEqual(info.id, field_id)

  @parameterized.parameters(
      'PASS',
      'GT',
      'GQ',
      'GL',
      'FT',
  )
  def test_invalid_get_reserved_info(self, field_id):
    """Non-INFO ids raise ValueError."""
    with self.assertRaisesRegex(ValueError, 'No reserved field with id'):
      vcf_constants.reserved_info_field(field_id)

  @parameterized.parameters(
      'AD',
      'ADF',
      'DP',
      'GT',
      'GQ',
      'GL',
      'FT',
      'PL',
  )
  def test_get_reserved_format(self, field_id):
    """reserved_format_field returns the proto for each known FORMAT id."""
    fmt = vcf_constants.reserved_format_field(field_id)
    self.assertIsInstance(fmt, variants_pb2.VcfFormatInfo)
    self.assertEqual(fmt.id, field_id)

  @parameterized.parameters(
      'PASS',
      'AN',
      '1000G',
      'END',
      'H2',
  )
  def test_invalid_get_reserved_format(self, field_id):
    """Non-FORMAT ids raise ValueError."""
    with self.assertRaisesRegex(ValueError, 'No reserved field with id'):
      vcf_constants.reserved_format_field(field_id)

  @parameterized.parameters(
      dict(
          value_type=vcf_constants.CHARACTER_TYPE,
          values=['a'],
          number='1',
          expected='a'),
      dict(
          value_type=vcf_constants.CHARACTER_TYPE,
          values=['b'],
          number='.',
          expected=['b']),
      dict(
          value_type=vcf_constants.CHARACTER_TYPE,
          values=['c', 'd'],
          number='R',
          expected=['c', 'd']),
      dict(
          value_type=vcf_constants.FLAG_TYPE,
          values=[True],
          number='0',
          expected=True),
      dict(
          value_type=vcf_constants.FLOAT_TYPE,
          values=[2.5],
          number='1',
          expected=2.5),
      dict(
          value_type=vcf_constants.FLOAT_TYPE,
          values=[2.5],
          number='.',
          expected=[2.5]),
      dict(
          value_type=vcf_constants.FLOAT_TYPE,
          values=[2.5, 3.5],
          number='A',
          expected=[2.5, 3.5]),
      dict(
          value_type=vcf_constants.INTEGER_TYPE,
          values=[2, 3, 4],
          number='G',
          expected=[2, 3, 4]),
      dict(
          value_type=vcf_constants.STRING_TYPE,
          values=['a', 'bc'],
          number='.',
          expected=['a', 'bc']),
  )
  def test_create_get_fn(self, value_type, values, number, expected):
    """Round-trips values through the matching set/get functions.

    A field with Number='1' (or '0' for flags) is returned as a scalar;
    any other Number is returned as a list.
    """
    info = variants_pb2.Variant().info
    set_fn = vcf_constants.SET_FN_LOOKUP[value_type]
    set_fn(info, 'field', values)
    get_fn = vcf_constants.create_get_fn(value_type, number)
    actual = get_fn(info, 'field')
    self.assertEqual(actual, expected)

  @parameterized.parameters(
      dict(field='CIGAR', expected=struct_utils.set_string_field),
      dict(field='DP', expected=struct_utils.set_int_field),
      dict(field='MQ', expected=struct_utils.set_number_field),
      dict(field='SOMATIC', expected=struct_utils.set_bool_field),
  )
  def test_reserved_info_field_set_fn(self, field, expected):
    """Each reserved INFO id maps to the type-appropriate setter."""
    actual = vcf_constants.reserved_info_field_set_fn(field)
    self.assertIs(actual, expected)

  @parameterized.parameters(
      dict(field='INVALID'),
      dict(field='EC'),
      dict(field='HQ'),
  )
  def test_invalid_reserved_info_field_set_fn(self, field):
    """Unknown or FORMAT-only ids have no INFO setter."""
    with self.assertRaisesRegex(ValueError, 'Unknown reserved INFO field:'):
      vcf_constants.reserved_info_field_set_fn(field)

  def test_reserved_info_field_get_fn(self):
    """The INFO getter for a Number='1' field returns a scalar."""
    info = variants_pb2.Variant().info
    values = ['C']
    struct_utils.set_string_field(info, 'AA', values)
    get_fn = vcf_constants.reserved_info_field_get_fn('AA')
    actual = get_fn(info, 'AA')
    self.assertEqual(actual, values[0])

  @parameterized.parameters(
      dict(field='INVALID'),
      dict(field='EC'),
      dict(field='HQ'),
  )
  def test_invalid_reserved_info_field_get_fn(self, field):
    """Unknown or FORMAT-only ids have no INFO getter."""
    with self.assertRaisesRegex(ValueError,
                                'Unknown reserved INFO field to get:'):
      vcf_constants.reserved_info_field_get_fn(field)

  @parameterized.parameters(
      dict(field='AD', expected=struct_utils.set_int_field),
      dict(field='GL', expected=struct_utils.set_number_field),
      dict(field='FT', expected=struct_utils.set_string_field),
  )
  def test_reserved_format_field_set_fn(self, field, expected):
    """Each reserved FORMAT id maps to the type-appropriate setter."""
    actual = vcf_constants.reserved_format_field_set_fn(field)
    self.assertIs(actual, expected)

  @parameterized.parameters(
      dict(field='INVALID'),
      dict(field='CIGAR'),
      dict(field='H2'),
  )
  def test_invalid_reserved_format_field_set_fn(self, field):
    """Unknown or INFO-only ids have no FORMAT setter."""
    with self.assertRaisesRegex(ValueError, 'Unknown reserved FORMAT field:'):
      vcf_constants.reserved_format_field_set_fn(field)

  def test_reserved_format_field_get_fn(self):
    """The FORMAT getter for a multi-valued field returns a list."""
    info = variants_pb2.VariantCall().info
    expected = [0.2, 0.5, 0.3]
    struct_utils.set_number_field(info, 'GP', expected[:])
    get_fn = vcf_constants.reserved_format_field_get_fn('GP')
    actual = get_fn(info, 'GP')
    self.assertEqual(actual, expected)

  @parameterized.parameters(
      dict(field='INVALID'),
      dict(field='CIGAR'),
      dict(field='H2'),
  )
  def test_invalid_reserved_format_field_get_fn(self, field):
    """Unknown or INFO-only ids have no FORMAT getter."""
    with self.assertRaisesRegex(ValueError,
                                'Unknown reserved FORMAT field to get:'):
      vcf_constants.reserved_format_field_get_fn(field)
# Run the test suite when executed directly; absltest handles flag
# parsing and the process exit code.
if __name__ == '__main__':
  absltest.main()
|
google/deepvariant
|
third_party/nucleus/util/vcf_constants_test.py
|
Python
|
bsd-3-clause
| 8,955
|
[
"ADF"
] |
d51cdcaeb2ec44c75af6be3cbf473872c92df9c2d8808143e223d60d70954356
|
import sys
import subprocess
import os
import os.path
import shutil
import argparse
import tempfile
import re
import perform_stats
import time
import pickle
from parse_pra_output import parse_pra_output
from checkpoint_jobs import *
# Determine which execution environment we are on ("redhawk", "oakley",
# or "osx") by reading the first line of locations.txt; abort with a
# diagnostic if the file is missing or holds an unknown value.
try:
    fp = open("locations.txt")
except IOError:
    # The original called sys.stderr.println(), which does not exist in
    # Python and raised AttributeError instead of printing the message.
    sys.stderr.write("locations.txt file not present.\n")
    exit(1)
location = fp.readline().strip()
if not (location.lower() in {'redhawk', 'oakley', 'osx'}):
    sys.stderr.write("locations.txt: Bad content\n")
    exit(1)
wait_time = 100 # Amount of time to spin before checking job progress in a wait() call.
                # Need to be higher on Oakley.
sleep_pause = 60    # Seconds to sleep between polls of running jobs.
#################################################################
# The following global variables are related to debugging issues.
show_progress = False   # Echo progress reports to stderr (set by --sp).
stats_only = False      # Remove files not needed for stats analysis (set by --stats_only).
job_index = {}          # Per-prefix counters used to generate unique job names.
default_time_limit = "4:00:00"    # Default per-job walltime, HH:MM:SS (overridable by --tl).
#default_time_limit = "00:20:00"
rm_time_limit = "25:00:00"        # Longer walltime reserved for RepeatMasker jobs.
#rm_time_limit = "2:00:00"
time_limit = default_time_limit   # Active per-job walltime; reassigned in parse_params.
timing = False          # True when the whole program run is bounded by --pwt.
timing_jobs = False     # True when jobs are submitted via submit_timed_job().
start_time = None       # Timing bookkeeping; set elsewhere when timing is enabled.
quit_time = None        # Timing bookkeeping; set elsewhere when timing is enabled.
prog_walltime = None    # Program-level walltime in seconds (parsed from --pwt).
safety_margin = None    # Seconds left on the clock when state saving begins (--sm).
continue_prev = False   # True when resuming a previously checkpointed run (--cp).
check_fname = 'reval.dat'   # Checkpoint file; prefixed with the results dir in parse_params.
log_fname = 'reval.log'     # Progress log; prefixed with the results dir in parse_params.
# Section delimiters used to structure the checkpoint file.
flist_start = "START_FILE_LIST"
flist_end = "END_FILE_LIST"
csjobs_start = "START_CHROM_SIM_JOBS"
csjobs_end = "END_CHROM_SIM_JOBS"
tjobs_start = "START_TOOL_JOBS"
tjobs_end = "END_TOOL_JOBS"
rmjobs_start = "START_REPMASK_JOBS"
rmjobs_end = "END_REPMASK_JOBS"
prajobs_start = "START_PRA_JOBS"
prajobs_end = "END_PRA_JOBS"
jobdic_start = "START_JOB_DICT"
jobdic_end = "END_JOB_DICT"
blast_db_start = "START_BLAST_DB"
blast_db_end = "END_BLAST_DB"
stats_start = "START_STATS_JOBS"
stats_end = "END_STATS_JOBS"
#################################################################
def print_time():
    """Return the current local date and time as a short formatted string."""
    now = time.localtime()
    return time.strftime("%x %X", now)
#################################################################
# These global variables have to do with executable locations.
# Executable paths plus cluster parameters for an OS X workstation
# (no batch scheduler; one processor per node, no special architectures).
MacLocations = {'build_lmer_table':'/usr/local/RepeatScout/build_lmer_table',
                'RptScout':'/usr/local/RepeatScout/RepeatScout',
                'filter_stage-1':'/usr/local/RepeatScout/filter-stage-1.prl',
                'filter_stage-2':'/usr/local/RepeatScout/filter-stage-2.prl',
                'raider':'./raider',
                'raider_pre':'./raider_pre',
                'bigfoot':'./bigfoot',
                'python':'python3.4',
                'araider':'./araider',
                'raider2': './phRAIDER',
                'rm_modules': None,
                'RepeatMasker' : 'RepeatMasker',
                'proc_per_node' : 1,
                'basic_arch_type' : None,
                'high_mem_arch' : None}
# Executable paths plus cluster parameters for the Redhawk cluster
# (modules must be loaded for RepeatMasker; 4 processors per node).
RedhawkLocations = {'build_lmer_table':'./build_lmer_table',
                    'RptScout':'./RepeatScout',
                    'filter_stage-1':'./filter-stage-1.prl',
                    'filter_stage-2':'./filter-stage-2.prl',
                    'raider':'./raider',
                    'raider_pre':'./raider_pre',
                    'bigfoot':'./bigfoot',
                    'python':'python3.3',
                    'araider':'./araider',
                    'raider2': './phRAIDER',
                    'rm_modules' : ['RepeatMasker', 'python-3.3.3'],
                    'RepeatMasker' : 'RepeatMasker',
                    'proc_per_node' : 4,
                    'basic_arch_type' : ["n09","bigmem"],
                    'high_mem_arch' : 'redhawk'}
# Executable paths plus cluster parameters for the Oakley cluster
# (12 processors per node; dedicated high-memory architecture).
OakleyLocations = {'build_lmer_table':'./build_lmer_table',
                   'RptScout':'./RepeatScout',
                   'filter_stage-1':'./filter-stage-1.prl',
                   'filter_stage-2':'./filter-stage-2.prl',
                   'raider':'./raider',
                   'raider_pre':'./raider_pre',
                   'bigfoot':'./bigfoot',
                   'python':'python',
                   'araider':'./araider',
                   'raider2': './phRAIDER',
                   'rm_modules' : None,
                   'RepeatMasker' : 'RepeatMasker',
                   'proc_per_node' : 12,
                   'basic_arch_type' : None,
                   'high_mem_arch' : 'oakley'}
Locations = None; # This will be set to one of the above two, and references to find exectuable locations.
#########
# Utility functions
def sum_resources(T1, T2):
    """Combine two resource-usage 4-tuples.

    The first two fields are added, the last two are maxed.  A leading
    field of -1 marks unknown usage and is contagious: the combined
    result is all -1s.
    """
    if -1 in (T1[0], T2[0]):
        return [-1] * 4
    combined = [T1[0] + T2[0], T1[1] + T2[1]]
    combined.append(max(T1[2], T2[2]))
    combined.append(max(T1[3], T2[3]))
    return combined
def get_job_index(s):
    """Return the next 0-based sequence number for job-name prefix s.

    Uses the module-level job_index dict as a per-prefix counter.
    """
    global job_index
    current = job_index.get(s, 0)
    job_index[s] = current + 1
    return current
def file_base(file):
    """Return the final path component (basename) of *file*."""
    return os.path.split(file)[1]
def file_dir(file):
    """Return the directory portion of *file*, without a trailing slash.

    Replaces a fragile implementation built on str.rstrip: rstrip strips
    a *character set*, not a suffix, so it only worked by the accident
    that the '/' separator stopped the stripping, and it returned ""
    instead of "/" for root-level paths such as "/abc".
    """
    return os.path.dirname(file)
def parse_redhawk_time(time_str):
    """Convert a redhawk walltime string into a total number of seconds.

    Accepts "HH:MM:SS", "MM:SS", or plain "SS" (each colon-separated
    field is weighted by successive powers of 60, Horner-style).
    """
    total = 0
    for field in time_str.split(":"):
        total = total * 60 + int(field)
    #print(time_str, '/t', total)
    return total
def convert_seed(seed):
    """Expand an abbreviated spaced seed into its full form.

    Each "c^{n}" run is replaced by n copies of the character c, e.g.
    "1^{2}0^{3}1^{2}" => "1100011".

    NOTE(review): the original docstring showed the example
    "1{2}0{3}1{2}" => "1100011", but the implementation only expands
    runs introduced by '^'; plain "c{n}" is left untouched.  Confirm
    which abbreviation format callers actually produce.
    """
    i = 0
    while (i < len(seed)-1):
        if seed[i+1] == '^':
            j = i+2
            assert seed[j] == "{"  # '^' must be immediately followed by "{n}"
            k = j+1
            while seed[k] != '}':
                k += 1
            n = int(seed[j+1:k])
            # Replace "c^{n}" with c repeated n times; scanning resumes
            # just past the inserted run.
            seed = seed[:i] + seed[i]*n + seed[k+1:]
        i += 1
    return seed
def parse_params(args):
    """Parse command line arguments using the argparse library.

    args: list of command-line tokens (e.g. sys.argv[1:]).

    Returns the argparse namespace.  As a side effect, also updates the
    module-level globals time_limit, show_progress, stats_only,
    prog_walltime, timing, timing_jobs, safety_margin, continue_prev,
    check_fname and log_fname, and normalizes the tool-selection flags
    (--all_tools / --simulate_only overrides).
    """
    parser = argparse.ArgumentParser(description = "Evaluate RAIDER against RepeatScout")
    # GENERAL ARGUMENTS
    #parser2 = parser.add_mutually_exclusive_group()
    #parser2.add_argument('--organize', action = "store_true", help = "Create directory for all Raider Eval output", default = False)
    #parser2.add_argument('--no', '--named_organize', dest = "named_organize", help = "Organize under a named directory", default = None)
    # TOOL SELECTION
    parser_tools = parser.add_argument_group("tool selection (all on by default)")
    parser_tools.add_argument('-R', '--raider_on', dest = 'run_raider', action = 'store_true', help = 'Turn RAIDER on', default = False)
    parser_tools.add_argument('--R2', '--raider2_on', dest = 'run_raider2', action = 'store_true', help = 'Turn RAIDERV2 on', default = False)
    parser_tools.add_argument('--AR', '--araider_on', dest = 'run_araider', action = 'store_true', help = 'Turn ARAIDER on', default = False)
    parser_tools.add_argument('--RS', '--repscout_on', dest = 'run_repscout', action = 'store_true', help = 'Turn RAIDER on', default = False)
    parser_tools.add_argument('-B', '--bigfoot_on', dest = 'run_bigfoot', action = 'store_true', help = 'Turn BIGFOOT on', default = False)
    parser_tools.add_argument('-P', '--piler_on', dest = 'run_piler', action = 'store_true', help = 'Turn PILER on', default = False)
    parser_tools.add_argument('-A', '--all_tools', dest = 'all_tools', action = 'store_true', help = 'Turn all tools on (overide all other tool arguments)', default = False)
    parser_tools.add_argument('--A2', '--all_tools2', dest = 'all_tools2', action = 'store_true', help = 'Turn all tools on except araider (overide all other tool arguments)', default = False)
    parser_tools.add_argument('--tl', '--time_limit', dest = 'time_limit', help = 'Redhawk time limit (max: 400:00:00 default: 4:00:00)', default = default_time_limit)
    parser_tools.add_argument("--mn", '--max_nodes', dest = "max_nodes", action="store_true", help="Reserve all nodes of a processor for each tool (disabled by default).", default=False)
    # Will later add: RepeatModeler, RECON, PILER (other?)
    # I/O ARGUMENTs
    parser_io = parser.add_argument_group("i/o arguments")
    parser_io.add_argument('-r', '--results_dir', dest = "results_dir", help = "Directory containing all results", default = "EVAL")
    parser_io.add_argument('--nuke', dest ='nuke', action = "store_true", help = "Nuke the results directory", default = False)
    parser_io.add_argument('--rd', '--raider_dir', dest = "raider_dir", help = "Subdirectory containing raider results", default = "RAIDER")
    parser_io.add_argument('--ard', '--araider_dir', dest = "araider_dir", help = "Subdirectory containing araider results", default = "ARAIDER")
    parser_io.add_argument('--r2d', '--raider2_dir', dest = "raider2_dir", help = "Subdirectory containing araider results", default = "RAIDERV2")
    parser_io.add_argument('--rsd', '--rptscout_dir', dest = 'rptscout_dir', help = "Subdirectory containing rpt scout results", default = "RPT_SCT")
    parser_io.add_argument('--bfd', '--bigfoot_dir', dest = 'bigfoot_dir', help = "Subdirectory containing bigfoot results", default = "BIGFOOT")
    parser_io.add_argument('--pd', '--pilder_dir', dest = 'piler_dir', help = "Subdirectory containing piler results", default = "PILER")
    parser_io.add_argument('--dd', '--data_dir', dest = 'data_dir', help = "Directory containing the resulting simulated chromosome", default = "SOURCE_DATA")
    # NOTE(review): the --hj flag is added to parser_tools although it sits in
    # the i/o section -- confirm the intended grouping.
    parser_tools.add_argument('--hj', '--hooke_jeeves', dest = 'hooke_jeeves', action = 'store_true', help = 'Simply print the tp+tn statistics counts', default = False)
    # RAIDER ARGUMENTS
    raider_argument = parser.add_argument_group("RAIDER parameters")
    raider_argument.add_argument('-f', type = int, help = "E.R. occurrence threshold", default = 5)
    raider_argument.add_argument('-d', '--output_dir', help = "Raider output directory", default = None)
    raider_argument.add_argument('-e', '--output_ext', help = "Output Extension", default = None)
    raider_argument.add_argument('-C', '--cleanup_off', dest = "cleanup", action = "store_false", help = "Turn off file cleanup", default = True)
    raider_argument.add_argument('--raider_min', '--raider_min', type = int, help = "Minimum repeat length. Defaults to pattern length.", default = None)
    raider_argument.add_argument('--pre', '--pre_scan', action = 'store_true', help = "Use pre-scan version of raider", default = False)
    raider_argument.add_argument('--mem', action = 'store_true', help = "Use large memory-nodes", default = False);
    seed_group = raider_argument.add_mutually_exclusive_group(required = False)
    seed_group.add_argument('-s', '--seed', dest = "seed", help = "Spaced seed string", default = "111111111111111111111111111111")
    seed_group.add_argument('--sf', '--seed_file', dest = 'seed_file', help = 'File containing raider seeds', default = None)
    # RAIDER2 ARGUMENTS
    raider2_argument = parser.add_argument_group("RAIDER2 parameters")
    raider2_argument.add_argument('--age', type = int, help="Use older version of raider2", default=1)
    raider2_argument.add_argument('--aa', '--all_ages', dest="all_ages", action="store_true", help="Run all ages of raider2", default=False) # type = int, help="Use older version of raider", default=0)
    #raider2_argument.add_argument('--multi', '--multi_seed', dest="multi_seed", action="store_true", help="Run all seeds in seed file concurrently",default=False)
    raider2_argument.add_argument('--na', '--no_family_array', dest="family_array", action="store_false", help="Disable family array in Raider2", default=True)
    raider2_argument.add_argument('--ex', '--excise', dest="excising", action="store_true", help="Enable excising in RAIDER2", default=False)
    raider2_argument.add_argument('--no', '--no_overlaps', dest="overlaps", action="store_false", help="Do not require overlaps in RAIDER2", default=True)
    raider2_argument.add_argument('--tu', '--tie_up', dest="tieup", action="store_true", help="Enable alternative tie ups", default=False)
    raider2_argument.add_argument('--ps', '--prosplit', dest="prosplit", action="store_true", help="Enable proactive splitting(disabled by default).", default=False)
    raider2_argument.add_argument("--pf", '--prevfam', dest="prevfam", action="store_true", help="Enable pointers to prev family (disabled by default).", default=False)
    # REPSCOUT ARGUMENTS
    repscout_argument = parser.add_argument_group("REPSCOUT parameters")
    repscout_argument.add_argument('--repscout_min', type = int, help = "Minimum repeat length for repscout.", default = 10)
    repscout_argument.add_argument('--rs_min_freq', type = int, help = "Minimum repeat length for repscout.", default = 3)
    repscout_argument.add_argument('--rs_filters', type = int, dest = "rs_filters", help = "Specify how many RS filters to use {0,1,2}. 3 specifies to run all versions", default = 0)
    #raider_argument.add_argument('--uff', '--use_first_filter', dest = "use_first_filter", action = "store_true", help = "Use the first RepScout filter", default = True)
    #raider_argument.add_argument('--usf', '--use_second_filter', dest = "use_second_filter", action = "store_true", help = "Use the second RepScout filter", default = True)
    # BIGFOOT ARGUMENTS
    bigfoot_arguments = parser.add_argument_group("BIGFOOT parameters")
    bigfoot_arguments.add_argument('-L', '--bigfoot_L', type = int, help = "Minimum repeat length. Defaults to 20.", default = 20)
    bigfoot_arguments.add_argument('-min', '--bigfoot_min', type = int, help = "E.R. occurrence threshold", default = 2)
    bigfoot_arguments.add_argument('-I', '--bigfoot_I', type = float, help = "Minimum percent frequency of more frequent Lmer a less frequent Lmer must have to be part of the same family", default = 0.75)
    bigfoot_arguments.add_argument('-T', '--bigfoot_T', type = float, help = "Minimum percent of time a base must occur after an Lmer to be considered significant", default = 0.75)
    # REPEAT MASKER ARGUMENTS
    repeatmasker_arguments = parser.add_argument_group("RepeatMasker parameters")
    repeatmasker_arguments.add_argument('--masker_dir', help = "Repeat masker output directory", default = None)
    repeatmasker_arguments.add_argument('-p', '--pa', type = int, help = "Number of processors will be using", default = 1)
    # STATISTICS ARGUMENT
    stats_group = parser.add_argument_group(title = "Statistics argument")
    stats_group.add_argument('--stats_dir', dest = 'stats_dir', help = "Statistics output directory", default = "STATS_OUTPUT")
    stats_group.add_argument('--stats_file', dest = 'stats_file', help = "Statistics output file", default = "stats.txt")
    stats_group.add_argument('--stats_only', dest = 'stats_only', action = 'store_true', help = 'Remove files not involved in stats analysis', default = False)
    #stats_group.add_argument('--print_reps', action = "store_true", help = "Print out repeats in statistics file", default = False)
    # DEBUGGING ARGUMENTS
    debug_group = parser.add_argument_group(title = "debugging")
    debug_group.add_argument('--sp', '--show_progress', dest = 'show_progress', action = 'store_true', help = "Print reports on program progress to stderr", default = False)
    debug_group.add_argument('--so', '--simulate_only', dest = 'simulate_only', action = 'store_true', help = "Quit after creating simulated file", default = False)
    # ANALYSIS
    parser_analysis = parser.add_argument_group("Analysis options")
    parser_analysis.add_argument('--PRA', '--pre_rm_analysis_off', dest = 'pra', action = 'store_false', help = 'Turn off pre-RM stats. analysis', default = True)
    parser_analysis.add_argument('--RA', '--rm_analysis_off', dest = 'repmask', action = 'store_false', help = 'Turn off RM stats. analysis', default = True)
    parser_analysis.add_argument('--ce', '--class_exclude', dest = 'exclude', action = 'store', help = 'File of family classes to exclude from PRA analysis', default = 'exclude.txt')
    ### KARRO END
    # CONTINUE PREVIOUS RUN ARGUMENTS
    cont_group = parser.add_argument_group(title = "continuing previous")
    cont_group.add_argument('--timing_jobs', dest = 'timing_jobs', action = 'store_true', help = "Set up timed jobs", default = False)
    cont_group.add_argument('--pwt', '--prog_walltime', dest = 'prog_walltime', help = 'Redhawk time limit for program', default = None)
    cont_group.add_argument('--cp', '--continue_prev', dest = 'continue_prev', action = 'store_true', help = "Continue previously started job", default = False)
    cont_group.add_argument('--sm', '--safe_marg', dest = 'safety_margin', help = "Amount of time left on clock (secs) when start to save run state", default = None)
    subparsers = parser.add_subparsers(dest="subparser_name")
    # SEQUENCE FILE OPTION ARGUMENTS
    parser_seqs = subparsers.add_parser("seq_files")
    parser_seqs.add_argument('seq_files', nargs = '+', help = "Use files directly (no simulation)", default = None)
    # CHROMOSOME SIMULATION OPTION ARGUMENTS
    parser_chrom = subparsers.add_parser("chrom_sim")
    parser_chrom.add_argument('-k', type = int, help = "Order of markov chain", default = 5) # KARRO: Added this
    parser_chrom.add_argument('--rng_seed', type = int, help = "RNG seed", default = None)
    parser_chrom.add_argument('-n', '--negative_strand', action = "store_true", help = "Use repeats on negative string", default = False)
    parser_chrom.add_argument('--family_file', help = "List of repeat families to use", default = None)
    parser_chrom.add_argument('--mc', '--mc_file', dest = 'mc_file', help = "Markov Chain file", default = False)
    parser_chrom.add_argument('--mi', '--max_interval', dest = "max_interval", type = int,
                              help = "Maximum allowed length of interval between repeats; -1 value (default) means no maximum", default = None)
    parser_chrom.add_argument('--rn', '--retain_n', dest = "retain_n", action = 'store_true',
                              help = "If used, will use the whole chromosome.  Otherwise, cuts of Ns at either end.", default = False)
    parser_chrom.add_argument('--nr', '--num_repeats', dest = 'num_repeats', type = int,
                              help = "Specify the number of repeats.  Simulation will terminate either 1000 bases or max interval bases past the nth instance of a repeat (excluding any other repeats in that range).", default = None)
    parser_chrom.add_argument('-l', '--length', type = int, help = "Simulated sequence length", default = None)
    parser_chrom.add_argument('-o', '--output', help = "Output file (Default: replace chromosome file \".fa\" with \".sim.fa\")")
    parser_chrom.add_argument('-t', '--num_sims', type = int, dest = "num_sims", help ="Number of simulations", default = 1)
    parser_chrom.add_argument('--lc', '--low_complexity', dest = 'low_complexity', action = 'store_false', help = "Toss low complexity and simple repeats (tossed by default)", default = True)
    parser_chrom.add_argument('--st', '--sim_type', dest = 'sim_type', type = int, help = "0 = use mdern sequence exactly (default); 1 = use ancestor fragment; 2 = preserve mutations, but not indels; 3 = preserve indels, but not mutations", default = 0)
    parser_chrom.add_argument('chromosome', help = "Template chromosome file")
    arg_return = parser.parse_args(args)
    # Propagate parsed options into the module-level configuration globals.
    global time_limit
    time_limit = arg_return.time_limit
    global show_progress
    show_progress = arg_return.show_progress
    global stats_only
    stats_only = arg_return.stats_only
    ###
    # Update global vars related to continuing previous jobs
    global prog_walltime
    prog_walltime = parse_redhawk_time(arg_return.prog_walltime) if arg_return.prog_walltime else None
    global timing
    timing = True if prog_walltime else False
    global timing_jobs
    timing_jobs = True if timing else arg_return.timing_jobs
    global safety_margin
    safety_margin = arg_return.safety_margin if arg_return.safety_margin else prog_walltime/10.0 if timing else None
    global continue_prev
    continue_prev = arg_return.continue_prev if timing else False
    global check_fname
    check_fname = arg_return.results_dir + "/" + check_fname
    global log_fname
    log_fname = arg_return.results_dir + "/" + log_fname
    # --all_tools / --all_tools2 override the individual tool switches.
    if arg_return.all_tools or arg_return.all_tools2:
        arg_return.run_raider = True
        arg_return.run_repscout = True
        arg_return.run_piler = True
        if arg_return.all_tools:
            arg_return.run_araider = True
            arg_return.run_raider2 = True
            arg_return.run_bigfoot = True
    #### The following is to set the global debugging variables
    if arg_return.simulate_only:    # Set to supress all tools
        arg_return.run_raider = False
        arg_return.run_araider = False
        arg_return.run_raider2 = False
        arg_return.run_repscout = False
        arg_return.run_bigfoot = False
        arg_return.run_piler = False
    return arg_return
############################################################
# Main functions
def simulate_chromosome(chromosome_file, rng_seed, length, neg_strand, fam_file, data_dir, output_file, file_index, k, mc_file, mi, retain_n, num_repeats, low_complexity, sim_type):
    """Given chromosome file and repeat file and rng_seed, runs chromosome
    simulator and then passes raider params (including path to new simulated chromosome
    file) into run_raider

    Builds a chromosome_simulator*.py command line from the supplied
    options, submits it as a cluster job via pbsJobHandler, and returns
    the handler with the attributes output_file, seq_file, sim_output and
    index attached for use by later pipeline stages.
    """
    # Output file is either specified or replace .fa with .sim.#.fa
    # Several parameters are re-bound below from values to the
    # corresponding command-line flag strings ("" when unset).
    length_arg = "-l %d" % (length) if length else ""
    k_arg = "-k %d" % (k)
    seed_arg = "-s %d" % (rng_seed) if rng_seed else ""
    neg_arg = "-n" if neg_strand else ""
    fam_arg = "-f %s" % (fam_file) if fam_file else ""
    mi = ("--mi %d" % (mi)) if mi else ""
    retain_n = "--rn" if retain_n else ""
    num_repeats = ("--nr %d" % (num_repeats)) if num_repeats else ""
    low_complexity = "--lc" if low_complexity else ""
    seq_arg = chromosome_file
    repeat_arg = chromosome_file + ".out"  # RepeatMasker annotation for the template
    output_file = (output_file if output_file else re.sub(".fa$", ".sim.%d.fa" % (file_index), file_base(chromosome_file)))
    output_path = "%s/%s" % (data_dir, output_file)
    mc = "--mc %s" % mc_file if mc_file else ""
    # Pick the simulator script: v3 for debugging (k == 0), v1 when using
    # the modern sequence exactly and an .out annotation exists, else v2.
    if k == 0: # Really only for debugging
        cmd = "{python} chromosome_simulator3.py {mi} {length} {mc} {k} {seed} {neg} {fam} {retain_n} {num_repeats} {lc} {seq} {repeat} {output}".format(python = Locations['python'], mi=mi, mc=mc, length=length_arg, k=k_arg, seed=seed_arg, neg=neg_arg, fam=fam_arg, retain_n=retain_n, num_repeats=num_repeats, lc=low_complexity, seq = seq_arg, repeat=repeat_arg, output=output_path)
    elif sim_type == 0 and os.path.isfile(repeat_arg):
        cmd = "{python} chromosome_simulator.py {mi} {length} {mc} {k} {seed} {neg} {fam} {retain_n} {num_repeats} {lc} {seq} {repeat} {output}".format(python = Locations['python'], mi=mi, mc=mc, length=length_arg, k=k_arg, seed=seed_arg, neg=neg_arg, fam=fam_arg, retain_n=retain_n, num_repeats=num_repeats, lc=low_complexity, seq = seq_arg, repeat=repeat_arg, output=output_path)
    else:
        sim_type = "--st %d" % (sim_type)
        cmd = "{python} chromosome_simulator2.py {sim_type} {mi} {length} {mc} {k} {seed} {neg} {fam} {retain_n} {num_repeats} {lc} {seq} {output}".format(python = Locations['python'], sim_type = sim_type, mi=mi, mc=mc, length=length_arg, k=k_arg, seed=seed_arg, neg=neg_arg, fam=fam_arg, retain_n=retain_n, num_repeats=num_repeats, lc=low_complexity, seq=seq_arg, output=output_path)
    if show_progress:
        sys.stderr.write("Creating simulation:\n%s\n" % (cmd))
        sys.stderr.flush()
    ##progress_fp.write(print_time() + "\n")
    progress_fp.write("Creating simulation:\n%s\n" % (cmd))
    progress_fp.flush()
    batch_name = data_dir + "/" + output_file + ".sim.batch"
    job_name = "simulation.%d" % (get_job_index("simulation"))
    p = pbsJobHandler(batch_file = batch_name, executable = cmd, job_name = job_name,
                      stdout_file = output_file + ".stdout", stderr_file = output_file + ".stderr",
                      output_location = data_dir, walltime = time_limit, arch_type = Locations['basic_arch_type'])
    # Timed submission is used when the whole run operates under a walltime.
    if not timing_jobs:
        p.submit(preserve=True, delay = wait_time)
    else:
        p.submit_timed_job(preserve=True, delay = wait_time)
    p.output_file = output_file
    p.seq_file = file_base(output_file)
    p.sim_output = output_path
    p.index = file_index
    return p
def run_raider(seed, seed_num, f, m, input_file, raider_dir, mem, max_nodes):
    """Launch a RAIDER run (plus consensus-sequence generation) as one PBS job.

    * seed / seed_num: spaced-seed pattern and its index (used in file names).
    * f: frequency threshold passed to RAIDER's -c option.
    * m: optional minimum length; falsy values omit the -m flag.
    * input_file: .fa sequence file to search.
    * raider_dir: directory that receives output, batch and log files.
    * mem / max_nodes: request high-memory architecture / a full node.
    Returns the submitted pbsJobHandler, annotated with the attributes
    (description, seed, seq_file, lib_file, tool_resources, ...) consumed
    by the downstream RepeatMasker/statistics stages.
    """
    # NOTE(review): rstrip(".fa") strips *characters* ('.', 'f', 'a'), not the
    # suffix -- a base name ending in e.g. "fa" would be over-trimmed. Same
    # idiom is used throughout this file; confirm input naming makes it safe.
    input_base = file_base(input_file).rstrip(".fa")
    output_dir = raider_dir + "/" + input_base.upper() + ".s" + str(seed_num)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    min_arg = "-m %d" % (m) if m else ""
    # cmd1 runs RAIDER itself; cmd2 derives consensus sequences from its elements.
    cmd1 = "{raider} -q -c {f} {min_arg} {seed} {input_file} {output_dir}".format(raider = Locations['raider'], f = f, min_arg = min_arg, seed = seed, input_file = input_file, output_dir = output_dir)
    out_file = raider_dir + "/" + input_base + ".s" + str(seed_num) + ".raider_consensus.txt"
    lib_file = raider_dir + "/" + input_base + ".s" + str(seed_num) + ".raider_consensus.fa"
    cmd2 = "{python} consensus_seq.py -s {seq_file} -e {elements_dir}/elements {output_file} {fa_file}".format(python = Locations['python'], seq_file = input_file, elements_dir = output_dir, output_file = out_file, fa_file = lib_file)
    if show_progress:
        sys.stderr.write("\nLaunching raider:\n%s\n%s\n" % (cmd1, cmd2))
        sys.stderr.flush()
    ##progress_fp.write(print_time() + "\n")
    progress_fp.write("\nLaunching raider:\n%s\n%s\n" % (cmd1, cmd2))
    progress_fp.flush()
    batch_name = raider_dir + "/" + input_base + ".raider.batch"
    job_name = "R.{input}.{seed}.{num}".format( num = get_job_index("raider") , input=re.sub("hg18.","",input_base), seed=seed_num)
    # Both commands run in one batch job, chained with ";".
    p = pbsJobHandler(batch_file = batch_name, executable = cmd1 + "; " + cmd2, job_name = job_name,
                      stdout_file = input_base + ".raider.stdout", stderr_file = input_base + ".raider.stderr",
                      output_location = output_dir, walltime = time_limit, mem = Locations['high_mem_arch'] if mem else False, ppn = Locations['proc_per_node'] if max_nodes else 1,
                      arch_type = Locations['basic_arch_type'] if not mem else False)
    if not timing_jobs:
        p.submit(preserve=True, delay = wait_time)
    else:
        p.submit_timed_job(preserve=True, delay = wait_time)
    # Bookkeeping consumed by run_repeat_masker / run_perform_stats.
    p.tool_resources = [0]*4
    p.description = "raider"
    # NOTE(review): "tools_resources" looks like a typo duplicate of
    # tool_resources above; downstream code appears to read tool_resources.
    p.tools_resources = [0]*4
    p.seed = seed
    p.seed_num = seed_num
    p.seq_file = input_file
    p.lib_file = lib_file
    return p
def run_composites_finder(elements_file, seq_file, compositesFinderDir):
    """Launch the composites finder (CompositesDiscover followed by
    SlimComFinder.py) on a tool's elements file as a single PBS job.

    * elements_file: elements output produced by a repeat-finding tool.
    * seq_file: the sequence file the elements were found in.
    * compositesFinderDir: directory holding the finder executables; also
      receives the output directory and batch/log files.
    Returns the submitted pbsJobHandler (annotated with the element and
    sequence file names).
    """
    input_base = file_base(elements_file)
    output_dir = compositesFinderDir + "/" + input_base.upper()
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    compositesDiscover = compositesFinderDir + "/" + "CompositesDiscover"
    slimComFinder = compositesFinderDir + "/" + "SlimComFinder.py"
    cmd1 = "{compositesFinder} {input_file}".format(compositesFinder = compositesDiscover, input_file = elements_file)
    # NOTE(review): uses the literal "python" rather than Locations['python']
    # as the other launchers do -- confirm this is intentional.
    cmd2 = "{python} {slim_composites_finder} {elements} {sequence_file} {output_file}".format(python = "python", slim_composites_finder = slimComFinder,
                                                                                              elements = elements_file, sequence_file = seq_file, output_file = output_dir + "/" + "ConsensusSequences")
    if show_progress:
        sys.stderr.write("\nLaunching composites finder:\n%s\n%s\n" % (cmd1, cmd2))
        sys.stderr.flush()
    ##progress_fp.write(print_time() + "\n")
    progress_fp.write("\nLaunching composites finder:\n%s\n%s\n" % (cmd1, cmd2))
    progress_fp.flush()
    # NOTE(review): batch and job names contain a space ("composites finder"),
    # which some schedulers reject -- verify against pbsJobHandler.
    batch_name = compositesFinderDir + "/" + input_base + ".composites finder.batch"
    job_name = "composites finder.%d" % get_job_index("composites finder")
    p = pbsJobHandler(batch_file = batch_name, executable = cmd1 + "; " + cmd2, job_name = job_name,
                      stdout_file = input_base + ".comFinder.stdout", stderr_file = input_base + ".comFinder.stderr",
                      output_location = output_dir, walltime = time_limit)
    if not timing_jobs:
        p.submit(preserve=True, delay = wait_time)
    else:
        p.submit_timed_job(preserve=True, delay = wait_time)
    p.description = "composites.finder"
    p.elementsFile = elements_file
    p.seqFile = seq_file
    return p
def run_raider2(seed, seed_num, f, m, input_file, raider2_dir, family_array, excise, overlaps, tieup, prosplit, prevfam, age, age_only, max_nodes, mem):
    """Launch a RAIDER2 run (plus consensus generation and optional cleanup)
    as a single PBS job.

    * seed: one seed string or a list of seeds (each passed via its own -s).
    * seed_num: seed index, used to build distinct file/directory names.
    * f, m: frequency threshold (-c) and optional minimum length (-m).
    * family_array / excise / overlaps / tieup / prosplit / prevfam:
      algorithm-variant switches mapped onto RAIDER2 options below.
    * age / age_only: when age_only, only "--age <age>" is passed instead.
    * max_nodes / mem: request a full node / high-memory architecture.
    Returns the submitted pbsJobHandler annotated for downstream stages.
    """
    # NOTE(review): rstrip(".fa") strips characters, not the suffix (see run_raider).
    input_base = file_base(input_file).rstrip(".fa")
    #raider2_dir += "NO_FA." if not family_array else "FA."
    #raider2_dir += "EXC." if excise else "NO_EXC."
    #raider2_dir += "NO_OV." if not overlaps else "OV."
    #raider2_dir += "TU." if tieup else "NO_TU."
    #raider2_dir += "PS" if prosplit else "NO_PS."
    #raider2_dir += "PF" if prevfam else "NO_PF"
    output_dir = raider2_dir + "/" + input_base.upper() + ".s" + str(seed_num)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    min_arg = "-m %d" % (m) if m else ""
    # RAIDER2 accepts multiple seeds; a list becomes repeated -s options.
    if type(seed) is list:
        seed_string = "-s " + " -s ".join(seed)
    else:
        seed_string = "-s {seed}".format(seed = seed)
    # Translate the boolean variant flags into RAIDER2 command-line options
    # (or, in age_only mode, pass only --age).
    opt_str = ""
    if not age_only:
        opt_str += "--na " if not family_array else ""
        opt_str += "--e " if excise else ""
        opt_str += "--no " if not overlaps else ""
        opt_str += "--t " if tieup else ""
        opt_str += "--ps " if prosplit else ""
        opt_str += "--pf " if prevfam else ""
    else:
        opt_str += "--age " + str(age)
    # cmd1: RAIDER2; cmd2: consensus sequences; cmd3: optional cleanup of the
    # (potentially large) elements/families files when only stats are kept.
    cmd1 = "{raider2} -q -c {f} {version} {min_arg} {seed} {input_file} {output_dir}".format(raider2 = Locations['raider2'], f = f, version = opt_str, min_arg = min_arg, seed = seed_string, input_file = input_file, output_dir = output_dir)
    out_file = raider2_dir + "/" + input_base + ".s" + str(seed_num) + ".raider2_consensus.txt"
    lib_file = raider2_dir + "/" + input_base + ".s" + str(seed_num) + ".raider2_consensus.fa"
    cmd2 = "{python} consensus_seq.py -s {seq_file} -e {elements_dir}/elements {output_file} {fa_file}".format(python = Locations['python'], seq_file = input_file, elements_dir = output_dir, output_file = out_file, fa_file = lib_file)
    element_file = output_dir + "/elements"
    family_file = output_dir + "/families"
    cmd3 = "rm {elements}; rm {family}".format(elements = element_file, family = family_file ) if stats_only else ""
    if show_progress:
        sys.stderr.write("\nLaunching raider2:\n%s\n%s\n\n" % (cmd1, cmd2))
        sys.stderr.flush()
    #progress_fp.write(print_time() + "\n")
    progress_fp.write("Launching raider2:\n%s\n%s\n\n" % (cmd1, cmd2))
    progress_fp.flush()
    batch_name = raider2_dir + "/" + input_base + ".s" + str(seed_num) + ".raider2.batch"
    job_name = "R2.{input}.{seed}.{num}".format( num = get_job_index("raider2") , input=re.sub("hg18.","",input_base), seed=seed_num)
    p = pbsJobHandler(batch_file = batch_name, executable = cmd1 + "; " + cmd2 + "; " + cmd3, job_name = job_name,
                      stdout_file = input_base + ".raider2.stdout", stderr_file = input_base + ".raider2.stderr",
                      output_location = output_dir, walltime= time_limit, ppn = Locations['proc_per_node'] if max_nodes else 1, mem = Locations['high_mem_arch'] if mem else False)
    if not timing_jobs:
        p.submit(preserve=True, delay = wait_time)
    else:
        p.submit_timed_job(preserve=True, delay = wait_time)
    # Bookkeeping consumed by the RepeatMasker / statistics stages.
    p.tool_resources = [0]*4
    p.description = "raider2"
    # NOTE(review): "tools_resources" looks like a typo duplicate of tool_resources.
    p.tools_resources = [0]*4
    p.seed = seed
    p.seed_num = seed_num
    p.seq_file = input_file
    p.lib_file = lib_file
    return p
def run_araider(seed, seed_num, f, m, input_file, araider_dir):
    """Launch an ARAIDER run (plus consensus generation and optional cleanup)
    as a single PBS job. Mirrors run_raider but invokes the araider binary.

    * seed / seed_num: spaced-seed pattern and its index (used in file names).
    * f, m: frequency threshold (-c) and optional minimum length (-m).
    * input_file: .fa sequence file to search.
    * araider_dir: directory that receives output, batch and log files.
    Returns the submitted pbsJobHandler annotated for downstream stages.
    """
    # NOTE(review): rstrip(".fa") strips characters, not the suffix (see run_raider).
    input_base = file_base(input_file).rstrip(".fa")
    output_dir = araider_dir + "/" + input_base.upper() + ".s" + str(seed_num)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    min_arg = "-m %d" % (m) if m else ""
    # cmd1: araider; cmd2: consensus sequences; cmd3: optional cleanup when
    # only statistics are being kept (stats_only is a module-level flag).
    cmd1 = "{araider} -q -c {f} {min_arg} {seed} {input_file} {output_dir}".format(araider = Locations['araider'], f = f, min_arg = min_arg, seed = seed, input_file = input_file, output_dir = output_dir)
    out_file = araider_dir + "/" + input_base + ".s" + str(seed_num) + ".araider_consensus.txt"
    lib_file = araider_dir + "/" + input_base + ".s" + str(seed_num) + ".araider_consensus.fa"
    cmd2 = "{python} consensus_seq.py -s {seq_file} -e {elements_dir}/elements {output_file} {fa_file}".format(python = Locations['python'], seq_file = input_file, elements_dir = output_dir, output_file = out_file, fa_file = lib_file)
    element_file = output_dir + "/elements"
    family_file = output_dir + "/families"
    cmd3 = "rm {elements}; rm {family}".format(elements = element_file, family = family_file ) if stats_only else ""
    if show_progress:
        sys.stderr.write("\nLaunching araider:\n%s\n%s\n" % (cmd1, cmd2))
        sys.stderr.flush()
    #progress_fp.write(print_time() + "\n")
    progress_fp.write("\nLaunching araider:\n%s\n%s\n" % (cmd1, cmd2))
    progress_fp.flush()
    batch_name = araider_dir + "/" + input_base + ".araider.batch"
    job_name = "araider.%d" % get_job_index("araider")
    p = pbsJobHandler(batch_file = batch_name, executable = cmd1 + "; " + cmd2 + "; " + cmd3, job_name = job_name,
                      stdout_file = input_base + ".araider.stdout", stderr_file = input_base + ".araider.stderr",
                      output_location = output_dir, walltime = time_limit)
    if not timing_jobs:
        p.submit(preserve=True, delay = wait_time)
    else:
        p.submit_timed_job(preserve=True, delay = wait_time)
    # Bookkeeping consumed by the RepeatMasker / statistics stages.
    p.tool_resources = [0]*4
    p.description = "araider"
    # NOTE(review): "tools_resources" looks like a typo duplicate of tool_resources.
    p.tools_resources = [0]*4
    p.seed = seed
    p.seed_num = seed_num
    p.seq_file = input_file
    p.lib_file = lib_file
    return p
def run_bigfoot(input_file, bigfoot_dir, L, C, I, T):
    """Runs BIGFOOT and returns a submitted pbs object with specific attributes used to run RepeatMasker.
    * input_file: The name of the .fa sequence file being searched.
    * bigfoot_dir: The name of the directory that will contain all files from this run.
    * L, C, I, T: values for BIGFOOT's -l, -c, --I and --T command-line options.
    """
    input_base = file_base(input_file).rstrip(".fa")  # The name of the inputfile -- which I've been using as a basis for all file names
    output_dir = bigfoot_dir + "/" + input_base.upper()  # If bigfoot creates its own directory for information, use this as the name of that directory.
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # cmd1 runs bigfoot; cmd2 copies its seeds file up to bigfoot_dir, where
    # it serves as the library file for the RepeatMasker stage.
    cmd1 = "{bigfoot} -l {L} -c {C} --I {I} --T {T} {input_file} {output_dir}".format(bigfoot = Locations['bigfoot'], L = L, C = C, I = I, T = T, output_dir = output_dir, input_file = input_file)  # Put the command-line executable for for bigfoot here. Use input_file for the input file name, and put any output into bigfoot_dir
    cmd2 = "cp {output_dir}/seeds {bigfoot_dir}/{input_base}.seeds".format(output_dir=output_dir, bigfoot_dir=bigfoot_dir, input_base=input_base)
    cmd = cmd1 + "; " + cmd2;
    if show_progress:
        sys.stderr.write("\nLaunching bigfoot:\n%s\n" % (cmd))
        sys.stderr.flush()
    #progress_fp.write(print_time() + "\n")
    progress_fp.write("\nLaunching bigfoot:\n%s\n" % (cmd))
    progress_fp.flush()
    lib_file = bigfoot_dir + "/" + input_base + ".seeds"
    batch_name = bigfoot_dir + "/" + input_base + ".bigfoot.batch"  # This is the batch fils for the qsub command.
    job_name = "bigfoot.%d" % get_job_index("bigfoot")  # This is the redhawk jobname. get_job_index just assigned the next unused number (for then running multiple jobs)
    stdout_file = input_base + ".bigfoot.stdout"  # Anything bigfoot prints to stdout will be redirected here
    stderr_file = input_base + ".bigfoot.stderr"  # Anything bigfoot prints to stderr will be redirected here
    p = pbsJobHandler(batch_file = batch_name, executable = cmd, job_name = job_name,
                      stdout_file = stdout_file, stderr_file = stderr_file,
                      output_location = output_dir, walltime = time_limit)
    if not timing_jobs:
        p.submit(preserve=True, delay = wait_time)
    else:
        p.submit_timed_job(preserve=True, delay = wait_time)
    p.description = "bigfoot"
    p.tool_resources = [0]*4
    p.seq_file = input_file  # Required by run_repeat_masker -- uses this as the source sequence.
    p.lib_file = lib_file  # This should be set to the file name that will be the library for the repeatmasker run
    return p
def run_piler(input_file, piler_dir, max_nodes):
    """Runs Piler and returns a submitted pbs object with specific attributes used to run RepeatMasker.

    * input_file: .fa sequence file to search.
    * piler_dir: directory receiving Piler output, batch and log files.
    * max_nodes: if truthy, request a full node (proc_per_node cores).
    """
    # NOTE(review): rstrip(".fa") strips characters, not the suffix (see run_raider).
    input_base = file_base(input_file).rstrip(".fa")
    lib_file = input_base + ".lib"
    if not os.path.exists(piler_dir):
        os.makedirs(piler_dir)
    # Piler is driven through a helper script rather than invoked directly.
    cmd = "{python} run_piler.py {input_file} {piler_dir} {output_file}".format(python = Locations['python'], input_file = input_file, piler_dir = piler_dir, output_file = lib_file);
    if show_progress:
        sys.stderr.write("\nLaunching Piler:\n%s\n" % (cmd))
        sys.stderr.flush()
    #progress_fp.write(print_time() + "\n")
    progress_fp.write("\nLaunching Piler:\n%s\n" % (cmd))
    progress_fp.flush()
    batch_name = piler_dir + "/" + input_base + ".piler.batch";
    job_name = "piler%d" % get_job_index("piler")
    stdout_file = input_base + ".piler.stdout";
    stderr_file = input_base + ".piler.stderr";
    p = pbsJobHandler(batch_file = batch_name, executable = cmd, job_name = job_name,
                      stdout_file = stdout_file, stderr_file = stderr_file,
                      output_location = piler_dir, walltime = time_limit, ppn = Locations['proc_per_node'] if max_nodes else 1)
    if not timing_jobs:
        p.submit(preserve=True, delay = wait_time)
    else:
        p.submit_timed_job(preserve=True, delay = wait_time)
    # Bookkeeping consumed by run_repeat_masker (seq_file + lib_file).
    p.description = "piler"
    p.tool_resources = [0]*4
    p.seq_file = input_file
    p.lib_file = piler_dir + "/" + lib_file
    return p
def run_scout(input_file, output_dir, min_freq, length, use_first_filter, use_second_filter, threshold, max_nodes, mem):
    """Launch the RepeatScout pipeline (build_lmer_table -> RepeatScout ->
    optional filter-stage-1) as one chained PBS job.

    * input_file: .fa sequence file to search.
    * output_dir: directory receiving all output, batch and log files.
    * min_freq: -min argument for build_lmer_table.
    * length: NOTE(review) -- parameter is never used in this function.
    * use_first_filter: if truthy, append the filter-stage-1 step.
    * use_second_filter / threshold: recorded on the job object for the
      later second-filter stage (run after RepeatMasker).
    * max_nodes / mem: request a full node / high-memory architecture.
    Returns the submitted pbsJobHandler annotated for downstream stages.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    input_name= file_base(input_file)
    # First: run build_lmer_table
    lmer_output = output_dir + "/" + input_name.rstrip(".fa") + ".freq.fa"
    cmd1 = "{build_lmer_table_exe} -min {min} -sequence {sequence} -freq {freq}".format(build_lmer_table_exe=Locations['build_lmer_table'], min=min_freq,
                                                                                       sequence = input_file, freq = lmer_output)
    # Next: Run RepeatScout
    rptscout_output = output_dir + "/" + input_name.rstrip(".fa") + ".repscout.fa"
    cmd2 = "{RptScout_exe} -sequence {sequence} -freq {freq} -output {output}".format(RptScout_exe = Locations['RptScout'], sequence = input_file, freq = lmer_output, output = rptscout_output)
    # Next: Run filter-stage-1
    if use_first_filter:
        filter_stage_output = output_dir + "/" + input_name.rstrip(".fa") + ".repscout.filtered.fa"
        cmd3 = "{filter} {input} > {filter_output}".format(input=rptscout_output, filter = Locations['filter_stage-1'], filter_output = filter_stage_output)
    else:
        cmd3 = ""
    if show_progress:
        sys.stderr.write("\nRepeatScout:\n%s\n%s\n%s\n" % (cmd1, cmd2, cmd3))
        sys.stderr.flush()
    #progress_fp.write(print_time() + "\n")
    progress_fp.write("\nRepeatScout:\n%s\n%s\n%s\n" % (cmd1, cmd2, cmd3))
    progress_fp.flush()
    batch_name = output_dir + "/" + file_base(input_file) + ".repscout1.batch"
    job_name = "rptscout.{input}.{num}".format( num = get_job_index("repscout") , input=file_base(input_file))
    p = pbsJobHandler(batch_file = batch_name, executable = cmd1 + "; " + cmd2 + "; " + cmd3, job_name = job_name, RHmodules = Locations['rm_modules'],
                      stdout_file = file_base(rptscout_output) + ".stdout", stderr_file = file_base(rptscout_output) + ".stderr",
                      output_location = output_dir, walltime = time_limit, arch_type = Locations['basic_arch_type'] if not mem else [], ppn = Locations['proc_per_node'] if max_nodes else 1,
                      mem = Locations['high_mem_arch'] if mem else False)
    if not timing_jobs:
        p.submit(preserve=True, delay = wait_time)
    else:
        p.submit_timed_job(preserve=True, delay = wait_time)
    # Description encodes which filter stages are in effect.
    p.description = "rep_scout" if not use_first_filter else "rep_scout1" if not use_second_filter else "rep_scout12"
    p.tool_resources = [0,0,0,0]
    p.seq_file = input_file
    p.should_filter_stage2 = use_second_filter
    p.input_name= input_name
    p.threshold = threshold
    p.stage = "1"
    # Library for RepeatMasker: the filtered output when filter 1 was run.
    p.lib_file = filter_stage_output if use_first_filter else rptscout_output
    return p
def run_scout_second_filter_RM(p, num_processors):
    """Run RepeatMasker on a RepeatScout job's library when the second
    filtering stage was requested; otherwise pass the job through unchanged.

    * p: the finished RepeatScout pbsJobHandler (stage "1").
    * num_processors: processor count forwarded to run_repeat_masker.
    Returns either the new RepeatMasker job (annotated with the filter-stage
    bookkeeping copied from p) or p itself when no second filter is wanted.
    """
    # Nothing to do unless the caller asked for the second filter stage.
    if not p.should_filter_stage2:
        return p
    masker_job = run_repeat_masker(p, num_processors)
    # Carry the second-filter bookkeeping over to the new job object so the
    # follow-up filter stage can find it.
    masker_job.should_filter_stage2 = p.should_filter_stage2
    masker_job.threshold = p.threshold
    masker_job.input_name = p.input_name
    masker_job.stage = "RM"
    masker_job.description = p.description
    return masker_job
def run_scout_second_filter(p):
    """Launch RepeatScout's filter-stage-2 as a PBS job, using the RepeatMasker
    output attached to p; if no second filter was requested, return p as-is.

    * p: the RepeatMasker pbsJobHandler produced by run_scout_second_filter_RM
      (must carry dir, input_name, lib_file, threshold, rm_output, ...).
    Returns the new filter job (stage "2") or p unchanged.
    """
    if p.should_filter_stage2:
        filter_stage2_output = p.dir + "/" + p.input_name.rstrip(".fa") + ".repscout.filtered2.fa"
        # filter-stage-2 reads the stage-1 library and the RepeatMasker .out file.
        cmd = "cat {filtered} | {filter} --cat={rm_output} --thresh={thresh} > {filter_output}".format(filtered=p.lib_file, filter=Locations['filter_stage-2'],
                                                                                                      filter_output= filter_stage2_output, thresh=p.threshold, rm_output = p.rm_output) #RM_output_dir + "/" + file_base(input_file) + ".out")
        if show_progress:
            sys.stderr.write("\nRepeatScout Filter2:\n%s\n" % cmd)
            sys.stderr.flush()
        #progress_fp.write(print_time() + "\n")
        progress_fp.write("\nRepeatScout Filter2:\n%s\n" % cmd)
        progress_fp.flush()
        # NOTE(review): batch name ends in ".repscout2.fa" -- probably meant
        # ".batch" like the other launchers; confirm.
        batch_name = file_dir(p.rm_output) + "/" + p.input_name.rstrip(".fa") + ".repscout2.fa"
        job_name = "filter2.%d" % get_job_index("filter2")
        p2 = pbsJobHandler(batch_file = batch_name, executable = cmd, job_name = job_name,
                           stdout_file = file_base(p.seq_file) + ".repscout2.stdout", stderr_file = file_base(p.seq_file) + ".repscout2.stderr",
                           output_location = file_dir(p.seq_file), walltime = time_limit)
        if not timing_jobs:
            p2.submit(preserve=True, delay = wait_time)
        else:
            p2.submit_timed_job(preserve=True, delay = wait_time)
        p2.description = p.description#"rep_scout"
        p2.stage = "2"
        #print("RM resources : " + str(p.getResources(cleanup=False)))
        # Accumulate resource usage (cpu, wall, mem, vmem) across stages.
        p2.tool_resources = [x + y for x, y in zip(p.tool_resources, p.getResources(cleanup=False))]
        #print("F1 + RM resources : " + str(p2.tool_resources))
        p2.seq_file = p.seq_file
        p2.lib_file = filter_stage2_output
        p2.should_filter_stage2 = p.should_filter_stage2
        return p2
    else:
        return p
def scout_second_filter(p, min_freq):
    """NOT CURRENTLY WORKING!!! Does not run correctly, and does not properly adjust time.

    Older variant of the filter-stage-2 launcher (see run_scout_second_filter):
    waits on p, then submits the perl filter-stage-2 script against p's
    RepeatMasker output with min_freq as the threshold. Kept for reference.
    """
    p.wait(wait_time)
    filter2_stage_output = p.seq_file.rstrip(".fa") + ".repscout.filtered2.fa"
    cmd = "cat {output} | perl {filter} --cat={cat} --thresh={thresh} > {final}".format(output = p.lib_file, filter = Locations['filter_stage-2'], cat = p.rm_output, thresh = min_freq, final = filter2_stage_output)
    if show_progress:
        sys.stderr.write("\nRepeatScout Filter2:\n%s\n" % cmd)
        sys.stderr.flush()
    #progress_fp.write(print_time() + "\n")
    progress_fp.write("\nRepeatScout Filter2:\n%s\n" % cmd)
    progress_fp.flush()
    # NOTE(review): batch name ends ".repscout2.fa" rather than ".batch".
    batch_name = file_dir(p.rm_output) + "/" + file_base(p.seq_file).rstrip(".fa") + ".repscout2.fa"
    job_name = "filter2%d" % get_job_index("filter2")
    p2 = pbsJobHandler(batch_file = batch_name, executable = cmd, job_name = job_name,
                       stdout_file = file_base(p.seq_file) + ".repscout2.stdout", stderr_file = file_base(p.seq_file) + ".repscout2.stderr",
                       output_location = file_dir(p.seq_file), walltime = time_limit)
    if not timing_jobs:
        p2.submit(preserve=True, delay = wait_time)
    else:
        p2.submit_timed_job(preserve=True, delay = wait_time)
    p2.description = "rep_scout"
    p2.time_resources = p.time_resources + p.getResources(cleanup=False)
    # NOTE(review): this first lib_file assignment is dead -- it is
    # overwritten two lines below.
    p2.lib_file = p.lib_file
    p2.seq_file = p.seq_file
    p2.lib_file = filter2_stage_output
    return p2
def run_repeat_masker(p, num_processors):
    """Given the pbs object used to start a consensus sequence job as well as
    repeatmasker arguments, wait until the job is done and then call repeatmasker
    on the output and put results in masker_dir (current dir if unspecified).

    * p: a finished (or soon-finished) tool job carrying seq_file, lib_file,
      description and tool_resources.
    * num_processors: value for RepeatMasker's -pa option.
    Returns the submitted RepeatMasker pbsJobHandler, annotated with the
    masked-output path (rm_output) and accumulated resource usage.
    """
    p.wait(wait_time)
    p.loadResources()
    input_base = file_base(p.seq_file)  # Base name of the file used for input
    output_dir = file_dir(p.lib_file) + "/" + file_base(p.lib_file).upper() + ".RM"
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # -nolow: skip low-complexity masking; -lib: use the tool's consensus library.
    cmd = "{RepeatMasker} -nolow -lib {library} -pa {pa} -dir {dir} {seq_file}".format(RepeatMasker = Locations['RepeatMasker'], library = p.lib_file, pa = num_processors, dir = output_dir, seq_file = p.seq_file)
    if show_progress:
        sys.stderr.write("\nLaunch repeatmasker (%s):\n%s\n" % (p.description, cmd))
        sys.stderr.flush()
    #progress_fp.write(print_time() + "\n")
    progress_fp.write("\nLaunch repeatmasker (%s):\n%s\n" % (p.description, cmd))
    progress_fp.flush()
    batch_name = p.lib_file.rstrip(".fa") + ".rm.batch"
    tool_name = p.description if not "raider" in p.description else "R" if p.description == "raider" else "R2"
    job_name = "RM.{input}.{tool}.{num}".format( num = get_job_index("repmask") , input=re.sub("hg18.","",input_base), tool=tool_name)
    # NOTE(review): requests 4 cores when num_processors == 1, else exactly
    # num_processors -- confirm this over-allocation for the pa=1 case is intended.
    ppn_arg = 4*num_processors if num_processors == 1 else num_processors
    p2 = pbsJobHandler(batch_file = batch_name, executable = cmd, nodes = 1, ppn = ppn_arg, RHmodules = ["RepeatMasker", "python-3.3.3"],
                       job_name = job_name, stdout_file = input_base + ".repmask.stdout", stderr_file = input_base + ".repmask.stderr",
                       output_location = output_dir, walltime = rm_time_limit, always_outputs=False);
    if not timing_jobs:
        p2.submit(preserve=True, delay = wait_time)
    else:
        p2.submit_timed_job(preserve=True, delay = wait_time)
    p2.description = "RptMasker"
    p2.seed = p.seed if hasattr(p, "seed") else "NA"
    p2.seed_num = p.seed_num if hasattr(p, "seed_num") else "NA"
    p2.dir = output_dir
    p2.lib_file = p.lib_file
    p2.seq_file = p.seq_file
    p2.rm_output = output_dir + "/" + file_base(p.seq_file) + ".out"
    # Accumulate the tool's resource usage (cpu, wall, mem, vmem).
    p2.tool_resources = [x + y for x, y in zip(p.tool_resources, p.getResources(cleanup=False))]
    p2.tool_description = p.description
    return p2
def run_perform_stats(p, exclusion_file = None):
    """Wait for a RepeatMasker job to finish, then launch perform_stats.py to
    compare its found repeats against the known-repeat annotation.

    * p: the RepeatMasker pbsJobHandler (carries seq_file, rm_output, dir, ...).
    * exclusion_file: optional file of regions to exclude (-e option).
    Returns the submitted statistics pbsJobHandler, annotated with the same
    bookkeeping attributes carried forward from p.
    """
    p.wait(wait_time)
    p.loadResources()
    input_base = file_base(p.seq_file)  # Base name of the file used for input
    # Known repeats come from the simulator's .out annotation alongside the
    # sequence; found repeats are RepeatMasker's .out file.
    known_repeats = p.seq_file + ".out"
    found_repeats = p.rm_output
    output_dir = p.dir
    output_path = output_dir + "/" + file_base(p.seq_file) + ".stats"
    exclusion_part = "-e {exclude}".format(exclude = exclusion_file) if exclusion_file else ""
    cmd = "python perform_stats.py {exclusions} {known} {found} {output}".format(exclusions=exclusion_part, known = known_repeats, found = found_repeats, output = output_path)
    if show_progress:
        sys.stderr.write("\nLaunch perform_stats: %s\n" % (cmd))
        sys.stderr.flush()
    #progress_fp.write(print_time() + "\n")
    progress_fp.write("\nLaunch perform_stats: %s\n" % (cmd))
    progress_fp.flush()
    batch_name = p.lib_file.rstrip(".fa") + ".stats.batch"
    job_name = "stats.%d" % get_job_index("stats")
    p2 = pbsJobHandler(batch_file = batch_name, executable = cmd,
                       job_name = job_name, stdout_file = input_base + ".stats.stdout", stderr_file = input_base + ".stats.stderr",
                       output_location = output_dir, walltime = time_limit, arch_type = Locations['basic_arch_type'])
    if not timing_jobs:
        p2.submit(preserve=True, delay = wait_time)
    else:
        p2.submit_timed_job(preserve=True, delay = wait_time)
    p2.description = "Stats"
    p2.seed = p.seed if hasattr(p, "seed") else "NA"
    p2.seed_num = p.seed_num if hasattr(p, "seed_num") else "NA"
    p2.dir = p.dir
    p2.lib_file = p.lib_file
    p2.seq_file = p.seq_file
    p2.rm_output = p.rm_output #output_dir + "/" + file_base(p.seq_file) + ".out"
    p2.tool_resources = p.tool_resources
    p2.tool_description = p.tool_description
    return p2
#def performance_sum(job_dic, PRA_jobs):
# """Given a list of all of the statistics jobs, uses the statistics output files to
# generate a summary file indicative of overall performance. Put results in stats_dir
# (Current dir if unspecified)"""
# ######
# # Calculate statistics (not bothering with parallelization yet)
# print_str = "{:<12}" + "{:<5}" + "".join("{:<14}"*4) + "".join("{:<14}"*6) + "".join("{:<14}"*8) + "{:<14}" + "\n"
# stats_jobs = set()
# for key in test_tools:
# for p in job_dic[key]:
# stats_jobs.add(run_perform_stats(p))
#
#
# with open(args.results_dir + "/" + args.stats_file, "w") as fp:
# fp.write(print_str.format("#tool", "seed", "tp", "fp", "fn", "tn", "tpr", "tnr", "ppv", "npv", "fpr", "fdr","ToolCpuTime", "ToolWallTime", "ToolMem", "ToolVMem", "RMCpuTime", "RMWallTime", "RMMem", "RMVMem", "coverage"))
#
# for key in test_tools:
# for p in job_dic[key]:
# try:
#
# s = run_perform_stats(p)
#
# except Exception as E:
# progress_fp.write("performance Exception: " + str(E) + "\n");
# fp.write("\t".join([str(key), str(p.seed_num) if hasattr(p, "seed_num") else "NA", "INCOMPLETE\n"]))
#
# ### KARRO
# # Finally: we should not terminate until all the pra jobs are done. (If pra is off, this list will be empty.)
# for p in PRA_jobs:
# p.timed_wait() # KARRO: Is this the correct method to use to ensure resubmission of needed
# ### KARRO END
# regex = re.compile("(?<=\# Average consensus coverage: )\d+.\d+")
# >>> m = regex.findall(text)
# >>> m
# ['0.0063']
#
# tps = 0
# tns = 0
# fps = 0
# fns = 0
# for p in stats_jobs:
# sf = open(p.stats_output, "r")
# tps += int(re.split("\s+", sf.readline().rstrip())[1])
# fps += int(re.split("\s+", sf.readline().rstrip())[1])
# tns += int(re.split("\s+", sf.readline().rstrip())[1])
# fns += int(re.split("\s+", sf.readline().rstrip())[1])
# sf.close()
# stats_file = "summary.%s.stats" % (test)
# smry_path = "%s/%s" % (stats_dir, stats_file) if stats_dir else "%s/%s" %(curr_dir, stats_file) if curr_dir else stats_file
#
# smry = open(smry_path, 'w')
# smry.write("Evaluation completed.\n")
# smry.write("True Positives (TP): \t %d \n" % (tps))
# smry.write("False Positives (FP): \t %d \n" % (fps))
# smry.write("True Negatives (TN): \t %d \n" % (tns))
# smry.write("False Negatives (FN): \t %d \n" % (fns))
# smry.write("\nPerformance of Repeat Classification Tool\n")
# smry.write("Sensitivity (TPR): \t\t %f %%\n" % (tps/(tps + fns)))
# smry.write("Specificity (TNR): \t\t %f %%\n" % (tns/(fps + tns)))
# smry.write("Precision (PPV): \t\t %f %%\n" % (tps/(tps + fps)))
# smry.write("Neg. Pred. Val. (NPV): \t %f %%\n" % (tns/(tns + fns)))
# smry.write("Fall-Out (FPR): \t\t %f %%\n" % (fps/(fps + tns)))
# smry.write("False Disc. Rate (FDR): \t %f %%\n" % (fps/(tps + fps)))
# smry.close()
# def run_pra_analysis(jobs, BLAST_DATABASE):
# """This takes a list of the jobs, and a list of the BLAST_DATABASE jobs. For each
# it makes sure the BLAST_DATABASE job is done for the corresponding datafile, then launches
# the analysis tool. Returns a list of the analysis tool jobs. RepeatMasker is NOT dependent
# on these jobs -- it can be launched immediately."""
# submitted_jobs =[]
# cmd = "{python} blast_consensus.py {consensus_file} {rm_fa_file} {database_file} {output_file}"
# for j in jobs:
# j.wait()
# o = BLAST_DATABASE[j.seq_file] # This is the create_database job that was launched on this sequence file.
# o.wait() # Wait until the DATABASE file has been created.
# # (This should be parallilzed, but probably not worth the effort)
# analysis_cmd = cmd.format(python = Locations["python"], consensus_file = j.lib_file,
# rm_fa_file = o.rm_seq_file, database_file = o.rm_seq_file,
# output_file = j.lib_file.rstrip(".fa") + ".pra.txt")
# if show_progress:
# sys.stderr.write("\nLaunching pre-rm analysis:\n%s\n" % (analysis_cmd))
# sys.stderr.flush()
# progress_fp.write("pre-rm analysis:\n%s\n" % (analysis_cmd))
# progress_fp.flush()
# job_name = "pra.%d" % get_job_index("pra")
# base_name = file_base(j.lib_file)[:-3] + ".pra"
# batch_name = base_name + ".batch"
# stdout_file = base_name + ".stdout"
# stderr_file = base_name + ".stderr"
# location = file_dir(j.lib_file)
# print("Y: " + base_name + " " + batch_name + " " + job_name + " " + stdout_file + " " + stderr_file + " " + analysis_cmd)
# p = pbsJobHandler(batch_file = batch_name, executable = analysis_cmd, job_name = job_name,
# stdout_file = stdout_file, stderr_file = stderr_file,
# output_location = location, walltime = time_limit, RHmodules = ["blast+"]);
# p.submit_timed_job() # KARRO: What parameters should be used here for resubmission?
# submitted_jobs.append(p)
# return submitted_jobs
def create_blast_db(file_list):
    # Launch the create_database.py tool on each chromosome. (Needed for pre-RM result analysis; not needed for any tool.)
    # BLAST_DATABASE will be a dictionary mapping the (simulated) chromosome to the pbs object
    """Create a BLAST database job for each sequence file in file_list.

    Builds all job objects first, then submits them in a second pass.
    Returns a dict mapping each input file name to its (submitted)
    pbsJobHandler, annotated with seq_file, rm_file and rm_seq_file.
    """
    BLAST_DATABASE = {}
    blast_database_command = "{python} ./create_database.py {seq_file} {rm_file} {out_file}"
    for i,file_name in enumerate(file_list):
        # NOTE(review): rstrip(".fa") strips characters, not the suffix; and
        # "file" shadows the (Py2) builtin of the same name.
        file = file_name.rstrip(".fa")
        seq_file = file_name
        rm_file = file + ".fa.out"
        rm_seq_file = file + ".rptseq.fa"
        cmd = blast_database_command.format(python = Locations['python'], seq_file = seq_file, rm_file = rm_file, out_file = rm_seq_file)
        if show_progress:
            sys.stderr.write("\nLaunching create_database: %s\n\n" % (cmd))
            sys.stderr.flush()
        #progress_fp.write(print_time() + "\n")
        progress_fp.write("\nLaunching create_database: %s\n\n" % (cmd))
        progress_fp.flush()
        batch_name = file + ".blast_db.batch"
        job_name = "create_db.%d" % (i)
        stdout_file = file + ".blast_db.stdout"
        stderr_file = file + ".blast_db.stderr"
        o = pbsJobHandler(batch_file = batch_name, executable = cmd, job_name = job_name,
                          ppn = 2, walltime = "00:20:00", RHmodules = ["blast+"],
                          stdout_file = stdout_file, stderr_file = stderr_file)
        o.seq_file = seq_file
        o.rm_file = rm_file
        o.rm_seq_file = rm_seq_file
        BLAST_DATABASE[file_name] = o
    # Submit everything after all job objects have been constructed.
    for o in BLAST_DATABASE.values():
        #o.submit_timed_job() # KARRO: Highly unlikely this will ever exceed 20 minutes (or even 5 minutes) -- so I just took the default parameters.
        if not timing_jobs:
            o.submit(preserve=True, delay = wait_time)
        else:
            o.submit_timed_job(preserve=True, delay = wait_time)
    return BLAST_DATABASE
### KARRO
def run_pra_analysis(tool_job, database_job):
    """Launch a pre-RepeatMasker (pra) analysis job and return the job object.

    * tool_job: the pbsJob for one of the de novo search tools. Its lib_file
      (consensus library) is used as the query sequence set.
    * database_job: the create_blast_db job for the sequence tool_job ran on.
    Waits for the database job to complete, submits the analysis job, and
    returns its pbsJobHandler annotated for the reporting stage.
    """
    cmd = "./pra_analysis {consensus_file} {rm_fa_file} {database_file} {output_file}"
    # Make sure the BLAST database exists before the analysis starts.
    database_job.wait(100)
    # Fix: the original call also passed python=Locations["python"] and
    # walltime=time_limit, neither of which appears in the template --
    # str.format silently ignores extra keywords, so they were dead,
    # misleading arguments. Removed.
    # NOTE(review): rm_fa_file and database_file are both set to the same
    # file (database_job.rm_seq_file) -- confirm this is intentional.
    analysis_cmd = cmd.format(consensus_file = tool_job.lib_file,
                              rm_fa_file = database_job.rm_seq_file, database_file = database_job.rm_seq_file,
                              output_file = tool_job.lib_file.rstrip(".fa") + ".pra.txt")
    if show_progress:
        sys.stderr.write("\nLaunching pre-rm analysis:\n%s\n" % (analysis_cmd))
        sys.stderr.flush()
    #progress_fp.write(print_time() + "\n")
    progress_fp.write("\npre-rm analysis:\n%s\n" % (analysis_cmd))
    progress_fp.flush()
    job_name = "pra.%d" % get_job_index("pra")
    location = file_dir(tool_job.lib_file)
    base_name = file_base(tool_job.lib_file)[:-3] + ".pra"  # [:-3] drops ".fa"
    batch_name = location + "/" + base_name + ".batch"
    stdout_file = base_name + ".stdout"
    stderr_file = base_name + ".stderr"
    p = pbsJobHandler(batch_file = batch_name, executable = analysis_cmd, job_name = job_name,
                      stdout_file = stdout_file, stderr_file = stderr_file,
                      output_location = location, walltime = time_limit, RHmodules = ["blast+"]);
    #p.submit_timed_job() # KARRO: What parameters should be used here for resubmission?
    if not timing_jobs:
        p.submit(preserve=True, delay = wait_time)
    else:
        p.submit_timed_job(preserve=True, delay = wait_time)
    p.description = "PraAnalysis"
    p.seed = tool_job.seed if hasattr(tool_job, "seed") else "NA"
    p.seed_num = tool_job.seed_num if hasattr(tool_job, "seed_num") else "NA"
    p.lib_file = tool_job.lib_file
    p.seq_file = tool_job.seq_file
    p.pra_output = tool_job.lib_file.rstrip(".fa") + ".pra.txt"
    # Accumulate tool resource usage (cpu, wall, mem, vmem) with this job's.
    p.tool_resources = [x + y for x, y in zip(tool_job.tool_resources, tool_job.getResources(cleanup=False))]
    p.tool_description = tool_job.description
    return p
### KARRO END
def exit_now():
    """Checkpoint-and-quit helper for walltime-limited evaluation runs.

    Records "CONTINUE" in the logging file — signalling that the evaluation
    has not yet completed and should be resumed — then terminates the
    process with exit status 0.
    """
    logging_fp.write("CONTINUE\n")
    raise SystemExit(0)
def have_time_for_another_run(last_run_time):
    """If we submitted the evaluation as a PBS job with a set walltime, check the amount
    of time left and see if we can fit in another time interval of length at least 'last_run_time'.

    * last_run_time: duration (seconds) of the most recent run, used as the
      estimate for the next one.
    Returns True when another interval fits before quit_time; otherwise logs
    the shortfall and returns False.
    """
    time_left = quit_time - time.time() - last_run_time
    # Fix: the original re-evaluated the same expression (with a second
    # time.time() call) in the condition, so the logged time_left could
    # disagree with the branch taken. Reuse the single computed value.
    if time_left >= 0:
        return True
    else:
        logging_fp.write("Running out of time. Only have {t} left. Dumping data to new checkpoint file\n".format(t=time_left))
        return False
def run_timed_chrom_sim_jobs(jobs, flist=None):
    """Given a set of chromosome simulation jobs and a list of paths to finished simulation files,
    wait for every job to finish.  Three cases:
    (1) If we submitted the evaluation as a PBS job with a set walltime, keep checking to
        see if we have reached the point to save work and exit.
    (2) If we are 'timing_jobs', keep calling timed_wait on each job until all have finished.
        This ensures that if a job is running out of time it will resubmit with a longer
        walltime (all work for this is done in redhawk.py).
    (3) Otherwise, call wait() as usual.
    If/when all jobs complete, returns a list of paths to the resulting simulation files.

    Fix: `flist=[]` was a mutable default argument shared between calls; it is
    now None, with a fresh empty list created per call.
    """
    if flist is None:
        flist = []
    chrom_job_set = {j for j in jobs}
    finished_jobs = set()
    time_est = None
    for j in chrom_job_set:
        t1 = time.time()
        if timing and not time_est:
            # No measured runtime yet: estimate from the job's requested
            # walltime before deciding whether we can afford to wait on it.
            time_est = parse_redhawk_time(j.walltime)
            if not have_time_for_another_run(time_est):
                save_timed_chrom_sim_jobs(chrom_job_set, finished_jobs, flist)
                exit_now()
        if not timing_jobs:
            j.wait(100)
        else:
            j.timed_wait(100)
        t2 = time.time()
        finished_jobs.add(j)
        # Use the most recent wait as the estimate for the next iteration.
        time_est = t2 - t1
        # Make sure won't run out of time if continue to next iteration
        if timing and not have_time_for_another_run(time_est):
            save_timed_chrom_sim_jobs(chrom_job_set, finished_jobs, flist)
            exit_now()
    return [j.sim_output for j in finished_jobs]
def run_timed_tool_jobs(jobs, run_rm, pa, run_pra, blast_db, RM_jobs=None, PRA_jobs=None):
    """Given a set of repeat finding tool jobs and repmask jobs (with pa info), keep track of
    what tool jobs have completed and submit corresponding repmask job upon tool job completion.
    We call isJobRunning on each tool job -- if we are 'timing_jobs', this information is saved
    in the job object and redhawk.py will handle whether jobs need to be resubmitted with more time.
    If/when all tool jobs complete, returns the (RM_jobs, PRA_jobs) sets (some of whose
    members may still be running).
    Note: If we submitted the evaluation as a PBS job with a set walltime, keep checking to
    see if we have reached point to save work and exit."""
    job_set = {j for j in jobs}
    if not RM_jobs:
        RM_jobs = set()
    if not PRA_jobs:
        PRA_jobs = set()
    time_est = None
    while job_set:
        finished_jobs = set()
        added_jobs = set()
        t1 = time.time()
        for j in job_set:
            if not j.isJobRunning():
                finished_jobs.add(j)
                # RepeatScout with the second filter enabled runs in stages
                # ("1" -> "RM" -> "2"); a finished intermediate stage spawns
                # the next stage's job instead of the RM/PRA follow-ups.
                if "rep_scout" in j.description and j.should_filter_stage2 and j.stage != "2":
                    if j.stage == "1":
                        added_jobs.add(run_scout_second_filter_RM(j,pa))
                    elif j.stage == "RM":
                        added_jobs.add(run_scout_second_filter(j))
                else:
                    # Fully-finished tool job: for RepeatScout's final stage,
                    # fold this stage's resource usage into the running totals.
                    if "rep_scout" in j.description and j.stage == "2":
                        #print("F2 resources : " + str(list(j.getResources(cleanup=False))))
                        #j.tool_resources = j.tool_resources + j.getResources(cleanup=False)   #should we include the RM inside RS in timing?  #p.getResources(cleanup=False)
                        j.tool_resources = [x + y for x, y in zip(j.tool_resources, j.getResources(cleanup=False))] #j.tool_resources + j.getResources(cleanup = False)
                        #print("F1 + RM + F2 resources : " + str(j.tool_resources))
                    rm_job = None
                    pra_job = None
                    if run_rm:
                        rm_job = run_repeat_masker(j,pa)
                        RM_jobs.add(rm_job)
                    if run_pra:
                        pra_job = run_pra_analysis(j, blast_db[j.seq_file])
                        PRA_jobs.add(pra_job)
                    # Cross-link the two follow-up jobs so each can reach its partner.
                    if rm_job:
                        rm_job.pra_job = pra_job if pra_job else None
                    if pra_job:
                        pra_job.rm_job = rm_job if rm_job else None
        job_set = job_set - finished_jobs
        job_set = job_set | added_jobs
        # Time spent polling this sweep; used to decide whether another sweep fits.
        time_est = time.time() - t1
        if timing and not have_time_for_another_run(time_est):
            save_timed_tool_jobs(job_set, RM_jobs, PRA_jobs, blast_db)
            exit_now()
        time.sleep(sleep_pause)    # We have checked all the jobs; lets sleep for ten minutes before checking again.
                                   # Important for keeping time down on the head node -- required on Oakley.
    return RM_jobs, PRA_jobs
# Canonical list of tool names: used as the keys of the per-tool job
# dictionaries and as the row ordering of the final statistics table.
test_tools = ["raider", "bigfoot", "piler", "rep_scout", "rep_scout1", "rep_scout12", "araider", "raider2", "raider2.0", "raider2.1", "raider2.2"] # List of implemented tools
def run_timed_analysis_jobs(run_rm, run_pra, RM_jobs, PRA_jobs, results_dir, stats_jobs=None, job_dic=None):
    """Given sets of repmask/PRA jobs and a working job dictionary, keep track of which
    jobs have completed and add completed jobs to the job dictionary under the
    appropriate tool name.  We call isJobRunning on each job -- if we are
    'timing_jobs', this information is saved in the job object and redhawk.py
    handles whether jobs need to be resubmitted with more time.
    Returns (job_dic, stats_jobs, remaining_pra_job_set).
    Note: If we submitted the evaluation as a PBS job with a set walltime, keep
    checking whether we have reached the point to save work and exit.

    Fixes: pra_job_set/rm_job_set were initialised as dicts ({}) rather than
    sets, and a None RM_jobs or PRA_jobs argument would crash the set
    comprehension; both now default to empty sets.
    """
    job_dic = job_dic if job_dic else {tool: [] for tool in test_tools}
    if not stats_jobs:
        stats_jobs = set()
    # Guarantee every known tool has an entry, even with a recovered job_dic.
    for tool in test_tools:
        if tool not in job_dic:
            job_dic[tool] = []
    pra_job_set = set(PRA_jobs) if PRA_jobs else set()
    rm_job_set = set(RM_jobs) if RM_jobs else set()
    while rm_job_set or pra_job_set:
        finished_rm_jobs = set()
        finished_pra_jobs = set()
        t1 = time.time()
        for j in rm_job_set:
            if not j.isJobRunning():
                job_dic[j.tool_description].append(j)
                finished_rm_jobs.add(j)
        for j in pra_job_set:
            if not j.isJobRunning():
                if run_pra and not run_rm:
                    # No RepeatMasker stage: record the PRA job's resources directly.
                    j.pra_resources = list(j.getResources(cleanup=False))
                job_dic[j.tool_description].append(j)
                finished_pra_jobs.add(j)
        # Time spent polling this sweep; used to decide if another sweep fits.
        time_est = time.time() - t1
        if timing and not have_time_for_another_run(time_est):
            save_timed_PRA_jobs(pra_job_set - finished_pra_jobs)
            save_timed_RM_jobs(rm_job_set - finished_rm_jobs, stats_jobs, results_dir, job_dic)
            exit_now()
        pra_job_set = pra_job_set - finished_pra_jobs
        rm_job_set = rm_job_set - finished_rm_jobs
    return job_dic, stats_jobs, pra_job_set
############################################################################################
if __name__ == "__main__":
    # Driver: parse arguments, (re)create the results tree, obtain the input
    # sequences (simulated or copied), launch tool / RepeatMasker / PRA jobs
    # (with checkpoint-and-resume support), then write the statistics table.
    start = time.time()
    args = parse_params(sys.argv[1:])
    start_time = start
    # Absolute wall-clock deadline for this evaluation run (None = no limit).
    quit_time = prog_walltime + start_time - safety_margin if prog_walltime else None
    ####
    # Currently: We check for the RepeatScout executable at the location on my Mac; if
    # found, we assume we are running on the Mac.  If not, we check for in the Redhawk
    # location, and if found assume we are running on redhawk.  Otherwise we print and
    # error and quit.
    if location.lower() == "oakley":
        Locations = OakleyLocations;
        assert 1 <= args.pa <= 2, "Make sure you set the --pa parameter to a value between 1 and 4 on oakley (%d)" % (args.pa)
    elif location.lower() == "osx":
        Locations = MacLocations
    elif location.lower() == "redhawk":
        Locations = RedhawkLocations
        assert 1 <= args.pa <= 2, "Make sure you set the --pa parameter to a value between 1 and 4 on redhawk (%d)" % (args.pa)
    else:
        # NOTE(review): sys.stderr has no println() method in Python -- this error
        # path would raise AttributeError; presumably sys.stderr.write was meant.
        sys.stderr.println("locations.txt file: bad content")
        exit(1);
    ###
    data_dir = args.results_dir + "/" + args.data_dir
    if not continue_prev:
        # Fresh run: optionally nuke the old results tree, then rebuild it.
        if args.nuke:
            if os.path.exists(args.results_dir):
                subprocess.call("rm -r %s" % args.results_dir, shell = True)
        else:
            if os.path.exists(args.results_dir):
                sys.stderr.write("%s exists; need to use --nuke option" % args.results_dir)
        if not os.path.exists(args.results_dir):
            os.makedirs(args.results_dir)
        if timing:
            # Checkpoint / log files used to resume after hitting our own walltime.
            checkpoint_fp = open(check_fname, "w")
            logging_fp = open(log_fname, "w")
            logging_fp.write("Starting new run from scratch\n")
        ### Generate simulated file(s) and run to completion
        ### Set up the debugging log file (if needed)
        progress_fp = open(args.results_dir + "/debug.txt", "w")
        #progress_fp.write(print_time() + "\n")
        progress_fp.write(" ".join(sys.argv) + "\n\n");
        if not os.path.exists(data_dir):
            os.makedirs(data_dir)
        # First: we put the chromosomes (simulated or real) into data_dir
        if args.subparser_name == "chrom_sim":
            # Launch the jobs
            f = lambda i: simulate_chromosome(chromosome_file = args.chromosome,
                                              rng_seed = args.rng_seed, length = args.length,
                                              neg_strand = args.negative_strand, fam_file = args.family_file,
                                              data_dir = args.results_dir + "/" + args.data_dir, output_file = args.output, file_index = i,
                                              k = args.k, mc_file = args.mc_file, mi = args.max_interval,
                                              retain_n = args.retain_n, num_repeats = args.num_repeats, low_complexity = args.low_complexity,
                                              sim_type = args.sim_type)
            J = [f(i) for i in range(args.num_sims)]
            if args.family_file:
                family_list = [fam for line in open(args.family_file) for fam in re.split("\s+", line.rstrip()) if fam]
                with open(args.results_dir + "/family_file.txt", "w") as fp:
                    fp.write("\n".join(["{fam}".format(fam=f) for f in family_list]) + "\n")
            # Get the list of simulated file names
            file_list = run_timed_chrom_sim_jobs(J) #[j.sim_output for j in J]
        else:
            # Real input: copy each sequence (and its RepeatMasker .out file) into data_dir.
            file_list = []
            for file in args.seq_files:
                file_list.append(data_dir + "/" + file_base(file))
                shutil.copy(file, file_list[-1])
                shutil.copy(file + ".out", file_list[-1] + ".out")
        if timing:
            write_flist_to_checkpoint(file_list)
    else:
        # Resumed run: rotate old checkpoint/log files, then recover state.
        if timing:
            if os.path.exists(check_fname):
                os.rename(check_fname, check_fname + ".old")
            old_checkpoint_fp = open(check_fname + ".old", "r")
            if os.path.exists(log_fname):
                os.rename(log_fname, log_fname + ".old")
            checkpoint_fp = open(check_fname, "w")
            logging_fp = open(log_fname, "w")
            logging_fp.write("Continuing previous run\n")
            flush_files()
        progress_fp = open(args.results_dir + "/debug.txt", "a")
        #progress_fp.write(print_time() + "\n")
        progress_fp.write(" ".join(sys.argv) + "\n\n");
        # Get file list from old checkpoint file
        file_list = []
        # NOTE(review): old_checkpoint_fp is only bound when `timing` is set --
        # resuming presumably implies timing; confirm.
        next_step = old_checkpoint_fp.readline().rstrip()
        if next_step == flist_start:
            file_list, next_step = recover_file_list()
        if next_step == csjobs_start:
            chrom_job_set, next_step = recover_sim_jobs()
            run_timed_chrom_sim_jobs(chrom_job_set, file_list)
        flush_files()
    # Second: build (or recover) the BLAST databases used by the PRA analysis.
    if not continue_prev or next_step == '':
        BLAST_DATABASE = create_blast_db(file_list) if args.pra else {} #CARLY: Moved this into a method to make main method (slightly) easier to follow
        if timing:
            write_blast_db_to_checkpoint(BLAST_DATABASE, args.results_dir)
    else:
        if next_step == blast_db_start:
            BLAST_DATABASE, next_step = recover_blast_db()
            if timing:
                write_blast_db_to_checkpoint(BLAST_DATABASE, args.results_dir)
    if not continue_prev or next_step == '':
        ### Start running each tool.  Each tool should run, creating the repeat masker library (putting the file name
        ### in the pbs lib_file attribute), then run repeat masker (putting the output file name in the pbs
        ### rm_output job.
        ############## Second: Launch tools
        ############## Need to initially launch all of the tool jobs
        jobs = []
        if args.pre:
            Locations['raider'] = Locations['raider_pre']
        if args.run_raider:
            seed_list = [seed for line in open(args.seed_file) for seed in re.split("\s+", line.rstrip()) if seed] if args.seed_file else [args.seed]
            jobs += [run_raider(seed = convert_seed(seed), seed_num = i, f = args.f, m = args.raider_min, input_file = file,
                                raider_dir = args.results_dir + "/" + args.raider_dir, mem = args.mem, max_nodes = args.max_nodes) for i,seed in enumerate(seed_list)
                     for file in file_list]
        if args.run_araider:
            seed_list = [seed for line in open(args.seed_file) for seed in re.split("\s+", line.rstrip()) if seed] if args.seed_file else [args.seed]
            jobs += [run_araider(seed = convert_seed(seed), seed_num = i, f = args.f, m = args.raider_min, input_file = file,
                                 araider_dir = args.results_dir + "/" + args.araider_dir, max_nodes = args.max_nodes) for i,seed in enumerate(seed_list)
                     for file in file_list]
        if args.run_raider2:
            #raider2_ages = [0,1,2]
            seed_list = [seed for line in open(args.seed_file) for seed in re.split("\s+", line.rstrip()) if seed] if args.seed_file else [args.seed]
            jobs += [run_raider2(seed = convert_seed(seed), seed_num = i, f = args.f, m = args.raider_min, input_file = file,
                                 raider2_dir = args.results_dir + "/" + args.raider2_dir, family_array = args.family_array, excise = args.excising,
                                 overlaps = args.overlaps, tieup = args.tieup, prosplit=args.prosplit, prevfam=args.prevfam,
                                 age=args.age, age_only=False, max_nodes=args.max_nodes, mem=args.mem) for i,seed in enumerate(seed_list) for file in file_list]
        #if args.all_ages:
        #    if not args.multi_seed:
        #        jobs += [run_raider2(seed = convert_seed(seed), seed_num = i, f = args.f, m = args.raider_min, input_file = file,
        #                  raider2_dir = args.results_dir + "/" + args.raider2_dir + "." + str(curr_age), age=curr_age) for i,seed in enumerate(seed_list) for curr_age in raider2_ages
        #                  for file in file_list]
        #    else:
        #        jobs += [run_raider2(seed = seed_list, seed_num = "all", f = args.f, m = args.raider_min, input_file = file,
        #                  raider2_dir = args.results_dir + "/" + args.raider2_dir + "." + str(curr_age), age=curr_age) for curr_age in raider2_ages
        #                  for file in file_list]
        #else:
        #    if not args.multi_seed:
        #        jobs += [run_raider2(seed = convert_seed(seed), seed_num = i, f = args.f, m = args.raider_min, input_file = file,
        #                  raider2_dir = args.results_dir + "/" + args.raider2_dir + "." + str(args.age), age=args.age) for i,seed in enumerate(seed_list)
        #                  for file in file_list]
        #    else:
        #        jobs += [run_raider2(seed = seed_list, seed_num = "all", f = args.f, m = args.raider_min, input_file = file,
        #                  raider2_dir = args.results_dir + "/" + args.raider2_dir + "." + str(args.age), age=args.age) for file in file_list]
        if args.run_repscout:
            # rs_filters == 3 means "run all three filter configurations side by side".
            if args.rs_filters == 3:
                jobs += [run_scout(input_file = file, output_dir = args.results_dir + '/' + args.rptscout_dir, min_freq = args.rs_min_freq, length = len(args.seed) if args.seed else args.repscout_min,
                                   use_first_filter = False, use_second_filter = False, threshold = args.f, max_nodes = args.max_nodes, mem = args.mem) for file in file_list]
                jobs += [run_scout(input_file = file, output_dir = args.results_dir + '/' + args.rptscout_dir + "1", min_freq = args.rs_min_freq, length = len(args.seed) if args.seed else args.repscout_min,
                                   use_first_filter = True, use_second_filter = False, threshold = args.f, max_nodes = args.max_nodes, mem = args.mem) for file in file_list]
                jobs += [run_scout(input_file = file, output_dir = args.results_dir + '/' + args.rptscout_dir + "12", min_freq = args.rs_min_freq, length = len(args.seed) if args.seed else args.repscout_min,
                                   use_first_filter = True, use_second_filter = True, threshold = args.f, max_nodes = args.max_nodes, mem=args.mem) for file in file_list]
            else:
                use_first_filter = (args.rs_filters >= 1)
                use_second_filter = (args.rs_filters >= 2)
                dir_addon = ".F1F2" if use_second_filter else ".F1" if use_first_filter else ""
                jobs += [run_scout(input_file = file, output_dir = args.results_dir + '/' + args.rptscout_dir + dir_addon, min_freq = args.rs_min_freq, length = len(args.seed) if args.seed else args.repscout_min,
                                   use_first_filter = use_first_filter, use_second_filter = use_second_filter, threshold = args.f, max_nodes = args.max_nodes, mem = args.mem) for file in file_list]
        if args.run_bigfoot:
            bigfoot_dir = args.results_dir + "/" + args.bigfoot_dir    # Name of the directory all bigfoot files will go into
            if not os.path.exists(bigfoot_dir):
                os.makedirs(bigfoot_dir)
            jobs += [run_bigfoot(input_file = file, bigfoot_dir = bigfoot_dir, L = args.bigfoot_L, C = args.bigfoot_min, I = args.bigfoot_I, T = args.bigfoot_T) for file in file_list]
        if args.run_piler:
            piler_dir = args.results_dir + "/" + args.piler_dir    # Name of the directory all piler files will go into
            if not os.path.exists(piler_dir):
                os.makedirs(piler_dir)
            jobs +=[run_piler(input_file = file, piler_dir = piler_dir, max_nodes = args.max_nodes) for file in file_list]
        ############## Third: Launch repeatmasker jobs
        job_set = {j for j in jobs}
        RM_jobs, PRA_jobs = run_timed_tool_jobs(jobs, args.repmask, args.pa, args.pra, BLAST_DATABASE)
    else:
        ############# Didn't finish processing all of the tool jobs
        jobs = []
        RM_jobs = set()
        PRA_jobs = set()
        if next_step == tjobs_start:
            jobs, next_step = recover_tool_jobs()
        if next_step == prajobs_start:
            PRA_jobs, next_step = recover_pra_jobs()
        if next_step == rmjobs_start:
            RM_jobs, next_step = recover_rm_jobs()
        flush_files()
        RM_jobs, PRA_jobs = run_timed_tool_jobs(jobs, args.repmask, args.pa, args.pra, BLAST_DATABASE)
    if not continue_prev or next_step == '':
        ########## Need to run analysis jobs
        job_dic, stats_jobs, PRA_jobs = run_timed_analysis_jobs(args.repmask, args.pra, RM_jobs, PRA_jobs, args.results_dir)
    else:
        PRA_jobs = set()
        RM_jobs = set()
        stats_jobs = set()
        job_dic = None
        if next_step == prajobs_start:
            PRA_jobs, next_step = recover_pra_jobs()
        if next_step == rmjobs_start:
            RM_jobs, next_step = recover_rm_jobs()
        if next_step == stats_start:
            stats_jobs, next_step = recover_stats_jobs()
        if next_step == jobdic_start:
            old_job_dic, next_step = recover_job_dic()
            if not RM_jobs:
                job_dic = old_job_dic
            else:
                job_dic, stats_jobs, PRA_jobs = run_timed_analysis_jobs(args.repmask, args.pra, RM_jobs, PRA_jobs, args.results_dir, stats_jobs, old_job_dic)
        else:
            flush_files()
            job_dic, stats_jobs, PRA_jobs = run_timed_analysis_jobs(args.repmask, args.pra, RM_jobs, PRA_jobs, args.results_dir, stats_jobs)
    # Order per-tool job lists by seed index so output rows line up with seeds.
    job_dic['raider'].sort(key = lambda x: x.seed_num)
    job_dic['raider2'].sort(key = lambda x: x.seed_num)
    job_dic['araider'].sort(key = lambda x: x.seed_num)
    # Print output files log
    with open(args.results_dir + "/file_log.txt", "w") as fp:
        for i in range(len(file_list)):
            fp.write("%d simulation_file %s\n" % (i, file_list[i]))
        for k in test_tools:
            fp.write(k + "\n")
            for j in job_dic[k]:
                if args.repmask:
                    fp.write(j.rm_output + "\n")
    ######
    # Create copy of seed file (if RAIDER is being used)
    if job_dic['raider'] or job_dic['araider'] or job_dic['raider2']:
        # NOTE(review): seed_list is only bound when a --run_* branch above ran
        # during *this* invocation; on a resumed run this may NameError -- confirm.
        with open(args.results_dir + "/seed_file.txt", "w") as fp:
            fp.write("\n".join(["{index:<5}{seed}".format(index=i,seed=s) for i,s in enumerate(seed_list)]) + "\n")
    ### KARRO
    # Finally: we should not terminate until all the pra jobs are done.  (If pra is off, this list will be empty.)
    #for p in PRA_jobs:
    #    p.timed_wait()    # KARRO:  Is this the correct method to use to ensure resubmission of needed
    ### KARRO END
    regex = re.compile("(?<=\# Average consensus coverage: )\d+.\d+")
    ######
    # Calculate statistics (not bothering with parallelization yet)
    #print_str: tool seed tp/fp/fn/tn tpr - fdr ToolCpuTime - RVMem Con/QuCoverage
    print_str = "{:<12}" + "{:<5}" + "".join("{:<14}"*4) + "".join("{:<14}"*6) + "".join("{:<14}"*8) + "{:<14}"*2 + "\n"
    with open(args.results_dir + "/" + args.stats_file, "w") as fp:
        fp.write(print_str.format("#tool", "seed", "tp", "fp", "fn", "tn", "tpr", "tnr", "ppv", "npv", "fpr", "fdr","ToolCpuTime", "ToolWallTime", "ToolMem", "ToolVMem", "RMCpuTime", "RMWallTime", "RMMem", "RMVMem", "ConCoverage", "QuCoverage"))
        for key in test_tools:
            for p in job_dic[key]:
                # Defaults used when a stage failed or was skipped.
                Counts = [0,0,0,0]
                Stats = [0,0,0,0,0,0]
                RMResources = [0,0,0,0]
                Coverage = 0
                CoverageResources = [0,0,0,0]
                try:
                    if args.repmask:
                        #progress_fp.write(print_time() + "\n")
                        progress_fp.write("Calling: perform_stats.py(%s, %s, None)" % (p.seq_file + ".out", p.rm_output))
                        try:
                            Counts, Stats, Sets = perform_stats.perform_stats(p.seq_file + ".out", p.rm_output, None)
                            Stats = [round(x,5) for x in Stats]
                            RMResources = list(p.getResources(cleanup=False))
                            if args.hooke_jeeves:
                                print(Counts[1]+Counts[2])
                        except Exception as E:
                            #progress_fp.write(print_time() + "\n")
                            progress_fp.write("performance Exception: " + str(E) + "\n");
                            fp.write("\t".join([str(key), str(p.seed_num) if hasattr(p, "seed_num") else "NA", "INCOMPLETE\t"]))
                            fp.write("Resources: \t" + "\t".join(p.getResources(cleanup=False)) + "\n");
                            continue
                    if p.pra_job:
                        try:
                            progress_fp.write("parse_pra_outpt: %s %s\n" % (p.pra_job.pra_output, args.exclude))
                            consensus_coverage, query_coverage, Used = parse_pra_output(p.pra_job.pra_output, args.exclude)
                            #matches = regex.findall(open(p.pra_job.pra_output, "r").read())
                            #if len(matches) > 0:
                            #    Coverage = matches[0]
                        except Exception as E:
                            #progress_fp.write(print_time() + "\n")
                            progress_fp.write("PRA Parsing Exception: " + str(E) + "\n");
                            progress_fp.flush()
                    else:
                        try:
                            progress_fp.write("parse_pra_outpt: %s %s\n" % (p.pra_output, args.exclude))
                            consensus_coverage, query_coverage, Used = parse_pra_output(p.pra_output, args.exclude)
                            #matches = regex.findall(open(p.pra_output, "r").read())
                            #if len(matches) > 0:
                            #    Coverage = matches[0]
                            #CoverageResources = list(p.getResources(cleanup=False))
                        except Exception as E:
                            #progress_fp.write(print_time() + "\n")
                            progress_fp.write("PRA Parsing Exception: " + str(E) + "\n");
                            progress_fp.flush()
                    # NOTE(review): if parse_pra_output failed above, consensus_coverage /
                    # query_coverage are unbound here; the resulting NameError falls through
                    # to the outer except and the row is written as INCOMPLETE.
                    fp.write(print_str.format(*([key, p.seed_num] + list(Counts) + list(Stats) + list(p.tool_resources) + list(RMResources) + [consensus_coverage] + [query_coverage])))
                except Exception as E:
                    ##progress_fp.write(print_time() + "\n")
                    progress_fp.write("performance Exception: " + str(E) + "\n");
                    fp.write("\t".join([str(key), str(p.seed_num) if hasattr(p, "seed_num") else "NA", "INCOMPLETE\n"]))
|
karroje/RAIDER_eval
|
RAIDER_eval.py
|
Python
|
gpl-3.0
| 87,080
|
[
"BLAST"
] |
2a278f09256bff47838edfbd9b47840e0e922b6bcd679943a8d3307e79f28ec7
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Sprax Lines 2017.04.01 Written with Python 3.5
# To re-import a library in Python 3.4+ (re-import), do:
# import importlib
# importlib.reload(nameOfModule)
'''text filter functions'''
import argparse
import heapq
import os.path
import re
import math
import string
import sys
import text_ops
from utf_print import utf_print
import inflection
###############################################################################
# Translation tables (built with str.maketrans) used by the filters below.
TRANS_NO_WHAT = str.maketrans(u"\u2018\u2019\u201c\u201d", "\'\'\"\"")  # Unicode curly quotes -> straight ASCII
TRANS_NO_SMART = str.maketrans("\x91\x92\x93\x94", "''\"\"")  # CP-1252 "smart" quotes -> straight ASCII
TRANS_NO_PUNCT = str.maketrans('', '', string.punctuation)  # delete all ASCII punctuation
TRANS_NO_DIGITS = str.maketrans('', '', string.digits)  # delete all ASCII digits
# Dict-based table: individual Unicode quote code points -> ASCII quotes.
UNICODE_TO_ASCII = str.maketrans({
    u"\u2018" : "'",
    u"\u2019" : "'",
    u"\u201c" : '"',
    u"\u201d" : '"',
})
# Dict-based table: CP-1252 quotes/dashes (plus backtick and 0xF0) -> ASCII.
ISO_TO_ASCII = str.maketrans({
    "`" : "'",
    u"\x91" : "'",
    u"\x92" : "'",
    u"\x93" : '"',
    u"\x94" : '"',
    u"\x97" : '--',
    u"\xf0" : '-',
})
def translate_smart_quotes(in_str, table=TRANS_NO_SMART):
    '''Map CP-1252 "smart" (curly) quote code points to straight ASCII quotes.'''
    straightened = in_str.translate(table)
    return straightened
def translate_iso_to_ascii(in_str):
    '''Map CP-1252 quotes, em dash, backtick, and 0xF0 to ASCII equivalents.'''
    result = in_str.translate(ISO_TO_ASCII)
    return result
def remove_punctuation(in_str, table=TRANS_NO_PUNCT):
    '''Delete every string.punctuation character from in_str.'''
    cleaned = in_str.translate(table)
    return cleaned
def replace_quotes(instr):
    '''Replace CP-1252 curly single/double quotes one pair at a time (slow path).'''
    for curly, straight in (("\x91", "'"), ("\x92", "'"), ("\x93", '"'), ("\x94", '"')):
        instr = instr.replace(curly, straight)
    return instr
def replace_emdashes(in_str):
    '''Replace each CP-1252 em dash (0x97) with a double hyphen (--).'''
    em_dash = "\x97"
    return in_str.replace(em_dash, "--")
#TODO: if really bored, implement reverse_trans for each class
class IsoToAscii:
    '''Translate non-ASCII characters to ASCII near-equivalents, dropping the rest.'''
    translation = ISO_TO_ASCII  # module-level str.maketrans table

    def translate(self, in_str):
        '''Return an ASCII-only str version of in_str.

        Bug fix: the previous version returned ``in_str.encode('ascii')`` -- a
        *bytes* object -- when the input was already pure ASCII, while returning
        ``str`` otherwise.  Downstream translators in this file apply str regex
        substitutions to the result, which raises TypeError on bytes.  The
        encode() call is now used only as a probe and the original str is
        returned unchanged for ASCII input.
        '''
        try:
            in_str.encode('ascii')  # probe only; result discarded
            return in_str           # already pure ASCII
        except UnicodeEncodeError:
            out = in_str.translate(self.translation)
            # Translation may still leave non-ASCII characters; drop them.
            return ''.join([asc for asc in out if ord(asc) < 128])
class NoSpaceBeforePunct:
    '''Collapse whitespace runs and drop any space left before punctuation.'''
    regex = re.compile(r' ([!%,./:;?])')

    def translate(self, in_str):
        '''Return in_str with whitespace normalized and pre-punctuation spaces removed.'''
        collapsed = re.sub(r'\s+', ' ', in_str)
        return self.regex.sub(r'\1', collapsed)
class TwoSingleQuoteToDoubleQuote:
    '''Fold a tokenized pair of single quotes ('') into one double-quote mark.'''
    regex = re.compile(" ''([ !\"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~]|$)")

    def translate(self, in_str):
        '''Return in_str with " ''" (before punctuation/space/end) rewritten as " "".'''
        folded = self.regex.sub(r' "\1', in_str)
        return folded
class JoinContractions:
    '''Reattach tokenizer-split contractions, e.g. "do n't" -> "don't".'''
    regex = re.compile(r"\b(.*) (n't|'s) ")

    def translate(self, in_str):
        '''Return in_str with split contraction suffixes rejoined to their word.'''
        rejoined = self.regex.sub(r"\1\2 ", in_str)
        return rejoined
class JoinPossessive:
    '''Reattach a tokenized trailing possessive apostrophe to its word.'''
    regex = re.compile(" ' ")

    def translate(self, in_str):
        '''Return in_str with " ' " collapsed to "' ".'''
        rejoined = self.regex.sub(r"' ", in_str)
        return rejoined
class JoinQuoted:
    '''Reattach quotation marks to the text they enclose ('" hi "' -> '"hi"').'''
    regex = re.compile(r"([\"']) ((?:\\\1|.)*?) \1")

    def translate(self, in_str):
        '''Return in_str with the spaces just inside matched quote pairs removed.'''
        tightened = self.regex.sub(r"\1\2\1", in_str)
        return tightened
def filter_non_ascii(in_str):
    '''Keep only ASCII characters (ord < 128); deprecated in favor of remove_non_ascii.'''
    return "".join(ch for ch in in_str if ord(ch) < 128)
def remove_non_ascii(in_str):
    '''Filter out non-ASCII characters, keeping only code points below 128.'''
    return "".join(filter(lambda ch: ord(ch) < 128, in_str))
def translate_to_ascii(in_str):
    '''Best-effort ASCII translation; return the input unchanged on decode errors.

    NOTE(review): str.translate on a str rarely raises UnicodeDecodeError in
    Python 3 -- this guard looks like a Python 2 holdover; confirm before removing.
    '''
    try:
        return translate_iso_to_ascii(in_str)
    except UnicodeDecodeError:
        return in_str
###############################################################################
def read_lines_to_ascii(file_spec, charset='utf-8'):
    '''Read a text file and yield each line, right-stripped, as str.

    Bug fix: the old body called ``line.decode(...)`` on each line, but a file
    opened in text mode yields ``str``, which has no decode() method in
    Python 3 -- the generator raised AttributeError on the first line.  The
    decode step is removed; undecodable bytes are instead ignored at open()
    time via ``errors='ignore'`` (preserving the old intent of ignoring
    decoding errors).
    '''
    with open(file_spec, 'r', encoding=charset, errors='ignore') as text:
        for line in text:
            yield line.rstrip()
def utf_print_words(fspec):
    '''Print each word of a UTF-8 text file via utf_print, then each line's word list.'''
    with open(fspec, 'r', encoding="utf8") as text:
        for line in text:
            words = re.split(r'\W+', line.rstrip())
            for word in words:
                if word:
                    utf_print(word)
            utf_print(words)
def rank_dict_by_value(summary_count, ranking):
    '''Return the keys of the summary_count largest values in ranking, descending.'''
    # Equivalent to heapq.nlargest(summary_count, ranking, key=ranking.get).
    return sorted(ranking, key=ranking.get, reverse=True)[:summary_count]
def resolve_count(sub_count, percent, total_count):
    '''Return a reconciled (sub_count, percent) pair, where count trumps percentage.

    If sub_count is falsy, it is derived from percent of total_count (rounded
    up); the result is then clamped to the range [1, total_count] and the
    percentage recomputed to match.

    Robustness fix: a non-positive total_count previously raised
    ZeroDivisionError; it now returns (0, 0.0).
    '''
    if total_count <= 0:
        return 0, 0.0
    if not sub_count:
        sub_count = int(math.ceil(percent * total_count / 100.0))
    if sub_count > total_count:
        sub_count = total_count
    if sub_count < 1:
        sub_count = 1
    percent = sub_count * 100.0 / total_count
    return sub_count, percent
def map_file(function, in_path, out_path, charset='utf8'):
    '''Apply function to each (rstripped) input line; print truthy results to out_path.'''
    with open(in_path, 'r', encoding=charset) as src, open(out_path, 'w') as dst:
        for raw_line in src:
            transformed = function(raw_line.rstrip())
            if transformed:
                print(transformed, file=dst)
def translate_para_file(para_filter, in_path, out_path, charset='utf8'):
    '''Filter each paragraph of in_path with para_filter; write results (or a
    single space for empty results) to out_path.'''
    with open(in_path, 'r', encoding=charset) as text, open(out_path, 'w') as out_file:
        for para in text_ops.paragraph_iter(text):
            filtered = para_filter.filter_line(para)
            print(filtered if filtered else ' ', file=out_file)
def translate_lines_in_file(line_translators, in_path, out_path, charset='utf8'):
    '''
    Translate input line by line to output file.
    Usage: translate_lines_in_file(line_translators, in_path, out_path, charset='utf8')
    Each translator must provide a translate(str) -> str method; falsy results
    are dropped.  out_path == '-' writes to standard output.

    Bug fix: the old code did ``with (sys.stdout if out_path == '-' else
    open(...))``, which *closed sys.stdout* on exit in the '-' case, breaking
    all later output.  stdout is now used without a context manager.
    '''
    def _translate_stream(text, out_file):
        # Run every translator over each line, printing non-empty results.
        for line in text:
            for translator in line_translators:
                line = translator.translate(line)
            if line:
                print(line, file=out_file)

    with open(in_path, 'r', encoding=charset) as text:
        if out_path == '-':
            _translate_stream(text, sys.stdout)  # never close sys.stdout
        else:
            with open(out_path, 'w') as out_file:
                _translate_stream(text, out_file)
########################################################
def translate_file(in_path, out_path, opt):
    """Rewrite a text file through the standard translator pipeline."""
    # Announce output:
    destination = '<stdout>' if out_path == '-' else out_path
    print(in_path, '====>', destination)
    print('-------------------------------------------------------------------')
    # Order matters: ASCII normalization first, quote joining last.
    pipeline = [IsoToAscii(),
                JoinContractions(),
                NoSpaceBeforePunct(),
                TwoSingleQuoteToDoubleQuote(),
                JoinPossessive(),
                JoinQuoted()]
    translate_lines_in_file(pipeline, in_path, out_path, opt.charset)
###############################################################################
def pluralize(word):
    '''
    Return the plural form of the given word.
    TODO: Check that word is a noun (or an adjective or at any rate can
    be sensibly used as a noun) before calling inflection.pluralize?
    If not, return (word, false)
    FIXME BUGS: inflection is often wrong, e.g. (safe <-> saves)
    '''
    # Special-case *afe words (safe, cafe, ...) which inflection mishandles.
    if word.lower().endswith('afe'):
        return word + 's'
    return inflection.pluralize(word)
def singularize(word):
    '''
    Return the singular form of the given word.
    TODO: Check that word is a noun (or an adjective or at any rate can
    be sensibly used as a noun) before calling inflection.singularize?
    FIXME BUGS: inflection returns many wrong answers by pattern:
        *aves -> *afe
    It uses incomplete special case matching (octopus),
    and does not recognize many other pairs such as:
        (locus, loci)
    NB: pattern3.en is not yet functional (2017.07.10)
    '''
    # Special-case *aves words (saves, waves, ...) which inflection maps to *afe.
    if word.lower().endswith('aves'):
        return word.rstrip('sS')
    return inflection.singularize(word)
def plural_if_diff(word):
    '''Return pluralize(word) when it differs from singularize(word), else None.'''
    plural_form = pluralize(word)
    singular_form = singularize(word)
    if plural_form == singular_form:
        return None
    return plural_form
def singular_if_diff(word):
    '''Return singularize(word) when it differs from pluralize(word), else None.'''
    plural_form = pluralize(word)
    singular_form = singularize(word)
    if plural_form == singular_form:
        return None
    return singular_form
###############################################################################
def abs_path(dir_spec, file_spec):
    '''Return file_spec unchanged when absolute, else join it onto dir_spec.'''
    return file_spec if os.path.isabs(file_spec) else os.path.join(dir_spec, file_spec)
# Regex for one Webster's-dictionary entry: newline, ALL-CAPS headword,
# pronunciation token, part-of-speech abbreviations, optional "Etym: [...]"
# etymology, then the "Defn:" text up to the first period.
REP_WEBSTER = r'\n([A-Z-]+)\s+([^\s,]+)[^,]*,\s+((?:[a-z]\.\s*)+)(?:Etym:\s+\[([^]]+)\])?\s*(?:Defn:\s)([^.]+)?'
REC_WEBSTER = re.compile(REP_WEBSTER)
def filter_text_file():
    '''Filter lines or sentences in a text file.

    Command-line entry point: parses the arguments below, then either
    (a) -map_file: singularize each input line via map_file and exit,
    (b) -verbose > 7: dump diagnostics and exit, or
    (c) resolve in/out paths against -dir and rewrite the input with
        translate_file.
    '''
    parser = argparse.ArgumentParser(
        # usage='%(prog)s [options]',
        description="test text_filters")
    parser.add_argument('in_path', type=str, nargs='?', default='train_1000.label',
                        help='file containing text to filter')
    parser.add_argument('-dir', dest='text_dir', type=str, default='/Users/sprax/Text',
                        help='directory to search for in_path')
    parser.add_argument('-charset', dest='charset', type=str, default='iso-8859-1',
                        help='charset encoding of input text')
    parser.add_argument('-list_numbers', action='store_true',
                        help='output list number for each filtered sentence')
    parser.add_argument('-map_file', action='store_true',
                        help='test map_file')
    parser.add_argument('-number', dest='max_lines', type=int, nargs='?', const=1, default=0,
                        help='number of sentences to keep (default: 5), overrides -percent')
    parser.add_argument('-out_path', type=str, nargs='?', default='lab.txt',
                        help='output path for filtered text (default: - <stdout>)')
    parser.add_argument('-truncate', dest='max_words', type=int, nargs='?',
                        const=8, default=0,
                        help='truncate sentences after MAX words (default: INT_MAX)')
    parser.add_argument('-verbose', type=int, nargs='?', const=1, default=1,
                        help='verbosity of output (default: 1)')
    args = parser.parse_args()
    if args.map_file:
        # Test hook: singularize each line of the input file, then exit.
        # map_file(singular_if_diff, args.in_path, args.out_path)
        map_file(singularize, args.in_path, args.out_path)
        exit(0)
    if args.verbose > 7:
        # Diagnostic mode: dump parsed args and the module docstring, then exit.
        print("out_path: <{}>".format(args.out_path))
        print("args:", args)
        print(__doc__)
        exit(0)
    in_path = abs_path(args.text_dir, args.in_path)
    out_path = args.out_path
    if out_path != '-':
        # '-' means stdout; any other path is resolved against -dir.
        out_path = abs_path(args.text_dir, args.out_path)
    translate_file(in_path, out_path, args)
# Script entry point.
if __name__ == '__main__':
    filter_text_file()
|
sprax/python
|
txt/text_filters.py
|
Python
|
lgpl-3.0
| 11,763
|
[
"Octopus"
] |
2e29c2a4dbd3bd731ef954b01df0e48c473f86e59a6df18e94f8c9e1a87405cc
|
'''
Created on Jan 24, 2014
@author: julian Garrido
'''
import os
import astropy.io.fits as fits
import sys
class whispCubes(object):
    '''
    This class reads the FITS files of WHISP cubes and creates SQL
    INSERT commands to populate a postgres database.

    Typical usage: readHeaders() -> initIDCounters() -> writeSQLcommands()
    or printSQLcommands().  No database connection is opened; the class
    only emits SQL text.
    '''
    def __init__(self, db):
        '''
        Constructor.

        db: name of the target postgres database.  Stored but currently
        unused by the SQL-generation methods.
        '''
        #self.password = password
        #self.server = server
        #self.user = user
        self.db = db
    def readHeaders(self, folder):
        '''
        folder is the directory where the data are. It collects the paths of
        those files whose name ends in *cl.fits or *cl.fit into self.files.
        '''
        #get full path for files in the folder
        fullpathList = (os.path.join(folder, x) for x in os.listdir(folder))
        # Keep only regular files with the expected "cl" (clean) suffixes.
        self.files = [f for f in fullpathList if os.path.isfile(f) and (f.endswith("cl.fits") or f.endswith("cl.fit"))]
        #for file in self.files:
        #    print file
        #file = files[4]
        #hdr = fits.getheader(file)
        #self.hdrlist = [fits.getheader(file) for file in files]
    def initIDCounters(self):
        '''
        It initializes the ID sequences. This method could be improved by
        loading the current sequence values from the database.
        '''
        # Sequences: one per table whose rows are created per input file;
        # these are incremented by increaseIDCounters() after each file.
        self.DATA_SET_ID = 1
        self.CHAR_SPECTRAL_ID = 1
        self.CHAR_SPATIAL_ID = 1
        self.CHAR_FLUX_ID = 1
        self.CHAR_TIME_ID = 1
        self.CHAR_VELOCITY_ID = 1
        self.TARGET_ID = 1
        self.DATA_SET_IMAGE_ID = 1
        self.PROVENANCE_ID = 1
        self.TAPERING_ID = 1
        # Fixed values: shared foreign keys that never advance.
        self.POLARIZATION_ID = 1
        self.LINE_ID = 1
        self.TAPERING_ID_GAUSSIAN = 1
        self.FACILITY_ID= 1
        self.INSTRUMENT_ID = 1
        self.FORMAT_ID = 1
        self.DATASETTYPE_ID = 1
        self.DATASETSUBTYPE_ID = 1
        self.CHAR_SPECTRA_AXIS_ID = 1
        self.CHAR_SPATIAL_AXIS_ID = 1
        self.CHAR_FLUX_AXIS_ID = 1
        self.TARGETCLASS_ID = 1
    def increaseIDCounters(self):
        '''
        Update the per-file counters (ID sequences); called once per
        processed FITS file by createSQLcommands().
        '''
        self.DATA_SET_ID += 1
        self.CHAR_SPECTRAL_ID += 1
        self.CHAR_SPATIAL_ID += 1
        self.CHAR_FLUX_ID += 1
        self.CHAR_TIME_ID += 1
        self.CHAR_VELOCITY_ID += 1
        self.TARGET_ID += 1
        self.DATA_SET_IMAGE_ID += 1
        self.PROVENANCE_ID += 1
        self.TAPERING_ID += 1
    def printSQLcommands(self):
        # Emit the SQL to stdout.  printOption=True tells
        # createSQLcommands() not to close the stream (it is sys.stdout).
        self.f = sys.stdout
        self.printOption = True
        self.createSQLcommands()
    def writeSQLcommands(self, outputFile):
        # Emit the SQL to a file on disk.
        self.outputPath = outputFile
        # Write mode creates a new file or overwrites the existing content of the file.
        # Write mode will _always_ destroy the existing contents of a file.
        try:
            # This will create a new file or **overwrite an existing file**.
            self.f = open(self.outputPath, "w")
            self.printOption = False
            self.createSQLcommands()
        except IOError:
            # NOTE(review): open() failures are silently swallowed; the
            # caller gets no indication that nothing was written.
            pass
    def createSQLcommands(self):
        """
        This function requires the previous initialization of self.f as an object where
        the write method is defined (see printSQLcommands / writeSQLcommands).

        For every FITS file in self.files it emits one INSERT per related
        table, then advances the ID counters; finally it emits setval()
        calls so the postgres sequences match the last used IDs.
        """
        try:
            #f.writelines(lines) # Write a sequence of strings to a file
            for file in self.files :
                hdr = fits.getheader(file)
                # CharTime: observation date, or NULL when DATE-OBS is absent.
                # NOTE(review): the date is interpolated unquoted; a string
                # value would need single quotes to be valid SQL -- confirm.
                try:
                    self.f.write("\n\n-- Data for Name: CharTime; Type: TABLE DATA; Schema: vodata_cubes; Owner: vodata_cubes\n")
                    self.f.write("INSERT INTO \"CharTime\" (\"ID\", \"Location\") VALUES ({}, {});\n".format(self.CHAR_TIME_ID, hdr['DATE-OBS']))
                except KeyError:
                    self.f.write("INSERT INTO \"CharTime\" (\"ID\", \"Location\") VALUES ({}, {});\n".format(self.CHAR_TIME_ID, "NULL"))
                self.f.write("\n-- Data for Name: CharSpectral; Type: TABLE DATA; Schema: vodata_cubes; Owner: vodata_cubes\n")
                # Spectral coverage derived from axis 3 (channel axis) keywords.
                start = hdr['CRVAL3'] - abs(hdr['CHANSTA'])*hdr['CDELT3']
                stop = hdr['CRVAL3']+ abs(hdr['CHANEND'])*hdr['CDELT3']
                sampleExtent = abs(hdr['CDELT3']) * (hdr['LASTLCH']-hdr['FIRSTLCH'])
                self.f.write("INSERT INTO \"CharSpectral\" (\"ID\", \"Location\", \"Extent\", \"Start\", \"Stop\", \"SampleExtent\", \"Resolution\") VALUES ({}, {}, {}, {}, {}, {}, {});\n".format(
                    self.CHAR_SPECTRAL_ID, hdr['FREQR'], hdr['BANDW'], start, stop, sampleExtent, abs(hdr['CDELT3'])))
                self.f.write("\n-- Data for Name: CharSpatial; Type: TABLE DATA; Schema: vodata_cubes; Owner: vodata_cubes\n")
                # Spatial coverage: pointing centre plus a bounding box computed
                # from the reference values and pixel scales of axes 1 and 2.
                location = str(hdr['PCRA']) + ' ' + str (hdr['PCDEC'])
                extent = (abs(hdr['CDELT1']*hdr['NAXIS1']/2)+abs(hdr['CDELT2']*hdr['NAXIS2']/2))/2
                RALoLimit = hdr['CRVAL1']-(hdr['CDELT1']*(hdr['NAXIS1']/2))
                DecLoLimit = hdr['CRVAL2']-(hdr['CDELT2']*(hdr['NAXIS2']/2))
                RAHiLimit = hdr['CRVAL1']+(hdr['CDELT1']*(hdr['NAXIS1']/2))
                DecHiLimit = hdr['CRVAL2']+(hdr['CDELT2']*(hdr['NAXIS2']/2))
                LocationLoLimit = str(hdr['CRVAL1']-(hdr['CDELT1']*(hdr['NAXIS1']/2))) + ' ' + str(hdr['CRVAL2']-(hdr['CDELT2']*(hdr['NAXIS2']/2)))
                LocationHiLimit = str(hdr['CRVAL1']+(hdr['CDELT1']*(hdr['NAXIS1']/2))) + ' ' + str(hdr['CRVAL2']+(hdr['CDELT2']*(hdr['NAXIS2']/2)))
                Resolution = (abs(hdr['CDELT1'])+abs(hdr['CDELT2']))/2
                self.f.write("INSERT INTO \"CharSpatial\" (\"ID\", \"RA\", \"Dec\", \"Location\", \"RALoLimit\", \"DecLoLimit\", \"RAHiLimit\", \"DecHiLimit\", \"LocationLoLimit\", \"LocationHiLimit\", \"Extent\", \"Resolution\") VALUES ({}, {}, {}, '{}', {}, {}, {}, {}, '{}', '{}', {}, {});\n".format(
                    self.CHAR_SPATIAL_ID, hdr['PCRA'], hdr['PCDEC'], location, RALoLimit, DecLoLimit, RAHiLimit, DecHiLimit, LocationLoLimit, LocationHiLimit, extent, Resolution))
                self.f.write("\n-- Data for Name: CharFlux; Type: TABLE DATA; Schema: vodata_cubes; Owner: vodata_cubes\n")
                self.f.write("INSERT INTO \"CharFlux\" (\"ID\", \"Min\", \"Max\", \"StatError\") VALUES ({}, {}, {}, {});\n".format(
                    self.CHAR_FLUX_ID, hdr['DATAMIN'], hdr['DATAMAX'], hdr['NOISE']))
                self.f.write("\n-- Data for Name: CharVelocity; Type: TABLE DATA; Schema: vodata_cubes; Owner: vodata_cubes\n")
                self.f.write("INSERT INTO \"CharVelocity\" (\"ID\", \"Location\") VALUES ({}, {});\n".format(self.CHAR_VELOCITY_ID, hdr['VEL']))
                self.f.write("\n-- Data for Name: Tapering; Type: TABLE DATA; Schema: vodata_cubes; Owner: vodata_cubes\n")
                # NOTE(review): gaussian tapering rows are emitted only while
                # TAPERING_ID == 1 (i.e. for the first file); later gaussian
                # files reuse TAPERING_ID_GAUSSIAN in Provenance below, so this
                # condition looks intentional but should be confirmed.
                if hdr['BLGRAD'] == 'GAUSSIAN':
                    if self.TAPERING_ID == 1:
                        self.f.write("INSERT INTO \"Tapering\" (\"ID\", \"Label\") VALUES ({}, '{}');\n".format(self.TAPERING_ID_GAUSSIAN, hdr['BLGRAD']))
                else:
                    self.f.write("INSERT INTO \"Tapering\" (\"ID\", \"Label\") VALUES ({}, '{}');\n".format(self.TAPERING_ID, hdr['BLGRAD']))
                self.f.write("\n-- Data for Name: DataSetImage; Type: TABLE DATA; Schema: vodata_cubes; Owner: vodata_cubes\n")
                axis = str(hdr['NAXIS1']) + ' ' + str(hdr['NAXIS2']) + ' ' + str(hdr['NAXIS3'])
                wcsaxes = str(hdr['CTYPE1']) + ' ' + str(hdr['CTYPE2']) + ' ' + str(hdr['CTYPE3'])
                self.f.write("INSERT INTO \"DataSetImage\" (\"ID\", \"Axes\", \"Axis\", \"WCSAxes\") VALUES ({}, {}, '{}', '{}');\n".format(
                    self.DATA_SET_IMAGE_ID, hdr['NAXIS'], axis, wcsaxes))
                self.f.write("\n-- Data for Name: Provenance; Type: TABLE DATA; Schema: vodata_cubes; Owner: vodata_cubes\n")
                # Beam parameters are optional header keywords; fall back to
                # SQL NULL when absent.
                try:
                    bmmin = hdr['BMMIN']
                except KeyError:
                    bmmin = 'NULL'
                try:
                    bmmaj = hdr['BMMAJ']
                except KeyError:
                    bmmaj = 'NULL'
                try:
                    bmpa = hdr['BMPA']
                except KeyError:
                    bmpa = 'NULL'
                if hdr['BLGRAD'] == 'GAUSSIAN':
                    tapering_id = self.TAPERING_ID_GAUSSIAN
                else:
                    tapering_id = self.TAPERING_ID
                self.f.write("INSERT INTO \"Provenance\" (\"ID\", \"BeamMajorAxis\", \"BeamMinorAxis\", \"BeamPositionAngle\", \"Instrument_ID\", \"Facility_ID\", \"Tapering_ID\") VALUES ({}, {}, {}, {}, {}, {}, {});\n".format(
                    self.PROVENANCE_ID, bmmaj, bmmin, bmpa, self.INSTRUMENT_ID, self.FACILITY_ID, tapering_id))
                self.f.write("\n-- Data for Name: Target; Type: TABLE DATA; Schema: vodata_cubes; Owner: vodata_cubes\n")
                self.f.write("INSERT INTO \"Target\" (\"ID\", \"Name\", \"Velocity\", \"TargetClass_ID\") VALUES ({}, '{}', {}, {});\n".format(
                    self.TARGET_ID, hdr['OBJECT'], hdr['VELR'], self.TARGETCLASS_ID))
                self.f.write("\n-- Data for Name: DataSet; Type: TABLE DATA; Schema: vodata_cubes; Owner: vodata_cubes\n")
                # Size on disk (KiB) and number of voxels in the cube.
                statinfo = os.stat(file)
                size = statinfo.st_size
                length = hdr['NAXIS1'] * hdr['NAXIS2'] * hdr['NAXIS3']
                self.f.write("INSERT INTO \"DataSet\" (\"ID\", \"AcReference\", \"Size\", \"DataLength\", \"Format_ID\", \"DataSetType_ID\", \"DataSetSubType_ID\", \"Provenance_ID\", \"Target_ID\", \n")
                self.f.write("\"DataSetImage_ID\", \"CharSpectral_ID\", \"CharSpectralAxis_ID\", \"CharSpatialAxis_ID\", \"CharSpatial_ID\", \"CharFluxAxis_ID\", \"CharFlux_ID\", \"CharTime_ID\") \n")
                self.f.write(" VALUES ({}, '{}', {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {});\n".format(
                    self.DATA_SET_ID, file, size/1024, length, self.FORMAT_ID, self.DATASETTYPE_ID, self.DATASETSUBTYPE_ID, self.PROVENANCE_ID, self.TARGET_ID,
                    self.DATA_SET_IMAGE_ID, self.CHAR_SPECTRAL_ID, self.CHAR_SPECTRA_AXIS_ID, self.CHAR_SPATIAL_AXIS_ID, self.CHAR_SPATIAL_ID,
                    self.CHAR_FLUX_AXIS_ID, self.CHAR_FLUX_ID, self.CHAR_TIME_ID))
                self.f.write("\n-- Data for Name: CharPolarization; Type: TABLE DATA; Schema: vodata_cubes; Owner: vodata_cubes\n")
                self.f.write("INSERT INTO \"CharPolarization\" (\"DataSet_ID\", \"Polarization_ID\") VALUES ({}, {});\n".format(
                    self.DATA_SET_ID, self.POLARIZATION_ID))
                self.f.write("\n-- Data for Name: DataSetLines; Type: TABLE DATA; Schema: vodata_cubes; Owner: vodata_cubes\n")
                self.f.write("INSERT INTO \"DataSetLines\" (\"DataSet_ID\", \"Line_ID\", \"CharVelocity_ID\") VALUES ({}, {}, {});\n".format(
                    self.DATA_SET_ID, self.LINE_ID, self.CHAR_VELOCITY_ID, ))
                # Advance all per-file IDs for the next FITS file.
                self.increaseIDCounters()
            # Set values for the sequences so future inserts continue after
            # the last emitted ID (counters are one past the last used value).
            self.f.write("\n\n-- Set sequences \n")
            self.f.write("\n SELECT pg_catalog.setval('\"DataSet_ID_seq\"', " + str(self.DATA_SET_ID-1) +", true);")
            self.f.write("\n SELECT pg_catalog.setval('\"CharSpectral_ID_seq\"', " + str(self.CHAR_SPECTRAL_ID-1) +", true);")
            self.f.write("\n SELECT pg_catalog.setval('\"CharSpatial_ID_seq\"', " + str(self.CHAR_SPATIAL_ID-1) +", true);")
            self.f.write("\n SELECT pg_catalog.setval('\"CharFlux_ID_seq\"', " + str(self.CHAR_FLUX_ID-1) +", true);")
            self.f.write("\n SELECT pg_catalog.setval('\"CharTime_ID_seq\"', " + str(self.CHAR_TIME_ID-1) +", true);")
            self.f.write("\n SELECT pg_catalog.setval('\"CharVelocity_ID_seq\"', " + str(self.CHAR_VELOCITY_ID-1) +", true);")
            self.f.write("\n SELECT pg_catalog.setval('\"Target_ID_seq\"', " + str(self.TARGET_ID-1) +", true);")
            self.f.write("\n SELECT pg_catalog.setval('\"DataSetImage_ID_seq\"', " + str(self.DATA_SET_IMAGE_ID-1) +", true);")
            self.f.write("\n SELECT pg_catalog.setval('\"Provenance_ID_seq\"', " + str(self.PROVENANCE_ID-1) +", true);")
            self.f.write("\n SELECT pg_catalog.setval('\"Tapering_ID_seq\"', " + str(self.TAPERING_ID-1) +", true);")
        finally:
            # Only close self.f when it is a real file; printSQLcommands()
            # routes output through sys.stdout, which must stay open.
            if self.printOption == False:
                self.f.close()
# Demo driver (Python 2: note the print statement and hard-coded paths).
if __name__ == '__main__':
    print "main"
    whisp = whispCubes("vodata_cubes")
    whisp.readHeaders('/Users/julian/DATA/WSRT-WHISP/CUBE-HI/reduced')
    whisp.initIDCounters()
    whisp.writeSQLcommands("/Users/julian/src/python_ws/feedCubesDB/output.sql")
    # NOTE(review): printSQLcommands() reuses the counters already advanced
    # by writeSQLcommands(), so the IDs printed to stdout continue from where
    # the file output stopped rather than restarting at 1 -- confirm intent.
    whisp.printSQLcommands()
    pass
|
julian-garrido/feedCubesDB
|
src/readHeaders.py
|
Python
|
gpl-2.0
| 13,453
|
[
"Gaussian"
] |
020c2b7c65248deb3afdf267ae2e8b12983127c2a745eabf57c09d0237ad9e9d
|
from __future__ import print_function, absolute_import, division
import glob
import os
import numpy as np
from .utils import expand_path, num_samples
class BaseDatasetLoader(object):
    """Abstract base class for dataset loaders.

    Subclasses set ``short_name`` (the identifier used to select the
    loader) and implement ``load``, returning an ``(X, y)`` pair.
    """
    # Identifier used to select this loader; overridden by subclasses.
    short_name = None

    def load(self):
        """Return ``(X, y)``; must be overridden by subclasses."""
        raise NotImplementedError('should be implemented in subclass')
class MSMBuilderDatasetLoader(BaseDatasetLoader):
    """Open an on-disk MSMBuilder dataset and report its provenance."""
    short_name = 'msmbuilder'

    def __init__(self, path, fmt=None, verbose=False):
        # Path of the dataset directory/file plus optional on-disk format.
        self.path = path
        self.fmt = fmt
        self.verbose = verbose

    def load(self):
        """Return ``(dataset, None)``; the dataset is opened read-only."""
        from msmbuilder.dataset import dataset
        opened = dataset(self.path, mode='r', fmt=self.fmt, verbose=self.verbose)
        # Echo the recorded provenance so the user can audit data origin.
        print('Dataset provenance:\n')
        print(opened.provenance)
        return opened, None
class NumpyDatasetLoader(BaseDatasetLoader):
    """Load one array per file from a glob of ``.npy``/``.npz`` files."""
    short_name = 'numpy'

    def __init__(self, filenames):
        # Glob pattern (may contain ~ or env vars; expanded at load time).
        self.filenames = filenames

    def load(self):
        """Return ``([array, ...], None)`` for every matched file.

        Raises RuntimeError when the pattern matches nothing.
        """
        matched = sorted(glob.glob(expand_path(self.filenames)))
        if not matched:
            raise RuntimeError('no filenames matched by pattern: %s' %
                               self.filenames)
        arrays = [np.load(path) for path in matched]
        return arrays, None
class MDTrajDatasetLoader(BaseDatasetLoader):
    """Load molecular-dynamics trajectories with mdtraj."""
    short_name = 'mdtraj'

    def __init__(self, trajectories, topology=None, stride=1, verbose=False):
        # Glob of trajectory files, optional topology file, frame stride.
        self.trajectories = trajectories
        self.topology = topology
        self.stride = stride
        self.verbose = verbose

    def load(self):
        """Return ``([Trajectory, ...], None)`` for every matched file.

        Raises RuntimeError when the glob pattern matches nothing.
        """
        import mdtraj
        matched = sorted(glob.glob(expand_path(self.trajectories)))
        if not matched:
            raise RuntimeError('no filenames matched by pattern: %s' %
                               self.trajectories)
        # Only pass a topology when one was supplied.
        load_kwargs = {}
        if self.topology is not None:
            load_kwargs['top'] = expand_path(self.topology)
        trajs = []
        for path in matched:
            if self.verbose:
                print('[mdtraj] loading %s' % path)
            trajs.append(mdtraj.load(path, stride=self.stride, **load_kwargs))
        return trajs, None
class FilenameDatasetLoader(BaseDatasetLoader):
    """Just pass a bunch of filenames to the first step of the pipeline

    The pipeline will do the loading.
    """
    short_name = 'filename'

    def __init__(self, trajectories, abs_path=True):
        # Glob pattern for the files; optionally absolutize the results.
        self.traj_glob = trajectories
        self.abs_path = abs_path

    def load(self):
        """Return ``([path, ...], None)``; raises RuntimeError on no match."""
        matched = sorted(glob.glob(expand_path(self.traj_glob)))
        if not matched:
            raise RuntimeError('no filenames matched by pattern: %s' %
                               self.traj_glob)
        if self.abs_path:
            matched = [os.path.abspath(p) for p in matched]
        return matched, None
class JoblibDatasetLoader(BaseDatasetLoader):
    """Load pickled data from joblib files matched by a glob pattern."""
    short_name = 'joblib'

    def __init__(self, filenames, x_name=None, y_name=None,
                 system_joblib=False):
        # x_name/y_name select entries when each file holds a mapping;
        # system_joblib switches between stand-alone and sklearn's joblib.
        self.filenames = filenames
        self.x_name = x_name
        self.y_name = y_name
        self.system_joblib = system_joblib

    def load(self):
        """Return ``(X, y)``; ``y`` is None when no labels were found.

        Single-sample results are unwrapped from their enclosing list.
        Raises RuntimeError when the glob pattern matches nothing.
        """
        if self.system_joblib:
            import joblib
        else:
            from sklearn.externals import joblib
        X, y = [], []
        matched = sorted(glob.glob(expand_path(self.filenames)))
        if not matched:
            raise RuntimeError('no filenames matched by pattern: %s' %
                               self.filenames)
        for path in matched:
            payload = joblib.load(path)
            if isinstance(payload, (list, np.ndarray)):
                # Bare sequence/array: unlabeled data.
                X.append(payload)
            else:
                # Mapping-like payload: pull out the named X and y entries.
                X.append(payload[self.x_name])
                y.append(payload[self.y_name])
        if num_samples(X) == 1:
            X = X[0]
        if len(y) == 1:
            y = y[0]
        elif len(y) == 0:
            y = None
        return X, y
class SklearnDatasetLoader(BaseDatasetLoader):
    """Fetch a built-in scikit-learn dataset by loader-function name."""
    short_name = 'sklearn_dataset'

    def __init__(self, method, x_name='data', y_name='target', **kwargs):
        # method: name of a loader in sklearn.datasets (e.g. 'load_iris');
        # kwargs are forwarded to that loader.
        self.method = method
        self.x_name = x_name
        self.y_name = y_name
        self.kwargs = kwargs

    def load(self):
        """Return ``(X, y)`` pulled from the sklearn Bunch object.

        Raises RuntimeError when ``method`` is not a sklearn.datasets name.
        """
        import sklearn.datasets
        try:
            factory = getattr(sklearn.datasets, self.method)
        except AttributeError:
            raise RuntimeError('no %s in sklearn.datasets' % self.method)
        bunch = factory(**self.kwargs)
        return bunch[self.x_name], bunch[self.y_name]
|
cxhernandez/osprey
|
osprey/dataset_loaders.py
|
Python
|
apache-2.0
| 4,570
|
[
"MDTraj"
] |
0e4e50a0d105fa34a15601cddf1fec54c1991b6a315bd37e2b6a5041206510da
|
""" The TaskManagerAgentBase is the base class to submit tasks to external systems,
monitor and update the tasks and file status in the transformation DB.
"""
import datetime
from DIRAC import S_OK, S_ERROR, gMonitor, gLogger
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.TransformationSystem.Client.FileReport import FileReport
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
__RCSID__ = "$Id$"
AGENT_NAME = 'Transformation/TaskManagerAgentBase'
class TaskManagerAgentBase( AgentModule ):
  """ To be extended. The extension needs to:
        - provide a taskManager object as data member
        - provide a shifterProxy (string) as data member
        - provide a transType (list of strings) as data member

      execute() runs four optional phases, each gated by an agent option:
      task-status update, file-status update, reserved-task recovery and
      task submission.
  """

  def __init__( self, *args, **kwargs ):
    """ c'tor

        Data members are placeholders here; the extension must fill in
        taskManager, shifterProxy and transType before initialize() runs.
    """
    AgentModule.__init__( self, *args, **kwargs )
    self.taskManager = None
    self.shifterProxy = ''
    self.transClient = TransformationClient()
    self.transType = []

  #############################################################################
  def initialize( self ):
    """ agent initialization

        Fails fast unless the extension provided taskManager, shifterProxy
        and transType; registers the SubmittedTasks monitoring activity.
    """
    if not self.taskManager:
      return S_ERROR( 'No task manager provided!' )
    if not self.shifterProxy:
      return S_ERROR( 'No shifter proxy provided!' )
    self.am_setOption( 'shifterProxy', self.shifterProxy )
    if not self.transType:
      return S_ERROR( 'No transformation types to look for!' )
    gLogger.info( "Looking for %s" % self.transType )
    gMonitor.registerActivity( "SubmittedTasks", "Automatically submitted tasks", "Transformation Monitoring", "Tasks",
                               gMonitor.OP_ACUM )
    return S_OK()

  #############################################################################
  def execute( self ):
    """ The TaskManagerBase execution method.

        Each phase is skipped unless its enabling option is set; phase
        failures are logged but never abort the remaining phases.
    """
    # Determine whether the task status is to be monitored and updated
    enableTaskMonitor = self.am_getOption( 'MonitorTasks', '' )
    if not enableTaskMonitor:
      gLogger.info( "execute: Monitoring of tasks is disabled." )
      gLogger.info( "execute: To enable create the 'MonitorTasks' option" )
    else:
      res = self.updateTaskStatus()
      if not res['OK']:
        gLogger.warn( 'execute: Failed to update task states', res['Message'] )
    # Determine whether the task files status is to be monitored and updated
    enableFileMonitor = self.am_getOption( 'MonitorFiles', '' )
    if not enableFileMonitor:
      gLogger.info( "execute: Monitoring of files is disabled." )
      gLogger.info( "execute: To enable create the 'MonitorFiles' option" )
    else:
      res = self.updateFileStatus()
      if not res['OK']:
        gLogger.warn( 'execute: Failed to update file states', res['Message'] )
    # Determine whether the checking of reserved tasks is to be performed
    enableCheckReserved = self.am_getOption( 'CheckReserved', '' )
    if not enableCheckReserved:
      gLogger.info( "execute: Checking of reserved tasks is disabled." )
      gLogger.info( "execute: To enable create the 'CheckReserved' option" )
    else:
      res = self.checkReservedTasks()
      if not res['OK']:
        gLogger.warn( 'execute: Failed to checked reserved tasks', res['Message'] )
    # Determine whether the submission of tasks is to be executed
    enableSubmission = self.am_getOption( 'SubmitTasks', '' )
    if not enableSubmission:
      gLogger.info( "execute: Submission of tasks is disabled." )
      gLogger.info( "execute: To enable create the 'SubmitTasks' option" )
    else:
      res = self.submitTasks()
      if not res['OK']:
        gLogger.warn( 'execute: Failed to submit created tasks', res['Message'] )
    return S_OK()

  def _selectTransformations( self, transType = [], status = ['Active', 'Completing'], agentType = ['Automatic'] ):
    """ get the transformations

        Builds a condition dict from the non-empty arguments and queries
        the TransformationClient.  Returns the raw S_OK/S_ERROR result.

        NOTE(review): the list defaults are mutable default arguments;
        harmless while they are never mutated, but fragile.
    """
    selectCond = {}
    if status:
      selectCond['Status'] = status
    if transType:
      selectCond['Type'] = transType
    if agentType:
      selectCond['AgentType'] = agentType
    res = self.transClient.getTransformations( condDict = selectCond )
    if not res['OK']:
      gLogger.error( "_selectTransformations: Failed to get transformations for selection.", res['Message'] )
    elif not res['Value']:
      gLogger.verbose( "_selectTransformations: No transformations found for selection." )
    else:
      gLogger.verbose( "_selectTransformations: Obtained %d transformations for selection" % len( res['Value'] ) )
    return res

  def updateTaskStatus( self ):
    """ Updates the task status

        For each selected transformation, fetches tasks in an updatable
        external state (untouched for at least 10 minutes), asks the task
        manager for their current status, and writes the changes back.
    """
    gLogger.info( "updateTaskStatus: Updating the Status of tasks" )
    # Get the transformations to be updated
    status = self.am_getOption( 'UpdateTasksStatus', ['Active', 'Completing', 'Stopped'] )
    res = self._selectTransformations( transType = self.transType, status = status, agentType = [] )
    if not res['OK']:
      return res
    for transformation in res['Value']:
      transID = transformation['TransformationID']
      # Get the tasks which are in a UPDATE state
      updateStatus = self.am_getOption( 'TaskUpdateStatus', ['Checking', 'Deleted', 'Killed', 'Staging', 'Stalled',
                                                            'Matched', 'Rescheduled', 'Completed', 'Submitted',
                                                            'Assigned', 'Received', 'Waiting', 'Running'] )
      condDict = {"TransformationID":transID, "ExternalStatus":updateStatus}
      # Only consider tasks that have not been updated in the last 10 minutes,
      # to avoid racing with very recent state changes.
      timeStamp = str( datetime.datetime.utcnow() - datetime.timedelta( minutes = 10 ) )
      res = self.transClient.getTransformationTasks( condDict = condDict,
                                                     older = timeStamp,
                                                     timeStamp = 'LastUpdateTime' )
      if not res['OK']:
        gLogger.error( "updateTaskStatus: Failed to get tasks to update for transformation", "%s %s" % ( transID,
                                                                                                         res['Message'] ) )
        continue
      if not res['Value']:
        gLogger.verbose( "updateTaskStatus: No tasks found to update for transformation %s" % transID )
        continue
      gLogger.verbose( "updateTaskStatus: getting %d tasks status of transformation %s" % ( len( res['Value'] ),
                                                                                            transID ) )
      res = self.taskManager.getSubmittedTaskStatus( res['Value'] )
      if not res['OK']:
        gLogger.error( "updateTaskStatus: Failed to get updated task statuses for transformation", "%s %s" % ( transID,
                                                                                                               res['Message'] ) )
        continue
      # statusDict maps new status -> list of task IDs to move there.
      statusDict = res['Value']
      if not statusDict:
        gLogger.info( "updateTaskStatus: No tasks to update for transformation %d" % transID )
      else:
        for status in sorted( statusDict ):
          taskIDs = statusDict[status]
          gLogger.info( "updateTaskStatus: Updating %d task(s) from transformation %d to %s" % ( len( taskIDs ),
                                                                                                 transID, status ) )
          res = self.transClient.setTaskStatus( transID, taskIDs, status )
          if not res['OK']:
            gLogger.error( "updateTaskStatus: Failed to update task status for transformation", "%s %s" % ( transID,
                                                                                                            res['Message'] ) )
    gLogger.info( "updateTaskStatus: Transformation task status update complete" )
    return S_OK()

  def updateFileStatus( self ):
    """ Update the files status

        For each selected transformation, fetches 'Assigned' files not
        updated in the last 10 minutes, asks the task manager for their
        current status, and commits the changes via a FileReport.
    """
    gLogger.info( "updateFileStatus: Updating Status of task files" )
    # Get the transformations to be updated
    status = self.am_getOption( 'UpdateFilesStatus', ['Active', 'Completing', 'Stopped'] )
    res = self._selectTransformations( transType = self.transType, status = status, agentType = [] )
    if not res['OK']:
      return res
    for transformation in res['Value']:
      transID = transformation['TransformationID']
      timeStamp = str( datetime.datetime.utcnow() - datetime.timedelta( minutes = 10 ) )
      condDict = {'TransformationID' : transID, 'Status' : ['Assigned']}
      res = self.transClient.getTransformationFiles( condDict = condDict, older = timeStamp, timeStamp = 'LastUpdate' )
      if not res['OK']:
        gLogger.error( "updateFileStatus: Failed to get transformation files to update.", res['Message'] )
        continue
      if not res['Value']:
        gLogger.info( "updateFileStatus: No files to be updated for transformation %s." % transID )
        continue
      res = self.taskManager.getSubmittedFileStatus( res['Value'] )
      if not res['OK']:
        gLogger.error( "updateFileStatus: Failed to get updated file statuses for transformation", "%s %s" % ( transID,
                                                                                                               res['Message'] ) )
        continue
      # statusDict maps LFN -> new status; all updates for this
      # transformation are committed in a single FileReport.
      statusDict = res['Value']
      if not statusDict:
        gLogger.info( "updateFileStatus: No file statuses to be updated for transformation %s." % transID )
        continue
      fileReport = FileReport( server = self.transClient.getServer() )
      for lfn, status in statusDict.items():
        fileReport.setFileStatus( int( transID ), lfn, status )
      res = fileReport.commit()
      if not res['OK']:
        gLogger.error( "updateFileStatus: Failed to update file status for transformation", "%s %s" % ( transID,
                                                                                                        res['Message'] ) )
      else:
        gLogger.info( "updateFileStatus: Updated the status of %d files for transformation %s" % ( len( res['Value'] ),
                                                                                                   transID ) )
    gLogger.info( "updateFileStatus: Transformation file status update complete" )
    return S_OK()

  def checkReservedTasks( self ):
    """ Recover tasks stuck in the 'Reserved' external status.

        Tasks Reserved between 1 hour and 7 days ago are checked against
        the external system: tasks with no external counterpart go back to
        'Created'; tasks with one are marked 'Submitted' with the external
        ID recorded.
    """
    gLogger.info( "checkReservedTasks: Checking Reserved tasks" )
    # Get the transformations which should be checked
    status = self.am_getOption( 'CheckReservedStatus', ['Active', 'Completing', 'Stopped'] )
    res = self._selectTransformations( transType = self.transType, status = status, agentType = [] )
    if not res['OK']:
      return res
    for transformation in res['Value']:
      transID = transformation['TransformationID']
      # Select the tasks which have been in Reserved status for more than 1 hour for selected transformations
      condDict = {"TransformationID":transID, "ExternalStatus":'Reserved'}
      time_stamp_older = str( datetime.datetime.utcnow() - datetime.timedelta( hours = 1 ) )
      time_stamp_newer = str( datetime.datetime.utcnow() - datetime.timedelta( days = 7 ) )
      res = self.transClient.getTransformationTasks( condDict = condDict, older = time_stamp_older,
                                                     newer = time_stamp_newer )
      if not res['OK']:
        gLogger.error( "checkReservedTasks: Failed to get Reserved tasks for transformation", "%s %s" % ( transID,
                                                                                                          res['Message'] ) )
        continue
      if not res['Value']:
        gLogger.verbose( "checkReservedTasks: No Reserved tasks found for transformation %s" % transID )
        continue
      res = self.taskManager.updateTransformationReservedTasks( res['Value'] )
      if not res['OK']:
        # NOTE(review): this is a failure path but logs the "No Reserved
        # tasks found" info message -- looks like a copy/paste slip.
        gLogger.info( "checkReservedTasks: No Reserved tasks found for transformation %s" % transID )
        continue
      noTasks = res['Value']['NoTasks']
      taskNameIDs = res['Value']['TaskNameIDs']
      # For the tasks with no associated request found re-set the status of the task in the transformationDB
      for taskName in noTasks:
        transID, taskID = taskName.split( '_' )
        gLogger.info( "checkReservedTasks: Resetting status of %s to Created as no associated task found" % ( taskName ) )
        res = self.transClient.setTaskStatus( int( transID ), int( taskID ), 'Created' )
        if not res['OK']:
          gLogger.warn( "checkReservedTasks: Failed to update task status and ID after recovery", "%s %s" % ( taskName,
                                                                                                              res['Message'] ) )
      # For the tasks for which an associated request was found update the task details in the transformationDB
      for taskName, extTaskID in taskNameIDs.items():
        transID, taskID = taskName.split( '_' )
        # NOTE(review): the message says "to Created" but the call below
        # sets the status to 'Submitted' -- the log text is misleading.
        gLogger.info( "checkReservedTasks: Resetting status of %s to Created with ID %s" % ( taskName, extTaskID ) )
        res = self.transClient.setTaskStatusAndWmsID( int( transID ), int( taskID ), 'Submitted', str( extTaskID ) )
        if not res['OK']:
          gLogger.warn( "checkReservedTasks: Failed to update task status and ID after recovery", "%s %s" % ( taskName,
                                                                                                              res['Message'] ) )
    gLogger.info( "checkReservedTasks: Updating of reserved tasks complete" )
    return S_OK()

  def submitTasks( self ):
    """ Submit the tasks to an external system, using the taskManager provided

        Uses the current shifter-proxy credentials; per transformation it
        fetches up to TasksPerLoop tasks, prepares them, submits them and
        records the submission in the transformation DB.
    """
    gLogger.info( "submitTasks: Submitting tasks for transformations" )
    res = getProxyInfo( False, False )
    if not res['OK']:
      gLogger.error( "submitTasks: Failed to determine credentials for submission", res['Message'] )
      return res
    proxyInfo = res['Value']
    owner = proxyInfo['username']
    ownerGroup = proxyInfo['group']
    ownerDN = proxyInfo['identity']
    gLogger.info( "submitTasks: Tasks will be submitted with the credentials %s:%s" % ( owner, ownerGroup ) )
    # Get the transformations which should be submitted
    tasksPerLoop = self.am_getOption( 'TasksPerLoop', 50 )
    status = self.am_getOption( 'SubmitStatus', ['Active', 'Completing'] )
    res = self._selectTransformations( transType = self.transType, status = status )
    if not res['OK']:
      return res
    for transformation in res['Value']:
      transID = transformation['TransformationID']
      transBody = transformation['Body']
      res = self.transClient.getTasksToSubmit( transID, tasksPerLoop )
      if not res['OK']:
        gLogger.error( "submitTasks: Failed to obtain tasks for transformation", "%s %s" % ( transID, res['Message'] ) )
        continue
      tasks = res['Value']['JobDictionary']
      if not tasks:
        gLogger.verbose( "submitTasks: No tasks found for submission for transformation %s" % transID )
        continue
      gLogger.info( "submitTasks: Obtained %d tasks for submission for transformation %s" % ( len( tasks ), transID ) )
      # Prepare -> submit -> record, each step feeding its result to the
      # next; any failure skips the remaining steps for this transformation.
      res = self.taskManager.prepareTransformationTasks( transBody, tasks, owner, ownerGroup, ownerDN )
      if not res['OK']:
        gLogger.error( "submitTasks: Failed to prepare tasks for transformation", "%s %s" % ( transID,
                                                                                              res['Message'] ) )
        continue
      res = self.taskManager.submitTransformationTasks( res['Value'] )
      if not res['OK']:
        gLogger.error( "submitTasks: Failed to submit prepared tasks for transformation", "%s %s" % ( transID,
                                                                                                      res['Message'] ) )
        continue
      res = self.taskManager.updateDBAfterTaskSubmission( res['Value'] )
      if not res['OK']:
        gLogger.error( "submitTasks: Failed to update DB after task submission for transformation", "%s %s" % ( transID,
                                                                                                                res['Message'] ) )
        continue
    gLogger.info( "submitTasks: Submission of transformation tasks complete" )
    return S_OK()
|
avedaee/DIRAC
|
TransformationSystem/Agent/TaskManagerAgentBase.py
|
Python
|
gpl-3.0
| 16,230
|
[
"DIRAC"
] |
54a9e259455b737ad7f3bff9cb9c9df63db3876e32a74ff25993a694f6f9862f
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# rss - RSS feed generator functions
# Copyright (C) 2003-2015 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""RSS generator helper functions"""
import datetime
# PyRSS2Gen is optional: every helper below returns None instead of
# raising when it is not installed.
try:
    import PyRSS2Gen as rssgen
except ImportError:
    rssgen = None
# Template of fallback values merged under caller-supplied dicts by the
# helpers below.  NOTE(review): publish_date/build_date are evaluated once
# at import time, and the 'entries' list is shared by shallow copies --
# fine while nothing mutates it, but fragile.
rss_defaults = {'title': 'no title', 'link': '', 'description': '',
                'css_url': 'rss.css', 'docs': 'no docs',
                'publish_date': datetime.datetime.now(), 'guid': None,
                'build_date': datetime.datetime.now(), 'entries': []}
def append_rss_items(rss_feed, items):
    """Append each entry dict in *items* to *rss_feed* as an RSSItem.

    Missing fields are filled in from the module-level ``rss_defaults``.
    Returns the feed, or None when PyRSS2Gen is unavailable.
    """
    if not rssgen:
        return None
    for raw_entry in items:
        # Merge the entry over a copy of the defaults so every field exists.
        merged = dict(rss_defaults)
        merged.update(raw_entry)
        rss_feed.items.append(rssgen.RSSItem(
            title=merged["title"],
            link=merged["link"],
            description=merged["description"],
            guid=rssgen.Guid(merged["guid"]),
            pubDate=merged["publish_date"]))
    return rss_feed
def create_rss_feed(contents):
    """Create rss feed object from contents dictionary.

    Any field missing from *contents* falls back to ``rss_defaults``;
    the entries listed under the 'entries' key become the feed's items.
    Returns the RSS2 feed, or None when PyRSS2Gen is unavailable.
    """
    if not rssgen:
        return None
    filled = rss_defaults.copy()
    filled.update(contents)
    filled['items'] = []
    rss = rssgen.RSS2(
        title=filled["title"],
        link=filled["link"],
        description=filled["description"],
        lastBuildDate=filled["build_date"],
        items=filled["items"],
        docs=filled["docs"])
    # Use the defaults-merged dict so a caller that omits 'entries' gets the
    # [] fallback instead of a KeyError (was: contents['entries']).
    append_rss_items(rss, filled['entries'])
    return rss
def write_rss_feed(rss_feed, destination, insert_header=''):
    """Write rss feed object in destination file.

    When *insert_header* is non-empty it is spliced in right after the
    XML declaration (first line), e.g. for an xml-stylesheet directive.
    Returns the feed, or None when PyRSS2Gen is unavailable.
    """
    if not rssgen:
        return None
    # 'w+' so the file can be read back for the header-insertion rewrite.
    feed = open(destination, 'w+')
    rss_feed.write_xml(feed)
    if insert_header:
        # Insert header right after initial xml declaration line:
        # read everything back, splice in the header, then rewrite the file
        # from the start (truncate(0) + seek(0) before writelines).
        feed.seek(0)
        output = []
        output.append(feed.readline())
        css = insert_header
        output.append(css)
        output += feed.readlines()
        feed.truncate(0)
        feed.seek(0)
        feed.writelines(output)
    feed.close()
    return rss_feed
# Demo: build a small RSS feed plus a CSS stylesheet and an HTML page that
# references it, so the feed can be inspected through a browser.
if __name__ == "__main__":
    from shared.conf import get_configuration_object
    configuration = get_configuration_object()
    # Output file names share the same base name.
    feed_base = "demofeed"
    feed_raw = "%s.xml" % feed_base
    feed_css_style = "%s.css" % feed_base
    feed_page = "%s.html" % feed_base
    dashboard_url = configuration.https_default_url + '/cgi-bin/dashboard.py'
    docs_url = configuration.https_default_url + '/cgi-bin/docs.py'
    entries = [{'title': 'Dashboard page',
                'link': dashboard_url,
                'guid': dashboard_url,
                'description': 'MiG user dashboard page'},
               {'title': 'Docs page',
                'link': docs_url,
                'guid': docs_url,
                'description': 'MiG user docs page'}]
    data = {'title': 'Demo feed', 'link': 'demofeed.xml', 'docs': 'Demo output',
            'css_url': feed_css_style, 'description': 'MiG demo feed',
            'entries': entries}
    rss_feed = create_rss_feed(data)
    # The stylesheet header is currently disabled; pass '' instead.
    #css_header = '<?xml-stylesheet href="%(css_url)s" type="text/css" media="screen"?>\n' % data
    css_header = ''
    write_rss_feed(rss_feed, feed_raw, insert_header=css_header)
    # Stylesheet for browsers that render raw RSS with CSS.
    css_stylesheet = '''
rss {
display: block;
font-family: verdana, arial;
}
title {
display: block;
margin: 5px;
padding: 2px;
color: gray;
border-bottom: 1px solid silver;
}
link {
display: block;
font-size: small;
padding-left: 10px;
}
item {
display: block;
padding: 2px 30px 2px 30px;
}
docs {
display: block;
background-color: #ffffe6;
margin: 20px;
text-align: center;
padding: 5px;
color: #7f7f7f;
border: 1px solid silver;
}
/* all hidden elements */
language, lastBuildDate, ttl, guid, category, description, pubDate, generator {
display: none;
}
'''
    css_style = open(feed_css_style, "w")
    css_style.write(css_stylesheet)
    css_style.close()
    # Minimal HTML page advertising the feed via a <link rel="alternate">.
    html = '''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
<link rel="alternate" type="application/rss+xml" title="Demo RSS Feed" href="%s" />
</head>
<body>
<h1>A simple demo page</h1>
Used to display demo rss feed through the browser location bar.
</body>
</html>
''' % feed_raw
    page = open(feed_page, "w")
    page.write(html)
    page.close()
|
heromod/migrid
|
mig/shared/rss.py
|
Python
|
gpl-2.0
| 5,207
|
[
"Brian"
] |
b6f570b7a19a83fa725ac2c60ff874ef509bb26c8fbc40a6bec367b48a5f8402
|
# Pre-populates the 'cha.emails' MongoDB collection with every
# adjective-animal-number email address, shuffled, each flagged as free.
# NOTE(review): 'random' and 'math' are imported but unused below.
import random, math
from pymongo import MongoClient
from random import shuffle
#firsts = ['rushing', 'swift', 'majestic', 'glorious', 'royal', 'ancient', 'raging', 'royal', 'graceful', 'grand', 'regal', 'elegant']
#seconds = ['mountain', 'stream', 'forest', 'plains', 'ocean', 'vista', 'mesa', 'canyon', 'fjord', 'delta', 'river', 'trail', 'beach', 'glacier', 'iceberg', 'peak', 'cavern', 'crevasse']
#firsts = ['19th', 'california', 'fell', 'geary', 'grant', 'fulton', 'lincoln', 'market', 'portola', 'vanness', 'lombard', 'montgomery']
#seconds = ['24th', 'Columbus', 'Fillmore', 'Kearny', 'Mission', 'Polk', 'Stockton', 'Union', 'Third', 'Alemany', 'Broadway', 'Castro', 'Embarcadero']
#lets = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# Word lists: adjectives and animals combined into "adjective-animalN" names.
firsts = 'alert amused brave bright charming cheerful comfortable cooperative courageous delightful determined eager elated enchanting encouraging energetic excited fantastic friendly frowning funny gentle glorious good happy healthy helpful hilarious innocent jolly kind lively lovely lucky perfect proud relaxed smiling splendid successful thoughtful victorious well witty wonderful'.split(' ')
seconds = 'alligator ant bear bee bird camel cat cheetah chicken chimp cow crocodile deer dog dolphin duck eagle elephant fish fly fox frog giraffe goat goldfish hamster horse kangaroo kitten lion lobster monkey octopus owl panda pig puppy rabbit rat scorpion seal shark sheep snail snake spider squirrel tiger turtle wolf zebra'.split(' ')
results = []
# The block below is dead code disabled via a bare string literal: an
# interactive word-list editor that was never wired back in.
"""
var = raw_input()
while not var == '' and not var == False and not var == '\n':
    com, word = var.split(' ')
    if com == 'f':
        firsts += [word]
    elif com == 's':
        seconds += [seconds]
    var = raw_input()
with open('data.txt', 'w') as f:
    f.write(str(firsts) + '\n')
    f.write(str(seconds) + '\n')
"""
# Connect to the default local MongoDB instance.
cl = MongoClient()
coll = cl.cha.emails
# Build every adjective x animal x 0..999 combination in memory
# (len(firsts) * len(seconds) * 1000 documents) before shuffling.
for first in firsts:
    for second in seconds:
        for x in range(1000):
            results += [{'email' : '{0}-{1}{2:d}@gocha.io'.format(first, second, x), 'taken' : False}]
shuffle(results)
# NOTE(review): one round-trip per document; Collection.insert is also
# deprecated in modern pymongo -- insert_many would batch this. Confirm
# the pymongo version before changing.
for email in results:
    coll.insert(email)
|
thepropterhoc/Cha_The_Pivot
|
names.py
|
Python
|
mit
| 2,156
|
[
"COLUMBUS",
"Octopus"
] |
01ff437a2ba8f83cb1879021294587bbded900f798071a016f6739476ded4199
|
import psi4
def test_psi4_basic():
    """tu1-h2o-energy: HF/cc-pVDZ single-point energy of water."""
    #! Sample HF/cc-pVDZ H2O computation
    ref_energy = -76.0266327341067125
    water = psi4.geometry("""
O
H 1 0.96
H 1 0.96 2 104.5
""")
    psi4.set_options({'basis': "cc-pVDZ"})
    psi4.energy('scf')
    computed = psi4.get_variable('SCF TOTAL ENERGY')
    assert psi4.compare_values(ref_energy, computed, 6, 'SCF energy')
def test_psi4_cas():
    """casscf-sp: CISD followed by a CASSCF single-point energy."""
    #! CASSCF/6-31G** energy point
    # Water geometry as a Z-matrix (lengths in angstrom, angle in degrees).
    geom = psi4.geometry("""
O
H 1 1.00
H 1 1.00 2 103.1
""")
    # nat_orbs=True requests natural orbitals from the CISD step; the
    # returned wavefunction is reused as the CASSCF guess below.
    psi4.set_options({
        "basis" : '6-31G**',
        "reference" : 'rhf',
        "scf_type" : 'pk',
        "mcscf_algorithm" : 'ah',
        "qc_module" : 'detci',
        "nat_orbs" : True})
    # return_wfn=True yields (energy, wavefunction).
    cisd_energy, cisd_wfn = psi4.energy("CISD", return_wfn=True)
    assert psi4.compare_values(-76.2198474477531, cisd_energy, 6, 'CISD Energy')
    # Per-irrep orbital-space counts for the CASSCF active space.
    psi4.set_options({
        "restricted_docc": [1, 0, 0, 0],
        "active": [3, 0, 1, 2]})
    # Seed CASSCF with the CISD natural-orbital wavefunction.
    casscf_energy = psi4.energy('casscf', ref_wfn=cisd_wfn)
    assert psi4.compare_values(-76.073865006902, casscf_energy, 6, 'CASSCF Energy')
def test_psi4_cc():
    """cc1: RHF-CCSD/6-31G** geometry optimization of water."""
    #! RHF-CCSD 6-31G** all-electron optimization of the H2O molecule
    expected_nuc = 9.1654609427539
    water = psi4.geometry("""
O
H 1 0.97
H 1 0.97 2 103.0
""")
    psi4.set_options({"basis": '6-31G**'})
    psi4.optimize('ccsd')
    assert psi4.compare_values(expected_nuc, water.nuclear_repulsion_energy(), 3, "Nuclear repulsion energy")
    # (reference value, psi4 variable, digits, label) for each energy check.
    energy_checks = [
        (-76.0229427274435, "SCF total energy", 5, "SCF energy"),
        (-0.20823570806196, "CCSD correlation energy", 4, "CCSD contribution"),
        (-76.2311784355056, "Current energy", 7, "Total energy"),
    ]
    for expected, variable, digits, label in energy_checks:
        assert psi4.compare_values(expected, psi4.get_variable(variable), digits, label)
def test_psi4_dfmp2():
    """dfmp2-1: counterpoise-corrected DF-MP2 binding energy of the formic acid dimer."""
    #! Density fitted MP2 cc-PVDZ/cc-pVDZ-RI computation of formic acid dimer binding energy
    #! using automatic counterpoise correction. Monomers are specified using Cartesian coordinates.
    # Reference values: dimer nuclear repulsion and the CP-corrected
    # interaction energy (atomic units).
    Enuc = 235.946620315069168
    Ecp = -0.0224119246
    # Two formic acid monomers; the "--" line separates the fragments so the
    # bsse_type='cp' call below can build the monomer/ghost calculations.
    formic_dim = psi4.geometry("""
0 1
C -1.888896 -0.179692 0.000000
O -1.493280 1.073689 0.000000
O -1.170435 -1.166590 0.000000
H -2.979488 -0.258829 0.000000
H -0.498833 1.107195 0.000000
--
0 1
C 1.888896 0.179692 0.000000
O 1.493280 -1.073689 0.000000
O 1.170435 1.166590 0.000000
H 2.979488 0.258829 0.000000
H 0.498833 -1.107195 0.000000
units angstrom
no_reorient
""")
    psi4.set_options({
        'basis': 'cc-pvdz',
        'df_basis_scf': 'cc-pvdz-jkfit',
        'df_basis_mp2': 'cc-pvdz-ri',
        # not necessary to specify df_basis* for most basis sets
        'scf_type': 'df',
        'guess': 'sad',
        'd_convergence': 11,
    })
    # bsse_type='cp' returns the counterpoise-corrected interaction energy.
    e_cp = psi4.energy('mp2', bsse_type='cp')
    assert psi4.compare_values(Enuc, formic_dim.nuclear_repulsion_energy(), 7, "Nuclear Repulsion Energy")
    assert psi4.compare_values(Ecp, e_cp, 5, "CP Corrected cc-pVDZ/cc-pVDZ-RI DFMP2")
def test_psi4_sapt():
    """sapt1: SAPT0/cc-pVDZ ethene-ethyne interaction energy decomposition."""
    #! SAPT0 cc-pVDZ computation of the ethene-ethyne interaction energy, using the cc-pVDZ-JKFIT RI basis for SCF
    #! and cc-pVDZ-RI for SAPT. Monomer geometries are specified using Cartesian coordinates.
    # Eref[0] is the dimer nuclear repulsion; Eref[1:] are the SAPT0
    # electrostatics, exchange, induction, dispersion and total terms.
    Eref = [ 85.189064196429101, -0.00359915058, 0.00362911158,
             -0.00083137117, -0.00150542374, -0.00230683391 ]
    ethene_ethyne = psi4.geometry("""
0 1
C 0.000000 -0.667578 -2.124659
C 0.000000 0.667578 -2.124659
H 0.923621 -1.232253 -2.126185
H -0.923621 -1.232253 -2.126185
H -0.923621 1.232253 -2.126185
H 0.923621 1.232253 -2.126185
--
0 1
C 0.000000 0.000000 2.900503
C 0.000000 0.000000 1.693240
H 0.000000 0.000000 0.627352
H 0.000000 0.000000 3.963929
units angstrom
""")
    # this molecule will crash test if molecule passing broken
    barrier = psi4.geometry("""
0 1
He
""")
    psi4.set_options({
        "basis": "cc-pvdz",
        "guess": "sad",
        "scf_type": "df",
        "sad_print": 2,
        "d_convergence": 11,
        "puream": True,
        "print": 1})
    psi4.energy('sapt0', molecule=ethene_ethyne)
    assert psi4.compare_values(Eref[0], ethene_ethyne.nuclear_repulsion_energy(), 9, "Nuclear Repulsion Energy")
    # (psi4 variable, assertion label) for each SAPT0 component, in the
    # same order as Eref[1:].
    components = [
        ("SAPT ELST ENERGY", "SAPT0 Eelst"),
        ("SAPT EXCH ENERGY", "SAPT0 Eexch"),
        ("SAPT IND ENERGY", "SAPT0 Eind"),
        ("SAPT DISP ENERGY", "SAPT0 Edisp"),
        ("SAPT0 TOTAL ENERGY", "SAPT0 Etotal"),
    ]
    for expected, (variable, label) in zip(Eref[1:], components):
        assert psi4.compare_values(expected, psi4.get_variable(variable), 6, label)
def test_psi4_scfproperty():
    """scf-property: UHF and B3LYP one-electron properties for triplet CH2."""
    #! UFH and B3LYP cc-pVQZ properties for the CH2 molecule.
    # GRID_ESP / GRID_FIELD read their evaluation points from grid.dat.
    with open('grid.dat', 'w') as handle:
        handle.write("""\
0.0 0.0 0.0
1.1 1.3 1.4
""")
    # Triplet (multiplicity 3) CH2 as a Z-matrix with named parameters.
    ch2 = psi4.geometry("""
0 3
c
h 1 b1
h 1 b1 2 a1
b1 = 1.0
a1 = 125.0
""")
    # Get a reasonable guess, to save some iterations
    psi4.set_options({
        "scf_type": "pk",
        "basis": "6-31G**",
        "e_convergence": 8,
        "docc": [2, 0, 0, 1],
        "socc": [1, 0, 1, 0],
        "reference": "uhf"})
    ch2.update_geometry()
    assert psi4.compare_values(6.648418918908746, ch2.nuclear_repulsion_energy(), 9, "Nuclear repulsion energy")
    # Fix: 'MAYER_INDICES' was listed twice in the original property list.
    props = ['DIPOLE', 'QUADRUPOLE', 'MULLIKEN_CHARGES', 'LOWDIN_CHARGES',
             'WIBERG_LOWDIN_INDICES', 'MAYER_INDICES',
             'MO_EXTENTS', 'GRID_FIELD', 'GRID_ESP', 'ESP_AT_NUCLEI',
             'MULTIPOLE(5)', 'NO_OCCUPATIONS']
    psi4.property('scf', properties=props)
    # compare_values takes the reference value first; the original SCF energy
    # check passed (computed, expected), inconsistent with every other call.
    assert psi4.compare_values(-38.91591819679808, psi4.get_variable("CURRENT ENERGY"), 6, "SCF energy")
    # Reference dipole (a.u.) and quadrupole components; label == variable name.
    scf_checks = [
        ('SCF DIPOLE X', 0.000000000000),
        ('SCF DIPOLE Y', 0.000000000000),
        ('SCF DIPOLE Z', 0.572697798348),
        ('SCF QUADRUPOLE XX', -7.664066833060),
        ('SCF QUADRUPOLE YY', -6.097755074075),
        ('SCF QUADRUPOLE ZZ', -7.074596012050),
        ('SCF QUADRUPOLE XY', 0.000000000000),
        ('SCF QUADRUPOLE XZ', 0.000000000000),
        ('SCF QUADRUPOLE YZ', 0.000000000000),
    ]
    for variable, expected in scf_checks:
        assert psi4.compare_values(expected, psi4.get_variable(variable), 4, variable)
    # Repeat the same property set with the B3LYP density functional.
    psi4.property('B3LYP', properties=props)
    assert psi4.compare_values(-39.14134740550916, psi4.get_variable('CURRENT ENERGY'), 6, "B3LYP energy")
    b3lyp_checks = [
        ('B3LYP DIPOLE X', 0.000000000000),
        ('B3LYP DIPOLE Y', -0.000000000000),
        ('B3LYP DIPOLE Z', 0.641741521158),
        ('B3LYP QUADRUPOLE XX', -7.616483183211),
        ('B3LYP QUADRUPOLE YY', -6.005896804551),
        ('B3LYP QUADRUPOLE ZZ', -7.021817489904),
        ('B3LYP QUADRUPOLE XY', 0.000000000000),
        ('B3LYP QUADRUPOLE XZ', 0.000000000000),
        ('B3LYP QUADRUPOLE YZ', -0.000000000000),
    ]
    for variable, expected in b3lyp_checks:
        assert psi4.compare_values(expected, psi4.get_variable(variable), 4, variable)
|
kratman/psi4public
|
tests/pytest/test_psi4.py
|
Python
|
gpl-2.0
| 8,661
|
[
"Psi4"
] |
9933f0841c86cfdbbdf63b091ea0363d429a818be4f0d3c9d61f236655595997
|
import subprocess
import sys
import os
import linecache
import re
import numpy as np
if len(sys.argv)<3:
print 'An input file and setup file are needed as arguments'
print 'Input and setup files must be stored in folder RR_fitter/input/'
print 'Input file path name must be entered before setup file name'
print 'Example: python run_fullshape_fitter.py /input/input_file.txt /input/setup_file.txt'
sys.exit(0)
input_file='input/'+sys.argv[1]
setup_file='input/'+sys.argv[2]
if not os.path.isfile(input_file) or not os.path.isfile(setup_file):
print 'Your input file or setup file does not exist. Please provide a valid file name and ensure your files are stored in the RR_fitter/input/ directory.'
sys.exit(0)
path_base_dir=str(re.search(r"\'\s*(.*?)\s*\'",linecache.getline(input_file,10)).group(1))
path_code=path_base_dir+'codes/'
num_gals=int(re.search(r"\'\s*(.*?)\s*\'",linecache.getline(input_file,2)).group(1))
name_gals=np.loadtxt(input_file,unpack=True,skiprows=11,dtype=str)
if num_gals != len(name_gals):
print 'Number of galaxies listed in input_file:', num_gals
print 'Number of galaxy file names listed in input_file:', len(name_gals)
print 'Number of galaxies given and number of listed galaxy files must be the same.'
sys.exit(0)
input_dir=str(re.search(r"\'\s*(.*?)\s*\'",linecache.getline(input_file,11)).group(1))
for i in range(len(name_gals)):
if not os.path.isfile(input_dir+name_gals[i]):
print 'Galaxy file:', input_dir+name_gals[i], 'does not exist.'
print 'Please provide a valid path and filename in input_file.'
sys.exit(0)
subprocess.check_call(['python',path_code+'bayesian_full_spectral_reconstruction.py',input_file, setup_file])
plotting=str(re.search(r"\'\s*(.*?)\s*\'",linecache.getline(input_file,9)).group(1))
if plotting in ['True']:
print '------------------------------------------------------------------------'
print 'Plotting best fit spectra for all galaxies'
subprocess.check_call(['python',path_code+'plot_spectrum_fullshape.py',input_file, setup_file])
print 'Plotting MCMC results for all galaxies'
subprocess.check_call(['python',path_code+'plot_corner_walkers_fullshape.py',input_file, setup_file])
|
elizabethswann/RR_fitter
|
run_fullshape_fitter.py
|
Python
|
mit
| 2,183
|
[
"Galaxy"
] |
9a1d17e41756272e2eed649913a8fc4551e882b05cdd13f6ca22b81be797a52a
|
# Orca
#
# Copyright 2004-2009 Sun Microsystems Inc.
# Copyright 2010-2013 The Orca Team
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Messages which Orca presents in speech and/or braille. These
have been put in their own module so that we can present them in
the correct language when users change the synthesizer language
on the fly without having to reload a bunch of modules."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2004-2009 Sun Microsystems Inc." \
"Copyright (c) 2010-2013 The Orca Team"
__license__ = "LGPL"
from .orca_i18n import _, C_, ngettext
from .orca_platform import version
# Translators: Sometimes when we attempt to get the name of an accessible
# software application, we fail because the app or one of its elements is
# defunct. This is a generic name so that we can still refer to this element
# in messages.
APPLICATION_NO_NAME = C_("generic name", "application")
# Translators: This is presented when the user has navigated to an empty line.
BLANK = _("blank")
# Translators: This refers to font weight.
BOLD = _("bold")
# Translators: Orca has a feature in which users can store/save a particular
# location in an application window and return to it later by pressing a
# keystroke. These stored/saved locations are "bookmarks". This string is
# presented to the user when a new bookmark has been entered into the list
# of bookmarks.
BOOKMARK_ENTERED = _("bookmark entered")
# Translators: Orca has a feature in which users can store/save a particular
# location in an application window and return to it later by pressing a
# keystroke. These stored/saved locations are "bookmarks". This string is
# presented to the user when the active list of bookmarks have been saved to
# disk.
BOOKMARKS_SAVED = _("bookmarks saved")
# Translators: Orca has a feature in which users can store/save a particular
# location in an application window and return to it later by pressing a
# keystroke. These stored/saved locations are "bookmarks". This string is
# presented to the user when an error was encountered, preventing the active
# list of bookmarks being saved to disk.
BOOKMARKS_SAVED_FAILURE = _("bookmarks could not be saved")
# Translators: Orca normally intercepts all keyboard commands and only passes
# them along to the current application when they are not Orca commands. This
# command causes the next command issued to be passed along to the current
# application, bypassing Orca's interception of it.
BYPASS_MODE_ENABLED = _("Bypass mode enabled.")
# Translators: this is an indication that Orca is unable to obtain the display/
# results area of the calculator being used (e.g. gcalctool).
CALCULATOR_DISPLAY_NOT_FOUND = _("Unable to get calculator display")
# Translators: Orca uses Speech Dispatcher to present content to users via
# text-to-speech. Speech Dispatcher has a feature to control how capital
# letters are presented: Do nothing at all, say the word 'capital' prior to
# presenting a capital letter, or play a tone which Speech Dispatcher refers
# to as a sound 'icon'. This string to be translated refers to the brief/
# non-verbose output presented in response to the use of an Orca command which
# makes it possible for users to quickly cycle amongst these alternatives
# without having to get into a GUI.
CAPITALIZATION_ICON_BRIEF = C_("capitalization style", "icon")
# Translators: Orca uses Speech Dispatcher to present content to users via
# text-to-speech. Speech Dispatcher has a feature to control how capital
# letters are presented: Do nothing at all, say the word 'capital' prior to
# presenting a capital letter, or play a tone which Speech Dispatcher refers
# to as a sound 'icon'. This string to be translated refers to the full/verbose
# output presented in response to the use of an Orca command which makes it
# possible for users to quickly cycle amongst these alternatives without having
# to get into a GUI.
CAPITALIZATION_ICON_FULL = _("Capitalization style set to icon.")
# Translators: Orca uses Speech Dispatcher to present content to users via
# text-to-speech. Speech Dispatcher has a feature to control how capital
# letters are presented: Do nothing at all, say the word 'capital' prior to
# presenting a capital letter, or play a tone which Speech Dispatcher refers
# to as a sound 'icon'. This string to be translated refers to the brief/
# non-verbose output presented in response to the use of an Orca command which
# makes it possible for users to quickly cycle amongst these alternatives
# without having to get into a GUI.
CAPITALIZATION_NONE_BRIEF = C_("capitalization style", "none")
# Translators: Orca uses Speech Dispatcher to present content to users via
# text-to-speech. Speech Dispatcher has a feature to control how capital
# letters are presented: Do nothing at all, say the word 'capital' prior to
# presenting a capital letter, or play a tone which Speech Dispatcher refers
# to as a sound 'icon'. This string to be translated refers to the full/verbose
# output presented in response to the use of an Orca command which makes it
# possible for users to quickly cycle amongst these alternatives without having
# to get into a GUI.
CAPITALIZATION_NONE_FULL = _("Capitalization style set to none.")
# Translators: Orca uses Speech Dispatcher to present content to users via
# text-to-speech. Speech Dispatcher has a feature to control how capital
# letters are presented: Do nothing at all, say the word 'capital' prior to
# presenting a capital letter, or play a tone which Speech Dispatcher refers
# to as a sound 'icon'. This string to be translated refers to the brief/
# non-verbose output presented in response to the use of an Orca command which
# makes it possible for users to quickly cycle amongst these alternatives
# without having to get into a GUI.
CAPITALIZATION_SPELL_BRIEF = C_("capitalization style", "spell")
# Translators: Orca uses Speech Dispatcher to present content to users via
# text-to-speech. Speech Dispatcher has a feature to control how capital
# letters are presented: Do nothing at all, say the word 'capital' prior to
# presenting a capital letter, or play a tone which Speech Dispatcher refers
# to as a sound 'icon'. This string to be translated refers to the full/verbose
# output presented in response to the use of an Orca command which makes it
# possible for users to quickly cycle amongst these alternatives without having
# to get into a GUI.
CAPITALIZATION_SPELL_FULL = _("Capitalization style set to spell.")
# Translators: Native application caret navigation does not always work as the
# Orca user wants. As such, Orca offers the user the ability to toggle between
# the application controlling the caret and Orca controlling it. This message
# is presented to indicate that the application's native caret navigation is
# active / not being overridden by Orca.
CARET_CONTROL_APP = _("The application is controlling the caret.")
# Translators: Gecko native caret navigation is where Firefox (or Thunderbird)
# itself controls how the arrow keys move the caret around HTML content. It's
# often broken, so Orca needs to provide its own support. As such, Orca offers
# the user the ability to toggle which application is controlling the caret.
CARET_CONTROL_ORCA = _("The screen reader is controlling the caret.")
# Translators: this is the name of a cell in a spreadsheet.
CELL = _("Cell %s")
# Translators: This is the description of command line option '-d, --disable'
# which allows the user to specify an option to disable as Orca is started.
CLI_DISABLE_OPTION = _("Prevent use of option")
# Translators: this is the description of command line option '-e, --enable'
# which allows the user to specify an option to enable as Orca is started.
CLI_ENABLE_OPTION = _("Force use of option")
# Translators: This string indicates to the user what should be provided when
# using the '-e, --enable' or '-d, --disable' command line options.
CLI_OPTION = _("OPTION")
# Translators: This string appears when using 'Orca -h' at the command line.
# It serves as a sort of title and is followed by a detailed list of Orca's
# optional command-line arguments.
CLI_OPTIONAL_ARGUMENTS = _("Optional arguments")
# Translators: This string appears when using 'Orca -h' at the command line.
# It is followed by a brief list of Orca's optional command-line arguments.
CLI_USAGE = _("Usage: ")
# Translators: This message is displayed when the user starts Orca from the
# command line and includes an invalid option or argument. After the message,
# the list of invalid items, as typed by the user, is displayed.
CLI_INVALID_OPTIONS = _("The following are not valid: ")
# Translators: This is the description of command line option '-l, --list-apps'
# which prints the names of running applications which can be seen by assistive
# technologies such as Orca and Accerciser.
CLI_LIST_APPS = _("Print the known running applications")
# Translators: This is the description of command line option '-p, --profile'
# which allows you to specify a profile to be loaded. A profile stores a group
# of Orca settings configured by the user for a particular purpose, such as a
# 'Spanish' profile which would include Spanish braille and text-to-speech.
# An Orca settings file contains one or more profiles.
CLI_LOAD_PROFILE = _("Load profile")
# Translators: This message is presented to the user when the specified profile
# could not be loaded. A profile stores a group of Orca settings configured for
# a particular purpose, such as a Spanish profile which would include Spanish
# braille and Spanish text-to-speech. The string substituted in is the user-
# provided profile name.
CLI_LOAD_PROFILE_ERROR = _("Profile could not be loaded: %s")
# Translators: This message is presented to the user who attempts to launch Orca
# from some other environment than the graphical desktop.
CLI_NO_DESKTOP_ERROR = \
_("Cannot start the screen reader because it cannot connect to the Desktop.")
# Translators: This message is presented to the user who attempts to launch Orca
# but the launch fails due to an error related to the settings manager.
CLI_SETTINGS_MANAGER_ERROR = \
_("Could not activate the settings manager. Exiting.")
# Translators: This message is presented to the user when he/she tries to launch
# Orca, but Orca is already running.
CLI_OTHER_ORCAS_ERROR = \
_('Another screen reader process is already running for this ' \
'session.\nRun "orca --replace" to replace that ' \
'process with a new one.')
# Translators: This string indicates to the user what should be provided when
# using the '-p, --profile' command line option.
CLI_PROFILE_NAME = _("NAME")
# Translators: This is the description of command line option '-u, --user-prefs'
# that allows you to specify an alternate location from which to load the user
# preferences.
CLI_LOAD_PREFS = _("Use alternate directory for user preferences")
# Translators: This string indicates to the user what should be provided when
# using the '-u, --user-prefs' command line option.
CLI_PREFS_DIR = _("DIR")
# Translators: This is the description of command line option '-r, --replace'
# which tells Orca to replace any existing Orca process that might be running.
CLI_REPLACE = _("Replace a currently running instance of this screen reader")
# Translators: this is the description of command line option '-h, --help'
# which lists all the available command line options.
CLI_HELP = _("Show this help message and exit")
# Translators: This is the description of command line option '--debug' which
# causes debugging output for Orca to be sent to a file. The YYYY-MM-DD-HH:MM:SS
# portion of the string indicates the file name will be formed from the current
# date and time with 'debug' in front and '.out' at the end. The 'debug' and
# '.out' portions of this string should not be translated (i.e. it should always
# start with 'debug' and end with '.out', regardless of the locale.).
CLI_ENABLE_DEBUG = _("Send debug output to debug-YYYY-MM-DD-HH:MM:SS.out")
# Translators: This is the description of command line option '--debug-file'
# which allows the user to override the default date-based name of the debugging
# output file.
CLI_DEBUG_FILE = _("Send debug output to the specified file")
# Translators: This string indicates to the user what should be provided when
# using the '--debug-file' command line option.
CLI_DEBUG_FILE_NAME = _("FILE")
# Translators: This is the description of command line option '-t, --text-setup'
# that will initially display a list of questions in text form, that the user
# will need to answer, before Orca will startup. For this to happen properly,
# Orca will need to be run from a terminal window.
CLI_SETUP = _("Set up user preferences (text version)")
# Translators: This is the description of command line option '-s, --setup'
# that will place the user in Orca's GUI preferences dialog.
CLI_GUI_SETUP = _("Set up user preferences (GUI version)")
# Translators: This text is the description displayed when Orca is launched
# from the command line and the help text is displayed.
CLI_EPILOG = _("Report bugs to orca-list@gnome.org.")
# Translators: In chat applications, it is often possible to see that a "buddy"
# is typing currently (e.g. via a keyboard icon or status text). Some users like
# to have this typing status announced by Orca; others find that announcement
# unpleasant. Therefore, it is a setting in Orca. This string to be translated
# is presented when the value of the setting is toggled.
CHAT_BUDDY_TYPING_OFF = _("Do not announce when your buddies are typing.")
# Translators: In chat applications, it is often possible to see that a "buddy"
# is typing currently (e.g. via a keyboard icon or status text). Some users like
# to have this typing status announced by Orca; others find that announcement
# unpleasant. Therefore, it is a setting in Orca. This string to be translated
# is presented when the value of the setting is toggled.
CHAT_BUDDY_TYPING_ON = _("announce when your buddies are typing.")
# Translators: In chat applications, Orca automatically presents incoming
# messages in speech and braille. If a user is in multiple conversations or
# channels at the same time, it can be confusing to know what room or channel
# a given message came from just from hearing/reading it. This string to be
# translated is presented to the user to clarify where an incoming message
# came from. The name of the chat room is the string substitution.
CHAT_MESSAGE_FROM_ROOM = _("Message from chat room %s")
# Translators: This message is presented to inform the user that a new chat
# conversation has been added to the existing conversations. The "tab" here
# refers to the tab which contains the label for a GtkNotebook page. The
# label on the tab is the string substitution.
CHAT_NEW_TAB = _("New chat tab %s")
# Translators: In chat applications, Orca automatically presents incoming
# messages in speech and braille. If a user is in multiple conversations or
# channels at the same time, it can be confusing to know what room or channel
# a given message came from just from hearing/reading it. For this reason, Orca
# has an option to present the name of the room first ("#a11y <joanie> hello!"
# instead of "<joanie> hello!"). This string to be translated is presented when
# the value of the setting is toggled.
CHAT_ROOM_NAME_PREFIX_OFF = _("Do not speak chat room name.")
# Translators: In chat applications, Orca automatically presents incoming
# messages in speech and braille. If a user is in multiple conversations or
# channels at the same time, it can be confusing to know what room or channel
# a given message came from just from hearing/reading it. For this reason, Orca
# has an option to present the name of the room first ("#a11y <joanie> hello!"
# instead of "<joanie> hello!"). This string to be translated is presented when
# the value of the setting is toggled.
CHAT_ROOM_NAME_PREFIX_ON = _("speak chat room name.")
# Translators: Orca has a command to review previous chat room messages in
# speech and braille. Some users prefer to have this message history combined
# (e.g. the last ten messages which came in, no matter what room they came
# from). Other users prefer to have specific room history (e.g. the last ten
# messages from #a11y). Therefore, this is a setting in Orca. This string to be
# translated is presented when the value of the setting is toggled.
CHAT_SEPARATE_HISTORIES_OFF = \
_("Do not provide chat room specific message histories.")
# Translators: Orca has a command to review previous chat room messages in
# speech and braille. Some users prefer to have this message history combined
# (e.g. the last ten messages which came in, no matter what room they came
# from). Other users prefer to have specific room history (e.g. the last ten
# messages from #a11y). Therefore, this is a setting in Orca. This string to be
# translated is presented when the value of the setting is toggled.
CHAT_SEPARATE_HISTORIES_ON = _("Provide chat room specific message histories.")
# Translators: this is a regular expression that is intended to match
# a positive 'yes' response from a user at the command line. The expression
# as given means - does it begin with (that's the '^' character) any of
# the characters in the '[' ']'? In this case, we've chosen 'Y', 'y', and
# '1' to mean positive answers, so any string beginning with 'Y', 'y', or
# '1' will match. For an example of translation, assume your language has
# the words 'posolutely' and 'absitively' as common words that mean the
# equivalent of 'yes'. You might make the expression match the upper and
# lower case forms: "^[aApP1]". If the 'yes' and 'no' words for your
# locale begin with the same character, the regular expression should be
# modified to use words. For example: "^(yes|Yes)" (note the change from
# using '[' and ']' to '(' and ')').
#
# Finally, this expression should match what you've chosen for the
# translation of the "Enter y or n:" strings for this file.
CONSOLE_SETUP_YESEXPR = _("^[Yy1]")
# Translators: this is a regular expression that is intended to match
# a positive 'yes' response from a user at the command line. The expression
# as given means - does it begin with (that's the '^' character) any of
# the characters in the '[' ']'? In this case, we've chosen 'Y', 'y', and
# '1' to mean positive answers, so any string beginning with 'Y', 'y', or
# '1' will match. For an example of translation, assume your language has
# the words 'posolutely' and 'absitively' as common words that mean the
# equivalent of 'yes'. You might make the expression match the upper and
# lower case forms: "^[aApP1]". If the 'yes' and 'no' words for your
# locale begin with the same character, the regular expression should be
# modified to use words. For example: "^(yes|Yes)" (note the change from
# using '[' and ']' to '(' and ')').
#
# Finally, this expression should match what you've chosen for the
# translation of the "Enter y or n:" strings for this file.
CONSOLE_SETUP_NOEXPR = _("^[Nn0]")
# Translators: This is prompting for whether the user wants to use a refreshable
# braille display (an external hardware device) or not. It is part of Orca's
# console-based setup.
CONSOLE_SETUP_ENABLE_BRAILLE = _("Enable Braille? Enter y or n: ")
# Translators: If key echo is enabled, Orca will speak the name of a key as the
# user types on the keyboard. This message is presented during Orca's console-
# based setup. If the user wants key echo, they will then be prompted for which
# classes of keys they want echoed.
CONSOLE_SETUP_ENABLE_ECHO_KEY = _("Enable key echo? Enter y or n: ")
# Translators: This is in reference to key echo for alphabetic keys and
# is part of Orca's console-based setup.
CONSOLE_SETUP_ENABLE_ECHO_ALPHABETIC_KEYS = \
_("Enable alphabetic keys? Enter y or n: ")
# Translators: This is in reference to key echo for numeric keys and
# is part of Orca's console-based setup.
CONSOLE_SETUP_ENABLE_ECHO_NUMERIC_KEYS = \
_("Enable numeric keys? Enter y or n: ")
# Translators: This is in reference to key echo for punctuation keys and
# is part of Orca's console-based setup.
CONSOLE_SETUP_ENABLE_ECHO_PUNCTUATION_KEYS = \
_("Enable punctuation keys? Enter y or n: ")
# Translators: This is in reference to key echo for the space key and
# is part of Orca's console-based setup.
CONSOLE_SETUP_ENABLE_SPACE = \
_("Enable space? Enter y or n: ")
# Translators: This is in reference to key echo for keys such as CTRL, ALT,
# Shift, Insert, etc. It is part of Orca's console-based setup.
CONSOLE_SETUP_ENABLE_ECHO_MODIFIER_KEYS = \
_("Enable modifier keys? Enter y or n: ")
# Translators: This is in reference to key echo for function keys (F1-F12).
# It is part of Orca's console-based setup.
CONSOLE_SETUP_ENABLE_ECHO_FUNCTION_KEYS = \
_("Enable function keys? Enter y or n: ")
# Translators: This is in reference to key echo for keys that perform actions
# such as enter, escape, tab, backspace, delete, arrow keys, page up/down, etc.
# It is part of Orca's console-based setup.
CONSOLE_SETUP_ENABLE_ECHO_ACTION_KEYS = _("Enable action keys? Enter y or n: ")
# Translators: The word echo feature of Orca will speak the word prior to the
# caret when the user types a word delimiter. This message is presented during
# Orca's console-based setup.
CONSOLE_SETUP_ENABLE_ECHO_WORD = _("Enable echo by word? Enter y or n: ")
# Translators: This is prompting for a numerical choice to be typed at Orca's
# console-based setup.
CONSOLE_SETUP_ENTER_CHOICE = _("Enter choice: ")
# Translators: This is letting the user know they input an invalid integer
# value on the command line and is also requesting they enter a valid integer
# value.
# This message is part of Orca's console-based setup.
CONSOLE_SETUP_ENTER_VALID_NUMBER = _("Please enter a valid number.")
# Translators: This is letting the user know they input an invalid yes/no value
# on the command line and is also requesting they enter a valid one. This message
# is part of Orca's console-based setup.
CONSOLE_SETUP_ENTER_Y_OR_N = _("Please enter y or n.")
# Translators: Orca has two keyboard layouts which impact what keybindings are
# used to perform Orca commands. The two layouts are "Laptop" and "Desktop".
# This message is part of Orca's console-based setup.
CONSOLE_SETUP_SELECT_KEYBOARD_LAYOUT = _("Select desired keyboard layout.")
# Translators: Orca has two keyboard layouts which impact what keybindings are
# used to perform Orca commands. The two layouts are "Laptop" and "Desktop".
# This message is part of Orca's console-based setup.
CONSOLE_SETUP_KEYBOARD_LAYOUT_DESKTOP = _("1. Desktop")
# Translators: Orca has two keyboard layouts which impact what keybindings are
# used to perform Orca commands. The two layouts are "Laptop" and "Desktop".
# This message is part of Orca's console-based setup.
CONSOLE_SETUP_KEYBOARD_LAYOUT_LAPTOP = _("2. Laptop")
# Translators: This is prompting the user for a numerical choice from a list of
# available speech synthesis engines. It is part of Orca's console-based setup.
CONSOLE_SETUP_SELECT_SPEECH_SERVER = _("Select desired speech server.")
# Translators: The speech system represents what general speech wrapper is going
# to be used. Speech-dispatcher is an example of a speech system. It provides
# wrappers around specific speech servers (engines). This message is part of
# Orca's console-based setup.
CONSOLE_SETUP_SELECT_SPEECH_SYSTEM = _("Select desired speech system:")
# Translators: This is prompting for a numerical value from a list of choices of
# speech synthesis voices (e.g., male, female, child). This message is part of
# Orca's console-based setup.
CONSOLE_SETUP_SELECT_VOICE = _("Select desired voice:")
# Translators: This message indicates that no working speech servers (speech
# synthesis engines) can be found. It is part of Orca's console-based setup.
CONSOLE_SETUP_SERVERS_NOT_AVAILABLE = _("No servers available.\n")
# Translators: This message indicates that the speech server (speech synthesis
# engine) is not working properly and no voices (e.g., male, female, child) are
# available. This message is part of Orca's console-based setup.
CONSOLE_SETUP_VOICES_NOT_AVAILABLE = _("No voices available.\n")
# Translators: This message indicates that speech synthesis will not be used.
# It is part of Orca's console-based setup.
CONSOLE_SETUP_SPEECH_NOT_USED = _("Speech will not be used.\n")
# Translators: This message is presented at the beginning of Orca's console-
# based setup.
CONSOLE_SETUP_START = _("Screen reader setup.")
# Translators: This message is presented at the completion of Orca's console-
# based setup.
CONSOLE_SETUP_COMPLETE = _("Setup complete. Press Return to continue.")
# The constants below are strftime-style date format patterns (not gettext
# strings). Naming convention: D = day, M = month, Y = year, in the order
# the components appear in the pattern.
# NOTE(review): "%-d" (day without leading zero) is a glibc extension and is
# not portable to all platforms — presumably acceptable for the targets this
# runs on; confirm if portability matters.
# "%x": the locale's own date representation.
DATE_FORMAT_LOCALE = "%x"
# Numeric-only formats, e.g. 31/12, 12/31, 31/12/1999, ...
DATE_FORMAT_NUMBERS_DM = "%d/%m"
DATE_FORMAT_NUMBERS_MD = "%m/%d"
DATE_FORMAT_NUMBERS_DMY = "%d/%m/%Y"
DATE_FORMAT_NUMBERS_MDY = "%m/%d/%Y"
DATE_FORMAT_NUMBERS_YMD = "%Y/%m/%d"
# Full formats with spelled-out weekday (%A) and month (%B),
# e.g. "Friday, 31 December, 1999".
DATE_FORMAT_FULL_DM = "%A, %-d %B"
DATE_FORMAT_FULL_MD = "%A, %B %-d"
DATE_FORMAT_FULL_DMY = "%A, %-d %B, %Y"
DATE_FORMAT_FULL_MDY = "%A, %B %-d, %Y"
DATE_FORMAT_FULL_YMD = "%Y. %B %-d, %A."
# Abbreviated formats with short weekday (%a) and month (%b),
# e.g. "Fri, 31 Dec, 1999".
DATE_FORMAT_ABBREVIATED_DM = "%a, %-d %b"
DATE_FORMAT_ABBREVIATED_MD = "%a, %b %-d"
DATE_FORMAT_ABBREVIATED_DMY = "%a, %-d %b, %Y"
DATE_FORMAT_ABBREVIATED_MDY = "%a, %b %-d, %Y"
DATE_FORMAT_ABBREVIATED_YMD = "%Y. %b %-d, %a."
# Translators: The "default" button in a dialog box is the button that gets
# activated when Enter is pressed anywhere within that dialog box.
DEFAULT_BUTTON_IS = _("Default button is %s")
# Translators: This string is part of the presentation of an item that includes
# one or several consecutive subscripted characters. For example, 'X' followed
# by 'subscript 2' followed by 'subscript 3' should be presented to the user as
# 'X subscript 23'.
DIGITS_SUBSCRIPT = _(" subscript %s")
# Translators: This string is part of the presentation of an item that includes
# one or several consecutive superscripted characters. For example, 'X' followed
# by 'superscript 2' followed by 'superscript 3' should be presented to the user
# as 'X superscript 23'.
DIGITS_SUPERSCRIPT = _(" superscript %s")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects the entire
# document by pressing Ctrl+A.
DOCUMENT_SELECTED_ALL = _("entire document selected")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects from the
# current location to the end of the document by pressing Ctrl+Shift+End.
DOCUMENT_SELECTED_DOWN = _("document selected from cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user unselects previously
# selected text by pressing Ctrl+Shift+End.
DOCUMENT_UNSELECTED_DOWN = _("document unselected from cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects from the
# current location to the start of the document by pressing Ctrl+Shift+Home.
DOCUMENT_SELECTED_UP = _("document selected to cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user unselects previously
# selected text by pressing Ctrl+Shift+Home.
DOCUMENT_UNSELECTED_UP = _("document unselected to cursor position")
# Translators: Orca allows you to dynamically define which row of a spreadsheet
# or table should be treated as containing column headers. This message is
# presented when the user sets the row to a particular row number.
DYNAMIC_COLUMN_HEADER_SET = _("Dynamic column header set for row %d")
# Translators: Orca allows you to dynamically define which row of a spreadsheet
# or table should be treated as containing column headers. This message is
# presented when the user unsets the row so it is no longer treated as if it
# contained column headers.
DYNAMIC_COLUMN_HEADER_CLEARED = _("Dynamic column header cleared.")
# Translators: Orca allows you to dynamically define which column of a
# spreadsheet or table should be treated as containing row headers. This
# message is presented when the user sets the column to a particular column
# number.
DYNAMIC_ROW_HEADER_SET = _("Dynamic row header set for column %s")
# Translators: Orca allows you to dynamically define which column of a
# spreadsheet or table should be treated as containing row headers. This
# message is presented when the user unsets the column so it is no longer
# treated as if it contained row headers.
DYNAMIC_ROW_HEADER_CLEARED = _("Dynamic row header cleared.")
# Translators: this is used to announce that the current input line in a
# spreadsheet is blank/empty.
EMPTY = _("empty")
# Translators: This is the size of a file in kilobytes
FILE_SIZE_KB = _("%.2f kilobytes")
# Translators: This is the size of a file in megabytes
FILE_SIZE_MB = _("%.2f megabytes")
# Translators: This message is presented to the user after performing a file
# search to indicate there were no matches.
FILES_NOT_FOUND = _("No files found.")
# Translators: the 'flat review' feature of Orca allows the blind user to
# explore the text in a window in a 2D fashion. That is, Orca treats all
# the text from all objects in a window (e.g., buttons, labels, etc.) as a
# sequence of words in a sequence of lines. This message is presented to
# let the user know that he/she successfully appended the contents under
# flat review onto the existing contents of the clipboard.
FLAT_REVIEW_APPENDED = _("Appended contents to clipboard.")
# Translators: the 'flat review' feature of Orca allows the blind user to
# explore the text in a window in a 2D fashion. That is, Orca treats all
# the text from all objects in a window (e.g., buttons, labels, etc.) as a
# sequence of words in a sequence of lines. This message is presented to
# let the user know that he/she successfully copied the contents under flat
# review to the clipboard.
FLAT_REVIEW_COPIED = _("Copied contents to clipboard.")
# Translators: the 'flat review' feature of Orca allows the blind user to
# explore the text in a window in a 2D fashion. That is, Orca treats all
# the text from all objects in a window (e.g., buttons, labels, etc.) as a
# sequence of words in a sequence of lines. This message is presented to
# let the user know that he/she attempted to use a flat review command when
# not using flat review.
FLAT_REVIEW_NOT_IN = _("Not using flat review.")
# Translators: the 'flat review' feature of Orca allows the blind user to
# explore the text in a window in a 2D fashion. That is, Orca treats all
# the text from all objects in a window (e.g., buttons, labels, etc.) as a
# sequence of words in a sequence of lines. This message is presented to
# let the user know he/she just entered flat review.
FLAT_REVIEW_START = _("Entering flat review.")
# Translators: the 'flat review' feature of Orca allows the blind user to
# explore the text in a window in a 2D fashion. That is, Orca treats all
# the text from all objects in a window (e.g., buttons, labels, etc.) as a
# sequence of words in a sequence of lines. This message is presented to
# let the user know he/she just exited flat review.
FLAT_REVIEW_STOP = _("Leaving flat review.")
# Translators: this means a particular cell in a spreadsheet has a formula
# (e.g., "=sum(a1:d1)")
HAS_FORMULA = _("has formula")
# Translators: The following string is spoken to let the user know that he/she
# is on a link within an image map. An image map is an image/graphic which has
# been divided into regions. Each region can be clicked on and has an associated
# link. Please see http://en.wikipedia.org/wiki/Imagemap for more information
# and examples.
IMAGE_MAP_LINK = _("image map link")
# Translators: This is a spoken and/or brailled message letting the user know
# that the key combination (e.g., Ctrl+Alt+f) they just entered has already been
# bound to another command and is thus unavailable. The string substituted in is
# the name of the command which already has the binding.
KB_ALREADY_BOUND = _("The key entered is already bound to %s")
# Translators: This is a spoken and/or brailled message letting the user know
# that Orca has recorded a new key combination (e.g. Alt+Ctrl+g) as a result of
# their input. The string substituted in is the new key combination.
KB_CAPTURED = _("Key captured: %s. Press enter to confirm.")
# Translators: This is a spoken and/or brailled message letting the user know
# that Orca has assigned a new key combination (e.g. Alt+Ctrl+g) as a result of
# their input. The string substituted in is the new key combination.
KB_CAPTURED_CONFIRMATION = _("The new key is: %s")
# Translators: This is a spoken and/or brailled message letting the user know
# Orca is about to delete an existing key combination (e.g. Alt+Ctrl+g) as a
# result of their input.
KB_DELETED = _("Key binding deleted. Press enter to confirm.")
# Translators: This is a spoken and/or brailled message letting the user know
# Orca has deleted an existing key combination (e.g. Alt+Ctrl+g) as a result of
# their input.
KB_DELETED_CONFIRMATION = _("The keybinding has been removed.")
# Translators: This is a spoken and/or brailled message asking the user to press
# a new key combination (e.g., Alt+Ctrl+g) to create a new key binding for an
# Orca command.
KB_ENTER_NEW_KEY = _("enter new key")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command. The
# following string is a brief message which will be presented to the user who
# is cycling amongst the various echo options.
KEY_ECHO_KEY_BRIEF = C_("key echo", "key")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command.
KEY_ECHO_KEY_FULL = _("Key echo set to key.")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command. The
# following string is a brief message which will be presented to the user who
# is cycling amongst the various echo options.
KEY_ECHO_NONE_BRIEF = C_("key echo", "None")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command.
KEY_ECHO_NONE_FULL = _("Key echo set to None.")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command. The
# following string is a brief message which will be presented to the user who
# is cycling amongst the various echo options.
KEY_ECHO_KEY_AND_WORD_BRIEF = C_("key echo", "key and word")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command.
KEY_ECHO_KEY_AND_WORD_FULL = _("Key echo set to key and word.")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command. The
# following string is a brief message which will be presented to the user who
# is cycling amongst the various echo options.
KEY_ECHO_SENTENCE_BRIEF = C_("key echo", "sentence")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command.
KEY_ECHO_SENTENCE_FULL = _("Key echo set to sentence.")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command. The
# following string is a brief message which will be presented to the user who
# is cycling amongst the various echo options.
KEY_ECHO_WORD_BRIEF = C_("key echo", "word")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command.
KEY_ECHO_WORD_FULL = _("Key echo set to word.")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command. The
# following string is a brief message which will be presented to the user who
# is cycling amongst the various echo options.
KEY_ECHO_WORD_AND_SENTENCE_BRIEF = C_("key echo", "word and sentence")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command.
KEY_ECHO_WORD_AND_SENTENCE_FULL = _("Key echo set to word and sentence.")
# Translators: This phrase is spoken to inform the user of all of the MathML
# enclosure notations associated with a given mathematical expression. For
# instance, the expression x+y could be enclosed by a box, or enclosed by a
# circle. It could also be enclosed by a box and a circle and long division
# sign and have a line on the left and on the right and a vertical strike.
# (Though let's hope not.) Given that we do not know the enclosures, their
# order, or their combination, we'll present them as a list. The string
# substitution is for that list of enclosure types. For more information
# about the MathML 'menclose' element and its notation types, see:
# http://www.w3.org/TR/MathML3/chapter3.html#presm.menclose
MATH_ENCLOSURE_ENCLOSED_BY = C_("math enclosure", "Enclosed by: %s")
# Translators: This phrase is spoken to describe one MathML enclosure notation
# associated with a mathematical expression. Because an expression, such as
# x+y, can have one or many enclosure notations (box, circle, long division,
# line on the left, vertical strike), we present them as a list of notations.
# For more information about the MathML 'menclose' element and its notation
# types, see: http://www.w3.org/TR/MathML3/chapter3.html#presm.menclose
MATH_ENCLOSURE_ACTUARIAL = C_("math enclosure", "an actuarial symbol")
# Translators: This phrase is spoken to describe one MathML enclosure notation
# associated with a mathematical expression. Because an expression, such as
# x+y, can have one or many enclosure notations (box, circle, long division,
# line on the left, vertical strike), we present them as a list of notations.
# For more information about the MathML 'menclose' element and its notation
# types, see: http://www.w3.org/TR/MathML3/chapter3.html#presm.menclose
MATH_ENCLOSURE_BOX = C_("math enclosure", "a box")
# Translators: This phrase is spoken to describe one MathML enclosure notation
# associated with a mathematical expression. Because an expression, such as
# x+y, can have one or many enclosure notations (box, circle, long division,
# line on the left, vertical strike), we present them as a list of notations.
# For more information about the MathML 'menclose' element and its notation
# types, see: http://www.w3.org/TR/MathML3/chapter3.html#presm.menclose
MATH_ENCLOSURE_CIRCLE = C_("math enclosure", "a circle")
# Translators: This phrase is spoken to describe one MathML enclosure notation
# associated with a mathematical expression. Because an expression, such as
# x+y, can have one or many enclosure notations (box, circle, long division,
# line on the left, vertical strike), we present them as a list of notations.
# For more information about the MathML 'menclose' element and its notation
# types, see: http://www.w3.org/TR/MathML3/chapter3.html#presm.menclose
MATH_ENCLOSURE_LONGDIV = C_("math enclosure", "a long division sign")
# Translators: This phrase is spoken to describe one MathML enclosure notation
# associated with a mathematical expression. Because an expression, such as
# x+y, can have one or many enclosure notations (box, circle, long division,
# line on the left, vertical strike), we present them as a list of notations.
# For more information about the MathML 'menclose' element and its notation
# types, see: http://www.w3.org/TR/MathML3/chapter3.html#presm.menclose
MATH_ENCLOSURE_RADICAL = C_("math enclosure", "a radical")
# Translators: This phrase is spoken to describe one MathML enclosure notation
# associated with a mathematical expression. Because an expression, such as
# x+y, can have one or many enclosure notations (box, circle, long division,
# line on the left, vertical strike), we present them as a list of notations.
# For more information about the MathML 'menclose' element and its notation
# types, see: http://www.w3.org/TR/MathML3/chapter3.html#presm.menclose
MATH_ENCLOSURE_ROUNDEDBOX = C_("math enclosure", "a rounded box")
# Translators: This phrase is spoken to describe one MathML enclosure notation
# associated with a mathematical expression. Because an expression, such as
# x+y, can have one or many enclosure notations (box, circle, long division,
# line on the left, vertical strike), we present them as a list of notations.
# For more information about the MathML 'menclose' element and its notation
# types, see: http://www.w3.org/TR/MathML3/chapter3.html#presm.menclose
MATH_ENCLOSURE_HORIZONTALSTRIKE = C_("math enclosure", "a horizontal strike")
# Translators: This phrase is spoken to describe one MathML enclosure notation
# associated with a mathematical expression. Because an expression, such as
# x+y, can have one or many enclosure notations (box, circle, long division,
# line on the left, vertical strike), we present them as a list of notations.
# For more information about the MathML 'menclose' element and its notation
# types, see: http://www.w3.org/TR/MathML3/chapter3.html#presm.menclose
MATH_ENCLOSURE_VERTICALSTRIKE = C_("math enclosure", "a vertical strike")
# Translators: This phrase is spoken to describe one MathML enclosure notation
# associated with a mathematical expression. Because an expression, such as
# x+y, can have one or many enclosure notations (box, circle, long division,
# line on the left, vertical strike), we present them as a list of notations.
# For more information about the MathML 'menclose' element and its notation
# types, see: http://www.w3.org/TR/MathML3/chapter3.html#presm.menclose
MATH_ENCLOSURE_DOWNDIAGONALSTRIKE = C_("math enclosure", "a down diagonal strike")
# Translators: This phrase is spoken to describe one MathML enclosure notation
# associated with a mathematical expression. Because an expression, such as
# x+y, can have one or many enclosure notations (box, circle, long division,
# line on the left, vertical strike), we present them as a list of notations.
# For more information about the MathML 'menclose' element and its notation
# types, see: http://www.w3.org/TR/MathML3/chapter3.html#presm.menclose
MATH_ENCLOSURE_UPDIAGONALSTRIKE = C_("math enclosure", "an up diagonal strike")
# Translators: This phrase is spoken to describe one MathML enclosure notation
# associated with a mathematical expression. Because an expression, such as
# x+y, can have one or many enclosure notations (box, circle, long division,
# line on the left, vertical strike), we present them as a list of notations.
# For more information about the MathML 'menclose' element and its notation
# types, see: http://www.w3.org/TR/MathML3/chapter3.html#presm.menclose
MATH_ENCLOSURE_NORTHEASTARROW = C_("math enclosure", "a northeast arrow")
# Translators: This phrase is spoken to describe one MathML enclosure notation
# associated with a mathematical expression. Because an expression, such as
# x+y, can have one or many enclosure notations (box, circle, long division,
# line on the left, vertical strike), we present them as a list of notations.
# For more information about the MathML 'menclose' element and its notation
# types, see: http://www.w3.org/TR/MathML3/chapter3.html#presm.menclose
MATH_ENCLOSURE_BOTTOM = C_("math enclosure", "a line at the bottom")
# Translators: This phrase is spoken to describe one MathML enclosure notation
# associated with a mathematical expression. Because an expression, such as
# x+y, can have one or many enclosure notations (box, circle, long division,
# line on the left, vertical strike), we present them as a list of notations.
# For more information about the MathML 'menclose' element and its notation
# types, see: http://www.w3.org/TR/MathML3/chapter3.html#presm.menclose
MATH_ENCLOSURE_LEFT = C_("math enclosure", "a line on the left")
# Translators: This phrase is spoken to describe one MathML enclosure notation
# associated with a mathematical expression. Because an expression, such as
# x+y, can have one or many enclosure notations (box, circle, long division,
# line on the left, vertical strike), we present them as a list of notations.
# For more information about the MathML 'menclose' element and its notation
# types, see: http://www.w3.org/TR/MathML3/chapter3.html#presm.menclose
MATH_ENCLOSURE_RIGHT = C_("math enclosure", "a line on the right")
# Translators: This phrase is spoken to describe one MathML enclosure notation
# associated with a mathematical expression. Because an expression, such as
# x+y, can have one or many enclosure notations (box, circle, long division,
# line on the left, vertical strike), we present them as a list of notations.
# For more information about the MathML 'menclose' element and its notation
# types, see: http://www.w3.org/TR/MathML3/chapter3.html#presm.menclose
MATH_ENCLOSURE_TOP = C_("math enclosure", "a line at the top")
# Translators: This phrase is spoken to describe one MathML enclosure notation
# associated with a mathematical expression. Because an expression, such as
# x+y, can have one or many enclosure notations (box, circle, long division,
# line on the left, vertical strike), we present them as a list of notations.
# For more information about the MathML 'menclose' element and its notation
# types, see: http://www.w3.org/TR/MathML3/chapter3.html#presm.menclose
MATH_ENCLOSURE_PHASOR_ANGLE = C_("math enclosure", "a phasor angle")
# Translators: This phrase is spoken to describe one MathML enclosure notation
# associated with a mathematical expression. Because an expression, such as
# x+y, can have one or many enclosure notations (box, circle, long division,
# line on the left, vertical strike), we present them as a list of notations.
# For more information about the MathML 'menclose' element and its notation
# types, see: http://www.w3.org/TR/MathML3/chapter3.html#presm.menclose
# This particular string is for the "madruwb" notation type.
MATH_ENCLOSURE_MADRUWB = C_("math enclosure", "an arabic factorial symbol")
# Translators: This phrase is spoken to inform the user of all of the MathML
# enclosure notations associated with a given mathematical expression. For
# instance, the expression x+y could be enclosed by a box, or enclosed by a
# circle. It could also be enclosed by a box and a circle and long division
# sign and have a line on the left and on the right and a vertical strike.
# (Though let's hope not.) Given that we do not know the enclosures, their
# order, or their combination, we'll present them as a list. This string
# will be inserted before the final item in the list if there is more than
# one enclosure notation. For more information about the MathML 'menclose'
# element and its notation types, see:
# http://www.w3.org/TR/MathML3/chapter3.html#presm.menclose
MATH_ENCLOSURE_AND = C_("math enclosure", "and")
# Translators: This phrase is spoken to inform the user that what is about to
# be said is part of a mathematical fraction. For instance, given x+1/y+2, Orca
# would say "fraction start, x+1 over y+2, fraction end."
MATH_FRACTION_START = _("fraction start")
# Translators: This phrase is spoken to inform the user that what is about to
# be said is part of a mathematical fraction whose bar is not displayed. See
# https://en.wikipedia.org/wiki/Combination for an example. Note that the
# comma is inserted here to cause a very brief pause in the speech. Otherwise,
# in English, the resulting speech sounds like we have a fraction which lacks
# the start of the bar. If this is a non-issue for your language, the comma and
# the pause which results is not needed. You should be able to test this with
# "spd-say <your text here>" in a terminal on a machine where speech-dispatcher
# is installed.
MATH_FRACTION_WITHOUT_BAR_START = _("fraction without bar, start")
# Translators: This word refers to the line separating the numerator from the
# denominator in a mathematical fraction. For instance, given x+1/y+2, Orca
# would say "fraction start, x+1 over y+2, fraction end."
MATH_FRACTION_LINE = C_("math fraction", "over")
# Translators: This phrase is spoken to inform the user that the last spoken
# phrase is the end of a mathematical fraction. For instance, given x+1/y+2,
# Orca would say "fraction start, x+1 over y+2, fraction end."
MATH_FRACTION_END = _("fraction end")
# Translators: This phrase is spoken to inform the user that what is about to
# be spoken is a square root. For instance, for √9 Orca would say "square root
# of 9, root end" (assuming the user settings indicate that root endings should
# be spoken). Note that the radicand, which follows the "of", is unknown and
# might not even be a simple string; it might be the square root of another
# expression such as a fraction.
MATH_SQUARE_ROOT_OF = _("square root of")
# Translators: This phrase is spoken to inform the user that what is about to
# be spoken is a cube root. For instance, for the cube root of 9 Orca would
# say "cube root of 9, root end" (assuming the user settings indicate that root
# endings should be spoken). Note that the radicand, which follows the "of",
# is unknown and might not even be a simple string; it might be the cube root
# of another expression such as a fraction.
MATH_CUBE_ROOT_OF = _("cube root of")
# Translators: This phrase is spoken to inform the user that what is about to
# be spoken is an nth root. https://en.wikipedia.org/wiki/Nth_root. For instance,
# for the fourth root of 9, Orca would say "fourth root of 9, root end" (assuming
# the user settings indicate that root endings should be spoken). Note that the
# index, which precedes this string, is unknown and might not even be a simple
# expression like "fourth"; the index might instead be a fraction.
MATH_ROOT_OF = _("root of")
# Translators: This phrase is spoken to inform the user that what is about to
# be said is part of a mathematical root (square root, cube root, nth root).
# It is primarily intended to be spoken when the index of the root is not a
# simple expression. For instance, for the fourth root of 9, simply speaking
# "fourth root of 9" may be sufficient for the user. But if the index is not
# 4, but instead the fraction x/4, beginning the phrase with "root start" can
# help the user better understand that x/4 is the index of the root.
MATH_ROOT_START = _("root start")
# Translators: This phrase is spoken to inform the user that the last spoken
# phrase is the end of a mathematical root (square root, cube root, nth root).
# For instance, for the cube root of 9, Orca would say "cube root of 9, root
# end" (assuming the user settings indicate that root endings should be spoken).
MATH_ROOT_END = _("root end")
# Translators: This phrase is spoken to inform the user that what is about to
# be spoken is subscripted text in a mathematical expression. Note that the
# subscript might be simple text or may itself be a mathematical expression,
# and in this instance we have no additional context through which a more user-
# friendly word or phrase can reliably be chosen.
MATH_SUBSCRIPT = C_("math script generic", "subscript")
# Translators: This phrase is spoken to inform the user that what is about to
# be spoken is superscripted text in a mathematical expression. Note that the
# superscript might be simple text or may itself be a mathematical expression,
# and in this instance we have no additional context through which a more user-
# friendly word or phrase can reliably be chosen.
MATH_SUPERSCRIPT = C_("math script generic", "superscript")
# Translators: This phrase is spoken to inform the user that what is about to
# be spoken is subscripted text which precedes the base in a mathematical
# expression. See, for instance, the MathML mmultiscripts element:
# http://www.w3.org/TR/MathML3/chapter3.html#presm.mmultiscripts
# https://developer.mozilla.org/en-US/docs/Web/MathML/Element/mmultiscripts
MATH_PRE_SUBSCRIPT = C_("math script", "pre-subscript")
# Translators: This phrase is spoken to inform the user that what is about to
# be spoken is superscripted text which precedes the base in a mathematical
# expression. See, for instance, the MathML mmultiscripts element:
# http://www.w3.org/TR/MathML3/chapter3.html#presm.mmultiscripts
# https://developer.mozilla.org/en-US/docs/Web/MathML/Element/mmultiscripts
MATH_PRE_SUPERSCRIPT = C_("math script", "pre-superscript")
# Translators: This phrase is spoken to inform the user that what is about to
# be spoken is underscripted text in a mathematical expression. Note that the
# underscript might be simple text or may itself be a mathematical expression,
# and in this instance we have no additional context through which a more user-
# friendly word or phrase can reliably be chosen. Examples of underscripts:
# http://www.w3.org/TR/MathML/chapter3.html#presm.munder
# https://reference.wolfram.com/language/ref/Underscript.html
MATH_UNDERSCRIPT = C_("math script generic", "underscript")
# Translators: This phrase is spoken to inform the user that what is about to
# be spoken is overscripted text in a mathematical expression. Note that the
# overscript might be simple text or may itself be a mathematical expression,
# and in this instance we have no additional context through which a more user-
# friendly word or phrase can reliably be chosen. Examples of overscripts:
# http://www.w3.org/TR/MathML/chapter3.html#presm.mover
# https://reference.wolfram.com/language/ref/Overscript.html
MATH_OVERSCRIPT = C_("math script generic", "overscript")
# Translators: This phrase is spoken to inform the user that the last spoken
# phrase is the end of a mathematical table.
MATH_TABLE_END = C_("math table", "table end")
# Translators: This phrase is spoken to inform the user that the last spoken
# phrase is the end of a mathematical table which is nested inside another
# mathematical table.
MATH_NESTED_TABLE_END = C_("math table", "nested table end")
# Translators: Inaccessible means that the application cannot be read by Orca.
# This usually means the application is not friendly to the assistive technology
# infrastructure.
INACCESSIBLE = _("inaccessible")
# Translators: This brief message indicates that indentation and
# justification will be spoken.
INDENTATION_JUSTIFICATION_OFF_BRIEF = \
C_("indentation and justification", "Disabled")
# Translators: This detailed message indicates that indentation and
# justification will not be spoken.
INDENTATION_JUSTIFICATION_OFF_FULL = \
_("Speaking of indentation and justification disabled.")
# Translators: This brief message indicates that indentation and
# justification will be spoken.
INDENTATION_JUSTIFICATION_ON_BRIEF = \
C_("indentation and justification", "Enabled")
# Translators: This detailed message indicates that indentation and
# justification will be spoken.
INDENTATION_JUSTIFICATION_ON_FULL = \
_("Speaking of indentation and justification enabled.")
# Translators: Orca has a "Learn Mode" that will allow the user to type any key
# on the keyboard and hear what the effects of that key would be. The effects
# might be what Orca would do if it had a handler for the particular key
# combination, or they might just be to echo the name of the key if Orca doesn't
# have a handler. This message is what is presented on the braille display when
# entering Learn Mode.
LEARN_MODE_START_BRAILLE = _("Learn mode. Press escape to exit.")
# Translators: Orca has a "Learn Mode" that will allow the user to type any key
# on the keyboard and hear what the effects of that key would be. The effects
# might be what Orca would do if it had a handler for the particular key
# combination, or they might just be to echo the name of the key if Orca doesn't
# have a handler. This message is what is spoken to the user when entering Learn
# Mode.
LEARN_MODE_START_SPEECH = \
_("Entering learn mode. Press any key to hear its function. " \
"To view the screen reader's documentation, press F1. " \
"To get a list of the screen reader's default shortcuts, press F2. " \
"To get a list of the screen reader's shortcuts for the current application, " \
"press F3. " \
"To exit learn mode, press the escape key.")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects from the
# current location to the end of the line by pressing Shift+Down.
LINE_SELECTED_DOWN = _("line selected down from cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects from the
# current location to the start of the line by pressing Shift+Up.
LINE_SELECTED_UP = _("line selected up from cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user unselects previously
# selected text from the current location to the end of the paragraph by
# pressing Shift+Down.
LINE_UNSELECTED_DOWN = _("line unselected down from cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user unselects previously
# selected text from the current location to the start of the paragraph by
# pressing Shift+Up.
LINE_UNSELECTED_UP = _("line unselected up from cursor position")
# Translators: Orca has a "Learn Mode" that will allow the user to type any key
# on the keyboard and hear what the effects of that key would be. The effects
# might be what Orca would do if it had a handler for the particular key
# combination, or they might just be to echo the name of the key if Orca doesn't
# have a handler. This message is what is presented in speech and braille when
# exiting Learn Mode.
LEARN_MODE_STOP = _("Exiting learn mode.")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects from the
# current location to the start of the line by pressing Ctrl+Shift+Page_Up.
LINE_SELECTED_LEFT = _("line selected from start to previous cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects from the
# current location to the end of the line by pressing Ctrl+Shift+Page_Down.
LINE_SELECTED_RIGHT = _("line selected to end from previous cursor position")
# Translators: this indicates that this piece of text is a hypertext link.
LINK = _("link")
# Translators: this is an indication that a given link points to an object
# that is on the same page.
LINK_SAME_PAGE = _("same page")
# Translators: this is an indication that a given link points to an object
# that is at the same site (but not on the same page as the link).
LINK_SAME_SITE = _("same site")
# Translators: this is an indication that a given link points to an object
# that is at a different site than that of the link.
LINK_DIFFERENT_SITE = _("different site")
# Translators: this refers to a link to a file, where the first item is the
# protocol (ftp, ftps, or file) and the second item the name of the file being
# linked to.
LINK_TO_FILE = _("%(uri)s link to %(file)s")
# Translators: this message conveys the protocol of a link eg. http, mailto.
LINK_WITH_PROTOCOL = _("%s link")
# Translators: this message conveys the protocol of a link eg. http, mailto.
# along with the visited state of that link.
LINK_WITH_PROTOCOL_VISITED = _("visited %s link")
# Translators: The following string instructs the user how to navigate amongst
# the list of commands presented in learn mode, as well as how to exit the list
# when finished.
LIST_NAVIGATION = \
_("Use Up and Down Arrow to navigate the list. Press Escape to exit.")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# The "politeness" level is an indication of when the user wishes to be notified
# about a change to live region content. Examples include: never ("off"), when
# idle ("polite"), and when there is a change ("assertive"). Orca has several
# features to facilitate accessing live regions. This message is presented to
# inform the user that Orca's live region's "politeness" level has changed to
# "off" for all of the live regions.
LIVE_REGIONS_ALL_OFF = _("All live regions set to off")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# The "politeness" level is an indication of when the user wishes to be notified
# about a change to live region content. Examples include: never ("off"), when
# idle ("polite"), and when there is a change ("assertive"). Orca has several
# features to facilitate accessing live regions. This message is presented to
# inform the user that Orca's live region's "politeness" level for all live
# regions has been restored to their original values.
LIVE_REGIONS_ALL_RESTORED = _("live regions politeness levels restored")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# The "politeness" level is an indication of when the user wishes to be notified
# about a change to live region content. Examples include: never ("off"), when
# idle ("polite"), and when there is a change ("assertive"). Orca has several
# features to facilitate accessing live regions. This message is presented to
# inform the user of the "politeness" level for the current live region.
LIVE_REGIONS_LEVEL = _("politeness level %s")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# The "politeness" level is an indication of when the user wishes to be notified
# about a change to live region content. Examples include: never ("off"), when
# idle ("polite"), and when there is a change ("assertive"). Orca has several
# features to facilitate accessing live regions. This message is presented to
# inform the user that Orca's live region's "politeness" level has changed for
# the current live region.
LIVE_REGIONS_LEVEL_ASSERTIVE = _("setting live region to assertive")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# The "politeness" level is an indication of when the user wishes to be notified
# about a change to live region content. Examples include: never ("off"), when
# idle ("polite"), and when there is a change ("assertive"). Orca has several
# features to facilitate accessing live regions. This message is presented to
# inform the user that Orca's live region's "politeness" level has changed for
# the current live region.
LIVE_REGIONS_LEVEL_OFF = _("setting live region to off")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# The "politeness" level is an indication of when the user wishes to be notified
# about a change to live region content. Examples include: never ("off"), when
# idle ("polite"), and when there is a change ("assertive"). Orca has several
# features to facilitate accessing live regions. This message is presented to
# inform the user that Orca's live region's "politeness" level has changed for
# the current live region.
LIVE_REGIONS_LEVEL_POLITE = _("setting live region to polite")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# The "politeness" level is an indication of when the user wishes to be notified
# about a change to live region content. Examples include: never ("off"), when
# idle ("polite"), and when there is a change ("assertive"). Orca has several
# features to facilitate accessing live regions. This message is presented to
# inform the user that Orca's live region's "politeness" level has changed for
# the current live region.
LIVE_REGIONS_LEVEL_RUDE = _("setting live region to rude")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# Orca has several features to facilitate accessing live regions. This message
# is presented in response to a command that toggles whether or not Orca pays
# attention to changes in live regions. Note that turning off monitoring of live
# events is NOT the same as turning the politeness level to "off". The user can
# opt to have no notifications presented (politeness level of "off") and still
# manually review recent updates to live regions via Orca commands for doing so
# -- as long as the monitoring of live regions is enabled.
LIVE_REGIONS_MONITORING_OFF = _("Live regions monitoring off")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# Orca has several features to facilitate accessing live regions. This message
# is presented in response to a command that toggles whether or not Orca pays
# attention to changes in live regions. Note that turning off monitoring of live
# events is NOT the same as turning the politeness level to "off". The user can
# opt to have no notifications presented (politeness level of "off") and still
# manually review recent updates to live regions via Orca commands for doing so
# -- as long as the monitoring of live regions is enabled.
LIVE_REGIONS_MONITORING_ON = _("Live regions monitoring on")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# Orca has several features to facilitate accessing live regions. This message
# is presented to inform the user that a cached message is not available for the
# current live region.
LIVE_REGIONS_NO_MESSAGE = _("no live message saved")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# Orca has several features to facilitate accessing live regions. This message
# is presented to inform the user that Orca's live region features have been
# turned off.
LIVE_REGIONS_OFF = _("Live region support is off")
# Translators: Orca has a command that allows the user to move the mouse pointer
# to the current object. This is a brief message which will be presented if for
# some reason Orca cannot identify/find the current location.
LOCATION_NOT_FOUND_BRIEF = C_("location", "Not found")
# Translators: Orca has a command that allows the user to move the mouse pointer
# to the current object. This is a detailed message which will be presented if
# for some reason Orca cannot identify/find the current location.
LOCATION_NOT_FOUND_FULL = _("Could not find current location.")
# Translators: This string is used to present the state of a locking key, such
# as Caps Lock. If Caps Lock is "off", then letters typed will appear in
# lowercase; if Caps Lock is "on", they will instead appear in uppercase. This
# string is also applied to Num Lock and potentially will be applied to similar
# keys in the future.
LOCKING_KEY_STATE_OFF = C_("locking key state", "off")
# Translators: This string is used to present the state of a locking key, such
# as Caps Lock. If Caps Lock is "off", then letters typed will appear in
# lowercase; if Caps Lock is "on", they will instead appear in uppercase. This
# string is also applied to Num Lock and potentially will be applied to similar
# keys in the future.
LOCKING_KEY_STATE_ON = C_("locking key state", "on")
# Translators: This is to inform the user of the presence of the red squiggly
# line which indicates that a given word is not spelled correctly.
MISSPELLED = _("misspelled")
# Translators: Orca tries to provide more compelling output of the spell check
# dialog in some applications. The first thing it does is let the user know
# what the misspelled word is.
MISSPELLED_WORD = _("Misspelled word: %s")
# Translators: Orca tries to provide more compelling output of the spell check
# dialog in some applications. The second thing it does is give the phrase
# containing the misspelled word in the document. This is known as the context.
MISSPELLED_WORD_CONTEXT = _("Context is %s")
# Translators: Orca has a number of commands that override the default
# behavior within an application. For instance, on a web page, "h" moves
# you to the next heading. What should happen when you press an "h" in
# an entry on a web page depends: If you want to resume reading content,
# "h" should move to the next heading; if you want to enter text, "h"
# should not move you to the next heading. Similarly, if you are
# at the bottom of an entry and press Down arrow, should you leave the
# entry? Again, it depends on if you want to resume reading content or
# if you are editing the text in the entry. Because Orca doesn't know
# what you want to do, it has two modes: In browse mode, Orca treats
# key presses as commands to read the content; in focus mode, Orca treats
# key presses as something that should be handled by the focused widget.
# This string is the message presented when Orca switches to browse mode.
MODE_BROWSE = _("Browse mode")
# Translators: Orca has a number of commands that override the default
# behavior within an application. For instance, on a web page, "h" moves
# you to the next heading. What should happen when you press an "h" in
# an entry on a web page depends: If you want to resume reading content,
# "h" should move to the next heading; if you want to enter text, "h"
# should not move you to the next heading. Similarly, if you are
# at the bottom of an entry and press Down arrow, should you leave the
# entry? Again, it depends on if you want to resume reading content or
# if you are editing the text in the entry. Because Orca doesn't know
# what you want to do, it has two modes: In browse mode, Orca treats
# key presses as commands to read the content; in focus mode, Orca treats
# key presses as something that should be handled by the focused widget.
# This string is the message presented when Orca switches to focus mode.
MODE_FOCUS = _("Focus mode")
# Translators: Orca has a number of commands that override the default
# behavior within an application. For instance, on a web page, "h" moves
# you to the next heading. What should happen when you press an "h" in
# an entry on a web page depends: If you want to resume reading content,
# "h" should move to the next heading; if you want to enter text, "h"
# should not move you to the next heading. Similarly, if you are
# at the bottom of an entry and press Down arrow, should you leave the
# entry? Again, it depends on if you want to resume reading content or
# if you are editing the text in the entry. Because Orca doesn't know
# what you want to do, it has two modes: In browse mode, Orca treats
# key presses as commands to read the content; in focus mode, Orca treats
# key presses as something that should be handled by the focused widget.
# This string is a tutorial message presented to the user who has just
# navigated to a widget in browse mode to inform them of the keystroke
# they must press to enable focus mode for the purposes of interacting
# with the widget. The substituted string is a human-consumable keybinding
# such as "Alt+Shift+A."
MODE_FOCUS_TUTORIAL = _("To enable focus mode press %s.")
# Translators: (Please see the previous, detailed translator notes about
# Focus mode and Browse mode.) In order to minimize the amount of work Orca
# users need to do to switch between focus mode and browse mode, Orca attempts
# to automatically switch to the mode which is appropriate to the current
# web element. Sometimes, however, this automatic mode switching is not what
# the user wants. A good example being web apps which have their own keyboard
# navigation and use interaction model. As a result, Orca has a command which
# enables setting a "sticky" focus mode which disables all automatic toggling.
# This string is the message presented when Orca switches to sticky focus mode.
MODE_FOCUS_IS_STICKY = _("Focus mode is sticky.")
# Translators: Hovering the mouse over certain objects on a web page causes a
# new object to appear such as a pop-up menu. Orca has a command will move the
# user to the object which just appeared as a result of the user hovering the
# mouse. If this command fails, Orca will present this message.
MOUSE_OVER_NOT_FOUND = _("Mouse over object not found.")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is a message that will be
# presented to the user when an error (such as the operation timing out) kept us
# from getting these objects.
NAVIGATION_DIALOG_ERROR = _("Error: Could not create list of objects.")
# Translators: This message describes a list item in a document. Nesting level
# is how "deep" the item is (e.g., a level of 2 represents a list item inside a
# list that's inside another list).
NESTING_LEVEL = _("Nesting level %d")
# Translators: Orca has a command that moves the mouse pointer to the current
# location on a web page. If moving the mouse pointer caused an item to appear
# such as a pop-up menu, we want to present that fact.
NEW_ITEM_ADDED = _("New item has been added")
# Translators: This is intended to be a short phrase to present the fact that no
# accessible component has keyboard focus.
NO_FOCUS = _("No focus")
# Translators: This message presents the fact that no accessible application has
# keyboard focus.
NO_FOCUSED_APPLICATION = _("No application has focus.")
# Translators: This is for navigating document content by moving from blockquote
# to blockquote. This is a detailed message which will be presented to the user
# if no more blockquotes can be found.
NO_MORE_BLOCKQUOTES = _("No more blockquotes.")
# Translators: This is for navigating document content by moving from button
# to button. This is a detailed message which will be presented to the user
# if no more buttons can be found.
NO_MORE_BUTTONS = _("No more buttons.")
# Translators: This is for navigating document content by moving from check
# box to check box. This is a detailed message which will be presented to the
# user if no more check boxes can be found.
NO_MORE_CHECK_BOXES = _("No more check boxes.")
# Translators: This is for navigating document content by moving from 'large
# object' to 'large object'. A 'large object' is a logical chunk of text,
# such as a paragraph, a list, a table, etc. This is a detailed message which
# will be presented to the user if no more large objects can be found.
NO_MORE_CHUNKS = _("No more large objects.")
# Translators: This is for navigating document content by moving amongst web
# elements which have an "onClick" action. This is a detailed message which
# will be presented to the user if no more clickable elements can be found.
NO_MORE_CLICKABLES = _("No more clickables.")
# Translators: This is for navigating document content by moving from combo
# box to combo box. This is a detailed message which will be presented to the
# user if no more combo boxes can be found.
NO_MORE_COMBO_BOXES = _("No more combo boxes.")
# Translators: This is for navigating document content by moving from entry
# to entry. This is a detailed message which will be presented to the user
# if no more entries can be found.
NO_MORE_ENTRIES = _("No more entries.")
# Translators: This is for navigating document content by moving from form
# field to form field. This is a detailed message which will be presented to
# the user if no more form fields can be found.
NO_MORE_FORM_FIELDS = _("No more form fields.")
# Translators: This is for navigating document content by moving from heading
# to heading. This is a detailed message which will be presented to the user
# if no more headings can be found.
NO_MORE_HEADINGS = _("No more headings.")
# Translators: This is for navigating document content by moving from heading
# to heading at a particular level (i.e. only <h1> or only <h2>, etc.). This
# is a detailed message which will be presented to the user if no more headings
# at the desired level can be found.
NO_MORE_HEADINGS_AT_LEVEL = _("No more headings at level %d.")
# Translators: This is for navigating document content by moving from image
# to image. This is a detailed message which will be presented to the user
# if no more images can be found.
NO_MORE_IMAGES = _("No more images.")
# Translators: this is for navigating to the previous ARIA role landmark.
# ARIA role landmarks are the W3C defined HTML tag attribute 'role' used to
# identify important part of webpage like banners, main context, search etc.
# This is an indication that one was not found.
NO_LANDMARK_FOUND = _("No landmark found.")
# Translators: This is for navigating document content by moving from link to
# link (regardless of visited state). This is a detailed message which will be
# presented to the user if no more links can be found.
NO_MORE_LINKS = _("No more links.")
# Translators: This is for navigating document content by moving from bulleted/
# numbered list to bulleted/numbered list. This is a detailed message which will
# be presented to the user if no more lists can be found.
NO_MORE_LISTS = _("No more lists.")
# Translators: This is for navigating document content by moving from bulleted/
# numbered list item to bulleted/numbered list item. This is a detailed message
# which will be presented to the user if no more list items can be found.
NO_MORE_LIST_ITEMS = _("No more list items.")
# Translators: This is for navigating document content by moving from live
# region to live region. A live region is an area of a web page that is
# periodically updated, e.g. stock ticker. This is a detailed message which
# will be presented to the user if no more live regions can be found. For
# more info, see http://www.w3.org/TR/wai-aria/terms#def_liveregion
NO_MORE_LIVE_REGIONS = _("No more live regions.")
# Translators: This is for navigating document content by moving from paragraph
# to paragraph. This is a detailed message which will be presented to the user
# if no more paragraphs can be found.
NO_MORE_PARAGRAPHS = _("No more paragraphs.")
# Translators: This is for navigating document content by moving from radio
# button to radio button. This is a detailed message which will be presented to
# the user if no more radio buttons can be found.
NO_MORE_RADIO_BUTTONS = _("No more radio buttons.")
# Translators: This is for navigating document content by moving from separator
# to separator (e.g. <hr> tags). This is a detailed message which will be
# presented to the user if no more separators can be found.
NO_MORE_SEPARATORS = _("No more separators.")
# Translators: This is for navigating document content by moving from table
# to table. This is a detailed message which will be presented to the user if
# no more tables can be found.
NO_MORE_TABLES = _("No more tables.")
# Translators: This is for navigating document content by moving from unvisited
# link to unvisited link. This is a detailed message which will be presented to
# the user if no more unvisited links can be found.
NO_MORE_UNVISITED_LINKS = _("No more unvisited links.")
# Translators: This is for navigating document content by moving from visited
# link to visited link. This is a detailed message which will be presented to
# the user if no more visited links can be found.
NO_MORE_VISITED_LINKS = _("No more visited links.")
# Translators: Orca has a dedicated command to speak the currently-selected
# text. This message is what Orca will present if the user performs this
# command when no text is selected.
NO_SELECTED_TEXT = _("No selected text.")
# Translators: Orca has a dedicated command to speak detailed information
# about the currently-focused link. This message is what Orca will present
# if the user performs this command when not on a link.
NOT_ON_A_LINK = _("Not on a link.")
# Translators: This message alerts the user to the fact that what will be
# presented next came from a notification.
NOTIFICATION = _("Notification")
# Translators: This is a brief message presented to the user when the bottom of
# the list of notifications is reached.
NOTIFICATION_LIST_BOTTOM = C_("notification", "Bottom")
# Translators: This message is presented to the user to confirm the list of
# notifications mode is being exited.
NOTIFICATION_LIST_EXIT = _("Exiting list notification messages mode.")
# Translators: This is a brief message presented to the user when the top of the
# list of notifications is reached.
NOTIFICATION_LIST_TOP = C_("notification", "Top")
# Translators: This is a tutorial message for the notification list mode.
NOTIFICATION_LIST_HELP = _("Press h for help.\n")
# Translators: The following string instructs the user how to navigate within
# the list notifications mode.
NOTIFICATION_LIST_TUTORIAL = \
_("Use Up, Down, Home or End to navigate in the list.\n"\
"Press Escape to exit.\n"\
"Press Space to repeat the last message read.\n"\
"Press one digit to read a specific message.\n")
# Translators: This message is presented to the user when the notifications list
# is empty.
NOTIFICATION_NO_MESSAGES = _("No notification messages")
# Translators: Orca has a setting through which users can control how a number is
# spoken. The options are digits ("1 2 3") and words ("one hundred and twenty
# three"). There is an associated Orca command for quickly toggling between the
# two options. This string to be translated is the brief message spoken when the
# user has enabled speaking numbers as digits.
NUMBER_STYLE_DIGITS_BRIEF = C_("number style", "digits")
# Translators: Orca has a setting through which users can control how a number is
# spoken. The options are digits ("1 2 3") and words ("one hundred and twenty
# three"). There is an associated Orca command for quickly toggling between the
# two options. This string to be translated is the verbose message spoken when
# the user has enabled speaking numbers as digits.
NUMBER_STYLE_DIGITS_FULL = _("Speak numbers as digits.")
# Translators: Orca has a setting through which users can control how a number is
# spoken. The options are digits ("1 2 3") and words ("one hundred and twenty
# three"). There is an associated Orca command for quickly toggling between the
# two options. This string to be translated is the brief message spoken when the
# user has enabled speaking numbers as words.
NUMBER_STYLE_WORDS_BRIEF = C_("number style", "words")
# Translators: Orca has a setting through which users can control how a number is
# spoken. The options are digits ("1 2 3") and words ("one hundred and twenty
# three"). There is an associated Orca command for quickly toggling between the
# two options. This string to be translated is the verbose message spoken when
# the user has enabled speaking numbers as words.
NUMBER_STYLE_WORDS_FULL = _("Speak numbers as words.")
# Translators: This brief message is presented to indicate the state of widgets
# (checkboxes, push buttons, toggle buttons) on a toolbar which are associated
# with text formatting (bold, italics, underlining, justification, etc.).
OFF = _("off")
# Translators: This brief message is presented to indicate the state of widgets
# (checkboxes, push buttons, toggle buttons) on a toolbar which are associated
# with text formatting (bold, italics, underlining, justification, etc.).
ON = _("on")
# Translators: This message is presented to the user when a web page or similar
# item has started loading.
PAGE_LOADING_START = _("Loading. Please wait.")
# Translators: This message is presented to the user when a web page or similar
# item has finished loading.
PAGE_LOADING_END = _("Finished loading.")
# Translators: This message is presented to the user when a web page or similar
# item has finished loading. The string substitution is for the name of the
# object which has just finished loading (most likely the page's title).
PAGE_LOADING_END_NAMED = _("Finished loading %s.")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects from the
# current location to the end of the page by pressing Shift+Page_Down.
PAGE_SELECTED_DOWN = _("page selected from cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects from the
# current location to the start of the page by pressing Shift+Page_Up.
PAGE_SELECTED_UP = _("page selected to cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user unselects a previously
# selected page by pressing Shift+Page_Down.
PAGE_UNSELECTED_DOWN = _("page unselected from cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user unselects a previously
# selected page by pressing Shift+Page_Up.
PAGE_UNSELECTED_UP = _("page unselected to cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects from the
# current location to the end of the paragraph by pressing Ctrl+Shift+Down.
PARAGRAPH_SELECTED_DOWN = _("paragraph selected down from cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects from the
# current location to the start of the paragraph by pressing Ctrl+Shift+UP.
PARAGRAPH_SELECTED_UP = _("paragraph selected up from cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user unselects previously
# selected text from the current location to the end of the paragraph by
# pressing Ctrl+Shift+Down.
PARAGRAPH_UNSELECTED_DOWN = _("paragraph unselected down from cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user unselects previously
# selected text from the current location to the start of the paragraph by
# pressing Ctrl+Shift+UP.
PARAGRAPH_UNSELECTED_UP = _("paragraph unselected up from cursor position")
# Translators: This message appears in a warning dialog when the user performs
# the command to get into Orca's preferences dialog when the preferences dialog
# is already open.
PREFERENCES_WARNING_DIALOG = \
_('You already have an instance of an Orca preferences dialog ' \
'open.\nPlease close it before opening a new one.')
# Translators: This message is an indication of the position of the focused
# slide and the total number of slides in the presentation.
PRESENTATION_SLIDE_POSITION = _("slide %(position)d of %(count)d")
# Translators: This is a detailed message which will be presented as the user
# cycles amongst his/her saved profiles. A "profile" is a collection of settings
# which apply to a given task, such as a "Spanish" profile which would use
# Spanish text-to-speech and Spanish braille and selected when reading Spanish
# content. The string representing the profile name is created by the user.
PROFILE_CHANGED = _("Profile set to %s.")
# Translators: This is an error message presented when the user attempts to
# cycle among his/her saved profiles, but no profiles can be found. A profile
# is a collection of settings which apply to a given task, such as a "Spanish"
# profile which would use Spanish text-to-speech and Spanish braille and
# selected when reading Spanish content.
PROFILE_NOT_FOUND = _("No profiles found.")
# Translators: this is an index value so that we can present value changes
# regarding a specific progress bar in environments where there are multiple
# progress bars (e.g. in the Firefox downloads dialog).
PROGRESS_BAR_NUMBER = _("Progress bar %d.")
# Translators: This brief message will be presented as the user cycles
# through the different levels of spoken punctuation. The options are:
# All punctuation marks will be spoken, None will be spoken, Most will be
# spoken, or Some will be spoken.
PUNCTUATION_ALL_BRIEF = C_("spoken punctuation", "All")
# Translators: This detailed message will be presented as the user cycles
# through the different levels of spoken punctuation. The options are:
# All punctuation marks will be spoken, None will be spoken, Most will be
# spoken, or Some will be spoken.
PUNCTUATION_ALL_FULL = _("Punctuation level set to all.")
# Translators: This brief message will be presented as the user cycles
# through the different levels of spoken punctuation. The options are:
# All punctuation marks will be spoken, None will be spoken, Most will be
# spoken, or Some will be spoken.
PUNCTUATION_MOST_BRIEF = C_("spoken punctuation", "Most")
# Translators: This detailed message will be presented as the user cycles
# through the different levels of spoken punctuation. The options are:
# All punctuation marks will be spoken, None will be spoken, Most will be
# spoken, or Some will be spoken.
PUNCTUATION_MOST_FULL = _("Punctuation level set to most.")
# Translators: This brief message will be presented as the user cycles
# through the different levels of spoken punctuation. The options are:
# All punctuation marks will be spoken, None will be spoken, Most will be
# spoken, or Some will be spoken.
PUNCTUATION_NONE_BRIEF = C_("spoken punctuation", "None")
# Translators: This detailed message will be presented as the user cycles
# through the different levels of spoken punctuation. The options are:
# All punctuation marks will be spoken, None will be spoken, Most will be
# spoken, or Some will be spoken.
PUNCTUATION_NONE_FULL = _("Punctuation level set to none.")
# Translators: This brief message will be presented as the user cycles
# through the different levels of spoken punctuation. The options are:
# All punctuation marks will be spoken, None will be spoken, Most will be
# spoken, or Some will be spoken.
PUNCTUATION_SOME_BRIEF = C_("spoken punctuation", "Some")
# Translators: This detailed message will be presented as the user cycles
# through the different levels of spoken punctuation. The options are:
# All punctuation marks will be spoken, None will be spoken, Most will be
# spoken, or Some will be spoken.
PUNCTUATION_SOME_FULL = _("Punctuation level set to some.")
# Translators: This message is presented to indicate that a search has begun
# or is still taking place.
SEARCHING = _("Searching.")
# Translators: This message is presented to indicate a search executed by the
# user has been completed.
SEARCH_COMPLETE = _("Search complete.")
# Translators: This message is presented to the user when Orca's preferences
# have been reloaded.
SETTINGS_RELOADED = _("Screen reader settings reloaded.")
# Translators: Orca has a dedicated command to speak the currently-selected
# text. This message is spoken by Orca before speaking the text which is
# selected. The string substitution is for the selected text.
SELECTED_TEXT_IS = _("Selected text is: %s")
# Translators: This message is presented to the user when speech synthesis
# has been temporarily turned off.
SPEECH_DISABLED = _("Speech disabled.")
# Translators: This message is presented to the user when speech synthesis
# has been turned back on.
SPEECH_ENABLED = _("Speech enabled.")
# Translators: This string announces speech rate change.
SPEECH_FASTER = _("faster.")
# Translators: This string announces speech rate change.
SPEECH_SLOWER = _("slower.")
# Translators: This string announces speech pitch change.
SPEECH_HIGHER = _("higher.")
# Translators: This string announces speech pitch change.
SPEECH_LOWER = _("lower.")
# Translators: This string announces speech volume change.
SPEECH_LOUDER = _("louder.")
# Translators: This string announces speech volume change.
SPEECH_SOFTER = _("softer.")
# Translators: Orca's verbosity levels control how much (or how little)
# Orca will speak when presenting objects as the user navigates within
# applications and reads content. The two levels are "brief" and "verbose".
# The following string is a message spoken to the user upon toggling
# this setting via command.
SPEECH_VERBOSITY_BRIEF = C_("Speech", "Verbosity level: brief")
# Translators: Orca's verbosity levels control how much (or how little)
# Orca will speak when presenting objects as the user navigates within
# applications and reads content. The two levels are "brief" and "verbose".
# The following string is a message spoken to the user upon toggling
# this setting via command.
SPEECH_VERBOSITY_VERBOSE = C_("Speech", "Verbosity level: verbose")
# Translators: We replace the ellipses (both manual and UTF-8) with a spoken
# string. The extra space you see at the beginning is because we need the
# speech synthesis engine to speak the new string well. For example, "Open..."
# turns into "Open dot dot dot".
SPOKEN_ELLIPSIS = _(" dot dot dot")
# Translators: This message is presented to the user when Orca is launched.
START_ORCA = _("Screen reader on.")
# Translators: This message is presented to the user when Orca is quit.
STOP_ORCA = _("Screen reader off.")
# Translators: This message means speech synthesis is not installed or working.
SPEECH_UNAVAILABLE = _("Speech is unavailable.")
# Translators: the Orca "Find" dialog allows a user to search for text in a
# window and then move focus to that text. For example, they may want to find
# the "OK" button. This message lets them know a string they were searching
# for was not found.
STRING_NOT_FOUND = _("string not found")
# Translators: The structural navigation keys are designed to move the caret
# around document content by object type. H moves you to the next heading,
# Shift H to the previous heading, T to the next table, and so on. Some users
# prefer to turn this off to use Firefox's search when typing feature. This
# message is presented when the user toggles the structural navigation feature
# of Orca. It should be a brief informative message.
STRUCTURAL_NAVIGATION_KEYS_OFF = _("Structural navigation keys off.")
# Translators: The structural navigation keys are designed to move the caret
# around document content by object type. H moves you to the next heading,
# Shift H to the previous heading, T to the next table, and so on. Some users
# prefer to turn this off to use Firefox's search when typing feature. This
# message is presented when the user toggles the structural navigation feature
# of Orca. It should be a brief informative message.
STRUCTURAL_NAVIGATION_KEYS_ON = _("Structural navigation keys on.")
# Translators: Orca has a command that allows the user to move to the next
# structural navigation object. In Orca, "structural navigation" refers to
# quickly moving through a document by jumping amongst objects of a given
# type, such as from link to link, or from heading to heading, or from form
# field to form field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be found.
STRUCTURAL_NAVIGATION_NOT_FOUND = C_("structural navigation", "Not found")
# Translators: This message describes the (row, col) position of a table cell.
TABLE_CELL_COORDINATES = _("Row %(row)d, column %(column)d.")
# Translators: This message is presented to indicate the user is in the last
# cell of a table in a document.
TABLE_END = _("End of table")
# Translators: This message is presented when a user is navigating within a
# table and then navigates out of it.
TABLE_LEAVING = _("leaving table.")
# Translators: When users are navigating a table, they sometimes want the entire
# row of a table read; other times they want just the current cell presented.
# This string is a message presented to the user when this setting is toggled.
TABLE_MODE_CELL = _("Speak cell")
# Translators: When users are navigating a table, they sometimes want the entire
# row of a table read; other times they want just the current cell presented.
# This string is a message presented to the user when this setting is toggled.
TABLE_MODE_ROW = _("Speak row")
# Translators: a uniform table is one in which each table cell occupies one row
# and one column (i.e. a perfect grid). In contrast, a non-uniform table is one
# in which at least one table cell occupies more than one row and/or column.
TABLE_NON_UNIFORM = _("Non-uniform")
# Translators: This is for navigating document content by moving from table cell
# to table cell. If the user gives a table navigation command but is not in a
# table, presents this message.
TABLE_NOT_IN_A = _("Not in a table.")
# Translators: This is a message presented to users when the columns in a table
# have been reordered.
TABLE_REORDERED_COLUMNS = _("Columns reordered")
# Translators: This is a message presented to users when the rows in a table
# have been reordered.
TABLE_REORDERED_ROWS = _("Rows reordered")
# Translators: this is in reference to a column in a table. The substitution
# is the index (e.g. the first column is "column 1").
TABLE_COLUMN = _("column %d")
# Translators: this is in reference to a column in a table. If the user is in
# the first column of a table with five columns, the position is "column 1 of 5"
TABLE_COLUMN_DETAILED = _("column %(index)d of %(total)d")
# Translators: This is for navigating document content by moving from table cell
# to table cell. This is the message presented when the user attempts to move to
# the cell below the current cell and is already in the last row.
TABLE_COLUMN_BOTTOM = _("Bottom of column.")
# Translators: This is for navigating document content by moving from table cell
# to table cell. This is the message presented when the user attempts to move to
# the cell above the current cell and is already in the first row.
TABLE_COLUMN_TOP = _("Top of column.")
# Translators: this is in reference to a row in a table. The substitution is
# the index (e.g. the first row is "row 1").
TABLE_ROW = _("row %d")
# Translators: this is in reference to a row in a table. If the user is in
# the first row of a table with five rows, the position is "row 1 of 5"
TABLE_ROW_DETAILED = _("row %(index)d of %(total)d")
# Translators: This is for navigating document content by moving from table cell
# to table cell. This is the message presented when the user attempts to move to
# the left of the current cell and is already in the first column.
TABLE_ROW_BEGINNING = _("Beginning of row.")
# Translators: This is for navigating document content by moving from table cell
# to table cell. This is the message presented when the user attempts to move to
# the right of the current cell and is already in the last column.
TABLE_ROW_END = _("End of row.")
# Translators: This message is presented to the user to confirm that he/she just
# deleted a table row.
TABLE_ROW_DELETED = _("Row deleted.")
# Translators: This message is presented to the user to confirm that he/she just
# deleted the last row of a table.
TABLE_ROW_DELETED_FROM_END = _("Last row deleted.")
# Translators: This message is presented to the user to confirm that he/she just
# inserted a table row.
TABLE_ROW_INSERTED = _("Row inserted.")
# Translators: This message is presented to the user to confirm that he/she just
# inserted a table row at the end of the table. This typically happens when the
# user presses Tab from within the last cell of the table.
TABLE_ROW_INSERTED_AT_END = _("Row inserted at the end of the table.")
# Translators: when the user selects (highlights) text in a document, Orca lets
# them know.
TEXT_SELECTED = C_("text", "selected")
# Translators: when the user unselects (un-highlights) text in a document, Orca
# lets them know.
TEXT_UNSELECTED = C_("text", "unselected")
TIME_FORMAT_LOCALE = "%X"
TIME_FORMAT_24_HMS = "%H:%M:%S"
TIME_FORMAT_24_HM = "%H:%M"
TIME_FORMAT_12_HM = "%I:%M %p"
TIME_FORMAT_12_HMS = "%I:%M:%S %p"
# Translators: Orca has a feature to speak the time when the user presses a
# shortcut key. This is one of the alternative formats that the user may wish
# it to be presented with.
TIME_FORMAT_24_HMS_WITH_WORDS = _("%H hours, %M minutes and %S seconds.")
# Translators: Orca has a feature to speak the time when the user presses a
# shortcut key. This is one of the alternative formats that the user may wish
# it to be presented with.
TIME_FORMAT_24_HM_WITH_WORDS = _("%H hours and %M minutes.")
# Translators: this is information about a unicode character reported to the
# user. The value is the unicode number value of this character in hex.
UNICODE = _("Unicode %s")
# Translators: This message presents the Orca version number.
VERSION = _("Screen reader version %s.") % version
# Translators: This is presented when the user has navigated to a line with only
# whitespace characters (space, tab, etc.) on it.
WHITE_SPACE = _("white space")
# Translators: when the user is attempting to locate a particular object and the
# top of a page or list is reached without that object being found, we "wrap" to
# the bottom and continue looking upwards. We need to inform the user when this
# is taking place.
WRAPPING_TO_BOTTOM = _("Wrapping to bottom.")
# Translators: when the user is attempting to locate a particular object and the
# bottom of a page or list is reached without that object being found, we "wrap"
# to the top and continue looking downwards. We need to inform the user when
# this is taking place.
WRAPPING_TO_TOP = _("Wrapping to top.")
# Translators: normally layered panes and tables have items in them. Thus it is
# noteworthy when this is not the case. This message is presented to the user to
# indicate the current layered pane or table contains zero items.
ZERO_ITEMS = _("0 items")
def cellSpan(rowspan, colspan):
    """Return a spoken description of a table cell's row/column span.

    An empty string is returned for an ordinary cell that spans neither
    multiple rows nor multiple columns.
    """
    if rowspan <= 1 and colspan <= 1:
        return ""

    if rowspan > 1 and colspan > 1:
        # Translators: The cell here refers to a cell within a table within a
        # document. We need to announce when the cell occupies or "spans" more
        # than a single row and/or column.
        result = ngettext("Cell spans %d row",
                          "Cell spans %d rows",
                          rowspan) % rowspan
        # Translators: this represents the number of columns in a table.
        result += ngettext(" %d column",
                           " %d columns",
                           colspan) % colspan
        return result

    if colspan > 1:
        # Translators: The cell here refers to a cell within a table within a
        # document. We need to announce when the cell occupies or "spans" more
        # than a single row and/or column.
        return ngettext("Cell spans %d column",
                        "Cell spans %d columns",
                        colspan) % colspan

    # Translators: The cell here refers to a cell within a table within a
    # document. We need to announce when the cell occupies or "spans" more
    # than a single row and/or column.
    return ngettext("Cell spans %d row",
                    "Cell spans %d rows",
                    rowspan) % rowspan
def charactersTooLong(count):
    """Return a message saying cell text is *count* characters too wide."""
    # Translators: People can enter a string of text that is too wide to be
    # fully displayed in a spreadsheet cell. This message will be spoken if
    # such a cell is encountered.
    template = ngettext("%d character too long",
                        "%d characters too long",
                        count)
    return template % count
def dialogCountBraille(count):
    """Return the braille suffix giving the count of unfocused dialogs."""
    # Translators: This message informs the user how many unfocused alert and
    # dialog windows a newly (re)focused application has. It is added at the
    # end of a braille message containing the app which just claimed focus.
    template = ngettext("(%d dialog)", "(%d dialogs)", count)
    return template % count
def dialogCountSpeech(count):
    """Return the spoken suffix giving the count of unfocused dialogs."""
    # Translators: This message informs the user how many unfocused alert and
    # dialog windows a newly (re)focused application has. It is added at the
    # end of a spoken message containing the app which just claimed focus.
    template = ngettext("%d unfocused dialog", "%d unfocused dialogs", count)
    return template % count
def fileSizeBytes(size):
    """Return a spoken representation of a file size in bytes."""
    # Translators: This is the size of a file in bytes
    template = ngettext("%d byte", "%d bytes", size)
    return template % size
def filesFound(count):
    """Return a message stating how many files a search found."""
    # Translators: This message informs the user who many files were found as
    # a result of a search.
    template = ngettext("%d file found", "%d files found", count)
    return template % count
def formCount(count):
    """Return a message giving the number of forms in a document."""
    # Translators: This message presents the number of forms in a document.
    template = ngettext("%d form", "%d forms", count)
    return template % count
def headingCount(count):
    """Return a message giving the number of headings in a document."""
    # Translators: This message presents the number of headings in a document.
    template = ngettext("%d heading", "%d headings", count)
    return template % count
def itemCount(count):
    """Return a message giving the number of items in a pane or table."""
    # Translators: This message presents the number of items in a layered pane
    # or table.
    template = ngettext("%d item", "%d items", count)
    return template % count
def itemsFound(count):
    """Return a message stating how many matching items were found."""
    # Translators: Orca has several commands that search for, and present a list
    # of, objects based on one or more criteria. This is a message that will be
    # presented to the user to indicate how many matching items were found.
    template = ngettext("%d item found", "%d items found", count)
    return template % count
def listItemCount(count):
    """Return a description of a bulleted/numbered list's item count."""
    # Translators: This message describes a bulleted or numbered list.
    template = ngettext("List with %d item", "List with %d items", count)
    return template % count
def mathTableSize(nRows, nColumns):
    """Return a spoken description of a MathML table's dimensions."""
    # Translators: this represents the number of rows in a mathematical table.
    # See http://www.w3.org/TR/MathML3/chapter3.html#presm.mtable
    rows = ngettext("math table with %d row",
                    "math table with %d rows",
                    nRows) % nRows
    # Translators: this represents the number of columns in a mathematical table.
    # See http://www.w3.org/TR/MathML3/chapter3.html#presm.mtable
    cols = ngettext("%d column", "%d columns", nColumns) % nColumns
    return "%s %s" % (rows, cols)
def mathNestedTableSize(nRows, nColumns):
    """Return a spoken description of a nested MathML table's dimensions.

    Arguments:
    - nRows: the number of rows in the nested table
    - nColumns: the number of columns in the nested table
    """
    # Translators: this represents the number of rows in a mathematical table
    # which is nested inside another mathematical table.
    # See http://www.w3.org/TR/MathML3/chapter3.html#presm.mtable
    rowString = ngettext("nested math table with %d row",
                         "nested math table with %d rows",
                         nRows) % nRows
    # Translators: this represents the number of columns in a mathematical
    # table which is nested inside another mathematical table.
    # See http://www.w3.org/TR/MathML3/chapter3.html#presm.mtable
    colString = ngettext("%d column",
                         "%d columns",
                         nColumns) % nColumns
    return rowString + " " + colString
def messagesCount(count):
    """Return a message (with trailing newline) giving a list's message count."""
    # Translators: This message is presented to inform the user of the number of
    # messages in a list.
    template = ngettext("%d message.\n", "%d messages.\n", count)
    return template % count
def percentage(value):
    """Return a spoken representation of a slider/progress-bar percentage."""
    # Translators: This message is presented to inform the user of the value of
    # a slider, progress bar, or other such component.
    # (The singular and plural strings are identical in English but may differ
    # in languages with other plural rules, hence ngettext.)
    template = ngettext("%d percent.", "%d percent.", value)
    return template % value
def percentRead(value):
    """Return a message announcing how much of the document has been read.

    Arguments:
    - value: the percentage (0-100) of the document read so far
    """
    # Translators: This message announces the percentage of the document that
    # has been read. The value is calculated by knowing the index of the current
    # position divided by the total number of objects on the page.
    # (The singular and plural strings are identical in English but may differ
    # in languages with other plural rules, hence ngettext.)
    return ngettext("%d percent of document read",
                    "%d percent of document read",
                    value) % value
def pixelCount(nPixels):
    """Return a spoken representation of a pixel measurement."""
    # Translators: this represents a text attribute expressed in pixels, such as
    # a margin, indentation, font size, etc.
    template = ngettext("%d pixel", "%d pixels", nPixels)
    return template % nPixels
def repeatedCharCount(repeatChar, count):
    """Describe a run of *count* repeated characters named *repeatChar*."""
    # Translators: Orca will tell you how many characters are repeated on a line
    # of text. For example: "22 space characters". The %d is the number and the
    # %s is the spoken word for the character.
    values = {"count": count, "repeatChar": repeatChar}
    template = ngettext("%(count)d %(repeatChar)s character",
                        "%(count)d %(repeatChar)s characters",
                        count)
    return template % values
def selectedItemsCount(selected, total):
    """Describe how many of *total* objects are currently selected."""
    # Translators: This message is presented to indicate the number of selected
    # objects (e.g. icons) and the total number of those objects.
    template = ngettext("%(index)d of %(total)d item selected",
                        "%(index)d of %(total)d items selected",
                        total)
    return template % {"index": selected, "total": total}
def shortcutsFoundOrca(count):
    """Return the dialog title for the list of Orca's default shortcuts."""
    # Translators: This message is presented when the user is in a list of
    # shortcuts associated with Orca commands which are not specific to the
    # current application. It appears as the title of the dialog containing
    # the list.
    template = ngettext("%d Screen reader default shortcut found.",
                        "%d Screen reader default shortcuts found.",
                        count)
    return template % count
def shortcutsFoundApp(count, appName):
    """Return the dialog title for the list of app-specific Orca shortcuts."""
    # Translators: This message is presented when the user is in a list of
    # shortcuts associated with Orca commands specific to the current
    # application. It appears as the title of the dialog containing the list.
    template = ngettext(
        "%(count)d Screen reader shortcut for %(application)s found.",
        "%(count)d Screen reader shortcuts for %(application)s found.",
        count)
    return template % {"count": count, "application": appName}
def spacesCount(count):
    """Return a message giving the number of space characters in a string."""
    # Translators: This message is presented to inform the user of the number of
    # space characters in a string.
    template = ngettext("%d space", "%d spaces", count)
    return template % count
def tabsCount(count):
    """Return a message giving the number of tab characters in a string."""
    # Translators: This message is presented to inform the user of the number of
    # tab characters in a string.
    template = ngettext("%d tab", "%d tabs", count)
    return template % count
def tableCount(count):
    """Return a message giving the number of tables in a document."""
    # Translators: This message presents the number of tables in a document.
    template = ngettext("%d table", "%d tables", count)
    return template % count
def tableSize(nRows, nColumns):
    """Return a spoken description of a table's dimensions."""
    # Translators: this represents the number of rows in a table.
    rows = ngettext("table with %d row", "table with %d rows", nRows) % nRows
    # Translators: this represents the number of columns in a table.
    cols = ngettext("%d column", "%d columns", nColumns) % nColumns
    return "%s %s" % (rows, cols)
def unvisitedLinkCount(count):
    """Return a string announcing the number of unvisited links in a document.

    Arguments:
    - count: the number of unvisited links
    """

    # Translators: This message presents the number of unvisited links in a
    # document.
    template = ngettext("%d unvisited link", "%d unvisited links", count)
    return template % count
def visitedLinkCount(count):
    """Return a string announcing the number of visited links in a document.

    Arguments:
    - count: the number of visited links
    """

    # Translators: This message presents the number of visited links in a
    # document.
    template = ngettext("%d visited link", "%d visited links", count)
    return template % count
|
chrys87/orca-beep
|
src/orca/messages.py
|
Python
|
lgpl-2.1
| 119,157
|
[
"ORCA"
] |
64df7fc368524c44b1be889bd0414a43c647d9a3e551b36dec2dd028b4e9826b
|
from __future__ import print_function
__author__ = """Alex "O." Holcombe, Charles Ludowici, """ ## double-quotes will be silently removed, single quotes will be left, eg, O'Connor
import time, sys, platform, os
from math import atan, atan2, pi, cos, sin, sqrt, ceil, radians, degrees
import numpy as np
import psychopy, psychopy.info
import copy
from psychopy import visual, sound, monitors, logging, gui, event, core, data
from string import ascii_uppercase
try:
from helpersAOH import accelerateComputer, openMyStimWindow
except Exception as e:
print(e); print('Problem loading helpersAOH. Check that the file helpersAOH.py in the same directory as this file')
print('Current directory is ',os.getcwd())
eyeTracking = False
if eyeTracking:
try:
import eyelinkEyetrackerForPsychopySUPA3
except Exception as e:
print(e)
print('Problem loading eyelinkEyetrackerForPsychopySUPA3. Check that the file eyelinkEyetrackerForPsychopySUPA3.py in the same directory as this file')
print('While a different version of pylink might make your eyetracking code work, your code appears to generally be out of date. Rewrite your eyetracker code based on the SR website examples')
#Psychopy v1.83.01 broke this, pylink version prevents EyelinkEyetrackerForPsychopySUPA3 stuff from importing. But what really needs to be done is to change eyetracking code to more modern calls, as indicated on SR site
eyeTracking = False
expname= "dot-jump"
demo = False; exportImages = False
autopilot = False
subject='test'
###############################
### Setup the screen parameters ##########
###############################
allowGUI = False
units='deg' #'cm'
waitBlank=False
refreshRate= 85 *1.0; #160 #set to the framerate of the monitor
fullscrn=True; #show in small window (0) or full screen (1)
scrn=True
stimulusType = 'circle'
if True: #just so I can indent all the below
#which screen to display the stimuli. 0 is home screen, 1 is second screen
# create a dialog from dictionary
infoFirst = { 'Autopilot':autopilot, 'Check refresh etc':True, 'Use second screen':scrn, 'Fullscreen (timing errors if not)': fullscrn, 'Screen refresh rate': refreshRate, 'Stimulus type': stimulusType}
OK = gui.DlgFromDict(dictionary=infoFirst,
title='MOT',
order=['Autopilot','Check refresh etc', 'Use second screen', 'Screen refresh rate', 'Fullscreen (timing errors if not)'],
tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating',
'Use second Screen': ''},
)
if not OK.OK:
print('User cancelled from dialog box'); logging.info('User cancelled from dialog box'); core.quit()
autopilot = infoFirst['Autopilot']
checkRefreshEtc = infoFirst['Check refresh etc']
scrn = infoFirst['Use second screen']
print('scrn = ',scrn, ' from dialog box')
fullscrn = infoFirst['Fullscreen (timing errors if not)']
refreshRate = infoFirst['Screen refresh rate']
stimulusType = infoFirst['Stimulus type']
#monitor parameters
widthPix = 1024 #1440 #monitor width in pixels
heightPix =768 #900 #monitor height in pixels
monitorwidth = 37 #28.5 #monitor width in centimeters
viewdist = 55.; #cm
pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)
bgColor = [-1,-1,-1] #black background
monitorname = 'testMonitor' # 'mitsubishi' #in psychopy Monitors Center
mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#fetch the most recent calib for this monitor
mon.setSizePix( (widthPix,heightPix) )
myWin = openMyStimWindow(mon,widthPix,heightPix,bgColor,allowGUI,units,fullscrn,scrn,waitBlank)
myWin.setRecordFrameIntervals(False)
trialsPerCondition = 2 #default value
refreshMsg2 = ''
if not checkRefreshEtc:
refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
refreshRateWrong = False
else: #checkRefreshEtc
runInfo = psychopy.info.RunTimeInfo(
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=True, ## True means report on everything
userProcsDetailed=True ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
)
print('Finished runInfo- which assesses the refresh and processes of this computer')
refreshMsg1 = 'Median frames per second ='+ str( np.round(1000./runInfo["windowRefreshTimeMedian_ms"],1) )
refreshRateTolerancePct = 3
pctOff = abs( (1000./runInfo["windowRefreshTimeMedian_ms"]-refreshRate) / refreshRate)
refreshRateWrong = pctOff > (refreshRateTolerancePct/100.)
if refreshRateWrong:
refreshMsg1 += ' BUT'
refreshMsg1 += ' program assumes ' + str(refreshRate)
refreshMsg2 = 'which is off by more than' + str(round(refreshRateTolerancePct,0)) + '%!!'
else:
refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
myWinRes = myWin.size
myWin.allowGUI =True
myWin.close() #have to close window to show dialog box
##
### END Setup of the screen parameters ##############################################################################################
####################################
askUserAndConfirmExpParams = True
if autopilot:
subject = 'autoTest'
###############################
### Ask user exp params ##############################################################################################
## askUserAndConfirmExpParams
if askUserAndConfirmExpParams:
dlgLabelsOrdered = list() #new dialog box
myDlg = gui.Dlg(title=expname, pos=(200,400))
if not autopilot:
myDlg.addField('Subject code :', subject)
dlgLabelsOrdered.append('subject')
else:
myDlg.addField('Subject code :', subject)
dlgLabelsOrdered.append('subject')
myDlg.addField('autoPilotTime:', 0, tip='Auto response time relative to cue')
myDlg.addField('randomTime:',False, tip = 'Add (rounded) gaussian N(0,2) error to time offset?')
myDlg.addField('autoPilotSpace:',0, tip='Auto response position relative to cue')
myDlg.addField('randomSpace:',False, tip = 'Add (rounded) gaussian N(0,2) error to space offset?')
dlgLabelsOrdered.append('autoPilotTime')
dlgLabelsOrdered.append('randomTime')
dlgLabelsOrdered.append('autoPilotSpace')
dlgLabelsOrdered.append('randomSpace')
myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))
dlgLabelsOrdered.append('trialsPerCondition')
myDlg.addText(refreshMsg1, color='Black')
if refreshRateWrong:
myDlg.addText(refreshMsg2, color='Red')
msgWrongResolution = ''
if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
msgWrongResolution = 'Instead of desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels, screen apparently '+ str(myWinRes[0])+ 'x'+ str(myWinRes[1])
myDlg.addText(msgWrongResolution, color='Red')
print(msgWrongResolution); logging.info(msgWrongResolution)
myDlg.addText('Note: to abort press ESC at response time', color='DimGrey') #works in PsychoPy1.84
#myDlg.addText('Note: to abort press ESC at a trials response screen', color=[-1.,1.,-1.]) #color names not working for some pre-1.84 versions
myDlg.show()
if myDlg.OK: #unpack information from dialogue box
thisInfo = myDlg.data #this will be a list of data returned from each field added in order
name=thisInfo[dlgLabelsOrdered.index('subject')]
if len(name) > 0: #if entered something
subject = name #change subject default name to what user entered
trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer
print('trialsPerCondition=',trialsPerCondition)
logging.info('trialsPerCondition ='+str(trialsPerCondition))
if autopilot:
autoSpace = thisInfo[dlgLabelsOrdered.index('autoPilotSpace')]
autoTime = thisInfo[dlgLabelsOrdered.index('autoPilotTime')]
randomTime = thisInfo[dlgLabelsOrdered.index('randomTime')]
randomSpace = thisInfo[dlgLabelsOrdered.index('randomSpace')]
else:
print('User cancelled from dialog box.'); logging.info('User cancelled from dialog box')
logging.flush()
core.quit()
### Ask user exp params
## END askUserAndConfirmExpParams ###############################
##############################################################################################
if os.path.isdir('.'+os.sep+'dataRaw'):
dataDir='dataRaw'
else:
msg= 'dataRaw directory does not exist, so saving data in present working directory'
print(msg); logging.info(msg)
dataDir='.'
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
fileNameWithPath = dataDir+os.sep+subject+ '_' + expname+timeAndDateStr
if not demo and not exportImages:
saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileNameWithPath + '.py'
os.system(saveCodeCmd) #save a copy of the code as it was when that subject was run
logF = logging.LogFile(fileNameWithPath+'.log',
filemode='w',#if you set this to 'a' it will append instead of overwriting
level=logging.INFO)#info, data, warnings, and errors will be sent to this logfile
if demo or exportImages:
logging.console.setLevel(logging.ERROR) #only show this level's and higher messages
logging.console.setLevel(logging.WARNING) #DEBUG means set the console to receive nearly all messges, INFO is for everything else, INFO, EXP, DATA, WARNING and ERROR
if refreshRateWrong:
logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
longerThanRefreshTolerance = 0.27
longFrameLimit = round(1000./refreshRate*(1.0+longerThanRefreshTolerance),3) # round(1000/refreshRate*1.5,2)
msg = 'longFrameLimit='+ str(longFrameLimit) +' Recording trials where one or more interframe interval exceeded this figure '
logging.info(msg); print(msg)
if msgWrongResolution != '':
logging.error(msgWrongResolution)
myWin = openMyStimWindow(mon,widthPix,heightPix,bgColor,allowGUI,units,fullscrn,scrn,waitBlank)
runInfo = psychopy.info.RunTimeInfo(
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=True, ## True means report on everything
userProcsDetailed=True ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
)
msg = 'second window opening runInfo mean ms='+ str( runInfo["windowRefreshTimeAvg_ms"] )
logging.info(msg); print(msg)
logging.info(runInfo)
logging.info('gammaGrid='+str(mon.getGammaGrid()))
logging.info('linearizeMethod='+str(mon.getLinearizeMethod()))
####Functions. Save time by automating processes like stimulus creation and ordering
############################################################################
def oneFrameOfStim(n, itemFrames, SOAFrames, cueFrames, cuePos, trialObjects):
    """Draw whatever belongs on frame n of the stimulus stream.

    Arguments:
    - n: the current frame number within the trial
    - itemFrames: number of frames each item is displayed
    - SOAFrames: stimulus onset asynchrony, in frames
    - cueFrames: number of frames the cue is displayed
    - cuePos: serial temporal position of the cued item
    - trialObjects: list of stimuli to display, in presentation order

    Relies on the module-level `cue` stimulus. Always returns True.
    """
    cueOnset = cuePos * SOAFrames       # first frame on which the cue appears
    cueOffset = cueOnset + cueFrames    # first frame on which the cue is gone
    currentItem = trialObjects[int(np.floor(n / SOAFrames))]
    # Within each SOA the item is visible only for the first itemFrames frames.
    if n % SOAFrames < itemFrames:
        currentItem.draw()
        if cueOnset <= n < cueOffset:
            cue.draw()
    return True
def oneTrial(stimuli):
    """Run one trial: shuffle the stimuli, present the stream, time each frame.

    Arguments:
    - stimuli: list of positioned stimulus objects

    Returns (True, shuffledStimuli, dotOrder, frameTimes). Relies on the
    module-level window, trial clock, fixation and timing globals.
    """
    dotOrder = np.arange(len(stimuli))
    np.random.shuffle(dotOrder)
    shuffledStimuli = [stimuli[idx] for idx in dotOrder]
    frameTimes = []
    # Two flips: make sure the raster is at the top of the screen (unless not
    # in blocking mode) and give the CPU a chance to finish other tasks.
    myWin.flip()
    myWin.flip()
    startTime = trialClock.getTime()
    for frameN in range(trialFrames):
        fixation.draw()
        oneFrameOfStim(frameN, itemFrames, SOAFrames, cueFrames, cuePos,
                       shuffledStimuli)
        myWin.flip()
        frameTimes.append(trialClock.getTime() - startTime)
    return True, shuffledStimuli, dotOrder, frameTimes
def getResponse(trialStimuli):
    """Collect the participant's (or autopilot's) response for one trial.

    Arguments:
    - trialStimuli: the trial's stimuli, in the temporal order presented

    Returns (accuracy, item, expStop, mousePos):
    - accuracy: True if the selected item was the cued one
    - item: the stimulus object that was selected
    - expStop: True if the participant aborted with 'q'
    - mousePos: click position (or the selected item's position on autopilot)

    Relies on module-level globals: autopilot, cuePos, autoTime, autoSpace,
    randomTime, randomSpace, stimuli, nDots, myWin, instruction, progress,
    drawProgress.
    """
    if autopilot:
        # Serial position of the simulated response in the stream,
        # relative to the cue.
        autoResponseIdx = cuePos + autoTime
        if randomTime:
            autoResponseIdx += int(round(np.random.normal(0, 2)))
        itemAtTemporalSelection = trialStimuli[autoResponseIdx]
        unshuffledPositions = [dot.pos.tolist() for dot in stimuli]
        itemSpatial = unshuffledPositions.index(itemAtTemporalSelection.pos.tolist())
        itemSpatial = itemSpatial + autoSpace
        if randomSpace:
            itemSpatial += int(round(np.random.normal(0, 2)))
        # Wrap the spatial index onto the circle. BUGFIX: the original code
        # did `while itemSpatial > 23: itemSpatial -= 23`, which mis-wraps
        # (24 -> 1 instead of 0) and never handles negative offsets; modulo
        # nDots is correct for both cases.
        itemSpatial = itemSpatial % nDots
        # Map the spatially selected item back to its temporal position in the
        # stream. If the temporal and spatial offsets are both 0 this should
        # equal cuePos.
        selectionTemporal = trialStimuli.index(stimuli[itemSpatial])
        accuracy = cuePos == selectionTemporal
        mousePos = (stimuli[itemSpatial].pos[0], stimuli[itemSpatial].pos[1])
        expStop = False
        item = stimuli[itemSpatial]
        return accuracy, item, expStop, mousePos
    else:
        myMouse = event.Mouse(visible=False, win=myWin)
        responded = False
        expStop = False
        event.clearEvents()
        mousePos = (1e6, 1e6)
        event.getKeys()  # drain any stale key presses before the response loop
        myMouse.setPos((0, 0))
        myMouse.setVisible(True)
        while not responded:
            for item in trialStimuli:
                item.draw()
            instruction.draw()
            if drawProgress:  # draw the progress message
                progress.draw()
            myWin.flip()
            button = myMouse.getPressed()
            mousePos = myMouse.getPos()
            escapeKey = event.getKeys()
            if button[0]:
                print('click detected')
                responded = True
                print('getResponse mousePos:', mousePos)
            elif len(escapeKey) > 0:
                if escapeKey[0] == 'q':
                    expStop = True
                    responded = True
                    return False, np.random.choice(trialStimuli), expStop, (0, 0)
        # Select the stimulus closest to the click.
        clickDistances = []
        for item in trialStimuli:
            dx = mousePos[0] - item.pos[0]
            dy = mousePos[1] - item.pos[1]
            clickDistances.append(sqrt(dx ** 2 + dy ** 2))
        if not expStop:
            minDistanceIdx = clickDistances.index(min(clickDistances))
            accuracy = minDistanceIdx == cuePos
            item = trialStimuli[minDistanceIdx]
            myMouse.setVisible(False)
        return accuracy, item, expStop, mousePos
def drawStimuli(nDots, radius, center, stimulusObject, sameEachTime = True):
    """Position nDots stimuli evenly around a circle.

    Arguments:
    - nDots: number of positions on the circle
    - radius: radius of the circle
    - center: (x, y) centre of the circle; must have exactly two elements
    - stimulusObject: one stimulus (sameEachTime=True) or a list/tuple of
      nDots stimuli (sameEachTime=False)
    - sameEachTime: if True, a shallow copy of stimulusObject fills every
      position

    Returns the list of positioned stimuli, or None on inconsistent arguments.
    """
    if len(center) > 2 or len(center) < 2:
        print('Center coords must be list of length 2')
        return None
    if not sameEachTime and not isinstance(stimulusObject, (list, tuple)):
        print('You want different objects in each position, but your stimuli is not a list or tuple')
        return None
    if not sameEachTime and isinstance(stimulusObject, (list, tuple)) and len(stimulusObject)!=nDots:
        print('You want different objects in each position, but the number of positions does not equal the number of items')
        return None
    spacing = 360./nDots
    # Multiples of 90 deg get exact coordinates because floating point cannot
    # represent pi exactly (e.g. cos(pi/2) comes out as ~6.123e-17, not 0).
    exactQuadrants = {0: (radius, 0), 90: (0, radius),
                      180: (-radius, 0), 270: (0, -radius)}
    stimuli = []
    for dotIdx in range(nDots):
        angle = dotIdx * spacing
        if angle in exactQuadrants:
            xpos, ypos = exactQuadrants[angle]
        else:
            xpos = radius * cos(radians(angle))
            ypos = radius * sin(radians(angle))
        # Shallow-copy the template, or take the caller-supplied item.
        stim = copy.copy(stimulusObject) if sameEachTime else stimulusObject[dotIdx]
        stim.pos = (xpos, ypos)
        stimuli.append(stim)
    return stimuli
def checkTiming(ts):
    """Count frames whose interframe interval exceeded the timing tolerance.

    Arguments:
    - ts: cumulative frame times (seconds) recorded during the trial

    Relies on the module-level refreshRate and trialFrames. Returns the number
    of over-long interframe intervals.
    """
    intervalsMS = np.diff(ts) * 1000
    # Proportion longer than the nominal frame duration that counts as a miss.
    frameTimeTolerance = .3
    longFrameLimit = np.round(1000/refreshRate*(1.0+frameTimeTolerance), 2)
    longIdxs = np.where(intervalsMS > longFrameLimit)[0]
    numLong = len(longIdxs)
    if numLong > 0:
        print(numLong, 'frames of', trialFrames, 'were longer than',
              str(1000/refreshRate*(1.0+frameTimeTolerance)))
    return numLong
######Create visual objects, noise masks, response prompts etc. ###########
######Draw your stimuli here if they don't change across trials, but other parameters do (like timing or distance)
######If you want to automate your stimuli. Do it in a function below and save clutter.
######For instance, maybe you want random pairs of letters. Write a function!
###########################################################################
#Calculate size of stimuli in original experiment
OGWidth = 1024
OGHeight = 768
OGDiag = sqrt(OGWidth**2 + OGHeight**2)
OGDiagInch = 17
OGDiagCM = OGDiagInch * 2.54
OGpixelPerDegree = OGDiag/((atan(OGDiagCM/57.))*(180/np.pi))
print('OGPPD', OGpixelPerDegree)
radiusPix = 200
radius = radiusPix/OGpixelPerDegree #circle radius
center = (0,0) #circle centre
fixSize = .1
fixation= visual.Circle(myWin, radius = fixSize , fillColor = (1,1,1), units=units)
cueRadiusPix = 360
cueRadiusDeg = cueRadiusPix/OGpixelPerDegree
cue = visual.Circle(myWin, radius = cueRadiusDeg, fillColor = None, lineColor = (1,1,1), units = units)
instruction = visual.TextStim(myWin,pos=(0, -(radius+1)),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.75,units=units)
instructionText = 'Click the dot that was on screen with the cue.'
instruction.text = instructionText
progress = visual.TextStim(myWin,pos=(0, -(radius+2.5)),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.75,units=units)
##Set up stimuli
stimulusSizePix = 20.
stimulusSizeDeg = stimulusSizePix/OGpixelPerDegree
stimulus = visual.Circle(myWin, radius = stimulusSizeDeg, fillColor = (1,1,1) )
nDots = 24
sameEachTime = True #same item each position?
if stimulusType=='circle':
stimulus = visual.Circle(myWin, radius = stimulusSizeDeg, units = units, fillColor = (1,1,1) )
stimuli = drawStimuli(nDots, radius, center, stimulus, sameEachTime)
stimForDataFile = 'circle'
if stimulusType=='letter':
letter = np.random.choice([i for i in ascii_uppercase], size = 1)[0]
stimulus = visual.TextStim(myWin, text = letter, font ='Sloan', height = stimulusSizeDeg*2, units = units, color = (1,1,1), alignHoriz='center', alignVert='center' )
stimuli = drawStimuli(nDots, radius, center, stimulus, sameEachTime)
stimForDataFile = letter
### Trial timing parameters: durations in milliseconds, then converted to
### whole frames at the monitor's refresh rate.
SOAMS = 66.667  # stimulus onset asynchrony: time between successive item onsets, ms
itemMS = 22.222  # duration each item is visible, ms
ISIMS = SOAMS - itemMS  # inter-stimulus interval (blank gap between items), ms
trialMS = SOAMS * nDots  # total duration of one trial's stream, ms
cueMS = itemMS  # the cue is shown for the same duration as an item
# Convert each duration to an integer number of frames (floor, so the actual
# presented duration never exceeds the requested one).
SOAFrames = int(np.floor(SOAMS/(1000./refreshRate)))
itemFrames = int(np.floor(itemMS/(1000./refreshRate)))
ISIFrames = int(np.floor(ISIMS/(1000./refreshRate)))
trialFrames = int(nDots*SOAFrames)
cueFrames = int(np.floor(cueMS/(1000./refreshRate)))
print('cueFrames=',cueFrames)
print('itemFrames=',itemFrames)
print('refreshRate =', refreshRate)
print('cueMS from frames =', cueFrames*(1000./refreshRate))
print('num of SOAs in the trial:', trialFrames/SOAFrames)
###############
## Factorial design ###
###############
numResponsesPerTrial = 1 #default. Used to create headers for dataFile
stimList = []
#cuePositions = [dot for dot in range(nDots) if dot not in [0,nDots-1]]
cuePositions = [10]
print('cuePositions: ',cuePositions)
#cuePositions = cuePositions[2:(nDots-3)] #drop the first and final two dots
#Set up the factorial design (list of all conditions)
for cuePos in cuePositions:
stimList.append({'cuePos':cuePos})
trials = data.TrialHandler(stimList, nReps = trialsPerCondition)
####Create output file###
#########################################################################
dataFile = open(fileNameWithPath + '.txt', 'w')
numResponsesPerTrial = 1
#headers for initial datafile rows, they don't get repeated. These appear in the file in the order they appear here.
oneOffHeaders = [
'subject',
'task',
'staircase',
'trialNum',
'stimulus',
'monitorWidth',
'monitorHeight',
'stimSize',
'ISI',
'SOA'
]
for header in oneOffHeaders:
print(header, '\t', end='', file=dataFile)
#Headers for duplicated datafile rows. These are repeated using numResponsesPerTrial. For instance, we might have two responses in a trial.
duplicatedHeaders = [
'responseSpatialPos',
'responseX',
'responseY',
'correctX',
'correctY',
'clickX',
'clickY',
'accuracy',
'responsePosInStream',
'correctPosInStream'
]
if numResponsesPerTrial == 1:
for header in duplicatedHeaders:
print(header, '\t', end='', file=dataFile)
elif numResponsesPerTrial > 1:
for response in range(numResponsesPerTrial):
for header in duplicatedHeaders:
print(header+str(response), '\t', end='', file=dataFile)
for pos in range(nDots):
print('position'+str(pos),'\t',end='',file=dataFile)
#Headers done. Do a new line
print('longFrames',file=dataFile)
expStop = False
drawProgress = False #draw the progress message?
trialNum=0; numTrialsCorrect=0; framesSaved=0;
print('Starting experiment of',trials.nTotal,'trials. Current trial is trial ',trialNum)
#NextRemindCountText.setText( str(trialNum) + ' of ' + str(trials.nTotal) )
#NextRemindCountText.draw()
myWin.flip()
#end of header
trialClock = core.Clock()
stimClock = core.Clock()
if eyeTracking:
if getEyeTrackingFileFromEyetrackingMachineAtEndOfExperiment:
eyeMoveFile=('EyeTrack_'+subject+'_'+timeAndDateStr+'.EDF')
tracker=Tracker_EyeLink(myWin,trialClock,subject,1, 'HV5',(255,255,255),(0,0,0),False,(widthPix,heightPix))
while trialNum < trials.nTotal and expStop==False:
print(float(trialNum)/trials.nTotal)
if trials.nTotal > 0 and trialNum > 0:
if(float(trialNum)/trials.nTotal)%.2 == 0:
print('setting progress text')
progress.text = 'You have completed ' + str(trialNum) + ' of ' + str(trials.nTotal) + ' trials.'
drawProgress = True
else:
drawProgress = False
fixation.draw()
myWin.flip()
if not autopilot:
core.wait(1)
trial = trials.next()
# print('trial idx is',trials.thisIndex)
cuePos = trial.cuePos
# print(cuePos)
print("Doing trialNum",trialNum)
trialDone, trialStimuli, trialStimuliOrder, ts = oneTrial(stimuli)
#Shift positions so that the list starts at 1, which is positioned at (0,radius), and increases clockwise. This is what the MM code expects
MMPositions = list() #Mixture modelling positions
for dotPos in trialStimuliOrder:
if dotPos < (nDots/4):
MMPositions.append(dotPos + 19)
elif dotPos >= (nDots/4):
MMPositions.append(dotPos -5)
nBlips = checkTiming(ts)
if trialDone:
accuracy, response, expStop, clickPos = getResponse(trialStimuli)
responseCoord = response.pos.tolist()
spatialCoords= [item.pos.tolist() for item in stimuli]
try:
responseSpatialRelativeToXAxis = spatialCoords.index(responseCoord)
except ValueError:
print('coord not in list')
if responseSpatialRelativeToXAxis < (nDots/4):
responseSpatial = responseSpatialRelativeToXAxis + 19
elif responseSpatialRelativeToXAxis >= (nDots/4):
responseSpatial = responseSpatialRelativeToXAxis - 5
trialPositions = [item.pos.tolist() for item in trialStimuli]
responseTemporal = trialPositions.index(responseCoord)
# print('trial positions in sequence:',trialPositions)
# print('position of item nearest to click:',responseSpatial)
# print('Position in sequence of item nearest to click:',responseTemporal)
correctSpatial = trialStimuli[cuePos].pos
correctTemporal = cuePos
print(subject,'\t',
'dot-jump','\t',
False,'\t',
trialNum,'\t',
stimForDataFile,'\t',
widthPix,'\t',
heightPix,'\t',
stimulusSizeDeg,'\t',
ISIMS,'\t',
SOAMS,'\t',
responseSpatial,'\t',
responseCoord[0],'\t',
responseCoord[1],'\t',
correctSpatial[0],'\t',
correctSpatial[1],'\t',
clickPos[0],'\t',
clickPos[1],'\t',
accuracy,'\t',
responseTemporal,'\t',
correctTemporal,'\t',
end='',
file = dataFile
)
for dot in range(nDots):
print(MMPositions[dot], '\t',end='', file=dataFile)
print(nBlips, file=dataFile)
trialNum += 1
dataFile.flush()
if expStop:
print('Participant cancelled experiment on trial', trialNum)
dataFile.flush()
|
alexholcombe/dot-jump
|
dataRaw/Fixed Cue/autoTest_dot-jump21Nov2016_14-32.py
|
Python
|
gpl-3.0
| 26,455
|
[
"Gaussian"
] |
3521ba0c76a0583ebef92b910d2bf5903ca0000b63b60ddbc78b2432b615c371
|
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is intended for use as a GYP_GENERATOR. It takes as input (by way of
the generator flag config_path) the path of a json file that dictates the files
and targets to search for. The following keys are supported:
files: list of paths (relative) of the files to search for.
test_targets: unqualified target names to search for. Any target in this list
that depends upon a file in |files| is output regardless of the type of target
or chain of dependencies.
additional_compile_targets: Unqualified targets to search for in addition to
test_targets. Targets in the combined list that depend upon a file in |files|
are not necessarily output. For example, if the target is of type none then the
target is not output (but one of the descendants of the target will be).
The following is output:
error: only supplied if there is an error.
compile_targets: minimal set of targets that directly or indirectly (for
targets of type none) depend on the files in |files| and is one of the
supplied targets or a target that one of the supplied targets depends on.
The expectation is this set of targets is passed into a build step. This list
always contains the output of test_targets as well.
test_targets: set of targets from the supplied |test_targets| that either
directly or indirectly depend upon a file in |files|. This list is useful
if additional processing needs to be done for certain targets after the
build, such as running tests.
status: outputs one of three values: none of the supplied files were found,
one of the include files changed so that it should be assumed everything
changed (in this case test_targets and compile_targets are not output) or at
least one file was found.
invalid_targets: list of supplied targets that were not found.
Example:
Consider a graph like the following:
A D
/ \
B C
A depends upon both B and C, A is of type none and B and C are executables.
D is an executable, has no dependencies and nothing depends on it.
If |additional_compile_targets| = ["A"], |test_targets| = ["B", "C"] and
files = ["b.cc", "d.cc"] (B depends upon b.cc and D depends upon d.cc), then
the following is output:
|compile_targets| = ["B"] B must be built as it depends upon the changed file b.cc
and the supplied target A depends upon it. A is not output as a build_target
as it is of type none with no rules and actions.
|test_targets| = ["B"] B directly depends upon the changed file b.cc.
Even though the file d.cc, which D depends upon, has changed D is not output
as it was not supplied by way of |additional_compile_targets| or |test_targets|.
If the generator flag analyzer_output_path is specified, output is written
there. Otherwise output is written to stdout.
In Gyp the "all" target is shorthand for the root targets in the files passed
to gyp. For example, if file "a.gyp" contains targets "a1" and
"a2", and file "b.gyp" contains targets "b1" and "b2" and "a2" has a dependency
on "b2" and gyp is supplied "a.gyp" then "all" consists of "a1" and "a2".
Notice that "b1" and "b2" are not in the "all" target as "b.gyp" was not
directly supplied to gyp. OTOH if both "a.gyp" and "b.gyp" are supplied to gyp
then the "all" target includes "b1" and "b2".
"""
from __future__ import print_function
import json
import os
import posixpath
import gyp.msvs_emulation
import gyp.common
# When True, helper functions print tracing output while resolving sources.
debug = False

# Status strings emitted in the generator output to describe the overall
# result of the dependency analysis.
found_dependency_string = 'Found dependency'
no_dependency_string = 'No dependencies'
# Status when it should be assumed that everything has changed.
all_changed_string = 'Found dependency (all)'

# MatchStatus is used indicate if and how a target depends upon the supplied
# sources.
# The target's sources contain one of the supplied paths.
MATCH_STATUS_MATCHES = 1
# The target has a dependency on another target that contains one of the
# supplied paths.
MATCH_STATUS_MATCHES_BY_DEPENDENCY = 2
# The target's sources weren't in the supplied paths and none of the target's
# dependencies depend upon a target that matched.
MATCH_STATUS_DOESNT_MATCH = 3
# The target doesn't contain the source, but the dependent targets have not yet
# been visited to determine a more specific status yet.
MATCH_STATUS_TBD = 4

# Standard gyp generator hooks for this analysis-only generator.
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()

generator_wants_static_library_dependencies_adjusted = False

generator_default_variables = {
}
# Directory variables expand to a recognizable placeholder ('!!!') so that
# paths built from them can be detected and skipped by _AddSources().
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
                'LIB_DIR', 'SHARED_LIB_DIR']:
  generator_default_variables[dirname] = '!!!'

# The remaining generator variables play no role in the analysis; empty
# strings keep their expansion harmless.
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''
def _ToGypPath(path):
"""Converts a path to the format used by gyp."""
if os.sep == '\\' and os.altsep == '/':
return path.replace('\\', '/')
return path
def _ResolveParent(path, base_path_components):
"""Resolves |path|, which starts with at least one '../'. Returns an empty
string if the path shouldn't be considered. See _AddSources() for a
description of |base_path_components|."""
depth = 0
while path.startswith('../'):
depth += 1
path = path[3:]
# Relative includes may go outside the source tree. For example, an action may
# have inputs in /usr/include, which are not in the source tree.
if depth > len(base_path_components):
return ''
if depth == len(base_path_components):
return path
return '/'.join(base_path_components[0:len(base_path_components) - depth]) + \
'/' + path
def _AddSources(sources, base_path, base_path_components, result):
  """Extracts valid sources from |sources| and adds them to |result|.

  Each source file is relative to |base_path|, but may contain '..'. To make
  resolving '..' easier |base_path_components| contains each of the
  directories in |base_path|. Sources containing variables are ignored, as
  dependencies on them are assumed to be expressed and tracked elsewhere."""
  # NOTE: gyp paths are always posix style.
  for original in sources:
    # Skip empty names, generator placeholders ('!!!') and variable
    # references ('$...').
    if not len(original) or original.startswith('!!!') or original.startswith('$'):
      continue
    # Variable expansion may leave '//' after the first character.
    cleaned = original[0] + original[1:].replace('//', '/')
    if cleaned.startswith('../'):
      resolved = _ResolveParent(cleaned, base_path_components)
      if len(resolved):
        result.append(resolved)
      continue
    result.append(base_path + cleaned)
    if debug:
      print('AddSource', original, result[len(result) - 1])
def _ExtractSourcesFromAction(action, base_path, base_path_components,
                              results):
  """Adds the 'inputs' of |action| (an action or rule dict, if present) to
  |results|, resolved against |base_path| via _AddSources()."""
  if 'inputs' in action:
    _AddSources(action['inputs'], base_path, base_path_components, results)
def _ToLocalPath(toplevel_dir, path):
"""Converts |path| to a path relative to |toplevel_dir|."""
if path == toplevel_dir:
return ''
if path.startswith(toplevel_dir + '/'):
return path[len(toplevel_dir) + len('/'):]
return path
def _ExtractSources(target, target_dict, toplevel_dir):
  """Returns the source paths (including action/rule inputs) of |target|."""
  # |target| is either absolute or relative and in the format of the OS. Gyp
  # source paths are always posix. Convert |target| to a posix path relative
  # to |toplevel_dir| to make building source paths easy.
  base_path = posixpath.dirname(_ToLocalPath(toplevel_dir, _ToGypPath(target)))
  base_path_components = base_path.split('/')
  if len(base_path):
    base_path += '/'  # trailing '/' so _AddSources() can concatenate directly
  if debug:
    print('ExtractSources', target, base_path)
  results = []
  if 'sources' in target_dict:
    _AddSources(target_dict['sources'], base_path, base_path_components,
                results)
  # Inputs of actions and rules affect the resulting output, so any changes
  # to these matter too.
  for key in ('actions', 'rules'):
    for action in target_dict.get(key, []):
      _ExtractSourcesFromAction(action, base_path, base_path_components,
                                results)
  return results
class Target(object):
  """Holds information about a particular target:
  deps: set of Targets this Target depends upon. This is not recursive, only the
    direct dependent Targets.
  match_status: one of the MatchStatus values.
  back_deps: set of Targets that have a dependency on this Target.
  visited: used during iteration to indicate whether we've visited this target.
    This is used for two iterations, once in building the set of Targets and
    again in _GetBuildTargets().
  name: fully qualified name of the target.
  requires_build: True if the target type is such that it needs to be built.
    See _DoesTargetTypeRequireBuild for details.
  added_to_compile_targets: used when determining if the target was added to the
    set of targets that needs to be built.
  in_roots: true if this target is a descendant of one of the root nodes.
  is_executable: true if the type of target is executable.
  is_static_library: true if the type of target is static_library.
  is_or_has_linked_ancestor: true if the target does a link (eg executable), or
    if there is a target in back_deps that does a link."""
  def __init__(self, name):
    self.deps = set()
    # Matching starts out undecided; _GenerateTargets() and
    # _DoesTargetDependOnMatchingTargets() refine it.
    self.match_status = MATCH_STATUS_TBD
    self.back_deps = set()
    self.name = name
    # TODO(sky): I don't like hanging this off Target. This state is specific
    # to certain functions and should be isolated there.
    self.visited = False
    self.requires_build = False
    self.added_to_compile_targets = False
    self.in_roots = False
    self.is_executable = False
    self.is_static_library = False
    self.is_or_has_linked_ancestor = False
class Config(object):
  """Details what we're looking for
  files: set of files to search for
  targets: see file description for details."""
  def __init__(self):
    self.files = []
    self.targets = set()
    self.additional_compile_target_names = set()
    self.test_target_names = set()

  def Init(self, params):
    """Initializes Config. This is a separate method as it raises an exception
    if there is a parse error."""
    generator_flags = params.get('generator_flags', {})
    config_path = generator_flags.get('config_path', None)
    if not config_path:
      return
    try:
      # Use a context manager so the file is closed even when json.load()
      # raises ValueError (the original open/close pair leaked the handle
      # on a parse error).
      with open(config_path, 'r') as f:
        config = json.load(f)
    except IOError:
      raise Exception('Unable to open file ' + config_path)
    except ValueError as e:
      raise Exception('Unable to parse config file ' + config_path + str(e))
    if not isinstance(config, dict):
      raise Exception('config_path must be a JSON file containing a dictionary')
    self.files = config.get('files', [])
    self.additional_compile_target_names = set(
        config.get('additional_compile_targets', []))
    self.test_target_names = set(config.get('test_targets', []))
def _WasBuildFileModified(build_file, data, files, toplevel_dir):
  """Returns true if the build file |build_file| is either in |files| or
  one of the files included by |build_file| is in |files|. |toplevel_dir| is
  the root of the source tree."""
  if _ToLocalPath(toplevel_dir, _ToGypPath(build_file)) in files:
    if debug:
      print('gyp file modified', build_file)
    return True
  # First element of included_files is the file itself.
  if len(data[build_file]['included_files']) <= 1:
    return False
  for include_file in data[build_file]['included_files'][1:]:
    # |included_files| are relative to the directory of the |build_file|.
    rel_include_file = \
        _ToGypPath(gyp.common.UnrelativePath(include_file, build_file))
    if _ToLocalPath(toplevel_dir, rel_include_file) in files:
      if debug:
        print('included gyp file modified, gyp_file=', build_file, 'included file=', rel_include_file)
      return True
  return False
def _GetOrCreateTargetByName(targets, target_name):
  """Creates or returns the Target at targets[target_name]. If there is no
  Target for |target_name| one is created. Returns a tuple of whether a new
  Target was created and the Target."""
  existing = targets.get(target_name)
  if existing is not None:
    return False, existing
  created = Target(target_name)
  targets[target_name] = created
  return True, created
def _DoesTargetTypeRequireBuild(target_dict):
"""Returns true if the target type is such that it needs to be built."""
# If a 'none' target has rules or actions we assume it requires a build.
return bool(target_dict['type'] != 'none' or
target_dict.get('actions') or target_dict.get('rules'))
def _GenerateTargets(data, target_list, target_dicts, toplevel_dir, files,
                     build_files):
  """Returns a tuple of the following:
  . A dictionary mapping from fully qualified name to Target.
  . A list of the targets that have a source file in |files|.
  . Targets that constitute the 'all' target. See description at top of file
    for details on the 'all' target.
  This sets the |match_status| of the targets that contain any of the source
  files in |files| to MATCH_STATUS_MATCHES.
  |toplevel_dir| is the root of the source tree."""
  # Maps from target name to Target.
  name_to_target = {}
  # Targets that matched.
  matching_targets = []
  # Queue of targets to visit.
  targets_to_visit = target_list[:]
  # Maps from build file to a boolean indicating whether the build file is in
  # |files|.
  build_file_in_files = {}
  # Root targets across all files.
  roots = set()
  # Set of Targets in |build_files|.
  build_file_targets = set()
  while len(targets_to_visit) > 0:
    target_name = targets_to_visit.pop()
    created_target, target = _GetOrCreateTargetByName(name_to_target,
                                                      target_name)
    if created_target:
      # A freshly created target is provisionally a root; it is discarded
      # from |roots| below if some other target depends on it.
      roots.add(target)
    elif target.visited:
      continue
    target.visited = True
    target.requires_build = _DoesTargetTypeRequireBuild(
        target_dicts[target_name])
    target_type = target_dicts[target_name]['type']
    target.is_executable = target_type == 'executable'
    target.is_static_library = target_type == 'static_library'
    target.is_or_has_linked_ancestor = (target_type == 'executable' or
                                        target_type == 'shared_library')
    build_file = gyp.common.ParseQualifiedTarget(target_name)[0]
    # Cache per-build-file modification checks; many targets share a file.
    if not build_file in build_file_in_files:
      build_file_in_files[build_file] = \
          _WasBuildFileModified(build_file, data, files, toplevel_dir)
    if build_file in build_files:
      build_file_targets.add(target)
    # If a build file (or any of its included files) is modified we assume all
    # targets in the file are modified.
    if build_file_in_files[build_file]:
      print('matching target from modified build file', target_name)
      target.match_status = MATCH_STATUS_MATCHES
      matching_targets.append(target)
    else:
      sources = _ExtractSources(target_name, target_dicts[target_name],
                                toplevel_dir)
      for source in sources:
        if _ToGypPath(os.path.normpath(source)) in files:
          print('target', target_name, 'matches', source)
          target.match_status = MATCH_STATUS_MATCHES
          matching_targets.append(target)
          break
    # Add dependencies to visit as well as updating back pointers for deps.
    for dep in target_dicts[target_name].get('dependencies', []):
      targets_to_visit.append(dep)
      created_dep_target, dep_target = _GetOrCreateTargetByName(name_to_target,
                                                                dep)
      if not created_dep_target:
        roots.discard(dep_target)
      target.deps.add(dep_target)
      dep_target.back_deps.add(target)
  # The 'all' set is the roots that live in one of |build_files|.
  return name_to_target, matching_targets, roots & build_file_targets
def _GetUnqualifiedToTargetMapping(all_targets, to_find):
  """Returns a tuple of the following:
  . mapping (dictionary) from unqualified name to Target for all the
    Targets in |to_find|.
  . any target names not found. If this is empty all targets were found."""
  result = {}
  if not to_find:
    return {}, []
  to_find = set(to_find)
  # Iterate the dict directly instead of materializing .keys().
  for target_name in all_targets:
    extracted = gyp.common.ParseQualifiedTarget(target_name)
    if len(extracted) > 1 and extracted[1] in to_find:
      to_find.remove(extracted[1])
      result[extracted[1]] = all_targets[target_name]
      if not to_find:
        # Everything requested has been found; stop early.
        return result, []
  # list() instead of the redundant [x for x in to_find] comprehension.
  return result, list(to_find)
def _DoesTargetDependOnMatchingTargets(target):
  """Returns true if |target| or any of its dependencies is one of the
  targets containing the files supplied as input to analyzer. This updates
  |matches| of the Targets as it recurses.
  target: the Target to look for."""
  # match_status doubles as a memo: once decided (either way) we never
  # re-walk this target's dependency subtree.
  if target.match_status == MATCH_STATUS_DOESNT_MATCH:
    return False
  if target.match_status == MATCH_STATUS_MATCHES or \
      target.match_status == MATCH_STATUS_MATCHES_BY_DEPENDENCY:
    return True
  for dep in target.deps:
    if _DoesTargetDependOnMatchingTargets(dep):
      target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY
      print('\t', target.name, 'matches by dep', dep.name)
      return True
  target.match_status = MATCH_STATUS_DOESNT_MATCH
  return False
def _GetTargetsDependingOnMatchingTargets(possible_targets):
  """Returns the list of Targets in |possible_targets| that depend (either
  directly on indirectly) on at least one of the targets containing the files
  supplied as input to analyzer.
  possible_targets: targets to search from."""
  print('Targets that matched by dependency:')
  return [target for target in possible_targets
          if _DoesTargetDependOnMatchingTargets(target)]
def _AddCompileTargets(target, roots, add_if_no_ancestor, result):
  """Recurses through all targets that depend on |target|, adding all targets
  that need to be built (and are in |roots|) to |result|.
  roots: set of root targets.
  add_if_no_ancestor: If true and there are no ancestors of |target| then add
  |target| to |result|. |target| must still be in |roots|.
  result: targets that need to be built are added here."""
  if target.visited:
    return
  target.visited = True
  target.in_roots = target in roots
  # Walk up the reverse-dependency edges first so that the ancestor flags
  # merged below reflect the fully processed ancestors.
  for back_dep_target in target.back_deps:
    _AddCompileTargets(back_dep_target, roots, False, result)
    target.added_to_compile_targets |= back_dep_target.added_to_compile_targets
    target.in_roots |= back_dep_target.in_roots
    target.is_or_has_linked_ancestor |= (
        back_dep_target.is_or_has_linked_ancestor)
  # Always add 'executable' targets. Even though they may be built by other
  # targets that depend upon them it makes detection of what is going to be
  # built easier.
  # And always add static_libraries that have no dependencies on them from
  # linkables. This is necessary as the other dependencies on them may be
  # static libraries themselves, which are not compile time dependencies.
  if target.in_roots and \
        (target.is_executable or
         (not target.added_to_compile_targets and
          (add_if_no_ancestor or target.requires_build)) or
         (target.is_static_library and add_if_no_ancestor and
          not target.is_or_has_linked_ancestor)):
    print('\t\tadding to compile targets', target.name, 'executable',
          target.is_executable, 'added_to_compile_targets',
          target.added_to_compile_targets, 'add_if_no_ancestor',
          add_if_no_ancestor, 'requires_build', target.requires_build,
          'is_static_library', target.is_static_library,
          'is_or_has_linked_ancestor', target.is_or_has_linked_ancestor
          )
    result.add(target)
    target.added_to_compile_targets = True
def _GetCompileTargets(matching_targets, supplied_targets):
  """Returns the set of Targets that require a build.
  matching_targets: targets that changed and need to be built.
  supplied_targets: set of targets supplied to analyzer to search from."""
  compile_targets = set()
  for changed_target in matching_targets:
    print('finding compile targets for match', changed_target.name)
    _AddCompileTargets(changed_target, supplied_targets, True, compile_targets)
  return compile_targets
def _WriteOutput(params, **values):
"""Writes the output, either to stdout or a file is specified."""
if 'error' in values:
print('Error:', values['error'])
if 'status' in values:
print(values['status'])
if 'targets' in values:
values['targets'].sort()
print('Supplied targets that depend on changed files:')
for target in values['targets']:
print('\t', target)
if 'invalid_targets' in values:
values['invalid_targets'].sort()
print('The following targets were not found:')
for target in values['invalid_targets']:
print('\t', target)
if 'build_targets' in values:
values['build_targets'].sort()
print('Targets that require a build:')
for target in values['build_targets']:
print('\t', target)
if 'compile_targets' in values:
values['compile_targets'].sort()
print('Targets that need to be built:')
for target in values['compile_targets']:
print('\t', target)
if 'test_targets' in values:
values['test_targets'].sort()
print('Test targets:')
for target in values['test_targets']:
print('\t', target)
output_path = params.get('generator_flags', {}).get(
'analyzer_output_path', None)
if not output_path:
print(json.dumps(values))
return
try:
f = open(output_path, 'w')
f.write(json.dumps(values) + '\n')
f.close()
except IOError as e:
print('Error writing to output file', output_path, str(e))
def _WasGypIncludeFileModified(params, files):
"""Returns true if one of the files in |files| is in the set of included
files."""
if params['options'].includes:
for include in params['options'].includes:
if _ToGypPath(os.path.normpath(include)) in files:
print('Include file modified, assuming all changed', include)
return True
return False
def _NamesNotIn(names, mapping):
"""Returns a list of the values in |names| that are not in |mapping|."""
return [name for name in names if name not in mapping]
def _LookupTargets(names, mapping):
"""Returns a list of the mapping[name] for each value in |names| that is in
|mapping|."""
return [mapping[name] for name in names if name in mapping]
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp)."""
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
  elif flavor == 'win':
    default_variables.setdefault('OS', 'win')
    # # Copy additional generator configuration data from VS, which is shared by the Windows Ninja generator.
    # import gyp.generator.msvs as msvs_generator
    # generator_additional_non_configuration_keys = getattr(msvs_generator, 'generator_additional_non_configuration_keys', [])
    # generator_additional_path_sections = getattr(msvs_generator, 'generator_additional_path_sections', [])
    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
  else:
    operating_system = flavor
    if flavor == 'android':
      operating_system = 'linux'  # Keep this legacy behavior for now.
    default_variables.setdefault('OS', operating_system)
class TargetCalculator(object):
  """Calculates the matching test_targets and matching compile_targets."""
  def __init__(self, files, additional_compile_target_names, test_target_names,
               data, target_list, target_dicts, toplevel_dir, build_files):
    self._additional_compile_target_names = set(additional_compile_target_names)
    self._test_target_names = set(test_target_names)
    # Build the full target graph once; later queries reuse these results.
    self._name_to_target, self._changed_targets, self._root_targets = (
        _GenerateTargets(data, target_list, target_dicts, toplevel_dir,
                         frozenset(files), build_files))
    self._unqualified_mapping, self.invalid_targets = (
        _GetUnqualifiedToTargetMapping(self._name_to_target,
                                       self._supplied_target_names_no_all()))
  def _supplied_target_names(self):
    # Union of the compile targets and test targets the caller asked about.
    return self._additional_compile_target_names | self._test_target_names
  def _supplied_target_names_no_all(self):
    """Returns the supplied test targets without 'all'."""
    result = self._supplied_target_names();
    result.discard('all')
    return result
  def is_build_impacted(self):
    """Returns true if the supplied files impact the build at all."""
    return self._changed_targets
  def find_matching_test_target_names(self):
    """Returns the set of output test targets."""
    assert self.is_build_impacted()
    # Find the test targets first. 'all' is special cased to mean all the
    # root targets. To deal with all the supplied |test_targets| are expanded
    # to include the root targets during lookup. If any of the root targets
    # match, we remove it and replace it with 'all'.
    test_target_names_no_all = set(self._test_target_names)
    test_target_names_no_all.discard('all')
    test_targets_no_all = _LookupTargets(test_target_names_no_all,
                                         self._unqualified_mapping)
    test_target_names_contains_all = 'all' in self._test_target_names
    if test_target_names_contains_all:
      test_targets = [x for x in (set(test_targets_no_all) |
                                  set(self._root_targets))]
    else:
      test_targets = [x for x in test_targets_no_all]
    print('supplied test_targets')
    for target_name in self._test_target_names:
      print('\t', target_name)
    print('found test_targets')
    for target in test_targets:
      print('\t', target.name)
    print('searching for matching test targets')
    matching_test_targets = _GetTargetsDependingOnMatchingTargets(test_targets)
    matching_test_targets_contains_all = (test_target_names_contains_all and
                                          set(matching_test_targets) &
                                          set(self._root_targets))
    if matching_test_targets_contains_all:
      # Remove any of the targets for all that were not explicitly supplied,
      # 'all' is subsequently added to the matching names below.
      matching_test_targets = [x for x in (set(matching_test_targets) &
                                           set(test_targets_no_all))]
    print('matched test_targets')
    for target in matching_test_targets:
      print('\t', target.name)
    # Report unqualified names (the part after the build file path).
    matching_target_names = [gyp.common.ParseQualifiedTarget(target.name)[1]
                             for target in matching_test_targets]
    if matching_test_targets_contains_all:
      matching_target_names.append('all')
      print('\tall')
    return matching_target_names
  def find_matching_compile_target_names(self):
    """Returns the set of output compile targets."""
    assert self.is_build_impacted();
    # Compile targets are found by searching up from changed targets.
    # Reset the visited status for _GetBuildTargets.
    for target in self._name_to_target.values():
      target.visited = False
    supplied_targets = _LookupTargets(self._supplied_target_names_no_all(),
                                      self._unqualified_mapping)
    if 'all' in self._supplied_target_names():
      supplied_targets = [x for x in (set(supplied_targets) |
                                      set(self._root_targets))]
    print('Supplied test_targets & compile_targets')
    for target in supplied_targets:
      print('\t', target.name)
    print('Finding compile targets')
    compile_targets = _GetCompileTargets(self._changed_targets,
                                         supplied_targets)
    return [gyp.common.ParseQualifiedTarget(target.name)[1]
            for target in compile_targets]
def GenerateOutput(target_list, target_dicts, data, params):
  """Called by gyp as the final stage. Outputs results."""
  config = Config()
  try:
    config.Init(params)
    if not config.files:
      raise Exception('Must specify files to analyze via config_path generator '
                      'flag')
    toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir))
    if debug:
      print('toplevel_dir', toplevel_dir)
    # A modified gyp include file invalidates everything: report all supplied
    # targets without walking the graph.
    if _WasGypIncludeFileModified(params, config.files):
      result_dict = { 'status': all_changed_string,
                      'test_targets': list(config.test_target_names),
                      'compile_targets': list(
                          config.additional_compile_target_names |
                          config.test_target_names) }
      _WriteOutput(params, **result_dict)
      return
    calculator = TargetCalculator(config.files,
                                  config.additional_compile_target_names,
                                  config.test_target_names, data,
                                  target_list, target_dicts, toplevel_dir,
                                  params['build_files'])
    if not calculator.is_build_impacted():
      result_dict = { 'status': no_dependency_string,
                      'test_targets': [],
                      'compile_targets': [] }
      if calculator.invalid_targets:
        result_dict['invalid_targets'] = calculator.invalid_targets
      _WriteOutput(params, **result_dict)
      return
    test_target_names = calculator.find_matching_test_target_names()
    compile_target_names = calculator.find_matching_compile_target_names()
    found_at_least_one_target = compile_target_names or test_target_names
    result_dict = { 'test_targets': test_target_names,
                    'status': found_dependency_string if
                              found_at_least_one_target else no_dependency_string,
                    'compile_targets': list(
                        set(compile_target_names) |
                        set(test_target_names)) }
    if calculator.invalid_targets:
      result_dict['invalid_targets'] = calculator.invalid_targets
    _WriteOutput(params, **result_dict)
  except Exception as e:
    # All failures are reported through the normal output channel so the
    # caller always receives a well-formed result.
    _WriteOutput(params, error=str(e))
|
wiltonlazary/arangodb
|
3rdParty/V8/gyp/generator/analyzer.py
|
Python
|
apache-2.0
| 30,587
|
[
"VisIt"
] |
bb8aa790aa978f7d93159480d96d5b8bdacb6a92b55d4cfee1530ee4f556113b
|
import ase.db
from ase.units import kcal, mol
from ase.data.g2_1_ref import ex_atomization, atomization
# Reads per-molecule PBE and EXX energies from a previously computed
# results.db and compares atomization energies against the G2-1 reference
# values, asserting the mean absolute errors stay below fixed thresholds.
c = ase.db.connect('results.db')
# Energy of atoms:
atoms = {}
for d in c.select(natoms=1):
    # Keyed by atomic number: [total energy (PBE), exact-exchange energy].
    atoms[d.numbers[0]] = [d.energy, d.exx]
# Mean absolute errors accumulated over all molecules.
maepbe = 0.0
maeexx = 0.0
print(' PBE EXX')
print('-' * 48)
for d in c.select('natoms>1'):
    # Reference values are tabulated in kcal/mol; convert to eV.
    epberef = atomization[d.name][2] * kcal / mol
    eexxref = ex_atomization[d.name][0] * kcal / mol
    # Atomization energy = sum of isolated-atom energies minus molecule energy.
    epbe = sum(atoms[atom][0] for atom in d.numbers) - d.energy
    eexx = sum(atoms[atom][1] for atom in d.numbers) - d.exx
    maepbe += abs(epbe - epberef) / len(ex_atomization)
    maeexx += abs(eexx - eexxref) / len(ex_atomization)
    print('%-4s %10.3f %10.3f %10.3f %10.3f' %
          (d.name, epbe, epbe - epberef, eexx, eexx - eexxref))
print('-' * 48)
# NOTE(review): the two zero columns appear to be placeholders so the MAE row
# lines up with the per-molecule rows above — confirm intent.
print('MAE %10.3f %10.3f %10.3f %10.3f' % (0, maepbe, 0, maeexx))
# Regression thresholds in eV.
assert maepbe < 0.025
assert maeexx < 0.05
|
robwarm/gpaw-symm
|
gpaw/test/big/kpb/check.py
|
Python
|
gpl-3.0
| 955
|
[
"ASE"
] |
43d5dcf45a521f7e07eee041f4fec7638321a0e267547f71eee6d2a17832a365
|
"""Collection of DIRAC useful list related modules.
By default on Error they return None.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import six
import random
import sys
def uniqueElements(aList):
  """Utility to retrieve list of unique elements in a list (order is kept).

  :param aList: list of elements
  :type aList: python:list
  :return: list of unique elements (or None on error, per module convention)
  """
  try:
    seen = set()
    unique = []
    for element in aList:
      if element in seen:
        continue
      seen.add(element)
      unique.append(element)
    return unique
  except Exception:
    return None
def appendUnique(aList, anObject):
  """ Append to list if object does not exist.

  :param aList: list of elements
  :type aList: python:list
  :param anObject: object you want to append
  """
  if anObject in aList:
    return
  aList.append(anObject)
def fromChar(inputString, sepChar=","):
  """Generates a list splitting a string by the required character(s)
  resulting string items are stripped and empty items are removed.

  :param string inputString: list serialised to string
  :param string sepChar: separator
  :return: list of strings or None if sepChar has a wrong type
  """
  # Both arguments must be strings and the separator non-empty; otherwise
  # follow the module convention of returning None on error.
  if not isinstance(inputString, six.string_types):
    return None
  if not isinstance(sepChar, six.string_types) or not sepChar:
    return None
  items = []
  for fragment in inputString.split(sepChar):
    stripped = fragment.strip()
    if stripped:
      items.append(stripped)
  return items
def randomize(aList):
  """Return a randomly sorted list.

  :param aList: list to permute
  :type aList: python:list
  """
  # Work on a copy so the caller's list is left untouched.
  shuffled = list(aList)
  random.shuffle(shuffled)
  return shuffled
def pop(aList, popElement):
  """ Pop the first element equal to popElement from the list.

  :param aList: list
  :type aList: python:list
  :param popElement: element to pop
  """
  # EAFP: index() raises ValueError when the element is absent, in which
  # case we fall through to the implicit None of the original.
  try:
    return aList.pop(aList.index(popElement))
  except ValueError:
    return None
def stringListToString(aList):
  """This function is used for making MySQL queries with a list of string elements.

  :param aList: list to be serialized to string for making queries
  :type aList: python:list
  """
  quoted = ["'%s'" % element for element in aList]
  return ",".join(quoted)
def intListToString(aList):
  """This function is used for making MySQL queries with a list of int elements.

  :param aList: list to be serialized to string for making queries
  :type aList: python:list
  """
  return ",".join(map(str, aList))
def getChunk(aList, chunkSize):
  """Generator yielding chunk from a list of a size chunkSize.

  :param aList: list to be splitted
  :type aList: python:list
  :param int chunkSize: lenght of one chunk
  :raise: StopIteration

  Usage:

  >>> for chunk in getChunk( aList, chunkSize=10):
        process( chunk )
  """
  # range() with a step keeps the original's ValueError for chunkSize == 0.
  for start in range(0, len(aList), chunkSize):
    yield aList[start:start + chunkSize]
def breakListIntoChunks(aList, chunkSize):
  """This function takes a list as input and breaks it into list of size 'chunkSize'.
  It returns a list of lists.

  :param aList: list of elements
  :type aList: python:list
  :param int chunkSize: len of a single chunk
  :return: list of lists of length of chunkSize
  :raise: RuntimeError if chunkSize is less than 1
  """
  if chunkSize < 1:
    raise RuntimeError("chunkSize cannot be less than 1")
  # Normalise the common non-list iterables (including py3 dict views) to a
  # list so getChunk() can slice them.
  if isinstance(aList, (set, dict, tuple, {}.keys().__class__,
                        {}.items().__class__, {}.values().__class__)):
    aList = list(aList)
  # list(...) instead of the redundant [chunk for chunk in ...] comprehension.
  return list(getChunk(aList, chunkSize))
def getIndexInList(anItem, aList):
  """ Return the index of the element anItem in aList
  or sys.maxsize if it does not exist

  :param anItem: element to look for
  :param list aList: list to look into
  :return: the index or sys.maxsize
  """
  # Membership test first so non-list containers that support 'in' but not
  # .index() for missing items behave as before.
  if anItem in aList:
    return aList.index(anItem)
  return sys.maxsize
|
yujikato/DIRAC
|
src/DIRAC/Core/Utilities/List.py
|
Python
|
gpl-3.0
| 4,015
|
[
"DIRAC"
] |
26ee456b02878fa6e5e5c24177a3f3c416e712a2b68471ec587c4280c9789845
|
# pykarta/maps/layers/vector.py
# An editable vector layer
# Copyright 2013--2017, Trinity College
# Last modified: 2 February 2017
import gtk
import cairo
import math
import weakref
from pykarta.maps.layers import MapLayer
from pykarta.geometry import Point, BoundingBox, LineString, Polygon
import pykarta.draw
#============================================================================
# A container layer which can hold vector objects
#============================================================================
class MapLayerVector(MapLayer):
	"""An editable map layer which holds vector objects.
	tool_done_cb: optional callback invoked when a drawing tool completes an
	  operation; returning True suppresses the default action.
	obj_modified_cb: optional callback invoked after a point of an object is
	  dragged or deleted."""
	def __init__(self, tool_done_cb=None, obj_modified_cb=None):
		MapLayer.__init__(self)
		self.layer_objs = []
		self.visible_objs = []
		self.dragger = None
		self.drawing_tool = None
		self.tool_done_cb = tool_done_cb
		self.obj_modified_cb = obj_modified_cb
	# Add a vector object to the vector layer
	def add_obj(self, obj):
		self.layer_objs.append(obj)
		self.set_stale()
	# Remove a vector object from the vector layer
	def remove_obj(self, obj):
		self.layer_objs.remove(obj)
		self.set_stale()
	# Raise a vector object to the top of the Z order
	def raise_obj(self, obj):
		self.layer_objs.remove(obj)
		self.layer_objs.append(obj)
		self.set_stale()
	# Remove all of the vector objects from the vector layer
	def clear(self):
		self.layer_objs = []
		self.set_stale()
	# Set the current drawing tool. Use None to deactivate.
	def set_tool(self, tool):
		#print "set_tool(%s)" % str(tool)
		self.drawing_tool = tool
		if tool is not None:
			# weakref.proxy avoids a reference cycle between tool and layer.
			tool.activate(weakref.proxy(self))
			self.containing_map.set_cursor(tool.cursor)
		else:
			self.containing_map.set_cursor(None)
		self.editing_off()	# also triggers redraw
	# Turn on editing for the indicated object and move it to the top.
	def edit_obj(self, obj):
		obj.set_editable(True)
		self.raise_obj(obj)
	# Disabling editing of all of the objects.
	def editing_off(self):
		for obj in self.layer_objs:
			obj.set_editable(False)
		self.redraw()
	# The drawing tools call this function when they complete an operation.
	# tool--the tool which performed the operation
	# obj--the object which it selected, deleted, or created
	# If the user supplied a callback function, it is called.
	# Default actions are also provided.
	def drawing_tool_done(self, tool, obj):
		# If there is no callback function or it returns False, do default action.
		if self.tool_done_cb is None or not self.tool_done_cb(tool, obj):
			if type(tool) is MapToolSelect:
				self.editing_off()
				obj.set_editable(True)
				self.redraw()
			elif type(tool) is MapToolDelete:
				self.remove_obj(obj)
			else:	# new object created
				self.add_obj(obj)
	# Viewport has changed
	# Figure out which objects are now visible.
	def do_viewport(self):
		map_bbox = self.containing_map.get_bbox()
		self.visible_objs = []
		for obj in self.layer_objs:
			if obj.geometry.get_bbox().overlaps(map_bbox):
				obj.project(self.containing_map)
				self.visible_objs.append(obj)
		if self.drawing_tool is not None:
			self.drawing_tool.project(self.containing_map)
	# Draw the objects selected by do_viewport().
	def do_draw(self, ctx):
		for obj in self.visible_objs:
			obj.draw(ctx)
		if self.drawing_tool is not None:
			self.drawing_tool.draw(ctx)
	# Mouse button pressed down while pointer is over map.
	# If this function takes any action in response, it returns True
	# so that the button press will not be interpreted as the start
	# of an attempt to drag the map.
	def on_button_press(self, gdkevent):
		if gdkevent.button == 1:	# left-hand button
			# If hit is near a vertex of an object with editing enabled, start dragging it.
			# Iterate in reverse so topmost (last drawn) objects win.
			for obj in reversed(self.visible_objs):
				if obj._editable:
					i = obj.get_draggable_point(gdkevent)
					if i is not None:
						self.dragger = MapDragger(obj, i)
						self.containing_map.set_cursor(gtk.gdk.FLEUR)
						#break
						# We bail out here so that if the select tool is active, we will not
						# accidently select a nearby object when we move or delete a point.
						return True
			# If a drawing tool is active, send it the new point (after snapping).
			if self.drawing_tool is not None:
				x, y, pt = self.snap_search(gdkevent, None, self.drawing_tool.use_snapping, True)
				return self.drawing_tool.on_button_press(gdkevent, x, y, pt)
		return False
	# Mouse pointer moving over map
	def on_motion(self, gdkevent):
		stop_propagation = False
		if self.drawing_tool:
			x, y, pt = self.snap_search(gdkevent, None, self.drawing_tool.use_snapping, False)
			stop_propagation = self.drawing_tool.on_motion(gdkevent, x, y)
		if self.dragger:
			# While dragging, move the point and suppress map panning.
			x, y, pt = self.snap_search(gdkevent, self.dragger.obj, self.dragger.obj.snap, False)
			self.dragger.obj.move(self.dragger.i, x, y)
			self.dragger.moved = True
			self.redraw()
			stop_propagation = True
		return stop_propagation
	# Mouse button released while pointer is over map
	def on_button_release(self, gdkevent):
		if gdkevent.button == 1:
			if self.drawing_tool:
				self.drawing_tool.on_button_release(gdkevent)
			if self.dragger:	# if mouse was down on a point,
				if self.dragger.moved:
					# Drag: finalize the new (possibly snapped) position.
					x, y, pt = self.snap_search(gdkevent, self.dragger.obj, self.dragger.obj.snap, True)
					self.dragger.obj.drop(self.dragger.i, pt, self.containing_map)
				else:	# clicked but not dragged
					self.dragger.obj.delete(self.dragger.i, self.containing_map)
				if self.obj_modified_cb:
					self.obj_modified_cb(self.dragger.obj)
				self.dragger = None
				self.containing_map.set_cursor(None)
				self.redraw()
		return False
	# Look for a point (belonging to a vector object) which is near the location
	# of the gdkevent. Exclude points belonging to source_obj since we do not
	# want to snap it to itself. If such a point is found, return its coordinates.
	# Otherwise return the coordinates of the event.
	#
	# If enabled is False, this function does no snapping, it just
	# returns the coordinates of the event.
	def snap_search(self, gdkevent, source_obj, enable, need_pt):
		if enable:
			for obj in self.visible_objs:
				if obj is not source_obj:
					snap = obj.snap_search(gdkevent)
					if snap is not None:
						#print "Snap:", gdkevent.x, gdkevent.y, map(str,snap)
						x, y, pt = snap
						if need_pt:
							return (x, y, Point(pt))
						else:
							return (x, y, None)
		if need_pt:
			return (gdkevent.x, gdkevent.y, self.containing_map.unproject_point(gdkevent.x, gdkevent.y))
		else:
			return (gdkevent.x, gdkevent.y, None)
# This class describes an object and its point on which the user has
# bought the left mouse button down. If he moves the mouse before
# letting it up, the action will be considered a drag. If not,
# we will delete the point.
class MapDragger(object):
	"""Records the vector object and point index under a pressed left mouse
	button. A subsequent motion makes the gesture a drag; a plain release
	deletes the point (see MapLayerVector.on_button_release())."""
	def __init__(self, obj, i):
		self.obj = obj		# object to which point belongs
		self.i = i			# index of its point which is dragged
		self.moved = False	# mouse motion while left button down?
#============================================================================
# The Vector Objects
# This follow GeoJSON
#============================================================================
# Base class for vector objects
class MapVectorObj(object):
	"""Base class for the vector objects (modeled on GeoJSON features).
	Subclasses override draw() and obj_hit_detect() and supply self.geometry.
	"""
	snap = True		# snap this object's points to other objects
	min_points = 0	# when to stop allowing point deletion
	unclosed = 1	# 1 for open figures, 0 for closed figures
	def __init__(self, properties):
		self._editable = False
		self.geometry = None
		self.properties = {}
		if properties is not None:
			self.properties.update(properties)
		# Pixel-space coordinates of self.geometry.points (filled by project()).
		self.projected_points = []
	def set_editable(self, editable):
		self._editable = editable
		self.update_phantoms()
	# Project this vector object's points to pixel space
	def project(self, containing_map):
		self.projected_points = containing_map.project_points(self.geometry.points)
		self.update_phantoms()
	# Override this to draw the object from self.projected_points.
	def draw(self, ctx):
		pass
	# Override this if you want the object to be clickable.
	# Return True if hit.
	# Test lat_lon (a Point) or gdkevent's x and y members.
	def obj_hit_detect(self, gdkevent, lat_lon):
		return False
	# Did this click hit one of the object's points? If so, return its index.
	# If a phantom point was hit, make it a real point first.
	def get_draggable_point(self, gdkevent):
		evpoint = (gdkevent.x, gdkevent.y)
		# Real points
		i = 0
		for point in self.projected_points:
			if points_close(evpoint, point):
				return i
			i += 1
		i = 0
		# Phantom points (midpoints); hitting one inserts it as a real point.
		for point in self.phantom_points:
			if points_close(evpoint, point):
				self.projected_points.insert(i+1, self.phantom_points[i])	# make it a real point
				self.geometry.points.insert(i+1, Point(0.0, 0.0))	# FIXME: should be able to use None here
				return i+1
			i += 1
		return None
	# Is this click close to one of the object's points? If so, return that point.
	def snap_search(self, gdkevent):
		i = 0
		for point in self.projected_points:
			if points_close((gdkevent.x, gdkevent.y), point):
				# Returns (x, y, geographic Point) for the snapped vertex.
				return point + (self.geometry.points[i],)
			i += 1
		return None
	# Temporarily move point i of the object to the location specified by x, y.
	def move(self, i, x, y):
		#print "move:", x, y
		self.projected_points[i] = (x, y)
		self.update_phantoms()
	# Finalize the position of a moved point.
	def drop(self, i, pt, containing_map):
		#print "drop:", pt, containing_map.project_point(pt)
		self.geometry.points[i] = pt
		# Invalidate the cached bounding box so it is recomputed on demand.
		self.geometry.bbox = None
	# Delete point i of this object.
	def delete(self, i, containing_map):
		if len(self.geometry.points) > self.min_points:
			self.geometry.points.pop(i)
			self.geometry.bbox = None
			self.projected_points.pop(i)
			self.update_phantoms()
	# Update the locations of the intermediate points which can be dragged to add points.
	def update_phantoms(self):
		self.phantom_points = []
		if self._editable:
			i = 0
			# Closed figures (unclosed == 0) also get a midpoint on the
			# segment wrapping from the last point back to the first.
			while i < (len(self.projected_points) - self.unclosed):
				p1 = self.projected_points[i]
				p2 = self.projected_points[(i+1)%len(self.projected_points)]
				self.phantom_points.append(( (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2))
				i += 1
# Adapter to make a point look like a one-point line. This way we
# do not have to write a lot of exceptions for the MapVectorMarker object.
class PointWrapper(object):
    """Adapts a single Point so that it looks like a one-point line.

    This lets MapVectorMarker reuse the MapVectorObj machinery without
    a lot of special cases."""
    def __init__(self, *args):
        self.points = [Point(*args)]
        self.bbox = None    # lazily-computed bounding box cache

    def get_bbox(self):
        if self.bbox is not None:
            return self.bbox
        box = BoundingBox()
        box.add_points(self.points)
        self.bbox = box
        return box

    def as_geojson(self):
        return self.points[0].as_geojson()
# Draws a representation of a pykarta.geometry.Point with optional label.
# Can construct a Point from point.
class MapVectorMarker(MapVectorObj):
    """Draws a pykarta.geometry.Point as a map symbol with an optional
    label (constructing the Point from point if necessary)."""
    min_points = 1

    def __init__(self, point, properties=None, style=None):
        MapVectorObj.__init__(self, properties)
        self.geometry = PointWrapper(point)
        self.style = {}
        if style is not None:
            self.style.update(style)
        self.symbol = None
        self.label = self.style.get("label")

    def project(self, containing_map):
        MapVectorObj.project(self, containing_map)
        # Look the symbol up lazily the first time we are projected,
        # falling back to "Dot" if the named symbol is unknown.
        if self.symbol is None:
            symbol_name = self.style.get("symbol", "Dot")
            self.symbol = containing_map.symbols.get_symbol(symbol_name, "Dot")
        self.symbol_renderer = self.symbol.get_renderer(containing_map)

    def obj_hit_detect(self, gdkevent, lat_lon):
        return points_close((gdkevent.x, gdkevent.y), self.projected_points[0])

    def draw(self, ctx):
        x, y = self.projected_points[0]
        self.symbol_renderer.blit(ctx, x, y)
        if self.label:
            pykarta.draw.poi_label(ctx, x + self.symbol_renderer.label_offset, y, self.label)
# Draws a representation of a pykarta.geometry.LineString.
# Can construct a LineString from line_string.
class MapVectorLineString(MapVectorObj):
    """Draws a pykarta.geometry.LineString (constructing one from
    line_string if necessary)."""
    min_points = 2

    def __init__(self, line_string, properties=None, style=None):
        MapVectorObj.__init__(self, properties)
        if isinstance(line_string, LineString):
            self.geometry = line_string
        else:
            self.geometry = LineString(line_string)
        self.style = {"line-width": 1}
        if style is not None:
            self.style.update(style)

    def obj_hit_detect(self, gdkevent, lat_lon):
        # Hit if the click falls within 10 pixels of any segment.
        testpt = (gdkevent.x, gdkevent.y)
        points = self.projected_points
        for seg_start, seg_end in zip(points[:-1], points[1:]):
            if pykarta.geometry.plane_lineseg_distance(testpt, seg_start, seg_end) < 10:
                return True
        return False

    def draw(self, ctx):
        pykarta.draw.line_string(ctx, self.projected_points)
        if self.style.get("arrows", False):
            pykarta.draw.line_string_arrows(ctx, self.projected_points)
        pykarta.draw.stroke_with_style(ctx, self.style)
        if self._editable:
            pykarta.draw.node_pluses(ctx, self.phantom_points, style={})
            pykarta.draw.node_dots(ctx, self.projected_points, style={})
        elif False:  # FIXME: slow, not disablable
            pykarta.draw.node_dots(ctx, self.projected_points, style={"diameter":2.0,"fill-color":(0.0,0.0,0.0,1.0)})
# Draws a representation of a pykarta.geometry.Polygon.
# Can construct a Polygon from polygon.
# Holes are drawn, but editing of holes is not yet supported.
class MapVectorPolygon(MapVectorObj):
    """Draws a representation of a pykarta.geometry.Polygon.

    Can construct a Polygon from polygon. Holes are drawn, but editing
    of holes is not yet supported."""
    min_points = 3
    unclosed = 0    # closed figure: phantom midpoint between last and first point too

    def __init__(self, polygon, properties=None, style=None):
        MapVectorObj.__init__(self, properties)
        if isinstance(polygon, Polygon):
            self.geometry = polygon
        else:
            self.geometry = Polygon(polygon)
        self.style = { "line-width":1 }
        if style is not None:
            self.style.update(style)
        self.label = self.style.get("label",None)
        self.label_center = None            # cached geographic label position
        self.projected_label_center = None  # pixel-space label position (None when label hidden)
        self.label_fontsize = None

    def set_label(self, label):
        self.label = label

    def get_label_center(self):
        # Lazily choose and cache a geographic point at which to draw the label.
        if self.label_center is None:
            self.label_center = self.geometry.choose_label_center()
        return self.label_center

    def project_label_center(self, containing_map):
        # Project the label position, but only when zoomed in far enough
        # (past 'label-min-zoom', default 8) for the label to be legible.
        if self.label:
            zoom = containing_map.get_zoom()
            if zoom > self.style.get('label-min-zoom', 8):
                self.projected_label_center = containing_map.project_point(self.get_label_center())
                self.label_fontsize = self.style.get('label-font-size', 1.0) * zoom
            else:
                self.projected_label_center = None

    def project(self, containing_map):
        MapVectorObj.project(self, containing_map)
        # Project the rings of the holes too.
        self.holes_projected_points = []
        for hole in self.geometry.holes:
            self.holes_projected_points.append( containing_map.project_points(hole) )
        self.project_label_center(containing_map)

    def obj_hit_detect(self, gdkevent, lat_lon):
        return self.geometry.contains_point(lat_lon)

    def draw(self, ctx):
        pykarta.draw.polygon(ctx, self.projected_points)
        for hole_points in self.holes_projected_points:
            pykarta.draw.polygon(ctx, hole_points)
        # The even-odd fill rule leaves the holes unpainted.
        ctx.set_fill_rule(cairo.FILL_RULE_EVEN_ODD)
        pykarta.draw.fill_with_style(ctx, self.style, preserve=True)
        pykarta.draw.stroke_with_style(ctx, self.style)
        if self._editable:
            pykarta.draw.node_pluses(ctx, self.phantom_points, style={})
            pykarta.draw.node_dots(ctx, self.projected_points, style={})
        else:
            node_dots_style = self.style.get('node-dots-style', None)
            if node_dots_style is not None:
                pykarta.draw.node_dots(ctx, self.projected_points, style=node_dots_style)
        if self.projected_label_center:
            x, y = self.projected_label_center
            pykarta.draw.centered_label(ctx, x, y, self.label, style={'font-size':self.label_fontsize})

    def drop(self, i, pt, containing_map):
        # Moving a point invalidates the cached label position.
        MapVectorObj.drop(self, i, pt, containing_map)
        self.label_center = None
        self.project_label_center(containing_map)

    def delete(self, i, containing_map):
        # Deleting a point invalidates the cached label position.
        MapVectorObj.delete(self, i, containing_map)
        self.label_center = None
        self.project_label_center(containing_map)
# Draws a representation of a pykarta.geometry.BoundingBox.
class MapVectorBoundingBox(MapVectorObj):
    """Draws a pykarta.geometry.BoundingBox as a dashed rectangle whose
    corners may be dragged to resize it."""
    snap = False        # snapping other objects to a bounding box makes no sense
    min_points = 4
    # When corner i is dragged, corner x_map[i] follows along the x axis
    # and corner y_map[i] follows along the y axis so that the figure
    # remains a rectangle. Corner order is NW, NE, SE, SW (see __init__).
    x_map = (3, 2, 1, 0)
    y_map = (1, 0, 3, 2)

    def __init__(self, bbox, properties=None, style=None):
        MapVectorObj.__init__(self, properties)
        self.orig_bbox = bbox
        self.geometry = Polygon((
            Point(bbox.max_lat, bbox.min_lon),  # NW
            Point(bbox.max_lat, bbox.max_lon),  # NE
            Point(bbox.min_lat, bbox.max_lon),  # SE
            Point(bbox.min_lat, bbox.min_lon),  # SW
            ))
        self.style = {
            "line-width":1,
            "line-dasharray":(3,2)
            }
        if style is not None:
            self.style.update(style)

    def obj_hit_detect(self, gdkevent, lat_lon):
        return self.geometry.get_bbox().contains_point(lat_lon)

    def snap_search(self, gdkevent):
        # Snapping to bounding boxes does not make sense.
        return None

    def draw(self, ctx):
        pykarta.draw.polygon(ctx, self.projected_points)
        pykarta.draw.stroke_with_style(ctx, self.style)
        if self._editable:
            pykarta.draw.node_dots(ctx, self.projected_points)

    def update_phantoms(self):
        # No phantom points: a bounding box always has exactly four corners.
        self.phantom_points = []

    def move(self, i, x, y):
        # Figure out by how much the dragged point will move.
        start_x, start_y = self.projected_points[i]
        x_dist = x - start_x
        y_dist = y - start_y
        # Move the dragged point.
        self.projected_points[i] = (x, y)
        # Move the points at the nearest corners by the same amount, each along only one axis.
        x_i = self.x_map[i]
        self.projected_points[x_i] = (self.projected_points[x_i][0] + x_dist, self.projected_points[x_i][1])
        y_i = self.y_map[i]
        self.projected_points[y_i] = (self.projected_points[y_i][0], self.projected_points[y_i][1] + y_dist)

    def drop(self, i, pt, containing_map):
        # Bug fix: the dropped position belongs to the corner which was
        # actually dragged (i), not always to corner 1 (the NE corner).
        self.geometry.points[i] = pt
        # NOTE(review): the two corners which move() shifted along with
        # corner i still keep their old geographic coordinates here, so
        # dragging a corner inward may not shrink the recomputed bbox as
        # expected -- confirm against the callers/interactive behavior.
        self.orig_bbox.reset()
        self.orig_bbox.add_points(self.geometry.points)
        self.geometry.bbox = None  # recompute
#============================================================================
# The drawing tools
#============================================================================
# All drawing and selection tools are derived from this.
class MapToolBase(object):
    """Common base for all drawing and selection tools.

    Subclasses override the event handlers they need. Handlers which
    return False allow the event to propagate to the map itself."""
    use_snapping = False   # should clicks snap to existing object points?
    cursor = None          # GDK cursor to show while this tool is active

    def __init__(self, style=None):
        self.style = style
        self.layer = None

    def activate(self, layer):
        """Attach the tool to a layer and put it into its initial state."""
        self.layer = layer
        self.reset()

    def reset(self):
        pass

    def on_button_press(self, gdkevent, x, y, pt):
        return False

    def on_motion(self, gdkevent, x, y):
        return False

    def on_button_release(self, gdkevent):
        pass

    def project(self, containing_map):
        pass

    def draw(self, ctx):
        pass

    def fire_done(self, obj):
        """Hand the finished object to the layer and start over."""
        self.layer.drawing_tool_done(self, obj)
        self.reset()
class MapToolSelect(MapToolBase):
    """Tool which selects the object under a click.

    Normally the topmost hit wins; holding Ctrl searches bottom-to-top
    instead so that buried objects can be reached."""
    def __init__(self):
        MapToolBase.__init__(self)
        self.down = False

    def on_button_press(self, gdkevent, x, y, pt):
        self.down = True
        return False

    def on_motion(self, gdkevent, x, y):
        # Motion with the button held means a drag, not a selection.
        self.down = False
        return False

    def on_button_release(self, gdkevent):
        if self.down:
            lat_lon = self.layer.containing_map.unproject_point(gdkevent.x, gdkevent.y)
            if gdkevent.state & gtk.gdk.CONTROL_MASK:
                candidates = self.layer.visible_objs            # bottom to top layer
            else:
                candidates = reversed(self.layer.visible_objs)  # top to bottom layer
            for candidate in candidates:
                if candidate.obj_hit_detect(gdkevent, lat_lon):
                    self.fire_done(candidate)
                    break
        self.down = False
# The delete tool is just the select tool with a scary cursor. It derives its
# destructive power from the fact that the default tool done handler treats
# it differently.
class MapToolDelete(MapToolSelect):
    """Select-tool variant whose selections the default handler deletes."""
    cursor = gtk.gdk.X_CURSOR   # X-shaped cursor warns of the destructive action
# All drawing tools are derived from this.
class MapDrawBase(MapToolBase):
    """Common base for the drawing tools: accumulates clicked points."""
    def reset(self):
        self.points = []            # geographic points entered so far
        self.projected_points = []  # the same points in pixel space
        self.hover_point = None     # current pointer position (rubber-band end)

    def project(self, containing_map):
        self.projected_points = containing_map.project_points(self.points)

    def on_motion(self, gdkevent, x, y):
        # Track the pointer so draw() can rubber-band from the last point.
        if len(self.projected_points):
            self.hover_point = (x, y)
            self.layer.redraw()
        return False
# Place a new map marker.
class MapDrawMarker(MapDrawBase):
    """Tool which places a new map marker at the clicked point."""
    use_snapping = True
    cursor = gtk.gdk.PENCIL

    def on_button_press(self, gdkevent, x, y, pt):
        # Bug fix: pass style by keyword -- the second positional
        # parameter of MapVectorMarker is properties, not style, so the
        # style dict was previously absorbed into properties and ignored.
        self.fire_done(MapVectorMarker(pt, style=self.style))
        return True
class MapDrawLineString(MapDrawBase):
    """Tool which draws a LineString point by point.

    Shift-click enters the final point and finishes the figure."""
    use_snapping = True
    cursor = gtk.gdk.PENCIL

    def on_button_press(self, gdkevent, x, y, pt):
        self.projected_points.append((x,y))
        self.points.append(pt)
        self.hover_point = None
        if gdkevent.state & gtk.gdk.SHIFT_MASK:  # shift-click for last point
            # Bug fix: pass style by keyword -- the second positional
            # parameter of MapVectorLineString is properties, not style.
            self.fire_done(MapVectorLineString(self.points, style=self.style))
        self.layer.redraw()
        return True

    def draw(self, ctx):
        # Solid blue line through the points entered so far.
        if len(self.projected_points) > 1:
            pykarta.draw.line_string(ctx, self.projected_points)
            pykarta.draw.stroke_with_style(ctx, {"line-width":1,"line-color":(0.0,0.0,1.0)})
        # Dashed rubber band from the last point to the pointer.
        if len(self.projected_points) > 0 and self.hover_point is not None:
            pykarta.draw.node_dots(ctx, self.projected_points)
            ctx.move_to(*(self.projected_points[-1]))
            ctx.line_to(*(self.hover_point))
            pykarta.draw.stroke_with_style(ctx, {"line-width":1,"line-dasharray":(3,2)})
class MapDrawPolygon(MapDrawBase):
    """Tool which draws a Polygon point by point.

    Clicking the starting point again or shift-clicking closes the figure."""
    use_snapping = True
    cursor = gtk.gdk.PENCIL

    def on_button_press(self, gdkevent, x, y, pt):
        done = False
        if len(self.projected_points) >= 3 and points_close((x, y), self.projected_points[0]):
            # Clicking the starting point closes the polygon.
            done = True
        else:
            self.projected_points.append((x, y))
            self.points.append(pt)
            self.hover_point = None
            if gdkevent.state & gtk.gdk.SHIFT_MASK:
                done = True
        if done:
            # Bug fix: pass style by keyword -- the second positional
            # parameter of MapVectorPolygon is properties, not style.
            self.fire_done(MapVectorPolygon(self.points, style=self.style))
        self.layer.redraw()
        return True

    def draw(self, ctx):
        # Translucent preview of the polygon entered so far.
        if len(self.projected_points) > 1:
            pykarta.draw.line_string(ctx, self.projected_points)
            pykarta.draw.fill_with_style(ctx, {"fill-color":(1.0,1.0,1.0,0.5)}, preserve=True)
            pykarta.draw.stroke_with_style(ctx, {"line-width":1})
        # Dashed rubber band from the last point to the pointer.
        if len(self.projected_points) > 0 and self.hover_point is not None:
            pykarta.draw.node_dots(ctx, self.projected_points)
            ctx.move_to(*(self.projected_points[-1]))
            ctx.line_to(*(self.hover_point))
            pykarta.draw.stroke_with_style(ctx, {"line-width":1,"line-dasharray":(3,2)})
class MapDrawBoundingBox(MapDrawBase):
    """Tool which drags out a new bounding box."""
    cursor = gtk.gdk.SIZING

    def on_button_press(self, gdkevent, x, y, pt):
        # First corner; the opposite corner follows the pointer until release.
        self.projected_points = [(x,y)]
        self.points = [pt]
        self.hover_point = None
        self.layer.redraw()
        return True

    def on_motion(self, gdkevent, x, y):
        MapDrawBase.on_motion(self, gdkevent, x, y)
        return True  # <-- prevent map motion

    def on_button_release(self, gdkevent):
        if self.hover_point is not None:
            self.projected_points.append(self.hover_point)
            # Bug fix: a single (x, y) pair is unprojected with
            # unproject_point(); the plural unproject_points() is not the
            # method used for single points elsewhere in this layer.
            self.points.append(self.layer.containing_map.unproject_point(*(self.hover_point)))
            bbox = BoundingBox()
            bbox.add_points(self.points)
            self.fire_done(MapVectorBoundingBox(bbox))

    def draw(self, ctx):
        # Dashed preview rectangle from the anchor corner to the pointer.
        if len(self.projected_points) > 0 and self.hover_point is not None:
            start_x, start_y = self.projected_points[0]
            hover_x, hover_y = self.hover_point
            ctx.rectangle(start_x, start_y, hover_x - start_x, hover_y - start_y)
            pykarta.draw.stroke_with_style(ctx, {"line-width":1,"line-dasharray":(3,2)})
def points_close(p1, p2, tolerance=10):
    """Return True if p1 and p2 lie within tolerance pixels on both axes."""
    dx = abs(p1[0] - p2[0])
    dy = abs(p1[1] - p2[1])
    return dx <= tolerance and dy <= tolerance
|
david672orford/pykarta
|
pykarta/maps/layers/vector.py
|
Python
|
gpl-2.0
| 22,875
|
[
"FLEUR"
] |
0642665be98147b081dadeeb705ed748257d5ffd3629d0d66e70e723075185fa
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """A stable, out-of-place partition.

    Returns (matching, non_matching): the elements for which predicate
    is true, then those for which it is false, each in original order.
    """
    matching: List[Any] = []
    non_matching: List[Any] = []
    for item in iterator:
        (matching if predicate(item) else non_matching).append(item)
    return matching, non_matching
class bigtableCallTransformer(cst.CSTTransformer):
    """Rewrites bigtable client method calls from flattened arguments to
    a single keyword 'request' dict plus the control parameters."""
    # Parameters which configure the RPC itself rather than the request body.
    CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
    # Known API methods mapped to the ordered fields of their request messages.
    METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
        'check_and_mutate_row': ('table_name', 'row_key', 'app_profile_id', 'predicate_filter', 'true_mutations', 'false_mutations', ),
        'mutate_row': ('table_name', 'row_key', 'mutations', 'app_profile_id', ),
        'mutate_rows': ('table_name', 'entries', 'app_profile_id', ),
        'ping_and_warm': ('name', 'app_profile_id', ),
        'read_modify_write_row': ('table_name', 'row_key', 'rules', 'app_profile_id', ),
        'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', ),
        'sample_row_keys': ('table_name', 'app_profile_id', ),
    }

    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        """Rewrite a single call node if it looks like a known API method."""
        try:
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated

        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated

        # Separate the control parameters from the request fields.
        kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
            kwargs
        )

        # Excess positional args can only be the (positional) control params.
        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))

        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value)
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)]),
            keyword=cst.Name("request")
        )

        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs
        )
def fix_files(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    transformer=bigtableCallTransformer(),
):
    """Duplicate the input dir to the output dir, fixing file method calls.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory
    """
    for root, _, files in os.walk(in_dir):
        for filename in files:
            # Only Python sources are rewritten (and copied).
            if os.path.splitext(filename)[1] != ".py":
                continue
            fpath = pathlib.Path(os.path.join(root, filename))
            with open(fpath, 'r') as f:
                src = f.read()

            # Parse the code and insert method call fixes.
            updated_module = cst.parse_module(src).visit(transformer)

            # Create the path and directory structure for the new file.
            updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
            updated_path.parent.mkdir(parents=True, exist_ok=True)

            # Generate the updated source file at the corresponding path.
            with open(updated_path, 'w') as f:
                f.write(updated_module.code)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="""Fix up source that uses the bigtable client library.

The existing sources are NOT overwritten but are copied to output_dir with changes made.

Note: This tool operates at a best-effort level at converting positional
      parameters in client method calls to keyword based parameters.
      Cases where it WILL FAIL include
      A) * or ** expansion in a method call.
      B) Calls via function or method alias (includes free function calls)
      C) Indirect or dispatched calls (e.g. the method is looked up dynamically)

      These all constitute false negatives. The tool will also detect false
      positives when an API method shares a name with another method.
""")
    parser.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    parser.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    args = parser.parse_args()
    input_dir = pathlib.Path(args.input_dir)
    output_dir = pathlib.Path(args.output_dir)

    # Report a fatal usage error and exit with the original status code.
    def _die(message):
        print(message, file=sys.stderr)
        sys.exit(-1)

    if not input_dir.is_dir():
        _die(f"input directory '{input_dir}' does not exist or is not a directory")
    if not output_dir.is_dir():
        _die(f"output directory '{output_dir}' does not exist or is not a directory")
    if os.listdir(output_dir):
        _die(f"output directory '{output_dir}' is not empty")

    fix_files(input_dir, output_dir)
|
googleapis/python-bigtable
|
scripts/fixup_bigtable_v2_keywords.py
|
Python
|
apache-2.0
| 6,488
|
[
"VisIt"
] |
9acee9b98667757be89213d6dfa159755838a254c3708780394af120e138c849
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import pytest
import MDAnalysis as mda
import MDAnalysis.analysis.psa as PSA
from numpy.testing import (assert_array_less,
assert_almost_equal, assert_equal)
import numpy as np
import scipy
import scipy.spatial
import matplotlib
from MDAnalysisTests.datafiles import PSF, DCD, DCD2
from MDAnalysis import NoDataError
class TestPSAnalysis(object):
    """Integration tests for PSA.PSAnalysis on three short CA-only paths;
    the third path is the first one reversed in time."""
    # Upper-triangle (k=1) indices of the 3x3 pairwise distance matrix.
    iu1 = np.triu_indices(3, k=1)

    @pytest.fixture()
    def psa(self, tmpdir):
        # Three universes; the third uses the same trajectory as the first.
        universe1 = mda.Universe(PSF, DCD)
        universe2 = mda.Universe(PSF, DCD2)
        universe_rev = mda.Universe(PSF, DCD)
        psa = PSA.PSAnalysis([universe1, universe2, universe_rev],
                             path_select='name CA',
                             targetdir=str(tmpdir))
        psa.generate_paths(align=True)
        psa.paths[-1] = psa.paths[-1][::-1, :, :]  # reverse third path
        return psa

    @pytest.fixture()
    def hausd_matrix(self, psa):
        # Full pairwise Hausdorff distance matrix.
        psa.run(metric='hausdorff')
        return psa.get_pairwise_distances()

    @pytest.fixture()
    def hausd_dists(self, hausd_matrix):
        # Condensed (upper-triangle) Hausdorff distances.
        return hausd_matrix[self.iu1]

    @pytest.fixture()
    def frech_matrix(self, psa):
        # Full pairwise discrete Frechet distance matrix.
        psa.run(metric='discrete_frechet')
        return psa.get_pairwise_distances()

    @pytest.fixture()
    def frech_dists(self, frech_matrix):
        # Condensed (upper-triangle) Frechet distances.
        return frech_matrix[self.iu1]

    @pytest.fixture()
    def plot_data(self, psa, tmpdir):
        psa.run(metric='hausdorff')
        psa.run(metric='discrete_frechet')
        with tmpdir.as_cwd():
            results = psa.plot(filename="distmat.png")
        return results

    @pytest.fixture()
    def plot_annotated_heatmap(self, psa, tmpdir):
        pytest.importorskip('seaborn')
        psa.run(metric='hausdorff')
        with tmpdir.as_cwd():
            results = psa.plot_annotated_heatmap(filename="annotated.png")
        return results

    @pytest.fixture()
    def plot_nearest_neighbors(self, psa, tmpdir):
        pytest.importorskip('seaborn')
        psa.run(metric='hausdorff')
        psa.run_pairs_analysis(neighbors=True)
        with tmpdir.as_cwd():
            results = psa.plot_nearest_neighbors(filename="nn.png")
        return results

    @pytest.fixture()
    def hausd_pairs_dists(self, psa):
        # Condensed Hausdorff distances from the pairs analysis.
        psa.run_pairs_analysis(neighbors=True, hausdorff_pairs=True)
        hausd_pairs_matrix = psa.get_pairwise_distances()
        return hausd_pairs_matrix[self.iu1]

    def test_hausdorff_bound(self, hausd_dists, frech_dists):
        """Test whether Frechet distances are smaller than corresponding
        Hausdorff distances"""
        err_msg = ("Some Frechet distances are smaller than corresponding "
                   "Hausdorff distances")
        assert_array_less(hausd_dists, frech_dists, err_msg)

    def test_explicit_metric(self, psa, hausd_dists):
        """Test whether explicitly specifying Hausdorff metric gives same result
        as specifying Hausdorff metric with string name"""
        psa.run(metric=PSA.hausdorff)
        hausd_matrix_explicit = psa.get_pairwise_distances()
        hausd_explicit_dists = hausd_matrix_explicit[self.iu1]
        err_msg = ("Specifying Python function for Hausdoff gives different "
                   "distances than specifying Hausdorff with string name")
        assert_equal(hausd_dists, hausd_explicit_dists, err_msg)

    def test_reversal_hausdorff(self, hausd_matrix):
        """Test whether Hausdorff distances are invariant to path reversal"""
        err_msg = "Hausdorff distances changed after path reversal"
        assert_almost_equal(hausd_matrix[1,2],
                            hausd_matrix[0,1],
                            decimal=3, err_msg=err_msg)

    def test_reversal_frechet(self, frech_matrix):
        """Test whether Frechet distances are same/larger after path reversal"""
        err_msg = "Frechet distances did not increase after path reversal"
        assert frech_matrix[1,2] >= frech_matrix[0,1], err_msg

    def test_get_num_paths(self, psa):
        assert psa.get_num_paths() == 3

    def test_get_paths(self, psa):
        paths = psa.get_paths()
        assert len(paths) == 3
        assert isinstance(paths, list)

    def test_psa_pairs_ValueError(self, psa):
        # psa_pairs is unavailable before run_pairs_analysis() is called.
        with pytest.raises(ValueError):
            psa.psa_pairs

    def test_psa_pairs(self, psa):
        psa.run_pairs_analysis()
        assert len(psa.psa_pairs) == 3

    def test_hausdorff_pairs_ValueError(self, psa):
        # hausdorff_pairs requires run_pairs_analysis(hausdorff_pairs=True).
        with pytest.raises(ValueError):
            psa.hausdorff_pairs

    def test_hausdorff_pairs(self, psa):
        psa.run_pairs_analysis(hausdorff_pairs=True)
        assert len(psa.hausdorff_pairs) == 3

    def test_nearest_neighbors_ValueError(self, psa):
        # nearest_neighbors requires run_pairs_analysis(neighbors=True).
        with pytest.raises(ValueError):
            psa.nearest_neighbors

    def test_nearest_neighbors(self, psa):
        psa.run_pairs_analysis(neighbors=True)
        assert len(psa.nearest_neighbors) == 3

    @pytest.mark.parametrize('stored', [True, False])
    def test_load(self, stored, tmpdir):
        """Test that the automatically saved files can be loaded"""
        # To allow for testing the store keyword, ignore fixture
        universe1 = mda.Universe(PSF, DCD)
        universe2 = mda.Universe(PSF, DCD2)
        universe_rev = mda.Universe(PSF, DCD)
        psa = PSA.PSAnalysis([universe1, universe2, universe_rev],
                             path_select='name CA',
                             targetdir=str(tmpdir))
        psa2 = PSA.PSAnalysis([universe1, universe2, universe_rev],
                              path_select='name CA',
                              targetdir=str(tmpdir))
        psa.generate_paths(align=True, store=stored)
        # Make copies to the existing data
        # Note: path names are set after save_paths has been called
        expected_paths = [p.copy() for p in psa.paths]
        if not stored:
            psa.save_paths()
        expected_path_names = psa.path_names[:]
        # Load data in the empty PSAnalysis object
        psa2.load()
        assert psa2.path_names == expected_path_names
        assert len(psa2.paths) == len(expected_paths)
        for ipath, (observed, expected) in enumerate(zip(psa2.paths,
                                                         expected_paths)):
            assert_almost_equal(observed, expected, decimal=6,
                                err_msg=("loaded path {} does not agree with "
                                         "input").format(ipath))

    def test_load_nofile(self, psa):
        """Test case where save_paths hasn't been called before load"""
        match_exp = "Fitted trajectories cannot be loaded"
        with pytest.raises(NoDataError, match=match_exp):
            psa.load()

    def test_save_nopaths(self, tmpdir):
        """Test case where save_paths is called without calculated paths"""
        match_exp = "Paths have not been calculated yet"
        with pytest.raises(NoDataError, match=match_exp):
            universe1 = mda.Universe(PSF, DCD)
            universe2 = mda.Universe(PSF, DCD2)
            universe_rev = mda.Universe(PSF, DCD)
            psa = PSA.PSAnalysis([universe1, universe2, universe_rev],
                                 path_select='name CA',
                                 targetdir=str(tmpdir))
            psa.save_paths()

    def test_dendrogram_produced(self, plot_data):
        """Test whether Dendrogram dictionary object was produced"""
        err_msg = "Dendrogram dictionary object was not produced"
        assert isinstance(plot_data[1], dict), err_msg

    def test_dendrogram_produced_annotated(self, plot_annotated_heatmap):
        """Test whether Dendrogram dictionary object was produced"""
        err_msg = "Dendrogram dictionary object was not produced"
        assert isinstance(plot_annotated_heatmap[1], dict), err_msg

    def test_plot_nearest_neighbors(self, plot_nearest_neighbors):
        assert isinstance(plot_nearest_neighbors, matplotlib.axes.Axes)

    def test_dist_mat_to_vec_i_less_j(self):
        """Test the index of corresponding distance vector is correct if i < j"""
        err_msg = "dist_mat_to_vec function returning wrong values"
        assert_equal(PSA.dist_mat_to_vec(5, 3, 4), 9, err_msg)

    def test_dist_mat_to_vec_i_greater_j(self):
        """Test the index of corresponding distance vector is correct if i > j"""
        err_msg = "dist_mat_to_vec function returning wrong values"
        assert_equal(PSA.dist_mat_to_vec(5, 4, 3), 9, err_msg)

    def test_dist_mat_to_vec_input_numpy_integer_32(self):
        """Test whether inputs are supported as numpy integers rather than normal Integers"""
        err_msg = "dist_mat_to_vec function returning wrong values"
        assert_equal(
            PSA.dist_mat_to_vec(np.int32(5), np.int32(3), np.int32(4)), np.int32(9),
            err_msg)

    def test_dist_mat_to_vec_input_numpy_integer_16(self):
        """Test whether inputs are supported as numpy integers rather than normal Integers"""
        err_msg = "dist_mat_to_vec function returning wrong values"
        assert_equal(
            PSA.dist_mat_to_vec(np.int16(5), np.int16(3), np.int16(4)), np.int16(9),
            err_msg)

    def test_hausdorff_pairs_distances(self, hausd_dists, hausd_pairs_dists):
        """Test whether Hausdorff pairs analysis distances are
        identical to those from standard Hausdorff metric"""
        err_msg = ("Some Hausdorff distances from pairs analysis vary "
                   "significantly from usual Hausdorff calculation")
        assert_almost_equal(hausd_dists, hausd_pairs_dists,
                            decimal=6, err_msg=err_msg)

    def test_distances_from_hausdorff_pairs_frames(self, psa):
        """Test whether actual distances between frames of Hausdorff
        pairs of a path give the expected Hausdorff distance"""
        psa.run_pairs_analysis(neighbors=True, hausdorff_pairs=True)
        hausd_pairs = psa.hausdorff_pairs
        npairs = int(psa.npaths * (psa.npaths - 1) / 2)
        hausd_pairs_dists2 = np.array([hausd_pairs[i]['distance']
                                       for i in range(npairs)])
        err_msg = ("A Hausdorff pair analysis distance when accessed "
                   "by frame varies from expected Hausdorff distance")
        dists = np.zeros((psa.npaths, psa.npaths))
        for i in range(0, psa.npaths-1):
            for j in range(i+1, psa.npaths):
                # Recompute the RMSD between the Hausdorff pair frames.
                pairidx = PSA.dist_mat_to_vec(psa.npaths, i, j)
                p, q = hausd_pairs[pairidx]['frames']
                dists[i,j] = (PSA.sqnorm(psa.paths[i][p,:,:] -
                                         psa.paths[j][q,:,:]) /
                              psa.natoms)**0.5
        assert_almost_equal(hausd_pairs_dists2,
                            dists[self.iu1],
                            decimal=6, err_msg=err_msg)
class TestPSAExceptions(object):
    """Tests for exceptions that should be raised
    or caught by code in the psa module."""

    def test_get_path_metric_func_bad_key(self):
        """An unknown metric name must raise KeyError from
        get_path_metric_func()."""
        with pytest.raises(KeyError):
            PSA.get_path_metric_func('123456')

    def test_get_coord_axes_bad_dims(self):
        """get_coord_axes() must reject an array of the wrong rank
        with ValueError."""
        with pytest.raises(ValueError):
            PSA.get_coord_axes(np.zeros((5, 5, 5, 5)))

    @pytest.mark.parametrize('N, i, j', (
        (5, 6, 4),
        (5, 4, 6),
        (5, 6, 7),
        (5, -1, 2),
        (5, 1, -2),
        (1, 0, 0)
    ))
    def test_dist_mat_to_vec_func_out_of_bounds(self, N, i, j):
        """dist_mat_to_vec() must raise ValueError when i or j (or both)
        fall outside the bounds implied by N."""
        with pytest.raises(ValueError):
            PSA.dist_mat_to_vec(N, i, j)

    @pytest.mark.parametrize('N, i, j', (
        (5, 4, 4),
        (4, 6, 4)
    ))
    def test_dist_mat_to_vec_func_i_equals_j(self, N, i, j):
        """dist_mat_to_vec() must raise ValueError when i == j or when
        an index reaches N."""
        with pytest.raises(ValueError):
            PSA.dist_mat_to_vec(N, i, j)

    def test_dist_mat_to_vec_func_bad_integers(self):
        """dist_mat_to_vec() must raise ValueError for non-integer
        indices."""
        with pytest.raises(ValueError) as excinfo:
            PSA.dist_mat_to_vec(5, '6', '7')
        assert 'all must be of type int' in str(excinfo.value)
        with pytest.raises(ValueError):
            PSA.dist_mat_to_vec(5, float(6), 7)
class _BaseHausdorffDistance(object):
    """Shared fixtures and unit tests for the Hausdorff distance
    variants; subclasses provide the metric ``h`` and its ``expected``
    value as fixtures."""

    @pytest.fixture()
    def random_angles(self):
        # 100 angles drawn uniformly from [0, 2*pi).
        return np.random.random((100,)) * np.pi * 2

    @staticmethod
    @pytest.fixture()
    def path_1(random_angles):
        # Points on the unit circle in the z=0 plane.
        coords = np.column_stack((random_angles,
                                  random_angles,
                                  np.zeros((100,))))
        coords[..., 0] = np.cos(coords[..., 0])
        coords[..., 1] = np.sin(coords[..., 1])
        return coords

    @staticmethod
    @pytest.fixture()
    def path_2(random_angles):
        # Points on a circle of radius 2 in the z=0 plane...
        coords = np.column_stack((random_angles,
                                  random_angles,
                                  np.zeros((100,))))
        coords[1:, 0] = np.cos(coords[1:, 0]) * 2.0
        coords[1:, 1] = np.sin(coords[1:, 1]) * 2.0
        # ...with one point moved farther out so we don't have two
        # perfect circles.
        coords[0, 0] = np.cos(coords[0, 0]) * 3.3
        coords[0, 1] = np.sin(coords[0, 1]) * 3.3
        return coords

    def test_symmetry(self, path_1, path_2, h):
        '''Ensure that the undirected (symmetric)
        Hausdorff distance is actually symmetric
        for a given Hausdorff metric, h.'''
        forward = h(path_1, path_2)
        backward = h(path_2, path_1)
        # lower precision on 32bit
        assert_almost_equal(forward, backward, decimal=15)

    def test_hausdorff_value(self, path_1, path_2, h, expected):
        '''Test that the undirected Hausdorff
        distance matches expected value for
        the simple case here.'''
        # unless I pin down the random generator
        # seems unstable to use decimal > 2
        assert_almost_equal(h(path_1, path_2), expected, decimal=2)
class TestHausdorffSymmetric(_BaseHausdorffDistance):
    '''Tests for conventional and symmetric (undirected)
    Hausdorff distance between point sets in 3D.'''

    @pytest.fixture()
    def expected(self):
        # known reference value for the concentric-circle fixtures
        return 2.3

    @pytest.fixture()
    def h(self):
        return PSA.hausdorff
class TestWeightedAvgHausdorffSymmetric(_BaseHausdorffDistance):
    '''Tests for weighted average and symmetric (undirected)
    Hausdorff distance between point sets in 3D.'''

    @pytest.fixture()
    def expected(self, path_1, path_2):
        # reference value computed directly from the pairwise distance
        # matrix: mean nearest-neighbour distance in each direction,
        # averaged over the two directions
        pairwise = scipy.spatial.distance.cdist(path_1, path_2)
        forward = np.mean(np.amin(pairwise, axis=0))
        backward = np.mean(np.amin(pairwise, axis=1))
        return (forward + backward) / 2.0

    @pytest.fixture()
    def h(self):
        return PSA.hausdorff_wavg

    def test_asymmetric_weight(self, path_1, path_2, h):
        '''Test for WAvg Hausdorff to ensure that increasing N points in one
        of the paths does NOT increase the weight of its contributions.'''
        doubled_1 = np.concatenate((path_1, path_1))
        doubled_2 = np.concatenate((path_2, path_2))
        assert_almost_equal(h(doubled_1, path_2), h(path_1, doubled_2))
class TestAvgHausdorffSymmetric(_BaseHausdorffDistance):
    '''Tests for unweighted average and symmetric (undirected)
    Hausdorff distance between point sets in 3D.'''

    @pytest.fixture()
    def expected(self, path_1, path_2):
        # reference value: mean over the pooled nearest-neighbour
        # distances from both directions
        pairwise = scipy.spatial.distance.cdist(path_1, path_2)
        pooled = np.append(np.amin(pairwise, axis=0),
                           np.amin(pairwise, axis=1))
        return np.mean(pooled)

    @pytest.fixture()
    def h(self):
        return PSA.hausdorff_avg

    def test_asymmetric_weight(self, path_1, path_2, h):
        '''Test to ensure that increasing N points in one of the paths
        increases the weight of its contributions.'''
        doubled_1 = np.concatenate((path_1, path_1))
        doubled_2 = np.concatenate((path_2, path_2))
        assert_array_less(h(doubled_1, path_2), h(path_1, doubled_2))
class DiscreteFrechetDistance(object):
    """Fixtures and test for PSA.discrete_frechet on concentric circles.

    NOTE(review): this class name does not match pytest's default
    ``Test*`` collection pattern, so test_discrete_Frechet_concentric_circles
    will likely never be collected/run unless the project configures
    ``python_classes`` differently -- confirm intent or rename.
    """
    @staticmethod
    @pytest.fixture()
    def random_angles():
        return np.random.random((100,)) * np.pi * 2
    @staticmethod
    @pytest.fixture()
    def path_1(random_angles):
        # unit circle sampled at random angles, embedded in 3D (z = 0)
        random_columns = np.column_stack((random_angles, random_angles,
                                          np.zeros((100,))))
        random_columns[..., 0] = np.cos(random_columns[..., 0])
        random_columns[..., 1] = np.sin(random_columns[..., 1])
        return random_columns
    @staticmethod
    @pytest.fixture()
    def path_2(random_angles):
        # concentric circle of radius 5.5 with the same angular samples
        random_columns_2 = np.column_stack((random_angles, random_angles,
                                            np.zeros((100,))))
        random_columns_2[..., 0] = np.cos(random_columns_2[..., 0]) * 5.5
        random_columns_2[..., 1] = np.sin(random_columns_2[..., 1]) * 5.5
        return random_columns_2
    def test_discrete_Frechet_concentric_circles(self, path_1, path_2):
        # test for the simple case of the discrete Frechet distance
        # between concentric circular paths, which for a sufficiently
        # high random discrete point density around each circle
        # should be the absolute difference between their respective
        # radii (5.5 - 1.0 = 4.5)
        expected = 4.5
        actual = PSA.discrete_frechet(path_1, path_2)
        assert_almost_equal(actual, expected)
|
MDAnalysis/mdanalysis
|
testsuite/MDAnalysisTests/analysis/test_psa.py
|
Python
|
gpl-2.0
| 19,652
|
[
"MDAnalysis"
] |
5a40ea42ea574cf4983486ca487e6e94fe35df41a220a717963029c8a09d62c8
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
import django.dispatch

# Signal emitted once per daily maintenance run; receivers hook their
# cleanup tasks onto it.  NOTE(review): no arguments are documented for
# this signal here -- confirm receivers expect only the sender.
daily_cleanup = django.dispatch.Signal()
|
brianmay/karaage
|
karaage/signals.py
|
Python
|
gpl-3.0
| 804
|
[
"Brian"
] |
e4b196ad0ab04c5e506c901bb7524de9750baaeaf11bad37401ae71b809d209b
|
#!/usr/bin/env python
# Media Dragon - the modular media manager
# Copyright (C) 2012 Brian Hrebec
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Qt Graphical interface
|
bhrebec/mediadragon
|
src/lib/ui/qt.py
|
Python
|
gpl-3.0
| 767
|
[
"Brian"
] |
e3b307a409569fd3fed541fa793ab995c89881b5740f4c539bda0215509e1a19
|
#! /usr/bin/python
#
# Copyrighted David Cournapeau
# Last Change: Sat Jun 09 10:00 PM 2007 J
"""This module implements some function of densities module in C for efficiency
reasons. gaussian, such as pdf estimation, confidence interval/ellipsoids,
etc..."""
__docformat__ = 'restructuredtext'
# This module uses a C implementation through ctypes, for diagonal cases
# TODO:
# - portable way to find/open the shared library
# - full cov matrice
# - test before inclusion
import numpy as N
import numpy.linalg as lin
#from numpy.random import randn
#from scipy.stats import chi2
#import densities as D
import ctypes
from ctypes import c_uint, c_int
from numpy.ctypeslib import ndpointer, load_library
# ctypes >= 1.0 is required for the ndpointer-based argtype
# declarations below.
ctypes_major = int(ctypes.__version__.split('.')[0])
if ctypes_major < 1:
    # BUG FIX: ImportError takes no `msg` keyword argument, so the
    # original `raise ImportError(msg=...)` raised a TypeError instead
    # of the intended ImportError.  Pass the message positionally.
    raise ImportError("version of ctypes is %s, expected at least %s"
                      % (ctypes.__version__, '1.0.1'))
# Requirements for diag gden
# Load the compiled helper that lives next to this module;
# load_library resolves the path relative to __file__.
# NOTE(review): assumes the shared object is named 'c_gden.so' --
# confirm on non-ELF platforms (win32/mac).
_gden = load_library('c_gden.so', __file__)
# Declare the C signature of gden_diag (judging from the call in
# _diag_gauss_den): gden_diag(x, n, d, mu, va, out) -> int status,
# where x is (n, d) float64 samples, mu/va are float64 row vectors and
# out is the (n,) float64 result buffer written in place.
arg1 = ndpointer(dtype=N.float64)
arg2 = c_uint
arg3 = c_uint
arg4 = ndpointer(dtype=N.float64)
arg5 = ndpointer(dtype=N.float64)
arg6 = ndpointer(dtype=N.float64)
_gden.gden_diag.argtypes = [arg1, arg2, arg3, arg4, arg5, arg6]
_gden.gden_diag.restype = c_int
# Error classes
class DenError(Exception):
    """Base class for exceptions in this module.

    Attributes:
        message -- explanation of the error

    (The original docstring also documented an ``expression``
    attribute that was never stored; it has been removed.)
    """
    def __init__(self, message):
        self.message = message

    def __str__(self):
        return self.message
# The following function do all the fancy stuff to check that parameters
# are Ok, and call the right implementation if args are OK.
def gauss_den(x, mu, va, log=False):
    """Multivariate Gaussian density at the points x for mean mu and
    variance va.

    Vectors are row vectors, except va, which may also be a matrix
    (a row-vector va means a diagonal covariance).

    If log is True, the log density is returned instead (useful
    against underflow).
    """
    mu = N.atleast_2d(mu)
    va = N.atleast_2d(va)
    x = N.atleast_2d(x)

    # --- validate ranks ----------------------------------------------
    if mu.ndim != 2:
        raise DenError("mu is not rank 2")
    if va.ndim != 2:
        raise DenError("va is not rank 2")
    if x.ndim != 2:
        raise DenError("x is not rank 2")

    dim = x.shape[1]
    mu_rows, mu_cols = mu.shape
    va_rows, va_cols = va.shape

    # --- validate shapes against each other --------------------------
    # mean must be a single row matching x's dimensionality
    if mu_rows != 1:
        raise DenError("mean must be a row vector!")
    if mu_cols != dim:
        raise DenError("x and mu not same dim")
    # variance: either one row (diagonal) or a square matrix
    if va_cols != dim:
        raise DenError("mu and va not same dim")
    if va_rows != 1 and va_rows != dim:
        raise DenError("va not square")

    # --- dispatch on dimensionality / variance layout ----------------
    if dim == 1:
        # scalar case
        return _scalar_gauss_den(x[:, 0], mu[0, 0], va[0, 0], log)
    if va_rows == 1:
        # diagonal covariance
        return _diag_gauss_den(x, mu, va, log)
    if va_cols == va_rows:
        # full covariance matrix
        return _full_gauss_den(x, mu, va, log)
    raise DenError("variance mode not recognized, this is a bug")
# Those 3 functions do almost all the actual computation
def _scalar_gauss_den(x, mu, va, log):
""" This function is the actual implementation
of gaussian pdf in scalar case. It assumes all args
are conformant, so it should not be used directly
** Expect centered data (ie with mean removed) **
Call gauss_den instead"""
d = mu.size
inva = 1/va
fac = (2*N.pi) ** (-d/2.0) * N.sqrt(inva)
y = ((x-mu) ** 2) * -0.5 * inva
if not log:
y = fac * N.exp(y)
else:
y = y + log(fac)
return y
def _diag_gauss_den(x, mu, va, log):
    """Gaussian pdf for a diagonal covariance (row-vector variance va).

    Actual implementation of the diagonal case; assumes all args are
    conformant, so it should not be used directly -- call gauss_den
    instead.  (The original docstring said "scalar case"; this is the
    diagonal branch.)
    """
    # Diagonal matrix case
    d = mu.size
    n = x.shape[0]
    if not log:
        # Fast path: delegate to the compiled C routine gden_diag,
        # which fills `y` in place.
        y = N.zeros(n)
        vat = va.copy()
        # NOTE(review): the C routine is handed the raw variances
        # (`vat`), while the commented-out call below passed
        # inva = 1/va -- confirm which form c_gden.so expects.
        # _gden.gden_diag(N.require(x, requirements = 'C'), n, d,
        #         N.require(mu, requirements = 'C'),
        #         N.require(inva, requirements = 'C'),
        #         N.require(y, requirements = 'C'))
        # All buffers must be C-contiguous for the ctypes call.
        x = N.require(x, requirements = 'C')
        mu = N.require(mu, requirements = 'C')
        vat = N.require(vat, requirements = 'C')
        y = N.require(y, requirements = 'C')
        _gden.gden_diag(x, n, d, mu, vat, y)
        return y
        # (alternative raw-pointer ctypes invocation, kept for reference)
        # _gden.gden_diag.restype = c_int
        # _gden.gden_diag.argtypes = [POINTER(c_double), c_uint, c_uint,
        #         POINTER(c_double), POINTER(c_double), POINTER(c_double)]
        # y = N.zeros(n)
        # inva= 1/va
        # _gden.gden_diag(x.ctypes.data_as(POINTER(c_double)),
        #         n, d,
        #         mu.ctypes.data_as(POINTER(c_double)),
        #         inva.ctypes.data_as(POINTER(c_double)),
        #         y.ctypes.data_as(POINTER(c_double)))
    else:
        # Pure NumPy fallback for the log density: a diagonal Gaussian
        # log pdf is the sum of per-dimension scalar log pdfs.
        y = _scalar_gauss_den(x[:, 0], mu[0, 0], va[0, 0], log)
        for i in range(1, d):
            y += _scalar_gauss_den(x[:, i], mu[0, i], va[0, i], log)
        return y
def _full_gauss_den(x, mu, va, log):
""" This function is the actual implementation
of gaussian pdf in full matrix case.
It assumes all args are conformant, so it should
not be used directly Call gauss_den instead
** Expect centered data (ie with mean removed) **
Does not check if va is definite positive (on inversible
for that matter), so the inverse computation and/or determinant
would throw an exception."""
d = mu.size
inva = lin.inv(va)
fac = 1 / N.sqrt( (2*N.pi) ** d * N.fabs(lin.det(va)))
# we are using a trick with sum to "emulate"
# the matrix multiplication inva * x without any explicit loop
y = N.dot((x-mu), inva)
y = -0.5 * N.sum(y * (x-mu), 1)
if not log:
y = fac * N.exp(y)
else:
y = y + N.log(fac)
return y
if __name__ == "__main__":
    # No runnable self-test; the accuracy comparison between the pure
    # Python implementation (densities module) and this C-backed one is
    # kept below, commented out -- it needs scipy.stats and the
    # densities module to be importable.
    pass
    ##=========================================
    ## Test accuracy between pure and C python
    ##=========================================
    #mu = N.array([2.0, 3])
    #va = N.array([5.0, 3])
    ## Generate a multivariate gaussian of mean mu and covariance va
    #nframes = 1e4
    #X = randn(nframes, 2)
    #Yc = N.dot(N.diag(N.sqrt(va)), X.transpose())
    #Yc = Yc.transpose() + mu
    #Y = D.gauss_den(Yc, mu, va)
    #Yt = gauss_den(Yc, mu, va)
    #print "Diff is " + str(N.sqrt(N.sum((Y-Yt) ** 2))/nframes/2)
|
jhmadhav/pynopticon
|
src/em/_c_densities.py
|
Python
|
gpl-3.0
| 7,087
|
[
"Gaussian"
] |
67c06253e7ddd9fb961219dc54d245f53206add26fff26dbfa9ef347303c9376
|
#! /usr/bin/env python
"""
Module with fake companion injection functions.
"""
__author__ = 'Carlos Alberto Gomez Gonzalez, Valentin Christiaens'
__all__ = ['collapse_psf_cube',
'normalize_psf',
'cube_inject_companions',
'generate_cube_copies_with_injections',
'frame_inject_companion']
import numpy as np
from scipy import stats
from scipy.interpolate import interp1d
from packaging import version
import photutils
if version.parse(photutils.__version__) >= version.parse('0.3'):
# for photutils version >= '0.3' use photutils.centroids.centroid_com
from photutils.centroids import centroid_com as cen_com
else:
# for photutils version < '0.3' use photutils.centroid_com
import photutils.centroid_com as cen_com
from ..preproc import (cube_crop_frames, frame_shift, frame_crop, cube_shift,
frame_rotate)
from ..var import (frame_center, fit_2dgaussian, fit_2dairydisk, fit_2dmoffat,
get_circle, get_annulus_segments, dist_matrix)
from ..config.utils_conf import print_precision, check_array
def cube_inject_companions(array, psf_template, angle_list, flevel, plsc,
                           rad_dists, n_branches=1, theta=0, imlib='vip-fft',
                           interpolation='lanczos4', transmission=None,
                           full_output=False, verbose=True):
    """ Injects fake companions in branches, at given radial distances.

    Parameters
    ----------
    array : 3d/4d numpy ndarray
        Input cube. This is copied before the injections take place, so
        ``array`` is never modified.
    psf_template : numpy ndarray
        2d array with the normalized PSF template, with an odd or even shape.
        The PSF image must be centered wrt to the array! Therefore, it is
        recommended to run the function ``normalize_psf`` to generate a
        centered and flux-normalized PSF template.
        It can also be a 3D array, but length should match ADI cube.
        In the ADI+mSDI (4D input cube) case it must be a 3d array.
    angle_list : 1d numpy ndarray
        List of parallactic angles, in degrees.
    flevel : float or list/1d array
        Factor for controlling the brightness of the fake companions. If a
        float, the same flux is used for all frames. For a 3D input cube: if
        a list/1d array is provided, it should have same length as number of
        frames in the 3D cube. For a 4D (ADI+mSDI) input cube, a list/1d
        array should have the same length as the number of spectral channels
        (i.e. provide a spectrum).
    plsc : float
        Value of the plsc in arcsec/px. Only used for printing debug output
        when ``verbose=True``.
    rad_dists : float, list or array 1d
        Vector of radial distances of fake companions in pixels.
    n_branches : int, optional
        Number of azimuthal branches.
    theta : float, optional
        Angle in degrees for rotating the position of the first branch that
        by default is located at zero degrees. Theta counts counterclockwise
        from the positive x axis.
    imlib : str, optional
        See the documentation of the ``vip_hci.preproc.frame_shift`` function.
    interpolation : str, optional
        See the documentation of the ``vip_hci.preproc.frame_shift`` function.
    transmission: numpy array, optional
        Radial transmission of the coronagraph, if any. Array with 2 columns.
        First column is the radial separation in pixels. Second column is the
        corresponding off-axis transmission (between 0 and 1).
    full_output : bool, optional
        Returns the ``x`` and ``y`` coordinates of the injections,
        additionally to the new array.
    verbose : bool, optional
        If True prints out additional information.

    Returns
    -------
    array_out : numpy ndarray
        Output array with the injected fake companions.
    positions : list of tuple(y, x)
        [full_output] Coordinates of the injections in the first frame (and
        first wavelength for 4D cubes).
    psf_trans: array with injected psf affected by transmission (only
        returned if transmission is not None)
    """
    check_array(array, dim=(3, 4), msg="array")
    check_array(psf_template, dim=(2, 3), msg="psf_template")

    if array.ndim == 4 and psf_template.ndim != 3:
        raise ValueError('`psf_template` must be a 3d array')
    if not isinstance(plsc, float):
        raise TypeError("`plsc` must be a float")
    if not np.isscalar(flevel):
        if len(flevel) != array.shape[0]:
            msg = "if not scalar `flevel` must have same length as array"
            raise TypeError(msg)

    # set imlib for rotation & shift (rotation used if transmission != None)
    if imlib == 'opencv':
        imlib_sh = imlib
        imlib_rot = imlib
    elif imlib == 'skimage' or imlib == 'ndimage-interp':
        imlib_sh = 'ndimage-interp'
        imlib_rot = 'skimage'
    elif imlib == 'vip-fft' or imlib == 'ndimage-fourier':
        imlib_sh = 'ndimage-fourier'
        imlib_rot = 'vip-fft'
    else:
        raise TypeError("Interpolation not recognized.")

    rad_dists = np.asarray(rad_dists).reshape(-1)  # forces ndim=1
    positions = []

    if transmission is not None:
        if transmission.shape[0] != 2:
            raise ValueError("transmission should be a (2,N) ndarray")
        # if transmission doesn't have the right format for
        # interpolation, adapt it
        if transmission[0, 0] != 0 or \
                transmission[0, -1] < np.sqrt(2) * array.shape[-1]:
            trans_rad_list = transmission[0].tolist()
            trans_list = transmission[1].tolist()
            # should have a zero point
            if transmission[0, 0] != 0:
                trans_rad_list = [0] + trans_rad_list
                trans_list = [0] + trans_list
            # last point should be max possible distance between fc and star
            if transmission[0, -1] < np.sqrt(2) * array.shape[-1]:
                trans_rad_list = trans_rad_list + \
                    [np.sqrt(2) * array.shape[-1]]
                trans_list = trans_list + [1]
            transmission = np.array([trans_rad_list, trans_list])
        # last radial separation should be beyond the edge of frame
        interp_trans = interp1d(transmission[0], transmission[1])

    # ADI case
    if array.ndim == 3:
        ceny, cenx = frame_center(array[0])
        if not rad_dists[-1] < array[0].shape[0] / 2:
            raise ValueError('rad_dists last location is at the border (or '
                             'outside) of the field')

        size_fc = psf_template.shape[1]
        nframes = array.shape[0]
        w = int(np.ceil(size_fc / 2))
        if size_fc % 2:  # new convention
            w -= 1
        sty = int(ceny) - w
        stx = int(cenx) - w

        # fake companion in the center of a zeros frame
        fc_fr = np.zeros_like(array)
        if psf_template.ndim == 2:
            try:
                for fr in range(nframes):
                    # BUG FIX: the x slice previously reused ``sty``
                    # (fc_fr[fr, sty:sty+size_fc, sty:sty+size_fc]),
                    # misplacing the template horizontally whenever
                    # ceny != cenx; use ``stx`` as in the 3d-template
                    # branch below.
                    fc_fr[fr, sty:sty+size_fc, stx:stx+size_fc] = psf_template
            except ValueError as e:
                print("cannot place PSF on frame. Please verify the shapes! "
                      "psf shape: {}, array shape: {}".format(
                          psf_template.shape, array.shape))
                raise e
        else:
            try:
                for fr in range(nframes):
                    fc_fr[fr, sty:sty+size_fc, stx:stx+size_fc] = \
                        psf_template[fr]
            except ValueError as e:
                print("cannot place PSF on frames. Please verify the shapes!"
                      "psf shape: {}, array shape: {}".format(
                          psf_template.shape, array.shape))
                raise e

        array_out = array.copy()
        for branch in range(n_branches):
            ang = (branch * 2 * np.pi / n_branches) + np.deg2rad(theta)
            if verbose:
                print('Branch {}:'.format(branch+1))
            for rad in rad_dists:
                if transmission is not None:
                    # attenuate the (pre-derotation) PSF frames with the
                    # radial transmission centered on the star position
                    fc_fr_rad = fc_fr.copy()
                    y_star = ceny
                    x_star = cenx - rad
                    d = dist_matrix(fc_fr.shape[1], x_star, y_star)
                    for i in range(d.shape[0]):
                        fc_fr_rad[:, i] = interp_trans(d[i]) * fc_fr[:, i]
                    if full_output:
                        # check the effect of transmission on a single PSF tmp
                        psf_trans = frame_rotate(fc_fr_rad[0],
                                                 -(np.rad2deg(ang)),
                                                 imlib=imlib_rot,
                                                 interpolation=interpolation)
                        shift_y = rad * np.sin(ang)
                        shift_x = rad * np.cos(ang)
                        psf_trans = frame_shift(psf_trans, shift_y, shift_x,
                                                imlib_sh, interpolation)

                for fr in range(nframes):
                    shift_y = rad * np.sin(ang - np.deg2rad(angle_list[fr]))
                    shift_x = rad * np.cos(ang - np.deg2rad(angle_list[fr]))
                    if transmission is not None:
                        fc_fr_ang = frame_rotate(fc_fr_rad[fr],
                                                 -(ang*180/np.pi -
                                                   angle_list[fr]),
                                                 imlib=imlib_rot,
                                                 interpolation=interpolation)
                    else:
                        fc_fr_ang = fc_fr[fr]
                    if np.isscalar(flevel):
                        array_out[fr] += (frame_shift(fc_fr_ang, shift_y,
                                                      shift_x, imlib_sh,
                                                      interpolation)
                                          * flevel)
                    else:
                        array_out[fr] += (frame_shift(fc_fr_ang, shift_y,
                                                      shift_x, imlib_sh,
                                                      interpolation)
                                          * flevel[fr])

                pos_y = rad * np.sin(ang) + ceny
                pos_x = rad * np.cos(ang) + cenx
                rad_arcs = rad * plsc
                positions.append((pos_y, pos_x))
                if verbose:
                    print('\t(X,Y)=({:.2f}, {:.2f}) at {:.2f} arcsec '
                          '({:.2f} pxs from center)'.format(pos_x, pos_y,
                                                            rad_arcs, rad))

    # ADI+mSDI (IFS) case
    if array.ndim == 4 and psf_template.ndim == 3:
        ceny, cenx = frame_center(array[0, 0])
        if not rad_dists[-1] < array[0, 0].shape[0] / 2:
            raise ValueError('rad_dists last location is at the border (or '
                             'outside) of the field')

        sizey = array.shape[2]
        sizex = array.shape[3]
        size_fc = psf_template.shape[2]  # considering square frames
        nframes_wav = array.shape[0]
        nframes_adi = array.shape[1]
        fc_fr = np.zeros((nframes_wav, sizey, sizex), dtype=np.float64)  # 3d

        for i in range(nframes_wav):
            w = int(np.floor(size_fc/2.))
            # fake companion in the center of a zeros frame
            if (psf_template[0].shape[1] % 2) == 0:
                fc_fr[i, int(ceny)-w:int(ceny)+w,
                      int(cenx)-w:int(cenx)+w] = psf_template[i]
            else:
                fc_fr[i, int(ceny)-w:int(ceny)+w+1,
                      int(cenx)-w:int(cenx)+w+1] = psf_template[i]

        array_out = array.copy()
        for branch in range(n_branches):
            ang = (branch * 2 * np.pi / n_branches) + np.deg2rad(theta)
            if verbose:
                print('Branch {}:'.format(branch+1))
            for rad in rad_dists:
                for fr in range(nframes_adi):
                    shift_y = rad * np.sin(ang - np.deg2rad(angle_list[fr]))
                    shift_x = rad * np.cos(ang - np.deg2rad(angle_list[fr]))
                    shift = cube_shift(fc_fr, shift_y, shift_x, imlib_sh,
                                       interpolation)
                    if np.isscalar(flevel):
                        array_out[:, fr] += shift * flevel
                    else:
                        for i in range(len(flevel)):
                            array_out[i, fr] += shift[i] * flevel[i]

                pos_y = rad * np.sin(ang) + ceny
                pos_x = rad * np.cos(ang) + cenx
                rad_arcs = rad * plsc
                positions.append((pos_y, pos_x))
                if verbose:
                    print('\t(X,Y)=({:.2f}, {:.2f}) at {:.2f} arcsec '
                          '({:.2f} pxs from center)'.format(pos_x, pos_y,
                                                            rad_arcs, rad))

    if full_output:
        if transmission is not None:
            return array_out, positions, psf_trans
        else:
            return array_out, positions
    else:
        return array_out
def generate_cube_copies_with_injections(array, psf_template, angle_list, plsc,
                                         n_copies=100, inrad=8, outrad=12,
                                         dist_flux=("uniform", 2, 500)):
    """
    Yield copies of ``array``, each with one randomly drawn fake injection.

    Convenience wrapper around ``cube_inject_companions``: for each of the
    ``n_copies`` copies it draws a random position inside an annulus and a
    random flux from the requested distribution.

    Parameters
    ----------
    array : 3d/4d numpy ndarray
        Original input cube.
    psf_template : 2d/3d numpy ndarray
        Array with the normalized psf template. It should have an odd shape.
        It's recommended to run the function ``normalize_psf`` to get a
        proper PSF template. In the ADI+mSDI case it must be a 3d array.
    angle_list : 1d numpy ndarray
        List of parallactic angles, in degrees.
    plsc : float
        Value of the plsc in arcsec/px. Only used for printing debug output
        when ``verbose=True``.
    n_copies : int
        This is the number of 'cube copies' returned.
    inrad, outrad : float
        Inner and outer radius of the injections. The actual injection
        position is chosen randomly.
    dist_flux : tuple('method', *params)
        Tuple describing the flux selection. Method can be a function, the
        ``*params`` are passed to it. Method can also be a string, for a
        pre-defined random function:

            ``("skewnormal", skew, mean, var)``
                uses scipy.stats.skewnorm.rvs
            ``("uniform", low, high)``
                uses np.random.uniform
            ``("normal", loc, scale)``
                uses np.random.normal

    Yields
    ------
    fake_data : dict
        Represents a copy of the original ``array``, with fake injections.
        Keys: ``cube`` (injected copy), ``positions`` (list of (y, x)
        tuples), ``dist`` (separation in px), ``theta`` (initial angle,
        degrees) and ``flux`` as passed to ``cube_inject_companions``.
    """
    # TODO: 'mask' parameter for known companions?
    annulus_width = outrad - inrad
    ys, xs = get_annulus_segments(array[0], inrad, annulus_width)[0]
    n_patches = ys.shape[0]

    # flux values, drawn from the requested distribution, then sorted
    sampler = dict(skewnormal=stats.skewnorm.rvs,
                   normal=np.random.normal,
                   uniform=np.random.uniform).get(dist_flux[0], dist_flux[0])
    fluxes = sorted(sampler(*dist_flux[1:], size=n_copies))

    # one random annulus pixel per copy
    chosen = np.random.randint(0, n_patches, size=n_copies)

    for n in range(n_copies):
        cy, cx = frame_center(array[0])
        injx = xs[chosen[n]] - cx
        injy = ys[chosen[n]] - cy
        dist = np.sqrt(injx**2 + injy**2)
        theta = np.mod(np.arctan2(injy, injx) / np.pi * 180, 360)

        fake_cube, positions = cube_inject_companions(
            array, psf_template, angle_list, plsc=plsc,
            flevel=fluxes[n], theta=theta,
            rad_dists=dist, n_branches=1,  # TODO: multiple injections?
            full_output=True, verbose=False
        )

        yield dict(
            positions=positions,
            dist=dist, theta=theta, flux=fluxes[n],
            cube=fake_cube
        )
def frame_inject_companion(array, array_fc, pos_y, pos_x, flux,
                           imlib='vip-fft', interpolation='lanczos4'):
    """ Injects a fake companion in a single frame (it could be a single
    multi-wavelength frame) at given coordinates, scaled by ``flux``.
    """
    if array.ndim not in (2, 3):
        raise TypeError('Array is not a 2d or 3d array.')

    if array.ndim == 2:
        side = array_fc.shape[0]
        cy, cx = frame_center(array)
        cy, cx = int(cy), int(cx)
        half = int(np.floor(side / 2.))
        odd = side % 2
        # paste the companion template at the center of an empty frame
        canvas = np.zeros_like(array)
        canvas[cy - half:cy + half + odd,
               cx - half:cx + half + odd] = array_fc
        shifted = frame_shift(canvas, pos_y - cy, pos_x - cx, imlib,
                              interpolation)
        return array + shifted * flux

    # 3d: same procedure applied to every (wavelength) frame of the cube
    side = array_fc.shape[1]
    cy, cx = frame_center(array[0])
    cy, cx = int(cy), int(cx)
    half = int(np.floor(side / 2.))
    odd = side % 2
    canvas = np.zeros_like(array)
    canvas[:, cy - half:cy + half + odd,
           cx - half:cx + half + odd] = array_fc
    shifted = cube_shift(canvas, pos_y - cy, pos_x - cx, imlib,
                         interpolation)
    return array + shifted * flux
def collapse_psf_cube(array, size, fwhm=4, verbose=True, collapse='mean'):
    """ Creates a 2d PSF template from a cube of non-saturated off-axis
    frames of the star by collapsing the frames and normalizing the PSF
    flux.

    Parameters
    ----------
    array : numpy ndarray, 3d or 4d
        Input cube.
    size : int
        Size of the squared subimage.
    fwhm : float, optional
        The size of the Full Width Half Maximum in pixels.
    verbose : {True, False}, bool optional
        Whether to print to stdout information about file opening,
        cropping and completion of the psf template.
    collapse : {'mean', 'median'}, string optional
        Defines the way the frames are collapsed.

    Returns
    -------
    psf_normd : numpy ndarray
        Normalized PSF.
    """
    if array.ndim not in (3, 4):
        raise TypeError('Array is not a cube, 3d or 4d array.')

    n_frames = array.shape[0]
    cropped = cube_crop_frames(array, size=size, verbose=verbose)
    if collapse == 'mean':
        template = np.mean(cropped, axis=0)
    elif collapse == 'median':
        template = np.median(cropped, axis=0)
    else:
        raise TypeError('Collapse mode not recognized.')

    psf_normd = normalize_psf(template, size=size, fwhm=fwhm)
    if verbose:
        print("Done scaled PSF template from the average of", n_frames,
              "frames.")
    return psf_normd
def normalize_psf(array, fwhm='fit', size=None, threshold=None, mask_core=None,
model='gauss', imlib='vip-fft', interpolation='lanczos4',
force_odd=True, full_output=False, verbose=True, debug=False):
""" Normalizes a PSF (2d or 3d array), to have the flux in a 1xFWHM
aperture equal to one. It also allows to crop the array and center the PSF
at the center of the array(s).
Parameters
----------
array: numpy ndarray
The PSF, 2d (ADI data) or 3d array (IFS data).
fwhm: int, float, 1d array or str, optional
The the Full Width Half Maximum in pixels. It can handle a different
FWHM value for different wavelengths (IFS data). If set to 'fit' then
a ``model`` (assuming the PSF is centered in the array) is fitted to
estimate the FWHM in 2D or 3D PSF arrays.
size : int or None, optional
If int it will correspond to the size of the centered sub-image to be
cropped form the PSF array. The PSF is assumed to be rougly centered wrt
the array.
threshold : None of float, optional
Sets to zero small values, trying to leave only the core of the PSF.
mask_core : None of float, optional
Sets the radius of a circular aperture for the core of the PSF,
everything else will be set to zero.
model : {'gauss', 'moff', 'airy'}, str optional
The assumed model used to fit the PSF: either a Gaussian, a Moffat
or an Airy 2d distribution.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
force_odd : str, optional
If True the resulting array will have odd size (and the PSF will be
placed at its center). If False, and the frame size is even, then the
PSF will be put at the center of an even-sized frame.
full_output : bool, optional
If True the flux in a FWHM aperture is returned along with the
normalized PSF.
verbose : bool, optional
If True intermediate results are printed out.
debug : bool, optional
If True the fitting will output additional information and a diagnostic
plot will be shown (this might cause a long output if ``array`` is 3d
and has many slices).
Returns
-------
psf_norm : numpy ndarray
The normalized PSF (2d or 3d array).
fwhm_flux : numpy ndarray
[full_output=True] The flux in a FWHM aperture (it can be a single
value or a vector).
fwhm : numpy ndarray
[full_output=True] The FWHM size. If ``fwhm`` is set to 'fit' then it
is the fitted FWHM value according to the assumed ``model`` (the mean in
X and Y is returned when ``model`` is set to 'gauss').
"""
def psf_norm_2d(psf, fwhm, threshold, mask_core, full_output, verbose):
    """Center and flux-normalize a single 2d PSF frame.

    Shifts the PSF so its fitted centroid sits at the array center, then
    rescales it so the flux inside a 1xFWHM circular aperture equals 1.
    Optionally thresholds small values and/or masks everything outside a
    core radius. Relies on closure variables of the enclosing function
    (``fit_2d``, ``imlib``, ``interpolation``).
    """
    # we check if the psf is centered and fix it if needed
    cy, cx = frame_center(psf, verbose=False)
    xcom, ycom = cen_com(psf)
    # NOTE(review): with `or`, re-centering is skipped as soon as EITHER
    # axis matches the center-of-mass; confirm `and` was not intended.
    if not (np.allclose(cy, ycom, atol=1e-2) or
            np.allclose(cx, xcom, atol=1e-2)):
        # first we find the centroid and put it in the center of the array
        centry, centrx = fit_2d(psf, full_output=False, debug=False)
        shiftx, shifty = centrx - cx, centry - cy
        psf = frame_shift(psf, -shifty, -shiftx, imlib=imlib,
                          interpolation=interpolation)
        # two refinement passes: re-fit the centroid and re-shift, since a
        # single sub-pixel shift may not converge exactly
        for _ in range(2):
            centry, centrx = fit_2d(psf, full_output=False, debug=False)
            cy, cx = frame_center(psf, verbose=False)
            shiftx, shifty = centrx - cx, centry - cy
            psf = frame_shift(psf, -shifty, -shiftx, imlib=imlib,
                              interpolation=interpolation)
    # we check whether the flux is normalized and fix it if needed
    fwhm_aper = photutils.CircularAperture((cx,cy), fwhm/2)
    fwhm_aper_phot = photutils.aperture_photometry(psf, fwhm_aper,
                                                   method='exact')
    fwhm_flux = np.array(fwhm_aper_phot['aperture_sum'])
    # only renormalize when the aperture flux deviates >10% from unity
    if fwhm_flux > 1.1 or fwhm_flux < 0.9:
        psf_norm_array = psf / np.array(fwhm_aper_phot['aperture_sum'])
    else:
        psf_norm_array = psf
    # zero-out faint wings below `threshold`, if requested
    if threshold is not None:
        psf_norm_array[np.where(psf_norm_array < threshold)] = 0
    # keep only a circular core of radius `mask_core`, if requested
    if mask_core is not None:
        psf_norm_array = get_circle(psf_norm_array, radius=mask_core)
    if verbose:
        print("Flux in 1xFWHM aperture: {:.3f}".format(fwhm_flux[0]))
    if full_output:
        return psf_norm_array, fwhm_flux, fwhm
    else:
        return psf_norm_array
###########################################################################
if model == 'gauss':
fit_2d = fit_2dgaussian
elif model == 'moff':
fit_2d = fit_2dmoffat
elif model == 'airy':
fit_2d = fit_2dairydisk
else:
raise ValueError('`Model` not recognized')
if array.ndim == 2:
y, x = array.shape
if size is not None:
if force_odd and size % 2 == 0:
size += 1
msg = "`Force_odd` is True therefore `size` was set to {}"
print(msg.format(size))
else:
if force_odd and y % 2 == 0:
size = y - 1
msg = "`Force_odd` is True and frame size is even, therefore "
msg += "new frame size was set to {}"
print(msg.format(size))
if size is not None:
if size < array.shape[0]:
array = frame_crop(array, size, force=True, verbose=False)
else:
array = array.copy()
else:
array = array.copy()
if fwhm == 'fit':
fit = fit_2d(array, full_output=True, debug=debug)
if model == 'gauss':
fwhm = np.mean((fit['fwhm_x'], fit['fwhm_y']))
if verbose:
print("\nMean FWHM: {:.3f}".format(fwhm))
elif model == 'moff' or model == 'airy':
fwhm = fit.fwhm.at[0]
if verbose:
print("FWHM: {:.3f}".format(fwhm))
res = psf_norm_2d(array, fwhm, threshold, mask_core, full_output,
verbose)
return res
elif array.ndim == 3:
n, y, x = array.shape
if size is not None:
if force_odd and size % 2 == 0:
size += 1
msg = "`Force_odd` is True therefore `size` was set to {}"
print(msg.format(size))
else:
if force_odd and y % 2 == 0:
size = y - 1
msg = "`Force_odd` is True and frame size is even, therefore "
msg += "new frame size was set to {}"
print(msg.format(size))
if size is not None:
if size < array.shape[1]:
array = cube_crop_frames(array, size, force=True, verbose=False)
else:
array = array.copy()
if isinstance(fwhm, (int, float)):
fwhm = [fwhm] * array.shape[0]
elif fwhm == 'fit':
fits_vect = [fit_2d(array[i], full_output=True, debug=debug) for i
in range(n)]
if model == 'gauss':
fwhmx = [fits_vect[i]['fwhm_x'] for i in range(n)]
fwhmy = [fits_vect[i]['fwhm_y'] for i in range(n)]
fwhm_vect = [np.mean((fwhmx[i], fwhmy[i])) for i in range(n)]
fwhm = np.array(fwhm_vect)
if verbose:
print("Mean FWHM per channel: ")
print_precision(fwhm)
elif model == 'moff' or model == 'airy':
fwhm_vect = [fits_vect[i]['fwhm'] for i in range(n)]
fwhm = np.array(fwhm_vect)
fwhm = fwhm.flatten()
if verbose:
print("FWHM per channel:")
print_precision(fwhm)
array_out = []
fwhm_flux = np.zeros(n)
for fr in range(array.shape[0]):
restemp = psf_norm_2d(array[fr], fwhm[fr], threshold, mask_core,
True, False)
array_out.append(restemp[0])
fwhm_flux[fr] = restemp[1]
array_out = np.array(array_out)
if verbose:
print("Flux in 1xFWHM aperture: ")
print_precision(fwhm_flux)
if full_output:
return array_out, fwhm_flux, fwhm
else:
return array_out
|
vortex-exoplanet/VIP
|
vip_hci/fm/fakecomp.py
|
Python
|
mit
| 28,708
|
[
"Gaussian"
] |
d3c21d279dd8a4f38151a9263a62c05a58aedeaa20e46e1bc57152d03d36d2de
|
#!/usr/bin/env python3
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, Qiyun Zhu and Katharina Dittmar.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import TestCase, main
from os import remove, makedirs
from os.path import join, isdir, isfile, dirname, realpath
from shutil import rmtree, copy, move
from tempfile import mkdtemp
import numpy as np
import pandas as pd
from sklearn.neighbors import KernelDensity
from hgtector.analyze import Analyze
from hgtector.util import (
load_configs, add_children, get_descendants, taxdump_from_text)
class AnalyzeTests(TestCase):
    """Unit tests for the HGT analysis workflow (``hgtector.analyze.Analyze``).

    Each test drives one step of the pipeline, either against the bundled
    fixture data under ``data/`` or against synthetic distributions created
    in ``setUp``. Expected numbers (counts, bandwidths, silhouettes) are
    pinned to the fixed RNG seed below — do not change the seed or the
    sample sizes without regenerating the expectations.
    """

    def setUp(self):
        # fresh scratch directory per test; removed in tearDown
        self.tmpdir = mkdtemp()
        self.datadir = join(dirname(realpath(__file__)), 'data')
        # fixed seed so the synthetic distributions are reproducible
        np.random.seed(42)
        self.dist_norm1 = np.random.normal(5.0, 2.0, 1500)
        self.dist_norm2 = np.random.normal(1.0, 0.5, 500)
        self.dist_lognorm = np.random.lognormal(0.0, 1.0, 1000)
        self.dist_gamma = np.random.gamma(2, 2, 800)

    def tearDown(self):
        rmtree(self.tmpdir)

    def test___call__(self):
        """End-to-end run of the whole analysis on the Ecoli fixture."""
        # run Ecoli sample using the Silverman method
        me = Analyze()

        # bare function used as a mutable namespace standing in for argparse
        def args(): return None
        args.input = join(self.datadir, 'Ecoli', 'search')
        args.output = join(self.tmpdir, 'output')
        args.taxdump = join(self.datadir, 'Ecoli', 'taxdump')
        args.input_tax = None
        args.self_tax = None
        args.close_tax = None
        args.self_rank = None
        args.close_size = None
        args.distal_top = None
        args.bandwidth = 'silverman'
        args.from_scores = False
        me(args)
        self.assertEqual(me.df[me.df['hgt']].shape[0], 16)
        # use existing score table, run grid search
        args.input = None
        args.from_scores = True
        args.bandwidth = 'grid'
        me(args)
        self.assertEqual(me.df[me.df['hgt']].shape[0], 18)
        rmtree(args.output)

    def test_set_parameters(self):
        me = Analyze()
        me.cfg = load_configs()

        def args(): return None
        # input is file
        infile = join(self.datadir, 'DnaK', 'search', 'sample.tsv')
        outdir = join(self.tmpdir, 'output')
        args.input = infile
        args.output = outdir
        args.noise = 0.75
        me.set_parameters(args)
        self.assertEqual(me.input, infile)
        self.assertEqual(me.output, outdir)
        self.assertTrue(isdir(outdir))
        self.assertDictEqual(me.input_map, {'sample': infile})
        self.assertEqual(me.noise, 75)
        # coverage threshold too small
        args.input_cov = 25
        with self.assertRaises(ValueError) as ctx:
            me.set_parameters(args)
        # (the typo "interence" mirrors the implementation's message)
        msg = 'Taxonomy coverage for auto-interence must be at least 50%.'
        self.assertEqual(str(ctx.exception), msg)
        args.input_cov = 75
        # input is directory
        indir = join(self.datadir, 'DnaK', 'search')
        args.input = indir
        me.set_parameters(args)
        self.assertEqual(me.input, indir)
        self.assertDictEqual(me.input_map, {'sample': infile})
        rmtree(outdir)
        # input is invalid
        not_path = 'I am not a path'
        args.input = not_path
        with self.assertRaises(ValueError) as ctx:
            me.set_parameters(args)
        msg = f'Invalid input data file or directory: {not_path}.'
        self.assertEqual(str(ctx.exception), msg)
        # input has no search result
        args.input = self.tmpdir
        with self.assertRaises(ValueError) as ctx:
            me.set_parameters(args)
        msg = f'No input data are found under: {self.tmpdir}.'
        self.assertEqual(str(ctx.exception), msg)
        # no input (which is okay)
        delattr(me, 'input_map')
        args.input = None
        me.set_parameters(args)
        self.assertFalse(hasattr(me, 'input_map'))

    def test_read_input(self):
        me = Analyze()

        # shared assertions for every successful read below
        def batch_assert():
            self.assertEqual(len(me.taxdump), 76)
            self.assertEqual(me.data['sample'][0]['id'], 'WP_000516135.1')
            self.assertEqual(me.data['sample'][0]['hits'].shape, (12, 5))
        # DnaK - default mode
        me.taxdump = join(self.datadir, 'DnaK', 'taxdump')
        me.input_map = {'sample': join(
            self.datadir, 'DnaK', 'search', 'sample.tsv')}
        me.read_input()
        batch_assert()
        # missing taxonomy
        copy(join(self.datadir, 'DnaK', 'search', 'sample.tsv'),
             join(self.tmpdir, 'sample.tsv'))
        me.input = self.tmpdir
        me.taxdump = None
        with self.assertRaises(ValueError) as ctx:
            me.read_input()
        msg = 'Missing taxonomy database.'
        self.assertEqual(str(ctx.exception), msg)
        # taxonomy in same directory as search result
        copy(join(self.datadir, 'DnaK', 'taxdump', 'nodes.dmp'),
             join(self.tmpdir, 'nodes.dmp'))
        copy(join(self.datadir, 'DnaK', 'taxdump', 'names.dmp'),
             join(self.tmpdir, 'names.dmp'))
        me.input_map = {'sample': join(self.tmpdir, 'sample.tsv')}
        me.read_input()
        batch_assert()
        # taxonomy in parent directory as search result
        indir = join(self.tmpdir, 'search')
        makedirs(indir)
        move(join(self.tmpdir, 'sample.tsv'), join(indir, 'sample.tsv'))
        me.input = indir
        me.input_map = {'sample': join(indir, 'sample.tsv')}
        me.taxdump = None
        me.read_input()
        batch_assert()
        rmtree(indir)
        remove(join(self.tmpdir, 'nodes.dmp'))
        remove(join(self.tmpdir, 'names.dmp'))

    def test_read_search_results(self):
        file = join(self.datadir, 'DnaK', 'search', 'sample.tsv')
        obs = Analyze.read_search_results(file)
        self.assertEqual(len(obs), 1)
        self.assertEqual(obs[0]['id'], 'WP_000516135.1')
        self.assertAlmostEqual(obs[0]['score'], 1092.8)
        self.assertTupleEqual(obs[0]['hits'].shape, (12, 5))
        self.assertEqual(obs[0]['hits'].iloc[2].name, 'NP_454622.1')
        self.assertAlmostEqual(obs[0]['hits']['evalue']['NP_230502.1'],
                               5.9e-282)
        self.assertEqual(obs[0]['hits']['taxid']['NP_384288.1'], '266834')
        # maximum number of hits
        obs = Analyze.read_search_results(file, maxhits=8)
        self.assertEqual(len(obs[0]['hits']), 8)
        # alignment thresholds
        obs = Analyze.read_search_results(
            file, evalue=1e-50, identity=95, coverage=95)
        self.assertEqual(len(obs[0]['hits']), 5)

    def test_assign_taxonomy(self):
        # input are two genomes with defined taxonomy
        me = Analyze()
        me.input_tax = 'S1:561,S2:620'  # Escherichia and Shigella
        me.data = {}
        me.taxdump = taxdump_from_text(taxdump_proteo)
        me.assign_taxonomy()
        # test input taxonomy extraction
        self.assertDictEqual(me.input_tax, {'S1': '561', 'S2': '620'})
        # test taxonomy refinement
        exp = {'1', '131567', '2', '1224', '1236', '91347', '543', '561',
               '620'}
        self.assertSetEqual(set(me.taxdump.keys()), exp)
        # test LCA discovery
        self.assertEqual(me.lca, '543')

        # helper for making hit table
        def _hits_df(d):
            return pd.Series(d, name='taxid', dtype=object).to_frame()
        # input is one genome with defined taxonomy
        me = Analyze()
        me.data = {'S1': [{'hits': pd.DataFrame(columns=['taxid'])}]}
        me.input_tax = '561'  # Escherichia
        me.taxdump = taxdump_from_text(taxdump_proteo)
        me.assign_taxonomy()
        self.assertDictEqual(me.input_tax, {'S1': '561'})
        # input taxonomy not found in database
        me.input_tax = '1234'
        me.taxdump = taxdump_from_text(taxdump_proteo)
        with self.assertRaises(ValueError) as ctx:
            me.assign_taxonomy()
        msg = 'TaxID 1234 is not present in taxonomy database.'
        self.assertEqual(str(ctx.exception), msg)
        # input are two genome whose taxonomies are to be inferred based on
        # search results
        me = Analyze()
        me.input_tax = None
        me.data = {'S1': [{'hits': _hits_df({'P1': '561', 'P2': '562'})},
                          {'hits': _hits_df({'P3': '543', 'P4': '561'})}],
                   'S2': [{'hits': _hits_df({'P5': '562', 'P6': '585056'})},
                          {'hits': _hits_df({'P7': '561', 'P8': '1038927'})},
                          {'hits': _hits_df({'P9': '2580236'})}]}
        me.input_cov = 75
        me.taxdump = taxdump_from_text(taxdump_proteo)
        me.assign_taxonomy()
        self.assertDictEqual(me.input_tax, {'S1': '543', 'S2': '561'})
        self.assertEqual(me.lca, '543')
        # cannot auto-infer taxonomy
        me.data['S3'] = [{'hits': _hits_df({})}]
        me.taxdump = taxdump_from_text(taxdump_proteo)
        with self.assertRaises(ValueError) as ctx:
            me.assign_taxonomy()
        msg = 'Cannot auto-infer taxonomy for S3. Please specify manually.'
        self.assertEqual(str(ctx.exception), msg)
        # invalid input taxonomy string
        me.input_tax = '561'
        with self.assertRaises(ValueError) as ctx:
            me.assign_taxonomy()
        msg = 'Invalid input taxonomy format.'
        self.assertEqual(str(ctx.exception), msg)

    def test_infer_genome_tax(self):
        taxdump = taxdump_from_text(taxdump_proteo)
        # five proteins, in which four have hits
        taxids = [['562', '620', '570'],  # E. coli
                  ['562', '585056', '1038927', '2'],  # E. coli
                  ['561', '543', '776'],  # Escherichia
                  ['548', '570', '1236'],  # K. aerogenes
                  []]
        prots = [{'hits': pd.DataFrame(x, columns=['taxid'])} for x in taxids]
        obs = Analyze.infer_genome_tax(prots, taxdump, 75)
        exp = ('561', 75.0)  # 3 / 4 best hits assigned to Escherichia
        self.assertTupleEqual(obs, exp)
        # reduce coverage threshold
        obs = Analyze.infer_genome_tax(prots, taxdump, 50)
        exp = ('562', 50.0)  # 2 / 4 best hits assigned to Escherichia
        self.assertTupleEqual(obs, exp)
        # remove one protein that best matches E. coli
        prots.pop(0)
        obs = Analyze.infer_genome_tax(prots, taxdump, 75)
        exp = ('543', 100.0)  # 3 / 3 best hits assigned to Enterobacteriaceae
        self.assertTupleEqual(obs, exp)
        # no input protein
        with self.assertRaises(ValueError) as ctx:
            Analyze.infer_genome_tax({}, taxdump, 75)
        msg = 'Cannot auto-infer taxonomy.'
        self.assertEqual(str(ctx.exception), msg)

    def test_sum_taxids(self):
        me = Analyze()
        me.input_tax = {'S1': '1', 'S2': '3'}

        # helper for making hit table
        def _hits_df(d):
            return pd.Series(d, name='taxid').to_frame()
        me.data = {'S1': [{'hits': _hits_df({'a': '4', 'b': '6'})},
                          {'hits': _hits_df({'a': '4', 'c': '8'})}],
                   'S2': [{'hits': _hits_df({'b': '6', 'd': '1'})}]}
        obs = me.sum_taxids()
        exp = {'1', '3', '4', '6', '8'}
        self.assertSetEqual(obs, exp)

    def test_define_groups(self):
        me = Analyze()
        me.taxdump = taxdump_from_text(taxdump_proteo)
        add_children(me.taxdump)
        me.groups = {}
        # user defined groups:
        # self: genera Escherichia and Shigella
        # close: family Enterobacteriaceae
        me.groups = {}
        me.self_tax = '561,620'
        me.close_tax = '543'
        me.define_groups()
        self.assertListEqual(me.self_tax, ['561', '620'])
        exp = {'561', '562', '585056', '1038927', '2580236', '620', '622'}
        self.assertSetEqual(me.groups['self'], exp)
        self.assertListEqual(me.close_tax, ['543'])
        exp = {'543', '548', '570'}
        self.assertSetEqual(me.groups['close'], exp)
        # auto-infer groups
        me.self_tax = {}
        me.close_tax = {}
        me.lca = '562'  # all inputs are E. coli
        me.self_rank = 'genus'  # but we want to raise self to genus
        me.close_size = 2  # close group must be this big or bigger
        me.define_groups()
        self.assertListEqual(me.self_tax, ['561'])
        exp = {'561', '562', '585056', '1038927', '2580236'}
        self.assertSetEqual(me.groups['self'], exp)
        self.assertListEqual(me.close_tax, ['543'])
        exp = {'543', '548', '570', '620', '622'}
        self.assertSetEqual(me.groups['close'], exp)

    def test_infer_self_group(self):
        me = Analyze()
        me.taxdump = taxdump_from_text(taxdump_proteo)
        add_children(me.taxdump)
        # assign to LCA of all genomes (E. coli)
        me.self_tax = None
        me.lca = '562'
        me.self_rank = None
        me.infer_self_group()
        self.assertListEqual(me.self_tax, ['562'])
        # raise LCA to genus level (Escherichia)
        me.self_tax = None
        me.lca = '562'
        me.self_rank = 'genus'
        me.infer_self_group()
        self.assertListEqual(me.self_tax, ['561'])
        # LCA (Enterobacteriaceae) is already above designated rank (genus)
        me.self_tax = None
        me.lca = '543'
        me.self_rank = 'genus'
        me.infer_self_group()
        self.assertListEqual(me.self_tax, ['543'])

    def test_infer_close_group(self):
        me = Analyze()
        me.taxdump = taxdump_from_text(taxdump_proteo)
        add_children(me.taxdump)
        me.groups = {}
        # close group is parent of LCA of self group
        me.self_tax = ['562']  # E. coli
        me.groups['self'] = set(['562'] + get_descendants('562', me.taxdump))
        me.close_tax = None
        me.close_size = None
        me.infer_close_group()
        self.assertListEqual(me.close_tax, ['561'])  # Escherichia
        self.assertSetEqual(me.groups['close'], {'561', '2580236'})
        # close group must have at least 5 taxa
        me.close_tax = None
        me.groups['close'] = None
        me.close_size = 5
        me.infer_close_group()
        self.assertListEqual(me.close_tax, ['543'])  # Enterobacteriaceae
        exp = {'543', '620', '622', '570', '548', '561', '2580236'}
        self.assertSetEqual(me.groups['close'], exp)
        # close group is LCA of multiple self groups
        me.self_tax = ['561', '620']  # Escherichia and Shigella
        me.groups['self'] = set().union(*[[x] + get_descendants(
            x, me.taxdump) for x in me.self_tax])
        me.close_tax = None
        me.groups['close'] = None
        me.close_size = None
        me.infer_close_group()
        self.assertListEqual(me.close_tax, ['543'])  # Enterobacteriaceae
        exp = {'543', '570', '548'}
        self.assertSetEqual(me.groups['close'], exp)

    def test_calc_scores(self):
        columns = ('id', 'taxid', 'score')

        # helper for making hit table
        def _hits_df(data):
            return pd.DataFrame(data, columns=columns).set_index('id')
        me = Analyze()
        me.taxdump = taxdump_from_text(taxdump_proteo)
        add_children(me.taxdump)
        me.groups = {'self': {'561', '562', '585056'},
                     'close': {'543', '91347', '1236'}}
        me.data = {'S1': [
            {'score': 100, 'hits': _hits_df((('P1', '561', 100),
                                             ('P2', '562', 95)))},
            {'score': 90, 'hits': _hits_df((('P3', '561', 81),
                                            ('P4', '543', 72)))}],
            'S2': [
            {'score': 96, 'hits': _hits_df((('P5', '561', 90),
                                            ('P6', '543', 84),
                                            ('P7', '620', 66)))}]}
        me.weighted = True
        me.match_th = 0.9
        me.calc_scores()

        # helper for getting the three per-group scores of a protein
        def _prot_scores(prot):
            return [prot[x] for x in ('self', 'close', 'distal')]
        s1_1 = me.data['S1'][0]
        self.assertListEqual(s1_1['hits']['group'].tolist(), ['self', 'self'])
        self.assertListEqual(_prot_scores(s1_1), [1.95, 0.0, 0.0])
        self.assertEqual(s1_1['match'], '0')
        s1_2 = me.data['S1'][1]
        self.assertListEqual(s1_2['hits']['group'].tolist(), ['self', 'close'])
        self.assertListEqual(_prot_scores(s1_2), [0.9, 0.8, 0.0])
        self.assertEqual(s1_2['match'], '0')
        s2_1 = me.data['S2'][0]
        self.assertListEqual(s2_1['hits']['group'].tolist(),
                             ['self', 'close', 'distal'])
        self.assertListEqual(_prot_scores(s2_1), [0.9375, 0.875, 0.6875])
        self.assertEqual(s2_1['match'], '620')

    def test_find_match(self):
        me = Analyze()
        me.taxdump = taxdump_from_text(taxdump_proteo)
        add_children(me.taxdump)
        df = pd.DataFrame(
            [[100, '585056'],  # E. coli UMN026
             [99, '1038927'],  # E. coli O104:H4
             [97, '562'],  # Escherichia coli
             [95, '622'],  # Shigella dysenteriae
             [92, '543'],  # Enterobacteriaceae
             [88, '548'],  # Klebsiella aerogenes
             [80, '766']],  # Rickettsiales
            columns=['score', 'taxid'])
        # keep top 1% hits
        me.match_th = 0.99
        self.assertEqual(me.find_match(df), '562')
        # keep top 10% hits
        me.match_th = 0.9
        self.assertEqual(me.find_match(df), '543')
        # keep top 20% hits
        me.match_th = 0.8
        self.assertEqual(me.find_match(df), '1224')
        # input DataFrame is empty
        self.assertEqual(me.find_match(pd.DataFrame()), '0')

    def test_make_score_table(self):
        me = Analyze()
        me.output = self.tmpdir
        me.data = {'S1': [{'id': 'P1', 'length': 100, 'match': '0',
                           'self': 1.5, 'close': 0.75, 'distal': 0.0,
                           'hits': pd.DataFrame([0] * 3)},
                          {'id': 'P2', 'length': 120, 'match': '1224',
                           'self': 1.625, 'close': 0.225, 'distal': 0.375,
                           'hits': pd.DataFrame([0] * 5)}],
                   'S2': [{'id': 'P1', 'length': 225, 'match': '620',
                           'self': 2.35, 'close': 1.05, 'distal': 0.75,
                           'hits': pd.DataFrame([0] * 6)}]}
        me.make_score_table()
        obs = me.df.values.tolist()
        exp = [['S1', 'P1', 100, 3, 1.5, 0.75, 0, '0'],
               ['S1', 'P2', 120, 5, 1.625, 0.225, 0.375, '1224'],
               ['S2', 'P1', 225, 6, 2.35, 1.05, 0.75, '620']]
        self.assertListEqual(obs, exp)
        # the table is also written to disk as a TSV with a header row
        fp = join(self.tmpdir, 'scores.tsv')
        with open(fp, 'r') as f:
            obs = [x.split('\t') for x in f.read().splitlines()[1:]]
        exp = [[str(y) for y in x] for x in exp]
        self.assertListEqual(obs, exp)
        remove(fp)

    def test_remove_orphans(self):
        me = Analyze()
        me.df = pd.DataFrame([
            [1.0, 0.2], [0.5, 0.4], [0.0, 0.0], [0.8, 0.0], [0.0, 0.7]],
            columns=['close', 'distal'])
        me.remove_orphans()
        # only the all-zero row is dropped
        self.assertListEqual(me.df.values.tolist(), [
            [1.0, 0.2], [0.5, 0.4], [0.8, 0.0], [0.0, 0.7]])

    def test_remove_outliers(self):
        me = Analyze()
        me.self_low = False
        df = pd.DataFrame(np.array([self.dist_gamma,
                                    self.dist_lognorm[:800]]).T,
                          columns=['close', 'distal'])
        # Z-score
        me.df = df.copy()
        me.outliers = 'zscore'
        me.remove_outliers()
        self.assertEqual(me.df.shape[0], 781)
        # boxplot
        me.df = df.copy()
        me.outliers = 'boxplot'
        me.remove_outliers()
        self.assertEqual(me.df.shape[0], 710)

    def test_relevant_groups(self):
        me = Analyze()
        me.self_low = False
        self.assertListEqual(me.relevant_groups(), ['close', 'distal'])
        me.self_low = True
        self.assertListEqual(me.relevant_groups(), ['self', 'close', 'distal'])

    def test_outliers_zscore(self):
        df = pd.DataFrame(np.array([self.dist_gamma,
                                    self.dist_lognorm[:800]]).T,
                          columns=['close', 'distal'])
        obs = Analyze.outliers_zscore(df, ['close', 'distal'])
        self.assertEqual(obs.shape[0], 781)

    def test_outliers_boxplot(self):
        df = pd.DataFrame(np.array([self.dist_gamma,
                                    self.dist_lognorm[:800]]).T,
                          columns=['close', 'distal'])
        obs = Analyze.outliers_boxplot(df, ['close', 'distal'])
        self.assertEqual(obs.shape[0], 710)

    def test_predict_hgt(self):
        me = Analyze()
        # populate score table
        n = 1000
        data = {'sample': ['S1'] * n,
                'protein': [f'P{x}' for x in range(n)],
                'self': np.random.choice(self.dist_gamma, n),
                'close': np.concatenate((
                    np.random.choice(self.dist_norm1, int(n / 2)) / 3,
                    np.random.choice(self.dist_norm2, int(n / 2)))),
                'distal': np.concatenate((
                    np.random.choice(self.dist_lognorm, int(n * 3 / 4)),
                    np.random.choice(self.dist_gamma, int(n / 4)) / 2))}
        me.df = pd.DataFrame(data)
        # default setting
        me.output = self.tmpdir
        me.self_low = False
        me.bandwidth = 'auto'
        me.bw_steps = 20
        me.low_part = 75
        me.fixed = 25
        me.noise = 50
        me.silhouette = 0.5
        # run prediction
        self.assertEqual(me.predict_hgt(), 96)
        # diagnostic plots and per-sample HGT lists are written to disk
        groups = ['self', 'close', 'distal']
        for group in groups[1:]:
            fp = join(self.tmpdir, f'{group}.hist.png')
            self.assertTrue(isfile(fp))
            remove(fp)
        fp = join(self.tmpdir, 'scatter.png')
        self.assertTrue(isfile(fp))
        remove(fp)
        fp = join(self.tmpdir, 'hgts')
        self.assertTrue(isfile(join(fp, 'S1.txt')))
        rmtree(fp)
        # constant values
        me.df['close'] = 1
        me.df.drop('hgt', axis=1, inplace=True)
        self.assertEqual(me.predict_hgt(), 0)
        self.assertNotIn('hgt', me.df.columns)
        remove(join(self.tmpdir, 'close.hist.png'))

    def test_cluster_kde(self):
        me = Analyze()
        data = np.concatenate([self.dist_norm1, self.dist_norm2])
        me.df = pd.Series(data, name='group').to_frame()
        me.bw_steps = 10
        me.noise = 50
        me.low_part = 75
        me.output = self.tmpdir
        # grid search
        me.bandwidth = 'grid'
        obs = me.cluster_kde('group')
        self.assertAlmostEqual(obs, 1.855525575742988)
        # Silverman's rule-of-thumb
        me.bandwidth = 'silverman'
        obs = me.cluster_kde('group')
        self.assertAlmostEqual(obs, 2.2279977615745703)
        # fixed value
        me.bandwidth = 0.5
        obs = me.cluster_kde('group')
        self.assertAlmostEqual(obs, 2.2507008281395433)
        # smart KDE
        me.bandwidth = 'auto'
        obs = me.cluster_kde('group')
        self.assertAlmostEqual(obs, 2.1903958075763343)
        # clean up
        remove(join(self.tmpdir, 'group.kde.png'))
        # cannot find threshold (unimodal distribution)
        me.df = pd.Series(self.dist_norm1, name='group').to_frame()
        me.bandwidth = 'silverman'
        obs = me.cluster_kde('group')
        self.assertEqual(obs, 0)

    def test_perform_kde(self):
        me = Analyze()
        me.bw_steps = 10
        data = np.concatenate([self.dist_norm1, self.dist_norm2])
        # grid search
        me.bandwidth = 'grid'
        obs = me.perform_kde(data)[2]
        self.assertAlmostEqual(obs, 0.21544346900318834)
        # Silverman's rule-of-thumb
        me.bandwidth = 'silverman'
        obs = me.perform_kde(data)[2]
        self.assertAlmostEqual(obs, 0.48713295460585126)
        # fixed value
        me.bandwidth = 0.5
        obs = me.perform_kde(data)[2]
        self.assertAlmostEqual(obs, 0.5)
        # invalid bandwidth
        me.bandwidth = 100
        with self.assertRaises(ValueError) as ctx:
            me.perform_kde(data)
        msg = 'Invalid bandwidth: 100.'
        self.assertEqual(str(ctx.exception), msg)

    def test_grid_kde(self):
        estimator = KernelDensity(kernel='gaussian')
        # unimodal
        data = self.dist_gamma[:, np.newaxis]
        obs = Analyze.grid_kde(data, estimator, 10).bandwidth
        self.assertAlmostEqual(obs, 0.774263682681127)
        # bimodal
        data = np.concatenate([
            self.dist_norm1, self.dist_norm2])[:, np.newaxis]
        obs = Analyze.grid_kde(data, estimator, 10).bandwidth
        self.assertAlmostEqual(obs, 0.46415888336127786)
        data = np.array([1, 2, 3, 4, 5])[:, np.newaxis]
        obs = Analyze.grid_kde(data, estimator, 5).bandwidth
        self.assertAlmostEqual(obs, 1.0)
        # very few data points (bw = high end)
        # (duplicate of the check above, kept as in the original)
        data = np.array([1, 2, 3, 4, 5])[:, np.newaxis]
        obs = Analyze.grid_kde(data, estimator, 5).bandwidth
        self.assertAlmostEqual(obs, 1.0)
        # constant values (bw = low end)
        data = np.array([1, 1, 1, 1, 1])[:, np.newaxis]
        obs = Analyze.grid_kde(data, estimator, 5).bandwidth
        self.assertAlmostEqual(obs, 0.1)
        # too few data points (less than splits)
        data = np.array([1, 2, 3])[:, np.newaxis]
        with self.assertRaises(ValueError) as ctx:
            Analyze.grid_kde(data, estimator, 5)
        msg = 'Cannot perform grid search on 3 data point(s).'
        self.assertEqual(str(ctx.exception), msg)

    def test_silverman_bw(self):
        # unimodal
        obs = Analyze.silverman_bw(self.dist_gamma)
        self.assertAlmostEqual(obs, 0.6148288686346546)
        obs = Analyze.silverman_bw(self.dist_lognorm)
        self.assertAlmostEqual(obs, 0.2384666552244172)
        # bimodal
        obs = Analyze.silverman_bw(np.concatenate([
            self.dist_norm1, self.dist_norm2]))
        self.assertAlmostEqual(obs, 0.48713295460585126)
        # constant values
        obs = Analyze.silverman_bw([1, 1, 1, 1, 1])
        self.assertAlmostEqual(obs, 0.652301697309926)
        # IQR = 0
        obs = Analyze.silverman_bw([1, 3, 3, 3, 5])
        self.assertAlmostEqual(obs, 0.9224939070946869)
        # one element
        with self.assertRaises(ValueError) as ctx:
            Analyze.silverman_bw([5])
        msg = 'Cannot calculate bandwidth on 1 data point.'
        self.assertEqual(str(ctx.exception), msg)

    def test_density_func(self):
        data = self.dist_norm1[:, np.newaxis]
        estimator = KernelDensity(kernel='gaussian', bandwidth=0.5)
        kde = estimator.fit(data)
        obs = Analyze.density_func(data, kde, 10)
        # (x grid, density) pair sampled at 10 points
        exp = (np.array([-1.48253468, 0.0939095, 1.67035369, 3.24679787,
                         4.82324206, 6.39968624, 7.97613043, 9.55257461,
                         11.1290188, 12.70546298]),
               np.array([0.00104342, 0.00788705, 0.0496806, 0.13173376,
                         0.19176352, 0.15754466, 0.06992292, 0.02140856,
                         0.00150463, 0.00053637]))
        np.testing.assert_array_almost_equal(obs, exp)

    def test_first_hill(self):
        # typical bimodal distribution
        data = np.concatenate([
            self.dist_norm1, self.dist_norm2])[:, np.newaxis]
        estimator = KernelDensity(kernel='gaussian', bandwidth=0.5)
        kde = estimator.fit(data)
        x, y = Analyze.density_func(data, kde, 100)
        obs_x, obs_y = Analyze.first_hill(x, y)
        exp_x, exp_y = 1.0971012583068704, 2.5302323352207674
        self.assertAlmostEqual(obs_x, exp_x)
        self.assertAlmostEqual(obs_y, exp_y)
        # peak larger than valley
        data = np.negative(data)
        kde = estimator.fit(data)
        x, y = Analyze.density_func(data, kde, 100)
        with self.assertRaises(ValueError) as ctx:
            Analyze.first_hill(x, y)
        msg = 'Peak is larger than valley.'
        self.assertEqual(str(ctx.exception), msg)
        # unimodal distribution
        data = self.dist_norm1[:, np.newaxis]
        kde = estimator.fit(data)
        x, y = Analyze.density_func(data, kde, 100)
        with self.assertRaises(ValueError) as ctx:
            Analyze.first_hill(x, y)
        msg = 'Cannot identify at least two peaks.'
        self.assertEqual(str(ctx.exception), msg)

    def test_plot_hist(self):
        fp = join(self.tmpdir, 'tmp.png')
        Analyze.plot_hist(self.dist_gamma, fp)
        self.assertTrue(isfile(fp))
        remove(fp)

    def test_plot_density(self):
        data = np.concatenate([
            self.dist_norm1, self.dist_norm2])[:, np.newaxis]
        estimator = KernelDensity(kernel='gaussian', bandwidth=0.5)
        kde = estimator.fit(data)
        x, y = Analyze.density_func(data, kde, 100)
        peak, valley = Analyze.first_hill(x, y)
        th = valley - (valley - peak) * 0.5 / 100
        fp = join(self.tmpdir, 'tmp.png')
        Analyze.plot_density(x, y, peak, valley, th, fp)
        self.assertTrue(isfile(fp))
        remove(fp)

    def test_smart_kde(self):
        me = Analyze()
        # typical case (bimodal distribution)
        me.df = pd.Series(np.concatenate([
            self.dist_norm1, self.dist_norm2]), name='group').to_frame()
        me.bw_steps = 10
        me.noise = 50
        me.low_part = 75
        me.output = self.tmpdir
        obs = me.smart_kde('group')
        self.assertAlmostEqual(obs, 2.1903958075763343)
        file = join(self.tmpdir, 'group.kde.png')
        self.assertTrue(isfile(file))
        remove(file)
        # unable to determine threshold
        me.low_part = 0.001
        me.df = pd.Series(self.dist_norm1, name='group').to_frame()
        self.assertEqual(me.smart_kde('group'), 0)

    def test_calc_cluster_props(self):
        me = Analyze()
        me.self_low = False
        me.df = pd.DataFrame(np.array(
            [self.dist_gamma, self.dist_lognorm[:800]]).T,
            columns=['close', 'distal'])
        me.df['hgt'] = (me.df['close'] < 2) & (me.df['distal'] > 2)
        obs = me.calc_cluster_props()
        self.assertAlmostEqual(obs[0], 1.094658052928843)
        self.assertAlmostEqual(obs[1], 4.30076698399293)
        obs = me.df['silh'].describe()
        self.assertAlmostEqual(obs['mean'], 0.312495082044277)
        self.assertAlmostEqual(obs['std'], 0.21945541659155993)
        self.assertEqual(me.df.query('hgt & silh < 0.5').shape[0], 35)

    def test_refine_cluster(self):
        me = Analyze()
        # only close and distal
        me.self_low = False
        me.silhouette = 0.5
        me.df = pd.DataFrame(np.array(
            [self.dist_gamma, self.dist_lognorm[:800]]).T,
            columns=['close', 'distal'])
        me.df['hgt'] = (me.df['close'] < 2) & (me.df['distal'] > 2)
        me.refine_cluster(me.calc_cluster_props())
        self.assertEqual(me.df[me.df['hgt']].shape[0], 11)
        # all three groups
        me.self_low = True
        me.df = pd.DataFrame(np.array([
            self.dist_norm1[:800], self.dist_gamma,
            self.dist_lognorm[:800]]).T,
            columns=['self', 'close', 'distal'])
        me.df['hgt'] = (me.df['close'] < 2) & (me.df['distal'] > 2)
        me.refine_cluster(me.calc_cluster_props())
        self.assertEqual(me.df[me.df['hgt']].shape[0], 4)

    def test_plot_hgts(self):
        me = Analyze()
        me.output = self.tmpdir
        me.df = pd.DataFrame(np.array(
            [self.dist_gamma, self.dist_lognorm[:800]]).T,
            columns=['close', 'distal'])
        me.df['hgt'] = (me.df['close'] < 2) & (me.df['distal'] > 2)
        me.plot_hgts()
        fp = join(self.tmpdir, 'scatter.png')
        self.assertTrue(isfile(fp))
        remove(fp)
"""Constants"""
taxdump_proteo = (
'1,root,1,no rank',
'131567,cellular organisms,1,no rank',
'2,Bacteria,131567,superkingdom',
'1224,Proteobacteria,2,phylum',
'28211,Alphaproteobacteria,1224,class',
'766,Rickettsiales,28211,order',
'1236,Gammaproteobacteria,1224,class',
'91347,Enterobacterales,1236,order',
'543,Enterobacteriaceae,91347,family',
'561,Escherichia,543,genus',
'562,Escherichia coli,561,species',
'585056,Escherichia coli UMN026,562,no rank',
'1038927,Escherichia coli O104:H4,562,no rank',
'2580236,synthetic Escherichia coli Syn61,561,species',
'620,Shigella,543,genus',
'622,Shigella dysenteriae,620,species',
'570,Klebsiella,543,genus',
'548,Klebsiella aerogenes,570,species',
'118884,unclassified Gammaproteobacteria,1236,no rank',
'126792,Plasmid pPY113,1,species')
if __name__ == '__main__':
main()
|
DittmarLab/HGTector
|
hgtector/tests/test_analyze.py
|
Python
|
bsd-3-clause
| 33,093
|
[
"Gaussian"
] |
7abef91a94ccdaf327a5285a8bb2ff8d888dfddd610bcaf39d8e7c228ccdaa7b
|
#!/usr/bin/env python
#
# fetchfa - Fetch fasta files from Entrez
#
# Copyright (C) 2013, Jian-Long Huang
# Licensed under The MIT License
# http://opensource.org/licenses/MIT
#
# Author: Jian-Long Huang (jianlong@ntu.edu.tw)
# Version: 0.4
# Created: 2013.1.24
#
# Required:
# * Biopython: http://biopython.org
#
# Usage: fetchfa <input.blastaccmap> [options]
# -d, --db STR: database (default: protein)
# -q, --query STR: accessions to be fetched. If this option is specified, the script will use the values
# to fetch data, and no input file is required to be handled.
# Support multiple accession. (comma-separated).
# -o, --output STR: output directory or file name. If this option is not specified, the script will generate
# one with unique identifier at current directory.
# -l, --log STR: log file name
#
# File formats:
# * input.blastaccmap: blastaccmap
# * output: fasta
import os
import sys
import argparse
from Bio import Entrez
from fhandle import name, logmsg
def main():
    """Fetch fasta records from Entrez and write them to disk.

    Modes:
      * With -q/--query: fetch the comma-separated accessions in a single
        efetch call into <output>.fa.
      * Otherwise: read a blastaccmap input file (one query per line:
        "<query>\\t<acc1,acc2,...>") and fetch each query's accessions,
        in batches of 30, into <output>/<query>.fa.

    Progress messages go to the log file (-l, defaulting to <output>.log).
    """
    proglog = logmsg.message(prog='fetchfa', cmd=' '.join(sys.argv))
    parser = argparse.ArgumentParser(description='fetchfa - Fetch fasta files from Entrez')
    parser.add_argument('input_file', nargs='?')
    parser.add_argument('-d', '--db', dest='database', default='protein',
                        help='database (default: protein)')
    parser.add_argument('-q', '--query', dest='query_id',
                        help='accessions to be fetched. If this option is specified, the script will use the values '
                             'to fetch data, and no input file is required to be handled.')
    parser.add_argument('-o', '--output', dest='output', default='fetchfa_out_' + name.genid(),
                        help='output directory or file name. If this option is not specified, the script will generate '
                             'one with unique identifier at current directory.')
    parser.add_argument('-l', '--log', dest='log_file',
                        help='log file name')
    args = parser.parse_args()
    # BUG FIX: the old -q branch re-opened <output>.log for writing, which
    # truncated the start message already written and ignored -l entirely.
    # Open the log exactly once and honor -l in both modes.
    log_path = args.log_file if args.log_file is not None else args.output + '.log'
    fwlog = open(log_path, 'w')
    try:
        for i in proglog.start_message():
            fwlog.write(i)
        fwlog.flush()
        Entrez.email = name.genid() + '@example.com'
        if args.query_id is not None:
            # NCBI accepts a comma-separated id list, so one call suffices.
            with open(args.output + '.fa', 'w') as fw:
                handle = Entrez.efetch(db=args.database,
                                       id=args.query_id,
                                       rettype='fasta',
                                       retmode='text')
                fw.write(handle.read())
                handle.close()
            fwlog.write('# Fetched sequences: ' + str(len(args.query_id.split(','))) + '\n')
            fwlog.write('#\n')
        else:
            if not os.path.exists(args.output):
                os.makedirs(args.output)
            with open(args.input_file, 'r') as fin:
                query_num = 0
                for line in fin:
                    # Skip blank lines, comments, and header lines.
                    if line.lstrip() == '' or line.lstrip()[0] in ('#', 'a'):
                        continue
                    query_num += 1
                    out_path = os.path.join(os.path.abspath(args.output),
                                            line.split('\t')[0] + '.fa')
                    with open(out_path, 'w') as fw:
                        alist = line.rstrip().split('\t')[1].split(',')
                        # Fetch in batches of 30 accessions (Entrez-friendly);
                        # the final partial batch is handled by the same loop.
                        while alist:
                            alist_part, alist = alist[:30], alist[30:]
                            handle = Entrez.efetch(db=args.database,
                                                   id=','.join(alist_part),
                                                   rettype='fasta',
                                                   retmode='text')
                            fw.write(handle.read())
                            fw.flush()
                            handle.close()
                fwlog.write('# Fetched queries: ' + str(query_num) + '\n')
                fwlog.write('#\n')
        for i in proglog.end_message():
            fwlog.write(i)
            fwlog.flush()
    finally:
        fwlog.close()
# Script entry point.
if __name__ == '__main__':
    main()
|
jlhg/bdorpy
|
bdorpy/fetchfa.py
|
Python
|
mit
| 4,623
|
[
"Biopython"
] |
d1478fdc93211edcde3103e6f76a294b7713bde8b13033bf26fdae4ea233e95e
|
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404, redirect
from django.template import loader
import json
from cocomapapp.models import Tag, Topic, Post, Relation, Vote, Visit
from django.contrib.auth.models import User
from cocomapapp.serializers import UserSerializer, TagSerializer, TopicSerializer, TopicNestedSerializer, HotTopicsSerializer, PostSerializer, PostNestedSerializer, RelationSerializer, VoteSerializer, VisitSerializer, RelationBulkSerializer
from rest_framework import generics
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.views.decorators.csrf import csrf_exempt
from django.core import serializers
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from .forms import RegisterForm, LoginForm
from django.template import RequestContext
from django.views.decorators.csrf import ensure_csrf_cookie
from functools import reduce
import operator
from django.utils import timezone
import requests
from io import StringIO
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from rest_framework.views import APIView
from rest_framework_bulk import (
BulkListSerializer,
BulkSerializerMixin,
ListBulkCreateUpdateDestroyAPIView,
)
class ReadNestedWriteFlatMixin(object):
    """
    Mixin that sets the depth of the serializer to 0 (flat) for writing operations
    (PATCH/POST/PUT) and to 1 (nested) for all other operations.

    NOTE(review): this mutates serializer_class.Meta.depth, which is shared
    class-level state — views sharing a serializer class affect each other.
    Confirm whether per-instance serializer configuration would be safer.
    """
    def get_serializer_class(self, *args, **kwargs):
        serializer_class = super(ReadNestedWriteFlatMixin, self).get_serializer_class(*args, **kwargs)
        # Write verbs get a flat representation (related objects by id);
        # read verbs get one level of nesting.
        if self.request.method in ['PATCH', 'POST', 'PUT']:
            serializer_class.Meta.depth = 0
        else:
            serializer_class.Meta.depth = 1
        return serializer_class
class TopicList(ReadNestedWriteFlatMixin, generics.ListAPIView):
    """
    A view that lists all topics, serialized with nested relations.
    """
    queryset = Topic.objects.all()
    serializer_class = TopicNestedSerializer
class TopicCreate(ReadNestedWriteFlatMixin, generics.CreateAPIView):
    """
    A view that allows users to create topics (flat input via the mixin).
    """
    serializer_class = TopicSerializer
class TopicRetrieve(ReadNestedWriteFlatMixin, generics.RetrieveAPIView):
    """
    A view that retrieves details of a topic given its id (nested output).
    """
    queryset = Topic.objects.all()
    serializer_class = TopicNestedSerializer
class PostCreate(generics.CreateAPIView):
    """
    A view that allows users to create a post to a topic.

    Input is validated flat (related objects by id); the response is
    re-serialized with the nested serializer.
    """
    serializer_class = PostSerializer
    serializer_class.Meta.depth = 0
    def post(self, request, format=None):
        serializer = PostSerializer(data=request.data)
        if serializer.is_valid():
            obj = serializer.save()
            # Re-serialize the saved post with nested relations for the client.
            newSerializer = PostNestedSerializer(obj)
            return Response(newSerializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    #queryset = Post.objects.all()
    #serializer_class = PostSerializer
class PostRetrieve(ReadNestedWriteFlatMixin,generics.RetrieveAPIView):
    """
    A view that retrieves details of a post given its id (nested output).
    """
    queryset = Post.objects.all()
    serializer_class = PostNestedSerializer
class PostUpdate(ReadNestedWriteFlatMixin,generics.UpdateAPIView):
    """
    A view that updates details of a post (flat input via the mixin).
    """
    queryset = Post.objects.all()
    serializer_class = PostSerializer
class PostDelete(ReadNestedWriteFlatMixin,generics.DestroyAPIView):
    """
    A view that deletes a post.

    The queryset is restricted to posts owned by the acting user, so an
    attempt to delete someone else's post yields 404.
    """
    serializer_class = PostSerializer
    def get_queryset(self, *args, **kwargs):
        # The request body may name the acting user explicitly; otherwise
        # fall back to the authenticated user.
        data = JSONParser().parse(self.request)
        if data['user_id']:
            user = User.objects.get(id = data['user_id'])
        else:
            user = self.request.user
        post = Post.objects.filter(id = self.kwargs['pk'], user = user)
        return post
class RelationRetrieve(ReadNestedWriteFlatMixin,generics.RetrieveAPIView):
    """
    A view that retrieves details of a relation given its id.
    """
    queryset = Relation.objects.all()
    serializer_class = RelationSerializer
class RelationList(ReadNestedWriteFlatMixin,generics.ListAPIView):
    """
    A view that lists all relations.

    Optional ?topic_id=<id> filters to relations touching that topic
    as either endpoint.
    """
    serializer_class = RelationSerializer
    def get_queryset(self, *args, **kwargs):
        queryset_list = Relation.objects.all()
        query = self.request.GET.get("topic_id")
        if query:
            # Match the topic on either side of the relation.
            queryset_list = queryset_list.filter(Q(topic_to__id=query) | Q(topic_from__id=query))
        return queryset_list
#DEPRECATED
class RecommendedTopics(ReadNestedWriteFlatMixin,generics.ListAPIView):
    """Deprecated: superseded by getRecommendedTopics().

    With ?user_id=<id>, returns the topics of that user's five most recent
    posts; otherwise the five most recently updated topics.
    """
    serializer_class = TopicNestedSerializer
    def get_queryset(self, *args, **kwargs):
        query = self.request.GET.get("user_id")
        if query:
            last_5_posts = Post.objects.filter(Q(user__id=query)).order_by('-created_at')[:5]
            last_5_topic_ids = []
            for post in last_5_posts:
                last_5_topic_ids.append(post.topic.id)
            queryset_list = Topic.objects.filter(id__in=last_5_topic_ids)
        else:
            queryset_list = Topic.objects.order_by('-updated_at')[:5]
        return queryset_list
#DEPRECATED
class RecommendedPosts(ReadNestedWriteFlatMixin,generics.ListAPIView):
    """Deprecated: superseded by getRecommendedPosts()."""
    serializer_class = PostNestedSerializer
    def get_queryset(self, *args, **kwargs):
        query = self.request.GET.get("user_id")
        if query:
            last_5_posts = Post.objects.filter(Q(user__id=query)).order_by('-created_at')[:5]
            last_5_topic_ids = []
            for post in last_5_posts:
                last_5_topic_ids.append(post.topic.id)
            recommended_topics = Topic.objects.filter(id__in=last_5_topic_ids)
            recommended_post_ids = []
            for topic in recommended_topics:
                # NOTE(review): topic.posts is a related manager, not an
                # iterable — sorted(topic.posts, ...) would raise TypeError;
                # likely meant topic.posts.all(). Also a *list* of posts (not
                # ids) is appended, so id__in below is unlikely to match.
                # Left as-is since this endpoint is deprecated.
                recommended_post_ids.append(sorted(topic.posts, key=lambda t: t.positive_reaction_count)[:1])
            queryset_list = Post.objects.filter(id__in=recommended_post_ids)
        else:
            queryset_list = sorted(Post.objects.all(), key=lambda t: -t.positive_reaction_count)[:5]
        return queryset_list
class RelationCreate(ListBulkCreateUpdateDestroyAPIView):
    """
    A view to create relations between topics, supporting bulk operations.
    """
    queryset = Relation.objects.all()
    serializer_class = RelationBulkSerializer
class TagCreate(ReadNestedWriteFlatMixin,generics.CreateAPIView):
    """
    A view to create a tag.
    """
    serializer_class = TagSerializer
class TagRetrieve(ReadNestedWriteFlatMixin,generics.RetrieveAPIView):
    """
    A view to retrieve details of a tag given its id.
    """
    queryset = Tag.objects.all()
    serializer_class = TagSerializer
class VisitCreate(ReadNestedWriteFlatMixin,generics.CreateAPIView):
    """
    A view that allows visit events to be created when a user enters a topic page.
    """
    serializer_class = VisitSerializer
#@csrf_exempt
@api_view(['POST'])
def post_vote(request):
    """
    A view to create (or update) authenticated user's vote on a post.
    If that user does not have a previous vote on that post, create it.
    If that user's existing vote is different from the one being created, update it.
    Otherwise, reset that user's vote.

    Returns the nested serialization of the affected post, or 404 when
    the post does not exist.
    """
    if request.method == 'POST':
        user = request.user
        post_id = request.data['post_id']
        try:
            post = Post.objects.get(pk=post_id)
        except Post.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        # The client sends is_positive as a string; .title() normalizes
        # casing ("true"/"TRUE") before comparing against "True".
        is_positive = request.data['is_positive'].title() == "True"
        try:
            oldVote = Vote.objects.get(user = user, post = post)
            if oldVote.is_positive == is_positive:
                # Voting the same direction again toggles the vote off.
                oldVote.delete()
                #return Response(oldVote.delete())
            else:
                # Opposite direction: flip the existing vote in place.
                oldVote.is_positive = is_positive
                oldVote.save()
                newVote = oldVote
        except Vote.DoesNotExist:
            newVote = Vote.objects.create(user=user, post=post, is_positive=is_positive)
        serializer = PostNestedSerializer(post)
        serializer.Meta.depth = 1;
        return Response(serializer.data)
        #serializer = VoteSerializer(newVote)
        #return Response(serializer.data)
@api_view(['GET'])
def getRecommendedTopics(request, limit):
    """
    A view to recommend <limit> amount of topics to the authenticated user.
    The formula depends on the hotness of the topic as well as the number of times and the
    most recent time the user has visited any topic related to the topic to be recommended.
    Each post is sorted by that formula, and the top <limit> are returned.
    """
    if request.method == 'GET':
        user = request.user;
        scores = {};
        for topic in Topic.objects.all():
            # Visits by this user to any topic that relates to `topic`.
            neighbor_visits = Visit.objects.filter(user=user, topic__relates_to__topic_to=topic)
            neighbor_visits_count = len(neighbor_visits);
            if neighbor_visits_count > 0:
                last_neighbor_visit = neighbor_visits.order_by('-visit_date')[0].visit_date;
            else:
                # Never visited a neighbor: fall back to the topic's creation time.
                last_neighbor_visit = topic.created_at
            # 5 points per neighbor visit, minus one point per hour elapsed
            # since the most recent neighbor visit.
            relevance_score = 5*neighbor_visits_count - (timezone.now()-last_neighbor_visit).total_seconds()/3600
            recommendation = relevance_score + topic.hotness
            scores[topic] = recommendation;
        sorted_scores = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)[:int(limit)]
        recommended_topics = [key for key, value in sorted_scores]
        #print(recommended_topics)
        serializer = TopicNestedSerializer(recommended_topics, many=True);
        return Response(serializer.data)
@api_view(['GET'])
def getRecommendedPosts(request, limit):
    """
    A view to recommend <limit> amount of posts to the authenticated user.
    Posts created after the user has visited a topic are considered only.
    Those posts are sorted by a formula related to the latest time the user has
    visited the post's topic, and the accuracy of the post.
    The top <limit> posts are returned.
    """
    if request.method == 'GET':
        user = request.user;
        scores = {};
        for topic in Topic.objects.filter(visits__user=user).distinct():
            last_visit = topic.visits.filter(user=user).order_by('-visit_date')[0].visit_date;
            # Only posts the user has not yet seen (created after last visit).
            unread_posts = topic.posts.filter(created_at__gt=last_visit)
            for post in unread_posts:
                # 10 points per unit of accuracy, minus one point per hour
                # since the user's last visit to the topic.
                score = 10 * post.accuracy - (timezone.now()-last_visit).total_seconds()/3600
                scores[post] = score;
        # Posts from topics the user never visited pad the list, newest first.
        extraPosts = Post.objects.exclude(topic__visits__user=user).order_by('-created_at')
        sorted_scores = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)
        recommended_posts = [key for key, value in sorted_scores]
        recommended_posts += extraPosts
        recommended_posts = recommended_posts[:int(limit)]
        TopicNestedSerializer.Meta.depth = 1
        PostNestedSerializer.Meta.depth = 1
        serializer = PostNestedSerializer(recommended_posts, many=True);
        return Response(serializer.data)
@api_view(['GET'])
def listTopicRelevance(request):
    """
    An unused API to return statistics related to the authenticated user and all topics.

    For each topic it reports the user's visit counts/timestamps,
    neighbor-visit statistics, post/like counts, and the derived
    relevance/recommendation scores used by getRecommendedTopics().
    """
    if request.method == 'GET':
        user = request.user;
        data = [];
        for topic in Topic.objects.all():
            row = {};
            topicSerializer = TopicNestedSerializer(topic)
            topicSerializer.Meta.depth = 1;
            #row['topic'] = topicSerializer.data;
            user_visits = topic.visits.filter(user=user)
            visitSerializer = VisitSerializer(user_visits, many=True)
            #visitSerializer.Meta.depth = 1;
            row['visit_count'] = len(user_visits);
            if row['visit_count'] > 0:
                row['last_visit'] = user_visits.order_by('-visit_date')[0].visit_date
            else:
                # Never visited: fall back to the topic's creation time.
                row['last_visit'] = topic.created_at
            neighbor_visits = Visit.objects.filter(user=user, topic__relates_to__topic_to=topic)
            row['neighbor_visits_count'] = len(neighbor_visits);
            if row['neighbor_visits_count'] > 0:
                row['last_neighbor_visit'] = neighbor_visits.order_by('-visit_date')[0].visit_date;
            else:
                row['last_neighbor_visit'] = topic.created_at
            row['post_count'] = len(topic.posts.filter(user=user))
            row['like_count'] = len(topic.posts.filter(votes__user=user))
            # Same scoring formula as getRecommendedTopics().
            row['relevance_score'] = 5*row['neighbor_visits_count'] - (timezone.now()-row['last_neighbor_visit']).total_seconds()/3600
            row['recommendation'] = row['relevance_score'] + topic.hotness
            data.append(row)
        print(data)
        return Response(data)
#@csrf_exempt
#@api_view(['PUT'])
#def post_downvote(request, pk):
# try:
# post = Post.objects.get(pk=pk)
# except Post.DoesNotExist:
# return Response(status=status.HTTP_404_NOT_FOUND)
# if request.method == 'PUT':
# post.negative_reaction_count += 1
# post.save()
# serializer = PostSerializer(post)
# return Response(serializer.data)
@api_view(['PATCH'])
def update_post(request, pk):
    """
    A view to update a post given its id.
    The authenticated user (or the user named by `user_id` in the request
    body) must be the post's owner.

    Replaces the post's content and rebuilds its tag set from the request.
    Returns 200 on success, 403 when the post is missing or owned by
    someone else.
    """
    data = JSONParser().parse(request)
    if request.method == 'PATCH':
        if data['user_id']:
            user = User.objects.get(id=data['user_id'])
        else:
            user = request.user
        # BUG FIX: filter().first() returns None rather than raising
        # Post.DoesNotExist, so the old try/except never fired and a missing
        # or foreign post crashed below with AttributeError on None.
        postObject = Post.objects.filter(id=pk, user=user).first()
        if postObject is None:
            content = {'user forbidden': 'you should be user of the requested post.'}
            return Response(content, status=status.HTTP_403_FORBIDDEN)
        postObject.content = data['content']
        postObject.tags.clear()
        for tag in data["tags"]:
            if len(tag) > 0:
                if tag['name'] == '':
                    continue
                try:
                    tagObject = Tag.objects.get(wikidataID=tag['wikidataID'])
                except ObjectDoesNotExist:
                    tagObject = Tag.objects.create(wikidataID=tag['wikidataID'], name=tag['name'])
                except MultipleObjectsReturned:
                    return HttpResponse("Multiple tags exist for." + tag + " Invalid State.")
                # Deduplicate hidden tags before storing.
                unique_hidden_tags = list(set(tag['hidden_tags']))
                if unique_hidden_tags:
                    tagObject.hidden_tags = unique_hidden_tags
                tagObject.save()
                postObject.tags.add(tagObject)
        postObject.save()
        return Response(status=status.HTTP_200_OK)
@api_view(['PUT'])
def relation_upvote(request, pk):
    """
    Increment the positive reaction count of a relation and return its
    serialized representation; 404 when the relation does not exist.
    """
    try:
        relation = Relation.objects.get(pk=pk)
    except Relation.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'PUT':
        relation.positive_reaction_count += 1
        relation.save()
        return Response(RelationSerializer(relation).data)
@api_view(['PUT'])
def relation_downvote(request, pk):
    """
    Increment the negative reaction count of a relation and return its
    serialized representation; 404 when the relation does not exist.
    """
    try:
        relation = Relation.objects.get(pk=pk)
    except Relation.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'PUT':
        relation.negative_reaction_count += 1
        relation.save()
        return Response(RelationSerializer(relation).data)
@api_view(['GET'])
def wikidata_search(request, str):
    """
    A view to search a string in wikidata via the wbsearchentities API.

    NOTE(review): the parameter name `str` shadows the builtin, and the
    value is concatenated into the URL unescaped — presumably the URL
    route only admits safe characters; confirm, or urlencode the term.
    """
    url_head = 'https://www.wikidata.org/w/api.php?action=wbsearchentities&search='
    url_tail = '&language=en&format=json'
    if request.method == 'GET':
        r = requests.get(url_head+str+url_tail);
        return Response(r.json()['search'])
        #print r
@api_view(['GET'])
def topic_get_hot(request, limit):
    """
    A view to get the top <limit> hottest topics.
    The hotness depends on the time the topic is created,
    the number of posts, views and likes of the topic.
    A limit of 0 returns all topics.
    """
    if request.method == 'GET':
        all_topics = Topic.objects.all()
        if int(limit) == 0:
            hot_topics = sorted(all_topics, key=lambda t: -t.hotness)
        else:
            hot_topics = sorted(all_topics, key=lambda t: -t.hotness)[:int(limit)]
        #hot_topics = Topic.objects.order_by('hotness')[:5]
        # NOTE(review): mutating Meta.depth on these serializer classes
        # affects every view that shares them, not just this response.
        TopicNestedSerializer.Meta.depth = 2
        RelationSerializer.Meta.depth = 1
        serializer = TopicNestedSerializer(hot_topics, many=True)
        return Response(serializer.data)
@api_view(['GET'])
def post_get_recent(request, limit):
    """
    A view to get the top <limit> most recent posts, newest first,
    serialized with one level of nesting.
    """
    # Fixed the misspelled parameter name (`requst`); the view contract is
    # unchanged since Django passes the request object positionally.
    if request.method == 'GET':
        recent_posts = Post.objects.order_by('-created_at')[:int(limit)]
        TopicNestedSerializer.Meta.depth = 1
        PostNestedSerializer.Meta.depth = 1
        serializer = PostNestedSerializer(recent_posts, many=True)
        return Response(serializer.data)
@api_view(['GET'])
def wikidata_query(request, str):
    """
    A view to get the wikidata relations of a tag.

    Runs a pre-encoded SPARQL query against query.wikidata.org with the
    entity id spliced in twice (once for the identity row, once for the
    property join). NOTE(review): `str` shadows the builtin and is
    interpolated unescaped — presumably the route restricts it to an
    entity id; confirm.
    """
    url_head = 'https://query.wikidata.org/sparql?query=PREFIX%20entity:%20<http://www.wikidata.org/entity/>%20SELECT%20?propUrl%20?propLabel%20?valUrl%20?valLabel%20?picture%20WHERE%20{%20hint:Query%20hint:optimizer%20%27None%27%20.%20{%20BIND(entity:';
    url_second = '%20AS%20?valUrl)%20.%20BIND("N/A"%20AS%20?propUrl%20)%20.%20BIND("identity"@en%20AS%20?propLabel%20)%20.%20}%20UNION%20{%20entity:';
    url_tail = '%20?propUrl%20?valUrl%20.%20?property%20?ref%20?propUrl%20.%20?property%20a%20wikibase:Property%20.%20?property%20rdfs:label%20?propLabel%20}%20?valUrl%20rdfs:label%20?valLabel%20FILTER%20(LANG(?valLabel)%20=%20%27en%27)%20.%20OPTIONAL{%20?valUrl%20wdt:P18%20?picture%20.}%20FILTER%20(lang(?propLabel)%20=%20%27en%27%20)%20}&format=json'
    if request.method == 'GET':
        r = requests.get(url_head+str+url_second+str+url_tail);
        return Response(r.json()['results']['bindings'])
        #print r
@api_view(['POST'])
def search_by_tags(request):
    """
    A view to get the search the posts and topics by wikidata tags.

    Request body: `query` (free-text substring) and `tags` (wikidata ids).
    Results combine tag matches, text matches, and the direct relation
    neighborhood of every matched topic.
    """
    resultTopics = []
    resultPosts = []
    if request.method == 'POST':
        data = request.data
        print(data)
        search_query = data['query']
        data_tags = list(set(data['tags']))
        print(data_tags)
        tagObjects = []
        if len(data_tags) > 0:
            # Tags whose hidden_tags overlap the request, OR tags matching
            # every requested wikidata id at once.
            tagObjects = Tag.objects.filter(hidden_tags__overlap=data_tags) | Tag.objects.filter(reduce(operator.and_, (Q(wikidataID=tag_id) for tag_id in data_tags)))
        for tagObject in tagObjects:
            print("LOL")
            tag_topics = tagObject.topics.all()
            tag_posts = tagObject.posts.all()
            for topic in tag_topics:
                if topic not in resultTopics:
                    resultTopics.append(topic)
            for post in tag_posts:
                if post not in resultPosts:
                    resultPosts.append(post)
        # for tag in data["tags"]:
        #     try:
        #         tagObjects = Tag.objects.filter(wikidataID=tag)
        #     except Tag.DoesNotExist:
        #         continue;
        #     for tagObject in tagObjects:
        #         tag_topics = tagObject.topics.all()
        #         tag_posts = tagObject.posts.all()
        #         for topic in tag_topics:
        #             if topic not in resultTopics:
        #                 resultTopics.append(topic)
        #         for post in tag_posts:
        #             if post not in resultPosts:
        #                 resultPosts.append(post)
        print(resultTopics);
        print(resultPosts);
        # Free-text matches on topic names and post contents.
        query_topics = Topic.objects.filter(name__icontains=search_query)
        query_posts = Post.objects.filter(content__icontains=search_query)
        for topic in query_topics:
            if topic not in resultTopics:
                resultTopics.append(topic)
        for post in query_posts:
            if post not in resultPosts:
                resultPosts.append(post)
        # Expand results with topics directly related to any matched topic
        # (the list grows while iterating, so neighbors of neighbors are
        # included as well).
        all_relations = Relation.objects.all()
        for topic in resultTopics:
            for relation in all_relations:
                if (topic == relation.topic_from) and (relation.topic_to not in resultTopics):
                    resultTopics.append(relation.topic_to)
                if (topic == relation.topic_to) and (relation.topic_from not in resultTopics):
                    resultTopics.append(relation.topic_from)
        TopicSerializer.Meta.depth = 1
        PostNestedSerializer.Meta.depth = 1
        topicSerializer = TopicNestedSerializer(resultTopics, many=True)
        #topicSerializer.Meta.depth = 1
        postSerializer = PostNestedSerializer(resultPosts, many=True)
        #postSerializer.Meta.depth = 1
        return Response({'topics':topicSerializer.data, 'posts':postSerializer.data})
def index(request):
    """
    Main page of cocomapapp: renders the five most recently updated
    topics and one random topic, each serialized to JSON for the template.
    """
    template = loader.get_template('global.html')
    recent_json = serializers.serialize("json", Topic.objects.order_by('-updated_at')[:5])
    random_json = serializers.serialize("json", Topic.objects.order_by('?')[:1])
    page_context = {
        'hot_topics': recent_json,
        'random_topic': random_json,
        'request': request,
    }
    return HttpResponse(template.render(page_context, request))
# def login(request):
# if request.method =='POST':
# form = LoginForm(request.POST)
# if form.is_valid():
# user = User()
# user.email = form.cleaned_data['email']
# user.password = form.cleaned_data['password']
# checkUser = User.objects.get(email=user.email)
# if checkUser == User.DoesNotExist:
# return HttpResponseRedirect('/cocomapapp/login')
# if checkUser.password == user.password:
# request.session['username'] = checkUser.first_name
# return HttpResponseRedirect('/cocomapapp/')
# return HttpResponseRedirect('/cocomapapp/login')
# else:
# template = loader.get_template('login.html')
# registerForm = RegisterForm()
# loginForm = LoginForm()
# context = {
# 'loginForm': loginForm,
# 'registerForm': registerForm,
# }
# return HttpResponse(template.render(context, request))
# def signup(request):
# if request.method =='POST':
# form = RegisterForm(request.POST)
# if form.is_valid():
# newuser = User()
# newuser.email = form.cleaned_data['email']
# newuser.first_name = form.cleaned_data['first_name']
# newuser.last_name = form.cleaned_data['last_name']
# newuser.password = form.cleaned_data['password']
# newuser.save()
# return HttpResponseRedirect('/cocomapapp/login')
# else:
# template = loader.get_template('signup.html')
# registerForm = RegisterForm()
# loginForm = LoginForm()
# context = {
# 'loginForm': loginForm,
# 'registerForm': registerForm,
# }
# return HttpResponse(template.render(context, request))
@csrf_exempt
def show_topic(request, id):
    """
    A view that shows a topic's page, including all posts written in that
    topic. A POST request first adds a new post (with optional
    comma-separated wikidata tag ids from the form) and then renders.
    """
    template = loader.get_template('topic.html')
    if request.method == "POST":
        try:
            user = User.objects.get(username=request.user)
        except ObjectDoesNotExist:
            return HttpResponse("You should login to post!")
        requested_topic = Topic.objects.get(id=id)
        postObject = Post.objects.create(user_id=user.id, topic_id=requested_topic.id, content=request.POST.get("content", ""))
        tags = request.POST.get("tags", "").split(",")
        for tag in tags:
            if len(tag) > 0:
                try:
                    tagObject = Tag.objects.get(wikidataID=tag)
                except ObjectDoesNotExist:
                    tagObject = Tag.objects.create(wikidataID=tag, name='Unknown')
                except MultipleObjectsReturned:
                    return HttpResponse("Multiple tags exist for." + tag + " Invalid State.")
                # BUG FIX: `tag` is a plain string from split(","), so the old
                # tag['hidden_tags'] lookup raised TypeError on every tagged
                # form post. Form posts carry no hidden-tag data; just attach.
                tagObject.save()
                postObject.tags.add(tagObject)
    try:
        topic = Topic.objects.get(id=id)
        TopicNestedSerializer.Meta.depth = 1
        serialized_topic = TopicNestedSerializer(topic)
        topic_json = JSONRenderer().render(serialized_topic.data)
    except ObjectDoesNotExist:
        return HttpResponse("This topic doesn't exists!")
    hot_topics = Topic.objects.order_by('-updated_at')[:5]
    serialized_hot_topics = TopicNestedSerializer(hot_topics, many=True)
    hot_topics_json = JSONRenderer().render(serialized_hot_topics.data)
    context = {
        'topic': topic_json,
        'hot_topics': hot_topics_json
    }
    return HttpResponse(template.render(context, request))
@csrf_exempt
def add_topic(request):
    """
    A view that helps user to add new topics.

    GET renders the topic-add page with all existing topics. POST (JSON
    body) creates a topic with its tags, relations to existing topics,
    and an optional first post.
    """
    template = loader.get_template('topicAdd.html')
    try:
        topic = serializers.serialize("json", Topic.objects.filter())
    except ObjectDoesNotExist:
        return HttpResponse("This topic doesn't exists!")
    context = {
        'topics': topic
    }
    if request.method == "POST":
        data = JSONParser().parse(request)
        # Add topic to database.
        try:
            # Topic names are unique per this check: bail out if taken.
            Topic.objects.get(name=data["name"])
            print("topic exists")
            return HttpResponse("This topic exists")
        except ObjectDoesNotExist:
            try:
                user = User.objects.get(username=request.user)
            except ObjectDoesNotExist:
                return JsonResponse({'status':'false','message':'You should login to create a topic!'}, status=401)
            name = data["name"]
            topicObject = Topic.objects.create(name=name, user=user)
            for tag in data["tags"]:
                tag_name = tag['label']
                if tag_name == '':
                    continue
                tag_wiki_id = tag['id']
                try:
                    tagObject = Tag.objects.get(wikidataID=tag_wiki_id)
                except ObjectDoesNotExist:
                    tagObject = Tag.objects.create(name=tag_name, wikidataID=tag_wiki_id)
                except MultipleObjectsReturned:
                    return HttpResponse("Multiple tags exist for." + tag + " Invalid State.")
                #hidden tags
                # Deduplicate hidden tags before storing.
                unique_hidden_tags = list(set(tag['hidden_tags']))
                if unique_hidden_tags:
                    tagObject.hidden_tags = unique_hidden_tags
                # for hidden_tag in unique_hidden_tags:
                #     try:
                #         hiddenTagObject = Tag.objects.get(wikidataID=hidden_tag)
                #     except ObjectDoesNotExist:
                #         hiddenTagObject = Tag.objects.create(wikidataID=hidden_tag, hidden=True)
                #     hiddenTagObject.save()
                tagObject.save()
                topicObject.tags.add(tagObject)
            context = {
            }
            # Add relationship to database.
            # NOTE(review): `relates_to` is assigned but unused — the loop
            # reads data["relates_to"] directly.
            relates_to = data["relates_to"]
            for relation in data["relates_to"]:
                if relation['topic_id'] == '':
                    continue
                try:
                    relatedTopicObject = Topic.objects.get(pk=relation['topic_id'])
                    label = relation['rel_name']
                    relationObject = Relation.objects.create(topic_from=topicObject, topic_to=relatedTopicObject, label=label)
                except ObjectDoesNotExist:
                    print("error")
                    return HttpResponse("Related topic does not exist");
                except MultipleObjectsReturned:
                    print("error")
                    return HttpResponse("This topic exists")
            # End of add relationship to database.
            # Adding a post to new created topic
            if data["postAdd"] == True:
                postStuff = data["post"]
                content = postStuff["post_content"]
                postObject = Post.objects.create(content=content, user=user, topic=topicObject)
                for tag in postStuff["post_tags"]:
                    if len(tag)>0:
                        if tag['label'] == '':
                            continue
                        try:
                            tagObject = Tag.objects.get(wikidataID=tag['id'])
                        except ObjectDoesNotExist:
                            tagObject = Tag.objects.create(wikidataID=tag['id'], name=tag['label'])
                        except MultipleObjectsReturned:
                            return HttpResponse("Multiple tags exist for." + tag + " Invalid State.")
                        unique_hidden_tags = list(set(tag['hidden_tags']))
                        if unique_hidden_tags:
                            tagObject.hidden_tags = unique_hidden_tags
                        tagObject.save()
                        postObject.tags.add(tagObject)
            # End of adding a post to new created topic
            return HttpResponse(template.render(context, request))
    return HttpResponse(template.render(context, request))
@csrf_exempt
def add_post(request, id):
    """
    A view that adds a post to a topic.

    POST body (JSON): topic_id, content, tags (each tag: id, label,
    hidden_tags). Renders the topic page afterwards.
    """
    template = loader.get_template('topic.html')
    if request.method == "POST":
        data = JSONParser().parse(request)
        try:
            user = User.objects.get(username=request.user)
        except ObjectDoesNotExist:
            return HttpResponse("You should login to post!")
        # NOTE(review): the post is attached to data["topic_id"], while the
        # page rendered below is for the URL's `id` — confirm they always match.
        requested_topic = Topic.objects.get(id=data["topic_id"])
        postObject = Post.objects.create(user_id=user.id, topic_id=requested_topic.id,content=data["content"])
        for tag in data["tags"]:
            if len(tag)>0:
                if tag['label'] == '':
                    continue
                try:
                    tagObject = Tag.objects.get(wikidataID=tag['id'])
                except ObjectDoesNotExist:
                    tagObject = Tag.objects.create(wikidataID=tag['id'], name=tag['label'])
                except MultipleObjectsReturned:
                    return HttpResponse("Multiple tags exist for." + tag + " Invalid State.")
                # Deduplicate hidden tags before storing.
                unique_hidden_tags = list(set(tag['hidden_tags']))
                if unique_hidden_tags:
                    tagObject.hidden_tags = unique_hidden_tags
                tagObject.save()
                postObject.tags.add(tagObject)
    try:
        topic = Topic.objects.get(id=id)
        serialized_topic = TopicNestedSerializer(topic)
        topic_json = JSONRenderer().render(serialized_topic.data)
    except ObjectDoesNotExist:
        return HttpResponse("This topic doesn't exists!")
    hot_topics = Topic.objects.order_by('-updated_at')[:5]
    serialized_hot_topics = HotTopicsSerializer(hot_topics, many=True)
    hot_topics_json = JSONRenderer().render(serialized_hot_topics.data)
    context = {
        'topic': topic_json,
        'hot_topics': hot_topics_json
    }
    return HttpResponse(template.render(context, request))
@csrf_exempt
def search(request):
    """
    Render the search-result page; the context is a static placeholder.
    """
    placeholder_context = {
        'asd': 'asd',
    }
    template = loader.get_template('searchresult.html')
    return HttpResponse(template.render(placeholder_context, request))
@csrf_exempt
def add_relation(request, id):
    """
    Render the add-relation page for the topic identified by `id`.
    """
    template = loader.get_template('addRelation.html')
    requested_topic = Topic.objects.get(id=id)
    page_context = {'requested_topic': requested_topic}
    return HttpResponse(template.render(page_context, request))
@csrf_exempt
def infocus(request, id):
    """
    A view that moves the clicked topic to center of the screen.

    NOTE(review): topic_json is built but never placed in the rendered
    context — either the template obtains the topic another way or this
    is an oversight; confirm before relying on it.
    """
    template = loader.get_template('infocus.html')
    try:
        topic = Topic.objects.get(id=id)
        serialized_topic = TopicNestedSerializer(topic)
        topic_json = JSONRenderer().render(serialized_topic.data)
    except ObjectDoesNotExist:
        return HttpResponse("This topic doesn't exists!")
    hot_topics = serializers.serialize("json", Topic.objects.order_by('-updated_at')[:5])
    random_topic = serializers.serialize("json", Topic.objects.order_by('?')[:1])
    context = {
        'hot_topics': hot_topics,
        'random_topic': random_topic,
        'request': request,
    }
    return HttpResponse(template.render(context, request))
# def second_topic(request):
# template = loader.get_template('secondTopic.html')
# context = {
# 'asd': 'asd',
# }
# return HttpResponse(template.render(context, request))
# @csrf_exempt
# def math_topic(request):
# template = loader.get_template('topicMath.html')
# context = {
# 'asd': 'asd',
# }
# return HttpResponse(template.render(context, request))
|
bounswe/bounswe2016group11
|
cocomapapp/views.py
|
Python
|
apache-2.0
| 34,067
|
[
"VisIt"
] |
f5ea5b71e800f4a03bc3b005d43de4a9e43b3406027a4dc31c4353990f52b239
|
# -*- coding: utf-8 -*-
# Begin CVS Header
# $Source: /Volumes/Home/Users/shoops/cvs/copasi_dev/copasi/bindings/python/unittests/Test_CReportDefinition.py,v $
# $Revision: 1.10 $
# $Name: $
# $Author: gauges $
# $Date: 2008/04/21 10:27:07 $
# End CVS Header
# Copyright (C) 2008 by Pedro Mendes, Virginia Tech Intellectual
# Properties, Inc., EML Research, gGmbH, University of Heidelberg,
# and The University of Manchester.
# All rights reserved.
import COPASI
import unittest
from types import *
class Test_CReportDefinition(unittest.TestCase):
    """Unit tests for the Python bindings of COPASI's CReportDefinition.

    Exercises getters/setters for comment, task type, separator,
    title/table flags, precision, key, and the four report-section
    address vectors. Written for Python 2 (uses `types.StringType` etc.
    and the deprecated `assert_` alias).
    """
    def setUp(self):
        # Fresh report definition per test; the comment set here is
        # overwritten by the setter tests.
        self.repdef=COPASI.CReportDefinition("reportDefinition")
        self.repdef.setComment("This is a test")
    def test_getComment(self):
        comment=self.repdef.getComment()
        self.assert_(type(comment)==StringType)
    def test_setComment(self):
        comment="comment"
        self.repdef.setComment(comment)
        self.assert_(self.repdef.getComment()==comment)
    def test_setTaskType(self):
        tt=COPASI.CCopasiTask.mca
        self.repdef.setTaskType(tt)
        self.assert_(self.repdef.getTaskType()==tt)
    def test_getTaskType(self):
        tt=self.repdef.getTaskType()
        self.assert_(type(tt)==IntType)
    def test_setSeparator(self):
        text="zzz"
        sep=COPASI.CCopasiReportSeparator(text)
        self.repdef.setSeparator(sep)
        self.assert_(self.repdef.getSeparator().getStaticString()==text)
    def test_getSeparator(self):
        sep=self.repdef.getSeparator()
        self.assert_(sep.__class__==COPASI.CCopasiReportSeparator)
    def test_getTitle(self):
        v=self.repdef.getTitle()
        self.assert_(type(v)==BooleanType)
    def test_setTitle(self):
        # Round-trip both boolean values.
        v=True
        self.repdef.setTitle(v)
        self.assert_(self.repdef.getTitle()==v)
        v=False
        self.repdef.setTitle(v)
        self.assert_(self.repdef.getTitle()==v)
    def test_isTable(self):
        v=self.repdef.isTable()
        self.assert_(type(v)==BooleanType)
    def test_setIsTable(self):
        # Round-trip both boolean values.
        v=True
        self.repdef.setIsTable(v)
        self.assert_(self.repdef.isTable()==v)
        v=False
        self.repdef.setIsTable(v)
        self.assert_(self.repdef.isTable()==v)
    def test_getPrecision(self):
        p=self.repdef.getPrecision()
        self.assert_(type(p)==IntType)
    def test_setPrecision(self):
        p=13
        self.repdef.setPrecision(p)
        self.assert_(self.repdef.getPrecision()==p)
    def test_getKey(self):
        key=self.repdef.getKey()
        self.assert_(type(key)==StringType)
    def test_getTableAddr(self):
        a=self.repdef.getTableAddr()
        self.assert_(a.__class__==COPASI.ReportItemVector)
    def test_getFooterAddr(self):
        a=self.repdef.getFooterAddr()
        self.assert_(a.__class__==COPASI.ReportItemVector)
    def test_getHeaderAddr(self):
        a=self.repdef.getHeaderAddr()
        self.assert_(a.__class__==COPASI.ReportItemVector)
    def test_getBodyAddr(self):
        a=self.repdef.getBodyAddr()
        self.assert_(a.__class__==COPASI.ReportItemVector)
def suite():
    """Assemble a TestSuite containing every Test_CReportDefinition test."""
    test_names = (
        'test_getComment',
        'test_setComment',
        'test_getTaskType',
        'test_setTaskType',
        'test_getSeparator',
        'test_setSeparator',
        'test_getTitle',
        'test_setTitle',
        'test_isTable',
        'test_setIsTable',
        'test_getPrecision',
        'test_setPrecision',
        'test_getKey',
        'test_getTableAddr',
        'test_getFooterAddr',
        'test_getHeaderAddr',
        'test_getBodyAddr',
    )
    return unittest.TestSuite(
        Test_CReportDefinition(name) for name in test_names
    )
if(__name__ == '__main__'):
    # Run the suite with per-test (verbose) output when executed directly.
    unittest.TextTestRunner(verbosity=2).run(suite())
|
jonasfoe/COPASI
|
copasi/bindings/python/unittests/Test_CReportDefinition.py
|
Python
|
artistic-2.0
| 3,580
|
[
"COPASI"
] |
9869216fa80c5c5b8467f8c2d0954f3baa205afbcd65b3691f046552e3b7f821
|
#!/usr/bin/env python
#pylint: disable=missing-docstring
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
import os
import unittest
import mooseutils
import MooseDocs
from MooseDocs.common import nodes
from MooseDocs.testing import LogTestCase
class TestNodes(LogTestCase):
    """
    Tests the markdown conversion node objects.

    NOTE(review): testSyntaxNode/testObjectNode write and delete stub
    markdown files under MooseDocs.ROOT_DIR as a side effect.
    """
    def testNodeCore(self):
        # Parent/child wiring, full_name path building, and findall filters.
        root = nodes.NodeCore('')
        a = nodes.NodeCore('a', parent=root)
        b = nodes.NodeCore('b', parent=a)
        self.assertEqual(root.name, '')
        self.assertEqual(a.name, 'a')
        self.assertEqual(b.name, 'b')
        self.assertIsNone(root.parent)
        self.assertEqual(a.parent, root)
        self.assertEqual(b.parent, a)
        self.assertEqual(root.full_name, '')
        self.assertEqual(a.full_name, '/a')
        self.assertEqual(b.full_name, '/a/b')
        c = nodes.NodeCore('c', parent=b)
        self.assertEqual(c.findall(), [root,a,b,c])
        self.assertEqual(c.findall('/b'), [b])
        f = lambda n: 'a' in n.full_name
        self.assertEqual(c.findall(filter_=f), [a,b,c])
    def testMarkdownNode(self):
        node = nodes.MarkdownNode('foo', content='bar')
        self.assertEqual(node.name, 'foo')
        self.assertEqual(node.content, 'bar')
    def testDirectoryNode(self):
        node = nodes.DirectoryNode('foo')
        self.assertEqual(node.name, 'foo')
    def testMarkdownFileNodeBase(self):
        # Base class: path helpers work, abstract properties raise.
        node = nodes.MarkdownFileNodeBase('foo', 'the/base/dir')
        self.assertEqual(node.basename, os.path.join(MooseDocs.ROOT_DIR, 'the/base/dir/foo'))
        self.assertEqual(node.destination, 'foo/index.html')
        with self.assertRaises(NotImplementedError) as e:
            node.filename
        self.assertEqual(str(e.exception), "The 'filename' property must be defined.")
        with self.assertRaises(NotImplementedError) as e:
            node.content
        self.assertEqual(str(e.exception), "The 'filename' property must be defined.")
        node2 = nodes.MarkdownFileNodeBase('bar', 'the/base/dir', parent=node)
        self.assertEqual(node2.destination, 'foo/bar/index.html')
    def testMarkdownFileIndexNode(self):
        node = nodes.MarkdownFileIndexNode('foo', 'the/base/dir')
        self.assertEqual(node.filename,
                         os.path.join(MooseDocs.ROOT_DIR, 'the/base/dir/foo/index.md'))
    def testMarkdownFilePageNode(self):
        node = nodes.MarkdownFilePageNode('foo', 'the/base/dir')
        self.assertEqual(node.filename,
                         os.path.join(MooseDocs.ROOT_DIR, 'the/base/dir/foo.md'))
    def testSyntaxNodeBase(self):
        # hidden flag validation plus abstract markdown/check behaviour.
        node = nodes.SyntaxNodeBase('foo')
        self.assertFalse(node.hidden)
        node.hidden = True
        self.assertTrue(node.hidden)
        with self.assertRaises(TypeError) as e:
            node.hidden = 'foo'
        self.assertEqual(str(e.exception),
                         'The supplied value must be a boolean.')
        with self.assertRaises(NotImplementedError) as e:
            node.markdown('foo')
        self.assertEqual(str(e.exception),
                         "The 'markdown' method must return the expected markdown filename.")
        node.hidden = False
        with self.assertRaises(NotImplementedError) as e:
            node.check('foo')
        self.assertEqual(str(e.exception),
                         "The 'markdown' method must return the expected markdown filename.")
        self.assertEqual(node.groups, dict())
        self.assertEqual(node.syntax(), [])
        self.assertEqual(node.objects(), [])
        self.assertEqual(node.actions(), [])
    def testSyntaxNode(self):
        # Stub-page generation life cycle: missing -> generated -> stale.
        site = os.path.join('python', 'MooseDocs', 'tests', 'common', 'nodes', 'site')
        tmp = os.path.join(MooseDocs.ROOT_DIR, site, 'foo', 'index.md')
        if os.path.exists(tmp):
            os.remove(tmp)
        root = nodes.SyntaxNode('')
        node = nodes.SyntaxNode('foo', parent=root)
        self.assertEqual(node.markdown(site), tmp)
        self.assertEqual(root.syntax(), [node])
        self.assertEqual(root.objects(), [])
        self.assertEqual(root.actions(), [])
        # Un-documented, no file
        node.check(site)
        self.assertInLogError("No documentation for /foo, documentation")
        # Generated file
        node.check(site, generate=True)
        self.assertInLogInfo("Creating stub page for")
        self.assertTrue(os.path.exists(tmp))
        # Check content
        with open(tmp, 'r') as fid:
            content = fid.read()
        self.assertIn('!syntax objects /foo', content)
        self.assertIn('!syntax actions /foo', content)
        self.assertIn('!syntax subsystems /foo', content)
        # Un-documented, file exists
        node.check(site)
        self.assertInLogError("A MOOSE generated stub page")
    def testObjectNode(self):
        # MooseObjectNode/ActionNode built from a raw item dict.
        item = dict()
        item['description'] = 'description'
        item['parameters'] = {'param':1}
        item['file_info'] = {'some/path/FrogApp.C': 52}
        item['class'] = 'FrogFoo'
        site = os.path.join('python', 'MooseDocs', 'tests', 'common', 'nodes', 'site')
        tmp = os.path.join(MooseDocs.ROOT_DIR, site, 'frog', 'foo.md')
        if os.path.exists(tmp):
            os.remove(tmp)
        root = nodes.SyntaxNode('')
        node = nodes.MooseObjectNode('foo', item, parent=root)
        action = nodes.ActionNode('action', item, parent=root)
        self.assertEqual(root.syntax(), [])
        self.assertEqual(root.objects(), [node])
        self.assertEqual(root.actions(), [action])
        self.assertEqual(node.class_name, 'FrogFoo')
        self.assertEqual(node.description, 'description')
        self.assertEqual(node.parameters, {'param':1})
        self.assertEqual(node.markdown(site), tmp)
        self.assertEqual(node.groups, {'frog':'Frog'})
        node.check(site, generate=True)
        self.assertInLogInfo("Creating stub page for")
        self.assertTrue(os.path.exists(tmp))
        # Check content
        with open(tmp, 'r') as fid:
            content = fid.read()
        self.assertIn('!syntax description /foo', content)
        self.assertIn('!syntax parameters /foo', content)
        self.assertIn('!syntax inputs /foo', content)
        self.assertIn('!syntax children /foo', content)
if __name__ == '__main__':
    # Discover and run the tests in this module with verbose output.
    unittest.main(verbosity=2)
|
liuwenf/moose
|
python/MooseDocs/tests/common/nodes/test_nodes.py
|
Python
|
lgpl-2.1
| 7,672
|
[
"MOOSE"
] |
9481dd80cd8fa0d6ef4fe8707acb51304de64b340a52b446b94a94d73533a268
|
#***********************************************************************
# This code is part of pyCMPL
#
# Copyright (C) 2013
# Mike Steglich - Technical University of Applied Sciences
# Wildau, Germany
#
# pyCMPL is a project of the Technical University of
# Applied Sciences Wildau and the Institute for Operations Research
# and Business Management at the Martin Luther University
# Halle-Wittenberg.
# Please visit the project homepage <www.coliop.org>
#
# pyCMPL is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyCMPL is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
#**********************************************************************
#!/usr/bin/python
from __future__ import division
from math import *
from CmplException import *
from CmplTools import *
#*************** CmplSet *****************************************
class CmplSet(object):
    """A CMPL index set.

    Supports four kinds (tracked in ``__type``):
      0 -- enumeration set (flat list of values),
      1 -- tuple set (list of lists/tuples of length ``rank``),
      2 -- algorithmic range ``n..m``,
      3 -- algorithmic range ``n(k)m`` with increment/decrement ``k``.
    """
    #*********** constructor ********
    def __init__(self, name, rank=1):
        self.__name = ""
        self.__type = 0 # 0 enumeration set - 1 tuple set - 2 alg set n..m - 3 alg set n(k)m
        self.__valueList = []
        self.__len = 0
        if type(name) != str:
            raise CmplException("not a valid name for set: " + str(name) )
        elif CmplTools.stringContainsWhitespace(name):
            raise CmplException("set name " + str(name) + " contains whitespaces")
        else:
            self.__name = name
        self.__rank=rank
    #*********** End constructor ****
    #*********** count **************
    def count(self):
        """Return the number of elements the set describes.

        NOTE(review): true division is active (``from __future__ import
        division`` at file top), so type-0 counts can be floats.
        """
        _count = 0
        if self.__type == 0:
            _count = len(self.__valueList)/self.__rank
        elif self.__type == 1:
            _count = len(self.__valueList)
        elif self.__type == 2:
            _count = self.__valueList[1]-self.__valueList[0]+1
        elif self.__type == 3:
            # number of steps of size |increment| between start and end
            _count = ceil( (fabs( self.__valueList[2] - self.__valueList[0]) +1 ) / fabs(self.__valueList[1]))
        return _count
    #*********** end count **********
    #*********** values *************
    def setValues(self, val1, val2=None, val3=None):
        """Define the set's contents.

        One argument: a list (enumeration set, or tuple set when the
        entries are lists/tuples).  Two arguments: integer range
        ``val1..val2``.  Three arguments: range ``val1(val2)val3`` with
        increment/decrement ``val2``.  Raises CmplException on invalid
        combinations.
        """
        self.__valueList = []
        if val2 == None and val3 == None:
            if not 'LIST' in str(type(val1)).upper():
                #if type(val1) != list:
                raise CmplException("unexpected values for set " + self.__name + " : " + str(val1) + " is not a list")
            else:
                if 'LIST' in str(type(val1[0])).upper() or 'TUPLE' in str(type(val1[0])).upper():
                    #if type(val1[0]) == list or type(val1[0]) == tuple:
                    if len(val1[0])!=self.__rank:
                        raise CmplException("Rank and number of indexing entries for set " + self.__name + " : " + str(val1[0]) + " don't match.")
                    self.__type = 1
                    self.__len = len(val1) / self.__rank
                else:
                    self.__type = 0
                    if self.__rank>1:
                        raise CmplException( "incorrect definition of a tuple set " + self.__name)
                self.__valueList = val1
        elif val2 != None and val3 == None:
            self.__type = 2
            self.__rank = 1
            if type(val1) == int and type(val2) == int:
                if val2 < val1:
                    raise CmplException( "unexpected values for set " + self.__name + " : end value "+ str(val2) + " is less then start value " + str(val1) )
                self.__valueList.append(val1)
                self.__valueList.append(val2)
            else:
                raise CmplException( "unexpected values for set " + self.__name + " : "+ str(val1) + "/" + str(val2) + "is not an valid integer combination")
        else:
            self.__type = 3
            self.__rank = 1
            if type(val1) == int and type(val2) == int and type(val3) == int:
                self.__valueList.append(val1)
                self.__valueList.append(val2)
                self.__valueList.append(val3)
                # direction of the increment must match start/end ordering
                if val2>0:
                    if val3 <= val1:
                        raise CmplException( "unexpected values for set " + self.__name + " : end value "+ str(val3) + " is less than or equal to start value " + str(val1) +" and the increment is positive "+ str(val2) )
                else:
                    if val2<0:
                        if val1 <= val3:
                            raise CmplException( "unexpected values for set " + self.__name + " : end value "+ str(val3) + " is greater than or equal to start value " + str(val1) +" and the increment is negative "+ str(val2) )
                    else:
                        raise CmplException( "unexpected values for the set " + self.__name + " : increment/decrement equals zero " )
            else:
                raise CmplException( "unexpected values for set " + self.__name + " : "+ str(val1) + "/" + str(val2) + "/" + str(val3) + "is not an valid integer combination")
    #*********** end values *********
    #*********** valueList **********
    @property
    def valueList(self):
        # Raw internal representation (list of values or range endpoints).
        return self.__valueList
    #*********** End valueList ******
    #*********** set **********
    @property
    def values(self):
        """Return the set's elements in expanded, user-facing form."""
        # if self.__type == 0 or self.__type == 1:
        #     return self.__valueList
        if self.__type == 0:
            return self.__valueList
        elif self.__type == 1:
            # Rebuild each entry as a real tuple, quoting string members.
            # NOTE(review): uses eval() on a string built from the stored
            # values -- unsafe if the values ever come from untrusted input.
            tmpList=[]
            for c in self.__valueList:
                tmpTuple="("
                for e in range(len(c)):
                    if e>0:
                        tmpTuple=tmpTuple+","
                    if type(c[e])==str:
                        tmpTuple=tmpTuple+"\""+c[e]+"\""
                    else:
                        tmpTuple=tmpTuple+str(c[e])
                tmpTuple=tmpTuple+")"
                tmpList.append(eval(tmpTuple))
            return tmpList
        elif self.__type == 2:
            return range(self.__valueList[0],self.__valueList[1]+1)
        elif self.__type == 3:
            if self.__valueList[1]>0:
                return range(self.__valueList[0],self.__valueList[2]+1,self.__valueList[1])
            else:
                return range(self.__valueList[0],self.__valueList[2]-1,self.__valueList[1])
    #*********** End valueList ******
    #*********** name ***************
    @property
    def name(self):
        return self.__name
    #*********** end name ***********
    #*********** type ***************
    @property
    def type(self):
        return self.__type
    #*********** end type ***********
    #*********** rank ***************
    @property
    def rank(self):
        return self.__rank
    #*********** end rank ***********
    #*********** len ****************
    @property
    def len(self):
        """Number of entries in the expanded set."""
        if self.__type == 0 or self.__type == 1:
            return len(self.__valueList)
        elif self.__type == 2:
            return len(range(self.__valueList[0],self.__valueList[1]+1))
        elif self.__type == 3:
            if self.__valueList[1]>0:
                return len(range(self.__valueList[0],self.__valueList[2]+1,self.__valueList[1]))
            else:
                return len(range(self.__valueList[0],self.__valueList[2]-1,self.__valueList[1]))
    #*********** end len ************
#*************** End CmplSet *****************************************
|
Mangara/ArboralExplorer
|
lib/Cmpl/pyCmpl/lib/pyCmpl/CmplSet.py
|
Python
|
apache-2.0
| 6,901
|
[
"VisIt"
] |
205c1161ebb1699eb92a3fff05f7f3a2afcfa2d02e7e3b7986fc3a705fd0aa2a
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
from stoqlib.gui.editors.fiscaleditor import CfopEditor
from stoqlib.gui.test.uitestutils import GUITest
class TestCfopEditor(GUITest):
    """GUI regression test for the CFOP editor dialog."""

    def test_create(self):
        """A freshly opened CfopEditor must match the stored UI snapshot."""
        cfop_editor = CfopEditor(self.store)
        self.check_editor(cfop_editor, 'editor-cfop-create')
|
andrebellafronte/stoq
|
stoqlib/gui/test/test_cfopeditor.py
|
Python
|
gpl-2.0
| 1,154
|
[
"VisIt"
] |
bb8c43abf652d78dd96bd5639ca5fdcbff129ebee43a377821b8bf4662862810
|
# YASARA PLUGIN
# TOPIC: pKaTool/EM_effect titration interface
# TITLE: Start pKaTool or EM_effect
# AUTHOR: Jens Nielsen
# LICENSE: GPL
# DESCRIPTION: This plugin starts EM_effect or pKaTool
#
# This is a YASARA plugin to be placed in the /plg subdirectory
# Go to www.yasara.com/plugins for documentation and downloads
#
"""
MainMenu after Analyse: _p_Ka/NMR
PullDownMenu : _p_KaTool
Request: StartpKaTool
PullDownMenu : _E_Meffect
Request: StartEMeffect
"""
#
# Add the Yasara directory to the search path
#
#
# Add the Yasara directory to the search path
#
import sys,os
script_location=sys.argv[1]  # YASARA passes the plugin script path as argv[1]
plgdir=os.path.split(script_location)[0]
yasara_dir=os.path.split(plgdir)[0]
sys.path.append(yasara_dir)
sys.path.append(plgdir)
#
# Import Yasara
#
import yasara,string,ftplib,urllib,disk,time
#
# Get the location of the pKaTool/EM_effect
#
# NOTE(review): hard-coded developer path -- pKaTool will not be found on
# other machines; confirm whether this should be made configurable.
pKadir='/home/people/nielsen/lib/development/pKaTool/'
sys.path=[pKadir]+sys.path
if (yasara.request=="StartpKaTool"):
    # NOTE(review): this branch looks broken.  The module is imported as
    # `pkaTool` (lowercase k) but the next line is the bare name `pKaTool`,
    # which is undefined here and would raise NameError; even if it were
    # defined, a bare expression has no effect.  Compare with the
    # EM_effect branch below, which instantiates a class and runs its
    # main loop -- confirm the intended call before changing.
    import pkaTool
    pKaTool
elif (yasara.request=="StartEMeffect"):
    import EM_effect
    EM_effect.EM_effect(Yasara=yasara).mainloop()
# THIS MUST ALWAYS BE THE LAST COMMAND
yasara.plugin.end()
|
dmnfarrell/peat
|
pKaTool/EM_effect_yasara.py
|
Python
|
mit
| 1,142
|
[
"YASARA"
] |
4a6cfd298870ce11f211f6633ec5fb4e1d280728df6daa6f1b611456c92766c4
|
"""
Enterprise Registration tests
"""
from regression.pages.enterprise.enterprise_const import (
ENT_COURSE_TITLE,
ENT_PORTAL_PASSWORD,
ENT_PORTAL_USERNAME
)
from regression.tests.enterprise.ent_test_base import EnterpriseTestBase
class TestEnterpriseRegistration(EnterpriseTestBase):
    """
    Test Enterprise Registration
    """
    def test_enterprise_unlinked_user_registration(self):
        """
        Scenario: To verify that user is able to use enterprise portal to
        register into edX and link accounts
        Given a user does not have an edx account
        When this user logs in to the Enterprise portal
        And clicks on the course enrollment link
        Then the user is taken directly to edx customized logistration
        page
        And user can register here and go to course enrollment page
        """
        # The edX site is visited just to make sure that when user jumps to edX
        # from portal we don't have to handle authentication popup
        self.lms_login.visit()
        # Enterprise portal flow
        self.login_to_ent_portal(
            ENT_PORTAL_USERNAME,
            ENT_PORTAL_PASSWORD)
        self.access_course()
        # Landing on the customized edX logistration page proves the
        # portal handed off correctly; then register a brand-new user.
        self.ent_edx_login.wait_for_page()
        self.register_ent_edx_user()
        # Call the fixture to unlink existing account for the user
        # (registered via addCleanup so it runs even if assertions fail).
        self.addCleanup(self.unlink_account)
        # Verify that user is on course enrollment page and correct course
        # is displayed there
        self.ent_course_enrollment.wait_for_page()
        self.assertEqual(
            ENT_COURSE_TITLE,
            self.ent_course_enrollment.get_course_title()
        )
|
edx/edx-e2e-tests
|
regression/tests/enterprise/test_ent_registration.py
|
Python
|
agpl-3.0
| 1,700
|
[
"VisIt"
] |
fb005aa330e2993de0f1b2906f167b120fafa66cb2859eb8b00dc812bc69d522
|
# Functions for finding random points and orientations.
#
# Written by: Konrad Hinsen
# Last revision: 2000-8-9
#
"""This module defines various random quantities that are useful in
molecular simulations. For obtaining random numbers, it tries to use
the RNG module, which is part of the LLNL package distribution, which
also contains Numerical Python. If RNG is not available, it
uses the random number generators in modules RandomArray (part of
Numerical Python) and whrandom (in the Python standard library).
"""
import Numeric
from Scientific.Geometry import Vector
from Numeric import dot
from Scientific.Geometry.Transformation import Rotation
import ParticleProperties, Units
try:
import RNG
except ImportError:
RNG = None
# Select the random-number backend at import time: the LLNL RNG package
# when available, otherwise the stdlib `random` + Numeric's RandomArray.
# Both branches define the same module API: uniform(), gaussian() and
# initializeRandomNumbersFromTime().  Seeds are fixed at import for
# reproducibility; call initializeRandomNumbersFromTime() to randomize.
if RNG is None:
    # Fallback backend (Python-2 era: whrandom/RandomArray).
    random = __import__('random')
    import whrandom
    from RandomArray import uniform, seed
    seed(1, 1)
    whrandom.seed(1, 1, 1)
    def initializeRandomNumbersFromTime():
        # Zero seeds mean "seed from the clock" for these generators.
        whrandom.seed(0, 0, 0)
        seed(0, 0)
    def gaussian(mean, std, shape=None):
        # Scalar when shape is None, otherwise a Numeric array of that
        # shape filled element by element (normalvariate is scalar-only).
        if shape is None:
            x = random.normalvariate(0., 1.)
        else:
            x = Numeric.zeros(shape, Numeric.Float)
            xflat = Numeric.ravel(x)
            for i in range(len(xflat)):
                xflat[i] = random.normalvariate(0., 1.)
        return mean + std*x
else:
    # RNG backend: keep one persistent generator per distribution.
    _uniform_generator = \
                       RNG.CreateGenerator(-1, RNG.UniformDistribution(0., 1.))
    _gaussian_generator = \
                        RNG.CreateGenerator(-1, RNG.NormalDistribution(0., 1.))
    def initializeRandomNumbersFromTime():
        # Recreate the generators with a time-derived seed (seed 0).
        global _uniform_generator, _gaussian_generator
        _uniform_generator = \
                           RNG.CreateGenerator(0, RNG.UniformDistribution(0., 1.))
        _gaussian_generator = \
                            RNG.CreateGenerator(0, RNG.NormalDistribution(0., 1.))
    def uniform(x1, x2, shape=None):
        # Scalar when shape is None, otherwise an array of that shape.
        if shape is None:
            x = _uniform_generator.ranf()
        else:
            n = Numeric.multiply.reduce(shape)
            x = _uniform_generator.sample(n)
            x.shape = shape
        return x1+(x2-x1)*x
    def gaussian(mean, std, shape=None):
        if shape is None:
            x = _gaussian_generator.ranf()
        else:
            n = Numeric.multiply.reduce(shape)
            x = _gaussian_generator.sample(n)
            x.shape = shape
        return mean+std*x
#
# Random point in a rectangular box centered around the origin
#
def randomPointInBox(a, b = None, c = None):
    """Returns a vector drawn from a uniform distribution within a
    rectangular box with edge lengths |a|, |b|, |c|. If |b| and/or |c|
    are omitted, they are taken to be equal to |a|."""
    edge_b = a if b is None else b
    edge_c = a if c is None else c
    coords = [uniform(-0.5*edge, 0.5*edge) for edge in (a, edge_b, edge_c)]
    return Vector(coords[0], coords[1], coords[2])
#
# Random point in a sphere around the origin.
#
def randomPointInSphere(r):
    """Returns a vector drawn from a uniform distribution within
    a sphere of radius |r|."""
    # Rejection sampling: draw from the bounding cube until the point
    # falls inside the sphere.
    r_squared = r*r
    while True:
        candidate = uniform(-r, r, (3,))
        if dot(candidate, candidate) < r_squared:
            return Vector(candidate)
#
# Random direction (unit vector).
#
def randomDirection():
    """Returns a vector drawn from a uniform distribution on
    the surface of a unit sphere."""
    # Projecting a uniform point in the unit ball onto the surface
    # yields a uniform direction.
    return randomPointInSphere(1.).normal()
def randomDirections(n):
    """Returns a list of |n| vectors drawn from a uniform distribution on
    the surface of a unit sphere. If |n| is negative, return a deterministic
    list of not more than -|n| vectors of unit length (useful for
    testing purposes)."""
    if n < 0:
        # Fixed, reproducible directions for deterministic tests.
        fixed = [Vector(1., 0., 0.), Vector(0., -1., 0.), Vector(0., 0., 1.),
                 Vector(-1., 1., 0.).normal(), Vector(-1., 0., 1.).normal(),
                 Vector(0., 1., -1.).normal(), Vector(1., -1., 1.).normal()]
        return fixed[:-n]
    # Comprehension instead of an append loop; also avoids shadowing the
    # builtin `list`, which the previous implementation did.
    return [randomDirection() for _ in range(n)]
#
# Random rotation.
#
def randomRotation(max_angle = Numeric.pi):
    """Returns a Rotation object describing a random rotation
    with a uniform axis distribution and angles drawn from
    a uniform distribution between -|max_angle| and |max_angle|."""
    axis = randomDirection()
    angle = uniform(-max_angle, max_angle)
    return Rotation(axis, angle)
#
# Random velocity (gaussian)
#
def randomVelocity(temperature, mass):
    """Returns a random velocity vector for a particle of a given
    |mass|, drawn from a Boltzmann distribution for the given
    |temperature|."""
    # Per-component standard deviation sqrt(kT/m) in internal units.
    width = Numeric.sqrt((temperature*Units.k_B)/(mass*Units.amu))
    components = gaussian(0., width, (3,))
    return Vector(components)
#
# Random ParticleVector (gaussian)
#
def randomParticleVector(universe, width):
    """Returns a ParticleVector object in which each vector is
    drawn from a Gaussian distribution with a given |width| centered
    around zero."""
    # 0.577350269189 == 1/sqrt(3): scales the per-component std so that
    # the full 3-component vector has the requested |width|.
    data = gaussian(0., 0.577350269189*width, (universe.numberOfPoints(), 3))
    return ParticleProperties.ParticleVector(universe, data)
#
# Test code
#
if __name__ == '__main__':
    # Smoke test (Python 2 print statements): sample many gaussians and
    # compare the empirical mean/std with the requested ones.
    mean = 1.
    std = 5.
    n = 10000
    values = gaussian(mean, std, (n,))
    m = Numeric.sum(values)/n
    print mean, m
    print std, Numeric.sqrt(Numeric.sum((values-m)**2)/n)
|
fxia22/ASM_xf
|
PythonD/site_python/MMTK/Random.py
|
Python
|
gpl-2.0
| 5,293
|
[
"Gaussian"
] |
955d2752054ef218d230d0113d65a213b9aff1f3d2b991580aefda99b838855c
|
import ocl
import camvtk
import time
import vtk
import datetime
import math
def drawPoints(myscreen, clpoints, ccpoints):
    """Render the CL points, colored by their CC points, as one point cloud."""
    cloud = camvtk.PointCloud(pointlist=clpoints, collist=ccpoints)
    cloud.SetPoints()
    myscreen.addActor(cloud)
def drawFiber(myscreen, f, fibercolor=camvtk.red):
    """Draw every non-empty interval of fiber f as a line segment plus
    endpoint spheres colored by contact-point (CC) type (Python 2 script)."""
    inter = f.getInts()
    print "fiber has ", len(inter) , " intervals"
    for i in inter:
        if not i.empty():
            ip1 = f.point( i.lower )
            ip2 = f.point( i.upper )
            myscreen.addActor( camvtk.Line(p1=(ip1.x,ip1.y,ip1.z),p2=(ip2.x,ip2.y,ip2.z), color=fibercolor) )
            myscreen.addActor( camvtk.Sphere(center=(ip1.x,ip1.y,ip1.z),radius=0.005, color=camvtk.clColor( i.lower_cc) ) )
            myscreen.addActor( camvtk.Sphere(center=(ip2.x,ip2.y,ip2.z),radius=0.005, color=camvtk.clColor( i.upper_cc) ) )
            #cc1 = i.lower_cc
            #cc2 = i.upper_cc
            #myscreen.addActor( camvtk.Sphere(center=(cc1.x,cc1.y,cc1.z),radius=0.005, color=camvtk.lgreen ) )
            #myscreen.addActor( camvtk.Sphere(center=(cc2.x,cc2.y,cc2.z),radius=0.005, color=camvtk.lgreen ) )
            # cutter circle
            #c1 = camvtk.Circle(center=(ip1.x,ip1.y,ip1.z), radius = 0.3/2, color=fibercolor)
            #myscreen.addActor(c1)
            #c2 = camvtk.Circle(center=(ip2.x,ip2.y,ip2.z), radius = 0.3/2, color=fibercolor)
            #myscreen.addActor(c2)
def drawFiber_clpts(myscreen, f, fibercolor=camvtk.red):
    """Like drawFiber, but draw only the interval-endpoint spheres
    (no connecting line); `fibercolor` is currently unused."""
    inter = f.getInts()
    #print "fiber has ", len(inter) , " intervals"
    for i in inter:
        if not i.empty():
            ip1 = f.point( i.lower )
            ip2 = f.point( i.upper )
            #myscreen.addActor( camvtk.Line(p1=(ip1.x,ip1.y,ip1.z),p2=(ip2.x,ip2.y,ip2.z), color=fibercolor) )
            myscreen.addActor( camvtk.Sphere(center=(ip1.x,ip1.y,ip1.z),radius=0.005, color=camvtk.clColor( i.lower_cc) ) )
            myscreen.addActor( camvtk.Sphere(center=(ip2.x,ip2.y,ip2.z),radius=0.005, color=camvtk.clColor( i.upper_cc) ) )
            #cc1 = i.lower_cc
            #cc2 = i.upper_cc
            #myscreen.addActor( camvtk.Sphere(center=(cc1.x,cc1.y,cc1.z),radius=0.005, color=camvtk.pink ) )
            #myscreen.addActor( camvtk.Sphere(center=(cc2.x,cc2.y,cc2.z),radius=0.005, color=camvtk.pink ) )
def yfiber(yvals,t,zh,myscreen):
    """Shoot an x-parallel push-fiber at height zh for every y in yvals
    against triangle t and draw the resulting interval endpoints.

    NOTE(review): relies on the module-global `cutter` created in
    __main__ -- confirm before reusing this function elsewhere.
    """
    for y in yvals:
        f1 = ocl.Point(-0.5,y,zh) # start point of fiber
        f2 = ocl.Point(1.5,y,zh)  # end point of fiber
        f = ocl.Fiber( f1, f2)
        i = ocl.Interval()
        #cutter.vertexPush(f,i,t)
        #cutter.facetPush(f,i,t)
        #cutter.edgePush(f,i,t)
        cutter.pushCutter(f,i,t)
        f.addInterval(i)
        drawFiber_clpts(myscreen, f, camvtk.red)
def xfiber(xvals,t,zh,myscreen):
    """Shoot a y-parallel push-fiber at height zh for every x in xvals
    against triangle t and draw the resulting interval endpoints.

    NOTE(review): relies on the module-global `cutter` created in
    __main__ -- confirm before reusing this function elsewhere.
    """
    for x in xvals:
        f1 = ocl.Point(x,-0.5,zh) # start point of fiber
        f2 = ocl.Point(x,1.5,zh)  # end point of fiber
        f = ocl.Fiber( f1, f2)
        i = ocl.Interval()
        #cutter.vertexPush(f,i,t)
        #cutter.facetPush(f,i,t)
        #cutter.edgePush(f,i,t)
        cutter.pushCutter(f,i,t)
        f.addInterval(i)
        drawFiber_clpts(myscreen, f, camvtk.lblue)
if __name__ == "__main__":
    # Python 2 demo: intersect push-fibers with a single triangle at many
    # z-levels and visualize the cutter-location points with VTK.
    print ocl.revision()
    myscreen = camvtk.VTKScreen()
    # Triangle vertices, also drawn as points and edges for reference.
    a = ocl.Point(0,1,0.3)
    b = ocl.Point(1,0.5,0.0)
    c = ocl.Point(0.1,0.1,0.0)
    myscreen.addActor(camvtk.Point(center=(a.x,a.y,a.z), color=(1,0,1)))
    myscreen.addActor(camvtk.Point(center=(b.x,b.y,b.z), color=(1,0,1)))
    myscreen.addActor(camvtk.Point(center=(c.x,c.y,c.z), color=(1,0,1)))
    myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(c.x,c.y,c.z)) )
    myscreen.addActor( camvtk.Line(p1=(c.x,c.y,c.z),p2=(b.x,b.y,b.z)) )
    myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(b.x,b.y,b.z)) )
    t = ocl.Triangle(b,c,a)
    angle = math.pi/4
    diameter=0.3
    length=5
    # Alternative cutter shapes kept for experimentation:
    #cutter = ocl.BallCutter(diameter, length)
    cutter = ocl.CylCutter(diameter, length)
    #cutter = ocl.BullCutter(diameter, diameter/4, length)
    #cutter = ocl.ConeCutter(diameter, angle, length)
    #cutter = cutter.offsetCutter( 0.1 )
    print "cutter= ", cutter
    print "lengt=", cutter.getLength()
    print "fiber...",
    # NOTE(review): `range` shadows the builtin here; harmless in this
    # script because only xrange is used below, but worth renaming.
    range=2
    Nmax = 100
    yvals = [float(n-float(Nmax)/2)/Nmax*range for n in xrange(0,Nmax+1)]
    xvals = [float(n-float(Nmax)/2)/Nmax*range for n in xrange(0,Nmax+1)]
    zmin = -0.2082
    zmax = 0.3115
    zNmax = 20
    dz = (zmax-zmin)/(zNmax-1)
    zvals=[]
    for n in xrange(0,zNmax):
        zvals.append(zmin+n*dz)
    # One x- and one y-sweep of fibers per z-level.
    for zh in zvals:
        yfiber(yvals,t,zh,myscreen)
        xfiber(xvals,t,zh,myscreen)
    print "done."
    print "rendering...",
    myscreen.camera.SetPosition(0.5, 3, 2)
    myscreen.camera.SetFocalPoint(0.5, 0.5, 0)
    camvtk.drawArrows(myscreen,center=(-0.5,-0.5,-0.5))
    camvtk.drawOCLtext(myscreen)
    myscreen.render()
    w2if = vtk.vtkWindowToImageFilter()
    w2if.SetInput(myscreen.renWin)
    lwr = vtk.vtkPNGWriter()
    lwr.SetInput( w2if.GetOutput() )
    # NOTE(review): lwr.Write() is never called, so no PNG is produced.
    print "done."
    myscreen.iren.Start()
    #raw_input("Press Enter to terminate")
|
davidwusea/opencamlib
|
scripts/fiber_03_onetri_many-z-levels.py
|
Python
|
gpl-3.0
| 5,099
|
[
"VTK"
] |
bf5f1324c2c4e26955e373239abe4ffae8a1b98165abeb633c8028ec0b975362
|
# -*- coding: utf-8 -*-
"""
This is an example of VAE, whose p(z) is a mixture of Gaussian:
.. math::
p(z) = \\sum_{k=1}^K \\pi(k) p_{k}(z)
"""
import functools
import sys
from argparse import ArgumentParser
import tensorflow as tf
from pprint import pformat
from tensorflow.contrib.framework import arg_scope, add_arg_scope
import tfsnippet as spt
from tfsnippet.examples.utils import (MLResults,
save_images_collection,
bernoulli_as_pixel,
bernoulli_flow,
print_with_title)
class ExpConfig(spt.Config):
    """Hyper-parameters for the mixture-prior VAE experiment."""
    # model parameters
    z_dim = 40                  # dimensionality of the latent variable z
    x_dim = 784                 # flattened 28x28 MNIST pixels
    z_logstd_min = -1.          # lower clamp on each prior component's log-std
    n_mixture_components = 3    # number of Gaussians in the prior mixture
    # training parameters
    result_dir = None           # output directory (None -> chosen by MLResults)
    write_summary = False       # whether to write TensorBoard summaries
    max_epoch = 3000
    max_step = None
    batch_size = 128
    l2_reg = 0.0001             # L2 kernel-regularization coefficient
    initial_lr = 0.001
    lr_anneal_factor = 0.5      # lr is multiplied by this on each anneal
    lr_anneal_epoch_freq = 300  # anneal every this many epochs
    lr_anneal_step_freq = None
    # evaluation parameters
    test_n_z = 500              # z samples per x for importance-sampled NLL
    test_batch_size = 128
# Module-level singleton; overridable from the command line in main().
config = ExpConfig()
@spt.global_reuse
@add_arg_scope
def q_net(x, observed=None, n_z=None, is_initializing=False):
    """Build the variational posterior q(z|x) as a BayesianNet.

    x: input batch of flattened images; n_z: number of z samples per x
    (None -> one, no sampling axis); is_initializing: run act_norm in
    data-dependent initialization mode.
    """
    net = spt.BayesianNet(observed=observed)
    normalizer_fn = functools.partial(
        spt.layers.act_norm, initializing=is_initializing)
    # compute the hidden features
    with arg_scope([spt.layers.dense],
                   activation_fn=tf.nn.leaky_relu,
                   normalizer_fn=normalizer_fn,
                   weight_norm=True,
                   kernel_regularizer=spt.layers.l2_regularizer(config.l2_reg)):
        h_x = tf.to_float(x)
        h_x = spt.layers.dense(h_x, 500)
        h_x = spt.layers.dense(h_x, 500)
    # sample z ~ q(z|x): a diagonal Gaussian parameterized by the features
    z_mean = spt.layers.dense(h_x, config.z_dim, name='z_mean')
    z_logstd = spt.layers.dense(h_x, config.z_dim, name='z_logstd')
    z = net.add('z', spt.Normal(mean=z_mean, logstd=z_logstd), n_samples=n_z,
                group_ndims=1)
    return net
@spt.global_reuse
@add_arg_scope
def p_net(observed=None, n_z=None, is_initializing=False):
    """Build the generative model p(z) p(x|z), where p(z) is a mixture
    of Gaussians with trainable component means/log-stds and fixed,
    uniform mixing weights."""
    net = spt.BayesianNet(observed=observed)
    normalizer_fn = functools.partial(
        spt.layers.act_norm, initializing=is_initializing)
    # sample z ~ p(z)
    def make_component(i):
        # One Gaussian component; the log-std is clamped from below by
        # z_logstd_min to keep components from collapsing.
        normal = spt.Normal(
            mean=tf.get_variable('mean_{}'.format(i), shape=[1, config.z_dim],
                                 dtype=tf.float32, trainable=True),
            logstd=tf.maximum(
                tf.get_variable('logstd_{}'.format(i), shape=[1, config.z_dim],
                                dtype=tf.float32, trainable=True),
                config.z_logstd_min
            )
        )
        return normal.expand_value_ndims(1)
    components = [make_component(i) for i in range(config.n_mixture_components)]
    mixture = spt.Mixture(
        # zero logits -> uniform, non-trainable mixing weights pi(k)
        categorical=spt.Categorical(
            logits=tf.zeros([1, config.n_mixture_components])),
        components=components,
        is_reparameterized=True
    )
    z = net.add('z', mixture, n_samples=n_z)
    # compute the hidden features
    with arg_scope([spt.layers.dense],
                   activation_fn=tf.nn.leaky_relu,
                   normalizer_fn=normalizer_fn,
                   weight_norm=True,
                   kernel_regularizer=spt.layers.l2_regularizer(config.l2_reg)):
        h_z = z
        h_z = spt.layers.dense(h_z, 500)
        h_z = spt.layers.dense(h_z, 500)
    # sample x ~ p(x|z): per-pixel Bernoulli parameterized by logits
    x_logits = spt.layers.dense(h_z, config.x_dim, name='x_logits')
    x = net.add('x', spt.Bernoulli(logits=x_logits), group_ndims=1)
    return net
def main():
    """Train and evaluate the mixture-prior VAE on binarized MNIST."""
    # parse the arguments (config values can be overridden on the CLI)
    arg_parser = ArgumentParser()
    spt.register_config_arguments(config, arg_parser, title='Model options')
    spt.register_config_arguments(spt.settings, arg_parser, prefix='tfsnippet',
                                  title='TFSnippet options')
    arg_parser.parse_args(sys.argv[1:])
    # print the config
    print_with_title('Configurations', pformat(config.to_dict()), after='\n')
    # open the result object and prepare for result directories
    results = MLResults(config.result_dir)
    results.save_config(config)  # save experiment settings for review
    results.make_dirs('plotting', exist_ok=True)
    results.make_dirs('train_summary', exist_ok=True)
    # input placeholders
    input_x = tf.placeholder(
        dtype=tf.int32, shape=(None, config.x_dim), name='input_x')
    learning_rate = spt.AnnealingVariable(
        'learning_rate', config.initial_lr, config.lr_anneal_factor)
    # derive the output for initialization (act_norm data-dependent init)
    with tf.name_scope('initialization'), \
            spt.utils.scoped_set_config(spt.settings, auto_histogram=False):
        init_q_net = q_net(input_x, is_initializing=True)
        init_chain = init_q_net.chain(
            p_net, observed={'x': input_x}, is_initializing=True)
        init_lb = tf.reduce_mean(init_chain.vi.lower_bound.elbo())
    # derive the loss and lower-bound for training (single-sample SGVB)
    with tf.name_scope('training'):
        train_q_net = q_net(input_x)
        train_chain = train_q_net.chain(p_net, observed={'x': input_x})
        vae_loss = tf.reduce_mean(train_chain.vi.training.sgvb())
        loss = vae_loss + tf.losses.get_regularization_loss()
    # derive the nll and logits output for testing
    # (importance-sampled log-likelihood over test_n_z z samples)
    with tf.name_scope('testing'):
        test_q_net = q_net(input_x, n_z=config.test_n_z)
        test_chain = test_q_net.chain(
            p_net, latent_axis=0, observed={'x': input_x})
        test_nll = -tf.reduce_mean(test_chain.vi.evaluation.is_loglikelihood())
        test_lb = tf.reduce_mean(test_chain.vi.lower_bound.elbo())
    # derive the optimizer
    with tf.name_scope('optimizing'):
        optimizer = tf.train.AdamOptimizer(learning_rate)
        params = tf.trainable_variables()
        grads = optimizer.compute_gradients(loss, var_list=params)
        # apply gradients after any pending update ops (e.g. normalizers)
        with tf.control_dependencies(
                tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            train_op = optimizer.apply_gradients(grads)
    # derive the plotting function: a 10x10 grid of prior samples
    with tf.name_scope('plotting'):
        plot_p_net = p_net(n_z=100)
        x_plots = tf.reshape(bernoulli_as_pixel(plot_p_net['x']), (-1, 28, 28))
    def plot_samples(loop):
        # Save a sample grid for the current epoch under plotting/.
        with loop.timeit('plot_time'):
            images = session.run(x_plots)
            save_images_collection(
                images=images,
                filename='plotting/{}.png'.format(loop.epoch),
                grid_size=(10, 10),
                results=results
            )
    # prepare for training and testing data
    (x_train, y_train), (x_test, y_test) = \
        spt.datasets.load_mnist(x_shape=[784])
    train_flow = bernoulli_flow(
        x_train, config.batch_size, shuffle=True, skip_incomplete=True)
    test_flow = bernoulli_flow(
        x_test, config.test_batch_size, sample_now=True)
    with spt.utils.create_session().as_default() as session, \
            train_flow.threaded(5) as train_flow:
        spt.utils.ensure_variables_initialized()
        # initialize the network: one batch through the init graph
        # triggers the data-dependent act_norm initialization
        for [x] in train_flow:
            print('Network initialized, first-batch loss is {:.6g}.\n'.
                  format(session.run(init_lb, feed_dict={input_x: x})))
            break
        # train the network
        with spt.TrainLoop(params,
                           var_groups=['q_net', 'p_net'],
                           max_epoch=config.max_epoch,
                           max_step=config.max_step,
                           summary_dir=(results.system_path('train_summary')
                                        if config.write_summary else None),
                           summary_graph=tf.get_default_graph(),
                           early_stopping=False) as loop:
            trainer = spt.Trainer(
                loop, train_op, [input_x], train_flow,
                metrics={'loss': loss},
                summaries=tf.summary.merge_all(spt.GraphKeys.AUTO_HISTOGRAM)
            )
            trainer.anneal_after(
                learning_rate,
                epochs=config.lr_anneal_epoch_freq,
                steps=config.lr_anneal_step_freq
            )
            evaluator = spt.Evaluator(
                loop,
                metrics={'test_nll': test_nll, 'test_lb': test_lb},
                inputs=[input_x],
                data_flow=test_flow,
                time_metric_name='test_time'
            )
            # persist the latest evaluation metrics into the results store
            evaluator.events.on(
                spt.EventKeys.AFTER_EXECUTION,
                lambda e: results.update_metrics(evaluator.last_metrics_dict)
            )
            trainer.evaluate_after_epochs(evaluator, freq=10)
            trainer.evaluate_after_epochs(
                functools.partial(plot_samples, loop), freq=10)
            trainer.log_after_epochs(freq=1)
            trainer.run()
    # print the final metrics and close the results object
    print_with_title('Results', results.format_metrics(), before='\n')
    results.close()
# Script entry point: run the experiment's main() when executed directly.
if __name__ == '__main__':
    main()
|
korepwx/tfsnippet
|
tfsnippet/examples/auto_encoders/mixture_vae.py
|
Python
|
mit
| 9,150
|
[
"Gaussian"
] |
5b064d6e0087e6cff45e5defa3e22d5969dff1e746ee149fb071f41e2417e8c0
|
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
import warnings
from scipy.special import comb
from scipy.misc.doccer import inherit_docstring_from
from scipy import special
from scipy import optimize
from scipy import integrate
from scipy.special import (gammaln as gamln, gamma as gam)
from numpy import (where, arange, putmask, ravel, sum, shape,
log, sqrt, exp, arctanh, tan, sin, arcsin, arctan,
tanh, cos, cosh, sinh, log1p, expm1)
from numpy import polyval, place, extract, any, asarray, nan, inf, pi
import numpy as np
import numpy.random as mtrand
from . import vonmises_cython
from ._tukeylambda_stats import (tukeylambda_variance as _tlvar,
tukeylambda_kurtosis as _tlkurt)
from ._distn_infrastructure import (
rv_continuous, valarray,
_skew, _kurtosis, _lazywhere,
_ncx2_log_pdf, _ncx2_pdf, _ncx2_cdf,
)
from ._constants import _XMIN, _EULER, _ZETA3
# Public API of this module: the *instances* of the distributions defined
# below (the private *_gen classes are intentionally not exported).
__all__ = [
    'ksone', 'kstwobign', 'norm', 'alpha', 'anglit', 'arcsine',
    'beta', 'betaprime', 'bradford', 'burr', 'fisk', 'cauchy',
    'chi', 'chi2', 'cosine', 'dgamma', 'dweibull', 'erlang',
    'expon', 'exponweib', 'exponpow', 'fatiguelife', 'foldcauchy',
    'f', 'foldnorm', 'frechet_r', 'weibull_min', 'frechet_l',
    'weibull_max', 'genlogistic', 'genpareto', 'genexpon', 'genextreme',
    'gamma', 'gengamma', 'genhalflogistic', 'gompertz', 'gumbel_r',
    'gumbel_l', 'halfcauchy', 'halflogistic', 'halfnorm', 'hypsecant',
    'gausshyper', 'invgamma', 'invgauss', 'invweibull',
    'johnsonsb', 'johnsonsu', 'laplace', 'levy', 'levy_l',
    'levy_stable', 'logistic', 'loggamma', 'loglaplace', 'lognorm',
    'gilbrat', 'maxwell', 'mielke', 'nakagami', 'ncx2', 'ncf', 't',
    'nct', 'pareto', 'lomax', 'pearson3', 'powerlaw', 'powerlognorm',
    'powernorm', 'rdist', 'rayleigh', 'reciprocal', 'rice',
    'recipinvgauss', 'semicircular', 'triang', 'truncexpon',
    'truncnorm', 'tukeylambda', 'uniform', 'vonmises', 'vonmises_line',
    'wald', 'wrapcauchy']
## Kolmogorov-Smirnov one-sided and two-sided test statistics
class ksone_gen(rv_continuous):
    """General Kolmogorov-Smirnov one-sided test.
    %(default)s
    """
    # CDF/PPF delegate to the Smirnov distribution in scipy.special;
    # no closed-form pdf is given, so rv_continuous differentiates the
    # cdf numerically when the density is requested.
    def _cdf(self, x, n):
        return 1.0 - special.smirnov(n, x)
    def _ppf(self, q, n):
        return special.smirnovi(n, 1.0 - q)
# Public instance; the support starts at 0.
ksone = ksone_gen(a=0.0, name='ksone')
class kstwobign_gen(rv_continuous):
    """Kolmogorov-Smirnov two-sided test for large N.
    %(default)s
    """
    # Limiting (large-sample) distribution of the two-sided KS statistic,
    # expressed through scipy.special.kolmogorov/kolmogi.
    def _cdf(self, x):
        return 1.0-special.kolmogorov(x)
    def _sf(self, x):
        # kolmogorov() *is* the survival function, so use it directly
        # instead of 1 - cdf for accuracy in the tail.
        return special.kolmogorov(x)
    def _ppf(self, q):
        return special.kolmogi(1.0-q)
# Public instance; the support starts at 0.
kstwobign = kstwobign_gen(a=0.0, name='kstwobign')
## Normal distribution
# loc = mu, scale = std
# These standard-normal primitives live at module level (rather than on
# norm_gen) so that other distributions can reuse them directly.
_norm_pdf_C = np.sqrt(2*pi)           # normalizing constant sqrt(2*pi)
_norm_pdf_logC = np.log(_norm_pdf_C)  # its log, for log-density work
def _norm_pdf(x):
    """Standard normal density exp(-x**2/2)/sqrt(2*pi)."""
    return exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
    """Log of the standard normal density."""
    return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
    """Standard normal CDF via scipy.special.ndtr."""
    return special.ndtr(x)
def _norm_logcdf(x):
    """Log of the standard normal CDF."""
    return special.log_ndtr(x)
def _norm_ppf(q):
    """Standard normal quantile (inverse CDF)."""
    return special.ndtri(q)
def _norm_sf(x):
    """Survival function, computed as ndtr(-x) for tail accuracy."""
    return special.ndtr(-x)
def _norm_logsf(x):
    """Log of the survival function."""
    return special.log_ndtr(-x)
def _norm_isf(q):
    """Inverse survival function."""
    return -special.ndtri(q)
class norm_gen(rv_continuous):
    """A normal continuous random variable.
    The location (loc) keyword specifies the mean.
    The scale (scale) keyword specifies the standard deviation.
    %(before_notes)s
    Notes
    -----
    The probability density function for `norm` is::
        norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
    %(example)s
    """
    # All standard-normal computations delegate to the module-level
    # _norm_* helpers so that other distributions can reuse them.
    def _rvs(self):
        return mtrand.standard_normal(self._size)
    def _pdf(self, x):
        return _norm_pdf(x)
    def _logpdf(self, x):
        return _norm_logpdf(x)
    def _cdf(self, x):
        return _norm_cdf(x)
    def _logcdf(self, x):
        return _norm_logcdf(x)
    def _sf(self, x):
        return _norm_sf(x)
    def _logsf(self, x):
        return _norm_logsf(x)
    def _ppf(self, q):
        return _norm_ppf(q)
    def _isf(self, q):
        return _norm_isf(q)
    def _stats(self):
        # mean, variance, skewness, excess kurtosis of the standard normal
        return 0.0, 1.0, 0.0, 0.0
    def _entropy(self):
        # Differential entropy 0.5*log(2*pi*e).
        return 0.5*(log(2*pi)+1)
    @inherit_docstring_from(rv_continuous)
    def fit(self, data, **kwds):
        """%(super)s
        This function (norm_gen.fit) uses explicit formulas for the maximum
        likelihood estimation of the parameters, so the `optimizer` argument
        is ignored.
        """
        floc = kwds.get('floc', None)
        fscale = kwds.get('fscale', None)
        if floc is not None and fscale is not None:
            # This check is for consistency with `rv_continuous.fit`.
            # Without this check, this function would just return the
            # parameters that were given.
            raise ValueError("All parameters fixed. There is nothing to "
                             "optimize.")
        data = np.asarray(data)
        if floc is None:
            # MLE of the mean is the sample mean.
            loc = data.mean()
        else:
            loc = floc
        if fscale is None:
            # MLE of the std: biased (1/n) RMS deviation about `loc`.
            scale = np.sqrt(((data - loc)**2).mean())
        else:
            scale = fscale
        return loc, scale
norm = norm_gen(name='norm')
class alpha_gen(rv_continuous):
    """An alpha continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `alpha` is::
        alpha.pdf(x, a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2),
    where ``Phi(alpha)`` is the normal CDF, ``x > 0``, and ``a > 0``.
    %(example)s
    """
    # Phi(a) == special.ndtr(a) normalizes the density; the _norm_*
    # module helpers supply the standard-normal pieces.
    def _pdf(self, x, a):
        return 1.0/(x**2)/special.ndtr(a)*_norm_pdf(a-1.0/x)
    def _logpdf(self, x, a):
        return -2*log(x) + _norm_logpdf(a-1.0/x) - log(special.ndtr(a))
    def _cdf(self, x, a):
        return special.ndtr(a-1.0/x) / special.ndtr(a)
    def _ppf(self, q, a):
        return 1.0/asarray(a-special.ndtri(q*special.ndtr(a)))
    def _stats(self, a):
        # No finite moments: mean and variance are infinite,
        # skewness and kurtosis undefined.
        return [inf]*2 + [nan]*2
alpha = alpha_gen(a=0.0, name='alpha')
class anglit_gen(rv_continuous):
    """An anglit continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `anglit` is::
        anglit.pdf(x) = sin(2*x + pi/2) = cos(2*x),
    for ``-pi/4 <= x <= pi/4``.
    %(example)s
    """
    def _pdf(self, x):
        # sin(2x + pi/2) simplifies to cos(2x) on the support.
        return cos(2*x)
    def _cdf(self, x):
        # CDF is sin(x + pi/4)**2 (half-angle identity).
        shifted = x + pi/4
        return sin(shifted)**2.0
    def _ppf(self, q):
        # Invert the CDF: x = arcsin(sqrt(q)) - pi/4.
        root = sqrt(q)
        return arcsin(root)-pi/4
    def _stats(self):
        # mean, variance, skewness, excess kurtosis (all closed form)
        variance = pi*pi/16-0.5
        excess = -2*(pi**4 - 96)/(pi*pi-8)**2
        return 0.0, variance, 0.0, excess
    def _entropy(self):
        # Differential entropy: 1 - log(2).
        return 1-log(2)
anglit = anglit_gen(a=-pi/4, b=pi/4, name='anglit')
class arcsine_gen(rv_continuous):
    """An arcsine continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `arcsine` is::
        arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
    for 0 < x < 1.
    %(example)s
    """
    def _pdf(self, x):
        return 1.0/pi/sqrt(x*(1-x))
    def _cdf(self, x):
        return 2.0/pi*arcsin(sqrt(x))
    def _ppf(self, q):
        return sin(pi/2.0*q)**2.0
    def _stats(self):
        # Closed-form moments of the arcsine law on [0, 1].
        mu = 0.5
        mu2 = 1.0/8
        g1 = 0
        g2 = -3.0/2.0
        return mu, mu2, g1, g2
    def _entropy(self):
        # Differential entropy log(pi/4), stored as a precomputed literal.
        return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
class FitDataError(ValueError):
    # This exception is raised by, for example, beta_gen.fit when both floc
    # and fscale are fixed and there are values in the data not in the open
    # interval (floc, floc+fscale).
    def __init__(self, distr, lower, upper):
        # Build the standard exception `args` tuple directly (no call to
        # ValueError.__init__) with a fully formatted message.
        self.args = (
            "Invalid values in `data`. Maximum likelihood "
            "estimation with {distr!r} requires that {lower!r} < x "
            "< {upper!r} for each x in `data`.".format(
                distr=distr, lower=lower, upper=upper),
        )
class FitSolverError(RuntimeError):
    # This exception is raised by, for example, beta_gen.fit when
    # optimize.fsolve returns with ier != 1.
    def __init__(self, mesg):
        # Flatten fsolve's multi-line message into a single-line reason.
        emsg = "Solver for the MLE equations failed to converge: "
        emsg += mesg.replace('\n', '')
        self.args = (emsg,)
def _beta_mle_a(a, b, n, s1):
    """Score equation in `a` for beta MLE with `b` held fixed.

    `n` is the number of data points and `s1` the sum of the logs of
    the data; the root of this expression in `a` is the conditional MLE.
    """
    digamma_ab = special.psi(a + b)
    return s1 - n * (-digamma_ab + special.psi(a))
def _beta_mle_ab(theta, n, s1, s2):
    """Simultaneous score equations for the beta shapes ``a`` and ``b``.

    ``theta`` packs (a, b); `n` is the sample size, `s1` the sum of
    log(data) and `s2` the sum of log(1 - data).  A zero of this pair
    of equations is a critical point of the likelihood, i.e. the MLE
    for a and b.
    """
    a, b = theta
    digamma_ab = special.psi(a + b)
    return [s1 - n * (-digamma_ab + special.psi(a)),
            s2 - n * (-digamma_ab + special.psi(b))]
class beta_gen(rv_continuous):
    """A beta continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `beta` is::
        beta.pdf(x, a, b) = gamma(a+b)/(gamma(a)*gamma(b)) * x**(a-1) *
                            (1-x)**(b-1),
    for ``0 < x < 1``, ``a > 0``, ``b > 0``.
    %(example)s
    """
    def _rvs(self, a, b):
        return mtrand.beta(a, b, self._size)
    def _pdf(self, x, a, b):
        # Evaluate through the log-density for numerical stability.
        return np.exp(self._logpdf(x, a, b))
    def _logpdf(self, x, a, b):
        # xlogy/xlog1py return 0 for 0*log(0), handling the boundary cases.
        lPx = special.xlog1py(b-1.0, -x) + special.xlogy(a-1.0, x)
        lPx -= special.betaln(a, b)
        return lPx
    def _cdf(self, x, a, b):
        return special.btdtr(a, b, x)
    def _ppf(self, q, a, b):
        return special.btdtri(a, b, q)
    def _stats(self, a, b):
        # Closed-form mean, variance, skewness and excess kurtosis.
        mn = a*1.0 / (a + b)
        var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0
        g1 = 2.0*(b-a)*sqrt((1.0+a+b)/(a*b)) / (2+a+b)
        g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b))
        g2 /= a*b*(a+b+2)*(a+b+3)
        return mn, var, g1, g2
    def _fitstart(self, data):
        # Method-of-moments start: match sample skew/kurtosis to get (a, b).
        g1 = _skew(data)
        g2 = _kurtosis(data)
        def func(x):
            a, b = x
            sk = 2*(b-a)*sqrt(a + b + 1) / (a + b + 2) / sqrt(a*b)
            ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
            ku /= a*b*(a+b+2)*(a+b+3)
            ku *= 6
            return [sk-g1, ku-g2]
        a, b = optimize.fsolve(func, (1.0, 1.0))
        return super(beta_gen, self)._fitstart(data, args=(a, b))
    @inherit_docstring_from(rv_continuous)
    def fit(self, data, *args, **kwds):
        """%(super)s
        In the special case where both `floc` and `fscale` are given, a
        `ValueError` is raised if any value `x` in `data` does not satisfy
        `floc < x < floc + fscale`.
        """
        # Override rv_continuous.fit, so we can more efficiently handle the
        # case where floc and fscale are given.
        f0 = kwds.get('f0', None)
        f1 = kwds.get('f1', None)
        floc = kwds.get('floc', None)
        fscale = kwds.get('fscale', None)
        if floc is None or fscale is None:
            # do general fit
            return super(beta_gen, self).fit(data, *args, **kwds)
        if f0 is not None and f1 is not None:
            # This check is for consistency with `rv_continuous.fit`.
            raise ValueError("All parameters fixed. There is nothing to "
                             "optimize.")
        # Special case: loc and scale are constrained, so we are fitting
        # just the shape parameters.  This can be done much more efficiently
        # than the method used in `rv_continuous.fit`.  (See the subsection
        # "Two unknown parameters" in the section "Maximum likelihood" of
        # the Wikipedia article on the Beta distribution for the formulas.)
        # Normalize the data to the interval [0, 1].
        data = (ravel(data) - floc) / fscale
        if np.any(data <= 0) or np.any(data >= 1):
            raise FitDataError("beta", lower=floc, upper=floc + fscale)
        xbar = data.mean()
        if f0 is not None or f1 is not None:
            # One of the shape parameters is fixed.
            if f0 is not None:
                # The shape parameter a is fixed, so swap the parameters
                # and flip the data.  We always solve for `a`.  The result
                # will be swapped back before returning.
                b = f0
                data = 1 - data
                xbar = 1 - xbar
            else:
                b = f1
            # Initial guess for a.  Use the formula for the mean of the beta
            # distribution, E[x] = a / (a + b), to generate a reasonable
            # starting point based on the mean of the data and the given
            # value of b.
            a = b * xbar / (1 - xbar)
            # Compute the MLE for `a` by solving _beta_mle_a.
            theta, info, ier, mesg = optimize.fsolve(
                _beta_mle_a, a,
                args=(b, len(data), np.log(data).sum()),
                full_output=True
            )
            if ier != 1:
                raise FitSolverError(mesg=mesg)
            a = theta[0]
            if f0 is not None:
                # The shape parameter a was fixed, so swap back the
                # parameters.
                a, b = b, a
        else:
            # Neither of the shape parameters is fixed.
            # s1 and s2 are used in the extra arguments passed to _beta_mle_ab
            # by optimize.fsolve.
            s1 = np.log(data).sum()
            s2 = np.log(1 - data).sum()
            # Use the "method of moments" to estimate the initial
            # guess for a and b.
            fac = xbar * (1 - xbar) / data.var(ddof=0) - 1
            a = xbar * fac
            b = (1 - xbar) * fac
            # Compute the MLE for a and b by solving _beta_mle_ab.
            theta, info, ier, mesg = optimize.fsolve(
                _beta_mle_ab, [a, b],
                args=(len(data), s1, s2),
                full_output=True
            )
            if ier != 1:
                raise FitSolverError(mesg=mesg)
            a, b = theta
        return a, b, floc, fscale
beta = beta_gen(a=0.0, b=1.0, name='beta')
class betaprime_gen(rv_continuous):
    """A beta prime continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `betaprime` is::
        betaprime.pdf(x, a, b) = x**(a-1) * (1+x)**(-a-b) / beta(a, b)
    for ``x > 0``, ``a > 0``, ``b > 0``, where ``beta(a, b)`` is the beta
    function (see `scipy.special.beta`).
    %(example)s
    """
    def _rvs(self, a, b):
        # Ratio of two independent gamma variates is beta-prime
        # distributed; `gamma` is the instance defined later in this module.
        u1 = gamma.rvs(a, size=self._size)
        u2 = gamma.rvs(b, size=self._size)
        return (u1 / u2)
    def _pdf(self, x, a, b):
        # Evaluate through the log-density for numerical stability.
        return np.exp(self._logpdf(x, a, b))
    def _logpdf(self, x, a, b):
        return (special.xlogy(a-1.0, x) - special.xlog1py(a+b, x) -
                special.betaln(a, b))
    def _cdf_skip(self, x, a, b):
        # remove for now: special.hyp2f1 is incorrect for large a
        x = where(x == 1.0, 1.0-1e-6, x)
        return pow(x, a)*special.hyp2f1(a+b, a, 1+a, -x)/a/special.beta(a, b)
    def _munp(self, n, a, b):
        # Raw moments exist only when b > n; otherwise return inf.
        if (n == 1.0):
            return where(b > 1, a/(b-1.0), inf)
        elif (n == 2.0):
            return where(b > 2, a*(a+1.0)/((b-2.0)*(b-1.0)), inf)
        elif (n == 3.0):
            return where(b > 3, a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
                         inf)
        elif (n == 4.0):
            return where(b > 4,
                         a*(a+1.0)*(a+2.0)*(a+3.0)/((b-4.0)*(b-3.0)
                                                    * (b-2.0)*(b-1.0)), inf)
        else:
            raise NotImplementedError
betaprime = betaprime_gen(a=0.0, name='betaprime')
class bradford_gen(rv_continuous):
    """A Bradford continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `bradford` is::
        bradford.pdf(x, c) = c / (k * (1+c*x)),
    for ``0 < x < 1``, ``c > 0`` and ``k = log(1+c)``.
    %(example)s
    """
    def _pdf(self, x, c):
        return c / (c*x + 1.0) / log(1.0+c)
    def _cdf(self, x, c):
        return log(1.0+c*x) / log(c+1.0)
    def _ppf(self, q, c):
        # Closed-form inverse of the CDF.
        return ((1.0+c)**q-1)/c
    def _stats(self, c, moments='mv'):
        # Skewness/kurtosis are comparatively expensive, so they are only
        # computed when requested via the `moments` string.
        k = log(1.0+c)
        mu = (c-k)/(c*k)
        mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
        g1 = None
        g2 = None
        if 's' in moments:
            g1 = sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
            g1 /= sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
        if 'k' in moments:
            g2 = (c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3)
                  + 6*c*k*k*(3*k-14) + 12*k**3)
            g2 /= 3*c*(c*(k-2)+2*k)**2
        return mu, mu2, g1, g2
    def _entropy(self, c):
        k = log(1+c)
        return k/2.0 - log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford')
class burr_gen(rv_continuous):
    """A Burr continuous random variable.
    %(before_notes)s
    See Also
    --------
    fisk : a special case of `burr` with ``d = 1``
    Notes
    -----
    The probability density function for `burr` is::
        burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1)
    for ``x > 0``.
    %(example)s
    """
    def _pdf(self, x, c, d):
        # c*d * x^(-c-1) * (1 + x^-c)^(-d-1)
        xc = x**(-c*1.0)
        return c*d*(x**(-c-1.0))*((1+xc)**(-d-1.0))
    def _cdf(self, x, c, d):
        # (1 + x^-c)^(-d)
        xc = x**(-c*1.0)
        return (1+xc)**(-d*1.0)
    def _ppf(self, q, c, d):
        # Closed-form inverse of the CDF.
        inner = q**(-1.0/d)-1
        return inner**(-1.0/c)
    def _munp(self, n, c, d):
        # Raw moments via the beta function; the beta arguments are
        # positive only for n < c.
        frac = 1. * n / c
        return d * special.beta(1.0 - frac, d + frac)
burr = burr_gen(a=0.0, name='burr')
class fisk_gen(burr_gen):
    """A Fisk continuous random variable.
    The Fisk distribution is also known as the log-logistic distribution, and
    equals the Burr distribution with ``d == 1``.
    %(before_notes)s
    See Also
    --------
    burr
    %(example)s
    """
    # Every method delegates to burr_gen with the second shape fixed at 1.
    def _pdf(self, x, c):
        return burr_gen._pdf(self, x, c, 1.0)
    def _cdf(self, x, c):
        return burr_gen._cdf(self, x, c, 1.0)
    def _ppf(self, x, c):
        # NOTE(review): the first argument is a probability; `q` would be a
        # clearer name than `x` (matches the other distributions' _ppf).
        return burr_gen._ppf(self, x, c, 1.0)
    def _munp(self, n, c):
        return burr_gen._munp(self, n, c, 1.0)
    def _entropy(self, c):
        return 2 - log(c)
fisk = fisk_gen(a=0.0, name='fisk')
# median = loc
class cauchy_gen(rv_continuous):
    """A Cauchy continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `cauchy` is::
        cauchy.pdf(x) = 1 / (pi * (1 + x**2))
    %(example)s
    """
    def _pdf(self, x):
        # Density 1/(pi*(1 + x^2)).
        denom = 1.0+x*x
        return 1.0/pi/denom
    def _cdf(self, x):
        angle = arctan(x)
        return 0.5 + 1.0/pi*angle
    def _ppf(self, q):
        shifted = pi*q-pi/2.0
        return tan(shifted)
    def _sf(self, x):
        angle = arctan(x)
        return 0.5 - 1.0/pi*angle
    def _isf(self, q):
        shifted = pi/2.0-pi*q
        return tan(shifted)
    def _stats(self):
        # Mean and variance diverge; skewness/kurtosis are undefined.
        return inf, inf, nan, nan
    def _entropy(self):
        # Differential entropy log(4*pi).
        return log(4*pi)
    def _fitstart(self, data, args=None):
        # Sample moments are useless for Cauchy data, so start the
        # generic fitter from the standard Cauchy (loc=0, scale=1).
        return (0, 1)
cauchy = cauchy_gen(name='cauchy')
class chi_gen(rv_continuous):
    """A chi continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `chi` is::
        chi.pdf(x, df) = x**(df-1) * exp(-x**2/2) / (2**(df/2-1) * gamma(df/2))
    for ``x > 0``.
    Special cases of `chi` are:
    - ``chi(1, loc, scale)`` is equivalent to `halfnorm`
    - ``chi(2, 0, scale)`` is equivalent to `rayleigh`
    - ``chi(3, 0, scale)`` is equivalent to `maxwell`
    %(example)s
    """
    def _rvs(self, df):
        # A chi variate is the square root of a chi-square variate.
        return sqrt(chi2.rvs(df, size=self._size))
    def _pdf(self, x, df):
        return x**(df-1.)*exp(-x*x*0.5)/(2.0)**(df*0.5-1)/gam(df*0.5)
    def _cdf(self, x, df):
        # Regularized lower incomplete gamma of x^2/2.
        return special.gammainc(df*0.5, 0.5*x*x)
    def _ppf(self, q, df):
        return sqrt(2*special.gammaincinv(df*0.5, q))
    def _stats(self, df):
        # Closed-form moments expressed through gamma-function ratios.
        mu = sqrt(2)*special.gamma(df/2.0+0.5)/special.gamma(df/2.0)
        mu2 = df - mu*mu
        g1 = (2*mu**3.0 + mu*(1-2*df))/asarray(np.power(mu2, 1.5))
        g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
        g2 /= asarray(mu2**2.0)
        return mu, mu2, g1, g2
chi = chi_gen(a=0.0, name='chi')
## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2)
class chi2_gen(rv_continuous):
    """A chi-squared continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `chi2` is::
        chi2.pdf(x, df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
    %(example)s
    """
    def _rvs(self, df):
        return mtrand.chisquare(df, self._size)
    def _pdf(self, x, df):
        # Evaluate through the log-density for numerical stability.
        return exp(self._logpdf(x, df))
    def _logpdf(self, x, df):
        return special.xlogy(df/2.-1, x) - x/2. - gamln(df/2.) - (log(2)*df)/2.
    def _cdf(self, x, df):
        # chdtr/chdtrc/chdtri are the dedicated chi-square routines in
        # scipy.special.
        return special.chdtr(df, x)
    def _sf(self, x, df):
        return special.chdtrc(df, x)
    def _isf(self, p, df):
        return special.chdtri(df, p)
    def _ppf(self, p, df):
        return self._isf(1.0-p, df)
    def _stats(self, df):
        # Closed-form mean, variance, skewness, excess kurtosis.
        mu = df
        mu2 = 2*df
        g1 = 2*sqrt(2.0/df)
        g2 = 12.0/df
        return mu, mu2, g1, g2
chi2 = chi2_gen(a=0.0, name='chi2')
class cosine_gen(rv_continuous):
    """A cosine continuous random variable.
    %(before_notes)s
    Notes
    -----
    The cosine distribution is an approximation to the normal distribution.
    The probability density function for `cosine` is::
        cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
    for ``-pi <= x <= pi``.
    %(example)s
    """
    def _pdf(self, x):
        # Raised-cosine density on [-pi, pi].
        const = 1.0/2/pi
        return const*(1+cos(x))
    def _cdf(self, x):
        # Closed-form antiderivative of the density.
        const = 1.0/2/pi
        return const*(pi + x + sin(x))
    def _stats(self):
        # mean, variance, skewness, excess kurtosis (all closed form)
        variance = pi*pi/3.0-2.0
        excess = -6.0*(pi**4-90)/(5.0*(pi*pi-6)**2)
        return 0.0, variance, 0.0, excess
    def _entropy(self):
        # Differential entropy log(4*pi) - 1.
        return log(4*pi)-1.0
cosine = cosine_gen(a=-pi, b=pi, name='cosine')
class dgamma_gen(rv_continuous):
    """A double gamma continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `dgamma` is::
        dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))
    for ``a > 0``.
    %(example)s
    """
    def _rvs(self, a):
        # Attach a random sign to gamma variates to fold the gamma
        # density symmetrically about zero.
        u = mtrand.random_sample(size=self._size)
        return (gamma.rvs(a, size=self._size)*where(u >= 0.5, 1, -1))
    def _pdf(self, x, a):
        ax = abs(x)
        return 1.0/(2*special.gamma(a))*ax**(a-1.0) * exp(-ax)
    def _logpdf(self, x, a):
        ax = abs(x)
        return special.xlogy(a-1.0, ax) - ax - log(2) - gamln(a)
    def _cdf(self, x, a):
        # Split the symmetric CDF about zero using the incomplete gamma.
        fac = 0.5*special.gammainc(a, abs(x))
        return where(x > 0, 0.5 + fac, 0.5 - fac)
    def _sf(self, x, a):
        fac = 0.5*special.gammainc(a, abs(x))
        return where(x > 0, 0.5-fac, 0.5+fac)
    def _ppf(self, q, a):
        # Invert each half of the symmetric CDF.
        fac = special.gammainccinv(a, 1-abs(2*q-1))
        return where(q > 0.5, fac, -fac)
    def _stats(self, a):
        # Symmetric: odd moments vanish.
        mu2 = a*(a+1.0)
        return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma')
class dweibull_gen(rv_continuous):
    """A double Weibull continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `dweibull` is::
        dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)
    %(example)s
    """
    def _rvs(self, c):
        # Symmetrize Weibull variates by attaching a random sign.
        u = mtrand.random_sample(size=self._size)
        return weibull_min.rvs(c, size=self._size) * (where(u >= 0.5, 1, -1))
    def _pdf(self, x, c):
        ax = abs(x)
        Px = c / 2.0 * ax**(c-1.0) * exp(-ax**c)
        return Px
    def _logpdf(self, x, c):
        ax = abs(x)
        return log(c) - log(2.0) + special.xlogy(c - 1.0, ax) - ax**c
    def _cdf(self, x, c):
        # Half the Weibull survival weight sits on each side of zero.
        Cx1 = 0.5 * exp(-abs(x)**c)
        return where(x > 0, 1 - Cx1, Cx1)
    def _ppf(self, q, c):
        # Invert each symmetric half of the CDF.
        fac = 2. * where(q <= 0.5, q, 1. - q)
        fac = np.power(-log(fac), 1.0 / c)
        return where(q > 0.5, fac, -fac)
    def _munp(self, n, c):
        # Odd raw moments are zero by symmetry.
        return (1 - (n % 2)) * special.gamma(1.0 + 1.0 * n / c)
    # since we know that all odd moments are zeros, return them at once.
    # returning Nones from _stats makes the public stats call _munp
    # so overall we're saving one or two gamma function evaluations here.
    def _stats(self, c):
        return 0, None, 0, None
dweibull = dweibull_gen(name='dweibull')
## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale)
class expon_gen(rv_continuous):
    """An exponential continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `expon` is::
        expon.pdf(x) = lambda * exp(- lambda*x)
    for ``x >= 0``.
    The scale parameter is equal to ``scale = 1.0 / lambda``.
    `expon` does not have shape parameters.
    %(example)s
    """
    def _rvs(self):
        return mtrand.standard_exponential(self._size)
    def _pdf(self, x):
        return exp(-x)
    def _logpdf(self, x):
        return -x
    def _cdf(self, x):
        # expm1/log1p forms keep full precision for small x / small q.
        return -expm1(-x)
    def _ppf(self, q):
        return -log1p(-q)
    def _sf(self, x):
        return exp(-x)
    def _logsf(self, x):
        return -x
    def _isf(self, q):
        return -log(q)
    def _stats(self):
        # mean, variance, skewness, excess kurtosis of the unit exponential
        return 1.0, 1.0, 2.0, 6.0
    def _entropy(self):
        # Differential entropy of the unit exponential is exactly 1.
        return 1.0
expon = expon_gen(a=0.0, name='expon')
class exponweib_gen(rv_continuous):
    """An exponentiated Weibull continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `exponweib` is::
        exponweib.pdf(x, a, c) =
            a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1)
    for ``x > 0``, ``a > 0``, ``c > 0``.
    %(example)s
    """
    def _pdf(self, x, a, c):
        exc = exp(-x**c)
        return a*c*(1-exc)**asarray(a-1) * exc * x**(c-1)
    def _logpdf(self, x, a, c):
        exc = exp(-x**c)
        return log(a) + log(c) + (a-1.)*log(1-exc) - x**c + (c-1.0)*log(x)
    def _cdf(self, x, a, c):
        # -expm1(-x**c) is the Weibull CDF, computed accurately near 0;
        # exponentiating by `a` gives the exponentiated-Weibull CDF.
        exm1c = -expm1(-x**c)
        return (exm1c)**a
    def _ppf(self, q, a, c):
        return (-log1p(-q**(1.0/a)))**asarray(1.0/c)
exponweib = exponweib_gen(a=0.0, name='exponweib')
class exponpow_gen(rv_continuous):
    """An exponential power continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `exponpow` is::
        exponpow.pdf(x, b) = b * x**(b-1) * exp(1 + x**b - exp(x**b))
    for ``x >= 0``, ``b > 0``.  Note that this is a different distribution
    from the exponential power distribution that is also known under the names
    "generalized normal" or "generalized Gaussian".
    References
    ----------
    http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Exponentialpower.pdf
    %(example)s
    """
    def _pdf(self, x, b):
        # Reuse x**(b-1) to form x**b with one fewer pow evaluation.
        xbm1 = x**(b-1.0)
        xb = xbm1 * x
        return exp(1)*b*xbm1 * exp(xb - exp(xb))
    def _logpdf(self, x, b):
        xb = x**(b-1.0)*x
        return 1 + log(b) + (b-1.0)*log(x) + xb - exp(xb)
    def _cdf(self, x, b):
        # expm1 keeps precision for small x**b.
        return -expm1(-expm1(x**b))
    def _sf(self, x, b):
        return exp(-expm1(x**b))
    def _isf(self, q, b):
        # First argument renamed from `x` to `q` for consistency with the
        # other distributions: the inverse survival function takes an
        # upper-tail probability, not an abscissa.  Behavior is unchanged.
        return (log1p(-log(q)))**(1./b)
    def _ppf(self, q, b):
        return pow(log1p(-log1p(-q)), 1.0/b)
exponpow = exponpow_gen(a=0.0, name='exponpow')
class fatiguelife_gen(rv_continuous):
    """A fatigue-life (Birnbaum-Sanders) continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `fatiguelife` is::
        fatiguelife.pdf(x, c) =
            (x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
    for ``x > 0``.
    %(example)s
    """
    def _rvs(self, c):
        # Sample through a transformation of a standard normal variate.
        z = mtrand.standard_normal(self._size)
        x = 0.5*c*z
        x2 = x*x
        t = 1.0 + 2*x2 + 2*x*sqrt(1 + x2)
        return t
    def _pdf(self, x, c):
        # Evaluate through the log-density for numerical stability.
        return np.exp(self._logpdf(x, c))
    def _logpdf(self, x, c):
        return (log(x+1) - (x-1)**2 / (2.0*x*c**2) - log(2*c) -
                0.5*(log(2*pi) + 3*log(x)))
    def _cdf(self, x, c):
        # CDF expressed through the standard normal CDF.
        return special.ndtr(1.0 / c * (sqrt(x) - 1.0/sqrt(x)))
    def _ppf(self, q, c):
        tmp = c*special.ndtri(q)
        return 0.25 * (tmp + sqrt(tmp**2 + 4))**2
    def _stats(self, c):
        # NB: the formula for kurtosis in wikipedia seems to have an error:
        # it's 40, not 41. At least it disagrees with the one from Wolfram
        # Alpha. And the latter one, below, passes the tests, while the wiki
        # one doesn't So far I didn't have the guts to actually check the
        # coefficients from the expressions for the raw moments.
        c2 = c*c
        mu = c2 / 2.0 + 1.0
        den = 5.0 * c2 + 4.0
        mu2 = c2*den / 4.0
        g1 = 4 * c * (11*c2 + 6.0) / np.power(den, 1.5)
        g2 = 6 * c2 * (93*c2 + 40.0) / den**2.0
        return mu, mu2, g1, g2
fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife')
class foldcauchy_gen(rv_continuous):
    """A folded Cauchy continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `foldcauchy` is::
        foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))
    for ``x >= 0``.
    %(example)s
    """
    def _rvs(self, c):
        # |Cauchy(loc=c)| is folded-Cauchy distributed by definition.
        return abs(cauchy.rvs(loc=c, size=self._size))
    def _pdf(self, x, c):
        # Sum of the two Cauchy branches folded onto x >= 0.
        return 1.0/pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))
    def _cdf(self, x, c):
        return 1.0/pi*(arctan(x-c) + arctan(x+c))
    def _stats(self, c):
        # Like the Cauchy, no finite moments exist.
        return inf, inf, nan, nan
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy')
class f_gen(rv_continuous):
    """An F continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `f` is::
                             df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
        F.pdf(x, df1, df2) = --------------------------------------------
                             (df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)
    for ``x > 0``.
    %(example)s
    """
    def _rvs(self, dfn, dfd):
        return mtrand.f(dfn, dfd, self._size)
    def _pdf(self, x, dfn, dfd):
        # Evaluate through the log-density for numerical stability.
        return exp(self._logpdf(x, dfn, dfd))
    def _logpdf(self, x, dfn, dfd):
        n = 1.0 * dfn
        m = 1.0 * dfd
        lPx = m/2 * log(m) + n/2 * log(n) + (n/2 - 1) * log(x)
        lPx -= ((n+m)/2) * log(m + n*x) + special.betaln(n/2, m/2)
        return lPx
    def _cdf(self, x, dfn, dfd):
        # fdtr/fdtrc/fdtri are the dedicated F routines in scipy.special.
        return special.fdtr(dfn, dfd, x)
    def _sf(self, x, dfn, dfd):
        return special.fdtrc(dfn, dfd, x)
    def _ppf(self, q, dfn, dfd):
        return special.fdtri(dfn, dfd, q)
    def _stats(self, dfn, dfd):
        # Each moment is finite only for sufficiently large dfd; _lazywhere
        # evaluates a formula only where its condition holds, avoiding
        # divisions by zero and returning inf/nan elsewhere.
        v1, v2 = 1. * dfn, 1. * dfd
        v2_2, v2_4, v2_6, v2_8 = v2 - 2., v2 - 4., v2 - 6., v2 - 8.
        mu = _lazywhere(
            v2 > 2, (v2, v2_2),
            lambda v2, v2_2: v2 / v2_2,
            np.inf)
        mu2 = _lazywhere(
            v2 > 4, (v1, v2, v2_2, v2_4),
            lambda v1, v2, v2_2, v2_4:
            2 * v2 * v2 * (v1 + v2_2) / (v1 * v2_2**2 * v2_4),
            np.inf)
        g1 = _lazywhere(
            v2 > 6, (v1, v2_2, v2_4, v2_6),
            lambda v1, v2_2, v2_4, v2_6:
            (2 * v1 + v2_2) / v2_6 * sqrt(v2_4 / (v1 * (v1 + v2_2))),
            np.nan)
        g1 *= np.sqrt(8.)
        g2 = _lazywhere(
            v2 > 8, (g1, v2_6, v2_8),
            lambda g1, v2_6, v2_8: (8 + g1 * g1 * v2_6) / v2_8,
            np.nan)
        g2 *= 3. / 2.
        return mu, mu2, g1, g2
f = f_gen(a=0.0, name='f')
## Folded Normal
##   abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
##  note: regress docs have scale parameter correct, but first parameter
##    he gives is a shape parameter A = c * scale
##  Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous):
    """A folded normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `foldnorm` is::
        foldnormal.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
    for ``c >= 0``.
    %(example)s
    """
    def _argcheck(self, c):
        # Unlike most shape parameters, c == 0 is allowed (half-normal).
        return (c >= 0)
    def _rvs(self, c):
        return abs(mtrand.standard_normal(self._size) + c)
    def _pdf(self, x, c):
        # Sum of the two normal branches folded onto x >= 0.
        return _norm_pdf(x + c) + _norm_pdf(x-c)
    def _cdf(self, x, c):
        return special.ndtr(x-c) + special.ndtr(x+c) - 1.0
    def _stats(self, c):
        # Regina C. Elandt, Technometrics 3, 551 (1961)
        # http://www.jstor.org/stable/1266561
        #
        c2 = c*c
        expfac = np.exp(-0.5*c2) / np.sqrt(2.*pi)
        mu = 2.*expfac + c * special.erf(c/sqrt(2))
        mu2 = c2 + 1 - mu*mu
        g1 = 2. * (mu*mu*mu - c2*mu - expfac)
        g1 /= np.power(mu2, 1.5)
        g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu
        g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2
        g2 = g2 / mu2**2.0 - 3.
        return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0, name='foldnorm')
## Extreme Value Type II or Frechet
## (defined in Regress+ documentation as Extreme LB) as
##   a limiting value distribution.
##
class frechet_r_gen(rv_continuous):
    """A Frechet right (or Weibull minimum) continuous random variable.
    %(before_notes)s
    See Also
    --------
    weibull_min : The same distribution as `frechet_r`.
    frechet_l, weibull_max
    Notes
    -----
    The probability density function for `frechet_r` is::
        frechet_r.pdf(x, c) = c * x**(c-1) * exp(-x**c)
    for ``x > 0``, ``c > 0``.
    %(example)s
    """
    def _pdf(self, x, c):
        return c*pow(x, c-1)*exp(-pow(x, c))
    def _logpdf(self, x, c):
        return log(c) + (c-1)*log(x) - pow(x, c)
    def _cdf(self, x, c):
        # expm1/log1p forms keep precision for small x**c / small q.
        return -expm1(-pow(x, c))
    def _ppf(self, q, c):
        return pow(-log1p(-q), 1.0/c)
    def _munp(self, n, c):
        # Raw moments via the gamma function.
        return special.gamma(1.0+n*1.0/c)
    def _entropy(self, c):
        # _EULER is the Euler-Mascheroni constant.
        return -_EULER / c - log(c) + _EULER + 1
# The same class is exposed under both its Frechet and Weibull names.
frechet_r = frechet_r_gen(a=0.0, name='frechet_r')
weibull_min = frechet_r_gen(a=0.0, name='weibull_min')
class frechet_l_gen(rv_continuous):
    """A Frechet left (or Weibull maximum) continuous random variable.
    %(before_notes)s
    See Also
    --------
    weibull_max : The same distribution as `frechet_l`.
    frechet_r, weibull_min
    Notes
    -----
    The probability density function for `frechet_l` is::
        frechet_l.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c)
    for ``x < 0``, ``c > 0``.
    %(example)s
    """
    # Mirror image of frechet_r: x is negated throughout.
    def _pdf(self, x, c):
        return c*pow(-x, c-1)*exp(-pow(-x, c))
    def _cdf(self, x, c):
        return exp(-pow(-x, c))
    def _ppf(self, q, c):
        return -pow(-log(q), 1.0/c)
    def _munp(self, n, c):
        # Odd moments pick up a sign from the reflection about zero.
        val = special.gamma(1.0+n*1.0/c)
        if (int(n) % 2):
            sgn = -1
        else:
            sgn = 1
        return sgn * val
    def _entropy(self, c):
        # Entropy is reflection-invariant, so it matches frechet_r.
        return -_EULER / c - log(c) + _EULER + 1
# The same class is exposed under both its Frechet and Weibull names.
frechet_l = frechet_l_gen(b=0.0, name='frechet_l')
weibull_max = frechet_l_gen(b=0.0, name='weibull_max')
class genlogistic_gen(rv_continuous):
    """A generalized logistic continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `genlogistic` is::
        genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)
    for real ``x`` and ``c > 0``.
    %(example)s
    """
    def _pdf(self, x, c):
        # Evaluate through the log-density for numerical stability.
        return exp(self._logpdf(x, c))
    def _logpdf(self, x, c):
        return log(c) - x - (c+1.0)*log1p(exp(-x))
    def _cdf(self, x, c):
        Cx = (1+exp(-x))**(-c)
        return Cx
    def _ppf(self, q, c):
        # Closed-form inverse of the CDF.
        vals = -log(pow(q, -1.0/c)-1)
        return vals
    def _stats(self, c):
        # Moments in terms of the digamma/zeta functions; _ZETA3 is
        # Apery's constant zeta(3).
        zeta = special.zeta
        mu = _EULER + special.psi(c)
        mu2 = pi*pi/6.0 + zeta(2, c)
        g1 = -2*zeta(3, c) + 2*_ZETA3
        g1 /= np.power(mu2, 1.5)
        g2 = pi**4/15.0 + 6*zeta(4, c)
        g2 /= mu2**2.0
        return mu, mu2, g1, g2
genlogistic = genlogistic_gen(name='genlogistic')
class genpareto_gen(rv_continuous):
    """A generalized Pareto continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `genpareto` is::
        genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
    for ``c != 0``, and for ``x >= 0`` for all c,
    and ``x < 1/abs(c)`` for ``c < 0``.
    %(example)s
    """
    def _argcheck(self, c):
        # For c < 0 the support is bounded above by 1/|c|; mutate self.b
        # accordingly.  c == 0 is rejected by the returned mask.
        c = asarray(c)
        self.b = where(c < 0, 1.0/abs(c), inf)
        return where(c == 0, 0, 1)
    def _pdf(self, x, c):
        Px = pow(1+c*x, asarray(-1.0-1.0/c))
        return Px
    def _logpdf(self, x, c):
        # log1p avoids cancellation for small c*x.
        return (-1.0-1.0/c) * np.log1p(c*x)
    def _cdf(self, x, c):
        return 1.0 - pow(1+c*x, asarray(-1.0/c))
    def _ppf(self, q, c):
        vals = 1.0/c * (pow(1-q, -c)-1)
        return vals
    def _munp(self, n, c):
        # Binomial-sum expression; the n-th moment only exists for c*n < 1.
        k = arange(0, n+1)
        val = (-1.0/c)**n * sum(comb(n, k)*(-1)**k / (1.0-c*k), axis=0)
        return where(c*n < 1, val, inf)
    def _entropy(self, c):
        if (c > 0):
            return 1+c
        else:
            # Fall back to numeric integration on the bounded support.
            self.b = -1.0 / c
            return rv_continuous._entropy(self, c)
genpareto = genpareto_gen(a=0.0, name='genpareto')
class genexpon_gen(rv_continuous):
    """A generalized exponential continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `genexpon` is::
        genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \
            exp(-a*x - b*x + b/c * (1-exp(-c*x)))
    for ``x >= 0``, ``a, b, c > 0``.
    References
    ----------
    H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential
    Distribution", Journal of the American Statistical Association, 1993.
    N. Balakrishnan, "The Exponential Distribution: Theory, Methods and
    Applications", Asit P. Basu.
    %(example)s
    """
    def _pdf(self, x, a, b, c):
        # -expm1(-c*x) == 1 - exp(-c*x), accurate for small c*x.
        return (a+b*(-expm1(-c*x)))*exp((-a-b)*x+b*(-expm1(-c*x))/c)
    def _cdf(self, x, a, b, c):
        return -expm1((-a-b)*x + b*(-expm1(-c*x))/c)
    def _logpdf(self, x, a, b, c):
        return np.log(a+b*(-expm1(-c*x))) + (-a-b)*x+b*(-expm1(-c*x))/c
genexpon = genexpon_gen(a=0.0, name='genexpon')
class genextreme_gen(rv_continuous):
    """A generalized extreme value continuous random variable.
    %(before_notes)s
    See Also
    --------
    gumbel_r
    Notes
    -----
    For ``c=0``, `genextreme` is equal to `gumbel_r`.
    The probability density function for `genextreme` is::
        genextreme.pdf(x, c) =
            exp(-exp(-x))*exp(-x),                    for c==0
            exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1),    for x <= 1/c, c > 0
    %(example)s
    """
    def _argcheck(self, c):
        # Support depends on the sign of c: bounded above for c > 0, below
        # for c < 0.  _XMIN guards the division when c is (near) zero.
        min = np.minimum
        max = np.maximum
        self.b = where(c > 0, 1.0 / max(c, _XMIN), inf)
        self.a = where(c < 0, 1.0 / min(c, -_XMIN), -inf)
        return where(abs(c) == inf, 0, 1)
    def _pdf(self, x, c):
        # The c == 0 (Gumbel) limit is selected pointwise via `where`;
        # multiplying by (x == x) keeps NaNs propagating.
        cx = c*x
        logex2 = where((c == 0)*(x == x), 0.0, log1p(-cx))
        logpex2 = where((c == 0)*(x == x), -x, logex2/c)
        pex2 = exp(logpex2)
        # Handle special cases
        logpdf = where((cx == 1) | (cx == -inf), -inf, -pex2+logpex2-logex2)
        putmask(logpdf, (c == 1) & (x == 1), 0.0)
        return exp(logpdf)
    def _cdf(self, x, c):
        loglogcdf = where((c == 0)*(x == x), -x, log1p(-c*x)/c)
        return exp(-exp(loglogcdf))
    def _ppf(self, q, c):
        x = -log(-log(q))
        return where((c == 0)*(x == x), x, -expm1(-c*x)/c)
    def _stats(self, c):
        # Moments in terms of Gamma(n*c + 1); small-|c| branches use series
        # limits to avoid catastrophic cancellation.
        g = lambda n: gam(n*c+1)
        g1 = g(1)
        g2 = g(2)
        g3 = g(3)
        g4 = g(4)
        g2mg12 = where(abs(c) < 1e-7, (c*pi)**2.0/6.0, g2-g1**2.0)
        gam2k = where(abs(c) < 1e-7, pi**2.0/6.0,
                      expm1(gamln(2.0*c+1.0)-2*gamln(c+1.0))/c**2.0)
        eps = 1e-14
        gamk = where(abs(c) < eps, -_EULER, expm1(gamln(c+1))/c)
        m = where(c < -1.0, nan, -gamk)
        v = where(c < -0.5, nan, g1**2.0*gam2k)
        # skewness
        sk1 = where(c < -1./3, nan,
                    np.sign(c)*(-g3+(g2+2*g2mg12)*g1)/((g2mg12)**(3./2.)))
        sk = where(abs(c) <= eps**0.29, 12*sqrt(6)*_ZETA3/pi**3, sk1)
        # kurtosis
        ku1 = where(c < -1./4, nan,
                    (g4+(-4*g3+3*(g2+g2mg12)*g1)*g1)/((g2mg12)**2))
        ku = where(abs(c) <= (eps)**0.23, 12.0/5.0, ku1-3.0)
        return m, v, sk, ku
    def _munp(self, n, c):
        # Raw moments via a binomial sum; exist only for c*n > -1.
        k = arange(0, n+1)
        vals = 1.0/c**n * sum(
            comb(n, k) * (-1)**k * special.gamma(c*k + 1),
            axis=0)
        return where(c*n > -1, vals, inf)
genextreme = genextreme_gen(name='genextreme')
def _digammainv(y):
    # Inverse of the digamma function (real positive arguments only).
    # This function is used in the `fit` method of `gamma_gen`.
    # The function uses either optimize.fsolve or optimize.newton
    # to solve `digamma(x) - y = 0`.  There is probably room for
    # improvement, but currently it works over a wide range of y:
    #    >>> y = 64*np.random.randn(1000000)
    #    >>> y.min(), y.max()
    #    (-311.43592651416662, 351.77388222276869)
    #    x = [_digammainv(t) for t in y]
    #    np.abs(digamma(x) - y).max()
    #    1.1368683772161603e-13
    #
    _em = 0.5772156649015328606065120  # Euler-Mascheroni constant
    func = lambda x: special.digamma(x) - y
    if y > -0.125:
        # Asymptotic inverse: digamma(x) ~ log(x) for large x.
        x0 = exp(y) + 0.5
        if y < 10:
            # Some experimentation shows that newton reliably converges
            # much faster than fsolve in this y range.  For larger y,
            # newton sometimes fails to converge.
            value = optimize.newton(func, x0, tol=1e-10)
            return value
    elif y > -3:
        # Empirical starting point for the mid range.
        x0 = exp(y/2.332) + 0.08661
    else:
        # Near the pole at x = 0: digamma(x) ~ -1/x - _em.
        x0 = 1.0 / (-y - _em)
    value, info, ier, mesg = optimize.fsolve(func, x0, xtol=1e-11,
                                             full_output=True)
    if ier != 1:
        raise RuntimeError("_digammainv: fsolve failed, y = %r" % y)
    return value[0]
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
    """A gamma continuous random variable.
    %(before_notes)s
    See Also
    --------
    erlang, expon
    Notes
    -----
    The probability density function for `gamma` is::
        gamma.pdf(x, a) = lambda**a * x**(a-1) * exp(-lambda*x) / gamma(a)
    for ``x >= 0``, ``a > 0``. Here ``gamma(a)`` refers to the gamma function.
    The scale parameter is equal to ``scale = 1.0 / lambda``.
    `gamma` has a shape parameter `a` which needs to be set explicitly. For
    instance:
        >>> from scipy.stats import gamma
        >>> rv = gamma(3., loc = 0., scale = 2.)
    produces a frozen form of `gamma` with shape ``a = 3.``, ``loc =0.``
    and ``lambda = 1./scale = 1./2.``.
    When ``a`` is an integer, `gamma` reduces to the Erlang
    distribution, and when ``a=1`` to the exponential distribution.
    %(example)s
    """
    def _rvs(self, a):
        return mtrand.standard_gamma(a, self._size)
    def _pdf(self, x, a):
        return exp(self._logpdf(x, a))
    def _logpdf(self, x, a):
        # xlogy handles x == 0 with a == 1 without a log(0) warning.
        return special.xlogy(a-1.0, x) - x - gamln(a)
    def _cdf(self, x, a):
        return special.gammainc(a, x)
    def _sf(self, x, a):
        # Survival function via the regularized upper incomplete gamma.
        return special.gammaincc(a, x)
    def _ppf(self, q, a):
        return special.gammaincinv(a, q)
    def _stats(self, a):
        return a, a, 2.0/sqrt(a), 6.0/a
    def _entropy(self, a):
        return special.psi(a)*(1-a) + a + gamln(a)
    def _fitstart(self, data):
        # The skewness of the gamma distribution is `4 / sqrt(a)`.
        # We invert that to estimate the shape `a` using the skewness
        # of the data.  The formula is regularized with 1e-8 in the
        # denominator to allow for degenerate data where the skewness
        # is close to 0.
        a = 4 / (1e-8 + _skew(data)**2)
        return super(gamma_gen, self)._fitstart(data, args=(a,))
    @inherit_docstring_from(rv_continuous)
    def fit(self, data, *args, **kwds):
        # MLE fit with special-case handling when `floc` is fixed, in which
        # case closed-form / 1-d root-finding estimates are available.
        f0 = kwds.get('f0', None)
        floc = kwds.get('floc', None)
        fscale = kwds.get('fscale', None)
        if floc is None:
            # loc is not fixed.  Use the default fit method.
            return super(gamma_gen, self).fit(data, *args, **kwds)
        # Special case: loc is fixed.
        if f0 is not None and fscale is not None:
            # This check is for consistency with `rv_continuous.fit`.
            # Without this check, this function would just return the
            # parameters that were given.
            raise ValueError("All parameters fixed. There is nothing to "
                             "optimize.")
        # Fixed location is handled by shifting the data.
        data = np.asarray(data)
        if np.any(data <= floc):
            raise FitDataError("gamma", lower=floc, upper=np.inf)
        if floc != 0:
            # Don't do the subtraction in-place, because `data` might be a
            # view of the input array.
            data = data - floc
        xbar = data.mean()
        # Three cases to handle:
        # * shape and scale both free
        # * shape fixed, scale free
        # * shape free, scale fixed
        if fscale is None:
            # scale is free
            if f0 is not None:
                # shape is fixed
                a = f0
            else:
                # shape and scale are both free.
                # The MLE for the shape parameter `a` is the solution to:
                #     log(a) - special.digamma(a) - log(xbar) +
                #                                       log(data).mean() = 0
                s = log(xbar) - log(data).mean()
                func = lambda a: log(a) - special.digamma(a) - s
                # Minka-style closed-form approximation as the bracket center.
                aest = (3-s + np.sqrt((s-3)**2 + 24*s)) / (12*s)
                xa = aest*(1-0.4)
                xb = aest*(1+0.4)
                a = optimize.brentq(func, xa, xb, disp=0)
            # The MLE for the scale parameter is just the data mean
            # divided by the shape parameter.
            scale = xbar / a
        else:
            # scale is fixed, shape is free
            # The MLE for the shape parameter `a` is the solution to:
            #     special.digamma(a) - log(data).mean() + log(fscale) = 0
            c = log(data).mean() - log(fscale)
            a = _digammainv(c)
            scale = fscale
        return a, floc, scale
gamma = gamma_gen(a=0.0, name='gamma')
class erlang_gen(gamma_gen):
    """An Erlang continuous random variable.
    %(before_notes)s
    See Also
    --------
    gamma
    Notes
    -----
    The Erlang distribution is a special case of the Gamma distribution, with
    the shape parameter `a` an integer.  Note that this restriction is not
    enforced by `erlang`.  It will, however, generate a warning the first time
    a non-integer value is used for the shape parameter.
    Refer to `gamma` for examples.
    """
    def _argcheck(self, a):
        allint = np.all(np.floor(a) == a)
        allpos = np.all(a > 0)
        if not allint:
            # An Erlang distribution shouldn't really have a non-integer
            # shape parameter, so warn the user.
            warnings.warn(
                'The shape parameter of the erlang distribution '
                'has been given a non-integer value %r.' % (a,),
                RuntimeWarning)
        return allpos
    def _fitstart(self, data):
        # Override gamma_gen_fitstart so that an integer initial value is
        # used.  (Also regularize the division, to avoid issues when
        # _skew(data) is 0 or close to 0.)
        # NOTE: super(gamma_gen, ...) deliberately skips gamma_gen's own
        # _fitstart and goes straight to rv_continuous.
        a = int(4.0 / (1e-8 + _skew(data)**2))
        return super(gamma_gen, self)._fitstart(data, args=(a,))
    # Trivial override of the fit method, so we can monkey-patch its
    # docstring.
    def fit(self, data, *args, **kwds):
        return super(erlang_gen, self).fit(data, *args, **kwds)
    if fit.__doc__ is not None:
        fit.__doc__ = (rv_continuous.fit.__doc__ +
            """
            Notes
            -----
            The Erlang distribution is generally defined to have integer values
            for the shape parameter.  This is not enforced by the `erlang` class.
            When fitting the distribution, it will generally return a non-integer
            value for the shape parameter.  By using the keyword argument
            `f0=<integer>`, the fit method can be constrained to fit the data to
            a specific integer shape parameter.
            """)
erlang = erlang_gen(a=0.0, name='erlang')
class gengamma_gen(rv_continuous):
    """A generalized gamma continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `gengamma` is::
        gengamma.pdf(x, a, c) = abs(c) * x**(c*a-1) * exp(-x**c) / gamma(a)
    for ``x > 0``, ``a > 0``, and ``c != 0``.
    %(example)s
    """
    def _argcheck(self, a, c):
        return (a > 0) & (c != 0)
    def _pdf(self, x, a, c):
        return abs(c) * exp((c*a-1)*log(x)-x**c - gamln(a))
    def _cdf(self, x, a, c):
        # For c < 0 the transform x -> x**c is decreasing, so the CDF flips.
        val = special.gammainc(a, x**c)
        cond = c + 0*val
        return where(cond > 0, val, 1-val)
    def _ppf(self, q, a, c):
        val1 = special.gammaincinv(a, q)
        val2 = special.gammaincinv(a, 1.0-q)
        ic = 1.0/c
        cond = c+0*val1
        return where(cond > 0, val1**ic, val2**ic)
    def _munp(self, n, a, c):
        # E[X**n] = Gamma(a + n/c) / Gamma(a).
        return special.gamma(a+n*1.0/c) / special.gamma(a)
    def _entropy(self, a, c):
        val = special.psi(a)
        return a*(1-val) + 1.0/c*val + gamln(a)-log(abs(c))
gengamma = gengamma_gen(a=0.0, name='gengamma')
class genhalflogistic_gen(rv_continuous):
    """A generalized half-logistic continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `genhalflogistic` is::
        genhalflogistic.pdf(x, c) = 2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
    for ``0 <= x <= 1/c``, and ``c > 0``.
    %(example)s
    """
    def _argcheck(self, c):
        # Support is bounded above at 1/c; mutate self.b accordingly.
        self.b = 1.0 / c
        return (c > 0)
    def _pdf(self, x, c):
        limit = 1.0/c
        tmp = asarray(1-c*x)
        tmp0 = tmp**(limit-1)
        tmp2 = tmp0*tmp
        return 2*tmp0 / (1+tmp2)**2
    def _cdf(self, x, c):
        limit = 1.0/c
        tmp = asarray(1-c*x)
        tmp2 = tmp**(limit)
        return (1.0-tmp2) / (1+tmp2)
    def _ppf(self, q, c):
        return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
    def _entropy(self, c):
        return 2 - (2*c+1)*log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic')
class gompertz_gen(rv_continuous):
"""A Gompertz (or truncated Gumbel) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gompertz` is::
gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1))
for ``x >= 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return log(c) + x - c * (exp(x) - 1.)
def _cdf(self, x, c):
return 1.0-exp(-c*(exp(x)-1))
def _ppf(self, q, c):
return log(1-1.0/c*log(1-q))
def _entropy(self, c):
return 1.0 - log(c) - exp(c)*special.expn(1, c)
gompertz = gompertz_gen(a=0.0, name='gompertz')
class gumbel_r_gen(rv_continuous):
    """A right-skewed Gumbel continuous random variable.
    %(before_notes)s
    See Also
    --------
    gumbel_l, gompertz, genextreme
    Notes
    -----
    The probability density function for `gumbel_r` is::
        gumbel_r.pdf(x) = exp(-(x + exp(-x)))
    The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
    distribution.  It is also related to the extreme value distribution,
    log-Weibull and Gompertz distributions.
    %(example)s
    """
    def _pdf(self, x):
        return exp(self._logpdf(x))
    def _logpdf(self, x):
        return -x - exp(-x)
    def _cdf(self, x):
        return exp(-exp(-x))
    def _logcdf(self, x):
        return -exp(-x)
    def _ppf(self, q):
        return -log(-log(q))
    def _stats(self):
        # All moments are known in closed form.
        return _EULER, pi*pi/6.0, 12*sqrt(6)/pi**3 * _ZETA3, 12.0/5
    def _entropy(self):
        # http://en.wikipedia.org/wiki/Gumbel_distribution
        return _EULER + 1.
gumbel_r = gumbel_r_gen(name='gumbel_r')
class gumbel_l_gen(rv_continuous):
    """A left-skewed Gumbel continuous random variable.
    %(before_notes)s
    See Also
    --------
    gumbel_r, gompertz, genextreme
    Notes
    -----
    The probability density function for `gumbel_l` is::
        gumbel_l.pdf(x) = exp(x - exp(x))
    The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
    distribution.  It is also related to the extreme value distribution,
    log-Weibull and Gompertz distributions.
    %(example)s
    """
    def _pdf(self, x):
        return exp(self._logpdf(x))
    def _logpdf(self, x):
        return x - exp(x)
    def _cdf(self, x):
        return 1.0-exp(-exp(x))
    def _ppf(self, q):
        return log(-log(1-q))
    def _stats(self):
        # Mirror image of gumbel_r: mean and skewness change sign.
        return -_EULER, pi*pi/6.0, \
            -12*sqrt(6)/pi**3 * _ZETA3, 12.0/5
    def _entropy(self):
        return _EULER + 1.
gumbel_l = gumbel_l_gen(name='gumbel_l')
class halfcauchy_gen(rv_continuous):
    """A Half-Cauchy continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `halfcauchy` is::
        halfcauchy.pdf(x) = 2 / (pi * (1 + x**2))
    for ``x >= 0``.
    %(example)s
    """
    def _pdf(self, x):
        # Twice the standard Cauchy density, folded onto x >= 0.
        return (2.0/pi) / (1.0+x*x)
    def _logpdf(self, x):
        # log1p keeps precision for small x.
        return np.log(2.0/pi) - np.log1p(x*x)
    def _cdf(self, x):
        angle = arctan(x)
        return 2.0/pi*angle
    def _ppf(self, q):
        return tan(pi/2*q)
    def _stats(self):
        # Mean and variance diverge; higher moments are undefined.
        return inf, inf, nan, nan
    def _entropy(self):
        return log(2*pi)
halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy')
class halflogistic_gen(rv_continuous):
    """A half-logistic continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `halflogistic` is::
        halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2 = 1/2 * sech(x/2)**2
    for ``x >= 0``.
    %(example)s
    """
    def _pdf(self, x):
        return exp(self._logpdf(x))
    def _logpdf(self, x):
        return log(2) - x - 2. * special.log1p(exp(-x))
    def _cdf(self, x):
        return tanh(x/2.0)
    def _ppf(self, q):
        return 2*arctanh(q)
    def _munp(self, n):
        # Closed forms for the first four raw moments; general formula below.
        if n == 1:
            return 2*log(2)
        if n == 2:
            return pi*pi/3.0
        if n == 3:
            return 9*_ZETA3
        if n == 4:
            return 7*pi**4 / 15.0
        return 2*(1-pow(2.0, 1-n))*special.gamma(n+1)*special.zeta(n, 1)
    def _entropy(self):
        return 2-log(2)
halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
class halfnorm_gen(rv_continuous):
    """A half-normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `halfnorm` is::
        halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)
    for ``x > 0``.
    `halfnorm` is a special case of `chi` with ``df == 1``.
    %(example)s
    """
    def _rvs(self):
        # |Z| for standard normal Z.
        return abs(mtrand.standard_normal(size=self._size))
    def _pdf(self, x):
        return sqrt(2.0/pi)*exp(-x*x/2.0)
    def _logpdf(self, x):
        return 0.5 * np.log(2.0/pi) - x*x/2.0
    def _cdf(self, x):
        # 2*Phi(x) - 1, via the normal CDF.
        return special.ndtr(x)*2-1.0
    def _ppf(self, q):
        return special.ndtri((1+q)/2.0)
    def _stats(self):
        return (sqrt(2.0/pi), 1-2.0/pi, sqrt(2)*(4-pi)/(pi-2)**1.5,
                8*(pi-3)/(pi-2)**2)
    def _entropy(self):
        return 0.5*log(pi/2.0)+0.5
halfnorm = halfnorm_gen(a=0.0, name='halfnorm')
class hypsecant_gen(rv_continuous):
    """A hyperbolic secant continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `hypsecant` is::
        hypsecant.pdf(x) = 1/pi * sech(x)
    %(example)s
    """
    def _pdf(self, x):
        # sech(x)/pi, written in terms of cosh.
        return 1.0/(cosh(x)*pi)
    def _cdf(self, x):
        # CDF is an arctan of exp(x) (Gudermannian-style form).
        return 2.0/pi*arctan(exp(x))
    def _ppf(self, q):
        half_angle = pi*q/2.0
        return log(tan(half_angle))
    def _stats(self):
        # Closed-form mean, variance, skewness, excess kurtosis.
        return 0, pi*pi/4, 0, 2
    def _entropy(self):
        return log(2*pi)
hypsecant = hypsecant_gen(name='hypsecant')
class gausshyper_gen(rv_continuous):
    """A Gauss hypergeometric continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `gausshyper` is::
        gausshyper.pdf(x, a, b, c, z) =
            C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
    for ``0 <= x <= 1``, ``a > 0``, ``b > 0``, and
    ``C = 1 / (B(a, b) F[2, 1](c, a; a+b; -z))``
    %(example)s
    """
    def _argcheck(self, a, b, c, z):
        # (c == c) and (z == z) only reject NaN; c and z may be any reals.
        return (a > 0) & (b > 0) & (c == c) & (z == z)
    def _pdf(self, x, a, b, c, z):
        # Normalizing constant from the Gauss hypergeometric function 2F1.
        Cinv = gam(a)*gam(b)/gam(a+b)*special.hyp2f1(c, a, a+b, -z)
        return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
    def _munp(self, n, a, b, c, z):
        fac = special.beta(n+a, b) / special.beta(a, b)
        num = special.hyp2f1(c, a+n, a+b+n, -z)
        den = special.hyp2f1(c, a, a+b, -z)
        return fac*num / den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper')
class invgamma_gen(rv_continuous):
    """An inverted gamma continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `invgamma` is::
        invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)
    for x > 0, a > 0.
    `invgamma` is a special case of `gengamma` with ``c == -1``.
    %(example)s
    """
    def _pdf(self, x, a):
        return exp(self._logpdf(x, a))
    def _logpdf(self, x, a):
        return (-(a+1) * log(x) - gamln(a) - 1.0/x)
    def _cdf(self, x, a):
        return 1.0 - special.gammainc(a, 1.0/x)
    def _ppf(self, q, a):
        return 1.0 / special.gammaincinv(a, 1.-q)
    def _stats(self, a, moments='mvsk'):
        # Moments exist only for large enough a; _lazywhere fills in
        # inf/nan below the existence thresholds.
        m1 = _lazywhere(a > 1, (a,), lambda x: 1. / (x - 1.), np.inf)
        m2 = _lazywhere(a > 2, (a,), lambda x: 1. / (x - 1.)**2 / (x - 2.),
                        np.inf)
        g1, g2 = None, None
        if 's' in moments:
            g1 = _lazywhere(
                a > 3, (a,),
                lambda x: 4. * np.sqrt(x - 2.) / (x - 3.), np.nan)
        if 'k' in moments:
            g2 = _lazywhere(
                a > 4, (a,),
                lambda x: 6. * (5. * x - 11.) / (x - 3.) / (x - 4.), np.nan)
        return m1, m2, g1, g2
    def _entropy(self, a):
        return a - (a+1.0) * special.psi(a) + gamln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma')
# scale is gamma from DATAPLOT and B from Regress
class invgauss_gen(rv_continuous):
    """An inverse Gaussian continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `invgauss` is::
        invgauss.pdf(x, mu) = 1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
    for ``x > 0``.
    When `mu` is too small, evaluating the cumulative distribution function
    will be inaccurate due to ``cdf(mu -> 0) = inf * 0``.
    NaNs are returned for ``mu <= 0.0028``.
    %(example)s
    """
    def _rvs(self, mu):
        return mtrand.wald(mu, 1.0, size=self._size)
    def _pdf(self, x, mu):
        return 1.0/sqrt(2*pi*x**3.0)*exp(-1.0/(2*x)*((x-mu)/mu)**2)
    def _logpdf(self, x, mu):
        return -0.5*log(2*pi) - 1.5*log(x) - ((x-mu)/mu)**2/(2*x)
    def _cdf(self, x, mu):
        fac = sqrt(1.0/x)
        # Numerical accuracy for small `mu` is bad.  See #869.
        # The two exp(1/mu) factors multiply to the exp(2/mu) term of the
        # standard inverse-Gaussian CDF formula.
        C1 = _norm_cdf(fac*(x-mu)/mu)
        C1 += exp(1.0/mu) * _norm_cdf(-fac*(x+mu)/mu) * exp(1.0/mu)
        return C1
    def _stats(self, mu):
        return mu, mu**3.0, 3*sqrt(mu), 15*mu
invgauss = invgauss_gen(a=0.0, name='invgauss')
class invweibull_gen(rv_continuous):
    """An inverted Weibull continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `invweibull` is::
        invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
    for ``x > 0``, ``c > 0``.
    References
    ----------
    F.R.S. de Gusmao, E.M.M Ortega and G.M. Cordeiro, "The generalized inverse
    Weibull distribution", Stat. Papers, vol. 52, pp. 591-619, 2011.
    %(example)s
    """
    def _pdf(self, x, c):
        xc1 = np.power(x, -c - 1.0)
        xc2 = np.power(x, -c)
        xc2 = exp(-xc2)
        return c * xc1 * xc2
    def _cdf(self, x, c):
        xc1 = np.power(x, -c)
        return exp(-xc1)
    def _ppf(self, q, c):
        return np.power(-log(q), -1.0/c)
    def _munp(self, n, c):
        # Moments exist only for n < c (gamma pole otherwise).
        return special.gamma(1 - n / c)
    def _entropy(self, c):
        return 1+_EULER + _EULER / c - log(c)
invweibull = invweibull_gen(a=0, name='invweibull')
class johnsonsb_gen(rv_continuous):
    """A Johnson SB continuous random variable.
    %(before_notes)s
    See Also
    --------
    johnsonsu
    Notes
    -----
    The probability density function for `johnsonsb` is::
        johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))
    for ``0 < x < 1`` and ``a, b > 0``, and ``phi`` is the normal pdf.
    %(example)s
    """
    def _argcheck(self, a, b):
        # (a == a) only rejects NaN; a may be any real.
        return (b > 0) & (a == a)
    def _pdf(self, x, a, b):
        # Normal density evaluated at the logit transform of x.
        trm = _norm_pdf(a + b*log(x/(1.0-x)))
        return b*1.0/(x*(1-x))*trm
    def _cdf(self, x, a, b):
        return _norm_cdf(a + b*log(x/(1.0-x)))
    def _ppf(self, q, a, b):
        return 1.0 / (1 + exp(-1.0 / b * (_norm_ppf(q) - a)))
johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb')
class johnsonsu_gen(rv_continuous):
    """A Johnson SU continuous random variable.
    %(before_notes)s
    See Also
    --------
    johnsonsb
    Notes
    -----
    The probability density function for `johnsonsu` is::
        johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) *
                                 phi(a + b * log(x + sqrt(x**2 + 1)))
    for all ``x, a, b > 0``, and `phi` is the normal pdf.
    %(example)s
    """
    def _argcheck(self, a, b):
        # (a == a) only rejects NaN; a may be any real.
        return (b > 0) & (a == a)
    def _pdf(self, x, a, b):
        # Normal density evaluated at the arcsinh transform of x.
        x2 = x*x
        trm = _norm_pdf(a + b * log(x + sqrt(x2+1)))
        return b*1.0/sqrt(x2+1.0)*trm
    def _cdf(self, x, a, b):
        return _norm_cdf(a + b * log(x + sqrt(x*x + 1)))
    def _ppf(self, q, a, b):
        return sinh((_norm_ppf(q) - a) / b)
johnsonsu = johnsonsu_gen(name='johnsonsu')
class laplace_gen(rv_continuous):
    """A Laplace continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `laplace` is::
        laplace.pdf(x) = 1/2 * exp(-abs(x))
    %(example)s
    """
    def _rvs(self):
        return mtrand.laplace(0, 1, size=self._size)
    def _pdf(self, x):
        return 0.5*exp(-abs(x))
    def _cdf(self, x):
        # Piecewise form for the two halves of the support.
        return where(x > 0, 1.0-0.5*exp(-x), 0.5*exp(x))
    def _ppf(self, q):
        return where(q > 0.5, -log(2*(1-q)), log(2*q))
    def _stats(self):
        return 0, 2, 0, 3
    def _entropy(self):
        return log(2)+1
laplace = laplace_gen(name='laplace')
class levy_gen(rv_continuous):
    """A Levy continuous random variable.
    %(before_notes)s
    See Also
    --------
    levy_stable, levy_l
    Notes
    -----
    The probability density function for `levy` is::
        levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
    for ``x > 0``.
    This is the same as the Levy-stable distribution with a=1/2 and b=1.
    %(example)s
    """
    def _pdf(self, x):
        return 1 / sqrt(2*pi*x) / x * exp(-1/(2*x))
    def _cdf(self, x):
        # CDF expressed through the standard normal CDF.
        return 2 * (1 - _norm_cdf(1 / sqrt(x)))
    def _ppf(self, q):
        val = _norm_ppf(1 - q / 2.0)
        return 1.0 / (val * val)
    def _stats(self):
        # Mean and variance diverge; skewness/kurtosis undefined.
        return inf, inf, nan, nan
levy = levy_gen(a=0.0, name="levy")
class levy_l_gen(rv_continuous):
    """A left-skewed Levy continuous random variable.
    %(before_notes)s
    See Also
    --------
    levy, levy_stable
    Notes
    -----
    The probability density function for `levy_l` is::
        levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
    for ``x < 0``.
    This is the same as the Levy-stable distribution with a=1/2 and b=-1.
    %(example)s
    """
    def _pdf(self, x):
        # Mirror image of `levy` on the negative half-line.
        ax = abs(x)
        return 1/sqrt(2*pi*ax)/ax*exp(-1/(2*ax))
    def _cdf(self, x):
        ax = abs(x)
        return 2 * _norm_cdf(1 / sqrt(ax)) - 1
    def _ppf(self, q):
        val = _norm_ppf((q + 1.0) / 2)
        return -1.0 / (val * val)
    def _stats(self):
        return inf, inf, nan, nan
levy_l = levy_l_gen(b=0.0, name="levy_l")
class levy_stable_gen(rv_continuous):
    """A Levy-stable continuous random variable.
    %(before_notes)s
    See Also
    --------
    levy, levy_l
    Notes
    -----
    Levy-stable distribution (only random variates available -- ignore other
    docs)
    %(example)s
    """
    def _rvs(self, alpha, beta):
        # Simulation from a uniform angle TH and an exponential W; this
        # appears to follow a Chambers-Mallows-Stuck-style construction
        # (TODO confirm), with closed-form shortcuts for alpha == 1 and
        # beta == 0 (symmetric case).
        sz = self._size
        TH = uniform.rvs(loc=-pi/2.0, scale=pi, size=sz)
        W = expon.rvs(size=sz)
        if alpha == 1:
            return 2/pi*(pi/2+beta*TH)*tan(TH)-beta*log((pi/2*W*cos(TH))/(pi/2+beta*TH))
        ialpha = 1.0/alpha
        aTH = alpha*TH
        if beta == 0:
            return W/(cos(TH)/tan(aTH)+sin(TH))*((cos(aTH)+sin(aTH)*tan(TH))/W)**ialpha
        val0 = beta*tan(pi*alpha/2)
        th0 = arctan(val0)/alpha
        val3 = W/(cos(TH)/tan(alpha*(th0+TH))+sin(TH))
        res3 = val3*((cos(aTH)+sin(aTH)*tan(TH)-val0*(sin(aTH)-cos(aTH)*tan(TH)))/W)**ialpha
        return res3
    def _argcheck(self, alpha, beta):
        # One-sided supports for the extreme skewness values; mutates bounds.
        if beta == -1:
            self.b = 0.0
        elif beta == 1:
            self.a = 0.0
        return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
    def _pdf(self, x, alpha, beta):
        # No closed-form density is implemented (see class docstring).
        raise NotImplementedError
levy_stable = levy_stable_gen(name='levy_stable')
class logistic_gen(rv_continuous):
    """A logistic (or Sech-squared) continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `logistic` is::
        logistic.pdf(x) = exp(-x) / (1+exp(-x))**2
    `logistic` is a special case of `genlogistic` with ``c == 1``.
    %(example)s
    """
    def _rvs(self):
        return mtrand.logistic(size=self._size)
    def _pdf(self, x):
        return exp(self._logpdf(x))
    def _logpdf(self, x):
        return -x - 2. * special.log1p(exp(-x))
    def _cdf(self, x):
        # expit(x) = 1/(1 + exp(-x)), the logistic sigmoid.
        return special.expit(x)
    def _ppf(self, q):
        return -log(1.0/q-1)
    def _stats(self):
        return 0, pi*pi/3.0, 0, 6.0/5.0
    def _entropy(self):
        # http://en.wikipedia.org/wiki/Logistic_distribution
        return 2.0
logistic = logistic_gen(name='logistic')
class loggamma_gen(rv_continuous):
    """A log gamma continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `loggamma` is::
        loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c)
    for all ``x, c > 0``.
    %(example)s
    """
    def _rvs(self, c):
        # Log of gamma variates.
        return log(mtrand.gamma(c, size=self._size))
    def _pdf(self, x, c):
        return exp(c*x-exp(x)-gamln(c))
    def _cdf(self, x, c):
        return special.gammainc(c, exp(x))
    def _ppf(self, q, c):
        return log(special.gammaincinv(c, q))
    def _stats(self, c):
        # See, for example, "A Statistical Study of Log-Gamma Distribution", by
        # Ping Shing Chan (thesis, McMaster University, 1993).
        mean = special.digamma(c)
        var = special.polygamma(1, c)
        skewness = special.polygamma(2, c) / np.power(var, 1.5)
        excess_kurtosis = special.polygamma(3, c) / (var*var)
        return mean, var, skewness, excess_kurtosis
loggamma = loggamma_gen(name='loggamma')
class loglaplace_gen(rv_continuous):
    """A log-Laplace continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `loglaplace` is::
        loglaplace.pdf(x, c) = c / 2 * x**(c-1),   for 0 < x < 1
                             = c / 2 * x**(-c-1),  for x >= 1
    for ``c > 0``.
    References
    ----------
    T.J. Kozubowski and K. Podgorski, "A log-Laplace growth rate model",
    The Mathematical Scientist, vol. 28, pp. 49-60, 2003.
    %(example)s
    """
    def _pdf(self, x, c):
        # Flip the sign of the exponent at x = 1 (the two pdf branches).
        cd2 = c/2.0
        c = where(x < 1, c, -c)
        return cd2*x**(c-1)
    def _cdf(self, x, c):
        return where(x < 1, 0.5*x**c, 1-0.5*x**(-c))
    def _ppf(self, q, c):
        return where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))
    def _munp(self, n, c):
        # Moments exist only for n < c; formula has a pole at n == c.
        return c**2 / (c**2 - n**2)
    def _entropy(self, c):
        return log(2.0/c) + 1.0
loglaplace = loglaplace_gen(a=0.0, name='loglaplace')
def _lognorm_logpdf(x, s):
return -log(x)**2 / (2*s**2) + np.where(x == 0, 0, -log(s*x*sqrt(2*pi)))
class lognorm_gen(rv_continuous):
    """A lognormal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `lognorm` is::
        lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
    for ``x > 0``, ``s > 0``.
    If ``log(x)`` is normally distributed with mean ``mu`` and variance
    ``sigma**2``, then ``x`` is log-normally distributed with shape parameter
    sigma and scale parameter ``exp(mu)``.
    %(example)s
    """
    def _rvs(self, s):
        return exp(s * mtrand.standard_normal(self._size))
    def _pdf(self, x, s):
        return exp(self._logpdf(x, s))
    def _logpdf(self, x, s):
        return _lognorm_logpdf(x, s)
    def _cdf(self, x, s):
        return _norm_cdf(log(x) / s)
    def _ppf(self, q, s):
        return exp(s * _norm_ppf(q))
    def _stats(self, s):
        # Closed-form moments in terms of p = exp(s**2).
        p = exp(s*s)
        mu = sqrt(p)
        mu2 = p*(p-1)
        g1 = sqrt((p-1))*(2+p)
        g2 = np.polyval([1, 2, 3, 0, -6.0], p)
        return mu, mu2, g1, g2
    def _entropy(self, s):
        return 0.5 * (1 + log(2*pi) + 2 * log(s))
lognorm = lognorm_gen(a=0.0, name='lognorm')
class gilbrat_gen(rv_continuous):
    """A Gilbrat continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `gilbrat` is::
        gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
    `gilbrat` is a special case of `lognorm` with ``s = 1``.
    %(example)s
    """
    def _rvs(self):
        return exp(mtrand.standard_normal(self._size))
    def _pdf(self, x):
        return exp(self._logpdf(x))
    def _logpdf(self, x):
        # Reuse the lognormal log-density with s fixed at 1.
        return _lognorm_logpdf(x, 1.0)
    def _cdf(self, x):
        return _norm_cdf(log(x))
    def _ppf(self, q):
        return exp(_norm_ppf(q))
    def _stats(self):
        # Same moment formulas as lognorm with p = exp(1).
        p = np.e
        mu = sqrt(p)
        mu2 = p * (p - 1)
        g1 = sqrt((p - 1)) * (2 + p)
        g2 = np.polyval([1, 2, 3, 0, -6.0], p)
        return mu, mu2, g1, g2
    def _entropy(self):
        return 0.5 * log(2 * pi) + 0.5
gilbrat = gilbrat_gen(a=0.0, name='gilbrat')
class maxwell_gen(rv_continuous):
    """A Maxwell continuous random variable.
    %(before_notes)s
    Notes
    -----
    A special case of a `chi` distribution,  with ``df = 3``, ``loc = 0.0``,
    and given ``scale = a``, where ``a`` is the parameter used in the
    Mathworld description [1]_.
    The probability density function for `maxwell` is::
        maxwell.pdf(x) = sqrt(2/pi)x**2 * exp(-x**2/2)
    for ``x > 0``.
    References
    ----------
    .. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
    %(example)s
    """
    def _rvs(self):
        # chi with 3 degrees of freedom.
        return chi.rvs(3.0, size=self._size)
    def _pdf(self, x):
        return sqrt(2.0/pi)*x*x*exp(-x*x/2.0)
    def _cdf(self, x):
        return special.gammainc(1.5, x*x/2.0)
    def _ppf(self, q):
        return sqrt(2*special.gammaincinv(1.5, q))
    def _stats(self):
        val = 3*pi-8
        return (2*sqrt(2.0/pi), 3-8/pi, sqrt(2)*(32-10*pi)/val**1.5,
                (-12*pi*pi + 160*pi - 384) / val**2.0)
    def _entropy(self):
        return _EULER + 0.5*log(2*pi)-0.5
maxwell = maxwell_gen(a=0.0, name='maxwell')
class mielke_gen(rv_continuous):
    """A Mielke's Beta-Kappa continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `mielke` is::
        mielke.pdf(x, k, s) = k * x**(k-1) / (1+x**s)**(1+k/s)
    for ``x > 0``.
    %(example)s
    """
    def _pdf(self, x, k, s):
        # Numerator and denominator named for readability.
        numer = k*x**(k-1.0)
        denom = (1.0+x**s)**(1.0+k*1.0/s)
        return numer / denom
    def _cdf(self, x, k, s):
        return x**k / (1.0+x**s)**(k*1.0/s)
    def _ppf(self, q, k, s):
        # Invert the CDF: solve t/(1-t) with t = q**(s/k).
        t = pow(q, s*1.0/k)
        return pow(t/(1.0-t), 1.0/s)
mielke = mielke_gen(a=0.0, name='mielke')
class nakagami_gen(rv_continuous):
    """A Nakagami continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `nakagami` is::
        nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
                              x**(2*nu-1) * exp(-nu*x**2)
    for ``x > 0``, ``nu > 0``.
    %(example)s
    """
    def _pdf(self, x, nu):
        return 2*nu**nu/gam(nu)*(x**(2*nu-1.0))*exp(-nu*x*x)
    def _cdf(self, x, nu):
        return special.gammainc(nu, nu*x*x)
    def _ppf(self, q, nu):
        return sqrt(1.0/nu*special.gammaincinv(nu, q))
    def _stats(self, nu):
        # Mean from the gamma-function ratio; higher moments follow.
        mu = gam(nu+0.5)/gam(nu)/sqrt(nu)
        mu2 = 1.0-mu*mu
        g1 = mu * (1 - 4*nu*mu2) / 2.0 / nu / np.power(mu2, 1.5)
        g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
        g2 /= nu*mu2**2.0
        return mu, mu2, g1, g2
nakagami = nakagami_gen(a=0.0, name="nakagami")
class ncx2_gen(rv_continuous):
    """A non-central chi-squared continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `ncx2` is::
        ncx2.pdf(x, df, nc) = exp(-(nc+df)/2) * 1/2 * (x/nc)**((df-2)/4)
                              * I[(df-2)/2](sqrt(nc*x))
    for ``x > 0``.
    %(example)s
    """
    def _rvs(self, df, nc):
        return mtrand.noncentral_chisquare(df, nc, self._size)
    def _logpdf(self, x, df, nc):
        # Delegates to the module-level helper.
        return _ncx2_log_pdf(x, df, nc)
    def _pdf(self, x, df, nc):
        return _ncx2_pdf(x, df, nc)
    def _cdf(self, x, df, nc):
        return _ncx2_cdf(x, df, nc)
    def _ppf(self, q, df, nc):
        return special.chndtrix(q, df, nc)
    def _stats(self, df, nc):
        # Closed-form moments in terms of val = df + 2*nc.
        val = df + 2.0*nc
        return (df + nc, 2*val, sqrt(8)*(val+nc)/val**1.5,
                12.0*(val+2*nc)/val**2.0)
ncx2 = ncx2_gen(a=0.0, name='ncx2')
class ncf_gen(rv_continuous):
    """A non-central F distribution continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `ncf` is::
        ncf.pdf(x, df1, df2, nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2)))
                    * df1**(df1/2) * df2**(df2/2) * x**(df1/2-1)
                    * (df2+df1*x)**(-(df1+df2)/2)
                    * gamma(df1/2)*gamma(1+df2/2)
                    * L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2)))
                    / (B(v1/2, v2/2) * gamma((v1+v2)/2))
    for ``df1, df2, nc > 0``.
    %(example)s
    """
    def _rvs(self, dfn, dfd, nc):
        return mtrand.noncentral_f(dfn, dfd, nc, self._size)
    def _pdf_skip(self, x, dfn, dfd, nc):
        # Explicit density via an associated Laguerre polynomial; the
        # gamma-function factors are computed in log space first.
        n1, n2 = dfn, dfd
        term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + gamln(n1/2.)+gamln(1+n2/2.)
        term -= gamln((n1+n2)/2.0)
        Px = exp(term)
        Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
        Px *= (n2+n1*x)**(-(n1+n2)/2)
        Px *= special.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)), n2/2, n1/2-1)
        Px /= special.beta(n1/2, n2/2)
        # this function does not have a return (intentionally disabled);
        # drop it for now, the generic function seems to work ok
    def _cdf(self, x, dfn, dfd, nc):
        # Cephes non-central F CDF.
        return special.ncfdtr(dfn, dfd, nc, x)
    def _ppf(self, q, dfn, dfd, nc):
        return special.ncfdtri(dfn, dfd, nc, q)
    def _munp(self, n, dfn, dfd, nc):
        # n-th raw moment via the confluent hypergeometric function 1F1.
        val = (dfn * 1.0/dfd)**n
        term = gamln(n+0.5*dfn) + gamln(0.5*dfd-n) - gamln(dfd*0.5)
        val *= exp(-nc / 2.0+term)
        val *= special.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
        return val
    def _stats(self, dfn, dfd, nc):
        # Mean is finite only for dfd > 2, variance only for dfd > 4;
        # skew/kurtosis are left to the generic machinery (None).
        mu = where(dfd <= 2, inf, dfd / (dfd-2.0)*(1+nc*1.0/dfn))
        mu2 = where(dfd <= 4, inf, 2*(dfd*1.0/dfn)**2.0 *
                    ((dfn+nc/2.0)**2.0 + (dfn+nc)*(dfd-2.0)) /
                    ((dfd-2.0)**2.0 * (dfd-4.0)))
        return mu, mu2, None, None
ncf = ncf_gen(a=0.0, name='ncf')
class t_gen(rv_continuous):
    """A Student's T continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `t` is::
                                       gamma((df+1)/2)
        t.pdf(x, df) = ---------------------------------------------------
                       sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2)
    for ``df > 0``.
    %(example)s
    """
    def _rvs(self, df):
        return mtrand.standard_t(df, size=self._size)
    def _pdf(self, x, df):
        # Compute the gamma-function ratio in log space to avoid overflow
        # for large df.
        r = asarray(df*1.0)
        Px = exp(gamln((r+1)/2)-gamln(r/2))
        Px /= sqrt(r*pi)*(1+(x**2)/r)**((r+1)/2)
        return Px
    def _logpdf(self, x, df):
        r = df*1.0
        lPx = gamln((r+1)/2)-gamln(r/2)
        lPx -= 0.5*log(r*pi) + (r+1)/2*log(1+(x**2)/r)
        return lPx
    def _cdf(self, x, df):
        return special.stdtr(df, x)
    def _sf(self, x, df):
        # Survival function by symmetry: P(T > x) = P(T < -x).
        return special.stdtr(df, -x)
    def _ppf(self, q, df):
        return special.stdtrit(df, q)
    def _isf(self, q, df):
        # Inverse survival function, again by symmetry.
        return -special.stdtrit(df, q)
    def _stats(self, df):
        # Mean 0; variance df/(df-2) for df > 2 (inf otherwise);
        # skew 0 for df > 3; excess kurtosis 6/(df-4) for df > 4 (nan otherwise).
        mu2 = where(df > 2, df / (df-2.0), inf)
        g1 = where(df > 3, 0.0, nan)
        g2 = where(df > 4, 6.0/(df-4.0), nan)
        return 0, mu2, g1, g2
t = t_gen(name='t')
class nct_gen(rv_continuous):
    """A non-central Student's T continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `nct` is::
                                            df**(df/2) * gamma(df+1)
        nct.pdf(x, df, nc) = ----------------------------------------------------
                             2**df*exp(nc**2/2) * (df+x**2)**(df/2) * gamma(df/2)
    for ``df > 0``.
    %(example)s
    """
    def _argcheck(self, df, nc):
        # df must be positive; (nc == nc) rejects NaN while allowing any
        # real (including negative) non-centrality.
        return (df > 0) & (nc == nc)
    def _rvs(self, df, nc):
        # Sample via the definition: (Z + nc) / sqrt(Chi2(df)/df).
        return (norm.rvs(loc=nc, size=self._size) * sqrt(df) /
                sqrt(chi2.rvs(df, size=self._size)))
    def _pdf(self, x, df, nc):
        # Density assembled from two confluent hypergeometric (1F1) terms;
        # the common prefactor is built in log space to limit overflow.
        n = df*1.0
        nc = nc*1.0
        x2 = x*x
        ncx2 = nc*nc*x2
        fac1 = n + x2
        trm1 = n/2.*log(n) + gamln(n+1)
        trm1 -= n*log(2)+nc*nc/2.+(n/2.)*log(fac1)+gamln(n/2.)
        Px = exp(trm1)
        valF = ncx2 / (2*fac1)
        trm1 = sqrt(2)*nc*x*special.hyp1f1(n/2+1, 1.5, valF)
        trm1 /= asarray(fac1*special.gamma((n+1)/2))
        trm2 = special.hyp1f1((n+1)/2, 0.5, valF)
        trm2 /= asarray(sqrt(fac1)*special.gamma(n/2+1))
        Px *= trm1+trm2
        return Px
    def _cdf(self, x, df, nc):
        return special.nctdtr(df, nc, x)
    def _ppf(self, q, df, nc):
        return special.nctdtrit(df, nc, q)
    def _stats(self, df, nc, moments='mv'):
        #
        # See D. Hogben, R.S. Pinkham, and M.B. Wilk,
        # 'The moments of the non-central t-distribution'
        # Biometrika 48, p. 465 (1961).
        # e.g. http://www.jstor.org/stable/2332772 (gated)
        #
        mu, mu2, g1, g2 = None, None, None, None
        gfac = gam(df/2.-0.5) / gam(df/2.)
        c11 = sqrt(df/2.) * gfac
        c20 = df / (df-2.)
        c22 = c20 - c11*c11
        # Mean exists for df > 1, variance for df > 2.
        mu = np.where(df > 1, nc*c11, np.inf)
        mu2 = np.where(df > 2, c22*nc*nc + c20, np.inf)
        if 's' in moments:
            # skewness (exists for df > 3)
            c33t = df * (7.-2.*df) / (df-2.) / (df-3.) + 2.*c11*c11
            c31t = 3.*df / (df-2.) / (df-3.)
            mu3 = (c33t*nc*nc + c31t) * c11*nc
            g1 = np.where(df > 3, mu3 / np.power(mu2, 1.5), np.nan)
        #kurtosis (exists for df > 4)
        if 'k' in moments:
            c44 = df*df / (df-2.) / (df-4.)
            c44 -= c11*c11 * 2.*df*(5.-df) / (df-2.) / (df-3.)
            c44 -= 3.*c11**4
            c42 = df / (df-4.) - c11*c11 * (df-1.) / (df-3.)
            c42 *= 6.*df / (df-2.)
            c40 = 3.*df*df / (df-2.) / (df-4.)
            mu4 = c44 * nc**4 + c42*nc**2 + c40
            g2 = np.where(df > 4, mu4/mu2**2 - 3., np.nan)
        return mu, mu2, g1, g2
nct = nct_gen(name="nct")
class pareto_gen(rv_continuous):
    """A Pareto continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `pareto` is::
        pareto.pdf(x, b) = b / x**(b+1)
    for ``x >= 1``, ``b > 0``.
    %(example)s
    """
    def _pdf(self, x, b):
        return b * x**(-b-1)
    def _cdf(self, x, b):
        return 1 - x**(-b)
    def _ppf(self, q, b):
        return pow(1-q, -1.0/b)
    def _stats(self, b, moments='mv'):
        # Each moment only exists above a threshold on b; the
        # extract/place pattern computes the finite values on the valid
        # mask and fills inf (mean/variance) or nan (skew/kurtosis)
        # elsewhere. b may be an array.
        mu, mu2, g1, g2 = None, None, None, None
        if 'm' in moments:
            # mean exists for b > 1
            mask = b > 1
            bt = extract(mask, b)
            mu = valarray(shape(b), value=inf)
            place(mu, mask, bt / (bt-1.0))
        if 'v' in moments:
            # variance exists for b > 2
            mask = b > 2
            bt = extract(mask, b)
            mu2 = valarray(shape(b), value=inf)
            place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
        if 's' in moments:
            # skewness exists for b > 3
            mask = b > 3
            bt = extract(mask, b)
            g1 = valarray(shape(b), value=nan)
            vals = 2 * (bt + 1.0) * sqrt(bt - 2.0) / ((bt - 3.0) * sqrt(bt))
            place(g1, mask, vals)
        if 'k' in moments:
            # excess kurtosis exists for b > 4; expressed as a ratio of
            # cubic polynomials in b.
            mask = b > 4
            bt = extract(mask, b)
            g2 = valarray(shape(b), value=nan)
            vals = (6.0*polyval([1.0, 1.0, -6, -2], bt) /
                    polyval([1.0, -7.0, 12.0, 0.0], bt))
            place(g2, mask, vals)
        return mu, mu2, g1, g2
    def _entropy(self, c):
        return 1 + 1.0/c - log(c)
pareto = pareto_gen(a=1.0, name="pareto")
class lomax_gen(rv_continuous):
    """A Lomax (Pareto of the second kind) continuous random variable.
    %(before_notes)s
    Notes
    -----
    The Lomax distribution is a special case of the Pareto distribution, with
    (loc=-1.0).
    The probability density function for `lomax` is::
        lomax.pdf(x, c) = c / (1+x)**(c+1)
    for ``x >= 0``, ``c > 0``.
    %(example)s
    """
    def _pdf(self, x, c):
        # c / (1 + x)**(c + 1)
        return c * 1.0 / (1.0 + x)**(c + 1.0)

    def _logpdf(self, x, c):
        shifted = 1 + x
        return log(c) - (c + 1) * log(shifted)

    def _cdf(self, x, c):
        tail = 1.0 / (1.0 + x)**c
        return 1.0 - tail

    def _sf(self, x, c):
        # Survival function: (1 + x)**(-c)
        return 1.0 / (1.0 + x)**c

    def _logsf(self, x, c):
        return -c * log(1 + x)

    def _ppf(self, q, c):
        # Invert the CDF: x = (1 - q)**(-1/c) - 1
        return pow(1.0 - q, -1.0 / c) - 1

    def _stats(self, c):
        # Delegate to Pareto shifted by loc=-1.
        stats_tuple = pareto.stats(c, loc=-1.0, moments='mvsk')
        mu, mu2, g1, g2 = stats_tuple
        return mu, mu2, g1, g2

    def _entropy(self, c):
        return 1 + 1.0 / c - log(c)

lomax = lomax_gen(a=0.0, name="lomax")
class pearson3_gen(rv_continuous):
    """A pearson type III continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `pearson3` is::
        pearson3.pdf(x, skew) = abs(beta) / gamma(alpha) *
            (beta * (x - zeta))**(alpha - 1) * exp(-beta*(x - zeta))
    where::
            beta = 2 / (skew * stddev)
            alpha = (stddev * beta)**2
            zeta = loc - alpha / beta
    %(example)s
    References
    ----------
    R.W. Vogel and D.E. McMartin, "Probability Plot Goodness-of-Fit and
    Skewness Estimation Procedures for the Pearson Type 3 Distribution", Water
    Resources Research, Vol.27, 3149-3158 (1991).
    L.R. Salvosa, "Tables of Pearson's Type III Function", Ann. Math. Statist.,
    Vol.1, 191-198 (1930).
    "Using Modern Computing Tools to Fit the Pearson Type III Distribution to
    Aviation Loads Data", Office of Aviation Research (2003).
    """
    def _preprocess(self, x, skew):
        # Shared setup for pdf/cdf/rvs/ppf: broadcast inputs, split them
        # into a "near-normal" mask (tiny skew) and its complement, and
        # compute the gamma-distribution parameters on the complement.
        # The real 'loc' and 'scale' are handled in the calling pdf(...). The
        # local variables 'loc' and 'scale' within pearson3._pdf are set to
        # the defaults just to keep them as part of the equations for
        # documentation.
        loc = 0.0
        scale = 1.0
        # If skew is small, return _norm_pdf. The divide between pearson3
        # and norm was found by brute force and is approximately a skew of
        # 0.000016. No one, I hope, would actually use a skew value even
        # close to this small.
        norm2pearson_transition = 0.000016
        ans, x, skew = np.broadcast_arrays([1.0], x, skew)
        ans = ans.copy()
        mask = np.absolute(skew) < norm2pearson_transition
        invmask = ~mask
        # beta/alpha/zeta are only evaluated where the skew is non-trivial.
        beta = 2.0 / (skew[invmask] * scale)
        alpha = (scale * beta)**2
        zeta = loc - alpha / beta
        transx = beta * (x[invmask] - zeta)
        return ans, x, transx, skew, mask, invmask, beta, alpha, zeta
    def _argcheck(self, skew):
        # The _argcheck function in rv_continuous only allows positive
        # arguments. The skew argument for pearson3 can be zero (which I want
        # to handle inside pearson3._pdf) or negative. So just return True
        # for all skew args.
        return np.ones(np.shape(skew), dtype=bool)
    def _stats(self, skew):
        # Moments in terms of the gamma parameters computed by _preprocess.
        ans, x, transx, skew, mask, invmask, beta, alpha, zeta = (
            self._preprocess([1], skew))
        m = zeta + alpha / beta
        v = alpha / (beta**2)
        s = 2.0 / (alpha**0.5) * np.sign(beta)
        k = 6.0 / alpha
        return m, v, s, k
    def _pdf(self, x, skew):
        # Do the calculation in _logpdf since helps to limit
        # overflow/underflow problems
        ans = exp(self._logpdf(x, skew))
        if ans.ndim == 0:
            # scalar case: map nan (out-of-support) to zero density
            if np.isnan(ans):
                return 0.0
            return ans
        ans[np.isnan(ans)] = 0.0
        return ans
    def _logpdf(self, x, skew):
        #   PEARSON3 logpdf                           GAMMA logpdf
        #   np.log(abs(beta))
        # + (alpha - 1)*log(beta*(x - zeta))          + (a - 1)*log(x)
        # - beta*(x - zeta)                           - x
        # - gamln(alpha)                              - gamln(a)
        ans, x, transx, skew, mask, invmask, beta, alpha, zeta = (
            self._preprocess(x, skew))
        # near-zero skew: fall back to the standard normal
        ans[mask] = np.log(_norm_pdf(x[mask]))
        ans[invmask] = log(abs(beta)) + gamma._logpdf(transx, alpha)
        return ans
    def _cdf(self, x, skew):
        ans, x, transx, skew, mask, invmask, beta, alpha, zeta = (
            self._preprocess(x, skew))
        ans[mask] = _norm_cdf(x[mask])
        ans[invmask] = gamma._cdf(transx, alpha)
        return ans
    def _rvs(self, skew):
        # Sample from the underlying gamma (or normal for tiny skew) and
        # shift/scale back to Pearson III.
        ans, x, transx, skew, mask, invmask, beta, alpha, zeta = (
            self._preprocess([0], skew))
        if mask[0]:
            return mtrand.standard_normal(self._size)
        ans = mtrand.standard_gamma(alpha, self._size)/beta + zeta
        if ans.size == 1:
            return ans[0]
        return ans
    def _ppf(self, q, skew):
        ans, q, transq, skew, mask, invmask, beta, alpha, zeta = (
            self._preprocess(q, skew))
        ans[mask] = _norm_ppf(q[mask])
        ans[invmask] = special.gammaincinv(alpha, q[invmask])/beta + zeta
        return ans
pearson3 = pearson3_gen(name="pearson3")
class powerlaw_gen(rv_continuous):
    """A power-function continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `powerlaw` is::
        powerlaw.pdf(x, a) = a * x**(a-1)
    for ``0 <= x <= 1``, ``a > 0``.
    `powerlaw` is a special case of `beta` with ``b == 1``.
    %(example)s
    """
    def _pdf(self, x, a):
        return a*x**(a-1.0)
    def _logpdf(self, x, a):
        return log(a) + (a-1)*log(x)
    def _cdf(self, x, a):
        return x**(a*1.0)
    def _logcdf(self, x, a):
        return a*log(x)
    def _ppf(self, q, a):
        # Invert F(x) = x**a directly.
        return pow(q, 1.0/a)
    def _stats(self, a):
        # Closed-form mean, variance, skewness, excess kurtosis.
        return (a / (a + 1.0),
                a / (a + 2.0) / (a + 1.0) ** 2,
                -2.0 * ((a - 1.0) / (a + 3.0)) * sqrt((a + 2.0) / a),
                6 * polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4)))
    def _entropy(self, a):
        return 1 - 1.0/a - log(a)
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw")
class powerlognorm_gen(rv_continuous):
    """A power log-normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `powerlognorm` is::
        powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) *
                                    (Phi(-log(x)/s))**(c-1),
    where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
    and ``x > 0``, ``s, c > 0``.
    %(example)s
    """
    def _pdf(self, x, c, s):
        # Name the standardized log once; reuse in both factors.
        z = log(x) / s
        return (c / (x * s) * _norm_pdf(z) *
                pow(_norm_cdf(-z), c * 1.0 - 1.0))

    def _cdf(self, x, c, s):
        z = log(x) / s
        return 1.0 - pow(_norm_cdf(-z), c * 1.0)

    def _ppf(self, q, c, s):
        # Invert via the normal quantile of the complementary tail.
        tail = pow(1.0 - q, 1.0 / c)
        return exp(-s * _norm_ppf(tail))

powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm")
class powernorm_gen(rv_continuous):
    """A power normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `powernorm` is::
        powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1)
    where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
    and ``x > 0``, ``c > 0``.
    %(example)s
    """
    def _pdf(self, x, c):
        # c * phi(x) * Phi(-x)**(c-1)
        tail_cdf = _norm_cdf(-x)
        return (c * _norm_pdf(x) * (tail_cdf**(c - 1.0)))

    def _logpdf(self, x, c):
        return log(c) + _norm_logpdf(x) + (c - 1) * _norm_logcdf(-x)

    def _cdf(self, x, c):
        tail = _norm_cdf(-x)**(c * 1.0)
        return 1.0 - tail

    def _ppf(self, q, c):
        # Invert through the complementary tail raised to 1/c.
        return -_norm_ppf(pow(1.0 - q, 1.0 / c))

powernorm = powernorm_gen(name='powernorm')
class rdist_gen(rv_continuous):
    """An R-distributed continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `rdist` is::
        rdist.pdf(x, c) = (1-x**2)**(c/2-1) / B(1/2, c/2)
    for ``-1 <= x <= 1``, ``c > 0``.
    %(example)s
    """
    def _pdf(self, x, c):
        return np.power((1.0 - x**2), c / 2.0 - 1) / special.beta(0.5, c / 2.0)
    def _cdf(self, x, c):
        # CDF via the Gauss hypergeometric function 2F1.
        term1 = x / special.beta(0.5, c / 2.0)
        res = 0.5 + term1 * special.hyp2f1(0.5, 1 - c / 2.0, 1.5, x**2)
        # There's an issue with hyp2f1, it returns nans near x = +-1, c > 100.
        # Use the generic implementation in that case.  See gh-1285 for
        # background.
        if any(np.isnan(res)):
            return rv_continuous._cdf(self, x, c)
        return res
    def _munp(self, n, c):
        # Odd raw moments vanish by symmetry (the (1 - n % 2) factor).
        numerator = (1 - (n % 2)) * special.beta((n + 1.0) / 2, c / 2.0)
        return numerator / special.beta(1. / 2, c / 2.)
rdist = rdist_gen(a=-1.0, b=1.0, name="rdist")
class rayleigh_gen(rv_continuous):
    """A Rayleigh continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `rayleigh` is::
        rayleigh.pdf(r) = r * exp(-r**2/2)
    for ``x >= 0``.
    `rayleigh` is a special case of `chi` with ``df == 2``.
    %(example)s
    """
    def _rvs(self):
        # Rayleigh is chi with two degrees of freedom.
        return chi.rvs(2, size=self._size)

    def _pdf(self, r):
        half_rsq = 0.5 * r**2
        return r * exp(-half_rsq)

    def _cdf(self, r):
        half_rsq = 0.5 * r**2
        return 1 - exp(-half_rsq)

    def _ppf(self, q):
        # Invert the CDF analytically.
        return sqrt(-2 * log(1 - q))

    def _stats(self):
        # Closed-form moments; val = 4 - pi recurs in skew and kurtosis.
        val = 4 - pi
        mean = np.sqrt(pi/2)
        variance = val/2
        skewness = 2*(pi-3)*sqrt(pi)/val**1.5
        kurtosis = 6*pi/val-16/val**2
        return (mean, variance, skewness, kurtosis)

    def _entropy(self):
        return _EULER/2.0 + 1 - 0.5*log(2)

rayleigh = rayleigh_gen(a=0.0, name="rayleigh")
class reciprocal_gen(rv_continuous):
    """A reciprocal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `reciprocal` is::
        reciprocal.pdf(x, a, b) = 1 / (x*log(b/a))
    for ``a <= x <= b``, ``a, b > 0``.
    %(example)s
    """
    def _argcheck(self, a, b):
        # NOTE(review): _argcheck mutates instance state (self.a, self.b,
        # self.d) that the other methods read; presumably safe only for
        # single-threaded, per-call usage -- confirm against rv_continuous.
        self.a = a
        self.b = b
        # d = log(b/a), the normalization constant of the density.
        self.d = log(b*1.0 / a)
        return (a > 0) & (b > 0) & (b > a)
    def _pdf(self, x, a, b):
        return 1.0 / (x * self.d)
    def _logpdf(self, x, a, b):
        return -log(x) - log(self.d)
    def _cdf(self, x, a, b):
        return (log(x)-log(a)) / self.d
    def _ppf(self, q, a, b):
        # Invert the CDF: x = a * (b/a)**q.
        return a*pow(b*1.0/a, q)
    def _munp(self, n, a, b):
        # n-th raw moment: (b**n - a**n) / (n * log(b/a)).
        return 1.0/self.d / n * (pow(b*1.0, n) - pow(a*1.0, n))
    def _entropy(self, a, b):
        return 0.5*log(a*b)+log(log(b/a))
reciprocal = reciprocal_gen(name="reciprocal")
# FIXME: PPF does not work.
class rice_gen(rv_continuous):
    """A Rice continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `rice` is::
        rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
    for ``x > 0``, ``b > 0``.
    %(example)s
    """
    def _argcheck(self, b):
        # b == 0 is allowed, in which case Rice reduces to Rayleigh.
        return b >= 0
    def _rvs(self, b):
        # http://en.wikipedia.org/wiki/Rice_distribution
        # Norm of a 2-D normal vector with mean (b/sqrt(2), b/sqrt(2)).
        sz = self._size if self._size else 1
        t = b/np.sqrt(2) + mtrand.standard_normal(size=(2, sz))
        return np.sqrt((t*t).sum(axis=0))
    def _pdf(self, x, b):
        # Use the exponentially scaled Bessel function i0e to avoid
        # overflow of I[0](x*b); the extra exp(x*b) factor is folded into
        # the exponent: -(x**2+b**2)/2 + x*b == -(x-b)**2/2.
        return x * exp(-(x-b)*(x-b)/2.0) * special.i0e(x*b)
    def _munp(self, n, b):
        # n-th raw moment via the confluent hypergeometric function 1F1.
        nd2 = n/2.0
        n1 = 1 + nd2
        b2 = b*b/2.0
        return (2.0**(nd2) * exp(-b2) * special.gamma(n1) *
                special.hyp1f1(n1, 1, b2))
rice = rice_gen(a=0.0, name="rice")
# FIXME: PPF does not work.
class recipinvgauss_gen(rv_continuous):
    """A reciprocal inverse Gaussian continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `recipinvgauss` is::
        recipinvgauss.pdf(x, mu) = 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
    for ``x >= 0``.
    %(example)s
    """
    def _rvs(self, mu):
        # Sample as the reciprocal of a Wald (inverse Gaussian) variate.
        return 1.0/mtrand.wald(mu, 1.0, size=self._size)
    def _pdf(self, x, mu):
        return 1.0/sqrt(2*pi*x)*exp(-(1-mu*x)**2.0 / (2*x*mu**2.0))
    def _logpdf(self, x, mu):
        return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*log(2*pi*x)
    def _cdf(self, x, mu):
        # CDF in terms of two normal CDF evaluations at +-1/sqrt(x) scaled
        # arguments.
        trm1 = 1.0/mu - x
        trm2 = 1.0/mu + x
        isqx = 1.0/sqrt(x)
        return 1.0-_norm_cdf(isqx*trm1)-exp(2.0/mu)*_norm_cdf(-isqx*trm2)
recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss')
class semicircular_gen(rv_continuous):
    """A semicircular continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `semicircular` is::
        semicircular.pdf(x) = 2/pi * sqrt(1-x**2)
    for ``-1 <= x <= 1``.
    %(example)s
    """
    def _pdf(self, x):
        # (2/pi) * sqrt(1 - x**2)
        return 2.0 / pi * sqrt(1 - x * x)

    def _cdf(self, x):
        inv_pi = 1.0 / pi
        arc_part = x * sqrt(1 - x * x) + arcsin(x)
        return 0.5 + inv_pi * arc_part

    def _stats(self):
        # mean, variance, skew, excess kurtosis
        return 0, 0.25, 0, -1.0

    def _entropy(self):
        # Precomputed differential entropy constant.
        return 0.64472988584940017414

semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular")
class triang_gen(rv_continuous):
    """A triangular continuous random variable.
    %(before_notes)s
    Notes
    -----
    The triangular distribution can be represented with an up-sloping line from
    ``loc`` to ``(loc + c*scale)`` and then downsloping for ``(loc + c*scale)``
    to ``(loc+scale)``.
    The standard form is in the range [0, 1] with c the mode.
    The location parameter shifts the start to `loc`.
    The scale parameter changes the width from 1 to `scale`.
    %(example)s
    """
    def _rvs(self, c):
        return mtrand.triangular(0, c, 1, self._size)
    def _argcheck(self, c):
        # The mode must lie inside the (closed) unit interval.
        return (c >= 0) & (c <= 1)
    def _pdf(self, x, c):
        # Piecewise-linear density: rising on [0, c], falling on [c, 1].
        return where(x < c, 2*x/c, 2*(1-x)/(1-c))
    def _cdf(self, x, c):
        # Piecewise-quadratic CDF matching the two linear segments.
        return where(x < c, x*x/c, (x*x-2*x+c)/(c-1))
    def _ppf(self, q, c):
        return where(q < c, sqrt(c*q), 1-sqrt((1-c)*(1-q)))
    def _stats(self, c):
        return (c+1.0)/3.0, (1.0-c+c*c)/18, sqrt(2)*(2*c-1)*(c+1)*(c-2) / \
            (5 * np.power((1.0-c+c*c), 1.5)), -3.0/5.0
    def _entropy(self, c):
        # The differential entropy of the standard triangular distribution
        # is independent of the mode c.
        return 0.5-log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang")
class truncexpon_gen(rv_continuous):
    """A truncated exponential continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `truncexpon` is::
        truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b))
    for ``0 < x < b``.
    %(example)s
    """
    def _argcheck(self, b):
        # Record the upper truncation point on the instance so the generic
        # machinery knows the support.
        self.b = b
        return (b > 0)
    def _pdf(self, x, b):
        return exp(-x)/(1-exp(-b))
    def _logpdf(self, x, b):
        return -x - log(1-exp(-b))
    def _cdf(self, x, b):
        return (1.0-exp(-x))/(1-exp(-b))
    def _ppf(self, q, b):
        return -log(1-q+q*exp(-b))
    def _munp(self, n, b):
        # wrong answer with formula, same as in continuous.pdf
        # return gam(n+1)-special.gammainc(1+n, b)
        if n == 1:
            # expm1(-b) = exp(-b) - 1, accurate for small b.
            return (1-(b+1)*exp(-b))/(-expm1(-b))
        elif n == 2:
            return 2*(1-0.5*(b*b+2*b+2)*exp(-b))/(-expm1(-b))
        else:
            # return generic for higher moments
            # return rv_continuous._mom1_sc(self, n, b)
            return self._mom1_sc(n, b)
    def _entropy(self, b):
        eB = exp(b)
        return log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
truncexpon = truncexpon_gen(a=0.0, name='truncexpon')
class truncnorm_gen(rv_continuous):
    """A truncated normal continuous random variable.
    %(before_notes)s
    Notes
    -----
    The standard form of this distribution is a standard normal truncated to
    the range [a, b] --- notice that a and b are defined over the domain of the
    standard normal.  To convert clip values for a specific mean and standard
    deviation, use::
        a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
    %(example)s
    """
    def _argcheck(self, a, b):
        # Cache the truncation CDF/SF values on the instance; the other
        # methods reuse them.
        self.a = a
        self.b = b
        self._nb = _norm_cdf(b)
        self._na = _norm_cdf(a)
        self._sb = _norm_sf(b)
        self._sa = _norm_sf(a)
        if self.a > 0:
            # For a right-shifted interval, computing the mass from the
            # survival functions is numerically more accurate than the
            # difference of two CDF values close to 1.
            self._delta = -(self._sb - self._sa)
        else:
            self._delta = self._nb - self._na
        self._logdelta = log(self._delta)
        return (a != b)
    def _pdf(self, x, a, b):
        # Standard normal density renormalized by the truncated mass.
        return _norm_pdf(x) / self._delta
    def _logpdf(self, x, a, b):
        return _norm_logpdf(x) - self._logdelta
    def _cdf(self, x, a, b):
        return (_norm_cdf(x) - self._na) / self._delta
    def _ppf(self, q, a, b):
        # Mirror the accuracy trick from _argcheck: invert via the
        # survival function when the interval lies in the right tail.
        if self.a > 0:
            return _norm_isf(q*self._sb + self._sa*(1.0-q))
        else:
            return _norm_ppf(q*self._nb + self._na*(1.0-q))
    def _stats(self, a, b):
        # Standard truncated-normal mean and variance; skew/kurtosis left
        # to the generic machinery.
        nA, nB = self._na, self._nb
        d = nB - nA
        pA, pB = _norm_pdf(a), _norm_pdf(b)
        mu = (pA - pB) / d   # correction sign
        mu2 = 1 + (a*pA - b*pB) / d - mu*mu
        return mu, mu2, None, None
truncnorm = truncnorm_gen(name='truncnorm')
# FIXME: RVS does not work.
class tukeylambda_gen(rv_continuous):
    """A Tukey-Lamdba continuous random variable.
    %(before_notes)s
    Notes
    -----
    A flexible distribution, able to represent and interpolate between the
    following distributions:
    - Cauchy                (lam=-1)
    - logistic              (lam=0.0)
    - approx Normal         (lam=0.14)
    - u-shape               (lam = 0.5)
    - uniform from -1 to 1  (lam = 1)
    %(example)s
    """
    def _argcheck(self, lam):
        # Any real lambda (including zero and negatives) is valid.
        return np.ones(np.shape(lam), dtype=bool)
    def _pdf(self, x, lam):
        # Density expressed through the CDF (special.tklmbda):
        # 1 / (F**(lam-1) + (1-F)**(lam-1)); zero outside the finite
        # support |x| < 1/lam when lam > 0.
        Fx = asarray(special.tklmbda(x, lam))
        Px = Fx**(lam-1.0) + (asarray(1-Fx))**(lam-1.0)
        Px = 1.0/asarray(Px)
        return where((lam <= 0) | (abs(x) < 1.0/asarray(lam)), Px, 0.0)
    def _cdf(self, x, lam):
        return special.tklmbda(x, lam)
    def _ppf(self, q, lam):
        # Closed-form quantile; the lam == 0 limit is the logit function.
        q = q*1.0
        vals1 = (q**lam - (1-q)**lam)/lam
        vals2 = log(q/(1-q))
        return where((lam == 0) & (q == q), vals2, vals1)
    def _stats(self, lam):
        # Symmetric: mean and skew are zero; variance/kurtosis come from
        # module-level helpers.
        return 0, _tlvar(lam), 0, _tlkurt(lam)
    def _entropy(self, lam):
        # Entropy by numerical quadrature of -log pdf over the quantile
        # domain.
        def integ(p):
            return log(pow(p, lam-1)+pow(1-p, lam-1))
        return integrate.quad(integ, 0, 1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda')
class uniform_gen(rv_continuous):
    """A uniform continuous random variable.
    This distribution is constant between `loc` and ``loc + scale``.
    %(before_notes)s
    %(example)s
    """
    def _rvs(self):
        return mtrand.uniform(0.0, 1.0, self._size)

    def _pdf(self, x):
        # Density is 1 everywhere on the support; the (x == x)
        # comparison propagates NaN inputs to NaN outputs.
        return (x == x) * 1.0

    def _cdf(self, x):
        return x

    def _ppf(self, q):
        return q

    def _stats(self):
        # mean, variance, skew, excess kurtosis of U(0, 1)
        return 0.5, 1.0 / 12, 0, -1.2

    def _entropy(self):
        return 0.0

uniform = uniform_gen(a=0.0, b=1.0, name='uniform')
class vonmises_gen(rv_continuous):
    """A Von Mises continuous random variable.
    %(before_notes)s
    Notes
    -----
    If `x` is not in range or `loc` is not in range it assumes they are angles
    and converts them to [-pi, pi] equivalents.
    The probability density function for `vonmises` is::
        vonmises.pdf(x, kappa) = exp(kappa * cos(x)) / (2*pi*I[0](kappa))
    for ``-pi <= x <= pi``, ``kappa > 0``.
    See Also
    --------
    vonmises_line : The same distribution, defined on a [-pi, pi] segment
                    of the real line.
    %(example)s
    """
    def _rvs(self, kappa):
        return mtrand.vonmises(0.0, kappa, size=self._size)
    def _pdf(self, x, kappa):
        # I[0] is the modified Bessel function of order zero.
        return exp(kappa * cos(x)) / (2*pi*special.i0(kappa))
    def _cdf(self, x, kappa):
        # Delegates to the compiled cython implementation.
        return vonmises_cython.von_mises_cdf(kappa, x)
    def _stats_skip(self, kappa):
        # Disabled (renamed with _skip); kept for reference.
        return 0, None, 0, None
vonmises = vonmises_gen(name='vonmises')
# Same distribution restricted to the real-line segment [-pi, pi].
vonmises_line = vonmises_gen(a=-np.pi, b=np.pi, name='vonmises_line')
class wald_gen(invgauss_gen):
    """A Wald continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `wald` is::
        wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
    for ``x > 0``.
    `wald` is a special case of `invgauss` with ``mu == 1``.
    %(example)s
    """
    def _rvs(self):
        return mtrand.wald(1.0, 1.0, size=self._size)
    def _pdf(self, x):
        # Delegate to invgauss with mu fixed at 1.
        return invgauss._pdf(x, 1.0)
    def _logpdf(self, x):
        return invgauss._logpdf(x, 1.0)
    def _cdf(self, x):
        return invgauss._cdf(x, 1.0)
    def _stats(self):
        # mean, variance, skew, excess kurtosis of Wald(1, 1)
        return 1.0, 1.0, 3.0, 15.0
wald = wald_gen(a=0.0, name="wald")
class wrapcauchy_gen(rv_continuous):
    """A wrapped Cauchy continuous random variable.
    %(before_notes)s
    Notes
    -----
    The probability density function for `wrapcauchy` is::
        wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
    for ``0 <= x <= 2*pi``, ``0 < c < 1``.
    %(example)s
    """
    def _argcheck(self, c):
        return (c > 0) & (c < 1)
    def _pdf(self, x, c):
        return (1.0-c*c)/(2*pi*(1+c*c-2*c*cos(x)))
    def _cdf(self, x, c):
        # Piecewise closed-form CDF: the arctan formula is applied
        # separately on [0, pi) and [pi, 2*pi] (with the argument
        # reflected), then the two halves are written back via place().
        output = 0.0*x
        val = (1.0+c)/(1.0-c)
        c1 = x < pi
        c2 = 1-c1
        xp = extract(c1, x)
        xn = extract(c2, x)
        if (any(xn)):
            # upper half: reflect around 2*pi and complement
            valn = extract(c2, np.ones_like(x)*val)
            xn = 2*pi - xn
            yn = tan(xn/2.0)
            on = 1.0-1.0/pi*arctan(valn*yn)
            place(output, c2, on)
        if (any(xp)):
            # lower half
            valp = extract(c1, np.ones_like(x)*val)
            yp = tan(xp/2.0)
            op = 1.0/pi*arctan(valp*yp)
            place(output, c1, op)
        return output
    def _ppf(self, q, c):
        # Invert each branch of the CDF; select by q < 1/2.
        val = (1.0-c)/(1.0+c)
        rcq = 2*arctan(val*tan(pi*q))
        rcmq = 2*pi-2*arctan(val*tan(pi*(1-q)))
        return where(q < 1.0/2, rcq, rcmq)
    def _entropy(self, c):
        return log(2*pi*(1-c*c))
wrapcauchy = wrapcauchy_gen(a=0.0, b=2*pi, name='wrapcauchy')
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/scipy/stats/_continuous_distns.py
|
Python
|
agpl-3.0
| 105,558
|
[
"Gaussian"
] |
e0bb556c7a48c614c1c009113e95f128ea2fcf4c17875359496466e47eb8f852
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
""" Regression tests for hf stuff. """
from pytest import mark
def test_indices():
    """Check HFTransform on the fcc primitive cell with an identity supercell."""
    from random import randint
    from numpy import all, abs, dot, array
    from pytest import raises
    from pylada.crystal import HFTransform
    unitcell = array([[0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]])
    supercell = array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    a = HFTransform(unitcell, supercell)
    # Expected transform matrix and quotient for this cell pair.
    assert all(abs(a.transform - [[1, 1, -1], [0, 2, 0], [0, 0, 2]]) < 1e-8)
    assert all(abs(a.quotient - [1, 2, 2]) < 1e-8)
    for i in range(20):
        # Any supercell lattice translation of the same site must map to
        # the same indices.
        vec = dot(supercell, array(
            [randint(-20, 20), randint(-20, 20), randint(-20, 20)], dtype="float64"))
        vec += [0, -0.5, 0.5]
        assert all(abs(a.indices(vec) - [0, 1, 1]) < 1e-8)
        with raises(ValueError):
            # Positions off the lattice are rejected.
            a.indices(vec + [0.1, 0.1, 0])
def test_supercell_indices():
    """Check that HFTransform indices enumerate a non-trivial supercell
    exactly once per atom."""
    from pytest import raises
    from random import randint
    from numpy import all, abs, dot, array
    from pylada.crystal import HFTransform, Structure, supercell
    unitcell = array([[0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]])
    lattice = Structure(unitcell).add_atom(0, 0, 0, "Si")
    # NOTE: rebinds the local name `supercell` from the imported function
    # to the resulting structure; the function is not needed afterwards.
    supercell = supercell(lattice, dot(lattice.cell, [[3, 0, 5], [0, 0, -1], [-2, 1, 2]]))
    a = HFTransform(unitcell, supercell)
    # Expected transform and quotient for this cell pair.
    assert all(abs(a.transform - [[0, 2, 0], [1, 5, -1], [-2, -4, 0]]) < 1e-8)
    assert all(abs(a.quotient - [1, 1, 3]) < 1e-8)
    all_indices = set()
    for atom in supercell:
        indices = a.indices(atom.pos)
        index = a.index(atom.pos)
        # Flat indices must be unique and consistent with the index triple.
        assert index not in all_indices, (index, all_indices)
        assert all(indices >= 0)
        assert all(indices <= a.quotient)
        assert index == a.flatten_indices(*indices)
        all_indices.add(index)
        for i in range(20):
            # Supercell lattice translations leave indices unchanged;
            # off-lattice positions raise.
            vec = dot(supercell.cell, array(
                [randint(-20, 20), randint(-20, 20), randint(-20, 20)], dtype="float64"))
            vec += atom.pos
            assert all(abs(a.indices(vec) - indices) < 1e-8)
            with raises(ValueError):
                a.indices(vec + [0.1, 0.1, 0])
            assert index == a.index(vec)
            with raises(ValueError):
                a.index(vec + [0.1, 0.1, 0])
    # Every atom got exactly one flat index.
    assert len(all_indices) == len(supercell)
def b5(u=0.25):
    """Build a B5 test structure on an fcc cell.

    The anion ("X") positions are parameterized by the internal
    coordinate *u*; ``u == 0.25`` gives the undeformed arrangement.
    """
    from pylada.crystal import Structure
    x = u
    y = 0.25 - u
    structure = Structure([[0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]])
    # "A" cation sites (fixed fractional positions).
    for ax, ay, az in [(0.5, 0.5, 0.5), (0.5, 0.25, 0.25),
                       (0.25, 0.5, 0.25), (0.25, 0.25, 0.5)]:
        structure.add_atom(ax, ay, az, "A")
    # "B" cation sites.
    structure.add_atom(0.875, 0.875, 0.875, "B")
    structure.add_atom(0.125, 0.125, 0.125, "B")
    # "X" anion sites, parameterized by u.
    for px, py, pz in [(x, x, x), (x, y, y), (y, x, y), (y, y, x),
                       (-x, -x, -x), (-x, -y, -y), (-y, -x, -y), (-y, -y, -x)]:
        structure.add_atom(px, py, pz, "X")
    return structure
@mark.parametrize('u', [0.25, 0.23])
def test_deformed_b5(u):
    """Check HFTransform on a multi-site (B5) lattice, ideal and deformed."""
    from pytest import raises
    from random import randint
    from numpy import all, abs, dot, array, concatenate
    from pylada.crystal import HFTransform, supercell
    lattice = b5(u)
    # NOTE: rebinds the local name `supercell` from the imported function
    # to the resulting structure.
    supercell = supercell(lattice, dot(lattice.cell, [[2, 2, 0], [0, 2, 2], [4, 0, 4]]))
    a = HFTransform(lattice.cell, supercell)
    # Expected transform and quotient for this cell pair.
    assert all(abs(a.transform - [[-1, 1, 1], [1, -1, 1], [5, -3, -1]]) < 1e-8)
    assert all(abs(a.quotient - [2, 2, 8]) < 1e-8)
    all_indices = set()
    others = set()
    for atom in supercell:
        # Positions are taken relative to the atom's lattice site so that
        # equivalent sites of different species stay distinguishable.
        indices = a.indices(atom.pos - lattice[atom.site].pos)
        index = a.index(atom.pos - lattice[atom.site].pos, atom.site)
        assert index not in all_indices, (index, all_indices)
        assert all(indices >= 0)
        assert all(indices <= a.quotient)
        all_indices.add(index)
        # The (indices, site) pair must also be unique.
        assert str(concatenate((indices, [atom.site]))) not in others
        others.add(str(concatenate((indices, [atom.site]))))
        for i in range(20):
            # Supercell lattice translations leave indices unchanged;
            # off-lattice positions raise.
            vec = dot(supercell.cell, array(
                [randint(-20, 20), randint(-20, 20), randint(-20, 20)], dtype="float64"))
            vec += atom.pos - lattice[atom.site].pos
            assert all(abs(a.indices(vec) - indices) < 1e-8)
            with raises(ValueError):
                a.indices(vec + [0.1, 0.1, 0])
            assert index == a.index(vec, atom.site)
            with raises(ValueError):
                a.index(vec + [0.1, 0.1, 0])
    assert len(all_indices) == len(supercell)
|
pylada/pylada-light
|
tests/crystal/test_hart-forcade.py
|
Python
|
gpl-3.0
| 5,939
|
[
"CRYSTAL",
"VASP"
] |
2565f53dde56348181e9cc8764503caab7409915644f6c1c1a99753a83877933
|
# A simple test for a vtkTkRenderWidget. Run it like so:
# python TestTkRenderWindowInteractor.py -B $VTK_DATA_ROOT/Baseline/Rendering
import os
import vtk
from vtk.test import Testing
import Tkinter
from vtk.tk.vtkTkRenderWindowInteractor import vtkTkRenderWindowInteractor
class TestTkRenderWindowInteractor(Testing.vtkTest):
    """Render a cone through a vtkTkRenderWindowInteractor widget and
    exercise the generic VTK blackbox tests against it."""
    # Stick your VTK pipeline here if you want to create the pipeline
    # only once.  If you put it in the constructor or in the function
    # the pipeline will be created afresh for each and every test.
    # create a dummy Tkinter root window.
    root = Tkinter.Tk()

    # create a rendering window and renderer
    ren = vtk.vtkRenderer()
    tkrw = vtkTkRenderWindowInteractor(root, width=300, height=300)
    tkrw.Initialize()
    tkrw.pack()
    rw = tkrw.GetRenderWindow()
    rw.AddRenderer(ren)

    # create an actor and give it cone geometry
    cs = vtk.vtkConeSource()
    cs.SetResolution(8)
    # NOTE: class attribute named `map` (shadows the builtin within this
    # class body only).
    map = vtk.vtkPolyDataMapper()
    map.SetInputConnection(cs.GetOutputPort())
    act = vtk.vtkActor()
    act.SetMapper(map)

    # assign our actor to the renderer
    ren.AddActor(act)

    def testvtkTkRenderWindowInteractor(self):
        "Test if vtkTkRenderWindowInteractor works."
        # Render once, pump the Tk event loop, then compare against the
        # stored baseline image.
        self.tkrw.Start()
        self.tkrw.Render()
        self.root.update()
        img_file = "TestTkRenderWindowInteractor.png"
        Testing.compareImage(self.rw, Testing.getAbsImagePath(img_file))
        Testing.interact()

    # These are useful blackbox tests (not dummy ones!)
    def testParse(self):
        "Test if vtkTkRenderWindowInteractor is parseable"
        self._testParse(self.tkrw)

    def testGetSet(self):
        "Testing Get/Set methods of vtkTkRenderWindowInteractor"
        self._testGetSet(self.tkrw)

    def testBoolean(self):
        "Testing Boolean methods of vtkTkRenderWindowInteractor"
        self._testBoolean(self.tkrw)
if __name__ == "__main__":
    # Hand the test case to VTK's test harness.  The class is deleted from
    # the module namespace afterwards, presumably so a second discovery
    # pass does not pick it up again -- confirm against vtk.test.Testing.
    cases = [(TestTkRenderWindowInteractor, 'test')]
    del TestTkRenderWindowInteractor
    Testing.main(cases)
|
aashish24/VTK-old
|
Rendering/Tk/Testing/Python/TestTkRenderWindowInteractor.py
|
Python
|
bsd-3-clause
| 2,047
|
[
"VTK"
] |
baab87c42a17d5698e4a31ec0a06199207094745759669d0e0560bf056f33a08
|
#!/usr/bin/env python
#
# Copyright (c) 2010 Brian Knox (taotetek@gmail.com)
# License: GNU LGPLv3
#
# This file is part of Multi-Mechanize
#
"""a collection of functions and classes for multi-mechanize results files"""
import re
import fileinput
from datetime import datetime
try:
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relation
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String, Float, DateTime
from sqlalchemy import ForeignKey, UniqueConstraint
except ImportError:
print "(optional: please install sqlalchemy to enable db logging)"
Base = declarative_base()
class GlobalConfig(Base):
    """ORM class representing a multi-mechanize global config (one row per run)."""
    __tablename__ = 'mechanize_global_configs'
    id = Column(Integer, nullable=False, primary_key=True)
    run_time = Column(Integer, nullable=False)
    rampup = Column(Integer, nullable=False)
    results_ts_interval = Column(Integer, nullable=False)
    # One-to-many links to the per-run child rows.
    user_group_configs = relation("UserGroupConfig",
        primaryjoin="UserGroupConfig.mechanize_global_configs_id==GlobalConfig.id")
    results = relation("ResultRow",
        primaryjoin="GlobalConfig.id==ResultRow.mechanize_global_configs_id")

    def __init__(self, run_time=None, rampup=None, results_ts_interval=None):
        # BUG FIX: run_time was stored via str(), but the column is Integer and
        # __repr__ formats it with '%i' (a TypeError on a string); store int.
        self.run_time = int(run_time)
        self.rampup = int(rampup)  # rampup time for the test run
        self.results_ts_interval = int(results_ts_interval)

    def __repr__(self):
        return "<GlobalConfig('%i', '%i', '%i')>" % (
            self.run_time, self.rampup, self.results_ts_interval)
class UserGroupConfig(Base):
    """class representing a multi-mechanize user group config"""
    # Declarative mapping: attribute order defines the table's column order.
    __tablename__ = 'mechanize_user_group_configs'
    id = Column (Integer, nullable=False, primary_key=True)
    # FK back to the owning GlobalConfig row.
    mechanize_global_configs_id = Column(Integer, ForeignKey('mechanize_global_configs.id'), nullable=False)
    user_group = Column(String(50), nullable=False)
    threads = Column(Integer, nullable=False)
    script = Column(String(50), nullable=False)

    def __init__(self, user_group=None, threads=None, script=None):
        # Coerce inputs to the column types up front.
        self.user_group = str(user_group)
        self.threads = int(threads)
        self.script = str(script)

    def __repr__(self):
        return "<UserGroupConfig('%s','%s','%s')>" % (
            self.user_group, self.threads, self.script)
class ResultRow(Base):
    """ORM class representing one multi-mechanize results.csv row."""
    __tablename__ = 'mechanize_results'
    # BUG FIX: without the trailing comma this was not a tuple — SQLAlchemy
    # requires __table_args__ to be a tuple (or dict) and raises ArgumentError
    # otherwise, so the unique constraint was never installed.
    __table_args__ = (
        UniqueConstraint('run_id', 'trans_count', name='uix_1'),
    )
    id = Column(Integer, nullable=False, primary_key=True)
    mechanize_global_configs_id = Column(Integer,
        ForeignKey('mechanize_global_configs.id'), nullable=False)
    project_name = Column(String(50), nullable=False, index=True)
    run_id = Column(DateTime, nullable=False, index=True)
    trans_count = Column(Integer, nullable=False, index=True)
    elapsed = Column(Float, nullable=False, index=True)
    epoch = Column(Float, nullable=False, index=True)
    user_group_name = Column(String(50), nullable=False)
    scriptrun_time = Column(Float, nullable=False)
    error = Column(String(255))
    custom_timers = Column(String(50))
    global_config = relation("GlobalConfig",
        primaryjoin="ResultRow.mechanize_global_configs_id==GlobalConfig.id")
    timers = relation("TimerRow",
        primaryjoin="ResultRow.id==TimerRow.mechanize_results_id")

    def __init__(self, project_name=None, run_id=None, trans_count=None,
            elapsed=None, epoch=None, user_group_name=None,
            scriptrun_time=None, error=None, custom_timers=None):
        self.project_name = str(project_name)
        self.run_id = run_id
        self.trans_count = int(trans_count)
        self.elapsed = float(elapsed)
        # BUG FIX: the epoch column is Float and the csv field is a float
        # string; int() on a string like '1299677403.045' raises ValueError.
        self.epoch = float(epoch)
        self.user_group_name = str(user_group_name)
        self.scriptrun_time = float(scriptrun_time)
        # NOTE(review): str(None) stores the literal 'None' in a nullable
        # column; kept for backward compatibility with existing data.
        self.error = str(error)
        self.custom_timers = str(custom_timers)

    def __repr__(self):
        return "<ResultRow('%s','%s','%i','%.3f','%i','%s','%.3f','%s','%s')>" % (
            self.project_name, self.run_id, self.trans_count, self.elapsed,
            self.epoch, self.user_group_name, self.scriptrun_time,
            self.error, self.custom_timers)
class TimerRow(Base):
    """ORM class representing a multi-mechanize custom timer result."""
    __tablename__ = 'mechanize_custom_timers'
    id = Column(Integer, nullable=False, primary_key=True)
    # FK back to the owning ResultRow.
    mechanize_results_id = Column(Integer,
        ForeignKey('mechanize_results.id'), nullable=False)
    timer_name = Column(String(50), nullable=False, index=True)
    elapsed = Column(Float, nullable=False, index=True)
    result_rows = relation("ResultRow",
        primaryjoin="TimerRow.mechanize_results_id==ResultRow.id")

    def __init__(self, timer_name=None, elapsed=None):
        self.timer_name = str(timer_name)
        # BUG FIX: the column is Float but the value was truncated with int();
        # keep full timer precision.
        self.elapsed = float(elapsed)

    def __repr__(self):
        return "<TimerRow('%s', '%s')>" % (self.timer_name, self.elapsed)
def load_results_database(project_name, run_localtime, results_dir,
        results_database, run_time, rampup, results_ts_interval,
        user_group_configs):
    """Parse a multi-mechanize results.csv file and load it into a database.

    :param project_name: name stored on every result row
    :param run_localtime: time.struct_time of the run start (becomes run_id)
    :param results_dir: directory containing results.csv (must end with a
        path separator — the filename is appended by string concatenation)
    :param results_database: SQLAlchemy database URL
    :param run_time, rampup, results_ts_interval: global config values
    :param user_group_configs: iterable with .name/.num_threads/.script_file
    """
    # Raw string so the regex escapes (\{ ... \}) are not re-processed by Python.
    logline_re = re.compile(r'(.+),(.+),(.+),(.+),(.+),(.?),(\{.+\})')
    engine = create_engine(results_database, echo=False)
    # All mapped classes share Base's MetaData, so each call creates every
    # missing table; create_all is a no-op for tables that already exist.
    ResultRow.metadata.create_all(engine)
    TimerRow.metadata.create_all(engine)
    GlobalConfig.metadata.create_all(engine)
    UserGroupConfig.metadata.create_all(engine)
    sa_session = sessionmaker(bind=engine)
    sa_current_session = sa_session()
    # Use the run's local start time as the run identifier.
    run_id = datetime(run_localtime.tm_year, run_localtime.tm_mon,
        run_localtime.tm_mday, run_localtime.tm_hour, run_localtime.tm_min,
        run_localtime.tm_sec)
    results_file = results_dir + 'results.csv'
    global_config = GlobalConfig(run_time, rampup, results_ts_interval)
    sa_current_session.add(global_config)
    # (The enumerate index of the original loop was unused and was dropped.)
    for ug_config in user_group_configs:
        user_group_config = UserGroupConfig(ug_config.name,
            ug_config.num_threads, ug_config.script_file)
        global_config.user_group_configs.append(user_group_config)
    for line in fileinput.input([results_file]):
        line = line.rstrip()
        match = logline_re.match(line)
        if match:
            result_row = ResultRow(project_name, run_id, match.group(1),
                match.group(2), match.group(3), match.group(4),
                match.group(5), match.group(6), match.group(7))
            global_config.results.append(result_row)
            # SECURITY: eval() executes arbitrary code from the results file.
            # Acceptable only because the file is produced locally by
            # multi-mechanize; use ast.literal_eval for untrusted input.
            timer_data = eval(match.group(7))
            for timer_name in timer_data:
                timer_row = TimerRow(timer_name, timer_data[timer_name])
                result_row.timers.append(timer_row)
            sa_current_session.add(result_row)
    sa_current_session.commit()
    sa_current_session.close()
|
seznam/mcache-client
|
powertest/lib/resultsloader.py
|
Python
|
lgpl-3.0
| 7,162
|
[
"Brian"
] |
7fe2ec02f1958611e030a931664532a188e806f29ddb9cbaa21d5e39161321ce
|
import sys
import matplotlib as mpl
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
from torch import nn
from torch.autograd import Variable
from tqdm import trange
import gaussian
import util
from util import sparsemm
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from argparse import ArgumentParser
import networkx as nx
import math
import torch
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
"""
Simple Graph convolution experiment. Given a set of random vectors, learn to express each as the sum of some of the
others
"""
def clean(axes=None):
    """Strip an axes of its spines and all tick marks/labels.

    :param axes: matplotlib axes to clean; defaults to the current axes.
    """
    target = plt.gca() if axes is None else axes
    for spine in target.spines.values():
        spine.set_visible(False)
    target.tick_params(
        top=False, bottom=False, left=False, right=False,
        labelbottom=False, labelleft=False)
def densities(points, means, sigmas):
    """
    Compute the unnormalized Gaussian PDF values of each point under each MVN
    (sigma is a diagonal covariance per MVN).

    :param points: (batchsize, n, rank) tensor of points
    :param means: (batchsize, k, rank) tensor of MVN means
    :param sigmas: (batchsize, k, rank) tensor of per-dimension variances
    :return: (batchsize, n, k) tensor of exp(-0.5 * squared Mahalanobis dist)
    """
    b, n, rank = points.size()
    b, k, rank = means.size()

    # Broadcast every point against every MVN: shape (b, n, k, rank).
    expanded = points.unsqueeze(2).expand(b, n, k, rank)
    diffs = expanded - means.unsqueeze(1).expand_as(expanded)

    # Scale differences by 1/sqrt(sigma); EPSILON guards the division.
    inv_std = torch.sqrt(1.0 / (gaussian.EPSILON + sigmas.unsqueeze(1).expand_as(diffs)))
    scaled = diffs * inv_std

    # Squared norms via batched dot products over the unrolled (b*n*k) rows.
    rows = scaled.view(-1, 1, rank)
    sq_dist = torch.bmm(rows, rows.transpose(1, 2)).view(b, n, k)

    return torch.exp(-0.5 * sq_dist)
class MatrixHyperlayer(nn.Module):
    """
    Constrained version of the matrix hyperlayer. Each output gets exactly k inputs.

    Learns a sparse (out_num, in_num) weight matrix whose nonzero positions
    are continuous parameters, sampled to integer index tuples at train time.
    """

    def duplicates(self, tuples):
        """
        Takes a list of tuples, and for each tuple that occurs multiple times
        marks all but one of the occurrences (in the mask that is returned).

        :param tuples: A size (batch, k, rank) tensor of integer tuples
        :return: A size (batch, k) mask indicating the duplicates
        """
        b, k, r = tuples.size()
        primes = self.primes[:r]
        primes = primes.unsqueeze(0).unsqueeze(0).expand(b, k, r)
        unique = ((tuples+1) ** primes).prod(dim=2) # unique identifier for each tuple
        # NOTE(review): `sorted` shadows the builtin; local to this method only.
        sorted, sort_idx = torch.sort(unique, dim=1)
        _, unsort_idx = torch.sort(sort_idx, dim=1)
        # Equal neighbors in the sorted order are duplicates; the first
        # occurrence in each run stays unmarked (zs prepends a zero column).
        mask = sorted[:, 1:] == sorted[:, :-1]
        zs = torch.zeros(b, 1, dtype=torch.uint8, device='cuda' if self.use_cuda else 'cpu')
        mask = torch.cat([zs, mask], dim=1)
        # Undo the sort so the mask lines up with the input order.
        return torch.gather(mask, 1, unsort_idx)

    def cuda(self, device_id=None):
        # Record the device choice so forward()/hyper() can allocate on cuda.
        self.use_cuda = True
        super().cuda(device_id)

    def __init__(self, in_num, out_num, k, radditional=0, gadditional=0, region=(128,),
                 sigma_scale=0.2, min_sigma=0.0, fix_value=False):
        """
        :param in_num: number of input units (columns of the sparse matrix)
        :param out_num: number of output units (rows)
        :param k: number of learned connections per output
        :param radditional: extra samples drawn locally around each index
        :param gadditional: extra samples drawn globally over the full range
        :param region: size of the local sampling window
        :param sigma_scale: multiplier applied to the computed sigmas
        :param min_sigma: floor added to the sigmas
        :param fix_value: if True, all values are fixed to 1/k
        """
        super().__init__()

        self.min_sigma = min_sigma
        self.use_cuda = False
        self.in_num = in_num
        self.out_num = out_num
        self.k = k
        self.radditional = radditional
        self.region = region
        self.gadditional = gadditional
        self.sigma_scale = sigma_scale
        self.fix_value = fix_value

        self.weights_rank = 2 # implied rank of W

        # One (mean, sigma, value) triple per connection: (k * out_num, 3).
        self.params = Parameter(torch.randn(k * out_num, 3))

        # Precomputed row indices: each output row repeated once per sample
        # drawn at train time (2 nearest + radditional + gadditional per k).
        outs = torch.arange(out_num).unsqueeze(1).expand(out_num, k * (2 + radditional + gadditional)).contiguous().view(-1, 1)
        self.register_buffer('outs', outs.long())

        # Row indices for inference, where only the k rounded means are used.
        outs_inf = torch.arange(out_num).unsqueeze(1).expand(out_num, k).contiguous().view(-1, 1)
        self.register_buffer('outs_inf', outs_inf.long())

        # Primes are used by duplicates() to build unique tuple identifiers.
        self.register_buffer('primes', torch.tensor(util.PRIMES))

    def size(self):
        # Shape of the (virtual) sparse weight matrix.
        return (self.out_num, self.in_num)

    def generate_integer_tuples(self, means,rng=None, use_cuda=False):
        """Sample integer index tuples around the continuous means.

        :param means: (c, k, 1) tensor of continuous column positions
        :param rng: bounds of the index space (here (in_num,))
        :return: (c, k * (2 + radditional + gadditional), 1) integer indices
        """
        dv = 'cuda' if use_cuda else 'cpu'

        c, k, rank = means.size()
        assert rank == 1

        # In the following, we cut the first dimension up into chunks of size self.k (for which the row index)
        # is the same. This then functions as a kind of 'batch' dimension, allowing us to use the code from
        # globalsampling without much adaptation

        """
        Sample the 2 nearest points
        """
        floor_mask = torch.tensor([1, 0], device=dv, dtype=torch.uint8)
        fm = floor_mask.unsqueeze(0).unsqueeze(2).expand(c, k, 2, 1)

        # For each mean, take its floor and its ceil as the two neighbors.
        neighbor_ints = means.data.unsqueeze(2).expand(c, k, 2, 1).contiguous()

        neighbor_ints[fm] = neighbor_ints[fm].floor()
        neighbor_ints[~fm] = neighbor_ints[~fm].ceil()

        neighbor_ints = neighbor_ints.long()

        """
        Sample uniformly from a small range around the given index tuple
        """
        rr_ints = torch.cuda.FloatTensor(c, k, self.radditional, 1) if use_cuda else torch.FloatTensor(c, k, self.radditional, 1)

        rr_ints.uniform_()
        rr_ints *= (1.0 - gaussian.EPSILON)

        rng = torch.cuda.FloatTensor(rng) if use_cuda else torch.FloatTensor(rng)

        rngxp = rng.unsqueeze(0).unsqueeze(0).unsqueeze(0).expand_as(rr_ints) # bounds of the tensor
        rrng = torch.cuda.FloatTensor(self.region) if use_cuda else torch.FloatTensor(self.region) # bounds of the range from which to sample
        rrng = rrng.unsqueeze(0).unsqueeze(0).unsqueeze(0).expand_as(rr_ints)

        mns_expand = means.round().unsqueeze(2).expand_as(rr_ints)

        # upper and lower bounds
        lower = mns_expand - rrng * 0.5
        upper = mns_expand + rrng * 0.5

        # check for any ranges that are out of bounds, and shift the window
        # back inside the index space
        idxs = lower < 0.0
        lower[idxs] = 0.0

        idxs = upper > rngxp
        lower[idxs] = rngxp[idxs] - rrng[idxs]

        rr_ints = (rr_ints * rrng + lower).long()

        """
        Sample uniformly from all index tuples
        """
        g_ints = torch.cuda.FloatTensor(c, k, self.gadditional, 1) if use_cuda else torch.FloatTensor(c, k, self.gadditional, 1)
        rngxp = rng.unsqueeze(0).unsqueeze(0).unsqueeze(0).expand_as(g_ints) # bounds of the tensor

        g_ints.uniform_()
        g_ints *= (1.0 - gaussian.EPSILON) * rngxp
        g_ints = g_ints.long()

        # Concatenate the three sampling strategies along the sample axis.
        ints = torch.cat([neighbor_ints, rr_ints, g_ints], dim=2)

        return ints.view(c, -1, rank)

    def forward(self, input, train=True):
        """Multiply `input` by the learned sparse matrix.

        :param input: (in_num, ...) tensor; first dim must equal in_num
        :param train: if True, use sampled indices and density-weighted values;
            otherwise use the k rounded means directly.
        """
        ### Compute and unpack output of hypernetwork

        means, sigmas, values = self.hyper(input)
        nm = means.size(0)
        c = nm // self.k  # one chunk of k connections per output row

        means  = means.view(c, self.k, 1)
        sigmas = sigmas.view(c, self.k, 1)
        values = values.view(c, self.k)

        rng = (self.in_num, )

        assert input.size(0) == self.in_num

        if train:
            indices = self.generate_integer_tuples(means, rng=rng, use_cuda=self.use_cuda)
            indfl = indices.float()

            # Mask for duplicate indices
            dups = self.duplicates(indices)

            # Weight each sampled index by its density under the learned
            # Gaussians; duplicates are zeroed so mass isn't double-counted.
            props = densities(indfl, means, sigmas).clone() # result has size (c, indices.size(1), means.size(1))
            props[dups] = 0
            props = props / props.sum(dim=1, keepdim=True)

            values = values.unsqueeze(1).expand(c, indices.size(1), means.size(1))

            values = props * values
            values = values.sum(dim=2)

            # unroll the batch dimension
            indices = indices.view(-1, 1)
            values = values.view(-1)

            indices = torch.cat([self.outs, indices.long()], dim=1)
        else:
            indices = means.round().long().view(-1, 1)
            values = values.squeeze().view(-1)

            indices = torch.cat([self.outs_inf, indices.long()], dim=1)

        if self.use_cuda:
            indices = indices.cuda()

        # Kill anything on the diagonal
        values[indices[:, 0] == indices[:, 1]] = 0.0

        # if self.symmetric:
        #     # Add reverse direction automatically
        #     flipped_indices = torch.cat([indices[:, 1].unsqueeze(1), indices[:, 0].unsqueeze(1)], dim=1)
        #     indices         = torch.cat([indices, flipped_indices], dim=0)
        #     values          = torch.cat([values, values], dim=0)

        ### Create the sparse weight tensor

        # Prevent segfault
        assert not util.contains_nan(values.data)

        vindices = Variable(indices.t())
        sz = Variable(torch.tensor((self.out_num, self.in_num)))

        spmm = sparsemm(self.use_cuda)
        output = spmm(vindices, values, sz, input)

        return output

    def hyper(self, input=None):
        """
        Evaluates hypernetwork: maps self.params to (means, sigmas, values).
        The `input` argument is unused; means are scaled into [0, in_num - 1].
        """
        k, width = self.params.size()

        # NOTE(review): F.sigmoid is deprecated in newer torch in favor of
        # torch.sigmoid; behavior is identical.
        means = F.sigmoid(self.params[:, 0:1])

        # Limits for each of the w_rank indices
        # and scales for the sigmas
        s = torch.cuda.FloatTensor((self.in_num,)) if self.use_cuda else torch.FloatTensor((self.in_num,))
        s = Variable(s.contiguous())

        ss = s.unsqueeze(0)
        sm = s - 1
        sm = sm.unsqueeze(0)

        means = means * sm.expand_as(means)

        sigmas = nn.functional.softplus(self.params[:, 1:2] + gaussian.SIGMA_BOOST) + gaussian.EPSILON

        values = self.params[:, 2:] # * 0.0 + 1.0

        sigmas = sigmas.expand_as(means)
        sigmas = sigmas * ss.expand_as(sigmas)
        sigmas = sigmas * self.sigma_scale + self.min_sigma

        # Precedence note: parses as (values*0.0 + 1.0/self.k) if fix_value else values.
        return means, sigmas, values * 0.0 + 1.0/self.k if self.fix_value else values
class GraphConvolution(Module):
    """
    Code adapted from pyGCN, see https://github.com/tkipf/pygcn

    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907:
    optionally projects the input with a dense weight, then applies the
    (callable) adjacency, then optionally adds a bias.
    """

    def __init__(self, in_features, out_features, bias=True, has_weight=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        if has_weight:
            self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        else:
            self.weight = None
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init for the weight; zero init for the bias
        (the zero bias differs from the pyGCN default on purpose)."""
        if self.weight is not None:
            bound = 1. / math.sqrt(self.weight.size(1))
            self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.zero_()

    def forward(self, input, adj, train=True):
        """Apply the layer: adj(input @ weight) (+ bias).

        :param input: node features, or None to use the weight as identity input
        :param adj: callable adjacency accepting (support, train=...)
        """
        if input is None:  # the input is the identity matrix
            support = self.weight
        elif self.weight is None:
            support = input
        else:
            support = torch.mm(input, self.weight)
        output = adj(support, train=train)
        if self.bias is None:
            return output
        return output + self.bias
class ConvModel(nn.Module):
    """Wraps a single MatrixHyperlayer used as a learned (n x n) adjacency,
    applied repeatedly as a graph convolution over the data."""

    def __init__(self, data_size, k, radd=32, gadd=32, range=128, min_sigma=0.0):
        # NOTE: `range` shadows the builtin, but it is part of the public
        # keyword interface (callers pass range=arg.range), so it is kept.
        super().__init__()

        n, e = data_size

        self.adj = MatrixHyperlayer(n, n, k, radditional=radd, gadditional=gadd, region=(range,),
                                    min_sigma=min_sigma, fix_value=True)

    def freeze(self):
        """Stop gradient updates for the adjacency hyperlayer.

        BUG FIX: the original iterated self.encoder_conv / self.decoder_conv,
        which are never created on this class, so freeze() always raised
        AttributeError. The only trainable submodule is self.adj.
        """
        for param in self.adj.parameters():
            param.requires_grad = False

    def forward(self, x, depth=1, train=True):
        """Apply the learned adjacency repeatedly, collecting each result.

        Note: the loop runs over range(1, depth), i.e. depth-1 applications;
        depth=1 returns an empty list (kept as in the original).
        """
        n, e = x.size()

        results = []
        for _ in range(1, depth):
            x = self.adj(x, train=train)
            results.append(x)

        return results

    def cuda(self):
        super().cuda()
        # MatrixHyperlayer keeps its own use_cuda flag; propagate it.
        self.adj.apply(lambda t: t.cuda())
PLOT_MAX = 2000 # max number of data points for the latent space plot

def go(arg):
    """Run the experiment configured by the parsed command-line args:
    learn to express each random vector as a sum of the others via a
    learned sparse adjacency, logging to tensorboard and plotting the
    learned index distribution every `plot_every` epochs."""
    MARGIN = 0.1
    util.makedirs('./conv-simple/')
    torch.manual_seed(arg.seed)

    writer = SummaryWriter()

    # The task is self-reconstruction of random data through the adjacency.
    data = torch.randn(arg.size, arg.width)

    model = ConvModel(data.size(), k=arg.k,
                      gadd=arg.gadditional, radd=arg.radditional, range=arg.range,
                      min_sigma=arg.min_sigma)

    if arg.cuda:
        model.cuda()
        data = data.cuda()

    # target is unused below; reconstruction is measured against `data`.
    data, target = Variable(data), Variable(data)

    optimizer = optim.Adam(list(model.parameters()), lr=arg.lr)
    n, e = data.size()

    for epoch in trange(arg.epochs):
        optimizer.zero_grad()

        outputs = model(data, depth=arg.depth)

        # Sum the reconstruction loss over every intermediate depth.
        loss = 0.0
        for i, o in enumerate(outputs):
            loss += F.mse_loss(o, data)

        # regularize sigmas (keeps the index distributions peaked)
        _, sigmas, _ = model.adj.hyper()
        reg = sigmas.norm().mean()

        # print(loss.item(), reg.item())
        # sys.exit()

        tloss = loss + 0.0001 * reg

        tloss.backward()
        optimizer.step()

        writer.add_scalar('conv-simple/train-tloss', tloss.item(), epoch)
        writer.add_scalar('conv-simple/train-loss', loss.item(), epoch)
        writer.add_scalar('conv-simple/train-reg', reg.item(), epoch)

        if epoch % arg.plot_every == 0:
            print('data')
            print(data[:3, :3].data)
            print()
            for o in outputs:
                print(o[:3, :3].data)

            # Plot the results (inference mode: rounded means, no sampling)
            with torch.no_grad():
                outputs = model(data, depth=arg.depth, train=False)

                plt.figure(figsize=(8, 8))

                means, sigmas, values = model.adj.hyper()
                means, sigmas, values = means.data, sigmas.data, values.data
                # Prepend the row index so each point is an (out, in) pair.
                means = torch.cat([model.adj.outs_inf.data.float(), means], dim=1)

                plt.cla()
                s = model.adj.size()
                util.plot1d(means, sigmas, values.squeeze(), shape=s)
                plt.xlim((-MARGIN * (s[0] - 1), (s[0] - 1) * (1.0 + MARGIN)))
                plt.ylim((-MARGIN * (s[0] - 1), (s[0] - 1) * (1.0 + MARGIN)))

                plt.savefig('./conv-simple/means.{:05}.pdf'.format(epoch))

    print('Finished Training.')
if __name__ == "__main__":
    # Command-line interface; every option maps onto an attribute of the
    # namespace consumed by go(arg).
    parser = ArgumentParser()

    parser.add_argument("-e", "--epochs",
                        dest="epochs",
                        help="Number of epochs",
                        default=1000, type=int)

    parser.add_argument("-W", "--width",
                        dest="width",
                        help="Width of the data.",
                        default=16, type=int)

    parser.add_argument("-k", "--num-points",
                        dest="k",
                        help="Number of index tuples",
                        default=3, type=int)

    parser.add_argument("-S", "--size",
                        dest="size",
                        help="Number of data points",
                        default=128, type=int)

    parser.add_argument("-a", "--gadditional",
                        dest="gadditional",
                        help="Number of additional points sampled globally per index-tuple",
                        default=32, type=int)

    parser.add_argument("-A", "--radditional",
                        dest="radditional",
                        help="Number of additional points sampled locally per index-tuple",
                        default=16, type=int)

    parser.add_argument("-R", "--range",
                        dest="range",
                        help="Range in which the local points are sampled",
                        default=128, type=int)

    parser.add_argument("-d", "--depth",
                        dest="depth",
                        help="Number of graph convolutions",
                        default=5, type=int)

    parser.add_argument("-p", "--plot-every",
                        dest="plot_every",
                        help="Numer of epochs to wait between plotting",
                        default=100, type=int)

    parser.add_argument("-l", "--learn-rate",
                        dest="lr",
                        help="Learning rate",
                        default=0.01, type=float)

    parser.add_argument("-r", "--seed",
                        dest="seed",
                        help="Random seed",
                        default=4, type=int)

    parser.add_argument("-c", "--cuda", dest="cuda",
                        help="Whether to use cuda.",
                        action="store_true")

    parser.add_argument("-M", "--min-sigma",
                        dest="min_sigma",
                        help="Minimal sigma value",
                        default=0.0, type=float)

    args = parser.parse_args()

    print('OPTIONS', args)

    go(args)
|
MaestroGraph/sparse-hyper
|
experiments/gconv-simple.py
|
Python
|
mit
| 17,048
|
[
"Gaussian"
] |
0643494d65badd9cf080d059f6fe41552dea1b4c4e49f597ed7dcba4904071c2
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,.pct.py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Mixture Density Networks in GPflow
#
# In this notebook we explain how to implement a Mixture Density Network (MDN) [1] using GPflow. In theory, this is similar to [this blog post](http://blog.otoro.net/2015/11/24/mixture-density-networks-with-tensorflow/) from 2015, but instead of using TensorFlow directly we'll use GPflow. GPflow is typically used for building Gaussian Process-based models, but the framework contains many useful methods and classes that can be used to quickly prototype a wide variety of ML algorithms. Excellent for doing research!
#
# We start by explaining why MDNs can be useful. We then examine a GPflow implementation of the model and use it for a couple of toy experiments.
#
# %% [markdown]
# ## Conditional Density Estimation models
# Imagine we are interested in performing regression on the following dataset.
# %%
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
np.random.seed(1) # for reproducibility of this notebook
CMAP = plt.get_cmap("Blues")
# %%
N = 200
NOISE_STD = 5.0e-2
def sinusoidal_data(N, noise):
    """Generate N (x, y) pairs on a noisy sinusoid (multi-valued in x).

    :param N: number of points
    :param noise: std. dev. of the Gaussian noise added to both coordinates
    :return: (X, Y), each of shape (N, 1)
    """
    targets = np.linspace(-2, 2, N)[:, None]
    inputs = 2.0 * np.sin(4 * targets) + 0.5 * targets
    # Noise is drawn for X first, then Y, preserving the RNG stream order.
    inputs = inputs + np.random.randn(N, 1) * noise
    targets = targets + np.random.randn(N, 1) * noise
    return inputs, targets
X, Y = data = sinusoidal_data(N, NOISE_STD)
plt.plot(X, Y, "ro", alpha=0.3)
plt.xlabel("$x$")
_ = plt.ylabel("$y$")
# %% [markdown]
# At first sight, this dataset doesn't seem overly complex. Both input and output have a single dimension, and the data has a clear sinusoidal pattern. However, notice that a single input $x$ can correspond to multiple output values $y$, so for example $x=0$ can yield any of the values $\{-1.5, -3/4, 0, 0.8, 1.5\}$. Typical regression algorithms such as Linear Regression, Gaussian Process regression and Multilayer Perceptrons (MLPs) struggle as they can only predict one output value for every input.
#
# To model this dataset we can use a Conditional Density Estimation (CDE) model. CDE models infer $p(f(x)|x)$ instead of just calculating the expectation $E[f(x) | x]$. Modeling the complete distribution $p(f(x)|x)$ is typically harder but it reveals more interesting properties, such as the modes, outlier boundaries and samples. A real-world example might be modeling taxi drop-offs, conditioned on the pick-up location. We would expect a taxi drop-off location to be multi-modal as passengers need to go to different destinations (airport/city center/suburbs and so on) and the density depends on the starting location [2].
# %% [markdown]
# ## Mixture Density Network models
#
# Mixture Density Networks (MDNs) are a parametric class of models that allow for conditional density estimation. They consist of two parts: a neural net and a Mixture of Gaussians (MoG). The neural net is responsible for producing the characteristics of the MoG. In practice, given that the MoG consists of $M$ Gaussians, the neural net will output a collection of $M$ means, variances and weights $\{\mu_m, \sigma_m^2, \pi_m\}_{m=1}^M$. These means, variances and weights are used to define the conditional probability distribution function:
# \begin{equation}
# p(Y = y\,|\,X = x) = \sum_{m=1}^{M} \pi_{m}(x)\,\mathcal{N}\big(y\, \left|\,\mu_{m}(x), \sigma_{m}^2(x)\big)\right.
# \end{equation}
#
# Each of the parameters $\pi_{m}(x), \mu_{m}(x), \sigma_{m}(x)$ of the distribution are determined by the neural net, as a function of the input $x$.
#
# We train the MDN's neural net by optimizing the model's likelihood:
# \begin{equation}
# \mathcal{L} \triangleq \text{argmax}_{\Theta} \prod_{n=1}^N p(Y = y_n | X = x_n)
# \end{equation}
#
# where $\Theta$ collects the neural net's weights and biases and $\{x_n, y_n\}_{n=1}^N$ represents our training dataset.
# %% [markdown]
# ## A GPflow MDN implementation
#
# GPflow doesn't reinvent the wheel; most of what follows is just plain Python/TensorFlow code. We choose to use GPflow, however, because it provides us with functionality to easily define a model. Once we have a GPflow model, we can specify its objective function, parameters and dataset. This extra layer of abstraction makes interacting with the model much easier, for example optimizing or performing inference.
#
# We begin by importing the required packages from TensorFlow and GPflow.
# %%
import tensorflow as tf
# %%
import gpflow
from gpflow.models import BayesianModel, ExternalDataTrainingLossMixin
from gpflow.base import Parameter
# %% [markdown]
# Next, we create a `MDN` class that inherits from GPflow's `Model` class. We need to do the following:
# 1. Store each of the feature and target matrices (X, Y) as a `DataHolder` object.
# 2. Define our model's parameters using GPflow's `Parameter` and `ParamList` objects.
# 3. Define the objective function using the `_build_likelihood` method. When we optimize the model the negative of this function will be minimized.
# %%
from typing import Callable, Optional, Sequence, Tuple
class MDN(BayesianModel, ExternalDataTrainingLossMixin):
    """Mixture Density Network: a fully connected net whose outputs
    parameterize a mixture of `num_mixtures` univariate Gaussians."""

    def __init__(
        self,
        num_mixtures: int = 5,
        inner_dims: Sequence[int] = (10, 10),
        activation: Callable[[tf.Tensor], tf.Tensor] = tf.keras.activations.relu,
    ):
        """
        :param num_mixtures: number M of Gaussian mixture components.
        :param inner_dims: sizes of the hidden layers.
        :param activation: nonlinearity between hidden layers.
        """
        super().__init__()

        # BUG FIX: the defaults were a mutable list ([10, 10]) and the
        # annotations used Optional[...] although None was never handled;
        # use an immutable tuple default and precise annotations instead.

        # `self.dims` collects the neural net's input, hidden and output dimensions.
        # The number of output dims `self.dims[-1]` equals `num_mixtures` means +
        # `num_mixtures` variances + `num_mixtures` weights, a total of
        # 3 times `num_mixtures` variables.
        self.dims = [1] + list(inner_dims) + [3 * num_mixtures]
        self.activation = activation
        self._create_network()

    def _create_network(self):
        # "Xavier" (Glorot) initialization for the weights; zero biases.
        self.Ws, self.bs = [], []

        for dim_in, dim_out in zip(self.dims[:-1], self.dims[1:]):
            init_xavier_std = (2.0 / (dim_in + dim_out)) ** 0.5
            self.Ws.append(Parameter(np.random.randn(dim_in, dim_out) * init_xavier_std))
            self.bs.append(Parameter(np.zeros(dim_out)))

    def eval_network(self, X):
        """Run the net on X and return the mixture parameters (pis, mus, sigmas),
        each of shape (batch, num_mixtures)."""
        for i, (W, b) in enumerate(zip(self.Ws, self.bs)):
            X = tf.matmul(X, W) + b
            if i < len(self.bs) - 1:
                X = self.activation(X)

        pis, mus, sigmas = tf.split(X, 3, axis=1)
        pis = tf.nn.softmax(pis)  # make sure the mixture weights normalize to 1
        sigmas = tf.exp(sigmas)  # make sure std. dev. are positive
        return pis, mus, sigmas

    def maximum_log_likelihood_objective(self, data: Tuple[tf.Tensor, tf.Tensor]):
        """Total log-likelihood of `data` under the conditional mixture.
        Computed in log space with reduce_logsumexp for numerical stability."""
        x, y = data
        pis, mus, sigmas = self.eval_network(x)
        Z = (2 * np.pi) ** 0.5 * sigmas
        log_probs_mog = (-0.5 * (mus - y) ** 2 / sigmas ** 2) - tf.math.log(Z) + tf.math.log(pis)
        log_probs = tf.reduce_logsumexp(log_probs_mog, axis=1)
        return tf.reduce_sum(log_probs)
# %% [markdown]
# ### Notes
# - Given we are dealing with a MoG, the neural net output must comply with the following restrictions:
# \begin{equation}
# \sum_{m=1}^{M} \pi_{m}(x) = 1, \quad \pi_m \ge 0 \quad \text{and} \quad \sigma_m > 0 \quad \forall\, m
# \end{equation}
# We achieve this by applying the `softmax` operator to the $\pi$'s and by taking the `exp` to the $\sigma$'s.
#
# - We use the "Xavier" initialization for the neural net's weights. (Glorot and Bengio, 2010).
#
# - Instead of calculating the pdf of the Gaussians, we work with the pdf `log` and use `tf.reduce_logsumexp`. This is mainly for numerical stability.
# %% [markdown]
# ## Experiment 1: The sinusoidal dataset
#
# Let's see how our model works in practice with the sinusoidal dataset presented earlier. We do this by initializing a new instance of our MDN model, and then specifying the dataset $(X, Y)$, the number of hidden units of the MDN's neural net, and the number of mixture components $M$.
# %%
model = MDN(inner_dims=[100, 100], num_mixtures=5)
from gpflow.utilities import print_summary
print_summary(model)
# %% [markdown]
# The objective function for MDN instances is the `maximum_log_likelihood_objective`, which we use for optimization of the parameters. GPflow ensures that only the variables stored in `Parameter` objects are optimized. For the MDN, the only parameters are the weights and the biases of the neural net.
#
# We use the `Scipy` optimizer, which is a wrapper around SciPy's L-BFGS optimization algorithm. Note that GPflow supports other TensorFlow optimizers such as `Adam`, `Adagrad`, and `Adadelta` as well.
# %%
from gpflow.optimizers import Scipy
from gpflow.ci_utils import ci_niter
Scipy().minimize(
model.training_loss_closure(data, compile=True),
model.trainable_variables,
options=dict(maxiter=ci_niter(1500)),
)
print("Final Likelihood", model.maximum_log_likelihood_objective(data).numpy())
# %% [markdown]
# To evaluate the validity of our model, we draw the posterior density. We also plot $\mu(x)$ of the optimized neural net. Remember that for every $x$ the neural net outputs $M$ means $\mu_m(x)$. These determine the location of the Gaussians. We plot all $M$ means and use their corresponding mixture weight $\pi_m(X)$ to determine their size. Larger dots will have more impact in the Gaussian ensemble.
# %%
try:
from mdn_plotting import plot
except:
# VS CODE's root directory is GPflow's top-level directory
from doc.source.notebooks.tailor.mdn_plotting import plot
fig, axes = plt.subplots(1, 2, figsize=(12, 6))
for a in axes:
a.set_xlim(-4, 4)
a.set_ylim(-3, 3)
plot(model, X, Y, axes, cmap=CMAP)
# %% [markdown]
# ## Experiment 2: The half moon dataset
#
# The half moon dataset is available in the `scikit-learn` package.
# %%
from sklearn.datasets import make_moons
def moon_data(N, noise):
    """Generate the scikit-learn half-moons dataset as an (X, Y) pair.

    :param N: number of samples
    :param noise: noise level passed through to make_moons
    :return: (X, Y), each of shape (N, 1) — the two point coordinates
    """
    samples, _ = make_moons(n_samples=N, shuffle=True, noise=noise)
    first_coord = samples[:, 0].reshape(-1, 1)
    second_coord = samples[:, 1].reshape(-1, 1)
    return first_coord, second_coord
# %%
X, Y = data = moon_data(N, NOISE_STD)
plt.plot(X, Y, "ro", alpha=0.3)
plt.xlabel("$x$")
_ = plt.ylabel("$y$")
# %% [markdown]
# The only difference in the MDN's setup is that we lower the number of mixture components.
# %%
model = MDN(inner_dims=[100, 100], num_mixtures=5)
# %%
Scipy().minimize(
model.training_loss_closure(data, compile=True),
model.trainable_variables,
options=dict(maxiter=ci_niter(int(10e3))),
)
print("Final Likelihood", model.maximum_log_likelihood_objective(data).numpy())
# %%
fig, axes = plt.subplots(1, 2, figsize=(12, 6))
for a in axes:
a.set_xlim(-2, 3)
a.set_ylim(-1.5, 2)
plot(model, X, Y, axes, cmap=CMAP)
# %% [markdown]
# ## References
#
# [1] Bishop, Christopher M. Mixture density networks. Technical Report NCRG/4288, Aston University, Birmingham, UK, 1994.
#
# [2] Dutordoir, Vincent, et al. "Gaussian Process Conditional Density Estimation." Advances in Neural Information Processing Systems. 2018.
|
GPflow/GPflow
|
doc/source/notebooks/tailor/mixture_density_network.pct.py
|
Python
|
apache-2.0
| 11,118
|
[
"Gaussian"
] |
d36ab438c613d86fdc72d14891826e5af7021d1a422986f979dc0ebf9c10ab36
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Ruggero Marchei <ruggero.marchei@daemonzone.net>
# (c) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
import os
import stat
import fnmatch
import time
import re
import shutil
DOCUMENTATION = '''
---
module: find
author: Brian Coca (based on Ruggero Marchei's Tidy)
version_added: "2.0"
short_description: return a list of files based on specific criteria
requirements: []
description:
- Return a list files based on specific criteria. Multiple criteria are AND'd together.
options:
age:
required: false
default: null
description:
- Select files whose age is equal to or greater than the specified time.
Use a negative age to find files equal to or less than the specified time.
You can choose seconds, minutes, hours, days, or weeks by specifying the
first letter of any of those words (e.g., "1w").
patterns:
required: false
default: '*'
description:
- One or more (shell type) file glob patterns, which restrict the list of files to be returned to
those whose basenames match at least one of the patterns specified. Multiple patterns can be
specified using a list.
contains:
required: false
default: null
description:
- One or more re patterns which should be matched against the file content
paths:
required: true
aliases: [ "name" ]
description:
- List of paths to the file or directory to search. All paths must be fully qualified.
file_type:
required: false
description:
- Type of file to select
choices: [ "file", "directory" ]
default: "file"
recurse:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- If target is a directory, recursively descend into the directory looking for files.
size:
required: false
default: null
description:
- Select files whose size is equal to or greater than the specified size.
Use a negative size to find files equal to or less than the specified size.
Unqualified values are in bytes, but b, k, m, g, and t can be appended to specify
bytes, kilobytes, megabytes, gigabytes, and terabytes, respectively.
Size is not evaluated for directories.
age_stamp:
required: false
default: "mtime"
choices: [ "atime", "mtime", "ctime" ]
description:
- Choose the file property against which we compare age. Default is mtime.
hidden:
required: false
default: "False"
choices: [ True, False ]
description:
- Set this to true to include hidden files, otherwise they'll be ignored.
follow:
required: false
default: "False"
choices: [ True, False ]
description:
- Set this to true to follow symlinks in path for systems with python 2.6+
get_checksum:
required: false
default: "False"
choices: [ True, False ]
description:
- Set this to true to retrieve a file's sha1 checksum
'''
EXAMPLES = '''
# Recursively find /tmp files older than 2 days
- find: paths="/tmp" age="2d" recurse=yes
# Recursively find /tmp files older than 4 weeks and equal or greater than 1 megabyte
- find: paths="/tmp" age="4w" size="1m" recurse=yes
# Recursively find /var/tmp files with last access time greater than 3600 seconds
- find: paths="/var/tmp" age="3600" age_stamp=atime recurse=yes
# find /var/log files equal or greater than 10 megabytes ending with .log or .log.gz
- find: paths="/var/tmp" patterns="*.log","*.log.gz" size="10m"
'''
RETURN = '''
files:
description: all matches found with the specified criteria (see stat module for full output of each dictionary)
returned: success
type: list of dictionaries
sample: [
{ path="/var/tmp/test1",
mode=0644,
...,
checksum=16fac7be61a6e4591a33ef4b729c5c3302307523
},
{ path="/var/tmp/test2",
...
},
]
matched:
description: number of matches
returned: success
type: string
sample: 14
examined:
description: number of filesystem objects looked at
returned: success
type: string
sample: 34
'''
def pfilter(f, patterns=None):
    '''Return True when basename *f* matches at least one glob pattern.

    A ``None`` pattern list means "no restriction": everything matches.
    '''
    if patterns is None:
        return True
    return any(fnmatch.fnmatch(f, pat) for pat in patterns)
def agefilter(st, now, age, timestamp):
    '''Return True when the file passes the age criterion.

    :param st: an ``os.stat`` result for the file
    :param now: current time, seconds since the epoch
    :param age: threshold in seconds; a positive value keeps files at
        least that old, a negative value keeps files at most ``abs(age)``
        old, and ``None`` disables the check
    :param timestamp: stat field to compare: "atime", "mtime" or "ctime"
    '''
    if age is None:
        return True
    # getattr() is the idiomatic spelling of st.__getattribute__("st_...")
    file_age = now - getattr(st, "st_%s" % timestamp)
    if age >= 0:
        return file_age >= abs(age)
    return file_age <= abs(age)
def sizefilter(st, size):
    '''Return True when the stat result satisfies the size criterion.

    A positive size keeps files of at least that many bytes, a negative
    size keeps files of at most ``abs(size)`` bytes, and ``None``
    disables the check.
    '''
    if size is None:
        return True
    if size >= 0:
        return st.st_size >= abs(size)
    return st.st_size <= abs(size)
def contentfilter(fsname, pattern):
    '''Return True when any line of the file matches the regex *pattern*.

    ``None`` disables the check.  Unreadable files and invalid patterns
    are treated as non-matching (best-effort, mirroring the original
    behaviour) rather than aborting the whole scan.
    '''
    if pattern is None:
        return True
    try:
        prog = re.compile(pattern)
        # 'with' guarantees the handle is closed even when iteration
        # raises; the original code leaked the file object on mid-read
        # errors and swallowed every exception with a bare except.
        with open(fsname) as f:
            for line in f:
                if prog.match(line):
                    return True
    except (re.error, EnvironmentError):
        pass
    return False
def statinfo(st):
    '''Translate an os.stat() result into the flat dict reported per file.'''
    mode = st.st_mode
    info = {
        'mode': "%04o" % stat.S_IMODE(mode),
        'isdir': stat.S_ISDIR(mode),
        'ischr': stat.S_ISCHR(mode),
        'isblk': stat.S_ISBLK(mode),
        'isreg': stat.S_ISREG(mode),
        'isfifo': stat.S_ISFIFO(mode),
        'islnk': stat.S_ISLNK(mode),
        'issock': stat.S_ISSOCK(mode),
        'uid': st.st_uid,
        'gid': st.st_gid,
        'size': st.st_size,
        'inode': st.st_ino,
        'dev': st.st_dev,
        'nlink': st.st_nlink,
        'atime': st.st_atime,
        'mtime': st.st_mtime,
        'ctime': st.st_ctime,
    }
    # Permission and special-mode bits, keyed by the name used in the
    # module's output.
    mode_bits = {
        'wusr': stat.S_IWUSR, 'rusr': stat.S_IRUSR, 'xusr': stat.S_IXUSR,
        'wgrp': stat.S_IWGRP, 'rgrp': stat.S_IRGRP, 'xgrp': stat.S_IXGRP,
        'woth': stat.S_IWOTH, 'roth': stat.S_IROTH, 'xoth': stat.S_IXOTH,
        'isuid': stat.S_ISUID, 'isgid': stat.S_ISGID,
    }
    for key, bit in mode_bits.items():
        info[key] = bool(mode & bit)
    return info
def main():
    """Module entry point: walk the requested paths, apply the configured
    filters (pattern/age/size/contents) and exit with the match list."""
    module = AnsibleModule(
        argument_spec = dict(
            paths         = dict(required=True, aliases=['name'], type='list'),
            patterns      = dict(default=['*'], type='list'),
            contains      = dict(default=None, type='str'),
            file_type     = dict(default="file", choices=['file', 'directory'], type='str'),
            age           = dict(default=None, type='str'),
            age_stamp     = dict(default="mtime", choices=['atime','mtime','ctime'], type='str'),
            size          = dict(default=None, type='str'),
            recurse       = dict(default='no', type='bool'),
            hidden        = dict(default="False", type='bool'),
            follow        = dict(default="False", type='bool'),
            get_checksum  = dict(default="False", type='bool'),
        ),
    )

    params = module.params
    filelist = []

    if params['age'] is None:
        age = None
    else:
        # convert age ("1w", "2d", plain seconds, ...) to seconds
        m = re.match("^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
        seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
        if m:
            age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
        else:
            module.fail_json(age=params['age'], msg="failed to process age")

    if params['size'] is None:
        size = None
    else:
        # convert size ("10m", "1g", plain bytes, ...) to bytes
        m = re.match("^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
        bytes_per_unit = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
        if m:
            size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)
        else:
            module.fail_json(size=params['size'], msg="failed to process size")

    now = time.time()
    msg = ''
    looked = 0
    for npath in params['paths']:
        if os.path.isdir(npath):
            # ignore followlinks for python version < 2.6
            # NOTE(review): 'sys' is not imported at the top of this file;
            # it presumably arrives via the wildcard import of
            # ansible.module_utils.basic below -- TODO confirm.
            for root, dirs, files in (sys.version_info < (2, 6, 0) and os.walk(npath)) or \
                    os.walk(npath, followlinks=params['follow']):
                looked = looked + len(files) + len(dirs)
                for fsobj in (files + dirs):
                    fsname = os.path.normpath(os.path.join(root, fsobj))

                    # skip dot-files unless 'hidden' was requested
                    if os.path.basename(fsname).startswith('.') and not params['hidden']:
                        continue

                    st = os.stat(fsname)
                    r = {'path': fsname}
                    if stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
                        if pfilter(fsobj, params['patterns']) and agefilter(st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            filelist.append(r)
                    elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
                        # size and contents only apply to regular files
                        if pfilter(fsobj, params['patterns']) and \
                           agefilter(st, now, age, params['age_stamp']) and \
                           sizefilter(st, size) and \
                           contentfilter(fsname, params['contains']):
                            r.update(statinfo(st))
                            if params['get_checksum']:
                                r['checksum'] = module.sha1(fsname)
                            filelist.append(r)

                if not params['recurse']:
                    break
        else:
            # BUG FIX: the original appended the bare format string without
            # interpolating npath, so the skip message contained a literal
            # "%s" instead of the path.
            msg += "%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n" % npath

    matched = len(filelist)
    module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked)
# import module snippets
from ansible.module_utils.basic import *
main()
|
garyjyao1/ansible
|
lib/ansible/modules/core/files/find.py
|
Python
|
gpl-3.0
| 11,313
|
[
"Brian"
] |
a0c10ea7c3552cd12cfec5af66d5531ede91b61111759ae83f5150091c9e83f8
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import warnings
from pymatgen.core.bonds import (
CovalentBond,
get_bond_length,
get_bond_order,
obtain_all_bond_lengths,
)
from pymatgen.core.periodic_table import Element
from pymatgen.core.sites import Site
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 26, 2012"
class CovalentBondTest(unittest.TestCase):
    """Unit tests for the CovalentBond class."""
    def setUp(self):
        # Silence warnings emitted during the tests.
        warnings.simplefilter("ignore")
    def tearDown(self):
        warnings.simplefilter("default")
    def test_length(self):
        # length is the distance between the two sites:
        # sqrt(0.7**2 + 0.6**2) == 0.92195...
        site1 = Site("C", [0, 0, 0])
        site2 = Site("H", [0, 0.7, 0.6])
        self.assertAlmostEqual(CovalentBond(site1, site2).length, 0.92195444572928864)
    def test_get_bond_order(self):
        # 1.08 is the C-H single-bond length (see FuncTest below), so the
        # inferred order is exactly 1.
        site1 = Site("C", [0, 0, 0])
        site2 = Site("H", [0, 0, 1.08])
        self.assertAlmostEqual(CovalentBond(site1, site2).get_bond_order(), 1)
        # With an explicit tol/default bond length the order is interpolated.
        bond = CovalentBond(Site("C", [0, 0, 0]), Site("Br", [0, 0, 2]))
        self.assertAlmostEqual(bond.get_bond_order(0.5, 1.9), 0.894736842105263)
    def test_is_bonded(self):
        site1 = Site("C", [0, 0, 0])
        site2 = Site("H", [0, 0, 1])
        self.assertTrue(CovalentBond.is_bonded(site1, site2))
        site2 = Site("H", [0, 0, 1.5])
        self.assertFalse(CovalentBond.is_bonded(site1, site2))
        # U-H has no tabulated bond data: must raise unless default_bl given.
        site1 = Site("U", [0, 0, 0])
        self.assertRaises(ValueError, CovalentBond.is_bonded, site1, site2)
        self.assertTrue(CovalentBond.is_bonded(site1, site2, default_bl=2))
    def test_str(self):
        # Constructing a bond must not fail or return None.
        site1 = Site("C", [0, 0, 0])
        site2 = Site("H", [0, 0.7, 0.6])
        self.assertIsNotNone(CovalentBond(site1, site2))
class FuncTest(unittest.TestCase):
    """Tests for the module-level bond helper functions."""
    def test_get_bond_length(self):
        # Higher bond order implies a shorter bond.
        self.assertAlmostEqual(get_bond_length("C", "C", 1), 1.54)
        self.assertAlmostEqual(get_bond_length("C", "C", 2), 1.34)
        self.assertAlmostEqual(get_bond_length("C", "H", 1), 1.08)
        self.assertEqual(get_bond_length("C", "H", 2), 0.95)
        self.assertAlmostEqual(get_bond_length("C", "Br", 1), 1.85)
    def test_obtain_all_bond_lengths(self):
        # Accepts both string symbols and Element instances.
        self.assertDictEqual(obtain_all_bond_lengths("C", "C"), {1.0: 1.54, 2.0: 1.34, 3.0: 1.2})
        self.assertRaises(ValueError, obtain_all_bond_lengths, "Br", Element("C"))
        self.assertDictEqual(obtain_all_bond_lengths("C", Element("Br"), 1.76), {1: 1.76})
        # Mutating the returned dict must not corrupt the internal table,
        # i.e. obtain_all_bond_lengths returns a fresh copy on each call.
        bond_lengths_dict = obtain_all_bond_lengths("C", "N")
        bond_lengths_dict[4] = 999
        self.assertDictEqual(obtain_all_bond_lengths("C", "N"), {1.0: 1.47, 2.0: 1.3, 3.0: 1.16})
    def test_get_bond_order(self):
        # Order is clamped to [0, 3] and interpolated between tabulated lengths.
        self.assertAlmostEqual(get_bond_order("C", "C", 1), 3)
        self.assertAlmostEqual(get_bond_order("C", "C", 1.2), 3)
        self.assertAlmostEqual(get_bond_order("C", "C", 1.25), 2.642857142857143)
        self.assertAlmostEqual(get_bond_order("C", "C", 1.34), 2)
        self.assertAlmostEqual(get_bond_order("C", "C", 1.4), 1.7)  # bond length in benzene
        self.assertAlmostEqual(get_bond_order("C", "C", 1.54), 1)
        self.assertAlmostEqual(get_bond_order("C", "C", 2.5), 0)
        self.assertAlmostEqual(get_bond_order("C", "C", 9999), 0)
        # Species without tabulated data fall back to default_bl (or raise).
        self.assertAlmostEqual(get_bond_order("C", "Br", 1.9, default_bl=1.9), 1)
        self.assertAlmostEqual(get_bond_order("C", "Br", 2, default_bl=1.9), 0.7368421052631575)
        self.assertAlmostEqual(get_bond_order("C", "Br", 1.9, tol=0.5, default_bl=1.9), 1)
        self.assertAlmostEqual(get_bond_order("C", "Br", 2, tol=0.5, default_bl=1.9), 0.894736842105263)
        self.assertRaises(ValueError, get_bond_order, "C", "Br", 1.9)
        self.assertAlmostEqual(get_bond_order("N", "N", 1.25), 2)
if __name__ == "__main__":
unittest.main()
|
gmatteo/pymatgen
|
pymatgen/core/tests/test_bonds.py
|
Python
|
mit
| 3,958
|
[
"pymatgen"
] |
ca6558fdfc63f59b27253b426f125961a53725e4568b26c28a2f31b4b4373d3b
|
# Copyright (c) 2012, Cloudscaling
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import os
import re
import pep8
import six
"""
Guidelines for writing new hacking checks
- Use only for Nova specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range N3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the N3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to nova/tests/unit/test_hacking.py
"""
UNDERSCORE_IMPORT_FILES = []
session_check = re.compile(r"\w*def [a-zA-Z0-9].*[(].*session.*[)]")
cfg_re = re.compile(r".*\scfg\.")
# Excludes oslo.config OptGroup objects
cfg_opt_re = re.compile(r".*[\s\[]cfg\.[a-zA-Z]*Opt\(")
rule_default_re = re.compile(r".*RuleDefault\(")
policy_enforce_re = re.compile(r".*_ENFORCER\.enforce\(")
vi_header_re = re.compile(r"^#\s+vim?:.+")
virt_file_re = re.compile(r"\./nova/(?:tests/)?virt/(\w+)/")
virt_import_re = re.compile(
r"^\s*(?:import|from) nova\.(?:tests\.)?virt\.(\w+)")
virt_config_re = re.compile(
r"CONF\.import_opt\('.*?', 'nova\.virt\.(\w+)('|.)")
asse_trueinst_re = re.compile(
r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, "
"(\w|\.|\'|\"|\[|\])+\)\)")
asse_equal_type_re = re.compile(
r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), "
"(\w|\.|\'|\"|\[|\])+\)")
asse_equal_in_end_with_true_or_false_re = re.compile(r"assertEqual\("
r"(\w|[][.'\"])+ in (\w|[][.'\", ])+, (True|False)\)")
asse_equal_in_start_with_true_or_false_re = re.compile(r"assertEqual\("
r"(True|False), (\w|[][.'\"])+ in (\w|[][.'\", ])+\)")
asse_equal_end_with_none_re = re.compile(
r"assertEqual\(.*?,\s+None\)$")
asse_equal_start_with_none_re = re.compile(
r"assertEqual\(None,")
# NOTE(snikitin): Next two regexes weren't united to one for more readability.
# asse_true_false_with_in_or_not_in regex checks
# assertTrue/False(A in B) cases where B argument has no spaces
# asse_true_false_with_in_or_not_in_spaces regex checks cases
# where B argument has spaces and starts/ends with [, ', ".
# For example: [1, 2, 3], "some string", 'another string'.
# We have to separate these regexes to escape a false positives
# results. B argument should have spaces only if it starts
# with [, ", '. Otherwise checking of string
# "assertFalse(A in B and C in D)" will be false positives.
# In this case B argument is "B and C in D".
asse_true_false_with_in_or_not_in = re.compile(r"assert(True|False)\("
r"(\w|[][.'\"])+( not)? in (\w|[][.'\",])+(, .*)?\)")
asse_true_false_with_in_or_not_in_spaces = re.compile(r"assert(True|False)"
r"\((\w|[][.'\"])+( not)? in [\[|'|\"](\w|[][.'\", ])+"
r"[\[|'|\"](, .*)?\)")
asse_raises_regexp = re.compile(r"assertRaisesRegexp\(")
conf_attribute_set_re = re.compile(r"CONF\.[a-z0-9_.]+\s*=\s*\w")
log_translation = re.compile(
r"(.)*LOG\.(audit|error|critical)\(\s*('|\")")
log_translation_info = re.compile(
r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
log_translation_exception = re.compile(
r"(.)*LOG\.(exception)\(\s*(_\(|'|\")")
log_translation_LW = re.compile(
r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")")
translated_log = re.compile(
r"(.)*LOG\.(audit|error|info|critical|exception)"
"\(\s*_\(\s*('|\")")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
string_translation = re.compile(r"[^_]*_\(\s*('|\")")
underscore_import_check = re.compile(r"(.)*import _(.)*")
import_translation_for_log_or_exception = re.compile(
r"(.)*(from\snova.i18n\simport)\s_")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
api_version_re = re.compile(r"@.*api_version")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
decorator_re = re.compile(r"@.*")
http_not_implemented_re = re.compile(r"raise .*HTTPNotImplemented\(")
spawn_re = re.compile(
r".*(eventlet|greenthread)\.(?P<spawn_part>spawn(_n)?)\(.*\)")
contextlib_nested = re.compile(r"^with (contextlib\.)?nested\(")
doubled_words_re = re.compile(
r"\b(then?|[iao]n|i[fst]|but|f?or|at|and|[dt]o)\s+\1\b")
class BaseASTChecker(ast.NodeVisitor):
    """Provides a simple framework for writing AST-based checks.
    Subclasses should implement visit_* methods like any other AST visitor
    implementation. When they detect an error for a particular node the
    method should call ``self.add_error(offending_node)``. Details about
    where in the code the error occurred will be pulled from the node
    object.
    Subclasses should also provide a class variable named CHECK_DESC to
    be used for the human readable error message.
    """
    def __init__(self, tree, filename):
        """This object is created automatically by pep8.
        :param tree: an AST tree
        :param filename: name of the file being analyzed
        (ignored by our checks)
        """
        self._tree = tree
        self._errors = []
    def run(self):
        """Called automatically by pep8."""
        self.visit(self._tree)
        return self._errors
    def add_error(self, node, message=None):
        """Add an error caused by a node to the list of errors for pep8."""
        # Fall back to the subclass-provided description when no explicit
        # message is given.
        message = message or self.CHECK_DESC
        # pep8 expects (line, column, message, checker-class) tuples.
        error = (node.lineno, node.col_offset, message, self.__class__)
        self._errors.append(error)
    def _check_call_names(self, call_node, names):
        # Return True when call_node is a call to a plain name listed in
        # *names*; attribute calls (e.g. obj.str()) never match.
        if isinstance(call_node, ast.Call):
            if isinstance(call_node.func, ast.Name):
                if call_node.func.id in names:
                    return True
        return False
def import_no_db_in_virt(logical_line, filename):
    """Check for db calls from nova/virt
    As of grizzly-2 all the database calls have been removed from
    nova/virt, and we want to keep it that way.
    N307
    """
    in_virt_tree = "nova/virt" in filename and not filename.endswith("fake.py")
    if in_virt_tree and logical_line.startswith("from nova import db"):
        yield (0, "N307: nova.db import not allowed in nova/virt/*")
def no_db_session_in_public_api(logical_line, filename):
    # N309: public db api methods must not accept a session argument.
    if "db/api.py" not in filename:
        return
    if session_check.match(logical_line):
        yield (0, "N309: public db api methods may not accept session")
def use_timeutils_utcnow(logical_line, filename):
    """Enforce timeutils over direct datetime.now()/utcnow() calls (N310)."""
    # tools are OK to use the standard datetime module
    if "/tools/" in filename:
        return
    msg = "N310: timeutils.utcnow() must be used instead of datetime.%s()"
    for func_name in ('now', 'utcnow'):
        idx = logical_line.find('datetime.%s' % func_name)
        if idx >= 0:
            yield (idx, msg % func_name)
def _get_virt_name(regex, data):
m = regex.match(data)
if m is None:
return None
driver = m.group(1)
# Ignore things we mis-detect as virt drivers in the regex
if driver in ["test_virt_drivers", "driver", "firewall",
"disk", "api", "imagecache", "cpu", "hardware",
"image"]:
return None
return driver
def import_no_virt_driver_import_deps(physical_line, filename):
    """Check virt drivers' modules aren't imported by other drivers
    Modules under each virt driver's directory are
    considered private to that virt driver. Other drivers
    in Nova must not access those drivers. Any code that
    is to be shared should be refactored into a common
    module
    N311
    """
    owning_driver = _get_virt_name(virt_file_re, filename)
    imported_driver = _get_virt_name(virt_import_re, physical_line)
    cross_driver = (owning_driver is not None and
                    imported_driver is not None and
                    owning_driver != imported_driver)
    if cross_driver:
        return (0, "N311: importing code from other virt drivers forbidden")
def import_no_virt_driver_config_deps(physical_line, filename):
    """Check virt drivers' config vars aren't used by other drivers
    Modules under each virt driver's directory are
    considered private to that virt driver. Other drivers
    in Nova must not use their config vars. Any config vars
    that are to be shared should be moved into a common module
    N312
    """
    owning_driver = _get_virt_name(virt_file_re, filename)
    config_driver = _get_virt_name(virt_config_re, physical_line)
    cross_driver = (owning_driver is not None and
                    config_driver is not None and
                    owning_driver != config_driver)
    if cross_driver:
        return (0, "N312: using config vars from other virt drivers forbidden")
def capital_cfg_help(logical_line, tokens):
    # N313: help strings on cfg option definitions must be capitalized.
    msg = "N313: capitalize help string"
    if cfg_re.match(logical_line):
        for t in range(len(tokens)):
            if tokens[t][1] == "help":
                # tokens[t + 2] is presumably the string literal two tokens
                # after 'help' (skipping the '=' token) -- TODO confirm.
                txt = tokens[t + 2][1]
                # txt[0] is the opening quote, so txt[1] is the first
                # character of the help text itself.
                if len(txt) > 1 and txt[1].islower():
                    yield(0, msg)
def no_vi_headers(physical_line, line_number, lines):
    """Check for vi editor configuration in source files.
    By default vi modelines can only appear in the first or
    last 5 lines of a source file.
    N314
    """
    # line_number is 1-indexed
    near_top = line_number <= 5
    near_bottom = line_number > len(lines) - 5
    if (near_top or near_bottom) and vi_header_re.match(physical_line):
        return 0, "N314: Don't put vi configuration in source files"
def assert_true_instance(logical_line):
    """Check for assertTrue(isinstance(a, b)) sentences
    N316
    """
    found = asse_trueinst_re.match(logical_line)
    if found is not None:
        yield (0, "N316: assertTrue(isinstance(a, b)) sentences not allowed")
def assert_equal_type(logical_line):
    """Check for assertEqual(type(A), B) sentences
    N317
    """
    found = asse_equal_type_re.match(logical_line)
    if found is not None:
        yield (0, "N317: assertEqual(type(A), B) sentences not allowed")
def assert_equal_none(logical_line):
    """Check for assertEqual(A, None) or assertEqual(None, A) sentences
    N318
    """
    found = asse_equal_start_with_none_re.search(logical_line)
    if not found:
        found = asse_equal_end_with_none_re.search(logical_line)
    if found:
        yield (0, "N318: assertEqual(A, None) or assertEqual(None, A) "
               "sentences not allowed")
def no_translate_debug_logs(logical_line, filename):
    """Check for 'LOG.debug(_('
    As per our translation policy,
    https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
    we shouldn't translate debug level logs.
    * This check assumes that 'LOG' is a logger.
    * Use filename so we can start enforcing this in specific folders instead
    of needing to do so all at once.
    N319
    """
    prefix = "LOG.debug(_("
    if logical_line[:len(prefix)] == prefix:
        yield (0, "N319 Don't translate debug level logs")
def no_import_translation_in_tests(logical_line, filename):
    """Check for 'from nova.i18n import _'
    N337
    """
    if 'nova/tests/' not in filename:
        return
    if import_translation_for_log_or_exception.match(logical_line):
        yield (0, "N337 Don't import translation in tests")
def no_setting_conf_directly_in_tests(logical_line, filename):
    """Check for setting CONF.* attributes directly in tests
    The value can leak out of tests affecting how subsequent tests run.
    Using self.flags(option=value) is the preferred method to temporarily
    set config options in tests.
    N320
    """
    in_tests = 'nova/tests/' in filename
    if in_tests and conf_attribute_set_re.match(logical_line):
        yield (0, "N320: Setting CONF.* attributes directly in tests is "
               "forbidden. Use self.flags(option=value) instead")
def validate_log_translations(logical_line, physical_line, filename):
    """Require _LI/_LE/_LW translation markers on log calls (N321/N328-330)."""
    # Translations are not required in the test directory
    # and the Xen utilities
    exempt = ("nova/tests" in filename or
              "plugins/xenserver/xenapi/etc/xapi.d" in filename)
    if exempt:
        return
    if pep8.noqa(physical_line):
        return
    # checked in this order so the most specific message wins per line
    checks = (
        (log_translation_info,
         "N328: LOG.info messages require translations `_LI()`!"),
        (log_translation_exception,
         "N329: LOG.exception messages require translations `_LE()`!"),
        (log_translation_LW,
         "N330: LOG.warning, LOG.warn messages require translations `_LW()`!"),
        (log_translation,
         "N321: Log messages require translations!"),
    )
    for regex, msg in checks:
        if regex.match(logical_line):
            yield (0, msg)
def no_mutable_default_args(logical_line):
    # N322: default argument values must not be mutable ([] or {}).
    if mutable_default_args.match(logical_line):
        yield (0, "N322: Method's default argument shouldn't be mutable!")
def check_explicit_underscore_import(logical_line, filename):
    """Check for explicit import of the _ function
    We need to ensure that any files that are using the _() function
    to translate logs are explicitly importing the _ function. We
    can't trust unit test to catch whether the import has been
    added so we need to check for it here.
    """
    # Build a list of the files that have _ imported. No further
    # checking needed once it is found.
    if filename in UNDERSCORE_IMPORT_FILES:
        # This file already imported (or defined) _, so any use is fine.
        pass
    elif (underscore_import_check.match(logical_line) or
          custom_underscore_check.match(logical_line)):
        # Record the file in the module-level cache the first time an
        # import/definition of _ is seen.
        UNDERSCORE_IMPORT_FILES.append(filename)
    elif (translated_log.match(logical_line) or
          string_translation.match(logical_line)):
        # _() used before any import of _ was seen in this file.
        yield(0, "N323: Found use of _() without explicit import of _ !")
def use_jsonutils(logical_line, filename):
    """Enforce jsonutils over the stdlib json module (N324)."""
    # the code below that path is not meant to be executed from neutron
    # tree where jsonutils module is present, so don't enforce its usage
    # for this subdirectory
    if "plugins/xenserver" in filename:
        return
    # tools are OK to use the standard json module
    if "/tools/" in filename:
        return
    if "json." not in logical_line:
        return
    msg = "N324: jsonutils.%(fun)s must be used instead of json.%(fun)s"
    for func in ('dumps(', 'dump(', 'loads(', 'load('):
        idx = logical_line.find('json.%s' % func)
        if idx != -1:
            yield (idx, msg % {'fun': func[:-1]})
def check_api_version_decorator(logical_line, previous_logical, blank_before,
                                filename):
    """The api_version decorator must be the outermost decorator (N332)."""
    is_api_version_line = (blank_before == 0 and
                           re.match(api_version_re, logical_line))
    if is_api_version_line and re.match(decorator_re, previous_logical):
        yield (0, ("N332: the api_version decorator must be the first "
                   "decorator on a method."))
class CheckForStrUnicodeExc(BaseASTChecker):
    """Checks for the use of str() or unicode() on an exception.
    This currently only handles the case where str() or unicode()
    is used in the scope of an exception handler. If the exception
    is passed into a function, returned from an assertRaises, or
    used on an exception created in the same scope, this does not
    catch it.
    """
    CHECK_DESC = ('N325 str() and unicode() cannot be used on an '
                  'exception. Remove or use six.text_type()')

    def __init__(self, tree, filename):
        super(CheckForStrUnicodeExc, self).__init__(tree, filename)
        # Names bound by 'except ... as name' handlers currently in scope.
        self.name = []
        # Call nodes already reported, so re-visits don't double-count.
        self.already_checked = []

    # Python 2 produces ast.TryExcept and ast.TryFinally nodes, but Python 3
    # only produces ast.Try nodes.
    if six.PY2:
        def visit_TryExcept(self, node):
            for handler in node.handlers:
                if handler.name:
                    self.name.append(handler.name.id)
                    super(CheckForStrUnicodeExc, self).generic_visit(node)
                    self.name = self.name[:-1]
                else:
                    super(CheckForStrUnicodeExc, self).generic_visit(node)
    else:
        def visit_Try(self, node):
            for handler in node.handlers:
                if handler.name:
                    self.name.append(handler.name)
                    super(CheckForStrUnicodeExc, self).generic_visit(node)
                    self.name = self.name[:-1]
                else:
                    super(CheckForStrUnicodeExc, self).generic_visit(node)

    def visit_Call(self, node):
        if self._check_call_names(node, ['str', 'unicode']):
            if node not in self.already_checked:
                self.already_checked.append(node)
                # BUG FIX: guard against zero-argument str()/unicode()
                # calls; the original indexed node.args[0] unconditionally
                # and raised IndexError on e.g. "s = str()".
                if node.args and isinstance(node.args[0], ast.Name):
                    if node.args[0].id in self.name:
                        self.add_error(node.args[0])
        super(CheckForStrUnicodeExc, self).generic_visit(node)
class CheckForTransAdd(BaseASTChecker):
    """Checks for the use of concatenation on a translated string.
    Translations should not be concatenated with other strings, but
    should instead include the string being added to the translated
    string to give the translators the most information.
    """
    CHECK_DESC = ('N326 Translated messages cannot be concatenated. '
                  'String should be included in translated message.')
    TRANS_FUNC = ['_', '_LI', '_LW', '_LE', '_LC']

    def visit_BinOp(self, node):
        # Only '+' concatenates; report the first operand (left wins) that
        # is a call to one of the translation functions.
        if isinstance(node.op, ast.Add):
            for operand in (node.left, node.right):
                if self._check_call_names(operand, self.TRANS_FUNC):
                    self.add_error(operand)
                    break
        super(CheckForTransAdd, self).generic_visit(node)
class _FindVariableReferences(ast.NodeVisitor):
def __init__(self):
super(_FindVariableReferences, self).__init__()
self._references = []
def visit_Name(self, node):
if isinstance(node.ctx, ast.Load):
# This means the value of a variable was loaded. For example a
# variable 'foo' was used like:
# mocked_thing.bar = foo
# foo()
# self.assertRaises(excepion, foo)
self._references.append(node.id)
super(_FindVariableReferences, self).generic_visit(node)
class CheckForUncalledTestClosure(BaseASTChecker):
    """Look for closures that are never called in tests.
    A recurring pattern when using multiple mocks is to create a closure
    decorated with mocks like:
    def test_thing(self):
        @mock.patch.object(self.compute, 'foo')
        @mock.patch.object(self.compute, 'bar')
        def _do_test(mock_bar, mock_foo):
            # Test things
        _do_test()
    However it is easy to leave off the _do_test() and have the test pass
    because nothing runs. This check looks for methods defined within a test
    method and ensures that there is a reference to them. Only methods defined
    one level deep are checked. Something like:
    def test_thing(self):
        class FakeThing:
            def foo(self):
    would not ensure that foo is referenced.
    N349
    """
    def __init__(self, tree, filename):
        super(CheckForUncalledTestClosure, self).__init__(tree, filename)
        self._filename = filename

    def visit_FunctionDef(self, node):
        # Only scan test modules; self._filename is 'stdin' in the unit
        # test for this check, so that name is scanned as well.
        # BUG FIX: the original condition ended in "and not 'stdin'",
        # which is always False, so the early return never fired and
        # non-test files were scanned too.
        basename = os.path.basename(self._filename)
        if not basename.startswith('test_') and basename != 'stdin':
            return

        closures = []
        references = []
        # Walk just the direct nodes of the test method
        for child_node in ast.iter_child_nodes(node):
            if isinstance(child_node, ast.FunctionDef):
                closures.append(child_node.name)

        # Walk all nodes to find references
        find_references = _FindVariableReferences()
        find_references.generic_visit(node)
        references = find_references._references

        missed = set(closures) - set(references)
        if missed:
            self.add_error(node, 'N349: Test closures not called: %s'
                           % ','.join(missed))
def assert_true_or_false_with_in(logical_line):
    """Check for assertTrue/False(A in B), assertTrue/False(A not in B),
    assertTrue/False(A in B, message) or assertTrue/False(A not in B, message)
    sentences.
    N334
    """
    found = asse_true_false_with_in_or_not_in.search(logical_line)
    if not found:
        found = asse_true_false_with_in_or_not_in_spaces.search(logical_line)
    if found:
        yield (0, "N334: Use assertIn/NotIn(A, B) rather than "
               "assertTrue/False(A in/not in B) when checking collection "
               "contents.")
def assert_raises_regexp(logical_line):
    """Check for usage of deprecated assertRaisesRegexp
    N335
    """
    if asse_raises_regexp.search(logical_line) is not None:
        yield (0, "N335: assertRaisesRegex must be used instead "
               "of assertRaisesRegexp")
def dict_constructor_with_list_copy(logical_line):
    # N336: prefer a dict comprehension over dict() on key-value pairs.
    if dict_constructor_with_list_copy_re.match(logical_line):
        yield (0, ("N336: Must use a dict comprehension instead of a dict "
                   "constructor with a sequence of key-value pairs."))
def assert_equal_in(logical_line):
    """Check for assertEqual(A in B, True), assertEqual(True, A in B),
    assertEqual(A in B, False) or assertEqual(False, A in B) sentences
    N338
    """
    found = asse_equal_in_start_with_true_or_false_re.search(logical_line)
    if not found:
        found = asse_equal_in_end_with_true_or_false_re.search(logical_line)
    if found:
        yield (0, "N338: Use assertIn/NotIn(A, B) rather than "
               "assertEqual(A in B, True/False) when checking collection "
               "contents.")
def check_http_not_implemented(logical_line, physical_line, filename):
    """HTTPNotImplemented must go through raise_feature_not_supported (N339)."""
    if pep8.noqa(physical_line):
        return
    if "nova/api/openstack/compute" not in filename:
        return
    if re.match(http_not_implemented_re, logical_line):
        yield (0, ("N339: HTTPNotImplemented response must be implemented "
                   "with common raise_feature_not_supported()."))
def check_greenthread_spawns(logical_line, physical_line, filename):
    """Check for use of greenthread.spawn(), greenthread.spawn_n(),
    eventlet.spawn(), and eventlet.spawn_n()
    N340
    """
    # nova.utils wraps these itself; tests may use them directly
    if "nova/utils.py" in filename or "nova/tests/" in filename:
        return
    found = re.match(spawn_re, logical_line)
    if found:
        spawn_name = found.group('spawn_part')
        yield (0, ("N340: Use nova.utils.%(spawn)s() rather than "
                   "greenthread.%(spawn)s() and eventlet.%(spawn)s()")
               % {'spawn': spawn_name})
def check_no_contextlib_nested(logical_line, filename):
    """Flag uses of the deprecated contextlib.nested (N341)."""
    if not contextlib_nested.match(logical_line):
        return
    yield (0, "N341: contextlib.nested is deprecated. With Python 2.7 and later "
           "the with-statement supports multiple nested objects. See https://"
           "docs.python.org/2/library/contextlib.html#contextlib.nested for "
           "more information. nova.test.nested() is an alternative as well.")
def check_config_option_in_central_place(logical_line, filename):
    """Require new config options to live under /nova/conf/* (N342)."""
    # Options already living under nova/conf/ are in the right place.
    if "nova/conf/" in filename:
        return
    # TODO(markus_z) This is just temporary until all config options are
    # moved to the central place. To avoid that a once cleaned up place
    # introduces new config options, we do a check here. This array will
    # get quite huge over the time, but will be removed at the end of the
    # reorganization.
    # You can add the full path to a module or folder. It's just a substring
    # check, which makes it flexible enough.
    cleaned_up = ("nova/console/serial.py",
                  "nova/cmd/serialproxy.py")
    if not any(fragment in filename for fragment in cleaned_up):
        return
    if cfg_opt_re.match(logical_line):
        yield (0, "N342: Config options should be in the central location "
               "'/nova/conf/*'. Do not declare new config options outside "
               "of that folder.")
def check_policy_registration_in_central_place(logical_line, filename):
    """Require policy registration to live under /nova/policies/* (N350)."""
    # Registration inside nova/policies/ is the expected location.
    if "nova/policies/" in filename:
        return
    # A couple of policy tests register rules on purpose.
    if "nova/tests/unit/test_policy.py" in filename:
        return
    if rule_default_re.match(logical_line):
        yield (0, 'N350: Policy registration should be in the central '
               'location "/nova/policies/*".')
def check_policy_enforce(logical_line, filename):
    """Look for uses of nova.policy._ENFORCER.enforce()

    Now that policy defaults are registered in code the _ENFORCER.authorize
    method should be used. That ensures that only registered policies are used.
    Uses of _ENFORCER.enforce could allow unregistered policies to be used, so
    this check looks for uses of that method.

    N351
    """
    if not policy_enforce_re.match(logical_line):
        return
    yield (0, 'N351: nova.policy._ENFORCER.enforce() should not be used. '
           'Use the authorize() method instead.')
def check_doubled_words(physical_line, filename):
    """Check for the common doubled-word typos

    N343
    """
    # Physical-line checks return a single result instead of yielding.
    match = re.search(doubled_words_re, physical_line)
    if match is None:
        return None
    return (0, "N343: Doubled word '%(word)s' typo found"
            % {'word': match.group(1)})
def check_python3_no_iteritems(logical_line):
    """Flag Python-2-only dict.iteritems() usage (N344)."""
    match = re.search(r".*\.iteritems\(\)", logical_line)
    if match is not None:
        yield (0, "N344: Use six.iteritems() instead of dict.iteritems().")
def check_python3_no_iterkeys(logical_line):
    """Flag Python-2-only dict.iterkeys() usage (N345)."""
    match = re.search(r".*\.iterkeys\(\)", logical_line)
    if match is not None:
        yield (0, "N345: Use six.iterkeys() instead of dict.iterkeys().")
def check_python3_no_itervalues(logical_line):
    """Flag Python-2-only dict.itervalues() usage (N346)."""
    match = re.search(r".*\.itervalues\(\)", logical_line)
    if match is not None:
        yield (0, "N346: Use six.itervalues() instead of dict.itervalues().")
def no_os_popen(logical_line):
    """Disallow 'os.popen('

    Deprecated library function os.popen() Replace it using subprocess
    https://bugs.launchpad.net/tempest/+bug/1529836

    N348
    """
    if 'os.popen(' not in logical_line:
        return
    yield (0, 'N348 Deprecated library function os.popen(). '
           'Replace it using subprocess module. ')
def no_log_warn(logical_line):
    """Disallow 'LOG.warn('

    Deprecated LOG.warn(), instead use LOG.warning
    https://bugs.launchpad.net/senlin/+bug/1508442

    N352
    """
    if "LOG.warn(" in logical_line:
        yield (0, "N352: LOG.warn is deprecated, please use LOG.warning!")
def factory(register):
    """Register every Nova hacking check with the flake8 engine.

    Order matches the historical registration order.
    """
    all_checks = (
        import_no_db_in_virt,
        no_db_session_in_public_api,
        use_timeutils_utcnow,
        import_no_virt_driver_import_deps,
        import_no_virt_driver_config_deps,
        capital_cfg_help,
        no_vi_headers,
        no_import_translation_in_tests,
        assert_true_instance,
        assert_equal_type,
        assert_equal_none,
        assert_raises_regexp,
        no_translate_debug_logs,
        no_setting_conf_directly_in_tests,
        validate_log_translations,
        no_mutable_default_args,
        check_explicit_underscore_import,
        use_jsonutils,
        check_api_version_decorator,
        CheckForStrUnicodeExc,
        CheckForTransAdd,
        assert_true_or_false_with_in,
        dict_constructor_with_list_copy,
        assert_equal_in,
        check_http_not_implemented,
        check_no_contextlib_nested,
        check_greenthread_spawns,
        check_config_option_in_central_place,
        check_policy_registration_in_central_place,
        check_policy_enforce,
        check_doubled_words,
        check_python3_no_iteritems,
        check_python3_no_iterkeys,
        check_python3_no_itervalues,
        no_os_popen,
        no_log_warn,
        CheckForUncalledTestClosure,
    )
    for check in all_checks:
        register(check)
|
xuweiliang/Codelibrary
|
nova/hacking/checks.py
|
Python
|
apache-2.0
| 28,873
|
[
"VisIt"
] |
82cfc4c57b17605b61dad79cde0641d3d1a34dfcb80f179637caaa31d1e43b38
|
from __future__ import print_function
import os
import sys
import pickle
from math import pi
import numpy as np
from ase.units import Hartree, Bohr
import gpaw.mpi as mpi
from gpaw.response.chi0 import Chi0
from gpaw.response.kernel2 import calculate_Kxc
from gpaw.response.wstc import WignerSeitzTruncatedCoulomb
class DielectricFunction:
    """This class defines dielectric function related physical quantities."""
    def __init__(self, calc, name=None, frequencies=None, domega0=0.1,
                 omega2=10.0, omegamax=None, ecut=50,
                 hilbert=True, nbands=None,
                 eta=0.2, ftol=1e-6, intraband=True, world=mpi.world,
                 txt=sys.stdout, nthreads=1, gate_voltage=None):
        # All heavy work is delegated to the Chi0 (irreducible response
        # function) calculator; this class only post-processes its output.
        self.chi0 = Chi0(calc, frequencies, domega0=domega0,
                         omega2=omega2, omegamax=omegamax,
                         ecut=ecut, hilbert=hilbert, nbands=nbands,
                         eta=eta, ftol=ftol,
                         intraband=intraband, nthreads=nthreads,
                         world=world, txt=txt,
                         gate_voltage=gate_voltage)
        # Optional filename prefix used for caching chi0 results on disk.
        self.name = name
    def calculate_chi0(self, q_c):
        # Compute chi0 for wave vector q_c, using a pickle cache keyed by
        # the integer q-point index when self.name is set.
        # Returns (pd, chi0_wGG, chi0_wxvG, chi0_wvv); frequencies are
        # available separately as self.chi0.omega_w.
        if self.name:
            kd = self.chi0.calc.wfs.kd
            name = self.name + '%+d%+d%+d.pckl' % tuple((q_c * kd.N_c).round())
            if os.path.isfile(name):
                try:
                    # NOTE(review): file opened in text mode and never
                    # closed; 'rb' would be required on Python 3 — confirm.
                    omega_w, pd, chi0_wGG, chi0_wxvG, chi0_wvv = \
                        pickle.load(open(name))
                    print('Reading from file ', name)
                except EOFError:
                    # Truncated cache file: fall through and recompute.
                    pass
                else:
                    return pd, chi0_wGG, chi0_wxvG, chi0_wvv
        pd, chi0_wGG, chi0_wxvG, chi0_wvv = self.chi0.calculate(q_c)
        self.chi0.timer.write(self.chi0.fd)
        if self.name and mpi.rank == 0:
            with open(name, 'wb') as fd:
                pickle.dump((self.chi0.omega_w, pd,
                             chi0_wGG, chi0_wxvG, chi0_wvv), fd,
                            pickle.HIGHEST_PROTOCOL)
        # Wait for rank 0 to save Chi
        mpi.world.barrier()
        # Not returning frequencies will work for now
        return pd, chi0_wGG, chi0_wxvG, chi0_wvv
    def get_chi(self, xc='RPA', q_c=[0, 0, 0], direction='x',
                wigner_seitz_truncation=False):
        # Return (chi0_wGG, chi_wGG): the non-interacting and interacting
        # response matrices.  NOTE: chi0_wGG is modified in place below.
        pd, chi0_wGG, chi0_wxvG, chi0_wvv = self.calculate_chi0(q_c)
        G_G = pd.G2_qG[0]**0.5
        nG = len(G_G)
        if pd.kd.gamma:
            # Avoid division by |G|=0 at the Gamma point.
            G_G[0] = 1.0
        if isinstance(direction, str):
            d_v = {'x': [1, 0, 0],
                   'y': [0, 1, 0],
                   'z': [0, 0, 1]}[direction]
        else:
            d_v = direction
        d_v = np.asarray(d_v) / np.linalg.norm(d_v)
        # Insert the head and wings of chi0 for the chosen direction.
        chi0_wGG[:, 0] = np.dot(d_v, chi0_wxvG[:, 0])
        chi0_wGG[:, :, 0] = np.dot(d_v, chi0_wxvG[:, 1])
        chi0_wGG[:, 0, 0] = np.dot(d_v, np.dot(chi0_wvv, d_v).T)
        G_G /= (4 * pi)**0.5
        if wigner_seitz_truncation:
            kernel = WignerSeitzTruncatedCoulomb(pd.gd.cell_cv,
                                                 self.chi0.calc.wfs.kd.N_c)
            K_G = kernel.get_potential(pd)
            K_G *= G_G**2
            if pd.kd.gamma:
                K_G[0] = 0.0
        else:
            K_G = np.ones(nG)
        # Diagonal Coulomb kernel in plane-wave basis.
        K_GG = np.zeros((nG, nG), dtype=complex)
        for i in range(nG):
            K_GG[i, i] = K_G[i]
        if xc != 'RPA':
            # Add the exchange-correlation kernel for TDDFT.
            R_av = self.chi0.calc.atoms.positions / Bohr
            nt_sG = self.chi0.calc.density.nt_sG
            K_GG += calculate_Kxc(pd, nt_sG, R_av, self.chi0.calc.wfs.setups,
                                  self.chi0.calc.density.D_asp,
                                  functional=xc) * G_G * G_G[:, np.newaxis]
        chi_wGG = []
        for chi0_GG in chi0_wGG:
            chi0_GG[:] = chi0_GG / G_G / G_G[:, np.newaxis]
            # Dyson equation: chi = (1 - chi0 K)^-1 chi0.
            chi_wGG.append(np.dot(np.linalg.inv(np.eye(nG) -
                                                np.dot(chi0_GG, K_GG)),
                                  chi0_GG))
        return chi0_wGG, np.array(chi_wGG)
    def get_dielectric_matrix(self, xc='RPA', q_c=[0, 0, 0],
                              direction='x', wigner_seitz_truncation=False,
                              symmetric=True):
        """Returns the symmetrized dielectric matrix.

        ::

            \tilde\epsilon_GG' = v^{-1/2}_G \epsilon_GG' v^{-1/2}_G',

        where::

            epsilon_GG' = 1 - v_G * P_GG' and P_GG'

        is the polarization.

        ::

            In RPA:   P = chi^0
            In TDDFT: P = (1 - chi^0 * f_xc)^{-1} chi^0

        The head of the inverse symmetrized dielectric matrix is equal
        to the head of the inverse dielectric matrix (inverse dielectric
        function)
        """
        pd, chi0_wGG, chi0_wxvG, chi0_wvv = self.calculate_chi0(q_c)
        G_G = pd.G2_qG[0]**0.5
        nG = len(G_G)
        if pd.kd.gamma:
            G_G[0] = 1.0
        if isinstance(direction, str):
            d_v = {'x': [1, 0, 0],
                   'y': [0, 1, 0],
                   'z': [0, 0, 1]}[direction]
        else:
            d_v = direction
        d_v = np.asarray(d_v) / np.linalg.norm(d_v)
        # Insert head and wings of chi0 for the chosen direction.
        chi0_wGG[:, 0] = np.dot(d_v, chi0_wxvG[:, 0])
        chi0_wGG[:, :, 0] = np.dot(d_v, chi0_wxvG[:, 1])
        chi0_wGG[:, 0, 0] = np.dot(d_v, np.dot(chi0_wvv, d_v).T)
        if wigner_seitz_truncation:
            kernel = WignerSeitzTruncatedCoulomb(pd.gd.cell_cv,
                                                 self.chi0.calc.wfs.kd.N_c)
            K_G = kernel.get_potential(pd)**0.5
            if pd.kd.gamma:
                K_G[0] = 0.0
        else:
            # v^{1/2}_G of the bare Coulomb potential.
            K_G = (4 * pi)**0.5 / G_G
        if xc != 'RPA':
            R_av = self.chi0.calc.atoms.positions / Bohr
            nt_sG = self.chi0.calc.density.nt_sG
            Kxc_sGG = calculate_Kxc(pd, nt_sG, R_av,
                                    self.chi0.calc.wfs.setups,
                                    self.chi0.calc.density.D_asp,
                                    functional=xc)
        for chi0_GG in chi0_wGG:
            if xc == 'RPA':
                P_GG = chi0_GG
            else:
                P_GG = np.dot(np.linalg.inv(np.eye(nG) -
                                            np.dot(chi0_GG, Kxc_sGG[0])),
                              chi0_GG)
            if symmetric:
                e_GG = np.eye(nG) - P_GG * K_G * K_G[:, np.newaxis]
            else:
                K_GG = (K_G**2 * np.ones([nG, nG])).T
                e_GG = np.eye(nG) - P_GG * K_GG
            # Overwrite in place to save memory.
            chi0_GG[:] = e_GG
        # chi0_wGG is now the dielectric matrix
        return chi0_wGG
    def get_dielectric_function(self, xc='RPA', q_c=[0, 0, 0],
                                direction='x', filename='df.csv',
                                wigner_seitz_truncation=False):
        """Calculate the dielectric function.

        Returns dielectric function without and with local field correction:
        df_NLFC_w, df_LFC_w = DielectricFunction.get_dielectric_function()
        """
        e_wGG = self.get_dielectric_matrix(xc, q_c, direction,
                                           wigner_seitz_truncation)
        df_NLFC_w = np.zeros(len(e_wGG), dtype=complex)
        df_LFC_w = np.zeros(len(e_wGG), dtype=complex)
        for w, e_GG in enumerate(e_wGG):
            # Head of eps gives NLFC; head of eps^-1 gives LFC result.
            df_NLFC_w[w] = e_GG[0, 0]
            df_LFC_w[w] = 1 / np.linalg.inv(e_GG)[0, 0]
        if filename is not None and mpi.rank == 0:
            # CSV columns: omega [eV], Re/Im eps_NLFC, Re/Im eps_LFC.
            with open(filename, 'w') as fd:
                for omega, nlfc, lfc in zip(self.chi0.omega_w * Hartree,
                                            df_NLFC_w,
                                            df_LFC_w):
                    print('%.6f, %.6f, %.6f, %.6f, %.6f' %
                          (omega, nlfc.real, nlfc.imag, lfc.real, lfc.imag),
                          file=fd)
        return df_NLFC_w, df_LFC_w
    def get_macroscopic_dielectric_constant(self, xc='RPA', direction='x',
                                            wigner_seitz_truncation=False):
        """Calculate macroscopic dielectric constant.

        Returns eM_NLFC and eM_LFC.

        Macroscopic dielectric constant is defined as the real part
        of dielectric function at w=0.

        Returns:
        eps0: float
            Dielectric constant without local field correction.
        eps: float
            Dielectric constant with local field correction. (RPA, ALDA)
        """
        wst = wigner_seitz_truncation
        fd = self.chi0.fd
        print('', file=fd)
        print('%s Macroscopic Dielectric Constant:' % xc, file=fd)
        df_NLFC_w, df_LFC_w = self.get_dielectric_function(
            xc=xc,
            filename=None,
            direction=direction,
            wigner_seitz_truncation=wst)
        # w=0 is the first frequency point.
        eps0 = np.real(df_NLFC_w[0])
        eps = np.real(df_LFC_w[0])
        print('  %s direction' % direction, file=fd)
        print('  Without local field: %f' % eps0, file=fd)
        print('  Include local field: %f' % eps, file=fd)
        return eps0, eps
    def get_eels_spectrum(self, xc='RPA', q_c=[0, 0, 0],
                          direction='x', filename='eels.csv',
                          wigner_seitz_truncation=False):
        """Calculate EELS spectrum. By default, generate a file 'eels.csv'.

        EELS spectrum is obtained from the imaginary part of the inverse
        of dielectric function. Returns EELS spectrum without and with
        local field corrections:

        df_NLFC_w, df_LFC_w = DielectricFunction.get_eels_spectrum()
        """
        # Calculate dielectric function
        df_NLFC_w, df_LFC_w = self.get_dielectric_function(
            xc=xc, q_c=q_c,
            direction=direction,
            filename=None,
            wigner_seitz_truncation=wigner_seitz_truncation)
        Nw = df_NLFC_w.shape[0]
        # Calculate eels
        eels_NLFC_w = -(1 / df_NLFC_w).imag
        eels_LFC_w = -(1 / df_LFC_w).imag
        # Write to file
        if filename is not None and mpi.rank == 0:
            fd = open(filename, 'w')
            print('# energy, eels_NLFC_w, eels_LFC_w', file=fd)
            for iw in range(Nw):
                print('%.6f, %.6f, %.6f' %
                      (self.chi0.omega_w[iw] * Hartree,
                       eels_NLFC_w[iw], eels_LFC_w[iw]), file=fd)
            fd.close()
        return eels_NLFC_w, eels_LFC_w
    def get_polarizability(self, xc='RPA', direction='x',
                           wigner_seitz_truncation=False,
                           filename='polarizability.csv', pbc=None):
        """Calculate the polarizability alpha.

        In 3D the imaginary part of the polarizability is related to the
        dielectric function by Im(eps_M) = 4 pi * Im(alpha). In systems
        with reduced dimensionality the converged value of alpha is
        independent of the cell volume. This is not the case for eps_M,
        which is ill defined. A truncated Coulomb kernel will always give
        eps_M = 1.0, whereas the polarizability maintains its structure.

        By default, generate a file 'polarizability.csv'. The five colomns are:
        frequency (eV), Real(alpha0), Imag(alpha0), Real(alpha), Imag(alpha)
        alpha0 is the result without local field effects and the
        dimension of alpha is \AA to the power of non-periodic directions
        """
        cell_cv = self.chi0.calc.wfs.gd.cell_cv
        if not pbc:
            pbc_c = self.chi0.calc.atoms.pbc
        else:
            pbc_c = np.array(pbc)
        if pbc_c.all():
            V = 1.0
        else:
            # Volume factor of the non-periodic subcell.
            V = np.abs(np.linalg.det(cell_cv[~pbc_c][:, ~pbc_c]))
        if not wigner_seitz_truncation:
            # Without truncation alpha is simply related to eps_M
            df0_w, df_w = self.get_dielectric_function(xc=xc, q_c=[0, 0, 0],
                                                       filename=None,
                                                       direction=direction)
            alpha_w = V * (df_w - 1.0) / (4 * pi)
            alpha0_w = V * (df0_w - 1.0) / (4 * pi)
        else:
            # With truncation we need to calculate \chit = v^0.5*chi*v^0.5
            print('Using Wigner-Seitz truncated Coulomb interaction',
                  file=self.chi0.fd)
            chi0_wGG, chi_wGG = self.get_chi(xc=xc, direction=direction,
                                             wigner_seitz_truncation=True)
            alpha_w = -V * (chi_wGG[:, 0, 0]) / (4 * pi)
            alpha0_w = -V * (chi0_wGG[:, 0, 0]) / (4 * pi)
        Nw = len(alpha_w)
        if filename is not None and mpi.rank == 0:
            fd = open(filename, 'w')
            for iw in range(Nw):
                print('%.6f, %.6f, %.6f, %.6f, %.6f' %
                      (self.chi0.omega_w[iw] * Hartree,
                       alpha0_w[iw].real * Bohr**(sum(~pbc_c)),
                       alpha0_w[iw].imag * Bohr**(sum(~pbc_c)),
                       alpha_w[iw].real * Bohr**(sum(~pbc_c)),
                       alpha_w[iw].imag * Bohr**(sum(~pbc_c))), file=fd)
            fd.close()
        return alpha0_w * Bohr**(sum(~pbc_c)), alpha_w * Bohr**(sum(~pbc_c))
    def check_sum_rule(self, spectrum=None):
        """Check f-sum rule.

        It takes the y of a spectrum as an entry and it check its integral.
        """
        fd = self.chi0.fd
        if spectrum is None:
            raise ValueError('No spectrum input ')
        # Trapezoid-free Riemann sum of w * S(w); assumes a uniform
        # frequency grid (uses the first spacing only).
        dw = self.chi0.omega_w[1] - self.chi0.omega_w[0]
        N1 = 0
        for iw in range(len(spectrum)):
            w = iw * dw
            N1 += spectrum[iw] * w
        N1 *= dw * self.chi0.vol / (2 * pi**2)
        print('', file=fd)
        print('Sum rule:', file=fd)
        nv = self.chi0.calc.wfs.nvalence
        print('N1 = %f, %f  %% error' % (N1, (N1 - nv) / nv * 100), file=fd)
    def get_eigenmodes(self, q_c=[0, 0, 0], w_max=None, name=None,
                       eigenvalue_only=False):
        """Plasmon eigenmodes as eigenvectors of the dielectric matrix."""
        pd, chi0_wGG, chi0_wxvG, chi0_wvv = self.calculate_chi0(q_c)
        e_wGG = self.get_dielectric_matrix(xc='RPA', q_c=q_c,
                                           wigner_seitz_truncation=True,
                                           symmetric=False)
        kd = self.chi0.calc.wfs.kd
        # Get real space grid for plasmon modes:
        r = pd.gd.get_grid_point_coordinates()
        w_w = self.chi0.omega_w * Hartree
        if w_max:
            w_w = w_w[np.where(w_w < w_max)]
        Nw = len(w_w)
        nG = e_wGG.shape[1]
        eig = np.zeros([Nw, nG], dtype=complex)
        eig_all = np.zeros([Nw, nG], dtype=complex)
        # Find eigenvalues and eigenvectors:
        e_GG = e_wGG[0]
        eig_all[0], vec = np.linalg.eig(e_GG)
        eig[0] = eig_all[0]
        vec_dual = np.linalg.inv(vec)
        omega0 = np.array([])
        eigen0 = np.array([], dtype=complex)
        v_ind = np.zeros([0, r.shape[1], r.shape[2], r.shape[3]],
                         dtype=complex)
        n_ind = np.zeros([0, r.shape[1], r.shape[2], r.shape[3]],
                         dtype=complex)
        # Loop to find the eigenvalues that crosses zero
        # from negative to positive values:
        for i in np.array(range(1, Nw)):
            e_GG = e_wGG[i]  # epsilon_GG'(omega + d-omega)
            eig_all[i], vec_p = np.linalg.eig(e_GG)
            if eigenvalue_only:
                continue
            vec_dual_p = np.linalg.inv(vec_p)
            # Track eigenvalue branches between frequencies via the
            # overlap of left and right eigenvectors.
            overlap = np.abs(np.dot(vec_dual, vec_p))
            index = list(np.argsort(overlap)[:, -1])
            if len(np.unique(index)) < nG:  # add missing indices
                addlist = []
                removelist = []
                for j in range(nG):
                    if index.count(j) < 1:
                        addlist.append(j)
                    if index.count(j) > 1:
                        for l in range(1, index.count(j)):
                            removelist.append(
                                np.argwhere(np.array(index) == j)[l])
                for j in range(len(addlist)):
                    index[removelist[j]] = addlist[j]
            vec = vec_p[:, index]
            vec_dual = vec_dual_p[index, :]
            eig[i] = eig_all[i, index]
            for k in [k for k in range(nG)
                      # Eigenvalue crossing:
                      if (eig[i - 1, k] < 0 and eig[i, k] > 0)]:
                a = np.real((eig[i, k] - eig[i - 1, k]) /
                            (w_w[i] - w_w[i - 1]))
                # linear interp for crossing point
                w0 = np.real(-eig[i - 1, k]) / a + w_w[i - 1]
                eig0 = a * (w0 - w_w[i - 1]) + eig[i - 1, k]
                print('crossing found at w = %1.2f eV' % w0)
                omega0 = np.append(omega0, w0)
                eigen0 = np.append(eigen0, eig0)
                # Fourier Transform
                qG = pd.G_Qv[pd.Q_qG[0]] + pd.K_qv
                coef_G = np.diagonal(np.inner(qG, qG)) / (4 * pi)
                qGr_R = np.inner(qG, r.T).T
                phase = np.exp(1j * qGr_R)
                v_ind = np.append(v_ind,
                                  np.dot(phase, vec[:, k])[np.newaxis, :],
                                  axis=0)
                n_ind = np.append(n_ind,
                                  np.dot(phase, vec[:, k] *
                                         coef_G)[np.newaxis, :],
                                  axis=0)
        if name is None and self.name:
            name = (self.name + '%+d%+d%+d-eigenmodes.pckl' %
                    tuple((q_c * kd.N_c).round()))
        elif name:
            name = (name + '%+d%+d%+d-eigenmodes.pckl' %
                    tuple((q_c * kd.N_c).round()))
        else:
            name = '%+d%+d%+d-eigenmodes.pckl' % tuple((q_c * kd.N_c).round())
        # Returns: real space grid, frequency grid, all eigenvalues,
        # sorted eigenvalues, zero-crossing frequencies + eigenvalues,
        # induced potential + density in real space.
        if eigenvalue_only:
            pickle.dump((r * Bohr, w_w, eig_all),
                        open(name, 'wb'), pickle.HIGHEST_PROTOCOL)
            return r * Bohr, w_w, eig_all
        else:
            pickle.dump((r * Bohr, w_w, eig_all, eig, omega0, eigen0,
                         v_ind, n_ind),
                        open(name, 'wb'),
                        pickle.HIGHEST_PROTOCOL)
            return r * Bohr, w_w, eig_all, eig, omega0, eigen0, v_ind, n_ind
    def get_spatial_eels(self, q_c=[0, 0, 0], direction='x',
                         w_max=None, filename='eels'):
        """Spatially resolved loss spectrum.

        The spatially resolved loss spectrum is calculated as the inverse
        fourier transform of ``VChiV = (eps^{-1}-I)V``::

            EELS(w,r) = - Im [sum_{G,G'} e^{iGr} Vchi_{GG'}(w) V_G'e^{-iG'r}]
                          \delta(w-G\dot v_e )

        Input parameters:

        direction: 'x', 'y', or 'z'
            The direction for scanning acroos the structure
            (perpendicular to the electron beam) .
        w_max: float
            maximum frequency
        filename: str
            name of output

        Returns: real space grid, frequency points, EELS(w,r)
        """
        pd, chi0_wGG, chi0_wxvG, chi0_wvv = self.calculate_chi0(q_c)
        e_wGG = self.get_dielectric_matrix(xc='RPA', q_c=q_c,
                                           wigner_seitz_truncation=True,
                                           symmetric=False)
        r = pd.gd.get_grid_point_coordinates()
        # NOTE(review): integer midpoints rely on Python-2 integer
        # division; on Python 3 these would be floats — confirm target.
        ix = r.shape[1] / 2
        iy = r.shape[2] / 2
        iz = r.shape[3] / 2
        if direction == 'x':
            r = r[:, :, iy, iz]
            perpdir = [1, 2]
        if direction == 'y':
            r = r[:, ix, :, iz]
            perpdir = [0, 2]
        if direction == 'z':
            r = r[:, ix, iy, :]
            perpdir = [0, 1]
        nG = e_wGG.shape[1]
        Gvec = pd.G_Qv[pd.Q_qG[0]]
        Glist = []
        # Only use G-vectors that are zero along electron beam
        # due to \delta(w-G\dot v_e )
        for iG in range(nG):
            if Gvec[iG, perpdir[0]] == 0 and Gvec[iG, perpdir[1]] == 0:
                Glist.append(iG)
        qG = Gvec[Glist] + pd.K_qv
        w_w = self.chi0.omega_w * Hartree
        if w_max:
            w_w = w_w[np.where(w_w < w_max)]
        Nw = len(w_w)
        qGr = np.inner(qG, r.T).T
        phase = np.exp(1j * qGr)
        V_G = (4 * pi) / np.diagonal(np.inner(qG, qG))
        phase2 = np.exp(-1j * qGr) * V_G
        E_wrr = np.zeros([Nw, r.shape[1], r.shape[1]])
        E_wr = np.zeros([Nw, r.shape[1]])
        for i in range(Nw):
            Vchi_GG = (np.linalg.inv(
                e_wGG[i, np.array(Glist), :][:, np.array(Glist)]) -
                np.eye(len(Glist)))
            # Fourier transform:
            E_wrr[i] = -np.imag(np.dot(np.dot(phase, Vchi_GG), phase2.T))
            E_wr[i] = np.diagonal(E_wrr[i])
        pickle.dump((r * Bohr, w_w, E_wr), open('%s.pickle' % filename, 'wb'),
                    pickle.HIGHEST_PROTOCOL)
        return r * Bohr, w_w, E_wr
|
robwarm/gpaw-symm
|
gpaw/response/df.py
|
Python
|
gpl-3.0
| 21,827
|
[
"ASE",
"GPAW"
] |
2184a5e5472ca6bff507a4ffe9636eabe073ab20d628673455340a8f12f4cfd7
|
"""A convenience which constructs expression trees from an easy-to-read syntax
Use this unless you have a compelling reason not to; it performs some
optimizations that would be tedious to do when constructing an expression tree
by hand.
"""
from collections import Mapping
from inspect import isfunction, ismethod

from parsimonious.exceptions import BadGrammar, UndefinedLabel
from parsimonious.expressions import (Literal, Regex, Sequence, OneOf,
    Lookahead, Optional, ZeroOrMore, OneOrMore, Not, TokenMatcher,
    expression)
from parsimonious.nodes import NodeVisitor
from parsimonious.utils import StrAndRepr, evaluate_string
from six import (text_type, iterkeys, itervalues, iteritems,
                 python_2_unicode_compatible)
@python_2_unicode_compatible
class Grammar(StrAndRepr, Mapping):
    """A collection of rules that describe a language

    You can start parsing from the default rule by calling ``parse()``
    directly on the ``Grammar`` object::

        g = Grammar('''
                    polite_greeting = greeting ", my good " title
                    greeting        = "Hi" / "Hello"
                    title           = "madam" / "sir"
                    ''')
        g.parse('Hello, my good sir')

    Or start parsing from any of the other rules; you can pull them out of the
    grammar as if it were a dictionary::

        g['title'].parse('sir')

    You could also just construct a bunch of ``Expression`` objects yourself
    and stitch them together into a language, but using a ``Grammar`` has some
    important advantages:

    * Languages are much easier to define in the nice syntax it provides.
    * Circular references aren't a pain.
    * It does all kinds of whizzy space- and time-saving optimizations, like
      factoring up repeated subexpressions into a single object, which should
      increase cache hit ratio. [Is this implemented yet?]
    """
    def __init__(self, rules='', **more_rules):
        """Construct a grammar.

        :arg rules: A string of production rules, one per line.
        :arg default_rule: The name of the rule invoked when you call
            :meth:`parse()` or :meth:`match()` on the grammar. Defaults to the
            first rule. Falls back to None if there are no string-based rules
            in this grammar.
        :arg more_rules: Additional kwargs whose names are rule names and
            values are Expressions or custom-coded callables which accomplish
            things the built-in rule syntax cannot. These take precedence over
            ``rules`` in case of naming conflicts.
        """
        # Wrap plain callables with expression() so they behave like
        # Expression nodes; already-built Expressions pass through.
        decorated_custom_rules = dict(
            (k, expression(v, k, self) if isfunction(v) or
                ismethod(v) else
                v) for k, v in iteritems(more_rules))
        self._expressions, first = self._expressions_from_rules(rules, decorated_custom_rules)
        self.default_rule = first # may be None
    def __getitem__(self, rule_name):
        # Mapping protocol: grammar['rule'] -> Expression.
        return self._expressions[rule_name]
    def __iter__(self):
        # Iterates over rule names (Mapping convention).
        return iterkeys(self._expressions)
    def __len__(self):
        return len(self._expressions)
    def default(self, rule_name):
        """Return a new Grammar whose :term:`default rule` is ``rule_name``."""
        new = self._copy()
        new.default_rule = new[rule_name]
        return new
    def _copy(self):
        """Return a shallow copy of myself.

        Deep is unnecessary, since Expression trees are immutable. Subgrammars
        recreate all the Expressions from scratch, and AbstractGrammars have
        no Expressions.
        """
        new = Grammar(**self._expressions)
        new.default_rule = self.default_rule
        return new
    def _expressions_from_rules(self, rules, custom_rules):
        """Return a 2-tuple: a dict of rule names pointing to their
        expressions, and then the first rule.

        It's a web of expressions, all referencing each other. Typically,
        there's a single root to the web of references, and that root is the
        starting symbol for parsing, but there's nothing saying you can't have
        multiple roots.

        :arg custom_rules: A map of rule names to custom-coded rules:
            Expressions
        """
        tree = rule_grammar.parse(rules)
        return RuleVisitor(custom_rules).visit(tree)
    def parse(self, text, pos=0):
        """Parse some text with the :term:`default rule`.

        :arg pos: The index at which to start parsing
        """
        self._check_default_rule()
        return self.default_rule.parse(text, pos=pos)
    def match(self, text, pos=0):
        """Parse some text with the :term:`default rule` but not necessarily
        all the way to the end.

        :arg pos: The index at which to start parsing
        """
        self._check_default_rule()
        return self.default_rule.match(text, pos=pos)
    def _check_default_rule(self):
        """Raise RuntimeError if there is no default rule defined."""
        if not self.default_rule:
            raise RuntimeError("Can't call parse() on a Grammar that has no "
                               "default rule. Choose a specific rule instead, "
                               "like some_grammar['some_rule'].parse(...).")
    def __str__(self):
        """Return a rule string that, when passed to the constructor, would
        reconstitute the grammar."""
        # Default rule first, so round-tripping preserves it.
        exprs = [self.default_rule] if self.default_rule else []
        exprs.extend(expr for expr in itervalues(self) if
                     expr is not self.default_rule)
        return '\n'.join(expr.as_rule() for expr in exprs)
    def __repr__(self):
        """Return an expression that will reconstitute the grammar."""
        # NOTE(review): 'string_escape' is a Python-2-only codec; this
        # raises on Python 3 — confirm supported interpreter versions.
        return "Grammar('%s')" % str(self).encode('string_escape')
class TokenGrammar(Grammar):
    """A Grammar that consumes pre-lexed token sequences rather than text.

    Useful when lexing is done as a separate pass — for example, to
    implement indentation-based languages.
    """
    def _expressions_from_rules(self, rules, custom_rules):
        # Same pipeline as Grammar, but visited so the resulting
        # expressions match tokens instead of characters.
        parse_tree = rule_grammar.parse(rules)
        visitor = TokenRuleVisitor(custom_rules)
        return visitor.visit(parse_tree)
class BootstrappingGrammar(Grammar):
    """The grammar used to recognize the textual rules that describe other
    grammars

    This grammar gets its start from some hard-coded Expressions and claws its
    way from there to an expression tree that describes how to parse the
    grammar description syntax.
    """
    def _expressions_from_rules(self, rule_syntax, custom_rules):
        """Return the rules for parsing the grammar definition syntax.

        Return a 2-tuple: a dict of rule names pointing to their expressions,
        and then the top-level expression for the first rule.
        """
        # Hard-code enough of the rules to parse the grammar that describes the
        # grammar description language, to bootstrap:
        comment = Regex(r'#[^\r\n]*', name='comment')
        meaninglessness = OneOf(Regex(r'\s+'), comment, name='meaninglessness')
        _ = ZeroOrMore(meaninglessness, name='_')
        equals = Sequence(Literal('='), _, name='equals')
        label = Sequence(Regex(r'[a-zA-Z_][a-zA-Z_0-9]*'), _, name='label')
        reference = Sequence(label, Not(equals), name='reference')
        quantifier = Sequence(Regex(r'[*+?]'), _, name='quantifier')
        # This pattern supports empty literals. TODO: A problem?
        spaceless_literal = Regex(r'u?r?"[^"\\]*(?:\\.[^"\\]*)*"',
                                  ignore_case=True,
                                  dot_all=True,
                                  name='spaceless_literal')
        literal = Sequence(spaceless_literal, _, name='literal')
        regex = Sequence(Literal('~'),
                         literal,
                         Regex('[ilmsux]*', ignore_case=True),
                         _,
                         name='regex')
        atom = OneOf(reference, literal, regex, name='atom')
        quantified = Sequence(atom, quantifier, name='quantified')
        term = OneOf(quantified, atom, name='term')
        not_term = Sequence(Literal('!'), term, _, name='not_term')
        # Patch not_term in front of term's alternatives after the fact,
        # since term and not_term reference each other.
        term.members = (not_term,) + term.members
        sequence = Sequence(term, OneOrMore(term), name='sequence')
        or_term = Sequence(Literal('/'), _, term, name='or_term')
        ored = Sequence(term, OneOrMore(or_term), name='ored')
        expression = OneOf(ored, sequence, term, name='expression')
        rule = Sequence(label, equals, expression, name='rule')
        rules = Sequence(_, OneOrMore(rule), name='rules')
        # Use those hard-coded rules to parse the (more extensive) rule syntax.
        # (For example, unless I start using parentheses in the rule language
        # definition itself, I should never have to hard-code expressions for
        # those above.)
        rule_tree = rules.parse(rule_syntax)
        # Turn the parse tree into a map of expressions:
        return RuleVisitor().visit(rule_tree)
# The grammar for parsing PEG grammar definitions:
# This is a nice, simple grammar. We may someday add to it, but it's a safe bet
# that the future will always be a superset of this.
rule_syntax = (r'''
# Ignored things (represented by _) are typically hung off the end of the
# leafmost kinds of nodes. Literals like "/" count as leaves.
rules = _ rule*
rule = label equals expression
equals = "=" _
literal = spaceless_literal _
# So you can't spell a regex like `~"..." ilm`:
spaceless_literal = ~"u?r?\"[^\"\\\\]*(?:\\\\.[^\"\\\\]*)*\""is /
~"u?r?'[^'\\\\]*(?:\\\\.[^'\\\\]*)*'"is
expression = ored / sequence / term
or_term = "/" _ term
ored = term or_term+
sequence = term term+
not_term = "!" term _
lookahead_term = "&" term _
term = not_term / lookahead_term / quantified / atom
quantified = atom quantifier
atom = reference / literal / regex / parenthesized
regex = "~" spaceless_literal ~"[ilmsux]*"i _
parenthesized = "(" _ expression ")" _
quantifier = ~"[*+?]" _
reference = label !equals
# A subsequent equal sign is the only thing that distinguishes a label
# (which begins a new rule) from a reference (which is just a pointer to a
# rule defined somewhere else):
label = ~"[a-zA-Z_][a-zA-Z_0-9]*" _
# _ = ~r"\s*(?:#[^\r\n]*)?\s*"
_ = meaninglessness*
meaninglessness = ~r"\s+" / comment
comment = ~r"#[^\r\n]*"
''')
class LazyReference(text_type):
    """A placeholder for a rule reference, resolved once every rule is known."""
    # Default rule name; real instances carry their text as the string value.
    name = u''
    def _as_rhs(self):
        # Debugging aid only; never shows up in a serialized grammar.
        return u'<LazyReference to %s>' % self
class RuleVisitor(NodeVisitor):
"""Turns a parse tree of a grammar definition into a map of ``Expression``
objects
This is the magic piece that breathes life into a parsed bunch of parse
rules, allowing them to go forth and parse other things.
"""
quantifier_classes = {'?': Optional, '*': ZeroOrMore, '+': OneOrMore}
visit_expression = visit_term = visit_atom = NodeVisitor.lift_child
def __init__(self, custom_rules=None):
"""Construct.
:arg custom_rules: A dict of {rule name: expression} holding custom
rules which will take precedence over the others
"""
self.custom_rules = custom_rules or {}
def visit_parenthesized(self, parenthesized, _a):
"""Treat a parenthesized subexpression as just its contents.
Its position in the tree suffices to maintain its grouping semantics.
"""
(left_paren, _1, expression, right_paren, _2) = _a
return expression
def visit_quantifier(self, quantifier, _a):
"""Turn a quantifier into just its symbol-matching node."""
(symbol, _1) = _a
return symbol
def visit_quantified(self, quantified, _a):
(atom, quantifier) = _a
return self.quantifier_classes[quantifier.text](atom)
def visit_lookahead_term(self, lookahead_term, _a):
(ampersand, term, _) = _a
return Lookahead(term)
def visit_not_term(self, not_term, _a):
(exclamation, term, _) = _a
return Not(term)
def visit_rule(self, rule, _a):
"""Assign a name to the Expression and return it."""
(label, equals, expression) = _a
expression.name = label # Assign a name to the expr.
return expression
def visit_sequence(self, sequence, _a):
"""A parsed Sequence looks like [term node, OneOrMore node of
``another_term``s]. Flatten it out."""
(term, other_terms) = _a
return Sequence(term, *other_terms)
def visit_ored(self, ored, _a):
(first_term, other_terms) = _a
return OneOf(first_term, *other_terms)
def visit_or_term(self, or_term, _a):
"""Return just the term from an ``or_term``.
We already know it's going to be ored, from the containing ``ored``.
"""
(slash, _, term) = _a
return term
def visit_label(self, label, _a):
"""Turn a label into a unicode string."""
(name, _) = _a
return name.text
def visit_reference(self, reference, _a):
"""Stick a :class:`LazyReference` in the tree as a placeholder.
We resolve them all later.
"""
(label, not_equals) = _a
return LazyReference(label)
def visit_regex(self, regex, _a):
"""Return a ``Regex`` expression."""
(tilde, literal, flags, _) = _a
flags = flags.text.upper()
pattern = literal.literal # Pull the string back out of the Literal
# object.
return Regex(pattern, ignore_case='I' in flags,
locale='L' in flags,
multiline='M' in flags,
dot_all='S' in flags,
unicode='U' in flags,
verbose='X' in flags)
def visit_spaceless_literal(self, spaceless_literal, visited_children):
    """Build a ``Literal`` expression that recognizes the quoted string."""
    text = evaluate_string(spaceless_literal.text)
    return Literal(text)
def visit_literal(self, literal, _a):
    """Discard the trailing junk and keep only the literal itself."""
    lit = _a[0]
    return lit
def generic_visit(self, node, visited_children):
    """Default visit: prefer a node's children, else keep the node itself.

    When a node has children, only they matter to the tree we are building;
    childless nodes are preserved so later visitors can read details such as
    the flags of the regex rule. Most preserved nodes are dropped by other
    visitor methods anyway.

    We must not hang the visited children off the original node: a node may
    appear in several places in the tree, and sharing would corrupt it.
    """
    if visited_children:
        return visited_children  # should semantically be a tuple
    return node
def _resolve_refs(self, rule_map, expr, done):
    """Return an expression with all its lazy references recursively
    resolved.

    Resolve any lazy references in the expression ``expr``, recursing into
    all subexpressions.

    :arg rule_map: Mapping of rule name -> Expression used to look up
        reference targets
    :arg expr: The expression (possibly a LazyReference) to resolve
    :arg done: The set of Expressions that have already been or are
        currently being resolved, to ward off redundant work and prevent
        infinite recursion for circular refs
    :raises UndefinedLabel: If a reference names a rule absent from
        ``rule_map``
    """
    if isinstance(expr, LazyReference):
        label = text_type(expr)
        try:
            reffed_expr = rule_map[label]
        except KeyError:
            raise UndefinedLabel(expr)
        # The target may itself be (or contain) another LazyReference,
        # so resolve it recursively before returning.
        return self._resolve_refs(rule_map, reffed_expr, done)
    else:
        if getattr(expr, 'members', ()) and expr not in done:
            # Prevents infinite recursion for circular refs. At worst, one
            # of `expr.members` can refer back to `expr`, but it can't go
            # any farther.
            done.add(expr)
            expr.members = [self._resolve_refs(rule_map, member, done)
                            for member in expr.members]
        return expr
def visit_rules(self, node, _a):
    """Collate all the rules into a map. Return (map, default rule).

    The default rule is the first one. Or, if you have more than one rule
    of that name, it's the last-occurring rule of that name. (This lets you
    override the default rule when you extend a grammar.) If there are no
    string-based rules, the default rule is None, because the custom rules,
    due to being kwarg-based, are unordered.
    """
    (_, rules) = _a

    # Map each rule's name to its Expression. Later rules of the same name
    # override earlier ones. This lets us define rules multiple times and
    # have the last declaration win, so you can extend grammars by
    # concatenation.
    rule_map = dict((expr.name, expr) for expr in rules)

    # And custom rules override string-based rules. This is the least
    # surprising choice when you compare the dict constructor:
    # dict({'x': 5}, x=6).
    rule_map.update(self.custom_rules)

    # Resolve references. This tolerates forward references. The shared
    # `done` set makes resolution safe for circular references.
    done = set()
    rule_map = dict((expr.name, self._resolve_refs(rule_map, expr, done))
                    for expr in itervalues(rule_map))

    # isinstance() is a temporary hack around the fact that * rules don't
    # always get transformed into lists by NodeVisitor. We should fix that;
    # it's surprising and requires writing lame branches like this.
    return rule_map, (rule_map[rules[0].name]
                      if isinstance(rules, list) and rules else None)
class TokenRuleVisitor(RuleVisitor):
    """A visitor which builds expression trees meant to work on sequences of
    pre-lexed tokens rather than strings"""

    def visit_spaceless_literal(self, spaceless_literal, visited_children):
        """Map a quoted literal onto a ``TokenMatcher``.

        Token grammars compare against ``Token`` objects' ``type``
        attributes rather than raw characters.
        """
        token_type = evaluate_string(spaceless_literal.text)
        return TokenMatcher(token_type)

    def visit_regex(self, regex, _a):
        """Reject regexes: a token stream has no characters to match."""
        _tilde, _literal, _flags, _ws = _a
        raise BadGrammar('Regexes do not make sense in TokenGrammars, since '
                         'TokenGrammars operate on pre-lexed tokens rather '
                         'than characters.')
# Bootstrap to level 1: parse the rule syntax with the hand-built
# BootstrappingGrammar...
rule_grammar = BootstrappingGrammar(rule_syntax)
# ...and then to level 2. This establishes that the node tree of our rule
# syntax is built by the same machinery that will build trees of our users'
# grammars. And the correctness of that tree is tested, indirectly, in
# test_grammar.
rule_grammar = Grammar(rule_syntax)

# TODO: Teach Expression trees how to spit out Python representations of
# themselves. Then we can just paste that in above, and we won't have to
# bootstrap on import. Though it'll be a little less DRY. [Ah, but this is not
# so clean, because it would have to output multiple statements to get multiple
# refs to a single expression hooked up.]
|
smurfix/parsimonious
|
parsimonious/grammar.py
|
Python
|
mit
| 19,455
|
[
"VisIt"
] |
78501c810a5a9d5c5df3286f87d7f86ccde56ace74c595500a1fbe9c2f42d8d1
|
#!/galaxy/home/mgehrin/hiclib/bin/python
"""
Chops alignments in a MAF file to piece of a specified length. A random set of
non overlapping chunks of exactly the specified chop length will be produced
usage: %prog [options] < maf > maf
-l, --length: Chop to exactly this length in columns (default 100)
"""
import sys
import sys, random
import bx.align.maf
from optparse import OptionParser
def main():
    """Read MAF blocks from stdin, chop each one, and write results to stdout."""
    # Command-line options: -l/--length sets the chunk width in columns.
    parser = OptionParser()
    parser.add_option( "-l", "--length", action="store", type="int", default=100, help="" )
    ( options, args ) = parser.parse_args()
    chop_length = options.length
    reader = bx.align.maf.Reader( sys.stdin )
    writer = bx.align.maf.Writer( sys.stdout )
    for alignment in reader:
        for piece in chop( alignment, chop_length ):
            writer.write( piece )
def chop( m, length ):
    """Chop alignment ``m`` into non-overlapping chunks of exactly ``length``
    columns.

    The leftover columns (``text_size % length``) are distributed as randomly
    sized gaps before/between chunks, so the chunk start positions vary from
    run to run. Chunks in which any component ends up with zero aligned bases
    are discarded.

    :param m: alignment with a ``text_size`` attribute and a
        ``slice(start, end)`` method
    :param length: exact chunk width in columns
    :returns: list of alignment slices, each ``length`` columns wide
    """
    maf_length = m.text_size
    chunk_count = maf_length // length
    lost_bases = maf_length % length
    # Spread the lost columns randomly over the chunk_count + 1 gap slots
    # (before each chunk and after the last one).
    skip_amounts = [0] * ( chunk_count + 1 )
    for i in range( 0, lost_bases ):
        skip_amounts[ random.randrange( 0, chunk_count + 1 ) ] += 1
    start = 0
    rval = []
    for i in range( 0, chunk_count ):
        start += skip_amounts[ i ]
        # Slice once and reuse it; the original sliced twice with the same
        # arguments, doing the work twice for every kept chunk.
        piece = m.slice( start, start + length )
        if check_len( piece ):
            rval.append( piece )
        start += length
    return rval

def check_len( a ):
    """Return False if any component of alignment ``a`` has zero bases."""
    return all( c.size != 0 for c in a.components )
# Entry point: run as a stdin -> stdout MAF filter when invoked as a script.
if __name__ == "__main__": main()
|
bxlab/HiFive_Paper
|
Scripts/HiCLib/bx-python-0.7.1/build/scripts-2.7/maf_chop.py
|
Python
|
bsd-3-clause
| 1,524
|
[
"Galaxy"
] |
71cb5f67198e2ccd3750b4b47bcda494981e53880248cada9e57ac4164b7dd49
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
class Petsc(Package, CudaPackage, ROCmPackage):
    """PETSc is a suite of data structures and routines for the scalable
    (parallel) solution of scientific applications modeled by partial
    differential equations.
    """

    homepage = "https://www.mcs.anl.gov/petsc/index.html"
    url = "https://ftp.mcs.anl.gov/pub/petsc/release-snapshots/petsc-3.15.0.tar.gz"
    git = "https://gitlab.com/petsc/petsc.git"
    maintainers = ['balay', 'barrysmith', 'jedbrown']

    tags = ['e4s']

    # Known releases, newest first. Deprecated versions stay installable
    # but Spack warns when they are selected.
    version('main', branch='main')

    version('3.16.3', sha256='eff44c7e7f12991dc7d2b627c477807a215ce16c2ce8a1c78aa8237ddacf6ca5')
    version('3.16.2', sha256='7ab257ae150d4837ac8d3872a1d206997962578785ec2427639ceac46d131bbc')
    version('3.16.1', sha256='909cf7bce7b6a0ddb2580a1ac9502aa01631ec4105c716594c1804f0ee1ea06a')
    version('3.16.0', sha256='5aaad7deea127a4790c8aa95c42fd9451ab10b5d6c68b226b92d4853002f438d')
    version('3.15.5', sha256='67dc31f1c1c941a0e45301ed4042628586e92e8c4e9b119695717ae782ef23a3')
    version('3.15.4', sha256='1e62fb0859a12891022765d1e24660cfcd704291c58667082d81a0618d6b0047')
    version('3.15.3', sha256='483028088020001e6f8d57b78a7fc880ed52d6693f57d627779c428f55cff73d')
    version('3.15.2', sha256='3b10c19c69fc42e01a38132668724a01f1da56f5c353105cd28f1120cc9041d8')
    version('3.15.1', sha256='c0ac6566e69d1d70b431e07e7598e9de95e84891c2452db1367c846b75109deb')
    version('3.15.0', sha256='ac46db6bfcaaec8cd28335231076815bd5438f401a4a05e33736b4f9ff12e59a')
    version('3.14.6', sha256='4de0c8820419fb15bc683b780127ff57067b62ca18749e864a87c6d7c93f1230')
    version('3.14.5', sha256='8b8ff5c4e10468f696803b354a502d690c7d25c19d694a7e10008a302fdbb048')
    version('3.14.4', sha256='b030969816e02c251a6d010c07a90b69ade44932f9ddfac3090ff5e95ab97d5c')
    version('3.14.3', sha256='63ed7e3440f2bbc732a6c44aa878364f88f5016ab375d9b36d742893a049053d')
    version('3.14.2', sha256='87a04fd05cac20a2ec47094b7d18b96e0651257d8c768ced2ef7db270ecfb9cb')
    version('3.14.1', sha256='0b4681165a9af96594c794b97ac6993452ec902726679f6b50bb450f89d230ed')
    version('3.14.0', sha256='a8f9caba03e0d57d8452c08505cf96be5f6949adaa266e819382162c03ddb9c5')
    version('3.13.6', sha256='67ca2cf3040d08fdc51d27f660ea3157732b24c2f47aae1b19d63f62a39842c2')
    version('3.13.5', sha256='10fc542dab961c8b17db35ad3a208cb184c237fc84e183817e38e6c7ab4b8732')
    version('3.13.4', sha256='8d470cba1ceb9638694550134a2f23aac85ed7249cb74992581210597d978b94')
    version('3.13.3', sha256='dc744895ee6b9c4491ff817bef0d3abd680c5e3c25e601be44240ce65ab4f337')
    version('3.13.2', sha256='6083422a7c5b8e89e5e4ccf64acade9bf8ab70245e25bca3a3da03caf74602f1')
    version('3.13.1', sha256='74a895e44e2ff1146838aaccb7613e7626d99e0eed64ca032c87c72d084efac3')
    version('3.13.0', sha256='f0ea543a54145c5d1387e25b121c3fd1b1ca834032c5a33f6f1d929e95bdf0e5')
    version('3.12.5', sha256='d676eb67e79314d6cca6422d7c477d2b192c830b89d5edc6b46934f7453bcfc0')
    version('3.12.4', sha256='56a941130da93bbacb3cfa74dcacea1e3cd8e36a0341f9ced09977b1457084c3')
    version('3.12.3', sha256='91f77d7b0f54056f085b9e27938922db3d9bb1734a2e2a6d26f43d3e6c0cf631')
    version('3.12.2', sha256='d874b2e198c4cb73551c2eca1d2c5d27da710be4d00517adb8f9eb3d6d0375e8')
    version('3.12.1', sha256='b72d895d0f4a79acb13ebc782b47b26d10d4e5706d399f533afcd5b3dba13737')
    version('3.12.0', sha256='ba9ecf69783c7ebf05bd1c91dd1d4b38bf09b7a2d5f9a774aa6bb46deff7cb14')
    version('3.11.4', sha256='319cb5a875a692a67fe5b1b90009ba8f182e21921ae645d38106544aff20c3c1')
    version('3.11.3', sha256='199ad9650a9f58603b49e7fff7cd003ceb03aa231e5d37d0bf0496c6348eca81')
    version('3.11.2', sha256='4d244dd7d1565d6534e776445fcf6977a6ee2a8bb2be4a36ac1e0fc1f9ad9cfa')
    version('3.11.1', sha256='cb627f99f7ce1540ebbbf338189f89a5f1ecf3ab3b5b0e357f9e46c209f1fb23')
    version('3.11.0', sha256='b3bed2a9263193c84138052a1b92d47299c3490dd24d1d0bf79fb884e71e678a')
    version('3.10.5', sha256='3a81c8406410e0ffa8a3e9f8efcdf2e683cc40613c9bb5cb378a6498f595803e', deprecated=True)
    version('3.10.4', sha256='6c836df84caa9ae683ae401d3f94eb9471353156fec6db602bf2e857e4ec339f', deprecated=True)
    version('3.10.3', sha256='cd106babbae091604fee40c258737c84dec048949be779eaef5a745df3dc8de4', deprecated=True)
    version('3.10.2', sha256='9d3381bcf9c63abe6521b21a88efc70f8e893293503cff497971d0d9c1ec68cc', deprecated=True)
    version('3.10.1', sha256='b6e64ce062113ee0e2e2a6cfffb4d33c085ec91d5bc3afeb33781074aa5a22a5', deprecated=True)
    version('3.10.0', sha256='6ebacc010397ea47649495e8363cd7d7d86b876e6df07c6f6ccfa48b22fa555c', deprecated=True)
    version('3.9.4', sha256='ecc647c9b1ef565a2c113936454c65632eedc1626e0fc99b5a36accb91195a63', deprecated=True)
    version('3.9.3', sha256='6c7f2c7a28433385d74d647b4934aaeea3c1b3053b207973c9497639b6ebf7c8', deprecated=True)
    version('3.9.2', sha256='ab396ae5dbfff808df1b5648f5ce30f3021ec70faec3d5cd63df324d416ac6ac', deprecated=True)
    version('3.9.1', sha256='742e838a35d278693e956ed1ca4592c1d663451f6beea0694bf334aeb67681e8', deprecated=True)
    version('3.9.0', sha256='dcbcab1f321667be1c6e5f8e7b4ee8670bb09e372e51f1ea6471464519d54b2d', deprecated=True)
    version('3.8.4', sha256='9f78dc4dd4c58433fa18d3dd3a9029e39a83e4e4b64f845a029dd9fed44bc4c7', deprecated=True)
    version('3.8.3', sha256='01f9c3ed937eafac6c9e006510b61c7cd07197115ec40c429fc835f346ca3eac', deprecated=True)
    version('3.8.2', sha256='42690508d408e31fb98be738ac097bc869be14c5bfe08dda2184243283ceb16a', deprecated=True)
    version('3.8.1', sha256='9b48a9e72d304046923667d2ab1f201778cc56242928a374ff9e074843a334ff', deprecated=True)
    version('3.8.0', sha256='1e1b4d90ccbf98dc5759a956ac9a771310a6690f1cbb37b31502b29568262d7e', deprecated=True)
    version('3.7.7', sha256='40fd3bc76998e056c4097704c08f28eb89bf3b93164dc9e69abab393f43bf6f0', deprecated=True)
    version('3.7.6', sha256='3c8ee051349587d45baa7910c54ce8e0a571592e3b40f3054a7b7f986919d449', deprecated=True)
    version('3.7.5', sha256='493ab0b6c1b3fe68e71d990eff87c84f499f680e6d2c0c394e78646a82ed4be3', deprecated=True)
    version('3.7.4', sha256='54b804f924ea5be3b6718b4d4e98f8ccb9d1bd6bbbd1e9c0f18c4a90ddf5db18', deprecated=True)
    version('3.7.2', sha256='36681dd0df97e0d5cd182d902e89f527eb8f441f05271159dac5340acb4cf0ec', deprecated=True)
    version('3.6.4', sha256='eb09925a139b52b4dd5a071b3da4fe2165f1d6e8f71d410479603c9976c940f0', deprecated=True)
    version('3.6.3', sha256='776e2644e4003653c56a44a6f7c02c41427af26f7c5cd9bec3aa84ed90223245', deprecated=True)
    version('3.5.3', sha256='68e6a42f5ec75bad87f74d4df8f55ad63f0c4d996f162da6713cb3d6f566830d', deprecated=True)
    version('3.5.2', sha256='1a8f09af654afab787c732e7b2f5d0c1d856777398148351565389d38d30935e', deprecated=True)
    version('3.5.1', sha256='199af205f62dcc572728600670c7d4c8cb0d4efc4172c26f02b895d9dd1df245', deprecated=True)
    version('3.4.4', sha256='fa73b99caf70c416a967234f5476cdb1d2c014610ee0619e48f54d8d309631b7', deprecated=True)

    # Build-time options (variants). Most third-party integrations below
    # additionally require +mpi (enforced by the conflicts() further down).
    variant('shared', default=True,
            description='Enables the build of shared libraries')
    variant('mpi', default=True, description='Activates MPI support')
    variant('double', default=True,
            description='Switches between single and double precision')
    variant('complex', default=False, description='Build with complex numbers')
    variant('debug', default=False, description='Compile in debug mode')

    variant('metis', default=True,
            description='Activates support for metis and parmetis')
    variant('ptscotch', default=False,
            description='Activates support for PTScotch (only parallel)')
    variant('hdf5', default=True,
            description='Activates support for HDF5 (only parallel)')
    variant('hypre', default=True,
            description='Activates support for Hypre (only parallel)')
    variant('hpddm', default=False,
            description='Activates support for HPDDM (only parallel)')
    variant('mmg', default=False,
            description='Activates support for MMG')
    variant('parmmg', default=False,
            description='Activates support for ParMMG (only parallel)')
    variant('tetgen', default=False,
            description='Activates support for Tetgen')
    # Mumps is disabled by default, because it depends on Scalapack
    # which is not portable to all HPC systems
    variant('mumps', default=False,
            description='Activates support for MUMPS (only parallel)')
    variant('superlu-dist', default=True,
            description='Activates support for SuperluDist (only parallel)')
    variant('strumpack', default=False,
            description='Activates support for Strumpack')
    variant('scalapack', default=False,
            description='Activates support for Scalapack')
    variant('trilinos', default=False,
            description='Activates support for Trilinos (only parallel)')
    variant('mkl-pardiso', default=False,
            description='Activates support for MKL Pardiso')
    variant('int64', default=False,
            description='Compile with 64bit indices')
    variant('clanguage', default='C', values=('C', 'C++'),
            description='Specify C (recommended) or C++ to compile PETSc',
            multi=False)
    variant('fftw', default=False,
            description='Activates support for FFTW (only parallel)')
    variant('suite-sparse', default=False,
            description='Activates support for SuiteSparse')
    variant('knl', default=False,
            description='Build for KNL')
    variant('X', default=False,
            description='Activate X support')
    variant('batch', default=False,
            description='Enable when mpiexec is not available to run binaries')
    variant('valgrind', default=False,
            description='Enable Valgrind Client Request mechanism')
    variant('jpeg', default=False,
            description='Activates support for JPEG')
    variant('libpng', default=False,
            description='Activates support for PNG')
    variant('giflib', default=False,
            description='Activates support for GIF')
    variant('mpfr', default=False,
            description='Activates support for MPFR')
    variant('moab', default=False,
            description='Acivates support for MOAB (only parallel)')
    variant('random123', default=False,
            description='Activates support for Random123')
    variant('exodusii', default=False,
            description='Activates support for ExodusII (only parallel)')
    variant('cgns', default=False,
            description='Activates support for CGNS (only parallel)')
    variant('memkind', default=False,
            description='Activates support for Memkind')
    variant('p4est', default=False,
            description='Activates support for P4Est (only parallel)')
    variant('saws', default=False,
            description='Activates support for Saws')
    variant('libyaml', default=False,
            description='Activates support for YAML')
    variant('openmp', default=False,
            description='Activates support for openmp')
    variant('hwloc', default=False,
            description='Activates support for hwloc')
    variant('kokkos', default=False,
            description='Activates support for kokkos and kokkos-kernels')

    # 3.8.0 has a build issue with MKL - so list this conflict explicitly
    conflicts('^intel-mkl', when='@3.8.0')

    # These require +mpi
    mpi_msg = 'Requires +mpi'
    conflicts('+cgns', when='~mpi', msg=mpi_msg)
    conflicts('+exodusii', when='~mpi', msg=mpi_msg)
    conflicts('+fftw', when='~mpi', msg=mpi_msg)
    conflicts('+hdf5', when='~mpi', msg=mpi_msg)
    conflicts('+hypre', when='~mpi', msg=mpi_msg)
    conflicts('+hpddm', when='~mpi', msg=mpi_msg)
    conflicts('+parmmg', when='~mpi', msg=mpi_msg)
    conflicts('+moab', when='~mpi', msg=mpi_msg)
    conflicts('+mumps', when='~mpi', msg=mpi_msg)
    conflicts('+p4est', when='~mpi', msg=mpi_msg)
    conflicts('+ptscotch', when='~mpi', msg=mpi_msg)
    conflicts('+superlu-dist', when='~mpi', msg=mpi_msg)
    conflicts('+trilinos', when='~mpi', msg=mpi_msg)
    conflicts('+kokkos', when='~mpi', msg=mpi_msg)
    conflicts('^openmpi~cuda', when='+cuda')  # +cuda requires CUDA enabled OpenMPI

    # older versions of petsc did not support mumps when +int64
    conflicts('+mumps', when='@:3.12+int64')

    filter_compiler_wrappers(
        'petscvariables', relative_root='lib/petsc/conf'
    )

    # temporary workaround Clang 8.1.0 with XCode 8.3 on macOS, see
    # https://bitbucket.org/petsc/petsc/commits/4f290403fdd060d09d5cb07345cbfd52670e3cbc
    # the patch is an adaptation of the original commit to 3.7.5
    patch('macos-clang-8.1.0.diff', when='@3.7.5%apple-clang@8.1.0:')
    patch('pkg-config-3.7.6-3.8.4.diff', when='@3.7.6:3.8.4')
    patch('xcode_stub_out_of_sync.patch', when='@:3.10.4')
    patch('xlf_fix-dup-petscfecreate.patch', when='@3.11.0')
    patch('disable-DEPRECATED_ENUM.diff', when='@3.14.1 +cuda')

    depends_on('diffutils', type='build')

    # Virtual dependencies
    # Git repository needs sowing to build Fortran interface
    depends_on('sowing', when='@main')

    # PETSc, hypre, superlu_dist when built with int64 use 32 bit integers
    # with BLAS/LAPACK
    depends_on('blas')
    depends_on('lapack')
    depends_on('mpi', when='+mpi')
    depends_on('cuda', when='+cuda')

    # GPU (ROCm) toolchain pieces, all keyed on +rocm.
    depends_on('hip', when='+rocm')
    depends_on('hipblas', when='+rocm')
    depends_on('hipsparse', when='+rocm')
    depends_on('rocsparse', when='+rocm')
    depends_on('rocsolver', when='+rocm')
    depends_on('rocblas', when='+rocm')
    depends_on('rocrand', when='+rocm')
    depends_on('rocthrust', when='+rocm')
    depends_on('rocprim', when='+rocm')

    # Build dependencies
    depends_on('python@2.6:2.8', type='build', when='@:3.10')
    depends_on('python@2.6:2.8,3.4:', type='build', when='@3.11:')

    # Other dependencies
    depends_on('metis@5:~int64+real64', when='@:3.7+metis~int64+double')
    depends_on('metis@5:~int64', when='@:3.7+metis~int64~double')
    depends_on('metis@5:+int64+real64', when='@:3.7+metis+int64+double')
    depends_on('metis@5:+int64', when='@:3.7+metis+int64~double')
    # petsc-3.8+ uses default (float) metis with any (petsc) precision
    depends_on('metis@5:~int64', when='@3.8:+metis~int64')
    depends_on('metis@5:+int64', when='@3.8:+metis+int64')

    # PTScotch: Currently disable Parmetis wrapper, this means
    # nested disection won't be available thought PTScotch
    depends_on('scotch+esmumps~metis+mpi', when='+ptscotch')
    depends_on('scotch+int64', when='+ptscotch+int64')

    depends_on('hdf5@:1.10+mpi', when='@:3.12+hdf5+mpi')
    depends_on('hdf5+mpi', when='@3.13:+hdf5+mpi')
    depends_on('hdf5+mpi', when='+exodusii+mpi')
    depends_on('hdf5+mpi', when='+cgns+mpi')
    depends_on('zlib', when='+hdf5')
    depends_on('zlib', when='+libpng')
    depends_on('zlib', when='+p4est')
    depends_on('parmetis+int64', when='+metis+mpi+int64')
    depends_on('parmetis~int64', when='+metis+mpi~int64')
    depends_on('valgrind', when='+valgrind')
    depends_on('mmg', when='+mmg')
    depends_on('parmmg', when='+parmmg')
    depends_on('tetgen+pic', when='+tetgen')
    # Hypre does not support complex numbers.
    # Also PETSc prefer to build it without internal superlu, likely due to
    # conflict in headers see
    # https://bitbucket.org/petsc/petsc/src/90564b43f6b05485163c147b464b5d6d28cde3ef/config/BuildSystem/config/packages/hypre.py
    depends_on('hypre@:2.13+mpi~internal-superlu~int64', when='@:3.8+hypre+mpi~complex~int64')
    depends_on('hypre@:2.13+mpi~internal-superlu+int64', when='@:3.8+hypre+mpi~complex+int64')
    depends_on('hypre@2.14:2.18.2+mpi~internal-superlu~int64', when='@3.9:3.13+hypre+mpi~complex~int64')
    depends_on('hypre@2.14:2.18.2+mpi~internal-superlu+int64', when='@3.9:3.13+hypre+mpi~complex+int64')
    depends_on('hypre@2.14:2.22.0+mpi~internal-superlu~int64', when='@3.14:3.15+hypre+mpi~complex~int64')
    depends_on('hypre@2.14:2.22.0+mpi~internal-superlu+int64', when='@3.14:3.15+hypre+mpi~complex+int64')
    depends_on('hypre@2.14:+mpi~internal-superlu~int64', when='@3.16:+hypre+mpi~complex~int64')
    depends_on('hypre@2.14:+mpi~internal-superlu+int64', when='@3.16:+hypre+mpi~complex+int64')
    depends_on('hypre@develop+mpi~internal-superlu+int64', when='@main+hypre+mpi~complex+int64')
    depends_on('hypre@develop+mpi~internal-superlu~int64', when='@main+hypre+mpi~complex~int64')
    depends_on('superlu-dist@:4.3~int64', when='@3.4.4:3.6.4+superlu-dist+mpi~int64')
    depends_on('superlu-dist@:4.3+int64', when='@3.4.4:3.6.4+superlu-dist+mpi+int64')
    depends_on('superlu-dist@5.0.0:5.1.3~int64', when='@3.7.0:3.7+superlu-dist+mpi~int64')
    depends_on('superlu-dist@5.0.0:5.1.3+int64', when='@3.7.0:3.7+superlu-dist+mpi+int64')
    depends_on('superlu-dist@5.2.0:5.2~int64', when='@3.8:3.9+superlu-dist+mpi~int64')
    depends_on('superlu-dist@5.2.0:5.2+int64', when='@3.8:3.9+superlu-dist+mpi+int64')
    depends_on('superlu-dist@5.4.0:5.4~int64', when='@3.10:3.10.2+superlu-dist+mpi~int64')
    depends_on('superlu-dist@5.4.0:5.4+int64', when='@3.10:3.10.2+superlu-dist+mpi+int64')
    depends_on('superlu-dist@6.1.0:6.1~int64', when='@3.10.3:3.12+superlu-dist+mpi~int64')
    depends_on('superlu-dist@6.1.0:6.1+int64', when='@3.10.3:3.12+superlu-dist+mpi+int64')
    depends_on('superlu-dist@6.1:~int64', when='@3.13.0:+superlu-dist+mpi~int64')
    depends_on('superlu-dist@6.1:+int64', when='@3.13.0:+superlu-dist+mpi+int64')
    depends_on('superlu-dist@develop~int64', when='@main+superlu-dist+mpi~int64')
    depends_on('superlu-dist@develop+int64', when='@main+superlu-dist+mpi+int64')
    depends_on('strumpack', when='+strumpack')
    depends_on('scalapack', when='+strumpack')
    depends_on('metis', when='+strumpack')
    depends_on('scalapack', when='+scalapack')
    depends_on('mumps+mpi~int64~metis~parmetis~openmp', when='+mumps~metis~openmp')
    depends_on('mumps+mpi~int64+metis+parmetis~openmp', when='+mumps+metis~openmp')
    depends_on('mumps+mpi~int64~metis~parmetis+openmp', when='+mumps~metis+openmp')
    depends_on('mumps+mpi~int64+metis+parmetis+openmp', when='+mumps+metis+openmp')
    depends_on('scalapack', when='+mumps')
    depends_on('trilinos@12.6.2:+mpi', when='@3.7.0:+trilinos+mpi')
    depends_on('trilinos@develop+mpi', when='@main+trilinos+mpi')
    depends_on('mkl', when='+mkl-pardiso')
    depends_on('fftw+mpi', when='+fftw+mpi')
    depends_on('suite-sparse', when='+suite-sparse')
    depends_on('libx11', when='+X')
    depends_on('mpfr', when='+mpfr')
    depends_on('gmp', when='+mpfr')
    depends_on('jpeg', when='+jpeg')
    depends_on('libpng', when='+libpng')
    depends_on('giflib', when='+giflib')
    depends_on('exodusii+mpi', when='+exodusii+mpi')
    depends_on('netcdf-c+mpi', when='+exodusii+mpi')
    depends_on('parallel-netcdf', when='+exodusii+mpi')
    depends_on('random123', when='+random123')
    depends_on('moab+mpi', when='+moab+mpi')
    depends_on('cgns+mpi', when='+cgns+mpi')
    depends_on('memkind', when='+memkind')
    depends_on('p4est+mpi', when='+p4est+mpi')
    depends_on('saws', when='+saws')
    depends_on('libyaml', when='+libyaml')
    depends_on('hwloc', when='+hwloc')
    depends_on('kokkos', when='+kokkos')
    depends_on('kokkos-kernels', when='+kokkos')
    depends_on('kokkos+cuda+wrapper+cuda_lambda', when='+kokkos +cuda')
    depends_on('kokkos-kernels+cuda', when='+kokkos +cuda')
    depends_on('kokkos+rocm', when='+kokkos +rocm')
# Tarball naming changed over time:
#   * petsc-3.12 (and older) - includes docs
#   * petsc-lite-3.13, petsc-lite-3.14 (without docs)
#   * petsc-3.15 and newer (without docs)
def url_for_version(self, version):
    """Return the release tarball URL, accounting for the 'petsc-lite'
    naming used by the 3.13.0-3.14.6 releases."""
    lite = self.spec.satisfies('@3.13.0:3.14.6')
    template = ("http://ftp.mcs.anl.gov/pub/petsc/release-snapshots/petsc-lite-{0}.tar.gz"
                if lite else
                "http://ftp.mcs.anl.gov/pub/petsc/release-snapshots/petsc-{0}.tar.gz")
    return template.format(version)
def mpi_dependent_options(self):
    """Return configure options selecting the compilers.

    Without MPI, the raw Spack compiler wrappers from the environment are
    used and MPI is disabled; with MPI, the MPI compiler wrappers are used
    instead.
    """
    if '~mpi' in self.spec:
        # Serial build: point configure at the plain compilers; '0'
        # disables a missing C++/Fortran compiler.
        compiler_opts = [
            '--with-cc=%s' % os.environ['CC'],
            '--with-cxx=%s' % (os.environ['CXX']
                               if self.compiler.cxx is not None else '0'),
            '--with-fc=%s' % (os.environ['FC']
                              if self.compiler.fc is not None else '0'),
            '--with-mpi=0'
        ]
    else:
        # Parallel build: use the MPI wrappers supplied by the mpi spec.
        compiler_opts = [
            '--with-cc=%s' % self.spec['mpi'].mpicc,
            '--with-cxx=%s' % self.spec['mpi'].mpicxx,
            '--with-fc=%s' % self.spec['mpi'].mpifc,
        ]
        if self.spec.satisfies('%intel'):
            # mpiifort needs some help to automatically link
            # all necessary run-time libraries
            compiler_opts.append('--FC_LINKER_FLAGS=-lintlc')
    return compiler_opts
def install(self, spec, prefix):
    """Configure, build, install, and optionally check PETSc.

    Builds the configure option list from the spec's variants and
    dependencies, then runs PETSc's python-based configure followed by
    make/make install.
    """
    options = ['--with-ssl=0',
               '--download-c2html=0',
               '--download-sowing=0',
               '--download-hwloc=0',
               'CFLAGS=%s' % ' '.join(spec.compiler_flags['cflags']),
               'FFLAGS=%s' % ' '.join(spec.compiler_flags['fflags']),
               'CXXFLAGS=%s' % ' '.join(spec.compiler_flags['cxxflags'])]
    options.extend(self.mpi_dependent_options())
    options.extend([
        '--with-precision=%s' % (
            'double' if '+double' in spec else 'single'),
        '--with-scalar-type=%s' % (
            'complex' if '+complex' in spec else 'real'),
        '--with-shared-libraries=%s' % ('1' if '+shared' in spec else '0'),
        '--with-debugging=%s' % ('1' if '+debug' in spec else '0'),
        '--with-openmp=%s' % ('1' if '+openmp' in spec else '0'),
        '--with-64-bit-indices=%s' % ('1' if '+int64' in spec else '0')
    ])
    if '+debug' not in spec:
        # Clear PETSc's default optimization flags; Spack injects its own.
        options.extend(['COPTFLAGS=',
                        'FOPTFLAGS=',
                        'CXXOPTFLAGS='])

    # Make sure we use exactly the same Blas/Lapack libraries
    # across the DAG. To that end list them explicitly
    lapack_blas = spec['lapack'].libs + spec['blas'].libs
    options.extend([
        '--with-blas-lapack-lib=%s' % lapack_blas.joined()
    ])

    if '+batch' in spec:
        options.append('--with-batch=1')
    if '+knl' in spec:
        options.append('--with-avx-512-kernels')
        options.append('--with-memalign=64')
    if '+X' in spec:
        options.append('--with-x=1')
    else:
        options.append('--with-x=0')

    if 'trilinos' in spec:
        if spec.satisfies('^trilinos+boost'):
            options.append('--with-boost=1')

    if self.spec.satisfies('clanguage=C++'):
        options.append('--with-clanguage=C++')
    else:
        options.append('--with-clanguage=C')

    # Activates library support if needed (i.e. direct dependency)
    jpeg_sp = spec['jpeg'].name if 'jpeg' in spec else 'jpeg'
    scalapack_sp = spec['scalapack'].name if 'scalapack' in spec else 'scalapack'

    # tuple format (spacklibname, petsclibname, useinc, uselib)
    # default: 'gmp', => ('gmp', 'gmp', True, True)
    # any other combination needs a full tuple
    # if not (useinc || uselib): usedir - i.e (False, False)
    for library in (
            ('cuda', 'cuda', False, False),
            ('hip', 'hip', True, False),
            'metis',
            'hypre',
            'parmetis',
            ('kokkos', 'kokkos', False, False),
            ('kokkos-kernels', 'kokkos-kernels', False, False),
            ('superlu-dist', 'superlu_dist', True, True),
            ('scotch', 'ptscotch', True, True),
            ('suite-sparse:umfpack,klu,cholmod,btf,ccolamd,colamd,camd,amd, \
suitesparseconfig', 'suitesparse', True, True),
            ('hdf5:hl,fortran', 'hdf5', True, True),
            'zlib',
            'mumps',
            ('trilinos', 'trilinos', False, False),
            ('fftw:mpi', 'fftw', True, True),
            ('valgrind', 'valgrind', False, False),
            'gmp',
            'libpng',
            ('giflib', 'giflib', False, False),
            'mpfr',
            ('netcdf-c', 'netcdf', True, True),
            ('parallel-netcdf', 'pnetcdf', True, True),
            ('moab', 'moab', False, False),
            ('random123', 'random123', False, False),
            'exodusii',
            'cgns',
            'memkind',
            'p4est',
            ('saws', 'saws', False, False),
            ('libyaml', 'yaml', True, True),
            'hwloc',
            (jpeg_sp, 'libjpeg', True, True),
            (scalapack_sp, 'scalapack', False, True),
            'strumpack',
            'mmg',
            'parmmg',
            ('tetgen', 'tetgen', False, False),
    ):
        # Cannot check `library in spec` because of transitive deps
        # Cannot check variants because parmetis keys on +metis
        if isinstance(library, tuple):
            spacklibname, petsclibname, useinc, uselib = library
        else:
            spacklibname = library
            petsclibname = library
            useinc = True
            uselib = True

        library_requested = spacklibname.split(':')[0] in spec.dependencies_dict()
        options.append(
            '--with-{library}={value}'.format(
                library=petsclibname,
                value=('1' if library_requested else '0'))
        )
        if library_requested:
            if useinc or uselib:
                if useinc:
                    options.append(
                        '--with-{library}-include={value}'.format(
                            library=petsclibname,
                            value=spec[spacklibname].prefix.include)
                    )
                if uselib:
                    options.append(
                        '--with-{library}-lib={value}'.format(
                            library=petsclibname,
                            value=spec[spacklibname].libs.joined())
                    )
            else:
                options.append(
                    '--with-{library}-dir={path}'.format(
                        library=petsclibname, path=spec[spacklibname].prefix)
                )

    if '+cuda' in spec:
        if not spec.satisfies('cuda_arch=none'):
            cuda_arch = spec.variants['cuda_arch'].value
            if spec.satisfies('@3.14:'):
                options.append('--with-cuda-gencodearch={0}'.format(cuda_arch[0]))
            else:
                options.append('CUDAFLAGS=-gencode arch=compute_{0},code=sm_{0}'
                               .format(cuda_arch[0]))
    if '+rocm' in spec:
        if not spec.satisfies('amdgpu_target=none'):
            hip_arch = spec.variants['amdgpu_target'].value
            options.append('--with-hip-arch={0}'.format(hip_arch[0]))
        hip_pkgs = ['hipsparse', 'hipblas', 'rocsparse', 'rocsolver', 'rocblas']
        hip_ipkgs = hip_pkgs + ['rocthrust', 'rocprim']
        hip_lpkgs = hip_pkgs + ['rocrand']
        hip_inc = ''
        hip_lib = ''
        for pkg in hip_ipkgs:
            hip_inc += spec[pkg].headers.include_flags + ' '
        for pkg in hip_lpkgs:
            hip_lib += spec[pkg].libs.joined() + ' '
        options.append('HIPPPFLAGS=%s' % hip_inc)
        # BUG FIX: the option was previously emitted as 'with-hip-lib=...'
        # without the leading '--', so configure would not recognize it.
        options.append('--with-hip-lib=%s -L%s -lamdhip64' %
                       (hip_lib, spec['hip'].prefix.lib))

    if 'superlu-dist' in spec:
        if spec.satisfies('@3.10.3:3.15'):
            options.append('--with-cxx-dialect=C++11')

    if '+mkl-pardiso' in spec:
        options.append(
            '--with-mkl_pardiso-dir=%s' % spec['mkl'].prefix
        )

    # For the moment, HPDDM does not work as a dependency
    # using download instead
    if '+hpddm' in spec:
        options.append('--download-hpddm')

    # revert changes by kokkos-nvcc-wrapper
    if spec.satisfies('^kokkos+cuda+wrapper'):
        env['MPICH_CXX'] = env['CXX']
        env['OMPI_CXX'] = env['CXX']
        env['MPICXX_CXX'] = env['CXX']

    python('configure', '--prefix=%s' % prefix, *options)

    # PETSc has its own way of doing parallel make.
    make('V=1 MAKE_NP=%s' % make_jobs, parallel=False)
    make("install")

    if self.run_tests:
        make('check PETSC_ARCH="" PETSC_DIR={0}'.format(self.prefix),
             parallel=False)
def setup_build_environment(self, env):
    """Scrub PETSC_DIR/PETSC_ARCH from the build environment.

    PETSc's configure fails if these env vars leak in from outside Spack.
    """
    for var in ('PETSC_DIR', 'PETSC_ARCH'):
        env.unset(var)
def setup_run_environment(self, env):
    """Expose the installation via PETSC_DIR in the module file."""
    env.unset('PETSC_ARCH')
    env.set('PETSC_DIR', self.prefix)
def setup_dependent_build_environment(self, env, dependent_spec):
    """Point dependents at this PETSc installation via PETSC_DIR."""
    env.unset('PETSC_ARCH')
    env.set('PETSC_DIR', self.prefix)
@property
def archive_files(self):
    """Build logs worth preserving for post-mortem debugging."""
    log_names = ('configure.log', 'make.log')
    return [join_path(self.stage.source_path, name) for name in log_names]
@property
def headers(self):
    """PETSc headers under prefix/include, or None to signal failure."""
    hdrs = find_headers('petsc', self.prefix.include, recursive=False)
    return hdrs if hdrs else None
# For the 'libs' property - use the default handler.

@run_after('install')
def setup_build_tests(self):
    """Copy the build test files after the package is installed to an
    install test subdirectory for use during `spack test run`."""
    for subdir in ('src/ksp/ksp/tutorials', 'src/snes/tutorials'):
        self.cache_extra_test_sources(subdir)
def test(self):
    """Smoke-test the installed PETSc.

    Builds and runs the cached KSP tutorial ex50 (2-D Poisson solve),
    once per enabled solver variant, plus ex7 on '+cuda' and the Kokkos
    SNES tutorial ex3k on '+kokkos'.
    """
    # solve Poisson equation in 2D to make sure nothing is broken:
    spec = self.spec
    # The tutorial makefiles need PETSC_DIR pointing at the installed
    # prefix and an empty PETSC_ARCH.
    env['PETSC_DIR'] = self.prefix
    env['PETSC_ARCH'] = ''
    if ('+mpi' in spec):
        runexe = Executable(join_path(spec['mpi'].prefix.bin,
                                      'mpiexec')).command
        runopt = ['-n', '4']
    else:
        # Uniprocessor builds ship a stub launcher.
        runexe = Executable(join_path(self.prefix,
                                      'lib/petsc/bin/petsc-mpiexec.uni')).command
        runopt = ['-n', '1']
    w_dir = join_path(self.install_test_root, 'src/ksp/ksp/tutorials')
    with working_dir(w_dir):
        testexe = ['ex50', '-da_grid_x', '4', '-da_grid_y', '4']
        # Extra solver options keyed by the variant that enables them;
        # the None entry is the always-run default configuration.
        testdict = {
            None: [],
            '+superlu-dist':
                ['-pc_type', 'lu', '-pc_factor_mat_solver_type', 'superlu_dist'],
            '+mumps':
                ['-pc_type', 'lu', '-pc_factor_mat_solver_type', 'mumps'],
            '+hypre':
                ['-pc_type', 'hypre', '-pc_hypre_type', 'boomeramg'],
            '+mkl-pardiso':
                ['-pc_type', 'lu', '-pc_factor_mat_solver_type', 'mkl_pardiso'],
        }
        make('ex50', parallel=False)
        for feature, featureopt in testdict.items():
            if not feature or feature in spec:
                self.run_test(runexe, runopt + testexe + featureopt)
        if '+cuda' in spec:
            make('ex7', parallel=False)
            testexe = ['ex7', '-mat_type', 'aijcusparse',
                       '-sub_pc_factor_mat_solver_type', 'cusparse',
                       '-sub_ksp_type', 'preonly', '-sub_pc_type', 'ilu',
                       '-use_gpu_aware_mpi', '0']
            self.run_test(runexe, runopt + testexe)
        make('clean', parallel=False)
    w_dir = join_path(self.install_test_root, 'src/snes/tutorials')
    with working_dir(w_dir):
        if '+kokkos' in spec:
            make('ex3k', parallel=False)
            testexe = ['ex3k', '-view_initial', '-dm_vec_type', 'kokkos',
                       '-dm_mat_type', 'aijkokkos', '-use_gpu_aware_mpi', '0',
                       '-snes_monitor']
            self.run_test(runexe, runopt + testexe)
        make('clean', parallel=False)
|
LLNL/spack
|
var/spack/repos/builtin/packages/petsc/package.py
|
Python
|
lgpl-2.1
| 33,000
|
[
"NetCDF"
] |
e96e77d65af8a7d87cc7c70656a2c9267199b3d8091dd5960f2d522ed3564e0c
|
#!/usr/bin/env python
from math import sqrt
import gtk
from ase.gui.languages import translate as _
from ase.gui.widgets import pack, Help
class DFT(gtk.Window):
    """GTK dialog for choosing DFT settings (XC functional) in the ASE GUI.

    NOTE(review): this excerpt appears corrupted or truncated — several
    names used in ``__init__`` are undefined in this module (see inline
    notes). Confirm against the upstream ASE source before relying on it.
    """

    def __init__(self, gui):
        gtk.Window.__init__(self)
        self.set_title(_('DFT'))
        vbox = gtk.VBox()
        # Drop-down of supported exchange-correlation functionals.
        combo = gtk.combo_box_new_text()
        self.xcfuncs = 'None LDA PBE revPBE RPBE PW91 EXX PBE0'.split()
        for xc in self.xcfuncs:
            combo.append_text(xc)
        pack(vbox, [gtk.Label(_('XC-functional: ')), combo])
        # NOTE(review): `radio`, `monkhorstpack` and `special` are not
        # defined anywhere in this module — these two lines would raise
        # NameError at runtime; they look pasted from another dialog.
        button=radio(None,monkhorstpack)
        button=radio(button, special)
        pack(vbox, gtk.Label(_('Repeat atoms:')))
        self.kpts = [gtk.Adjustment(r, 1, 99, 1) for r in gui.atoms.repeat]
        # NOTE(review): the next two statements read self.repeat, but only
        # self.kpts was assigned above, and self.change is never defined —
        # AttributeError at runtime; presumably `self.repeat` was intended.
        pack(vbox, [gtk.SpinButton(r, 0, 0) for r in self.repeat])
        for r in self.repeat:
            r.connect('value-changed', self.change)
        close = pack(vbox, gtk.Button(_('Close')))
        close.connect('clicked', lambda widget: self.destroy())
        self.add(vbox)
        vbox.show()
        self.show()
        self.gui = gui
        # Pre-select the functional currently stored on the atoms object.
        xc = gui.atoms.dft.get('xc', 'None')
        combo.set_active(self.xcfuncs.index(xc))

    def selected(self, button):
        # Freeze the selected atoms: the dynamic mask is the inverse of
        # the current selection mask.
        self.gui.atoms.dynamic = ~self.gui.atoms.selected
        self.gui.draw()

    def immobile(self, button):
        self.gui.atoms.set_dynamic()
        self.gui.draw()

    def clear(self, button):
        # Make every atom movable again.
        self.gui.atoms.dynamic[:] = True
        self.gui.draw()
|
freephys/python_ase
|
ase/gui/dft.py
|
Python
|
gpl-3.0
| 1,485
|
[
"ASE"
] |
f1fd52205d00a9b588d285bb3f7d48338fc3a50621959c37095d5138ec3546e3
|
# -*- coding: utf-8 -*-
#
# test_3d_exp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module
EXPERIMENTAL example of 3d layer.
3d layers are currently not supported, use at your own risk!
Hans Ekkehard Plesser, UMB
This example uses the function GetChildren, which is deprecated. A deprecation
warning is therefore issued. For details about deprecated functions, see
documentation.
'''
import nest
import pylab
import random
import nest.topology as topo
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

# Interactive plotting so figures appear as they are drawn.
pylab.ion()

nest.ResetKernel()

# generate list of 1000 (x,y,z) triplets drawn uniformly from [-0.5, 0.5)
pos = [[random.uniform(-0.5, 0.5), random.uniform(-0.5, 0.5),
        random.uniform(-0.5, 0.5)]
       for j in range(1000)]

l1 = topo.CreateLayer(
    {'extent': [1.5, 1.5, 1.5],  # must specify 3d extent AND center
     'center': [0., 0., 0.],
     'positions': pos,
     'elements': 'iaf_psc_alpha'})

# visualize
# xext, yext = nest.GetStatus(l1, 'topology')[0]['extent']
# xctr, yctr = nest.GetStatus(l1, 'topology')[0]['center']
# l1_children is a work-around until NEST 3.0 is released
l1_children = nest.GetChildren(l1)[0]

# extract position information, transpose to list of x, y and z positions
xpos, ypos, zpos = zip(*topo.GetPosition(l1_children))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xpos, ypos, zpos, s=15, facecolor='b', edgecolor='none')

# Exponential-kernel connections in full volume [-0.75,0.75]**3
# (the original comment said "Gaussian", but the kernel used below is
# the exponential kernel c + a*exp(-d/tau))
topo.ConnectLayers(l1, l1,
                   {'connection_type': 'divergent', 'allow_autapses': False,
                    'mask': {'volume': {'lower_left': [-0.75, -0.75, -0.75],
                                        'upper_right': [0.75, 0.75, 0.75]}},
                    'kernel': {'exponential':
                               {'c': 0., 'a': 1., 'tau': 0.25}}})

# show connections from center element
# sender shown in red, targets in green
ctr = topo.FindCenterElement(l1)
xtgt, ytgt, ztgt = zip(*topo.GetTargetPositions(ctr, l1)[0])
xctr, yctr, zctr = topo.GetPosition(ctr)[0]
ax.scatter([xctr], [yctr], [zctr], s=40, facecolor='r', edgecolor='none')
ax.scatter(xtgt, ytgt, ztgt, s=40, facecolor='g', edgecolor='g')

# histogram of distances from the center element to each of its targets
tgts = topo.GetTargetNodes(ctr, l1)[0]
d = topo.Distance(ctr, tgts)
plt.figure()
plt.hist(d, 25)
# plt.show()
|
tillschumann/nest-simulator
|
topology/examples/test_3d_exp.py
|
Python
|
gpl-2.0
| 2,956
|
[
"Gaussian"
] |
5d2f7504bdbd43d8721f6bdd3d90ddd11ebb5b885801453831819e244c6f27a8
|
import argparse
import sys
import datetime
import os
from collections import OrderedDict
import json
import base64
import logging
from functools import partial
from itertools import chain
import pybedtools
import pysam
import vcf
import fasta_utils
# Directory containing this module; used to locate bundled resources.
mydir = os.path.dirname(os.path.realpath(__file__))
# Skeleton VCF whose header metadata is reused for the output file.
VCF_TEMPLATE = os.path.join(mydir, "resources/template.vcf")
def check_duplicates(interval1, interval2, max_dist=10):
    """Merge two near-duplicate SV intervals into one, or return None.

    Two intervals are duplicates when they share chromosome and SV type
    (field 3, second comma token) and both endpoints lie within
    ``max_dist`` bases. The merged interval unions endpoints, methods and
    sources, and is PASS if either input passed or enough independent
    (non SC/AS) methods agree.
    """
    if interval1.chrom != interval2.chrom or \
            abs(interval1.start - interval2.start) > max_dist or \
            abs(interval1.end - interval2.end) > max_dist or \
            interval1.fields[3].split(",")[1] != \
            interval2.fields[3].split(",")[1]:
        return None
    # Field 3's first comma token is a base64-encoded JSON INFO blob.
    info1 = json.loads(base64.b64decode(interval1.fields[3].split(",")[0]))
    info2 = json.loads(base64.b64decode(interval2.fields[3].split(",")[0]))
    svmethods = sorted(list(set(info1["SVMETHOD"] + info2["SVMETHOD"])))
    sources = []
    if "SOURCES" in info1:
        sources.append(info1["SOURCES"])
    if "SOURCES" in info2:
        sources.append(info2["SOURCES"])
    sources = ",".join(sources)
    if sources:
        info1["SOURCES"] = sources
    # PASS if either original passed, or >1 independent methods agree
    # (SC = soft-clip, AS = assembly are not counted as independent).
    if "PASS" in [interval1.fields[7], interval2.fields[7]] or (
            "AS" not in svmethods and len(set(svmethods) - {"SC", "AS"}) > 1):
        sv_filter = "PASS"
    else:
        sv_filter = "LowQual"
    end = max(interval1.end, interval2.end)
    start = min(interval1.start, interval2.start)
    info1.update(
        {"END": end, "SVMETHOD": svmethods, "NUM_SVMETHODS": len(svmethods)})
    # Re-encode merged INFO into the name field; keep interval1's genotype
    # (field 6) and the recomputed filter as extra columns.
    return pybedtools.Interval(interval1.chrom, start, end,
                               name="%s,%s,%d,%s" % (
                                   base64.b64encode(json.dumps(info1)),
                                   info1["SVTYPE"], end - start,
                                   ";".join(svmethods)),
                               score=interval1.score,
                               otherfields=[interval1.fields[6], sv_filter])
def get_interval_info(feature, pass_calls):
    """Extract SV call details from a merged BED feature.

    Returns a dict with pos/end/info/sv_type/genotype/sv_length/svmethods/
    sv_filter, or None when the call should be skipped (hom-ref genotype,
    unknown SV type, pos < 1, or a failed INS breakpoint check).

    Python 2 code: ``map`` returns lists and the comprehension variable
    ``name`` deliberately(?) leaks below.
    """
    func_logger = logging.getLogger("%s" % get_interval_info.__name__)
    pos = feature.start
    end = feature.end
    # Column 11 (when present) carries the genotype; default to unknown.
    genotype = "./." if len(feature.fields) < 12 else feature.fields[11]
    if genotype == "0/0":
        func_logger.info("Skipping homozygous reference %s" % str(feature))
        return None
    # The name encodes one sub-call per ':' group, each a CSV of
    # (b64-info, sv_type, length, methods).
    sub_names = feature.name.split(":")
    sub_lengths = map(lambda x: int(x.split(",")[2]), sub_names)
    sub_types = map(lambda x: x.split(",")[1], sub_names)
    sub_methods = [name.split(",")[3] for name in sub_names]
    svmethods = (";".join([name.split(",")[3] for name in sub_names])).split(
        ";")
    try:
        # NOTE(review): `name` here is the loop variable leaked from the
        # list comprehensions above (last element of sub_names) — this is
        # Python-2-only behavior and would raise NameError under Python 3.
        info = json.loads(base64.b64decode(name.split(",")[0]))
    except TypeError:
        info = dict()
    if len(feature.fields) > 10:
        info.update(json.loads(base64.b64decode(feature.fields[10])))
    index_to_use = 0
    is_pass = False
    svlen = -1
    sv_type = None
    # Pick the first sub-call type in this fixed precedence order.
    for sub_type in ["DEL", "INV", "DUP", "ITX", "CTX", "INS"]:
        if sub_type in sub_types:
            index_to_use = sub_types.index(sub_type)
            sv_type = sub_type
            break
    if not sv_type:
        func_logger.info("Unknown SV type! %s" % str(feature))
        return None
    if sv_type != "INS":
        # PASS requires agreement of >1 methods other than SC/AS.
        svmethods_s = set(svmethods) - {"SC", "AS"}
        is_pass = len(svmethods_s) > 1
        if "AS" in svmethods:
            # Assembly provides refined breakpoints in fields 6-8.
            pos, end, svlen = map(int, feature.fields[6:9])
            is_pass = svlen >= 100
        if sv_type in ["ITX", "CTX"]:
            end = info["END"]
        if svlen < 0: svlen = sub_lengths[index_to_use]
        if sv_type == "DEL":
            # Deletions are reported with negative SVLEN by convention.
            svlen = -svlen
    else:
        if "SC" in svmethods or "AS" in svmethods:
            # TODO: I think it should be sub_types.index
            # index_to_use = [i for i,methods in enumerate(sub_methods) if ("SC" in methods) or ("AS" in svmethods)][0]
            pos, end, svlen = map(int, feature.fields[6:9])
        if svlen < 0: svlen = sub_lengths[index_to_use]
        if pass_calls and end != pos + 1:
            # A precise insertion breakpoint must span exactly one base.
            return None
        end = pos
        is_pass = (int(feature.fields[8]) != -1) and (
            svlen == 0 or svlen >= 100) and (
            ("SC" in svmethods or "AS" in svmethods) or (
                len(set(svmethods) - {"SC", "AS"}) > 1))
    if pos < 1:
        func_logger.info(
            "Variant with pos < 1 encountered. Skipping! %s" % str(feature))
        return None
    info.update(
        {"END": end, "SVLEN": svlen, "SVTYPE": sv_type, "SVMETHOD": svmethods,
         "NUM_SVMETHODS": len(svmethods)})
    if "IMPRECISE" in info:
        info["IMPRECISE"] = True
    sv_filter = "PASS" if is_pass else "LowQual"
    interval_info = {"pos": pos, "end": end, "info": info, "sv_type": sv_type,
                     "genotype": genotype,
                     "sv_length": abs(svlen), "svmethods": svmethods,
                     "sv_filter": sv_filter}
    return interval_info
def filter_confused_INS_calls(nonfilterd_bed, filterd_bed, wiggle=20):
    """Remove INS calls that sit on a breakpoint of a confident DEL/INV.

    Insertions called within ``wiggle`` bases of a breakpoint of a
    non-LowQual DEL or INV are likely artifacts of the same event and are
    dropped; everything else from ``nonfilterd_bed`` is written to
    ``filterd_bed``, which is returned.

    Fix over original: removed the unused local ``nonins_intervals``.
    """
    bedtool = pybedtools.BedTool(nonfilterd_bed)
    # Split calls into insertions and everything else (type is the second
    # comma token of the name field).
    bedtool_INS = bedtool.filter(lambda x: "INS" in x.name.split(",")[1]).sort()
    bedtool_others = bedtool.filter(
        lambda x: "INS" not in x.name.split(",")[1]).sort()
    # Confident (non-LowQual) DEL/INV calls define the breakpoints that
    # can "confuse" insertion callers.
    bedtool_good_nonINS = bedtool_others.filter(
        lambda x: ("DEL" in x.name.split(",")[1] or
                   "INV" in x.name.split(",")[1]) and
                  x.fields[7] != "LowQual").saveas()
    # Build +/- wiggle windows around each confident breakpoint.
    nonINS_bp_intervals = []
    for interval in bedtool_good_nonINS:
        start = interval.start
        end = interval.end
        nonINS_bp_intervals.append(
            pybedtools.Interval(interval.chrom, max(start - wiggle, 0),
                                start + wiggle))
        nonINS_bp_intervals.append(
            pybedtools.Interval(interval.chrom, max(end - wiggle, 0),
                                end + wiggle))
    bedtool_bp_nonINS = pybedtools.BedTool(nonINS_bp_intervals)
    # INS calls overlapping a breakpoint window are discarded (v=True
    # keeps only insertions with no such overlap).
    bad_INS = bedtool_INS.window(bedtool_bp_nonINS, w=wiggle)
    bedtool_filtered = bedtool_INS.window(bad_INS, w=wiggle, v=True).saveas()
    if len(bedtool_filtered) == 0:
        bedtool_others.saveas(filterd_bed)
    elif len(bedtool_others) == 0:
        bedtool_filtered.saveas(filterd_bed)
    else:
        bedtool_filtered = bedtool_filtered.cat(bedtool_others,
                                                postmerge=False).sort().saveas(
            filterd_bed)
    return filterd_bed
def find_idp(feature, wiggle):
    """Detect a DUP/DEL pair forming an interspersed duplication (IDP).

    ``feature`` is a windowed DUP+DEL row: the first half of the fields is
    the DUP, the second half the DEL. Returns an interval annotated with
    the deletion breakpoint and span, or None when the pair doesn't match.
    """
    # Python 2 integer division: fields split evenly between the two calls.
    n = len(feature.fields) / 2
    if feature.chrom != feature.fields[n]:
        return None
    start_dup = feature.start
    end_dup = feature.end
    start_del = int(feature.fields[n + 1])
    end_del = int(feature.fields[n + 2])
    # The DEL must fit inside the DUP (minus wiggle tolerance).
    if abs(start_del - end_del) > (abs(start_dup - end_dup) - wiggle):
        return None
    dist_ends = [abs(start_del - start_dup), abs(end_del - end_dup)]
    # One DEL endpoint must coincide (within wiggle) with a DUP endpoint.
    if min(dist_ends) > wiggle:
        return None
    # The non-anchored DEL endpoint is the putative insertion position.
    del_pos = start_del if dist_ends[0] > dist_ends[1] else end_del
    name = "%s,%s" % (feature.name, feature.fields[n + 3])
    score = "%s,%s" % (feature.score, feature.fields[n + 4])
    return pybedtools.Interval(feature.chrom, feature.start, feature.end,
                               name=name, score=score,
                               otherfields=["%d" % del_pos,
                                            "%d-%d" % (start_del, end_del)])
def find_itx(feature, wiggle):
    """Combine two overlapping IDP candidates into an ITX (intra-chromosomal
    translocation) call, or return None.

    ``feature`` is a self-windowed IDP row: first half of the fields is one
    IDP candidate, second half the other.
    """
    # Python 2 integer division: split the fields between the two IDPs.
    n = len(feature.fields) / 2
    start_idp1 = feature.start
    end_idp1 = feature.end
    start_idp2 = int(feature.fields[n + 1])
    end_idp2 = int(feature.fields[n + 2])
    dist_ends = [abs(start_idp1 - start_idp2), abs(end_idp1 - end_idp2)]
    # The two IDP intervals must share an endpoint within wiggle.
    if min(dist_ends) > wiggle:
        return None
    del_pos1 = int(feature.fields[6])
    del_pos2 = int(feature.fields[n + 6])
    # Their deletion breakpoints must also agree within wiggle.
    if abs(del_pos1 - del_pos2) > wiggle:
        return None
    del_interval1 = map(int, feature.fields[7].split("-"))
    del_interval2 = map(int, feature.fields[n + 7].split("-"))
    # lr_* = 1 when the breakpoint anchors the left end of its DEL span;
    # a valid ITX pairs a left-anchored with a right-anchored breakpoint
    # in that order.
    lr_1 = 1 if abs(del_pos1 - del_interval1[0]) < abs(
        del_pos1 - del_interval1[1]) else 0
    lr_2 = 1 if abs(del_pos2 - del_interval2[0]) < abs(
        del_pos2 - del_interval2[1]) else 0
    if lr_1 == lr_2 or lr_2 < lr_1:
        return None
    del_id_2 = feature.name.split(",")[-1]
    del_filter_2 = feature.score.split(",")[-1]
    name = "%s,%s" % (feature.name, del_id_2)
    score = "%s,%s" % (feature.score, del_filter_2)
    return pybedtools.Interval(feature.chrom, feature.start, feature.end,
                               name=name, score=score,
                               otherfields=["%d" % ((del_pos1 + del_pos2) / 2),
                                            "%d-%d,%d-%d" % (
                                                del_interval1[0], del_interval1[1],
                                                del_interval2[0],
                                                del_interval2[1])])
def build_chr2_ins(feature, thr_top=0.15):
    """Project an INS call onto its soft-clip mate chromosomes.

    Field 6 holds a string of ``chr2;count;start;end`` groups separated by
    commas. Chromosomes attracting more than ``thr_top`` of the supporting
    reads (excluding "-1" and the call's own chromosome) yield one interval
    each, carrying the original name/score. Returns [] when none qualify.
    """
    sc_chr2_str = feature.fields[6]
    if sc_chr2_str == ".":
        return []
    # Parse "chr;count;start;end" groups into [chr2, [count, start, end]].
    sub_str = map(lambda x: [x.split(";")[0], map(int, x.split(";")[1:])],
                  sc_chr2_str.split(","))
    chr2_dict = {}
    for chr2, poses in sub_str:
        if chr2 not in chr2_dict:
            chr2_dict[chr2] = []
        chr2_dict[chr2].append(poses)
    # Aggregate per chromosome: total read count, min start, max end.
    # (iteritems: Python 2 only.)
    chr2_dict = {k: [sum(map(lambda x: x[0], v)), min(map(lambda x: x[1], v)),
                     max(map(lambda x: x[2], v))] for k, v in
                 chr2_dict.iteritems()}
    sorted_chr2 = sorted(chr2_dict.items(), key=lambda x: x[1][0], reverse=True)
    n_reads = sum(map(lambda x: x[1][0], sorted_chr2))
    top_chr2s = filter(
        lambda x: x[1][0] > (thr_top * n_reads) and x[0] not in ["-1",
                                                                 feature.chrom],
        sorted_chr2)
    if not top_chr2s:
        return []
    ctx_intervals = []
    for chr2, [cnt, start, end] in top_chr2s:
        ctx_intervals.append(pybedtools.Interval(chr2, start, end,
                                                 name=feature.name,
                                                 score=feature.score))
    return ctx_intervals
def find_ctx(feature, overlap_ratio=0.9):
    """Pair a DEL with an overlapping chr2-projected INS as a CTX candidate.

    ``feature`` is an intersected DEL+INS row (first/second half of fields).
    NOTE(review): ``overlap_ratio`` is accepted but unused here — the
    overlap filtering happens in the caller's intersect(); confirm intent.
    """
    # Python 2 integer division: fields split between the two calls.
    n = len(feature.fields) / 2
    start_del_ins = int(feature.fields[n + 1])
    end_del_ins = int(feature.fields[n + 2])
    name = "%s,%s" % (feature.name, feature.fields[n + 3])
    score = "%s,%s" % (feature.score, feature.fields[n + 4])
    return pybedtools.Interval(feature.chrom, feature.start, feature.end,
                               name=name, score=score,
                               otherfields=[".", "%d-%d" % (
                                   start_del_ins, end_del_ins)])
def extract_del_interval(feature):
    """Return a bare interval covering the DEL span encoded as
    "start-end" in field 7 of an IDP candidate."""
    lo_text, hi_text = feature.fields[7].split("-")
    return pybedtools.Interval(feature.chrom, int(lo_text), int(hi_text))
def filter_itxs(feature):
    """Drop IDP candidates already consumed by a high-quality ITX.

    ``feature`` is an IDP row windowed against the ITX set; the second
    half of the fields is the ITX. Returns the trimmed IDP interval, or
    None when its DEL span exactly matches one of the ITX's DEL spans and
    the ITX is not LowQual.
    """
    # Python 2 integer division: fields split between IDP and ITX halves.
    n = len(feature.fields) / 2
    del_interval_idp = map(int, feature.fields[7].split("-"))
    del_interval_itx_1 = map(int,
                             feature.fields[n + 7].split(",")[0].split("-"))
    del_interval_itx_2 = map(int,
                             feature.fields[n + 7].split(",")[1].split("-"))
    # Exact (zero total distance) match against either ITX DEL span.
    if filter(lambda x: abs(x[0] - del_interval_idp[0]) + abs(
            x[1] - del_interval_idp[1]) == 0,
              [del_interval_itx_1, del_interval_itx_2]) and "LowQual" not in \
            feature.fields[n + 4]:
        return None
    return pybedtools.Interval(feature.chrom, feature.start, feature.end,
                               name=feature.name,
                               score=feature.score,
                               otherfields=feature.fields[6:n])
def merge_idp_itx(fasta_file, record_dup, records_del, del_pos, del_interval,
                  score, svtype):
    """Build a single IDP or ITX VCF record from a DUP and its DEL(s).

    Parameters: ``record_dup`` is the duplication VCF record, ``records_del``
    the associated deletion record(s), ``del_pos`` the deletion breakpoint,
    ``del_interval`` the "start-end[,start-end]" DEL span string, ``score``
    the combined filter string and ``svtype`` either "IDP" or "ITX".
    """
    info = {}
    info.update(record_dup.INFO)
    start = int(record_dup.POS)
    end = info["END"]
    dup_interval = "%s-%d-%d" % (record_dup.CHROM, start, end)
    if svtype == "IDP":
        del_interval_ends = map(int, del_interval.split("-"))
        # Anchor POS at whichever end (DUP start or DEL breakpoint) comes
        # first; the other coordinate is stored as POS2.
        if abs(del_pos - del_interval_ends[0]) < abs(
                del_pos - del_interval_ends[1]):
            pos = start
            info["END"] = max(del_pos, pos)
            info["POS2"] = end
        else:
            pos = del_pos
            info["END"] = max(end, pos)
            info["POS2"] = start
    elif svtype == "ITX":
        pos = start
        info["END"] = max(del_pos, pos)
        info["POS2"] = end
    info["CHR2"] = record_dup.CHROM
    info["SVLEN"] = max(info["END"] - pos, 0)
    # Record the constituent DUP/DEL intervals, e.g. IDP_INTERVALS.
    info["%s_INTERVALS" % svtype] = "DUP-%s," % dup_interval + ",".join(
        map(lambda x: "DEL-%s-%s" % (record_dup.CHROM, x),
            del_interval.split(",")))
    info["SVTYPE"] = svtype
    # Union of SVMETHOD lists across all merged records (reduce: Py2 builtin).
    info["SVMETHOD"] = list(
        set(reduce(lambda y, z: y + z, map(lambda x: x.INFO["SVMETHOD"]
                                           , [record_dup] + records_del))))
    info["SOURCES"] = ",".join(
        map(lambda x: x.INFO["SOURCES"], [record_dup] + records_del))
    info["NUM_SVMETHODS"] = len(info["SVMETHOD"])
    # Tool name is the last '-' token of each source entry.
    info["NUM_SVTOOLS"] = len(
        set(map(lambda x: x.split('-')[-1], info["SOURCES"].split(','))))
    sv_id = "."
    ref = fasta_file.fetch(record_dup.CHROM, pos,
                           pos + 1) if fasta_file else "."
    alt = [vcf.model._SV(svtype)]
    qual = "."
    sv_filter = ["PASS"] if "LowQual" not in score else ["LowQual"]
    sv_format = "GT"
    sample_indexes = [0]
    vcf_record = vcf.model._Record(record_dup.CHROM, pos, sv_id, ref, alt, qual,
                                   sv_filter, info, sv_format, sample_indexes)
    # Carry over the DUP record's sample/genotype calls unchanged.
    vcf_record.samples = record_dup.samples
    return vcf_record
def merge_ctx(fasta_file, record_del, record_ins, score):
    """Build a CTX (cross-chromosomal translocation) VCF record from a DEL
    on one chromosome and the matching INS on another.

    POS comes from the DEL; the INS position/chromosome are stored as
    POS2/CHR2. ``score`` is the combined filter string.
    """
    info = {}
    info.update(record_del.INFO)
    start = int(record_del.POS)
    end = info["END"]
    dup_interval = None  # (not used; DEL/INS intervals recorded below)
    del_interval = "%s-%d-%d" % (record_del.CHROM, start, end)
    ins_interval = "%s-%d-%d" % (
        record_ins.CHROM, record_ins.POS, record_ins.POS)
    pos = start
    info["POS2"] = record_ins.POS
    info["CHR2"] = record_ins.CHROM
    info["CTX_INTERVALS"] = "DEL-%s,INS-%s" % (del_interval, ins_interval)
    info["SVTYPE"] = "CTX"
    # Union of SVMETHOD lists across both records (reduce: Py2 builtin).
    info["SVMETHOD"] = list(
        set(reduce(lambda y, z: y + z, map(lambda x: x.INFO["SVMETHOD"]
                                           , [record_del, record_ins]))))
    info["SOURCES"] = ",".join(
        map(lambda x: x.INFO["SOURCES"], [record_del, record_ins]))
    info["NUM_SVMETHODS"] = len(info["SVMETHOD"])
    # Tool name is the last '-' token of each source entry.
    info["NUM_SVTOOLS"] = len(
        set(map(lambda x: x.split('-')[-1], info["SOURCES"].split(','))))
    sv_id = "."
    ref = fasta_file.fetch(record_del.CHROM, pos,
                           pos + 1) if fasta_file else "."
    alt = [vcf.model._SV("CTX")]
    qual = "."
    sv_filter = ["PASS"] if "LowQual" not in score else ["LowQual"]
    sv_format = "GT"
    sample_indexes = [0]
    vcf_record = vcf.model._Record(record_del.CHROM, pos, sv_id, ref, alt, qual,
                                   sv_filter, info, sv_format, sample_indexes)
    # Carry over the DEL record's sample/genotype calls unchanged.
    vcf_record.samples = record_del.samples
    return vcf_record
def remove_info_fields(record, fields):
    """Return a copy of *record* whose INFO dict lacks the given keys.

    The original record is left untouched; every other attribute
    (CHROM, POS, ID, REF, ALT, QUAL, FILTER, FORMAT, samples) is
    carried over unchanged.
    """
    pruned_info = {}
    pruned_info.update(record.INFO)
    for unwanted in fields:
        pruned_info.pop(unwanted, None)
    clone = vcf.model._Record(record.CHROM, record.POS, record.ID,
                              record.REF, record.ALT, record.QUAL,
                              record.FILTER, pruned_info, record.FORMAT,
                              [0])
    clone.samples = record.samples
    return clone
def resolve_for_IDP_ITX_CTX(vcf_records, fasta_file, pad=0, wiggle=10,
                            overlap_ratio=0.9):
    """Re-classify DEL/DUP/INS records into composite IDP/ITX/CTX events.

    Pipeline: (1) split records by SVTYPE; (2) window DUPs against DELs to
    find IDP candidates; (3) window IDPs against themselves to promote
    pairs to ITX; (4) intersect remaining DELs with chr2-projected INS
    calls to find CTX; (5) reassemble the record list, recovering simple
    calls whose composite interpretation was LowQual-only, and strip the
    internal SC_CHR2_STR field. Returns records sorted by (CHROM, POS).

    NOTE(review): ``pad`` is accepted but never used in this body.
    Python 2 code throughout (list-returning filter/map).
    """
    del_records = filter(lambda x: (x.INFO["SVTYPE"] == "DEL"), vcf_records)
    dup_records = filter(lambda x: (x.INFO["SVTYPE"] == "DUP"), vcf_records)
    ins_records = filter(lambda x: (x.INFO["SVTYPE"] == "INS"), vcf_records)
    other_records = filter(
        lambda x: (x.INFO["SVTYPE"] not in ["DEL", "DUP", "INS"]), vcf_records)
    # BED names encode the index back into the per-type record lists
    # ("DEL_3" -> del_records[3]); score carries the FILTER value.
    del_bedtool = pybedtools.BedTool(
        [pybedtools.Interval(x.CHROM, x.POS, (x.POS + abs(x.INFO["SVLEN"])),
                             name="DEL_%d" % i, score=x.FILTER[0]) for i, x in
         enumerate(del_records)])
    dup_bedtool = pybedtools.BedTool(
        [pybedtools.Interval(x.CHROM, x.POS, (x.POS + abs(x.INFO["SVLEN"])),
                             name="DUP_%d" % i, score=x.FILTER[0]) for i, x in
         enumerate(dup_records)])
    ins_bedtool = pybedtools.BedTool(
        [pybedtools.Interval(x.CHROM, x.POS, (x.POS + 1),
                             name="INS_%d" % i, score=x.FILTER[0],
                             otherfields=[x.INFO["SC_CHR2_STR"] if
                                          "SC_CHR2_STR" in x.INFO else "."])
         for i, x in enumerate(ins_records)])
    # Project INS calls onto their soft-clip mate chromosomes (CTX input).
    chr2_intervals = []
    for interval in ins_bedtool:
        chr2_intervals.extend(build_chr2_ins(interval))
    chr2_ins_bedtool = pybedtools.BedTool(chr2_intervals)
    if len(chr2_ins_bedtool):
        chr2_ins_bedtool = chr2_ins_bedtool.sort()
    # --- IDP detection: DUPs windowed against DELs ---
    idp_bedtool = pybedtools.BedTool([])
    remained_dup_bedtool = pybedtools.BedTool([])
    if len(dup_bedtool):
        idp_bedtool = dup_bedtool.window(del_bedtool, w=wiggle).each(
            partial(find_idp, wiggle=wiggle))
        remained_dup_bedtool = dup_bedtool.intersect(idp_bedtool, f=0.95, r=True,
                                                     wa=True, v=True, nonamecheck=True)
    if len(idp_bedtool):
        idp_bedtool = idp_bedtool.sort()
    if len(remained_dup_bedtool):
        remained_dup_bedtool = remained_dup_bedtool.sort()
    # DELs consumed by an IDP are removed from the simple-DEL pool.
    remained_del_bedtool = del_bedtool
    if len(del_bedtool) and len(idp_bedtool):
        remained_del_bedtool = del_bedtool.intersect(
            idp_bedtool.each(partial(extract_del_interval)).sort(), f=0.95, r=True,
            wa=True, v=True, nonamecheck=True)
    # --- ITX promotion: IDPs windowed against themselves ---
    itx_bedtool = pybedtools.BedTool([])
    remained_idp_bedtool_1 = pybedtools.BedTool([])
    remained_idp_bedtool_2 = pybedtools.BedTool([])
    if len(idp_bedtool):
        itx_bedtool = idp_bedtool.window(idp_bedtool, w=wiggle).each(
            partial(find_itx, wiggle=wiggle))
        # IDPs overlapping an ITX but not identical to its DEL spans.
        remained_idp_bedtool_1 = idp_bedtool.window(itx_bedtool, w=wiggle).each(
            partial(filter_itxs))
        # IDPs with zero ITX overlaps (count column == "0").
        remained_idp_bedtool_2 = idp_bedtool.window(itx_bedtool, w=wiggle,
                                                    c=True).filter(
            lambda x: x.fields[-1] == "0")
    if len(itx_bedtool):
        itx_bedtool = itx_bedtool.sort()
    if len(remained_idp_bedtool_1):
        remained_idp_bedtool_1 = remained_idp_bedtool_1.sort()
    if len(remained_idp_bedtool_2):
        remained_idp_bedtool_2 = remained_idp_bedtool_2.sort()
    # --- CTX detection: leftover DELs vs chr2-projected INS calls ---
    ctx_bedtool = pybedtools.BedTool([])
    if len(remained_del_bedtool):
        ctx_bedtool = remained_del_bedtool.intersect(chr2_ins_bedtool, r=True,
                                                     f=overlap_ratio, wa=True,
                                                     wb=True, nonamecheck=True).each(
            partial(find_ctx, overlap_ratio=overlap_ratio))
        remained_del_bedtool = remained_del_bedtool.intersect(ctx_bedtool, f=0.95,
                                                              r=True, wa=True,
                                                              v=True, nonamecheck=True)
    if len(ctx_bedtool):
        ctx_bedtool = ctx_bedtool.sort()
    if len(remained_del_bedtool):
        remained_del_bedtool = remained_del_bedtool.sort()
    if len(remained_idp_bedtool_2):
        # Trim the appended window columns back to the IDP schema.
        remained_idp_bedtool_2 = remained_idp_bedtool_2.cut(
            range(idp_bedtool.field_count())).sort()
    # Simple calls inside a LowQual-only composite are recovered; those
    # inside any PASS composite are dropped.
    recoverd_pass_del_dup_ins = []
    removed_pass_del_dup_ins = []
    for bed in remained_idp_bedtool_1, remained_idp_bedtool_2, itx_bedtool, ctx_bedtool:
        recoverd_pass_del_dup_ins.append(",".join(
            map(lambda y: y.name, filter(lambda x: "LowQual" in x.score, bed))))
        removed_pass_del_dup_ins.append(",".join(map(lambda y: y.name, filter(
            lambda x: "LowQual" not in x.score, bed))))
    recoverd_pass_del_dup_ins = set(
        (",".join(recoverd_pass_del_dup_ins)).split(",")) - set([''])
    removed_pass_del_dup_ins = set(
        (",".join(removed_pass_del_dup_ins)).split(",")) - set([''])
    recoverd_pass_del_dup_ins = recoverd_pass_del_dup_ins - removed_pass_del_dup_ins
    recoverd_dups = list(set([x.name for x in remained_dup_bedtool]) | set(
        filter(lambda x: "DUP" in x, recoverd_pass_del_dup_ins)))
    recoverd_dels = list(set([x.name for x in remained_del_bedtool]) | set(
        filter(lambda x: "DEL" in x, recoverd_pass_del_dup_ins)))
    recoverd_inss = list(set([x.name for x in ins_bedtool]) - (
        set(filter(lambda x: "INS" in x, removed_pass_del_dup_ins))))
    # Reassemble: untouched records + recovered simple calls + merged
    # IDP/ITX/CTX records (indices decoded from the BED names).
    vcf_records = other_records + [dup_records[int(x.split("_")[-1])] for x in
                                   recoverd_dups] + \
                  [del_records[int(x.split("_")[-1])] for x in recoverd_dels] + \
                  [ins_records[int(x.split("_")[-1])] for x in recoverd_inss] + \
                  [merge_idp_itx(fasta_file, dup_records[
                      int(x.name.split(",")[0].split("_")[-1])],
                                 [del_records[int(
                                     x.name.split(",")[1].split("_")[-1])]],
                                 int(x.fields[6]), x.fields[7], x.score, "IDP")
                   for x in remained_idp_bedtool_1] + \
                  [merge_idp_itx(fasta_file, dup_records[
                      int(x.name.split(",")[0].split("_")[-1])],
                                 [del_records[int(
                                     x.name.split(",")[1].split("_")[-1])]],
                                 int(x.fields[6]), x.fields[7], x.score, "IDP")
                   for x in remained_idp_bedtool_2] + \
                  [merge_idp_itx(fasta_file, dup_records[
                      int(x.name.split(",")[0].split("_")[-1])],
                                 [del_records[
                                      int(x.name.split(",")[1].split("_")[-1])],
                                  del_records[int(
                                      x.name.split(",")[2].split("_")[-1])]],
                                 int(x.fields[6]), x.fields[7], x.score, "ITX")
                   for x in itx_bedtool] + \
                  [merge_ctx(fasta_file, del_records[
                      int(x.name.split(",")[0].split("_")[-1])],
                             ins_records[
                                 int(x.name.split(",")[1].split("_")[-1])],
                             x.score) for x in ctx_bedtool]
    # SC_CHR2_STR is internal plumbing; strip it before output.
    vcf_records = sorted(
        map(lambda x: remove_info_fields(x, ["SC_CHR2_STR"]), vcf_records),
        key=lambda x: (x.CHROM, x.POS))
    return vcf_records
def convert_metasv_bed_to_vcf(bedfile=None, vcf_out=None, workdir=None,
                              vcf_template_file=VCF_TEMPLATE, sample=None,
                              reference=None,
                              pass_calls=True):
    """Convert a MetaSV merged BED file into a sorted, tabix-indexed VCF.

    Steps: parse and de-duplicate BED intervals, filter INS calls that sit
    on confident DEL/INV breakpoints, build VCF records from the surviving
    intervals, resolve composite IDP/ITX/CTX events, then write, sort and
    index the output.
    """
    func_logger = logging.getLogger("%s" % convert_metasv_bed_to_vcf.__name__)
    if not os.path.exists(workdir):
        os.makedirs(workdir)
    intervals = []
    if bedfile:
        for interval in pybedtools.BedTool(bedfile):
            interval_info = get_interval_info(interval, pass_calls)
            if interval_info:
                # Re-encode the parsed call into the BED name field:
                # (b64-JSON info, type, length, methods).
                updated_interval = pybedtools.Interval(
                    interval.chrom,
                    interval_info["pos"],
                    interval_info["end"],
                    name="%s,%s,%d,%s" % (
                        base64.b64encode(json.dumps(interval_info["info"])),
                        interval_info["sv_type"],
                        interval_info["sv_length"],
                        ";".join(interval_info["svmethods"])),
                    score=interval.score,
                    otherfields=[interval_info["genotype"],
                                 interval_info["sv_filter"]])
                if not intervals:
                    intervals.append(updated_interval)
                else:
                    # Input is position-sorted, so duplicates are adjacent:
                    # only compare against the previous interval.
                    merged_interval = check_duplicates(updated_interval,
                                                       intervals[-1])
                    if merged_interval:
                        func_logger.info("Merging intervals: %s and %s" % (
                            updated_interval, intervals[-1]))
                        intervals.pop()
                        intervals.append(merged_interval)
                    else:
                        intervals.append(updated_interval)
            else:
                func_logger.info("Skip interval: %s" % (interval))
    nonfilterd_bed = os.path.join(workdir, "final_nonfilterd.bed")
    filterd_bed = os.path.join(workdir, "final_filterd.bed")
    bedtool = pybedtools.BedTool(intervals).sort().moveto(nonfilterd_bed)
    filterd_bed = filter_confused_INS_calls(nonfilterd_bed, filterd_bed)
    vcf_template_reader = vcf.Reader(open(vcf_template_file, "r"))
    # The following are hacks to ensure sample name and contig names are put in the VCF header
    vcf_template_reader.samples = [sample]
    contigs = []
    fasta_file = None
    if reference:
        contigs = fasta_utils.get_contigs(reference)
        contigs_order_dict = {contig.name: index for (index, contig) in
                              enumerate(contigs)}
        vcf_template_reader.contigs = OrderedDict(
            [(contig.name, (contig.name, contig.length)) for contig in contigs])
        vcf_template_reader.metadata["reference"] = reference
        fasta_file = pysam.Fastafile(reference)
    vcf_template_reader.metadata["fileDate"] = str(datetime.date.today())
    vcf_template_reader.metadata["source"] = [" ".join(sys.argv)]
    vcf_writer = vcf.Writer(open(vcf_out, "w"), vcf_template_reader)
    vcf_records = []
    if filterd_bed:
        bedtool = pybedtools.BedTool(filterd_bed)
        for interval in bedtool:
            name_split = interval.name.split(",")
            info = json.loads(base64.b64decode(name_split[0]))
            # Fix info: drop empty/placeholder insertion sequences.
            if "INSERTION_SEQUENCE" in info and (not info["INSERTION_SEQUENCE"] or info["INSERTION_SEQUENCE"] == "."):
                del info["INSERTION_SEQUENCE"]
            sv_type = name_split[1]
            sv_id = "."
            # REF base from the reference FASTA when available.
            ref = fasta_file.fetch(str(interval.chrom), interval.start,
                                   interval.start + 1) if fasta_file else "."
            alt = [vcf.model._SV(sv_type)]
            qual = "."
            sv_filter = [interval.fields[7]]
            genotype = interval.fields[6]
            sv_format = "GT"
            sample_indexes = [0]
            vcf_record = vcf.model._Record(interval.chrom, interval.start,
                                           sv_id, ref, alt, qual,
                                           sv_filter, info, sv_format,
                                           sample_indexes)
            vcf_record.samples = vcf_template_reader._parse_samples([genotype],
                                                                    "GT",
                                                                    vcf_record)
            vcf_records.append(vcf_record)
    # Sort in reference contig order when known, else lexicographically.
    if contigs:
        vcf_records.sort(key=lambda x: (contigs_order_dict[x.CHROM], x.POS))
    else:
        vcf_records.sort(key=lambda x: (x.CHROM, x.POS))
    resolved_vcf_records = resolve_for_IDP_ITX_CTX(vcf_records, fasta_file)
    for vcf_record in resolved_vcf_records:
        vcf_writer.write_record(vcf_record)
    vcf_writer.close()
    func_logger.info("Tabix compressing and indexing %s" % vcf_out)
    pysam.tabix_index(vcf_out, force=True, preset="vcf")
|
msahraeian/metasv
|
metasv/generate_final_vcf.py
|
Python
|
bsd-2-clause
| 29,341
|
[
"pysam"
] |
abbcba9787381c5ed3c9fd3fc1a9a321a061d8d522d3fe68f8be85650bb72675
|
"""
Hidden Markov Models
====================
.. autosummary::
:toctree: generated/
:nosignatures:
HMM
DiscreteHMM
GaussianHMM
StudentHMM
GMMHMM
"""
from __future__ import division, print_function, absolute_import
# noinspection PyUnresolvedReferences
from six.moves import range
from abc import ABCMeta, abstractmethod
import numpy as np
# noinspection PyPackageRequirements
from sklearn.mixture import GMM
from ...auxiliary.array import accum, normalize
from ...optimize.algorithms import EM
from ...libs import hmmc
from ..models.mixture import StudentMM
from ..models import markov
from .. import normalize_logspace, nonuniform
from .. import conditional_normal, conditional_student, conditional_mix_normal
from .. import multivariate_normal, multivariate_student
class HMM(EM):
"""Hidden Markov Model base class.
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
See the instance documentation for details specific to a
particular object.
Parameters
----------
ncomponents : int
Number of states in the model.
startprob_prior : array, shape (`ncomponents`,)
Initial state occupation prior distribution.
startprob : array, shape (`ncomponents`,)
Initial state occupation distribution.
transmat_prior : array, shape (`ncomponents`, `ncomponents`)
Matrix of prior transition probabilities between states.
transmat : array, shape (`ncomponents`, `ncomponents`)
Matrix of transition probabilities between states.
emission : cond_rv_frozen
The conditional probability distribution used for the emission.
emission_prior : normal_invwishart
Initial emission parameters, a normal-inverse Wishart distribution.
n_iter : int
Number of iterations to perform during training, optional.
thresh : float
Convergence threshold, optional.
verbose : bool
Controls if debug information is printed to the console, optional.
Attributes
----------
ncomponents : int
The number of hidden states.
nfeatures : int
Dimensionality of the Gaussian emission.
startprob_prior : array, shape (`ncomponents`,)
Initial state occupation prior distribution.
startprob : array, shape (`ncomponents`,)
Initial state occupation distribution.
transmat_prior : array, shape (`ncomponents`, `ncomponents`)
Matrix of prior transition probabilities between states.
transmat : array, shape (`ncomponents`, `ncomponents`)
Matrix of transition probabilities between states.
emission_prior : normal_invwishart
Initial emission parameters, a normal-inverse Wishart distribution.
emission : cond_rv_frozen
The conditional probability distribution used for the emission.
Examples
--------
>>> from mlpy.stats.dbn.hmm import GaussianHMM
>>> model = GaussianHMM(ncomponents=2, startprob_prior=[3, 2])
Create a gaussian hidden Markov model
>>> import scipy.io
>>> mat = scipy.io.loadmat('data/speechDataDigits4And5.mat')
>>> x = np.hstack([mat['train4'][0], mat['train5'][0]])
Load data used for fitting the HMM and fit the HMM:
>>> model.fit(x, n_init=3)
.. note::
Adapted from Matlab:
| Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
| Copyright (2010) Kevin Murphy and Matt Dunham
| License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
"""
__metaclass__ = ABCMeta
@property
def startprob_prior(self):
    """Vector of initial probabilities for each state.

    Returns
    -------
    startprob_prior : array, shape (`ncomponents`,)
        The initial probabilities.
    """
    # Backed by _startprob_prior, validated/normalized by the setter.
    return self._startprob_prior
@startprob_prior.setter
def startprob_prior(self, pi_prior):
    # Default to a uniform (all-ones) pseudo-count prior when none given.
    self._startprob_prior = np.ones(self.ncomponents,
                                    dtype=float) if pi_prior is None else np.asarray(pi_prior, dtype=np.float64)
    # check if there exists a component whose value is exactly zero
    # if so, add a small number and re-normalize
    # NOTE(review): the code only calls normalize() and ignores its return
    # value — whether a small number is added, or the array is mutated
    # in place, depends on the project's normalize(); confirm.
    if not np.alltrue(self._startprob_prior):
        normalize(self._startprob_prior)
    if len(self._startprob_prior) != self.ncomponents:
        raise ValueError('pi_prior must have length ncomponents')
@property
def transmat_prior(self):
    """Prior over the state transition matrix.

    Returns
    -------
    transmat_prior : array, shape (`ncomponents`, `ncomponents`)
        Pseudo-count matrix of transitions from each state to every
        other state.
    """
    return self._transmat_prior
@transmat_prior.setter
def transmat_prior(self, trans_prior):
    """Set the transition probability prior.

    Parameters
    ----------
    trans_prior : array_like or None
        Prior pseudo-counts for the transition matrix.  When None, a flat
        (all-ones) prior is used.  A single row is tiled into a full
        (`ncomponents`, `ncomponents`) matrix.
    """
    self._transmat_prior = np.ones((self.ncomponents, self.ncomponents),
                                   float) if trans_prior is None else np.asarray(trans_prior, dtype=np.float64)
    # If any entry is exactly zero, re-normalize so all entries become
    # strictly positive.
    # NOTE(review): `normalize` is a project helper and its return value is
    # discarded here -- confirm it mutates its argument in place.
    # np.alltrue was removed in NumPy 2.0; np.all is the portable spelling.
    if not np.all(self._transmat_prior):
        normalize(self._transmat_prior)
    if np.asarray(self._transmat_prior).shape != (self.ncomponents, self.ncomponents):
        self._transmat_prior = np.tile(self._transmat_prior, (self.ncomponents, 1))
def __init__(self, ncomponents=1, startprob_prior=None, startprob=None, transmat_prior=None, transmat=None,
             emission_prior=None, emission=None, n_iter=None, thresh=None, verbose=None):
    # The base class owns the EM loop configuration (iteration cap,
    # convergence threshold, verbosity).
    super(HMM, self).__init__(n_iter, thresh, verbose)
    self.ncomponents = ncomponents
    # Dimensionality of a single observation; overwritten in `fit` from data.
    self.nfeatures = 1
    self._startprob_prior = None
    self._transmat_prior = None
    self._fit_X = None
    """:type:ndarray[ndarray[float]]"""
    # The prior assignments go through the property setters, which default
    # to flat priors and validate shapes.
    self.startprob_prior = startprob_prior
    self.transmat_prior = transmat_prior
    self.startprob = startprob
    self.transmat = transmat
    self.emission_prior = emission_prior
    self.emission = emission
def score_samples(self, obs):
    """Compute the log probability of the evidence.

    Compute the log probability of the evidence (likelihood) under the
    model and the posteriors.

    Parameters
    ----------
    obs : array_like, shape (`n`, `len`, `nfeatures`)
        Sequence of `nfeatures`-dimensional data points. Each row
        corresponds to a single point in the sequence.

    Returns
    -------
    logp : array_like, shape (`n`,)
        Log likelihood of each sequence in `obs`.
    posteriors : array_like, shape (`n`, `len`, `ncomponents`)
        Posterior probabilities of each state for each observation
    """
    n = obs.shape[0]
    logp = np.zeros(n)
    posterior = np.zeros((n, obs.shape[1], self.ncomponents), dtype=np.float64)
    for i, x in enumerate(obs):
        # Per-state observation log-densities for sequence i.
        log_b = self.emission.logpdf(x)
        # Log-space rescaling for numerical stability; `scale` records the
        # removed normalizers so the total likelihood can be restored below.
        log_b, scale = normalize_logspace(log_b.T)
        b = np.exp(log_b)
        # Forward pass: sequence log-likelihood and alpha messages.
        logp[i], alpha = self._forward(self.startprob, self.transmat, b.T)
        # Backward pass combines with alpha into smoothed posteriors (gamma).
        _, gamma = self._backward(self.transmat, b.T, alpha)
        logp[i] += np.sum(scale)
        posterior[i] = gamma.T
    return logp, posterior
def score(self, obs):
    """
    Compute log probability of the evidence (likelihood) under the model.

    Parameters
    ----------
    obs : array_like, shape (`n`, `len`, `nfeatures`)
        Sequence of `nfeatures`-dimensional data points. Each row
        corresponds to a single point in the sequence.

    Returns
    -------
    logp : array_like, shape (`n`,)
        Log likelihood of each sequence in `obs`.
    """
    n = obs.shape[0]
    logp = np.zeros(n)
    for i, x in enumerate(obs):
        # Per-state observation log-densities for sequence i.
        log_b = self.emission.logpdf(x)
        # Log-space rescaling for numerical stability; `scale` records the
        # removed normalizers so they can be added back after the forward pass.
        log_b, scale = normalize_logspace(log_b.T)
        b = np.exp(log_b)
        logp[i], alpha = self._forward(self.startprob, self.transmat, b.T)
        logp[i] += np.sum(scale)
    return logp
def predict_proba(self, obs):
    """Posterior probability of every state for each observation.

    Parameters
    ----------
    obs : array_like, shape (`n`, `len`, `nfeatures`)
        Sequence of `nfeatures`-dimensional data points. Each row
        corresponds to a single point in the sequence.

    Returns
    -------
    posteriors : array_like, shape (`n`, `ncomponents`)
        Posterior probabilities of each state for each observation.
    """
    # score_samples returns (logp, posteriors); only the latter is needed.
    return self.score_samples(obs)[1]
def sample(self, length, size=1):
    """Generates random samples from the model.

    Parameters
    ----------
    length : int or ndarray[int]
        Length of a sample.  A scalar is broadcast to every sample.
    size : int, optional
        Number of samples to generate. Default is 1.

    Returns
    -------
    obs : array_like, shape (`n`, `ni`, `nfeatures`)
        List of samples, where `n` is the number of samples, `ni` is the
        length of the i-th sample, and each observation has `nfeatures`.
    hidden_states : array_like, shape (`n`, `ni`)
        List of hidden states, where `n` is the number of samples, `ni` is
        the i-th hidden state.
    """
    # noinspection PyTypeChecker
    length = np.tile(length, size) if not hasattr(length, "__len__") else length
    assert (length.size == size)
    hidden_states = np.empty((size, np.max(length)), dtype=np.int32)
    # Multiplying uninitialized memory by NaN yields an all-NaN array, so
    # unused tail positions of shorter sequences are marked as missing.
    obs = np.empty((size, np.max(length), self.nfeatures), dtype=np.float64) * np.nan
    for i in range(size):
        # Draw the hidden state chain first, then one emission per step.
        hidden_states[i] = markov.sample(self.startprob, self.transmat, size=length[i])
        for t in range(length[i]):
            obs[i][t] = self._generate_sample_from_state(hidden_states[i][t])
    if size == 1:
        # Drop the leading sample axis for a single sample.
        hidden_states = hidden_states[0]
        obs = obs[0]
    return obs, hidden_states
def decode(self, obs, algorithm="viterbi"):
    """Find the most likely hidden state sequence for `obs`.

    Parameters
    ----------
    obs : array_like, shape (`nfeatures`, `T`)
        The local evidence vector.
    algorithm : {'viterbi', 'map'}
        Decoder algorithm to use.  Any unrecognized value falls back to
        'viterbi'.

    Returns
    -------
    best_path : array_like, shape (`n`,)
        The most likely state for each observation.
    loglik : float
        Log probability of the decoded path through the HMM.
    """
    if algorithm == "map":
        return self._decode_map(obs)
    # Unknown algorithm names silently fall back to Viterbi, mirroring the
    # original whitelist behaviour.
    return self._decode_viterbi(obs)
def fit(self, obs, n_init=1):
    """Estimate model parameters.

    Parameters
    ----------
    obs : array_like
        List of observation sequences.
        NOTE(review): `obs[0].shape[0]` is taken as the feature count, which
        implies each sequence is stored as (`nfeatures`, `ni`) -- confirm
        against callers; the previous docstring claimed (`n`, `ni`,
        `nfeatures`).
    n_init : int
        Number of random restarts of the EM procedure.

    Returns
    -------
    float :
        log likelihood of the sequence `obs`
    """
    self.nfeatures = obs[0].shape[0]
    # Keep a reference to the training data for later inspection.
    self._fit_X = obs
    return self._em(obs, n_init=n_init)
@abstractmethod
def _initialize(self, obs, init_count):
    """Perform initialization step before entering the EM algorithm.

    Subclasses must set up `startprob`, `transmat`, and `emission` here.

    Parameters
    ----------
    obs : array_like, shape (`n`, `ni`, `nfeatures`)
        List of observation sequences, where `n` is the number of sequences, `ni` is
        the length of the i_th observation, and each observation has `nfeatures` features.
    init_count : int
        Restart counter

    Raises
    ------
    NotImplementedError
        If the child class does not implement this function.
    """
    raise NotImplementedError
@abstractmethod
def _generate_sample_from_state(self, state):
    """Generate a sample from the given current state.

    Subclasses draw one observation from their emission distribution.

    Parameters
    ----------
    state : int
        Current state.

    Returns
    -------
    sample: int
        An observation sampled for the given state

    Raises
    ------
    NotImplementedError
        If the child class does not implement this function.
    """
    raise NotImplementedError
def _decode_viterbi(self, obs, scale=True):
    """Find most likely (Viterbi) state sequence corresponding to `obs`.

    Parameters
    ----------
    obs : array_like, shape (`nfeatures`, `T`)
        The local evidence vector.
    scale : bool
        When True, each delta column is normalized to avoid numerical
        underflow; the log-likelihood is then recovered from the scales.

    Returns
    -------
    best_path: array_like, shape (`n`,)
        The most likely states for each observation
    loglik: float
        Log probability of the maximum likelihood path through the HMM
    """
    log_b = self.emission.logpdf(obs.T)
    obslik = np.exp(log_b)
    seq_len = obslik.shape[1]
    # delta[j, t]: max probability of any state path ending in j at time t.
    delta = np.zeros((self.ncomponents, seq_len), dtype=np.float64)
    # psi[j, t]: argmax predecessor of state j at time t (for backtracking).
    psi = np.zeros((self.ncomponents, seq_len), dtype=np.int32)
    best_path = np.zeros(seq_len, dtype=np.int32)
    scales = np.ones(seq_len, dtype=np.float64)
    delta[:, 0] = self.startprob * obslik[:, 0]
    if scale:
        delta[:, 0], n = normalize(delta[:, 0], return_scale=True)
        scales[0] = 1.0 / n
    psi[:, 0] = 0  # arbitrary, since there is no predecessor to t=0
    for t in range(1, seq_len):
        for j in range(self.ncomponents):
            # Best predecessor for state j, then fold in local evidence.
            m = delta[:, t - 1] * self.transmat[:, j]
            delta[j, t] = np.max(m)
            psi[j, t] = np.argmax(m)
            delta[j, t] = delta[j, t] * obslik[j, t]
        if scale:
            delta[:, t], n = normalize(delta[:, t], return_scale=True)
            scales[t] = 1.0 / n
    # Backtrack from the most probable final state.
    best_path[seq_len - 1] = np.argmax(delta[:, seq_len - 1])
    for t in range(seq_len - 2, -1, -1):
        best_path[t] = psi[best_path[t + 1], t + 1]
    if scale:
        # Product of the per-step normalizers equals 1/p(path), so the
        # log-likelihood is minus the summed log scales.
        loglik = -np.sum(np.log(scales))
    else:
        p = np.max(delta[:, seq_len - 1])
        loglik = np.log(p)
    return best_path, loglik
def _decode_map(self, obs):
    """Find most likely (MAP) state sequence corresponding to `obs`.

    Uses the maximum a posteriori estimation.

    Parameters
    ----------
    obs : array_like, shape (`nfeatures`, `T`)
        The local evidence vector.

    Returns
    -------
    best_path: array_like, shape (`n`,)
        The most likely states for each observation
    loglik: float
        Log probability of the maximum likelihood path through the HMM
    """
    # score_samples expects a batch; wrap the single (time-major) sequence
    # and read the first result back out.
    _, posteriors = self.score_samples(np.array([obs.T]))
    best_path = np.argmax(posteriors[0], axis=1)
    # NOTE(review): this sums the maximum posterior *probabilities*, not
    # their logarithms, so despite the name it is not a log-likelihood --
    # confirm the intended semantics.
    loglik = np.max(posteriors[0], axis=1).sum()
    return best_path, loglik
def _estep(self, obs):
    """Perform expectation step of the EM algorithm.

    Runs forward-backward over every sequence, accumulates the expected
    sufficient statistics (start counts, transition counts, state weights),
    and performs the MAP update of the start/transition distributions.

    Parameters
    ----------
    obs : array_like, shape (`n`, `ni`, `nfeatures`)
        List of observation sequences, where `n` is the number of sequences, `ni` is
        the length of the i_th observation, and each observation has `nfeatures` features.

    Returns
    -------
    loglik : float
        Log likelihood of the observation `obs`
    """
    stacked_obs = self._stack_obs(obs)
    # Start index of every sequence inside the stacked array.
    seq_idx = np.cumsum([0] + [x.shape[1] for x in obs])
    nstacked = stacked_obs.shape[0]
    # Accumulators for the expected sufficient statistics.
    start_counts = np.zeros(self.ncomponents)
    trans_counts = np.zeros((self.ncomponents, self.ncomponents))
    weights = np.zeros((nstacked, self.ncomponents))
    loglik = 0
    nobs = obs.shape[0]
    # Emission log-densities for all stacked observations at once, rescaled
    # in log-space for numerical stability; `scale` restores the total below.
    log_b = self.emission.logpdf(stacked_obs)
    log_b, scale = normalize_logspace(log_b.T)
    b = np.exp(log_b)
    for i in range(nobs):
        ndx = np.arange(seq_idx[i], seq_idx[i + 1])
        bi = b[ndx]
        # C-accelerated forward/backward; pure-Python fallbacks kept below.
        logp, alpha = hmmc.forward(self.startprob, self.transmat, bi.T)
        # logp, alpha = self._forward(self.startprob, self.transmat, bi.T)
        beta, gamma = hmmc.backward(self.transmat, bi.T, alpha)
        # beta, gamma = self._backward(self.transmat, bi.T, alpha)
        loglik += logp
        xi_summed = hmmc.computeTwoSliceSum(alpha, beta, self.transmat, bi.T)
        # xi_summed = self._compute_two_slice_sum(alpha, beta, self.transmat, bi.T)
        start_counts += gamma[:, 0]
        trans_counts += xi_summed
        weights[ndx] += gamma.T
    loglik += np.sum(scale)
    # Dirichlet-style log-prior over transition and start distributions;
    # np.spacing(1) guards against log(0).
    log_prior = np.dot(np.log(np.ravel(self.transmat) + np.spacing(1)),
                       np.ravel(self.transmat_prior)) + np.dot(np.log(self.startprob + np.spacing(1)),
                                                               self.startprob_prior)
    loglik += log_prior
    # emission component
    # MAP updates of the start/transition distributions from expected counts.
    self.startprob = normalize(start_counts + self.startprob_prior)
    self.transmat = normalize(trans_counts + self.transmat_prior, 1)
    self.emission.expected_sufficient_statistics(stacked_obs, weights)
    loglik += self.emission.logprior()
    return loglik
def _mstep(self):
"""Perform maximization step of the EM algorithm."""
self.emission.fit()
def _stack_obs(self, obs):
"""Stack observations.
Stack observations to a sequence of (`n`*`ni`)-by-`nfeatures`-dimensional
data points. Each row corresponds to a single data point in a sequence.
Parameters
----------
obs : array_like, shape (`n`, `ni`, `nfeatures`)
List of observation sequences, where `n` is the number of sequences, `ni` is
the length of the i_th observation, and each observation has `nfeatures` features.
Returns
-------
stacked_obs : array_like, shape (`n`*`ni`, `nfeatures`)
List of observation sequences
"""
stacked_obs = np.empty((0, self.nfeatures), dtype=np.float64)
for i in range(obs.shape[0]):
# d = obs[i].T if obs[i].shape[0] == self.nfeatures else obs[i]
stacked_obs = np.vstack([stacked_obs, obs[i].T])
return stacked_obs
def _rand_init(self):
    """Randomly initialize the prior and the transition probabilities.

    Uniform noise is shifted by the pseudo-count priors (the ``- 1`` cancels
    the default all-ones prior) and normalized into valid distributions.
    """
    # noinspection PyArgumentList
    self.startprob = normalize(np.random.rand(self.ncomponents) + self.startprob_prior - 1)
    # Row-normalize (axis=1) so each state's outgoing probabilities sum to 1.
    self.transmat = normalize(
        np.random.rand(self.ncomponents, self.ncomponents) + self.transmat_prior - 1, 1)
def _init_with_mix_model(self, obs, pz):
    """Initialize the prior probabilities and transition probabilities.

    Derives `startprob` and `transmat` from the posteriors of a fitted
    mixture model during the initialization step before the EM algorithm.

    Parameters
    ----------
    obs : array_like, shape (`n`, `ni`, `nfeatures`)
        List of observation sequences, where `n` is the number of sequences, `ni` is
        the length of the i_th observation, and each observation has `nfeatures` features.
    pz : array_like, shape (`n`*`ni`, `ncomponents`)
        Posterior probability of the observation
    """
    # Hard-assign each stacked observation to its most likely component.
    z = np.argmax(pz, axis=1)
    if self.transmat is None:
        # Count consecutive (z_t, z_{t+1}) pairs into a transition matrix.
        # NOTE(review): pairs spanning sequence boundaries are counted too --
        # confirm this is acceptable for multi-sequence input.
        self.transmat = accum(np.vstack([z[0:-1], z[1::]]).T, 1,
                              size=(self.ncomponents, self.ncomponents))
        # regularize
        self.transmat = normalize(self.transmat + np.ones(self.transmat.shape), 1)
    if self.startprob is None:
        # Index of the first observation of every sequence in the stack.
        seq_idx = np.cumsum([0] + [x.shape[1] for x in obs])
        self.startprob = np.bincount(z[seq_idx[0:-1]], minlength=self.ncomponents)
        self.startprob = normalize(self.startprob + np.ones(self.startprob.shape))  # regularize
# noinspection PyMethodMayBeStatic
def _forward(self, pi, transmat, softev):
    """Scaled forward pass of the forward-backward algorithm.

    Parameters
    ----------
    pi : array, shape (`ncomponents`,)
        Initial state distribution.
    transmat : array, shape (`ncomponents`, `ncomponents`)
        State transition matrix.
    softev : array, shape (`ncomponents`, `T`)
        Soft (local) evidence per state and time step.

    Returns
    -------
    loglik : float
        Log-likelihood of the evidence, recovered from the scaling factors.
    alpha : array, shape (`ncomponents`, `T`)
        Normalized forward messages.
    """
    n, m = softev.shape
    scale = np.zeros(m)
    # Transpose once so each step is a single matrix-vector product.
    at = transmat.T
    alpha = np.zeros((n, m))
    alpha[:, 0], scale[0] = normalize(np.ravel(pi) * softev[:, 0], return_scale=True)
    for t in range(1, m):
        alpha[:, t], scale[t] = normalize(np.dot(at, alpha[:, t - 1]) * softev[:, t], return_scale=True)
    # eps guards against log(0) when a step's total mass underflows.
    loglik = np.sum(np.log(scale + np.finfo(float).eps))
    return loglik, alpha
# noinspection PyMethodMayBeStatic
def _backward(self, transmat, softev, alpha):
    """Backward pass of the forward-backward algorithm.

    Parameters
    ----------
    transmat : array, shape (`ncomponents`, `ncomponents`)
        State transition matrix.
    softev : array, shape (`ncomponents`, `T`)
        Soft (local) evidence for each state at each time step.
    alpha : array, shape (`ncomponents`, `T`)
        Forward messages from `_forward`.

    Returns
    -------
    beta : array, shape (`ncomponents`, `T`)
        Backward messages.
    gamma : array, shape (`ncomponents`, `T`)
        Smoothed posterior marginals, normalized per time step (axis 0).
    """
    n, m = softev.shape
    beta = np.zeros((n, m))
    beta[:, m - 1] = np.ones(n)
    # Iterate t = m-1 .. 1, filling beta[:, t-1].  BUG FIX: the original
    # looped over reversed(range(m)), whose final iteration (t == 0) wrote
    # beta[:, -1] and clobbered the beta[:, m-1] = 1 initialization before
    # gamma was computed.
    for t in reversed(range(1, m)):
        beta[:, t - 1] = normalize(np.dot(transmat, beta[:, t] * softev[:, t]))
    gamma = normalize(alpha * beta, 0)
    return beta, gamma
# noinspection PyMethodMayBeStatic
def _compute_two_slice_sum(self, alpha, beta, transmat, softev):
n, m = softev.shape
xi_summed = np.zeros((n, n))
for t in reversed(range(1, m)):
b = beta[:, t] * softev[:, t]
xit = transmat * np.dot(alpha[:, t - 1].reshape(n, 1), b.reshape(1, n))
xi_summed += np.true_divide(xit, np.sum(np.ravel(xit)))
return xi_summed
class DiscreteHMM(HMM):
    """Hidden Markov Model with discrete(multinomial) emissions.

    Representation of a hidden Markov model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a HMM.

    Parameters
    ----------
    ncomponents : int
        Number of states in the model.
    startprob_prior : array, shape (`ncomponents`,)
        Initial state occupation prior distribution.
    startprob : array, shape (`ncomponents`,)
        Initial state occupation distribution.
    transmat_prior : array, shape (`ncomponents`, `ncomponents`)
        Matrix of prior transition probabilities between states.
    transmat : array, shape (`ncomponents`, `ncomponents`)
        Matrix of transition probabilities between states.
    emission : cond_rv_frozen
        The conditional probability distribution used for the emission.
    emission_prior : normal_invwishart
        Initial emission parameters, a normal-inverse Wishart distribution.
    n_iter : int
        Number of iterations to perform during training, optional.
    thresh : float
        Convergence threshold, optional.
    verbose : bool
        Controls if debug information is printed to the console, optional.

    Attributes
    ----------
    ncomponents : int
        The number of hidden states.
    nfeatures : int
        Dimensionality of the Gaussian emission.
    startprob_prior : array, shape (`ncomponents`,)
        Initial state occupation prior distribution.
    startprob : array, shape (`ncomponents`,)
        Initial state occupation distribution.
    transmat_prior : array, shape (`ncomponents`, `ncomponents`)
        Matrix of prior transition probabilities between states.
    transmat : array, shape (`ncomponents`, `ncomponents`)
        Matrix of transition probabilities between states.
    emission_prior : normal_invwishart
        Initial emission parameters, a normal-inverse Wishart distribution.
    emission : cond_rv_frozen
        The conditional probability distribution used for the emission.

    Examples
    --------
    >>> from mlpy.stats.dbn.hmm import DiscreteHMM
    >>> DiscreteHMM(ncomponents=2)
    ...

    .. note::
        Adapted from Matlab:

        | Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
        | Copyright (2010) Kevin Murphy and Matt Dunham
        | License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
    """

    def __init__(self, ncomponents=1, startprob_prior=None, startprob=None, transmat_prior=None, transmat=None,
                 emission_prior=None, emission=None, n_iter=None, thresh=None, verbose=None):
        # All behaviour lives in the HMM base; this subclass only specifies
        # the discrete emission handling below.
        super(DiscreteHMM, self).__init__(ncomponents, startprob_prior, startprob, transmat_prior, transmat,
                                          emission_prior, emission, n_iter, thresh, verbose)

    def _generate_sample_from_state(self, state):
        """Generate a sample from the given current state.

        Parameters
        ----------
        state : int
            Current state.

        Returns
        -------
        sample: int
            An observation sampled for the given state
        """
        # `nonuniform.rvs` draws an index according to the given discrete
        # distribution.
        # NOTE(review): indexing `self.emission.T[state]` implies the
        # emission matrix is stored column-per-state -- confirm against the
        # emission type used.
        return nonuniform.rvs(self.emission.T[state])

    def _initialize(self, obs, init_count):
        """Perform initialization step before entering the EM algorithm.

        Parameters
        ----------
        obs : array_like, shape (`n`, `ni`, `nfeatures`)
            List of observation sequences, where `n` is the number of sequences, `ni` is
            the length of the i_th observation, and each observation has `nfeatures` features.
        init_count : int
            Restart counter

        Raises
        ------
        NotImplementedError
            This function is not implemented yet.
        """
        raise NotImplementedError
class GaussianHMM(HMM):
    """
    Hidden Markov Model with Gaussian emissions.

    Representation of a hidden Markov model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a HMM.

    Parameters
    ----------
    ncomponents : int
        Number of states in the model.
    startprob_prior : array, shape (`ncomponents`,)
        Initial state occupation prior distribution.
    startprob : array, shape (`ncomponents`,)
        Initial state occupation distribution.
    transmat_prior : array, shape (`ncomponents`, `ncomponents`)
        Matrix of prior transition probabilities between states.
    transmat : array, shape (`ncomponents`, `ncomponents`)
        Matrix of transition probabilities between states.
    emission : conditional_normal_frozen
        The conditional probability distribution used for the emission.
    emission_prior : normal_invwishart
        Initial emission parameters, a normal-inverse Wishart distribution.
    n_iter : int
        Number of iterations to perform during training, optional.
    thresh : float
        Convergence threshold, optional.
    verbose : bool
        Controls if debug information is printed to the console, optional.

    Attributes
    ----------
    ncomponents : int
        The number of hidden states.
    nfeatures : int
        Dimensionality of the Gaussian emission.
    startprob_prior : array, shape (`ncomponents`,)
        Initial state occupation prior distribution.
    startprob : array, shape (`ncomponents`,)
        Initial state occupation distribution.
    transmat_prior : array, shape (`ncomponents`, `ncomponents`)
        Matrix of prior transition probabilities between states.
    transmat : array, shape (`ncomponents`, `ncomponents`)
        Matrix of transition probabilities between states.
    emission_prior : normal_invwishart
        Initial emission parameters, a normal-inverse Wishart distribution.
    emission : cond_rv_frozen
        The conditional probability distribution used for the emission.
    mean : array, shape (`ncomponents`, `nfeatures`)
        Mean parameters for each state.
    cov : array, shape (`ncomponents`, `nfeatures`, `nfeatures`)
        Covariance parameters for each state.

    Examples
    --------
    >>> from mlpy.stats.dbn.hmm import GaussianHMM
    >>> GaussianHMM(ncomponents=2)
    ...

    .. note::
        Adapted from Matlab:

        | Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
        | Copyright (2010) Kevin Murphy and Matt Dunham
        | License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
    """

    @property
    def mean(self):
        """The mean parameters for each state

        Returns
        -------
        array, shape (`ncomponents`, `nfeatures`) :
            Mean parameters for each state.
        """
        return self.emission.mean

    @property
    def cov(self):
        """Covariance parameters for each state.

        Returns
        -------
        array, shape (`ncomponents`, `nfeatures`, `nfeatures`) :
            Covariance parameters for each state as a full matrix
        """
        return self.emission.cov

    def __init__(self, ncomponents=1, startprob_prior=None, startprob=None, transmat_prior=None, transmat=None,
                 emission_prior=None, emission=None, n_iter=None, thresh=None, verbose=None):
        # BUG FIX: the original passed (startprob_prior, transmat_prior,
        # startprob, transmat) to the base class, whose signature is
        # (ncomponents, startprob_prior, startprob, transmat_prior,
        # transmat, ...) -- so `startprob` received the transition prior and
        # `transmat_prior` received the start distribution.  The arguments
        # are now forwarded in the correct positional order (matching the
        # other subclasses).
        super(GaussianHMM, self).__init__(ncomponents, startprob_prior, startprob, transmat_prior, transmat,
                                          emission_prior, emission, n_iter, thresh, verbose)
        if emission:
            self.nfeatures = emission.dim

    def _generate_sample_from_state(self, state):
        """Generate a sample from the given current state.

        Parameters
        ----------
        state : int
            Current state.

        Returns
        -------
        sample: int
            An observation sampled for the given state
        """
        # Draw one observation from the state's Gaussian emission.
        return multivariate_normal.rvs(self.emission.mean[state], self.emission.cov[state], size=1)

    def _initialize(self, obs, init_count):
        """Perform initialization step before entering the EM algorithm.

        On the first restart a GMM is fitted to the stacked data and its
        posteriors/parameters seed the HMM; later restarts jitter the data
        with Gaussian noise for a randomized initialization.

        Parameters
        ----------
        obs : array_like, shape (`n`, `ni`, `nfeatures`)
            List of observation sequences, where `n` is the number of sequences, `ni` is
            the length of the i_th observation, and each observation has `nfeatures` features.
        init_count : int
            Restart counter
        """
        if self.emission is None or self.startprob is None or self.transmat is None:
            if init_count == 0:
                stacked_obs = self._stack_obs(obs)
                gmm = GMM(n_components=self.ncomponents, covariance_type='full')
                gmm.fit(stacked_obs)
                if self.transmat is None or self.startprob is None:
                    pz = gmm.predict_proba(stacked_obs)
                    self._init_with_mix_model(obs, pz)
                if self.emission is None:
                    self.emission = conditional_normal(gmm.means_, gmm.covars_, algorithm='map')
                    # regularize MLE: widen each covariance by the identity.
                    for i in range(self.emission.ncomponents):
                        self.emission.cov[i] += np.eye(self.emission.dim)
            else:
                stacked_obs = self._stack_obs(obs)
                mean = np.zeros((self.ncomponents, self.nfeatures))
                cov = np.zeros((self.ncomponents, self.nfeatures, self.nfeatures))
                for i in range(self.ncomponents):
                    # Jitter the data with unit Gaussian noise per component.
                    xx = stacked_obs + np.random.randn(stacked_obs.shape[0], stacked_obs.shape[1])
                    mean[i] = np.mean(xx, 0)
                    cov[i, :, :] = np.cov(xx, rowvar=0)
                if self.emission is None:
                    self.emission = conditional_normal(mean, cov, algorithm='map')
                else:
                    self.emission.mean = mean
                    self.emission.cov = cov
                self._rand_init()
        if self.emission_prior:
            self.emission.prior = self.emission_prior
class StudentHMM(HMM):
    """
    Hidden Markov Model with Student emissions

    Representation of a hidden Markov model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a HMM.

    Parameters
    ----------
    ncomponents : int
        Number of states in the model.
    startprob_prior : array, shape (`ncomponents`,)
        Initial state occupation prior distribution.
    startprob : array, shape (`ncomponents`,)
        Initial state occupation distribution.
    transmat_prior : array, shape (`ncomponents`, `ncomponents`)
        Matrix of prior transition probabilities between states.
    transmat : array, shape (`ncomponents`, `ncomponents`)
        Matrix of transition probabilities between states.
    emission : conditional_student_frozen
        The conditional probability distribution used for the emission.
    emission_prior : normal_invwishart
        Initial emission parameters, a normal-inverse Wishart distribution.
    n_iter : int
        Number of iterations to perform during training, optional.
    thresh : float
        Convergence threshold, optional.
    verbose : bool
        Controls if debug information is printed to the console, optional.

    Attributes
    ----------
    ncomponents : int
        The number of hidden states.
    nfeatures : int
        Dimensionality of the Gaussian emission.
    startprob_prior : array, shape (`ncomponents`,)
        Initial state occupation prior distribution.
    startprob : array, shape (`ncomponents`,)
        Initial state occupation distribution.
    transmat_prior : array, shape (`ncomponents`, `ncomponents`)
        Matrix of prior transition probabilities between states.
    transmat : array, shape (`ncomponents`, `ncomponents`)
        Matrix of transition probabilities between states.
    emission_prior : normal_invwishart
        Initial emission parameters, a normal-inverse Wishart distribution.
    emission : cond_rv_frozen
        The conditional probability distribution used for the emission.

    Examples
    --------
    >>> from mlpy.stats.dbn.hmm import StudentHMM
    >>> StudentHMM(ncomponents=2)
    ...

    .. note::
        Adapted from Matlab:

        | Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
        | Copyright (2010) Kevin Murphy and Matt Dunham
        | License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
    """

    def __init__(self, ncomponents=1, startprob_prior=None, startprob=None, transmat_prior=None,
                 transmat=None, emission_prior=None, emission=None, n_iter=None, thresh=None, verbose=None):
        # All behaviour lives in the HMM base; this subclass only specifies
        # the Student-t emission handling below.
        super(StudentHMM, self).__init__(ncomponents, startprob_prior, startprob, transmat_prior, transmat,
                                         emission_prior, emission, n_iter, thresh, verbose)

    def _generate_sample_from_state(self, state):
        """Generate a sample from the given current state.

        Parameters
        ----------
        state : int
            Current state.

        Returns
        -------
        sample: int
            An observation sampled for the given state
        """
        # Draw one observation from the state's multivariate Student-t.
        return multivariate_student.rvs(self.emission.mean[state], self.emission.cov[state], self.emission.df, size=1)

    def _initialize(self, obs, init_count):
        """Perform initialization step before entering the EM algorithm.

        On the first restart a Student mixture model is fitted to the
        stacked data and seeds the HMM; later restarts jitter the data with
        Gaussian noise for a randomized initialization.

        Parameters
        ----------
        obs : array_like, shape (`n`, `ni`, `nfeatures`)
            List of observation sequences, where `n` is the number of sequences, `ni` is
            the length of the i_th observation, and each observation has `nfeatures` features.
        init_count : int
            Restart counter
        """
        # Default to 10 degrees of freedom per state unless the supplied
        # emission already fixes df, in which case it is kept.
        df = 10 * np.ones(self.ncomponents)
        fix_df = False
        if self.emission and self.emission.df:
            df = self.emission.df
            fix_df = True
        if self.emission is None or self.startprob is None or self.transmat is None:
            if init_count == 0:
                stacked_obs = self._stack_obs(obs)
                model = StudentMM(ncomponents=self.ncomponents, n_iter=10)
                model.fit(stacked_obs)
                if self.transmat is None or self.startprob is None:
                    pz = model.predict_proba(stacked_obs)
                    self._init_with_mix_model(obs, pz)
                # Regularize the fitted covariances with the identity.
                cov = model.cond_proba.cov + np.eye(self.nfeatures)
                mean = model.cond_proba.mean
                if not fix_df:
                    df = model.cond_proba.df
                self.emission = conditional_student(mean, cov, df, self.emission_prior)
            else:
                stacked_obs = self._stack_obs(obs)
                mean = np.zeros((self.ncomponents, self.nfeatures))
                cov = np.zeros((self.ncomponents, self.nfeatures, self.nfeatures))
                for i in range(self.ncomponents):
                    # Jitter the data with unit Gaussian noise per component.
                    xx = stacked_obs + np.random.randn(stacked_obs.shape[0], stacked_obs.shape[1])
                    mean[i] = np.mean(xx, 0)
                    cov[i, :, :] = np.cov(xx, rowvar=0)
                if self.emission is None:
                    self.emission = conditional_student(mean, cov, df, self.emission_prior)
                else:
                    self.emission.mean = mean
                    self.emission.cov = cov
                self._rand_init()
        if self.emission_prior:
            self.emission.prior = self.emission_prior
class GMMHMM(HMM):
    """
    Hidden Markov Model with Gaussian mixture emissions.

    Representation of a hidden Markov model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a HMM.

    Parameters
    ----------
    ncomponents : int
        Number of states in the model.
    nmix : int
        Number of mixtures.
    startprob_prior : array, shape (`ncomponents`,)
        Initial state occupation prior distribution.
    startprob : array, shape (`ncomponents`,)
        Initial state occupation distribution.
    transmat_prior : array, shape (`ncomponents`, `ncomponents`)
        Matrix of prior transition probabilities between states.
    transmat : array, shape (`ncomponents`, `ncomponents`)
        Matrix of transition probabilities between states.
    emission : conditional_mix_normal_frozen
        The conditional probability distribution used for the emission.
    emission_prior : normal_invwishart
        Initial emission parameters, a normal-inverse Wishart distribution.
    n_iter : int
        Number of iterations to perform during training, optional.
    thresh : float
        Convergence threshold, optional.
    verbose : bool
        Controls if debug information is printed to the console, optional.

    Attributes
    ----------
    ncomponents : int
        The number of hidden states.
    nmix : int
        Number of mixtures.
    nfeatures : int
        Dimensionality of the Gaussian emission.
    startprob_prior : array, shape (`ncomponents`,)
        Initial state occupation prior distribution.
    startprob : array, shape (`ncomponents`,)
        Initial state occupation distribution.
    transmat_prior : array, shape (`ncomponents`, `ncomponents`)
        Matrix of prior transition probabilities between states.
    transmat : array, shape (`ncomponents`, `ncomponents`)
        Matrix of transition probabilities between states.
    emission_prior : normal_invwishart
        Initial emission parameters, a normal-inverse Wishart distribution.
    emission : cond_rv_frozen
        The conditional probability distribution used for the emission.

    Examples
    --------
    >>> from mlpy.stats.dbn.hmm import GMMHMM
    >>> GMMHMM(ncomponents=2)
    ...

    .. note::
        Adapted from Matlab:

        | Project: `Probabilistic Modeling Toolkit for Matlab/Octave <https://github.com/probml/pmtk3>`_.
        | Copyright (2010) Kevin Murphy and Matt Dunham
        | License: `MIT <https://github.com/probml/pmtk3/blob/5fefd068a2e84ae508684d3e4750bd72a4164ba0/license.txt>`_
    """

    def __init__(self, ncomponents=1, nmix=1, startprob_prior=None, startprob=None, transmat_prior=None,
                 transmat=None, emission_prior=None, emission=None, n_iter=None, thresh=None, verbose=None):
        super(GMMHMM, self).__init__(ncomponents, startprob_prior, startprob, transmat_prior, transmat,
                                     emission_prior, emission, n_iter, thresh, verbose)
        # Number of Gaussian mixture components per state.
        self.nmix = 1 if nmix is None else nmix

    def _generate_sample_from_state(self, state):
        """Generate a sample from the given current state.

        Parameters
        ----------
        state : int
            Current state.

        Returns
        -------
        sample: int
            An observation sampled for the given state

        Raises
        ------
        NotImplementedError
            This functionality is not implemented yet.
        """
        raise NotImplementedError

    def _initialize(self, obs, init_count):
        """Perform initialization step before entering the EM algorithm.

        On the first restart a GMM is fitted to the stacked data and seeds
        the HMM; later restarts jitter the data with Gaussian noise for a
        randomized initialization.

        Parameters
        ----------
        obs : array_like, shape (`n`, `ni`, `nfeatures`)
            List of observation sequences, where `n` is the number of sequences, `ni` is
            the length of the i_th observation, and each observation has `nfeatures` features.
        init_count : int
            Restart counter
        """
        if self.emission is None or self.startprob is None or self.transmat is None:
            stacked_obs = self._stack_obs(obs)
            if init_count == 0:
                gmm = GMM(n_components=self.ncomponents, covariance_type='full')
                gmm.fit(stacked_obs)
                if self.emission is None:
                    # Regularize covariances with the identity.
                    cov = np.eye(self.nfeatures) + gmm.covars_
                    # noinspection PyTypeChecker
                    # NOTE(review): np.tile of 1-D weights yields a 1-D
                    # array, so `m.shape[1]` below would raise IndexError --
                    # this likely should be np.tile(gmm.weights_,
                    # (self.ncomponents, 1)); confirm against a working run.
                    m = np.tile(gmm.weights_, self.ncomponents)
                    m = normalize(m + np.random.rand(m.shape[0], m.shape[1]), 0)
                    self.emission = conditional_mix_normal(gmm.means_, cov, m, self.emission_prior)
            else:
                stacked_obs = self._stack_obs(obs)
                mean = np.zeros((self.ncomponents, self.nfeatures))
                cov = np.zeros((self.ncomponents, self.nfeatures, self.nfeatures))
                for i in range(self.ncomponents):
                    # Jitter the data with unit Gaussian noise per component.
                    xx = stacked_obs + np.random.randn(stacked_obs.shape[0], stacked_obs.shape[1])
                    mean[i] = np.mean(xx, 0)
                    cov[i, :, :] = np.cov(xx, rowvar=0)
                m = normalize(np.random.rand(self.nmix, self.ncomponents), 0)
                if self.emission is None:
                    self.emission = conditional_mix_normal(mean, cov, m, self.emission_prior)
                else:
                    self.emission.mean = mean
                    self.emission.cov = cov
                    # NOTE(review): this stores the mixing weights on the HMM
                    # itself rather than on the emission -- possibly should
                    # be self.emission.m = m; confirm intended behaviour.
                    self.m = m
                self._rand_init()
        if self.emission_prior:
            self.emission.prior = self.emission_prior
|
evenmarbles/mlpy
|
mlpy/stats/dbn/hmm.py
|
Python
|
mit
| 43,454
|
[
"Gaussian"
] |
013ae2b6c897ce0b73cfd737b6857d01cb99305991608b96ede69e2a06eabbea
|
# -*- coding: utf-8 -*
"""
pymeasure.liveplotting
----------------------
The liveplotting module is part of the pymeasure package. It allows
parallel 1D and 2D live plotting of multiple incoming data streams. The
focus of the module is on rapid and uncomplicated displaying of data
streams. Liveplotting makes adding and removing streams as easy as
possible. Although the direct focus is not so much on pretty figures, the
access to the underlying matplotlib elements gives you almost unlimted
power.
"""
# Pymeasure
from pymeasure.indexdict import IndexDict
import abc
import tkinter as Tk
import warnings
import numpy as np
import matplotlib as mpl
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg,
NavigationToolbar2TkAgg)
from PyQt5 import QtGui, QtCore
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib.colors import Normalize, LogNorm
from queue import Queue
from threading import Event
from functools import partial
from collections import ChainMap
from matplotlib.backend_bases import key_press_handler
class Manager(object):
    """Collects pending plotting tasks and dispatches them to their graphs.

    Tasks are (graph, callable) pairs; ``update`` drains the queue, runs
    every callable, then asks each affected graph to redraw once.
    """

    def __init__(self):
        # FIFO of (graph, callable) pairs waiting to be executed.
        self.tasks = Queue()
        # Set to True once a backend has started the periodic update cycle.
        self.running = False

    def update(self):
        """Run every queued task once, then redraw each touched graph."""
        touched = []
        while not self.tasks.empty():
            graph, job = self.tasks.get()
            job()
            self.tasks.task_done()
            # Remember each graph only once so it is redrawn a single time.
            if graph not in touched:
                touched.append(graph)
        for graph in touched:
            graph._update()
class Backend(object):
    """Common window behaviour shared by the GUI-specific backends.

    Subclasses override ``show`` and ``run`` to hook the manager's update
    cycle into their GUI event loop.
    """

    def __init__(self, figure, manager, master=None):
        self.figure = figure        # matplotlib Figure to display
        self.manager = manager      # Manager owning the shared task queue
        self.master = master        # optional parent window
        self.close_events = []      # callbacks fired on close()
        self.closed = True

    @property
    def visible(self):
        # The base backend has no real window, so it is always "visible".
        return True

    def show(self, delay):
        # Start the periodic update cycle exactly once per manager.
        if not self.manager.running:
            self.run(delay)
        self.manager.running = True

    def close(self):
        # Fire all registered close callbacks (e.g. looper.stop).
        for event in self.close_events:
            event()

    def run(self, delay=None):
        # BUG FIX: show() always calls ``self.run(delay)``, but this base
        # implementation previously took no argument, so any subclass that
        # did not override run() crashed with a TypeError. ``delay`` is
        # accepted (and ignored here) for signature compatibility.
        self.manager.update()

    def on_key_event(self, event):
        # 'a' re-enables autoscaling on the axes under the cursor; all
        # other keys go to matplotlib's default key handling.
        if event.key == 'a':
            ax = event.inaxes
            ax.set_autoscalex_on(True)
            ax.set_autoscaley_on(True)
        else:
            key_press_handler(event, self.canvas, self.toolbar)
class LiveGraphTk(Backend):
    """ LiveGraph backend for Tkinter.
    """

    def show(self, delay):
        """Create the Tk window, embed the figure canvas and start updating."""
        # Build a Toplevel window, parented to master if one was given.
        if self.master is None:
            self.root = Tk.Toplevel()
            self.master = self.root
        else:
            self.root = Tk.Toplevel(self.master)
        # Embed the matplotlib canvas and its navigation toolbar.
        self.canvas = FigureCanvasTkAgg(self.figure, master=self.root)
        #self.canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
        self.toolbar = NavigationToolbar2TkAgg(self.canvas, self.root)
        self.toolbar.update()
        self.canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=True)
        self.canvas.mpl_connect('key_press_event', self.on_key_event)
        # Route the window-manager close button through our close().
        self.root.protocol("WM_DELETE_WINDOW", self.close)
        super().show(delay)

    def run(self, delay):
        """Calls the update method periodically with the delay in milliseconds.
        Decrease the delay to make the plotting smoother and increase it to
        reduce the performance cost. For live plotting the delay must fit the
        rate of the incoming data.
        Keyword arguments:
        delay -- the delay in milliseconds after each call (default 50)
        """
        super().run()
        # Re-schedule itself through Tk's event loop.
        self.root.after(delay, self.run, delay)

    @property
    def visible(self):
        """True while the window is mapped (normal or zoomed)."""
        if self.root.state() in ['normal', 'zoomed']:
            return True
        else:
            return False

    @visible.setter
    def visible(self, boolean):
        if boolean:
            self.root.deiconify()
        else:
            self.root.withdraw()
        # BUG FIX: the original additionally assigned ``self.visible = False``
        # here, which re-entered this setter and recursed infinitely.

    def close(self):
        """Run close callbacks and destroy the window."""
        super().close()
        self.master.destroy()
class LiveGraphQt4(Backend):
    """LiveGraph backend for Qt: embeds the figure in a FigureCanvasQTAgg."""

    def show(self, delay=50):
        self.canvas = FigureCanvasQTAgg(self.figure)
        self.canvas.show()
        # Drive the update cycle from a QTimer owned by the canvas.
        self.timer = QtCore.QTimer(self.canvas)
        self.timer.timeout.connect(self.run)
        # BUG FIX: the timer was created but never started, so the plot
        # never refreshed after the first draw.
        self.timer.start(delay)
        super().show(delay)

    def run(self, delay=None):
        # Called once from Backend.show() (with the delay) and then
        # periodically by the QTimer (with no argument), hence the
        # optional parameter. The timer itself handles re-scheduling.
        self.manager.update()
class LiveGraph(IndexDict):
    """Base class for different graphic backends.

    An ordered, index-addressable container of Dataplot instances sharing
    one matplotlib Figure and one task Manager.
    """

    # Most recently created graph; DataplotBase falls back to this when no
    # explicit graph is passed.
    _current_graph = None

    def __init__(self, master=None, style='pymeasure', manager=None, **fig_kwargs):
        """Initiate LivegraphBase class.

        Keyword arguments:
        master -- another LiveGraph whose manager (and window) is shared
        style -- 'pymeasure', any matplotlib style name, or None
        fig_kwargs -- forwarded to matplotlib.figure.Figure
        """
        super().__init__()
        # Share the task manager with the master graph if one is given.
        if master is None:
            self._manager = Manager()
        else:
            self._manager = master._manager
            master = master._window.root
        # Apply the plotting style.
        if style == 'pymeasure':
            mpl.style.use('ggplot')
            mpl.rcParams['grid.alpha'] = 0.7
        elif isinstance(style, str):
            mpl.style.use(style)
        else:
            pass
        self.figure = mpl.figure.Figure(**fig_kwargs)
        # Pick the window implementation matching the matplotlib backend.
        backend = mpl.get_backend()
        if backend == 'Qt4Agg':
            self._window = LiveGraphQt4(self.figure, self._manager, master)
        elif backend == 'TkAgg':
            self._window = LiveGraphTk(self.figure, self._manager, master)
        else:
            raise TypeError('backend is not supported')
        self._draw = True
        self.shape = ()
        self.close_event = None
        LiveGraph._current_graph = self

    def __setitem__(self, key, dataplot):
        """x.__setitem__(key, dataplot) <==> x['key'] = dataplot
        Add a Dataplot to Graph.
        """
        if isinstance(dataplot, DataplotBase):
            super().__setitem__(key, dataplot)
        else:
            raise TypeError('item must be a Dataplot.')

    def __getitem__(self, key):
        # BUG FIX: this was misspelled ``__getitem___`` (three underscores)
        # and delegated to the equally misspelled super method, so indexing
        # a graph raised AttributeError instead of returning the dataplot.
        return super().__getitem__(key)

    def add_task(self, function, *args, **kwargs):
        """Queue ``function(*args, **kwargs)`` for the next update cycle."""
        task = (self, partial(function, *args, **kwargs))
        self._manager.tasks.put(task)

    def dataplots(self):
        """Return a list of (index, key) pairs in Graph.
        """
        return [index_key for index_key in enumerate(self._odict.keys())]

    def add_subplot(self, *args, **kwargs):
        """Wrapper for matplotlib.figure.add_subplot(*args, **kwargs)
        """
        return self.figure.add_subplot(*args, **kwargs)

    def subplot_grid(self, ysubs, xsubs):
        """ Create a grid of subplots and return all axes in a list.
        """
        return [self.add_subplot(ysubs, xsubs, nr) for nr in range(1, ysubs * xsubs + 1)]

    @property
    def visible(self):
        return self._window.visible

    def _update(self):
        # Iterate through all subplots and apply their pending updates.
        for subplot in self.__iter__():
            if subplot._request_update.is_set():
                subplot._update()
                subplot._request_update.clear()
        # Only redraw while the window is actually visible.
        if self._window.visible:
            self.figure.canvas.draw()

    @property
    def tight_layout(self):
        return self.figure.get_tight_layout()

    @tight_layout.setter
    def tight_layout(self, boolean):
        # Check for bool type
        if not isinstance(boolean, bool):
            raise TypeError('not bool')
        self.add_task(self.figure.set_tight_layout, boolean)

    def snapshot(self, filename):
        """Make a snapshot and save it as filename.
        """
        def task():
            self._update()
            self.figure.savefig(filename)
        self.add_task(task)

    def connect_looper(self, looper, shape=True):
        """Stop the looper when the window closes; optionally adopt its shape."""
        self._window.close_events.append(looper.stop)
        if shape:
            self.shape = looper.shape

    def close(self):
        self._window.close()

    def show(self, *, delay=50):
        self._window.show(delay)
class DataplotBase(object, metaclass=abc.ABCMeta):
    """Abstract base for live dataplots (1d and 2d).

    Resolves the owning graph and target axes and provides the update
    request flag polled by LiveGraph._update.
    """

    def __init__(self, axes, graph=None):
        """Initiate DataplotBase class.

        axes -- a matplotlib SubplotBase, an integer like 221, or a
                sequence like (2, 2, 1) describing a subplot position
        graph -- owning LiveGraph; defaults to the most recently created
                 graph (LiveGraph._current_graph)
        """
        if graph:
            self._graph = graph
        else:
            # Fall back to the last graph that was constructed.
            self._graph = LiveGraph._current_graph
        # Check for integer like 221 and handle sequences (2,2,1)
        if not isinstance(axes, mpl.axes.SubplotBase):
            if isinstance(axes, int):
                # 221 -> (2, 2, 1)
                axes = tuple([int(c) for c in str(axes)])
            axes = self._graph.add_subplot(*axes)
        self._axes = axes
        # Set by _add_data and friends; polled (and cleared) by the graph.
        self._request_update = Event()

    @abc.abstractmethod
    def add_data(self):
        # Subclasses queue incoming data through the graph's task queue.
        pass

    @abc.abstractmethod
    def _update(self):
        # Subclasses push buffered data into their matplotlib artists.
        pass
class LabelConf1d(object):
    """Configuration proxy for the title and axis labels of a 1d plot.

    Reads query the axes directly; writes are queued on the graph so they
    run safely inside the update cycle.
    """

    def __init__(self, graph, axes):
        self._graph = graph
        self._axes = axes

    @property
    def title(self):
        """Title string of the axes."""
        return self._axes.get_title()

    @title.setter
    def title(self, string):
        self._graph.add_task(self._axes.set_title, string)

    @property
    def xaxis(self):
        """Label string of the x-axis."""
        return self._axes.get_xlabel()

    @xaxis.setter
    def xaxis(self, string):
        self._graph.add_task(self._axes.set_xlabel, string)

    @property
    def yaxis(self):
        """Label string of the y-axis."""
        return self._axes.get_ylabel()

    @yaxis.setter
    def yaxis(self, string):
        self._graph.add_task(self._axes.set_ylabel, string)
class LineConf(object):
    """Configuration proxy for the line appearance of a Line2D.

    Every setter validates its input first so a bad value raises in the
    caller instead of crashing the running liveplot, then queues the
    matplotlib call on the graph's task queue.
    """

    def __init__(self, graph, line):
        self._graph = graph
        self._line = line

    @property
    def style(self):
        """Matplotlib linestyle string."""
        return self._line.get_linestyle()

    @style.setter
    def style(self, linestyle):
        valid = list(self._line.lineStyles.keys())
        if linestyle not in valid:
            raise ValueError('not a valid linestyle')
        self._graph.add_task(self._line.set_linestyle, linestyle)

    @property
    def draw(self):
        """Matplotlib drawstyle string."""
        return self._line.get_drawstyle()

    @draw.setter
    def draw(self, drawstyle):
        if drawstyle not in self._line.drawStyleKeys:
            raise ValueError('not a valid drawstyle')
        self._graph.add_task(self._line.set_drawstyle, drawstyle)

    @property
    def color(self):
        """Line color."""
        return self._line.get_color()

    @color.setter
    def color(self, color):
        if not mpl.colors.is_color_like(color):
            raise ValueError('not a valid color')
        self._graph.add_task(self._line.set_color, color)

    @property
    def width(self):
        """Line width in points."""
        return self._line.get_linewidth()

    @width.setter
    def width(self, linewidth):
        # float() raises for non-numeric input before anything is queued.
        self._graph.add_task(self._line.set_linewidth, float(linewidth))
class MarkerConf(object):
    """Configuration proxy for the marker appearance of a Line2D."""

    def __init__(self, graph, line):
        self._graph = graph
        self._line = line

    @property
    def style(self):
        """Marker style, or None when no marker is drawn."""
        marker = self._line.get_marker()
        # matplotlib reports "no marker" as the string 'None'.
        return None if marker == 'None' else marker

    @style.setter
    def style(self, marker):
        # Validate before queueing so a bad value cannot crash the liveplot.
        if marker in [None, False]:
            marker = 'None'
        elif marker not in list(self._line.markers.keys()):
            raise ValueError('not a valid marker')
        self._graph.add_task(self._line.set_marker, marker)

    @property
    def color(self):
        """[facecolor, edgecolor] pair."""
        return [self.facecolor, self.edgecolor]

    @color.setter
    def color(self, color):
        if not mpl.colors.is_color_like(color):
            raise ValueError('not a valid color')
        # Apply the same color to face and edge.
        self.facecolor = color
        self.edgecolor = color

    @property
    def facecolor(self):
        return self._line.get_markerfacecolor()

    @facecolor.setter
    def facecolor(self, color):
        if not mpl.colors.is_color_like(color):
            raise ValueError('not a valid color')
        self._graph.add_task(self._line.set_markerfacecolor, color)

    @property
    def edgecolor(self):
        return self._line.get_markeredgecolor()

    @edgecolor.setter
    def edgecolor(self, color):
        if not mpl.colors.is_color_like(color):
            raise ValueError('not a valid color')
        self._graph.add_task(self._line.set_markeredgecolor, color)

    @property
    def size(self):
        """Marker size in points."""
        return self._line.get_markersize()

    @size.setter
    def size(self, markersize):
        # float() raises for non-numeric input before anything is queued.
        self._graph.add_task(self._line.set_markersize, float(markersize))
class XaxisConf(object):
    """Configuration proxy for the x-axis of a Dataplot1d.

    Reads query the matplotlib axes directly; writes are queued as graph
    tasks so they run inside the update cycle.
    """

    def __init__(self, graph, axes):
        self._graph = graph
        self._axes = axes
        self._scale = 'linear'

    @property
    def autoscale(self):
        """Whether the x view limits follow the data automatically."""
        return self._axes.get_autoscalex_on()

    @autoscale.setter
    def autoscale(self, boolean):
        if not isinstance(boolean, bool):
            raise TypeError('not bool')
        self._graph.add_task(self._axes.set_autoscalex_on, boolean)

    @property
    def lim_left(self):
        """Left x view limit."""
        return self._axes.get_xlim()[0]

    @lim_left.setter
    def lim_left(self, limit):
        if not isinstance(limit, (int, float)):
            raise TypeError('not int or float')
        if limit == self.lim_right:
            raise ValueError('left and right limits are identical.')
        self._graph.add_task(self._axes.set_xlim, left=limit)

    @property
    def lim_right(self):
        """Right x view limit."""
        return self._axes.get_xlim()[1]

    @lim_right.setter
    def lim_right(self, limit):
        if not isinstance(limit, (int, float)):
            raise TypeError('not int or float')
        if limit == self.lim_left:
            raise ValueError('left and right limits are identical.')
        self._graph.add_task(self._axes.set_xlim, right=limit)

    @property
    def inverted(self):
        """True when the axis runs right-to-left."""
        return self.lim_left > self.lim_right

    @inverted.setter
    def inverted(self, boolean):
        # Nothing to do when the axis already has the requested orientation.
        if bool(boolean) == self.inverted:
            return
        autoscale = self.autoscale

        def task():
            # Inverting disables autoscaling, so restore the previous state.
            self._axes.invert_xaxis()
            self._axes.set_autoscalex_on(autoscale)

        self._graph.add_task(task)

    @property
    def ticks(self):
        return self._axes.get_xticks()

    @ticks.setter
    def ticks(self, ticks):
        self._graph.add_task(self._axes.set_xticks, ticks)

    @property
    def scale(self):
        """Current x scale string ('linear' or 'log')."""
        return self._axes.get_xscale()

    @property
    def log(self):
        """True when the x-axis is log scaled."""
        return self.scale == 'log'

    @log.setter
    def log(self, boolean):
        if not isinstance(boolean, bool):
            raise TypeError('not bool')
        self._graph.add_task(self._axes.set_xscale, 'log' if boolean else 'linear')
class YaxisConf(object):
    """Configuration proxy for the y-axis of a Dataplot1d.

    Mirror image of XaxisConf for the vertical axis: reads query the axes,
    writes are queued as graph tasks.
    """

    def __init__(self, graph, axes):
        self._graph = graph
        self._axes = axes
        self._scale = 'linear'

    @property
    def autoscale(self):
        """Whether the y view limits follow the data automatically."""
        return self._axes.get_autoscaley_on()

    @autoscale.setter
    def autoscale(self, boolean):
        if not isinstance(boolean, bool):
            raise TypeError('not bool')
        self._graph.add_task(self._axes.set_autoscaley_on, boolean)

    @property
    def lim_bottom(self):
        """Bottom y view limit."""
        return self._axes.get_ylim()[0]

    @lim_bottom.setter
    def lim_bottom(self, limit):
        if not isinstance(limit, (int, float)):
            raise TypeError('not int or float')
        if limit == self.lim_top:
            raise ValueError('bottom and top limits are identical.')
        self._graph.add_task(self._axes.set_ylim, bottom=limit)

    @property
    def lim_top(self):
        """Top y view limit."""
        return self._axes.get_ylim()[1]

    @lim_top.setter
    def lim_top(self, limit):
        if not isinstance(limit, (int, float)):
            raise TypeError('not int or float')
        if limit == self.lim_bottom:
            raise ValueError('left and right limits are identical.')
        self._graph.add_task(self._axes.set_ylim, top=limit)

    @property
    def inverted(self):
        """True when the axis runs top-to-bottom."""
        return self.lim_bottom > self.lim_top

    @inverted.setter
    def inverted(self, boolean):
        # Nothing to do when the axis already has the requested orientation.
        if bool(boolean) == self.inverted:
            return
        autoscale = self.autoscale

        def task():
            # Inverting disables autoscaling, so restore the previous state.
            self._axes.invert_yaxis()
            self._axes.set_autoscaley_on(autoscale)

        self._graph.add_task(task)

    @property
    def ticks(self):
        return self._axes.get_yticks()

    @ticks.setter
    def ticks(self, ticks):
        self._graph.add_task(self._axes.set_yticks, ticks)

    @property
    def scale(self):
        """Current y scale string ('linear' or 'log')."""
        return self._axes.get_yscale()

    @property
    def log(self):
        """True when the y-axis is log scaled."""
        return self.scale == 'log'

    @log.setter
    def log(self, boolean):
        if not isinstance(boolean, bool):
            raise TypeError('not bool')
        self._graph.add_task(self._axes.set_yscale, 'log' if boolean else 'linear')
class Dataplot1d(DataplotBase):
    """Live line plot: streams (x, y) points into a single Line2D."""

    def __init__(self, axes, *plt_args, length=None, continuously=False, graph=None, **plt_kwargs):
        """Initiate Dataplot1d class.

        length -- number of displayed points; defaults to the last entry
                  of the owning graph's shape
        continuously -- keep the newest ``length`` points instead of
                        clearing the plot when ``length`` is reached
        """
        super().__init__(axes, graph)
        # Create empty line instance for axes
        self._line, = self._axes.plot([], [], *plt_args, **plt_kwargs)
        # Attributes for displayed number of points
        # NOTE(review): with length=None this reads self._graph.shape[-1];
        # a graph whose shape is still () raises IndexError here — confirm
        # callers always set a shape or pass length explicitly.
        if length is None:
            self._length = self._graph.shape[-1]
        else:
            self._length = length
        self._continuously = continuously
        # Create list to contain plotting data
        self._xdata = list()
        self._ydata = list()
        # Dataplot1d Configs
        self.line = LineConf(self._graph, self._line)
        self.marker = MarkerConf(self._graph, self._line)
        self.xaxis = XaxisConf(self._graph, self._axes)
        self.yaxis = YaxisConf(self._graph, self._axes)
        self.label = LabelConf1d(self._graph, self._axes)
        self.switch_xy = False

    @property
    def length(self):
        """Length of displayed datapoints.
        If continuously plotting is off Dataplot1d gets cleared when the number
        of added datapoints matches length. Otherwise this is the maximum
        number of displayed datapoints.
        """
        return self._length

    @property
    def continuously(self):
        """Set continuously plotting True or False.
        If continuously plotting is True Dataplot1d gets cleared when the
        number of added datapoints matches length.
        """
        return self._continuously

    @property
    def switch_xy(self):
        # When True, x data is drawn on the y-axis and vice versa.
        return self._xy_switch

    @switch_xy.setter
    def switch_xy(self, boolean):
        self._xy_switch = bool(boolean)

    def add_data(self, xdata, ydata):
        """Add a list of data to the plot.
        """
        # Put the incoming data into the data exchange queue.
        # The [:] copies protect against the caller mutating the lists
        # before the task runs.
        self._graph.add_task(self._add_data, xdata[:], ydata[:])

    def _add_data(self, xdata, ydata):
        # Runs inside the update cycle; maintains the display buffers.
        if self._length:
            self._xdata.extend(xdata)
            self._ydata.extend(ydata)
            # Clear if data is too long
            while len(self._xdata) > self._length:
                # Remove oldest datapoints if plotting continuously.
                if self._continuously:
                    del self._xdata[:-self._length]
                    del self._ydata[:-self._length]
                # Clear all displayed datapoints otherwise.
                else:
                    del self._xdata[:self._length]
                    del self._ydata[:self._length]
        else:
            # No length limit configured: replace the buffers wholesale.
            self._xdata = xdata
            self._ydata = ydata
        # Ask the graph to redraw this plot on its next cycle.
        self._request_update.set()

    def _update(self):
        """Update the dataplot with the incoming data.
        Process the added data, handle the maximum number of displayed
        datapoints and manage view limits.
        The _update method is called by the graph's update method and should
        not be called directly.
        """
        xdata = np.array(self._xdata)
        ydata = np.array(self._ydata)
        # Prepare displayed xdata
        if self.xaxis.log:
            # Log scale cannot display non-positive values.
            xdata = np.abs(xdata)
        # Prepare displayed ydata
        if self.yaxis.log:
            ydata = np.abs(ydata)
        # Update displayed data.
        if not self.switch_xy:
            self._line.set_data(xdata, ydata)
        else:
            self._line.set_data(ydata, xdata)
        # Recompute the data limits.
        self._axes.relim()
        # Rescale the view limits using the previously computed data limit.
        try:
            self._axes.autoscale_view()
        except ValueError:
            # Autoscaling can fail e.g. on log axes with degenerate data.
            pass
class ImageConf(object):
    """Configuration proxy for the AxesImage of a Dataplot2d."""

    def __init__(self, graph, image):
        self._graph = graph
        self._image = image
        # While True, Dataplot2d recomputes the extent on every update.
        self._auto_extent = True

    @property
    def interpolation(self):
        """Set the interpolation method the image uses when resizing.
        ACCEPTS: ['nearest' | 'bilinear' | 'bicubic' | 'spline16' |
        'spline36' | 'hanning' | 'hamming' | 'hermite' | 'kaiser' |
        'quadric' | 'catrom' | 'gaussian' | 'bessel' | 'mitchell' |
        'sinc' | 'lanczos' | 'none' |]
        """
        return self._image.get_interpolation()

    @interpolation.setter
    def interpolation(self, interpolation):
        # Validate before queueing so a bad value cannot crash the liveplot.
        if interpolation not in self._image._interpd:
            raise ValueError('Illegal interpolation string')
        self._graph.add_task(self._image.set_interpolation, interpolation)

    @property
    def auto_extent(self):
        """Whether the extent follows the data automatically."""
        return self._auto_extent

    @auto_extent.setter
    def auto_extent(self, boolean):
        if not isinstance(boolean, bool):
            raise TypeError('not bool')
        self._auto_extent = boolean

    @property
    def extent(self):
        return self._image.get_extent()

    @extent.setter
    def extent(self, extent):
        # Setting an explicit extent disables the automatic handling.
        self._auto_extent = False
        self._graph.add_task(self._image.set_extent, extent)
class ColorbarConf(object):
    """Configuration proxy for the colorbar of a Dataplot2d."""

    def __init__(self, graph, image, colorbar):
        self._graph = graph
        self._image = image
        self._colorbar = colorbar
        # Bookkeeping only; applying the norm is left to Dataplot2d.
        self._scale = 'linear'

    @property
    def colormap_names(self):
        """Alphabetically sorted list of all registered colormap names."""
        return sorted(mpl.cm.cmap_d.keys())

    @property
    def colormap(self):
        """Name of the colormap currently applied to the image."""
        return self._image.get_cmap().name

    @colormap.setter
    def colormap(self, colormap):
        # Validate before queueing so a bad value cannot crash the liveplot.
        if colormap not in self.colormap_names:
            raise TypeError('colormap is not valid')
        self._graph.add_task(self._image.set_cmap, colormap)

    @property
    def scale(self):
        return self._scale

    @property
    def log(self):
        """Set colorbar to logarithmic scale.
        """
        return self._scale == 'log'

    @log.setter
    def log(self, log):
        if not isinstance(log, bool):
            raise TypeError('is not bool')
        self._scale = 'log' if log else 'linear'
class LabelConf2d(LabelConf1d):
    """Label configuration for 2d plots: adds the colorbar (z) label."""

    def __init__(self, graph, axes, colorbar):
        super().__init__(graph, axes)
        self._colorbar = colorbar

    @property
    def zaxis(self):
        """Label string of the colorbar."""
        return self._colorbar._label

    @zaxis.setter
    def zaxis(self, string):
        self._graph.add_task(self._colorbar.set_label, string)
class Dataplot2d(DataplotBase):
    """Image-style (2d) live plot.

    Incoming points are collected into traces of fixed ``length``; each
    completed trace is stacked as one row of the displayed image.
    """

    def __init__(self, axes, *imshow, length=None, colorbar=True, graph=None, **kw_imshow):
        super().__init__(axes, graph)
        # Number of points per line; fall back to the graph's shape.
        if length is None:
            self._length = self._graph.shape[-1]
        else:
            self._length = length
        self._exchange_queue = Queue()
        self._trace = []                 # points of the line being filled
        self._data = np.array([[]])      # completed lines, one per row
        # Draw an empty image
        defaults = {'cmap': 'hot', 'aspect': 'auto'}
        kw_imshow = ChainMap(kw_imshow, defaults)
        self._image = self._axes.imshow([[np.nan]], *imshow, **kw_imshow)
        if colorbar is True:
            self.add_colorbar()
        #self._label_conf = LabelConf2d(self._graph, self._axes, self._colorbar)
        #self._colorbar_conf = ColorbarConf(self._graph, self._image, self._colorbar)
        self.image = ImageConf(self._graph, self._image)
        self._diff = False

    def add_colorbar(self, *colorbar, **kw_colorbar):
        # Create colorbar and ignore the warning caused because the figure
        # has only one value.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self._colorbar = self._graph.figure.colorbar(self._image, ax=self._axes,
                                                         *colorbar, **kw_colorbar)

    @property
    def diff(self):
        """Differentiation step: False for raw data, or a positive int n to
        display data[:, n:] - data[:, :-n]."""
        return self._diff

    @diff.setter
    def diff(self, diff):
        if int(diff) > 0:
            self._diff = int(diff)
        elif diff is False:
            self._diff = False
        else:
            raise ValueError('diff musste be True, False or integer greater 0')

    def add_data(self, data):
        """Add a list of data to the plot.

        ``data`` may also be a Dataplot1d, whose current y-buffer is taken.
        """
        if isinstance(data, Dataplot1d):
            self._graph.add_task(self._add_data, data)
        else:
            # Copy so the caller can keep mutating its list.
            self._graph.add_task(self._add_data, data[:])

    def _add_data(self, data):
        if isinstance(data, Dataplot1d):
            self._trace.extend(data._ydata)
        else:
            self._trace.extend(data)
        # Handle the maximum number of displayed points: every full trace
        # becomes one image row.
        while len(self._trace) >= self._length:
            trace = self._trace[:self._length]
            del self._trace[:self._length]
            if self._data.size:
                self._data = np.vstack((self._data, trace))
            else:
                self._data = np.array([trace])
            self._request_update.set()

    def new_line(self):
        """Finish the current line so the next data starts a new row."""
        # BUG FIX: this previously scheduled self._clear, which erased all
        # accumulated data instead of starting a new line.
        self._graph.add_task(self._new_line)

    def _new_line(self):
        # BUG FIX: the original called ``self._data.add_data([])``, which
        # does not exist on an ndarray and raised AttributeError. Pad the
        # unfinished trace with NaN so it is flushed as a complete row.
        # (Intended semantics inferred from new_line's name — TODO confirm
        # against upstream pymeasure.)
        if self._trace:
            self._add_data([np.nan] * (self._length - len(self._trace)))
        else:
            self._request_update.set()

    def clear(self):
        """Schedule removal of all displayed data."""
        self._graph.add_task(self._clear)

    def _clear(self):
        self._data = np.array([[]])
        self._request_update.set()

    def _update(self):
        # Prepare displayed data
        data = self._data.copy()
        # Differentiate data along each row if requested.
        if self.diff:
            data = data[:, self.diff:] - data[:, :-self.diff]
        # Take absolute value if log scaled
        #if self.colorbar.log:
        #    data[data <= 0] = np.nan
        #if self.colorbar.scale == 'linear':
        #    self._colorbar.set_norm(Normalize())
        #elif self.colorbar.scale == 'log':
        #    self._colorbar.set_norm(LogNorm())
        # Set image data
        try:
            self._image.set_data(data)
            # Extent image automatically
            if self.image.auto_extent:
                extent = [0, self._length, len(data), 0]
                self._image.set_extent(extent)
        except TypeError:
            # No valid rows yet: show the NaN placeholder without ticks.
            self._image.set_data([[np.nan]])
            self._axes.set_xticks([])
            self._axes.set_yticks([])
        # Rescale the image
        try:
            self._image.autoscale()
        except ValueError:
            pass
|
t--wagner/pymeasure
|
liveplot.py
|
Python
|
gpl-3.0
| 27,901
|
[
"Gaussian"
] |
1c862f470a8b151d2bb2ab5e2502fc7069c23a0586ccd8392342e9945e63087c
|
import copy
import pytest
from .utils import *
import numpy as np
import psi4
# Apply the "quick" marker to every test in this module.
pytestmark = pytest.mark.quick

# Variables exactly as the tests enter them: keys in mixed case, values a
# mix of floats, psi4 Matrices and raw numpy arrays.
_vars_entered = {
    'VAR A': 4.0,
    'VaR B': -4.0,
    'MATVAR A': psi4.core.Matrix.from_array(np.arange(6).reshape(2, 3)),
    'MatvaR B': psi4.core.Matrix.from_array(np.arange(3).reshape(1, 3)),
    'NPVAR A': np.arange(8).reshape(2, 4),
    'NpvaR B': np.arange(4).reshape(1, 4),
}

# The same variables as psi4 is expected to store them: keys upper-cased
# and numpy arrays converted to psi4 Matrices.
_vars_stored = {
    'VAR A': 4.0,
    'VAR B': -4.0,
    'MATVAR A': psi4.core.Matrix.from_array(np.arange(6).reshape(2, 3)),
    'MATVAR B': psi4.core.Matrix.from_array(np.arange(3).reshape(1, 3)),
    'NPVAR A': psi4.core.Matrix.from_array(np.arange(8).reshape(2, 4)),
    'NPVAR B': psi4.core.Matrix.from_array(np.arange(4).reshape(1, 4)),
}
@pytest.fixture
def pe_wfn_qcvars():
    """He/cc-pVDZ wavefunction with _vars_entered set both on the psi4
    globals and on the wavefunction; global variables are cleared first."""
    psi4.core.clean_variables()
    he = psi4.geometry('He')
    wfn = psi4.core.Wavefunction.build(he, 'cc-pvdz')
    # Mirror every entered variable into both stores.
    for pv, pvv in _vars_entered.items():
        psi4.core.set_variable(pv, pvv)
        wfn.set_variable(pv, pvv)
    return wfn
# can't use compare_dicts with symmetry psi4.Matrix
def _compare_qcvars(ref, expected, decimal, label):
    """Compare two qcvar dicts, dispatching per value on scalar vs Matrix."""
    assert set(ref.keys()) == set(expected.keys())
    for key, value in ref.items():
        if isinstance(value, psi4.core.Matrix):
            comparator = compare_matrices
        else:
            comparator = compare_values
        assert comparator(value, expected[key], decimal, label)
@pytest.mark.parametrize("mode", [
    ("globals"),
    ("wfn"),
])
def test_variables(mode, pe_wfn_qcvars):
    """variables() returns a snapshot: a later set_variable call must not
    mutate a previously returned dict."""
    obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
    subject = obj.variables()
    _compare_qcvars(_vars_stored, subject, 8, '')
    # Overwriting a variable afterwards must leave the snapshot intact.
    obj.set_variable('npvar A', np.zeros(3).reshape(1, 3))
    _compare_qcvars(_vars_stored, subject, 8, '')
@pytest.mark.parametrize("mode", [
    ("globals"),
    ("wfn"),
])
def test_set_variable_overwrite(mode, pe_wfn_qcvars):
    """Overwriting a key with the same kind is allowed; shadowing a scalar
    key with an array (or vice versa) raises ValidationError."""
    obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
    # fine to overwrite keys
    key = 'var D'
    val = 3.3
    val2 = 4.4
    obj.set_variable(key, val)
    assert compare_values(val, obj.variable(key), 8, tnm())
    obj.set_variable(key, val2)
    assert compare_values(val2, obj.variable(key), 8, tnm())
    # fine to overwrite array keys
    key = 'matvar D'
    mat = psi4.core.Matrix.from_array(np.arange(4).reshape(2, 2))
    mat2 = psi4.core.Matrix.from_array(np.arange(6).reshape(3, 2))
    obj.set_variable(key, mat)
    assert compare_matrices(mat, obj.variable(key), 8, tnm())
    obj.set_variable(key, mat2)
    assert compare_matrices(mat2, obj.variable(key), 8, tnm())
    # not fine to shadow keys with both types
    with pytest.raises(psi4.ValidationError) as err:
        obj.set_variable('vAr D', mat)
    assert 'already a scalar variable' in str(err.value)
    with pytest.raises(psi4.ValidationError) as err:
        obj.set_variable('matvAr D', val)
    assert 'already an array variable' in str(err.value)
@pytest.mark.parametrize("mode", [
    ("globals"),
    ("wfn"),
])
def test_variable_none(mode, pe_wfn_qcvars):
    """Accessing an unset variable raises KeyError."""
    obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
    with pytest.raises(KeyError):
        obj.variable('var f')
@pytest.mark.parametrize("mode,key", [
    pytest.param('globals', 'vAR B', id='globals scal'),
    pytest.param('globals', 'MatvAR B', id='globals mat'),
    pytest.param('globals', 'NpvAR B', id='globals np'),
    pytest.param('wfn', 'vAR B', id='wfn scal'),
    pytest.param('wfn', 'MatvAR B', id='wfn mat'),
    pytest.param('wfn', 'NpvAR B', id='wfn np'),
])
def test_variable(mode, key, pe_wfn_qcvars, request):
    """variable() lookup is case-insensitive for scalar, Matrix and
    numpy-entered variables on both globals and wavefunction."""
    obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
    # Choose the comparison helper from the parametrized test id.
    if 'scal' in request.node.name:
        compare = compare_values
    else:
        compare = compare_matrices
    assert compare(_vars_stored[key.upper()], obj.variable(key), 8, tnm())
@pytest.mark.parametrize("mode", [
    ("globals"),
    ("wfn"),
])
def test_variable_mem_scal(mode, pe_wfn_qcvars):
    """Scalar variables are stored and returned by value: mutating the
    local after set, or the accessed value after get, leaves the store
    unchanged."""
    obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
    key = 'VaR C'
    ref = 3.3
    val = 3.3
    obj.set_variable(key, val)
    assert compare_values(ref, val, 8, tnm())
    assert compare_values(ref, obj.variable(key), 8, tnm())
    # Mutating the value that was set must not affect the stored copy.
    val *= 2
    assert compare_values(ref, obj.variable(key), 8, tnm())
    # Mutating an accessed value must not affect the stored copy either.
    accessed = obj.variable(key)
    accessed *= 3
    assert compare_values(ref, obj.variable(key), 8, tnm())
@pytest.mark.parametrize("mode", [
    ("globals"),
    ("wfn"),
])
def test_variable_mem_mat(mode, pe_wfn_qcvars):
    """Matrix variables are copied on set and on get: in-place scaling of
    either the input or the returned Matrix leaves the store unchanged."""
    obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
    key = 'MaTvAr C'
    ref = psi4.core.Matrix.from_array(np.arange(4).reshape(2, 2))
    val = psi4.core.Matrix.from_array(np.arange(4).reshape(2, 2))
    obj.set_variable(key, val)
    assert compare_matrices(ref, val, 8, tnm())
    assert compare_matrices(ref, obj.variable(key), 8, tnm())
    # Scaling the set Matrix in place must not affect the stored copy.
    val.scale(2)
    assert compare_matrices(ref, obj.variable(key), 8, tnm())
    # Scaling an accessed Matrix must not affect the stored copy either.
    accessed = obj.variable(key)
    accessed.scale(3)
    assert compare_matrices(ref, obj.variable(key), 8, tnm())
@pytest.mark.parametrize("mode", [
    ("globals"),
    ("wfn"),
])
def test_variable_mem_np(mode, pe_wfn_qcvars):
    """Numpy arrays set as variables are stored as psi4 Matrices, copied
    on set and on get."""
    obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
    key = 'npVaR C'
    ref = np.arange(4).reshape(2, 2)
    val = np.arange(4).reshape(2, 2)
    obj.set_variable(key, val)
    assert compare_arrays(ref, val, 8, tnm())
    # The stored form is a psi4 Matrix, so compare against the converted ref.
    ref = psi4.core.Matrix.from_array(ref)
    assert compare_matrices(ref, obj.variable(key), 8, tnm())
    # Mutating the input array after set must not affect the stored copy.
    val *= 2
    assert compare_matrices(ref, obj.variable(key), 8, tnm())
    # Scaling an accessed Matrix must not affect the stored copy either.
    accessed = obj.variable(key)
    accessed.scale(3)
    assert compare_matrices(ref, obj.variable(key), 8, tnm())
@pytest.mark.parametrize("mode,tkey,fkey", [
    pytest.param('globals', 'var A', 'var C', id='globals scal'),
    pytest.param('globals', 'matvar A', 'var C', id='globals mat'),
    pytest.param('globals', 'npvar A', 'var C', id='globals np'),
    pytest.param('wfn', 'var A', 'var C', id='wfn scal'),
    pytest.param('wfn', 'matvar A', 'var C', id='wfn mat'),
    pytest.param('wfn', 'npvar A', 'var C', id='wfn np'),
])
def test_has_del_variable_scal(mode, tkey, fkey, pe_wfn_qcvars):
    """has_variable reflects presence; del_variable removes a key and is a
    silent no-op on a missing key."""
    obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
    assert obj.has_variable(tkey)
    assert not obj.has_variable(fkey)
    obj.del_variable(tkey)
    assert not obj.has_variable(tkey)
    # Deleting a nonexistent key must not raise.
    obj.del_variable(fkey)
# <<< TODO Deprecated! Delete in Psi4 v1.4 >>>
def test_deprecated_core_get_variable(pe_wfn_qcvars):
    """get_variable is a FutureWarning-deprecated alias of variable()."""
    with pytest.warns(FutureWarning) as err:
        subject = psi4.core.get_variable('vAR B')
    assert compare_values(_vars_stored['VAR B'], subject, 8, tnm())
def test_deprecated_core_get_variables(pe_wfn_qcvars):
    """get_variables (deprecated) returns only the scalar variables."""
    with pytest.warns(FutureWarning) as err:
        subject = psi4.core.get_variables()
    scals = {k: v for k, v in _vars_stored.items() if k.startswith('VAR ')}
    _compare_qcvars(scals, subject, 8, tnm())
def test_deprecated_core_get_array_variable(pe_wfn_qcvars):
    """get_array_variable (deprecated) returns a single array variable."""
    with pytest.warns(FutureWarning) as err:
        subject = psi4.core.get_array_variable('MatvAR B')
    assert compare_matrices(_vars_stored['MATVAR B'], subject, 8, tnm())
def test_deprecated_core_get_array_variables(pe_wfn_qcvars):
    """get_array_variables (deprecated) returns only the array variables."""
    with pytest.warns(FutureWarning) as err:
        subject = psi4.core.get_array_variables()
    arrs = {k: v for k, v in _vars_stored.items() if not k.startswith('VAR ')}
    _compare_qcvars(arrs, subject, 8, tnm())
def test_deprecated_wfn_get_variable(pe_wfn_qcvars):
    """Wavefunction.get_variable is a deprecated alias of variable()."""
    with pytest.warns(FutureWarning) as err:
        subject = pe_wfn_qcvars.get_variable('vAR B')
    assert compare_values(_vars_stored['VAR B'], subject, 8, tnm())
def test_deprecated_wfn_get_array(pe_wfn_qcvars):
    """Wavefunction.get_array is a deprecated alias of variable()."""
    with pytest.warns(FutureWarning) as err:
        subject = pe_wfn_qcvars.get_array('MatvAR B')
    assert compare_matrices(_vars_stored['MATVAR B'], subject, 8, tnm())
def test_deprecated_wfn_set_array(pe_wfn_qcvars):
    """Wavefunction.set_array is a deprecated alias of set_variable()."""
    mat = psi4.core.Matrix.from_array(np.arange(4).reshape(2, 2))
    with pytest.warns(FutureWarning) as err:
        pe_wfn_qcvars.set_array('matvar D', mat)
    assert compare_matrices(mat, pe_wfn_qcvars.variable('MATvar D'), 8, tnm())
def test_deprecated_wfn_arrays(pe_wfn_qcvars):
    """Wavefunction.arrays (deprecated) returns only the array variables."""
    with pytest.warns(FutureWarning) as err:
        subject = pe_wfn_qcvars.arrays()
    arrs = {k: v for k, v in _vars_stored.items() if not k.startswith('VAR ')}
    _compare_qcvars(arrs, subject, 8, tnm())
|
jgonthier/psi4
|
tests/pytests/test_qcvars.py
|
Python
|
lgpl-3.0
| 8,520
|
[
"Psi4"
] |
14841f4add67f0d1d2e96f790e1502c7403916d4ddff93f93e019abf7ce37e39
|
# Copyright 2017 the GPflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Sequence, Type
import numpy as np
import pytest
from numpy.testing import assert_allclose
import gpflow
from gpflow.base import TensorType
from gpflow.config import default_int
from gpflow.inducing_variables import InducingPoints
from gpflow.mean_functions import (
Additive,
Constant,
Linear,
MeanFunction,
Product,
SwitchedMeanFunction,
Zero,
)
rng = np.random.RandomState(99021)
class Datum:
    # Shared test dimensions: 3-d inputs, 2-d outputs.
    input_dim, output_dim = 3, 2
    # Training, test and inducing point counts.
    N, Ntest, M = 20, 30, 10
# Representative basic mean functions: Zero, a random Linear and a random
# Constant, all sized for Datum and drawn from the module-level seeded rng.
_mean_functions = [
    Zero(),
    Linear(
        A=rng.randn(Datum.input_dim, Datum.output_dim),
        b=rng.randn(Datum.output_dim, 1).reshape(-1),
    ),
    Constant(c=rng.randn(Datum.output_dim, 1).reshape(-1)),
]
@pytest.mark.parametrize("mean_function_1", _mean_functions)
@pytest.mark.parametrize("mean_function_2", _mean_functions)
@pytest.mark.parametrize("operation", ["+", "*"])
def test_mean_functions_output_shape(
    mean_function_1: MeanFunction, mean_function_2: MeanFunction, operation: str
) -> None:
    """
    Test the output shape for basic and compositional mean functions, also
    check that the combination of mean functions returns the correct class
    """
    # Use the module-level seeded rng for reproducibility (the original
    # inconsistently used the unseeded np.random here).
    X = rng.randn(Datum.N, Datum.input_dim)
    Y = mean_function_1(X)
    # basic output shape check
    assert Y.shape in [(Datum.N, Datum.output_dim), (Datum.N, 1)]

    # composed mean function output shape check
    if operation == "+":
        mean_composed = mean_function_1 + mean_function_2
    elif operation == "*":
        mean_composed = mean_function_1 * mean_function_2
    else:
        # Raise an instance with context instead of the bare class
        # (the original used ``raise (NotImplementedError)``).
        raise NotImplementedError(f"Unknown operation: {operation}")

    Y_composed = mean_composed(X)
    assert Y_composed.shape in [(Datum.N, Datum.output_dim), (Datum.N, 1)]
@pytest.mark.parametrize("mean_function_1", _mean_functions)
@pytest.mark.parametrize("mean_function_2", _mean_functions)
@pytest.mark.parametrize("operation", ["+", "*"])
def test_mean_functions_composite_type(
    mean_function_1: MeanFunction, mean_function_2: MeanFunction, operation: str
) -> None:
    """Composing mean functions with + / * must yield Additive / Product."""
    if operation == "+":
        mean_composed = mean_function_1 + mean_function_2
        assert isinstance(mean_composed, Additive)
    elif operation == "*":
        mean_composed = mean_function_1 * mean_function_2
        assert isinstance(mean_composed, Product)
    else:
        # Raise an instance with context instead of the bare class
        # (the original used ``raise (NotImplementedError)``).
        raise NotImplementedError(f"Unknown operation: {operation}")
# Three random Linear mean functions sized for Datum.
_linear_functions = [
    Linear(
        A=rng.randn(Datum.input_dim, Datum.output_dim),
        b=rng.randn(Datum.output_dim, 1).reshape(-1),
    )
    for _ in range(3)
]
# Append inverse of first Linear mean function in _linear_functions
_linear_functions.append(Linear(A=-1.0 * _linear_functions[0].A, b=-1.0 * _linear_functions[0].b))
# Three random Constant mean functions sized for Datum.
_constant_functions = [Constant(c=rng.randn(Datum.output_dim, 1).reshape(-1)) for _ in range(3)]
# Append inverse of first Constant mean function in _constant_functions
_constant_functions.append(Constant(c=-1.0 * _constant_functions[0].c))
def _create_GPR_model_with_bias(
    X: TensorType, Y: TensorType, mean_function: MeanFunction
) -> gpflow.models.GPR:
    """Build a GPR model on (X, Y) with a Bias kernel and the given mean function."""
    kernel = gpflow.kernels.Bias(Datum.input_dim)
    return gpflow.models.GPR((X, Y), mean_function=mean_function, kernel=kernel)
@pytest.mark.parametrize("mean_functions", [_linear_functions, _constant_functions])
def test_mean_functions_distributive_property(mean_functions: Sequence[MeanFunction]) -> None:
    """
    Multiplication distributes over addition for mean functions (both
    Constant and Linear): A * (B + C) == A * B + A * C.
    """
    X = rng.randn(Datum.N, Datum.input_dim)
    Y = rng.randn(Datum.N, Datum.output_dim)
    Xtest = rng.randn(30, Datum.input_dim)
    A, B, C = mean_functions[:3]

    factored = Product(A, Additive(B, C))  # A * (B + C)
    expanded = Additive(Product(A, B), Product(A, C))  # A * B + A * C

    mu_1, var_1 = _create_GPR_model_with_bias(X, Y, mean_function=factored).predict_f(Xtest)
    mu_2, var_2 = _create_GPR_model_with_bias(X, Y, mean_function=expanded).predict_f(Xtest)
    assert_allclose(mu_1, mu_2)
    assert_allclose(var_1, var_2)
@pytest.mark.parametrize("mean_functions", [_linear_functions, _constant_functions])
def test_mean_functions_A_minus_A_equals_zero(mean_functions: Sequence[MeanFunction]) -> None:
    """
    Adding a mean function to its negation is equivalent to the Zero mean
    function: A + (-A) == 0. The last element of ``mean_functions`` is the
    negation of the first.
    """
    X = rng.randn(Datum.N, Datum.input_dim)
    Y = rng.randn(Datum.N, Datum.output_dim)
    Xtest = rng.randn(30, Datum.input_dim)
    A, A_negated = mean_functions[0], mean_functions[-1]

    model_sum = _create_GPR_model_with_bias(X, Y, mean_function=Additive(A, A_negated))
    model_zero = _create_GPR_model_with_bias(X, Y, mean_function=Zero())
    mu_sum, var_sum = model_sum.predict_f(Xtest)
    mu_zero, var_zero = model_zero.predict_f(Xtest)
    assert_allclose(mu_sum, mu_zero)
    assert_allclose(var_sum, var_zero)
@pytest.mark.parametrize("mean_functions", [_linear_functions])
def test_linear_mean_functions_associative_property(mean_functions: Sequence[MeanFunction]) -> None:
    """
    Addition of linear mean functions is associative:
    A + (B + (-A)) == B == (A + B) + (-A).
    """
    X = rng.randn(Datum.N, Datum.input_dim)
    Y = rng.randn(Datum.N, Datum.output_dim)
    Xtest = rng.randn(30, Datum.input_dim)
    A, B, A_negated = mean_functions[0], mean_functions[1], mean_functions[-1]

    right_grouped = Additive(A, Additive(B, A_negated))  # A + (B + (-A))
    left_grouped = Additive(Additive(A, B), A_negated)  # (A + B) + (-A)

    predictions = [
        _create_GPR_model_with_bias(X, Y, mean_function=mf).predict_f(Xtest)
        for mf in (right_grouped, B, left_grouped)
    ]
    (mu_r, var_r), (mu_b, var_b), (mu_l, var_l) = predictions
    assert_allclose(mu_r, mu_b)
    assert_allclose(var_r, var_b)
    assert_allclose(mu_b, mu_l)
    assert_allclose(var_b, var_l)
@pytest.mark.parametrize("N, D", [[10, 3]])
def test_switched_mean_function(N: int, D: int) -> None:
    """
    SwitchedMeanFunction must dispatch each row to the mean function selected
    by the integer index stored in the last column of the input.
    """
    # Keep the rng call order (randn, then randint) identical to preserve the
    # global random stream.
    features = rng.randn(N, D)
    selector = 1.0 * rng.randint(0, 2, N).reshape(-1, 1)
    X = np.hstack([features, selector])
    switched_mean = SwitchedMeanFunction([Constant(np.zeros(1)), Constant(np.ones(1))])
    # Reference: the constant value (0 or 1) chosen per row by the index column.
    expected = np.array([0.0, 1.0])[X[:, D].astype(default_int())].reshape(-1, 1)
    assert_allclose(switched_mean(X), expected)
def test_bug_277_regression() -> None:
    """
    Regression test for github issue #277: two Linear mean functions must not
    share parameter tensors, so assigning to one must not affect the other.
    """
    model1, model2 = Linear(), Linear()
    assert model1.b.numpy() == model2.b.numpy()
    model2.b.assign([1.0])
    assert model1.b.numpy() != model2.b.numpy()
# All GPflow model classes that accept a mean_function argument; used below to
# check that changing the mean function shifts every model's predictive mean.
_model_classes = [
    gpflow.models.GPR,
    gpflow.models.SGPR,
    gpflow.models.GPRFITC,
    gpflow.models.SVGP,
    gpflow.models.VGP,
    gpflow.models.GPMC,
    gpflow.models.SGPMC,
]
@pytest.mark.parametrize("model_class", _model_classes)
def test_models_with_mean_functions_changes(model_class: Type[Any]) -> None:
    """
    Check that every model predicts a higher mean with a constant mean
    function than with a zero mean function, while the predictive variance
    stays exactly the same (the variance does not depend on the mean).
    """
    data = rng.randn(Datum.N, Datum.input_dim), rng.randn(Datum.N, 1)
    Xnew = rng.randn(Datum.Ntest, Datum.input_dim)
    inducing_variable = InducingPoints(Z=rng.randn(Datum.M, Datum.input_dim))
    kernel = gpflow.kernels.Matern32()
    likelihood = gpflow.likelihoods.Gaussian()

    def build(mean_function: MeanFunction) -> Any:
        # Each model class has a different constructor signature, so pick the
        # matching argument combination before instantiating.
        if model_class is gpflow.models.GPR:
            return model_class(data, kernel=kernel, mean_function=mean_function)
        if model_class is gpflow.models.VGP:
            return model_class(
                data, likelihood=likelihood, kernel=kernel, mean_function=mean_function
            )
        if model_class is gpflow.models.SVGP:
            return model_class(
                kernel=kernel,
                likelihood=likelihood,
                inducing_variable=inducing_variable,
                mean_function=mean_function,
            )
        if model_class in (gpflow.models.SGPR, gpflow.models.GPRFITC):
            return model_class(
                data,
                kernel=kernel,
                inducing_variable=inducing_variable,
                mean_function=mean_function,
            )
        if model_class is gpflow.models.SGPMC:
            return model_class(
                data,
                kernel=kernel,
                likelihood=likelihood,
                inducing_variable=inducing_variable,
                mean_function=mean_function,
            )
        if model_class is gpflow.models.GPMC:
            return model_class(
                data, kernel=kernel, likelihood=likelihood, mean_function=mean_function
            )
        raise NotImplementedError

    model_zero_mean = build(Zero())
    model_non_zero_mean = build(Constant(c=np.ones(1) * 10))

    mu_zero, var_zero = model_zero_mean.predict_f(Xnew)
    mu_non_zero, var_non_zero = model_non_zero_mean.predict_f(Xnew)
    # Predictive variance remains unchanged after modifying the mean function.
    assert np.all(var_zero.numpy() == var_non_zero.numpy())
    # Predictive mean changes after modifying the mean function.
    assert not np.all(mu_zero.numpy() == mu_non_zero.numpy())
|
GPflow/GPflow
|
tests/gpflow/test_mean_functions.py
|
Python
|
apache-2.0
| 11,259
|
[
"Gaussian"
] |
48e32b89525ec8402af1946758c6cc8efb45794cd71b8ebfc36dabe5ef30eb4f
|
import os
import numpy as np
from ase.calculators.vasp import Vasp
def write_kpoints(self, fname='KPOINTS'):
    """Writes the KPOINTS file.

    The KPOINTS file format is as follows:

    line 1: a comment
    line 2: number of kpoints
        n <= 0  Automatic kpoint generation
        n > 0   explicit number of kpoints
    line 3: kpt format
        if n > 0:
            C,c,K,k = cartesian coordinates
            anything else = reciprocal coordinates
        if n <= 0:
            M,m,G,g for Monkhorst-Pack or Gamma grid
            anything else is a special case
    line 4: if n <= 0, the Monkhorst-Pack grid
            if n > 0, then a line per kpoint
    line 5: if n <= 0 it is the gamma shift

    After the kpts there may be tetrahedra, but we do not support that
    for now.
    """
    p = self.input_params
    # A 1-d 'kpts' entry means a (k1, k2, k3) grid spec -> automatic mode;
    # a 2-d entry is an explicit list of kpoints.
    shape = np.array(p['kpts']).shape
    if len(shape) == 1:
        NKPTS = 0  # automatic
    else:
        NKPTS = len(p['kpts'])
    # figure out the mode
    if NKPTS == 0 and p['gamma'] is False:
        MODE = 'm'  # automatic monkhorst-pack
    elif NKPTS == 0 and p['gamma'] is not False:
        # NOTE(review): gamma=None also lands here and selects the
        # gamma-centred grid; presumably gamma defaults to False in
        # input_params — confirm against the calculator defaults.
        MODE = 'g'  # automatic gamma-centred monkhorst-pack
    # we did not trigger automatic kpoints
    elif p['kpts_nintersections'] is not None:
        MODE = 'l'  # line (band-structure) mode
    elif p['reciprocal'] is True:
        MODE = 'r'  # explicit kpts in reciprocal coordinates
    else:
        MODE = 'c'  # explicit kpts in cartesian coordinates
    kpoints = open(fname, 'w')
    # line 1 - comment
    kpoints.write('KPOINTS created by Atomic Simulation Environment\n')
    # line 2 - number of kpts (0 triggers automatic generation in VASP)
    if MODE in ['c', 'k', 'm', 'g', 'r']:
        kpoints.write('%i\n' % NKPTS)
    elif MODE in ['l']:  # line mode, default intersections is 10
        kpoints.write('%i\n' % p['kpts_nintersections'])
    # line 3 - grid/coordinate type keyword
    if MODE in ['m', 'g']:
        if MODE == 'm':
            kpoints.write('Monkhorst-Pack\n')  # line 3
        elif MODE == 'g':
            kpoints.write('Gamma\n')
    elif MODE in ['c', 'k']:
        kpoints.write('Cartesian\n')
    elif MODE in ['l']:
        kpoints.write('Line-mode\n')
    else:
        kpoints.write('Reciprocal\n')
    # line 4 - either the automatic grid size or one line per explicit kpoint
    if MODE in ['m', 'g']:
        kpoints.write('{0} {1} {2}\n'.format(*p.get('kpts',
                                                    (1, 1, 1))))
    elif MODE in ['c', 'k', 'r']:
        for n in range(NKPTS):
            # each explicit kpoint must carry its weight as a 4th field
            kpoints.write('{0} {1} {2} {3}\n'.format(*p['kpts'][n]))
    elif MODE in ['l']:
        # line mode repeats the coordinate keyword, then the segment endpoints
        if p['reciprocal'] is False:
            kpoints.write('Cartesian\n')
        else:
            kpoints.write('Reciprocal\n')
        for n in range(NKPTS):
            kpoints.write('{0} {1} {2}\n'.format(*p['kpts'][n]))
    # line 5 - gamma shift, only written in automatic mode
    if MODE in ['m', 'g']:
        if p['gamma'] is None or p['gamma'] is True or p['gamma'] is False:
            kpoints.write('0.0 0.0 0.0\n')
        elif len(p['gamma']) == 3:
            kpoints.write('{0} {1} {2}\n'.format(*p['gamma']))
    kpoints.close()
# Monkey-patch the ASE Vasp calculator so it uses this richer KPOINTS writer.
Vasp.write_kpoints = write_kpoints
def read_kpoints(self, filename='KPOINTS'):
    '''Monkey patch to read all KPOINTS file variants and update the
    calculator parameters (kpts, gamma, reciprocal, kpts_nintersections)
    accordingly.'''
    file = open(filename, 'r')
    lines = file.readlines()
    file.close()
    # first line is a comment
    # second line is the number of kpoints or 0 for automatic kpoints
    nkpts = int(lines[1].strip())
    # third line you have to specify whether the coordinates are given
    # in cartesian or reciprocal coordinates if nkpts is greater than
    # zero. Only the first character of the third line is
    # significant. The only key characters recognized by VASP are 'C',
    # 'c', 'K' or 'k' for switching to cartesian coordinates, any
    # other character will switch to reciprocal coordinates.
    #
    # if nkpts = 0 then the third line will start with m or g for
    # Monkhorst-Pack and Gamma. if it does not start with m or g, an
    # alternative mode is entered that we do not support yet.
    ktype = lines[2].split()[0].lower()[0]
    if nkpts <= 0:
        # automatic mode: line 4 holds the grid, line 5 the gamma shift
        if ktype not in ['g', 'm']:
            raise NotImplementedError('Only Monkhorst-Pack and '
                                      'gamma centered grid supported '
                                      'for restart.')
        if ktype == 'g':
            # a (0, 0, 0) shift means plain gamma-centred; anything else is
            # stored as the explicit shift vector
            line5 = np.array([float(lines[4].split()[i]) for i in range(3)])
            if (line5 == np.array([0.0, 0.0, 0.0])).all():
                self.set(gamma=True)
            else:
                self.set(gamma=line5)
        kpts = [int(lines[3].split()[i]) for i in range(3)]
    elif nkpts > 0:
        # list of kpts provided. Technically c,k are supported and
        # anything else means reciprocal coordinates.
        if ktype in ['c', 'k', 'r']:
            kpts = []
            for i in range(3, 3 + nkpts):
                # the kpts also have a weight attached to them
                kpts.append([float(lines[i].split()[j])
                             for j in range(4)])
        # you may also be in line-mode
        elif ktype in ['l']:
            if lines[3][0].lower() == 'r':
                self.set(reciprocal=True)
            self.set(kpts_nintersections=nkpts)
            kpts = []
            for i in range(4, len(lines)):
                if lines[i] == '':
                    continue
                else:
                    kpts.append([float(lines[i].split()[j])
                                 for j in range(3)])
        else:
            raise NotImplementedError('ktype = %s' % lines[2])
    if ktype == 'r':
        self.set(reciprocal=True)
    self.set(kpts=kpts)
# Monkey-patch the ASE Vasp calculator so restarts can parse any KPOINTS file.
Vasp.read_kpoints = read_kpoints
def get_kpts_from_kppra(atoms, kppra, even=False, slab=False, gamma=False):
    '''Return a (k1, k2, k3) grid that samples each unit-cell direction as
    uniformly as possible while providing at least ``kppra`` k-points per
    reciprocal atom.

    The target is u1*k1 ~ u2*k2 ~ u3*k3 ~ c with k1*k2*k3 >= kppra/len(atoms),
    where u_i are the unit-cell vector lengths (a longer vector gets fewer
    kpoints). Starting at 90% of the analytic estimate
    c = (NKPTS * u1*u2*u3)**(1/3) (rounding always goes up), c is grown by 1%
    per iteration until the rounded-up grid meets the target.

    even: constrain every grid component to be even
    slab: force the grid to (k1, k2, 1)
    gamma: NOTE(review) accepted for API compatibility but currently unused
    '''
    # keep the original arithmetic (kppra * 1/len) bit-for-bit
    target = kppra * (1. / len(atoms))
    lengths = np.sqrt(np.sum(atoms.get_cell() ** 2, 1))
    # start below the analytic estimate; ceil() only ever rounds up
    scale = 0.9 * (target * (lengths[0] * lengths[1] * lengths[2])) ** (1. / 3.)
    while True:
        grid = [int(np.ceil(scale / u)) for u in lengths]
        if even:
            grid = [k - k % 2 for k in grid]
        if slab:
            grid[2] = 1
        if grid[0] * grid[1] * grid[2] >= target:
            return (grid[0], grid[1], grid[2])
        # not enough kpoints yet: grow the constant gradually and retry
        scale *= 1.01
if __name__ == '__main__':
from ase.visualize import view
from jasp import *
kppra = 1000
with jasp('tests/ref/c0') as calc:
atoms = calc.get_atoms()
grid = set_kppra(calc, 1000)
print grid
print 'nkpts: ', np.multiply.reduce(grid)
print 'you asked for: ', kppra / len(atoms)
with jasp('../../dft-org/surfaces/Pt-slab-1x1') as calc:
grid = set_kppra(calc, 1000, slab=True, even=True)
print view(calc.get_atoms())
print grid
print 'nkpts: ', np.multiply.reduce(grid)
print 'you asked for: ', kppra/len(atoms)
|
prtkm/jasp
|
jasp/jasp_kpts.py
|
Python
|
gpl-2.0
| 8,111
|
[
"ASE",
"VASP"
] |
e25a4f46cd09bd89e4e6f0e823b01357f4b19912a4edd52a0f3aa423dbf69482
|
##########
# LD 22
# The theme is alone
# it's a dumb theme
# fiona wrote this
##########
# System and Python lib imports
import sys
sys.path += ['.']
# Game engine imports
from myrmidon.myrmidon import MyrmidonGame, MyrmidonProcess
from myrmidon.consts import *
from pygame.locals import *
# Game imports
from consts import *
from media import Media
from gui import GUI
from galaxy import Galaxy
from game_galaxy import Galaxy_background, Solar_system_star, Player_ship, Galaxy_player_ship
class Game(MyrmidonProcess):
    """Top-level game process: owns the global player state and drives the
    state machine switching between the galaxy and solar-system views."""
    # Current state (one of the GAME_STATE_* constants from consts)
    game_state = 0
    # Player state
    money = 2000000000
    fuel = 0
    crew = 0
    current_system = "Sol"
    current_object = "Earth"
    fuel_cost = 1000000000
    crew_cost = 500000000
    # NOTE(review): these mutable class attributes ({} / []) are shared by all
    # instances — safe only while a single Game instance exists.
    actions_done = {}
    home_planet_result = []
    first_time = True
    # Self-explanatory object pointers and lists
    fps_text = None
    gui = None
    media = None
    solar_system_objects = []
    player_ship = None
    background = None
    galaxy = None
    def execute(self):
        """Main process generator: load media, build the galaxy and GUI,
        enter the solar-system state, then yield once per frame forever."""
        # Pre launch set-up
        MyrmidonGame.current_fps = 60
        self.priority = PRIORITY_MAIN_GAME
        # Load all media
        self.media = Media()
        self.media.load_fonts()
        self.media.load_graphics()
        self.media.load_audio()
        # Debug display
        if DEBUG_SHOW_FPS:
            self.fps_text = MyrmidonGame.write_text(0.0, 0.0, font = self.media.fonts['basic'], text = 0)
            self.fps_text.colour = (1, 1, 1, 1)
            self.fps_text.z = -2000
        # Set up starting game objects
        self.galaxy = Galaxy(self)
        self.gui = GUI(self)
        self.switch_game_state_to(GAME_STATE_SOLAR_SYSTEM)
        self.media.audio['ambient'].play(loops = -1)
        while True:
            # update debug display once per frame
            if DEBUG_SHOW_FPS:
                self.fps_text.text = "fps: " + str(MyrmidonGame.fps)
            yield
    def quit_game(self):
        """Terminate the whole application immediately."""
        sys.exit()
    def switch_game_state_to(self, state, gui_state = None):
        """
        Pass in a state and this will switch to it.
        It will also clean up everything necessary to go out of the
        previous game state.
        """
        # Undo and destroy everything in the current state: kill the GUI,
        # then every world object spawned by the outgoing state.
        self.gui.destroy_current_gui_state()
        # NOTE(review): col is assigned but never used
        col = (1.0, 1.0, 1.0)
        if self.game_state == GAME_STATE_SOLAR_SYSTEM:
            for x in self.solar_system_objects:
                x.signal(S_KILL)
            self.solar_system_objects = []
            self.player_ship.signal(S_KILL)
            self.background.signal(S_KILL)
        elif self.game_state == GAME_STATE_GALAXY:
            self.player_ship.signal(S_KILL)
            self.background.signal(S_KILL)
        # Switch to new state
        self.game_state = state
        # Create everything the new state requires; gui_state overrides the
        # default GUI screen for the state when supplied.
        if state == GAME_STATE_GALAXY:
            self.background = Galaxy_background(self)
            self.gui.fade_toggle()
            self.gui.switch_gui_state_to(GUI_STATE_GALAXY if gui_state is None else gui_state)
            self.player_ship = Galaxy_player_ship(self)
        elif state == GAME_STATE_SOLAR_SYSTEM:
            self.background = Galaxy_background(self)
            self.solar_system_objects = []
            self.solar_system_objects.append(Solar_system_star(self, self.galaxy.solar_systems[self.current_system]))
            self.gui.fade_toggle()
            self.gui.switch_gui_state_to(GUI_STATE_SOLAR_SYSTEM if gui_state is None else gui_state)
            self.player_ship = Player_ship(self)
    def do_home_planet_results(self):
        """Pop and run one queued home-planet callback, if any.
        Each entry is a (callable, args) pair; the callable receives this
        Game instance followed by the stored args."""
        if len(self.home_planet_result) > 0:
            result = self.home_planet_result.pop()
            result[0](self, *result[1])
if __name__ == '__main__':
    # Configure the engine's display before handing control to the game.
    resolution = (1024, 768)
    MyrmidonGame.screen_resolution = resolution
    MyrmidonGame.lowest_resolution = resolution
    MyrmidonGame.full_screen = False
    # Instantiating the process starts the Myrmidon engine's main loop.
    Game()
|
Fiona/AreWeAlone
|
__main__.py
|
Python
|
mit
| 4,157
|
[
"Galaxy"
] |
2d7a5699c438dc910c74ee53f4c31cf0ba42c92196a8ecaffee813b1a3209bdc
|
#!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Standard Ansible module metadata: community-supported and in "preview"
# status (the interface may still change between releases).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
# Module documentation consumed by ansible-doc; the string body must remain
# valid YAML. Typos in the user-facing text are fixed here ("Connnection",
# "opensift, kubernates", "afffinity", "conext", "butroutable").
DOCUMENTATION = '''
---
module: avi_pool
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of Pool Avi RESTful Object
description:
    - This module is used to configure Pool object
    - more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
    state:
        description:
            - The state that should be applied on the entity.
        default: present
        choices: ["absent", "present"]
    avi_api_update_method:
        description:
            - Default method for object update is HTTP PUT.
            - Setting to patch will override that behavior to use HTTP PATCH.
        version_added: "2.5"
        default: put
        choices: ["put", "patch"]
    avi_api_patch_op:
        description:
            - Patch operation to use when using avi_api_update_method as patch.
        version_added: "2.5"
        choices: ["add", "replace", "delete"]
    a_pool:
        description:
            - Name of container cloud application that constitutes a pool in a a-b pool configuration, if different from vs app.
            - Field deprecated in 18.1.2.
    ab_pool:
        description:
            - A/b pool configuration.
            - Field deprecated in 18.1.2.
    ab_priority:
        description:
            - Priority of this pool in a a-b pool pair.
            - Internally used.
            - Field deprecated in 18.1.2.
    analytics_policy:
        description:
            - Determines analytics settings for the pool.
            - Field introduced in 18.1.5, 18.2.1.
        version_added: "2.9"
    analytics_profile_ref:
        description:
            - Specifies settings related to analytics.
            - It is a reference to an object of type analyticsprofile.
            - Field introduced in 18.1.4,18.2.1.
        version_added: "2.9"
    apic_epg_name:
        description:
            - Synchronize cisco apic epg members with pool servers.
    application_persistence_profile_ref:
        description:
            - Persistence will ensure the same user sticks to the same server for a desired duration of time.
            - It is a reference to an object of type applicationpersistenceprofile.
    autoscale_launch_config_ref:
        description:
            - If configured then avi will trigger orchestration of pool server creation and deletion.
            - It is only supported for container clouds like mesos, openshift, kubernetes, docker etc.
            - It is a reference to an object of type autoscalelaunchconfig.
    autoscale_networks:
        description:
            - Network ids for the launch configuration.
    autoscale_policy_ref:
        description:
            - Reference to server autoscale policy.
            - It is a reference to an object of type serverautoscalepolicy.
    capacity_estimation:
        description:
            - Inline estimation of capacity of servers.
            - Default value when not specified in API or module is interpreted by Avi Controller as False.
        type: bool
    capacity_estimation_ttfb_thresh:
        description:
            - The maximum time-to-first-byte of a server.
            - Allowed values are 1-5000.
            - Special values are 0 - 'automatic'.
            - Default value when not specified in API or module is interpreted by Avi Controller as 0.
    cloud_config_cksum:
        description:
            - Checksum of cloud configuration for pool.
            - Internally set by cloud connector.
    cloud_ref:
        description:
            - It is a reference to an object of type cloud.
    conn_pool_properties:
        description:
            - Connection pool properties.
            - Field introduced in 18.2.1.
        version_added: "2.9"
    connection_ramp_duration:
        description:
            - Duration for which new connections will be gradually ramped up to a server recently brought online.
            - Useful for lb algorithms that are least connection based.
            - Allowed values are 1-300.
            - Special values are 0 - 'immediate'.
            - Default value when not specified in API or module is interpreted by Avi Controller as 10.
    created_by:
        description:
            - Creator name.
    default_server_port:
        description:
            - Traffic sent to servers will use this destination server port unless overridden by the server's specific port attribute.
            - The ssl checkbox enables avi to server encryption.
            - Allowed values are 1-65535.
            - Default value when not specified in API or module is interpreted by Avi Controller as 80.
    delete_server_on_dns_refresh:
        description:
            - Indicates whether existing ips are disabled(false) or deleted(true) on dns hostname refreshdetail -- on a dns refresh, some ips set on pool may
            - no longer be returned by the resolver.
            - These ips are deleted from the pool when this knob is set to true.
            - They are disabled, if the knob is set to false.
            - Field introduced in 18.2.3.
            - Default value when not specified in API or module is interpreted by Avi Controller as True.
        version_added: "2.9"
        type: bool
    description:
        description:
            - A description of the pool.
    domain_name:
        description:
            - Comma separated list of domain names which will be used to verify the common names or subject alternative names presented by server certificates.
            - It is performed only when common name check host_check_enabled is enabled.
    east_west:
        description:
            - Inherited config from virtualservice.
        type: bool
    enabled:
        description:
            - Enable or disable the pool.
            - Disabling will terminate all open connections and pause health monitors.
            - Default value when not specified in API or module is interpreted by Avi Controller as True.
        type: bool
    external_autoscale_groups:
        description:
            - Names of external auto-scale groups for pool servers.
            - Currently available only for aws and azure.
            - Field introduced in 17.1.2.
    fail_action:
        description:
            - Enable an action - close connection, http redirect or local http response - when a pool failure happens.
            - By default, a connection will be closed, in case the pool experiences a failure.
    fewest_tasks_feedback_delay:
        description:
            - Periodicity of feedback for fewest tasks server selection algorithm.
            - Allowed values are 1-300.
            - Default value when not specified in API or module is interpreted by Avi Controller as 10.
    graceful_disable_timeout:
        description:
            - Used to gracefully disable a server.
            - Virtual service waits for the specified time before terminating the existing connections to the servers that are disabled.
            - Allowed values are 1-7200.
            - Special values are 0 - 'immediate', -1 - 'infinite'.
            - Default value when not specified in API or module is interpreted by Avi Controller as 1.
    gslb_sp_enabled:
        description:
            - Indicates if the pool is a site-persistence pool.
            - Field introduced in 17.2.1.
        version_added: "2.5"
        type: bool
    health_monitor_refs:
        description:
            - Verify server health by applying one or more health monitors.
            - Active monitors generate synthetic traffic from each service engine and mark a server up or down based on the response.
            - The passive monitor listens only to client to server communication.
            - It raises or lowers the ratio of traffic destined to a server based on successful responses.
            - It is a reference to an object of type healthmonitor.
    host_check_enabled:
        description:
            - Enable common name check for server certificate.
            - If enabled and no explicit domain name is specified, avi will use the incoming host header to do the match.
            - Default value when not specified in API or module is interpreted by Avi Controller as False.
        type: bool
    inline_health_monitor:
        description:
            - The passive monitor will monitor client to server connections and requests and adjust traffic load to servers based on successful responses.
            - This may alter the expected behavior of the lb method, such as round robin.
            - Default value when not specified in API or module is interpreted by Avi Controller as True.
        type: bool
    ipaddrgroup_ref:
        description:
            - Use list of servers from ip address group.
            - It is a reference to an object of type ipaddrgroup.
    lb_algorithm:
        description:
            - The load balancing algorithm will pick a server within the pool's list of available servers.
            - Enum options - LB_ALGORITHM_LEAST_CONNECTIONS, LB_ALGORITHM_ROUND_ROBIN, LB_ALGORITHM_FASTEST_RESPONSE, LB_ALGORITHM_CONSISTENT_HASH,
            - LB_ALGORITHM_LEAST_LOAD, LB_ALGORITHM_FEWEST_SERVERS, LB_ALGORITHM_RANDOM, LB_ALGORITHM_FEWEST_TASKS, LB_ALGORITHM_NEAREST_SERVER,
            - LB_ALGORITHM_CORE_AFFINITY, LB_ALGORITHM_TOPOLOGY.
            - Default value when not specified in API or module is interpreted by Avi Controller as LB_ALGORITHM_LEAST_CONNECTIONS.
    lb_algorithm_consistent_hash_hdr:
        description:
            - Http header name to be used for the hash key.
    lb_algorithm_core_nonaffinity:
        description:
            - Degree of non-affinity for core affinity based server selection.
            - Allowed values are 1-65535.
            - Field introduced in 17.1.3.
            - Default value when not specified in API or module is interpreted by Avi Controller as 2.
        version_added: "2.4"
    lb_algorithm_hash:
        description:
            - Criteria used as a key for determining the hash between the client and server.
            - Enum options - LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS, LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS_AND_PORT,
            - LB_ALGORITHM_CONSISTENT_HASH_URI, LB_ALGORITHM_CONSISTENT_HASH_CUSTOM_HEADER, LB_ALGORITHM_CONSISTENT_HASH_CUSTOM_STRING,
            - LB_ALGORITHM_CONSISTENT_HASH_CALLID.
            - Default value when not specified in API or module is interpreted by Avi Controller as LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS.
    lookup_server_by_name:
        description:
            - Allow server lookup by name.
            - Field introduced in 17.1.11,17.2.4.
            - Default value when not specified in API or module is interpreted by Avi Controller as False.
        version_added: "2.5"
        type: bool
    max_concurrent_connections_per_server:
        description:
            - The maximum number of concurrent connections allowed to each server within the pool.
            - Note applied value will be no less than the number of service engines that the pool is placed on.
            - If set to 0, no limit is applied.
            - Default value when not specified in API or module is interpreted by Avi Controller as 0.
    max_conn_rate_per_server:
        description:
            - Rate limit connections to each server.
    min_health_monitors_up:
        description:
            - Minimum number of health monitors in up state to mark server up.
            - Field introduced in 18.2.1, 17.2.12.
        version_added: "2.9"
    min_servers_up:
        description:
            - Minimum number of servers in up state for marking the pool up.
            - Field introduced in 18.2.1, 17.2.12.
        version_added: "2.9"
    name:
        description:
            - The name of the pool.
        required: true
    networks:
        description:
            - (internal-use) networks designated as containing servers for this pool.
            - The servers may be further narrowed down by a filter.
            - This field is used internally by avi, not editable by the user.
    nsx_securitygroup:
        description:
            - A list of nsx service groups where the servers for the pool are created.
            - Field introduced in 17.1.1.
    pki_profile_ref:
        description:
            - Avi will validate the ssl certificate present by a server against the selected pki profile.
            - It is a reference to an object of type pkiprofile.
    placement_networks:
        description:
            - Manually select the networks and subnets used to provide reachability to the pool's servers.
            - Specify the subnet using the following syntax 10-1-1-0/24.
            - Use static routes in vrf configuration when pool servers are not directly connected but routable from the service engine.
    prst_hdr_name:
        description:
            - Header name for custom header persistence.
            - Field deprecated in 18.1.2.
    request_queue_depth:
        description:
            - Minimum number of requests to be queued when pool is full.
            - Default value when not specified in API or module is interpreted by Avi Controller as 128.
    request_queue_enabled:
        description:
            - Enable request queue when pool is full.
            - Default value when not specified in API or module is interpreted by Avi Controller as False.
        type: bool
    rewrite_host_header_to_server_name:
        description:
            - Rewrite incoming host header to server name of the server to which the request is proxied.
            - Enabling this feature rewrites host header for requests to all servers in the pool.
            - Default value when not specified in API or module is interpreted by Avi Controller as False.
        type: bool
    rewrite_host_header_to_sni:
        description:
            - If sni server name is specified, rewrite incoming host header to the sni server name.
            - Default value when not specified in API or module is interpreted by Avi Controller as False.
        type: bool
    server_auto_scale:
        description:
            - Server autoscale.
            - Not used anymore.
            - Field deprecated in 18.1.2.
        type: bool
    server_count:
        description:
            - Field deprecated in 18.2.1.
    server_name:
        description:
            - Fully qualified dns hostname which will be used in the tls sni extension in server connections if sni is enabled.
            - If no value is specified, avi will use the incoming host header instead.
    server_reselect:
        description:
            - Server reselect configuration for http requests.
    server_timeout:
        description:
            - Server timeout value specifies the time within which a server connection needs to be established and a request-response exchange completes
            - between avi and the server.
            - Value of 0 results in using default timeout of 60 minutes.
            - Allowed values are 0-3600000.
            - Field introduced in 18.1.5,18.2.1.
            - Default value when not specified in API or module is interpreted by Avi Controller as 0.
        version_added: "2.9"
    servers:
        description:
            - The pool directs load balanced traffic to this list of destination servers.
            - The servers can be configured by ip address, name, network or via ip address group.
    service_metadata:
        description:
            - Metadata pertaining to the service provided by this pool.
            - In openshift/kubernetes environments, app metadata info is stored.
            - Any user input to this field will be overwritten by avi vantage.
            - Field introduced in 17.2.14,18.1.5,18.2.1.
        version_added: "2.9"
    sni_enabled:
        description:
            - Enable tls sni for server connections.
            - If disabled, avi will not send the sni extension as part of the handshake.
            - Default value when not specified in API or module is interpreted by Avi Controller as True.
        type: bool
    ssl_key_and_certificate_ref:
        description:
            - Service engines will present a client ssl certificate to the server.
            - It is a reference to an object of type sslkeyandcertificate.
    ssl_profile_ref:
        description:
            - When enabled, avi re-encrypts traffic to the backend servers.
            - The specific ssl profile defines which ciphers and ssl versions will be supported.
            - It is a reference to an object of type sslprofile.
    tenant_ref:
        description:
            - It is a reference to an object of type tenant.
    url:
        description:
            - Avi controller URL of the object.
    use_service_port:
        description:
            - Do not translate the client's destination port when sending the connection to the server.
            - The pool or servers specified service port will still be used for health monitoring.
            - Default value when not specified in API or module is interpreted by Avi Controller as False.
        type: bool
    uuid:
        description:
            - Uuid of the pool.
    vrf_ref:
        description:
            - Virtual routing context that the pool is bound to.
            - This is used to provide the isolation of the set of networks the pool is attached to.
            - The pool inherits the virtual routing context of the virtual service, and this field is used only internally, and is set by pb-transform.
            - It is a reference to an object of type vrfcontext.
extends_documentation_fragment:
    - avi
'''
EXAMPLES = """
- name: Create a Pool with two servers and HTTP monitor
avi_pool:
controller: 10.10.1.20
username: avi_user
password: avi_password
name: testpool1
description: testpool1
state: present
health_monitor_refs:
- '/api/healthmonitor?name=System-HTTP'
servers:
- ip:
addr: 10.10.2.20
type: V4
- ip:
addr: 10.10.2.21
type: V4
- name: Patch pool with a single server using patch op and avi_credentials
avi_pool:
avi_api_update_method: patch
avi_api_patch_op: delete
avi_credentials: "{{avi_credentials}}"
name: test-pool
servers:
- ip:
addr: 10.90.64.13
type: 'V4'
register: pool
when:
- state | default("present") == "present"
"""
RETURN = '''
obj:
description: Pool (api/pool) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, avi_ansible_api, HAS_AVI)
except ImportError:
HAS_AVI = False
def main():
    """Entry point for the avi_pool Ansible module.

    Builds the argument spec (one entry per field of the Avi ``pool``
    API object, plus the generic ``state`` / update-method controls),
    constructs the AnsibleModule, verifies the Avi SDK imported at
    module load time, and delegates the actual create/update/delete
    logic to ``avi_ansible_api``.
    """
    argument_specs = dict(
        # Desired state of the pool object on the controller.
        state=dict(default='present',
                   choices=['absent', 'present']),
        # How updates are sent to the Avi API: full PUT or partial PATCH.
        avi_api_update_method=dict(default='put',
                                   choices=['put', 'patch']),
        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
        # The entries below mirror the Avi pool object's fields 1:1;
        # values are passed through to the controller API unmodified.
        a_pool=dict(type='str',),
        ab_pool=dict(type='dict',),
        ab_priority=dict(type='int',),
        analytics_policy=dict(type='dict',),
        analytics_profile_ref=dict(type='str',),
        apic_epg_name=dict(type='str',),
        application_persistence_profile_ref=dict(type='str',),
        autoscale_launch_config_ref=dict(type='str',),
        autoscale_networks=dict(type='list',),
        autoscale_policy_ref=dict(type='str',),
        capacity_estimation=dict(type='bool',),
        capacity_estimation_ttfb_thresh=dict(type='int',),
        cloud_config_cksum=dict(type='str',),
        cloud_ref=dict(type='str',),
        conn_pool_properties=dict(type='dict',),
        connection_ramp_duration=dict(type='int',),
        created_by=dict(type='str',),
        default_server_port=dict(type='int',),
        delete_server_on_dns_refresh=dict(type='bool',),
        description=dict(type='str',),
        domain_name=dict(type='list',),
        east_west=dict(type='bool',),
        enabled=dict(type='bool',),
        external_autoscale_groups=dict(type='list',),
        fail_action=dict(type='dict',),
        fewest_tasks_feedback_delay=dict(type='int',),
        graceful_disable_timeout=dict(type='int',),
        gslb_sp_enabled=dict(type='bool',),
        health_monitor_refs=dict(type='list',),
        host_check_enabled=dict(type='bool',),
        inline_health_monitor=dict(type='bool',),
        ipaddrgroup_ref=dict(type='str',),
        lb_algorithm=dict(type='str',),
        lb_algorithm_consistent_hash_hdr=dict(type='str',),
        lb_algorithm_core_nonaffinity=dict(type='int',),
        lb_algorithm_hash=dict(type='str',),
        lookup_server_by_name=dict(type='bool',),
        max_concurrent_connections_per_server=dict(type='int',),
        max_conn_rate_per_server=dict(type='dict',),
        min_health_monitors_up=dict(type='int',),
        min_servers_up=dict(type='int',),
        # 'name' is the only field required to address a pool object.
        name=dict(type='str', required=True),
        networks=dict(type='list',),
        nsx_securitygroup=dict(type='list',),
        pki_profile_ref=dict(type='str',),
        placement_networks=dict(type='list',),
        prst_hdr_name=dict(type='str',),
        request_queue_depth=dict(type='int',),
        request_queue_enabled=dict(type='bool',),
        rewrite_host_header_to_server_name=dict(type='bool',),
        rewrite_host_header_to_sni=dict(type='bool',),
        server_auto_scale=dict(type='bool',),
        server_count=dict(type='int',),
        server_name=dict(type='str',),
        server_reselect=dict(type='dict',),
        server_timeout=dict(type='int',),
        servers=dict(type='list',),
        service_metadata=dict(type='str',),
        sni_enabled=dict(type='bool',),
        ssl_key_and_certificate_ref=dict(type='str',),
        ssl_profile_ref=dict(type='str',),
        tenant_ref=dict(type='str',),
        url=dict(type='str',),
        use_service_port=dict(type='bool',),
        uuid=dict(type='str',),
        vrf_ref=dict(type='str',),
    )
    # Merge in the shared Avi connection options (controller, username,
    # password, avi_credentials, tenant, ...).
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    # Fail early with an actionable message if the optional SDK import
    # at the top of the file did not succeed.
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # avi_ansible_api performs the CRUD against the 'pool' endpoint;
    # the empty set means no fields need special sensitive handling.
    return avi_ansible_api(module, 'pool',
                           set([]))


if __name__ == '__main__':
    main()
|
amenonsen/ansible
|
lib/ansible/modules/network/avi/avi_pool.py
|
Python
|
gpl-3.0
| 23,241
|
[
"VisIt"
] |
20b39938be275ab0437d780fe2189c17b34f6995465f9c5e66bfd8aa4f9cd6ca
|
# hu.po
val = {" days." : " napok.",
"(all)" : "(összes)",
"(any)" : "(bármely)",
"(anyone)" : "(bárki)",
"(available)" : "(elérhető)",
"(blank)" : "(üres)",
"(both)" : "(mindkettő)",
"(everyone)" : "(mindenki)",
"(master user, not editable)" : "(kiemelt felhasználó, nem szerkeszthető)",
"(no change)" : "(nincs változás)",
"(no deduction)" : "(nincs csökkenés)",
"(none)" : "(nincs)",
"(unknown)" : "(ismeretlen)",
"(use system)" : "(rendszer használata)",
"({0} given, {1} remaining)" : "({0} adott, {1} megmaradt)",
"1 treatment" : "1 kezelés",
"1 week" : "1 hét",
"1 year" : "1 év",
"2 weeks" : "2 hét",
"3 months" : "3 hónap",
"4 weeks" : "4 hét",
"5 Year" : "5 év",
"6 months" : "6 hónap",
"6 weeks" : "6 hét",
"8 weeks" : "8 hét",
"9 months" : "9 hónap",
"A (Stray Dog)" : "A (kóbor kutya)",
"A description or other information about the animal" : "Az állat leírása vagy egyéb információk",
"A list of areas this person will homecheck - eg: S60 S61" : "Ezen személy által látogatható körzetek listája - pl. S60 S61",
"A movement must have a reservation date or type." : "A mozgatáshoz szükséges a foglalás dátuma vagy típusa.",
"A person is required for this movement type." : "Egy személy szükséges az ilyen típusú átsoroláshoz.",
"A publish job is already running." : "Már folyamatban van a közzététel.",
"A short version of the reference number" : "A referencia szám rövidítése",
"A task is already running." : "Egy feladat már folyamatban van.",
"A unique number to identify this movement" : "Egyedi sorszám a tevékenység azonosítására",
"A unique reference for this litter" : "Alom egyedi azonosítója",
"A4" : "A4-es",
"ACO" : "ACO",
"AM" : "AM",
"ASM" : "ASM",
"ASM 3 is compatible with your iPad and other tablets." : "Az ASM 3 kompatibilis az Ön iPad-jével és egyéb tablet-tel.",
"ASM News" : "ASM hírek",
"ASM can track detailed monthly and annual figures for your shelter. Install the Monthly Figures and Annual Figures reports from Settings-Reports-Browse sheltermanager.com" : "Az ASM segítségével rögzítheti az Ön Menhelyének havi és éves adatait. Telepítse a Havi adatok (Monthly Figures) és Éves adatok (Annual Figures) beszámolókat a Settings-Reports-Browse sheltermanager.com-ról",
"ASM comes with a dictionary of 4,000 animal names. Just click the generate random name button when adding an animal." : "Az ASM szótára 4.000 db állatnevet tartalmaz. Egyszerűen kattintson a véletlenszerű névválasztás mezőre, amikor új állatot rögzít.",
"ASM will remove this animal from the waiting list after a set number of weeks since the last owner contact date." : "ASM eltávolítja ezt az állatot a várólistáról, a tulajdonossal való utolsó kapcsolatfelvételtől számított megadott számú hét után.",
"Abandoned" : "Gyepmesteri telepről",
"Abuse" : "Bántalmazott",
"Abyssinian" : "Abesszín",
"Access System Menu" : "System menü hozzáférés",
"Account" : "Fiók",
"Account Types" : "Fiók fajták",
"Account code '{0}' has already been used." : "A '{0}' fiók kód már foglalt.",
"Account code '{0}' is not valid." : "A '{0}' fiók kód érvénytelen.",
"Account code cannot be blank." : "Fiók kód nem lehet üres.",
"Account disabled." : "Fiók letiltva.",
"Accountant" : "Könyvelő",
"Accounts" : "Fiókok",
"Accounts need a code." : "A fiókokhoz kód szükséges.",
"Active" : "Aktív",
"Active Incidents" : "Aktív esetek",
"Active Trap Loans" : "Jelenlegi csapda kölcsönzések",
"Active users: {0}" : "Aktív felhasználók",
"Add" : "Hozzáad",
"Add Accounts" : "Fiók hozzáadása",
"Add Animal" : "Állat hozzáadása",
"Add Animals" : "Állatok hozzáadása",
"Add Appointment" : "Esemény hozzáadása",
"Add Call" : "Hívás hozzáadása",
"Add Citations" : "Hivatkozás hozzáadása",
"Add Clinic Appointment" : "Orvosi vizsgálat hozzáadása",
"Add Cost" : "Költség hozzáadása",
"Add Diary" : "Napló hozzáadása",
"Add Diets" : "Diéta hozzáadása",
"Add Document to Repository" : "Iratok hozzáadása a gyüjteményhez",
"Add Flag" : "Jelölés hozzáadása",
"Add Found Animal" : "Talált állat hozzáadása",
"Add Incidents" : "Eset hozzáadása",
"Add Investigation" : "Vizsgálat hozzáadása",
"Add Invoice Item" : "Számla elem hozzáadása",
"Add Licenses" : "Engedély hozzáadása",
"Add Litter" : "Alom hozzáadása",
"Add Log" : "Napló hozzáadása",
"Add Log to Animal" : "Napló állathoz rendelése",
"Add Lost Animal" : "Elveszett állat hozzáadása",
"Add Media" : "Video hozzáadása",
"Add Medical Records" : "Orvosi dokumentáció hozzáadása",
"Add Message" : "Üzenet hozzáadása",
"Add Movement" : "Áthelyezés hozzáadása",
"Add Payments" : "Kifizetés hozzáadása",
"Add Person" : "Személy hozzáadása",
"Add Report" : "Jelentés hozzáadása",
"Add Rota" : "Sorrend megadása",
"Add Stock" : "Készlet hozzáadása",
"Add Tests" : "Tesztek hozzáadása",
"Add Transport" : "Transzport hozzáadása",
"Add Trap Loans" : "Csapda kölcsönzés hozzáadása",
"Add Users" : "Felhasználók hozzáadása",
"Add Vaccinations" : "Oltások hozzáadása",
"Add Vouchers" : "Igazolás hozzáadása",
"Add Waiting List" : "Várólista hozzáadása",
"Add a diary note" : "Naplóbejegyzés hozzáadása",
"Add a found animal" : "Talált állat hozzáadása",
"Add a log entry" : "Naplóbejegyzés hozzáadása",
"Add a lost animal" : "Elveszett állat hozzáadása",
"Add a medical regimen" : "Egészségügyi étrend hozzáadása",
"Add a new animal" : "Új állat hozzáadása",
"Add a new log" : "Új naplóbejegyzés hozzáadása",
"Add a new person" : "Új személy hozzáadása",
"Add a person" : "Személy hozzáadása",
"Add a photo" : "Fénykép hozzárendelése",
"Add a test" : "Teszt hozzáadása",
"Add a vaccination" : "Oltóanyag hozzáadása",
"Add account" : "Fiók hozzáadása",
"Add additional field" : "További mező hozzáadása",
"Add an animal to the waiting list" : "Állat várólistára vétele",
"Add citation" : "Hivatkozás hozzáadása",
"Add cost" : "Költség hozzáadása",
"Add details of this email to the log after sending" : "Jelen email elküldése után, vonatkozó részletek hozzáadása a naplóhoz",
"Add diary" : "Napló hozzáadása",
"Add diary task" : "Napló feladat hozzáadása",
"Add diet" : "Diéta hozzáadása",
"Add extra images for use in reports and documents" : "Extra képek hozzárendelése jelentésekhez és dokumentumokhoz",
"Add form field" : "Nyomtatványmező hozzáadása",
"Add found animal" : "Talált állat hozzáadása",
"Add investigation" : "Vizsgálat hozzáadása",
"Add license" : "Engedély hozzárendelése",
"Add litter" : "Alom hozzáadása",
"Add log" : "Napló hozzáadása",
"Add lost animal" : "Elveszett állat hozzáadása",
"Add medical profile" : "Egészségügyi profil hozzáadása",
"Add medical regimen" : "Egészségügyi étrend hozzáadása",
"Add message" : "Üzenet hozzáadása",
"Add movement" : "Tétel hozzáadása",
"Add online form" : "Online nyomtatvány hozzárendelése",
"Add payment" : "Kifizetés hozzáadása",
"Add person" : "Személy hozzáadása",
"Add report" : "Beszámoló hozzáadása",
"Add role" : "Feladat hozzáadása",
"Add rota item" : "Lista elem hozzáadása",
"Add stock" : "Készlet hozzáadása",
"Add template" : "Sablon hozzáadása",
"Add test" : "Teszt hozzáadása",
"Add this text to all animal descriptions" : "Jelen szöveg hozzárendelése minden állat leírásához",
"Add to log" : "Hozzáadás naplóbejegyzéshez",
"Add transport" : "Transzport hozzáadása",
"Add trap loan" : "Csapda kölcsönzés hozzáadása",
"Add user" : "Felhasználó hozzáadása",
"Add vaccination" : "Oltás hozzáadása",
"Add voucher" : "Igazolás hozzáadása",
"Add waiting list" : "Várólista hozzáadása",
"Add {0}" : "{0} hozzáadása",
"Added" : "Hozzáadva",
"Added by {0} on {1}" : "Hozzáadva {0} által {1}-kor",
"Additional" : "További",
"Additional Fields" : "További mezők",
"Additional date field '{0}' contains an invalid date." : "További dátum mező '{0} érvénytelen dátumot tartalmaz.",
"Additional fields" : "További mezők",
"Additional fields need a name, label and type." : "Név, címke és típus szükséges a további mezőkhöz.",
"Address" : "Cím",
"Address Contains" : "Cím tartalmazza",
"Address contains" : "Cím tartalmazza",
"Administered" : "Beadva",
"Administering Vet" : "Beadó állatorvos",
"Adopt" : "Örökbefogadás",
"Adopt an animal" : "Állat örökbefogadása",
"Adoptable" : "Örökbefogadható",
"Adoptable Animal" : "Örökbefogadható állat",
"Adoptable and published for the first time" : "Örökbefogadható és első alkalommal közzétéve",
"Adopted" : "Örökbefogadott",
"Adopted Animals" : "Örökbefogadott állatok",
"Adopted Transferred In {0}" : "Örökbefogadott áthelyezve ide: {0}",
"Adoption" : "Örökbefogadás",
"Adoption Coordinator" : "Örökbefogadás koordinátora",
"Adoption Coordinator and Fosterer" : "Örökbeadási koordinátor és ideiglenes befogadó",
"Adoption Event" : "Örökbefogadási esemény",
"Adoption Fee" : "Örökbe fogadási díj",
"Adoption Number" : "Örökbefogadási szám",
"Adoption fee donations" : "Örökbefogadási díj adományok",
"Adoption movements must have a valid adoption date." : "Örökbefogadási tételekhez érvényes örökbe fogadási dátum szükséges.",
"Adoption successfully created." : "Az örökbefogadás sikeresen megtörtént.",
"Adoptions {0}" : "Örökbefogadások {0}",
"Adult" : "Felnőtt",
"Advanced" : "Speciális keresés",
"Advanced find animal screen defaults to on shelter" : "Részletes állat keresés képernyőjének beállításai ",
"Affenpinscher" : "Majompincsi",
"Afghan Hound" : "Afgán agár",
"African Grey" : "Afrikai agár",
"After the user presses submit and ASM has accepted the form, redirect the user to this URL" : "Miután a felhasználó az engedélyezésre kattint és ASM elfogadta a nyomtatványt, irányítsa át erre az URL-re",
"Age" : "Életkor",
"Age Group" : "Korcsoport",
"Age Group 1" : "1. korcsoport",
"Age Group 2" : "2. korcsoport",
"Age Group 3" : "3. korcsoport",
"Age Group 4" : "4. korcsoport",
"Age Group 5" : "5. korcsoport",
"Age Group 6" : "6. korcsoport",
"Age Group 7" : "7. korcsoport",
"Age Group 8" : "8. korcsoport",
"Age Groups" : "Korcsoportok",
"Age groups are assigned based on the age of an animal. The figure in the left column is the upper limit in years for that group." : "Az állat a kora alapján kerül besorolásra egy korcsoportba. A bal oldali oszlopban szereplő szám az adott csoport felső korhatárát jelzi években.",
"Aged Between" : "Kor közötti",
"Aged From" : "Kortól",
"Aged To" : "Éves korig",
"Aggression" : "Agresszió",
"Airedale Terrier" : "Airedale terrier",
"Akbash" : "Keverék",
"Akita" : "Akita",
"Alaskan Malamute" : "Alaszkai malamut",
"Alerts" : "Figyelmeztetések",
"All Animals" : "Összes állat",
"All On-Shelter Animals" : "A menhelyen található összes állat",
"All Publishers" : "Valamennyi közzétevő",
"All accounts" : "Összes fiók",
"All animal care officers on file." : "Valamennyi rögzített állatvédelmi megbízott.",
"All animal shelters on file." : "Valamennyi rögzített állatmenhely.",
"All animals matching current publishing options." : "A jelen közzétételi opcióknak megfelelő valamennyi állat.",
"All animals on the shelter." : "Összes menhelyen lévő állat.",
"All animals where the hold ends today." : "Valamennyi állat, melyek foglalási ideje ma lejárt.",
"All animals who are currently held in case of reclaim." : "Valamennyi állat, amely eredeti gazdája jelentkezését várja.",
"All animals who are currently quarantined." : "Összes jelenleg karanténban lévő állat.",
"All animals who are flagged as not for adoption." : "Összes, nem örökbe fogadhatóként megjelölt állat.",
"All animals who have been on the shelter longer than {0} months." : "Valamennyi állat, amely több mint {0} hónapja van a menhelyen.",
"All animals who have not been microchipped" : "Valamennyi microchippel nem rendelkező állat",
"All banned owners on file." : "Valamennyi fekete listás örökbefogadó.",
"All diary notes" : "Összes napló jegyzet",
"All donors on file." : "Összes rögzített támogató.",
"All drivers on file." : "Összes rögzített fuvaros.",
"All existing data in your database will be REMOVED before importing the CSV file. This removal cannot be reversed." : "Az adatbázisában található valamennyi létező adat ELTÁVOLÍTÁSRA kerül, a CSV fájl importálása előtt. Ez az eltávolítás nem visszafordítható.",
"All fields should be completed." : "Az összes mező kitöltendő.",
"All fosterers on file." : "Az összes regisztrált ideiglenes befogadó.",
"All homechecked owners on file." : "Összes lelátogatott örökbefogadó listája.",
"All homecheckers on file." : "Összes lelátogatást végző személy listája.",
"All members on file." : "Összes regisztrált tag.",
"All notes upto today" : "Összes jegyzet a mai napig",
"All people on file." : "Összes regisztrált személy.",
"All retailers on file." : "Összes regisztrált átvevő.",
"All staff on file." : "Összes dolgozó listája.",
"All time" : "Mindig",
"All vets on file." : "Összes rögzített állatorvos.",
"All volunteers on file." : "Összes rögzített önkéntes.",
"Allergies" : "Allergiák",
"Allow a fosterer to be selected" : "Ideiglenes befogadó kiválasztásának engedélyezése",
"Allow an adoption coordinator to be selected" : "Örökbefogadás koordinátorának kiválasztása",
"Allow creation of payments on the Move-Reserve screen" : "Befizetések létrehozásának engedélyezése Move-Reserve képernyőn",
"Allow drag and drop to move animals between locations" : "Az állat egyik helyről a másikra való áthelyezésének engedélyezése",
"Allow duplicate license numbers" : "Dupla regisztrációs szám engedélyezése",
"Allow duplicate microchip numbers" : "Dupla mikrochip számok engedélyezése",
"Allow overriding of the movement number on the Move menu screens" : "Engedélyezze a Move menü képernyőjén a tételek számának átugrását",
"Allow use of OpenOffice document templates" : "Engedélyezze az OpenOffic dokumentumsablonjainak használatát",
"Alphabetically A-Z" : "ABC sorrendben A-tol Z-ig",
"Alphabetically Z-A" : "ABC sorrendben Z-től A-ig",
"Already Signed" : "Már aláírva",
"Already fostered to this person." : "Ideiglenes befogadóhoz került.",
"Altered" : "Ivartalanított",
"Altered Date" : "Ivartalanítás dátuma",
"Altered Dog - 1 year" : "Ivartalanított kutya - 1 év",
"Altered Dog - 3 year" : "Ivartalanított kutya - 3 év",
"Altering Vet" : "Ivartalanítást végző állatorvos",
"Always show an emblem to indicate the current location" : "Mindig szimbólummal jelezze a jelenlegi helyét",
"Amazon" : "Amazon",
"Amber" : "Sárga",
"American" : "Amerikai",
"American Bulldog" : "Amerikai bulldog",
"American Curl" : "american curl macska",
"American Eskimo Dog" : "amerikai eszkimókutya",
"American Fuzzy Lop" : "Amerikai Fuzzy Lop (nyúl)",
"American Sable" : "Amerikai Sable (nyúl)",
"American Shorthair" : "Amerikai rövidszőrű macska",
"American Staffordshire Terrier" : "amerikai staffordshire terrier",
"American Water Spaniel" : "amerikai vizispániel",
"American Wirehair" : "Amerikai drótszőrű macska",
"Amount" : "Mennyiség",
"An age in years, eg: 1, 0.5" : "Kor években, pl. 1, 0.5",
"An animal cannot have multiple open movements." : "Egy állat esetében nem lehet több mozgatás folyamatban.",
"An optional comma separated list of email addresses to send the output of this report to" : "Egy opcionális, vesszővel elválasztott emailcím lista, amelyekre ezen riport eredményét elküldi",
"Anatolian Shepherd" : "Anatóliai pásztorkutya",
"Angora Rabbit" : "Angóra nyúl",
"Animal" : "Állat",
"Animal '{0}' created with code {1}" : "Állat '{0}' {1} kóddal létrehozva",
"Animal '{0}' successfully marked deceased." : "Az '{0}' állat elhullottként jelölve.",
"Animal (optional)" : "Állat (választható)",
"Animal (via animalname field)" : "Állat nevének megadása",
"Animal - Additional" : "Állat - Egyéb",
"Animal - Death" : "Állat - Halál",
"Animal - Details" : "Állat - Részletek",
"Animal - Entry" : "Állat - Belépés",
"Animal - Health and Identification" : "Állat - Egészségi állapot és azonosítás",
"Animal - Notes" : "Állat - Jegyzet",
"Animal Codes" : "Állat kódok",
"Animal Control" : "Állat hatósági ellenőrzése",
"Animal Control Caller" : "Állat hatósági ellenőrzését kezdeményezte",
"Animal Control Incident" : "Állat hatósági ellenőrzését kiváltó esemény",
"Animal Control Officer" : "Ellenőrzéssel megbízott hatósági állatorvos ",
"Animal Control Victim" : "A hatósági ellenőrzés alanya",
"Animal Emblems" : "Állat jelzések",
"Animal Flags" : "Állat jelölések",
"Animal Links" : "Állat linkek",
"Animal Name" : "Állat neve",
"Animal Selection" : "Állat kiválasztása",
"Animal Shelter Manager" : "ASM",
"Animal Shelter Manager Login" : "ASM Belépés",
"Animal Sponsorship" : "Állat támogatója",
"Animal Type" : "Állat faja",
"Animal Types" : "Állat fajtája",
"Animal board costs" : "Állat ellátásának költségei",
"Animal cannot be deceased before it was brought to the shelter" : "Az állat nem jelölhető elhullottként, mielőtt a menhelyre került.",
"Animal code format" : "Állat kód formátum",
"Animal comments MUST contain this phrase in order to match." : "Az állathoz kapcsolódó megjegyzésnek tartalmazni kell ezt a kifejezést az egyezéshez.",
"Animal control calendar" : "Állat ellenőrzési naptár",
"Animal control incidents matching '{0}'." : "Állat ellenőrzési tételek egyezése '{0}'.",
"Animal defecation" : "Állat féreghajtása",
"Animal descriptions" : "Állat leírása",
"Animal destroyed" : "Állat törölve",
"Animal emblems are the little icons that appear next to animal names in shelter view, the home page and search results." : "Az állat szimbólumok kis ikonok, amik megjelennek az állat neve mellett a honlapon, a keresési eredményeknél és a menhely felületen.",
"Animal food costs" : "Állateledel költségek",
"Animal picked up" : "Állat begyüjtve",
"Animal shortcode format" : "Állat rövidkód formátuma",
"Animals" : "Állatok",
"Animals at large" : "Állatok áltlában",
"Animals left in vehicle" : "Autóban hagyott állatok",
"Animals matching '{0}'." : "'{0}'-nak megfelelő állatok",
"Animals per page" : "Állatok oldalanként",
"Annual" : "Éves",
"Annually" : "Évente",
"Anonymize" : "Névtelenül",
"Anonymize personal data after this many years" : "Személyes adatok névtelenitése sok év óta",
"Any animal types, species, breeds, colors, locations, etc. in the CSV file that aren't already in the database will be created during the import." : "A CSV fájlban található bármely állat típus, faj, fajta, szín, helyszín, stb., amely még nincs az adatbázisban, létrehozásra kerül az importálás során.",
"Any health problems the animal has" : "Az állat összes egészségügyi problémája",
"Any information about the animal" : "Összes információ az állatról",
"Any markings or distinguishing features the animal has" : "Az állat összes ismertetőjegyei és tulajdonságai",
"Appaloosa" : "Appaloosa",
"Appenzell Mountain Dog" : "Appenzell hegyikutya",
"Applehead Siamese" : "sziámi",
"Appointment" : "Találkozó",
"Appointment date must be a valid date" : "A találkozó dátuma érvényes dátum kell legyen",
"Appointment {0}. {1} on {2} for {3}" : "Találkozó {0}, {1} {2}-án {3}-nak",
"Appointments need a date and time." : "A találkozóhoz dátumot és időpontot kell megadni. ",
"Approved" : "Jóváhagyva",
"Apr" : "Ápr.",
"April" : "Április",
"Arabian" : "Arab",
"Area" : "Terület",
"Area Found" : "Megtalálás helye",
"Area Lost" : "Elvesztés helye",
"Area Postcode" : "Terület irányító száma",
"Area where the animal was found" : "Az állat megtalálásának a helyszíne.",
"Area where the animal was lost" : "Az állat elvesztésének a helyszíne",
"Areas" : "Területek",
"Arrived" : "Beérkezett",
"Asset" : "Tárgyi eszköz",
"Asset::Premises" : "Ingóság::Ingatlan",
"At least the last name should be completed." : "Legalább a vezetéknév kitöltese kötelező",
"Attach" : "Csatol",
"Attach File" : "Fájl csatolása",
"Attach Link" : "Link csatolása",
"Attach a file" : "Csatolj egy fájlt",
"Attach a link to a web resource" : "Csatolj egy linket a web forráshoz",
"Attach link" : "Csatolj link-et",
"Audit Trail" : "Ellenőrzési nyomvonal",
"Aug" : "Aug.",
"August" : "Augusztus",
"Australian Cattle Dog/Blue Heeler" : "Australian Cattle Dog/Blue Heeler",
"Australian Kelpie" : "ausztrál kelpie",
"Australian Shepherd" : "Ausztrál juhászkutya",
"Australian Terrier" : "ausztrál terrier",
"Auto log users out after this many minutes of inactivity" : "Felhasználók kiléptetése ennyi percnyi inaktivitás után",
"Auto removed due to lack of owner contact." : "Az tulajdonos hiányos adatai miatt automatikus törlésre került.",
"Automatically cancel any outstanding reservations on an animal when it is adopted" : "Automatikusan törlődjön az állatra fennálló bármely foglalás, ha az állatot örökbe fogadták",
"Automatically remove" : "Automatikus eltávolítás",
"Automatically return any outstanding foster movements on an animal when it is adopted" : "Automatikusan vonja vissza az állatra vonatkozó valamennyi ideiglenes befogadási eseményt, ha az állatot örökbe fogadták",
"Automatically return any outstanding foster movements on an animal when it is transferred" : "Automatikusan vonja vissza az állatra vonatkozó valamennyi ideiglenes befogadási eseményt, ha az állat elkerült az állatotthonból.",
"Available for adoption" : "Örökbefogadható",
"Available sheltermanager.com reports" : "Elérhető sheltermanager.com reportok",
"B (Boarding Animal)" : "B (Elhelyezett állat)",
"Baby" : "Kölyök",
"Balance" : "Egyenleg",
"Balinese" : "balinéz macska",
"Bank" : "Bank",
"Bank account interest" : "Bankszámla kamat",
"Bank current account" : "Banki folyószámla",
"Bank deposit account" : "Befizetési bankszámla",
"Bank savings account" : "Megtakarítási bankszámla",
"Bank::Current" : "Bank::Forgalom",
"Bank::Deposit" : "Bank::Letét",
"Bank::Savings" : "Bank::Megtakarítások",
"Banned" : "Kitiltott",
"Base Color" : "Alapszín",
"Basenji" : "basenji",
"Basset Hound" : "basset hound",
"Batch" : "Sorozat",
"Batch Number" : "Sorozatszám",
"Beagle" : "beagle",
"Bearded Collie" : "bearded collie",
"Beauceron" : "beauceron",
"Bedlington Terrier" : "Bedlington terrier",
"Beginning of month" : "Hónap kezdete",
"Belgian Hare" : "Belga nyúl",
"Belgian Shepherd Dog Sheepdog" : "belga pásztorkutya",
"Belgian Shepherd Laekenois" : "Laekenois belga pásztorkutya",
"Belgian Shepherd Malinois" : "Malinois belga pásztorkutya",
"Belgian Shepherd Tervuren" : "Tervuren belga pásztorkutya",
"Bengal" : "bengáli macska",
"Bernese Mountain Dog" : "berni pásztorkutya",
"Beveren" : "Beveren",
"Bichon Frise" : "Bichon Frise",
"Bird" : "Madár",
"Birman" : "birman macska",
"Bite" : "Harap",
"Biting" : "Harapás",
"Black" : "Fekete",
"Black Labrador Retriever" : "fekete labrador retriever",
"Black Mouth Cur" : "Black Mouth Cur",
"Black Tortie" : "Teknőstarka",
"Black and Brindle" : "fekete és tigriscsíkos",
"Black and Brown" : "fekete és barna",
"Black and Tan" : "Fekete-cser",
"Black and Tan Coonhound" : "Fekete-cser mosómedvekopó",
"Black and White" : "Fekete-fehér",
"Bloodhound" : "véreb",
"Blue" : "kék",
"Blue Tortie" : "Kék teknőstarka",
"Bluetick Coonhound" : "Kék pettyes mosómedvekopó",
"Board and Food" : "Ellátás és etetés",
"Boarding" : "Elhelyezés",
"Boarding Cost" : "Ellátási költség",
"Boarding cost type" : "Ellátási költség fajtája",
"Bobtail" : "bobtail",
"Body" : "test",
"Bombay" : "Bombay macska",
"Bonded" : "Kötegelt",
"Bonded With" : "Összeköttetésben",
"Books" : "Könyvek",
"Border Collie" : "border collie",
"Border Terrier" : "border terrier",
"Bordetella" : "Bordetella",
"Born in Shelter" : "Menhelyen született",
"Born on Foster {0}" : "Ideiglenes befogadónál született {0}",
"Born on Shelter {0}" : "Menhelyen született {0}",
"Borzoi" : "orosz agár",
"Boston Terrier" : "Boston terrier",
"Both" : "Mindkettő",
"Bouvier des Flanders" : "Flandriai Juhászkutya",
"Boxer" : "Boxer",
"Boykin Spaniel" : "boykin spániel",
"Breed" : "fajta",
"Breed to use when publishing to third party services and adoption sites" : "Harmadik fél szolgáltatásához és hirdetési oldalakon történő közzétételnél feltüntetett fajta",
"Breeds" : "fajták",
"Briard" : "briard",
"Brindle" : "tigriscsíkos",
"Brindle and Black" : "tigriscsíkos és fekete",
"Brindle and White" : "tigriscsíkos és fehér",
"Britannia Petite" : "Britannia Petite nyúl",
"British Shorthair" : "brit rövidszőrű",
"Brittany Spaniel" : "Brittany Spaniel",
"Brotogeris" : "Brotogeris",
"Brought In" : "Leadás dőpontja",
"Brought In By" : "Leadó",
"Brown" : "Barna",
"Brown and Black" : "barna-fekete",
"Brown and White" : "barna és fehér",
"Browse sheltermanager.com" : "Sheltermanager.com böngészése",
"Browse sheltermanager.com and install some reports, charts and mail merges into your new system." : "Sheltermanager.com böngészése és néhány jelentés, táblázat és levél telepítése az új rendszerébe ",
"Brussels Griffon" : "Brüsseli griffon",
"Budgie/Budgerigar" : "Hullámos papagáj",
"Bulk Complete Diary" : "Teljes napló tömörítése",
"Bulk Complete Medical Records" : "Összes egészségügyi információ összefoglalása",
"Bulk Complete Vaccinations" : "Valamennyi oltás teljesítése",
"Bulk Complete Waiting List" : "Teljes várólista tömörítése",
"Bulk Regimen" : "Étrend összefoglalása",
"Bulk Test" : "Teszt összefoglalása",
"Bulk Transport" : "Tömeges szállítás",
"Bulk Vaccination" : "Oltások tömeges rögzítése",
"Bulk change animals" : "Állatok tömeges módosítása",
"Bull Terrier" : "bullterrier",
"Bullmastiff" : "bullmasztiff",
"Bunny Rabbit" : "Bunny nyúl",
"Burmese" : "burmai",
"Burmilla" : "burmilla",
"By" : "Által",
"CC" : "Másolatot kap",
"CSV of animal/adopter data" : "Állat/örökbefogadó adatainak CSV-je",
"CSV of animal/medical data" : "Állat / orvosi adatok CSV-je",
"CSV of incident data" : "Eset adataihoz tartozó CSV",
"CSV of license data" : "Licenc adatok CSV-je",
"CSV of payment data" : "Fizetési adatok CSV-je",
"CSV of person data" : "Személyes adatok CSV-je",
"Caique" : "Caique",
"Cairn Terrier" : "Cairn terrier",
"Calendar View" : "Naptárnézet",
"Calendar view" : "Naptár nézet",
"Calico" : "calico",
"Californian" : "kaliforniai",
"Call" : "Hívás",
"Call Date/Time" : "Hívás napja/ideje",
"Caller" : "Hívó",
"Caller Name" : "Hívó neve",
"Caller Phone" : "Hívó telefonszáma",
"Camel" : "teve",
"Can Login" : "Beléphet",
"Can afford donation?" : "Tud adományozni? ",
"Can't reserve an animal that has an active movement." : "Aktív eseménnyel rendelkező állat nem foglalható le. ",
"Canaan Dog" : "Canaan Dog",
"Canadian Hairless" : "Canadian Hairless",
"Canary" : "kanári",
"Cancel" : "Törlés",
"Cancel holds on animals this many days after the brought in date, or 0 to never cancel" : "A törlés az állatok bekerülésétől számított ennyi napig fennál, vagy 0 esetén sosem törlődik.",
"Cancel unadopted reservations after" : "A nem teljesült foglalások ennyi idő után törlésre kerülnek",
"Cancel unadopted reservations after this many days, or 0 to never cancel" : "A nem teljesült foglalások ennyi idő után törlésre kerülnek, vagy 0 esetén sosem törlődnek",
"Cancelled" : "Törölve",
"Cancelled Reservation" : "Törölt foglalás",
"Cane Corso Mastiff" : "cane corso",
"Carolina Dog" : "Carolina Dog",
"Cash" : "Készpénz",
"Cat" : "Macska",
"Catahoula Leopard Dog" : "Catahoula Leopard Dog",
"Category" : "Kategória",
"Cats" : "Macskák",
"Cattery" : "Tenyészet",
"Cattle Dog" : "Pásztorkutya",
"Cavalier King Charles Spaniel" : "Cavalier King Charles Spaniel",
"Cell" : "Mobil",
"Cell Phone" : "Mobiltelefon",
"Champagne D'Argent" : "Champagne D'Argent fajta nyúl",
"Change" : "Módosítás",
"Change Accounts" : "Fiók megváltoztatása",
"Change Animals" : "Állat módosítása",
"Change Citations" : "Hivatkozás módosítása",
"Change Clinic Apointment" : "Kórházi előjegyzés módositása",
"Change Cost" : "Költség módosítása",
"Change Date Required" : "A kivánt dátum módosítása",
"Change Diets" : "Diéta módosítása",
"Change Found Animal" : "Talált állat módosítása",
"Change Incidents" : "Események módosítása",
"Change Investigation" : "Vizsgálat módosítása",
"Change Licenses" : "Engedélyek módosítása",
"Change Litter" : "Alom módosítása",
"Change Log" : "Változtatások listája",
"Change Lost Animal" : "Elveszett állat módosítása",
"Change Media" : "Video módositása ",
"Change Medical Records" : "Egészségügyi adatok módosítása",
"Change Movement" : "Áthelyezés módosítása",
"Change Password" : "Jelszó megváltoztatása",
"Change Payments" : "Kifizetés módosítása",
"Change Person" : "Személy módosítása",
"Change Publishing Options" : "Közzétételi opciók módosítása",
"Change Report" : "Jelentés módosítása",
"Change Rota" : "Sorrend módosítása",
"Change Stock" : "Állomány módosítása",
"Change System Options" : "Rendszeropciók módostása",
"Change Tests" : "Tesztek módosítása",
"Change Transactions" : "Tranzakciók módosítása",
"Change Transport" : "Transzport módosítása",
"Change Trap Loans" : "Csapdakölcsönzések módosítása ",
"Change User Settings" : "Felhasználó beállítások módosítása",
"Change Vaccinations" : "Oltások módosítása",
"Change Vouchers" : "Bizonylat módosítása",
"Change Waiting List" : "Várólista módosítása",
"Change date required on selected treatments" : "A kiválasztott kezelésekhez szükséges dátumok módosítása",
"Changed Mind" : "Módosított vélemény",
"Chart" : "Diagram",
"Chart (Bar)" : "Sávdiagram",
"Chart (Line)" : "Vonaldiagram",
"Chart (Pie)" : "Kördiagram",
"Chart (Point)" : "Pontdiagram",
"Chart (Steps)" : "Táblázat (lépcsők)",
"Chartreux" : "karthauzi macska",
"Check" : "Ellenőrzés",
"Check License" : "Engedély ellenőrzése",
"Check No" : "Ellenőrzési szám",
"Checkbox" : "Jelölőnégyzet",
"Checked By" : "Ellenőrizte",
"Checkered Giant" : "Checkered Giant",
"Cheque" : "Csekk",
"Chesapeake Bay Retriever" : "Chesapeake Bay Retriever",
"Chicken" : "csirke",
"Chihuahua" : "csivava",
"Children" : "Gyermekek",
"Chinchilla" : "csincsilla",
"Chinese Crested Dog" : "Kínai meztelen kutya",
"Chinese Foo Dog" : "Kínai Foo Dog",
"Chlamydophila" : "Chlamydophila",
"Chocolate" : "Csokoládé",
"Chocolate Labrador Retriever" : "Csokoládé színű labrador retriever",
"Chocolate Tortie" : "Chocolate Tortie",
"Chow Chow" : "csau-csau",
"Cinnamon" : "fahéj",
"Cinnamon Tortoiseshell" : "Fahéjárnyalatú teknőctarka",
"Citation Type" : "Hivatkozás típusa",
"Citation Types" : "Hivatkozás típusok",
"Citations" : "Hivatkozások",
"City" : "Város",
"City contains" : "Város tartalmaz",
"Class" : "Osztály",
"Clear" : "Törlés",
"Clear and sign again" : "Törlés és újból bejelentkezés",
"Clear tables before importing" : "Importálás előtt törölje a táblát",
"Clinic" : "Kórház",
"Clinic Calendar" : "Kórházi naptár",
"Clinic Invoice - {0}" : "Kórházi számla",
"Clinic Statuses" : "Kórházi státuszok",
"Clone" : "Másol",
"Clone Animals" : "Állatok másolása",
"Clone Rota" : "Beosztás másolása",
"Clone the rota this week to another week" : "Eheti beosztás átmásolása másik hétre",
"Cloning..." : "Máslolás",
"Close" : "Bezárás",
"Clumber Spaniel" : "Clumber Spaniel",
"Clydesdale" : "Clydesdale ló",
"Coat" : "Szőr",
"Coat Type" : "Szőr típusa",
"Coat Types" : "Szőrtipusok",
"Cockapoo" : "Cockapoo",
"Cockatiel" : "Nimfapapagáj",
"Cockatoo" : "Kakadu",
"Cocker Spaniel" : "Cocker spániel",
"Code" : "Kód",
"Code contains" : "Kód tartalma",
"Code format tokens:" : "Kód formátum token:",
"Collie" : "skót juhászkutya",
"Color" : "Szín",
"Color to use when publishing to third party services and adoption sites" : "Harmadik fél szolgáltatója és hirdetőfelületeken való közzétételhez használatos szín.",
"Colors" : "Színek",
"Columns" : "Oszlopok",
"Columns displayed" : "Megjelenített oszlopok",
"Comma separated list of units for this location, eg: 1,2,3,4,Isolation,Pen 5" : "",
"Comments" : "Megjegyzések",
"Comments Contain" : "Megjegyzések tartalma",
"Comments contain" : "Megjegyzések tartalma",
"Comments copied to web preferred media." : "Webes felületen preferált adathordozóra másolt megjegyzések.",
"Complaint" : "Panasz",
"Complete" : "Kész",
"Complete Tasks" : "Kész feladatok",
"Completed" : "Befejezett",
"Completed Between" : "között befejezve",
"Completed Type" : "típus befejezve",
"Completed notes upto today" : "Befejezett jegyzetek mai napig",
"Completion Date" : "Teljesítás dátuma",
"Completion Type" : "Teljesítés típusa",
"Configuration" : "Beállítás",
"Confirm" : "Megerősítés",
"Confirm Password" : "Jelszó megerõsítése",
"Confirmation message" : "Megerősítő üzenet",
"Confirmed" : "Megerősitett",
"Consulting Room" : "Tárgyaló",
"Consulting Room - {0}" : "Tárgyaló terem - {0}",
"Consumed" : "Elfogyasztott",
"Contact" : "Kapcsolattartó",
"Contact Contains" : "Kapcsolattartó adatai",
"Conure" : "Conure papagáj",
"Convert this reservation to an adoption" : "A foglalást módosítsa örökbefoglalásra.",
"Coonhound" : "Kopó",
"Copy animal comments to the notes field of the web preferred media for this animal" : "Az állathoz fűzött kommentek átmásolása az adott állat preferált webes felületének megjegyzés rovatába",
"Copy from animal comments" : "Másold az állat megjegyzés rovatból",
"Copy of {0}" : "{0} Másolata",
"Corded" : "Összekötött",
"Corgi" : "corgi",
"Cornish Rex" : "Cornish Rex",
"Cost" : "Költség",
"Cost For" : "Felmerült költségek",
"Cost Type" : "Költség típusa",
"Cost Types" : "Költség típusok",
"Cost date must be a valid date" : "A költség dátuma érvényes dátum kell legyen",
"Cost record" : "Költség jegyzék",
"Costs" : "Költségek",
"Costs need a date and amount." : "A költségek megadásához dátum és összeg szükséges",
"Coton de Tulear" : "Coton de Tulear",
"Could not find animal with name '{0}'" : "'{0}' nevű kutya nem található",
"Country" : "Ország",
"Courtesy Listing" : "Önkéntes lista",
"Cow" : "tehén",
"Cream" : "krémszínű",
"Create" : "Létrehoz",
"Create Animal" : "Állat létrehozása",
"Create Log" : "Napló létrehozása",
"Create Payment" : "Kifizetés létrehozása",
"Create Waiting List" : "Várólista létrehozása",
"Create a cost record" : "Költség feljegyzés létrehozása",
"Create a due or received payment record from this appointment" : "Hozzon létre esedékes vagy beérkezett fizetési nyilvántartást erről a találkozóból.",
"Create a new animal by copying this one" : "Új állat rögzítése ennek az átmásolásával",
"Create a new animal from this found animal record" : "Talált állat adataiból új állat rögzítése",
"Create a new animal from this incident" : "Új állat rögzítése ebből az eseményből",
"Create a new animal from this waiting list entry" : "Új állat rögzítése ebből a várólista beírásból",
"Create a new document" : "Új dokumentum létrehozása",
"Create a new template" : "Új sablon létrehozása",
"Create a new template by copying the selected template" : "Új sablon létrehozása ennek a másolásával",
"Create a new waiting list entry from this found animal record" : "Talált állat adataiból új állat rögzítése a várólistára",
"Create and edit" : "Létrehoz ás szerkeszt",
"Create boarding cost record when animal is adopted" : "Ellátási költségek feljegyzésének létrehozása az állat örökbefogadásakor",
"Create diary notes from a task" : "Napló jegyzetek rögzítése egy feladatból",
"Create missing lookup values" : "Hozza létre a hiányzó kiválasztott értékeket",
"Create note this many days from today, or 9999 to ask" : "Hozzon létre bejegyzést a mai naptól számított ennyi nap elteltével vagy 9999 megkeresésre",
"Create this message" : "Ennek az üzenetnek a rögzítése",
"Create this person" : "Ennek a személynek a rögzítése",
"Created By" : "Létrehozta",
"Creating cost and cost types creates matching accounts and transactions" : "Költségek és költségtípusok létrehozásával megegyező számlák és tranzakciók jönnek létre",
"Creating payments and payments types creates matching accounts and transactions" : "Fizetések és fizetéstípusok létrehozásával megegyező számlák és tranzakciók jönnek létre.",
"Creating..." : "létrehozás",
"Credit Card" : "Bankkártya",
"Creme D'Argent" : "",
"Criteria:" : "Feltétel:",
"Crossbreed" : "keverék",
"Cruelty Case" : "Kegyetlenkedés",
"Culling" : "Selejtezés",
"Curly" : "Göndör",
"Current" : "Jelenlegi",
"Current Vet" : "Jelenlegi állatorvos",
"Cymric" : "Wales-i",
"D (Dog)" : "K (Kutya)",
"DD = current day" : "DD = mai nap",
"DDL dump (DB2)" : "DDL dump (DB2)",
"DDL dump (MySQL)" : "DDL dump (MySQL)",
"DDL dump (PostgreSQL)" : "DDL dump (PostgreSQL)",
"DHLPP" : "DHLPP",
"DO NOT use this field to store notes about what the person is looking for." : "NE használja ezt a mezőt annak rögzítésére, hogy a személy mit keres.",
"DOA {0}" : "DOA {0}",
"DOB" : "Születési dátum",
"Dachshund" : "tacskó",
"Daily Boarding Cost" : "Napi ellátási költségek",
"Dalmatian" : "dalmata",
"Dandi Dinmont Terrier" : "Dandi Dinmont Terrier",
"Data" : "Adat",
"Data Protection" : "Adatvédelem",
"Database" : "Adatbázis",
"Date" : "Dátum",
"Date '{0}' is not valid." : "Dátum '{0}' érvénytelen",
"Date Brought In" : "Bekerülés dátuma",
"Date Found" : "Megtalálás dátuma",
"Date Lost" : "Elvesztés dátuma",
"Date Of Birth" : "Születési dátum",
"Date Put On" : "Feltöltés dátuma",
"Date Removed" : "Dátum törlése",
"Date Reported" : "Bejelentés dátuma",
"Date and notes are mandatory." : "Dátum és jegyzetek kitöltése kötelező.",
"Date brought in cannot be blank" : "A bekerülés dátuma nem maradhat üresen",
"Date brought in cannot be in the future." : "A bekerülés dátuma nem lehet jövőbeli dátum.",
"Date brought in is not valid" : "A bekerülés dátuma nem érvényes",
"Date found cannot be blank" : "A megtalálás dátuma nem maradhat üresen",
"Date found cannot be blank." : "A megtalálás dátuma nem maradhat üresen",
"Date lost cannot be blank" : "Az elvesztés dátuma nem maradhat üresen",
"Date lost cannot be blank." : "Az elvesztés dátuma nem maradhat üresen",
"Date of Birth" : "Születési dátum",
"Date of birth cannot be blank" : "A születési dátum nem maradhat üresen",
"Date of birth cannot be in the future." : "A születési dátum nem lehet a jövőben",
"Date of birth is not valid" : "Érvénytelen születési dátum",
"Date of last owner contact" : "A gazdival történt utolsó kapcsolatfelvétel időpontja",
"Date put on" : "Feltöltés dátuma",
"Date put on cannot be blank" : "A feltöltés dátuma nem maradhat üresen",
"Date put on list" : "Listára került dátum",
"Date removed" : "Dátum törlése",
"Date reported cannot be blank" : "A jelentés dátuma nem maradhat üresen",
"Date reported cannot be blank." : "Az elvesztés dátuma nem maradhat üresen",
"Date/Time" : "Dátum/Idő",
"Day" : "Nap",
"Day Pivot" : "Napi fordulópont",
"Days On Shelter" : "Menhelyen töltött napk száma",
"Dead On Arrival" : "Beérkezéskor halott",
"Dead animal" : "Elhullott állat",
"Dead on arrival" : "Beérkezéskor halott",
"Death" : "Elhullás",
"Death Comments" : "Elhullással kapcsolatos megjegyzések",
"Death Reason" : "Elhullás oka",
"Death Reasons" : "Elhullás okai",
"Debit Card" : "Hitelkártya",
"Dec" : "Dec.",
"Deceased" : "halott",
"Deceased Date" : "Halál dátuma",
"December" : "December",
"Declawed" : "halott",
"Declined" : "Elutasított",
"Default Breed" : "Alapértelmezett fajta (keverék)",
"Default Brought In By" : "Alapértelmezést létrehozta",
"Default Coat Type" : "Alapértelmezett szőrtípus",
"Default Color" : "Alapértelmezett szín",
"Default Cost" : "Alapértelmezett költség",
"Default Death Reason" : "Alapértelmezett elhullási ok",
"Default Diary Person" : "Alapértelmezett napló személy",
"Default Entry Reason" : "Alapértelmezett rögzítési ok",
"Default Incident Type" : "Alapértelmezett incidens típusa",
"Default Location" : "Alapértelmezett hely",
"Default Log Filter" : "Alapértelmezett napló szűrő",
"Default Log Type" : "Alapértelmezett napló típus",
"Default Payment Method" : "Alap fizetési mód",
"Default Payment Type" : "Alap fizetési típus",
"Default Reservation Status" : "Alap foglalási státusz",
"Default Return Reason" : "Alap visszaadási indok",
"Default Rota Shift" : "Alapértelmezett sorrend váltás",
"Default Size" : "Alapértelmezett méret",
"Default Species" : "Alapértelmezett faj",
"Default Test Type" : "Alapértelmezett teszt típus",
"Default Type" : "Alapértelmezett típus",
"Default Vaccination Type" : "Alapértelmezett oltás típus",
"Default Value" : "Alapértelmezett érték",
"Default daily boarding cost" : "Alapértelmezett napi ellátási költség",
"Default destination account for payments" : "Befizetések alapértelmezett célszámlája",
"Default image for documents" : "Dokumentumok alapértelmezett képe",
"Default image for this record and the web" : "Jelen jelentés és a webes felülethez alapértelmezett kép",
"Default source account for costs" : "Költségek alapértelmezett forrászszámlája",
"Default to advanced find animal screen" : "Állatok részletes keresésének alapértelmezett képernyője",
"Default to advanced find person screen" : "Személyek részletes keresésének alapértelmezett képernyője",
"Default transaction view" : "Alapértelmezett tranzakciós nézet",
"Default urgency" : "Alapértelmezett sürgősség",
"Default video for publishing" : "Alapértelmezett videó publikálás céljából",
"Default view" : "Alapértelmezett nézet",
"Defaults" : "Alapértékek",
"Defaults formats for code and shortcode are TYYYYNNN and NNT" : "A kódok és rövidített kódok alapértelmezett formátumai TYYYYNNN és NNT",
"Delete" : "Töröl",
"Delete Accounts" : "Fiókok törlése",
"Delete Animals" : "Állatok törlése",
"Delete Citations" : "Hivatkozások törlése",
"Delete Clinic Appointment" : "Kórházi előjegyzések törlése",
"Delete Cost" : "Költség törlése",
"Delete Diary" : "Napló törlése",
"Delete Diets" : "Diéták törlése",
"Delete Document from Repository" : "Dokumentum törlése tárhelyről",
"Delete Found Animal" : "Talált állat törlése",
"Delete Incidents" : "Incidens törlése",
"Delete Incoming Forms" : "Bejövő nyomtatványok törlése",
"Delete Investigation" : "Vizsgálat törlése",
"Delete Licenses" : "Engedélyek törlése",
"Delete Litter" : "Alom törlése",
"Delete Log" : "Napló törlése",
"Delete Lost Animal" : "Elveszett állat törlése",
"Delete Media" : "Video törlése",
"Delete Medical Records" : "Állatorvosi jelentések törlése",
"Delete Movement" : "Áthelyezés törlése",
"Delete Payments" : "Kifizetések törlése",
"Delete Person" : "Személy törlése",
"Delete Regimen" : "Étrend törlése",
"Delete Report" : "Jelentés törlése",
"Delete Rota" : "Sorrend törlése",
"Delete Stock" : "Állomány törlése",
"Delete Tests" : "Tesztek törlése",
"Delete Transport" : "Transzport törlése",
"Delete Trap Loans" : "Csapdakölcsönzés törlése",
"Delete Treatments" : "Kezelések törlése",
"Delete Vaccinations" : "Oltások törlése",
"Delete Vouchers" : "Igazolás törlése",
"Delete Waiting List" : "Várólista törlése",
"Delete all rota entries for this week" : "Az ezen hétre vonatkozó valamennyi sorrendi bejegyzés törlése",
"Delete this animal" : "Ennek az állatnak a törlése",
"Delete this incident" : "Ennek az eseménynek a törlése",
"Delete this person" : "Ennek a személynek a törlése",
"Delete this record" : "Ennek az adatnak a törlése",
"Delete this waiting list entry" : "Ennek a várólista bejegyzésnek a törlése",
"Denied" : "Elutasítva",
"Deposit" : "Letét",
"Deposit Account" : "Letét számla",
"Deposit Returned" : "Letét visszaadása",
"Description" : "Leírás",
"Description Contains" : "Leírás tartalma",
"Description cannot be blank" : "A leírás nem maradhat üresen",
"Deselect" : "Kijelölés megszüntetése",
"Details" : "Részletek",
"Devon Rex" : "Devon Rex",
"Dialog title" : "Az ablak felirata",
"Diary" : "Napló",
"Diary Task" : "Napló feladat",
"Diary Task: {0}" : "Napló feladat: {0}",
"Diary Tasks" : "Napló feladat",
"Diary and Messages" : "Napló és üzenetek",
"Diary calendar" : "Napló naptár",
"Diary date cannot be blank" : "Napló dátuma nem maradhat üresen",
"Diary date is not valid" : "Napló dátum érvénytelen",
"Diary for {0}" : "{0} naplója",
"Diary note cannot be blank" : "Napló dátuma nem maradhat üresen",
"Diary note {0} marked completed" : "{0} Naplóbejegyzés teljesítettként jelölve",
"Diary note {0} rediarised for {1}" : "{0} Naplóbejegyzés újbóli feljegyzése {1}-hez",
"Diary notes for: {0}" : "{0} Naplóbejegyzése",
"Diary notes need a date and subject." : "A naplóbejegyzésekhez dátum és tárgy szükséges.",
"Diary subject cannot be blank" : "Napló témája nem maradhat üresen",
"Diary task items need a pivot, subject and note." : "A naplófeladatokhoz határidő, tárgy és megjegyzés szükséges.",
"Diary tasks need a name." : "A naplófeladatokat meg kell nevezni.",
"Did not ask" : "Nem kérdezte",
"Did you know?" : "Tudta-e?",
"Died" : "Elhunyt",
"Died off shelter" : "Menhelyen kívül meghalt",
"Died {0}" : "Meghalt {0}",
"Diet" : "Diéta",
"Diets" : "Diéták",
"Diets need a start date." : "A diétákhoz kezdő időpont szükséges",
"Dispatch" : "Elküldés",
"Dispatch Address" : "Értesítési cím",
"Dispatch Between" : "Értesítés között",
"Dispatch Date/Time" : "Értesítés dátum/idő",
"Dispatch {0}: {1}" : "Értesítés {0}: {1}",
"Dispatched ACO" : "Elküldött ACO",
"Display" : "Megjelenítés",
"Display Index" : "Kijelző index",
"Display a search button at the right side of the search box" : "Keresőgomb megjelenítése a kereső mező jobb alsó sarkában",
"Distemper" : "szopornyica",
"Do Not Publish" : "Nem publikálható",
"Do Not Register Microchip" : "Ne regisztrálja a mikrochipet",
"Do not show" : "Ne jelenjen meg",
"Doberman Pinscher" : "pincser",
"Document" : "Dokumentum",
"Document Link" : "Dokumentum link",
"Document Repository" : "Dokumentumgyüjtemény",
"Document Templates" : "Dokumentum formanyomtatványok",
"Document file" : "Dokumentum fájl",
"Document signed" : "Dokumentum aláirása",
"Document signing request" : "Dokumentum aláírási kérelem",
"Document templates" : "Dokumentum formanyomtatványok",
"Documents" : "Dokumentumok",
"Dog" : "Kutya",
"Dogo Argentino" : "Argentin dog",
"Dogs" : "Kutyák",
"Dogue de Bordeaux" : "Bordeaux-i dog",
"Domestic Long Hair" : "Hosszúszőrű házimacska",
"Domestic Medium Hair" : "Félhosszú szőrű házimacska",
"Domestic Short Hair" : "Rövid szőrű házimacska",
"Don't create a cost record" : "Ne hozzon létre költségfeljegyzést",
"Don't scale" : "Ne méretezzen",
"Donated" : "Támogatott",
"Donation" : "Adomány",
"Donation?" : "Adomány?",
"Donations for animals entering the shelter" : "A menhelyre érkező állatokra érkező adomány",
"Done" : "Kész",
"Donkey" : "Szamár",
"Donkey/Mule" : "Szamár/Öszvér",
"Donor" : "Adományozó",
"Dosage" : "Adagolás",
"Dove" : "Galamb",
"Download" : "Letöltés",
"Draft" : "Tervezet",
"Driver" : "Meghajtó",
"Drop files here..." : "Fájl áthelyezése ide ",
"Dropoff" : "Lemorzsolódás",
"Duck" : "Kacsa",
"Due" : "Esedékes",
"Due in next month" : "Következő hónapban esedékes",
"Due in next week" : "Következő héten esedékes",
"Due in next year" : "Következő évben esedékes",
"Due today" : "Ma esedékes",
"Duration" : "Időtartam",
"Dutch" : "Holland",
"Dutch Shepherd" : "Holland juhászkutya",
"Dwarf" : "Törpe",
"Dwarf Eared" : "Kis fülű",
"E = first letter of animal entry category" : "E = állat felviteli kategória első betűje",
"EE = first and second letter of animal entry category" : "EE = állat beviteli kategóriájának első és második betűje",
"Eclectus" : "Eclectus",
"Edit" : "Szerkesztés",
"Edit All Diary Notes" : "Összes napló jegyzet szerkesztése",
"Edit Appointment" : "Időpont szerkesztése",
"Edit Diary Tasks" : "Napló feladatok szerkesztése",
"Edit HTML publishing templates" : "HTML közzétételi sablonok szerkesztése",
"Edit Header/Footer" : "Fejléc/Lábléc szerkesztése",
"Edit Invoice Item" : "Számla tétel szerkesztése",
"Edit Lookups" : "Keresések szerkesztése",
"Edit My Diary Notes" : "Napló feljegyzéseim szerkesztése",
"Edit Online Forms" : "Online nyomtatvány szerkesztése",
"Edit Reports" : "Jelentések szerkesztése",
"Edit Roles" : "Feladatok szerkesztése",
"Edit Users" : "Felhasználók szerkesztése",
"Edit account" : "Fiók szerkesztése",
"Edit additional field" : "További mező szerkesztése",
"Edit citation" : "Oltás szerkesztése",
"Edit cost" : "Költségek szerkesztés",
"Edit diary" : "Napló szerkesztése",
"Edit diary notes" : "Napló feljegyszések szerkesztése",
"Edit diary task" : "Napló feladat szerkesztése",
"Edit diary tasks" : "Napló feladatok szerkeszétse",
"Edit diet" : "Étrend szerkesztése",
"Edit document" : "Dokumentum szerkesztése",
"Edit form field" : "Nyomtatványmező szerkesztése",
"Edit investigation" : "Vizsgálat szerkesztése",
"Edit invoice" : "Számla szerkeszétse",
"Edit license" : "Engedély szerkesztése",
"Edit litter" : "Alom szerkesztése",
"Edit litters" : "Almok szerkesztése",
"Edit log" : "Bejegyzés szerkesztése",
"Edit media notes" : "Média feljegyzések szerkesztése",
"Edit medical profile" : "Egészségügyi profil szerkesztése",
"Edit medical regimen" : "Egészségügyi étrend szerkesztése",
"Edit movement" : "Mozgatások szerkesztése",
"Edit my diary notes" : "Napló jegyzeteim szerkesztése",
"Edit my diary notes" : "Napló jegyzeteim szerkeszétse",
"Edit notes" : "Feljegyzések szerkesztése",
"Edit online form" : "Online nyomtatvány szerkesztése",
"Edit online form HTML header/footer" : "Online nyomtatvány HTML fejlécének/láblécének szerkesztése",
"Edit payment" : "Kifizetés szerkesztése",
"Edit report" : "Riportok szerkesztése",
"Edit report template HTML header/footer" : "Jelentés minta HTML fejlécének/láblécének szerkesztése",
"Edit role" : "Feladat szerkesztése",
"Edit roles" : "Feladatok szerkesztése",
"Edit rota item" : "Lista elem szerkesztése",
"Edit stock" : "Készlet szerkesztése",
"Edit system users" : "Rendszer felhasználók szerkesztése",
"Edit template" : "Template hozzáadása",
"Edit test" : "Teszt szerkesztése",
"Edit the current waiting list" : "Aktuális várólista szerkesztése",
"Edit transaction" : "Tranzakció szerkesztése",
"Edit transport" : "Transzport szerkesztése",
"Edit trap loan" : "Csapda kölcsönzés szerkesztése",
"Edit user" : "Felhasználó szerkesztése",
"Edit vaccination" : "Oltás szerkesztése",
"Edit voucher" : "Utalvány szerkesztése",
"Edit {0}" : "Szerkesztés {0}",
"Egyptian Mau" : "Egyiptomi Mau",
"Electricity Bills" : "Áramszámlák",
"Email" : "E-mail",
"Email Address" : "E-mail cím",
"Email PDF" : "E-mail PDF",
"Email Person" : "E-mail személy",
"Email To" : "Email címzett",
"Email a copy of the selected HTML documents as PDFs" : "A kiválasztott HTML dokumentumok másolatának PDF-ként való elküldése E-mailen",
"Email a copy of the selected media files" : "A kiválasztott média fájlok másolatának elküldése E-mailen",
"Email address" : "Email cím",
"Email document for electronic signature" : "Dokumentum elektronikus aláírásra való elküldése",
"Email incident notes to ACO" : "Események jegyzeteinek elküldése ACO részére",
"Email incoming form submissions to this comma separated list of email addresses" : "Bejövő emailek formai hozzáigazítása ezen vesszővel elválasztott email címlistához",
"Email media" : "Média küldése E-mailen ",
"Email person" : "E-mail címzett",
"Email signature" : "Aláírás küldése E-mailen",
"Email submissions to" : "Email benyújtása a",
"Email this message to all matching users" : "Ezen üzenet elküldése valamennyi megfelelő felhasználónak emailben",
"Email this person" : "Email küldése ennek a személynek",
"Email users their diary notes each day" : "Felhasználók naplójegyezeteinek napi továbbítása számukra ",
"Emu" : "Emu",
"Enable FTP uploading" : "FTP feltöltés engedélyezése",
"Enable accounts functionality" : "Fiókok funkcionalitásának engedélyezése",
"Enable location filters" : "Helyszín szűrő engedélyezése",
"Enable lost and found functionality" : "Elvesztett és megtalált funkcionalitás engedélyezése",
"Enable multiple sites" : "Több helyszín megadásának lehetősége",
"Enable the waiting list functionality" : "Várólista működésének engedélyezése",
"Enable visual effects" : "Vizuális effektek engedélyezése",
"Enabled" : "Engedélyezve",
"End Of Day" : "Nap vége",
"End Time" : "Befejezési idő",
"End at" : "Befejezés időpontja",
"End of month" : "Hónap vége",
"End of year" : "Év vége",
"Ends" : "Befejeződik",
"Ends after" : "Befejezés ez után",
"English Bulldog" : "angol bulldog",
"English Cocker Spaniel" : "angol cocker spániel",
"English Coonhound" : "Angol mosómedvekopó",
"English Lop" : "English Lop (nyúl) lógó fülű",
"English Pointer" : "pointer",
"English Setter" : "angol szetter",
"English Shepherd" : "angol szetter",
"English Spot" : "Angol foltos nyúl",
"English Springer Spaniel" : "angol springer spániel",
"English Toy Spaniel" : "angol cocker spániel",
"Entered (newest first)" : "Beléptetve (legujabbakkal kezdve)",
"Entered (oldest first)" : "Beléptetve (legrégebbivel kezdve)",
"Entered From" : "Beléptetés kezdve",
"Entered To" : "Beléptetés eddig",
"Entered shelter" : "Menhelyre bekerült",
"Entering 'activelost' or 'activefound' in the search box will show you lost and found animals reported in the last 30 days." : "'activelost' vagy 'activefound' szavak keresőbe való beírásával az utóbbi 30 napban elveszettként és megtaláltként bejelentett állatokat fogja felhozni",
"Entering 'deceased' in the search box will show you recently deceased animals." : "'deceased' szó keresőbe való beírásával a nemrégiben elhullott állatokat fogja felhozni.",
"Entering 'fosterers', 'homecheckers', 'staff', 'volunteers', 'aco' or 'members' in the search box will show you those groups of people." : "Az 'ideiglenes befogadók', 'látogatók', 'alkalmazottak', 'önkéntesek', 'aco' vagy 'tagok' szavak keresőbe írása esetén ezen személyek csoportjait fogja felhozni",
"Entering 'notforadoption' in the search box will show you all shelter animals with the not for adoption flag set." : "A 'nemörökbefogadható' kifejezés keresőbe való beírásával azon menhelyi állatok listája fog megjelenni, amelyek nem örökbefogadhatóként vannak feltüntetve.",
"Entering 'os' in the search box will show you all shelter animals." : "Az 'os' kifejezés keresőbe való beírásával valamennyi menhelyi állat listája fog megjelenni.",
"Entlebucher" : "Entlebuchi pásztorkutya",
"Entry" : "Belépés",
"Entry Category" : "Belépési kategória",
"Entry Donation" : "Belépési adomány",
"Entry Reason" : "Belépés oka",
"Entry Reason Category" : "Belépési ok kategóriája",
"Entry Reasons" : "Belépési okok",
"Entry reason" : "Bejegyzési ok",
"Error contacting server." : "Hiba a kiszolgálóhoz csatlakozáskor:",
"Escaped" : "Megszökött",
"Escaped {0}" : "Megszökött {0}",
"Eskimo Dog" : "eszkimókutya",
"Estimate" : "Becsült",
"Euthanized" : "Elaltatva",
"Euthanized {0}" : "Elaltatva {0}",
"Every day" : "Minden nap",
"Exclude animals who are aged under" : "Ilyen kor alatti állatok kizárása",
"Exclude from bulk email" : "Kör-emailből kizárva",
"Exclude new animal photos from publishing" : "Az új állatok fotóinak kizárása a publikálásból",
"Exclude this image when publishing" : "Közzététel esetén ezen kép ne jelenjen meg",
"Execute" : "Végrehajtás",
"Execute Script" : "Parancsfájl végrehajtása",
"Execute the SQL in the box below" : "SQL alábbi mezőben való végrehajtása",
"Executing Task" : "Feladat teljesítése",
"Executing..." : "Végrehajtás…",
"Exotic Shorthair" : "Egzotikus rövidszőrű macska",
"Expense" : "Költség",
"Expense::" : "Költség",
"Expenses::Board" : "Kiadások: Élelmezés",
"Expenses::Electricity" : "Kiadások::Elektromos áram",
"Expenses::Food" : "Kiadások::Élelmiszerek",
"Expenses::Gas" : "Kiadások::Gáz",
"Expenses::Phone" : "Kiadások::Telefon",
"Expenses::Postage" : "Kiadások::Posta",
"Expenses::Stationary" : "Kiadások::Irodaszerek",
"Expenses::Water" : "Kiadások::Víz",
"Expire in next month" : "Következő hónapban esedékes",
"Expired" : "Lejárt",
"Expired in the last month" : "Múlt hónapban lejárt",
"Expired in the last week" : "Múlt héten lejárt",
"Expires" : "Lejár",
"Expiry" : "Lejárat",
"Expiry date" : "Lejárati dátum",
"Export" : "Exportálás",
"Export Animals as CSV" : "Állatok exportálása CSV-ként",
"Export Report" : "Jelentés exportálása",
"Export Reports as CSV" : "Jelentések exportálása CSV-ként",
"Export a CSV file of animal records that ASM can import into another database." : "Állatok rögzített adatait tartalmazó CSV fájl exportálása, amelyeket ASM más adatbázisba importálhat.",
"Export this database in various formats" : "Jelen adatbázis különböző formátumban való exportálása",
"Exporting the complete database can take some time and generate a very large file, are you sure?" : "A teljes adatbázis exportálása időbe telik és nagyon nagy fájlt generál. Folytatja?",
"Extra Images" : "További képek",
"Extra images" : "További képek",
"Extra-Toes Cat (Hemingway Polydactyl)" : "Hemingway (polydactyl) macska",
"F (Feral Cat)" : "Vadmacska",
"FECV/FeCoV" : "FECV/FeCoV",
"FIPV" : "FIP",
"FIV" : "FIV",
"FIV Result" : "FIV eredmény",
"FIV+" : "FIV+",
"FIV/L Test Date" : "FIV/L Teszt időpontja",
"FIV/L Tested" : "FIV/L-re tesztelve",
"FLV" : "FLV",
"FLV Result" : "FLV eredmény",
"FLV+" : "FLV+",
"FTP hostname" : "FTP gépnév",
"FTP password" : "FTP jelszó",
"FTP username" : "FTP felhasználó neve",
"FVRCP" : "FVRCP",
"Facebook" : "Facebook",
"Failed sending email" : "Nem sikerült elküldeni egy e-mailt",
"Failed to create payment." : "Fizetés létrehozása nem sikerült.",
"Failed to renew license." : "Engedély megújítása meghiúsult.",
"Fawn" : "Sárga/őzbarna",
"Fawn Tortoiseshell" : "Őzbarna teknőctarka macska",
"FeLV" : "FeLV",
"Features" : "Jellemzők",
"Feb" : "Feb.",
"February" : "Február",
"Fee" : "Díj",
"Female" : "Nőstény",
"Feral" : "Vad",
"Ferret" : "Vadászgörény",
"Field Spaniel" : "Field spániel",
"Field names should not contain spaces." : "A mezők nevei nem tartalmazhatnak szóközt",
"Fila Brasileiro" : "Brazil masztiff",
"File" : "Fájl",
"Filter" : "Szűrő",
"Financial" : "Pénzügyi",
"Finch" : "Pinty",
"Find Animal" : "Talált állat hozzáadása",
"Find Animal/Person" : "Állat/Személy keresése",
"Find Found Animal" : "Talált állat keresése",
"Find Incident" : "Esemény keresése",
"Find Lost Animal" : "Elveszett állat keresése",
"Find Person" : "Személy keresése",
"Find a found animal" : "Talált állat keresése",
"Find a lost animal" : "Elveszett állat keresése",
"Find aco" : "Aco keresése",
"Find an incident" : "Esemény keresése",
"Find animal" : "Állat keresése",
"Find animal columns" : "Állat oszlopok keresése",
"Find animal control incidents returned {0} results." : "Állatfelügyeleti esetek keresés eredménye {0}.",
"Find animals matching the looking for criteria of this person" : "Ezen személy kiválasztási kritériumainak megfelelő állat keresése",
"Find donor" : "Adományozó keresése",
"Find driver" : "Meghajtóprogram keresése",
"Find fosterer" : "Ideiglenes befogadó keresése",
"Find found animal returned {0} results." : "Talált állat visszaadására vonatkozó eredmény keresése ",
"Find homechecked" : "Lelátogatottak keresése",
"Find homechecker" : "Lelátogató keresése",
"Find incident" : "Esemény keresése",
"Find lost animal returned {0} results." : "Elveszett állatok visszaadására vonatkozó eredmény keresése",
"Find member" : "Tag keresése",
"Find person" : "Személy keresése",
"Find person columns" : "Személy oszlopok keresése",
"Find retailer" : "Kiskereskedő keresése",
"Find shelter" : "Menhely keresése",
"Find staff" : "Alkalmazott keresése",
"Find staff/volunteer" : "Alkalmazott/önkéntes keresése",
"Find this address on a map" : "A cím keresése a térképen",
"Find vet" : "Állatorvos keresése",
"Find volunteer" : "Önkéntes keresése",
"Fine Amount" : "Bírság összege",
"Finnish Lapphund" : "Finn lapphund",
"Finnish Spitz" : "finn spicc",
"First Last" : "Első utolsó",
"First Names" : "Keresztnév:",
"First name(s)" : "Keresztnév(nevek)",
"First offence" : "Első támadás",
"Fish" : "Hal",
"Flag" : "Megjelölés",
"Flags" : "Jelölések",
"Flat-coated Retriever" : "Simaszőrű retriever",
"Flemish Giant" : "Belga óriás nyúl",
"Florida White" : "Floridai fehér nyúl",
"Followup" : "Követés",
"Followup Between" : "Követés megadott időszakban",
"Followup Date/Time" : "Követés Dátum/idő",
"Footer" : "Lábjegyzet",
"For" : "Részére",
"Forbidden" : "Tilos",
"Forenames" : "Utónevek",
"Forget" : "Elvet",
"Form URL" : "URL formázása",
"Forms need a name." : "A nyomtatványt el kell nevezni",
"Foster" : "Ideiglenes befogadó",
"Foster Book" : "Ideiglenes befogadók könyve",
"Foster Capacity" : "Ideiglenes befogadó kapacitás",
"Foster Transfer" : "Ideiglenes befogadóhoz átszállítás",
"Foster an animal" : "Egy állat ideiglenes befogadása",
"Foster book" : "Ideiglenes befogadók könyve",
"Foster movements must have a valid foster date." : "Ideiglenes befogadóhoz szállításnak érvényes ideigelenes befogadási dátummal kell rendelkeznie. ",
"Foster successfully created." : "Ideigles befogadóhoz való kihelyezés sikeresen megtörtént. ",
"Fostered" : "Ideiglenesen befogadott ",
"Fostered Animals" : "Ideiglenesen befogadott állat",
"Fostered to {0} since {1}" : "Ideiglenesen befogadva {0} által {1} óta ",
"Fosterer" : "Ideiglenes befogadó",
"Fosterer (Active Only)" : "Ideiglenes befogadó (csak aktív)",
"Fosterer Medical Report" : "Ideiglenes befogadó orvosi jelentés",
"Found" : "vadászkutya",
"Found Animal" : "Talált állat",
"Found Animal - Additional" : "Talált állat - További",
"Found Animal - Details" : "Talált állat - Részletek",
"Found Animal Contact" : "Talált állathoz tartozó kapcsolat",
"Found Animal {0}" : "Talált állat { 0 }",
"Found Animal: {0}" : "Talált állatok: { 0 }",
"Found animal - {0} {1} [{2}]" : "Talált állat - {0} {1} [{2}]",
"Found animal entries matching '{0}'." : "Talált állatra vonatkozó bejegyzések megegyeznek '{0}'",
"Found animals must have a contact" : "Talált állatokhoz kapcsolattartó szükséges",
"Found animals reported in the last 30 days." : "Az utóbbi 30 napon bejelentett talált állatok",
"Found from" : "",
"Found to" : "",
"FoundLost animal entry {0} successfully created." : "TaláltElveszett állat bejegyzése {0} sikeresen létrehozva",
"Fox Terrier" : "foxterrier",
"Foxhound" : "Rókakopó",
"Fr" : "Fr",
"French Bulldog" : "francia bulldog",
"French-Lop" : "Francia lógófülű nyúl",
"Frequency" : "Gyakoriság",
"Frequently Asked Questions" : "Gyakori kérdések",
"Fri" : "Péntek",
"Friday" : "Péntek",
"From" : "Honnan",
"From Fostering" : "Ideiglenes befogadótól",
"From Other" : "Máshonnan",
"From retailer is only valid on adoption movements." : "A kiskereskedőtől való érkeztetés csak az örökbefogadásnál érvényes.",
"Future notes" : "Jövőbeni megjegyzések",
"GDPR Contact Opt-In" : "GDPR kapcsolattartó ",
"Gaited" : "Jármód",
"Gas Bills" : "Gázszámlák",
"Gecko" : "Gekkó",
"General" : "Szokásos",
"Generate" : "Létrehozás",
"Generate Documents" : "Dokumentum létrehozása",
"Generate HTML from this SQL" : "HTML létrehozása ezen SQL-ből",
"Generate Report" : "Riport létrehozása",
"Generate a document from this animal" : "Dokumentum létrehozása erről az állatról",
"Generate a document from this incident" : "Dokumentum létrehozása erről az eseményről",
"Generate a document from this movement" : "Dokumentum létrehozása erről a mozgatásról",
"Generate a document from this person" : "Dokumentum létrehozása erről a személyről",
"Generate a document from this record" : "Dokumentum létrehozása erről a feljegyzésről",
"Generate a javascript database for the search page" : "Javascript adatbázis létrehozása a kereső oldalhoz",
"Generate a new animal code" : "Új állatkód létrehozása",
"Generate a random name for this animal" : "Tetszőleges névválasztás ehhez az állathoz",
"Generate document from this appointment" : "Dokumentum létrehozása ehhez az időponthoz",
"Generate document from this license" : "Dokumentum létrehozása erről az engedélyről",
"Generate document from this payment" : "Dokumentum létrehozása erről a kifizetésről",
"Generate document from this transport" : "Dokumentum létrehozása erről a transzportról",
"Generate documentation" : "Dokumentáció létrehozása",
"Generate documents" : "Dokumentum létrehozása",
"Generate image thumbnails as tn_$$IMAGE$$" : "Képlogó létrehozása mint tn_$$IMAGE$$",
"Generated document '{0}'" : "Létrehozott dokumentum '{0}'",
"Gerbil" : "Futóegér",
"German Pinscher" : "dobermann",
"German Shepherd Dog" : "német juhászkutya",
"German Shorthaired Pointer" : "rövidszőrű német vizsla",
"German Wirehaired Pointer" : "drótszőrű német vizsla",
"Get more reports from sheltermanager.com" : "További beszámolók generálása a sheltermanager.com oldalról",
"Gift Aid" : "Tárgyi adomány",
"GiftAid" : "Tárgyi adomány",
"Giftaid" : "Tárgyi adomány",
"Ginger" : "Vörös",
"Ginger and White" : "vörös és fehér",
"Give" : "Ad",
"Give Treatments" : "Szükséges kezelések",
"Give Vaccination" : "Szükésges oltás",
"Given" : "Beadva ",
"Glen of Imaal Terrier" : "Glenn of Imaal terrier",
"Go" : "Ugrás",
"Go to the lookup data screen and add/remove breeds, species and animal types according to the animals your shelter deals with." : "Ugrás az ellenőrző adattáblába és fajták, fajok és állat típusok hozzáadása/eltávolítása attól függően, hogy milyen állatokkal foglalkozik a menhely",
"Go to the options screen and set your shelter's contact details and other settings." : "Ugrás az opciókra és a menhely kapcsolatainak és egyéb beállításainak beállítása",
"Go to the system users screen and add user accounts for your staff." : "Ugrás a rendszer felhasználói képernyőre és a személyzet fiókjainak hozzáadása",
"Goat" : "Kecske",
"Golden" : "Arany",
"Golden Retriever" : "golden retriever",
"Goldfish" : "Aranyhal",
"Good With Cats" : "Macskákkal jó a viszonya",
"Good With Children" : "Gyerekekkel jó a viszonya",
"Good With Dogs" : "Kutyákkal jó a viszonya",
"Good with Cats" : "Macskákkal jó a viszonya",
"Good with Children" : "Gyerekekkel jó a viszonya",
"Good with Dogs" : "Kutyákkal jó a viszonya",
"Good with cats" : "Macskákkal jó a viszonya",
"Good with children" : "Gyerekekkel jó a viszonya",
"Good with dogs" : "Kutyákkal jó a viszonya",
"Good with kids" : "Gyerekekkel jó a viszonya",
"Google+" : "Google+",
"Goose" : "Liba",
"Gordon Setter" : "gordon szetter",
"Grade" : "Fokozat",
"Great Dane" : "dán dog",
"Great Pyrenees" : "pireneusi hegyikutya",
"Greater Swiss Mountain Dog" : "Nagy svájci havasi kutya",
"Green" : "zöld",
"Grey" : "Szürke",
"Grey and White" : "szürke és fehér",
"Greyhound" : "agár",
"Guinea Pig" : "Tengerimalac",
"Guinea fowl" : "Gyöngytyúk",
"HMRC Gift Aid Spreadsheet" : "HMRC Adomány adatbázis",
"HTML" : "HTML",
"HTML Publishing Templates" : "HTML közzétételi sablonok szerkesztése",
"HTML/FTP Publisher" : "HTML/FTP Kiadó",
"Hairless" : "Kopasz",
"Half-Yearly" : "Félévente",
"Hamster" : "Hörcsög",
"Harlequin" : "Harlequin",
"Havana" : "Havanna",
"Havanese" : "havanese",
"Header" : "Fejléc",
"Health Problems" : "Egészségügyi problémák",
"Health and Identification" : "Egészségi állapot és azonosítás",
"Healthy" : "Egészséges",
"Heartworm" : "Szívférgesség",
"Heartworm Test Date" : "Szívféreg teszt dátuma",
"Heartworm Test Result" : "Szívféreg teszt eredménye",
"Heartworm Tested" : "Szívférgességre tesztelve",
"Heartworm+" : "Szívféreg teszt pozitív",
"Hedgehog" : "Sündisznó",
"Held" : "Fenntartott",
"Help" : "Segítség",
"Hepatitis" : "Hepatitisz",
"Here are some things you should do before you start adding animals and people to your database." : "Néhány dolog, amit el kell végezni, mielőtt állatokat és személyeket vesz fel az adatbázisába.",
"Hidden" : "Rejtett",
"Hidden Comments" : "Rejtett megjegyzések",
"Hidden comments about the animal" : "Állatra vonatkozó rejtett megjegyzések",
"Hide deceased animals from the home page" : "Elhullott állat elrejtése a honlapról",
"High" : "Magas",
"Highlight" : "Kiemelés",
"Himalayan" : "Himalájai",
"History" : "Előzmények",
"Hold" : "Előjegyzés",
"Hold the animal until this date or blank to hold indefinitely" : "Az állat előjegyzése eddig az időpontig, vagy dátum mező üresen hagyása határozatlan ideig való előjegyzés esetén.",
"Hold until" : "Visszatartás:",
"Hold until {0}" : "Előjegyzés {0}-ig",
"Holland Lop" : "Holland lógófülű nyúl",
"Home" : "Otthon",
"Home Phone" : "Otthoni telefonszám",
"Home page" : "Honlap",
"Homecheck Areas" : "Lelátogatási körzetek",
"Homecheck Date" : "Lelátogatás dátuma",
"Homecheck History" : "Lelátogatási előzmények",
"Homecheck areas" : "Lelátogatási körzetek",
"Homechecked" : "Lelátogatott",
"Homechecked By" : "Lelátógató megnevezése",
"Homechecked by" : "Lelátogató megnevezése",
"Homechecker" : "Lelátogató",
"Horizontal Pitch" : "Vízszintes osztóköz",
"Horse" : "Ló",
"Hotot" : "Hotot nyúl",
"Hound" : "vadászkutya",
"Hours" : "Órák",
"Housetrained" : "Szobatiszta",
"Hovawart" : "Hovawart",
"How urgent is it that we take this animal?" : "Mennyire sürgős az állat fogadása?",
"Husky" : "Husky",
"I've finished, Don't show me this popup again." : "Befejeztem. Ne jelenjen meg ez az ablak újra.",
"IP Restriction" : "IP korlátozás",
"IP restriction is a space-separated list of IP netblocks in CIDR notation that this user is *only* permitted to login from (eg: 192.168.0.0/24 127.0.0.0/8). If left blank, the user can login from any address." : "Az IP korlátozás, azon CIDR jelölésű IP netblock-ok listája, amelyekről az adott felhasználó jogosult belépni (pl: 192.168.0.0/24 127.0.0.0/8). Ha ez üresen marad, akkor a felhasználó bármely IP címről beléphet.",
"Ibizan Hound" : "Ibizai kopó",
"If the shelter provides initial insurance cover to new adopters, the policy number" : "A biztosítás száma, amennyiben a menhely kezdeti biztosítást nyújt az új örökbe fogadónak.",
"If this form has a populated emailaddress field during submission, send a confirmation email to it" : "Ha ezen nyomtatványhoz van hozzárendelt emailcím az engedélyezésnél, küldjön igazolást erre a címre",
"If this is the web preferred image, web publishers will use these notes as the animal description" : "Ha ez a web által preferált kép, akkor a webes szolgáltatók ezen jegyzetet fogják az állat leírásaként használni.",
"If this person is a fosterer, the maximum number of animals they can care for." : "Ha ez a személy egy ideiglenes befogadó, az általa maximálisan vállalható állatok száma.",
"If this person is a member, the date that membership expires." : "Amennyiben ez a személy tag, a tagság lejáratának dátuma.",
"If this person is a member, their membership number" : "Tagsági szám, amennyiben ez a személy tag",
"If this person is a member, their membership number." : "Amennyiben ez a személy tag, a tagsági azonosító száma.",
"If this stock record is for a drug, the batch number from the container" : "Amennyiben ez a készlet bejegyzés egy gyógyszerre vonatkozik, a csomagoláson található azonosító száma",
"If this stock record is for a perishable good, the expiry date on the container" : "Amennyiben ez a készletbejegyzés egy romlandó árura vonatkozik, a csomagoláson található lejárat dátuma",
"If you assign view or edit roles, only users within those roles will be able to view and edit this account." : "Amennyiben hozzáférési és szerkesztési szabályokat állít be, úgy csak az ezen szabályoknak megfelelő felhasználók láthatják és szerkeszthetik a fiókot.",
"If you don't select any locations, publishers will include animals in all locations." : "Ha nem választ ki helyiséget, a szolgáltatók bele veszik az összes helyiségben található állatot.",
"Iguana" : "Iguana",
"Illyrian Sheepdog" : " Jugoszláv farkasölő kutya",
"Image" : "Kép",
"Image file" : "Képfájl",
"Import" : "Importálás",
"Import a CSV file" : "CSV fájl importálása",
"Import a PayPal CSV file" : "PayPal CSV fájl importálása",
"Import from file" : "Importálás fájlból",
"Important" : "Fontos",
"In" : "In",
"In SubTotal" : "Részösszegben",
"In the last month" : "Az utolsó hónapban",
"In the last quarter" : "Az előző negyedévben",
"In the last week" : "Az előző héten",
"In the last year" : "Az előző évben",
"In-Kind Donation" : "Természetbeni támogatás",
"Inactive" : "Inaktív",
"Inactive - do not include" : "Inaktív - ne tartalmazza",
"Incident" : "Eset",
"Incident - Additional" : "Eset - Újabb",
"Incident - Citation" : "Eset - Hivatkozás",
"Incident - Details" : "Eset - Részletek",
"Incident - Dispatch" : "Eset - Jelentés",
"Incident - Owner" : "Eset - tulajdonos",
"Incident Between" : "Eset résztvevői",
"Incident Completed Types" : "Befejezett eset típusok",
"Incident Date/Time" : "Esemény dátuma/időpontja",
"Incident Type" : "Esemény típusa",
"Incident Types" : "Esemény típusok",
"Incident date cannot be blank" : "Esemény dátuma nem maradhat üresen. ",
"Incident followup" : "Esemény következménye",
"Incident {0} successfully created." : "Esemény {0} sikeresen létrehozva.",
"Incident {0}, {1}: {2}" : "Esemény {0}, {1}: {2}",
"Incidents" : "Események",
"Incidents Requiring Followup" : "Utánkövetést igénylő események",
"Include CSV header line" : "CSV fejléc beszúrása",
"Include Removed" : "Töröltek beszámítása",
"Include animals in the following locations" : "Az alábbi helyszíneken található állatok bevonása. ",
"Include animals on trial adoption" : "Hatósági eljárásban lévő állatok bevonása",
"Include animals who don't have a description" : "Leírással nem rendelkező állatok bevonása",
"Include animals who don't have a picture" : "Fénykép nélküli állatok bevonása",
"Include cruelty case animals" : "Állatkínzásos esetekben érintett állatok bevonása",
"Include deceased animals" : "Elhullott állatok bevonása ",
"Include fostered animals" : "Ideiglenes befogadónál lévő állatok bevonása ",
"Include found" : "Talált állatok bevonása",
"Include held animals" : "Előjegyzett állatok bevonása",
"Include incomplete medical records when generating document templates" : "Hiányos egészségügyi információk bevonása formanyomtatványok generálásakor ",
"Include incomplete vaccination and test records when generating document templates" : "Hiányos oltások és teszteredmények bevonása formanyomtatványok generálásakor",
"Include non-shelter animals" : "Nem menhelyi állatok beovnása ",
"Include off-shelter animals in medical calendar and books" : "Menhelyen kívüli állatok egészségügyi naptárba és könyvbe való bevonása",
"Include preferred photo" : "Kedvelt fotó bevonása ",
"Include quarantined animals" : "Karanténban lévő állatok beszámítása",
"Include reserved animals" : "Foglalt állatok beszámítása",
"Include retailer animals" : "Kereskedésből való állatok beszámítása",
"Include returned" : "Visszahozottak bevonása",
"Include this image when publishing" : "Közzétételkor ennek a képnek a bevonása ",
"Include unaltered animals" : "Nem módosított állatok bevonása",
"Income" : "Bevétel",
"Income from an on-site shop" : "Webshopból származó bevétel",
"Income::" : "Bevétel:",
"Income::Adoption" : "Bevétel::Örökbefogadás",
"Income::Donation" : "Bevétel::Adomány",
"Income::EntryDonation" : "Bevétel::Belépési támogatás ",
"Income::Interest" : "Bevétel::kamat",
"Income::OpeningBalances" : "Bevétel::NyitóEgyenleg",
"Income::Shop" : "Bevétel::Bolt",
"Income::Sponsorship" : "Bevétel::Támogatás",
"Income::WaitingList" : "Bevétel::VáróLista",
"Incoming" : "Bejövő",
"Incoming Forms" : "Beérkező nyomtatványok",
"Incoming donations (misc)" : "Bejövő adományok (egyéb)",
"Incoming forms are online forms that have been completed and submitted by people on the web." : "A beérkező nyomtatványok online nyomtatványok, melyeket az interneten töltöttek ki és küldtek be személyek.",
"Incomplete incidents" : "Hiányos esetek",
"Incomplete notes upto today" : "Befejezetlen feljegyszések a mai napig",
"Index" : "Index",
"Individual/Couple" : "Személy/személyek",
"Induct a new animal" : "Új állat behívása",
"Information" : "Információk",
"Initials" : "Kezdőbetűk",
"Install" : "Telepítés",
"Install the selected reports to your database" : "Kiválasztott jelentés feltöltése a saját adatbázisba",
"Insurance" : "Biztosítás",
"Insurance No" : "Biztosítás száma",
"Intake" : "Felvett létszám",
"Intakes {0}" : "Felvett létszám {0}",
"Internal Location" : "Belső Helyszín",
"Internal Locations" : "Belső Helyszínek",
"Invalid email address" : "Érvénytelen e-mail cím",
"Invalid email address '{0}'" : "Érvénytelen e-mail cím '{0}'",
"Invalid microchip number length" : "Érvénytelen mikrochip szám hosszúság",
"Invalid time '{0}', times should be in 00:00 format" : "Érvénytelen idő '{0}', az időt 00:00 formátumban kell megadni",
"Invalid time, times should be in HH:MM format" : "Érvénytelen idő, az időt óra:perc formátumban kell megadni",
"Invalid username or password." : "Érvénytelen felhasználói név és/vagy jelszó.",
"Investigation" : "Ellenőrzés",
"Investigations" : "Ellenőrzések",
"Investigator" : "Vizsgáló ",
"Invoice Only" : "Csak számla",
"Invoice items need a description and amount." : "A számla tételekhez leírás és összeg magadása szükséges.",
"Irish Setter" : "ír szetter",
"Irish Terrier" : "welsh terrier",
"Irish Water Spaniel" : "ír vizispániel",
"Irish Wolfhound" : "ír farkaskutya",
"Is this a permanent foster?" : "Ez egy állandó ideiglenes befogadó? ",
"Is this a trial adoption?" : "Ez egy hatósági örökbefogadás? ",
"Issue a new insurance number for this animal/adoption" : "Új biztosítási szám kiadása ehhez az állathoz/örökbefogadáshoz",
"Issue date and expiry date must be valid dates." : "Kiadási és lejárati dátum érvényes kell legyen. ",
"Issued" : "Kiadva",
"Issued in the last month" : "Múlt hónapban kiadva ",
"Issued in the last week" : "Múlt héten kiadva ",
"Italian Greyhound" : "olasz agár",
"Italian Spinone" : "olasz drótszőrű vizsla",
"Item" : "Elem",
"Jack Russell Terrier" : "Jack Russell terrier",
"Jan" : "Jan.",
"January" : "Január",
"Japanese Bobtail" : "Japán csonkafarkú macska",
"Japanese Chin" : "japán chin",
"Javanese" : "havanese",
"Jersey Wooly" : "Jersey Wooly nyúl",
"Jindo" : "Koreai jindo kutya",
"Jul" : "Júl.",
"July" : "Júl.",
"Jump to diary" : "Ugrás a naplóra",
"Jump to donations" : "Ugrás az adományokhoz",
"Jump to media" : "Ugrás a médiára",
"Jump to movements" : "Ugrás a mozgatásra",
"Jun" : "Jún.",
"June" : "Jún.",
"Jurisdiction" : "Ítélet",
"Jurisdictions" : "Ítéletek",
"Kai Dog" : "kai",
"Kakariki" : "Kakariki",
"Karelian Bear Dog" : "karéliai medvevadász kutya",
"Keep table headers visible when scrolling" : "Táblázat fejléce maradjon látható görgetés közben",
"Keeshond" : "Farkasspicc",
"Kennel" : "Kennel",
"Kerry Blue Terrier" : "kerry blue terrier",
"Kishu" : "kishu",
"Kittens (under {0} months)" : "Kölyök cicák ({0} hónapos kor alatt)",
"Km" : "km",
"Komondor" : "komondor",
"Korat" : "Korat",
"Kuvasz" : "kuvasz",
"Kyi Leo" : "kyi leo",
"Label" : "Címke",
"Labrador Retriever" : "labrador retriever",
"Lakeland Terrier" : "Lakeland terrier",
"Lancashire Heeler" : "Lancashire heeler",
"Large" : "Nagy",
"Last First" : "Az elsőt utoljára ",
"Last Location" : "Legutóbbi helyszín",
"Last Month" : "Legutóbbi hónap",
"Last Name" : "Vezetéknév",
"Last Week" : "Múlt hét",
"Last changed by {0} on {1}" : "Utoljára módosította: {0} ekkor: {1}",
"Last name" : "Vezetéknév",
"Last, First" : "Utolsó, Első",
"Latency" : "Lappangás",
"Latency Tester" : "Lappangási teszt",
"Least recently changed" : "Legrégebben módosítva",
"Leave" : "Oldal elhagyása",
"Leave of absence" : "Szabadság",
"Left Margin" : "Baloldali margó",
"Left shelter" : "Elhagyta a menhelyet",
"Leonberger" : "leonbergi",
"Leptospirosis" : "Leptospirosis",
"Letter" : "Levél",
"Lhasa Apso" : "lhasa apso",
"Liability" : "Kötelezettség",
"Licence for {0} successfully renewed {1} - {2}" : "{0}-re vonatkozó engedély eredményesen megújításra került {1} - {2}",
"License" : "Engedély",
"License Number" : "Engedély szám",
"License Types" : "Engedély típusok",
"License number '{0}' has already been issued." : "'{0}' Licensz szám már kiadásra került",
"License numbers matching '{0}'." : "Licensz szám megegyezik '{0}'",
"License requires a number" : "Engedélyhez szám megadása szükséges",
"License requires a person" : "Engedélyhez személy megadása szükséges",
"License requires issued and expiry dates" : "Engedélyhez kiadási és lejárati idő megadása szükséges",
"Licenses" : "Engedélyek",
"Licensing" : "Engedélyezés",
"Lifetime" : "Élettartam",
"Light Amber" : "Világos borostyán",
"Lilac" : "Lila",
"Lilac Tortie" : "Teknőstarka",
"Limited to {0} matches" : "{0} egyezésekre szűkítve",
"Link" : "link",
"Link an animal" : "Állat belinkelése",
"Link to an external web resource" : "Külső webfelületre való belinkelés",
"Link to this animal" : "Ezen állathoz való belinkelés",
"Links" : "Hivatkozások",
"List" : "Lista",
"Litter" : "Alom",
"Litter Ref" : "Alom hivatkozás",
"Litter Reference" : "Alom hivatkozás",
"Littermates" : "Alomtársak",
"Litters" : "Almok",
"Litters need at least a required date and number." : "Az almokhoz legalább egy dátum és szám megadása szükséges.",
"Live Releases {0}" : "Élő kiadások {0}",
"Liver" : "májbarna",
"Liver and White" : "májbarna és fehér",
"Lizard" : "Gyík",
"Llama" : "Láma",
"Loading..." : "Betöltés…",
"Loan" : "Kölcsön",
"Local" : "Helyi",
"Locale" : "Helyi",
"Location" : "Helyszín",
"Location Filter" : "Hely szerinti szűrés:",
"Location and Species" : "Helyszín és fajok",
"Location and Type" : "Helyszín és típus",
"Location and Unit" : "Helyszín és egység",
"Locations" : "Helyszínek",
"Log" : "Napló",
"Log Text" : "Napló szöveg",
"Log Type" : "Naplózás típusa",
"Log Types" : "Naplózás típusai",
"Log date must be a valid date" : "A napló dátuma érvényes dátum kell legyen",
"Log entries need a date and text." : "A naplóbejegyzésekhez dátum és szöveg szükséges",
"Log requires a date." : "Naplóbejegyzéshez dátum szükséges",
"Log requires a person." : "Naplóbejegyzéshez személy szükséges",
"Log requires an animal." : "Naplóbejegyzéshez állat megadása szükséges.",
"Log successfully added." : "Naplóbejegyzés sikeresen hozzáadva.",
"Login" : "Bejelentkezés",
"Logout" : "Kijelentkezés",
"Long" : "Hosszú",
"Long term" : "Hosszú távú",
"Longest On Shelter" : "Legrégebben a Menhelyen",
"Looking For" : "Keresés",
"Looking for" : "Keresés",
"Lookup" : "Keresés",
"Lookup (Multiple Select)" : "Keresés (többszörös kiválasztás)",
"Lookup Values" : "Keresési érték",
"Lookup data" : "Keresési adatok",
"Lookups" : "Keresések",
"Lop Eared" : "Lógó fülű",
"Lory/Lorikeet" : "Lórifélék",
"Lost" : "Elveszett",
"Lost Animal" : "Elveszett állat",
"Lost Animal - Additional" : "Elveszett állat - Egyéb ",
"Lost Animal - Details" : "Elveszett állat - Részletek",
"Lost Animal Contact" : "Elveszett állathoz tartozó kapcsolat",
"Lost Animal: {0}" : "Elveszett állatok: { 0 }",
"Lost and Found" : "Elveszett és megtalált",
"Lost and found entries must have a contact" : "Az elveszett és megtalált bejegyzésekhez kapcsolattartó megadása szükséges",
"Lost animal - {0} {1} [{2}]" : "Elveszett állat - {0} {1} [{2}]",
"Lost animal entries matching '{0}'." : "Talált állatra vonatkozó bejegyzések megegyeznek '{0}'",
"Lost animal entry {0} successfully created." : "Talált állat bejegyzése {0} sikeresen létrehozva",
"Lost animals must have a contact" : "Elveszett állatokhoz kapcsolattartó szükséges",
"Lost animals reported in the last 30 days." : "Az utóbbi 30 napon bejelentett elveszett állatok.",
"Lost from" : "Elveszett innen",
"Lost to" : "Elvesztett",
"Lost/Found" : "Elveszett/Talált",
"Lots of reports installed? Clean up the Reports menu with Settings-Options- Display-Show report menu items in collapsed categories." : "Sok jelentés van telepítve? Tisztítsa meg a Jelentések menüt a Settings-Options- Display-Show report menü elemekkel az összeomlott kategóriákban.",
"Lovebird" : "Törpepapagáj",
"Low" : "Alacsony",
"Lowchen" : "Oroszlánkutya",
"Lowest" : "Legalacsonyabb",
"M (Miscellaneous)" : "M (Egyéb)",
"MM = current month" : "MM = aktuális hónap",
"Macaw" : "Arapapagáj",
"Mail" : "Levél",
"Mail Merge" : "Körlevél",
"Mail Merge - {0}" : "Körlevél - {0}",
"Maine Coon" : "Maine Coon ",
"Make this the default image when creating documents" : "Ez legyen az alapbeállítás képe egy dokumentum létrehozásánál",
"Make this the default image when viewing this record and publishing to the web" : "Ez legyen az alapbeállitás képe a bejegyzés megtekintésekor és interneten való közzétételnél",
"Make this the default video link when publishing to the web" : "Ez legyen az alapértelmezett videó link az interneten való közztételnél",
"Male" : "Hím",
"Maltese" : "máltai selyemkutya",
"Manchester Terrier" : "Manchester terrier",
"Mandatory" : "Kötelező",
"Manual" : "Kézi",
"Manually enter codes (do not generate)" : "Kód manuális beírása (ne generálódjon)",
"Manufacturer" : "Gyártó",
"Manx" : "Man-szigeti macska",
"Map" : "Térkép",
"Map of active incidents" : "Aktív esetek térképe",
"Mar" : "Márc.",
"March" : "Március",
"Maremma Sheepdog" : "maremma sheepdog",
"Mark Deceased" : "Elhullottak megjelölése",
"Mark an animal deceased" : "Elhullott állat megjelölése",
"Mark dispatched now" : "Elküldöttek megjelölése",
"Mark new animals as not for adoption" : "Új állatok megjelölése nem örökbefogadhatóként",
"Mark responded now" : "Megjelölés visszaigazolása",
"Mark selected payments received" : "Kiválasztott jóváírt befizetések megjelölése",
"Mark this owner homechecked" : "Tulajdonos megjelölése lelátogatottként",
"Mark treatments given" : "Kezelések megjelölése elvégzettként",
"Marketer" : "Forgalmazó",
"Markings" : "Jelölések",
"Markup" : "Haszonkulcs",
"Marriage/Relationship split" : "Házasság/kapcsolat felbontva",
"Mastiff" : "masztiff",
"Match" : "Egyezés",
"Match Lost and Found" : "Elveszett és megtalált egyezés",
"Match against other lost/found animals" : "Egyéb elveszett/talált állattal való összehasonlítás",
"Match lost and found animals" : "Elveszett és talált állatok összehasonlítása",
"Match this animal with the lost and found database" : "Állat összehasonlítása az eltűnt és talált adatabázissal",
"Maternity" : "Anyaság",
"May" : "Máj.",
"McNab" : "McNab juhászkutya",
"Media" : "Média",
"Media Notes" : "Média megjegyzések",
"Media notes contain" : "Média megjegyzések tartalma",
"Medical" : "Egészségügyi",
"Medical Book" : "Egészségügyi könyv",
"Medical Profiles" : "Egészségügyi profilok",
"Medical book" : "Egészségügyi könyv",
"Medical calendar" : "Egészségügyi naptár",
"Medical profiles" : "Egészségügyi profilok",
"Medical profiles need a profile name, treatment, dosage and frequencies." : "Az egészségügyi profilokhoz név, kezelés, dózis és kezelési gyakoriásg megadása szükséges. ",
"Medical regimens need an animal, name, dosage, a start date and frequencies." : "Gyógyszeres kezeléseknél szükség van az állat nevére, a gyógyszer nevére és adagolására valamint a kezelés kezdő időpontjára és kezelések közötti intervallumokra",
"Medicate" : "Gyógykezelés",
"Medicate Animal" : "Gyógykezelés alatt álló állat",
"Medium" : "Közepes",
"Member" : "Tag",
"Membership Expiry" : "Tagság lejárata",
"Membership Number" : "Tagsági szám",
"Merge" : "Egyesítés",
"Merge Person" : "Személy összevonása",
"Merge another animal into this one" : "Másik állat összevonása ezzel",
"Merge another person into this one" : "Másik személy összevonása ezzel a személlyel",
"Merge bonded animals into a single record" : "Összeetartozó állatok összevonása egyetlen bejegyzésbe",
"Merge duplicate records" : "Dupla bejegyzések összevonása",
"Message" : "Üzenet",
"Message Board" : "Üzenőfal",
"Message from {0}" : "Üzenet a következőtől: {0}",
"Message successfully sent to {0}" : "Üzenet sikeresen elküldésre került {0}",
"Messages" : "Üzenetek",
"Messages successfully sent" : "Üzenet sikeresen elküldve",
"Method" : "Módszer",
"Microchip" : "Microchip",
"Microchip Date" : "Microchip dátum",
"Microchip Number" : "Microchip szám",
"Microchip number {0} has already been allocated to another animal." : "Ez a microchip szám {0} már más állathoz lett hozzárendelve. ",
"Microchipped" : "Chippezett",
"Miles" : "Mérföld",
"Mini Rex" : "Mini Rex nyúl",
"Mini-Lop" : "Mini kosorrú nyúl",
"Miniature Pinscher" : "törpe pincser",
"Minutes" : "Percek",
"Missouri Foxtrotter" : "Missouri Foxtrotter ló",
"Mixed Breed" : "Keverék ",
"Mo" : "Mo",
"Mobile signing pad" : "Mobil aláírási blokk",
"Modify Additional Fields" : "További mezők módosítása",
"Modify Document Templates" : "Dokumentum sablonok módosítása",
"Modify Lookups" : "Keresési találatok módosítása",
"Mon" : "Hétfő",
"Monday" : "Hétfő",
"Money" : "Pénz",
"Month" : "Hónap",
"Monthly" : "Havi",
"More Info Needed" : "Több információ szükséges",
"More Medications" : "További gyógyszerezés",
"More Tests" : "További tesztek",
"More Vaccinations" : "További oltások",
"More diary notes" : "További napló jegyzet",
"Morgan" : "Morgan ló",
"Most browsers let you search in dropdowns by typing the first few letters of the item you want." : "A legtöbb böngészőben a keresésnél a keresett elem első néhány betűjének begépelésekor lenyílik egy ablak egy listával ",
"Most browsers will let you visit a record you have been to in this session by typing part of its name in the address bar." : "A legtöbb böngésző lehetőséget biztosít arra, hogy a legutóbbi előzményeit megtekintse azok nevének első betűinek begépelésével",
"Most recently changed" : "Legutóbb módosítva",
"Most relevant" : "Legfontosabb",
"Mother" : "Anya",
"Mountain Cur" : "mountain cur",
"Mountain Dog" : "mountain cur",
"Mouse" : "Egér",
"Move" : "Mozgatás",
"Move an animal to a retailer" : "Állat áthelyezése egy elosztóba",
"Moved to animal record {0}" : "{0} Állatok jegyzékébe áthelyezve ",
"Movement" : "Mozgatás:",
"Movement Date" : "Mozgatás dátuma",
"Movement Number" : "Mozgatás száma",
"Movement Type" : "Mozgatás típusa",
"Movement Types" : "Mozgatás típusok",
"Movement dates clash with an existing movement." : "Esemény dátuma egybe esik egy már létező eseménnyel.",
"Movement numbers must be unique." : "Az áthelyezés sorszámának egyedinek kell lennie",
"Movements" : "Mozgatás:",
"Movements require an animal" : "Az áthelyezéshez állatok szükségesek",
"Movements require an animal." : "A tételekhez állat szükséges",
"Moving..." : "Áthelyezés...",
"Multi-Lookup" : "Multi-keresés",
"Multiple Treatments" : "Többszöri kezelések",
"Munchkin" : "Tacskó macska",
"Munsterlander" : "münszterlandi vizsla",
"Mustang" : "Musztáng",
"My Fosters" : "Ideigleneseim",
"My Incidents" : "Eseményeim",
"My Undispatched Incidents" : "Feladatlan eseményeim",
"My diary notes" : "Összes napló jegyzetem",
"My sheltermanager.com account" : "Saját sheltermanager.com fiókom",
"Mynah" : "Hegyi seregély",
"N (Non-Shelter Animal)" : "nem menhelyi állat",
"NNN or NN = number unique for this type of animal for this year" : "NNN vagy MM = egyedi azonosító az ilyen típusú állatokhoz ebben az évben",
"Name" : "Név",
"Name Contains" : "Név tartalmazza",
"Name and Address" : "Név és Cím",
"Name cannot be blank" : "A név nem lehet üres",
"Name contains" : "Név tartalmazza",
"Neapolitan Mastiff" : "nápolyi masztiff",
"Negative" : "Negatív",
"Neglect" : "Elhanyagoltság",
"Netherland Dwarf" : "Holland törpenyúl",
"Neuter/Spay" : "ivartalantás",
"Neutered" : "Ivartalanított",
"Neutered/Spayed Non-Shelter Animals In {0}" : "Ivartalanított nem menhelyi állatok itt {0}",
"Neutered/Spayed Shelter Animals In {0}" : "Ivartalanított menhelyi állat itt {0}",
"New" : "Új",
"New Account" : "Új fiók",
"New Appointment" : "Új megbeszélt időpont",
"New Citation" : "Új hivatkozás",
"New Cost" : "Új költség",
"New Diary" : "Új Napló",
"New Diet" : "Új Étrend",
"New Document" : "Új dokumentum",
"New Field" : "Új mező",
"New Fosterer" : "Új ideiglenes befogadó",
"New Guinea Singing Dog" : "új-guineai éneklő kutya",
"New Item" : "Új elem",
"New License" : "Új engedély",
"New Litter" : "Új alom",
"New Log" : "Új napló",
"New Movement" : "Új áthelyezés",
"New Owner" : "Új tulajdonos",
"New Password" : "Új jelszó",
"New Payment" : "Új fizetés",
"New Profile" : "Új profil",
"New Record" : "Új feljegyzés",
"New Regimen" : "Új recept",
"New Report" : "Új jelentés",
"New Role" : "Új szerep",
"New Stock" : "Új állomány",
"New Task" : "Új feladat",
"New Template" : "Új formanyomtatvány",
"New Test" : "Új teszt",
"New Transport" : "Új transzport",
"New Trap Loan" : "Új csapda kölcsönzés",
"New User" : "Új felhasználó",
"New Vaccination" : "Új oltás",
"New Voucher" : "Új bizonylat",
"New Waiting List Entry" : "Új bejegyzés a várólistára",
"New Zealand" : "Új-Zéland",
"New diary task" : "Új napló jegyzet",
"New form field" : "Új nyomtatványmező ",
"New name" : "Új név",
"New online form" : "Új online nyomtatvány",
"New password and confirmation password don't match." : "Az új jelszó és az új jelszó megerősítése nem egyezik",
"New task detail" : "Új feladat részletei",
"New template" : "Új nyomtatvány",
"Newfoundland Dog" : "újfundlandi",
"Next" : "Következő",
"No" : "Nem",
"No adjustment" : "Nincs szabályozás",
"No data to show on the report." : "Nincs adat a riporthoz",
"No data." : "Nincs adat",
"No description" : "Nincs leírás",
"No longer retained" : "A továbbiakban nem visszatartott",
"No matches found." : "Nincs találat.",
"No picture" : "Nincs kép",
"No publishers are running." : "Nincs közzététel folyamatban",
"No results found." : "Nincsenek találatok.",
"No results." : "Nincs találat.",
"No tasks are running." : "Nincs folyamatban feladat.",
"No view permission for this report" : "Nincs jogosultsága megtekinteni ezt a jelentést",
"Noise" : "Zaj",
"Non-Shelter" : "Nem menhelyi",
"Non-Shelter Animal" : "nem menhelyi állat",
"Non-Shelter Animals" : "Nem menhelyi állatok",
"Non-shelter Animals" : "Nem menhelyi állatok",
"None" : "Kikerülők",
"Norfolk Terrier" : "Norfolk terrier",
"Normal user" : "Normál felhasználó",
"Norwegian Buhund" : "norvég buhund",
"Norwegian Elkhound" : "norvég elkhound",
"Norwegian Forest Cat" : "Norvég erdei macska",
"Norwegian Lundehund" : "norvég lundehund",
"Norwich Terrier" : "Norwich terrier",
"Not Arrived" : "Nem érkezett meg",
"Not Available For Adoption" : "Nem örökbefogadható",
"Not Available for Adoption" : "Nem örökbefogadható",
"Not For Adoption" : "Nem örökbefogadható",
"Not Microchipped" : "Nem chipezett",
"Not Reconciled" : "Nem egyeztetett",
"Not available for adoption" : "Nem örökbefogadható",
"Not dispatched" : "Nincs elküldve",
"Not for adoption" : "Nem örökbefogadható",
"Not for adoption flag set" : "Nem örökbe fogadhatóként jelölve",
"Not in chosen publisher location" : "Nem található a kiválasztott kiadói helyszínen",
"Not reconciled" : "Nem egyeztetett",
"Note" : "Jegyzet",
"Notes" : "Jegyzetek",
"Notes about the death of the animal" : "Állat elhullásával kapcsolatos feljegyzések",
"Nov" : "Nov.",
"Nova Scotia Duck-Tolling Retriever" : "Nova Scotia vadkacsavadász retriever",
"November" : "November",
"Now" : "Most",
"Number" : "Szám",
"Number in litter" : "Alom létszáma",
"Number of Tasks" : "Feladatok száma",
"Number of animal links to show" : "Mutatandó állat-linkek száma",
"Number of fields" : "Mezők száma",
"Number of pets" : "Háziállatok száma",
"Ocicat" : "Ocicat macska",
"Oct" : "Okt.",
"October" : "Október",
"Office" : "Iroda",
"Old English Sheepdog" : "óangol juhászkutya",
"Old Password" : "Régi jelszó",
"Omit criteria" : "Kritérium kihagyása",
"Omit header/footer" : "Fejléc/Lábléc elhagyása",
"On Foster (in figures)" : "Ideiglenes befogadónál (számokkal)",
"On Shelter" : "Menhelyen",
"On shelter for {0} days, daily cost {1}, cost record total <b>{2}</b>" : "{0} napja a menhelyen, napi költség {1}, teljes költség feljegyzés <b>{2}</b>",
"On shelter for {0} days. Total cost: {1}" : "{0} napja a menhelyen. Teljes költség {1}",
"Once assigned, codes cannot be changed" : "A kijelölés után a kódok nem változtathatók meg",
"Once signed, this document cannot be edited or tampered with." : "Aláírás után a dokumentum nem módosítható vagy változtatható meg.",
"One Off" : "Egyszeri",
"One-Off" : "Egyszeri",
"Online Form: {0}" : "Online nyomtatvány: {0}",
"Online Forms" : "Online nyomtatványok",
"Online form fields need a name and label." : "Az online nyomtatvány mezőkhöz név és címke szükséges.",
"Online forms can be linked to from your website and used to take information from visitors for applications, etc." : "Az online nyomtatványokat a honlapjáról belinkelheti és ezek segítségével információt szerezhet látogatóktól kérelmekhez stb.",
"Only PDF, HTML and JPG image files can be attached." : "Csak PDF, HTML és JPG képfájlokat lehet csatolni.",
"Only active accounts" : "Csak aktív fiókok",
"Only allow users with one of these roles to view this incident" : "Csak az ezen szerepkörök egyikével rendelkező felhasználó láthatja ezt az eseményt",
"Only show account totals for the current period, which starts on " : "Csak az aktuális időszakra mutassa a fiókok összesítését, időszak kezdete ...",
"Only show declawed" : "Csak a karmatlanítottakat mutassa",
"Only show pickups" : "Csak a befogásokat mutassa",
"Only show special needs" : "Csak a speciális igényeket mutassa",
"Only show transfers" : "Csak a transzfereket mutassa",
"Open Incidents" : "Esetek megnyitása",
"Open records in a new browser tab" : "Feljegyzés megnyitása új böngésző oldalon",
"Open reports in a new browser tab" : "Feljegyzések megnyitása új böngésző oldalon",
"Opening balances" : "Nyitó egyenlegek",
"Optional, the date the vaccination \"wears off\" and needs to be administered again" : "Opcionálisan az a dátum, amikor a oltás lejár, és újra be kell adni",
"Options" : "Opciók",
"Or move this diary on to" : "Vagy ezen napló áthelyezése ide",
"Order published animals by" : "Közzétett állatok rendezése következők szerint",
"Organisation" : "Szervezet",
"Organization" : "Szervezet",
"Organization name" : "Szervezet neve",
"Oriental Long Hair" : "Keleti hosszúszőrű macska",
"Oriental Short Hair" : "Keleti rövidszőrű macska",
"Oriental Tabby" : "Keleti Tabby macska",
"Original Owner" : "Eredeti tulajdonos",
"Ostrich" : "Strucc",
"Other Account" : "Másik fiók",
"Other Organisation" : "Más szervezetek",
"Other Shelter" : "Más menhely",
"Otterhound" : "vidrakopó",
"Our shelter does trial adoptions, allow us to mark these on movement screens" : "A menhelyünk végez próba örökbeadásokat, engedélyezze ezek megjelölését az események képernyőn",
"Out" : "Ki",
"Out Between" : "Között",
"Out SubTotal" : "Részösszegen kivül",
"Output a deceased animals page" : "Elhullott állat oldalának kimentése",
"Output a page with links to available online forms" : "Elérhető online kérdőívek linkjeit tartalmazó oldalak kimentése",
"Output a separate page for each animal type" : "Minden állat típus számára külön oldal kimentése",
"Output a separate page for each species" : "Minden állatfaj számára külön oldal kimentése",
"Output an adopted animals page" : "Örökbefogadott állat oldalának kimentése",
"Output an rss.xml page" : "Eredmény az rss.xml oldalon",
"Overdue" : "Lejárt",
"Overdue medical items" : "Lejárt egészségügyi elemek",
"Overtime" : "Túlóra",
"Owl" : "Bagoly",
"Owner" : "Tulajdonos",
"Owner Vet" : "Tulajdonos állatorvosa",
"Owner given citation" : "Tulajdonos általi hivatkozás",
"Owners Vet" : "Tulajdonos állatorvosa",
"PM" : "du.",
"Page extension" : "Oldal kiterjesztés",
"Paid" : "Befizetett",
"Paint/Pinto" : "Paint/Pinto ló",
"Palomino" : "Palomino",
"Paper Size" : "Papírméret",
"Papillon" : "papillon",
"Parainfluenza" : "Parainfluenza",
"Parakeet (Other)" : "Arapapagáj (egyéb)",
"Parent" : "Szülő",
"Parrot (Other)" : "Papagáj (egyéb)",
"Parrotlet" : "Parrotlet papagáj",
"Parvovirus" : "Parvovirus",
"Paso Fino" : "Paso Fino ló",
"Pass Homecheck" : "Sikeres látogatás",
"Password" : "Jelszó",
"Password for '{0}' has been reset." : "'{0}' jelszó visszaállításra került.",
"Password is incorrect." : "A jelszó helytelen.",
"Password successfully changed." : "A jelszó megváltoztatása sikeres.",
"Passwords cannot be blank." : "A jelszó nem lehet üres.",
"Path" : "Elérési út",
"Patterdale Terrier (Fell Terrier)" : "patterdale terrier",
"PayPal" : "PayPal",
"Payment" : "Fizetés",
"Payment Book" : "Fizetési könyv",
"Payment From" : "Fizetés ettől:",
"Payment Methods" : "Fizetési módok",
"Payment Type" : "Fizetési mód",
"Payment Types" : "Fizetési módok",
"Payment book" : "Fizetési könyv",
"Payment calendar" : "Fizetési naptár",
"Payment of {0} successfully received ({1})." : " {0} általi fizetés sikeresen megérkezett ({1})",
"Payments" : "Kifizetések",
"Payments need at least one date, an amount and a person." : "A fizetésekhez minimum egy dátum, egy összeg és egy személy szükséges",
"Payments of type" : "Típus szerinti fizetések",
"Payments require a person" : "A fizetésekhez egy személy szükséges",
"Payments require a received date" : "A fizetésekhez a beérkezés dátuma szükséges",
"Peacock/Pea fowl" : "Páva",
"Pekingese" : "pekingi palotakutya",
"Pending Adoption" : "Függőben lévő örökbefogadás",
"Pending Apartment Verification" : "Függőben lévő lakás igazolás",
"Pending Home Visit" : "Függőben lévő látogatás",
"Pending Vet Check" : "Függőben lévő állatorvosi vizsgálat",
"Pension" : "Nyugdíj",
"People" : "Emberek",
"People Looking For" : "Személyek, akik ezt keresik:",
"People matching '{0}'." : "'{0}'-nek megfelelő személyek",
"People or animal records that already exist in the database will not be imported again and movement/payment data will be attached to the existing records instead." : "Az adatbázisban már szereplő személyek vagy állatok adatai nem kerülnek újból importálásra, ehelyett a mozgás / fizetés adatai csatolásra kerülnek a már létező adatokhoz.",
"People with active reservations, but no homecheck has been done." : "Aktív jelölti státusszal rendelkező személyek, akiknél még nem történt meg a látogatás",
"People with overdue donations." : "Lejárt adományos személyek",
"Percheron" : "Percheron ló",
"Perform" : "Végrehajtás",
"Perform Homecheck" : "Látogatás végrehajtása",
"Perform Test" : "Teszt elvégzése",
"Performed" : "Végrehajtva",
"Permanent Foster" : "Állandó befogadó",
"Persian" : "Perzsa",
"Person" : "Személy",
"Person - Additional" : "Személy - további",
"Person - Name and Address" : "Személy - név és cím",
"Person - Type" : "Személy - típus",
"Person Flags" : "Személy Zászlók",
"Person looking for report" : "Jelentést kereső személy",
"Person successfully created" : "A személy sikeresen létrehozva",
"Personal" : "Személyes",
"Peruvian Inca Orchid" : "perui meztelen kutya",
"Peruvian Paso" : "Perui Paso ló",
"Petit Basset Griffon Vendeen" : "Kis Vendee-i griffon basset",
"Pharaoh Hound" : "fáraókutya",
"Pheasant" : "Fácán",
"Phone" : "Telefon",
"Phone contains" : "A telefon tartalmazza",
"Photo successfully uploaded." : "Fénykép sikeresen feltöltve.",
"Picked Up" : "Begyüjtve",
"Picked Up By" : "Személy aki begyüjtötte",
"Pickup" : "Begyüjtés",
"Pickup Address" : "Begyűjtési cím",
"Pickup Location" : "Begyűjtés helye",
"Pickup Locations" : "Begyűjtés helye",
"Pig" : "Sertés",
"Pig (Farm)" : "Sertés (Farm)",
"Pigeon" : "Galamb",
"Pinterest" : "Pinterest",
"Pionus" : "Pirosfarú papagáj",
"Pit Bull Terrier" : "pitbull terrier",
"Pixie-Bob" : "Pixie-Bob macska",
"Please click the Sign button when you are finished." : "Nyomja meg az Aláírás gombot, ha befejezte.",
"Please see the manual for more information." : "Nézze meg a kézikönyvet további információkért.",
"Please select a PDF, HTML or JPG image file to attach" : "Kérem válasszon PDF, HTML vagy JPG fájlt csatolmányként",
"Please tighten the scope of your email campaign to {0} emails or less." : "Kérjük szűkítse le az emailkampány hatókörét {0} vagy kevesebb emailre.",
"Please use the links below to electronically sign these documents." : "Kérjük az alábbi link segítségével elektronikusan írja alá ezen dokumentumokat.",
"Plott Hound" : "Plott kopó",
"Poicephalus/Senegal" : "Szenegáli papagáj",
"Pointer" : "vizsla",
"Points for being found within 2 weeks of being lost" : "Eltűnéstől számított 2 héten belüli megtalálásért járó pontok",
"Points for matching age group" : "Megegyező korosztály pontok",
"Points for matching breed" : "Fajta összehasonlítási szempontok",
"Points for matching color" : "Szín összehasonlítási szempontok",
"Points for matching features" : "Tulajdonságok összehasonlítási szempontjai",
"Points for matching lost/found area" : "Elveszett/Talált területek összehasonlítási szempontjai",
"Points for matching sex" : "Nemek összehasonlítási szempontjai",
"Points for matching species" : "Faj összehasonlítási szempontok",
"Points for matching zipcode" : "Irányítószám összehasonlítási szempontok",
"Points required to appear on match report" : "Összehasonlítási riportokon megjelenő szempontok",
"Polish" : "Lengyel",
"Polish Lowland Sheepdog" : "lengyel alföldi juhászkutya",
"Pomeranian" : "pomerániai",
"Pony" : "Póni",
"Poodle" : "uszkár",
"Portugese Podengo" : "portugál podengo",
"Portuguese Water Dog" : "Portugál vízi kutya",
"Positive" : "Pozitív",
"Positive for Heartworm, FIV or FLV" : "Pozitívan tesztelve szívféregre, FIV-re vagy FLV-re",
"Positive/Negative" : "Pozitív/Negatív",
"Post" : "Postáz",
"Postage costs" : "Postai kiadások",
"Pot Bellied" : "Csüngőhasú sertés",
"Prairie Dog" : "Prérikutya",
"Prefill new media notes for animal images with animal comments if left blank" : "Az üresen hagyott megjegyzés rovatokat töltse ki az állatok képeihez az állatra vonatkozó megjegyzésekkel",
"Prefill new media notes with the filename if left blank" : "Az üresen hagyott új megjegyzés rovatokat töltse ki a fájlnévvel",
"Premises" : "Helységek",
"Presa Canario" : "Kanári-szigeteki kutya",
"Press F11 in HTML or SQL code editing boxes to edit in fullscreen mode" : "A teljes képernyőn való szerkesztéshez nyomja meg az F11 gombot a HTML vagy SQL szerkesztési mezőkben",
"Preview" : "Előnézet",
"Previous" : "Előző",
"Previous Adopter" : "Korábbi örökbefogadó",
"Print" : "Nyomtatás",
"Print Preview" : "Nyomtatási kép",
"Print selected forms" : "A kijelölt nyomtatványok nyomtatása",
"Printable Manual" : "Nyomtatható leírás",
"Printing word processor documents uses hidden iframe and window.print" : "A word processzoros dokumentumok nyomtatásához rejtett iframe és windows.print használata",
"Priority" : "Prioritás",
"Priority Floor" : "Prioritási alap",
"Produce a CSV File" : "CSV fájl előállítása",
"Produce a PDF of printable labels" : "Nyomtatható cimkés PDF előállítása",
"Profile" : "Profil",
"Profile name cannot be blank" : "Profil név nem maradhat üresen",
"Public Holiday" : "Állami ünnep",
"Publish Animals to the Internet" : "Állat közzététele Interneten",
"Publish HTML via FTP" : "HTML közzététele FTP segítségével",
"Publish now" : "Közzététel most",
"Publish to folder" : "Közzététel mappában",
"Published to Website" : "Weboldalon közzétéve",
"Publisher" : "Kiadó",
"Publisher Breed" : "Közzététel fajtája",
"Publisher Color" : "Közzétételi szín",
"Publisher Logs" : "Kiadó naplóbejegyzései",
"Publisher Species" : "Kiadó fajtája",
"Publishing" : "Kiadás",
"Publishing History" : "Közzétételi előzmények",
"Publishing Logs" : "Közzétételi naplóbejegyzések",
"Publishing Options" : "Közzétételi opciók",
"Publishing complete." : "Közzététel befejeződött",
"Publishing template" : "Közzétételi minta",
"Pug" : "mopsz",
"Puli" : "puli",
"Pumi" : "pumi",
"Puppies (under {0} months)" : "Kölykök ( {0} hónapnál fiatalabb)",
"Purchased" : "Megvásárolva",
"Qty" : "Mennyiség",
"Quaker Parakeet" : "Barátpapagáj",
"Quantity" : "Mennyiség",
"Quarantine" : "Karantén",
"Quarterhorse" : "Quarterhorse ló",
"Quarterly" : "Negyedévi",
"Quick Links" : "Gyors hivatkozások",
"Quicklinks" : "Gyors hivatkozások",
"Quicklinks are shown on the home page and allow quick access to areas of the system." : "A gyorshivatkozások a honlapon jelennek meg és lehetővé teszik a rendszer más részeihez való gyors hozzáférést.",
"R" : "R",
"Rabbit" : "Nyúl",
"Rabies" : "Veszettség",
"Rabies Tag" : "Veszettség címke",
"RabiesTag" : "VeszettségCímke",
"Radio Buttons" : "Választógombok ",
"Ragamuffin" : "RagaMuffin macska",
"Ragdoll" : "Ragdoll macska",
"Rank" : "Besorol",
"Rat" : "Patkány",
"Rat Terrier" : "Rat Terrier",
"Raw Markup" : "Nyers jelölés",
"Read the manual for more information about Animal Shelter Manager." : "Az Animal Shelter Managerre vonatkozó további információkért olvassa el a kézikönyvet",
"Real name" : "Valódi név",
"Reason" : "Ok",
"Reason For Appointment" : "Az időpont célja",
"Reason Not From Owner" : "Indoklás nem a tulajdonos által",
"Reason for Entry" : "Bejegyzés oka",
"Reason for entry" : "Bejegyzés oka",
"Reason not from Owner" : "Indoklás nem a tulajdonos által",
"Reason the owner did not bring in the animal themselves" : "Annak oka, hogy a tulajdonosok miért nem maguk hozták be az állatot",
"Recalculate ALL animal ages/times" : "Számítsa ki újra az ÖSSZES állat korát/idejét",
"Recalculate ALL animal locations" : "Valamennyi állat elhelyezésének újraszámítása",
"Recalculate on-shelter animal locations" : "A menhelyen lévő állatok elhelyezésének újraszámítása",
"Receipt No" : "Nyugta sz.",
"Receipt/Invoice" : "Nyugta/Számla",
"Receive" : "Fogadás",
"Receive a donation" : "Adomány befogadása",
"Receive a payment" : "Fizetés befogadása",
"Received" : "megkapta",
"Received in last day" : "Előző napon érkezett",
"Received in last month" : "Előző hónapban érkezett",
"Received in last week" : "Előző héten érkezett",
"Received in last year" : "Előző évben érkezett",
"Received today" : "Ma érkezett",
"Recently Adopted" : "Nemrég örökbefogadva",
"Recently Changed" : "Mostanában módosult",
"Recently Entered Shelter" : "Mostanában menhelyre bekerült",
"Recently Fostered" : "Mostanában ideiglenes befogadóhoz került ",
"Recently deceased" : "Mostanában elhullott",
"Recently deceased shelter animals (last 30 days)." : "Nemrégiben elhullott menhelyi állatok (utóbbi 30 nap)",
"Reception" : "Befogadás",
"Reclaim" : "Visszakövetel",
"Reclaim an animal" : "Állat visszakövetelése",
"Reclaim movements must have a valid reclaim date." : "A visszakövetelő intézkedéseknek érvényes visszakövetelési dátummal kell rendelkeznie",
"Reclaim successfully created." : "A visszakövetelés sikeresen létrehozva.",
"Reclaimed" : "Visszakövetelt",
"Reconcile" : "Egyeztetés",
"Reconciled" : "Egyeztetett",
"Redbone Coonhound" : "Vörös mosómedvekopó",
"Rediarised" : "Újradátumozott",
"Redirect to URL after POST" : "POST után irányítsa vissza az URL-re",
"Reference" : "Hivatkozás",
"Refresh" : "Frissítés",
"Regenerate 'Match lost and found animals' report" : "'Elveszett és megtalált állatok összehasonlítása' jelentés újra generálása",
"Regenerate 'Person looking for' report" : "'Személy keres' jelentés újra generálása",
"Regenerate annual animal figures for" : "Éves állat mutatószámok újra generálása a következőre",
"Regenerate monthly animal figures for" : "Havi állat mutatószámok újra generálása következőre",
"Regenerate person names in selected format" : "Személyek neveinek újra generálása a kiválogatott formátumban",
"Register Microchip" : "Regisztrálja a microchipet",
"Register microchips after" : "Mikrocsip regisztrálása ",
"Released To Wild" : "Szabadon engedve",
"Released To Wild {0}" : "Szabadon engedve {0}",
"Reload" : "Újratöltés",
"Remaining" : "Hátralévő",
"Remember me on this computer" : "Emlékezzen rám ezen a számítógépen",
"Removal" : "Eltávolítás",
"Removal Reason" : "Eltávolítás oka",
"Removal reason" : "Eltávolítás oka",
"Remove" : "Eltávolít",
"Remove HTML and PDF document media after this many years" : "HTML és PDF dokumentum tartalmak eltávolítása ennyi év után",
"Remove clinic functionality from screens and menus" : "Távolítsa el a klinikai funkciókat a képernyőkről és a menükből",
"Remove fine-grained animal control incident permissions" : "Távolítsa el a finom szemcsés, állatok elleni védekezésre vonatközó engedélyeket",
"Remove holds after" : "Távolítsa el az ezt követő fenntartásokat",
"Remove move menu and the movements tab from animal and person screens" : "A mozgatás menü és a mozgások tab eltávolítása az állat és személy képernyőről",
"Remove personally identifiable data" : "Távolítsa el a személyesen azonosítható adatokat",
"Remove previously published files before uploading" : "Korábban közzétett fájlok eltávolítása feltöltés előtt",
"Remove retailer functionality from the movement screens and menus" : "Távolítsa el a kiskereskedő funkciót a mozgásképernyőkről és a menükből",
"Remove short shelter code box from the animal details screen" : "Rövid menhelyi kód box eltávolítása az állatok információs képernyőjéről",
"Remove the FIV/L test fields from animal health details" : "Távolítsa el a FIV / L tesztmezőket az állat-egészségügyi adataiból",
"Remove the Litter ID field from animal details" : "Távolítsa el az Alom azonosító mezőt az állat adataiból",
"Remove the Rabies Tag field from animal health details" : "Távolítsa el a veszettség címke mezőt az állat egészségügyi információiból",
"Remove the adoption coordinator field from animal entry details" : "Örökbefogadási koordinátor mező eltávolítása az állat felviteli adatai közül",
"Remove the adoption fee field from animal details" : "Örökbefogadási díj mező eltávolítása az állat adataiból",
"Remove the animal control functionality from menus and screens" : "Az állat kontroll funkció eltávolítása a menükből és képernyőképekről",
"Remove the bonded with fields from animal entry details" : "Távolítsa el az 'Összekapcsolva ' mezőket az állat leírásából",
"Remove the city/state fields from person details" : "Város/állam mezők eltávolítása a személy adataiból",
"Remove the coat type field from animal details" : "Szőrzet típusa mező eltávolítása az állat adataiból",
"Remove the declawed box from animal health details" : "A karomvágás mező eltávolítása az állat egészségügyi infóiból",
"Remove the document repository functionality from menus" : "A dokumentum gyűjtemény funkció eltávolítása a menükből",
"Remove the good with fields from animal notes" : "A kompatibilitásra vonatkozó mező eltávolítása az állat jegyzeteiből",
"Remove the heartworm test fields from animal health details" : "A szívféreg teszt mező eltávolítása az állat egészségügyi infóiból",
"Remove the insurance number field from the movement screens" : "Biztosítási szám mező eltávolítása a mozgatási képernyőkről",
"Remove the location unit field from animal details" : "Helymeghatározó egység mező eltávolítása az állat adataiból",
"Remove the microchip fields from animal identification details" : "Microchip mező eltávolítása az állat azonosítási adatai közül",
"Remove the neutered fields from animal health details" : "Az ivartalanításra vonatkozó mező eltávolítása az állat egészségügyi infóiból",
"Remove the online form functionality from menus" : "Az online nyomtatvány funkció eltávolítása a menükből",
"Remove the picked up fields from animal entry details" : "A felvételi mező eltávolítása az állat adataiból",
"Remove the rota functionality from menus and screens" : "A rota funkció eltávolítása a menükből és képernyőképekről",
"Remove the size field from animal details" : "A méret mező eltávolítása az állat adataiból",
"Remove the stock control functionality from menus and screens" : "Távolítsa el a készletkezelési funkciókat a menükből és a képernyőkről",
"Remove the tattoo fields from animal identification details" : "Tetoválási mező eltávolítása az állat azonosítási adatai közül",
"Remove the transport functionality from menus and screens" : "Távolítsa el a szállítási funkciókat a menükből és a képernyőkről",
"Remove the trap loan functionality from menus and screens" : "Csapda kölcsönzés eltávolítása a menükből és képernyőképekről",
"Remove the weight field from animal details" : "Súly mező eltávolítása az állat adataiból",
"Removed" : "Eltávolított",
"Rename" : "Átnevezés",
"Renew License" : "Engedély megújítása",
"Renew licence" : "Engedély megújítása ",
"Renew license" : "Engedély megújítása",
"Report" : "Beszámoló",
"Report Title" : "Beszámoló címe",
"Report a new incident" : "Új esemény jelentése",
"Reports" : "Beszámolók",
"Request signature by email" : "Aláírás kérése emailen",
"Requested" : "Bekérve",
"Require followup" : "Követés kérése",
"Required" : "Szükséges",
"Required date must be a valid date" : "A kért dátuma érvényes dátum kell legyen",
"Reschedule" : "Átütemezés",
"Reservation" : "Foglalás",
"Reservation Book" : "Foglalási könyv",
"Reservation Cancelled" : "Foglalás törlölve",
"Reservation Date" : "Foglalás dátuma",
"Reservation For" : "Foglalás",
"Reservation Status" : "Foglalás státusza",
"Reservation Statuses" : "Foglalás státuszai",
"Reservation book" : "Foglalási nyilvántartás",
"Reservation date cannot be after cancellation date." : "A foglalás dátuma nem lehet későbbi mint a törlés dátuma",
"Reservation successfully created." : "A foglalás sikeresen létrehozva.",
"Reservations must have a valid reservation date." : "A foglaláshoz szükséges érvényes foglalási dátum",
"Reserve" : "Foglal",
"Reserve an animal" : "Állat foglalása",
"Reserved" : "Foglalt",
"Reset" : "Visszaállítás",
"Reset Password" : "Jelszó visszaállítása",
"Respond" : "Válasz",
"Responded" : "Válaszolt",
"Responded Between" : "Ezen időszakban megválaszolva",
"Responded Date/Time" : "Visszaigazolt dátum / idő",
"Result" : "Eredmény",
"Results" : "Eredmények",
"Results for '{0}'." : "Eredmények '{0}'-re",
"Retailer" : "Kiskereskedő",
"Retailer Animals" : "Kiskereskedőnél lévő állatok",
"Retailer Book" : "Kiskereskedői nyilvántartás",
"Retailer book" : "Kiskereskedői nyilvántartás",
"Retailer movement successfully created." : "Kiskereskedői mozgatás sikeresen megtörtént. ",
"Retailer movements must have a valid movement date." : "A kiskereskedői mozgatáshoz érvényes dátumot kell rendelni.",
"Retriever" : "retriever",
"Return" : "Visszahozatal",
"Return Category" : "Visszahozatal kategórája",
"Return Date" : "Visszahozatal dátuma",
"Return a transferred animal" : "Áthelyezett állat visszahelyezése",
"Return an animal from adoption" : "Örökbefogadott állat visszahelyezése",
"Return an animal from another movement" : "Állat visszahelyezése más mozgatásból",
"Return an animal from transfer" : "Állat visszahelyezése transzferből",
"Return date cannot be before the movement date." : "A visszahelyezés dátuma nem lehet korábbi a mozgatás dátumánál",
"Return this movement and bring the animal back to the shelter" : "Mozgatás visszavonása és állat visszahozatala a menhelyre",
"Returned" : "Visszaadva",
"Returned By" : "Visszaadta ",
"Returned To Owner" : "Tulajdonosnak visszaadva",
"Returned from" : "Visszahozva innen",
"Returned to" : "Visszaadva ide",
"Returned to Owner {0}" : "Tulajdonosnak visszaadva {0}",
"Returning" : "Visszatérő",
"Returns {0}" : "Visszaérkezések {0}",
"Reupload animal images every time" : "Állat képének újra feltöltése minden alkalommal.",
"Rex" : "Rex",
"Rhea" : "Rhea",
"Rhinelander" : "Rhinelander",
"Rhodesian Ridgeback" : "rhodesiai ridgeback",
"Ringneck/Psittacula" : "Örvösmadár/Psittacula",
"Role is in use and cannot be deleted." : "A szerep használatban van, nem törölhető.",
"Roles" : "Szerepek",
"Roles need a name." : "A szerepkört el kell nevezni.",
"Rosella" : "Rozellapapagáj",
"Rostered day off" : "Felosztott szabadnap",
"Rota" : "Beosztás",
"Rota Types" : "Beosztás típusok",
"Rota cloned successfully." : "Beosztás sikeres másolása.",
"Rotate image 90 degrees anticlockwis" : "Kép elforgatása 90 fokkal óramutató járásával ellentétes irányba",
"Rotate image 90 degrees clockwise" : "Kép elforgatása 90 fokkal óramutató járásával azonos irányba",
"Rottweiler" : "rottweiler",
"Rough" : "Érdes",
"Rows" : "Sorok",
"Ruddy" : "Pirosított",
"Russian Blue" : "Oroszkék",
"S (Stray Cat)" : "S (kóbor macska)",
"S = first letter of animal species" : "S = állat fajának első betűje",
"SM Account" : "SM fiók",
"SMS" : "SMS",
"SQL" : "SQL",
"SQL Interface" : "SQL Interface",
"SQL dump" : "SQL mentés",
"SQL dump (ASM2 HSQLDB Format)" : "SQL mentés (ASM2 HSQLDB formátum)",
"SQL editor: Press F11 to go full screen and press CTRL+SPACE to autocomplete table and column names" : "SQL editor: teljes képernyőre váltáshoz nyomja meg az F11-et és CTRL+SPACE-t a táblázat és oszlopok neveinek automatikus befejezéséhez",
"SQL interface" : "SQL Interface",
"SQL is syntactically correct." : "SQL mondattanilag korrekt.",
"SS = first and second letter of animal species" : "SS = állat fajának első és második betűje",
"Sa" : "Sa",
"Saddlebred" : "Hátasló",
"Saint Bernard St. Bernard" : "bernáthegyi",
"Sales Tax" : "Forgalmi adó",
"Saluki" : "saluki",
"Samoyed" : "szamojéd",
"Sat" : "Sat",
"Satin" : "Szatén",
"Saturday" : "Szombat",
"Save" : "Mentés",
"Save and leave" : "Mentés és kilépés",
"Save this incident" : "Esemény mentése",
"Save this person" : "Személy mentése",
"Save this record" : "Bejegyzés mentése",
"Save this waiting list entry" : "Ennek a várólista feljegyzésnek a mentése",
"Saving..." : "Mentés....",
"Scale published animal images to" : "Közzétett képek méretezése",
"Scheduled" : "Ütemezett",
"Schipperke" : "Schipperke",
"Schnauzer" : "schnauzer",
"Scottish Deerhound" : "Skót szarvasagár",
"Scottish Fold" : "Skót lógófülű macska",
"Scottish Terrier Scottie" : "skót terrier",
"Script" : "Jegyzet",
"Seal" : "Hitelesít",
"Sealyham Terrier" : "Sealyham terrier",
"Search" : "Keresés",
"Search Results for '{0}'" : "Keresési eredmények erre '{0}'",
"Search returned {0} results." : "Keresésnél kiadott '{0}' eredmény",
"Search sort order" : "Keresési sorrend",
"Searchable" : "Keresés lehetséges",
"Second offence" : "Második vétség",
"Select" : "Kiválasztás",
"Select a person" : "Személy kiválasztása",
"Select a person to attach this form to." : "Válasszon ki egy személyt akihez csatolja ezt az űrlapot.",
"Select a person to merge into this record. The selected person will be removed, and their movements, diary notes, log entries, etc. will be reattached to this record." : "Válasszon ki egy személyt, akit bele szeretne vonni ebbe a feljegyzésbe. A kiválasztott személyt eltávolítják, és mozgásait, naplójegyzeteit, naplóbejegyzéseit stb. újra csatolják ehhez a rekordhoz.",
"Select all" : "Összes kiválsztása ",
"Select an animal" : "Válasszon egy állatot",
"Select an animal to attach this form to." : "Válasszon egy állatot, amelyhez ezt a nyomtatványt csatolja.",
"Select an animal to merge into this record. The selected animal will be removed, and their movements, diary notes, log entries, etc. will be reattached to this record." : "Válasszon egy állatot, amellyet összevonnak ezzel a feljegyzéssel. A kiválasztott állatot eltávolítják, és mozgásait, naplójegyzeteit, naplóbejegyzéseit stb. újra csatolják ehhez a feljegyzéshez.",
"Select animal to merge" : "Válassza ki az egyesiteni kívánt állatot ",
"Select animals" : "Válasszon ki állatokat",
"Select date for diary task" : "Válasszon ki dátumot ehhez a napló feladathoz",
"Select person to merge" : "Válassza ki az egyesíteni kívánt személyt",
"Select recommended" : "Válassza az ajánlott lehetőséget",
"Selected On-Shelter Animals" : "Kiválasztott menhelyen lévő állatok",
"Selkirk Rex" : "Selkirk rex macskafajta",
"Send" : "Elküld",
"Send Emails" : "Emaileket küld",
"Send a weekly email to fosterers with medical information about their animals" : "Heti e-mail küldése az ideiglenes befogadóknak az állatokra vonatkozó orvosi információkkal",
"Send confirmation email to form submitter" : "Küldjön megerősítő e-mailt az űrlap benyújtójának",
"Send emails" : "Emailek küldése",
"Send mass emails and perform mail merges" : "Küldjön tömeg e-maileket és hajtson végre cím összevonásokat",
"Send via email" : "Küldés emailen",
"Sending {0} emails is considered abusive and will damage the reputation of the email server." : " {0} emailek küldése sértőnek minősülhet és ronthatja az email szerver hírnevét",
"Sending..." : "Küldés....",
"Senior" : "Idős",
"Sent to mobile signing pad." : "Elküldve mobil aláíró pad-re",
"Sep" : "Szept.",
"Separate waiting list rank by species" : "Várólista sorrendjének lebontása fajok szerint",
"September" : "Szeptember",
"Server clock adjustment" : "Server óra beállítása",
"Set publishing options" : "Közzétételi opciók beállítása",
"Set this to 0 to never automatically remove." : "Állítsa ezt 0-ra, hogy soha ne törlődjön automatikusan",
"Set to 0 to never update urgencies." : "Állítsa 0-ra, hogy soha ne frissítse sürgősséggel",
"Set wether or not this user account can log in to the user interface." : "Állítsa be, hogy ez a felhasználói fiók be tud-e jelentkezni vagy sem a felhasználói felületbe.",
"Setter" : "szetter",
"Setting a location filter will prevent this user seeing animals who are not in these locations on shelterview, find animal and search." : "A helyszűrő beállítása megakadályozza, hogy ez a felhasználó állatokat nézzen meg, akik nem ezen a helyen láthatók menhelynézetben, állatokat találjon vagy keressen.",
"Settings" : "Beállítások",
"Settings, Lookup data" : "Beállítások, Adatok keresése",
"Settings, Options" : "Beállítások, Opciók",
"Settings, Reports" : "Beállítások, Relentések",
"Settings, System user accounts" : "Beállítások, Rendszerfelhasználók fiókjai",
"Sex" : "Nem",
"Sex and Species" : "Nem és fajta",
"Sexes" : "Nemek",
"Shar Pei" : "shar pei",
"Share" : "Megosztás",
"Shared weblink" : "Megosztott weblink",
"Shares" : "Megosztások",
"Sheep" : "Birka ",
"Sheep Dog" : "juhászkutya",
"Shelter" : "Menhely",
"Shelter Animal" : "menhelyi állat",
"Shelter Animals" : "Menhelyi állatok",
"Shelter Details" : "Menhely adatai",
"Shelter animal {0} '{1}'" : "Menhelyi állat {0} '{1}'",
"Shelter animals" : "Menhelyi állatok",
"Shelter code cannot be blank" : "Menhely kódja nem maradhat üresen.",
"Shelter code {0} has already been allocated to another animal." : "A(z) {0} menhelyi kód már egy másik állathoz lett hozzárendelve.",
"Shelter stats (all time)" : "Menhelyi statisztika (összes)",
"Shelter stats (this month)" : "Menhelyi statisztika (ehavi)",
"Shelter stats (this week)" : "Menhelyi statisztika (eheti)",
"Shelter stats (this year)" : "Menhelyi statisztika (ez évi)",
"Shelter stats (today)" : "Menhelyi statisztika (mai)",
"Shelter view" : "Menhely nézet",
"Shepherd" : "pásztorkutya",
"Shetland Sheepdog Sheltie" : "sheltie",
"Shiba Inu" : "shiba inu",
"Shift" : "Váltás",
"Shih Tzu" : "shih tzu",
"Short" : "Rövid",
"Show GDPR Contact Opt-In field on person screens" : "Mutassa a GDPR kapcsolat Opt-In mezőt a személy képernyőjén",
"Show PDF files inline instead of sending them as attachments" : "Mutassa a PDF fájlokat inline formátumban, és ne mellékletekként küldje el őket",
"Show a cost field on medical/test/vaccination screens" : "Mutasson egy költségmezőt az orvosi / teszt / oltási nézetekben",
"Show a minimap of the address on person screens" : "Mutassa meg a cím minitérképét a személy képernyőjén",
"Show a separate paid date field with costs" : "Mutasson egy külön, fizetett dátummezőt a költségekkel",
"Show alerts on the home page" : "Figyelmeztetések megjelenítése a honlapon",
"Show animal thumbnails in movement and medical books" : "Mutassa az állat ikonképét a mozgási és az orvosi könyvekben",
"Show animals adopted" : "Mutassa az örökbefogadott állatokat",
"Show codes on the shelter view screen" : "Kódok megjelenítése a menhely nézet képernyőn",
"Show complete comments in table views" : "Összes megjegyzés megjelenítése táblázat nézetben",
"Show empty locations" : "Üres helyek mutatása",
"Show on new record screens" : "Megjelenítés új felvételi képernyőkön",
"Show quick links on all pages" : "Gyors linkek megjelenítése az összes oldalon",
"Show quick links on the home page" : "Gyors linkek megjelenítése a honlapon",
"Show report menu items in collapsed categories" : "Jelenítse meg a jelentés menüelemeit összecsukott kategóriákban",
"Show short shelter codes on screens" : "Rövid menhelykódok megjelenítése a képernyőn",
"Show the adoption fee field" : "Örökbefogadási díj mező megjelenítése",
"Show the altered fields" : "Módosított mezők megjelenítése",
"Show the breed fields" : "Fajta mezők megjelenítése",
"Show the brought in by field" : "A behozóra vonatkozó mező megjelenítése",
"Show the color field" : "Szín mező megjelenítése",
"Show the date brought in field" : "Behozatal időpontjára vonatkozó mező megjelenítése",
"Show the entry category field" : "Rögzítési kategória mező megjelenítése",
"Show the full diary (instead of just my notes) on the home page" : "A teljes napló megjelenítése a honlapon (nem csak az én jegyzeteimet)",
"Show the hold fields" : "Fenntartások mező megjelenítése",
"Show the internal location field" : "A belső elhelyezési mező megjelenítése",
"Show the litter ID field" : "Az alom azonosító mezőjének megjelenítése",
"Show the location unit field" : "Az elhelyezői egység mező megjelenítése",
"Show the microchip fields" : "Microchip mezők megjelenítése",
"Show the original owner field" : "Eredeti tulajdonos mező megjelenítése",
"Show the size field" : "Méret mező megjelenítése",
"Show the tattoo fields" : "Tetoválás mező megjelenítése",
"Show the time brought in field" : "Behozatal időpontja mező megjelenítése",
"Show the transfer in field" : "Átszállítási mező megjelenítése",
"Show the weight field" : "Súly mező megjelenítése",
"Show timeline on the home page" : "Idővonal megjelenítése a honlapon",
"Show tips on the home page" : "Tippek megjelenítése a honlapon",
"Show transactions from" : "Tranzakciók megjelenítése",
"Show weight as lb rather than kg" : "Súlyok lb-ben való megjelenítése kg helyett",
"Showing {0} timeline events." : "Idővonal események {0} mutatása",
"Siamese" : "Sziámi macska",
"Siberian" : "Szibériai",
"Siberian Husky" : "szibériai husky",
"Sick leave" : "Betegállomány",
"Sick/Injured" : "Beteg/Sérült",
"Sick/injured animal" : "Beteg/sérült állat",
"Sign" : "Aláír",
"Sign document" : "Írja alá a dokumentumot",
"Sign on screen" : "Képernyőn történő aláírás",
"Signature" : "Aláírás",
"Signed" : "Aláírt",
"Signing" : "Aláírás",
"Signing Pad" : "Aláíró rész",
"Signup" : "Feliratkozás",
"Silky Terrier" : "selyemszőrű terrier",
"Silver" : "Ezüst",
"Silver Fox" : "Ezüst róka",
"Silver Marten" : "Silver Marten nyúl",
"Similar Animal" : "Hasonló állat",
"Similar Person" : "Hasonló személy",
"Simple" : "Egyszerű",
"Singapura" : "Szingapúri macska",
"Single Treatment" : "Egyszeri kezelés",
"Site" : "Hely",
"Sites" : "Webhelyek",
"Size" : "Méret",
"Sizes" : "Méretek",
"Skunk" : "Bűzösborz",
"Skye Terrier" : "skye terrier",
"Sloughi" : "arab agár",
"Small" : "Kicsi",
"SmartTag PETID" : "OkosCimke PETID",
"Smooth Fox Terrier" : "foxi",
"Snake" : "Kígyó",
"Snowshoe" : "Hócipő",
"Social" : "Társas",
"Softbill (Other)" : "Puhacsőrű (egyéb)",
"Sold" : "Eladott ",
"Somali" : "Szomáli macska",
"Some batch processes may take a few minutes to run and could prevent other users being able to use the system for a short time." : "Egyes összevont folyamatok néhány percet vehetnek igénybe, és megakadályozhatják, hogy más felhasználók rövid ideig használják a rendszert.",
"Some browsers allow shortcut keys, press SHIFT+ALT+A in Chrome or Firefox to jump to the animal adoption screen." : "Egyes böngészők lehetővé teszik a gyorsbillentyűk használatát, nyomja meg a SHIFT + ALT + A billentyűket a Chrome-ban vagy a Firefox-ban, hogy ugorjon az állatok befogadására szolgáló képernyőre.",
"Some info text" : "Néhány információ szöveg",
"Sorrel" : "Sorrel fajta ló",
"Sorrel Tortoiseshell" : "Sorrel fajta ló Teknőctarka",
"Sorry, this document has already been signed" : "Sajnálom, ezt a dokumentumot már aláírták",
"South Russian Ovcharka" : "délorosz juhászkutya",
"Spaniel" : "spániel",
"Special Needs" : "Különleges igények",
"Species" : "Fajták ",
"Species A-Z" : "Fajták A-Z",
"Species Z-A" : "Fajták Z-A",
"Species to use when publishing to third party services and adoption sites" : "Használható fajok, amikor harmadik felek szolgáltatásaiban és örökbefogadási webhelyeken teszik közzé",
"Specifying a reschedule date will make copies of the selected vaccinations and mark them to be given on the reschedule date. Example: If this vaccination needs to be given every year, set the reschedule date to be 1 year from today." : "Az átütemezési dátum megadásával a kiválasztott oltásokról másolatokat készít, és megjelöli őket az átütemezés dátumán. Példa: Ha ezt a vakcinát minden évben be kell adni, akkor állítsa be az átütemezés dátumát a mai naptól 1 évre.",
"Sphynx (hairless cat)" : "Szfinx (kanadai szőrtelen macska)",
"Spitz" : "spicc",
"Split baby/adult age at" : "Kölyök/felnőtt kor szétválasztása ekkor",
"Split species pages with a baby/adult prefix" : "Ossza fel a fajok oldalakat kölyök / felnőtt előtaggal",
"Sponsorship donations" : "Szponzorok adományai",
"Staff" : "Személyzet",
"Staff Rota" : "Személyzet beosztása",
"Staff record" : "Személyzet nyilvántartása",
"Staff rota" : "Személyzet beosztása",
"Staffordshire Bull Terrier" : "staffordshire bullterrier",
"Standard" : "Szabvány",
"Standardbred" : "",
"Start Date" : "Kezdő dátum",
"Start Of Day" : "A nap kezdete ",
"Start Time" : "Kezdési idő",
"Start at" : "Kezdés ekkor",
"Start date" : "Kezdő dátum",
"Start date must be a valid date" : "A kezdő dátum érvényes kell legyen",
"Start of year" : "Év kezdete",
"Started" : "Elkezdődött ",
"Starts" : "Kezdések",
"State" : "Állam",
"State contains" : "Megállapítás tartalma",
"Stationary costs" : "Irodaszer költségek",
"Stats" : "Statisztikák",
"Stats period" : "Statisztikai időszak",
"Stats show running figures for the selected period of animals entering and leaving the shelter on the home page." : "A statisztikák a menhelyre érkező és onnan távozó állatok kiválasztott időszakának futó adatait mutatják a honlapon.",
"Status" : "Állapot",
"Status and Species" : "Állapot és faj",
"Stay" : "Tartózkodás",
"Stock" : "Készlet",
"Stock Control" : "Készlet kontroll",
"Stock Levels" : "Készlet szintjei",
"Stock Locations" : "Készlethelyek",
"Stock Take" : "Készletből elvétel",
"Stock Usage Type" : "Készlet felhasználásának típusa",
"Stock level must have a name" : "A készlet szintjének névvel kell rendelkeznie",
"Stock level must have a unit" : "A készlet szintjének egységgel kell rendelkeznie",
"Stock needs a name and unit." : "A készletnek névvel és egységgel kell rendelkeznie",
"Stocktake" : "Leltár",
"Stolen" : "Lopott",
"Stolen {0}" : "Lopott {0}",
"Stop" : "Leállít",
"Stop Publishing" : "A közzététel leállítása",
"Stores" : "Raktárok",
"Stray" : "Kóbor",
"Su" : "Su",
"SubTotal" : "Részösszeg",
"Subject" : "Tárgy",
"Submission received: {0}" : "Beadvány érkezett: {0}",
"Success" : "Siker",
"Successfully attached to {0}" : "Sikeresen csatolva a következőhöz: {0}",
"Sugar Glider" : "Törpe erszényesmókus",
"Sun" : "Vas",
"Sunday" : "Vasárnap",
"Super user" : "Superuser",
"Superuser" : "Superuser",
"Surname" : "Vezetéknév",
"Surrender" : "Lemondó",
"Surrender Pickup" : "Lemondótól felvétel",
"Suspect" : "Gyanúsított",
"Suspect 1" : "Gyanúsított 1",
"Suspect 2" : "Gyanúsított 2",
"Suspect 3" : "Gyanúsított 3",
"Suspect/Animal" : "Gyanúsított/Állat",
"Swan" : "Hattyú",
"Swedish Vallhund" : "svéd vallhund",
"Syntax check this SQL" : "Szintaxis ellenőrizze ezt az SQL-t",
"System" : "Rendszer",
"System Admin" : "Rendszergazda",
"System Options" : "Rendszerbeállítások",
"System user accounts" : "Rendszer felhasználói fiókok",
"T = first letter of animal type" : "T = állat típusának első betűje",
"TNR" : "BIV",
"TNR - Trap/Neuter/Release" : "BIV - Befogás/Ivartalanítás/Visszaengedés",
"TT = first and second letter of animal type" : "TT = állat típusának első és második betűje",
"Tabby" : "cirmos",
"Tabby and White" : "Cirmos-fehér",
"Take another payment" : "Újabb fizetés beszedése",
"Taken By" : "Ezen személy által hozott",
"Tan" : "Cser",
"Tan and Black" : "Fekete-cser",
"Tan and White" : "Barna-fehér",
"Task complete." : "Feladat teljesitve.",
"Task items are executed in order of index, lowest to highest" : "A feladat elemeit a tárgymutató sorrendjében hajtják végre, a legalacsonyabbtól a legmagasabbig",
"Tattoo" : "Tetoválás",
"Tattoo Date" : "Tetoválás dátuma",
"Tattoo Number" : "Tetoválási szám",
"Tax" : "Adó",
"Tax Amount" : "Adó összege",
"Tax Rate %" : "Adókulcs %",
"Telephone" : "Telefon",
"Telephone Bills" : "Telefon számlák",
"Template" : "Formanyomtatvány",
"Template Name" : "Formanyomtatvány neve",
"Template names can include a path portion with /, eg: Vets/Rabies Certificate" : "A formanyomtatvány nevek tartalmazhatnak egy elérési utat, például /, például: Állatorvosok / veszettség igazolás",
"Tennessee Walker" : "Tennessee sétáló ló",
"Terrapin" : "Gyémántteknős",
"Terrier" : "terrier",
"Test" : "Teszt",
"Test Animal" : "Tesztelendő állat",
"Test Book" : "Tesztkönyv",
"Test Performed" : "Teszt elvégezve",
"Test Results" : "Teszt eredmények",
"Test Types" : "Teszt típusok",
"Test book" : "Tesztkönyv",
"Test marked as performed for {0} - {1}" : "A teszt elvégzettként megjelölve: {0} - {1}",
"Tests" : "Tesztek",
"Tests need an animal and at least a required date." : "A tesztekhez állatra és legalább egy előírt időpontra van szükség.",
"Text" : "Szöveg",
"Text Encoding" : "Szöveg kódolása",
"Th" : "Cs",
"Thai Ridgeback" : "thai ridgeback",
"Thank you for choosing Animal Shelter Manager for your shelter!" : "Köszönjük, hogy az ASM-t választotta.",
"Thank you, the document is now signed." : "Köszönöm, a dokumentum már aláírásra került.",
"That animal is already linked to the incident" : "Ez az állat már kapcsolódik az eseményhez",
"The CSV file should be created by PayPal's \"All Activity\" report." : "A CSV fájlt a PayPal \"Minden tevékenység\" jelentéssel kell létrehoznia.",
"The SmartTag PETID number" : "Az OkosCimke PETID száma",
"The SmartTag type" : "Az OkosCímke típusa",
"The URL is the address of a web resource, eg: www.youtube.com/watch?v=xxxxxx" : "Az URL egy webes forrás címe, például: www.youtube.com/watch?v=xxxxxx",
"The animal name" : "Az állat neve",
"The animal record to merge must be different from the original." : "Az összevonandó állat regisztrációjának különböznie kell a eredetitől.",
"The animal sex" : "Az állat neme",
"The base color of this animal" : "Az állat alapszíne",
"The coat type of this animal" : "Az állat szőrzetének típusa",
"The confirmation email message to send to the form submitter. Leave blank to send a copy of the completed form." : "A megerősítő e-mail üzenet, amelyet az űrlap benyújtójának kell küldeni. A kitöltött nyomtatvány másolatának küldéséhez hagyja üresen.",
"The database will be inaccessible to all users while the export is in progress." : "",
"The date reported to the shelter" : "",
"The date the animal died" : "Az állat halálának napja.",
"The date the animal was FIV/L tested" : "Az állat ezen a napon volt FIV/FeLV tesztelve",
"The date the animal was adopted" : "Az állat örökbefogadásának napja",
"The date the animal was altered" : "",
"The date the animal was born" : "Az állat születésének napja",
"The date the animal was brought into the shelter" : "Az állat menhelyre kerülésének napja",
"The date the animal was heartworm tested" : "Az állat szívféreg tesztelésének napja",
"The date the animal was microchipped" : "Az állat microchippel való ellátásának napja",
"The date the animal was reclaimed" : "Az állat visszakövetelésének napja ",
"The date the animal was tattooed" : "Az állat tetoválásának napja",
"The date the foster animal will be returned if known" : "Az állat ideiglenes befogadótól való visszakerülésének napja, ha ismert",
"The date the foster is effective from" : "Az ideiglenes befogadás érvénybe lépésének napja ",
"The date the litter entered the shelter" : "Az alom menhelyre érkezésének napja",
"The date the owner last contacted the shelter" : "A tulajdonos menhellyel való kapcsolatfelvételének utolsó napja ",
"The date the payment was received" : "A befizetés érkezésének napja",
"The date the reservation is effective from" : "A lefoglalás ettől a naptól van érvényben",
"The date the retailer movement is effective from" : "A kiskereskedői mozgatás ettől a naptól van érvényben",
"The date the transfer is effective from" : "A transfer ettől a naptól van érvényben",
"The date the trial adoption is over" : "A próbaidő leteltének napja ",
"The date the vaccination is required/due to be administered" : "Az esedékes oltás ezen a napon esedékes/ekkor kell beadni",
"The date the vaccination was administered" : "Az oltás beadásának napja ",
"The date this animal was found" : "Az állat megtalálásának napja",
"The date this animal was lost" : "Az állat elvesztésének napja ",
"The date this animal was put on the waiting list" : "Az állat várólistára vételének napja ",
"The date this animal was removed from the waiting list" : "Az állat várólistáról való eltávolításának napj a",
"The date this animal was reserved" : "Az állat foglalásának napja",
"The date this animal was returned to its owner" : "Az állat eredeti tulajdonosának való visszaadás napja ",
"The date this person was homechecked." : "A dátum, amikor ennek a személynek a lelátogatása megtörtént. ",
"The default username is 'user' with the password 'letmein'" : "Az alapértelmezett felhasználónév a 'user', a jelszó 'letmein'. ",
"The entry reason for this animal" : "Az állat felvitelének oka ",
"The litter this animal belongs to" : "Az alom, amelyhez ez az állat tartozik.",
"The locale determines the language ASM will use when displaying text, dates and currencies." : "A területi beállítás határozza meg az ASM által használt szöveg, dátum és pénznem megjelenítésekor használt nyelvet.",
"The location where the animal was picked up" : "A helyszín, ahonnan az állatot elhozták",
"The microchip number" : "A mikrochip száma",
"The movement number '{0}' is not unique." : "A mozgatási szám'{0}' nem egyedi.",
"The number of stock records to create" : "A létrehozandó készletnyilvántartások száma",
"The period in days before waiting list urgency is increased" : "",
"The person record to merge must be different from the original." : "",
"The primary breed of this animal" : "Az állat elsődleges fajtája ",
"The reason the owner wants to part with the animal" : "Az állat tulajdonos által való leadásának oka ",
"The reason this animal was removed from the waiting list" : "Az állat várólistáról való eltávolításának oka ",
"The remaining units in the container" : "Fennmaradó egységek a tárolóban",
"The result of the FIV test" : "FIV teszt eredménye ",
"The result of the FLV test" : "A FeLV teszt eredménye ",
"The result of the heartworm test" : "Az állat szívféreg tesztjének eredménye",
"The retail/resale price per unit" : "",
"The secondary breed of this animal" : "Az állat másodlagos fajtája ",
"The selected file is not an image." : "A kiválasztott file nem kép formátumú.",
"The shelter category for this animal" : "Az állat menhelyi kategóriája ",
"The shelter reference number" : "A menhely referencia száma ",
"The sheltermanager.com admin account password cannot be changed here, please visit {0}" : "A sheltermanager.com admin fiók jelszava itt nem változtatható meg, kérjük, látogasson el a {0} oldalra.",
"The size of this animal" : "Az állat mérete",
"The species of this animal" : "Az állat faja",
"The tattoo number" : "A tetoválási szám ",
"The type of unit in the container, eg: tablet, vial, etc." : "A raktáron lévő anyagok típusa, pl .: tabletta, injekciós üveg stb.",
"The veterinary license number." : "Az állatorvos kamarai száma",
"The wholesale/trade price the container was bought for" : "",
"There is not enough information in the form to attach to a shelter animal record (need an animal name)." : "A nyomtatványon nincs elegendő információ ahhoz, hogy csatolhassuk a menhelyi állatok nyilvántartásához (szükség van egy állatnevére).",
"There is not enough information in the form to create a found animal record (need a description and area found)." : "Nincs elegendő információ megadva egy talált állat rögzítéséhez (leírásra és megtalálási terület megadására van szükség).",
"There is not enough information in the form to create a lost animal record (need a description and area lost)." : "Nincs elegendő információ megadva egy elveszett állat rögzítéséhez (leírásra és elvesztési terület megadására van szükség).",
"There is not enough information in the form to create a person record (need a surname)." : "Nincs elegendő információ megadva egy személyrögzítéséhez (vezetéknév megadására van szükség).",
"There is not enough information in the form to create a transport record (need animalname)." : "Nincs elegendő információ megadva egy transzport rögzítéséhez (állatnév megadására van szükség).",
"There is not enough information in the form to create a transport record (need pickupdate and dropoffdate)." : "Nincs elegendő információ megadva egy transzport rögzítéséhez (felvételi és leadási dátum megadására van szükség).",
"There is not enough information in the form to create a waiting list record (need a description)." : "Nincs elegendő információ megadva egy várólistás állat rögzítéséhez (részletes leírásra van szükség).",
"There is not enough information in the form to create an incident record (need call notes and dispatch address)." : "",
"These are the HTML headers and footers used when displaying online forms." : "Ezek a HTML-fejlécek és láblécek, amelyeket az online űrlapok megjelenítésekor használnak.",
"These are the HTML headers and footers used when generating reports." : "Ezek a HTML fejlécek és láblécek, amelyeket a jelentések előállításakor használnak.",
"These are the default values for these fields when creating new records." : "Ezek a mezők alapértelmezett értékei új bejegyzések létrehozásakor.",
"These batch processes are run each night by the system and should not need to be run manually." : "Ezeket a kötegelt folyamatokat a rendszer minden este futtatja, és ezeket nem kell manuálisan futtatni.",
"These fields allow you to deduct stock for the test(s) given. This single deduction should cover the selected tests being performed." : "",
"These fields allow you to deduct stock for the treatment(s) given. This single deduction should cover the selected treatments being administered." : "",
"These fields allow you to deduct stock for the vaccination(s) given. This single deduction should cover the selected vaccinations being administered." : "",
"These fields determine which columns are shown on the find animal and find person screens." : "",
"These numbers are for shelters who have agreements with insurance companies and are given blocks of policy numbers to allocate." : "",
"These options change the behaviour of the search box at the top of the page." : "",
"These values are required for correct operation of the system. ONLY change them if you are translating to another language." : "",
"Third offence" : "Harmadik támadás",
"This Month" : "Ehavi",
"This Week" : "Eheti",
"This Year" : "Ezévi",
"This animal already has an active reservation." : "Az állatnak már aktív foglalása van.",
"This animal has a SmartTag PETID" : "Ennek az állatnak SmartTag PETID-je van.",
"This animal has a tattoo" : "Ennek az állatnak tetoválása van",
"This animal has active reservations, they will be cancelled." : "",
"This animal has an adoption fee of {0}" : "Ennek az állatnak az örökbefogadási díja {0}",
"This animal has been FIV/L tested" : "Ez az állat FIV/FeLV teszelve lett",
"This animal has been altered" : "Ez az állat ivartalanítva lett",
"This animal has been declawed" : "Ennek az állatnak eltávolították a karmait",
"This animal has been heartworm tested" : "Ez az állat szívféreg tesztelve lett",
"This animal has movements and cannot be removed." : "Ennek az állat át van helyezve, és nem távolítható el.",
"This animal has not been altered." : "Az állat nem lett ivartalanítva.",
"This animal has not been microchipped." : "Az állat nem volt microchipezve.",
"This animal has special needs" : "Az állatnak különleges igényei vannak",
"This animal has the same name as another animal recently added to the system." : "Ennek az állatnak ugyanaz a neve, mint egy másik állatnak, amelyet nemrégiben adtak hozzá a rendszerhez.",
"This animal is a crossbreed" : "Ez az állat keverék",
"This animal is bonded with {0}" : "Ez az állat a következővel kötődik: {0}",
"This animal is bonded with {0}. Adoption movement records will be created for all bonded animals." : "Ez az állat {0} kapcsolódik Az örökbefogadás rögzitését minden kapcsolódó állat számára el kell készíteni.",
"This animal is currently at a retailer, it will be automatically returned first." : "",
"This animal is currently fostered, it will be automatically returned first." : "Ez az állat jelenleg ideiglenes befogadónál van, először autómatikusan visszahozzák. ",
"This animal is currently held and cannot be adopted." : "",
"This animal is currently quarantined and should not leave the shelter." : "Az állat jelenleg karanténban van, és nem hagyhatja el a menhelyet.",
"This animal is marked not for adoption." : "Ez az állat nem örökbefogadhatóként van megjelölve.",
"This animal is microchipped" : "Ez az állat chipezett",
"This animal is not on the shelter." : "Ez az állat nincs a menhelyen.",
"This animal is part of a cruelty case and should not leave the shelter." : "Ez az állat egy bántalmazás/foglalás tagja,, ezért nem hagyhatja el a menhelyet.",
"This animal should be held in case it is reclaimed" : "",
"This animal should not be shown in figures and is not in the custody of the shelter" : "",
"This animal was dead on arrival to the shelter" : "Ez az állat holtan érkezett a menhelyre. ",
"This animal was euthanized" : "Az állat elaltatásra került",
"This animal was picked up" : "",
"This animal was transferred from another shelter" : "Az állat egy másik menhelyről került át",
"This code has already been used." : "Ez a kód már használatban van.",
"This database is locked and in read-only mode. You cannot add, change or delete records." : "Ez az adatbázis zárolva van, és csak olvasás módban működik. Nem vehet fel, módosíthat vagy törölhet feljegyzéseket.",
"This database is locked." : "Az adatbázis zárolva van.",
"This date of birth is an estimate" : "A születési idő becsült",
"This expense account is the source for costs of this type" : "",
"This income account is the source for payments received of this type" : "",
"This item is referred to in the database ({0}) and cannot be deleted until it is no longer in use." : "",
"This many years after creation of a person record, the name, address and telephone data will be anonymized." : "",
"This month" : "Ehavi",
"This movement cannot be from a retailer when the animal has no prior retailer movements." : "",
"This person has an animal control incident against them" : "",
"This person has an animal control incident against them." : "",
"This person has been banned from adopting animals" : "",
"This person has been banned from adopting animals." : "",
"This person has been under investigation" : "",
"This person has been under investigation." : "",
"This person has movements and cannot be removed." : "",
"This person has not passed a homecheck" : "A személy még nem esett át otthon leltogatáson.",
"This person has not passed a homecheck." : "A személy még nem esett át otthoni lelátogatáson.",
"This person has payments and cannot be removed." : "",
"This person has previously surrendered an animal." : "Ez a személy korábban állatot adott le. ",
"This person is linked to a waiting list record and cannot be removed." : "Ez a személy egy várólista feljegyzéshez van rendelve, és nem lehet törölni. ",
"This person is linked to an animal and cannot be removed." : "Ez a személy egy állathoz van rendelve, és nem lehet törölni. ",
"This person is linked to an investigation and cannot be removed." : "Ez a személy egy vizsgálathoz van rendelve, és nem lehet törölni. ",
"This person is linked to animal control and cannot be removed." : "Ez a személy egy állat ellenőrzéshez van rendelve, és nem lehet törölni. ",
"This person is linked to animal licenses and cannot be removed." : "Ez a személy állati engedélyekhez van rendelve, és nem lehet törölni. ",
"This person is linked to animal transportation and cannot be removed." : "Ez a személy egy transzporthoz van rendelve, és nem lehet törölni. ",
"This person is linked to citations and cannot be removed." : "",
"This person is linked to found animals and cannot be removed." : "Ez a személy egy talált állathoz van rendelve, és nem lehet törölni. ",
"This person is linked to lost animals and cannot be removed." : "Ez a személy egy elveszett állathoz van rendelve, és nem lehet törölni. ",
"This person is linked to trap loans and cannot be removed." : "Ez a személy egy csapdakölcsönéshez van rendelve, és nem lehet törölni. ",
"This person is not flagged as a fosterer and cannot foster animals." : "",
"This person is not flagged as a retailer and cannot handle retailer movements." : "",
"This person is very similar to another person on file, carry on creating this record?" : "",
"This person lives in the same area as the person who brought the animal to the shelter." : "",
"This record has been changed by another user, please reload." : "Ezt a rekordot egy másik felhasználó módosította, kérjük, töltse be újra.",
"This report cannot be sent by email as it requires criteria to run." : "",
"This screen allows you to add extra documents to your database, for staff training, reference materials, etc." : "",
"This screen allows you to add extra images to your database, for use in reports and documents." : "",
"This type of movement requires a date." : "Az ilyen típusú mozgáshoz dátum szükséges.",
"This type of movement requires a person." : "Az ilyen típusú mozgáshoz személy megadása szükséges.",
"This week" : "Eheti",
"This will permanently remove the selected records, are you sure?" : "Ez véglegesen eltávolítja a kiválasztott feljegyzéseket, biztos benne?",
"This will permanently remove the selected roles, are you sure?" : "Ez véglegesen eltávolítja a kiválasztott szerepeket, biztos benne?",
"This will permanently remove the selected user accounts. Are you sure?" : "",
"This will permanently remove this account and ALL TRANSACTIONS HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "",
"This will permanently remove this additional field and ALL DATA CURRENTLY HELD AGAINST IT. This action is irreversible, are you sure you want to do this?" : "",
"This will permanently remove this animal, are you sure?" : "",
"This will permanently remove this incident, are you sure?" : "",
"This will permanently remove this person, are you sure?" : "",
"This will permanently remove this record, are you sure?" : "",
"This will permanently remove this waiting list entry, are you sure?" : "",
"This will remove ALL rota entries for the week beginning {0}. This action is irreversible, are you sure?" : "",
"This year" : "Ezévi",
"Thoroughbred" : "Telivér",
"Thu" : "",
"Thumbnail size" : "",
"Thursday" : "",
"Tibetan Mastiff" : "tibeti masztiff",
"Tibetan Spaniel" : "tibeti spániel",
"Tibetan Terrier" : "tibeti terrier",
"Tiger" : "Tigris",
"Time" : "Idő",
"Time Brought In" : "Bekerülés dátuma",
"Time On List" : "",
"Time On Shelter" : "",
"Time on list" : "",
"Time on shelter" : "",
"Timeline" : "",
"Timeline ({0})" : "",
"Times should be in HH:MM format, eg: 09:00, 16:30" : "",
"Title" : "",
"Title First Last" : "",
"Title Initials Last" : "",
"To" : "",
"To Adoption" : "",
"To Fostering" : "Ideiglenes befogadóhoz",
"To Other" : "",
"To Retailer" : "",
"To add people to the rota, create new person records with the staff or volunteer flag." : "",
"To continue using ASM, please renew {0}" : "",
"To week beginning" : "",
"Today" : "",
"Tonkinese" : "",
"Too Many Animals" : "",
"Tooltip" : "",
"Top Margin" : "",
"Tortie" : "",
"Tortie and White" : "",
"Tortoise" : "",
"Tosa Inu" : "tosa inu",
"Total" : "",
"Total number of units in the container" : "",
"Total payments" : "",
"Toucan" : "",
"Toy Fox Terrier" : "toy fox terrier",
"Training" : "",
"Transactions" : "",
"Transactions need a date and description." : "",
"Transfer" : "",
"Transfer In" : "Átmozgatva",
"Transfer To" : "Átmozgatva",
"Transfer an animal" : "",
"Transfer from Municipal Shelter" : "",
"Transfer from Other Shelter" : "",
"Transfer successfully created." : "Az átadás sikeresen létrejött.",
"Transfer?" : "",
"Transferred" : "Átmozgatva",
"Transferred From" : "Átmozgatva",
"Transferred In" : "Átmozgatva",
"Transferred In {0}" : "Átmozgatva (be) {0}",
"Transferred Out" : "Átmozgatva",
"Transferred Out {0}" : "Átmozgatva (ki) {0}",
"Transfers must have a valid transfer date." : "",
"Transport" : "",
"Transport Book" : "",
"Transport Types" : "",
"Transport book" : "",
"Transport requires an animal" : "",
"Transports must have valid pickup and dropoff dates and times." : "",
"Trap Loans" : "Csapda kölcsönzések",
"Trap Number" : "Csapdaszám",
"Trap Types" : "",
"Trap loan" : "",
"Trap loans" : "",
"Treat animals at retailers as part of the shelter inventory" : "",
"Treat foster animals as part of the shelter inventory" : "",
"Treat trial adoptions as part of the shelter inventory" : "",
"Treatment" : "Kezelés",
"Treatment Given" : "",
"Treatment marked as given for {0} - {1}" : "",
"Treatment name cannot be blank" : "A kezelés neve nem maradhat üresen",
"Treatments" : "Kezelések",
"Treeing Walker Coonhound" : "",
"Trial Adoption" : "",
"Trial adoption" : "",
"Trial adoption book" : "",
"Trial ends on" : "",
"Tricolour" : "trikolor",
"Trigger Batch Processes" : "Indított batch folyamatok",
"Tu" : "Kedd",
"Tue" : "Kedd",
"Tuesday" : "Kedd",
"Tumblr" : "",
"Turkey" : "Pulyka",
"Turkish Angora" : "",
"Turkish Van" : "",
"Turtle" : "Teknős",
"Twitter" : "",
"Type" : "Típus",
"Type of animal links to show" : "Mutatandó állat-linkek száma",
"U (Unwanted Cat)" : "U (nem kívánt macska)",
"UK Giftaid" : "",
"URL" : "URL",
"UUUUUUUUUU or UUUU = unique number" : "UUUUUUUUU vagy UUUU = egyedi szám",
"Unable to Afford" : "Nem engedheti meg magának",
"Unable to Cope" : "Képtelen kezelni",
"Unaltered" : "Ivartalanítatlan",
"Unaltered Adopted Animals" : "Ivartalanítatlan örökbefogadott állatok",
"Unaltered Dog - 1 year" : "Ivartalanítatlan kutya - 1 év",
"Unaltered Dog - 3 year" : "Ivartalanítatlan kutya - 3 év",
"Unavailable" : "Nem elérhető",
"Under {0} weeks old" : "{0} hetesnél fiatalabb",
"Unit" : "Egység",
"Unit Price" : "Egységár",
"Unit within the location, eg: pen or cage number" : "",
"Units" : "",
"Unknown" : "Ismeretlen",
"Unknown microchip brand" : "",
"Unpaid Fines" : "",
"Unreserved" : "",
"Unsaved Changes" : "",
"Unspecified" : "Nincs megadva",
"Unsuitable Accomodation" : "",
"Up for adoption" : "Nem örökbefogadható",
"Upcoming medical items" : "",
"Update" : "",
"Update publishing options" : "",
"Update system options" : "",
"Update the daily boarding cost for this animal" : "",
"Updated database to version {0}" : "",
"Updated." : "",
"Updating..." : "Betöltés…",
"Upload" : "",
"Upload Document" : "",
"Upload ODT" : "",
"Upload Photo" : "",
"Upload a new OpenOffice template" : "",
"Upload all available images for animals" : "",
"Upload an SQL script" : "",
"Upload splash.jpg and logo.jpg to override the login screen image and logo at the top left of ASM." : "",
"Uploading..." : "Betöltés…",
"Urgencies" : "",
"Urgency" : "",
"Urgent" : "",
"Usage Date" : "",
"Usage Type" : "",
"Usage explains why this stock record was created or adjusted. Usage records will only be created if the balance changes." : "",
"Use Automatic Insurance Numbers" : "",
"Use HTML5 client side image scaling where available to speed up image uploads" : "",
"Use SQL Interface" : "",
"Use a single breed field" : "",
"Use animal comments" : "",
"Use fancy tooltips" : "",
"Use notes from preferred photo" : "",
"Use the icon in the lower right of notes fields to view them in a separate window." : "",
"User Accounts" : "Új fiók",
"User Roles" : "Felhasználói szerepkörök",
"User accounts that will only ever call the Service API should set this to No." : "",
"User roles" : "Felhasználói szerepkörök",
"Username" : "Felhasználónév",
"Username '{0}' already exists" : "'{0}' felhasználónév már létezik",
"Users" : "Felhasználók",
"Users need a username, password and at least one role or the superuser flag setting." : "A felhasználóknak egy felhasználónévre, jelszóra és legalább egy szerepkörre vagy szuper-felhasználói megjelölésre van szüksége",
"Vacation" : "Szabadság",
"Vaccinate" : "Oltás beadása",
"Vaccinate Animal" : "Állat oltása",
"Vaccination" : "Oltás",
"Vaccination Book" : "Oltási könyv",
"Vaccination Given" : "Beadott oltások",
"Vaccination Types" : "Oltás típusai",
"Vaccination book" : "Oltási könyv",
"Vaccination marked as given for {0} - {1}" : "Oltás megjelölése beadottként {0} - {1}",
"Vaccinations" : "Oltások",
"Vaccinations need an animal and at least a required date." : "Az oltásokhoz egy állat és legalább egy dátum megadása szükséges",
"Vaccinations require an animal" : "Az oltásokhoz egy állat megadása szükséges",
"Vaccinations: {0}, Tests: {1}, Medical Treatments: {2}, Transport: {3}, Costs: {4}, Total Costs: {5} Total Payments: {6}, Balance: {7}" : "Oltások: {0}, Tesztek: {1}, Gyógyszeres kezelések: {2}, Szállítás: {3}, Költségek: {4}, Teljes költség: {5}, Teljes kifizetés: {6}, Egyenleg: {7}",
"Valid tokens for the subject and text" : "",
"Value" : "Érték",
"Various" : "vegyes",
"Vertical Pitch" : "",
"Very Large" : "Nagyon nagy",
"Vet" : "Állatorvos",
"Vet Visit" : "Állatorvosi látogatás",
"Victim" : "Áldozat",
"Victim Name" : "Áldozat neve",
"Video Link" : "Videó link",
"Vietnamese Pot Bellied" : "Vietnámi csüngőhasú malac",
"View" : "Megtekint",
"View Accounts" : "Új fiókok",
"View Animals" : "Állatok megtekintése",
"View Audit Trail" : "",
"View Citations" : "Hivatkozás megtekintése",
"View Clinic Appointment" : "",
"View Cost" : "Költség megtekintése",
"View Diary" : "Napló megtekintése",
"View Diets" : "Étrend megtekintése",
"View Document" : "Dokumentum megtekintése",
"View Document Repository" : "Dokumentumgyüjtemény megtekintése",
"View Found Animal" : "Talált állat megtekintése",
"View Incidents" : "Esetek megtekintése",
"View Incoming Forms" : "Bejövő nyomtatványok megtekintése",
"View Investigations" : "Nyomozások megtekintése",
"View Licenses" : "Engedély megtekintése",
"View Litter" : "Alom megtekintése",
"View Log" : "Napló megtekintése",
"View Lost Animal" : "Elveszett állat megtekintése",
"View Manual" : "Kézikönyv megtekintése",
"View Media" : "Média megtekintése",
"View Medical Records" : "Orvosi jelentés megtekintése",
"View Movement" : "Tétel megtekintése",
"View PDF" : "PDF megtekintése",
"View Payments" : "Fizetések megtekintése",
"View Person" : "Személy megtekintése",
"View Person Links" : "Személy linkek megtekintése",
"View Report" : "Jelentés megtekintése",
"View Roles" : "Szerepek megtekintése",
"View Rota" : "Sorrend megtekintése",
"View Shelter Animals" : "Menhelyi állatok megtekintése",
"View Staff Person Records" : "Személyzet bejegyzéseinek megtekintése",
"View Stock" : "Állomány megtekintése",
"View Tests" : "Tesztek megtekintése",
"View Training Videos" : "Tréningvideók megtekintése",
"View Transport" : "Transzport megtekintése",
"View Trap Loans" : "Csapda kölcsönzések megtekintése",
"View Vaccinations" : "Oltások megtekintése",
"View Volunteer Person Records" : "Önkéntes adatainak megtekintése",
"View Vouchers" : "Bizonylat megtekintése",
"View Waiting List" : "Várólista megtekintése",
"View animals matching publishing options" : "Közzétételi opcióknak megfelelő állatok megtekintése",
"View littermates" : "Alomtestvérek megtekintése",
"View matching records" : "Összeillő jelentések megtekintése",
"View media" : "Média megtekintése",
"View publishing logs" : "Közzétételi naplóbejegyzések megtekintése",
"Visual Theme" : "",
"Vizsla" : "vizsla",
"Volunteer" : "",
"Voucher Types" : "",
"Vouchers" : "",
"Vouchers need an issue and expiry date." : "",
"WARNING: This animal has not been microchipped" : "",
"WARNING: This animal is over 6 months old and has not been neutered/spayed" : "",
"Waiting" : "",
"Waiting List" : "Várólista hozzáadása",
"Waiting List - Additional" : "",
"Waiting List - Details" : "",
"Waiting List - Removal" : "",
"Waiting List Contact" : "",
"Waiting List Donation" : "",
"Waiting List {0}" : "Várólista: { 0 }",
"Waiting List: {0}" : "Várólista: { 0 }",
"Waiting Room" : "",
"Waiting for documents..." : "",
"Waiting list donations" : "",
"Waiting list entries matching '{0}'." : "",
"Waiting list entries must have a contact" : "",
"Waiting list entry for {0} ({1})" : "",
"Waiting list entry successfully added." : "",
"Waiting list urgency update period in days" : "",
"Warmblood" : "",
"Warn if the name of the new animal is similar to one entered recently" : "",
"Warn when adopting an animal who has not been microchipped" : "",
"Warn when adopting an unaltered animal" : "",
"Warn when adopting to a person who has been banned from adopting animals" : "",
"Warn when adopting to a person who has not been homechecked" : "",
"Warn when adopting to a person who has previously brought an animal to the shelter" : "",
"Warn when adopting to a person who lives in the same area as the original owner" : "",
"Warn when creating multiple reservations on the same animal" : "",
"Warnings" : "Figyelmeztetések",
"Wasted" : "Elpazarolt",
"Water Bills" : "Vízszámlák",
"We" : "Sze",
"Wed" : "Szer",
"Wednesday" : "Szerda",
"Week" : "Hét",
"Week beginning {0}" : "A hét kezdete: {0} ",
"Weekly" : "",
"Weight" : "",
"Weimaraner" : "Weimari vizsla",
"Welcome!" : "",
"Welsh Corgi" : "Welsh corgi",
"Welsh Springer Spaniel" : "Welsh springer spániel",
"Welsh Terrier" : "Welsh terrier",
"West Highland White Terrier Westie" : "Westie",
"Wheaten Terrier" : "",
"When" : "",
"When ASM should stop showing this message" : "",
"When I change the location of an animal, make a note of it in the log with this type" : "",
"When I change the weight of an animal, make a note of it in the log with this type" : "",
"When I generate a document, make a note of it in the log with this type" : "",
"When I mark an animal held, make a note of it in the log with this type" : "",
"When I set a new GDPR Opt-In contact option, make a note of it in the log with this type" : "",
"When a message is created, email it to each matching user" : "",
"When creating payments from the Move menu screens, mark them due instead of received" : "",
"When displaying calendars, the first day of the week is" : "",
"When displaying person names, use the format" : "",
"When entering dates, hold down CTRL and use the cursor keys to move around the calendar. Press t to go to today." : "",
"When entering vaccinations, default the last batch number and manufacturer for that type" : "",
"When matching lost animals, include shelter animals" : "",
"When publishing to third party services, add this extra text to the bottom of all animal descriptions" : "",
"When receiving multiple payments, allow the due and received dates to be set" : "",
"When receiving payments, allow a quantity and unit price to be set" : "",
"When receiving payments, allow recording of sales tax with a default rate of" : "",
"When receiving payments, allow the deposit account to be overridden" : "",
"When you use Move > Adopt an animal, ASM will automatically return any open foster or retailer movement before creating the adoption." : "",
"When you use Move > Foster an animal, ASM will automatically return any open foster movement before moving the animal to its new home." : "",
"Where this animal is located within the shelter" : "",
"Whippet" : "Whippet",
"White" : "Fehér",
"White German Shepherd" : "Fehér németjuhász",
"White and Black" : "Fehér és fekete",
"White and Brindle" : "Tigriscsíkos/fehér",
"White and Brown" : "Barna/fehér",
"White and Grey" : "Szürke/fehér",
"White and Liver" : "Májbarna/fehér",
"White and Tabby" : "Cirmos / fehér",
"White and Tan" : "Fehér/cser",
"White and Torti" : "Sárga/fehér",
"Will this owner give a donation?" : "Fog adományozni a tulajdonos? ",
"Wire-haired Pointing Griffon" : "Drótszőrű griffon",
"Wirehaired Terrier" : "Drótszőrű foxterrier ",
"With Vet" : "állatorvossal",
"With overnight batch" : "Egynapos adaggal",
"Withdrawal" : "Visszavonás",
"Wk" : "",
"Work" : "Munka",
"Work Phone" : "Munkahelyi telefonszám ",
"Work Types" : "",
"XXX or XX = number unique for this year" : "XXX vagy XX = egyedi szám ebben az évben",
"Xoloitzcuintle/Mexican Hairless" : "Mexikói meztelen kutya",
"YY or YYYY = current year" : "YY vagy YYYY = folyó év",
"Yellow Labrador Retriever" : "Sárga labrador retriever",
"Yellow and Grey" : "Sárga és Szürke",
"Yes" : "Igen",
"Yes/No" : "Igen/Nem",
"Yes/No/Unknown" : "Igen/Nem/Nem ismert",
"Yorkshire Terrier Yorkie" : "Yorkshire terrier",
"You can bookmark search results, animals, people and most data entry screens." : "Megjelölheti a keresési eredményeket, az állatokat, az embereket és a legtöbb adatbeviteli képernyőt.",
"You can drag and drop animals in shelter view to change their locations." : "Menhely nézetben áthúzhatja és áthelyezheti az állatokat új helyszínre. ",
"You can middle click a link to open it in a new browser tab (push the wheel on most modern mice)." : "A linkre kattintva középső kattintással megnyithatja azt egy új böngésző lapon (nyomja meg a kereket a legtöbb modern egérnél).",
"You can override the search result sort by adding one of the following to the end of your search - sort:az, sort:za, sort:mr, sort:lr" : "A keresési eredmény sorrendjét felülírhatja, ha hozzáadja a keresés végéhez a következők egyikét - csoportosit: az, csoportosit: za, csoportosit: mr, csoportosit: lr",
"You can prefix your term in the search box with a: to search only animals, p: to search only people, wl: to search waiting list entries, la: to search lost animals and fa: to search found animals." : "Előfordulhat, hogy a kifejezést a keresőmezőben a következővel írja be: a: csak állatok keresése, p: csak emberek keresése, wl: a várólistán szereplő bejegyzések keresése, la: az elveszett állatok és a fa keresése: a megtalált állatok keresése.",
"You can set a default amount for different payment types in the Settings- Lookup Data screen. Very handy when creating adoptions." : "Beállíthat egy alapértelmezett összeget a különféle fizetési módokhoz a Beállítások - Keresési adatok képernyőn. Nagyon praktikus örökbefogadások létrehozásakor.",
"You can sort tables by clicking on the column headings." : "A táblázatokat rendezheti az oszlopok fejlécére kattintva.",
"You can upload images called logo.jpg and splash.jpg to the Settings- Reports-Extra Images screen to override the login splash screen and logo in the upper left corner of the application." : "A logo.jpg és a splash.jpg nevű képeket feltöltheti a Beállítások- Jelentések-Extra képek képernyőre, hogy felülbírálja a bejelentkezési képernyőt és logot az alkalmazás bal felső sarkában.",
"You can use incoming forms to create new records or attach them to existing records." : "A bejövő nyomtatványokkal új feljegyzéseket hozhat létre, vagy csatolhatja azokat a meglévő feljegyzésekhez.",
"You can't have a return without a movement." : "Mozgatás nélkül nem lehet visszavétel.",
"You didn't specify any search criteria, so an on-shelter search was assumed." : "Nem határozott meg keresési kritériumokat, ezért a keresést a menhelyen lévő állatokra indítottuk.",
"You have unsaved changes, are you sure you want to leave this page?" : "Az oldalon el nem mentett változtatások vannak, biztosan el akarja hagyni ezt az oldalt?",
"You must supply a code." : "Meg kell adnia egy kódot. ",
"Young Adult" : "Fiatal felnőtt",
"Your CSV file should have a header row with field names ASM recognises." : "CSV-fájljának fejléc sorának az ASM által felismert mezőnevekkel kell rendelkeznie.",
"Your sheltermanager.com account is due to expire on {0}, please renew {1}" : "A sheltermanager.com fiókod {0} napján jár le, kérjük, újítd meg {1}",
"Zipcode" : "Irányítószám",
"Zipcode contains" : "Az irányítószám tartalmaz",
"[None]" : "[Egyik sem]",
"after connecting, chdir to" : "csatlakozás után, chdir to",
"and" : "és",
"are sent to" : "elküldve ",
"at" : "-kor",
"cm" : "cm",
"days" : "napok",
"estimate" : "becsült",
"filters: a:animal, p:person, wl:waitinglist, la:lostanimal, fa:foundanimal keywords: onshelter/os, notforadoption, aco, banned, donors, deceased, vets, retailers, staff, fosterers, volunteers, homecheckers, members, activelost, activefound" : "szűrők: a: állat, p: személy, wl: várólista, la: elveszett állat, fa: talált állat alapvető kulcsszavak: onshelter / os, notforadoption, aco, tiltott, adományozók, elhunyt, állatorvosok, kiskereskedők, személyzet, ideiglenes befogadók, önkéntesek, lelátogatók, tagok , aktív veszteség, aktív alap",
"inches" : "hüvelyk",
"invalid" : "érvénytelen",
"kg" : "kg",
"lb" : "font",
"less" : "kevesebb",
"mins" : "percek",
"months" : "hónapok",
"more" : "több",
"on" : "ekkor",
"or" : "vagy",
"or estimated age in years" : "vagy becsült kor években",
"oz" : "uncia",
"to" : "-ig",
"today" : "ma",
"treatments" : "kezelések",
"treatments, every" : "kezelések, összes",
"weekdays" : "hétköznapok",
"weeks" : "hetek",
"weeks after last contact." : "az utolsó kapcsolatfelvétel óta eltelt hetek",
"years" : "évek",
"yesterday" : "tegnap",
"{0} (under {1} months)" : "{0} ({1} hónap alatt)",
"{0} - {1} ({2} {3} aged {4})" : "{0} - {1} ({2} {3} {4} éves)",
"{0} - {1} {2}" : "{0} - {1} {2}",
"{0} - {1} {2} ({3}), contact {4} ({5}) - lost in {6}, postcode {7}, on {8}" : "{0} - {1} {2} ({3}), kapcsolattartó {4} ({5}) - elveszett a {6} -ben, {7} irányítószám, {8} -kor",
"{0} animals successfully updated." : "{0} állatok sikeresen frissítve",
"{0} cannot be blank" : "{0} nem lehet üres",
"{0} fine, paid" : "{0} eredmény, fizetve",
"{0} fine, unpaid" : "{0} eredmény, kifizetetlen",
"{0} incurred in costs" : "{0} felmerült költségek",
"{0} is running ({1}% complete)." : "A(z) {0} fut ({1} és # 37; kész).",
"{0} payment records created." : "{0} létrehozott fizetési nyilvántartások.",
"{0} received" : "{0} érkezett",
"{0} record(s) match the mail merge." : "{0} nyilvántartás(ok) megegyeznek a levélegyesítéssel.",
"{0} results." : "{0} találat",
"{0} rows affected." : "{0} sor érintett.",
"{0} selected" : "{0} kiválasztott ",
"{0} treatments every {1} days" : "{0} kezelés minden {1} napokon",
"{0} treatments every {1} months" : "{0} kezelés {1} havonta",
"{0} treatments every {1} weekdays" : "{0} kezelések minden {1} hétköznap",
"{0} treatments every {1} weeks" : "{0} kezelés minden {1} héten",
"{0} treatments every {1} years" : "{0} kezelés {1} évente",
"{0} {1} ({2} treatments)" : "{0} {1} ({2} kezelések)",
"{0} {1} aged {2}" : "{0} {1} {2} éves",
"{0} {1} {2} aged {3}" : "{0} {1} {2} {3} éves",
"{0} {1}: Moved from {2} to {3}" : "{0} {1}: {2} -ről {3} -re áthelyezve",
"{0} {1}: adopted by {2}" : "{0} {1}: örökbefogadva {2}által",
"{0} {1}: altered" : "{0} {1}: megváltoztatva",
"{0} {1}: available for adoption" : "{0} {1}: örökbefogadható",
"{0} {1}: died ({2})" : "{0} {1}: meghalt ({2})",
"{0} {1}: entered the shelter" : "{0} {1}: menhelyre érkezett",
"{0} {1}: escaped" : "{0} {1}: megszökött",
"{0} {1}: euthanised ({2})" : "{0} {1}: elaltatva ({2})",
"{0} {1}: fostered to {2}" : "{0} {1}: ideiglenesen befogadva {2} által",
"{0} {1}: held" : "{0} {1}: fenntartott",
"{0} {1}: microchipped" : "{0} {1}: chipezett",
"{0} {1}: not available for adoption" : "{0} {1}: nem örökbefogadható",
"{0} {1}: quarantined" : "{0} {1}: karanténba került",
"{0} {1}: received {2}" : "{0} {1}: érkezett {2}",
"{0} {1}: reclaimed by {2}" : "{0} {1}: visszakövetelve {2} által",
"{0} {1}: released" : "{0} {1}: szabadon engedve ",
"{0} {1}: reserved by {2}" : "{0} {1}: foglalva {2} által",
"{0} {1}: returned by {2}" : "{0} {1}: visszahozva {2} által",
"{0} {1}: sent to retailer {2}" : "{0} {1}: kiskereskedőhöz továbbítva {2}",
"{0} {1}: stolen" : "{0} {1}: ellopták",
"{0} {1}: tested positive for FIV" : "{0} {1}: FIV tesztje pozitív",
"{0} {1}: tested positive for FeLV" : "{0} {1}: FeLV tesztje pozitív",
"{0} {1}: tested positive for Heartworm" : "{0} {1}: szívféreg tesztje pozitív",
"{0} {1}: transferred to {2}" : "{0} {1}: átkerült a (z) {2} -be",
"{0}, Week {1}" : "{0}, hét {1}",
"{0}: Entered shelter {1}, Last changed on {2} by {3}. {4} {5} {6} aged {7}" : "{0}: Menhelyre került {1}, Utoljára {2}-on módosította {3}. {4} {5} {6} {7} éves",
"{0}: closed {1} ({2})" : "{0}: lezárva {1} ({2})",
"{0}: opened {1}" : "{0}: nyitva {1}",
"{0}: waiting list - {1}" : "{0}: várólista - {1}",
"{0}: {1} {2} - {3} {4}" : "{0}: {1} {2} - {3} {4}",
"{2}: found in {1}: {0}" : "{2}: megtalálható a {1} -ban: {0}",
"{2}: lost in {1}: {0}" : "{2}: elveszett a(z) {1} -ban: {0}",
"{plural0} animal as dead on arrival" : "{plural0} állat meghalt érkezéskor",
"{plural0} animal control call due for followup today" : "{plural0} állat kontroll hívás esedékes a mai napra",
"{plural0} animal died" : "{plural0} állat meghalt",
"{plural0} animal entered the shelter" : "{plural0} állat került a menhelyre",
"{plural0} animal has a hold ending today" : "{plural0} állat van foglalva a mai napig ",
"{plural0} animal has been on the shelter longer than {0} months" : "{plural0} Valamennyi állat, amely több mint {0} hónapja van a menhelyen.",
"{plural0} animal is not available for adoption" : "{plural0} állat nem örökbefogadható",
"{plural0} animal was adopted" : "{plural0} állatot fogadtak örökbe",
"{plural0} animal was euthanized" : "{plural0} állat elaltatva.",
"{plural0} animal was reclaimed by its owner" : "{plural0} állat visszakövetelve a tulajdonos által",
"{plural0} animal was transferred to another shelter" : "{plural0} átszállítva másik menhelyre ",
"{plural0} day." : "{plural0} nap.",
"{plural0} incomplete animal control call" : "{plural0} elvégzetlen állat kontroll hívás",
"{plural0} item of stock expires in the next month" : "{plural0} raktáron lévő termék a következő hónapban jár le",
"{plural0} item of stock has expired" : "{plural0} raktáron lévő lejárt termék",
"{plural0} medical treatment needs to be administered today" : "{plural0} gyógykezelést a mai napon kell elvégezni",
"{plural0} month." : "{plural0} hónap.",
"{plural0} new online form submission" : "{plural0} új online űrlap benyújtása",
"{plural0} person has an overdue payment" : "{plural0} személy késedelmes fizetéssel rendelkezik",
"{plural0} person with an active reservation has not been homechecked" : "{plural0} aktív foglalással rendelkező személy nem volt még lelátogatva",
"{plural0} potential match for a lost animal" : "{plural0} potenciális egyezés elveszett állat esetén",
"{plural0} recent publisher run had errors" : "{plural0} kiadói újabb futtatás hibákat tartalmazott",
"{plural0} reservation has been active over a week without adoption" : "{plural0} gazdijelölt megjelölés egy héten át aktív volt, de örökbefogadás nem történt",
"{plural0} result found in {1} seconds. Order: {2}" : "{plural0} eredmény létrehozása {1} másodperc alatt. Rendelés: {2}",
"{plural0} shelter animal has not been microchipped" : "{plural0} menhelyi állat nem lett chipezve",
"{plural0} shelter animal has people looking for them" : "{plural0} menhelyi állatok, amelyekre van érdeklődő",
"{plural0} test needs to be performed today" : "{plural0} tesztet kell a mai napon elvégezni",
"{plural0} transport does not have a driver assigned" : "{plural0} a transzporthoz nincs sofőr hozzárendelve",
"{plural0} trap is overdue for return" : "{plural0} csapdát nem hoztak vissza a megjelölt időben",
"{plural0} trial adoption has ended" : "{plural0} próbaidő lejárt ",
"{plural0} unaltered animal has been adopted in the last month" : "{plural0} az elmúlt hónapban örökbefogadott állatok",
"{plural0} undispatched animal control call" : "{plural0} el nem végzett állati kontroll hívás",
"{plural0} unpaid fine" : "{plural0} kifizetetlen bírság",
"{plural0} urgent entry on the waiting list" : "{plural0} sürgős bejegyzés a várólistán",
"{plural0} vaccination has expired" : "{plural0} lejárt oltás",
"{plural0} vaccination needs to be administered today" : "{plural0} ma esedékes oltások",
"{plural0} week." : "{plural0} hét.",
"{plural0} year." : "{plural0} év.",
"{plural1} animal control calls due for followup today" : "{plural1} mai nap esedékes állat kontroll hívások",
"{plural1} animals are not available for adoption" : "{plural1} nem örökbefogadható állat",
"{plural1} animals died" : "{plural1} meghalt állatok",
"{plural1} animals entered the shelter" : "{plural1} menhelyre került állat",
"{plural1} animals have been on the shelter longer than {0} months" : "{plural1} Valamennyi állat, amely több mint {0} hónapja van a menhelyen.",
"{plural1} animals have holds ending today" : "{plural1} gazdijelöltnek fenntartott kutya előjegyzése lejár",
"{plural1} animals were adopted" : "{plural1} örökbefogadott állat",
"{plural1} animals were dead on arrival" : "{plural1} beérkezéskor meghalt kutyák ",
"{plural1} animals were euthanized" : "{plural1} elaltatott állat",
"{plural1} animals were reclaimed by their owners" : "{plural1} eredeti gazda által visszakövetelet állat",
"{plural1} animals were transferred to other shelters" : "{plural1} állat átszállítása másik menhelyre",
"{plural1} days." : "{plural1} napok.",
"{plural1} incomplete animal control calls" : "{plural1} befejeztlen állat kontroll hívás",
"{plural1} items of stock expire in the next month" : "{plural1} raktáron lévő termék a jövő hónapban lejár",
"{plural1} items of stock have expired" : "{plural1} lejárt raktáron lévő termék",
"{plural1} medical treatments need to be administered today" : "{plural1} mai napon esedékes gyógykezelések",
"{plural1} months." : "{plural1} hónap.",
"{plural1} new online form submissions" : "{plural1} új online nyomtatvány benyújtása",
"{plural1} people have overdue payments" : "{plural1} személynek van kifizetetlen tartozása",
"{plural1} people with active reservations have not been homechecked" : "{plural1} gazdijelölt még nem lett lelátogatva",
"{plural1} potential matches for lost animals" : "{plural1} potenciális egyezések elveszett állatokra",
"{plural1} recent publisher runs had errors" : "{plural1} legutóbbi közzétételi futtatások hibásak voltak",
"{plural1} reservations have been active over a week without adoption" : "{plural1} gazdijelölt megjelölés egy héten át aktív volt, de örökbefogadás nem történt",
"{plural1} results found in {1} seconds. Order: {2}" : "{plural1} keresési találat {1} másodperc alatt. Utasítás: {2}",
"{plural1} shelter animals have not been microchipped" : "{plural1} mikrochippel nem rendelkező állat",
"{plural1} shelter animals have people looking for them" : "{plural1} menhelyi állatok, amelyekre van érdeklődő",
"{plural1} tests need to be performed today" : "{plural1} mai napon elvégzendő teszt",
"{plural1} transports do not have a driver assigned" : "{plural1} nincs sofőr hozzárendelve a szállításokhoz ",
"{plural1} traps are overdue for return" : "{plural1} csapdát nem hoztak vissza a megjelölt időben",
"{plural1} trial adoptions have ended" : "{plural1} próbaidő lejárt ",
"{plural1} unaltered animals have been adopted in the last month" : "{plural1} az elmúlt hónapban örökbefogadott állat",
"{plural1} undispatched animal control calls" : "{plural1} el nem végzett állat kontroll hívások",
"{plural1} unpaid fines" : "{plural1} kifizetetlen büntetések",
"{plural1} urgent entries on the waiting list" : "{plural1} sürgős feljegyzések a várólistán",
"{plural1} vaccinations have expired" : "{plural1} oltás lejárt",
"{plural1} vaccinations need to be administered today" : "{plural1} mai napon esedékes oltás ",
"{plural1} weeks." : "{plural1} hetek",
"{plural1} years." : "{plural1} évek.",
"{plural2} animal control calls due for followup today" : "{plural2} mai nap esedékes állat kontroll hívások",
"{plural2} animals are not available for adoption" : "{plural2} nem örökbefogadható állat",
"{plural2} animals died" : "{plural2} meghalt állat",
"{plural2} animals entered the shelter" : "{plural2} menhelyre került állat",
"{plural2} animals have been on the shelter longer than {0} months" : "{plural2} Valamennyi állat, amely több mint {0} hónapja van a menhelyen",
"{plural2} animals have holds ending today" : "{plural2} gazdijelöltnek fenntartott kutya előjegyzése lejár",
"{plural2} animals were adopted" : "{plural2} örökbefogadott állat",
"{plural2} animals were dead on arrival" : "{plural2} bekerüléskor halott állat",
"{plural2} animals were euthanized" : "{plural2} elaltatott állat",
"{plural2} animals were reclaimed by their owners" : "{plural2} eredeti gazda álltal visszakövetelt állat ",
"{plural2} animals were transferred to other shelters" : "{plural2} más menhelyre átszállított állat",
"{plural2} days." : "{plural2} napok.",
"{plural2} incomplete animal control calls" : "{plural2} befejezetlen állat kontroll hívás",
"{plural2} items of stock expire in the next month" : "{plural2} raktáron lévő termék a követekző hónapban lejár",
"{plural2} items of stock have expired" : "{plural2} raktáron lévő termék szavatossága lejárt",
"{plural2} medical treatments need to be administered today" : "{plural2} mai napon esedékes gyógykezelések",
"{plural2} months." : "{plural2} hónapok.",
"{plural2} new online form submissions" : "{plural2} új online nyomtatvány benyújtása",
"{plural2} people have overdue payments" : "{plural2} személynek van kifizetetlen tartozása ",
"{plural2} people with active reservations have not been homechecked" : "{plural2} gazdijelölt még nem lett lelátogatva",
"{plural2} potential matches for lost animals" : "{plural2} potenciális egyezések elveszett állatokra",
"{plural2} recent publisher runs had errors" : "{plural2} legutóbbi közzétételi futtatások hibásak voltak",
"{plural2} reservations have been active over a week without adoption" : "{plural2} gazdijelölt megjelölés egy héten át aktív volt, de örökbefogadás nem történt",
"{plural2} results found in {1} seconds. Order: {2}" : " {plural2} keresési találat {1} másodperc alatt. Utasítás: {2}",
"{plural2} shelter animals have not been microchipped" : "{plural2} mikrochippel nem rendelkező állat",
"{plural2} shelter animals have people looking for them" : "{plural2} menhelyi állatok, amelyekre van érdeklődő",
"{plural2} tests need to be performed today" : "{plural2} mai napon elvégzendő teszt",
"{plural2} transports do not have a driver assigned" : "{plural2} transzporthoz nincs hozzárendelve sofőr",
"{plural2} traps are overdue for return" : "{plural2} csapdát nem hoztak vissza a megjelölt időben",
"{plural2} trial adoptions have ended" : "{plural2} próbaidő vége",
"{plural2} unaltered animals have been adopted in the last month" : "{plural2} az elmúlt hónapban örökbefogadott állat",
"{plural2} undispatched animal control calls" : "{plural2} el nem végzett állat kontroll hívások",
"{plural2} unpaid fines" : "{plural2} kifizetetlen büntetések",
"{plural2} urgent entries on the waiting list" : "{plural2} sürgős feljegyzések a várólistán",
"{plural2} vaccinations have expired" : "{plural2} lejárt oltások.",
"{plural2} vaccinations need to be administered today" : "{plural2} mai nap beadandó oltások. ",
"{plural2} weeks." : "{plural2} hetek.",
"{plural2} years." : "{plural2} évek.",
"{plural3} animal control calls due for followup today" : "{plural3} mai nap elvégzendő állat ellenőrző hívások ",
"{plural3} animals are not available for adoption" : "{plural3} nem orokbefogadható állatok",
"{plural3} animals died" : "{plural3} meghalt állatok",
"{plural3} animals entered the shelter" : "{plural3} menhelyre ékrezett állatok",
"{plural3} animals have been on the shelter longer than {0} months" : "{plural3} több mint {0} hónapja a menhelyen lévő állatok",
"{plural3} animals have holds ending today" : "{plural3} mai nappal lejáró állat előjegyzések.",
"{plural3} animals were adopted" : "{plural3} orokbefogadott állatok",
"{plural3} animals were dead on arrival" : "{plural3} halva érkezett állatok",
"{plural3} animals were euthanized" : "{plural3} elaltatott állatok",
"{plural3} animals were reclaimed by their owners" : "{plural3} eredeti gazdájuk által visszakövetelt állatok",
"{plural3} animals were transferred to other shelters" : "{plural3} állatok átszállítása más menhelyekre",
"{plural3} days." : "{plural3} napok.",
"{plural3} incomplete animal control calls" : "{plural3} befejezetlen állat ellenőrző hívások",
"{plural3} items of stock expire in the next month" : "{plural3} a raktárkészletek a jövő hónapban lejárnak",
"{plural3} items of stock have expired" : "{plural3} a raktárkészletek lejártak",
"{plural3} medical treatments need to be administered today" : "{plural3} mai napra esedékes elvégzendő gyógykezelések",
"{plural3} months." : "{plural3} hónapok",
"{plural3} new online form submissions" : "{plural3} új online nyomtatvány beadása",
"{plural3} people have overdue payments" : "{plural3} személynek lejárt fizetési kötelezettsége áll fenn",
"{plural3} people with active reservations have not been homechecked" : "{plural3} aktív foglalással rendelkező személyek még nem lettek lelátogatva.",
"{plural3} potential matches for lost animals" : "{plural3} potenciális egyezések elveszett állatokra",
"{plural3} recent publisher runs had errors" : "{plural3} legutóbbi közzétételek hibásak voltak",
"{plural3} reservations have been active over a week without adoption" : "{plural3} A foglalások több mint egy hetesek, de az örökbefogadás még nem történt meg. ",
"{plural3} results found in {1} seconds. Order: {2}" : "{plural3} eredmény találat {1} másodperc. Utasítás {2}",
"{plural3} shelter animals have not been microchipped" : "{plural3} menhelyi állatok, amelyek nincsenek chipezve",
"{plural3} shelter animals have people looking for them" : "{plural3} menhelyi állatok, amelyekre van érdeklődő",
"{plural3} tests need to be performed today" : "{plural3} mai nappal elvégzendő tesztek. ",
"{plural3} transports do not have a driver assigned" : "{plural3} a transzporthoz nincs sofőr hozzárendelve.",
"{plural3} traps are overdue for return" : "{plural3} a csapdák vissza hozatalának határideje lejárt",
"{plural3} trial adoptions have ended" : "{plural3} a próbaidő lejárt.",
"{plural3} unaltered animals have been adopted in the last month" : "{plural3} az elmúlt hónapban örökbefogadott állat",
"{plural3} undispatched animal control calls" : "{plural3} el nem végzett állat ellenőrző hívások",
"{plural3} unpaid fines" : "{plural3} kifizetetlen díjak.",
"{plural3} urgent entries on the waiting list" : "{plural3} sürgős esetek a várólistán.",
"{plural3} vaccinations have expired" : "{plural3} oltások lejártak.",
"{plural3} vaccinations need to be administered today" : "{plural3} ma beadandó oltások. ",
"{plural3} weeks." : "{plural3} hetek."
}
|
bobintetley/asm3
|
src/asm3/locales/locale_hu.py
|
Python
|
gpl-3.0
| 197,966
|
[
"Amber",
"VisIt"
] |
47306a87ff22b64a1ec644611a52ec3cdc3580616c0392b980ff8a9ff8951f28
|
from uaperrors import StepError
import sys
import os
from logging import getLogger
from abstract_step import AbstractStep
logger = getLogger('uap_logger')
class Macs2(AbstractStep):
    '''
    Model-based Analysis of ChIP-Seq (MACS) is an algorithm for the
    identification of transcription factor binding sites. MACS captures the
    influence of genome complexity to evaluate the significance of enriched
    ChIP regions, and MACS improves the spatial resolution of binding sites
    through combining the information of both sequencing tag position and
    orientation. MACS can be easily used for ChIP-Seq data alone, or with
    control sample data to increase the specificity.

    https://github.com/taoliu/MACS

    typical command line for single-end data::

        macs2 callpeak --treatment <aligned-reads> [--control <aligned-reads>]
                       --name <run-id> --gsize 2.7e9
    '''

    def __init__(self, pipeline):
        """Declare connections, required tools and all MACS2 callpeak options."""
        super(Macs2, self).__init__(pipeline)

        self.set_cores(4)

        self.add_connection('in/alignments')
        # All output connections are optional because the set of files MACS2
        # produces depends on the chosen options (--broad, --nomodel, ...).
        self.add_connection(
            'out/log',
            optional=True)
        self.add_connection(
            'out/diagnosis',
            optional=True)
        self.add_connection(
            'out/model',
            optional=True)
        # Narrow peak information
        self.add_connection(
            'out/narrowpeaks',
            optional=True)
        self.add_connection(
            'out/narrowpeaks-xls',
            optional=True)
        self.add_connection(
            'out/summits',
            optional=True)
        # Broad peak information
        self.add_connection(
            'out/broadpeaks',
            optional=True)
        self.add_connection(
            'out/broadpeaks-xls',
            optional=True)
        self.add_connection(
            'out/gappedpeaks',
            optional=True)

        # Step was tested for macs2 release 2.1.1.20160309
        self.require_tool('macs2')
        # Step was tested for mkdir (GNU coreutils) release 8.25
        self.require_tool('mkdir')
        # Step was tested for mv (GNU coreutils) release 8.25
        self.require_tool('mv')

        # Options for MACS2 callpeak subcommand
        # Input file arguments:
        self.add_option('control', dict, optional=False,
                        description="Defines the controls and correspondent "
                        "treatments in a YAML hash. Hash keys are the run IDs "
                        "of the control datasets and hash values are the run "
                        "IDs of the treatment datasets.")
        self.add_option(
            'format',
            str,
            optional=True,
            default='AUTO',
            choices=[
                'AUTO',
                'ELAND',
                'ELANDMULTI',
                'ELANDMULTIPET',
                'ELANDEXPORT',
                'BED',
                'BEDPE',
                'SAM',
                'BAM',
                'BAMPE',
                'BOWTIE'],
            description="Format of tag file, can be 'ELAND', "
            "'BED', 'ELANDMULTI', 'ELANDEXPORT', 'ELANDMULTIPET' "
            "(for pair-end tags), 'SAM', 'BAM', 'BOWTIE', 'BAMPE' "
            "or 'BEDPE'. Default is 'AUTO' which will allow MACS "
            "to decide the format automatically. 'AUTO' is also "
            "useful when you combine different formats of files. "
            "Note that MACS can't detect 'BAMPE' or 'BEDPE' format "
            "with 'AUTO', and you have to implicitly specify the "
            "format for 'BAMPE' and 'BEDPE'. For more information "
            "about the formats see https://github.com/taoliu/MACS/")
        self.add_option(
            'gsize',
            str,
            optional=True,
            default='2.7e9',
            description="PLEASE assign this parameter to fit "
            "your needs! It's the mappable genome size or effective "
            "genome size which is defined as the genome size which "
            "can be sequenced. Because of the repetitive features "
            "on the chromsomes, the actual mappable genome size "
            "will be smaller than the original size, about 90% or "
            "70% of the genome size. The default hs -- 2.7e9 is "
            "recommended for UCSC human hg18 assembly. Here are "
            "all precompiled parameters for effective genome size: "
            "hs:2.7e9; mm:1.87e9; ce:9e7; dm:1.2e8")
        self.add_option(
            'tsize',
            int,
            optional=True,
            description="The size of sequencing tags. If you "
            "don't specify it, MACS will try to use the first 10 "
            "sequences from your input treatment file to determine "
            "the tag size. Specifying it will override the "
            "automatically determined tag size.")
        self.add_option(
            'bw',
            int,
            optional=True,
            description="The band width which is used to scan "
            "the genome ONLY for model building. You can set this "
            "parameter as the sonication fragment size expected "
            "from wet experiment. The previous side effect on the "
            "peak detection process has been removed. So this "
            "parameter only affects the model building.")
        self.add_option(
            'qvalue',
            float,
            optional=True,
            description="The qvalue (minimum FDR) cutoff to call "
            "significant regions. Default is 0.05. For broad marks, "
            "you can try 0.05 as cutoff. Q-values are calculated "
            "from p-values using Benjamini-Hochberg procedure.")
        self.add_option(
            'pvalue',
            float,
            optional=True,
            description="The pvalue cutoff. If 'pvalue' is "
            "specified, MACS2 will use pvalue instead of qvalue.")
        self.add_option(
            'mfold',
            str,
            optional=True,
            description="This parameter is used to select the "
            "regions within MFOLD range of high-confidence "
            "enrichment ratio against background to build model. "
            "The regions must be lower than upper limit, and higher "
            "than the lower limit of fold enrichment. DEFAULT:5,50 "
            "means using all regions not too low (>5) and not too "
            "high (<50) to build paired-peaks model. If MACS can "
            "not find more than 100 regions to build model, it will "
            "use the --extsize parameter to continue the peak "
            "detection ONLY if --fix-bimodal is set.")
        self.add_option(
            'nolambda',
            bool,
            optional=True,
            description="With this flag on, MACS will use the "
            "background lambda as local lambda. This means MACS "
            "will not consider the local bias at peak candidate "
            "regions.")
        self.add_option(
            'slocal',
            str,
            optional=True,
            description="'slocal' and 'llocal' control which two "
            "levels of regions will be checked around the peak "
            "regions to calculate the maximum lambda as local "
            "lambda. By default, MACS considers 1000bp for small "
            "local region(--slocal), and 10000bps for large local "
            "region(--llocal) which captures the bias from a long "
            "range effect like an open chromatin domain. You can "
            "tweak these according to your project. Remember that "
            "if the region is set too small, a sharp spike in the "
            "input data may kill the significant peak.")
        self.add_option(
            'llocal',
            str,
            optional=True,
            description="'slocal' and 'llocal' control which two "
            "levels of regions will be checked around the peak "
            "regions to calculate the maximum lambda as local "
            "lambda. By default, MACS considers 1000bp for small "
            "local region(--slocal), and 10000bps for large local "
            "region(--llocal) which captures the bias from a long "
            "range effect like an open chromatin domain. You can "
            "tweak these according to your project. Remember that "
            "if the region is set too small, a sharp spike in the "
            "input data may kill the significant peak.")
        self.add_option(
            'fix-bimodal',
            bool,
            optional=True,
            description="Whether turn on the auto paired-peak "
            "model process. If it's set, when MACS failed to build "
            "paired model, it will use the nomodel settings, the "
            "'--extsize' parameter to extend each tags. If set, "
            "MACS will be terminated if paried-peak model is "
            "failed.")
        self.add_option(
            'nomodel',
            bool,
            optional=True,
            description="While on, MACS will bypass building the "
            "shifting model.")
        self.add_option(
            'extsize',
            int,
            optional=True,
            description="While '--nomodel' is set, MACS uses this "
            "parameter to extend reads in 5'->3' direction to "
            "fix-sized fragments. For example, if the size of "
            "binding region for your transcription factor is 200 "
            "bp, and you want to bypass the model building by MACS, "
            "this parameter can be set as 200. This option is only "
            "valid when --nomodel is set or when MACS fails to "
            "build model and --fix-bimodal is on.")
        self.add_option(
            'shift',
            int,
            optional=True,
            # Fixed: this keyword was misspelled 'decsription', so the help
            # text was never attached to the option.
            description="Note, this is NOT the legacy --shiftsize "
            "option which is replaced by --extsize! You can set an "
            "arbitrary shift in bp here. Please Use discretion "
            "while setting it other than default value (0). When "
            "--nomodel is set, MACS will use this value to move "
            "cutting ends (5') then apply --extsize from 5' to 3' "
            "direction to extend them to fragments. When this value "
            "is negative, ends will be moved toward 3'->5' "
            "direction, otherwise 5'->3' direction. Recommended to "
            "keep it as default 0 for ChIP-Seq datasets, or -1 * "
            "half of EXTSIZE together with --extsize option for "
            "detecting enriched cutting loci such as certain "
            "DNAseI-Seq datasets. Note, you can't set values other "
            "than 0 if format is BAMPE or BEDPE for paired-end "
            "data. Default is 0. "
            "Here are some examples for combining --shift and "
            "--extsize: "
            "1. To find enriched cutting sites such as some "
            "DNAse-Seq datasets. In this case, all 5' ends of "
            "sequenced reads should be extended in both direction "
            "to smooth the pileup signals. If the wanted smoothing "
            "window is 200bps, then use '--nomodel --shift -100 "
            "--extsize 200'. "
            "2. For certain nucleosome-seq data, we need to pileup "
            "the centers of nucleosomes using a half-nucleosome "
            "size for wavelet analysis (e.g. NPS algorithm). Since "
            "the DNA wrapped on nucleosome is about 147bps, this "
            "option can be used: '--nomodel --shift 37 --extsize "
            "73'.")
        self.add_option(
            'keep-dup',
            int,
            optional=True,
            description="It controls the MACS behavior towards "
            "duplicate tags at the exact same location -- the same "
            "coordination and the same strand. The default 'auto' "
            "option makes MACS calculate the maximum tags at the "
            "exact same location based on binomal distribution "
            "using 1e-5 as pvalue cutoff; and the 'all' option "
            "keeps every tags. If an integer is given, at most this "
            "number of tags will be kept at the same location. The "
            "default is to keep one tag at the same location. "
            "Default: 1")
        self.add_option(
            'broad',
            bool,
            optional=True,
            default=False,
            description="When this flag is on, MACS will try to "
            "composite broad regions in BED12 ( a gene-model-like "
            "format ) by putting nearby highly enriched regions "
            "into a broad region with loose cutoff. The broad "
            "region is controlled by another cutoff through "
            "--broad-cutoff. The maximum length of broad region "
            "length is 4 times of d from MACS. DEFAULT: False")
        # use "broad-cutoff" only in conjuction with "broad"
        self.add_option(
            'broad-cutoff',
            float,
            optional=True,
            description="Cutoff for broad region. This option "
            "is not available unless --broad is set. If -p is set, "
            "this is a pvalue cutoff, otherwise, it's a qvalue "
            "cutoff. DEFAULT: 0.1")
        self.add_option(
            'to-large',
            bool,
            optional=True,
            description="When set, linearly scale the smaller "
            "dataset to the same depth as larger dataset, by "
            "default, the larger dataset will be scaled towards "
            "the smaller dataset. Beware, to scale up small data "
            "would cause more false positives.")
        self.add_option(
            'down-sample',
            bool,
            optional=True,
            description="When set, random sampling method will "
            "scale down the bigger sample. By default, MACS uses "
            "linear scaling. This option will make the results "
            "unstable and irreproducible since each time, random "
            "reads would be selected, especially the numbers "
            "(pileup, pvalue, qvalue) would change. Consider to "
            "use 'randsample' script before MACS2 runs instead.")
        self.add_option(
            'bdg',
            bool,
            optional=True,
            description="If this flag is on, MACS will store the "
            "fragment pileup, control lambda, -log10pvalue and "
            "-log10qvalue scores in bedGraph files. The bedGraph "
            "files will be stored in current directory named "
            "NAME+'_treat_pileup.bdg' for treatment data, "
            "NAME+'_control_lambda.bdg' for local lambda values "
            "from control, NAME+'_treat_pvalue.bdg' for Poisson "
            "pvalue scores (in -log10(pvalue) form), and "
            "NAME+'_treat_qvalue.bdg' for q-value scores from "
            "Benjamini-Hochberg-Yekutieli procedure "
            "<http://en.wikipedia.org/wiki/False_discovery_rate#Dependent_tests>")
        self.add_option(
            'call-summits',
            bool,
            optional=True,
            description="MACS will now reanalyze the shape of "
            "signal profile (p or q-score depending on cutoff "
            "setting) to deconvolve subpeaks within each peak "
            "called from general procedure. It's highly recommended "
            "to detect adjacent binding events. While used, the "
            "output subpeaks of a big peak region will have the "
            "same peak boundaries, and different scores and peak "
            "summit positions.")
        self.add_option(
            'verbose',
            int,
            default=0,
            choices=[0, 1, 2, 3],
            optional=True,
            description="If you don't want to see any message "
            "during the running of MACS, set it to 0. But the "
            "CRITICAL messages will never be hidden. If you want "
            "to see rich information like how many peaks are "
            "called for every chromosome, you can set it to 3 or "
            "larger than 3.")
        # LEGACY options
        self.add_option(
            'buffer-size',
            int,
            optional=True,
            description="LEGACY option.")
        self.add_option(
            'read-length',
            int,
            optional=True,
            description="LEGACY option.")

    def runs(self, run_ids_connections_files):
        """Declare one MACS2 run per (treatment, control) pair.

        For every treatment run ID listed under a control in the 'control'
        option, a run is declared that executes ``macs2 callpeak`` inside a
        temporary directory and then moves the expected result files to
        their final output locations.
        """
        # Compile the list of options
        options = ['format', 'gsize', 'tsize', 'bw', 'qvalue', 'pvalue',
                   'mfold', 'nolambda', 'slocal', 'llocal', 'fix-bimodal',
                   'nomodel', 'extsize', 'shift', 'keep-dup', 'broad',
                   'broad-cutoff', 'to-large', 'down-sample', 'bdg',
                   'call-summits', 'verbose',
                   # LEGACY options
                   'buffer-size', 'read-length']
        set_options = [option for option in options if
                       self.is_option_set_in_config(option)]
        option_list = list()
        for option in set_options:
            if isinstance(self.get_option(option), bool):
                # Boolean options are pure flags; emit them only when True.
                if self.get_option(option):
                    option_list.append('--%s' % option)
            else:
                option_list.append('--%s' % option)
                option_list.append(str(self.get_option(option)))

        control_samples = self.get_option('control')
        for control_id, treatment_list in control_samples.items():
            # Check for existence of control files
            control_files = list()
            if control_id != 'None':
                try:
                    control_files = run_ids_connections_files[control_id]['in/alignments']
                    # Prefix used when composing the run ID below.
                    control_id = "-" + control_id
                except KeyError:
                    raise StepError(
                        self, "No control for ID '%s' found." %
                        control_id)
            else:
                control_id = ""

            # Check for existence of treatment files
            for tr in treatment_list:
                treatments = dict()
                try:
                    treatments[tr] = run_ids_connections_files[tr]['in/alignments']
                except KeyError:
                    raise StepError(
                        self,
                        "No treatment for ID '%s' found." %
                        tr)
                # Assemble run ID
                run_id = "%s%s" % (tr, control_id)
                # Create list of input files
                input_paths = [f for l in [treatments[tr], control_files]
                               for f in l]
                with self.declare_run(run_id) as run:
                    # Create empty output connections depending on the options
                    result_files = dict()
                    if not self.is_option_set_in_config('nomodel'):
                        result_files["%s_model.r" % run_id] = run.add_output_file(
                            'model', '%s-macs2-model.r' % run_id, input_paths)
                    if not self.get_option('broad'):
                        # Result files for narrow peaks
                        narrow_peak = "%s_peaks.narrowPeak" % run_id
                        result_files[narrow_peak] = run.add_output_file(
                            'narrowpeaks',
                            '%s-macs2-narrowPeaks.narrowPeak' % run_id,
                            input_paths
                        )
                        narrow_peak_xls = "%s_peaks.xls" % run_id
                        result_files[narrow_peak_xls] = run.add_output_file(
                            'narrowpeaks-xls',
                            '%s-macs2-narrowPeaks.xls' % run_id,
                            input_paths
                        )
                        summits = "%s_summits.bed" % run_id
                        result_files[summits] = run.add_output_file(
                            'summits',
                            '%s-macs2-summits.bed' % run_id,
                            input_paths
                        )
                    else:
                        # Files which are created by using --broad
                        broad_peak = "%s_peaks.broadPeak" % run_id
                        result_files[broad_peak] = run.add_output_file(
                            'broadpeaks',
                            '%s-macs2_broadPeaks.broadPeak' % run_id,
                            input_paths
                        )
                        broad_peak_xls = "%s_peaks.xls" % run_id
                        result_files[broad_peak_xls] = run.add_output_file(
                            'broadpeaks-xls',
                            '%s-macs2-broadPeaks.xls' % run_id,
                            input_paths
                        )
                        gapped_peak = "%s_peaks.gappedPeak" % run_id
                        result_files[gapped_peak] = run.add_output_file(
                            'gappedpeaks',
                            '%s-macs2_peaks.gappedPeak' % run_id,
                            input_paths
                        )

                    # Let's compile our commands
                    # (was `temp_dir = str`, which bound the builtin type as
                    # a placeholder; the real value is set below.)
                    temp_dir = None
                    with run.new_exec_group() as macs2_exec_group:
                        # 1. Create temporary directory for MACS output
                        temp_dir = run.add_temporary_directory('macs2-out')
                        mkdir = [self.get_tool('mkdir'), temp_dir]
                        macs2_exec_group.add_command(mkdir)
                        # 2. MACS2 command
                        macs2 = [self.get_tool('macs2'), 'callpeak']
                        macs2.append('--treatment')
                        macs2.extend(treatments[tr])
                        # Append control information
                        if control_files:
                            macs2.append('--control')
                            macs2.extend(control_files)
                        # Append known info (--name, --directory)
                        macs2.extend([
                            '--name', run_id,
                            '--outdir', temp_dir
                        ])
                        macs2.extend(option_list)
                        macs2_exec_group.add_command(macs2)
                    with run.new_exec_group() as mv_exec_group:
                        for orig, dest_path in result_files.items():
                            # 3. Move file from temp directory to expected
                            #    position
                            orig_path = os.path.join(temp_dir, orig)
                            mv = [self.get_tool('mv'), orig_path, dest_path]
                            mv_exec_group.add_command(mv)
|
kmpf/uap
|
include/steps/macs2.py
|
Python
|
gpl-3.0
| 22,914
|
[
"Bowtie"
] |
d6c52a6b99b90ae4da20ad1f250fa1cd1b9ecadf6074269c348e4d3641b502d6
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=consider-iterating-dictionary
# pylint: disable=super-init-not-called
"""Text token embeddings."""
from __future__ import absolute_import
from __future__ import print_function
import io
import logging
import os
import tarfile
import warnings
import zipfile
from . import _constants as C
from . import vocab
from ... import ndarray as nd
from ... import registry
def register(embedding_cls):
    """Registers a new token embedding.

    Once an embedding is registered, we can create an instance of this embedding with
    :func:`~mxnet.contrib.text.embedding.create`.


    Examples
    --------
    >>> @mxnet.contrib.text.embedding.register
    ... class MyTextEmbed(mxnet.contrib.text.embedding._TokenEmbedding):
    ...     def __init__(self, pretrained_file_name='my_pretrain_file'):
    ...         pass
    >>> embed = mxnet.contrib.text.embedding.create('MyTokenEmbed')
    >>> print(type(embed))
    <class '__main__.MyTokenEmbed'>
    """
    # Obtain the registration function for the token-embedding registry and
    # apply it; the class is returned unchanged so this works as a decorator.
    do_register = registry.get_register_func(_TokenEmbedding, 'token embedding')
    return do_register(embedding_cls)
def create(embedding_name, **kwargs):
    """Creates an instance of token embedding.

    Creates a token embedding instance by loading embedding vectors from an externally hosted
    pre-trained token embedding file, such as those of GloVe and FastText. To get all the valid
    `embedding_name` and `pretrained_file_name`, use
    `mxnet.contrib.text.embedding.get_pretrained_file_names()`.


    Parameters
    ----------
    embedding_name : str
        The token embedding name (case-insensitive).


    Returns
    -------
    An instance of `mxnet.contrib.text.glossary._TokenEmbedding`:
        A token embedding instance that loads embedding vectors from an externally hosted
        pre-trained token embedding file.
    """
    # Look up the factory for the requested embedding in the shared registry
    # and instantiate it with the caller-supplied keyword arguments.
    factory = registry.get_create_func(_TokenEmbedding, 'token embedding')
    return factory(embedding_name, **kwargs)
def get_pretrained_file_names(embedding_name=None):
    """Get valid token embedding names and their pre-trained file names.

    To load token embedding vectors from an externally hosted pre-trained token embedding file,
    such as those of GloVe and FastText, one should use
    `mxnet.contrib.text.embedding.create(embedding_name, pretrained_file_name)`.
    This method returns all the valid names of `pretrained_file_name` for the specified
    `embedding_name`. If `embedding_name` is set to None, this method returns all the valid
    names of `embedding_name` with their associated `pretrained_file_name`.


    Parameters
    ----------
    embedding_name : str or None, default None
        The pre-trained token embedding name.


    Returns
    -------
    dict or list:
        A list of all the valid pre-trained token embedding file names (`pretrained_file_name`)
        for the specified token embedding name (`embedding_name`). If the text embedding name is
        set to None, returns a dict mapping each valid token embedding name to a list of valid
        pre-trained files (`pretrained_file_name`).
    """
    reg = registry.get_registry(_TokenEmbedding)
    if embedding_name is None:
        # Map every registered embedding name to its pre-trained file names.
        return {name: list(cls.pretrained_file_name_sha1.keys())
                for name, cls in reg.items()}
    if embedding_name not in reg:
        raise KeyError('Cannot find `embedding_name` %s. Use '
                       '`get_pretrained_file_names('
                       'embedding_name=None).keys()` to get all the valid embedding '
                       'names.' % embedding_name)
    return list(reg[embedding_name].pretrained_file_name_sha1.keys())
class _TokenEmbedding(vocab.Vocabulary):
"""Token embedding base class.
To load token embeddings from an externally hosted pre-trained token embedding file, such as
those of GloVe and FastText, use
:func:`~mxnet.contrib.text.embedding.create(embedding_name, pretrained_file_name)`.
To get all the available `embedding_name` and `pretrained_file_name`, use
:func:`~mxnet.contrib.text.embedding.get_pretrained_file_names()`.
Alternatively, to load embedding vectors from a custom pre-trained token embedding file, use
:class:`~mxnet.contrib.text.embedding.CustomEmbedding`.
Moreover, to load composite embedding vectors, such as to concatenate embedding vectors, use
:class:`~mxnet.contrib.text.embedding.CompositeEmbedding`.
For every unknown token, if its representation `self.unknown_token` is encountered in the
pre-trained token embedding file, index 0 of `self.idx_to_vec` maps to the pre-trained token
embedding vector loaded from the file; otherwise, index 0 of `self.idx_to_vec` maps to the
token embedding vector initialized by `init_unknown_vec`.
If a token is encountered multiple times in the pre-trained token embedding file, only the
first-encountered token embedding vector will be loaded and the rest will be skipped.
The indexed tokens in a text token embedding may come from a vocabulary or from the loaded
embedding vectors. In the former case, only the indexed tokens in a vocabulary are associated
with the loaded embedding vectors, such as loaded from a pre-trained token embedding file. In
the later case, all the tokens from the loaded embedding vectors, such as loaded from a
pre-trained token embedding file, are taken as the indexed tokens of the embedding.
Properties
----------
token_to_idx : dict mapping str to int
A dict mapping each token to its index integer.
idx_to_token : list of strs
A list of indexed tokens where the list indices and the token indices are aligned.
unknown_token : hashable object
The representation for any unknown token. In other words, any unknown token will be indexed
as the same representation.
reserved_tokens : list of strs or None
A list of reserved tokens that will always be indexed.
vec_len : int
The length of the embedding vector for each token.
idx_to_vec : mxnet.ndarray.NDArray
For all the indexed tokens in this embedding, this NDArray maps each token's index to an
embedding vector. The largest valid index maps to the initialized embedding vector for every
reserved token, such as an unknown_token token and a padding token.
"""
    def __init__(self, **kwargs):
        # Pure pass-through: all indexing state (tokens, indices, unknown and
        # reserved tokens) is set up by the vocab.Vocabulary base class.
        super(_TokenEmbedding, self).__init__(**kwargs)
    @classmethod
    def _get_download_file_name(cls, pretrained_file_name):
        # By default the remote file to download has the same name as the
        # pre-trained file; subclasses override this when the hosted file
        # (e.g. an archive) is named differently.
        return pretrained_file_name
    @classmethod
    def _get_pretrained_file_url(cls, pretrained_file_name):
        """Return the download URL for `pretrained_file_name` of this embedding class."""
        # The repository root can be overridden (e.g. to use a mirror) via the
        # MXNET_GLUON_REPO environment variable.
        repo_url = os.environ.get('MXNET_GLUON_REPO', C.APACHE_REPO_URL)
        # The embedding's lowercased class name is the directory on the server.
        embedding_cls = cls.__name__.lower()
        # NOTE(review): repo_url is assumed to end with '/' — confirm
        # C.APACHE_REPO_URL's value.
        url_format = '{repo_url}gluon/embeddings/{cls}/{file_name}'
        return url_format.format(repo_url=repo_url, cls=embedding_cls,
                                 file_name=cls._get_download_file_name(pretrained_file_name))
    @classmethod
    def _get_pretrained_file(cls, embedding_root, pretrained_file_name):
        """Download (if needed) and return the local path of a pre-trained file.

        The file is cached under ``embedding_root/<class name>/``. If the
        cached copy is missing or fails its SHA-1 check, the hosted file is
        downloaded and, when it is a ``.zip`` or ``.gz`` archive, extracted
        in place.
        """
        # Imported lazily to avoid a hard dependency at module import time.
        from ...gluon.utils import check_sha1, download
        embedding_cls = cls.__name__.lower()
        embedding_root = os.path.expanduser(embedding_root)
        url = cls._get_pretrained_file_url(pretrained_file_name)
        embedding_dir = os.path.join(embedding_root, embedding_cls)
        pretrained_file_path = os.path.join(embedding_dir, pretrained_file_name)
        downloaded_file = os.path.basename(url)
        downloaded_file_path = os.path.join(embedding_dir, downloaded_file)
        # Hash of the final (possibly extracted) embedding file.
        expected_file_hash = cls.pretrained_file_name_sha1[pretrained_file_name]
        # If the class declares archive hashes, the downloaded artifact is an
        # archive with its own hash; otherwise the download is the file itself.
        if hasattr(cls, 'pretrained_archive_name_sha1'):
            expected_downloaded_hash = \
                cls.pretrained_archive_name_sha1[downloaded_file]
        else:
            expected_downloaded_hash = expected_file_hash
        if not os.path.exists(pretrained_file_path) \
           or not check_sha1(pretrained_file_path, expected_file_hash):
            download(url, downloaded_file_path, sha1_hash=expected_downloaded_hash)
            ext = os.path.splitext(downloaded_file)[1]
            # NOTE(review): extractall() on a downloaded archive extracts
            # member paths as-is; the SHA-1 check above is the only guard
            # against a tampered archive (see tarfile docs on extraction
            # filters) — confirm this is acceptable.
            if ext == '.zip':
                with zipfile.ZipFile(downloaded_file_path, 'r') as zf:
                    zf.extractall(embedding_dir)
            elif ext == '.gz':
                with tarfile.open(downloaded_file_path, 'r:gz') as tar:
                    tar.extractall(path=embedding_dir)
        return pretrained_file_path
    def _load_embedding(self, pretrained_file_path, elem_delim, init_unknown_vec, encoding='utf8'):
        """Load embedding vectors from the pre-trained token embedding file.


        For every unknown token, if its representation `self.unknown_token` is encountered in the
        pre-trained token embedding file, index 0 of `self.idx_to_vec` maps to the pre-trained token
        embedding vector loaded from the file; otherwise, index 0 of `self.idx_to_vec` maps to the
        text embedding vector initialized by `init_unknown_vec`.

        If a token is encountered multiple times in the pre-trained text embedding file, only the
        first-encountered token embedding vector will be loaded and the rest will be skipped.
        """
        pretrained_file_path = os.path.expanduser(pretrained_file_path)
        if not os.path.isfile(pretrained_file_path):
            raise ValueError('`pretrained_file_path` must be a valid path to '
                             'the pre-trained token embedding file.')
        logging.info('Loading pre-trained token embedding vectors from %s', pretrained_file_path)
        # Vector length is fixed by the first regular (non-header,
        # non-duplicate) line of the file.
        vec_len = None
        # Flat list of floats; reshaped into (num_tokens, vec_len) at the end.
        all_elems = []
        # Tokens seen so far, used to detect and skip duplicate entries.
        tokens = set()
        loaded_unknown_vec = None
        line_num = 0
        with io.open(pretrained_file_path, 'r', encoding=encoding) as f:
            for line in f:
                line_num += 1
                elems = line.rstrip().split(elem_delim)
                assert len(elems) > 1, 'At line %d of the pre-trained text embedding file: the ' \
                                       'data format of the pre-trained token embedding file %s ' \
                                       'is unexpected.' % (line_num, pretrained_file_path)
                token, elems = elems[0], [float(i) for i in elems[1:]]
                if token == self.unknown_token and loaded_unknown_vec is None:
                    # Keep the file's own vector for the unknown token; only
                    # the first occurrence wins.
                    loaded_unknown_vec = elems
                    tokens.add(self.unknown_token)
                elif token in tokens:
                    warnings.warn('At line %d of the pre-trained token embedding file: the '
                                  'embedding vector for token %s has been loaded and a duplicate '
                                  'embedding for the same token is seen and skipped.' %
                                  (line_num, token))
                elif len(elems) == 1:
                    # A single trailing number is most likely a "<count> <dim>"
                    # style header (e.g. word2vec text format).
                    warnings.warn('At line %d of the pre-trained text embedding file: token %s '
                                  'with 1-dimensional vector %s is likely a header and is '
                                  'skipped.' % (line_num, token, elems))
                else:
                    if vec_len is None:
                        vec_len = len(elems)
                        # Reserve a vector slot for the unknown token at the very beginning because
                        # the unknown index is 0.
                        all_elems.extend([0] * vec_len)
                    else:
                        assert len(elems) == vec_len, \
                            'At line %d of the pre-trained token embedding file: the dimension ' \
                            'of token %s is %d but the dimension of previous tokens is %d. ' \
                            'Dimensions of all the tokens must be the same.' \
                            % (line_num, token, len(elems), vec_len)
                    all_elems.extend(elems)
                    self._idx_to_token.append(token)
                    self._token_to_idx[token] = len(self._idx_to_token) - 1
                    tokens.add(token)
        self._vec_len = vec_len
        self._idx_to_vec = nd.array(all_elems).reshape((-1, self.vec_len))
        # Index 0 (C.UNKNOWN_IDX) gets the file's unknown vector if one was
        # found, otherwise the vector produced by init_unknown_vec.
        if loaded_unknown_vec is None:
            self._idx_to_vec[C.UNKNOWN_IDX] = init_unknown_vec(shape=self.vec_len)
        else:
            self._idx_to_vec[C.UNKNOWN_IDX] = nd.array(loaded_unknown_vec)
def _index_tokens_from_vocabulary(self, vocabulary):
self._token_to_idx = vocabulary.token_to_idx.copy() \
if vocabulary.token_to_idx is not None else None
self._idx_to_token = vocabulary.idx_to_token[:] \
if vocabulary.idx_to_token is not None else None
self._unknown_token = vocabulary.unknown_token
self._reserved_tokens = vocabulary.reserved_tokens[:] \
if vocabulary.reserved_tokens is not None else None
    def _set_idx_to_vec_by_embeddings(self, token_embeddings, vocab_len, vocab_idx_to_token):
        """Sets the mapping between token indices and token embedding vectors.


        Parameters
        ----------
        token_embeddings : instance or list `mxnet.contrib.text.embedding._TokenEmbedding`
            One or multiple pre-trained token embeddings to load. If it is a list of multiple
            embeddings, these embedding vectors will be concatenated for each token.
        vocab_len : int
            Length of vocabulary whose tokens are indexed in the token embedding.
        vocab_idx_to_token: list of str
            A list of indexed tokens in the vocabulary. These tokens are indexed in the token
            embedding.
        """
        # Resulting width is the sum of the widths of all source embeddings.
        new_vec_len = sum(embed.vec_len for embed in token_embeddings)
        new_idx_to_vec = nd.zeros(shape=(vocab_len, new_vec_len))
        col_start = 0
        # Concatenate all the embedding vectors in token_embeddings.
        for embed in token_embeddings:
            col_end = col_start + embed.vec_len
            # Concatenate vectors of the unknown token (row 0 by convention).
            new_idx_to_vec[0, col_start:col_end] = embed.idx_to_vec[0]
            new_idx_to_vec[1:, col_start:col_end] = embed.get_vecs_by_tokens(vocab_idx_to_token[1:])
            col_start = col_end
        self._vec_len = new_vec_len
        self._idx_to_vec = new_idx_to_vec
def _build_embedding_for_vocabulary(self, vocabulary):
if vocabulary is not None:
assert isinstance(vocabulary, vocab.Vocabulary), \
'The argument `vocabulary` must be an instance of ' \
'mxnet.contrib.text.vocab.Vocabulary.'
# Set _idx_to_vec so that indices of tokens from vocabulary are associated with the
# loaded token embedding vectors.
self._set_idx_to_vec_by_embeddings([self], len(vocabulary), vocabulary.idx_to_token)
# Index tokens from vocabulary.
self._index_tokens_from_vocabulary(vocabulary)
    @property
    def vec_len(self):
        # Length of the embedding vector for each indexed token.
        return self._vec_len
    @property
    def idx_to_vec(self):
        # NDArray mapping each token index to its embedding vector.
        return self._idx_to_vec
def get_vecs_by_tokens(self, tokens, lower_case_backup=False):
"""Look up embedding vectors of tokens.
Parameters
----------
tokens : str or list of strs
A token or a list of tokens.
lower_case_backup : bool, default False
If False, each token in the original case will be looked up; if True, each token in the
original case will be looked up first, if not found in the keys of the property
`token_to_idx`, the token in the lower case will be looked up.
Returns
-------
mxnet.ndarray.NDArray:
The embedding vector(s) of the token(s). According to numpy conventions, if `tokens` is
a string, returns a 1-D NDArray of shape `self.vec_len`; if `tokens` is a list of
strings, returns a 2-D NDArray of shape=(len(tokens), self.vec_len).
"""
to_reduce = False
if not isinstance(tokens, list):
tokens = [tokens]
to_reduce = True
if not lower_case_backup:
indices = [self.token_to_idx.get(token, C.UNKNOWN_IDX) for token in tokens]
else:
indices = [self.token_to_idx[token] if token in self.token_to_idx
else self.token_to_idx.get(token.lower(), C.UNKNOWN_IDX)
for token in tokens]
vecs = nd.Embedding(nd.array(indices), self.idx_to_vec, self.idx_to_vec.shape[0],
self.idx_to_vec.shape[1])
return vecs[0] if to_reduce else vecs
def update_token_vectors(self, tokens, new_vectors):
    """Updates embedding vectors for tokens.

    Parameters
    ----------
    tokens : str or a list of strs
        A token or a list of tokens whose embedding vector are to be updated.
    new_vectors : mxnet.ndarray.NDArray
        An NDArray to be assigned to the embedding vectors of `tokens`. Its length must be equal
        to the number of `tokens` and its width must be equal to the dimension of embeddings of
        the glossary. If `tokens` is a singleton, it must be 1-D or 2-D. If `tokens` is a list
        of multiple strings, it must be 2-D.

    Raises
    ------
    ValueError
        If any token in `tokens` is not indexed (unknown tokens must be named
        explicitly as the unknown_token representation).
    """
    assert self.idx_to_vec is not None, 'The property `idx_to_vec` has not been properly set.'
    # Normalize `tokens` to a list and `new_vectors` to a 2-D NDArray.
    if not isinstance(tokens, list) or len(tokens) == 1:
        assert isinstance(new_vectors, nd.NDArray) and len(new_vectors.shape) in [1, 2], \
            '`new_vectors` must be a 1-D or 2-D NDArray if `tokens` is a singleton.'
        if not isinstance(tokens, list):
            tokens = [tokens]
        if len(new_vectors.shape) == 1:
            new_vectors = new_vectors.expand_dims(0)
    else:
        assert isinstance(new_vectors, nd.NDArray) and len(new_vectors.shape) == 2, \
            '`new_vectors` must be a 2-D NDArray if `tokens` is a list of multiple strings.'
    assert new_vectors.shape == (len(tokens), self.vec_len), \
        'The length of new_vectors must be equal to the number of tokens and the width of' \
        'new_vectors must be equal to the dimension of embeddings of the glossary.'
    # Resolve every token to an index up front so an unknown token aborts
    # the update before any vector is written.
    indices = []
    for token in tokens:
        if token in self.token_to_idx:
            indices.append(self.token_to_idx[token])
        else:
            raise ValueError('Token %s is unknown. To update the embedding vector for an '
                             'unknown token, please specify it explicitly as the '
                             '`unknown_token` %s in `tokens`. This is to avoid unintended '
                             'updates.' % (token, self.idx_to_token[C.UNKNOWN_IDX]))
    # In-place scatter assignment into the embedding matrix.
    self._idx_to_vec[nd.array(indices)] = new_vectors
@classmethod
def _check_pretrained_file_names(cls, pretrained_file_name):
    """Validate a pre-trained token embedding file name.

    Raises KeyError (listing the valid choices) when `pretrained_file_name`
    is not a known pre-trained file for this embedding class.
    """
    if pretrained_file_name in cls.pretrained_file_name_sha1:
        return
    embedding_name = cls.__name__.lower()
    raise KeyError('Cannot find pretrained file %s for token embedding %s. Valid '
                   'pretrained files for embedding %s: %s' %
                   (pretrained_file_name, embedding_name, embedding_name,
                    ', '.join(cls.pretrained_file_name_sha1.keys())))
@register
class GloVe(_TokenEmbedding):
    """The GloVe word embedding.

    GloVe is an unsupervised learning algorithm for obtaining vector representations for words.
    Training is performed on aggregated global word-word co-occurrence statistics from a corpus, and
    the resulting representations showcase interesting linear substructures of the word vector
    space. (Source from https://nlp.stanford.edu/projects/glove/)

    Reference:
    GloVe: Global Vectors for Word Representation.
    Jeffrey Pennington, Richard Socher, and Christopher D. Manning.
    https://nlp.stanford.edu/pubs/glove.pdf

    Website:
    https://nlp.stanford.edu/projects/glove/

    To get the updated URLs to the externally hosted pre-trained token embedding
    files, visit https://nlp.stanford.edu/projects/glove/

    License for pre-trained embeddings:
    https://opendatacommons.org/licenses/pddl/

    Parameters
    ----------
    pretrained_file_name : str, default 'glove.840B.300d.txt'
        The name of the pre-trained token embedding file.
    embedding_root : str, default os.path.join('~', '.mxnet', 'embeddings')
        The root directory for storing embedding-related files.
    init_unknown_vec : callback
        The callback used to initialize the embedding vector for the unknown token.
    vocabulary : :class:`~mxnet.contrib.text.vocab.Vocabulary`, default None
        It contains the tokens to index. Each indexed token will be associated with the loaded
        embedding vectors, such as loaded from a pre-trained token embedding file. If None, all the
        tokens from the loaded embedding vectors, such as loaded from a pre-trained token embedding
        file, will be indexed.

    Properties
    ----------
    token_to_idx : dict mapping str to int
        A dict mapping each token to its index integer.
    idx_to_token : list of strs
        A list of indexed tokens where the list indices and the token indices are aligned.
    unknown_token : hashable object
        The representation for any unknown token. In other words, any unknown token will be indexed
        as the same representation.
    reserved_tokens : list of strs or None
        A list of reserved tokens that will always be indexed.
    vec_len : int
        The length of the embedding vector for each token.
    idx_to_vec : mxnet.ndarray.NDArray
        For all the indexed tokens in this embedding, this NDArray maps each token's index to an
        embedding vector. The largest valid index maps to the initialized embedding vector for every
        reserved token, such as an unknown_token token and a padding token.
    """
    # NOTE(review): `pretrained_archive_name_sha1` is assigned
    # C.GLOVE_PRETRAINED_FILE_SHA1 while `pretrained_file_name_sha1` gets
    # C.GLOVE_PRETRAINED_ARCHIVE_SHA1 -- the constant names look swapped
    # relative to these attribute names. Verify against
    # mxnet.contrib.text.constants before relying on either mapping.
    # Map a pre-trained token embedding archive file and its SHA-1 hash.
    pretrained_archive_name_sha1 = C.GLOVE_PRETRAINED_FILE_SHA1
    # Map a pre-trained token embedding file and its SHA-1 hash.
    pretrained_file_name_sha1 = C.GLOVE_PRETRAINED_ARCHIVE_SHA1

    @classmethod
    def _get_download_file_name(cls, pretrained_file_name):
        # Map a pre-trained embedding file to its archive to download,
        # keyed on the corpus component of the file name (e.g. '840B').
        src_archive = {archive.split('.')[1]: archive for archive in
                       GloVe.pretrained_archive_name_sha1.keys()}
        archive = src_archive[pretrained_file_name.split('.')[1]]
        return archive

    def __init__(self, pretrained_file_name='glove.840B.300d.txt',
                 embedding_root=os.path.join('~', '.mxnet', 'embeddings'),
                 init_unknown_vec=nd.zeros, vocabulary=None, **kwargs):
        GloVe._check_pretrained_file_names(pretrained_file_name)
        super(GloVe, self).__init__(**kwargs)
        pretrained_file_path = GloVe._get_pretrained_file(embedding_root, pretrained_file_name)
        # GloVe files are space-delimited.
        self._load_embedding(pretrained_file_path, ' ', init_unknown_vec)
        if vocabulary is not None:
            self._build_embedding_for_vocabulary(vocabulary)
@register
class FastText(_TokenEmbedding):
    """The fastText word embedding.

    FastText is an open-source, free, lightweight library that allows users to learn text
    representations and text classifiers. It works on standard, generic hardware. Models can later
    be reduced in size to even fit on mobile devices. (Source from https://fasttext.cc/)

    References:
    Enriching Word Vectors with Subword Information.
    Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov.
    https://arxiv.org/abs/1607.04606

    Bag of Tricks for Efficient Text Classification.
    Armand Joulin, Edouard Grave, Piotr Bojanowski, and Tomas Mikolov.
    https://arxiv.org/abs/1607.01759

    FastText.zip: Compressing text classification models.
    Armand Joulin, Edouard Grave, Piotr Bojanowski, Matthijs Douze, Herve Jegou,
    and Tomas Mikolov.
    https://arxiv.org/abs/1612.03651

    For 'wiki.multi' embeddings:
    Word Translation Without Parallel Data
    Alexis Conneau, Guillaume Lample, Marc'Aurelio Ranzato, Ludovic Denoyer,
    and Herve Jegou.
    https://arxiv.org/abs/1710.04087

    Website:
    https://fasttext.cc/

    To get the updated URLs to the externally hosted pre-trained token embedding files, visit
    https://github.com/facebookresearch/fastText/blob/master/pretrained-vectors.md

    License for pre-trained embeddings:
    https://creativecommons.org/licenses/by-sa/3.0/

    Parameters
    ----------
    pretrained_file_name : str, default 'wiki.simple.vec'
        The name of the pre-trained token embedding file.
    embedding_root : str, default os.path.join('~', '.mxnet', 'embeddings')
        The root directory for storing embedding-related files.
    init_unknown_vec : callback
        The callback used to initialize the embedding vector for the unknown token.
    vocabulary : :class:`~mxnet.contrib.text.vocab.Vocabulary`, default None
        It contains the tokens to index. Each indexed token will be associated with the loaded
        embedding vectors, such as loaded from a pre-trained token embedding file. If None, all the
        tokens from the loaded embedding vectors, such as loaded from a pre-trained token embedding
        file, will be indexed.

    Properties
    ----------
    token_to_idx : dict mapping str to int
        A dict mapping each token to its index integer.
    idx_to_token : list of strs
        A list of indexed tokens where the list indices and the token indices are aligned.
    unknown_token : hashable object
        The representation for any unknown token. In other words, any unknown token will be indexed
        as the same representation.
    reserved_tokens : list of strs or None
        A list of reserved tokens that will always be indexed.
    vec_len : int
        The length of the embedding vector for each token.
    idx_to_vec : mxnet.ndarray.NDArray
        For all the indexed tokens in this embedding, this NDArray maps each token's index to an
        embedding vector. The largest valid index maps to the initialized embedding vector for every
        reserved token, such as an unknown_token token and a padding token.
    """
    # Map a pre-trained token embedding archive file and its SHA-1 hash.
    pretrained_archive_name_sha1 = C.FAST_TEXT_ARCHIVE_SHA1
    # Map a pre-trained token embedding file and its SHA-1 hash.
    pretrained_file_name_sha1 = C.FAST_TEXT_FILE_SHA1

    @classmethod
    def _get_download_file_name(cls, pretrained_file_name):
        # Map a pre-trained embedding file to its archive to download:
        # the archive shares the base name, with a '.zip' extension.
        return '.'.join(pretrained_file_name.split('.')[:-1])+'.zip'

    def __init__(self, pretrained_file_name='wiki.simple.vec',
                 embedding_root=os.path.join('~', '.mxnet', 'embeddings'),
                 init_unknown_vec=nd.zeros, vocabulary=None, **kwargs):
        FastText._check_pretrained_file_names(pretrained_file_name)
        super(FastText, self).__init__(**kwargs)
        pretrained_file_path = FastText._get_pretrained_file(embedding_root, pretrained_file_name)
        # fastText .vec files are space-delimited.
        self._load_embedding(pretrained_file_path, ' ', init_unknown_vec)
        if vocabulary is not None:
            self._build_embedding_for_vocabulary(vocabulary)
class CustomEmbedding(_TokenEmbedding):
    """User-defined token embedding.

    This is to load embedding vectors from a user-defined pre-trained text embedding file.

    Denote by '[ed]' the argument `elem_delim`. Denote by [v_ij] the j-th element of the token
    embedding vector for [token_i], the expected format of a custom pre-trained token embedding file
    is:

    '[token_1][ed][v_11][ed][v_12][ed]...[ed][v_1k]\\\\n[token_2][ed][v_21][ed][v_22][ed]...[ed]
    [v_2k]\\\\n...'

    where k is the length of the embedding vector `vec_len`.

    Parameters
    ----------
    pretrained_file_path : str
        The path to the custom pre-trained token embedding file.
    elem_delim : str, default ' '
        The delimiter for splitting a token and every embedding vector element value on the same
        line of the custom pre-trained token embedding file.
    encoding : str, default 'utf8'
        The encoding scheme for reading the custom pre-trained token embedding file.
    init_unknown_vec : callback
        The callback used to initialize the embedding vector for the unknown token.
    vocabulary : :class:`~mxnet.contrib.text.vocab.Vocabulary`, default None
        It contains the tokens to index. Each indexed token will be associated with the loaded
        embedding vectors, such as loaded from a pre-trained token embedding file. If None, all the
        tokens from the loaded embedding vectors, such as loaded from a pre-trained token embedding
        file, will be indexed.

    Properties
    ----------
    token_to_idx : dict mapping str to int
        A dict mapping each token to its index integer.
    idx_to_token : list of strs
        A list of indexed tokens where the list indices and the token indices are aligned.
    unknown_token : hashable object
        The representation for any unknown token. In other words, any unknown token will be indexed
        as the same representation.
    reserved_tokens : list of strs or None
        A list of reserved tokens that will always be indexed.
    vec_len : int
        The length of the embedding vector for each token.
    idx_to_vec : mxnet.ndarray.NDArray
        For all the indexed tokens in this embedding, this NDArray maps each token's index to an
        embedding vector. The largest valid index maps to the initialized embedding vector for every
        reserved token, such as an unknown_token token and a padding token.
    """

    def __init__(self, pretrained_file_path, elem_delim=' ', encoding='utf8',
                 init_unknown_vec=nd.zeros, vocabulary=None, **kwargs):
        super(CustomEmbedding, self).__init__(**kwargs)
        # Load all vectors from the user-supplied file.
        self._load_embedding(pretrained_file_path, elem_delim, init_unknown_vec, encoding)
        # Optionally re-index against a user-supplied vocabulary.
        if vocabulary is not None:
            self._build_embedding_for_vocabulary(vocabulary)
class CompositeEmbedding(_TokenEmbedding):
    """Composite token embeddings.

    For each indexed token in a vocabulary, multiple embedding vectors, such as concatenated
    multiple embedding vectors, will be associated with it. Such embedding vectors can be loaded
    from externally hosted or custom pre-trained token embedding files, such as via token embedding
    instances.

    Parameters
    ----------
    vocabulary : :class:`~mxnet.contrib.text.vocab.Vocabulary`
        For each indexed token in a vocabulary, multiple embedding vectors, such as concatenated
        multiple embedding vectors, will be associated with it.
    token_embeddings : instance or list of `mxnet.contrib.text.embedding._TokenEmbedding`
        One or multiple pre-trained token embeddings to load. If it is a list of multiple
        embeddings, these embedding vectors will be concatenated for each token.

    Properties
    ----------
    token_to_idx : dict mapping str to int
        A dict mapping each token to its index integer.
    idx_to_token : list of strs
        A list of indexed tokens where the list indices and the token indices are aligned.
    unknown_token : hashable object
        The representation for any unknown token. In other words, any unknown token will be indexed
        as the same representation.
    reserved_tokens : list of strs or None
        A list of reserved tokens that will always be indexed.
    vec_len : int
        The length of the embedding vector for each token.
    idx_to_vec : mxnet.ndarray.NDArray
        For all the indexed tokens in this embedding, this NDArray maps each token's index to an
        embedding vector. The largest valid index maps to the initialized embedding vector for every
        reserved token, such as an unknown_token token and a padding token.
    """

    def __init__(self, vocabulary, token_embeddings):
        # Sanity checks.
        # Fixed: the message previously referenced the obsolete module path
        # `mxnet.contrib.text.indexer.Vocabulary`.
        assert isinstance(vocabulary, vocab.Vocabulary), \
            'The argument `vocabulary` must be an instance of ' \
            'mxnet.contrib.text.vocab.Vocabulary.'
        if not isinstance(token_embeddings, list):
            token_embeddings = [token_embeddings]
        # Fixed: the message previously named `TextEmbedding` (the class is
        # `_TokenEmbedding`) and was missing a space ("will beloaded").
        for embed in token_embeddings:
            assert isinstance(embed, _TokenEmbedding), \
                'The argument `token_embeddings` must be an instance or a list of instances ' \
                'of `mxnet.contrib.text.embedding._TokenEmbedding` whose embedding vectors ' \
                'will be loaded or concatenated-then-loaded to map to the indexed tokens.'
        # Index tokens.
        self._index_tokens_from_vocabulary(vocabulary)
        # Set _idx_to_vec so that indices of tokens from keys of `counter` are associated with token
        # embedding vectors from `token_embeddings`.
        self._set_idx_to_vec_by_embeddings(token_embeddings, len(self), self.idx_to_token)
|
jiajiechen/mxnet
|
python/mxnet/contrib/text/embedding.py
|
Python
|
apache-2.0
| 34,598
|
[
"VisIt"
] |
c6b3e0dfd9cce8ed621863f7fc3d1e7cd6fe6f72fd9902c3cdee44f8c8c17ec3
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from . import scf_response
|
rmcgibbo/psi4public
|
psi4/driver/procrouting/response/__init__.py
|
Python
|
lgpl-3.0
| 941
|
[
"Psi4"
] |
c6edc5ff89dbec5f7d80b85d285bb7f5af5bb5cb5c79969711c39b8d5eeca1db
|
# Copyright 2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# Gavin E. Crooks 2001-11-03
# Minor extensions, some bug fixes, and major changes to the interface
import re
"""A collection of residues from a PDB structure."""
# Matches an optional 4-character PDB id followed by the rest of the description.
_pdbid_re = re.compile(r"^(\w\w\w\w)(?:$|\s+|_)(.*)")
# Matches one chain fragment, e.g. "A:10-20", "B:", "(1-100)".
_fragment_re = re.compile(r"\(?(\w:)?(-?\w*)-?(-?\w*)\)?(.*)")


class Residues:
    """A collection of residues from a PDB structure.

    This class provides code to work with SCOP domain definitions. These
    are concisely expressed as one or more chain fragments. For example,
    "(1bba A:10-20,B:)" indicates residue 10 through 20 (inclusive) of
    chain A, and every residue of chain B in the pdb structure 1bba. The pdb
    id and brackets are optional. In addition "-" indicates every residue of
    a pdb structure with one unnamed chain.

    Start and end residue ids consist of the residue sequence number and an
    optional single letter insertion code. e.g. "12", "-1", "1a", "1000"

    pdbid -- An optional PDB id, e.g. "1bba"

    fragments -- A sequence of tuples (chainID, startResID, endResID)
    """

    def __init__(self, str=None):
        # NOTE: the parameter name `str` shadows the builtin but is kept for
        # backward compatibility with existing keyword callers.
        self.pdbid = ''
        self.fragments = ()
        if str is not None:
            self._parse(str)

    def _parse(self, text):
        """Parse a SCOP residue description such as "1bba A:10-20,B:".

        Raises ValueError for fragments that do not match the expected format.
        (Fixed: the original used Python-2-only `raise ValueError, msg` syntax.)
        """
        text = text.strip()
        # Is there a pdbid at the front? e.g. 1bba A:1-100
        m = _pdbid_re.match(text)
        if m is not None:
            self.pdbid = m.group(1)
            text = m.group(2)  # everything after the pdb id
        if text == '' or text == '-' or text == '(-)':
            # No fragments: the domain covers the whole sequence.
            return
        fragments = []
        for l in text.split(","):
            m = _fragment_re.match(l)
            if m is None:
                raise ValueError("I don't understand the format of %s" % l)
            chain, start, end, postfix = m.groups()
            if postfix != "":
                raise ValueError("I don't understand the format of %s" % l)
            if chain:
                if chain[-1] != ':':
                    raise ValueError("I don't understand the chain in %s" % l)
                chain = chain[:-1]  # chop off the ':'
            else:
                chain = ""
            fragments.append((chain, start, end))
        self.fragments = tuple(fragments)

    def __str__(self):
        """Render back to the compact SCOP fragment notation."""
        prefix = ""
        if self.pdbid:
            prefix = self.pdbid + ' '
        if not self.fragments:
            return prefix + '-'
        strs = []
        for chain, start, end in self.fragments:
            s = []
            if chain:
                s.append("%s:" % chain)
            if start:
                s.append("%s-%s" % (start, end))
            strs.append("".join(s))
        return prefix + ",".join(strs)
|
dbmi-pitt/DIKB-Micropublication
|
scripts/mp-scripts/Bio/SCOP/Residues.py
|
Python
|
apache-2.0
| 2,928
|
[
"Biopython"
] |
d8b91334615b5c8fe227c3f7eac7783f8984dbed0724fd23c88d80e8bf40589c
|
#!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Utilities for the modular DevTools build.
"""
from os import path
import os
try:
import simplejson as json
except ImportError:
import json
def read_file(filename):
    """Return the entire text contents of `filename` (path is normalized)."""
    # Fixed: the local was named `input`, shadowing the builtin.
    with open(path.normpath(filename), 'rt') as in_file:
        return in_file.read()
def write_file(filename, content):
    """Write `content` to `filename`, replacing any existing file.

    An existing file is removed first rather than truncated in place.
    """
    if path.exists(filename):
        os.remove(filename)
    with open(filename, 'wt') as out:
        out.write(content)
def bail_error(message):
    """Abort the build by raising an Exception carrying `message`."""
    raise Exception(message)
def concatenate_scripts(file_names, module_dir, output_dir, output):
    """Concatenate the named scripts into `output` with a banner comment each.

    Falls back to the generated copy under `output_dir` when a script is not
    present in `module_dir`. A trailing ';' guards against scripts that do not
    end with one.
    """
    for name in file_names:
        output.write('/* %s */\n' % name)
        candidate = path.join(module_dir, name)
        if not path.isfile(candidate):
            candidate = path.join(output_dir, path.basename(module_dir), name)
        output.write(read_file(candidate))
        output.write(';')
class Descriptors:
    """Holds a parsed application descriptor plus its per-module descriptors."""

    def __init__(self, application_dir, application_descriptor, module_descriptors, application_json):
        self.application_dir = application_dir
        self.application = application_descriptor
        self.modules = module_descriptors
        self.application_json = application_json

    def all_compiled_files(self):
        """Absolute normalized paths of every script that gets compiled.

        Scripts listed in a module's 'skip_compilation' are excluded.
        """
        # A dict is used as an ordered de-duplicating set; keys() is returned.
        files = {}
        for name in self.modules:
            module = self.modules[name]
            skipped_files = set(module.get('skip_compilation', []))
            for script in module.get('scripts', []):
                if script not in skipped_files:
                    files[path.normpath(path.join(self.application_dir, name, script))] = True
        return files.keys()

    def module_compiled_files(self, name):
        """Relative script names of module `name` that get compiled."""
        files = []
        module = self.modules[name]
        skipped_files = set(module.get('skip_compilation', []))
        for script in module.get('scripts', []):
            if script not in skipped_files:
                files.append(script)
        return files

    def sorted_modules(self):
        """Topologically sort modules by their 'dependencies' (DFS).

        Bails out on a dependency cycle or a dependency on an unknown module.
        """
        result = []
        unvisited_modules = set(self.modules)
        temp_modules = set()  # modules on the current DFS path (cycle check)

        def visit(parent, name):
            # Returns (parent, name) for an unknown dependency, None otherwise.
            if name not in unvisited_modules:
                return None
            if name not in self.modules:
                return (parent, name)
            if name in temp_modules:
                bail_error('Dependency cycle found at module "%s"' % name)
            temp_modules.add(name)
            deps = self.modules[name].get('dependencies')
            if deps:
                for dep_name in deps:
                    bad_dep = visit(name, dep_name)
                    if bad_dep:
                        return bad_dep
            unvisited_modules.remove(name)
            temp_modules.remove(name)
            result.append(name)  # post-order: dependencies come first
            return None

        while len(unvisited_modules):
            # Pick an arbitrary unvisited module as the next DFS root.
            for next in unvisited_modules:
                break
            failure = visit(None, next)
            if failure:
                # failure[0] can never be None
                bail_error('Unknown module "%s" encountered in dependencies of "%s"' % (failure[1], failure[0]))
        return result

    def sorted_dependencies_closure(self, module_name):
        """Transitive dependencies of `module_name` (dependencies first),
        ending with `module_name` itself; each module appears once."""
        visited = set()

        def sorted_deps_for_module(name):
            result = []
            desc = self.modules[name]
            deps = desc.get('dependencies', [])
            for dep in deps:
                result += sorted_deps_for_module(dep)
            if name not in visited:
                result.append(name)
                visited.add(name)
            return result

        return sorted_deps_for_module(module_name)
class DescriptorLoader:
    """Loads an application descriptor and each module's module.json."""

    def __init__(self, application_dir):
        self.application_dir = application_dir

    def load_application(self, application_descriptor_name):
        """Parse the application descriptor plus all referenced module descriptors.

        Returns a Descriptors instance; bails out on duplicate module entries
        or on dependencies not listed in the application descriptor.
        """
        application_descriptor_filename = path.join(self.application_dir, application_descriptor_name)
        application_descriptor_json = read_file(application_descriptor_filename)
        application_descriptor = {desc['name']: desc for desc in json.loads(application_descriptor_json)}
        module_descriptors = {}
        # (Cleaned up: the original computed an unused module.json path here
        # and an unused loop value.)
        for module_name in application_descriptor:
            if module_descriptors.get(module_name):
                bail_error('Duplicate definition of module "%s" in %s' % (module_name, application_descriptor_filename))
            module_descriptors[module_name] = self._read_module_descriptor(module_name, application_descriptor_filename)
        for module in module_descriptors.values():
            deps = module.get('dependencies', [])
            for dep in deps:
                if dep not in application_descriptor:
                    bail_error('Module "%s" (dependency of "%s") not listed in application descriptor %s' % (dep, module['name'], application_descriptor_filename))
        return Descriptors(self.application_dir, application_descriptor, module_descriptors, application_descriptor_json)

    def _read_module_descriptor(self, module_name, application_descriptor_filename):
        """Load <module>/module.json and stamp it with the module name."""
        json_filename = path.join(self.application_dir, module_name, 'module.json')
        if not path.exists(json_filename):
            bail_error('Module descriptor %s referenced in %s is missing' % (json_filename, application_descriptor_filename))
        module_json = json.loads(read_file(json_filename))
        module_json['name'] = module_name
        return module_json
|
temasek/android_external_chromium_org_third_party_WebKit
|
Source/devtools/scripts/modular_build.py
|
Python
|
bsd-3-clause
| 5,698
|
[
"VisIt"
] |
752c7313cd0f6ab458c5e407150963a4c51a1e75b98a7926c3dc50ab20d9a3c0
|
import numpy as np
from ase.units import Hartree
from gpaw.occupations import FermiDirac, MethfesselPaxton
class KPoint:
    """Minimal stand-in for a GPAW k-point: one state, full weight, spin 0."""
    eps_n = np.empty(1)  # eigenvalue array (single state)
    f_n = np.empty(1)    # occupation array (single state)
    weight = 1.0         # k-point weight
    s = 0                # spin index
# Shared k-point instance used by f() below.
k = KPoint()
def f(occ, x):
    """Evaluate `occ.distribution` at energy `x` on the shared k-point `k`.

    Returns (n, dn/de, S): occupation, its derivative, and the entropy term.
    """
    k.eps_n[0] = x
    n, dnde, _, S = occ.distribution(k, 0.0)
    return n, dnde, S
def test(occ):
print occ
for e in [-0.3 / Hartree, 0, 0.1 / Hartree, 1.2 / Hartree]:
n0, d0, S0 = f(occ, e)
x = 0.000001
np, dp, Sp = f(occ, e + x)
nm, dm, Sm = f(occ, e - x)
d = -(np - nm) / (2 * x)
dS = Sp - Sm
dn = np - nm
print d - d0, dS - e * dn
assert abs(d - d0) < 3e-5
assert abs(dS - e * dn) < 1e-13
# Exercise both smearing schemes at two widths; Methfessel-Paxton is
# additionally checked at orders 0-3.
for w in [0.1, 0.5]:
    test(FermiDirac(w))
    for n in range(4):
        test(MethfesselPaxton(w, n))
|
qsnake/gpaw
|
gpaw/test/occupations.py
|
Python
|
gpl-3.0
| 820
|
[
"ASE",
"GPAW"
] |
7dbb00ef2c0a9226d8a5654a2c3b6273f7bad141567ac47f967b06be7c7b7b23
|
"""
fs.contrib.dropboxfs
========
A FS object that integrates with Dropbox.
"""
import time
import shutil
import optparse
import datetime
import tempfile
import calendar
from UserDict import UserDict
from fs.base import *
from fs.path import *
from fs.errors import *
from fs.filelike import StringIO
from dropbox import rest
from dropbox import client
from dropbox import session
# Items in cache are considered expired after 5 minutes.
CACHE_TTL = 300
# The format Dropbox uses for times (always rendered with a +0000 offset).
TIME_FORMAT = '%a, %d %b %Y %H:%M:%S +0000'
# Max size for spooling to memory before using disk (5M).
MAX_BUFFER = 1024 ** 2 * 5
class ContextManagerStream(object):
    """Wrap a file-like object so it supports iteration and `with` blocks.

    All unknown attribute access is delegated to the wrapped object `temp`.
    """

    def __init__(self, temp, name):
        self.temp = temp
        self.name = name

    def __iter__(self):
        # Yield fixed-size chunks until the stream is exhausted.
        while True:
            chunk = self.read(16384)
            if not chunk:
                return
            yield chunk

    def __getattr__(self, attr):
        return getattr(self.temp, attr)

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()
# TODO: these classes can probably be replaced with
# tempfile.SpooledTemporaryFile, however I am unsure at this moment if doing
# so would be bad since it is only available in Python 2.6+.
class SpooledWriter(ContextManagerStream):
    """Buffer writes in memory up to `max_buffer` bytes, then spill to disk.

    On close(), the accumulated data is uploaded via `client.put_file`.
    """

    def __init__(self, client, name, max_buffer=MAX_BUFFER):
        self.client = client
        self.max_buffer = max_buffer
        self.bytes = 0
        super(SpooledWriter, self).__init__(StringIO(), name)

    def __len__(self):
        return self.bytes

    def write(self, data):
        if self.temp.tell() + len(data) >= self.max_buffer:
            # Memory threshold reached: migrate what has been written so far
            # to a real temporary file and continue writing there.
            spill = tempfile.TemporaryFile()
            self.temp.seek(0)
            shutil.copyfileobj(self.temp, spill)
            self.temp = spill
        self.temp.write(data)
        self.bytes += len(data)

    def close(self):
        # Need to flush the temporary file (StringIO has no flush to call).
        if hasattr(self.temp, 'flush'):
            self.temp.flush()
        self.temp.seek(0)
        self.client.put_file(self.name, self, overwrite=True)
        self.temp.close()
class SpooledReader(ContextManagerStream):
    """
    Reads the entire file from the remote server into a buffer or temporary
    file. It can then satisfy read(), seek() and other calls using the local
    file.
    """

    def __init__(self, client, name, max_buffer=MAX_BUFFER):
        self.client = client
        r = self.client.get_file(name)
        self.bytes = int(r.getheader('Content-Length'))
        # BUG FIX: the original compared the HTTP response object itself
        # (`r > max_buffer`) instead of the download size, so the spill-to-disk
        # decision was effectively arbitrary.
        if self.bytes > max_buffer:
            temp = tempfile.TemporaryFile()
        else:
            temp = StringIO()
        shutil.copyfileobj(r, temp)
        temp.seek(0)
        super(SpooledReader, self).__init__(temp, name)

    def __len__(self):
        return self.bytes
class ChunkedReader(ContextManagerStream):
""" A file-like that provides access to a file with dropbox API"""
"""Reads the file from the remote server as requested.
It can then satisfy read()."""
def __init__(self, client, name):
self.client = client
try:
self.r = self.client.get_file(name)
except rest.ErrorResponse, e:
raise RemoteConnectionError(opname='get_file', path=name,
details=e)
self.bytes = int(self.r.getheader('Content-Length'))
self.name = name
self.closed = False
self.pos = 0
self.seek_pos = 0
def __len__(self):
return self.bytes
def __iter__(self):
return self
def seek(self, offset, whence=0):
"""
Change the stream position to the given byte offset in the file-like
object.
"""
if (whence == 0):
self.seek_pos = offset
elif (whence == 1):
self.seek_pos += offset
elif (whence == 2):
self.seek_pos = self.size + offset
def tell(self):
""" Return the current stream position. """
return self.seek_pos
def next(self):
"""
Read the data until all data is read.
data is empty string when there is no more data to read.
"""
data = self.read()
if data is None:
raise StopIteration()
return data
def read(self, amt=None):
""" Read a piece of the file from dropbox. """
if not self.r.isclosed():
# Do some fake seeking
if self.seek_pos < self.pos:
self.r.close()
self.r = self.client.get_file(self.name)
self.r.read(self.seek_pos)
elif self.seek_pos > self.pos:
# Read ahead enough to reconcile pos and seek_pos
self.r.read(self.pos - self.seek_pos)
self.pos = self.seek_pos
# Update position pointers
if amt:
self.pos += amt
self.seek_pos += amt
else:
self.pos = self.bytes
self.seek_pos = self.bytes
return self.r.read(amt)
else:
self.close()
def readline(self, size=-1):
""" Not implemented. Read and return one line from the stream. """
raise NotImplementedError()
def readlines(self, hint=-1):
"""
Not implemented. Read and return a list of lines from the stream.
"""
raise NotImplementedError()
def writable(self):
""" The stream does not support writing. """
return False
def writelines(self, lines):
""" Not implemented. Write a list of lines to the stream. """
raise NotImplementedError()
def close(self):
"""
Flush and close this stream. This method has no effect if the file
is already closed. As a convenience, it is allowed to call this method
more than once; only the first call, however, will have an effect.
"""
# It's a memory leak if self.r not closed.
if not self.r.isclosed():
self.r.close()
if not self.closed:
self.closed = True
class CacheItem(object):
    """Represents a path in the cache. There are two components to a path:
    its individual metadata, and the names of the children contained within it."""

    def __init__(self, metadata=None, children=None, timestamp=None):
        self.metadata = metadata
        self.children = children
        if timestamp is None:
            timestamp = time.time()
        self.timestamp = timestamp

    def add_child(self, name):
        """Record `name` as a child of this path."""
        if self.children is None:
            self.children = [name]
        else:
            self.children.append(name)

    def del_child(self, name):
        """Forget child `name`; unknown names are ignored."""
        if self.children is None:
            return
        try:
            self.children.remove(name)
        except ValueError:
            return

    def _get_expired(self):
        # BUG FIX: previously returned True or (implicitly) None; now always
        # returns a real bool. Truthiness behaviour is unchanged.
        return self.timestamp <= time.time() - CACHE_TTL

    expired = property(_get_expired)

    def renew(self):
        """Reset the expiry clock to now."""
        self.timestamp = time.time()
class DropboxCache(UserDict):
    """Path-keyed cache of CacheItems that keeps parent child-lists in sync."""

    def set(self, path, metadata):
        """Cache `metadata` for `path` and register it with its parent entry."""
        self[path] = CacheItem(metadata)
        parent, leaf = pathsplit(path)
        parent_item = self.get(parent)
        if parent_item:
            parent_item.add_child(leaf)

    def pop(self, path, default=None):
        """Evict `path` from the cache, unregistering it from its parent entry."""
        value = UserDict.pop(self, path, default)
        parent, leaf = pathsplit(path)
        parent_item = self.get(parent)
        if parent_item:
            parent_item.del_child(leaf)
        return value
class DropboxClient(client.DropboxClient):
"""A wrapper around the official DropboxClient. This wrapper performs
caching as well as converting errors to fs exceptions."""
def __init__(self, *args, **kwargs):
super(DropboxClient, self).__init__(*args, **kwargs)
self.cache = DropboxCache()
# Below we split the DropboxClient metadata() method into two methods
# metadata() and children(). This allows for more fine-grained fetches
# and caching.
def metadata(self, path, cache_read=True):
"Gets metadata for a given path."
item = self.cache.get(path) if cache_read else None
if not item or item.metadata is None or item.expired:
try:
metadata = super(DropboxClient, self).metadata(
path, include_deleted=False, list=False)
except rest.ErrorResponse, e:
if e.status == 404:
raise ResourceNotFoundError(path)
raise RemoteConnectionError(opname='metadata', path=path,
details=e)
if metadata.get('is_deleted', False):
raise ResourceNotFoundError(path)
item = self.cache[path] = CacheItem(metadata)
# Copy the info so the caller cannot affect our cache.
return dict(item.metadata.items())
def children(self, path):
"Gets children of a given path."
update, hash = False, None
item = self.cache.get(path)
if item:
if item.expired:
update = True
if item.metadata and item.children:
hash = item.metadata['hash']
else:
if not item.metadata.get('is_dir'):
raise ResourceInvalidError(path)
if not item.children:
update = True
else:
update = True
if update:
try:
metadata = super(DropboxClient, self).metadata(
path, hash=hash, include_deleted=False, list=True)
children = []
contents = metadata.pop('contents')
for child in contents:
if child.get('is_deleted', False):
continue
children.append(basename(child['path']))
self.cache[child['path']] = CacheItem(child)
item = self.cache[path] = CacheItem(metadata, children)
except rest.ErrorResponse, e:
if not item or e.status != 304:
raise RemoteConnectionError(opname='metadata', path=path,
details=e)
# We have an item from cache (perhaps expired), but it's
# hash is still valid (as far as Dropbox is concerned),
# so just renew it and keep using it.
item.renew()
return item.children
def file_create_folder(self, path):
"Add newly created directory to cache."
try:
metadata = super(DropboxClient, self).file_create_folder(path)
except rest.ErrorResponse, e:
if e.status == 404:
raise ParentDirectoryMissingError(path)
if e.status == 403:
raise DestinationExistsError(path)
raise RemoteConnectionError(opname='file_create_folder', path=path,
details=e)
self.cache.set(path, metadata)
def file_copy(self, src, dst):
try:
metadata = super(DropboxClient, self).file_copy(src, dst)
except rest.ErrorResponse, e:
if e.status == 404:
raise ResourceNotFoundError(src)
if e.status == 403:
raise DestinationExistsError(dst)
raise RemoteConnectionError(opname='file_copy', path=path,
details=e)
self.cache.set(dst, metadata)
def file_move(self, src, dst):
try:
metadata = super(DropboxClient, self).file_move(src, dst)
except rest.ErrorResponse, e:
if e.status == 404:
raise ResourceNotFoundError(src)
if e.status == 403:
raise DestinationExistsError(dst)
raise RemoteConnectionError(opname='file_move', path=path,
details=e)
self.cache.pop(src, None)
self.cache.set(dst, metadata)
def file_delete(self, path):
try:
super(DropboxClient, self).file_delete(path)
except rest.ErrorResponse, e:
if e.status == 404:
raise ResourceNotFoundError(path)
if e.status == 400 and 'must not be empty' in str(e):
raise DirectoryNotEmptyError(path)
raise
self.cache.pop(path, None)
def put_file(self, path, f, overwrite=False):
try:
super(DropboxClient, self).put_file(path, f, overwrite=overwrite)
except rest.ErrorResponse, e:
raise RemoteConnectionError(opname='put_file', path=path,
details=e)
self.cache.pop(dirname(path), None)
def create_client(app_key, app_secret, access_type, token_key, token_secret):
    """Build an authorized DropboxClient from a stored access token.

    Uses the token obtained during the one-time oAuth dance to gain
    access to the API without user interaction.
    """
    dropbox_session = session.DropboxSession(app_key, app_secret, access_type)
    dropbox_session.set_token(token_key, token_secret)
    return DropboxClient(dropbox_session)
def metadata_to_info(metadata, localtime=False):
    """Convert a Dropbox metadata dict into a pyfs-style info dict.

    Note: mutates *metadata* — 'is_dir' and 'bytes' are popped out of it.

    :param metadata: dict as returned by the Dropbox API.
    :param localtime: if True, interpret the parsed timestamp as UTC and
        convert to local epoch seconds (timegm); otherwise interpret it as
        local time (mktime). Kept as-is from the original implementation.
    :returns: dict with 'size', 'isdir', 'isfile', and (when a parseable
        timestamp is present) 'modified_time' as a naive datetime.
    """
    isdir = metadata.pop('is_dir', False)
    info = {
        'size': metadata.pop('bytes', 0),
        'isdir': isdir,
        'isfile': not isdir,
    }
    try:
        # Prefer the client-supplied mtime over the server-side one.
        if 'client_mtime' in metadata:
            mtime = metadata.get('client_mtime')
        else:
            mtime = metadata.get('modified')
        if mtime:
            # Parse date/time from Dropbox as struct_time.
            mtime = time.strptime(mtime, TIME_FORMAT)
            if localtime:
                # Convert time to local timezone in seconds.
                mtime = calendar.timegm(mtime)
            else:
                mtime = time.mktime(mtime)
            # Convert to datetime object, store in modified_time.
            info['modified_time'] = datetime.datetime.fromtimestamp(mtime)
    except (KeyError, ValueError):
        # BUG FIX: the original caught only KeyError, which .get() never
        # raises, while time.strptime raises ValueError on a malformed
        # timestamp and that escaped uncaught. Treat a bad date as
        # "no mtime" rather than failing the whole info call.
        pass
    return info
class DropboxFS(FS):
    """A FileSystem that stores data in Dropbox."""

    _meta = {'thread_safe': True,
             'virtual': False,
             'read_only': False,
             'unicode_paths': True,
             'case_insensitive_paths': True,
             'network': True,
             'atomic.setcontents': False,
             'atomic.makedir': True,
             'atomic.rename': True,
             'mime_type': 'virtual/dropbox', }

    def __init__(self, app_key, app_secret, access_type, token_key,
                 token_secret, localtime=False, thread_synchronize=True):
        """Create an fs that interacts with Dropbox.

        :param app_key: Your app key assigned by Dropbox.
        :param app_secret: Your app secret assigned by Dropbox.
        :param access_type: Type of access requested, 'dropbox' or 'app_folder'.
        :param token_key: The oAuth key you received after authorization.
        :param token_secret: The oAuth secret you received after authorization.
        :param localtime: If True, report modification times converted per
            metadata_to_info's localtime handling.
        :param thread_synchronize: set to True (default) to enable thread-safety
        """
        super(DropboxFS, self).__init__(thread_synchronize=thread_synchronize)
        self.client = create_client(app_key, app_secret, access_type,
                                    token_key, token_secret)
        self.localtime = localtime

    def __str__(self):
        return "<DropboxFS: >"

    def __unicode__(self):
        return u"<DropboxFS: >"

    def getmeta(self, meta_name, default=NoDefaultMeta):
        if meta_name == 'read_only':
            # NOTE(review): self.read_only is never assigned in this class;
            # presumably the FS base class provides it -- confirm, otherwise
            # this raises AttributeError.
            return self.read_only
        return super(DropboxFS, self).getmeta(meta_name, default)

    @synchronize
    def open(self, path, mode="rb", **kwargs):
        """Open a file-like object for reading or (spooled) writing."""
        if 'r' in mode:
            return ChunkedReader(self.client, path)
        else:
            return SpooledWriter(self.client, path)

    @synchronize
    def getcontents(self, path, mode="rb"):
        """Return the full contents of the file at path."""
        path = abspath(normpath(path))
        # BUG FIX: the original called self.open(self, path, mode), passing
        # the fs object itself as the path argument.
        return self.open(path, mode).read()

    def setcontents(self, path, data, *args, **kwargs):
        """Replace the contents of the file at path with data."""
        path = abspath(normpath(path))
        self.client.put_file(path, data, overwrite=True)

    def desc(self, path):
        return "%s in Dropbox" % path

    def getsyspath(self, path, allow_none=False):
        "Returns a path as the Dropbox API specifies."
        if allow_none:
            return None
        # BUG FIX: the original referenced an undefined global `client`;
        # the client instance lives on self.
        return self.client.format_path(abspath(normpath(path)))

    def isdir(self, path):
        try:
            info = self.getinfo(path)
            return info.get('isdir', False)
        except ResourceNotFoundError:
            return False

    def isfile(self, path):
        try:
            info = self.getinfo(path)
            return not info.get('isdir', False)
        except ResourceNotFoundError:
            return False

    def exists(self, path):
        try:
            self.getinfo(path)
            return True
        except ResourceNotFoundError:
            return False

    def listdir(self, path="/", wildcard=None, full=False, absolute=False,
                dirs_only=False, files_only=False):
        """List the names of the entries in a directory."""
        path = abspath(normpath(path))
        children = self.client.children(path)
        return self._listdir_helper(path, children, wildcard, full, absolute,
                                    dirs_only, files_only)

    @synchronize
    def getinfo(self, path, cache_read=True):
        """Return an info dict for path; cache_read=False forces a refetch."""
        path = abspath(normpath(path))
        metadata = self.client.metadata(path, cache_read=cache_read)
        return metadata_to_info(metadata, localtime=self.localtime)

    def copy(self, src, dst, *args, **kwargs):
        src = abspath(normpath(src))
        dst = abspath(normpath(dst))
        self.client.file_copy(src, dst)

    def copydir(self, src, dst, *args, **kwargs):
        # Dropbox's copy endpoint handles directories as well as files.
        src = abspath(normpath(src))
        dst = abspath(normpath(dst))
        self.client.file_copy(src, dst)

    def move(self, src, dst, *args, **kwargs):
        src = abspath(normpath(src))
        dst = abspath(normpath(dst))
        self.client.file_move(src, dst)

    def movedir(self, src, dst, *args, **kwargs):
        src = abspath(normpath(src))
        dst = abspath(normpath(dst))
        self.client.file_move(src, dst)

    def rename(self, src, dst, *args, **kwargs):
        # A rename is just a move within Dropbox.
        src = abspath(normpath(src))
        dst = abspath(normpath(dst))
        self.client.file_move(src, dst)

    def makedir(self, path, recursive=False, allow_recreate=False):
        # NOTE(review): `recursive` and `allow_recreate` are accepted for
        # FS interface compatibility but not honored here.
        path = abspath(normpath(path))
        self.client.file_create_folder(path)

    # This does not work, httplib refuses to send a Content-Length: 0 header
    # even though the header is required. We can't make a 0-length file.
    #def createfile(self, path, wipe=False):
    #    self.client.put_file(path, '', overwrite=False)

    def remove(self, path):
        path = abspath(normpath(path))
        self.client.file_delete(path)

    def removedir(self, path, *args, **kwargs):
        # file_delete handles directories too (DirectoryNotEmptyError is
        # raised by the client when the directory is not empty).
        path = abspath(normpath(path))
        self.client.file_delete(path)
def main():
parser = optparse.OptionParser(prog="dropboxfs",
description="CLI harness for DropboxFS.")
parser.add_option(
"-k",
"--app-key",
help="Your Dropbox app key.")
parser.add_option(
"-s",
"--app-secret",
help="Your Dropbox app secret.")
parser.add_option(
"-t",
"--type",
default='dropbox',
choices=('dropbox', 'app_folder'),
help="Your Dropbox app access type.")
parser.add_option(
"-a",
"--token-key",
help="Your access token key (if you previously obtained one.")
parser.add_option(
"-b",
"--token-secret",
help="Your access token secret (if you previously obtained one.")
(options, args) = parser.parse_args()
# Can't operate without these parameters.
if not options.app_key or not options.app_secret:
parser.error('You must obtain an app key and secret from Dropbox at the following URL.\n\nhttps://www.dropbox.com/developers/apps')
# Instantiate a client one way or another.
if not options.token_key and not options.token_secret:
s = session.DropboxSession(options.app_key, options.app_secret,
options.type)
# Get a temporary token, so we can make oAuth calls.
t = s.obtain_request_token()
print "Please visit the following URL and authorize this application.\n"
print s.build_authorize_url(t)
print "\nWhen you are done, please press <enter>."
raw_input()
# Trade up to permanent access token.
a = s.obtain_access_token(t)
token_key, token_secret = a.key, a.secret
print 'Your access token will be printed below, store it for later use.'
print 'For future accesses, you can pass the --token-key and --token-secret'
print ' arguments.\n'
print 'Access token:', a.key
print 'Access token secret:', a.secret
print "\nWhen you are done, please press <enter>."
raw_input()
elif not options.token_key or not options.token_secret:
parser.error('You must provide both the access token and the '
'access token secret.')
else:
token_key, token_secret = options.token_key, options.token_secret
fs = DropboxFS(options.app_key, options.app_secret, options.type,
token_key, token_secret)
print fs.getinfo('/')
print fs.getinfo('/Public')
if fs.exists('/Bar'):
fs.removedir('/Bar')
print fs.listdir('/')
fs.makedir('/Bar')
print fs.listdir('/')
print fs.listdir('/Foo')
filelike = fs.open('/big-file.pdf')
print filelike.read(100)
filelike.seek(100)
chunk2 = filelike.read(100)
print chunk2
filelike.seek(200)
print filelike.read(100)
filelike.seek(100)
chunk2a = filelike.read(100)
print chunk2a
assert chunk2 == chunk2a
if __name__ == '__main__':
main()
|
freak3dot/cdnjs
|
dropboxfs.py
|
Python
|
mit
| 22,330
|
[
"VisIt"
] |
3db034ae0d38e1c44c9d7d1270be453399da184cb07c48ffad7d9d6079769af1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.