text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
from check_grad import check_grad
from utils import *
from logistic import *
import matplotlib.pyplot as plt
def run_logistic_regression(hyperparameters):
    """Train logistic regression with gradient descent and log progress.

    Parameters
    ----------
    hyperparameters : dict
        Must contain 'learning_rate', 'num_iterations',
        'weight_regularization' and 'weight_decay'.

    Returns
    -------
    numpy.ndarray
        Shape (num_iterations, 5); per-iteration row of
        [train NLL / N, train cross entropy, train % correct,
         valid cross entropy, valid % correct].
    """
    train_inputs, train_targets = load_train()
    valid_inputs, valid_targets = load_valid()

    # N is number of examples; M is the number of features per example.
    N, M = train_inputs.shape

    # Small random initial weights; M feature weights plus one bias term.
    weights = 0.1 * np.random.randn(M + 1, 1)

    # Verify that the logistic function produces the right gradient.
    # diff should be very close to 0.
    run_check_grad(hyperparameters)

    # Begin learning with gradient descent.
    logging = np.zeros((hyperparameters['num_iterations'], 5))
    # NOTE: `range` replaces py2-only `xrange`; iteration behavior is
    # identical and the code now also runs on Python 3.
    for t in range(hyperparameters['num_iterations']):
        # Negative log likelihood and its derivatives w.r.t. the weights.
        f, df, predictions = logistic(weights, train_inputs, train_targets, hyperparameters)

        # Evaluate the prediction on the training set.
        cross_entropy_train, frac_correct_train = evaluate(train_targets, predictions)

        if np.isnan(f) or np.isinf(f):
            raise ValueError("nan/inf error")

        # Gradient-descent update; gradient is averaged over the N examples.
        weights = weights - hyperparameters['learning_rate'] * df / N

        # Evaluate the updated weights on the validation set.
        predictions_valid = logistic_predict(weights, valid_inputs)
        cross_entropy_valid, frac_correct_valid = evaluate(valid_targets, predictions_valid)

        logging[t] = [f / N, cross_entropy_train, frac_correct_train * 100,
                      cross_entropy_valid, frac_correct_valid * 100]
    return logging
def run_check_grad(hyperparameters):
    """Performs gradient check on logistic function.

    Creates small random data with 7 examples and 9 dimensions and checks
    the gradient of `logistic` on that data; the printed diff should be
    very close to 0.
    """
    num_examples = 7
    num_dimensions = 9

    weights = np.random.randn(num_dimensions + 1, 1)
    data = np.random.randn(num_examples, num_dimensions)
    targets = (np.random.rand(num_examples, 1) > 0.5).astype(int)

    diff = check_grad(logistic,  # function to check
                      weights,
                      0.001,     # perturbation
                      data,
                      targets,
                      hyperparameters)
    # Single pre-formatted argument so print() behaves identically on
    # Python 2 and 3 (the original `print "diff =", diff` was py2-only
    # syntax and a SyntaxError on Python 3).
    print("diff = " + str(diff))
if __name__ == '__main__':
    # Sweep over several learning rates and plot the training cross
    # entropy curve for each on the same axes. The original script
    # repeated this hyperparameter/run/plot section five times verbatim;
    # the loop below is behaviorally identical.
    base_hyperparameters = {
        'weight_regularization': False,  # boolean, True for using Gaussian prior on weights
        'num_iterations': 300,
        'weight_decay': 0.1,  # related to standard deviation of weight prior
    }
    # (learning rate, matplotlib marker) for each curve.
    settings = [(0.1, '+'), (0.3, '*'), (0.5, 'd'), (0.7, 'h'), (0.9, 's')]
    # Average the logged statistics over this many runs per setting.
    num_runs = 1

    for rate, marker in settings:
        hyperparameters = dict(base_hyperparameters, learning_rate=rate)
        logging = np.zeros((hyperparameters['num_iterations'], 5))
        for _ in range(num_runs):
            logging += run_logistic_regression(hyperparameters)
        logging /= num_runs
        # Column 1 of `logging` is the training-set cross entropy.
        plt.plot(logging[:, 1], marker=marker, label='learning rate=%s' % rate)

    plt.xlim(-10, 50)
    plt.ylim(-10, 500)
    plt.legend(loc='upper right')
    plt.title('Plot of Cross Entropy vs. Iteration Times under different learning rate')
    plt.xlabel('Iteration Times')
    plt.ylabel('Cross Entropy of Training Set')
    plt.show()
| ouyangyike/Machine-Learning-and-Data-Mining | Logistic Regression/logistic_regression_rate1.py | Python | mit | 6,096 | [
"Gaussian"
] | 47e5f34133cd4891c63fee5ba7c1167132460991072197c65b5f6a2288af3847 |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Copyright (c) 2015 Eric Pascual
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------
"""
An assortment of classes modeling specific features of the EV3 brick.
"""
from .core import *
# Names of the four EV3 output (motor) ports, as exposed by the ev3dev
# drivers.
OUTPUT_A = 'outA'
OUTPUT_B = 'outB'
OUTPUT_C = 'outC'
OUTPUT_D = 'outD'
# Names of the four EV3 input (sensor) ports.
INPUT_1 = 'in1'
INPUT_2 = 'in2'
INPUT_3 = 'in3'
INPUT_4 = 'in4'
class Leds(object):
    """
    The EV3 LEDs.

    Class attributes hold the four individual LED handles, plus tuples for
    addressing them by side (``LEFT``/``RIGHT``) and for color mixes
    (``RED``/``GREEN``/``AMBER``/``ORANGE``/``YELLOW``); each color tuple
    gives the (red, green) brightness fractions.
    """

    # ~autogen led-colors platforms.ev3.led>currentClass
    red_left = Led(name_pattern='ev3:left:red:ev3dev')
    red_right = Led(name_pattern='ev3:right:red:ev3dev')
    green_left = Led(name_pattern='ev3:left:green:ev3dev')
    green_right = Led(name_pattern='ev3:right:green:ev3dev')

    # LED groups by physical side, ordered (red, green).
    LEFT = ( red_left, green_left, )
    RIGHT = ( red_right, green_right, )

    # Color mixes as (red, green) brightness fractions.
    RED = ( 1, 0, )
    GREEN = ( 0, 1, )
    AMBER = ( 1, 1, )
    ORANGE = ( 1, 0.5, )
    YELLOW = ( 0.5, 1, )

    @staticmethod
    def set_color(group, color, pct=1):
        """
        Sets brightness of leds in the given group to the values specified in
        color tuple. When percentage is specified, brightness of each led is
        reduced proportionally.

        Example::

            Leds.set_color(Leds.LEFT, Leds.AMBER)
        """
        for l, v in zip(group, color):
            l.brightness_pct = v * pct

    @staticmethod
    def set(group, **kwargs):
        """
        Set attributes for each led in group.

        Example::

            Leds.set(Leds.LEFT, brightness_pct=0.5, trigger='timer')
        """
        for led in group:
            for k in kwargs:
                setattr(led, k, kwargs[k])

    @staticmethod
    def all_off():
        """
        Turn all leds off
        """
        # NOTE(review): sets raw `brightness` (not `brightness_pct`) to 0
        # on each LED.
        Leds.red_left.brightness = 0
        Leds.red_right.brightness = 0
        Leds.green_left.brightness = 0
        Leds.green_right.brightness = 0

    # ~autogen
class Button(ButtonEVIO):
    """
    EV3 Buttons

    Each of the six physical buttons is exposed both as a polled property
    (``up``, ``down``, ``left``, ``right``, ``enter``, ``backspace``) and
    as an ``on_<button>(state)`` change handler that `process()` invokes;
    override the handlers to react to button state changes.
    """

    # ~autogen button-property platforms.ev3.button>currentClass

    @staticmethod
    def on_up(state):
        """
        This handler is called by `process()` whenever state of 'up' button
        has changed since last `process()` call. `state` parameter is the new
        state of the button.
        """
        pass

    @staticmethod
    def on_down(state):
        """
        This handler is called by `process()` whenever state of 'down' button
        has changed since last `process()` call. `state` parameter is the new
        state of the button.
        """
        pass

    @staticmethod
    def on_left(state):
        """
        This handler is called by `process()` whenever state of 'left' button
        has changed since last `process()` call. `state` parameter is the new
        state of the button.
        """
        pass

    @staticmethod
    def on_right(state):
        """
        This handler is called by `process()` whenever state of 'right' button
        has changed since last `process()` call. `state` parameter is the new
        state of the button.
        """
        pass

    @staticmethod
    def on_enter(state):
        """
        This handler is called by `process()` whenever state of 'enter' button
        has changed since last `process()` call. `state` parameter is the new
        state of the button.
        """
        pass

    @staticmethod
    def on_backspace(state):
        """
        This handler is called by `process()` whenever state of 'backspace' button
        has changed since last `process()` call. `state` parameter is the new
        state of the button.
        """
        pass

    # Linux input-event device path and key-event code for each button.
    _buttons = {
        'up': {'name': '/dev/input/by-path/platform-gpio-keys.0-event', 'value': 103},
        'down': {'name': '/dev/input/by-path/platform-gpio-keys.0-event', 'value': 108},
        'left': {'name': '/dev/input/by-path/platform-gpio-keys.0-event', 'value': 105},
        'right': {'name': '/dev/input/by-path/platform-gpio-keys.0-event', 'value': 106},
        'enter': {'name': '/dev/input/by-path/platform-gpio-keys.0-event', 'value': 28},
        'backspace': {'name': '/dev/input/by-path/platform-gpio-keys.0-event', 'value': 14},
    }

    @property
    def up(self):
        """
        Check if 'up' button is pressed.
        """
        return 'up' in self.buttons_pressed

    @property
    def down(self):
        """
        Check if 'down' button is pressed.
        """
        return 'down' in self.buttons_pressed

    @property
    def left(self):
        """
        Check if 'left' button is pressed.
        """
        return 'left' in self.buttons_pressed

    @property
    def right(self):
        """
        Check if 'right' button is pressed.
        """
        return 'right' in self.buttons_pressed

    @property
    def enter(self):
        """
        Check if 'enter' button is pressed.
        """
        return 'enter' in self.buttons_pressed

    @property
    def backspace(self):
        """
        Check if 'backspace' button is pressed.
        """
        return 'backspace' in self.buttons_pressed

    # ~autogen
| ensonic/ev3dev-lang-python-1 | ev3dev/ev3.py | Python | mit | 6,244 | [
"Amber"
] | 16933750f705d461912d55847c644a5f94c5091802d214555b0803f967690ee6 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from itertools import combinations
import numpy as np
import pandas as pd
import scipy.special
from scipy.stats import pearsonr, spearmanr, kendalltau
from skbio.stats.distance import DistanceMatrix
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def mantel(x, y, method='pearson', permutations=999, alternative='two-sided',
           strict=True, lookup=None):
    """Compute correlation between distance matrices using the Mantel test.

    The Mantel test compares two distance matrices by computing the correlation
    between the distances in the lower (or upper) triangular portions of the
    symmetric distance matrices. Correlation can be computed using Pearson's
    product-moment correlation coefficient or Spearman's rank correlation
    coefficient.

    As defined in [1]_, the Mantel test computes a test statistic :math:`r_M`
    given two symmetric distance matrices :math:`D_X` and :math:`D_Y`.
    :math:`r_M` is defined as

    .. math::

       r_M=\\frac{1}{d-1}\\sum_{i=1}^{n-1}\\sum_{j=i+1}^{n}
       stand(D_X)_{ij}stand(D_Y)_{ij}

    where

    .. math::

       d=\\frac{n(n-1)}{2}

    and :math:`n` is the number of rows/columns in each of the distance
    matrices. :math:`stand(D_X)` and :math:`stand(D_Y)` are distance matrices
    with their upper triangles containing standardized distances. Note that
    since :math:`D_X` and :math:`D_Y` are symmetric, the lower triangular
    portions of the matrices could equivalently have been used instead of the
    upper triangular portions (the current function behaves in this manner).

    If ``method='spearman'``, the above equation operates on ranked distances
    instead of the original distances.

    Statistical significance is assessed via a permutation test. The rows and
    columns of the first distance matrix (`x`) are randomly permuted a
    number of times (controlled via `permutations`). A correlation coefficient
    is computed for each permutation and the p-value is the proportion of
    permuted correlation coefficients that are equal to or more extreme
    than the original (unpermuted) correlation coefficient. Whether a permuted
    correlation coefficient is "more extreme" than the original correlation
    coefficient depends on the alternative hypothesis (controlled via
    `alternative`).

    Parameters
    ----------
    x, y : DistanceMatrix or array_like
        Input distance matrices to compare. If `x` and `y` are both
        ``DistanceMatrix`` instances, they will be reordered based on matching
        IDs (see `strict` and `lookup` below for handling matching/mismatching
        IDs); thus they are not required to be in the same ID order. If `x` and
        `y` are ``array_like``, no reordering is applied and both matrices must
        have the same shape. In either case, `x` and `y` must be at least 3x3
        in size *after* reordering and matching of IDs.
    method : {'pearson', 'spearman', 'kendalltau'}
        Method used to compute the correlation between distance matrices.
    permutations : int, optional
        Number of times to randomly permute `x` when assessing statistical
        significance. Must be greater than or equal to zero. If zero,
        statistical significance calculations will be skipped and the p-value
        will be ``np.nan``.
    alternative : {'two-sided', 'greater', 'less'}
        Alternative hypothesis to use when calculating statistical
        significance. The default ``'two-sided'`` alternative hypothesis
        calculates the proportion of permuted correlation coefficients whose
        magnitude (i.e. after taking the absolute value) is greater than or
        equal to the absolute value of the original correlation coefficient.
        ``'greater'`` calculates the proportion of permuted coefficients that
        are greater than or equal to the original coefficient. ``'less'``
        calculates the proportion of permuted coefficients that are less than
        or equal to the original coefficient.
    strict : bool, optional
        If ``True``, raises a ``ValueError`` if IDs are found that do not exist
        in both distance matrices. If ``False``, any nonmatching IDs are
        discarded before running the test. See `n` (in Returns section below)
        for the number of matching IDs that were used in the test. This
        parameter is ignored if `x` and `y` are ``array_like``.
    lookup : dict, optional
        Maps each ID in the distance matrices to a new ID. Used to match up IDs
        across distance matrices prior to running the Mantel test. If the IDs
        already match between the distance matrices, this parameter is not
        necessary. This parameter is disallowed if `x` and `y` are
        ``array_like``.

    Returns
    -------
    corr_coeff : float
        Correlation coefficient of the test (depends on `method`).
    p_value : float
        p-value of the test.
    n : int
        Number of rows/columns in each of the distance matrices, after any
        reordering/matching of IDs. If ``strict=False``, nonmatching IDs may
        have been discarded from one or both of the distance matrices prior to
        running the Mantel test, so this value may be important as it indicates
        the *actual* size of the matrices that were compared.

    Raises
    ------
    ValueError
        If `x` and `y` are not at least 3x3 in size after reordering/matching
        of IDs, or an invalid `method`, number of `permutations`, or
        `alternative` are provided.
    TypeError
        If `x` and `y` are not both ``DistanceMatrix`` instances or
        ``array_like``.

    See Also
    --------
    DistanceMatrix
    scipy.stats.pearsonr
    scipy.stats.spearmanr
    pwmantel

    Notes
    -----
    The Mantel test was first described in [2]_. The general algorithm and
    interface are similar to ``vegan::mantel``, available in R's vegan
    package [3]_.

    ``np.nan`` will be returned for the p-value if `permutations` is zero or if
    the correlation coefficient is ``np.nan``. The correlation coefficient will
    be ``np.nan`` if one or both of the inputs does not have any variation
    (i.e. the distances are all constant) and ``method='spearman'``.

    References
    ----------
    .. [1] Legendre, P. and Legendre, L. (2012) Numerical Ecology. 3rd English
       Edition. Elsevier.

    .. [2] Mantel, N. (1967). "The detection of disease clustering and a
       generalized regression approach". Cancer Research 27 (2): 209-220. PMID
       6018555.

    .. [3] http://cran.r-project.org/web/packages/vegan/index.html

    Examples
    --------
    Import the functionality we'll use in the following examples:

    >>> from skbio import DistanceMatrix
    >>> from skbio.stats.distance import mantel

    Define two 3x3 distance matrices:

    >>> x = DistanceMatrix([[0, 1, 2],
    ...                     [1, 0, 3],
    ...                     [2, 3, 0]])
    >>> y = DistanceMatrix([[0, 2, 7],
    ...                     [2, 0, 6],
    ...                     [7, 6, 0]])

    Compute the Pearson correlation between them and assess significance using
    a two-sided test with 999 permutations:

    >>> coeff, p_value, n = mantel(x, y)
    >>> print(round(coeff, 4))
    0.7559

    Thus, we see a moderate-to-strong positive correlation (:math:`r_M=0.7559`)
    between the two matrices.

    In the previous example, the distance matrices (``x`` and ``y``) have the
    same IDs, in the same order:

    >>> x.ids
    ('0', '1', '2')
    >>> y.ids
    ('0', '1', '2')

    If necessary, ``mantel`` will reorder the distance matrices prior to
    running the test. The function also supports a ``lookup`` dictionary that
    maps distance matrix IDs to new IDs, providing a way to match IDs between
    distance matrices prior to running the Mantel test.

    For example, let's reassign the distance matrices' IDs so that there are no
    matching IDs between them:

    >>> x.ids = ('a', 'b', 'c')
    >>> y.ids = ('d', 'e', 'f')

    If we rerun ``mantel``, we get the following error notifying us that there
    are nonmatching IDs (this is the default behavior with ``strict=True``):

    >>> mantel(x, y)
    Traceback (most recent call last):
        ...
    ValueError: IDs exist that are not in both distance matrices.

    If we pass ``strict=False`` to ignore/discard nonmatching IDs, we see that
    no matches exist between `x` and `y`, so the Mantel test still cannot be
    run:

    >>> mantel(x, y, strict=False)
    Traceback (most recent call last):
        ...
    ValueError: No matching IDs exist between the distance matrices.

    To work around this, we can define a ``lookup`` dictionary to specify how
    the IDs should be matched between distance matrices:

    >>> lookup = {'a': 'A', 'b': 'B', 'c': 'C',
    ...           'd': 'A', 'e': 'B', 'f': 'C'}

    ``lookup`` maps each ID to ``'A'``, ``'B'``, or ``'C'``. If we rerun
    ``mantel`` with ``lookup``, we get the same results as the original
    example where all distance matrix IDs matched:

    >>> coeff, p_value, n = mantel(x, y, lookup=lookup)
    >>> print(round(coeff, 4))
    0.7559

    ``mantel`` also accepts input that is ``array_like``. For example, if we
    redefine `x` and `y` as nested Python lists instead of ``DistanceMatrix``
    instances, we obtain the same result:

    >>> x = [[0, 1, 2],
    ...      [1, 0, 3],
    ...      [2, 3, 0]]
    >>> y = [[0, 2, 7],
    ...      [2, 0, 6],
    ...      [7, 6, 0]]
    >>> coeff, p_value, n = mantel(x, y)
    >>> print(round(coeff, 4))
    0.7559

    It is import to note that reordering/matching of IDs (and hence the
    ``strict`` and ``lookup`` parameters) do not apply when input is
    ``array_like`` because there is no notion of IDs.
    """
    if method == 'pearson':
        corr_func = pearsonr
    elif method == 'spearman':
        corr_func = spearmanr
    elif method == 'kendalltau':
        corr_func = kendalltau
    else:
        raise ValueError("Invalid correlation method '%s'." % method)

    if permutations < 0:
        raise ValueError("Number of permutations must be greater than or "
                         "equal to zero.")
    if alternative not in ('two-sided', 'greater', 'less'):
        raise ValueError("Invalid alternative hypothesis '%s'." % alternative)

    # Reorder/intersect IDs (DistanceMatrix inputs) or validate shapes
    # (array_like inputs).
    x, y = _order_dms(x, y, strict=strict, lookup=lookup)

    n = x.shape[0]
    if n < 3:
        raise ValueError("Distance matrices must have at least 3 matching IDs "
                         "between them (i.e., minimum 3x3 in size).")

    # Correlate only the condensed (upper-triangle) distances.
    x_flat = x.condensed_form()
    y_flat = y.condensed_form()

    orig_stat = corr_func(x_flat, y_flat)[0]

    if permutations == 0 or np.isnan(orig_stat):
        p_value = np.nan
    else:
        perm_gen = (corr_func(x.permute(condensed=True), y_flat)[0]
                    for _ in range(permutations))
        # Use the builtin `float` dtype: the `np.float` alias was deprecated
        # in NumPy 1.20 and removed in 1.24, so the original code raises
        # AttributeError on modern NumPy.
        permuted_stats = np.fromiter(perm_gen, dtype=float,
                                     count=permutations)

        if alternative == 'two-sided':
            count_better = (np.absolute(permuted_stats) >=
                            np.absolute(orig_stat)).sum()
        elif alternative == 'greater':
            count_better = (permuted_stats >= orig_stat).sum()
        else:
            count_better = (permuted_stats <= orig_stat).sum()

        # +1 in numerator and denominator includes the observed statistic
        # itself in the permutation distribution.
        p_value = (count_better + 1) / (permutations + 1)

    return orig_stat, p_value, n
@experimental(as_of="0.4.0")
def pwmantel(dms, labels=None, method='pearson', permutations=999,
             alternative='two-sided', strict=True, lookup=None):
    """Run Mantel tests for every pair of given distance matrices.

    Runs a Mantel test for each pair of distance matrices and collates the
    results in a ``DataFrame``. Distance matrices do not need to be in the same
    ID order if they are ``DistanceMatrix`` instances. Distance matrices will
    be re-ordered prior to running each pairwise test, and if ``strict=False``,
    IDs that don't match between a pair of distance matrices will be dropped
    prior to running the test (otherwise a ``ValueError`` will be raised if
    there are nonmatching IDs between any pair of distance matrices).

    Parameters
    ----------
    dms : iterable of DistanceMatrix objects, array_like objects, or filepaths
        to distance matrices. If they are ``array_like``, no reordering or
        matching of IDs will be performed.
    labels : iterable of str or int, optional
        Labels for each distance matrix in `dms`. These are used in the results
        ``DataFrame`` to identify the pair of distance matrices used in a
        pairwise Mantel test. If ``None``, defaults to monotonically-increasing
        integers starting at zero.
    method : {'pearson', 'spearman', 'kendalltau'}
        Correlation method. See ``mantel`` function for more details.
    permutations : int, optional
        Number of permutations. See ``mantel`` function for more details.
    alternative : {'two-sided', 'greater', 'less'}
        Alternative hypothesis. See ``mantel`` function for more details.
    strict : bool, optional
        Handling of nonmatching IDs. See ``mantel`` function for more details.
    lookup : dict, optional
        Map existing IDs to new IDs. See ``mantel`` function for more details.

    Returns
    -------
    pandas.DataFrame
        ``DataFrame`` containing the results of each pairwise test (one per
        row). Includes the number of objects considered in each test as column
        ``n`` (after applying `lookup` and filtering nonmatching IDs if
        ``strict=False``). Column ``p-value`` will display p-values as ``NaN``
        if p-values could not be computed (they are stored as ``np.nan`` within
        the ``DataFrame``; see ``mantel`` for more details).

    See Also
    --------
    mantel
    DistanceMatrix.read

    Notes
    -----
    Passing a list of filepaths can be useful as it allows for a smaller amount
    of memory consumption as it only loads two matrices at a time as opposed to
    loading all distance matrices into memory.

    Examples
    --------
    Import the functionality we'll use in the following examples:

    >>> from skbio import DistanceMatrix
    >>> from skbio.stats.distance import pwmantel

    Define three 3x3 distance matrices:

    >>> x = DistanceMatrix([[0, 1, 2],
    ...                     [1, 0, 3],
    ...                     [2, 3, 0]])
    >>> y = DistanceMatrix([[0, 2, 7],
    ...                     [2, 0, 6],
    ...                     [7, 6, 0]])
    >>> z = DistanceMatrix([[0, 5, 6],
    ...                     [5, 0, 1],
    ...                     [6, 1, 0]])

    Run Mantel tests for each pair of distance matrices (there are 3 possible
    pairs):

    >>> pwmantel((x, y, z), labels=('x', 'y', 'z'),
    ...          permutations=0) # doctest: +NORMALIZE_WHITESPACE
             statistic p-value  n   method  permutations alternative
    dm1 dm2
    x   y     0.755929     NaN  3  pearson             0   two-sided
        z    -0.755929     NaN  3  pearson             0   two-sided
    y   z    -0.142857     NaN  3  pearson             0   two-sided

    Note that we passed ``permutations=0`` to suppress significance tests; the
    p-values in the output are labelled ``NaN``.
    """
    num_dms = len(dms)

    if num_dms < 2:
        raise ValueError("Must provide at least two distance matrices.")

    if labels is None:
        labels = range(num_dms)
    else:
        if num_dms != len(labels):
            raise ValueError("Number of labels must match the number of "
                             "distance matrices.")
        if len(set(labels)) != len(labels):
            raise ValueError("Labels must be unique.")

    # One result row per unordered pair of matrices.
    num_combs = scipy.special.comb(num_dms, 2, exact=True)
    results_dtype = [('dm1', object), ('dm2', object), ('statistic', float),
                     ('p-value', float), ('n', int), ('method', object),
                     ('permutations', int), ('alternative', object)]
    results = np.empty(num_combs, dtype=results_dtype)

    for i, pair in enumerate(combinations(zip(labels, dms), 2)):
        (xlabel, x), (ylabel, y) = pair

        # Strings are treated as filepaths; only two matrices are resident
        # in memory at any time.
        if isinstance(x, str):
            x = DistanceMatrix.read(x)
        if isinstance(y, str):
            y = DistanceMatrix.read(y)

        stat, p_val, n = mantel(x, y, method=method, permutations=permutations,
                                alternative=alternative, strict=strict,
                                lookup=lookup)

        results[i] = (xlabel, ylabel, stat, p_val, n, method, permutations,
                      alternative)

    return pd.DataFrame.from_records(results, index=('dm1', 'dm2'))
def _order_dms(x, y, strict=True, lookup=None):
    """Match up two distance matrices and return them in a shared ID order."""
    x_is_dm = isinstance(x, DistanceMatrix)
    y_is_dm = isinstance(y, DistanceMatrix)

    # Either both inputs carry IDs (DistanceMatrix) or neither does.
    if x_is_dm != y_is_dm:
        raise TypeError(
            "Mixing DistanceMatrix and array_like input types is not "
            "supported. Both x and y must either be DistanceMatrix instances "
            "or array_like, but not mixed.")

    if not x_is_dm:
        # array_like inputs: no ID machinery, just validate matching shapes.
        if lookup is not None:
            raise ValueError("ID lookup can only be provided if inputs are "
                             "DistanceMatrix instances.")
        x = DistanceMatrix(x)
        y = DistanceMatrix(y)
        if x.shape != y.shape:
            raise ValueError("Distance matrices must have the same shape.")
        return x, y

    # Both inputs are DistanceMatrix instances.
    if lookup is not None:
        x = _remap_ids(x, lookup, 'x', 'first')
        y = _remap_ids(y, lookup, 'y', 'second')

    # Shared IDs, preserving x's ordering.
    shared_ids = [an_id for an_id in x.ids if an_id in y]
    num_shared = len(shared_ids)

    if strict and (num_shared != len(x.ids) or num_shared != len(y.ids)):
        raise ValueError("IDs exist that are not in both distance "
                         "matrices.")
    if num_shared < 1:
        raise ValueError("No matching IDs exist between the distance "
                         "matrices.")

    return x.filter(shared_ids), y.filter(shared_ids)
def _remap_ids(dm, lookup, label, order):
"Return a copy of `dm` with its IDs remapped based on `lookup`."""
try:
remapped_ids = [lookup[id_] for id_ in dm.ids]
except KeyError as e:
raise KeyError("All IDs in the %s distance matrix (%s) must be in "
"the lookup. Missing ID: %s" % (order, label, str(e)))
# Create a copy as we'll be modifying the IDs in place.
dm_copy = dm.copy()
dm_copy.ids = remapped_ids
return dm_copy
| gregcaporaso/scikit-bio | skbio/stats/distance/_mantel.py | Python | bsd-3-clause | 19,157 | [
"scikit-bio"
] | 175023a93857d95e350ec281e7f99fec06ec13c5e9d6bc325c701b6b0225dc8e |
import pytest
from .addons import using_networkx
from .utils import *
import math
import numpy as np
import qcelemental as qcel
import psi4
from psi4.driver import qcdb
pytestmark = pytest.mark.quick  # mark every test in this module as 'quick'
def hide_test_xtpl_fn_fn_error():
    """Passing an extrapolation *function* (not its name) must raise UpgradeHelper."""
    psi4.geometry('He')
    with pytest.raises(psi4.UpgradeHelper) as exc_info:
        psi4.energy('cbs', scf_basis='cc-pvdz', scf_scheme=psi4.driver_cbs.xtpl_highest_1)
    message = str(exc_info.value)
    assert 'Replace extrapolation function with function name' in message
def hide_test_xtpl_cbs_fn_error():
    """Passing the cbs function object (rather than the 'cbs' string) must raise."""
    psi4.geometry('He')
    with pytest.raises(psi4.UpgradeHelper) as exc_info:
        psi4.energy(psi4.cbs, scf_basis='cc-pvdz')
    message = str(exc_info.value)
    assert 'Replace cbs or complete_basis_set function with cbs string' in message
@pytest.mark.parametrize("inp,out", [
    ((2, 'C2V'), 2),
    (('A2', 'c2v'), 2),
    (('2', 'C2V'), 2),
])
def test_parse_cotton_irreps(inp, out):
    """Integer, label, and string-integer irrep specs all resolve to the index."""
    assert psi4.driver.driver_util.parse_cotton_irreps(*inp) == out
@pytest.mark.parametrize("inp", [
    (5, 'cs'),
    ('5', 'cs'),
    (0, 'cs'),
    ('a2', 'cs'),
])
def test_parse_cotton_irreps_error(inp):
    """Out-of-range or unknown irrep specs raise ValidationError."""
    with pytest.raises(psi4.ValidationError) as exc_info:
        psi4.driver.driver_util.parse_cotton_irreps(*inp)
    assert 'not valid for point group' in str(exc_info.value)
# <<< TODO Deprecated! Delete in Psi4 v1.5 >>>
@using_networkx
def test_deprecated_qcdb_align_b787():
    # Align two CO2 geometries (same atoms, different order/placement) via
    # the deprecated qcdb.align.B787 entry point; the call must still work
    # but emit a FutureWarning.
    soco10 = """
O 1.0 0.0 0.0
C 0.0 0.0 0.0
O -1.0 0.0 0.0
units ang
"""
    sooc12 = """
O 1.2 4.0 0.0
O -1.2 4.0 0.0
C 0.0 4.0 0.0
units ang
"""
    # Known answer: two O atoms displaced by 0.2 Ang each over 3 atoms.
    ref_rmsd = math.sqrt(2. * 0.2 * 0.2 / 3.) # RMSD always in Angstroms
    oco10 = qcel.molparse.from_string(soco10)
    oco12 = qcel.molparse.from_string(sooc12)
    # Geometries converted from Angstroms to Bohr for the aligner.
    oco10_geom_au = oco10['qm']['geom'].reshape((-1, 3)) / qcel.constants.bohr2angstroms
    oco12_geom_au = oco12['qm']['geom'].reshape((-1, 3)) / qcel.constants.bohr2angstroms
    with pytest.warns(FutureWarning) as err:
        rmsd, mill = qcdb.align.B787(
            oco10_geom_au, oco12_geom_au, np.array(['O', 'C', 'O']), np.array(['O', 'O', 'C']), verbose=4, do_plot=False)
    assert compare_values(ref_rmsd, rmsd, 6, 'known rmsd B787')
def test_deprecated_qcdb_align_scramble():
    """compute_scramble via qcdb.align is deprecated and must warn; with all
    transformations disabled the atom map is the identity."""
    with pytest.warns(FutureWarning):
        mill = qcdb.align.compute_scramble(
            4, do_resort=False, do_shift=False, do_rotate=False,
            deflection=1.0, do_mirror=False)
    assert compare_arrays([0, 1, 2, 3], mill.atommap, 4, 'atommap')
# <<< TODO Deprecated! Delete when the error messages are removed. >>>
def test_deprecated_dcft_calls():
    """Every 'dcft'-named entry point must direct users to the 'dct' spelling."""
    psi4.geometry('He')
    err_substr = "All instances of 'dcft' should be replaced with 'dct'."

    for driver_call in (psi4.energy, psi4.optimize, psi4.gradient,
                        psi4.hessian, psi4.frequencies):
        with pytest.raises(psi4.UpgradeHelper) as exc_info:
            driver_call('dcft', basis='cc-pvdz')
        assert err_substr in str(exc_info.value)

    # The errors trapped below are C-side, so they're nameless, Py-side.
    with pytest.raises(Exception) as exc_info:
        psi4.set_module_options('dcft', {'e_convergence': 9})
    assert err_substr in str(exc_info.value)

    with pytest.raises(Exception) as exc_info:
        psi4.set_module_options('dct', {'dcft_functional': 'odc-06'})
    assert err_substr in str(exc_info.value)
| CDSherrill/psi4 | tests/pytests/test_misc.py | Python | lgpl-3.0 | 3,413 | [
"Psi4"
] | 41fda16feb9d56551027f23a48d9042f8cb078ff7cd84632652fab6878797710 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from numpy.testing import (
assert_,
assert_equal,
)
import MDAnalysis as mda
from MDAnalysisTests.topology.base import ParserBase
from MDAnalysis.tests.datafiles import (
LAMMPSdata,
LAMMPScnt, LAMMPScnt2,
LAMMPShyd, LAMMPShyd2,
)
class LammpsBase(ParserBase):
    """Shared checks for LAMMPS DATA topology parsing.

    Subclasses supply reference attributes (``expected_n_atom_types``,
    ``ref_n_bonds``, ``ref_bond``, ...). A falsy ``ref_n_*`` count means
    the corresponding attribute is expected to be absent from the parsed
    topology.
    """
    parser = mda.topology.LAMMPSParser.DATAParser
    expected_n_segments = 1
    expected_attrs = ['types', 'resids', 'masses', 'charges']

    def test_n_atom_types(self):
        # Distinct atom types found in the data file.
        assert_equal(len(set(self.top.types.values)), self.expected_n_atom_types)

    def test_n_bonds(self):
        # Either the expected bond count, or no bonds attribute at all.
        if self.ref_n_bonds:
            assert_equal(len(self.top.bonds.values),
                         self.ref_n_bonds)
        else:
            assert_(not hasattr(self.top, 'bonds'))

    def test_bond_member(self):
        # Spot-check one known bond.
        if self.ref_n_bonds:
            assert_(self.ref_bond in self.top.bonds.values)

    def test_n_angles(self):
        if self.ref_n_angles:
            assert_equal(len(self.top.angles.values),
                         self.ref_n_angles)
        else:
            assert_(not hasattr(self.top, 'angles'))

    def test_angle_member(self):
        if self.ref_n_angles:
            assert_(self.ref_angle in self.top.angles.values)

    def test_n_dihedrals(self):
        if self.ref_n_dihedrals:
            assert_equal(len(self.top.dihedrals.values),
                         self.ref_n_dihedrals)
        else:
            assert_(not hasattr(self.top, 'dihedrals'))

    def test_dihedral_member(self):
        if self.ref_n_dihedrals:
            assert_(self.ref_dihedral in self.top.dihedrals.values)

    def test_n_impropers(self):
        if self.ref_n_impropers:
            assert_equal(len(self.top.impropers.values),
                         self.ref_n_impropers)
        else:
            assert_(not hasattr(self.top, 'impropers'))

    def test_improper_member(self):
        if self.ref_n_impropers:
            assert_(self.ref_improper in self.top.impropers.values)

    def test_creates_universe(self):
        # Smoke test: the file must load into a full Universe.
        u = mda.Universe(self.filename, format='DATA')
class TestLammpsData(LammpsBase):
    """Tests the reading of lammps .data topology files.
    The reading of coords and velocities is done separately in
    test_coordinates
    """
    filename = LAMMPSdata
    # Expected system sizes for the LAMMPSdata test file.
    expected_n_atoms = 18364
    expected_n_atom_types = 10
    expected_n_residues = 25
    # Reference bonded-term counts plus one known member of each kind
    # (tuples of atom indices), consumed by the LammpsBase test methods.
    ref_n_bonds = 18336
    ref_bond = (12, 14)
    ref_n_angles = 29904
    ref_angle = (3, 6, 9)
    ref_n_dihedrals = 5712
    ref_dihedral = (82, 85, 88, 89)
    ref_n_impropers = 0  # zero -> the 'impropers' attribute must be absent
class TestLAMMPSCNT(LammpsBase):
    # Carbon-nanotube data file; reference values for the LammpsBase checks.
    filename = LAMMPScnt
    expected_n_atoms = 604
    expected_n_atom_types = 1
    expected_n_residues = 1
    # Bonded-term counts and one known member of each kind (atom indices).
    ref_n_bonds = 906
    ref_bond = (9, 467)
    ref_n_angles = 1812
    ref_angle = (17, 16, 31)
    ref_n_dihedrals = 3624
    ref_dihedral = (22, 39, 40, 41)
    ref_n_impropers = 604
    ref_improper = (210, 159, 212, 566)
class TestLAMMPSCNT2(TestLAMMPSCNT):
    # Alternate file variant of the same CNT system; inherits all
    # reference values from TestLAMMPSCNT.
    filename = LAMMPScnt2
class TestLAMMPSHYD(LammpsBase):
    # Minimal two-atom system with a single bond and no higher-order
    # bonded terms (counts of 0 -> attributes must be absent).
    filename = LAMMPShyd
    expected_n_atoms = 2
    expected_n_atom_types = 1
    expected_n_residues = 1
    ref_n_bonds = 1
    ref_bond = (0, 1)
    ref_n_angles = 0
    ref_n_dihedrals = 0
    ref_n_impropers = 0
class TestLAMMPSHYD2(TestLAMMPSHYD):
    # Alternate file variant of the same two-atom system; inherits all
    # reference values from TestLAMMPSHYD.
    filename = LAMMPShyd2
| alejob/mdanalysis | testsuite/MDAnalysisTests/topology/test_lammpsdata.py | Python | gpl-2.0 | 4,366 | [
"LAMMPS",
"MDAnalysis"
] | fa607b44fdee368cdd8a4dccb4a7cd7ea531fa89d55d9195041fe6a319842c04 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'misorientationUI.ui'
#
# Created by: PyQt4 UI code generator 4.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# String-conversion shim: the old PyQt4 string API exposes
# QString.fromUtf8; under newer API versions plain unicode strings are
# used, so an identity function is a sufficient stand-in.
def _fromUtf8(s):
    """Return *s* unchanged (fallback when QString.fromUtf8 is absent)."""
    return s


try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # Keep the identity fallback defined above.
    pass
# Translation shim: probe once for the UnicodeUTF8 encoding constant and
# define the matching _translate signature for this PyQt4 version.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
except AttributeError:
    def _translate(context, text, disambig):
        """Translate *text*; newer API takes no encoding argument."""
        return QtGui.QApplication.translate(context, text, disambig)
else:
    def _translate(context, text, disambig):
        """Translate *text* using the explicit UTF-8 encoding constant."""
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
class Ui_Misorientation(object):
    def setupUi(self, Misorientation):
        """Build the full widget hierarchy of the Misorientation main window.

        Auto-generated by pyuic4 from ``misorientationUI.ui``; regenerate
        from the .ui file instead of editing this method by hand.  Creates
        three tabs (settings, plotting, misorientation), the menu bar with
        a Save action, the status bar, and the tab order.
        """
        Misorientation.setObjectName(_fromUtf8("Misorientation"))
        Misorientation.resize(1200, 887)
        self.centralwidget = QtGui.QWidget(Misorientation)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        self.gridLayout_11 = QtGui.QGridLayout(self.centralwidget)
        self.gridLayout_11.setObjectName(_fromUtf8("gridLayout_11"))
        self.tabWidget = QtGui.QTabWidget(self.centralwidget)
        self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
        # --- Tab 1: "settings" -------------------------------------------
        self.settings = QtGui.QWidget()
        self.settings.setObjectName(_fromUtf8("settings"))
        self.gridLayout_7 = QtGui.QGridLayout(self.settings)
        self.gridLayout_7.setMargin(0)
        self.gridLayout_7.setObjectName(_fromUtf8("gridLayout_7"))
        # "Crystal 1" group: structure, lattice parameters, plot colors.
        self.groupBox = QtGui.QGroupBox(self.settings)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
        self.groupBox.setSizePolicy(sizePolicy)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.gridLayout_2 = QtGui.QGridLayout(self.groupBox)
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        self.e_label = QtGui.QLabel(self.groupBox)
        self.e_label.setObjectName(_fromUtf8("e_label"))
        self.gridLayout_2.addWidget(self.e_label, 3, 0, 1, 2)
        self.hexa_button = QtGui.QCheckBox(self.groupBox)
        self.hexa_button.setObjectName(_fromUtf8("hexa_button"))
        self.gridLayout_2.addWidget(self.hexa_button, 4, 0, 1, 6)
        self.alphabetagamma_entry = QtGui.QLineEdit(self.groupBox)
        self.alphabetagamma_entry.setObjectName(_fromUtf8("alphabetagamma_entry"))
        self.gridLayout_2.addWidget(self.alphabetagamma_entry, 2, 1, 1, 5)
        self.structure_box = QtGui.QComboBox(self.groupBox)
        self.structure_box.setObjectName(_fromUtf8("structure_box"))
        self.gridLayout_2.addWidget(self.structure_box, 0, 0, 1, 6)
        self.dm_button = QtGui.QPushButton(self.groupBox)
        self.dm_button.setObjectName(_fromUtf8("dm_button"))
        self.gridLayout_2.addWidget(self.dm_button, 11, 1, 1, 1)
        self.dp_button = QtGui.QPushButton(self.groupBox)
        self.dp_button.setObjectName(_fromUtf8("dp_button"))
        self.gridLayout_2.addWidget(self.dp_button, 11, 3, 1, 1)
        self.abc_label = QtGui.QLabel(self.groupBox)
        self.abc_label.setObjectName(_fromUtf8("abc_label"))
        self.gridLayout_2.addWidget(self.abc_label, 1, 0, 1, 1)
        self.color_trace_bleu = QtGui.QRadioButton(self.groupBox)
        self.color_trace_bleu.setObjectName(_fromUtf8("color_trace_bleu"))
        self.gridLayout_2.addWidget(self.color_trace_bleu, 6, 0, 1, 1)
        self.d_label = QtGui.QLabel(self.groupBox)
        self.d_label.setObjectName(_fromUtf8("d_label"))
        self.gridLayout_2.addWidget(self.d_label, 11, 0, 1, 1)
        self.abc_entry = QtGui.QLineEdit(self.groupBox)
        self.abc_entry.setObjectName(_fromUtf8("abc_entry"))
        self.gridLayout_2.addWidget(self.abc_entry, 1, 1, 1, 5)
        self.d_label_var = QtGui.QLabel(self.groupBox)
        self.d_label_var.setText(_fromUtf8(""))
        self.d_label_var.setObjectName(_fromUtf8("d_label_var"))
        self.gridLayout_2.addWidget(self.d_label_var, 11, 4, 1, 2)
        self.color_trace_rouge = QtGui.QRadioButton(self.groupBox)
        self.color_trace_rouge.setObjectName(_fromUtf8("color_trace_rouge"))
        self.gridLayout_2.addWidget(self.color_trace_rouge, 6, 2, 1, 1)
        self.alphabetagamma_label = QtGui.QLabel(self.groupBox)
        self.alphabetagamma_label.setObjectName(_fromUtf8("alphabetagamma_label"))
        self.gridLayout_2.addWidget(self.alphabetagamma_label, 2, 0, 1, 1)
        self.d_entry = QtGui.QLineEdit(self.groupBox)
        self.d_entry.setObjectName(_fromUtf8("d_entry"))
        self.gridLayout_2.addWidget(self.d_entry, 11, 2, 1, 1)
        self.e_entry = QtGui.QLineEdit(self.groupBox)
        self.e_entry.setObjectName(_fromUtf8("e_entry"))
        self.gridLayout_2.addWidget(self.e_entry, 3, 3, 1, 3)
        self.color_trace_vert = QtGui.QRadioButton(self.groupBox)
        self.color_trace_vert.setObjectName(_fromUtf8("color_trace_vert"))
        self.gridLayout_2.addWidget(self.color_trace_vert, 6, 1, 1, 1)
        self.style_box = QtGui.QCheckBox(self.groupBox)
        self.style_box.setObjectName(_fromUtf8("style_box"))
        self.gridLayout_2.addWidget(self.style_box, 7, 0, 1, 4)
        self.gridLayout_7.addWidget(self.groupBox, 0, 2, 2, 1)
        # "Crystal 2" group: mirrors the Crystal 1 controls ("_2" suffix).
        self.crystal2_box = QtGui.QGroupBox(self.settings)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.crystal2_box.sizePolicy().hasHeightForWidth())
        self.crystal2_box.setSizePolicy(sizePolicy)
        self.crystal2_box.setObjectName(_fromUtf8("crystal2_box"))
        self.gridLayout_4 = QtGui.QGridLayout(self.crystal2_box)
        self.gridLayout_4.setMargin(5)
        self.gridLayout_4.setSpacing(5)
        self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
        self.d_label_var_2 = QtGui.QLabel(self.crystal2_box)
        self.d_label_var_2.setText(_fromUtf8(""))
        self.d_label_var_2.setObjectName(_fromUtf8("d_label_var_2"))
        self.gridLayout_4.addWidget(self.d_label_var_2, 12, 5, 1, 2)
        self.dm_button_2 = QtGui.QPushButton(self.crystal2_box)
        self.dm_button_2.setObjectName(_fromUtf8("dm_button_2"))
        self.gridLayout_4.addWidget(self.dm_button_2, 12, 1, 1, 1)
        self.style_box_2 = QtGui.QCheckBox(self.crystal2_box)
        self.style_box_2.setObjectName(_fromUtf8("style_box_2"))
        self.gridLayout_4.addWidget(self.style_box_2, 8, 0, 1, 7)
        self.abc_label_2 = QtGui.QLabel(self.crystal2_box)
        self.abc_label_2.setObjectName(_fromUtf8("abc_label_2"))
        self.gridLayout_4.addWidget(self.abc_label_2, 1, 0, 1, 1)
        self.d_entry_2 = QtGui.QLineEdit(self.crystal2_box)
        self.d_entry_2.setObjectName(_fromUtf8("d_entry_2"))
        self.gridLayout_4.addWidget(self.d_entry_2, 12, 2, 1, 1)
        self.color_trace_rouge_2 = QtGui.QRadioButton(self.crystal2_box)
        self.color_trace_rouge_2.setObjectName(_fromUtf8("color_trace_rouge_2"))
        self.gridLayout_4.addWidget(self.color_trace_rouge_2, 7, 2, 1, 1)
        self.alphabetagamma_label_2 = QtGui.QLabel(self.crystal2_box)
        self.alphabetagamma_label_2.setObjectName(_fromUtf8("alphabetagamma_label_2"))
        self.gridLayout_4.addWidget(self.alphabetagamma_label_2, 3, 0, 1, 1)
        self.e_label_2 = QtGui.QLabel(self.crystal2_box)
        self.e_label_2.setObjectName(_fromUtf8("e_label_2"))
        self.gridLayout_4.addWidget(self.e_label_2, 5, 0, 1, 4)
        self.e_entry_2 = QtGui.QLineEdit(self.crystal2_box)
        self.e_entry_2.setObjectName(_fromUtf8("e_entry_2"))
        self.gridLayout_4.addWidget(self.e_entry_2, 5, 4, 1, 3)
        self.structure2_box = QtGui.QComboBox(self.crystal2_box)
        self.structure2_box.setObjectName(_fromUtf8("structure2_box"))
        self.gridLayout_4.addWidget(self.structure2_box, 0, 0, 1, 7)
        self.hexa_button_2 = QtGui.QCheckBox(self.crystal2_box)
        self.hexa_button_2.setObjectName(_fromUtf8("hexa_button_2"))
        self.gridLayout_4.addWidget(self.hexa_button_2, 6, 0, 1, 7)
        self.dp_button_2 = QtGui.QPushButton(self.crystal2_box)
        self.dp_button_2.setObjectName(_fromUtf8("dp_button_2"))
        self.gridLayout_4.addWidget(self.dp_button_2, 12, 4, 1, 1)
        self.color_trace_bleu_2 = QtGui.QRadioButton(self.crystal2_box)
        self.color_trace_bleu_2.setObjectName(_fromUtf8("color_trace_bleu_2"))
        self.gridLayout_4.addWidget(self.color_trace_bleu_2, 7, 0, 1, 1)
        self.alphabetagamma_entry_2 = QtGui.QLineEdit(self.crystal2_box)
        self.alphabetagamma_entry_2.setObjectName(_fromUtf8("alphabetagamma_entry_2"))
        self.gridLayout_4.addWidget(self.alphabetagamma_entry_2, 3, 1, 1, 6)
        self.abc_entry_2 = QtGui.QLineEdit(self.crystal2_box)
        self.abc_entry_2.setObjectName(_fromUtf8("abc_entry_2"))
        self.gridLayout_4.addWidget(self.abc_entry_2, 1, 1, 1, 6)
        self.d_label_2 = QtGui.QLabel(self.crystal2_box)
        self.d_label_2.setObjectName(_fromUtf8("d_label_2"))
        self.gridLayout_4.addWidget(self.d_label_2, 12, 0, 1, 1)
        self.color_trace_vert_2 = QtGui.QRadioButton(self.crystal2_box)
        self.color_trace_vert_2.setObjectName(_fromUtf8("color_trace_vert_2"))
        self.gridLayout_4.addWidget(self.color_trace_vert_2, 7, 1, 1, 1)
        self.gridLayout_7.addWidget(self.crystal2_box, 0, 3, 2, 1)
        # "Settings" group: tilt/image angles and work-mode checkboxes.
        self.groupBox_7 = QtGui.QGroupBox(self.settings)
        self.groupBox_7.setObjectName(_fromUtf8("groupBox_7"))
        self.gridLayout_12 = QtGui.QGridLayout(self.groupBox_7)
        self.gridLayout_12.setObjectName(_fromUtf8("gridLayout_12"))
        self.image_angle_entry = QtGui.QLineEdit(self.groupBox_7)
        self.image_angle_entry.setObjectName(_fromUtf8("image_angle_entry"))
        self.gridLayout_12.addWidget(self.image_angle_entry, 1, 1, 1, 1)
        self.real_space_checkBox = QtGui.QCheckBox(self.groupBox_7)
        self.real_space_checkBox.setObjectName(_fromUtf8("real_space_checkBox"))
        self.gridLayout_12.addWidget(self.real_space_checkBox, 2, 0, 1, 1)
        self.image_tilt_y_label = QtGui.QLabel(self.groupBox_7)
        self.image_tilt_y_label.setObjectName(_fromUtf8("image_tilt_y_label"))
        self.gridLayout_12.addWidget(self.image_tilt_y_label, 1, 0, 1, 1)
        self.tilt_angle_entry = QtGui.QLineEdit(self.groupBox_7)
        self.tilt_angle_entry.setObjectName(_fromUtf8("tilt_angle_entry"))
        self.gridLayout_12.addWidget(self.tilt_angle_entry, 0, 1, 1, 1)
        self.tilt_angle_label = QtGui.QLabel(self.groupBox_7)
        self.tilt_angle_label.setObjectName(_fromUtf8("tilt_angle_label"))
        self.gridLayout_12.addWidget(self.tilt_angle_label, 0, 0, 1, 1)
        self.uvw_button = QtGui.QCheckBox(self.groupBox_7)
        self.uvw_button.setObjectName(_fromUtf8("uvw_button"))
        self.gridLayout_12.addWidget(self.uvw_button, 3, 0, 1, 1)
        self.gridLayout_7.addWidget(self.groupBox_7, 0, 0, 1, 2)
        # Display-options group: Wulff net, marker/text sizes, reset view.
        self.groupBox_10 = QtGui.QGroupBox(self.settings)
        self.groupBox_10.setObjectName(_fromUtf8("groupBox_10"))
        self.gridLayout_3 = QtGui.QGridLayout(self.groupBox_10)
        self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
        self.text_size_entry = QtGui.QLineEdit(self.groupBox_10)
        self.text_size_entry.setObjectName(_fromUtf8("text_size_entry"))
        self.gridLayout_3.addWidget(self.text_size_entry, 3, 1, 1, 1)
        self.wulff_button = QtGui.QCheckBox(self.groupBox_10)
        self.wulff_button.setObjectName(_fromUtf8("wulff_button"))
        self.gridLayout_3.addWidget(self.wulff_button, 0, 0, 1, 2)
        self.text_size_label = QtGui.QLabel(self.groupBox_10)
        self.text_size_label.setObjectName(_fromUtf8("text_size_label"))
        self.gridLayout_3.addWidget(self.text_size_label, 3, 0, 1, 1)
        self.size_var = QtGui.QLineEdit(self.groupBox_10)
        self.size_var.setObjectName(_fromUtf8("size_var"))
        self.gridLayout_3.addWidget(self.size_var, 2, 1, 1, 1)
        self.reset_view_button = QtGui.QPushButton(self.groupBox_10)
        self.reset_view_button.setObjectName(_fromUtf8("reset_view_button"))
        self.gridLayout_3.addWidget(self.reset_view_button, 4, 0, 1, 2)
        self.size_var_label = QtGui.QLabel(self.groupBox_10)
        self.size_var_label.setObjectName(_fromUtf8("size_var_label"))
        self.gridLayout_3.addWidget(self.size_var_label, 2, 0, 1, 1)
        self.gridLayout_7.addWidget(self.groupBox_10, 1, 0, 1, 1)
        self.tabWidget.addTab(self.settings, _fromUtf8(""))
        # --- Tab 2: plotting controls + matplotlib canvas area -----------
        self.tab_2 = QtGui.QWidget()
        self.tab_2.setObjectName(_fromUtf8("tab_2"))
        self.gridLayout_10 = QtGui.QGridLayout(self.tab_2)
        self.gridLayout_10.setMargin(0)
        self.gridLayout_10.setSpacing(0)
        self.gridLayout_10.setObjectName(_fromUtf8("gridLayout_10"))
        self.groupBox_6 = QtGui.QGroupBox(self.tab_2)
        self.groupBox_6.setMaximumSize(QtCore.QSize(250, 16777215))
        self.groupBox_6.setTitle(_fromUtf8(""))
        self.groupBox_6.setFlat(False)
        self.groupBox_6.setObjectName(_fromUtf8("groupBox_6"))
        self.gridLayout = QtGui.QGridLayout(self.groupBox_6)
        self.gridLayout.setMargin(0)
        self.gridLayout.setSpacing(0)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        # Euler-angle entry group (phi1/Phi/phi2 for both crystals).
        self.groupBox_5 = QtGui.QGroupBox(self.groupBox_6)
        self.groupBox_5.setObjectName(_fromUtf8("groupBox_5"))
        self.gridLayout_6 = QtGui.QGridLayout(self.groupBox_5)
        self.gridLayout_6.setObjectName(_fromUtf8("gridLayout_6"))
        self.lab_coord = QtGui.QLabel(self.groupBox_5)
        self.lab_coord.setObjectName(_fromUtf8("lab_coord"))
        self.gridLayout_6.addWidget(self.lab_coord, 6, 0, 1, 1)
        self.label_2 = QtGui.QLabel(self.groupBox_5)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout_6.addWidget(self.label_2, 3, 0, 1, 1)
        self.angle_euler_label = QtGui.QLabel(self.groupBox_5)
        self.angle_euler_label.setText(_fromUtf8(""))
        self.angle_euler_label.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse|QtCore.Qt.TextSelectableByMouse)
        self.angle_euler_label.setObjectName(_fromUtf8("angle_euler_label"))
        self.gridLayout_6.addWidget(self.angle_euler_label, 1, 1, 1, 2)
        self.label = QtGui.QLabel(self.groupBox_5)
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout_6.addWidget(self.label, 0, 0, 1, 1)
        self.angle_euler_label_2 = QtGui.QLabel(self.groupBox_5)
        self.angle_euler_label_2.setText(_fromUtf8(""))
        self.angle_euler_label_2.setObjectName(_fromUtf8("angle_euler_label_2"))
        self.gridLayout_6.addWidget(self.angle_euler_label_2, 4, 1, 1, 2)
        self.phi1phiphi2_entry = QtGui.QLineEdit(self.groupBox_5)
        self.phi1phiphi2_entry.setObjectName(_fromUtf8("phi1phiphi2_entry"))
        self.gridLayout_6.addWidget(self.phi1phiphi2_entry, 0, 1, 1, 2)
        self.label_3 = QtGui.QLabel(self.groupBox_5)
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.gridLayout_6.addWidget(self.label_3, 4, 0, 1, 1)
        self.button_trace2 = QtGui.QPushButton(self.groupBox_5)
        self.button_trace2.setObjectName(_fromUtf8("button_trace2"))
        self.gridLayout_6.addWidget(self.button_trace2, 5, 1, 1, 2)
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.gridLayout_6.addItem(spacerItem, 7, 1, 1, 4)
        self.coord_label = QtGui.QLabel(self.groupBox_5)
        self.coord_label.setText(_fromUtf8(""))
        self.coord_label.setObjectName(_fromUtf8("coord_label"))
        self.gridLayout_6.addWidget(self.coord_label, 6, 1, 1, 1)
        self.phi1phiphi2_2_entry = QtGui.QLineEdit(self.groupBox_5)
        self.phi1phiphi2_2_entry.setObjectName(_fromUtf8("phi1phiphi2_2_entry"))
        self.gridLayout_6.addWidget(self.phi1phiphi2_2_entry, 3, 1, 1, 2)
        self.lab_euler2 = QtGui.QLabel(self.groupBox_5)
        self.lab_euler2.setObjectName(_fromUtf8("lab_euler2"))
        self.gridLayout_6.addWidget(self.lab_euler2, 1, 0, 1, 1)
        self.gridLayout.addWidget(self.groupBox_5, 3, 0, 1, 2)
        # Pole-plotting group: add/remove poles, symmetry, plane traces.
        self.groupBox_4 = QtGui.QGroupBox(self.groupBox_6)
        self.groupBox_4.setObjectName(_fromUtf8("groupBox_4"))
        self.gridLayout_5 = QtGui.QGridLayout(self.groupBox_4)
        self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
        self.pole_entry = QtGui.QLineEdit(self.groupBox_4)
        self.pole_entry.setObjectName(_fromUtf8("pole_entry"))
        self.gridLayout_5.addWidget(self.pole_entry, 0, 0, 1, 3)
        self.undo_trace_plan_sym_button = QtGui.QPushButton(self.groupBox_4)
        self.undo_trace_plan_sym_button.setObjectName(_fromUtf8("undo_trace_plan_sym_button"))
        self.gridLayout_5.addWidget(self.undo_trace_plan_sym_button, 4, 2, 1, 1)
        self.trace_plan_sym_button = QtGui.QPushButton(self.groupBox_4)
        self.trace_plan_sym_button.setObjectName(_fromUtf8("trace_plan_sym_button"))
        self.gridLayout_5.addWidget(self.trace_plan_sym_button, 4, 0, 1, 2)
        self.addpole_button = QtGui.QPushButton(self.groupBox_4)
        self.addpole_button.setObjectName(_fromUtf8("addpole_button"))
        self.gridLayout_5.addWidget(self.addpole_button, 1, 0, 1, 2)
        self.undo_addpole_button = QtGui.QPushButton(self.groupBox_4)
        self.undo_addpole_button.setObjectName(_fromUtf8("undo_addpole_button"))
        self.gridLayout_5.addWidget(self.undo_addpole_button, 1, 2, 1, 1)
        self.undo_sym_button = QtGui.QPushButton(self.groupBox_4)
        self.undo_sym_button.setObjectName(_fromUtf8("undo_sym_button"))
        self.gridLayout_5.addWidget(self.undo_sym_button, 2, 2, 1, 1)
        self.sym_button = QtGui.QPushButton(self.groupBox_4)
        self.sym_button.setObjectName(_fromUtf8("sym_button"))
        self.gridLayout_5.addWidget(self.sym_button, 2, 0, 1, 2)
        self.trace_plan_button = QtGui.QPushButton(self.groupBox_4)
        self.trace_plan_button.setObjectName(_fromUtf8("trace_plan_button"))
        self.gridLayout_5.addWidget(self.trace_plan_button, 3, 0, 1, 2)
        self.undo_trace_plan_button = QtGui.QPushButton(self.groupBox_4)
        self.undo_trace_plan_button.setObjectName(_fromUtf8("undo_trace_plan_button"))
        self.gridLayout_5.addWidget(self.undo_trace_plan_button, 3, 2, 1, 1)
        self.gridLayout.addWidget(self.groupBox_4, 2, 0, 1, 2)
        # Crystal-selection group: which crystal the controls act on.
        self.groupBox_2 = QtGui.QGroupBox(self.groupBox_6)
        self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
        self.gridLayout_8 = QtGui.QGridLayout(self.groupBox_2)
        self.gridLayout_8.setObjectName(_fromUtf8("gridLayout_8"))
        self.crystal1_radioButton = QtGui.QRadioButton(self.groupBox_2)
        self.crystal1_radioButton.setObjectName(_fromUtf8("crystal1_radioButton"))
        self.gridLayout_8.addWidget(self.crystal1_radioButton, 0, 0, 1, 1)
        self.crystal2_radioButton = QtGui.QRadioButton(self.groupBox_2)
        self.crystal2_radioButton.setObjectName(_fromUtf8("crystal2_radioButton"))
        self.gridLayout_8.addWidget(self.crystal2_radioButton, 0, 1, 1, 1)
        self.gridLayout.addWidget(self.groupBox_2, 0, 0, 1, 1)
        # Rotation group: alpha/beta/z increments, sign boxes, g-rotation.
        self.groupBox_3 = QtGui.QGroupBox(self.groupBox_6)
        self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
        self.gridLayout_9 = QtGui.QGridLayout(self.groupBox_3)
        self.gridLayout_9.setObjectName(_fromUtf8("gridLayout_9"))
        self.angle_alpha_label_2 = QtGui.QLabel(self.groupBox_3)
        self.angle_alpha_label_2.setText(_fromUtf8(""))
        self.angle_alpha_label_2.setObjectName(_fromUtf8("angle_alpha_label_2"))
        self.gridLayout_9.addWidget(self.angle_alpha_label_2, 2, 3, 1, 1)
        self.angle_beta_label_2 = QtGui.QLabel(self.groupBox_3)
        self.angle_beta_label_2.setText(_fromUtf8(""))
        self.angle_beta_label_2.setObjectName(_fromUtf8("angle_beta_label_2"))
        self.gridLayout_9.addWidget(self.angle_beta_label_2, 4, 3, 1, 1)
        self.angle_z_label_2 = QtGui.QLabel(self.groupBox_3)
        self.angle_z_label_2.setText(_fromUtf8(""))
        self.angle_z_label_2.setObjectName(_fromUtf8("angle_z_label_2"))
        self.gridLayout_9.addWidget(self.angle_z_label_2, 6, 3, 1, 1)
        self.angle_z_buttonp = QtGui.QPushButton(self.groupBox_3)
        self.angle_z_buttonp.setObjectName(_fromUtf8("angle_z_buttonp"))
        self.gridLayout_9.addWidget(self.angle_z_buttonp, 6, 2, 1, 1)
        self.angle_alpha_buttonm = QtGui.QPushButton(self.groupBox_3)
        self.angle_alpha_buttonm.setObjectName(_fromUtf8("angle_alpha_buttonm"))
        self.gridLayout_9.addWidget(self.angle_alpha_buttonm, 2, 0, 1, 1)
        self.angle_alpha_label = QtGui.QLabel(self.groupBox_3)
        self.angle_alpha_label.setObjectName(_fromUtf8("angle_alpha_label"))
        self.gridLayout_9.addWidget(self.angle_alpha_label, 1, 0, 1, 2)
        self.lock_checkButton = QtGui.QCheckBox(self.groupBox_3)
        self.lock_checkButton.setObjectName(_fromUtf8("lock_checkButton"))
        self.gridLayout_9.addWidget(self.lock_checkButton, 0, 0, 1, 3)
        self.angle_z_label = QtGui.QLabel(self.groupBox_3)
        self.angle_z_label.setObjectName(_fromUtf8("angle_z_label"))
        self.gridLayout_9.addWidget(self.angle_z_label, 5, 0, 1, 2)
        self.angle_z_entry = QtGui.QLineEdit(self.groupBox_3)
        self.angle_z_entry.setObjectName(_fromUtf8("angle_z_entry"))
        self.gridLayout_9.addWidget(self.angle_z_entry, 6, 1, 1, 1)
        self.angle_z_buttonm = QtGui.QPushButton(self.groupBox_3)
        self.angle_z_buttonm.setObjectName(_fromUtf8("angle_z_buttonm"))
        self.gridLayout_9.addWidget(self.angle_z_buttonm, 6, 0, 1, 1)
        self.theta_signBox = QtGui.QCheckBox(self.groupBox_3)
        self.theta_signBox.setText(_fromUtf8(""))
        self.theta_signBox.setObjectName(_fromUtf8("theta_signBox"))
        self.gridLayout_9.addWidget(self.theta_signBox, 5, 2, 1, 1)
        self.angle_beta_buttonp = QtGui.QPushButton(self.groupBox_3)
        self.angle_beta_buttonp.setObjectName(_fromUtf8("angle_beta_buttonp"))
        self.gridLayout_9.addWidget(self.angle_beta_buttonp, 4, 2, 1, 1)
        self.angle_beta_entry = QtGui.QLineEdit(self.groupBox_3)
        self.angle_beta_entry.setObjectName(_fromUtf8("angle_beta_entry"))
        self.gridLayout_9.addWidget(self.angle_beta_entry, 4, 1, 1, 1)
        self.angle_beta_buttonm = QtGui.QPushButton(self.groupBox_3)
        self.angle_beta_buttonm.setObjectName(_fromUtf8("angle_beta_buttonm"))
        self.gridLayout_9.addWidget(self.angle_beta_buttonm, 4, 0, 1, 1)
        self.beta_signBox = QtGui.QCheckBox(self.groupBox_3)
        self.beta_signBox.setText(_fromUtf8(""))
        self.beta_signBox.setObjectName(_fromUtf8("beta_signBox"))
        self.gridLayout_9.addWidget(self.beta_signBox, 3, 2, 1, 1)
        self.alpha_signBox = QtGui.QCheckBox(self.groupBox_3)
        self.alpha_signBox.setText(_fromUtf8(""))
        self.alpha_signBox.setObjectName(_fromUtf8("alpha_signBox"))
        self.gridLayout_9.addWidget(self.alpha_signBox, 1, 2, 1, 1)
        self.angle_alpha_buttonp = QtGui.QPushButton(self.groupBox_3)
        self.angle_alpha_buttonp.setObjectName(_fromUtf8("angle_alpha_buttonp"))
        self.gridLayout_9.addWidget(self.angle_alpha_buttonp, 2, 2, 1, 1)
        self.angle_alpha_entry = QtGui.QLineEdit(self.groupBox_3)
        self.angle_alpha_entry.setObjectName(_fromUtf8("angle_alpha_entry"))
        self.gridLayout_9.addWidget(self.angle_alpha_entry, 2, 1, 1, 1)
        self.angle_beta_label = QtGui.QLabel(self.groupBox_3)
        self.angle_beta_label.setObjectName(_fromUtf8("angle_beta_label"))
        self.gridLayout_9.addWidget(self.angle_beta_label, 3, 0, 1, 2)
        self.rot_gp_button = QtGui.QPushButton(self.groupBox_3)
        self.rot_gp_button.setObjectName(_fromUtf8("rot_gp_button"))
        self.gridLayout_9.addWidget(self.rot_gp_button, 8, 2, 1, 1)
        self.rot_g_entry = QtGui.QLineEdit(self.groupBox_3)
        self.rot_g_entry.setObjectName(_fromUtf8("rot_g_entry"))
        self.gridLayout_9.addWidget(self.rot_g_entry, 8, 1, 1, 1)
        self.label_4 = QtGui.QLabel(self.groupBox_3)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.gridLayout_9.addWidget(self.label_4, 7, 0, 1, 1)
        self.rot_gm_button = QtGui.QPushButton(self.groupBox_3)
        self.rot_gm_button.setObjectName(_fromUtf8("rot_gm_button"))
        self.gridLayout_9.addWidget(self.rot_gm_button, 8, 0, 1, 1)
        self.rg_label = QtGui.QLabel(self.groupBox_3)
        self.rg_label.setText(_fromUtf8(""))
        self.rg_label.setObjectName(_fromUtf8("rg_label"))
        self.gridLayout_9.addWidget(self.rg_label, 8, 3, 1, 1)
        self.gridLayout.addWidget(self.groupBox_3, 1, 0, 1, 1)
        self.gridLayout_10.addWidget(self.groupBox_6, 0, 1, 1, 1)
        # mplvl is an empty layout slot; the matplotlib canvas is inserted
        # here at runtime by the application code (not generated).
        self.mplvl = QtGui.QGridLayout()
        self.mplvl.setObjectName(_fromUtf8("mplvl"))
        self.gridLayout_10.addLayout(self.mplvl, 0, 0, 1, 1)
        self.tabWidget.addTab(self.tab_2, _fromUtf8(""))
        # --- Tab 3: misorientation computation and results list ----------
        self.misorientation = QtGui.QWidget()
        self.misorientation.setObjectName(_fromUtf8("misorientation"))
        self.gridLayout_13 = QtGui.QGridLayout(self.misorientation)
        self.gridLayout_13.setMargin(0)
        self.gridLayout_13.setObjectName(_fromUtf8("gridLayout_13"))
        self.misorientation_button = QtGui.QPushButton(self.misorientation)
        self.misorientation_button.setObjectName(_fromUtf8("misorientation_button"))
        self.gridLayout_13.addWidget(self.misorientation_button, 0, 0, 1, 1)
        self.numbers_checkBox = QtGui.QCheckBox(self.misorientation)
        self.numbers_checkBox.setObjectName(_fromUtf8("numbers_checkBox"))
        self.gridLayout_13.addWidget(self.numbers_checkBox, 2, 0, 1, 1)
        self.axis_checkBox = QtGui.QCheckBox(self.misorientation)
        self.axis_checkBox.setObjectName(_fromUtf8("axis_checkBox"))
        self.gridLayout_13.addWidget(self.axis_checkBox, 3, 0, 1, 1)
        self.misorientation_list = QtGui.QListWidget(self.misorientation)
        self.misorientation_list.setObjectName(_fromUtf8("misorientation_list"))
        self.gridLayout_13.addWidget(self.misorientation_list, 4, 0, 1, 1)
        self.clear_misorientation_button = QtGui.QPushButton(self.misorientation)
        self.clear_misorientation_button.setObjectName(_fromUtf8("clear_misorientation_button"))
        self.gridLayout_13.addWidget(self.clear_misorientation_button, 1, 0, 1, 1)
        self.tabWidget.addTab(self.misorientation, _fromUtf8(""))
        self.gridLayout_11.addWidget(self.tabWidget, 0, 0, 1, 1)
        # --- Window chrome: menu bar, status bar, actions -----------------
        Misorientation.setCentralWidget(self.centralwidget)
        self.menubar = QtGui.QMenuBar(Misorientation)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1200, 23))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        self.menuSave = QtGui.QMenu(self.menubar)
        self.menuSave.setObjectName(_fromUtf8("menuSave"))
        Misorientation.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(Misorientation)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        Misorientation.setStatusBar(self.statusbar)
        self.actionSave_figure = QtGui.QAction(Misorientation)
        self.actionSave_figure.setObjectName(_fromUtf8("actionSave_figure"))
        self.actionCalculate_Schmid_factor = QtGui.QAction(Misorientation)
        self.actionCalculate_Schmid_factor.setObjectName(_fromUtf8("actionCalculate_Schmid_factor"))
        self.actionCalculate_angle = QtGui.QAction(Misorientation)
        self.actionCalculate_angle.setObjectName(_fromUtf8("actionCalculate_angle"))
        self.actionCalculate_xyz = QtGui.QAction(Misorientation)
        self.actionCalculate_xyz.setObjectName(_fromUtf8("actionCalculate_xyz"))
        self.actionCalculate_apparent_width = QtGui.QAction(Misorientation)
        self.actionCalculate_apparent_width.setObjectName(_fromUtf8("actionCalculate_apparent_width"))
        self.actionPlanes = QtGui.QAction(Misorientation)
        self.actionPlanes.setObjectName(_fromUtf8("actionPlanes"))
        self.actionProj_directions = QtGui.QAction(Misorientation)
        self.actionProj_directions.setObjectName(_fromUtf8("actionProj_directions"))
        self.actionPlane_cone = QtGui.QAction(Misorientation)
        self.actionPlane_cone.setObjectName(_fromUtf8("actionPlane_cone"))
        self.actionCalculate_intersections = QtGui.QAction(Misorientation)
        self.actionCalculate_intersections.setObjectName(_fromUtf8("actionCalculate_intersections"))
        self.actionHkl_uvw = QtGui.QAction(Misorientation)
        self.actionHkl_uvw.setObjectName(_fromUtf8("actionHkl_uvw"))
        self.actionPlot_Kikuchi_lines = QtGui.QAction(Misorientation)
        self.actionPlot_Kikuchi_lines.setObjectName(_fromUtf8("actionPlot_Kikuchi_lines"))
        # Only the Save action is wired into the menu here; the other
        # actions are created but attached elsewhere (or unused).
        self.menuSave.addAction(self.actionSave_figure)
        self.menubar.addAction(self.menuSave.menuAction())
        # Apply translated texts, pick the initial tab, auto-connect slots.
        self.retranslateUi(Misorientation)
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(Misorientation)
        # Keyboard tab order across all interactive widgets.
        Misorientation.setTabOrder(self.tabWidget, self.tilt_angle_entry)
        Misorientation.setTabOrder(self.tilt_angle_entry, self.image_angle_entry)
        Misorientation.setTabOrder(self.image_angle_entry, self.structure_box)
        Misorientation.setTabOrder(self.structure_box, self.abc_entry)
        Misorientation.setTabOrder(self.abc_entry, self.alphabetagamma_entry)
        Misorientation.setTabOrder(self.alphabetagamma_entry, self.e_entry)
        Misorientation.setTabOrder(self.e_entry, self.dm_button)
        Misorientation.setTabOrder(self.dm_button, self.structure2_box)
        Misorientation.setTabOrder(self.structure2_box, self.abc_entry_2)
        Misorientation.setTabOrder(self.abc_entry_2, self.alphabetagamma_entry_2)
        Misorientation.setTabOrder(self.alphabetagamma_entry_2, self.e_entry_2)
        Misorientation.setTabOrder(self.e_entry_2, self.lock_checkButton)
        Misorientation.setTabOrder(self.lock_checkButton, self.alpha_signBox)
        Misorientation.setTabOrder(self.alpha_signBox, self.angle_alpha_buttonm)
        Misorientation.setTabOrder(self.angle_alpha_buttonm, self.angle_alpha_entry)
        Misorientation.setTabOrder(self.angle_alpha_entry, self.angle_alpha_buttonp)
        Misorientation.setTabOrder(self.angle_alpha_buttonp, self.beta_signBox)
        Misorientation.setTabOrder(self.beta_signBox, self.angle_beta_buttonm)
        Misorientation.setTabOrder(self.angle_beta_buttonm, self.angle_beta_entry)
        Misorientation.setTabOrder(self.angle_beta_entry, self.angle_beta_buttonp)
        Misorientation.setTabOrder(self.angle_beta_buttonp, self.theta_signBox)
        Misorientation.setTabOrder(self.theta_signBox, self.angle_z_buttonm)
        Misorientation.setTabOrder(self.angle_z_buttonm, self.angle_z_entry)
        Misorientation.setTabOrder(self.angle_z_entry, self.angle_z_buttonp)
        Misorientation.setTabOrder(self.angle_z_buttonp, self.pole_entry)
        Misorientation.setTabOrder(self.pole_entry, self.addpole_button)
        Misorientation.setTabOrder(self.addpole_button, self.undo_addpole_button)
        Misorientation.setTabOrder(self.undo_addpole_button, self.sym_button)
        Misorientation.setTabOrder(self.sym_button, self.undo_sym_button)
        Misorientation.setTabOrder(self.undo_sym_button, self.trace_plan_button)
        Misorientation.setTabOrder(self.trace_plan_button, self.undo_trace_plan_button)
        Misorientation.setTabOrder(self.undo_trace_plan_button, self.trace_plan_sym_button)
        Misorientation.setTabOrder(self.trace_plan_sym_button, self.undo_trace_plan_sym_button)
        Misorientation.setTabOrder(self.undo_trace_plan_sym_button, self.phi1phiphi2_entry)
        Misorientation.setTabOrder(self.phi1phiphi2_entry, self.button_trace2)
def retranslateUi(self, Misorientation):
Misorientation.setWindowTitle(_translate("Misorientation", "Misorientation", None))
self.groupBox.setTitle(_translate("Misorientation", "Crystal 1", None))
self.e_label.setText(_translate("Misorientation", "max indices", None))
self.hexa_button.setText(_translate("Misorientation", "hexa", None))
self.dm_button.setText(_translate("Misorientation", "-", None))
self.dp_button.setText(_translate("Misorientation", "+", None))
self.abc_label.setText(_translate("Misorientation", "a,b,c", None))
self.color_trace_bleu.setText(_translate("Misorientation", "blue", None))
self.d_label.setText(_translate("Misorientation", "d", None))
self.color_trace_rouge.setText(_translate("Misorientation", "red", None))
self.alphabetagamma_label.setText(_translate("Misorientation", "<p>α, β, γ</p>", None))
self.color_trace_vert.setText(_translate("Misorientation", "green", None))
self.style_box.setText(_translate("Misorientation", "open/filled", None))
self.crystal2_box.setTitle(_translate("Misorientation", "Crystal 2", None))
self.dm_button_2.setText(_translate("Misorientation", "-", None))
self.style_box_2.setText(_translate("Misorientation", "open/filled", None))
self.abc_label_2.setText(_translate("Misorientation", "a,b,c", None))
self.color_trace_rouge_2.setText(_translate("Misorientation", "red", None))
self.alphabetagamma_label_2.setText(_translate("Misorientation", "<p>α, β, γ</p>", None))
self.e_label_2.setText(_translate("Misorientation", "max indices", None))
self.hexa_button_2.setText(_translate("Misorientation", "hexa", None))
self.dp_button_2.setText(_translate("Misorientation", "+", None))
self.color_trace_bleu_2.setText(_translate("Misorientation", "blue", None))
self.d_label_2.setText(_translate("Misorientation", "d", None))
self.color_trace_vert_2.setText(_translate("Misorientation", "green", None))
self.groupBox_7.setTitle(_translate("Misorientation", "Settings", None))
self.real_space_checkBox.setText(_translate("Misorientation", "work in real space", None))
self.image_tilt_y_label.setText(_translate("Misorientation", "Image α-tilt/y angle", None))
self.tilt_angle_label.setText(_translate("Misorientation", "Diffraction α-tilt/y angle", None))
self.uvw_button.setText(_translate("Misorientation", "uvw", None))
self.groupBox_10.setTitle(_translate("Misorientation", "Layout", None))
self.wulff_button.setText(_translate("Misorientation", "Wulff net", None))
self.text_size_label.setText(_translate("Misorientation", "Text size", None))
self.reset_view_button.setText(_translate("Misorientation", "Update/Reset view", None))
self.size_var_label.setText(_translate("Misorientation", "Marker size", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.settings), _translate("Misorientation", "settings", None))
self.groupBox_5.setTitle(_translate("Misorientation", "Euler Angles", None))
self.lab_coord.setText(_translate("Misorientation", "Tilt, Inclin.", None))
self.label_2.setText(_translate("Misorientation", "Crystal 2", None))
self.label.setText(_translate("Misorientation", "Crystal 1", None))
self.label_3.setText(_translate("Misorientation", "φ 1 , Φ , φ2", None))
self.button_trace2.setText(_translate("Misorientation", "PLOT", None))
self.lab_euler2.setText(_translate("Misorientation", "<html><head/><body><p>φ<span style=\" vertical-align:sub;\"> 1</span> , Φ , φ<span style=\" vertical-align:sub;\">2</span></p></body></html>", None))
self.groupBox_4.setTitle(_translate("Misorientation", "Pole/Direction", None))
self.undo_trace_plan_sym_button.setText(_translate("Misorientation", "-", None))
self.trace_plan_sym_button.setText(_translate("Misorientation", "Sym Plane", None))
self.addpole_button.setText(_translate("Misorientation", "Add", None))
self.undo_addpole_button.setText(_translate("Misorientation", "-", None))
self.undo_sym_button.setText(_translate("Misorientation", "-", None))
self.sym_button.setText(_translate("Misorientation", "Symmetry", None))
self.trace_plan_button.setText(_translate("Misorientation", " Plane", None))
self.undo_trace_plan_button.setText(_translate("Misorientation", "-", None))
self.groupBox_2.setTitle(_translate("Misorientation", "Switch", None))
self.crystal1_radioButton.setText(_translate("Misorientation", "crystal 1", None))
self.crystal2_radioButton.setText(_translate("Misorientation", "crystal 2", None))
self.groupBox_3.setTitle(_translate("Misorientation", "Rotation", None))
self.angle_z_buttonp.setText(_translate("Misorientation", "+", None))
self.angle_alpha_buttonm.setText(_translate("Misorientation", "-", None))
self.angle_alpha_label.setText(_translate("Misorientation", "<html><head/><body><p>α (AC)</p></body></html>", None))
self.lock_checkButton.setText(_translate("Misorientation", "Lock Axes", None))
self.angle_z_label.setText(_translate("Misorientation", "<html><head/><body><p>θ (AC)</p></body></html>", None))
self.angle_z_buttonm.setText(_translate("Misorientation", "-", None))
self.angle_beta_buttonp.setText(_translate("Misorientation", "+", None))
self.angle_beta_buttonm.setText(_translate("Misorientation", "-", None))
self.angle_alpha_buttonp.setText(_translate("Misorientation", "+", None))
self.angle_beta_label.setText(_translate("Misorientation", "<html><head/><body><p>β (AC)</p></body></html>", None))
self.rot_gp_button.setText(_translate("Misorientation", "+", None))
self.label_4.setText(_translate("Misorientation", "g", None))
self.rot_gm_button.setText(_translate("Misorientation", "-", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("Misorientation", "stereo-proj", None))
self.misorientation_button.setText(_translate("Misorientation", "Find misorientation & Plot Angle/Axes", None))
self.numbers_checkBox.setText(_translate("Misorientation", "show numbers", None))
self.axis_checkBox.setText(_translate("Misorientation", "show axes", None))
self.clear_misorientation_button.setText(_translate("Misorientation", "Clear plot", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.misorientation), _translate("Misorientation", "misorientation", None))
self.menuSave.setTitle(_translate("Misorientation", "Save", None))
self.actionSave_figure.setText(_translate("Misorientation", "Save figure", None))
self.actionCalculate_Schmid_factor.setText(_translate("Misorientation", "calculate Schmid factor", None))
self.actionCalculate_angle.setText(_translate("Misorientation", "Calculate angle", None))
self.actionCalculate_xyz.setText(_translate("Misorientation", "calculate xyz directions", None))
self.actionCalculate_apparent_width.setText(_translate("Misorientation", "Calculate apparent width", None))
self.actionPlanes.setText(_translate("Misorientation", "planes", None))
self.actionProj_directions.setText(_translate("Misorientation", "proj. directions", None))
self.actionPlane_cone.setText(_translate("Misorientation", "plane-cone", None))
self.actionCalculate_intersections.setText(_translate("Misorientation", "Calculate intersections", None))
self.actionHkl_uvw.setText(_translate("Misorientation", "hkl <> uvw", None))
self.actionPlot_Kikuchi_lines.setText(_translate("Misorientation", "plot Kikuchi lines or diffraction pattern", None))
| mompiou/misorientation | misorientationUI.py | Python | gpl-2.0 | 40,179 | [
"CRYSTAL"
] | 38b24437e480f908b87ee9410d746a531d9b6c30fae64f0a258906ff64a6961c |
import pymol
import sys
file1 = "3NXY.pdb"
pymol.finish_launching()
pymol.cmd.load(file1)
| rishiraj824/PymolDSA | extrastuff/files1.py | Python | gpl-3.0 | 104 | [
"PyMOL"
] | 5dbf2b6a7848e5840d7de55d7357826532c7e1138f091874d4c9e2fc66dc7c7b |
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import datetime
import difflib
import glob
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"filenames",
help="list of files to check, all files if unspecified",
nargs='*')
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
"--rootdir", default=rootdir, help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument(
"--boilerplate-dir", default=default_boilerplate_dir)
parser.add_argument(
"-v", "--verbose",
help="give verbose output regarding why a file does not pass",
action="store_true")
args = parser.parse_args()
verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
def get_refs():
refs = {}
for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
extension = os.path.basename(path).split(".")[1]
ref_file = open(path, 'r')
ref = ref_file.read().splitlines()
ref_file.close()
refs[extension] = ref
return refs
def is_generated_file(filename, data, regexs):
for d in skipped_ungenerated_files:
if d in filename:
return False
p = regexs["generated"]
return p.search(data)
def file_passes(filename, refs, regexs):
try:
f = open(filename, 'r')
except Exception as exc:
print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
return False
data = f.read()
f.close()
# determine if the file is automatically generated
generated = is_generated_file(filename, data, regexs)
basename = os.path.basename(filename)
extension = file_extension(filename)
if generated:
if extension == "go":
extension = "generatego"
elif extension == "bzl":
extension = "generatebzl"
if extension != "":
ref = refs[extension]
else:
ref = refs[basename]
# remove extra content from the top of files
if extension == "go" or extension == "generatego":
p = regexs["go_build_constraints"]
(data, found) = p.subn("", data, 1)
elif extension == "sh":
p = regexs["shebang"]
(data, found) = p.subn("", data, 1)
data = data.splitlines()
# if our test file is smaller than the reference it surely fails!
if len(ref) > len(data):
print('File %s smaller than reference (%d < %d)' %
(filename, len(data), len(ref)),
file=verbose_out)
return False
# trim our file to the same number of lines as the reference file
data = data[:len(ref)]
p = regexs["year"]
for d in data:
if p.search(d):
if generated:
print('File %s has the YEAR field, but it should not be in generated file' % filename, file=verbose_out)
else:
print('File %s has the YEAR field, but missing the year of date' % filename, file=verbose_out)
return False
if not generated:
# Replace all occurrences of the regex "2014|2015|2016|2017|2018" with "YEAR"
p = regexs["date"]
for i, d in enumerate(data):
(data[i], found) = p.subn('YEAR', d)
if found != 0:
break
# if we don't match the reference at this point, fail
if ref != data:
print("Header in %s does not match reference, diff:" % filename, file=verbose_out)
if args.verbose:
print(file=verbose_out)
for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
print(line, file=verbose_out)
print(file=verbose_out)
return False
return True
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh',
"vendor", "test/e2e/generated/bindata.go", "hack/boilerplate/test",
"pkg/kubectl/generated/bindata.go"]
# list all the files contain 'DO NOT EDIT', but are not generated
skipped_ungenerated_files = ['hack/build-ui.sh', 'hack/lib/swagger.sh',
'hack/boilerplate/boilerplate.py']
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in skipped_dirs):
continue
newfiles.append(pathname)
for i, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[i] = os.path.join(args.rootdir, pathname)
return newfiles
def get_files(extensions):
files = []
if len(args.filenames) > 0:
files = args.filenames
else:
for root, dirs, walkfiles in os.walk(args.rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for d in skipped_dirs:
if d in dirs:
dirs.remove(d)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
basename = os.path.basename(pathname)
extension = file_extension(pathname)
if extension in extensions or basename in extensions:
outfiles.append(pathname)
return outfiles
def get_dates():
years = datetime.datetime.now().year
return '(%s)' % '|'.join((str(year) for year in range(2014, years+1)))
def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
regexs["year"] = re.compile( 'YEAR' )
# get_dates return 2014, 2015, 2016, 2017, or 2018 until the current year as a regex like: "(2014|2015|2016|2017|2018)";
# company holder names can be anything
regexs["date"] = re.compile(get_dates())
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
# strip #!.* from shell scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
# Search for generated files
regexs["generated"] = re.compile( 'DO NOT EDIT' )
return regexs
def main():
regexs = get_regexs()
refs = get_refs()
filenames = get_files(refs.keys())
for filename in filenames:
if not file_passes(filename, refs, regexs):
print(filename, file=sys.stdout)
return 0
if __name__ == "__main__":
sys.exit(main())
| krzyzacy/kubernetes | hack/boilerplate/boilerplate.py | Python | apache-2.0 | 7,404 | [
"VisIt"
] | 05c341f8036b6c14ee48ae2c537f1c566d3484d226ee22b0ce5ed5a07617e683 |
"""
tint.tracks
===========
Cell_tracks class.
"""
import copy
import datetime
import numpy as np
import pandas as pd
from .grid_utils import get_grid_size, get_radar_info, extract_grid_data
from .helpers import Record, Counter
from .phase_correlation import get_global_shift
from .matching import get_pairs
from .objects import init_current_objects, update_current_objects
from .objects import get_object_prop, write_tracks
# Tracking Parameter Defaults
FIELD_THRESH = 32
ISO_THRESH = 8
ISO_SMOOTH = 3
MIN_SIZE = 32
NEAR_THRESH = 4
SEARCH_MARGIN = 8
FLOW_MARGIN = 20
MAX_DISPARITY = 999
MAX_FLOW_MAG = 50
MAX_SHIFT_DISP = 15
GS_ALT = 1500
"""
Tracking Parameter Guide
------------------------
FIELD_THRESH : units of 'field' attribute
The threshold used for object detection. Detected objects are connnected
pixels above this threshold.
ISO_THRESH : units of 'field' attribute
Used in isolated cell classification. Isolated cells must not be connected
to any other cell by contiguous pixels above this threshold.
ISO_SMOOTH : pixels
Gaussian smoothing parameter in peak detection preprocessing. See
single_max in tint.objects.
MIN_SIZE : pixels
The minimum size threshold in pixels for an object to be detected.
SEARCH_MARGIN : pixels
The radius of the search box around the predicted object center.
FLOW_MARGIN : pixels
The margin size around the object extent on which to perform phase
correlation.
MAX_DISPARITY : float
Maximum allowable disparity value. Larger disparity values are sent to
LARGE_NUM.
MAX_FLOW_MAG : pixels
Maximum allowable global shift magnitude. See get_global_shift in
tint.phase_correlation.
MAX_SHIFT_DISP : float
Maximum magnitude of difference in meters per second for two shifts to be
considered in agreement. See correct_shift in tint.matching.
GS_ALT : meters
Altitude in meters at which to perform phase correlation for global shift
calculation. See correct_shift in tint.matching.
"""
class Cell_tracks(object):
"""
This is the main class in the module. It allows tracks
objects to be built using lists of pyart grid objects.
Attributes
----------
params : dict
Parameters for the tracking algorithm.
field : str
String specifying pyart grid field to be used for tracking. Default is
'reflectivity'.
grid_size : array
Array containing z, y, and x mesh size in meters respectively.
last_grid : Grid
Contains the most recent grid object tracked. This is used for dynamic
updates.
counter : Counter
See Counter class.
record : Record
See Record class.
current_objects : dict
Contains information about objects in the current scan.
tracks : DataFrame
__saved_record : Record
Deep copy of Record at the penultimate scan in the sequence. This and
following 2 attributes used for link-up in dynamic updates.
__saved_counter : Counter
Deep copy of Counter.
__saved_objects : dict
Deep copy of current_objects.
"""
def __init__(self, field='reflectivity'):
self.params = {'FIELD_THRESH': FIELD_THRESH,
'MIN_SIZE': MIN_SIZE,
'SEARCH_MARGIN': SEARCH_MARGIN,
'FLOW_MARGIN': FLOW_MARGIN,
'MAX_FLOW_MAG': MAX_FLOW_MAG,
'MAX_DISPARITY': MAX_DISPARITY,
'MAX_SHIFT_DISP': MAX_SHIFT_DISP,
'ISO_THRESH': ISO_THRESH,
'ISO_SMOOTH': ISO_SMOOTH,
'GS_ALT': GS_ALT}
self.field = field
self.grid_size = None
self.radar_info = None
self.last_grid = None
self.counter = None
self.record = None
self.current_objects = None
self.tracks = pd.DataFrame()
self.__saved_record = None
self.__saved_counter = None
self.__saved_objects = None
def __save(self):
""" Saves deep copies of record, counter, and current_objects. """
self.__saved_record = copy.deepcopy(self.record)
self.__saved_counter = copy.deepcopy(self.counter)
self.__saved_objects = copy.deepcopy(self.current_objects)
def __load(self):
""" Loads saved copies of record, counter, and current_objects. If new
tracks are appended to existing tracks via the get_tracks method, the
most recent scan prior to the addition must be overwritten to link up
with the new scans. Because of this, record, counter and
current_objects must be reverted to their state in the penultimate
iteration of the loop in get_tracks. See get_tracks for details. """
self.record = self.__saved_record
self.counter = self.__saved_counter
self.current_objects = self.__saved_objects
def get_tracks(self, grids):
""" Obtains tracks given a list of pyart grid objects. This is the
primary method of the tracks class. This method makes use of all of the
functions and helper classes defined above. """
start_time = datetime.datetime.now()
if self.record is None:
# tracks object being initialized
grid_obj2 = next(grids)
self.grid_size = get_grid_size(grid_obj2)
self.radar_info = get_radar_info(grid_obj2)
self.counter = Counter()
self.record = Record(grid_obj2)
else:
# tracks object being updated
grid_obj2 = self.last_grid
self.tracks.drop(self.record.scan + 1) # last scan is overwritten
if self.current_objects is None:
newRain = True
else:
newRain = False
raw2, frame2 = extract_grid_data(grid_obj2, self.field, self.grid_size,
self.params)
while grid_obj2 is not None:
grid_obj1 = grid_obj2
raw1 = raw2
frame1 = frame2
try:
grid_obj2 = next(grids)
except StopIteration:
grid_obj2 = None
if grid_obj2 is not None:
self.record.update_scan_and_time(grid_obj1, grid_obj2)
raw2, frame2 = extract_grid_data(grid_obj2,
self.field,
self.grid_size,
self.params)
else:
# setup to write final scan
self.__save()
self.last_grid = grid_obj1
self.record.update_scan_and_time(grid_obj1)
raw2 = None
frame2 = np.zeros_like(frame1)
if np.max(frame1) == 0:
newRain = True
print('No cells found in scan', self.record.scan)
self.current_objects = None
continue
global_shift = get_global_shift(raw1, raw2, self.params)
pairs = get_pairs(frame1,
frame2,
global_shift,
self.current_objects,
self.record,
self.params)
if newRain:
# first nonempty scan after a period of empty scans
self.current_objects, self.counter = init_current_objects(
frame1,
frame2,
pairs,
self.counter
)
newRain = False
else:
self.current_objects, self.counter = update_current_objects(
frame1,
frame2,
pairs,
self.current_objects,
self.counter
)
obj_props = get_object_prop(frame1, grid_obj1, self.field,
self.record, self.params)
self.record.add_uids(self.current_objects)
self.tracks = write_tracks(self.tracks, self.record,
self.current_objects, obj_props)
del grid_obj1, raw1, frame1, global_shift, pairs, obj_props
# scan loop end
self.__load()
time_elapsed = datetime.datetime.now() - start_time
print('\n')
print('time elapsed', np.round(time_elapsed.seconds/60, 1), 'minutes')
return
| mhpicel/tracking | tracking/core/tracks.py | Python | bsd-2-clause | 8,556 | [
"Gaussian"
] | db3718008ae959e487442ed79790e802eb89fa1b770e0fe5a0a3581b97a733da |
# -*- coding: utf-8 -*-
"""
pygments.lexers._asybuiltins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This file contains the asy-function names and asy-variable names of
Asymptote.
Do not edit the ASYFUNCNAME and ASYVARNAME sets by hand.
TODO: perl/python script in Asymptote SVN similar to asy-list.pl but only
for function and variable names.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import unicode_literals
ASYFUNCNAME = set([
'AND',
'Arc',
'ArcArrow',
'ArcArrows',
'Arrow',
'Arrows',
'Automatic',
'AvantGarde',
'BBox',
'BWRainbow',
'BWRainbow2',
'Bar',
'Bars',
'BeginArcArrow',
'BeginArrow',
'BeginBar',
'BeginDotMargin',
'BeginMargin',
'BeginPenMargin',
'Blank',
'Bookman',
'Bottom',
'BottomTop',
'Bounds',
'Break',
'Broken',
'BrokenLog',
'Ceil',
'Circle',
'CircleBarIntervalMarker',
'Cos',
'Courier',
'CrossIntervalMarker',
'DefaultFormat',
'DefaultLogFormat',
'Degrees',
'Dir',
'DotMargin',
'DotMargins',
'Dotted',
'Draw',
'Drawline',
'Embed',
'EndArcArrow',
'EndArrow',
'EndBar',
'EndDotMargin',
'EndMargin',
'EndPenMargin',
'Fill',
'FillDraw',
'Floor',
'Format',
'Full',
'Gaussian',
'Gaussrand',
'Gaussrandpair',
'Gradient',
'Grayscale',
'Helvetica',
'Hermite',
'HookHead',
'InOutTicks',
'InTicks',
'J',
'Label',
'Landscape',
'Left',
'LeftRight',
'LeftTicks',
'Legend',
'Linear',
'Link',
'Log',
'LogFormat',
'Margin',
'Margins',
'Mark',
'MidArcArrow',
'MidArrow',
'NOT',
'NewCenturySchoolBook',
'NoBox',
'NoMargin',
'NoModifier',
'NoTicks',
'NoTicks3',
'NoZero',
'NoZeroFormat',
'None',
'OR',
'OmitFormat',
'OmitTick',
'OutTicks',
'Ox',
'Oy',
'Palatino',
'PaletteTicks',
'Pen',
'PenMargin',
'PenMargins',
'Pentype',
'Portrait',
'RadialShade',
'Rainbow',
'Range',
'Relative',
'Right',
'RightTicks',
'Rotate',
'Round',
'SQR',
'Scale',
'ScaleX',
'ScaleY',
'ScaleZ',
'Seascape',
'Shift',
'Sin',
'Slant',
'Spline',
'StickIntervalMarker',
'Straight',
'Symbol',
'Tan',
'TeXify',
'Ticks',
'Ticks3',
'TildeIntervalMarker',
'TimesRoman',
'Top',
'TrueMargin',
'UnFill',
'UpsideDown',
'Wheel',
'X',
'XEquals',
'XOR',
'XY',
'XYEquals',
'XYZero',
'XYgrid',
'XZEquals',
'XZZero',
'XZero',
'XZgrid',
'Y',
'YEquals',
'YXgrid',
'YZ',
'YZEquals',
'YZZero',
'YZero',
'YZgrid',
'Z',
'ZX',
'ZXgrid',
'ZYgrid',
'ZapfChancery',
'ZapfDingbats',
'_cputime',
'_draw',
'_eval',
'_image',
'_labelpath',
'_projection',
'_strokepath',
'_texpath',
'aCos',
'aSin',
'aTan',
'abort',
'abs',
'accel',
'acos',
'acosh',
'acot',
'acsc',
'add',
'addArrow',
'addMargins',
'addSaveFunction',
'addnode',
'addnodes',
'addpenarc',
'addpenline',
'addseg',
'adjust',
'alias',
'align',
'all',
'altitude',
'angabscissa',
'angle',
'angpoint',
'animate',
'annotate',
'anticomplementary',
'antipedal',
'apply',
'approximate',
'arc',
'arcarrowsize',
'arccircle',
'arcdir',
'arcfromcenter',
'arcfromfocus',
'arclength',
'arcnodesnumber',
'arcpoint',
'arcsubtended',
'arcsubtendedcenter',
'arctime',
'arctopath',
'array',
'arrow',
'arrow2',
'arrowbase',
'arrowbasepoints',
'arrowsize',
'asec',
'asin',
'asinh',
'ask',
'assert',
'asy',
'asycode',
'asydir',
'asyfigure',
'asyfilecode',
'asyinclude',
'asywrite',
'atan',
'atan2',
'atanh',
'atbreakpoint',
'atexit',
'atime',
'attach',
'attract',
'atupdate',
'autoformat',
'autoscale',
'autoscale3',
'axes',
'axes3',
'axialshade',
'axis',
'axiscoverage',
'azimuth',
'babel',
'background',
'bangles',
'bar',
'barmarksize',
'barsize',
'basealign',
'baseline',
'bbox',
'beep',
'begin',
'beginclip',
'begingroup',
'beginpoint',
'between',
'bevel',
'bezier',
'bezierP',
'bezierPP',
'bezierPPP',
'bezulate',
'bibliography',
'bibliographystyle',
'binarytree',
'binarytreeNode',
'binomial',
'binput',
'bins',
'bisector',
'bisectorpoint',
'blend',
'boutput',
'box',
'bqe',
'breakpoint',
'breakpoints',
'brick',
'buildRestoreDefaults',
'buildRestoreThunk',
'buildcycle',
'bulletcolor',
'canonical',
'canonicalcartesiansystem',
'cartesiansystem',
'case1',
'case2',
'case3',
'cbrt',
'cd',
'ceil',
'center',
'centerToFocus',
'centroid',
'cevian',
'change2',
'changecoordsys',
'checkSegment',
'checkconditionlength',
'checker',
'checklengths',
'checkposition',
'checktriangle',
'choose',
'circle',
'circlebarframe',
'circlemarkradius',
'circlenodesnumber',
'circumcenter',
'circumcircle',
'clamped',
'clear',
'clip',
'clipdraw',
'close',
'cmyk',
'code',
'colatitude',
'collect',
'collinear',
'color',
'colorless',
'colors',
'colorspace',
'comma',
'compassmark',
'complement',
'complementary',
'concat',
'concurrent',
'cone',
'conic',
'conicnodesnumber',
'conictype',
'conj',
'connect',
'containmentTree',
'contains',
'contour',
'contour3',
'controlSpecifier',
'convert',
'coordinates',
'coordsys',
'copy',
'cos',
'cosh',
'cot',
'countIntersections',
'cputime',
'crop',
'cropcode',
'cross',
'crossframe',
'crosshatch',
'crossmarksize',
'csc',
'cubicroots',
'curabscissa',
'curlSpecifier',
'curpoint',
'currentarrow',
'currentexitfunction',
'currentmomarrow',
'currentpolarconicroutine',
'curve',
'cut',
'cutafter',
'cutbefore',
'cyclic',
'cylinder',
'debugger',
'deconstruct',
'defaultdir',
'defaultformat',
'defaultpen',
'defined',
'degenerate',
'degrees',
'delete',
'deletepreamble',
'determinant',
'diagonal',
'diamond',
'diffdiv',
'dir',
'dirSpecifier',
'dirtime',
'display',
'distance',
'divisors',
'do_overpaint',
'dot',
'dotframe',
'dotsize',
'downcase',
'draw',
'drawAll',
'drawDoubleLine',
'drawFermion',
'drawGhost',
'drawGluon',
'drawMomArrow',
'drawPhoton',
'drawScalar',
'drawVertex',
'drawVertexBox',
'drawVertexBoxO',
'drawVertexBoxX',
'drawVertexO',
'drawVertexOX',
'drawVertexTriangle',
'drawVertexTriangleO',
'drawVertexX',
'drawarrow',
'drawarrow2',
'drawline',
'drawtick',
'duplicate',
'elle',
'ellipse',
'ellipsenodesnumber',
'embed',
'embed3',
'empty',
'enclose',
'end',
'endScript',
'endclip',
'endgroup',
'endl',
'endpoint',
'endpoints',
'eof',
'eol',
'equation',
'equations',
'erase',
'erasestep',
'erf',
'erfc',
'error',
'errorbar',
'errorbars',
'eval',
'excenter',
'excircle',
'exit',
'exitXasyMode',
'exitfunction',
'exp',
'expfactors',
'expi',
'expm1',
'exradius',
'extend',
'extension',
'extouch',
'fabs',
'factorial',
'fermat',
'fft',
'fhorner',
'figure',
'file',
'filecode',
'fill',
'filldraw',
'filloutside',
'fillrule',
'filltype',
'find',
'finite',
'finiteDifferenceJacobian',
'firstcut',
'firstframe',
'fit',
'fit2',
'fixedscaling',
'floor',
'flush',
'fmdefaults',
'fmod',
'focusToCenter',
'font',
'fontcommand',
'fontsize',
'foot',
'format',
'frac',
'frequency',
'fromCenter',
'fromFocus',
'fspline',
'functionshade',
'gamma',
'generate_random_backtrace',
'generateticks',
'gergonne',
'getc',
'getint',
'getpair',
'getreal',
'getstring',
'gettriple',
'gluon',
'gouraudshade',
'graph',
'graphic',
'gray',
'grestore',
'grid',
'grid3',
'gsave',
'halfbox',
'hatch',
'hdiffdiv',
'hermite',
'hex',
'histogram',
'history',
'hline',
'hprojection',
'hsv',
'hyperbola',
'hyperbolanodesnumber',
'hyperlink',
'hypot',
'identity',
'image',
'incenter',
'incentral',
'incircle',
'increasing',
'incrementposition',
'indexedTransform',
'indexedfigure',
'initXasyMode',
'initdefaults',
'input',
'inradius',
'insert',
'inside',
'integrate',
'interactive',
'interior',
'interp',
'interpolate',
'intersect',
'intersection',
'intersectionpoint',
'intersectionpoints',
'intersections',
'intouch',
'inverse',
'inversion',
'invisible',
'is3D',
'isDuplicate',
'isogonal',
'isogonalconjugate',
'isotomic',
'isotomicconjugate',
'isparabola',
'italic',
'item',
'key',
'kurtosis',
'kurtosisexcess',
'label',
'labelaxis',
'labelmargin',
'labelpath',
'labels',
'labeltick',
'labelx',
'labelx3',
'labely',
'labely3',
'labelz',
'labelz3',
'lastcut',
'latex',
'latitude',
'latticeshade',
'layer',
'layout',
'ldexp',
'leastsquares',
'legend',
'legenditem',
'length',
'lift',
'light',
'limits',
'line',
'linear',
'linecap',
'lineinversion',
'linejoin',
'linemargin',
'lineskip',
'linetype',
'linewidth',
'link',
'list',
'lm_enorm',
'lm_evaluate_default',
'lm_lmdif',
'lm_lmpar',
'lm_minimize',
'lm_print_default',
'lm_print_quiet',
'lm_qrfac',
'lm_qrsolv',
'locale',
'locate',
'locatefile',
'location',
'log',
'log10',
'log1p',
'logaxiscoverage',
'longitude',
'lookup',
'magnetize',
'makeNode',
'makedraw',
'makepen',
'map',
'margin',
'markangle',
'markangleradius',
'markanglespace',
'markarc',
'marker',
'markinterval',
'marknodes',
'markrightangle',
'markuniform',
'mass',
'masscenter',
'massformat',
'math',
'max',
'max3',
'maxbezier',
'maxbound',
'maxcoords',
'maxlength',
'maxratio',
'maxtimes',
'mean',
'medial',
'median',
'midpoint',
'min',
'min3',
'minbezier',
'minbound',
'minipage',
'minratio',
'mintimes',
'miterlimit',
'momArrowPath',
'momarrowsize',
'monotonic',
'multifigure',
'nativeformat',
'natural',
'needshipout',
'newl',
'newpage',
'newslide',
'newton',
'newtree',
'nextframe',
'nextnormal',
'nextpage',
'nib',
'nodabscissa',
'none',
'norm',
'normalvideo',
'notaknot',
'nowarn',
'numberpage',
'nurb',
'object',
'offset',
'onpath',
'opacity',
'opposite',
'orientation',
'orig_circlenodesnumber',
'orig_circlenodesnumber1',
'orig_draw',
'orig_ellipsenodesnumber',
'orig_ellipsenodesnumber1',
'orig_hyperbolanodesnumber',
'orig_parabolanodesnumber',
'origin',
'orthic',
'orthocentercenter',
'outformat',
'outline',
'outprefix',
'output',
'overloadedMessage',
'overwrite',
'pack',
'pad',
'pairs',
'palette',
'parabola',
'parabolanodesnumber',
'parallel',
'partialsum',
'path',
'path3',
'pattern',
'pause',
'pdf',
'pedal',
'periodic',
'perp',
'perpendicular',
'perpendicularmark',
'phantom',
'phi1',
'phi2',
'phi3',
'photon',
'piecewisestraight',
'point',
'polar',
'polarconicroutine',
'polargraph',
'polygon',
'postcontrol',
'postscript',
'pow10',
'ppoint',
'prc',
'prc0',
'precision',
'precontrol',
'prepend',
'print_random_addresses',
'project',
'projection',
'purge',
'pwhermite',
'quadrant',
'quadraticroots',
'quantize',
'quarticroots',
'quotient',
'radialshade',
'radians',
'radicalcenter',
'radicalline',
'radius',
'rand',
'randompath',
'rd',
'readline',
'realmult',
'realquarticroots',
'rectangle',
'rectangular',
'rectify',
'reflect',
'relabscissa',
'relative',
'relativedistance',
'reldir',
'relpoint',
'reltime',
'remainder',
'remark',
'removeDuplicates',
'rename',
'replace',
'report',
'resetdefaultpen',
'restore',
'restoredefaults',
'reverse',
'reversevideo',
'rf',
'rfind',
'rgb',
'rgba',
'rgbint',
'rms',
'rotate',
'rotateO',
'rotation',
'round',
'roundbox',
'roundedpath',
'roundrectangle',
'samecoordsys',
'sameside',
'sample',
'save',
'savedefaults',
'saveline',
'scale',
'scale3',
'scaleO',
'scaleT',
'scaleless',
'scientific',
'search',
'searchtree',
'sec',
'secondaryX',
'secondaryY',
'seconds',
'section',
'sector',
'seek',
'seekeof',
'segment',
'sequence',
'setpens',
'sgn',
'sgnd',
'sharpangle',
'sharpdegrees',
'shift',
'shiftless',
'shipout',
'shipout3',
'show',
'side',
'simeq',
'simpson',
'sin',
'single',
'sinh',
'size',
'size3',
'skewness',
'skip',
'slant',
'sleep',
'slope',
'slopefield',
'solve',
'solveBVP',
'sort',
'sourceline',
'sphere',
'split',
'sqrt',
'square',
'srand',
'standardizecoordsys',
'startScript',
'startTrembling',
'stdev',
'step',
'stickframe',
'stickmarksize',
'stickmarkspace',
'stop',
'straight',
'straightness',
'string',
'stripdirectory',
'stripextension',
'stripfile',
'strokepath',
'subdivide',
'subitem',
'subpath',
'substr',
'sum',
'surface',
'symmedial',
'symmedian',
'system',
'tab',
'tableau',
'tan',
'tangent',
'tangential',
'tangents',
'tanh',
'tell',
'tensionSpecifier',
'tensorshade',
'tex',
'texcolor',
'texify',
'texpath',
'texpreamble',
'texreset',
'texshipout',
'texsize',
'textpath',
'thick',
'thin',
'tick',
'tickMax',
'tickMax3',
'tickMin',
'tickMin3',
'ticklabelshift',
'ticklocate',
'tildeframe',
'tildemarksize',
'tile',
'tiling',
'time',
'times',
'title',
'titlepage',
'topbox',
'transform',
'transformation',
'transpose',
'tremble',
'trembleFuzz',
'tremble_circlenodesnumber',
'tremble_circlenodesnumber1',
'tremble_draw',
'tremble_ellipsenodesnumber',
'tremble_ellipsenodesnumber1',
'tremble_hyperbolanodesnumber',
'tremble_marknodes',
'tremble_markuniform',
'tremble_parabolanodesnumber',
'triangle',
'triangleAbc',
'triangleabc',
'triangulate',
'tricoef',
'tridiagonal',
'trilinear',
'trim',
'trueMagnetize',
'truepoint',
'tube',
'uncycle',
'unfill',
'uniform',
'unit',
'unitrand',
'unitsize',
'unityroot',
'unstraighten',
'upcase',
'updatefunction',
'uperiodic',
'upscale',
'uptodate',
'usepackage',
'usersetting',
'usetypescript',
'usleep',
'value',
'variance',
'variancebiased',
'vbox',
'vector',
'vectorfield',
'verbatim',
'view',
'vline',
'vperiodic',
'vprojection',
'warn',
'warning',
'windingnumber',
'write',
'xaxis',
'xaxis3',
'xaxis3At',
'xaxisAt',
'xequals',
'xinput',
'xlimits',
'xoutput',
'xpart',
'xscale',
'xscaleO',
'xtick',
'xtick3',
'xtrans',
'yaxis',
'yaxis3',
'yaxis3At',
'yaxisAt',
'yequals',
'ylimits',
'ypart',
'yscale',
'yscaleO',
'ytick',
'ytick3',
'ytrans',
'zaxis3',
'zaxis3At',
'zero',
'zero3',
'zlimits',
'zpart',
'ztick',
'ztick3',
'ztrans'
])
ASYVARNAME = set([
'AliceBlue',
'Align',
'Allow',
'AntiqueWhite',
'Apricot',
'Aqua',
'Aquamarine',
'Aspect',
'Azure',
'BeginPoint',
'Beige',
'Bisque',
'Bittersweet',
'Black',
'BlanchedAlmond',
'Blue',
'BlueGreen',
'BlueViolet',
'Both',
'Break',
'BrickRed',
'Brown',
'BurlyWood',
'BurntOrange',
'CCW',
'CW',
'CadetBlue',
'CarnationPink',
'Center',
'Centered',
'Cerulean',
'Chartreuse',
'Chocolate',
'Coeff',
'Coral',
'CornflowerBlue',
'Cornsilk',
'Crimson',
'Crop',
'Cyan',
'Dandelion',
'DarkBlue',
'DarkCyan',
'DarkGoldenrod',
'DarkGray',
'DarkGreen',
'DarkKhaki',
'DarkMagenta',
'DarkOliveGreen',
'DarkOrange',
'DarkOrchid',
'DarkRed',
'DarkSalmon',
'DarkSeaGreen',
'DarkSlateBlue',
'DarkSlateGray',
'DarkTurquoise',
'DarkViolet',
'DeepPink',
'DeepSkyBlue',
'DefaultHead',
'DimGray',
'DodgerBlue',
'Dotted',
'Draw',
'E',
'ENE',
'EPS',
'ESE',
'E_Euler',
'E_PC',
'E_RK2',
'E_RK3BS',
'Emerald',
'EndPoint',
'Euler',
'Fill',
'FillDraw',
'FireBrick',
'FloralWhite',
'ForestGreen',
'Fuchsia',
'Gainsboro',
'GhostWhite',
'Gold',
'Goldenrod',
'Gray',
'Green',
'GreenYellow',
'Honeydew',
'HookHead',
'Horizontal',
'HotPink',
'I',
'IgnoreAspect',
'IndianRed',
'Indigo',
'Ivory',
'JOIN_IN',
'JOIN_OUT',
'JungleGreen',
'Khaki',
'LM_DWARF',
'LM_MACHEP',
'LM_SQRT_DWARF',
'LM_SQRT_GIANT',
'LM_USERTOL',
'Label',
'Lavender',
'LavenderBlush',
'LawnGreen',
'LeftJustified',
'LeftSide',
'LemonChiffon',
'LightBlue',
'LightCoral',
'LightCyan',
'LightGoldenrodYellow',
'LightGreen',
'LightGrey',
'LightPink',
'LightSalmon',
'LightSeaGreen',
'LightSkyBlue',
'LightSlateGray',
'LightSteelBlue',
'LightYellow',
'Lime',
'LimeGreen',
'Linear',
'Linen',
'Log',
'Logarithmic',
'Magenta',
'Mahogany',
'Mark',
'MarkFill',
'Maroon',
'Max',
'MediumAquamarine',
'MediumBlue',
'MediumOrchid',
'MediumPurple',
'MediumSeaGreen',
'MediumSlateBlue',
'MediumSpringGreen',
'MediumTurquoise',
'MediumVioletRed',
'Melon',
'MidPoint',
'MidnightBlue',
'Min',
'MintCream',
'MistyRose',
'Moccasin',
'Move',
'MoveQuiet',
'Mulberry',
'N',
'NE',
'NNE',
'NNW',
'NW',
'NavajoWhite',
'Navy',
'NavyBlue',
'NoAlign',
'NoCrop',
'NoFill',
'NoSide',
'OldLace',
'Olive',
'OliveDrab',
'OliveGreen',
'Orange',
'OrangeRed',
'Orchid',
'Ox',
'Oy',
'PC',
'PaleGoldenrod',
'PaleGreen',
'PaleTurquoise',
'PaleVioletRed',
'PapayaWhip',
'Peach',
'PeachPuff',
'Periwinkle',
'Peru',
'PineGreen',
'Pink',
'Plum',
'PowderBlue',
'ProcessBlue',
'Purple',
'RK2',
'RK3',
'RK3BS',
'RK4',
'RK5',
'RK5DP',
'RK5F',
'RawSienna',
'Red',
'RedOrange',
'RedViolet',
'Rhodamine',
'RightJustified',
'RightSide',
'RosyBrown',
'RoyalBlue',
'RoyalPurple',
'RubineRed',
'S',
'SE',
'SSE',
'SSW',
'SW',
'SaddleBrown',
'Salmon',
'SandyBrown',
'SeaGreen',
'Seashell',
'Sepia',
'Sienna',
'Silver',
'SimpleHead',
'SkyBlue',
'SlateBlue',
'SlateGray',
'Snow',
'SpringGreen',
'SteelBlue',
'Suppress',
'SuppressQuiet',
'Tan',
'TeXHead',
'Teal',
'TealBlue',
'Thistle',
'Ticksize',
'Tomato',
'Turquoise',
'UnFill',
'VERSION',
'Value',
'Vertical',
'Violet',
'VioletRed',
'W',
'WNW',
'WSW',
'Wheat',
'White',
'WhiteSmoke',
'WildStrawberry',
'XYAlign',
'YAlign',
'Yellow',
'YellowGreen',
'YellowOrange',
'addpenarc',
'addpenline',
'align',
'allowstepping',
'angularsystem',
'animationdelay',
'appendsuffix',
'arcarrowangle',
'arcarrowfactor',
'arrow2sizelimit',
'arrowangle',
'arrowbarb',
'arrowdir',
'arrowfactor',
'arrowhookfactor',
'arrowlength',
'arrowsizelimit',
'arrowtexfactor',
'authorpen',
'axis',
'axiscoverage',
'axislabelfactor',
'background',
'backgroundcolor',
'backgroundpen',
'barfactor',
'barmarksizefactor',
'basealign',
'baselinetemplate',
'beveljoin',
'bigvertexpen',
'bigvertexsize',
'black',
'blue',
'bm',
'bottom',
'bp',
'brown',
'bullet',
'byfoci',
'byvertices',
'camerafactor',
'chartreuse',
'circlemarkradiusfactor',
'circlenodesnumberfactor',
'circleprecision',
'circlescale',
'cm',
'codefile',
'codepen',
'codeskip',
'colorPen',
'coloredNodes',
'coloredSegments',
'conditionlength',
'conicnodesfactor',
'count',
'cputimeformat',
'crossmarksizefactor',
'currentcoordsys',
'currentlight',
'currentpatterns',
'currentpen',
'currentpicture',
'currentposition',
'currentprojection',
'curvilinearsystem',
'cuttings',
'cyan',
'darkblue',
'darkbrown',
'darkcyan',
'darkgray',
'darkgreen',
'darkgrey',
'darkmagenta',
'darkolive',
'darkred',
'dashdotted',
'dashed',
'datepen',
'dateskip',
'debuggerlines',
'debugging',
'deepblue',
'deepcyan',
'deepgray',
'deepgreen',
'deepgrey',
'deepmagenta',
'deepred',
'default',
'defaultControl',
'defaultS',
'defaultbackpen',
'defaultcoordsys',
'defaultfilename',
'defaultformat',
'defaultmassformat',
'defaultpen',
'diagnostics',
'differentlengths',
'dot',
'dotfactor',
'dotframe',
'dotted',
'doublelinepen',
'doublelinespacing',
'down',
'duplicateFuzz',
'ellipsenodesnumberfactor',
'eps',
'epsgeo',
'epsilon',
'evenodd',
'extendcap',
'fermionpen',
'figureborder',
'figuremattpen',
'firstnode',
'firststep',
'foregroundcolor',
'fuchsia',
'fuzz',
'gapfactor',
'ghostpen',
'gluonamplitude',
'gluonpen',
'gluonratio',
'gray',
'green',
'grey',
'hatchepsilon',
'havepagenumber',
'heavyblue',
'heavycyan',
'heavygray',
'heavygreen',
'heavygrey',
'heavymagenta',
'heavyred',
'hline',
'hwratio',
'hyperbolanodesnumberfactor',
'identity4',
'ignore',
'inXasyMode',
'inch',
'inches',
'includegraphicscommand',
'inf',
'infinity',
'institutionpen',
'intMax',
'intMin',
'invert',
'invisible',
'itempen',
'itemskip',
'itemstep',
'labelmargin',
'landscape',
'lastnode',
'left',
'legendhskip',
'legendlinelength',
'legendmargin',
'legendmarkersize',
'legendmaxrelativewidth',
'legendvskip',
'lightblue',
'lightcyan',
'lightgray',
'lightgreen',
'lightgrey',
'lightmagenta',
'lightolive',
'lightred',
'lightyellow',
'linemargin',
'lm_infmsg',
'lm_shortmsg',
'longdashdotted',
'longdashed',
'magenta',
'magneticPoints',
'magneticRadius',
'mantissaBits',
'markangleradius',
'markangleradiusfactor',
'markanglespace',
'markanglespacefactor',
'mediumblue',
'mediumcyan',
'mediumgray',
'mediumgreen',
'mediumgrey',
'mediummagenta',
'mediumred',
'mediumyellow',
'middle',
'minDistDefault',
'minblockheight',
'minblockwidth',
'mincirclediameter',
'minipagemargin',
'minipagewidth',
'minvertexangle',
'miterjoin',
'mm',
'momarrowfactor',
'momarrowlength',
'momarrowmargin',
'momarrowoffset',
'momarrowpen',
'monoPen',
'morepoints',
'nCircle',
'newbulletcolor',
'ngraph',
'nil',
'nmesh',
'nobasealign',
'nodeMarginDefault',
'nodesystem',
'nomarker',
'nopoint',
'noprimary',
'nullpath',
'nullpen',
'numarray',
'ocgindex',
'oldbulletcolor',
'olive',
'orange',
'origin',
'overpaint',
'page',
'pageheight',
'pagemargin',
'pagenumberalign',
'pagenumberpen',
'pagenumberposition',
'pagewidth',
'paleblue',
'palecyan',
'palegray',
'palegreen',
'palegrey',
'palemagenta',
'palered',
'paleyellow',
'parabolanodesnumberfactor',
'perpfactor',
'phi',
'photonamplitude',
'photonpen',
'photonratio',
'pi',
'pink',
'plain',
'plus',
'preamblenodes',
'pt',
'purple',
'r3',
'r4a',
'r4b',
'randMax',
'realDigits',
'realEpsilon',
'realMax',
'realMin',
'red',
'relativesystem',
'reverse',
'right',
'roundcap',
'roundjoin',
'royalblue',
'salmon',
'saveFunctions',
'scalarpen',
'sequencereal',
'settings',
'shipped',
'signedtrailingzero',
'solid',
'springgreen',
'sqrtEpsilon',
'squarecap',
'squarepen',
'startposition',
'stdin',
'stdout',
'stepfactor',
'stepfraction',
'steppagenumberpen',
'stepping',
'stickframe',
'stickmarksizefactor',
'stickmarkspacefactor',
'textpen',
'ticksize',
'tildeframe',
'tildemarksizefactor',
'tinv',
'titlealign',
'titlepagepen',
'titlepageposition',
'titlepen',
'titleskip',
'top',
'trailingzero',
'treeLevelStep',
'treeMinNodeWidth',
'treeNodeStep',
'trembleAngle',
'trembleFrequency',
'trembleRandom',
'tremblingMode',
'undefined',
'unitcircle',
'unitsquare',
'up',
'urlpen',
'urlskip',
'version',
'vertexpen',
'vertexsize',
'viewportmargin',
'viewportsize',
'vline',
'white',
'wye',
'xformStack',
'yellow',
'ylabelwidth',
'zerotickfuzz',
'zerowinding'
])
| davy39/eric | ThirdParty/Pygments/pygments/lexers/_asybuiltins.py | Python | gpl-3.0 | 27,360 | [
"Gaussian"
] | 30a293f2ee6c21ef0af94441f13d7ac5ac8518532836234829e29c0f1b543803 |
# -*- coding: utf-8 -*-
# Copyright (C) 2012 VT SuperDARN Lab
# Full license can be found in LICENSE.txt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""music processing module
A module for running the MUltiple SIgnal Classification (MUSIC) algorithm for the detection of
MSTIDs and wave-like structures in SuperDARN data.
For usage examples, please see the iPython notebooks included in the docs folder of the DaViTPy distribution.
References
----------
See Samson et al. [1990] and Bristow et al. [1994] for details regarding the MUSIC algorithm and SuperDARN-observed MSTIDs.
Bristow, W. A., R. A. Greenwald, and J. C. Samson (1994), Identification of high-latitude acoustic gravity wave sources
using the Goose Bay HF Radar, J. Geophys. Res., 99(A1), 319-331, doi:10.1029/93JA01470.
Samson, J. C., R. A. Greenwald, J. M. Ruohoniemi, A. Frey, and K. B. Baker (1990), Goose Bay radar observations of Earth-reflected,
atmospheric gravity waves in the high-latitude ionosphere, J. Geophys. Res., 95(A6), 7693-7709, doi:10.1029/JA095iA06p07693.
Module author:: Nathaniel A. Frissell, Fall 2013
Functions
--------------------------------------------------------------------------------------------------------------------------
getDataSet get music data object from music array object
stringify_signal convert dictionary to a string
stringify_signal_list convert list of dictionaries into strings
beamInterpolation interpolate music array object along beams
defineLimits set limits for chosen data set
checkDataQuality mark data as bad base on radar operations
applyLimits remove data outside of limits
determineRelativePosition find center of cell in music array object
timeInterpolation interpolate music array object along time
filterTimes calculate time range for data set
detrend linear detrend of music array/data object
nan_to_num convert undefined numbers to finite numbers
windowData apply window to music array object
calculateFFT calculate spectrum of an object
calculateDlm calculate the cross-spectral matrix of a musicArray/musicDataObj object.
calculateKarr calculate the two-dimensional horizontal wavenumber array of a musicArray/musicDataObj object.
simulator insert a simulated MSTID into the processing chain.
scale_karr scale/normalize kArr for plotting and signal detection.
detectSignals detect local maxima of signals
add_signal add signal to detected signal list
del_signal remove signal from detected signal list
--------------------------------------------------------------------------------------------------------------------------
Classes
-----------------------------------------------------------
emptyObj create an empty object
SigDetect information about detected signals
musicDataObj basic container for holding MUSIC data.
musicArray container object for holding musicDataObj's
filter a filter object for VT sig/siStruct objects
-----------------------------------------------------------
"""
import numpy as np
import datetime
import time
import copy
import logging
Re = 6378 # Earth radius [km]; presumably used by mapping/geometry code elsewhere in this module -- confirm at call sites
def getDataSet(dataObj,dataSet='active'):
    """Fetch a musicDataObj attribute from a musicArray container.

    If ``dataSet`` names an attribute of ``dataObj`` exactly, that attribute is
    returned.  Otherwise, all attribute names containing ``dataSet`` as a
    substring are collected and the alphabetically last match is used.  If no
    attribute name matches at all, the 'active' dataSet is returned.

    Parameters
    ----------
    dataObj : musicArray
    dataSet : Optional[str]
        which dataSet in the musicArray object to process

    Returns
    -------
    currentData : musicDataObj object

    Written by Nathaniel A. Frissell, Fall 2013
    """
    attrNames = dir(dataObj)

    if dataSet not in attrNames:
        # No exact attribute; fall back to substring matching.
        matches = [name for name in attrNames if dataSet in name]
        # Alphabetically last match wins (data sets are serial-number prefixed),
        # or 'active' when nothing matches.
        dataSet = sorted(matches)[-1] if matches else 'active'

    return getattr(dataObj,dataSet)
class emptyObj(object):
    """Generic empty container; callers attach attributes after construction."""
    def __init__(self):
        pass
def stringify_signal(sig):
    """Method to convert a signal information dictionary into a string.

    Only keys present in ``sig`` appear in the result; each value is formatted
    for display (fixed precision, units noted below).

    Parameters
    ----------
    sig : dict
        Information about a detected signal.

    Returns
    -------
    sigInfo : dict of str
        String representation of the signal information, keyed like ``sig``.

    Written by Nathaniel A. Frissell, Fall 2013
    """
    # Fix: dict.has_key() is Python-2-only; 'key in dict' is equivalent and
    # works on both Python 2 and 3.
    sigInfo = {}
    if 'order' in sig:
        sigInfo['order'] = '%d' % sig['order'] # Order of signals by strength as detected by image detection algorithm
    if 'kx' in sig:
        sigInfo['kx'] = '%.5f' % sig['kx']
    if 'ky' in sig:
        sigInfo['ky'] = '%.5f' % sig['ky']
    if 'k' in sig:
        sigInfo['k'] = '%.3f' % sig['k']
    if 'lambda' in sig:
        if np.isinf(sig['lambda']):
            sigInfo['lambda'] = 'inf'
        else:
            sigInfo['lambda'] = '%d' % np.round(sig['lambda']) # km
    if 'lambda_x' in sig:
        if np.isinf(sig['lambda_x']):
            sigInfo['lambda_x'] = 'inf'
        else:
            sigInfo['lambda_x'] = '%d' % np.round(sig['lambda_x']) # km
    if 'lambda_y' in sig:
        if np.isinf(sig['lambda_y']):
            sigInfo['lambda_y'] = 'inf'
        else:
            sigInfo['lambda_y'] = '%d' % np.round(sig['lambda_y']) # km
    if 'azm' in sig:
        sigInfo['azm'] = '%d' % np.round(sig['azm']) # degrees
    if 'freq' in sig:
        sigInfo['freq'] = '%.2f' % (sig['freq']*1000.) # mHz
    if 'period' in sig:
        sigInfo['period'] = '%d' % np.round(sig['period']/60.) # minutes
    if 'vel' in sig:
        if np.isinf(np.round(sig['vel'])):
            sigInfo['vel'] = 'Inf'
        else:
            sigInfo['vel'] = '%d' % np.round(sig['vel']) # km/s
    if 'area' in sig:
        sigInfo['area'] = '%d' % sig['area'] # Pixels
    if 'max' in sig:
        sigInfo['max'] = '%.4f' % sig['max'] # Value from kArr in arbitrary units, probably with some normalization
    if 'maxpos' in sig:
        sigInfo['maxpos'] = str(sig['maxpos']) # Index position in kArr of maximum value.
    if 'labelInx' in sig:
        sigInfo['labelInx'] = '%d' % sig['labelInx'] # Label value from image processing
    if 'serialNr' in sig:
        sigInfo['serialNr'] = '%d' % sig['serialNr'] # Label value from image processing
    return sigInfo
def stringify_signal_list(signal_list,sort_key='order'):
    """Method to convert a list of signal dictionaries into strings.

    Parameters
    ----------
    signal_list : list of dict
        Information about a detected signal.
    sort_key : Optional[string]
        Dictionary key to sort on, or None for no sort. 'order' will sort the signal list
        from strongest signal to weakest, as determined by the MUSIC algorithm.

    Returns
    -------
    string_info : list of dict of str
        String representation of the signal information, one entry per signal.

    Written by Nathaniel A. Frissell, Fall 2013
    """
    # Fix: the original removed entries from signal_list while iterating over
    # it, which (a) destructively mutated the caller's list and (b) could skip
    # signals when two entries shared the same sort_key value.  Sorting a copy
    # gives the same output order without either problem, and in O(n log n)
    # instead of the original O(n^2) scan.
    if sort_key is not None:
        ordered = sorted(signal_list, key=lambda sig: sig[sort_key])
    else:
        ordered = signal_list
    return [stringify_signal(sig) for sig in ordered]
class SigDetect(object):
    """Container for information about signals detected by the MUSIC algorithm.

    Attributes are attached externally: ``info`` (list of signal dictionaries)
    and ``nrSigs`` (number of detected signals).

    Methods
    -------
    string
    reorder

    Written by Nathaniel A. Frissell, Fall 2013
    """
    def __init__(self):
        pass

    def string(self):
        """Render self.info as a list of signal-information string dictionaries.

        Returns
        -------
        stringInfo : list of str
            String representation of the signal information.

        Written by Nathaniel A. Frissell, Fall 2013
        """
        return stringify_signal_list(self.info)

    def reorder(self):
        """Sort .info by signal maximum value (from the scaled kArr), stamp each
        entry with its 1-based 'order', and update nrSigs.

        Written by Nathaniel A. Frissell, Fall 2013
        """
        from operator import itemgetter
        # Strongest signal first.
        ranked = sorted(self.info, key=itemgetter('max'), reverse=True)
        # Assign 1-based order numbers in ranked order.
        for rank, entry in enumerate(ranked, start=1):
            entry['order'] = rank
        # Save the ranked list and refresh the signal count.
        self.info = ranked
        self.nrSigs = len(ranked)
class musicDataObj(object):
"""This class is the basic container for holding MUSIC data.
Parameters
----------
time : list of datetime.datetime
list of times corresponding to data
data : numpy.array
3-dimensional array of data
fov : Optional[pydarn.radar.radFov.fov]
Radar field-of-view object.
comment : Optional[str]
String to be appended to the history of this object
parent : Optional[musicArray]
reference to parent musicArray object
**metadata
keywords sent to matplot lib, etc.
Attributes
----------
time : numpy.array of datetime.datetime
numpy array of times corresponding to data
data : numpy.array
3-dimensional array of data
fov : Optional[pydarn.radar.radFov.fov]
Radar field-of-view object.
metadata : dict
keywords sent to matplot lib, etc.
history : dict
Methods
---------
copy
setActive
nyquistFrequency
samplePeriod
applyLimits
setMetadata
printMetadata
appendHistory
printHistory
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self, time, data, fov=None, comment=None, parent=0, **metadata):
self.parent = parent
self.time = np.array(time)
self.data = np.array(data)
self.fov = fov
self.metadata = {}
for key in metadata: self.metadata[key] = metadata[key]
self.history = {datetime.datetime.now():comment}
def copy(self,newsig,comment):
"""Copy a musicDataObj object. This deep copies data and metadata, updates the serial
number, and logs a comment in the history. Methods such as plot are kept as a reference.
Parameters
----------
newsig : str
Name for the new musicDataObj object.
comment : str
Comment describing the new musicDataObj object.
Returns
-------
newsigobj : musicDataObj
Copy of the original musicDataObj with new name and history entry.
Written by Nathaniel A. Frissell, Fall 2013
"""
serial = self.metadata['serial'] + 1
newsig = '_'.join(['DS%03d' % serial,newsig])
setattr(self.parent,newsig,copy.copy(self))
newsigobj = getattr(self.parent,newsig)
newsigobj.time = copy.deepcopy(self.time)
newsigobj.data = copy.deepcopy(self.data)
newsigobj.fov = copy.deepcopy(self.fov)
newsigobj.metadata = copy.deepcopy(self.metadata)
newsigobj.history = copy.deepcopy(self.history)
newsigobj.metadata['dataSetName'] = newsig
newsigobj.metadata['serial'] = serial
newsigobj.history[datetime.datetime.now()] = '['+newsig+'] '+comment
return newsigobj
def setActive(self):
"""Sets this signal as the currently active signal.
Written by Nathaniel A. Frissell, Fall 2013
"""
self.parent.active = self
def nyquistFrequency(self,timeVec=None):
"""Calculate the Nyquist frequency of a vt sigStruct signal.
Parameters
----------
timeVec : Optional[list of datetime.datetime]
List of datetime.datetime to use instead of self.time.
Returns
-------
nq : float
Nyquist frequency of the signal in Hz.
Written by Nathaniel A. Frissell, Fall 2013
"""
dt = self.samplePeriod(timeVec=timeVec)
nyq = float(1. / (2*dt))
return nyq
def samplePeriod(self,timeVec=None):
"""Calculate the sample period of a vt sigStruct signal.
Parameters
----------
timeVec : Optional[list of datetime.datetime]
List of datetime.datetime to use instead of self.time.
Returns
-------
samplePeriod : float
samplePeriod: sample period of signal in seconds.
Written by Nathaniel A. Frissell, Fall 2013
"""
if timeVec == None: timeVec = self.time
diffs = np.diff(timeVec)
diffs_unq = np.unique(diffs)
self.diffs = diffs_unq
if len(diffs_unq) == 1:
samplePeriod = diffs[0].total_seconds()
else:
diffs_sec = np.array([x.total_seconds() for x in diffs])
maxDt = np.max(diffs_sec)
avg = np.mean(diffs_sec)
md = self.metadata
warn = 'WARNING'
if md.has_key('title'): warn = ' '.join([warn,'FOR','"'+md['title']+'"'])
logging.warning(warn + ':')
logging.warning(' Date time vector is not regularly sampled!')
logging.warning(' Maximum difference in sampling rates is ' + str(maxDt) + ' sec.')
logging.warning(' Using average sampling period of ' + str(avg) + ' sec.')
samplePeriod = avg
import ipdb; ipdb.set_trace()
return samplePeriod
def applyLimits(self,rangeLimits=None,gateLimits=None,timeLimits=None,newDataSetName='limitsApplied',comment='Limits Applied'):
"""Removes data outside of the rangeLimits, gateLimits, and timeLimits boundaries.
Parameters
----------
rangeLimits : Optional[interable]
Two-element array defining the maximum and minumum slant ranges to use. [km]
gateLimits : Optional[iterable]
Two-element array defining the maximum and minumum gates to use.
timeLimits : Optional[]
newDataSetName : Optional[str]
Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
comment : Optional[str]
String to be appended to the history of this object.
Returns
-------
newMusicDataObj : musicDataObj
New musicDataObj. The musicDataObj is also stored in it's parent musicArray object.
Written by Nathaniel A. Frissell, Fall 2013
"""
return applyLimits(self.parent,self.metadata['dataSetName'],rangeLimits=rangeLimits,gateLimits=gateLimits,timeLimits=timeLimits,newDataSetName=newDataSetName,comment=comment)
def setMetadata(self,**metadata):
"""Adds information to the current musicDataObj's metadata dictionary.
Metadata affects various plotting parameters and signal processing routinges.
Parameters
----------
**metadata :
keywords sent to matplot lib, etc.
Written by Nathaniel A. Frissell, Fall 2013
"""
self.metadata = dict(self.metadata.items() + metadata.items())
def printMetadata(self):
"""Nicely print all of the metadata associated with the current musicDataObj object.
Written by Nathaniel A. Frissell, Fall 2013
"""
keys = self.metadata.keys()
keys.sort()
for key in keys:
print key+':',self.metadata[key]
def appendHistory(self,comment):
"""Add an entry to the processing history dictionary of the current musicDataObj object.
Parameters
----------
comment : string
Infomation to add to history dictionary.
Written by Nathaniel A. Frissell, Fall 2013
"""
self.history[datetime.datetime.now()] = '['+self.metadata['dataSetName']+'] '+comment
def printHistory(self):
"""Nicely print all of the processing history associated with the current musicDataObj object.
Written by Nathaniel A. Frissell, Fall 2013
"""
keys = self.history.keys()
keys.sort()
for key in keys:
print key,self.history[key]
class musicArray(object):
"""This class is the basic container for holding MUSIC data.
Parameters
----------
myPtr : pydarn.sdio.radDataTypes.radDataPtr
contains the pipeline to the data we are after
sTime : Optional[datetime.datetime]
start time UT (if None myPtr.sTime is used)
eTime : Optional[datetime.datetime]
end time UT (if None myPtr.eTime is used)
param : Optional[str]
Radar FIT parameter to load and process. Any appropriate attribute of the
FIT data structure is allowed.
gscat : Optional[int]
Ground scatter flag.
0: all backscatter data
1: ground backscatter only
2: ionospheric backscatter only
3: all backscatter data with a ground backscatter flag.
fovElevation : Optional[float]
Passed directly to pydarn.radar.radFov.fov()
fovModel : Optional[str]
Scatter mapping model.
'GS': Ground Scatter Mapping Model. See Bristow et al. [1994]
'IS': Standard SuperDARN scatter mapping model.
fovCoords : Optional[str]
Map coordinate system. WARNING: 'geo' is curently only tested coordinate system.
full_array : Optional[bool]
If True, make the data array the full beam, gate dimensions listed in the hdw.dat file.
If False, truncate the array to the maximum dimensions that there is actually data.
False will save space without throwing out any data, but sometimes it is easier to work
with the full-size array.
Attributes
----------
messages : list
prm :
Methods
-------
get_data_sets
Example
-------
#Set basic event parameters.
rad ='wal'
sTime = datetime.datetime(2011,5,9,8,0)
eTime = datetime.datetime(2011,5,9,19,0)
#Connect to a SuperDARN data source.
myPtr = pydarn.sdio.radDataOpen(sTime,rad,eTime=eTime)
#Create the musicArray Object.
dataObj = music.musicArray(myPtr,fovModel='GS')
References
----------
Bristow, W. A., R. A. Greenwald, and J. C. Samson (1994), Identification of high-latitude acoustic gravity wave sources
using the Goose Bay HF Radar, J. Geophys. Res., 99(A1), 319-331, doi:10.1029/93JA01470.
Written by Nathaniel A. Frissell, Fall 2013
"""
def __init__(self,myPtr,sTime=None,eTime=None,param='p_l',gscat=1,
fovElevation=None,fovModel='GS',fovCoords='geo',full_array=False):
from davitpy import pydarn
# Create a list that can be used to store top-level messages.
self.messages = []
no_data_message = 'No data for this time period.'
# If no data, report and return.
if myPtr is None:
self.messages.append(no_data_message)
return
if sTime == None: sTime = myPtr.sTime
if eTime == None: eTime = myPtr.eTime
scanTimeList = []
dataList = []
cpidList = []
#Subscripts of columns in the dataList/dataArray
scanInx = 0
dateInx = 1
beamInx = 2
gateInx = 3
dataInx = 4
beamTime = sTime
scanNr = np.uint64(0)
fov = None
# Create a place to store the prm data.
prm = emptyObj()
prm.time = []
prm.mplgs = []
prm.nave = []
prm.noisesearch = []
prm.scan = []
prm.smsep = []
prm.mplgexs = []
prm.xcf = []
prm.noisesky = []
prm.rsep = []
prm.mppul = []
prm.inttsc = []
prm.frang = []
prm.bmazm = []
prm.lagfr = []
prm.ifmode = []
prm.noisemean = []
prm.tfreq = []
prm.inttus = []
prm.rxrise = []
prm.mpinc = []
prm.nrang = []
while beamTime < eTime:
#Load one scan into memory.
# myScan = pydarn.sdio.radDataRead.radDataReadScan(myPtr)
myScan = myPtr.readScan()
if myScan == None: break
goodScan = False # This flag turns to True as soon as good data is found for the scan.
for myBeam in myScan:
#Calculate the field of view if it has not yet been calculated.
if fov == None:
radStruct = pydarn.radar.radStruct.radar(radId=myPtr.stid)
site = pydarn.radar.radStruct.site(radId=myPtr.stid,dt=sTime)
fov = pydarn.radar.radFov.fov(frang=myBeam.prm.frang, rsep=myBeam.prm.rsep, site=site,elevation=fovElevation,model=fovModel,coords=fovCoords)
#Get information from each beam in the scan.
beamTime = myBeam.time
bmnum = myBeam.bmnum
# Save all of the radar operational parameters.
prm.time.append(beamTime)
prm.mplgs.append(myBeam.prm.mplgs)
prm.nave.append(myBeam.prm.nave)
prm.noisesearch.append(myBeam.prm.noisesearch)
prm.scan.append(myBeam.prm.scan)
prm.smsep.append(myBeam.prm.smsep)
prm.mplgexs.append(myBeam.prm.mplgexs)
prm.xcf.append(myBeam.prm.xcf)
prm.noisesky.append(myBeam.prm.noisesky)
prm.rsep.append(myBeam.prm.rsep)
prm.mppul.append(myBeam.prm.mppul)
prm.inttsc.append(myBeam.prm.inttsc)
prm.frang.append(myBeam.prm.frang)
prm.bmazm.append(myBeam.prm.bmazm)
prm.lagfr.append(myBeam.prm.lagfr)
prm.ifmode.append(myBeam.prm.ifmode)
prm.noisemean.append(myBeam.prm.noisemean)
prm.tfreq.append(myBeam.prm.tfreq)
prm.inttus.append(myBeam.prm.inttus)
prm.rxrise.append(myBeam.prm.rxrise)
prm.mpinc.append(myBeam.prm.mpinc)
prm.nrang.append(myBeam.prm.nrang)
#Get the fitData.
fitDataList = getattr(myBeam.fit,param)
slist = getattr(myBeam.fit,'slist')
gflag = getattr(myBeam.fit,'gflg')
if len(slist) > 1:
for (gate,data,flag) in zip(slist,fitDataList,gflag):
#Get information from each gate in scan. Skip record if the chosen ground scatter option is not met.
if (gscat == 1) and (flag == 0): continue
if (gscat == 2) and (flag == 1): continue
tmp = (scanNr,beamTime,bmnum,gate,data)
dataList.append(tmp)
goodScan = True
elif len(slist) == 1:
gate,data,flag = (slist[0],fitDataList[0],gflag[0])
#Get information from each gate in scan. Skip record if the chosen ground scatter option is not met.
if (gscat == 1) and (flag == 0): continue
if (gscat == 2) and (flag == 1): continue
tmp = (scanNr,beamTime,bmnum,gate,data)
dataList.append(tmp)
goodScan = True
else:
continue
if goodScan:
#Determine the start time for each scan and save to list.
scanTimeList.append(min([x.time for x in myScan]))
#Advance to the next scan number.
scanNr = scanNr + 1
#Convert lists to numpy arrays.
timeArray = np.array(scanTimeList)
dataListArray = np.array(dataList)
# If no data, report and return.
if dataListArray.size == 0:
self.messages.append(no_data_message)
return
#Figure out what size arrays we need and initialize the arrays...
nrTimes = np.max(dataListArray[:,scanInx]) + 1
if full_array:
nrBeams = fov.beams.max() + 1
nrGates = fov.gates.max() + 1
else:
nrBeams = np.max(dataListArray[:,beamInx]) + 1
nrGates = np.max(dataListArray[:,gateInx]) + 1
#Make sure the FOV is the same size as the data array.
if len(fov.beams) != nrBeams:
fov.beams = fov.beams[0:nrBeams]
fov.latCenter = fov.latCenter[0:nrBeams,:]
fov.lonCenter = fov.lonCenter[0:nrBeams,:]
fov.slantRCenter = fov.slantRCenter[0:nrBeams,:]
fov.latFull = fov.latFull[0:nrBeams+1,:]
fov.lonFull = fov.lonFull[0:nrBeams+1,:]
fov.slantRFull = fov.slantRFull[0:nrBeams+1,:]
if len(fov.gates) != nrGates:
fov.gates = fov.gates[0:nrGates]
fov.latCenter = fov.latCenter[:,0:nrGates]
fov.lonCenter = fov.lonCenter[:,0:nrGates]
fov.slantRCenter = fov.slantRCenter[:,0:nrGates]
fov.latFull = fov.latFull[:,0:nrGates+1]
fov.lonFull = fov.lonFull[:,0:nrGates+1]
fov.slantRFull = fov.slantRFull[:,0:nrGates+1]
#Convert the dataListArray into a 3 dimensional array.
dataArray = np.ndarray([nrTimes,nrBeams,nrGates])
dataArray[:] = np.nan
for inx in range(len(dataListArray)):
dataArray[dataListArray[inx,scanInx],dataListArray[inx,beamInx],dataListArray[inx,gateInx]] = dataListArray[inx,dataInx]
#Make metadata block to hold information about the processing.
metadata = {}
metadata['dType'] = myPtr.dType
metadata['stid'] = myPtr.stid
metadata['name'] = radStruct.name
metadata['code'] = radStruct.code
metadata['fType'] = myPtr.fType
metadata['cp'] = myPtr.cp
metadata['channel'] = myPtr.channel
metadata['sTime'] = sTime
metadata['eTime'] = eTime
metadata['param'] = param
metadata['gscat'] = gscat
metadata['elevation'] = fovElevation
metadata['model'] = fovModel
metadata['coords'] = fovCoords
dataSet = 'DS000_originalFit'
metadata['dataSetName'] = dataSet
metadata['serial'] = 0
comment = '['+dataSet+'] '+ 'Original Fit Data'
#Save data to be returned as self.variables
setattr(self,dataSet,musicDataObj(timeArray,dataArray,fov=fov,parent=self,comment=comment))
newSigObj = getattr(self,dataSet)
setattr(newSigObj,'metadata',metadata)
#Set the new data active.
newSigObj.setActive()
#Make prm data part of the object.
self.prm = prm
def get_data_sets(self):
    """Return a sorted list of musicDataObj's contained in this musicArray.

    Returns
    -------
    dataSets : list of str
        Names of musicDataObj's contained in this musicArray.

    Written by Nathaniel A. Frissell, Fall 2013
    """
    # Data sets are stored as attributes whose names begin with 'DS'
    # (e.g. 'DS000_originalFit'); collect and sort them so they come
    # back in serial order.
    return sorted(item for item in dir(self) if item.startswith('DS'))
def beamInterpolation(dataObj,dataSet='active',newDataSetName='beamInterpolated',comment='Beam Linear Interpolation'):
    """Interpolates the data in a musicArray object along the beams of the radar.  This method will ensure that no
    rangegates are missing data.  Ranges outside of metadata['gateLimits'] will be set to 0.
    The result is stored as a new musicDataObj in the given musicArray object.

    Parameters
    ----------
    dataObj : musicArray
        musicArray object
    dataSet : Optional[str]
        which dataSet in the musicArray object to process
    newDataSetName : Optional[str]
        Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
    comment : Optional[str]
        String to be appended to the history of this object.

    Written by Nathaniel A. Frissell, Fall 2013
    """
    from scipy.interpolate import interp1d
    currentData = getDataSet(dataObj,dataSet)

    nrTimes = len(currentData.time)
    nrBeams = len(currentData.fov.beams)
    nrGates = len(currentData.fov.gates)

    interpArr = np.zeros([nrTimes,nrBeams,nrGates])
    for tt in range(nrTimes):
        for bb in range(nrBeams):
            rangeVec = currentData.fov.slantRCenter[bb,:]
            input_x  = copy.copy(rangeVec)
            input_y  = currentData.data[tt,bb,:]

            # If metadata['gateLimits'] is set, restrict the fit to those gates.
            # NOTE: dict.has_key() was removed in Python 3; use the 'in' operator.
            if 'gateLimits' in currentData.metadata:
                limits  = currentData.metadata['gateLimits']
                gateInx = np.where(np.logical_and(currentData.fov.gates >= limits[0],currentData.fov.gates <= limits[1]))[0]

                # interp1d needs at least two points to fit.
                if len(gateInx) < 2: continue
                input_x = input_x[gateInx]
                input_y = input_y[gateInx]

            # Drop NaN/inf samples before fitting.
            good = np.where(np.isfinite(input_y))[0]
            if len(good) < 2: continue
            input_x = input_x[good]
            input_y = input_y[good]

            # Out-of-range values (including masked-off gates) become 0.
            intFn = interp1d(input_x,input_y,bounds_error=False,fill_value=0)
            interpArr[tt,bb,:] = intFn(rangeVec)

    newDataSet      = currentData.copy(newDataSetName,comment)
    newDataSet.data = interpArr
    newDataSet.setActive()
def defineLimits(dataObj,dataSet='active',rangeLimits=None,gateLimits=None,beamLimits=None,timeLimits=None):
    """Sets the range, gate, beam, and time limits for the chosen data set. This method only changes metadata;
    it does not create a new data set or alter the data in any way.  If you specify rangeLimits, they will be changed to correspond
    with the center value of the range cell.  Gate limits always override range limits.
    Use the applyLimits() method to remove data outside of the data limits.

    Parameters
    ----------
    dataObj : musicArray
        musicArray object
    dataSet : Optional[str]
        which dataSet in the musicArray object to process
    rangeLimits : Optional[iterable]
        Two-element array defining the maximum and minumum slant ranges to use. [km]
    gateLimits : Optional[iterable]
        Two-element array defining the maximum and minumum gates to use.
    beamLimits : Optional[iterable]
        Two-element array defining the maximum and minumum beams to use.
    timeLimits : Optional[iterable]
        Two-element array of datetime.datetime objects defining the maximum and minumum times to use.

    Written by Nathaniel A. Frissell, Fall 2013
    """
    currentData = getDataSet(dataObj,dataSet)
    try:
        if (rangeLimits is not None) or (gateLimits is not None):
            # If only ranges were given, convert them to gate limits.
            if (rangeLimits is not None) and (gateLimits is None):
                inx = np.where(np.logical_and(currentData.fov.slantRCenter >= rangeLimits[0],currentData.fov.slantRCenter <= rangeLimits[1]))
                gateLimits = [np.min(inx[1][:]),np.max(inx[1][:])]

            if gateLimits is not None:
                # Snap rangeLimits to the centers of the limiting gates.
                # Use builtin int(); np.int was removed in NumPy 1.24.
                rangeMin = int(np.min(currentData.fov.slantRCenter[:,gateLimits[0]]))
                rangeMax = int(np.max(currentData.fov.slantRCenter[:,gateLimits[1]]))
                rangeLimits = [rangeMin,rangeMax]

            currentData.metadata['gateLimits']  = gateLimits
            currentData.metadata['rangeLimits'] = rangeLimits

        if beamLimits is not None:
            currentData.metadata['beamLimits'] = beamLimits

        if timeLimits is not None:
            currentData.metadata['timeLimits'] = timeLimits

    except Exception:
        # Bad limit values (e.g. out-of-range indices) leave the metadata untouched.
        logging.warning("An error occured while defining limits. No limits set. Check your input values.")
def checkDataQuality(dataObj,dataSet='active',max_off_time=10,sTime=None,eTime=None):
    """Mark the data set as bad (metadata['good_period'] = False) if the radar was not operational within the chosen time period
    for a specified length of time.

    Parameters
    ----------
    dataObj : musicArray
        musicArray object
    dataSet : Optional[str]
        which dataSet in the musicArray object to process
    max_off_time : Optional[int/float]
        Maximum length in minutes radar may remain off.
    sTime : Optional[datetime.datetime]
        Starting time of checking period.  If None, min(currentData.time) is used.
    eTime : Optional[datetime.datetime]
        End time of checking period.  If None, max(currentData.time) is used.

    Written by Nathaniel A. Frissell, Fall 2013
    """
    currentData = getDataSet(dataObj,dataSet)

    # Default the checking window to the full extent of the data.
    if sTime is None:
        sTime = np.min(currentData.time)
    if eTime is None:
        eTime = np.max(currentData.time)

    # Keep the samples strictly inside the window, then bracket them with the
    # window edges so that gaps at either end are also measured.
    inside   = np.logical_and(currentData.time > sTime, currentData.time < eTime)
    time_vec = np.concatenate(([sTime], currentData.time[inside], [eTime]))

    # The period is good iff no gap between consecutive samples exceeds
    # max_off_time minutes.
    longest_gap = np.max(np.diff(time_vec))
    is_good     = longest_gap <= datetime.timedelta(minutes=max_off_time)
    currentData.setMetadata(good_period=bool(is_good))

    return dataObj
def applyLimits(dataObj,dataSet='active',rangeLimits=None,gateLimits=None,timeLimits=None,newDataSetName='limitsApplied',comment=None):
    """Removes data outside of the rangeLimits and gateLimits boundaries.

    Parameters
    ----------
    dataObj : musicArray
        musicArray object
    dataSet : Optional[str]
        which dataSet in the musicArray object to process
    rangeLimits : Optional[iterable]
        Two-element array defining the maximum and minumum slant ranges to use. [km]
    gateLimits : Optional[iterable]
        Two-element array defining the maximum and minumum gates to use.
    timeLimits : Optional[iterable]
        Two-element array of datetime.datetime objects defining the maximum and minumum times to use.
    newDataSetName : Optional[str]
        Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
    comment : Optional[str]
        String to be appended to the history of this object.  Set to None for the Default comment (recommended).

    Returns
    -------
    newData : musicDataObj
        Processed version of input musicDataObj (if succeeded), or the original musicDataObj (if failed).

    Written by Nathaniel A. Frissell, Fall 2013
    """
    if (rangeLimits is not None) or (gateLimits is not None) or (timeLimits is not None):
        # NOTE(review): limits are always defined on the 'active' data set here,
        # regardless of the dataSet argument -- confirm this is intentional.
        defineLimits(dataObj,dataSet='active',rangeLimits=rangeLimits,gateLimits=gateLimits,timeLimits=timeLimits)

    currentData = getDataSet(dataObj,dataSet)
    try:
        #Make a copy of the current data set.
        commentList = []

        # If no limits are present in the metadata there is nothing to apply.
        if ('timeLimits' not in currentData.metadata and
            'beamLimits' not in currentData.metadata and
            'gateLimits' not in currentData.metadata):
            return currentData

        newData = currentData.copy(newDataSetName,comment)

        #Apply the gateLimits
        if 'gateLimits' in currentData.metadata:
            limits  = currentData.metadata['gateLimits']
            gateInx = np.where(np.logical_and(currentData.fov.gates >= limits[0],currentData.fov.gates<= limits[1]))[0]

            newData.data      = newData.data[:,:,gateInx]
            newData.fov.gates = newData.fov.gates[gateInx]

            newData.fov.latCenter    = newData.fov.latCenter[:,gateInx]
            newData.fov.lonCenter    = newData.fov.lonCenter[:,gateInx]
            newData.fov.slantRCenter = newData.fov.slantRCenter[:,gateInx]

            #Update the full FOV.
            #This works as long as we look at only consecutive gates.  If we ever do something where we are not looking at consecutive gates
            #(typically for computational speed reasons), we will have to do something else.
            gateInxFull = np.append(gateInx,gateInx[-1]+1) #We need that extra gate since this is the full FOV.
            newData.fov.latFull    = newData.fov.latFull[:,gateInxFull]
            newData.fov.lonFull    = newData.fov.lonFull[:,gateInxFull]
            newData.fov.slantRFull = newData.fov.slantRFull[:,gateInxFull]

            commentList.append('gate: %i,%i' % tuple(limits))
            rangeLim = (np.min(newData.fov.slantRCenter), np.max(newData.fov.slantRCenter))
            commentList.append('range [km]: %i,%i' % rangeLim)

            #Remove limiting item from metadata.
            newData.metadata.pop('gateLimits')
            if 'rangeLimits' in newData.metadata: newData.metadata.pop('rangeLimits')

        #Apply the beamLimits.
        if 'beamLimits' in currentData.metadata:
            limits  = currentData.metadata['beamLimits']
            beamInx = np.where(np.logical_and(currentData.fov.beams >= limits[0],currentData.fov.beams <= limits[1]))[0]

            newData.data      = newData.data[:,beamInx,:]
            newData.fov.beams = newData.fov.beams[beamInx]

            newData.fov.latCenter    = newData.fov.latCenter[beamInx,:]
            newData.fov.lonCenter    = newData.fov.lonCenter[beamInx,:]
            newData.fov.slantRCenter = newData.fov.slantRCenter[beamInx,:]

            #Update the full FOV.
            #This works as long as we look at only consecutive beams.  If we ever do something where we are not looking at consecutive beams
            #(typically for computational speed reasons), we will have to do something else.
            beamInxFull = np.append(beamInx,beamInx[-1]+1) #We need that extra beam since this is the full FOV.
            newData.fov.latFull    = newData.fov.latFull[beamInxFull,:]
            newData.fov.lonFull    = newData.fov.lonFull[beamInxFull,:]
            newData.fov.slantRFull = newData.fov.slantRFull[beamInxFull,:]

            commentList.append('beam: %i,%i' % tuple(limits))
            #Remove limiting item from metadata.
            newData.metadata.pop('beamLimits')

        #Apply the time limits.
        if 'timeLimits' in currentData.metadata:
            limits  = currentData.metadata['timeLimits']
            timeInx = np.where(np.logical_and(currentData.time >= limits[0],currentData.time <= limits[1]))[0]

            newData.data = newData.data[timeInx,:,:]
            newData.time = newData.time[timeInx]

            commentList.append('time: '+limits[0].strftime('%Y-%m-%d/%H:%M,')+limits[1].strftime('%Y-%m-%d/%H:%M'))
            #Remove limiting item from metadata.
            newData.metadata.pop('timeLimits')

        #Update the history with what limits were applied.
        comment    = 'Limits Applied'
        commentStr = '['+newData.metadata['dataSetName']+'] '+comment+': '+'; '.join(commentList)
        key = max(newData.history.keys())
        newData.history[key] = commentStr
        logging.debug(commentStr)

        newData.setActive()
        return newData
    except Exception:
        # Roll back the partially-built data set and hand back the original,
        # warning instead of failing silently.
        if hasattr(dataObj,newDataSetName): delattr(dataObj,newDataSetName)
        logging.warning('Limits not applied; returning original data set.')
        return currentData
def determineRelativePosition(dataObj,dataSet='active',altitude=250.):
    """Finds the center cell of the field-of-view of a musicArray data object.
    The range, azimuth, x-range, and y-range from the center to each cell in the FOV
    is calculated and saved to the FOV object. The following objects are added to
    dataObj.dataSet:
      fov.relative_centerInx: [beam, gate] index of the center cell
      fov.relative_azm:       Azimuth relative to center cell [deg]
      fov.relative_range:     Range relative to center cell [km]
      fov.relative_x:         X-range relative to center cell [km]
      fov.relative_y:         Y-range relative to center cell [km]

    Parameters
    ----------
    dataObj : musicArray
        musicArray object
    dataSet : Optional[str]
        which dataSet in the musicArray object to process
    altitude : Optional[float]
        altitude added to Re = 6378.1 km [km]

    Returns
    -------
    None

    Written by Nathaniel A. Frissell, Fall 2013
    """
    from davitpy import utils

    #Get the chosen dataset.
    currentData = getDataSet(dataObj,dataSet)

    #Determine center beam.  Use floor division: '/' on two ints yields a
    #float under Python 3, which is not a valid array index.
    ctrBeamInx = len(currentData.fov.beams)//2
    ctrGateInx = len(currentData.fov.gates)//2

    currentData.fov.relative_centerInx = [ctrBeamInx, ctrGateInx]

    #Set arrays of lat1/lon1 to the center cell value.  Use this to calculate all other positions
    #with numpy array math.
    lat1 = np.zeros_like(currentData.fov.latCenter)
    lon1 = np.zeros_like(currentData.fov.latCenter)

    lat1[:] = currentData.fov.latCenter[ctrBeamInx,ctrGateInx]
    lon1[:] = currentData.fov.lonCenter[ctrBeamInx,ctrGateInx]

    #Make lat2/lon2 the center position array of the dataset.
    lat2 = currentData.fov.latCenter
    lon2 = currentData.fov.lonCenter

    #Calculate the azimuth and distance from the centerpoint to the endpoint.
    azm  = utils.greatCircleAzm(lat1,lon1,lat2,lon2)
    dist = (Re + altitude)*utils.greatCircleDist(lat1,lon1,lat2,lon2)

    #Save calculated values to the current data object, as well as calculate the
    #X and Y relatvie positions of each cell.
    currentData.fov.relative_azm   = azm
    currentData.fov.relative_range = dist
    currentData.fov.relative_x     = dist * np.sin(np.radians(azm))
    currentData.fov.relative_y     = dist * np.cos(np.radians(azm))

    return None
def timeInterpolation(dataObj,dataSet='active',newDataSetName='timeInterpolated',comment='Time Linear Interpolation',timeRes=10,newTimeVec=None):
    """Interpolates the data in a musicArray object to a regular time grid.

    Parameters
    ----------
    dataObj : musicArray
        musicArray object
    dataSet : Optional[str]
        which dataSet in the musicArray object to process
    newDataSetName : Optional[str]
        Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
    comment : Optional[str]
        String to be appended to the history of this object.
    timeRes : Optional[float]
        time resolution of new time vector [seconds]
    newTimeVec : Optional[list of datetime.datetime]
        Sequence of datetime.datetime objects that data will be interpolated to.  This overides timeRes.

    Written by Nathaniel A. Frissell, Fall 2013
    """
    from scipy.interpolate import interp1d
    from davitpy import utils
    currentData = getDataSet(dataObj,dataSet)

    sTime = currentData.time[0]
    sTime = datetime.datetime(sTime.year,sTime.month,sTime.day,sTime.hour,sTime.minute) #Make start time a round time.
    fTime = currentData.time[-1]

    #Create new time vector.  Use 'is None', not '== None': equality is
    #ambiguous if an ndarray is passed for newTimeVec.
    if newTimeVec is None:
        newTimeVec = [sTime]
        while newTimeVec[-1] < fTime:
            newTimeVec.append(newTimeVec[-1] + datetime.timedelta(seconds=timeRes))

    #Ensure that the new time vector is within the bounds of the actual data set.
    newTimeVec  = np.array(newTimeVec)
    good        = np.where(np.logical_and(newTimeVec > min(currentData.time),newTimeVec < max(currentData.time)))
    newTimeVec  = newTimeVec[good]
    newEpochVec = utils.datetimeToEpoch(newTimeVec)

    #Initialize interpolated data.
    nrTimes = len(newTimeVec)
    nrBeams = len(currentData.fov.beams)
    nrGates = len(currentData.fov.gates)

    interpArr = np.zeros([nrTimes,nrBeams,nrGates])
    for rg in range(nrGates):
        for bb in range(nrBeams):
            input_x = currentData.time[:]
            input_y = currentData.data[:,bb,rg]

            # interp1d needs at least two finite samples.
            good = np.where(np.isfinite(input_y))[0]
            if len(good) < 2: continue
            input_x = input_x[good]
            input_y = input_y[good]

            input_x = utils.datetimeToEpoch(input_x)

            intFn = interp1d(input_x,input_y,bounds_error=False)#,fill_value=0)
            interpArr[:,bb,rg] = intFn(newEpochVec)

    newDataSet      = currentData.copy(newDataSetName,comment)
    newDataSet.time = newTimeVec
    newDataSet.data = interpArr
    newDataSet.setActive()
def filterTimes(sTime,eTime,timeRes,numTaps):
    """The linear filter is going to cause a delay in the signal and also won't get to the end of the signal.
    This function will calcuate the full time period of data that needs to be loaded in order to provide filtered data
    for the event requested.

    Parameters
    ----------
    sTime : datetime.datetime
        Start time of event.
    eTime : datetime.datetime
        End time of event.
    timeRes : float
        Time resolution in seconds of data to be sent to filter.
    numTaps : int
        Length of the filter

    Returns
    -------
    newSTime, newETime : datetime.datetime, datetime.datetime
        Start and end times of data that needs to be fed into the filter.

    Written by Nathaniel A. Frissell, Fall 2013
    """
    # The FIR filter's group delay is half the filter length, so pad the
    # requested interval by that much on both sides.
    pad = datetime.timedelta(seconds=0.5*numTaps*timeRes)
    return (sTime - pad, eTime + pad)
class filter(object):
    """Filter a VT sig/sigStruct object and define a FIR filter object.
    If only cutoff_low is defined, this is a high pass filter.
    If only cutoff_high is defined, this is a low pass filter.
    If both cutoff_low and cutoff_high is defined, this is a band pass filter.
    Uses scipy.signal.firwin()
    High pass and band pass filters inspired by Matti Pastell's page:
      http://mpastell.com/2010/01/18/fir-with-scipy/

    Metadata keys:
      'filter_cutoff_low'  --> cutoff_low
      'filter_cutoff_high' --> cutoff_high
      'filter_numtaps'     --> numtaps

    Parameters
    ----------
    dataObj : musicArray
        musicArray object
    dataSet : Optional[str]
        which dataSet in the musicArray object to process
    numtaps : Optional[int]
        Length of the filter (number of coefficients, i.e. the filter
        order + 1).  `numtaps` must be even if a passband includes the
        Nyquist frequency.
        If dataObj.dataSet.metadata['filter_numtaps'] is set and this keyword is None,
        the metadata value will be used.
    cutoff_low : Optional[float, 1D array_like or None]
        High pass cutoff frequency of filter (expressed in the same units as `nyq`)
        OR an array of cutoff frequencies (that is, band edges). In the
        latter case, the frequencies in `cutoff` should be positive and
        monotonically increasing between 0 and `nyq`.  The values 0 and
        `nyq` must not be included in `cutoff`. If None, a low-pass filter will not
        be applied.
        If dataObj.dataSet.metadata['filter_cutoff_low'] is set and this keyword is None,
        the metadata value will be used.
    cutoff_high : Optional[float, 1D array_like, or None]
        Like cutoff_low, but this is the low pass cutoff frequency of the filter.
        If dataObj.dataSet.metadata['filter_cutoff_high'] is set and this keyword is None,
        the metadata value will be used.
    width : Optional[float]
        If `width` is not None, then assume it is the approximate width
        of the transition region (expressed in the same units as `nyq`)
        for use in Kaiser FIR filter design.  In this case, the `window`
        argument is ignored.
    window : Optional[string or tuple of string and parameter values]
        Desired window to use. See `scipy.signal.get_window` for a list
        of windows and required parameters.
    pass_zero : Optional[bool]
        If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
        Otherwise the DC gain is 0.
    scale : Optional[bool]
        Set to True to scale the coefficients so that the frequency
        response is exactly unity at a certain frequency.
        That frequency is either:
          0 (DC) if the first passband starts at 0 (i.e. pass_zero is True);
          nyq` (the Nyquist rate) if the first passband ends at
              `nyq` (i.e the filter is a single band highpass filter);
          center of first passband otherwise.

    Attributes
    ----------
    comment : str
    cutoff_low : float, 1D array_like or None
        High pass cutoff frequency of filter (expressed in the same units as `nyq`)
        OR an array of cutoff frequencies (that is, band edges).
    cutoff_high : float, 1D array_like, or None
        Like cutoff_low, but this is the low pass cutoff frequency of the filter.
    nyq : float
        the Nyquist rate
    ir :
        impulse response (FIR filter coefficients)

    Methods
    -------
    plotTransferFunction
    plotImpulseResponse
    filter

    Written by Nathaniel A. Frissell, Fall 2013
    """
    def __init__(self, dataObj, dataSet='active', numtaps=None, cutoff_low=None, cutoff_high=None, width=None, window='blackman', pass_zero=True, scale=True,newDataSetName='filtered'):
        import scipy as sp

        sigObj = getattr(dataObj,dataSet)
        nyq = sigObj.nyquistFrequency()

        #Get metadata for cutoffs and numtaps.  Keyword arguments take
        #precedence over metadata values.
        md = sigObj.metadata
        if cutoff_high is None:
            if 'filter_cutoff_high' in md:
                cutoff_high = md['filter_cutoff_high']

        if cutoff_low is None:
            if 'filter_cutoff_low' in md:
                cutoff_low = md['filter_cutoff_low']

        if numtaps is None:
            if 'filter_numtaps' in md:
                numtaps = md['filter_numtaps']
            else:
                logging.warning('You must provide numtaps.')
                return

        if cutoff_high is not None:    #Low pass
            lp = sp.signal.firwin(numtaps=numtaps, cutoff=cutoff_high, width=width, window=window, pass_zero=pass_zero, scale=scale, nyq=nyq)
            d = lp

        if cutoff_low is not None:    #High pass
            hp = -sp.signal.firwin(numtaps=numtaps, cutoff=cutoff_low, width=width, window=window, pass_zero=pass_zero, scale=scale, nyq=nyq)
            #Spectral inversion: add 1 to the center tap.  Floor division is
            #required; '/' yields a float index under Python 3.
            hp[numtaps//2] = hp[numtaps//2] + 1
            d = hp

        if cutoff_high is not None and cutoff_low is not None:
            d = -(lp+hp)
            d[numtaps//2] = d[numtaps//2] + 1
            d = -1.*d #Needed to correct 180 deg phase shift.

        if cutoff_high is None and cutoff_low is None:
            logging.warning("You must define cutoff frequencies!")
            return

        self.comment = ' '.join(['Filter:',window+',','Nyquist:',str(nyq),'Hz,','Cuttoff:','['+str(cutoff_low)+', '+str(cutoff_high)+']','Hz,','Numtaps:',str(numtaps)])
        self.cutoff_low = cutoff_low
        self.cutoff_high = cutoff_high
        self.nyq = nyq
        self.ir = d

        self.filter(dataObj,dataSet=dataSet,newDataSetName=newDataSetName)

    def __str__(self):
        return self.comment

    def plotTransferFunction(self,xmin=0,xmax=None,ymin_mag=-150,ymax_mag=5,ymin_phase=None,ymax_phase=None,worN=None,fig=None):
        """Plot the frequency and phase response of the filter object.

        Parameters
        ----------
        xmin : Optional[float]
            Minimum value for x-axis.
        xmax : Optional[float]
            Maximum value for x-axis.
        ymin_mag : Optional[float]
            Minimum value for y-axis for the frequency response plot.
        ymax_mag : Optional[float]
            Maximum value for y-axis for the frequency response plot.
        ymin_phase : Optional[float]
            Minimum value for y-axis for the phase response plot.
        ymax_phase : Optional[float]
            Maximum value for y-axis for the phase response plot.
        worN : Optional[int]
            passed to scipy.signal.freqz()
            If None, then compute at 512 frequencies around the unit circle.
            If the len(filter) > 512, then compute at len(filter) frequencies around the unit circle.
            If a single integer, the compute at that many frequencies.
            Otherwise, compute the response at frequencies given in worN
        fig : Optional[matplotlib.Figure]
            Figure object on which to plot.  If None, a figure will be created.

        Returns
        -------
        fig : matplotlib.Figure
            Figure object containing the plot.

        Written by Nathaniel A. Frissell, Fall 2013
        """
        import scipy as sp

        if fig is None:
            from matplotlib import pyplot as plt
            fig = plt.figure(figsize=(20,10))

        #For long filters, evaluate the response at one frequency per tap.
        if worN is None:
            if len(self.ir) > 512: worN = len(self.ir)
            else: worN = None
        else: pass

        w,h = sp.signal.freqz(self.ir,1,worN=worN)
        h_dB = 20 * np.log10(abs(h))
        axis = fig.add_subplot(211)

        #Compute frequency vector.
        w = w/max(w) * self.nyq
        axis.plot(w,h_dB,'.-')
        #mp.axvline(x=self.fMax,color='r',ls='--',lw=2)

        if xmin is not None: axis.set_xlim(xmin=xmin)
        if xmax is not None: axis.set_xlim(xmax=xmax)
        if ymin_mag is not None: axis.set_ylim(ymin=ymin_mag)
        if ymax_mag is not None: axis.set_ylim(ymax=ymax_mag)

        axis.set_xlabel(r'Frequency (Hz)')
        axis.set_ylabel('Magnitude (db)')
        axis.set_title(r'Frequency response')

        axis = fig.add_subplot(212)
        h_Phase = np.unwrap(np.arctan2(np.imag(h),np.real(h)))
        axis.plot(w,h_Phase,'.-')

        if xmin is not None: axis.set_xlim(xmin=xmin)
        if xmax is not None: axis.set_xlim(xmax=xmax)
        if ymin_phase is not None: axis.set_ylim(ymin=ymin_phase)
        if ymax_phase is not None: axis.set_ylim(ymax=ymax_phase)

        axis.set_ylabel('Phase (radians)')
        axis.set_xlabel(r'Frequency (Hz)')
        axis.set_title(r'Phase response')
        fig.suptitle(self.comment)
        fig.subplots_adjust(hspace=0.5)

        return fig

    def plotImpulseResponse(self,xmin=None,xmax=None,ymin_imp=None,ymax_imp=None,ymin_step=None,ymax_step=None,fig=None):
        """Plot the impulse and step response of the filter object.

        Parameters
        ----------
        xmin : Optional[float]
            Minimum value for x-axis.
        xmax : Optional[float]
            Maximum value for x-axis.
        ymin_imp : Optional[float]
            Minimum value for y-axis for the impulse response plot.
        ymax_imp : Optional[float]
            Maximum value for y-axis for the impulse response plot.
        ymin_step : Optional[float]
            Minimum value for y-axis for the step response plot.
        ymax_step : Optional[float]
            Maximum value for y-axis for the step response plot.
        fig : Optional[matplotlib.Figure]
            Figure object on which to plot.  If None, a figure will be created.

        Returns
        -------
        fig : matplotlib.Figure
            Figure object containing the plot.

        Written by Nathaniel A. Frissell, Fall 2013
        """
        import scipy as sp

        if fig is None:
            from matplotlib import pyplot as plt
            fig = plt.figure(figsize=(20,10))

        l = len(self.ir)
        impulse = np.repeat(0.,l); impulse[0] =1.
        x = np.arange(0,l)
        response = sp.signal.lfilter(self.ir,1,impulse)
        axis = fig.add_subplot(211)
        axis.stem(x, response)
        axis.set_ylabel('Amplitude')
        axis.set_xlabel(r'n (samples)')
        axis.set_title(r'Impulse response')

        axis = fig.add_subplot(212)
        #The step response is the cumulative sum of the impulse response.
        step = np.cumsum(response)
        axis.stem(x, step)
        axis.set_ylabel('Amplitude')
        axis.set_xlabel(r'n (samples)')
        axis.set_title(r'Step response')
        fig.suptitle(self.comment)
        fig.subplots_adjust(hspace=0.5)

        return fig

    def filter(self,dataObj,dataSet='active',newDataSetName='filtered'):
        """Apply the filter to a vtsig object.

        Parameters
        ----------
        dataObj : musicArray
            musicArray object
        dataSet : Optional[str]
            which dataSet in the musicArray object to process
        newDataSetName : Optional[str]
            Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.

        Written by Nathaniel A. Frissell, Fall 2013
        """
        import scipy as sp

        sigobj = getattr(dataObj,dataSet)
        vtsig  = sigobj.parent

        nrTimes,nrBeams,nrGates = np.shape(sigobj.data)

        #Filter causes a delay in the signal and also doesn't get the tail end of the signal...  Shift signal around, provide info about where the signal is valid.
        shift = np.int32(-np.floor(len(self.ir)/2.))

        start_line    = np.zeros(nrTimes)
        start_line[0] = 1
        start_line    = np.roll(start_line,shift)

        tinx0 = abs(shift)
        tinx1 = np.where(start_line == 1)[0][0]

        val_tm0 = sigobj.time[tinx0]
        val_tm1 = sigobj.time[tinx1]

        filteredData = np.zeros_like(sigobj.data)

        #Apply filter to each beam/gate time series independently.
        for bm in range(nrBeams):
            for rg in range(nrGates):
                tmp = sp.signal.lfilter(self.ir,[1.0],sigobj.data[:,bm,rg])
                tmp = np.roll(tmp,shift)
                filteredData[:,bm,rg] = tmp[:]

        #Create new signal object.
        newsigobj = sigobj.copy(newDataSetName,self.comment)
        #Put in the filtered data.
        newsigobj.data = copy.copy(filteredData)
        newsigobj.time = copy.copy(sigobj.time)

        #Clear out ymin and ymax from metadata; make sure meta data block exists.
        #If not, create it.
        if hasattr(newsigobj,'metadata'):
            delMeta = ['ymin','ymax','ylim']
            for key in delMeta:
                if key in newsigobj.metadata:
                    del newsigobj.metadata[key]
        else:
            newsigobj.metadata = {}

        newsigobj.metadata['timeLimits'] = (val_tm0,val_tm1)

        key = 'title'
        if key in newsigobj.metadata:
            newsigobj.metadata[key] = ' '.join(['Filtered',newsigobj.metadata[key]])
        else:
            newsigobj.metadata[key] = 'Filtered'

        newsigobj.metadata['fir_filter'] = (self.cutoff_low,self.cutoff_high)
        newsigobj.setActive()
def detrend(dataObj,dataSet='active',newDataSetName='detrended',comment=None,type='linear'):
    """Linearly detrend a data in a musicArray/musicDataObj object.

    Parameters
    ----------
    dataObj : musicArray
        musicArray object
    dataSet : Optional[str]
        which dataSet in the musicArray object to process
    newDataSetName : Optional[str]
        Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
    comment : Optional[str]
        String to be appended to the history of this object.  Set to None for the Default comment (recommended).
    type : Optional[str]
        The type of detrending. If type == 'linear' (default), the result of a linear least-squares fit to data
        is subtracted from data. If type == 'constant', only the mean of data is subtracted.

    Written by Nathaniel A. Frissell, Fall 2013
    """
    import scipy as sp

    currentData = getDataSet(dataObj,dataSet)
    currentData = currentData.applyLimits()

    nrTimes, nrBeams, nrGates = np.shape(currentData.data)

    newDataArr= np.zeros_like(currentData.data)
    for bm in range(nrBeams):
        for rg in range(nrGates):
            try:
                newDataArr[:,bm,rg] = sp.signal.detrend(currentData.data[:,bm,rg],type=type)
            except Exception:
                # detrend fails on degenerate series (e.g. containing NaNs);
                # mark the whole cell as missing rather than aborting.
                newDataArr[:,bm,rg] = np.nan

    if comment is None:
        comment = type.capitalize() + ' detrend (scipy.signal.detrend)'

    newDataSet      = currentData.copy(newDataSetName,comment)
    newDataSet.data = newDataArr
    newDataSet.setActive()
def nan_to_num(dataObj,dataSet='active',newDataSetName='nan_to_num',comment=None):
    """Convert all NANs and INFs to finite numbers using numpy.nan_to_num().

    Parameters
    ----------
    dataObj : musicArray
        musicArray object
    dataSet : Optional[str]
        which dataSet in the musicArray object to process
    newDataSetName : Optional[str]
        Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
    comment : Optional[str]
        String to be appended to the history of this object.  Set to None for the Default comment (recommended).

    Written by Nathaniel A. Frissell, Fall 2013
    """
    currentData = getDataSet(dataObj,dataSet)
    currentData = currentData.applyLimits()

    if comment is None:
        comment = 'numpy.nan_to_num'

    newDataSet      = currentData.copy(newDataSetName,comment)
    newDataSet.data = np.nan_to_num(currentData.data)
    newDataSet.setActive()
def windowData(dataObj,dataSet='active',newDataSetName='windowed',comment=None,window='hann'):
    """Apply a window to a musicArray object.  The window is calculated using scipy.signal.get_window().

    Parameters
    ----------
    dataObj : musicArray
        musicArray object
    dataSet : Optional[str]
        which dataSet in the musicArray object to process
    newDataSetName : Optional[str]
        Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
    comment : Optional[str]
        String to be appended to the history of this object.  Set to None for the Default comment (recommended).
    window : Optional[str]
        boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen, bohman, blackmanharris, nuttall,
        barthann, kaiser (needs beta), gaussian (needs std), general_gaussian (needs power, width),
        slepian (needs width), chebwin (needs attenuation)

    Written by Nathaniel A. Frissell, Fall 2013
    """
    import scipy as sp

    currentData = getDataSet(dataObj,dataSet)
    currentData = currentData.applyLimits()

    nrTimes, nrBeams, nrGates = np.shape(currentData.data)

    win = sp.signal.get_window(window,nrTimes,fftbins=False)

    # Apply the window along the time axis to every beam/gate cell at once
    # (broadcasting replaces the per-cell double loop).
    newDataArr = currentData.data * win[:,np.newaxis,np.newaxis]

    if comment is None:
        comment = window.capitalize() + ' window applied (scipy.signal.get_window)'

    newDataSet      = currentData.copy(newDataSetName,comment)
    newDataSet.data = newDataArr
    newDataSet.setActive()
def calculateFFT(dataObj,dataSet='active',comment=None):
    """Calculate the spectrum of an object.

    Parameters
    ----------
    dataObj : musicArray
        musicArray object
    dataSet : Optional[str]
        which dataSet in the musicArray object to process
    comment : Optional[str]
        String to be appended to the history of this object.  Set to None for the Default comment (recommended).

    Written by Nathaniel A. Frissell, Fall 2013
    """
    import scipy as sp

    currentData = getDataSet(dataObj,dataSet)
    currentData = currentData.applyLimits()

    nrTimes, nrBeams, nrGates = np.shape(currentData.data)

    #Determine frequency axis.
    nyq = currentData.nyquistFrequency()
    freq_ax = np.arange(nrTimes,dtype='f8')
    freq_ax = (freq_ax / max(freq_ax)) - 0.5
    freq_ax = freq_ax * 2. * nyq

    #Use complex64, not complex128!  If you use complex128, too much numerical noise will accumulate and the final plot will be bad!
    newDataArr= np.zeros((nrTimes,nrBeams,nrGates),dtype=np.complex64)
    for bm in range(nrBeams):
        for rg in range(nrGates):
            #Normalize each FFT by the number of time samples.
            newDataArr[:,bm,rg] = sp.fftpack.fftshift(sp.fftpack.fft(currentData.data[:,bm,rg])) / np.size(currentData.data[:,bm,rg])

    currentData.freqVec   = freq_ax
    currentData.spectrum  = newDataArr

    # Calculate the dominant frequency #############################################
    posFreqInx = np.where(currentData.freqVec >= 0)[0]
    posFreqVec = currentData.freqVec[posFreqInx]

    #Use the magnitude of the positive frequency data, averaged over all
    #beams and gates, as the power spectral density (vectorized over the
    #former per-frequency loop).
    data    = np.abs(currentData.spectrum[posFreqInx,:,:])
    avg_psd = data.mean(axis=(1,2))
    currentData.dominantFreq = posFreqVec[np.argmax(avg_psd)]
    currentData.appendHistory('Calculated FFT')
def calculateDlm(dataObj,dataSet='active',comment=None):
    """Calculate the cross-spectral matrix of a musicaArray object.  FFT must already have been calculated.

    For every pair of beam/gate cells (l, m), the element
    Dlm = sum over positive frequencies of S_l * conj(S_m) is stored in
    ``currentData.Dlm``.  A lookup table mapping each cell index to its beam,
    gate, and relative N-S / E-W distances is stored in
    ``currentData.llLookupTable``.

    Parameters
    ----------
    dataObj : musicArray
        musicArray object
    dataSet : Optional[str]
        which dataSet in the musicArray object to process
    comment : Optional[str]
        String to be appended to the history of this object.  Set to None for the Default comment (recommended).

    Written by Nathaniel A. Frissell, Fall 2013
    """
    currentData = getDataSet(dataObj, dataSet)

    nrTimes, nrBeams, nrGates = np.shape(currentData.data)
    nCells = nrBeams * nrGates

    currentData.llLookupTable = np.zeros([5, nCells])
    currentData.Dlm           = np.zeros([nCells, nCells], dtype=np.complex128)

    # Only use positive frequencies...
    posInx = np.where(currentData.freqVec > 0)[0]

    # Explicit (beam, gate) index for every cell; gate-major ordering
    # (all beams of gate 0 first, then gate 1, ...).
    llList = [(bb, gg) for gg in xrange(nrGates) for bb in xrange(nrBeams)]

    for ll in range(nCells):
        beam_l, gate_l = llList[ll]
        ew_dist = currentData.fov.relative_x[beam_l, gate_l]
        ns_dist = currentData.fov.relative_y[beam_l, gate_l]
        currentData.llLookupTable[:, ll] = [ll,
                                            currentData.fov.beams[beam_l],
                                            currentData.fov.gates[gate_l],
                                            ns_dist, ew_dist]
        spectL = currentData.spectrum[posInx, beam_l, gate_l]
        for mm in range(nCells):
            beam_m, gate_m = llList[mm]
            spectM = currentData.spectrum[posInx, beam_m, gate_m]
            currentData.Dlm[ll, mm] = np.sum(spectL * np.conj(spectM))
    currentData.appendHistory('Calculated Cross-Spectral Matrix Dlm')
def calculateKarr(dataObj,dataSet='active',kxMax=0.05,kyMax=0.05,dkx=0.001,dky=0.001,threshold=0.15):
    """Calculate the two-dimensional horizontal wavenumber array of a musicArray/musicDataObj object.
    Cross-spectrum array Dlm must already have been calculated.

    Parameters
    ----------
    dataObj : musicArray
        musicArray object
    dataSet : Optional[str]
        which dataSet in the musicArray object to process
    kxMax : Optional[float]
        Maximum kx (East-West) wavenumber to calculate [rad/km]
    kyMax : Optional[float]
        Maximum ky (North-South) wavenumber to calculate [rad/km]
    dkx : Optional[float]
        kx resolution [rad/km]
    dky : Optional[float]
        ky resolution [rad/km]
    threshold : Optional[float]
        threshold of signals to detect as a fraction of the maximum eigenvalue

    Written by Nathaniel A. Frissell, Fall 2013
    """
    currentData = getDataSet(dataObj,dataSet)

    nrTimes, nrBeams, nrGates = np.shape(currentData.data)

    # Eigen-decomposition of the (transposed) cross-spectral matrix.
    # BUGFIX: use the Dlm of the *selected* data set; the original code read
    # dataObj.active.Dlm, which silently ignored the dataSet argument.
    eVals, eVecs = np.linalg.eig(np.transpose(currentData.Dlm))

    # Build symmetric, odd-length kx/ky axes spanning [-kMax, +kMax].
    nkx = np.ceil(2*kxMax/dkx)
    if (nkx % 2) == 0: nkx = nkx+1
    kxVec = kxMax * (2*np.arange(nkx)/(nkx-1) - 1)

    nky = np.ceil(2*kyMax/dky)
    if (nky % 2) == 0: nky = nky+1
    kyVec = kyMax * (2*np.arange(nky)/(nky-1) - 1)

    nkx = int(nkx)
    nky = int(nky)

    xm = currentData.llLookupTable[4,:] #x is in the E-W direction.
    ym = currentData.llLookupTable[3,:] #y is in the N-S direction.

    # Split eigenvalues into "noise" (small) and "signal" (large) subsets using
    # the caller-supplied threshold.  (BUGFIX: the original re-assigned
    # threshold = 0.15 here, which made the keyword argument a no-op.)
    maxEval = np.max(np.abs(eVals))
    minEvalsInx = np.where(eVals <= threshold*maxEval)[0]
    cnt = np.size(minEvalsInx)
    maxEvalsInx = np.where(eVals > threshold*maxEval)[0]
    nSigs = np.size(maxEvalsInx)

    if cnt < 3:
        # A usable noise subspace needs several eigenvectors.
        # (Leftover debug hook "import ipdb; ipdb.set_trace()" removed.)
        logging.warning('Not enough small eigenvalues!')

    logging.info('K-Array: ' + str(nkx) + ' x ' + str(nky))
    logging.info('Kx Max: ' + str(kxMax))
    logging.info('Kx Res: ' + str(dkx))
    logging.info('Ky Max: ' + str(kyMax))
    logging.info('Ky Res: ' + str(dky))
    logging.info('')
    logging.info('Signal Threshold: ' + str(threshold))
    logging.info('Number of Det Signals: ' + str(nSigs))
    logging.info('Number of Noise Evals: ' + str(cnt))

    logging.info('Starting kArr Calculation...')
    t0 = datetime.datetime.now()

    def vCalc(um, v):
        # Projection of the steering vector um onto noise eigenvector v.
        return np.dot(np.conj(um), v) * np.dot(np.conj(v), um)

    vList = [eVecs[:, minEvalsInx[ee]] for ee in xrange(cnt)]

    kArr = np.zeros((nkx, nky), dtype=np.complex64)
    for kk_kx in xrange(nkx):
        kx = kxVec[kk_kx]
        for kk_ky in xrange(nky):
            ky = kyVec[kk_ky]
            um = np.exp(1j*(kx*xm + ky*ym))
            # MUSIC pseudo-spectrum: inverse of the total projection onto the
            # noise subspace (large where (kx, ky) matches a signal).
            kArr[kk_kx, kk_ky] = 1. / np.sum([vCalc(um, v) for v in vList])
    t1 = datetime.datetime.now()
    logging.info('Finished kArr Calculation.  Total time: ' + str(t1-t0))

    currentData.karr  = kArr
    currentData.kxVec = kxVec
    currentData.kyVec = kyVec
    currentData.appendHistory('Calculated kArr')
def simulator(dataObj, dataSet='active',newDataSetName='simulated',comment=None,keepLocalRange=True,sigs=None,noiseFactor=0):
    """Replace SuperDARN Data with simulated MSTID(s).  This is useful for understanding how the signal processing
    routines of this module affect ideal data.

    Parameters
    ----------
    dataObj : musicArray
        musicArray object
    dataSet : Optional[str]
        which dataSet in the musicArray object to process
    newDataSetName : Optional[str]
        Name of the new musicDataObj to be created in the current musicArray object as a result of this processing.
    comment : Optional[str]
        String to be appended to the history of this object.  Set to None for the Default comment (recommended).
    keepLocalRange : Optional[bool]
        If true, the locations calculated for the actual radar field of view will be used.  If false,
        a linearly-spaced grid will replace the true grid.
    sigs : Optional[list of tuples]
        A list of tuples defining the characteristics of the simulated signal.  Sample list is as follows.
        If this keyword is None, the values in this sample list are used as the default values.::

            sigs = []
            #           (amp,    kx,      ky,      f, phi, dcOffset)
            sigs.append((  5,  0.01,  -0.010, 0.0004,   0,       5.))
            sigs.append((  5, 0.022,  -0.023, 0.0004,   0,       5.))

        Each signal is evaluated as a cosine and then summed together.  The cosine evaluated is::

            sig = amp * np.cos(kx*xgrid + ky*ygrid - 2.*np.pi*f*t + phi) + dc

    noiseFactor : Optional[float]
        Add white gaussian noise to the simulated signal.  noiseFactor is a scalar such that:
        noise = noiseFactor*np.random.standard_normal(nSteps)

    Written by Nathaniel A. Frissell, Fall 2013
    """
    from davitpy import utils

    currentData = getDataSet(dataObj,dataSet)

    #Typical TID Parameters:
    #   Frequency:      0.0003 mHz
    #   Period:         55.5 min
    #   H. Wavelength:  314 km
    #   k:              0.02 /km

    if keepLocalRange:
        # Use the radar's actual field-of-view geometry.
        nx, ny = np.shape(currentData.fov.relative_x)
        xRange = np.max(currentData.fov.relative_x) - np.min(currentData.fov.relative_x)
        yRange = np.max(currentData.fov.relative_y) - np.min(currentData.fov.relative_y)

        xgrid = currentData.fov.relative_x
        ygrid = currentData.fov.relative_y
    else:
        # Replace the true FOV with a regular, linearly spaced grid.
        nx = 16
        xRange = 800.
        ny = 25
        yRange = 600.

        xvec = np.linspace(-xRange/2.,xRange/2.,nx)
        yvec = np.linspace(-yRange/2.,yRange/2.,ny)

        dx = np.diff(xvec)[0]   # Grid spacing; referenced only by the
        dy = np.diff(yvec)[0]   # commented-out spatial Nyquist checks below.

        xgrid = np.zeros((nx,ny))
        ygrid = np.zeros((nx,ny))

        # Fill the coordinate grids: x varies along axis 0, y along axis 1.
        # BUGFIX: the x grid was previously filled as xgrid[kk,:] = yvec,
        # which used y values and indexed out of bounds whenever ny > nx.
        for kk in xrange(nx): ygrid[kk,:] = yvec[:]
        for kk in xrange(ny): xgrid[:,kk] = xvec[:]

    if sigs is None:
        #Set some default signals.
        sigs = []
        #           (amp,    kx,      ky,      f, phi, dcOffset)
        sigs.append((  5,  0.01,  -0.010, 0.0004,   0,       5.))
        sigs.append((  5, 0.022,  -0.023, 0.0004,   0,       5.))

    # Seconds since the first sample.
    secVec = np.array(utils.datetimeToEpoch(currentData.time))
    secVec = secVec - secVec[0]

    nSteps = len(secVec)
    dt = currentData.samplePeriod()

    dataArr = np.zeros((nSteps,nx,ny))

    for step in xrange(nSteps):
        t = secVec[step]
        for kk in xrange(len(sigs)):
            amp = sigs[kk][0]
            kx  = sigs[kk][1]
            ky  = sigs[kk][2]
            f   = sigs[kk][3]
            phi = sigs[kk][4]
            dc  = sigs[kk][5]

            if 1./dt <= 2.*f:
                logging.warning('Nyquist Violation in f.')
                logging.warning('Signal #: %i' % kk)

#            if 1./dx <= 2.*kx/(2.*np.pi):
#                print 'WARNING: Nyquist Violation in kx.'
#                print 'Signal #: %i' % kk
#
#            if 1./dy <= 2.*ky/(2.*np.pi):
#                print 'WARNING: Nyquist Violation in ky.'
#                print 'Signal #: %i' % kk

            # Each signal is a plane-wave cosine plus a DC offset; signals sum.
            temp = amp * np.cos(kx*xgrid + ky*ygrid - 2.*np.pi*f*t + phi) + dc
            dataArr[step,:,:] = dataArr[step,:,:] + temp

    #Signal RMS
    sig_rms = np.zeros((nx,ny))
    for xx in xrange(nx):
        for yy in xrange(ny):
            sig_rms[xx,yy] = np.sqrt(np.mean((dataArr[:,xx,yy])**2.))

    noise_rms = np.zeros((nx,ny))
    if noiseFactor > 0:
        nf = noiseFactor
        #Temporal White Noise
        for xx in xrange(nx):
            for yy in xrange(ny):
                noise = nf*np.random.standard_normal(nSteps)
                noise_rms[xx,yy] = np.sqrt(np.mean(noise**2))
                dataArr[:,xx,yy] = dataArr[:,xx,yy] + noise

    # Gaussian range-gate dependence: taper the signal along the gate axis.
    xx      = np.arange(ny)
    mu      = (ny-1.)/2.
    sigma2  = 10.0
    sigma   = np.sqrt(sigma2)
    rgDist  = 1./(sigma*np.sqrt(2.*np.pi)) * np.exp(-0.5 * ((xx-mu)/sigma)**2)
    rgDist  = rgDist / np.max(rgDist)

    mask = np.zeros((nx,ny))
    for nn in xrange(nx): mask[nn,:] = rgDist[:]

    mask3d = np.zeros((nSteps,nx,ny))
    for nn in xrange(nSteps): mask3d[nn,:,:] = mask[:]

    #Apply Range Gate Dependence
    dataArr = dataArr * mask3d

    # SNR diagnostics.  With noiseFactor == 0 noise_rms is all zeros, so
    # suppress the resulting divide/invalid warnings (snr becomes inf).
    with np.errstate(divide='ignore', invalid='ignore'):
        snr = (sig_rms/noise_rms)**2
        snr_db = 10.*np.log10(snr)

    if comment is None:
        comment = 'Simulated data injected.'

    newDataSet = currentData.copy(newDataSetName,comment)
    newDataSet.data = dataArr
    newDataSet.setActive()
def scale_karr(kArr):
    """Scale/normalize kArr for plotting and signal detection.

    Parameters
    ----------
    kArr : 2D numpy.array
        Two-dimensional horizontal wavenumber array of a musicArray/musicDataObj object.

    Returns
    -------
    data : 2D numpy.array
        Scaled and normalized version of kArr.

    Written by Nathaniel A. Frissell, Fall 2013
    """
    # NOTE: the docstring now precedes any statement so it is a real
    # docstring (it used to follow an import, making it a no-op string).

    # Shift the magnitude so its minimum is exactly 0.
    data = np.abs(kArr) - np.min(np.abs(kArr))

    # Normalize by mean + 6.5 standard deviations so strong peaks land near 1.
    # np.nanstd/np.nanmean replace scipy.stats.nanstd/nanmean, which were
    # removed from SciPy; ddof=1 matches scipy.stats.nanstd's default
    # (bias=False) normalization by N-1.
    sd    = np.nanstd(data, axis=None, ddof=1)
    mean  = np.nanmean(data, axis=None)
    scMax = mean + 6.5*sd
    data  = data / scMax
    return data
def detectSignals(dataObj,dataSet='active',threshold=0.35,neighborhood=(10,10)):
    """Automatically detects local maxima/signals in a calculated kArr.  This routine uses the watershed
    algorithm from the skimage image processing library.  Results are automatically stored in
    dataObj.dataSet.sigDetect.

    Parameters
    ----------
    dataObj : musicArray
        musicArray object
    dataSet : Optional[str]
        which dataSet in the musicArray object to process
    threshold : Optional[float]
        Scaled input data must be above this value to be detected.  A higher number
        will reduce the number of signals detected.
    neighborhood : Optional[tuple]
        Local region in which to search for peaks at every point in the image/array.
        (10,10) will search a 10x10 pixel area.

    Returns
    -------
    currentData : musicDataObj
        object

    Written by Nathaniel A. Frissell, Fall 2013
    """
    currentData = getDataSet(dataObj,dataSet)
    ################################################################################
    #Feature detection...
    #Now lets do a little image processing...
    from scipy import ndimage
    from skimage.morphology import watershed
    from skimage.feature import peak_local_max
    #sudo pip install cython
    #sudo pip install scikit-image
    # NOTE(review): this targets an older scikit-image API -- on modern
    # versions watershed lives in skimage.segmentation and peak_local_max no
    # longer accepts indices=False; confirm the pinned skimage version.

    # Scale the k-array to [~0, ~1] so the threshold is comparable across data.
    data = scale_karr(currentData.karr)

    # Binary mask of pixels strong enough to belong to a signal.
    mask = data > threshold
    labels, nb = ndimage.label(mask)

    # Distance transform + local maxima give one marker per candidate peak;
    # watershed on -distance then splits touching blobs, one label per signal.
    distance = ndimage.distance_transform_edt(mask)
    local_maxi = peak_local_max(distance,footprint=np.ones(neighborhood),indices=False)
    markers,nb = ndimage.label(local_maxi)
    labels = watershed(-distance,markers,mask=mask)

    # Per-label statistics: area (pixel count), maximum value, and its position.
    areas = ndimage.sum(mask,labels,xrange(1,labels.max()+1))
    maxima = ndimage.maximum(data,labels,xrange(1, labels.max()+1))
    # Signals are ordered strongest first (order 1 == largest maximum).
    order = np.argsort(maxima)[::-1] + 1
    maxpos = ndimage.maximum_position(data,labels,xrange(1, labels.max()+1))

    sigDetect = SigDetect()
    sigDetect.mask = mask
    sigDetect.labels = labels
    sigDetect.nrSigs = nb
    sigDetect.info = []
    for x in xrange(labels.max()):
        info = {}
        info['labelInx'] = x+1
        info['order'] = order[x]
        info['area'] = areas[x]
        info['max'] = maxima[x]
        info['maxpos'] = maxpos[x]
        # Convert the peak's grid position into physical wavenumbers.
        info['kx'] = currentData.kxVec[info['maxpos'][0]]
        info['ky'] = currentData.kyVec[info['maxpos'][1]]
        info['k'] = np.sqrt( info['kx']**2 + info['ky']**2 )
        info['lambda_x'] = 2*np.pi / info['kx']
        info['lambda_y'] = 2*np.pi / info['ky']
        info['lambda'] = 2*np.pi / info['k']
        info['azm'] = np.degrees(np.arctan2(info['kx'],info['ky']))
        info['freq'] = currentData.dominantFreq
        info['period'] = 1./currentData.dominantFreq
        # Phase speed from omega/k; the 1000. factor presumably converts
        # km-based wavenumbers to m/s -- TODO confirm units.
        info['vel'] = (2.*np.pi/info['k']) * info['freq'] * 1000.
        sigDetect.info.append(info)

    currentData.appendHistory('Detected KArr Signals')
    currentData.sigDetect = sigDetect
    return currentData
def add_signal(kx,ky,dataObj,dataSet='active',frequency=None):
    """Manually add a signal to the detected signal list.  All signals will be re-ordered according to value in the
    scaled kArr.  Added signals can be distinguished from autodetected signals because
    'labelInx' and 'area' will both be set to -1.

    Parameters
    ----------
    kx : float
        Value of kx of new signal.
    ky : float
        Value of ky of new signal.
    dataObj : musicArray
        musicArray object
    dataSet : Optional[str]
        which dataSet in the musicArray object to process
    frequency : Optional[float]
        Frequency to use to calculate period, phase velocity, etc.  If None,
        the calculated dominant frequency will be used.

    Returns
    -------
    currentData : musicDataObj
        object

    Written by Nathaniel A. Frissell, Fall 2013
    """
    currentData = getDataSet(dataObj,dataSet)
    data = scale_karr(currentData.karr)

    def _closest_index(vec, target):
        # Index of the element of vec nearest to target.
        return (np.abs(vec - target)).argmin()

    # Snap the requested (kx, ky) onto the calculated wavenumber grid.
    kx_inx = _closest_index(currentData.kxVec, kx)
    ky_inx = _closest_index(currentData.kyVec, ky)

    maxpos     = (kx_inx, ky_inx)
    value      = data[kx_inx, ky_inx]
    true_value = currentData.karr[kx_inx, ky_inx]   # Unscaled kArr value.

    freq = currentData.dominantFreq if frequency is None else frequency

    kx_val = currentData.kxVec[kx_inx]
    ky_val = currentData.kyVec[ky_inx]
    k_val  = np.sqrt(kx_val**2 + ky_val**2)

    info = {
        'labelInx': -1,          # -1 marks a manually added signal.
        'area':     -1,
        'order':    -1,
        'max':      value,
        'true_max': true_value,  # Unscaled kArr value
        'maxpos':   maxpos,
        'kx':       kx_val,
        'ky':       ky_val,
        'k':        k_val,
        'lambda_x': 2*np.pi / kx_val,
        'lambda_y': 2*np.pi / ky_val,
        'lambda':   2*np.pi / k_val,
        'azm':      np.degrees(np.arctan2(kx_val, ky_val)),
        'freq':     freq,
        'period':   1./freq,
        'vel':      (2.*np.pi/k_val) * freq * 1000.,
    }

    currentData.sigDetect.info.append(info)
    currentData.sigDetect.reorder()
    currentData.appendHistory('Appended Signal to sigDetect List')
    return currentData
def del_signal(order,dataObj,dataSet='active'):
    """Remove a signal from the detected signal list.

    Parameters
    ----------
    order : int or list of int
        Single value or list of signal orders (ID's) to be removed from the list.
    dataObj : musicArray
        object
    dataSet : Optional[str]
        which dataSet in the musicArray object to process

    Returns
    -------
    currentData : musicDataObj
        object

    Written by Nathaniel A. Frissell, Fall 2013
    """
    currentData = getDataSet(dataObj,dataSet)

    # BUGFIX: np.atleast_1d allows a scalar `order` -- `x in arr` raises
    # TypeError on the 0-d array that np.array(order) produced for a single
    # value.  (An unused `data = scale_karr(...)` call was also removed.)
    orderArr = np.atleast_1d(order)

    # Iterate over a copy so removing entries during iteration is safe.
    for item in list(currentData.sigDetect.info):
        if item['order'] in orderArr:
            currentData.sigDetect.info.remove(item)
    currentData.sigDetect.reorder()
    currentData.appendHistory('Deleted Signals from sigDetect List')
    return currentData
| MuhammadVT/davitpy | davitpy/pydarn/proc/music/music.py | Python | gpl-3.0 | 84,879 | [
"Gaussian"
] | 85c464d7c505b792ac453e539a627bbb6a5bdb655e7d119ba74b7b68df21241d |
# encoding: utf-8
import gtk
from gettext import gettext as _
import numpy as np
from ase.gui.widgets import pack, oops, AseGuiCancelException
import sys
import re
import time
class DummyProgressIndicator:
    """Progress indicator that silently discards all progress reports.

    Drop-in stand-in for DefaultProgressIndicator when no GUI feedback is
    wanted: both hooks accept the same calls and do nothing.
    """
    def begin(self, **kwargs):
        """Accept any begin() keyword arguments and ignore them."""

    def end(self):
        """Do nothing."""
class DefaultProgressIndicator(gtk.Window):
    """Window for reporting progress of scaling deformations and energy
    minimizations.  It stays hidden until an operation has run for more than
    ``waittime`` seconds (see activity()), then pops up with progress bars."""
    waittime = 3      # Time (in sec) after which a progress bar appears.
    updatetime = 0.1  # Minimum time (in sec) between updates of the progress bars.

    def __init__(self):
        """Build the (initially hidden) progress window and all its widgets."""
        gtk.Window.__init__(self)
        self.set_title(_("Progress"))
        self.globalbox = gtk.VBox()
        self.nextupdate = 0   # Earliest time the bars may be redrawn (see logger_write).
        self.fmax_max = 1.0   # Upper bound used to scale the fmax progress bar.

        # Scaling deformation progress frame
        self.scalebox = gtk.VBox()
        self.scaleframe = gtk.Frame(_("Scaling deformation:"))
        vbox = gtk.VBox()
        self.scaleframe.add(vbox)
        pack(self.scalebox, [self.scaleframe])
        pack(self.scalebox, gtk.Label(""))
        self.label_scale_stepno_format = _("Step number %s of %s.")
        self.label_scale_stepno = gtk.Label(
            self.label_scale_stepno_format % ("-", "-"))
        pack(vbox, [self.label_scale_stepno])
        self.scale_progress = gtk.ProgressBar()
        # Green bar for the scaling stage.
        self.scale_progress.modify_bg(gtk.STATE_PRELIGHT,
                                      gtk.gdk.color_parse('#00AA00'))
        pack(vbox, [self.scale_progress])

        vbox.show()
        self.scaleframe.show()
        self.globalbox.pack_start(self.scalebox)

        # Minimization progress frame
        self.minbox = gtk.VBox()   # Box containing frame and spacing
        self.minframe = gtk.Frame(_("Energy minimization:"))
        vbox = gtk.VBox()          # Box containing the frames content.
        self.minframe.add(vbox)
        pack(self.minbox, [self.minframe])
        pack(self.minbox, gtk.Label(""))
        self.label_min_stepno = gtk.Label("-")
        pack(vbox, [gtk.Label(_("Step number: ")), self.label_min_stepno])
        lbl = gtk.Label()
        lbl.set_markup(_("F<sub>max</sub>: "))
        self.minimize_progress = gtk.ProgressBar()
        pack(vbox, [lbl, self.minimize_progress])
        self.label_min_fmax = gtk.Label("-")
        lbl = gtk.Label()
        lbl.set_markup(_("Convergence criterion: F<sub>max</sub> = "))
        pack(vbox, [lbl, self.label_min_fmax])
        self.label_min_maxsteps = gtk.Label("-")
        pack(vbox, [gtk.Label(_("Max. number of steps: ")),
                    self.label_min_maxsteps])

        vbox.show()
        self.minframe.show()
        self.globalbox.pack_start(self.minbox)
        self.globalbox.show()
        self.add(self.globalbox)

        # Make the cancel button
        self.cancelbut = gtk.Button(stock=gtk.STOCK_CANCEL)
        self.cancelbut.connect('clicked', self.cancel)
        pack(self.globalbox, [self.cancelbut], end=True, bottom=True)

    def begin(self, mode=None, algo=None, fmax=None, steps=None,
              scalesteps=None):
        """Prepare the window for a new operation.

        mode ("scale", "min" or "scale/min") decides which frames are shown;
        fmax and steps fill the convergence labels; scalesteps is the number
        of deformation steps.  (algo is accepted but unused here.)  The
        window itself is only shown later, by activity(), once the operation
        has run longer than waittime seconds.
        """
        self.mode = mode
        # Hide all mode-specific boxes
        self.scalebox.hide()
        self.minbox.hide()
        # Activate any relevant box
        if mode == "scale" or mode == "scale/min":
            self.scalesteps = int(scalesteps)
            self.scalebox.show()
            self.set_scale_progress(0, init=True)
        if mode == "min" or mode == "scale/min":
            # It is a minimization.
            self.minbox.show()
            self.label_min_stepno.set_text("-")
            self.label_min_fmax.set_text("%.3f" % (fmax,))
            self.label_min_maxsteps.set_text(str(int(steps)))
            self.minimize_progress.set_fraction(0)
            self.minimize_progress.set_text(_("unknown"))
        # Record starting time
        self.starttime = time.time()
        self.active = None  # Becoming active
        self.raisecancelexception = False

    def end(self):
        """Hide the window and mark the operation as finished."""
        self.hide()
        self.active = False

    def activity(self):
        "Register that activity occurred."
        if self.active is None and time.time() > self.starttime + self.waittime:
            # This has taken so long that a progress bar is needed.
            self.show()
            self.active = True
        # Allow GTK to update display
        if self.active:
            while gtk.events_pending():
                gtk.main_iteration()
            if self.raisecancelexception:
                # Cancel was clicked since the last check: abort the operation.
                self.cancelbut.set_sensitive(True)
                raise AseGuiCancelException

    def cancel(self, widget):
        """GTK callback for the Cancel button; defers the actual abort."""
        print "CANCEL pressed."
        # We cannot raise the exception here, as this function is
        # called by the GTK main loop.
        self.raisecancelexception = True
        self.cancelbut.set_sensitive(False)

    def set_scale_progress(self, step, init=False):
        "Set the step number in scaling deformation."
        self.label_scale_stepno.set_text(
            self.label_scale_stepno_format % (step, self.scalesteps))
        percent = 1.0 * step / self.scalesteps
        self.scale_progress.set_fraction(percent)
        self.scale_progress.set_text("%i%%" % (round(100*percent),))
        if not init:
            self.activity()

    def logger_write(self, line):
        """Receive one optimizer log line (via LoggerStream) and update bars.

        Assumes the step number is the second whitespace-separated token and
        fmax the last one -- TODO confirm against the optimizer log format.
        Updates are rate-limited to at most one per updatetime seconds.
        """
        if time.time() > self.nextupdate:
            if self.mode == "min" or self.mode == "scale/min":
                # Update the minimization progress bar.
                w = line.split()
                fmax = float(w[-1])
                step = w[1]
                if fmax > self.fmax_max:
                    # Grow the bar's scale so the fraction stays <= 1.
                    self.fmax_max = np.ceil(fmax)
                self.minimize_progress.set_fraction(fmax / self.fmax_max)
                self.minimize_progress.set_text(w[-1])
                self.label_min_stepno.set_text(step)
            else:
                raise RuntimeError(
                    "ProgressIndicator.logger_write called unexpectedly")
            self.activity()
            self.nextupdate = time.time() + self.updatetime

    def get_logger_stream(self):
        """Return a file-like object feeding write() calls to logger_write()."""
        return LoggerStream(self)
class GpawProgressIndicator(DefaultProgressIndicator):
    """Window for reporting GPAW progress.

    Extends DefaultProgressIndicator with a frame that is filled by parsing
    GPAW's text output line by line (see gpaw_write)."""

    def __init__(self):
        """Build the base window, then add the GPAW-specific table of labels
        and per-quantity convergence progress bars."""
        DefaultProgressIndicator.__init__(self)
        # GPAW progress frame
        self.gpawframe = gtk.Frame("GPAW progress:")
        vbox = self.gpawvbox = gtk.VBox()
        self.gpawframe.add(vbox)
        self.table = gtk.Table(1, 2)
        self.tablerows = 0
        pack(vbox, self.table)
        self.status = gtk.Label("-")
        self.tablepack([gtk.Label(_("Status: ")), self.status])
        self.iteration = gtk.Label("-")
        self.tablepack([gtk.Label(_("Iteration: ")), self.iteration])
        self.tablepack([gtk.Label("")])
        lbl = gtk.Label()
        lbl.set_markup(_("log<sub>10</sub>(change):"))
        self.tablepack([gtk.Label(""), lbl])
        self.wfs_progress = gtk.ProgressBar()
        self.tablepack([gtk.Label(_("Wave functions: ")), self.wfs_progress])
        self.dens_progress = gtk.ProgressBar()
        self.tablepack([gtk.Label(_("Density: ")), self.dens_progress])
        self.energy_progress = gtk.ProgressBar()
        self.tablepack([gtk.Label(_("Energy: ")), self.energy_progress])
        self.tablepack([gtk.Label("")])
        self.versionlabel = gtk.Label("")
        self.tablepack([gtk.Label(_("GPAW version: ")), self.versionlabel])
        self.natomslabel = gtk.Label("")
        self.tablepack([gtk.Label(_("Number of atoms: ")), self.natomslabel])
        self.memorylabel = gtk.Label(_("N/A"))
        self.tablepack([gtk.Label(_("Memory estimate: ")), self.memorylabel])
        self.globalbox.pack_start(self.gpawframe)
        self.gpawframe.show()
        vbox.show()
        self.active = False

    def tablepack(self, widgets):
        """Append widgets as a new row of the two-column table."""
        self.tablerows += 1
        self.table.resize(self.tablerows, 2)
        for i, w in enumerate(widgets):
            self.table.attach(w, i, i+1, self.tablerows-1, self.tablerows)
            if hasattr(w, "set_alignment"):
                w.set_alignment(0, 0.5)
            w.show()

    def begin(self, **kwargs):
        """Start a new run: reset the GPAW bars and show the window at once."""
        DefaultProgressIndicator.begin(self, **kwargs)
        # Set GPAW specific stuff.
        self.active = True
        self.oldenergy = None
        self.poscount = None   # Becomes True while the "Positions:" list is being read.
        self.reset_gpaw_bars()
        # With GPAW, all calculations are slow:  Show progress window
        # immediately.
        self.show()
        while gtk.events_pending():
            gtk.main_iteration()

    def reset_gpaw_bars(self):
        """Blank the status/iteration labels and zero all three bars."""
        for lbl in (self.status, self.iteration):
            lbl.set_text("-")
        for bar in (self.wfs_progress, self.dens_progress,
                    self.energy_progress):
            bar.set_fraction(0.0)
            bar.set_text(_("No info"))

    def gpaw_write(self, txt):
        """Parse one line of GPAW's text output and update the GPAW frame.

        Also echoes the line to stdout.  The branches below recognize, in
        order: the version banner, the start/end of the atom position list,
        SCF iteration lines, the convergence-table header (which records the
        column offsets used to slice later iteration lines), SCF convergence,
        and the memory estimate.
        """
        #if not self.active:
        #    self.begin()
        sys.stdout.write(txt)
        # Matches the x.y.z version number in GPAW's startup banner.
        versearch = re.search("\|[ |_.]+([0-9]+\.[0-9]+\.[0-9]+)", txt)
        if versearch:
            # Starting a gpaw calculation.
            self.versionlabel.set_text(versearch.group(1))
            self.status.set_text(_("Initializing"))
        elif txt.startswith(_("Positions:")):
            # Start counting atoms
            self.poscount = True
            self.reset_gpaw_bars()
            self.status.set_text(_("Starting calculation"))
            self.oldenergy = None
        elif txt.strip() == "":
            # Stop counting atoms
            self.poscount = False
        elif self.poscount:
            # Count atoms.
            w = txt.split()
            assert(len(w) == 5)
            self.natoms = int(w[0]) + 1
            self.natomslabel.set_text(str(self.natoms))
        elif txt.startswith("iter:"):
            # Found iteration line.  Slice fixed columns recorded from the
            # table header; values are log10 changes, so the bars run from a
            # per-quantity floor (-9, -4, -3) up to 0.
            wfs = txt[self.wfs_idx:self.density_idx].strip()
            dens = txt[self.density_idx:self.energy_idx].strip()
            energy = txt[self.energy_idx:self.fermi_idx].strip()
            if wfs:
                p = fraction(float(wfs), -9.0)
                self.wfs_progress.set_fraction(p)
                self.wfs_progress.set_text(wfs)
            if dens:
                p = fraction(float(dens), -4.0)
                self.dens_progress.set_fraction(p)
                self.dens_progress.set_text(dens)
            if energy:
                if self.oldenergy is None:
                    self.oldenergy = float(energy)
                else:
                    # Energy convergence is tracked as the per-atom change
                    # between successive iterations.
                    de = abs(self.oldenergy - float(energy))
                    self.oldenergy = float(energy)
                    if de > 1e-10:
                        de = np.log10(de/self.natoms)
                        p = fraction(de, -3.0)
                        self.energy_progress.set_fraction(p)
                        self.energy_progress.set_text("%.1f" % de)
                    else:
                        self.energy_progress.set_fraction(1)
                        self.energy_progress.set_text(_("unchanged"))
            words = txt.split()
            self.iteration.set_text(words[1])
        elif (-1 < txt.find("WFS") < txt.find("Density") < txt.find("Energy")
              < txt.find("Fermi")):
            # Found header of convergence table
            self.wfs_idx = txt.find("WFS")
            self.density_idx = txt.find("Density")
            self.energy_idx = txt.find("Energy")
            self.fermi_idx = txt.find("Fermi")
            self.status.set_text(_("Self-consistency loop"))
            self.iteration.set_text("0")
        elif txt.find("Converged After") != -1:
            # SCF loop has converged.
            words = txt.split()
            self.status.set_text(_("Calculating forces"))
            self.iteration.set_text(words[2] + _(" (converged)"))
        elif -1 < txt.find("Calculator") < txt.find("MiB"):
            # Memory estimate
            words = txt.split()
            self.memorylabel.set_text(words[1]+" "+words[2])
        self.activity()

    def get_gpaw_stream(self):
        """Return a file-like object feeding write() calls to gpaw_write()."""
        return GpawStream(self)
class LoggerStream:
    """File-like adapter that forwards minimizer log lines to a progress window."""

    def __init__(self, progresswindow):
        # Window whose logger_write() receives every chunk written here.
        self.window = progresswindow

    def write(self, txt):
        """Forward txt unchanged to the window's log handler."""
        self.window.logger_write(txt)

    def flush(self):
        """Nothing is buffered, so flushing is a no-op."""
class GpawStream:
    """File-like adapter feeding GPAWs txt file to GpawProgressWindow.

    Output is delivered to the window one line at a time: complete lines keep
    their trailing newline, and a final partial line is passed without one.
    """

    def __init__(self, progresswindow):
        # Window whose gpaw_write() parses each (possibly partial) line.
        self.window = progresswindow

    def write(self, txt):
        """Split txt on newlines and hand each piece to the window."""
        if not txt:
            return
        trailing_newline = txt.endswith('\n')
        pieces = (txt[:-1] if trailing_newline else txt).split('\n')
        partial = None
        if not trailing_newline:
            # Last piece is an unterminated partial line.
            partial = pieces.pop()
        for piece in pieces:
            self.window.gpaw_write(piece + '\n')
        if partial is not None:
            self.window.gpaw_write(partial)

    def flush(self):
        """Nothing is buffered, so flushing is a no-op."""
def fraction(value, maximum):
    """Return value/maximum clamped to the interval [0, 1]."""
    ratio = value / maximum
    return min(max(ratio, 0.0), 1.0)
| grhawk/ASE | tools/ase/gui/progress.py | Python | gpl-2.0 | 13,183 | [
"ASE",
"GPAW"
] | f4736f549538833470f27a11102a8704e622ee0b13ff71ce8c3442ff6e205057 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from urllib3.exceptions import SSLError
from txclib import utils, web
from txclib.log import set_log_level, logger
from txclib.parsers import tx_main_parser
from txclib.exceptions import AuthenticationError
# use pyOpenSSL if available
try:
    import urllib3.contrib.pyopenssl
    urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
    # pyOpenSSL not installed; urllib3 falls back to the stdlib ssl module.
    pass

# This block ensures that ^C interrupts are handled quietly.
try:
    import signal

    def exithandler(signum, frame):
        # Ignore further signals while shutting down, then exit non-zero.
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        sys.exit(1)

    signal.signal(signal.SIGINT, exithandler)
    signal.signal(signal.SIGTERM, exithandler)
    # SIGPIPE does not exist on all platforms (e.g. Windows).
    if hasattr(signal, 'SIGPIPE'):
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except KeyboardInterrupt:
    sys.exit(1)

# In python 3 default encoding is utf-8
if sys.version_info < (3, 0):
    # Re-import sys so setdefaultencoding (removed from the module's
    # namespace at interpreter startup) becomes visible again.
    reload(sys)
    # When we open file with f = codecs.open we specify
    # FROM what encoding to read.
    # This sets the encoding for the strings which are created with f.read()
    sys.setdefaultencoding('utf-8')
def main(argv=None):
    """
    Parse the command line, configure output/logging, locate the project's
    .tx directory and dispatch to the requested tx sub-command.

    Returns None on success; exits with status 1 on any failure.
    """
    arg_parser = tx_main_parser()
    options, remaining = arg_parser.parse_known_args()

    # No sub-command given: show usage and bail out.
    if not options.command:
        arg_parser.print_help()
        sys.exit(1)

    utils.DISABLE_COLORS = options.color_disable

    # Configure verbosity; --quiet takes precedence over --debug.
    if options.quiet:
        set_log_level('WARNING')
    elif options.debug:
        set_log_level('DEBUG')

    web.cacerts_file = options.cacert

    # Locate the .tx directory (an explicit root dir overrides the search).
    path_to_tx = options.root_dir or utils.find_dot_tx()

    cmd = options.command
    try:
        utils.exec_command(cmd, remaining, path_to_tx)
    except SSLError as e:
        logger.error("SSL error %s" % e)
    except utils.UnknownCommandError:
        logger.error("Command %s not found" % cmd)
    except AuthenticationError:
        authentication_failed_message = """
Error: Authentication failed. Please make sure your credentials are valid.
For more information, visit:
https://docs.transifex.com/client/client-configuration#-transifexrc.
"""
        logger.error(authentication_failed_message)
    except Exception as e:
        import traceback
        if options.trace:
            traceback.print_exc()
        else:
            msg = "Unknown error" if not str(e) else str(e)
            logger.error(msg)
    else:
        # Command completed without raising: success, no non-zero exit.
        return
    # Reached only when one of the except branches ran.
    sys.exit(1)
if __name__ == "__main__":
    # Entry point when executed as a script; main() reads sys.argv itself.
    main()
| transifex/transifex-client | txclib/cmdline.py | Python | gpl-2.0 | 2,867 | [
"VisIt"
] | 6703b1297989095f70f42888c768e62281afe77744292a042aff7d23f7dbc027 |
"""
Determines whether a soldier can exit the vehicle, based on dragging a ball of set radius through
the vehicle. If no path that can fit the ball exists, the soldier cannot exit.
"""
import logging, os
import numpy as np
from scipy import ndimage
import data_io
## Define labels used for various vehicle parts. Must be kept in sync with the labels used in
# ingress_egress.py (and applied to the voxelated model).
# The labels are bit flags (distinct powers of two, VL_NONE = empty air) so
# voxels can be tested with bitwise & and combined with | (e.g. doors|hatches).
VL_NONE, VL_ANY, VL_HULL, VL_DOORS, VL_HATCHES, VL_MANIKINS = 0, 1, 2, 4, 8, 16
class Exit_Tunnel(object):
"""
Given a voxelated vehicle model and a set of manikin positions (ijk vox indices), use image
dilation to locate possible paths to the exit
"""
def __init__(self, vehicle_obj, mani_pts_ijk, ball_radius_vox=5):
self.voxel_data = vehicle_obj.occupied_voxels
self.mani_pts = mani_pts_ijk
# Get the ball that will be dragged through the object to determine ok-sized paths
self.ball = None
self.make_ball_struct(ball_radius_vox)
# Label interior regions of voxelated grid, then mark mani pts on grid of same size
self.air_zones = None
self.mani_grid = None
self.get_air()
### Fine tuning done for arrays regardless of which exit type is considered
self.mani_grid = ndimage.binary_dilation(self.mani_grid, self.ball)
## Find the set of labels that contain manikins.
self.interior_labels = set(self.air_zones[tuple(v)] for v in self.mani_pts)
## Make a new air voxel array for only the air regions containing manikins.
self.air_inside = np.in1d(self.air_zones, list(self.interior_labels)).reshape(
self.air_zones.shape)
def make_ball_struct(self, radius):
"""
Make a voxelated ball with ``radius`` voxels either side of the center voxel.
Returns a numpy array of ``shape = (cells, cells, cells)`` where ``cells = radius * 2 + 1``.
"""
cells = radius * 2 + 1
ball = np.zeros((cells, cells, cells), dtype=np.bool)
ball_i = np.where(ball == False)
for i, j, k in zip(*ball_i):
i_o = float(abs(i - radius))
j_o = float(abs(j - radius))
k_o = float(abs(k - radius))
if np.sqrt(i_o**2 + j_o**2 + k_o**2) < (radius + 0.5):
ball[i, j, k] = True
self.ball = ball
def get_air(self):
"""
Locate the region of the interior cabin volume that is air
"""
air = self.voxel_data == VL_NONE
## Label every disconnected air zone. (Will include air inside and outside vehicle)
air_zoned, num_zones = ndimage.label(air)
## Set troop starting points and create a voxel array to represent them.
manis = np.zeros_like(air, dtype=np.bool)
for v in self.mani_pts:
manis[tuple(v)] = True
self.mani_grid = manis
self.air_zones = air_zoned
def get_exits(self, exit_type="door"):
"""
Get the location of exit points for each exit of ``exit_type`` (door, hatch, both)
"""
# Set the correct label to look for
if exit_type == "door":
find_label = VL_DOORS
elif exit_type == "hatch":
find_label = VL_HATCHES
elif exit_type == "both":
find_label = (VL_DOORS | VL_HATCHES)
else:
raise Exception("Exit route checker requested unknown exit type: {}".format(exit_type))
exits = (self.voxel_data & find_label) != 0
## Label every separate exit.
exit_labels, num_exits = ndimage.label(exits)
## For each labelled exit store 1 ijk coord contained within it.
exit_points = []
for e in xrange(1, num_exits + 1):
loc_index = np.argmax(exit_labels == e)
loc_crds = np.unravel_index(loc_index, exit_labels.shape)
exit_points.append(loc_crds)
#locs = np.vstack(np.where(exit_labels == e)).T
#exit_points.append(locs[0])
exit_points = np.array(exit_points)
logging.debug("EXIT POINTS of type {}: {}".format(exit_type, exit_points))
return exits, exit_points
def find_path_to_exits(self, exit_type="door", debug=False):
    """
    Find whether a path exists between each troop position and any exit of
    ``exit_type`` ("door", "hatch", or "both").

    A path exists when the troop point and an exit voxel share the same
    connectivity label after the interior air has been trimmed by the ball
    structuring element, so only passages wide enough for the ball count.

    :param exit_type: exit category, forwarded to :meth:`get_exits`.
    :param debug: when True, also return arrays useful for 3D viewing.
    :returns: dict mapping each troop point tuple to True/False; with
        ``debug`` also the exit grid and the navigable air voxel grid.
    """
    # Results placeholder
    exit_results = {tuple(p): False for p in self.mani_pts}
    # Find exit points for exit of specified type
    exits, exit_points = self.get_exits(exit_type=exit_type)
    # Grow the exits by the ball so they punch holes through the outline below.
    exits_dilated = ndimage.binary_dilation(exits, self.ball)
    ## Make an "outline" of voxels surrounding the air inside. This will be "open" where air
    ## inside meets the exits.
    eroded = ndimage.binary_erosion(self.air_inside | exits_dilated)
    outline = self.air_inside & ~eroded
    ## Dilate outline using ball, making a voxel grid that is ball.radius away from any surface.
    dil_out = ndimage.binary_dilation(outline, self.ball)
    ## Cut away from air_inside the dilated outline but add back the troop locations.
    air_voxgrid = (self.air_inside & ~dil_out) | self.mani_grid
    ## Finally label a composite of the remaining air inside and the solid exits.
    ## If 2 ijk points have the same label they must be connected.
    air_labels, air_nums = ndimage.label(air_voxgrid | exits)
    for pt in self.mani_pts:
        logging.info("Troop at '{}' can reach exits:".format(pt))
        pt_lab = air_labels[tuple(pt)]
        for ex in exit_points:
            # Same connectivity label => an exit tunnel exists for this troop.
            if air_labels[tuple(ex)] == pt_lab:
                logging.info("\t'{}'".format(ex))
                exit_results[tuple(pt)] = True
    if debug is True:
        return exit_results, exits, air_voxgrid
    else:
        return exit_results
def main(vehicle_obj, mani_pts_ijk, ball_radius_vox=5, debug=False):
    """
    Find whether a path exists between each mani position and the exits;
    "door" and "hatch" exits are checked separately.

    :param vehicle_obj: pre-voxelated vehicle model object.
    :param mani_pts_ijk: iterable of ijk voxel coordinates for the troops.
    :param ball_radius_vox: radius, in voxels, of the clearance ball.
    :param debug: when True, return additional arrays (for 3d viewing).
    :returns: dict mapping each point tuple to ``{"door": bool, "hatch": bool}``;
        with ``debug`` also the exit grid and navigable air grid.
    """
    # Keep track of whether each starting point has paths to hatch and door. Track separately
    # for compatibility with TB034 + TB035
    all_exit_checks = {tuple(p): {"door": False, "hatch": False}
                       for p in mani_pts_ijk}
    tunnel_checker = Exit_Tunnel(vehicle_obj, mani_pts_ijk, ball_radius_vox=ball_radius_vox)
    ### Do exit calculations for specific types of exit
    can_reach_door = tunnel_checker.find_path_to_exits(exit_type="door")
    can_reach_hatch = tunnel_checker.find_path_to_exits(exit_type="hatch")
    # Store overall results
    for k in all_exit_checks:
        all_exit_checks[k]["door"] = can_reach_door[k]
        all_exit_checks[k]["hatch"] = can_reach_hatch[k]
    if debug is True:
        # Return results suitable for rendering (showing ability to reach both doors)
        can_reach_both, exits, air_vox = tunnel_checker.find_path_to_exits(exit_type="both",
                                                                           debug=True)
        return all_exit_checks, exits, air_vox
    else:
        return all_exit_checks
if __name__ == "__main__":
    import json
    from voxel_methods import Vehicle
    ################
    # Setup
    # Load the settings file directly; no api functionality is needed for this script
    with open(r"settings.js", "r") as f:
        SETTINGS = json.load(f)
    ## Log everything to file; print log INFO and above to the console
    logging.basicConfig(filename="debug_escape_routes_ball.log",
                        filemode="w",
                        format="%(levelname)s %(asctime)s %(message)s",
                        level=logging.DEBUG)
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter("%(asctime)s %(message)s"))
    logging.getLogger("").addHandler(console)
    ##########
    # Load the pre-voxelated vehicle model for testing
    b_fldr = r"voxelated_models/vehicles"
    vox_size_str = str(SETTINGS["voxel_size"])
    vox_folder = os.path.join(b_fldr, SETTINGS["run_id"], vox_size_str)
    vox_veh_file = "voxels_{}_vox{}".format(SETTINGS["run_id"], vox_size_str)
    voxel_dat = data_io.load_array(vox_folder, vox_veh_file, True)
    vehicle = Vehicle(voxel_dat)
    ### Hardcoded dummy troop positions that may be meaningless. Uses 3D voxel indices. These
    # values are for testing only, based on the Ricardo master assembly 11/11/2013.
    crew_points = np.array([[40, 40, 25], [40, 80, 25], [20, 145, 40]])
    # Perform exit checks and return combined results
    can_exit, exit_grid, air_inside = main(vehicle, crew_points, debug=True)
    # Print exit status: do all points pass exit check?
    # NOTE(review): ``all(v.values() for ...)`` only tests that each result
    # dict is non-empty, not that every check passed -- presumably
    # ``all(all(v.values()) for ...)`` was intended; confirm before relying
    # on ``exit_ok``.
    exit_ok = all(v.values() for v in can_exit.values())
    if SETTINGS["debug"]:
        ## Render a 3D image, for debugging purposes only
        from mayavi import mlab

        class Voxel_Plotter(object):
            """
            Plotting class (for use with mayavi's mlab; visualization/debug purposes only)
            """
            def __init__(self, voxel_data):
                self.voxel_data = voxel_data

            def __call__(self, grid, color, opacity=1.0):
                # Boolean grids are turned into coordinate lists; otherwise the
                # grid is assumed to already be an (N, 3) array of ijk indices.
                # NOTE(review): ``np.bool`` was removed in NumPy 1.24; this
                # comparison fails on modern numpy -- use ``bool`` when updating.
                if grid.dtype == np.bool:
                    xo, yo, zo = np.where(grid)
                else:
                    xo, yo, zo = grid[:, 0], grid[:, 1], grid[:, 2]
                mlab.points3d(self.voxel_data["x_grid"][xo],
                              self.voxel_data["y_grid"][yo],
                              self.voxel_data["z_grid"][zo],
                              color=color,
                              scale_mode="none",
                              scale_factor=self.voxel_data["vox_size"],
                              mode='cube',
                              opacity=opacity)

        # Make an outline of the air_inside for quicker visualization.
        ero_vis = ndimage.binary_erosion(air_inside)
        air_inside = air_inside & ~ero_vis
        scene = mlab.figure(size=(400, 400))
        scene.scene.background = (0.2, 0.2, 0.2)
        vox_plotter = Voxel_Plotter(voxel_dat)
        vox_plotter(air_inside, (0.6, 0.6, 0.8), 0.1)
        vox_plotter(exit_grid, (1.0, 0.0, 0.0))
        vox_plotter(crew_points, (0.3, 1.0, 0.3))
        mlab.show()
| pombredanne/metamorphosys-desktop | metamorphosys/META/analysis_tools/PYTHON_RICARDO/output_ingress_egress/scripts/escape_routes_ball.py | Python | mit | 10,707 | [
"Mayavi"
] | 30dd34861fde53b59a7955ff88301726658bfd5f5e1850c7b74c408e01cf6b9c |
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import os
from datetime import datetime
import calendar
def isoformat(utc_date):
    """Return *utc_date* as an ISO-8601 string with a space between date and time."""
    # datetime.isoformat accepts the separator directly, so no replace() pass
    # over the result is needed.
    return utc_date.isoformat(sep=' ')
def get_file_mtime(location, iso=True):
    """
    Return a string containing the last modified date of the file at
    ``location``, formatted as an ISO time stamp if ``iso`` is True or as a
    raw number of seconds since the epoch otherwise.  Directories yield an
    empty string.
    """
    date = ''
    # FIXME: use file types
    if not os.path.isdir(location):
        mtime = os.stat(location).st_mtime
        if iso:
            utc_date = datetime.utcfromtimestamp(mtime)
            date = isoformat(utc_date)
        else:
            # Raw epoch seconds, stringified (may include a fractional part).
            date = str(mtime)
    return date
def secs_from_epoch(d):
    """
    Return a number of seconds since epoch for a ``YYYY-MM-DD HH:MM:SS``
    time stamp ``d``; any fractional-second suffix after a '.' is ignored.
    """
    # Drop an optional fractional part before parsing.
    stamp, _sep, _fraction = d.partition('.')
    parsed = datetime.strptime(stamp, '%Y-%m-%d %H:%M:%S')
    # timegm treats the struct_time as UTC (the inverse of gmtime).
    return calendar.timegm(parsed.timetuple())
| retrography/scancode-toolkit | src/commoncode/date.py | Python | apache-2.0 | 2,322 | [
"VisIt"
] | 72ed9458d3cef10ca1cee638fac156c32c61d344c73ded5d7aa33cab6c299974 |
import datetime
from django.core.management.base import NoArgsCommand
from django.contrib.sites.models import Site
from feedback.models import Feedback
class Command(NoArgsCommand):
    """Management command that reports all feedback still awaiting moderation."""
    # NOTE(review): "feedbac" typo in the user-visible help text below.
    help = 'Report all the feedbac that needs attention'
    args = ''

    def handle_noargs(self, **options):
        # Only feedback still marked 'pending' needs reporting.
        pending = Feedback.objects.filter( status='pending' )
        # If there are no reports to deal with then exit
        if not pending.exists():
            return
        subject = "Feedback requires attention - %u pending reports" % pending.count()
        message = "Please visit the admin and process the feedback as needed."
        # Deep-link straight to the pending queue in the admin.
        url = 'http://%s/admin/feedback/feedback/?status=pending' % Site.objects.get_current().domain
        # Python 2 print statements -- this module predates Python 3.
        print subject
        print
        print message
        print
        print url
| Hutspace/odekro | mzalendo/feedback/management/commands/feedback_report_pending.py | Python | agpl-3.0 | 865 | [
"VisIt"
] | 986d200ca3f50824c641daa8d8bdc826d94b393f41cc7e3ec81b53dec89ae971 |
../../../../share/pyshared/orca/script_utilities.py | Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/script_utilities.py | Python | gpl-3.0 | 51 | [
"ORCA"
] | 79d66d7dcad7db762d2eedddcf53fe7c9fa51b9d7f3f25d1eaa3196bee1dd474 |
#!/usr/bin/env python
#
# Copyright (c) 2014-2016 Apple Inc. All rights reserved.
# Copyright (C) 2015 Canon Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# Builtins generator templates, which can be filled with string.Template.
class BuiltinsGeneratorTemplates:
    """C++ source templates (filled via ``string.Template``) used by the JS builtins generator."""

    DefaultCopyright = "2016 Apple Inc. All rights reserved."

    # Two-clause BSD license text stamped into each generated file.
    LicenseText = (
"""Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
""")

    # Banner warning maintainers that the output file is generated.
    DoNotEditWarning = (
"""// DO NOT EDIT THIS FILE. It is automatically generated from JavaScript files for
// builtins by the script: Source/JavaScriptCore/Scripts/generate-js-builtins.py""")

    HeaderIncludeGuard = (
"""#pragma once""")

    NamespaceTop = (
"""namespace ${namespace} {""")

    NamespaceBottom = (
"""} // namespace ${namespace}""")

    # Declarations of the per-builtin generator functions: one combined header
    # variant and one per-object ("separate") header variant.
    CombinedHeaderStaticMacros = (
"""#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \\
JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
${macroPrefix}_FOREACH_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
#undef DECLARE_BUILTIN_GENERATOR""")

    SeparateHeaderStaticMacros = (
"""#define DECLARE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \\
JSC::FunctionExecutable* codeName##Generator(JSC::VM&);
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_CODE(DECLARE_BUILTIN_GENERATOR)
#undef DECLARE_BUILTIN_GENERATOR""")

    # Generator definitions for JavaScriptCore: executables come straight from
    # the VM's builtinExecutables().
    CombinedJSCImplementationStaticMacros = (
"""
#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \\
JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \\
{\\
return vm.builtinExecutables()->codeName##Executable()->link(vm, vm.builtinExecutables()->codeName##Source(), std::nullopt, s_##codeName##Intrinsic); \
}
${macroPrefix}_FOREACH_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
#undef DEFINE_BUILTIN_GENERATOR
""")

    SeparateJSCImplementationStaticMacros = (
"""
#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \\
JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \\
{\\
return vm.builtinExecutables()->codeName##Executable()->link(vm, vm.builtinExecutables()->codeName##Source(), std::nullopt, s_##codeName##Intrinsic); \
}
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
#undef DEFINE_BUILTIN_GENERATOR
""")

    # Generator definitions for WebCore: executables are reached through the
    # per-VM JSVMClientData instead of the VM directly.
    CombinedWebCoreImplementationStaticMacros = (
"""
#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \\
JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \\
{\\
JSVMClientData* clientData = static_cast<JSVMClientData*>(vm.clientData); \\
return clientData->builtinFunctions().${objectNameLC}Builtins().codeName##Executable()->link(vm, clientData->builtinFunctions().${objectNameLC}Builtins().codeName##Source(), std::nullopt, s_##codeName##Intrinsic); \\
}
${macroPrefix}_FOREACH_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
#undef DEFINE_BUILTIN_GENERATOR
""")

    SeparateWebCoreImplementationStaticMacros = (
"""
#define DEFINE_BUILTIN_GENERATOR(codeName, functionName, argumentCount) \\
JSC::FunctionExecutable* codeName##Generator(JSC::VM& vm) \\
{\\
JSVMClientData* clientData = static_cast<JSVMClientData*>(vm.clientData); \\
return clientData->builtinFunctions().${objectNameLC}Builtins().codeName##Executable()->link(vm, clientData->builtinFunctions().${objectNameLC}Builtins().codeName##Source(), std::nullopt, s_##codeName##Intrinsic); \\
}
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_CODE(DEFINE_BUILTIN_GENERATOR)
#undef DEFINE_BUILTIN_GENERATOR
""")

    # Per-object wrapper class that owns the builtin sources and lazily links
    # their UnlinkedFunctionExecutables through weak handles.
    SeparateHeaderWrapperBoilerplate = (
"""class ${objectName}BuiltinsWrapper : private JSC::WeakHandleOwner {
public:
explicit ${objectName}BuiltinsWrapper(JSC::VM* vm)
: m_vm(*vm)
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_FUNCTION_NAME(INITIALIZE_BUILTIN_NAMES)
#define INITIALIZE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) , m_##name##Source(JSC::makeSource(StringImpl::createFromLiteral(s_##name, length), { }))
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_CODE(INITIALIZE_BUILTIN_SOURCE_MEMBERS)
#undef INITIALIZE_BUILTIN_SOURCE_MEMBERS
{
}
#define EXPOSE_BUILTIN_EXECUTABLES(name, functionName, length) \\
JSC::UnlinkedFunctionExecutable* name##Executable(); \\
const JSC::SourceCode& name##Source() const { return m_##name##Source; }
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_CODE(EXPOSE_BUILTIN_EXECUTABLES)
#undef EXPOSE_BUILTIN_EXECUTABLES
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_IDENTIFIER_ACCESSOR)
void exportNames();
private:
JSC::VM& m_vm;
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_NAMES)
#define DECLARE_BUILTIN_SOURCE_MEMBERS(name, functionName, length) \\
JSC::SourceCode m_##name##Source;\\
JSC::Weak<JSC::UnlinkedFunctionExecutable> m_##name##Executable;
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_CODE(DECLARE_BUILTIN_SOURCE_MEMBERS)
#undef DECLARE_BUILTIN_SOURCE_MEMBERS
};
#define DEFINE_BUILTIN_EXECUTABLES(name, functionName, length) \\
inline JSC::UnlinkedFunctionExecutable* ${objectName}BuiltinsWrapper::name##Executable() \\
{\\
if (!m_##name##Executable)\\
m_##name##Executable = JSC::Weak<JSC::UnlinkedFunctionExecutable>(JSC::createBuiltinExecutable(m_vm, m_##name##Source, functionName##PublicName(), s_##name##ConstructAbility), this, &m_##name##Executable);\\
return m_##name##Executable.get();\\
}
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_CODE(DEFINE_BUILTIN_EXECUTABLES)
#undef DEFINE_BUILTIN_EXECUTABLES
inline void ${objectName}BuiltinsWrapper::exportNames()
{
#define EXPORT_FUNCTION_NAME(name) m_vm.propertyNames->appendExternalName(name##PublicName(), name##PrivateName());
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_FUNCTION_NAME(EXPORT_FUNCTION_NAME)
#undef EXPORT_FUNCTION_NAME
}""")

    # Per-object holder for materialized JSFunction objects plus init/visit
    # hooks for GC integration.
    SeparateHeaderInternalFunctionsBoilerplate = (
"""class ${objectName}BuiltinFunctions {
public:
explicit ${objectName}BuiltinFunctions(JSC::VM& vm) : m_vm(vm) { }
void init(JSC::JSGlobalObject&);
void visit(JSC::SlotVisitor&);
public:
JSC::VM& m_vm;
#define DECLARE_BUILTIN_SOURCE_MEMBERS(functionName) \\
JSC::WriteBarrier<JSC::JSFunction> m_##functionName##Function;
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_FUNCTION_NAME(DECLARE_BUILTIN_SOURCE_MEMBERS)
#undef DECLARE_BUILTIN_SOURCE_MEMBERS
};
inline void ${objectName}BuiltinFunctions::init(JSC::JSGlobalObject& globalObject)
{
#define EXPORT_FUNCTION(codeName, functionName, length)\\
m_##functionName##Function.set(m_vm, &globalObject, JSC::JSFunction::createBuiltinFunction(m_vm, codeName##Generator(m_vm), &globalObject));
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_CODE(EXPORT_FUNCTION)
#undef EXPORT_FUNCTION
}
inline void ${objectName}BuiltinFunctions::visit(JSC::SlotVisitor& visitor)
{
#define VISIT_FUNCTION(name) visitor.append(m_##name##Function);
${macroPrefix}_FOREACH_${objectMacro}_BUILTIN_FUNCTION_NAME(VISIT_FUNCTION)
#undef VISIT_FUNCTION
}
""")
| Debian/openjfx | modules/web/src/main/native/Source/JavaScriptCore/Scripts/builtins/builtins_templates.py | Python | gpl-2.0 | 9,456 | [
"VisIt"
] | 16a8201b4993d15e2adff49a2b3d56b950de6d3284fa256efc1f0a78f8f8ff1e |
#
# Copyright (C) 2001 greg Landrum
#
""" unit testing code for the descriptor COM server
"""
from __future__ import print_function
import unittest
from rdkit import RDConfig
import numpy as np
try:
from win32com.client import Dispatch
except ImportError:
Dispatch = None
@unittest.skipIf(Dispatch is None, 'Windows test')
class TestCase(unittest.TestCase):
    """Exercises the RD.DescCalc descriptor COM server via win32com (Windows only)."""

    def setUp(self):
        # Echo the test description so the console output labels each test.
        print('\n%s: ' % self.shortDescription(), end='')

    def testConnectToCOMServer(self):
        # " testing connection "
        Dispatch('RD.DescCalc')

    def testLoadCalculator(self):
        # " testing load "
        c = Dispatch('RD.DescCalc')
        c.LoadCalculator(RDConfig.RDCodeDir + '/ml/descriptors/test_data/ferro.dsc')

    def testNames(self):
        # " testing GetDescriptorNames "
        c = Dispatch('RD.DescCalc')
        c.LoadCalculator(RDConfig.RDCodeDir + '/ml/descriptors/test_data/ferro.dsc')
        names = c.GetDescriptorNames()
        expectedNames = ('MAX_DED', 'has3d', 'has4d', 'has5d', 'elconc', 'atvol')
        assert names == expectedNames, 'GetDescriptorNames failed (%s != %s)' % (repr(names),
                                                                                 repr(expectedNames))

    def testCalc(self):
        # " testing descriptor calculation "
        argV = ['CrPt3', 'fcc', 'AuCu3', 58.09549962, 1, 4, 0.228898, 8.876, 1]
        nameV = ['Compound', 'Structure', 'Structure_Type', 'Volume', 'Z', 'Atoms_per_Formula_Unit',
                 'Hardness', 'RawDOS_Ef', 'IsFerromagnetic']
        c = Dispatch('RD.DescCalc')
        c.LoadCalculator(RDConfig.RDCodeDir + '/ml/descriptors/test_data/ferro.dsc')
        descVect = np.array(c.CalcDescriptors(argV, nameV))
        expected = np.array((3.67481803894, 1, 0, 1, 0.619669341609, 14.523874905))
        diffV = abs(descVect - expected)
        # Descriptor values must match the reference vector to 4 decimal places.
        assert max(diffV) < 0.0001, 'bad descriptors: %s, %s' % (str(expected), str(descVect))
if __name__ == '__main__':  # pragma: nocover
    unittest.main()
| rvianello/rdkit | rdkit/ML/Descriptors/UnitTestCOMServer.py | Python | bsd-3-clause | 1,919 | [
"RDKit"
] | 7e93fb3b93b96f6847fca7d38303b6453a2af8d80c0a26295d23d3df60938dff |
# -*- coding: utf-8 -*-
#
# Natural Language Toolkit: Snowball Stemmer
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Peter Michael Stahl <pemistahl@gmail.com>
# Peter Ljunglof <peter.ljunglof@heatherleaf.se> (revisions)
# Algorithms: Dr Martin Porter <martin@tartarus.org>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Snowball stemmers
This module provides a port of the Snowball stemmers
developed by Martin Porter.
There is also a demo function: `snowball.demo()`.
"""
from __future__ import unicode_literals, print_function
from nltk import compat
from nltk.corpus import stopwords
from nltk.stem import porter
from nltk.stem.util import suffix_replace
from nltk.stem.api import StemmerI
class SnowballStemmer(StemmerI):
    """
    Snowball Stemmer

    The following languages are supported:
    Danish, Dutch, English, Finnish, French, German,
    Hungarian, Italian, Norwegian, Portuguese, Romanian, Russian,
    Spanish and Swedish.

    The algorithm for English is documented here:

        Porter, M. \"An algorithm for suffix stripping.\"
        Program 14.3 (1980): 130-137.

    The algorithms have been developed by Martin Porter.
    These stemmers are called Snowball, because Porter created
    a programming language with this name for creating
    new stemming algorithms. There is more information available
    at http://snowball.tartarus.org/

    The stemmer is invoked as shown below:

    >>> from nltk.stem import SnowballStemmer
    >>> print(" ".join(SnowballStemmer.languages)) # See which languages are supported
    danish dutch english finnish french german hungarian
    italian norwegian porter portuguese romanian russian
    spanish swedish
    >>> stemmer = SnowballStemmer("german") # Choose a language
    >>> stemmer.stem("Autobahnen") # Stem a word
    'autobahn'

    Invoking the stemmers that way is useful if you do not know the
    language to be stemmed at runtime. Alternatively, if you already know
    the language, then you can invoke the language specific stemmer directly:

    >>> from nltk.stem.snowball import GermanStemmer
    >>> stemmer = GermanStemmer()
    >>> stemmer.stem("Autobahnen")
    'autobahn'

    :param language: The language whose subclass is instantiated.
    :type language: str or unicode
    :param ignore_stopwords: If set to True, stopwords are
                             not stemmed and returned unchanged.
                             Set to False by default.
    :type ignore_stopwords: bool
    :raise ValueError: If there is no stemmer for the specified
                       language, a ValueError is raised.
    """

    # Lowercase names of every supported language-specific subclass.
    languages = ("danish", "dutch", "english", "finnish", "french", "german",
                 "hungarian", "italian", "norwegian", "porter", "portuguese",
                 "romanian", "russian", "spanish", "swedish")

    def __init__(self, language, ignore_stopwords=False):
        if language not in self.languages:
            raise ValueError("The language '%s' is not supported." % language)
        # Resolve e.g. "german" -> the GermanStemmer class defined in this module.
        stemmerclass = globals()[language.capitalize() + "Stemmer"]
        self.stemmer = stemmerclass(ignore_stopwords)
        # Delegate stemming and the stopword set to the language-specific stemmer.
        self.stem = self.stemmer.stem
        self.stopwords = self.stemmer.stopwords
@compat.python_2_unicode_compatible
class _LanguageSpecificStemmer(StemmerI):
    """
    This helper subclass offers the possibility
    to invoke a specific stemmer directly.
    This is useful if you already know the language to be stemmed at runtime.

    Create an instance of the Snowball stemmer.

    :param ignore_stopwords: If set to True, stopwords are
                             not stemmed and returned unchanged.
                             Set to False by default.
    :type ignore_stopwords: bool
    """

    def __init__(self, ignore_stopwords=False):
        # The language is the name of the class, minus the final "Stemmer".
        language = type(self).__name__.lower()
        if language.endswith("stemmer"):
            language = language[:-7]
        self.stopwords = set()
        if ignore_stopwords:
            try:
                for word in stopwords.words(language):
                    self.stopwords.add(word)
            except IOError:
                # No stopword corpus exists for this language.
                raise ValueError("%r has no list of stopwords. Please set"
                                 " 'ignore_stopwords' to 'False'." % self)

    def __repr__(self):
        """
        Print out the string representation of the respective class.
        """
        return "<%s>" % type(self).__name__
class PorterStemmer(_LanguageSpecificStemmer, porter.PorterStemmer):
    """
    A word stemmer based on the original Porter stemming algorithm.

        Porter, M. \"An algorithm for suffix stripping.\"
        Program 14.3 (1980): 130-137.

    A few minor modifications have been made to Porter's basic
    algorithm. See the source code of the module
    nltk.stem.porter for more information.
    """

    def __init__(self, ignore_stopwords=False):
        # Initialise both bases explicitly; cooperative super() is not used here.
        _LanguageSpecificStemmer.__init__(self, ignore_stopwords)
        porter.PorterStemmer.__init__(self)
class _ScandinavianStemmer(_LanguageSpecificStemmer):
    """
    Shared helper for the Danish, Norwegian, and Swedish stemmers: it
    computes the string region R1 as Snowball defines it for the
    Scandinavian languages.
    """

    def _r1_scandinavian(self, word, vowels):
        """
        Return the region R1 that is used by the Scandinavian stemmers.

        R1 is the region after the first non-vowel following a vowel, or
        the null region at the end of the word if there is no such
        non-vowel.  R1 is then adjusted so that the region before it
        contains at least three letters.

        :param word: The word whose region R1 is determined.
        :type word: str or unicode
        :param vowels: The vowels of the respective language that are
                       used to determine the region R1.
        :type vowels: unicode
        :return: the region R1 for the respective word.
        :rtype: unicode
        :note: Invoked by the stem methods of DanishStemmer,
               NorwegianStemmer, and SwedishStemmer; not to be called
               directly.
        """
        for pos in range(1, len(word)):
            prev_is_vowel = word[pos - 1] in vowels
            if prev_is_vowel and word[pos] not in vowels:
                # Keep at least three letters in front of R1.
                return word[3:] if pos < 2 else word[pos + 1:]
        return ""
class _StandardStemmer(_LanguageSpecificStemmer):
    """
    This subclass encapsulates two methods for defining the standard versions
    of the string regions R1, R2, and RV.
    """

    def _r1r2_standard(self, word, vowels):
        """
        Return the standard interpretations of the string regions R1 and R2.

        R1 is the region after the first non-vowel following a vowel,
        or is the null region at the end of the word if there is no
        such non-vowel.

        R2 is the region after the first non-vowel following a vowel
        in R1, or is the null region at the end of the word if there
        is no such non-vowel.

        :param word: The word whose regions R1 and R2 are determined.
        :type word: str or unicode
        :param vowels: The vowels of the respective language that are
                       used to determine the regions R1 and R2.
        :type vowels: unicode
        :return: (r1,r2), the regions R1 and R2 for the respective word.
        :rtype: tuple
        :note: This helper method is invoked by the respective stem method of
               the subclasses DutchStemmer, FinnishStemmer,
               FrenchStemmer, GermanStemmer, ItalianStemmer,
               PortugueseStemmer, RomanianStemmer, and SpanishStemmer.
               It is not to be invoked directly!
        :note: A detailed description of how to define R1 and R2
               can be found at http://snowball.tartarus.org/texts/r1r2.html
        """
        r1 = ""
        r2 = ""
        # R1: text after the first consonant that follows a vowel.
        for i in range(1, len(word)):
            if word[i] not in vowels and word[i-1] in vowels:
                r1 = word[i+1:]
                break
        # R2: the same rule applied again, inside R1.
        for i in range(1, len(r1)):
            if r1[i] not in vowels and r1[i-1] in vowels:
                r2 = r1[i+1:]
                break
        return (r1, r2)

    def _rv_standard(self, word, vowels):
        """
        Return the standard interpretation of the string region RV.

        If the second letter is a consonant, RV is the region after the
        next following vowel. If the first two letters are vowels, RV is
        the region after the next following consonant. Otherwise, RV is
        the region after the third letter.

        :param word: The word whose region RV is determined.
        :type word: str or unicode
        :param vowels: The vowels of the respective language that are
                       used to determine the region RV.
        :type vowels: unicode
        :return: the region RV for the respective word.
        :rtype: unicode
        :note: This helper method is invoked by the respective stem method of
               the subclasses ItalianStemmer, PortugueseStemmer,
               RomanianStemmer, and SpanishStemmer. It is not to be
               invoked directly!
        """
        rv = ""
        if len(word) >= 2:
            if word[1] not in vowels:
                # Second letter is a consonant: RV starts after the next vowel.
                for i in range(2, len(word)):
                    if word[i] in vowels:
                        rv = word[i+1:]
                        break
            elif word[0] in vowels and word[1] in vowels:
                # Two leading vowels: RV starts after the next consonant.
                for i in range(2, len(word)):
                    if word[i] not in vowels:
                        rv = word[i+1:]
                        break
            else:
                # Consonant then vowel: RV is everything after the third letter.
                rv = word[3:]
        return rv
class DanishStemmer(_ScandinavianStemmer):
    """
    The Danish Snowball stemmer.

    :cvar __vowels: The Danish vowels.
    :type __vowels: unicode
    :cvar __consonants: The Danish consonants.
    :type __consonants: unicode
    :cvar __double_consonants: The Danish double consonants.
    :type __double_consonants: tuple
    :cvar __s_ending: Letters that may directly appear before a word final 's'.
    :type __s_ending: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple

    :note: A detailed description of the Danish
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/danish/stemmer.html
    """

    # The language's vowels and other important characters are defined.
    __vowels = "aeiouy\xE6\xE5\xF8"
    __consonants = "bcdfghjklmnpqrstvwxz"
    __double_consonants = ("bb", "cc", "dd", "ff", "gg", "hh", "jj",
                           "kk", "ll", "mm", "nn", "pp", "qq", "rr",
                           "ss", "tt", "vv", "ww", "xx", "zz")
    __s_ending = "abcdfghjklmnoprtvyz\xE5"

    # The different suffixes, divided into the algorithm's steps
    # and organized by length, are listed in tuples.
    __step1_suffixes = ("erendes", "erende", "hedens", "ethed",
                        "erede", "heden", "heder", "endes",
                        "ernes", "erens", "erets", "ered",
                        "ende", "erne", "eren", "erer", "heds",
                        "enes", "eres", "eret", "hed", "ene", "ere",
                        "ens", "ers", "ets", "en", "er", "es", "et",
                        "e", "s")
    __step2_suffixes = ("gd", "dt", "gt", "kt")
    __step3_suffixes = ("elig", "l\xF8st", "lig", "els", "ig")

    def stem(self, word):
        """
        Stem a Danish word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        # Every word is put into lower case for normalization.
        word = word.lower()
        if word in self.stopwords:
            return word
        # After this, the required regions are generated
        # by the respective helper method.
        r1 = self._r1_scandinavian(word, self.__vowels)
        # Then the actual stemming process starts.
        # Every new step is explicitly indicated
        # according to the descriptions on the Snowball website.
        # STEP 1
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == "s":
                    # A bare final 's' is only removed after a valid s-ending letter.
                    if word[-2] in self.__s_ending:
                        word = word[:-1]
                        r1 = r1[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                break
        # STEP 2
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                # Only the final letter of the consonant pair is dropped.
                word = word[:-1]
                r1 = r1[:-1]
                break
        # STEP 3
        if r1.endswith("igst"):
            word = word[:-2]
            r1 = r1[:-2]
        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                if suffix == "l\xF8st":
                    word = word[:-1]
                    r1 = r1[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    # Repeat step 2 once on the shortened word.
                    if r1.endswith(self.__step2_suffixes):
                        word = word[:-1]
                        r1 = r1[:-1]
                break
        # STEP 4: Undouble
        for double_cons in self.__double_consonants:
            if word.endswith(double_cons) and len(word) > 3:
                word = word[:-1]
                break
        return word
class DutchStemmer(_StandardStemmer):
    """
    The Dutch Snowball stemmer.

    :cvar __vowels: The Dutch vowels.
    :type __vowels: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step3b_suffixes: Suffixes to be deleted in step 3b of the algorithm.
    :type __step3b_suffixes: tuple
    :note: A detailed description of the Dutch
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/dutch/stemmer.html
    """
    __vowels = "aeiouy\xE8"
    __step1_suffixes = ("heden", "ene", "en", "se", "s")
    __step3b_suffixes = ("baar", "lijk", "bar", "end", "ing", "ig")

    def stem(self, word):
        """
        Stem a Dutch word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        # Stopwords are returned unchanged.
        if word in self.stopwords:
            return word

        # Records whether step 2 removed a final 'e'; step 3b's 'bar'
        # suffix is only deleted when this happened.
        step2_success = False

        # Vowel accents are removed.
        word = (word.replace("\xE4", "a").replace("\xE1", "a")
                    .replace("\xEB", "e").replace("\xE9", "e")
                    .replace("\xED", "i").replace("\xEF", "i")
                    .replace("\xF6", "o").replace("\xF3", "o")
                    .replace("\xFC", "u").replace("\xFA", "u"))

        # An initial 'y', a 'y' after a vowel,
        # and an 'i' between self.__vowels is put into upper case.
        # As from now these are treated as consonants.
        if word.startswith("y"):
            word = "".join(("Y", word[1:]))

        for i in range(1, len(word)):
            if word[i-1] in self.__vowels and word[i] == "y":
                word = "".join((word[:i], "Y", word[i+1:]))

        for i in range(1, len(word)-1):
            if (word[i-1] in self.__vowels and word[i] == "i" and
               word[i+1] in self.__vowels):
                word = "".join((word[:i], "I", word[i+1:]))

        r1, r2 = self._r1r2_standard(word, self.__vowels)

        # R1 is adjusted so that the region before it
        # contains at least 3 letters.
        for i in range(1, len(word)):
            if word[i] not in self.__vowels and word[i-1] in self.__vowels:
                if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
                    r1 = word[3:]
                elif len(word[:i+1]) == 0:
                    return word
                break

        # STEP 1
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == "heden":
                    # 'heden' is mapped to 'heid' rather than deleted.
                    word = suffix_replace(word, suffix, "heid")
                    r1 = suffix_replace(r1, suffix, "heid")
                    if r2.endswith("heden"):
                        r2 = suffix_replace(r2, suffix, "heid")

                elif (suffix in ("ene", "en") and
                      not word.endswith("heden") and
                      word[-len(suffix)-1] not in self.__vowels and
                      word[-len(suffix)-3:-len(suffix)] != "gem"):
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    # Undouble a trailing kk/dd/tt left by the deletion.
                    if word.endswith(("kk", "dd", "tt")):
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]

                elif (suffix in ("se", "s") and
                      word[-len(suffix)-1] not in self.__vowels and
                      word[-len(suffix)-1] != "j"):
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break

        # STEP 2: delete a final 'e' in R1 preceded by a consonant.
        if r1.endswith("e") and word[-2] not in self.__vowels:
            step2_success = True
            word = word[:-1]
            r1 = r1[:-1]
            r2 = r2[:-1]

            if word.endswith(("kk", "dd", "tt")):
                word = word[:-1]
                r1 = r1[:-1]
                r2 = r2[:-1]

        # STEP 3a
        if r2.endswith("heid") and word[-5] != "c":
            word = word[:-4]
            r1 = r1[:-4]
            r2 = r2[:-4]

            if (r1.endswith("en") and word[-3] not in self.__vowels and
                word[-5:-2] != "gem"):
                word = word[:-2]
                r1 = r1[:-2]
                r2 = r2[:-2]

                if word.endswith(("kk", "dd", "tt")):
                    word = word[:-1]
                    r1 = r1[:-1]
                    r2 = r2[:-1]

        # STEP 3b: Derivational suffixes
        for suffix in self.__step3b_suffixes:
            if r2.endswith(suffix):
                if suffix in ("end", "ing"):
                    word = word[:-3]
                    r2 = r2[:-3]

                    # A remaining 'ig' not preceded by 'e' is deleted too;
                    # otherwise only undouble kk/dd/tt.
                    if r2.endswith("ig") and word[-3] != "e":
                        word = word[:-2]
                    else:
                        if word.endswith(("kk", "dd", "tt")):
                            word = word[:-1]

                elif suffix == "ig" and word[-3] != "e":
                    word = word[:-2]

                elif suffix == "lijk":
                    word = word[:-4]
                    r1 = r1[:-4]

                    # Repeat step 2 on the shortened word.
                    if r1.endswith("e") and word[-2] not in self.__vowels:
                        word = word[:-1]
                        if word.endswith(("kk", "dd", "tt")):
                            word = word[:-1]

                elif suffix == "baar":
                    word = word[:-4]

                elif suffix == "bar" and step2_success:
                    word = word[:-3]
                break

        # STEP 4: Undouble vowel
        # aa/ee/oo/uu before a final consonant (other than 'I') is
        # reduced to a single vowel.
        if len(word) >= 4:
            if word[-1] not in self.__vowels and word[-1] != "I":
                if word[-3:-1] in ("aa", "ee", "oo", "uu"):
                    if word[-4] not in self.__vowels:
                        word = "".join((word[:-3], word[-3], word[-1]))

        # All occurrences of 'I' and 'Y' are put back into lower case.
        word = word.replace("I", "i").replace("Y", "y")

        return word
class EnglishStemmer(_StandardStemmer):
    """
    The English Snowball stemmer ("Porter2").

    :cvar __vowels: The English vowels.
    :type __vowels: unicode
    :cvar __double_consonants: The English double consonants.
    :type __double_consonants: tuple
    :cvar __li_ending: Letters that may directly appear before a word final 'li'.
    :type __li_ending: unicode
    :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
    :type __step0_suffixes: tuple
    :cvar __step1a_suffixes: Suffixes to be deleted in step 1a of the algorithm.
    :type __step1a_suffixes: tuple
    :cvar __step1b_suffixes: Suffixes to be deleted in step 1b of the algorithm.
    :type __step1b_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    :type __step4_suffixes: tuple
    :cvar __step5_suffixes: Suffixes to be deleted in step 5 of the algorithm.
    :type __step5_suffixes: tuple
    :cvar __special_words: A dictionary containing words
                           which have to be stemmed specially.
    :type __special_words: dict
    :note: A detailed description of the English
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/english/stemmer.html
    """

    __vowels = "aeiouy"
    __double_consonants = ("bb", "dd", "ff", "gg", "mm", "nn",
                           "pp", "rr", "tt")
    __li_ending = "cdeghkmnrt"
    __step0_suffixes = ("'s'", "'s", "'")
    __step1a_suffixes = ("sses", "ied", "ies", "us", "ss", "s")
    __step1b_suffixes = ("eedly", "ingly", "edly", "eed", "ing", "ed")
    __step2_suffixes = ('ization', 'ational', 'fulness', 'ousness',
                        'iveness', 'tional', 'biliti', 'lessli',
                        'entli', 'ation', 'alism', 'aliti', 'ousli',
                        'iviti', 'fulli', 'enci', 'anci', 'abli',
                        'izer', 'ator', 'alli', 'bli', 'ogi', 'li')
    __step3_suffixes = ('ational', 'tional', 'alize', 'icate', 'iciti',
                        'ative', 'ical', 'ness', 'ful')
    __step4_suffixes = ('ement', 'ance', 'ence', 'able', 'ible', 'ment',
                        'ant', 'ent', 'ism', 'ate', 'iti', 'ous',
                        'ive', 'ize', 'ion', 'al', 'er', 'ic')
    __step5_suffixes = ("e", "l")
    # Irregular forms and invariant words, mapped directly to their stems
    # and returned before the algorithm runs.
    __special_words = {"skis" : "ski",
                       "skies" : "sky",
                       "dying" : "die",
                       "lying" : "lie",
                       "tying" : "tie",
                       "idly" : "idl",
                       "gently" : "gentl",
                       "ugly" : "ugli",
                       "early" : "earli",
                       "only" : "onli",
                       "singly" : "singl",
                       "sky" : "sky",
                       "news" : "news",
                       "howe" : "howe",
                       "atlas" : "atlas",
                       "cosmos" : "cosmos",
                       "bias" : "bias",
                       "andes" : "andes",
                       "inning" : "inning",
                       "innings" : "inning",
                       "outing" : "outing",
                       "outings" : "outing",
                       "canning" : "canning",
                       "cannings" : "canning",
                       "herring" : "herring",
                       "herrings" : "herring",
                       "earring" : "earring",
                       "earrings" : "earring",
                       "proceed" : "proceed",
                       "proceeds" : "proceed",
                       "proceeded" : "proceed",
                       "proceeding" : "proceed",
                       "exceed" : "exceed",
                       "exceeds" : "exceed",
                       "exceeded" : "exceed",
                       "exceeding" : "exceed",
                       "succeed" : "succeed",
                       "succeeds" : "succeed",
                       "succeeded" : "succeed",
                       "succeeding" : "succeed"}

    def stem(self, word):
        """
        Stem an English word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        # Stopwords and very short words are returned unchanged;
        # special words map straight to their hard-coded stems.
        if word in self.stopwords or len(word) <= 2:
            return word

        elif word in self.__special_words:
            return self.__special_words[word]

        # Map the different apostrophe characters to a single consistent one
        word = (word.replace("\u2019", "\x27")
                    .replace("\u2018", "\x27")
                    .replace("\u201B", "\x27"))

        if word.startswith("\x27"):
            word = word[1:]

        # An initial 'y' and a 'y' after a vowel are marked upper case
        # and treated as consonants from here on.
        if word.startswith("y"):
            word = "".join(("Y", word[1:]))

        for i in range(1, len(word)):
            if word[i-1] in self.__vowels and word[i] == "y":
                word = "".join((word[:i], "Y", word[i+1:]))

        step1a_vowel_found = False
        step1b_vowel_found = False

        r1 = ""
        r2 = ""

        # Special R1 handling: for these prefixes R1 starts right after
        # the prefix instead of after the first vowel-consonant pair.
        if word.startswith(("gener", "commun", "arsen")):
            if word.startswith(("gener", "arsen")):
                r1 = word[5:]
            else:
                r1 = word[6:]

            for i in range(1, len(r1)):
                if r1[i] not in self.__vowels and r1[i-1] in self.__vowels:
                    r2 = r1[i+1:]
                    break
        else:
            r1, r2 = self._r1r2_standard(word, self.__vowels)

        # STEP 0: strip possessive endings.
        for suffix in self.__step0_suffixes:
            if word.endswith(suffix):
                word = word[:-len(suffix)]
                r1 = r1[:-len(suffix)]
                r2 = r2[:-len(suffix)]
                break

        # STEP 1a
        # Note: 'us' and 'ss' intentionally match with no action,
        # so that e.g. a final 'ss' is left alone.
        for suffix in self.__step1a_suffixes:
            if word.endswith(suffix):

                if suffix == "sses":
                    word = word[:-2]
                    r1 = r1[:-2]
                    r2 = r2[:-2]

                elif suffix in ("ied", "ies"):
                    if len(word[:-len(suffix)]) > 1:
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                    else:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]

                elif suffix == "s":
                    # Delete 's' only if a vowel occurs before the
                    # last two letters.
                    for letter in word[:-2]:
                        if letter in self.__vowels:
                            step1a_vowel_found = True
                            break

                    if step1a_vowel_found:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                break

        # STEP 1b
        for suffix in self.__step1b_suffixes:
            if word.endswith(suffix):
                if suffix in ("eed", "eedly"):
                    # Replace by 'ee' only when the suffix lies in R1.
                    if r1.endswith(suffix):
                        word = suffix_replace(word, suffix, "ee")

                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ee")
                        else:
                            r1 = ""

                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ee")
                        else:
                            r2 = ""
                else:
                    # Delete 'ed'/'ing' forms only if the stem
                    # contains a vowel.
                    for letter in word[:-len(suffix)]:
                        if letter in self.__vowels:
                            step1b_vowel_found = True
                            break

                    if step1b_vowel_found:
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]

                        if word.endswith(("at", "bl", "iz")):
                            word = "".join((word, "e"))
                            r1 = "".join((r1, "e"))

                            if len(word) > 5 or len(r1) >=3:
                                r2 = "".join((r2, "e"))

                        elif word.endswith(self.__double_consonants):
                            word = word[:-1]
                            r1 = r1[:-1]
                            r2 = r2[:-1]

                        # 'Short word' case: add an 'e'.
                        elif ((r1 == "" and len(word) >= 3 and
                               word[-1] not in self.__vowels and
                               word[-1] not in "wxY" and
                               word[-2] in self.__vowels and
                               word[-3] not in self.__vowels)
                              or
                              (r1 == "" and len(word) == 2 and
                               word[0] in self.__vowels and
                               word[1] not in self.__vowels)):

                            word = "".join((word, "e"))

                            if len(r1) > 0:
                                r1 = "".join((r1, "e"))

                            if len(r2) > 0:
                                r2 = "".join((r2, "e"))
                break

        # STEP 1c: final 'y'/'Y' after a consonant becomes 'i'.
        if len(word) > 2 and word[-1] in "yY" and word[-2] not in self.__vowels:
            word = "".join((word[:-1], "i"))
            if len(r1) >= 1:
                r1 = "".join((r1[:-1], "i"))
            else:
                r1 = ""

            if len(r2) >= 1:
                r2 = "".join((r2[:-1], "i"))
            else:
                r2 = ""

        # STEP 2: derivational suffixes, applied only inside R1.
        for suffix in self.__step2_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    if suffix == "tional":
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]

                    elif suffix in ("enci", "anci", "abli"):
                        word = "".join((word[:-1], "e"))

                        if len(r1) >= 1:
                            r1 = "".join((r1[:-1], "e"))
                        else:
                            r1 = ""

                        if len(r2) >= 1:
                            r2 = "".join((r2[:-1], "e"))
                        else:
                            r2 = ""

                    elif suffix == "entli":
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]

                    elif suffix in ("izer", "ization"):
                        word = suffix_replace(word, suffix, "ize")

                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ize")
                        else:
                            r1 = ""

                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ize")
                        else:
                            r2 = ""

                    elif suffix in ("ational", "ation", "ator"):
                        word = suffix_replace(word, suffix, "ate")

                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ate")
                        else:
                            r1 = ""

                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ate")
                        else:
                            r2 = "e"

                    elif suffix in ("alism", "aliti", "alli"):
                        word = suffix_replace(word, suffix, "al")

                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "al")
                        else:
                            r1 = ""

                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "al")
                        else:
                            r2 = ""

                    elif suffix == "fulness":
                        word = word[:-4]
                        r1 = r1[:-4]
                        r2 = r2[:-4]

                    elif suffix in ("ousli", "ousness"):
                        word = suffix_replace(word, suffix, "ous")

                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ous")
                        else:
                            r1 = ""

                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ous")
                        else:
                            r2 = ""

                    elif suffix in ("iveness", "iviti"):
                        word = suffix_replace(word, suffix, "ive")

                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ive")
                        else:
                            r1 = ""

                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ive")
                        else:
                            r2 = "e"

                    elif suffix in ("biliti", "bli"):
                        word = suffix_replace(word, suffix, "ble")

                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ble")
                        else:
                            r1 = ""

                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ble")
                        else:
                            r2 = ""

                    elif suffix == "ogi" and word[-4] == "l":
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]

                    elif suffix in ("fulli", "lessli"):
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]

                    elif suffix == "li" and word[-3] in self.__li_ending:
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                break

        # STEP 3
        for suffix in self.__step3_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    if suffix == "tional":
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]

                    elif suffix == "ational":
                        word = suffix_replace(word, suffix, "ate")

                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ate")
                        else:
                            r1 = ""

                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ate")
                        else:
                            r2 = ""

                    elif suffix == "alize":
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]

                    elif suffix in ("icate", "iciti", "ical"):
                        word = suffix_replace(word, suffix, "ic")

                        if len(r1) >= len(suffix):
                            r1 = suffix_replace(r1, suffix, "ic")
                        else:
                            r1 = ""

                        if len(r2) >= len(suffix):
                            r2 = suffix_replace(r2, suffix, "ic")
                        else:
                            r2 = ""

                    elif suffix in ("ful", "ness"):
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]

                    # 'ative' is only deleted when it also lies in R2.
                    elif suffix == "ative" and r2.endswith(suffix):
                        word = word[:-5]
                        r1 = r1[:-5]
                        r2 = r2[:-5]
                break

        # STEP 4: residual suffixes, applied only inside R2.
        for suffix in self.__step4_suffixes:
            if word.endswith(suffix):
                if r2.endswith(suffix):
                    if suffix == "ion":
                        # 'ion' is deleted only after 's' or 't'.
                        if word[-4] in "st":
                            word = word[:-3]
                            r1 = r1[:-3]
                            r2 = r2[:-3]
                    else:
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                break

        # STEP 5: final 'e'/'l' clean-up.
        if r2.endswith("l") and word[-2] == "l":
            word = word[:-1]
        elif r2.endswith("e"):
            word = word[:-1]
        elif r1.endswith("e"):
            # Delete the 'e' unless the word ends in a 'short syllable'.
            if len(word) >= 4 and (word[-2] in self.__vowels or
                                   word[-2] in "wxY" or
                                   word[-3] not in self.__vowels or
                                   word[-4] in self.__vowels):
                word = word[:-1]

        # Consonant-marked 'Y' is put back into lower case.
        word = word.replace("Y", "y")

        return word
class FinnishStemmer(_StandardStemmer):
    """
    The Finnish Snowball stemmer.

    :cvar __vowels: The Finnish vowels.
    :type __vowels: unicode
    :cvar __restricted_vowels: A subset of the Finnish vowels.
    :type __restricted_vowels: unicode
    :cvar __long_vowels: The Finnish vowels in their long forms.
    :type __long_vowels: tuple
    :cvar __consonants: The Finnish consonants.
    :type __consonants: unicode
    :cvar __double_consonants: The Finnish double consonants.
    :type __double_consonants: tuple
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    :type __step4_suffixes: tuple
    :note: A detailed description of the Finnish
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/finnish/stemmer.html
    """

    __vowels = "aeiouy\xE4\xF6"
    __restricted_vowels = "aeiou\xE4\xF6"
    __long_vowels = ("aa", "ee", "ii", "oo", "uu", "\xE4\xE4",
                     "\xF6\xF6")
    __consonants = "bcdfghjklmnpqrstvwxz"
    __double_consonants = ("bb", "cc", "dd", "ff", "gg", "hh", "jj",
                           "kk", "ll", "mm", "nn", "pp", "qq", "rr",
                           "ss", "tt", "vv", "ww", "xx", "zz")
    __step1_suffixes = ('kaan', 'k\xE4\xE4n', 'sti', 'kin', 'han',
                        'h\xE4n', 'ko', 'k\xF6', 'pa', 'p\xE4')
    __step2_suffixes = ('nsa', 'ns\xE4', 'mme', 'nne', 'si', 'ni',
                        'an', '\xE4n', 'en')
    __step3_suffixes = ('siin', 'tten', 'seen', 'han', 'hen', 'hin',
                        'hon', 'h\xE4n', 'h\xF6n', 'den', 'tta',
                        'tt\xE4', 'ssa', 'ss\xE4', 'sta',
                        'st\xE4', 'lla', 'll\xE4', 'lta',
                        'lt\xE4', 'lle', 'ksi', 'ine', 'ta',
                        't\xE4', 'na', 'n\xE4', 'a', '\xE4',
                        'n')
    __step4_suffixes = ('impi', 'impa', 'imp\xE4', 'immi', 'imma',
                        'imm\xE4', 'mpi', 'mpa', 'mp\xE4', 'mmi',
                        'mma', 'mm\xE4', 'eja', 'ej\xE4')

    def stem(self, word):
        """
        Stem a Finnish word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        # Stopwords are returned unchanged.
        if word in self.stopwords:
            return word

        # Step 5's behavior depends on whether step 3 removed an ending.
        step3_success = False

        r1, r2 = self._r1r2_standard(word, self.__vowels)

        # STEP 1: Particles etc.
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                if suffix == "sti":
                    # 'sti' is only deleted when it lies within R2.
                    if suffix in r2:
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                else:
                    # Other particles require n/t/vowel directly before
                    # the suffix.
                    if word[-len(suffix)-1] in "ntaeiouy\xE4\xF6":
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                break

        # STEP 2: Possessives
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                if suffix == "si":
                    if word[-3] != "k":
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]

                elif suffix == "ni":
                    word = word[:-2]
                    r1 = r1[:-2]
                    r2 = r2[:-2]
                    # A remaining 'kse' is restored to 'ksi'.
                    if word.endswith("kse"):
                        word = suffix_replace(word, "kse", "ksi")

                    if r1.endswith("kse"):
                        r1 = suffix_replace(r1, "kse", "ksi")

                    if r2.endswith("kse"):
                        r2 = suffix_replace(r2, "kse", "ksi")

                elif suffix == "an":
                    if (word[-4:-2] in ("ta", "na") or
                        word[-5:-2] in ("ssa", "sta", "lla", "lta")):
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]

                elif suffix == "\xE4n":
                    if (word[-4:-2] in ("t\xE4", "n\xE4") or
                        word[-5:-2] in ("ss\xE4", "st\xE4",
                                        "ll\xE4", "lt\xE4")):
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]

                elif suffix == "en":
                    if word[-5:-2] in ("lle", "ine"):
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                else:
                    # Remaining three-letter possessives
                    # (nsa/ns\xE4/mme/nne) are simply deleted.
                    word = word[:-3]
                    r1 = r1[:-3]
                    r2 = r2[:-3]
                break

        # STEP 3: Cases
        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                if suffix in ("han", "hen", "hin", "hon", "h\xE4n",
                              "h\xF6n"):
                    # Illative hVn endings: vowel before must match
                    # the vowel inside the ending.
                    if ((suffix == "han" and word[-4] == "a") or
                        (suffix == "hen" and word[-4] == "e") or
                        (suffix == "hin" and word[-4] == "i") or
                        (suffix == "hon" and word[-4] == "o") or
                        (suffix == "h\xE4n" and word[-4] == "\xE4") or
                        (suffix == "h\xF6n" and word[-4] == "\xF6")):
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                        step3_success = True

                elif suffix in ("siin", "den", "tten"):
                    if (word[-len(suffix)-1] == "i" and
                        word[-len(suffix)-2] in self.__restricted_vowels):
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        step3_success = True
                    else:
                        # Precondition failed: try a shorter suffix.
                        continue

                elif suffix == "seen":
                    if word[-6:-4] in self.__long_vowels:
                        word = word[:-4]
                        r1 = r1[:-4]
                        r2 = r2[:-4]
                        step3_success = True
                    else:
                        continue

                elif suffix in ("a", "\xE4"):
                    if word[-2] in self.__vowels and word[-3] in self.__consonants:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                        step3_success = True

                elif suffix in ("tta", "tt\xE4"):
                    if word[-4] == "e":
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                        step3_success = True

                elif suffix == "n":
                    word = word[:-1]
                    r1 = r1[:-1]
                    r2 = r2[:-1]
                    step3_success = True

                    # A remaining 'ie' or long vowel is shortened.
                    if word[-2:] == "ie" or word[-2:] in self.__long_vowels:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    step3_success = True
                break

        # STEP 4: Other endings (comparatives/superlatives), within R2.
        for suffix in self.__step4_suffixes:
            if r2.endswith(suffix):
                if suffix in ("mpi", "mpa", "mp\xE4", "mmi", "mma",
                              "mm\xE4"):
                    if word[-5:-3] != "po":
                        word = word[:-3]
                        r1 = r1[:-3]
                        r2 = r2[:-3]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break

        # STEP 5: Plurals
        if step3_success and len(r1) >= 1 and r1[-1] in "ij":
            word = word[:-1]
            r1 = r1[:-1]

        elif (not step3_success and len(r1) >= 2 and
              r1[-1] == "t" and r1[-2] in self.__vowels):
            word = word[:-1]
            r1 = r1[:-1]
            r2 = r2[:-1]
            if r2.endswith("imma"):
                word = word[:-4]
                r1 = r1[:-4]
            elif r2.endswith("mma") and r2[-5:-3] != "po":
                word = word[:-3]
                r1 = r1[:-3]

        # STEP 6: Tidying up
        if r1[-2:] in self.__long_vowels:
            word = word[:-1]
            r1 = r1[:-1]

        if (len(r1) >= 2 and r1[-2] in self.__consonants and
            r1[-1] in "a\xE4ei"):
            word = word[:-1]
            r1 = r1[:-1]

        if r1.endswith(("oj", "uj")):
            word = word[:-1]
            r1 = r1[:-1]

        if r1.endswith("jo"):
            word = word[:-1]
            r1 = r1[:-1]

        # If the word ends with a double consonant
        # followed by zero or more vowels, the last consonant is removed.
        for i in range(1, len(word)):
            if word[-i] in self.__vowels:
                continue
            else:
                if i == 1:
                    if word[-i-1:] in self.__double_consonants:
                        word = word[:-1]
                else:
                    if word[-i-1:-i+1] in self.__double_consonants:
                        word = "".join((word[:-i], word[-i+1:]))
                break

        return word
class FrenchStemmer(_StandardStemmer):
    """
    The French Snowball stemmer.

    :cvar __vowels: The French vowels.
    :type __vowels: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm.
    :type __step2a_suffixes: tuple
    :cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm.
    :type __step2b_suffixes: tuple
    :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    :type __step4_suffixes: tuple
    :note: A detailed description of the French
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/french/stemmer.html
    """

    __vowels = "aeiouy\xE2\xE0\xEB\xE9\xEA\xE8\xEF\xEE\xF4\xFB\xF9"
    __step1_suffixes = ('issements', 'issement', 'atrices', 'atrice',
                        'ateurs', 'ations', 'logies', 'usions',
                        'utions', 'ements', 'amment', 'emment',
                        'ances', 'iqUes', 'ismes', 'ables', 'istes',
                        'ateur', 'ation', 'logie', 'usion', 'ution',
                        'ences', 'ement', 'euses', 'ments', 'ance',
                        'iqUe', 'isme', 'able', 'iste', 'ence',
                        'it\xE9s', 'ives', 'eaux', 'euse', 'ment',
                        'eux', 'it\xE9', 'ive', 'ifs', 'aux', 'if')
    __step2a_suffixes = ('issaIent', 'issantes', 'iraIent', 'issante',
                         'issants', 'issions', 'irions', 'issais',
                         'issait', 'issant', 'issent', 'issiez', 'issons',
                         'irais', 'irait', 'irent', 'iriez', 'irons',
                         'iront', 'isses', 'issez', '\xEEmes',
                         '\xEEtes', 'irai', 'iras', 'irez', 'isse',
                         'ies', 'ira', '\xEEt', 'ie', 'ir', 'is',
                         'it', 'i')
    __step2b_suffixes = ('eraIent', 'assions', 'erions', 'assent',
                         'assiez', '\xE8rent', 'erais', 'erait',
                         'eriez', 'erons', 'eront', 'aIent', 'antes',
                         'asses', 'ions', 'erai', 'eras', 'erez',
                         '\xE2mes', '\xE2tes', 'ante', 'ants',
                         'asse', '\xE9es', 'era', 'iez', 'ais',
                         'ait', 'ant', '\xE9e', '\xE9s', 'er',
                         'ez', '\xE2t', 'ai', 'as', '\xE9', 'a')
    __step4_suffixes = ('i\xE8re', 'I\xE8re', 'ion', 'ier', 'Ier',
                        'e', '\xEB')

    def stem(self, word):
        """
        Stem a French word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        # Stopwords are returned unchanged.
        if word in self.stopwords:
            return word

        # Flags steering which later steps run.
        step1_success = False
        rv_ending_found = False
        step2a_success = False
        step2b_success = False

        # Every occurrence of 'u' after 'q' is put into upper case.
        for i in range(1, len(word)):
            if word[i-1] == "q" and word[i] == "u":
                word = "".join((word[:i], "U", word[i+1:]))

        # Every occurrence of 'u' and 'i'
        # between vowels is put into upper case.
        # Every occurrence of 'y' preceded or
        # followed by a vowel is also put into upper case.
        # Upper-case letters are treated as consonants from here on.
        for i in range(1, len(word)-1):
            if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
                if word[i] == "u":
                    word = "".join((word[:i], "U", word[i+1:]))

                elif word[i] == "i":
                    word = "".join((word[:i], "I", word[i+1:]))

            if word[i-1] in self.__vowels or word[i+1] in self.__vowels:
                if word[i] == "y":
                    word = "".join((word[:i], "Y", word[i+1:]))

        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self.__rv_french(word, self.__vowels)

        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if word.endswith(suffix):
                if suffix == "eaux":
                    word = word[:-1]
                    step1_success = True

                elif suffix in ("euse", "euses"):
                    if suffix in r2:
                        word = word[:-len(suffix)]
                        step1_success = True

                    elif suffix in r1:
                        word = suffix_replace(word, suffix, "eux")
                        step1_success = True

                elif suffix in ("ement", "ements") and suffix in rv:
                    word = word[:-len(suffix)]
                    step1_success = True

                    # Further reduce the residual stem endings.
                    if word[-2:] == "iv" and "iv" in r2:
                        word = word[:-2]

                        if word[-2:] == "at" and "at" in r2:
                            word = word[:-2]

                    elif word[-3:] == "eus":
                        if "eus" in r2:
                            word = word[:-3]
                        elif "eus" in r1:
                            word = "".join((word[:-1], "x"))

                    elif word[-3:] in ("abl", "iqU"):
                        if "abl" in r2 or "iqU" in r2:
                            word = word[:-3]

                    elif word[-3:] in ("i\xE8r", "I\xE8r"):
                        if "i\xE8r" in rv or "I\xE8r" in rv:
                            word = "".join((word[:-3], "i"))

                elif suffix == "amment" and suffix in rv:
                    word = suffix_replace(word, "amment", "ant")
                    rv = suffix_replace(rv, "amment", "ant")
                    # 'ant' may be removed again by step 2b.
                    rv_ending_found = True

                elif suffix == "emment" and suffix in rv:
                    word = suffix_replace(word, "emment", "ent")
                    rv_ending_found = True

                elif (suffix in ("ment", "ments") and suffix in rv and
                      not rv.startswith(suffix) and
                      rv[rv.rindex(suffix)-1] in self.__vowels):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    rv_ending_found = True

                elif suffix == "aux" and suffix in r1:
                    word = "".join((word[:-2], "l"))
                    step1_success = True

                elif (suffix in ("issement", "issements") and suffix in r1
                      and word[-len(suffix)-1] not in self.__vowels):
                    word = word[:-len(suffix)]
                    step1_success = True

                elif suffix in ("ance", "iqUe", "isme", "able", "iste",
                                "eux", "ances", "iqUes", "ismes",
                                "ables", "istes") and suffix in r2:
                    word = word[:-len(suffix)]
                    step1_success = True

                elif suffix in ("atrice", "ateur", "ation", "atrices",
                                "ateurs", "ations") and suffix in r2:
                    word = word[:-len(suffix)]
                    step1_success = True

                    if word[-2:] == "ic":
                        if "ic" in r2:
                            word = word[:-2]
                        else:
                            word = "".join((word[:-2], "iqU"))

                elif suffix in ("logie", "logies") and suffix in r2:
                    word = suffix_replace(word, suffix, "log")
                    step1_success = True

                elif (suffix in ("usion", "ution", "usions", "utions") and
                      suffix in r2):
                    word = suffix_replace(word, suffix, "u")
                    step1_success = True

                elif suffix in ("ence", "ences") and suffix in r2:
                    word = suffix_replace(word, suffix, "ent")
                    step1_success = True

                elif suffix in ("it\xE9", "it\xE9s") and suffix in r2:
                    word = word[:-len(suffix)]
                    step1_success = True

                    if word[-4:] == "abil":
                        if "abil" in r2:
                            word = word[:-4]
                        else:
                            word = "".join((word[:-2], "l"))

                    elif word[-2:] == "ic":
                        if "ic" in r2:
                            word = word[:-2]
                        else:
                            word = "".join((word[:-2], "iqU"))

                    elif word[-2:] == "iv":
                        if "iv" in r2:
                            word = word[:-2]

                elif (suffix in ("if", "ive", "ifs", "ives") and
                      suffix in r2):
                    word = word[:-len(suffix)]
                    step1_success = True

                    if word[-2:] == "at" and "at" in r2:
                        word = word[:-2]

                        if word[-2:] == "ic":
                            if "ic" in r2:
                                word = word[:-2]
                            else:
                                word = "".join((word[:-2], "iqU"))
                break

        # STEP 2a: Verb suffixes beginning 'i'
        # Only run when step 1 found nothing, or left an 'ant'/'ent'
        # style ending behind (rv_ending_found).
        if not step1_success or rv_ending_found:
            for suffix in self.__step2a_suffixes:
                if word.endswith(suffix):
                    if (suffix in rv and len(rv) > len(suffix) and
                        rv[rv.rindex(suffix)-1] not in self.__vowels):
                        word = word[:-len(suffix)]
                        step2a_success = True
                    break

            # STEP 2b: Other verb suffixes
            if not step2a_success:
                for suffix in self.__step2b_suffixes:
                    if rv.endswith(suffix):
                        if suffix == "ions" and "ions" in r2:
                            word = word[:-4]
                            step2b_success = True

                        elif suffix in ('eraIent', 'erions', '\xE8rent',
                                        'erais', 'erait', 'eriez',
                                        'erons', 'eront', 'erai', 'eras',
                                        'erez', '\xE9es', 'era', 'iez',
                                        '\xE9e', '\xE9s', 'er', 'ez',
                                        '\xE9'):
                            word = word[:-len(suffix)]
                            step2b_success = True

                        elif suffix in ('assions', 'assent', 'assiez',
                                        'aIent', 'antes', 'asses',
                                        '\xE2mes', '\xE2tes', 'ante',
                                        'ants', 'asse', 'ais', 'ait',
                                        'ant', '\xE2t', 'ai', 'as',
                                        'a'):
                            word = word[:-len(suffix)]
                            rv = rv[:-len(suffix)]
                            step2b_success = True
                            # A preceding 'e' in RV is removed as well.
                            if rv.endswith("e"):
                                word = word[:-1]
                        break

        # STEP 3
        if step1_success or step2a_success or step2b_success:
            if word[-1] == "Y":
                word = "".join((word[:-1], "i"))
            elif word[-1] == "\xE7":
                word = "".join((word[:-1], "c"))

        # STEP 4: Residual suffixes (only if no earlier step succeeded)
        else:
            if (len(word) >= 2 and word[-1] == "s" and
                word[-2] not in "aiou\xE8s"):
                word = word[:-1]

            for suffix in self.__step4_suffixes:
                if word.endswith(suffix):
                    if suffix in rv:
                        if (suffix == "ion" and suffix in r2 and
                            rv[-4] in "st"):
                            word = word[:-3]

                        elif suffix in ("ier", "i\xE8re", "Ier",
                                        "I\xE8re"):
                            word = suffix_replace(word, suffix, "i")

                        elif suffix == "e":
                            word = word[:-1]

                        elif suffix == "\xEB" and word[-3:-1] == "gu":
                            word = word[:-1]
                        break

        # STEP 5: Undouble
        if word.endswith(("enn", "onn", "ett", "ell", "eill")):
            word = word[:-1]

        # STEP 6: Un-accent
        # Replace a final \xE9/\xE8 (followed only by consonants)
        # with plain 'e'.
        for i in range(1, len(word)):
            if word[-i] not in self.__vowels:
                i += 1
            else:
                if i != 1 and word[-i] in ("\xE9", "\xE8"):
                    word = "".join((word[:-i], "e", word[-i+1:]))
                break

        # Marker letters are put back into lower case.
        word = (word.replace("I", "i")
                    .replace("U", "u")
                    .replace("Y", "y"))

        return word

    def __rv_french(self, word, vowels):
        """
        Return the region RV that is used by the French stemmer.

        If the word begins with two vowels, RV is the region after
        the third letter. Otherwise, it is the region after the first
        vowel not at the beginning of the word, or the end of the word
        if these positions cannot be found. (Exceptionally, u'par',
        u'col' or u'tap' at the beginning of a word is also taken to
        define RV as the region to their right.)

        :param word: The French word whose region RV is determined.
        :type word: str or unicode
        :param vowels: The French vowels that are used to determine
                       the region RV.
        :type vowels: unicode
        :return: the region RV for the respective French word.
        :rtype: unicode
        :note: This helper method is invoked by the stem method of
               the subclass FrenchStemmer. It is not to be invoked directly!
        """
        rv = ""
        if len(word) >= 2:
            if (word.startswith(("par", "col", "tap")) or
                (word[0] in vowels and word[1] in vowels)):
                rv = word[3:]
            else:
                for i in range(1, len(word)):
                    if word[i] in vowels:
                        rv = word[i+1:]
                        break

        return rv
class GermanStemmer(_StandardStemmer):
    """
    The German Snowball stemmer.

    :cvar __vowels: The German vowels.
    :type __vowels: unicode
    :cvar __s_ending: Letters that may directly appear before a word final 's'.
    :type __s_ending: unicode
    :cvar __st_ending: Letter that may directly appear before a word final 'st'.
    :type __st_ending: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the German
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/german/stemmer.html
    """

    __vowels = "aeiouy\xE4\xF6\xFC"
    __s_ending = "bdfghklmnrt"
    __st_ending = "bdfghklmnt"
    __step1_suffixes = ("ern", "em", "er", "en", "es", "e", "s")
    __step2_suffixes = ("est", "en", "er", "st")
    __step3_suffixes = ("isch", "lich", "heit", "keit",
                        "end", "ung", "ig", "ik")

    def stem(self, word):
        """
        Stem a German word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()

        # Stopwords are returned unchanged.
        if word in self.stopwords:
            return word

        # Eszett is normalized to 'ss'.
        word = word.replace("\xDF", "ss")

        # Every occurrence of 'u' and 'y'
        # between vowels is put into upper case.
        # Upper-case letters are treated as consonants from here on.
        for i in range(1, len(word)-1):
            if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
                if word[i] == "u":
                    word = "".join((word[:i], "U", word[i+1:]))

                elif word[i] == "y":
                    word = "".join((word[:i], "Y", word[i+1:]))

        r1, r2 = self._r1r2_standard(word, self.__vowels)

        # R1 is adjusted so that the region before it
        # contains at least 3 letters.
        for i in range(1, len(word)):
            if word[i] not in self.__vowels and word[i-1] in self.__vowels:
                if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
                    r1 = word[3:]
                elif len(word[:i+1]) == 0:
                    return word
                break

        # STEP 1
        for suffix in self.__step1_suffixes:
            if r1.endswith(suffix):
                # 'niss' + e/en/es loses its final 's' as well.
                if (suffix in ("en", "es", "e") and
                    word[-len(suffix)-4:-len(suffix)] == "niss"):
                    word = word[:-len(suffix)-1]
                    r1 = r1[:-len(suffix)-1]
                    r2 = r2[:-len(suffix)-1]

                elif suffix == "s":
                    # A final 's' needs a valid s-ending before it.
                    if word[-2] in self.__s_ending:
                        word = word[:-1]
                        r1 = r1[:-1]
                        r2 = r2[:-1]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break

        # STEP 2
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                if suffix == "st":
                    # 'st' needs a valid st-ending and a stem of at
                    # least 3 letters.
                    if word[-3] in self.__st_ending and len(word[:-3]) >= 3:
                        word = word[:-2]
                        r1 = r1[:-2]
                        r2 = r2[:-2]
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                break

        # STEP 3: Derivational suffixes
        for suffix in self.__step3_suffixes:
            if r2.endswith(suffix):
                if suffix in ("end", "ung"):
                    # A preceding 'ig' (not after 'e') is deleted too.
                    if ("ig" in r2[-len(suffix)-2:-len(suffix)] and
                        "e" not in r2[-len(suffix)-3:-len(suffix)-2]):
                        word = word[:-len(suffix)-2]
                    else:
                        word = word[:-len(suffix)]

                elif (suffix in ("ig", "ik", "isch") and
                      "e" not in r2[-len(suffix)-1:-len(suffix)]):
                    word = word[:-len(suffix)]

                elif suffix in ("lich", "heit"):
                    # A preceding 'er'/'en' in R1 is deleted too.
                    if ("er" in r1[-len(suffix)-2:-len(suffix)] or
                        "en" in r1[-len(suffix)-2:-len(suffix)]):
                        word = word[:-len(suffix)-2]
                    else:
                        word = word[:-len(suffix)]

                elif suffix == "keit":
                    if "lich" in r2[-len(suffix)-4:-len(suffix)]:
                        word = word[:-len(suffix)-4]

                    elif "ig" in r2[-len(suffix)-2:-len(suffix)]:
                        word = word[:-len(suffix)-2]
                    else:
                        word = word[:-len(suffix)]
                break

        # Umlaut accents are removed and
        # 'u' and 'y' are put back into lower case.
        word = (word.replace("\xE4", "a").replace("\xF6", "o")
                    .replace("\xFC", "u").replace("U", "u")
                    .replace("Y", "y"))

        return word
class HungarianStemmer(_LanguageSpecificStemmer):
    """
    The Hungarian Snowball stemmer.
    :cvar __vowels: The Hungarian vowels.
    :type __vowels: unicode
    :cvar __digraphs: The Hungarian digraphs.
    :type __digraphs: tuple
    :cvar __double_consonants: The Hungarian double consonants.
    :type __double_consonants: tuple
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    :type __step4_suffixes: tuple
    :cvar __step5_suffixes: Suffixes to be deleted in step 5 of the algorithm.
    :type __step5_suffixes: tuple
    :cvar __step6_suffixes: Suffixes to be deleted in step 6 of the algorithm.
    :type __step6_suffixes: tuple
    :cvar __step7_suffixes: Suffixes to be deleted in step 7 of the algorithm.
    :type __step7_suffixes: tuple
    :cvar __step8_suffixes: Suffixes to be deleted in step 8 of the algorithm.
    :type __step8_suffixes: tuple
    :cvar __step9_suffixes: Suffixes to be deleted in step 9 of the algorithm.
    :type __step9_suffixes: tuple
    :note: A detailed description of the Hungarian
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/hungarian/stemmer.html
    """
    __vowels = "aeiou\xF6\xFC\xE1\xE9\xED\xF3\xF5\xFA\xFB"
    __digraphs = ("cs", "dz", "dzs", "gy", "ly", "ny", "ty", "zs")
    __double_consonants = ("bb", "cc", "ccs", "dd", "ff", "gg",
                           "ggy", "jj", "kk", "ll", "lly", "mm",
                           "nn", "nny", "pp", "rr", "ss", "ssz",
                           "tt", "tty", "vv", "zz", "zzs")
    __step1_suffixes = ("al", "el")
    __step2_suffixes = ('k\xE9ppen', 'onk\xE9nt', 'enk\xE9nt',
                        'ank\xE9nt', 'k\xE9pp', 'k\xE9nt', 'ban',
                        'ben', 'nak', 'nek', 'val', 'vel', 't\xF3l',
                        't\xF5l', 'r\xF3l', 'r\xF5l', 'b\xF3l',
                        'b\xF5l', 'hoz', 'hez', 'h\xF6z',
                        'n\xE1l', 'n\xE9l', '\xE9rt', 'kor',
                        'ba', 'be', 'ra', 're', 'ig', 'at', 'et',
                        'ot', '\xF6t', 'ul', '\xFCl', 'v\xE1',
                        'v\xE9', 'en', 'on', 'an', '\xF6n',
                        'n', 't')
    __step3_suffixes = ("\xE1nk\xE9nt", "\xE1n", "\xE9n")
    __step4_suffixes = ('astul', 'est\xFCl', '\xE1stul',
                        '\xE9st\xFCl', 'stul', 'st\xFCl')
    __step5_suffixes = ("\xE1", "\xE9")
    __step6_suffixes = ('ok\xE9', '\xF6k\xE9', 'ak\xE9',
                        'ek\xE9', '\xE1k\xE9', '\xE1\xE9i',
                        '\xE9k\xE9', '\xE9\xE9i', 'k\xE9',
                        '\xE9i', '\xE9\xE9', '\xE9')
    __step7_suffixes = ('\xE1juk', '\xE9j\xFCk', '\xFCnk',
                        'unk', 'juk', 'j\xFCk', '\xE1nk',
                        '\xE9nk', 'nk', 'uk', '\xFCk', 'em',
                        'om', 'am', 'od', 'ed', 'ad', '\xF6d',
                        'ja', 'je', '\xE1m', '\xE1d', '\xE9m',
                        '\xE9d', 'm', 'd', 'a', 'e', 'o',
                        '\xE1', '\xE9')
    __step8_suffixes = ('jaitok', 'jeitek', 'jaink', 'jeink', 'aitok',
                        'eitek', '\xE1itok', '\xE9itek', 'jaim',
                        'jeim', 'jaid', 'jeid', 'eink', 'aink',
                        'itek', 'jeik', 'jaik', '\xE1ink',
                        '\xE9ink', 'aim', 'eim', 'aid', 'eid',
                        'jai', 'jei', 'ink', 'aik', 'eik',
                        '\xE1im', '\xE1id', '\xE1ik', '\xE9im',
                        '\xE9id', '\xE9ik', 'im', 'id', 'ai',
                        'ei', 'ik', '\xE1i', '\xE9i', 'i')
    __step9_suffixes = ("\xE1k", "\xE9k", "\xF6k", "ok",
                        "ek", "ak", "k")
    def stem(self, word):
        """
        Stem an Hungarian word and return the stemmed form.
        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode

        The word and its region R1 are trimmed in lockstep: each time a
        suffix is removed from the word, the same amount is removed from
        R1 so that later ``r1.endswith(...)`` tests stay aligned with
        the word's current ending.
        """
        word = word.lower()
        # Stopwords are returned unchanged.
        if word in self.stopwords:
            return word
        r1 = self.__r1_hungarian(word, self.__vowels, self.__digraphs)
        # STEP 1: Remove instrumental case
        # Only applies when the "al"/"el" ending follows a double
        # consonant: the two-letter suffix is dropped and the doubled
        # consonant is collapsed to a single one
        # (word[:-4] + word[-3] keeps the first half of the pair).
        if r1.endswith(self.__step1_suffixes):
            for double_cons in self.__double_consonants:
                if word[-2-len(double_cons):-2] == double_cons:
                    word = "".join((word[:-4], word[-3]))
                    if r1[-2-len(double_cons):-2] == double_cons:
                        r1 = "".join((r1[:-4], r1[-3]))
                    break
        # STEP 2: Remove frequent cases
        # After deleting a case ending, a remaining accented final
        # a-acute/e-acute is normalized to plain 'a'/'e'.
        for suffix in self.__step2_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    if r1.endswith("\xE1"):
                        word = "".join((word[:-1], "a"))
                        r1 = suffix_replace(r1, "\xE1", "a")
                    elif r1.endswith("\xE9"):
                        word = "".join((word[:-1], "e"))
                        r1 = suffix_replace(r1, "\xE9", "e")
                break
        # STEP 3: Remove special cases
        for suffix in self.__step3_suffixes:
            if r1.endswith(suffix):
                if suffix == "\xE9n":
                    word = suffix_replace(word, suffix, "e")
                    r1 = suffix_replace(r1, suffix, "e")
                else:
                    word = suffix_replace(word, suffix, "a")
                    r1 = suffix_replace(r1, suffix, "a")
                break
        # STEP 4: Remove other cases
        for suffix in self.__step4_suffixes:
            if r1.endswith(suffix):
                if suffix == "\xE1stul":
                    word = suffix_replace(word, suffix, "a")
                    r1 = suffix_replace(r1, suffix, "a")
                elif suffix == "\xE9st\xFCl":
                    word = suffix_replace(word, suffix, "e")
                    r1 = suffix_replace(r1, suffix, "e")
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                break
        # STEP 5: Remove factive case
        # Like step 1: drop the one-letter factive ending and undouble
        # a preceding double consonant (word[:-3] + word[-2]).
        for suffix in self.__step5_suffixes:
            if r1.endswith(suffix):
                for double_cons in self.__double_consonants:
                    if word[-1-len(double_cons):-1] == double_cons:
                        word = "".join((word[:-3], word[-2]))
                        if r1[-1-len(double_cons):-1] == double_cons:
                            r1 = "".join((r1[:-3], r1[-2]))
                        break
        # STEP 6: Remove owned
        for suffix in self.__step6_suffixes:
            if r1.endswith(suffix):
                if suffix in ("\xE1k\xE9", "\xE1\xE9i"):
                    word = suffix_replace(word, suffix, "a")
                    r1 = suffix_replace(r1, suffix, "a")
                elif suffix in ("\xE9k\xE9", "\xE9\xE9i",
                                "\xE9\xE9"):
                    word = suffix_replace(word, suffix, "e")
                    r1 = suffix_replace(r1, suffix, "e")
                else:
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                break
        # STEP 7: Remove singular owner suffixes
        for suffix in self.__step7_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    if suffix in ("\xE1nk", "\xE1juk", "\xE1m",
                                  "\xE1d", "\xE1"):
                        word = suffix_replace(word, suffix, "a")
                        r1 = suffix_replace(r1, suffix, "a")
                    elif suffix in ("\xE9nk", "\xE9j\xFCk",
                                    "\xE9m", "\xE9d", "\xE9"):
                        word = suffix_replace(word, suffix, "e")
                        r1 = suffix_replace(r1, suffix, "e")
                    else:
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                break
        # STEP 8: Remove plural owner suffixes
        for suffix in self.__step8_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    if suffix in ("\xE1im", "\xE1id", "\xE1i",
                                  "\xE1ink", "\xE1itok", "\xE1ik"):
                        word = suffix_replace(word, suffix, "a")
                        r1 = suffix_replace(r1, suffix, "a")
                    elif suffix in ("\xE9im", "\xE9id", "\xE9i",
                                    "\xE9ink", "\xE9itek", "\xE9ik"):
                        word = suffix_replace(word, suffix, "e")
                        r1 = suffix_replace(r1, suffix, "e")
                    else:
                        word = word[:-len(suffix)]
                        r1 = r1[:-len(suffix)]
                break
        # STEP 9: Remove plural suffixes
        # r1 is intentionally not updated here: this is the last step
        # that reads it.
        for suffix in self.__step9_suffixes:
            if word.endswith(suffix):
                if r1.endswith(suffix):
                    if suffix == "\xE1k":
                        word = suffix_replace(word, suffix, "a")
                    elif suffix == "\xE9k":
                        word = suffix_replace(word, suffix, "e")
                    else:
                        word = word[:-len(suffix)]
                break
        return word
    def __r1_hungarian(self, word, vowels, digraphs):
        """
        Return the region R1 that is used by the Hungarian stemmer.
        If the word begins with a vowel, R1 is defined as the region
        after the first consonant or digraph (= two letters stand for
        one phoneme) in the word. If the word begins with a consonant,
        it is defined as the region after the first vowel in the word.
        If the word does not contain both a vowel and consonant, R1
        is the null region at the end of the word.
        :param word: The Hungarian word whose region R1 is determined.
        :type word: str or unicode
        :param vowels: The Hungarian vowels that are used to determine
                       the region R1.
        :type vowels: unicode
        :param digraphs: The digraphs that are used to determine the
                         region R1.
        :type digraphs: tuple
        :return: the region R1 for the respective word.
        :rtype: unicode
        :note: This helper method is invoked by the stem method of the subclass
               HungarianStemmer. It is not to be invoked directly!
        """
        r1 = ""
        if word[0] in vowels:
            # NOTE(review): this locates the first occurrence of the
            # digraph's *last letter* anywhere in the word, not the
            # position of the digraph itself, and returns on the first
            # digraph found in tuple order ("dz" is tested before
            # "dzs") — verify against the Snowball specification.
            for digraph in digraphs:
                if digraph in word[1:]:
                    r1 = word[word.index(digraph[-1])+1:]
                    return r1
            # No digraph: R1 starts after the first consonant.
            for i in range(1, len(word)):
                if word[i] not in vowels:
                    r1 = word[i+1:]
                    break
        else:
            # Word starts with a consonant: R1 starts after the first
            # vowel; r1 stays "" if no vowel exists.
            for i in range(1, len(word)):
                if word[i] in vowels:
                    r1 = word[i+1:]
                    break
        return r1
class ItalianStemmer(_StandardStemmer):
    """
    The Italian Snowball stemmer.
    :cvar __vowels: The Italian vowels.
    :type __vowels: unicode
    :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
    :type __step0_suffixes: tuple
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :note: A detailed description of the Italian
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/italian/stemmer.html
    """
    __vowels = "aeiou\xE0\xE8\xEC\xF2\xF9"
    __step0_suffixes = ('gliela', 'gliele', 'glieli', 'glielo',
                        'gliene', 'sene', 'mela', 'mele', 'meli',
                        'melo', 'mene', 'tela', 'tele', 'teli',
                        'telo', 'tene', 'cela', 'cele', 'celi',
                        'celo', 'cene', 'vela', 'vele', 'veli',
                        'velo', 'vene', 'gli', 'ci', 'la', 'le',
                        'li', 'lo', 'mi', 'ne', 'si', 'ti', 'vi')
    __step1_suffixes = ('atrice', 'atrici', 'azione', 'azioni',
                        'uzione', 'uzioni', 'usione', 'usioni',
                        'amento', 'amenti', 'imento', 'imenti',
                        'amente', 'abile', 'abili', 'ibile', 'ibili',
                        'mente', 'atore', 'atori', 'logia', 'logie',
                        'anza', 'anze', 'iche', 'ichi', 'ismo',
                        'ismi', 'ista', 'iste', 'isti', 'ist\xE0',
                        'ist\xE8', 'ist\xEC', 'ante', 'anti',
                        'enza', 'enze', 'ico', 'ici', 'ica', 'ice',
                        'oso', 'osi', 'osa', 'ose', 'it\xE0',
                        'ivo', 'ivi', 'iva', 'ive')
    __step2_suffixes = ('erebbero', 'irebbero', 'assero', 'assimo',
                        'eranno', 'erebbe', 'eremmo', 'ereste',
                        'eresti', 'essero', 'iranno', 'irebbe',
                        'iremmo', 'ireste', 'iresti', 'iscano',
                        'iscono', 'issero', 'arono', 'avamo', 'avano',
                        'avate', 'eremo', 'erete', 'erono', 'evamo',
                        'evano', 'evate', 'iremo', 'irete', 'irono',
                        'ivamo', 'ivano', 'ivate', 'ammo', 'ando',
                        'asse', 'assi', 'emmo', 'enda', 'ende',
                        'endi', 'endo', 'erai', 'erei', 'Yamo',
                        'iamo', 'immo', 'irai', 'irei', 'isca',
                        'isce', 'isci', 'isco', 'ano', 'are', 'ata',
                        'ate', 'ati', 'ato', 'ava', 'avi', 'avo',
                        'er\xE0', 'ere', 'er\xF2', 'ete', 'eva',
                        'evi', 'evo', 'ir\xE0', 'ire', 'ir\xF2',
                        'ita', 'ite', 'iti', 'ito', 'iva', 'ivi',
                        'ivo', 'ono', 'uta', 'ute', 'uti', 'uto',
                        'ar', 'ir')
    def stem(self, word):
        """
        Stem an Italian word and return the stemmed form.
        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode

        The regions R1/R2/RV are kept in lockstep with the word: every
        time a suffix is removed from the word, the same number of
        characters is trimmed from the regions so that subsequent
        ``endswith`` tests remain aligned.
        """
        word = word.lower()
        # Stopwords are returned unchanged.
        if word in self.stopwords:
            return word
        step1_success = False
        # All acute accents are replaced by grave accents.
        word = (word.replace("\xE1", "\xE0")
                    .replace("\xE9", "\xE8")
                    .replace("\xED", "\xEC")
                    .replace("\xF3", "\xF2")
                    .replace("\xFA", "\xF9"))
        # Every occurrence of 'u' after 'q'
        # is put into upper case.
        for i in range(1, len(word)):
            if word[i-1] == "q" and word[i] == "u":
                word = "".join((word[:i], "U", word[i+1:]))
        # Every occurrence of 'u' and 'i'
        # between vowels is put into upper case.
        for i in range(1, len(word)-1):
            if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
                if word[i] == "u":
                    word = "".join((word[:i], "U", word[i+1:]))
                elif word[i] == "i":
                    word = "".join((word[:i], "I", word[i+1:]))
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)
        # STEP 0: Attached pronoun
        # A pronoun is removed (after "ando"/"endo") or replaced by 'e'
        # (after "ar"/"er"/"ir").
        for suffix in self.__step0_suffixes:
            if rv.endswith(suffix):
                if rv[-len(suffix)-4:-len(suffix)] in ("ando", "endo"):
                    word = word[:-len(suffix)]
                    r1 = r1[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                elif (rv[-len(suffix)-2:-len(suffix)] in
                      ("ar", "er", "ir")):
                    word = suffix_replace(word, suffix, "e")
                    r1 = suffix_replace(r1, suffix, "e")
                    r2 = suffix_replace(r2, suffix, "e")
                    rv = suffix_replace(rv, suffix, "e")
                break
        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if word.endswith(suffix):
                if suffix == "amente" and r1.endswith(suffix):
                    step1_success = True
                    word = word[:-6]
                    r2 = r2[:-6]
                    rv = rv[:-6]
                    if r2.endswith("iv"):
                        word = word[:-2]
                        r2 = r2[:-2]
                        rv = rv[:-2]
                        if r2.endswith("at"):
                            word = word[:-2]
                            rv = rv[:-2]
                    elif r2.endswith(("os", "ic")):
                        word = word[:-2]
                        rv = rv[:-2]
                    elif r2.endswith("abil"):
                        word = word[:-4]
                        rv = rv[:-4]
                elif (suffix in ("amento", "amenti",
                                 "imento", "imenti") and
                      rv.endswith(suffix)):
                    step1_success = True
                    word = word[:-6]
                    rv = rv[:-6]
                elif r2.endswith(suffix):
                    step1_success = True
                    if suffix in ("azione", "azioni", "atore", "atori"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        if r2.endswith("ic"):
                            word = word[:-2]
                            rv = rv[:-2]
                    elif suffix in ("logia", "logie"):
                        # "logia"/"logie" -> "log": drop the final two
                        # characters.
                        word = word[:-2]
                        # BUGFIX: trim the RV region in lockstep, as in
                        # every other branch.  Previously this read
                        # ``rv = word[:-2]``, which replaced RV with
                        # (almost) the whole word and allowed later
                        # steps to strip suffixes outside the true RV
                        # region.
                        rv = rv[:-2]
                    elif suffix in ("uzione", "uzioni",
                                    "usione", "usioni"):
                        word = word[:-5]
                        rv = rv[:-5]
                    elif suffix in ("enza", "enze"):
                        word = suffix_replace(word, suffix, "te")
                        rv = suffix_replace(rv, suffix, "te")
                    elif suffix == "it\xE0":
                        word = word[:-3]
                        r2 = r2[:-3]
                        rv = rv[:-3]
                        if r2.endswith(("ic", "iv")):
                            word = word[:-2]
                            rv = rv[:-2]
                        elif r2.endswith("abil"):
                            word = word[:-4]
                            rv = rv[:-4]
                    elif suffix in ("ivo", "ivi", "iva", "ive"):
                        word = word[:-3]
                        r2 = r2[:-3]
                        rv = rv[:-3]
                        if r2.endswith("at"):
                            word = word[:-2]
                            r2 = r2[:-2]
                            rv = rv[:-2]
                            if r2.endswith("ic"):
                                word = word[:-2]
                                rv = rv[:-2]
                    else:
                        word = word[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                break
        # STEP 2: Verb suffixes
        # Only attempted when step 1 removed nothing.
        if not step1_success:
            for suffix in self.__step2_suffixes:
                if rv.endswith(suffix):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break
        # STEP 3a: Delete a final vowel (and a final 'i' left behind).
        if rv.endswith(("a", "e", "i", "o", "\xE0", "\xE8",
                        "\xEC", "\xF2")):
            word = word[:-1]
            rv = rv[:-1]
            if rv.endswith("i"):
                word = word[:-1]
                rv = rv[:-1]
        # STEP 3b: Reduce final "ch"/"gh" to "c"/"g".
        if rv.endswith(("ch", "gh")):
            word = word[:-1]
        # Undo the upper-case marking applied before region computation.
        word = word.replace("I", "i").replace("U", "u")
        return word
class NorwegianStemmer(_ScandinavianStemmer):
    """
    The Norwegian Snowball stemmer.
    :cvar __vowels: The Norwegian vowels.
    :type __vowels: unicode
    :cvar __s_ending: Letters that may directly appear before a word final 's'.
    :type __s_ending: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the Norwegian
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/norwegian/stemmer.html
    """
    __vowels = "aeiouy\xE6\xE5\xF8"
    __s_ending = "bcdfghjlmnoprtvyz"
    __step1_suffixes = ("hetenes", "hetene", "hetens", "heter",
                        "heten", "endes", "ande", "ende", "edes",
                        "enes", "erte", "ede", "ane", "ene", "ens",
                        "ers", "ets", "het", "ast", "ert", "en",
                        "ar", "er", "as", "es", "et", "a", "e", "s")
    __step2_suffixes = ("dt", "vt")
    __step3_suffixes = ("hetslov", "eleg", "elig", "elov", "slov",
                        "leg", "eig", "lig", "els", "lov", "ig")
    def stem(self, word):
        """
        Stem a Norwegian word and return the stemmed form.
        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        # Stopwords are passed through untouched.
        if word in self.stopwords:
            return word
        r1 = self._r1_scandinavian(word, self.__vowels)
        # STEP 1: the longest matching suffix in R1 is handled; the
        # word and R1 are trimmed in lockstep.
        for ending in self.__step1_suffixes:
            if not r1.endswith(ending):
                continue
            if ending in ("erte", "ert"):
                # Past-tense endings are mapped back to "er".
                word = suffix_replace(word, ending, "er")
                r1 = suffix_replace(r1, ending, "er")
            elif ending == "s":
                # A final 's' is only deleted after a valid s-ending
                # letter, or after 'k' preceded by a non-vowel.
                preceded_ok = (word[-2] in self.__s_ending or
                               (word[-2] == "k" and
                                word[-3] not in self.__vowels))
                if preceded_ok:
                    word = word[:-1]
                    r1 = r1[:-1]
            else:
                cut = len(ending)
                word = word[:-cut]
                r1 = r1[:-cut]
            break
        # STEP 2: "dt"/"vt" in R1 lose their final 't'.
        if r1.endswith(self.__step2_suffixes):
            word = word[:-1]
            r1 = r1[:-1]
        # STEP 3: delete the first derivational suffix found in R1.
        derivational = next(
            (s for s in self.__step3_suffixes if r1.endswith(s)), None)
        if derivational is not None:
            word = word[:-len(derivational)]
        return word
class PortugueseStemmer(_StandardStemmer):
    """
    The Portuguese Snowball stemmer.
    :cvar __vowels: The Portuguese vowels.
    :type __vowels: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step4_suffixes: Suffixes to be deleted in step 4 of the algorithm.
    :type __step4_suffixes: tuple
    :note: A detailed description of the Portuguese
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/portuguese/stemmer.html
    """
    __vowels = "aeiou\xE1\xE9\xED\xF3\xFA\xE2\xEA\xF4"
    __step1_suffixes = ('amentos', 'imentos', 'uço~es', 'amento',
                        'imento', 'adoras', 'adores', 'a\xE7o~es',
                        'logias', '\xEAncias', 'amente',
                        'idades', 'an\xE7as', 'ismos', 'istas', 'adora',
                        'a\xE7a~o', 'antes', '\xE2ncia',
                        'logia', 'uça~o', '\xEAncia',
                        'mente', 'idade', 'an\xE7a', 'ezas', 'icos', 'icas',
                        'ismo', '\xE1vel', '\xEDvel', 'ista',
                        'osos', 'osas', 'ador', 'ante', 'ivas',
                        'ivos', 'iras', 'eza', 'ico', 'ica',
                        'oso', 'osa', 'iva', 'ivo', 'ira')
    __step2_suffixes = ('ar\xEDamos', 'er\xEDamos', 'ir\xEDamos',
                        '\xE1ssemos', '\xEAssemos', '\xEDssemos',
                        'ar\xEDeis', 'er\xEDeis', 'ir\xEDeis',
                        '\xE1sseis', '\xE9sseis', '\xEDsseis',
                        '\xE1ramos', '\xE9ramos', '\xEDramos',
                        '\xE1vamos', 'aremos', 'eremos', 'iremos',
                        'ariam', 'eriam', 'iriam', 'assem', 'essem',
                        'issem', 'ara~o', 'era~o', 'ira~o', 'arias',
                        'erias', 'irias', 'ardes', 'erdes', 'irdes',
                        'asses', 'esses', 'isses', 'astes', 'estes',
                        'istes', '\xE1reis', 'areis', '\xE9reis',
                        'ereis', '\xEDreis', 'ireis', '\xE1veis',
                        '\xEDamos', 'armos', 'ermos', 'irmos',
                        'aria', 'eria', 'iria', 'asse', 'esse',
                        'isse', 'aste', 'este', 'iste', 'arei',
                        'erei', 'irei', 'aram', 'eram', 'iram',
                        'avam', 'arem', 'erem', 'irem',
                        'ando', 'endo', 'indo', 'adas', 'idas',
                        'ar\xE1s', 'aras', 'er\xE1s', 'eras',
                        'ir\xE1s', 'avas', 'ares', 'eres', 'ires',
                        '\xEDeis', 'ados', 'idos', '\xE1mos',
                        'amos', 'emos', 'imos', 'iras', 'ada', 'ida',
                        'ar\xE1', 'ara', 'er\xE1', 'era',
                        'ir\xE1', 'ava', 'iam', 'ado', 'ido',
                        'ias', 'ais', 'eis', 'ira', 'ia', 'ei', 'am',
                        'em', 'ar', 'er', 'ir', 'as',
                        'es', 'is', 'eu', 'iu', 'ou')
    __step4_suffixes = ("os", "a", "i", "o", "\xE1",
                        "\xED", "\xF3")
    def stem(self, word):
        """
        Stem a Portuguese word and return the stemmed form.
        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode

        The regions R1/R2/RV are trimmed in lockstep with the word so
        that later ``endswith`` tests stay aligned with the word's
        current ending.
        """
        word = word.lower()
        # Stopwords are returned unchanged.
        if word in self.stopwords:
            return word
        step1_success = False
        step2_success = False
        # Nasalised vowels are temporarily encoded as two-character
        # sequences ("a~"/"o~") so suffix matching can treat them as
        # plain letters; the encoding is undone at the end.
        word = (word.replace("\xE3", "a~")
                    .replace("\xF5", "o~")
                    .replace("q\xFC", "qu")
                    .replace("g\xFC", "gu"))
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)
        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if word.endswith(suffix):
                if suffix == "amente" and r1.endswith(suffix):
                    step1_success = True
                    word = word[:-6]
                    r2 = r2[:-6]
                    rv = rv[:-6]
                    # Strip a preceding "iv" (and then "at"), "os",
                    # "ic" or "ad" if still in R2.
                    if r2.endswith("iv"):
                        word = word[:-2]
                        r2 = r2[:-2]
                        rv = rv[:-2]
                        if r2.endswith("at"):
                            word = word[:-2]
                            rv = rv[:-2]
                    elif r2.endswith(("os", "ic", "ad")):
                        word = word[:-2]
                        rv = rv[:-2]
                # "eira"/"eiras" in RV: checks the letter just before
                # the suffix is 'e', then maps the ending to "ir".
                elif (suffix in ("ira", "iras") and rv.endswith(suffix) and
                      word[-len(suffix)-1:-len(suffix)] == "e"):
                    step1_success = True
                    word = suffix_replace(word, suffix, "ir")
                    rv = suffix_replace(rv, suffix, "ir")
                elif r2.endswith(suffix):
                    step1_success = True
                    if suffix in ("logia", "logias"):
                        word = suffix_replace(word, suffix, "log")
                        rv = suffix_replace(rv, suffix, "log")
                    elif suffix in ("uça~o", "uço~es"):
                        word = suffix_replace(word, suffix, "u")
                        rv = suffix_replace(rv, suffix, "u")
                    elif suffix in ("\xEAncia", "\xEAncias"):
                        word = suffix_replace(word, suffix, "ente")
                        rv = suffix_replace(rv, suffix, "ente")
                    elif suffix == "mente":
                        word = word[:-5]
                        r2 = r2[:-5]
                        rv = rv[:-5]
                        if r2.endswith(("ante", "avel", "ivel")):
                            word = word[:-4]
                            rv = rv[:-4]
                    elif suffix in ("idade", "idades"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        if r2.endswith(("ic", "iv")):
                            word = word[:-2]
                            rv = rv[:-2]
                        elif r2.endswith("abil"):
                            word = word[:-4]
                            rv = rv[:-4]
                    elif suffix in ("iva", "ivo", "ivas", "ivos"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        if r2.endswith("at"):
                            word = word[:-2]
                            rv = rv[:-2]
                    else:
                        word = word[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                break
        # STEP 2: Verb suffixes
        # Only attempted when step 1 removed nothing.
        if not step1_success:
            for suffix in self.__step2_suffixes:
                if rv.endswith(suffix):
                    step2_success = True
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break
        # STEP 3: If a suffix was removed, delete a final 'i' after 'c'.
        if step1_success or step2_success:
            if rv.endswith("i") and word[-2] == "c":
                word = word[:-1]
                rv = rv[:-1]
        ### STEP 4: Residual suffix
        # Only attempted when neither step 1 nor step 2 fired.
        if not step1_success and not step2_success:
            for suffix in self.__step4_suffixes:
                if rv.endswith(suffix):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break
        # STEP 5: Residual form: delete a final 'e'-type vowel and tidy
        # a remaining "gu"/"ci" ending or final cedilla.
        if rv.endswith(("e", "\xE9", "\xEA")):
            word = word[:-1]
            rv = rv[:-1]
            if ((word.endswith("gu") and rv.endswith("u")) or
                (word.endswith("ci") and rv.endswith("i"))):
                word = word[:-1]
        elif word.endswith("\xE7"):
            word = suffix_replace(word, "\xE7", "c")
        # Undo the nasal-vowel encoding applied at the start.
        word = word.replace("a~", "\xE3").replace("o~", "\xF5")
        return word
class RomanianStemmer(_StandardStemmer):
    """
    The Romanian Snowball stemmer.
    :cvar __vowels: The Romanian vowels.
    :type __vowels: unicode
    :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
    :type __step0_suffixes: tuple
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the Romanian
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/romanian/stemmer.html
    """
    __vowels = "aeiou\u0103\xE2\xEE"
    __step0_suffixes = ('iilor', 'ului', 'elor', 'iile', 'ilor',
                        'atei', 'a\u0163ie', 'a\u0163ia', 'aua',
                        'ele', 'iua', 'iei', 'ile', 'ul', 'ea',
                        'ii')
    __step1_suffixes = ('abilitate', 'abilitati', 'abilit\u0103\u0163i',
                        'ibilitate', 'abilit\u0103i', 'ivitate',
                        'ivitati', 'ivit\u0103\u0163i', 'icitate',
                        'icitati', 'icit\u0103\u0163i', 'icatori',
                        'ivit\u0103i', 'icit\u0103i', 'icator',
                        'a\u0163iune', 'atoare', '\u0103toare',
                        'i\u0163iune', 'itoare', 'iciva', 'icive',
                        'icivi', 'iciv\u0103', 'icala', 'icale',
                        'icali', 'ical\u0103', 'ativa', 'ative',
                        'ativi', 'ativ\u0103', 'atori', '\u0103tori',
                        'itiva', 'itive', 'itivi', 'itiv\u0103',
                        'itori', 'iciv', 'ical', 'ativ', 'ator',
                        '\u0103tor', 'itiv', 'itor')
    __step2_suffixes = ('abila', 'abile', 'abili', 'abil\u0103',
                        'ibila', 'ibile', 'ibili', 'ibil\u0103',
                        'atori', 'itate', 'itati', 'it\u0103\u0163i',
                        'abil', 'ibil', 'oasa', 'oas\u0103', 'oase',
                        'anta', 'ante', 'anti', 'ant\u0103', 'ator',
                        'it\u0103i', 'iune', 'iuni', 'isme', 'ista',
                        'iste', 'isti', 'ist\u0103', 'i\u015Fti',
                        'ata', 'at\u0103', 'ati', 'ate', 'uta',
                        'ut\u0103', 'uti', 'ute', 'ita', 'it\u0103',
                        'iti', 'ite', 'ica', 'ice', 'ici', 'ic\u0103',
                        'osi', 'o\u015Fi', 'ant', 'iva', 'ive', 'ivi',
                        'iv\u0103', 'ism', 'ist', 'at', 'ut', 'it',
                        'ic', 'os', 'iv')
    __step3_suffixes = ('seser\u0103\u0163i', 'aser\u0103\u0163i',
                        'iser\u0103\u0163i', '\xE2ser\u0103\u0163i',
                        'user\u0103\u0163i', 'seser\u0103m',
                        'aser\u0103m', 'iser\u0103m', '\xE2ser\u0103m',
                        'user\u0103m', 'ser\u0103\u0163i', 'sese\u015Fi',
                        'seser\u0103', 'easc\u0103', 'ar\u0103\u0163i',
                        'ur\u0103\u0163i', 'ir\u0103\u0163i',
                        '\xE2r\u0103\u0163i', 'ase\u015Fi',
                        'aser\u0103', 'ise\u015Fi', 'iser\u0103',
                        '\xe2se\u015Fi', '\xE2ser\u0103',
                        'use\u015Fi', 'user\u0103', 'ser\u0103m',
                        'sesem', 'indu', '\xE2ndu', 'eaz\u0103',
                        'e\u015Fti', 'e\u015Fte', '\u0103\u015Fti',
                        '\u0103\u015Fte', 'ea\u0163i', 'ia\u0163i',
                        'ar\u0103m', 'ur\u0103m', 'ir\u0103m',
                        '\xE2r\u0103m', 'asem', 'isem',
                        '\xE2sem', 'usem', 'se\u015Fi', 'ser\u0103',
                        'sese', 'are', 'ere', 'ire', '\xE2re',
                        'ind', '\xE2nd', 'eze', 'ezi', 'esc',
                        '\u0103sc', 'eam', 'eai', 'eau', 'iam',
                        'iai', 'iau', 'a\u015Fi', 'ar\u0103',
                        'u\u015Fi', 'ur\u0103', 'i\u015Fi', 'ir\u0103',
                        '\xE2\u015Fi', '\xe2r\u0103', 'ase',
                        'ise', '\xE2se', 'use', 'a\u0163i',
                        'e\u0163i', 'i\u0163i', '\xe2\u0163i', 'sei',
                        'ez', 'am', 'ai', 'au', 'ea', 'ia', 'ui',
                        '\xE2i', '\u0103m', 'em', 'im', '\xE2m',
                        'se')
    def stem(self, word):
        """
        Stem a Romanian word and return the stemmed form.
        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode

        Note that the region checks in steps 0-4 use substring
        containment (``suffix in r1``), not ``endswith``, and that the
        regions are not recomputed after each replacement.
        """
        word = word.lower()
        # Stopwords are returned unchanged.
        if word in self.stopwords:
            return word
        step1_success = False
        step2_success = False
        # 'u' and 'i' between vowels are marked upper case so they are
        # treated as consonants; the marking is undone at the end.
        for i in range(1, len(word)-1):
            if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
                if word[i] == "u":
                    word = "".join((word[:i], "U", word[i+1:]))
                elif word[i] == "i":
                    word = "".join((word[:i], "I", word[i+1:]))
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)
        # STEP 0: Removal of plurals and other simplifications
        for suffix in self.__step0_suffixes:
            if word.endswith(suffix):
                if suffix in r1:
                    if suffix in ("ul", "ului"):
                        word = word[:-len(suffix)]
                        # Keep rv in step with the word; it becomes the
                        # empty region when the suffix was outside it.
                        if suffix in rv:
                            rv = rv[:-len(suffix)]
                        else:
                            rv = ""
                    elif (suffix == "aua" or suffix == "atei" or
                          (suffix == "ile" and word[-5:-3] != "ab")):
                        word = word[:-2]
                    elif suffix in ("ea", "ele", "elor"):
                        word = suffix_replace(word, suffix, "e")
                        if suffix in rv:
                            rv = suffix_replace(rv, suffix, "e")
                        else:
                            rv = ""
                    elif suffix in ("ii", "iua", "iei",
                                    "iile", "iilor", "ilor"):
                        word = suffix_replace(word, suffix, "i")
                        if suffix in rv:
                            rv = suffix_replace(rv, suffix, "i")
                        else:
                            rv = ""
                    elif suffix in ("a\u0163ie", "a\u0163ia"):
                        word = word[:-1]
                break
        # STEP 1: Reduction of combining suffixes
        # Repeated until no further replacement applies in one pass.
        while True:
            replacement_done = False
            for suffix in self.__step1_suffixes:
                if word.endswith(suffix):
                    if suffix in r1:
                        step1_success = True
                        replacement_done = True
                        if suffix in ("abilitate", "abilitati",
                                      "abilit\u0103i",
                                      "abilit\u0103\u0163i"):
                            word = suffix_replace(word, suffix, "abil")
                        elif suffix == "ibilitate":
                            word = word[:-5]
                        elif suffix in ("ivitate", "ivitati",
                                        "ivit\u0103i",
                                        "ivit\u0103\u0163i"):
                            word = suffix_replace(word, suffix, "iv")
                        elif suffix in ("icitate", "icitati", "icit\u0103i",
                                        "icit\u0103\u0163i", "icator",
                                        "icatori", "iciv", "iciva",
                                        "icive", "icivi", "iciv\u0103",
                                        "ical", "icala", "icale", "icali",
                                        "ical\u0103"):
                            word = suffix_replace(word, suffix, "ic")
                        elif suffix in ("ativ", "ativa", "ative", "ativi",
                                        "ativ\u0103", "a\u0163iune",
                                        "atoare", "ator", "atori",
                                        "\u0103toare",
                                        "\u0103tor", "\u0103tori"):
                            word = suffix_replace(word, suffix, "at")
                            if suffix in r2:
                                r2 = suffix_replace(r2, suffix, "at")
                        elif suffix in ("itiv", "itiva", "itive", "itivi",
                                        "itiv\u0103", "i\u0163iune",
                                        "itoare", "itor", "itori"):
                            word = suffix_replace(word, suffix, "it")
                            if suffix in r2:
                                r2 = suffix_replace(r2, suffix, "it")
                    else:
                        # Suffix present on the word but outside R1:
                        # the flag is explicitly reset.
                        step1_success = False
                    break
            if not replacement_done:
                break
        # STEP 2: Removal of standard suffixes
        for suffix in self.__step2_suffixes:
            if word.endswith(suffix):
                if suffix in r2:
                    step2_success = True
                    if suffix in ("iune", "iuni"):
                        # Only removed when preceded by a t-cedilla,
                        # which is rewritten as 't'.
                        if word[-5] == "\u0163":
                            word = "".join((word[:-5], "t"))
                    elif suffix in ("ism", "isme", "ist", "ista", "iste",
                                    "isti", "ist\u0103", "i\u015Fti"):
                        word = suffix_replace(word, suffix, "ist")
                    else:
                        word = word[:-len(suffix)]
                break
        # STEP 3: Removal of verb suffixes
        # Only attempted when neither step 1 nor step 2 fired.
        if not step1_success and not step2_success:
            for suffix in self.__step3_suffixes:
                if word.endswith(suffix):
                    if suffix in rv:
                        if suffix in ('seser\u0103\u0163i', 'seser\u0103m',
                                      'ser\u0103\u0163i', 'sese\u015Fi',
                                      'seser\u0103', 'ser\u0103m', 'sesem',
                                      'se\u015Fi', 'ser\u0103', 'sese',
                                      'a\u0163i', 'e\u0163i', 'i\u0163i',
                                      '\xE2\u0163i', 'sei', '\u0103m',
                                      'em', 'im', '\xE2m', 'se'):
                            word = word[:-len(suffix)]
                            rv = rv[:-len(suffix)]
                        else:
                            # Other verb suffixes require a consonant
                            # (or 'u') immediately before them in rv.
                            if (not rv.startswith(suffix) and
                                rv[rv.index(suffix)-1] not in
                                "aeio\u0103\xE2\xEE"):
                                word = word[:-len(suffix)]
                    break
        # STEP 4: Removal of final vowel
        for suffix in ("ie", "a", "e", "i", "\u0103"):
            if word.endswith(suffix):
                if suffix in rv:
                    word = word[:-len(suffix)]
                break
        # Undo the upper-case marking applied before region computation.
        word = word.replace("I", "i").replace("U", "u")
        return word
class RussianStemmer(_LanguageSpecificStemmer):
"""
The Russian Snowball stemmer.
:cvar __perfective_gerund_suffixes: Suffixes to be deleted.
:type __perfective_gerund_suffixes: tuple
:cvar __adjectival_suffixes: Suffixes to be deleted.
:type __adjectival_suffixes: tuple
:cvar __reflexive_suffixes: Suffixes to be deleted.
:type __reflexive_suffixes: tuple
:cvar __verb_suffixes: Suffixes to be deleted.
:type __verb_suffixes: tuple
:cvar __noun_suffixes: Suffixes to be deleted.
:type __noun_suffixes: tuple
:cvar __superlative_suffixes: Suffixes to be deleted.
:type __superlative_suffixes: tuple
:cvar __derivational_suffixes: Suffixes to be deleted.
:type __derivational_suffixes: tuple
:note: A detailed description of the Russian
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/russian/stemmer.html
"""
__perfective_gerund_suffixes = ("ivshis'", "yvshis'", "vshis'",
"ivshi", "yvshi", "vshi", "iv",
"yv", "v")
__adjectival_suffixes = ('ui^ushchi^ui^u', 'ui^ushchi^ai^a',
'ui^ushchimi', 'ui^ushchymi', 'ui^ushchego',
'ui^ushchogo', 'ui^ushchemu', 'ui^ushchomu',
'ui^ushchikh', 'ui^ushchykh',
'ui^ushchui^u', 'ui^ushchaia',
'ui^ushchoi^u', 'ui^ushchei^u',
'i^ushchi^ui^u', 'i^ushchi^ai^a',
'ui^ushchee', 'ui^ushchie',
'ui^ushchye', 'ui^ushchoe', 'ui^ushchei`',
'ui^ushchii`', 'ui^ushchyi`',
'ui^ushchoi`', 'ui^ushchem', 'ui^ushchim',
'ui^ushchym', 'ui^ushchom', 'i^ushchimi',
'i^ushchymi', 'i^ushchego', 'i^ushchogo',
'i^ushchemu', 'i^ushchomu', 'i^ushchikh',
'i^ushchykh', 'i^ushchui^u', 'i^ushchai^a',
'i^ushchoi^u', 'i^ushchei^u', 'i^ushchee',
'i^ushchie', 'i^ushchye', 'i^ushchoe',
'i^ushchei`', 'i^ushchii`',
'i^ushchyi`', 'i^ushchoi`', 'i^ushchem',
'i^ushchim', 'i^ushchym', 'i^ushchom',
'shchi^ui^u', 'shchi^ai^a', 'ivshi^ui^u',
'ivshi^ai^a', 'yvshi^ui^u', 'yvshi^ai^a',
'shchimi', 'shchymi', 'shchego', 'shchogo',
'shchemu', 'shchomu', 'shchikh', 'shchykh',
'shchui^u', 'shchai^a', 'shchoi^u',
'shchei^u', 'ivshimi', 'ivshymi',
'ivshego', 'ivshogo', 'ivshemu', 'ivshomu',
'ivshikh', 'ivshykh', 'ivshui^u',
'ivshai^a', 'ivshoi^u', 'ivshei^u',
'yvshimi', 'yvshymi', 'yvshego', 'yvshogo',
'yvshemu', 'yvshomu', 'yvshikh', 'yvshykh',
'yvshui^u', 'yvshai^a', 'yvshoi^u',
'yvshei^u', 'vshi^ui^u', 'vshi^ai^a',
'shchee', 'shchie', 'shchye', 'shchoe',
'shchei`', 'shchii`', 'shchyi`', 'shchoi`',
'shchem', 'shchim', 'shchym', 'shchom',
'ivshee', 'ivshie', 'ivshye', 'ivshoe',
'ivshei`', 'ivshii`', 'ivshyi`',
'ivshoi`', 'ivshem', 'ivshim', 'ivshym',
'ivshom', 'yvshee', 'yvshie', 'yvshye',
'yvshoe', 'yvshei`', 'yvshii`',
'yvshyi`', 'yvshoi`', 'yvshem',
'yvshim', 'yvshym', 'yvshom', 'vshimi',
'vshymi', 'vshego', 'vshogo', 'vshemu',
'vshomu', 'vshikh', 'vshykh', 'vshui^u',
'vshai^a', 'vshoi^u', 'vshei^u',
'emi^ui^u', 'emi^ai^a', 'nni^ui^u',
'nni^ai^a', 'vshee',
'vshie', 'vshye', 'vshoe', 'vshei`',
'vshii`', 'vshyi`', 'vshoi`',
'vshem', 'vshim', 'vshym', 'vshom',
'emimi', 'emymi', 'emego', 'emogo',
'ememu', 'emomu', 'emikh', 'emykh',
'emui^u', 'emai^a', 'emoi^u', 'emei^u',
'nnimi', 'nnymi', 'nnego', 'nnogo',
'nnemu', 'nnomu', 'nnikh', 'nnykh',
'nnui^u', 'nnai^a', 'nnoi^u', 'nnei^u',
'emee', 'emie', 'emye', 'emoe',
'emei`', 'emii`', 'emyi`',
'emoi`', 'emem', 'emim', 'emym',
'emom', 'nnee', 'nnie', 'nnye', 'nnoe',
'nnei`', 'nnii`', 'nnyi`',
'nnoi`', 'nnem', 'nnim', 'nnym',
'nnom', 'i^ui^u', 'i^ai^a', 'imi', 'ymi',
'ego', 'ogo', 'emu', 'omu', 'ikh',
'ykh', 'ui^u', 'ai^a', 'oi^u', 'ei^u',
'ee', 'ie', 'ye', 'oe', 'ei`',
'ii`', 'yi`', 'oi`', 'em',
'im', 'ym', 'om')
__reflexive_suffixes = ("si^a", "s'")
__verb_suffixes = ("esh'", 'ei`te', 'ui`te', 'ui^ut',
"ish'", 'ete', 'i`te', 'i^ut', 'nno',
'ila', 'yla', 'ena', 'ite', 'ili', 'yli',
'ilo', 'ylo', 'eno', 'i^at', 'uet', 'eny',
"it'", "yt'", 'ui^u', 'la', 'na', 'li',
'em', 'lo', 'no', 'et', 'ny', "t'",
'ei`', 'ui`', 'il', 'yl', 'im',
'ym', 'en', 'it', 'yt', 'i^u', 'i`',
'l', 'n')
__noun_suffixes = ('ii^ami', 'ii^akh', 'i^ami', 'ii^am', 'i^akh',
'ami', 'iei`', 'i^am', 'iem', 'akh',
'ii^u', "'i^u", 'ii^a', "'i^a", 'ev', 'ov',
'ie', "'e", 'ei', 'ii', 'ei`',
'oi`', 'ii`', 'em', 'am', 'om',
'i^u', 'i^a', 'a', 'e', 'i', 'i`',
'o', 'u', 'y', "'")
__superlative_suffixes = ("ei`she", "ei`sh")
__derivational_suffixes = ("ost'", "ost")
def stem(self, word):
    """
    Stem a Russian word and return the stemmed form.

    Implements the Snowball Russian algorithm on a romanized copy of the
    word: step 1 (perfective gerund, else reflexive + adjectival/verb/
    noun), step 2 (final 'i'), step 3 (derivational suffix in R2),
    step 4 (undouble 'nn' / superlative / soft sign).

    :param word: The word that is stemmed.
    :type word: str or unicode
    :return: The stemmed form.
    :rtype: unicode
    """
    if word in self.stopwords:
        return word
    # Any code point above Latin-1 means the word arrived in Cyrillic;
    # transliterate it to the romanized alphabet the suffix tables use,
    # and remember to transliterate back before returning.
    chr_exceeded = False
    for i in range(len(word)):
        if ord(word[i]) > 255:
            chr_exceeded = True
            break
    if chr_exceeded:
        word = self.__cyrillic_to_roman(word)
    step1_success = False
    adjectival_removed = False
    verb_removed = False
    undouble_success = False
    superlative_removed = False
    # RV: region after the first vowel; R2: see __regions_russian.
    # Suffixes are matched against RV, but removal is applied to word,
    # r2 and rv in lockstep so later region tests stay valid.
    rv, r2 = self.__regions_russian(word)
    # Step 1
    for suffix in self.__perfective_gerund_suffixes:
        if rv.endswith(suffix):
            # Group-1 gerund suffixes only count when preceded by 'a'
            # or 'i^a' (romanized Cyrillic).
            if suffix in ("v", "vshi", "vshis'"):
                if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
                    rv[-len(suffix)-1:-len(suffix)] == "a"):
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    step1_success = True
                    break
            else:
                word = word[:-len(suffix)]
                r2 = r2[:-len(suffix)]
                rv = rv[:-len(suffix)]
                step1_success = True
                break
    if not step1_success:
        # No perfective gerund: strip a reflexive ending, then try the
        # adjectival / verb / noun alternatives in that order.
        for suffix in self.__reflexive_suffixes:
            if rv.endswith(suffix):
                word = word[:-len(suffix)]
                r2 = r2[:-len(suffix)]
                rv = rv[:-len(suffix)]
                break
        for suffix in self.__adjectival_suffixes:
            if rv.endswith(suffix):
                # Participle endings (listed below) additionally require
                # a preceding 'a'/'i^a'; plain adjectival endings do not.
                if suffix in ('i^ushchi^ui^u', 'i^ushchi^ai^a',
                              'i^ushchui^u', 'i^ushchai^a', 'i^ushchoi^u',
                              'i^ushchei^u', 'i^ushchimi', 'i^ushchymi',
                              'i^ushchego', 'i^ushchogo', 'i^ushchemu',
                              'i^ushchomu', 'i^ushchikh', 'i^ushchykh',
                              'shchi^ui^u', 'shchi^ai^a', 'i^ushchee',
                              'i^ushchie', 'i^ushchye', 'i^ushchoe',
                              'i^ushchei`', 'i^ushchii`', 'i^ushchyi`',
                              'i^ushchoi`', 'i^ushchem', 'i^ushchim',
                              'i^ushchym', 'i^ushchom', 'vshi^ui^u',
                              'vshi^ai^a', 'shchui^u', 'shchai^a',
                              'shchoi^u', 'shchei^u', 'emi^ui^u',
                              'emi^ai^a', 'nni^ui^u', 'nni^ai^a',
                              'shchimi', 'shchymi', 'shchego', 'shchogo',
                              'shchemu', 'shchomu', 'shchikh', 'shchykh',
                              'vshui^u', 'vshai^a', 'vshoi^u', 'vshei^u',
                              'shchee', 'shchie', 'shchye', 'shchoe',
                              'shchei`', 'shchii`', 'shchyi`', 'shchoi`',
                              'shchem', 'shchim', 'shchym', 'shchom',
                              'vshimi', 'vshymi', 'vshego', 'vshogo',
                              'vshemu', 'vshomu', 'vshikh', 'vshykh',
                              'emui^u', 'emai^a', 'emoi^u', 'emei^u',
                              'nnui^u', 'nnai^a', 'nnoi^u', 'nnei^u',
                              'vshee', 'vshie', 'vshye', 'vshoe',
                              'vshei`', 'vshii`', 'vshyi`', 'vshoi`',
                              'vshem', 'vshim', 'vshym', 'vshom',
                              'emimi', 'emymi', 'emego', 'emogo',
                              'ememu', 'emomu', 'emikh', 'emykh',
                              'nnimi', 'nnymi', 'nnego', 'nnogo',
                              'nnemu', 'nnomu', 'nnikh', 'nnykh',
                              'emee', 'emie', 'emye', 'emoe', 'emei`',
                              'emii`', 'emyi`', 'emoi`', 'emem', 'emim',
                              'emym', 'emom', 'nnee', 'nnie', 'nnye',
                              'nnoe', 'nnei`', 'nnii`', 'nnyi`', 'nnoi`',
                              'nnem', 'nnim', 'nnym', 'nnom'):
                    if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
                        rv[-len(suffix)-1:-len(suffix)] == "a"):
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        adjectival_removed = True
                        break
                else:
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    adjectival_removed = True
                    break
        if not adjectival_removed:
            for suffix in self.__verb_suffixes:
                if rv.endswith(suffix):
                    # Same 'a'/'i^a' precondition for group-1 verb endings.
                    if suffix in ("la", "na", "ete", "i`te", "li",
                                  "i`", "l", "em", "n", "lo", "no",
                                  "et", "i^ut", "ny", "t'", "esh'",
                                  "nno"):
                        if (rv[-len(suffix)-3:-len(suffix)] == "i^a" or
                            rv[-len(suffix)-1:-len(suffix)] == "a"):
                            word = word[:-len(suffix)]
                            r2 = r2[:-len(suffix)]
                            rv = rv[:-len(suffix)]
                            verb_removed = True
                            break
                    else:
                        word = word[:-len(suffix)]
                        r2 = r2[:-len(suffix)]
                        rv = rv[:-len(suffix)]
                        verb_removed = True
                        break
        if not adjectival_removed and not verb_removed:
            for suffix in self.__noun_suffixes:
                if rv.endswith(suffix):
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break
    # Step 2: remove a final 'i'.
    if rv.endswith("i"):
        word = word[:-1]
        r2 = r2[:-1]
    # Step 3: derivational suffixes must lie entirely within R2.
    for suffix in self.__derivational_suffixes:
        if r2.endswith(suffix):
            word = word[:-len(suffix)]
            break
    # Step 4: undouble 'nn', else strip a superlative ending (then
    # undouble), else drop a trailing soft sign.
    if word.endswith("nn"):
        word = word[:-1]
        undouble_success = True
    if not undouble_success:
        for suffix in self.__superlative_suffixes:
            if word.endswith(suffix):
                word = word[:-len(suffix)]
                superlative_removed = True
                break
        if word.endswith("nn"):
            word = word[:-1]
    if not undouble_success and not superlative_removed:
        if word.endswith("'"):
            word = word[:-1]
    if chr_exceeded:
        word = self.__roman_to_cyrillic(word)
    return word
def __regions_russian(self, word):
    """
    Return the regions RV and R2 which are used by the Russian stemmer.

    In any word, RV is the region after the first vowel, or the end of
    the word if it contains no vowel.  R1 is the region after the first
    non-vowel following a vowel (or the end of the word), and R2 is the
    region after the first non-vowel following a vowel in R1 (or the end
    of the word).

    :param word: The Russian word whose regions RV and R2 are determined.
    :type word: str or unicode
    :return: the regions RV and R2 for the respective Russian word.
    :rtype: tuple
    :note: This helper method is invoked by the stem method of the
           subclass RussianStemmer. It is not to be invoked directly!
    """
    vowels = ("A", "U", "E", "a", "e", "i", "o", "u", "y")
    # Collapse the two-character romanized vowels into single-letter
    # placeholders so each vowel occupies exactly one position.
    marked = (word.replace("i^a", "A")
                  .replace("i^u", "U")
                  .replace("e`", "E"))

    r1 = ""
    for pos in range(1, len(marked)):
        if marked[pos] not in vowels and marked[pos - 1] in vowels:
            r1 = marked[pos + 1:]
            break

    r2 = ""
    for pos in range(1, len(r1)):
        if r1[pos] not in vowels and r1[pos - 1] in vowels:
            r2 = r1[pos + 1:]
            break

    rv = ""
    for pos, ch in enumerate(marked):
        if ch in vowels:
            rv = marked[pos + 1:]
            break

    def restore(region):
        # Undo the single-letter vowel placeholders.
        return (region.replace("A", "i^a")
                      .replace("U", "i^u")
                      .replace("E", "e`"))

    return (restore(rv), restore(r2))
def __cyrillic_to_roman(self, word):
    """
    Transliterate a Russian word into the Roman alphabet.

    A Russian word whose letters consist of the Cyrillic alphabet is
    transliterated into the Roman alphabet in order to ease the
    forthcoming stemming process.  Each Cyrillic letter (either case)
    maps to a fixed romanization, so the whole conversion is done in a
    single pass with str.translate().

    :param word: The word that is transliterated.
    :type word: unicode
    :return: the transliterated word.
    :rtype: unicode
    :note: This helper method is invoked by the stem method of the
           subclass RussianStemmer. It is not to be invoked directly!
    """
    # (uppercase+lowercase Cyrillic pair, romanization)
    pairs = (
        ("\u0410\u0430", "a"), ("\u0411\u0431", "b"),
        ("\u0412\u0432", "v"), ("\u0413\u0433", "g"),
        ("\u0414\u0434", "d"), ("\u0415\u0435", "e"),
        ("\u0401\u0451", "e"), ("\u0416\u0436", "zh"),
        ("\u0417\u0437", "z"), ("\u0418\u0438", "i"),
        ("\u0419\u0439", "i`"), ("\u041A\u043A", "k"),
        ("\u041B\u043B", "l"), ("\u041C\u043C", "m"),
        ("\u041D\u043D", "n"), ("\u041E\u043E", "o"),
        ("\u041F\u043F", "p"), ("\u0420\u0440", "r"),
        ("\u0421\u0441", "s"), ("\u0422\u0442", "t"),
        ("\u0423\u0443", "u"), ("\u0424\u0444", "f"),
        ("\u0425\u0445", "kh"), ("\u0426\u0446", "t^s"),
        ("\u0427\u0447", "ch"), ("\u0428\u0448", "sh"),
        ("\u0429\u0449", "shch"), ("\u042A\u044A", "''"),
        ("\u042B\u044B", "y"), ("\u042C\u044C", "'"),
        ("\u042D\u044D", "e`"), ("\u042E\u044E", "i^u"),
        ("\u042F\u044F", "i^a"),
    )
    table = {}
    for letters, roman in pairs:
        for letter in letters:
            table[ord(letter)] = roman
    return word.translate(table)
def __roman_to_cyrillic(self, word):
    """
    Transliterate a Russian word back into the Cyrillic alphabet.

    A Russian word formerly transliterated into the Roman alphabet
    in order to ease the stemming process, is transliterated back
    into the Cyrillic alphabet, its original form.

    :param word: The word that is transliterated.
    :type word: str or unicode
    :return: word, the transliterated word.
    :rtype: unicode
    :note: This helper method is invoked by the stem method of the subclass
           RussianStemmer. It is not to be invoked directly!
    """
    # The order of replacements is significant: multi-character
    # romanizations ("i^u", "shch", "kh", "t^s", "ch", "e`", "i`",
    # "sh", "zh", "''") must be converted before their single-letter
    # substrings, otherwise e.g. the 's' in "shch" would be consumed
    # first.  Do not reorder.
    # NOTE(review): .replace("e", ...) appears twice in this chain; the
    # second occurrence is redundant (harmless) -- kept as-is.
    word = (word.replace("i^u", "\u044E").replace("i^a", "\u044F")
                .replace("shch", "\u0449").replace("kh", "\u0445")
                .replace("t^s", "\u0446").replace("ch", "\u0447")
                .replace("e`", "\u044D").replace("i`", "\u0439")
                .replace("sh", "\u0448").replace("k", "\u043A")
                .replace("e", "\u0435").replace("zh", "\u0436")
                .replace("a", "\u0430").replace("b", "\u0431")
                .replace("v", "\u0432").replace("g", "\u0433")
                .replace("d", "\u0434").replace("e", "\u0435")
                .replace("z", "\u0437").replace("i", "\u0438")
                .replace("l", "\u043B").replace("m", "\u043C")
                .replace("n", "\u043D").replace("o", "\u043E")
                .replace("p", "\u043F").replace("r", "\u0440")
                .replace("s", "\u0441").replace("t", "\u0442")
                .replace("u", "\u0443").replace("f", "\u0444")
                .replace("''", "\u044A").replace("y", "\u044B")
                .replace("'", "\u044C"))
    return word
class SpanishStemmer(_StandardStemmer):
    """
    The Spanish Snowball stemmer.

    :cvar __vowels: The Spanish vowels.
    :type __vowels: unicode
    :cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
    :type __step0_suffixes: tuple
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm.
    :type __step2a_suffixes: tuple
    :cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm.
    :type __step2b_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the Spanish
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/spanish/stemmer.html
    """
    __vowels = "aeiou\xE1\xE9\xED\xF3\xFA\xFC"
    # Attached pronouns (step 0); longest first.
    __step0_suffixes = ("selas", "selos", "sela", "selo", "las",
                        "les", "los", "nos", "me", "se", "la", "le",
                        "lo")
    # Standard suffixes (step 1); longest first.
    __step1_suffixes = ('amientos', 'imientos', 'amiento', 'imiento',
                        'aciones', 'uciones', 'adoras', 'adores',
                        'ancias', 'log\xEDas', 'encias', 'amente',
                        'idades', 'anzas', 'ismos', 'ables', 'ibles',
                        'istas', 'adora', 'aci\xF3n', 'antes',
                        'ancia', 'log\xEDa', 'uci\xf3n', 'encia',
                        'mente', 'anza', 'icos', 'icas', 'ismo',
                        'able', 'ible', 'ista', 'osos', 'osas',
                        'ador', 'ante', 'idad', 'ivas', 'ivos',
                        'ico',
                        'ica', 'oso', 'osa', 'iva', 'ivo')
    # Verb suffixes beginning with 'y' (step 2a); only removed after 'u'.
    __step2a_suffixes = ('yeron', 'yendo', 'yamos', 'yais', 'yan',
                         'yen', 'yas', 'yes', 'ya', 'ye', 'yo',
                         'y\xF3')
    # Other verb suffixes (step 2b).
    __step2b_suffixes = ('ar\xEDamos', 'er\xEDamos', 'ir\xEDamos',
                         'i\xE9ramos', 'i\xE9semos', 'ar\xEDais',
                         'aremos', 'er\xEDais', 'eremos',
                         'ir\xEDais', 'iremos', 'ierais', 'ieseis',
                         'asteis', 'isteis', '\xE1bamos',
                         '\xE1ramos', '\xE1semos', 'ar\xEDan',
                         'ar\xEDas', 'ar\xE9is', 'er\xEDan',
                         'er\xEDas', 'er\xE9is', 'ir\xEDan',
                         'ir\xEDas', 'ir\xE9is',
                         'ieran', 'iesen', 'ieron', 'iendo', 'ieras',
                         'ieses', 'abais', 'arais', 'aseis',
                         '\xE9amos', 'ar\xE1n', 'ar\xE1s',
                         'ar\xEDa', 'er\xE1n', 'er\xE1s',
                         'er\xEDa', 'ir\xE1n', 'ir\xE1s',
                         'ir\xEDa', 'iera', 'iese', 'aste', 'iste',
                         'aban', 'aran', 'asen', 'aron', 'ando',
                         'abas', 'adas', 'idas', 'aras', 'ases',
                         '\xEDais', 'ados', 'idos', 'amos', 'imos',
                         'emos', 'ar\xE1', 'ar\xE9', 'er\xE1',
                         'er\xE9', 'ir\xE1', 'ir\xE9', 'aba',
                         'ada', 'ida', 'ara', 'ase', '\xEDan',
                         'ado', 'ido', '\xEDas', '\xE1is',
                         '\xE9is', '\xEDa', 'ad', 'ed', 'id',
                         'an', 'i\xF3', 'ar', 'er', 'ir', 'as',
                         '\xEDs', 'en', 'es')
    # Residual suffixes (step 3).
    __step3_suffixes = ("os", "a", "e", "o", "\xE1",
                        "\xE9", "\xED", "\xF3")

    def stem(self, word):
        """
        Stem a Spanish word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        if word in self.stopwords:
            return word
        step1_success = False
        # R1/R2/RV regions per the Snowball framework; suffix removal is
        # conditioned on these regions, and they are trimmed in lockstep
        # with word so later tests stay valid.
        r1, r2 = self._r1r2_standard(word, self.__vowels)
        rv = self._rv_standard(word, self.__vowels)
        # STEP 0: Attached pronoun
        for suffix in self.__step0_suffixes:
            if not (word.endswith(suffix) and rv.endswith(suffix)):
                continue
            # The pronoun is removed only after a gerund or infinitive
            # ending (possibly accented, the accent is then dropped).
            if ((rv[:-len(suffix)].endswith(("ando", "\xE1ndo",
                                             "ar", "\xE1r",
                                             "er", "\xE9r",
                                             "iendo", "i\xE9ndo",
                                             "ir", "\xEDr"))) or
                (rv[:-len(suffix)].endswith("yendo") and
                 word[:-len(suffix)].endswith("uyendo"))):
                word = self.__replace_accented(word[:-len(suffix)])
                r1 = self.__replace_accented(r1[:-len(suffix)])
                r2 = self.__replace_accented(r2[:-len(suffix)])
                rv = self.__replace_accented(rv[:-len(suffix)])
            break
        # STEP 1: Standard suffix removal
        for suffix in self.__step1_suffixes:
            if not word.endswith(suffix):
                continue
            if suffix == "amente" and r1.endswith(suffix):
                step1_success = True
                # Remove "amente", then a residual "iv" (optionally
                # followed by "at") or "os"/"ic"/"ad" in R2.
                word = word[:-6]
                r2 = r2[:-6]
                rv = rv[:-6]
                if r2.endswith("iv"):
                    word = word[:-2]
                    r2 = r2[:-2]
                    rv = rv[:-2]
                    if r2.endswith("at"):
                        word = word[:-2]
                        rv = rv[:-2]
                elif r2.endswith(("os", "ic", "ad")):
                    word = word[:-2]
                    rv = rv[:-2]
            elif r2.endswith(suffix):
                step1_success = True
                if suffix in ("adora", "ador", "aci\xF3n", "adoras",
                              "adores", "aciones", "ante", "antes",
                              "ancia", "ancias"):
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    if r2.endswith("ic"):
                        word = word[:-2]
                        rv = rv[:-2]
                elif suffix in ("log\xEDa", "log\xEDas"):
                    # "logía(s)" -> "log"
                    word = suffix_replace(word, suffix, "log")
                    rv = suffix_replace(rv, suffix, "log")
                elif suffix in ("uci\xF3n", "uciones"):
                    # "ución/uciones" -> "u"
                    word = suffix_replace(word, suffix, "u")
                    rv = suffix_replace(rv, suffix, "u")
                elif suffix in ("encia", "encias"):
                    # "encia(s)" -> "ente"
                    word = suffix_replace(word, suffix, "ente")
                    rv = suffix_replace(rv, suffix, "ente")
                elif suffix == "mente":
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    if r2.endswith(("ante", "able", "ible")):
                        word = word[:-4]
                        rv = rv[:-4]
                elif suffix in ("idad", "idades"):
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    for pre_suff in ("abil", "ic", "iv"):
                        if r2.endswith(pre_suff):
                            word = word[:-len(pre_suff)]
                            rv = rv[:-len(pre_suff)]
                elif suffix in ("ivo", "iva", "ivos", "ivas"):
                    word = word[:-len(suffix)]
                    r2 = r2[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    if r2.endswith("at"):
                        word = word[:-2]
                        rv = rv[:-2]
                else:
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
            break
        # STEP 2a: Verb suffixes beginning 'y'
        # (only attempted when step 1 removed nothing)
        if not step1_success:
            for suffix in self.__step2a_suffixes:
                if (rv.endswith(suffix) and
                    word[-len(suffix)-1:-len(suffix)] == "u"):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    break
            # STEP 2b: Other verb suffixes
            for suffix in self.__step2b_suffixes:
                if rv.endswith(suffix):
                    word = word[:-len(suffix)]
                    rv = rv[:-len(suffix)]
                    # After these endings a preceding "gu" loses its 'u'.
                    if suffix in ("en", "es", "\xE9is", "emos"):
                        if word.endswith("gu"):
                            word = word[:-1]
                        if rv.endswith("gu"):
                            rv = rv[:-1]
                    break
        # STEP 3: Residual suffix
        for suffix in self.__step3_suffixes:
            if rv.endswith(suffix):
                word = word[:-len(suffix)]
                if suffix in ("e", "\xE9"):
                    rv = rv[:-len(suffix)]
                    # "gue"/"gué" -> "g" (drop the now-final 'u' too)
                    if word[-2:] == "gu" and rv.endswith("u"):
                        word = word[:-1]
                break
        word = self.__replace_accented(word)
        return word

    def __replace_accented(self, word):
        """
        Replaces all accented letters on a word with their non-accented
        counterparts.

        :param word: A spanish word, with or without accents
        :type word: str or unicode
        :return: a word with the accented letters (á, é, í, ó, ú) replaced with
                 their non-accented counterparts (a, e, i, o, u)
        :rtype: str or unicode
        """
        return (word.replace("\xE1", "a")
                    .replace("\xE9", "e")
                    .replace("\xED", "i")
                    .replace("\xF3", "o")
                    .replace("\xFA", "u"))
class SwedishStemmer(_ScandinavianStemmer):
    """
    The Swedish Snowball stemmer.

    :cvar __vowels: The Swedish vowels.
    :type __vowels: unicode
    :cvar __s_ending: Letters that may directly appear before a word final 's'.
    :type __s_ending: unicode
    :cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
    :type __step1_suffixes: tuple
    :cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
    :type __step2_suffixes: tuple
    :cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
    :type __step3_suffixes: tuple
    :note: A detailed description of the Swedish
           stemming algorithm can be found under
           http://snowball.tartarus.org/algorithms/swedish/stemmer.html
    """
    __vowels = "aeiouy\xE4\xE5\xF6"
    __s_ending = "bcdfghjklmnoprtvy"
    __step1_suffixes = ("heterna", "hetens", "heter", "heten",
                        "anden", "arnas", "ernas", "ornas", "andes",
                        "andet", "arens", "arna", "erna", "orna",
                        "ande", "arne", "aste", "aren", "ades",
                        "erns", "ade", "are", "ern", "ens", "het",
                        "ast", "ad", "en", "ar", "er", "or", "as",
                        "es", "at", "a", "e", "s")
    __step2_suffixes = ("dd", "gd", "nn", "dt", "gt", "kt", "tt")
    __step3_suffixes = ("fullt", "l\xF6st", "els", "lig", "ig")

    def stem(self, word):
        """
        Stem a Swedish word and return the stemmed form.

        :param word: The word that is stemmed.
        :type word: str or unicode
        :return: The stemmed form.
        :rtype: unicode
        """
        word = word.lower()
        if word in self.stopwords:
            return word

        # All suffix tests below are restricted to the R1 region.
        r1 = self._r1_scandinavian(word, self.__vowels)

        # STEP 1: longest-match removal of common endings; a bare final
        # 's' is only dropped when preceded by a valid s-ending letter.
        for suffix in self.__step1_suffixes:
            if not r1.endswith(suffix):
                continue
            if suffix == "s":
                if word[-2] in self.__s_ending:
                    word, r1 = word[:-1], r1[:-1]
            else:
                cut = len(suffix)
                word, r1 = word[:-cut], r1[:-cut]
            break

        # STEP 2: shorten a double-consonant ending by one letter.
        for suffix in self.__step2_suffixes:
            if r1.endswith(suffix):
                word, r1 = word[:-1], r1[:-1]
                break

        # STEP 3: delete or shorten a residual suffix.
        for suffix in self.__step3_suffixes:
            if not r1.endswith(suffix):
                continue
            if suffix in ("els", "lig", "ig"):
                word = word[:-len(suffix)]
            else:
                # "fullt" / "l\xF6st": drop only the final letter
                word = word[:-1]
            break

        return word
def demo():
    """
    Interactive demonstration of the Snowball stemmers.

    Repeatedly prompts for a language name, stems an excerpt of the
    Universal Declaration of Human Rights (from the NLTK corpus
    collection) with the corresponding stemmer, and prints the original
    and stemmed text side by side.  Enter 'exit' to leave.
    """
    import re
    from nltk.corpus import udhr

    # Map stemmer language names to the matching UDHR corpus fileids.
    udhr_corpus = {"danish": "Danish_Dansk-Latin1",
                   "dutch": "Dutch_Nederlands-Latin1",
                   "english": "English-Latin1",
                   "finnish": "Finnish_Suomi-Latin1",
                   "french": "French_Francais-Latin1",
                   "german": "German_Deutsch-Latin1",
                   "hungarian": "Hungarian_Magyar-UTF8",
                   "italian": "Italian_Italiano-Latin1",
                   "norwegian": "Norwegian-Latin1",
                   "porter": "English-Latin1",
                   "portuguese": "Portuguese_Portugues-Latin1",
                   "romanian": "Romanian_Romana-Latin2",
                   "russian": "Russian-UTF8",
                   "spanish": "Spanish-Latin1",
                   "swedish": "Swedish_Svenska-Latin1",
                   }

    def wrap(text):
        # Fold the text into lines of at most 70 characters.
        return re.sub(r"(.{,70})\s", r'\1\n', text + ' ').rstrip()

    print("\n")
    print("******************************")
    print("Demo for the Snowball stemmers")
    print("******************************")

    prompt = ("Please enter the name of the language " +
              "to be demonstrated\n" +
              "/".join(SnowballStemmer.languages) +
              "\n" +
              "(enter 'exit' in order to leave): ")

    while True:
        language = compat.raw_input(prompt)
        if language == "exit":
            break
        if language not in SnowballStemmer.languages:
            print(("\nOops, there is no stemmer for this language. " +
                   "Please try again.\n"))
            continue

        stemmer = SnowballStemmer(language)
        excerpt = udhr.words(udhr_corpus[language])[:300]
        stemmed = wrap(" ".join(stemmer.stem(word) for word in excerpt))
        excerpt = wrap(" ".join(excerpt))

        print("\n")
        print('-' * 70)
        print('ORIGINAL'.center(70))
        print(excerpt)
        print("\n\n")
        print('STEMMED RESULTS'.center(70))
        print(stemmed)
        print('-' * 70)
        print("\n")
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| Reagankm/KnockKnock | venv/lib/python3.4/site-packages/nltk/stem/snowball.py | Python | gpl-2.0 | 145,962 | [
"ASE"
] | 28d631f015750260173b4129f6b8801860c57054511a56a395219721f70ab027 |
# Copyright 2015 Planet Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from setuptools import setup
from setuptools import distutils
import os
import sys
def get_version_from_pkg_info():
    """Read the distribution version from a PKG-INFO file in the cwd.

    A PKG-INFO file is present when building/installing from an sdist.
    """
    pkg_info = distutils.dist.DistributionMetadata("PKG-INFO")
    return pkg_info.version
def get_version_from_pyver():
    """Derive the version via the optional pyver package.

    Packaging commands (sdist/bdist_wheel) must embed real version
    metadata, so pyver is mandatory for them; any other invocation can
    fall back to the placeholder 'noversion'.
    """
    try:
        import pyver
    except ImportError:
        building = 'sdist' in sys.argv or 'bdist_wheel' in sys.argv
        if building:
            raise ImportError('You must install pyver to create a package')
        return 'noversion'
    version, _version_info = pyver.get_version(pkg="datalake_ingester",
                                               public=True)
    return version
def get_version():
    """Return the package version, preferring PKG-INFO when present."""
    use_pkg_info = os.path.exists("PKG-INFO")
    source = get_version_from_pkg_info if use_pkg_info else get_version_from_pyver
    return source()
setup(name='datalake_ingester',
url='https://github.com/planetlabs/datalake-ingester',
version=get_version(),
description='datalake_ingester ingests datalake metadata records',
author='Brian Cavagnolo',
author_email='brian@planet.com',
packages=['datalake_ingester'],
install_requires=[
'pyver>=1.0.18',
'boto>=2.38.0',
'configargparse>=0.9.3',
'memoized_property>=1.0.2',
'simplejson>=3.3.1',
'datalake-common>=0.26',
'raven>=5.6.0',
'click>=5.1',
],
extras_require={
'test': [
'pytest==2.7.2',
'pip==7.1.0',
'wheel==0.24.0',
'moto==0.4.22',
'flake8==2.5.0',
'freezegun==0.3.9',
]
},
entry_points="""
[console_scripts]
datalake_tool=datalake_ingester.cli:cli
""")
| planetlabs/datalake-ingester | setup.py | Python | apache-2.0 | 2,293 | [
"Brian"
] | 180715c1bcd36ea70eeae2054a1ba279e4819edc7a3c1c90980de79950889aa1 |
# Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# for python3 compatibility
from __future__ import print_function
# pdb tool
oneline = "Read, write PDB files in combo with LAMMPS snapshots"
docstr = """
p = pdbfile("3CRO") create pdb object from PDB file or WWW
p = pdbfile("pep1 pep2") read in multiple PDB files
p = pdbfile("pep*") can use wildcards
p = pdbfile(d) read in snapshot data with no PDB file
p = pdbfile("3CRO",d) read in single PDB file with snapshot data
string arg contains one or more PDB files
don't need .pdb suffix except wildcard must expand to file.pdb
if only one 4-char file specified and it is not found,
it will be downloaded from http://www.rcsb.org as 3CRO.pdb
d arg is object with atom coordinates (dump, data)
p.one() write all output as one big PDB file to tmp.pdb
p.one("mine") write to mine.pdb
p.many() write one PDB file per snapshot: tmp0000.pdb, ...
p.many("mine") write as mine0000.pdb, mine0001.pdb, ...
p.single(N) write timestamp N as tmp.pdb
p.single(N,"new") write as new.pdb
how new PDB files are created depends on constructor inputs:
if no d: one new PDB file for each file in string arg (just a copy)
if only d specified: one new PDB file per snapshot in generic format
if one file in str arg and d: one new PDB file per snapshot
using input PDB file as template
multiple input PDB files with a d is not allowed
index,time,flag = p.iterator(0)
index,time,flag = p.iterator(1)
iterator = loop over number of PDB files
call first time with arg = 0, thereafter with arg = 1
N = length = # of snapshots or # of input PDB files
index = index of snapshot or input PDB file (0 to N-1)
time = timestep value (time stamp for snapshot, index for multiple PDB)
flag = -1 when iteration is done, 1 otherwise
typically call p.single(time) in iterated loop to write out one PDB file
"""
# History
# 8/05, Steve Plimpton (SNL): original version
# ToDo list
# for generic PDB file (no template) from a LJ unit system,
# the atoms in PDB file are too close together
# Variables
# files = list of input PDB files
# data = data object (ccell,data,dump) to read snapshots from
# atomlines = dict of ATOM lines in original PDB file
# key = atom id, value = tuple of (beginning,end) of line
# Imports and external programs
import sys, types, glob, urllib
# Class definition
class pdbfile:
    """
    Read and write PDB files, optionally combined with a snapshot object
    (dump, data, ...) that supplies atom coordinates.  See the module
    docstr for usage examples.
    """

    # --------------------------------------------------------------------

    def __init__(self, *args):
        """
        Create from PDB file name(s), a snapshot object, or both.

        Accepts (filestr), (data), or (filestr, data), where filestr is a
        whitespace-separated list of PDB file names (wildcards allowed,
        ".pdb" suffix optional) and data is a snapshot object providing
        iterator()/findtime()/viz().
        """
        if len(args) == 1:
            # single arg is either a file-name string or a data object
            # (was `type(args[0]) is bytes`, which never matches str on
            # Python 3 despite the module's py3-compat intent)
            if isinstance(args[0], str):
                filestr = args[0]
                self.data = None
            else:
                filestr = None
                self.data = args[0]
        elif len(args) == 2:
            filestr = args[0]
            self.data = args[1]
        else:
            raise Exception("invalid args for pdb()")

        # flist = full list of all PDB input file names:
        # expand wildcards and append .pdb suffix where needed
        if filestr:
            names = filestr.split()
            flist = []
            for name in names:
                if '*' in name:
                    flist += glob.glob(name)
                else:
                    flist.append(name)
            for i in range(len(flist)):
                if flist[i][-4:] != ".pdb":
                    flist[i] += ".pdb"
            if len(flist) == 0:
                raise Exception("no PDB file specified")
            self.files = flist
        else:
            self.files = []

        if len(self.files) > 1 and self.data:
            raise Exception("cannot use multiple PDB files with data object")
        if len(self.files) == 0 and not self.data:
            raise Exception("no input PDB file(s)")

        # grab PDB file from http://rcsb.org if not a local file
        # (a single 4-char id expands to an 8-char "XXXX.pdb" name)
        if len(self.files) == 1 and len(self.files[0]) == 8:
            try:
                open(self.files[0], 'r').close()
            except IOError:
                # py2/py3 compatible import of urlretrieve
                try:
                    from urllib.request import urlretrieve
                except ImportError:
                    from urllib import urlretrieve
                print("downloading %s from http://rcsb.org" % self.files[0])
                # NOTE(review): the query string hard-codes pdbId=2cpk while
                # the requested id is substituted into the path -- confirm
                # against the (long-deprecated) rcsb.org export CGI.
                fetchstr = "http://www.rcsb.org/pdb/cgi/export.cgi/%s?format=PDB&pdbId=2cpk&compression=None" % self.files[0]
                urlretrieve(fetchstr, self.files[0])

        if self.data and len(self.files):
            self.read_template(self.files[0])

    # --------------------------------------------------------------------
    # write a single large PDB file concatenating all snapshots or files
    # if data exists:
    #   only selected atoms returned by extract
    #   atoms written in order they appear in snapshot
    #   atom only written if its tag is in PDB template file
    # if no data:
    #   concatenate all input files to one output file

    def one(self, *args):
        """Write everything to one PDB file (default tmp.pdb)."""
        if len(args) == 0:
            filename = "tmp.pdb"
        elif args[0][-4:] == ".pdb":
            filename = args[0]
        else:
            filename = args[0] + ".pdb"
        f = open(filename, 'w')

        n = 0
        if self.data:
            # one END-terminated entry per selected snapshot,
            # using the template PDB file for ATOM line layout
            flag = 0
            while 1:
                which, time, flag = self.data.iterator(flag)
                if flag == -1:
                    break
                self.convert(f, which)
                print("END", file=f)
                # progress indicator
                # NOTE(review): end='' joins timestamps without a
                # separator; the py2 original printed them space-separated
                print(time, end='')
                sys.stdout.flush()
                n += 1
        else:
            # plain concatenation of the input files
            # (was: loop variable clobbered the output file name, and n
            # was never set in this branch -> NameError in the summary)
            for infile in self.files:
                f.write(open(infile, 'r').read())
                print("END", file=f)
                print(infile, end='')
                sys.stdout.flush()
                n += 1

        f.close()
        print("\nwrote %d datasets to %s in PDB format" % (n, filename))

    # --------------------------------------------------------------------
    # helper: zero-padded output file name tmp0000.pdb, tmp0001.pdb, ...

    def _outfile_name(self, root, n):
        """Return root + 4-digit zero-padded index + '.pdb'."""
        # width grows automatically past 9999, matching the old chain of
        # if/elif string concatenations
        return "%s%04d.pdb" % (root, n)

    # --------------------------------------------------------------------
    # write series of numbered PDB files
    # if data exists: one file per selected snapshot (template or generic)
    # if no data:     copy each input file to a numbered output file

    def many(self, *args):
        """Write one numbered PDB file per snapshot or input file."""
        root = "tmp" if len(args) == 0 else args[0]

        n = 0
        if self.data:
            flag = 0
            while 1:
                which, time, flag = self.data.iterator(flag)
                if flag == -1:
                    break
                f = open(self._outfile_name(root, n), 'w')
                self.convert(f, which)
                f.close()
                print(time, end='')
                sys.stdout.flush()
                n += 1
        else:
            for infile in self.files:
                fname = self._outfile_name(root, n)
                f = open(fname, 'w')
                f.write(open(infile, 'r').read())
                f.close()
                print(fname, end='')
                sys.stdout.flush()
                n += 1
        print("\nwrote %d datasets to %s*.pdb in PDB format" % (n, root))

    # --------------------------------------------------------------------
    # write a single PDB file
    # if data exists: time is a snapshot timestamp
    # if no data:     time is an index into the list of input PDB files

    def single(self, time, *args):
        """Write one snapshot (by timestamp) or one input file (by index)."""
        if len(args) == 0:
            filename = "tmp.pdb"
        elif args[0][-4:] == ".pdb":
            filename = args[0]
        else:
            filename = args[0] + ".pdb"
        f = open(filename, 'w')
        if self.data:
            which = self.data.findtime(time)
            self.convert(f, which)
        else:
            f.write(open(self.files[time], 'r').read())
        f.close()

    # --------------------------------------------------------------------
    # iterate over list of input files or selected snapshots
    # latter is done via the data object's iterator

    def iterator(self, flag):
        """
        Iterate over input PDB files or snapshots.

        Call with flag=0 first, then flag=1; returns (index, time, flag)
        with flag == -1 when the iteration is exhausted.
        """
        if not self.data:
            if not flag:
                self.iterate = 0
            else:
                self.iterate += 1
                # valid file indices are 0..len-1 (was '>', which yielded
                # one extra, out-of-range index before terminating)
                if self.iterate >= len(self.files):
                    return 0, 0, -1
            return self.iterate, self.iterate, 1
        return self.data.iterator(flag)

    # --------------------------------------------------------------------
    # read a PDB file and store its ATOM lines, keyed by atom id

    def read_template(self, file):
        """Parse ATOM records of a template PDB file into self.atomlines."""
        lines = open(file, 'r').readlines()
        self.atomlines = {}
        for line in lines:
            if line.find("ATOM") == 0:
                tag = int(line[4:11])
                begin = line[:30]   # record name/serial/atom/residue fields
                end = line[54:]     # everything after the xyz coordinates
                self.atomlines[tag] = (begin, end)

    # --------------------------------------------------------------------
    # convert one snapshot to PDB format and write it to f

    def convert(self, f, which):
        """Write snapshot number `which` to open file f in PDB format."""
        time, box, atoms, bonds, tris, lines = self.data.viz(which)
        if len(self.files):
            # template mode: splice new coordinates into the stored
            # ATOM lines; atoms absent from the template are skipped
            for atom in atoms:
                tag = atom[0]
                if tag in self.atomlines:
                    (begin, end) = self.atomlines[tag]
                    line = "%s%8.3f%8.3f%8.3f%s" % (begin, atom[2],
                                                    atom[3], atom[4], end)
                    # template `end` retains its newline, hence end=''
                    print(line, file=f, end='')
        else:
            # generic mode: synthesize minimal ATOM records
            for atom in atoms:
                begin = "ATOM %6d %2d R00 1 " % (atom[0], atom[1])
                middle = "%8.3f%8.3f%8.3f" % (atom[2], atom[3], atom[4])
                end = " 1.00 0.00 NONE"
                print(begin + middle + end, file=f)
| arielzn/lammps | python/examples/pizza/pdbfile.py | Python | gpl-2.0 | 9,430 | [
"LAMMPS"
] | 5939cab7e8ea7197a91dd5d895e7dc38a5f0d42513d7f8d3a4814827ceeb5ad6 |
__author__ = 'cmueller'
#Todo: Get rid of "GaussianBeams" import call which is a predecessor to pylase and no longer exists
import scipy.fftpack as spfft
import scipy.misc
import scipy.special
import numpy as np
import GaussianBeams.gaussBeamFunctions as gb
import warnings
def propagate(e0, d, scale=1, wvlnt=266e-9):
    """ Propagates the field defined in e0 by a distance d

    Numerically propagates the electric field sampled in :code:`e0` over a
    distance :code:`d` using the paraxial angular-spectrum method: the field
    is Fourier transformed, multiplied by the quadratic free-space phase
    factor exp(i*d*k_perp**2/(2*k0)), and transformed back.

    :param e0: A 2d complex array defining the electric field
    :param d: The distance to propagate the field
    :param scale: The physical size of each element of e0
    :param wvlnt: The wavelength of the radiation represented by e0
    :type e0: 2d ndarray
    :type d: float
    :type scale: float
    :type wvlnt: float
    :return: The electric field at the new location
    :rtype: 2d ndarray
    """
    ny, nx = e0.shape
    k0 = 2 * np.pi / wvlnt

    # Transverse wavenumber samples along each axis
    ky = 2 * np.pi * spfft.fftfreq(ny, scale)
    kx = 2 * np.pi * spfft.fftfreq(nx, scale)

    # Separable quadratic phase propagator (unit modulus)
    coeff = 1j * d / (2 * k0)
    phase = np.outer(np.exp(coeff * ky**2), np.exp(coeff * kx**2))

    return spfft.ifft2(spfft.fft2(e0) * phase)
def field_gaussian(q, size, wvlnt, nPoints=(1024, 1024), mode=(0, 0)):
    """ Returns a 2d array representing the Gaussian field described by the given q parameter.
    :param q: The q parameter which defines the Gaussian beam
    :param size: The physical size of the grid (in the same units as q)
    :param wvlnt: The wavelength of the radiation
    :param nPoints: The number of points of the grid
    :param mode: Describes the mode in the Hermite-Gauss basis
    :type q: complex
    :type size: 2 element tuple or int
    :type wvlnt: float
    :type nPoints: 2 element tuple of int
    :type mode: 2 element tuple
    :return: 2d array representing the electric field of the
        Gaussian beam as well as two arrays with the x and y coordinates
    :rtype: (2d complex ndarray, 2d real ndarray, 2d real ndarray)
    """
    # --- Parse the `size` argument into the physical extents (dx, dy) ---
    if hasattr(size, '__len__'):
        if len(size) == 2:
            dx, dy = size
        else:
            raise TypeError('size should be a two element tuple or an int')
    elif type(size) is float or type(size) is int:
        dx, dy = size, size
    else:
        raise TypeError('size should be a two element tuple or an int')
    # --- Parse the `nPoints` argument into the grid shape (sx, sy) ---
    if hasattr(nPoints, '__len__'):
        if len(nPoints) == 2:
            sx, sy = nPoints
        else:
            raise TypeError('nPoints should be a two element tuple or an int')
    elif type(nPoints) is int:
        sx, sy = nPoints, nPoints
    else:
        raise TypeError('nPoints should be a two element tuple or an int')
    n, m = mode
    # Create the physical grid
    x_list = np.linspace(-dx/2, dx/2, sx)
    y_list = np.linspace(-dy/2, dy/2, sy)
    x, y = np.meshgrid(x_list, y_list)
    # Beam parameters derived from q (gb helpers give waist and spot size)
    q0 = 1j*np.imag(q)
    w0 = gb.qOmega0(q, wvlnt)
    w = gb.qOmega(q, wvlnt)
    k = 2*np.pi/wvlnt
    # BUGFIX: hermval's coefficient vector needs n leading zeros to select
    # H_n; the previous `[0]*(n-1) + [1]` selected H_{n-1} for every n >= 1.
    x_herm = np.polynomial.hermite.hermval(2**(1/2)*x_list/w, [0.0] * n + [1.0])
    y_herm = np.polynomial.hermite.hermval(2**(1/2)*y_list/w, [0.0] * m + [1.0])
    # BUGFIX: scipy.misc.factorial was removed in SciPy 1.3; the maintained
    # equivalent lives in scipy.special (already imported at module level).
    n_fac = scipy.special.factorial(n)
    m_fac = scipy.special.factorial(m)
    # Separable Hermite-Gauss field: outer product of the 1D x and y profiles
    x_field = (2/np.pi)**(1/4) * (1/(2**n * n_fac * w0))**(1/2) * \
              (q0/np.conjugate(q0)*np.conjugate(q)/q)**(n/2) * x_herm * \
              np.exp(-1j*k/2 * x_list**2/q)
    y_field = (2/np.pi)**(1/4) * (1/(2**m * m_fac * w0))**(1/2) * \
              (q0/np.conjugate(q0) * np.conjugate(q)/q)**(m/2) * y_herm * \
              np.exp(-1j*k/2 * y_list**2/q)
    e0 = np.outer(y_field, x_field)
    return e0, x, y
def phase_mirror(radius, x, y, wvlnt):
    """ Returns the phase transformation picked up at a mirror.
    If the beam incident on the mirror is given by e0, and the phase returned from this function
    is :code:`phase = phase_mirror(radius, size)`, then the beam reflected from the mirror is
    :code:`ef = e0 * phs`.
    :param radius: The radius of curvature of the mirror
    :param x: The x coordinate matrix which describes the x coordinates of the beam
    :param y: The y-coordinate matrix which describes the y coordinates of the beam.
    :param wvlnt: The wavelength of the radiation
    :type radius: float
    :type x: 2d ndarray
    :type y: 2d ndarray
    :type wvlnt: float
    :return: The phase picked up by the beam in reflection from the mirror.
    :rtype: 2d ndarray (same size as x and y)
    """
    # Validate inputs
    x, y = np.array(x), np.array(y)
    # isinstance instead of `type(...) is ...` so numpy float scalars (which
    # subclass float) are accepted too; other types still raise.
    if not isinstance(radius, (int, float)):
        raise TypeError('radius should be a scalar')
    # BUGFIX: `len(x.shape) is not 2` identity-compared int literals, which is
    # undefined behaviour (SyntaxWarning on CPython >= 3.8); use ndim != 2.
    if (x.ndim != 2) or (y.ndim != 2):
        raise TypeError('x and y should be 2d arrays')
    if not x.shape == y.shape:
        raise TypeError('x and y should be the same size')
    # Radial coordinate; points beyond the mirror edge are clamped to r = 0
    # (phase 1) with a warning, matching the original behaviour.
    r = (x**2 + y**2)**(1/2)
    if np.any(r > abs(radius)):
        warnings.warn('Maximum grid coordinate is larger than the mirror radius of curvature, ' +
                      'these will be set to zero.')
        r[r > abs(radius)] = 0
    # Sag of the spherical surface, doubled because reflection traverses the
    # height difference twice.
    height = 2 * (radius - (radius**2 - r**2)**(1/2))
    phs = np.exp(1j * 2*np.pi/wvlnt * height)
    return phs
| chrisark7/pylase | pylase/fft_propagation.py | Python | gpl-3.0 | 5,424 | [
"Gaussian"
] | 3218d093f693653c4f22c1ae1e5b0c60d1cf234c96e9569266de61094a51d95f |
# -*- coding: utf-8 -*-
# @Author: YangZhou
# @Date: 2017-06-02 18:39:05
# @Last Modified by: YangZhou
# @Last Modified time: 2017-06-20 14:57:30
from __future__ import print_function
import os
import aces.config as config
from .inequality import inequality
import aces.tools as tl
from ase.io import read
from aces.env import SRCHOME
def getRatio(path):
    """Return the fraction of atoms of the second type in a LAMMPS data file.

    Parses the header for the atom and atom-type counts, scans forward to the
    "Atoms" section and tallies the per-type counts from column 2 of each
    atom line.

    Args:
        path (str): path to a LAMMPS data file.

    Returns:
        float: count(type 2) / natoms, or 0.0 when the file does not exist
            or only one atom type is declared.
    """
    if not os.path.exists(path):
        return 0.0
    # BUGFIX: `fp.next()` is the removed Python 2 file-iterator protocol;
    # use the builtin next(). Also close the file deterministically.
    with open(path, "r") as fp:
        next(fp)  # skip the title/comment line
        natom = int(next(fp).split()[0])
        ntype = int(next(fp).split()[0])
        if ntype == 1:
            return 0.0
        # Scan (at most 20 lines) for the "Atoms" section header.
        n = 0
        label = ""
        while label != "Atoms" and n < 20:
            label = next(fp).strip()
            n += 1
        next(fp)  # skip the blank line following "Atoms"
        counts = [0.0] * ntype
        for line in fp:
            # Column 2 of an atom line is the (1-based) atom type.
            atom_type = int(line.split()[1])
            counts[atom_type - 1] += 1
        return float(counts[1]) / natom
def getQueryInfo(workPath, pid, runTime):
    """Return (percent_complete, status) for a queued simulation job.

    Inspects the tail of the job's log file for the current step count and
    queries the PBS scheduler (`qstat`) for the job state.

    Args:
        workPath (str): directory containing the job's log.out.
        pid: scheduler job id (any string-able value; digits are extracted).
        runTime (int): total number of steps the run is expected to take.

    Returns:
        tuple(str, str): (percentage string, one-letter PBS status code).
    """
    # BUGFIX: on Python 3 `filter` returns a lazy iterator, so formatting it
    # with %s below would embed "<filter object ...>" into the qstat command;
    # join the digit characters back into a string.
    pid = "".join(filter(str.isdigit, str(pid)))
    lastline = tl.shell_exec("tail -3 %s/log.out" % workPath)
    qstat = tl.shell_exec("qstat %s 2>&1|tail -1 " % pid)
    step = lastline.split()[0]
    if step.isdigit():
        percent = "%.1f%%" % (float(step) / runTime * 100)
    else:
        percent = "0"
    if(qstat.find("Unknown Job Id") >= 0):  # job no longer in the queue
        time = "complete"
        if(lastline.find("builds") >= 0):
            # LAMMPS prints its neighbor-list "builds" summary on success.
            status = "C"
            percent = "100%"
        else:  # abnormal exit
            status = "E"
    else:  # still running or waiting (R/Q/C)
        time, status, queue = qstat.split()[3:6]
    info = tl.shell_exec("qstat -f %s 2>&1|grep nodes" % pid)
    info = info.split()[2]
    return (percent, status)
def kappa():
    """Parse the thermal conductivity from the last line of result.txt.

    Returns:
        The text after '=' on the last line, or 0.0 when no '=' is present
        (e.g. the file is missing or the run has not finished).
    """
    last_line = tl.shell_exec("tail -1 result.txt 2>err;")
    parts = last_line.split('=')
    if len(parts) <= 1:
        return 0.0
    return parts[1]
def tEnerty():
    """Return the total energy parsed from the minimize log.

    Reads the line 22 lines from the end of minimize/log.out and returns its
    second whitespace-separated token as a float.

    Returns:
        float: the total energy, or 0.0 when the line could not be parsed.
    """
    totalEline = tl.shell_exec("cd minimize;tail -22 log.out| head -1;")
    a = totalEline.split()
    if(len(a) > 1):
        # BUGFIX: previously assigned the literal list [1] instead of the
        # parsed token a[1], so float(totalE) raised TypeError whenever the
        # line was present.
        return float(a[1])
    return 0.0
def nAtom():
    """Return the atom count reported in the minimize log.

    Returns:
        The atom count as a string of digits, or the int 0 when it could not
        be parsed (return type mirrors the original mixed-type contract).
    """
    Natomline = tl.shell_exec("cd minimize;grep atoms log.out ;")
    Natom = Natomline.split()[0]
    # BUGFIX: `Natom > 0` compared a str with an int, which raises TypeError
    # on Python 3; convert to int for the comparison.
    if(Natom.isdigit() and int(Natom) > 0):
        return Natom
    return 0
def tDisorder():
    # disorder degree*/
    # Runs the external in.disorder LAMMPS script inside minimize/disorder
    # and parses the last line of disorder.txt for the two disorder metrics.
    # Note: mutates the process working directory and restores it at the end.
    now = tl.pwd()
    tl.cd('minimize')
    tl.mkcd('disorder')
    disorderLine = tl.shell_exec(
        "cp %s" %
        SRCHOME +
        "/in.disorder .;" +
        config.lammps +
        " <in.disorder 2>err 1>log;tail -1 disorder.txt 2>err;")
    # Tokens 2-3 of the last line are (disorder, rd); pad with an empty
    # string when only one value was emitted so the unpack below succeeds.
    k = disorderLine.split()[1:3]
    if len(k) == 1:
        k.append("")
    disorder, rd = k
    tl.cd(now)
    return (disorder, rd)
def drawStructure():
    """Render the minimized structure to minimize.png.

    Only small systems (fewer than 200 atoms) are drawn, to keep the
    rendering fast and the image readable.
    """
    structure = read('minimize/range', format='lammps')
    if len(structure) >= 200:
        return
    structure.write('minimize.png')
def ineq(m):
    """Compute the inequality (non-equilibrium) measure for a material.

    Only the "CN-small" species is supported; every other species returns
    0.0 without touching the filesystem. The computation runs inside
    minimize/nonequ and the working directory is restored afterwards.
    """
    now = tl.pwd()
    if m.species not in ["CN-small"]:
        return 0.0
    tl.cd('minimize')
    tl.mkdir('nonequ')
    tl.cd('nonequ')
    result = inequality().run()
    tl.cd(now)
    return result
| vanceeasleaf/aces | aces/runners/mdTc/query.py | Python | gpl-2.0 | 3,194 | [
"ASE",
"LAMMPS"
] | 8b6552da3f7c974344448b2bb73f0064f55d0df0991e3b97d9e643b53bd04b9a |
"""Hamiltonian systems encapsulating energy functions and their derivatives."""
from abc import ABC, abstractmethod
import numpy as np
from mici.states import cache_in_state, cache_in_state_with_aux
import mici.matrices as matrices
from mici.autodiff import autodiff_fallback
class System(ABC):
    r"""Base class for Hamiltonian systems.
    The Hamiltonian function \(h\) is assumed to have the general form
    \[ h(q, p) = h_1(q) + h_2(q, p) \]
    where \(q\) and \(p\) are the position and momentum variables respectively,
    and \(h_1\) and \(h_2\) Hamiltonian component functions. The exact
    Hamiltonian flow for the \(h_1\) component can be always be computed as it
    depends only on the position variable however depending on the form of
    \(h_2\) the corresponding exact Hamiltonian flow may or may not be
    simulable.
    By default \(h_1\) is assumed to correspond to the negative logarithm of an
    unnormalized density on the position variables with respect to the Lebesgue
    measure, with the corresponding distribution on the position space being
    the target distribution it is wished to draw approximate samples from.
    """
    def __init__(self, neg_log_dens, grad_neg_log_dens=None):
        """
        Args:
            neg_log_dens (Callable[[array], float]): Function which given a
                position array returns the negative logarithm of an
                unnormalized probability density on the position space with
                respect to the Lebesgue measure, with the corresponding
                distribution on the position space being the target
                distribution it is wished to draw approximate samples from.
            grad_neg_log_dens (
                    None or Callable[[array], array or Tuple[array, float]]):
                Function which given a position array returns the derivative of
                `neg_log_dens` with respect to the position array argument.
                Optionally the function may instead return a 2-tuple of values
                with the first being the array corresponding to the derivative
                and the second being the value of the `neg_log_dens` evaluated
                at the passed position array. If `None` is passed (the default)
                an automatic differentiation fallback will be used to attempt
                to construct the derivative of `neg_log_dens` automatically.
        """
        self._neg_log_dens = neg_log_dens
        # When no explicit gradient is supplied, try to construct one via an
        # automatic-differentiation framework (raises if none is available).
        self._grad_neg_log_dens = autodiff_fallback(
            grad_neg_log_dens, neg_log_dens, "grad_and_value", "grad_neg_log_dens"
        )
    # Memoized per chain state keyed on the position variable: repeated
    # evaluations at the same state reuse the cached value.
    @cache_in_state("pos")
    def neg_log_dens(self, state):
        """Negative logarithm of unnormalized density of target distribution.
        Args:
            state (mici.states.ChainState): State to compute value at.
        Returns:
            float: Value of computed negative log density.
        """
        return self._neg_log_dens(state.pos)
    # The wrapped gradient function may also return the neg_log_dens value as
    # an auxiliary output; cache_in_state_with_aux stores both.
    @cache_in_state_with_aux("pos", "neg_log_dens")
    def grad_neg_log_dens(self, state):
        """Derivative of negative log density with respect to position.
        Args:
            state (mici.states.ChainState): State to compute value at.
        Returns:
            array: Value of `neg_log_dens(state)` derivative with respect to
                `state.pos`.
        """
        return self._grad_neg_log_dens(state.pos)
    def h1(self, state):
        """Hamiltonian component depending only on position.
        Args:
            state (mici.states.ChainState): State to compute value at.
        Returns:
            float: Value of `h1` Hamiltonian component.
        """
        return self.neg_log_dens(state)
    def dh1_dpos(self, state):
        """Derivative of `h1` Hamiltonian component with respect to position.
        Args:
            state (mici.states.ChainState): State to compute value at.
        Returns:
            array: Value of computed `h1` derivative.
        """
        return self.grad_neg_log_dens(state)
    def h1_flow(self, state, dt):
        """Apply exact flow map corresponding to `h1` Hamiltonian component.
        `state` argument is modified in place.
        Args:
            state (mici.states.ChainState): State to start flow at.
            dt (float): Time interval to simulate flow for.
        """
        # h1 depends only on position, so its exact flow leaves the position
        # unchanged and shifts the momentum along the (negative) gradient.
        state.mom -= dt * self.dh1_dpos(state)
    @abstractmethod
    def h2(self, state):
        """Hamiltonian component depending on momentum and optionally position.
        Args:
            state (mici.states.ChainState): State to compute value at.
        Returns:
            float: Value of `h2` Hamiltonian component.
        """
    @abstractmethod
    def dh2_dmom(self, state):
        """Derivative of `h2` Hamiltonian component with respect to momentum.
        Args:
            state (mici.states.ChainState): State to compute value at.
        Returns:
            array: Value of `h2(state)` derivative with respect to `state.pos`.
        """
    def h(self, state):
        """Hamiltonian function for system.
        Args:
            state (mici.states.ChainState): State to compute value at.
        Returns:
            float: Value of Hamiltonian.
        """
        return self.h1(state) + self.h2(state)
    def dh_dpos(self, state):
        """Derivative of Hamiltonian with respect to position.
        Args:
            state (mici.states.ChainState): State to compute value at.
        Returns:
            array: Value of `h(state)` derivative with respect to `state.pos`.
        """
        # Subclasses whose h2 component depends on position expose an optional
        # dh2_dpos method; otherwise only the h1 term contributes.
        if hasattr(self, "dh2_dpos"):
            return self.dh1_dpos(state) + self.dh2_dpos(state)
        else:
            return self.dh1_dpos(state)
    def dh_dmom(self, state):
        """Derivative of Hamiltonian with respect to momentum.
        Args:
            state (mici.states.ChainState): State to compute value at.
        Returns:
            array: Value of `h(state)` derivative with respect to `state.mom`.
        """
        return self.dh2_dmom(state)
    @abstractmethod
    def sample_momentum(self, state, rng):
        """
        Sample a momentum from its conditional distribution given a position.
        Args:
            state (mici.states.ChainState): State defining position to
               condition on.
        Returns:
            mom (array): Sampled momentum.
        """
class EuclideanMetricSystem(System):
    r"""Hamiltonian system with a Euclidean metric on the position space.
    Here Euclidean metric is defined to mean a metric with a fixed positive
    definite matrix representation \(M\). The momentum variables are taken to
    be independent of the position variables and with a zero-mean Gaussian
    marginal distribution with covariance specified by \(M\), so that the
    \(h_2\) Hamiltonian component is
    \[ h_2(q, p) = \frac{1}{2} p^T M^{-1} p \]
    where \(q\) and \(p\) are the position and momentum variables respectively.
    The \(h_1\) Hamiltonian component function is
    \[ h_1(q) = \ell(q) \]
    where \(\ell(q)\) is the negative log (unnormalized) density of
    the target distribution with respect to the Lebesgue measure.
    """
    def __init__(self, neg_log_dens, metric=None, grad_neg_log_dens=None):
        """
        Args:
            neg_log_dens (Callable[[array], float]): Function which given a
                position array returns the negative logarithm of an
                unnormalized probability density on the position space with
                respect to the Lebesgue measure, with the corresponding
                distribution on the position space being the target
                distribution it is wished to draw approximate samples from.
            metric (None or array or PositiveDefiniteMatrix): Matrix object
                corresponding to matrix representation of metric on position
                space and covariance of Gaussian marginal distribution on
                momentum vector. If `None` is passed (the default), the
                identity matrix will be used. If a 1D array is passed then this
                is assumed to specify a metric with positive diagonal matrix
                representation and the array the matrix diagonal. If a 2D array
                is passed then this is assumed to specify a metric with a dense
                positive definite matrix representation specified by the array.
                Otherwise if the value is a subclass of
                `mici.matrices.PositiveDefiniteMatrix` it is assumed to
                directly specify the metric matrix representation.
            grad_neg_log_dens (
                    None or Callable[[array], array or Tuple[array, float]]):
                Function which given a position array returns the derivative of
                `neg_log_dens` with respect to the position array argument.
                Optionally the function may instead return a 2-tuple of values
                with the first being the array corresponding to the derivative
                and the second being the value of the `neg_log_dens` evaluated
                at the passed position array. If `None` is passed (the default)
                an automatic differentiation fallback will be used to attempt
                to construct the derivative of `neg_log_dens` automatically.
        """
        super().__init__(neg_log_dens, grad_neg_log_dens)
        # Normalize the `metric` argument to a mici.matrices matrix object.
        if metric is None:
            self.metric = matrices.IdentityMatrix()
        elif isinstance(metric, np.ndarray):
            if metric.ndim == 1:
                # 1D array interpreted as the diagonal of a diagonal metric.
                self.metric = matrices.PositiveDiagonalMatrix(metric)
            elif metric.ndim == 2:
                # 2D array interpreted as a dense positive-definite metric.
                self.metric = matrices.DensePositiveDefiniteMatrix(metric)
            else:
                raise ValueError(
                    "If NumPy ndarray value is used for `metric`"
                    " must be either 1D (diagonal matrix) or 2D "
                    "(dense positive definite matrix)"
                )
        else:
            # Assume a mici.matrices matrix object was passed directly.
            self.metric = metric
    @cache_in_state("mom")
    def h2(self, state):
        # 0.5 * p^T M^{-1} p, reusing the cached dh2_dmom = M^{-1} p value.
        return 0.5 * state.mom @ self.dh2_dmom(state)
    @cache_in_state("mom")
    def dh2_dmom(self, state):
        return self.metric.inv @ state.mom
    def h2_flow(self, state, dt):
        """Apply exact flow map corresponding to `h2` Hamiltonian component.
        `state` argument is modified in place.
        Args:
            state (mici.states.ChainState): State to start flow at.
            dt (float): Time interval to simulate flow for.
        """
        state.pos += dt * self.dh2_dmom(state)
    def dh2_flow_dmom(self, dt):
        """Derivatives of `h2_flow` flow map with respect to input momentum.
        Args:
            dt (float): Time interval flow simulated for.
        Returns:
            dpos_dmom (mici.matrices.Matrix): Matrix representing derivative
                (Jacobian) of position output of `h2_flow` with respect to the
                value of the momentum component of the initial input state.
            dmom_dmom (mici.matrices.Matrix): Matrix representing derivative
                (Jacobian) of momentum output of `h2_flow` with respect to the
                value of the momentum component of the initial input state.
        """
        # h2_flow is linear in the momentum: the position gains dt * M^{-1} p
        # while the momentum itself is left unchanged.
        return (dt * self.metric.inv, matrices.IdentityMatrix(self.metric.shape[0]))
    def sample_momentum(self, state, rng):
        # p ~ Normal(0, M): scale a standard normal draw by the metric matrix
        # square root.
        return self.metric.sqrt @ rng.standard_normal(state.pos.shape)
class GaussianEuclideanMetricSystem(EuclideanMetricSystem):
    r"""Euclidean Hamiltonian system with a tractable Gaussian component.
    Here Euclidean metric is defined to mean a metric with a fixed positive
    definite matrix representation \(M\). The momentum variables are taken to
    be independent of the position variables and with a zero-mean Gaussian
    marginal distribution with covariance specified by \(M\).
    Additionally the target distribution on the position variables is assumed
    to be defined by an unnormalized density with respect to the standard
    Gaussian measure on the position space (with identity covariance and zero
    mean), with the Hamiltonian component \(h_1\) corresponding to the negative
    logarithm of this density rather than the density with respect to the
    Lebesgue measure on the position space, i.e.
    \[ h_1(q) = \ell(q) - \frac{1}{2} q^T q \]
    where \(q\) is the position and \(\ell(q)\) is the negative log
    (unnormalized) density of the target distribution with respect to the
    Lebesgue measure at \(q\). The Hamiltonian component function \(h_2\) is
    then assumed to have the form
    \[ h_2(q, p) = \frac{1}{2} q^T q + \frac{1}{2} p^T M^{-1} p \]
    where \(p\) is the momentum. In this case the Hamiltonian flow due to the
    quadratic \(h_2\) component can be solved for analytically, allowing an
    integrator to be defined using this alternative splitting of the
    Hamiltonian [1].
    References:
      1. Shahbaba, B., Lan, S., Johnson, W.O. and Neal, R.M., 2014. Split
         Hamiltonian Monte Carlo. Statistics and Computing, 24(3), pp.339-349.
    """
    def __init__(self, neg_log_dens, metric=None, grad_neg_log_dens=None):
        """
        Args:
            neg_log_dens (Callable[[array], float]): Function which given a
                position array returns the negative logarithm of an
                unnormalized probability density on the position space with
                respect to the standard Gaussian measure on the position space,
                with the corresponding distribution on the position space being
                the target distribution it is wished to draw approximate
                samples from.
            metric (None or array or PositiveDefiniteMatrix): Matrix object
                corresponding to matrix representation of metric on position
                space and covariance of Gaussian marginal distribution on
                momentum vector. If `None` is passed (the default), the
                identity matrix will be used. If a 1D array is passed then this
                is assumed to specify a metric with positive diagonal matrix
                representation and the array the matrix diagonal. If a 2D array
                is passed then this is assumed to specify a metric with a dense
                positive definite matrix representation specified by the array.
                Otherwise if the value is a subclass of
                `mici.matrices.PositiveDefiniteMatrix` it is assumed to
                directly specify the metric matrix representation.
            grad_neg_log_dens (
                    None or Callable[[array], array or Tuple[array, float]]):
                Function which given a position array returns the derivative of
                `neg_log_dens` with respect to the position array argument.
                Optionally the function may instead return a 2-tuple of values
                with the first being the array corresponding to the derivative
                and the second being the value of the `neg_log_dens` evaluated
                at the passed position array. If `None` is passed (the default)
                an automatic differentiation fallback will be used to attempt
                to construct the derivative of `neg_log_dens` automatically.
        """
        super().__init__(neg_log_dens, metric, grad_neg_log_dens)
    def h2(self, state):
        # h2(q, p) = 0.5 q^T q + 0.5 p^T M^{-1} p
        return (
            0.5 * state.pos @ state.pos + 0.5 * state.mom @ self.metric.inv @ state.mom
        )
    @cache_in_state("mom")
    def dh2_dmom(self, state):
        return self.metric.inv @ state.mom
    # BUGFIX: this derivative depends on the *position* variable, so the
    # cached value must be invalidated when `state.pos` changes — previously
    # it was cached with `cache_in_state("mom")`, which could return a stale
    # position after a position update with unchanged momentum.
    @cache_in_state("pos")
    def dh2_dpos(self, state):
        return state.pos
    def h2_flow(self, state, dt):
        # Exact flow of the quadratic h2: in the eigenbasis of the metric the
        # dynamic decouples into independent harmonic oscillators with angular
        # frequency omega = eigval**-0.5, each rotated analytically by dt.
        omega = 1.0 / self.metric.eigval ** 0.5
        sin_omega_dt, cos_omega_dt = np.sin(omega * dt), np.cos(omega * dt)
        eigvec_T_pos = self.metric.eigvec.T @ state.pos
        eigvec_T_mom = self.metric.eigvec.T @ state.mom
        state.pos = self.metric.eigvec @ (
            cos_omega_dt * eigvec_T_pos + (sin_omega_dt * omega) * eigvec_T_mom
        )
        state.mom = self.metric.eigvec @ (
            cos_omega_dt * eigvec_T_mom - (sin_omega_dt / omega) * eigvec_T_pos
        )
    def dh2_flow_dmom(self, dt):
        # Jacobians of the analytic h2_flow map with respect to the initial
        # momentum, expressed in the eigenbasis of the metric.
        omega = 1.0 / self.metric.eigval ** 0.5
        sin_omega_dt, cos_omega_dt = np.sin(omega * dt), np.cos(omega * dt)
        return (
            matrices.EigendecomposedSymmetricMatrix(
                self.metric.eigvec, sin_omega_dt * omega
            ),
            matrices.EigendecomposedSymmetricMatrix(self.metric.eigvec, cos_omega_dt),
        )
class ConstrainedEuclideanMetricSystem(EuclideanMetricSystem):
    r"""Base class for Euclidean Hamiltonian systems subject to constraints.
    The (constrained) position space is assumed to be a differentiable manifold
    embedded with a \(Q\)-dimensional ambient Euclidean space. The \(Q-C\)
    dimensional manifold \(\mathcal{M}\) is implicitly defined by an equation
    \(\mathcal{M} = \lbrace q \in \mathbb{R}^Q : c(q) = 0 \rbrace\) with
    \(c: \mathbb{R}^Q \to \mathbb{R}^C\) the *constraint function*.
    The ambient Euclidean space is assumed to be equipped with a metric with
    constant positive-definite matrix representation \(M\) which further
    specifies the covariance of the zero-mean Gaussian distribution
    \(\mathcal{N}(0, M)\) on the *unconstrained* momentum (co-)vector \(p\)
    with corresponding \(h_2\) Hamiltonian component defined as
    \[ h_2(q, p) = \frac{1}{2} p^T M^{-1} p. \]
    The time-derivative of the constraint equation implies a further set of
    constraints on the momentum \(q\) with \( \partial c(q) M^{-1} p = 0\)
    at all time points, corresponding to the momentum (velocity) being in the
    co-tangent space (tangent space) to the manifold.
    The target distribution is either assumed to be directly specified with
    unnormalized density \(\exp(-\ell(q))\) with respect to the Hausdorff
    measure on the manifold (under the metric induced from the ambient metric)
    with in this case the \(h_1\) Hamiltonian component then simply
    \[ h_1(q) = \ell(q), \]
    or alternatively it is assumed a prior distribution on the position \(q\)
    with density \(\exp(-\ell(q))\) with respect to the Lebesgue measure on
    the ambient space is specifed and the target distribution is the posterior
    distribution on \(q\) when conditioning on the event \(c(q) = 0\). The
    negative logarithm of the posterior distribution density with respect to
    the Hausdorff measure (and so \(h_1\) Hamiltonian component) is then
    \[
      h_1(q) =
      \ell(q) + \frac{1}{2} \log\left|\partial c(q)M^{-1}\partial c(q)^T\right|
    \]
    with an additional second *Gram matrix* determinant term to give the
    correct density with respect to the Hausdorff measure on the manifold.
    Due to the requirement to enforce the constraints on the position and
    momentum, a constraint-preserving numerical integrator needs to be used
    when simulating the Hamiltonian dynamic associated with the system, e.g.
    `mici.integrators.ConstrainedLeapfrogIntegrator`.
    References:
      1. Lelièvre, T., Rousset, M. and Stoltz, G., 2019. Hybrid Monte Carlo
         methods for sampling probability measures on submanifolds. Numerische
         Mathematik, 143(2), pp.379-421.
      2. Graham, M.M. and Storkey, A.J., 2017. Asymptotically exact inference
         in differentiable generative models. Electronic Journal of Statistics,
         11(2), pp.5105-5164.
    """
    def __init__(
        self,
        neg_log_dens,
        constr,
        metric=None,
        dens_wrt_hausdorff=True,
        grad_neg_log_dens=None,
        jacob_constr=None,
    ):
        """
        Args:
            neg_log_dens (Callable[[array], float]): Function which given a
                position array returns the negative logarithm of an
                unnormalized probability density on the constrained position
                space with respect to the Hausdorff measure on the constraint
                manifold (if `dens_wrt_hausdorff == True`) or alternatively the
                negative logarithm of an unnormalized probability density on
                the unconstrained (ambient) position space with respect to the
                Lebesgue measure. In the former case the target distribution it
                is wished to draw approximate samples from is assumed to be
                directly specified by the density function on the manifold. In
                the latter case the density function is instead taken to
                specify a prior distribution on the ambient space with the
                target distribution then corresponding to the posterior
                distribution when conditioning on the (zero Lebesgue measure)
                event `constr(pos) == 0`. This target posterior distribution
                has support on the differentiable manifold implicitly defined
                by the constraint equation, with density with respect to the
                Hausdorff measure on the manifold corresponding to the ratio of
                the prior density (specified by `neg_log_dens`) and the
                square-root of the determinant of the Gram matrix defined by
                    gram(q) = jacob_constr(q) @ inv(metric) @ jacob_constr(q).T
                where `jacob_constr` is the Jacobian of the constraint function
                `constr` and `metric` is the matrix representation of the
                metric on the ambient space.
            constr (Callable[[array], array]): Function which given a position
                array return as a 1D array the value of the (vector-valued)
                constraint function, the zero level-set of which implicitly
                defines the manifold the dynamic is simulated on.
            metric (None or array or PositiveDefiniteMatrix): Matrix object
                corresponding to matrix representation of metric on
                *unconstrained* position space and covariance of Gaussian
                marginal distribution on *unconstrained* momentum vector. If
                `None` is passed (the default), the identity matrix will be
                used. If a 1D array is passed then this is assumed to specify a
                metric with positive diagonal matrix representation and the
                array the matrix diagonal. If a 2D array is passed then this is
                assumed to specify a metric with a dense positive definite
                matrix representation specified by the array. Otherwise if the
                value is a `mici.matrices.PositiveDefiniteMatrix` subclass it
                is assumed to directly specify the metric matrix
                representation.
            dens_wrt_hausdorff (bool): Whether the `neg_log_dens` function
                specifies the (negative logarithm) of the density of the target
                distribution with respect to the Hausdorff measure on the
                manifold directly (True) or alternatively the negative
                logarithm of a density of a prior distriubtion on the
                unconstrained (ambient) position space with respect to the
                Lebesgue measure, with the target distribution then
                corresponding to the posterior distribution when conditioning
                on the event `const(pos) == 0` (False). Note that in the former
                case the base Hausdorff measure on the manifold depends on the
                metric defined on the ambient space, with the Hausdorff measure
                being defined with respect to the metric induced on the
                manifold from this ambient metric.
            grad_neg_log_dens (
                    None or Callable[[array], array or Tuple[array, float]]):
                Function which given a position array returns the derivative of
                `neg_log_dens` with respect to the position array argument.
                Optionally the function may instead return a 2-tuple of values
                with the first being the array corresponding to the derivative
                and the second being the value of the `neg_log_dens` evaluated
                at the passed position array. If `None` is passed (the default)
                an automatic differentiation fallback will be used to attempt
                to construct a function to compute the derivative (and value)
                of `neg_log_dens` automatically.
            jacob_constr (
                    None or Callable[[array], array or Tuple[array, array]]):
                Function which given a position array computes the Jacobian
                (matrix / 2D array of partial derivatives) of the output of the
                constraint function `c = constr(q)` with respect to the position
                array argument `q`, returning the computed Jacobian as a 2D
                array `jacob` with
                    jacob[i, j] = ∂c[i] / ∂q[j]
                Optionally the function may instead return a 2-tuple of values
                with the first being the array corresponding to the Jacobian and
                the second being the value of `constr` evaluated at the passed
                position array. If `None` is passed (the default) an automatic
                differentiation fallback will be used to attempt to construct a
                function to compute the Jacobian (and value) of `constr`
                automatically.
        """
        super().__init__(
            neg_log_dens=neg_log_dens,
            metric=metric,
            grad_neg_log_dens=grad_neg_log_dens,
        )
        self._constr = constr
        self.dens_wrt_hausdorff = dens_wrt_hausdorff
        # As with the density gradient, fall back to automatic differentiation
        # for the constraint Jacobian when none is supplied explicitly.
        self._jacob_constr = autodiff_fallback(
            jacob_constr, constr, "jacobian_and_value", "jacob_constr"
        )
    @cache_in_state("pos")
    def constr(self, state):
        """Constraint function at the current position.
        Args:
            state (mici.states.ChainState): State to compute value at.
        Returns:
            array: Value of `constr(state.pos)` as 1D array.
        """
        return self._constr(state.pos)
    @cache_in_state_with_aux("pos", "constr")
    def jacob_constr(self, state):
        """Jacobian of constraint function at the current position.
        Args:
            state (mici.states.ChainState): State to compute value at.
        Returns:
            array: Value of Jacobian of `constr(state.pos)` as 2D array.
        """
        return self._jacob_constr(state.pos)
    @abstractmethod
    def jacob_constr_inner_product(
        self, jacob_constr_1, inner_product_matrix, jacob_constr_2=None
    ):
        """Compute inner product of rows of constraint Jacobian matrices.
        Computes `jacob_constr_1 @ inner_product_matrix @ jacob_constr_2.T`
        potentially exploiting any structure / sparsity in `jacob_constr_1`,
        `jacob_constr_2` and `inner_product_matrix`.
        Args:
            jacob_constr_1 (Matrix): First constraint Jacobian in product.
            inner_product_matrix (Matrix): Positive-definite matrix defining
                inner-product between rows of two constraint Jacobians.
            jacob_constr_2 (None or Matrix): Second constraint Jacobian in
                product. Defaults to `jacob_constr_1` if set to `None`.
        Returns
            Matrix: Object corresponding to computed inner products of
               the constraint Jacobian rows.
        """
    @cache_in_state("pos")
    def gram(self, state):
        """Gram matrix at current position.
        The Gram matrix as a position `q` is defined as
            gram(q) = jacob_constr(q) @ inv(metric) @ jacob_constr(q).T
        where `jacob_constr` is the Jacobian of the constraint function
        `constr` and `metric` is the matrix representation of the metric on the
        ambient space.
        Args:
            state (mici.states.ChainState): State to compute value at.
        Returns:
            mici.matrices.PositiveDefiniteMatrix: Gram matrix as matrix object.
        """
        return self.jacob_constr_inner_product(
            self.jacob_constr(state), self.metric.inv
        )
    def inv_gram(self, state):
        """Inverse of Gram matrix at current position.
        Args:
            state (mici.states.ChainState): State to compute value at.
        Returns:
            mici.matrices.PositiveDefiniteMatrix: Inverse of Gram matrix as
                matrix object.
        """
        return self.gram(state).inv
    def log_det_sqrt_gram(self, state):
        """Value of (half of) log-determinant of Gram matrix."""
        return 0.5 * self.gram(state).log_abs_det
    @abstractmethod
    def grad_log_det_sqrt_gram(self, state):
        """Derivative of (half of) log-determinant of Gram matrix wrt position.
        Args:
            state (mici.states.ChainState): State to compute value at.
        Returns:
            array: Value of `log_det_sqrt_gram(state)` derivative with respect
                to `state.pos`.
        """
    def h1(self, state):
        # When the density is specified wrt the ambient Lebesgue measure the
        # Gram log-determinant term corrects it to a density wrt the Hausdorff
        # measure on the manifold.
        if self.dens_wrt_hausdorff:
            return self.neg_log_dens(state)
        else:
            return self.neg_log_dens(state) + self.log_det_sqrt_gram(state)
    def dh1_dpos(self, state):
        if self.dens_wrt_hausdorff:
            return self.grad_neg_log_dens(state)
        else:
            return self.grad_neg_log_dens(state) + self.grad_log_det_sqrt_gram(state)
    def project_onto_cotangent_space(self, mom, state):
        """Project a momentum on to the co-tangent space at a position.
        Args:
            mom (array): Momentum (co-)vector as 1D array to project on to
                co-tangent space.
            state (mici.states.ChainState): State definining position on the
                manifold to project in to the co-tangent space of.
        Returns:
            array: Projected momentum in the co-tangent space at `state.pos`.
        """
        # Use parenthesis to force right-to-left evaluation to avoid
        # matrix-matrix products
        mom -= self.jacob_constr(state).T @ (
            self.inv_gram(state) @ (self.jacob_constr(state) @ (self.metric.inv @ mom))
        )
        return mom
    def sample_momentum(self, state, rng):
        # Draw an unconstrained Gaussian momentum then project it on to the
        # co-tangent space so the momentum constraint is satisfied.
        mom = super().sample_momentum(state, rng)
        mom = self.project_onto_cotangent_space(mom, state)
        return mom
class DenseConstrainedEuclideanMetricSystem(ConstrainedEuclideanMetricSystem):
    r"""Euclidean Hamiltonian system subject to a dense set of constraints.
    See `ConstrainedEuclideanMetricSystem` for more details about constrained
    systems.
    """
    def __init__(
        self,
        neg_log_dens,
        constr,
        metric=None,
        dens_wrt_hausdorff=True,
        grad_neg_log_dens=None,
        jacob_constr=None,
        mhp_constr=None,
    ):
        """
        Args:
            neg_log_dens (Callable[[array], float]): Function which given a
                position array returns the negative logarithm of an
                unnormalized probability density on the constrained position
                space with respect to the Hausdorff measure on the constraint
                manifold (if `dens_wrt_hausdorff == True`) or alternatively the
                negative logarithm of an unnormalized probability density on
                the unconstrained (ambient) position space with respect to the
                Lebesgue measure. In the former case the target distribution it
                is wished to draw approximate samples from is assumed to be
                directly specified by the density function on the manifold. In
                the latter case the density function is instead taken to
                specify a prior distribution on the ambient space with the
                target distribution then corresponding to the posterior
                distribution when conditioning on the (zero Lebesgue measure)
                event `constr(pos) == 0`. This target posterior distribution
                has support on the differentiable manifold implicitly defined
                by the constraint equation, with density with respect to the
                Hausdorff measure on the manifold corresponding to the ratio of
                the prior density (specified by `neg_log_dens`) and the
                square-root of the determinant of the Gram matrix defined by
                    gram(q) = jacob_constr(q) @ inv(metric) @ jacob_constr(q).T
                where `jacob_constr` is the Jacobian of the constraint function
                `constr` and `metric` is the matrix representation of the
                metric on the ambient space.
            constr (Callable[[array], array]): Function which given a position
                array return as a 1D array the value of the (vector-valued)
                constraint function, the zero level-set of which implicitly
                defines the manifold the dynamic is simulated on.
            metric (None or array or PositiveDefiniteMatrix): Matrix object
                corresponding to matrix representation of metric on
                *unconstrained* position space and covariance of Gaussian
                marginal distribution on *unconstrained* momentum vector. If
                `None` is passed (the default), the identity matrix will be
                used. If a 1D array is passed then this is assumed to specify a
                metric with positive diagonal matrix representation and the
                array the matrix diagonal. If a 2D array is passed then this is
                assumed to specify a metric with a dense positive definite
                matrix representation specified by the array. Otherwise if the
                value is a `mici.matrices.PositiveDefiniteMatrix` subclass it
                is assumed to directly specify the metric matrix
                representation.
            dens_wrt_hausdorff (bool): Whether the `neg_log_dens` function
                specifies the (negative logarithm) of the density of the target
                distribution with respect to the Hausdorff measure on the
                manifold directly (True) or alternatively the negative
                logarithm of a density of a prior distribution on the
                unconstrained (ambient) position space with respect to the
                Lebesgue measure, with the target distribution then
                corresponding to the posterior distribution when conditioning
                on the event `constr(pos) == 0` (False). Note that in the
                former case the base Hausdorff measure on the manifold depends
                on the metric defined on the ambient space, with the Hausdorff
                measure being defined with respect to the metric induced on the
                manifold from this ambient metric.
            grad_neg_log_dens (
                    None or Callable[[array], array or Tuple[array, float]]):
                Function which given a position array returns the derivative of
                `neg_log_dens` with respect to the position array argument.
                Optionally the function may instead return a 2-tuple of values
                with the first being the array corresponding to the derivative
                and the second being the value of the `neg_log_dens` evaluated
                at the passed position array. If `None` is passed (the default)
                an automatic differentiation fallback will be used to attempt
                to construct a function to compute the derivative (and value)
                of `neg_log_dens` automatically.
            jacob_constr (
                    None or Callable[[array], array or Tuple[array, array]]):
                Function which given a position array computes the Jacobian
                (matrix / 2D array of partial derivatives) of the output of the
                constraint function `c = constr(q)` with respect to the
                position array argument `q`, returning the computed Jacobian as
                a 2D array `jacob` with
                    jacob[i, j] = ∂c[i] / ∂q[j]
                Optionally the function may instead return a 2-tuple of values
                with the first being the array corresponding to the Jacobian
                and the second being the value of `constr` evaluated
                at the passed position array. If `None` is passed (the default)
                an automatic differentiation fallback will be used to attempt
                to construct a function to compute the Jacobian (and value) of
                `constr` automatically.
            mhp_constr (None or
                    Callable[[array], Callable[[array], array]] or
                    Callable[[array], Tuple[Callable, array, array]]):
                Function which given a position array returns another function
                which takes a 2D array as an argument and returns the
                *matrix-Hessian-product* (MHP) of the constraint function
                `constr` with respect to the position array argument. The MHP
                is here defined as a function of a `(dim_constr, dim_pos)`
                shaped 2D array `m`
                    mhp(m) = sum(m[:, :, None] * hess[:, :, :], axis=(0, 1))
                where `hess` is the `(dim_constr, dim_pos, dim_pos)` shaped
                vector-Hessian of `c = constr(q)` with respect to `q` i.e. the
                array of second-order partial derivatives of the function such
                that
                    hess[i, j, k] = ∂²c[i] / (∂q[j] ∂q[k])
                Optionally the function may instead return a 3-tuple of values
                with the first a function to compute a MHP of `constr`, the
                second a 2D array corresponding to the Jacobian of `constr`,
                and the third the value of `constr`, all evaluated at the
                passed position array. If `None` is passed (the default) an
                automatic differentiation fallback will be used to attempt to
                construct a function which calculates the MHP (and Jacobian and
                value) of `constr` automatically.
        """
        super().__init__(
            neg_log_dens=neg_log_dens,
            constr=constr,
            metric=metric,
            dens_wrt_hausdorff=dens_wrt_hausdorff,
            grad_neg_log_dens=grad_neg_log_dens,
            jacob_constr=jacob_constr,
        )
        # MHP of constraint function is only needed for the gradient of the
        # Gram matrix log-determinant term, which enters h1 only when the
        # density is specified wrt the Lebesgue measure on the ambient space.
        if not dens_wrt_hausdorff:
            self._mhp_constr = autodiff_fallback(
                mhp_constr, constr, "mhp_jacobian_and_value", "mhp_constr"
            )
    @cache_in_state_with_aux("pos", ("jacob_constr", "constr"))
    def mhp_constr(self, state):
        """Matrix-Hessian-product function of `constr` at the state position."""
        return self._mhp_constr(state.pos)
    def jacob_constr_inner_product(
        self, jacob_constr_1, inner_product_matrix, jacob_constr_2=None
    ):
        """Compute `jacob_constr_1 @ inner_product_matrix @ jacob_constr_2.T`,
        with `jacob_constr_2` defaulting to `jacob_constr_1`, wrapping the
        result in an appropriate structured matrix class."""
        if jacob_constr_2 is None or jacob_constr_2 is jacob_constr_1:
            return matrices.DensePositiveDefiniteMatrix(
                jacob_constr_1 @ (inner_product_matrix @ jacob_constr_1.T)
            )
        else:
            return matrices.DenseSquareMatrix(
                jacob_constr_1 @ (inner_product_matrix @ jacob_constr_2.T)
            )
    @cache_in_state("pos")
    def grad_log_det_sqrt_gram(self, state):
        """Derivative of (half of) log-determinant of Gram matrix wrt position."""
        # Evaluate MHP of constraint function before Jacobian as Jacobian value
        # will potentially be computed in 'forward' pass and cached
        mhp_constr = self.mhp_constr(state)
        return mhp_constr(
            self.inv_gram(state) @ self.jacob_constr(state) @ self.metric.inv
        )
class GaussianDenseConstrainedEuclideanMetricSystem(
    GaussianEuclideanMetricSystem, DenseConstrainedEuclideanMetricSystem
):
    r"""Gaussian Euclidean Hamiltonian system st. a dense set of constraints.
    See `ConstrainedEuclideanMetricSystem` for more details about constrained
    systems and `GaussianEuclideanMetricSystem` for Gaussian Euclidean metric
    systems.
    """
    def __init__(
        self,
        neg_log_dens,
        constr,
        metric=None,
        grad_neg_log_dens=None,
        jacob_constr=None,
        mhp_constr=None,
    ):
        """
        Args:
            neg_log_dens (Callable[[array], float]): Function which given a
                position array returns the negative logarithm of an
                unnormalized probability density on the unconstrained (ambient)
                position space with respect to the standard Gaussian measure.
                The density function is taken to specify a prior distribution
                on the ambient space with the target distribution then
                corresponding to the posterior distribution when conditioning
                on the (zero Lebesgue measure) event `constr(pos) == 0`. This
                target posterior distribution has support on the differentiable
                manifold implicitly defined by the constraint equation, with
                density with respect to the Hausdorff measure on the manifold
                corresponding to the ratio of the prior density (specified by
                `neg_log_dens`) and the square-root of the determinant of the
                Gram matrix defined by
                    gram(q) = jacob_constr(q) @ inv(metric) @ jacob_constr(q).T
                where `jacob_constr` is the Jacobian of the constraint function
                `constr` and `metric` is the matrix representation of the
                metric on the ambient space.
            constr (Callable[[array], array]): Function which given a position
                array return as a 1D array the value of the (vector-valued)
                constraint function, the zero level-set of which implicitly
                defines the manifold the dynamic is simulated on.
            metric (None or array or PositiveDefiniteMatrix): Matrix object
                corresponding to matrix representation of metric on
                *unconstrained* position space and covariance of Gaussian
                marginal distribution on *unconstrained* momentum vector. If
                `None` is passed (the default), the identity matrix will be
                used. If a 1D array is passed then this is assumed to specify a
                metric with positive diagonal matrix representation and the
                array the matrix diagonal. If a 2D array is passed then this is
                assumed to specify a metric with a dense positive definite
                matrix representation specified by the array. Otherwise if
                a subclass of `mici.matrices.PositiveDefiniteMatrix` it is
                assumed to directly specify the metric matrix representation.
            grad_neg_log_dens (
                    None or Callable[[array], array or Tuple[array, float]]):
                Function which given a position array returns the derivative of
                `neg_log_dens` with respect to the position array argument.
                Optionally the function may instead return a 2-tuple of values
                with the first being the array corresponding to the derivative
                and the second being the value of the `neg_log_dens` evaluated
                at the passed position array. If `None` is passed (the default)
                an automatic differentiation fallback will be used to attempt
                to construct a function to compute the derivative (and value)
                of `neg_log_dens` automatically.
            jacob_constr (
                    None or Callable[[array], array or Tuple[array, array]]):
                Function which given a position array computes the Jacobian
                (matrix / 2D array of partial derivatives) of the output of the
                constraint function `c = constr(q)` with respect to the
                position array argument `q`, returning the computed Jacobian as
                a 2D array `jacob` with
                    jacob[i, j] = ∂c[i] / ∂q[j]
                Optionally the function may instead return a 2-tuple of values
                with the first being the array corresponding to the Jacobian
                and the second being the value of `constr` evaluated
                at the passed position array. If `None` is passed (the default)
                an automatic differentiation fallback will be used to attempt
                to construct a function to compute the Jacobian (and value) of
                `constr` automatically.
            mhp_constr (None or
                    Callable[[array], Callable[[array], array]] or
                    Callable[[array], Tuple[Callable, array, array]]):
                Function which given a position array returns another function
                which takes a 2D array as an argument and returns the
                *matrix-Hessian-product* (MHP) of the constraint function
                `constr` with respect to the position array argument. The MHP
                is here defined as a function of a `(dim_constr, dim_pos)`
                shaped 2D array `m`
                    mhp(m) = sum(m[:, :, None] * hess[:, :, :], axis=(0, 1))
                where `hess` is the `(dim_constr, dim_pos, dim_pos)` shaped
                vector-Hessian of `c = constr(q)` with respect to `q` i.e. the
                array of second-order partial derivatives of the function such
                that
                    hess[i, j, k] = ∂²c[i] / (∂q[j] ∂q[k])
                Optionally the function may instead return a 3-tuple of values
                with the first a function to compute a MHP of `constr`, the
                second a 2D array corresponding to the Jacobian of `constr`,
                and the third the value of `constr`, all evaluated at the
                passed position array. If `None` is passed (the default) an
                automatic differentiation fallback will be used to attempt to
                construct a function which calculates the MHP (and Jacobian and
                value) of `constr` automatically.
        """
        DenseConstrainedEuclideanMetricSystem.__init__(
            self,
            neg_log_dens=neg_log_dens,
            constr=constr,
            metric=metric,
            dens_wrt_hausdorff=False,
            grad_neg_log_dens=grad_neg_log_dens,
            jacob_constr=jacob_constr,
            mhp_constr=mhp_constr,
        )
    def jacob_constr_inner_product(
        self, jacob_constr_1, inner_product_matrix, jacob_constr_2=None
    ):
        """Compute `jacob_constr_1 @ inner_product_matrix @ jacob_constr_2.T`,
        with `jacob_constr_2` defaulting to `jacob_constr_1`, wrapping the
        result in an appropriate structured matrix class.
        NOTE(review): in the symmetric case this wraps the product as a
        `DenseSymmetricMatrix` rather than the `DensePositiveDefiniteMatrix`
        used by the parent class - presumably because `inner_product_matrix`
        here is not guaranteed positive definite; confirm against callers.
        """
        if jacob_constr_2 is None or jacob_constr_2 is jacob_constr_1:
            return matrices.DenseSymmetricMatrix(
                jacob_constr_1 @ (inner_product_matrix @ jacob_constr_1.T)
            )
        else:
            return matrices.DenseSquareMatrix(
                jacob_constr_1 @ (inner_product_matrix @ jacob_constr_2.T)
            )
class RiemannianMetricSystem(System):
    r"""Riemannian Hamiltonian system with a position-dependent metric.
    This class allows for metric matrix representations of any generic type.
    In most cases a specialized subclass such as `DenseRiemannianMetricSystem`,
    `CholeskyFactoredRiemannianMetricSystem`, `DiagonalRiemannianMetricSystem`,
    `ScalarRiemannianMetricSystem` or `SoftAbsRiemannianMetricSystem` will
    provide a simpler method of constructing a system with a metric matrix
    representation of a specific type.
    The position space is assumed to be a Riemannian manifold with a metric
    with position-dependent positive definite matrix-representation \(M(q)\)
    where \(q\) is a position vector. The momentum \(p\) is then taken to have
    a zero-mean Gaussian conditional distribution given the position \(q\),
    with covariance \(M(q)\), i.e. \(p \sim \mathcal{N}(0, M(q))\) [1].
    The \(h_1\) Hamiltonian component is then
    \[ h_1(q) = \ell(q) + \frac{1}{2}\log\left|M(q)\right| \]
    where \(\ell(q)\) is the negative log (unnormalized) density of the target
    distribution with respect to the Lebesgue measure at \(q\). The \(h_2\)
    Hamiltonian component is
    \[ h_2(q, p) = \frac{1}{2} p^T (M(q))^{-1} p. \]
    Due to the coupling between the position and momentum variables in \(h_2\),
    the Hamiltonian system is non-separable, requiring use of a numerical
    integrator with implicit steps when simulating the Hamiltonian dynamic
    associated with the system, e.g.
    `mici.integrators.ImplicitLeapfrogIntegrator`.
    References:
      1. Girolami, M. and Calderhead, B., 2011. Riemann manifold Langevin and
         Hamiltonian Monte Carlo methods. Journal of the Royal Statistical
         Society: Series B (Statistical Methodology), 73(2), pp.123-214.
    """
    def __init__(
        self,
        neg_log_dens,
        metric_matrix_class,
        metric_func,
        vjp_metric_func=None,
        grad_neg_log_dens=None,
        metric_kwargs=None,
    ):
        """
        Args:
            neg_log_dens (Callable[[array], float]): Function which given a
                position array returns the negative logarithm of an
                unnormalized probability density on the position space with
                respect to the Lebesgue measure, with the corresponding
                distribution on the position space being the target
                distribution it is wished to draw approximate samples from.
            metric_matrix_class (type[PositiveDefiniteMatrix]): Class (or
                factory function returning an instance of the class) which
                defines type of matrix representation of metric. The class
                initializer should take a single positional argument which will
                be passed the array outputted by `metric_func`, and which is
                assumed to be a parameter which fully defines the resulting
                matrix (e.g. the diagonal of a `mici.matrices.DiagonalMatrix`).
                The class initializer may also optionally take one or more
                keyword arguments, with the `metric_kwargs` argument used to
                specify the value of these, if any. Together this means the
                metric matrix representation at a position `pos` is constructed
                as
                    metric = metric_matrix_class(
                        metric_func(pos), **metric_kwargs)
                The `mici.matrices.PositiveDefiniteMatrix` subclass should as a
                minimum define `inv`, `log_abs_det`, `grad_log_abs_det`,
                `grad_quadratic_form_inv`, `__matmul__` and `__rmatmul__`
                methods / properties (see documentation of
                `mici.matrices.PositiveDefiniteMatrix` and
                `mici.matrices.DifferentiableMatrix` for definitions of the
                expected behaviour of these methods).
            metric_func (Callable[[array], array]): Function which given a
                position array returns an array containing the parameter value
                of the metric matrix representation passed as the single
                positional argument to the `metric_matrix_class` initializer.
            vjp_metric_func (None or
                    Callable[[array], Callable[[array], array]] or
                    Callable[[array], Tuple[Callable[[array], array], array]]):
                Function which given a position array returns another function
                which takes an array as an argument and returns the
                *vector-Jacobian-product* (VJP) of `metric_func` with respect
                to the position array argument. The VJP is here defined as a
                function of an array `v` (of the same shape as the output of
                `metric_func`) corresponding to
                    vjp(v) = sum(v[..., None] * jacob, axis=tuple(range(v.ndim)))
                where `jacob` is the Jacobian of `m = metric_func(q)` wrt `q`
                i.e. the array of partial derivatives of the function such that
                    jacob[..., i] = ∂m[...] / ∂q[i]
                Optionally the function may instead return a 2-tuple of values
                with the first a function to compute a VJP of `metric_func` and
                the second an array containing the value of `metric_func`, both
                evaluated at the passed position array. If `None` is passed
                (the default) an automatic differentiation fallback will be
                used to attempt to construct a function which calculates the
                VJP (and value) of `metric_func` automatically.
            grad_neg_log_dens (
                    None or Callable[[array], array or Tuple[array, float]]):
                Function which given a position array returns the derivative of
                `neg_log_dens` with respect to the position array argument.
                Optionally the function may instead return a 2-tuple of values
                with the first being the array corresponding to the derivative
                and the second being the value of the `neg_log_dens` evaluated
                at the passed position array. If `None` is passed (the default)
                an automatic differentiation fallback will be used to attempt
                to construct the derivative of `neg_log_dens` automatically.
            metric_kwargs (None or Dict[str, object]): An optional dictionary
                of any additional keyword arguments to the initializer of
                `metric_matrix_class`.
        """
        self._metric_matrix_class = metric_matrix_class
        self._metric_func = metric_func
        self._vjp_metric_func = autodiff_fallback(
            vjp_metric_func, metric_func, "vjp_and_value", "vjp_metric_func"
        )
        self._metric_kwargs = {} if metric_kwargs is None else metric_kwargs
        super().__init__(neg_log_dens, grad_neg_log_dens)
    @cache_in_state("pos")
    def metric_func(self, state):
        """
        Function computing the parameter of the metric matrix representation.
        Args:
            state (mici.states.ChainState): State to compute value at.
        Returns:
            array: Value of `metric_func(state.pos)`.
        """
        return self._metric_func(state.pos)
    @cache_in_state_with_aux("pos", "metric_func")
    def vjp_metric_func(self, state):
        """
        Function constructing a vector-Jacobian-product for `metric_func`.
        The vector-Jacobian-product is here defined as a function of an array
        `v` (of the same shape as the output of `metric_func`) corresponding to
            vjp(v) = sum(v[..., None] * jacob, axis=tuple(range(v.ndim)))
        where `jacob` is the Jacobian of `m = metric_func(q)` wrt `q` i.e.
        the array of partial derivatives of the function such that
            jacob[..., i] = ∂m[...] / ∂q[i]
        Args:
            state (mici.states.ChainState): State to compute VJP at.
        Returns:
            Callable[[array], array]: Vector-Jacobian-product function.
        """
        return self._vjp_metric_func(state.pos)
    @cache_in_state("pos")
    def metric(self, state):
        """
        Function computing the metric matrix representation.
        The returned type of this function is that specified by the
        `metric_matrix_class` argument to the initializer.
        Args:
            state (mici.states.ChainState): State to compute value at.
        Returns:
            mici.matrices.PositiveDefiniteMatrix: Metric matrix representation.
        """
        return self._metric_matrix_class(self.metric_func(state), **self._metric_kwargs)
    def h(self, state):
        """Hamiltonian function value at state (sum of `h1` and `h2`)."""
        return self.h1(state) + self.h2(state)
    def h1(self, state):
        """Position-dependent Hamiltonian component, including the Gaussian
        momentum marginal log-normalizer term (half log-determinant)."""
        return self.neg_log_dens(state) + 0.5 * self.metric(state).log_abs_det
    def dh1_dpos(self, state):
        """Derivative of `h1` Hamiltonian component wrt position."""
        # Evaluate VJP of metric function before metric as metric value will
        # potentially be computed in forward pass and cached
        vjp_metric = self.vjp_metric_func(state)
        return self.grad_neg_log_dens(state) + 0.5 * vjp_metric(
            self.metric(state).grad_log_abs_det
        )
    def h2(self, state):
        """Momentum-dependent Hamiltonian component (quadratic form in the
        momentum under the inverse metric)."""
        return 0.5 * state.mom @ self.metric(state).inv @ state.mom
    def dh2_dpos(self, state):
        """Derivative of `h2` Hamiltonian component wrt position."""
        # Evaluate VJP of metric function before metric as metric value will
        # potentially be computed in forward pass and cached
        vjp_metric = self.vjp_metric_func(state)
        return 0.5 * vjp_metric(self.metric(state).grad_quadratic_form_inv(state.mom))
    def dh2_dmom(self, state):
        """Derivative of `h2` Hamiltonian component wrt momentum."""
        return self.metric(state).inv @ state.mom
    def sample_momentum(self, state, rng):
        """Sample momentum from Gaussian conditional given current position."""
        return self.metric(state).sqrt @ rng.normal(size=state.pos.shape)
class ScalarRiemannianMetricSystem(RiemannianMetricSystem):
    """Riemannian-metric system with scaled identity matrix representation.
    Hamiltonian system with a position dependent scaled identity metric matrix
    representation which is specified by a scalar function
    `metric_scalar_function` of the position `q` which outputs a strictly
    positive scalar `s = metric_scalar_func(q)` with the metric matrix
    representation then taken to be `s * identity(q.shape[0])`.
    See documentation of `RiemannianMetricSystem` for more general details
    about Riemannian-metric Hamiltonian systems.
    """
    def __init__(
        self,
        neg_log_dens,
        metric_scalar_func,
        vjp_metric_scalar_func=None,
        grad_neg_log_dens=None,
    ):
        """
        Args:
            neg_log_dens (Callable[[array], float]): Function which given a
                position array returns the negative logarithm of an
                unnormalized probability density on the position space with
                respect to the Lebesgue measure, with the corresponding
                distribution on the position space being the target
                distribution it is wished to draw approximate samples from.
            metric_scalar_func (Callable[[array], float]): Function which
                given a position array returns a strictly positive scalar
                corresponding to the parameter value of the scaled identity
                metric matrix representation.
            vjp_metric_scalar_func (None or
                    Callable[[array], Callable[[array], float]] or
                    Callable[[array], Tuple[Callable[[array], float], float]]):
                Function which given a position array returns another function
                which takes a scalar as an argument and returns the
                *vector-Jacobian-product* (VJP) of `metric_scalar_func` with
                respect to the position array argument. The VJP is here defined
                as a function of a scalar `v`
                    vjp(v) = v * grad
                where `grad` is the `(dim_pos,)` shaped Jacobian (gradient) of
                `s = metric_scalar_func(q)` with respect to `q` i.e. the array
                of partial derivatives of the function such that
                    grad[i] = ∂s / ∂q[i]
                Optionally the function may instead return a 2-tuple of values
                with the first a function to compute a VJP of
                `metric_scalar_func` and the second a float containing the
                value of `metric_scalar_func`, both evaluated at the passed
                position array. If `None` is passed (the default) an automatic
                differentiation fallback will be used to attempt to construct a
                function which calculates the VJP (and value) of
                `metric_scalar_func` automatically.
            grad_neg_log_dens (
                    None or Callable[[array], array or Tuple[array, float]]):
                Function which given a position array returns the derivative of
                `neg_log_dens` with respect to the position array argument.
                Optionally the function may instead return a 2-tuple of values
                with the first being the array corresponding to the derivative
                and the second being the value of the `neg_log_dens` evaluated
                at the passed position array. If `None` is passed (the default)
                an automatic differentiation fallback will be used to attempt
                to construct the derivative of `neg_log_dens` automatically.
        """
        super().__init__(
            neg_log_dens,
            matrices.PositiveScaledIdentityMatrix,
            metric_scalar_func,
            vjp_metric_scalar_func,
            grad_neg_log_dens,
        )
    @cache_in_state("pos")
    def metric(self, state):
        """Metric matrix representation at current position.
        Overridden here as the scalar parameter alone does not determine the
        dimension of the scaled identity matrix, so the position dimension is
        passed explicitly via the `size` keyword argument.
        """
        return self._metric_matrix_class(
            self.metric_func(state), size=state.pos.shape[0]
        )
class DiagonalRiemannianMetricSystem(RiemannianMetricSystem):
    """Riemannian-metric system with diagonal matrix representation.
    Hamiltonian system with a position dependent diagonal metric matrix
    representation which is specified by a vector-valued function
    `metric_diagonal_func` of the position `q` which outputs a 1D array with
    strictly positive elements `d = metric_diagonal_func(q)` with the metric
    matrix representation then taken to be `diag(d)`.
    See documentation of `RiemannianMetricSystem` for more general details
    about Riemannian-metric Hamiltonian systems.
    """
    def __init__(
        self,
        neg_log_dens,
        metric_diagonal_func,
        vjp_metric_diagonal_func=None,
        grad_neg_log_dens=None,
    ):
        """
        Args:
            neg_log_dens (Callable[[array], float]): Function which given a
                position array returns the negative logarithm of an
                unnormalized probability density on the position space with
                respect to the Lebesgue measure, with the corresponding
                distribution on the position space being the target
                distribution it is wished to draw approximate samples from.
            metric_diagonal_func (Callable[[array], array]): Function which
                given a position array returns a 1D array with strictly
                positive values corresponding to the diagonal values
                (left-to-right) of the diagonal metric matrix representation.
            vjp_metric_diagonal_func (None or
                    Callable[[array], Callable[[array], array]] or
                    Callable[[array], Tuple[Callable[[array], array], array]]):
                Function which given a position array returns another function
                which takes a 1D array as an argument and returns the
                *vector-Jacobian-product* (VJP) of `metric_diagonal_func` with
                respect to the position array argument. The VJP is here defined
                as a function of a 1D array `v`
                    vjp(v) = sum(v[:, None] * jacob[:, :], axis=0)
                where `jacob` is the `(dim_pos, dim_pos)` shaped Jacobian of
                `d = metric_diagonal_func(q)` with respect to `q` i.e. the
                array of partial derivatives of the function such that
                    jacob[i, j] = ∂d[i] / ∂q[j]
                Optionally the function may instead return a 2-tuple of values
                with the first a function to compute a VJP of
                `metric_diagonal_func` and the second a 1D array containing the
                value of `metric_diagonal_func`, both evaluated at the passed
                position array. If `None` is passed (the default) an automatic
                differentiation fallback will be used to attempt to construct a
                function which calculates the VJP (and value) of
                `metric_diagonal_func` automatically.
            grad_neg_log_dens (
                    None or Callable[[array], array or Tuple[array, float]]):
                Function which given a position array returns the derivative of
                `neg_log_dens` with respect to the position array argument.
                Optionally the function may instead return a 2-tuple of values
                with the first being the array corresponding to the derivative
                and the second being the value of the `neg_log_dens` evaluated
                at the passed position array. If `None` is passed (the default)
                an automatic differentiation fallback will be used to attempt
                to construct the derivative of `neg_log_dens` automatically.
        """
        # PositiveDiagonalMatrix takes the 1D diagonal array output by
        # metric_diagonal_func as its single positional parameter.
        super().__init__(
            neg_log_dens,
            matrices.PositiveDiagonalMatrix,
            metric_diagonal_func,
            vjp_metric_diagonal_func,
            grad_neg_log_dens,
        )
class CholeskyFactoredRiemannianMetricSystem(RiemannianMetricSystem):
    """Riemannian-metric system with Cholesky-factored matrix representation.
    Hamiltonian system with a position dependent metric matrix representation
    which is specified by its Cholesky factor by a matrix function
    `metric_chol_func` of the position `q` which outputs a lower-triangular
    matrix `L = metric_chol_func(q)` with the metric matrix representation then
    taken to be `L @ L.T`.
    See documentation of `RiemannianMetricSystem` for more general details
    about Riemannian-metric Hamiltonian systems.
    """
    def __init__(
        self,
        neg_log_dens,
        metric_chol_func,
        vjp_metric_chol_func=None,
        grad_neg_log_dens=None,
    ):
        """
        Args:
            neg_log_dens (Callable[[array], float]): Function which given a
                position array returns the negative logarithm of an
                unnormalized probability density on the position space with
                respect to the Lebesgue measure, with the corresponding
                distribution on the position space being the target
                distribution it is wished to draw approximate samples from.
            metric_chol_func (Callable[[array], array]): Function which given
                a position array returns a 2D array with zeros above the
                diagonal corresponding to the lower-triangular Cholesky-factor
                of the positive definite metric matrix representation.
            vjp_metric_chol_func (None or
                    Callable[[array], Callable[[array], array]] or
                    Callable[[array], Tuple[Callable[[array], array], array]]):
                Function which given a position array returns another function
                which takes a lower-triangular 2D array as an argument (any
                values in the array above the diagonal are ignored) and returns
                the *vector-Jacobian-product* (VJP) of `metric_chol_func` with
                respect to the position array argument. The VJP is here defined
                as a function of a 2D array `v`
                    vjp(v) = sum(v[:, :, None] * jacob[:, :, :], axis=(0, 1))
                where `jacob` is the `(dim_pos, dim_pos, dim_pos)` shaped
                Jacobian of `L = metric_chol_func(q)` with respect to `q` i.e.
                the array of partial derivatives of the function such that
                    jacob[i, j, k] = ∂L[i, j] / ∂q[k]
                Optionally the function may instead return a 2-tuple of values
                with the first a function to compute a VJP of
                `metric_chol_func` and the second a 2D array containing the
                value of `metric_chol_func`, both evaluated at the passed
                position array. If `None` is passed (the default) an automatic
                differentiation fallback will be used to attempt to construct a
                function which calculates the VJP (and value) of
                `metric_chol_func` automatically.
            grad_neg_log_dens (
                    None or Callable[[array], array or Tuple[array, float]]):
                Function which given a position array returns the derivative of
                `neg_log_dens` with respect to the position array argument.
                Optionally the function may instead return a 2-tuple of values
                with the first being the array corresponding to the derivative
                and the second being the value of the `neg_log_dens` evaluated
                at the passed position array. If `None` is passed (the default)
                an automatic differentiation fallback will be used to attempt
                to construct the derivative of `neg_log_dens` automatically.
        """
        # factor_is_lower tells the matrix class the factor output by
        # metric_chol_func is lower- (not upper-) triangular.
        super().__init__(
            neg_log_dens,
            matrices.TriangularFactoredPositiveDefiniteMatrix,
            metric_chol_func,
            vjp_metric_chol_func,
            grad_neg_log_dens,
            metric_kwargs={"factor_is_lower": True},
        )
class DenseRiemannianMetricSystem(RiemannianMetricSystem):
    """Riemannian-metric system with dense matrix representation.
    Hamiltonian system with a position dependent metric matrix representation
    which is specified to be a dense matrix function `metric_func` of the
    position `q` which is guaranteed to be positive definite almost-everywhere,
    with `M = metric_func(q)` then the metric matrix representation.
    See documentation of `RiemannianMetricSystem` for more general details
    about Riemannian-metric Hamiltonian systems.
    """
    def __init__(
        self, neg_log_dens, metric_func, vjp_metric_func=None, grad_neg_log_dens=None
    ):
        """
        Args:
            neg_log_dens (Callable[[array], float]): Function which given a
                position array returns the negative logarithm of an
                unnormalized probability density on the position space with
                respect to the Lebesgue measure, with the corresponding
                distribution on the position space being the target
                distribution it is wished to draw approximate samples from.
            metric_func (Callable[[array], array]): Function which given a
                position array returns a 2D array corresponding to the positive
                definite metric matrix representation. The returned matrices
                (2D arrays) are assumed to be positive-definite for all input
                positions and a `LinAlgError` exception may be raised if this
                fails to be the case.
            vjp_metric_func (None or
                    Callable[[array], Callable[[array], array]] or
                    Callable[[array], Tuple[Callable[[array], array], array]]):
                Function which given a position array returns another function
                which takes a 2D array as an argument and returns the
                *vector-Jacobian-product* (VJP) of `metric_func` with respect
                to the position array argument. The VJP is here defined as a
                function of a 2D array `v`
                    vjp(v) = sum(v[:, :, None] * jacob[:, :, :], axis=(0, 1))
                where `jacob` is the `(dim_pos, dim_pos, dim_pos)` shaped
                Jacobian of `M = metric_func(q)` with respect to `q` i.e. the
                array of partial derivatives of the function such that
                    jacob[i, j, k] = ∂M[i, j] / ∂q[k]
                Optionally the function may instead return a 2-tuple of values
                with the first a function to compute a VJP of `metric_func` and
                the second a 2D array containing the value of `metric_func`,
                both evaluated at the passed position array. If `None` is
                passed (the default) an automatic differentiation fallback will
                be used to attempt to construct a function which calculates the
                VJP (and value) of `metric_func` automatically.
            grad_neg_log_dens (
                    None or Callable[[array], array or Tuple[array, float]]):
                Function which given a position array returns the derivative of
                `neg_log_dens` with respect to the position array argument.
                Optionally the function may instead return a 2-tuple of values
                with the first being the array corresponding to the derivative
                and the second being the value of the `neg_log_dens` evaluated
                at the passed position array. If `None` is passed (the default)
                an automatic differentiation fallback will be used to attempt
                to construct the derivative of `neg_log_dens` automatically.
        """
        # DensePositiveDefiniteMatrix takes the 2D array output by metric_func
        # as its single positional parameter.
        super().__init__(
            neg_log_dens,
            matrices.DensePositiveDefiniteMatrix,
            metric_func,
            vjp_metric_func,
            grad_neg_log_dens,
        )
class SoftAbsRiemannianMetricSystem(RiemannianMetricSystem):
"""SoftAbs Riemmanian metric Hamiltonian system.
Hamiltonian system with a position dependent metric matrix representation
which is specified to be a dense matrix function `metric_func` of the
position `q` which is guaranteed to be positive definite almost-everywhere,
with `M = metric_func(q)` then the metric matrix representation.
Hamiltonian system with a position dependent metric matrix representation
which is specified to be an eigenvalue-regularized transformation of the
Hessian of the negative log density function (the symmetric matrix of
second derivatives the negative log density function with respect to the
position array components. Specifically if `hess_neg_log_dens` is a
symmetric 2D square array valued function of the position `q`, with
`H = hess_neg_log_dens(q)` then if an eigenvalue decomposition of `H` is
computed, i.e. `eigval, eigvec = eigh(H)`, with `eigval` a 1D array of
real eigenvalues, and `eigvec` the corresponding 2D array (orthogonal
matrix) with eigenvectors as columns, then the resulting positive-definite
metric matrix representation `M` is computed as
M = eigvec @ diag(softabs(eigval, softabs_coeff)) @ eigvec.T
with `softabs(x, softabs_coeff) = x / tanh(x * softabs_coeff)` an
elementwise function which acts as a smooth approximation to the absolute
function (ensuring all the eigenvalues of `M` are strictly positive) with
the additional scalar parameter `softabs_coeff` controlling the smoothness
of the approximation, with `softabs` tending to the piecewise linear `abs`
function as `softabs_coeff` tends to infinity, and becoming increasingly
smooth as `softabs_coeff` tends to zero.
See documentation of `RiemannianMetricSystem` for more general details
about Riemannian-metric Hamiltonian systems.
References:
1. Betancourt, M., 2013. A general metric for Riemannian manifold
Hamiltonian Monte Carlo. In Geometric science of information
(pp. 327-334).
"""
def __init__(
self,
neg_log_dens,
grad_neg_log_dens=None,
hess_neg_log_dens=None,
mtp_neg_log_dens=None,
softabs_coeff=1.0,
):
"""
Args:
neg_log_dens (Callable[[array], float]): Function which given a
position array returns the negative logarithm of an
unnormalized probability density on the position space with
respect to the Lebesgue measure, with the corresponding
distribution on the position space being the target
distribution it is wished to draw approximate samples from.
grad_neg_log_dens (
None or Callable[[array], array or Tuple[array, float]]):
Function which given a position array returns the derivative of
`neg_log_dens` with respect to the position array argument.
Optionally the function may instead return a 2-tuple of values
with the first being the array corresponding to the derivative
and the second being the value of the `neg_log_dens` evaluated
at the passed position array. If `None` is passed (the default)
an automatic differentiation fallback will be used to attempt
to construct the derivative of `neg_log_dens` automatically.
hess_neg_log_dens (None or
Callable[[array], array or Tuple[array, array, float]]):
Function which given a position array returns the Hessian of
`neg_log_dens` with respect to the position array argument as a
2D array. Optionally the function may instead return a 3-tuple
of values with the first a 2D array containting the Hessian of
`neg_log_dens`, the second a 1D array containing the gradient
of `neg_log_dens` and the third the value of `neg_log_dens`,
all evaluated at the passed position array. If `None` is passed
(the default) an automatic differentiation fallback will be
used to attempt to construct a function which calculates the
Hessian (and gradient and value) of `neg_log_dens`
automatically.
mtp_neg_log_dens (None or
Callable[[array], Callable[[array], array]] or
Callable[[array], Tuple[Callable, array, array, float]]):
Function which given a position array returns another function
which takes a 2D array (matrix) as an argument and returns the
*matrix-Tressian-product* (MTP) of `neg_log_dens` with respect
to the position array argument. The MTP is here defined as a
function of a matrix `m` corresponding to
mtp(m) = sum(m[:, :, None] * tress[:, :, :], axis=(0, 1))
where `tress` is the 'Tressian' of `f = neg_log_dens(q)` wrt
`q` i.e. the 3D array of third-order partial derivatives of the
scalar-valued function such that
tress[i, j, k] = ∂³f / (∂q[i] ∂q[j] ∂q[k])
Optionally the function may instead return a 4-tuple of values
with the first a function to compute a MTP of `neg_log_dens`,
the second a 2D array containing the Hessian of `neg_log_dens`,
the third a 1D array containing the gradient of `neg_log_dens`
and the fourth the value of `neg_log_dens`, all evaluated at
the passed position array. If `None` is passed (the default) an
automatic differentiation fallback will be used to attempt to
construct a function which calculates the MTP (and Hesisan and
gradient and value) of `neg_log_dens` automatically.
softabs_coeff (float): Positive regularisation coefficient for
smooth approximation to absolute value used to regularize
Hessian eigenvalues in metric matrix representation. As the
value tends to infinity the approximation becomes increasingly
close to the absolute function.
"""
self._hess_neg_log_dens = autodiff_fallback(
hess_neg_log_dens, neg_log_dens, "hessian_grad_and_value", "neg_log_dens"
)
self._mtp_neg_log_dens = autodiff_fallback(
mtp_neg_log_dens,
neg_log_dens,
"mtp_hessian_grad_and_value",
"mtp_neg_log_dens",
)
super().__init__(
neg_log_dens,
matrices.SoftAbsRegularizedPositiveDefiniteMatrix,
self._hess_neg_log_dens,
self._mtp_neg_log_dens,
grad_neg_log_dens,
metric_kwargs={"softabs_coeff": softabs_coeff},
)
def metric_func(self, state):
return self.hess_neg_log_dens(state)
def vjp_metric_func(self, state):
return self.mtp_neg_log_dens(state)
@cache_in_state_with_aux("pos", ("grad_neg_log_dens", "neg_log_dens"))
def hess_neg_log_dens(self, state):
"""Hessian of negative log density with respect to position.
Args:
state (mici.states.ChainState): State to compute value at.
Returns:
hessian (array): 2D array of `neg_log_dens(state)` second
derivatives with respect to `state.pos`, with `hessian[i, j]`
the second derivative of `neg_log_dens(state)` with respect to
`state.pos[i]` and `state.pos[j]`.
"""
return self._hess_neg_log_dens(state.pos)
@cache_in_state_with_aux(
"pos", ("hess_neg_log_dens", "grad_neg_log_dens", "neg_log_dens")
)
def mtp_neg_log_dens(self, state):
"""Generate MTP of negative log density with respect to position.
The matrix-Tressian-product (MTP) is here defined as a function of a
matrix `m` corresponding to
mtp(m) = sum(m[:, :, None] * tress[:, :, :], axis=(0, 1))
where `tress` is the 'Tressian' of `f = neg_log_dens(q)` with respect
to `q = state.pos` i.e. the 3D array of third-order partial derivatives
of the scalar-valued function such that
tress[i, j, k] = ∂³f / (∂q[i] ∂q[j] ∂q[k])
Args:
state (mici.states.ChainState): State to compute value at.
Returns:
mtp (Callable[[array], array]): Function which accepts a 2D array
of shape `(state.pos.shape[0], state.pos.shape[0])` as an
argument and returns an array of shape `state.pos.shape`
containing the computed MTP value.
"""
return self._mtp_neg_log_dens(state.pos)
| matt-graham/hmc | mici/systems.py | Python | mit | 82,193 | [
"Gaussian"
] | e00077af2f11b77a0def7443c2434ee7cb0c776459be267d54f3731ae9ded9db |
from HotSpotterAPI import HotSpotterAPI
from PyQt4.Qt import QObject, pyqtSlot, QTreeWidgetItem, QDialog, QInputDialog
from pylab import find
from other.logger import logdbg, logerr, logmsg, func_log, hsl
import other.crossplat as crossplat
import other.messages as messages
import sys
import time
import os.path
import numpy as np
# Globals
clbls = ['cid', 'gid', 'nid', 'name', 'roi', 'theta']
glbls = ['gid', 'gname', 'num_c', 'cids']
nlbls = ['nid', 'name', 'cids']
class Facade(QObject):
'A friendlier interface into HotSpotter.'
# Initialization, Opening, and Saving
def __init__(fac, use_gui=True, autoload=True):
super( Facade, fac ).__init__()
# Create API
fac.hs = HotSpotterAPI(autoload=False)
if use_gui: #Make GUI?
logdbg('Starting with gui')
uim = fac.hs.uim
uim.start_gui(fac)
fac.show_main_window()
else: #HACKY HACKY HACK
logdbg('Starting without gui')
fac.hs.dm.fignum = 1
fac.hs.uim.start_gui(fac) #TODO: Remove
try: # Open previous database
fac.open_db(None, autoload)
except Exception as ex:
import traceback
print("Error occurred in autoload")
print(str(ex))
print('<<<<<< Traceback >>>>>')
traceback.print_exc()
print("Error occurred in autoload")
@pyqtSlot(name='run_experiments')
def run_experiments(fac):
fac.hs.em.run_experiment()
@pyqtSlot(name='open_db')
@func_log
def open_db(fac, db_dpath='', autoload=True):
'Opens the database db_dpath. Enters UIMode db_dpath is \'\' '
uim = fac.hs.uim
uim.update_state('open_database')
fac.hs.restart(db_dpath, autoload)
fac.unselect()
uim.populate_tables()
def merge_db(fac, db_dpath):
fac.hs.merge_database(db_dpath)
#fac.hs.merge_database(r'D:\data\work\Lionfish\LF_OPTIMIZADAS_NI_V_E')
#fac.hs.merge_database(r'D:\data\work\Lionfish\LF_WEST_POINT_OPTIMIZADAS')
#fac.hs.merge_database(r'D:\data\work\Lionfish\LF_Bajo_bonito')
#fac.hs.merge_database(r'D:\data\work\Lionfish\LF_Juan')
uim.populate_tables()
@func_log
def unselect(fac):
uim = fac.hs.uim
uim.update_state('splash_view')
uim.unselect_all()
uim.draw()
@pyqtSlot(name='save_database')
@func_log
def save_db(fac):
'Saves the database chip, image, and name tables'
iom, uim = fac.hs.get_managers('iom','uim')
old_state = uim.update_state('save_database')
fac.hs.save_database()
uim.update_state(old_state)
# ---------------
@pyqtSlot(name='import_images')
@func_log
def import_images(fac):
uim = fac.hs.uim
old_state = uim.update_state('import_images')
fac.hs.iom.save_tables()
image_list = fac.hs.uim.select_images_on_disk()
fac.hs.add_image_list(image_list)
uim.populate_tables()
uim.update_state(old_state)
@pyqtSlot(name='add_chip')
@func_log
def add_chip(fac, gid=None):
gm, nm, cm, uim = fac.hs.get_managers('gm', 'nm','cm', 'uim')
if gid=='None' or gid == None:
gx = uim.sel_gx()
else:
gx = gm.gx(gid)
uim.select_gid(gid)
uim.update_state('add_chip')
new_roi = uim.annotate_roi()
theta = 0
uim.update_state('image_view')
new_cid = cm.add_chip(-1, nm.UNIDEN_NX(), gx, new_roi, theta, delete_prev=True)
uim.select_cid(new_cid)
print 'New Chip: '+fac.hs.cm.info(new_cid, clbls)
#If in beast mode, then move to the next ROI without drawing
if uim.ui_prefs.quick_roi_select and fac.next_empty_image():
num_empty = len(fac.hs.gm.get_empty_gxs())
print 'Only %d left to go!' % num_empty
else:
uim.populate_tables()
uim.draw()
@pyqtSlot(name='reselect_roi')
@func_log
def reselect_roi(fac):
uim = fac.hs.uim
new_roi = uim.annotate_roi()
sel_cx = uim.sel_cx()
fac.hs.cm.change_roi(sel_cx, new_roi)
uim.draw()
@pyqtSlot(name='reselect_orientation')
@func_log
def reselect_orientation(fac):
uim = fac.hs.uim
new_theta = uim.annotate_orientation()
sel_cx = uim.sel_cx()
fac.hs.cm.change_orientation(sel_cx, new_theta)
uim.draw()
@pyqtSlot(str, int, name='rename_cid')
@func_log
def rename_cid(fac, new_name, cid=-1):
cm, uim = fac.hs.get_managers('cm','uim')
if cid == -1:
cid = uim.sel_cid
cm.rename_chip(cm.cx(cid), str(new_name))
uim.populate_tables()
@pyqtSlot(str, int, name='change_chip_prop')
@func_log
def change_chip_prop(fac, propname, newval, cid=-1):
cm, uim = fac.hs.get_managers('cm','uim')
if cid == -1:
cid = uim.sel_cid
cx = cm.cx(cid)
cm.user_props[str(propname)][cx] = str(newval).replace('\n','\t').replace(',',';;')
uim.populate_tables()
@pyqtSlot(name='add_new_prop')
@func_log
def add_new_prop(fac, propname=None):
'add a new property to keep track of'
if propname is None:
# User ask
dlg = QInputDialog()
textres = dlg.getText(None, 'New Metadata Property','What is the new property name? ')
if not textres[1]:
logmsg('Cancelled new property')
return
propname = str(textres[0])
logmsg('Adding property '+propname)
fac.hs.cm.add_user_prop(propname)
fac.hs.uim.populate_tables()
@pyqtSlot(name='remove_cid')
@func_log
def remove_cid(fac, cid=None):
uim = fac.hs.uim
uim.update_state('image_view')
if cid == 'None' or cid == None:
cx = uim.sel_cx()
else:
uim.select_cid(cid)
cx = cm.cx(cid)
fac.hs.cm.remove_chip(cx)
uim.select_gid(uim.sel_gid)
uim.populate_tables()
uim.draw()
@func_log
@pyqtSlot(name='selc')
def selc(fac, cid):
uim = fac.hs.uim
uim.update_state('chip_view')
uim.select_cid(cid)
uim.draw()
@func_log
@pyqtSlot(name='selg')
def selg(fac, gid):
uim = fac.hs.uim
uim.update_state('image_view')
uim.select_gid(gid)
uim.draw()
@pyqtSlot(int, name='change_view')
@func_log
def change_view(fac, new_state):
uim = fac.hs.uim
prevBlock = uim.hsgui.main_skel.tablesTabWidget.blockSignals(True)
# THIS LIST IS IN THE ORDER OF THE TABS.
# THIS SHOULD CHANGE TO BE INDEPENDENT OF THAT FIXME
if not new_state in uim.tab_order:
if new_state in xrange(len(uim.tab_order)):
new_state = uim.tab_order[new_state]+'_view'
else:
logerr('State is: '+str(new_state)+', but it must be one of: '+str(uim.tab_order))
uim.update_state(new_state)
uim.draw()
uim.hsgui.main_skel.tablesTabWidget.blockSignals(prevBlock)
@pyqtSlot(name='query')
@func_log
def query(fac, qcid=None):
'Performs a query'
uim, cm, qm, vm, nm = fac.hs.get_managers('uim', 'cm','qm','vm', 'nm')
try:
if qcid is None:
qcid = uim.sel_cid
else:
uim.select_cid(qcid)
qcx = cm.cx(qcid)
uim.update_state('Querying')
print('Querying Chip: '+cm.cx2_info(qcx, clbls))
logdbg('\n\nQuerying Chip: '+cm.cx2_info(qcx, clbls))
uim.sel_res = fac.hs.query(qcid)
logmsg('\n\nFinished Query')
uim.update_state('done_querying')
logmsg(str(uim.sel_res))
logdbg('\n\n*** Populating Results Tables ***')
uim.populate_result_table()
logdbg('\n\n*** Switching To Result Views ***')
uim.update_state('result_view')
logdbg('\n\n*** Redrawing UI ***')
uim.draw()
logdbg('\n\n*** Done Redrawing UI ***')
# QUICK AND DIRTY CODE. PLEASE FIXME
try:
cx1 = uim.sel_res.rr.qcx
cx2 = uim.sel_res.top_cx()[0]
if uim.ui_prefs.prompt_after_result and cm.cx2_nx[cx1] == nm.UNIDEN_NX() and cm.cx2_nx[cx2] != nm.UNIDEN_NX():
logdbg('Quick and dirty prompting')
fac._quick_and_dirty_result_prompt(uim.sel_res.rr.qcx, uim.sel_res.top_cx()[0])
else:
logdbg('No Quick and dirty prompting')
except Exception as ex:
logdbg('bad quick and dirty facade code: '+str(ex))
pass
logdbg('\n\n-----------Query OVER-------------\n\n')
except Exception as ex:
uim.update_state('done_querying')
uim.update_state('query_failed')
raise
def _quick_and_dirty_batch_rename(fac):
from front.ChangeNameDialog import Ui_changeNameDialog
cm, nm, uim = fac.hs.get_managers('cm','nm', 'uim')
try:
if uim.sel_cid != None:
name = cm.cx2_name(uim.sel_cx())
else:
name = ''
except Exception as ex:
print 'A quick and dirty exception was caught'
logdbg(str(ex))
name = ''
class ChangeNameDialog(QDialog):
def __init__(self, name, fac):
super( ChangeNameDialog, self ).__init__()
self.dlg_skel = Ui_changeNameDialog()
self.dlg_skel.setupUi(self)
self.dlg_skel.oldNameEdit.setText(name)
def qad_batch_rename():
print 'qad batch renaming'
try:
name1 = str(self.dlg_skel.oldNameEdit.text())
name2 = str(self.dlg_skel.newNameEdit.text())
fac.hs.batch_rename(name1, name2)
except Exception as ex:
logerr(str(ex))
fac.hs.uim.populate_tables()
fac.hs.uim.draw()
self.close()
self.dlg_skel.buttonBox.ApplyRole = self.dlg_skel.buttonBox.AcceptRole
self.dlg_skel.buttonBox.accepted.connect(qad_batch_rename)
self.show()
changeNameDlg = ChangeNameDialog(name, fac)
self = changeNameDlg
def _quick_and_dirty_result_prompt(fac, cx_query, cx_result):
from PyQt4.Qt import QDialog
from front.ResultDialog import Ui_ResultDialog
from tpl.other.matplotlibwidget import MatplotlibWidget
cm, nm = fac.hs.get_managers('cm','nm')
chip1 = cm.cx2_chip(cx_query)
chip2 = cm.cx2_chip(cx_result)
query_cid = cm.cid(cx_query)
top_name = cm.cx2_name(cx_result)
class ResultDialog(QDialog):
def __init__(self, chip1, chip2, title1, title2, change_func, fac):
super( ResultDialog, self ).__init__()
self.dlg_skel = Ui_ResultDialog()
self.dlg_skel.setupUi(self)
self.pltWidget1 = MatplotlibWidget(self)
self.pltWidget2 = MatplotlibWidget(self)
self.dlg_skel.horizontalLayout.addWidget(self.pltWidget1)
self.dlg_skel.horizontalLayout.addWidget(self.pltWidget2)
def acceptSlot():
print 'Accepted QaD Match'
print change_func
change_func()
self.close()
fac.hs.uim.draw()
def rejectSlot():
print 'Rejected QaD Match'
self.close()
self.dlg_skel.buttonBox.accepted.connect(acceptSlot)
self.dlg_skel.buttonBox.rejected.connect(rejectSlot)
self.fig1 = self.pltWidget1.figure
self.fig1.show = lambda: self.pltWidget1.show() #HACKY HACK HACK
self.fig2 = self.pltWidget2.figure
self.fig2.show = lambda: self.pltWidget2.show() #HACKY HACK HACK
ax1 = self.fig1.add_subplot(111)
ax1.imshow(chip1)
ax1.set_title(title1)
ax2 = self.fig2.add_subplot(111)
ax2.imshow(chip2)
ax2.set_title(title2)
self.pltWidget1.show()
self.pltWidget1.draw()
self.pltWidget2.show()
self.pltWidget2.draw()
self.show()
resdlg = ResultDialog(chip1, chip2, 'Unknown Query', 'Accept Match to '+str(top_name)+'?', lambda: fac.rename_cid(top_name, query_cid), fac)
@pyqtSlot(int, name='set_fignum')
@func_log
def set_fignum(fac, fignum):
# This should be a preference
uim, dm = fac.hs.get_managers('uim','dm')
dm.fignum = fignum
uim.set_fignum(fignum)
uim.draw()
# Printing Functions --------------------------------------------
# - print image/chip/name table
def gtbl(fac):
gm = fac.hs.gm
print gm.gx2_info(lbls=glbls)
def ctbl(fac):
cm = fac.hs.cm
print cm.cx2_info(lbls=clbls)
def ntbl(fac):
nm = fac.hs.nm
print nm.nx2_info(lbls=nlbls)
def print_database_stats(fac):
print( fac.hs.get_database_stat_str() )
def print_selected(fac):
uim = fac.hs.uim
print '''
HotSpotter State: '''+uim.state+'''
Selected CID: '''+str(uim.sel_cid)+'''
Selected GID: '''+str(uim.sel_gid)
if uim.sel_gid != None:
gx = uim.sel_gx()
lbls = ['gid','gname','num_c']
print 'Image Info: \n'+fac.hs.gm.gx2_info(gx,lbls).replace('\n', ' \n')
if uim.sel_cid != None:
cx = uim.sel_cx()
lbls = ['cid','gid','nid','name','roi']
print 'Chip Info: \n'+fac.hs.cm.cx2_info(cx, lbls).replace('\n', ' \n')
def print_status(fac):
print('\n\n ___STATUS___')
fac.print_database_stats()
fac.print_selected()
print('\n Need Help? type print_help()')
sys.stdout.flush()
@pyqtSlot(name='vdd')
def vdd(fac):
'Opens the database directory window'
crossplat.view_directory(fac.hs.db_dpath)
@pyqtSlot(name='vdi')
def vdi(fac):
'View the .hsInternal directory'
crossplat.view_directory(fac.hs.iom.get_internal_dpath())
@pyqtSlot(name='vd')
def vd(fac, dname=None):
'View a specific directory (defaults to source directory)'
if dname == None:
dname = fac.hs.iom.hsroot()
crossplat.view_directory(dname)
@pyqtSlot(name='select_next')
@func_log
def select_next(fac):
uim = fac.hs.uim
if uim.state == 'chip_view':
fac.next_unident_chip()
elif uim.state == 'image_view':
if not fac.next_empty_image():
if not fac.next_equal_size_chip():
fac.next_0_theta_chip()
else:
logerr('Cannot goto next in state: '+uim.state)
@func_log
def next_unident_chip(fac):
'Next call that finds a chip that is unidentified'
empty_cxs = find(np.logical_and(fac.hs.cm.cx2_nx == fac.hs.nm.UNIDEN_NX(), fac.hs.cm.cx2_cid > 0))
if len(empty_cxs) == 0:
print 'There are no more empty images'
return False
cx = empty_cxs[0]
cid = fac.hs.cm.cx2_cid[cx]
fac.selc(cid)
return True
@func_log
def next_empty_image(fac):
'Next call that finds an image without a chip'
empty_gxs = fac.hs.gm.get_empty_gxs()
if len(empty_gxs) == 0:
print 'There are no more empty images.'
return False
gx = empty_gxs[0]
gid = fac.hs.gm.gx2_gid[gx]
fac.selg(gid)
return True
@func_log
def next_equal_size_chip(fac):
'Next call that finds a chip where the entire image is the roi'
cm = fac.hs.cm
gm = fac.hs.gm
valid_cxs = cm.get_valid_cxs()
fac.hs.cm.cx2_nx
gid = -1
for cx in iter(valid_cxs):
(gw,gh) = gm.gx2_img_size(cm.cx2_gx[cx])
(_,_,cw,ch) = cm.cx2_roi[cx]
if gw == cw and ch == gh:
gid = fac.hs.cm.cx2_gid(cx)
break
if gid == -1:
print 'There are no more unrefined rois'
return False
fac.selg(gid)
@func_log
def next_0_theta_chip(fac):
'Next call that finds a chip without an orientation'
cm = fac.hs.cm
gm = fac.hs.gm
valid_cxs = cm.get_valid_cxs()
fac.hs.cm.cx2_nx
gid = -1
for cx in iter(valid_cxs):
(gw,gh) = gm.gx2_img_size(cm.cx2_gx[cx])
(_,_,cw,ch) = cm.cx2_roi[cx]
if cm.cx2_theta[cx] == 0:
gid = fac.hs.cm.cx2_gid(cx)
break
if gid == -1:
print 'There are no more 0 theta rois'
return False
fac.selg(gid)
@pyqtSlot(name='toggle_ellipse')
def toggle_ellipse(fac):
dm, uim = fac.hs.get_managers('dm','uim')
dm.draw_prefs.toggle('ellipse_bit')
uim.draw()
@pyqtSlot(name='toggle_points')
def toggle_points(fac):
dm, uim = fac.hs.get_managers('dm','uim')
dm.draw_prefs.toggle('points_bit')
uim.draw()
def logs(fac, use_blacklist_bit=True):
'Prints current logs to the screen'
print hsl.hidden_logs(use_blacklist_bit)
@pyqtSlot(name='write_logs')
def write_logs(fac):
'Write current logs to a timestamped file and open in an editor'
timestamp = str(time.time())
logfname = fac.hs.iom.get_temp_fpath('hotspotter_logs_'+timestamp+'.txt')
with open(logfname,'w') as logfile:
logfile.write(str(hsl))
crossplat.view_text_file(logfname)
def print_help(fac):
print messages.cmd_help
def profile(cmd):
# Meliae # from meliae import loader # om = loader.load('filename.json') # s = om.summarize();
import cProfile, sys, os
print('Profiling Command: '+cmd)
cProfOut_fpath = 'OpenGLContext.profile'
cProfile.runctx( cmd, globals(), locals(), filename=cProfOut_fpath )
# RUN SNAKE
print('Profiled Output: '+cProfOut_fpath)
if sys.platform == 'win32':
rsr_fpath = 'C:/Python27/Scripts/runsnake.exe'
else:
rsr_fpath = 'runsnake'
view_cmd = rsr_fpath+' "'+cProfOut_fpath+'"'
os.system(view_cmd)
#import pstat
#stats = pstats.Stats(cProfOut_fpath)
#stats.print()
@func_log
def line_profile(fac, cmd='fac.query(1)'):
# Meliae # from meliae import loader # om = loader.load('filename.json') # s = om.summarize();
import line_profiler
iom = fac.hs.iom
logmsg('Line Profiling Command: '+cmd)
line_profile_fpath = iom.get_temp_fpath('line_profile.'+cmd+'.profile')
lp = line_profiler.LineProfiler()
from inspect import getmembers, isfunction, ismethod
for module in [fac.hs, fac.hs.cm, fac.hs.gm, fac.hs.nm, fac.hs.qm, fac.hs.vm, fac.hs.am, fac.hs.dm]:
for (method_name, method) in getmembers(module, ismethod):
lp.add_function(method.im_func)
#functions_list = [o for o in getmembers(module, isfunction)]
lp.runctx( cmd, globals(), locals())
lp.dump_stats(line_profile_fpath)
lp.print_stats()
rsr_fpath = 'C:\\Python27\\Scripts\\runsnake.exe'
view_cmd = rsr_fpath+' '+line_profile_fpath
os.system(view_cmd)
return lp
def call_graph(fac, cmd='fac.query(1)'):
import pycallgraph
import Image
iom = fac.hs.iom
logmsg('Call Graph Command: '+cmd)
callgraph_fpath = iom.get_temp_fpath('callgraph'+cmd+'.png')
pycallgraph.start_trace()
eval(cmd)
pycallgraph.stop_trace()
pycallgraph.make_dot_graph(callgraph_fpath)
Image.open(callgraph_fpath).show()
@pyqtSlot(name='convert_all_images_to_chips')
def convert_all_images_to_chips(fac):
uim = fac.hs.uim
uim.update_state('working')
fac.hs.add_roi_to_all_images()
uim.populate_tables()
uim.update_state('chip_view')
uim.draw()
def show_main_window(fac):
uim = fac.hs.uim
if uim.hsgui != None:
uim.hsgui.show()
else:
logerr('GUI does not exist')
def show_edit_preference_widget(fac):
uim = fac.hs.uim
if not uim.hsgui is None:
uim.hsgui.epw.show()
else:
logerr('GUI does not exist')
def redraw(fac):
uim = fac.hs.uim
uim.draw()
@pyqtSlot(name='default_prefs')
def default_prefs(fac):
uim = fac.hs.uim
fac.hs.reload_preferences()
logmsg('The change to defaults will not become permanent until you save or change one')
if uim.hsgui != None:
uim.hsgui.epw.pref_model.layoutChanged.emit()
def unload_features_and_models(fac):
fac.hs.unload_all_features()
def figure_for_paper(fac):
fac.set_fignum(1)
fac.hs.dm.draw_prefs.ellipse_bit = True
fac.hs.dm.draw_prefs.points_bit = False
#fac.selg(7)
import random
random.seed(0)
fsel_ret = fac.hs.dm.show_chip(1, in_raw_chip=True, fsel='rand', ell_alpha=1, bbox_bit=False, color=[0,0,1], ell_bit=True, xy_bit=False)
#fsel_ret = fac.hs.dm.show_chip(1, in_raw_chip=True, fsel=fsel_ret, ell_alpha=1, bbox_bit=False)
return fsel_ret
def write_database_stats(fac):
'Writes some statistics to disk and returns them'
import numpy as np
cm, nm, gm, iom = fac.hs.get_managers('cm','nm','gm','iom')
num_images = gm.num_g
num_chips = cm.num_c
num_names = nm.num_n
vgx2_nChips = [] # Num Chips Per Image
for gx in iter(gm.get_valid_gxs()):
vgx2_nChips += [len(gm.gx2_cx_list[gx])]
vgx2_nChips = np.array(vgx2_nChips)
chips_per_image_mean = np.mean(vgx2_nChips)
chips_per_image_std = np.std(vgx2_nChips)
chips_per_image_mean_gt0 = np.mean(vgx2_nChips[vgx2_nChips > 0])
chips_per_image_std_gt0 = np.std(vgx2_nChips[vgx2_nChips > 0])
db_stats = \
[
'Num Images: %d' % num_images ,
'Num Chips: %d' % num_chips,
'Num Names: %d' % num_names,
'Num Tagged Images: %d' % (vgx2_nChips >= 1).sum(),
'Num Untagged Images: %d' % (vgx2_nChips == 0).sum(),
'Num Chips/TaggedImage: %.2f += %.2f ' % ( chips_per_image_mean_gt0, chips_per_image_std_gt0 ),
'Num Chips/Image: %.2f += %.2f ' % ( chips_per_image_mean, chips_per_image_std ),
]
db_stats_str = '\n'.join(db_stats)
iom.write_to_user_fpath('database_stats.txt', db_stats_str)
return db_stats_str
def SetNamesFromLionfishGroundTruth(fac):
import os.path
import re
cm = fac.hs.cm
nm = fac.hs.nm
gm = fac.hs.gm
name_fn = lambda path: os.path.splitext(os.path.split(path)[1])[0]
re_lfname = re.compile(r'(?P<DATASET>.*)-(?P<NAMEID>\d\d)-(?P<SIGHTINGID>[a-z])')
for cx in iter(cm.get_valid_cxs()):
gx = cm.cx2_gx[cx]
name = name_fn(gm.gx2_gname[gx])
match_obj = re_lfname.match(name)
if match_obj != None:
match_dict = match_obj.groupdict()
name_id = match_dict['NAMEID']
dataset = match_dict['DATASET']
sightingid = match_dict['SIGHTINGID']
if dataset.find('BB') > -1:
dataset = 'BB'
if dataset.find('SA-WP') > -1:
dataset = 'WP'
if dataset.find('SA-NV') > -1:
dataset = 'NV'
if dataset.find('SA-VE') > -1:
dataset = 'VE'
new_name = 'Lionfish_'+str(dataset)+str(name_id)
cm.rename_chip(cx, new_name)
@pyqtSlot(name='expand_rois')
@func_log
def expand_rois(fac, percent_increase=None):
'expand rois by a percentage of the diagonal'
if percent_increase == None:
# User ask
dlg = QInputDialog()
percentres = dlg.getText(None, 'ROI Expansion Factor',
'Enter the percentage to expand the ROIs.\n'+
'The percentage is in terms of diagonal length')
if not percentres[1]:
logmsg('Cancelled all match')
return
try:
percent_increase = float(str(percentres[0]))
except ValueError:
logerr('The percentage must be a number')
cm = fac.hs.cm
gm = fac.hs.gm
logmsg('Resizing all chips')
for cx in iter(cm.get_valid_cxs()):
logmsg('Resizing cx='+str(cx))
# Get ROI size and Image size
[rx, ry, rw, rh] = cm.cx2_roi[cx]
[gw, gh] = gm.gx2_img_size(cm.cx2_gx[cx])
# Find Diagonal Increase
diag = np.sqrt(rw**2 + rh**2)
scale_factor = percent_increase/100.0
diag_increase = scale_factor * diag
target_diag = diag + diag_increase
# Find Width/Height Increase
ar = float(rw)/float(rh)
w_increase = np.sqrt(ar**2 * diag_increase**2 / (ar**2 + 1))
h_increase = w_increase / ar
# Find New xywh within image constriants
new_x = int(max(0, round(rx - w_increase / 2.0)))
new_y = int(max(0, round(ry - h_increase / 2.0)))
new_w = int(min(gw - new_x, round(rw + w_increase)))
new_h = int(min(gh - new_y, round(rh + h_increase)))
new_roi = [new_x, new_y, new_w, new_h]
logmsg('Old Roi: '+repr([rx, ry, rw, rh]))
cm.change_roi(cx, new_roi)
logmsg('\n')
logmsg('Done resizing all chips')
@pyqtSlot(name='match_all_above_thresh')
def match_all_above_thresh(fac, threshold=None):
'do matching and assign all above thresh'
if threshold == None:
# User ask
dlg = QInputDialog()
threshres = dlg.getText(None, 'Threshold Selector',
'Enter a matching threshold.\n'+
'The system will query each chip and assign all matches above this thresh')
if not threshres[1]:
logmsg('Cancelled all match')
return
try:
threshold = float(str(threshres[0]))
except ValueError:
logerr('The threshold must be a number')
qm = fac.hs.qm
cm = fac.hs.cm
nm = fac.hs.nm
vm = fac.hs.vm
# Get model ready
vm.sample_train_set()
fac.hs.ensure_model()
# Do all queries
for qcx in iter(cm.get_valid_cxs()):
qcid = cm.cx2_cid[qcx]
logmsg('Querying CID='+str(qcid))
query_name = cm.cx2_name(qcx)
logdbg(str(qcx))
logdbg(str(type(qcx)))
cm.load_features(qcx)
res = fac.hs.query(qcid)
# Match only those above a thresh
res.num_top_min = 0
res.num_extra_return = 0
res.top_thresh = threshold
top_cx = res.top_cx()
if len(top_cx) == 0:
print('No matched for cid='+str(qcid))
continue
top_names = cm.cx2_name(top_cx)
all_names = np.append(top_names,[query_name])
if all([nm.UNIDEN_NAME() == name for name in all_names]):
# If all names haven't been identified, make a new one
new_name = nm.get_new_name()
else:
# Rename to the most frequent non ____ name seen
from collections import Counter
name_freq = Counter(np.append(top_names,[query_name])).most_common()
new_name = name_freq[0][0]
if new_name == nm.UNIDEN_NAME():
new_name = name_freq[1][0]
# Do renaming
cm.rename_chip(qcx, new_name)
for cx in top_cx:
cm.rename_chip(cx, new_name)
fac.hs.uim.populate_tables()
@pyqtSlot(str, name='logdbgSlot')
def logdbgSlot(fac, msg):
# This function is a hack so MainWin can call logdbg
logdbg(msg)
@pyqtSlot(name='run_matching_experiment')
@func_log
def run_matching_experiment(fac):
fac.hs.em.run_matching_experiment()
@pyqtSlot(name='run_name_consistency_experiment')
@func_log
def run_name_consistency_experiment(fac):
fac.hs.em.run_name_consistency_experiment()
@pyqtSlot(name='view_documentation')
@func_log
def view_documentation(fac):
import os.path
pdf_name = 'HotSpotterUserGuide.pdf'
doc_path = os.path.join(fac.hs.iom.hsroot(), 'documentation')
pdf_fpath = os.path.join(doc_path, pdf_name)
# Tries to open pdf, if it fails it opens the documentation folder
if os.system('open '+pdf_fpath) == 1:
if os.system(pdf_fpath) == 1:
crossplat.view_directory(doc_path)
@pyqtSlot(name='precompute')
@func_log
def precompute(fac):
fac.hs.ensure_model()
| Erotemic/hotspotter | _graveyard/oldhotspotter/Facade.py | Python | apache-2.0 | 29,956 | [
"EPW"
] | 260a7c2b6dad15d3d09c2a8f87d988e66601cb4e5aff5bd0f4ae8ab25ecd4672 |
import numpy as np
from robo.maximizers.base_maximizer import BaseMaximizer
from robo.initial_design import init_random_uniform
class RandomSampling(BaseMaximizer):
    """Acquisition maximizer based on random candidate sampling: 70% of the
    candidates are drawn uniformly over the box bounds and 30% from a
    Gaussian centered on the current incumbent."""

    def __init__(self, objective_function, lower, upper, n_samples=500, rng=None):
        """
        Samples candidates uniformly at random and returns the point with the highest objective value.

        Parameters
        ----------
        objective_function: acquisition function
            The acquisition function which will be maximized
        lower: np.ndarray (D)
            Lower bounds of the input space
        upper: np.ndarray (D)
            Upper bounds of the input space
        n_samples: int
            Number of candidates that are samples
        rng: numpy.random.RandomState, optional
            Random number generator forwarded to the base maximizer.
        """
        self.n_samples = n_samples
        super(RandomSampling, self).__init__(objective_function, lower, upper, rng)

    def maximize(self):
        """
        Maximizes the given acquisition function.

        Returns
        -------
        np.ndarray (D,)
            The single sampled point with the highest acquisition value.
        """
        # Sample random points uniformly over the whole space (70% of budget)
        rand = init_random_uniform(self.lower, self.upper,
                                   int(self.n_samples * .7))
        # Put a Gaussian on the incumbent and sample from that (30% of budget).
        # NOTE(review): the trailing comma makes `loc` a 1-tuple containing the
        # incumbent vector, so np.random.normal broadcasts to shape (1, D) and
        # the trailing [0] strips that leading axis back off — confirm intended.
        loc = self.objective_func.model.get_incumbent()[0],
        scale = np.ones([self.lower.shape[0]]) * 0.1
        rand_incs = np.array([np.clip(np.random.normal(loc, scale), self.lower, self.upper)[0]
                              for _ in range(int(self.n_samples * 0.3))])
        # Evaluate the whole candidate batch at once and keep the best point.
        X = np.concatenate((rand, rand_incs), axis=0)
        y = self.objective_func(X)
        x_star = X[y.argmax()]
        return x_star
| numairmansur/RoBO | robo/maximizers/random_sampling.py | Python | bsd-3-clause | 1,748 | [
"Gaussian"
] | 7557a447e760dbe7dd9456cfd520ce94e6e1ba1c2dff434ae0d52e6e4d6d8e9e |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
import sqlite3
import sys
from collections import OrderedDict, defaultdict
from functools import wraps
from warnings import warn
import numpy as np
import pyproj
import regex
from cf_units import Unit
from compliance_checker import cfutil
from compliance_checker.base import BaseCheck, BaseNCCheck, Result, TestCtx
from compliance_checker.cf import util
from compliance_checker.cf.appendix_d import (
dimless_vertical_coordinates_1_6,
dimless_vertical_coordinates_1_7,
no_missing_terms,
)
from compliance_checker.cf.appendix_e import cell_methods16, cell_methods17
from compliance_checker.cf.appendix_f import (
ellipsoid_names17,
grid_mapping_attr_types16,
grid_mapping_attr_types17,
grid_mapping_dict16,
grid_mapping_dict17,
horizontal_datum_names17,
prime_meridian_names17,
)
logger = logging.getLogger(__name__)
def print_exceptions(f):
    """Decorator that prints a traceback instead of propagating exceptions.

    The wrapped callable is invoked normally; if it raises, the traceback
    is printed to stderr and the wrapper returns ``None`` instead of
    re-raising.  Useful for callbacks whose exceptions would otherwise be
    swallowed silently by the caller.

    :param f: callable to wrap
    :return: wrapped callable with the same signature as *f*
    """

    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception:
            # Removed the unused `as e` binding; print_exc reads the
            # active exception directly.  Deliberately returns None so a
            # failing check cannot kill the caller.
            from traceback import print_exc

            print_exc()

    return wrapper
# helper to see if we should do DSG tests
def is_likely_dsg(func):
    """Decorator: run *func* only for datasets that declare ``featureType``.

    Discrete Sampling Geometry checks only make sense when the dataset has
    a ``featureType`` attribute; otherwise the check is skipped by
    returning ``None``.
    """

    @wraps(func)
    def _dec(s, ds):
        if not hasattr(ds, "featureType"):
            # @TODO: skips if we have formalized skips
            return None
        return func(s, ds)

    return _dec
class CFBaseCheck(BaseCheck):
"""
CF Convention Checker Base
"""
    def __init__(self, options=None):
        """Initialize per-dataset caches and the section-title lookup table.

        :param options: optional checker options forwarded to BaseCheck.
        """
        # The compliance checker can be run on multiple datasets in a single
        # instantiation, so caching values has to be done by the unique identifier
        # for each dataset loaded.

        # Each default dict is a key, value mapping from the dataset object to
        # a list of variables
        super(CFBaseCheck, self).__init__(options)
        self._coord_vars = defaultdict(list)
        self._ancillary_vars = defaultdict(list)
        self._clim_vars = defaultdict(list)
        self._metadata_vars = defaultdict(list)
        self._boundary_vars = defaultdict(list)
        self._geophysical_vars = defaultdict(list)
        self._aux_coords = defaultdict(list)

        # Packaged CF standard-name table; may later be replaced with a
        # downloaded version by _find_cf_standard_name_table().
        self._std_names = util.StandardNameTable()

        self.section_titles = {  # dict of section headers shared by grouped checks
            "2.2": "§2.2 Data Types",
            "2.3": "§2.3 Naming Conventions",
            "2.4": "§2.4 Dimensions",
            "2.5": "§2.5 Variables",
            "2.6": "§2.6 Attributes",
            "3.1": "§3.1 Units",
            "3.2": "§3.2 Long Name",
            "3.3": "§3.3 Standard Name",
            "3.4": "§3.4 Ancillary Data",
            "3.5": "§3.5 Flags",
            "4": "§4 Coordinate Types",
            "4.1": "§4.1 Latitude Coordinate",
            "4.2": "§4.2 Longitude Coordinate",
            "4.3": "§4.3 Vertical Coordinate",
            "4.4": "§4.4 Time Coordinate",
            "4.5": "§4.5 Discrete Axis",
            "5": "§5 Coordinate Systems",
            "5.1": "§5.1 Independent Latitude, Longitude, Vertical, and Time Axes",
            "5.2": "§5.2 2-D Latitude, Longitude, Coordinate Variables",
            "5.3": "§5.3 Reduced Horizontal Grid",
            "5.4": "§5.4 Timeseries of Station Data",
            "5.5": "§5.5 Trajectories",
            "5.6": "§5.6 Horizontal Coordinate Reference Systems, Grid Mappings, Projections",
            "5.7": "§5.7 Scalar Coordinate Variables",
            "6.1": "§6.1 Labels",
            "6.2": "§6.2 Alternative Coordinates",
            "7.1": "§7.1 Cell Boundaries",
            "7.2": "§7.2 Cell Measures",
            "7.3": "§7.3 Cell Methods",
            "7.4": "§7.4 Climatological Statistics",
            "8.1": "§8.1 Packed Data",
            "8.2": "§8.2 Compression by Gathering",
            "9.1": "§9.1 Features and feature types",
            "9.2": "§9.2 Collections, instances, and elements",
            "9.3": "§9.3 Representations of Collections of features in data variables",
            "9.4": "§9.4 The featureType attribute",
            "9.5": "§9.5 Coordinates and metadata",
            "9.6": "§9.6 Missing Data",
        }
################################################################################
# Helper Methods - var classifications, etc
################################################################################
def setup(self, ds):
"""
Initialize various special variable types within the class.
Mutates a number of instance variables.
:param netCDF4.Dataset ds: An open netCDF dataset
"""
self.coord_vars = self._find_coord_vars(ds)
self._find_aux_coord_vars(ds)
self._find_ancillary_vars(ds)
self._find_clim_vars(ds)
self._find_boundary_vars(ds)
self._find_metadata_vars(ds)
self._find_cf_standard_name_table(ds)
self._find_geophysical_vars(ds)
coord_containing_vars = ds.get_variables_by_attributes(
coordinates=lambda val: isinstance(val, str)
)
# coordinate data variables
# Excerpt from "§1.3 Overview" on coordinate data
# There are two methods used to identify variables that contain
# coordinate data. The first is to use the NUG-defined "coordinate
# variables." The use of coordinate variables is required for all
# dimensions that correspond to one dimensional space or time
# coordinates . In cases where coordinate variables are not applicable,
# the variables containing coordinate data are identified by the
# coordinates attribute.
# first read in variables referred to in coordinates which exist
# in the dataset
self.coord_data_vars = set()
for var in coord_containing_vars:
for coord_var_name in var.coordinates.strip().split(" "):
if coord_var_name in ds.variables:
self.coord_data_vars.add(coord_var_name)
# then add in the NUG coordinate variables -- single dimension with
# dimension name the same as coordinates
self.coord_data_vars.update(self.coord_vars)
def check_grid_mapping(self, ds):
"""
5.6 When the coordinate variables for a horizontal grid are not
longitude and latitude, it is required that the true latitude and
longitude coordinates be supplied via the coordinates attribute. If in
addition it is desired to describe the mapping between the given
coordinate variables and the true latitude and longitude coordinates,
the attribute grid_mapping may be used to supply this description.
This attribute is attached to data variables so that variables with
different mappings may be present in a single file. The attribute takes
a string value which is the name of another variable in the file that
provides the description of the mapping via a collection of attached
attributes. This variable is called a grid mapping variable and is of
arbitrary type since it contains no data. Its purpose is to act as a
container for the attributes that define the mapping.
The one attribute that all grid mapping variables must have is
grid_mapping_name which takes a string value that contains the mapping's
name. The other attributes that define a specific mapping depend on the
value of grid_mapping_name. The valid values of grid_mapping_name along
with the attributes that provide specific map parameter values are
described in Appendix F, Grid Mappings.
When the coordinate variables for a horizontal grid are longitude and
latitude, a grid mapping variable with grid_mapping_name of
latitude_longitude may be used to specify the ellipsoid and prime
meridian.
In order to make use of a grid mapping to directly calculate latitude
and longitude values it is necessary to associate the coordinate
variables with the independent variables of the mapping. This is done by
assigning a standard_name to the coordinate variable. The appropriate
values of the standard_name depend on the grid mapping and are given in
Appendix F, Grid Mappings.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = OrderedDict()
grid_mapping_variables = cfutil.get_grid_mapping_variables(ds)
# Check the grid_mapping attribute to be a non-empty string and that its reference exists
for variable in ds.get_variables_by_attributes(
grid_mapping=lambda x: x is not None
):
grid_mapping = getattr(variable, "grid_mapping", None)
defines_grid_mapping = self.get_test_ctx(
BaseCheck.HIGH, self.section_titles["5.6"], variable.name
)
defines_grid_mapping.assert_true(
(isinstance(grid_mapping, str) and grid_mapping),
"{}'s grid_mapping attribute must be a "
+ "space-separated non-empty string".format(variable.name),
)
if isinstance(grid_mapping, str):
# TODO (badams): refactor functionality to split functionality
# into requisite classes
if ":" in grid_mapping and self._cc_spec_version >= "1.7":
colon_count = grid_mapping.count(":")
re_all = regex.findall(
r"(\w+):\s*((?:\w+\s+)*(?:\w+)(?![\w:]))", grid_mapping
)
if colon_count != len(re_all):
defines_grid_mapping.out_of += 1
defines_grid_mapping.messages.append(
"Could not consume entire grid_mapping expression, please check for well-formedness"
)
else:
for grid_var_name, coord_var_str in re_all:
defines_grid_mapping.assert_true(
grid_var_name in ds.variables,
"grid mapping variable {} must exist in this dataset".format(
grid_var_name
),
)
for ref_var in coord_var_str.split():
defines_grid_mapping.assert_true(
ref_var in ds.variables,
"Coordinate-related variable {} referenced by grid_mapping variable {} must exist in this dataset".format(
ref_var, grid_var_name
),
)
else:
for grid_var_name in grid_mapping.split():
defines_grid_mapping.assert_true(
grid_var_name in ds.variables,
"grid mapping variable {} must exist in this dataset".format(
grid_var_name
),
)
ret_val[variable.name] = defines_grid_mapping.to_result()
# Check the grid mapping variables themselves
for grid_var_name in grid_mapping_variables:
valid_grid_mapping = self.get_test_ctx(
BaseCheck.HIGH, self.section_titles["5.6"], grid_var_name
)
grid_var = ds.variables[grid_var_name]
grid_mapping_name = getattr(grid_var, "grid_mapping_name", None)
# Grid mapping name must be in appendix F
valid_grid_mapping.assert_true(
grid_mapping_name in self.grid_mapping_dict,
"{} is not a valid grid_mapping_name.".format(grid_mapping_name)
+ " See Appendix F for valid grid mappings",
)
# The self.grid_mapping_dict has a values of:
# - required attributes
# - optional attributes (can't check)
# - required standard_names defined
# - at least one of these attributes must be defined
# We can't do any of the other grid mapping checks if it's not a valid grid mapping name
if grid_mapping_name not in self.grid_mapping_dict:
ret_val[grid_mapping_name] = valid_grid_mapping.to_result()
continue
grid_mapping = self.grid_mapping_dict[grid_mapping_name]
required_attrs = grid_mapping[0]
# Make sure all the required attributes are defined
for req in required_attrs:
valid_grid_mapping.assert_true(
hasattr(grid_var, req),
"{} is a required attribute for grid mapping {}".format(
req, grid_mapping_name
),
)
# Make sure that exactly one of the exclusive attributes exist
if len(grid_mapping) == 4:
at_least_attr = grid_mapping[3]
number_found = 0
for attr in at_least_attr:
if hasattr(grid_var, attr):
number_found += 1
valid_grid_mapping.assert_true(
number_found == 1,
"grid mapping {}".format(grid_mapping_name)
+ "must define exactly one of these attributes: "
+ "{}".format(" or ".join(at_least_attr)),
)
# Make sure that exactly one variable is defined for each of the required standard_names
expected_std_names = grid_mapping[2]
for expected_std_name in expected_std_names:
found_vars = ds.get_variables_by_attributes(
standard_name=expected_std_name
)
valid_grid_mapping.assert_true(
len(found_vars) == 1,
"grid mapping {} requires exactly ".format(grid_mapping_name)
+ "one variable with standard_name "
+ "{} to be defined".format(expected_std_name),
)
ret_val[grid_var_name] = valid_grid_mapping.to_result()
return ret_val
def check_conventions_version(self, ds):
"""
CF §2.6.1 the NUG defined global attribute Conventions to the string
value "CF-<version_number>"; check the Conventions attribute contains
the appropriate string.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: compliance_checker.base.Result
"""
valid = False
reasoning = []
correct_version_string = "{}-{}".format(
self._cc_spec, self._cc_spec_version
).upper()
if hasattr(ds, "Conventions"):
conventions = regex.split(r",|\s+", getattr(ds, "Conventions", ""))
for convention in conventions:
if convention == correct_version_string:
valid = True
break
else:
reasoning = [
"§2.6.1 Conventions global attribute does not contain "
'"{}"'.format(correct_version_string)
]
else:
valid = False
reasoning = ["§2.6.1 Conventions field is not present"]
return Result(
BaseCheck.MEDIUM, valid, self.section_titles["2.6"], msgs=reasoning
)
def _check_dimensionless_vertical_coordinates(
self,
ds,
deprecated_units,
version_specific_check,
version_specific_dimless_vertical_coord_dict,
):
"""
Check the validity of dimensionless coordinates under CF
:param netCDF4.Dataset ds: An open netCDF dataset
:param list deprecated_units: list of string names of deprecated units
:param function version_specific_check: version-specific implementation to check dimensionless vertical coord
:param dict version_specific_dimless_coord_dict: version-specific dict of dimensionless vertical coords and computed standard names
:return: List of results
"""
ret_val = []
z_variables = cfutil.get_z_variables(ds)
# call version-specific implementation
for name in z_variables:
version_specific_check(
ds,
name,
deprecated_units,
ret_val,
version_specific_dimless_vertical_coord_dict,
)
return ret_val
    def _check_formula_terms(self, ds, coord, dimless_coords_dict):
        """
        Checks a dimensionless vertical coordinate contains valid formula_terms:

        - formula_terms is a non-empty string
        - formula_terms matches the expected "term: variable_name" format
        - every variable referenced in formula_terms exists in the dataset
        - the terms are complete for the coordinate's standard_name

        :param netCDF4.Dataset ds: An open netCDF dataset
        :param str coord: name of the dimensionless vertical coordinate variable
        :param dict dimless_coords_dict: mapping of dimensionless-coordinate
            standard names to their expected formula terms
        :rtype: compliance_checker.base.Result
        """
        variable = ds.variables[coord]
        standard_name = getattr(variable, "standard_name", None)
        formula_terms = getattr(variable, "formula_terms", None)
        valid_formula_terms = TestCtx(BaseCheck.HIGH, self.section_titles["4.3"])

        valid_formula_terms.assert_true(
            isinstance(formula_terms, str) and formula_terms,
            "§4.3.2: {}'s formula_terms is a required attribute and must be a non-empty string"
            "".format(coord),
        )
        # We can't check any more
        if not formula_terms:
            return valid_formula_terms.to_result()

        # check that the formula_terms are well formed and are present
        # The pattern for formula terms is always component: variable_name
        # the regex grouping always has component names in even positions and
        # the corresponding variable name in odd positions.
        matches = regex.findall(
            r"([A-Za-z][A-Za-z0-9_]*: )([A-Za-z][A-Za-z0-9_]*)", variable.formula_terms
        )
        # strip the trailing ": " off each captured component name
        terms = set(m[0][:-2] for m in matches)
        # get the variables named in the formula terms and check if any
        # are not present in the dataset
        missing_vars = sorted(set(m[1] for m in matches) - set(ds.variables))
        missing_fmt = "The following variable(s) referenced in {}:formula_terms are not present in the dataset: {}"
        valid_formula_terms.assert_true(
            len(missing_vars) == 0, missing_fmt.format(coord, ", ".join(missing_vars))
        )
        # try to reconstruct formula_terms by adding space in between the regex
        # matches. If it doesn't exactly match the original, the formatting
        # of the attribute is incorrect
        reconstructed_formula = " ".join(m[0] + m[1] for m in matches)
        valid_formula_terms.assert_true(
            reconstructed_formula == formula_terms,
            "Attribute formula_terms is not well-formed",
        )

        valid_formula_terms.assert_true(
            standard_name in dimless_coords_dict,
            "unknown standard_name '{}' for dimensionless vertical coordinate {}"
            "".format(standard_name, coord),
        )
        # Cannot verify term completeness for an unrecognized standard_name
        if standard_name not in dimless_coords_dict:
            return valid_formula_terms.to_result()

        valid_formula_terms.assert_true(
            no_missing_terms(standard_name, terms, dimless_coords_dict),
            "{}'s formula_terms are invalid for {}, please see appendix D of CF 1.6"
            "".format(coord, standard_name),
        )

        return valid_formula_terms.to_result()
    def _check_grid_mapping_attr_condition(self, attr, attr_name, ret_val):
        """
        Evaluate a condition (or series of conditions) for a particular
        grid-mapping attribute.  Intentionally abstract: CF-version-specific
        subclasses must override this with their own attribute rules.

        :param attr: attribute to test the condition for
        :param str attr_name: name of the attribute
        :param list ret_val: list of results to append to
        :raises NotImplementedError: always, in this base class
        :rtype None
        :return None
        """
        raise NotImplementedError
def _dims_in_order(self, dimension_order):
"""
:param list dimension_order: A list of axes
:rtype: bool
:return: Returns True if the dimensions are in order U*, T, Z, Y, X,
False otherwise
"""
regx = regex.compile(r"^[^TZYX]*T?Z?Y?X?$")
dimension_string = "".join(dimension_order)
return regx.match(dimension_string) is not None
def _parent_var_attr_type_check(self, attr_name, var, ctx):
"""
Checks that an attribute has an equivalent value to a parent variable.
Takes an attribute name, variable, and test context on which to operate.
:param str attr_name: The name of the attribute to be checked
:param netCDF4.Variable var: The variable against which to be checked
:param compliance_checker.base.TestCtx ctx: The associated test context to modify
:rtype None
:return None
"""
attr_val = var.getncattr(attr_name)
if isinstance(attr_val, (str, bytes)):
type_match = (var.dtype is str) or (var.dtype.kind == "S")
val_type = type(attr_val)
else:
val_type = attr_val.dtype.type
type_match = val_type == var.dtype.type
ctx.assert_true(
type_match,
"Attribute '{}' (type: {}) and parent variable '{}' (type: {}) "
"must have equivalent datatypes".format(
attr_name, val_type, var.name, var.dtype.type
),
)
def _find_aux_coord_vars(self, ds, refresh=False):
"""
Returns a list of auxiliary coordinate variables
An auxiliary coordinate variable is any netCDF variable that contains
coordinate data, but is not a coordinate variable (in the sense of the term
defined by CF).
:param netCDF4.Dataset ds: An open netCDF dataset
:param bool refresh: if refresh is set to True, the cache is
invalidated.
:rtype: list
:return: List of variable names (str) that are defined to be auxiliary
coordinate variables.
"""
if self._aux_coords.get(ds, None) and refresh is False:
return self._aux_coords[ds]
self._aux_coords[ds] = cfutil.get_auxiliary_coordinate_variables(ds)
return self._aux_coords[ds]
def _find_boundary_vars(self, ds, refresh=False):
"""
Returns dictionary of boundary variables mapping the variable instance
to the name of the variable acting as a boundary variable.
:param netCDF4.Dataset ds: An open netCDF dataset
:param bool refresh: if refresh is set to True, the cache is
invalidated.
:rtype: list
:return: A list containing strings with boundary variable names.
"""
if self._boundary_vars.get(ds, None) and refresh is False:
return self._boundary_vars[ds]
self._boundary_vars[ds] = cfutil.get_cell_boundary_variables(ds)
return self._boundary_vars[ds]
def _find_ancillary_vars(self, ds, refresh=False):
"""
Returns a list of variable names that are defined as ancillary
variables in the dataset ds.
An ancillary variable generally is a metadata container and referenced
from other variables via a string reference in an attribute.
- via ancillary_variables (3.4)
- "grid mapping var" (5.6)
- TODO: more?
The result is cached by the passed in dataset object inside of this
checker. Pass refresh=True to redo the cached value.
:param netCDF4.Dataset ds: An open netCDF dataset
:param bool refresh: if refresh is set to True, the cache is
invalidated.
:rtype: list
:return: List of variable names (str) that are defined as ancillary
variables in the dataset ds.
"""
# Used the cached version if it exists and is not empty
if self._ancillary_vars.get(ds, None) and refresh is False:
return self._ancillary_vars[ds]
# Invalidate the cache at all costs
self._ancillary_vars[ds] = []
for name, var in ds.variables.items():
if hasattr(var, "ancillary_variables"):
for anc_name in var.ancillary_variables.split(" "):
if anc_name in ds.variables:
self._ancillary_vars[ds].append(anc_name)
if hasattr(var, "grid_mapping"):
gm_name = var.grid_mapping
if gm_name in ds.variables:
self._ancillary_vars[ds].append(gm_name)
return self._ancillary_vars[ds]
def _find_clim_vars(self, ds, refresh=False):
"""
Returns a list of variables that are likely to be climatology variables based on CF §7.4
:param netCDF4.Dataset ds: An open netCDF dataset
:param bool refresh: if refresh is set to True, the cache is
invalidated.
:rtype: list
:return: A list containing strings with geophysical variable
names.
"""
if self._clim_vars.get(ds, None) and refresh is False:
return self._clim_vars[ds]
climatology_variable = cfutil.get_climatology_variable(ds)
if climatology_variable:
self._clim_vars[ds].append(climatology_variable)
return self._clim_vars[ds]
    def _find_cf_standard_name_table(self, ds):
        """
        Parse out the `standard_name_vocabulary` attribute and download that
        version of the cf standard name table. If the standard name table has
        already been downloaded, use the cached version. Modifies `_std_names`
        attribute to store standard names. Returns True if the file exists and
        False if it fails to download.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :rtype: bool
        """
        # Get the standard name vocab
        standard_name_vocabulary = getattr(ds, "standard_name_vocabulary", "")

        # Try to parse this attribute to get version
        version = None
        try:
            if "cf standard name table" in standard_name_vocabulary.lower():
                # Strip punctuation/prefixes from each token, e.g. "(v34,"
                version = [
                    s.strip("(").strip(")").strip("v").strip(",")
                    for s in standard_name_vocabulary.split()
                ]
                # This assumes that table version number won't start with 0.
                version = [
                    s
                    for s in version
                    if s.isdigit() and len(s) <= 2 and not s.startswith("0")
                ]
                # Ambiguous: more than one candidate version number found
                if len(version) > 1:
                    return False
                else:
                    try:
                        version = version[0]
                    except IndexError:
                        warn(
                            "Cannot extract CF standard name version number "
                            "from standard_name_vocabulary string"
                        )
                        return False
            else:
                # Can't parse the attribute, use the packaged version
                return False
        # usually raised from .lower() with an incompatible (non-string)
        # data type
        except AttributeError:
            warn(
                "Cannot convert standard name table to lowercase. This can "
                "occur if a non-string standard_name_vocabulary global "
                "attribute is supplied"
            )
            return False

        # NOTE(review): this branch appears unreachable — `version` was
        # filtered to digits-only strings above, so it can never start
        # with "v".  Kept for safety; confirm before removing.
        if version.startswith("v"):  # i.e 'v34' -> '34' drop the v
            version = version[1:]

        # If the packaged version is what we're after, then we're good
        if version == self._std_names._version:
            print(
                "Using packaged standard name table v{0}".format(version),
                file=sys.stderr,
            )
            return False

        # Try to download the version specified
        try:
            data_directory = util.create_cached_data_dir()
            location = os.path.join(
                data_directory, "cf-standard-name-table-test-{0}.xml".format(version)
            )
            # Did we already download this before?
            if not os.path.isfile(location):
                util.download_cf_standard_name_table(version, location)
                print(
                    "Using downloaded standard name table v{0}".format(version),
                    file=sys.stderr,
                )
            else:
                print(
                    "Using cached standard name table v{0} from {1}".format(
                        version, location
                    ),
                    file=sys.stderr,
                )

            self._std_names = util.StandardNameTable(location)
            return True
        except Exception as e:
            # There was an error downloading the CF table. That's ok, we'll just use the packaged version
            warn(
                "Problem fetching standard name table:\n{0}\n"
                "Using packaged v{1}".format(e, self._std_names._version)
            )
            return False
def _find_coord_vars(self, ds, refresh=False):
"""
Returns a list of variable names that identify as coordinate variables.
The result is cached by the passed in dataset object inside of this
checker. Pass refresh=True to redo the cached value.
:param netCDF4.Dataset ds: An open netCDF dataset
:param bool refresh: if refresh is set to True, the cache is
invalidated.
:rtype: list
:return: A list of variables names (str) that are defined as coordinate
variables in the dataset ds.
"""
if ds in self._coord_vars and refresh is False:
return self._coord_vars[ds]
self._coord_vars[ds] = cfutil.get_coordinate_variables(ds)
return self._coord_vars[ds]
def _find_geophysical_vars(self, ds, refresh=False):
"""
Returns a list of geophysical variables. Modifies
`self._geophysical_vars`
:param netCDF4.Dataset ds: An open netCDF dataset
:param bool refresh: if refresh is set to True, the cache is
invalidated.
:rtype: list
:return: A list containing strings with geophysical variable
names.
"""
if self._geophysical_vars.get(ds, None) and refresh is False:
return self._geophysical_vars[ds]
self._geophysical_vars[ds] = cfutil.get_geophysical_variables(ds)
return self._geophysical_vars[ds]
def _find_metadata_vars(self, ds, refresh=False):
"""
Returns a list of netCDF variable instances for those that are likely metadata variables
:param netCDF4.Dataset ds: An open netCDF dataset
:param bool refresh: if refresh is set to True, the cache is
invalidated.
:rtype: list
:return: List of variable names (str) that are likely metadata
variable candidates.
"""
if self._metadata_vars.get(ds, None) and refresh is False:
return self._metadata_vars[ds]
self._metadata_vars[ds] = []
for name, var in ds.variables.items():
if name in self._find_ancillary_vars(ds) or name in self._find_coord_vars(
ds
):
continue
if name in (
"platform_name",
"station_name",
"instrument_name",
"station_id",
"platform_id",
"surface_altitude",
):
self._metadata_vars[ds].append(name)
elif getattr(var, "cf_role", "") != "":
self._metadata_vars[ds].append(name)
elif (
getattr(var, "standard_name", None) is None and len(var.dimensions) == 0
):
self._metadata_vars[ds].append(name)
return self._metadata_vars[ds]
    def _get_coord_axis_map(self, ds):
        """
        Returns a dictionary mapping each coordinate to a letter identifier
        describing the _kind_ of coordinate.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :rtype: dict
        :return: A dictionary with variable names mapped to axis abbreviations,
                 i.e. {'longitude': 'X', ... 'pressure': 'Z'}
        """
        expected = ["T", "Z", "Y", "X"]
        coord_vars = self._find_coord_vars(ds)
        coord_axis_map = {}

        # L - Unlimited Coordinates
        # T - Time coordinates
        # Z - Depth/Altitude Coordinate
        # Y - Y-Coordinate (latitude)
        # X - X-Coordinate (longitude)
        # A - Auxiliary Coordinate
        # I - Instance Coordinate

        time_variables = cfutil.get_time_variables(ds)
        lat_variables = cfutil.get_latitude_variables(ds)
        lon_variables = cfutil.get_longitude_variables(ds)
        z_variables = cfutil.get_z_variables(ds)

        # Classification order matters: unlimited beats axis, axis beats
        # standard_name, and heuristic (cfutil) matches come last.
        for coord_name in coord_vars:
            coord_var = ds.variables[coord_name]
            axis = getattr(coord_var, "axis", None)
            standard_name = getattr(coord_var, "standard_name", None)

            # Unlimited dimensions must come first
            if ds.dimensions[coord_name].isunlimited():
                coord_axis_map[coord_name] = "L"
            # axis takes precedence over standard_name
            elif axis in expected:
                coord_axis_map[coord_name] = axis
            elif standard_name == "time":
                coord_axis_map[coord_name] = "T"
            elif standard_name == "longitude":
                coord_axis_map[coord_name] = "X"
            elif standard_name == "latitude":
                coord_axis_map[coord_name] = "Y"
            elif standard_name in ["height", "depth", "altitude"]:
                coord_axis_map[coord_name] = "Z"
            elif cfutil.is_compression_coordinate(ds, coord_name):
                coord_axis_map[coord_name] = "C"
            elif coord_name in time_variables:
                coord_axis_map[coord_name] = "T"
            elif coord_name in z_variables:
                coord_axis_map[coord_name] = "Z"
            elif coord_name in lat_variables:
                coord_axis_map[coord_name] = "Y"
            elif coord_name in lon_variables:
                coord_axis_map[coord_name] = "X"
            else:
                # mark the coordinate variable as unknown
                coord_axis_map[coord_name] = "U"

        for dimension in self._get_instance_dimensions(ds):
            if dimension not in coord_axis_map:
                coord_axis_map[dimension] = "I"

        # Dimensions of auxiliary coordinate variables will be marked with A.
        # This is useful to help determine if the dimensions are used like a
        # mapping from grid coordinates to physical lat/lon
        for coord_name in self._find_aux_coord_vars(ds):
            coord_var = ds.variables[coord_name]
            # Skip label auxiliary coordinates (char arrays / VLEN strings)
            if hasattr(coord_var.dtype, "char") and coord_var.dtype.char == "S":
                continue
            elif coord_var.dtype == str:
                continue
            for dimension in coord_var.dimensions:
                if dimension not in coord_axis_map:
                    coord_axis_map[dimension] = "A"

        # If a dimension does not have a coordinate variable mark it as unknown
        # 'U'
        for dimension in ds.dimensions:
            if dimension not in coord_axis_map:
                coord_axis_map[dimension] = "U"

        return coord_axis_map
def _get_coord_vars(self, ds):
coord_vars = []
for name, var in ds.variables.items():
if (name,) == var.dimensions:
coord_vars.append(name)
return coord_vars
def _get_dimension_order(self, ds, name, coord_axis_map):
"""
Returns a list of strings corresponding to the named axis of the dimensions for a variable.
Example::
self._get_dimension_order(ds, 'temperature', coord_axis_map)
--> ['T', 'Y', 'X']
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: Name of the variable
:param dict coord_axis_map: A dictionary mapping each coordinate variable and dimension to a named axis
:rtype: list
:return: A list of strings corresponding to the named axis of the dimensions for a variable
"""
retval = []
variable = ds.variables[name]
for dim in variable.dimensions:
retval.append(coord_axis_map[dim])
return retval
def _get_instance_dimensions(self, ds):
"""
Returns a list of dimensions marked as instance dimensions
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:returns: A list of variable dimensions
"""
ret_val = []
for variable in ds.get_variables_by_attributes(
cf_role=lambda x: isinstance(x, str)
):
if variable.ndim > 0:
ret_val.append(variable.dimensions[0])
return ret_val
def _get_pretty_dimension_order(self, ds, name):
"""
Returns a comma separated string of the dimensions for a specified
variable
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: A string with a valid NetCDF variable name for the
dataset
:rtype: str
:return: A comma separated string of the variable's dimensions
"""
dim_names = []
for dim in ds.variables[name].dimensions:
dim_name = dim
if ds.dimensions[dim].isunlimited():
dim_name += " (Unlimited)"
dim_names.append(dim_name)
return ", ".join(dim_names)
def _get_pretty_dimension_order_with_type(self, ds, name, dim_types):
"""
Returns a comma separated string of the dimensions for a specified
variable of format "DIMENSIONS_NAME (DIMENSION_TYPE[, unlimited])"
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: A string with a valid NetCDF variable name for the
dataset
:param list dim_types: A list of strings returned by
_get_dimension_order for the same "name"
:rtype: str
:return: A comma separated string of the variable's dimensions
"""
dim_names = []
for dim, dim_type in zip(ds.variables[name].dimensions, dim_types):
dim_name = "{} ({}".format(dim, dim_type)
if ds.dimensions[dim].isunlimited():
dim_name += ", unlimited)"
else:
dim_name += ")"
dim_names.append(dim_name)
return ", ".join(dim_names)
def _is_station_var(self, var):
"""
Returns True if the NetCDF variable is associated with a station, False
otherwise.
:param netCDF4.Variable var: a variable in an existing NetCDF dataset
:rtype: bool
:return: Status of whether variable appears to be associated with a
station
"""
if getattr(var, "standard_name", None) in (
"platform_name",
"station_name",
"instrument_name",
):
return True
return False
def _split_standard_name(self, standard_name):
"""
Returns a tuple of the standard_name and standard_name modifier
Nones are used to represent the absence of a modifier or standard_name
:rtype: tuple
:return: 2-tuple of standard_name and modifier as strings
"""
if isinstance(standard_name, str) and " " in standard_name:
return standard_name.split(" ", 1)
# if this isn't a string, then it doesn't make sense to split
# -- treat value as standard name with no modifier
else:
return standard_name, None
def check_appendix_a(self, ds):
    """
    Validates a CF dataset against the contents of its Appendix A table for
    attribute types and locations. Returns a list of results with the
    outcomes of the Appendix A validation results against the existing
    attributes in the dataset.

    Only runs when 'enable_appendix_a_checks' is present in self.options.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: A list of results corresponding to the results returned
    """
    # if 'enable_appendix_a_checks' isn't specified in the checks,
    # don't do anything on this check
    results = []
    if "enable_appendix_a_checks" not in self.options:
        return results
    # Only global attributes that Appendix A actually describes are checked
    possible_global_atts = set(ds.ncattrs()).intersection(self.appendix_a.keys())
    # Maps Appendix A "Use" letters to human-readable location names
    attr_location_ident = {
        "G": "global attributes",
        "C": "coordinate data",
        "D": "non-coordinate data",
    }

    def att_loc_print_helper(att_letter):
        """
        Returns a string corresponding to attr_location ident in
        human-readable form. E.g. an input of 'G' will return
        "global attributes (G)"

        :param str att_letter: An attribute letter corresponding to the
                               "Use" column in CF Appendix A
        :rtype: str
        :return: A string with a human-readable name followed by the input
                 letter specified
        """
        return "{} ({})".format(
            attr_location_ident.get(att_letter, "other"), att_letter
        )

    def _att_loc_msg(att_loc):
        """
        Helper method for formatting an error message when an attribute
        appears in the improper location corresponding to the "Use" column
        in CF Appendix A.

        :param set att_loc: A set with the possible valid locations of the
                            attribute corresponding to the "Use" column
                            in CF Appendix A
        :rtype: str
        :return: A human-readable string with the possible valid locations
                 of the attribute
        """
        att_loc_len = len(att_loc)
        # this is a fallback in case an empty att_loc is passed
        # it generally should not occur
        valid_loc = "no locations in the dataset"
        loc_sort = sorted(att_loc)
        if att_loc_len == 1:
            valid_loc = att_loc_print_helper(loc_sort[0])
        elif att_loc_len == 2:
            valid_loc = "{} and {}".format(
                att_loc_print_helper(loc_sort[0]), att_loc_print_helper(loc_sort[1])
            )
        # shouldn't be reached under normal circumstances, as any attribute
        # should be either G, C, or D but if another
        # category is added, this will be useful.
        else:
            valid_loc = ", ".join(loc_sort[:-1]) + ", and {}".format(
                att_loc_print_helper(loc_sort[-1])
            )
        return "This attribute may only appear in {}.".format(valid_loc)

    # --- global attribute pass: location then type check ---
    for global_att_name in possible_global_atts:
        global_att = ds.getncattr(global_att_name)
        att_dict = self.appendix_a[global_att_name]
        att_loc = att_dict["attr_loc"]
        valid_loc_warn = _att_loc_msg(att_loc)
        if att_dict["cf_section"] is not None:
            # collapse e.g. "4.4.1" to "4.4" to look up a section title
            subsection_test = ".".join(att_dict["cf_section"].split(".")[:2])
            section_loc = self.section_titles.get(
                subsection_test, att_dict["cf_section"]
            )
        else:
            section_loc = None
        test_ctx = TestCtx(BaseCheck.HIGH, section_loc)
        test_ctx.out_of += 1
        if "G" not in att_loc:
            test_ctx.messages.append(
                '[Appendix A] Attribute "{}" should not be present in global (G) '
                "attributes. {}".format(global_att_name, valid_loc_warn)
            )
        else:
            result = self._handle_dtype_check(global_att, global_att_name, att_dict)
            if not result[0]:
                test_ctx.messages.append(result[1])
            else:
                test_ctx.score += 1
        results.append(test_ctx.to_result())

    # --- variable attribute pass: coordinate vars as "C", the rest as "D" ---
    noncoord_vars = set(ds.variables) - set(self.coord_data_vars)
    for var_set, coord_letter in (
        (self.coord_data_vars, "C"),
        (noncoord_vars, "D"),
    ):
        for var_name in var_set:
            var = ds.variables[var_name]
            possible_attrs = set(var.ncattrs()).intersection(self.appendix_a.keys())
            for att_name in possible_attrs:
                att_dict = self.appendix_a[att_name]
                if att_dict["cf_section"] is not None:
                    subsection_test = ".".join(
                        att_dict["cf_section"].split(".")[:2]
                    )
                    section_loc = self.section_titles.get(
                        subsection_test, att_dict["cf_section"]
                    )
                else:
                    section_loc = None
                test_ctx = TestCtx(BaseCheck.HIGH, section_loc, variable=var_name)
                att_loc = att_dict["attr_loc"]
                valid_loc_warn = _att_loc_msg(att_loc)
                att = var.getncattr(att_name)
                test_ctx.out_of += 1
                if coord_letter not in att_loc:
                    test_ctx.messages.append(
                        '[Appendix A] Attribute "{}" should not be present in {} '
                        'variable "{}". {}'.format(
                            att_name,
                            att_loc_print_helper(coord_letter),
                            var_name,
                            valid_loc_warn,
                        )
                    )
                else:
                    result = self._handle_dtype_check(att, att_name, att_dict, var)
                    if not result[0]:
                        test_ctx.messages.append(result[1])
                    else:
                        test_ctx.score += 1
                results.append(test_ctx.to_result())
    return results
def _check_attr_type(self, attr_name, attr_type, attribute, variable=None):
"""
Check if an attribute `attr` is of the type `attr_type`. Upon getting
a data type of 'D', the attr must have the same data type as the
variable it is assigned to.
Attributes designated type 'S' must be of type `str`. 'N' require
numeric types, and 'D' requires the attribute type match the type
of the variable it is assigned to.
:param str attr_name: name of attr being checked (to format message)
:param str attr_type: the correct type of the attribute
:param attribute: attribute to check
:param variable: if given, type should match attr
:rtype tuple
:return A two-tuple that contains pass/fail status as a boolean and
a message string (or None if unset) as the second element.
"""
if attr_type == "S":
if not isinstance(attribute, str):
return [False, "{} must be a string".format(attr_name)]
else:
# if it's not a string, it should have a numpy dtype
underlying_dtype = getattr(attribute, "dtype", None)
# TODO check for np.nan separately
if underlying_dtype is None:
return [False, "{} must be a numeric type".format(attr_name)]
# both D and N should be some kind of numeric value
is_numeric = np.issubdtype(underlying_dtype, np.number)
if attr_type == "N":
if not is_numeric:
return [False, "{} must be a numeric type".format(attr_name)]
elif attr_type == "D":
# TODO: handle edge case where variable is unset here
temp_ctx = TestCtx()
self._parent_var_attr_type_check(attr_name, variable, temp_ctx)
var_dtype = getattr(variable, "dtype", None)
if temp_ctx.messages:
return (
False,
"{} must be numeric and must be equivalent to {} dtype".format(
attr_name, var_dtype
),
)
else:
# If we reached here, we fell off with an unrecognized type
return (
False,
"{} has unrecognized type '{}'".format(attr_name, attr_type),
)
# pass if all other possible failure conditions have been evaluated
return (True, None)
def _handle_dtype_check(self, attribute, attr_name, attr_dict, variable=None):
    """
    Helper function for Appendix A checks.

    :param attribute: The value of the attribute being checked
    :param str attr_name: The name of the attribute being processed
    :param dict attr_dict: The dict entry with type and attribute location
                           information corresponding to this attribute
    :param variable: if given, the variable whose type to check against
    :rtype: tuple
    :return: A two-tuple that contains pass/fail status as a boolean and
             a message string (or None if unset) as the second element.
    :raises ValueError: if a non-global attribute is passed without its
                        owning variable
    """
    attr_type = attr_dict["Type"]
    if variable is None and "G" not in attr_dict["attr_loc"]:
        # fixed message: adjacent string literals previously produced a
        # double space ("a  variable")
        raise ValueError(
            "Non-global attributes must be associated with a variable"
        )
    attr_str = (
        "Global attribute {}".format(attr_name)
        if "G" in attr_dict["attr_loc"] and variable is None
        else "Attribute {} in variable {}".format(attr_name, variable.name)
    )
    # check the type; coerce to a mutable list so the message can be
    # formatted in place regardless of whether the helper returned a
    # list or a tuple (tuples previously crashed the item assignment)
    return_value = list(
        self._check_attr_type(attr_name, attr_type, attribute, variable)
    )
    # if the second element is a string, format it
    if isinstance(return_value[1], str):
        return_value[1] = return_value[1].format(attr_str)
    # convert to tuple for immutability and return
    return tuple(return_value)
class CFNCCheck(BaseNCCheck, CFBaseCheck):
    """Inherits from both BaseNCCheck and CFBaseCheck to support
    checking netCDF datasets. Must inherit in this order, or certain
    attributes from BaseNCCheck (like supported_ds) will not be passed to
    CFNCCheck."""

    # No additional behavior; this class only establishes the MRO.
    pass
# CF Appendix A attribute table. Each entry maps an attribute name to:
#   "Type":     expected value type — "S" string, "N" numeric,
#               "D" same dtype as the owning variable
#   "attr_loc": permitted locations ("Use" column) — "G" global,
#               "C" coordinate data, "D" non-coordinate data
#   "cf_section": the CF section defining the attribute, or None when the
#               attribute has no dedicated section
appendix_a_base = {
    "Conventions": {"Type": "S", "attr_loc": {"G"}, "cf_section": None},
    "_FillValue": {"Type": "D", "attr_loc": {"D", "C"}, "cf_section": None},
    "add_offset": {"Type": "N", "attr_loc": {"D"}, "cf_section": "8.1"},
    "ancillary_variables": {"Type": "S", "attr_loc": {"D"}, "cf_section": "3.4"},
    "axis": {"Type": "S", "attr_loc": {"C"}, "cf_section": "4"},
    "bounds": {"Type": "S", "attr_loc": {"C"}, "cf_section": "7.1"},
    "calendar": {"Type": "S", "attr_loc": {"C"}, "cf_section": "4.4.1"},
    "cell_measures": {"Type": "S", "attr_loc": {"D"}, "cf_section": "7.2"},
    "cell_methods": {"Type": "S", "attr_loc": {"D"}, "cf_section": "7.3"},
    # cf_role type is "C" in document, which does not correspond
    # to types used, replaced with "S"
    "cf_role": {"Type": "S", "attr_loc": {"C"}, "cf_section": "9.5"},
    "climatology": {"Type": "S", "attr_loc": {"C"}, "cf_section": "7.4"},
    # comment was removed in this implementation
    "compress": {"Type": "S", "attr_loc": {"C"}, "cf_section": "8.2"},
    "coordinates": {"Type": "S", "attr_loc": {"D"}, "cf_section": "5"},
    # featureType type is "C" in document, which does not
    # correspond to types used, replaced with "S"
    "featureType": {"Type": "S", "attr_loc": {"G"}, "cf_section": "9.4"},
    "flag_masks": {"Type": "D", "attr_loc": {"D"}, "cf_section": "3.5"},
    "flag_meanings": {"Type": "S", "attr_loc": {"D"}, "cf_section": "3.5"},
    "flag_values": {"Type": "D", "attr_loc": {"D"}, "cf_section": "3.5"},
    "formula_terms": {"Type": "S", "attr_loc": {"C"}, "cf_section": "4.3.2"},
    "grid_mapping": {"Type": "S", "attr_loc": {"D"}, "cf_section": "5.6"},
    "history": {"Type": "S", "attr_loc": {"G"}, "cf_section": None},
    #'instance_dimension': {'Type': 'N', 'attr_loc': {'D'}, 'cf_section': '9.3'},
    "institution": {"Type": "S", "attr_loc": {"G", "D"}, "cf_section": "2.6.2"},
    "leap_month": {"Type": "N", "attr_loc": {"C"}, "cf_section": "4.4.1"},
    "leap_year": {"Type": "N", "attr_loc": {"C"}, "cf_section": "4.4.1"},
    "long_name": {"Type": "S", "attr_loc": {"D", "C"}, "cf_section": "3.2"},
    "missing_value": {"Type": "D", "attr_loc": {"D", "C"}, "cf_section": "2.5.1"},
    "month_lengths": {"Type": "N", "attr_loc": {"C"}, "cf_section": "4.4.1"},
    "positive": {"Type": "S", "attr_loc": {"C"}, "cf_section": None},
    "references": {"Type": "S", "attr_loc": {"G", "D"}, "cf_section": "2.6.2"},
    #'sample_dimension': {'Type': 'N', 'attr_loc': {'D'}, 'cf_section': '9.3'},
    "scale_factor": {"Type": "N", "attr_loc": {"D"}, "cf_section": "8.1"},
    "source": {"Type": "S", "attr_loc": {"G", "D"}, "cf_section": "2.6.2"},
    "standard_error_multiplier": {"Type": "N", "attr_loc": {"D"}, "cf_section": None},
    "standard_name": {"Type": "S", "attr_loc": {"D", "C"}, "cf_section": "3.3"},
    "title": {"Type": "S", "attr_loc": {"G"}, "cf_section": None},
    "units": {"Type": "S", "attr_loc": {"D", "C"}, "cf_section": "3.1"},
    "valid_max": {"Type": "N", "attr_loc": {"D", "C"}, "cf_section": None},
    "valid_min": {"Type": "N", "attr_loc": {"D", "C"}, "cf_section": None},
    "valid_range": {"Type": "N", "attr_loc": {"D", "C"}, "cf_section": None},
}
class CF1_6Check(CFNCCheck):
"""CF-1.6-specific implementation of CFBaseCheck; supports checking
netCDF datasets.
These checks are translated documents:
http://cf-pcmdi.llnl.gov/documents/cf-conventions/1.6/cf-conventions.html
http://cf-pcmdi.llnl.gov/conformance/requirements-and-recommendations/1.6/"""
register_checker = True
_cc_spec = "cf"
_cc_spec_version = "1.6"
_cc_description = "Climate and Forecast Conventions (CF)"
_cc_url = "http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html"
_cc_display_headers = {3: "Errors", 2: "Warnings", 1: "Info"}
appendix_a = appendix_a_base
def __init__(self, options=None):  # initialize with parent methods and data
    """Instantiate the CF-1.6 checker.

    :param options: optional checker options forwarded to the parent
        (e.g. 'enable_appendix_a_checks')
    """
    super(CF1_6Check, self).__init__(options)
    # CF-1.6-specific lookup tables (defined elsewhere in this module)
    self.cell_methods = cell_methods16
    self.grid_mapping_dict = grid_mapping_dict16
    self.grid_mapping_attr_types = grid_mapping_attr_types16
###############################################################################
# Chapter 2: NetCDF Files and Components
###############################################################################
def check_data_types(self, ds):
    """
    Checks the data type of all netCDF variables to ensure they are valid
    data types under CF.

    CF §2.2 The netCDF data types char, byte, short, int, float or real, and
    double are all acceptable

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: compliance_checker.base.Result
    """
    fails = []
    total = len(ds.variables)
    for k, v in ds.variables.items():
        # A variable passes if its dtype is the Python str type
        # (netCDF4 vlen strings), a byte-string kind ("S"), or matches
        # one of the CF-acceptable numeric types enumerated below.
        # NOTE(review): int64 ("i8") is deliberately absent, matching
        # the classic-model type list in CF §2.2.
        if (
            v.dtype is not str
            and v.dtype.kind != "S"
            and all(
                v.dtype.type != t
                for t in (
                    np.character,
                    np.dtype("|S1"),
                    np.dtype("b"),
                    np.dtype("i2"),
                    np.dtype("i4"),
                    np.float32,
                    np.double,
                )
            )
        ):
            fails.append(
                "The variable {} failed because the datatype is {}".format(
                    k, v.datatype
                )
            )
    # Score is the number of passing variables out of the total
    return Result(
        BaseCheck.HIGH,
        (total - len(fails), total),
        self.section_titles["2.2"],
        msgs=fails,
    )
def check_child_attr_data_types(self, ds):
    """
    For any variables which contain any of the following attributes:
        - valid_min/valid_max
        - valid_range
        - scale_factor
        - add_offset
        - _FillValue
    the data type of the attribute must match the type of its parent
    variable as specified in the NetCDF User Guide (NUG)
    https://www.unidata.ucar.edu/software/netcdf/docs/attribute_conventions.html,
    referenced in the CF Conventions in Section 2.5.2
    (http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/cf-conventions.html#missing-data)

    :param netCDF4.Dataset ds: open netCDF dataset object
    :rtype: compliance_checker.base.Result
    """
    ctx = TestCtx(BaseCheck.MEDIUM, self.section_titles["2.5"])
    special_attrs = {
        "actual_range",
        "valid_min",
        "valid_max",
        "valid_range",
        "_FillValue",
    }
    # Delegate the dtype comparison for every special attribute that is
    # actually present on each variable.
    for variable in ds.variables.values():
        for att_name in special_attrs.intersection(variable.ncattrs()):
            self._parent_var_attr_type_check(att_name, variable, ctx)
    return ctx.to_result()
def _check_add_offset_scale_factor_type(self, variable, attr_name):
    """
    Reusable function for checking both add_offset and scale_factor.

    Passes when the attribute and the variable share an equivalent data
    type, or when the variable is an integer type (byte/short/int) and
    the attribute is a floating point type (float/double), per CF §8.1.

    :param netCDF4.Variable variable: variable owning the attribute
    :param str attr_name: either "add_offset" or "scale_factor"
    :rtype: compliance_checker.base.Result
    """
    # Every fragment is f-prefixed; the original only formatted the
    # first line, leaving literal "{variable.name}" text in the message.
    # Also fixed typo "quivalent" -> "equivalent".
    msg = (
        f"Variable {variable.name} and {attr_name} must be equivalent "
        f"data types or {variable.name} must be of type byte, short, or int "
        f"and {attr_name} must be float or double"
    )
    att = getattr(variable, attr_name, None)
    if not isinstance(att, (np.number, float)):  # can't compare dtypes
        val = False
    else:
        # Normalize to a numpy dtype so plain Python floats (which have
        # no .dtype attribute) are handled too. The previous code used
        # np.float/np.int (removed from numpy) and isinstance() checks
        # against dtype objects that could never succeed.
        att_dtype = np.dtype(type(att))
        # dtype kind "f" = float/double; kind "i" = byte/short/int
        val = (att_dtype == variable.dtype) or (
            att_dtype.kind == "f" and variable.dtype.kind == "i"
        )
    return Result(BaseCheck.MEDIUM, val, self.section_titles["8.1"], [msg])
def check_add_offset_scale_factor_type(self, ds):
    """
    If a variable has the attributes add_offset and scale_factor,
    check that the variables and attributes are of the same type
    OR that the variable is of type byte, short or int and the
    attributes are of type float or double.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of Results
    """
    results = []
    add_offset_vars = ds.get_variables_by_attributes(
        add_offset=lambda x: x is not None
    )
    scale_factor_vars = ds.get_variables_by_attributes(
        scale_factor=lambda x: x is not None
    )
    # get_variables_by_attributes returns Variable objects, so pass them
    # straight to the helper. The original called a misspelled helper
    # (_check_scale_factor_add_offset) and indexed ds.variables with a
    # Variable object instead of a name, both of which raised at runtime.
    for att_name, att_vars in (
        ("add_offset", add_offset_vars),
        ("scale_factor", scale_factor_vars),
    ):
        results.extend(
            self._check_add_offset_scale_factor_type(var, att_name)
            for var in att_vars
        )
    return results
def check_naming_conventions(self, ds):
    """
    Checks the variable names to ensure they are valid CF variable names under CF.

    CF §2.3 Variable, dimension and attribute names should begin with a letter
    and be composed of letters, digits, and underscores.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of Results (variable, dimension, and attribute naming)
    """
    ret_val = []
    variable_naming = TestCtx(BaseCheck.MEDIUM, self.section_titles["2.3"])
    dimension_naming = TestCtx(BaseCheck.MEDIUM, self.section_titles["2.3"])
    attribute_naming = TestCtx(BaseCheck.MEDIUM, self.section_titles["2.3"])
    # Attributes exempt from the naming convention: infrastructure or
    # library-generated attributes rather than producer-chosen names
    ignore_attributes = [
        "_FillValue",
        "DODS",
        "_ChunkSizes",
        "_Coordinate",
        "_Unsigned",
    ]
    rname = regex.compile("^[A-Za-z][A-Za-z0-9_]*$")
    for name, variable in ds.variables.items():
        variable_naming.assert_true(
            rname.match(name) is not None,
            "variable {} should begin with a letter and be composed of "
            "letters, digits, and underscores".format(name),
        )
        # Keep track of all the attributes, we'll need to check them
        for attr in variable.ncattrs():
            if attr in ignore_attributes:
                continue
            # Special attributes made by THREDDS
            if attr.startswith("DODS"):
                continue
            # Ignore model produced attributes
            if attr.startswith("_Coordinate"):
                continue
            attribute_naming.assert_true(
                rname.match(attr) is not None,
                "attribute {}:{} should begin with a letter and be composed of "
                "letters, digits, and underscores".format(name, attr),
            )
    ret_val.append(variable_naming.to_result())
    for dimension in ds.dimensions:
        dimension_naming.assert_true(
            rname.match(dimension) is not None,
            # fixed typo: "latter" -> "letter"
            "dimension {} should begin with a letter and be composed of "
            "letters, digits, and underscores".format(dimension),
        )
    ret_val.append(dimension_naming.to_result())
    for global_attr in ds.ncattrs():
        # Special attributes made by THREDDS
        if global_attr.startswith("DODS"):
            continue
        if global_attr.startswith("EXTRA_DIMENSION"):
            continue
        attribute_naming.assert_true(
            rname.match(global_attr) is not None,
            "global attribute {} should begin with a letter and be composed of "
            "letters, digits, and underscores".format(global_attr),
        )
    # attribute_naming aggregates both variable and global attributes
    ret_val.append(attribute_naming.to_result())
    return ret_val
def check_names_unique(self, ds):
    """
    Checks the variable names for uniqueness regardless of case.

    CF §2.3 names should not be distinguished purely by case, i.e., if case
    is disregarded, no two names should be the same.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: compliance_checker.base.Result
    """
    total = len(ds.variables)
    # Count occurrences of each case-folded variable name
    occurrences = {}
    for var_name in ds.variables:
        key = var_name.lower()
        occurrences[key] = occurrences.get(key, 0) + 1
    fails = [
        "Variables are not case sensitive. Duplicate variables named: %s" % dup
        for dup, count in occurrences.items()
        if count > 1
    ]
    return Result(
        BaseCheck.MEDIUM,
        (total - len(fails), total),
        self.section_titles["2.3"],
        msgs=fails,
    )
def check_dimension_names(self, ds):
    """
    Checks variables contain no duplicate dimension names.

    CF §2.4 A variable may have any number of dimensions, including zero,
    and the dimensions must all have different names.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: compliance_checker.base.Result
    """
    total = len(ds.variables)
    fails = []
    for var_name, variable in ds.variables.items():
        # Tally each dimension name used by this variable
        counts = defaultdict(int)
        for dim in variable.dimensions:
            counts[dim] += 1
        fails.extend(
            "%s has two or more dimensions named %s" % (var_name, dim)
            for dim, count in counts.items()
            if count > 1
        )
    return Result(
        BaseCheck.HIGH,
        (total - len(fails), total),
        self.section_titles["2.4"],
        msgs=fails,
    )
def check_dimension_order(self, ds):
    """
    Checks each variable's dimension order to ensure that the order is
    consistent and in order under CF §2.4

    CF §2.4 If any or all of the dimensions of a variable have the
    interpretations of "date or time" (T), "height or depth" (Z),
    "latitude" (Y), or "longitude" (X) then we recommend, those dimensions
    to appear in the relative order T, then Z, then Y, then X in the CDL
    definition corresponding to the file. All other dimensions should,
    whenever possible, be placed to the left of the spatiotemporal
    dimensions.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: compliance_checker.base.Result
    """
    valid_dimension_order = TestCtx(BaseCheck.MEDIUM, self.section_titles["2.4"])
    # Build a map from coordinate variable to axis
    coord_axis_map = self._get_coord_axis_map(ds)
    # Check each variable's dimension order, excluding climatology and
    # bounds variables
    any_clim = cfutil.get_climatology_variable(ds)
    any_bounds = cfutil.get_cell_boundary_variables(ds)
    for name, variable in ds.variables.items():
        # Skip bounds/climatology variables, as they should implicitly
        # have the same order except for the bounds specific dimension.
        # This is tested later in the respective checks
        if name in any_bounds or name == any_clim:
            continue
        # Skip strings/labels
        if hasattr(variable.dtype, "char") and variable.dtype.char == "S":
            continue
        elif variable.dtype == str:
            continue
        # Dimensionless (scalar) variables have no order to validate
        if variable.dimensions:
            dimension_order = self._get_dimension_order(ds, name, coord_axis_map)
            valid_dimension_order.assert_true(
                self._dims_in_order(dimension_order),
                "{}'s spatio-temporal dimensions are not in the "
                "recommended order T, Z, Y, X and/or further dimensions "
                "are not located left of T, Z, Y, X. The dimensions (and "
                "their guessed types) are {} (with U: other/unknown; L: "
                "unlimited).".format(
                    name,
                    self._get_pretty_dimension_order_with_type(
                        ds, name, dimension_order
                    ),
                ),
            )
    return valid_dimension_order.to_result()
def check_fill_value_outside_valid_range(self, ds):
    """
    Checks each variable's _FillValue to ensure that it's in valid_range or
    between valid_min and valid_max according to CF §2.5.1

    CF §2.5.1 The _FillValue should be outside the range specified by
    valid_range (if used) for a variable.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of Results
    """
    valid_fill_range = TestCtx(BaseCheck.MEDIUM, self.section_titles["2.5"])
    for name, variable in ds.variables.items():
        # If the variable doesn't have a defined _FillValue don't check it.
        if not hasattr(variable, "_FillValue"):
            continue
        fill_value = variable._FillValue
        attrs = variable.ncattrs()
        # valid_range takes precedence over valid_min/valid_max
        if "valid_range" in attrs:
            if isinstance(variable.valid_range, str):
                m = "§2.5.1 Fill Values should be outside the range specified by valid_range"  # subsection message
                valid_fill_range.assert_true(
                    False,
                    "{};\n\t{}:valid_range must be a numeric type not a string".format(
                        m, name
                    ),
                )
                continue
            rmin, rmax = variable.valid_range
            spec_by = "valid_range"
        elif "valid_min" in attrs and "valid_max" in attrs:
            # Flag string-typed bounds individually, then skip the
            # range comparison if either is a string
            if isinstance(variable.valid_min, str):
                valid_fill_range.assert_true(
                    False,
                    "{}:valid_min must be a numeric type not a string".format(name),
                )
            if isinstance(variable.valid_max, str):
                valid_fill_range.assert_true(
                    False,
                    "{}:valid_max must be a numeric type not a string".format(name),
                )
            if isinstance(variable.valid_min, str) or isinstance(
                variable.valid_max, str
            ):
                continue
            rmin = variable.valid_min
            rmax = variable.valid_max
            spec_by = "valid_min/valid_max"
        else:
            continue
        # NaN fill values are trivially outside any finite range
        if np.isnan(fill_value):
            valid = True
        else:
            valid = fill_value < rmin or fill_value > rmax
        valid_fill_range.assert_true(
            valid,
            "{}:_FillValue ({}) should be outside the range specified by {} ({}, {})"
            "".format(name, fill_value, spec_by, rmin, rmax),
        )
    return valid_fill_range.to_result()
def check_convention_globals(self, ds):
    """
    Check the common global attributes are strings if they exist.

    CF §2.6.2 title/history global attributes, must be strings. Do not need
    to exist.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of Results
    """
    valid_globals = TestCtx(BaseCheck.MEDIUM, self.section_titles["2.6"])
    for attr in ("title", "history"):
        value = getattr(ds, attr, None)
        # isinstance() short-circuits before len() when the attribute
        # is absent or not a string
        valid_globals.assert_true(
            isinstance(value, str) and len(value),
            "§2.6.2 global attribute {} should exist and be a non-empty string"  # subsection message
            "".format(attr),
        )
    return valid_globals.to_result()
def check_convention_possibly_var_attrs(self, ds):
    """
    Check variable and global attributes are strings for recommended attributes under CF §2.6.2

    CF §2.6.2 institution, source, references, and comment, either global
    or assigned to individual variables. When an attribute appears both
    globally and as a variable attribute, the variable's version has
    precedence. Must be strings.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of Results
    """
    # The attrs are optional and only need to be non-empty strings when
    # they exist.
    recommended = ("institution", "source", "references", "comment")
    valid_attributes = TestCtx(BaseCheck.MEDIUM, self.section_titles["2.6"])
    attr_bin = set()
    # If the attribute is defined for any variable, check it and mark in
    # the set that we've seen it at least once.
    for name, variable in ds.variables.items():
        for attribute in variable.ncattrs():
            if attribute not in recommended:
                continue
            value = getattr(variable, attribute)
            valid_attributes.assert_true(
                isinstance(value, str) and len(value) > 0,
                "§2.6.2 {}:{} should be a non-empty string"
                "".format(name, attribute),
            )
            attr_bin.add(attribute)
    # Check all the global attributes too and mark if we've seen them
    for attribute in ds.ncattrs():
        if attribute not in recommended:
            continue
        value = getattr(ds, attribute)
        valid_attributes.assert_true(
            isinstance(value, str) and len(value) > 0,
            "§2.6.2 {} global attribute should be a non-empty string"
            "".format(attribute),
        )
        attr_bin.add(attribute)
    return valid_attributes.to_result()
###############################################################################
# Chapter 3: Description of the Data
###############################################################################
def check_units(self, ds):
    """
    Check the units attribute for all variables to ensure they are CF
    compliant under CF §3.1

    CF §3.1 The units attribute is required for all variables that represent dimensional quantities
    (except for boundary variables defined in Section 7.1, "Cell Boundaries" and climatology variables
    defined in Section 7.4, "Climatological Statistics").

    Units are not required for dimensionless quantities. A variable with no units attribute is assumed
    to be dimensionless. However, a units attribute specifying a dimensionless unit may optionally be
    included.

    - units required
    - type must be recognized by udunits
    - if standard name specified, must be consistent with standard name table, must also be consistent with a
      specified cell_methods attribute if present

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    ret_val = []
    # Units are required for coordinate, auxiliary coordinate,
    # geophysical, and forecast metadata variables
    coordinate_variables = self._find_coord_vars(ds)
    auxiliary_coordinates = self._find_aux_coord_vars(ds)
    geophysical_variables = self._find_geophysical_vars(ds)
    forecast_variables = cfutil.get_forecast_metadata_variables(ds)
    unit_required_variables = set(
        coordinate_variables
        + auxiliary_coordinates
        + geophysical_variables
        + forecast_variables
    )
    for name in unit_required_variables:
        # For reduced horizontal grids, the compression index variable does
        # not require units.
        if cfutil.is_compression_coordinate(ds, name):
            continue
        variable = ds.variables[name]
        # Skip instance coordinate variables
        if getattr(variable, "cf_role", None) is not None:
            continue
        # Skip labels
        if (
            hasattr(variable.dtype, "char") and variable.dtype.char == "S"
        ) or variable.dtype == str:
            continue
        standard_name = getattr(variable, "standard_name", None)
        standard_name, standard_name_modifier = self._split_standard_name(
            standard_name
        )
        units = getattr(variable, "units", None)
        valid_units = self._check_valid_cf_units(ds, name)
        ret_val.append(valid_units)
        units_attr_is_string = TestCtx(BaseCheck.MEDIUM, self.section_titles["3.1"])
        # side effects, but better than teasing out the individual result
        # (assert_true both records the result and returns the outcome,
        # gating the UDUNITS check on the units being a string at all)
        if units_attr_is_string.assert_true(
            isinstance(units, str),
            "units ({}) attribute of '{}' must be a string compatible with UDUNITS".format(
                units, variable.name
            ),
        ):
            valid_udunits = self._check_valid_udunits(ds, name)
            ret_val.append(valid_udunits)
        ret_val.append(units_attr_is_string.to_result())
        # Only compare against the standard name table when a standard
        # name is actually present
        if isinstance(standard_name, str):
            valid_standard_units = self._check_valid_standard_units(ds, name)
            ret_val.append(valid_standard_units)
    return ret_val
def _check_valid_cf_units(self, ds, variable_name):
    """
    Checks that the variable contains units attribute, the attribute is a
    string and the value is not deprecated by CF

    :param netCDF4.Dataset ds: An open netCDF dataset
    :param str variable_name: Name of the variable to be checked
    :rtype: compliance_checker.base.Result
    :return: Result of the units checks for this variable
    """
    # This list is straight from section 3
    deprecated = ["level", "layer", "sigma_level"]
    variable = ds.variables[variable_name]
    units = getattr(variable, "units", None)
    standard_name_full = getattr(variable, "standard_name", None)
    standard_name, standard_name_modifier = self._split_standard_name(
        standard_name_full
    )
    std_name_units_dimensionless = cfutil.is_dimensionless_standard_name(
        self._std_names._root, standard_name
    )
    # Is this even in the database? also, if there is no standard_name,
    # there's no way to know if it is dimensionless.
    # String/label variables are also exempt from the units requirement.
    should_be_dimensionless = (
        variable.dtype is str
        or (hasattr(variable.dtype, "char") and variable.dtype.char == "S")
        or std_name_units_dimensionless
        or standard_name is None
    )
    # 1) Units must exist
    valid_units = TestCtx(BaseCheck.HIGH, self.section_titles["3.1"])
    valid_units.assert_true(
        should_be_dimensionless or units is not None,
        "units attribute is required for {} when variable is not a dimensionless quantity".format(
            variable_name
        ),
    )
    # Don't bother checking the rest
    if units is None and not should_be_dimensionless:
        return valid_units.to_result()
    # 2) units attribute must be a string
    valid_units.assert_true(
        should_be_dimensionless or isinstance(units, str),
        "units attribute for {} needs to be a string".format(variable_name),
    )
    # 3) units are not deprecated
    valid_units.assert_true(
        units not in deprecated,
        'units for {}, "{}" are deprecated by CF 1.6'.format(variable_name, units),
    )
    return valid_units.to_result()
def _check_valid_udunits(self, ds, variable_name):
    """
    Checks that the variable's units are contained in UDUnits

    :param netCDF4.Dataset ds: An open netCDF dataset
    :param str variable_name: Name of the variable to be checked
    """
    variable = ds.variables[variable_name]
    units = getattr(variable, "units", None)
    raw_standard_name = getattr(variable, "standard_name", None)
    standard_name, standard_name_modifier = self._split_standard_name(
        raw_standard_name
    )
    std_name_units_dimensionless = cfutil.is_dimensionless_standard_name(
        self._std_names._root, standard_name
    )
    # String/label variables and dimensionless standard names pass
    # automatically
    is_string_type = variable.dtype is str or (
        hasattr(variable.dtype, "char") and variable.dtype.char == "S"
    )
    should_be_dimensionless = is_string_type or std_name_units_dimensionless
    valid_udunits = TestCtx(BaseCheck.HIGH, self.section_titles["3.1"])
    are_udunits = units is not None and util.units_known(units)
    valid_udunits.assert_true(
        should_be_dimensionless or are_udunits,
        'units for {}, "{}" are not recognized by UDUNITS'.format(
            variable_name, units
        ),
    )
    return valid_udunits.to_result()
def _check_valid_standard_units(self, ds, variable_name):
    """
    Checks that the variable's units are appropriate for the standard name
    according to the CF standard name table and coordinate sections in CF
    1.6.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :param str variable_name: Name of the variable to be checked
    :rtype: compliance_checker.base.Result
    """
    variable = ds.variables[variable_name]
    units = getattr(variable, "units", None)
    standard_name = getattr(variable, "standard_name", None)
    valid_standard_units = TestCtx(BaseCheck.HIGH, self.section_titles["3.1"])

    # If the variable is supposed to be dimensionless, it automatically passes
    std_name_units_dimensionless = cfutil.is_dimensionless_standard_name(
        self._std_names._root, standard_name
    )
    standard_name, standard_name_modifier = self._split_standard_name(standard_name)

    standard_entry = self._std_names.get(standard_name, None)
    if standard_entry is not None:
        canonical_units = standard_entry.canonical_units
    else:
        # Any unit comparisons with None returns False
        canonical_units = None

    # Other standard_name modifiers have the same units as the
    # unmodified standard name or are not checked for units.
    if standard_name_modifier == "number_of_observations":
        canonical_units = "1"

    # BUGFIX: units may be absent or non-string; guard so the latitude and
    # longitude branches below fail the check instead of raising
    # AttributeError on units.lower().
    units_is_string = isinstance(units, str)

    # This section represents the different cases where simple udunits
    # comparison isn't comprehensive enough to determine if the units are
    # appropriate under CF

    # UDUnits accepts "s" as a unit of time but it should be <unit> since <epoch>
    if standard_name == "time":
        valid_standard_units.assert_true(
            util.units_convertible(units, "seconds since 1970-01-01"),
            "time must be in a valid units format <unit> since <epoch> "
            "not {}".format(units),
        )

    # UDunits can't tell the difference between east and north facing coordinates
    elif standard_name == "latitude":
        # degrees is allowed if using a transformed grid
        allowed_units = cfutil.VALID_LAT_UNITS | {"degrees"}
        valid_standard_units.assert_true(
            units_is_string and units.lower() in allowed_units,
            'variables defining latitude ("{}") must use degrees_north '
            "or degrees if defining a transformed grid. Currently "
            "{}".format(variable_name, units),
        )

    # UDunits can't tell the difference between east and north facing coordinates
    elif standard_name == "longitude":
        # degrees is allowed if using a transformed grid
        allowed_units = cfutil.VALID_LON_UNITS | {"degrees"}
        valid_standard_units.assert_true(
            units_is_string and units.lower() in allowed_units,
            'variables defining longitude ("{}") must use degrees_east '
            "or degrees if defining a transformed grid. Currently "
            "{}".format(variable_name, units),
        )

    # Standard Name table agrees the unit should be dimensionless
    elif std_name_units_dimensionless:
        valid_standard_units.assert_true(True, "")

    elif canonical_units is not None:
        valid_standard_units.assert_true(
            util.units_convertible(canonical_units, units),
            "units for variable {} must be convertible to {} "
            "currently they are {}".format(variable_name, canonical_units, units),
        )

    return valid_standard_units.to_result()
def check_standard_name(self, ds):
    """
    Check a variables's standard_name attribute to ensure that it meets CF
    compliance.

    CF §3.3 A standard name is associated with a variable via the attribute
    standard_name which takes a string value comprised of a standard name
    optionally followed by one or more blanks and a standard name modifier

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    ret_val = []
    coord_vars = self._find_coord_vars(ds)
    aux_coord_vars = self._find_aux_coord_vars(ds)
    axis_vars = cfutil.get_axis_variables(ds)
    flag_vars = cfutil.get_flag_variables(ds)
    geophysical_vars = self._find_geophysical_vars(ds)
    variables_requiring_standard_names = (
        coord_vars + aux_coord_vars + axis_vars + flag_vars + geophysical_vars
    )
    # A variable can fall into several categories above; set() ensures each
    # variable is only checked once.
    for name in set(variables_requiring_standard_names):
        # Compression indices used in reduced horizontal grids or
        # compression schemes do not require attributes other than compress
        if cfutil.is_compression_coordinate(ds, name):
            continue

        ncvar = ds.variables[name]

        # §9 doesn't explicitly allow instance variables as coordinates but
        # it's loosely implied. Just in case, skip it.
        if hasattr(ncvar, "cf_role"):
            continue

        # Unfortunately, §6.1 allows for string types to be listed as
        # coordinates.
        if hasattr(ncvar.dtype, "char") and ncvar.dtype.char == "S":
            continue
        elif ncvar.dtype == str:
            continue

        standard_name = getattr(ncvar, "standard_name", None)
        # Split "name modifier" into its two components (modifier may be None)
        standard_name, standard_name_modifier = self._split_standard_name(
            standard_name
        )
        long_name = getattr(ncvar, "long_name", None)
        long_or_std_name = TestCtx(BaseCheck.HIGH, self.section_titles["3.3"])
        if long_name is not None:
            long_name_present = True
            long_or_std_name.assert_true(
                isinstance(long_name, str),
                "Attribute long_name for variable {} must be a string".format(name),
            )
        else:
            long_name_present = False

        # §1.3 The long_name and standard_name attributes are used to
        # describe the content of each variable. For backwards
        # compatibility with COARDS neither is required, but use of at
        # least one of them is strongly recommended.

        # If standard_name is not defined but long_name is, don't continue
        # the check for this variable
        if standard_name is not None:
            standard_name_present = True
            valid_std_name = TestCtx(BaseCheck.HIGH, self.section_titles["3.3"])
            valid_std_name.assert_true(
                isinstance(standard_name, str),
                "Attribute standard_name for variable {} must be a string".format(
                    name
                ),
            )

            # Table membership is only meaningful for string names
            if isinstance(standard_name, str):
                valid_std_name.assert_true(
                    standard_name in self._std_names,
                    "standard_name {} is not defined in Standard Name Table v{}".format(
                        standard_name or "undefined", self._std_names._version
                    ),
                )

            ret_val.append(valid_std_name.to_result())

            # 2) optional - if modifiers, should be in table
            if standard_name_modifier is not None:
                valid_modifier = TestCtx(BaseCheck.HIGH, self.section_titles["3.3"])
                # Allowed modifiers per CF-1.6 Appendix C
                allowed = [
                    "detection_minimum",
                    "number_of_observations",
                    "standard_error",
                    "status_flag",
                ]
                valid_modifier.assert_true(
                    standard_name_modifier in allowed,
                    "standard_name modifier {} for variable {} is not a valid modifier "
                    "according to appendix C".format(standard_name_modifier, name),
                )
                ret_val.append(valid_modifier.to_result())
        else:
            standard_name_present = False

        # At least one of long_name / standard_name is strongly recommended
        long_or_std_name.assert_true(
            long_name_present or standard_name_present,
            "Attribute long_name or/and standard_name is highly recommended for variable {}".format(
                name
            ),
        )
        ret_val.append(long_or_std_name.to_result())
    return ret_val
def check_ancillary_variables(self, ds):
    """
    Checks the ancillary_variable attribute for all variables to ensure
    they are CF compliant.

    CF §3.4 It is a string attribute whose value is a blank separated list
    of variable names. The nature of the relationship between variables
    associated via ancillary_variables must be determined by other
    attributes. The variables listed by the ancillary_variables attribute
    will often have the standard name of the variable which points to them
    including a modifier (Appendix C, Standard Name Modifiers) to indicate
    the relationship.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    results = []
    candidates = ds.get_variables_by_attributes(
        ancillary_variables=lambda x: x is not None
    )
    for ncvar in candidates:
        name = ncvar.name
        ctx = TestCtx(BaseCheck.HIGH, self.section_titles["3.4"])
        ancillary_variables = ncvar.ancillary_variables
        is_string = isinstance(ancillary_variables, str)
        ctx.assert_true(
            is_string,
            "ancillary_variables attribute defined by {} "
            "should be string".format(name),
        )
        # A non-string value can't be split into names; stop here for it
        if not is_string:
            results.append(ctx.to_result())
            continue
        # Every blank-separated token must name a variable in the dataset
        for referenced in ancillary_variables.split():
            ctx.assert_true(
                referenced in ds.variables,
                "{} is not a variable in this dataset".format(referenced),
            )
        results.append(ctx.to_result())
    return results
def check_flags(self, ds):
    """
    Check the flag_values, flag_masks and flag_meanings attributes for
    variables to ensure they are CF compliant.

    CF §3.5 The attributes flag_values, flag_masks and flag_meanings are
    intended to make variables that contain flag values self describing.
    Status codes and Boolean (binary) condition flags may be expressed with
    different combinations of flag_values and flag_masks attribute
    definitions.

    The flag_values and flag_meanings attributes describe a status flag
    consisting of mutually exclusive coded values.

    The flag_meanings attribute is a string whose value is a blank
    separated list of descriptive words or phrases, one for each flag
    value. Each word or phrase should consist of characters from the
    alphanumeric set and the following five: '_', '-', '.', '+', '@'.

    The flag_masks and flag_meanings attributes describe a number of
    independent Boolean conditions using bit field notation by setting
    unique bits in each flag_masks value.

    The flag_masks, flag_values and flag_meanings attributes, used
    together, describe a blend of independent Boolean conditions and
    enumerated status codes. A flagged condition is identified by a bitwise
    AND of the variable value and each flag_masks value; a result that
    matches the flag_values value indicates a true condition.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    ret_val = []

    for name in cfutil.get_flag_variables(ds):
        variable = ds.variables[name]
        flag_values = getattr(variable, "flag_values", None)
        flag_masks = getattr(variable, "flag_masks", None)

        valid_flags_var = TestCtx(BaseCheck.HIGH, self.section_titles["3.5"])
        # Check that the variable defines mask or values
        valid_flags_var.assert_true(
            flag_values is not None or flag_masks is not None,
            "{} does not define either flag_masks or flag_values".format(name),
        )
        ret_val.append(valid_flags_var.to_result())

        valid_meanings = self._check_flag_meanings(ds, name)
        ret_val.append(valid_meanings)

        # check flag_values
        if flag_values is not None:
            ret_val.append(self._check_flag_values(ds, name))

        # check flag_masks
        if flag_masks is not None:
            ret_val.append(self._check_flag_masks(ds, name))

        # When both attributes are present, each flag_values entry ANDed
        # with its corresponding flag_masks entry must reproduce the value.
        if flag_values is not None and flag_masks is not None:
            # BUGFIX: the original passed a two-argument lambda to map()
            # over a single iterable of pairs, which raised TypeError
            # whenever this branch ran.
            allv = [value & mask == value for value, mask in zip(flag_values, flag_masks)]

            allvr = Result(BaseCheck.MEDIUM, all(allv), self.section_titles["3.5"])
            if not allvr.value:
                allvr.msgs = [
                    "flag masks and flag values for '{}' combined don't equal flag value".format(
                        name
                    )
                ]

            ret_val.append(allvr)

    return ret_val
def _check_flag_values(self, ds, name):
    """
    Checks a variable's flag_values attribute for compliance under CF

    - flag_values exists as an array
    - unique elements in flag_values
    - flag_values is the same dtype as the variable
    - flag_values is the same length as flag_meanings

    :param netCDF4.Dataset ds: An open netCDF dataset
    :param str name: Name of variable to check
    :rtype: compliance_checker.base.Result
    """
    variable = ds.variables[name]

    flag_values = getattr(variable, "flag_values", None)
    flag_meanings = getattr(variable, "flag_meanings", None)
    valid_values = TestCtx(BaseCheck.HIGH, self.section_titles["3.5"])

    # flag_values must be a list of values, not a string or anything else
    valid_values.assert_true(
        isinstance(flag_values, np.ndarray),
        "{}'s flag_values must be an array of values not {}".format(
            name, type(flag_values)
        ),
    )

    # We can't perform any more checks
    if not isinstance(flag_values, np.ndarray):
        return valid_values.to_result()

    # the flag values must be independent, no repeating values
    flag_set = set(flag_values)
    valid_values.assert_true(
        len(flag_set) == len(flag_values),
        "{}'s flag_values must be independent and can not be repeated".format(name),
    )

    # the data type for flag_values should be the same as the variable
    valid_values.assert_true(
        variable.dtype.type == flag_values.dtype.type,
        "flag_values ({}) must be the same data type as {} ({})"
        "".format(flag_values.dtype.type, name, variable.dtype.type),
    )

    if isinstance(flag_meanings, str):
        flag_meanings = flag_meanings.split()

    # BUGFIX: flag_meanings may be missing entirely; the original called
    # len(None) and raised TypeError. A missing flag_meanings attribute is
    # already reported by _check_flag_meanings, so only compare lengths
    # when the attribute is present.
    if flag_meanings is not None:
        valid_values.assert_true(
            len(flag_meanings) == len(flag_values),
            "{}'s flag_meanings and flag_values should have the same number ".format(
                name
            )
            + "of elements.",
        )

    return valid_values.to_result()
def _check_flag_masks(self, ds, name):
    """
    Check a variable's flag_masks attribute for compliance under CF

    - flag_masks exists as an array
    - flag_masks is the same dtype as the variable
    - variable's dtype can support bit-field
    - flag_masks is the same length as flag_meanings

    :param netCDF4.Dataset ds: An open netCDF dataset
    :param str name: Variable name
    :rtype: compliance_checker.base.Result
    """
    variable = ds.variables[name]

    flag_masks = variable.flag_masks
    # BUGFIX: flag_meanings is an attribute of the *variable* (as in the
    # sibling _check_flag_values / _check_flag_meanings methods), not of
    # the dataset; the original read it from ds.
    flag_meanings = getattr(variable, "flag_meanings", None)

    valid_masks = TestCtx(BaseCheck.HIGH, self.section_titles["3.5"])

    valid_masks.assert_true(
        isinstance(flag_masks, np.ndarray),
        "{}'s flag_masks must be an array of values not {}".format(
            name, type(flag_masks).__name__
        ),
    )

    # The remaining checks only make sense on an array
    if not isinstance(flag_masks, np.ndarray):
        return valid_masks.to_result()

    valid_masks.assert_true(
        variable.dtype.type == flag_masks.dtype.type,
        # BUGFIX: message typo "mustbe" corrected
        "flag_masks ({}) must be the same data type as {} ({})"
        "".format(flag_masks.dtype.type, name, variable.dtype.type),
    )

    # Bit-field expression requires an integer, byte-string, or byte type
    type_ok = (
        np.issubdtype(variable.dtype, np.integer)
        or np.issubdtype(variable.dtype, "S")
        or np.issubdtype(variable.dtype, "b")
    )

    valid_masks.assert_true(
        type_ok,
        "{}'s data type must be capable of bit-field expression".format(name),
    )

    if isinstance(flag_meanings, str):
        flag_meanings = flag_meanings.split()

    # BUGFIX: guard against a missing flag_meanings attribute; the missing
    # attribute itself is reported by _check_flag_meanings.
    if flag_meanings is not None:
        valid_masks.assert_true(
            len(flag_meanings) == len(flag_masks),
            "{} flag_meanings and flag_masks should have the same number ".format(
                name
            )
            + "of elements.",
        )

    return valid_masks.to_result()
def _check_flag_meanings(self, ds, name):
    """
    Check a variable's flag_meanings attribute for compliance under CF

    - flag_meanings exists
    - flag_meanings is a string
    - flag_meanings elements are valid strings

    :param netCDF4.Dataset ds: An open netCDF dataset
    :param str name: Variable name
    :rtype: compliance_checker.base.Result
    """
    variable = ds.variables[name]
    flag_meanings = getattr(variable, "flag_meanings", None)

    ctx = TestCtx(BaseCheck.HIGH, self.section_titles["3.5"])
    ctx.assert_true(
        flag_meanings is not None,
        "{}'s flag_meanings attribute is required for flag variables".format(name),
    )
    ctx.assert_true(
        isinstance(flag_meanings, str),
        "{}'s flag_meanings attribute must be a string".format(name),
    )
    # The remaining checks require a string value
    if not isinstance(flag_meanings, str):
        return ctx.to_result()

    ctx.assert_true(
        len(flag_meanings) > 0, "{}'s flag_meanings can't be empty".format(name)
    )

    # Each blank-separated word may only use alphanumerics and _ - . + @
    allowed_chars = regex.compile(r"^[0-9A-Za-z_\-.+@]+$")
    for word in flag_meanings.split():
        if allowed_chars.match(word) is None:
            ctx.assert_true(
                False,
                "{}'s flag_meanings attribute defined an illegal flag meaning ".format(
                    name
                )
                + "{}".format(word),
            )
    return ctx.to_result()
###############################################################################
# Chapter 4: Coordinate Types
###############################################################################
def check_coordinate_types(self, ds):
    """
    Check the axis attribute of coordinate variables

    CF §4 The attribute axis may be attached to a coordinate variable and
    given one of the values X, Y, Z or T which stand for a longitude,
    latitude, vertical, or time axis respectively. Alternatively the
    standard_name attribute may be used for direct identification.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    results = []
    for candidate in ds.get_variables_by_attributes(axis=lambda x: x is not None):
        name = candidate.name
        # Coordinate compressions project index arrays onto a 2-d grid of
        # valid coordinates; they are not themselves valid coordinates and
        # should not be checked as such.
        if cfutil.is_compression_coordinate(ds, name):
            continue

        variable = ds.variables[name]
        # cf_role isn't allowed in CF 1.6 but is in CF 1.7, and appears in
        # the wild often, so such variables are skipped here.
        if hasattr(variable, "cf_role"):
            continue

        # §6.1 allows label (string) variables as auxiliary coordinates;
        # those are exempt from the axis checks below.
        is_char_array = (
            hasattr(variable.dtype, "char") and variable.dtype.char == "S"
        )
        if is_char_array or variable.dtype == str:
            continue

        if getattr(variable, "axis", None) is not None:
            results.append(self._check_axis(ds, name))
    return results
def _check_axis(self, ds, name):
    """
    Checks that the axis attribute is a string and an allowed value, namely
    one of 'T', 'X', 'Y', or 'Z'.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :param str name: Name of the variable
    :rtype: compliance_checker.base.Result
    """
    allowed_axis = ["T", "X", "Y", "Z"]
    variable = ds.variables[name]
    axis = variable.axis

    valid_axis = TestCtx(BaseCheck.HIGH, self.section_titles["4"])
    # BUGFIX: the original bound a one-element *tuple*
    # (isinstance(axis, str),) due to a trailing comma. A non-empty tuple
    # is always truthy, so non-string axis values slipped past both guards
    # and crashed on len(axis).
    axis_is_string = isinstance(axis, str)
    valid_axis.assert_true(
        axis_is_string and len(axis) > 0,
        "{}'s axis attribute must be a non-empty string".format(name),
    )

    # If axis isn't a non-empty string we can't continue any checks
    if not axis_is_string or len(axis) == 0:
        return valid_axis.to_result()

    valid_axis.assert_true(
        axis in allowed_axis,
        "{}'s axis attribute must be T, X, Y, or Z, ".format(name)
        + "currently {}".format(axis),
    )
    return valid_axis.to_result()
def check_latitude(self, ds):
    """
    Check variable(s) that define latitude and are defined correctly according to CF.

    CF §4.1 Variables representing latitude must always explicitly include
    the units attribute; there is no default value. The recommended unit
    of latitude is degrees_north. Also acceptable are degree_north,
    degree_N, degrees_N, degreeN, and degreesN.

    Optionally, the latitude type may be indicated additionally by
    providing the standard_name attribute with the value latitude, and/or
    the axis attribute with the value Y.

    - Four checks per latitude variable
    - (H) latitude has units attribute
    - (M) latitude has an allowed units attribute
    - (L) latitude uses degrees_north (if not in rotated pole)
    - (M) latitude defines either standard_name or axis

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    ret_val = []

    # Accepted spellings; compared case-insensitively below
    allowed_lat_units = [
        "degrees_north",
        "degree_north",
        "degree_n",
        "degrees_n",
        "degreen",
        "degreesn",
    ]

    # Determine the grid mappings in this dataset
    grid_mapping = []
    grid_mapping_variables = cfutil.get_grid_mapping_variables(ds)
    for name in grid_mapping_variables:
        variable = ds.variables[name]
        grid_mapping_name = getattr(variable, "grid_mapping_name", None)
        if grid_mapping_name:
            grid_mapping.append(grid_mapping_name)

    latitude_variables = cfutil.get_latitude_variables(ds)
    for latitude in latitude_variables:
        variable = ds.variables[latitude]
        units = getattr(variable, "units", None)
        units_is_string = isinstance(units, str)
        standard_name = getattr(variable, "standard_name", None)
        axis = getattr(variable, "axis", None)

        # Check that latitude defines units
        valid_latitude = TestCtx(BaseCheck.HIGH, self.section_titles["4.1"])
        valid_latitude.assert_true(
            units is not None,
            "latitude variable '{}' must define units".format(latitude),
        )
        ret_val.append(valid_latitude.to_result())

        # Check that latitude uses allowed units
        allowed_units = TestCtx(BaseCheck.MEDIUM, self.section_titles["4.1"])
        if standard_name == "grid_latitude":
            e_n_units = cfutil.VALID_LAT_UNITS | cfutil.VALID_LON_UNITS
            # check that the units aren't in east and north degrees units,
            # but are convertible to angular units
            allowed_units.assert_true(
                units not in e_n_units and Unit(units) == Unit("degree"),
                "Grid latitude variable '{}' should use degree equivalent units without east or north components. "
                "Current units are {}".format(latitude, units),
            )
        else:
            allowed_units.assert_true(
                units_is_string and units.lower() in allowed_lat_units,
                "latitude variable '{}' should define valid units for latitude"
                "".format(latitude),
            )
        ret_val.append(allowed_units.to_result())

        # Check that latitude uses degrees_north
        if standard_name == "latitude" and units != "degrees_north":
            # This is only a recommendation and we won't penalize but we
            # will include a recommended action.
            msg = (
                "CF recommends latitude variable '{}' to use units degrees_north"
                "".format(latitude)
            )
            # Score (1, 1) means full credit; the message carries the hint
            recommended_units = Result(
                BaseCheck.LOW, (1, 1), self.section_titles["4.1"], [msg]
            )
            ret_val.append(recommended_units)

        y_variables = ds.get_variables_by_attributes(axis="Y")
        # Check that latitude defines either standard_name or axis
        definition = TestCtx(BaseCheck.MEDIUM, self.section_titles["4.1"])
        definition.assert_true(
            standard_name == "latitude" or axis == "Y" or y_variables != [],
            "latitude variable '{}' should define standard_name='latitude' or axis='Y'"
            "".format(latitude),
        )
        ret_val.append(definition.to_result())

    return ret_val
def check_longitude(self, ds):
    """
    Check variable(s) that define longitude and are defined correctly according to CF.

    CF §4.2 Variables representing longitude must always explicitly include
    the units attribute; there is no default value. The recommended unit
    of longitude is degrees_east. Also acceptable are degree_east,
    degree_E, degrees_E, degreeE, and degreesE.

    Optionally, the longitude type may be indicated additionally by
    providing the standard_name attribute with the value longitude, and/or
    the axis attribute with the value X.

    - Four checks per longitude variable
    - (H) longitude has units attribute
    - (M) longitude has an allowed units attribute
    - (L) longitude uses degrees_east (if not in rotated pole)
    - (M) longitude defines either standard_name or axis

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    # TODO we already have a check_latitude... I'm sure we can make DRYer
    ret_val = []

    # Accepted spellings; compared case-insensitively below
    allowed_lon_units = [
        "degrees_east",
        "degree_east",
        "degree_e",
        "degrees_e",
        "degreee",
        "degreese",
    ]

    # Determine the grid mappings in this dataset
    grid_mapping = []
    grid_mapping_variables = cfutil.get_grid_mapping_variables(ds)
    for name in grid_mapping_variables:
        variable = ds.variables[name]
        grid_mapping_name = getattr(variable, "grid_mapping_name", None)
        if grid_mapping_name:
            grid_mapping.append(grid_mapping_name)

    longitude_variables = cfutil.get_longitude_variables(ds)
    for longitude in longitude_variables:
        variable = ds.variables[longitude]
        units = getattr(variable, "units", None)
        units_is_string = isinstance(units, str)
        standard_name = getattr(variable, "standard_name", None)
        axis = getattr(variable, "axis", None)

        # NOTE see docstring--should below be 4.1 or 4.2?
        # Check that longitude defines units
        valid_longitude = TestCtx(BaseCheck.HIGH, self.section_titles["4.2"])
        valid_longitude.assert_true(
            units is not None,
            "longitude variable '{}' must define units".format(longitude),
        )
        ret_val.append(valid_longitude.to_result())

        # Check that longitude uses allowed units
        allowed_units = TestCtx(BaseCheck.MEDIUM, self.section_titles["4.2"])
        if standard_name == "grid_longitude":
            e_n_units = cfutil.VALID_LAT_UNITS | cfutil.VALID_LON_UNITS
            # check that the units aren't in east and north degrees units,
            # but are convertible to angular units
            allowed_units.assert_true(
                units not in e_n_units and Unit(units) == Unit("degree"),
                "Grid longitude variable '{}' should use degree equivalent units without east or north components. "
                "Current units are {}".format(longitude, units),
            )
        else:
            allowed_units.assert_true(
                units_is_string and units.lower() in allowed_lon_units,
                "longitude variable '{}' should define valid units for longitude"
                "".format(longitude),
            )
        ret_val.append(allowed_units.to_result())

        # Check that longitude uses degrees_east
        if standard_name == "longitude" and units != "degrees_east":
            # This is only a recommendation and we won't penalize but we
            # will include a recommended action.
            msg = (
                "CF recommends longitude variable '{}' to use units degrees_east"
                "".format(longitude)
            )
            # Score (1, 1) means full credit; the message carries the hint
            recommended_units = Result(
                BaseCheck.LOW, (1, 1), self.section_titles["4.2"], [msg]
            )
            ret_val.append(recommended_units)

        x_variables = ds.get_variables_by_attributes(axis="X")
        # Check that longitude defines either standard_name or axis
        definition = TestCtx(BaseCheck.MEDIUM, self.section_titles["4.2"])
        definition.assert_true(
            standard_name == "longitude" or axis == "X" or x_variables != [],
            "longitude variable '{}' should define standard_name='longitude' or axis='X'"
            "".format(longitude),
        )
        ret_val.append(definition.to_result())

    return ret_val
def check_dimensional_vertical_coordinate(
    self, ds, dimless_vertical_coordinates=dimless_vertical_coordinates_1_6
):
    """
    Check units for variables defining vertical position are valid under
    CF.

    CF §4.3.1 The units attribute for dimensional coordinates will be a
    string formatted as per the udunits.dat file. Acceptable units for
    vertical (depth or height) coordinate variables are units of pressure
    (e.g. bar, millibar, decibar, atm, Pa, hPa), units of length (e.g.
    meter, km), or other udunits.dat units that may under certain
    circumstances reference vertical position such as units of density or
    temperature. Plural forms are also acceptable.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    results = []
    for name in cfutil.get_z_variables(ds):
        variable = ds.variables[name]
        standard_name = getattr(variable, "standard_name", None)
        units = getattr(variable, "units", None)
        positive = getattr(variable, "positive", None)

        # Dimensionless vertical coordinates are covered by the §4.3.2
        # checks instead
        is_dimensionless = (
            hasattr(variable, "formula_terms")
            or standard_name in dimless_vertical_coordinates
        )
        if is_dimensionless:
            continue

        ctx = TestCtx(BaseCheck.HIGH, self.section_titles["4.3"])
        ctx.assert_true(
            isinstance(units, str) and units,
            "§4.3.1 {}'s units must be defined for vertical coordinates, "
            "there is no default".format(name),
        )

        # Non-pressure vertical coordinates must state their direction
        if not util.units_convertible("bar", units):
            ctx.assert_true(
                positive in ("up", "down"),
                "{}: vertical coordinates not defining pressure must include "
                "a positive attribute that is either 'up' or 'down'".format(name),
            )

        # _check_valid_standard_units, part of the Chapter 3 checks,
        # already verifies that this coordinate has valid units
        results.append(ctx.to_result())
    return results
def _check_dimensionless_vertical_coordinate_1_6(
    self, ds, vname, deprecated_units, ret_val, dim_vert_coords_dict
):
    """
    Check that a dimensionless vertical coordinate variable is valid under
    CF-1.6. Appends Results to ret_val in place.

    :param netCDF4.Dataset ds: open netCDF4 dataset
    :param str vname: variable name
    :param list deprecated_units: unit strings deprecated by CF
    :param list ret_val: array to append Results to
    :rtype None
    """
    variable = ds.variables[vname]
    standard_name = getattr(variable, "standard_name", None)
    units = getattr(variable, "units", None)
    formula_terms = getattr(variable, "formula_terms", None)

    # A variable with neither formula_terms nor a dimensionless standard
    # name is dimensional and handled by the §4.3.1 checks instead
    if formula_terms is None and standard_name not in dim_vert_coords_dict:
        return

    deprecation_ctx = TestCtx(BaseCheck.LOW, self.section_titles["4.3"])
    deprecation_ctx.assert_true(
        units not in deprecated_units,
        "§4.3.2: units are deprecated by CF in variable {}: {}"
        "".format(vname, units),
    )
    ret_val.append(deprecation_ctx.to_result())

    # Validate the formula_terms attribute against the coordinate table
    ret_val.append(self._check_formula_terms(ds, vname, dim_vert_coords_dict))
def check_dimensionless_vertical_coordinates(self, ds):
    """
    Check the validity of dimensionless coordinates under CF

    CF §4.3.2 The units attribute is not required for dimensionless
    coordinates.

    The standard_name attribute associates a coordinate with its definition
    from Appendix D, Dimensionless Vertical Coordinates. The definition
    provides a mapping between the dimensionless coordinate values and
    dimensional values that can positively and uniquely indicate the
    location of the data.

    A new attribute, formula_terms, is used to associate terms in the
    definitions with variables in a netCDF file. To maintain backwards
    compatibility with COARDS the use of these attributes is not required,
    but is strongly recommended.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    ret_val = []
    # Unit strings deprecated by CF for dimensionless vertical coordinates
    deprecated_units = ["level", "layer", "sigma_level"]
    # NOTE(cleanup): an unused `z_variables = cfutil.get_z_variables(ds)`
    # local was removed here; its result was never referenced.
    ret_val.extend(
        self._check_dimensionless_vertical_coordinates(
            ds,
            deprecated_units,
            self._check_dimensionless_vertical_coordinate_1_6,
            dimless_vertical_coordinates_1_6,
        )
    )

    return ret_val
def check_time_coordinate(self, ds):
    """
    Check variables defining time are valid under CF

    CF §4.4 Variables representing time must always explicitly include the
    units attribute; there is no default value. The units attribute takes
    a string value formatted as per the recommendations in the Udunits
    package, e.g. "<unit> since <reference time>"; the reference time is
    required. A time coordinate is identifiable from its units string
    alone. Optionally, the time coordinate may be indicated additionally
    by providing the standard_name attribute with an appropriate value,
    and/or the axis attribute with the value T.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    results = []
    for name in cfutil.get_time_variables(ds):
        variable = ds.variables[name]

        # A units attribute is mandatory; without it nothing else can be
        # checked for this variable
        if not hasattr(variable, "units"):
            results.append(
                Result(
                    BaseCheck.HIGH,
                    False,
                    self.section_titles["4.4"],
                    ["%s does not have units" % name],
                )
            )
            continue

        # Units exist: record a passing result for their presence
        results.append(Result(BaseCheck.HIGH, True, self.section_titles["4.4"]))

        # Units must be temporal so the coordinate is identifiable as time
        correct_units = util.units_temporal(variable.units)
        reasoning = None
        if not correct_units:
            reasoning = ["%s does not have correct time units" % name]
        results.append(
            Result(
                BaseCheck.HIGH, correct_units, self.section_titles["4.4"], reasoning
            )
        )
    return results
def check_calendar(self, ds):
    """
    Check the calendar attribute for variables defining time and ensure it
    is a valid calendar prescribed by CF.

    CF §4.4.1 In order to calculate a new date and time given a base date,
    base time and a time increment one must know what calendar to use. The
    values currently defined for calendar are: gregorian or standard,
    proleptic_gregorian, noleap or 365_day, all_leap or 366_day, 360_day,
    julian, and none.

    The calendar attribute may be set to none in climate experiments that
    simulate a fixed time of year; the time of year is indicated by the
    date in the reference time of the units attribute.

    If none of the calendars defined above applies, a non-standard
    calendar can be defined via the month_lengths attribute of the time
    axis (with leap_year and leap_month if leap years are included). The
    calendar attribute is not required when a non-standard calendar is
    being used, but when it takes a non-standard value the non-standard
    calendar must be defined using the appropriate attributes.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    valid_calendars = {
        "gregorian",
        "standard",
        "proleptic_gregorian",
        "noleap",
        "365_day",
        "all_leap",
        "366_day",
        "360_day",
        "julian",
        "none",
    }

    results = []
    # Only variables that define a calendar attribute are checked; a
    # missing calendar attribute is always acceptable
    for time_var in ds.get_variables_by_attributes(calendar=lambda c: c is not None):
        is_valid = time_var.calendar in valid_calendars
        messages = None
        if not is_valid:
            messages = [
                "§4.4.1 Variable %s should have a valid calendar: '%s' is not a valid calendar"
                % (time_var.name, time_var.calendar)
            ]
        # Passes if the calendar is valid, otherwise notifies of the
        # invalid calendar value
        results.append(
            Result(BaseCheck.LOW, is_valid, self.section_titles["4.4"], messages)
        )
    return results
###############################################################################
# Chapter 5: Coordinate Systems
###############################################################################
def check_aux_coordinates(self, ds):
    """
    Chapter 5 paragraph 3

    The dimensions of an auxiliary coordinate variable must be a subset of
    the dimensions of the variable with which the coordinate is associated,
    with two exceptions: string-valued label coordinates (§6.1) carry an
    extra maximum-string-length dimension, and ragged-array representations
    (Chapter 9, Discrete Sampling Geometries) connect data and coordinates
    by other means.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    results = []
    # Ragged-array representations are identified by a count variable
    # (sample_dimension, contiguous form) or an index variable
    # (instance_dimension, indexed form); their coordinates are exempt
    # from the subset rule, so skip the whole check.
    count_vars = ds.get_variables_by_attributes(
        sample_dimension=lambda x: x is not None
    )
    index_vars = ds.get_variables_by_attributes(
        instance_dimension=lambda x: x is not None
    )
    if count_vars or index_vars:
        return results

    for var_name in self._find_geophysical_vars(ds):
        data_var = ds.variables[var_name]
        coords_attr = getattr(data_var, "coordinates", None)
        # Without a non-empty string coordinates attribute there is
        # nothing to validate for this variable.
        if not (isinstance(coords_attr, str) and coords_attr):
            continue
        var_dims = set(data_var.dimensions)
        ctx = TestCtx(BaseCheck.HIGH, self.section_titles["5"])
        for aux_name in coords_attr.split():
            exists = aux_name in ds.variables
            ctx.assert_true(
                exists,
                "{}'s auxiliary coordinate specified by the coordinates attribute, {}, "
                "is not a variable in this dataset"
                "".format(var_name, aux_name),
            )
            if not exists:
                continue
            aux_var = ds.variables[aux_name]
            # §6.1 allows string-valued "labels" as coordinates; both
            # char-array ('S' dtype) and vlen-str labels are exempt from
            # the dimension-subset requirement.
            if hasattr(aux_var.dtype, "char") and aux_var.dtype.char == "S":
                continue
            if aux_var.dtype == str:
                continue
            aux_dims = set(aux_var.dimensions)
            ctx.assert_true(
                aux_dims.issubset(var_dims),
                "dimensions for auxiliary coordinate variable {} ({}) "
                "are not a subset of dimensions for variable {} ({})"
                "".format(
                    aux_name, ", ".join(aux_dims), var_name, ", ".join(var_dims)
                ),
            )
        results.append(ctx.to_result())
    return results
def check_duplicate_axis(self, ds):
    """
    Checks that no variable contains two coordinates defining the same
    axis.

    Chapter 5 paragraph 6: a geophysical variable must not have more than
    one coordinate (of either kind, coordinate variable or auxiliary
    coordinate variable) carrying an `axis` attribute with the same value,
    e.g. at most one axis attribute for X.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: compliance_checker.base.Result
    :return: List of results
    """
    results = []
    for var_name in self._find_geophysical_vars(ds):
        ctx = TestCtx(BaseCheck.HIGH, self.section_titles["5"])
        axis_map = cfutil.get_axis_map(ds, var_name)
        # axis_map also lists coordinates that carry no axis attribute at
        # all; only coordinates that explicitly declare one can collide.
        for axis_name, coord_names in axis_map.items():
            declared = [
                c for c in coord_names if hasattr(ds.variables[c], "axis")
            ]
            ctx.assert_true(
                len(declared) <= 1,
                "'{}' has duplicate axis {} defined by [{}]".format(
                    var_name, axis_name, ", ".join(sorted(declared))
                ),
            )
        results.append(ctx.to_result())
    return results
def check_multi_dimensional_coords(self, ds):
    """
    Checks that no multidimensional coordinate shares a name with its
    dimensions.

    Chapter 5 paragraph 4: it is recommended that the name of a
    multidimensional coordinate not match the name of any of its
    dimensions.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    results = []
    # Only auxiliary coordinate variables can be multidimensional, and
    # only those with 2+ dimensions are subject to the recommendation.
    for coord_name in self._find_aux_coord_vars(ds):
        coord_var = ds.variables[coord_name]
        if coord_var.ndim < 2:
            continue
        ctx = TestCtx(BaseCheck.MEDIUM, self.section_titles["5"])
        ctx.assert_true(
            coord_name not in coord_var.dimensions,
            "{} shares the same name as one of its dimensions".format(coord_name),
        )
        results.append(ctx.to_result())
    return results
# NOTE **********
# IS THIS EVEN NEEDED ANYMORE?
# ***************
def check_grid_coordinates(self, ds):
    """
    5.6 When the coordinate variables for a horizontal grid are not
    longitude and latitude, it is required that the true latitude and
    longitude coordinates be supplied via the coordinates attribute.

    This is tricky because there is a subtle difference between latitude
    as defined in Chapter 4 and "true" latitude as defined in Chapter 5,
    hence the dedicated cfutil true-lat/lon helpers below.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    ret_val = []
    latitudes = cfutil.get_true_latitude_variables(ds)
    longitudes = cfutil.get_true_longitude_variables(ds)
    # Grid-like feature types that must supply true lat/lon coordinates.
    # (Fixed misspelled local name `check_featues`.)
    check_features = [
        "2d-regular-grid",
        "2d-static-grid",
        "3d-regular-grid",
        "3d-static-grid",
        "mapped-grid",
        "reduced-grid",
    ]
    msg = (
        '{}\'s coordinate variable "{}" is not one of the variables identifying true '
        + "latitude/longitude and its dimensions are not a subset of {}'s dimensions"
    )
    alt = (
        "{} has no coordinate associated with a variable identified as true latitude/longitude; "
        + "its coordinate variable should also share a subset of {}'s dimensions"
    )

    def _assert_true_axis_coord(ctx, variable, dimensions, candidates, true_coords):
        # Assert that some coordinate among `candidates` is a true lat/lon
        # variable whose dimensions are a subset of `dimensions`. This
        # factors out the previously-duplicated Y (latitude) and X
        # (longitude) search loops, which were identical except for the
        # axis and the true-coordinate list.
        _coord = None
        found = False
        for coord in candidates:
            _coord = coord
            is_subset_dims = set(ds.variables[coord].dimensions).issubset(dimensions)
            if is_subset_dims and coord in true_coords:
                found = True
                break
        if _coord:
            ctx.assert_true(found, msg.format(variable, _coord, variable))
        else:
            ctx.assert_true(found, alt.format(variable, variable))

    # For each geophysical variable that defines a grid, assert it is
    # associated with a true latitude and a true longitude coordinate.
    for variable in self._find_geophysical_vars(ds):
        # Set of dimensions for set-wise subset comparisons.
        dimensions = set(ds.variables[variable].dimensions)
        # If it's not a grid, skip it.
        if cfutil.guess_feature_type(ds, variable) not in check_features:
            continue
        has_coords = TestCtx(BaseCheck.HIGH, self.section_titles["5.6"])
        # axis_map maps axis letter to a list of coordinate names, e.g.
        # {'X': ['lon'], 'Y': ['lat'], 'Z': ['lev']}, derived from the
        # variable's dimensions and its `coordinates` attribute only.
        axis_map = cfutil.get_axis_map(ds, variable)
        # Latitude (Y axis), then longitude (X axis).
        _assert_true_axis_coord(has_coords, variable, dimensions, axis_map["Y"], latitudes)
        _assert_true_axis_coord(has_coords, variable, dimensions, axis_map["X"], longitudes)
        ret_val.append(has_coords.to_result())
    return ret_val
def check_reduced_horizontal_grid(self, ds):
    """
    5.3 A "reduced" longitude-latitude grid is one in which the points are
    arranged along constant latitude lines with the number of points on a
    latitude line decreasing toward the poles.

    Recommend that this type of gridded data be stored using the compression
    scheme described in Section 8.2, "Compression by Gathering". The
    compressed latitude and longitude auxiliary coordinate variables are
    identified by the coordinates attribute.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    ret_val = []
    # Latitude/longitude variables eligible to appear in `coordinates`.
    lats = set(cfutil.get_latitude_variables(ds))
    lons = set(cfutil.get_longitude_variables(ds))
    for name in self._find_geophysical_vars(ds):
        coords = getattr(ds.variables[name], "coordinates", None)
        axis_map = cfutil.get_axis_map(ds, name)
        # Only variables with a compressed ("C") axis are reduced grids.
        if "C" not in axis_map:
            continue
        valid_rgrid = TestCtx(BaseCheck.HIGH, self.section_titles["5.3"])
        # Make sure reduced grid features define coordinates.
        valid_rgrid.assert_true(
            isinstance(coords, str) and coords,
            "reduced grid feature {} must define coordinates attribute"
            "".format(name),
        )
        # We can't check anything else if there are no defined coordinates.
        # BUG FIX: this guard was `not isinstance(coords, str) and coords`,
        # which is never truthy for a missing (None) or empty coordinates
        # attribute; execution then fell through and crashed on
        # `coords.split()`. The conjunction must be parenthesized. The
        # partially-filled test context is also recorded now instead of
        # being silently dropped.
        if not (isinstance(coords, str) and coords):
            ret_val.append(valid_rgrid.to_result())
            continue
        coord_set = set(coords.split())
        # Make sure it's associated with valid lat and valid lon.
        valid_rgrid.assert_true(
            len(coord_set.intersection(lons)) > 0,
            "{} must be associated with a valid longitude coordinate".format(name),
        )
        valid_rgrid.assert_true(
            len(coord_set.intersection(lats)) > 0,
            "{} must be associated with a valid latitude coordinate".format(name),
        )
        # A reduced grid must have exactly one compressed coordinate.
        valid_rgrid.assert_true(
            len(axis_map["C"]) == 1,
            "{} can not be associated with more than one compressed coordinates: "
            "({})".format(name, ", ".join(axis_map["C"])),
        )
        for compressed_coord in axis_map["C"]:
            coord = ds.variables[compressed_coord]
            compress = getattr(coord, "compress", None)
            # §8.2: compress names the dimensions gathered by this coordinate.
            valid_rgrid.assert_true(
                isinstance(compress, str) and compress,
                "compress attribute for compression coordinate {} must be a non-empty string"
                "".format(compressed_coord),
            )
            if not isinstance(compress, str):
                continue
            for dim in compress.split():
                valid_rgrid.assert_true(
                    dim in ds.dimensions,
                    "dimension {} referenced by {}:compress must exist"
                    "".format(dim, compressed_coord),
                )
        ret_val.append(valid_rgrid.to_result())
    return ret_val
def _check_grid_mapping_attr_condition(self, attr, attr_name):
    """
    Evaluate a condition (or series of conditions) for a particular
    attribute. Implementation for CF-1.6.

    :param attr: attribute to test condition for
    :param str attr_name: name of the attribute
    :rtype tuple
    :return two-tuple of (bool, str)
    :raises NotImplementedError: if no evaluator exists for attr_name
    """
    # Dispatch table: grid-mapping attribute name -> evaluator method.
    evaluators = {
        "latitude_of_projection_origin": self._evaluate_latitude_of_projection_origin,
        "longitude_of_projection_origin": self._evaluate_longitude_of_projection_origin,
        "longitude_of_central_meridian": self._evaluate_longitude_of_central_meridian,
        "longitude_of_prime_meridian": self._evaluate_longitude_of_prime_meridian,
        "scale_factor_at_central_meridian": self._evaluate_scale_factor_at_central_meridian,
        "scale_factor_at_projection_origin": self._evaluate_scale_factor_at_projection_origin,
        "standard_parallel": self._evaluate_standard_parallel,
        "straight_vertical_longitude_from_pole": self._evaluate_straight_vertical_longitude_from_pole,
    }
    if attr_name not in evaluators:
        raise NotImplementedError(
            "Evaluation for {} not yet implemented".format(attr_name)
        )
    return evaluators[attr_name](attr)
def _evaluate_latitude_of_projection_origin(self, val):
"""
Evaluate the condition for `latitude_of_projection_origin` attribute.
Return result. Value must be -90 <= x <= 90.
:param val: value to be tested
:rtype tuple
:return two-tuple (bool, msg)
"""
return (
(val >= -90.0) and (val <= 90.0),
"latitude_of_projection_origin must satisfy (-90 <= x <= 90)",
)
def _evaluate_longitude_of_projection_origin(self, val):
"""
Evaluate the condition for `longitude_of_projection_origin` attribute.
Return result.
:param val: value to be tested
:rtype tuple
:return two-tuple (bool, msg)
"""
return (
(val >= -180.0) and (val <= 180.0),
"longitude_of_projection_origin must satisfy (-180 <= x <= 180)",
)
def _evaluate_longitude_of_central_meridian(self, val):
"""
Evaluate the condition for `longitude_of_central_meridian` attribute.
Return result.
:param val: value to be tested
:rtype tuple
:return two-tuple (bool, msg)
"""
return (
(val >= -180.0) and (val <= 180.0),
"longitude_of_central_meridian must satisfy (-180 <= x <= 180)",
)
def _evaluate_longitude_of_prime_meridian(self, val):
"""
Evaluate the condition for `longitude_of_prime_meridian` attribute.
Return result.
:param val: value to be tested
:rtype tuple
:return two-tuple (bool, msg)
"""
return (
(val >= -180.0) and (val <= 180.0),
"longitude_of_prime_meridian must satisfy (-180 <= x <= 180)",
)
def _evaluate_scale_factor_at_central_meridian(self, val):
"""
Evaluate the condition for `scale_factor_at_central_meridian` attribute.
Return result.
:param val: value to be tested
:rtype tuple
:return two-tuple (bool, msg)
"""
return (val > 0.0, "scale_factor_at_central_meridian must be > 0.0")
def _evaluate_scale_factor_at_projection_origin(self, val):
"""
Evaluate the condition for `scale_factor_at_projection_origin` attribute.
Return result.
:param val: value to be tested
:rtype tuple
:return two-tuple (bool, msg)
"""
return (val > 0.0, "scale_factor_at_projection_origin must be > 0.0")
def _evaluate_standard_parallel(self, val):
"""
Evaluate the condition for `standard_parallel` attribute. Return result.
:param val: value to be tested
:rtype tuple
:return two-tuple (bool, msg)
"""
return (
(val >= -90.0) and (val <= 90),
"standard_parallel must satisfy (-90 <= x <= 90)",
)
def _evaluate_straight_vertical_longitude_from_pole(self, val):
"""
Evaluate the condition for `straight_vertical_longitude_from_pole`
attribute. Return result.
:param val: value to be tested
:rtype tuple
:return two-tuple (bool, msg)
"""
return (
(val >= -180.0) and (val <= 180),
"straight_vertical_longitude_from_pole must satisfy (-180 <= x <= 180)",
)
###############################################################################
# Chapter 6: Labels and Alternative Coordinates
###############################################################################
def check_geographic_region(self, ds):
    """
    6.1.1 When data is representative of geographic regions which can be identified by names but which have complex
    boundaries that cannot practically be specified using longitude and latitude boundary coordinates, a labeled
    axis should be used to identify the regions.

    Recommend that the names be chosen from the list of standardized region names whenever possible. To indicate
    that the label values are standardized the variable that contains the labels must be given the standard_name
    attribute with the value region.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    ret_val = []
    # Standardized region names from the CF standardized-region list;
    # membership check below is done against the lowercased label.
    region_list = (
        [ # TODO maybe move this (and other info like it) into a config file?
            "africa",
            "antarctica",
            "arabian_sea",
            "aral_sea",
            "arctic_ocean",
            "asia",
            "atlantic_ocean",
            "australia",
            "baltic_sea",
            "barents_opening",
            "barents_sea",
            "beaufort_sea",
            "bellingshausen_sea",
            "bering_sea",
            "bering_strait",
            "black_sea",
            "canadian_archipelago",
            "caribbean_sea",
            "caspian_sea",
            "central_america",
            "chukchi_sea",
            "contiguous_united_states",
            "denmark_strait",
            "drake_passage",
            "east_china_sea",
            "english_channel",
            "eurasia",
            "europe",
            "faroe_scotland_channel",
            "florida_bahamas_strait",
            "fram_strait",
            "global",
            "global_land",
            "global_ocean",
            "great_lakes",
            "greenland",
            "gulf_of_alaska",
            "gulf_of_mexico",
            "hudson_bay",
            "iceland_faroe_channel",
            "indian_ocean",
            "indonesian_throughflow",
            "indo_pacific_ocean",
            "irish_sea",
            "lake_baykal",
            "lake_chad",
            "lake_malawi",
            "lake_tanganyika",
            "lake_victoria",
            "mediterranean_sea",
            "mozambique_channel",
            "north_america",
            "north_sea",
            "norwegian_sea",
            "pacific_equatorial_undercurrent",
            "pacific_ocean",
            "persian_gulf",
            "red_sea",
            "ross_sea",
            "sea_of_japan",
            "sea_of_okhotsk",
            "south_america",
            "south_china_sea",
            "southern_ocean",
            "taiwan_luzon_straits",
            "weddell_sea",
            "windward_passage",
            "yellow_sea",
        ]
    )
    # Only variables flagged with standard_name "region" carry region labels.
    for var in ds.get_variables_by_attributes(standard_name="region"):
        valid_region = TestCtx(BaseCheck.MEDIUM, self.section_titles["6.1"])
        region = var[:]
        # Masked arrays: compare against the raw underlying char data.
        if np.ma.isMA(region):
            region = region.data
        # Labels are stored as character arrays — join the characters into
        # a single string and compare case-insensitively against the list.
        # NOTE(review): assumes a single label per variable; a 2-D label
        # array (multiple regions) would be joined into one string — confirm.
        valid_region.assert_true(
            "".join(region.astype(str)).lower() in region_list,
            "6.1.1 '{}' specified by '{}' is not a valid region".format(
                "".join(region.astype(str)), var.name
            ),
        )
        ret_val.append(valid_region.to_result())
    return ret_val
###############################################################################
# Chapter 7: Data Representative of Cells
###############################################################################
def check_cell_boundaries(self, ds):
    """
    Checks the dimensions of cell boundary variables to ensure they are CF compliant.

    7.1 To represent cells we add the attribute bounds to the appropriate coordinate variable(s). The value of bounds
    is the name of the variable that contains the vertices of the cell boundaries. We refer to this type of variable as
    a "boundary variable." A boundary variable will have one more dimension than its associated coordinate or auxiliary
    coordinate variable. The additional dimension should be the most rapidly varying one, and its size is the maximum
    number of cell vertices.

    Applications that process cell boundary data often times need to determine whether or not adjacent cells share an
    edge. In order to facilitate this type of processing the following restrictions are placed on the data in boundary
    variables:

    Bounds for 1-D coordinate variables

    For a coordinate variable such as lat(lat) with associated boundary variable latbnd(x,2), the interval endpoints
    must be ordered consistently with the associated coordinate, e.g., for an increasing coordinate, lat(1) > lat(0)
    implies latbnd(i,1) >= latbnd(i,0) for all i

    If adjacent intervals are contiguous, the shared endpoint must be represented identically in each instance where
    it occurs in the boundary variable. For example, if the intervals that contain grid points lat(i) and lat(i+1) are
    contiguous, then latbnd(i+1,0) = latbnd(i,1).

    Bounds for 2-D coordinate variables with 4-sided cells

    In the case where the horizontal grid is described by two-dimensional auxiliary coordinate variables in latitude
    lat(n,m) and longitude lon(n,m), and the associated cells are four-sided, then the boundary variables are given
    in the form latbnd(n,m,4) and lonbnd(n,m,4), where the trailing index runs over the four vertices of the cells.

    Bounds for multi-dimensional coordinate variables with p-sided cells

    In all other cases, the bounds should be dimensioned (...,n,p), where (...,n) are the dimensions of the auxiliary
    coordinate variables, and p the number of vertices of the cells. The vertices must be traversed anticlockwise in the
    lon-lat plane as viewed from above. The starting vertex is not specified.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    # Note that test does not check monotonicity
    ret_val = []
    reasoning = []
    # cfutil.get_cell_boundary_map maps each coordinate variable name to
    # the boundary variable named by its `bounds` attribute.
    for variable_name, boundary_variable_name in cfutil.get_cell_boundary_map(
        ds
    ).items():
        variable = ds.variables[variable_name]
        valid = True
        # Reset reasoning per coordinate/boundary pair.
        reasoning = []
        if boundary_variable_name not in ds.variables:
            valid = False
            reasoning.append(
                "Boundary variable {} referenced by {} not ".format(
                    boundary_variable_name, variable.name
                )
                + "found in dataset variables"
            )
        else:
            boundary_variable = ds.variables[boundary_variable_name]
            # The number of dimensions in the bounds variable should always be
            # the number of dimensions in the referring variable + 1
            if boundary_variable.ndim < 2:
                valid = False
                reasoning.append(
                    "Boundary variable {} specified by {}".format(
                        boundary_variable.name, variable.name
                    )
                    + " should have at least two dimensions to enclose the base "
                    + "case of a one dimensionsal variable"
                )
            if boundary_variable.ndim != variable.ndim + 1:
                valid = False
                reasoning.append(
                    "The number of dimensions of the variable %s is %s, but the "
                    "number of dimensions of the boundary variable %s is %s. The boundary variable "
                    "should have %s dimensions"
                    % (
                        variable.name,
                        variable.ndim,
                        boundary_variable.name,
                        boundary_variable.ndim,
                        variable.ndim + 1,
                    )
                )
            # The leading dimensions of the bounds variable must match the
            # coordinate variable's dimensions; only the trailing
            # (vertex-count) dimension may differ.
            if variable.dimensions[:] != boundary_variable.dimensions[: variable.ndim]:
                valid = False
                reasoning.append(
                    "Boundary variable coordinates (for {}) are in improper order: {}. Bounds-specific dimensions should be last"
                    "".format(variable.name, boundary_variable.dimensions)
                )
            # ensure p vertices form a valid simplex given previous a...n
            # previous auxiliary coordinates
            # NOTE(review): the message below inserts the boundary variable
            # *name* where "Dimension {}" is expected, and reports
            # len(variable.dimensions) + 1 while the condition tests
            # len(boundary_variable.dimensions[:-1]) + 1 — these agree only
            # when the ndim check above passes; confirm intended wording.
            if (
                ds.dimensions[boundary_variable.dimensions[-1]].size
                < len(boundary_variable.dimensions[:-1]) + 1
            ):
                valid = False
                reasoning.append(
                    "Dimension {} of boundary variable (for {}) must have at least {} elements to form a simplex/closed cell with previous dimensions {}.".format(
                        boundary_variable.name,
                        variable.name,
                        len(variable.dimensions) + 1,
                        boundary_variable.dimensions[:-1],
                    )
                )
        result = Result(
            BaseCheck.MEDIUM, valid, self.section_titles["7.1"], reasoning
        )
        ret_val.append(result)
    return ret_val
def check_cell_measures(self, ds):
    """
    7.2 To indicate extra information about the spatial properties of a
    variable's grid cells, a cell_measures attribute may be defined for a
    variable. This is a string attribute comprising a list of
    blank-separated pairs of words of the form "measure: name". "area" and
    "volume" are the only defined measures.

    The "name" is the name of the variable containing the measure values,
    which we refer to as a "measure variable". The dimensions of the
    measure variable should be the same as or a subset of the dimensions of
    the variable to which they are related, but their order is not
    restricted.

    The variable must have a units attribute and may have other attributes
    such as a standard_name.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    ret_val = []
    variables = ds.get_variables_by_attributes(
        cell_measures=lambda c: c is not None
    )
    for var in variables:
        # BUG FIX: reasoning was previously initialized once before the
        # loop and only ever appended to, so failure messages from one
        # variable leaked into the Result of every subsequent variable.
        # It must be reset for each variable.
        reasoning = []
        # Attribute must be exactly "area: <var>" or "volume: <var>".
        search_str = r"^(?:area|volume): (\w+)$"
        search_res = regex.search(search_str, var.cell_measures)
        if not search_res:
            valid = False
            reasoning.append(
                "The cell_measures attribute for variable {} "
                "is formatted incorrectly. It should take the"
                " form of either 'area: cell_var' or "
                "'volume: cell_var' where cell_var is the "
                "variable describing the cell measures".format(var.name)
            )
        else:
            valid = True
            cell_meas_var_name = search_res.groups()[0]
            # TODO: cache previous results
            if cell_meas_var_name not in ds.variables:
                valid = False
                reasoning.append(
                    "Cell measure variable {} referred to by "
                    "{} is not present in dataset variables".format(
                        cell_meas_var_name, var.name
                    )
                )
            else:
                cell_meas_var = ds.variables[cell_meas_var_name]
                # The measure variable must carry units (e.g. m2, m3).
                if not hasattr(cell_meas_var, "units"):
                    valid = False
                    reasoning.append(
                        "Cell measure variable {} is required "
                        "to have units attribute defined.".format(
                            cell_meas_var_name
                        )
                    )
                # Its dimensions must be a (unordered) subset of the
                # referring variable's dimensions.
                if not set(cell_meas_var.dimensions).issubset(var.dimensions):
                    valid = False
                    reasoning.append(
                        "Cell measure variable {} must have "
                        "dimensions which are a subset of "
                        "those defined in variable {}.".format(
                            cell_meas_var_name, var.name
                        )
                    )
        result = Result(
            BaseCheck.MEDIUM, valid, (self.section_titles["7.2"]), reasoning
        )
        ret_val.append(result)
    return ret_val
def check_cell_methods(self, ds):
    """
    7.3 To describe the characteristic of a field that is represented by cell values, we define the cell_methods attribute
    of the variable. This is a string attribute comprising a list of blank-separated words of the form "name: method". Each
    "name: method" pair indicates that for an axis identified by name, the cell values representing the field have been
    determined or derived by the specified method.

    name can be a dimension of the variable, a scalar coordinate variable, a valid standard name, or the word "area"

    values of method should be selected from the list in Appendix E, Cell Methods, which includes point, sum, mean, maximum,
    minimum, mid_range, standard_deviation, variance, mode, and median. Case is not significant in the method name. Some
    methods (e.g., variance) imply a change of units of the variable, as is indicated in Appendix E, Cell Methods.

    Because the default interpretation for an intensive quantity differs from that of an extensive quantity and because this
    distinction may not be understood by some users of the data, it is recommended that every data variable include for each
    of its dimensions and each of its scalar coordinate variables the cell_methods information of interest (unless this
    information would not be meaningful). It is especially recommended that cell_methods be explicitly specified for each
    spatio-temporal dimension and each spatio-temporal scalar coordinate variable.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    ret_val = []
    # Grammar: "name1: [name2: ...] method [where type [over type2]] [(...)]"
    # Named groups: vars (one or more "name: "), method, where/wtypevar,
    # over/otypevar, and paren_contents for trailing parenthesized info.
    psep = regex.compile(
        r"(?P<vars>\w+: )+(?P<method>\w+) ?(?P<where>where (?P<wtypevar>\w+) "
        r"?(?P<over>over (?P<otypevar>\w+))?| ?)(?:\((?P<paren_contents>[^)]*)\))?"
    )
    for var in ds.get_variables_by_attributes(cell_methods=lambda x: x is not None):
        # Skip variables whose cell_methods attribute is an empty string.
        if not getattr(var, "cell_methods", ""):
            continue
        method = getattr(var, "cell_methods", "")
        # Overall format check: the whole attribute must match the grammar.
        valid_attribute = TestCtx(
            BaseCheck.HIGH, self.section_titles["7.3"]
        )  # changed from 7.1 to 7.3
        valid_attribute.assert_true(
            regex.match(psep, method) is not None,
            '"{}" is not a valid format for cell_methods attribute of "{}"'
            "".format(method, var.name),
        )
        ret_val.append(valid_attribute.to_result())
        valid_cell_names = TestCtx(BaseCheck.MEDIUM, self.section_titles["7.3"])
        # check that the name is valid
        for match in regex.finditer(psep, method):
            # it is possible to have "var1: var2: ... varn: ...", so handle
            # that case
            for var_raw_str in match.captures("vars"):
                # strip off the ': ' at the end of each match
                var_str = var_raw_str[:-2]
                # A name is acceptable if it is a dimension of the variable,
                # the literal "area", or appears in the coordinates attribute.
                if (
                    var_str in var.dimensions
                    or var_str == "area"
                    or var_str in getattr(var, "coordinates", "")
                ):
                    valid = True
                else:
                    valid = False
                valid_cell_names.assert_true(
                    valid,
                    "{}'s cell_methods name component {} does not match a dimension, "
                    "area or auxiliary coordinate".format(var.name, var_str),
                )
        ret_val.append(valid_cell_names.to_result())
        # Checks if the method value of the 'name: method' pair is acceptable
        valid_cell_methods = TestCtx(BaseCheck.MEDIUM, self.section_titles["7.3"])
        for match in regex.finditer(psep, method):
            # CF section 7.3 - "Case is not significant in the method name."
            valid_cell_methods.assert_true(
                match.group("method").lower() in self.cell_methods,
                "{}:cell_methods contains an invalid method: {}"
                "".format(var.name, match.group("method")),
            )
        ret_val.append(valid_cell_methods.to_result())
        # Finally, validate any parenthesized modifier/comment content.
        for match in regex.finditer(psep, method):
            if match.group("paren_contents") is not None:
                # split along spaces followed by words with a colon
                # not sure what to do if a comment contains a colon!
                ret_val.append(
                    self._check_cell_methods_paren_info(
                        match.group("paren_contents"), var
                    ).to_result()
                )
    return ret_val
def _check_cell_methods_paren_info(self, paren_contents, var):
    """
    Checks that the spacing and/or comment info contained inside the
    parentheses in cell_methods is well-formed.

    Recognized keywords inside the parentheses are "interval:" (scored on
    a parsable numeric value and UDUNITS-parsable units) and "comment:"
    (scored on its position). Anything else is reported as invalid.

    :param str paren_contents: text found between the parentheses
    :param var: netCDF variable whose cell_methods attribute is checked
    :rtype: TestCtx
    :return: scoring context with accumulated score, out_of and messages
    """
    valid_info = TestCtx(BaseCheck.MEDIUM, self.section_titles["7.3"])
    # if there are no colons, this is a simple comment
    # TODO: are empty comments considered valid?
    if ":" not in paren_contents:
        valid_info.out_of += 1
        valid_info.score += 1
        return valid_info
    # otherwise, split into k/v pairs
    kv_pair_pat = r"(\S+:)\s+(.*(?=\s+\w+:)|[^:]+$)\s*"
    # otherwise, we must split further with intervals coming
    # first, followed by non-standard comments
    # we need the count of the matches, and re.findall() only returns
    # groups if they are present and we wish to see if the entire match
    # object concatenated together is the same as the original string
    pmatches = [m for m in regex.finditer(kv_pair_pat, paren_contents)]
    for i, pmatch in enumerate(pmatches):
        keyword, val = pmatch.groups()
        if keyword == "interval:":
            # "interval:" contributes two points: one for the numeric
            # value, one for the units.
            valid_info.out_of += 2
            interval_matches = regex.match(
                r"^\s*(?P<interval_number>\S+)\s+(?P<interval_units>\S+)\s*$", val
            )
            # attempt to get the number for the interval
            if not interval_matches:
                valid_info.messages.append(
                    '§7.3.3 {}:cell_methods contains an interval specification that does not parse: "{}". Should be in format "interval: <number> <units>"'.format(
                        var.name, val
                    )
                )
            else:
                try:
                    float(interval_matches.group("interval_number"))
                except ValueError:
                    valid_info.messages.append(
                        '§7.3.3 {}:cell_methods contains an interval value that does not parse as a numeric value: "{}".'.format(
                            var.name, interval_matches.group("interval_number")
                        )
                    )
                else:
                    valid_info.score += 1
                # then the units
                try:
                    Unit(interval_matches.group("interval_units"))
                except ValueError:
                    valid_info.messages.append(
                        '§7.3.3 {}:cell_methods interval units "{}" is not parsable by UDUNITS.'.format(
                            var.name, interval_matches.group("interval_units")
                        )
                    )
                else:
                    valid_info.score += 1
        elif keyword == "comment:":
            # comments can't really be invalid, except
            # if they come first or aren't last, and
            # maybe if they contain colons embedded in the
            # comment string
            valid_info.out_of += 1
            if len(pmatches) == 1:
                # A lone comment means the parentheses carry no standard
                # information at all, which CF says should be omitted.
                valid_info.messages.append(
                    "§7.3.3 If there is no standardized information, the keyword comment: should be omitted for variable {}".format(
                        var.name
                    )
                )
            # otherwise check that the comment is the last
            # item in the parentheses
            elif i != len(pmatches) - 1:
                valid_info.messages.append(
                    '§7.3.3 The non-standard "comment:" element must come after any standard elements in cell_methods for variable {}'.format(
                        var.name
                    )
                )
            else:
                valid_info.score += 1
        else:
            # Unknown keyword: counted against the score with a message.
            valid_info.out_of += 1
            valid_info.messages.append(
                '§7.3.3 Invalid cell_methods keyword "{}" for variable {}. Must be one of [interval, comment]'.format(
                    keyword, var.name
                )
            )
    # Ensure concatenated reconstructed matches are the same as the
    # original string. If they're not, there's likely a formatting error
    valid_info.assert_true(
        "".join(m.group(0) for m in pmatches) == paren_contents,
        "§7.3.3 Parenthetical content inside {}:cell_methods is not well formed: {}".format(
            var.name, paren_contents
        ),
    )
    return valid_info
def check_climatological_statistics(self, ds):
    """
    7.4 Climatological statistics checks.

    A climatological time coordinate variable does not have a bounds
    attribute. Instead, it has a climatology attribute, which names a
    variable with dimensions (n,2), n being the dimension of the
    climatological time axis. Element (i,0) of the climatology variable
    specifies the beginning of the first subinterval and element (i,1)
    the end of the last subinterval used to evaluate the climatological
    statistics with index i in the time dimension.

    The cell_methods attribute of a variable using a climatological axis
    must take one of the following forms (optionally followed by a
    parenthesized comment):

    - time: method1 within years time: method2 over years
    - time: method1 within days time: method2 over days
    - time: method1 within days time: method2 over days time: method3 over years

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    reasoning = []
    ret_val = []
    total_climate_count = 0
    valid_climate_count = 0
    all_clim_coord_var_names = []

    methods = [
        "point",  # TODO change to appendix import once cf1.7 merged
        "sum",
        "mean",
        "maximum",
        "minimum",
        "mid_range",
        "standard_deviation",
        "variance",
        "mode",
        "median",
    ]

    # find any climatology axis variables; any variables which contain
    # climatological stats will use these variables as coordinates
    clim_time_coord_vars = ds.get_variables_by_attributes(
        climatology=lambda s: s is not None
    )

    # first, to determine whether or not we have a valid climatological time
    # coordinate variable, we need to make sure it has the attribute
    # "climatology", but not the attribute "bounds"
    for clim_coord_var in clim_time_coord_vars:
        if hasattr(clim_coord_var, "bounds"):
            reasoning.append(
                "Variable {} has a climatology attribute and cannot also have a bounds attribute.".format(
                    clim_coord_var.name
                )
            )
            result = Result(
                BaseCheck.MEDIUM, False, (self.section_titles["7.4"]), reasoning
            )
            ret_val.append(result)
            return ret_val
        # make sure the climatology variable referenced actually exists.
        # NOTE: previously this message indexed ds.variables["time"], which
        # both named the wrong variable and raised KeyError when no "time"
        # variable existed; report the offending variable directly instead.
        elif clim_coord_var.climatology not in ds.variables:
            reasoning.append(
                "Variable {} referenced in {}'s climatology attribute does not exist".format(
                    clim_coord_var.climatology, clim_coord_var.name
                )
            )
            result = Result(
                BaseCheck.MEDIUM, False, (self.section_titles["7.4"]), reasoning
            )
            ret_val.append(result)
            return ret_val

        # check that coordinate bounds are in the proper order.
        # make sure last elements are boundary variable specific dimensions
        if (
            clim_coord_var.dimensions[:]
            != ds.variables[clim_coord_var.climatology].dimensions[
                : clim_coord_var.ndim
            ]
        ):
            reasoning.append(
                "Climatology variable coordinates are in improper order: {}. Bounds-specific dimensions should be last".format(
                    ds.variables[clim_coord_var.climatology].dimensions
                )
            )
            # record the failure before bailing out; previously this path
            # returned without appending any Result
            result = Result(
                BaseCheck.MEDIUM, False, (self.section_titles["7.4"]), reasoning
            )
            ret_val.append(result)
            return ret_val
        # the bounds-style dimension of the climatology variable must
        # contain exactly two elements (start/end of the subintervals)
        elif (
            ds.dimensions[
                ds.variables[clim_coord_var.climatology].dimensions[-1]
            ].size
            != 2
        ):
            # NOTE: previously referenced an undefined name
            # (boundary_variable), raising NameError on this path
            reasoning.append(
                "Climatology dimension {} should only contain two elements".format(
                    ds.variables[clim_coord_var.climatology].dimensions
                )
            )

        # passed all these checks, so we can add this clim_coord_var to our
        # total list
        all_clim_coord_var_names.append(clim_coord_var.name)

    # for any variables which use a climatology time coordinate variable as
    # a coordinate, if they have a cell_methods attribute, it must comply
    # with one of the three valid forms (see docstring), optionally followed
    # by parentheses for explaining additional info, e.g.
    # "time: method1 within years time: method2 over years (sidereal years)"
    meth_regex = "(?:{})".format(
        "|".join(methods)
    )  # "or" comparison for the methods
    # NOTE: the (?<=days) lookbehind lives *inside* the optional third
    # clause so that "within years ... over years" (which has no trailing
    # "over years" clause) still matches; a bare lookbehind after \1
    # unconditionally rejected that valid form.
    re_string = (
        r"^time: {0} within (years|days)"  # regex string to test
        r" time: {0} over \1(?:(?<=days) time: {0} over years)?"
        r"(?: \([^)]+\))?$".format(meth_regex)
    )

    # find any variables with a valid climatological cell_methods
    for cell_method_var in ds.get_variables_by_attributes(
        cell_methods=lambda s: s is not None
    ):
        if any(
            dim in all_clim_coord_var_names for dim in cell_method_var.dimensions
        ):
            total_climate_count += 1
            if not regex.search(re_string, cell_method_var.cell_methods):
                reasoning.append(
                    'The "time: method within years/days over years/days" format is not correct in variable {}.'.format(
                        cell_method_var.name
                    )
                )
            else:
                valid_climate_count += 1

    result = Result(
        BaseCheck.MEDIUM,
        (valid_climate_count, total_climate_count),
        (self.section_titles["7.4"]),
        reasoning,
    )
    ret_val.append(result)

    return ret_val
###############################################################################
# Chapter 8: Reduction of Dataset Size
###############################################################################
def check_packed_data(self, ds):
    """
    8.1 Packed data checks (scale_factor / add_offset).

    If the scale_factor and add_offset attributes are of the same data type
    as the associated variable, the unpacked data is assumed to be of the
    same data type as the packed data. However, if they are of a different
    data type from the variable, the unpacked data should match the type of
    these attributes, which must both be of type float or both be of type
    double; in that case the packed variable must be of type byte, short or
    int. Attributes that indicate missing values (_FillValue, valid_min,
    valid_max, valid_range) must be of the same data type as the packed
    data.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results (two Results per packed variable: one for the
        scale/offset type rules, one for the missing-value attribute types)
    """
    ret_val = []
    for name, var in ds.variables.items():
        add_offset = getattr(var, "add_offset", None)
        scale_factor = getattr(var, "scale_factor", None)
        # compare against None explicitly so a legitimate zero-valued
        # attribute (e.g. add_offset = 0.0) is not treated as absent
        if add_offset is None and scale_factor is None:
            continue
        valid = True
        reasoning = []

        # if only one of these attributes is defined, assume they
        # are the same type (value doesn't matter here)
        if add_offset is None:
            add_offset = scale_factor
        if scale_factor is None:
            scale_factor = add_offset

        if type(add_offset) != type(scale_factor):
            valid = False
            reasoning.append(
                "Attributes add_offset and scale_factor have different data type."
            )
        elif type(scale_factor) != var.dtype.type:
            # Check both attributes are type float or double
            if not isinstance(scale_factor, (float, np.floating)):
                valid = False
                reasoning.append(
                    "Attributes add_offset and scale_factor are not of type float or double."
                )
            else:
                # Check variable type is byte, short or int.
                # dtype.kind == "i" covers every signed integer width;
                # the previous membership list used np.int, which was
                # removed from numpy and raises AttributeError.
                if var.dtype.kind != "i":
                    valid = False
                    reasoning.append("Variable is not of type byte, short, or int.")

        result = Result(
            BaseCheck.MEDIUM, valid, self.section_titles["8.1"], reasoning
        )
        ret_val.append(result)
        reasoning = []

        valid = True
        # test further with _FillValue , valid_min , valid_max , valid_range
        if hasattr(var, "_FillValue"):
            if var._FillValue.dtype.type != var.dtype.type:
                valid = False
                reasoning.append(
                    "Type of %s:_FillValue attribute (%s) does not match variable type (%s)"
                    % (name, var._FillValue.dtype.name, var.dtype.name)
                )
        if hasattr(var, "valid_min"):
            if var.valid_min.dtype.type != var.dtype.type:
                valid = False
                # colon added for consistency with the sibling messages
                reasoning.append(
                    "Type of %s:valid_min attribute (%s) does not match variable type (%s)"
                    % (name, var.valid_min.dtype.name, var.dtype.name)
                )
        if hasattr(var, "valid_max"):
            if var.valid_max.dtype.type != var.dtype.type:
                valid = False
                reasoning.append(
                    "Type of %s:valid_max attribute (%s) does not match variable type (%s)"
                    % (name, var.valid_max.dtype.name, var.dtype.name)
                )
        if hasattr(var, "valid_range"):
            if var.valid_range.dtype.type != var.dtype.type:
                valid = False
                reasoning.append(
                    "Type of %s:valid_range attribute (%s) does not match variable type (%s)"
                    % (name, var.valid_range.dtype.name, var.dtype.name)
                )

        result = Result(
            BaseCheck.MEDIUM, valid, self.section_titles["8.1"], reasoning
        )
        ret_val.append(result)
    return ret_val
def check_compression_gathering(self, ds):
    """
    §8.2 Compression by gathering: validate every variable bearing a
    "compress" attribute. The compression variable must be
    one-dimensional, must be an integer type (it holds array indices),
    and each blank-separated name in its compress attribute must be an
    existing dimension of the dataset.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    results = []
    for compress_var in ds.get_variables_by_attributes(
        compress=lambda s: s is not None
    ):
        ok = True
        notes = []
        # the set of dimension names referenced by the compress attribute
        referenced_dims = set(compress_var.compress.split(" "))

        if compress_var.ndim != 1:
            ok = False
            notes.append(
                "Compression variable {} may only have one dimension".format(
                    compress_var.name
                )
            )

        # the compression variable forms an index into other arrays, so it
        # must be a signed or unsigned integer type (and never a string)
        is_integer_typed = (compress_var.dtype is not str) and (
            compress_var.dtype.kind in {"i", "u"}
        )
        if not is_integer_typed:
            ok = False
            notes.append(
                "Compression variable {} must be an integer type to form a proper array index".format(
                    compress_var.name
                )
            )

        # every referenced name must exist among the dataset's dimensions
        missing_dims = sorted(referenced_dims.difference(ds.dimensions))
        if missing_dims:
            ok = False
            notes.append(
                "The following dimensions referenced by the compress attribute of variable {} do not exist: {}".format(
                    compress_var.name, missing_dims
                )
            )

        results.append(
            Result(BaseCheck.MEDIUM, ok, self.section_titles["8.2"], notes)
        )

    return results
###############################################################################
# Chapter 9: Discrete Sampling Geometries
###############################################################################
def check_feature_type(self, ds):
    """
    Check the global attribute featureType for valid CF featureTypes.

    §9.4 A global attribute, featureType, is required for all Discrete
    Geometry representations except the orthogonal multidimensional array
    representation, for which it is highly recommended. The value is
    case-insensitive; a missing attribute passes the check.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: compliance_checker.base.Result
    """
    # possible featureTypes, lower-cased so the comparison below is
    # case-insensitive
    feature_list = [
        "point",
        "timeseries",
        "trajectory",
        "profile",
        "timeseriesprofile",
        "trajectoryprofile",
    ]

    feature_type = getattr(ds, "featureType", None)
    ctx = TestCtx(
        BaseCheck.HIGH, "§9.1 Dataset contains a valid featureType"
    )
    attr_is_valid = feature_type is None or feature_type.lower() in feature_list
    ctx.assert_true(
        attr_is_valid,
        "{} is not a valid CF featureType. It must be one of {}"
        "".format(feature_type, ", ".join(feature_list)),
    )
    return ctx.to_result()
def check_cf_role(self, ds):
    """
    Check variables defining cf_role for legal cf_role values.

    §9.5 The only acceptable values of cf_role for Discrete Geometry CF
    data sets are timeseries_id, profile_id, and trajectory_id.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: compliance_checker.base.Result or None
    :return: Result for the cf_role checks, or None when no variable in
        the dataset defines cf_role. (Previously the final return could
        hit an unbound local and raise NameError in that case.)
    """
    valid_roles = ["timeseries_id", "profile_id", "trajectory_id"]
    variable_count = 0
    valid_cf_role = None
    for variable in ds.get_variables_by_attributes(cf_role=lambda x: x is not None):
        variable_count += 1
        valid_cf_role = TestCtx(BaseCheck.HIGH, self.section_titles["9.5"])
        cf_role = variable.cf_role
        valid_cf_role.assert_true(
            cf_role in valid_roles,
            "{} is not a valid cf_role value. It must be one of {}"
            "".format(cf_role, ", ".join(valid_roles)),
        )
    if variable_count == 0:
        # nothing to report when no variable carries cf_role
        return None
    m = (
        "§9.5 The only acceptable values of cf_role for Discrete Geometry CF"
        + " data sets are timeseries_id, profile_id, and trajectory_id"
    )
    valid_cf_role.assert_true(variable_count < 3, m)
    return valid_cf_role.to_result()
def check_variable_features(self, ds):
    """
    Check that the detected feature type of each geophysical variable
    matches the dataset's global featureType attribute, and that only one
    unique feature type is present across the dataset.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    results = []
    detected_features = defaultdict(list)
    valid_features = {
        "point",
        "timeseries",
        "trajectory",
        "profile",
        "timeseriesprofile",
        "trajectoryprofile",
    }

    # Don't bother checking if it's not a legal featureType, or if the
    # featureType attribute doesn't exist (defaults to "", which is not in
    # the valid set)
    feature_attr = getattr(ds, "featureType", "")
    if feature_attr is not None and feature_attr.lower() not in valid_features:
        return []

    _feature = feature_attr.lower()

    for var_name in self._find_geophysical_vars(ds):
        guessed_feature = cfutil.guess_feature_type(ds, var_name)
        # If we can't figure it out, don't check it.
        if guessed_feature is None:
            continue
        detected_features[guessed_feature].append(var_name)
        per_var_ctx = TestCtx(BaseCheck.MEDIUM, self.section_titles["9.1"])
        per_var_ctx.assert_true(
            guessed_feature.lower() == _feature,
            "{} is not a {}, it is detected as a {}"
            "".format(var_name, _feature, guessed_feature),
        )
        results.append(per_var_ctx.to_result())

    # summarize every featureType found alongside the variables showing it
    feature_description = ", ".join(
        "{} ({})".format(ftr, ", ".join(vrs))
        for ftr, vrs in detected_features.items()
    )
    uniformity_ctx = TestCtx(BaseCheck.HIGH, self.section_titles["9.1"])
    uniformity_ctx.assert_true(
        len(detected_features) < 2,
        "Different feature types discovered in this dataset: {}"
        "".format(feature_description),
    )
    results.append(uniformity_ctx.to_result())

    return results
def check_hints(self, ds):
    """
    Checks for potentially mislabeled metadata and makes suggestions for
    how to correct it.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    results = []
    results.extend(self._check_hint_bounds(ds))
    return results
def _check_hint_bounds(self, ds):
    """
    Checks for variables whose names end with "_bounds" that are not
    referenced as cell boundary variables, and recommends wiring them up
    via the `bounds` attribute.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    results = []
    declared_boundary_vars = set(cfutil.get_cell_boundary_variables(ds))
    for var_name in ds.variables:
        # skip variables that don't look like bounds, or that some other
        # variable already declares as its boundary
        if not var_name.endswith("_bounds") or var_name in declared_boundary_vars:
            continue
        msg = (
            "{} might be a cell boundary variable but there are no variables that define it "
            "as a boundary using the `bounds` attribute.".format(var_name)
        )
        results.append(
            Result(BaseCheck.LOW, True, self.section_titles["7.1"], [msg])
        )
    return results
class CF1_7Check(CF1_6Check):
    """Implementation for CF v1.7. Inherits from CF1_6Check as most of the
    checks are the same."""

    # things that are specific to 1.7
    _cc_spec_version = "1.7"
    _cc_url = "http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/cf-conventions.html"

    # attributes added or re-specified by CF-1.7 relative to the 1.6 base
    # table (NOTE: "actual_range" previously appeared twice in this dict
    # literal; the identical duplicate entry has been removed)
    appendix_a = appendix_a_base.copy()
    appendix_a.update(
        {
            "actual_range": {
                "Type": "N",
                "attr_loc": {"D", "C"},
                "cf_section": "2.5.1",
            },
            "comment": {
                "Type": "S",
                "attr_loc": {"G", "D", "C"},
                "cf_section": "2.6.2",
            },
            "external_variables": {
                "Type": "S",
                "attr_loc": {"G"},
                "cf_section": "2.6.3",
            },
            "scale_factor": {"Type": "N", "attr_loc": {"D", "C"}, "cf_section": "8.1"},
        }
    )
def __init__(self, options=None):
    """Initialize the CF-1.7 checker, swapping in the 1.7-specific
    cell-method and grid-mapping tables over the inherited 1.6 setup."""
    super().__init__(options)
    self.cell_methods = cell_methods17
    self.grid_mapping_dict = grid_mapping_dict17
    self.grid_mapping_attr_types = grid_mapping_attr_types17
def check_actual_range(self, ds):
    """Check the actual_range attribute of variables. As stated in
    section 2.5.1 of version 1.7, this convention defines a two-element
    vector attribute designed to describe the actual minimum and actual
    maximum values of variables containing numeric data. Conditions:

    - the first value of the two-element vector must be equal to the
      minimum of the data, and the second element equal to the maximum
    - if the data is packed, the elements of actual_range should have
      the same data type as the *unpacked* data
    - if valid_range is specified, both elements of actual_range should
      be within valid_range

    If a variable does not have an actual_range attribute, let it pass;
    including this attribute is only suggested. However, if the user is
    specifying the actual_range, the Result will be considered
    high-priority.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of Result objects, one per variable bearing actual_range
    """
    ret_val = []
    for name, variable in ds.variables.items():
        # per-variable scoring accumulators
        msgs = []
        score = 0
        out_of = 0
        if not hasattr(variable, "actual_range"):
            continue  # having this attr is only suggested, no Result needed
        else:
            out_of += 1
            try:
                # actual_range must be a two-element vector
                if (
                    len(variable.actual_range) != 2
                ):  # TODO is the attr also a numpy array? if so, .size
                    msgs.append(
                        "actual_range of '{}' must be 2 elements".format(name)
                    )
                    ret_val.append(
                        Result(  # putting result into list
                            BaseCheck.HIGH,
                            (score, out_of),
                            self.section_titles["2.5"],
                            msgs,
                        )
                    )
                    continue  # no need to keep checking if already completely wrong
                else:
                    score += 1
            except TypeError:  # in case it's just a single number
                msgs.append("actual_range of '{}' must be 2 elements".format(name))
                ret_val.append(
                    Result(  # putting result into list
                        BaseCheck.HIGH,
                        (score, out_of),
                        self.section_titles["2.5"],
                        msgs,
                    )
                )
                continue

        # check equality to existing min/max values
        # NOTE this is a data check
        # If every value is masked, a data check of actual_range isn't
        # appropriate, so skip.
        if not (hasattr(variable[:], "mask") and variable[:].mask.all()):
            # if min/max values aren't close to actual_range bounds,
            # fail.
            out_of += 1
            if not np.isclose(
                variable.actual_range[0], variable[:].min()
            ) or not np.isclose(variable.actual_range[1], variable[:].max()):
                msgs.append(
                    "actual_range elements of '{}' inconsistent with its min/max values".format(
                        name
                    )
                )
            else:
                score += 1

        # check that the actual range is within the valid range
        if hasattr(variable, "valid_range"):  # check within valid_range
            out_of += 1
            if (variable.actual_range[0] < variable.valid_range[0]) or (
                variable.actual_range[1] > variable.valid_range[1]
            ):
                msgs.append(
                    '"{}"\'s actual_range must be within valid_range'.format(
                        name
                    )
                )
            else:
                score += 1

        # check the elements of the actual range have the appropriate
        # relationship to the valid_min and valid_max
        if hasattr(variable, "valid_min"):
            out_of += 1
            if variable.actual_range[0] < variable.valid_min:
                msgs.append(
                    '"{}"\'s actual_range first element must be >= valid_min ({})'.format(
                        name, variable.valid_min
                    )
                )
            else:
                score += 1
        if hasattr(variable, "valid_max"):
            out_of += 1
            if variable.actual_range[1] > variable.valid_max:
                msgs.append(
                    '"{}"\'s actual_range second element must be <= valid_max ({})'.format(
                        name, variable.valid_max
                    )
                )
            else:
                score += 1

        ret_val.append(
            Result(  # putting result into list
                BaseCheck.HIGH, (score, out_of), self.section_titles["2.5"], msgs
            )
        )
    return ret_val
def check_cell_boundaries(self, ds):
    """
    Checks the dimensions of cell boundary variables to ensure they are CF compliant
    per section 7.1.

    This method extends the CF1_6Check method; please see the original method for the
    complete doc string.

    If any variable contains both a formula_terms attribute *and* a bounding variable,
    that bounds variable must also have a formula_terms attribute.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :returns list: List of results
    """
    # Note that test does not check monotonicity
    ret_val = []
    reasoning = []
    for variable_name, boundary_variable_name in cfutil.get_cell_boundary_map(
        ds
    ).items():
        variable = ds.variables[variable_name]
        valid = True
        reasoning = []
        # the variable named by the bounds attribute must actually exist
        if boundary_variable_name not in ds.variables:
            valid = False
            reasoning.append(
                "Boundary variable {} referenced by {} not ".format(
                    boundary_variable_name, variable.name
                )
                + "found in dataset variables"
            )
        else:
            boundary_variable = ds.variables[boundary_variable_name]
            # The number of dimensions in the bounds variable should always be
            # the number of dimensions in the referring variable + 1
            if boundary_variable.ndim < 2:
                valid = False
                reasoning.append(
                    "Boundary variable {} specified by {}".format(
                        boundary_variable.name, variable.name
                    )
                    + " should have at least two dimensions to enclose the base "
                    + "case of a one dimensionsal variable"
                )
            if boundary_variable.ndim != variable.ndim + 1:
                valid = False
                reasoning.append(
                    "The number of dimensions of the variable %s is %s, but the "
                    "number of dimensions of the boundary variable %s is %s. The boundary variable "
                    "should have %s dimensions"
                    % (
                        variable.name,
                        variable.ndim,
                        boundary_variable.name,
                        boundary_variable.ndim,
                        variable.ndim + 1,
                    )
                )
            # the shared (leading) dimensions must appear in the same order;
            # only the trailing bounds-specific dimension(s) may differ
            if variable.dimensions[:] != boundary_variable.dimensions[: variable.ndim]:
                valid = False
                reasoning.append(
                    "Boundary variable coordinates (for {}) are in improper order: {}. Bounds-specific dimensions should be last"
                    "".format(variable.name, boundary_variable.dimensions)
                )

            # ensure p vertices form a valid simplex given previous a...n
            # previous auxiliary coordinates
            if (
                ds.dimensions[boundary_variable.dimensions[-1]].size
                < len(boundary_variable.dimensions[:-1]) + 1
            ):
                valid = False
                reasoning.append(
                    "Dimension {} of boundary variable (for {}) must have at least {} elements to form a simplex/closed cell with previous dimensions {}.".format(
                        boundary_variable.name,
                        variable.name,
                        len(variable.dimensions) + 1,
                        boundary_variable.dimensions[:-1],
                    )
                )

            # check if formula_terms is present in the var; if so,
            # the bounds variable must also have a formula_terms attr
            if hasattr(variable, "formula_terms"):
                if not hasattr(boundary_variable, "formula_terms"):
                    valid = False
                    reasoning.append(
                        "'{}' has 'formula_terms' attr, bounds variable '{}' must also have 'formula_terms'".format(
                            variable_name, boundary_variable_name
                        )
                    )

        result = Result(
            BaseCheck.MEDIUM, valid, self.section_titles["7.1"], reasoning
        )
        ret_val.append(result)
    return ret_val
def check_cell_measures(self, ds):
    """
    A method to over-ride the CF1_6Check method. In CF 1.7, it is specified
    that a variable referenced by cell_measures must be in the dataset OR
    referenced by the global attribute "external_variables", which lists
    variables used by the dataset but not found in it.

    7.2 To indicate extra information about the spatial properties of a
    variable's grid cells, a cell_measures attribute may be defined for a
    variable. This is a string attribute comprising a list of
    blank-separated pairs of words of the form "measure: name". "area" and
    "volume" are the only defined measures. The dimensions of the measure
    variable should be the same as or a subset of the dimensions of the
    variable to which they are related, and the measure variable must have
    a units attribute.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    ret_val = []
    reasoning = []
    variables = ds.get_variables_by_attributes(
        cell_measures=lambda c: c is not None
    )
    for var in variables:
        search_str = r"^(?:area|volume): (\w+)$"
        search_res = regex.search(search_str, var.cell_measures)
        if not search_res:
            valid = False
            reasoning.append(
                "The cell_measures attribute for variable {} "
                "is formatted incorrectly. It should take the"
                " form of either 'area: cell_var' or "
                "'volume: cell_var' where cell_var is the "
                "variable describing the cell measures".format(var.name)
            )
        else:
            valid = True
            cell_meas_var_name = search_res.groups()[0]
            # TODO: cache previous results
            # external_variables is a blank-separated *string* of names;
            # split it into a list so the membership test below matches
            # whole names instead of substrings (e.g. "area" in "areas")
            try:
                external_variables = ds.getncattr("external_variables").split()
            except AttributeError:
                # attribute absent (or not a string): nothing external
                external_variables = []
            if cell_meas_var_name not in ds.variables:
                if cell_meas_var_name not in external_variables:
                    valid = False
                    reasoning.append(
                        "Cell measure variable {} referred to by {} is not present in dataset variables".format(
                            cell_meas_var_name, var.name
                        )
                    )
                else:
                    valid = True

                # make Result; an external variable cannot be checked further
                result = Result(
                    BaseCheck.MEDIUM, valid, (self.section_titles["7.2"]), reasoning
                )
                ret_val.append(result)
                continue  # can't test anything on an external var

            else:
                cell_meas_var = ds.variables[cell_meas_var_name]
                if not hasattr(cell_meas_var, "units"):
                    valid = False
                    reasoning.append(
                        "Cell measure variable {} is required "
                        "to have units attribute defined.".format(
                            cell_meas_var_name
                        )
                    )
                if not set(cell_meas_var.dimensions).issubset(var.dimensions):
                    valid = False
                    reasoning.append(
                        "Cell measure variable {} must have "
                        "dimensions which are a subset of "
                        "those defined in variable {}.".format(
                            cell_meas_var_name, var.name
                        )
                    )

        result = Result(
            BaseCheck.MEDIUM, valid, (self.section_titles["7.2"]), reasoning
        )
        ret_val.append(result)

    return ret_val
def _check_grid_mapping_attr_condition(self, attr, attr_name):
    """
    Evaluate a condition (or series of conditions) for a particular
    grid_mapping attribute. Implementation for CF-1.7.

    :param attr: attribute value to test the condition for
    :param str attr_name: name of the attribute
    :rtype tuple
    :return two-tuple of (bool, str)
    """
    # attributes introduced (or re-specified) in CF-1.7 dispatch to their
    # dedicated evaluators
    evaluators = {
        "geographic_crs_name": self._evaluate_geographic_crs_name,
        "geoid_name": self._evaluate_geoid_name,
        "geopotential_datum_name": self._evaluate_geopotential_datum_name,
        "horizontal_datum_name": self._evaluate_horizontal_datum_name,
        "prime_meridian_name": self._evaluate_prime_meridian_name,
        "projected_crs_name": self._evaluate_projected_crs_name,
        "reference_ellipsoid_name": self._evaluate_reference_ellipsoid_name,
        "towgs84": self._evaluate_towgs84,
    }
    evaluator = evaluators.get(attr_name)
    if evaluator is not None:
        return evaluator(attr)
    # invoke method from 1.6, as all remaining names are still valid there
    return super(CF1_7Check, self)._check_grid_mapping_attr_condition(
        attr, attr_name
    )
def _check_gmattr_existence_condition_geoid_name_geoptl_datum_name(self, var):
    """
    Verify that geoid_name and geopotential_datum_name are not both set
    as attributes of `var`; the two are mutually exclusive.

    :param netCDF4.Variable var
    :rtype tuple
    :return two-tuple (bool, str)
    """
    msg = "Both geoid_name and geopotential_datum_name cannot exist"
    attrs = var.ncattrs()
    has_both = ("geoid_name" in attrs) and ("geopotential_datum_name" in attrs)
    return (not has_both, msg)
def _check_gmattr_existence_condition_ell_pmerid_hdatum(self, var):
    """
    If any of reference_ellipsoid_name, prime_meridian_name, or
    horizontal_datum_name is defined as a grid_mapping attribute of
    `var`, then all three must be defined.

    :param netCDF4.Variable var
    :rtype tuple
    :return two-tuple (bool, str)
    """
    msg = (
        "If any of reference_ellipsoid_name, prime_meridian_name, "
        "or horizontal_datum_name are defined, all must be defined."
    )
    required_trio = {
        "reference_ellipsoid_name",
        "prime_meridian_name",
        "horizontal_datum_name",
    }
    present = required_trio.intersection(var.ncattrs())
    # valid when none of the trio appears, or the whole trio appears
    ok = (not present) or (present == required_trio)
    return (ok, msg)
def _get_projdb_conn(self):
    """
    Open and return a SQLite connection to the PROJ database bundled
    with pyproj.

    Returns:
        sqlite3.Connection
    """
    db_path = os.path.join(pyproj.datadir.get_data_dir(), "proj.db")
    return sqlite3.connect(db_path)
def _exec_query_str_with_params(self, qstr, argtuple):
    """
    Execute a query string against the PROJ database with the given
    argument tuple. Returns the resulting sqlite3 cursor.

    NOTE(review): a new connection is opened on every call and never
    explicitly closed; the cursor returned keeps it alive. Consider
    caching/closing the connection — confirm against call sites.

    :param str qstr: desired query to be executed
    :param tuple argtuple: tuple of arguments to be supplied to query
    :rtype set
    """
    conn = self._get_projdb_conn()
    return conn.execute(qstr, argtuple)
def _evaluate_geographic_crs_name(self, val):
    """
    Evaluate the condition for the geographic_crs_name attribute.

    :param val: value to be tested
    :rtype tuple
    :return two-tuple of (bool, str)
    """
    # search the primary table plus the alias table, since the name may
    # only be recorded as an alternate name
    query_str = (
        "SELECT 1 FROM geodetic_crs WHERE name = ? "
        "UNION ALL "  # need union in case contained in other tables
        "SELECT 1 FROM alias_name WHERE alt_name = ? "
        "AND table_name = 'geodetic_crs' LIMIT 1"
    )
    cursor = self._exec_query_str_with_params(query_str, (val, val))
    # any row at all means the name is known
    found = cursor.fetchone() is not None
    return (
        found,
        "geographic_crs_name must correspond to a valid OGC WKT GEOGCS name",
    )
def _evaluate_geoid_name(self, val):
    """
    Evaluate the condition for the geoid_name attribute.

    :param val: value to be tested
    :rtype tuple
    :return two-tuple of (bool, str)
    """
    # the name may live in the vertical_datum table or its alias table
    query_str = (
        "SELECT 1 FROM vertical_datum WHERE name = ? "
        "UNION ALL "
        "SELECT 1 FROM alias_name WHERE alt_name = ? "
        "AND table_name = 'vertical_datum' LIMIT 1"
    )
    cursor = self._exec_query_str_with_params(query_str, (val, val))
    found = cursor.fetchone() is not None
    return (
        found,
        "geoid_name must correspond to a valid OGC WKT VERT_DATUM name",
    )
def _evaluate_geopotential_datum_name(self, val):
    """
    Evaluate the condition for the geopotential_datum_name attribute.

    :param val: value to be tested
    :rtype tuple
    :return two-tuple of (bool, str)
    """
    # the name may live in the vertical_datum table or its alias table
    query_str = (
        "SELECT 1 FROM vertical_datum WHERE name = ? "
        "UNION ALL "
        "SELECT 1 FROM alias_name WHERE alt_name = ? "
        "AND table_name = 'vertical_datum' LIMIT 1"
    )
    cursor = self._exec_query_str_with_params(query_str, (val, val))
    found = cursor.fetchone() is not None
    return (
        found,
        "geopotential_datum_name must correspond to a valid OGC WKT VERT_DATUM name",
    )
def _evaluate_horizontal_datum_name(self, val):
    """
    Evaluate the condition for the horizontal_datum_name attribute.

    :param val: value to be tested
    :rtype tuple
    :return two-tuple of (bool, str)
    """
    return (
        val in horizontal_datum_names17,
        # interpolate the offending value; previously the "{}" placeholder
        # was left unformatted in the returned message
        (
            "{} must be a valid Horizontal Datum Name; "
            "see https://github.com/cf-convention/cf-conventions/wiki/Mapping-from-CF-Grid-Mapping-Attributes-to-CRS-WKT-Elements."
        ).format(val),
    )
def _evaluate_prime_meridian_name(self, val):
    """
    Evaluate the condition for the prime_meridian_name.

    :param val: value to be tested
    :rtype tuple
    :return two-tuple of (bool, str)
    """
    return (
        val in prime_meridian_names17,
        # BUGFIX: the "{}" placeholder was never filled, so failure
        # messages contained a literal "{}". Interpolate the value.
        (
            "{} must be a valid Prime Meridian name; "
            "see https://github.com/cf-convention/cf-conventions/wiki/csv/prime_meridian.csv."
        ).format(val),
    )
def _evaluate_projected_crs_name(self, val):
    """Check whether *val* is a recognized projected CRS name.

    :param val: value to be tested
    :rtype tuple
    :return two-tuple of (bool, str)
    """
    # Accept either a canonical projected_crs name or any of its aliases.
    sql = (
        "SELECT 1 FROM projected_crs WHERE name = ? "
        "UNION ALL "
        "SELECT 1 FROM alias_name WHERE alt_name = ? "
        "AND table_name = 'projected_crs' LIMIT 1"
    )
    # try to find the value in the database
    cursor = self._exec_query_str_with_params(sql, (val, val))
    found = len(cursor.fetchall()) > 0
    return (
        found,
        "projected_crs_name must correspond to a valid OGC WKT PROJCS name",
    )
def _evaluate_reference_ellipsoid_name(self, val):
    """
    Evaluate the condition for the reference_ellipsoid_name attribute.

    :param val: value to be tested
    :rtype tuple
    :return two-tuple of (bool, str)
    """
    return (
        val in ellipsoid_names17,
        # BUGFIX: the "{}" placeholder was never filled, so failure
        # messages contained a literal "{}". Interpolate the value.
        (
            "{} must be a valid Ellipsoid Name; "
            "see https://github.com/cf-convention/cf-conventions/wiki/csv/ellipsoid.csv."
        ).format(val),
    )
def _evaluate_towgs84(self, val):
    """
    Evaluate the condition for the towgs84 attribute.

    :param val: value to be tested
    :rtype tuple
    :return two-tuple of (bool, str)
    """
    # BUGFIX: message previously read "anm OGC WKT" (typo for "an").
    msg = (
        "towgs84 must be an array of length 3, 6, or 7 of double-precision"
        " and correspond to an OGC WKT TOWGS84 node"
    )
    # must be a numpy array; anything without a dtype fails immediately
    if getattr(val, "dtype", None) is None:
        return (False, msg)
    # must be a double-precision array
    if val.dtype != np.float64:
        return (False, msg)
    # scalars (empty shape) are rejected, as is any size other than 3/6/7
    if not val.shape or val.size not in (3, 6, 7):
        return (False, msg)
    return (True, msg)
def check_grid_mapping(self, ds):
# NOTE(review): assigning __doc__ inside the body only binds a local
# variable; it does not actually attach the parent docstring here.
__doc__ = super(CF1_7Check, self).check_grid_mapping.__doc__
# Run the CF-1.6 grid_mapping checks first; CF-1.7 additions extend them.
prev_return = super(CF1_7Check, self).check_grid_mapping(ds)
# NOTE(review): ret_val is assigned but never used in this method.
ret_val = []
grid_mapping_variables = cfutil.get_grid_mapping_variables(ds)
for var_name in sorted(grid_mapping_variables):
var = ds.variables[var_name]
test_ctx = self.get_test_ctx(
BaseCheck.HIGH, self.section_titles["5.6"], var.name
)
# TODO: check cases where crs_wkt provides part of a necessary
# grid_mapping attribute, or where a grid_mapping attribute
# overrides what has been provided in crs_wkt.
# attempt to parse crs_wkt if it is present
if "crs_wkt" in var.ncattrs():
crs_wkt = var.crs_wkt
if not isinstance(crs_wkt, str):
test_ctx.messages.append("crs_wkt attribute must be a string")
test_ctx.out_of += 1
else:
# score a point only if pyproj can parse the WKT string
try:
pyproj.CRS.from_wkt(crs_wkt)
except pyproj.exceptions.CRSError as crs_error:
test_ctx.messages.append(
"Cannot parse crs_wkt attribute to CRS using Proj4. Proj4 error: {}".format(
str(crs_error)
)
)
else:
test_ctx.score += 1
test_ctx.out_of += 1
# existence_conditions: attribute co-occurrence rules
exist_cond_1 = (
self._check_gmattr_existence_condition_geoid_name_geoptl_datum_name(var)
)
test_ctx.assert_true(exist_cond_1[0], exist_cond_1[1])
exist_cond_2 = self._check_gmattr_existence_condition_ell_pmerid_hdatum(var)
test_ctx.assert_true(exist_cond_2[0], exist_cond_2[1])
# handle vertical datum related grid_mapping attributes
vert_datum_attrs = {}
possible_vert_datum_attrs = {"geoid_name", "geopotential_datum_name"}
vert_datum_attrs = possible_vert_datum_attrs.intersection(var.ncattrs())
len_vdatum_name_attrs = len(vert_datum_attrs)
# check that geoid_name and geopotential_datum_name are not both
# present in the grid_mapping variable
if len_vdatum_name_attrs == 2:
test_ctx.out_of += 1
test_ctx.messages.append(
"Cannot have both 'geoid_name' and "
"'geopotential_datum_name' attributes in "
"grid mapping variable '{}'".format(var.name)
)
elif len_vdatum_name_attrs == 1:
# should be one or zero attrs
# validate the single datum name against the bundled proj.db
proj_db_path = os.path.join(pyproj.datadir.get_data_dir(), "proj.db")
try:
with sqlite3.connect(proj_db_path) as conn:
v_datum_attr = next(iter(vert_datum_attrs))
v_datum_value = getattr(var, v_datum_attr)
v_datum_str_valid = self._process_v_datum_str(
v_datum_value, conn
)
invalid_msg = (
"Vertical datum value '{}' for "
"attribute '{}' in grid mapping "
"variable '{}' is not valid".format(
v_datum_value, v_datum_attr, var.name
)
)
test_ctx.assert_true(v_datum_str_valid, invalid_msg)
except sqlite3.Error as e:
# if we hit an error, skip the check
warn(
"Error occurred while trying to query "
"Proj4 SQLite database at {}: {}".format(proj_db_path, str(e))
)
# merge this variable's result into the CF-1.6 results
prev_return[var.name] = test_ctx.to_result()
return prev_return
def _process_v_datum_str(self, v_datum_str, conn):
    """Return True if *v_datum_str* names a vertical datum (or alias) in proj.db."""
    # Search aliases first, then canonical names; LIMIT 1 short-circuits.
    vdatum_query = """SELECT 1 FROM alias_name WHERE
table_name = 'vertical_datum' AND
alt_name = ?
UNION ALL
SELECT 1 FROM vertical_datum WHERE
name = ?
LIMIT 1"""
    rows = conn.execute(vdatum_query, (v_datum_str, v_datum_str)).fetchall()
    return len(rows) > 0
def _check_dimensionless_vertical_coordinate_1_7(
self, ds, vname, deprecated_units, ret_val, dim_vert_coords_dict
):
"""
Check that a dimensionless vertical coordinate variable is valid under
CF-1.7.
:param netCDF4.Dataset ds: open netCDF4 dataset
:param str name: variable name
:param list ret_val: array to append Results to
:rtype None
"""
variable = ds.variables[vname]
standard_name = getattr(variable, "standard_name", None)
units = getattr(variable, "units", None)
formula_terms = getattr(variable, "formula_terms", None)
# Skip the variable if it's dimensional
if formula_terms is None and standard_name not in dim_vert_coords_dict:
return
# assert that the computed_standard_name is maps to the standard_name correctly
correct_computed_std_name_ctx = TestCtx(
BaseCheck.MEDIUM, self.section_titles["4.3"]
)
# NOTE(review): if formula_terms is set but standard_name is not a key of
# dim_vert_coords_dict, this lookup raises KeyError — confirm intended.
_comp_std_name = dim_vert_coords_dict[standard_name][1]
correct_computed_std_name_ctx.assert_true(
getattr(variable, "computed_standard_name", None) in _comp_std_name,
"§4.3.3 The standard_name of `{}` must map to the correct computed_standard_name, `{}`".format(
vname, sorted(_comp_std_name)
),
)
ret_val.append(correct_computed_std_name_ctx.to_result())
def check_dimensionless_vertical_coordinates(self, ds):
    """
    Check the validity of dimensionless coordinates under CF

    CF §4.3.2 The units attribute is not required for dimensionless
    coordinates.

    The standard_name attribute associates a coordinate with its definition
    from Appendix D, Dimensionless Vertical Coordinates. The definition
    provides a mapping between the dimensionless coordinate values and
    dimensional values that can positively and uniquely indicate the
    location of the data.

    A new attribute, formula_terms, is used to associate terms in the
    definitions with variables in a netCDF file. To maintain backwards
    compatibility with COARDS the use of these attributes is not required,
    but is strongly recommended.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    """
    ret_val = []
    deprecated_units = ["level", "layer", "sigma_level"]
    # NOTE: an unused local (cfutil.get_z_variables(ds)) was removed here.
    # Compose the CF-1.6 check with the CF-1.7 addition, both evaluated
    # against the CF-1.7 dimensionless-coordinate table.
    for coord_check in (
        self._check_dimensionless_vertical_coordinate_1_6,
        self._check_dimensionless_vertical_coordinate_1_7,
    ):
        ret_val.extend(
            self._check_dimensionless_vertical_coordinates(
                ds,
                deprecated_units,
                coord_check,
                dimless_vertical_coordinates_1_7,
            )
        )
    return ret_val
class CFNCCheck(BaseNCCheck, CFBaseCheck):
"""NetCDF-backed CF checker; combines the netCDF and CF base check classes."""
@classmethod
def beliefs(cls): # @TODO
# Placeholder: no beliefs are defined yet.
return {}
| ocefpaf/compliance-checker | compliance_checker/cf/cf.py | Python | apache-2.0 | 220,751 | [
"NetCDF"
] | ce86a27b36af848e8e4a5a319b63e482b67b2a27bc4d104050a1cf1d4151a6f6 |
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 31 23:03:00 2015
Defines the data model for Random Acts of Pizza
@author: Rupak Chakraborty
"""
# NOTE(review): this is Python 2 code (print statements; map() returning a list).
import pandas as pd
import numpy as np
import math
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn import cross_validation
from sklearn import metrics
import ClassificationUtils
import time
import nltk
from nltk.tokenize import word_tokenize
filename = "Random Acts of Pizza/train.json"
# map the boolean "received pizza" outcome to 1/0 class labels
class_map = {True:1,False:0}
jsonData = pd.read_json(filename)
# shuffle the rows so the later train/test split is randomized
jsonData = jsonData.iloc[np.random.permutation(len(jsonData))]
requester_pizza_status = np.array(map(lambda x: class_map[x],jsonData["requester_received_pizza"]))
class_labels = requester_pizza_status
# 18 features per example: 4 whole + 8 pairwise-difference + 6 POS counts
data = np.zeros((len(jsonData),18))
# Count Penn-Treebank noun/adjective/verb tags produced for a text.
def getNounAdjVerbs(text):
    """Return a (nouns, adjectives, verbs) count tuple for *text*."""
    noun_tags = ("NN", "NNP", "NNS", "NNPS")
    adjective_tags = ("JJ", "JJR", "JJS")
    verb_tags = ("VB", "VBD", "VBG", "VBN", "VBP", "VBZ")
    nouns = 0
    adj = 0
    verbs = 0
    for _, tag in nltk.pos_tag(word_tokenize(text)):
        if tag in noun_tags:
            nouns += 1
        elif tag in adjective_tags:
            adj += 1
        elif tag in verb_tags:
            verbs += 1
    return nouns, adj, verbs
# Extract Text features
request_text__data = list(jsonData["request_text_edit_aware"])
request_text_title_data = list (jsonData["request_title"])
clean_text_data = list([])
clean_title_data = list([])
print "Starting feature loading and cleaning ..."
start = time.time()
# Run every request body and title through the project's cleaning pipeline.
for i in range(len(request_text__data)):
title_string = ClassificationUtils.textCleaningPipeline(request_text_title_data[i])
text_string = ClassificationUtils.textCleaningPipeline(request_text__data[i])
clean_text_data.append(text_string)
clean_title_data.append(title_string)
end = time.time()
print "Time taken to load and clean text features : ", end-start
# Extract whole features (used as-is, no request/retrieval pairing)
number_of_downvotes_of_request_at_retrieval = np.array(jsonData["number_of_downvotes_of_request_at_retrieval"],dtype=float)
number_of_upvotes_of_request_at_retrieval = np.array(jsonData["number_of_upvotes_of_request_at_retrieval"],dtype=float)
request_number_of_comments_at_retrieval = np.array(jsonData["request_number_of_comments_at_retrieval"],dtype=float)
requester_number_of_subreddits_at_request = np.array(jsonData["requester_number_of_subreddits_at_request"],dtype=float)
whole_features = [number_of_downvotes_of_request_at_retrieval,number_of_upvotes_of_request_at_retrieval,\
request_number_of_comments_at_retrieval,requester_number_of_subreddits_at_request]
# Extract pairwise different features (each *_at_request has an *_at_retrieval twin)
requester_account_age_in_days_at_request = np.array(jsonData["requester_account_age_in_days_at_request"],dtype=float)
requester_account_age_in_days_at_retrieval = np.array(jsonData["requester_account_age_in_days_at_retrieval"],dtype=float)
requester_days_since_first_post_on_raop_at_request = np.array(jsonData["requester_days_since_first_post_on_raop_at_request"],dtype=float)
requester_days_since_first_post_on_raop_at_retrieval = np.array(jsonData["requester_days_since_first_post_on_raop_at_retrieval"],dtype=float)
requester_number_of_comments_at_request = np.array(jsonData["requester_number_of_comments_at_request"],dtype=float)
requester_number_of_comments_at_retrieval = np.array(jsonData["requester_number_of_comments_at_retrieval"],dtype=float)
requester_number_of_comments_in_raop_at_request = np.array(jsonData["requester_number_of_comments_in_raop_at_request"],dtype=float)
requester_number_of_comments_in_raop_at_retrieval = np.array(jsonData["requester_number_of_comments_in_raop_at_retrieval"],dtype=float)
requester_number_of_posts_at_request = np.array(jsonData["requester_number_of_posts_at_request"],dtype=float)
requester_number_of_posts_at_retrieval = np.array(jsonData["requester_number_of_posts_at_retrieval"],dtype=float)
requester_number_of_posts_on_raop_at_request = np.array(jsonData["requester_number_of_posts_on_raop_at_request"],dtype=float)
requester_number_of_posts_on_raop_at_retrieval = np.array(jsonData["requester_number_of_posts_on_raop_at_retrieval"],dtype=float)
requester_upvotes_minus_downvotes_at_request = np.array(jsonData["requester_upvotes_minus_downvotes_at_request"],dtype=float)
requester_upvotes_minus_downvotes_at_retrieval = np.array(jsonData["requester_upvotes_minus_downvotes_at_retrieval"],dtype=float)
requester_upvotes_plus_downvotes_at_request = np.array(jsonData["requester_upvotes_plus_downvotes_at_request"],dtype=float)
requester_upvotes_plus_downvotes_at_retrieval = np.array(jsonData["requester_upvotes_plus_downvotes_at_retrieval"],dtype=float)
# The two lists below are index-aligned: request_features[k] pairs with retrieval_features[k].
request_features = [requester_account_age_in_days_at_request,requester_days_since_first_post_on_raop_at_request\
,requester_number_of_comments_at_request,requester_number_of_comments_in_raop_at_request,requester_number_of_posts_at_request\
,requester_number_of_posts_on_raop_at_request,requester_upvotes_minus_downvotes_at_request,requester_upvotes_plus_downvotes_at_request]
retrieval_features = [requester_account_age_in_days_at_retrieval,requester_days_since_first_post_on_raop_at_retrieval\
,requester_number_of_comments_at_retrieval,requester_number_of_comments_in_raop_at_retrieval,requester_number_of_posts_at_retrieval\
,requester_number_of_posts_on_raop_at_retrieval,requester_upvotes_minus_downvotes_at_retrieval,requester_upvotes_plus_downvotes_at_retrieval]
#Extracting and organizing the data in a numpy array
print "Starting feature organization and POS tagging"
start = time.time()
for i in range(len(data)):
feature_row = []
# 4 raw "whole" features
for whole in whole_features:
feature_row.append(whole[i])
# 8 percentage-change features between retrieval and request snapshots;
# +1.0 smoothing guards against division by zero
for index,retrieval in enumerate(retrieval_features):
difference = retrieval[i] - request_features[index][i]
difference = ((difference + 1.0)/(request_features[index][i] + 1.0))*100.0
if math.isinf(difference) or math.isnan(difference):
difference = 1.0
feature_row.append(difference)
# 6 POS-count features: two 3-tuples (body, title) concatenated
text_pos_tags = getNounAdjVerbs(clean_text_data[i])
title_post_tags = getNounAdjVerbs(clean_title_data[i])
total_pos_tag_count = text_pos_tags + title_post_tags
for tag_count in total_pos_tag_count:
feature_row.append(tag_count)
data[i,:] = feature_row
end = time.time()
print "Time Taken to extract all features : ", end-start
# 70/30 train/test split
train_data,test_data,train_label,test_label = cross_validation.train_test_split(data,class_labels,test_size=0.3)
# Initializing the classifiers
rf = RandomForestClassifier(n_estimators=101)
ada = AdaBoostClassifier(n_estimators=101)
gradboost = GradientBoostingClassifier(n_estimators=101)
svm = SVC()
gnb = GaussianNB()
classifiers = [rf,ada,gradboost,svm,gnb]
classifier_names = ["Random Forests","AdaBoost","Gradient Boost","SVM","Gaussian NB"]
print "Starting Classification Performance Cycle ..."
start = time.time()
# Fit each model and report accuracy, confusion matrix, and a full report.
for classifier,classifier_name in zip(classifiers,classifier_names):
classifier.fit(train_data,train_label)
predicted_label = classifier.predict(test_data)
print "--------------------------------------------------------\n"
print "Accuracy for ",classifier_name, " : ",metrics.accuracy_score(test_label,predicted_label)
print "Confusion Matrix for ",classifier_name, " :\n ",metrics.confusion_matrix(test_label,predicted_label)
print "Classification Report for ",classifier_name, " : \n",metrics.classification_report(test_label,predicted_label)
print "--------------------------------------------------------\n"
end = time.time()
print "Time Taken for classification and performance Metrics calculation : ",end-start
"Gaussian"
] | 91154f1746f5570a18ee04940383d4238830e0aaffd8deeba9240c6399200a3c |
"""
viscount.api.workflows
Workflow related endpoints
"""
from flask import Blueprint, request, jsonify
from ..forms import NewWorkflowForm, UpdateWorkflowForm
from ..services import workflows as _workflows, tasks as _tasks
from . import ViscountFormException, route
from ..models import Workflow
from ..core import db
from .datatables import DataTables
from .cytoscape import render_to_cytoscape
bp = Blueprint('workflows', __name__, url_prefix='/workflows')
@route(bp, '/')
def list():
    """Return every stored workflow instance."""
    all_workflows = _workflows.all()
    return all_workflows
@route(bp, '/', methods=['POST'])
def create():
    """Create a new workflow from the posted form and return it."""
    form = NewWorkflowForm()
    # Guard clause: reject invalid submissions before touching the service.
    if not form.validate_on_submit():
        raise ViscountFormException(form.errors)
    return _workflows.create(**request.json)
@route(bp, '/<workflow_id>')
def show(workflow_id):
    """Return a single workflow instance, or a 404 error if unknown."""
    workflow = _workflows.get_or_404(workflow_id)
    return workflow
@route(bp, '/<workflow_id>', methods=['PUT'])
def update(workflow_id):
    """Updates a workflow. Returns the updated workflow instance."""
    form = UpdateWorkflowForm()
    if form.validate_on_submit():
        return _workflows.update(_workflows.get_or_404(workflow_id), **request.json)
    # Consistency: raise is a statement, not a call — matches create() above.
    raise ViscountFormException(form.errors)
@route(bp, '/<workflow_id>', methods=['DELETE'])
def delete(workflow_id):
    """Delete a workflow and return an empty 204 response."""
    workflow = _workflows.get_or_404(workflow_id)
    _workflows.delete(workflow)
    return None, 204
@route(bp, '/<workflow_id>/tasks')
def tasks(workflow_id):
    """Return the task instances attached to the given workflow."""
    workflow = _workflows.get_or_404(workflow_id)
    return workflow.tasks
@route(bp, '/<workflow_id>/tasks/<task_id>', methods=['PUT'])
def add_task(workflow_id, task_id):
    """Attach a task to a workflow and return the task instance."""
    workflow = _workflows.get_or_404(workflow_id)
    task = _tasks.get_or_404(task_id)
    return _workflows.add_task(workflow, task)
@route(bp, '/<workflow_id>/tasks/<task_id>', methods=['DELETE'])
def remove_task(workflow_id, task_id):
    """Detach a task from a workflow and return an empty 204 response."""
    workflow = _workflows.get_or_404(workflow_id)
    task = _tasks.get_or_404(task_id)
    _workflows.remove_task(workflow, task)
    return None, 204
@route(bp, '/datatables', methods=['GET', 'POST'])
def datatables():
    """Serve workflow rows formatted for a DataTables client."""
    # Only these (possibly dotted) columns may be requested by the client.
    allowed_columns = (
        "id",
        "name",
        "description",
        "revision",
        "revised_from.id",
        "revised_from.name",
        "revised_from.description",
        "revised_from.revision",
    )
    column_whitelist = {column: True for column in allowed_columns}
    query = db.session.query(Workflow)
    row_table = DataTables(request, Workflow, query, column_whitelist)
    return row_table.output_result(), 200
@route(bp, '/<workflow_id>/cytoscape', methods=['GET'])
def cytoscape(workflow_id):
    """Return the workflow rendered as a Cytoscape graph document."""
    workflow = _workflows.get_or_404(workflow_id)
    return render_to_cytoscape(workflow), 200
| dacb/viscount | viscount/api/workflows.py | Python | bsd-2-clause | 2,786 | [
"Cytoscape"
] | 82aa08fd33182ab7bdbf935894f93ee6ec22fa5fb69812f44b5b279ace202882 |
#!/usr/bin/env python
# encoding: utf-8
#
# @Author: José Sánchez-Gallego
# @Date: Nov 1, 2017
# @Filename: test_spaxel.py
# @License: BSD 3-Clause
# @Copyright: José Sánchez-Gallego
from __future__ import absolute_import, division, print_function
import itertools
import os
import astropy.io.fits
import pytest
from marvin import config
from marvin.core.exceptions import MarvinDeprecationError, MarvinError
from marvin.tests import marvin_test_if, marvin_test_if_class
from marvin.tests.conftest import set_the_config
from marvin.tools.cube import Cube
from marvin.tools.maps import Maps
from marvin.tools.modelcube import ModelCube
from marvin.tools.quantities import Spectrum
from marvin.tools.spaxel import Spaxel
spaxel_modes = [True, False, 'object']
def _get_spaxel_helper(object, x, y, **kwargs):
    """Return object.getSpaxel(x, y); skip the running test on an invalid binid."""
    try:
        return object.getSpaxel(x=x, y=y, **kwargs)
    except MarvinError as err:
        # Only an out-of-bin coordinate is acceptable here; anything else fails.
        assert 'do not correspond to a valid binid' in str(err)
        pytest.skip()
@pytest.fixture(params=itertools.product(spaxel_modes, spaxel_modes, spaxel_modes))
def cube_maps_modelcube_modes(request):
# Yields every (cube, maps, modelcube) combination of True/False/'object'.
return request.param
@marvin_test_if_class(mark='include', galaxy=dict(plateifu=['8485-1901']))
class TestSpaxel(object):
"""Exercises Spaxel construction from cube/maps/modelcube in every mode combination."""
def test_SpaxelBase(self, galaxy, cube_maps_modelcube_modes):
plateifu = galaxy.plateifu
bintype = galaxy.bintype.name
template = galaxy.template.name
release = galaxy.release
x = galaxy.dap['x']
y = galaxy.dap['y']
# Each element is True, False, or 'object' (pre-instantiated tool).
cube, maps, modelcube = cube_maps_modelcube_modes
if cube == 'object':
cube = Cube(plateifu=plateifu, release=release)
if maps == 'object':
maps = Maps(plateifu=plateifu, bintype=bintype,
template=template, release=release)
# MPL-4 has no model cubes at all.
if release == 'MPL-4':
modelcube = False
elif modelcube == 'object':
modelcube = ModelCube(plateifu=plateifu, bintype=bintype,
template=template, release=release)
if cube is False and maps is False and modelcube is False:
pytest.skip()
spaxel = Spaxel(x, y, plateifu=plateifu,
cube=cube, maps=maps, modelcube=modelcube,
template=template, bintype=bintype)
assert isinstance(spaxel, Spaxel)
# NOTE(review): both branches below assert the same thing; the
# binned/unbinned distinction no longer changes the expected type.
if (spaxel.bintype is not None and spaxel.bintype.binned is True and
(spaxel._maps or spaxel._modelcube)):
assert isinstance(spaxel, Spaxel)
else:
assert isinstance(spaxel, Spaxel)
# Quantities are populated only for the sources that were loaded.
if spaxel._cube:
assert len(spaxel.cube_quantities) > 0
else:
assert len(spaxel.cube_quantities) == 0
if spaxel._maps:
assert len(spaxel.maps_quantities) > 0
else:
assert len(spaxel.maps_quantities) == 0
if spaxel._modelcube:
assert len(spaxel.modelcube_quantities) > 0
else:
assert len(spaxel.modelcube_quantities) == 0
assert spaxel.plateifu == galaxy.plateifu
assert spaxel.mangaid == galaxy.mangaid
assert isinstance(spaxel.getCube(), Cube)
assert isinstance(spaxel.getMaps(), Maps)
if release != 'MPL-4':
assert isinstance(spaxel.getModelCube(), ModelCube)
def test_dir(self, galaxy):
# Quantity names from all three sources must appear in dir().
x = galaxy.dap['x']
y = galaxy.dap['y']
spaxel = Spaxel(x, y, plateifu='8485-1901', cube=True,
maps=True, modelcube=True)
dir_list = dir(spaxel)
assert 'flux' in dir_list
assert 'emline_gflux_ha_6564' in dir_list
assert 'binned_flux' in dir_list
def test_getattr(self, galaxy):
# The same quantities must also resolve via attribute access.
x = galaxy.dap['x']
y = galaxy.dap['y']
spaxel = Spaxel(x, y, plateifu='8485-1901', cube=True,
maps=True, modelcube=True)
assert spaxel.flux is not None
assert spaxel.emline_gflux_ha_6564 is not None
assert spaxel.binned_flux is not None
@pytest.mark.parametrize('force',
[('cube'),
('maps'),
('modelcube')],
ids=[])
def test_force_load(self, galaxy, force):
# Start with only the cube loaded, then force-load one more source.
x = galaxy.dap['x']
y = galaxy.dap['y']
spaxel = Spaxel(x, y, plateifu=galaxy.plateifu, cube=True,
maps=False, modelcube=False)
assert spaxel.cube_quantities is not None
assert spaxel.maps_quantities == {}
assert spaxel.modelcube_quantities == {}
spaxel.load(force=force)
if force == 'cube':
assert spaxel.cube_quantities is not None
elif force == 'maps':
assert spaxel.maps_quantities is not None
elif force == 'modelcube':
assert spaxel.modelcube_quantities is not None
def test_wrong_force_load(self, galaxy):
x = galaxy.dap['x']
y = galaxy.dap['y']
spaxel = Spaxel(x, y, plateifu=galaxy.plateifu, cube=True,
maps=False, modelcube=False)
with pytest.raises(AssertionError) as ee:
spaxel.load(force='crap')
assert 'force can only be cube, maps, or modelcube' in str(ee)
def test_no_inputs(self):
# Disabling all three sources must be rejected.
with pytest.raises(MarvinError) as ee:
Spaxel(0, 0, cube=None, maps=None, modelcube=None)
assert 'no inputs defined' in str(ee)
def test_files_maps_modelcube(self, galaxy):
# Instantiate every source from local files.
x = galaxy.dap['x']
y = galaxy.dap['y']
if galaxy.release == 'MPL-4':
modelcube_filename = None
else:
modelcube_filename = galaxy.modelpath
spaxel = Spaxel(x, y,
cube=galaxy.cubepath,
maps=galaxy.mapspath,
modelcube=modelcube_filename)
assert isinstance(spaxel, Spaxel)
assert isinstance(spaxel._cube, Cube)
assert isinstance(spaxel._maps, Maps)
if galaxy.release != 'MPL-4':
assert isinstance(spaxel._modelcube, ModelCube)
def test_files_modelcube(self, galaxy):
# Modelcube-only instantiation from a file.
x = galaxy.dap['x']
y = galaxy.dap['y']
if galaxy.release == 'MPL-4':
pytest.skip()
else:
modelcube_filename = galaxy.modelpath
spaxel = Spaxel(x, y,
cube=False,
maps=False,
modelcube=modelcube_filename)
assert isinstance(spaxel, Spaxel)
assert not isinstance(spaxel._cube, Cube)
assert not isinstance(spaxel._maps, Maps)
if galaxy.release != 'MPL-4':
assert isinstance(spaxel._modelcube, ModelCube)
def test_files_maps(self, galaxy):
# Maps-only instantiation from a file.
x = galaxy.dap['x']
y = galaxy.dap['y']
spaxel = Spaxel(x, y,
cube=False,
maps=galaxy.mapspath,
modelcube=False)
assert isinstance(spaxel, Spaxel)
assert not isinstance(spaxel._cube, Cube)
assert isinstance(spaxel._maps, Maps)
assert not isinstance(spaxel._modelcube, ModelCube)
class TestBinInfo(object):
    """Tests for the bin information attached to spaxel quantities."""

    def test_bad_binid(self):
        # Coordinate (0, 0) falls outside any valid bin for this target.
        spaxel = Spaxel(0, 0, plateifu='8485-1901', cube=True,
                        maps=True, modelcube=True, bintype='HYB10')
        with pytest.raises(MarvinError) as ee:
            spaxel.stellar_vel.bin.get_bin_spaxels()
        assert 'do not correspond to a valid binid' in str(ee)

    def test_load_all(self):
        set_the_config('MPL-6')
        spaxel = Spaxel(26, 13, plateifu='8485-1901', cube=True,
                        maps=True, modelcube=True, bintype='HYB10', release='MPL-6')
        assert isinstance(spaxel, Spaxel)
        # lazy=False must return fully loaded member spaxels.
        bin_spaxels = spaxel.stellar_vel.bin.get_bin_spaxels(lazy=False)
        assert len(bin_spaxels) > 0
        assert bin_spaxels[0].loaded is True

    def test_correct_binid(self):
        """Checks if the binid of the bin spaxels is the correct one (#457)"""
        maps = Maps(plateifu='8485-1901', release='MPL-6', bintype='HYB10')
        spaxel = maps[22, 14]
        assert isinstance(spaxel, Spaxel)
        # BUGFIX: was ``assert spaxel.x == 14, spaxel.y == 22``, which used the
        # second comparison as the assert *message* and never checked y.
        assert spaxel.x == 14 and spaxel.y == 22
        bin_spaxels = spaxel.stellar_vel.bin.get_bin_spaxels()
        for sp in bin_spaxels:
            sp.load()
            assert sp.stellar_vel.bin.binid == spaxel.stellar_vel.bin.binid
            sp_bin = maps[sp.y, sp.x]
            assert sp_bin.stellar_vel.bin.binid == spaxel.stellar_vel.bin.binid
class TestPickling(object):
"""Round-trip save/restore tests for Spaxel objects across data origins."""
def test_pickling_db_fails(self, temp_scratch, galaxy):
# db-backed objects must refuse to be pickled.
cube = Cube(plateifu=galaxy.plateifu)
spaxel = cube.getSpaxel(1, 3)
file = temp_scratch.join('test_spaxel.mpf')
with pytest.raises(MarvinError) as cm:
spaxel.save(str(file), overwrite=True)
assert 'objects with data_origin=\'db\' cannot be saved.' in str(cm.value)
def test_pickling_only_cube_file(self, temp_scratch, galaxy):
if galaxy.bintype.name != 'SPX':
pytest.skip("Can't instantiate a Spaxel from a binned Maps.")
cube = Cube(filename=galaxy.cubepath)
maps = Maps(filename=galaxy.mapspath)
spaxel = cube.getSpaxel(1, 3, properties=maps, models=False)
file = temp_scratch.join('test_spaxel.mpf')
path_saved = spaxel.save(str(file), overwrite=True)
assert file.check() is True
assert os.path.exists(path_saved)
del spaxel
# Restore and confirm file-backed data survived the round trip.
spaxel_restored = Spaxel.restore(str(file))
assert spaxel_restored is not None
assert isinstance(spaxel_restored, Spaxel)
assert spaxel_restored._cube is not None
assert spaxel_restored._cube.data_origin == 'file'
assert isinstance(spaxel_restored._cube.data, astropy.io.fits.HDUList)
assert spaxel_restored._maps is not None
assert spaxel_restored._maps.data_origin == 'file'
assert isinstance(spaxel_restored._maps.data, astropy.io.fits.HDUList)
def test_pickling_all_api(self, temp_scratch, galaxy):
drpver, __ = config.lookUpVersions()
cube = Cube(plateifu=galaxy.plateifu, mode='remote')
maps = Maps(plateifu=galaxy.plateifu, mode='remote')
modelcube = ModelCube(plateifu=galaxy.plateifu, mode='remote')
spaxel = cube.getSpaxel(1, 3, properties=maps, models=modelcube)
assert spaxel._cube.data_origin == 'api'
assert spaxel._maps.data_origin == 'api'
assert spaxel._modelcube.data_origin == 'api'
file = temp_scratch.join('test_spaxel_api.mpf')
path_saved = spaxel.save(str(file), overwrite=True)
assert file.check() is True
assert os.path.exists(path_saved)
del spaxel
# api-backed restores keep the origin but carry no raw data payload.
spaxel_restored = Spaxel.restore(str(file))
assert spaxel_restored is not None
assert isinstance(spaxel_restored, Spaxel)
assert spaxel_restored._cube is not None
assert isinstance(spaxel_restored._cube, Cube)
assert spaxel_restored._cube.data_origin == 'api'
assert spaxel_restored._cube.data is None
assert spaxel_restored._cube.header['VERSDRP3'] == drpver
assert spaxel_restored._maps is not None
assert isinstance(spaxel_restored._maps, Maps)
assert spaxel_restored._maps.data_origin == 'api'
assert spaxel_restored._maps.data is None
assert spaxel_restored._modelcube is not None
assert isinstance(spaxel_restored._modelcube, ModelCube)
assert spaxel_restored._modelcube.data_origin == 'api'
assert spaxel_restored._modelcube.data is None
def test_pickling_data(self, temp_scratch, galaxy):
# Quantity values must survive a save/restore cycle.
drpver, __ = config.lookUpVersions()
maps = Maps(filename=galaxy.mapspath)
modelcube = ModelCube(filename=galaxy.modelpath)
spaxel = maps.getSpaxel(25, 15, xyorig='lower', drp=False, models=modelcube)
file = temp_scratch.join('test_spaxel.mpf')
path_saved = spaxel.save(str(file), overwrite=True)
assert file.check() is True
assert os.path.exists(path_saved)
del spaxel
spaxel_restored = Spaxel.restore(str(file))
assert spaxel_restored.stellar_vel.value is not None
assert spaxel_restored.stellar_vel.bin.binid is not None
class TestMaskbit(object):
"""Checks the number of quality flags exposed on a spaxel per release."""
@marvin_test_if(mark='include', galaxy=dict(release=['MPL-4']))
def test_quality_flags_mpl4(self, galaxy):
# MPL-4 exposes a single quality flag (no model cube flag).
maps = Maps(plateifu=galaxy.plateifu)
sp = maps.getSpaxel(0, 0, model=True)
assert len(sp.quality_flags) == 1
@marvin_test_if(mark='skip', galaxy=dict(release=['MPL-4']))
def test_quality_flags(self, galaxy):
# Later releases expose two quality flags.
maps = Maps(plateifu=galaxy.plateifu)
sp = maps.getSpaxel(0, 0, models=True)
assert len(sp.quality_flags) == 2
class TestCubeGetSpaxel(object):
def _dropNones(self, **kwargs):
for k, v in list(kwargs.items()):
if v is None:
del kwargs[k]
return kwargs
@pytest.mark.parametrize(
'x, y, ra, dec, excType, message',
[(1, None, 1, None, AssertionError, 'Either use (x, y) or (ra, dec)'),
(1, None, 1, 1, AssertionError, 'Either use (x, y) or (ra, dec)'),
(1, None, None, None, AssertionError, 'Specify both x and y'),
(None, 1, None, None, AssertionError, 'Specify both x and y'),
(None, None, 1, None, AssertionError, 'Specify both ra and dec'),
(None, None, None, 1, AssertionError, 'Specify both ra and dec'),
(None, None, None, None, ValueError, 'You need to specify either (x, y) or (ra, dec)'),
(-50, 1, None, None, MarvinError, 'some indices are out of limits'),
(50, 1, None, None, MarvinError, 'some indices are out of limits'),
(1, -50, None, None, MarvinError, 'some indices are out of limits'),
(1, 50, None, None, MarvinError, 'some indices are out of limits'),
(None, None, 1., 1., MarvinError, 'some indices are out of limits'),
(None, None, 100, 60, MarvinError, 'some indices are out of limits'),
(None, None, 232.546383, 1., MarvinError, 'some indices are out of limits'),
(None, None, 1., 48.6883954, MarvinError, 'some indices are out of limits')],
ids=['x-ra', 'x-ra-dec', 'x', 'y', 'ra', 'dec', 'no-inputs', '-50-1', '50-1', '1--50',
'1-50', '1-1', '100-60', '232.5-1', '1-48.6'])
def test_getSpaxel_inputs(self, galaxy, x, y, ra, dec, excType, message):
"""Tests exceptions when getSpaxel gets inappropriate inputs."""
kwargs = self._dropNones(x=x, y=y, ra=ra, dec=dec)
with pytest.raises(excType) as ee:
cube = Cube(plateifu=galaxy.plateifu, release=galaxy.release)
cube.getSpaxel(**kwargs)
assert message in str(ee.value)
@pytest.mark.parametrize('coord, xyorig',
[('xy', 'lower'),
('xy', 'center'),
('radec', None)])
def test_getSpaxel_flux(self, cube, galaxy, coord, xyorig):
if coord == 'xy':
x = galaxy.spaxel['x'] if xyorig == 'lower' else galaxy.spaxel['x_cen']
y = galaxy.spaxel['y'] if xyorig == 'lower' else galaxy.spaxel['y_cen']
params = {'x': x, 'y': y, 'xyorig': xyorig}
elif coord == 'radec':
ra = galaxy.spaxel['ra']
dec = galaxy.spaxel['dec']
params = {'ra': ra, 'dec': dec}
spaxel = cube.getSpaxel(**params)
flux = spaxel.flux.value
assert flux[galaxy.spaxel['specidx']] == pytest.approx(galaxy.spaxel['flux'])
@pytest.mark.parametrize('monkeyconfig',
[('sasurl', 'http://www.averywrongurl.com')],
ids=['wrongurl'], indirect=True)
def test_getSpaxel_remote_fail_badresponse(self, monkeyconfig):
assert config.urlmap is not None
with pytest.raises(MarvinError) as cm:
Cube(mangaid='1-209232', mode='remote')
assert 'Failed to establish a new connection' in str(cm.value)
@pytest.mark.parametrize('monkeyconfig',
[('release', 'MPL-5')],
ids=['mpl5'], indirect=True)
def test_getSpaxel_remote_drpver_differ_from_global(self, galaxy, monkeyconfig):
if galaxy.release == 'MPL-5':
pytest.skip('Skipping release for forced global MPL-5')
assert config.release == 'MPL-5'
cube = Cube(plateifu=galaxy.plateifu, mode='remote', release=galaxy.release)
expected = galaxy.spaxel['flux']
spectrum = cube.getSpaxel(ra=galaxy.spaxel['ra'], dec=galaxy.spaxel['dec']).flux
assert spectrum.value[galaxy.spaxel['specidx']] == pytest.approx(expected)
def test_getspaxel_matches_file_db_remote(self, galaxy):
    """flux/ivar/mask must agree across file, db and api data origins.

    Checks both spaxel access paths: slicing (``cube[y, x]``, array origin)
    and ``getSpaxel`` with centre-origin coordinates.
    """
    cube_file = Cube(filename=galaxy.cubepath)
    cube_db = Cube(plateifu=galaxy.plateifu)
    cube_api = Cube(plateifu=galaxy.plateifu, mode='remote')
    assert cube_file.data_origin == 'file'
    assert cube_db.data_origin == 'db'
    assert cube_api.data_origin == 'api'
    xx = galaxy.spaxel['x']
    yy = galaxy.spaxel['y']
    spec_idx = galaxy.spaxel['specidx']
    flux = galaxy.spaxel['flux']
    ivar = galaxy.spaxel['ivar']
    mask = galaxy.spaxel['mask']
    # --- slicing path (note [row, col] == [y, x]) ---
    spaxel_slice_file = cube_file[yy, xx]
    spaxel_slice_db = cube_db[yy, xx]
    spaxel_slice_api = cube_api[yy, xx]
    assert spaxel_slice_file.flux.value[spec_idx] == pytest.approx(flux)
    assert spaxel_slice_db.flux.value[spec_idx] == pytest.approx(flux)
    assert spaxel_slice_api.flux.value[spec_idx] == pytest.approx(flux)
    assert spaxel_slice_file.flux.ivar[spec_idx] == pytest.approx(ivar)
    assert spaxel_slice_db.flux.ivar[spec_idx] == pytest.approx(ivar)
    assert spaxel_slice_api.flux.ivar[spec_idx] == pytest.approx(ivar)
    assert spaxel_slice_file.flux.mask[spec_idx] == pytest.approx(mask)
    assert spaxel_slice_db.flux.mask[spec_idx] == pytest.approx(mask)
    assert spaxel_slice_api.flux.mask[spec_idx] == pytest.approx(mask)
    # --- getSpaxel path (centre-origin coordinates) ---
    xx_cen = galaxy.spaxel['x_cen']
    yy_cen = galaxy.spaxel['y_cen']
    try:
        spaxel_getspaxel_file = cube_file.getSpaxel(x=xx_cen, y=yy_cen)
        spaxel_getspaxel_db = cube_db.getSpaxel(x=xx_cen, y=yy_cen)
        spaxel_getspaxel_api = cube_api.getSpaxel(x=xx_cen, y=yy_cen)
    except MarvinError as ee:
        # Some bintypes have no bin at the centre spaxel; not a failure.
        assert 'do not correspond to a valid binid' in str(ee)
        pytest.skip()
    assert spaxel_getspaxel_file.flux.value[spec_idx] == pytest.approx(flux, abs=1e-6)
    assert spaxel_getspaxel_db.flux.value[spec_idx] == pytest.approx(flux, abs=1e-6)
    assert spaxel_getspaxel_api.flux.value[spec_idx] == pytest.approx(flux, abs=1e-6)
    assert spaxel_getspaxel_file.flux.ivar[spec_idx] == pytest.approx(ivar)
    assert spaxel_getspaxel_db.flux.ivar[spec_idx] == pytest.approx(ivar)
    assert spaxel_getspaxel_api.flux.ivar[spec_idx] == pytest.approx(ivar)
    assert spaxel_getspaxel_file.flux.mask[spec_idx] == pytest.approx(mask)
    assert spaxel_getspaxel_db.flux.mask[spec_idx] == pytest.approx(mask)
    assert spaxel_getspaxel_api.flux.mask[spec_idx] == pytest.approx(mask)
class TestMapsGetSpaxel(object):
    """Tests for retrieving spaxels from Maps objects across data origins."""

    def _get_maps_kwargs(self, galaxy, data_origin):
        """Build the Maps constructor kwargs for the requested data origin."""
        if data_origin == 'file':
            maps_kwargs = dict(filename=galaxy.mapspath)
        else:
            maps_kwargs = dict(plateifu=galaxy.plateifu, release=galaxy.release,
                               bintype=galaxy.bintype, template=galaxy.template,
                               mode='local' if data_origin == 'db' else 'remote')
        return maps_kwargs

    def test_get_spaxel(self, galaxy, data_origin):
        """Spaxel at (15, 8) (lower origin) carries the expected stellar_vel ivar."""
        maps = Maps(**self._get_maps_kwargs(galaxy, data_origin))
        spaxel = _get_spaxel_helper(maps, 15, 8, xyorig='lower')
        # The original code had identical assertions in both is_binned()
        # branches; collapsed to a single check.
        assert isinstance(spaxel, Spaxel)
        expected = galaxy.stellar_vel_ivar_x15_y8_lower[galaxy.release][galaxy.template.name]
        assert spaxel.maps_quantities['stellar_vel'].ivar == pytest.approx(expected, abs=1e-6)
        assert len(spaxel.maps_quantities.keys()) > 0

    def test_get_spaxel_test2(self, galaxy, data_origin):
        """Spaxel at (5, 5) is a Spaxel and exposes maps quantities."""
        maps = Maps(**self._get_maps_kwargs(galaxy, data_origin))
        spaxel = _get_spaxel_helper(maps, 5, 5)
        assert isinstance(spaxel, Spaxel)
        assert len(spaxel.maps_quantities.keys()) > 0

    def test_get_spaxel_no_db(self, galaxy, exporigin):
        """Tests getting an spaxel if there is no DB."""
        maps = Maps(**self._get_maps_kwargs(galaxy, exporigin))
        spaxel = _get_spaxel_helper(maps, 5, 5)
        # The parent Maps accessible from the spaxel keeps the expected origin.
        assert spaxel.getMaps().data_origin == exporigin
        assert isinstance(spaxel, Spaxel)
        assert len(spaxel.maps_quantities.keys()) > 0

    @marvin_test_if(mark='include', galaxy=dict(bintype=['SPX', 'NONE']))
    def test_values(self, galaxy, exporigin):
        """Map values/unit/mask/ivar at the DAP reference spaxel match the fixture."""
        template = str(galaxy.template)
        if template not in galaxy.dap:
            pytest.skip()
        maps = Maps(**self._get_maps_kwargs(galaxy, exporigin))
        xx = galaxy.dap['x']
        yy = galaxy.dap['y']
        for channel in galaxy.dap[template]:
            if channel == 'model':
                continue
            channel_data = galaxy.dap[template][channel]
            # Renamed from `map`, which shadowed the builtin.
            channel_map = maps[channel]
            assert channel_map[yy, xx].value == pytest.approx(channel_data['value'], abs=1.e-4)
            assert channel_map.unit.scale == 1e-17
            assert channel_map.unit.to_string() == channel_data['unit']
            assert channel_map[yy, xx].mask == pytest.approx(channel_data['mask'], abs=1.e-4)
            assert channel_map[yy, xx].ivar == pytest.approx(channel_data['ivar'], abs=1.e-4)

    @marvin_test_if(mark='include', galaxy=dict(bintype=['SPX']))
    def test_model_deprecated(self, galaxy, exporigin):
        """Passing model=True to getSpaxel must raise a deprecation error (db only)."""
        if exporigin != 'db':
            pytest.skip()
        maps = Maps(**self._get_maps_kwargs(galaxy, exporigin))
        with pytest.raises(MarvinDeprecationError) as ee:
            maps.getSpaxel(x=0, y=0, model=True)
        assert 'the model parameter has been deprecated. Use models.' in str(ee)
@marvin_test_if_class(mark='skip', galaxy=dict(release=['MPL-4']))
class TestModelCubeGetSpaxel(object):
    """Tests for ModelCube.getSpaxel across file/db/api origins (skipped on MPL-4)."""

    def _test_getspaxel(self, spaxel, galaxy):
        """Common checks: versions, identifiers, parent model cube and spectra."""
        spaxel_drpver, spaxel_dapver = config.lookUpVersions(spaxel.release)
        assert spaxel_drpver == galaxy.drpver
        assert spaxel_dapver == galaxy.dapver
        assert spaxel.plateifu == galaxy.plateifu
        assert spaxel.mangaid == galaxy.mangaid
        assert spaxel.getModelCube() is not None
        assert spaxel.getModelCube().bintype == galaxy.bintype
        assert spaxel.getModelCube().template == galaxy.template
        # The original asserted spaxel.template twice; duplicate removed.
        assert spaxel.template == galaxy.template
        assert spaxel._parent_shape == tuple(galaxy.shape)
        assert isinstance(spaxel.binned_flux, Spectrum)
        assert isinstance(spaxel.full_fit, Spectrum)
        assert isinstance(spaxel.emline_fit, Spectrum)

    def test_getspaxel(self, galaxy, data_origin):
        """getSpaxel on a ModelCube built from each data origin passes common checks."""
        if data_origin == 'file':
            kwargs = {'filename': galaxy.modelpath}
        elif data_origin == 'db':
            kwargs = {'plateifu': galaxy.plateifu}
        elif data_origin == 'api':
            kwargs = {'plateifu': galaxy.plateifu, 'mode': 'remote'}
        model_cube = ModelCube(bintype=galaxy.bintype, template=galaxy.template,
                               release=galaxy.release, **kwargs)
        spaxel = _get_spaxel_helper(model_cube, 1, 2)
        self._test_getspaxel(spaxel, galaxy)

    def test_getspaxel_db_api_model(self, galaxy):
        """With properties/drp disabled the spaxel still links Cube and Maps lazily."""
        model_cube = ModelCube(plateifu=galaxy.plateifu,
                               bintype=galaxy.bintype, template=galaxy.template,
                               release=galaxy.release, )
        spaxel = _get_spaxel_helper(model_cube, 1, 2, properties=False, drp=False)
        self._test_getspaxel(spaxel, galaxy)
        assert isinstance(spaxel.getCube(), Cube)
        # drp=False: no DRP quantities loaded on the spaxel.
        assert 'flux' not in spaxel.cube_quantities
        assert isinstance(spaxel.getMaps(), Maps)
        # properties=False: no DAP map quantities loaded.
        assert len(spaxel.maps_quantities) == 0

    def test_getspaxel_matches_file_db_remote(self, galaxy):
        """binned_flux value/ivar/mask must agree across file, db and api (SPX only)."""
        if galaxy.bintype != 'SPX':
            pytest.skip()
        modelcube_file = ModelCube(filename=galaxy.modelpath,
                                   bintype=galaxy.bintype, template=galaxy.template,
                                   release=galaxy.release)
        modelcube_db = ModelCube(mangaid=galaxy.mangaid, bintype=galaxy.bintype,
                                 template=galaxy.template, release=galaxy.release)
        modelcube_api = ModelCube(mangaid=galaxy.mangaid, mode='remote',
                                  bintype=galaxy.bintype, template=galaxy.template,
                                  release=galaxy.release)
        assert modelcube_file.data_origin == 'file'
        assert modelcube_db.data_origin == 'db'
        assert modelcube_api.data_origin == 'api'
        idx = galaxy.spaxel['specidx']
        flux = galaxy.spaxel['model_flux']
        ivar = galaxy.spaxel['model_ivar']
        mask = galaxy.spaxel['model_mask']
        xx_cen = galaxy.spaxel['x_cen']
        yy_cen = galaxy.spaxel['y_cen']
        try:
            spaxel_getspaxel_file = modelcube_file.getSpaxel(x=xx_cen, y=yy_cen)
            spaxel_getspaxel_db = modelcube_db.getSpaxel(x=xx_cen, y=yy_cen)
            spaxel_getspaxel_api = modelcube_api.getSpaxel(x=xx_cen, y=yy_cen)
        except MarvinError as ee:
            # Centre spaxel may not belong to a valid bin for some data.
            assert 'do not correspond to a valid binid' in str(ee)
            pytest.skip()
        assert spaxel_getspaxel_file.binned_flux.value[idx] == pytest.approx(flux, abs=1e-6)
        assert spaxel_getspaxel_db.binned_flux.value[idx] == pytest.approx(flux, abs=1e-6)
        assert spaxel_getspaxel_api.binned_flux.value[idx] == pytest.approx(flux, abs=1e-6)
        assert spaxel_getspaxel_file.binned_flux.ivar[idx] == pytest.approx(ivar)
        assert spaxel_getspaxel_db.binned_flux.ivar[idx] == pytest.approx(ivar)
        assert spaxel_getspaxel_api.binned_flux.ivar[idx] == pytest.approx(ivar)
        assert spaxel_getspaxel_file.binned_flux.mask[idx] == pytest.approx(mask)
        assert spaxel_getspaxel_db.binned_flux.mask[idx] == pytest.approx(mask)
        assert spaxel_getspaxel_api.binned_flux.mask[idx] == pytest.approx(mask)
| albireox/marvin | python/marvin/tests/tools/test_spaxel.py | Python | bsd-3-clause | 27,114 | [
"Galaxy"
] | 9c11c06255cbd372d046443fd6b0c6a467c4e49874db1e06df0509090513e459 |
from src.main.Node import Node
class Modifier(Node):
    """Node that strips a modifier token from the input and wraps the rest.

    Subclasses are expected to define ``symbol`` (the modifier token removed
    from the string before the remainder is parsed) -- ``symbol`` is not
    defined in this class, TODO confirm against subclasses.
    """

    # child: parsed sub-expression (set in __init__).
    # childNode: result of visiting the child (set in accept).
    child = None
    childNode = None

    def __init__(self, string):
        # Local import avoids a circular dependency with NodeFactory.
        from src.main.NodeFactory import NodeFactory
        self.string = string
        string_without_sym = string.replace(self.symbol, "")
        self.child = NodeFactory.fetch_part(string_without_sym)

    def accept(self, visitor):
        # Visitor pattern: visit the child first, then this node.
        self.childNode = self.child.accept(visitor)
        return visitor.visit(self)

    def getValue(self):
        # Value delegates entirely to the wrapped child.
        return self.child.getValue()
| charleshamel73/diceroller | src/modifier/Modifier.py | Python | mit | 514 | [
"VisIt"
] | da7af7537f0ea9e5e8d714d8606e7a124ed77658342413fcfc144343bc68cd00 |
from tcga_encoder.utils.helpers import *
from tcga_encoder.data.data import *
from tcga_encoder.definitions.tcga import *
#from tcga_encoder.definitions.nn import *
from tcga_encoder.definitions.locations import *
#from tcga_encoder.algorithms import *
import seaborn as sns
from sklearn.manifold import TSNE, locally_linear_embedding
#import scipy.spatial.distance.pdist
from scipy.spatial.distance import pdist, squareform
from scipy.spatial.distance import squareform
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
from lifelines import CoxPHFitter
from lifelines.datasets import load_regression_dataset
from lifelines.utils import k_fold_cross_validation
from lifelines import KaplanMeierFitter
from lifelines.statistics import logrank_test, multivariate_logrank_test
# cloudy blue #acc2d9
# dark pastel green #56ae57
# dust #b2996e
# electric lime #a8ff04
# fresh green #69d84f
# light eggplant #894585
# nasty green #70b23f
# really light blue #d4ffff
# tea #65ab7c
# warm purple #952e8f
# yellowish tan #fcfc81
# cement #a5a391
# dark grass green #388004
# dusty teal #4c9085
# grey teal #5e9b8a
# macaroni and cheese #efb435
# pinkish tan #d99b82
# spruce #0a5f38
# strong blue #0c06f7
# toxic green #61de2a
# windows blue #3778bf
# blue blue #2242c7
# blue with a hint of purple #533cc6
# booger #9bb53c
# bright sea green #05ffa6
# dark green blue #1f6357
# deep turquoise #017374
# green teal #0cb577
# strong pink #ff0789
# bland #afa88b
# deep aqua #08787f
# lavender pink #dd85d7
# light moss green #a6c875
# light seafoam green #a7ffb5
# olive yellow #c2b709
# pig pink #e78ea5
# deep lilac #966ebd
# desert #ccad60
# dusty lavender #ac86a8
# purpley grey #947e94
# purply #983fb2
# candy pink #ff63e9
# light pastel green #b2fba5
# boring green #63b365
# kiwi green #8ee53f
# light grey green #b7e1a1
# orange pink #ff6f52
# tea green #bdf8a3
# very light brown #d3b683
# egg shell #fffcc4
# eggplant purple #430541
# powder pink #ffb2d0
# reddish grey #997570
# baby shit brown #ad900d
# liliac #c48efd
# stormy blue #507b9c
# ugly brown #7d7103
# custard #fffd78
# darkish pink #da467d
# xkcd colour names (seaborn's sns.xkcd_palette vocabulary -- TODO confirm
# where these are consumed); one entry per tissue type to be plotted.
tissue_color_names = ["windows blue", "amber", "greyish", "faded green", "dusty purple",
                      "nice blue", "rosy pink", "sand brown", "baby purple",
                      "fern", "creme", "ugly blue", "washed out green", "squash",
                      "cinnamon", "radioactive green", "cocoa", "charcoal grey", "indian red",
                      "light lavendar", "toupe", "dark cream", "burple", "tan green",
                      "azul", "bruise", "sunny yellow", "deep brown", "off blue",
                      "custard", "powder pink", "deep lilac", "kiwi green", "orange pink"]
def main( data_location, results_location ):
    """Quantize latent Z codes, bicluster them globally with MiniBatchKMeans and,
    per tissue, save a clustermap plus Kaplan-Meier survival curves per cluster.

    Python 2 script. `pp` is presumably matplotlib's pylab, brought in via the
    star-imports at the top of the file -- TODO confirm.
    """
    # --- paths and stores -------------------------------------------------
    data_path = os.path.join( HOME_DIR ,data_location ) #, "data.h5" )
    results_path = os.path.join( HOME_DIR, results_location )
    data_filename = os.path.join( data_path, "data.h5")
    fill_filename = os.path.join( results_path, "full_vae_fill.h5" )
    save_dir = os.path.join( results_path, "kmeans_with_z_global" )
    check_and_mkdir(save_dir)
    size_per_unit = 0.25
    print "HOME_DIR: ", HOME_DIR
    print "data_filename: ", data_filename
    print "fill_filename: ", fill_filename
    print "LOADING stores"
    data_store = pd.HDFStore( data_filename, "r" )
    fill_store = pd.HDFStore( fill_filename, "r" )
    # --- latent codes (train + validation stacked) ------------------------
    Z_train = fill_store["/Z/TRAIN/Z/mu"]
    Z_val = fill_store["/Z/VAL/Z/mu"]
    Z = np.vstack( (Z_train.values, Z_val.values) )
    n_z = Z.shape[1]
    #pdb.set_trace()
    z_names = ["z_%d"%z_idx for z_idx in range(Z.shape[1])]
    Z = pd.DataFrame( Z, index = np.hstack( (Z_train.index.values, Z_val.index.values)), columns = z_names )
    barcodes = np.union1d( Z_train.index.values, Z_val.index.values )
    # --- quantize each latent dimension into signed rank bands ------------
    # NOTE: the second assignment overrides the first (5 bands are used).
    quantiles = (len(Z)*np.array( [0,0.33, 0.66, 1.0] )).astype(int)
    quantiles = (len(Z)*np.array( [0,0.2, 0.4,0.6,0.8,1.0] )).astype(int)
    #quantiles = (len(Z)*np.array( [0,0.1, 0.2,0.3,0.4,0.6,0.7,0.8,0.9,1.0] )).astype(int)
    n_quantiles = len(quantiles)-1
    start_q_id = -(n_quantiles-1)/2
    Z=Z.loc[barcodes]
    Z_values = Z.values
    argsort_Z = np.argsort( Z_values, 0 )
    Z_quantized = np.zeros( Z_values.shape, dtype=int )
    for start_q, end_q in zip( quantiles[:-1], quantiles[1:] ):
        for z_idx in range(n_z):
            z_idx_order = argsort_Z[:,z_idx]
            Z_quantized[ z_idx_order[start_q:end_q], z_idx] = start_q_id
        start_q_id+=1
    Z_quantized = pd.DataFrame(Z_quantized, index=barcodes, columns=z_names )
    Z_quantized.to_csv( save_dir + "/Z_quantized.csv")
    # --- clinical annotations ---------------------------------------------
    sub_bcs = np.array([ x+"_"+y for x,y in np.array(data_store["/CLINICAL/data"]["patient.stage_event.pathologic_stage"].index.tolist(),dtype=str)] )
    sub_values = np.array( data_store["/CLINICAL/data"]["patient.stage_event.pathologic_stage"].values, dtype=str )
    subtypes = pd.Series( sub_values, index = sub_bcs, name="subtypes")
    tissues = data_store["/CLINICAL/TISSUE"].loc[barcodes]
    tissue_names = tissues.columns
    tissue_idx = np.argmax( tissues.values, 1 )
    # -----------------------------
    # -----------------------------
    # --- survival data: filter implausible ages and follow-up times -------
    ALL_SURVIVAL = data_store["/CLINICAL/data"][["patient.days_to_last_followup","patient.days_to_death","patient.days_to_birth"]]
    tissue_barcodes = np.array( ALL_SURVIVAL.index.tolist(), dtype=str )
    surv_barcodes = np.array([ x+"_"+y for x,y in tissue_barcodes])
    NEW_SURVIVAL = pd.DataFrame( ALL_SURVIVAL.values, index =surv_barcodes, columns = ALL_SURVIVAL.columns )
    NEW_SURVIVAL = NEW_SURVIVAL.loc[barcodes]
    #clinical = data_store["/CLINICAL/data"].loc[barcodes]
    Age = NEW_SURVIVAL[ "patient.days_to_birth" ].values.astype(int)
    Times = NEW_SURVIVAL[ "patient.days_to_last_followup" ].fillna(0).values.astype(int)+NEW_SURVIVAL[ "patient.days_to_death" ].fillna(0).values.astype(int)
    Events = (1-np.isnan( NEW_SURVIVAL[ "patient.days_to_death" ].astype(float)) ).astype(int)
    # days_to_birth is negative for valid records.
    ok_age_query = Age<-10
    ok_age = pp.find(ok_age_query )
    tissues = tissues[ ok_age_query ]
    #pdb.set_trace()
    Age=-Age[ok_age]
    Times = Times[ok_age]
    Events = Events[ok_age]
    s_barcodes = barcodes[ok_age]
    NEW_SURVIVAL = NEW_SURVIVAL.loc[s_barcodes]
    #ok_followup_query = NEW_SURVIVAL[ "patient.days_to_last_followup" ].fillna(0).values>=0
    #ok_followup = pp.find( ok_followup_query )
    bad_followup_query = NEW_SURVIVAL[ "patient.days_to_last_followup" ].fillna(0).values.astype(int)<0
    bad_followup = pp.find( bad_followup_query )
    ok_followup_query = 1-bad_followup_query
    ok_followup = pp.find( ok_followup_query )
    bad_death_query = NEW_SURVIVAL[ "patient.days_to_death" ].fillna(0).values.astype(int)<0
    bad_death = pp.find( bad_death_query )
    #pdb.set_trace()
    Age=Age[ok_followup]
    Times = Times[ok_followup]
    Events = Events[ok_followup]
    s_barcodes = s_barcodes[ok_followup]
    NEW_SURVIVAL = NEW_SURVIVAL.loc[s_barcodes]
    # S = Z.loc[s_barcodes]
    # S["E"] = Events
    # S["T"] = Times
    # S["Age"] = np.log(Age)
    S = pd.DataFrame( np.vstack((Events,Times)).T, index = s_barcodes, columns=["E","T"])
    #pdb.set_trace()
    # -----------------------------
    # -----------------------------
    from sklearn.cluster import MiniBatchKMeans
    # print "running kmeans"
    # kmeans_patients = MiniBatchKMeans(n_clusters=10, random_state=0).fit(Z_quantized.values)
    # kmeans_patients_labels = kmeans_patients.labels_
    #
    # kmeans_z = MiniBatchKMeans(n_clusters=10, random_state=0).fit(Z_quantized.values.T)
    # kmeans_z_labels = kmeans_z.labels_
    #
    #
    # order_labels = np.argsort(kmeans_patients_labels)
    # order_labels_z = np.argsort(kmeans_z_labels)
    # sorted_Z = pd.DataFrame( Z_quantized.values[order_labels,:], index=Z_quantized.index[order_labels], columns=Z_quantized.columns)
    # sorted_Z = pd.DataFrame( sorted_Z.values[:,order_labels_z], index=sorted_Z.index, columns = sorted_Z.columns[order_labels_z] )
    n = len(Z)
    n_tissues = len(tissue_names)
    # --- global biclustering: K_p patient clusters x K_z latent clusters ---
    K_p = 10
    K_z = 15
    k_pallette = sns.hls_palette(K_p)
    # K_ps = [2,5,10,15,20]
    # fit = []
    # for K_p in K_ps:
    # global_kmeans_patients = MiniBatchKMeans(n_clusters=K_p, random_state=0).fit(Z_quantized.values)
    # fit.append( global_kmeans_patients.inertia_)
    # print fit
    global_kmeans_patients = MiniBatchKMeans(n_clusters=K_p, random_state=0).fit(Z_quantized.values)
    global_kmeans_patients_labels = global_kmeans_patients.labels_
    #pdb.set_trace()
    global_kmeans_z = MiniBatchKMeans(n_clusters=K_z, random_state=0).fit(Z_quantized.values.T)
    global_kmeans_z_labels = global_kmeans_z.labels_
    bicluster_means = np.zeros( (K_p,K_z), dtype=float )
    for kp in range(K_p):
        ip = pp.find( global_kmeans_patients_labels==kp )
        z_p = Z_quantized.values[ip,:]
        for kz in range(K_z):
            iz = pp.find( global_kmeans_z_labels==kz )
            z_pz = z_p[:,iz]
            bicluster_means[kp,kz]=z_pz.mean()
    # Order clusters by the spread of their bicluster means for display.
    spread_rows = bicluster_means.max(1)-bicluster_means.min(1)
    spread_cols = bicluster_means.max(0)-bicluster_means.min(0)
    order_rows = np.argsort(spread_rows)
    order_cols = np.argsort(spread_cols)
    # --- per-tissue clustermap + survival analysis -------------------------
    for t_idx in range(n_tissues):
        tissue_name = tissue_names[t_idx]
        print "working %s"%(tissue_name)
        t_ids_cohort = tissue_idx == t_idx
        n_tissue = np.sum(t_ids_cohort)
        if n_tissue < 1:
            continue
        Z_cohort = Z_quantized[ t_ids_cohort ]
        bcs = barcodes[t_ids_cohort]
        #kmeans_patients = MiniBatchKMeans(n_clusters=K_p, random_state=0).fit(Z_cohort.values)
        kmeans_patients_labels = global_kmeans_patients_labels[ t_ids_cohort ]
        kmeans_z_labels = global_kmeans_z_labels
        cohort_k = np.unique(kmeans_patients_labels)
        kmeans_patients_labels = [order_rows[idx] for idx in kmeans_patients_labels]
        kmeans_z_labels = [order_cols[idx] for idx in kmeans_z_labels]
        #pdb.set_trace()
        order_labels = np.argsort(kmeans_patients_labels)
        order_labels_z = np.argsort(kmeans_z_labels)
        sorted_Z = pd.DataFrame( Z_cohort.values[order_labels,:], index=Z_cohort.index[order_labels], columns=Z_cohort.columns)
        sorted_Z = pd.DataFrame( sorted_Z.values[:,order_labels_z], index=sorted_Z.index, columns = sorted_Z.columns[order_labels_z] )
        #cohort_subtypes = subtypes.loc[bcs]
        #subtype_names = np.unique(cohort_subtypes.values)
        #subtype2colors = OrderedDict( zip(subtype_names,sns.color_palette("Blues", len(subtype_names))) )
        #subtype_colors = np.array( [subtype2colors[subtype] for subtype in cohort_subtypes.values] )
        size1 = max( min( int( n_z*size_per_unit ), 12), 16 )
        size2 = max( min( int( n_tissue*size_per_unit), 12), 16)
        #f = pp.figure(figsize=(size1,size2))
        #ax=f.add_subplot(111)
        #k_pallette
        k_colors = np.array([k_pallette[kmeans_patients_labels[i]] for i in order_labels] )
        #pdb.set_trace()
        h = sns.clustermap( sorted_Z, row_colors=k_colors, row_cluster=False, col_cluster=False, figsize=(size1,size2) )
        #pdb.set_trace()
        pp.setp(h.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
        pp.setp(h.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
        pp.setp(h.ax_heatmap.yaxis.get_majorticklabels(), fontsize=12)
        pp.setp(h.ax_heatmap.xaxis.get_majorticklabels(), fontsize=12)
        h.ax_row_dendrogram.set_visible(False)
        h.ax_col_dendrogram.set_visible(False)
        h.cax.set_visible(False)
        # Draw black separators between patient clusters (rows) and z clusters (cols).
        h.ax_heatmap.hlines(len(kmeans_patients_labels)-pp.find(np.diff(np.array(kmeans_patients_labels)[order_labels]))-1, *h.ax_heatmap.get_xlim(), color="black", lw=5)
        h.ax_heatmap.vlines(pp.find(np.diff(np.array(kmeans_z_labels)[order_labels_z]))+1, *h.ax_heatmap.get_ylim(), color="black", lw=5)
        #pp.show()
        #pdb.set_trace()
        pp.savefig( save_dir + "/Z_kmeans_%s.png"%(tissue_name), fmt="png", dpi=300, bbox_inches='tight')
        pp.close('all')
        # --- survival: log-rank test across clusters + KM curves ----------
        S_cohort = S.loc[bcs]
        times = S_cohort["T"].values
        events = S_cohort["E"].values
        if len(np.unique(kmeans_patients_labels))>0:
            results = multivariate_logrank_test(times, groups=kmeans_patients_labels, event_observed=events )
            p_value = results.p_value
        else:
            p_value = 1
        f = pp.figure()
        ax= f.add_subplot(111)
        kmf = KaplanMeierFitter()
        for kp in range(K_p):
            ids = pp.find( np.array(kmeans_patients_labels)==kp )
            k_bcs = bcs[ ids ]
            #pdb.set_trace()
            S_cohort_k = S_cohort.loc[ k_bcs ]
            times = S_cohort_k["T"].values
            events = S_cohort_k["E"].values
            # Only plot clusters with a reasonable number of patients.
            if len(k_bcs) > 20:
                kmf.fit(times, event_observed=events, label="k%d"%(kp) )
                ax=kmf.plot(ax=ax,at_risk_counts=False,show_censors=True, color=k_pallette[kp],ci_show=False)
        #pdb.set_trace()
        #kmf.fit(times[z2_fifth], event_observed=events[z2_fifth], label="rest" )
        #ax=kmf.plot(ax=ax,at_risk_counts=False,show_censors=True, color='red')
        #pp.title( "%s z%d splits 1/5 v rest p-value = %g"%( tissue_name, z_idx, p_values_fifth[t_idx,z_idx]) )
        pp.title("%s p-value = %0.5f"%(tissue_name,p_value))
        pp.savefig( save_dir + "/%s_survival_%0.5f.png"%(tissue_name,p_value), format="png", dpi=300)
        #pdb.set_trace()
        #pdb.set_trace()
        #d_mat = pdist( Z_cohort.values )
        #s_form = squareform(d_mat)
        #csr = csr_matrix(np.triu(s_form))
        #Tcsr = minimum_spanning_tree(csr)
        #as_mat = Tcsr.toarray()
        #pp.figure(figsize=(16,16))
        # i=0
        # for x in Tcsr:
        # indices = x.indices
        # weights = x.data
        #
        # for j,w in zip(indices,weights):
        # G.add_edge(bcs[i][-7:], bcs[j][-7:], weight=w)
        # i+=1
        # layout=nx.spring_layout
        # #layout=nx.spectral_layout
        # pos=layout(G)
        # nx.draw(G,pos,
        # with_labels=True,
        # node_size=1000, hold=False, node_color='b'
        # )
        # G.clear()
        # pp.title("%s"%(tissue_name))
        # pp.savefig(save_dir + "/%s_mwst.png"%(tissue_name), fmt='png',dpi=300)
        # pp.close('all')
        # #pdb.set_trace()
        # f = pp.figure()
        # ax = f.add_subplot(111)
        #
        # size1 = max( int( n_z*size_per_unit ), 20 )
        # size2 = min( max( int( n_tissue*size_per_unit ), 12 ), 20 )
        #
        # #
        # # if len(subtype_names)>1:
        # # h = sns.clustermap( Z_cohort, square=False, figsize=(size1,size2), row_colors = subtype_colors )
        # # else:
        # # h = sns.clustermap( Z_cohort, square=False, figsize=(size1,size2) )
        # # pp.setp(h.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
        # # pp.setp(h.ax_heatmap.xaxis.get_majorticklabels(), rotation=90)
        # # pp.setp(h.ax_heatmap.yaxis.get_majorticklabels(), fontsize=12)
        # # pp.setp(h.ax_heatmap.xaxis.get_majorticklabels(), fontsize=12)
        # # h.ax_row_dendrogram.set_visible(False)
        # # h.ax_col_dendrogram.set_visible(False)
        # # h.cax.set_visible(False)
        #
        #
        # pp.savefig( save_dir + "/Z_clustermap_%s.png"%(tissue_name), fmt="png", dpi=300, bbox_inches='tight')
        # pp.close('all')
if __name__ == "__main__":
    # CLI entry point: <data_location> <results_location>.
    # Direct indexing keeps the original IndexError on missing arguments.
    main(sys.argv[1], sys.argv[2])
"Amber"
] | 683d3a206285c7bbfcb9663b2a96d6dd9b3af4029a938511970b29b65f05ff77 |
"""
Swarmlinebot (Deprecated)
Swarmlinebot was an experiment in constructing a line by 'swarming', having the
bots compete for the privilege of being the next in line.
Stored for posterity.
NOTE: Deprecated. Superseded by LineBOT.
"""
from Kilobot import *
def load(sim):
    """Factory entry point used by the simulator to create this module's bot.

    Swarmlinebot is deprecated (superseded by LineBOT), so a plain
    Defaultbot is returned instead; the old call is kept for posterity.
    """
    # return Swarmlinebot(sim)
    return Defaultbot(sim)
class Defaultbot(Kilobot):
    """Minimal bot: runs only the idle step (loop0, inherited from Kilobot)."""

    def __init__(self, sim):
        Kilobot.__init__(self, sim)
        # program is the list of step functions executed by the Kilobot runtime.
        self.program = [self.loop0]
# Message-type codes placed in msg[2]; even values only ("dodge lsb"
# suggests the low bit is reserved -- TODO confirm against Kilobot messaging).
SYN = 2 # dodge lsb
SYNACK = 4
ACK = 6
LINE = 8
SWARM = 10
# Distance thresholds (sensor units) used by the orbit/closein steering.
LO = 45
GOLD = 55
HI = 55
# NOTE(review): FAR == GOLD == HI (55) makes the `HI <= dist < FAR` band in
# Swarmlinebot.orbit empty -- confirm whether HI was meant to differ.
FAR = GOLD
NEAR = LO
class Swarmlinebot(Kilobot):
def __init__(self, sim):
Kilobot.__init__(self, sim)
self.id = self.secretID # bots have a unique numbering 0..(n-1)
self.hood = []
self.spos = 0
self.target = 0
self.tcount = 0
self.tvalid = False
self.spinmin = FAR
self.scount = 0
self.msg = [0,0,0,0]
self.timeout = 0
if (self.secretID == 0): # the leader
self.hood = self.secretNx
self.program = [self.activateL,
self.waitFirstSynAck,
self.leadloop
]
else: # others
self.program = [self.lineInit,
self.loop0, # end for first violin
self.activate,
self.get,
self.orbit,
self.stride,
self.closein,
self.loop0,
]
##
## Func
##
## Leader and first designation, line initiation
def activateL(self):
self.hood = self.secretNx
self.spos = 0
self.message_out(self.id, self.hood[0], SYN)
self.toggle_tx()
self.debug = "LEADER"
self.set_color(0,3,0)
def waitFirstSynAck(self):
self.get_message()
if (self.msgrx[5] == 1):
if (self.msgrx[0] == self.hood[0] and self.msgrx[2] == SYNACK):
print self.secretID, "got SYNACK"
self.message_out(self.id, self.hood[0], ACK)
elif (self.msgrx[2] == LINE): # notice that the swarm has begun
print self.secretID, "joins LINE"
self.message_out(0, 0, LINE) # join it to wake those nearby
return self.goto(self.leadloop)
self.PC -= 1
def leadloop(self):
self.PC -= 1
def lineInit(self):
self.get_message()
if (self.msgrx[5] == 1):
# you are the first after leader ...
if (self.msgrx[0:3] == (0, self.id, SYN)):
if (not self.tx_enabled):
print self.secretID, "got SYN"
self.message_out(self.id, self.msgrx[0], SYNACK)
self.toggle_tx()
self.spos = 1
self.debug = "FIRST"
self.set_color(0,0,3)
elif (self.msgrx[0:3] == (0, self.id, ACK)):
print self.secretID, "got ACK"
self.message_out(1, 1, LINE)
print self.secretID, "starting SWARM"
return self.goto(self.closein) # TODO: maintain, (orient)
# ... or you are not
elif (self.msgrx[2] == LINE):
print self.id, "begins target hunt"
return self.goto(self.activate)
self.PC -= 1
def activate(self):
self.target = 2 # leader=0, first=1
self.debug = "T:%d" % (self.target)
self.set_color(3,0,0)
##
## Line swarming
##
def get(self):
self.get_message()
if (self.msgrx[5] == 1):
self.msg = self.msgrx[0:4]
if (self.msg[2] == LINE):
self.timeout = 0
return self.goto(self.orbit)
else: # we heard something, but not what we were looking for
self.timeout = 0
self.timeout += 1
if (self.timeout > 10):
self.timeout = 0
self.msg = [0, 0, 0, FAR]
return self.goto(self.stride)
self.PC -= 1
def stride(self): # towards the swarm by any means necessary, currently black magic
if (self.sim.round < 500):
self.PC -= 1
return
self.scount += 1
if (self.scount == 1):
print self.id, "gone spinning"
elif (self.scount < 75):
self.get_message()
if (self.msgrx[5] == 1):
self.scount == 80
else:
self.ful/Users/Luiz/Desktop/kbsim/Bots/Swarmlinebot.pylCCW()
elif (self.scount < 90): self.fullFWRD()
else:
self.spinmin = FAR
self.scount = 0
self.goto(self.get)
self.PC -= 1
def orbit(self): # try and join the line
self.clear_rxbuf()
heard = self.msg[0]
dist = self.msg[3]
# adjust target
if (heard >= self.target):
self.target = heard + 1
print self.id, "target adjust to", self.target
self.debug = "T:%d" % (self.target)
self.tcount = 0
if (self.tcount > 10): # make those too close take another lap
print self.id, "too close, validity reset"
self.tvalid = False
# if (self.id == 2): print self.heard, self.dist, self._history_peek(), self.tcount, self.tvalid
# first visit head
if (heard == 0 and (GOLD-1 <= dist <= GOLD+1)):
self.tcount += 1
if (self.tcount == 60):
print self.id, "tvalid True"
self.tvalid = True
self.tcount = 0
# then go and add yourself to the tail
if (self.tvalid and
heard == self.target - 1 and (GOLD-1 <= dist <= GOLD+1)):
self.tcount += 1
if (self.tcount == 35): # magic number for timing
print self.id, "assumes position as", self.target
self.spos = self.target
self.message_out(self.spos, self.spos, LINE)
self.tx_enabled = 1
self.debug = "S:%d" % (self.spos)
self.set_color(0,0,3)
self.tcount = 0
return self.goto(self.closein)
elif (self.tvalid):
self.tcount = 0
# steering decision
peek = self._history_peek() # interference == not at line end, but in transit there
interference = (peek[0] != heard and peek[2] == LINE and abs(peek[3] - dist) >= 3)
if (dist <= LO or interference):
self.op = self.fullCW
elif (HI <= dist < FAR):
self.op = self.fullCCW
elif (dist == FAR):
self.op = self.fullCCW if self.rand() > 16 else self.fullCW
else:
self.op = self.fullFWRD
# update round
self._history_add(self.msg) # store current msg to memory
self.op()
return self.goto(self.get)
def closein(self): # the last mms are the hardest (?)
self.PC -= 1
self.get_message()
if (self.msgrx[5] == 1):
# self._history_add(self.msgrx[0:4])
if (self.msgrx[0] == self.spos - 1):
if (self.msgrx[3] >= NEAR):
self.op = self.fullCCW
""" # TODO more smarts in tagging along
diff = self._history_peek()[3] - self.msgrx[3]
if diff < -1:
print diff
self.op = self.fullCW if self.op == self.fullCCW else self.fullCCW
"""
else:
self.op = self.op_null
self.op()
| celibertojr/Kbsim | Bots/original/Swarmlinebot.py | Python | gpl-3.0 | 8,002 | [
"VisIt"
] | ad5ddc81ae13ddeefa723ecb85ee3dc8b82951762d7ed945ae817b96ce90866b |
# -*- coding: utf-8 -*-
'''
Describe test for Django
@author: Laurent GAY
@organization: sd-libre.fr
@contact: info@sd-libre.fr
@copyright: 2015 sd-libre.fr
@license: This file is part of Lucterios.
Lucterios is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Lucterios is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Lucterios. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import unicode_literals
from shutil import rmtree
from os.path import exists
from base64 import b64decode
from datetime import date
from django.utils import formats
from django.db.models import Q
from lucterios.framework.test import LucteriosTest
from lucterios.framework.filetools import get_user_dir, get_user_path
from lucterios.CORE.parameters import Params
from lucterios.CORE.views import StatusMenu
from lucterios.contacts.models import CustomField
from diacamma.accounting.views_entries import EntryAccountList, EntryAccountListing, EntryAccountEdit, EntryAccountShow, \
EntryAccountClose, EntryAccountCostAccounting, EntryAccountSearch
from diacamma.accounting.test_tools import default_compta_fr, initial_thirds_fr, fill_entries_fr, add_entry
from diacamma.accounting.views_other import CostAccountingList, CostAccountingClose, CostAccountingAddModify
from diacamma.accounting.views_reports import FiscalYearBalanceSheet, FiscalYearIncomeStatement, FiscalYearLedger, FiscalYearTrialBalance,\
CostAccountingTrialBalance, CostAccountingLedger, CostAccountingIncomeStatement,\
FiscalYearReportPrint
from diacamma.accounting.views_admin import FiscalYearExport
from diacamma.accounting.models import FiscalYear, Third
from diacamma.accounting.tools_reports import get_totalaccount_for_query, get_totalbudget_for_query
from diacamma.accounting.views_budget import BudgetList, BudgetAddModify, BudgetDel, BudgetImport
class CompletedEntryTest(LucteriosTest):
def setUp(self):
    """Build a French accounting fixture for each test.

    Creates thirds, a chart of accounts (including class-8 accounts),
    the standard demo entries, one extra 'Bénévolat' entry, and a closed
    previous fiscal year linked to the current one.
    """
    initial_thirds_fr()
    LucteriosTest.setUp(self)
    default_compta_fr(with8=True)
    # Start from a clean user directory for generated files.
    rmtree(get_user_dir(), True)
    fill_entries_fr(1)
    add_entry(1, 5, '2015-12-31', 'Bénévolat', '-1|19|0|-1234.000000|0|0|None|\n-2|18|0|1234.000000|0|0|None|', True)
    last_year = FiscalYear.objects.create(begin='2014-01-01', end='2014-12-31', status=2)  # id=2
    current_year = FiscalYear.objects.get(id=1)
    current_year.last_fiscalyear = last_year
    current_year.save()
def _goto_entrylineaccountlist(self, journal, filterlist, code, nb_line, date_begin='', date_end=''):
    """Call the EntryAccountList view for year 1 and check the entry-line count.

    Supplying a code or a date bound enables the advanced-filter panel,
    which adds 3 components to the screen (11 instead of 8).
    """
    self.factory.xfer = EntryAccountList()
    filter_advance = (code != '') or (date_begin != '') or (date_end != '')
    self.calljson('/diacamma.accounting/entryAccountList',
                  {'year': '1', 'journal': journal, 'filter': filterlist,
                   'filtercode': code, 'date_begin': date_begin, 'date_end': date_end, 'FilterAdvance': filter_advance}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
    self.assert_count_equal('', 11 if filter_advance else 8)
    self.assert_count_equal('entryline', nb_line)
def test_lastyear(self):
self._goto_entrylineaccountlist(1, 0, '', 3)
self.assert_json_equal('', 'entryline/@0/entry.num', '1')
self.assert_json_equal('', 'entryline/@0/link', None)
self.assert_json_equal('', 'entryline/@0/entry_account', '[106] 106')
self.assert_json_equal('', 'entryline/@0/credit', 1250.38)
self.assert_json_equal('', 'entryline/@1/entry_account', '[512] 512')
self.assert_json_equal('', 'entryline/@1/debit', -1135.93)
self.assert_json_equal('', 'entryline/@2/entry_account', '[531] 531')
self.assert_json_equal('', 'entryline/@2/debit', -114.45)
    def test_buying(self):
        """Journal 2 (buying): three supplier invoices, each a [401 third] credit
        against an expense-account debit."""
        self._goto_entrylineaccountlist(2, 0, '', 6)
        # Not yet numbered; lettered 'C' with its payment (see test_payment).
        self.assert_json_equal('', 'entryline/@0/entry.num', None)
        self.assert_json_equal('', 'entryline/@0/link', 'C')
        self.assert_json_equal('', 'entryline/@0/entry_account', '[401 Dalton Avrel]')
        self.assert_json_equal('', 'entryline/@0/credit', 194.08)
        self.assert_json_equal('', 'entryline/@1/entry_account', '[607] 607')
        # Validated entry n°2, lettered 'A' with its payment.
        self.assert_json_equal('', 'entryline/@2/entry.num', '2')
        self.assert_json_equal('', 'entryline/@2/link', 'A')
        self.assert_json_equal('', 'entryline/@2/entry_account', '[401 Minimum]')
        self.assert_json_equal('', 'entryline/@2/credit', 63.94)
        self.assert_json_equal('', 'entryline/@3/entry_account', '[602] 602')
        # Not numbered and not lettered: still unpaid.
        self.assert_json_equal('', 'entryline/@4/entry.num', None)
        self.assert_json_equal('', 'entryline/@4/link', None)
        self.assert_json_equal('', 'entryline/@4/entry_account', '[401 Maximum]')
        self.assert_json_equal('', 'entryline/@4/credit', 78.24)
        self.assert_json_equal('', 'entryline/@5/entry_account', '[601] 601')
    def test_selling(self):
        """Journal 3 (selling): three customer invoices, each a [411 third] debit
        against a [707] sales credit."""
        self._goto_entrylineaccountlist(3, 0, '', 6)
        # Validated sale n°4, lettered 'E' with its payment (see test_payment).
        self.assert_json_equal('', 'entryline/@0/entry.num', '4')
        self.assert_json_equal('', 'entryline/@0/link', 'E')
        self.assert_json_equal('', 'entryline/@0/entry_account', '[411 Dalton Joe]')
        self.assert_json_equal('', 'entryline/@0/debit', -70.64)
        self.assert_json_equal('', 'entryline/@1/entry.num', '4')
        self.assert_json_equal('', 'entryline/@1/link', None)
        self.assert_json_equal('', 'entryline/@1/entry_account', '[707] 707')
        # Validated sale n°6, not lettered.
        self.assert_json_equal('', 'entryline/@2/entry.num', '6')
        self.assert_json_equal('', 'entryline/@2/link', None)
        self.assert_json_equal('', 'entryline/@2/entry_account', '[411 Dalton William]')
        self.assert_json_equal('', 'entryline/@2/debit', -125.97)
        self.assert_json_equal('', 'entryline/@3/entry.num', '6')
        self.assert_json_equal('', 'entryline/@3/link', None)
        self.assert_json_equal('', 'entryline/@3/entry_account', '[707] 707')
        # Draft sale: no number, no lettering.
        self.assert_json_equal('', 'entryline/@4/entry.num', None)
        self.assert_json_equal('', 'entryline/@4/link', None)
        self.assert_json_equal('', 'entryline/@4/entry_account', '[411 Minimum]')
        self.assert_json_equal('', 'entryline/@4/debit', -34.01)
        self.assert_json_equal('', 'entryline/@5/entry.num', None)
        self.assert_json_equal('', 'entryline/@5/link', None)
        self.assert_json_equal('', 'entryline/@5/entry_account', '[707] 707')
    def test_payment(self):
        """Journal 4 (payments): bank/cash movements settling the invoices of
        the buying and selling journals."""
        self._goto_entrylineaccountlist(4, 0, '', 6)
        # Payment n°3, lettered 'A' with the 401-Minimum invoice, via bank [512].
        self.assert_json_equal('', 'entryline/@0/entry.num', '3')
        self.assert_json_equal('', 'entryline/@0/link', 'A')
        self.assert_json_equal('', 'entryline/@0/entry_account', '[401 Minimum]')
        self.assert_json_equal('', 'entryline/@0/debit', -63.94)
        self.assert_json_equal('', 'entryline/@1/entry_account', '[512] 512')
        # Unnumbered payment, lettered 'C' with the Dalton Avrel invoice, via cash [531].
        self.assert_json_equal('', 'entryline/@2/entry.num', None)
        self.assert_json_equal('', 'entryline/@2/link', 'C')
        self.assert_json_equal('', 'entryline/@2/entry_account', '[401 Dalton Avrel]')
        self.assert_json_equal('', 'entryline/@2/debit', -194.08)
        self.assert_json_equal('', 'entryline/@3/entry_account', '[531] 531')
        # Customer payment n°5, lettered 'E' with the Dalton Joe sale, via bank [512].
        self.assert_json_equal('', 'entryline/@4/entry.num', '5')
        self.assert_json_equal('', 'entryline/@4/link', 'E')
        self.assert_json_equal('', 'entryline/@4/entry_account', '[411 Dalton Joe]')
        self.assert_json_equal('', 'entryline/@4/credit', 70.64)
        self.assert_json_equal('', 'entryline/@5/entry_account', '[512] 512')
def test_other(self):
self._goto_entrylineaccountlist(5, 0, '', 4)
self.assert_json_equal('', 'entryline/@0/entry.num', '7')
self.assert_json_equal('', 'entryline/@0/link', None)
self.assert_json_equal('', 'entryline/@0/entry_account', '[512] 512')
self.assert_json_equal('', 'entryline/@0/credit', 12.34)
self.assert_json_equal('', 'entryline/@1/entry_account', '[627] 627')
def _check_result(self):
return self.assert_json_equal('LABELFORM', 'result', [230.62, 348.60, -117.98, 1050.66, 1244.74])
def _check_result_with_filter(self):
return self.assert_json_equal('LABELFORM', 'result', [34.01, 0.00, 34.01, 70.64, 70.64])
def test_all(self):
self._goto_entrylineaccountlist(0, 0, '', 25)
self._check_result()
def test_noclose(self):
self._goto_entrylineaccountlist(0, 1, '', 8)
def test_close(self):
self._goto_entrylineaccountlist(0, 2, '', 17)
def test_letter(self):
self._goto_entrylineaccountlist(0, 3, '', 12)
def test_noletter(self):
self._goto_entrylineaccountlist(0, 4, '', 13)
def test_code(self):
self._goto_entrylineaccountlist(0, 0, '60', 6)
def test_date(self):
self._goto_entrylineaccountlist(0, 0, '', 11, '2015-01-01', '2015-02-19')
    def test_summary(self):
        """The CORE status menu shows the fiscal-year caption and the same
        result figures as the entry-line list."""
        self.factory.xfer = StatusMenu()
        self.calljson('/CORE/statusMenu', {}, False)
        self.assert_observer('core.custom', 'CORE', 'statusMenu')
        self.assert_json_equal('LABELFORM', 'accounting_year',
                               "Exercice du 1 janvier 2015 au 31 décembre 2015 [en création]")
        # Same five totals as _check_result().
        self.assert_json_equal('LABELFORM', 'accounting_result',
                               [230.62, 348.60, -117.98, 1050.66, 1244.74])
        self.assert_json_equal('LABELFORM', 'accountingtitle', "Gestion comptable")
def test_listing(self):
self.factory.xfer = EntryAccountListing()
self.calljson('/diacamma.accounting/entryAccountListing',
{'PRINT_MODE': '4', 'MODEL': 7, 'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.print', 'diacamma.accounting', 'entryAccountListing')
csv_value = b64decode(str(self.response_json['print']['content'])).decode("utf-8")
content_csv = csv_value.split('\n')
self.assertEqual(len(content_csv), 38, str(content_csv))
self.assertEqual(content_csv[1].strip()[:20], '"Liste d\'écritures -')
self.assertEqual(content_csv[6].strip(), '"N°";"date d\'écriture";"date de pièce";"compte";"nom";"débit";"crédit";"lettrage";')
self.assertEqual(content_csv[7].strip(), '"1";"%s";"1 février 2015";"[106] 106";"Report à nouveau";"";"1 250,38 €";"";' % formats.date_format(date.today(), "DATE_FORMAT"))
self.assertEqual(content_csv[11].strip(), '"---";"---";"13 février 2015";"[607] 607";"depense 2";"194,08 €";"";"";')
self.factory.xfer = EntryAccountListing()
self.calljson('/diacamma.accounting/entryAccountListing',
{'PRINT_MODE': '4', 'MODEL': 7, 'year': '1', 'journal': '0', 'filter': '1'}, False)
self.assert_observer('core.print', 'diacamma.accounting', 'entryAccountListing')
csv_value = b64decode(str(self.response_json['print']['content'])).decode("utf-8")
content_csv = csv_value.split('\n')
self.assertEqual(len(content_csv), 21, str(content_csv))
self.factory.xfer = EntryAccountListing()
self.calljson('/diacamma.accounting/entryAccountListing',
{'PRINT_MODE': '4', 'MODEL': 7, 'year': '1', 'journal': '0', 'filter': '2'}, False)
self.assert_observer('core.print', 'diacamma.accounting', 'entryAccountListing')
csv_value = b64decode(str(self.response_json['print']['content'])).decode("utf-8")
content_csv = csv_value.split('\n')
self.assertEqual(len(content_csv), 30, str(content_csv))
self.factory.xfer = EntryAccountListing()
self.calljson('/diacamma.accounting/entryAccountListing',
{'PRINT_MODE': '4', 'MODEL': 7, 'year': '1', 'journal': '0', 'filter': '3'}, False)
self.assert_observer('core.print', 'diacamma.accounting', 'entryAccountListing')
csv_value = b64decode(str(self.response_json['print']['content'])).decode("utf-8")
content_csv = csv_value.split('\n')
self.assertEqual(len(content_csv), 25, str(content_csv))
self.factory.xfer = EntryAccountListing()
self.calljson('/diacamma.accounting/entryAccountListing',
{'PRINT_MODE': '4', 'MODEL': 7, 'year': '1', 'journal': '0', 'filter': '4'}, False)
self.assert_observer('core.print', 'diacamma.accounting', 'entryAccountListing')
csv_value = b64decode(
str(self.response_json['print']['content'])).decode("utf-8")
content_csv = csv_value.split('\n')
self.assertEqual(len(content_csv), 26, str(content_csv))
self.factory.xfer = EntryAccountListing()
self.calljson('/diacamma.accounting/entryAccountListing',
{'PRINT_MODE': '4', 'MODEL': 7, 'year': '1', 'journal': '4', 'filter': '0'}, False)
self.assert_observer('core.print', 'diacamma.accounting', 'entryAccountListing')
csv_value = b64decode(
str(self.response_json['print']['content'])).decode("utf-8")
content_csv = csv_value.split('\n')
self.assertEqual(len(content_csv), 19, str(content_csv))
def test_search(self):
self.factory.xfer = EntryAccountSearch()
self.calljson('/diacamma.accounting/entryAccountSearch',
{'year': '1', 'journal': '-1', 'filter': '0', 'CRITERIA': 'entry.year||8||1//account.code||6||7'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountSearch')
self.assert_count_equal('', 21)
self.assert_count_equal('entryline', 3)
    def test_listing_search(self):
        """CSV export of a search result (same criteria as test_search)."""
        self.factory.xfer = EntryAccountListing()
        self.calljson('/diacamma.accounting/entryAccountListing',
                      {'PRINT_MODE': '4', 'MODEL': 7, 'year': '1', 'journal': '-1', 'filter': '0', 'CRITERIA': 'entry.year||8||1//account.code||6||7'}, False)
        self.assert_observer('core.print', 'diacamma.accounting', 'entryAccountListing')
        # The print payload is base64-encoded CSV text.
        csv_value = b64decode(str(self.response_json['print']['content'])).decode("utf-8")
        content_csv = csv_value.split('\n')
        self.assertEqual(len(content_csv), 16, str(content_csv))
        self.assertEqual(content_csv[1].strip()[:20], '"Liste d\'écritures -')
        self.assertEqual(content_csv[6].strip(), '"N°";"date d\'écriture";"date de pièce";"compte";"nom";"débit";"crédit";"lettrage";')
        # Validated entry n°4 carries today's (localized) write date.
        self.assertEqual(content_csv[7].strip(), '"4";"%s";"21 février 2015";"[707] 707";"vente 1";"";"70,64 €";"";' % formats.date_format(date.today(), "DATE_FORMAT"))
        self.assertEqual(content_csv[9].strip(), '"---";"---";"24 février 2015";"[707] 707";"vente 3";"";"34,01 €";"";')
    def test_report_tool(self):
        """Check the aggregation helpers get_totalaccount_for_query and
        get_totalbudget_for_query against the fixture data."""
        # Asset accounts (type_of_account=0) of year 1, grouped by account code:
        # values maps code -> (amount, display label).
        values, total = get_totalaccount_for_query(Q(account__type_of_account=0) & Q(entry__year_id=1))
        self.assertAlmostEqual(1050.66 + 159.98, total, delta=0.0001)
        self.assertEqual(3, len(values), values)
        self.assertAlmostEqual(159.98, values['411'][0], delta=0.0001)
        self.assertAlmostEqual(1130.29, values['512'][0], delta=0.0001)
        self.assertAlmostEqual(-79.63, values['531'][0], delta=0.0001)
        self.assertEqual('[411] 411', values['411'][1])
        self.assertEqual('[512] 512', values['512'][1])
        self.assertEqual('[531] 531', values['531'][1])
        # 4xx accounts with third grouping: keys become 'code#third_id'.
        # The extra args (1 / -1, True) apparently select one side of the
        # balance -- here 1 matches the credit line -- TODO confirm helper semantics.
        values, total = get_totalaccount_for_query(Q(account__code__regex=r'^4[0-9][0-9][0-9a-zA-Z]*$') & Q(entry__year_id=1), 1, True)
        self.assertAlmostEqual(78.24, total, delta=0.0001)
        self.assertEqual(1, len(values), values)
        self.assertAlmostEqual(78.24, values['401#2'][0], delta=0.0001)
        self.assertEqual('[401 Maximum]', values['401#2'][1])
        # Same query, opposite sign: matches the debit lines.
        values, total = get_totalaccount_for_query(Q(account__code__regex=r'^4[0-9][0-9][0-9a-zA-Z]*$') & Q(entry__year_id=1), -1, True)
        self.assertAlmostEqual(159.98, total, delta=0.0001)
        self.assertEqual(2, len(values), values)
        self.assertAlmostEqual(34.01, values['411#4'][0], delta=0.0001)
        self.assertAlmostEqual(125.97, values['411#5'][0], delta=0.0001)
        self.assertEqual('[411 Minimum]', values['411#4'][1])
        self.assertEqual('[411 Dalton William]', values['411#5'][1])
        # Revenue accounts (type_of_account=3).
        values, total = get_totalaccount_for_query(Q(account__type_of_account=3) & Q(entry__year_id=1))
        self.assertAlmostEqual(230.62, total, delta=0.0001)
        self.assertEqual(1, len(values), values)
        self.assertAlmostEqual(230.62, values['707'][0], delta=0.0001)
        self.assertEqual('[707] 707', values['707'][1])
        # Budget amounts for class-6 codes of year 1.
        values, total = get_totalbudget_for_query(Q(code__regex=r'^6.*$') & Q(year_id=1))
        self.assertAlmostEqual(21.78, total, delta=0.0001)
        self.assertAlmostEqual(8.19, values['601'][0], delta=0.0001)
        self.assertEqual('[601] 601', values['601'][1])
        self.assertAlmostEqual(7.35, values['602'][0], delta=0.0001)
        self.assertEqual('[602] 602', values['602'][1])
        self.assertAlmostEqual(6.24, values['604'][0], delta=0.0001)
        self.assertEqual('[604] 604', values['604'][1])
    def test_costaccounting(self):
        """Entry screens expose the cost accounting of each line: lines on an
        open cost accounting keep their line action, lines on a closed one
        do not."""
        self.factory.xfer = EntryAccountEdit()
        self.calljson('/diacamma.accounting/entryAccountEdit',
                      {'year': '1', 'journal': '2'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountEdit')
        self.assert_count_equal('', 4)
        self.assertEqual(len(self.json_actions), 2)
        # Entry 2 ('depense 1'): its [602] line is on the 'open' cost
        # accounting, so one action remains on the line grid.
        self.factory.xfer = EntryAccountShow()
        self.calljson('/diacamma.accounting/entryAccountShow',
                      {'year': '1', 'journal': '2', 'entryaccount': '2'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountShow')
        self.assert_count_equal('', 10)
        self.assert_json_equal('LABELFORM', 'designation', 'depense 1')
        self.assert_count_equal('entrylineaccount', 2)
        self.assert_json_equal('', 'entrylineaccount/@0/entry_account', '[401 Minimum]')
        self.assert_json_equal('', 'entrylineaccount/@0/costaccounting', None)
        self.assert_json_equal('', 'entrylineaccount/@1/entry_account', '[602] 602')
        self.assert_json_equal('', 'entrylineaccount/@1/costaccounting', 'open')
        self.assert_count_equal('#entrylineaccount/actions', 1)
        self.assertEqual(len(self.json_actions), 1)
        # Entry 11 ('Frais bancaire'): its [627] line is on the 'close' cost
        # accounting, so no line action is offered.
        self.factory.xfer = EntryAccountShow()
        self.calljson('/diacamma.accounting/entryAccountShow',
                      {'year': '1', 'journal': '2', 'entryaccount': '11'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountShow')
        self.assert_count_equal('', 8)
        self.assert_json_equal('LABELFORM', 'designation', 'Frais bancaire')
        self.assert_count_equal('entrylineaccount', 2)
        self.assert_json_equal('', 'entrylineaccount/@0/entry_account', '[512] 512')
        self.assert_json_equal('', 'entrylineaccount/@0/costaccounting', None)
        self.assert_json_equal('', 'entrylineaccount/@1/entry_account', '[627] 627')
        self.assert_json_equal('', 'entrylineaccount/@1/costaccounting', 'close')
        self.assert_count_equal('#entrylineaccount/actions', 0)
        self.assertEqual(len(self.json_actions), 1)
    def test_costaccounting_list(self):
        """List cost accountings filtered by status, and close one (which is
        refused until all of its entries are validated)."""
        self.factory.xfer = CostAccountingList()
        self.calljson('/diacamma.accounting/costAccountingList', {'status': 0}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingList')
        self.assert_count_equal('', 5)
        self.assert_count_equal('costaccounting', 1)
        # 'status' column: header id, localized title, enum label mapping, format.
        self.assert_json_equal('', '#costaccounting/headers/@6/@0', 'status')
        self.assert_json_equal('', '#costaccounting/headers/@6/@1', 'statut')
        self.assert_json_equal('', '#costaccounting/headers/@6/@2', {'0': 'ouverte', '1': 'clôturé'})
        self.assert_json_equal('', '#costaccounting/headers/@6/@4', "%s")
        self.assert_json_equal('', 'costaccounting/@0/name', 'open')
        self.assert_json_equal('', 'costaccounting/@0/description', 'Open cost')
        self.assert_json_equal('', 'costaccounting/@0/year', None)
        self.assert_json_equal('', 'costaccounting/@0/total_revenue', 70.64)
        self.assert_json_equal('', 'costaccounting/@0/total_expense', 258.02)
        self.assert_json_equal('', 'costaccounting/@0/total_result', -187.38)
        self.assert_json_equal('', 'costaccounting/@0/status', 0)
        self.assert_json_equal('', 'costaccounting/@0/is_default', True)
        # status=1 lists only the closed one; status=-1 lists all of them.
        self.factory.xfer = CostAccountingList()
        self.calljson('/diacamma.accounting/costAccountingList', {'status': 1}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingList')
        self.assert_count_equal('costaccounting', 1)
        self.factory.xfer = CostAccountingList()
        self.calljson('/diacamma.accounting/costAccountingList', {'status': -1}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingList')
        self.assert_count_equal('costaccounting', 2)
        # Closing is rejected while 'open' still has non-validated entries.
        self.factory.xfer = CostAccountingClose()
        self.calljson('/diacamma.accounting/costAccountingClose',
                      {'costaccounting': 2}, False)
        self.assert_observer('core.exception', 'diacamma.accounting', 'costAccountingClose')
        self.assert_json_equal('', 'message', 'La comptabilité "open" a des écritures non validées !')
        # Validate the pending entry line, then closing succeeds.
        self.factory.xfer = EntryAccountClose()
        self.calljson('/diacamma.accounting/entryAccountClose',
                      {'CONFIRME': 'YES', 'year': '1', 'journal': '2', "entryline": "8"}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountClose')
        self.factory.xfer = CostAccountingClose()
        self.calljson('/diacamma.accounting/costAccountingClose',
                      {'CONFIRME': 'YES', 'costaccounting': 2}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'costAccountingClose')
        # No open cost accounting remains; both still appear with status=-1.
        self.factory.xfer = CostAccountingList()
        self.calljson('/diacamma.accounting/costAccountingList', {'status': 0}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingList')
        self.assert_count_equal('costaccounting', 0)
        self.factory.xfer = CostAccountingList()
        self.calljson('/diacamma.accounting/costAccountingList', {'status': -1}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingList')
        self.assert_count_equal('costaccounting', 2)
    # NOTE(review): 'costaccouting' looks like a typo for 'costaccounting';
    # kept as-is because renaming would change the reported test id.
    def test_costaccouting_budget(self):
        """Budgets attached to a cost accounting: add one, see it reflected in
        the year-level budget result, then delete budgets both ways."""
        self.factory.xfer = CostAccountingAddModify()
        self.calljson('/diacamma.accounting/costAccountingAddModify', {"SAVE": "YES", 'name': 'aaa', 'description': 'aaa', 'year': '1'}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'costAccountingAddModify')  # id = 3
        # Year-level budget list: fixture revenue/expense budgets, result 169.56.
        self.factory.xfer = BudgetList()
        self.calljson('/diacamma.accounting/budgetList', {'year': '1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'budgetList')
        self.assert_count_equal('', 6)
        self.assertEqual(len(self.json_actions), 4)
        self.assert_count_equal('budget_revenue', 2)
        self.assert_count_equal('#budget_revenue/actions', 2)
        self.assert_json_equal('', 'budget_revenue/@0/budget', '[701] 701')
        self.assert_json_equal('', 'budget_revenue/@0/montant', 67.89)
        self.assert_json_equal('', 'budget_revenue/@1/budget', '[707] 707')
        self.assert_json_equal('', 'budget_revenue/@1/montant', 123.45)
        self.assert_count_equal('budget_expense', 3)
        self.assert_json_equal('', 'budget_expense/@0/budget', '[601] 601')
        self.assert_json_equal('', 'budget_expense/@0/montant', -8.19)
        self.assert_json_equal('', 'budget_expense/@1/budget', '[602] 602')
        self.assert_json_equal('', 'budget_expense/@1/montant', -7.35)
        self.assert_json_equal('', 'budget_expense/@2/budget', '[604] 604')
        self.assert_json_equal('', 'budget_expense/@2/montant', -6.24)
        self.assert_count_equal('#budget_expense/actions', 2)
        self.assert_json_equal('LABELFORM', 'result', 169.56)
        # The freshly created cost accounting has no budget yet.
        self.factory.xfer = BudgetList()
        self.calljson('/diacamma.accounting/budgetList', {'cost_accounting': '3'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'budgetList')
        self.assert_count_equal('', 5)
        self.assertEqual(len(self.json_actions), 4)
        self.assert_count_equal('budget_revenue', 0)
        self.assert_count_equal('#budget_revenue/actions', 2)
        self.assert_count_equal('budget_expense', 0)
        self.assert_count_equal('#budget_expense/actions', 2)
        # Add a 19.64 expense budget on code 602 to cost accounting 3.
        self.factory.xfer = BudgetAddModify()
        self.calljson('/diacamma.accounting/budgetAddModify', {'cost_accounting': '3', 'code': '602', 'debit_val': '19.64', 'credit_val': '0.00', 'SAVE': 'YES'}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'budgetAddModify')
        self.factory.xfer = BudgetList()
        self.calljson('/diacamma.accounting/budgetList', {'cost_accounting': '3'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'budgetList')
        self.assert_count_equal('', 6)
        self.assertEqual(len(self.json_actions), 4)
        self.assert_count_equal('budget_revenue', 0)
        self.assert_count_equal('#budget_revenue/actions', 2)
        self.assert_count_equal('budget_expense', 1)
        self.assert_json_equal('', 'budget_expense/@0/id', '6')
        self.assert_json_equal('', 'budget_expense/@0/budget', '[602] 602')
        self.assert_json_equal('', 'budget_expense/@0/montant', -19.64)
        self.assert_count_equal('#budget_expense/actions', 2)
        self.assert_json_equal('LABELFORM', 'result', -19.64)
        # The cost-accounting budget is included in the year result:
        # 169.56 - 19.64 = 149.92.
        self.factory.xfer = BudgetList()
        self.calljson('/diacamma.accounting/budgetList', {'year': '1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'budgetList')
        self.assert_count_equal('', 6)
        self.assert_count_equal('budget_revenue', 2)
        self.assert_count_equal('budget_expense', 3)
        self.assert_json_equal('LABELFORM', 'result', 149.92)
        # Delete the year-level 602 budget (key 'C602' -- presumably 'C' + code
        # for a code-aggregated row; confirm against BudgetDel): 149.92 + 7.35 = 157.27.
        self.factory.xfer = BudgetDel()
        self.calljson('/diacamma.accounting/budgetDel', {'year': '1', 'budget_expense': 'C602', 'CONFIRME': 'YES'}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'budgetDel')
        self.factory.xfer = BudgetList()
        self.calljson('/diacamma.accounting/budgetList', {'year': '1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'budgetList')
        self.assert_count_equal('', 6)
        self.assert_count_equal('budget_revenue', 2)
        self.assert_count_equal('budget_expense', 3)
        self.assert_json_equal('LABELFORM', 'result', 157.27)
        # Delete the cost-accounting budget line by id: 157.27 + 19.64 = 176.91.
        self.factory.xfer = BudgetDel()
        self.calljson('/diacamma.accounting/budgetDel', {'cost_accounting': '3', 'budget_expense': '6', 'CONFIRME': 'YES'}, False)
        self.assert_observer('core.acknowledge', 'diacamma.accounting', 'budgetDel')
        self.factory.xfer = BudgetList()
        self.calljson('/diacamma.accounting/budgetList', {'year': '1'}, False)
        self.assert_observer('core.custom', 'diacamma.accounting', 'budgetList')
        self.assert_count_equal('', 6)
        self.assert_count_equal('budget_revenue', 2)
        self.assert_count_equal('budget_expense', 2)
        self.assert_json_equal('LABELFORM', 'result', 176.91)
def test_costaccounting_change(self):
FiscalYear.objects.create(begin='2016-01-01', end='2016-12-31', status=0, last_fiscalyear_id=1)
self.factory.xfer = CostAccountingAddModify()
self.calljson('/diacamma.accounting/costAccountingAddModify', {}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingAddModify')
self.assert_count_equal('', 5)
self.assert_select_equal('last_costaccounting', 3) # nb=3
self.assert_select_equal('year', 3) # nb=3
self.factory.xfer = CostAccountingAddModify()
self.calljson('/diacamma.accounting/costAccountingAddModify', {"SAVE": "YES", 'name': 'aaa', 'description': 'aaa', 'year': '1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'costAccountingAddModify') # id = 3
self.factory.xfer = CostAccountingAddModify()
self.calljson('/diacamma.accounting/costAccountingAddModify', {"SAVE": "YES", 'name': 'bbb', 'description': 'bbb', 'year': '3'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'costAccountingAddModify') # id = 4
self.factory.xfer = CostAccountingList()
self.calljson('/diacamma.accounting/costAccountingList', {'status': 0}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingList')
self.assert_count_equal('costaccounting', 3)
self.assert_json_equal('', '#costaccounting/headers/@3/@0', 'total_revenue')
self.assert_json_equal('', '#costaccounting/headers/@3/@1', 'total des revenus')
self.assert_json_equal('', '#costaccounting/headers/@3/@2', "C2EUR")
self.assert_json_equal('', '#costaccounting/headers/@3/@4', "{[p align='right']}%s{[/p]}")
self.assert_json_equal('', '#costaccounting/headers/@4/@0', 'total_expense')
self.assert_json_equal('', '#costaccounting/headers/@4/@1', 'total des dépenses')
self.assert_json_equal('', '#costaccounting/headers/@4/@2', "C2EUR")
self.assert_json_equal('', '#costaccounting/headers/@4/@4', "{[p align='right']}%s{[/p]}")
self.assert_json_equal('', '#costaccounting/headers/@5/@0', 'total_result')
self.assert_json_equal('', '#costaccounting/headers/@5/@1', 'résultat')
self.assert_json_equal('', '#costaccounting/headers/@5/@2', "C2EUR")
self.assert_json_equal('', '#costaccounting/headers/@5/@4', "{[p align='right']}%s{[/p]}")
self.assert_json_equal('', 'costaccounting/@0/id', '3')
self.assert_json_equal('', 'costaccounting/@0/name', 'aaa')
self.assert_json_equal('', 'costaccounting/@0/year', 'Exercice du 1 janvier 2015 au 31 décembre 2015 [en création]')
self.assert_json_equal('', 'costaccounting/@0/total_revenue', 0.0)
self.assert_json_equal('', 'costaccounting/@0/total_expense', 0.0)
self.assert_json_equal('', 'costaccounting/@1/id', '4')
self.assert_json_equal('', 'costaccounting/@1/name', 'bbb')
self.assert_json_equal('', 'costaccounting/@1/year', 'Exercice du 1 janvier 2016 au 31 décembre 2016 [en création]')
self.assert_json_equal('', 'costaccounting/@1/total_revenue', 0.0)
self.assert_json_equal('', 'costaccounting/@1/total_expense', 0.0)
self.assert_json_equal('', 'costaccounting/@2/id', '2')
self.assert_json_equal('', 'costaccounting/@2/name', 'open')
self.assert_json_equal('', 'costaccounting/@2/year', None)
self.assert_json_equal('', 'costaccounting/@2/total_revenue', 70.64)
self.assert_json_equal('', 'costaccounting/@2/total_expense', 258.02)
self._goto_entrylineaccountlist(0, 0, '', 25)
self.assert_json_equal('', 'entryline/@3/id', '9')
self.assert_json_equal('', 'entryline/@3/entry.num', None)
self.assert_json_equal('', 'entryline/@3/entry_account', '[401 Dalton Avrel]')
self.assert_json_equal('', 'entryline/@3/costaccounting', None)
self.assert_json_equal('', 'entryline/@4/id', '8')
self.assert_json_equal('', 'entryline/@4/entry.num', None)
self.assert_json_equal('', 'entryline/@4/entry_account', '[607] 607')
self.assert_json_equal('', 'entryline/@4/costaccounting', 'open')
self.assert_json_equal('', 'entryline/@11/id', '13')
self.assert_json_equal('', 'entryline/@11/entry.num', None)
self.assert_json_equal('', 'entryline/@11/entry_account', '[401 Maximum]')
self.assert_json_equal('', 'entryline/@11/costaccounting', None)
self.assert_json_equal('', 'entryline/@12/id', '12')
self.assert_json_equal('', 'entryline/@12/entry.num', None)
self.assert_json_equal('', 'entryline/@12/entry_account', '[601] 601')
self.assert_json_equal('', 'entryline/@12/costaccounting', None)
self.assert_json_equal('', 'entryline/@17/id', '19')
self.assert_json_equal('', 'entryline/@17/entry.num', '6')
self.assert_json_equal('', 'entryline/@17/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@17/costaccounting', None)
self.assert_json_equal('', 'entryline/@18/id', '18')
self.assert_json_equal('', 'entryline/@18/entry.num', '6')
self.assert_json_equal('', 'entryline/@18/entry_account', '[707] 707')
self.assert_json_equal('', 'entryline/@18/costaccounting', None)
self.factory.xfer = EntryAccountCostAccounting()
self.calljson('/diacamma.accounting/entryAccountCostAccounting', {'entryline': '8;9;12;13;18;19'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountCostAccounting')
self.assert_count_equal('', 3)
self.assert_select_equal('cost_accounting_id', {0: None, 2: 'open', 3: 'aaa'}) # nb=3
self.assert_json_equal('SELECT', 'cost_accounting_id', '2')
self.factory.xfer = EntryAccountCostAccounting()
self.calljson('/diacamma.accounting/entryAccountCostAccounting', {"SAVE": "YES", 'entryline': '8;9;12;13;18;19', 'cost_accounting_id': '2'}, False) # -78.24 / +125.97
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountCostAccounting')
self._goto_entrylineaccountlist(0, 0, '', 25)
self.assert_json_equal('', 'entryline/@3/id', '9')
self.assert_json_equal('', 'entryline/@3/entry.num', None)
self.assert_json_equal('', 'entryline/@3/entry_account', '[401 Dalton Avrel]')
self.assert_json_equal('', 'entryline/@3/costaccounting', None)
self.assert_json_equal('', 'entryline/@4/id', '8')
self.assert_json_equal('', 'entryline/@4/entry.num', None)
self.assert_json_equal('', 'entryline/@4/entry_account', '[607] 607')
self.assert_json_equal('', 'entryline/@4/costaccounting', 'open')
self.assert_json_equal('', 'entryline/@11/id', '13')
self.assert_json_equal('', 'entryline/@11/entry.num', None)
self.assert_json_equal('', 'entryline/@11/entry_account', '[401 Maximum]')
self.assert_json_equal('', 'entryline/@11/costaccounting', None)
self.assert_json_equal('', 'entryline/@12/id', '12')
self.assert_json_equal('', 'entryline/@12/entry.num', None)
self.assert_json_equal('', 'entryline/@12/entry_account', '[601] 601')
self.assert_json_equal('', 'entryline/@12/costaccounting', 'open')
self.assert_json_equal('', 'entryline/@17/id', '19')
self.assert_json_equal('', 'entryline/@17/entry.num', '6')
self.assert_json_equal('', 'entryline/@17/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@17/costaccounting', None)
self.assert_json_equal('', 'entryline/@18/id', '18')
self.assert_json_equal('', 'entryline/@18/entry.num', '6')
self.assert_json_equal('', 'entryline/@18/entry_account', '[707] 707')
self.assert_json_equal('', 'entryline/@18/costaccounting', 'open')
self.factory.xfer = CostAccountingList()
self.calljson('/diacamma.accounting/costAccountingList', {'status': 0}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingList')
self.assert_count_equal('costaccounting', 3)
self.assert_json_equal('', 'costaccounting/@0/total_revenue', 0.0)
self.assert_json_equal('', 'costaccounting/@0/total_expense', 0.0)
self.assert_json_equal('', 'costaccounting/@1/total_revenue', 0.0)
self.assert_json_equal('', 'costaccounting/@1/total_expense', 0.0)
self.assert_json_equal('', 'costaccounting/@2/name', 'open')
self.assert_json_equal('', 'costaccounting/@2/total_revenue', 196.61)
self.assert_json_equal('', 'costaccounting/@2/total_expense', 336.26)
self.factory.xfer = EntryAccountCostAccounting()
self.calljson('/diacamma.accounting/entryAccountCostAccounting', {"SAVE": "YES", 'entryline': '8;9;12;13;18;19', 'cost_accounting_id': '0'}, False) # - -194.08 / 0
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountCostAccounting')
self._goto_entrylineaccountlist(0, 0, '', 25)
self.assert_json_equal('', 'entryline/@3/id', '9')
self.assert_json_equal('', 'entryline/@3/entry.num', None)
self.assert_json_equal('', 'entryline/@3/entry_account', '[401 Dalton Avrel]')
self.assert_json_equal('', 'entryline/@3/costaccounting', None)
self.assert_json_equal('', 'entryline/@4/id', '8')
self.assert_json_equal('', 'entryline/@4/entry.num', None)
self.assert_json_equal('', 'entryline/@4/entry_account', '[607] 607')
self.assert_json_equal('', 'entryline/@4/costaccounting', None)
self.assert_json_equal('', 'entryline/@11/id', '13')
self.assert_json_equal('', 'entryline/@11/entry.num', None)
self.assert_json_equal('', 'entryline/@11/entry_account', '[401 Maximum]')
self.assert_json_equal('', 'entryline/@11/costaccounting', None)
self.assert_json_equal('', 'entryline/@12/id', '12')
self.assert_json_equal('', 'entryline/@12/entry.num', None)
self.assert_json_equal('', 'entryline/@12/entry_account', '[601] 601')
self.assert_json_equal('', 'entryline/@12/costaccounting', None)
self.assert_json_equal('', 'entryline/@17/id', '19')
self.assert_json_equal('', 'entryline/@17/entry.num', '6')
self.assert_json_equal('', 'entryline/@17/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@17/costaccounting', None)
self.assert_json_equal('', 'entryline/@18/id', '18')
self.assert_json_equal('', 'entryline/@18/entry.num', '6')
self.assert_json_equal('', 'entryline/@18/entry_account', '[707] 707')
self.assert_json_equal('', 'entryline/@18/costaccounting', None)
self.factory.xfer = CostAccountingList()
self.calljson('/diacamma.accounting/costAccountingList', {'status': 0}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingList')
self.assert_count_equal('costaccounting', 3)
self.assert_json_equal('', 'costaccounting/@0/total_revenue', 0.0)
self.assert_json_equal('', 'costaccounting/@0/total_expense', 0.0)
self.assert_json_equal('', 'costaccounting/@1/total_revenue', 0.0)
self.assert_json_equal('', 'costaccounting/@1/total_expense', 0.0)
self.assert_json_equal('', 'costaccounting/@2/name', 'open')
self.assert_json_equal('', 'costaccounting/@2/total_revenue', 70.64)
self.assert_json_equal('', 'costaccounting/@2/total_expense', 63.94)
self.factory.xfer = EntryAccountCostAccounting()
self.calljson('/diacamma.accounting/entryAccountCostAccounting', {"SAVE": "YES", 'entryline': '8;9;12;13;18;19', 'cost_accounting_id': '3'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountCostAccounting')
self._goto_entrylineaccountlist(0, 0, '', 25)
self.assert_json_equal('', 'entryline/@3/id', '9')
self.assert_json_equal('', 'entryline/@3/entry.num', None)
self.assert_json_equal('', 'entryline/@3/entry_account', '[401 Dalton Avrel]')
self.assert_json_equal('', 'entryline/@3/costaccounting', None)
self.assert_json_equal('', 'entryline/@4/id', '8')
self.assert_json_equal('', 'entryline/@4/entry.num', None)
self.assert_json_equal('', 'entryline/@4/entry_account', '[607] 607')
self.assert_json_equal('', 'entryline/@4/costaccounting', 'aaa')
self.assert_json_equal('', 'entryline/@11/id', '13')
self.assert_json_equal('', 'entryline/@11/entry.num', None)
self.assert_json_equal('', 'entryline/@11/entry_account', '[401 Maximum]')
self.assert_json_equal('', 'entryline/@11/costaccounting', None)
self.assert_json_equal('', 'entryline/@12/id', '12')
self.assert_json_equal('', 'entryline/@12/entry.num', None)
self.assert_json_equal('', 'entryline/@12/entry_account', '[601] 601')
self.assert_json_equal('', 'entryline/@12/costaccounting', 'aaa')
self.assert_json_equal('', 'entryline/@17/id', '19')
self.assert_json_equal('', 'entryline/@17/entry.num', '6')
self.assert_json_equal('', 'entryline/@17/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@17/costaccounting', None)
self.assert_json_equal('', 'entryline/@18/id', '18')
self.assert_json_equal('', 'entryline/@18/entry.num', '6')
self.assert_json_equal('', 'entryline/@18/entry_account', '[707] 707')
self.assert_json_equal('', 'entryline/@18/costaccounting', 'aaa')
self.factory.xfer = CostAccountingList()
self.calljson('/diacamma.accounting/costAccountingList', {'status': 0}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingList')
self.assert_count_equal('costaccounting', 3)
self.assert_json_equal('', 'costaccounting/@0/name', 'aaa')
self.assert_json_equal('', 'costaccounting/@0/total_revenue', 125.97)
self.assert_json_equal('', 'costaccounting/@0/total_expense', 272.32)
self.assert_json_equal('', 'costaccounting/@1/name', 'bbb')
self.assert_json_equal('', 'costaccounting/@1/total_revenue', 0.0)
self.assert_json_equal('', 'costaccounting/@1/total_expense', 0.0)
self.assert_json_equal('', 'costaccounting/@2/name', 'open')
self.assert_json_equal('', 'costaccounting/@2/total_revenue', 70.64)
self.assert_json_equal('', 'costaccounting/@2/total_expense', 63.94)
self.factory.xfer = CostAccountingAddModify()
self.calljson('/diacamma.accounting/costAccountingAddModify', {"SAVE": "YES", 'id': '3', 'year': '2'}, False)
self.assert_observer('core.exception', 'diacamma.accounting', 'costAccountingAddModify')
self.factory.xfer = EntryAccountCostAccounting()
self.calljson('/diacamma.accounting/entryAccountCostAccounting', {"SAVE": "YES", 'entryline': '8;9;12;13;18;19', 'cost_accounting_id': '4'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountCostAccounting')
self._goto_entrylineaccountlist(0, 0, '', 25)
self.assert_json_equal('', 'entryline/@3/id', '9')
self.assert_json_equal('', 'entryline/@3/entry.num', None)
self.assert_json_equal('', 'entryline/@3/entry_account', '[401 Dalton Avrel]')
self.assert_json_equal('', 'entryline/@3/costaccounting', None)
self.assert_json_equal('', 'entryline/@4/id', '8')
self.assert_json_equal('', 'entryline/@4/entry.num', None)
self.assert_json_equal('', 'entryline/@4/entry_account', '[607] 607')
self.assert_json_equal('', 'entryline/@4/costaccounting', 'aaa')
self.assert_json_equal('', 'entryline/@11/id', '13')
self.assert_json_equal('', 'entryline/@11/entry.num', None)
self.assert_json_equal('', 'entryline/@11/entry_account', '[401 Maximum]')
self.assert_json_equal('', 'entryline/@11/costaccounting', None)
self.assert_json_equal('', 'entryline/@12/id', '12')
self.assert_json_equal('', 'entryline/@12/entry.num', None)
self.assert_json_equal('', 'entryline/@12/entry_account', '[601] 601')
self.assert_json_equal('', 'entryline/@12/costaccounting', 'aaa')
self.assert_json_equal('', 'entryline/@17/id', '19')
self.assert_json_equal('', 'entryline/@17/entry.num', '6')
self.assert_json_equal('', 'entryline/@17/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@17/costaccounting', None)
self.assert_json_equal('', 'entryline/@18/id', '18')
self.assert_json_equal('', 'entryline/@18/entry.num', '6')
self.assert_json_equal('', 'entryline/@18/entry_account', '[707] 707')
self.assert_json_equal('', 'entryline/@18/costaccounting', 'aaa')
self.factory.xfer = CostAccountingList()
self.calljson('/diacamma.accounting/costAccountingList', {'status': 0}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingList')
self.assert_count_equal('costaccounting', 3)
self.assert_json_equal('', 'costaccounting/@0/name', 'aaa')
self.assert_json_equal('', 'costaccounting/@0/total_revenue', 125.97)
self.assert_json_equal('', 'costaccounting/@0/total_expense', 272.32)
self.assert_json_equal('', 'costaccounting/@1/name', 'bbb')
self.assert_json_equal('', 'costaccounting/@1/total_revenue', 0.0)
self.assert_json_equal('', 'costaccounting/@1/total_expense', 0.0)
self.assert_json_equal('', 'costaccounting/@2/name', 'open')
self.assert_json_equal('', 'costaccounting/@2/total_revenue', 70.64)
self.assert_json_equal('', 'costaccounting/@2/total_expense', 63.94)
self.factory.xfer = CostAccountingList()
self.calljson('/diacamma.accounting/costAccountingList', {'year': 1, 'status': -1}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingList')
self.assert_count_equal('costaccounting', 1)
self.factory.xfer = CostAccountingList()
self.calljson('/diacamma.accounting/costAccountingList', {'year': 3, 'status': -1}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingList')
self.assert_count_equal('costaccounting', 1)
self.factory.xfer = CostAccountingList()
self.calljson('/diacamma.accounting/costAccountingList', {'year': -1, 'status': -1}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingList')
self.assert_count_equal('costaccounting', 2)
self.factory.xfer = CostAccountingList()
self.calljson('/diacamma.accounting/costAccountingList', {'year': 0, 'status': -1}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingList')
self.assert_count_equal('costaccounting', 4)
def test_costaccounting_needed(self):
    """Closing entry lines without a cost accounting must fail once 'accounting-needcost' is set.

    Sequence: create a second open fiscal year and two cost accountings,
    check the selector offers an empty choice while the parameter is off,
    enable the parameter, verify closing uncosted lines raises, assign a
    cost accounting and verify closing then succeeds.
    """
    # Extra open fiscal year chained after year 1.
    FiscalYear.objects.create(begin='2016-01-01', end='2016-12-31', status=0, last_fiscalyear_id=1)
    # One cost accounting per fiscal year.
    self.factory.xfer = CostAccountingAddModify()
    self.calljson('/diacamma.accounting/costAccountingAddModify', {"SAVE": "YES", 'name': 'aaa', 'description': 'aaa', 'year': '1'}, False)
    self.assert_observer('core.acknowledge', 'diacamma.accounting', 'costAccountingAddModify')  # id = 3
    self.factory.xfer = CostAccountingAddModify()
    self.calljson('/diacamma.accounting/costAccountingAddModify', {"SAVE": "YES", 'name': 'bbb', 'description': 'bbb', 'year': '2'}, False)
    self.assert_observer('core.acknowledge', 'diacamma.accounting', 'costAccountingAddModify')  # id = 4
    self.factory.xfer = CostAccountingList()
    self.calljson('/diacamma.accounting/costAccountingList', {'status': 0}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingList')
    self.assert_count_equal('costaccounting', 3)
    self.assert_json_equal('', 'costaccounting/@0/name', 'aaa')
    self.assert_json_equal('', 'costaccounting/@1/name', 'bbb')
    self.assert_json_equal('', 'costaccounting/@2/name', 'open')
    # While the parameter is off, the selector includes an empty (None) choice.
    self.factory.xfer = EntryAccountCostAccounting()
    self.calljson('/diacamma.accounting/entryAccountCostAccounting', {'entryline': '8;9;12;13;18;19'}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountCostAccounting')
    self.assert_count_equal('', 3)
    self.assert_select_equal('cost_accounting_id', {0: None, 2: 'open', 3: 'aaa'})  # nb=3
    self.assert_json_equal('SELECT', 'cost_accounting_id', '2')
    # Make cost accounting mandatory from now on.
    Params.setvalue('accounting-needcost', '1')
    # These lines still close fine (presumably not subject to the cost
    # requirement — TODO confirm against the fixture's accounts for ids 10;11).
    self.factory.xfer = EntryAccountClose()
    self.calljson('/diacamma.accounting/entryAccountClose', {'CONFIRME': 'YES', 'year': '1', 'journal': '2', 'entryline': '10;11'}, False)
    self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountClose')
    # Closing lines that have no cost accounting is now rejected.
    self.factory.xfer = EntryAccountClose()
    self.calljson('/diacamma.accounting/entryAccountClose', {'CONFIRME': 'YES', 'year': '1', 'journal': '2', 'entryline': '8;9;12;13;18;19'}, False)
    self.assert_observer('core.exception', 'diacamma.accounting', 'entryAccountClose')
    self.assert_json_equal('', 'message', 'La comptabilité analytique est obligatoire !')
    # The empty choice disappears from the selector.
    self.factory.xfer = EntryAccountCostAccounting()
    self.calljson('/diacamma.accounting/entryAccountCostAccounting', {'entryline': '8;9;12;13;18;19'}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountCostAccounting')
    self.assert_count_equal('', 3)
    self.assert_select_equal('cost_accounting_id', {2: 'open', 3: 'aaa'})  # nb=2
    self.assert_json_equal('SELECT', 'cost_accounting_id', '2')
    # Assign a cost accounting; closing the same lines then succeeds.
    self.factory.xfer = EntryAccountCostAccounting()
    self.calljson('/diacamma.accounting/entryAccountCostAccounting', {"SAVE": "YES", 'entryline': '8;9;12;13;18;19', 'cost_accounting_id': '2'}, False)
    self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountCostAccounting')
    self.factory.xfer = EntryAccountClose()
    self.calljson('/diacamma.accounting/entryAccountClose', {'CONFIRME': 'YES', 'year': '1', 'journal': '2', 'entryline': '8;9;12;13;18;19'}, False)
    self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountClose')
def test_costaccounting_incomestatement(self):
    """Income statement per cost accounting: grid columns and row counts.

    Checks three shapes of the report: multi-selection (extra 'name'
    column), single selection, and a date-filtered report (no budget
    columns).
    """
    # Two cost accountings at once -> combined grid with a 'name' column.
    self.factory.xfer = CostAccountingIncomeStatement()
    self.calljson('/diacamma.accounting/costAccountingIncomeStatement', {'costaccounting': '1;2'}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingIncomeStatement')
    self.assert_count_equal('', 4)
    self.assertFalse('__tab_1' in self.json_data.keys(), self.json_data.keys())
    self.assert_grid_equal('report_2', {"name": "Comptabilit\u00e9 analytique", "left": "Charges", "left_n": "Valeur",
                                        "left_b": "Budget", "space": "", "right": "Produits", "right_n": "Valeur", "right_b": "Budget"}, 6 + 7 + 2)
    # Single cost accounting -> no 'name' column.
    self.factory.xfer = CostAccountingIncomeStatement()
    self.calljson('/diacamma.accounting/costAccountingIncomeStatement', {'costaccounting': '1'}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingIncomeStatement')
    self.assert_count_equal('', 5)
    self.assertFalse('__tab_1' in self.json_data.keys(), self.json_data.keys())
    self.assert_grid_equal('report_1', {"left": "Charges", "left_n": "Valeur", "left_b": "Budget",
                                        "space": "", "right": "Produits", "right_n": "Valeur", "right_b": "Budget"}, 6)
    # Date-filtered report drops the budget columns.
    self.factory.xfer = CostAccountingIncomeStatement()
    self.calljson('/diacamma.accounting/costAccountingIncomeStatement', {'costaccounting': '2', 'begin_date': '2015-02-14', 'end_date': '2015-02-20'}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingIncomeStatement')
    self.assert_count_equal('', 5)
    self.assertFalse('__tab_1' in self.json_data.keys(), self.json_data.keys())
    self.assert_grid_equal('report_2', {"left": "Charges", "left_n": "Valeur", "space": "", "right": "Produits", "right_n": "Valeur"}, 6)
def test_costaccounting_importbudget(self):
    """Import the budget of an existing cost accounting into a new one."""
    FiscalYear.objects.create(begin='2016-01-01', end='2016-12-31', status=0, last_fiscalyear_id=1)
    self.factory.xfer = CostAccountingAddModify()
    self.calljson('/diacamma.accounting/costAccountingAddModify', {"SAVE": "YES", 'name': 'aaa', 'description': 'aaa', 'year': '2', 'last_costaccounting': '2'}, False)
    self.assert_observer('core.acknowledge', 'diacamma.accounting', 'costAccountingAddModify')  # id = 3
    # The new cost accounting starts with an empty budget.
    self.factory.xfer = BudgetList()
    self.calljson('/diacamma.accounting/budgetList', {'cost_accounting': '3'}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'budgetList')
    self.assert_count_equal('', 5)
    self.assert_count_equal('budget_revenue', 0)
    self.assert_count_equal('budget_expense', 0)
    # Only the 'open' cost accounting is offered as import source.
    self.factory.xfer = BudgetImport()
    self.calljson('/diacamma.accounting/budgetImport', {'cost_accounting': '3'}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'budgetImport')
    self.assert_count_equal('', 4)
    self.assert_select_equal('costaccounting', {2: 'open'})
    self.factory.xfer = BudgetImport()
    self.calljson('/diacamma.accounting/budgetImport', {'cost_accounting': '3', 'costaccounting': '2', 'CONFIRME': 'YES'}, False)
    self.assert_observer('core.acknowledge', 'diacamma.accounting', 'budgetImport')
    # Budget lines were copied over.
    self.factory.xfer = BudgetList()
    self.calljson('/diacamma.accounting/budgetList', {'cost_accounting': '3'}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'budgetList')
    self.assert_count_equal('', 6)
    self.assert_count_equal('budget_revenue', 1)
    self.assert_count_equal('budget_expense', 2)
    self.assert_json_equal('LABELFORM', 'result', -187.38)
def test_costaccounting_ledger(self):
    """Ledger per cost accounting: tab layout and report row counts."""
    # Two cost accountings -> two tabs, one report per cost accounting.
    self.factory.xfer = CostAccountingLedger()
    self.calljson('/diacamma.accounting/costAccountingLedger', {'costaccounting': '1;2'}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingLedger')
    self.assert_count_equal('', 4 + 2 * 2)
    self.assertTrue('__tab_2' in self.json_data.keys(), self.json_data.keys())
    self.assertFalse('__tab_3' in self.json_data.keys(), self.json_data.keys())
    self.assert_count_equal('report_1', 5)
    self.assert_count_equal('report_2', 15)
    # Single cost accounting with a date filter -> one tab, fewer rows.
    self.factory.xfer = CostAccountingLedger()
    self.calljson('/diacamma.accounting/costAccountingLedger', {'costaccounting': '2', 'begin_date': '2015-02-14', 'end_date': '2015-02-20'}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingLedger')
    self.assert_count_equal('', 5 + 2 * 1)
    self.assertTrue('__tab_1' in self.json_data.keys(), self.json_data.keys())
    self.assertFalse('__tab_2' in self.json_data.keys(), self.json_data.keys())
    self.assert_count_equal('report_2', 5)
def test_costaccounting_trialbalance(self):
    """Trial balance per cost accounting: tab layout and report row counts."""
    # Two cost accountings -> two tabs, one report per cost accounting.
    self.factory.xfer = CostAccountingTrialBalance()
    self.calljson('/diacamma.accounting/costAccountingTrialBalance', {'costaccounting': '1;2'}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingTrialBalance')
    self.assert_count_equal('', 4 + 2 * 2)
    self.assertTrue('__tab_2' in self.json_data.keys(), self.json_data.keys())
    self.assertFalse('__tab_3' in self.json_data.keys(), self.json_data.keys())
    self.assert_count_equal('report_1', 3)
    self.assert_count_equal('report_2', 5)
    # Single cost accounting with a date filter -> one tab, fewer rows.
    self.factory.xfer = CostAccountingTrialBalance()
    self.calljson('/diacamma.accounting/costAccountingTrialBalance', {'costaccounting': '2', 'begin_date': '2015-02-14', 'end_date': '2015-02-20'}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'costAccountingTrialBalance')
    self.assert_count_equal('', 5 + 2 * 1)
    self.assertTrue('__tab_1' in self.json_data.keys(), self.json_data.keys())
    self.assertFalse('__tab_2' in self.json_data.keys(), self.json_data.keys())
    self.assert_count_equal('report_2', 3)
def test_fiscalyear_balancesheet(self):
    """Balance sheet of the current fiscal year: spot-check asset/liability rows and the deficit line."""
    self.factory.xfer = FiscalYearBalanceSheet()
    self.calljson('/diacamma.accounting/fiscalYearBalanceSheet', {}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'fiscalYearBalanceSheet')
    self._check_result()
    self.assert_count_equal('report_1', 11)
    # Left column: assets (receivables, bank, cash).
    self.assert_json_equal('', 'report_1/@1/left', '[411] 411')
    self.assert_json_equal('', 'report_1/@1/left_n', 159.98)
    self.assert_json_equal('', 'report_1/@1/left_n_1', '')
    self.assert_json_equal('', 'report_1/@5/left', '[512] 512')
    self.assert_json_equal('', 'report_1/@5/left_n', 1130.29)
    self.assert_json_equal('', 'report_1/@5/left_n_1', '')
    self.assert_json_equal('', 'report_1/@6/left', '[531] 531')
    self.assert_json_equal('', 'report_1/@6/left_n', -79.63)
    self.assert_json_equal('', 'report_1/@6/left_n_1', '')
    # Right column: equity and payables.
    self.assert_json_equal('', 'report_1/@1/right', '[106] 106')
    self.assert_json_equal('', 'report_1/@1/right_n', 1250.38)
    self.assert_json_equal('', 'report_1/@1/right_n_1', '')
    self.assert_json_equal('', 'report_1/@5/right', '[401] 401')
    self.assert_json_equal('', 'report_1/@5/right_n', 78.24)
    self.assert_json_equal('', 'report_1/@5/right_n_1', '')
    # Row @10 carries the formatted deficit result on the left side only.
    self.assert_json_equal('', 'report_1/@10/left', "     {[i]}{[b]}résultat (déficit){[/b]}{[/i]}")
    self.assert_json_equal('', 'report_1/@10/left_n', {"format": "{[i]}{[b]}{0}{[/b]}{[/i]}", "value": 117.98})
    self.assert_json_equal('', 'report_1/@10/right', "")
    self.assert_json_equal('', 'report_1/@10/right_n', "")
def test_fiscalyear_balancesheet_filter(self):
    """Balance sheet restricted to a date range; detailed checks in _check_result_with_filter."""
    self.factory.xfer = FiscalYearBalanceSheet()
    self.calljson('/diacamma.accounting/fiscalYearBalanceSheet', {'begin': '2015-02-22', 'end': '2015-02-28'}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'fiscalYearBalanceSheet')
    self._check_result_with_filter()
def test_fiscalyear_balancesheet_print(self):
    """Print the balance sheet as PDF (PRINT_MODE 3) and save the output."""
    self.factory.xfer = FiscalYearReportPrint()
    self.calljson('/diacamma.accounting/fiscalYearReportPrint', {'classname': 'FiscalYearBalanceSheet', "PRINT_MODE": 3}, False)
    self.assert_observer('core.print', 'diacamma.accounting', 'fiscalYearReportPrint')
    self.save_pdf()
def test_fiscalyear_balancesheet_print_ods(self):
    """Print the balance sheet as ODS spreadsheet (PRINT_MODE 2) and save the output."""
    self.factory.xfer = FiscalYearReportPrint()
    self.calljson('/diacamma.accounting/fiscalYearReportPrint', {'classname': 'FiscalYearBalanceSheet', "PRINT_MODE": 2}, False)
    self.assert_observer('core.print', 'diacamma.accounting', 'fiscalYearReportPrint')
    self.save_ods()
def test_fiscalyear_incomestatement(self):
    """Income statement of the current fiscal year: check expense/revenue rows and their budget column."""
    self.factory.xfer = FiscalYearIncomeStatement()
    self.calljson('/diacamma.accounting/fiscalYearIncomeStatement', {}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'fiscalYearIncomeStatement')
    self._check_result()
    self.assert_count_equal('report_1', 12)
    # Left column: expense accounts (value / previous year / budget).
    self.assert_json_equal('', 'report_1/@0/left', '[601] 601')
    self.assert_json_equal('', 'report_1/@0/left_n', 78.24)
    self.assert_json_equal('', 'report_1/@0/left_n_1', '')
    self.assert_json_equal('', 'report_1/@0/left_b', 8.19)
    self.assert_json_equal('', 'report_1/@1/left', '[602] 602')
    self.assert_json_equal('', 'report_1/@1/left_n', 63.94)
    self.assert_json_equal('', 'report_1/@1/left_n_1', '')
    self.assert_json_equal('', 'report_1/@1/left_b', 7.35)
    # Account 604 has only a budget, no actuals.
    self.assert_json_equal('', 'report_1/@2/left', '[604] 604')
    self.assert_json_equal('', 'report_1/@2/left_n', '')
    self.assert_json_equal('', 'report_1/@2/left_n_1', '')
    self.assert_json_equal('', 'report_1/@2/left_b', 6.24)
    self.assert_json_equal('', 'report_1/@3/left', '[607] 607')
    self.assert_json_equal('', 'report_1/@3/left_n', 194.08)
    self.assert_json_equal('', 'report_1/@3/left_n_1', '')
    self.assert_json_equal('', 'report_1/@3/left_b', '')
    self.assert_json_equal('', 'report_1/@4/left', '[627] 627')
    self.assert_json_equal('', 'report_1/@4/left_n', 12.34)
    self.assert_json_equal('', 'report_1/@4/left_n_1', '')
    self.assert_json_equal('', 'report_1/@4/left_b', '')
    self.assert_json_equal('', 'report_1/@9/left', '[870] 870')
    self.assert_json_equal('', 'report_1/@9/left_n', 1234.00)
    self.assert_json_equal('', 'report_1/@9/left_n_1', '')
    self.assert_json_equal('', 'report_1/@9/left_b', '')
    # Right column: revenue accounts.
    self.assert_json_equal('', 'report_1/@0/right', '[701] 701')
    self.assert_json_equal('', 'report_1/@0/right_n', '')
    self.assert_json_equal('', 'report_1/@0/right_n_1', '')
    self.assert_json_equal('', 'report_1/@0/right_b', 67.89)
    self.assert_json_equal('', 'report_1/@1/right', '[707] 707')
    self.assert_json_equal('', 'report_1/@1/right_n', 230.62)
    self.assert_json_equal('', 'report_1/@1/right_n_1', '')
    self.assert_json_equal('', 'report_1/@1/right_b', 123.45)
    self.assert_json_equal('', 'report_1/@9/right', '[860] 860')
    self.assert_json_equal('', 'report_1/@9/right_n', 1234.00)
    self.assert_json_equal('', 'report_1/@9/right_n_1', '')
    self.assert_json_equal('', 'report_1/@9/right_b', '')
def test_fiscalyear_import_budget(self):
    """Import a previous fiscal year's figures as the budget of a new year."""
    FiscalYear.objects.create(begin='2016-01-01', end='2016-12-31', status=0, last_fiscalyear_id=1)
    # The new year's budget starts empty.
    self.factory.xfer = BudgetList()
    self.calljson('/diacamma.accounting/budgetList', {'year': '3'}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'budgetList')
    self.assert_count_equal('', 5)
    self.assertEqual(len(self.json_actions), 4)
    self.assert_count_equal('budget_revenue', 0)
    self.assert_count_equal('#budget_revenue/actions', 2)
    self.assert_count_equal('budget_expense', 0)
    self.assert_count_equal('#budget_expense/actions', 2)
    # Both earlier fiscal years are offered as import source.
    self.factory.xfer = BudgetImport()
    self.calljson('/diacamma.accounting/budgetImport', {'year': '3'}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'budgetImport')
    self.assert_count_equal('', 4)
    self.assert_select_equal('currentyear', {1: 'Exercice du 1 janvier 2015 au 31 décembre 2015 [en création]', 2: 'Exercice du 1 janvier 2014 au 31 décembre 2014 [terminé]'})
    self.factory.xfer = BudgetImport()
    self.calljson('/diacamma.accounting/budgetImport', {'year': '3', 'currentyear': '1', 'CONFIRME': 'YES'}, False)
    self.assert_observer('core.acknowledge', 'diacamma.accounting', 'budgetImport')
    # Budget lines are created from year 1's accounts.
    self.factory.xfer = BudgetList()
    self.calljson('/diacamma.accounting/budgetList', {'year': '3'}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'budgetList')
    self.assert_count_equal('', 6)
    self.assert_count_equal('budget_revenue', 1)
    self.assert_count_equal('budget_expense', 4)
    self.assert_json_equal('LABELFORM', 'result', -117.98)
def test_fiscalyear_incomestatement_filter(self):
    """Income statement restricted to a date range; detailed checks in _check_result_with_filter."""
    self.factory.xfer = FiscalYearIncomeStatement()
    self.calljson('/diacamma.accounting/fiscalYearIncomeStatement', {'begin': '2015-02-22', 'end': '2015-02-28'}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'fiscalYearIncomeStatement')
    self._check_result_with_filter()
def test_fiscalyear_incomestatement_print(self):
    """Print the income statement as PDF (PRINT_MODE 3) and save the output."""
    self.factory.xfer = FiscalYearReportPrint()
    self.calljson('/diacamma.accounting/fiscalYearReportPrint', {'classname': 'FiscalYearIncomeStatement', "PRINT_MODE": 3}, False)
    self.assert_observer('core.print', 'diacamma.accounting', 'fiscalYearReportPrint')
    self.save_pdf()
def test_fiscalyear_incomestatement_print_ods(self):
    """Print the income statement as ODS spreadsheet (PRINT_MODE 2) and save the output."""
    self.factory.xfer = FiscalYearReportPrint()
    self.calljson('/diacamma.accounting/fiscalYearReportPrint', {'classname': 'FiscalYearIncomeStatement', "PRINT_MODE": 2}, False)
    self.assert_observer('core.print', 'diacamma.accounting', 'fiscalYearReportPrint')
    self.save_ods()
def test_fiscalyear_ledger(self):
    """Ledger of the current fiscal year; detailed checks in _check_result."""
    self.factory.xfer = FiscalYearLedger()
    self.calljson('/diacamma.accounting/fiscalYearLedger', {}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'fiscalYearLedger')
    self._check_result()
def test_fiscalyear_ledger_filter(self):
    """Ledger restricted to a date range; detailed checks in _check_result_with_filter."""
    self.factory.xfer = FiscalYearLedger()
    self.calljson('/diacamma.accounting/fiscalYearLedger', {'begin': '2015-02-22', 'end': '2015-02-28'}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'fiscalYearLedger')
    self._check_result_with_filter()
def test_fiscalyear_ledger_print(self):
    """Print the ledger as PDF (PRINT_MODE 3) and save the output."""
    self.factory.xfer = FiscalYearReportPrint()
    self.calljson('/diacamma.accounting/fiscalYearReportPrint', {'classname': 'FiscalYearLedger', "PRINT_MODE": 3}, False)
    self.assert_observer('core.print', 'diacamma.accounting', 'fiscalYearReportPrint')
    self.save_pdf()
def test_fiscalyear_ledger_print_ods(self):
    """Print the ledger as ODS spreadsheet (PRINT_MODE 2) and save the output."""
    self.factory.xfer = FiscalYearReportPrint()
    self.calljson('/diacamma.accounting/fiscalYearReportPrint', {'classname': 'FiscalYearLedger', "PRINT_MODE": 2}, False)
    self.assert_observer('core.print', 'diacamma.accounting', 'fiscalYearReportPrint')
    self.save_ods()
def test_fiscalyear_trialbalance(self):
    """Full trial balance of the fiscal year: every account row plus the grand-total line."""
    self.factory.xfer = FiscalYearTrialBalance()
    self.calljson('/diacamma.accounting/fiscalYearTrialBalance', {}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'fiscalYearTrialBalance')
    self._check_result()
    self.assert_count_equal('report_1', 14)
    # (row index, designation, total debit, total credit, balance debit, balance credit)
    expected_rows = [
        (0, '[106] 106', 0.00, 1250.38, 0, 1250.38),
        (1, '[401] 401', 258.02, 336.26, 0, 78.24),
        (2, '[411] 411', 230.62, 70.64, 159.98, 0),
        (3, '[512] 512', 1206.57, 76.28, 1130.29, 0),
        (4, '[531] 531', 114.45, 194.08, 0, 79.63),
        (5, '[601] 601', 78.24, 0.00, 78.24, 0),
        (6, '[602] 602', 63.94, 0.00, 63.94, 0),
        (7, '[607] 607', 194.08, 0.00, 194.08, 0),
        (8, '[627] 627', 12.34, 0.00, 12.34, 0),
        (9, '[707] 707', 0.00, 230.62, 0, 230.62),
        (10, '[860] 860', 0.00, 1234.00, 0, 1234.00),
        (11, '[870] 870', 1234.00, 0.00, 1234.00, 0),
    ]
    for row_idx, designation, tot_debit, tot_credit, bal_debit, bal_credit in expected_rows:
        self.assert_json_equal('', 'report_1/@%d/designation' % row_idx, designation)
        self.assert_json_equal('', 'report_1/@%d/total_debit' % row_idx, tot_debit)
        self.assert_json_equal('', 'report_1/@%d/total_credit' % row_idx, tot_credit)
        self.assert_json_equal('', 'report_1/@%d/solde_debit' % row_idx, bal_debit)
        self.assert_json_equal('', 'report_1/@%d/solde_credit' % row_idx, bal_credit)
    # Row @13 is the bold/underlined grand-total line.
    total_format = "{[u]}{[b]}{0}{[/b]}{[/u]}"
    self.assert_json_equal('', 'report_1/@13/designation', "          {[u]}{[b]}total{[/b]}{[/u]}")
    self.assert_json_equal('', 'report_1/@13/total_debit', {"value": 3392.26, "format": total_format})
    self.assert_json_equal('', 'report_1/@13/total_credit', {"value": 3392.26, "format": total_format})
    self.assert_json_equal('', 'report_1/@13/solde_debit', {"value": 2872.87, "format": total_format})
    # Note: the credit side carries a float artifact in the fixture data.
    self.assert_json_equal('', 'report_1/@13/solde_credit', {"value": 2872.8700000000003, "format": total_format})
def test_fiscalyear_trialbalance_filter(self):
    """Trial balance restricted to a date range; detailed checks in _check_result_with_filter."""
    self.factory.xfer = FiscalYearTrialBalance()
    self.calljson('/diacamma.accounting/fiscalYearTrialBalance', {'begin': '2015-02-22', 'end': '2015-02-28'}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'fiscalYearTrialBalance')
    self._check_result_with_filter()
def test_fiscalyear_trialbalance_third(self):
    """Trial balance filtered on account code '4' with per-third detail rows."""
    self.factory.xfer = FiscalYearTrialBalance()
    self.calljson('/diacamma.accounting/fiscalYearTrialBalance', {'filtercode': '4', 'with_third': 1}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'fiscalYearTrialBalance')
    self.assert_count_equal('report_1', 8)
    # 'only_nonull' removes rows with a null balance.
    self.factory.xfer = FiscalYearTrialBalance()
    self.calljson('/diacamma.accounting/fiscalYearTrialBalance', {'filtercode': '4', 'with_third': 1, 'only_nonull': 1}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'fiscalYearTrialBalance')
    self.assert_count_equal('report_1', 5)
def test_fiscalyear_trialbalance_print(self):
    """Print the trial balance as PDF (PRINT_MODE 3) and save the output."""
    self.factory.xfer = FiscalYearReportPrint()
    self.calljson('/diacamma.accounting/fiscalYearReportPrint', {'classname': 'FiscalYearTrialBalance', "PRINT_MODE": 3}, False)
    self.assert_observer('core.print', 'diacamma.accounting', 'fiscalYearReportPrint')
    self.save_pdf()
def test_fiscalyear_trialbalance_print_ods(self):
    """Print the trial balance as ODS spreadsheet (PRINT_MODE 2) and save the output."""
    self.factory.xfer = FiscalYearReportPrint()
    self.calljson('/diacamma.accounting/fiscalYearReportPrint', {'classname': 'FiscalYearTrialBalance', "PRINT_MODE": 2}, False)
    self.assert_observer('core.print', 'diacamma.accounting', 'fiscalYearReportPrint')
    self.save_ods()
def test_export(self):
    """Export fiscal year 1 to XML; exporting year 2 (no validated entry) must fail."""
    self.assertFalse(exists(get_user_path('accounting', 'fiscalyear_export_1.xml')))
    self.factory.xfer = FiscalYearExport()
    self.calljson('/diacamma.accounting/fiscalYearExport', {}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'fiscalYearExport')
    # The export file is created for the default fiscal year.
    self.assertTrue(exists(get_user_path('accounting', 'fiscalyear_export_1.xml')))
    self.assertFalse(exists(get_user_path('accounting', 'fiscalyear_export_2.xml')))
    # Year 2 has no validated entry: exception raised, no file written.
    self.factory.xfer = FiscalYearExport()
    self.calljson('/diacamma.accounting/fiscalYearExport', {'fiscalyear': '2'}, False)
    self.assert_observer('core.exception', 'diacamma.accounting', 'fiscalYearExport')
    self.assertFalse(exists(get_user_path('accounting', 'fiscalyear_export_2.xml')))
    self.assert_json_equal('', 'code', '3')
    self.assert_json_equal('', 'message', "Cet exercice n'a pas d'écriture validée !")
def test_search_advanced(self):
    """Entry search using a criterion on a third's custom field."""
    CustomField.objects.create(modelname='accounting.Third', name='categorie', kind=4, args="{'list':['---','petit','moyen','gros']}")
    CustomField.objects.create(modelname='accounting.Third', name='value', kind=1, args="{'min':0,'max':100}")
    third = Third.objects.get(id=7)
    third.set_custom_values({'custom_2': '4'})
    # No criteria: all entry lines returned.
    self.factory.xfer = EntryAccountSearch()
    self.calljson('/diacamma.accounting/entryAccountSearch', {}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountSearch')
    self.assert_count_equal('entryline', 25)
    # Criterion on the custom field value -> only the matching third's lines.
    self.factory.xfer = EntryAccountSearch()
    self.calljson('/diacamma.accounting/entryAccountSearch', {'CRITERIA': 'third.custom_2||1||4'}, False)
    self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountSearch')
    self.assert_count_equal('entryline', 2)
| Diacamma2/financial | diacamma/accounting/tests_completed_entries.py | Python | gpl-3.0 | 75,447 | [
"Dalton"
] | ac18ef35c06f2f8061c3cbff0723af3ca02e2fb387036c12f657e0c96337d89a |
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from PyQt4.QtCore import Qt
from PyQt4 import QtGui, QtCore
from customdelegate import CustomDelegate, DocumentationMetaclass
from camelot.view.proxy import ValueLoading
from camelot.view.controls import editors
from camelot.core.utils import variant_to_pyobject
from camelot.view.art import Icon
class ColoredFloatDelegate(CustomDelegate):
    """Custom delegate for float values.
    The class attribute icons is used to customize the icons displayed.

    Paints the value with a small trend icon (up / down / zero) to the left,
    selecting the icon from the sign of either the ``arrow`` field attribute
    or the value itself.
    """

    __metaclass__ = DocumentationMetaclass

    editor = editors.ColoredFloatEditor

    # Maps cmp(comparator, 0) -> icon path: 1 positive, -1 negative, 0 zero.
    icons = {
        1:'tango/16x16/actions/go-up.png',
        -1:'tango/16x16/actions/go-down-red.png',
        0:'tango/16x16/actions/zero.png'
    }

    def __init__(self, parent=None,
                 precision=2, reverse=False, neutral=False,
                 unicode_format=None, **kwargs
                 ):
        """
        :param parent: parent Qt object for this delegate
        :param precision: number of decimals used when formatting the float
        :param reverse: forwarded to the ColoredFloatEditor
        :param neutral: forwarded to the ColoredFloatEditor
        :param unicode_format: optional callable producing the display string
            for a value; when None, locale based formatting is used
        """
        CustomDelegate.__init__(self, parent=parent,
                                reverse=reverse, neutral=neutral,
                                precision=precision, unicode_format=unicode_format, **kwargs
                                )
        self.precision = precision
        self.reverse = reverse
        self.neutral = neutral
        self.unicode_format = unicode_format
        # Locale used to render the float with the correct decimal separator.
        self._locale = QtCore.QLocale()

    def paint(self, painter, option, index):
        """Paint the cell: background, trend icon, then the formatted number."""
        painter.save()
        self.drawBackground(painter, option, index)
        value = variant_to_pyobject( index.model().data(index, Qt.EditRole) )
        field_attributes = variant_to_pyobject(index.data(Qt.UserRole))
        fontColor = QtGui.QColor()
        editable, prefix, suffix, background_color, arrow = True, '', '', None, None
        if field_attributes != ValueLoading:
            editable = field_attributes.get('editable', True)
            prefix = field_attributes.get('prefix', '')
            suffix = field_attributes.get('suffix', '')
            background_color = field_attributes.get('background_color', None)
            arrow = field_attributes.get('arrow', None)
        # NOTE(review): fontColor is re-created here, making the assignment
        # above redundant - confirm before simplifying.
        fontColor = QtGui.QColor()
        if (option.state & QtGui.QStyle.State_Selected):
            painter.fillRect(option.rect, option.palette.highlight())
        else:
            if editable:
                painter.fillRect(option.rect, background_color or option.palette.base())
                fontColor.setRgb(0,0,0)
            else:
                # Non-editable cells are greyed out.
                painter.fillRect(option.rect, background_color or option.palette.window())
                fontColor.setRgb(130,130,130)
        # The icon reflects the sign of the arrow's y value when an arrow
        # field attribute is present, otherwise the sign of the value.
        if arrow:
            comparator = arrow.y
        else:
            comparator = value
        #self.icons[cmp(comparator,0)].paint(painter, option.rect.left(), option.rect.top()+1, option.rect.height(), option.rect.height(), Qt.AlignVCenter)
        # NOTE: cmp() is a Python 2 builtin; this module targets Python 2.
        iconpath = self.icons[cmp(comparator,0)]
        icon = QtGui.QIcon(Icon(iconpath).getQPixmap())
        icon.paint(
            painter, option.rect.left(), option.rect.top()+1,
            option.rect.height(), option.rect.height(), Qt.AlignVCenter
        )
        value_str = u''
        if value != None and value != ValueLoading:
            if self.unicode_format != None:
                value_str = self.unicode_format(value)
            else:
                # Locale aware fixed point formatting with self.precision decimals.
                value_str = unicode( self._locale.toString( float(value),
                                                            'f',
                                                            self.precision ) )
        value_str = unicode(prefix) + u' ' + unicode(value_str) + u' ' + unicode(suffix)
        fontColor = fontColor.darker()
        painter.setPen(fontColor.toRgb())
        # Shift the text 23px right to leave room for the icon, right aligned.
        rect = QtCore.QRect(option.rect.left()+23,
                            option.rect.top(),
                            option.rect.width()-23,
                            option.rect.height())
        painter.drawText(rect.x()+2,
                         rect.y(),
                         rect.width()-4,
                         rect.height(),
                         Qt.AlignVCenter | Qt.AlignRight,
                         value_str)
        painter.restore()
| jeroendierckx/Camelot | camelot/view/controls/delegates/coloredfloatdelegate.py | Python | gpl-2.0 | 5,082 | [
"VisIt"
] | 85e6d1a0eb9307e842a77bdc3574216ad891cf7e06c99f6264721eda7ade72e2 |
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The splash screen
"""
from PyQt4 import QtCore, QtGui
class SplashScreen(QtGui.QSplashScreen):
    """
    Splash screen displayed while the application starts up.
    """
    def __init__(self):
        """
        Build the splash screen and initialise its UI.
        """
        super(SplashScreen, self).__init__()
        self.setupUi()

    def setupUi(self):
        """
        Configure the widget: load the splash image, use it as both pixmap
        and window mask, and fix the widget size.
        """
        self.setObjectName('splashScreen')
        self.setContextMenuPolicy(QtCore.Qt.PreventContextMenu)
        pixmap = QtGui.QPixmap(':/graphics/openlp-splash-screen.png')
        self.setPixmap(pixmap)
        self.setMask(pixmap.mask())
        self.resize(370, 370)
| marmyshev/item_title | openlp/core/ui/splashscreen.py | Python | gpl-2.0 | 2,744 | [
"Brian"
] | 00000bc982bd6581ba35c5fa9e46bc5f822f95ac49bc831e75fa68762490ca6c |
import functools
import logging
import threading
from galaxy.util import asbool, mask_password_from_url
from pulsar import manager_endpoint_util
from pulsar.client import amqp_exchange_factory
log = logging.getLogger(__name__)

# Conversion functions for AMQP configuration values that may arrive as
# strings (e.g. from an ini file) but must be typed before use.
TYPED_PARAMS = {
    # The string "None" means no timeout; anything else is parsed as a float.
    "amqp_consumer_timeout": lambda val: None if str(val) == "None" else float(val),
    "amqp_publish_retry": asbool,
    "amqp_publish_retry_max_retries": int,
    "amqp_publish_retry_interval_start": int,
    "amqp_publish_retry_interval_step": int,
    "amqp_publish_retry_interval_max": int,
}
def get_exchange(connection_string, manager_name, conf):
    """Create the AMQP exchange for a manager, coercing typed config values.

    Note: ``conf`` is fixed up in place before being handed to the factory.
    """
    # HACK: Fixup non-string parameters - ultimately this should reuse spec
    # stuff from Galaxy.
    for key, coerce_value in TYPED_PARAMS.items():
        if key in conf:
            conf[key] = coerce_value(conf[key])
    return amqp_exchange_factory.get_exchange(
        connection_string,
        manager_name,
        conf
    )
def bind_manager_to_queue(manager, queue_state, connection_string, conf):
    """Wire a Pulsar job manager to its AMQP message queues.

    When ``message_queue_consume`` is enabled (default), daemon consumer
    threads are started for the setup, kill and status queues - plus the
    status_update_ack queue when ``amqp_acknowledge`` is set.  When
    ``message_queue_publish`` is enabled (default), a callback is registered
    on the manager that publishes job state changes to ``status_update``.
    """
    manager_name = manager.name
    log.info("bind_manager_to_queue called for [{}] and manager [{}]".format(mask_password_from_url(connection_string), manager_name))
    pulsar_exchange = get_exchange(connection_string, manager_name, conf)

    # Bind each raw handler to this manager up front.
    process_setup_messages = functools.partial(__process_setup_message, manager)
    process_kill_messages = functools.partial(__process_kill_message, manager)
    process_status_messages = functools.partial(__process_status_message, manager)

    def drain(callback, name):
        # Blocks consuming the named queue until queue_state signals shutdown.
        __drain(name, queue_state, pulsar_exchange, callback)
        log.info("Finished consuming %s queue - no more messages will be processed." % (name))

    if conf.get("message_queue_consume", True):
        setup_thread = start_setup_consumer(pulsar_exchange, functools.partial(drain, process_setup_messages, "setup"))
        kill_thread = start_kill_consumer(pulsar_exchange, functools.partial(drain, process_kill_messages, "kill"))
        status_thread = start_status_consumer(pulsar_exchange, functools.partial(drain, process_status_messages, "status"))
        # Track threads when the caller's queue_state supports it.
        if hasattr(queue_state, "threads"):
            queue_state.threads.extend([setup_thread, kill_thread, status_thread])
        if conf.get("amqp_acknowledge", False):
            status_update_ack_thread = start_status_update_ack_consumer(pulsar_exchange, functools.partial(drain, None, "status_update_ack"))
            getattr(queue_state, 'threads', []).append(status_update_ack_thread)

    # TODO: Think through job recovery, jobs shouldn't complete until after bind
    # has occurred.
    def bind_on_status_change(new_status, job_id):
        job_id = job_id or 'unknown'
        try:
            message = "Publishing Pulsar state change with status {} for job_id {}".format(new_status, job_id)
            log.debug(message)
            payload = manager_endpoint_util.full_status(manager, new_status, job_id)
            pulsar_exchange.publish("status_update", payload)
        except Exception:
            # Log and re-raise so callers can react to publish failures.
            log.exception("Failure to publish Pulsar state change for job_id %s." % job_id)
            raise

    if conf.get("message_queue_publish", True):
        manager.set_state_change_callback(bind_on_status_change)
def __start_consumer(name, exchange, target):
    """Launch a named daemon thread running ``target`` against ``exchange``."""
    masked_url = mask_password_from_url(exchange.url)
    consumer = threading.Thread(
        name="consume-{}-{}".format(name, masked_url),
        target=target,
    )
    # TODO: If the shutdown code is actually called make this
    # not a daemon.
    consumer.daemon = True
    consumer.start()
    return consumer


# Convenience starters for each queue this module consumes.
start_setup_consumer = functools.partial(__start_consumer, "setup")
start_kill_consumer = functools.partial(__start_consumer, "kill")
start_status_consumer = functools.partial(__start_consumer, "status")
start_status_update_ack_consumer = functools.partial(__start_consumer, "status_update_ack")
def __drain(name, queue_state, pulsar_exchange, callback):
    """Consume the named queue until ``queue_state`` indicates shutdown."""
    pulsar_exchange.consume(name, check=queue_state, callback=callback)
def __processes_message(f):
    """Decorator shared by the queue message handlers.

    The wrapper skips messages already acknowledged upstream, extracts the
    client job id from the body before delegating to ``f``, logs (rather
    than propagates) handler failures, and always acknowledges the message
    afterwards so it is not redelivered.
    """
    @functools.wraps(f)
    def process_message(manager, body, message):
        if message.acknowledged:
            log.info("Message is already acknowledged (by an upstream callback?), Pulsar will not handle this message")
            return
        job_id = None
        try:
            job_id = __client_job_id_from_body(body)
            assert job_id, 'Could not parse job id from body: %s' % body
            f(manager, body, job_id)
        except Exception:
            job_id = job_id or 'unknown'
            log.exception("Failed to process message with function {} for job_id {}".format(f.__name__, job_id))
        # Acknowledge even after a failure - these handlers are not retried.
        message.ack()
    return process_message
@__processes_message
def __process_kill_message(manager, body, job_id):
    # Kill the job named in the message.
    manager.kill(job_id)


@__processes_message
def __process_setup_message(manager, body, job_id):
    # Submit the job described by the message body to the manager.
    manager_endpoint_util.submit_job(manager, body)


@__processes_message
def __process_status_message(manager, body, job_id):
    # Re-fire the manager's state change callback for this job.
    manager.trigger_state_change_callback(job_id)


def __client_job_id_from_body(body):
    """Return the client-side job id from a message body, or None if absent."""
    job_id = body.get("job_id", None)
    return job_id
| galaxyproject/pulsar | pulsar/messaging/bind_amqp.py | Python | apache-2.0 | 5,259 | [
"Galaxy"
] | 4910fa08912ecf7a022c921939b59857dd3849248ed2128db0348270278ee02a |
import os
from json import dumps
from .destination import submit_params
from .setup_handler import build as build_setup_handler
from .job_directory import RemoteJobDirectory
from .decorators import parseJson
from .decorators import retry
from .util import copy
from .util import ensure_directory
from .util import to_base64_json
from .action_mapper import path_type
import logging
log = logging.getLogger(__name__)
CACHE_WAIT_SECONDS = 3
class OutputNotFoundException(Exception):
    """Raised when a requested job output cannot be located remotely."""

    def __init__(self, path):
        # Local path whose remote counterpart was missing.
        self.path = path

    def __str__(self):
        return "No remote output found for path {}".format(self.path)
class BaseJobClient(object):
    """Common state and setup behavior shared by all Pulsar job clients."""

    def __init__(self, destination_params, job_id):
        self.destination_params = destination_params
        self.job_id = job_id
        self.job_directory = self.__build_job_directory(destination_params, job_id)
        self.env = destination_params.get("env", [])
        self.files_endpoint = destination_params.get("files_endpoint", None)
        self.default_file_action = self.destination_params.get("default_file_action", "transfer")
        self.action_config_path = self.destination_params.get("file_action_config", None)
        # Built last - the handler may inspect attributes set above.
        self.setup_handler = build_setup_handler(self, destination_params)

    @staticmethod
    def __build_job_directory(destination_params, job_id):
        # A remote job directory can only be derived when the destination
        # declares where jobs are staged on the Pulsar side.
        if "jobs_directory" not in (destination_params or {}):
            return None
        return RemoteJobDirectory(
            remote_staging_directory=destination_params["jobs_directory"],
            remote_id=job_id,
            remote_sep=destination_params.get("remote_sep", os.sep),
        )

    def setup(self, tool_id=None, tool_version=None):
        """
        Setup remote Pulsar server to run this job.
        """
        setup_args = {"job_id": self.job_id}
        if tool_id:
            setup_args["tool_id"] = tool_id
        if tool_version:
            setup_args["tool_version"] = tool_version
        return self.setup_handler.setup(**setup_args)

    @property
    def prefer_local_staging(self):
        # Without a remote job directory, paths are calculated locally and
        # staged from here.
        return self.job_directory is None
class JobClient(BaseJobClient):
    """
    Objects of this client class perform low-level communication with a remote Pulsar server.

    **Parameters**

    destination_params : dict or str
        connection parameters, either url with dict containing url (and optionally `private_token`).
    job_id : str
        Galaxy job/task id.
    """

    def __init__(self, destination_params, job_id, job_manager_interface):
        super(JobClient, self).__init__(destination_params, job_id)
        # Transport abstraction used by _raw_execute for every server command.
        self.job_manager_interface = job_manager_interface

    def launch(self, command_line, dependencies_description=None, env=[], remote_staging=[], job_config=None):
        """
        Queue up the execution of the supplied `command_line` on the remote
        server. Called launch for historical reasons, should be renamed to
        enqueue or something like that.

        **Parameters**

        command_line : str
            Command to execute.
        """
        launch_params = dict(command_line=command_line, job_id=self.job_id)
        submit_params_dict = submit_params(self.destination_params)
        if submit_params_dict:
            launch_params['params'] = dumps(submit_params_dict)
        if dependencies_description:
            launch_params['dependencies_description'] = dumps(dependencies_description.to_dict())
        if env:
            launch_params['env'] = dumps(env)
        if remote_staging:
            launch_params['remote_staging'] = dumps(remote_staging)
        if job_config and self.setup_handler.local:
            # Setup not yet called, job properties were inferred from
            # destination arguments. Hence, must have Pulsar setup job
            # before queueing.
            setup_params = _setup_params_from_job_config(job_config)
            launch_params["setup_params"] = dumps(setup_params)
        return self._raw_execute("submit", launch_params)

    def full_status(self):
        """ Return a dictionary summarizing final state of job.
        """
        return self.raw_check_complete()

    def kill(self):
        """
        Cancel remote job, either removing from the queue or killing it.
        """
        return self._raw_execute("cancel", {"job_id": self.job_id})

    @retry()
    @parseJson()
    def raw_check_complete(self):
        """
        Get check_complete response from the remote server.
        """
        check_complete_response = self._raw_execute("status", {"job_id": self.job_id})
        return check_complete_response

    def get_status(self):
        """Return the job's status string (may be None for old servers)."""
        check_complete_response = self.raw_check_complete()
        # Older Pulsar instances won't set status so use 'complete', at some
        # point drop backward compatibility.
        status = check_complete_response.get("status", None)
        return status

    def clean(self):
        """
        Cleanup the remote job.
        """
        self._raw_execute("clean", {"job_id": self.job_id})

    @parseJson()
    def remote_setup(self, **setup_args):
        """
        Setup remote Pulsar server to run this job.
        """
        return self._raw_execute("setup", setup_args)

    def put_file(self, path, input_type, name=None, contents=None, action_type='transfer'):
        """Stage a file on the server - upload ('transfer') or local copy ('copy')."""
        if not name:
            name = os.path.basename(path)
        args = {"job_id": self.job_id, "name": name, "type": input_type}
        input_path = path
        if contents:
            # Literal contents supplied - no local file to stream.
            input_path = None
        if action_type == 'transfer':
            return self._upload_file(args, contents, input_path)
        elif action_type == 'copy':
            # Ask the server for the destination path, then copy directly.
            pulsar_path = self._raw_execute('path', args)
            copy(path, pulsar_path)
            return {'path': pulsar_path}

    def fetch_output(self, path, name, working_directory, action_type, output_type):
        """
        Fetch (transfer, copy, etc...) an output from the remote Pulsar server.

        **Parameters**

        path : str
            Local path of the dataset.
        name : str
            Remote name of file (i.e. path relative to remote staging output
            or working directory).
        working_directory : str
            Local working_directory for the job.
        action_type : str
            Where to find file on Pulsar (output_workdir or output). legacy is also
            an option in this case Pulsar is asked for location - this will only be
            used if targetting an older Pulsar server that didn't return statuses
            allowing this to be inferred.
        """
        if output_type == 'output_workdir':
            self._fetch_work_dir_output(name, working_directory, path, action_type=action_type)
        elif output_type == 'output':
            self._fetch_output(path=path, name=name, action_type=action_type)
        else:
            raise Exception("Unknown output_type %s" % output_type)

    def _raw_execute(self, command, args={}, data=None, input_path=None, output_path=None):
        """Delegate a raw command to the job manager interface."""
        return self.job_manager_interface.execute(command, args, data, input_path, output_path)

    def _fetch_output(self, path, name=None, check_exists_remotely=False, action_type='transfer'):
        """Fetch a primary output into local ``path``."""
        if not name:
            # Extra files will send in the path.
            name = os.path.basename(path)
        self.__populate_output_path(name, path, action_type)

    def _fetch_work_dir_output(self, name, working_directory, output_path, action_type='transfer'):
        """Fetch a working-directory output into local ``output_path``."""
        ensure_directory(output_path)
        if action_type == 'transfer':
            self.__raw_download_output(name, self.job_id, path_type.OUTPUT_WORKDIR, output_path)
        else:  # Even if action is none - Pulsar has a different work_dir so this needs to be copied.
            pulsar_path = self._output_path(name, self.job_id, path_type.OUTPUT_WORKDIR)['path']
            copy(pulsar_path, output_path)

    def __populate_output_path(self, name, output_path, action_type):
        # Download or copy the named remote output into output_path.
        ensure_directory(output_path)
        if action_type == 'transfer':
            self.__raw_download_output(name, self.job_id, path_type.OUTPUT, output_path)
        elif action_type == 'copy':
            pulsar_path = self._output_path(name, self.job_id, path_type.OUTPUT)['path']
            copy(pulsar_path, output_path)

    @parseJson()
    def _upload_file(self, args, contents, input_path):
        # Single-shot upload of literal contents or the file at input_path.
        return self._raw_execute("upload_file", args, contents, input_path)

    @parseJson()
    def _output_path(self, name, job_id, output_type):
        # Ask the server where the named output lives on its filesystem.
        return self._raw_execute("path",
                                 {"name": name,
                                  "job_id": self.job_id,
                                  "type": output_type})

    @retry()
    def __raw_download_output(self, name, job_id, output_type, output_path):
        # Stream the remote output into output_path (retried on failure).
        output_params = {
            "name": name,
            "job_id": self.job_id,
            "type": output_type
        }
        self._raw_execute("download_output", output_params, output_path=output_path)
class BaseMessageJobClient(BaseJobClient):
    """Base class for Pulsar clients communicating over message queues.

    Unlike :class:`JobClient`, message-based clients cannot query the server
    for paths, so the destination must allow a remote ``job_directory`` to be
    derived for staging.
    """

    def __init__(self, destination_params, job_id, client_manager):
        """
        :param destination_params: destination configuration; must define
            ``jobs_directory`` so a remote job directory can be built.
        :param job_id: client-side job identifier.
        :param client_manager: manager holding the shared ``status_cache``
            of final job states.
        :raises Exception: when no remote job directory is configured.
        """
        super(BaseMessageJobClient, self).__init__(destination_params, job_id)
        if not self.job_directory:
            error_message = "Message-queue based Pulsar client requires destination define a remote job_directory to stage files into."
            raise Exception(error_message)
        self.client_manager = client_manager

    def clean(self):
        """Drop this job's cached final status from the client manager."""
        del self.client_manager.status_cache[self.job_id]

    def full_status(self):
        """Return the cached final status dictionary for this job.

        :raises Exception: if no final status has been cached yet.
        """
        full_status = self.client_manager.status_cache.get(self.job_id, None)
        if full_status is None:
            # Fixed typo in message: "cilent" -> "client".
            raise Exception("full_status() called before a final status was properly cached with client manager.")
        return full_status

    def _build_setup_message(self, command_line, dependencies_description, env, remote_staging, job_config):
        """Assemble the payload for a setup/submit message.

        Mirrors :meth:`JobClient.launch` parameter handling, but keeps the
        values as structures (the message layer serializes them) rather than
        pre-dumped JSON strings.
        """
        launch_params = dict(command_line=command_line, job_id=self.job_id)
        submit_params_dict = submit_params(self.destination_params)
        if submit_params_dict:
            launch_params['submit_params'] = submit_params_dict
        if dependencies_description:
            launch_params['dependencies_description'] = dependencies_description.to_dict()
        if env:
            launch_params['env'] = env
        if remote_staging:
            launch_params['remote_staging'] = remote_staging
        if job_config and self.setup_handler.local:
            # Setup not yet called, job properties were inferred from
            # destination arguments. Hence, must have Pulsar setup job
            # before queueing.
            setup_params = _setup_params_from_job_config(job_config)
            launch_params["setup_params"] = setup_params
        return launch_params
class MessageJobClient(BaseMessageJobClient):
    """Message-queue client publishing setup and kill requests directly."""

    def launch(self, command_line, dependencies_description=None, env=[], remote_staging=[], job_config=None):
        """Publish a setup message enqueueing ``command_line`` remotely."""
        setup_message = self._build_setup_message(
            command_line,
            dependencies_description=dependencies_description,
            env=env,
            remote_staging=remote_staging,
            job_config=job_config,
        )
        response = self.client_manager.exchange.publish("setup", setup_message)
        log.info("Job published to setup message queue.")
        return response

    def kill(self):
        """Publish a kill message for this job."""
        self.client_manager.exchange.publish("kill", dict(job_id=self.job_id))
class MessageCLIJobClient(BaseMessageJobClient):
    """Message-based client that submits via a remote shell command instead
    of publishing directly to the setup queue.
    """

    def __init__(self, destination_params, job_id, client_manager, shell):
        super(MessageCLIJobClient, self).__init__(destination_params, job_id, client_manager)
        # Root of the Pulsar installation on the remote host (contains scripts/).
        self.remote_pulsar_path = destination_params["remote_pulsar_path"]
        self.shell = shell

    def launch(self, command_line, dependencies_description=None, env=[], remote_staging=[], job_config=None):
        """Run the remote submit script with the setup message base64-encoded."""
        launch_params = self._build_setup_message(
            command_line,
            dependencies_description=dependencies_description,
            env=env,
            remote_staging=remote_staging,
            job_config=job_config
        )
        base64_message = to_base64_json(launch_params)
        submit_command = os.path.join(self.remote_pulsar_path, "scripts", "submit.bash")
        # TODO: Allow configuration of manager, app, and ini path...
        # Fire-and-forget: nohup + & so the submit survives the shell session.
        self.shell.execute("nohup %s --base64 %s &" % (submit_command, base64_message))

    def kill(self):
        # TODO: not implemented for CLI-based submission.
        pass
class InputCachingJobClient(JobClient):
    """
    Beta client that cache's staged files to prevent duplication.
    """

    def __init__(self, destination_params, job_id, job_manager_interface, client_cacher):
        super(InputCachingJobClient, self).__init__(destination_params, job_id, job_manager_interface)
        # Coordinates transfers so each input file is only uploaded once.
        self.client_cacher = client_cacher

    @parseJson()
    def _upload_file(self, args, contents, input_path):
        """Upload a file, going through the client cache for path uploads.

        Literal ``contents`` bypass the cache entirely.  Otherwise this waits
        (polling every 30 seconds) for the shared transfer of ``input_path``
        to complete and then submits only the cache token.
        """
        action = "upload_file"
        if contents:
            input_path = None
            return self._raw_execute(action, args, contents, input_path)
        else:
            event_holder = self.client_cacher.acquire_event(input_path)
            cache_required = self.cache_required(input_path)
            if cache_required:
                self.client_cacher.queue_transfer(self, input_path)
            while not event_holder.failed:
                available = self.file_available(input_path)
                if available['ready']:
                    token = available['token']
                    args["cache_token"] = token
                    return self._raw_execute(action, args)
                # Block until the transfer signals progress (or 30s passes).
                event_holder.event.wait(30)
            if event_holder.failed:
                raise Exception("Failed to transfer file %s" % input_path)

    @parseJson()
    def cache_required(self, path):
        """Ask the server whether ``path`` still needs to be cached."""
        return self._raw_execute("cache_required", {"path": path})

    @parseJson()
    def cache_insert(self, path):
        """Insert the file at ``path`` into the remote cache."""
        return self._raw_execute("cache_insert", {"path": path}, None, path)

    @parseJson()
    def file_available(self, path):
        """Query cache availability - response carries 'ready' and 'token'."""
        return self._raw_execute("file_available", {"path": path})
def _setup_params_from_job_config(job_config):
    """Extract the setup parameters Pulsar needs from a job configuration."""
    return dict(
        job_id=job_config.get("job_id", None),
        tool_id=job_config.get("tool_id", None),
        tool_version=job_config.get("tool_version", None),
    )
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/pulsar/client/client.py | Python | gpl-3.0 | 14,735 | [
"Galaxy"
] | c6cbec5743cdceadf2f6a3f3d1fd197341aa75e66edc514f0bb049d24f87541d |
# -*- coding: utf-8 -*-
#
# conftest.py
# spt_compute
#
# Author : Alan D Snow, 2017.
# License: BSD 3-Clause
import json
import os
from shutil import copytree, rmtree
import pytest
from spt_compute.imports.extractnested import ExtractNested
# Directory containing this test module; used to resolve relative test paths.
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
# Expected location of the compiled RAPID executable relative to this repo.
RAPID_EXE_PATH = os.path.join(SCRIPT_DIR, "..", "..", "rapid", "src", "rapid")
def compare_warnings(return_file, compare_return_file):
    """Assert that two warning JSON files contain equal data."""
    with open(return_file) as expected_fp:
        expected = json.load(expected_fp)
    with open(compare_return_file) as actual_fp:
        actual = json.load(actual_fp)
    assert expected == actual
class TestDirectories(object):
    """Well-known input/compare/output directories used by these tests."""

    # Prepared test inputs.
    input = os.path.join(SCRIPT_DIR, 'input')
    # Reference files generated output is compared against.
    compare = os.path.join(SCRIPT_DIR, 'compare')
    # Scratch directory tests write into.
    output = os.path.join(SCRIPT_DIR, 'output')

    def clean(self):
        """
        Clean out test directory
        """
        # Work from inside the output directory; the original cwd is restored
        # once cleanup completes.
        original_dir = os.getcwd()
        os.chdir(self.output)
        # Clear out directory
        file_list = os.listdir(self.output)
        for afile in file_list:
            # Keep the .gitignore that keeps the empty directory in git.
            if not afile.endswith('.gitignore'):
                path = os.path.join(self.output, afile)
                if os.path.isdir(path):
                    rmtree(path)
                else:
                    os.remove(path)
        os.chdir(original_dir)
class SetupForecast(object):
    """Stage RAPID inputs, optional historical data and forecast grids into
    the output directory for a forecast test run.
    """

    def __init__(self, a_tclean, watershed_folder, forecast_folder, historical=True):
        """
        :param a_tclean: TestDirectories instance (cleaned before staging)
        :param watershed_folder: name of the watershed input subfolder
        :param forecast_folder: name of the forecast grid subfolder
        :param historical: also stage historical simulation files
        """
        a_tclean.clean()
        # make log folder
        self.log_folder = os.path.join(a_tclean.output, "logs")
        os.makedirs(self.log_folder)
        # copy RAPID model files
        self.rapid_io_folder = os.path.join(a_tclean.output, "rapid-io")
        rapid_input_folder = os.path.join(self.rapid_io_folder, "input")
        os.makedirs(rapid_input_folder)
        self.watershed_input_folder = os.path.join(rapid_input_folder, watershed_folder)
        copytree(os.path.join(a_tclean.input, "rapid_input", watershed_folder),
                 self.watershed_input_folder)
        if historical:
            # copy historical simulation_files
            self.historical_input_folder = os.path.join(a_tclean.output, "historical_input")
            os.makedirs(self.historical_input_folder)
            copytree(os.path.join(a_tclean.input, "historical_input", watershed_folder),
                     os.path.join(self.historical_input_folder, watershed_folder))
        # copy forecast grid files
        self.lsm_folder = os.path.join(a_tclean.output, forecast_folder)
        copytree(os.path.join(a_tclean.input, "forecast_grids", forecast_folder),
                 self.lsm_folder)
        # add path to comparison files
        self.watershed_compare_folder = os.path.join(a_tclean.compare,
                                                     'rapid_output',
                                                     watershed_folder)
class SetupECMWFForecast(SetupForecast):
    """SetupForecast variant for ECMWF runs: also creates subprocess log and
    multiprocess execute folders and extracts the gzipped forecast archive.
    """

    def __init__(self, a_tclean, watershed_folder, forecast_folder, historical=True):
        super(SetupECMWFForecast, self).__init__(a_tclean, watershed_folder, forecast_folder, historical)
        # make subprocess log folder
        self.subprocess_log_folder = os.path.join(a_tclean.output, "subprocess_logs")
        os.makedirs(self.subprocess_log_folder)
        # make multiprocess execute folder
        self.multiprocess_execute_folder = os.path.join(a_tclean.output, "mp_execute")
        os.makedirs(self.multiprocess_execute_folder)
        # extract the forecasts
        forecast_targz = os.path.join(self.lsm_folder, "Runoff.20170708.00.C.america.exp1.Fgrid.netcdf.tar.gz")
        ExtractNested(forecast_targz, True)
@pytest.fixture(scope="module")
def tclean(request):
    """Module-scoped fixture yielding cleaned test directories."""
    dirs = TestDirectories()
    dirs.clean()
    yield dirs
    # Remove whatever the tests left behind.
    dirs.clean()
| erdc-cm/spt_ecmwf_autorapid_process | tests/conftest.py | Python | bsd-3-clause | 3,870 | [
"NetCDF"
] | 95e8231b5c6e1f6a7ddf49a655e8d0e89f6c46f82c2575790a1046b1754a8684 |
# fabricfile to deploy build
#
# depends on installation of fabric - pip install fabric virtualenv
#
# example invocation
# $ fab -H jenkins@uf04.seedscientific.com deploy
# $ fab -H ubuntu@52.0.138.67 deploy
# $ fab -H ubuntu@uf04.seedscientific.com deploy
from fabric.api import local, run, cd, put
## global variables
##
# this can be set by passing venv_path arg to deploy() target
# Optional local virtualenv for the build; settable via deploy(venv_path=...).
local_venv_path = None
# /var/www/clients.seedscientific.com/uf/UF04
remote_work_path = '~/deploy/polio-work'  # remote scratch dir for uploaded archives
remote_backend_path = '/var/www/apps/polio/'  # deployed backend application
remote_frontend_path = '/var/www/polio/static/'  # deployed static frontend assets
# deploy build
#
# build-machine dependencies - node, gulp, bower, sass, compass, ruby, virtualenv, fabric-virtualenv
def deploy(venv_path=None):
    """Build locally, then push the build to the target host.

    :param venv_path: optional path of a virtualenv to build inside
    """
    global local_venv_path
    local_venv_path = venv_path
    # Build on this machine first...
    _build_dependencies()
    # ...then swap the build in on the target around an apache restart.
    stop_apache()
    _push_to_remote()
    start_apache()
# apache controls
def stop_apache():
    """Stop apache on the target host via sudo."""
    run("sudo /etc/init.d/apache2 stop")

def start_apache():
    """Start apache on the target host via sudo."""
    run("sudo /etc/init.d/apache2 start")
# build dependencies
#
#
def _build_dependencies():
    """Build the frontend/backend distributables on the local machine.

    Builds inside a virtualenv when ``local_venv_path`` is set (via
    ``deploy(venv_path=...)``); otherwise uses the system environment.
    """
    ###
    ### on build machine...
    ###
    # set up dependencies
    print ("TODO: confirm build machine has dependencies. i.e. node, gulp.")
    # e.g.
    # sudo gem install sass
    # sudo gem install compass
    # only build with a virtualenv if one is passed in.
    if (local_venv_path):
        # make virtual env
        local('virtualenv %s' % local_venv_path)
        # enter virtual environment
        # NOTE: execfile is Python 2 only - this fabfile targets Python 2.
        activate_this_file = "%s/bin/activate_this.py" % local_venv_path
        execfile(activate_this_file, dict(__file__=activate_this_file))
    # update/install dependencies
    local ("npm install")
    local ("pip install -r requirements.txt")
    # make dist
    local("./node_modules/.bin/bower install")
    local("./node_modules/.bin/gulp dist")
# push build to remote
#
#
def _push_to_remote():
    """Upload the built archives to the target host and deploy them.

    Uploads ``dist/uf04-frontend.zip`` / ``dist/uf04-backend.zip`` to the
    remote work directory, unpacks them over the frontend/backend locations,
    fixes group ownership/permissions for apache and runs the Django
    syncdb/migrate plus the database build script.
    """
    ###
    ### on target machine...
    ###
    # make folder if it doesn't exist
    run("mkdir -p %s" % remote_work_path)
    # push to remote server
    put('dist/uf04-frontend.zip', remote_work_path)
    put('dist/uf04-backend.zip', remote_work_path)
    # unzip stuff
    with cd(remote_work_path):
        run("rm -rf %s" % remote_frontend_path)
        # Delete all Python, HTML, and SQL files. We don't delete the entire
        # directory because that will catch the media/ directory which will
        # probably have files we want to keep in it. This way we ensure that we
        # clean out old scripts before deploying. Set mindepth to 2 so that we
        # can keep the server's settings.py file in the application folder
        # BUG FIX: the regex's closing quote was misplaced so "-delete" was
        # part of the pattern and find performed no action at all.
        run("find %s -mindepth 2 -regextype 'posix-extended' -regex '.*\.(pyc?|sql|html)' -delete" % remote_backend_path)
        # [these unzips were trying to overwrite .pyc files owned by www-root
        # so the 'find' command above may not be deleting enough compiled pycs]
        run("unzip -o uf04-frontend.zip -d %s" % remote_frontend_path)  # -o is overwrite
        run("unzip -o uf04-backend.zip -d %s" % remote_backend_path)
    with cd(remote_frontend_path):
        # remove compiled files
        run('sudo rm -rf `find . -name "*.pyc"`')
        # chgroup, chmod so apache can edit
        run('chgrp -R www-data *')
        run('chmod -R g+w *')
    # in server path -
    with cd(remote_backend_path):
        # remove compiled files
        run('sudo rm -rf `find . -name "*.pyc"`')
        run("chgrp -R www-data *")
        run("chmod -R g+w *")
        run("pip install -r requirements.txt")
        # echo "== SYNCDB / MIGRATE =="
        run("python manage.py syncdb --noinput --settings=settings")
        run("python manage.py migrate --noinput --merge --settings=settings")
        # echo "== BUILDING DATABASE =="
        run("bash bin/build_db.sh")
# bounce apache??
# customize any other configuration?
#
# echo "== BUILDING DOCUMENTATION ==" # maybe...
# make clean -C docs
# make html -C docs
#
# echo "== RUNNING TESTS =="
# python manage.py test datapoints.tests.test_cache --settings=polio.settings_test
| SeedScientific/polio | fabfile.py | Python | agpl-3.0 | 4,190 | [
"GULP"
] | 736124cb16664e740e83ee9a37dbbd8f83b2a9146f43021df71de4ef27451e25 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import absolute_import
import numpy as np
from numpy.testing import (
dec,
assert_,
assert_equal,
assert_raises,
)
from unittest import skip
import MDAnalysis as mda
from MDAnalysisTests.datafiles import PSF, DCD
from MDAnalysisTests import parser_not_found
class TestSequence(object):
    """Tests for ResidueGroup.sequence() against the known AdK sequence."""

    # all tests are done with the AdK system (PSF and DCD) sequence:
    # http://www.uniprot.org/uniprot/P69441.fasta
    # >sp|P69441|KAD_ECOLI Adenylate kinase OS=Escherichia coli (strain K12) GN=adk PE=1 SV=1
    ref_adk_sequence = (
        "MRIILLGAPGAGKGTQAQFIMEKYGIPQISTGDMLRAAVKSGSELGKQAKDIMDAGKLVT"
        "DELVIALVKERIAQEDCRNGFLLDGFPRTIPQADAMKEAGINVDYVLEFDVPDELIVDRI"
        "VGRRVHAPSGRVYHVKFNPPKVEGKDDVTGEELTTRKDDQEETVRKRLVEYHQMTAPLIG"
        "YYSKEAEAGNTKYAKVDGTKPVAEVRADLEKILG"
    )

    def setUp(self):
        # Fresh Universe per test.
        self.u = mda.Universe(PSF, DCD)

    def tearDown(self):
        del self.u

    def test_string(self):
        # format="string" yields the raw one-letter sequence.
        p = self.u.select_atoms("protein")
        assert_equal(p.residues.sequence(format="string"),
                     self.ref_adk_sequence)

    def test_SeqRecord(self):
        p = self.u.select_atoms("protein")
        s = p.residues.sequence(format="SeqRecord",
                                id="P69441", name="KAD_ECOLI Adenylate kinase",
                                description="EcAdK from pdb 4AKE")
        assert_equal(s.id, "P69441")
        assert_equal(s.seq.tostring(), self.ref_adk_sequence)

    def test_SeqRecord_default(self):
        # Same as above, but without an explicit format argument.
        p = self.u.select_atoms("protein")
        s = p.residues.sequence(id="P69441", name="KAD_ECOLI Adenylate kinase",
                                description="EcAdK from pdb 4AKE")
        assert_equal(s.id, "P69441")
        assert_equal(s.seq.tostring(), self.ref_adk_sequence)

    def test_Seq(self):
        p = self.u.select_atoms("protein")
        s = p.residues.sequence(format="Seq")
        assert_equal(s.tostring(), self.ref_adk_sequence)

    def test_nonIUPACresname_VE(self):
        """test_sequence_nonIUPACresname: non recognized amino acids raise
        ValueError"""
        # fake non-IUPAC residue name for this test
        residues = self.u.select_atoms("resname MET").residues
        residues.resnames = "MSE"

        def wrong_res():
            self.u.residues.sequence()

        assert_raises(ValueError, wrong_res)

    def test_format_TE(self):
        # Unknown format names raise TypeError.
        assert_raises(TypeError, self.u.residues.sequence, format='chicken')
class TestResidueGroup(object):
    """Legacy ResidueGroup behaviour tests (pre-issue-363) on the AdK system."""
    # Legacy tests from before 363

    @dec.skipif(parser_not_found('DCD'),
                'DCD parser not available. Are you using python 3?')
    def setUp(self):
        """Set up the standard AdK system in implicit solvent."""
        self.universe = mda.Universe(PSF, DCD)
        self.rg = self.universe.residues

    def test_newResidueGroup(self):
        """test that slicing a ResidueGroup returns a new ResidueGroup
        (Issue 135)"""
        rg = self.universe.atoms.residues
        newrg = rg[10:20:2]
        assert_(isinstance(newrg, mda.core.groups.ResidueGroup),
                "Failed to make a new ResidueGroup: type mismatch")

    def test_n_atoms(self):
        assert_equal(self.rg.n_atoms, 3341)

    def test_n_residues(self):
        assert_equal(self.rg.n_residues, 214)

    def test_resids_dim(self):
        assert_equal(len(self.rg.resids), len(self.rg))

    def test_resnums_dim(self):
        assert_equal(len(self.rg.resnums), len(self.rg))

    def test_segids_dim(self):
        assert_equal(len(self.rg.segids), len(self.rg))

    def test_len(self):
        """testing that len(residuegroup) == residuegroup.n_residues"""
        assert_equal(len(self.rg), self.rg.n_residues,
                     "len and n_residues disagree")

    # BUG FIX: this test shared the name ``test_set_resids`` with the
    # per-residue test below, so it was shadowed and never collected.  It
    # also referenced an undefined name ``a`` instead of the loop variable
    # ``at``, which would have raised NameError once it actually ran.
    def test_set_resids_single(self):
        """set_resid: set all resids of a selection to a single value"""
        rg = self.universe.select_atoms("bynum 12:42").residues
        resid = 999
        rg.resids = resid
        # check individual atoms
        for at in rg.atoms:
            assert_equal(at.resid, resid,
                         err_msg="failed to set_resid atoms 12:42 to same resid")
        # check residues
        assert_equal(rg.resids, resid * np.ones(rg.n_residues),
                     err_msg="failed to set_resid of residues belonging to "
                             "atoms 12:42 to same resid")

    def test_set_resids(self):
        """test_set_resid: set ResidueGroup resids on a per-residue basis"""
        rg = self.universe.select_atoms("resid 10:18").residues
        resids = np.array(rg.resids) + 1000
        rg.resids = resids
        # check individual atoms
        for r, resid in zip(rg, resids):
            for at in r.atoms:
                assert_equal(at.resid, resid,
                             err_msg="failed to set_resid residues 10:18 to same "
                                     "resid in residue {0}\n"
                                     "(resids = {1}\nresidues = {2})".format(r, resids, rg))
        assert_equal(rg.resids, resids,
                     err_msg="failed to set_resid of residues belonging to "
                             "residues 10:18 to new resids")

    def test_set_resids_updates_self(self):
        rg = self.universe.select_atoms("resid 10:18").residues
        resids = np.array(rg.resids) + 1000
        rg.resids = resids
        assert_equal(rg.resids, resids,
                     err_msg="old selection was not changed in place "
                             "after set_resid")

    def test_set_resnum_single(self):
        rg = self.universe.residues[:3]
        new = 22
        rg.resnums = new
        assert_equal(all(rg.resnums == new), True)
        for r in rg:
            assert_equal(r.resnum, new)

    def test_set_resnum_many(self):
        rg = self.universe.residues[:3]
        new = [22, 23, 24]
        rg.resnums = new
        assert_equal(all(rg.resnums == new), True)
        for r, v in zip(rg, new):
            assert_equal(r.resnum, v)

    def test_set_resnum_ValueError(self):
        # Length mismatch (4 values for 3 residues) must raise.
        rg = self.universe.residues[:3]
        new = [22, 23, 24, 25]
        assert_raises(ValueError, setattr, rg, 'resnums', new)

    # INVALID: no `set_resnames` method; use `resnames` property directly
    @skip
    def test_set_resname_single(self):
        rg = self.universe.residues[:3]
        new = 'newname'
        rg.set_resnames(new)
        assert_equal(all(rg.resnames == new), True)
        for r in rg:
            assert_equal(r.name, new)

    # INVALID: no `set_resnames` method; use `resnames` property directly
    @skip
    def test_set_resname_many(self):
        rg = self.universe.residues[:3]
        new = ['a', 'b', 'c']
        rg.set_resnames(new)
        assert_equal(all(rg.resnames == new), True)
        for r, v in zip(rg, new):
            assert_equal(r.name, v)

    # INVALID: no `set_resnames` method; use `resnames` property directly
    @skip
    def test_set_resname_ValueError(self):
        rg = self.universe.residues[:3]
        new = ['a', 'b', 'c', 'd']
        assert_raises(ValueError, rg.set_resnames, new)

    # INVALID: no `set_resids` method; also, residues are not mergeable
    # by setting resids; resids are not necessarily unique; atoms must
    # have their resindex set to change residue membership
    @skip
    def test_merge_residues(self):
        rg = self.universe.select_atoms("resid 12:14").residues
        nres_old = self.universe.atoms.n_residues
        natoms_old = rg.n_atoms
        rg.set_resids(12)  # merge all into one with resid 12
        nres_new = self.universe.atoms.n_residues
        r_merged = self.universe.select_atoms("resid 12:14").residues
        natoms_new = self.universe.select_atoms("resid 12").n_atoms
        assert_equal(len(r_merged), 1, err_msg="set_resid failed to merge "
                                               "residues: merged = {0}".format(r_merged))
        assert_equal(nres_new, nres_old - 2,
                     err_msg="set_resid failed to merge residues: "
                             "merged = {0}".format(r_merged))
        # dropped a stray no-op ``.format(r_merged)`` on a message without
        # placeholders
        assert_equal(natoms_new, natoms_old,
                     err_msg="set_resid lost atoms on merge")
        assert_equal(self.universe.residues.n_residues,
                     self.universe.atoms.n_residues,
                     err_msg="Universe.residues and Universe.atoms.n_residues "
                             "do not agree after residue "
                             "merge.")

    # INVALID: no `set_masses` method; use `masses` property directly
    @skip
    def test_set_masses(self):
        rg = self.universe.select_atoms("bynum 12:42 and name H*").residues
        mass = 2.0
        rg.set_masses(mass)
        # check individual atoms
        assert_equal([a.mass for a in rg.atoms],
                     mass * np.ones(rg.n_atoms),
                     err_msg="failed to set_mass H* atoms in resid 12:42 to {0}".format(mass))

    # VALID
    def test_atom_order(self):
        assert_equal(self.universe.residues.atoms.indices,
                     sorted(self.universe.residues.atoms.indices))
| kain88-de/mdanalysis | testsuite/MDAnalysisTests/core/test_residuegroup.py | Python | gpl-2.0 | 10,077 | [
"MDAnalysis"
] | 041b8f574e6e355f113de925f65c1d9a38e2d495f06b6519f0d4939fcb837803 |
import json
from django.conf import settings
from django.test import TestCase
from django.utils.importlib import import_module
from funfactory.urlresolvers import reverse
import mock
from nose.tools import ok_, eq_
from airmozilla.auth.browserid_mock import mock_browserid
from airmozilla.base import mozillians
from airmozilla.main.models import UserProfile
VOUCHED_FOR = """
{
"meta": {
"previous": null,
"total_count": 1,
"offset": 0,
"limit": 20,
"next": null
},
"objects": [
{
"website": "",
"bio": "",
"resource_uri": "/api/v1/users/2429/",
"last_updated": "2012-11-06T14:41:47",
"groups": [
"ugly tuna"
],
"city": "Casino",
"skills": [],
"country": "Albania",
"region": "Bush",
"id": "2429",
"languages": [],
"allows_mozilla_sites": true,
"photo": "http://www.gravatar.com/avatar/0409b497734934400822bb33...",
"is_vouched": true,
"email": "peterbe@gmail.com",
"ircname": "",
"allows_community_sites": true,
"full_name": "Peter Bengtsson"
}
]
}
"""
NOT_VOUCHED_FOR = """
{
"meta": {
"previous": null,
"total_count": 1,
"offset": 0,
"limit": 20,
"next": null
},
"objects": [
{
"website": "http://www.peterbe.com/",
"bio": "",
"resource_uri": "/api/v1/users/2430/",
"last_updated": "2012-11-06T15:37:35",
"groups": [
"no beard"
],
"city": "<style>p{font-style:italic}</style>",
"skills": [],
"country": "Heard Island and McDonald Islands",
"region": "Drunk",
"id": "2430",
"languages": [],
"allows_mozilla_sites": true,
"photo": "http://www.gravatar.com/avatar/23c6d359b6f7af3d3f91ca9e17...",
"is_vouched": false,
"email": "tmickel@mit.edu",
"ircname": "",
"allows_community_sites": true,
"full_name": null
}
]
}
"""
class Response(object):
    """Minimal stand-in for a ``requests`` response object.

    Only the two attributes read by the tests are modelled: ``content``
    (defaults to ``None``) and ``status_code`` (defaults to 200).
    """

    def __init__(self, content=None, status_code=200):
        self.status_code = status_code
        self.content = content
class TestViews(TestCase):
    """Tests for the BrowserID login/logout flow and mozillians vouching.

    Improvement: removed the five repeated blocks of commented-out
    ``self.assertRedirects(...)`` dead code that shadowed the live
    JSON-redirect assertions; no behavioural change.
    """

    def setUp(self):
        super(TestViews, self).setUp()
        # Create a real session store so the test client carries a working
        # session cookie.
        engine = import_module(settings.SESSION_ENGINE)
        store = engine.SessionStore()
        store.save()  # we need to make load() work, or the cookie is worthless
        self.client.cookies[settings.SESSION_COOKIE_NAME] = store.session_key

    def shortDescription(self):
        # Stop nose using the test docstring and instead the test method name.
        pass

    def get_messages(self):
        return self.client.session['_messages']

    def _login_attempt(self, email, assertion='fakeassertion123', next=None):
        """POST a (mocked) BrowserID assertion for ``email``; return response."""
        if not next:
            next = '/'
        with mock_browserid(email):
            post_data = {
                'assertion': assertion,
                'next': next
            }
            return self.client.post(
                '/browserid/login/',
                post_data
            )

    def test_invalid(self):
        """Bad BrowserID form (i.e. no assertion) -> failure."""
        response = self._login_attempt(None, None)
        eq_(response['content-type'], 'application/json')
        redirect = json.loads(response.content)['redirect']
        eq_(redirect, settings.LOGIN_REDIRECT_URL_FAILURE)

    def test_bad_verification(self):
        """Bad verification -> failure."""
        response = self._login_attempt(None)
        eq_(response['content-type'], 'application/json')
        redirect = json.loads(response.content)['redirect']
        eq_(redirect, settings.LOGIN_REDIRECT_URL_FAILURE)

    @mock.patch('requests.get')
    def test_nonmozilla(self, rget):
        """Non-Mozilla email -> failure."""
        def mocked_get(url, **options):
            if 'tmickel' in url:
                return Response(NOT_VOUCHED_FOR)
            if 'peterbe' in url:
                return Response(VOUCHED_FOR)
            raise NotImplementedError(url)
        rget.side_effect = mocked_get

        # Unvouched non-mozillian: rejected.
        response = self._login_attempt('tmickel@mit.edu')
        eq_(response['content-type'], 'application/json')
        redirect = json.loads(response.content)['redirect']
        eq_(redirect, settings.LOGIN_REDIRECT_URL_FAILURE)

        # now with a non-mozillian that is vouched for
        response = self._login_attempt('peterbe@gmail.com')
        eq_(response['content-type'], 'application/json')
        redirect = json.loads(response.content)['redirect']
        eq_(redirect, settings.LOGIN_REDIRECT_URL)

    @mock.patch('requests.get')
    def test_nonmozilla_vouched_for_second_time(self, rget):
        assert not UserProfile.objects.all()

        def mocked_get(url, **options):
            return Response(VOUCHED_FOR)
        rget.side_effect = mocked_get

        # now with a non-mozillian that is vouched for
        response = self._login_attempt('peterbe@gmail.com')
        eq_(response['content-type'], 'application/json')
        redirect = json.loads(response.content)['redirect']
        eq_(redirect, settings.LOGIN_REDIRECT_URL)

        # should be logged in
        response = self.client.get('/')
        eq_(response.status_code, 200)
        ok_('Sign in' not in response.content)
        ok_('Sign out' in response.content)
        profile, = UserProfile.objects.all()
        ok_(profile.contributor)

        # sign out; logout only accepts POST
        response = self.client.get(reverse('browserid.logout'))
        eq_(response.status_code, 405)
        response = self.client.post(reverse('browserid.logout'))
        eq_(response.status_code, 200)
        eq_(response['content-type'], 'application/json')
        redirect = json.loads(response.content)['redirect']
        eq_(redirect, settings.LOGIN_REDIRECT_URL)

        # should be logged out
        response = self.client.get('/')
        eq_(response.status_code, 200)
        ok_('Sign in' in response.content)
        ok_('Sign out' not in response.content)

        # sign in again
        response = self._login_attempt('peterbe@gmail.com')
        eq_(response['content-type'], 'application/json')
        redirect = json.loads(response.content)['redirect']
        eq_(redirect, settings.LOGIN_REDIRECT_URL)

        # should not have created another one
        eq_(UserProfile.objects.all().count(), 1)

        # sign out again
        response = self.client.post(reverse('browserid.logout'))
        eq_(response.status_code, 200)
        eq_(response['content-type'], 'application/json')
        redirect = json.loads(response.content)['redirect']
        eq_(redirect, settings.LOGIN_REDIRECT_URL)

        # pretend this is lost
        profile.contributor = False
        profile.save()

        response = self._login_attempt('peterbe@gmail.com')
        eq_(response['content-type'], 'application/json')
        redirect = json.loads(response.content)['redirect']
        eq_(redirect, settings.LOGIN_REDIRECT_URL)

        # should not have created another one
        eq_(UserProfile.objects.filter(contributor=True).count(), 1)

    def test_mozilla(self):
        """Mozilla email -> success."""
        # Try the first allowed domain
        response = self._login_attempt('tmickel@' + settings.ALLOWED_BID[0])
        eq_(response['content-type'], 'application/json')
        redirect = json.loads(response.content)['redirect']
        eq_(redirect, settings.LOGIN_REDIRECT_URL)

    @mock.patch('requests.get')
    def test_was_contributor_now_mozilla_bid(self, rget):
        """Suppose a user *was* a contributor but now her domain name
        is one of the allowed ones, it should undo that contributor status
        """
        assert not UserProfile.objects.all()

        def mocked_get(url, **options):
            return Response(VOUCHED_FOR)
        rget.side_effect = mocked_get

        response = self._login_attempt('peterbe@gmail.com')
        eq_(response['content-type'], 'application/json')
        redirect = json.loads(response.content)['redirect']
        eq_(redirect, settings.LOGIN_REDIRECT_URL)

        response = self.client.get('/')
        eq_(response.status_code, 200)
        ok_('Sign in' not in response.content)
        ok_('Sign out' in response.content)
        profile = UserProfile.objects.get(user__email='peterbe@gmail.com')
        ok_(profile.contributor)

        self.client.logout()
        response = self.client.get('/')
        eq_(response.status_code, 200)
        ok_('Sign in' in response.content)
        ok_('Sign out' not in response.content)

        # gmail.com becomes an allowed BrowserID domain -> contributor flag
        # must be cleared on the next sign-in.
        with self.settings(ALLOWED_BID=settings.ALLOWED_BID + ('gmail.com',)):
            response = self._login_attempt('peterbe@gmail.com')
            eq_(response['content-type'], 'application/json')
            redirect = json.loads(response.content)['redirect']
            eq_(redirect, settings.LOGIN_REDIRECT_URL)

        profile = UserProfile.objects.get(user__email='peterbe@gmail.com')
        ok_(not profile.contributor)  # fixed!

    @mock.patch('airmozilla.auth.views.logger')
    @mock.patch('requests.get')
    def test_nonmozilla_mozillians_unhappy(self, rget, rlogger):
        assert not UserProfile.objects.all()

        def mocked_get(url, **options):
            raise mozillians.BadStatusCodeError('crap!')
        rget.side_effect = mocked_get

        # now with a non-mozillian that is vouched for
        response = self._login_attempt('peterbe@gmail.com')
        eq_(response['content-type'], 'application/json')
        redirect = json.loads(response.content)['redirect']
        eq_(redirect, settings.LOGIN_REDIRECT_URL_FAILURE)
        eq_(rlogger.error.call_count, 1)
| bugzPDX/airmozilla | airmozilla/auth/tests/test_views.py | Python | bsd-3-clause | 10,729 | [
"CASINO"
] | 7dd99d34784ee45136d761121916affd50f8655130fc6ea46ae9984c868100e1 |
import os
import sys
import json
import click
from soccer import leagueids
from soccer.exceptions import IncorrectParametersException
from soccer.writers import get_writer
from soccer.request_handler import RequestHandler
def load_json(file):
    """Read a JSON file located next to this module and return its contents."""
    module_dir = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(module_dir, file)) as handle:
        return json.load(handle)
# Mapping of league code (e.g. 'PL') -> football-data.org competition id.
LEAGUE_IDS = leagueids.LEAGUE_IDS
# Static team metadata shipped with the package (teams.json).
TEAM_DATA = load_json("teams.json")["teams"]
# Mapping of short team code -> football-data.org team id.
TEAM_NAMES = {team["code"]: team["id"] for team in TEAM_DATA}
def get_input_key():
    """Prompt until the user enters a syntactically valid 32-char hex API key."""
    click.secho("No API key found!", fg="yellow", bold=True)
    click.secho("Please visit {} and get an API token.".format(RequestHandler.BASE_URL),
                fg="yellow",
                bold=True)
    while True:
        candidate = click.prompt(click.style("Enter API key",
                                             fg="yellow", bold=True))
        if len(candidate) == 32:  # 32 chars
            try:
                int(candidate, 16)  # hexadecimal
                return candidate
            except ValueError:
                pass
        click.secho("Invalid API key", fg="red", bold=True)
def load_config_key():
    """Fetch the API key from the environment or ``~/.soccer-cli.ini``.

    On first run the user is prompted and the key is persisted; an empty
    config file is removed and the program exits with guidance.
    """
    global api_token
    try:
        api_token = os.environ['SOCCER_CLI_API_TOKEN']
    except KeyError:
        config_path = os.path.join(os.path.expanduser("~"), ".soccer-cli.ini")
        if not os.path.exists(config_path):
            # First run: ask the user and persist the key for next time.
            with open(config_path, "w") as cfile:
                key = get_input_key()
                cfile.write(key)
        else:
            with open(config_path, "r") as cfile:
                key = cfile.read()
        if key:
            api_token = key
        else:
            # A zero-byte config file is useless; drop it and bail out.
            os.remove(config_path)
            click.secho('No API Token detected. '
                        'Please visit {0} and get an API Token, '
                        'which will be used by Soccer CLI '
                        'to get access to the data.'
                        .format(RequestHandler.BASE_URL), fg="red", bold=True)
            sys.exit(1)
    return api_token
def map_team_id(code):
    """Print the full team name for a team ``code``, or an error if unknown."""
    match = next((team for team in TEAM_DATA if team["code"] == code), None)
    if match is not None:
        click.secho(match["name"], fg="green")
    else:
        click.secho("No team found for this code", fg="red", bold=True)
def list_team_codes():
    """List team names in alphabetical order of team ID, per league."""
    # Sort by league name then team code so the output is grouped and stable.
    ordered = sorted(TEAM_DATA, key=lambda t: (t["league"]["name"], t["code"]))
    league_names = sorted({team["league"]["name"] for team in ordered})
    for league in league_names:
        click.secho(league, fg="green", bold=True)
        for team in ordered:
            if team["league"]["name"] == league and team["code"] != "null":
                click.secho(u"{0}: {1}".format(team["code"], team["name"]),
                            fg="yellow")
        click.secho("")
@click.command()
@click.option('--apikey', default=load_config_key,
              help="API key to use.")
@click.option('--list', 'listcodes', is_flag=True,
              help="List all valid team code/team name pairs.")
@click.option('--live', is_flag=True,
              help="Shows live scores from various leagues.")
@click.option('--use12hour', is_flag=True, default=False,
              help="Displays the time using 12 hour format instead of 24 (default).")
@click.option('--standings', '-s', is_flag=True,
              help="Standings for a particular league.")
@click.option('--league', '-l', type=click.Choice(LEAGUE_IDS.keys()),
              help=("Select fixtures from a particular league."))
@click.option('--players', is_flag=True,
              help="Shows players for a particular team.")
@click.option('--team', type=click.Choice(TEAM_NAMES.keys()),
              help=("Choose a particular team's fixtures."))
@click.option('--lookup', is_flag=True,
              help="Get full team name from team code when used with --team command.")
@click.option('--time', default=6,
              help=("The number of days in the past for which you "
                    "want to see the scores, or the number of days "
                    "in the future when used with --upcoming"))
@click.option('--upcoming', is_flag=True, default=False,
              help="Displays upcoming games when used with --time command.")
@click.option('--stdout', 'output_format', flag_value='stdout', default=True,
              help="Print to stdout.")
@click.option('--csv', 'output_format', flag_value='csv',
              help='Output in CSV format.')
@click.option('--json', 'output_format', flag_value='json',
              help='Output in JSON format.')
@click.option('-o', '--output-file', default=None,
              help="Save output to a file (only if csv or json option is provided).")
def main(league, time, standings, team, live, use12hour, players,
         output_format, output_file, upcoming, lookup, listcodes, apikey):
    """
    A CLI for live and past football scores from various football leagues.

    League codes:

    \b
    - WC: World Cup
    - EC: European Championship
    - CL: Champions League
    - PL: English Premier League
    - ELC: English Championship
    - FL1: French Ligue 1
    - BL: German Bundesliga
    - SA: Serie A
    - DED: Eredivisie
    - PPL: Primeira Liga
    - PD: Primera Division
    - BSA: Brazil Serie A
    """
    headers = {'X-Auth-Token': apikey}
    try:
        # --stdout and -o are mutually exclusive output destinations.
        if output_format == 'stdout' and output_file:
            raise IncorrectParametersException('Printing output to stdout and '
                                               'saving to a file are mutually exclusive')
        writer = get_writer(output_format, output_file)
        rh = RequestHandler(headers, LEAGUE_IDS, TEAM_NAMES, writer)
        # Each command below is terminal: first matching flag wins.
        if listcodes:
            list_team_codes()
            return
        if live:
            rh.get_live_scores(use12hour)
            return
        if standings:
            if not league:
                raise IncorrectParametersException('Please specify a league. '
                                                   'Example --standings --league=PL')
            if league == 'CL':
                raise IncorrectParametersException('Standings for CL - '
                                                   'Champions League not supported')
            rh.get_standings(league)
            return
        if team:
            if lookup:
                map_team_id(team)
                return
            if players:
                rh.get_team_players(team)
                return
            else:
                rh.get_team_scores(team, time, upcoming, use12hour)
                return
        # Default action: fixtures for the given league (or all leagues).
        rh.get_league_scores(league, time, upcoming, use12hour)
    except IncorrectParametersException as e:
        # User-facing parameter errors are reported, not raised.
        click.secho(str(e), fg="red", bold=True)


if __name__ == '__main__':
    main()
| architv/soccer-cli | soccer/main.py | Python | mit | 7,275 | [
"VisIt"
] | 11c22c110d80f00e4e4af915739fc92224f3352ee7707a1e708b7adfdb0a608c |
import os
import m5
from m5.objects import *
m5.util.addToPath('../common')
# Root of the SPEC CPU2006 installation; override with the M5_CPU2006 env var.
spec_dist = os.environ.get('M5_CPU2006', '/dist/m5/cpu2006')

binary_dir = spec_dist
data_dir = binary_dir

# Next PID handed out to benchmark processes (see perlbench()/bzip2()).
current_pid = 100
# 400.perlbench
def perlbench():
    """Build the 400.perlbench workload (ref input, checkspam.pl).

    FIX: now mirrors bzip2() by declaring ``current_pid`` global and
    advancing it, so successive pid-using benchmark processes no longer
    share pid 100.
    """
    global current_pid
    process = Process(pid=current_pid)
    current_pid = current_pid + 1
    process.cwd = binary_dir + '400.perlbench/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd + 'perlbench_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable] + ['-I./lib', 'checkspam.pl', '2500', '5', '25', '11', '150', '1', '1', '1', '1']
    return process
#401.bzip2
def bzip2():
    """Build the 401.bzip2 workload (ref input, block size 280)."""
    global current_pid
    proc = Process(pid=current_pid)
    current_pid = current_pid + 1
    proc.cwd = binary_dir + '401.bzip2/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'bzip2_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable, proc.cwd + 'input.program', '280']
    return proc
#403.gcc
def gcc():
    """Build the 403.gcc workload (compiles 166.i into 166.s)."""
    proc = Process()
    proc.cwd = binary_dir + '403.gcc/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'gcc_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable, proc.cwd + '166.i', '-o', proc.cwd + '166.s']
    return proc
#410.bwaves
def bwaves():
    """Build the 410.bwaves workload (no arguments, no stdin)."""
    proc = Process()
    proc.cwd = binary_dir + '410.bwaves/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'bwaves_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable]
    return proc
#416.gamess
def gamess():
    """Build the 416.gamess workload; stdin comes from cytosine.2.config."""
    # (local variable renamed from the original misspelling 'prorcess')
    proc = Process()
    proc.cwd = binary_dir + '416.gamess/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'gamess_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable]
    proc.input = proc.cwd + 'cytosine.2.config'
    return proc
#429.mcf
def mcf():
    """Build the 429.mcf workload (ref input inp.in)."""
    proc = Process()
    proc.cwd = binary_dir + '429.mcf/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'mcf_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable, proc.cwd + 'inp.in']
    return proc
#433.milc
def milc():
    """Build the 433.milc workload; stdin comes from su3imp.in."""
    proc = Process()
    proc.cwd = binary_dir + '433.milc/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'milc_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable]
    proc.input = proc.cwd + 'su3imp.in'
    return proc
#434.zeusmp
def zeusmp():
    """Build the 434.zeusmp workload (no arguments, no stdin)."""
    proc = Process()
    proc.cwd = binary_dir + '434.zeusmp/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'zeusmp_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable]
    return proc
#435.gromacs
def gromacs():
    """Build the 435.gromacs workload (ref input gromacs.tpr)."""
    proc = Process()
    proc.cwd = binary_dir + '435.gromacs/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'gromacs_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable, '-silent', '-deffnm',
                proc.cwd + 'gromacs.tpr', '-nice', '0']
    return proc
#436.cactusADM
def cactusADM():
    """Build the 436.cactusADM workload (ref input benchADM.par)."""
    proc = Process()
    proc.cwd = binary_dir + '436.cactusADM/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'cactusADM_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable, proc.cwd + 'benchADM.par']
    return proc
# 437.leslie3d
def leslie3d():
    """Build the 437.leslie3d workload; stdin comes from leslie3d.in."""
    proc = Process()
    proc.cwd = binary_dir + '437.leslie3d/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'leslie3d_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable]
    proc.input = proc.cwd + 'leslie3d.in'
    return proc
#444.namd
def namd():
    """Build the 444.namd workload (38 iterations over namd.input)."""
    proc = Process()
    proc.cwd = binary_dir + '444.namd/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'namd_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable, '--input', proc.cwd + 'namd.input',
                '--iterations', '38', '--output', 'namd.out']
    return proc
#445.gobmk
def gobmk():
    """Build the 445.gobmk workload (GTP mode; stdin from nngs.tst)."""
    proc = Process()
    proc.cwd = binary_dir + '445.gobmk/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'gobmk_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable, '--quiet', '--mode', 'gtp']
    proc.input = proc.cwd + 'nngs.tst'
    return proc
# 447.dealII TODO

#450.soplex
def soplex():
    """Build the 450.soplex workload (ref.mps with -m3500)."""
    proc = Process()
    proc.cwd = binary_dir + '450.soplex/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'soplex_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable, '-m3500', proc.cwd + 'ref.mps']
    return proc
#453.povray
def povray():
    """Build the 453.povray workload (SPEC-benchmark-ref.ini)."""
    proc = Process()
    proc.cwd = binary_dir + '453.povray/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'povray_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable, proc.cwd + 'SPEC-benchmark-ref.ini']
    return proc
#454.calculix
def calculix():
    """Build the 454.calculix workload (-i hyperviscoplastic)."""
    proc = Process()
    proc.cwd = binary_dir + '454.calculix/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'calculix_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable, '-i', proc.cwd + 'hyperviscoplastic']
    return proc
#456.hmmer
def hmmer():
    """Build the 456.hmmer workload (retro.hmm with fixed ref parameters)."""
    proc = Process()
    proc.cwd = binary_dir + '456.hmmer/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'hmmer_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable, '--fixed', '0', '--mean', '500', '--num',
                '500000', '--sd', '350', '--seed', '0', proc.cwd + 'retro.hmm']
    return proc
#458.sjeng
def sjeng():
    """Build the 458.sjeng workload (ref.txt)."""
    proc = Process()
    proc.cwd = binary_dir + '458.sjeng/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'sjeng_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable, proc.cwd + 'ref.txt']
    return proc
#459.GemsFDTD
def GemsFDTD():
    """Build the 459.GemsFDTD workload (no arguments, no stdin)."""
    proc = Process()
    proc.cwd = binary_dir + '459.GemsFDTD/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'GemsFDTD_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable]
    return proc
#462.libquantum
def libquantum():
    """Build the 462.libquantum workload (ref input: 1397 8).

    BUG FIX: ``process.cmd`` was previously assigned
    ``[process.executable],'1397','8'`` — a *tuple*
    ``([executable], '1397', '8')`` rather than an argv list — so the
    benchmark arguments were never passed.  Every sibling builder in this
    file assigns a flat list; do the same here.
    """
    process = Process()
    process.cwd = binary_dir + '462.libquantum/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd + 'libquantum_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable, '1397', '8']
    return process
#464.h264ref
def h264ref():
    """Build the 464.h264ref workload (baseline encoder config)."""
    proc = Process()
    proc.cwd = binary_dir + '464.h264ref/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'h264ref_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable, '-d',
                proc.cwd + 'foreman_ref_encoder_baseline.cfg']
    return proc
#470.lbm
def lbm():
    """Build the 470.lbm workload (3000 steps over 100_100_130_ldc.of)."""
    proc = Process()
    proc.cwd = binary_dir + '470.lbm/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'lbm_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable, '3000', 'reference.dat', '0', '0',
                proc.cwd + '100_100_130_ldc.of']
    return proc
#471.omnetpp
def omnetpp():
    """Build the 471.omnetpp workload (omnetpp.ini)."""
    proc = Process()
    proc.cwd = binary_dir + '471.omnetpp/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'omnetpp_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable, proc.cwd + 'omnetpp.ini']
    return proc
#473.astar
def astar():
    """Build the 473.astar workload (BigLakes2048.cfg, relative to cwd)."""
    proc = Process()
    proc.cwd = binary_dir + '473.astar/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'astar_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable, 'BigLakes2048.cfg']
    return proc
#481.wrf
def wrf():
    """Build the 481.wrf workload (namelist.input, relative to cwd)."""
    proc = Process()
    proc.cwd = binary_dir + '481.wrf/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'wrf_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable, 'namelist.input']
    return proc
#482.sphinx3
def sphinx3():
    """Build the 482.sphinx3 workload (livepretend: ctlfile . args.an4)."""
    proc = Process()
    proc.cwd = binary_dir + '482.sphinx3/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'sphinx_livepretend_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable, 'ctlfile', '.', 'args.an4']
    return proc
#483.xalancbmk TODO

#998.specrand
def specrand_i():
    """Build the 998.specrand (integer) workload."""
    proc = Process()
    proc.cwd = binary_dir + '998.specrand/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'specrand_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable, '1255432124', '234923']
    return proc
#999.specrand
def specrand_f():
    """Build the 999.specrand (floating-point) workload."""
    proc = Process()
    proc.cwd = binary_dir + '999.specrand/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    proc.executable = proc.cwd + 'specrand_base.amd64-m64-gcc42-nn'
    proc.cmd = [proc.executable, '1255432124', '234923']
    return proc
| KuroeKurose/gem5 | configs/common/cpu2006.py | Python | bsd-3-clause | 9,229 | [
"GAMESS",
"Gromacs",
"NAMD"
] | 33329f46a56b1a28d88e61fff575be484a8f193fefe426dc40d0fd3e51d1a051 |
"""
Translates vensim .mdl file to pieces needed by the builder module to write a python version of the
model. Everything that requires knowledge of vensim syntax should be in this file.
"""
import re
import parsimonious
from . import builder
from . import utils
import textwrap
import numpy as np
import os
def get_file_sections(file_str):
    """
    This is where we separate out the macros from the rest of the model file.
    Working based upon documentation at: https://www.vensim.com/documentation/index.html?macros.htm

    Macros will probably wind up in their own python modules eventually.

    Parameters
    ----------
    file_str

    Returns
    -------
    entries: list of dictionaries
        Each dictionary represents a different section of the model file, either a macro,
        or the main body of the model file. The dictionaries contain various elements:
        - returns: list of strings
            represents what is returned from a macro (for macros) or empty for main model
        - params: list of strings
            represents what is passed into a macro (for macros) or empty for main model
        - name: string
            the name of the macro, or '_main_' for main body of model
        - string: string
            string representing the model section

    Examples
    --------
    >>> get_file_sections(r'a~b~c| d~e~f| g~h~i|')
    [{'returns': [], 'params': [], 'name': '_main_', 'string': 'a~b~c| d~e~f| g~h~i|'}]

    """
    # PEG grammar: a file is an optional encoding marker followed by one or
    # more ":MACRO:" sections and/or the main model body.
    file_structure_grammar = r"""
    file = encoding? (macro / main)+
    macro = ":MACRO:" _ name _ "(" _ (name _ ","? _)+ _ ":"? _ (name _ ","? _)* _ ")" ~r".+?(?=:END OF MACRO:)" ":END OF MACRO:"
    main = !":MACRO:" ~r".+(?!:MACRO:)"

    name = basic_id / escape_group
    basic_id = ~r"[a-zA-Z][a-zA-Z0-9_\s]*"

    # between quotes, either escaped quote or character that is not a quote
    escape_group = "\"" ( "\\\"" / ~r"[^\"]" )* "\""
    encoding = ~r"\{[^\}]*\}"

    _ = ~r"[\s\\]*"  # whitespace character
    """  # the leading 'r' for 'raw' in this string is important for handling backslashes properly

    parser = parsimonious.Grammar(file_structure_grammar)
    tree = parser.parse(file_str)

    class FileParser(parsimonious.NodeVisitor):
        def __init__(self, ast):
            self.entries = []
            self.visit(ast)

        def visit_main(self, n, vc):
            # The main model body is recorded under the reserved name '_main_'.
            self.entries.append({'name': '_main_',
                                 'params': [],
                                 'returns': [],
                                 'string': n.text.strip()})

        def visit_macro(self, n, vc):
            # The indices into ``vc`` correspond to positions of the terms in
            # the ``macro`` grammar rule above: name, params, returns, body.
            name = vc[2]
            params = vc[6]
            returns = vc[10]
            text = vc[13]
            self.entries.append({'name': name,
                                 'params': [x.strip() for x in params.split(',')] if params else [],
                                 'returns': [x.strip() for x in
                                             returns.split(',')] if returns else [],
                                 'string': text.strip()})

        def generic_visit(self, n, vc):
            # Concatenate whatever the children produced, falling back to the
            # raw node text so terminals propagate upward.
            return ''.join(filter(None, vc)) or n.text or ''

    return FileParser(tree).entries
def get_model_elements(model_str):
    """
    Takes in a string representing model text and splits it into elements
    I think we're making the assumption that all newline characters are removed...
    Parameters
    ----------
    model_str : string
    Returns
    -------
    entries : array of dictionaries
        Each dictionary contains the components of a different model element, separated into the
        equation, units, and docstring.
    Examples
    --------
    # Basic Parsing:
    >>> get_model_elements(r'a~b~c| d~e~f| g~h~i|')
    [{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
    # Special characters are escaped within double-quotes:
    >>> get_model_elements(r'a~b~c| d~e"~"~f| g~h~i|')
    [{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e"~"', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
    >>> get_model_elements(r'a~b~c| d~e~"|"f| g~h~i|')
    [{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': '"|"f', 'unit': 'e', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
    # Double-quotes within escape groups are themselves escaped with backslashes:
    >>> get_model_elements(r'a~b~c| d~e"\\\"~"~f| g~h~i|')
    [{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e"\\\\"~"', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
    >>> get_model_elements(r'a~b~c| d~e~"\\\"|"f| g~h~i|')
    [{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': '"\\\\"|"f', 'unit': 'e', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
    >>> get_model_elements(r'a~b~c| d~e"x\\nx"~f| g~h~|')
    [{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e"x\\\\nx"', 'eqn': 'd'}, {'doc': '', 'unit': 'h', 'eqn': 'g'}]
    # Todo: Handle model-level or section-level documentation
    >>> get_model_elements(r'*** .model doc ***~ Docstring!| d~e~f| g~h~i|')
    [{'doc': 'Docstring!', 'unit': '', 'eqn': ''}, {'doc': 'f', 'unit': 'e', 'eqn': 'd'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
    # Handle control sections, returning appropriate docstring pieces
    >>> get_model_elements(r'a~b~c| ****.Control***~ Simulation Control Parameters | g~h~i|')
    [{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'i', 'unit': 'h', 'eqn': 'g'}]
    # Handle the model display elements (ignore them)
    >>> get_model_elements(r'a~b~c| d~e~f| \\\---///junk|junk~junk')
    [{'doc': 'c', 'unit': 'b', 'eqn': 'a'}, {'doc': 'f', 'unit': 'e', 'eqn': 'd'}]
    Notes
    -----
    - Tildes and pipes are not allowed in element docstrings, but we should still handle them there
    """
    # PEG grammar: elements are separated by '|' and their parts by '~';
    # the trailing sketch (diagram) section is matched but not collected.
    model_structure_grammar = r"""
    model = (entry / section)+ sketch?
    entry = element "~" element "~" element ("~" element)? "|"
    section = element "~" element "|"
    sketch = ~r".*" #anything
    # Either an escape group, or a character that is not tilde or pipe
    element = (escape_group / ~r"[^~|]")*
    # between quotes, either escaped quote or character that is not a quote
    escape_group = "\"" ( "\\\"" / ~r"[^\"]" )* "\""
    """
    parser = parsimonious.Grammar(model_structure_grammar)
    tree = parser.parse(model_str)
    class ModelParser(parsimonious.NodeVisitor):
        # Collects one dict per model element while walking the parse tree.
        def __init__(self, ast):
            self.entries = []
            self.visit(ast)
        def visit_entry(self, n, vc):
            # vc positions follow the `entry` rule: eqn ~ unit ~ doc ... |
            self.entries.append({'eqn': vc[0].strip(),
                                 'unit': vc[2].strip(),
                                 'doc': vc[4].strip(),
                                 'kind': 'entry'})
        def visit_section(self, n, vc):
            # The simulation-control section is dropped; other section headers
            # are kept so their text can contribute to the model docstring.
            if vc[2].strip() != "Simulation Control Parameters":
                self.entries.append({'eqn': '',
                                     'unit': '',
                                     'doc': vc[2].strip(),
                                     'kind': 'section'})
        def generic_visit(self, n, vc):
            # Concatenate visited children, falling back to the matched text.
            return ''.join(filter(None, vc)) or n.text or ''
    return ModelParser(tree).entries
def get_equation_components(equation_str):
    """
    Breaks down a string representing only the equation part of a model element.
    Recognizes the various types of model elements that may exist, and identifies them.
    Parameters
    ----------
    equation_str : basestring
        the first section in each model element - the full equation.
    Returns
    -------
    Returns a dictionary containing the following:
    real_name: basestring
        The name of the element as given in the original vensim file
    subs: list of strings
        list of subscripts or subscript elements
    expr: basestring
        the right-hand side of the equation, still in vensim syntax
    kind: basestring
        What type of equation have we found?
        - *component* - normal model expression or constant
        - *lookup* - a lookup table
        - *subdef* - a subscript definition
    Examples
    --------
    >>> get_equation_components(r'constant = 25')
    {'expr': '25', 'kind': 'component', 'subs': [], 'real_name': 'constant'}
    Notes
    -----
    in this function we dont create python identifiers, we use real names.
    This is so that when everything comes back together, we can manage
    any potential namespace conflicts properly
    """
    # PEG grammar distinguishing the three element flavours by their syntax:
    # 'name = expr' (component), 'name : a, b' (subscript definition), and
    # 'name (...)' (lookup, recognized via a lookahead on the open paren).
    component_structure_grammar = r"""
    entry = component / subscript_definition / lookup_definition
    component = name _ subscriptlist? _ "=" _ expression
    subscript_definition = name _ ":" _ subscript _ ("," _ subscript)*
    lookup_definition = name _ &"(" _ expression # uses lookahead assertion to capture whole group
    name = basic_id / escape_group
    subscriptlist = '[' _ subscript _ ("," _ subscript)* _ ']'
    expression = ~r".*" # expression could be anything, at this point.
    subscript = basic_id / escape_group
    basic_id = ~r"[a-zA-Z][a-zA-Z0-9_\s]*"
    escape_group = "\"" ( "\\\"" / ~r"[^\"]" )* "\""
    _ = ~r"[\s\\]*" # whitespace character
    """
    # replace any amount of whitespace with a single space
    equation_str = equation_str.replace('\\t', ' ')
    equation_str = re.sub(r"\s+", ' ', equation_str)
    parser = parsimonious.Grammar(component_structure_grammar)
    tree = parser.parse(equation_str)
    class ComponentParser(parsimonious.NodeVisitor):
        # Records the element name, subscripts, expression text and which
        # grammar alternative matched ('kind') while walking the tree.
        def __init__(self, ast):
            self.subscripts = []
            self.real_name = None
            self.expression = None
            self.kind = None
            self.visit(ast)
        def visit_subscript_definition(self, n, vc):
            self.kind = 'subdef'
        def visit_lookup_definition(self, n, vc):
            self.kind = 'lookup'
        def visit_component(self, n, vc):
            self.kind = 'component'
        def visit_name(self, n, vc):
            (name,) = vc
            self.real_name = name.strip()
        def visit_subscript(self, n, vc):
            (subscript,) = vc
            self.subscripts.append(subscript.strip())
        def visit_expression(self, n, vc):
            self.expression = n.text.strip()
        def generic_visit(self, n, vc):
            return ''.join(filter(None, vc)) or n.text
        def visit__(self, n, vc):
            # Collapse any whitespace node to a single space.
            return ' '
    parse_object = ComponentParser(tree)
    return {'real_name': parse_object.real_name,
            'subs': parse_object.subscripts,
            'expr': parse_object.expression,
            'kind': parse_object.kind}
def parse_units(units_str):
    """Pass the units string through unmodified.

    Placeholder for future parsing of the units text and of the
    [min, max, step] bounds that Vensim appends to it.

    Parameters
    ----------
    units_str : str
        The raw units section of a model element.

    Returns
    -------
    str
        The input string, untouched.

    Examples
    --------
    >>> parse_units('Widgets/Month [-10,10,1]')
    >>> parse_units('Month [0,?]')
    >>> parse_units('Widgets [0,100]')
    """
    return units_str
def parse_general_expression(element, namespace=None, subscript_dict=None, macro_list=None):
    """
    Parses a normal expression
    # its annoying that we have to construct and compile the grammar every time...
    Parameters
    ----------
    element: dictionary
        model element as produced by `get_equation_components`
    namespace : dictionary
        maps real (vensim) names to python-safe identifiers
    subscript_dict : dictionary
        maps subscript family names to lists of subscript elements
    macro_list: list of dictionaries
        [{'name': 'M', 'py_name':'m', 'filename':'path/to/file', 'args':['arg1', 'arg2']}]
    Returns
    -------
    translation
    new_elements: list of dictionaries
        If the expression contains builder functions, those builders will create new elements
        to add to our running list (that will eventually be output to a file) such as stock
        initialization and derivative funcs, etc.
    Examples
    --------
    >>> parse_general_expression({'expr': 'INTEG (FlowA, -10)',
    ...                           'py_name':'test_stock',
    ...                           'subs':None},
    ...                          {'FlowA': 'flowa'}),
    ({'kind': 'component', 'py_expr': "_state['test_stock']"},
     [{'kind': 'implicit',
       'subs': None,
       'doc': 'Provides initial conditions for test_stock function',
       'py_name': 'init_test_stock',
       'real_name': None,
       'unit': 'See docs for test_stock',
       'py_expr': '-10'},
      {'py_name': 'dtest_stock_dt',
       'kind': 'implicit',
       'py_expr': 'flowa',
       'real_name': None}])
    """
    if namespace is None:
        namespace = {}
    if subscript_dict is None:
        subscript_dict = {}
    # Mapping from vensim function names (lowercased) to their python
    # translations in numpy or the pysd `functions` module.
    functions = {
        # element-wise functions
        "abs": "abs", "integer": "int", "exp": "np.exp", "sin": "np.sin", "cos": "np.cos",
        "sqrt": "np.sqrt", "tan": "np.tan", "lognormal": "np.random.lognormal",
        "random normal": "functions.bounded_normal", "poisson": "np.random.poisson", "ln": "np.log",
        "exprnd": "np.random.exponential", "random uniform": "functions.random_uniform",
        "sum": "np.sum",
        "arccos": "np.arccos", "arcsin": "np.arcsin", "arctan": "np.arctan",
        "if then else": "functions.if_then_else", "step": "functions.step", "modulo": "np.mod",
        "pulse": "functions.pulse", "pulse train": "functions.pulse_train",
        "ramp": "functions.ramp", "min": "np.minimum", "max": "np.maximum",
        "active initial": "functions.active_initial", "xidz": "functions.xidz",
        "zidz": "functions.zidz",
        # vector functions
        "vmin": "np.min", "vmax": "np.max", "prod": "np.prod"
    }
    # Vensim constructs that generate additional model structure (stocks,
    # delays, smooths, ...). Each builder returns the python name to use in
    # the translated expression plus the new support elements it created.
    builders = {
        "integ": lambda expr, init: builder.add_stock(element['py_name'], element['subs'],
                                                      expr, init, subscript_dict),
        "delay1": lambda in_var, dtime: builder.add_n_delay(in_var, dtime, '0', '1',
                                                            element['subs'], subscript_dict),
        "delay1i": lambda in_var, dtime, init: builder.add_n_delay(in_var, dtime, init, '1',
                                                                   element['subs'], subscript_dict),
        "delay3": lambda in_var, dtime: builder.add_n_delay(in_var, dtime, '0', '3',
                                                            element['subs'], subscript_dict),
        "delay3i": lambda in_var, dtime, init: builder.add_n_delay(in_var, dtime, init, '3',
                                                                   element['subs'], subscript_dict),
        "delay n": lambda in_var, dtime, init, order: builder.add_n_delay(in_var, dtime,
                                                                          init, order,
                                                                          element['subs'],
                                                                          subscript_dict),
        "smooth": lambda in_var, dtime: builder.add_n_smooth(in_var, dtime, '0', '1',
                                                             element['subs'], subscript_dict),
        "smoothi": lambda in_var, dtime, init: builder.add_n_smooth(in_var, dtime, init, '1',
                                                                    element['subs'],
                                                                    subscript_dict),
        "smooth3": lambda in_var, dtime: builder.add_n_smooth(in_var, dtime, '0', '3',
                                                              element['subs'], subscript_dict),
        "smooth3i": lambda in_var, dtime, init: builder.add_n_smooth(in_var, dtime, init, '3',
                                                                     element['subs'],
                                                                     subscript_dict),
        "smooth n": lambda in_var, dtime, init, order: builder.add_n_smooth(in_var, dtime,
                                                                            init, order,
                                                                            element['subs'],
                                                                            subscript_dict),
        "initial": lambda initial_input: builder.add_initial(initial_input)
    }
    # Infix and prefix operator translations from vensim to python.
    in_ops = {
        "+": "+", "-": "-", "*": "*", "/": "/", "^": "**", "=": "==", "<=": "<=", "<>": "!=",
        "<": "<", ">=": ">=", ">": ">",
        ":and:": " and ", ":or:": " or "} # spaces important for word-based operators
    pre_ops = {
        "-": "-", ":not:": " not ", # spaces important for word-based operators
        "+": " " # space is important, so that and empty string doesn't slip through generic
    }
    # in the following, if lists are empty use non-printable character
    # everything needs to be escaped before going into the grammar, in case it includes quotes
    sub_names_list = [re.escape(x) for x in subscript_dict.keys()] or ['\\a']
    sub_elems_list = [re.escape(y) for x in subscript_dict.values() for y in x] or ['\\a']
    ids_list = [re.escape(x) for x in namespace.keys()] or ['\\a']
    in_ops_list = [re.escape(x) for x in in_ops.keys()]
    pre_ops_list = [re.escape(x) for x in pre_ops.keys()]
    if macro_list is not None and len(macro_list) > 0:
        macro_names_list = [x['name'] for x in macro_list]
    else:
        macro_names_list = ['\\a']
    # The grammar is built dynamically: the known identifiers, subscripts,
    # functions, operators, builders and macros are interpolated as
    # case-insensitive regex alternatives.
    expression_grammar = r"""
    expr_type = array / expr
    expr = _ pre_oper? _ (lookup_def / build_call / macro_call / call / parens / number / reference) _ (in_oper _ expr)?
    lookup_def = ~r"(WITH\ LOOKUP)"I _ "(" _ reference _ "," _ "(" _ ("[" ~r"[^\]]*" "]" _ ",")? ( "(" _ expr _ "," _ expr _ ")" ","? _ )+ _ ")" _ ")"
    call = (func / id) _ "(" _ (expr _ ","? _)* ")" # these don't need their args parsed...
    build_call = builder _ "(" _ arguments _ ")"
    macro_call = macro _ "(" _ arguments _ ")"
    parens = "(" _ expr _ ")"
    arguments = (expr _ ","? _)*
    reference = id _ subscript_list?
    subscript_list = "[" _ ((sub_name / sub_element) _ ","? _)+ "]"
    array = (number _ ("," / ";")? _)+ !~r"." # negative lookahead for anything other than an array
    number = ~r"\d+\.?\d*(e[+-]\d+)?"
    id = ~r"(%(ids)s)"I
    sub_name = ~r"(%(sub_names)s)"I # subscript names (if none, use non-printable character)
    sub_element = ~r"(%(sub_elems)s)"I # subscript elements (if none, use non-printable character)
    func = ~r"(%(funcs)s)"I # functions (case insensitive)
    in_oper = ~r"(%(in_ops)s)"I # infix operators (case insensitive)
    pre_oper = ~r"(%(pre_ops)s)"I # prefix operators (case insensitive)
    builder = ~r"(%(builders)s)"I # builder functions (case insensitive)
    macro = ~r"(%(macros)s)"I # macros from model file (if none, use non-printable character)
    _ = ~r"[\s\\]*" # whitespace character
    """ % {
        # In the following, we have to sort keywords in decreasing order of length so that the
        # peg parser doesn't quit early when finding a partial keyword
        'sub_names': '|'.join(reversed(sorted(sub_names_list, key=len))),
        'sub_elems': '|'.join(reversed(sorted(sub_elems_list, key=len))),
        'ids': '|'.join(reversed(sorted(ids_list, key=len))),
        'funcs': '|'.join(reversed(sorted(functions.keys(), key=len))),
        'in_ops': '|'.join(reversed(sorted(in_ops_list, key=len))),
        'pre_ops': '|'.join(reversed(sorted(pre_ops_list, key=len))),
        'builders': '|'.join(reversed(sorted(builders.keys(), key=len))),
        'macros': '|'.join(reversed(sorted(macro_names_list, key=len)))
    }
    parser = parsimonious.Grammar(expression_grammar)
    tree = parser.parse(element['expr'])
    class ExpressionParser(parsimonious.NodeVisitor):
        # Todo: at some point, we could make the 'kind' identification recursive on expression,
        # so that if an expression is passed into a builder function, the information
        # about whether it is a constant, or calls another function, goes with it.
        def __init__(self, ast):
            self.translation = ""
            self.kind = 'constant' # change if we reference anything else
            self.new_structure = []
            self.visit(ast)
        def visit_expr_type(self, n, vc):
            s = ''.join(filter(None, vc)).strip()
            self.translation = s
        def visit_expr(self, n, vc):
            s = ''.join(filter(None, vc)).strip()
            self.translation = s
            return s
        def visit_func(self, n, vc):
            self.kind = 'component'
            return functions[n.text.lower()]
        def visit_in_oper(self, n, vc):
            return in_ops[n.text.lower()]
        def visit_pre_oper(self, n, vc):
            return pre_ops[n.text.lower()]
        def visit_reference(self, n, vc):
            # References become calls to the generated python functions.
            self.kind = 'component'
            id_str = vc[0]
            return id_str + '()'
        def visit_id(self, n, vc):
            return namespace[n.text]
        def visit_lookup_def(self, n, vc):
            """ This exists because vensim has multiple ways of doing lookups.
            Which is frustrating."""
            x_val = vc[4]
            pairs = vc[11]
            # pairs holds the concatenated '(x,y)' tuples; split into the
            # alternating x and y coordinate lists.
            mixed_list = pairs.replace('(', '').replace(')', '').split(',')
            xs = mixed_list[::2]
            ys = mixed_list[1::2]
            string = "functions.lookup(%(x)s, [%(xs)s], [%(ys)s])" % {
                'x': x_val,
                'xs': ','.join(xs),
                'ys': ','.join(ys)
            }
            return string
        def visit_array(self, n, vc):
            if 'subs' in element and element['subs']: # first test handles when subs is not defined
                coords = utils.make_coord_dict(element['subs'], subscript_dict, terse=False)
                dims = [utils.find_subscript_name(subscript_dict, sub) for sub in element['subs']]
                shape = [len(coords[dim]) for dim in dims]
                if ';' in n.text or ',' in n.text:
                    # Explicit element list: ';' separates rows, ',' columns.
                    text = n.text.strip(';').replace(' ', '').replace(';', ',')
                    data = np.array([float(s) for s in text.split(',')]).reshape(shape)
                else:
                    # A single constant is broadcast over the whole shape.
                    data = np.tile(float(n.text), shape)
                datastr = np.array2string(data, separator=',').replace('\n', '').replace(' ', '')
                return textwrap.dedent("""\
                    xr.DataArray(data=%(datastr)s,
                    coords=%(coords)s,
                    dims=%(dims)s )""" % {
                    'datastr': datastr,
                    'coords': repr(coords),
                    'dims': repr(dims)})
            else:
                return n.text.replace(' ', '')
        def visit_subscript_list(self, n, vc):
            refs = vc[2]
            subs = [x.strip() for x in refs.split(',')]
            coordinates = utils.make_coord_dict(subs, subscript_dict)
            if len(coordinates):
                return '.loc[%s]' % repr(coordinates)
            else:
                return ' '
        def visit_build_call(self, n, vc):
            # Delegate to the matching builder; it returns the python name to
            # substitute plus any new support elements.
            call = vc[0]
            arglist = vc[4]
            self.kind = 'component'
            name, structure = builders[call.strip().lower()](*arglist)
            self.new_structure += structure
            return name
        def visit_macro_call(self, n, vc):
            call = vc[0]
            arglist = vc[4]
            self.kind = 'component'
            py_name = utils.make_python_identifier(call)[0]
            macro = [x for x in macro_list if x['py_name'] == py_name][0] # should match once
            name, structure = builder.add_macro(macro['py_name'], macro['file_name'],
                                                macro['params'], arglist)
            self.new_structure += structure
            return name
        def visit_arguments(self, n, vc):
            arglist = [x.strip(',') for x in vc]
            return arglist
        def visit__(self, n, vc):
            """ Handles whitespace characters"""
            return ''
        def generic_visit(self, n, vc):
            return ''.join(filter(None, vc)) or n.text
    parse_object = ExpressionParser(tree)
    return ({'py_expr': parse_object.translation,
             'kind': parse_object.kind,
             'arguments': ''},
            parse_object.new_structure)
def parse_lookup_expression(element):
    """ This syntax parses lookups that are defined with their own element """
    # Grammar for '([range],(x1,y1),(x2,y2),...)' style lookup declarations;
    # the bracketed display range is matched but otherwise ignored.
    lookup_grammar = r"""
    lookup = _ "(" _ "[" ~r"[^\]]*" "]" _ "," _ ( "(" _ number _ "," _ number _ ")" ","? _ )+ ")"
    number = ("+"/"-")? ~r"\d+\.?\d*(e[+-]\d+)?"
    _ = ~r"[\s\\]*" # whitespace character
    """
    parser = parsimonious.Grammar(lookup_grammar)
    tree = parser.parse(element['expr'])
    class LookupParser(parsimonious.NodeVisitor):
        def __init__(self, ast):
            self.translation = ""
            self.new_structure = []
            self.visit(ast)
        def visit__(self, n, vc):
            # remove whitespace
            return ''
        def visit_lookup(self, n, vc):
            # vc[9] holds the concatenated '(x,y)' pairs; split them into the
            # alternating x and y coordinate lists.
            pairs = vc[9]
            mixed_list = pairs.replace('(', '').replace(')', '').split(',')
            xs = mixed_list[::2]
            ys = mixed_list[1::2]
            string = "functions.lookup(x, [%(xs)s], [%(ys)s])" % {
                'xs': ','.join(xs),
                'ys': ','.join(ys)
            }
            self.translation = string
        def generic_visit(self, n, vc):
            return ''.join(filter(None, vc)) or n.text
    parse_object = LookupParser(tree)
    # The generated python lookup function takes its argument as 'x'.
    return {'py_expr': parse_object.translation,
            'arguments': 'x'}
def translate_section(section, macro_list):
    """Translate one file section (a macro or the main model) to a python file.

    Parameters
    ----------
    section : dict
        One entry from `get_file_sections`, carrying 'string', 'params' and
        'file_name' keys.
    macro_list : list of dict
        All macro sections of the file, used to recognize macro calls inside
        expressions.

    Returns
    -------
    str
        The name of the python file written by the builder.
    """
    model_elements = get_model_elements(section['string'])
    # extract equation components
    model_docstring = ''
    for entry in model_elements:
        if entry['kind'] == 'entry':
            entry.update(get_equation_components(entry['eqn']))
        elif entry['kind'] == 'section':
            model_docstring += entry['doc']
    # make python identifiers and track for namespace conflicts
    namespace = {'TIME': 'time', 'Time': 'time'}  # Initialize with builtins
    # add macro parameters when parsing a macro section
    for param in section['params']:
        name, namespace = utils.make_python_identifier(param, namespace)
    # add macro functions to namespace
    for macro in macro_list:
        # Bug fix: compare with '!=' — 'is not' tested string *identity*,
        # which only worked by accident of CPython interning.
        if macro['name'] != '_main_':
            name, namespace = utils.make_python_identifier(macro['name'], namespace)
    # add model elements
    for element in model_elements:
        if element['kind'] not in ['subdef', 'section']:
            element['py_name'], namespace = utils.make_python_identifier(element['real_name'],
                                                                         namespace)
    # Create a namespace for the subscripts
    # as these aren't used to create actual python functions, but are just labels on arrays,
    # they don't actually need to be python-safe
    subscript_dict = {e['real_name']: e['subs'] for e in model_elements if e['kind'] == 'subdef'}
    # Parse components to python syntax.
    for element in model_elements:
        if element['kind'] == 'component' and 'py_expr' not in element:
            # Todo: if there is new structure, it should be added to the namespace...
            translation, new_structure = parse_general_expression(element,
                                                                  namespace=namespace,
                                                                  subscript_dict=subscript_dict,
                                                                  macro_list=macro_list)
            element.update(translation)
            model_elements += new_structure
        elif element['kind'] == 'lookup':
            element.update(parse_lookup_expression(element))
    # send the pieces to be built
    build_elements = [e for e in model_elements if e['kind'] not in ['subdef', 'section']]
    builder.build(build_elements,
                  subscript_dict,
                  namespace,
                  section['file_name'])
    return section['file_name']
def translate_vensim(mdl_file):
    """
    Translate a vensim model file into an equivalent python module.

    Parameters
    ----------
    mdl_file : basestring
        file path of a vensim model file to translate to python
    Returns
    -------
    outfile_name : basestring
        path of the python file generated for the main model section;
        each macro section is written to its own sibling file.
    Examples
    --------
    >>> translate_vensim('../tests/test-models/tests/subscript_3d_arrays/test_subscript_3d_arrays.mdl')
    #>>> translate_vensim('../../tests/test-models/tests/abs/test_abs.mdl')
    #>>> translate_vensim('../../tests/test-models/tests/exponentiation/exponentiation.mdl')
    #>>> translate_vensim('../../tests/test-models/tests/limits/test_limits.mdl')
    """
    # Bug fix: the 'U' (universal newlines) mode flag was deprecated and
    # removed in Python 3.11; plain 'r' already does newline translation.
    with open(mdl_file, 'r') as in_file:
        text = in_file.read()
    outfile_name = mdl_file.replace('.mdl', '.py')
    out_dir = os.path.dirname(outfile_name)
    # extract model elements
    file_sections = get_file_sections(text.replace('\n', ''))
    # Todo: build up a representation of macros including parameters, filenames, that can be passed
    # to the various builders.
    for section in file_sections:
        if section['name'] == '_main_':
            # define outfile name
            section['file_name'] = outfile_name
        else:
            # Macros get their own python module next to the main output file.
            section['py_name'] = utils.make_python_identifier(section['name'])[0]
            section['file_name'] = out_dir + '/' + section['py_name'] + '.py'
    # Bug fix: '!=' instead of 'is not' — identity comparison of strings is
    # fragile and raises SyntaxWarning on modern Python.
    macro_list = [s for s in file_sections if s['name'] != '_main_']
    for section in file_sections:
        translate_section(section, macro_list)
    return outfile_name
| SimonStrong/pysd | pysd/vensim2py.py | Python | mit | 29,615 | [
"VisIt"
] | be561aa5ef566fb3fdf71c4f4df953d147490a59cccf0779e1146e9aeaf166f2 |
# -*- coding: utf-8 -*-
# TAMkin is a post-processing toolkit for normal mode analysis, thermochemistry
# and reaction kinetics.
# Copyright (C) 2008-2012 Toon Verstraelen <Toon.Verstraelen@UGent.be>, An Ghysels
# <An.Ghysels@UGent.be> and Matthias Vandichel <Matthias.Vandichel@UGent.be>
# Center for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all
# rights reserved unless otherwise stated.
#
# This file is part of TAMkin.
#
# TAMkin is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "TAMkin: A Versatile Package for Vibrational Analysis and Chemical Kinetics",
# An Ghysels, Toon Verstraelen, Karen Hemelsoet, Michel Waroquier and Veronique
# Van Speybroeck, Journal of Chemical Information and Modeling, 2010, 50,
# 1736-1750W
# http://dx.doi.org/10.1021/ci100099g
#
# TAMkin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from __future__ import print_function, division
import os
import stat
import subprocess
import pkg_resources
from molmod.test.common import tmpdir
def check_example(dirname, fn_script, fns_data):
    """Run an example in a temporary directory and check its exit code.
    Parameters
    ----------
    dirname : str
        The directory with the example, relative to the __file__ of where you call this
        function.
    fn_script : str
        The name of the script to be executed, assumed to be present in the given
        directory.
    fns_data : list of str:
        A list of data files needed by the example, which will be copied over to the
        temporary directory.

    Raises
    ------
    AssertionError
        If the example exits with a non-zero return code; the captured stdout
        and stderr are included in the error message.
    """
    with tmpdir(__name__, dirname + fn_script) as dntmp:
        # Copy the script and its data files into the temporary directory.
        for fn in [fn_script] + fns_data:
            with pkg_resources.resource_stream("tamkin", "examples/{}/{}".format(dirname, fn)) as fin:
                # Create the directory if needed.
                if '/' in fn:
                    subdntmp = os.path.join(dntmp, os.path.dirname(fn))
                    if not os.path.isdir(subdntmp):
                        os.makedirs(subdntmp)
                # Extract the file manually.
                with open(os.path.join(dntmp, fn), 'wb') as fout:
                    fout.write(fin.read())
        # Let the example import the development checkout of the package.
        env = dict(os.environ)
        root_dir = os.getcwd()
        env['PYTHONPATH'] = root_dir + ':' + env.get('PYTHONPATH', '')
        # Mark the script as executable for the owner.
        path_script = os.path.join(dntmp, fn_script)
        os.chmod(path_script, os.stat(path_script).st_mode | stat.S_IXUSR)
        command = ["python", fn_script]
        proc = subprocess.Popen(command, stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                cwd=dntmp, env=env)
        outdata, errdata = proc.communicate()
        if proc.returncode != 0:
            # Typo fix in the diagnostic message: 'faild' -> 'failed'.
            lines = [
                'Command failed', str(command), 'Standard output', '+'*80, outdata.decode('utf-8'),
                '+'*80, 'Standard error', '+'*80, errdata.decode('utf-8'), '+'*80]
            raise AssertionError('\n'.join(lines))
# Each test below runs one shipped example end-to-end through check_example,
# naming the example directory, its driver script, and the data files it needs.
def test_example_001():
    check_example("001_ethane", "thermo.py", ['gaussian.fchk'])
def test_example_002():
    check_example("002_linear_co2", "thermo.py", ['gaussian.fchk'])
def test_example_003():
    check_example("003_pentane", "thermo.py", ['gaussian.fchk'])
def test_example_005():
    check_example("005_acrylamide_reaction", "reaction.py",
                  ['aa.fchk', 'aarad.fchk', 'paats.fchk'])
def test_example_006():
    check_example("006_5T_ethyl_ethene_addition", "reaction.py",
                  ['react.fchk', 'ts.fchk'])
def test_example_007():
    check_example("007_mfi_propene_reaction", "reaction.py",
                  ['Zp_p_react.28aug.com', 'Zp_p_TS.28aug.com', 'Zp_p_react.28aug.fchk',
                   'Zp_p_react.14mei.fchk', 'Zp_p_TS.28aug.fchk',
                   '5Tp_p_TS.oniom21apr_HF.fchk'])
def test_example_008():
    check_example("008_ethane_rotor", "thermo.py",
                  ['freq/gaussian.fchk', 'scan/gaussian.log'])
def test_example_009():
    check_example("009_ethyl_ethene", "reaction.py",
                  ['ethyl/freq/gaussian.fchk', 'ethene/freq/gaussian.fchk',
                   'ts_ad1/freq_gauche/gaussian.fchk', 'ts_ad1/freq_trans/gaussian.fchk',
                   'ethyl/scan/gaussian.log', 'ts_ad1/scan1/gaussian.log',
                   'ts_ad1/scan2/gaussian.log'])
def test_example_012():
    check_example("012_ethyl_ethene_scaling", "reaction.py",
                  ['ethyl/freq/gaussian.fchk', 'ethene/freq/gaussian.fchk',
                   'ts_ad1/freq_gauche/gaussian.fchk'])
def test_example_013():
    check_example("013_butane", "thermo.py",
                  ['freq/gaussian.fchk', 'scan/gaussian.log'])
def test_example_014():
    check_example("014_pentane_mbh", "thermo.py", ['gaussian.fchk'])
def test_example_015():
    check_example("015_kie", "reaction.py", ['reactant.fchk', 'trans.fchk'])
def test_example_016():
    check_example("016_modes", "modes.py", ['PUNCH'])
def test_example_017():
    check_example("017_activationkineticmodel", "reaction.py",
                  ['water/gaussian.fchk', 'VO_AA_OH_H2O/gaussian.fchk',
                   'VO_AA_OOtBu/gaussian.fchk', 'TBHP/gaussian.fchk', 'TS/TS.fchk',
                   'cyclohexene/gaussian.fchk'])
def test_example_018():
    check_example("018_physisorption", "adsorption.py",
                  ['m062x/argon/gaussian.fchk', 'm062x/benzene/freq/gaussian.fchk',
                   'm062x/complex/freq/gaussian.fchk'])
def test_example_019():
    check_example("019_ethyl_ethene_simple", "kinetic.py",
                  ['ethyl.fchk', 'ethene.fchk', 'ts_trans.fchk'])
def test_example_020():
    check_example("020_butane_conformers", "equilibrium.py",
                  ['trans.fchk', 'gauche.fchk'])
def test_example_021():
    check_example("021_water_formation", "formation.py",
                  ['oxygen.fchk', 'hydrogen.fchk', 'water.fchk'])
| molmod/tamkin | tamkin/test/test_examples.py | Python | gpl-3.0 | 6,629 | [
"Gaussian"
] | fe72a21826067987e849bfae6ac8f0fa040c41904e7c07d25b0a4cdadde4e8d6 |
import numpy as np
from numpy.random import seed
class AdalineSGD(object):
    """ADAptive LInear NEuron classifier trained by stochastic gradient descent.

    Weights are updated incrementally, one training sample at a time.

    Parameters
    ----------
    eta : float
        Learning rate (between 0.0 and 1.0).
    n_iter : int
        Number of passes (epochs) over the training dataset.
    shuffle : bool (default: True)
        If True, shuffle the training data before every epoch to prevent cycles.
    random_state : int (default: None)
        Seed for NumPy's global random generator, used when shuffling.

    Attributes
    ----------
    w_ : 1d-array
        Weights after fitting; ``w_[0]`` is the bias unit.
    cost_ : list
        Average squared-error cost per epoch.
    """

    def __init__(self, eta=0.01, n_iter=10, shuffle=True, random_state=None):
        self.eta = eta
        self.n_iter = n_iter
        self.w_initialized = False
        self.shuffle = shuffle
        if random_state:
            # Seeds the global numpy RNG that _shuffle draws from.
            seed(random_state)

    def fit(self, X, y):
        """Learn the weights from the training data.

        Parameters
        ----------
        X : {array-like}, shape = [n_samples, n_features]
            Training vectors.
        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------
        self : object
        """
        self._initialize_weights(X.shape[1])
        self.cost_ = []
        for _ in range(self.n_iter):
            if self.shuffle:
                X, y = self._shuffle(X, y)
            # One weight update per sample; collect the per-sample costs.
            epoch_costs = [self._update_weights(sample, label)
                           for sample, label in zip(X, y)]
            self.cost_.append(sum(epoch_costs) / len(y))
        return self

    def partial_fit(self, X, y):
        """Update the weights on new data without reinitializing them."""
        if not self.w_initialized:
            self._initialize_weights(X.shape[1])
        if y.ravel().shape[0] > 1:
            # A batch of samples: update once per row.
            for sample, label in zip(X, y):
                self._update_weights(sample, label)
        else:
            # A single sample.
            self._update_weights(X, y)
        return self

    def _update_weights(self, xi, target):
        """Apply the Adaline rule for one sample and return its cost."""
        residual = target - self.net_input(xi)
        self.w_[1:] += self.eta * xi.dot(residual)
        self.w_[0] += self.eta * residual
        return 0.5 * residual ** 2

    def _initialize_weights(self, m):
        """Set all m+1 weights (bias included) to zero."""
        self.w_ = np.zeros(1 + m)
        self.w_initialized = True

    def _shuffle(self, X, y):
        """Return X and y reordered by a single random permutation."""
        order = np.random.permutation(len(y))
        return X[order], y[order]

    def net_input(self, X):
        """Compute the weighted sum of the inputs plus the bias."""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def activation(self, X):
        """Identity (linear) activation: identical to the net input."""
        return self.net_input(X)

    def predict(self, X):
        """Return the class label (+1 or -1) after the unit step."""
        return np.where(self.activation(X) >= 0.0, 1, -1)
| PhenixI/machine-learning | 1_supervised_classification/6-Perceptron and Neural Networks/Perceptron_python/AdalineSGD.py | Python | gpl-2.0 | 3,075 | [
"NEURON"
] | 23af9e8ba1af8ca1d4acbda2f71615ac04c0b217d7ec500304843e385251da68 |
# Apache HTTP requests manipulation module
# NOTE: this is a Python 2 demonstration script for the DIRAC REST interface:
# it obtains an OAuth2 token, submits a job, and polls its status.
import requests
import json
import os
# The REST server url
REST_URL = 'https://dirac.ba.infn.it:9910'
###########################################
# Get the access token first
# GET request parameters
params = {'grant_type':'client_credentials',
'group':'gridit_user',
'setup':'Bari-Production'}
# The user certificate, password will be asked for to the user
# before request submission
certificate = ('/home/managai/.globus/usercert.pem',
'/home/managai/.globus/userkey.pem')
# NOTE(review): verify=False disables TLS server-certificate checking
# throughout this script; acceptable for testing only.
result = requests.get(REST_URL+'/oauth2/token',params=params,cert=certificate,verify=False)
print result ## DEBUG
# the output is returned as a json encoded string, decode it here
resultDict = json.loads( result.text )
print resultDict ## DEBUG
access_token = resultDict['token']
print access_token ## DEBUG
###################################
#### Submit a job
###################################
####################################################
# Prepare the job description ( manifest ) first
manifest = {
'Executable': '/usr/bin/printenv',
'StdOut' : 'std.out',
'StdError' : 'std.err',
'OutputSandbox' : ['std.out','std.err'],
'JobName' : 'REST_test'
}
print manifest ## DEBUG
# add json encoded job manifest to the data to be transfered
# to the REST server
data = { 'manifest' : json.dumps( manifest ) }
print data ## DEBUG
##############################################################
# Input sandbox files as Multipart-Encoded files
#files = { 'file' : ('rest_test.py', open('rest_test.py','rb') ),
# 'file1' : ('rest1_test.py', open('rest1_test.py','rb') ) }
#files = { 'file' : ('ila1', open('ila1','rb') ),
# 'file1' : ('ila2', open('ila2','rb') ) }
#print files ## DEBUG
###############################################################
# Submit the job now, POST http request creates a new job,
# from now on access_token should be passed as the request parameter
# verify=False is to not to verify the server certificate by the client
result = requests.post(REST_URL+'/jobs',
data=data,
# files = files,
params={'access_token':access_token},
verify=False)
print result ## DEBUG
resultDict = json.loads( result.text )
print resultDict ## DEBUG
# resulting job ID(s) are returned as a list ( e.g. when bulk submission )
jobID = resultDict['jids'][0]
########################################
# Get job status
result = requests.get(REST_URL+'/jobs/%d' % jobID,
params={'access_token':access_token},
verify=False)
resultDict = json.loads( result.text )
print "Status for job %d:" % jobID, resultDict['status']
| SuperDIRAC/TESTDIRAC | sample-script/rest/simplest.py | Python | gpl-3.0 | 2,775 | [
"DIRAC"
] | 71ddebc9fb3c668ddd231b56216d905ab084a598e5783171debb2f2b255d615b |
''' Module for the applications built for the COSMO project
Quim Ballabrera, May 2017
EGL, 06/2020:
small adjustments of text fonts
A heap variable MESSAGE has been introduce to store "print" messages
'''
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
from tkinter import filedialog
from tkinter import font as tkfont
import json
import os
import io
# Python 2/3 compatibility shim: `unicode` only exists on Python 2.
# Catch NameError specifically so unrelated errors are not masked by
# the previous bare `except:`.
try:
    to_unicode = unicode
except NameError:
    to_unicode = str
from cosmo.tools import empty
from cosmo.tools import exists
from cosmo import COSMO_ROOT
from cosmo import COSMO_CONF_PATH
from cosmo import COSMO_CONF_DATA
# ===================
class parameters:
# ===================
    ''' Class whose attributes will contain the options for running
        the COSMO Lagrangian Model'''

    __version__ = "1.0"
    __author__ = "Quim Ballabrerera"
    __date__ = "January 2018"

    def __init__(self):
        # Locate the active per-user configuration folder declared in
        # the global COSMO configuration file.
        with open(COSMO_CONF_DATA) as infile:
            conf = json.load(infile)
        COSMO_CONF_NAME = conf['COSMO_CONF_NAME']
        COSMO_CONF = COSMO_CONF_PATH + COSMO_CONF_NAME + os.sep

        # File where this tool's own options are persisted.
        self.FILECONF = os.path.join(COSMO_CONF,'blm.conf')

        # Path and name of the BLM executable.
        self.PATH = tk.StringVar()
        self.BIN = tk.StringVar()

        # Open vector-field objects and the indices of the files
        # currently selected for U, V and the advected tracer T.
        self.VEC = []
        self.UINDEX = 0
        self.VINDEX = None
        self.TINDEX = None

        # Zonal velocity file and its coordinate/variable names.
        self.UFILE = tk.StringVar()
        self.Ux = tk.StringVar()
        self.Uy = tk.StringVar()
        self.Uz = tk.StringVar()
        self.Ut = tk.StringVar()
        self.Uu = tk.StringVar()

        # Meridional velocity file and its coordinate/variable names.
        self.VFILE = tk.StringVar()
        self.Vx = tk.StringVar()
        self.Vy = tk.StringVar()
        self.Vz = tk.StringVar()
        self.Vt = tk.StringVar()
        self.Vv = tk.StringVar()

        # Optional advected-scalar file and its coordinate/variable names.
        self.TFILE = tk.StringVar()
        self.Tx = tk.StringVar()
        self.Ty = tk.StringVar()
        self.Tz = tk.StringVar()
        self.Tt = tk.StringVar()
        self.Tvname = tk.StringVar()

        # Input/output files: release positions (INI), final positions
        # (FIN) and the output trajectory.
        self.INI_USE = tk.BooleanVar()
        self.INI = tk.StringVar()
        self.FIN = tk.StringVar()
        self.TRAJECTORY = tk.StringVar()
        self.INI_USE.set(False)

        # Single-point release: longitude, latitude, time (numeric or
        # date string) and which of the two time forms to use.
        self.xo = tk.DoubleVar()
        self.yo = tk.DoubleVar()
        self.to = tk.DoubleVar()
        self.do = tk.StringVar()
        self.to_use = tk.BooleanVar()

        # Time-management options.
        self.reverse = tk.BooleanVar()
        self.stationary = tk.BooleanVar()
        self.record_use = tk.BooleanVar()
        self.record = tk.IntVar()
        self.idt = tk.IntVar()   # internal time step
        self.edt = tk.IntVar()   # external time step

        # Cloud-of-floats simulation options.
        self.seed = tk.IntVar()
        self.nfloats = tk.IntVar()
        self.Rx = tk.DoubleVar()
        self.Ry = tk.DoubleVar()
        self.Rt = tk.DoubleVar()

        # Default "Manufacturer" parameters:
        #
        self.PATH.set(os.path.join(COSMO_ROOT,'bin/'))
        self.BIN.set('blm')
        self.TRAJECTORY.set('blm.nc')
        self.INI.set('')
        self.FIN.set('blm.end')
        self.xo.set(None)
        self.yo.set(None)
        self.to.set(0.0)
        self.do.set('')
        self.record.set(None)
        self.stationary.set(False)
        self.edt.set(None)
        self.idt.set(1200)
        self.to_use.set(True)
        self.record_use.set(False)
        self.seed.set(None)
        self.nfloats.set(10)
        self.Rx.set(0.10)
        self.Ry.set(0.10)
        self.Rt.set(0)

        self.MESSAGE = "\n"

        # Load the saved configuration if one exists; otherwise write a
        # fresh configuration file with the defaults above.
        if exists(self.FILECONF):
            self.MESSAGE += 'Reading BLM configuration'
            self.load(self.FILECONF)
        else:
            self.MESSAGE += 'Saving BLM configuration'
            self.save(self.FILECONF)

    def load(self,filename):
    # ======================
        '''Read configuration values from file'''

        with open(filename) as infile:
            conf = json.load(infile)
        self.PATH.set(conf['PATH'])
        self.BIN.set(conf['BIN'])
        self.TRAJECTORY.set(conf['TRAJECTORY'])
        self.INI.set(conf['INI'])
        self.FIN.set(conf['FIN'])
        self.INI_USE.set(conf['INI_USE'])
        self.to_use.set(conf['TO_USE'])
        self.seed.set(conf['SEED'])
        self.nfloats.set(conf['NFLOATS'])
        self.Rx.set(conf['RX'])
        self.Ry.set(conf['RY'])
        self.Rt.set(conf['RT'])
        self.edt.set(conf['EDT'])
        self.idt.set(conf['IDT'])

    def save(self,filename):
    # ======================
        '''Write the current configuration values to a JSON file.'''
        conf = {}
        conf['PATH'] = self.PATH.get()
        conf['BIN'] = self.BIN.get()
        conf['TRAJECTORY'] = self.TRAJECTORY.get()
        conf['INI'] = self.INI.get()
        conf['FIN'] = self.FIN.get()
        conf['INI_USE'] = self.INI_USE.get()
        conf['TO_USE'] = self.to_use.get()
        # IntVar.get() raises when the widget entry is empty or not a
        # number; record None in that case.
        try:
            conf['SEED'] = self.seed.get()
        except:
            conf['SEED'] = None
        conf['NFLOATS'] = self.nfloats.get()
        conf['RX'] = self.Rx.get()
        conf['RY'] = self.Ry.get()
        conf['RT'] = self.Rt.get()
        try:
            conf['EDT'] = self.edt.get()
        except:
            conf['EDT'] = None
        conf['IDT'] = self.idt.get()
        with io.open(filename,'w',encoding='utf8') as outfile:
            str_ = json.dumps(conf,ensure_ascii=False, \
                              sort_keys=True,          \
                              indent=2,                \
                              separators=(',',': '))
            outfile.write(to_unicode(str_)+'\n')
# =============
def inout(CLM):
# =============
    """Placeholder: no input/output command-line options generated yet."""
    return ''
# =====================
def Basic_options(CLM):
# =====================
    """Assemble the basic BLM command-line option string from CLM.

    Returns the option string, or '' after showing a dialog when a
    required release value is missing or invalid.
    """
    # Zonal velocity: file, coordinates and variable name.
    parts = [' -U',
             ' file=%s' % CLM.UFILE.get(),
             ' x=%s' % CLM.Ux.get(),
             ' y=%s' % CLM.Uy.get()]
    if not empty(CLM.Uz.get()):
        parts.append(' z=%s' % CLM.Uz.get())
    if not empty(CLM.Ut.get()):
        parts.append(' t=%s' % CLM.Ut.get())
    parts.append(' u=%s' % CLM.Uu.get())

    # Meridional velocity: every field is optional except the variable.
    parts.append(' -V')
    if not empty(CLM.VFILE.get()):
        parts.append(' file=%s' % CLM.VFILE.get())
    if not empty(CLM.Vx.get()):
        parts.append(' x=%s' % CLM.Vx.get())
    if not empty(CLM.Vy.get()):
        parts.append(' y=%s' % CLM.Vy.get())
    if not empty(CLM.Vz.get()):
        parts.append(' z=%s' % CLM.Vz.get())
    if not empty(CLM.Vt.get()):
        parts.append(' t=%s' % CLM.Vt.get())
    parts.append(' v=%s' % CLM.Vv.get())

    # Output files.
    if not empty(CLM.TRAJECTORY.get()):
        parts.append(' -traj %s' % CLM.TRAJECTORY.get())
    if not empty(CLM.FIN.get()):
        parts.append(' -end %s' % CLM.FIN.get())

    # Release: either a release file or a single (xo, yo) point.
    if CLM.INI_USE.get():
        if empty(CLM.INI.get()):
            messagebox.showinfo(message='No release file has been selected')
            return ''
        parts.append(' -release %s' % CLM.INI.get())
    else:
        try:
            parts.append(' -xo %s' % CLM.xo.get())
        except:
            messagebox.showinfo(message='Invalid release longitude')
            return ''
        try:
            parts.append(' -yo %s' % CLM.yo.get())
        except:
            messagebox.showinfo(message='Invalid release latitude')
            return ''

    # Time steps; an empty widget makes IntVar.get() raise, in which
    # case the option is simply omitted.
    try:
        parts.append(' -idt %s' % CLM.idt.get())
    except:
        pass
    try:
        parts.append(' -edt %s' % CLM.edt.get())
    except:
        pass
    return ''.join(parts)
# ==============
class WinConfig:
# ==============
    """Tk window for editing, saving and restoring BLM run options."""

    def __init__(self,master,CLM):
        """Build the BLM option editor GUI inside `master`, bound to the
        `CLM` parameters object."""

        # Selectable U/V files come from the vector fields already
        # opened in the application; '' means "not selected".
        self.ULIST = [CLM.VEC[i].UFILENAME.get() for i in range(len(CLM.VEC))]
        self.ULIST.append('')
        self.VLIST = [CLM.VEC[i].VFILENAME.get() for i in range(len(CLM.VEC))]
        self.VLIST.append('')

        def switch_mode():
            # Toggle widgets between release-file and single-point mode.
            if CLM.INI_USE.get():
                self.wxo.configure(state='disabled')
                self.wyo.configure(state='disabled')
                self.wto.configure(state='disabled')
                self.wdo.configure(state='disabled')
            else:
                self.wxo.configure(state='!disabled')
                self.wyo.configure(state='!disabled')
                if CLM.to_use.get():
                    self.wto.configure(state='!disabled')
                    self.wdo.configure(state='disabled')
                else:
                    self.wto.configure(state='disabled')
                    self.wdo.configure(state='!disabled')

        def open_traj():
            # Pick the output trajectory file (NetCDF).
            nn = tk.filedialog.asksaveasfilename(title='Save', \
                                                 filetypes=[('Netcdf','*.nc')], \
                                                 confirmoverwrite=True)
            if len(nn) == 0:
                pass
            else:
                CLM.TRAJECTORY.set(nn)

        def open_init():
            # Pick the float release (initial positions) file.
            nn = filedialog.askopenfile()
            if nn is None:
                pass
            else:
                CLM.INI.set(nn.name)

        def select_to():
            # Switch between numeric initial time (to) and date string (do).
            if CLM.to_use.get():
                self.wto.configure(state='!disabled')
                self.wdo.configure(state='disabled')
            else:
                self.wto.configure(state='disabled')
                self.wdo.configure(state='!disabled')

        def select_record():
            # Enable the record-number entry only when requested.
            if CLM.record_use.get():
                self.wrn.configure(state='!disabled')
            else:
                self.wrn.configure(state='disabled')

        def select_ufile():
            # A U file was chosen: copy its coordinate/variable names
            # into CLM and refresh the V variable name accordingly.
            CLM.UINDEX = self.ULIST.index(CLM.UFILE.get())
            uid = CLM.VEC[CLM.UINDEX].U.varid
            CLM.Ux.set(CLM.VEC[CLM.UINDEX].U.icdf.xname)
            CLM.Uy.set(CLM.VEC[CLM.UINDEX].U.icdf.yname)
            CLM.Uz.set(CLM.VEC[CLM.UINDEX].U.icdf.zname)
            CLM.Ut.set(CLM.VEC[CLM.UINDEX].U.icdf.tname)
            CLM.Uu.set(CLM.VEC[CLM.UINDEX].U.icdf.vname[uid])
            if CLM.VINDEX is None:
                vid = CLM.VEC[CLM.UINDEX].V.varid
                CLM.Vv.set(CLM.VEC[CLM.UINDEX].V.icdf.vname[vid])
            else:
                vid = CLM.VEC[CLM.VINDEX].V.varid
                CLM.Vv.set(CLM.VEC[CLM.VINDEX].V.icdf.vname[vid])

        def select_vfile():
            # A V file was chosen (or cleared): an empty selection falls
            # back to the V variable of the selected U file.
            if empty(CLM.VFILE.get()):
                CLM.VINDEX = None
                CLM.Vx.set('')
                CLM.Vy.set('')
                CLM.Vz.set('')
                CLM.Vt.set('')
                vid = CLM.VEC[CLM.UINDEX].V.varid
                CLM.Vv.set(CLM.VEC[CLM.UINDEX].V.icdf.vname[vid])
            else:
                CLM.VINDEX = self.VLIST.index(CLM.VFILE.get())
                vid = CLM.VEC[CLM.VINDEX].V.varid
                CLM.Vx.set(CLM.VEC[CLM.VINDEX].V.icdf.xname)
                CLM.Vy.set(CLM.VEC[CLM.VINDEX].V.icdf.yname)
                CLM.Vz.set(CLM.VEC[CLM.VINDEX].V.icdf.zname)
                CLM.Vt.set(CLM.VEC[CLM.VINDEX].V.icdf.tname)
                CLM.Vv.set(CLM.VEC[CLM.VINDEX].V.icdf.vname[vid])

        def select_tfile():
            # An advected-scalar file was chosen (or cleared).
            if empty(CLM.TFILE.get()):
                CLM.TINDEX = None
                CLM.Tx.set('')
                CLM.Ty.set('')
                CLM.Tz.set('')
                CLM.Tt.set('')
                CLM.Tvname.set('')
            else:
                CLM.TINDEX = self.ULIST.index(CLM.TFILE.get())
                CLM.Tx.set(CLM.VEC[CLM.TINDEX].icdf.xname)
                CLM.Ty.set(CLM.VEC[CLM.TINDEX].icdf.yname)
                CLM.Tz.set(CLM.VEC[CLM.TINDEX].icdf.zname)
                CLM.Tt.set(CLM.VEC[CLM.TINDEX].icdf.tname)

        def loadconf():
            # Restore options from the saved configuration file.
            print('Loading default configuration')
            CLM.load(CLM.FILECONF)

        def saveconf():
            # Save the current options as the default configuration.
            print('Saving default configuration')
            CLM.save(CLM.FILECONF)

        # Configuration menu (Restore / Save).
        menubar = tk.Menu(master)
        menu = tk.Menu(menubar,tearoff=0)
        menubar.add_cascade(label='Configuration',menu=menu)
        menu.add_command(label='Restore',command=loadconf)
        menu.add_command(label='Save',command=saveconf)
        try:
            master.config(menu=menubar)
        except AttributeError:
            # master is a toplevel window (Python 2.4/Tkinter 1.63)
            master.tk.call(master, "config", "-menu", menubar)

        font_bold = tkfont.Font(font='TkDefaultFont').copy()
        font_bold['weight']='bold'

        # Executable path and binary name.
        F0 = ttk.Frame(master,padding=5)
        ttk.Label(F0,text='PATH',font=font_bold).grid(row=0,column=0,padx=3,pady=3)
        ttk.Entry(F0,textvariable=CLM.PATH,width=80).grid(row=0,column=1,columnspan=8)
        ttk.Label(F0,text='BIN',font=font_bold).grid(row=1,column=0,padx=3,pady=3)
        ttk.Entry(F0,textvariable=CLM.BIN,width=80).grid(row=1,column=1,columnspan=8)
        F0.grid()

        # Define tabs:
        self.nb = ttk.Notebook(master)
        self.page1 = ttk.Frame(self.nb)
        self.page2 = ttk.Frame(self.nb)
        self.page3 = ttk.Frame(self.nb)
        self.page4 = ttk.Frame(self.nb)
        self.page5 = ttk.Frame(self.nb)
        self.page6 = ttk.Frame(self.nb)

        self.nb.add(self.page1,text='Zonal Velocity')
        self.nb.add(self.page2,text='Meridional Velocity')
        self.nb.add(self.page3,text='Advected scalars')
        self.nb.add(self.page4,text='Input/Output files')
        self.nb.add(self.page5,text='Time management')
        self.nb.add(self.page6,text='Cloud simulation')

        self.nb.grid()

        # Initialize the CLM files and variables:
        # AAA
        CLM.UFILE.set(self.ULIST[CLM.UINDEX])
        uid = CLM.VEC[CLM.UINDEX].U.varid
        CLM.Ux.set(CLM.VEC[CLM.UINDEX].U.icdf.xname)
        CLM.Uy.set(CLM.VEC[CLM.UINDEX].U.icdf.yname)
        CLM.Uz.set(CLM.VEC[CLM.UINDEX].U.icdf.zname)
        CLM.Ut.set(CLM.VEC[CLM.UINDEX].U.icdf.tname)
        CLM.Uu.set(CLM.VEC[CLM.UINDEX].U.icdf.vname[uid])
        if CLM.VINDEX is None:
            vid = CLM.VEC[CLM.UINDEX].V.varid
            CLM.Vv.set(CLM.VEC[CLM.UINDEX].V.icdf.vname[vid])
        else:
            vid = CLM.VEC[CLM.VINDEX].V.varid
            CLM.Vv.set(CLM.VEC[CLM.VINDEX].V.icdf.vname[vid])

        # The main window
        #
        # Tab 1: zonal velocity file and its coordinate names.
        F1 = ttk.Frame(self.page1,padding=5)
        ttk.Label(F1,text='Zonal velocity file -U',width=25, \
                  font=font_bold).grid(row=0,column=0,columnspan=4)
        ttk.Label(F1,text='file =') \
            .grid(row=1,column=0,padx=3,sticky='e')
        ubox = ttk.Combobox(F1,textvariable=CLM.UFILE,width=80, \
                            values=self.ULIST)
        ubox.grid(row=1,column=1,columnspan=8)
        ubox.bind('<<ComboboxSelected>>',lambda e: select_ufile())
        ttk.Label(F1,text='x =') \
            .grid(row=2,column=0,padx=3,sticky='e')
        ttk.Entry(F1,textvariable=CLM.Ux,width=50).grid(row=2,column=1,columnspan=5)
        ttk.Label(F1,text='y =') \
            .grid(row=3,column=0,padx=3,sticky='e')
        ttk.Entry(F1,textvariable=CLM.Uy,width=50).grid(row=3,column=1,columnspan=5)
        ttk.Label(F1,text='z =') \
            .grid(row=4,column=0,padx=3,sticky='e')
        ttk.Entry(F1,textvariable=CLM.Uz,width=50).grid(row=4,column=1,columnspan=5)
        ttk.Label(F1,text='t =') \
            .grid(row=5,column=0,padx=3,sticky='e')
        ttk.Entry(F1,textvariable=CLM.Ut,width=50).grid(row=5,column=1,columnspan=5)
        ttk.Label(F1,text='variable =',width=8,justify='right') \
            .grid(row=6,column=0,padx=3,sticky='e')
        ttk.Entry(F1,textvariable=CLM.Uu,width=50).grid(row=6,column=1,columnspan=5)
        F1.grid(pady=5)

        # Tab 2: meridional velocity file and its coordinate names.
        F2 = ttk.Frame(self.page2,padding=5)
        ttk.Label(F2,text='Meridional velocity file -V',width=25, \
                  font=font_bold).grid(row=0,column=0,columnspan=4)
        ttk.Label(F2,text='file =') \
            .grid(row=1,column=0,padx=3,sticky='e')
        vbox = ttk.Combobox(F2,textvariable=CLM.VFILE,width=80, \
                            values=self.VLIST)
        vbox.grid(row=1,column=1,columnspan=8)
        vbox.bind('<<ComboboxSelected>>',lambda e: select_vfile())
        ttk.Label(F2,text='x =') \
            .grid(row=2,column=0,padx=3,sticky='e')
        ttk.Entry(F2,textvariable=CLM.Vx,width=50).grid(row=2,column=1,columnspan=5)
        ttk.Label(F2,text='y =') \
            .grid(row=3,column=0,padx=3,sticky='e')
        ttk.Entry(F2,textvariable=CLM.Vy,width=50).grid(row=3,column=1,columnspan=5)
        ttk.Label(F2,text='z =') \
            .grid(row=4,column=0,padx=3,sticky='e')
        ttk.Entry(F2,textvariable=CLM.Vz,width=50).grid(row=4,column=1,columnspan=5)
        ttk.Label(F2,text='t =') \
            .grid(row=5,column=0,padx=3,sticky='e')
        ttk.Entry(F2,textvariable=CLM.Vt,width=50).grid(row=5,column=1,columnspan=5)
        ttk.Label(F2,text='variable =',width=8,justify='right') \
            .grid(row=6,column=0,padx=3,sticky='e')
        ttk.Entry(F2,textvariable=CLM.Vv,width=50).grid(row=6,column=1,columnspan=5)
        F2.grid(pady=5)

        # Tab 3: optional advected scalar file.
        F3 = ttk.Frame(self.page3,padding=5)
        ttk.Label(F3,text='Advected parameter file -T',width=25, \
                  font=font_bold).grid(row=0,column=0,columnspan=4)
        ttk.Label(F3,text='file =') \
            .grid(row=1,column=0,padx=3,sticky='e')
        tbox = ttk.Combobox(F3,textvariable=CLM.TFILE,width=80, \
                            values=self.ULIST)
        tbox.grid(row=1,column=1,columnspan=8)
        tbox.bind('<<ComboboxSelected>>',lambda e: select_tfile())
        #ttk.Entry(F3,textvariable=CLM.TFILE,width=80).grid(row=1,column=1,columnspan=8)
        ttk.Label(F3,text='x =') \
            .grid(row=2,column=0,padx=3,sticky='e')
        ttk.Entry(F3,textvariable=CLM.Tx,width=50).grid(row=2,column=1,columnspan=5)
        ttk.Label(F3,text='y =') \
            .grid(row=3,column=0,padx=3,sticky='e')
        ttk.Entry(F3,textvariable=CLM.Ty,width=50).grid(row=3,column=1,columnspan=5)
        ttk.Label(F3,text='z =') \
            .grid(row=4,column=0,padx=3,sticky='e')
        ttk.Entry(F3,textvariable=CLM.Tz,width=50).grid(row=4,column=1,columnspan=5)
        ttk.Label(F3,text='t =') \
            .grid(row=5,column=0,padx=3,sticky='e')
        ttk.Entry(F3,textvariable=CLM.Tt,width=50).grid(row=5,column=1,columnspan=5)
        ttk.Label(F3,text='variable =',width=8,justify='right') \
            .grid(row=6,column=0,padx=3,sticky='e')
        ttk.Entry(F3,textvariable=CLM.Tvname,width=50).grid(row=6,column=1,columnspan=5)
        F3.grid(pady=5)

        # Release-point frame, attached to the main window (not a tab).
        F4 = ttk.Frame(master,padding=5)
        ttk.Label(F4,text='Initial float position',width=25, \
                  font=font_bold).grid(row=0,column=0,columnspan=4)
        ttk.Checkbutton(F4,text='Use INIT file (See Input/Output files tab)', \
                        variable=CLM.INI_USE,command=switch_mode).grid(row=1,column=1,padx=3)
        ttk.Label(F4,text='xo =').grid(row=2,column=0,padx=3)
        self.wxo = ttk.Entry(F4,textvariable=CLM.xo,width=50)
        self.wxo.grid(row=2,column=1,columnspan=5)
        ttk.Label(F4,text='yo =').grid(row=3,column=0,padx=3)
        self.wyo = ttk.Entry(F4,textvariable=CLM.yo,width=50)
        self.wyo.grid(row=3,column=1,columnspan=5)
        ttk.Label(F4,text='to =').grid(row=4,column=0,padx=3)
        self.wto = ttk.Entry(F4,textvariable=CLM.to,width=50)
        self.wto.grid(row=4,column=1,columnspan=5)
        ttk.Checkbutton(F4,text='Use initial time',variable=CLM.to_use,command=select_to) \
            .grid(row=4,column=6,padx=[5,1])
        ttk.Label(F4,text='do =').grid(row=5,column=0,padx=3)
        self.wdo = ttk.Entry(F4,textvariable=CLM.do,width=50)
        self.wdo.grid(row=5,column=1,columnspan=5)
        F4.grid(pady=5)

        # Apply the initial enabled/disabled state of the time widgets.
        if CLM.to_use.get():
            self.wto.configure(state='!disabled')
            self.wdo.configure(state='disabled')
        else:
            self.wto.configure(state='disabled')
            self.wdo.configure(state='!disabled')

        # Tab 4: input/output file selection.
        F5 = ttk.Frame(self.page4,padding=5)
        ttk.Label(F5,text='Input/Output files',width=25, \
                  font=font_bold).grid(row=0,column=0,columnspan=4)
        ttk.Label(F5,text='Trajectory =').grid(row=1,column=0,padx=3)
        ttk.Entry(F5,textvariable=CLM.TRAJECTORY,width=50).grid(row=1,column=1,columnspan=5)
        ttk.Button(F5,text='Select',padding=3,command=open_traj).grid(row=1,column=6,padx=[5,1])
        ttk.Label(F5,text='Initial positions =').grid(row=2,column=0,padx=3)
        ttk.Entry(F5,textvariable=CLM.INI,width=50).grid(row=2,column=1,columnspan=5)
        ttk.Button(F5,text='Select',padding=3,command=open_init).grid(row=2,column=6,padx=[5,1])
        ttk.Label(F5,text='Final positions =').grid(row=3,column=0,padx=3)
        ttk.Entry(F5,textvariable=CLM.FIN,width=50).grid(row=3,column=1,columnspan=5)
        F5.grid(pady=5)

        # Tab 5: time-management options.
        F6 = ttk.Frame(self.page5,padding=5)
        ttk.Label(F6,text='Time Management',width=25, \
                  font=font_bold).grid(row=0,column=0,columnspan=4)
        ttk.Label(F6,text='Stationary =').grid(row=1,column=0,padx=3)
        ttk.Entry(F6,textvariable=CLM.stationary,width=50).grid(row=1,column=1,columnspan=5)
        ttk.Label(F6,text='Record =').grid(row=2,column=0,padx=3)
        self.wrn = ttk.Entry(F6,textvariable=CLM.record,width=50)
        self.wrn.grid(row=2,column=1,columnspan=5)
        ttk.Checkbutton(F6,text='Use record',variable=CLM.record_use,command=select_record) \
            .grid(row=2,column=6,padx=[5,1])
        ttk.Label(F6,text='External dt =').grid(row=3,column=0,padx=3)
        ttk.Entry(F6,textvariable=CLM.edt,width=50).grid(row=3,column=1,columnspan=5)
        ttk.Label(F6,text='Internal dt =').grid(row=4,column=0,padx=3)
        ttk.Entry(F6,textvariable=CLM.idt,width=50).grid(row=4,column=1,columnspan=5)
        F6.grid(pady=5)

        if CLM.record_use.get():
            self.wrn.configure(state='!disabled')
        else:
            self.wrn.configure(state='disabled')

        # Tab 6: cloud-of-floats simulation options.
        F7 = ttk.Frame(self.page6,padding=5)
        ttk.Label(F7,text='Time Management',width=25, \
                  font=font_bold).grid(row=0,column=0,columnspan=4)
        ttk.Label(F7,text='Random Seed =').grid(row=1,column=0,padx=3)
        ttk.Entry(F7,textvariable=CLM.seed,width=50).grid(row=1,column=1,columnspan=5)
        ttk.Label(F7,text='Num. Floats =').grid(row=2,column=0,padx=3)
        ttk.Entry(F7,textvariable=CLM.nfloats,width=50).grid(row=2,column=1,columnspan=5)
        ttk.Label(F7,text='X Radius =').grid(row=3,column=0,padx=3)
        ttk.Entry(F7,textvariable=CLM.Rx,width=50).grid(row=3,column=1,columnspan=5)
        ttk.Label(F7,text='Y Radius =').grid(row=4,column=0,padx=3)
        ttk.Entry(F7,textvariable=CLM.Ry,width=50).grid(row=4,column=1,columnspan=5)
        ttk.Label(F7,text='T Radius =').grid(row=5,column=0,padx=3)
        ttk.Entry(F7,textvariable=CLM.Rt,width=50).grid(row=5,column=1,columnspan=5)
        F7.grid(pady=5)
| quimbp/cosmo | modules/cosmo/blm.py | Python | mit | 21,313 | [
"NetCDF"
] | 08153c8a4c61b259f3ae398824941bac6f9dc4b6840af9749da642106f27d2f9 |
from collections import defaultdict
import json
import difflib
import re
import math
from mhapi import skills
from mhapi.model import SharpnessLevel, _break_find
WEAKPART_WEIGHT = 0.5
def floor(x):
    """Round x down to the nearest whole number, returned as an int."""
    whole = math.floor(x)
    return int(whole)
def raw_damage(true_raw, sharpness, affinity, monster_hitbox, motion):
    """
    Raw damage dealt to a monster part: the hitbox-independent raw
    damage scaled by the part's raw hitbox percentage, floored.
    """
    per_hit = raw_damage_nohitbox(true_raw, sharpness, affinity, motion)
    return floor(per_hit * monster_hitbox / 100.0)
def raw_damage_nohitbox(true_raw, sharpness, affinity, motion):
    """
    Hitbox-independent raw damage: true raw scaled by the sharpness raw
    modifier, the expected critical (affinity) factor and the motion value.
    """
    sharp_mod = SharpnessLevel.raw_modifier(sharpness)
    crit_factor = 1 + (affinity / 400.0)
    return true_raw * sharp_mod * crit_factor * motion / 100.0
def element_damage(raw_element, sharpness, monster_ehitbox):
    """
    Elemental damage dealt to a monster part: the hitbox-independent
    elemental damage scaled by the part's elemental hitbox percentage,
    floored.  Independent of the attack's motion value.
    """
    per_hit = element_damage_nohitbox(raw_element, sharpness)
    return floor(per_hit * monster_ehitbox / 100.0)
def element_damage_nohitbox(raw_element, sharpness):
    """
    Hitbox-independent elemental damage: elemental attack scaled by the
    sharpness element modifier.  Independent of the motion value.
    """
    return raw_element * SharpnessLevel.element_modifier(sharpness)
class MotionType(object):
    """Damage-type constants an attack motion can deal."""
    CUT = "cut"        # slicing damage (uses the part's cut hitbox)
    IMPACT = "impact"  # blunt damage (uses the part's impact hitbox)
    FIXED = "fixed"    # fixed damage, unaffected by hitbox values
class MotionValue(object):
    """A single attack motion: its name, damage types and per-hit powers."""

    def __init__(self, name, types, powers):
        self.name = name
        self.types = types
        self.powers = powers
        # Mean power over every hit of the motion.
        self.average = sum(powers) / len(powers)
class WeaponTypeMotionValues(object):
    """All motion values of one weapon type, addressable by motion name."""

    def __init__(self, weapon_type, motion_data):
        self.weapon_type = weapon_type
        self.motion_values = dict(
            (entry["name"],
             MotionValue(entry["name"], entry["type"], entry["power"]))
            for entry in motion_data)
        # Mean of the per-motion average powers for this weapon type.
        self.average = (sum(mv.average
                            for mv in self.motion_values.values())
                        / len(self))

    def __len__(self):
        return len(self.motion_values)

    def keys(self):
        return list(self.motion_values.keys())

    def __getitem__(self, key):
        return self.motion_values[key]
class MotionValueDB(object):
    """Motion-value database for every weapon type, loaded from a JSON file."""

    def __init__(self, json_path):
        with open(json_path) as f:
            self._raw_data = json.load(f)
        self.motion_values_map = dict()
        for entry in self._raw_data:
            name = entry["name"]
            # The data file labels Sword and Shield as plain "Sword".
            wtype = "Sword and Shield" if name == "Sword" else name
            self.motion_values_map[wtype] = WeaponTypeMotionValues(
                wtype, entry["motions"])

    def __getitem__(self, weapon_type):
        return self.motion_values_map[weapon_type]

    def keys(self):
        return list(self.motion_values_map.keys())

    def __len__(self):
        return len(self.motion_values_map)
class WeaponType(object):
    """
    Enumeration of weapon types plus their damage category and
    attack-display multiplier.
    """
    SWITCH_AXE = "Switch Axe"
    HAMMER = "Hammer"
    HUNTING_HORN = "Hunting Horn"
    GREAT_SWORD = "Great Sword"
    CHARGE_BLADE = "Charge Blade"
    LONG_SWORD = "Long Sword"
    INSECT_GLAIVE = "Insect Glaive"
    LANCE = "Lance"
    GUNLANCE = "Gunlance"
    HEAVY_BOWGUN = "Heavy Bowgun"
    SWORD_AND_SHIELD = "Sword and Shield"
    DUAL_BLADES = "Dual Blades"
    LIGHT_BOWGUN = "Light Bowgun"
    BOW = "Bow"

    IMPACT = "impact"
    CUT = "cut"
    SHOT = "shot"
    MIXED = "cut/impact"

    # Multiplier from true raw to the displayed attack value.
    _multiplier = {
        "Switch Axe": 5.4,
        "Hammer": 5.2,
        "Hunting Horn": 5.2,
        "Great Sword": 4.8,
        "Charge Blade": 3.6,
        "Long Sword": 3.3,
        "Insect Glaive": 3.1,
        "Lance": 2.3,
        "Gunlance": 2.3,
        "Heavy Bowgun": 1.5,
        "Sword and Shield": 1.4,
        "Dual Blades": 1.4,
        "Light Bowgun": 1.3,
        "Bow": 1.2,
    }

    @classmethod
    def all(cls):
        """Return the names of every weapon type."""
        return list(cls._multiplier)

    @classmethod
    def damage_type(cls, weapon_type):
        """Return the damage category dealt by the given weapon type."""
        if weapon_type in (cls.LIGHT_BOWGUN, cls.HEAVY_BOWGUN, cls.BOW):
            return cls.SHOT
        if weapon_type == cls.LANCE:
            return cls.MIXED
        if weapon_type in (cls.HAMMER, cls.HUNTING_HORN):
            return cls.IMPACT
        return cls.CUT

    @classmethod
    def multiplier(cls, weapon_type):
        """Return the true-raw to displayed-attack multiplier."""
        return cls._multiplier[weapon_type]
class WeaponMonsterDamage(object):
"""
Class for calculating how much damage a weapon does to a monster.
Does not include overall monster defense.
"""
    def __init__(self, weapon_row, monster_row, monster_damage, motion,
                 sharp_plus=False, breakable_parts=None,
                 attack_skill=skills.AttackUp.NONE,
                 critical_eye_skill=skills.CriticalEye.NONE,
                 element_skill=skills.ElementAttackUp.NONE,
                 awaken=False, artillery_level=0, limit_parts=None,
                 frenzy_bonus=0, blunt_power=False, is_true_attack=False):
        """Derive effective weapon stats (true raw, sharpness, affinity,
        element) from the weapon/monster rows plus armor skills, then
        compute per-part damage via _calculate_damage().

        sharp_plus selects the Sharpness+1/+2 bar (0/False, 1 or 2);
        awaken enables the weapon's hidden element; limit_parts, when
        given, restricts the calculation to those part names.
        """
        self.weapon = weapon_row
        self.monster = monster_row
        self.monster_damage = monster_damage
        self.motion = motion
        self.sharp_plus = sharp_plus
        self.breakable_parts = breakable_parts
        self.attack_skill = attack_skill
        self.critical_eye_skill = critical_eye_skill
        self.element_skill = element_skill
        self.awaken = awaken
        self.artillery_level = artillery_level
        self.blunt_power = blunt_power
        self.is_true_attack = is_true_attack
        self.limit_parts = limit_parts
        # 15 normally for overcoming the virus, 30 with frenzy res skill
        assert frenzy_bonus in (0, 15, 30)
        self.frenzy_bonus = frenzy_bonus

        self.chaotic = False

        # part name -> PartDamage accumulator.
        self.damage_map = defaultdict(PartDamage)
        self.average = 0
        self.weakness_weighted = 0
        self.best_weighted = 0
        self.break_weighted = 0

        # map of part -> (map of burst_level -> (raw, ele, burst))
        self.cb_phial_damage = defaultdict(dict)

        self.weapon_type = self.weapon["wtype"]
        # Convert displayed attack to true raw unless the caller already
        # supplies true values.
        if is_true_attack:
            self.true_raw = self.weapon["attack"]
        else:
            self.true_raw = (self.weapon["attack"]
                             / WeaponType.multiplier(self.weapon_type))
        # Pick the sharpness bar: base, +1 or +2.
        if sharp_plus == 1:
            self.sharpness = self.weapon.sharpness_plus.max
        elif sharp_plus == 2:
            self.sharpness = self.weapon.sharpness_plus2.max
        else:
            self.sharpness = self.weapon.sharpness.max
        #print "sharpness=", self.sharpness
        if self.weapon["affinity"]:
            if (isinstance(self.weapon["affinity"], str)
                    and "/" in self.weapon["affinity"]):
                self.chaotic = True
                # Handle chaotic gore affinity, e.g. -35/10. This means that
                # 35% of the time it does a negative critical (75% damage)
                # and 10% of the time does a positive critical (125%
                # damage). If frenzied (overcome virus which lasts 45
                # seconds), the negative affinity becomes positive
                # instead (35 + 10 = 45 in the example).
                self.affinity = sum(
                    abs(int(x)) if self.frenzy_bonus else int(x)
                    for x in self.weapon["affinity"].split("/"))
            else:
                self.affinity = int(self.weapon["affinity"])
        else:
            self.affinity = 0
        self.affinity += self.frenzy_bonus
        self.damage_type = WeaponType.damage_type(self.weapon_type)
        self.etype = self.weapon["element"]
        self.eattack = self.weapon["element_attack"]
        # Second element/status (e.g. Dual Blades).
        self.etype2 = self.weapon["element_2"]
        self.eattack2 = self.weapon["element_2_attack"]
        # Awaken exposes the weapon's hidden element.
        if not self.etype and self.awaken:
            self.etype = self.weapon.awaken
            self.eattack = self.weapon.awaken_attack
        if self.eattack:
            self.eattack = int(self.eattack)
        else:
            self.eattack = 0
        if self.eattack2:
            self.eattack2 = int(self.eattack2)
        else:
            self.eattack2 = 0
        # Displayed element values are 10x the true values.
        if not self.is_true_attack and self.eattack:
            self.eattack /= 10
            if self.eattack2:
                self.eattack2 /= 10

        # Apply armor-skill modifiers to raw, affinity and element.
        self.true_raw = skills.AttackUp.modified(attack_skill,
                                                 self.true_raw)
        self.affinity = skills.CriticalEye.modified(critical_eye_skill,
                                                    self.affinity)
        self.eattack = skills.ElementAttackUp.modified(element_skill,
                                                       self.eattack)
        self.eattack2 = skills.ElementAttackUp.modified(element_skill,
                                                        self.eattack2)

        # Bludgeoner-style bonus: flat true raw added at low sharpness.
        if self.blunt_power:
            if self.sharpness in (SharpnessLevel.RED, SharpnessLevel.ORANGE):
                self.true_raw += 30
            elif self.sharpness == SharpnessLevel.YELLOW:
                self.true_raw += 25
            elif self.sharpness == SharpnessLevel.GREEN:
                self.true_raw += 15

        self.parts = []
        self.break_count = 0
        # Damage summaries under different targeting assumptions,
        # filled in by _calculate_damage().
        self.averages = dict(
            uniform=0,
            raw=0,
            element=0,
            weakpart_raw=0,
            weakpart_element=0,
        )
        # (part name, hitbox value) of the part weakest to raw/element.
        self.max_raw_part = (None, -1)
        self.max_element_part = (None, -1)

        self._calculate_damage()
@property
def attack(self):
if self.is_true_attack:
return self.true_raw
return self.true_raw * WeaponType.multiplier(self.weapon_type)
    def _calculate_damage(self):
        """Populate damage_map with per-part raw/element damage, track
        breakable and weakest parts, and fill the `averages` summaries."""
        for row in self.monster_damage._rows:
            # TODO: refactor to take advantage of new model
            part = row["body_part"]
            alt = None
            # Names like "Head (Enraged)" carry an alternate state in
            # parentheses.
            m = re.match(r"([^(]+) \(([^)]+)\)", part)
            if m:
                part = m.group(1)
                alt = m.group(2)
            if self.limit_parts is not None and part not in self.limit_parts:
                continue
            # A cut value of -1 marks a row without usable hitbox data.
            if row["cut"] == -1:
                continue
            hitbox = 0
            hitbox_cut = int(row["cut"])
            hitbox_impact = int(row["impact"])
            if self.damage_type == WeaponType.CUT:
                hitbox = hitbox_cut
            elif self.damage_type == WeaponType.IMPACT:
                hitbox = hitbox_impact
            elif self.damage_type == WeaponType.MIXED:
                # Info from /u/ShadyFigure, see
                # https://www.reddit.com/r/MonsterHunter/comments/3fr2u0/124th_weekly_stupid_question_thread/cts3hz8?context=3
                hitbox = max(hitbox_cut, hitbox_impact * .72)
            raw = raw_damage(self.true_raw, self.sharpness, self.affinity,
                             hitbox, self.motion)
            element = 0
            ehitbox = 0
            if self.etype in "Fire Water Ice Thunder Dragon".split():
                ehitbox = int(row[str(self.etype.lower())])
                element = element_damage(self.eattack, self.sharpness, ehitbox)
                if self.etype2:
                    # handle dual blades double element/status
                    element = element / 2.0
                    if self.etype2 in "Fire Water Ice Thunder Dragon".split():
                        ehitbox2 = int(row[str(self.etype2.lower())])
                        element2 = element_damage(self.eattack2,
                                                  self.sharpness, ehitbox2)
                        element += element2 / 2.0
            part_damage = self.damage_map[part]
            part_damage.set_damage(raw, element, hitbox, ehitbox, state=alt)
            if not part_damage.part:
                part_damage.part = part
            if alt is None:
                # Only default-state rows drive breakability and the
                # weakest-part bookkeeping.
                if (self.breakable_parts
                    and _break_find(part, list(self.monster_damage.parts.keys()),
                                    self.breakable_parts)):
                    part_damage.breakable = True
                if hitbox > self.max_raw_part[1]:
                    self.max_raw_part = (part, hitbox)
                if ehitbox > self.max_element_part[1]:
                    self.max_element_part = (part, ehitbox)
        # Drop parts that never appeared in their default (no-state) form.
        for part in list(self.damage_map.keys()):
            if None not in self.damage_map[part].states:
                #print "Failed to parse part:", part
                del self.damage_map[part]
        for part, d in self.damage_map.items():
            if d.is_breakable():
                self.break_count += 1
        self.parts = list(self.damage_map.keys())
        # Summary averages under different targeting assumptions.
        self.averages["uniform"] = self.uniform()
        self.averages["raw"] = self.weighted_raw()
        self.averages["element"] = self.weighted_element()
        self.averages["weakpart_raw"] = self.weakpart_weighted_raw()
        self.averages["weakpart_element"] = self.weakpart_weighted_element()
        self.averages["break_raw"] = self.break_weakpart_raw()
        self.averages["break_element"] = self.break_weakpart_element()
        self.averages["break_only"] = self.break_only()
        self._calculate_cb_phial_damage()
def _calculate_cb_phial_damage(self):
if self.weapon_type != "Charge Blade":
return
if self.weapon.phial == "Impact":
fn = cb_impact_phial_damage
else:
fn = cb_element_phial_damage
for part in self.parts:
part_damage = self.damage_map[part]
hitbox = part_damage.hitbox
ehitbox = part_damage.ehitbox
for level in (0, 1, 2, 3, 5):
damage_tuple = fn(self.true_raw, self.eattack, self.sharpness,
self.affinity, hitbox, ehitbox, level,
shield_charged=True,
artillery_level=self.artillery_level)
self.cb_phial_damage[part][level] = damage_tuple
def uniform(self):
average = 0.0
for part, damage in self.damage_map.items():
average += damage.average()
return average / len(self.damage_map)
def weighted_raw(self):
"""
Average damage weighted by non-broken raw hitbox. For each part the
damage is averaged across broken vs non-broken, weighted by the
default of broken for 25% of the hits.
"""
average = 0.0
total_hitbox = 0.0
for part, damage in self.damage_map.items():
average += damage.average() * damage.hitbox
total_hitbox += damage.hitbox
if total_hitbox == 0:
return 0
return average / total_hitbox
def weighted_element(self):
"""
Average damage weighted by non-broken element hitbox.
"""
average = 0.0
total_ehitbox = 0.0
for part, damage in self.damage_map.items():
average += damage.average() * damage.ehitbox
total_ehitbox += damage.ehitbox
if total_ehitbox == 0:
return 0
return average / total_ehitbox
def weakpart_weighted_raw(self, weak_weight=WEAKPART_WEIGHT):
if len(self.parts) == 1:
other_weight = 0
weak_weight = 1
else:
other_weight = (1 - weak_weight) / (len(self.parts) - 1)
average = 0
for part, damage in self.damage_map.items():
if part == self.max_raw_part[0]:
weight = weak_weight
else:
weight = other_weight
average += damage.average() * weight
return average
def weakpart_weighted_element(self, weak_weight=WEAKPART_WEIGHT):
if len(self.parts) == 1:
other_weight = 0
weak_weight = 1
else:
other_weight = (1 - weak_weight) / (len(self.parts) - 1)
average = 0
for part, damage in self.damage_map.items():
if part == self.max_element_part[0]:
weight = weak_weight
else:
weight = other_weight
average += damage.average() * weight
return average
    def break_weakpart_raw(self):
        """
        Split evenly among break parts and weakest raw part.

        Breakable parts contribute damage.total (attacked only until
        broken); the raw-weakest part contributes damage.average()
        (attacked throughout).  If the weakest part is itself breakable
        it is counted only once.  Returns 0 with no breakable parts.
        """
        if not self.break_count:
            return 0
        average = 0.0
        # One share per breakable part plus one for the weak part.
        count = self.break_count + 1
        for part, damage in self.damage_map.items():
            if part == self.max_raw_part[0]:
                average += damage.average()
                if damage.is_breakable():
                    count -= 1
            elif damage.is_breakable():
                # for breaks, assume attack until broken, unless it's a
                # weak part and covered above
                average += damage.total
        return average / count
def break_weakpart_element(self):
"""
Split evenly among break parts and weakest element part.
"""
if not self.break_count:
return 0
average = 0.0
count = self.break_count + 1
for part, damage in self.damage_map.items():
if part == self.max_element_part[0]:
# If weakpart is also a break, assume continue attacking
# even after broken
average += damage.average()
if damage.is_breakable():
count -= 1
elif damage.is_breakable():
# for breaks that aren't the weakpart, assume attack until
# broken and then go back to weakpart
average += damage.total
return average / count
def break_only(self):
"""
Split evenly among break parts. If there are breaks that are weak
to element but not to raw or vice versa, this will represent that
when comparing weapons.
"""
if not self.break_count:
return 0
average = 0.0
for part, damage in self.damage_map.items():
if damage.is_breakable():
# attack until broken, then move to next break
average += damage.total
return average / self.break_count
def compare_break_even(self, other_wd, motion=None):
"""
Compare with another weapon damage, to determine the break even
point of the raw/element hitbox. Assumes same element.
Returns (m, ratio), where ratio is the break even for the ratio of
raw hitbox to element hitbox. If m is 1, then self is better
when the ratio is larger (favors raw). If m is -1, then self is
better when the ratio is smaller (favors element).
"""
r1, e1 = self.nohitbox_damage(motion)
r2, e2 = other_wd.nohitbox_damage(motion)
#print r1, e1
#print r2, e2
rdiff = r1 - r2
ediff = e2 - e1
m = 1
if rdiff < 0:
m = -1
return m, (float(ediff) / rdiff)
def get_raw_element_ratios(self):
# TODO: better MIXED handling
if self.damage_type in (WeaponType.CUT, WeaponType.MIXED):
raw_type = "cut"
else:
raw_type = "impact"
hitboxes = []
for row in self.monster_damage._rows:
part = row["body_part"]
hitbox = int(row[raw_type])
if self.etype:
ehitbox = int(row[str(self.etype.lower())])
else:
ehitbox = 0
if ehitbox > 0:
ratio = float(hitbox) / ehitbox
else:
ratio = 0
hitboxes.append((part, hitbox, ehitbox, ratio))
return hitboxes
def nohitbox_damage(self, motion=None):
"""
Note: uses first element only, so not good for dual element DB.
"""
if motion is None:
motion = self.motion
raw = raw_damage_nohitbox(self.true_raw, self.sharpness,
self.affinity, motion)
element = element_damage_nohitbox(self.eattack, self.sharpness)
return (raw, element)
def __getitem__(self, key):
return self.damage_map[key]
def keys(self):
return self.parts
class PartDamageState(object):
    """
    Damage numbers for one hitzone in a single monster state.

    Holds raw and element damage along with the raw and element hitbox
    values; *state* is the state name (None for the default state).
    """
    def __init__(self, raw, element, hitbox, ehitbox, state=None):
        self.raw = raw          # raw damage in this state
        self.element = element  # element damage in this state
        self.hitbox = hitbox    # raw hitbox value
        self.ehitbox = ehitbox  # element hitbox value
        self.state = state      # state name; None means default
class PartDamage(object):
    """
    Class to represent the damage done to a single hitzone on a monster,
    covering the default state plus alternate states (broken, enraged,
    etc). States are stored as PartDamageState objects keyed by state
    name, with None as the default state.
    """
    def __init__(self):
        self.states = dict()    # state name -> PartDamageState
        self.part = None        # part name; set externally
        self.breakable = False  # explicit breakable flag; set externally

    def _state_value(self, state_name, attr):
        # Read *attr* from the named alternate state, falling back to
        # the default (None) state when the alternate is absent.
        state = self.states.get(state_name, self.states[None])
        return getattr(state, attr)

    @property
    def raw(self):
        return self.states[None].raw

    @property
    def element(self):
        return self.states[None].element

    @property
    def hitbox(self):
        return self.states[None].hitbox

    @property
    def ehitbox(self):
        return self.states[None].ehitbox

    @property
    def break_raw(self):
        return self._state_value("Break Part", "raw")

    @property
    def break_element(self):
        return self._state_value("Break Part", "element")

    @property
    def rage_raw(self):
        return self._state_value("Enraged", "raw")

    @property
    def rage_element(self):
        return self._state_value("Enraged", "element")

    @property
    def total(self):
        """Raw + element damage in the default state."""
        return self.raw + self.element

    @property
    def total_break(self):
        """Raw + element damage with the part broken."""
        return self.break_raw + self.break_element

    @property
    def total_rage(self):
        """Raw + element damage with the monster enraged."""
        return self.rage_raw + self.rage_element

    def break_diff(self):
        return self.total_break - self.total

    def rage_diff(self):
        return self.total_rage - self.total

    def is_breakable(self):
        # Breakable if the db break rows give different damage for this
        # hitbox, or if it's explicitly marked as breakable (done by
        # checking hunt rewards for breaks).
        return self.break_diff() > 0 or self.breakable

    def average(self, break_weight=0.25, rage_weight=0.5):
        """
        Expected damage factoring in break and rage states: break-state
        damage weighted by *break_weight*, rage-state by *rage_weight*;
        when both states differ from the default, the two weighted
        averages are themselves averaged.
        """
        if not self.break_diff():
            return self.average_rage(rage_weight)
        broken_avg = self.average_break(break_weight)
        if self.rage_diff():
            return (self.average_rage(rage_weight) + broken_avg) / 2.0
        return broken_avg

    def average_break(self, break_weight=0.25):
        return (self.total_break * break_weight
                + self.total * (1 - break_weight))

    def average_rage(self, rage_weight=0.5):
        return (self.total_rage * rage_weight
                + self.total * (1 - rage_weight))

    def set_damage(self, raw, element, hitbox, ehitbox, state=None):
        """Record damage numbers for one state of this part."""
        if state == "Without Hide":
            # normalize the db's "Without Hide" rows to the break state
            state = "Break Part"
        self.states[state] = PartDamageState(raw, element,
                                             hitbox, ehitbox, state)
def element_attack_up(value):
    """Apply the Element Attack Up skill: +10% element value."""
    return 1.1 * value
def element_x_attack_up(value, level=1):
    """
    Apply the per-element "(Element) Atk Up" skill to an element value.

    Multiplies by (1 + 5% per level) and then adds a flat bonus of
    40/60/90 for level 1/2/3.

    Raises ValueError for any other level.

    Bug fix: the original computed the boosted value but never returned
    it (the function always returned None).
    """
    if level == 1:
        bonus = 40
    elif level == 2:
        bonus = 60
    elif level == 3:
        bonus = 90
    else:
        raise ValueError("level must be 1, 2, or 3")
    return value * (1 + .05 * level) + bonus
def cb_impact_phial_damage(true_raw, element, sharpness, affinity,
                           monster_hitbox, monster_ehitbox,
                           burst_level, artillery_level=0,
                           shield_charged=False):
    """
    Charge Blade damage with impact phials.

    @burst_level: 0 for shield thrust, 1 for side chop, 2 for double swing,
        3 for AED, 5 for super AED w/ 5 phials
    @artillery_level: 1 for Novice, 2 for God or Novice + Felyne Bombardier

    Returns (raw damage, element damage, phial burst damage).

    See
    https://www.reddit.com/r/MonsterHunter/comments/391a5i/mh4u_charge_blade_phial_damage/
    Note this contradicts data from the other link, but this is more recent.
    """
    motions = _cb_get_motions(burst_level, shield_charged)
    # base phial multiplier depends on the burst level
    if burst_level == 5:
        phial_mult = 0.33
    elif burst_level == 3:
        phial_mult = 0.1
    else:
        phial_mult = 0.05
    # Artillery skill boosts impact phial damage
    if artillery_level == 1:
        phial_mult *= 1.3
    elif artillery_level == 2:
        phial_mult *= 1.4
    elif artillery_level != 0:
        raise ValueError("artillery_level must be 0, 1 (Novice), or 2 (God)")
    if shield_charged:
        if burst_level != 5:
            phial_mult *= 1.3
        if burst_level == 0:
            # Shield Thrust gets one blast if shield is charged
            burst_level = 1
    # burst damage is fixed, doesn't depend on monster hitbox
    burst_dmg = true_raw * phial_mult * burst_level
    raw_dmg = sum(raw_damage(true_raw, sharpness, affinity, monster_hitbox,
                             motion)
                  for motion in motions)
    ele_dmg = (element_damage(element, sharpness, monster_ehitbox)
               * len(motions))
    return (raw_dmg, ele_dmg, burst_dmg)
def cb_element_phial_damage(true_raw, element, sharpness, affinity,
                            monster_hitbox, monster_ehitbox,
                            burst_level, artillery_level=0,
                            shield_charged=False):
    """
    Charge Blade damage with element phials.

    Same parameters as cb_impact_phial_damage; artillery_level is
    accepted for interface parity but is not used for element phials.

    Returns (raw damage, element damage, phial burst damage).
    """
    motions = _cb_get_motions(burst_level, shield_charged)
    # base phial multiplier depends on the burst level
    if burst_level == 5:
        phial_mult = 4.5 * 3
    elif burst_level == 3:
        phial_mult = 4.5
    else:
        phial_mult = 3
    if shield_charged:
        if burst_level != 5:
            phial_mult *= 1.35
        if burst_level == 0:
            # Shield Thrust gets one blast if shield is charged
            burst_level = 1
    # element phial blasts scale with the monster's element hitbox
    burst_dmg = (element / 10.0 * phial_mult * burst_level
                 * monster_ehitbox / 100.0)
    raw_dmg = sum(raw_damage(true_raw, sharpness, affinity, monster_hitbox,
                             motion)
                  for motion in motions)
    ele_dmg = (element_damage(element, sharpness, monster_ehitbox)
               * len(motions))
    return (raw_dmg, ele_dmg, burst_dmg)
def _cb_get_motions(burst_level, shield_charged):
    """
    Motion values for the Charge Blade attacks at each burst level.

    See https://www.reddit.com/r/MonsterHunter/comments/2ue8qw/charge_blade_attack_motion_values/
    Note: the level-5 values assume 5 phials; w/o phials super AED is
    [17, 90], but that is very rarely used.
    """
    # burst_level -> (uncharged motions, shield-charged motions)
    motion_table = {
        0: ([8, 12], [8, 12]),              # Shield Thrust
        1: ([26], [31]),                    # Burst Side Chop
        2: ([18, 80], [21, 96]),            # Double Side Swing
        3: ([90], [108]),                   # AED or Super Burst
        5: ([25, 99, 100], [25, 99, 100]),  # super AED or Ultra Burst
    }
    if burst_level not in motion_table:
        raise ValueError("burst_level must be 0, 1, 2, 3, or 5 (Super AED)")
    uncharged, charged = motion_table[burst_level]
    # return a copy so callers can't mutate the shared table
    return list(charged if shield_charged else uncharged)
| bd4/monster-hunter-scripts | mhapi/damage.py | Python | mit | 27,751 | [
"BLAST"
] | eaadfa32d2fe995aaa02752aea9e1ea92abb9008cfe70fc046bd8ea974a13713 |
# this program corresponds to special.py
### Means test is not done yet
# E Means test is giving error (E)
# F Means test is failing (F)
# EF Means test is giving error and Failing
#! Means test is segfaulting
# 8 Means test runs forever
### test_besselpoly
### test_mathieu_a
### test_mathieu_even_coef
### test_mathieu_odd_coef
### test_modfresnelp
### test_modfresnelm
# test_pbdv_seq
### test_pbvv_seq
### test_sph_harm
# test_sph_in
# test_sph_jn
# test_sph_kn
from __future__ import division, print_function, absolute_import
import itertools
import warnings
import numpy as np
from numpy import (array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp,
log, zeros, sqrt, asarray, inf, nan_to_num, real, arctan, float_)
from numpy.testing import (assert_equal, assert_almost_equal,
assert_array_equal, assert_array_almost_equal, assert_approx_equal,
assert_, dec, TestCase, run_module_suite, assert_allclose,
assert_raises, assert_array_almost_equal_nulp)
from scipy import special
import scipy.special._ufuncs as cephes
from scipy.special import ellipk, zeta
from scipy.special._testutils import assert_tol_equal, with_special_errors, \
assert_func_equal
from scipy._lib._version import NumpyVersion
import math
class TestCephes(TestCase):
def test_airy(self):
cephes.airy(0)
def test_airye(self):
cephes.airye(0)
def test_binom(self):
n = np.array([0.264, 4, 5.2, 17])
k = np.array([2, 0.4, 7, 3.3])
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
rknown = np.array([[-0.097152, 0.9263051596159367, 0.01858423645695389,
-0.007581020651518199],[6, 2.0214389119675666, 0, 2.9827344527963846],
[10.92, 2.22993515861399, -0.00585728, 10.468891352063146],
[136, 3.5252179590758828, 19448, 1024.5526916174495]])
assert_func_equal(cephes.binom, rknown.ravel(), nk, rtol=1e-13)
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.arange(-7, 30), 1000*np.random.rand(30) - 500]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_2(self):
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.logspace(1, 300, 20)]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_exact(self):
@np.vectorize
def binom_int(n, k):
n = int(n)
k = int(k)
num = int(1)
den = int(1)
for i in range(1, k+1):
num *= i + n - k
den *= i
return float(num/den)
np.random.seed(1234)
n = np.arange(1, 15)
k = np.arange(0, 15)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
nk = nk[nk[:,0] >= nk[:,1]]
assert_func_equal(cephes.binom,
binom_int(nk[:,0], nk[:,1]),
nk,
atol=0, rtol=0)
def test_bdtr(self):
assert_equal(cephes.bdtr(1,1,0.5),1.0)
def test_bdtri(self):
assert_equal(cephes.bdtri(1,3,0.5),0.5)
def test_bdtrc(self):
assert_equal(cephes.bdtrc(1,3,0.5),0.5)
def test_bdtrin(self):
assert_equal(cephes.bdtrin(1,0,1),5.0)
def test_bdtrik(self):
cephes.bdtrik(1,3,0.5)
def test_bei(self):
assert_equal(cephes.bei(0),0.0)
def test_beip(self):
assert_equal(cephes.beip(0),0.0)
def test_ber(self):
assert_equal(cephes.ber(0),1.0)
def test_berp(self):
assert_equal(cephes.berp(0),0.0)
def test_besselpoly(self):
assert_equal(cephes.besselpoly(0,0,0),1.0)
def test_beta(self):
assert_equal(cephes.beta(1,1),1.0)
assert_allclose(cephes.beta(-100.3, 1e-200), cephes.gamma(1e-200))
assert_allclose(cephes.beta(0.0342, 171), 24.070498359873497,
rtol=1e-13, atol=0)
def test_betainc(self):
assert_equal(cephes.betainc(1,1,1),1.0)
assert_allclose(cephes.betainc(0.0342, 171, 1e-10), 0.55269916901806648)
def test_betaln(self):
assert_equal(cephes.betaln(1,1),0.0)
assert_allclose(cephes.betaln(-100.3, 1e-200), cephes._gammaln(1e-200))
assert_allclose(cephes.betaln(0.0342, 170), 3.1811881124242447,
rtol=1e-14, atol=0)
def test_betaincinv(self):
assert_equal(cephes.betaincinv(1,1,1),1.0)
assert_allclose(cephes.betaincinv(0.0342, 171, 0.25),
8.4231316935498957e-21, rtol=3e-12, atol=0)
def test_beta_inf(self):
assert_(np.isinf(special.beta(-1, 2)))
def test_btdtr(self):
assert_equal(cephes.btdtr(1,1,1),1.0)
def test_btdtri(self):
assert_equal(cephes.btdtri(1,1,1),1.0)
def test_btdtria(self):
assert_equal(cephes.btdtria(1,1,1),5.0)
def test_btdtrib(self):
assert_equal(cephes.btdtrib(1,1,1),5.0)
def test_cbrt(self):
assert_approx_equal(cephes.cbrt(1),1.0)
def test_chdtr(self):
assert_equal(cephes.chdtr(1,0),0.0)
def test_chdtrc(self):
assert_equal(cephes.chdtrc(1,0),1.0)
def test_chdtri(self):
assert_equal(cephes.chdtri(1,1),0.0)
def test_chdtriv(self):
assert_equal(cephes.chdtriv(0,0),5.0)
def test_chndtr(self):
assert_equal(cephes.chndtr(0,1,0),0.0)
p = cephes.chndtr(np.linspace(20, 25, 5), 2, 1.07458615e+02)
assert_allclose(p, [1.21805009e-09, 2.81979982e-09, 6.25652736e-09,
1.33520017e-08, 2.74909967e-08],
rtol=1e-6, atol=0)
assert_almost_equal(cephes.chndtr(np.inf, np.inf, 0), 2.0)
assert_almost_equal(cephes.chndtr(2, 1, np.inf), 0.0)
assert_(np.isnan(cephes.chndtr(np.nan, 1, 2)))
assert_(np.isnan(cephes.chndtr(5, np.nan, 2)))
assert_(np.isnan(cephes.chndtr(5, 1, np.nan)))
def test_chndtridf(self):
assert_equal(cephes.chndtridf(0,0,1),5.0)
def test_chndtrinc(self):
assert_equal(cephes.chndtrinc(0,1,0),5.0)
def test_chndtrix(self):
assert_equal(cephes.chndtrix(0,1,0),0.0)
def test_cosdg(self):
assert_equal(cephes.cosdg(0),1.0)
def test_cosm1(self):
assert_equal(cephes.cosm1(0),0.0)
def test_cotdg(self):
assert_almost_equal(cephes.cotdg(45),1.0)
def test_dawsn(self):
assert_equal(cephes.dawsn(0),0.0)
assert_allclose(cephes.dawsn(1.23), 0.50053727749081767)
def test_diric(self):
# Test behavior near multiples of 2pi. Regression test for issue
# described in gh-4001.
n_odd = [1, 5, 25]
x = np.array(2*np.pi + 5e-5).astype(np.float32)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=7)
x = np.array(2*np.pi + 1e-9).astype(np.float64)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
x = np.array(2*np.pi + 1e-15).astype(np.float64)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
if hasattr(np, 'float128'):
# No float128 available in 32-bit numpy
x = np.array(2*np.pi + 1e-12).astype(np.float128)
assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=19)
n_even = [2, 4, 24]
x = np.array(2*np.pi + 1e-9).astype(np.float64)
assert_almost_equal(special.diric(x, n_even), -1.0, decimal=15)
# Test at some values not near a multiple of pi
x = np.arange(0.2*np.pi, 1.0*np.pi, 0.2*np.pi)
octave_result = [0.872677996249965, 0.539344662916632,
0.127322003750035, -0.206011329583298]
assert_almost_equal(special.diric(x, 3), octave_result, decimal=15)
def test_diric_broadcasting(self):
x = np.arange(5)
n = np.array([1, 3, 7])
assert_(special.diric(x[:, np.newaxis], n).shape == (x.size, n.size))
def test_ellipe(self):
assert_equal(cephes.ellipe(1),1.0)
def test_ellipeinc(self):
assert_equal(cephes.ellipeinc(0,1),0.0)
def test_ellipj(self):
cephes.ellipj(0,1)
def test_ellipk(self):
assert_allclose(ellipk(0), pi/2)
def test_ellipkinc(self):
assert_equal(cephes.ellipkinc(0,0),0.0)
def test_erf(self):
assert_equal(cephes.erf(0),0.0)
def test_erfc(self):
assert_equal(cephes.erfc(0),1.0)
def test_exp1(self):
cephes.exp1(1)
def test_expi(self):
cephes.expi(1)
def test_expn(self):
cephes.expn(1,1)
def test_exp1_reg(self):
# Regression for #834
a = cephes.exp1(-complex(19.9999990))
b = cephes.exp1(-complex(19.9999991))
assert_array_almost_equal(a.imag, b.imag)
def test_exp10(self):
assert_approx_equal(cephes.exp10(2),100.0)
def test_exp2(self):
assert_equal(cephes.exp2(2),4.0)
def test_expm1(self):
assert_equal(cephes.expm1(0),0.0)
assert_equal(cephes.expm1(np.inf), np.inf)
assert_equal(cephes.expm1(-np.inf), -1)
assert_equal(cephes.expm1(np.nan), np.nan)
# Earlier numpy version don't guarantee that npy_cexp conforms to C99.
@dec.skipif(NumpyVersion(np.__version__) < '1.9.0')
def test_expm1_complex(self):
expm1 = cephes.expm1
assert_equal(expm1(0 + 0j), 0 + 0j)
assert_equal(expm1(complex(np.inf, 0)), complex(np.inf, 0))
assert_equal(expm1(complex(np.inf, 1)), complex(np.inf, np.inf))
assert_equal(expm1(complex(np.inf, 2)), complex(-np.inf, np.inf))
assert_equal(expm1(complex(np.inf, 4)), complex(-np.inf, -np.inf))
assert_equal(expm1(complex(np.inf, 5)), complex(np.inf, -np.inf))
assert_equal(expm1(complex(1, np.inf)), complex(np.nan, np.nan))
assert_equal(expm1(complex(0, np.inf)), complex(np.nan, np.nan))
assert_equal(expm1(complex(np.inf, np.inf)), complex(np.inf, np.nan))
assert_equal(expm1(complex(-np.inf, np.inf)), complex(-1, 0))
assert_equal(expm1(complex(-np.inf, np.nan)), complex(-1, 0))
assert_equal(expm1(complex(np.inf, np.nan)), complex(np.inf, np.nan))
assert_equal(expm1(complex(0, np.nan)), complex(np.nan, np.nan))
assert_equal(expm1(complex(1, np.nan)), complex(np.nan, np.nan))
assert_equal(expm1(complex(np.nan, 1)), complex(np.nan, np.nan))
assert_equal(expm1(complex(np.nan, np.nan)), complex(np.nan, np.nan))
@dec.knownfailureif(True, 'The real part of expm1(z) bad at these points')
def test_expm1_complex_hard(self):
# The real part of this function is difficult to evaluate when
# z.real = -log(cos(z.imag)).
y = np.array([0.1, 0.2, 0.3, 5, 11, 20])
x = -np.log(np.cos(y))
z = x + 1j*y
# evaluate using mpmath.expm1 with dps=1000
expected = np.array([-5.5507901846769623e-17+0.10033467208545054j,
2.4289354732893695e-18+0.20271003550867248j,
4.5235500262585768e-17+0.30933624960962319j,
7.8234305217489006e-17-3.3805150062465863j,
-1.3685191953697676e-16-225.95084645419513j,
8.7175620481291045e-17+2.2371609442247422j])
found = cephes.expm1(z)
# this passes.
assert_array_almost_equal_nulp(found.imag, expected.imag, 3)
# this fails.
assert_array_almost_equal_nulp(found.real, expected.real, 20)
def test_fdtr(self):
assert_equal(cephes.fdtr(1,1,0),0.0)
def test_fdtrc(self):
assert_equal(cephes.fdtrc(1,1,0),1.0)
def test_fdtri(self):
# cephes.fdtri(1,1,0.5) #BUG: gives NaN, should be 1
assert_allclose(cephes.fdtri(1, 1, [0.499, 0.501]),
array([0.9937365, 1.00630298]), rtol=1e-6)
def test_fdtridfd(self):
assert_equal(cephes.fdtridfd(1,0,0),5.0)
def test_fresnel(self):
assert_equal(cephes.fresnel(0),(0.0,0.0))
def test_gamma(self):
assert_equal(cephes.gamma(5),24.0)
def test_gammainc(self):
assert_equal(cephes.gammainc(5,0),0.0)
def test_gammaincc(self):
assert_equal(cephes.gammaincc(5,0),1.0)
def test_gammainccinv(self):
assert_equal(cephes.gammainccinv(5,1),0.0)
def test_gammaln(self):
cephes._gammaln(10)
def test_gammasgn(self):
vals = np.array([-4, -3.5, -2.3, 1, 4.2], np.float64)
assert_array_equal(cephes.gammasgn(vals), np.sign(cephes.rgamma(vals)))
def test_gdtr(self):
assert_equal(cephes.gdtr(1,1,0),0.0)
def test_gdtr_inf(self):
assert_equal(cephes.gdtr(1,1,np.inf),1.0)
def test_gdtrc(self):
assert_equal(cephes.gdtrc(1,1,0),1.0)
def test_gdtria(self):
assert_equal(cephes.gdtria(0,1,1),0.0)
def test_gdtrib(self):
cephes.gdtrib(1,0,1)
# assert_equal(cephes.gdtrib(1,0,1),5.0)
def test_gdtrix(self):
cephes.gdtrix(1,1,.1)
def test_hankel1(self):
cephes.hankel1(1,1)
def test_hankel1e(self):
cephes.hankel1e(1,1)
def test_hankel2(self):
cephes.hankel2(1,1)
def test_hankel2e(self):
cephes.hankel2e(1,1)
def test_hyp1f1(self):
assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0))
assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095)
cephes.hyp1f1(1,1,1)
def test_hyp1f2(self):
cephes.hyp1f2(1,1,1,1)
def test_hyp2f0(self):
cephes.hyp2f0(1,1,1,1)
def test_hyp2f1(self):
assert_equal(cephes.hyp2f1(1,1,1,0),1.0)
def test_hyp3f0(self):
assert_equal(cephes.hyp3f0(1,1,1,0),(1.0,0.0))
def test_hyperu(self):
assert_equal(cephes.hyperu(0,1,1),1.0)
def test_i0(self):
assert_equal(cephes.i0(0),1.0)
def test_i0e(self):
assert_equal(cephes.i0e(0),1.0)
def test_i1(self):
assert_equal(cephes.i1(0),0.0)
def test_i1e(self):
assert_equal(cephes.i1e(0),0.0)
def test_it2i0k0(self):
cephes.it2i0k0(1)
def test_it2j0y0(self):
cephes.it2j0y0(1)
def test_it2struve0(self):
cephes.it2struve0(1)
def test_itairy(self):
cephes.itairy(1)
def test_iti0k0(self):
assert_equal(cephes.iti0k0(0),(0.0,0.0))
def test_itj0y0(self):
assert_equal(cephes.itj0y0(0),(0.0,0.0))
def test_itmodstruve0(self):
assert_equal(cephes.itmodstruve0(0),0.0)
def test_itstruve0(self):
assert_equal(cephes.itstruve0(0),0.0)
def test_iv(self):
assert_equal(cephes.iv(1,0),0.0)
def _check_ive(self):
assert_equal(cephes.ive(1,0),0.0)
def test_j0(self):
assert_equal(cephes.j0(0),1.0)
def test_j1(self):
assert_equal(cephes.j1(0),0.0)
def test_jn(self):
assert_equal(cephes.jn(0,0),1.0)
def test_jv(self):
assert_equal(cephes.jv(0,0),1.0)
def _check_jve(self):
assert_equal(cephes.jve(0,0),1.0)
def test_k0(self):
cephes.k0(2)
def test_k0e(self):
cephes.k0e(2)
def test_k1(self):
cephes.k1(2)
def test_k1e(self):
cephes.k1e(2)
def test_kei(self):
cephes.kei(2)
def test_keip(self):
assert_equal(cephes.keip(0),0.0)
def test_ker(self):
cephes.ker(2)
def test_kerp(self):
cephes.kerp(2)
def _check_kelvin(self):
cephes.kelvin(2)
def test_kn(self):
cephes.kn(1,1)
def test_kolmogi(self):
assert_equal(cephes.kolmogi(1),0.0)
assert_(np.isnan(cephes.kolmogi(np.nan)))
def test_kolmogorov(self):
assert_equal(cephes.kolmogorov(0),1.0)
def _check_kv(self):
cephes.kv(1,1)
def _check_kve(self):
cephes.kve(1,1)
def test_log1p(self):
log1p = cephes.log1p
assert_equal(log1p(0), 0.0)
assert_equal(log1p(-1), -np.inf)
assert_equal(log1p(-2), np.nan)
assert_equal(log1p(np.inf), np.inf)
# earlier numpy version don't guarantee that npy_clog conforms to C99
@dec.skipif(NumpyVersion(np.__version__) < '1.9.0')
def test_log1p_complex(self):
log1p = cephes.log1p
c = complex
assert_equal(log1p(0 + 0j), 0 + 0j)
assert_equal(log1p(c(-1, 0)), c(-np.inf, 0))
assert_allclose(log1p(c(1, np.inf)), c(np.inf, np.pi/2))
assert_equal(log1p(c(1, np.nan)), c(np.nan, np.nan))
assert_allclose(log1p(c(-np.inf, 1)), c(np.inf, np.pi))
assert_equal(log1p(c(np.inf, 1)), c(np.inf, 0))
assert_allclose(log1p(c(-np.inf, np.inf)), c(np.inf, 3*np.pi/4))
assert_allclose(log1p(c(np.inf, np.inf)), c(np.inf, np.pi/4))
assert_equal(log1p(c(np.inf, np.nan)), c(np.inf, np.nan))
assert_equal(log1p(c(-np.inf, np.nan)), c(np.inf, np.nan))
assert_equal(log1p(c(np.nan, np.inf)), c(np.inf, np.nan))
assert_equal(log1p(c(np.nan, 1)), c(np.nan, np.nan))
assert_equal(log1p(c(np.nan, np.nan)), c(np.nan, np.nan))
def test_lpmv(self):
assert_equal(cephes.lpmv(0,0,1),1.0)
def test_mathieu_a(self):
assert_equal(cephes.mathieu_a(1,0),1.0)
def test_mathieu_b(self):
assert_equal(cephes.mathieu_b(1,0),1.0)
def test_mathieu_cem(self):
assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0))
# Test AMS 20.2.27
@np.vectorize
def ce_smallq(m, q, z):
z *= np.pi/180
if m == 0:
return 2**(-0.5) * (1 - .5*q*cos(2*z)) # + O(q^2)
elif m == 1:
return cos(z) - q/8 * cos(3*z) # + O(q^2)
elif m == 2:
return cos(2*z) - q*(cos(4*z)/12 - 1/4) # + O(q^2)
else:
return cos(m*z) - q*(cos((m+2)*z)/(4*(m+1)) - cos((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(0, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_cem(m[:,None], q[None,:], 0.123)[0],
ce_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_sem(self):
assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0))
# Test AMS 20.2.27
@np.vectorize
def se_smallq(m, q, z):
z *= np.pi/180
if m == 1:
return sin(z) - q/8 * sin(3*z) # + O(q^2)
elif m == 2:
return sin(2*z) - q*sin(4*z)/12 # + O(q^2)
else:
return sin(m*z) - q*(sin((m+2)*z)/(4*(m+1)) - sin((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(1, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_sem(m[:,None], q[None,:], 0.123)[0],
se_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_modcem1(self):
assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0))
def test_mathieu_modcem2(self):
cephes.mathieu_modcem2(1,1,1)
# Test reflection relation AMS 20.6.19
m = np.arange(0, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modcem2(m, q, -z)[0]
fr = -cephes.mathieu_modcem2(m, q, 0)[0] / cephes.mathieu_modcem1(m, q, 0)[0]
y2 = -cephes.mathieu_modcem2(m, q, z)[0] - 2*fr*cephes.mathieu_modcem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_modsem1(self):
assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0))
def test_mathieu_modsem2(self):
cephes.mathieu_modsem2(1,1,1)
# Test reflection relation AMS 20.6.20
m = np.arange(1, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modsem2(m, q, -z)[0]
fr = cephes.mathieu_modsem2(m, q, 0)[1] / cephes.mathieu_modsem1(m, q, 0)[1]
y2 = cephes.mathieu_modsem2(m, q, z)[0] - 2*fr*cephes.mathieu_modsem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_overflow(self):
# Check that these return NaNs instead of causing a SEGV
assert_equal(cephes.mathieu_cem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_cem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem2(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem2(10000, 1.5, 1.3), (np.nan, np.nan))
def test_mathieu_ticket_1847(self):
# Regression test --- this call had some out-of-bounds access
# and could return nan occasionally
for k in range(60):
v = cephes.mathieu_modsem2(2, 100, -1)
# Values from ACM TOMS 804 (derivate by numerical differentiation)
assert_allclose(v[0], 0.1431742913063671074347, rtol=1e-10)
assert_allclose(v[1], 0.9017807375832909144719, rtol=1e-4)
def test_modfresnelm(self):
cephes.modfresnelm(0)
def test_modfresnelp(self):
cephes.modfresnelp(0)
def _check_modstruve(self):
assert_equal(cephes.modstruve(1,0),0.0)
def test_nbdtr(self):
assert_equal(cephes.nbdtr(1,1,1),1.0)
def test_nbdtrc(self):
assert_equal(cephes.nbdtrc(1,1,1),0.0)
def test_nbdtri(self):
assert_equal(cephes.nbdtri(1,1,1),1.0)
def __check_nbdtrik(self):
cephes.nbdtrik(1,.4,.5)
def test_nbdtrin(self):
assert_equal(cephes.nbdtrin(1,0,0),5.0)
def test_ncfdtr(self):
assert_equal(cephes.ncfdtr(1,1,1,0),0.0)
def test_ncfdtri(self):
assert_equal(cephes.ncfdtri(1,1,1,0),0.0)
def test_ncfdtridfd(self):
cephes.ncfdtridfd(1,0.5,0,1)
def __check_ncfdtridfn(self):
cephes.ncfdtridfn(1,0.5,0,1)
def __check_ncfdtrinc(self):
cephes.ncfdtrinc(1,0.5,0,1)
def test_nctdtr(self):
assert_equal(cephes.nctdtr(1,0,0),0.5)
assert_equal(cephes.nctdtr(9, 65536, 45), 0.0)
assert_approx_equal(cephes.nctdtr(np.inf, 1., 1.), 0.5, 5)
assert_(np.isnan(cephes.nctdtr(2., np.inf, 10.)))
assert_approx_equal(cephes.nctdtr(2., 1., np.inf), 1.)
assert_(np.isnan(cephes.nctdtr(np.nan, 1., 1.)))
assert_(np.isnan(cephes.nctdtr(2., np.nan, 1.)))
assert_(np.isnan(cephes.nctdtr(2., 1., np.nan)))
def __check_nctdtridf(self):
cephes.nctdtridf(1,0.5,0)
def test_nctdtrinc(self):
cephes.nctdtrinc(1,0,0)
def test_nctdtrit(self):
cephes.nctdtrit(.1,0.2,.5)
def test_ndtr(self):
assert_equal(cephes.ndtr(0), 0.5)
assert_almost_equal(cephes.ndtr(1), 0.84134474606)
def test_ndtri(self):
assert_equal(cephes.ndtri(0.5),0.0)
def test_nrdtrimn(self):
assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)
def test_nrdtrisd(self):
assert_tol_equal(cephes.nrdtrisd(0.5,0.5,0.5), 0.0,
atol=0, rtol=0)
def test_obl_ang1(self):
cephes.obl_ang1(1,1,1,0)
def test_obl_ang1_cv(self):
result = cephes.obl_ang1_cv(1,1,1,1,0)
assert_almost_equal(result[0],1.0)
assert_almost_equal(result[1],0.0)
def _check_obl_cv(self):
assert_equal(cephes.obl_cv(1,1,0),2.0)
def test_obl_rad1(self):
cephes.obl_rad1(1,1,1,0)
def test_obl_rad1_cv(self):
cephes.obl_rad1_cv(1,1,1,1,0)
def test_obl_rad2(self):
cephes.obl_rad2(1,1,1,0)
def test_obl_rad2_cv(self):
cephes.obl_rad2_cv(1,1,1,1,0)
def test_pbdv(self):
assert_equal(cephes.pbdv(1,0),(0.0,1.0))
def test_pbvv(self):
cephes.pbvv(1,0)
def test_pbwa(self):
cephes.pbwa(1,0)
def test_pdtr(self):
val = cephes.pdtr(0, 1)
assert_almost_equal(val, np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtr([0, 1, 2], 0.0)
assert_array_equal(val, [1, 1, 1])
def test_pdtrc(self):
val = cephes.pdtrc(0, 1)
assert_almost_equal(val, 1 - np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtrc([0, 1, 2], 0.0)
assert_array_equal(val, [0, 0, 0])
def test_pdtri(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
cephes.pdtri(0.5,0.5)
def test_pdtrik(self):
k = cephes.pdtrik(0.5, 1)
assert_almost_equal(cephes.gammaincc(k + 1, 1), 0.5)
# Edge case: m = 0 or very small.
k = cephes.pdtrik([[0], [0.25], [0.95]], [0, 1e-20, 1e-6])
assert_array_equal(k, np.zeros((3, 3)))
def test_pro_ang1(self):
cephes.pro_ang1(1,1,1,0)
def test_pro_ang1_cv(self):
assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0),
array((1.0,0.0)))
def _check_pro_cv(self):
assert_equal(cephes.pro_cv(1,1,0),2.0)
def test_pro_rad1(self):
cephes.pro_rad1(1,1,1,0.1)
def test_pro_rad1_cv(self):
cephes.pro_rad1_cv(1,1,1,1,0)
def test_pro_rad2(self):
cephes.pro_rad2(1,1,1,0)
def test_pro_rad2_cv(self):
cephes.pro_rad2_cv(1,1,1,1,0)
def test_psi(self):
cephes.psi(1)
def test_radian(self):
assert_equal(cephes.radian(0,0,0),0)
def test_rgamma(self):
assert_equal(cephes.rgamma(1),1.0)
def test_round(self):
assert_equal(cephes.round(3.4),3.0)
assert_equal(cephes.round(-3.4),-3.0)
assert_equal(cephes.round(3.6),4.0)
assert_equal(cephes.round(-3.6),-4.0)
assert_equal(cephes.round(3.5),4.0)
assert_equal(cephes.round(-3.5),-4.0)
def test_shichi(self):
cephes.shichi(1)
def test_sici(self):
cephes.sici(1)
s, c = cephes.sici(np.inf)
assert_almost_equal(s, np.pi * 0.5)
assert_almost_equal(c, 0)
s, c = cephes.sici(-np.inf)
assert_almost_equal(s, -np.pi * 0.5)
assert_(np.isnan(c), "cosine integral(-inf) is not nan")
def test_sindg(self):
assert_equal(cephes.sindg(90),1.0)
def test_smirnov(self):
assert_equal(cephes.smirnov(1,.1),0.9)
assert_(np.isnan(cephes.smirnov(1,np.nan)))
def test_smirnovi(self):
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4)
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6)
assert_(np.isnan(cephes.smirnovi(1,np.nan)))
def test_spence(self):
assert_equal(cephes.spence(1),0.0)
def test_stdtr(self):
assert_equal(cephes.stdtr(1,0),0.5)
assert_almost_equal(cephes.stdtr(1,1), 0.75)
assert_almost_equal(cephes.stdtr(1,2), 0.852416382349)
def test_stdtridf(self):
cephes.stdtridf(0.7,1)
def test_stdtrit(self):
cephes.stdtrit(1,0.7)
def test_struve(self):
assert_equal(cephes.struve(0,0),0.0)
def test_tandg(self):
assert_equal(cephes.tandg(45),1.0)
def test_tklmbda(self):
assert_almost_equal(cephes.tklmbda(1,1),1.0)
def test_y0(self):
cephes.y0(1)
def test_y1(self):
cephes.y1(1)
def test_yn(self):
cephes.yn(1,1)
def test_yv(self):
cephes.yv(1,1)
def _check_yve(self):
cephes.yve(1,1)
def test_zeta(self):
assert_allclose(zeta(2,2), pi**2/6 - 1, rtol=1e-12)
def test_zetac(self):
assert_equal(cephes.zetac(0),-1.5)
def test_zeta_1arg(self):
assert_allclose(zeta(2), pi**2/6, rtol=1e-12)
assert_allclose(zeta(4), pi**4/90, rtol=1e-12)
    def test_wofz(self):
        # Faddeeva function w(z) = exp(-z^2) * erfc(-i z) at a spread of
        # complex points covering small/large moduli and all quadrants.
        # Reference values presumably come from an independent
        # high-precision implementation -- TODO confirm provenance.
        z = [complex(624.2,-0.26123), complex(-0.4,3.), complex(0.6,2.),
             complex(-1.,1.), complex(-1.,-9.), complex(-1.,9.),
             complex(-0.0000000234545,1.1234), complex(-3.,5.1),
             complex(-53,30.1), complex(0.0,0.12345),
             complex(11,1), complex(-22,-2), complex(9,-28),
             complex(21,-33), complex(1e5,1e5), complex(1e14,1e14)
             ]
        w = [
            complex(-3.78270245518980507452677445620103199303131110e-7,
                    0.000903861276433172057331093754199933411710053155),
            complex(0.1764906227004816847297495349730234591778719532788,
                    -0.02146550539468457616788719893991501311573031095617),
            complex(0.2410250715772692146133539023007113781272362309451,
                    0.06087579663428089745895459735240964093522265589350),
            complex(0.30474420525691259245713884106959496013413834051768,
                    -0.20821893820283162728743734725471561394145872072738),
            complex(7.317131068972378096865595229600561710140617977e34,
                    8.321873499714402777186848353320412813066170427e34),
            complex(0.0615698507236323685519612934241429530190806818395,
                    -0.00676005783716575013073036218018565206070072304635),
            complex(0.3960793007699874918961319170187598400134746631,
                    -5.593152259116644920546186222529802777409274656e-9),
            complex(0.08217199226739447943295069917990417630675021771804,
                    -0.04701291087643609891018366143118110965272615832184),
            complex(0.00457246000350281640952328010227885008541748668738,
                    -0.00804900791411691821818731763401840373998654987934),
            complex(0.8746342859608052666092782112565360755791467973338452,
                    0.),
            complex(0.00468190164965444174367477874864366058339647648741,
                    0.0510735563901306197993676329845149741675029197050),
            complex(-0.0023193175200187620902125853834909543869428763219,
                    -0.025460054739731556004902057663500272721780776336),
            complex(9.11463368405637174660562096516414499772662584e304,
                    3.97101807145263333769664875189354358563218932e305),
            complex(-4.4927207857715598976165541011143706155432296e281,
                    -2.8019591213423077494444700357168707775769028e281),
            complex(2.820947917809305132678577516325951485807107151e-6,
                    2.820947917668257736791638444590253942253354058e-6),
            complex(2.82094791773878143474039725787438662716372268e-15,
                    2.82094791773878143474039725773333923127678361e-15)
            ]
        assert_func_equal(cephes.wofz, w, z, rtol=1e-13)
class TestAiry(TestCase):
    """Tests for the Airy functions Ai, Bi, their derivatives and zeros."""
    def test_airy(self):
        # This tests the airy function to ensure 8 place accuracy in computation
        # airy(x) returns the 4-tuple (Ai, Ai', Bi, Bi').
        x = special.airy(.99)
        assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8)
        x = special.airy(.41)
        assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8)
        x = special.airy(-.36)
        assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8)
    def test_airye(self):
        # Exponentially scaled Airy functions: Ai/Ai' are scaled by
        # exp(2/3 * z * sqrt(z)), Bi/Bi' by exp(-|Re(2/3 * z * sqrt(z))|).
        a = special.airye(0.01)
        b = special.airy(0.01)
        b1 = [None]*4
        for n in range(2):
            b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01))
        for n in range(2,4):
            b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01))))
        assert_array_almost_equal(a,b1,6)
    def test_bi_zeros(self):
        # bi_zeros(n) returns (zeros of Bi, zeros of Bi', Bi at Bi' zeros,
        # Bi' at Bi zeros).
        bi = special.bi_zeros(2)
        bia = (array([-1.17371322, -3.2710930]),
               array([-2.29443968, -4.07315509]),
               array([-0.45494438, 0.39652284]),
               array([0.60195789, -0.76031014]))
        assert_array_almost_equal(bi,bia,4)
        bi = special.bi_zeros(5)
        assert_array_almost_equal(bi[0],array([-1.173713222709127,
                                               -3.271093302836352,
                                               -4.830737841662016,
                                               -6.169852128310251,
                                               -7.376762079367764]),11)
        assert_array_almost_equal(bi[1],array([-2.294439682614122,
                                               -4.073155089071828,
                                               -5.512395729663599,
                                               -6.781294445990305,
                                               -7.940178689168587]),10)
        assert_array_almost_equal(bi[2],array([-0.454944383639657,
                                               0.396522836094465,
                                               -0.367969161486959,
                                               0.349499116831805,
                                               -0.336026240133662]),11)
        assert_array_almost_equal(bi[3],array([0.601957887976239,
                                               -0.760310141492801,
                                               0.836991012619261,
                                               -0.88947990142654,
                                               0.929983638568022]),10)
    def test_ai_zeros(self):
        # First zero of Ai and Ai', and the function values there.
        ai = special.ai_zeros(1)
        assert_array_almost_equal(ai,(array([-2.33810741]),
                                      array([-1.01879297]),
                                      array([0.5357]),
                                      array([0.7012])),4)
    def test_ai_zeros_big(self):
        # Stress test with 50000 zeros: the reported function values must
        # match airy() and the zeros must actually be zeros (relative to
        # the asymptotic envelope |z|^(-1/4)).
        z, zp, ai_zpx, aip_zx = special.ai_zeros(50000)
        ai_z, aip_z, _, _ = special.airy(z)
        ai_zp, aip_zp, _, _ = special.airy(zp)
        ai_envelope = 1/abs(z)**(1./4)
        aip_envelope = abs(zp)**(1./4)
        # Check values
        assert_allclose(ai_zpx, ai_zp, rtol=1e-10)
        assert_allclose(aip_zx, aip_z, rtol=1e-10)
        # Check they are zeros
        assert_allclose(ai_z/ai_envelope, 0, atol=1e-10, rtol=0)
        assert_allclose(aip_zp/aip_envelope, 0, atol=1e-10, rtol=0)
        # Check first zeros, DLMF 9.9.1
        assert_allclose(z[:6],
                        [-2.3381074105, -4.0879494441, -5.5205598281,
                         -6.7867080901, -7.9441335871, -9.0226508533], rtol=1e-10)
        assert_allclose(zp[:6],
                        [-1.0187929716, -3.2481975822, -4.8200992112,
                         -6.1633073556, -7.3721772550, -8.4884867340], rtol=1e-10)
    def test_bi_zeros_big(self):
        # Same stress test as test_ai_zeros_big, but for Bi.
        z, zp, bi_zpx, bip_zx = special.bi_zeros(50000)
        _, _, bi_z, bip_z = special.airy(z)
        _, _, bi_zp, bip_zp = special.airy(zp)
        bi_envelope = 1/abs(z)**(1./4)
        bip_envelope = abs(zp)**(1./4)
        # Check values
        assert_allclose(bi_zpx, bi_zp, rtol=1e-10)
        assert_allclose(bip_zx, bip_z, rtol=1e-10)
        # Check they are zeros
        assert_allclose(bi_z/bi_envelope, 0, atol=1e-10, rtol=0)
        assert_allclose(bip_zp/bip_envelope, 0, atol=1e-10, rtol=0)
        # Check first zeros, DLMF 9.9.2
        assert_allclose(z[:6],
                        [-1.1737132227, -3.2710933028, -4.8307378417,
                         -6.1698521283, -7.3767620794, -8.4919488465], rtol=1e-10)
        assert_allclose(zp[:6],
                        [-2.2944396826, -4.0731550891, -5.5123957297,
                         -6.7812944460, -7.9401786892, -9.0195833588], rtol=1e-10)
class TestAssocLaguerre(TestCase):
    """Tests for the associated Laguerre polynomial evaluator."""

    def test_assoc_laguerre(self):
        # assoc_laguerre(x, n, k) must agree with evaluating the
        # generalized Laguerre polynomial genlaguerre(n, k) at x.
        poly = special.genlaguerre(11, 1)
        for x in (.2, 1):
            assert_array_almost_equal(special.assoc_laguerre(x, 11, 1),
                                      poly(x), 8)
class TestBesselpoly(TestCase):
    def test_besselpoly(self):
        # Placeholder: no reference values for besselpoly yet -- TODO fill in.
        pass
class TestKelvin(TestCase):
    """Tests for the Kelvin functions ber/bei/ker/kei, their derivatives
    and zeros.  Reference zeros come from Abramowitz & Stegun, Table 9.12
    (see the comments below)."""
    def test_bei(self):
        mbei = special.bei(2)
        assert_almost_equal(mbei, 0.9722916273066613,5)  # this may not be exact
    def test_beip(self):
        mbeip = special.beip(2)
        assert_almost_equal(mbeip,0.91701361338403631,5)  # this may not be exact
    def test_ber(self):
        mber = special.ber(2)
        assert_almost_equal(mber,0.75173418271380821,5)  # this may not be exact
    def test_berp(self):
        mberp = special.berp(2)
        assert_almost_equal(mberp,-0.49306712470943909,5)  # this may not be exact
    def test_bei_zeros(self):
        # Abramowitz & Stegun, Table 9.12
        bi = special.bei_zeros(5)
        assert_array_almost_equal(bi,array([5.02622,
                                            9.45541,
                                            13.89349,
                                            18.33398,
                                            22.77544]),4)
    def test_beip_zeros(self):
        # Zeros of bei' to higher precision than the A&S table.
        bip = special.beip_zeros(5)
        assert_array_almost_equal(bip,array([3.772673304934953,
                                             8.280987849760042,
                                             12.742147523633703,
                                             17.193431752512542,
                                             21.641143941167325]),8)
    def test_ber_zeros(self):
        ber = special.ber_zeros(5)
        assert_array_almost_equal(ber,array([2.84892,
                                             7.23883,
                                             11.67396,
                                             16.11356,
                                             20.55463]),4)
    def test_berp_zeros(self):
        brp = special.berp_zeros(5)
        assert_array_almost_equal(brp,array([6.03871,
                                            10.51364,
                                            14.96844,
                                            19.41758,
                                            23.86430]),4)
    def test_kelvin(self):
        # kelvin(x) bundles (ber+i*bei, ker+i*kei, berp+i*beip, kerp+i*keip);
        # cross-check against the individual functions.
        mkelv = special.kelvin(2)
        assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j,
                                         special.ker(2) + special.kei(2)*1j,
                                         special.berp(2) + special.beip(2)*1j,
                                         special.kerp(2) + special.keip(2)*1j),8)
    def test_kei(self):
        mkei = special.kei(2)
        assert_almost_equal(mkei,-0.20240006776470432,5)
    def test_keip(self):
        mkeip = special.keip(2)
        assert_almost_equal(mkeip,0.21980790991960536,5)
    def test_ker(self):
        mker = special.ker(2)
        assert_almost_equal(mker,-0.041664513991509472,5)
    def test_kerp(self):
        mkerp = special.kerp(2)
        assert_almost_equal(mkerp,-0.10660096588105264,5)
    def test_kei_zeros(self):
        kei = special.kei_zeros(5)
        assert_array_almost_equal(kei,array([3.91467,
                                             8.34422,
                                             12.78256,
                                             17.22314,
                                             21.66464]),4)
    def test_keip_zeros(self):
        keip = special.keip_zeros(5)
        assert_array_almost_equal(keip,array([4.93181,
                                              9.40405,
                                              13.85827,
                                              18.30717,
                                              22.75379]),4)
    # numbers come from 9.9 of A&S pg. 381
    def test_kelvin_zeros(self):
        # kelvin_zeros(n) returns the zeros of all eight Kelvin functions
        # at once; each set must match the individual *_zeros results.
        tmp = special.kelvin_zeros(5)
        berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp
        assert_array_almost_equal(berz,array([2.84892,
                                              7.23883,
                                              11.67396,
                                              16.11356,
                                              20.55463]),4)
        assert_array_almost_equal(beiz,array([5.02622,
                                              9.45541,
                                              13.89349,
                                              18.33398,
                                              22.77544]),4)
        assert_array_almost_equal(kerz,array([1.71854,
                                              6.12728,
                                              10.56294,
                                              15.00269,
                                              19.44382]),4)
        assert_array_almost_equal(keiz,array([3.91467,
                                              8.34422,
                                              12.78256,
                                              17.22314,
                                              21.66464]),4)
        assert_array_almost_equal(berpz,array([6.03871,
                                               10.51364,
                                               14.96844,
                                               19.41758,
                                               23.86430]),4)
        assert_array_almost_equal(beipz,array([3.77267,
                 # table from 1927 had 3.77320
                 #  but this is more accurate
                                               8.28099,
                                               12.74215,
                                               17.19343,
                                               21.64114]),4)
        assert_array_almost_equal(kerpz,array([2.66584,
                                               7.17212,
                                               11.63218,
                                               16.08312,
                                               20.53068]),4)
        assert_array_almost_equal(keipz,array([4.93181,
                                               9.40405,
                                               13.85827,
                                               18.30717,
                                               22.75379]),4)
    def test_ker_zeros(self):
        ker = special.ker_zeros(5)
        assert_array_almost_equal(ker,array([1.71854,
                                             6.12728,
                                             10.56294,
                                             15.00269,
                                             19.44381]),4)
    def test_kerp_zeros(self):
        kerp = special.kerp_zeros(5)
        assert_array_almost_equal(kerp,array([2.66584,
                                              7.17212,
                                              11.63218,
                                              16.08312,
                                              20.53068]),4)
class TestBernoulli(TestCase):
    """Tests for the Bernoulli number generator."""

    def test_bernoulli(self):
        # The first six Bernoulli numbers B_0 .. B_5 (B_1 = -1/2
        # convention; odd-index numbers beyond B_1 are zero).
        expected = array([1.0000, -0.5000, 0.1667, 0.0000, -0.0333, 0.0000])
        assert_array_almost_equal(special.bernoulli(5), expected, 4)
class TestBeta(TestCase):
    """Tests for the beta function family (beta, betaln, betainc,
    betaincinv)."""

    def test_beta(self):
        # B(a, b) = Gamma(a) * Gamma(b) / Gamma(a + b).
        g = special.gamma
        assert_almost_equal(special.beta(2, 4), g(2) * g(4) / g(6), 8)

    def test_betaln(self):
        # betaln is log(|beta|).
        assert_almost_equal(special.betaln(2, 4),
                            log(abs(special.beta(2, 4))), 8)

    def test_betainc(self):
        # With a = b = 1 the regularized incomplete beta is the identity.
        assert_almost_equal(special.betainc(1, 1, .2), 0.2, 8)

    def test_betaincinv(self):
        # betaincinv inverts betainc: round-trip recovers the probability.
        x = special.betaincinv(2, 4, .5)
        assert_almost_equal(special.betainc(2, 4, x), .5, 5)
class TestCombinatorics(TestCase):
    """Tests for the combinatorial helpers comb (combinations) and
    perm (permutations), in both float and exact integer modes."""
    def test_comb(self):
        assert_array_almost_equal(special.comb([10, 10], [3, 4]), [120., 210.])
        assert_almost_equal(special.comb(10, 3), 120.)
        assert_equal(special.comb(10, 3, exact=True), 120)
        # repetition=True computes multiset coefficients: C(n+k-1, k).
        assert_equal(special.comb(10, 3, exact=True, repetition=True), 220)
        # Float and exact paths must agree across a whole row of Pascal's
        # triangle (within float tolerance).
        assert_allclose([special.comb(20, k, exact=True) for k in range(21)],
                        special.comb(20, list(range(21))), atol=1e-15)
        # Arguments larger than the native integer range must still work
        # in exact mode (arbitrary-precision path).
        ii = np.iinfo(int).max + 1
        assert_equal(special.comb(ii, ii-1, exact=True), ii)
        expected = 100891344545564193334812497256
        assert_equal(special.comb(100, 50, exact=True), expected)
    def test_comb_with_np_int64(self):
        # np.int64 inputs must give the same result as Python ints.
        n = 70
        k = 30
        np_n = np.int64(n)
        np_k = np.int64(k)
        assert_equal(special.comb(np_n, np_k, exact=True),
                     special.comb(n, k, exact=True))
    def test_comb_zeros(self):
        # Out-of-range arguments (k > n, negative n or k) yield 0, not an
        # error, in both exact and float modes.
        assert_equal(special.comb(2, 3, exact=True), 0)
        assert_equal(special.comb(-1, 3, exact=True), 0)
        assert_equal(special.comb(2, -1, exact=True), 0)
        assert_equal(special.comb(2, -1, exact=False), 0)
        assert_array_almost_equal(special.comb([2, -1, 2, 10], [3, 3, -1, 3]),
                                  [0., 0., 0., 120.])
    def test_perm(self):
        assert_array_almost_equal(special.perm([10, 10], [3, 4]), [720., 5040.])
        assert_almost_equal(special.perm(10, 3), 720.)
        assert_equal(special.perm(10, 3, exact=True), 720)
    def test_perm_zeros(self):
        # Same out-of-range conventions as comb: result is 0.
        assert_equal(special.perm(2, 3, exact=True), 0)
        assert_equal(special.perm(-1, 3, exact=True), 0)
        assert_equal(special.perm(2, -1, exact=True), 0)
        assert_equal(special.perm(2, -1, exact=False), 0)
        assert_array_almost_equal(special.perm([2, -1, 2, 10], [3, 3, -1, 3]),
                                  [0., 0., 0., 720.])
class TestTrigonometric(TestCase):
    """Tests for degree-argument trig functions (cosdg, cotdg, sindg),
    the accurate helpers cosm1/cbrt, and sinc."""
    def test_cbrt(self):
        # Real cube root against the power form.
        cb = special.cbrt(27)
        cbrl = 27**(1.0/3.0)
        assert_approx_equal(cb,cbrl)
    def test_cbrtmore(self):
        cb1 = special.cbrt(27.9)
        cbrl1 = 27.9**(1.0/3.0)
        assert_almost_equal(cb1,cbrl1,8)
    def test_cosdg(self):
        # Cosine of an angle given in degrees.
        cdg = special.cosdg(90)
        cdgrl = cos(pi/2.0)
        assert_almost_equal(cdg,cdgrl,8)
    def test_cosdgmore(self):
        cdgm = special.cosdg(30)
        cdgmrl = cos(pi/6.0)
        assert_almost_equal(cdgm,cdgmrl,8)
    def test_cosm1(self):
        # cosm1(x) computes cos(x) - 1 accurately near zero.
        cs = (special.cosm1(0),special.cosm1(.3),special.cosm1(pi/10))
        csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1)
        assert_array_almost_equal(cs,csrl,8)
    def test_cotdg(self):
        # Cotangent of an angle given in degrees.
        ct = special.cotdg(30)
        ctrl = tan(pi/6.0)**(-1)
        assert_almost_equal(ct,ctrl,8)
    def test_cotdgmore(self):
        ct1 = special.cotdg(45)
        ctrl1 = tan(pi/4.0)**(-1)
        assert_almost_equal(ct1,ctrl1,8)
    def test_specialpoints(self):
        # cotdg must be essentially exact at multiples of 45 degrees,
        # including angles past 360 (argument reduction).
        assert_almost_equal(special.cotdg(45), 1.0, 14)
        assert_almost_equal(special.cotdg(-45), -1.0, 14)
        assert_almost_equal(special.cotdg(90), 0.0, 14)
        assert_almost_equal(special.cotdg(-90), 0.0, 14)
        assert_almost_equal(special.cotdg(135), -1.0, 14)
        assert_almost_equal(special.cotdg(-135), 1.0, 14)
        assert_almost_equal(special.cotdg(225), 1.0, 14)
        assert_almost_equal(special.cotdg(-225), -1.0, 14)
        assert_almost_equal(special.cotdg(270), 0.0, 14)
        assert_almost_equal(special.cotdg(-270), 0.0, 14)
        assert_almost_equal(special.cotdg(315), -1.0, 14)
        assert_almost_equal(special.cotdg(-315), 1.0, 14)
        assert_almost_equal(special.cotdg(765), 1.0, 14)
    def test_sinc(self):
        # the sinc implementation and more extensive sinc tests are in numpy
        assert_array_equal(special.sinc([0]), 1)
        assert_equal(special.sinc(0.0), 1.0)
    def test_sindg(self):
        sn = special.sindg(90)
        assert_equal(sn,1.0)
    def test_sindgmore(self):
        snm = special.sindg(30)
        snmrl = sin(pi/6.0)
        assert_almost_equal(snm,snmrl,8)
        snm1 = special.sindg(45)
        snmrl1 = sin(pi/4.0)
        assert_almost_equal(snm1,snmrl1,8)
class TestTandg(TestCase):
    """Tests for tandg, the tangent of an angle given in degrees."""

    def test_tandg(self):
        # Basic agreement with tan of the radian equivalent.
        assert_almost_equal(special.tandg(30), tan(pi/6.0), 8)

    def test_tandgmore(self):
        for degrees, radians in ((45, pi/4.0), (60, pi/3.0)):
            assert_almost_equal(special.tandg(degrees), tan(radians), 8)

    def test_specialpoints(self):
        # Multiples of 45 degrees must be essentially exact, including
        # negative angles (tandg is odd in its argument).
        cases = [(0, 0.0), (45, 1.0), (-45, -1.0), (135, -1.0),
                 (-135, 1.0), (180, 0.0), (-180, 0.0), (225, 1.0),
                 (-225, -1.0), (315, -1.0), (-315, 1.0)]
        for degrees, expected in cases:
            assert_almost_equal(special.tandg(degrees), expected, 14)
class TestEllip(TestCase):
    """Tests for the elliptic integrals and Jacobi elliptic functions.

    Reference values come from Abramowitz & Stegun (pages noted inline)
    plus a few regression cases for specific tickets.
    """
    def test_ellipj_nan(self):
        """Regression test for #912."""
        special.ellipj(0.5, np.nan)
    def test_ellipj(self):
        # With m = 0 the Jacobi functions reduce to circular trig:
        # sn = sin, cn = cos, dn = 1, ph = u.
        el = special.ellipj(0.2,0)
        rel = [sin(0.2),cos(0.2),1.0,0.20]
        assert_array_almost_equal(el,rel,13)
    def test_ellipk(self):
        elk = special.ellipk(.2)
        assert_almost_equal(elk,1.659623598610528,11)
        # ellipkm1(p) = K(1 - p): check the limiting/special values.
        assert_equal(special.ellipkm1(0.0), np.inf)
        assert_equal(special.ellipkm1(1.0), pi/2)
        assert_equal(special.ellipkm1(np.inf), 0.0)
        assert_equal(special.ellipkm1(np.nan), np.nan)
        assert_equal(special.ellipkm1(-1), np.nan)
        assert_allclose(special.ellipk(-10), 0.7908718902387385)
    def test_ellipkinc(self):
        # F(pi/2, m) is the complete integral K(m).
        elkinc = special.ellipkinc(pi/2,.2)
        elk = special.ellipk(0.2)
        assert_almost_equal(elkinc,elk,15)
        alpha = 20*pi/180
        phi = 45*pi/180
        m = sin(alpha)**2
        elkinc = special.ellipkinc(phi,m)
        assert_almost_equal(elkinc,0.79398143,8)
        # From pg. 614 of A & S
        assert_equal(special.ellipkinc(pi/2, 0.0), pi/2)
        assert_equal(special.ellipkinc(pi/2, 1.0), np.inf)
        assert_equal(special.ellipkinc(pi/2, -np.inf), 0.0)
        assert_equal(special.ellipkinc(pi/2, np.nan), np.nan)
        assert_equal(special.ellipkinc(pi/2, 2), np.nan)
        assert_equal(special.ellipkinc(0, 0.5), 0.0)
        assert_equal(special.ellipkinc(np.inf, 0.5), np.inf)
        assert_equal(special.ellipkinc(-np.inf, 0.5), -np.inf)
        assert_equal(special.ellipkinc(np.inf, np.inf), np.nan)
        assert_equal(special.ellipkinc(np.inf, -np.inf), np.nan)
        assert_equal(special.ellipkinc(-np.inf, -np.inf), np.nan)
        assert_equal(special.ellipkinc(-np.inf, np.inf), np.nan)
        assert_equal(special.ellipkinc(np.nan, 0.5), np.nan)
        assert_equal(special.ellipkinc(np.nan, np.nan), np.nan)
        assert_allclose(special.ellipkinc(0.38974112035318718, 1), 0.4, rtol=1e-14)
        assert_allclose(special.ellipkinc(1.5707, -10), 0.79084284661724946)
    def test_ellipkinc_2(self):
        # Regression test for gh-3550
        # ellipkinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
        mbad = 0.68359375000000011
        phi = 0.9272952180016123
        m = np.nextafter(mbad, 0)
        mvals = []
        for j in range(10):
            mvals.append(m)
            m = np.nextafter(m, 1)
        f = special.ellipkinc(phi, mvals)
        assert_array_almost_equal_nulp(f, 1.0259330100195334 * np.ones_like(f), 1)
        # this bug also appears at phi + n * pi for at least small n
        f1 = special.ellipkinc(phi + pi, mvals)
        assert_array_almost_equal_nulp(f1, 5.1296650500976675 * np.ones_like(f1), 2)
    def test_ellipkinc_singular(self):
        # ellipkinc(phi, 1) has closed form and is finite only for phi in (-pi/2, pi/2)
        # BUG FIX: the tolerances below were written as rtol=1e14 (a typo
        # for 1e-14), which made every assert_allclose vacuously true.
        xlog = np.logspace(-300, -17, 25)
        xlin = np.linspace(1e-17, 0.1, 25)
        xlin2 = np.linspace(0.1, pi/2, 25, endpoint=False)
        # Closed form: F(phi, 1) = arcsinh(tan(phi)).
        assert_allclose(special.ellipkinc(xlog, 1), np.arcsinh(np.tan(xlog)), rtol=1e-14)
        assert_allclose(special.ellipkinc(xlin, 1), np.arcsinh(np.tan(xlin)), rtol=1e-14)
        assert_allclose(special.ellipkinc(xlin2, 1), np.arcsinh(np.tan(xlin2)), rtol=1e-14)
        assert_equal(special.ellipkinc(np.pi/2, 1), np.inf)
        # F(phi, 1) is odd in phi, so the negated grids mirror the above.
        assert_allclose(special.ellipkinc(-xlog, 1), np.arcsinh(np.tan(-xlog)), rtol=1e-14)
        assert_allclose(special.ellipkinc(-xlin, 1), np.arcsinh(np.tan(-xlin)), rtol=1e-14)
        assert_allclose(special.ellipkinc(-xlin2, 1), np.arcsinh(np.tan(-xlin2)), rtol=1e-14)
        assert_equal(special.ellipkinc(-np.pi/2, 1), np.inf)
    def test_ellipe(self):
        ele = special.ellipe(.2)
        assert_almost_equal(ele,1.4890350580958529,8)
        # Special/limiting values of the complete integral E(m).
        assert_equal(special.ellipe(0.0), pi/2)
        assert_equal(special.ellipe(1.0), 1.0)
        assert_equal(special.ellipe(-np.inf), np.inf)
        assert_equal(special.ellipe(np.nan), np.nan)
        assert_equal(special.ellipe(2), np.nan)
        assert_allclose(special.ellipe(-10), 3.6391380384177689)
    def test_ellipeinc(self):
        # E(pi/2, m) is the complete integral E(m).
        eleinc = special.ellipeinc(pi/2,.2)
        ele = special.ellipe(0.2)
        assert_almost_equal(eleinc,ele,14)
        # pg 617 of A & S
        alpha, phi = 52*pi/180,35*pi/180
        m = sin(alpha)**2
        eleinc = special.ellipeinc(phi,m)
        assert_almost_equal(eleinc, 0.58823065, 8)
        assert_equal(special.ellipeinc(pi/2, 0.0), pi/2)
        assert_equal(special.ellipeinc(pi/2, 1.0), 1.0)
        assert_equal(special.ellipeinc(pi/2, -np.inf), np.inf)
        assert_equal(special.ellipeinc(pi/2, np.nan), np.nan)
        assert_equal(special.ellipeinc(pi/2, 2), np.nan)
        assert_equal(special.ellipeinc(0, 0.5), 0.0)
        assert_equal(special.ellipeinc(np.inf, 0.5), np.inf)
        assert_equal(special.ellipeinc(-np.inf, 0.5), -np.inf)
        assert_equal(special.ellipeinc(np.inf, -np.inf), np.inf)
        assert_equal(special.ellipeinc(-np.inf, -np.inf), -np.inf)
        assert_equal(special.ellipeinc(np.inf, np.inf), np.nan)
        assert_equal(special.ellipeinc(-np.inf, np.inf), np.nan)
        assert_equal(special.ellipeinc(np.nan, 0.5), np.nan)
        assert_equal(special.ellipeinc(np.nan, np.nan), np.nan)
        assert_allclose(special.ellipeinc(1.5707, -10), 3.6388185585822876)
    def test_ellipeinc_2(self):
        # Regression test for gh-3550
        # ellipeinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
        mbad = 0.68359375000000011
        phi = 0.9272952180016123
        m = np.nextafter(mbad, 0)
        mvals = []
        for j in range(10):
            mvals.append(m)
            m = np.nextafter(m, 1)
        f = special.ellipeinc(phi, mvals)
        assert_array_almost_equal_nulp(f, 0.84442884574781019 * np.ones_like(f), 2)
        # this bug also appears at phi + n * pi for at least small n
        f1 = special.ellipeinc(phi + pi, mvals)
        assert_array_almost_equal_nulp(f1, 3.3471442287390509 * np.ones_like(f1), 4)
class TestErf(TestCase):
    """Tests for the error-function family (erf, erfc, erfcx, erfi, dawsn,
    wofz) and their mutual consistency relations."""
    def test_erf(self):
        er = special.erf(.25)
        assert_almost_equal(er,0.2763263902,8)
    def test_erf_zeros(self):
        # First five complex zeros of erf in the first quadrant.
        erz = special.erf_zeros(5)
        erzr = array([1.45061616+1.88094300j,
                     2.24465928+2.61657514j,
                     2.83974105+3.17562810j,
                     3.33546074+3.64617438j,
                     3.76900557+4.06069723j])
        assert_array_almost_equal(erz,erzr,4)
    def _check_variant_func(self, func, other_func, rtol, atol=0):
        # Helper: verify `func` against an equivalent formula `other_func`
        # on random heavy-tailed real and complex samples, skipping points
        # where the reference formula overflows to non-finite values.
        np.random.seed(1234)
        n = 10000
        x = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
        y = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
        z = x + 1j*y
        old_errors = np.seterr(all='ignore')
        try:
            w = other_func(z)
            w_real = other_func(x).real
            mask = np.isfinite(w)
            w = w[mask]
            z = z[mask]
            mask = np.isfinite(w_real)
            w_real = w_real[mask]
            x = x[mask]
            # test both real and complex variants
            assert_func_equal(func, w, z, rtol=rtol, atol=atol)
            assert_func_equal(func, w_real, x, rtol=rtol, atol=atol)
        finally:
            np.seterr(**old_errors)
    def test_erfc_consistent(self):
        # erfc(z) = 1 - erf(z)
        self._check_variant_func(
            cephes.erfc,
            lambda z: 1 - cephes.erf(z),
            rtol=1e-12,
            atol=1e-14  # <- the test function loses precision
            )
    def test_erfcx_consistent(self):
        # erfcx(z) = exp(z^2) * erfc(z)
        self._check_variant_func(
            cephes.erfcx,
            lambda z: np.exp(z*z) * cephes.erfc(z),
            rtol=1e-12
            )
    def test_erfi_consistent(self):
        # erfi(z) = -i * erf(i*z)
        self._check_variant_func(
            cephes.erfi,
            lambda z: -1j * cephes.erf(1j*z),
            rtol=1e-12
            )
    def test_dawsn_consistent(self):
        # dawsn(z) = sqrt(pi)/2 * exp(-z^2) * erfi(z)
        self._check_variant_func(
            cephes.dawsn,
            lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z),
            rtol=1e-12
            )
    def test_erfcinv(self):
        i = special.erfcinv(1)
        # Use assert_array_equal instead of assert_equal, so the comparsion
        # of -0.0 and 0.0 doesn't fail.
        assert_array_equal(i, 0)
    def test_erfinv(self):
        i = special.erfinv(0)
        assert_equal(i,0)
    def test_errprint(self):
        # errprint toggles error reporting and returns the previous state;
        # flipping it twice must restore the original setting.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            a = special.errprint()
            b = 1-a  # a is the state 1-a inverts state
            c = special.errprint(b)  # returns last state 'a'
            assert_equal(a,c)
            d = special.errprint(a)  # returns to original state
            assert_equal(d,b)  # makes sure state was returned
            # assert_equal(d,1-a)
    def test_erf_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan, -1, 1]
        assert_allclose(special.erf(vals), expected, rtol=1e-15)
    def test_erfc_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan, 2, 0]
        assert_allclose(special.erfc(vals), expected, rtol=1e-15)
    def test_erfcx_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan, np.inf, 0]
        assert_allclose(special.erfcx(vals), expected, rtol=1e-15)
    def test_erfi_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan, -np.inf, np.inf]
        assert_allclose(special.erfi(vals), expected, rtol=1e-15)
    def test_dawsn_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan, -0.0, 0.0]
        assert_allclose(special.dawsn(vals), expected, rtol=1e-15)
    def test_wofz_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan + np.nan * 1.j, 0.-0.j, 0.+0.j]
        assert_allclose(special.wofz(vals), expected, rtol=1e-15)
class TestEuler(TestCase):
    """Tests for the Euler number generator."""
    def test_euler(self):
        # Small sizes: euler(n) returns E_0..E_n; odd-index Euler numbers
        # are zero and E_2 = -1.
        eu0 = special.euler(0)
        eu1 = special.euler(1)
        eu2 = special.euler(2)   # just checking segfaults
        assert_allclose(eu0, [1], rtol=1e-15)
        assert_allclose(eu1, [1, 0], rtol=1e-15)
        assert_allclose(eu2, [1, 0, -1], rtol=1e-15)
        eu24 = special.euler(24)
        # MathWorld lists |E_{2k}|; the sign alternates, which the loop
        # below reconstructs (negative for odd k).
        mathworld = [1,1,5,61,1385,50521,2702765,199360981,
                     19391512145,2404879675441,
                     370371188237525,69348874393137901,
                     15514534163557086905]
        correct = zeros((25,),'d')
        for k in range(0,13):
            if (k % 2):
                correct[2*k] = -float(mathworld[k])
            else:
                correct[2*k] = float(mathworld[k])
        # Relative error; nan_to_num maps the 0/0 entries (odd indices,
        # where correct == 0) to 0 so max() only sees real deviations.
        olderr = np.seterr(all='ignore')
        try:
            err = nan_to_num((eu24-correct)/correct)
            errmax = max(err)
        finally:
            np.seterr(**olderr)
        assert_almost_equal(errmax, 0.0, 14)
class TestExp(TestCase):
    """Tests for the exponential helpers exp2, exp10 and expm1."""

    def test_exp2(self):
        # Integer argument: exact power of two.
        assert_equal(special.exp2(2), 2**2)

    def test_exp2more(self):
        assert_almost_equal(special.exp2(2.5), 2**(2.5), 8)

    def test_exp10(self):
        # Integer argument: exact power of ten (to float precision).
        assert_approx_equal(special.exp10(2), 10**2)

    def test_exp10more(self):
        assert_almost_equal(special.exp10(2.5), 10**(2.5), 8)

    def test_expm1(self):
        # expm1(x) = exp(x) - 1, checked at a few integer points.
        for x in (2, 3, 4):
            assert_almost_equal(special.expm1(x), exp(x) - 1, 8)

    def test_expm1more(self):
        for x in (2, 2.1, 2.2):
            assert_almost_equal(special.expm1(x), exp(x) - 1, 8)
class TestFactorialFunctions(TestCase):
    """Tests for factorial, factorial2 (double factorial) and factorialk,
    covering float (exact=False) and arbitrary-precision (exact=True)
    paths, array inputs, and negative-argument conventions."""
    def test_factorial(self):
        # Some known values, float math
        assert_array_almost_equal(special.factorial(0), 1)
        assert_array_almost_equal(special.factorial(1), 1)
        assert_array_almost_equal(special.factorial(2), 2)
        assert_array_almost_equal([6., 24., 120.],
                                  special.factorial([3, 4, 5], exact=False))
        assert_array_almost_equal(special.factorial([[5, 3], [4, 3]]),
                                  [[120, 6], [24, 6]])

        # Some known values, integer math
        assert_equal(special.factorial(0, exact=True), 1)
        assert_equal(special.factorial(1, exact=True), 1)
        assert_equal(special.factorial(2, exact=True), 2)
        assert_equal(special.factorial(5, exact=True), 120)
        assert_equal(special.factorial(15, exact=True), 1307674368000)

        # ndarray shape is maintained
        assert_equal(special.factorial([7, 4, 15, 10], exact=True),
                     [5040, 24, 1307674368000, 3628800])

        assert_equal(special.factorial([[5, 3], [4, 3]], True),
                     [[120, 6], [24, 6]])

        # object arrays
        assert_equal(special.factorial(np.arange(-3, 22), True),
                     special.factorial(np.arange(-3, 22), False))

        # int64 array
        assert_equal(special.factorial(np.arange(-3, 15), True),
                     special.factorial(np.arange(-3, 15), False))

        # int32 array
        assert_equal(special.factorial(np.arange(-3, 5), True),
                     special.factorial(np.arange(-3, 5), False))

        # Consistent output for n < 0
        # (negative arguments give 0, not an error)
        for exact in (True, False):
            assert_array_equal(0, special.factorial(-3, exact))
        assert_array_equal([1, 2, 0, 0],
                           special.factorial([1, 2, -5, -4], exact))

        for n in range(0, 22):
            # Compare all with math.factorial
            correct = math.factorial(n)
            assert_array_equal(correct, special.factorial(n, True))
            assert_array_equal(correct, special.factorial([n], True)[0])

            assert_allclose(float(correct), special.factorial(n, False))
            assert_allclose(float(correct), special.factorial([n], False)[0])

            # Compare exact=True vs False, scalar vs array
            assert_array_equal(special.factorial(n, True),
                               special.factorial(n, False))

            assert_array_equal(special.factorial([n], True),
                               special.factorial([n], False))
    def test_factorial2(self):
        # Double factorial: n!! multiplies every other integer down to 1.
        assert_array_almost_equal([105., 384., 945.],
                                  special.factorial2([7, 8, 9], exact=False))
        assert_equal(special.factorial2(7, exact=True), 105)
    def test_factorialk(self):
        # k-factorial: factorialk(n, 1) is the plain factorial,
        # factorialk(5, 3) = 5 * 2 = 10.
        assert_equal(special.factorialk(5, 1, exact=True), 120)
        assert_equal(special.factorialk(5, 3, exact=True), 10)
class TestFresnel(TestCase):
    """Tests for the Fresnel integrals S(z), C(z) and their complex zeros."""
    def test_fresnel(self):
        # fresnel(x) returns the pair (S(x), C(x)).
        frs = array(special.fresnel(.5))
        assert_array_almost_equal(frs,array([0.064732432859999287, 0.49234422587144644]),8)

    def test_fresnel_inf1(self):
        # Both integrals tend to 1/2 as x -> +inf.
        frs = special.fresnel(np.inf)
        assert_equal(frs, (0.5, 0.5))

    def test_fresnel_inf2(self):
        # S and C are odd, so the limit at -inf is -1/2.
        frs = special.fresnel(-np.inf)
        assert_equal(frs, (-0.5, -0.5))

    # values from pg 329  Table 7.11 of A & S
    #  slightly corrected in 4th decimal place
    def test_fresnel_zeros(self):
        szo, czo = special.fresnel_zeros(5)
        assert_array_almost_equal(szo,
                                  array([2.0093+0.2885j,
                                          2.8335+0.2443j,
                                          3.4675+0.2185j,
                                          4.0026+0.2009j,
                                          4.4742+0.1877j]),3)
        assert_array_almost_equal(czo,
                                  array([1.7437+0.3057j,
                                          2.6515+0.2529j,
                                          3.3204+0.2240j,
                                          3.8757+0.2047j,
                                          4.3611+0.1907j]),3)
        # The returned points must actually be zeros of S and C.
        vals1 = special.fresnel(szo)[0]
        vals2 = special.fresnel(czo)[1]
        assert_array_almost_equal(vals1,0,14)
        assert_array_almost_equal(vals2,0,14)

    def test_fresnelc_zeros(self):
        # fresnelc_zeros must agree with the C-component of fresnel_zeros.
        szo, czo = special.fresnel_zeros(6)
        frc = special.fresnelc_zeros(6)
        assert_array_almost_equal(frc,czo,12)

    def test_fresnels_zeros(self):
        # fresnels_zeros must agree with the S-component of fresnel_zeros.
        szo, czo = special.fresnel_zeros(5)
        frs = special.fresnels_zeros(5)
        assert_array_almost_equal(frs,szo,12)
class TestGamma(TestCase):
    """Tests for the gamma function family: gamma, gammaln, the regularized
    incomplete gamma functions (gammainc/gammaincc) and their inverses."""
    def test_gamma(self):
        gam = special.gamma(5)
        assert_equal(gam,24.0)
    def test_gammaln(self):
        gamln = special.gammaln(3)
        lngam = log(special.gamma(3))
        assert_almost_equal(gamln,lngam,8)
    def test_gammainc(self):
        gama = special.gammainc(.5,.5)
        assert_almost_equal(gama,.7,1)
    def test_gammaincnan(self):
        # Negative first argument is outside the domain -> nan.
        gama = special.gammainc(-1,1)
        assert_(isnan(gama))
    def test_gammainczero(self):
        # bad arg but zero integration limit
        gama = special.gammainc(-1,0)
        assert_equal(gama,0.0)
    def test_gammaincinf(self):
        # Regularized lower incomplete gamma tends to 1 at infinity.
        gama = special.gammainc(0.5, np.inf)
        assert_equal(gama,1.0)
    def test_gammaincc(self):
        # Complement relation: gammaincc = 1 - gammainc.
        gicc = special.gammaincc(.5,.5)
        greal = 1 - special.gammainc(.5,.5)
        assert_almost_equal(gicc,greal,8)
    def test_gammainccnan(self):
        gama = special.gammaincc(-1,1)
        assert_(isnan(gama))
    def test_gammainccinf(self):
        gama = special.gammaincc(0.5,np.inf)
        assert_equal(gama,0.0)
    def test_gammainccinv(self):
        # At p = 0.5 the two inverses coincide by the complement relation.
        gccinv = special.gammainccinv(.5,.5)
        gcinv = special.gammaincinv(.5,.5)
        assert_almost_equal(gccinv,gcinv,8)
    @with_special_errors
    def test_gammaincinv(self):
        # Round-trip through gammainc, including an extreme-tail value.
        y = special.gammaincinv(.4,.4)
        x = special.gammainc(.4,y)
        assert_almost_equal(x,0.4,1)
        y = special.gammainc(10, 0.05)
        x = special.gammaincinv(10, 2.5715803516000736e-20)
        assert_almost_equal(0.05, x, decimal=10)
        assert_almost_equal(y, 2.5715803516000736e-20, decimal=10)
        x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18)
        assert_almost_equal(11.0, x, decimal=10)
    @with_special_errors
    def test_975(self):
        # Regression test for ticket #975 -- switch point in algorithm
        # check that things work OK at the point, immediately next floats
        # around it, and a bit further away
        pts = [0.25,
               np.nextafter(0.25, 0), 0.25 - 1e-12,
               np.nextafter(0.25, 1), 0.25 + 1e-12]
        for xp in pts:
            y = special.gammaincinv(.4, xp)
            x = special.gammainc(0.4, y)
            assert_tol_equal(x, xp, rtol=1e-12)
    def test_rgamma(self):
        rgam = special.rgamma(8)
        rlgam = 1/special.gamma(8)
        assert_almost_equal(rgam,rlgam,8)
    def test_infinity(self):
        # Gamma has poles at non-positive integers; its reciprocal is 0 there.
        assert_(np.isinf(special.gamma(-1)))
        assert_equal(special.rgamma(-1), 0)
class TestHankel(TestCase):
    """Tests for the Hankel functions H1, H2 and their exponentially
    scaled variants hankel1e/hankel2e."""

    def test_negv1(self):
        # H1_{-v}(z) = exp(v*pi*i) * H1_v(z); for integer v=3 this is a
        # sign flip.
        assert_almost_equal(special.hankel1(-3,2), -special.hankel1(3,2), 14)
    def test_hankel1(self):
        # Defining relation: H1_v(z) = J_v(z) + i*Y_v(z).
        hank1 = special.hankel1(1,.1)
        hankrl = (special.jv(1,.1) + special.yv(1,.1)*1j)
        assert_almost_equal(hank1,hankrl,8)
    def test_negv1e(self):
        assert_almost_equal(special.hankel1e(-3,2), -special.hankel1e(3,2), 14)
    def test_hankel1e(self):
        # Scaling relation: hankel1e(v, z) = hankel1(v, z) * exp(-i*z).
        hank1e = special.hankel1e(1,.1)
        hankrle = special.hankel1(1,.1)*exp(-.1j)
        assert_almost_equal(hank1e,hankrle,8)
    def test_negv2(self):
        assert_almost_equal(special.hankel2(-3,2), -special.hankel2(3,2), 14)
    def test_hankel2(self):
        # Defining relation: H2_v(z) = J_v(z) - i*Y_v(z).
        hank2 = special.hankel2(1,.1)
        hankrl2 = (special.jv(1,.1) - special.yv(1,.1)*1j)
        assert_almost_equal(hank2,hankrl2,8)
    def test_neg2e(self):
        assert_almost_equal(special.hankel2e(-3,2), -special.hankel2e(3,2), 14)
    def test_hankl2e(self):
        # Scaling relation: hankel2e(v, z) = hankel2(v, z) * exp(+i*z).
        # BUG FIX: this test previously compared special.hankel2e(1,.1)
        # against itself, which could never fail; compare against the
        # defining relation instead, mirroring test_hankel1e.
        hank2e = special.hankel2e(1,.1)
        hankrl2e = special.hankel2(1,.1)*exp(.1j)
        assert_almost_equal(hank2e,hankrl2e,8)
class TestHyper(TestCase):
    """Hypergeometric functions (hyp0f1, hyp1f1, hyp2f1, hyperu) plus the
    Hankel-function derivative wrappers h1vp/h2vp."""

    def test_h1vp(self):
        # H1_v'(z) = J_v'(z) + i*Y_v'(z)
        h1 = special.h1vp(1,.1)
        h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j)
        assert_almost_equal(h1,h1real,8)

    def test_h2vp(self):
        # H2_v'(z) = J_v'(z) - i*Y_v'(z)
        h2 = special.h2vp(1,.1)
        h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j)
        assert_almost_equal(h2,h2real,8)

    def test_hyp0f1(self):
        # scalar input
        assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12)
        assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15)

        # float input, expected values match mpmath
        x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 1.5])
        expected = np.array([0.58493659229143, 0.70566805723127, 1.0,
                             1.37789689539747, 1.60373685288480])
        assert_allclose(x, expected, rtol=1e-12)

        # complex input
        x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j)
        assert_allclose(x, expected.astype(complex), rtol=1e-12)

        # test broadcasting
        x1 = [0.5, 1.5, 2.5]
        x2 = [0, 1, 0.5]
        x = special.hyp0f1(x1, x2)
        expected = [1.0, 1.8134302039235093, 1.21482702689997]
        assert_allclose(x, expected, rtol=1e-12)
        # NOTE(review): np.row_stack was removed in NumPy 2.0; np.vstack is
        # the equivalent modern spelling -- update when dropping old numpy.
        x = special.hyp0f1(np.row_stack([x1] * 2), x2)
        assert_allclose(x, np.row_stack([expected] * 2), rtol=1e-12)
        # mismatched shapes (3 rows vs. 2 values) must not silently broadcast
        assert_raises(ValueError, special.hyp0f1,
                      np.row_stack([x1] * 3), [0, 1])

    def test_hyp0f1_gh5764(self):
        # Just checks the point that failed; there's a more systematic
        # test in test_mpmath
        res = special.hyp0f1(0.8, 0.5 + 0.5*1J)
        # The expected value was generated using mpmath
        assert_almost_equal(res, 1.6139719776441115 + 1J*0.80893054061790665)

    def test_hyp1f1(self):
        hyp1 = special.hyp1f1(.1,.1,.3)
        # 1F1(a, a, x) = exp(x); exp(0.3) = 1.3498588...
        assert_almost_equal(hyp1, 1.3498588075760032,7)

        # test contributed by Moritz Deger (2008-05-29)
        # http://projects.scipy.org/scipy/scipy/ticket/659

        # reference data obtained from mathematica [ a, b, x, m(a,b,x)]:
        # produced with test_hyp1f1.nb
        ref_data = array([[-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04],
                          [2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00],
                          [-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05],
                          [5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08],
                          [-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24],
                          [4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21],
                          [1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13],
                          [2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13],
                          [1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02],
                          [1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10],
                          [-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01],
                          [8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21],
                          [1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20],
                          [-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07],
                          [2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03],
                          [2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02],
                          [6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11],
                          [-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03],
                          [2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17],
                          [8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01],
                          [1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00],
                          [-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00],
                          [2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23],
                          [-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01],
                          [3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04],
                          [-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08],
                          [2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01],
                          [-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07],
                          [1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03],
                          [-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09],
                          [-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06],
                          [-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00],
                          [-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01],
                          [3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02],
                          [6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02],
                          [-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02],
                          [2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00],
                          [1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09],
                          [1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01],
                          [1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00],
                          [1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02],
                          [-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05],
                          [-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05],
                          [7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02],
                          [2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02],
                          [-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13],
                          [-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05],
                          [-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12],
                          [-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01],
                          [-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16],
                          [2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37],
                          [5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06],
                          [-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02],
                          [-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12],
                          [5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27],
                          [-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04],
                          [1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06],
                          [2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07],
                          [5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03],
                          [-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07],
                          [1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27],
                          [6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12],
                          [1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32],
                          [-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04],
                          [-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01],
                          [-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02],
                          [-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19],
                          [1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09],
                          [2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31],
                          [-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01],
                          [2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02],
                          [-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08],
                          [2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09],
                          [1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33],
                          [-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01],
                          [7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29],
                          [2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01],
                          [8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29],
                          [-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02],
                          [-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00],
                          [-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08],
                          [-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01],
                          [-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01],
                          [-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01],
                          [6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13],
                          [-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11],
                          [-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02],
                          [6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02],
                          [-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01],
                          [7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31],
                          [-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04],
                          [5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25],
                          [3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01],
                          [-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00],
                          [2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02],
                          [2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05],
                          [-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02],
                          [-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01],
                          [-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01],
                          [-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]])
        # Relative-error check (< 1e-4) against the Mathematica values above.
        for a,b,c,expected in ref_data:
            result = special.hyp1f1(a,b,c)
            assert_(abs(expected - result)/expected < 1e-4)

    def test_hyp1f1_gh2957(self):
        # Continuity across the internal algorithm switch near x ~ -709.78
        hyp1 = special.hyp1f1(0.5, 1.5, -709.7827128933)
        hyp2 = special.hyp1f1(0.5, 1.5, -709.7827128934)
        assert_almost_equal(hyp1, hyp2, 12)

    def test_hyp1f1_gh2282(self):
        hyp = special.hyp1f1(0.5, 1.5, -1000)
        assert_almost_equal(hyp, 0.028024956081989643, 12)

    def test_hyp1f2(self):
        # placeholder: not yet covered
        pass

    def test_hyp2f0(self):
        # placeholder: not yet covered
        pass

    def test_hyp2f1(self):
        # a collection of special cases taken from AMS 55
        values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))],
                  [0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)],
                  [1, 1, 2, 0.2, -1/0.2*log(1-0.2)],
                  [3, 3.5, 1.5, 0.2**2,
                      0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))],
                  [-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)],
                  [3, 4, 8, 1, special.gamma(8)*special.gamma(8-4-3)/special.gamma(8-3)/special.gamma(8-4)],
                  [3, 2, 3-2+1, -1, 1./2**3*sqrt(pi) *
                      special.gamma(1+3-2)/special.gamma(1+0.5*3-2)/special.gamma(0.5+0.5*3)],
                  [5, 2, 5-2+1, -1, 1./2**5*sqrt(pi) *
                      special.gamma(1+5-2)/special.gamma(1+0.5*5-2)/special.gamma(0.5+0.5*5)],
                  [4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*special.gamma(4./3) *
                      special.gamma(1.5-2*4)/special.gamma(3./2)/special.gamma(4./3-2*4)],
                  # and some others
                  # ticket #424
                  [1.5, -0.5, 1.0, -10.0, 4.1300097765277476484],
                  # negative integer a or b, with c-a-b integer and x > 0.9
                  [-2,3,1,0.95,0.715],
                  [2,-3,1,0.95,-0.007],
                  [-6,3,1,0.95,0.0000810625],
                  [2,-5,1,0.95,-0.000029375],
                  # huge negative integers
                  (10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24),
                  (10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18),
                  ]
        for i, (a, b, c, x, v) in enumerate(values):
            cv = special.hyp2f1(a, b, c, x)
            assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)

    def test_hyp3f0(self):
        # placeholder: not yet covered
        pass

    def test_hyperu(self):
        val1 = special.hyperu(1,0.1,100)
        assert_almost_equal(val1,0.0098153,7)
        a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2]
        a,b = asarray(a), asarray(b)
        z = 0.5
        hypu = special.hyperu(a,b,z)
        # Kummer's relation expressing U in terms of 1F1 (A&S 13.1.3)
        hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z) /
                               (special.gamma(1+a-b)*special.gamma(b)) -
                               z**(1-b)*special.hyp1f1(1+a-b,2-b,z)
                               / (special.gamma(a)*special.gamma(2-b)))
        assert_array_almost_equal(hypu,hprl,12)

    def test_hyperu_gh2287(self):
        assert_almost_equal(special.hyperu(1, 1.5, 20.2),
                            0.048360918656699191, 12)
class TestBessel(TestCase):
    """Bessel functions of the first/second/modified kinds (j/y/i/k),
    their zeros, derivatives, integrals, and Cephes-vs-AMOS consistency."""

    def test_itj0y0(self):
        # integrals of J0 and Y0 from 0 to x
        it0 = array(special.itj0y0(.2))
        assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8)

    def test_it2j0y0(self):
        it2 = array(special.it2j0y0(.2))
        assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8)

    def test_negv_iv(self):
        # I_{-n}(z) = I_n(z) for integer order
        assert_equal(special.iv(3,2), special.iv(-3,2))

    def test_j0(self):
        oz = special.j0(.1)
        ozr = special.jn(0,.1)
        assert_almost_equal(oz,ozr,8)

    def test_j1(self):
        o1 = special.j1(.1)
        o1r = special.jn(1,.1)
        assert_almost_equal(o1,o1r,8)

    def test_jn(self):
        jnnr = special.jn(1,.2)
        assert_almost_equal(jnnr,0.099500832639235995,8)

    def test_negv_jv(self):
        # J_{-n}(z) = (-1)^n J_n(z); n=3 is odd
        assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14)

    def test_jv(self):
        values = [[0, 0.1, 0.99750156206604002],
                  [2./3, 1e-8, 0.3239028506761532e-5],
                  [2./3, 1e-10, 0.1503423854873779e-6],
                  [3.1, 1e-10, 0.1711956265409013e-32],
                  [2./3, 4.0, -0.2325440850267039],
                  ]
        for i, (v, x, y) in enumerate(values):
            yc = special.jv(v, x)
            assert_almost_equal(yc, y, 8, err_msg='test #%d' % i)

    def test_negv_jve(self):
        assert_almost_equal(special.jve(-3,2), -special.jve(3,2), 14)

    def test_jve(self):
        # jve(v, z) = jv(v, z) * exp(-abs(z.imag))
        jvexp = special.jve(1,.2)
        assert_almost_equal(jvexp,0.099500832639235995,8)
        jvexp1 = special.jve(1,.2+1j)
        z = .2+1j
        jvexpr = special.jv(1,z)*exp(-abs(z.imag))
        assert_almost_equal(jvexp1,jvexpr,8)

    def test_jn_zeros(self):
        jn0 = special.jn_zeros(0,5)
        jn1 = special.jn_zeros(1,5)
        assert_array_almost_equal(jn0,array([2.4048255577,
                                             5.5200781103,
                                             8.6537279129,
                                             11.7915344391,
                                             14.9309177086]),4)
        assert_array_almost_equal(jn1,array([3.83171,
                                             7.01559,
                                             10.17347,
                                             13.32369,
                                             16.47063]),4)

        jn102 = special.jn_zeros(102,5)
        assert_tol_equal(jn102, array([110.89174935992040343,
                                       117.83464175788308398,
                                       123.70194191713507279,
                                       129.02417238949092824,
                                       134.00114761868422559]), rtol=1e-13)

        jn301 = special.jn_zeros(301,5)
        assert_tol_equal(jn301, array([313.59097866698830153,
                                       323.21549776096288280,
                                       331.22338738656748796,
                                       338.39676338872084500,
                                       345.03284233056064157]), rtol=1e-13)

    def test_jn_zeros_slow(self):
        jn0 = special.jn_zeros(0, 300)
        assert_tol_equal(jn0[260-1], 816.02884495068867280, rtol=1e-13)
        assert_tol_equal(jn0[280-1], 878.86068707124422606, rtol=1e-13)
        assert_tol_equal(jn0[300-1], 941.69253065317954064, rtol=1e-13)

        jn10 = special.jn_zeros(10, 300)
        assert_tol_equal(jn10[260-1], 831.67668514305631151, rtol=1e-13)
        assert_tol_equal(jn10[280-1], 894.51275095371316931, rtol=1e-13)
        assert_tol_equal(jn10[300-1], 957.34826370866539775, rtol=1e-13)

        jn3010 = special.jn_zeros(3010,5)
        assert_tol_equal(jn3010, array([3036.86590780927,
                                        3057.06598526482,
                                        3073.66360690272,
                                        3088.37736494778,
                                        3101.86438139042]), rtol=1e-8)

    def test_jnjnp_zeros(self):
        jn = special.jn

        def jnp(n, x):
            # central-difference-free derivative via the recurrence
            return (jn(n-1,x) - jn(n+1,x))/2

        for nt in range(1, 30):
            z, n, m, t = special.jnjnp_zeros(nt)
            for zz, nn, tt in zip(z, n, t):
                if tt == 0:
                    assert_allclose(jn(nn, zz), 0, atol=1e-6)
                elif tt == 1:
                    assert_allclose(jnp(nn, zz), 0, atol=1e-6)
                else:
                    raise AssertionError("Invalid t return for nt=%d" % nt)

    def test_jnp_zeros(self):
        jnp = special.jnp_zeros(1,5)
        assert_array_almost_equal(jnp, array([1.84118,
                                              5.33144,
                                              8.53632,
                                              11.70600,
                                              14.86359]),4)
        jnp = special.jnp_zeros(443,5)
        assert_tol_equal(special.jvp(443, jnp), 0, atol=1e-15)

    def test_jnyn_zeros(self):
        # zeros of J, J', Y, Y' in one call
        jnz = special.jnyn_zeros(1,5)
        assert_array_almost_equal(jnz,(array([3.83171,
                                              7.01559,
                                              10.17347,
                                              13.32369,
                                              16.47063]),
                                       array([1.84118,
                                              5.33144,
                                              8.53632,
                                              11.70600,
                                              14.86359]),
                                       array([2.19714,
                                              5.42968,
                                              8.59601,
                                              11.74915,
                                              14.89744]),
                                       array([3.68302,
                                              6.94150,
                                              10.12340,
                                              13.28576,
                                              16.44006])),5)

    def test_jvp(self):
        # J_v'(z) = (J_{v-1}(z) - J_{v+1}(z)) / 2
        jvprim = special.jvp(2,2)
        jv0 = (special.jv(1,2)-special.jv(3,2))/2
        assert_almost_equal(jvprim,jv0,10)

    def test_k0(self):
        ozk = special.k0(.1)
        ozkr = special.kv(0,.1)
        assert_almost_equal(ozk,ozkr,8)

    def test_k0e(self):
        ozke = special.k0e(.1)
        ozker = special.kve(0,.1)
        assert_almost_equal(ozke,ozker,8)

    def test_k1(self):
        o1k = special.k1(.1)
        o1kr = special.kv(1,.1)
        assert_almost_equal(o1k,o1kr,8)

    def test_k1e(self):
        o1ke = special.k1e(.1)
        o1ker = special.kve(1,.1)
        assert_almost_equal(o1ke,o1ker,8)

    def test_jacobi(self):
        # random (a, b) in (-1, 4); compare against closed-form coefficients
        a = 5*np.random.random() - 1
        b = 5*np.random.random() - 1
        P0 = special.jacobi(0,a,b)
        P1 = special.jacobi(1,a,b)
        P2 = special.jacobi(2,a,b)
        P3 = special.jacobi(3,a,b)

        assert_array_almost_equal(P0.c,[1],13)
        assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13)
        cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)]
        p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]]
        assert_array_almost_equal(P2.c,array(p2c)/8.0,13)
        cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3),
              12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)]
        p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]]
        assert_array_almost_equal(P3.c,array(p3c)/48.0,13)

    def test_kn(self):
        kn1 = special.kn(0,.2)
        assert_almost_equal(kn1,1.7527038555281462,8)

    def test_negv_kv(self):
        # K_{-v}(z) = K_v(z)
        assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2))

    def test_kv0(self):
        kv0 = special.kv(0,.2)
        assert_almost_equal(kv0, 1.7527038555281462, 10)

    def test_kv1(self):
        kv1 = special.kv(1,0.2)
        assert_almost_equal(kv1, 4.775972543220472, 10)

    def test_kv2(self):
        kv2 = special.kv(2,0.2)
        assert_almost_equal(kv2, 49.51242928773287, 10)

    def test_kn_largeorder(self):
        assert_allclose(special.kn(32, 1), 1.7516596664574289e+43)

    def test_kv_largearg(self):
        # K decays exponentially; underflows to exactly 0 here
        assert_equal(special.kv(0, 1e19), 0)

    def test_negv_kve(self):
        assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2))

    def test_kve(self):
        # kve(v, z) = kv(v, z) * exp(z)
        kve1 = special.kve(0,.2)
        kv1 = special.kv(0,.2)*exp(.2)
        assert_almost_equal(kve1,kv1,8)
        z = .2+1j
        kve2 = special.kve(0,z)
        kv2 = special.kv(0,z)*exp(z)
        assert_almost_equal(kve2,kv2,8)

    def test_kvp_v0n1(self):
        # K_0'(z) = -K_1(z)
        z = 2.2
        assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10)

    def test_kvp_n1(self):
        # K_v'(z) = -K_{v+1}(z) + (v/z) K_v(z)
        v = 3.
        z = 2.2
        xc = -special.kv(v+1,z) + v/z*special.kv(v,z)
        x = special.kvp(v,z, n=1)
        assert_almost_equal(xc, x, 10)   # this function (kvp) is broken

    def test_kvp_n2(self):
        v = 3.
        z = 2.2
        xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z
        x = special.kvp(v, z, n=2)
        assert_almost_equal(xc, x, 10)

    def test_y0(self):
        oz = special.y0(.1)
        ozr = special.yn(0,.1)
        assert_almost_equal(oz,ozr,8)

    def test_y1(self):
        o1 = special.y1(.1)
        o1r = special.yn(1,.1)
        assert_almost_equal(o1,o1r,8)

    def test_y0_zeros(self):
        yo,ypo = special.y0_zeros(2)
        zo,zpo = special.y0_zeros(2,complex=1)
        all = r_[yo,zo]
        allval = r_[ypo,zpo]
        assert_array_almost_equal(abs(special.yv(0.0,all)),0.0,11)
        # at a zero of Y0, Y0' equals Y1 up to sign conventions
        assert_array_almost_equal(abs(special.yv(1,all)-allval),0.0,11)

    def test_y1_zeros(self):
        y1 = special.y1_zeros(1)
        assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5)

    def test_y1p_zeros(self):
        y1p = special.y1p_zeros(1,complex=1)
        assert_array_almost_equal(y1p,(array([0.5768+0.904j]), array([-0.7635+0.5892j])),3)

    def test_yn_zeros(self):
        an = special.yn_zeros(4,2)
        assert_array_almost_equal(an,array([5.64515, 9.36162]),5)
        an = special.yn_zeros(443,5)
        assert_tol_equal(an, [450.13573091578090314, 463.05692376675001542,
                              472.80651546418663566, 481.27353184725625838,
                              488.98055964441374646], rtol=1e-15)

    def test_ynp_zeros(self):
        ao = special.ynp_zeros(0,2)
        assert_array_almost_equal(ao,array([2.19714133, 5.42968104]),6)
        ao = special.ynp_zeros(43,5)
        assert_tol_equal(special.yvp(43, ao), 0, atol=1e-15)
        ao = special.ynp_zeros(443,5)
        assert_tol_equal(special.yvp(443, ao), 0, atol=1e-9)

    def test_ynp_zeros_large_order(self):
        # NOTE(review): overlaps the order-443 case above but with a much
        # tighter atol; presumably tracks a separate accuracy fix.
        ao = special.ynp_zeros(443,5)
        assert_tol_equal(special.yvp(443, ao), 0, atol=1e-14)

    def test_yn(self):
        yn2n = special.yn(1,.2)
        assert_almost_equal(yn2n,-3.3238249881118471,8)

    def test_negv_yv(self):
        assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14)

    def test_yv(self):
        yv2 = special.yv(1,.2)
        assert_almost_equal(yv2,-3.3238249881118471,8)

    def test_negv_yve(self):
        assert_almost_equal(special.yve(-3,2), -special.yve(3,2), 14)

    def test_yve(self):
        # yve(v, z) = yv(v, z) * exp(-abs(z.imag))
        yve2 = special.yve(1,.2)
        assert_almost_equal(yve2,-3.3238249881118471,8)
        yve2r = special.yv(1,.2+1j)*exp(-1)
        yve22 = special.yve(1,.2+1j)
        assert_almost_equal(yve22,yve2r,8)

    def test_yvp(self):
        # Y_v'(z) = (Y_{v-1}(z) - Y_{v+1}(z)) / 2
        yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0
        yvp1 = special.yvp(2,.2)
        assert_array_almost_equal(yvp1,yvpr,10)

    def _cephes_vs_amos_points(self):
        """Yield points at which to compare Cephes implementation to AMOS"""
        # check several points, including large-amplitude ones
        for v in [-120, -100.3, -20., -10., -1., -.5,
                  0., 1., 12.49, 120., 301]:
            for z in [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5,
                      700.6, 1300, 10003]:
                yield v, z

        # check half-integers; these are problematic points at least
        # for cephes/iv
        for v in 0.5 + arange(-60, 60):
            yield v, 3.5

    def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None):
        # f1: real-argument (Cephes) routine; f2: integer-order reference.
        # f1(v, z+0j) dispatches to the AMOS complex implementation.
        for v, z in self._cephes_vs_amos_points():
            if skip is not None and skip(v, z):
                continue
            c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z)
            if np.isinf(c1):
                # Cephes overflowed; AMOS must at least be huge there
                assert_(np.abs(c2) >= 1e300, (v, z))
            elif np.isnan(c1):
                # Cephes nan <=> result is genuinely complex off the real axis
                assert_(c2.imag != 0, (v, z))
            else:
                assert_tol_equal(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol)
                if v == int(v):
                    assert_tol_equal(c3, c2, err_msg=(v, z),
                                     rtol=rtol, atol=atol)

    def test_jv_cephes_vs_amos(self):
        self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305)

    def test_yv_cephes_vs_amos(self):
        self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305)

    def test_yv_cephes_vs_amos_only_small_orders(self):
        skipper = lambda v, z: (abs(v) > 50)
        self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, skip=skipper)

    def test_iv_cephes_vs_amos(self):
        olderr = np.seterr(all='ignore')
        try:
            self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305)
        finally:
            np.seterr(**olderr)

    @dec.slow
    def test_iv_cephes_vs_amos_mass_test(self):
        # large randomized comparison of real vs. complex iv paths
        N = 1000000
        np.random.seed(1)
        v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N)
        x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N)

        imsk = (np.random.randint(8, size=N) == 0)
        v[imsk] = v[imsk].astype(int)

        old_err = np.seterr(all='ignore')
        try:
            c1 = special.iv(v, x)
            c2 = special.iv(v, x+0j)

            # deal with differences in the inf and zero cutoffs
            c1[abs(c1) > 1e300] = np.inf
            c2[abs(c2) > 1e300] = np.inf
            c1[abs(c1) < 1e-300] = 0
            c2[abs(c2) < 1e-300] = 0

            dc = abs(c1/c2 - 1)
            dc[np.isnan(dc)] = 0
        finally:
            np.seterr(**old_err)

        k = np.argmax(dc)

        # Most error apparently comes from AMOS and not our implementation;
        # there are some problems near integer orders there
        assert_(dc[k] < 2e-7, (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j)))

    def test_kv_cephes_vs_amos(self):
        self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305)
        self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305)

    def test_ticket_623(self):
        assert_tol_equal(special.jv(3, 4), 0.43017147387562193)
        assert_tol_equal(special.jv(301, 1300), 0.0183487151115275)
        assert_tol_equal(special.jv(301, 1296.0682), -0.0224174325312048)

    def test_ticket_853(self):
        """Negative-order Bessels"""
        # cephes
        assert_tol_equal(special.jv(-1, 1), -0.4400505857449335)
        assert_tol_equal(special.jv(-2, 1), 0.1149034849319005)
        assert_tol_equal(special.yv(-1, 1), 0.7812128213002887)
        assert_tol_equal(special.yv(-2, 1), -1.650682606816255)
        assert_tol_equal(special.iv(-1, 1), 0.5651591039924851)
        assert_tol_equal(special.iv(-2, 1), 0.1357476697670383)
        assert_tol_equal(special.kv(-1, 1), 0.6019072301972347)
        assert_tol_equal(special.kv(-2, 1), 1.624838898635178)
        assert_tol_equal(special.jv(-0.5, 1), 0.43109886801837607952)
        assert_tol_equal(special.yv(-0.5, 1), 0.6713967071418031)
        assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
        assert_tol_equal(special.kv(-0.5, 1), 0.4610685044478945)
        # amos
        assert_tol_equal(special.jv(-1, 1+0j), -0.4400505857449335)
        assert_tol_equal(special.jv(-2, 1+0j), 0.1149034849319005)
        assert_tol_equal(special.yv(-1, 1+0j), 0.7812128213002887)
        assert_tol_equal(special.yv(-2, 1+0j), -1.650682606816255)

        assert_tol_equal(special.iv(-1, 1+0j), 0.5651591039924851)
        assert_tol_equal(special.iv(-2, 1+0j), 0.1357476697670383)
        assert_tol_equal(special.kv(-1, 1+0j), 0.6019072301972347)
        assert_tol_equal(special.kv(-2, 1+0j), 1.624838898635178)

        assert_tol_equal(special.jv(-0.5, 1+0j), 0.43109886801837607952)
        assert_tol_equal(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j)
        assert_tol_equal(special.yv(-0.5, 1+0j), 0.6713967071418031)
        assert_tol_equal(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j)

        assert_tol_equal(special.iv(-0.5, 1+0j), 1.231200214592967)
        assert_tol_equal(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j)
        assert_tol_equal(special.kv(-0.5, 1+0j), 0.4610685044478945)
        assert_tol_equal(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j)

        # scaled-variant consistency at complex arguments
        assert_tol_equal(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3))
        assert_tol_equal(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3))
        assert_tol_equal(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3))
        assert_tol_equal(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j))

        assert_tol_equal(special.hankel1(-0.5, 1+1j), special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j))
        assert_tol_equal(special.hankel2(-0.5, 1+1j), special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j))

    def test_ticket_854(self):
        """Real-valued Bessel domains"""
        # negative real argument is outside the real-valued domain -> nan
        assert_(isnan(special.jv(0.5, -1)))
        assert_(isnan(special.iv(0.5, -1)))
        assert_(isnan(special.yv(0.5, -1)))
        assert_(isnan(special.yv(1, -1)))
        assert_(isnan(special.kv(0.5, -1)))
        assert_(isnan(special.kv(1, -1)))
        assert_(isnan(special.jve(0.5, -1)))
        assert_(isnan(special.ive(0.5, -1)))
        assert_(isnan(special.yve(0.5, -1)))
        assert_(isnan(special.yve(1, -1)))
        assert_(isnan(special.kve(0.5, -1)))
        assert_(isnan(special.kve(1, -1)))
        assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1))
        assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1))

    def test_ticket_503(self):
        """Real-valued Bessel I overflow"""
        assert_tol_equal(special.iv(1, 700), 1.528500390233901e302)
        assert_tol_equal(special.iv(1000, 1120), 1.301564549405821e301)

    def test_iv_hyperg_poles(self):
        assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)

    def iv_series(self, v, z, n=200):
        # Truncated power series for I_v(z) in log space, with a crude
        # error estimate (rounding + truncation term).
        k = arange(0, n).astype(float_)
        r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1)
        r[isnan(r)] = inf
        r = exp(r)
        err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10
        return r.sum(), err

    def test_i0_series(self):
        for z in [1., 10., 200.5]:
            value, err = self.iv_series(0, z)
            assert_tol_equal(special.i0(z), value, atol=err, err_msg=z)

    def test_i1_series(self):
        for z in [1., 10., 200.5]:
            value, err = self.iv_series(1, z)
            assert_tol_equal(special.i1(z), value, atol=err, err_msg=z)

    def test_iv_series(self):
        for v in [-20., -10., -1., 0., 1., 12.49, 120.]:
            for z in [1., 10., 200.5, -1+2j]:
                value, err = self.iv_series(v, z)
                assert_tol_equal(special.iv(v, z), value, atol=err, err_msg=(v, z))

    def test_i0(self):
        # tabulated values of exp(-x) * I_0(x)
        values = [[0.0, 1.0],
                  [1e-10, 1.0],
                  [0.1, 0.9071009258],
                  [0.5, 0.6450352706],
                  [1.0, 0.4657596077],
                  [2.5, 0.2700464416],
                  [5.0, 0.1835408126],
                  [20.0, 0.0897803119],
                  ]
        for i, (x, v) in enumerate(values):
            cv = special.i0(x) * exp(-x)
            assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)

    def test_i0e(self):
        oize = special.i0e(.1)
        oizer = special.ive(0,.1)
        assert_almost_equal(oize,oizer,8)

    def test_i1(self):
        # tabulated values of exp(-x) * I_1(x)
        values = [[0.0, 0.0],
                  [1e-10, 0.4999999999500000e-10],
                  [0.1, 0.0452984468],
                  [0.5, 0.1564208032],
                  [1.0, 0.2079104154],
                  [5.0, 0.1639722669],
                  [20.0, 0.0875062222],
                  ]
        for i, (x, v) in enumerate(values):
            cv = special.i1(x) * exp(-x)
            assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)

    def test_i1e(self):
        oi1e = special.i1e(.1)
        oi1er = special.ive(1,.1)
        assert_almost_equal(oi1e,oi1er,8)

    def test_iti0k0(self):
        iti0 = array(special.iti0k0(5))
        assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5)

    def test_it2i0k0(self):
        it2k = special.it2i0k0(.1)
        assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6)

    def test_iv(self):
        iv1 = special.iv(0,.1)*exp(-.1)
        assert_almost_equal(iv1,0.90710092578230106,10)

    def test_negv_ive(self):
        assert_equal(special.ive(3,2), special.ive(-3,2))

    def test_ive(self):
        # ive(v, z) = iv(v, z) * exp(-abs(z.real))
        ive1 = special.ive(0,.1)
        iv1 = special.iv(0,.1)*exp(-.1)
        assert_almost_equal(ive1,iv1,10)

    def test_ivp0(self):
        # I_0'(z) = I_1(z)
        assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10)

    def test_ivp(self):
        # I_v'(z) = (I_{v-1}(z) + I_{v+1}(z)) / 2
        y = (special.iv(0,2) + special.iv(2,2))/2
        x = special.ivp(1,2)
        assert_almost_equal(x,y,10)
class TestLaguerre(TestCase):
    """Laguerre and generalized Laguerre polynomial coefficients."""

    def test_laguerre(self):
        # First six Laguerre polynomials vs. their closed-form
        # coefficient lists (highest power first).
        polys = [special.laguerre(deg) for deg in range(6)]
        wanted = [
            [1],
            [-1, 1],
            array([1, -4, 2])/2.0,
            array([-1, 9, -18, 6])/6.0,
            array([1, -16, 72, -96, 24])/24.0,
            array([-1, 25, -200, 600, -600, 120])/120.0,
        ]
        for poly, want in zip(polys, wanted):
            assert_array_almost_equal(poly.c, want, 13)

    def test_genlaguerre(self):
        # Random parameter k > -1; compare L_n^{(k)} coefficients with
        # the explicit formulas for n = 0..3.
        k = 5*np.random.random() - 0.9
        lags = [special.genlaguerre(deg, k) for deg in range(4)]
        assert_equal(lags[0].c, [1])
        assert_equal(lags[1].c, [-1, k+1])
        assert_almost_equal(lags[2].c,
                            array([1, -2*(k+2), (k+1.)*(k+2.)])/2.0)
        assert_almost_equal(lags[3].c,
                            array([-1, 3*(k+3), -3*(k+2)*(k+3),
                                   (k+1)*(k+2)*(k+3)])/6.0)
# Base polynomials come from Abramowitz and Stegun
class TestLegendre(TestCase):
    """Legendre polynomial coefficients (A&S table 22.3, highest power first)."""

    def test_legendre(self):
        legs = [special.legendre(deg) for deg in range(6)]
        assert_equal(legs[0].c, [1])
        assert_equal(legs[1].c, [1, 0])
        assert_almost_equal(legs[2].c, array([3, 0, -1])/2.0, decimal=13)
        assert_almost_equal(legs[3].c, array([5, 0, -3, 0])/2.0)
        assert_almost_equal(legs[4].c, array([35, 0, -30, 0, 3])/8.0)
        assert_almost_equal(legs[5].c, array([63, 0, -70, 0, 15, 0])/8.0)
class TestLambda(TestCase):
    """Jahnke-Emden lambda functions and their derivatives."""

    def test_lmbda(self):
        got = special.lmbda(1, .1)
        # Spell out lambda_v(x) and its derivative for v = 0, 1 in terms
        # of Bessel J: lambda_0 = J_0, lambda_1 = 2*J_1(x)/x.
        vals = array([special.jn(0, .1),
                      2*special.jn(1, .1)/.1])
        derivs = array([special.jvp(0, .1),
                        -2*special.jv(1, .1)/.01 + 2*special.jvp(1, .1)/.1])
        assert_array_almost_equal(got, (vals, derivs), 8)
class TestLog1p(TestCase):
    """log1p(x) must agree with log(1 + x) at moderate arguments."""

    def test_log1p(self):
        args = (10, 11, 12)
        got = tuple(special.log1p(a) for a in args)
        want = tuple(log(a + 1) for a in args)
        assert_array_almost_equal(got, want, 8)

    def test_log1pmore(self):
        args = (1, 1.1, 1.2)
        got = tuple(special.log1p(a) for a in args)
        want = tuple(log(a + 1) for a in args)
        assert_array_almost_equal(got, want, 8)
class TestLegendreFunctions(TestCase):
    """Associated Legendre functions, real (lpmn/lpmv/lpn) and complex
    (clpmn), plus the second-kind lqmn/lqn."""

    def test_clpmn(self):
        # compare against explicit closed forms for n, m <= 2, type 3
        z = 0.5+0.3j
        clp = special.clpmn(2, 2, z, 3)
        assert_array_almost_equal(clp,
                   (array([[1.0000, z, 0.5*(3*z*z-1)],
                           [0.0000, sqrt(z*z-1), 3*z*sqrt(z*z-1)],
                           [0.0000, 0.0000, 3*(z*z-1)]]),
                    array([[0.0000, 1.0000, 3*z],
                           [0.0000, z/sqrt(z*z-1), 3*(2*z*z-1)/sqrt(z*z-1)],
                           [0.0000, 0.0000, 6*z]])),
                    7)

    def test_clpmn_close_to_real_2(self):
        # type-2 branch: approaching the cut from either side matches lpmv
        eps = 1e-10
        m = 1
        n = 3
        x = 0.5
        clp_plus = special.clpmn(m, n, x+1j*eps, 2)[0][m, n]
        clp_minus = special.clpmn(m, n, x-1j*eps, 2)[0][m, n]
        assert_array_almost_equal(array([clp_plus, clp_minus]),
                                  array([special.lpmv(m, n, x),
                                         special.lpmv(m, n, x)]),
                                  7)

    def test_clpmn_close_to_real_3(self):
        # type-3 branch: picks up a phase exp(-+ 0.5j*m*pi) across the cut
        eps = 1e-10
        m = 1
        n = 3
        x = 0.5
        clp_plus = special.clpmn(m, n, x+1j*eps, 3)[0][m, n]
        clp_minus = special.clpmn(m, n, x-1j*eps, 3)[0][m, n]
        assert_array_almost_equal(array([clp_plus, clp_minus]),
                                  array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi),
                                         special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)]),
                                  7)

    def test_clpmn_across_unit_circle(self):
        # continuity across |z| = 1 (away from the real-axis cut)
        eps = 1e-7
        m = 1
        n = 1
        x = 1j
        for type in [2, 3]:
            assert_almost_equal(special.clpmn(m, n, x+1j*eps, type)[0][m, n],
                            special.clpmn(m, n, x-1j*eps, type)[0][m, n], 6)

    def test_inf(self):
        # derivatives blow up at z = +-1 for m >= 1
        for z in (1, -1):
            for n in range(4):
                for m in range(1, n):
                    lp = special.clpmn(m, n, z)
                    assert_(np.isinf(lp[1][1,1:]).all())
                    lp = special.lpmn(m, n, z)
                    assert_(np.isinf(lp[1][1,1:]).all())

    def test_deriv_clpmn(self):
        # data inside and outside of the unit circle
        zvals = [0.5+0.5j, -0.5+0.5j, -0.5-0.5j, 0.5-0.5j,
                 1+1j, -1+1j, -1-1j, 1-1j]
        m = 2
        n = 3
        for type in [2, 3]:
            for z in zvals:
                for h in [1e-3, 1e-3j]:
                    # central finite difference vs. returned derivative
                    approx_derivative = (special.clpmn(m, n, z+0.5*h, type)[0]
                                         - special.clpmn(m, n, z-0.5*h, type)[0])/h
                    assert_allclose(special.clpmn(m, n, z, type)[1],
                                    approx_derivative,
                                    rtol=1e-4)

    def test_lpmn(self):
        lp = special.lpmn(0,2,.5)
        assert_array_almost_equal(lp,(array([[1.00000,
                                              0.50000,
                                              -0.12500]]),
                                      array([[0.00000,
                                              1.00000,
                                              1.50000]])),4)

    def test_lpn(self):
        lpnf = special.lpn(2,.5)
        assert_array_almost_equal(lpnf,(array([1.00000,
                                               0.50000,
                                               -0.12500]),
                                        array([0.00000,
                                               1.00000,
                                               1.50000])),4)

    def test_lpmv(self):
        lp = special.lpmv(0,2,.5)
        assert_almost_equal(lp,-0.125,7)
        lp = special.lpmv(0,40,.001)
        assert_almost_equal(lp,0.1252678976534484,7)

        # XXX: this is outside the domain of the current implementation,
        #      so ensure it returns a NaN rather than a wrong answer.
        olderr = np.seterr(all='ignore')
        try:
            lp = special.lpmv(-1,-1,.001)
        finally:
            np.seterr(**olderr)
        assert_(lp != 0 or np.isnan(lp))

    def test_lqmn(self):
        # m=0 row of lqmn must reproduce lqn
        lqmnf = special.lqmn(0,2,.5)
        lqf = special.lqn(2,.5)
        assert_array_almost_equal(lqmnf[0][0],lqf[0],4)
        assert_array_almost_equal(lqmnf[1][0],lqf[1],4)

    def test_lqmn_gt1(self):
        """algorithm for real arguments changes at 1.0001
        test against analytical result for m=2, n=1
        """
        x0 = 1.0001
        delta = 0.00002
        for x in (x0-delta, x0+delta):
            lq = special.lqmn(2, 1, x)[0][-1, -1]
            expected = 2/(x*x-1)
            assert_almost_equal(lq, expected)

    def test_lqmn_shape(self):
        # output arrays are (m+1, n+1), with n clamped to at least 0
        a, b = special.lqmn(4, 4, 1.1)
        assert_equal(a.shape, (5, 5))
        assert_equal(b.shape, (5, 5))

        a, b = special.lqmn(4, 0, 1.1)
        assert_equal(a.shape, (5, 1))
        assert_equal(b.shape, (5, 1))

    def test_lqn(self):
        lqf = special.lqn(2,.5)
        assert_array_almost_equal(lqf,(array([0.5493, -0.7253, -0.8187]),
                                       array([1.3333, 1.216, -0.8427])),4)
class TestMathieu(TestCase):

    def test_mathieu_a(self):
        """Placeholder: mathieu_a characteristic values not yet covered."""
        pass

    def test_mathieu_even_coef(self):
        # Smoke test only: the reporting order of the underlying
        # coefficient routine is unclear, so just exercise the call.
        special.mathieu_even_coef(2, 5)

    def test_mathieu_odd_coef(self):
        """Placeholder: same reporting-order issue as the even case."""
        pass
class TestFresnelIntegral(TestCase):

    def test_modfresnelp(self):
        """Placeholder: modified Fresnel plus integral not yet covered."""
        pass

    def test_modfresnelm(self):
        """Placeholder: modified Fresnel minus integral not yet covered."""
        pass
class TestOblCvSeq(TestCase):
    """Oblate spheroidal wave function characteristic values."""

    def test_obl_cv_seq(self):
        got = special.obl_cv_seq(0, 3, 1)
        want = array([-0.348602, 1.393206, 5.486800, 11.492120])
        assert_array_almost_equal(got, want, 5)
class TestParabolicCylinder(TestCase):
    # Parabolic cylinder functions D_v(x) (pbdv) and V_v(x) (pbvv).
    # assert_tol_equal is a helper defined elsewhere in this test module.
    def test_pbdn_seq(self):
        """Values and derivatives of D_n(0.1) for n = 0..1."""
        pb = special.pbdn_seq(1,.1)
        assert_array_almost_equal(pb,(array([0.9975,
                                             0.0998]),
                                      array([-0.0499,
                                             0.9925])),4)
    def test_pbdv(self):
        # NOTE(review): derrl is computed from the recurrence
        # D_v'(x) = x/2*D_v(x) - D_{v-1}(x) but never asserted against
        # anything, so this test only checks that pbdv() can be called.
        pbv = special.pbdv(1,.2)
        derrl = 1/2*(.2)*special.pbdv(1,.2)[0] - special.pbdv(0,.2)[0]
    def test_pbdv_seq(self):
        """pbdv_seq must agree with the real part of pbdn_seq."""
        pbn = special.pbdn_seq(1,.1)
        pbv = special.pbdv_seq(1,.1)
        assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4)
    def test_pbdv_points(self):
        # simple case: closed form for D_eta(0)
        eta = np.linspace(-10, 10, 5)
        z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta)
        assert_tol_equal(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)
        # some points (reference values, presumably from an external source
        # -- TODO confirm provenance)
        assert_tol_equal(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)
        assert_tol_equal(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)
    def test_pbdv_gradient(self):
        """Returned derivative of D_v must match a central finite difference."""
        x = np.linspace(-4, 4, 8)[:,None]
        eta = np.linspace(-10, 10, 5)[None,:]
        p = special.pbdv(eta, x)
        eps = 1e-7 + 1e-7*abs(x)
        dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2.
        assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
    def test_pbvv_gradient(self):
        """Returned derivative of V_v must match a central finite difference."""
        x = np.linspace(-4, 4, 8)[:,None]
        eta = np.linspace(-10, 10, 5)[None,:]
        p = special.pbvv(eta, x)
        eps = 1e-7 + 1e-7*abs(x)
        dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2.
        assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
class TestPolygamma(TestCase):
    # from Table 6.2 (pg. 271) of A&S
    def test_polygamma(self):
        """Check polygamma against tabulated values, psi(), and broadcasting."""
        poly2 = special.polygamma(2,1)
        poly3 = special.polygamma(3,1)
        assert_almost_equal(poly2,-2.4041138063,10)
        assert_almost_equal(poly3,6.4939394023,10)
        # Test polygamma(0, x) == psi(x)
        x = [2, 3, 1.1e14]
        assert_almost_equal(special.polygamma(0, x), special.psi(x))
        # Test broadcasting
        n = [0, 1, 2]
        x = [0.5, 1.5, 2.5]
        expected = [-1.9635100260214238, 0.93480220054467933,
                    -0.23620405164172739]
        assert_almost_equal(special.polygamma(n, x), expected)
        # np.vstack replaces np.row_stack, a deprecated alias that was
        # removed in NumPy 2.0; both stack 1-D rows into a 2-D array.
        expected = np.vstack([expected]*2)
        assert_almost_equal(special.polygamma(n, np.vstack([x]*2)),
                            expected)
        assert_almost_equal(special.polygamma(np.vstack([n]*2), x),
                            expected)
class TestProCvSeq(TestCase):
    """Prolate spheroidal wave function characteristic values."""
    def test_pro_cv_seq(self):
        """Sequence of characteristic values for m=0, n <= 3, c=1."""
        expected = array([0.319000, 2.593084, 6.533471, 12.514462])
        computed = special.pro_cv_seq(0, 3, 1)
        assert_array_almost_equal(computed, expected, 5)
class TestPsi(TestCase):
    """Digamma function."""
    def test_psi(self):
        """psi(1) equals minus the Euler-Mascheroni constant."""
        assert_almost_equal(special.psi(1), -0.57721566490153287, 8)
class TestRadian(TestCase):
    """Degree/minute/second to radian conversion."""
    def test_radian(self):
        """90 degrees is pi/2 radians."""
        assert_almost_equal(special.radian(90, 0, 0), pi / 2.0, 5)
    def test_radianmore(self):
        """Minutes and seconds add their fractional contributions."""
        expected = pi / 2 + 0.0005816135199345904
        assert_almost_equal(special.radian(90, 1, 60), expected, 5)
class TestRiccati(TestCase):
    # Riccati-Bessel functions S_n(x) = x*j_n(x) and C_n(x) = -x*y_n(x),
    # checked against the spherical Bessel routines via the product rule.
    # sph_jn/sph_yn emit DeprecationWarning, hence the suppression blocks.
    def test_riccati_jn(self):
        """riccati_jn must equal (x*j_n(x), j_n(x) + x*j_n'(x))."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            jnrl = (special.sph_jn(1,.2)[0]*.2,special.sph_jn(1,.2)[0]+special.sph_jn(1,.2)[1]*.2)
        ricjn = special.riccati_jn(1,.2)
        assert_array_almost_equal(ricjn,jnrl,8)
    def test_riccati_yn(self):
        """riccati_yn must equal (x*y_n(x), y_n(x) + x*y_n'(x))."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            ynrl = (special.sph_yn(1,.2)[0]*.2,special.sph_yn(1,.2)[0]+special.sph_yn(1,.2)[1]*.2)
        ricyn = special.riccati_yn(1,.2)
        assert_array_almost_equal(ricyn,ynrl,8)
class TestRound(TestCase):
    def test_round(self):
        """special.round: round-half-to-even semantics (platform permitting)."""
        rnd = list(map(int,(special.round(10.1),special.round(10.4),special.round(10.5),special.round(10.6))))
        # Note: According to the documentation, scipy.special.round is
        # supposed to round to the nearest even number if the fractional
        # part is exactly 0.5. On some platforms, this does not appear
        # to work and thus this test may fail. However, this unit test is
        # correctly written.
        rndrl = (10,10,10,11)
        assert_array_equal(rnd,rndrl)
def test_sph_harm():
    """Spot-check sph_harm against closed forms.

    Tests derived from tables in
    http://en.wikipedia.org/wiki/Table_of_spherical_harmonics

    This was originally written as a nose-style "yield test"; modern test
    runners (pytest) no longer execute yielded assertions, so the checks
    are now performed directly.
    """
    sh = special.sph_harm
    pi = np.pi
    exp = np.exp
    sqrt = np.sqrt
    sin = np.sin
    cos = np.cos
    # Y_0^0 = 1/(2*sqrt(pi))
    assert_array_almost_equal(sh(0,0,0,0),
                              0.5/sqrt(pi))
    assert_array_almost_equal(sh(-2,2,0.,pi/4),
                              0.25*sqrt(15./(2.*pi)) *
                              (sin(pi/4))**2.)
    assert_array_almost_equal(sh(-2,2,0.,pi/2),
                              0.25*sqrt(15./(2.*pi)))
    assert_array_almost_equal(sh(2,2,pi,pi/2),
                              0.25*sqrt(15/(2.*pi)) *
                              exp(0+2.*pi*1j)*sin(pi/2.)**2.)
    assert_array_almost_equal(sh(2,4,pi/4.,pi/3.),
                              (3./8.)*sqrt(5./(2.*pi)) *
                              exp(0+2.*pi/4.*1j) *
                              sin(pi/3.)**2. *
                              (7.*cos(pi/3.)**2.-1))
    assert_array_almost_equal(sh(4,4,pi/8.,pi/6.),
                              (3./16.)*sqrt(35./(2.*pi)) *
                              exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.)
def test_sph_harm_ufunc_loop_selection():
    # see https://github.com/scipy/scipy/issues/4895
    # Mixed scalar/array arguments must all select the complex128 loop.
    expected_dtype = np.dtype(np.complex128)
    argument_sets = [
        (0, 0, 0, 0),
        ([0], 0, 0, 0),
        (0, [0], 0, 0),
        (0, 0, [0], 0),
        (0, 0, 0, [0]),
        ([0], [0], [0], [0]),
    ]
    for args in argument_sets:
        assert_equal(special.sph_harm(*args).dtype, expected_dtype)
class TestSpherical(TestCase):
    # Spherical Bessel functions via the deprecated sph_in/sph_kn/sph_jn/
    # sph_yn/... interfaces (hence the DeprecationWarning suppression).
    # Each routine returns (values, derivatives) arrays for orders 0..n.
    def test_sph_harm(self):
        # see test_sph_harm function
        pass
    def test_sph_in(self):
        """Values of i_n(0.2) plus derivatives from the recurrence."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            i1n = special.sph_in(1,.2)
        # i_0'(x) = i_1(x); i_1'(x) = i_0(x) - (2/x)*i_1(x)
        inp0 = (i1n[0][1])
        inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1])
        assert_array_almost_equal(i1n[0],array([1.0066800127054699381,
                                                0.066933714568029540839]),12)
        assert_array_almost_equal(i1n[1],[inp0,inp1],12)
    def test_sph_inkn(self):
        """Combined sph_inkn must match sph_in and sph_kn individually."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            spikn = r_[special.sph_in(1,.2) + special.sph_kn(1,.2)]
            inkn = r_[special.sph_inkn(1,.2)]
        assert_array_almost_equal(inkn,spikn,10)
    def test_sph_in_kn_order0(self):
        """Order-0 i_0 and k_0 against their closed forms."""
        x = 1.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            sph_i0 = special.sph_in(0, x)
            sph_i0_expected = np.array([np.sinh(x)/x,
                                        np.cosh(x)/x-np.sinh(x)/x**2])
            assert_array_almost_equal(r_[sph_i0], sph_i0_expected)
            sph_k0 = special.sph_kn(0, x)
            sph_k0_expected = np.array([0.5*pi*exp(-x)/x,
                                        -0.5*pi*exp(-x)*(1/x+1/x**2)])
            assert_array_almost_equal(r_[sph_k0], sph_k0_expected)
            sph_i0k0 = special.sph_inkn(0, x)
            assert_array_almost_equal(r_[sph_i0+sph_k0],
                                      r_[sph_i0k0],
                                      10)
    def test_sph_jn(self):
        """Values of j_n(0.2), n <= 2, and derivatives from recurrences."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            s1 = special.sph_jn(2,.2)
        # j_0' = -j_1; j_n' = j_{n-1} - ((n+1)/x)*j_n
        s10 = -s1[0][1]
        s11 = s1[0][0]-2.0/0.2*s1[0][1]
        s12 = s1[0][1]-3.0/0.2*s1[0][2]
        assert_array_almost_equal(s1[0],[0.99334665397530607731,
                                         0.066400380670322230863,
                                         0.0026590560795273856680],12)
        assert_array_almost_equal(s1[1],[s10,s11,s12],12)
    def test_sph_jnyn(self):
        """Combined sph_jnyn must match sph_jn and sph_yn individually."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            jnyn = r_[special.sph_jn(1,.2) + special.sph_yn(1,.2)]  # tuple addition
            jnyn1 = r_[special.sph_jnyn(1,.2)]
        assert_array_almost_equal(jnyn1,jnyn,9)
    def test_sph_kn(self):
        """Values of k_n(0.2), n <= 2, and derivatives from recurrences."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            kn = special.sph_kn(2,.2)
        # k_0' = -k_1; k_n' = -k_{n-1} - ((n+1)/x)*k_n
        kn0 = -kn[0][1]
        kn1 = -kn[0][0]-2.0/0.2*kn[0][1]
        kn2 = -kn[0][1]-3.0/0.2*kn[0][2]
        assert_array_almost_equal(kn[0],[6.4302962978445670140,
                                         38.581777787067402086,
                                         585.15696310385559829],12)
        assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9)
    def test_sph_yn(self):
        """Spot values of y_n(0.2) and the derivative recurrence."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            sy1 = special.sph_yn(2,.2)[0][2]
            sy2 = special.sph_yn(0,.2)[0][0]
            sy3 = special.sph_yn(1,.2)[1][1]
            sphpy = (special.sph_yn(1,.2)[0][0]-2*special.sph_yn(2,.2)[0][2])/3  # correct derivative value
        assert_almost_equal(sy1,-377.52483,5)  # previous values in the system
        assert_almost_equal(sy2,-4.9003329,5)
        assert_almost_equal(sy3,sphpy,4)  # compare correct derivative val. (correct =-system val).
class TestStruve(object):
def _series(self, v, z, n=100):
"""Compute Struve function & error estimate from its power series."""
k = arange(0, n)
r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5)
err = abs(r).max() * finfo(float_).eps * n
return r.sum(), err
def test_vs_series(self):
"""Check Struve function versus its power series"""
for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]:
for z in [1, 10, 19, 21, 30]:
value, err = self._series(v, z)
assert_tol_equal(special.struve(v, z), value, rtol=0, atol=err), (v, z)
def test_some_values(self):
assert_tol_equal(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7)
assert_tol_equal(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8)
assert_tol_equal(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12)
assert_tol_equal(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11)
assert_equal(special.struve(-12, -41), -special.struve(-12, 41))
assert_equal(special.struve(+12, -41), -special.struve(+12, 41))
assert_equal(special.struve(-11, -41), +special.struve(-11, 41))
assert_equal(special.struve(+11, -41), +special.struve(+11, 41))
assert_(isnan(special.struve(-7.1, -1)))
assert_(isnan(special.struve(-10.1, -1)))
def test_regression_679(self):
"""Regression test for #679"""
assert_tol_equal(special.struve(-1.0, 20 - 1e-8), special.struve(-1.0, 20 + 1e-8))
assert_tol_equal(special.struve(-2.0, 20 - 1e-8), special.struve(-2.0, 20 + 1e-8))
assert_tol_equal(special.struve(-4.3, 20 - 1e-8), special.struve(-4.3, 20 + 1e-8))
def test_chi2_smalldf():
    """Chi-square CDF with a small, fractional number of dof."""
    expected = 0.957890536704110
    assert_almost_equal(special.chdtr(0.6, 3), expected)
def test_ch2_inf():
    """The chi-square CDF tends to exactly 1 as x -> infinity."""
    result = special.chdtr(0.7, np.inf)
    assert_equal(result, 1.0)
def test_chi2c_smalldf():
    """chdtrc is the complement of chdtr for small fractional dof."""
    expected = 1 - 0.957890536704110
    assert_almost_equal(special.chdtrc(0.6, 3), expected)
def test_chi2_inv_smalldf():
    """chdtri inverts the complemented chi-square CDF."""
    complemented_prob = 1 - 0.957890536704110
    assert_almost_equal(special.chdtri(0.6, complemented_prob), 3)
def test_agm_simple():
    """Arithmetic-geometric mean: moderate and extreme magnitude gaps."""
    cases = [
        ((24, 6), 13.4581714817),
        ((1e30, 1), 2.2292230559453832047768593e28),
    ]
    for args, expected in cases:
        assert_allclose(special.agm(*args), expected)
def test_legacy():
    # Checks the "legacy" argument handling of several cephes wrappers:
    # non-integer order/count arguments are truncated towards zero, so
    # f(1.8, 2.8, ...) must agree with f(1, 2, ...).
    # NOTE(review): this relies on historical scipy behaviour; newer scipy
    # versions changed/removed some of these (e.g. hyp2f0) -- verify
    # against the installed version.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", RuntimeWarning)
        # Legacy behavior: truncating arguments to integers
        assert_equal(special.bdtrc(1, 2, 0.3), special.bdtrc(1.8, 2.8, 0.3))
        assert_equal(special.bdtr(1, 2, 0.3), special.bdtr(1.8, 2.8, 0.3))
        assert_equal(special.bdtri(1, 2, 0.3), special.bdtri(1.8, 2.8, 0.3))
        assert_equal(special.expn(1, 0.3), special.expn(1.8, 0.3))
        assert_equal(special.hyp2f0(1, 2, 0.3, 1), special.hyp2f0(1, 2, 0.3, 1.8))
        assert_equal(special.nbdtrc(1, 2, 0.3), special.nbdtrc(1.8, 2.8, 0.3))
        assert_equal(special.nbdtr(1, 2, 0.3), special.nbdtr(1.8, 2.8, 0.3))
        assert_equal(special.nbdtri(1, 2, 0.3), special.nbdtri(1.8, 2.8, 0.3))
        assert_equal(special.pdtrc(1, 0.3), special.pdtrc(1.8, 0.3))
        assert_equal(special.pdtr(1, 0.3), special.pdtr(1.8, 0.3))
        assert_equal(special.pdtri(1, 0.3), special.pdtri(1.8, 0.3))
        assert_equal(special.kn(1, 0.3), special.kn(1.8, 0.3))
        assert_equal(special.yn(1, 0.3), special.yn(1.8, 0.3))
        assert_equal(special.smirnov(1, 0.3), special.smirnov(1.8, 0.3))
        assert_equal(special.smirnovi(1, 0.3), special.smirnovi(1.8, 0.3))
@with_special_errors
def test_error_raising():
    # with_special_errors (defined in this module) turns special-function
    # error conditions into raised SpecialFunctionError exceptions;
    # iv(1, 1e99j) overflows and must therefore raise.
    assert_raises(special.SpecialFunctionError, special.iv, 1, 1e99j)
def test_xlogy():
    """xlogy(x, y) = x*log(y), with 0*log(anything non-nan) defined as 0."""
    def reference(x, y):
        # the x == 0 short-circuit avoids 0 * (-inf) = nan
        return x if (x == 0 and not np.isnan(y)) else x * np.log(y)

    real_pairs = np.asarray([(0, 0), (0, np.nan), (0, np.inf), (1.0, 2.0)],
                            dtype=float)
    expected = np.vectorize(reference)(real_pairs[:, 0], real_pairs[:, 1])
    assert_func_equal(special.xlogy, expected, real_pairs, rtol=1e-13, atol=1e-13)

    mixed_pairs = np.r_[real_pairs, [(0, 1j), (1, 1j)]]
    expected = np.vectorize(reference)(mixed_pairs[:, 0], mixed_pairs[:, 1])
    assert_func_equal(special.xlogy, expected, mixed_pairs, rtol=1e-13, atol=1e-13)
def test_xlog1py():
    """xlog1py(x, y) = x*log1p(y), with 0*log1p(anything non-nan) = 0."""
    def reference(x, y):
        return x if (x == 0 and not np.isnan(y)) else x * np.log1p(y)

    pairs = np.asarray([(0, 0), (0, np.nan), (0, np.inf), (1.0, 2.0),
                        (1, 1e-30)], dtype=float)
    expected = np.vectorize(reference)(pairs[:, 0], pairs[:, 1])
    assert_func_equal(special.xlog1py, expected, pairs, rtol=1e-13, atol=1e-13)
def test_entr():
    """Elementwise entropy -x*log(x), extended to -inf for x < 0."""
    def reference(x):
        return -np.inf if x < 0 else -special.xlogy(x, x)

    magnitudes = (0, 0.5, 1.0, np.inf)
    points = [sgn * v for sgn, v in itertools.product([-1, 1], magnitudes)]
    z = np.array(points, dtype=float)
    expected = np.vectorize(reference, otypes=[np.float64])(z)
    assert_func_equal(special.entr, expected, z, rtol=1e-13, atol=1e-13)
def test_kl_div():
    """Pointwise Kullback-Leibler term x*log(x/y) - x + y."""
    def reference(x, y):
        if x < 0 or y < 0 or (y == 0 and x != 0):
            # extension of natural domain to preserve convexity
            return np.inf
        if np.isposinf(x) or np.isposinf(y):
            # limits within the natural domain
            return np.inf
        if x == 0:
            return y
        return special.xlogy(x, x/y) - x + y

    magnitudes = (0, 0.5, 1.0)
    signs = [-1, 1]
    pairs = [(sa * va, sb * vb)
             for sa, va, sb, vb in itertools.product(signs, magnitudes,
                                                     signs, magnitudes)]
    z = np.array(pairs, dtype=float)
    expected = np.vectorize(reference, otypes=[np.float64])(z[:, 0], z[:, 1])
    assert_func_equal(special.kl_div, expected, z, rtol=1e-13, atol=1e-13)
def test_rel_entr():
    """Pointwise relative entropy x*log(x/y), inf off the natural domain."""
    def reference(x, y):
        if x > 0 and y > 0:
            return special.xlogy(x, x/y)
        if x == 0 and y >= 0:
            return 0
        return np.inf

    magnitudes = (0, 0.5, 1.0)
    signs = [-1, 1]
    pairs = [(sa * va, sb * vb)
             for sa, va, sb, vb in itertools.product(signs, magnitudes,
                                                     signs, magnitudes)]
    z = np.array(pairs, dtype=float)
    expected = np.vectorize(reference, otypes=[np.float64])(z[:, 0], z[:, 1])
    assert_func_equal(special.rel_entr, expected, z, rtol=1e-13, atol=1e-13)
def test_huber():
    """Huber loss: inf for delta < 0, quadratic inside |r| < delta, linear outside."""
    assert_equal(special.huber(-1, 1.5), np.inf)
    assert_allclose(special.huber(2, 1.5), 0.5 * np.square(1.5))
    assert_allclose(special.huber(2, 2.5), 2 * (2.5 - 0.5 * 2))

    def reference(delta, r):
        if delta < 0:
            return np.inf
        if np.abs(r) < delta:
            return 0.5 * np.square(r)
        return delta * (np.abs(r) - 0.5 * delta)

    z = np.random.randn(10, 2)
    expected = np.vectorize(reference, otypes=[np.float64])(z[:, 0], z[:, 1])
    assert_func_equal(special.huber, expected, z, rtol=1e-13, atol=1e-13)
def test_pseudo_huber():
    """Smooth Huber approximation; zero whenever delta or r is zero."""
    def reference(delta, r):
        if delta < 0:
            return np.inf
        if (not delta) or (not r):
            return 0
        return delta**2 * (np.sqrt(1 + (r/delta)**2) - 1)

    random_pairs = np.random.randn(10, 2).tolist()
    z = np.array(random_pairs + [[0, 0.5], [0.5, 0]])
    expected = np.vectorize(reference, otypes=[np.float64])(z[:, 0], z[:, 1])
    assert_func_equal(special.pseudo_huber, expected, z, rtol=1e-13, atol=1e-13)
# Allow running this test module directly (nose-era entry point).
if __name__ == "__main__":
    run_module_suite()
| asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/scipy/special/tests/test_basic.py | Python | mit | 132,346 | [
"Elk"
] | 299245f565438e2de3860b54109c17224f18d9582c07c82cfbc82cf19c78f084 |
#! /usr/bin/env python
"""
Launches a ParaView visualization of a simulation.
User provides saved ParaView state file and output directory.
Usage:
.. code-block:: bash
visualize-output outputs my_visu_state.pvsm
Opens paraview visualization for state ``my_visu_state.pvsm`` where all pvd
files are read from ``outputs`` directory.
.. code-block:: bash
visualize-output -r outputs my_visu_state.pvsm
As above but first regenerates all ``*.pvd`` files that contain ``*.vtu`` files
for time indices 0..100. Useful in cases where a shorter pvd file has been
created by another simulation run.
.. code-block:: bash
visualize-output -r -f 20 -l 200 outputs my_visu_state.pvsm
As above but generates ``*.pvd`` files for time indices 20..200.
.. code-block:: bash
 visualize-output -r -p outputs my_visu_state.pvsm
As above but first generates all ``*.pvd`` for a parallel run, i.e. it lists
``*.pvtu`` files instead of ``*.vtu`` files.
"""
import argparse
import glob
import os
import subprocess
import tempfile
TMP_DIR = tempfile.gettempdir()
def generate_pvd_file(outdir, fieldname, timesteps, usepvtu=False):
    """
    Generates ParaView PVD XML file fieldName.pvd that contains vtu or ptvu files for
    the given time steps range.

    :arg str outdir: directory where pvd files are stored
    :arg str fieldname: name of the field that appears in vtu/pvtu file names
    :arg timesteps: list of time indices of vtu files to include in the pvd file
    :type timesteps: list of int
    :kwarg bool usepvtu: if True list parallel ``*.pvtu`` files instead of
        serial ``*.vtu`` files
    """
    template_header = """<?xml version="1.0" ?>\n"""
    template_openblock = """<VTKFile type="Collection" version="0.1" byte_order="LittleEndian">\n<Collection>\n"""
    template_closeblock = """</Collection>\n</VTKFile>\n"""
    template_entry = """<DataSet timestep="{i}" file="{name}_{i}.{ext}" />"""
    extension = 'pvtu' if usepvtu else 'vtu'
    content = template_header
    content += template_openblock
    for i in timesteps:
        content += template_entry.format(i=i, name=fieldname, ext=extension)
    content += template_closeblock
    filename = os.path.join(outdir, fieldname+'.pvd')
    print('generating {:}'.format(filename))
    # "with" guarantees the handle is closed even if the write fails
    # (the original open/write/close left the file open on error)
    with open(filename, 'w') as f:
        f.write(content)
def replace_path_in_xml(filename, outputfile, new_path):
    """
    Replaces all paths in paraview xml file PVDReader entries.

    :arg str filename: XML file to process
    :arg str outputfile: file where updated XML file is saved
    :arg new_path: a new path for all pvd files

    All PVDReader entries of the form

        <Proxy group="sources" type="PVDReader" ...>
            <Property name="FileName" ...>
                <Element value="some/path/to/a_file.pvd" .../>
                ...
            </Property>
            ...
        </Proxy>

    will be replaced by

        <Proxy group="sources" type="PVDReader" ...>
            <Property name="FileName" ...>
                <Element value="new_path/a_file/a_file.pvd" .../>
                ...
            </Property>
            ...
        </Proxy>

    i.e. each pvd file is assumed to live in a subdirectory of ``new_path``
    named after the field itself, matching the output layout regenerated by
    :func:`generate_pvd_file`.  (The previous docstring wrongly claimed the
    field subdirectory was omitted.)
    """
    import xml.etree.ElementTree as ET
    tree = ET.parse(filename)
    root = tree.getroot()
    readers = root[0].findall("Proxy[@type='PVDReader']")
    for reader in readers:
        fname_element = reader.findall("Property[@name='FileName']/Element")[0]
        old_fname = fname_element.attrib['value']
        # 'fname' instead of 'file': avoid shadowing the builtin
        fname = os.path.basename(old_fname)
        fieldname = os.path.splitext(fname)[0]
        fname_element.attrib['value'] = os.path.join(new_path, fieldname, fname)
    tree.write(outputfile)
def process_args(outputdir, state_file, regenerate_pvd=True, timesteps=None,
                 parallel_vtu=True):
    """
    Processes command line arguments

    :arg str outputdir: directory containing the per-field ``*.pvd`` files
    :arg str state_file: ParaView ``*.pvsm`` state file to open
    :kwarg bool regenerate_pvd: if True, rewrite every pvd file found under
        ``outputdir`` for the given ``timesteps``
    :kwarg timesteps: time indices passed on to :func:`generate_pvd_file`
    :kwarg bool parallel_vtu: list ``*.pvtu`` instead of ``*.vtu`` entries
    """
    temp_state_file = os.path.join(TMP_DIR, 'tmp.pvsm')
    paraview_bin = 'paraview'
    pv_log_file = os.path.join(TMP_DIR, 'log_pvoutput.txt')
    static_pvd_files = ['bath']  # outputs that are not time dependent
    # regenerate all existing PVD files
    if regenerate_pvd:
        pvd_files = glob.glob(os.path.join(outputdir, '*/*.pvd'))
        for f in pvd_files:
            path, fname = os.path.split(f)
            fieldName, extension = os.path.splitext(fname)
            if fieldName not in static_pvd_files:
                generate_pvd_file(path, fieldName, timesteps, usepvtu=parallel_vtu)
    # read state file, replace directory with new one
    replace_path_in_xml(state_file, temp_state_file, outputdir)
    # launch paraview in a new independent process
    log_file = open(pv_log_file, 'w')
    # NOTE(review): stdout is redirected twice -- via the shell '>' token in
    # cmd and via the stdout=log_file argument; presumably only one is
    # needed. Verify which redirection actually wins before cleaning up.
    cmd = ' '.join([paraview_bin, '--state={:}'.format(temp_state_file), '>', pv_log_file])
    subprocess.Popen(cmd, shell=True, stdout=log_file, stderr=subprocess.STDOUT)
if __name__ == '__main__':
    # Command line interface; see the module docstring for usage examples.
    parser = argparse.ArgumentParser(description='Launch ParaView visualization',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('outputdir', type=str,
                        help='Directory where .pvd files are stored')
    parser.add_argument('statefile', type=str,
                        help='ParaView *.pvsm state file')
    parser.add_argument('-r', action='store_true', dest='regenerate_pvd',
                        help='regenerate PVD files')
    parser.add_argument('-p', action='store_true', dest='parallel_vtu',
                        help='regenerate PVD files for parallel outputs')
    parser.add_argument('-f', '--first-time-step', type=int, default=0,
                        help='first time step to be included in regenerated PVD file')
    parser.add_argument('-l', '--last-time-step', type=int, default=100,
                        help='last time step to be included in regenerated PVD file')
    args = parser.parse_args()
    # inclusive range of time indices for the regenerated pvd files
    timesteps = range(args.first_time_step, args.last_time_step + 1)
    process_args(args.outputdir, args.statefile, regenerate_pvd=args.regenerate_pvd,
                 timesteps=timesteps, parallel_vtu=args.parallel_vtu)
| tkarna/cofs | scripts/visualize-output.py | Python | mit | 5,929 | [
"ParaView"
] | 91d79ca63e69ddf8187a1107fe093ca4d35d811fd3a8612093d6d81b016df544 |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2016 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""INSPIRE authors holdingpen views."""
from __future__ import absolute_import, division, print_function
import copy
import os
import re
import requests
from flask import (
abort,
Blueprint,
current_app,
jsonify,
render_template,
request,
url_for,
redirect,
)
from flask_babelex import gettext as _
from flask_breadcrumbs import register_breadcrumb
from flask_login import login_required, current_user
from werkzeug.datastructures import MultiDict
from invenio_db import db
from invenio_workflows import workflow_object_class, start, resume
from inspirehep.dojson.utils import strip_empty_values
from inspirehep.modules.forms.form import DataExporter
from ..forms import AuthorUpdateForm
from ..tasks import formdata_to_model
blueprint = Blueprint(
'inspirehep_authors_holdingpen',
__name__,
url_prefix='/submit/author',
template_folder='../templates',
static_folder='../static',
)
def convert_for_form(data):
    """
    Convert author data model form field names.

    Mutates ``data`` in place, mapping record-model keys ("name", "urls",
    "positions", "advisors", "ids", ...) onto the flat field names used by
    AuthorUpdateForm ("full_name", "websites", "institution_history", ...).

    FIXME This might be better in a different file and as a dojson conversion
    """
    if "name" in data:
        # "value" is expected to be "Family, Given" -- split on the comma.
        data["full_name"] = data["name"].get("value")
        try:
            data["given_names"] = data["name"].get(
                "value").split(",")[1].strip()
        except IndexError:
            # no comma: treat the whole string as the family name
            data["given_names"] = ""
        data["family_name"] = data["name"].get("value").split(",")[0].strip()
        data["display_name"] = data["name"].get("preferred_name")
        data["status"] = data["name"].get("status", "").lower()
    if "urls" in data:
        # URLs with a known description become dedicated social-media
        # fields; anything else is a generic website entry.
        data["websites"] = []
        for url in data["urls"]:
            if "description" not in url:
                data["websites"].append({"webpage": url["value"]})
            else:
                if url["description"].lower() == "twitter":
                    data["twitter_url"] = url["value"]
                elif url["description"].lower() == "blog":
                    data["blog_url"] = url["value"]
                elif url["description"].lower() == "linkedin":
                    data["linkedin_url"] = url["value"]
        del data["urls"]
    if "field_categories" in data:
        data["research_field"] = data['field_categories']
    if "positions" in data:
        data["institution_history"] = []
        for position in data["positions"]:
            # NOTE(review): this checks for 'start_year'/'end_year' but the
            # position entries below read 'start_date'/'end_date'; confirm
            # which key names the record model actually uses.
            if not any(
                [
                    key in position for key in ('name', 'rank',
                                                'start_year', 'end_year')
                ]
            ):
                if 'email' in position:
                    # Only email available, take as public_email
                    data["public_email"] = position.get("email")
                continue
            pos = {}
            pos["name"] = position.get("institution", {}).get("name")
            pos["rank"] = position.get("rank", "")
            pos["start_year"] = position.get("start_date", "")
            pos["end_year"] = position.get("end_date", "")
            pos["current"] = True if position.get("status") else False
            pos["old_email"] = position.get("old_email", "")
            if position.get("email"):
                pos["email"] = position.get("email", "")
                # first position email found doubles as the public email
                if not data.get("public_email"):
                    data["public_email"] = position.get("email")
            data["institution_history"].append(pos)
        # record stores newest first; the form expects oldest first
        data["institution_history"].reverse()
    if 'advisors' in data:
        advisors = data['advisors']
        data['advisors'] = []
        for advisor in advisors:
            adv = {}
            adv["name"] = advisor.get("name", "")
            adv["degree_type"] = advisor.get("degree_type", "")
            data["advisors"].append(adv)
    if "ids" in data:
        # flatten known identifier types into dedicated form fields
        for id in data["ids"]:
            try:
                if id["type"] == "ORCID":
                    data["orcid"] = id["value"]
                elif id["type"] == "BAI":
                    data["bai"] = id["value"]
                elif id["type"] == "INSPIRE":
                    data["inspireid"] = id["value"]
            except KeyError:
                # Protect against cases when there is no value in metadata
                pass
def get_inspire_url(data):
    """Return the INSPIRE URL the submitter should be sent back to.

    Prefers the author profile (BAI), then the record page (recid), and
    falls back to the HEPNames search page.
    """
    if data.get("bai"):
        return "http://inspirehep.net/author/profile/" + data["bai"]
    if data.get("recid"):
        return "http://inspirehep.net/record/" + str(data["recid"])
    return "http://inspirehep.net/hepnames"
@blueprint.route('/validate', methods=['POST'])
def validate():
    """Validate form and return validation errors.

    Returns a JSON object ``{"messages": {...}}`` restricted to the fields
    that were actually submitted, so the client can show per-field errors.

    FIXME: move to forms module as a generic /validate where we can pass
    the for class to validate.
    """
    # NOTE(review): the route only accepts POST, so this guard is
    # presumably unreachable defensive code -- confirm before removing.
    if request.method != 'POST':
        abort(400)
    is_update = True if request.args.get('is_update') == 'True' else False
    data = request.json or MultiDict({})
    formdata = MultiDict(data or {})
    form = AuthorUpdateForm(formdata=formdata, is_update=is_update)
    form.validate()
    result = {}
    # only report messages for fields present in the submitted data
    changed_msgs = dict(
        (name, messages) for name, messages in form.messages.items()
        if name in formdata.keys()
    )
    result['messages'] = changed_msgs
    return jsonify(result)
@blueprint.route('/create', methods=['GET', 'POST'])
@register_breadcrumb(blueprint, '.new', _('New author information'))
@login_required
def new():
    """View for INSPIRE author new form."""
    data = {}
    # 'unicode' is the Python 2 text type; this module is Python 2 only.
    bai = request.values.get('bai', u"", type=unicode)
    if bai:
        # Add BAI information to form in order to keep connection between
        # a HEPName and an author profile.
        data["bai"] = bai
    form = AuthorUpdateForm(data=data)
    ctx = {
        "action": url_for('.submitnew'),
        "name": "authorUpdateForm",
        "id": "authorUpdateForm",
    }
    return render_template('authors/forms/new_form.html', form=form, **ctx)
@blueprint.route('/update', methods=['GET', 'POST'])
@register_breadcrumb(blueprint, '.update', _('Update author information'))
@login_required
def update():
    """View for INSPIRE author update form.

    Fetches the MARCXML export of the HEPNames record ``recid`` from the
    legacy server, converts it to the form's field layout and pre-fills
    the update form with it.  Without a recid, redirects to the new form.
    """
    from dojson.contrib.marc21.utils import create_record
    from inspirehep.dojson.hepnames import hepnames
    recid = request.values.get('recid', 0, type=int)
    data = {}
    if recid:
        try:
            url = os.path.join(
                current_app.config["AUTHORS_UPDATE_BASE_URL"],
                "record", str(recid), "export", "xm")
            xml = requests.get(url)
            # extract just the <record>...</record> element from the export
            record_regex = re.compile(
                r"\<record\>.*\<\/record\>", re.MULTILINE + re.DOTALL)
            xml_content = record_regex.search(xml.content).group()
            data = strip_empty_values(
                hepnames.do(create_record(xml_content)))  # .encode("utf-8")
            convert_for_form(data)
        except requests.exceptions.RequestException:
            # network failure: fall through with an empty form
            pass
        data["recid"] = recid
    else:
        return redirect(url_for("inspirehep_authors_holdingpen.new"))
    form = AuthorUpdateForm(data=data, is_update=True)
    ctx = {
        "action": url_for('.submitupdate'),
        "name": "authorUpdateForm",
        "id": "authorUpdateForm",
    }
    # FIXME create template in authors module
    return render_template('authors/forms/update_form.html', form=form, **ctx)
@blueprint.route('/submitupdate', methods=['POST'])
@login_required
def submitupdate():
    """Form action handler for INSPIRE author update form.

    Creates a workflow object flagged as an update, persists it, and kicks
    off the "author" workflow asynchronously.
    """
    form = AuthorUpdateForm(formdata=request.form, is_update=True)
    visitor = DataExporter()
    visitor.visit(form)
    workflow_object = workflow_object_class.create(
        data={},
        id_user=current_user.get_id(),
        data_type="authors"
    )
    # keep the raw form data alongside the converted record model
    workflow_object.extra_data['formdata'] = copy.deepcopy(visitor.data)
    workflow_object.extra_data['is-update'] = True
    workflow_object.data = formdata_to_model(workflow_object, visitor.data)
    # order matters: save + commit before handing off to the worker
    workflow_object.save()
    db.session.commit()
    # Start workflow. delay will execute the workflow in the background
    start.delay("author", object_id=workflow_object.id)
    ctx = {
        "inspire_url": get_inspire_url(visitor.data)
    }
    return render_template('authors/forms/update_success.html', **ctx)
@blueprint.route('/submitnew', methods=['POST'])
@login_required
def submitnew():
    """Form action handler for INSPIRE author new form.

    Same flow as :func:`submitupdate`, but without the 'is-update' flag.
    """
    form = AuthorUpdateForm(formdata=request.form)
    visitor = DataExporter()
    visitor.visit(form)
    workflow_object = workflow_object_class.create(
        data={},
        id_user=current_user.get_id(),
        data_type="authors"
    )
    # keep the raw form data alongside the converted record model
    workflow_object.extra_data['formdata'] = copy.deepcopy(visitor.data)
    workflow_object.data = formdata_to_model(workflow_object, visitor.data)
    # order matters: save + commit before handing off to the worker
    workflow_object.save()
    db.session.commit()
    # Start workflow. delayed=True will execute the workflow in the
    # background using, for example, Celery.
    start.delay("author", object_id=workflow_object.id)
    ctx = {
        "inspire_url": get_inspire_url(visitor.data)
    }
    return render_template('authors/forms/new_success.html', **ctx)
@blueprint.route('/newreview', methods=['GET', 'POST'])
@login_required
# @permission_required(viewauthorreview.name)
def newreview():
    """View for INSPIRE author new form review by a cataloger.

    Renders the halted workflow object's form data for manual review;
    submission is handled by :func:`reviewhandler`.
    """
    objectid = request.values.get('objectid', 0, type=int)
    if not objectid:
        abort(400)
    workflow_object = workflow_object_class.get(objectid)
    form = AuthorUpdateForm(
        data=workflow_object.extra_data["formdata"], is_review=True)
    ctx = {
        "action": url_for('.reviewhandler', objectid=objectid),
        "name": "authorUpdateForm",
        "id": "authorUpdateForm",
        "objectid": objectid
    }
    return render_template('authors/forms/review_form.html', form=form, **ctx)
@blueprint.route('/reviewhandler', methods=['POST'])
@login_required
# @permission_required(viewauthorreview.name)
def reviewhandler():
    """Form handler when a cataloger accepts an author review.

    Stores the (possibly edited) form data and the approval decision on
    the workflow object, then resumes the halted workflow asynchronously.
    """
    objectid = request.values.get('objectid', 0, type=int)
    if not objectid:
        abort(400)
    form = AuthorUpdateForm(formdata=request.form)
    visitor = DataExporter()
    visitor.visit(form)
    workflow_object = workflow_object_class.get(objectid)
    workflow_object.extra_data["approved"] = True
    # 'ticket' arrives as the string "True"/"False" from the form
    workflow_object.extra_data["ticket"] = request.form.get('ticket') == "True"
    workflow_object.extra_data['formdata'] = visitor.data
    workflow_object.data = formdata_to_model(workflow_object, visitor.data)
    # order matters: save + commit before resuming in the worker
    workflow_object.save()
    db.session.commit()
    resume.delay(workflow_object.id)
    return render_template('authors/forms/new_review_accepted.html',
                           approved=True)
@blueprint.route('/holdingpenreview', methods=['GET', 'POST'])
@login_required
# @permission_required(viewauthorreview.name)
def holdingpenreview():
    """Handler for approval or rejection of new authors in Holding Pen."""
    object_id = request.values.get('objectid', 0, type=int)
    if not object_id:
        abort(400)
    is_approved = request.values.get('approved', False, type=bool)
    with_ticket = request.values.get('ticket', False, type=bool)
    # Record the cataloger's decision on the halted workflow object,
    # persist it, then resume the workflow asynchronously.
    workflow_object = workflow_object_class.get(object_id)
    workflow_object.extra_data["approved"] = is_approved
    workflow_object.extra_data["ticket"] = with_ticket
    workflow_object.save()
    db.session.commit()
    resume.delay(workflow_object.id)
    return render_template('authors/forms/new_review_accepted.html',
                           approved=is_approved)
| jacenkow/inspire-next | inspirehep/modules/authors/views/holdingpen.py | Python | gpl-2.0 | 12,677 | [
"VisIt"
] | b734c35f83d6358a6d796d98db9e78edc7f4042cfb339c9963feeba7c695e70c |
from ...pages.studio.auto_auth import AutoAuthPage
from ...fixtures.course import CourseFixture
from ..helpers import UniqueCourseTest
from ...pages.studio.overview import CourseOutlinePage
from ...pages.studio.utils import verify_ordering
class StudioCourseTest(UniqueCourseTest):
    """
    Base class for all Studio course tests.
    """
    def setUp(self, is_staff=False):
        """
        Install a course with no content using a fixture.

        :param bool is_staff: give the auto-auth user global staff access
            in addition to the instructor role on the course.
        """
        super(StudioCourseTest, self).setUp()
        self.course_fixture = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )
        # subclasses add content via populate_course_fixture *before* install
        self.populate_course_fixture(self.course_fixture)
        self.course_fixture.install()
        self.user = self.course_fixture.user
        self.log_in(self.user, is_staff)
    def populate_course_fixture(self, course_fixture):
        """
        Populate the children of the test course fixture.

        Hook for subclasses; the base implementation adds nothing.
        """
        pass
    def log_in(self, user, is_staff=False):
        """
        Log in as the user that created the course. The user will be given instructor access
        to the course and enrolled in it. By default the user will not have staff access unless
        is_staff is passed as True.
        """
        self.auth_page = AutoAuthPage(
            self.browser,
            staff=is_staff,
            username=user.get('username'),
            email=user.get('email'),
            password=user.get('password')
        )
        self.auth_page.visit()
class ContainerBase(StudioCourseTest):
    """
    Base class for tests that do operations on the container page.
    """
    def setUp(self):
        """
        Create a unique identifier for the course used in this test.
        """
        # Ensure that the superclass sets up
        super(ContainerBase, self).setUp()
        self.outline = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
    def go_to_nested_container_page(self):
        """
        Go to the nested container page.
        """
        unit = self.go_to_unit_page()
        # The 0th entry is the unit page itself.
        container = unit.xblocks[1].go_to_container()
        return container
    def go_to_unit_page(self, section_name='Test Section', subsection_name='Test Subsection', unit_name='Test Unit'):
        """
        Go to the test unit page.
        """
        self.outline.visit()
        subsection = self.outline.section(section_name).subsection(subsection_name)
        return subsection.toggle_expand().unit(unit_name).go_to()
    def do_action_and_verify(self, action, expected_ordering):
        """
        Perform the supplied action and then verify the resulting ordering.

        :param action: callable taking the container page, performing the
            operation under test.
        :param expected_ordering: ordering expected both immediately after
            the action and after a page reload (i.e. persisted).
        """
        container = self.go_to_nested_container_page()
        action(container)
        verify_ordering(self, container, expected_ordering)
        # Reload the page to see that the change was persisted.
        container = self.go_to_nested_container_page()
        verify_ordering(self, container, expected_ordering)
| dsajkl/reqiop | common/test/acceptance/tests/studio/base_studio_test.py | Python | agpl-3.0 | 3,318 | [
"VisIt"
] | 4bbc21c91f52a43bbcc58abbc23311e55728633151d204eb8603abab803be343 |
"""
Student Views
"""
import datetime
import logging
import uuid
import json
import warnings
from collections import defaultdict
from urlparse import urljoin
from pytz import UTC
from requests import HTTPError
from ipware.ip import get_ip
from django.conf import settings
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset_confirm
from django.contrib import messages
from django.core.context_processors import csrf
from django.core import mail
from django.core.urlresolvers import reverse
from django.core.validators import validate_email, ValidationError
from django.db import IntegrityError, transaction
from django.http import (HttpResponse, HttpResponseBadRequest, HttpResponseForbidden,
HttpResponseServerError, Http404)
from django.shortcuts import redirect
from django.utils.encoding import force_bytes, force_text
from django.utils.translation import ungettext
from django.utils.http import base36_to_int, urlsafe_base64_encode
from django.utils.translation import ugettext as _, get_language
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_POST, require_GET
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template.response import TemplateResponse
from ratelimitbackend.exceptions import RateLimitException
from social.apps.django_app import utils as social_utils
from social.backends import oauth as social_oauth
from social.exceptions import AuthException, AuthAlreadyAssociated
from edxmako.shortcuts import render_to_response, render_to_string
from course_modes.models import CourseMode
from shoppingcart.api import order_history
from student.models import (
Registration, UserProfile,
PendingEmailChange, CourseEnrollment, CourseEnrollmentAttribute, unique_id_for_user,
CourseEnrollmentAllowed, UserStanding, LoginFailures,
create_comments_service_user, PasswordHistory, UserSignupSource,
DashboardConfiguration, LinkedInAddToProfileConfiguration, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED)
from student.forms import AccountCreationForm, PasswordResetFormNoActive
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification
from certificates.models import CertificateStatuses, certificate_status_for_student
from certificates.api import ( # pylint: disable=import-error
get_certificate_url,
has_html_certificates_enabled,
)
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore import ModuleStoreEnum
from collections import namedtuple
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date # pylint: disable=import-error
from courseware.access import has_access
from django_comment_common.models import Role
from external_auth.models import ExternalAuthMap
import external_auth.views
from external_auth.login_and_register import (
login as external_auth_login,
register as external_auth_register
)
from bulk_email.models import Optout, CourseAuthorization
from lang_pref import LANGUAGE_KEY
import track.views
import dogstats_wrapper as dog_stats_api
from util.db import outer_atomic
from util.json_request import JsonResponse
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.milestones_helpers import (
get_pre_requisite_courses_not_completed,
)
from microsite_configuration import microsite
from util.password_policy_validators import (
validate_password_length, validate_password_complexity,
validate_password_dictionary
)
import third_party_auth
from third_party_auth import pipeline, provider
from student.helpers import (
check_verify_status_by_course,
auth_pipeline_urls, get_next_url_for_login_page,
DISABLE_UNENROLL_CERT_STATES,
)
from student.cookies import set_logged_in_cookies, delete_logged_in_cookies
from student.models import anonymous_id_for_user
from shoppingcart.models import DonationConfiguration, CourseRegistrationCode
from embargo import api as embargo_api
import analytics
from eventtracking import tracker
# Note that this lives in LMS, so this dependency should be refactored.
from notification_prefs.views import enable_notifications
# Note that this lives in openedx, so this dependency should be refactored.
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
from openedx.core.djangoapps.programs.views import get_course_programs_for_dashboard
from openedx.core.djangoapps.programs.utils import is_student_dashboard_programs_enabled
# Module-level loggers: general student-app events vs. the security-sensitive
# audit trail (login/registration events).
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")
# Lightweight record describing one reverification entry shown on the dashboard.
ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display')  # pylint: disable=invalid-name
# Tracking event name emitted when a user starts changing an account setting.
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
def csrf_token(context):
    """A csrf token that can be included in a form."""
    token = context.get('csrf_token', '')
    if token == 'NOTPROVIDED':
        # Django uses this sentinel when CSRF is disabled for the request.
        return ''
    hidden_input = (u'<div style="display:none"><input type="hidden"'
                    ' name="csrfmiddlewaretoken" value="%s" /></div>')
    return hidden_input % (token)
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=AnonymousUser()):
    """
    Render the edX main page.
    extra_context is used to allow immediate display of certain modal windows, eg signup,
    as used by external_auth.
    """
    extra_context = {} if extra_context is None else extra_context
    # FORCE_UNIVERSITY_DOMAIN is normally False; the explicit `is False`
    # check matters because domain=None is a valid configured value.
    domain = settings.FEATURES.get('FORCE_UNIVERSITY_DOMAIN')
    if domain is False:
        domain = request.META.get('HTTP_HOST')
    # Course selection itself lives in courseware.courses.
    courses = get_courses(user, domain=domain)
    sort_by_date = microsite.get_value(
        "ENABLE_COURSE_SORTING_BY_START_DATE",
        settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"],
    )
    if sort_by_date:
        courses = sort_by_start_date(courses)
    else:
        courses = sort_by_announcement(courses)
    context = {'courses': courses}
    context.update(extra_context)
    return render_to_response('index.html', context)
def process_survey_link(survey_link, user):
    """
    Substitute any {UNIQUE_ID} placeholder in the link with a unique id for
    the user (currently sha1(user.username)). Links without the placeholder
    are returned unchanged.
    """
    unique_id = unique_id_for_user(user)
    return survey_link.format(UNIQUE_ID=unique_id)
def cert_info(user, course_overview, course_mode):
    """
    Get the certificate info needed to render the dashboard section for the given
    student and course.
    Arguments:
        user (User): A user.
        course_overview (CourseOverview): A course.
        course_mode (str): The enrollment mode (honor, verified, audit, etc.)
    Returns:
        dict: Empty dict if certificates are disabled or hidden, or a dictionary with keys:
            'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted'
            'show_download_url': bool
            'download_url': url, only present if show_download_url is True
            'show_disabled_download_button': bool -- true if state is 'generating'
            'show_survey_button': bool
            'survey_url': url, only if show_survey_button is True
            'grade': if status is not 'processing'
            'can_unenroll': if status allows for unenrollment
    """
    # Courses that may not certify (e.g. not ended yet) show nothing.
    if not course_overview.may_certify():
        return {}
    cert_status = certificate_status_for_student(user, course_overview.id)
    return _cert_info(user, course_overview, cert_status, course_mode)
def reverification_info(statuses):
    """
    Return reverification-related information for *all* of the user's
    enrollments whose reverification status is in `statuses`.
    Args:
        statuses (list): reverification statuses we want information for,
            e.g. ["must_reverify", "denied"]
    Returns:
        defaultdict(list): one key per requested status; each value is the
        (possibly empty) list of entries, sorted by reverification end date.
    """
    reverifications = defaultdict(list)
    for status in statuses:
        # Accessing the key materializes an (initially empty) list entry for
        # this status; only non-empty lists need sorting.
        entries = reverifications[status]
        if entries:
            entries.sort(key=lambda item: item.date)
    return reverifications
def get_course_enrollments(user, org_to_include, orgs_to_exclude):
    """
    Given a user, yield a filtered set of his or her course enrollments.
    Arguments:
        user (User): the user in question.
        org_to_include (str): for use in Microsites. If not None, ONLY courses
            of this org will be returned.
        orgs_to_exclude (list[str]): courses of these orgs are excluded.
    Returns:
        generator[CourseEnrollment]: a sequence of enrollments to be displayed
        on the user's dashboard.
    """
    for enrollment in CourseEnrollment.enrollments_for_user(user):
        course_overview = enrollment.course_overview
        # A missing overview means the course is broken or gone: log and skip.
        if not course_overview:
            log.error(
                "User %s enrolled in broken or non-existent course %s",
                user.username,
                enrollment.course_id
            )
            continue
        org = course_overview.location.org
        # Inside a Microsite: drop anything not attributed (by ORG) to it.
        if org_to_include and org != org_to_include:
            continue
        # Drop enrollments whose org is attributed to (another) Microsite.
        if org in orgs_to_exclude:
            continue
        yield enrollment
def _cert_info(user, course_overview, cert_status, course_mode): # pylint: disable=unused-argument
    """
    Implements the logic for cert_info -- split out for testing.
    Arguments:
        user (User): A user.
        course_overview (CourseOverview): A course.
        cert_status (dict or None): result of certificate_status_for_student;
            None when no certificate record exists for this user/course.
        course_mode (str): The enrollment mode (honor, verified, audit, etc.)
    Returns:
        dict: see cert_info for the key contract. Returns {} when the course
        hides early certificate info and the status is a hidden one.
    """
    # simplify the status for the template using this lookup table
    template_state = {
        CertificateStatuses.generating: 'generating',
        CertificateStatuses.regenerating: 'generating',
        CertificateStatuses.downloadable: 'ready',
        CertificateStatuses.notpassing: 'notpassing',
        CertificateStatuses.restricted: 'restricted',
    }
    # Any status not in the table above renders as 'processing'.
    default_status = 'processing'
    default_info = {'status': default_status,
                    'show_disabled_download_button': False,
                    'show_download_url': False,
                    'show_survey_button': False,
                    'can_unenroll': True
                    }
    if cert_status is None:
        return default_info
    # 'early_no_info' courses suppress all cert messaging while the cert is
    # in a not-yet-visible state.
    is_hidden_status = cert_status['status'] in ('unavailable', 'processing', 'generating', 'notpassing')
    if course_overview.certificates_display_behavior == 'early_no_info' and is_hidden_status:
        return {}
    status = template_state.get(cert_status['status'], default_status)
    status_dict = {
        'status': status,
        'show_download_url': status == 'ready',
        'show_disabled_download_button': status == 'generating',
        'mode': cert_status.get('mode', None),
        'linked_in_url': None,
        'can_unenroll': status not in DISABLE_UNENROLL_CERT_STATES,
    }
    # Offer the end-of-course survey once the cert has reached a final-ish state.
    if (status in ('generating', 'ready', 'notpassing', 'restricted') and
            course_overview.end_of_course_survey_url is not None):
        status_dict.update({
            'show_survey_button': True,
            'survey_url': process_survey_link(course_overview.end_of_course_survey_url, user)})
    else:
        status_dict['show_survey_button'] = False
    if status == 'ready':
        # showing the certificate web view button if certificate is ready state and feature flags are enabled.
        if has_html_certificates_enabled(course_overview.id, course_overview):
            if course_overview.has_any_active_web_certificate:
                certificate_url = get_certificate_url(
                    user_id=user.id,
                    course_id=unicode(course_overview.id),
                )
                status_dict.update({
                    'show_cert_web_view': True,
                    'cert_web_view_url': u'{url}'.format(url=certificate_url)
                })
            else:
                # don't show download certificate button if we don't have an active certificate for course
                status_dict['show_download_url'] = False
        elif 'download_url' not in cert_status:
            # Data inconsistency: downloadable status without a URL. Fall back
            # to the default ('processing') info rather than render a dead link.
            log.warning(
                u"User %s has a downloadable cert for %s, but no download url",
                user.username,
                course_overview.id
            )
            return default_info
        else:
            status_dict['download_url'] = cert_status['download_url']
        # If enabled, show the LinkedIn "add to profile" button
        # Clicking this button sends the user to LinkedIn where they
        # can add the certificate information to their profile.
        linkedin_config = LinkedInAddToProfileConfiguration.current()
        # posting certificates to LinkedIn is not currently
        # supported in microsites/White Labels
        if linkedin_config.enabled and not microsite.is_request_in_microsite():
            status_dict['linked_in_url'] = linkedin_config.add_to_profile_url(
                course_overview.id,
                course_overview.display_name,
                cert_status.get('mode'),
                cert_status['download_url']
            )
    if status in ('generating', 'ready', 'notpassing', 'restricted'):
        if 'grade' not in cert_status:
            # Note: as of 11/20/2012, we know there are students in this state-- cs169.1x,
            # who need to be regraded (we weren't tracking 'notpassing' at first).
            # We can add a log.warning here once we think it shouldn't happen.
            return default_info
        else:
            status_dict['grade'] = cert_status['grade']
    return status_dict
@ensure_csrf_cookie
def signin_user(request):
    """Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
    # Defer entirely to external auth (e.g. Shibboleth) when it claims the request.
    response_from_external_auth = external_auth_login(request)
    if response_from_external_auth is not None:
        return response_from_external_auth
    # Determine the URL to redirect to following login:
    redirect_to = get_next_url_for_login_page(request)
    if request.user.is_authenticated():
        return redirect(redirect_to)
    # Surface any error message left behind by a third-party-auth pipeline.
    third_party_auth_error = None
    for msg in messages.get_messages(request):
        if msg.extra_tags.split()[0] == "social-auth":
            # msg may or may not be translated. Try translating [again] in case we are able to:
            third_party_auth_error = _(unicode(msg))  # pylint: disable=translation-of-non-string
            break
    context = {
        # Appended to the query string of the "Sign In" button in the header.
        'login_redirect_url': redirect_to,
        # Bool injected into JS to submit the form when a third-party auth
        # pipeline is already running; distinct from the pipeline instance itself.
        'pipeline_running': 'true' if pipeline.running(request) else 'false',
        'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to),
        'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
        'third_party_auth_error': third_party_auth_error,
    }
    return render_to_response('login.html', context)
@ensure_csrf_cookie
def register_user(request, extra_context=None):
    """Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
    # Determine the URL to redirect to following login:
    redirect_to = get_next_url_for_login_page(request)
    if request.user.is_authenticated():
        return redirect(redirect_to)
    # Defer to external auth (e.g. Shibboleth) when it claims the request.
    response_from_external_auth = external_auth_register(request)
    if response_from_external_auth is not None:
        return response_from_external_auth
    context = {
        # Appended to the query string of the "Sign In" button in the header.
        'login_redirect_url': redirect_to,
        'email': '',
        'name': '',
        'running_pipeline': None,
        'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, redirect_url=redirect_to),
        'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
        'selected_provider': '',
        'username': '',
    }
    if extra_context is not None:
        context.update(extra_context)
    if context.get("extauth_domain", '').startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
        return render_to_response('register-shib.html', context)
    # If third-party auth is enabled and a pipeline is running, prepopulate
    # the form with data from the selected provider.
    if third_party_auth.is_enabled() and pipeline.running(request):
        active_pipeline = pipeline.get(request)
        tpa_provider = provider.Registry.get_from_pipeline(active_pipeline)
        if tpa_provider is not None:
            overrides = tpa_provider.get_register_form_data(active_pipeline.get('kwargs'))
            overrides['running_pipeline'] = active_pipeline
            overrides['selected_provider'] = tpa_provider.name
            context.update(overrides)
    return render_to_response('register.html', context)
def complete_course_mode_info(course_id, enrollment, modes=None):
    """
    Compute extra display information from the given course modes and the
    user's current enrollment:
      - whether to show the course upsell information
      - number of days until they can't upsell anymore
    """
    if modes is None:
        modes = CourseMode.modes_for_course_dict(course_id)
    mode_info = {'show_upsell': False, 'days_for_upsell': None}
    # Upsell only when a verified track exists and the learner's current mode
    # can be upgraded to it.
    can_upsell = (
        CourseMode.VERIFIED in modes and
        enrollment.mode in CourseMode.UPSELL_TO_VERIFIED_MODES
    )
    if can_upsell:
        mode_info['show_upsell'] = True
        expiration = modes['verified'].expiration_datetime
        if expiration:
            # Days remaining before the verified-mode deadline passes.
            today = datetime.datetime.now(UTC).date()
            mode_info['days_for_upsell'] = (expiration.date() - today).days
    return mode_info
def is_course_blocked(request, redeemed_registration_codes, course_key):
    """
    Return True if the user's access to the course is blocked because one of
    the registration codes they redeemed is backed by an invalid invoice.
    Arguments:
        request (Request): current request; request.user is the learner.
        redeemed_registration_codes (iterable): CourseRegistrationCode objects
            redeemed by this user for this course.
        course_key (CourseKey): the course being checked.
    Side effects:
        When a block is detected, the user is opted out of course emails and a
        'change-email1-settings' tracking event is emitted.
    """
    for redeemed_registration in redeemed_registration_codes:
        # registration codes may be generated via Bulk Purchase Scenario;
        # only invoice-backed codes can invalidate access, so check that the
        # associated invoice (if any) is valid.
        invoice_item = redeemed_registration.invoice_item
        # NOTE: original code used getattr(invoice, 'is_valid') with no
        # default, which is just attribute access dressed up as a fallback.
        if invoice_item and not invoice_item.invoice.is_valid:
            # disabling email notifications for unpaid registration courses
            Optout.objects.get_or_create(user=request.user, course_id=course_key)
            log.info(
                u"User %s (%s) opted out of receiving emails from course %s",
                request.user.username,
                request.user.email,
                course_key
            )
            track.views.server_track(request, "change-email1-settings", {"receive_emails": "no", "course": course_key.to_deprecated_string()}, page='dashboard')
            return True
    return False
@login_required
@ensure_csrf_cookie
def dashboard(request):
    """
    Render the learner dashboard: the user's course enrollments together with
    per-course mode, certificate, verification, credit, email-settings,
    refund/payment and program information.
    Arguments:
        request (Request): the authenticated request (login_required).
    Returns:
        HttpResponse: the rendered 'dashboard.html' template.
    """
    user = request.user
    platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME)
    # for microsites, we want to filter and only show enrollments for courses within
    # the microsites 'ORG'
    course_org_filter = microsite.get_value('course_org_filter')
    # Let's filter out any courses in an "org" that has been declared to be
    # in a Microsite
    org_filter_out_set = microsite.get_all_orgs()
    # remove our current Microsite from the "filter out" list, if applicable
    if course_org_filter:
        org_filter_out_set.remove(course_org_filter)
    # Build our (course, enrollment) list for the user, but ignore any courses that no
    # longer exist (because the course IDs have changed). Still, we don't delete those
    # enrollments, because it could have been a data push snafu.
    course_enrollments = list(get_course_enrollments(user, course_org_filter, org_filter_out_set))
    # sort the enrollment pairs by the enrollment date
    course_enrollments.sort(key=lambda x: x.created, reverse=True)
    # Retrieve the course modes for each course
    enrolled_course_ids = [enrollment.course_id for enrollment in course_enrollments]
    __, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids)
    # Map course_id -> {mode_slug: CourseMode} for quick per-course lookup.
    course_modes_by_course = {
        course_id: {
            mode.slug: mode
            for mode in modes
        }
        for course_id, modes in unexpired_course_modes.iteritems()
    }
    # Check to see if the student has recently enrolled in a course.
    # If so, display a notification message confirming the enrollment.
    enrollment_message = _create_recent_enrollment_message(
        course_enrollments, course_modes_by_course
    )
    course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True)
    # Prompt the user to activate their account if they haven't yet.
    message = ""
    if not user.is_active:
        message = render_to_string(
            'registration/activate_account_notice.html',
            {'email': user.email, 'platform_name': platform_name}
        )
    # Global staff can see what courses errored on their dashboard
    staff_access = False
    errored_courses = {}
    if has_access(user, 'staff', 'global'):
        # Show any courses that errored on load
        staff_access = True
        errored_courses = modulestore().get_errored_courses()
    # Only link into courseware when the user can both load the course and
    # has satisfied its prerequisites.
    show_courseware_links_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if has_access(request.user, 'load', enrollment.course_overview)
        and has_access(request.user, 'view_courseware_with_prerequisites', enrollment.course_overview)
    )
    # get the programs associated with courses being displayed.
    # pass this along in template context in order to render additional
    # program-related information on the dashboard view.
    course_programs = {}
    if is_student_dashboard_programs_enabled():
        course_programs = _get_course_programs(user, [enrollment.course_id for enrollment in course_enrollments])
    # Construct a dictionary of course mode information
    # used to render the course list. We re-use the course modes dict
    # we loaded earlier to avoid hitting the database.
    course_mode_info = {
        enrollment.course_id: complete_course_mode_info(
            enrollment.course_id, enrollment,
            modes=course_modes_by_course[enrollment.course_id]
        )
        for enrollment in course_enrollments
    }
    # Determine the per-course verification status
    # This is a dictionary in which the keys are course locators
    # and the values are one of:
    #
    # VERIFY_STATUS_NEED_TO_VERIFY
    # VERIFY_STATUS_SUBMITTED
    # VERIFY_STATUS_APPROVED
    # VERIFY_STATUS_MISSED_DEADLINE
    #
    # Each of which correspond to a particular message to display
    # next to the course on the dashboard.
    #
    # If a course is not included in this dictionary,
    # there is no verification messaging to display.
    verify_status_by_course = check_verify_status_by_course(user, course_enrollments)
    cert_statuses = {
        enrollment.course_id: cert_info(request.user, enrollment.course_overview, enrollment.mode)
        for enrollment in course_enrollments
    }
    # only show email settings for Mongo course and when bulk email is turned on
    show_email_settings_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments if (
            settings.FEATURES['ENABLE_INSTRUCTOR_EMAIL'] and
            modulestore().get_modulestore_type(enrollment.course_id) != ModuleStoreEnum.Type.xml and
            CourseAuthorization.instructor_email_enabled(enrollment.course_id)
        )
    )
    # Verification Attempts
    # Used to generate the "you must reverify for course x" banner
    verification_status, verification_msg = SoftwareSecurePhotoVerification.user_status(user)
    # Gets data for midcourse reverifications, if any are necessary or have failed
    statuses = ["approved", "denied", "pending", "must_reverify"]
    reverifications = reverification_info(statuses)
    show_refund_option_for = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.refundable()
    )
    # Courses blocked because a redeemed registration code's invoice is invalid.
    block_courses = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if is_course_blocked(
            request,
            CourseRegistrationCode.objects.filter(
                course_id=enrollment.course_id,
                registrationcoderedemption__redeemed_by=request.user
            ),
            enrollment.course_id
        )
    )
    enrolled_courses_either_paid = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.is_paid_course()
    )
    # If there are *any* denied reverifications that have not been toggled off,
    # we'll display the banner
    denied_banner = any(item.display for item in reverifications["denied"])
    # Populate the Order History for the side-bar.
    order_history_list = order_history(user, course_org_filter=course_org_filter, org_filter_out_set=org_filter_out_set)
    # get list of courses having pre-requisites yet to be completed
    courses_having_prerequisites = frozenset(
        enrollment.course_id for enrollment in course_enrollments
        if enrollment.course_overview.pre_requisite_courses
    )
    courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites)
    # 'notlive' query param carries the start date of a course the user tried
    # to visit before it opened.
    if 'notlive' in request.GET:
        redirect_message = _("The course you are looking for does not start until {date}.").format(
            date=request.GET['notlive']
        )
    else:
        redirect_message = ''
    context = {
        'enrollment_message': enrollment_message,
        'redirect_message': redirect_message,
        'course_enrollments': course_enrollments,
        'course_optouts': course_optouts,
        'message': message,
        'staff_access': staff_access,
        'errored_courses': errored_courses,
        'show_courseware_links_for': show_courseware_links_for,
        'all_course_modes': course_mode_info,
        'cert_statuses': cert_statuses,
        'credit_statuses': _credit_statuses(user, course_enrollments),
        'show_email_settings_for': show_email_settings_for,
        'reverifications': reverifications,
        'verification_status': verification_status,
        'verification_status_by_course': verify_status_by_course,
        'verification_msg': verification_msg,
        'show_refund_option_for': show_refund_option_for,
        'block_courses': block_courses,
        'denied_banner': denied_banner,
        'billing_email': settings.PAYMENT_SUPPORT_EMAIL,
        'user': user,
        'logout_url': reverse(logout_user),
        'platform_name': platform_name,
        'enrolled_courses_either_paid': enrolled_courses_either_paid,
        'provider_states': [],
        'order_history_list': order_history_list,
        'courses_requirements_not_met': courses_requirements_not_met,
        'nav_hidden': True,
        'course_programs': course_programs,
    }
    return render_to_response('dashboard.html', context)
def _create_recent_enrollment_message(course_enrollments, course_modes): # pylint: disable=invalid-name
    """
    Build a notification message for recent course enrollments.
    Args:
        course_enrollments (list[CourseEnrollment]): a list of course enrollments.
        course_modes (dict): Mapping of course ID's to course mode dictionaries.
    Returns:
        The HTML message rendered from the enrollment-message template, or
        None if there are no recently enrolled courses.
    """
    recent = _get_recently_enrolled_courses(course_enrollments)
    if not recent:
        return None
    enrollment_messages = []
    for enrollment in recent:
        overview = enrollment.course_overview
        enrollment_messages.append({
            "course_id": overview.id,
            "course_name": overview.display_name,
            "allow_donation": _allow_donation(course_modes, overview.id, enrollment),
        })
    platform_name = microsite.get_value('platform_name', settings.PLATFORM_NAME)
    return render_to_string(
        'enrollment/course_enrollment_message.html',
        {'course_enrollment_messages': enrollment_messages, 'platform_name': platform_name}
    )
def _get_recently_enrolled_courses(course_enrollments):
    """
    Filter the given enrollments down to recent, active ones.
    Args:
        course_enrollments (list[CourseEnrollment]): A list of course enrollments.
    Returns:
        list[CourseEnrollment]: enrollments created within the configured
        recent-enrollment window. Enrollments with no created date are
        excluded.
    """
    window_seconds = DashboardConfiguration.current().recent_enrollment_time_delta
    cutoff = datetime.datetime.now(UTC) - datetime.timedelta(seconds=window_seconds)
    recent = []
    for enrollment in course_enrollments:
        if enrollment.is_active and enrollment.created > cutoff:
            recent.append(enrollment)
    return recent
def _allow_donation(course_modes, course_id, enrollment):
    """Determine whether the dashboard should request donations for the given course.
    Donations are requested only when the platform's donation feature is
    enabled and the learner's current mode in this course is free (min_price == 0).
    Args:
        course_modes (dict): Mapping of course ID's to course mode dictionaries.
        course_id (str): The unique identifier for the course.
        enrollment (CourseEnrollment): The enrollment object in which the user is enrolled
    Returns:
        True if the course is allowing donations.
    """
    if not DonationConfiguration.current().enabled:
        return False
    modes_for_course = course_modes[course_id]
    if enrollment.mode not in modes_for_course:
        return False
    return modes_for_course[enrollment.mode].min_price == 0
def _update_email_opt_in(request, org):
    """Record the submitted email opt-in preference, if any, via the profile API."""
    opt_in_value = request.POST.get('email_opt_in')
    if opt_in_value is None:
        # Preference was not submitted with this request; nothing to record.
        return
    preferences_api.update_email_opt_in(request.user, org, opt_in_value == 'true')
def _credit_statuses(user, course_enrollments):
    """
    Retrieve the status for credit courses.
    A credit course is a course for which a user can purchase
    college credit. The current flow is:
    1. User becomes eligible for credit (submits verifications, passes the course, etc.)
    2. User purchases credit from a particular credit provider.
    3. User requests credit from the provider, usually creating an account on the provider's site.
    4. The credit provider notifies us whether the user's request for credit has been accepted or rejected.
    The dashboard is responsible for communicating the user's state in this flow.
    Arguments:
        user (User): The currently logged-in user.
        course_enrollments (list[CourseEnrollment]): List of enrollments for the
            user.
    Returns: dict
        The returned dictionary has keys that are `CourseKey`s and values that
        are dictionaries with:
            * eligible (bool): True if the user is eligible for credit in this course.
            * deadline (datetime): The deadline for purchasing and requesting credit for this course.
            * purchased (bool): Whether the user has purchased credit for this course.
            * provider_name (string): The display name of the credit provider.
            * provider_status_url (string): A URL the user can visit to check on their credit request status.
            * request_status (string): Either "pending", "approved", or "rejected"
            * error (bool): If true, an unexpected error occurred when retrieving the credit status,
                so the user should contact the support team.
    Example:
        >>> _credit_statuses(user, course_enrollments)
        {
            CourseKey.from_string("edX/DemoX/Demo_Course"): {
                "course_key": "edX/DemoX/Demo_Course",
                "eligible": True,
                "deadline": 2015-11-23 00:00:00 UTC,
                "purchased": True,
                "provider_name": "Hogwarts",
                "provider_status_url": "http://example.com/status",
                "request_status": "pending",
                "error": False
            }
        }
    """
    # Function-local import — presumably to avoid a circular import at module
    # load time; TODO confirm before moving to the top of the file.
    from openedx.core.djangoapps.credit import api as credit_api
    # Feature flag off
    if not settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY"):
        return {}
    # Latest known request status per course, keyed by course key.
    request_status_by_course = {
        request["course_key"]: request["status"]
        for request in credit_api.get_credit_requests_for_user(user.username)
    }
    # Enrollments in "credit" mode indicate the user has purchased credit.
    credit_enrollments = {
        enrollment.course_id: enrollment
        for enrollment in course_enrollments
        if enrollment.mode == "credit"
    }
    # When a user purchases credit in a course, the user's enrollment
    # mode is set to "credit" and an enrollment attribute is set
    # with the ID of the credit provider. We retrieve *all* such attributes
    # here to minimize the number of database queries.
    purchased_credit_providers = {
        attribute.enrollment.course_id: attribute.value
        for attribute in CourseEnrollmentAttribute.objects.filter(
            namespace="credit",
            name="provider_id",
            enrollment__in=credit_enrollments.values()
        ).select_related("enrollment")
    }
    provider_info_by_id = {
        provider["id"]: provider
        for provider in credit_api.get_credit_providers()
    }
    statuses = {}
    for eligibility in credit_api.get_eligibilities_for_user(user.username):
        course_key = CourseKey.from_string(unicode(eligibility["course_key"]))
        status = {
            "course_key": unicode(course_key),
            "eligible": True,
            "deadline": eligibility["deadline"],
            "purchased": course_key in credit_enrollments,
            "provider_name": None,
            "provider_status_url": None,
            "provider_id": None,
            "request_status": request_status_by_course.get(course_key),
            "error": False,
        }
        # If the user has purchased credit, then include information about the credit
        # provider from which the user purchased credit.
        # We retrieve the provider's ID from the an "enrollment attribute" set on the user's
        # enrollment when the user's order for credit is fulfilled by the E-Commerce service.
        if status["purchased"]:
            provider_id = purchased_credit_providers.get(course_key)
            if provider_id is None:
                # Data inconsistency: credit enrollment without a provider
                # attribute. Flag the error so support can follow up.
                status["error"] = True
                log.error(
                    u"Could not find credit provider associated with credit enrollment "
                    u"for user %s in course %s. The user will not be able to see his or her "
                    u"credit request status on the student dashboard. This attribute should "
                    u"have been set when the user purchased credit in the course.",
                    user.id, course_key
                )
            else:
                provider_info = provider_info_by_id.get(provider_id, {})
                status["provider_name"] = provider_info.get("display_name")
                status["provider_status_url"] = provider_info.get("status_url")
                status["provider_id"] = provider_id
        statuses[course_key] = status
    return statuses
@transaction.non_atomic_requests
@require_POST
@outer_atomic(read_committed=True)
def change_enrollment(request, check_access=True):
    """
    Modify the enrollment status for the logged-in user.

    The request parameter must be a POST request (other methods return 405)
    that specifies course_id and enrollment_action parameters. If course_id or
    enrollment_action is not specified, if course_id is not valid, if
    enrollment_action is something other than "enroll" or "unenroll", if
    enrollment_action is "enroll" and enrollment is closed for the course, or
    if enrollment_action is "unenroll" and the user is not enrolled in the
    course, a 400 error will be returned. If the user is not logged in, 403
    will be returned; it is important that only this case return 403 so the
    front end can redirect the user to a registration or login page when this
    happens. This function should only be called from an AJAX request, so
    the error messages in the responses should never actually be user-visible.

    Args:
        request (`Request`): The Django request object

    Keyword Args:
        check_access (boolean): If True, we check that an accessible course actually
            exists for the given course_key before we enroll the student.
            The default is True; callers with non-standard flows (ex. beta
            tester invitations) may pass False to skip the check.

    Returns:
        Response
    """
    # Get the user
    user = request.user
    # Ensure the user is authenticated
    if not user.is_authenticated():
        return HttpResponseForbidden()
    # Ensure we received a course_id
    action = request.POST.get("enrollment_action")
    if 'course_id' not in request.POST:
        return HttpResponseBadRequest(_("Course id not specified"))
    try:
        course_id = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get("course_id"))
    except InvalidKeyError:
        log.warning(
            u"User %s tried to %s with invalid course id: %s",
            user.username,
            action,
            request.POST.get("course_id"),
        )
        return HttpResponseBadRequest(_("Invalid course id"))
    if action == "enroll":
        # Make sure the course exists
        # We don't do this check on unenroll, or a bad course id can't be unenrolled from
        if not modulestore().has_course(course_id):
            log.warning(
                u"User %s tried to enroll in non-existent course %s",
                user.username,
                course_id
            )
            return HttpResponseBadRequest(_("Course id is invalid"))
        # Record the user's email opt-in preference
        if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
            _update_email_opt_in(request, course_id.org)
        available_modes = CourseMode.modes_for_course_dict(course_id)
        # Check whether the user is blocked from enrolling in this course
        # This can occur if the user's IP is on a global blacklist
        # or if the user is enrolling in a country in which the course
        # is not available.
        redirect_url = embargo_api.redirect_if_blocked(
            course_id, user=user, ip_address=get_ip(request),
            url=request.path
        )
        if redirect_url:
            # A URL in the body tells the AJAX caller where to send the
            # blocked user; the redirect itself happens client-side.
            return HttpResponse(redirect_url)
        # Check that auto enrollment is allowed for this course
        # (= the course is NOT behind a paywall)
        if CourseMode.can_auto_enroll(course_id):
            # Enroll the user using the default mode (honor)
            # We're assuming that users of the course enrollment table
            # will NOT try to look up the course enrollment model
            # by its slug. If they do, it's possible (based on the state of the database)
            # for no such model to exist, even though we've set the enrollment type
            # to "honor".
            try:
                CourseEnrollment.enroll(user, course_id, check_access=check_access)
            except Exception:
                return HttpResponseBadRequest(_("Could not enroll"))
        # If we have more than one course mode or professional ed is enabled,
        # then send the user to the choose your track page.
        # (In the case of no-id-professional/professional ed, this will redirect to a page that
        # funnels users directly into the verification / payment flow)
        if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes):
            return HttpResponse(
                reverse("course_modes_choose", kwargs={'course_id': unicode(course_id)})
            )
        # Otherwise, there is only one mode available (the default)
        return HttpResponse()
    elif action == "unenroll":
        enrollment = CourseEnrollment.get_enrollment(user, course_id)
        if not enrollment:
            return HttpResponseBadRequest(_("You are not enrolled in this course"))
        # Block unenrollment when the user's certificate status makes it
        # inappropriate (e.g. a certificate has already been issued).
        certificate_info = cert_info(user, enrollment.course_overview, enrollment.mode)
        if certificate_info.get('status') in DISABLE_UNENROLL_CERT_STATES:
            return HttpResponseBadRequest(_("Your certificate prevents you from unenrolling from this course"))
        CourseEnrollment.unenroll(user, course_id)
        return HttpResponse()
    else:
        return HttpResponseBadRequest(_("Enrollment action is invalid"))
# TODO: Need different levels of logging (audit vs. debug) in the login flow below.
@ensure_csrf_cookie
def login_user(request, error=""):  # pylint: disable=too-many-statements,unused-argument
    """
    AJAX request to log in the user.

    Handles two paths: third-party auth (a social-auth pipeline is running
    and no email/password was posted) and first-party email/password auth.
    On success the response carries logged-in cookies via
    ``set_logged_in_cookies``. Several failure cases still return HTTP 200
    with ``success: False`` (see the TODO-marked status codes below).
    """
    backend_name = None
    email = None
    password = None
    redirect_url = None
    response = None
    running_pipeline = None
    third_party_auth_requested = third_party_auth.is_enabled() and pipeline.running(request)
    third_party_auth_successful = False
    # Posting an email or password explicitly opts the user out of the
    # third-party flow, even if a pipeline is running.
    trumped_by_first_party_auth = bool(request.POST.get('email')) or bool(request.POST.get('password'))
    user = None
    platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME)
    if third_party_auth_requested and not trumped_by_first_party_auth:
        # The user has already authenticated via third-party auth and has not
        # asked to do first party auth by supplying a username or password. We
        # now want to put them through the same logging and cookie calculation
        # logic as with first-party auth.
        running_pipeline = pipeline.get(request)
        username = running_pipeline['kwargs'].get('username')
        backend_name = running_pipeline['backend']
        third_party_uid = running_pipeline['kwargs']['uid']
        requested_provider = provider.Registry.get_from_pipeline(running_pipeline)
        try:
            user = pipeline.get_authenticated_user(requested_provider, username, third_party_uid)
            third_party_auth_successful = True
        except User.DoesNotExist:
            # The social auth account is not linked to a platform account yet:
            # tell the user how to link it (plain-text 403, shown by the client).
            AUDIT_LOG.warning(
                u'Login failed - user with username {username} has no social auth with backend_name {backend_name}'.format(
                    username=username, backend_name=backend_name))
            return HttpResponse(
                _("You've successfully logged into your {provider_name} account, but this account isn't linked with an {platform_name} account yet.").format(
                    platform_name=platform_name, provider_name=requested_provider.name
                )
                + "<br/><br/>" +
                _("Use your {platform_name} username and password to log into {platform_name} below, "
                  "and then link your {platform_name} account with {provider_name} from your dashboard.").format(
                      platform_name=platform_name, provider_name=requested_provider.name
                  )
                + "<br/><br/>" +
                _("If you don't have an {platform_name} account yet, "
                  "click <strong>Register</strong> at the top of the page.").format(
                      platform_name=platform_name),
                content_type="text/plain",
                status=403
            )
    else:
        if 'email' not in request.POST or 'password' not in request.POST:
            return JsonResponse({
                "success": False,
                "value": _('There was an error receiving your login information. Please email us.'),  # TODO: User error message
            })  # TODO: this should be status code 400  # pylint: disable=fixme
        email = request.POST['email']
        password = request.POST['password']
        try:
            user = User.objects.get(email=email)
        except User.DoesNotExist:
            if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
                AUDIT_LOG.warning(u"Login failed - Unknown user email")
            else:
                AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email))
    # check if the user has a linked shibboleth account, if so, redirect the user to shib-login
    # This behavior is pretty much like what gmail does for shibboleth. Try entering some @stanford.edu
    # address into the Gmail login.
    if settings.FEATURES.get('AUTH_USE_SHIB') and user:
        try:
            eamap = ExternalAuthMap.objects.get(user=user)
            if eamap.external_domain.startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
                return JsonResponse({
                    "success": False,
                    "redirect": reverse('shib-login'),
                })  # TODO: this should be status code 301  # pylint: disable=fixme
        except ExternalAuthMap.DoesNotExist:
            # This is actually the common case, logging in user without external linked login
            AUDIT_LOG.info(u"User %s w/o external auth attempting login", user)
    # see if account has been locked out due to excessive login failures
    user_found_by_email_lookup = user
    if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
        if LoginFailures.is_user_locked_out(user_found_by_email_lookup):
            return JsonResponse({
                "success": False,
                "value": _('This account has been temporarily locked due to excessive login failures. Try again later.'),
            })  # TODO: this should be status code 429  # pylint: disable=fixme
    # see if the user must reset his/her password due to any policy settings
    if user_found_by_email_lookup and PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup):
        return JsonResponse({
            "success": False,
            "value": _('Your password has expired due to password policy on this account. You must '
                       'reset your password before you can log in again. Please click the '
                       '"Forgot Password" link on this page to reset your password before logging in again.'),
        })  # TODO: this should be status code 403  # pylint: disable=fixme
    # if the user doesn't exist, we want to set the username to an invalid
    # username so that authentication is guaranteed to fail and we can take
    # advantage of the ratelimited backend
    username = user.username if user else ""
    if not third_party_auth_successful:
        try:
            user = authenticate(username=username, password=password, request=request)
        # this occurs when there are too many attempts from the same IP address
        except RateLimitException:
            return JsonResponse({
                "success": False,
                "value": _('Too many failed login attempts. Try again later.'),
            })  # TODO: this should be status code 429  # pylint: disable=fixme
    if user is None:
        # tick the failed login counters if the user exists in the database
        if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
            LoginFailures.increment_lockout_counter(user_found_by_email_lookup)
        # if we didn't find this username earlier, the account for this email
        # doesn't exist, and doesn't have a corresponding password
        if username != "":
            if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
                loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>"
                AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id))
            else:
                AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email))
        return JsonResponse({
            "success": False,
            "value": _('Email or password is incorrect.'),
        })  # TODO: this should be status code 400  # pylint: disable=fixme
    # successful login, clear failed login attempts counters, if applicable
    if LoginFailures.is_feature_enabled():
        LoginFailures.clear_lockout_counter(user)
    # Track the user's sign in
    if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
        tracking_context = tracker.get_tracker().resolve_context()
        analytics.identify(user.id, {
            'email': email,
            'username': username
        })
        analytics.track(
            user.id,
            "edx.bi.user.account.authenticated",
            {
                'category': "conversion",
                'label': request.POST.get('course_id'),
                'provider': None
            },
            context={
                'ip': tracking_context.get('ip'),
                'Google Analytics': {
                    'clientId': tracking_context.get('client_id')
                }
            }
        )
    if user is not None and user.is_active:
        try:
            # We do not log here, because we have a handler registered
            # to perform logging on successful logins.
            login(request, user)
            if request.POST.get('remember') == 'true':
                # 604800 seconds = one week.
                request.session.set_expiry(604800)
                log.debug("Setting user session to never expire")
            else:
                request.session.set_expiry(0)
        except Exception as exc:  # pylint: disable=broad-except
            AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?")
            log.critical("Login failed - Could not create session. Is memcached running?")
            log.exception(exc)
            raise
        redirect_url = None  # The AJAX method calling should know the default destination upon success
        if third_party_auth_successful:
            redirect_url = pipeline.get_complete_url(backend_name)
        response = JsonResponse({
            "success": True,
            "redirect_url": redirect_url,
        })
        # Ensure that the external marketing site can
        # detect that the user is logged in.
        return set_logged_in_cookies(request, response, user)
    # Authentication succeeded but the account is not activated yet:
    # re-send the activation email and report the failure.
    if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
        AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id))
    else:
        AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))
    reactivation_email_for_user(user)
    not_activated_msg = _("This account has not been activated. We have sent another activation message. Please check your email for the activation instructions.")
    return JsonResponse({
        "success": False,
        "value": not_activated_msg,
    })  # TODO: this should be status code 400  # pylint: disable=fixme
@csrf_exempt
@require_POST
@social_utils.strategy("social:complete")
def login_oauth_token(request, backend):
    """
    Authenticate the client using an OAuth access token by using the token to
    retrieve information from a third party and matching that information to an
    existing user.
    """
    warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning)
    backend = request.backend
    # Only OAuth1/OAuth2 backends are supported by this endpoint.
    if not (isinstance(backend, social_oauth.BaseOAuth1) or isinstance(backend, social_oauth.BaseOAuth2)):
        raise Http404
    if "access_token" not in request.POST:
        return JsonResponse({"error": "invalid_request"}, status=400)
    # Tell third party auth pipeline that this is an API call
    request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
    authenticated_user = None
    try:
        authenticated_user = backend.do_auth(request.POST["access_token"])
    except (HTTPError, AuthException):
        pass
    # do_auth can return a non-User object if it fails
    if authenticated_user and isinstance(authenticated_user, User):
        login(request, authenticated_user)
        return JsonResponse(status=204)
    # Ensure user does not re-enter the pipeline
    request.social_strategy.clean_partial_pipeline()
    return JsonResponse({"error": "invalid_token"}, status=401)
@ensure_csrf_cookie
def logout_user(request):
    """
    HTTP request to log out the user. Redirects to marketing page.

    Both the CSRF and sessionid cookies are deleted so the marketing
    site can determine the logged in state of the user.
    """
    # We do not log here, because we have a handler registered
    # to perform logging on successful logouts.
    logout(request)
    # CAS deployments need to hit the CAS logout endpoint; everyone else
    # goes back to the site root.
    target = reverse('cas-logout') if settings.FEATURES.get('AUTH_USE_CAS') else '/'
    response = redirect(target)
    delete_logged_in_cookies(response)
    return response
@require_GET
@login_required
@ensure_csrf_cookie
def manage_user_standing(request):
    """
    Renders the view used to manage user standing. Also displays a table
    of user accounts that have been disabled and who disabled them.

    Staff-only: raises Http404 for non-staff users.
    """
    if not request.user.is_staff:
        raise Http404
    # select_related('user') fetches each standing's user in the same query,
    # and reading changed_by from the standing row itself avoids the old
    # per-user `user.standing.all()[0]` lookup (one extra query per account).
    # NOTE(review): assumes one UserStanding row per user, as enforced by
    # the get_or_create(user=user) in disable_account_ajax.
    all_disabled_accounts = UserStanding.objects.filter(
        account_status=UserStanding.ACCOUNT_DISABLED
    ).select_related('user')
    headers = ['username', 'account_changed_by']
    rows = [
        [standing.user.username, standing.changed_by]
        for standing in all_disabled_accounts
    ]
    context = {'headers': headers, 'rows': rows}
    return render_to_response("manage_user_standing.html", context)
@require_POST
@login_required
@ensure_csrf_cookie
def disable_account_ajax(request):
    """
    Ajax call to change user standing. Endpoint of the form
    in manage_user_standing.html
    """
    if not request.user.is_staff:
        raise Http404
    context = {}
    # Validate inputs in order: username first, then the requested action.
    username = request.POST.get('username')
    if username is None or username.strip() == '':
        context['message'] = _('Please enter a username')
        return JsonResponse(context, status=400)
    account_action = request.POST.get('account_action')
    if account_action is None:
        context['message'] = _('Please choose an option')
        return JsonResponse(context, status=400)
    username = username.strip()
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        context['message'] = _("User with username {} does not exist").format(username)
        return JsonResponse(context, status=400)
    user_account, _success = UserStanding.objects.get_or_create(
        user=user, defaults={'changed_by': request.user},
    )
    if account_action == 'disable':
        new_status = UserStanding.ACCOUNT_DISABLED
        context['message'] = _("Successfully disabled {}'s account").format(username)
        log.info(u"%s disabled %s's account", request.user, username)
    elif account_action == 'reenable':
        new_status = UserStanding.ACCOUNT_ENABLED
        context['message'] = _("Successfully reenabled {}'s account").format(username)
        log.info(u"%s reenabled %s's account", request.user, username)
    else:
        context['message'] = _("Unexpected account status")
        return JsonResponse(context, status=400)
    # Record who made the change and when, then persist the new status.
    user_account.account_status = new_status
    user_account.changed_by = request.user
    user_account.standing_last_changed_at = datetime.datetime.now(UTC)
    user_account.save()
    return JsonResponse(context)
@login_required
@ensure_csrf_cookie
def change_setting(request):
    """JSON call to change a profile setting: Right now, location"""
    # TODO (vshnayder): location is no longer used
    profile = UserProfile.objects.get(user=request.user)  # request.user.profile_cache
    if 'location' in request.POST:
        profile.location = request.POST['location']
    profile.save()
    return JsonResponse({"success": True, "location": profile.location})
class AccountValidationError(Exception):
    """
    Validation failure tied to a single account field.

    The offending field name (e.g. "username" or "email") is recorded on
    the exception so callers can report it back to the client.
    """
    def __init__(self, message, field):
        # Standard Exception handling for the message text; remember the field.
        super(AccountValidationError, self).__init__(message)
        self.field = field
@receiver(post_save, sender=User)
def user_signup_handler(sender, **kwargs):  # pylint: disable=unused-argument
    """
    Post-save handler that records the signup source for newly created users
    when a microsite SITE_NAME is configured.
    """
    # Only act on creation, not on subsequent saves of the same user.
    if not kwargs.get('created'):
        return
    site = microsite.get_value('SITE_NAME')
    if not site:
        return
    new_user = kwargs['instance']
    UserSignupSource(user=new_user, site=site).save()
    log.info(u'user {} originated from a white labeled "Microsite"'.format(new_user.id))
def _do_create_account(form):
    """
    Given cleaned post variables, create the User and UserProfile objects, as well as the
    registration for this user.

    Returns a tuple (User, UserProfile, Registration).

    Raises:
        ValidationError: if the form does not validate.
        AccountValidationError: if a user with the same username or email
            already exists (detected via the IntegrityError on save).

    Note: this function is also used for creating test users.
    """
    if not form.is_valid():
        raise ValidationError(form.errors)
    user = User(
        username=form.cleaned_data["username"],
        email=form.cleaned_data["email"],
        is_active=False
    )
    user.set_password(form.cleaned_data["password"])
    registration = Registration()
    # TODO: Rearrange so that if part of the process fails, the whole process fails.
    # Right now, we can have e.g. no registration e-mail sent out and a zombie account
    try:
        with transaction.atomic():
            user.save()
    except IntegrityError:
        # Figure out the cause of the integrity error.
        # Use .exists() rather than len(queryset) > 0: it issues an efficient
        # EXISTS query instead of fetching whole rows.
        if User.objects.filter(username=user.username).exists():
            raise AccountValidationError(
                _("An account with the Public Username '{username}' already exists.").format(username=user.username),
                field="username"
            )
        elif User.objects.filter(email=user.email).exists():
            raise AccountValidationError(
                _("An account with the Email '{email}' already exists.").format(email=user.email),
                field="email"
            )
        else:
            raise
    # add this account creation to password history
    # NOTE, this will be a NOP unless the feature has been turned on in configuration
    password_history_entry = PasswordHistory()
    password_history_entry.create(user)
    registration.register(user)
    profile_fields = [
        "name", "level_of_education", "gender", "mailing_address", "city", "country", "goals",
        "year_of_birth"
    ]
    profile = UserProfile(
        user=user,
        **{key: form.cleaned_data.get(key) for key in profile_fields}
    )
    # Extra microsite-defined fields are stored as JSON in profile.meta.
    extended_profile = form.cleaned_extended_profile
    if extended_profile:
        profile.meta = json.dumps(extended_profile)
    try:
        profile.save()
    except Exception:  # pylint: disable=broad-except
        log.exception("UserProfile creation failed for user {id}.".format(id=user.id))
        raise
    return (user, profile, registration)
def create_account_with_params(request, params):
    """
    Given a request and a dict of parameters (which may or may not have come
    from the request), create an account for the requesting user, including
    creating a comments service user object and sending an activation email.
    This also takes external/third-party auth into account, updates that as
    necessary, and authenticates the user for the request's session.

    Returns the newly created (and logged-in) user.

    Raises AccountValidationError if an account with the username or email
    specified by params already exists, or ValidationError if any of the given
    parameters is invalid for any other reason.

    Issues with this code:
    * It is not transactional. If there is a failure part-way, an incomplete
      account will be created and left in the database.
    * Third-party auth passwords are not verified. There is a comment that
      they are unused, but it would be helpful to have a sanity check that
      they are sane.
    * It is over 300 lines long (!) and includes disparate functionality, from
      registration e-mails to all sorts of other things. It should be broken
      up into semantically meaningful functions.
    * The user-facing text is rather unfriendly (e.g. "Username must be a
      minimum of two characters long" rather than "Please use a username of
      at least two characters").
    """
    # Copy params so we can modify it; we can't just do dict(params) because if
    # params is request.POST, that results in a dict containing lists of values
    params = dict(params.items())
    # allow for microsites to define their own set of required/optional/hidden fields
    extra_fields = microsite.get_value(
        'REGISTRATION_EXTRA_FIELDS',
        getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
    )
    # Boolean of whether a 3rd party auth provider and credentials were provided in
    # the API so the newly created account can link with the 3rd party account.
    #
    # Note: this is orthogonal to the 3rd party authentication pipeline that occurs
    # when the account is created via the browser and redirect URLs.
    should_link_with_social_auth = third_party_auth.is_enabled() and 'provider' in params
    if should_link_with_social_auth or (third_party_auth.is_enabled() and pipeline.running(request)):
        params["password"] = pipeline.make_random_password()
    # if doing signup for an external authorization, then get email, password, name from the eamap
    # don't use the ones from the form, since the user could have hacked those
    # unless originally we didn't get a valid email or name from the external auth
    # TODO: We do not check whether these values meet all necessary criteria, such as email length
    do_external_auth = 'ExternalAuthMap' in request.session
    if do_external_auth:
        eamap = request.session['ExternalAuthMap']
        try:
            validate_email(eamap.external_email)
            params["email"] = eamap.external_email
        except ValidationError:
            # Invalid external email: fall back to the form-supplied value.
            pass
        if eamap.external_name.strip() != '':
            params["name"] = eamap.external_name
        params["password"] = eamap.internal_password
        log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"])
    extended_profile_fields = microsite.get_value('extended_profile_fields', [])
    enforce_password_policy = (
        settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and
        not do_external_auth
    )
    # Can't have terms of service for certain SHIB users, like at Stanford
    tos_required = (
        not settings.FEATURES.get("AUTH_USE_SHIB") or
        not settings.FEATURES.get("SHIB_DISABLE_TOS") or
        not do_external_auth or
        not eamap.external_domain.startswith(
            external_auth.views.SHIBBOLETH_DOMAIN_PREFIX
        )
    )
    form = AccountCreationForm(
        data=params,
        extra_fields=extra_fields,
        extended_profile_fields=extended_profile_fields,
        enforce_username_neq_password=True,
        enforce_password_policy=enforce_password_policy,
        tos_required=tos_required,
    )
    # Perform operations within a transaction that are critical to account creation
    with transaction.atomic():
        # first, create the account
        (user, profile, registration) = _do_create_account(form)
        # next, link the account with social auth, if provided via the API.
        # (If the user is using the normal register page, the social auth pipeline does the linking, not this code)
        if should_link_with_social_auth:
            backend_name = params['provider']
            request.social_strategy = social_utils.load_strategy(request)
            redirect_uri = reverse('social:complete', args=(backend_name, ))
            request.backend = social_utils.load_backend(request.social_strategy, backend_name, redirect_uri)
            social_access_token = params.get('access_token')
            if not social_access_token:
                raise ValidationError({
                    'access_token': [
                        _("An access_token is required when passing value ({}) for provider.").format(
                            params['provider']
                        )
                    ]
                })
            request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API
            pipeline_user = None
            error_message = ""
            try:
                pipeline_user = request.backend.do_auth(social_access_token, user=user)
            except AuthAlreadyAssociated:
                error_message = _("The provided access_token is already associated with another user.")
            except (HTTPError, AuthException):
                error_message = _("The provided access_token is not valid.")
            if not pipeline_user or not isinstance(pipeline_user, User):
                # Ensure user does not re-enter the pipeline
                request.social_strategy.clean_partial_pipeline()
                # Raising here rolls the enclosing transaction back, so the
                # partially created account is not kept.
                raise ValidationError({'access_token': [error_message]})
    # Perform operations that are non-critical parts of account creation
    preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())
    if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
        try:
            enable_notifications(user)
        except Exception:
            # Best-effort: a notifications failure must not block registration.
            log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id))
    dog_stats_api.increment("common.student.account_created")
    # If the user is registering via 3rd party auth, track which provider they use
    third_party_provider = None
    running_pipeline = None
    if third_party_auth.is_enabled() and pipeline.running(request):
        running_pipeline = pipeline.get(request)
        third_party_provider = provider.Registry.get_from_pipeline(running_pipeline)
    # Track the user's registration
    if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
        tracking_context = tracker.get_tracker().resolve_context()
        identity_args = [
            user.id,  # pylint: disable=no-member
            {
                'email': user.email,
                'username': user.username,
                'name': profile.name,
                'age': profile.age,
                'education': profile.level_of_education_display,
                'address': profile.mailing_address,
                'gender': profile.gender_display,
                'country': unicode(profile.country),
            }
        ]
        if hasattr(settings, 'MAILCHIMP_NEW_USER_LIST_ID'):
            identity_args.append({
                "MailChimp": {
                    "listId": settings.MAILCHIMP_NEW_USER_LIST_ID
                }
            })
        analytics.identify(*identity_args)
        analytics.track(
            user.id,
            "edx.bi.user.account.registered",
            {
                'category': 'conversion',
                'label': params.get('course_id'),
                'provider': third_party_provider.name if third_party_provider else None
            },
            context={
                'ip': tracking_context.get('ip'),
                'Google Analytics': {
                    'clientId': tracking_context.get('client_id')
                }
            }
        )
    create_comments_service_user(user)
    # Don't send email if we are:
    #
    # 1. Doing load testing.
    # 2. Random user generation for other forms of testing.
    # 3. External auth bypassing activation.
    # 4. Have the platform configured to not require e-mail activation.
    # 5. Registering a new user using a trusted third party provider (with skip_email_verification=True)
    #
    # Note that this feature is only tested as a flag set one way or
    # the other for *new* systems. we need to be careful about
    # changing settings on a running system to make sure no users are
    # left in an inconsistent state (or doing a migration if they are).
    send_email = (
        not settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None) and
        not settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING') and
        not (do_external_auth and settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH')) and
        not (
            third_party_provider and third_party_provider.skip_email_verification and
            user.email == running_pipeline['kwargs'].get('details', {}).get('email')
        )
    )
    if send_email:
        context = {
            'name': profile.name,
            'key': registration.activation_key,
        }
        # composes activation email
        subject = render_to_string('emails/activation_email_subject.txt', context)
        # Email subject *must not* contain newlines
        subject = ''.join(subject.splitlines())
        message = render_to_string('emails/activation_email.txt', context)
        from_address = microsite.get_value(
            'email_from_address',
            settings.DEFAULT_FROM_EMAIL
        )
        try:
            if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):
                # Redirect all activation mail to a single address (dev/test).
                dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']
                message = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) +
                           '-' * 80 + '\n\n' + message)
                mail.send_mail(subject, message, from_address, [dest_addr], fail_silently=False)
            else:
                user.email_user(subject, message, from_address)
        except Exception:  # pylint: disable=broad-except
            log.error(u'Unable to send activation email to user from "%s"', from_address, exc_info=True)
    else:
        # No activation email required: activate the account immediately.
        registration.activate()
    # Immediately after a user creates an account, we log them in. They are only
    # logged in until they close the browser. They can't log in again until they click
    # the activation link from the email.
    new_user = authenticate(username=user.username, password=params['password'])
    login(request, new_user)
    request.session.set_expiry(0)
    # TODO: there is no error checking here to see that the user actually logged in successfully,
    # and is not yet an active user.
    if new_user is not None:
        AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username))
    if do_external_auth:
        eamap.user = new_user
        eamap.dtsignup = datetime.datetime.now(UTC)
        eamap.save()
        AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username)
        AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap)
        if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
            log.info('bypassing activation email')
            new_user.is_active = True
            new_user.save()
            AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email))
    return new_user
@csrf_exempt
def create_account(request, post_override=None):
    """
    JSON call to create new edX account.
    Used by form in signup_modal.html, which is included into navigation.html
    """
    warnings.warn("Please use RegistrationView instead.", DeprecationWarning)
    params = post_override or request.POST
    try:
        user = create_account_with_params(request, params)
    except AccountValidationError as exc:
        # Duplicate username/email: report which field is at fault.
        return JsonResponse(
            {'success': False, 'value': exc.message, 'field': exc.field},
            status=400,
        )
    except ValidationError as exc:
        # Report the first invalid field and its first error message.
        field, error_list = next(exc.message_dict.iteritems())
        return JsonResponse(
            {"success": False, "field": field, "value": error_list[0]},
            status=400,
        )
    # Resume the third-party-auth pipeline if necessary; otherwise the AJAX
    # caller decides the default destination (redirect_url stays None).
    redirect_url = None
    if third_party_auth.is_enabled() and pipeline.running(request):
        redirect_url = pipeline.get_complete_url(pipeline.get(request)['backend'])
    response = JsonResponse({'success': True, 'redirect_url': redirect_url})
    set_logged_in_cookies(request, response, user)
    return response
def auto_auth(request):
    """
    Create or configure a user account, then log in as that user.
    Enabled only when
    settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true.
    Accepts the following querystring parameters:
    * `username`, `email`, and `password` for the user account
    * `full_name` for the user profile (the user's full name; defaults to the username)
    * `staff`: Set to "true" to make the user global staff.
    * `course_id`: Enroll the student in the course with `course_id`
    * `roles`: Comma-separated list of roles to grant the student in the course with `course_id`
    * `no_login`: Define this to create the user but not login
    If username, email, or password are not provided, use
    randomly generated credentials.

    Returns an HttpResponse (JSON when the client sends
    ``Accept: application/json``, plain text otherwise) describing the
    created/logged-in user, with a fresh csrftoken cookie attached.
    """
    # Generate a unique name to use if none provided
    unique_name = uuid.uuid4().hex[0:30]
    # Use the params from the request, otherwise use these defaults
    username = request.GET.get('username', unique_name)
    password = request.GET.get('password', unique_name)
    email = request.GET.get('email', unique_name + "@example.com")
    full_name = request.GET.get('full_name', username)
    is_staff = request.GET.get('staff', None)
    is_superuser = request.GET.get('superuser', None)
    course_id = request.GET.get('course_id', None)
    # mode has to be one of 'honor'/'professional'/'verified'/'audit'/'no-id-professional'/'credit'
    enrollment_mode = request.GET.get('enrollment_mode', 'honor')
    course_key = None
    if course_id:
        course_key = CourseLocator.from_string(course_id)
    role_names = [v.strip() for v in request.GET.get('roles', '').split(',') if v.strip()]
    login_when_done = 'no_login' not in request.GET
    form = AccountCreationForm(
        data={
            'username': username,
            'email': email,
            'password': password,
            'name': full_name,
        },
        tos_required=False
    )
    # Attempt to create the account.
    # If successful, this will return a tuple containing
    # the new user object.
    try:
        user, profile, reg = _do_create_account(form)
    except AccountValidationError:
        # Attempt to retrieve the existing user: the endpoint is
        # idempotent per username -- re-running reconfigures the same user
        # with the requested email/password instead of failing.
        user = User.objects.get(username=username)
        user.email = email
        user.set_password(password)
        user.save()
        profile = UserProfile.objects.get(user=user)
        reg = Registration.objects.get(user=user)
    # Set the user's global staff bit
    if is_staff is not None:
        user.is_staff = (is_staff == "true")
        user.save()
    if is_superuser is not None:
        user.is_superuser = (is_superuser == "true")
        user.save()
    # Activate the user
    reg.activate()
    reg.save()
    # ensure parental consent threshold is met: set year_of_birth just
    # past the consent age limit so the test user never needs consent
    year = datetime.date.today().year
    age_limit = settings.PARENTAL_CONSENT_AGE_LIMIT
    profile.year_of_birth = (year - age_limit) - 1
    profile.save()
    # Enroll the user in a course
    if course_key is not None:
        CourseEnrollment.enroll(user, course_key, mode=enrollment_mode)
    # Apply the roles
    for role_name in role_names:
        role = Role.objects.get(name=role_name, course_id=course_key)
        user.roles.add(role)
    # Log in as the user
    if login_when_done:
        user = authenticate(username=username, password=password)
        login(request, user)
    create_comments_service_user(user)
    # Provide the user with a valid CSRF token
    # then return a 200 response
    if request.META.get('HTTP_ACCEPT') == 'application/json':
        response = JsonResponse({
            'created_status': u"Logged in" if login_when_done else "Created",
            'username': username,
            'email': email,
            'password': password,
            'user_id': user.id,  # pylint: disable=no-member
            'anonymous_id': anonymous_id_for_user(user, None),
        })
    else:
        success_msg = u"{} user {} ({}) with password {} and user_id {}".format(
            u"Logged in" if login_when_done else "Created",
            username, email, password, user.id  # pylint: disable=no-member
        )
        response = HttpResponse(success_msg)
    response.set_cookie('csrftoken', csrf(request)['csrf_token'])
    return response
@ensure_csrf_cookie
def activate_account(request, key):
    """When link in activation e-mail is clicked.

    Looks up the Registration by activation key. Exactly one match:
    activate the account (idempotently) and run any pending auto-enrolls.
    Zero matches: render the "invalid key" page. More than one match is
    unexpected and yields a 500.
    """
    regs = Registration.objects.filter(activation_key=key)
    if len(regs) == 1:
        user_logged_in = request.user.is_authenticated()
        already_active = True
        if not regs[0].user.is_active:
            regs[0].activate()
            already_active = False
        # Enroll student in any pending courses he/she may have if auto_enroll flag is set
        student = User.objects.filter(id=regs[0].user_id)
        if student:
            ceas = CourseEnrollmentAllowed.objects.filter(email=student[0].email)
            for cea in ceas:
                if cea.auto_enroll:
                    enrollment = CourseEnrollment.enroll(student[0], cea.course_id)
                    manual_enrollment_audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(student[0].email)
                    if manual_enrollment_audit is not None:
                        # get the enrolled by user and reason from the ManualEnrollmentAudit table.
                        # then create a new ManualEnrollmentAudit table entry for the same email
                        # different transition state.
                        ManualEnrollmentAudit.create_manual_enrollment_audit(
                            manual_enrollment_audit.enrolled_by, student[0].email, ALLOWEDTOENROLL_TO_ENROLLED,
                            manual_enrollment_audit.reason, enrollment
                        )
        resp = render_to_response(
            "registration/activation_complete.html",
            {
                'user_logged_in': user_logged_in,
                'already_active': already_active
            }
        )
        return resp
    if len(regs) == 0:
        return render_to_response(
            "registration/activation_invalid.html",
            {'csrf': csrf(request)['csrf_token']}
        )
    # Multiple registrations sharing one activation key should not happen.
    return HttpResponseServerError(_("Unknown error. Please e-mail us to let us know how it happened."))
@csrf_exempt
@require_POST
def password_reset(request):
    """ Attempts to send a password reset e-mail. """
    # Throttle abusive clients before doing any work.
    limiter = BadRequestRateLimiter()
    if limiter.is_rate_limit_exceeded(request):
        AUDIT_LOG.warning("Rate limit exceeded in password_reset")
        return HttpResponseForbidden()
    form = PasswordResetFormNoActive(request.POST)
    if not form.is_valid():
        # Unknown/invalid user: count it against the rate limiter, but fall
        # through to the generic success response so this endpoint cannot be
        # used to probe which e-mail addresses have accounts.
        AUDIT_LOG.info("Bad password_reset user passed in.")
        limiter.tick_bad_request_counter(request)
    else:
        form.save(
            use_https=request.is_secure(),
            from_email=microsite.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL),
            request=request,
            domain_override=request.get_host(),
        )
        # When password change is complete, a "edx.user.settings.changed" event
        # will be emitted.  But because changing the password is multi-step, we
        # also emit an event here so we can track where the request originated.
        tracker.emit(
            SETTING_CHANGE_INITIATED,
            {
                "setting": "password",
                "old": None,
                "new": None,
                "user_id": request.user.id,
            }
        )
    return JsonResponse({
        'success': True,
        'value': render_to_string('registration/password_reset_done.html', {}),
    })
def password_reset_confirm_wrapper(
request,
uidb36=None,
token=None,
):
""" A wrapper around django.contrib.auth.views.password_reset_confirm.
Needed because we want to set the user as active at this step.
"""
# cribbed from django.contrib.auth.views.password_reset_confirm
try:
uid_int = base36_to_int(uidb36)
user = User.objects.get(id=uid_int)
user.is_active = True
user.save()
except (ValueError, User.DoesNotExist):
pass
# tie in password strength enforcement as an optional level of
# security protection
err_msg = None
if request.method == 'POST':
password = request.POST['new_password1']
if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):
try:
validate_password_length(password)
validate_password_complexity(password)
validate_password_dictionary(password)
except ValidationError, err:
err_msg = _('Password: ') + '; '.join(err.messages)
# also, check the password reuse policy
if not PasswordHistory.is_allowable_password_reuse(user, password):
if user.is_staff:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
else:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
err_msg = ungettext(
"You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.",
"You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.",
num_distinct
).format(num=num_distinct)
# also, check to see if passwords are getting reset too frequent
if PasswordHistory.is_password_reset_too_soon(user):
num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
err_msg = ungettext(
"You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.",
"You are resetting passwords too frequently. Due to security policies, {num} days must elapse between password resets.",
num_days
).format(num=num_days)
if err_msg:
# We have an password reset attempt which violates some security policy, use the
# existing Django template to communicate this back to the user
context = {
'validlink': True,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': err_msg,
'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
}
return TemplateResponse(request, 'registration/password_reset_confirm.html', context)
else:
# we also want to pass settings.PLATFORM_NAME in as extra_context
extra_context = {"platform_name": microsite.get_value('platform_name', settings.PLATFORM_NAME)}
# Support old password reset URLs that used base36 encoded user IDs.
# https://github.com/django/django/commit/1184d077893ff1bc947e45b00a4d565f3df81776#diff-c571286052438b2e3190f8db8331a92bR231
try:
uidb64 = force_text(urlsafe_base64_encode(force_bytes(base36_to_int(uidb36))))
except ValueError:
uidb64 = '1' # dummy invalid ID (incorrect padding for base64)
if request.method == 'POST':
# remember what the old password hash is before we call down
old_password_hash = user.password
result = password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=extra_context
)
# get the updated user
updated_user = User.objects.get(id=uid_int)
# did the password hash change, if so record it in the PasswordHistory
if updated_user.password != old_password_hash:
entry = PasswordHistory()
entry.create(updated_user)
return result
else:
return password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=extra_context
)
def reactivation_email_for_user(user):
    """Send a fresh account-activation e-mail to ``user``.

    Returns a JsonResponse with ``success`` True, or False plus an
    ``error`` message when the user has no Registration record or the
    e-mail could not be sent.
    """
    try:
        reg = Registration.objects.get(user=user)
    except Registration.DoesNotExist:
        return JsonResponse({
            "success": False,
            "error": _('No inactive user with this e-mail exists'),
        })  # TODO: this should be status code 400  # pylint: disable=fixme
    template_context = {
        'name': user.profile.name,
        'key': reg.activation_key,
    }
    # Subject templates may contain newlines; collapse them to one line.
    rendered_subject = render_to_string('emails/activation_email_subject.txt', template_context)
    subject = ''.join(rendered_subject.splitlines())
    message = render_to_string('emails/activation_email.txt', template_context)
    try:
        user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
    except Exception:  # pylint: disable=broad-except
        log.error(u'Unable to send reactivation email from "%s"', settings.DEFAULT_FROM_EMAIL, exc_info=True)
        return JsonResponse({
            "success": False,
            "error": _('Unable to send reactivation email')
        })  # TODO: this should be status code 500  # pylint: disable=fixme
    return JsonResponse({"success": True})
def validate_new_email(user, new_email):
    """
    Given a new email for a user, does some basic verification of the new address If any issues are encountered
    with verification a ValueError will be thrown.
    """
    # Syntactic validity first...
    try:
        validate_email(new_email)
    except ValidationError:
        raise ValueError(_('Valid e-mail address required.'))
    # ...then make sure it is actually a change...
    if new_email == user.email:
        raise ValueError(_('Old email is the same as the new email.'))
    # ...and not already taken by another account.
    if User.objects.filter(email=new_email).exists():
        raise ValueError(_('An account with this e-mail already exists.'))
def do_email_change_request(user, new_email, activation_key=None):
    """
    Given a new email for a user, does some basic verification of the new address and sends an activation message
    to the new address. If any issues are encountered with verification or sending the message, a ValueError will
    be thrown.
    """
    # Reuse the user's pending change record if one exists; otherwise start one.
    existing_changes = PendingEmailChange.objects.filter(user=user)
    if existing_changes:
        pec = existing_changes[0]
    else:
        pec = PendingEmailChange()
        pec.user = user
    pec.new_email = new_email
    # if activation_key is not passing as an argument, generate a random key
    pec.activation_key = activation_key or uuid.uuid4().hex
    pec.save()
    context = {
        'key': pec.activation_key,
        'old_email': user.email,
        'new_email': pec.new_email
    }
    # Subject templates may contain newlines; collapse them to one line.
    rendered_subject = render_to_string('emails/email_change_subject.txt', context)
    subject = ''.join(rendered_subject.splitlines())
    message = render_to_string('emails/email_change.txt', context)
    from_address = microsite.get_value(
        'email_from_address',
        settings.DEFAULT_FROM_EMAIL
    )
    try:
        mail.send_mail(subject, message, from_address, [pec.new_email])
    except Exception:  # pylint: disable=broad-except
        log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True)
        raise ValueError(_('Unable to send email activation link. Please try again later.'))
    # When the email address change is complete, a "edx.user.settings.changed"
    # event will be emitted.  But because changing the email address is
    # multi-step, we also emit an event here so that we can track where the
    # request was initiated.
    tracker.emit(
        SETTING_CHANGE_INITIATED,
        {
            "setting": "email",
            "old": context['old_email'],
            "new": context['new_email'],
            "user_id": user.id,
        }
    )
@ensure_csrf_cookie
def confirm_email_change(request, key):  # pylint: disable=unused-argument
    """
    User requested a new e-mail. This is called when the activation
    link is clicked. We confirm with the old e-mail, and update.

    All DB work happens inside one atomic block; every failure path sets
    the transaction to roll back so the pending change (and profile
    history) survives and the user can retry the link.
    """
    with transaction.atomic():
        try:
            pec = PendingEmailChange.objects.get(activation_key=key)
        except PendingEmailChange.DoesNotExist:
            response = render_to_response("invalid_email_key.html", {})
            transaction.set_rollback(True)
            return response
        user = pec.user
        address_context = {
            'old_email': user.email,
            'new_email': pec.new_email
        }
        # Someone else may have claimed the address since the request was made.
        if len(User.objects.filter(email=pec.new_email)) != 0:
            response = render_to_response("email_exists.html", {})
            transaction.set_rollback(True)
            return response
        subject = render_to_string('emails/email_change_subject.txt', address_context)
        subject = ''.join(subject.splitlines())
        message = render_to_string('emails/confirm_email_change.txt', address_context)
        # Record the old address in the profile meta before switching.
        u_prof = UserProfile.objects.get(user=user)
        meta = u_prof.get_meta()
        if 'old_emails' not in meta:
            meta['old_emails'] = []
        meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])
        u_prof.set_meta(meta)
        u_prof.save()
        # Send it to the old email... (before the switch, so the old address
        # is notified; failure rolls everything back)
        try:
            user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
        except Exception:  # pylint: disable=broad-except
            log.warning('Unable to send confirmation email to old address', exc_info=True)
            response = render_to_response("email_change_failed.html", {'email': user.email})
            transaction.set_rollback(True)
            return response
        user.email = pec.new_email
        user.save()
        pec.delete()
        # And send it to the new email...
        try:
            user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
        except Exception:  # pylint: disable=broad-except
            log.warning('Unable to send confirmation email to new address', exc_info=True)
            response = render_to_response("email_change_failed.html", {'email': pec.new_email})
            transaction.set_rollback(True)
            return response
        response = render_to_response("email_change_successful.html", address_context)
        return response
@require_POST
@login_required
@ensure_csrf_cookie
def change_email_settings(request):
    """Modify logged-in user's setting for receiving emails from a course."""
    user = request.user
    course_id = request.POST.get("course_id")
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    if request.POST.get("receive_emails"):
        # Opting in: drop any existing opt-out record (no-op when absent).
        Optout.objects.filter(user=user, course_id=course_key).delete()
        log.info(
            u"User %s (%s) opted in to receive emails from course %s",
            user.username,
            user.email,
            course_id
        )
        track.views.server_track(request, "change-email-settings", {"receive_emails": "yes", "course": course_id}, page='dashboard')
    else:
        # Opting out: ensure exactly one opt-out record exists.
        Optout.objects.get_or_create(user=user, course_id=course_key)
        log.info(
            u"User %s (%s) opted out of receiving emails from course %s",
            user.username,
            user.email,
            course_id
        )
        track.views.server_track(request, "change-email-settings", {"receive_emails": "no", "course": course_id}, page='dashboard')
    return JsonResponse({"success": True})
def _get_course_programs(user, user_enrolled_courses):  # pylint: disable=invalid-name
    """ Returns a dictionary of programs courses data require for the student
    dashboard.
    Given a user and an iterable of course keys, find all
    the programs relevant to the user and return them in a
    dictionary keyed by the course_key.
    Arguments:
        user (user object): Currently logged-in User
        user_enrolled_courses (list): List of course keys in which user is
            enrolled
    Returns:
        Dictionary response containing programs or {}
    """
    course_programs = get_course_programs_for_dashboard(user, user_enrolled_courses)
    programs_data = {}
    for course_key, program in course_programs.viewitems():
        # Only active XSeries programs are shown on the dashboard.
        if program.get('status') != 'active' or program.get('category') != 'xseries':
            continue
        try:
            marketing_url = urljoin(
                settings.MKTG_URLS.get('ROOT'), 'xseries' + '/{}'
            ).format(program['marketing_slug'])
            programs_data[course_key] = {
                'course_count': len(program['course_codes']),
                'display_name': program['name'],
                'category': program.get('category'),
                'program_marketing_url': marketing_url,
                'display_category': 'XSeries'
            }
        except KeyError:
            # A required field is missing: skip this program rather than fail.
            log.warning('Program structure is invalid, skipping display: %r', program)
    return programs_data
| hamzehd/edx-platform | common/djangoapps/student/views.py | Python | agpl-3.0 | 95,774 | [
"VisIt"
] | 29f6eec365f49c5ade7921f5ef486a80addab36f6af4260d9d94c9102a5541aa |
from behave import *
# Unique to Scenario: User logs out
@when('I logout')
def impl(context):
    """Click the site's 'Logout' link."""
    logout_link = context.browser.find_link_by_text('Logout').first
    logout_link.click()
@then('I am no longer authenticated')
def impl(context):
    """Verify the session is gone: the profile page must redirect to login."""
    # Try to visit my profile page
    context.browser.visit(context.config.server_url + '/accounts/profile/')
    # But find that we're redirected to the login page.
    # Fix: build the expected URL from the configured server_url instead of
    # hard-coding http://localhost:8081, so the step works against any
    # configured test server.
    expected_url = context.config.server_url.rstrip('/') + '/accounts/login/?next=/accounts/profile/'
    assert context.browser.url == expected_url
| nlhkabu/connect | bdd/features/steps/logout.py | Python | bsd-3-clause | 479 | [
"VisIt"
] | bf38b6fd0e07fdbb9b74eb0dee3742752c5b24c2cf13701213f146eacc1c9206 |
#!/usr/bin/env python
def get_help_data_12576():
    """
    Sensor Inventory help.
    Data store of information to be presented when a help request is made for port 12576.
    Returns a list of dictionaries associated with various requests supported on that port.

    Each dictionary describes one endpoint: its route, HTTP method, whether
    it requires data/permissions, the expected parameters ('data_format')
    and one or more sample request/response pairs ('samples').
    """
    help_data = \
        [
            # GET sensor/inv -- list all subsites.
            {
                'root': 'sensor/inv',
                'endpoint': 'sensor/inv',
                'method': 'GET',
                'permission_required': False,
                'description': 'Get platforms (subsites). Returns a list of available subsites from the sensor inventory.',
                'data_required': False,
                'data_format': None,
                'samples': [{
                    'sample_request': 'sensor/inv',
                    'sample_response': [ "CE01ISSM", "CE01ISSP", "CE02SHBP",
                                         "CP01CNSM", "CP02PMCI", "CP02PMCO",
                                         "GA01SUMO", "GI01SUMO", "GP02HYPM", "GP03FLMA",
                                         "GS01SUMO", "RS01SBPS", "RS03CCAL", "RS03ECAL", "SSRSPACC" ]
                }]
            },
            # GET sensor/inv/{subsite} -- list nodes for a subsite.
            {
                'root': 'sensor/inv',
                'endpoint': 'sensor/inv/{subsite}',
                'method': 'GET',
                'permission_required': False,
                'description': 'Returns a list of nodes for a subsite from the sensor inventory.',
                'data_required': True,
                'data_format': [
                    { 'name': 'subsite',
                      'type': 'str',
                      'description': 'The subsite portion of the reference designator.',
                      'valid_values': None,
                      'default': None
                    }
                ],
                'samples': [{
                    'sample_request': 'sensor/inv/CE01ISSM',
                    'sample_response': [ "MFC31", "MFD35", "MFD37", "RID16", "SBC11", "SBD17" ]
                }]
            },
            # GET sensor/inv/{subsite}/{node} -- list sensors for a node.
            {
                'root': 'sensor/inv',
                'endpoint': 'sensor/inv/{subsite}/{node}',
                'method': 'GET',
                'permission_required': False,
                'description': 'Get sensors for a subsite-node. Returns a list of sensors for a subsite and node from the sensor inventory.',
                'data_required': True,
                'data_format': [
                    { 'name': 'subsite',
                      'type': 'str',
                      'description': 'The subsite portion of the reference designator.',
                      'valid_values': None,
                      'default': None
                    },
                    { 'name': 'node',
                      'type': 'str',
                      'description': 'The node portion of the reference designator.',
                      'valid_values': None,
                      'default': None
                    }
                ],
                'samples': [{
                    'sample_request': 'sensor/inv/CE01ISSM/MFD35',
                    'sample_response': [ "00-DCLENG000", "01-VEL3DD000", "02-PRESFA000", "04-ADCPTM000",
                                         "05-PCO2WB000", "06-PHSEND000" ]
                }]
            },
            # GET sensor/inv/{subsite}/{node}/{sensor} -- list stream methods.
            {
                'root': 'sensor/inv',
                'endpoint': 'sensor/inv/{subsite}/{node}/{sensor}',
                'method': 'GET',
                'permission_required': False,
                'description': 'Get all instrument methods. Returns a list of stream methods for a sensor.',
                'data_required': True,
                'data_format': [
                    { 'name': 'subsite',
                      'type': 'str',
                      'description': 'The subsite portion of the reference designator.',
                      'valid_values': None,
                      'default': None
                    },
                    { 'name': 'node',
                      'type': 'str',
                      'description': 'The node portion of the reference designator.',
                      'valid_values': None,
                      'default': None
                    },
                    { 'name': 'sensor',
                      'type': 'str',
                      'description': 'The sensor portion of the reference designator.',
                      'valid_values': None,
                      'default': None
                    }
                ],
                'samples': [{
                    'sample_request': 'sensor/inv/CE01ISSM/MFC31/00-CPMENG000',
                    'sample_response': [ "telemetered" ]
                }]
            },
            # GET .../metadata -- instrument metadata (no sample provided).
            {
                'root': 'sensor/inv',
                'endpoint': 'sensor/inv/{subsite}/{node}/{sensor}/metadata',
                'method': 'GET',
                'permission_required': False,
                'description': 'Get instrument metadata. Returns a metadata dictionary with the parameters and times for a sensor.',
                'data_required': True,
                'data_format': [
                    { 'name': 'subsite',
                      'type': 'str',
                      'description': 'The subsite portion of the reference designator.',
                      'valid_values': None,
                      'default': None
                    },
                    { 'name': 'node',
                      'type': 'str',
                      'description': 'The node portion of the reference designator.',
                      'valid_values': None,
                      'default': None
                    },
                    { 'name': 'sensor',
                      'type': 'str',
                      'description': 'The sensor portion of the reference designator.',
                      'valid_values': None,
                      'default': None
                    }
                ]
            },
            # GET .../metadata/times -- stream time coverage (optionally partitioned).
            {
                'root': 'sensor/inv',
                'endpoint': 'sensor/inv/{subsite}/{node}/{sensor}/metadata/times',
                'method': 'GET',
                'permission_required': False,
                'description': 'Get instrument metadata times. Returns a list of dictionaries, each containing the stream name, method and begin and end times for a sensor.',
                'data_required': True,
                'data_format': [
                    { 'name': 'subsite',
                      'type': 'str',
                      'description': 'The subsite portion of the reference designator.',
                      'valid_values': None,
                      'default': None
                    },
                    { 'name': 'node',
                      'type': 'str',
                      'description': 'The node portion of the reference designator.',
                      'valid_values': None,
                      'default': None
                    },
                    { 'name': 'sensor',
                      'type': 'str',
                      'description': 'The sensor portion of the reference designator.',
                      'valid_values': None,
                      'default': None
                    },
                    { 'name': 'partition',
                      'type': 'bool',
                      'description': '[Optional] Provide additional time partition information.',
                      'valid_values': None,
                      'default': None
                    }
                ],
                'samples': [{
                    'sample_request': 'sensor/inv/CE01ISSM/MFC31/00-CPMENG000/metadata/times',
                    'sample_response': [ {
                                          "stream" : "cg_cpm_eng_cpm",
                                          "method" : "telemetered",
                                          "count" : 1,
                                          "endTime" : "2015-08-25T00:36:10.708Z",
                                          "beginTime" : "2015-08-25T00:36:10.708Z"
                                        } ]
                },
                {
                    'sample_request': 'sensor/inv/CE01ISSM/MFC31/00-CPMENG000/metadata/times?partition=True',
                    'sample_response': [ {
                                          "stream" : "cg_cpm_eng_cpm",
                                          "method" : "telemetered",
                                          "count" : 1,
                                          "bin" : 3649363200,
                                          "store" : "cass",
                                          "endTime" : "2015-08-25T00:36:10.708Z",
                                          "beginTime" : "2015-08-25T00:36:10.708Z"
                                        } ]
                }]
            },
            # GET .../<method> -- list streams for an instrument method.
            {
                'root': 'sensor/inv',
                'endpoint': 'sensor/inv/{subsite}/{node}/{sensor}/<method>',
                'method': 'GET',
                'permission_required': False,
                'description': 'Get instrument methods. Returns a list of available streams for an instrument and method.',
                'data_required': True,
                'data_format': [
                    { 'name': 'subsite',
                      'type': 'str',
                      'description': 'The subsite portion of the reference designator.',
                      'valid_values': None,
                      'default': None
                    },
                    { 'name': 'node',
                      'type': 'str',
                      'description': 'The node portion of the reference designator.',
                      'valid_values': None,
                      'default': None
                    },
                    { 'name': 'sensor',
                      'type': 'str',
                      'description': 'The sensor portion of the reference designator.',
                      'valid_values': None,
                      'default': None
                    },
                    { 'name': 'method',
                      'type': 'str',
                      'description': 'Stream acquisition method (i.e. \'telemetered\', \'streamed\', etc.)',
                      'valid_values': None,
                      'default': None
                    }
                ],
                'samples': [{
                    'sample_request': 'sensor/inv/CE01ISSM/MFC31/00-CPMENG000/telemetered',
                    'sample_response': [ "cg_cpm_eng_cpm" ]
                }]
            },
            # GET .../{method}/{stream} -- fetch actual stream data.
            {
                'root': 'sensor/inv',
                'endpoint': 'sensor/inv/{subsite}/{node}/{sensor}/{method}/{stream}',
                'method': 'GET',
                'permission_required': False,
                'description': 'Get data for a reference designator from a specific stream.',
                'data_required': True,
                'data_format': [
                    {'name': 'subsite',
                     'type': 'str',
                     'description': 'The subsite portion of the reference designator.',
                     'valid_values': None,
                     'default': None
                    },
                    {'name': 'node',
                     'type': 'str',
                     'description': 'The node portion of the reference designator.',
                     'valid_values': None,
                     'default': None
                    },
                    {'name': 'sensor',
                     'type': 'str',
                     'description': 'The sensor portion of the reference designator.',
                     'valid_values': None,
                     'default': None
                    },
                    {'name': 'method',
                     'type': 'str',
                     'description': 'Stream acquisition method (i.e. \'telemetered\', \'streamed\', etc.)',
                     'valid_values': None,
                     'default': None
                    },
                    {'name': 'stream',
                     'type': 'str',
                     'description': 'Stream name.',
                     'valid_values': None,
                     'default': None
                    },
                    {'name': 'user',
                     'type': 'str',
                     'description': 'The OOI user requesting the data.',
                     'valid_values': None,
                     'default': None
                    },
                    {'name': 'limit',
                     'type': 'int',
                     'description': '[Optional] The upper limit of json records to be returned. ' +
                                    '(Example: &limit=1000) If limit=-1 a netcdf object is returned. ' +
                                    'If limit is not provided, netcdf object information/links are provided ' +
                                    'for the OOI opendap server. ',
                     'valid_values': None,
                     'default': None
                    },
                    {'name': 'parameters',
                     'type': 'str',
                     # NOTE(review): "¶meters" below looks like a mangled
                     # "&parameters" (HTML-entity corruption) -- confirm
                     # against the upstream help text before correcting.
                     'description': '[Optional] Comma separated parameter(s); used to identify response data ' +
                                    ' by parameters. (Example: ¶meters=2926,7)',
                     'valid_values': None,
                     'default': None
                    },
                    {'name': 'email',
                     'type': 'str',
                     'description': '[Optional] Valid email address (registered in system) for ' +
                                    'email notification when request completes.',
                     'valid_values': None,
                     'default': None
                    },
                    {'name': 'include_provenance',
                     'type': 'bool',
                     'description': '[Optional] Indicate whether or not to include provenance; default is False.',
                     'valid_values': None,
                     'default': None
                    },
                    {'name': 'include_annotations',
                     'type': 'bool',
                     'description': '[Optional] Indicate whether or not to include annotations; default is False.',
                     'valid_values': None,
                     'default': None
                    }
                ],
                'samples': [{
                    'sample_request': 'sensor/inv/CE01ISSM/MFC31/00-CPMENG000/telemetered/cg_cpm_eng_cpm?user=foo',
                    'sample_response':
                        {"requestUUID":"8ade93a8-1ea8-48ff-a3ea-46b12bd3db75",
                        "outputURL":"https://opendap-test.oceanobservatories.org/thredds/catalog/ooinet-dev-03/foo/20170525T163138-CE01ISSM-MFC31-00-CPMENG000-telemetered-cg_cpm_eng_cpm/catalog.html",
                        "allURLs":["https://opendap-test.oceanobservatories.org/thredds/catalog/ooinet-dev-03/foo/20170525T163138-CE01ISSM-MFC31-00-CPMENG000-telemetered-cg_cpm_eng_cpm/catalog.html",
                        "https://opendap.oceanobservatories.org/ooinet-dev-03/async_results/foo/20170525T163138-CE01ISSM-MFC31-00-CPMENG000-telemetered-cg_cpm_eng_cpm"],
                        "sizeCalculation":1000,
                        "timeCalculation":60,
                        "numberOfSubJobs":1}
                },
                {
                    # NOTE(review): "¶meters" below likely began life as
                    # "&parameters" -- confirm before correcting.
                    'sample_request': 'http://uframe-3-test.intra.oceanobservatories.org:12576/sensor/inv/' +
                                      'GA01SUMO/RII11/02-CTDMOQ017/' +
                                      'telemetered/ctdmo_ghqr_imodem_instrument?' +
                                      'beginDT=2017-05-18T15:07:00.000Z&endDT=2017-05-25T15:07:00.000Z' +
                                      '&limit=1000¶meters=2926,7&user=plotting',
                    'sample_response': [
                                        {
                                          "ctdmo_seawater_pressure_qc_results": 29,
                                          "ctdmo_seawater_pressure_qc_executed": 29,
                                          "pk": {
                                            "node": "RII11",
                                            "stream": "ctdmo_ghqr_imodem_instrument",
                                            "subsite": "GA01SUMO",
                                            "deployment": 3,
                                            "time": 3704108851.0,
                                            "sensor": "02-CTDMOQ017",
                                            "method": "telemetered"
                                          },
                                          "ctdmo_seawater_pressure": 503.5411462319808,
                                          "time": 3704108851.0
                                        },
                                        {
                                          "ctdmo_seawater_pressure_qc_results": 29,
                                          "ctdmo_seawater_pressure_qc_executed": 29,
                                          "pk": {
                                            "node": "RII11",
                                            "stream": "ctdmo_ghqr_imodem_instrument",
                                            "subsite": "GA01SUMO",
                                            "deployment": 3,
                                            "time": 3704109301.0,
                                            "sensor": "02-CTDMOQ017",
                                            "method": "telemetered"
                                          },
                                          "ctdmo_seawater_pressure": 503.45232172670586,
                                          "time": 3704109301.0
                                        }
                                       ]
                }]
            },
            # GET sensor/inv/toc -- table of contents (abbreviated sample).
            {
                'root': 'sensor/inv',
                'endpoint': 'sensor/inv/toc',
                'method': 'GET',
                'permission_required': False,
                'description': 'Get toc. Returns a dictionary of parameters_by_stream, parameter_definitions and ' +
                               'list of instruments (The sample response content is abbreviated.)',
                'data_required': False,
                'data_format': None,
                'samples': [{
                    'sample_request': 'sensor/inv/toc',
                    'sample_response': {
                        "parameters_by_stream": {"adcp_config" : [ "PD7", "PD10", "PD11", "PD12"]},
                        "parameter_definitions": [ {
                            "pdId" : "PD1",
                            "particle_key" : "conductivity",
                            "type" : "FLOAT",
                            "unsigned" : False,
                            "shape" : "SCALAR",
                            "fill_value" : "-9999999",
                            "units" : "S m-1"
                          }, {
                            "pdId" : "PD2",
                            "particle_key" : "pressure",
                            "type" : "FLOAT",
                            "unsigned" : False,
                            "shape" : "SCALAR",
                            "fill_value" : "-9999999",
                            "units" : "dbar"
                          }],
                        "instruments": [{
                            "reference_designator" : "CE02SHSM-SBD11-04-VELPTA000",
                            "platform_code" : "CE02SHSM",
                            "mooring_code" : "SBD11",
                            "instrument_code" : "04-VELPTA000",
                            "streams" : [ {
                                "stream" : "velpt_ab_dcl_diagnostics_metadata",
                                "method" : "telemetered",
                                "count" : 392,
                                "endTime" : "2017-04-10T12:03:01.000Z",
                                "beginTime" : "2016-09-27T00:03:01.000Z"
                              }, {
                                "stream" : "velpt_ab_dcl_diagnostics",
                                "method" : "telemetered",
                                "count" : 39200,
                                "endTime" : "2017-04-10T12:04:40.000Z",
                                "beginTime" : "2016-09-27T00:03:01.000Z"
                              }, {
                                "stream" : "velpt_ab_dcl_instrument",
                                "method" : "telemetered",
                                "count" : 18825,
                                "endTime" : "2017-04-10T23:45:00.000Z",
                                "beginTime" : "2016-09-26T21:45:00.000Z"
                              } ]
                        }]
                    }
                }]
            }
        ]
    return help_data
| asascience-open/ooi-ui-services | ooiservices/app/m2m/help_data_12576.py | Python | apache-2.0 | 23,871 | [
"NetCDF"
] | a4b2c04f70ace88de407058a6bdf9692db430db55af4fbc35ee2ddc80f8867e1 |
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Oleh Kozynets <ok7mailbox@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
# jona-sassenhagen <jona.sassenhagen@gmail.com>
# Joan Massich <mailsik@gmail.com>
#
# License: Simplified BSD
import contextlib
from functools import partial
import os
import os.path as op
import sys
import time
import traceback
import warnings
import numpy as np
from scipy import sparse
from collections import OrderedDict
from .colormap import calculate_lut
from .surface import Surface
from .view import views_dicts, _lh_views_dict
from .mplcanvas import MplCanvas
from .callback import (ShowView, IntSlider, TimeSlider, SmartSlider,
BumpColorbarPoints, UpdateColorbarScale)
from ..utils import _show_help, _get_color_list
from .._3d import _process_clim, _handle_time, _check_views
from ...externals.decorator import decorator
from ...defaults import _handle_default
from ...surface import mesh_edges
from ...source_space import SourceSpaces, vertex_to_mni, read_talxfm
from ...transforms import apply_trans
from ...utils import (_check_option, logger, verbose, fill_doc, _validate_type,
use_log_level, Bunch, _ReuseCycle, warn)
@decorator
def safe_event(fun, *args, **kwargs):
    """Protect against PyQt5 exiting on event-handling errors."""
    try:
        return fun(*args, **kwargs)
    except Exception:
        # Deliberately swallow the exception after printing it: an event
        # handler that raises would otherwise take down the GUI process.
        # Returns None when the wrapped call failed.
        traceback.print_exc(file=sys.stderr)
class _Overlay(object):
def __init__(self, scalars, colormap, rng, opacity):
self._scalars = scalars
self._colormap = colormap
self._rng = rng
self._opacity = opacity
def to_colors(self):
from .._3d import _get_cmap
from matplotlib.colors import ListedColormap
if isinstance(self._colormap, str):
cmap = _get_cmap(self._colormap)
else:
cmap = ListedColormap(self._colormap / 255.)
def diff(x):
return np.max(x) - np.min(x)
def norm(x, rng=None):
if rng is None:
rng = [np.min(x), np.max(x)]
return (x - rng[0]) / (rng[1] - rng[0])
rng = self._rng
scalars = self._scalars
if diff(scalars) != 0:
scalars = norm(scalars, rng)
colors = cmap(scalars)
if self._opacity is not None:
colors[:, 3] *= self._opacity
return colors
class _LayeredMesh(object):
def __init__(self, renderer, vertices, triangles, normals):
self._renderer = renderer
self._vertices = vertices
self._triangles = triangles
self._normals = normals
self._polydata = None
self._actor = None
self._is_mapped = False
self._cache = None
self._overlays = OrderedDict()
self._default_scalars = np.ones(vertices.shape)
self._default_scalars_name = 'Data'
def map(self):
kwargs = {
"color": None,
"pickable": True,
"rgba": True,
}
mesh_data = self._renderer.mesh(
x=self._vertices[:, 0],
y=self._vertices[:, 1],
z=self._vertices[:, 2],
triangles=self._triangles,
normals=self._normals,
scalars=self._default_scalars,
**kwargs
)
self._actor, self._polydata = mesh_data
self._is_mapped = True
def _compute_over(self, B, A):
assert A.ndim == B.ndim == 2
assert A.shape[1] == B.shape[1] == 4
A_w = A[:, 3:] # * 1
B_w = B[:, 3:] * (1 - A_w)
C = A.copy()
C[:, :3] *= A_w
C[:, :3] += B[:, :3] * B_w
C[:, 3:] += B_w
C[:, :3] /= C[:, 3:]
return np.clip(C, 0, 1, out=C)
def _compose_overlays(self):
B = None
for overlay in self._overlays.values():
A = overlay.to_colors()
if B is None:
B = A
else:
B = self._compute_over(B, A)
return B
def add_overlay(self, scalars, colormap, rng, opacity, name):
overlay = _Overlay(
scalars=scalars,
colormap=colormap,
rng=rng,
opacity=opacity
)
self._overlays[name] = overlay
colors = overlay.to_colors()
# save colors in cache
if self._cache is None:
self._cache = colors
else:
self._cache = self._compute_over(self._cache, colors)
# update the texture
self._update()
def remove_overlay(self, names):
if not isinstance(names, list):
names = [names]
for name in names:
if name in self._overlays:
del self._overlays[name]
self.update()
def _update(self):
if self._cache is None:
return
from ..backends._pyvista import _set_mesh_scalars
_set_mesh_scalars(
mesh=self._polydata,
scalars=self._cache,
name=self._default_scalars_name,
)
def update(self):
self._cache = self._compose_overlays()
self._update()
def _clean(self):
mapper = self._actor.GetMapper()
mapper.SetLookupTable(None)
self._actor.SetMapper(None)
self._actor = None
self._polydata = None
self._renderer = None
def update_overlay(self, name, scalars=None, colormap=None,
opacity=None):
overlay = self._overlays.get(name, None)
if overlay is None:
return
if scalars is not None:
overlay._scalars = scalars
if colormap is not None:
overlay._colormap = colormap
if opacity is not None:
overlay._opacity = opacity
self.update()
@fill_doc
class Brain(object):
"""Class for visualizing a brain.
.. warning::
The API for this class is not currently complete. We suggest using
:meth:`mne.viz.plot_source_estimates` with the PyVista backend
enabled to obtain a ``Brain`` instance.
Parameters
----------
subject_id : str
Subject name in Freesurfer subjects dir.
hemi : str
Hemisphere id (i.e. 'lh', 'rh', 'both', or 'split'). In the case
of 'both', both hemispheres are shown in the same window.
In the case of 'split' hemispheres are displayed side-by-side
in different viewing panes.
surf : str
FreeSurfer surface mesh name (i.e. 'white', 'inflated', etc.).
title : str
Title for the window.
cortex : str or None
Specifies how the cortical surface is rendered.
The name of one of the preset cortex styles can be:
``'classic'`` (default), ``'high_contrast'``,
``'low_contrast'``, or ``'bone'`` or a valid color name.
Setting this to ``None`` is equivalent to ``(0.5, 0.5, 0.5)``.
alpha : float in [0, 1]
Alpha level to control opacity of the cortical surface.
size : int | array-like, shape (2,)
The size of the window, in pixels. can be one number to specify
a square window, or a length-2 sequence to specify (width, height).
background : tuple(int, int, int)
The color definition of the background: (red, green, blue).
foreground : matplotlib color
Color of the foreground (will be used for colorbars and text).
None (default) will use black or white depending on the value
of ``background``.
figure : list of Figure | None | int
If None (default), a new window will be created with the appropriate
views. For single view plots, the figure can be specified as int to
retrieve the corresponding Mayavi window.
subjects_dir : str | None
If not None, this directory will be used as the subjects directory
instead of the value set using the SUBJECTS_DIR environment
variable.
views : list | str
The views to use.
offset : bool
If True, aligns origin with medial wall. Useful for viewing inflated
surface where hemispheres typically overlap (Default: True).
show_toolbar : bool
If True, toolbars will be shown for each view.
offscreen : bool
If True, rendering will be done offscreen (not shown). Useful
mostly for generating images or screenshots, but can be buggy.
Use at your own risk.
interaction : str
Can be "trackball" (default) or "terrain", i.e. a turntable-style
camera.
units : str
Can be 'm' or 'mm' (default).
%(view_layout)s
show : bool
Display the window as soon as it is ready. Defaults to True.
Attributes
----------
geo : dict
A dictionary of pysurfer.Surface objects for each hemisphere.
overlays : dict
The overlays.
Notes
-----
This table shows the capabilities of each Brain backend ("✓" for full
support, and "-" for partial support):
.. table::
:widths: auto
+---------------------------+--------------+---------------+
| 3D function: | surfer.Brain | mne.viz.Brain |
+===========================+==============+===============+
| add_annotation | ✓ | ✓ |
+---------------------------+--------------+---------------+
| add_data | ✓ | ✓ |
+---------------------------+--------------+---------------+
| add_foci | ✓ | ✓ |
+---------------------------+--------------+---------------+
| add_label | ✓ | ✓ |
+---------------------------+--------------+---------------+
| add_text | ✓ | ✓ |
+---------------------------+--------------+---------------+
| close | ✓ | ✓ |
+---------------------------+--------------+---------------+
| data | ✓ | ✓ |
+---------------------------+--------------+---------------+
| foci | ✓ | |
+---------------------------+--------------+---------------+
| labels | ✓ | ✓ |
+---------------------------+--------------+---------------+
| remove_foci | ✓ | |
+---------------------------+--------------+---------------+
| remove_labels | ✓ | ✓ |
+---------------------------+--------------+---------------+
| remove_annotations | - | ✓ |
+---------------------------+--------------+---------------+
| scale_data_colormap | ✓ | |
+---------------------------+--------------+---------------+
| save_image | ✓ | ✓ |
+---------------------------+--------------+---------------+
| save_movie | ✓ | ✓ |
+---------------------------+--------------+---------------+
| screenshot | ✓ | ✓ |
+---------------------------+--------------+---------------+
| show_view | ✓ | ✓ |
+---------------------------+--------------+---------------+
| TimeViewer | ✓ | ✓ |
+---------------------------+--------------+---------------+
| enable_depth_peeling | | ✓ |
+---------------------------+--------------+---------------+
| get_picked_points | | ✓ |
+---------------------------+--------------+---------------+
| add_data(volume) | | ✓ |
+---------------------------+--------------+---------------+
| view_layout | | ✓ |
+---------------------------+--------------+---------------+
| flatmaps | | ✓ |
+---------------------------+--------------+---------------+
| vertex picking | | ✓ |
+---------------------------+--------------+---------------+
| label picking | | ✓ |
+---------------------------+--------------+---------------+
"""
    def __init__(self, subject_id, hemi, surf, title=None,
                 cortex="classic", alpha=1.0, size=800, background="black",
                 foreground=None, figure=None, subjects_dir=None,
                 views='auto', offset=True, show_toolbar=False,
                 offscreen=False, interaction='trackball', units='mm',
                 view_layout='vertical', show=True):
        from ..backends.renderer import backend, _get_renderer, _get_3d_backend
        from .._3d import _get_cmap
        from matplotlib.colors import colorConverter
        # resolve which hemispheres will be loaded and rendered
        if hemi in ('both', 'split'):
            self._hemis = ('lh', 'rh')
        elif hemi in ('lh', 'rh'):
            self._hemis = (hemi, )
        else:
            raise KeyError('hemi has to be either "lh", "rh", "split", '
                           'or "both"')
        self._view_layout = _check_option('view_layout', view_layout,
                                          ('vertical', 'horizontal'))
        if figure is not None and not isinstance(figure, int):
            backend._check_3d_figure(figure)
        if title is None:
            self._title = subject_id
        else:
            self._title = title
        # placeholder; the ``interaction`` argument is applied below
        self._interaction = 'trackball'
        if isinstance(background, str):
            background = colorConverter.to_rgb(background)
        self._bg_color = background
        if foreground is None:
            # pick black or white, whichever contrasts with the background
            foreground = 'w' if sum(self._bg_color) < 2 else 'k'
        if isinstance(foreground, str):
            foreground = colorConverter.to_rgb(foreground)
        self._fg_color = foreground
        if isinstance(views, str):
            views = [views]
        views = _check_views(surf, views, hemi)
        # subplot grid: one row per view, two columns only for 'split'
        col_dict = dict(lh=1, rh=1, both=1, split=2)
        shape = (len(views), col_dict[hemi])
        if self._view_layout == 'horizontal':
            shape = shape[::-1]
        self._subplot_shape = shape
        size = tuple(np.atleast_1d(size).round(0).astype(int).flat)
        if len(size) not in (1, 2):
            raise ValueError('"size" parameter must be an int or length-2 '
                             'sequence of ints.')
        self._size = size if len(size) == 2 else size * 2  # 1-tuple to 2-tuple
        self.time_viewer = False
        self.notebook = (_get_3d_backend() == "notebook")
        self._hemi = hemi
        self._units = units
        self._alpha = float(alpha)
        self._subject_id = subject_id
        self._subjects_dir = subjects_dir
        self._views = views
        self._times = None
        self._vertex_to_label_id = dict()
        self._annotation_labels = dict()
        self._labels = {'lh': list(), 'rh': list()}
        self._annots = {'lh': list(), 'rh': list()}
        self._layered_meshes = {}
        # for now only one color bar can be added
        # since it is the same for all figures
        self._colorbar_added = False
        # for now only one time label can be added
        # since it is the same for all figures
        self._time_label_added = False
        # array of data used by TimeViewer
        self._data = {}
        self.geo = {}
        self.set_time_interpolation('nearest')

        geo_kwargs = self._cortex_colormap(cortex)
        # evaluate at the midpoint of the used colormap
        val = -geo_kwargs['vmin'] / (geo_kwargs['vmax'] - geo_kwargs['vmin'])
        self._brain_color = _get_cmap(geo_kwargs['colormap'])(val)

        # load geometry for one or both hemispheres as necessary
        offset = None if (not offset or hemi != 'both') else 0.0

        self._renderer = _get_renderer(name=self._title, size=self._size,
                                       bgcolor=background,
                                       shape=shape,
                                       fig=figure)
        if _get_3d_backend() == "pyvista":
            self.plotter = self._renderer.plotter
            self.window = self.plotter.app_window
            # make sure we release VTK/Qt resources on window close
            self.window.signal_close.connect(self._clean)

        for h in self._hemis:
            # Initialize a Surface object as the geometry
            geo = Surface(subject_id, h, surf, subjects_dir, offset,
                          units=self._units)
            # Load in the geometry and curvature
            geo.load_geometry()
            geo.load_curvature()
            self.geo[h] = geo
            for ri, ci, v in self._iter_views(h):
                self._renderer.subplot(ri, ci)
                if self._layered_meshes.get(h) is None:
                    mesh = _LayeredMesh(
                        renderer=self._renderer,
                        vertices=self.geo[h].coords,
                        triangles=self.geo[h].faces,
                        normals=self.geo[h].nn,
                    )
                    mesh.map()  # send to GPU
                    # binarized curvature is the base (bottom) overlay
                    mesh.add_overlay(
                        scalars=self.geo[h].bin_curv,
                        colormap=geo_kwargs["colormap"],
                        rng=[geo_kwargs["vmin"], geo_kwargs["vmax"]],
                        opacity=alpha,
                        name='curv',
                    )
                    self._layered_meshes[h] = mesh
                    # add metadata to the mesh for picking
                    mesh._polydata._hemi = h
                else:
                    # reuse the existing actor for additional views
                    actor = self._layered_meshes[h]._actor
                    self._renderer.plotter.add_actor(actor)
                self._renderer.set_camera(**views_dicts[h][v])

        self.interaction = interaction
        self._closed = False
        if show:
            self.show()
        # update the views once the geometry is all set
        for h in self._hemis:
            for ri, ci, v in self._iter_views(h):
                self.show_view(v, row=ri, col=ci, hemi=h)

        if surf == 'flat':
            self._renderer.set_interaction("rubber_band_2d")
        if hemi == 'rh' and hasattr(self._renderer, "_orient_lights"):
            self._renderer._orient_lights()
    def setup_time_viewer(self, time_viewer=True, show_traces=True):
        """Configure the time viewer parameters.

        Parameters
        ----------
        time_viewer : bool
            If True, enable widgets interaction. Defaults to True.
        show_traces : bool
            If True, enable visualization of time traces. Defaults to True.
        """
        if self.time_viewer:
            # already configured; never set up twice
            return
        if not self._data:
            raise ValueError("No data to visualize. See ``add_data``.")
        self.time_viewer = time_viewer
        self.orientation = list(_lh_views_dict.keys())
        self.default_smoothing_range = [0, 15]

        # setup notebook
        if self.notebook:
            self._configure_notebook()
            return

        # Default configuration
        self.playback = False
        self.visibility = False
        # ~60 Hz playback polling, clamped to at least 1 ms
        self.refresh_rate_ms = max(int(round(1000. / 60.)), 1)
        self.default_scaling_range = [0.2, 2.0]
        self.default_playback_speed_range = [0.01, 1]
        self.default_playback_speed_value = 0.05
        self.default_status_bar_msg = "Press ? for help"
        self.default_label_extract_modes = {
            "stc": ["mean", "max"],
            "src": ["mean_flip", "pca_flip", "auto"],
        }
        self.default_trace_modes = ('vertex', 'label')
        self.annot = None
        self.label_extract_mode = None
        all_keys = ('lh', 'rh', 'vol')
        self.act_data_smooth = {key: (None, None) for key in all_keys}
        self.color_list = _get_color_list()
        # remove grey for better contrast on the brain
        self.color_list.remove("#7f7f7f")
        self.color_cycle = _ReuseCycle(self.color_list)
        self.mpl_canvas = None
        self.gfp = None
        self.picked_patches = {key: list() for key in all_keys}
        self.picked_points = {key: list() for key in all_keys}
        self.pick_table = dict()
        self._spheres = list()
        self._mouse_no_mvt = -1
        self.icons = dict()
        self.actions = dict()
        self.callbacks = dict()
        self.sliders = dict()
        self.keys = ('fmin', 'fmid', 'fmax')
        # slider geometry/colors consumed by _set_slider_style
        self.slider_length = 0.02
        self.slider_width = 0.04
        self.slider_color = (0.43137255, 0.44313725, 0.45882353)
        self.slider_tube_width = 0.04
        self.slider_tube_color = (0.69803922, 0.70196078, 0.70980392)
        self._trace_mode_widget = None
        self._annot_cands_widget = None
        self._label_mode_widget = None

        # Direct access parameters:
        self._iren = self._renderer.plotter.iren
        self.main_menu = self.plotter.main_menu
        self.tool_bar = self.window.addToolBar("toolbar")
        self.status_bar = self.window.statusBar()
        self.interactor = self.plotter.interactor

        # Derived parameters:
        self.playback_speed = self.default_playback_speed_value
        _validate_type(show_traces, (bool, str, 'numeric'), 'show_traces')
        self.interactor_fraction = 0.25
        if isinstance(show_traces, str):
            self.show_traces = True
            self.separate_canvas = False
            self.traces_mode = 'vertex'
            if show_traces == 'separate':
                self.separate_canvas = True
            elif show_traces == 'label':
                self.traces_mode = 'label'
            else:
                assert show_traces == 'vertex'  # guaranteed above
        else:
            if isinstance(show_traces, bool):
                self.show_traces = show_traces
            else:
                # numeric: fraction of the window height given to the traces
                show_traces = float(show_traces)
                if not 0 < show_traces < 1:
                    raise ValueError(
                        'show traces, if numeric, must be between 0 and 1, '
                        f'got {show_traces}')
                self.show_traces = True
                self.interactor_fraction = show_traces
            self.traces_mode = 'vertex'
            self.separate_canvas = False
        del show_traces

        # build each piece of the UI in dependency order
        self._load_icons()
        self._configure_time_label()
        self._configure_sliders()
        self._configure_scalar_bar()
        self._configure_playback()
        self._configure_menu()
        self._configure_tool_bar()
        self._configure_status_bar()
        self._configure_picking()
        self._configure_trace_mode()

        # show everything at the end
        self.toggle_interface()
        with self.ensure_minimum_sizes():
            self.show()
    @safe_event
    def _clean(self):
        """Break reference cycles at close time so Qt/VTK objects can free."""
        # resolve the reference cycle
        self.clear_glyphs()
        self.remove_annotations()
        # clear init actors
        for hemi in self._hemis:
            self._layered_meshes[hemi]._clean()
        self._clear_callbacks()
        if getattr(self, 'mpl_canvas', None) is not None:
            self.mpl_canvas.clear()
        if getattr(self, 'act_data_smooth', None) is not None:
            for key in list(self.act_data_smooth.keys()):
                self.act_data_smooth[key] = None
        # XXX this should be done in PyVista
        for renderer in self.plotter.renderers:
            renderer.RemoveAllLights()
        # app_window cannot be set to None because it is used in __del__
        for key in ('lighting', 'interactor', '_RenderWindow'):
            setattr(self.plotter, key, None)
        # Qt LeaveEvent requires _Iren so we use _FakeIren instead of None
        # to resolve the ref to vtkGenericRenderWindowInteractor
        self.plotter._Iren = _FakeIren()
        if getattr(self.plotter, 'scalar_bar', None) is not None:
            self.plotter.scalar_bar = None
        if getattr(self.plotter, 'picker', None) is not None:
            self.plotter.picker = None
        # XXX end PyVista
        # drop every remaining strong reference held on this instance
        for key in ('reps', 'plotter', 'main_menu', 'window', 'tool_bar',
                    'status_bar', 'interactor', 'mpl_canvas', 'time_actor',
                    'picked_renderer', 'act_data_smooth', '_iren',
                    'actions', 'sliders', 'geo', '_hemi_actors', '_data'):
            setattr(self, key, None)
    @contextlib.contextmanager
    def ensure_minimum_sizes(self):
        """Ensure that widgets respect the windows size."""
        from ..backends._pyvista import _process_events
        sz = self._size
        # only needed when the mpl traces share the window with the 3D view
        adjust_mpl = self.show_traces and not self.separate_canvas
        if not adjust_mpl:
            yield
        else:
            # canvas height derived from the interactor fraction of the
            # total requested window height
            mpl_h = int(round((sz[1] * self.interactor_fraction) /
                              (1 - self.interactor_fraction)))
            self.mpl_canvas.canvas.setMinimumSize(sz[0], mpl_h)
            try:
                yield
            finally:
                self.splitter.setSizes([sz[1], mpl_h])
                # pump Qt events so the splitter sizes are applied
                _process_events(self.plotter)
                _process_events(self.plotter)
                # then allow the canvas to shrink again
                self.mpl_canvas.canvas.setMinimumSize(0, 0)
                _process_events(self.plotter)
                _process_events(self.plotter)
                # sizes could change, update views
                for hemi in ('lh', 'rh'):
                    for ri, ci, v in self._iter_views(hemi):
                        self.show_view(view=v, row=ri, col=ci)
                _process_events(self.plotter)
def toggle_interface(self, value=None):
"""Toggle the interface.
Parameters
----------
value : bool | None
If True, the widgets are shown and if False, they
are hidden. If None, the state of the widgets is
toggled. Defaults to None.
"""
if value is None:
self.visibility = not self.visibility
else:
self.visibility = value
# update tool bar icon
if self.visibility:
self.actions["visibility"].setIcon(self.icons["visibility_on"])
else:
self.actions["visibility"].setIcon(self.icons["visibility_off"])
# manage sliders
for slider in self.plotter.slider_widgets:
slider_rep = slider.GetRepresentation()
if self.visibility:
slider_rep.VisibilityOn()
else:
slider_rep.VisibilityOff()
# manage time label
time_label = self._data['time_label']
# if we actually have time points, we will show the slider so
# hide the time actor
have_ts = self._times is not None and len(self._times) > 1
if self.time_actor is not None:
if self.visibility and time_label is not None and not have_ts:
self.time_actor.SetInput(time_label(self._current_time))
self.time_actor.VisibilityOn()
else:
self.time_actor.VisibilityOff()
self._update()
def apply_auto_scaling(self):
"""Detect automatically fitting scaling parameters."""
self._update_auto_scaling()
for key in ('fmin', 'fmid', 'fmax'):
self.reps[key].SetValue(self._data[key])
self._update()
def restore_user_scaling(self):
"""Restore original scaling parameters."""
self._update_auto_scaling(restore=True)
for key in ('fmin', 'fmid', 'fmax'):
self.reps[key].SetValue(self._data[key])
self._update()
def toggle_playback(self, value=None):
"""Toggle time playback.
Parameters
----------
value : bool | None
If True, automatic time playback is enabled and if False,
it's disabled. If None, the state of time playback is toggled.
Defaults to None.
"""
if value is None:
self.playback = not self.playback
else:
self.playback = value
# update tool bar icon
if self.playback:
self.actions["play"].setIcon(self.icons["pause"])
else:
self.actions["play"].setIcon(self.icons["play"])
if self.playback:
time_data = self._data['time']
max_time = np.max(time_data)
if self._current_time == max_time: # start over
self.set_time_point(0) # first index
self._last_tick = time.time()
def reset(self):
"""Reset view and time step."""
self.reset_view()
max_time = len(self._data['time']) - 1
if max_time > 0:
self.callbacks["time"](
self._data["initial_time_idx"],
update_widget=True,
)
self._update()
    def set_playback_speed(self, speed):
        """Set the time playback speed.

        Parameters
        ----------
        speed : float
            The speed of the playback.
        """
        # consumed by _advance as data-time seconds per wall-clock second
        self.playback_speed = speed
@safe_event
def _play(self):
if self.playback:
try:
self._advance()
except Exception:
self.toggle_playback(value=False)
raise
def _advance(self):
this_time = time.time()
delta = this_time - self._last_tick
self._last_tick = time.time()
time_data = self._data['time']
times = np.arange(self._n_times)
time_shift = delta * self.playback_speed
max_time = np.max(time_data)
time_point = min(self._current_time + time_shift, max_time)
# always use linear here -- this does not determine the data
# interpolation mode, it just finds where we are (in time) in
# terms of the time indices
idx = np.interp(time_point, time_data, times)
self.callbacks["time"](idx, update_widget=True)
if time_point == max_time:
self.toggle_playback(value=False)
def _set_slider_style(self):
for slider in self.sliders.values():
if slider is not None:
slider_rep = slider.GetRepresentation()
slider_rep.SetSliderLength(self.slider_length)
slider_rep.SetSliderWidth(self.slider_width)
slider_rep.SetTubeWidth(self.slider_tube_width)
slider_rep.GetSliderProperty().SetColor(self.slider_color)
slider_rep.GetTubeProperty().SetColor(self.slider_tube_color)
slider_rep.GetLabelProperty().SetShadow(False)
slider_rep.GetLabelProperty().SetBold(True)
slider_rep.GetLabelProperty().SetColor(self._fg_color)
slider_rep.GetTitleProperty().ShallowCopy(
slider_rep.GetLabelProperty()
)
slider_rep.GetCapProperty().SetOpacity(0)
    def _configure_notebook(self):
        # attach the notebook interactor as the figure's display object
        from ._notebook import _NotebookInteractor
        self._renderer.figure.display = _NotebookInteractor(self)
def _configure_time_label(self):
self.time_actor = self._data.get('time_actor')
if self.time_actor is not None:
self.time_actor.SetPosition(0.5, 0.03)
self.time_actor.GetTextProperty().SetJustificationToCentered()
self.time_actor.GetTextProperty().BoldOn()
self.time_actor.VisibilityOff()
def _configure_scalar_bar(self):
if self._colorbar_added:
scalar_bar = self.plotter.scalar_bar
scalar_bar.SetOrientationToVertical()
scalar_bar.SetHeight(0.6)
scalar_bar.SetWidth(0.05)
scalar_bar.SetPosition(0.02, 0.2)
    def _configure_sliders(self):
        """Create orientation, smoothing, time, speed and clim sliders."""
        # Orientation slider
        # Use 'lh' as a reference for orientation for 'both'
        if self._hemi == 'both':
            hemis_ref = ['lh']
        else:
            hemis_ref = self._hemis
        for hemi in hemis_ref:
            for ri, ci, view in self._iter_views(hemi):
                orientation_name = f"orientation_{hemi}_{ri}_{ci}"
                self.plotter.subplot(ri, ci)
                if view == 'flat':
                    # flat views keep a fixed camera: no orientation slider
                    self.callbacks[orientation_name] = None
                    continue
                self.callbacks[orientation_name] = ShowView(
                    plotter=self.plotter,
                    brain=self,
                    orientation=self.orientation,
                    hemi=hemi,
                    row=ri,
                    col=ci,
                )
                self.sliders[orientation_name] = \
                    self.plotter.add_text_slider_widget(
                        self.callbacks[orientation_name],
                        value=0,
                        data=self.orientation,
                        pointa=(0.82, 0.74),
                        pointb=(0.98, 0.74),
                        event_type='always'
                    )
                orientation_rep = \
                    self.sliders[orientation_name].GetRepresentation()
                orientation_rep.ShowSliderLabelOff()
                self.callbacks[orientation_name].slider_rep = orientation_rep
                # initialize the slider to the subplot's current view
                self.callbacks[orientation_name](view, update_widget=True)

        # Put other sliders on the bottom right view
        ri, ci = np.array(self._subplot_shape) - 1
        self.plotter.subplot(ri, ci)

        # Smoothing slider
        self.callbacks["smoothing"] = IntSlider(
            plotter=self.plotter,
            callback=self.set_data_smoothing,
            first_call=False,
        )
        self.sliders["smoothing"] = self.plotter.add_slider_widget(
            self.callbacks["smoothing"],
            value=self._data['smoothing_steps'],
            rng=self.default_smoothing_range, title="smoothing",
            pointa=(0.82, 0.90),
            pointb=(0.98, 0.90)
        )
        self.callbacks["smoothing"].slider_rep = \
            self.sliders["smoothing"].GetRepresentation()

        # Time slider
        max_time = len(self._data['time']) - 1
        # VTK on macOS bombs if we create these then hide them, so don't
        # even create them
        if max_time < 1:
            self.callbacks["time"] = None
            self.sliders["time"] = None
        else:
            self.callbacks["time"] = TimeSlider(
                plotter=self.plotter,
                brain=self,
                first_call=False,
                callback=self.plot_time_line,
            )
            self.sliders["time"] = self.plotter.add_slider_widget(
                self.callbacks["time"],
                value=self._data['time_idx'],
                rng=[0, max_time],
                pointa=(0.23, 0.1),
                pointb=(0.77, 0.1),
                event_type='always'
            )
            self.callbacks["time"].slider_rep = \
                self.sliders["time"].GetRepresentation()
            # configure properties of the time slider
            self.sliders["time"].GetRepresentation().SetLabelFormat(
                'idx=%0.1f')
        # the slider title / time actor show the formatted current time
        current_time = self._current_time
        assert current_time is not None  # should never be the case, float
        time_label = self._data['time_label']
        if callable(time_label):
            current_time = time_label(current_time)
        else:
            current_time = time_label
        if self.sliders["time"] is not None:
            self.sliders["time"].GetRepresentation().SetTitleText(current_time)
        if self.time_actor is not None:
            self.time_actor.SetInput(current_time)
        del current_time

        # Playback speed slider
        if self.sliders["time"] is None:
            # no time axis: playback controls are meaningless
            self.callbacks["playback_speed"] = None
            self.sliders["playback_speed"] = None
        else:
            self.callbacks["playback_speed"] = SmartSlider(
                plotter=self.plotter,
                callback=self.set_playback_speed,
            )
            self.sliders["playback_speed"] = self.plotter.add_slider_widget(
                self.callbacks["playback_speed"],
                value=self.default_playback_speed_value,
                rng=self.default_playback_speed_range, title="speed",
                pointa=(0.02, 0.1),
                pointb=(0.18, 0.1),
                event_type='always'
            )
            self.callbacks["playback_speed"].slider_rep = \
                self.sliders["playback_speed"].GetRepresentation()

        # Colormap slider
        pointa = np.array((0.82, 0.26))
        pointb = np.array((0.98, 0.26))
        shift = np.array([0, 0.1])
        for idx, key in enumerate(self.keys):
            # only the bottom (fmin) slider carries the "clim" title
            title = "clim" if not idx else ""
            rng = _get_range(self)
            self.callbacks[key] = BumpColorbarPoints(
                plotter=self.plotter,
                brain=self,
                name=key
            )
            self.sliders[key] = self.plotter.add_slider_widget(
                self.callbacks[key],
                value=self._data[key],
                rng=rng, title=title,
                pointa=pointa + idx * shift,
                pointb=pointb + idx * shift,
                event_type="always",
            )

        # fscale
        self.callbacks["fscale"] = UpdateColorbarScale(
            plotter=self.plotter,
            brain=self,
        )
        self.sliders["fscale"] = self.plotter.add_slider_widget(
            self.callbacks["fscale"],
            value=1.0,
            rng=self.default_scaling_range, title="fscale",
            pointa=(0.82, 0.10),
            pointb=(0.98, 0.10)
        )
        self.callbacks["fscale"].slider_rep = \
            self.sliders["fscale"].GetRepresentation()

        # register colorbar slider representations
        self.reps = \
            {key: self.sliders[key].GetRepresentation() for key in self.keys}
        for name in ("fmin", "fmid", "fmax", "fscale"):
            self.callbacks[name].reps = self.reps

        # set the slider style
        self._set_slider_style()
    def _configure_playback(self):
        # poll _play on a timer at the configured refresh rate
        self.plotter.add_callback(self._play, self.refresh_rate_ms)
    def _configure_mplcanvas(self):
        """Create the matplotlib trace canvas below the 3D interactor."""
        win = self.plotter.app_window
        dpi = win.windowHandle().screen().logicalDotsPerInch()
        # height ratio between the 3D view and the trace canvas
        ratio = (1 - self.interactor_fraction) / self.interactor_fraction
        w = self.interactor.geometry().width()
        h = self.interactor.geometry().height() / ratio
        # Get the fractional components for the brain and mpl
        self.mpl_canvas = MplCanvas(self, w / dpi, h / dpi, dpi)
        xlim = [np.min(self._data['time']),
                np.max(self._data['time'])]
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=UserWarning)
            self.mpl_canvas.axes.set(xlim=xlim)
        if not self.separate_canvas:
            # embed the canvas in a vertical splitter under the 3D view
            from PyQt5.QtWidgets import QSplitter
            from PyQt5.QtCore import Qt
            canvas = self.mpl_canvas.canvas
            vlayout = self.plotter.frame.layout()
            vlayout.removeWidget(self.interactor)
            self.splitter = splitter = QSplitter(
                orientation=Qt.Vertical, parent=self.plotter.frame)
            vlayout.addWidget(splitter)
            splitter.addWidget(self.interactor)
            splitter.addWidget(canvas)
        self.mpl_canvas.set_color(
            bg_color=self._bg_color,
            fg_color=self._fg_color,
        )
        self.mpl_canvas.show()
    def _configure_vertex_time_course(self):
        """Plot the GFP trace and pick an initial vertex per hemisphere."""
        if not self.show_traces:
            return
        if self.mpl_canvas is None:
            self._configure_mplcanvas()
        else:
            self.clear_glyphs()

        # plot the GFP
        y = np.concatenate(list(v[0] for v in self.act_data_smooth.values()
                                if v[0] is not None))
        y = np.linalg.norm(y, axis=0) / np.sqrt(len(y))
        self.gfp, = self.mpl_canvas.axes.plot(
            self._data['time'], y,
            lw=3, label='GFP', zorder=3, color=self._fg_color,
            alpha=0.5, ls=':')

        # now plot the time line
        self.plot_time_line()

        # then the picked points
        for idx, hemi in enumerate(['lh', 'rh', 'vol']):
            act_data = self.act_data_smooth.get(hemi, [None])[0]
            if act_data is None:
                continue
            hemi_data = self._data[hemi]
            vertices = hemi_data['vertices']

            # simulate a picked renderer
            if self._hemi in ('both', 'rh') or hemi == 'vol':
                idx = 0
            self.picked_renderer = self.plotter.renderers[idx]

            # initialize the default point
            if self._data['initial_time'] is not None:
                # pick at that time
                use_data = act_data[
                    :, [np.round(self._data['time_idx']).astype(int)]]
            else:
                use_data = act_data
            # pick the vertex with the strongest absolute response
            ind = np.unravel_index(np.argmax(np.abs(use_data), axis=None),
                                   use_data.shape)
            if hemi == 'vol':
                mesh = hemi_data['grid']
            else:
                mesh = self._layered_meshes[hemi]._polydata
            vertex_id = vertices[ind[0]]
            self._add_vertex_glyph(hemi, mesh, vertex_id)
    def _configure_picking(self):
        """Cache per-hemi activation data and register mouse callbacks."""
        from ..backends._pyvista import _update_picking_callback
        # get data for each hemi
        for idx, hemi in enumerate(['vol', 'lh', 'rh']):
            hemi_data = self._data.get(hemi)
            if hemi_data is not None:
                act_data = hemi_data['array']
                if act_data.ndim == 3:
                    # vector data: collapse to per-vertex magnitude
                    act_data = np.linalg.norm(act_data, axis=1)
                smooth_mat = hemi_data.get('smooth_mat')
                vertices = hemi_data['vertices']
                if hemi == 'vol':
                    assert smooth_mat is None
                    # build a selection matrix mapping used vertices to rows
                    smooth_mat = sparse.csr_matrix(
                        (np.ones(len(vertices)),
                         (vertices, np.arange(len(vertices)))))
                self.act_data_smooth[hemi] = (act_data, smooth_mat)

        _update_picking_callback(
            self.plotter,
            self._on_mouse_move,
            self._on_button_press,
            self._on_button_release,
            self._on_pick
        )
    def _configure_trace_mode(self):
        """Add annotation and label-extraction widgets to the tool bar."""
        from ...source_estimate import _get_allowed_label_modes
        from ...label import _read_annot_cands
        from PyQt5.QtWidgets import QComboBox, QLabel
        if not self.show_traces:
            return

        # do not show trace mode for volumes
        if (self._data.get('src', None) is not None and
                self._data['src'].kind == 'volume'):
            self._configure_vertex_time_course()
            return

        # setup candidate annots
        def _set_annot(annot):
            # switching annotation resets any picked traces
            self.clear_glyphs()
            self.remove_labels()
            self.remove_annotations()
            self.annot = annot

            if annot == 'None':
                self.traces_mode = 'vertex'
                self._configure_vertex_time_course()
            else:
                self.traces_mode = 'label'
                self._configure_label_time_course()
            self._update()

        dir_name = op.join(self._subjects_dir, self._subject_id, 'label')
        cands = _read_annot_cands(dir_name)
        self.tool_bar.addSeparator()
        self.tool_bar.addWidget(QLabel("Annotation"))
        self._annot_cands_widget = QComboBox()
        self.tool_bar.addWidget(self._annot_cands_widget)
        self._annot_cands_widget.addItem('None')
        for cand in cands:
            self._annot_cands_widget.addItem(cand)
        self.annot = cands[0]

        # setup label extraction parameters
        def _set_label_mode(mode):
            if self.traces_mode != 'label':
                return
            import copy
            # re-add the currently picked labels under the new mode
            glyphs = copy.deepcopy(self.picked_patches)
            self.label_extract_mode = mode
            self.clear_glyphs()
            for hemi in self._hemis:
                for label_id in glyphs[hemi]:
                    label = self._annotation_labels[hemi][label_id]
                    vertex_id = label.vertices[0]
                    self._add_label_glyph(hemi, None, vertex_id)
            self.mpl_canvas.axes.relim()
            self.mpl_canvas.axes.autoscale_view()
            self.mpl_canvas.update_plot()
            self._update()

        self.tool_bar.addSeparator()
        self.tool_bar.addWidget(QLabel("Label extraction mode"))
        self._label_mode_widget = QComboBox()
        self.tool_bar.addWidget(self._label_mode_widget)
        stc = self._data["stc"]
        modes = _get_allowed_label_modes(stc)
        if self._data["src"] is None:
            # source-space-dependent modes need src; filter them out
            modes = [m for m in modes if m not in
                     self.default_label_extract_modes["src"]]
        for mode in modes:
            self._label_mode_widget.addItem(mode)
            self.label_extract_mode = mode

        if self.traces_mode == 'vertex':
            _set_annot('None')
        else:
            _set_annot(self.annot)
        self._annot_cands_widget.setCurrentText(self.annot)
        self._label_mode_widget.setCurrentText(self.label_extract_mode)
        # connect signals last so the initialization above does not fire them
        self._annot_cands_widget.currentTextChanged.connect(_set_annot)
        self._label_mode_widget.currentTextChanged.connect(_set_label_mode)
def _load_icons(self):
from PyQt5.QtGui import QIcon
from ..backends._pyvista import _init_resources
_init_resources()
self.icons["help"] = QIcon(":/help.svg")
self.icons["play"] = QIcon(":/play.svg")
self.icons["pause"] = QIcon(":/pause.svg")
self.icons["reset"] = QIcon(":/reset.svg")
self.icons["scale"] = QIcon(":/scale.svg")
self.icons["clear"] = QIcon(":/clear.svg")
self.icons["movie"] = QIcon(":/movie.svg")
self.icons["restore"] = QIcon(":/restore.svg")
self.icons["screenshot"] = QIcon(":/screenshot.svg")
self.icons["visibility_on"] = QIcon(":/visibility_on.svg")
self.icons["visibility_off"] = QIcon(":/visibility_off.svg")
def _save_movie_noname(self):
    # Thin wrapper so toolbar/shortcut slots (which pass no arguments)
    # can trigger save_movie with filename=None.
    return self.save_movie(None)
def _configure_tool_bar(self):
    """Populate the tool bar: one action per feature, plus shortcuts."""
    # (action key, icon key, tooltip, slot, keyboard shortcut or None)
    specs = (
        ("screenshot", "screenshot", "Take a screenshot",
         self.plotter._qt_screenshot, None),
        ("movie", "movie", "Save movie...",
         self._save_movie_noname, "ctrl+shift+s"),
        ("visibility", "visibility_on", "Toggle Visibility",
         self.toggle_interface, "i"),
        ("play", "play", "Play/Pause",
         self.toggle_playback, " "),
        ("reset", "reset", "Reset",
         self.reset, None),
        ("scale", "scale", "Auto-Scale",
         self.apply_auto_scaling, "s"),
        ("restore", "restore", "Restore scaling",
         self.restore_user_scaling, "r"),
        ("clear", "clear", "Clear traces",
         self.clear_glyphs, "c"),
        ("help", "help", "Help",
         self.help, "?"),
    )
    for key, icon_key, tooltip, slot, shortcut in specs:
        action = self.tool_bar.addAction(self.icons[icon_key], tooltip, slot)
        self.actions[key] = action
        if shortcut is not None:
            action.setShortcut(shortcut)
def _configure_menu(self):
    """Replace the default picking menu with an MNE help menu."""
    # remove default picking menu
    stale = [action for action in self.main_menu.actions()
             if action.text() == "Tools"]
    for action in stale:
        self.main_menu.removeAction(action)
    # add help menu
    help_menu = self.main_menu.addMenu('Help')
    help_menu.addAction('Show MNE key bindings\t?', self.help)
def _configure_status_bar(self):
    """Create the status-bar message label and a hidden progress bar."""
    from PyQt5.QtWidgets import QLabel, QProgressBar
    self.status_msg = QLabel(self.default_status_bar_msg)
    self.status_progress = QProgressBar()
    # the message takes the stretch; the progress bar stays compact and
    # hidden until something shows it
    layout = self.status_bar.layout()
    layout.addWidget(self.status_msg, 1)
    layout.addWidget(self.status_progress, 0)
    self.status_progress.hide()
def _on_mouse_move(self, vtk_picker, event):
if self._mouse_no_mvt:
self._mouse_no_mvt -= 1
def _on_button_press(self, vtk_picker, event):
    # Arm a 2-event countdown; _on_mouse_move decrements it, so on
    # release a positive counter means "click without (much) drag".
    self._mouse_no_mvt = 2
def _on_button_release(self, vtk_picker, event):
    # Only treat the release as a pick if little/no motion happened
    # since the press (see _on_button_press/_on_mouse_move).
    if self._mouse_no_mvt > 0:
        x, y = vtk_picker.GetEventPosition()
        # programmatically detect the picked renderer
        self.picked_renderer = self.plotter.iren.FindPokedRenderer(x, y)
        # trigger the pick
        self.plotter.picker.Pick(x, y, 0, self.picked_renderer)
    # reset the counter for the next press either way
    self._mouse_no_mvt = 0
def _on_pick(self, vtk_picker, event):
    # Main picking callback: figure out what was clicked (an existing
    # glyph sphere, a surface mesh, or the volume grid) and toggle the
    # corresponding glyph/trace.
    if not self.show_traces:
        return
    # vtk_picker is a vtkCellPicker
    cell_id = vtk_picker.GetCellId()
    mesh = vtk_picker.GetDataSet()
    if mesh is None or cell_id == -1 or not self._mouse_no_mvt:
        return  # don't pick
    # 1) Check to see if there are any spheres along the ray
    if len(self._spheres):
        collection = vtk_picker.GetProp3Ds()
        found_sphere = None
        for ii in range(collection.GetNumberOfItems()):
            actor = collection.GetItemAsObject(ii)
            for sphere in self._spheres:
                if any(a is actor for a in sphere._actors):
                    found_sphere = sphere
                    break
            if found_sphere is not None:
                break
        if found_sphere is not None:
            assert found_sphere._is_glyph
            mesh = found_sphere
    # 2) Remove sphere if it's what we have
    if hasattr(mesh, "_is_glyph"):
        self._remove_vertex_glyph(mesh)
        return
    # 3) Otherwise, pick the objects in the scene
    try:
        hemi = mesh._hemi
    except AttributeError:  # volume
        hemi = 'vol'
    else:
        assert hemi in ('lh', 'rh')
    if self.act_data_smooth[hemi][0] is None:  # no data to add for hemi
        return
    pos = np.array(vtk_picker.GetPickPosition())
    if hemi == 'vol':
        # VTK will give us the point closest to the viewer in the vol.
        # We want to pick the point with the maximum value along the
        # camera-to-click array, which fortunately we can get "just"
        # by inspecting the points that are sufficiently close to the
        # ray.
        grid = mesh = self._data[hemi]['grid']
        vertices = self._data[hemi]['vertices']
        coords = self._data[hemi]['grid_coords'][vertices]
        scalars = grid.cell_arrays['values'][vertices]
        spacing = np.array(grid.GetSpacing())
        max_dist = np.linalg.norm(spacing) / 2.
        origin = vtk_picker.GetRenderer().GetActiveCamera().GetPosition()
        ori = pos - origin
        ori /= np.linalg.norm(ori)
        # the magic formula: distance from a ray to a given point
        dists = np.linalg.norm(np.cross(ori, coords - pos), axis=1)
        assert dists.shape == (len(coords),)
        mask = dists <= max_dist
        idx = np.where(mask)[0]
        if len(idx) == 0:
            return  # weird point on edge of volume?
        # useful for debugging the ray by mapping it into the volume:
        # dists = dists - dists.min()
        # dists = (1. - dists / dists.max()) * self._cmap_range[1]
        # grid.cell_arrays['values'][vertices] = dists * mask
        # among the voxels near the ray, take the strongest activation
        idx = idx[np.argmax(np.abs(scalars[idx]))]
        vertex_id = vertices[idx]
        # Naive way: convert pos directly to idx; i.e., apply mri_src_t
        # shape = self._data[hemi]['grid_shape']
        # taking into account the cell vs point difference (spacing/2)
        # shift = np.array(grid.GetOrigin()) + spacing / 2.
        # ijk = np.round((pos - shift) / spacing).astype(int)
        # vertex_id = np.ravel_multi_index(ijk, shape, order='F')
    else:
        # surface: take the cell's point closest to the pick position
        vtk_cell = mesh.GetCell(cell_id)
        cell = [vtk_cell.GetPointId(point_id) for point_id
                in range(vtk_cell.GetNumberOfPoints())]
        vertices = mesh.points[cell]
        idx = np.argmin(abs(vertices - pos), axis=0)
        vertex_id = cell[idx[0]]
    if self.traces_mode == 'label':
        self._add_label_glyph(hemi, mesh, vertex_id)
    else:
        self._add_vertex_glyph(hemi, mesh, vertex_id)
def _add_label_glyph(self, hemi, mesh, vertex_id):
    # Toggle the annotation label that contains ``vertex_id``; picking
    # a label that is already shown removes it instead.
    if hemi == 'vol':
        return
    label_id = self._vertex_to_label_id[hemi][vertex_id]
    label = self._annotation_labels[hemi][label_id]
    # remove the patch if already picked
    if label_id in self.picked_patches[hemi]:
        self._remove_label_glyph(hemi, label_id)
        return
    if hemi == label.hemi:
        self.add_label(label, borders=True, reset_camera=False)
        self.picked_patches[hemi].append(label_id)
def _remove_label_glyph(self, hemi, label_id):
    # Undo _add_label_glyph: remove the trace line, return the color to
    # the cycle, and drop the border overlay from the layered mesh.
    label = self._annotation_labels[hemi][label_id]
    label._line.remove()
    self.color_cycle.restore(label._color)
    self.mpl_canvas.update_plot()
    self._layered_meshes[hemi].remove_overlay(label.name)
    self.picked_patches[hemi].remove(label_id)
def _add_vertex_glyph(self, hemi, mesh, vertex_id):
    # Add a sphere marker at the picked vertex/voxel (in every subplot
    # that shows ``hemi``) and plot its time course.
    if vertex_id in self.picked_points[hemi]:
        return
    # skip if the wrong hemi is selected
    if self.act_data_smooth[hemi][0] is None:
        return
    from ..backends._pyvista import _sphere
    color = next(self.color_cycle)
    line = self.plot_time_course(hemi, vertex_id, color)
    if hemi == 'vol':
        # voxel center = mean of the cell's corner points
        ijk = np.unravel_index(
            vertex_id, np.array(mesh.GetDimensions()) - 1, order='F')
        # should just be GetCentroid(center), but apparently it's VTK9+:
        # center = np.empty(3)
        # voxel.GetCentroid(center)
        voxel = mesh.GetCell(*ijk)
        pts = voxel.GetPoints()
        n_pts = pts.GetNumberOfPoints()
        center = np.empty((n_pts, 3))
        for ii in range(pts.GetNumberOfPoints()):
            pts.GetPoint(ii, center[ii])
        center = np.mean(center, axis=0)
    else:
        center = mesh.GetPoints().GetPoint(vertex_id)
    del mesh
    # from the picked renderer to the subplot coords
    # NOTE(review): row/col are computed but not used below — confirm
    # whether they can be dropped.
    rindex = self.plotter.renderers.index(self.picked_renderer)
    row, col = self.plotter.index_to_loc(rindex)
    actors = list()
    spheres = list()
    for ri, ci, _ in self._iter_views(hemi):
        self.plotter.subplot(ri, ci)
        # Using _sphere() instead of renderer.sphere() for 2 reasons:
        # 1) renderer.sphere() fails on Windows in a scenario where a lot
        # of picking requests are done in a short span of time (could be
        # mitigated with synchronization/delay?)
        # 2) the glyph filter is used in renderer.sphere() but only one
        # sphere is required in this function.
        actor, sphere = _sphere(
            plotter=self.plotter,
            center=np.array(center),
            color=color,
            radius=4.0,
        )
        actors.append(actor)
        spheres.append(sphere)
    # add metadata for picking
    for sphere in spheres:
        sphere._is_glyph = True
        sphere._hemi = hemi
        sphere._line = line
        sphere._actors = actors
        sphere._color = color
        sphere._vertex_id = vertex_id
    self.picked_points[hemi].append(vertex_id)
    self._spheres.extend(spheres)
    self.pick_table[vertex_id] = spheres
    return sphere
def _remove_vertex_glyph(self, mesh, render=True):
    """Remove a picked vertex glyph (spheres + its time-course line).

    Parameters
    ----------
    mesh : object
        A sphere glyph created by ``_add_vertex_glyph`` (carries the
        ``_vertex_id``, ``_hemi``, ``_color``, ``_line`` and ``_actors``
        metadata attributes).
    render : bool
        If True, re-render when removing each actor.
    """
    vertex_id = mesh._vertex_id
    if vertex_id not in self.pick_table:
        return
    hemi = mesh._hemi
    color = mesh._color
    spheres = self.pick_table[vertex_id]
    # _line is set from plot_time_course(), which returns None when no
    # mpl canvas exists -- guard so removal still works in that case
    if spheres[0]._line is not None:
        spheres[0]._line.remove()
        self.mpl_canvas.update_plot()
    self.picked_points[hemi].remove(vertex_id)
    with warnings.catch_warnings(record=True):
        # We intentionally ignore these in case we have traversed the
        # entire color cycle
        warnings.simplefilter('ignore')
        self.color_cycle.restore(color)
    for sphere in spheres:
        # remove all actors
        self.plotter.remove_actor(sphere._actors, render=render)
        sphere._actors = None
        # list.remove is the idiomatic (single-scan) form of
        # pop(index(...))
        self._spheres.remove(sphere)
    self.pick_table.pop(vertex_id)
def clear_glyphs(self):
    """Clear the picking glyphs."""
    if not self.time_viewer:
        return
    # remove all vertex spheres first...
    for sphere in list(self._spheres):  # will remove itself, so copy
        self._remove_vertex_glyph(sphere, render=False)
    assert sum(len(v) for v in self.picked_points.values()) == 0
    assert len(self.pick_table) == 0
    assert len(self._spheres) == 0
    # ...then all picked label patches...
    for hemi in self._hemis:
        for label_id in list(self.picked_patches[hemi]):
            self._remove_label_glyph(hemi, label_id)
    assert sum(len(v) for v in self.picked_patches.values()) == 0
    # ...and finally the GFP trace, if any
    if self.gfp is not None:
        self.gfp.remove()
        self.gfp = None
    self._update()
def plot_time_course(self, hemi, vertex_id, color):
    """Plot the vertex time course.

    Parameters
    ----------
    hemi : str
        The hemisphere id of the vertex.
    vertex_id : int
        The vertex identifier in the mesh.
    color : matplotlib color
        The color of the time course.

    Returns
    -------
    line : matplotlib object
        The time line object. ``None`` if there is no mpl canvas.
    """
    if self.mpl_canvas is None:
        return
    time = self._data['time'].copy()  # avoid circular ref
    # build a legend label containing the MNI coordinates of the pick
    if hemi == 'vol':
        hemi_str = 'V'
        xfm = read_talxfm(
            self._subject_id, self._subjects_dir)
        if self._units == 'mm':
            xfm['trans'][:3, 3] *= 1000.
        ijk = np.unravel_index(
            vertex_id, self._data[hemi]['grid_shape'], order='F')
        src_mri_t = self._data[hemi]['grid_src_mri_t']
        mni = apply_trans(np.dot(xfm['trans'], src_mri_t), ijk)
    else:
        hemi_str = 'L' if hemi == 'lh' else 'R'
        mni = vertex_to_mni(
            vertices=vertex_id,
            hemis=0 if hemi == 'lh' else 1,
            subject=self._subject_id,
            subjects_dir=self._subjects_dir
        )
    label = "{}:{} MNI: {}".format(
        hemi_str, str(vertex_id).ljust(6),
        ', '.join('%5.1f' % m for m in mni))
    # map the (possibly decimated) data to this vertex
    act_data, smooth = self.act_data_smooth[hemi]
    if smooth is not None:
        act_data = smooth[vertex_id].dot(act_data)[0]
    else:
        act_data = act_data[vertex_id].copy()
    line = self.mpl_canvas.plot(
        time,
        act_data,
        label=label,
        lw=1.,
        color=color,
        zorder=4,
    )
    return line
def plot_time_line(self):
    """Add the time line to the MPL widget."""
    if self.mpl_canvas is None:
        return
    if isinstance(self.show_traces, bool) and self.show_traces:
        # add time information
        current_time = self._current_time
        # create the vertical line lazily on first call...
        if not hasattr(self, "time_line"):
            self.time_line = self.mpl_canvas.plot_time_line(
                x=current_time,
                label='time',
                color=self._fg_color,
                lw=1,
            )
        # ...then just move it on subsequent calls
        self.time_line.set_xdata(current_time)
        self.mpl_canvas.update_plot()
def help(self):
    """Display the help window."""
    pairs = [
        ('?', 'Display help window'),
        ('i', 'Toggle interface'),
        ('s', 'Apply auto-scaling'),
        ('r', 'Restore original clim'),
        ('c', 'Clear all traces'),
        ('Space', 'Start/Pause playback'),
    ]
    # one column of keys, one column of descriptions
    keys = '\n'.join(key for key, _ in pairs)
    descriptions = '\n'.join(desc for _, desc in pairs)
    _show_help(
        col1=keys,
        col2=descriptions,
        width=5,
        height=2,
    )
def _clear_callbacks(self):
    """Break reference cycles held by widget callbacks, then drop them."""
    from ..backends._pyvista import _remove_picking_callback
    if not hasattr(self, 'callbacks'):
        return
    for callback in self.callbacks.values():
        if callback is None:
            continue
        # null out back-references so the GC can collect the widgets
        for attr in ("plotter", "brain", "slider_rep"):
            if hasattr(callback, attr):
                setattr(callback, attr, None)
    self.callbacks.clear()
    if self.show_traces:
        _remove_picking_callback(self._iren, self.plotter.picker)
@property
def interaction(self):
    """The interaction style."""
    # backing attribute set by the interaction setter below
    return self._interaction
@interaction.setter
def interaction(self, interaction):
    """Set the interaction style (``'trackball'`` or ``'terrain'``)."""
    _validate_type(interaction, str, 'interaction')
    _check_option('interaction', interaction, ('trackball', 'terrain'))
    # apply the style to every subplot of the figure
    for row, col, _ in self._iter_views('vol'):  # will traverse all
        self._renderer.subplot(row, col)
        self._renderer.set_interaction(interaction)
def _cortex_colormap(self, cortex):
"""Return the colormap corresponding to the cortex."""
colormap_map = dict(classic=dict(colormap="Greys",
vmin=-1, vmax=2),
high_contrast=dict(colormap="Greys",
vmin=-.1, vmax=1.3),
low_contrast=dict(colormap="Greys",
vmin=-5, vmax=5),
bone=dict(colormap="bone_r",
vmin=-.2, vmax=2),
)
return colormap_map[cortex]
@verbose
def add_data(self, array, fmin=None, fmid=None, fmax=None,
             thresh=None, center=None, transparent=False, colormap="auto",
             alpha=1, vertices=None, smoothing_steps=None, time=None,
             time_label="auto", colorbar=True,
             hemi=None, remove_existing=None, time_label_size=None,
             initial_time=None, scale_factor=None, vector_alpha=None,
             clim=None, src=None, volume_options=0.4, colorbar_kwargs=None,
             verbose=None):
    """Display data from a numpy array on the surface or volume.

    This provides a similar interface to
    :meth:`surfer.Brain.add_overlay`, but it displays
    it with a single colormap. It offers more flexibility over the
    colormap, and provides a way to display four-dimensional data
    (i.e., a timecourse) or five-dimensional data (i.e., a
    vector-valued timecourse).

    .. note:: ``fmin`` sets the low end of the colormap, and is separate
              from thresh (this is a different convention from
              :meth:`surfer.Brain.add_overlay`).

    Parameters
    ----------
    array : numpy array, shape (n_vertices[, 3][, n_times])
        Data array. For the data to be understood as vector-valued
        (3 values per vertex corresponding to X/Y/Z surface RAS),
        then ``array`` must have all 3 dimensions.
        If vectors with no time dimension are desired, consider using a
        singleton (e.g., ``np.newaxis``) to create a "time" dimension
        and pass ``time_label=None`` (vector values are not supported).
    %(fmin_fmid_fmax)s
    %(thresh)s
    %(center)s
    %(transparent)s
    colormap : str, list of color, or array
        Name of matplotlib colormap to use, a list of matplotlib colors,
        or a custom look up table (an n x 4 array coded with RBGA values
        between 0 and 255), the default "auto" chooses a default divergent
        colormap, if "center" is given (currently "icefire"), otherwise a
        default sequential colormap (currently "rocket").
    alpha : float in [0, 1]
        Alpha level to control opacity of the overlay.
    vertices : numpy array
        Vertices for which the data is defined (needed if
        ``len(data) < nvtx``).
    smoothing_steps : int or None
        Number of smoothing steps (smoothing is used if len(data) < nvtx)
        The value 'nearest' can be used too. None (default) will use as
        many as necessary to fill the surface.
    time : numpy array
        Time points in the data array (if data is 2D or 3D).
    %(time_label)s
    colorbar : bool
        Whether to add a colorbar to the figure. Can also be a tuple
        to give the (row, col) index of where to put the colorbar.
    hemi : str | None
        If None, it is assumed to belong to the hemisphere being
        shown. If two hemispheres are being shown, an error will
        be thrown.
    remove_existing : bool
        Not supported yet.
        Remove surface added by previous "add_data" call. Useful for
        conserving memory when displaying different data in a loop.
    time_label_size : int
        Font size of the time label (default 14).
    initial_time : float | None
        Time initially shown in the plot. ``None`` to use the first time
        sample (default).
    scale_factor : float | None (default)
        The scale factor to use when displaying glyphs for vector-valued
        data.
    vector_alpha : float | None
        Alpha level to control opacity of the arrows. Only used for
        vector-valued data. If None (default), ``alpha`` is used.
    clim : dict
        Original clim arguments.
    %(src_volume_options)s
    colorbar_kwargs : dict | None
        Options to pass to :meth:`pyvista.BasePlotter.add_scalar_bar`
        (e.g., ``dict(title_font_size=10)``).
    %(verbose)s

    Notes
    -----
    If the data is defined for a subset of vertices (specified
    by the "vertices" parameter), a smoothing method is used to interpolate
    the data onto the high resolution surface. If the data is defined for
    subsampled version of the surface, smoothing_steps can be set to None,
    in which case only as many smoothing steps are applied until the whole
    surface is filled with non-zeros.

    Due to a Mayavi (or VTK) alpha rendering bug, ``vector_alpha`` is
    clamped to be strictly < 1.
    """
    _validate_type(transparent, bool, 'transparent')
    _validate_type(vector_alpha, ('numeric', None), 'vector_alpha')
    _validate_type(scale_factor, ('numeric', None), 'scale_factor')
    # those parameters are not supported yet, only None is allowed
    _check_option('thresh', thresh, [None])
    _check_option('remove_existing', remove_existing, [None])
    _validate_type(time_label_size, (None, 'numeric'), 'time_label_size')
    if time_label_size is not None:
        time_label_size = float(time_label_size)
        if time_label_size < 0:
            raise ValueError('time_label_size must be positive, got '
                             f'{time_label_size}')
    hemi = self._check_hemi(hemi, extras=['vol'])
    stc, array, vertices = self._check_stc(hemi, array, vertices)
    array = np.asarray(array)
    vector_alpha = alpha if vector_alpha is None else vector_alpha
    self._data['vector_alpha'] = vector_alpha
    self._data['scale_factor'] = scale_factor
    # Create time array and add label if > 1D
    if array.ndim <= 1:
        time_idx = 0
    else:
        # check time array
        if time is None:
            time = np.arange(array.shape[-1])
        else:
            time = np.asarray(time)
            if time.shape != (array.shape[-1],):
                raise ValueError('time has shape %s, but need shape %s '
                                 '(array.shape[-1])' %
                                 (time.shape, (array.shape[-1],)))
        self._data["time"] = time
        if self._n_times is None:
            self._times = time
        elif len(time) != self._n_times:
            raise ValueError("New n_times is different from previous "
                             "n_times")
        elif not np.array_equal(time, self._times):
            raise ValueError("Not all time values are consistent with "
                             "previously set times.")
        # initial time
        if initial_time is None:
            time_idx = 0
        else:
            time_idx = self._to_time_index(initial_time)
    # time label
    time_label, _ = _handle_time(time_label, 's', time)
    y_txt = 0.05 + 0.1 * bool(colorbar)
    if array.ndim == 3:
        if array.shape[1] != 3:
            raise ValueError('If array has 3 dimensions, array.shape[1] '
                             'must equal 3, got %s' % (array.shape[1],))
    fmin, fmid, fmax = _update_limits(
        fmin, fmid, fmax, center, array
    )
    if colormap == 'auto':
        colormap = 'mne' if center is not None else 'hot'
    if smoothing_steps is None:
        smoothing_steps = 7
    elif smoothing_steps == 'nearest':
        smoothing_steps = 0
    elif isinstance(smoothing_steps, int):
        if smoothing_steps < 0:
            raise ValueError('Expected value of `smoothing_steps` is'
                             ' positive but {} was given.'.format(
                                 smoothing_steps))
    else:
        raise TypeError('Expected type of `smoothing_steps` is int or'
                        ' NoneType but {} was given.'.format(
                            type(smoothing_steps)))
    # stash everything downstream updates (time/smoothing/LUT) will need
    self._data['stc'] = stc
    self._data['src'] = src
    self._data['smoothing_steps'] = smoothing_steps
    self._data['clim'] = clim
    self._data['time'] = time
    self._data['initial_time'] = initial_time
    self._data['time_label'] = time_label
    self._data['initial_time_idx'] = time_idx
    self._data['time_idx'] = time_idx
    self._data['transparent'] = transparent
    # data specific for a hemi
    self._data[hemi] = dict()
    self._data[hemi]['glyph_dataset'] = None
    self._data[hemi]['glyph_mapper'] = None
    self._data[hemi]['glyph_actor'] = None
    self._data[hemi]['array'] = array
    self._data[hemi]['vertices'] = vertices
    self._data['alpha'] = alpha
    self._data['colormap'] = colormap
    self._data['center'] = center
    self._data['fmin'] = fmin
    self._data['fmid'] = fmid
    self._data['fmax'] = fmax
    self.update_lut()
    # 1) add the surfaces first
    actor = None
    for ri, ci, _ in self._iter_views(hemi):
        self._renderer.subplot(ri, ci)
        if hemi in ('lh', 'rh'):
            actor = self._layered_meshes[hemi]._actor
        else:
            src_vol = src[2:] if src.kind == 'mixed' else src
            actor, _ = self._add_volume_data(hemi, src_vol, volume_options)
    assert actor is not None  # should have added one
    # 2) update time and smoothing properties
    # set_data_smoothing calls "set_time_point" for us, which will set
    # _current_time
    self.set_time_interpolation(self.time_interpolation)
    self.set_data_smoothing(self._data['smoothing_steps'])
    # 3) add the other actors
    if colorbar is True:
        # bottom left by default
        colorbar = (self._subplot_shape[0] - 1, 0)
    for ri, ci, v in self._iter_views(hemi):
        self._renderer.subplot(ri, ci)
        # Add the time label to the bottommost view
        do = (ri, ci) == colorbar
        if not self._time_label_added and time_label is not None and do:
            time_actor = self._renderer.text2d(
                x_window=0.95, y_window=y_txt,
                color=self._fg_color,
                size=time_label_size,
                text=time_label(self._current_time),
                justification='right'
            )
            self._data['time_actor'] = time_actor
            self._time_label_added = True
        if colorbar and not self._colorbar_added and do:
            kwargs = dict(source=actor, n_labels=8, color=self._fg_color,
                          bgcolor=self._brain_color[:3])
            kwargs.update(colorbar_kwargs or {})
            self._renderer.scalarbar(**kwargs)
            self._colorbar_added = True
        self._renderer.set_camera(**views_dicts[hemi][v])
    # 4) update the scalar bar and opacity
    self.update_lut()
    if hemi in self._layered_meshes:
        mesh = self._layered_meshes[hemi]
        mesh.update_overlay(name='data', opacity=alpha)
    self._update()
def _iter_views(self, hemi):
# which rows and columns each type of visual needs to be added to
if self._hemi == 'split':
hemi_dict = dict(lh=[0], rh=[1], vol=[0, 1])
else:
hemi_dict = dict(lh=[0], rh=[0], vol=[0])
for vi, view in enumerate(self._views):
if self._hemi == 'split':
view_dict = dict(lh=[vi], rh=[vi], vol=[vi, vi])
else:
view_dict = dict(lh=[vi], rh=[vi], vol=[vi])
if self._view_layout == 'vertical':
rows = view_dict # views are rows
cols = hemi_dict # hemis are columns
else:
rows = hemi_dict # hemis are rows
cols = view_dict # views are columns
for ri, ci in zip(rows[hemi], cols[hemi]):
yield ri, ci, view
def remove_labels(self):
    """Remove all the ROI labels from the image."""
    for hemi in self._hemis:
        # drop the overlays from the mesh, then forget the labels
        layered_mesh = self._layered_meshes[hemi]
        layered_mesh.remove_overlay(self._labels[hemi])
        self._labels[hemi].clear()
    self._update()
def remove_annotations(self):
    """Remove all annotations from the image."""
    for hemi in self._hemis:
        # drop the overlays from the mesh, then forget the annotations
        layered_mesh = self._layered_meshes[hemi]
        layered_mesh.remove_overlay(self._annots[hemi])
        self._annots[hemi].clear()
    self._update()
def _add_volume_data(self, hemi, src, volume_options):
    # Build (once, cached in self._data['vol']) and add the volume
    # rendering actors for a volume source space, plus an optional outer
    # surface mesh and view-dependent silhouette.
    from ..backends._pyvista import _volume
    _validate_type(src, SourceSpaces, 'src')
    _check_option('src.kind', src.kind, ('volume',))
    _validate_type(
        volume_options, (dict, 'numeric', None), 'volume_options')
    assert hemi == 'vol'
    if not isinstance(volume_options, dict):
        # a bare number is shorthand for the resampling resolution
        volume_options = dict(
            resolution=float(volume_options) if volume_options is not None
            else None)
    volume_options = _handle_default('volume_options', volume_options)
    allowed_types = (
        ['resolution', (None, 'numeric')],
        ['blending', (str,)],
        ['alpha', ('numeric', None)],
        ['surface_alpha', (None, 'numeric')],
        ['silhouette_alpha', (None, 'numeric')],
        ['silhouette_linewidth', ('numeric',)],
    )
    for key, types in allowed_types:
        _validate_type(volume_options[key], types,
                       f'volume_options[{repr(key)}]')
    extra_keys = set(volume_options) - set(a[0] for a in allowed_types)
    if len(extra_keys):
        raise ValueError(
            f'volume_options got unknown keys {sorted(extra_keys)}')
    blending = _check_option('volume_options["blending"]',
                             volume_options['blending'],
                             ('composite', 'mip'))
    alpha = volume_options['alpha']
    if alpha is None:
        # time-varying (3D) data defaults to semi-transparent
        alpha = 0.4 if self._data[hemi]['array'].ndim == 3 else 1.
    alpha = np.clip(float(alpha), 0., 1.)
    resolution = volume_options['resolution']
    surface_alpha = volume_options['surface_alpha']
    if surface_alpha is None:
        surface_alpha = min(alpha / 2., 0.1)
    silhouette_alpha = volume_options['silhouette_alpha']
    if silhouette_alpha is None:
        silhouette_alpha = surface_alpha / 4.
    silhouette_linewidth = volume_options['silhouette_linewidth']
    del volume_options
    volume_pos = self._data[hemi].get('grid_volume_pos')
    volume_neg = self._data[hemi].get('grid_volume_neg')
    center = self._data['center']
    if volume_pos is None:
        # first call: build the uniform grid in MRI coordinates
        xyz = np.meshgrid(
            *[np.arange(s) for s in src[0]['shape']], indexing='ij')
        dimensions = np.array(src[0]['shape'], int)
        mult = 1000 if self._units == 'mm' else 1
        src_mri_t = src[0]['src_mri_t']['trans'].copy()
        src_mri_t[:3] *= mult
        if resolution is not None:
            resolution = resolution * mult / 1000.  # to mm
        del src, mult
        coords = np.array([c.ravel(order='F') for c in xyz]).T
        coords = apply_trans(src_mri_t, coords)
        self.geo[hemi] = Bunch(coords=coords)
        vertices = self._data[hemi]['vertices']
        assert self._data[hemi]['array'].shape[0] == len(vertices)
        # MNE constructs the source space on a uniform grid in MRI space,
        # but mne coreg can change it to be non-uniform, so we need to
        # use all three elements here
        assert np.allclose(
            src_mri_t[:3, :3], np.diag(np.diag(src_mri_t)[:3]))
        spacing = np.diag(src_mri_t)[:3]
        origin = src_mri_t[:3, 3] - spacing / 2.
        scalars = np.zeros(np.prod(dimensions))
        scalars[vertices] = 1.  # for the outer mesh
        grid, grid_mesh, volume_pos, volume_neg = \
            _volume(dimensions, origin, spacing, scalars, surface_alpha,
                    resolution, blending, center)
        self._data[hemi]['alpha'] = alpha  # incorrectly set earlier
        self._data[hemi]['grid'] = grid
        self._data[hemi]['grid_mesh'] = grid_mesh
        self._data[hemi]['grid_coords'] = coords
        self._data[hemi]['grid_src_mri_t'] = src_mri_t
        self._data[hemi]['grid_shape'] = dimensions
        self._data[hemi]['grid_volume_pos'] = volume_pos
        self._data[hemi]['grid_volume_neg'] = volume_neg
    actor_pos, _ = self._renderer.plotter.add_actor(
        volume_pos, reset_camera=False, name=None, culling=False)
    if volume_neg is not None:
        actor_neg, _ = self._renderer.plotter.add_actor(
            volume_neg, reset_camera=False, name=None, culling=False)
    else:
        actor_neg = None
    grid_mesh = self._data[hemi]['grid_mesh']
    if grid_mesh is not None:
        import vtk
        # translucent outer surface of the source space
        _, prop = self._renderer.plotter.add_actor(
            grid_mesh, reset_camera=False, name=None, culling=False,
            pickable=False)
        prop.SetColor(*self._brain_color[:3])
        prop.SetOpacity(surface_alpha)
        if silhouette_alpha > 0 and silhouette_linewidth > 0:
            # per-view silhouette (depends on each camera)
            for ri, ci, v in self._iter_views('vol'):
                self._renderer.subplot(ri, ci)
                grid_silhouette = vtk.vtkPolyDataSilhouette()
                grid_silhouette.SetInputData(grid_mesh.GetInput())
                grid_silhouette.SetCamera(
                    self._renderer.plotter.renderer.GetActiveCamera())
                grid_silhouette.SetEnableFeatureAngle(0)
                grid_silhouette_mapper = vtk.vtkPolyDataMapper()
                grid_silhouette_mapper.SetInputConnection(
                    grid_silhouette.GetOutputPort())
                _, prop = self._renderer.plotter.add_actor(
                    grid_silhouette_mapper, reset_camera=False, name=None,
                    culling=False, pickable=False)
                prop.SetColor(*self._brain_color[:3])
                prop.SetOpacity(silhouette_alpha)
                prop.SetLineWidth(silhouette_linewidth)
    return actor_pos, actor_neg
def add_label(self, label, color=None, alpha=1, scalar_thresh=None,
              borders=False, hemi=None, subdir=None,
              reset_camera=True):
    """Add an ROI label to the image.

    Parameters
    ----------
    label : str | instance of Label
        Label filepath or name. Can also be an instance of
        an object with attributes "hemi", "vertices", "name", and
        optionally "color" and "values" (if scalar_thresh is not None).
    color : matplotlib-style color | None
        Anything matplotlib accepts: string, RGB, hex, etc. (default
        "crimson").
    alpha : float in [0, 1]
        Alpha level to control opacity.
    scalar_thresh : None | float
        Threshold the label ids using this value in the label
        file's scalar field (i.e. label only vertices with
        scalar >= thresh).
    borders : bool | int
        Show only label borders. If int, specify the number of steps
        (away from the true border) along the cortical mesh to include
        as part of the border definition.
    hemi : str | None
        If None, it is assumed to belong to the hemisphere being
        shown.
    subdir : None | str
        If a label is specified as name, subdir can be used to indicate
        that the label file is in a sub-directory of the subject's
        label directory rather than in the label directory itself (e.g.
        for ``$SUBJECTS_DIR/$SUBJECT/label/aparc/lh.cuneus.label``
        ``brain.add_label('cuneus', subdir='aparc')``).
    reset_camera : bool
        If True, reset the camera view after adding the label. Defaults
        to True.

    Notes
    -----
    To remove previously added labels, run Brain.remove_labels().
    """
    from matplotlib.colors import colorConverter
    from ...label import read_label
    if isinstance(label, str):
        if color is None:
            color = "crimson"
        if os.path.isfile(label):
            # a path to a label file
            filepath = label
            label = read_label(filepath)
            hemi = label.hemi
            label_name = os.path.basename(filepath).split('.')[1]
        else:
            # a label name: resolve it in the subject's label directory
            hemi = self._check_hemi(hemi)
            label_name = label
            label_fname = ".".join([hemi, label_name, 'label'])
            if subdir is None:
                filepath = op.join(self._subjects_dir, self._subject_id,
                                   'label', label_fname)
            else:
                filepath = op.join(self._subjects_dir, self._subject_id,
                                   'label', subdir, label_fname)
            if not os.path.exists(filepath):
                raise ValueError('Label file %s does not exist'
                                 % filepath)
            label = read_label(filepath)
        ids = label.vertices
        scalars = label.values
    else:
        # try to extract parameters from label instance
        try:
            hemi = label.hemi
            ids = label.vertices
            if label.name is None:
                label_name = 'unnamed'
            else:
                label_name = str(label.name)
            if color is None:
                if hasattr(label, 'color') and label.color is not None:
                    color = label.color
                else:
                    color = "crimson"
            if scalar_thresh is not None:
                scalars = label.values
        except Exception:
            raise ValueError('Label was not a filename (str), and could '
                             'not be understood as a class. The class '
                             'must have attributes "hemi", "vertices", '
                             '"name", and (if scalar_thresh is not None)'
                             '"values"')
        hemi = self._check_hemi(hemi)
    if scalar_thresh is not None:
        ids = ids[scalars >= scalar_thresh]
    # binary mask over all surface vertices: 1 inside the label
    scalars = np.zeros(self.geo[hemi].coords.shape[0])
    scalars[ids] = 1
    if self.time_viewer and self.show_traces:
        # also plot the label's extracted time course on the mpl canvas
        stc = self._data["stc"]
        src = self._data["src"]
        tc = stc.extract_label_time_course(label, src=src,
                                           mode=self.label_extract_mode)
        tc = tc[0] if tc.ndim == 2 else tc[0, 0, :]
        color = next(self.color_cycle)
        line = self.mpl_canvas.plot(
            self._data['time'], tc, label=label_name,
            color=color)
    else:
        line = None
    orig_color = color
    color = colorConverter.to_rgba(color, alpha)
    # 2-entry LUT: transparent outside the label, `color` inside
    cmap = np.array([(0, 0, 0, 0,), color])
    ctable = np.round(cmap * 255).astype(np.uint8)
    for ri, ci, v in self._iter_views(hemi):
        self._renderer.subplot(ri, ci)
        if borders:
            # keep only vertices on (or within `borders` steps of) the
            # label boundary
            n_vertices = scalars.size
            edges = mesh_edges(self.geo[hemi].faces)
            edges = edges.tocoo()
            border_edges = scalars[edges.row] != scalars[edges.col]
            show = np.zeros(n_vertices, dtype=np.int64)
            keep_idx = np.unique(edges.row[border_edges])
            if isinstance(borders, int):
                for _ in range(borders):
                    keep_idx = np.in1d(
                        self.geo[hemi].faces.ravel(), keep_idx)
                    keep_idx.shape = self.geo[hemi].faces.shape
                    keep_idx = self.geo[hemi].faces[np.any(
                        keep_idx, axis=1)]
                    keep_idx = np.unique(keep_idx)
            show[keep_idx] = 1
            scalars *= show
        mesh = self._layered_meshes[hemi]
        mesh.add_overlay(
            scalars=scalars,
            colormap=ctable,
            rng=None,
            opacity=alpha,
            name=label_name,
        )
        if reset_camera:
            self._renderer.set_camera(**views_dicts[hemi][v])
    if self.time_viewer and self.traces_mode == 'label':
        # stash picking metadata used by _remove_label_glyph
        label._color = orig_color
        label._line = line
    self._labels[hemi].append(label)
    self._update()
def add_foci(self, coords, coords_as_verts=False, map_surface=None,
             scale_factor=1, color="white", alpha=1, name=None,
             hemi=None, resolution=50):
    """Add spherical foci, possibly mapping to displayed surf.

    The foci spheres can be displayed at the coordinates given, or
    mapped through a surface geometry. In other words, coordinates
    from a volume-based analysis in MNI space can be displayed on an
    inflated average surface by finding the closest vertex on the
    white surface and mapping to that vertex on the inflated mesh.

    Parameters
    ----------
    coords : ndarray, shape (n_coords, 3)
        Coordinates in stereotaxic space (default) or array of
        vertex ids (with ``coords_as_verts=True``).
    coords_as_verts : bool
        Whether the coords parameter should be interpreted as vertex ids.
    map_surface : None
        Surface to map coordinates through, or None to use raw coords.
    scale_factor : float
        Controls the size of the foci spheres (relative to 1cm).
    color : matplotlib color code
        HTML name, RGB tuple, or hex code.
    alpha : float in [0, 1]
        Opacity of focus glyphs.
    name : str
        Internal name to use.
    hemi : str | None
        If None, it is assumed to belong to the hemisphere being
        shown. If two hemispheres are being shown, an error will
        be thrown.
    resolution : int
        The resolution of the spheres.
    """
    from matplotlib.colors import colorConverter
    hemi = self._check_hemi(hemi, extras=['vol'])
    # those parameters are not supported yet, only None is allowed
    _check_option('map_surface', map_surface, [None])
    # Figure out how to interpret the first parameter
    if coords_as_verts:
        coords = self.geo[hemi].coords[coords]
    # Convert the color code
    if not isinstance(color, tuple):
        color = colorConverter.to_rgb(color)
    if self._units == 'm':
        # scale_factor is specified relative to mm
        scale_factor = scale_factor / 1000.
    for ri, ci, v in self._iter_views(hemi):
        self._renderer.subplot(ri, ci)
        self._renderer.sphere(center=coords, color=color,
                              scale=(10. * scale_factor),
                              opacity=alpha, resolution=resolution)
        self._renderer.set_camera(**views_dicts[hemi][v])
def add_text(self, x, y, text, name=None, color=None, opacity=1.0,
             row=-1, col=-1, font_size=None, justification=None):
    """Add a text to the visualization.

    Parameters
    ----------
    x : float
        X coordinate.
    y : float
        Y coordinate.
    text : str
        Text to add.
    name : str
        Name of the text (text label can be updated using update_text()).
    color : tuple
        Color of the text. Default is the foreground color set during
        initialization (default is black or white depending on the
        background color).
    opacity : float
        Opacity of the text (default 1.0).
    row : int
        Row index of which brain to use.
    col : int
        Column index of which brain to use.
    font_size : float | None
        The font size to use.
    justification : str | None
        The text justification.
    """
    # XXX: support `name` should be added when update_text/remove_text
    # are implemented
    # _check_option('name', name, [None])
    # NOTE(review): `name`, `opacity`, `row` and `col` are accepted but
    # not forwarded to the renderer here — confirm whether intentional.
    self._renderer.text2d(x_window=x, y_window=y, text=text, color=color,
                          size=font_size, justification=justification)
def _configure_label_time_course(self):
    """Set up label-mode time traces: show the annotation and build the
    per-hemisphere vertex -> label-id lookup used when picking labels."""
    from ...label import read_labels_from_annot
    if not self.show_traces:
        return
    if self.mpl_canvas is None:
        self._configure_mplcanvas()
    else:
        self.clear_glyphs()
    self.traces_mode = 'label'
    self.add_annotation(self.annot, color="w", alpha=0.75)
    # now plot the time line
    self.plot_time_line()
    self.mpl_canvas.update_plot()
    for hemi in self._hemis:
        labels = read_labels_from_annot(
            subject=self._subject_id,
            parc=self.annot,
            hemi=hemi,
            subjects_dir=self._subjects_dir
        )
        # -1 marks vertices that belong to no label
        self._vertex_to_label_id[hemi] = np.full(
            self.geo[hemi].coords.shape[0], -1)
        self._annotation_labels[hemi] = labels
        for idx, label in enumerate(labels):
            self._vertex_to_label_id[hemi][label.vertices] = idx
def add_annotation(self, annot, borders=True, alpha=1, hemi=None,
                   remove_existing=True, color=None, **kwargs):
    """Add an annotation file.

    Parameters
    ----------
    annot : str | tuple
        Either path to annotation file or annotation name. Alternatively,
        the annotation can be specified as a ``(labels, ctab)`` tuple per
        hemisphere, i.e. ``annot=(labels, ctab)`` for a single hemisphere
        or ``annot=((lh_labels, lh_ctab), (rh_labels, rh_ctab))`` for both
        hemispheres. ``labels`` and ``ctab`` should be arrays as returned
        by :func:`nibabel.freesurfer.io.read_annot`.
    borders : bool | int
        Show only label borders. If int, specify the number of steps
        (away from the true border) along the cortical mesh to include
        as part of the border definition.
    alpha : float in [0, 1]
        Alpha level to control opacity.
    hemi : str | None
        If None, it is assumed to belong to the hemipshere being
        shown. If two hemispheres are being shown, data must exist
        for both hemispheres.
    remove_existing : bool
        If True (default), remove old annotations.
    color : matplotlib-style color code
        If used, show all annotations in the same (specified) color.
        Probably useful only when showing annotation borders.
    **kwargs : dict
        These are passed to the underlying
        ``mayavi.mlab.pipeline.surface`` call.
    """
    from ...label import _read_annot
    hemis = self._check_hemis(hemi)
    # Figure out where the data is coming from
    if isinstance(annot, str):
        if os.path.isfile(annot):
            filepath = annot
            path = os.path.split(filepath)[0]
            file_hemi, annot = os.path.basename(filepath).split('.')[:2]
            if len(hemis) > 1:
                # BUGFIX: the hemisphere prefix was split off into
                # ``file_hemi`` above, so the old ``annot[:2] == 'lh.'``
                # check could never be True; also the rh branch built a
                # single bogus 3-arg op.join path instead of a
                # two-element [lh_path, rh_path] list.
                base = os.path.basename(filepath)
                if file_hemi == 'lh':
                    filepaths = [filepath, op.join(path, 'rh' + base[2:])]
                elif file_hemi == 'rh':
                    filepaths = [op.join(path, 'lh' + base[2:]), filepath]
                else:
                    raise RuntimeError('To add both hemispheres '
                                       'simultaneously, filename must '
                                       'begin with "lh." or "rh."')
            else:
                filepaths = [filepath]
        else:
            # annotation name: resolve to files in the subject's label dir
            filepaths = []
            for hemi in hemis:
                filepath = op.join(self._subjects_dir,
                                   self._subject_id,
                                   'label',
                                   ".".join([hemi, annot, 'annot']))
                if not os.path.exists(filepath):
                    raise ValueError('Annotation file %s does not exist'
                                     % filepath)
                filepaths += [filepath]
        annots = []
        for hemi, filepath in zip(hemis, filepaths):
            # Read in the data
            labels, cmap, _ = _read_annot(filepath)
            annots.append((labels, cmap))
    else:
        # (labels, ctab) tuple(s) supplied directly
        annots = [annot] if len(hemis) == 1 else annot
        annot = 'annotation'
    for hemi, (labels, cmap) in zip(hemis, annots):
        # Maybe zero-out the non-border vertices
        self._to_borders(labels, hemi, borders)
        # Handle null labels properly
        cmap[:, 3] = 255
        bgcolor = np.round(np.array(self._brain_color) * 255).astype(int)
        bgcolor[-1] = 0
        cmap[cmap[:, 4] < 0, 4] += 2 ** 24  # wrap to positive
        cmap[cmap[:, 4] <= 0, :4] = bgcolor
        # Make sure badly-formed cmaps don't overwrite the first entry
        if np.any(labels == 0) and not np.any(cmap[:, -1] <= 0):
            cmap = np.vstack((cmap, np.concatenate([bgcolor, [0]])))
        # Set label ids sensibly
        order = np.argsort(cmap[:, -1])
        cmap = cmap[order]
        ids = np.searchsorted(cmap[:, -1], labels)
        cmap = cmap[:, :4]
        # Set the alpha level
        alpha_vec = cmap[:, 3]
        alpha_vec[alpha_vec > 0] = alpha * 255
        # Override the cmap when a single color is used
        if color is not None:
            from matplotlib.colors import colorConverter
            rgb = np.round(np.multiply(colorConverter.to_rgb(color), 255))
            cmap[:, :3] = rgb.astype(cmap.dtype)
        ctable = cmap.astype(np.float64)
        for ri, ci, _ in self._iter_views(hemi):
            self._renderer.subplot(ri, ci)
            mesh = self._layered_meshes[hemi]
            mesh.add_overlay(
                scalars=ids,
                colormap=ctable,
                rng=[np.min(ids), np.max(ids)],
                opacity=alpha,
                name=annot,
            )
            self._annots[hemi].append(annot)
            if not self.time_viewer or self.traces_mode == 'vertex':
                from ..backends._pyvista import _set_colormap_range
                _set_colormap_range(mesh._actor, cmap.astype(np.uint8),
                                    None)
    self._update()
def close(self):
    """Close all figures and cleanup data structure."""
    # mark closed first so callbacks can short-circuit during teardown
    self._closed = True
    self._renderer.close()

def show(self):
    """Display the window."""
    self._renderer.show()
def show_view(self, view=None, roll=None, distance=None, row=0, col=0,
              hemi=None):
    """Orient camera to display view.

    Parameters
    ----------
    view : str | dict
        String view, or a dict with azimuth and elevation.
    roll : float | None
        The roll.
    distance : float | None
        The distance.
    row : int
        The row to set.
    col : int
        The column to set.
    hemi : str
        Which hemi to use for string lookup (when in "both" mode).
    """
    hemi = self._hemi if hemi is None else hemi
    if hemi == 'split':
        # in split layout, the second column (vertical) or second row
        # (horizontal) holds the right hemisphere
        if (self._view_layout == 'vertical' and col == 1 or
                self._view_layout == 'horizontal' and row == 1):
            hemi = 'rh'
        else:
            hemi = 'lh'
    if isinstance(view, str):
        view = views_dicts[hemi].get(view)
    # NOTE(review): with the default view=None, or an unknown view name
    # (dict.get returns None), the .copy() below raises AttributeError —
    # confirm whether explicit validation is wanted here.
    view = view.copy()
    if roll is not None:
        view.update(roll=roll)
    if distance is not None:
        view.update(distance=distance)
    self._renderer.subplot(row, col)
    self._renderer.set_camera(**view, reset_camera=False)
    self._update()
def reset_view(self):
    """Reset the camera."""
    # restore the default camera for every view of every hemisphere
    for h in self._hemis:
        for ri, ci, v in self._iter_views(h):
            self._renderer.subplot(ri, ci)
            self._renderer.set_camera(**views_dicts[h][v],
                                      reset_camera=False)
def save_image(self, filename, mode='rgb'):
    """Save view from all panels to disk.

    Parameters
    ----------
    filename : str
        Path to new image file.
    mode : str
        Either 'rgb' or 'rgba' for values to return.
    """
    # delegate entirely to the renderer's screenshot-to-file support
    self._renderer.screenshot(mode=mode, filename=filename)
@fill_doc
def screenshot(self, mode='rgb', time_viewer=False):
    """Generate a screenshot of current view.

    Parameters
    ----------
    mode : str
        Either 'rgb' or 'rgba' for values to return.
    %(brain_screenshot_time_viewer)s

    Returns
    -------
    screenshot : array
        Image pixel values.
    """
    img = self._renderer.screenshot(mode)
    # optionally stack the matplotlib time-trace canvas under the 3D view
    if time_viewer and self.time_viewer and \
            self.show_traces and \
            not self.separate_canvas:
        canvas = self.mpl_canvas.fig.canvas
        canvas.draw_idle()
        # In theory, one of these should work:
        #
        # trace_img = np.frombuffer(
        #     canvas.tostring_rgb(), dtype=np.uint8)
        # trace_img.shape = canvas.get_width_height()[::-1] + (3,)
        #
        # or
        #
        # trace_img = np.frombuffer(
        #     canvas.tostring_rgb(), dtype=np.uint8)
        # size = time_viewer.mpl_canvas.getSize()
        # trace_img.shape = (size.height(), size.width(), 3)
        #
        # But in practice, sometimes the sizes does not match the
        # renderer tostring_rgb() size. So let's directly use what
        # matplotlib does in lib/matplotlib/backends/backend_agg.py
        # before calling tobytes():
        trace_img = np.asarray(
            canvas.renderer._renderer).take([0, 1, 2], axis=2)
        # need to slice into trace_img because generally it's a bit
        # smaller
        delta = trace_img.shape[1] - img.shape[1]
        if delta > 0:
            # center-crop the trace image to the 3D image's width
            start = delta // 2
            trace_img = trace_img[:, start:start + img.shape[1]]
        img = np.concatenate([img, trace_img], axis=0)
    return img
@fill_doc
def update_lut(self, fmin=None, fmid=None, fmax=None):
    """Update color map.

    Parameters
    ----------
    %(fmin_fmid_fmax)s
    """
    from ..backends._pyvista import _set_colormap_range, _set_volume_range
    center = self._data['center']
    colormap = self._data['colormap']
    transparent = self._data['transparent']
    # fall back to the currently-stored limit for any value not supplied
    lims = dict(fmin=fmin, fmid=fmid, fmax=fmax)
    lims = {key: self._data[key] if val is None else val
            for key, val in lims.items()}
    assert all(val is not None for val in lims.values())
    # clamp so fmin <= fmid <= fmax always holds
    if lims['fmin'] > lims['fmid']:
        lims['fmin'] = lims['fmid']
    if lims['fmax'] < lims['fmid']:
        lims['fmax'] = lims['fmid']
    self._data.update(lims)
    # regenerate the 0-255 color table from the new limits
    self._data['ctable'] = np.round(
        calculate_lut(colormap, alpha=1., center=center,
                      transparent=transparent, **lims) *
        255).astype(np.uint8)
    # update our values
    rng = self._cmap_range
    ctable = self._data['ctable']
    # in testing, no plotter; if colorbar=False, no scalar_bar
    scalar_bar = getattr(
        getattr(self._renderer, 'plotter', None), 'scalar_bar', None)
    for hemi in ['lh', 'rh', 'vol']:
        hemi_data = self._data.get(hemi)
        if hemi_data is not None:
            if hemi in self._layered_meshes:
                mesh = self._layered_meshes[hemi]
                mesh.update_overlay(name='data',
                                    colormap=self._data['ctable'])
                _set_colormap_range(mesh._actor, ctable, scalar_bar, rng,
                                    self._brain_color)
                # only the first updated actor gets the scalar bar
                scalar_bar = None
            grid_volume_pos = hemi_data.get('grid_volume_pos')
            grid_volume_neg = hemi_data.get('grid_volume_neg')
            for grid_volume in (grid_volume_pos, grid_volume_neg):
                if grid_volume is not None:
                    _set_volume_range(
                        grid_volume, ctable, hemi_data['alpha'],
                        scalar_bar, rng)
                    scalar_bar = None
            glyph_actor = hemi_data.get('glyph_actor')
            if glyph_actor is not None:
                for glyph_actor_ in glyph_actor:
                    _set_colormap_range(
                        glyph_actor_, ctable, scalar_bar, rng)
                scalar_bar = None
def set_data_smoothing(self, n_steps):
    """Set the number of smoothing steps.

    Parameters
    ----------
    n_steps : int
        Number of smoothing steps (0 selects nearest-neighbor
        assignment instead of iterative smoothing).
    """
    from ...morph import _hemi_morph
    for hemi in ['lh', 'rh']:
        hemi_data = self._data.get(hemi)
        if hemi_data is not None:
            # data already defined on every vertex: nothing to smooth
            if len(hemi_data['array']) >= self.geo[hemi].x.shape[0]:
                continue
            vertices = hemi_data['vertices']
            if vertices is None:
                # BUGFIX: report the data length, not len(hemi_data)
                # (which was the number of keys in the dict)
                raise ValueError(
                    'len(data) < nvtx (%s < %s): the vertices '
                    'parameter must not be None'
                    % (len(hemi_data['array']),
                       self.geo[hemi].x.shape[0]))
            morph_n_steps = 'nearest' if n_steps == 0 else n_steps
            maps = sparse.eye(len(self.geo[hemi].coords), format='csr')
            with use_log_level(False):
                smooth_mat = _hemi_morph(
                    self.geo[hemi].orig_faces,
                    np.arange(len(self.geo[hemi].coords)),
                    vertices, morph_n_steps, maps, warn=False)
            self._data[hemi]['smooth_mat'] = smooth_mat
    # re-render the current time point with the new smoothing matrices
    self.set_time_point(self._data['time_idx'])
    self._data['smoothing_steps'] = n_steps
@property
def _n_times(self):
    # number of time samples, or None when no time axis is defined
    return len(self._times) if self._times is not None else None

@property
def time_interpolation(self):
    """The interpolation mode."""
    return self._time_interpolation
@fill_doc
def set_time_interpolation(self, interpolation):
    """Set the interpolation mode.

    Parameters
    ----------
    %(brain_time_interpolation)s
    """
    self._time_interpolation = _check_option(
        'interpolation',
        interpolation,
        ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic')
    )
    # rebuild the per-hemisphere time interpolators with the new mode
    self._time_interp_funcs = dict()
    self._time_interp_inv = None
    if self._times is not None:
        idx = np.arange(self._n_times)
        for hemi in ['lh', 'rh', 'vol']:
            hemi_data = self._data.get(hemi)
            if hemi_data is not None:
                array = hemi_data['array']
                self._time_interp_funcs[hemi] = _safe_interp1d(
                    idx, array, self._time_interpolation, axis=-1,
                    assume_sorted=True)
        # maps a (possibly fractional) sample index back to seconds
        self._time_interp_inv = _safe_interp1d(idx, self._times)
def set_time_point(self, time_idx):
    """Set the time point shown (can be a float to interpolate).

    Parameters
    ----------
    time_idx : int | float
        The time index to use. Can be a float to use interpolation
        between indices.
    """
    self._current_act_data = dict()
    time_actor = self._data.get('time_actor', None)
    time_label = self._data.get('time_label', None)
    for hemi in ['lh', 'rh', 'vol']:
        hemi_data = self._data.get(hemi)
        if hemi_data is not None:
            array = hemi_data['array']
            # interpolate in time
            vectors = None
            if array.ndim == 1:
                # static (no time axis) data
                act_data = array
                self._current_time = 0
            else:
                act_data = self._time_interp_funcs[hemi](time_idx)
                self._current_time = self._time_interp_inv(time_idx)
                if array.ndim == 3:
                    # vector-valued: keep the vectors, display the norm
                    vectors = act_data
                    act_data = np.linalg.norm(act_data, axis=1)
                    # NOTE(review): redundant — already assigned above
                    self._current_time = self._time_interp_inv(time_idx)
            self._current_act_data[hemi] = act_data
            if time_actor is not None and time_label is not None:
                time_actor.SetInput(time_label(self._current_time))
            # update the volume interpolation
            grid = hemi_data.get('grid')
            if grid is not None:
                vertices = self._data['vol']['vertices']
                values = self._current_act_data['vol']
                rng = self._cmap_range
                fill = 0 if self._data['center'] is not None else rng[0]
                grid.cell_arrays['values'].fill(fill)
                # XXX for sided data, we probably actually need two
                # volumes as composite/MIP needs to look at two
                # extremes... for now just use abs. Eventually we can add
                # two volumes if we want.
                grid.cell_arrays['values'][vertices] = values
            # interpolate in space
            smooth_mat = hemi_data.get('smooth_mat')
            if smooth_mat is not None:
                act_data = smooth_mat.dot(act_data)
            # update the mesh scalar values
            if hemi in self._layered_meshes:
                mesh = self._layered_meshes[hemi]
                if 'data' in mesh._overlays:
                    mesh.update_overlay(name='data', scalars=act_data)
                else:
                    mesh.add_overlay(
                        scalars=act_data,
                        colormap=self._data['ctable'],
                        rng=self._cmap_range,
                        opacity=None,
                        name='data',
                    )
            # update the glyphs
            if vectors is not None:
                self._update_glyphs(hemi, vectors)
    self._data['time_idx'] = time_idx
    self._update()
def set_time(self, time):
    """Set the time to display (in seconds).

    Parameters
    ----------
    time : float
        The time to show, in seconds.
    """
    if self._times is None:
        raise ValueError(
            'Cannot set time when brain has no defined times.')
    t_lo, t_hi = min(self._times), max(self._times)
    if not t_lo <= time <= t_hi:
        raise ValueError(
            f'Requested time ({time} s) is outside the range of '
            f'available times ({t_lo}-{t_hi} s).')
    # convert the requested time to a (fractional) sample index
    idx = np.interp(float(time), self._times, np.arange(self._n_times))
    self.set_time_point(idx)
def _update_glyphs(self, hemi, vectors):
    """Create or refresh the quiver glyphs for vector-valued data."""
    from ..backends._pyvista import _set_colormap_range, _create_actor
    hemi_data = self._data.get(hemi)
    assert hemi_data is not None
    vertices = hemi_data['vertices']
    vector_alpha = self._data['vector_alpha']
    scale_factor = self._data['scale_factor']
    # None means the data covers all vertices
    vertices = slice(None) if vertices is None else vertices
    x, y, z = np.array(self.geo[hemi].coords)[vertices].T
    if hemi_data['glyph_actor'] is None:
        add = True
        hemi_data['glyph_actor'] = list()
    else:
        add = False
    count = 0
    for ri, ci, _ in self._iter_views(hemi):
        self._renderer.subplot(ri, ci)
        if hemi_data['glyph_dataset'] is None:
            # first call: build the quiver dataset and mapper
            glyph_mapper, glyph_dataset = self._renderer.quiver3d(
                x, y, z,
                vectors[:, 0], vectors[:, 1], vectors[:, 2],
                color=None,
                mode='2darrow',
                scale_mode='vector',
                scale=scale_factor,
                opacity=vector_alpha,
                name=str(hemi) + "_glyph"
            )
            hemi_data['glyph_dataset'] = glyph_dataset
            hemi_data['glyph_mapper'] = glyph_mapper
        else:
            # subsequent calls: just swap in the new vectors
            glyph_dataset = hemi_data['glyph_dataset']
            glyph_dataset.point_arrays['vec'] = vectors
            glyph_mapper = hemi_data['glyph_mapper']
        if add:
            glyph_actor = _create_actor(glyph_mapper)
            prop = glyph_actor.GetProperty()
            prop.SetLineWidth(2.)
            prop.SetOpacity(vector_alpha)
            self._renderer.plotter.add_actor(glyph_actor)
            hemi_data['glyph_actor'].append(glyph_actor)
        else:
            glyph_actor = hemi_data['glyph_actor'][count]
        count += 1
        _set_colormap_range(
            actor=glyph_actor,
            ctable=self._data['ctable'],
            scalar_bar=None,
            rng=self._cmap_range,
        )
@property
def _cmap_range(self):
    """Colormap [min, max]; symmetric about zero when a center is set."""
    dt_max = self._data['fmax']
    if self._data['center'] is None:
        dt_min = self._data['fmin']
    else:
        # diverging map: mirror the maximum around the center
        dt_min = -1 * dt_max
    return [dt_min, dt_max]
def _update_fscale(self, fscale):
    """Scale the colorbar points."""
    # multiply each stored limit by the scale factor and re-apply
    scaled = {key: self._data[key] * fscale
              for key in ('fmin', 'fmid', 'fmax')}
    self.update_lut(**scaled)
def _update_auto_scaling(self, restore=False):
    """Recompute colormap limits from the currently displayed data.

    Parameters
    ----------
    restore : bool
        If True and the user supplied explicit ``clim``, restore those
        limits instead of auto-scaling from the data.
    """
    user_clim = self._data['clim']
    if user_clim is not None and 'lims' in user_clim:
        allow_pos_lims = False
    else:
        allow_pos_lims = True
    if user_clim is not None and restore:
        clim = user_clim
    else:
        clim = 'auto'
    colormap = self._data['colormap']
    transparent = self._data['transparent']
    mapdata = _process_clim(
        clim, colormap, transparent,
        np.concatenate(list(self._current_act_data.values())),
        allow_pos_lims)
    # 'pos_lims' marks a diverging (two-sided) colormap
    diverging = 'pos_lims' in mapdata['clim']
    colormap = mapdata['colormap']
    scale_pts = mapdata['clim']['pos_lims' if diverging else 'lims']
    transparent = mapdata['transparent']
    del mapdata
    fmin, fmid, fmax = scale_pts
    center = 0. if diverging else None
    self._data['center'] = center
    self._data['colormap'] = colormap
    self._data['transparent'] = transparent
    self.update_lut(fmin=fmin, fmid=fmid, fmax=fmax)
def _to_time_index(self, value):
    """Return the interpolated time index of the given time value."""
    times = self._data['time']
    # linear interpolation between integer sample indices
    return np.interp(value, times, np.arange(len(times)))
@property
def data(self):
    """Data used by time viewer and color bar widgets."""
    return self._data

@property
def labels(self):
    """Labels that have been added, keyed by hemisphere."""
    return self._labels

@property
def views(self):
    """The view names being displayed."""
    return self._views

@property
def hemis(self):
    """The hemisphere names being displayed."""
    return self._hemis
def _save_movie(self, filename, time_dilation=4., tmin=None, tmax=None,
                framerate=24, interpolation=None, codec=None,
                bitrate=None, callback=None, time_viewer=False, **kwargs):
    """Render the movie frames and write them via imageio (see save_movie)."""
    import imageio
    from ..backends._pyvista import _disabled_interaction
    # disable interaction so user input cannot disturb the camera while
    # frames are being rendered
    with _disabled_interaction(self._renderer):
        images = self._make_movie_frames(
            time_dilation, tmin, tmax, framerate, interpolation, callback,
            time_viewer)
    # find imageio FFMPEG parameters
    if 'fps' not in kwargs:
        kwargs['fps'] = framerate
    if codec is not None:
        kwargs['codec'] = codec
    if bitrate is not None:
        kwargs['bitrate'] = bitrate
    imageio.mimwrite(filename, images, **kwargs)
@fill_doc
def save_movie(self, filename, time_dilation=4., tmin=None, tmax=None,
               framerate=24, interpolation=None, codec=None,
               bitrate=None, callback=None, time_viewer=False, **kwargs):
    """Save a movie (for data with a time axis).

    The movie is created through the :mod:`imageio` module. The format is
    determined by the extension, and additional options can be specified
    through keyword arguments that depend on the format. For available
    formats and corresponding parameters see the imageio documentation:
    http://imageio.readthedocs.io/en/latest/formats.html#multiple-images

    .. Warning::
        This method assumes that time is specified in seconds when adding
        data. If time is specified in milliseconds this will result in
        movies 1000 times longer than expected.

    Parameters
    ----------
    filename : str
        Path at which to save the movie. The extension determines the
        format (e.g., ``'*.mov'``, ``'*.gif'``, ...; see the :mod:`imageio`
        documentation for available formats).
    time_dilation : float
        Factor by which to stretch time (default 4). For example, an epoch
        from -100 to 600 ms lasts 700 ms. With ``time_dilation=4`` this
        would result in a 2.8 s long movie.
    tmin : float
        First time point to include (default: all data).
    tmax : float
        Last time point to include (default: all data).
    framerate : float
        Framerate of the movie (frames per second, default 24).
    %(brain_time_interpolation)s
        If None, it uses the current ``brain.interpolation``,
        which defaults to ``'nearest'``. Defaults to None.
    codec : str | None
        The codec to use.
    bitrate : float | None
        The bitrate to use.
    callback : callable | None
        A function to call on each iteration. Useful for status message
        updates. It will be passed keyword arguments ``frame`` and
        ``n_frames``.
    %(brain_screenshot_time_viewer)s
    **kwargs : dict
        Specify additional options for :mod:`imageio`.

    Returns
    -------
    dialog : object
        The opened dialog is returned for testing purpose only.
    """
    if self.time_viewer:
        # GUI mode: show progress in the status bar / file dialog
        try:
            from pyvista.plotting.qt_plotting import FileDialog
        except ImportError:
            from pyvistaqt.plotting import FileDialog
        if filename is None:
            # no filename given: ask the user via a file dialog
            self.status_msg.setText("Choose movie path ...")
            self.status_msg.show()
            self.status_progress.setValue(0)

            def _post_setup(unused):
                del unused
                self.status_msg.hide()
                self.status_progress.hide()

            dialog = FileDialog(
                self.plotter.app_window,
                callback=partial(self._save_movie, **kwargs)
            )
            dialog.setDirectory(os.getcwd())
            dialog.finished.connect(_post_setup)
            return dialog
        else:
            from PyQt5.QtCore import Qt
            from PyQt5.QtGui import QCursor

            def frame_callback(frame, n_frames):
                # update the status widgets as each frame is rendered
                if frame == n_frames:
                    # On the ImageIO step
                    self.status_msg.setText(
                        "Saving with ImageIO: %s"
                        % filename
                    )
                    self.status_msg.show()
                    self.status_progress.hide()
                    self.status_bar.layout().update()
                else:
                    self.status_msg.setText(
                        "Rendering images (frame %d / %d) ..."
                        % (frame + 1, n_frames)
                    )
                    self.status_msg.show()
                    self.status_progress.show()
                    self.status_progress.setRange(0, n_frames - 1)
                    self.status_progress.setValue(frame)
                    self.status_progress.update()
                    self.status_progress.repaint()
                self.status_msg.update()
                self.status_msg.parent().update()
                self.status_msg.repaint()

            # temporarily hide interface
            default_visibility = self.visibility
            self.toggle_interface(value=False)
            # set cursor to busy
            default_cursor = self.interactor.cursor()
            self.interactor.setCursor(QCursor(Qt.WaitCursor))
            try:
                self._save_movie(
                    filename=filename,
                    time_dilation=(1. / self.playback_speed),
                    callback=frame_callback,
                    **kwargs
                )
            except (Exception, KeyboardInterrupt):
                warn('Movie saving aborted:\n' + traceback.format_exc())
            # restore visibility
            self.toggle_interface(value=default_visibility)
            # restore cursor
            self.interactor.setCursor(default_cursor)
    else:
        self._save_movie(filename, time_dilation, tmin, tmax,
                         framerate, interpolation, codec,
                         bitrate, callback, time_viewer, **kwargs)
def _make_movie_frames(self, time_dilation, tmin, tmax, framerate,
                       interpolation, callback, time_viewer):
    """Render one screenshot per movie frame over [tmin, tmax]."""
    from math import floor
    # find tmin
    if tmin is None:
        tmin = self._times[0]
    elif tmin < self._times[0]:
        raise ValueError("tmin=%r is smaller than the first time point "
                         "(%r)" % (tmin, self._times[0]))
    # find indexes at which to create frames
    if tmax is None:
        tmax = self._times[-1]
    elif tmax > self._times[-1]:
        raise ValueError("tmax=%r is greater than the latest time point "
                         "(%r)" % (tmax, self._times[-1]))
    n_frames = floor((tmax - tmin) * time_dilation * framerate)
    times = np.arange(n_frames, dtype=float)
    times /= framerate * time_dilation
    times += tmin
    # fractional sample indices for each frame time
    time_idx = np.interp(times, self._times, np.arange(self._n_times))
    n_times = len(time_idx)
    if n_times == 0:
        raise ValueError("No time points selected")
    logger.debug("Save movie for time points/samples\n%s\n%s"
                 % (times, time_idx))
    # Sometimes the first screenshot is rendered with a different
    # resolution on OS X
    self.screenshot(time_viewer=time_viewer)
    old_mode = self.time_interpolation
    if interpolation is not None:
        self.set_time_interpolation(interpolation)
    try:
        images = [
            self.screenshot(time_viewer=time_viewer)
            for _ in self._iter_time(time_idx, callback)]
    finally:
        # always restore the user's interpolation mode
        self.set_time_interpolation(old_mode)
    if callback is not None:
        callback(frame=len(time_idx), n_frames=len(time_idx))
    return images
def _iter_time(self, time_idx, callback):
    """Iterate through time points, then reset to current time.

    Parameters
    ----------
    time_idx : array_like
        Time point indexes through which to iterate.
    callback : callable | None
        Callback to call before yielding each frame.

    Yields
    ------
    idx : int | float
        Current index.

    Notes
    -----
    Used by movie and image sequence saving functions.
    """
    if self.time_viewer:
        # go through the time-slider callback so the GUI stays in sync
        func = partial(self.callbacks["time"],
                       update_widget=True)
    else:
        func = self.set_time_point
    current_time_idx = self._data["time_idx"]
    for ii, idx in enumerate(time_idx):
        func(idx)
        if callback is not None:
            callback(frame=ii, n_frames=len(time_idx))
        yield idx

    # Restore original time index
    func(current_time_idx)
def _show(self):
    """Request rendering of the window."""
    try:
        return self._renderer.show()
    except RuntimeError:
        # renderer already closed / not running: nothing to show
        logger.info("No active/running renderer available.")
def _check_stc(self, hemi, array, vertices):
    """Resolve (stc, array, vertices) from a SourceEstimate or raw array.

    If ``array`` is a SourceEstimate, extract the data and vertices that
    match ``hemi``; otherwise pass the inputs through with ``stc=None``.
    """
    from ...source_estimate import (
        _BaseSourceEstimate, _BaseSurfaceSourceEstimate,
        _BaseMixedSourceEstimate, _BaseVolSourceEstimate
    )
    if isinstance(array, _BaseSourceEstimate):
        stc = array
        stc_surf = stc_vol = None
        if isinstance(stc, _BaseSurfaceSourceEstimate):
            stc_surf = stc
        elif isinstance(stc, _BaseMixedSourceEstimate):
            # mixed estimates carry both surface and volume parts
            stc_surf = stc.surface() if hemi != 'vol' else None
            stc_vol = stc.volume() if hemi == 'vol' else None
        elif isinstance(stc, _BaseVolSourceEstimate):
            stc_vol = stc if hemi == 'vol' else None
        else:
            raise TypeError("stc not supported")
        if stc_surf is None and stc_vol is None:
            raise ValueError("No data to be added")
        if stc_surf is not None:
            array = getattr(stc_surf, hemi + '_data')
            vertices = stc_surf.vertices[0 if hemi == 'lh' else 1]
        if stc_vol is not None:
            array = stc_vol.data
            vertices = np.concatenate(stc_vol.vertices)
    else:
        stc = None
    return stc, array, vertices
def _check_hemi(self, hemi, extras=()):
"""Check for safe single-hemi input, returns str."""
if hemi is None:
if self._hemi not in ['lh', 'rh']:
raise ValueError('hemi must not be None when both '
'hemispheres are displayed')
else:
hemi = self._hemi
elif hemi not in ['lh', 'rh'] + list(extras):
extra = ' or None' if self._hemi in ['lh', 'rh'] else ''
raise ValueError('hemi must be either "lh" or "rh"' +
extra + ", got " + str(hemi))
return hemi
def _check_hemis(self, hemi):
"""Check for safe dual or single-hemi input, returns list."""
if hemi is None:
if self._hemi not in ['lh', 'rh']:
hemi = ['lh', 'rh']
else:
hemi = [self._hemi]
elif hemi not in ['lh', 'rh']:
extra = ' or None' if self._hemi in ['lh', 'rh'] else ''
raise ValueError('hemi must be either "lh" or "rh"' + extra)
else:
hemi = [hemi]
return hemi
def _to_borders(self, label, hemi, borders, restrict_idx=None):
    """Convert a label/parc to borders (modifies ``label`` in place)."""
    if not isinstance(borders, (bool, int)) or borders < 0:
        raise ValueError('borders must be a bool or positive integer')
    if borders:
        n_vertices = label.size
        edges = mesh_edges(self.geo[hemi].orig_faces)
        edges = edges.tocoo()
        # edges whose endpoints carry different label values lie on a
        # label boundary
        border_edges = label[edges.row] != label[edges.col]
        show = np.zeros(n_vertices, dtype=np.int64)
        keep_idx = np.unique(edges.row[border_edges])
        if isinstance(borders, int):
            # dilate the border by `borders` rings of adjacent faces
            # (note: bool is a subclass of int, so True gives one ring)
            for _ in range(borders):
                keep_idx = np.in1d(
                    self.geo[hemi].orig_faces.ravel(), keep_idx)
                keep_idx.shape = self.geo[hemi].orig_faces.shape
                keep_idx = self.geo[hemi].orig_faces[
                    np.any(keep_idx, axis=1)]
                keep_idx = np.unique(keep_idx)
            if restrict_idx is not None:
                keep_idx = keep_idx[np.in1d(keep_idx, restrict_idx)]
        # zero out (in place) everything that is not on the border
        show[keep_idx] = 1
        label *= show
def enable_depth_peeling(self):
    """Enable depth peeling."""
    self._renderer.enable_depth_peeling()

def _update(self):
    """Force a render of the current scene (backend-dependent)."""
    from ..backends import renderer
    if renderer.get_3d_backend() in ['pyvista', 'notebook']:
        if self.notebook and self._renderer.figure.display is not None:
            self._renderer.figure.display.update()
        else:
            self._renderer.plotter.update()

def get_picked_points(self):
    """Return the vertices of the picked points.

    Returns
    -------
    points : list of int | None
        The vertices picked by the time viewer.
    """
    # implicitly returns None when no time viewer has been configured
    if hasattr(self, "time_viewer"):
        return self.picked_points

def __hash__(self):
    """Hash the object."""
    # instances are mutable and not intended to be hashable
    raise NotImplementedError
def _safe_interp1d(x, y, kind='linear', axis=-1, assume_sorted=False):
"""Work around interp1d not liking singleton dimensions."""
from scipy.interpolate import interp1d
if y.shape[axis] == 1:
def func(x):
return np.take(y, np.zeros(np.asarray(x).shape, int), axis=axis)
return func
else:
return interp1d(x, y, kind, axis=axis, assume_sorted=assume_sorted)
def _update_limits(fmin, fmid, fmax, center, array):
if center is None:
if fmin is None:
fmin = array.min() if array.size > 0 else 0
if fmax is None:
fmax = array.max() if array.size > 0 else 1
else:
if fmin is None:
fmin = 0
if fmax is None:
fmax = np.abs(center - array).max() if array.size > 0 else 1
if fmid is None:
fmid = (fmin + fmax) / 2.
if fmin >= fmid:
raise RuntimeError('min must be < mid, got %0.4g >= %0.4g'
% (fmin, fmid))
if fmid >= fmax:
raise RuntimeError('mid must be < max, got %0.4g >= %0.4g'
% (fmid, fmax))
return fmin, fmid, fmax
def _get_range(brain):
val = np.abs(np.concatenate(list(brain._current_act_data.values())))
return [np.min(val), np.max(val)]
class _FakeIren():
    """No-op stand-in for a render-window interactor.

    Provides the interactor event methods as no-ops so callers can
    invoke them unconditionally when no real interactor is available.
    """

    def EnterEvent(self):
        pass

    def MouseMoveEvent(self):
        pass

    def LeaveEvent(self):
        pass

    def SetEventInformation(self, *args, **kwargs):
        pass

    def CharEvent(self):
        pass

    def KeyPressEvent(self, *args, **kwargs):
        pass

    def KeyReleaseEvent(self, *args, **kwargs):
        pass
| Eric89GXL/mne-python | mne/viz/_brain/_brain.py | Python | bsd-3-clause | 132,116 | [
"Mayavi",
"VTK"
] | 9a533fe818a31c300f5d33851bd9e88e900ab8fcdde5c1f59c499bb52f8b1658 |
#!/usr/bin/env python
#
# vasp_raman.py v. 0.6.0
#
# Raman off-resonant activity calculator
# using VASP as a back-end.
#
# Contributors: Alexandr Fonari (Georgia Tech)
# Shannon Stauffer (UT Austin)
#
# URL: http://raman-sc.github.io
#
# MIT license, 2013 - 2016
#
import re
import sys
def MAT_m_VEC(m, v):
    """Return the matrix-vector product of m and v as a plain list."""
    # pre-size to len(v), matching the original padding behavior
    p = [0.0 for _ in range(len(v))]
    for i, row in enumerate(m):
        assert len(v) == len(row), 'Length of the matrix row is not equal to the length of the vector'
        p[i] = sum(a * b for a, b in zip(row, v))
    return p
def T(m):
    """Return the transpose of matrix m (list of rows).

    The previous implementation sized the inner loop with ``len(m[j])``
    (a column count) while indexing rows, which was only correct for
    square matrices; this version also handles rectangular input and is
    identical for square input.
    """
    return [[m[i][j] for i in range(len(m))] for j in range(len(m[0]))]
def parse_poscar(poscar_fh):
    """Parse a VASP POSCAR from an open file handle.

    Returns
    -------
    nat : int
        Total number of atoms.
    vol : float
        Cell volume (determinant of the scaled lattice matrix).
    b : list of list of float
        Scaled lattice vectors (one row per vector).
    positions : list of list of float
        Cartesian atomic positions.
    poscar_header : str
        Raw header lines (title and 'Cartesian' added later by callers).
    """
    # modified subroutine from phonopy 1.8.3 (New BSD license)
    #
    poscar_fh.seek(0)  # just in case
    lines = poscar_fh.readlines()
    #
    scale = float(lines[1])
    if scale < 0.0:
        # print() call form keeps this file Python 2/3 compatible
        print("[parse_poscar]: ERROR negative scale not implemented.")
        sys.exit(1)
    #
    b = []
    for i in range(2, 5):
        b.append([float(x)*scale for x in lines[i].split()[:3]])
    # determinant of the 3x3 lattice matrix = cell volume
    vol = b[0][0]*b[1][1]*b[2][2] + b[1][0]*b[2][1]*b[0][2] + b[2][0]*b[0][1]*b[1][2] - \
          b[0][2]*b[1][1]*b[2][0] - b[2][1]*b[1][2]*b[0][0] - b[2][2]*b[0][1]*b[1][0]
    # VASP 5 format inserts a line of element symbols before the counts
    try:
        num_atoms = [int(x) for x in lines[5].split()]
        line_at = 6
    except ValueError:
        symbols = [x for x in lines[5].split()]
        num_atoms = [int(x) for x in lines[6].split()]
        line_at = 7
    nat = sum(num_atoms)
    # optional 'Selective dynamics' line
    if lines[line_at][0].lower() == 's':
        line_at += 1
    # 'Cartesian'/'Kartesisch' means absolute coords; otherwise fractional
    if (lines[line_at][0].lower() == 'c' or lines[line_at][0].lower() == 'k'):
        is_scaled = False
    else:
        is_scaled = True
    #
    line_at += 1
    #
    positions = []
    for i in range(line_at, line_at + nat):
        pos = [float(x) for x in lines[i].split()[:3]]
        #
        if is_scaled:
            # convert fractional coordinates to Cartesian
            pos = MAT_m_VEC(T(b), pos)
        #
        positions.append(pos)
    #
    poscar_header = ''.join(lines[1:line_at-1])  # will add title and 'Cartesian' later
    return nat, vol, b, positions, poscar_header
def parse_env_params(params):
    """Parse a 'FIRST_LAST_NDERIV_STEPSIZE' parameter string.

    Returns (first, last, nderiv, step_size) as (int, int, int, float).
    Exits with an error when the string does not have four fields.
    """
    tmp = params.strip().split('_')
    if len(tmp) != 4:
        # print() call form keeps this file Python 2/3 compatible
        print("[parse_env_params]: ERROR there should be exactly four parameters")
        sys.exit(1)
    #
    [first, last, nderiv, step_size] = [int(tmp[0]), int(tmp[1]), int(tmp[2]), float(tmp[3])]
    #
    return first, last, nderiv, step_size
#### subs for the output from VTST tools
def parse_freqdat(freqdat_fh, nat):
    """Read 3*nat mode frequencies (first column) from a VTST freq.dat."""
    freqdat_fh.seek(0)  # just in case
    eigvals = []
    # all frequencies should be supplied, regardless of requested to calculate
    for _ in range(nat * 3):
        # one frequency per line; any extra columns are ignored
        eigvals.append(float(freqdat_fh.readline().split()[0]))
    return eigvals
#
def parse_modesdat(modesdat_fh, nat):
    """Read 3*nat mode eigenvectors from a VTST-tools modes_sqrt_amu.dat handle.

    Each mode is nat lines of three floats followed by one separator line.
    Returns (eigvecs, norms) where eigvecs[i] is a nat x 3 list and
    norms[i] is the Euclidean norm of that eigenvector.
    """
    from math import sqrt
    modesdat_fh.seek(0)  # rewind in case the handle was read before
    eigvecs = []
    norms = []
    for _ in range(nat * 3):
        mode = []
        for _ in range(nat):
            fields = modesdat_fh.readline().split()
            mode.append([float(fields[col]) for col in range(3)])
        modesdat_fh.readline()  # consume the blank separator line
        eigvecs.append(mode)
        norm_sq = sum(component ** 2 for row in mode for component in row)
        norms.append(sqrt(norm_sq))
    return eigvecs, norms
#### end subs for VTST
#
def get_modes_from_OUTCAR(outcar_fh, nat):
    """Extract phonon frequencies and eigenvectors from a VASP OUTCAR handle.

    Requires an OUTCAR written with NWRITE=3 so the mass-weighted
    eigenvector block ("Eigenvectors after division by SQRT(mass)") is
    present. Returns (eigvals, eigvecs, norms) for all 3*nat modes;
    exits the process if the block cannot be found.
    """
    from math import sqrt
    eigvals = [ 0.0 for i in range(nat*3) ]
    eigvecs = [ 0.0 for i in range(nat*3) ]
    norms = [ 0.0 for i in range(nat*3) ]
    #
    outcar_fh.seek(0) # just in case
    while True:
        line = outcar_fh.readline()
        if not line:
            break
        #
        if "Eigenvectors after division by SQRT(mass)" in line:
            # Skip the four banner lines that precede the first mode.
            outcar_fh.readline() # empty line
            outcar_fh.readline() # Eigenvectors and eigenvalues of the dynamical matrix
            outcar_fh.readline() # ----------------------------------------------------
            outcar_fh.readline() # empty line
            #
            for i in range(nat*3): # all frequencies should be supplied, regardless of those requested to calculate
                outcar_fh.readline() # empty line
                # Mode header line; group(2) captures the frequency in cm-1.
                p = re.search(r'^\s*(\d+).+?([\.\d]+) cm-1', outcar_fh.readline())
                eigvals[i] = float(p.group(2))
                #
                outcar_fh.readline() # X Y Z dx dy dz
                eigvec = []
                #
                for j in range(nat):
                    tmp = outcar_fh.readline().split()
                    #
                    # Columns 3-5 are the displacement components dx dy dz.
                    eigvec.append([ float(tmp[x]) for x in range(3,6) ])
                #
                eigvecs[i] = eigvec
                norms[i] = sqrt( sum( [abs(x)**2 for sublist in eigvec for x in sublist] ) )
            #
            return eigvals, eigvecs, norms
    #
    print "[get_modes_from_OUTCAR]: ERROR Couldn't find 'Eigenvectors after division by SQRT(mass)' in OUTCAR. Use 'NWRITE=3' in INCAR. Exiting..."
    sys.exit(1)
#
def get_epsilon_from_OUTCAR(outcar_fh):
    """Parse the macroscopic static dielectric tensor from an OUTCAR handle.

    Scans for the "MACROSCOPIC STATIC DIELECTRIC TENSOR" banner, skips the
    separator line that follows it, and reads the next three lines as the
    3x3 tensor rows.

    Returns the tensor as a list of three lists of floats.
    Raises RuntimeError if the banner is not found.

    Fix: the original had an unreachable ``return 1`` after the ``raise``;
    that dead statement is removed (behaviour is unchanged).
    """
    epsilon = []
    #
    outcar_fh.seek(0)  # just in case the handle was read before
    while True:
        line = outcar_fh.readline()
        if not line:
            break
        #
        if "MACROSCOPIC STATIC DIELECTRIC TENSOR" in line:
            outcar_fh.readline()  # skip the dashed separator line
            epsilon.append([float(x) for x in outcar_fh.readline().split()])
            epsilon.append([float(x) for x in outcar_fh.readline().split()])
            epsilon.append([float(x) for x in outcar_fh.readline().split()])
            return epsilon
    #
    raise RuntimeError("[get_epsilon_from_OUTCAR]: ERROR Couldn't find dielectric tensor in OUTCAR")
#
if __name__ == '__main__':
    # Driver: for each requested phonon mode, generate +/- displaced POSCARs,
    # run VASP, extract dielectric tensors and assemble Raman activities.
    from math import pi
    from shutil import move
    import os
    import datetime
    import time
    #import argparse
    import optparse
    #
    print ""
    print " Raman off-resonant activity calculator,"
    print " using VASP as a back-end."
    print ""
    print " Contributors: Alexandr Fonari (Georgia Tech)"
    print " Shannon Stauffer (UT Austin)"
    print " MIT License, 2013"
    print " URL: http://raman-sc.github.io"
    print " Started at: "+datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
    print ""
    #
    description = "Before run, set environment variables:\n"
    description += " VASP_RAMAN_RUN='mpirun vasp'\n"
    description += " VASP_RAMAN_PARAMS='[first-mode]_[last-mode]_[nderiv]_[step-size]'\n\n"
    description += "bash one-liner is:\n"
    description += "VASP_RAMAN_RUN='mpirun vasp' VASP_RAMAN_PARAMS='1_2_2_0.01' python vasp_raman.py"
    #
    parser = optparse.OptionParser(description=description)
    parser.add_option('-g', '--gen', help='Generate POSCAR only', action='store_true')
    parser.add_option('-u', '--use_poscar', help='Use provided POSCAR in the folder, USE WITH CAUTION!!', action='store_true')
    (options, args) = parser.parse_args()
    #args = vars(parser.parse_args())
    args = vars(options)
    #
    # Both environment variables are mandatory; bail out with help text otherwise.
    VASP_RAMAN_RUN = os.environ.get('VASP_RAMAN_RUN')
    if VASP_RAMAN_RUN == None:
        print "[__main__]: ERROR Set environment variable 'VASP_RAMAN_RUN'"
        print ""
        parser.print_help()
        sys.exit(1)
    print "[__main__]: VASP_RAMAN_RUN='"+VASP_RAMAN_RUN+"'"
    #
    VASP_RAMAN_PARAMS = os.environ.get('VASP_RAMAN_PARAMS')
    if VASP_RAMAN_PARAMS == None:
        print "[__main__]: ERROR Set environment variable 'VASP_RAMAN_PARAMS'"
        print ""
        parser.print_help()
        sys.exit(1)
    print "[__main__]: VASP_RAMAN_PARAMS='"+VASP_RAMAN_PARAMS+"'"
    #
    first, last, nderiv, step_size = parse_env_params(VASP_RAMAN_PARAMS)
    assert first >= 1, '[__main__]: First mode should be equal or larger than 1'
    assert last >= first, '[__main__]: Last mode should be equal or larger than first mode'
    if args['gen']: assert last == first, "[__main__]: '-gen' mode -> only generation for the one mode makes sense"
    assert nderiv == 2, '[__main__]: At this time, nderiv = 2 is the only supported'
    disps = [-1, 1] # hardcoded for
    coeffs = [-0.5, 0.5] # three point stencil (nderiv=2)
    #
    try:
        poscar_fh = open('POSCAR.phon', 'r')
    except IOError:
        print "[__main__]: ERROR Couldn't open input file POSCAR.phon, exiting...\n"
        sys.exit(1)
    #
    # nat, vol, b, poscar_header = parse_poscar_header(poscar_fh)
    nat, vol, b, pos, poscar_header = parse_poscar(poscar_fh)
    print pos
    #print poscar_header
    #sys.exit(0)
    #
    # either use modes from vtst tools or VASP
    if os.path.isfile('freq.dat') and os.path.isfile('modes_sqrt_amu.dat'):
        try:
            freqdat_fh = open('freq.dat', 'r')
        except IOError:
            print "[__main__]: ERROR Couldn't open freq.dat, exiting...\n"
            sys.exit(1)
        #
        eigvals = parse_freqdat(freqdat_fh, nat)
        freqdat_fh.close()
        #
        try:
            modes_fh = open('modes_sqrt_amu.dat' , 'r')
        except IOError:
            print "[__main__]: ERROR Couldn't open modes_sqrt_amu.dat, exiting...\n"
            sys.exit(1)
        #
        eigvecs, norms = parse_modesdat(modes_fh, nat)
        modes_fh.close()
        #
    elif os.path.isfile('OUTCAR.phon'):
        try:
            outcar_fh = open('OUTCAR.phon', 'r')
        except IOError:
            print "[__main__]: ERROR Couldn't open OUTCAR.phon, exiting...\n"
            sys.exit(1)
        #
        eigvals, eigvecs, norms = get_modes_from_OUTCAR(outcar_fh, nat)
        outcar_fh.close()
        #
    else:
        print "[__main__]: Neither OUTCAR.phon nor freq.dat/modes_sqrt_amu.dat were found, nothing to do, exiting..."
        sys.exit(1)
    #
    output_fh = open('vasp_raman.dat', 'w')
    output_fh.write("# mode freq(cm-1) alpha beta2 activity\n")
    # Modes are 1-based on the command line, 0-based in the eigen arrays.
    for i in range(first-1, last):
        eigval = eigvals[i]
        eigvec = eigvecs[i]
        norm = norms[i]
        #
        print ""
        print "[__main__]: Mode #%i: frequency %10.7f cm-1; norm: %10.7f" % ( i+1, eigval, norm )
        #
        # ra accumulates d(epsilon)/d(mode coordinate) via the finite-difference stencil.
        ra = [[0.0 for x in range(3)] for y in range(3)]
        for j in range(len(disps)):
            disp_filename = 'OUTCAR.%04d.%+d.out' % (i+1, disps[j])
            #
            try:
                outcar_fh = open(disp_filename, 'r')
                print "[__main__]: File "+disp_filename+" exists, parsing..."
            except IOError:
                # No cached OUTCAR for this displacement: build the displaced POSCAR
                # (unless the user supplied one) and run VASP.
                if args['use_poscar'] != True:
                    print "[__main__]: File "+disp_filename+" not found, preparing displaced POSCAR"
                    poscar_fh = open('POSCAR', 'w')
                    poscar_fh.write("%s %4.1e \n" % (disp_filename, step_size))
                    poscar_fh.write(poscar_header)
                    poscar_fh.write("Cartesian\n")
                    #
                    for k in range(nat):
                        # Displace each atom along the normalized mode eigenvector.
                        pos_disp = [ pos[k][l] + eigvec[k][l]*step_size*disps[j]/norm for l in range(3)]
                        poscar_fh.write( '%15.10f %15.10f %15.10f\n' % (pos_disp[0], pos_disp[1], pos_disp[2]) )
                        #print '%10.6f %10.6f %10.6f %10.6f %10.6f %10.6f' % (pos[k][0], pos[k][1], pos[k][2], dis[k][0], dis[k][1], dis[k][2])
                    poscar_fh.close()
                else:
                    print "[__main__]: Using provided POSCAR"
                #
                if args['gen']: # only generate POSCARs
                    poscar_fn = 'POSCAR.%+d.out' % disps[j]
                    move('POSCAR', poscar_fn)
                    print "[__main__]: '-gen' mode -> "+poscar_fn+" with displaced atoms have been generated"
                    #
                    if j+1 == len(disps): # last iteration for the current displacements list
                        print "[__main__]: '-gen' mode -> POSCAR files with displaced atoms have been generated, exiting now"
                        sys.exit(0)
                else: # run VASP here
                    print "[__main__]: Running VASP..."
                    os.system(VASP_RAMAN_RUN)
                    try:
                        move('OUTCAR', disp_filename)
                    except IOError:
                        print "[__main__]: ERROR Couldn't find OUTCAR file, exiting..."
                        sys.exit(1)
                    #
                    outcar_fh = open(disp_filename, 'r')
            #
            try:
                eps = get_epsilon_from_OUTCAR(outcar_fh)
                outcar_fh.close()
            except Exception, err:
                print err
                print "[__main__]: Moving "+disp_filename+" back to 'OUTCAR' and exiting..."
                move(disp_filename, 'OUTCAR')
                sys.exit(1)
            #
            for m in range(3):
                for n in range(3):
                    ra[m][n] += eps[m][n] * coeffs[j]/step_size * norm * vol/(4.0*pi)
                    #units: A^2/amu^1/2 = dimless * 1/A * 1/amu^1/2 * A^3
        #
        # Standard Raman invariants: isotropic alpha and anisotropy beta^2.
        alpha = (ra[0][0] + ra[1][1] + ra[2][2])/3.0
        beta2 = ( (ra[0][0] - ra[1][1])**2 + (ra[0][0] - ra[2][2])**2 + (ra[1][1] - ra[2][2])**2 + 6.0 * (ra[0][1]**2 + ra[0][2]**2 + ra[1][2]**2) )/2.0
        print ""
        print "! %4i freq: %10.5f alpha: %10.7f beta2: %10.7f activity: %10.7f " % (i+1, eigval, alpha, beta2, 45.0*alpha**2 + 7.0*beta2)
        output_fh.write("%03i %10.5f %10.7f %10.7f %10.7f\n" % (i+1, eigval, alpha, beta2, 45.0*alpha**2 + 7.0*beta2))
        output_fh.flush()
    #
    output_fh.close()
| raman-sc/VASP | vasp_raman.py | Python | mit | 13,862 | [
"VASP",
"phonopy"
] | 00b9e15a2a277299ae1093915bcd9cf6f4f70d26f47acd289055502bd1efcd0c |
#!/usr/bin/env python
########################################################################
# $HeadURL$
########################################################################
"""
Monitor the jobs present in the repository
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
import os, sys
# Register the usage banner (first docstring line + CLI conventions) before parsing.
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
                                     'Usage:',
                                     ' %s [option|cfgfile] ... RepoDir' % Script.scriptName,
                                     'Arguments:',
                                     ' RepoDir: Location of Job Repository' ] ) )
Script.parseCommandLine( ignoreErrors = False )
args = Script.getPositionalArgs()
# Exactly one positional argument (the job-repository location) is required.
if len( args ) != 1:
    Script.showHelp()
repoLocation = args[0]
from DIRAC.Interfaces.API.Dirac import Dirac
# Attach the Dirac API to the existing repository so its jobs can be monitored.
dirac = Dirac( withRepo = True, repoLocation = repoLocation )
exitCode = 0
result = dirac.monitorRepository( printOutput = True )
if not result['OK']:
    print 'ERROR: ', result['Message']
    exitCode = 2
DIRAC.exit( exitCode )
| Sbalbp/DIRAC | Interfaces/scripts/dirac-repo-monitor.py | Python | gpl-3.0 | 1,077 | [
"DIRAC"
] | 056890a44da204af635ca29515752634e243df5bb0f777a018c1820373969996 |
#!/usr/bin/env python
# Copyright 2001 by Brad Chapman. All rights reserved.
# Revisions copyright 2011-2013 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Test the Location code located in SeqFeature.py
This checks to be sure fuzzy and non-fuzzy representations of locations
are working properly.
"""
from __future__ import print_function
from Bio import SeqFeature
# --- test fuzzy representations
print("Testing fuzzy representations...")
# check the positions alone
exact_pos = SeqFeature.ExactPosition(5)
within_pos_s = SeqFeature.WithinPosition(10, left=10, right=13)
within_pos_e = SeqFeature.WithinPosition(13, left=10, right=13)
between_pos_e = SeqFeature.BetweenPosition(24, left=20, right=24)
before_pos = SeqFeature.BeforePosition(15)
after_pos = SeqFeature.AfterPosition(40)
print("Exact: %s" % exact_pos)
print("Within (as start, %i): %s" % (int(within_pos_s), within_pos_s))
print("Within (as end, %i): %s" % (int(within_pos_e), within_pos_e))
print("Between (as end, %i): %s" % (int(between_pos_e), between_pos_e))
print("Before: %s" % before_pos)
print("After: %s" % after_pos)
# put these into Locations
location1 = SeqFeature.FeatureLocation(exact_pos, within_pos_e)
location2 = SeqFeature.FeatureLocation(before_pos, between_pos_e)
location3 = SeqFeature.FeatureLocation(within_pos_s, after_pos)
# Print each location together with its (possibly fuzzy) start/end positions.
for location in [location1, location2, location3]:
    print("Location: %s" % location)
    print(" Start: %s" % location.start)
    print(" End : %s" % location.end)
# --- test non-fuzzy representations
print("Testing non-fuzzy representations...")
# nofuzzy_start/nofuzzy_end collapse fuzzy positions to plain integers.
for location in [location1, location2, location3]:
    print("Location: %s" % location)
    print(" Non-Fuzzy Start: %s" % location.nofuzzy_start)
    print(" Non-Fuzzy End: %s" % location.nofuzzy_end)
"Biopython"
] | 14835f6a4805d86a122061c65c9f9de64430ff6b5b391bbd1b6ef0374285874b |
#!/usr/bin/python3
import argparse
import glob
import os
import time
import random
COLOURS = (b'\xFF\x00\x00', b'\x00\xFF\x00', b'\x00\x00\xFF', b'\xFF\xFF\x00', b'\xFF\x00\xFF', b'\x00\xFF\xFF')
def write_binary(driver_path, device_file, payload):
    """Write raw bytes to the sysfs attribute device_file under driver_path."""
    target = os.path.join(driver_path, device_file)
    with open(target, 'wb') as handle:
        handle.write(payload)
def read_string(driver_path, device_file):
    """Return the text of the sysfs attribute device_file, trailing newlines stripped."""
    attr_path = os.path.join(driver_path, device_file)
    with open(attr_path, 'r') as handle:
        contents = handle.read()
    return contents.rstrip('\n')
def write_string(driver_path, device_file, payload):
    """Write a text payload to the sysfs attribute device_file under driver_path."""
    attr_path = os.path.join(driver_path, device_file)
    with open(attr_path, 'w') as handle:
        handle.write(payload)
def find_devices(vid, pid):
    """Yield razeraccessory driver paths matching the given USB vid:pid.

    Only paths that expose a 'device_type' attribute are yielded, so the
    caller can assume the sysfs node belongs to a live device.
    """
    pattern = os.path.join('/sys/bus/hid/drivers/razeraccessory',
                           '*:{0:04X}:{1:04X}.*'.format(vid, pid))
    for candidate in glob.glob(pattern):
        if os.path.exists(os.path.join(candidate, 'device_type')):
            yield candidate
def parse_args():
    """Parse the command-line flags selecting which test phases to skip."""
    parser = argparse.ArgumentParser()
    for flag in ('--skip-standard', '--skip-custom',
                 '--skip-game-led', '--skip-macro-led'):
        parser.add_argument(flag, action='store_true')
    return parser.parse_args()
if __name__ == '__main__':
    # Interactive hardware test: walks each attached Razer Firefly through
    # brightness, built-in effect, and custom-frame tests via its sysfs driver.
    args = parse_args()
    found_chroma = False
    # 0x1532:0x0C00 is the Razer Firefly USB vid:pid.
    for index, driver_path in enumerate(find_devices(0x1532, 0x0C00), start=1):
        found_chroma = True
        print("Razer Firefly {0}\n".format(index))
        print("Driver version: {0}".format(read_string(driver_path, 'version')))
        print("Driver firmware version: {0}".format(read_string(driver_path, 'firmware_version')))
        print("Device serial: {0}".format(read_string(driver_path, 'device_serial')))
        print("Device type: {0}".format(read_string(driver_path, 'device_type')))
        print("Device mode: {0}".format(read_string(driver_path, 'device_mode')))
        # Set to static red so that we have something standard
        write_binary(driver_path, 'matrix_effect_static', b'\xFF\x00\x00')
        if not args.skip_standard:
            print("Starting brightness test. Press enter to begin.")
            input()
            print("Max brightness...", end='')
            write_string(driver_path, 'matrix_brightness', '255')
            time.sleep(1)
            print("brightness ({0})".format(read_string(driver_path, 'matrix_brightness')))
            time.sleep(1)
            print("Half brightness...", end='')
            write_string(driver_path, 'matrix_brightness', '128')
            time.sleep(1)
            print("brightness ({0})".format(read_string(driver_path, 'matrix_brightness')))
            time.sleep(1)
            print("Zero brightness...", end='')
            write_string(driver_path, 'matrix_brightness', '0')
            time.sleep(1)
            print("brightness ({0})".format(read_string(driver_path, 'matrix_brightness')))
            time.sleep(1)
            # Restore full brightness before the colour-effect tests.
            write_string(driver_path, 'matrix_brightness', '255')
            print("Starting other colour effect tests. Press enter to begin.")
            input()
            print("Green Static")
            write_binary(driver_path, 'matrix_effect_static', b'\x00\xFF\x00')
            time.sleep(5)
            print("Cyan Static")
            write_binary(driver_path, 'matrix_effect_static', b'\x00\xFF\xFF')
            time.sleep(5)
            print("Spectrum")
            write_binary(driver_path, 'matrix_effect_spectrum', b'\x00')
            time.sleep(10)
            print("None")
            write_binary(driver_path, 'matrix_effect_none', b'\x00')
            time.sleep(5)
            print("Wave Left")
            write_string(driver_path, 'matrix_effect_wave', '1')
            time.sleep(5)
            print("Wave Right")
            write_string(driver_path, 'matrix_effect_wave', '2')
            time.sleep(5)
            print("Breathing random")
            write_binary(driver_path, 'matrix_effect_breath', b'\x00')
            time.sleep(10)
            print("Breathing red")
            write_binary(driver_path, 'matrix_effect_breath', b'\xFF\x00\x00')
            time.sleep(10)
            print("Breathing blue-green")
            write_binary(driver_path, 'matrix_effect_breath', b'\x00\xFF\x00\x00\x00\xFF')
            time.sleep(10)
        if not args.skip_custom:
            # Custom LEDs all rows
            # Frame payload header: row 0, first column 0x00, last column 0x0E.
            payload_all = b'\x00\x00\x0E'
            for i in range(0, 15): # 15 colours 0x00-0x0E
                payload_all += random.choice(COLOURS)
            payload_m1_5 = b''
            for led in (0x00, 0x0E):
                led_byte = led.to_bytes(1, byteorder='big')
                payload_m1_5 += b'\x00' + led_byte + led_byte + b'\xFF\xFF\xFF'
            print("Custom LED matrix colours test. Press enter to begin.")
            input()
            write_binary(driver_path, 'matrix_custom_frame', payload_all)
            write_binary(driver_path, 'matrix_effect_custom', b'\x00')
            print("Custom LED matrix partial colours test. First and last led to white. Press enter to begin.")
            input()
            write_binary(driver_path, 'matrix_custom_frame', payload_m1_5)
            write_binary(driver_path, 'matrix_effect_custom', b'\x00')
            time.sleep(0.5)
        print("Finished")
    if not found_chroma:
        print("No Fireflies found")
| lah7/openrazer | scripts/driver/firefly/test.py | Python | gpl-2.0 | 5,515 | [
"Firefly"
] | 7f7cfe0ddb8cdb049ffdc1465e0599203f680f303b51be070afaace2b75bd754 |
"""
Implements Autodock Vina's pose-generation in tensorflow.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import numpy as np
import tensorflow as tf
from deepchem.models import Model
from deepchem.nn import model_ops
import deepchem.utils.rdkit_util as rdkit_util
def compute_neighbor_list(coords, nbr_cutoff, N, M, n_cells, ndim=3, k=5):
  """Computes a neighbor list from atom coordinates.

  Parameters
  ----------
  coords: tf.Tensor
    Shape (N, ndim)
  nbr_cutoff: float
    Grid-cell edge length used to bin atoms (Angstrom).
  N: int
    Max number atoms
  M: int
    Max number neighbors
  n_cells: int
    Number of grid cells the box is divided into.
  ndim: int
    Dimensionality of space.
  k: int
    Number of nearest neighbors to pull down.

  Returns
  -------
  nbr_list: tf.Tensor
    Shape (N, M) of atom indices
  """
  start = tf.to_int32(tf.reduce_min(coords))
  stop = tf.to_int32(tf.reduce_max(coords))
  cells = get_cells(start, stop, nbr_cutoff, ndim=ndim)
  # Associate each atom with cell it belongs to. O(N*n_cells)
  # Shape (n_cells, k)
  atoms_in_cells, _ = put_atoms_in_cells(coords, cells, N, n_cells, ndim, k)
  # Shape (N, 1)
  cells_for_atoms = get_cells_for_atoms(coords, cells, N, n_cells, ndim)
  # Associate each cell with its neighbor cells. Assumes periodic boundary
  # conditions, so does wrapround. O(constant)
  # Shape (n_cells, 26)
  neighbor_cells = compute_neighbor_cells(cells, ndim, n_cells)
  # Shape (N, 26)
  neighbor_cells = tf.squeeze(tf.gather(neighbor_cells, cells_for_atoms))
  # coords of shape (N, ndim)
  # Shape (N, 26, k, ndim)
  tiled_coords = tf.tile(tf.reshape(coords, (N, 1, 1, ndim)), (1, 26, k, 1))
  # Shape (N, 26, k)
  nbr_inds = tf.gather(atoms_in_cells, neighbor_cells)
  # Shape (N, 26, k)
  atoms_in_nbr_cells = tf.gather(atoms_in_cells, neighbor_cells)
  # Shape (N, 26, k, ndim)
  nbr_coords = tf.gather(coords, atoms_in_nbr_cells)
  # For smaller systems especially, the periodic boundary conditions can
  # result in neighboring cells being seen multiple times. Maybe use tf.unique to
  # make sure duplicate neighbors are ignored?
  # TODO(rbharath): How does distance need to be modified here to
  # account for periodic boundary conditions?
  # Shape (N, 26, k)
  dists = tf.reduce_sum((tiled_coords - nbr_coords)**2, axis=3)
  # Shape (N, 26*k)
  dists = tf.reshape(dists, [N, -1])
  # TODO(rbharath): This will cause an issue with duplicates!
  # NOTE(review): top_k on positive squared distances selects the M FARTHEST
  # candidates, not the nearest; get_cells_for_atoms negates the norm before
  # top_k for exactly this reason. Verify whether this should be -dists.
  # Shape (N, M)
  closest_nbr_locs = tf.nn.top_k(dists, k=M)[1]
  # N elts of size (M,) each
  split_closest_nbr_locs = [
      tf.squeeze(locs) for locs in tf.split(closest_nbr_locs, N)
  ]
  # Shape (N, 26*k)
  nbr_inds = tf.reshape(nbr_inds, [N, -1])
  # N elts of size (26*k,) each
  split_nbr_inds = [tf.squeeze(split) for split in tf.split(nbr_inds, N)]
  # N elts of size (M,) each
  neighbor_list = [
      tf.gather(nbr_inds, closest_nbr_locs)
      for (nbr_inds, closest_nbr_locs
          ) in zip(split_nbr_inds, split_closest_nbr_locs)
  ]
  # Shape (N, M)
  neighbor_list = tf.stack(neighbor_list)
  return neighbor_list
def get_cells_for_atoms(coords, cells, N, n_cells, ndim=3):
  """Compute the cells each atom belongs to.

  Each atom is assigned the index of the single grid-cell point closest
  to it (smallest squared Euclidean distance).

  Parameters
  ----------
  coords: tf.Tensor
    Shape (N, ndim)
  cells: tf.Tensor
    (box_size**ndim, ndim) shape.
  N: int
    Number of atoms.
  n_cells: int
    Number of grid cells.

  Returns
  -------
  cells_for_atoms: tf.Tensor
    Shape (N, 1)
  """
  n_cells = int(n_cells)
  # Tile both cells and coords to form arrays of size (n_cells*N, ndim)
  tiled_cells = tf.tile(cells, (N, 1))
  # N tensors of shape (n_cells, 1)
  tiled_cells = tf.split(tiled_cells, N)
  # Shape (N*n_cells, 1) after tile
  tiled_coords = tf.reshape(tf.tile(coords, (1, n_cells)), (n_cells * N, ndim))
  # List of N tensors of shape (n_cells, 1)
  tiled_coords = tf.split(tiled_coords, N)
  # Lists of length N
  coords_rel = [
      tf.to_float(coords) - tf.to_float(cells)
      for (coords, cells) in zip(tiled_coords, tiled_cells)
  ]
  coords_norm = [tf.reduce_sum(rel**2, axis=1) for rel in coords_rel]
  # Lists of length n_cells
  # Get indices of k atoms closest to each cell point
  # (negated norm so top_k picks the SMALLEST distance)
  closest_inds = [tf.nn.top_k(-norm, k=1)[1] for norm in coords_norm]
  # TODO(rbharath): tf.stack for tf 1.0
  return tf.stack(closest_inds)
def compute_closest_neighbors(coords,
                              cells,
                              atoms_in_cells,
                              neighbor_cells,
                              N,
                              n_cells,
                              ndim=3,
                              k=5):
  """Computes nearest neighbors from neighboring cells.

  TODO(rbharath): Make this pass test

  Parameters
  ---------
  atoms_in_cells: list
    Of length n_cells. Each entry tensor of shape (k, ndim)
  neighbor_cells: tf.Tensor
    Of shape (n_cells, 26).
  N: int
    Number atoms

  Returns a Python list of N tensors, one per atom, each holding the
  indices of the selected candidate neighbors.
  """
  n_cells = int(n_cells)
  # Tensor of shape (n_cells, k, ndim)
  #atoms_in_cells = tf.stack(atoms_in_cells)
  cells_for_atoms = get_cells_for_atoms(coords, cells, N, n_cells, ndim)
  all_closest = []
  # NOTE(review): this Python-level loop builds O(N) graph ops; fine for small
  # N, expensive for large systems.
  for atom in range(N):
    atom_vec = coords[atom]
    cell = cells_for_atoms[atom]
    nbr_inds = tf.gather(neighbor_cells, tf.to_int32(cell))
    # Tensor of shape (26, k, ndim)
    nbr_atoms = tf.gather(atoms_in_cells, nbr_inds)
    # Reshape to (26*k, ndim)
    nbr_atoms = tf.reshape(nbr_atoms, (-1, 3))
    # Subtract out atom vector. Still of shape (26*k, ndim) due to broadcast.
    nbr_atoms = nbr_atoms - atom_vec
    # Dists of shape (26*k, 1)
    nbr_dists = tf.reduce_sum(nbr_atoms**2, axis=1)
    # Of shape (k, ndim)
    # NOTE(review): top_k on positive distances selects the k FARTHEST
    # candidates; compare get_cells_for_atoms which negates first. Verify.
    closest_inds = tf.nn.top_k(nbr_dists, k=k)[1]
    all_closest.append(closest_inds)
  return all_closest
def get_cells(start, stop, nbr_cutoff, ndim=3):
  """Returns the locations of all grid points in box.

  Suppose start is -10 Angstrom, stop is 10 Angstrom, nbr_cutoff is 1.
  Then would return a list of length 20^3 whose entries would be
  [(-10, -10, -10), (-10, -10, -9), ..., (9, 9, 9)]

  Returns
  -------
  cells: tf.Tensor
    (box_size**ndim, ndim) shape.
  """
  # meshgrid produces one ndim-cube of coordinates per axis; stack/transpose/
  # reshape flattens them into one (n_cells, ndim) list of grid points.
  return tf.reshape(
      tf.transpose(
          tf.stack(
              tf.meshgrid(
                  * [tf.range(start, stop, nbr_cutoff) for _ in range(ndim)]))),
      (-1, ndim))
def put_atoms_in_cells(coords, cells, N, n_cells, ndim, k=5):
  """Place each atom into cells. O(N) runtime.

  Let N be the number of atoms.

  Parameters
  ----------
  coords: tf.Tensor
    (N, 3) shape.
  cells: tf.Tensor
    (n_cells, ndim) shape.
  N: int
    Number atoms
  ndim: int
    Dimensionality of input space
  k: int
    Number of nearest neighbors.

  Returns
  -------
  (closest_inds, closest_atoms): tuple of tf.Tensor
    closest_inds has shape (n_cells, k) -- per-cell atom indices;
    closest_atoms has shape (n_cells, k, ndim) -- their coordinates.
  """
  n_cells = int(n_cells)
  # Tile both cells and coords to form arrays of size (n_cells*N, ndim)
  tiled_cells = tf.reshape(tf.tile(cells, (1, N)), (n_cells * N, ndim))
  # TODO(rbharath): Change this for tf 1.0
  # n_cells tensors of shape (N, 1)
  tiled_cells = tf.split(tiled_cells, n_cells)
  # Shape (N*n_cells, 1) after tile
  tiled_coords = tf.tile(coords, (n_cells, 1))
  # List of n_cells tensors of shape (N, 1)
  tiled_coords = tf.split(tiled_coords, n_cells)
  # Lists of length n_cells
  coords_rel = [
      tf.to_float(coords) - tf.to_float(cells)
      for (coords, cells) in zip(tiled_coords, tiled_cells)
  ]
  coords_norm = [tf.reduce_sum(rel**2, axis=1) for rel in coords_rel]
  # Lists of length n_cells
  # Get indices of k atoms closest to each cell point
  # NOTE(review): top_k on positive squared distances selects the k FARTHEST
  # atoms from each cell point; get_cells_for_atoms negates the norm for the
  # nearest-point case. Verify whether this should be -norm.
  closest_inds = [tf.nn.top_k(norm, k=k)[1] for norm in coords_norm]
  # n_cells tensors of shape (k, ndim)
  closest_atoms = tf.stack([tf.gather(coords, inds) for inds in closest_inds])
  # Tensor of shape (n_cells, k)
  closest_inds = tf.stack(closest_inds)
  return closest_inds, closest_atoms
# TODO(rbharath):
# - Need to find neighbors of the cells (+/- 1 in every dimension).
# - Need to group closest atoms amongst cell neighbors
# - Need to do another top_k to find indices of closest neighbors.
# - Return N lists corresponding to neighbors for every atom.
def compute_neighbor_cells(cells, ndim, n_cells):
  """Compute neighbors of cells in grid.

  # TODO(rbharath): Do we need to handle periodic boundary conditions
  properly here?
  # TODO(rbharath): This doesn't handle boundaries well. We hard-code
  # looking for 26 neighbors, which isn't right for boundary cells in
  # the cube.

  Note n_cells is box_size**ndim. 26 is the number of neighbors of a cube in
  a grid (including diagonals).

  Parameters
  ----------
  cells: tf.Tensor
    (n_cells, 26) shape.

  Returns a (n_cells, 26) tensor of neighbor-cell indices per cell.
  Only defined for ndim == 3 (raises ValueError otherwise).
  """
  n_cells = int(n_cells)
  if ndim != 3:
    raise ValueError("Not defined for dimensions besides 3")
  # Number of neighbors of central cube in 3-space is
  # 3^2 (top-face) + 3^2 (bottom-face) + (3^2-1) (middle-band)
  # TODO(rbharath)
  k = 9 + 9 + 8 # (26 faces on Rubik's cube for example)
  #n_cells = int(cells.get_shape()[0])
  # Tile cells to form arrays of size (n_cells*n_cells, ndim)
  # Two tilings (a, b, c, a, b, c, ...) vs. (a, a, a, b, b, b, etc.)
  # Tile (a, a, a, b, b, b, etc.)
  tiled_centers = tf.reshape(
      tf.tile(cells, (1, n_cells)), (n_cells * n_cells, ndim))
  # Tile (a, b, c, a, b, c, ...)
  tiled_cells = tf.tile(cells, (n_cells, 1))
  # Lists of n_cells tensors of shape (N, 1)
  tiled_centers = tf.split(tiled_centers, n_cells)
  tiled_cells = tf.split(tiled_cells, n_cells)
  # Lists of length n_cells
  coords_rel = [
      tf.to_float(cells) - tf.to_float(centers)
      for (cells, centers) in zip(tiled_centers, tiled_cells)
  ]
  coords_norm = [tf.reduce_sum(rel**2, axis=1) for rel in coords_rel]
  # Lists of length n_cells
  # Get indices of k atoms closest to each cell point
  # NOTE(review): top_k on positive distances selects the 26 FARTHEST cells,
  # not the adjacent ones -- verify whether this should be -norm.
  # n_cells tensors of shape (26,)
  closest_inds = tf.stack([tf.nn.top_k(norm, k=k)[1] for norm in coords_norm])
  return closest_inds
def cutoff(d, x):
  """Zero out interaction values x wherever distance d reaches the 8 A cutoff."""
  within_range = d < 8
  return tf.where(within_range, x, tf.zeros_like(x))
def gauss_1(d):
  """First Gaussian interaction term, exp(-(d/0.5)^2).

  Note that d must be in Angstrom.
  """
  scaled = d / 0.5
  return tf.exp(-scaled**2)
def gauss_2(d):
  """Second Gaussian interaction term, exp(-((d-3)/2)^2).

  Note that d must be in Angstrom.
  """
  shifted = (d - 3) / 2
  return tf.exp(-shifted**2)
def repulsion(d):
  """Repulsion term: d^2 for negative surface distances, zero otherwise."""
  zeros = tf.zeros_like(d)
  return tf.where(d < 0, d**2, zeros)
def hydrophobic(d):
  """Hydrophobic term: 1 below 0.5 A, linear ramp 1.5-d up to 1.5 A, 0 beyond."""
  ramp = tf.where(d < 1.5, 1.5 - d, tf.zeros_like(d))
  return tf.where(d < 0.5, tf.ones_like(d), ramp)
def hbond(d):
  """Hydrogen-bond term: 1 below -0.7 A, linear ramp (0-d)/0.7 up to 0, 0 beyond."""
  ramp = tf.where(d < 0, (1.0 / 0.7) * (0 - d), tf.zeros_like(d))
  return tf.where(d < -0.7, tf.ones_like(d), ramp)
def g(c, Nrot):
  """Map the interaction sum c to a free energy, damped by a learnable
  torsion penalty on the Nrot rotatable bonds."""
  torsion_weight = tf.Variable(tf.random_normal([
      1,
  ], stddev=.3))
  return c / (1 + torsion_weight * Nrot)
def h(d):
  """Weighted sum of the five Autodock Vina energy terms at surface distance d.

  Computes w1*gauss_1(d) + w2*gauss_2(d) + w3*repulsion(d)
  + w4*hydrophobic(d) + w5*hbond(d), where each w is a learnable
  scalar tf.Variable initialized from N(0, 0.3).
  """
  weights = [
      tf.Variable(tf.random_normal([
          1,
      ], stddev=.3)) for _ in range(5)
  ]
  terms = (gauss_1(d), gauss_2(d), repulsion(d), hydrophobic(d), hbond(d))
  return sum(w * term for (w, term) in zip(weights, terms))
class VinaModel(Model):
def __init__(self, logdir=None, batch_size=50):
"""Vina models.
.. math:: c = \sum_{i < j} f_{t_i,t_j}(r_{ij})
Over all pairs of atoms that can move relative to one-another. :math:`t_i` is the
atomtype of atom :math:`i`.
Can view as
.. math:: c = c_\textrm{inter} + c_\textrm{intra}
depending on whether atoms can move relative to one another. Free energy is
predicted only from :math:`c_\textrm{inter}`. Let :math:`R_t` be the Van der Waal's radius of
atom of type t. Then define surface distance
.. math:: d_{ij} = r_{ij} - R_{t_i} - R_{t_j}
Then the energy term is
.. math:: f_{t_i,t_j}(r_{ij}) = \textrm{cutoff}(d_{ij}, h_{t_i,t_j}(d_{ij}))
where
.. math:: \textrm{cutoff}(d, x) = \begin{cases} x & d < 8 \textrm{ Angstrom} \\ 0 & \textrm{otherwise} \end{cases}
The inner function can be further broken down into a sum of terms
.. math:: h_{t_i,t_j}(d) = w_1\textrm{gauss}_1(d) + w_2\textrm{gauss}_2(d) + w_3\textrm{repulsion}(d) + w_4\textrm{hydrophobic}(d) + w_5\textrm{hbond}(d)
these terms are defined as follows (all constants are in Angstroms):
.. math::
\textrm{gauss}_1(d) = \exp(-(d/(0.5))^2)
\textrm{gauss}_2(d) = \exp(-((d-3)/(2))^2)
\textrm{repulsion}(d) = \begin{cases} d^2 & d < 0 \\ 0 & d \geq 0 \end{cases}
\textrm{hydrophobic}(d) = \begin{cases} 1 & d < 0.5 \\ 1.5 - d & \textrm{otherwise} \\ 0 & d > 1.5 \end{cases}
\textrm{hbond}(d) = \begin{cases} 1 & d < -0.7 \\ (1.0/.7)(0 - d) & \textrm{otherwise} \\ 0 & d > 0 \end{cases}
The free energy of binding is computed as a function of the intermolecular interactions
..math:: s = g(c_\textrm{inter})
This function is defined as
..math:: g(c) = \frac{c}{1 + wN_\textrm{rot}}
Where :math:`w` is a weight parameter and :math:`N_\textrm{rot}` is the number of
rotatable bonds between heavy atoms in the ligand.
Gradients are taken backwards through the binding-free energy function with
respect to the position of the ligand and with respect to the torsions of
rotatable bonds and flexible ligands.
TODO(rbharath): It's not clear to me how the effect of the torsions on the :math:`d_{ij}` is
computed. Is there a way to get distances from torsions?
The idea is that mutations are applied to the ligand, and then gradient descent is
used to optimize starting from the initial structure. The code to compute the mutations
is specified
https://github.com/mwojcikowski/smina/blob/master/src/lib/mutate.cpp
Seems to do random quaternion rotations of the ligand. It's not clear to me yet
how the flexible and rotatable bonds are handled for the system.
Need to know an initial search space for the compound. Typically a cubic
binding box.
References
----------
Autodock Vina Paper:
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3041641/
Smina Paper:
http://pubs.acs.org/doi/pdf/10.1021/ci300604z
Omega Paper (ligand conformation generation):
http://www.sciencedirect.com/science/article/pii/S1093326302002048
QuickVina:
http://www.cil.ntu.edu.sg/Courses/papers/journal/QuickVina.pdf
"""
pass
def __init__(self, max_local_steps=10, max_mutations=10):
self.max_local_steps = max_local_steps
self.max_mutations = max_mutations
self.graph, self.input_placeholders, self.output_placeholder = self.construct_graph(
)
self.sess = tf.Session(graph=self.graph)
def construct_graph(self,
N_protein=1000,
N_ligand=100,
M=50,
ndim=3,
k=5,
nbr_cutoff=6):
"""Builds the computational graph for Vina."""
graph = tf.Graph()
with graph.as_default():
n_cells = 64
# TODO(rbharath): Make this handle minibatches
protein_coords_placeholder = tf.placeholder(
tf.float32, shape=(N_protein, 3))
ligand_coords_placeholder = tf.placeholder(
tf.float32, shape=(N_ligand, 3))
protein_Z_placeholder = tf.placeholder(tf.int32, shape=(N_protein,))
ligand_Z_placeholder = tf.placeholder(tf.int32, shape=(N_ligand,))
label_placeholder = tf.placeholder(tf.float32, shape=(1,))
# Shape (N_protein+N_ligand, 3)
coords = tf.concat(
[protein_coords_placeholder, ligand_coords_placeholder], axis=0)
# Shape (N_protein+N_ligand,)
Z = tf.concat([protein_Z_placeholder, ligand_Z_placeholder], axis=0)
# Shape (N_protein+N_ligand, M)
nbr_list = compute_neighbor_list(
coords, nbr_cutoff, N_protein + N_ligand, M, n_cells, ndim=ndim, k=k)
all_interactions = []
# Shape (N_protein+N_ligand,)
all_atoms = tf.range(N_protein + N_ligand)
# Shape (N_protein+N_ligand, 3)
atom_coords = tf.gather(coords, all_atoms)
# Shape (N_protein+N_ligand,)
atom_Z = tf.gather(Z, all_atoms)
# Shape (N_protein+N_ligand, M)
nbrs = tf.squeeze(tf.gather(nbr_list, all_atoms))
# Shape (N_protein+N_ligand, M, 3)
nbr_coords = tf.gather(coords, nbrs)
# Shape (N_protein+N_ligand, M)
nbr_Z = tf.gather(Z, nbrs)
# Shape (N_protein+N_ligand, M, 3)
tiled_atom_coords = tf.tile(
tf.reshape(atom_coords, (N_protein + N_ligand, 1, 3)), (1, M, 1))
# Shape (N_protein+N_ligand, M)
dists = tf.reduce_sum((tiled_atom_coords - nbr_coords)**2, axis=2)
# TODO(rbharath): Need to subtract out Van-der-Waals radii from dists
# Shape (N_protein+N_ligand, M)
atom_interactions = h(dists)
# Shape (N_protein+N_ligand, M)
cutoff_interactions = cutoff(dists, atom_interactions)
# TODO(rbharath): Use RDKit to compute number of rotatable bonds in ligand.
Nrot = 1
# TODO(rbharath): Autodock Vina only uses protein-ligand interactions in
# computing free-energy. This implementation currently uses all interaction
# terms. Not sure if this makes a difference.
# Shape (N_protein+N_ligand, M)
free_energy = g(cutoff_interactions, Nrot)
# Shape () -- scalar
energy = tf.reduce_sum(atom_interactions)
loss = 0.5 * (energy - label_placeholder)**2
return (graph, (protein_coords_placeholder, protein_Z_placeholder,
ligand_coords_placeholder, ligand_Z_placeholder),
label_placeholder)
def fit(self, X_protein, Z_protein, X_ligand, Z_ligand, y):
    """Fit to actual data.

    NOTE(review): unimplemented stub -- accepts protein/ligand coordinates
    (X_*), atomic numbers (Z_*) and labels (y) but performs no training and
    returns None.
    """
    return
def mutate_conformer(protein, ligand):
    """Performs a mutation on the ligand position.

    NOTE(review): unimplemented stub.  It is declared without `self` yet is
    invoked elsewhere as `self.mutate_conformer(conf)` with a single
    argument, so the (protein, ligand) signature does not match its call
    site -- confirm the intended interface before implementing.
    """
    return
def generate_conformation(self, protein, ligand, max_steps=10):
    """Performs the global search for conformations.

    Repeatedly mutates a random starting conformation, locally minimizes the
    mutant, and keeps the lowest-scoring conformation seen.

    Parameters
    ----------
    protein, ligand: conformation inputs (not used directly here; consumed by
        the sampling/mutation helpers on self -- TODO confirm).
    max_steps: int
        Number of mutate/minimize iterations to perform.

    Returns
    -------
    The best (lowest ``self.score``) conformation found, or ``None`` when
    ``max_steps`` is 0.
    """
    best_conf = None
    best_score = np.inf
    conf = self.sample_random_conformation()
    for _ in range(max_steps):
        mut_conf = self.mutate_conformer(conf)
        loc_conf = self.gradient_minimize(mut_conf)
        loc_score = self.score(loc_conf)
        # Bug fix: best_score was never updated, so any later candidate with
        # a finite score silently replaced best_conf and the *last* local
        # conformation was returned instead of the best one.
        if loc_score < best_score:
            best_score = loc_score
            best_conf = loc_conf
    return best_conf
| rbharath/deepchem | deepchem/models/tf_new_models/vina_model.py | Python | mit | 18,919 | [
"Gaussian",
"RDKit"
] | 8dd4af51fedca9fa06183e67ba664841ab91cf97c7d400796e73a6616785b91d |
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from past.utils import old_div
import numpy as np
# Script to get rough estimates of damage thresholds for optics components as
# specified on Thorlabs.
# Source of code is the Thorlabs tutorial on Laser Induced Damage Thresholds:
# https://www.thorlabs.com/tutorials.cfm?tabID=762473B5-84EE-49EB-8E93-375E0AA803FA
# author: im354
########################################
# YOUR LASER PARAMETERS
########################################
beam_diameter = 1e-3  #[units: m]
laser_output_power = 6  #[units: W] (average power)
laser_wavelength = 633e-9  #[units: m]
laser_type = "pulsed"
beam_profile = "gaussian"
laser_pulse_duration = 1e-9  #[units: seconds]
laser_pulse_repetition_rate = 60e6  #[units: Hz]
###################################################
# YOUR OPTIC DAMAGE (LIDT) PARAMETERS
###################################################
lidt_wavelength = 353e-9  #[units: m] (comment previously said "n" -- typo)
lidt_maximum_energy_density = 0.075  #[units: J/cm^2]
lidt_pulse_duration = 10e-9  #[units: seconds] (test-pulse duration of the LIDT spec)
pulse_repetition_rate = 10  #[units: Hz] NOTE(review): never used below -- confirm intent
###################################################
# PROGRAM START
###################################################
LASER_TYPES = ["cw", "pulsed"]
BEAM_PROFILES = ["tophat", "gaussian"]
# Convert units: J/cm^2 -> J/m^2 (1 cm^2 = 1e-4 m^2, hence factor 10000)
lidt_maximum_energy_density = lidt_maximum_energy_density * 10000  #[units: J/m^2]
# verify user input
assert(beam_profile in BEAM_PROFILES)
assert(laser_type in LASER_TYPES)
# CW regime - Thermal damage (also applies to "long" pulses >= 100 ns)
if laser_type == "cw" or (laser_type == "pulsed" and laser_pulse_duration >= 1e-7):
    print("-----CW-----")
    # NOTE(review): beam_diameter is in metres, so this is W/m, not the W/cm
    # used by the Thorlabs tutorial -- confirm the intended unit.
    linear_power_density = old_div(laser_output_power, beam_diameter)
    if beam_profile == "gaussian":
        linear_power_density = linear_power_density * 2.0  # adjust for peak power in gaussian beam
    # Linear wavelength scaling of the laser load toward the LIDT test wavelength.
    wavelength_adjusted_lidt = linear_power_density * (laser_wavelength / float(lidt_wavelength))
    # NOTE(review): a linear *power* density is compared below against
    # lidt_maximum_energy_density, which is an *energy* density (J/m^2) --
    # the units do not match; verify against the Thorlabs CW LIDT spec
    # (normally given in W/cm).
    print("[CW] Wavelength adjusted LIDT for your laser:", wavelength_adjusted_lidt)
    print("[CW] Specified LIDT for Optic", lidt_maximum_energy_density)
    if wavelength_adjusted_lidt > lidt_maximum_energy_density:
        print("[CW] !---WARNING---! : LIDT threshold specification exceeded for optic ")
    else:
        print("[CW] !---DONE---! : LIDT threshold NOT exceeded for optic")
# Pulsed regime - dielectric breakdown damage
if laser_type == "pulsed":
    print("-----PULSED-----")
    beam_area = np.pi * (beam_diameter / 2.0)**2  #[units: m^2]
    # Energy per pulse = average power / repetition rate.
    pulse_energy = old_div(laser_output_power, laser_pulse_repetition_rate)
    print("[Pulsed] Pulse energy: {0} [J]".format(pulse_energy))
    print("[Pulsed] Beam area {0} [m^2]".format(beam_area))
    area_energy_density = old_div(pulse_energy, beam_area)  #[units: J/m^2]
    if beam_profile == "gaussian":
        area_energy_density = area_energy_density * 2.0  # adjust for peak power in gaussian beam
    print("[Pulsed] Beam Energy Density (Beam Profile Adjusted) [per Pulse] {} [J/m^2]".format(area_energy_density))
    if laser_pulse_duration >= 1e-9 and laser_pulse_duration < 1e-7:
        print("[Pulsed] Damage Mechanism: Dielectric breakdown")
    elif laser_pulse_duration >= 1e-7 and laser_pulse_duration < 1e-4:
        print("[Pulsed] Damage Mechanism: Dielectric breakdown or Thermal")
    elif laser_pulse_duration > 1e-7:
        # Only reached for durations >= 1e-4 s (shorter ones hit the branches above).
        print("[Pulsed] Damage Mechanism: Thermal")
    elif laser_pulse_duration < 1e-9:
        print("[Pulsed] Damage Mechanism: Avalanche Ionization, WARNING - NO comparison for Thorlabs Damage Specs")
    # Scale the measured per-pulse energy density to the LIDT test conditions:
    # sqrt scaling in pulse duration and in wavelength (per the Thorlabs tutorial).
    adjusted_lidt = area_energy_density * np.sqrt(old_div(laser_pulse_duration, lidt_pulse_duration)) * np.sqrt(laser_wavelength / float(lidt_wavelength))
    print("[Pulsed] Adjusted (wavelength, pulse duration) LIDT for your laser:", adjusted_lidt)
    print("[Pulsed] Specified LIDT for Optic", lidt_maximum_energy_density)
    if adjusted_lidt > lidt_maximum_energy_density:
        print("[Pulsed] !---WARNING---! : LIDT threshold specification exceeded for optic ")
    else:
        print("[Pulsed] !---DONE---! : LIDT threshold NOT exceeded for optic")
| nanophotonics/nplab | nplab/analysis/laser-damage-threshold-calculator.py | Python | gpl-3.0 | 3,938 | [
"Gaussian"
] | f2aef151c2ac816c59ddda6e296ca7978d92c706d67436349bec8141b334f48c |
#pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import mooseutils
from ChiggerResultBase import ChiggerResultBase
from ChiggerSourceBase import ChiggerSourceBase
from .. import utils
class ChiggerResult(ChiggerResultBase):
    """
    Renderer-level result managing an arbitrary collection of
    ChiggerFilterSourceBase objects attached to the vtkRenderer.

    Options supplied to this object are forwarded to every contained source
    object, when applicable.  To expose source settings in this object's
    option dump, add them in the derived class's static getOptions method;
    this is not done here because arbitrary source types with differing
    settings are accepted (see ExodusResult for a single-type example).

    Inputs:
        *sources: A tuple of ChiggerFilterSourceBase object to render.
        **kwargs: see ChiggerResultBase
    """

    # Ownership is restricted to objects derived from this base type.
    SOURCE_TYPE = ChiggerSourceBase

    @staticmethod
    def getOptions():
        return ChiggerResultBase.getOptions()

    def __init__(self, *sources, **kwargs):
        super(ChiggerResult, self).__init__(**kwargs)
        self._sources = sources

    def needsUpdate(self):
        """
        True when this object or any contained source requires an update.
        (override)
        """
        return super(ChiggerResult, self).needsUpdate() or \
            any(source.needsUpdate() for source in self._sources)

    def updateOptions(self, *args):
        """
        Forward the supplied option objects to this object and every
        contained source. (override)

        Inputs:
            see ChiggerResultBase
        """
        flags = [self.needsUpdate(),
                 super(ChiggerResult, self).updateOptions(*args)]
        flags.extend(source.updateOptions(*args) for source in self._sources)
        modified = any(flags)
        self.setNeedsUpdate(modified)
        return modified

    def setOptions(self, *args, **kwargs):
        """
        Forward the supplied options to this object and every contained
        source. (override)

        Inputs:
            see ChiggerResultBase
        """
        flags = [self.needsUpdate(),
                 super(ChiggerResult, self).setOptions(*args, **kwargs)]
        flags.extend(source.setOptions(*args, **kwargs) for source in self._sources)
        modified = any(flags)
        self.setNeedsUpdate(modified)
        return modified

    def update(self, **kwargs):
        """
        Update this object, then any out-of-date contained sources. (override)

        Inputs:
            see ChiggerResultBase
        """
        super(ChiggerResult, self).update(**kwargs)
        for source in self._sources:
            if source.needsUpdate():
                source.update()

    def getSources(self):
        """
        Access the contained ChiggerSource objects.
        """
        return self._sources

    def getBounds(self, check=True):
        """
        Bounding box enclosing all contained sources.

        Inputs:
            check[bool]: (Default: True) When True, perform an update check and
                         raise if the object is out-of-date; otherwise update
                         in-place when needed.

        TODO: For Peacock, on linux check=False must be set, but I am not sure why.
        """
        if not check:
            if self.needsUpdate():
                self.update()
        else:
            self.checkUpdateState()
        return utils.get_bounds(*self._sources)

    def getRange(self):
        """
        Combined min/max range over the selected variables of all sources.

        NOTE: For the range to be restricted by block/boundary/nodeset the
        reader must have "squeeze=True", which can be much slower.
        """
        return utils.get_min_max(*(source.getRange() for source in self._sources))

    def reset(self):
        """
        Detach all source actors from the renderer.
        """
        super(ChiggerResult, self).reset()
        for source in self._sources:
            self._vtkrenderer.RemoveViewProp(source.getVTKActor())

    def initialize(self):
        """
        Validate source types and attach their actors to the renderer.
        """
        super(ChiggerResult, self).initialize()
        for source in self._sources:
            if not isinstance(source, self.SOURCE_TYPE):
                raise mooseutils.MooseException(
                    'The supplied source type of {} must be of type {}.'.format(
                        source.__class__.__name__, self.SOURCE_TYPE.__name__))
            source.setVTKRenderer(self._vtkrenderer)
            self._vtkrenderer.AddViewProp(source.getVTKActor())

    def __iter__(self):
        """
        Iterate over the underlying source objects.
        """
        return iter(self._sources)

    def __getitem__(self, index):
        """
        Indexed access to the source objects.
        """
        return self._sources[index]

    def __len__(self):
        """
        Number of contained source objects.
        """
        return len(self._sources)
| Chuban/moose | python/chigger/base/ChiggerResult.py | Python | lgpl-2.1 | 6,289 | [
"MOOSE"
] | 24c0f855f03896c309e1a14b5e00300534a497aec61deef5ed7a3a62ddfa83b3 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see winpython/__init__.py for details)
"""
WinPython Package Manager
Created on Fri Aug 03 14:32:26 2012
"""
from __future__ import print_function
import os
import os.path as osp
import shutil
import re
import sys
import subprocess
# Local imports
from winpython import utils
from winpython.config import DATA_PATH
from winpython.py3compat import configparser as cp
# from former wppm separate script launcher
from argparse import ArgumentParser
from winpython import py3compat
# Workaround for installing PyVISA on Windows from source:
os.environ['HOME'] = os.environ['USERPROFILE']
# pep503 defines normalized package names: www.python.org/dev/peps/pep-0503
def normalize(name):
    """Return the PEP 503 normalized form of a package name: lowercase,
    with any run of '-', '_' or '.' collapsed to a single '-'."""
    return re.sub(r"[-_.]+", "-", name.lower())
def get_package_metadata(database, name):
    """Extract infos (description, url) from the local database.

    Returns a dict with 'description' and 'url' keys; 'url' defaults to the
    PyPI page of *name* when the database has no entry.  Several spellings of
    the name (lowercase, '-'/'_' swapped, PEP 503 normalized, ...) are tried
    because wheel filenames replace '-' with '_'.
    """
    # Note: we could use the PyPI database but this has been written on
    # machine which is not connected to the internet
    db = cp.ConfigParser()
    # Bug fix: use a context manager so the database file handle is always
    # closed -- the original `db.readfp(open(...))` leaked it.
    with open(osp.join(DATA_PATH, database)) as fh:
        db.readfp(fh)
    metadata = dict(description='', url='https://pypi.python.org/pypi/' + name)
    for key in metadata:
        name1 = name.lower()
        # wheel replace '-' per '_' in key
        for name2 in (name1, name1.split('-')[0], name1.replace('-', '_'),
                      '-'.join(name1.split('_')), normalize(name)):
            try:
                metadata[key] = db.get(name2, key)
                break
            except (cp.NoSectionError, cp.NoOptionError):
                pass
    return metadata
class BasePackage(object):
    """Base class holding the metadata of a package identified by the
    filename of its installer or archive."""

    def __init__(self, fname):
        self.fname = fname
        self.name = None
        self.version = None
        self.architecture = None
        self.pyversion = None
        self.description = None
        self.url = None

    def __str__(self):
        """Multi-line, human readable summary of the package."""
        suffix = ""
        if self.pyversion is not None:
            suffix = " for Python %s" % self.pyversion
        if self.architecture is not None:
            if not suffix:
                suffix = " for Python"
            suffix += " %dbits" % self.architecture
        parts = ["%s %s%s" % (self.name, self.version, suffix),
                 "%s" % self.description,
                 "Website: %s" % self.url,
                 "[%s]" % osp.basename(self.fname)]
        return "\n".join(parts)

    def is_compatible_with(self, distribution):
        """Return True if package is compatible with distribution in terms of
        architecture and Python version (if applyable)"""
        compatible = True
        if self.architecture is not None:
            # Source distributions (not yet supported though)
            compatible = compatible and self.architecture == distribution.architecture
        if self.pyversion is not None:
            # Non-pure Python package
            compatible = compatible and self.pyversion == distribution.version
        return compatible

    def extract_optional_infos(self):
        """Fill in the optional infos (description, url) from the local
        package database."""
        for key, value in list(get_package_metadata('packages.ini',
                                                    self.name).items()):
            setattr(self, key, value)
class Package(BasePackage):
    """Package known from its installer/archive filename (wheel, sdist,
    bdist_wininst/NSIS .exe): metadata is parsed out of the basename and the
    list of installed files is tracked through a plain-text log."""

    def __init__(self, fname):
        BasePackage.__init__(self, fname)
        # Paths (relative to the distribution target) recorded in the log.
        self.files = []
        self.extract_infos()
        self.extract_optional_infos()

    def extract_infos(self):
        """Extract package infos (name, version, architecture)
        from filename (installer basename).

        Tries, in order: bdist_wininst pattern, two NSIS patterns, a
        PyQt-style pattern, binary-wheel naming, then sdist archives.
        Raises NotImplementedError when nothing matches.
        """
        bname = osp.basename(self.fname)
        if bname.endswith('.exe'):
            # distutils bdist_wininst
            match = re.match(utils.WININST_PATTERN, bname)
            if match is not None:
                (self.name, self.version,
                 _t0, _qtver, arch, _t1, self.pyversion, _t2) = match.groups()
                self.architecture = 32 if arch == 'win32' else 64
                return
            # NSIS
            pat = r'([a-zA-Z0-9\-\_]*)-Py([0-9\.]*)-x(64|32)-gpl-([0-9\.\-]*[a-z]*)\.exe'
            match = re.match(pat, bname)
            if match is not None:
                self.name, self.pyversion, arch, self.version = match.groups()
                self.architecture = int(arch)
                return
            # NSIS complement to match PyQt4-4.10.4-gpl-Py3.4-Qt4.8.6-x32.exe
            pat = r'([a-zA-Z0-9\_]*)-([0-9\.]*[a-z]*)-gpl-Py([0-9\.]*)-.*-x(64|32)\.exe'
            match = re.match(pat, bname)
            if match is not None:
                self.name, self.version, self.pyversion, arch = match.groups()
                self.architecture = int(arch)
                return
            # PyQt-style name-version-pyX-xNN-qt... installers.
            match = re.match(r'([a-zA-Z0-9\-\_]*)-([0-9\.]*[a-z]*)-py([0-9\.]*)-x(64|32)-([a-z0-9\.\-]*).exe', bname)
            if match is not None:
                self.name, self.version, self.pyversion, arch, _pyqt = match.groups()
                self.architecture = int(arch)
                return
        # New : Binary wheel case
        elif bname.endswith(('32.whl', '64.whl')):
            # {name}[-{bloat}]-{version}-{python tag}-{abi tag}-{platform tag}.whl
            # ['sounddevice','0.3.5','py2.py3.cp34.cp35','none','win32']
            # PyQt5-5.7.1-5.7.1-cp34.cp35.cp36-none-win_amd64.whl
            bname2 = bname[:-4].split("-")
            self.name = bname2[0]
            self.version = '-'.join(list(bname2[1:-3]))
            self.pywheel, abi, arch = bname2[-3:]
            # Let's ignore this self.pywheel (python tag not used further).
            self.pyversion = None
            # wheel arch is 'win32' or 'win_amd64'
            self.architecture = 32 if arch == 'win32' else 64
            return
        elif bname.endswith(('.zip', '.tar.gz', '.whl')):
            # distutils sdist (or pure-python wheel)
            infos = utils.get_source_package_infos(bname)
            if infos is not None:
                self.name, self.version = infos
                return
        raise NotImplementedError("Not supported package type %s" % bname)

    def logpath(self, logdir):
        """Return full log path"""
        return osp.join(logdir, osp.basename(self.fname+'.log'))

    def save_log(self, logdir):
        """Write the plain-text installation log: a short header followed by
        one installed path per line.  (Docstring previously said "pickle",
        but the format is plain text.)"""
        header = ['# WPPM package installation log',
                  '# ',
                  '# Package: %s v%s' % (self.name, self.version),
                  '']
        open(self.logpath(logdir), 'w').write('\n'.join(header + self.files))

    def load_log(self, logdir):
        """Read the plain-text installation log back into self.files,
        skipping comments and blank lines.  A missing log yields an empty
        file list."""
        try:
            data = open(self.logpath(logdir), 'U').readlines()
        except (IOError, OSError):
            data = []  # it can be now ()
        self.files = []
        for line in data:
            relpath = line.strip()
            if relpath.startswith('#') or len(relpath) == 0:
                continue
            self.files.append(relpath)

    def remove_log(self, logdir):
        """Remove log (after uninstalling package)"""
        try:
            os.remove(self.logpath(logdir))
        except WindowsError:
            pass
class WininstPackage(BasePackage):
    """Package installed with a distutils bdist_wininst installer, detected
    from its 'Remove<name>.exe' uninstaller living in the target directory.
    The version is recovered from the 'DisplayName=' line of the matching
    '<name>-wininst.log' file."""

    def __init__(self, fname, distribution):
        BasePackage.__init__(self, fname)
        self.logname = None
        self.distribution = distribution
        self.architecture = distribution.architecture
        self.pyversion = distribution.version
        self.extract_infos()
        self.extract_optional_infos()

    def extract_infos(self):
        """Extract package infos (name, version, architecture).

        Leaves name/version as None when the uninstaller name, the log file
        contents or the DisplayName format do not match expectations.
        """
        match = re.match(r'Remove([a-zA-Z0-9\-\_\.]*)\.exe', self.fname)
        if match is None:
            return
        self.name = match.groups()[0]
        self.logname = '%s-wininst.log' % self.name
        searchtxt = 'DisplayName='
        # Bug fix: the log file was left open when no 'DisplayName=' line was
        # found (the for/else branch returned before fd.close()).  A context
        # manager guarantees the handle is closed on every path.
        with open(osp.join(self.distribution.target, self.logname), 'U') as fd:
            for line in fd.readlines():
                pos = line.find(searchtxt)
                if pos != -1:
                    break
            else:
                return
        match = re.match(r'Python %s %s-([0-9\.]*)'
                         % (self.pyversion, self.name),
                         line[pos+len(searchtxt):])
        if match is None:
            return
        self.version = match.groups()[0]

    def uninstall(self):
        """Uninstall package by running its recorded wininst uninstaller."""
        subprocess.call([self.fname, '-u', self.logname],
                        cwd=self.distribution.target)
class Distribution(object):
    """A target Python distribution (e.g. a WinPython directory) on which
    packages can be installed, listed and uninstalled.  Install operations
    record the created files in per-package logs under ``<target>\\Logs``."""

    # PyQt module is now like :PyQt4-...
    NSIS_PACKAGES = ('PyQt4', 'PyQwt', 'PyQt5')  # known NSIS packages

    def __init__(self, target=None, verbose=False, indent=False):
        self.target = target
        self.verbose = verbose
        self.indent = indent
        self.logdir = None
        # if no target path given, take the current python interpreter one
        if self.target is None:
            self.target = os.path.dirname(sys.executable)
        self.init_log_dir()
        self.to_be_removed = []  # list of directories to be removed later
        # NOTE(review): uses the `target` argument, not self.target -- when
        # target is None this queries path None; confirm intended.
        self.version, self.architecture = utils.get_python_infos(target)

    def clean_up(self):
        """Remove directories which couldn't be removed when building"""
        for path in self.to_be_removed:
            try:
                shutil.rmtree(path, onerror=utils.onerror)
            except WindowsError:
                print("Directory %s could not be removed" % path,
                      file=sys.stderr)

    def remove_directory(self, path):
        """Try to remove directory -- on WindowsError, remove it later"""
        try:
            shutil.rmtree(path)
        except WindowsError:
            self.to_be_removed.append(path)

    def init_log_dir(self):
        """Init log path"""
        path = osp.join(self.target, 'Logs')
        if not osp.exists(path):
            os.mkdir(path)
        self.logdir = path

    def copy_files(self, package, targetdir,
                   srcdir, dstdir, create_bat_files=False):
        """Copy an extracted installer subtree (srcdir, relative to
        targetdir) into the distribution (dstdir, relative to self.target),
        recording every created path in package.files."""
        srcdir = osp.join(targetdir, srcdir)
        if not osp.isdir(srcdir):
            return
        offset = len(srcdir)+len(os.pathsep)
        for dirpath, dirnames, filenames in os.walk(srcdir):
            for dname in dirnames:
                # Path of the directory relative to srcdir.
                t_dname = osp.join(dirpath, dname)[offset:]
                src = osp.join(srcdir, t_dname)
                dst = osp.join(dstdir, t_dname)
                if self.verbose:
                    print("mkdir: %s" % dst)
                full_dst = osp.join(self.target, dst)
                if not osp.exists(full_dst):
                    os.mkdir(full_dst)
                package.files.append(dst)
            for fname in filenames:
                t_fname = osp.join(dirpath, fname)[offset:]
                src = osp.join(srcdir, t_fname)
                if dirpath.endswith('_system32'):
                    # Files that should be copied in %WINDIR%\system32
                    dst = fname
                else:
                    dst = osp.join(dstdir, t_fname)
                if self.verbose:
                    print("file: %s" % dst)
                full_dst = osp.join(self.target, dst)
                shutil.move(src, full_dst)
                package.files.append(dst)
                name, ext = osp.splitext(dst)
                # Optionally create a .bat launcher next to scripts.
                if create_bat_files and ext in ('', '.py'):
                    dst = name + '.bat'
                    if self.verbose:
                        print("file: %s" % dst)
                    full_dst = osp.join(self.target, dst)
                    fd = open(full_dst, 'w')
                    fd.write("""@echo off
python "%~dpn0""" + ext + """" %*""")
                    fd.close()
                    package.files.append(dst)

    def create_file(self, package, name, dstdir, contents):
        """Generate data file -- path is relative to distribution root dir"""
        dst = osp.join(dstdir, name)
        if self.verbose:
            print("create: %s" % dst)
        full_dst = osp.join(self.target, dst)
        open(full_dst, 'w').write(contents)
        package.files.append(dst)

    def get_installed_packages(self):
        """Return installed packages, merging three sources: WPPM logs,
        distutils wininst uninstallers, and pip's own package list."""
        # Packages installed with WPPM
        wppm = [Package(logname[:-4]) for logname in os.listdir(self.logdir)
                if '.whl.log' not in logname]
        # Packages installed with distutils wininst
        wininst = []
        for name in os.listdir(self.target):
            if name.startswith('Remove') and name.endswith('.exe'):
                try:
                    pack = WininstPackage(name, self)
                except IOError:
                    continue
                if pack.name is not None and pack.version is not None:
                    wininst.append(pack)
        # Include package installed via pip (not via WPPM)
        try:
            if os.path.dirname(sys.executable) == self.target:
                # direct way: we interrogate ourself, using official API
                import pkg_resources, imp
                imp.reload(pkg_resources)
                pip_list = [(i.key, i.version)
                            for i in pkg_resources.working_set]
            else:
                # indirect way: we interrogate something else
                # NOTE(review): shell=True combined with a list argument is
                # unusual on subprocess.Popen -- confirm it behaves as
                # intended on Windows.
                cmdx = [osp.join(self.target, 'python.exe'), '-c',
                        "import pip;print('+!+'.join(['%s@+@%s@+@' % (i.key,i.version) for i in pip.get_installed_distributions()]))"]
                p = subprocess.Popen(cmdx, shell=True, stdout=subprocess.PIPE,
                                     cwd=self.target)
                stdout, stderr = p.communicate()
                # Skip the b' prefix of the bytes repr on Python 3.
                start_at = 2 if sys.version_info >= (3,0) else 0
                pip_list = [line.split("@+@")[:2] for line in
                            ("%s" % stdout)[start_at:].split("+!+")]
            # create pip package list (fake wheel filenames feed Package())
            wppip = [Package('%s-%s-py2.py3-none-any.whl' %
                             (i[0].replace('-', '_').lower(), i[1])) for i in pip_list]
            # pip package version is supposed better
            already = set(b.name.replace('-', '_') for b in wppip+wininst)
            wppm = wppip + [i for i in wppm
                            if i.name.replace('-', '_').lower() not in already]
        except:
            # Best-effort: fall back to WPPM/wininst lists only.
            pass
        return sorted(wppm + wininst, key=lambda tup: tup.name.lower())

    def find_package(self, name):
        """Find installed package (returns None when not found)"""
        for pack in self.get_installed_packages():
            if normalize(pack.name) == normalize(name):
                return pack

    def uninstall_existing(self, package):
        """Uninstall existing package (or package name)"""
        if isinstance(package, str):
            pack = self.find_package(package)
        else:
            pack = self.find_package(package.name)
        if pack is not None:
            self.uninstall(pack)

    def patch_all_shebang(self, to_movable=True, max_exe_size=999999, targetdir=""):
        """make all python launchers relatives"""
        import glob
        import os
        for ffname in glob.glob(r'%s\Scripts\*.exe' % self.target):
            size = os.path.getsize(ffname)
            # Only small launcher stubs are patched (big .exe are real programs).
            if size <= max_exe_size:
                utils.patch_shebang_line(ffname, to_movable=to_movable,
                                         targetdir=targetdir)
        for ffname in glob.glob(r'%s\Scripts\*.py' % self.target):
            utils.patch_shebang_line_py(ffname, to_movable=to_movable,
                                        targetdir=targetdir)

    def install(self, package, install_options=None):
        """Install package in distribution, dispatching on the installer
        type (wheel/sdist via pip, NSIS .exe, bdist_wininst .exe, .msi)."""
        assert package.is_compatible_with(self)
        tmp_fname = None
        # wheel addition
        if package.fname.endswith(('.whl', '.tar.gz', '.zip')):
            self.install_bdist_direct(package, install_options=install_options)
        bname = osp.basename(package.fname)
        if bname.endswith('.exe'):
            if re.match(r'(' + ('|'.join(self.NSIS_PACKAGES)) + r')-', bname):
                self.install_nsis_package(package)
            else:
                self.install_bdist_wininst(package)
        elif bname.endswith('.msi'):
            self.install_bdist_msi(package)
        self.handle_specific_packages(package)
        # minimal post-install actions
        self.patch_standard_packages(package.name)
        # pip-installed formats keep their own metadata; no WPPM log needed.
        if not package.fname.endswith(('.whl', '.tar.gz', '.zip')):
            package.save_log(self.logdir)
        if tmp_fname is not None:
            os.remove(tmp_fname)

    def do_pip_action(self, actions=None, install_options=None):
        """Do pip action in a distribution (runs `python -m pip <actions>`,
        through scripts\\env.bat when available)."""
        my_list = install_options
        if my_list is None:
            my_list = []
        my_actions = actions
        if my_actions is None:
            my_actions = []
        executing = osp.join(self.target, '..', 'scripts', 'env.bat')
        if osp.isfile(executing):
            complement = [r'&&', 'cd', '/D', self.target,
                          r'&&', osp.join(self.target, 'python.exe')]
            complement += ['-m', 'pip']
        else:
            executing = osp.join(self.target, 'python.exe')
            complement = ['-m', 'pip']
        try:
            fname = utils.do_script(this_script=None,
                                    python_exe=executing,
                                    architecture=self.architecture, verbose=self.verbose,
                                    install_options=complement + my_actions + my_list)
        except RuntimeError:
            if not self.verbose:
                print("Failed!")
            raise

    def patch_standard_packages(self, package_name='', to_movable=True):
        """patch Winpython packages in need (pywin32, pip, spyder, numba);
        an empty package_name applies every patch."""
        import filecmp
        # 'pywin32' minimal post-install (pywin32_postinstall.py do too much)
        if package_name.lower() == "pywin32" or package_name == '':
            origin = self.target + (r"\Lib\site-packages\pywin32_system32")
            destin = self.target
            if osp.isdir(origin):
                for name in os.listdir(origin):
                    here, there = osp.join(origin, name), osp.join(destin, name)
                    if (not os.path.exists(there) or
                            not filecmp.cmp(here, there)):
                        shutil.copyfile(here, there)
        # 'pip' to do movable launchers (around line 100) !!!!
        # rational: https://github.com/pypa/pip/issues/2328
        if package_name.lower() == "pip" or package_name == '':
            # ensure pip will create movable launchers
            # sheb_mov1 = classic way up to WinPython 2016-01
            # sheb_mov2 = tried way, but doesn't work for pip (at least)
            sheb_fix = " executable = get_executable()"
            sheb_mov1 = " executable = os.path.join(os.path.basename(get_executable()))"
            sheb_mov2 = " executable = os.path.join('..',os.path.basename(get_executable()))"
            if to_movable:
                utils.patch_sourcefile(self.target +
                                       r"\Lib\site-packages\pip\_vendor\distlib\scripts.py",
                                       sheb_fix, sheb_mov1)
                utils.patch_sourcefile(self.target +
                                       r"\Lib\site-packages\pip\_vendor\distlib\scripts.py",
                                       sheb_mov2, sheb_mov1)
            else:
                utils.patch_sourcefile(self.target +
                                       r"\Lib\site-packages\pip\_vendor\distlib\scripts.py",
                                       sheb_mov1, sheb_fix)
                utils.patch_sourcefile(self.target +
                                       r"\Lib\site-packages\pip\_vendor\distlib\scripts.py",
                                       sheb_mov2, sheb_fix)
            # ensure pip wheel will register relative PATH in 'RECORD' files
            # will be in standard pip 8.0.3
            utils.patch_sourcefile(
                self.target + (
                    r"\Lib\site-packages\pip\wheel.py"),
                " writer.writerow((f, h, l))",
                " writer.writerow((normpath(f, lib_dir), h, l))")
            # create movable launchers for previous package installations
            self.patch_all_shebang(to_movable=to_movable)
        if package_name.lower() == "spyder" or package_name == '':
            # spyder don't goes on internet without I ask
            utils.patch_sourcefile(
                self.target + (
                    r"\Lib\site-packages\spyderlib\config\main.py"),
                "'check_updates_on_startup': True,",
                "'check_updates_on_startup': False,")
            utils.patch_sourcefile(
                self.target + (
                    r"\Lib\site-packages\spyder\config\main.py"),
                "'check_updates_on_startup': True,",
                "'check_updates_on_startup': False,")
        # workaround bad installers
        if package_name.lower() == "numba":
            self.create_pybat(['numba', 'pycc'])
        else:
            self.create_pybat(package_name.lower())

    def create_pybat(self, names='', contents=r"""@echo off
..\python "%~dpn0" %*"""):
        """Create launcher batch script when missing.  `names` is either a
        list of script names or a string prefix used to match scripts."""
        scriptpy = osp.join(self.target, 'Scripts')  # std Scripts of python
        # A string is treated as a prefix filter over extension-less scripts.
        if not list(names) == names:
            my_list = [f for f in os.listdir(scriptpy) if '.' not in f
                       and f.startswith(names)]
        else:
            my_list = names
        for name in my_list:
            if osp.isdir(scriptpy) and osp.isfile(osp.join(scriptpy, name)):
                if (not osp.isfile(osp.join(scriptpy, name + '.exe')) and
                        not osp.isfile(osp.join(scriptpy, name + '.bat'))):
                    fd = open(osp.join(scriptpy, name + '.bat'), 'w')
                    fd.write(contents)
                    fd.close()

    def handle_specific_packages(self, package):
        """Packages requiring additional configuration (PyQt4/PyQt5:
        qt.conf files, pyuic launcher, missing __init__.py files)."""
        if package.name.lower() in ('pyqt4', 'pyqt5'):
            # Qt configuration file (where to find Qt)
            name = 'qt.conf'
            contents = """[Paths]
Prefix = .
Binaries = ."""
            self.create_file(package, name,
                             osp.join('Lib', 'site-packages', package.name),
                             contents)
            self.create_file(package, name, '.',
                             contents.replace('.', './Lib/site-packages/%s' % package.name))
            # pyuic script
            if package.name.lower() == 'pyqt5':
                # see http://code.activestate.com/lists/python-list/666469/
                tmp_string = r'''@echo off
if "%WINPYDIR%"=="" call "%~dp0..\..\scripts\env.bat"
"%WINPYDIR%\python.exe" -m PyQt5.uic.pyuic %1 %2 %3 %4 %5 %6 %7 %8 %9'''
            else:
                tmp_string = r'''@echo off
if "%WINPYDIR%"=="" call "%~dp0..\..\scripts\env.bat"
"%WINPYDIR%\python.exe" "%WINPYDIR%\Lib\site-packages\package.name\uic\pyuic.py" %1 %2 %3 %4 %5 %6 %7 %8 %9'''
            self.create_file(package, 'pyuic%s.bat' % package.name[-1],
                             'Scripts', tmp_string.replace('package.name', package.name))
            # Adding missing __init__.py files (fixes Issue 8)
            uic_path = osp.join('Lib', 'site-packages', package.name, 'uic')
            for dirname in ('Loader', 'port_v2', 'port_v3'):
                self.create_file(package, '__init__.py',
                                 osp.join(uic_path, dirname), '')

    def _print(self, package, action):
        """Print package-related action text (e.g. 'Installing')
        indicating progress"""
        text = " ".join([action, package.name, package.version])
        if self.verbose:
            utils.print_box(text)
        else:
            if self.indent:
                text = (' '*4) + text
            print(text + '...', end=" ")

    def _print_done(self):
        """Print OK at the end of a process"""
        if not self.verbose:
            print("OK")

    def uninstall(self, package):
        """Uninstall package from distribution (pip first, then the legacy
        per-file WPPM log cleanup)."""
        self._print(package, "Uninstalling")
        if isinstance(package, WininstPackage):
            package.uninstall()
            package.remove_log(self.logdir)
        elif not package.name == 'pip':
            # trick to get true target (if not current)
            this_executable_path = os.path.dirname(self.logdir)
            subprocess.call([this_executable_path + r'\python.exe',
                             '-m', 'pip', 'uninstall', package.name, '-y'],
                            cwd=this_executable_path)
        # legacy, if some package installed by old non-pip means
        package.load_log(self.logdir)
        # Remove files first (reversed order), then directories.
        for fname in reversed(package.files):
            path = osp.join(self.target, fname)
            if osp.isfile(path):
                if self.verbose:
                    print("remove: %s" % fname)
                os.remove(path)
                if fname.endswith('.py'):
                    # Also drop compiled .pyc/.pyo siblings.
                    for suffix in ('c', 'o'):
                        if osp.exists(path+suffix):
                            if self.verbose:
                                print("remove: %s" % (fname+suffix))
                            os.remove(path+suffix)
            elif osp.isdir(path):
                if self.verbose:
                    print("rmdir: %s" % fname)
                pycache = osp.join(path, '__pycache__')
                if osp.exists(pycache):
                    try:
                        shutil.rmtree(pycache, onerror=utils.onerror)
                        if self.verbose:
                            print("rmtree: %s" % pycache)
                    except WindowsError:
                        print("Directory %s could not be removed"
                              % pycache, file=sys.stderr)
                try:
                    os.rmdir(path)
                except OSError:
                    if self.verbose:
                        print("unable to remove directory: %s" % fname,
                              file=sys.stderr)
            else:
                if self.verbose:
                    print("file not found: %s" % fname, file=sys.stderr)
        package.remove_log(self.logdir)
        self._print_done()

    def install_bdist_wininst(self, package):
        """Install a distutils package built with the bdist_wininst option
        (binary distribution, .exe file)"""
        self._print(package, "Extracting")
        targetdir = utils.extract_archive(package.fname)
        self._print_done()
        self._print(package, "Installing %s from " % targetdir)
        self.copy_files(package, targetdir, 'PURELIB',
                        osp.join('Lib', 'site-packages'))
        self.copy_files(package, targetdir, 'PLATLIB',
                        osp.join('Lib', 'site-packages'))
        self.copy_files(package, targetdir, 'SCRIPTS', 'Scripts',
                        create_bat_files=True)
        self.copy_files(package, targetdir, 'DLLs', 'DLLs')
        self.copy_files(package, targetdir, 'DATA', '.')
        self._print_done()

    def install_bdist_direct(self, package, install_options=None):
        """Install a package directly !  (delegates to pip)"""
        self._print(package, "Installing %s" % package.fname.split(".")[-1])
        # targetdir = utils.extract_msi(package.fname, targetdir=self.target)
        try:
            fname = utils.direct_pip_install(package.fname,
                                             python_exe=osp.join(self.target, 'python.exe'),
                                             architecture=self.architecture, verbose=self.verbose,
                                             install_options=install_options)
        except RuntimeError:
            if not self.verbose:
                print("Failed!")
            raise
        package = Package(fname)
        self._print_done()

    def install_script(self, script, install_options=None):
        """Run a script inside the target distribution via utils.do_script."""
        try:
            fname = utils.do_script(script,
                                    python_exe=osp.join(self.target, 'python.exe'),
                                    architecture=self.architecture, verbose=self.verbose,
                                    install_options=install_options)
        except RuntimeError:
            if not self.verbose:
                print("Failed!")
            raise

    def install_bdist_msi(self, package):
        """Install a distutils package built with the bdist_msi option
        (binary distribution, .msi file)"""
        raise NotImplementedError
        # self._print(package, "Extracting")
        # targetdir = utils.extract_msi(package.fname, targetdir=self.target)
        # self._print_done()

    def install_nsis_package(self, package):
        """Install a Python package built with NSIS (e.g. PyQt or PyQwt)
        (binary distribution, .exe file)"""
        bname = osp.basename(package.fname)
        assert bname.startswith(self.NSIS_PACKAGES)
        self._print(package, "Extracting")
        targetdir = utils.extract_exe(package.fname)
        self._print_done()
        self._print(package, "Installing")
        self.copy_files(package, targetdir, 'Lib', 'Lib')
        if bname.startswith('PyQt5'):
            # PyQt5
            outdir = osp.join('Lib', 'site-packages', 'PyQt5')
        elif bname.startswith('PyQt'):
            # PyQt4
            outdir = osp.join('Lib', 'site-packages', 'PyQt4')
        else:
            # Qwt5
            outdir = osp.join('Lib', 'site-packages', 'PyQt4', 'Qwt5')
        self.copy_files(package, targetdir, '$_OUTDIR', outdir)
        self._print_done()
def main(test=False):
    """Command-line entry point for the WinPython Package Manager.

    When *test* is true, run a hard-coded smoke test against sandbox
    files instead of parsing command-line arguments.
    """
    if test:
        sbdir = osp.join(osp.dirname(__file__),
                         os.pardir, os.pardir, os.pardir, 'sandbox')
        tmpdir = osp.join(sbdir, 'tobedeleted')
        # fname = osp.join(tmpdir, 'scipy-0.10.1.win-amd64-py2.7.exe')
        fname = osp.join(sbdir, 'VTK-5.10.0-Qt-4.7.4.win32-py2.7.exe')
        print(Package(fname))
        sys.exit()
        # NOTE(review): everything below is unreachable after sys.exit();
        # presumably kept for manual experimentation -- confirm intent.
        target = osp.join(utils.BASE_DIR, 'build',
                          'winpython-2.7.3', 'python-2.7.3')
        fname = osp.join(utils.BASE_DIR, 'packages.src', 'docutils-0.9.1.tar.gz')
        dist = Distribution(target, verbose=True)
        pack = Package(fname)
        print(pack.description)
        # dist.install(pack)
        # dist.uninstall(pack)
    else:
        parser = ArgumentParser(description="WinPython Package Manager: install, "\
                                "uninstall or upgrade Python packages on a Windows "\
                                "Python distribution like WinPython.")
        parser.add_argument('fname', metavar='package',
                    type=str if py3compat.PY3 else unicode,
                    help='path to a Python package')
        parser.add_argument('-t', '--target', dest='target', default=sys.prefix,
                    help='path to target Python distribution '\
                         '(default: "%s")' % sys.prefix)
        parser.add_argument('-i', '--install', dest='install',
                    action='store_const', const=True, default=False,
                    help='install package (this is the default action)')
        parser.add_argument('-u', '--uninstall', dest='uninstall',
                    action='store_const', const=True, default=False,
                    help='uninstall package')
        args = parser.parse_args()
        # --install and --uninstall are mutually exclusive; install is
        # the implicit default when neither is given.
        if args.install and args.uninstall:
            raise RuntimeError("Incompatible arguments: --install and --uninstall")
        if not args.install and not args.uninstall:
            args.install = True
        if not osp.isfile(args.fname) and args.install:
            raise IOError("File not found: %s" % args.fname)
        if utils.is_python_distribution(args.target):
            dist = Distribution(args.target)
            try:
                if args.uninstall:
                    package = dist.find_package(args.fname)
                    dist.uninstall(package)
                else:
                    package = Package(args.fname)
                    if args.install and package.is_compatible_with(dist):
                        dist.install(package)
                    else:
                        raise RuntimeError("Package is not compatible with Python "\
                                           "%s %dbit" % (dist.version, dist.architecture))
            except NotImplementedError:
                # Unsupported package formats surface as a friendlier error.
                raise RuntimeError("Package is not (yet) supported by WPPM")
        else:
            raise WindowsError("Invalid Python distribution %s" % args.target)
# Standard script entry point.
if __name__ == '__main__':
    main()
| tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/winpython/wppm.py | Python | bsd-3-clause | 32,732 | [
"VTK"
] | 37f5035cb1958dcf666e3daba4740a0b07fbcabb278a023091bc63009a9fa9b7 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
###############################################################################
# This little file is taken from layman )
# http://layman.sourceforge.net
#
# Copyright:
# (c) 2005 - 2009 Gunnar Wrobel
# (c) 2009 Sebastian Pipping
# (c) 2011 Brian Dolbec
# Distributed under the terms of the GNU General Public License v2
#
# Author(s):
# Gunnar Wrobel <wrobel@gentoo.org>
# Sebastian Pipping <sebastian@pipping.org>
# Brian Dolbec <brian.dolbec@gmail.com>
#
# Copyright:
# (c) 2013 Jauhien Piatlicki
# Distributed under the terms of the GNU General Public License v2
#
# Author(s):
# Jauhien Piatlicki <piatlicki@gmail.com>
# Single-source package version; printing it when run directly lets
# build scripts query the version via "python version.py".
VERSION = '0'
if __name__ == '__main__':
    print(VERSION)
| jauhien/g-elisp | g_elisp/version.py | Python | gpl-2.0 | 862 | [
"Brian"
] | e6b6a7f12228dd713df484a6298726aa3e1ca3d60815bb848fd045b6cfd74c07 |
from django.shortcuts import get_object_or_404, redirect
from django.views import generic
from django.core.urlresolvers import reverse, reverse_lazy
from django.core.exceptions import ObjectDoesNotExist
from django import http
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import logout as auth_logout, login as auth_login
from django.contrib.sites.models import get_current_site
from django.conf import settings
from oscar.core.utils import safe_referrer
from oscar.views.generic import PostActionMixin
from oscar.apps.customer.utils import get_password_reset_url
from oscar.core.loading import (
get_class, get_profile_class, get_classes, get_model)
from oscar.core.compat import get_user_model
from . import signals
# Load Oscar classes, forms and models dynamically so that forked
# ("customised") apps can override them without editing this module.
PageTitleMixin, RegisterUserMixin = get_classes(
    'customer.mixins', ['PageTitleMixin', 'RegisterUserMixin'])
Dispatcher = get_class('customer.utils', 'Dispatcher')
EmailAuthenticationForm, EmailUserCreationForm, OrderSearchForm = get_classes(
    'customer.forms', ['EmailAuthenticationForm', 'EmailUserCreationForm',
                       'OrderSearchForm'])
PasswordChangeForm = get_class('customer.forms', 'PasswordChangeForm')
ProfileForm, ConfirmPasswordForm = get_classes(
    'customer.forms', ['ProfileForm', 'ConfirmPasswordForm'])
UserAddressForm = get_class('address.forms', 'UserAddressForm')
Order = get_model('order', 'Order')
Line = get_model('basket', 'Line')
Basket = get_model('basket', 'Basket')
UserAddress = get_model('address', 'UserAddress')
Email = get_model('customer', 'Email')
ProductAlert = get_model('customer', 'ProductAlert')
CommunicationEventType = get_model('customer', 'CommunicationEventType')
# Swappable user model (supports custom AUTH_USER_MODEL).
User = get_user_model()
# =======
# Account
# =======
class AccountSummaryView(generic.RedirectView):
    """
    View that exists for legacy reasons and customisability. It commonly gets
    called when the user clicks on "Account" in the navbar, and can be
    overridden to determine to what sub-page the user is directed without
    having to change a lot of templates.
    """
    # Target is configurable via OSCAR_ACCOUNTS_REDIRECT_URL.
    url = reverse_lazy(settings.OSCAR_ACCOUNTS_REDIRECT_URL)
class AccountRegistrationView(RegisterUserMixin, generic.FormView):
    """Stand-alone registration page (registration form only)."""
    form_class = EmailUserCreationForm
    template_name = 'customer/registration.html'
    # Querystring parameter carrying the post-registration redirect.
    redirect_field_name = 'next'
    def get(self, request, *args, **kwargs):
        # Already-authenticated users have nothing to register.
        if request.user.is_authenticated():
            return redirect(settings.LOGIN_REDIRECT_URL)
        return super(AccountRegistrationView, self).get(
            request, *args, **kwargs)
    def get_logged_in_redirect(self):
        return reverse('customer:summary')
    def get_form_kwargs(self):
        # Pre-fill email / redirect URL from the querystring; the host is
        # passed so the form can validate the redirect target.
        kwargs = super(AccountRegistrationView, self).get_form_kwargs()
        kwargs['initial'] = {
            'email': self.request.GET.get('email', ''),
            'redirect_url': self.request.GET.get(self.redirect_field_name, '')
        }
        kwargs['host'] = self.request.get_host()
        return kwargs
    def get_context_data(self, *args, **kwargs):
        ctx = super(AccountRegistrationView, self).get_context_data(
            *args, **kwargs)
        # "Cancel" returns to the referring page (safe fallback handled
        # by safe_referrer).
        ctx['cancel_url'] = safe_referrer(self.request, '')
        return ctx
    def form_valid(self, form):
        # RegisterUserMixin creates and logs in the new user.
        self.register_user(form)
        return redirect(form.cleaned_data['redirect_url'])
class AccountAuthView(RegisterUserMixin, generic.TemplateView):
    """
    This is actually a slightly odd double form view that allows a customer to
    either login or register.
    """
    template_name = 'customer/login_registration.html'
    # Prefixes keep the two forms' field names distinct in one page.
    login_prefix, registration_prefix = 'login', 'registration'
    login_form_class = EmailAuthenticationForm
    registration_form_class = EmailUserCreationForm
    redirect_field_name = 'next'
    def get(self, request, *args, **kwargs):
        # Authenticated users are bounced straight to the redirect target.
        if request.user.is_authenticated():
            return redirect(settings.LOGIN_REDIRECT_URL)
        return super(AccountAuthView, self).get(
            request, *args, **kwargs)
    def get_context_data(self, *args, **kwargs):
        # Only build unbound forms when a bound (failed) form isn't
        # already being re-rendered.
        ctx = super(AccountAuthView, self).get_context_data(*args, **kwargs)
        if 'login_form' not in kwargs:
            ctx['login_form'] = self.get_login_form()
        if 'registration_form' not in kwargs:
            ctx['registration_form'] = self.get_registration_form()
        return ctx
    def post(self, request, *args, **kwargs):
        # Use the name of the submit button to determine which form to validate
        if u'login_submit' in request.POST:
            return self.validate_login_form()
        elif u'registration_submit' in request.POST:
            return self.validate_registration_form()
        return http.HttpResponseBadRequest()
    # LOGIN
    def get_login_form(self, bind_data=False):
        return self.login_form_class(
            **self.get_login_form_kwargs(bind_data))
    def get_login_form_kwargs(self, bind_data=False):
        kwargs = {}
        kwargs['host'] = self.request.get_host()
        kwargs['prefix'] = self.login_prefix
        kwargs['initial'] = {
            'redirect_url': self.request.GET.get(self.redirect_field_name, ''),
        }
        # Bind POST data only when validating a submission.
        if bind_data and self.request.method in ('POST', 'PUT'):
            kwargs.update({
                'data': self.request.POST,
                'files': self.request.FILES,
            })
        return kwargs
    def validate_login_form(self):
        form = self.get_login_form(bind_data=True)
        if form.is_valid():
            user = form.get_user()
            # Grab a reference to the session ID before logging in
            old_session_key = self.request.session.session_key
            auth_login(self.request, form.get_user())
            # Raise signal robustly (we don't want exceptions to crash the
            # request handling). We use a custom signal as we want to track the
            # session key before calling login (which cycles the session ID).
            signals.user_logged_in.send_robust(
                sender=self, request=self.request, user=user,
                old_session_key=old_session_key)
            msg = self.get_login_success_message(form)
            messages.success(self.request, msg)
            return redirect(self.get_login_success_url(form))
        # Invalid: re-render the page with the bound (error-carrying) form.
        ctx = self.get_context_data(login_form=form)
        return self.render_to_response(ctx)
    def get_login_success_message(self, form):
        return _("Welcome back")
    def get_login_success_url(self, form):
        redirect_url = form.cleaned_data['redirect_url']
        if redirect_url:
            return redirect_url
        # Redirect staff members to dashboard as that's the most likely place
        # they'll want to visit if they're logging in.
        if self.request.user.is_staff:
            return reverse('dashboard:index')
        return settings.LOGIN_REDIRECT_URL
    # REGISTRATION
    def get_registration_form(self, bind_data=False):
        return self.registration_form_class(
            **self.get_registration_form_kwargs(bind_data))
    def get_registration_form_kwargs(self, bind_data=False):
        kwargs = {}
        kwargs['host'] = self.request.get_host()
        kwargs['prefix'] = self.registration_prefix
        kwargs['initial'] = {
            'redirect_url': self.request.GET.get(self.redirect_field_name, ''),
        }
        if bind_data and self.request.method in ('POST', 'PUT'):
            kwargs.update({
                'data': self.request.POST,
                'files': self.request.FILES,
            })
        return kwargs
    def validate_registration_form(self):
        form = self.get_registration_form(bind_data=True)
        if form.is_valid():
            self.register_user(form)
            msg = self.get_registration_success_message(form)
            messages.success(self.request, msg)
            return redirect(self.get_registration_success_url(form))
        ctx = self.get_context_data(registration_form=form)
        return self.render_to_response(ctx)
    def get_registration_success_message(self, form):
        return _("Thanks for registering!")
    def get_registration_success_url(self, form):
        return settings.LOGIN_REDIRECT_URL
class LogoutView(generic.RedirectView):
    """Log the user out, clear configured cookies and redirect home."""
    url = settings.OSCAR_HOMEPAGE
    permanent = False
    def get(self, request, *args, **kwargs):
        auth_logout(request)
        response = super(LogoutView, self).get(request, *args, **kwargs)
        # Expire any cookies the project wants removed on logout
        # (e.g. basket cookies).
        for cookie in settings.OSCAR_COOKIES_DELETE_ON_LOGOUT:
            response.delete_cookie(cookie)
        return response
# =============
# Profile
# =============
class ProfileView(PageTitleMixin, generic.TemplateView):
    """Read-only display of the user's profile fields."""
    template_name = 'customer/profile/profile.html'
    page_title = _('Profile')
    active_tab = 'profile'
    def get_context_data(self, **kwargs):
        ctx = super(ProfileView, self).get_context_data(**kwargs)
        ctx['profile_fields'] = self.get_profile_fields(self.request.user)
        return ctx
    def get_profile_fields(self, user):
        """Collect (name, value) data for every displayable profile field,
        from both the (possibly custom) user model and any deprecated
        AUTH_PROFILE_MODULE profile class."""
        field_data = []
        # Check for custom user model
        # NOTE(review): relies on a non-standard `additional_fields`
        # attribute on User._meta -- presumably set by Oscar's abstract
        # user model; confirm before reuse.
        for field_name in User._meta.additional_fields:
            field_data.append(
                self.get_model_field_data(user, field_name))
        # Check for profile class
        profile_class = get_profile_class()
        if profile_class:
            try:
                profile = profile_class.objects.get(user=user)
            except ObjectDoesNotExist:
                # Unsaved instance is fine: we only read field values.
                profile = profile_class(user=user)
            field_names = [f.name for f in profile._meta.local_fields]
            for field_name in field_names:
                if field_name in ('user', 'id'):
                    continue
                field_data.append(
                    self.get_model_field_data(profile, field_name))
        return field_data
    def get_model_field_data(self, model_class, field_name):
        """
        Extract the verbose name and value for a model's field value
        """
        field = model_class._meta.get_field(field_name)
        if field.choices:
            # Use the human-readable label for choice fields.
            value = getattr(model_class, 'get_%s_display' % field_name)()
        else:
            value = getattr(model_class, field_name)
        return {
            'name': getattr(field, 'verbose_name'),
            'value': value,
        }
class ProfileUpdateView(PageTitleMixin, generic.FormView):
    """Edit the user's profile; warns the old address on email change."""
    form_class = ProfileForm
    template_name = 'customer/profile/profile_form.html'
    # CommunicationEventType code used to render the warning email.
    communication_type_code = 'EMAIL_CHANGED'
    page_title = _('Edit Profile')
    active_tab = 'profile'
    success_url = reverse_lazy('customer:profile-view')
    def get_form_kwargs(self):
        kwargs = super(ProfileUpdateView, self).get_form_kwargs()
        kwargs['user'] = self.request.user
        return kwargs
    def form_valid(self, form):
        # Grab current user instance before we save form. We may need this to
        # send a warning email if the email address is changed.
        try:
            old_user = User.objects.get(id=self.request.user.id)
        except User.DoesNotExist:
            old_user = None
        form.save()
        # We have to look up the email address from the form's
        # cleaned data because the object created by form.save() can
        # either be a user or profile instance depending whether a profile
        # class has been specified by the AUTH_PROFILE_MODULE setting.
        new_email = form.cleaned_data['email']
        if old_user and new_email != old_user.email:
            # Email address has changed - send a confirmation email to the old
            # address including a password reset link in case this is a
            # suspicious change.
            ctx = {
                'user': self.request.user,
                'site': get_current_site(self.request),
                'reset_url': get_password_reset_url(old_user),
                'new_email': new_email,
            }
            msgs = CommunicationEventType.objects.get_and_render(
                code=self.communication_type_code, context=ctx)
            Dispatcher().dispatch_user_messages(old_user, msgs)
        messages.success(self.request, _("Profile updated"))
        return redirect(self.get_success_url())
class ProfileDeleteView(PageTitleMixin, generic.FormView):
    """Delete the user's account after re-confirming their password."""
    form_class = ConfirmPasswordForm
    template_name = 'customer/profile/profile_delete.html'
    page_title = _('Delete profile')
    active_tab = 'profile'
    success_url = settings.OSCAR_HOMEPAGE
    def get_form_kwargs(self):
        # The form needs the user to check the supplied password.
        kwargs = super(ProfileDeleteView, self).get_form_kwargs()
        kwargs['user'] = self.request.user
        return kwargs
    def form_valid(self, form):
        self.request.user.delete()
        messages.success(
            self.request,
            _("Your profile has now been deleted. Thanks for using the site."))
        return redirect(self.get_success_url())
class ChangePasswordView(PageTitleMixin, generic.FormView):
    """Change password form; emails the user a notification afterwards."""
    form_class = PasswordChangeForm
    template_name = 'customer/profile/change_password_form.html'
    # CommunicationEventType code for the notification email.
    communication_type_code = 'PASSWORD_CHANGED'
    page_title = _('Change Password')
    active_tab = 'profile'
    success_url = reverse_lazy('customer:profile-view')
    def get_form_kwargs(self):
        kwargs = super(ChangePasswordView, self).get_form_kwargs()
        kwargs['user'] = self.request.user
        return kwargs
    def form_valid(self, form):
        form.save()
        messages.success(self.request, _("Password updated"))
        # Notify the user, including a reset link in case the change
        # was not made by them.
        ctx = {
            'user': self.request.user,
            'site': get_current_site(self.request),
            'reset_url': get_password_reset_url(self.request.user),
        }
        msgs = CommunicationEventType.objects.get_and_render(
            code=self.communication_type_code, context=ctx)
        Dispatcher().dispatch_user_messages(self.request.user, msgs)
        return redirect(self.get_success_url())
# =============
# Email history
# =============
class EmailHistoryView(PageTitleMixin, generic.ListView):
    """Paginated list of emails previously sent to the current user."""
    context_object_name = "emails"
    template_name = 'customer/email/email_list.html'
    paginate_by = 20
    page_title = _('Email History')
    active_tab = 'emails'
    def get_queryset(self):
        """Restrict the listing to the requesting user's own emails."""
        current_user = self.request.user
        return Email._default_manager.filter(user=current_user)
class EmailDetailView(PageTitleMixin, generic.DetailView):
    """Customer email"""
    template_name = "customer/email/email_detail.html"
    context_object_name = 'email'
    active_tab = 'emails'
    def get_object(self, queryset=None):
        # Filtering by user prevents viewing other customers' emails.
        return get_object_or_404(Email, user=self.request.user,
                                 id=self.kwargs['email_id'])
    def get_page_title(self):
        """Append email subject to page title"""
        return u'%s: %s' % (_('Email'), self.object.subject)
# =============
# Order history
# =============
class OrderHistoryView(PageTitleMixin, generic.ListView):
    """
    Customer order history
    """
    context_object_name = "orders"
    template_name = 'customer/order/order_list.html'
    paginate_by = 20
    model = Order
    form_class = OrderSearchForm
    page_title = _('Order History')
    active_tab = 'orders'
    def get(self, request, *args, **kwargs):
        # 'date_from' in the querystring signals a submitted search form.
        if 'date_from' in request.GET:
            self.form = self.form_class(self.request.GET)
            if not self.form.is_valid():
                # Invalid search: show the unfiltered list with form errors.
                self.object_list = self.get_queryset()
                ctx = self.get_context_data(object_list=self.object_list)
                return self.render_to_response(ctx)
            data = self.form.cleaned_data
            # If the user has just entered an order number, try and look it up
            # and redirect immediately to the order detail page.
            if data['order_number'] and not (data['date_to'] or
                                             data['date_from']):
                try:
                    order = Order.objects.get(
                        number=data['order_number'], user=self.request.user)
                except Order.DoesNotExist:
                    # Fall through to the (empty) filtered listing.
                    pass
                else:
                    return redirect(
                        'customer:order', order_number=order.number)
        else:
            # No search submitted: unbound form.
            self.form = self.form_class()
        return super(OrderHistoryView, self).get(request, *args, **kwargs)
    def get_queryset(self):
        # Always scoped to the requesting user; search filters applied on top.
        qs = self.model._default_manager.filter(user=self.request.user)
        if self.form.is_bound and self.form.is_valid():
            qs = qs.filter(**self.form.get_filters())
        return qs
    def get_context_data(self, *args, **kwargs):
        ctx = super(OrderHistoryView, self).get_context_data(*args, **kwargs)
        ctx['form'] = self.form
        return ctx
class OrderDetailView(PageTitleMixin, PostActionMixin, generic.DetailView):
    """Detail page for one of the customer's own orders; supports a
    'reorder' POST action via PostActionMixin."""
    model = Order
    active_tab = 'orders'
    def get_template_names(self):
        return ["customer/order/order_detail.html"]
    def get_page_title(self):
        """
        Order number as page title
        """
        return u'%s #%s' % (_('Order'), self.object.number)
    def get_object(self, queryset=None):
        # Scoped to the requesting user to prevent viewing others' orders.
        return get_object_or_404(self.model, user=self.request.user,
                                 number=self.kwargs['order_number'])
    def do_reorder(self, order):  # noqa (too complex (10))
        """
        'Re-order' a previous order.
        This puts the contents of the previous order into your basket
        """
        # Collect lines to be added to the basket and any warnings for lines
        # that are no longer available.
        basket = self.request.basket
        lines_to_add = []
        warnings = []
        for line in order.lines.all():
            is_available, reason = line.is_available_to_reorder(
                basket, self.request.strategy)
            if is_available:
                lines_to_add.append(line)
            else:
                warnings.append(reason)
        # Check whether the number of items in the basket won't exceed the
        # maximum.
        total_quantity = sum([line.quantity for line in lines_to_add])
        is_quantity_allowed, reason = basket.is_quantity_allowed(
            total_quantity)
        if not is_quantity_allowed:
            messages.warning(self.request, reason)
            self.response = redirect('customer:order-list')
            return
        # Add any warnings
        for warning in warnings:
            messages.warning(self.request, warning)
        for line in lines_to_add:
            # Convert the stored line attributes back into basket options.
            options = []
            for attribute in line.attributes.all():
                if attribute.option:
                    options.append({
                        'option': attribute.option,
                        'value': attribute.value})
            basket.add_product(line.product, line.quantity, options)
        if len(lines_to_add) > 0:
            self.response = redirect('basket:summary')
            messages.info(
                self.request,
                _("All available lines from order %(number)s "
                  "have been added to your basket") % {'number': order.number})
        else:
            self.response = redirect('customer:order-list')
            messages.warning(
                self.request,
                _("It is not possible to re-order order %(number)s "
                  "as none of its lines are available to purchase") %
                {'number': order.number})
class OrderLineView(PostActionMixin, generic.DetailView):
    """Customer order line"""
    def get_object(self, queryset=None):
        # Look up the parent order (scoped to the user) first, then the line.
        order = get_object_or_404(Order, user=self.request.user,
                                  number=self.kwargs['order_number'])
        return order.lines.get(id=self.kwargs['line_id'])
    def do_reorder(self, line):
        """Re-add a single previous order line to the current basket."""
        # Default to bouncing back to the order page (used on failure).
        self.response = redirect(
            'customer:order', int(self.kwargs['order_number']))
        basket = self.request.basket
        line_available_to_reorder, reason = line.is_available_to_reorder(
            basket, self.request.strategy)
        if not line_available_to_reorder:
            messages.warning(self.request, reason)
            return
        # We need to pass response to the get_or_create... method
        # as a new basket might need to be created
        self.response = redirect('basket:summary')
        # Convert line attributes into basket options
        options = []
        for attribute in line.attributes.all():
            if attribute.option:
                options.append({'option': attribute.option,
                                'value': attribute.value})
        basket.add_product(line.product, line.quantity, options)
        if line.quantity > 1:
            msg = _("%(qty)d copies of '%(product)s' have been added to your"
                    " basket") % {
                'qty': line.quantity, 'product': line.product}
        else:
            msg = _("'%s' has been added to your basket") % line.product
        messages.info(self.request, msg)
class AnonymousOrderDetailView(generic.DetailView):
    """Order detail page for orders placed without an account, protected
    by a verification hash in the URL."""
    model = Order
    template_name = "customer/anon_order.html"
    def get_object(self, queryset=None):
        # Check URL hash matches that for order to prevent spoof attacks
        order = get_object_or_404(self.model, user=None,
                                  number=self.kwargs['order_number'])
        if self.kwargs['hash'] != order.verification_hash():
            raise http.Http404()
        return order
# ------------
# Address book
# ------------
class AddressListView(PageTitleMixin, generic.ListView):
    """Paginated address book for the current customer."""
    context_object_name = "addresses"
    template_name = 'customer/address/address_list.html'
    paginate_by = 40
    active_tab = 'addresses'
    page_title = _('Address Book')
    def get_queryset(self):
        """Only the requesting user's own addresses are listed."""
        owner = self.request.user
        return UserAddress._default_manager.filter(user=owner)
class AddressCreateView(PageTitleMixin, generic.CreateView):
    """Create a new address in the customer's address book."""
    form_class = UserAddressForm
    model = UserAddress
    template_name = 'customer/address/address_form.html'
    active_tab = 'addresses'
    page_title = _('Add a new address')
    success_url = reverse_lazy('customer:address-list')
    def get_form_kwargs(self):
        # The form attaches the new address to this user.
        kwargs = super(AddressCreateView, self).get_form_kwargs()
        kwargs['user'] = self.request.user
        return kwargs
    def get_context_data(self, **kwargs):
        ctx = super(AddressCreateView, self).get_context_data(**kwargs)
        ctx['title'] = _('Add a new address')
        return ctx
    def get_success_url(self):
        # Flash message is queued here as self.object exists by now.
        messages.success(self.request,
                         _("Address '%s' created") % self.object.summary)
        return super(AddressCreateView, self).get_success_url()
class AddressUpdateView(PageTitleMixin, generic.UpdateView):
    """Edit an existing address in the customer's address book."""
    form_class = UserAddressForm
    model = UserAddress
    template_name = 'customer/address/address_form.html'
    active_tab = 'addresses'
    page_title = _('Edit address')
    success_url = reverse_lazy('customer:address-list')
    def get_form_kwargs(self):
        kwargs = super(AddressUpdateView, self).get_form_kwargs()
        kwargs['user'] = self.request.user
        return kwargs
    def get_context_data(self, **kwargs):
        ctx = super(AddressUpdateView, self).get_context_data(**kwargs)
        ctx['title'] = _('Edit address')
        return ctx
    def get_queryset(self):
        # Restrict editable addresses to the requesting user's own.
        return self.request.user.addresses.all()
    def get_success_url(self):
        messages.success(self.request,
                         _("Address '%s' updated") % self.object.summary)
        return super(AddressUpdateView, self).get_success_url()
class AddressDeleteView(PageTitleMixin, generic.DeleteView):
    """Confirm-and-delete view for one of the customer's addresses."""
    model = UserAddress
    template_name = "customer/address/address_delete.html"
    page_title = _('Delete address?')
    active_tab = 'addresses'
    context_object_name = 'address'
    success_url = reverse_lazy('customer:address-list')
    def get_queryset(self):
        # Users may only delete their own addresses.
        return UserAddress._default_manager.filter(user=self.request.user)
    def get_success_url(self):
        messages.success(self.request,
                         _("Address '%s' deleted") % self.object.summary)
        return super(AddressDeleteView, self).get_success_url()
class AddressChangeStatusView(generic.RedirectView):
    """
    Sets an address as default_for_(billing|shipping)
    """
    url = reverse_lazy('customer:address-list')
    permanent = False
    def get(self, request, pk=None, action=None, *args, **kwargs):
        address = get_object_or_404(UserAddress, user=self.request.user,
                                    pk=pk)
        # We don't want the user to set an address as the default shipping
        # address, though they should be able to set it as their billing
        # address.
        if address.country.is_shipping_country:
            setattr(address, 'is_%s' % action, True)
        elif action == 'default_for_billing':
            setattr(address, 'is_default_for_billing', True)
        else:
            messages.error(request, _('We do not ship to this country'))
        # NOTE(review): save() also runs on the error branch above -- the
        # address is unmodified there so this is a harmless no-op write,
        # but confirm it isn't masking an intended early return.
        address.save()
        return super(AddressChangeStatusView, self).get(
            request, *args, **kwargs)
| jinnykoo/christmas | src/oscar/apps/customer/views.py | Python | bsd-3-clause | 25,451 | [
"VisIt"
] | 5e22e52b401b1f943f079d121bfa54e15cd20e13b81897669afe756b14a24add |
"""Tests for dials.damage_analysis"""
from __future__ import annotations
import os
import procrunner
import pytest
from dxtbx.serialize import load
from dials.array_family import flex
from dials.command_line.damage_analysis import PychefRunner, phil_scope, run
def test_damage_analysis_dials_data(dials_data, run_in_tmpdir):
    """Test dials.damage_analysis on scaled data files (.refl/.expt),
    checking that the HTML report and JSON output are produced."""
    location = dials_data("l_cysteine_4_sweeps_scaled")
    refls = location.join("scaled_20_25.refl").strpath
    expts = location.join("scaled_20_25.expt").strpath
    args = [
        refls,
        expts,
        "min_completeness=0.4",
        "-v",
        "json=dials.damage_analysis.json",
    ]
    run(args)
    # Both report formats should be written to the working directory.
    assert os.path.isfile("dials.damage_analysis.html")
    assert os.path.isfile("dials.damage_analysis.json")
def test_setup_from_dials_data(dials_data, run_in_tmpdir):
    """Test PychefRunner dose calculation when set up from dials data files,
    both with a shared crystal (accumulating dose) and independently."""
    location = dials_data("l_cysteine_4_sweeps_scaled")
    refls = location.join("scaled_20_25.refl").strpath
    expts = location.join("scaled_20_25.expt").strpath
    table = flex.reflection_table.from_file(refls)
    experiments = load.experiment_list(expts, check_format=False)
    params = phil_scope.extract()
    params.dose.experiments.shared_crystal = True
    params.dose.experiments.dose_per_image = [1.0, 2.0]
    # First experiment is images 1>1800, second 1>1700 (i.e. dose spans 1>5200)
    runner = PychefRunner.from_dials_data_files(params, experiments, table)
    assert max(runner.dose) == 5198  # last reflection measured not quite at end
    assert min(runner.dose) == 2
    # Now try again in 'standard' mode i.e. not shared crystal, and set a
    # starting dose
    params.dose.experiments.shared_crystal = False
    params.dose.experiments.dose_per_image = [1.0]
    params.dose.experiments.starting_doses = [10, 10]
    runner = PychefRunner.from_dials_data_files(params, experiments, table)
    # Doses now count per-experiment, offset by the starting dose of 10.
    assert max(runner.dose) == 1800 + 10
    assert min(runner.dose) == 2 + 10
def test_damage_analysis_mtz(dials_data, run_in_tmpdir):
    """Test dials.damage_analysis on a scaled MTZ file exported by
    dials.export (the alternative input route to .refl/.expt)."""
    location = dials_data("l_cysteine_4_sweeps_scaled")
    refls = location.join("scaled_20_25.refl").strpath
    expts = location.join("scaled_20_25.expt").strpath
    # First export the data
    command = ["dials.export", refls, expts]
    result = procrunner.run(command)
    assert not result.returncode and not result.stderr
    assert os.path.isfile("scaled.mtz")
    args = [
        run_in_tmpdir.join("scaled.mtz").strpath,
        "anomalous=True",
        "json=dials.damage_analysis.json",
    ]
    run(args)
    assert os.path.isfile("dials.damage_analysis.html")
    assert os.path.isfile("dials.damage_analysis.json")
def test_damage_analysis_input_handling(dials_data, run_in_tmpdir):
    """Test that errors are handled if more than one refl file, no refl/expt
    file or unscaled data."""
    location = dials_data("l_cysteine_4_sweeps_scaled")
    refls = location.join("scaled_20_25.refl").strpath
    expts = location.join("scaled_20_25.expt").strpath
    # Too many refl files
    args = [refls, expts, refls]
    with pytest.raises(SystemExit):
        run(args)
    # No refl file
    args = [expts]
    with pytest.raises(SystemExit):
        run(args)
    # No expt file
    args = [refls]
    with pytest.raises(SystemExit):
        run(args)
    # Unscaled data: integrated-only output must be rejected.
    location = dials_data("l_cysteine_dials_output")
    refls = location.join("20_integrated.pickle").strpath
    expts = location.join("20_integrated_experiments.json").strpath
    args = [refls, expts]
    with pytest.raises(SystemExit):
        run(args)
| dials/dials | tests/command_line/test_damage_analysis.py | Python | bsd-3-clause | 3,701 | [
"CRYSTAL"
] | ce926b9862e9d31555d6bd191c1fa1328f3721a8d1be065174b70e1fed08a5c8 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that all files contain proper licensing information."""
import optparse
import os.path
import subprocess
import sys
def PrintUsage():
  """Print command-line usage information to stdout (Python 2 script)."""
  # NOTE: the text below is runtime output, kept byte-for-byte.
  print """Usage: python checklicenses.py [--root <root>] [tocheck]
--root Specifies the repository root. This defaults to "../.." relative
to the script file. This will be correct given the normal location
of the script in "<root>/tools/checklicenses".
--ignore-suppressions Ignores path-specific license whitelist. Useful when
trying to remove a suppression/whitelist entry.
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python checklicenses.py
python checklicenses.py --root ~/chromium/src third_party"""
WHITELISTED_LICENSES = [
'Apache (v2.0)',
'Apache (v2.0) BSD (2 clause)',
'Apache (v2.0) GPL (v2)',
'Apple MIT', # https://fedoraproject.org/wiki/Licensing/Apple_MIT_License
'APSL (v2)',
'APSL (v2) BSD (4 clause)',
'BSD',
'BSD (2 clause)',
'BSD (2 clause) MIT/X11 (BSD like)',
'BSD (3 clause)',
'BSD (3 clause) ISC',
'BSD (3 clause) LGPL (v2.1 or later)',
'BSD (3 clause) MIT/X11 (BSD like)',
'BSD (4 clause)',
'BSD-like',
# TODO(phajdan.jr): Make licensecheck not print BSD-like twice.
'BSD-like MIT/X11 (BSD like)',
'BSL (v1.0)',
# TODO(phajdan.jr): Make licensecheck not print the comma after 3.1.
'BSL (v1.0) GPL (v3.1,)',
'GPL (v3 or later) with Bison parser exception',
'GPL with Bison parser exception',
'ISC',
'LGPL',
'LGPL (v2)',
'LGPL (v2 or later)',
'LGPL (v2.1)',
'LGPL (v3 or later)',
# TODO(phajdan.jr): Make licensecheck convert that comma to a dot.
'LGPL (v2,1 or later)',
'LGPL (v2.1 or later)',
'MPL (v1.0) LGPL (v2 or later)',
'MPL (v1.1)',
'MPL (v1.1) BSD-like',
'MPL (v1.1) BSD-like GPL (unversioned/unknown version)',
'MPL (v1.1) GPL (unversioned/unknown version)',
# TODO(phajdan.jr): Make licensecheck not print the comma after 1.1.
'MPL (v1.1,) GPL (unversioned/unknown version) LGPL (v2 or later)',
'MPL (v1.1,) GPL (unversioned/unknown version) LGPL (v2.1 or later)',
'MIT/X11 (BSD like)',
'Ms-PL',
'Public domain',
'libpng',
'zlib/libpng',
'SGI Free Software License B',
]
PATH_SPECIFIC_WHITELISTED_LICENSES = {
'base/third_party/icu': [ # http://crbug.com/98087
'UNKNOWN',
],
# http://code.google.com/p/google-breakpad/issues/detail?id=450
'breakpad/src': [
'UNKNOWN',
],
'chrome/common/extensions/docs/examples': [ # http://crbug.com/98092
'UNKNOWN',
],
'chrome/test/data/layout_tests/LayoutTests': [
'UNKNOWN',
],
'courgette/third_party/bsdiff_create.cc': [ # http://crbug.com/98095
'UNKNOWN',
],
'data/mozilla_js_tests': [
'UNKNOWN',
],
'data/page_cycler': [
'UNKNOWN',
'GPL (v2 or later)',
],
'data/tab_switching': [
'UNKNOWN',
],
'googleurl': [ # http://code.google.com/p/google-url/issues/detail?id=15
'UNKNOWN',
],
'native_client': [ # http://crbug.com/98099
'UNKNOWN',
],
'native_client/toolchain': [
'BSD GPL (v2 or later)',
'BSD (2 clause) GPL (v2 or later)',
'BSD (3 clause) GPL (v2 or later)',
'BSL (v1.0) GPL',
'GPL',
'GPL (unversioned/unknown version)',
'GPL (v2)',
# TODO(phajdan.jr): Make licensecheck not print the comma after v2.
'GPL (v2,)',
'GPL (v2 or later)',
# TODO(phajdan.jr): Make licensecheck not print the comma after 3.1.
'GPL (v3.1,)',
],
'net/disk_cache/hash.cc': [ # http://crbug.com/98100
'UNKNOWN',
],
'net/tools/spdyshark': [
'GPL (v2 or later)',
'UNKNOWN',
],
# http://crbug.com/98107
'ppapi/c/documentation/check.sh': [
'UNKNOWN',
],
'ppapi/cpp/documentation/check.sh': [
'UNKNOWN',
],
'ppapi/lib/gl/include': [
'UNKNOWN',
],
'ppapi/native_client/tests/earth/earth_image.inc': [
'UNKNOWN',
],
'third_party/WebKit': [
'UNKNOWN',
],
'third_party/WebKit/Source/JavaScriptCore/tests/mozilla': [
'GPL',
'GPL (unversioned/unknown version)',
],
'third_party/active_doc': [ # http://crbug.com/98113
'UNKNOWN',
],
# http://code.google.com/p/angleproject/issues/detail?id=217
'third_party/angle': [
'UNKNOWN',
],
'third_party/bsdiff/mbsdiff.cc': [
'UNKNOWN',
],
'third_party/bzip2': [
'UNKNOWN',
],
'third_party/cld/encodings/compact_lang_det': [ # http://crbug.com/98120
'UNKNOWN',
],
'third_party/devscripts': [
'GPL (v2 or later)',
],
'third_party/expat/files/lib': [ # http://crbug.com/98121
'UNKNOWN',
],
'third_party/ffmpeg': [
'GPL',
'GPL (v2 or later)',
'UNKNOWN', # http://crbug.com/98123
],
'third_party/gles2_book': [ # http://crbug.com/98130
'UNKNOWN',
],
'third_party/gles2_conform/GTF_ES': [ # http://crbug.com/98131
'UNKNOWN',
],
'third_party/harfbuzz': [ # http://crbug.com/98133
'UNKNOWN',
],
'third_party/hunspell': [ # http://crbug.com/98134
'UNKNOWN',
],
'third_party/iccjpeg': [ # http://crbug.com/98137
'UNKNOWN',
],
'third_party/icu': [ # http://crbug.com/98301
'UNKNOWN',
],
'third_party/jemalloc': [ # http://crbug.com/98302
'UNKNOWN',
],
'third_party/lcov': [ # http://crbug.com/98304
'UNKNOWN',
],
'third_party/lcov/contrib/galaxy/genflat.pl': [
'GPL (v2 or later)',
],
'third_party/lcov-1.9/contrib/galaxy/genflat.pl': [
'GPL (v2 or later)',
],
'third_party/libevent': [ # http://crbug.com/98309
'UNKNOWN',
],
'third_party/libjingle/source/talk': [ # http://crbug.com/98310
'UNKNOWN',
],
'third_party/libjpeg': [ # http://crbug.com/98313
'UNKNOWN',
],
'third_party/libjpeg_turbo': [ # http://crbug.com/98314
'UNKNOWN',
],
'third_party/libpng': [ # http://crbug.com/98318
'UNKNOWN',
],
'third_party/libvpx/source': [ # http://crbug.com/98319
'UNKNOWN',
],
'third_party/libvpx/source/libvpx/examples/includes': [
'GPL (v2 or later)',
],
'third_party/libwebp': [ # http://crbug.com/98448
'UNKNOWN',
],
'third_party/libxml': [
'UNKNOWN',
],
'third_party/libxslt': [
'UNKNOWN',
],
'third_party/lzma_sdk': [
'UNKNOWN',
],
'third_party/mesa/MesaLib': [
'GPL (v2)',
'GPL (v3 or later)',
'UNKNOWN', # http://crbug.com/98450
],
'third_party/modp_b64': [
'UNKNOWN',
],
'third_party/npapi/npspy/extern/java': [
'GPL (unversioned/unknown version)',
],
'third_party/openssl': [ # http://crbug.com/98451
'UNKNOWN',
],
'third_party/ots/tools/ttf-checksum.py': [ # http://code.google.com/p/ots/issues/detail?id=2
'UNKNOWN',
],
'third_party/molokocacao/NSBezierPath+MCAdditions.h': [ # http://crbug.com/98453
'UNKNOWN',
],
'third_party/npapi/npspy': [
'UNKNOWN',
],
'third_party/ocmock/OCMock': [ # http://crbug.com/98454
'UNKNOWN',
],
'third_party/ply/__init__.py': [
'UNKNOWN',
],
'third_party/protobuf': [ # http://crbug.com/98455
'UNKNOWN',
],
'third_party/pylib': [
'UNKNOWN',
],
'third_party/scons-2.0.1/engine/SCons': [ # http://crbug.com/98462
'UNKNOWN',
],
'third_party/simplejson': [
'UNKNOWN',
],
'third_party/skia': [ # http://crbug.com/98463
'UNKNOWN',
],
'third_party/snappy/src': [ # http://crbug.com/98464
'UNKNOWN',
],
'third_party/smhasher/src': [ # http://crbug.com/98465
'UNKNOWN',
],
'third_party/sqlite': [
'UNKNOWN',
],
'third_party/swig/Lib/linkruntime.c': [ # http://crbug.com/98585
'UNKNOWN',
],
'third_party/talloc': [
'GPL (v3 or later)',
'UNKNOWN', # http://crbug.com/98588
],
'third_party/tcmalloc': [
'UNKNOWN', # http://crbug.com/98589
],
'third_party/tlslite': [
'UNKNOWN',
],
'third_party/webdriver': [ # http://crbug.com/98590
'UNKNOWN',
],
'third_party/webrtc': [ # http://crbug.com/98592
'UNKNOWN',
],
'third_party/xdg-utils': [ # http://crbug.com/98593
'UNKNOWN',
],
'third_party/yasm/source': [ # http://crbug.com/98594
'UNKNOWN',
],
'third_party/zlib/contrib/minizip': [
'UNKNOWN',
],
'third_party/zlib/trees.h': [
'UNKNOWN',
],
'tools/dromaeo_benchmark_runner/dromaeo_benchmark_runner.py': [
'UNKNOWN',
],
'tools/emacs': [ # http://crbug.com/98595
'UNKNOWN',
],
'tools/grit/grit/node/custom/__init__.py': [
'UNKNOWN',
],
'tools/gyp/test': [
'UNKNOWN',
],
'tools/histograms': [
'UNKNOWN',
],
'tools/memory_watcher': [
'UNKNOWN',
],
'tools/playback_benchmark': [
'UNKNOWN',
],
'tools/python/google/__init__.py': [
'UNKNOWN',
],
'tools/site_compare': [
'UNKNOWN',
],
'tools/stats_viewer/Properties/AssemblyInfo.cs': [
'UNKNOWN',
],
'tools/symsrc/pefile.py': [
'UNKNOWN',
],
'v8/test/cctest': [ # http://crbug.com/98597
'UNKNOWN',
],
'webkit/data/ico_decoder': [
'UNKNOWN',
],
}
def check_licenses(options, args):
  """Run licensecheck.pl over the tree and report non-whitelisted licenses.

  Args:
    options: parsed optparse options (base_directory, verbose,
        ignore_suppressions).
    args: at most one positional argument: the directory to check,
        relative to the base directory.

  Returns:
    0 if every file's license is acceptable, 1 otherwise (bad arguments,
    licensecheck failure, or at least one non-whitelisted license).
  """
  # Figure out which directory we have to check.
  if len(args) == 0:
    # No directory to check specified, use the repository root.
    start_dir = options.base_directory
  elif len(args) == 1:
    # Directory specified. Start here. It's supposed to be relative to the
    # base directory.
    start_dir = os.path.abspath(os.path.join(options.base_directory, args[0]))
  else:
    # More than one argument, we don't handle this.
    PrintUsage()
    return 1

  print "Using base directory:", options.base_directory
  print "Checking:", start_dir
  print

  # Run the devscripts licensecheck.pl recursively over the target tree.
  licensecheck_path = os.path.abspath(os.path.join(options.base_directory,
                                                   'third_party',
                                                   'devscripts',
                                                   'licensecheck.pl'))
  licensecheck = subprocess.Popen([licensecheck_path, '-r', start_dir],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
  stdout, stderr = licensecheck.communicate()
  if options.verbose:
    print '----------- licensecheck stdout -----------'
    print stdout
    print '--------- end licensecheck stdout ---------'
  # Any stderr output (not just a non-zero exit code) is treated as failure.
  if licensecheck.returncode != 0 or stderr:
    print '----------- licensecheck stderr -----------'
    print stderr
    print '--------- end licensecheck stderr ---------'
    print "\nFAILED\n"
    return 1

  success = True
  for line in stdout.splitlines():
    # licensecheck output format is "<filename>: <license>".
    filename, license = line.split(':', 1)
    filename = os.path.relpath(filename.strip(), options.base_directory)
    # All files in the build output directory are generated one way or another.
    # There's no need to check them.
    if filename.startswith('out/') or filename.startswith('sconsbuild/'):
      continue
    # For now we're just interested in the license.
    license = license.replace('*No copyright*', '').strip()
    # Skip generated files.
    if 'GENERATED FILE' in license:
      continue
    if license in WHITELISTED_LICENSES:
      continue
    if not options.ignore_suppressions:
      # Path-specific suppressions: a prefix match on the filename plus an
      # exact match on the license string.
      found_path_specific = False
      for prefix in PATH_SPECIFIC_WHITELISTED_LICENSES:
        if (filename.startswith(prefix) and
            license in PATH_SPECIFIC_WHITELISTED_LICENSES[prefix]):
          found_path_specific = True
          break
      if found_path_specific:
        continue
    print "'%s' has non-whitelisted license '%s'" % (filename, license)
    success = False

  if success:
    print "\nSUCCESS\n"
    return 0
  else:
    print "\nFAILED\n"
    print "Please read",
    print "http://www.chromium.org/developers/adding-3rd-party-libraries"
    print "for more info how to handle the failure."
    print
    print "Please respect OWNERS of checklicenses.py. Changes violating"
    print "this requirement may be reverted."
    return 1
def main():
  """Parse command-line options and run the license check.

  Returns the exit code from check_licenses (0 on success, 1 on failure).
  """
  # Default root is two levels up from this script: <root>/tools/checklicenses.
  default_root = os.path.abspath(
      os.path.join(os.path.dirname(__file__), '..', '..'))
  option_parser = optparse.OptionParser()
  option_parser.add_option('--root', default=default_root,
                           dest='base_directory',
                           help='Specifies the repository root. This defaults '
                           'to "../.." relative to the script file, which '
                           'will normally be the repository root.')
  option_parser.add_option('-v', '--verbose', action='store_true',
                           default=False, help='Print debug logging')
  option_parser.add_option('--ignore-suppressions',
                           action='store_true',
                           default=False,
                           help='Ignore path-specific license whitelist.')
  options, args = option_parser.parse_args()
  return check_licenses(options, args)
if '__main__' == __name__:
sys.exit(main())
| aYukiSekiguchi/ACCESS-Chromium | tools/checklicenses/checklicenses.py | Python | bsd-3-clause | 13,969 | [
"Galaxy"
] | d36a22e4081ebbb85b647a0c4426fc858229c613b0f805b7a829965dcdc4e02f |
import os
import tarfile
from flyingpigeon.subset import countries, countries_longname
from flyingpigeon.log import init_process_logger
from pywps.Process import WPSProcess
import logging
logger = logging.getLogger(__name__)
class climatefactsheetProcess(WPSProcess):
    """WPS process producing a PDF 'climate fact sheet' for selected countries.

    The input NetCDF files are clipped to the chosen country polygons and
    summarised in three plots (polygon map, ensemble uncertainty and
    spaghetti plot) which are combined into a single PDF.
    """

    def __init__(self):
        WPSProcess.__init__(
            self,
            identifier="climatefactsheet",
            title="Climate Fact Sheet Generator",
            version="0.1",
            abstract="Returns a pdf with a short overview of the climatological situation for the selected countries",
            metadata=[
                # {"title": "LSCE", "href": "http://www.lsce.ipsl.fr/en/index.php"},
                {"title": "Doc", "href": "http://flyingpigeon.readthedocs.io/en/latest/"},
            ],
            statusSupported=True,
            storeSupported=True
        )

        # NetCDF input files, individually or bundled as tar/zip archives.
        self.resource = self.addComplexInput(
            identifier="resource",
            title="Resource",
            abstract="NetCDF Files or archive (tar/zip) containing netCDF files",
            minOccurs=1,
            maxOccurs=1000,
            maxmegabites=5000,
            formats=[{"mimeType": "application/x-netcdf"},
                     {"mimeType": "application/x-tar"},
                     {"mimeType": "application/zip"}],
        )

        # Country codes selecting the regions of interest.
        self.region = self.addLiteralInput(
            identifier="region",
            title="Region",
            # abstract= countries_longname(), # need to handle special non-ascii char in countries.
            default='DEU',
            type=type(''),
            minOccurs=1,
            maxOccurs=len(countries()),
            allowedValues=countries()
        )

        ###########
        # OUTPUTS
        ###########
        self.output_factsheet = self.addComplexOutput(
            title="Climate Fact Sheet",
            abstract="PDF with a short overview of the climatological situation of the selected countries",
            formats=[{"mimeType": "application/pdf"}],
            asReference=True,
            identifier="output_factsheet",
        )

        self.output_log = self.addComplexOutput(
            identifier="output_log",
            title="Logging information",
            abstract="Collected logs during process run.",
            formats=[{"mimeType": "text/plain"}],
            asReference=True,
        )

    def execute(self):
        """Clip the input data to the selected regions and render the PDF."""
        from flyingpigeon.utils import archive, archiveextract
        from tempfile import mkstemp

        init_process_logger('log.txt')
        self.output_log.setValue('log.txt')

        ncs = archiveextract(self.getInputValues(identifier='resource'))
        regions = self.region.getValue()

        self.status.set('Arguments set for subset process', 0)
        logger.debug('starting: regions=%s, num_files=%s' % (len(regions), len(ncs)))

        # World map showing the selected polygons.
        try:
            from flyingpigeon.visualisation import plot_polygons
            png_country = plot_polygons(regions)
        except Exception:
            logger.exception('failed to plot the polygon to world map')
            # Bug fix: previously this branch never assigned png_country (a
            # stray mkstemp result was bound to unused names), so a plotting
            # failure raised NameError at factsheetbrewer() below. Fall back
            # to an empty placeholder image like the other plot handlers.
            _, png_country = mkstemp(dir='.', suffix='.png')

        # clip the demanded polygons
        from flyingpigeon.subset import clipping
        subsets = clipping(resource=ncs, variable=None,
                           dimension_map=None,
                           calc=None,
                           output_format='nc',
                           calc_grouping=None,
                           time_range=None,
                           time_region=None,
                           historical_concatination=True,
                           prefix=None,
                           spatial_wrapping='wrap',
                           polygons=regions,
                           mosaic=True
                           )

        # Ensemble uncertainty plot; fall back to an empty image on failure.
        try:
            from flyingpigeon.visualisation import uncertainty
            png_uncertainty = uncertainty(subsets)
        except Exception:
            logger.exception('failed to generate the uncertainty plot')
            _, png_uncertainty = mkstemp(dir='.', suffix='.png')

        # Spaghetti plot of all ensemble members; same fallback strategy.
        try:
            from flyingpigeon.visualisation import spaghetti
            png_spaghetti = spaghetti(subsets)
        except Exception:
            logger.exception('failed to generate the spaghetti plot')
            _, png_spaghetti = mkstemp(dir='.', suffix='.png')

        from flyingpigeon.visualisation import factsheetbrewer
        factsheet = factsheetbrewer(png_country=png_country,
                                    png_uncertainty=png_uncertainty,
                                    png_spaghetti=png_spaghetti)

        self.output_factsheet.setValue(factsheet)
        self.status.set('done', 100)
| KatiRG/flyingpigeon | flyingpigeon/processes/wps_climatefactsheet.py | Python | apache-2.0 | 6,448 | [
"NetCDF"
] | 2052e271d6a138ceeda30c4d49b13bbb256ab839bfc9540eac70807f66698203 |
from setuptools import setup
# Distribution metadata for gparse, a parser for Gaussian vibrational
# analysis data.
setup(name='gparse',
      version='0.0.1',
      description='Simple library for handling Gaussian vibrational analysis data.',
      url='https://github.com/SeanMcGrath/gparse',
      author='Sean McGrath',
      author_email='srmcgrat@umass.edu',
      license='MIT',
      packages=['gparse'],
      zip_safe=False)
| SeanMcGrath/raman | setup.py | Python | mit | 350 | [
"Gaussian"
] | d274b65983074d2afd321c2be8e34bfdae36e019cb3d9db03c69c66d705796f6 |
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`ewimport` module provides the functionality for importing
EasyWorship song databases into the current installation database.
"""
import os
import struct
import re
from openlp.core.lib import translate
from openlp.plugins.songs.lib import VerseType
from openlp.plugins.songs.lib import retrieve_windows_encoding, strip_rtf
from .songimport import SongImport
RTF_STRIPPING_REGEX = re.compile(r'\{\\tx[^}]*\}')
# regex: at least two newlines, can have spaces between them
SLIDE_BREAK_REGEX = re.compile(r'\n *?\n[\n ]*')
NUMBER_REGEX = re.compile(r'[0-9]+')
NOTE_REGEX = re.compile(r'\(.*?\)')
class FieldDescEntry:
    """Descriptor of one field in the Paradox table header.

    Holds the field's raw name (bytes), its type code (see FieldType)
    and its size in bytes within a record.
    """

    def __init__(self, name, field_type, size):
        self.name, self.field_type, self.size = name, field_type, size
class FieldType(object):
    """
    An enumeration class for different field types that can be expected in an EasyWorship song file.
    """
    String = 1        # fixed-size, NUL-padded byte string
    Int16 = 3         # 16-bit integer (stored with the sign bit flipped; see getField)
    Int32 = 4         # 32-bit integer (stored with the sign bit flipped; see getField)
    Logical = 9       # boolean, stored XOR 0x80
    Memo = 0x0c       # pointer into the .MB memo file
    Blob = 0x0d       # pointer into the .MB memo file
    Timestamp = 0x15  # 64-bit value, unpacked as unsigned long long
class EasyWorshipSongImport(SongImport):
"""
The :class:`EasyWorshipSongImport` class provides OpenLP with the
ability to import EasyWorship song files.
"""
    def __init__(self, manager, **kwargs):
        """Initialise the importer via the SongImport base class."""
        SongImport.__init__(self, manager, **kwargs)
    def doImport(self):
        """Import songs from an EasyWorship Songs.DB (Paradox) database.

        Record data is read from the ``.DB`` file; lyrics and other long
        text live in memo blocks inside the companion ``.MB`` file.
        Returns early (without raising) on any structural problem.
        """
        # Open the DB and MB files if they exist
        import_source_mb = self.import_source.replace('.DB', '.MB')
        if not os.path.isfile(self.import_source) or not os.path.isfile(import_source_mb):
            return
        db_size = os.path.getsize(self.import_source)
        if db_size < 0x800:
            # Smaller than one 0x800-byte header block - cannot be valid.
            return
        db_file = open(self.import_source, 'rb')
        self.memoFile = open(import_source_mb, 'rb')
        # Don't accept files that are clearly not paradox files
        record_size, header_size, block_size, first_block, num_fields = struct.unpack('<hhxb8xh17xh', db_file.read(35))
        if header_size != 0x800 or block_size < 1 or block_size > 4:
            db_file.close()
            self.memoFile.close()
            return
        # Take a stab at how text is encoded
        self.encoding = 'cp1252'
        db_file.seek(106)
        code_page, = struct.unpack('<h', db_file.read(2))
        if code_page == 852:
            self.encoding = 'cp1250'
        # The following codepage to actual encoding mappings have not been
        # observed, but merely guessed. Actual example files are needed.
        elif code_page == 737:
            self.encoding = 'cp1253'
        elif code_page == 775:
            self.encoding = 'cp1257'
        elif code_page == 855:
            self.encoding = 'cp1251'
        elif code_page == 857:
            self.encoding = 'cp1254'
        elif code_page == 866:
            self.encoding = 'cp1251'
        elif code_page == 869:
            self.encoding = 'cp1253'
        elif code_page == 862:
            self.encoding = 'cp1255'
        elif code_page == 874:
            self.encoding = 'cp874'
        self.encoding = retrieve_windows_encoding(self.encoding)
        if not self.encoding:
            return
        # Read the field description information
        db_file.seek(120)
        field_info = db_file.read(num_fields * 2)
        db_file.seek(4 + (num_fields * 4) + 261, os.SEEK_CUR)
        field_names = db_file.read(header_size - db_file.tell()).split(b'\0', num_fields)
        field_names.pop()
        field_descs = []
        for i, field_name in enumerate(field_names):
            field_type, field_size = struct.unpack_from('BB', field_info, i * 2)
            field_descs.append(FieldDescEntry(field_name, field_type, field_size))
        self.setRecordStruct(field_descs)
        # Pick out the field description indexes we will need.
        # findField raises IndexError when a field is absent - used here to
        # detect that we opened the wrong table.
        try:
            success = True
            fi_title = self.findField(b'Title')
            fi_author = self.findField(b'Author')
            fi_copy = self.findField(b'Copyright')
            fi_admin = self.findField(b'Administrator')
            fi_words = self.findField(b'Words')
            fi_ccli = self.findField(b'Song Number')
        except IndexError:
            # This is the wrong table
            success = False
        # There does not appear to be a _reliable_ way of getting the number of songs/records, so loop through the file
        # blocks and total the number of records. Store the information in a list so we dont have to do all this again.
        cur_block = first_block
        total_count = 0
        block_list = []
        while cur_block != 0 and success:
            cur_block_pos = header_size + ((cur_block - 1) * 1024 * block_size)
            db_file.seek(cur_block_pos)
            cur_block, rec_count = struct.unpack('<h2xh', db_file.read(6))
            rec_count = (rec_count + record_size) // record_size
            block_list.append((cur_block_pos, rec_count))
            total_count += rec_count
        self.import_wizard.progress_bar.setMaximum(total_count)
        for block in block_list:
            cur_block_pos, rec_count = block
            db_file.seek(cur_block_pos + 6)
            # Loop through each record within the current block
            for i in range(rec_count):
                if self.stop_import_flag:
                    break
                raw_record = db_file.read(record_size)
                self.fields = self.recordStruct.unpack(raw_record)
                self.setDefaults()
                self.title = self.getField(fi_title).decode()
                # Get remaining fields.
                copy = self.getField(fi_copy)
                admin = self.getField(fi_admin)
                ccli = self.getField(fi_ccli)
                authors = self.getField(fi_author)
                words = self.getField(fi_words)
                # Set the SongImport object members.
                if copy:
                    self.copyright = copy.decode()
                if admin:
                    if copy:
                        self.copyright += ', '
                    self.copyright += translate('SongsPlugin.EasyWorshipSongImport',
                                                'Administered by %s') % admin.decode()
                if ccli:
                    self.ccliNumber = ccli.decode()
                if authors:
                    # Split up the authors; try '/', then ';', then ',' as the
                    # separator, keeping the first that yields more than one name.
                    author_list = authors.split(b'/')
                    if len(author_list) < 2:
                        author_list = authors.split(b';')
                    if len(author_list) < 2:
                        author_list = authors.split(b',')
                    for author_name in author_list:
                        self.addAuthor(author_name.decode().strip())
                if words:
                    # Format the lyrics
                    result = strip_rtf(words.decode(), self.encoding)
                    if result is None:
                        return
                    words, self.encoding = result
                    verse_type = VerseType.tags[VerseType.Verse]
                    for verse in SLIDE_BREAK_REGEX.split(words):
                        verse = verse.strip()
                        if not verse:
                            continue
                        verse_split = verse.split('\n', 1)
                        first_line_is_tag = False
                        # EW tags: verse, chorus, pre-chorus, bridge, tag,
                        # intro, ending, slide
                        for tag in VerseType.tags + ['tag', 'slide']:
                            tag = tag.lower()
                            ew_tag = verse_split[0].strip().lower()
                            if ew_tag.startswith(tag):
                                verse_type = tag[0]
                                if tag == 'tag' or tag == 'slide':
                                    verse_type = VerseType.tags[VerseType.Other]
                                first_line_is_tag = True
                                number_found = False
                                # check if tag is followed by number and/or note
                                if len(ew_tag) > len(tag):
                                    match = NUMBER_REGEX.search(ew_tag)
                                    if match:
                                        number = match.group()
                                        verse_type += number
                                        number_found = True
                                    match = NOTE_REGEX.search(ew_tag)
                                    if match:
                                        self.comments += ew_tag + '\n'
                                if not number_found:
                                    verse_type += '1'
                                break
                        self.addVerse(verse_split[-1].strip() if first_line_is_tag else verse, verse_type)
                    if len(self.comments) > 5:
                        self.comments += str(translate('SongsPlugin.EasyWorshipSongImport',
                                                       '\n[above are Song Tags with notes imported from EasyWorship]'))
                if self.stop_import_flag:
                    break
                if not self.finish():
                    self.logError(self.import_source)
        db_file.close()
        self.memoFile.close()
    def findField(self, field_name):
        """Return the index of the field description named *field_name*.

        Raises IndexError when the field is absent; doImport relies on
        that to detect that the wrong table has been opened.
        """
        return [i for i, x in enumerate(self.fieldDescs) if x.name == field_name][0]
def setRecordStruct(self, field_descs):
# Begin with empty field struct list
fsl = ['>']
for field_desc in field_descs:
if field_desc.field_type == FieldType.String:
fsl.append('%ds' % field_desc.size)
elif field_desc.field_type == FieldType.Int16:
fsl.append('H')
elif field_desc.field_type == FieldType.Int32:
fsl.append('I')
elif field_desc.field_type == FieldType.Logical:
fsl.append('B')
elif field_desc.field_type == FieldType.Memo:
fsl.append('%ds' % field_desc.size)
elif field_desc.field_type == FieldType.Blob:
fsl.append('%ds' % field_desc.size)
elif field_desc.field_type == FieldType.Timestamp:
fsl.append('Q')
else:
fsl.append('%ds' % field_desc.size)
self.recordStruct = struct.Struct(''.join(fsl))
self.fieldDescs = field_descs
    def getField(self, field_desc_index):
        """Return the decoded value of one field of the current record.

        ``self.fields`` must already hold the unpacked record values (see
        setRecordStruct). Returns None for 'blank' entries; memo and blob
        fields are resolved against the ``.MB`` memo file.
        """
        field = self.fields[field_desc_index]
        field_desc = self.fieldDescs[field_desc_index]
        # Return None in case of 'blank' entries
        if isinstance(field, bytes):
            if not field.rstrip(b'\0'):
                return None
        elif field == 0:
            return None
        # Format the field depending on the field type
        if field_desc.field_type == FieldType.String:
            return field.rstrip(b'\0')
        elif field_desc.field_type == FieldType.Int16:
            # Integers are stored with the sign bit flipped.
            return field ^ 0x8000
        elif field_desc.field_type == FieldType.Int32:
            return field ^ 0x80000000
        elif field_desc.field_type == FieldType.Logical:
            return (field ^ 0x80 == 1)
        elif field_desc.field_type == FieldType.Memo or field_desc.field_type == FieldType.Blob:
            # The last ten bytes of the field hold a pointer into the memo
            # file: the block start (low byte = sub-block index) and size.
            block_start, blob_size = struct.unpack_from('<II', field, len(field) - 10)
            sub_block = block_start & 0xff
            block_start &= ~0xff
            self.memoFile.seek(block_start)
            memo_block_type, = struct.unpack('b', self.memoFile.read(1))
            if memo_block_type == 2:
                # Single-entry memo block: data starts after an 8-byte header.
                self.memoFile.seek(8, os.SEEK_CUR)
            elif memo_block_type == 3:
                # Sub-block table: look up the 16-byte-aligned data offset.
                if sub_block > 63:
                    return b''
                self.memoFile.seek(11 + (5 * sub_block), os.SEEK_CUR)
                sub_block_start, = struct.unpack('B', self.memoFile.read(1))
                self.memoFile.seek(block_start + (sub_block_start * 16))
            else:
                # Unknown memo block type.
                return b''
            return self.memoFile.read(blob_size)
        else:
            return 0
| marmyshev/bug_1117098 | openlp/plugins/songs/lib/ewimport.py | Python | gpl-2.0 | 14,242 | [
"Brian"
] | 0141d67f3e65e4cc5f6921dc27a5430790461e2e07618c61ef8a80c48446d30f |
# -*- coding: utf-8 -*-
#%% visualize latent space, for latent dimension larger than 2
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from keras.datasets import mnist
from VAE import VAE_net
#%% load test data
img_size = (28, 28, 1)
_, (x_test, y_test) = mnist.load_data()
# Scale pixel values to [0, 1] and add a channel axis.
x_test = x_test.astype('float32') / 255.
x_test = x_test.reshape((x_test.shape[0],) + img_size)

#%% load trained VAE model
model_name = './trained_models/vae_model_latent_5'
vae_net = VAE_net.load(model_name)

#%% 2D scatter plot of test data for each pair of latent dimensions
latent_dim = vae_net.config.latent_dim
batch_size = vae_net.config.batch_size
x_test_encoded = vae_net.encoder.predict(x_test, batch_size=batch_size)
plt.figure(figsize=(8, 8))
for i in range(latent_dim):
    for j in range(i + 1, latent_dim):
        # One subplot per (i, j) pair of latent dimensions,
        # coloured by the digit label.
        plt.subplot(latent_dim - 1, latent_dim - 1, i * (latent_dim - 1) + j)
        plt.scatter(x_test_encoded[:, i], x_test_encoded[:, j], s=1, c=y_test)
        plt.axis('off')
plt.suptitle('distribution of test data in latent space')
plt.show()

#%% visualize image in latent space
# one figure for each latent dimension,
# x axis: vary value along current latent dimension, while keep other dimensions fixed
# y axis: fix the value of current latent dimension fixed, while randomly sample other dimensions
n_x = 10  # number of image along specified latent dimension
n_y = 5   # number of image for other latent dimensions (randomly sampled)
def gen_images_vary_one_latent_dim(idim, n_x, n_y):
    """Decode a grid of latent codes that sweep latent dimension *idim*.

    The swept dimension takes n_x evenly spaced quantiles of the standard
    normal prior (via the inverse CDF), while the remaining dimensions are
    held at n_y random draws from that prior.
    """
    latent_codes = np.zeros((batch_size, latent_dim))
    # Quantiles of the Gaussian prior for the swept dimension.
    sweep_values = norm.ppf(np.linspace(0.05, 0.95, n_x))
    random_rows = norm.rvs(size=(n_y, latent_dim))
    idx = 0
    for sweep_value in sweep_values:
        for row in random_rows:
            latent_codes[idx] = row
            latent_codes[idx, idim] = sweep_value
            idx += 1
    decoded = vae_net.decoder.predict(latent_codes, batch_size=batch_size)
    return decoded[:n_x * n_y]
def image_stack_to_montage(images, x, y):
    '''Tile the first x*y images of a stack into one 2-D montage.

    Images are laid out column-major: the stack index advances down a
    column (y tiles) before moving to the next of the x columns.

    Parameters
    ----------
    images : array of shape (>= x*y, h, w) or (>= x*y, h, w, 1)
    x : number of tile columns
    y : number of tile rows

    Returns
    -------
    2-D array of shape (h*y, w*x).
    '''
    tile_h, tile_w = images.shape[1], images.shape[2]
    montage = np.zeros((tile_h * y, tile_w * x))
    for idx in range(x * y):
        # Column-major placement: idx runs down each column first.
        col, row = divmod(idx, y)
        montage[row * tile_h:(row + 1) * tile_h,
                col * tile_w:(col + 1) * tile_w] = images[idx].squeeze()
    return montage
# One figure per latent dimension: columns sweep that dimension's value,
# rows hold different random samples of the remaining dimensions.
for idim in range(latent_dim):
    images = gen_images_vary_one_latent_dim(idim, n_x, n_y)
    image = image_stack_to_montage(images, n_x, n_y)
    plt.figure()
    plt.imshow(image, cmap='Greys_r')
    plt.axis('equal')
    plt.axis('off')
    # Bug fix: the title must be set BEFORE plt.show(). In blocking
    # (non-interactive) mode, show() renders and waits, so a title added
    # afterwards never appeared on the figure.
    plt.title('latent dimension {}'.format(idim+1))
    plt.show()
| LiangShe/macaque_generator | vae_mnist/visualize.py | Python | mit | 2,927 | [
"Gaussian"
] | 55bf755e8458bad59ea167698bfdd63b4fae991eafd3de7c369f819d5a4e2685 |
# -*- coding: utf-8 -*-
{
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": 'この地域を地理的に指定するロケーション。これはロケーションの階層構造のうちの一つか、ロケーショングループの一つか、この地域の境界に面するロケーションです。',
"Acronym of the organization's name, eg. IFRC.": '団体の略称 (IFRCなど)',
"Authenticate system's Twitter account": '認証システムの Twitter アカウント',
"Can't import tweepy": 'tweepyをインポートできません',
"Click on 'Pledge' button in the left-hand column to make a Pledge to match a request for aid.": "救援要請と寄付項目を関連付けるには、項目左の'寄付'ボタンを押してください。",
"Couldn't import tweepy library": 'tweepy libraryをインポートできません',
"Detailed address of the site for informational/logistics purpose. Please note that you can add GIS/Mapping data about this site in the 'Location' field mentioned below.": 'サイトの所在地住所を詳細に記述します。情報伝達と物品搬送に使用します。このサイトに関する情報を、以下の「ロケーション」項目にGIS/地図データを挿入できることに注意してください。',
"If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'もしこの設定が地域メニューにある地域を指しているのであれば、メニューで使う名前を設定してください。個人用の地図設定の名前では、ユーザの名前で設定されます。',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'この項目が設定されている場合、ユーザーは、登録の際、この団体のスタッフとしてアサインされるように指定することができます。ただし、ユーザーのドメインと団体のドメイン項目に差異がない場合のみ有効です。',
"If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": 'この項目の内容はユーザーの基本所在地となり、ユーザーが地図上に表示されるようになります。',
"If this setting is enabled then all deleted records are just flagged as deleted instead of being really deleted. They will appear in the raw database access but won't be visible to normal users.": 'この設定が有効の場合、削除されたレコードには削除済みフラグが付与されるだけで、実際のデータは消去されません。一般のユーザが閲覧することはできませんが、データベースを直接参照することでデータを確認できます。',
"If you cannot find the record of the person you want to report missing, you can add it by clicking 'Add Person' below:": '行方不明者の登録が存在しない場合、「人物情報を追加」ボタンを押して、新規登録を行ってください。',
"If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": 'リストに病院が表示されない場合、「病院情報を追加」することで新規に登録が可能です。',
"If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'.": 'オフィスが一覧にない場合は、「オフィスを追加」をクリックすることで新規のオフィスを追加できます。',
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": "もしあなたの団体の登録がない場合、'団体を追加'リンクをクリックすることで追加が可能です",
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": 'データを同期する際には、ネットワークを経由してではなく、ファイルから行うことも可能です。ネットワークが存在しない場合に利用されます。ファイルからのデータインポート、およびファイルへのエクスポートはこのページから実行可能です。右部のリンクをクリックしてください。',
"Level is higher than parent's": '親情報よりも高いレベルです',
"NB SMS requests are filtered to just those which are 'actionable', whilst the Tweet requests are unfiltered, so that is likely to be a good place to start Searching.": "注意: SMS は'アクション可能'のためリクエストがフィルターされます。一方、ツイートのリクエストはフィルターされません。よって、これは検索する手段となります",
"Need a 'url' argument!": "'url'引数が必要です。",
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "オプション項目。ジオメトリカラムの名称です。PostGISでのデフォルト値は 'the_geom'となります。",
"Parent level should be higher than this record's level. Parent level is": '親レベルは、このレコードのレベルより上位でなければなりません。親レベルは',
"Password fields don't match": 'パスワードが一致しません。',
"Phone number to donate to this organization's relief efforts.": 'この団体の救援活動に対して寄付を行う際の連絡先となる電話番号を記載します。',
"Please come back after sometime if that doesn't help.": 'この方法で問題が解決しない場合は、しばらく時間を置いて再度アクセスしてください。',
"Press the 'Delete Old' button to have all records which reference this one be repointed at the new one & then the old record will be deleted.": "'Delete Old'ボタンを押すことで、データを参照しているレコードは全て参照先を再指定され、古い方のレコードは削除されます。",
"Quantity in %s's Inventory": '%s 倉庫にある量',
"Search here for a person's record in order to:": '人物情報の検索を行い、以下の機能を実現します:',
"Select a person in charge for status 'assigned'": "状況が '割り当て済み' である担当者を選択します",
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": 'もし全ての特定の場所が住所階層の最下層で親の場所を必要とするなら、これを選択して下さい。例えば、もし「地区」が階層の最小の地域なら、全ての特定の場所は親階層の地区を持っている必要が有るでしょう。',
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": 'もし全ての特定の場所が住所階層での親の場所を必要とするなら、これを選択して下さい。これは被災地の「地域」表示の設定に役立てられます。',
"Sorry, things didn't get done on time.": 'すいません、時間通りに行われていません。',
"Sorry, we couldn't find that page.": 'すいません、お探しのページは見つかりませんでした。',
"System's Twitter account updated": 'システムのTwitterアカウントを変更しました',
"The <a href='http://en.wikipedia.org/wiki/Well-known_text' target=_blank>Well-Known Text</a> representation of the Polygon/Line.": "この線、あるいは面の<a href='http://en.wikipedia.org/wiki/Well-known_text' target=_blank>具体的な説明</a>",
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": 'このプロジェクトの資金提供組織を選択します。複数の項目を選択するには、Ctrlキーを押しながらクリックしてください。',
"The Sector(s) this organization works in. Multiple values can be selected by holding down the 'Control' key.": 'この団体の活動分野を選択します。複数の項目を選択するには、コントロールキーを押しながらクリックしてください。',
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": '画像ファイルのURLです。ファイルのアップロードを行わない場合、ロケーションをURL項目に入力してください。',
"The person's manager within this Office/Project.": 'このオフィス/プロジェクトのマネージャ。',
"To search for a body, enter the ID label of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": '遺体の検索を行うには、遺体のID番号を入力してください。検索時のワイルドカード文字として、%を使うことができます。入力せずに「検索」すると、全ての遺体が表示されます。',
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": 'ID情報を入力することで、遺体を検索します。ワイルドカードとして % が使用できます。何も指定せずに「検索」すると、全ての遺体が表示されます。',
"To search for a hospital, enter any of the names or IDs of the hospital, or the organization name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "病院を検索するには、名前、病院のID、団体名、省略名のいずれかをスペース(空白)で区切って入力してください。 % がワイルドカードとして使えます。全病院のリストを表示するにはなにも入力せずに '検索' ボタンを押してください。",
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": '探し出したい病院をテキスト入力し、検索を行うことができます。検索時のワイルドカード文字として、%を使うことができます。何も入力せずに「検索」ボタンを押した場合、全ての病院を表示します。',
"To search for a hospital, enter any part of the name or ID. You may use % as wildcard. Press 'Search' without input to list all hospitals.": '病院を検索するには、名称の一部かIDを入力してください。検索時のワイルドカード文字として、%を使うことができます。何も入力せずに「検索」を押した場合、全ての病院を表示します。',
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "ロケーションを検索するには、名前を入力します。%をワイルドカード文字として使用することが出来ます。何も入力しないで '検索' をクリックするとすべてのロケーションが表示されます。",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '苗字、名前などを半角スペースで区切って入力し、人物検索して下さい。「%」を使うとファジー検索できます。何も入力せずに検索すれば、全ての情報を検索表示します。',
"To search for a person, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '人を検索するためには、お名前(苗字、名前または両方)を入力してください。また姓名の間にはスペースをいれてください。ワイルドカードとして % が使えます。すべての人物情報をリストするには、検索ボタンをおしてください。',
"To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.": '探し出したい支援要請をテキスト入力し、検索を行うことができます。検索時のワイルドカード文字として、%を使うことができます。何も入力せずに「検索」ボタンを押した場合、全ての支援要請を表示します。',
"To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": 'アセスメントを検索するには、アセスメントのチケット番号の一部を入力してください。ワイルドカードとして % が使えます。すべてのアセスメントをリストするには、なにも入力せず検索ボタンをおしてください。',
"Type the first few characters of one of the Person's names.": '検索したい人物の名前の先頭数文字を入力してください',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": '画像ファイルのアップロードはここから行ってください。ファイルのアップロードを行わない場合、ロケーションをURL項目に入力してください。',
"View and/or update details of the person's record": '人物情報を検索し、詳細の閲覧や更新を行ないます',
"View/Edit the Database directly (caution: doesn't respect the framework rules!)": 'データベースの直接閲覧/編集(注意:フレームワークの規則に反します)',
"What are the people's normal ways to obtain food in this area?": 'この地域で食料を調達するための手段を記載してください',
"What should be done to reduce women and children's vulnerability to violence?": '未成年や女性を暴力から守るために、どのような活動や設備が必要かを記載してください',
"When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": '他とデータを同期するとき、二つ(以上)の団体がそれぞれ更新した情報を同期するときにコンフリクトが発生することがあります。同期モジュールは、コンフリクトを自動解決しようと試みますが、解決できないことがあります。そのような場合、手作業でコンフリクトを解決するか、クリックして次のページに進んでください。',
"You have personalised settings, so changes made here won't be visible to you. To change your personalised settings, click ": 'ユーザ固有の設定を行っている場合、ここで変更を行っても、目に見える変化がない場合があります。ユーザ固有の設定を行うには、以下をクリックしてください。 ',
"You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": '変更が保存されていません。「キャンセル」をクリックした後、「保存」を押して保存してください。変更を破棄するには、OK をクリックしてください。',
"You haven't made any calculations": '計算が実行されていません',
"You haven't yet Verified your account - please check your email": '利用者登録はまだ有効ではありません。',
"couldn't be parsed so NetworkLinks not followed.": 'パースできなかったため、 NetworkLinksはフォローされません。',
"includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": 'OpenLayersで未サポートの機能である GroundOverlayやScreenOverlayを含むため、不具合がある可能性があります。',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"更新" は、"field1=\'newvalue\'" のようなオプションです。"JOIN の結果を更新または削除することはできません。',
'# Houses Damaged': '損傷した家屋の数',
'# Houses Flooded': '浸水した家屋数',
'# People Needing Food': '食料が必要な人の数',
'# People at Risk From Vector-Borne Diseases': '生物が媒介する疾病の危険性がある人の数',
'# People without Access to Safe Drinking-Water': '安全な飲料水が確保されていない人の数',
'# of Houses Damaged': '損壊した家屋数',
'# of Houses Destroyed': '全壊した家屋数',
'# of International Staff': '国外スタッフ人数',
'# of National Staff': '国内スタッフの人数',
'# of People Affected': '被災者数',
'# of People Deceased': '死亡者数',
'# of People Injured': '負傷者数',
'# of Vehicles': '車両数',
'%s Create a new site or ensure that you have permissions for an existing site.': '%s 新しいサイトを作成するか既存のサイトに対する権限を持っているかどうか確認して下さい',
'%s rows deleted': '%s 行を削除しました',
'%s rows updated': '%s 行を更新しました',
'& then click on the map below to adjust the Lat/Lon fields': 'そして下の地図をクリックして、緯度 / 経度フィールドを調節してください',
'* Required Fields': '* は必須項目です',
'0-15 minutes': '0-15 分間',
'1 Assessment': '1アセスメント',
'1 location, shorter time, can contain multiple Tasks': '1つの地域における短期間の活動を表し、1つの支援活動のなかで複数のタスクを実行します。',
'1-3 days': '1-3 日間',
'1. Fill the necessary fields in BLOCK letters.': '1. 太字の項目は必須項目です.',
'15-30 minutes': '15-30 分間',
'2 different options are provided here currently:': '現在は、2種類のオプションが提供されています。',
'2. Always use one box per letter and leave one box space to seperate words.': '2. 一マス一文字で、単語の間は一マス開けてください。',
'2x4 Car': '2x4 車両',
'30-60 minutes': '30-60 分間',
'4-7 days': '4-7 日間',
'4x4 Car': '四輪駆動車',
'8-14 days': '8-14 日間',
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': '機能クラスに設定したマーカーを上書きする必要があれば、個々のロケーションに設定したマーカーを設定します',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'このデータ内容を確認できるファイルやURL情報、連絡先担当者などのリファレンスデータを記載します。最初の何文字かを入力することで、既存の類似文書にリンクすることが可能です。',
'A Warehouse is a physical place which contains Relief Items available to be Distributed.': '倉庫とは、救援物資の配布を行うことができる物理的な地点を意味します。',
'A Warehouse/Site is a physical location with an address and GIS data where Items are Stored. It can be a Building, a particular area in a city or anything similar.': '倉庫 / サイトとは、物資の保管場所のことであり、住所とGIS情報が付帯します。特定の建物や、市内の特定地域などがあげられます。',
'A brief description of the group (optional)': 'グループの詳細(オプション)',
'A file downloaded from a GPS containing a series of geographic points in XML format.': 'GPSからダウンロードしたファイルには、その地点に関する様々な情報がXML形式で保存されています。',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'GPSから取得したGPX形式のファイル。タイムスタンプは画像と関連づけられ、地図上に配置することができます。',
'A library of digital resources, such as photos, documents and reports': '写真や文書、レポートなど、電子化された資料',
'A location group is a set of locations (often, a set of administrative regions representing a combined area). Member locations are added to a location group here. Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group. A location group can be used to define the extent of an affected area, if it does not fall within one administrative region. Location groups can be used in the Regions menu.': 'ロケーションを取りまとめた単位はロケーショングループと呼称されます(たいていは、一定範囲内の管理対象地域をさします)。このページから、ロケーションをグループに追加することができます。ロケーショングループ単位で地図上に表示させたり、検索結果として表示させることが可能となります。グループを使用することで、1つの管理地域に縛られない被災地域定義が可能となります。ロケーショングループは、地域メニューから定義できます。',
'A location group must have at least one member.': 'ロケーショングループには、メンバーが最低一人必要です。',
'A place within a Site like a Shelf, room, bin number etc.': 'Site内に存在する施設。例えば棚、部屋、Binの番号など',
'A snapshot of the bin or additional documents that contain supplementary information about it can be uploaded here.': 'binのスナップショットや追加情報の更新は、ここから行えます。',
'A snapshot of the location or additional documents that contain supplementary information about the Site Location can be uploaded here.': 'ロケーションのスナップショットや、Siteに関する追加情報の更新は、ここから行えます。',
'A snapshot of the location or additional documents that contain supplementary information about the Site can be uploaded here.': 'ロケーションのスナップショットや、Siteに関する追加情報の更新は、ここから行えます。',
'A survey series with id %s does not exist. Please go back and create one.': 'ID番号 %sに関するsurvey seriesは存在しません。「戻る」ボタンを押して、新規に作成してください。',
'ABOUT THIS MODULE': 'このモジュールについて',
'ABOUT': '概要',
'ACCESS DATA': 'アクセスデータ',
'ANY': '全て',
'API is documented here': 'APIに関する文書はこちら',
'ATC-20 Rapid Evaluation modified for New Zealand': 'ニュージーランド向けに変更したATC-20(建物の簡易安全性評価プロセス)',
'ATC-20': 'ATC-20(建物の簡易安全性評価プロセス)',
'Abbreviation': '省略',
'Ability to Fill Out Surveys': '調査記入能力',
'Ability to customize the list of details tracked at a Shelter': '避難所で追跡する詳細のリストのカスタマイズ可否',
'Ability to customize the list of human resource tracked at a Shelter': '避難所で追跡する詳細のリストのカスタマイズの可否',
'Ability to customize the list of important facilities needed at a Shelter': '避難所で追跡する人的資源のリストのカスタマイズの可否',
'Ability to track partial fulfillment of the request': '支援要請の部分的な達成度の追跡可否',
'Ability to view Results of Completed and/or partially filled out Surveys': '完了または一部完了した聞き取り調査の結果をみる機能',
'About Sahana Eden': 'Sahana Edenについて',
'About Sahana': 'Sahanaについて',
'About this module': 'モジュールの詳細',
'About': '情報',
'Access denied': 'アクセスが拒否されました',
'Access to Shelter': '避難所へのアクセス',
'Access to education services': '学校へのアクセス',
'Accessibility of Affected Location': '被災地域へのアクセス方法',
'Account registered, however registration is still pending approval - please wait until confirmation received.': '利用者登録の申請を受け付けました。所属団体またはサイト管理者による承認を待っています。',
'Acronym': '略称/イニシャル',
'Actionable by all targeted recipients': 'すべての対象受信者にとって実用的な',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': '指定された参加者のみ実施可能です。<note>の中に行使するためのIDがあることが必要です。',
'Actionable': '対応可能',
'Actioned?': '実施済み?',
'Actions taken as a result of this request.': '要請に対して行われるアクション',
'Actions': 'アクション',
'Active Problems': '対処中の問題',
'Activities Map': '支援活動マップ',
'Activities are blue.': '支援活動(アクティビティ)は青色で表示されます。',
'Activities matching Assessments:': 'アセスメントに適合した支援活動',
'Activities of boys 13-17yrs before disaster': '災害発生前の13-17歳男子の活動状況',
'Activities of boys 13-17yrs now': '現在の13-17歳男子の活動状況',
'Activities of boys <12yrs before disaster': '災害発生前の12歳以下男子の活動状況',
'Activities of boys <12yrs now': '現在の12歳以下男子の活動状況',
'Activities of children': '子供たちの活動',
'Activities of girls 13-17yrs before disaster': '災害発生前の13-17歳女子の活動状況',
'Activities of girls 13-17yrs now': '現在の13-17歳女子の活動状況',
'Activities of girls <12yrs before disaster': '災害発生前の12歳以下女子の活動状況',
'Activities of girls <12yrs now': '現在の12歳以下女子の活動状況',
'Activities': '支援活動',
'Activity Added': '支援活動を追加しました',
'Activity Deleted': '支援活動を削除しました',
'Activity Details': '支援活動の詳細',
'Activity Report': '支援活動レポート',
'Activity Reports': '支援活動レポート',
'Activity Type': '支援活動タイプ',
'Activity Updated': '支援活動を更新しました',
'Activity': '支援活動',
'Add Address': 'アドレスを追加',
'Add Activity Type': '支援活動タイプを追加',
'Add Aid Request': '治療要請を追加',
'Add Alternative Item': '代わりの物資を追加',
'Add Assessment Summary': 'アセスメントの要約を追加',
'Add Assessment': 'アセスメントを追加',
'Add Baseline Type': '基準値タイプの追加',
'Add Baseline': '基準値の追加',
'Add Bin Type': 'Bin Typeを追加',
'Add Bins': 'Binを追加',
'Add Bundle': 'Bundleを追加',
'Add Catalog.': 'カタログを追加',
'Add Category': 'カテゴリを追加',
'Add Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog 関係を追加',
'Add Config': '設定を追加',
'Add Contact': '連絡先を追加',
'Add Contact Information': '連絡先情報を追加',
'Add Credential': '証明書の追加',
'Add Credentials': '証明書の追加',
'Add Detailed Evaluation': '詳細な評価を追加',
'Add Disaster Victims': '被災者情報を追加',
'Add Distribution.': '配給所を追加',
'Add Donor': '資金提供組織を追加',
'Add Flood Report': '洪水レポートを追加',
'Add Group Member': 'グループメンバを追加',
'Add Identity': 'IDを追加',
'Add Image': '画像を追加',
'Add Impact Type': '災害影響のタイプを追加',
'Add Impact': '被災状況の追加',
'Add Inventory Item': '備蓄物資を追加します',
'Add Inventory Store': '物資集積地点を追加',
'Add Item (s)': '物資を追加',
'Add Item Catalog': '物資カタログを追加',
'Add Item Category': '救援物資カタログカテゴリを追加',
'Add Item Sub-Category': '救援物資サブカテゴリを追加',
'Add Item to Request': '要求する支援物資の登録',
'Add Item to Shipment': '輸送に物資を追加する',
'Add Item': '物資を追加',
'Add Key': 'Keyを追加',
'Add Kit': 'Kitを追加',
'Add Level 1 Assessment': 'レベル1アセスメントを追加',
'Add Level 2 Assessment': 'レベル2アセスメントを追加',
'Add Line': '行を追加',
'Add Location': 'ロケーションを追加',
'Add Location Group': 'ロケーショングループを追加',
'Add Locations': 'ロケーションを追加',
'Add Log Entry': 'ログエントリを追加',
'Add Member': 'メンバを追加',
'Add Membership': 'メンバシップを追加',
'Add Message': 'メッセージを追加',
'Add Need Type': '需要タイプを追加',
'Add Need': '要求を追加',
'Add New Aid Request': '援助要請を新規追加',
'Add New Assessment Summary': '新規アセスメントの要約を追加',
'Add New Baseline Type': '基準値タイプの新規追加',
'Add New Baseline': '新しい基準値を追加',
'Add New Bin Type': 'Bin Typeを新規追加',
'Add New Bin': 'Binを新規追加',
'Add New Budget': '予算を新規追加',
'Add New Bundle': 'Bundleを新規追加',
'Add New Cluster Subsector': 'クラスタのサブセクタを新規作成',
'Add New Cluster': 'クラスタを新規追加',
'Add New Commitment Item': '物資コミットメントを新規追加',
'Add New Config': '設定を新規追加',
'Add New Distribution Item': '配給物資を新規追加',
'Add New Distribution': '配給所を新規追加',
'Add New Document': '文書を新規追加',
'Add New Donor': '資金提供組織を新規追加',
'Add New Entry': 'エントリを新規追加',
'Add New Flood Report': '洪水情報を新規追加',
'Add New Image': '画像を新規追加',
'Add New Impact Type': '災害影響のタイプを新規追加',
'Add New Impact': '新規影響を追加',
'Add New Inventory Item': '備蓄物資を新規追加',
'Add New Inventory Store': '物資集積場所を新規追加',
'Add New Item Catalog Category': '物資カタログカテゴリを新規追加',
'Add New Item Catalog': '物資カタログを新規追加',
'Add New Item Sub-Category': '物資サブカテゴリを新規追加',
'Add New Item to Kit': 'キットに救援物資を新規追加',
'Add New Key': 'Keyを新規追加',
'Add New Level 1 Assessment': 'レベル1アセスメントを新規追加',
'Add New Level 2 Assessment': 'レベル2アセスメントを新規追加',
'Add New Member': 'メンバを新規追加',
'Add New Membership': 'メンバシップを新規追加',
'Add New Metadata': 'メタデータを新規追加',
'Add New Need Type': '需要タイプを新規追加',
'Add New Need': '新しい要求を登録する',
'Add New Note': '追加情報を新規追加',
'Add New Peer': 'データ同期先を新規追加',
'Add New Position': '場所を新規追加',
'Add New Problem': '問題を新規追加',
'Add New Rapid Assessment': '被災地の現況アセスメントを新規追加',
'Add New Received Item': '受領した物資を新規追加',
'Add New Record': 'レコードを新規追加',
'Add New Request Item': '特定物資の要請を新規追加',
'Add New Request': '支援要請を新規追加',
'Add New Response': '支援要請を新規追加',
'Add New River': '河川情報を新規追加',
'Add New Role to User': 'ユーザに役割を新規割り当て',
'Add New Sent Item': '送った物資の追加',
'Add New Setting': '設定を新規追加',
'Add New Shipment to Send': '発送する輸送物資を新規追加',
'Add New Site': 'Siteを新規追加',
'Add New Solution': '解決案を提示する',
'Add New Staff Type': 'スタッフタイプを新規追加',
'Add New Staff': 'スタッフを新規追加',
'Add New Storage Location': '備蓄場所を新規追加',
'Add New Survey Answer': '新しい調査の回答を追加しました',
'Add New Survey Question': '調査項目を新規追加',
'Add New Survey Section': '新しい調査セクションを追加',
'Add New Survey Series': '新しい一連の調査を追加します',
'Add New Survey Template': 'Survey Templateを新規追加',
'Add New Team': 'チームを新規追加',
'Add New Ticket': 'チケットを新規追加',
'Add New Track': '追跡情報を新規追加',
'Add New Unit': '単位を新規追加',
'Add New User to Role': '新規ユーザに役割を割り当て',
'Add New Warehouse Item': '倉庫物資を新規追加',
'Add New': '新規追加',
'Add Note': 'ノートを追加',
'Add Peer': 'データ同期先を追加',
'Add Performance Evaluation': 'パフォーマンス評価を追加',
'Add Person': '人物情報を追加',
'Add Photo': '写真を追加',
'Add Point': 'ポイントを追加',
'Add Polygon': 'Polygonを追加',
'Add Position': '場所を追加',
'Add Problem': '問題を追加',
'Add Projections': '地図投影法を追加',
'Add Question': '質問事項を追加',
'Add Rapid Assessment': '被災地の現況アセスメントを追加',
'Add Rapid Evaluation': '迅速評価を追加',
'Add Recipient Site': '受け取りSiteを追加',
'Add Recipient': '受け取り担当者を追加',
'Add Record': 'レコードを追加',
'Add Recovery Report': '遺体回収レポートを追加',
'Add Reference Document': 'リファレンス文書を追加',
'Add Report': 'レポートを追加',
'Add Request Detail': '支援要請の詳細を追加',
'Add Request Item': '物資の要請を追加します',
'Add Request': '支援要請を追加',
'Add Response': '返答を追加',
'Add Section': 'Sectionを追加',
'Add Sender Organization': '送付元団体を追加',
'Add Sender Site': '送付元Siteを追加',
'Add Setting': '設定を追加',
'Add Shipment Transit Log': '輸送履歴を追加',
'Add Shipment/Way Bills': '輸送費/渡航費を追加',
'Add Site': 'サイトを追加',
'Add Skill Types': 'スキルタイプを追加',
'Add Solution': '解決案を追加',
'Add Staff Type': 'スタッフタイプを追加',
'Add Staff': 'スタッフを追加',
'Add Storage Bin ': 'Storage Binを追加 ',
'Add Storage Bin Type': 'Storage Bin Typeを追加',
'Add Storage Location': '備蓄地点を追加',
'Add Sub-Category': 'サブカテゴリを追加',
'Add Subscription': '寄付金情報を追加',
'Add Survey Answer': '調査の回答を追加',
'Add Survey Question': '聞き取り調査項目を追加',
'Add Survey Section': '調査セクションの追加',
'Add Survey Series': '一連の調査を追加',
'Add Survey Template': '調査テンプレートを追加',
'Add Team Member': 'メンバを追加',
'Add Team': 'チームを追加',
'Add Ticket': 'チケットを追加',
'Add Unit': '単位を追加',
'Add Volunteer Registration': 'ボランティア登録を追加',
'Add Warehouse Item': '倉庫物資を追加',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'ファイル、URL、あるいは、このデータの確認を行なう連絡先のような参照文書を追加します。参照文書を入力しない場合、代わりにあなたのメールが表示されます。',
'Add a Volunteer': 'ボランティアの追加',
'Add a new Relief Item.': '救援物資を新規追加',
'Add a new Site from where the Item is being sent.': 'この救援物資の送付先を新規サイトとして追加',
'Add a new Site where the Item is being sent to.': 'この物資の送付先サイトを新規追加',
'Add an Photo.': '写真を追加.',
'Add location': 'ロケーションを追加',
'Add main Item Category.': '主要なアイテムカテゴリを追加',
'Add main Item Sub-Category.': '主要な救援物資サブカテゴリを追加',
'Add new Group': 'グループを新規追加',
'Add new Individual': '個人を新規追加',
'Add new position.': '新しいポジションを追加してください。',
'Add new project.': 'プロジェクトを新規追加',
'Add new staff role.': 'スタッフの権限を新規追加',
'Add or Update': '追加、あるいは更新',
'Add the Storage Bin Type.': 'Storage Binタイプを追加します。',
'Add the Storage Location where this bin is located.': 'binが保存されている貯蔵場所を追加します。',
'Add the Storage Location where this this Bin belongs to.': 'このBinがある備蓄地点を追加します。',
'Add the main Warehouse/Site information where this Bin belongs to.': 'その物資の備蓄スペースとなっている倉庫/サイトの情報を追加してください。',
'Add the main Warehouse/Site information where this Item is to be added.': 'この物資が追加されることになっている主要な倉庫 / サイトの情報を追加してください。',
'Add the main Warehouse/Site information where this Storage location is.': 'その物資の備蓄場所となっている倉庫/サイトの情報を追加してください。',
'Add the unit of measure if it doesnt exists already.': '距離単位が未登録の場合、単位を追加します。',
'Add to Bundle': 'Bundleへの登録',
'Add to Catalog': 'カタログへ登録',
'Add to budget': '予算項目へ登録',
'Add': '追加',
'Add/Edit/Remove Layers': 'レイヤを追加/編集/削除',
'Added to Group': 'メンバシップを追加しました',
'Added to Team': 'メンバシップを追加しました',
'Additional Beds / 24hrs': '追加ベッド予測数 / 24h',
'Additional Comments': '追加コメント',
'Additional quantity quantifier – i.e. “4x5”.': '数量を表す追記(例 「4x5」)',
'Address Details': '住所情報の詳細',
'Address Type': '住所情報タイプ',
'Address added': '住所情報を追加しました',
'Address deleted': '住所情報を削除しました',
'Address updated': '住所情報を更新しました',
'Address': '住所情報',
'Addresses': '住所',
'Adequate food and water available': '適切な量の食料と水が供給されている',
'Adequate': '適正',
'Adjust Item(s) Quantity': 'アイテム量の修正',
'Adjust Items due to Theft/Loss': 'アイテム量の修正(盗難/紛失のため)',
'Admin Email': '管理者の電子メール',
'Admin Name': '管理者名',
'Admin Tel': '管理者の電話番号',
'Admin': '管理者',
'Administration': '管理',
'Administrator': '管理者',
'Admissions/24hrs': '患者増加数/24h',
'Adolescent (12-20)': '青年(12-20)',
'Adolescent participating in coping activities': '未成年が災害対応に従事',
'Adult (21-50)': '成人(21-50)',
'Adult ICU': '成人 ICU',
'Adult Psychiatric': '精神病の成人',
'Adult female': '成人女性',
'Adult male': '成人男性',
'Adults in prisons': '刑務所で服役中の成人がいる',
'Advanced Bin Search': 'Binの詳細検索',
'Advanced Catalog Search': 'カタログの詳細検索',
'Advanced Category Search': '詳細カテゴリー検索',
'Advanced Item Search': '詳細な物資検索',
'Advanced Location Search': '詳細な位置検索',
'Advanced Site Search': 'Siteの詳細検索',
'Advanced Sub-Category Search': 'サブカテゴリの詳細検索',
'Advanced Unit Search': '高度な単位検索',
'Advanced': '詳細',
'Advanced:': 'もっと正確に:',
'Advisory': '注意喚起',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'このボタンをクリックすると、解決法のペアが順に表示されます。各ペアから、最も適する項目を1つずつ選択してください。',
'Age Group': '年齢グループ',
'Age group does not match actual age.': '年齢グループが実際の年齢と一致しません。',
'Age group': '年齢グループ',
'Aggravating factors': '悪化要因',
'Aggregate Items': 'アイテムの集約',
'Agriculture': '農業',
'Aid Request Details': '援助要請の詳細',
'Aid Request added': '援助要請を追加しました',
'Aid Request deleted': '救援要請を追加しました',
'Aid Request updated': '援助要請を更新しました',
'Aid Request': '治療要請',
'Aid Requests': '援助要請',
'Air Transport Service': '物資空輸サービス',
'Aircraft Crash': '飛行機事故',
'Aircraft Hijacking': '航空機ハイジャック',
'Airport Closure': '空港閉鎖',
'Airspace Closure': '離陸地点閉鎖',
'Alcohol': 'アルコール',
'Alert': 'アラート',
'All Inbound & Outbound Messages are stored here': '送受信した全てのメッセージはここに格納されます。',
'All Locations': '全てのロケーション',
'All Records': 'すべてのレコード',
'All Requested Items': '物資要請一覧',
'All Resources': 'すべての資源',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': 'このサイトのSahana Software Foundationで提供されるデータのライセンスは、CCA (Creative Commons Attribution licence)となります。しかし、すべてのデータの発生源が、このサイトであるとは限りません。詳細は、各エントリの情報ソースの項目に記載されています。',
'All': '全て',
'Allowed to push': 'プッシュが許可済みである',
'Allows a Budget to be drawn up': '予算の策定を行ないます',
'Allows authorized users to control which layers are available to the situation map.': '認証済みユーザーが「状況地図のどのレイヤが利用できるか」を制御することを許可します。',
'Alternative Item Details': '代わりの品物についての詳細',
'Alternative Item added': '代わりの物資を追加しました',
'Alternative Item deleted': '代わりの品物が削除されました',
'Alternative Item updated': '代わりの物資を更新しました',
'Alternative Item': '代わりの物資',
'Alternative Items': '代わりとなる物資',
'Alternative infant nutrition in use': '利用中の乳児用代替食',
'Alternative places for studying available': '学校以外の場所を学習に利用可能である',
'Alternative places for studying': '授業開設に利用可能な施設',
'Ambulance Service': '救急サービス',
'An Inventory Store is a physical place which contains Relief Items available to be Distributed.': '物資集積場所とは、救援物資の配給能力をもつ、物理的な場所を指します。',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': '物資の受け入れ、貯蔵設備の管理、必要な物資の記録、サプライチェーン・マネジメント、調達、その他様々な資産やリソースの管理といった機能。',
'An item which can be used in place of another item': '他の物資の代わりに使う物資',
'Analysis of Completed Surveys': '完了したフィードバックの分析',
'Animal Die Off': '動物の死',
'Animal Feed': '動物のエサ',
'Animals': '動物',
'Answer Choices (One Per Line)': '選択肢(一行に一つ)',
'Anthropology': '人類学',
'Antibiotics available': '抗生物質が利用可能',
'Antibiotics needed per 24h': '24時間ごとに必要な抗生物質',
'Any available Metadata in the files will be read automatically, such as Timestamp, Author, Latitude & Longitude.': 'ファイル内の利用可能なすべてのメタデータ(タイムスタンプ、作成者、緯度経度等)を自動的に読み込みます。',
'Any comments about this sync partner.': 'データの同期先に関するコメント',
'Apparent Age': '年齢(外見)',
'Apparent Gender': '性別(外見)',
'Application Permissions': 'アプリケーションに対する権限',
'Application': '申請',
'Applications': 'アプリケーション',
'Appropriate clothing available': '適切な衣料が利用可能である',
'Appropriate cooking equipment/materials in HH': '世帯内にて適切な調理器具/食材が利用可能である',
'Approved': '承認されました',
'Approver': '承認者',
'Approx. number of cases/48h': '事象の発生概数/48h',
'Approximately how many children under 5 with diarrhea in the past 48 hours?': '過去48時間以内に発生した、5歳未満小児の下痢症状発生件数を記載してください。概数でかまいません',
'Archive not Delete': 'Archiveを削除しない',
'Arctic Outflow': '北極気団の南下',
'Are basic medical supplies available for health services since the disaster?': '災害発生後、基本的な医療行為を行えるよう、ヘルスサービスに対して供給があったかどうかを記載します',
'Are breast milk substitutes being used here since the disaster?': '災害発生後、母乳代替品が使われているかどうかを記載します',
'Are the areas that children, older people, and people with disabilities live in, play in and walk through on a daily basis physically safe?': '日中時間帯、この地域での生活や遊び、通行によって、未成年や高齢者、障碍者に肉体的な危害が及ぶ可能性があるかを記載します',
'Are the chronically ill receiving sufficient care and assistance?': '慢性病の罹患者に対して、十分なケアと介護が行われているかを記載します',
'Are there adults living in prisons in this area?': 'この地域で刑務所に収容されている成人がいるかどうかを記載してください',
'Are there alternative places for studying?': '学校以外に学習を行える場所があるかどうかを記載してください',
'Are there cases of diarrhea among children under the age of 5?': '5歳未満の幼児に下痢症状が発生しているかどうかを記載してください',
'Are there children living in adult prisons in this area?': 'この地域で、成人用刑務所に収容されている未成年がいるかどうかを記載してください',
'Are there children living in boarding schools in this area?': 'この地域で、寄宿舎に居住している未成年がいるかどうかを記載してください',
'Are there children living in homes for disabled children in this area?': 'この地域で、障がいのある子供の世話をするために家にいる未成年がいるかどうかを記載してください',
'Are there children living in juvenile detention in this area?': 'この地域で、少年院に収容されている未成年がいるかどうかを記載してください',
'Are there children living in orphanages in this area?': 'この地域で、孤児となった子供は居ますか?',
'Are there children with chronical illnesses in your community?': '慢性疾患をもった子どもが共同体の中にいるかどうかを記載してください',
'Are there health services functioning for the community since the disaster?': '災害発生後、共同体で医療サービスが機能しているかどうかを記載してください',
'Are there older people living in care homes in this area?': 'この地域で、介護施設に居住している高齢者がいるかどうかを記載してください',
'Are there older people with chronical illnesses in your community?': 'この共同体のなかで、慢性疾患を患っている高齢者がいるかどうかを記載してください',
'Are there people with chronical illnesses in your community?': 'この共同体の中で、慢性疾患を患っている人物がいるかどうかを記載してください',
'Are there separate latrines for women and men available?': 'トイレが男女別になっているかどうかを記載してください',
'Are there staff present and caring for the residents in these institutions?': 'これら施設の居住者に対して、ケアと介護を行えるスタッフが存在するかどうかを記載してください',
'Area': 'エリア',
'Areas inspected': '調査済み地域',
'Assessment Details': 'アセスメントの詳細',
'Assessment Reported': 'アセスメントを報告しました',
'Assessment Summaries': 'アセスメントの要約',
'Assessment Summary Details': 'アセスメント要約の詳細',
'Assessment Summary added': 'アセスメントの要約を追加しました',
'Assessment Summary deleted': 'アセスメントの要約を削除しました',
'Assessment Summary updated': 'アセスメントの要約を更新しました',
'Assessment Type': 'アセスメントタイプ',
'Assessment added': 'アセスメントを追加しました',
'Assessment admin level': 'アセスメントの管理レベル',
'Assessment and Activities Gap Analysis Map': 'アセスメントと活動のギャップについての解析マップ',
'Assessment and Activities Gap Analysis Report': 'アセスメントと支援活動のギャップ解析レポート',
'Assessment deleted': 'アセスメントを削除しました',
'Assessment timeline': 'アセスメントタイムライン',
'Assessment updated': 'アセスメントを更新しました',
'Assessment': 'アセスメント',
'Assessments Needs vs. Activities': '需要アセスメントと支援活動のギャップ',
'Assessments and Activities': 'アセスメントと支援活動',
'Assessments are shown as green, yellow, orange, red.': 'アセスメントは、緑・黄・オレンジ・赤のいずれかの色で表されます。',
'Assessments are structured reports done by Professional Organizations - data includes WFP Assessments': 'アセスメントとは、専門団体によって作成された調査文書のことを指します。データには、WFP(国連世界食糧計画)アセスメントも含まれます',
'Assessments are structured reports done by Professional Organizations': 'アセスメントとは、専門団体によって作成された調査文書のことを指します。',
'Assessments': 'アセスメント',
'Assessments:': 'アセスメント:',
'Assessor': '査定実施者',
'Asset Assigned': '資産割り当て',
'Asset Assignment Details': '資産割り当ての詳細',
'Asset Assignments deleted': '資産の割り当てを削除しました',
'Asset Assignments updated': '物資割り当てを更新しました',
'Asset Assignments': '資産割り当て',
'Asset Details': '資産の詳細',
'Asset Management': '資産管理',
'Asset Number': '資産番号',
'Asset added': '資産を追加しました',
'Asset deleted': '資産を削除しました',
'Asset updated': '資産を更新しました',
'Asset': '資産',
'Assets': '資産',
'Assign Asset': '資産割り当て',
'Assign Storage Location': '蓄積地点の割り当て',
'Assign to Org.': '組織に割り当て',
'Assigned To': '担当者',
'Assigned to': '担当者',
'Assigned': '割り当てられた',
'Assignments': '割り当て',
'Assistance for immediate repair/reconstruction of houses': '緊急の修理/家屋復旧の手伝い',
'Assistant': 'アシスタント',
'At/Visited Location (not virtual)': '実際に訪問した/訪問中のロケーション',
'Attend to information sources as described in <instruction>': '<instruction>に記載されている情報ソースへの参加',
'Attribution': '属性',
'Audit Read': '監査報告書の読み込み',
'Audit Write': '監査報告書の書き込み',
'Author': '作者',
'Automotive': '車両',
'Availability': 'ボランティア期間',
'Available Alternative Inventory Items': '利用可能な他の物資',
'Available Beds': '利用可能なベッド数',
'Available Inventory Items': '利用可能な倉庫内の物資',
'Available Messages': '利用可能なメッセージ',
'Available Records': '利用可能なレコード',
'Available databases and tables': '利用可能なデータベースおよびテーブル',
'Available for Location': '活動可能な地域',
'Available from': 'ボランティア開始日',
'Available in Viewer?': 'ビューワ内で利用可能?',
'Available until': 'ボランティア終了日',
'Availablity': '活動期間',
'Avalanche': '雪崩',
'Avoid the subject event as per the <instruction>': '<instruction>に従って対象の事象を避ける',
'Babies who are not being breastfed, what are they being fed on?': '乳児に対して母乳が与えられない場合、どうやって乳幼児の食事を確保しますか?',
'Baby And Child Care': '乳幼児へのケア',
'Background Color for Text blocks': 'テキストブロックの背景色',
'Background Color': '背景色',
'Bahai': 'バハイ',
'Baldness': '禿部',
'Balochi': 'バロチ語',
'Banana': 'バナナ',
'Bank/micro finance': '銀行/マイクロファイナンス',
'Barricades are needed': 'バリケードが必要',
'Base Layer?': '基本レイヤ?',
'Base Layers': '基本レイヤ',
'Base Location': '基本となるロケーション',
'Base Unit': '基本単位',
'Baseline Number of Beds': '平常時のベッド設置数',
'Baseline Type Details': '基準値タイプの詳細',
'Baseline Type added': '基準値タイプを追加しました',
'Baseline Type deleted': '基準値のタイプを削除しました',
'Baseline Type updated': '基準値タイプを更新しました',
'Baseline Type': '基準値タイプ',
'Baseline Types': '基準値の種類',
'Baseline added': '基準値を追加しました',
'Baseline deleted': '基準値を削除しました',
'Baseline number of beds of that type in this unit.': 'この施設における、通常状態のベッド収容数です。',
'Baseline updated': '基準値を更新しました',
'Baselines Details': '基準値の詳細',
'Baselines': '基準値',
'Basic Assess.': '基本アセスメント',
'Basic Assessment Reported': 'ベーシック・アセスメントを報告しました',
'Basic Assessment': '基本アセスメント',
'Basic Details': '基本情報',
'Basic information on the requests and donations, such as category, the units, contact details and the status.': '支援要請と寄付に関する基本情報です。カテゴリ、単位、連絡先詳細および状態等が記載されています。',
'Basic medical supplies available prior to disaster': '災害発生以前 基本的な医療行為の提供',
'Basic medical supplies available since disaster': '災害発生後 基本的な医療行為の提供',
'Basic reports on the Shelter and drill-down by region': '避難所の基本レポートと、地域による絞り込み',
'Basic': '基本',
'Baud rate to use for your modem - The default is safe for most cases': 'モデムを使用するためのボーレートです。大抵の場合はデフォルトが安全です。',
'Baud': 'ボー値',
'Beam': '梁',
'Bed Capacity per Unit': '施設ごとのベッド最大収容数',
'Bed Capacity': 'ベッド最大収容数',
'Bed Type': 'ベッド種別',
'Bed type already registered': 'ベッドのタイプは既に登録済みです。',
'Bedding materials available': '寝具が利用可能である',
'Below ground level': '地下',
'Beneficiary Type': '受益者タイプ',
'Biological Hazard': '生物災害',
'Biscuits': 'ビスケット',
'Blizzard': '吹雪',
'Blood Type (AB0)': '血液型 (AB0式)',
'Blowing Snow': '地吹雪',
'Boat': 'ボート',
'Bodies found': '未回収の遺体',
'Bodies recovered': '回収済みの遺体',
'Body Recovery Reports': '遺体回収レポート',
'Body Recovery Request': '遺体回収の要請',
'Body Recovery Requests': '遺体回収の要請',
'Body': '本文',
'Bomb Explosion': '爆発が発生',
'Bomb Threat': '爆発の危険性',
'Bomb': '爆発物',
'Border Color for Text blocks': 'テキストブロックの枠色',
'Bounding Box Insets': '領域を指定した枠組みへ差し込む',
'Bounding Box Size': '領域を指定した枠組みのサイズ',
'Boys 13-18 yrs in affected area': '影響地域内の13-18歳の男子数',
'Boys 13-18 yrs not attending school': '学校に来ていなかった13-18歳の男子数',
'Boys 6-12 yrs in affected area': '影響地域内の6-12歳の男子数',
'Boys 6-12 yrs not attending school': '学校に来ていなかった6-12歳の男子数',
'Brand Details': '銘柄の詳細',
'Brand added': '銘柄を追加しました',
'Brand deleted': '銘柄が削除されました',
'Brand updated': '銘柄が更新されました',
'Brand': '銘柄',
'Brands': '銘柄',
'Breast milk substitutes in use since disaster': '災害発生後から母乳代替品を使用している',
'Breast milk substitutes used prior to disaster': '災害前から母乳代替品を使用していた',
'Bricks': 'レンガ',
'Bridge Closed': '橋梁(通行止め)',
'Bucket': 'バケツ',
'Buddhist': '仏教徒',
'Budget Details': '予算の詳細',
'Budget Updated': '予算を更新しました',
'Budget added': '予算を追加しました',
'Budget deleted': '予算を削除しました',
'Budget updated': '予算を更新しました',
'Budget': '予算',
'Budgeting Module': '予算編成モジュール',
'Budgets': '予算編成',
'Buffer': 'バッファ',
'Bug': 'バグ',
'Building Aide': '建設援助',
'Building Assessment': '建物のアセスメント',
'Building Assessments': '建築物アセスメント',
'Building Collapsed': '崩壊した建物',
'Building Name': '建物名',
'Building Safety Assessments': '建物の安全アセスメント',
'Building Short Name/Business Name': '建物の名前 / 会社名',
'Building or storey leaning': '建物または階層が傾いている',
'Built using the Template agreed by a group of NGOs working together as the': '例えばECB等、多くのNGOによって利用されている形式を使っての記録が可能です。',
'Bulk Uploader': 'まとめてアップロード',
'Bundle Contents': '小包の内容',
'Bundle Details': 'Bundleの詳細',
'Bundle Updated': 'バンドルを更新しました',
'Bundle added': 'バンドルを追加しました',
'Bundle deleted': 'バンドルを削除しました',
'Bundle updated': 'バンドル・セットを更新しました',
'Bundle': 'バンドル',
'Bundles': 'バンドル',
'Burn ICU': '熱傷 ICU',
'Burn': '火傷(やけど)',
'Burned/charred': '火傷/炭化',
'Business damaged': 'ビジネスへの損害が発生している',
'By Inventory': '物資の送付元',
'By Person': '人物ごと',
'By Site': 'サイト別',
'By Warehouse': '送付元倉庫',
'CBA Women': 'CBA 女性',
'CN': '貨物運送状',
'CSS file %s not writable - unable to apply theme!': 'CSS ファイル %s が書き込み不可になっているため、テーマを適用することができません。',
'Calculate': '計算',
'Camp Coordination/Management': '仮泊施設間の調整 / 管理',
'Camp': '仮泊施設',
'Can only disable 1 record at a time!': '一度に1つしか無効にできません!',
'Can users register themselves for authenticated login access?': '新規ユーザが、他者の承認なしに自分を新規ユーザとして登録できるか?',
'Cancel Add': '追加を取り消す',
'Cancel Shipment': '輸送をキャンセルする',
'Cancel': 'キャンセル',
'Canceled': 'キャンセル',
'Candidate Matches for Body %s': 'Bodyに適合した候補者は %s',
'Canned Fish': '魚の缶詰',
'Cannot be empty': '必ず入力してください。',
'Cannot delete whilst there are linked records. Please delete linked records first.': 'リンクされたレコードがあるので削除できません。このレコードよりも先に、リンク先のレコードを削除してください。',
'Cannot disable your own account!': '自分自身のアカウントを無効にする事はできません',
'Capacity (Max Persons)': '収容可能数 (最大人数)',
'Capacity (W x D X H)': '収容可能面積 (W x D X H)',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': '被災者の個々のグループについて、情報を取得する (ツアー旅行者、滞在者、家族、など)',
'Capture Information on each disaster victim': '被災者情報を個別に把握する',
'Capturing organizational information of a relief organization and all the projects they have in the region': '個々の支援団体と、地域内で実行中の全てのプロジェクトを取得します',
'Capturing the essential services each Volunteer is providing and where': '各ボランティアの居場所と、提供している主要なサービスを取得する',
'Capturing the projects each organization is providing and where': '各団体の所在地と、提供している主要なサービスを取得します',
'Cardiology': '心臓病学',
'Cash available to restart business': '事業再開に必要な資金調達が可能',
'Cassava': 'キャッサバ',
'Casual Labor': '一般労働',
'Casualties': '犠牲者',
'Catalog Item added': '救援物資カタログにアイテムを追加しました',
'Catalog Item deleted': 'カタログアイテムを削除しました',
'Catalog Item updated': '救援物資カタログを更新しました',
'Catalog Item': '救援物資カタログ',
'Catalog Items': '物資カタログ',
'Catalog Name': 'カタログ名',
'Catalog': 'カタログ',
'Category': 'カテゴリ',
'Category<>Sub-Category<>Catalog Relation added': 'Category<>Sub-Category<>Catalog 間の関係を追加しました',
'Category<>Sub-Category<>Catalog Relation deleted': 'Category<>Sub-Category<>Catalog 関係を削除しました',
'Category<>Sub-Category<>Catalog Relation updated': 'Category<>Sub-Category<>Catalog 間の関係を更新しました',
'Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog 間の関係',
'Ceilings, light fixtures': '天井、照明あり',
'Central point to record details on People': '被災者や支援者など、関係者情報の集積を行ないます',
'Certificate Status': '認証状態',
'Certification': '有資格者',
'Change Password': 'パスワードの変更',
'Check for errors in the URL, maybe the address was mistyped.': '入力したURLに間違いがないか確認してください。',
'Check if the URL is pointing to a directory instead of a webpage.': 'URLがウェブページではなくディレクトリを指定しているか、確認してください。',
'Check outbox for the message status': '送信箱を調べてメッセージステータスを確認する',
'Check to delete': '削除項目にチェック',
'Check to delete:': '削除項目にチェック:',
'Check': '確認',
'Check-In': 'チェックイン',
'Check-Out': 'チェックアウト',
'Check-in': 'チェックイン',
'Check-out': 'チェックアウト',
'Checklist created': 'チェックリストを作成しました',
'Checklist deleted': 'チェックリストを削除しました',
'Checklist of Operations': '作業項目チェックリスト',
'Checklist updated': 'チェックリストを更新しました',
'Checklist': 'チェックリスト',
'Chemical Hazard': '化学災害',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': '兵器による攻撃、脅威(化学兵器、生物兵器、放射能汚染、核兵器、高威力の爆発)',
'Chicken': 'ニワトリ',
'Child (2-11)': '子供 (2-11歳)',
'Child (< 18 yrs)': '子供 (18歳未満)',
'Child Abduction Emergency': '未成年誘拐警報',
'Child headed households (<18 yrs)': '代表者が未成年 (18歳以下)の世帯数',
'Child': '子供',
'Children (2-5 years)': '子供たち (2-5歳)',
'Children (5-15 years)': '子供たち(5-15歳)',
'Children (< 2 years)': '子供たち (2歳未満)',
'Children in adult prisons': '成人用刑務所に未成年がいる',
'Children in boarding schools': '寄宿制学校の児童がいる',
'Children in homes for disabled children': '障がい児施設にいる子ども',
'Children in juvenile detention': '少年院収容者がいる',
'Children in orphanages': '身寄りの無い人がいる',
'Children living on their own (without adults)': '未成年のみで自活(成人無し)',
'Children not enrolled in new school': '新しい学校に入学していない子供',
'Children orphaned by the disaster': '被災のため孤児になった子供たち',
'Children separated from their parents/caregivers': '親(または親相当の後見人)とはぐれた子供の数',
'Children that have been sent to safe places': '安全な地域へ疎開済みの子供数',
'Children who have disappeared since the disaster': '災害発生後に行方不明の子供たち',
'Children with chronical illnesses': '慢性疾患をもつ子供がいる',
'Chinese (Taiwan)': '中国語 (台湾繁体字)',
'Cholera Treatment Capability': 'コレラ治療対応能力',
'Cholera Treatment Center': 'コレラ治療センター',
'Cholera Treatment': 'コレラの治療',
'Cholera-Treatment-Center': 'コレラ治療センター',
'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': '新規の評価とチームの判定に基づいた新しいポスターを選択してください。建物全体が深刻な状態の場合「危険」を、一部は使える場合「制限あり」です。主要な出入口に「調査済み」プラカードを設置してください。全ての使用可能な出入口には他のプラカードを設置してください。',
'Choose': '選択',
'Choosing Skill and Resources of Volunteers': 'ボランティアのスキルとリソースを選択してください',
'Christian': 'キリスト教徒',
'Church': '教会',
'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.': '行方不明時の状況や、この人物の生存を最後に確認した人物についての情報を記載してください。',
'Civil Emergency': '市民緊急事態',
'Cladding, glazing': '被覆・外壁、ガラス板',
'Clear Selection': '選択をクリア',
'Click on the link %(url)s to reset your password': 'リンクをクリックしてください %(url)s パスワードのリセット',
'Click on the link %(url)s to verify your email': 'リンクをクリックしてください %(url)s 登録されたメールアドレスに間違いが無いことが確認されます',
'Client IP': 'クライアントIP',
'Clinical Laboratory': '臨床検査',
'Clinical Operations': '診療の人員数',
'Clinical Status': '診療状況',
'Close map': '地図を閉じる',
'Closed': '閉鎖中',
'Closure': '閉鎖・通行止め',
'Clothing': '衣服',
'Cluster Details': 'クラスタの詳細',
'Cluster Distance': 'クラスタ距離',
'Cluster Subsector Details': 'クラスタのサブクラスタの詳細',
'Cluster Subsector added': 'クラスタのサブセクタを追加しました',
'Cluster Subsector deleted': 'クラスタのサブセクタを削除しました',
'Cluster Subsector updated': 'クラスタのサブセクタを更新しました',
'Cluster Subsector': 'クラスタのサブクラスタ',
'Cluster Subsectors': 'クラスタのサブセクタ',
'Cluster Threshold': 'クラスタのしきい値',
'Cluster added': 'クラスタを追加しました',
'Cluster deleted': 'クラスタを削除しました',
'Cluster updated': 'クラスタを更新しました',
'Cluster': 'クラスタ',
'Cluster(s)': 'クラスタ',
'Clusters': 'クラスタ',
'Code': 'プロジェクトコード',
'Cold Wave': '寒波',
'Collapse, partial collapse, off foundation': '全壊、一部損壊、off foundation',
'Collective center': '収集センター',
'Color for Underline of Subheadings': 'サブヘッダのアンダーラインの色',
'Color of Buttons when hovering': 'ホバー時のボタンの色',
'Color of bottom of Buttons when not pressed': '押されなかった時のボタンの下部の色',
'Color of bottom of Buttons when pressed': 'ボタン押下時の下部の色',
'Color of dropdown menus': 'ドロップダウンメニューの色',
'Color of selected Input fields': '選択中の入力フィールドの色',
'Color of selected menu items': '選択中のメニューアイテムの色',
'Column Choices (One Per Line': 'カラム選択 (一行に一つ',
'Columns, pilasters, corbels': '円柱、付け柱、コーベル',
'Combined Method': '複数証跡の組み合わせ',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': '復旧まで少々お待ちください。あなた以外の閲覧者にも、この表示がされています。',
'Come back later.': '復旧まで少々お待ちください',
'Comments': 'コメント',
'Commercial/Offices': '商業 / オフィス',
'Commit Date': '受け入れ日',
'Commit from %s': '%sからのコミット',
'Commit': 'コミット',
'Commit Status': '支援の引き受け状況',
'Commiting a changed spreadsheet to the database': '変更後のスプレッドシートをデータベースに反映します',
'Commitment Added': 'コミットメントを追加しました',
'Commitment Canceled': 'コミットをキャンセルしました',
'Commitment Details': 'コミットの詳細',
'Commitment Item Details': 'コミットされた救援物資の詳細',
'Commitment Item added': 'コミットの物資を追加しました',
'Commitment Item deleted': 'コミットされた救援物資を削除しました',
'Commitment Item updated': 'コミット物資を更新しました',
'Commitment Item': '物資のコミットメント',
'Commitment Items': 'コミットされた物資',
'Commitment Status': '支援の引き受け状況',
'Commitment Updated': 'コミットを更新しました',
'Commitment': 'コミットメント',
'Commitments': 'コミット',
'Committed By': '受け入れ団体/人',
'Committed': 'コミット済み',
'Committing Inventory': '引き受け中の倉庫',
'Communication problems': 'コミュニケーションの問題',
'Community Centre': 'コミュニティセンター',
'Community Health Center': '地域の医療センター',
'Community Member': 'コミュニティの構成員',
'Complete Unit Label for e.g. meter for m.': '単位を表すラベル。例えばメートルなら m など。',
'Complete': '完了',
'Completed': '完了',
'Complexion': '人種、肌色',
'Compose': 'メッセージ作成',
'Compromised': '易感染状態',
'Concrete frame': 'コンクリートのフレーム',
'Concrete shear wall': 'コンクリートせん断壁',
'Config added': '設定を追加しました',
'Config deleted': '設定を削除しました',
'Config updated': '設定を更新しました',
'Config': '設定',
'Configs': '設定',
'Configurations': '設定',
'Configure Run-time Settings': 'ランタイムの設定',
'Confirm Shipment Received': '配送物の受領を確認',
'Confirmed Incidents': '確認済みのインシデント',
'Confirmed': '確認済み',
'Conflict Details': 'コンフリクトの詳細',
'Conflict Resolution': 'データ競合の解決',
'Consignment Note': '出荷通知',
'Constraints Only': '制約のみ',
'Consumable': '消耗品',
'Contact Data': '連絡先データ',
'Contact Details': '連絡先の詳細',
'Contact Information Added': '連絡先情報を追加しました',
'Contact Information Deleted': '連絡先情報を削除しました',
'Contact Information Updated': '連絡先情報を更新しました',
'Contact Information': '連絡先情報',
'Contact Method': '問い合わせ方法',
'Contact Name': '連絡先名',
'Contact Person': '窓口担当者',
'Contact Phone': '連絡先電話番号',
'Contact details': '連絡先の詳細',
'Contact information added': '連絡先情報を追加しました',
'Contact information deleted': '連絡先情報を削除しました',
'Contact information updated': '連絡先情報を更新しました',
'Contact person in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': '詳細事項の質問や連絡を行なう際の連絡担当者を記載します(レポート報告者と異なる場合のみ)。電話番号、住所、電子メールなどを記載してください。',
'Contact person(s) in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': '情報伝達や追加質問を行う際の代表担当者(報告者と異なる場合のみ記載してください)。電話番号や住所、メールアドレスなどを指定できます。',
'Contact us': '問い合わせ',
'Contact': '連絡先',
'Contacts': '連絡先',
'Contents': '内容',
'Contradictory values!': '値が矛盾しています!',
'Contributor': '投稿者',
'Conversion Tool': '変換ツール',
'Cooking NFIs': '調理用器具',
'Cooking Oil': '調理油',
'Coordinate Conversion': '座標変換',
'Coping Activities': '一時対応活動',
'Copy any data from the one to be deleted into the one to keep': '削除する側の候補地から残す方の候補地へ、必要なデータを転載します。',
'Copy': 'コピー',
'Corn': 'とうもろこし',
'Cost Type': '料金種別',
'Cost per Megabyte': '1メガバイト毎に課金',
'Cost per Minute': '1分毎に課金',
'Country of Residence': '居住国',
'Country': '国',
'Create & manage Distribution groups to receive Alerts': 'アラートの送付先グループを作成・管理する',
'Create Activity Report': '支援活動レポートを追加',
'Create Activity Type': '支援活動タイプを追加',
'Create Activity': '支援活動を追加',
'Create Assessment': 'アセスメントを新規追加',
'Create Asset': '資産の追加',
'Create Bed Type': 'ベッドの種類を追加',
'Create Brand': '銘柄を追加',
'Create Budget': '予算を追加',
'Create Catalog Item': '物資カタログを追加',
'Create Catalog': 'カタログを追加',
'Create Checklist': 'チェックリストの作成',
'Create Cholera Treatment Capability Information': 'コレラ治療能力に関する情報の追加',
'Create Cluster Subsector': 'クラスタのサブセクタを追加',
'Create Cluster': 'クラスタを追加',
'Create Contact': '連絡先を追加',
'Create Dead Body Report': '遺体発見レポートを追加',
'Create Feature Layer': 'Feature Layerを追加',
'Create Group Entry': 'グループエントリの作成',
'Create Group': 'グループを追加',
'Create Hospital': '病院を新規追加',
'Create Identification Report': 'IDレポートを追加',
'Create Impact Assessment': '災害影響範囲アセスメントの作成',
'Create Import Job': 'Import Jobの作成',
'Create Incident Report': 'インシデントレポートを追加',
'Create Incident': 'インシデントを追加',
'Create Item Category': '物資カテゴリを追加',
'Create Item Pack': '救援物資パックの追加',
'Create Item': '救援物資を新規追加',
'Create Kit': 'キットを新規追加',
'Create Layer': 'レイヤを追加',
'Create Location': 'ロケーションを追加',
'Create Map Profile': '地図設定を追加',
'Create Marker': 'マーカーを追加',
'Create Member': 'メンバを追加',
'Create Mobile Impact Assessment': '災害影響範囲アセスメントをモバイル端末から作成',
'Create Office': 'オフィスを追加',
'Create Organization': '団体を追加',
'Create Personal Effects': 'Personal Effectsを追加',
'Create Project': 'プロジェクトを追加',
'Create Projection': '地図投影法を追加',
'Create Rapid Assessment': '被災地の現況アセスメントを作成',
'Create Report': 'レポートを新規追加',
'Create Request': '支援要請を作成',
'Create Resource': 'リソースを追加',
'Create River': '河川情報を追加',
'Create Role': '役割を追加',
'Create Sector': '活動分野を追加',
'Create Service Profile': 'サービスプロファイルを追加',
'Create Shelter Service': '避難所における提供サービスを追加',
'Create Shelter Type': '避難所タイプを追加',
'Create Shelter': '避難所を追加',
'Create Skill Type': 'スキルタイプを追加',
'Create Skill': 'スキルを追加',
'Create Status': '状況を追加',
'Create Task': 'タスクを追加',
'Create Theme': 'テーマを追加',
'Create User': 'ユーザを追加',
'Create Volunteer': 'ボランティアの追加',
'Create Warehouse': '倉庫を追加',
'Create a Person': '人物情報を追加',
'Create a group entry in the registry.': '登録にグループエントリを作成。',
'Create, enter, and manage surveys.': '調査の作成、入力、管理を実施',
'Creation of Surveys': '聞き取り調査の新規作成',
'Credential Details': '証明書の詳細',
'Credential added': '証明書を追加しました',
'Credential deleted': '証明書を削除しました',
'Credential updated': '証明書を更新しました',
'Credentials': '証明書',
'Crime': '犯罪',
'Criteria': '基準',
'Currency': '通貨',
'Current Group Members': '現在のグループメンバ',
'Current Identities': '現在のID',
'Current Location': '現在のロケーション',
'Current Log Entries': '現在のログエントリ',
'Current Memberships': '現在のメンバシップ',
'Current Notes': '現在選択中の追加情報',
'Current Registrations': '現在の登録',
'Current Status': '現在の状況',
'Current Team Members': '現在のチームメンバ',
'Current Twitter account': '現在のTwitterアカウント',
'Current community priorities': '現在のコミュニティの優先順位',
'Current general needs': '現在の需要',
'Current greatest needs of vulnerable groups': '現在、被災者が最も必要としている物資/サービス',
'Current health problems': '現在の健康問題',
'Current main income sources': '現在の主な収入源',
'Current major expenses': '現在の主な支出項目',
'Current number of patients': '現在の患者数',
'Current problems, categories': '現在の問題、カテゴリ',
'Current problems, details': '現在の問題の詳細',
'Current request': '現在の要求',
'Current response': '現在の対応状況',
'Current session': '現在のセッション',
'Current type of health problems, adults': '現在発生中の健康問題(成人)',
'Current type of health problems, children': '現在発生中の健康問題(小児)',
'Current type of source for drinking water': '現在の飲料水確保方法',
'Current type of source for sanitary water': '現在の生活用水確保方法',
'Custom Database Resource (e.g., anything defined as a resource in Sahana)': 'カストマイズされたデータベースのリソース (例:Sahana 内のリソースとして定義された物)',
'Customisable category of aid': 'カスタマイズ可能な支援カテゴリ',
'DC': '寄付の証明(Donation Certificate)',
'DECISION': '決定',
'DNA Profile': 'DNAプロファイル',
'DNA Profiling': 'DNAプロファイリング',
'DVI Navigator': '被災者の検索',
'Daily': '日次',
'Dam Overflow': 'ダム決壊',
'Damage': '損傷',
'Dangerous Person': '危険人物',
'Dashboard': 'ダッシュボード',
'Data import policy': 'データのインポートポリシー',
'Data uploaded': 'データがアップロードされました',
'Database': 'データベース',
'Date & Time': '日付と時刻',
'Date Avaialble': '日付あり',
'Date Available': '可能な日付',
'Date Received': '物資受領日',
'Date Requested': '要請した日',
'Date Required': '物資が必要になる日',
'Date Sent': '送付日',
'Date and Time of Goods receipt. By default shows the current time but can be modified by editing in the drop down list.': '物資を受領した日時を記録します。デフォルトでは現在の時間が入力されます。変更するには、ドロップダウンリストから選択してください。',
'Date and Time': '日付と時刻',
'Date and time this report relates to.': 'このレポートに関連する日付と時刻',
'Date of Birth': '生年月日',
'Date of Latest Information on Beneficiaries Reached': '恩恵を受ける人にたどり着いた最新の情報の日付',
'Date of Report': 'レポートの日付',
'Date': '日付',
'Date/Time of Find': '日付/発見日時',
'Date/Time of disappearance': '行方不明になった日付/時刻',
'Date/Time': '日付/時刻',
'De-duplicator': '重複解消機能',
'Dead Body Details': '遺体の詳細',
'Dead Body Reports': '遺体情報レポート',
'Dead Body': '遺体の管理',
'Dead body report added': '遺体発見レポートを追加しました',
'Dead body report deleted': '遺体報告を削除しました',
'Dead body report updated': '遺体レポートを更新しました',
'Deaths in the past 24h': '過去24時間の死者',
'Deaths/24hrs': '死亡者数/24h',
'Debug': 'デバッグ',
'Deceased': '死亡',
'Decimal Degrees': '十進角',
'Decomposed': '腐乱',
'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'マップウィンドウのデフォルトの縦高。ウィンドウレイアウトでは、マップはウィンドウ全体に最大化されるので、大きな値を設定する必要はありません。',
'Default Height of the map window.': '地図ウィンドウの初期の高さ',
'Default Marker': 'デフォルトマーカー',
'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': 'マップウィンドウのデフォルトの幅。ウィンドウレイアウトでは、マップはウィンドウ全体に最大化されるので、大きな値を設定する必要はありません。',
'Default Width of the map window.': '地図ウィンドウの幅の初期値',
'Default synchronization policy': 'データ同期ポリシーのデフォルト設定',
'Defaults updated': 'デフォルト値を更新しました',
'Defaults': 'デフォルト値',
'Defecation area for animals': '動物排便用の地域',
'Defines the icon used for display of features on handheld GPS.': 'ハンドヘルドGPSに表示するアイコンを決定します。',
'Defines the icon used for display of features on interactive map & KML exports. A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class. If neither are defined, then the Default Marker is used.': '対話型地図および KML の出力上で Feature の表示に使用するアイコンを定義します。Feature Class に割り当てられたマーカーを上書きする必要がある場合、個々の場所に割り当てられたマーカーが設定されます。どちらも定義されていない場合は、デフォルトのマーカーが使用されます。',
'Defines the icon used for display of features on interactive map & KML exports.': 'インタラクティブマップとKMLエクスポートで建物などの表示に使われるアイコン定義',
'Defines the marker used for display & the attributes visible in the popup.': 'ポップアップ時と通常時に表示されるマーカーを指定してください。',
'Degrees must be a number between -180 and 180': '度数は -180 から 180 の間にしてください。',
'Dehydration': '脱水症状',
'Delete Aid Request': '援助要請を削除',
'Delete Alternative Item': '代わりの物資を削除する',
'Delete Assessment Summary': 'アセスメントの要約を削除',
'Delete Assessment': 'アセスメントを削除',
'Delete Asset Assignments': '資産割り当ての削除',
'Delete Asset': '資産の削除',
'Delete Baseline Type': '基準値タイプを削除',
'Delete Baseline': '基準値を削除',
'Delete Brand': 'ブランドを削除してください',
'Delete Budget': '予算を削除',
'Delete Bundle': 'Bundleを削除',
'Delete Catalog Item': '救援物資カタログを削除',
'Delete Cluster Subsector': 'クラスタのサブクラスタを削除',
'Delete Cluster': 'クラスタを削除',
'Delete Commitment Item': 'コミットした物資の削除',
'Delete Commitment': 'コミットメントの削除',
'Delete Config': '設定を削除',
'Delete Contact Information': '連絡先情報の削除',
'Delete Credential': '証明書の削除',
'Delete Distribution Item': '配給物資を削除',
'Delete Distribution': '配給所を削除',
'Delete Document': '文書を削除',
'Delete Donor': '資金提供組織を削除',
'Delete Entry': 'エントリを削除',
'Delete Feature Layer': '機能レイヤを削除',
'Delete Group': 'グループを削除',
'Delete Hospital': '病院を削除',
'Delete Image': '画像を削除',
'Delete Impact Type': '影響範囲のタイプを削除',
'Delete Impact': '影響範囲の削除',
'Delete Incident Report': 'インシデントレポートを削除',
'Delete Incident': 'インシデントを削除',
'Delete Inventory Item': '備蓄物資を削除',
'Delete Inventory Store': '物資集積地点を削除',
'Delete Item Category': 'アイテムカテゴリを削除',
'Delete Item Pack': '救援物資パックの削除',
'Delete Item': '救援物資を削除',
'Delete Key': 'Keyを削除',
'Delete Kit': 'Kitを削除',
'Delete Layer': 'レイヤーを削除',
'Delete Level 1 Assessment': 'レベル1アセスメントの削除',
'Delete Level 2 Assessment': 'レベル2アセスメントの削除',
'Delete Location': 'ロケーションを削除',
'Delete Map Profile': '地図設定を削除',
'Delete Marker': 'マーカーを削除',
'Delete Membership': 'メンバシップを削除',
'Delete Message': 'メッセージを削除',
'Delete Metadata': 'メタデータを削除',
'Delete Need Type': '需要タイプを削除',
'Delete Need': '要求を削除',
'Delete Office': 'オフィスを削除',
'Delete Old': '古いものを削除',
'Delete Organization': '団体情報を削除',
'Delete Peer': 'データ同期先の削除',
'Delete Person': '人物情報を削除',
'Delete Photo': '写真を削除',
'Delete Project': 'プロジェクトを削除',
'Delete Projection': '地図投影法を削除',
'Delete Rapid Assessment': '被災地の現況アセスメントを削除',
'Delete Received Item': '受け取った物資の削除',
'Delete Received Shipment': '受け取った輸送の削除',
'Delete Record': 'レコードを削除',
'Delete Recovery Report': '遺体回収レポートを削除',
'Delete Report': 'レポートを削除',
'Delete Request Item': '物資の要請を削除',
'Delete Request': '支援要請を削除',
'Delete Resource': 'リソースを削除',
'Delete Section': 'Sectionを削除',
'Delete Sector': '活動分野を削除',
'Delete Sent Item': '送付物資を削除',
'Delete Sent Shipment': '輸送物資を削除',
'Delete Service Profile': 'サービスプロファイルを削除',
'Delete Setting': '設定を削除',
'Delete Skill Type': 'スキルタイプを削除',
'Delete Skill': 'スキルを削除',
'Delete Staff Type': 'スタッフタイプを削除',
'Delete Status': '状況を削除しました',
'Delete Subscription': '寄付申し込みを削除',
'Delete Survey Answer': '調査回答削除',
'Delete Survey Question': 'Survey Questionを削除',
'Delete Survey Section': '調査項目を削除',
'Delete Survey Series': '一連の調査を削除',
'Delete Survey Template': '調査用テンプレートを削除',
'Delete Unit': '単位を削除',
'Delete User': 'ユーザを削除',
'Delete Volunteer': 'ボランティアを削除',
'Delete Warehouse Item': '倉庫物資の削除',
'Delete Warehouse': '倉庫を削除',
'Delete from Server?': 'サーバから削除しますか?',
'Delete': '削除',
'Delivered': '配信済み',
'Delphi Decision Maker': 'Delphi意思決定',
'Demographic': '人口情報',
'Demonstrations': 'デモ発生',
'Dental Examination': '歯科検査',
'Dental Profile': '歯の欠損/治療跡',
'Department/Unit Name': '所属部課名',
'Deployment': '展開',
'Describe the condition of the roads to your hospital.': '道路状況|病院までの道路状況を記載してください',
"Describe the procedure which this record relates to (e.g. 'medical examination')": 'このレコードに関連する手続きを説明してください。(例えば "検診" です。)',
'Description of Bin Type': 'Binタイプを記載してください',
'Description of Contacts': '連絡先の説明',
'Description of defecation area': '排泄用地についての補足説明',
'Description of drinking water source': '飲料水に関する補足説明',
'Description of sanitary water source': '生活用水に関する説明',
'Description of water source before the disaster': '災害発生前の水の確保方法について補足説明',
'Description': '説明',
'Descriptive Text (e.g., Prose, etc)': '説明文 (例: 文学、等)',
'Designated for': '指定済み',
'Desire to remain with family': '家族との残留を希望',
'Destination': '目的地',
'Detail': '詳細',
'Details': '詳細',
'Dialysis': '透析',
'Diaphragms, horizontal bracing': '仕切り板、水平部材',
'Diarrhea among children under 5': '5歳未満の幼児に下痢が蔓延している',
'Diarrhea': '下痢',
'Dignitary Visit': '要人の訪問',
'Dimensions of the storage bin. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': '物資備蓄スペースの容積。ドロップダウンリストから単位を選び、以下の形式にしたがって入力してください。 1 x 2 x 3 , 横幅 x 奥行き x 縦幅。',
'Dimensions of the storage location. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': '物資備蓄スペースの容積。ドロップダウンリストから単位を選び、以下の形式にしたがって入力してください。 1 x 2 x 3 , 横幅 x 奥行き x 縦幅。',
'Direction': '方向',
'Disable': '無効',
'Disabled participating in coping activities': '障害者が災害対応に従事',
'Disabled': '無効',
'Disabled?': '無効になっているか?',
'Disaster Victim Identification': '被災者の同定',
'Disaster Victim Registry': '被災者登録',
'Disaster clean-up/repairs': '災害の清掃活動や修復',
'Discharge (cusecs)': '流水量 (cusecs)',
'Discharges/24hrs': '退院者数/24h',
'Discussion Forum on item': 'フォーラム(物資について)',
'Discussion Forum': 'フォーラム',
'Disease vectors': '病原媒介者',
'Dispatch Items': 'アイテムの発送',
'Dispatch': '発送',
'Dispensary': '診療所',
'Displaced Populations': '避難者数',
'Displaced': '避難中',
'Display Polygons?': '多角形を表示しますか?',
'Display Routes?': 'ルートを表示しますか?',
'Display Tracks?': 'Tracksを表示しますか?',
'Display Waypoints?': 'ウェイポイントを表示しますか?',
'Dispose Expired/Unusable Items': '期限切れ / 使用できない物資の処分',
'Dispose': '処分',
'Distance between defecation area and water source': '水資源採取場所と排泄場所の間の距離',
'Distance between latrines and temporary shelter in meters': 'トイレと避難所の距離(m)',
'Distance between shelter and latrines': '簡易避難所と排泄場所との間の距離(メートル)',
'Distance(Kms)': '距離(Kms)',
'Distribution Details': '配給所の詳細',
'Distribution Item Details': '配給物資の詳細',
'Distribution Item added': '配給物資を追加しました',
'Distribution Item deleted': '配給物資を削除しました',
'Distribution Item updated': '配給物資を更新しました',
'Distribution Item': '配給物資',
'Distribution Items': '配給物資',
'Distribution added': '配給所を追加しました',
'Distribution deleted': '配給所を削除しました',
'Distribution groups': '配信グループ',
'Distribution updated': '配給所を更新しました',
'Distribution': '配給所',
'Distributions': '配給所',
'District': '地区(行政地区)',
'Do adolescent and youth in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'あなたの地域の青年は、災害に対応するための支援活動に参加しますか?(例: 打ち合わせ、宗教活動、清掃活動ボランティアなど)',
'Do households each have at least 2 containers (10-20 litres each) to hold water?': '1つの世帯ごとに、少なくとも2つ以上の水貯蔵容器(10-20リットル/容器)があるかどうかを記載してください',
'Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?': '調理や食事に必要となる道具や器材(コンロ、ポット、皿やプレート、マグカップ、飲料容器など)が世帯に存在するかを記載します',
'Do households have bedding materials available (tarps, plastic mats, blankets)?': 'ベッド、あるいはベッド用部材(例:タープ、プラスチックマット、毛布)が世帯に存在するかを記載します',
'Do households have household water storage containers?': '水貯蔵容器が世帯に存在するかを記載します',
'Do minority members in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '地域にいるマイノリティ(社会的少数者)の人が、自助的な災害対処につながる活動に参加しているか記載してください。(例 打ち合わせ、宗教活動、地域の清掃ボランティアなど)',
'Do older people in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '災害復旧活動に従事している高齢者が、共同体の中にいるかどうかを記載してください(例: 打ち合わせ、宗教活動、清掃活動ボランティアなど)',
'Do people have at least 2 full sets of clothing (shirts, pants/sarong, underwear)?': '個人に対して、少なくとも2セット以上の衣服(シャツ、ズボン/腰巻、下着など)があるかどうか記載してください',
'Do people have reliable access to sufficient sanitation/hygiene items (bathing soap, laundry soap, shampoo, toothpaste and toothbrush)?': '十分な量のサニタリ / 衛生用品が、安定して供給されているかどうかを記載します(石鹸、シャンプー、歯ブラシ、洗濯用洗剤など)',
'Do people with disabilities in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'あなたの地域で障害者と一緒にいる方は、災害に対処るための彼らの支援活動に参加しますか?(例: 打ち合わせ、宗教活動、清掃活動ボランティアなど)',
'Do women and girls have easy access to sanitary materials?': '女性用生理用品の入手が容易かどうかを記載してください',
'Do women in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': 'あなたの地域の女性は、災害対応のための支援活動に参加しますか?(例: 打ち合わせ、宗教活動、清掃活動ボランティアなど)',
'Do you have access to cash to restart your business?': 'ビジネス再開に必要な現金が入手可能かどうかを記載してください',
'Do you know of any incidents of violence?': '暴力事件が発生したかどうかを記載してください',
'Do you know of children living on their own (without adults)?': '成人がおらず、未成年のみで生活しているグループがあるかどうかを記載してください',
'Do you know of children separated from their parents or caregivers?': '親や養育者とはぐれた未成年がいるかどうかを記載してください',
'Do you know of children that have been orphaned by the disaster?': '災害によって孤児となった未成年がいるかどうかを記載してください',
'Do you know of children that have been sent to safe places?': '安全な場所に疎開した未成年がいるかどうかを記載してください',
'Do you know of children that have disappeared without explanation in the period since the disaster?': '災害発生後、行き先の説明ないまま連絡が取れなくなった未成年がいるかどうかを記載してください',
'Do you know of older people who are primary caregivers of children?': '未成年に対する介護経験がある高齢者がいるかどうかを記載してください',
'Do you know of parents/caregivers missing children?': '子供と連絡が取れなくなった親や養育者がいるかどうかを記載してください',
'Do you really want to delete these records?': '本当にこれらのデータを削除しますか?',
'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': 'この輸送の受領をキャンセルしますか?キャンセルするとこの物資は備蓄から削除されます。この操作は *取り消せません!*',
'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': '出荷された物資をキャンセルしますか?この物資は、在庫に返されます。このアクションは、元に戻せません。',
'Do you want to over-write the file metadata with new default values?': 'ファイルのメタデータを、新しいデフォルト値で上書きしますか?',
'Do you want to receive this shipment?': 'この輸送物資を受け取られますか?',
'Do you want to send these Committed items?': 'これらコミットされた物資を送付してよいですか?',
'Do you want to send this shipment?': 'この発送情報を送信しますか?',
'Document Details': '文書の詳細',
'Document Scan': '文書のスキャン',
'Document added': '文書を追加しました',
'Document deleted': '文書を削除しました',
'Document updated': '文書を更新しました',
'Document': '文書',
'Documents and Photos': '文書と写真',
'Documents': '文書',
'Does this facility provide a cholera treatment center?': 'コレラ治療センターの機能を提供可能かどうか',
'Doing nothing (no structured activity)': '活動なし(組織立った行動なし)',
'Dollars': 'ドル',
'Domain': 'ドメイン',
'Domestic chores': '家事手伝い',
'Donation Certificate': '寄付証明書',
'Donation Phone #': '寄付受付電話番号',
'Donor Details': '資金提供組織の詳細',
'Donor added': '資金提供組織を追加しました',
'Donor deleted': '資金提供組織を削除しました',
'Donor updated': '資金提供組織を更新しました',
'Donor': '資金提供組織',
'Donors Report': '資金提供レポート',
'Donors': '資金提供組織',
'Door frame': 'ドア枠',
'Download PDF': 'PDFをダウンロード',
'Draft Features': '草案(ドラフト)',
'Draft': 'ドラフト',
'Drainage': '排水',
'Drawing up a Budget for Staff & Equipment across various Locations.': 'ロケーションに対する、スタッフと備品の予算を作成します。',
'Drill Down by Group': 'グループで絞り込み',
'Drill Down by Incident': 'インシデントで絞り込み',
'Drill Down by Shelter': '避難所で絞り込み',
'Driving License': '運転免許',
'Drought': '干ばつ',
'Drugs': '医薬品',
'Dug Well': '丸井戸',
'Duplicate?': '重複?',
'Duration': '活動実施期間',
'Dust Storm': '粉塵嵐',
'Dwelling': '居住施設',
'Dwellings': '住居数',
'EMS Reason': '緊急医療受け入れ状態',
'EMS Status Reason': '救急医療状況の理由',
'EMS Status': 'EMSステータス',
'EMS Traffic Status': '救急医療の混雑状況',
'ER Status Reason': 'ER医療状況の理由',
'ER Status': 'ER ステータス',
'Early Recovery': '早期復旧',
'Earthquake': '地震',
'Easy access to sanitation items for women/girls': '女性用サニタリ用品の入手が容易である',
'Edit Activity': '支援活動を編集',
'Edit Address': '住所の編集',
'Edit Aid Request': '援助要請を編集',
'Edit Alternative Item': '代わりの物資を編集',
'Edit Application': 'アプリケーションの編集',
'Edit Assessment Summary': 'アセスメントの要約を編集',
'Edit Assessment': 'アセスメントを編集',
'Edit Asset Assignment': '資産割り当ての編集',
'Edit Asset': '資産を編集',
'Edit Baseline Type': '基準値のタイプを編集',
'Edit Baseline': 'Baselineの編集',
'Edit Brand': '銘柄の編集',
'Edit Budget': '予算の編集',
'Edit Bundle': 'Bundleの編集',
'Edit Catalog Item': '救援物資カタログの編集',
'Edit Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog 関係の編集',
'Edit Cluster Subsector': 'クラスタのサブセクターの編集',
'Edit Cluster': 'クラスタを編集',
'Edit Commitment Item': 'コミットされた物資の検索',
'Edit Commitment': 'コミットを編集',
'Edit Config': '設定の編集',
'Edit Contact Information': '連絡先情報の編集',
'Edit Contact': '連絡先の編集',
'Edit Contents': '内容の編集',
'Edit Credential': '証明書の編集',
'Edit Dead Body Details': '遺体の詳細を編集',
'Edit Defaults': 'デフォルト値の編集',
'Edit Description': '説明の編集',
'Edit Details': '詳細の編集',
'Edit Disaster Victims': '被災者情報の編集',
'Edit Distribution Item': '配給物資の編集',
'Edit Distribution': '配給所の編集',
'Edit Document': '文書を編集',
'Edit Donor': '資金提供組織の編集',
'Edit Email Settings': '電子メール設定の編集',
'Edit Feature Layer': 'Feature Layerの編集',
'Edit Flood Report': '洪水レポートの編集',
'Edit Gateway Settings': 'ゲートウェイ設定の編集',
'Edit Group': 'グループの編集',
'Edit Hospital': '病院の編集',
'Edit Identification Report': 'IDレポートの編集',
'Edit Identity': 'IDの編集',
'Edit Image Details': '画像の詳細の編集',
'Edit Image': '画像の編集',
'Edit Impact Type': '災害影響のタイプを編集',
'Edit Impact': '被災影響の編集',
'Edit Incident Report': 'インシデントレポートの編集',
'Edit Incident': 'インシデントを編集',
'Edit Inventory Item': '備蓄物資の編集',
'Edit Inventory Store': '物資集積地点の編集',
'Edit Item Catalog Categories': '救援物資カタログのカテゴリを編集',
'Edit Item Catalog': '救援物資カタログの編集',
'Edit Item Category': '救援物資カテゴリの編集',
'Edit Item Pack': '物資パックを編集',
'Edit Item Sub-Categories': '救援物資サブカテゴリの編集',
'Edit Item': '物資の編集',
'Edit Key': 'Keyの編集',
'Edit Kit': 'Kitの編集',
'Edit Layer': 'レイヤの編集',
'Edit Level 1 Assessment': 'レベル1アセスメントを編集する',
'Edit Level 2 Assessment': 'レベル2アセスメントを編集',
'Edit Location': 'ロケーションの編集',
'Edit Log Entry': 'ログエントリの編集',
'Edit Map Profile': '地図設定を編集する',
'Edit Map Services': '地図サービスの編集',
'Edit Marker': 'マーカーの編集',
'Edit Membership': 'メンバシップの編集',
'Edit Message': 'メッセージの編集',
'Edit Messaging Settings': 'メッセージ設定の編集',
'Edit Metadata': 'メタデータの編集',
'Edit Modem Settings': 'モデム設定の編集',
'Edit Need Type': '需要タイプの編集',
'Edit Need': 'ニーズを編集',
'Edit Note': '追加情報を編集',
'Edit Office': 'オフィスの編集',
'Edit Options': 'オプション編集',
'Edit Organization': '団体の編集',
'Edit Parameters': 'パラメータの編集',
'Edit Peer Details': 'データ同期先の詳細を編集',
'Edit Peer': 'データ同期先の編集',
'Edit Person Details': '人物情報の詳細を編集',
'Edit Personal Effects Details': 'Personal Effectsの詳細の編集',
'Edit Photo': '写真の編集',
'Edit Pledge': '寄付の編集',
'Edit Position': '場所の編集',
'Edit Problem': '問題の編集',
'Edit Project': 'プロジェクトの編集',
'Edit Projection': '地図投影法の編集',
'Edit Rapid Assessment': '被災地の現況アセスメントの編集',
'Edit Received Item': '物資の受領を編集',
'Edit Received Shipment': '物資の輸送の受領報告を編集',
'Edit Record': 'レコードの編集',
'Edit Recovery Details': '遺体回収の詳細を編集',
'Edit Registration Details': '登録状況の詳細を編集',
'Edit Registration': '登録の編集',
'Edit Report': 'レポートの編集',
'Edit Request Item': '物資の要請を編集',
'Edit Request': '支援要請の編集',
'Edit Resource': 'リソースの編集',
'Edit Response': '返信を編集',
'Edit River': '河川の編集',
'Edit Role': '役割の編集',
'Edit Sector': '活動分野を編集',
'Edit Sent Item': '送付した物資の編集',
'Edit Setting': '設定の編集',
'Edit Settings': '設定の編集',
'Edit Shelter Service': '避難所提供サービスの編集',
'Edit Shelter Type': '避難所タイプの編集',
'Edit Shelter': '避難所の編集',
'Edit Shipment Transit Log': '輸送履歴の編集',
'Edit Shipment to Send': '送付する輸送を編集',
'Edit Shipment/Way Bills': '輸送費/移動費の編集',
'Edit Shipment<>Item Relation': '輸送<>物資の関係を編集',
'Edit Site': 'Siteを編集',
'Edit Skill Type': 'スキルタイプの編集',
'Edit Skill': 'スキルの編集',
'Edit Solution': '解決案の編集',
'Edit Staff Type': 'スタッフタイプの編集',
'Edit Staff': 'スタッフの編集',
'Edit Storage Bin Type(s)': 'Storage Binタイプを編集',
'Edit Storage Bins': 'Storage Binの編集',
'Edit Storage Location': '備蓄地点の編集',
'Edit Subscription': '寄付申し込みの編集',
'Edit Survey Answer': '調査回答の編集',
'Edit Survey Question': '調査の質問項目を編集',
'Edit Survey Section': 'フィードバック内容を編集します',
'Edit Survey Series': '一連の調査の編集',
'Edit Survey Template': '調査テンプレートを編集',
'Edit Task': 'タスクの編集',
'Edit Team': 'チームの編集',
'Edit Theme': 'テーマの編集',
'Edit Themes': 'テーマの編集',
'Edit Ticket': 'チケットの編集',
'Edit Track': '追跡情報の編集',
'Edit Tropo Settings': 'Tropo 設定の編集',
'Edit Unit': '単位の編集',
'Edit User': 'ユーザの編集',
'Edit Volunteer Details': 'ボランティアの詳細を編集する',
'Edit Volunteer Registration': 'ボランティア登録の編集',
'Edit Warehouse Item': '倉庫物資を編集',
'Edit Warehouse': '倉庫を編集',
'Edit current record': '現在のレコードの編集',
'Edit message': 'メッセージの編集',
'Edit the Application': 'アプリケーションの編集',
'Edit': '編集',
'Editable?': '編集可能?',
'Education materials received': '教育資材を受領した',
'Education materials, source': '教育資材の送付元',
'Education': '教育',
'Effects Inventory': '備蓄物資への影響',
'Eggs': '卵',
'Either a shelter or a location must be specified': '避難所かロケーションのどちらかを特定する必要があります',
'Either file upload or document URL required.': 'ファイルのアップロードか文書のURLのいずれかが必要です。',
'Either file upload or image URL required.': 'アップロードするファイルか、URLを指定してください。',
'Elderly person headed households (>60 yrs)': '代表者が60歳以上の世帯数',
'Electrical': '電動の',
'Electrical, gas, sewerage, water, hazmats': '電気、ガス、下水道、水、有害物',
'Elevated': '高まる',
'Elevators': 'エレベーター',
'Email Address': 'メールアドレス',
'Email Settings': '電子メール設定',
'Email address verified, however registration is still pending approval - please wait until confirmation received.': '電子メールの認証は完了しましたが、登録はまだ完了していません。確認が完了するまで少々お待ちください。',
'Email settings updated': '電子メールの設定を更新しました',
'Email verification': '利用者登録の確認',
'Email': '電子メール',
'Embalming': '遺体防腐処理',
'Embassy': '大使館',
'Emergency Capacity Building project': 'ECB (緊急時の被災者収容建築プロジェクト)',
'Emergency Department': '救急部門',
'Emergency Shelter': '緊急避難所',
'Emergency Support Facility': '緊急支援施設',
'Emergency Support Service': '緊急支援サービス',
'Emergency Telecommunications': '緊急時電話連絡先',
'Enable/Disable Layers': 'レイヤの有効化/無効化',
'Enabled': '有効',
'End date should be after start date': '終了日付は開始日付より後にしてください',
'End date': '終了日',
'End of Period': '終了期間',
'English': 'English 英語',
'Enter Coordinates': '緯度経度を入力',
'Enter Coordinates:': '座標入力:',
'Enter a GPS Coord': 'GPS Coordを入力',
'Enter a GPS Coordinate': 'GPS座標を入力してください',
'Enter a date before': '以前の日時を入力',
'Enter a few characters of the name to select an existing Location or else simply type the name of the new Location.': '最初の数文字を入力して既存の項目から選ぶか、あるいは新しいロケーション名を入力して、ロケーションを特定してください。',
'Enter a name for the spreadsheet you are uploading (mandatory).': 'アップロードするスプレッドシートの名前を入力してください。(必須項目)',
'Enter a new support request.': '新規の支援要請を登録',
'Enter a summary of the request here.': '要求事項の概要を入力',
'Enter a unique label!': 'そのラベル名は使われています。一意のラベル名を入力してください。',
'Enter a valid date before': 'より前の正しい日付を入力してください',
'Enter a valid email': '正しいメールアドレスを入力してください',
'Enter a valid future date': '正しい未来の日付を入力してください',
'Enter some characters to bring up a list of possible matches': '文字を入力することで、候補の一覧が表示されます',
'Enter some characters to bring up a list of possible matches.': '検索文字列を入力してください',
'Enter tags separated by commas.': 'タグはカンマで区切って入力してください。',
'Enter the same password as above': '確認のため、パスワードを再入力',
'Enter your firstname': 'あなたの名前を入力',
'Entered': '入力された',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': '電話番号の入力は任意です。入力すると、SMS メッセージの受け取り登録ができます。',
'Entering an Organization is optional, but doing so directs you to the appropriate approver & means you automatically get the appropriate permissions.': '選択リストに含まれる団体のメンバーであれば、所属する団体を選択してください。(団体の選択は必須ではありません)',
'Entry deleted': 'エントリを削除しました',
'Environment': '環境',
'Equipment': '備品',
'Error encountered while applying the theme.': 'テーマ適用時にエラーが発生しました。',
'Error in message': 'エラーメッセージ',
"Error logs for '%(app)s'": '"%(app)s" に関するエラーログ',
'Errors': 'エラー',
'Estimated # of households who are affected by the emergency': '非常事態の影響を受けた世帯の推定数',
'Estimated # of people who are affected by the emergency': '非常事態の影響を受けた住民の推定数',
'Estimated Overall Building Damage': '建物全体の被害見積り',
'Estimated total number of people in institutions': 'なんらかの施設に収容されている住民の推定数',
'Euros': 'ユーロ',
'Evacuating': '退避中',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'このメッセージの情報を評価します。(この値は、公開される警告アプリケーションで使用してはなりません)',
'Event Time': 'イベント発生時刻',
'Event Type': 'イベントタイプ',
'Event type': 'イベントタイプ',
'Example': '例',
'Exceeded': '超過',
'Exclude contents': 'コンテンツを除く',
'Excreta disposal': 'し尿処理',
'Execute a pre-planned activity identified in <instruction>': '事前に準備していた計画 <instruction>を実行する',
'Existing Placard Type': '設置されたポスターのタイプ',
'Existing food stocks': '食糧備蓄あり',
'Existing food stocks, main dishes': '備蓄中の食料(主皿)',
'Existing food stocks, side dishes': '備蓄中の食料(副皿)',
'Exits': '出口',
'Expected In': '入庫予定',
'Expected Out': '出庫予定',
'Experience': '熟練者',
'Expiry Date': '有効期限',
'Expiry Time': '有効期限',
'Expiry_Date': '有効期限',
'Explosive Hazard': '爆発災害',
'Export Data': 'データのエクスポート',
'Export Database as CSV': 'データベースをCSV形式でエクスポート',
'Export in GPX format': 'GPXフォーマットでエクスポート',
'Export in KML format': 'KMLフォーマットでエクスポート',
'Export in OSM format': 'OSMフォーマットでエクスポート',
'Export in PDF format': 'PDFフォーマットでエクスポート',
'Export in RSS format': 'RSSフォーマットでエクスポート',
'Export in XLS format': 'XLSフォーマットでエクスポート',
'Export': 'エクスポート',
'Exterior Only': '外装のみ',
'Exterior and Interior': '外装と内装',
'External Features': '外部機能',
'Eye Color': '目の色',
'Facial hair, color': 'ヒゲ, 色',
'Facial hair, type': 'ヒゲ, 形状',
'Facial hear, length': 'ヒゲ, 長さ',
'Facility Operations': '施設の運用',
'Facility Status': '施設の状態',
'Facility Type': '施設タイプ',
'Factors affecting school attendance': '生徒の就学に影響する要因',
'Failed to send mail to Approver - see if you can notify them manually!': '承認依頼メールを送信できませんでした。利用者登録は完了していません。サイト管理者へ連絡してください。',
'Failed!': '失敗しました!',
'Falling Object Hazard': '落下/墜落による災害',
'Families/HH': '家族/世帯',
'Family tarpaulins received': 'タープ(家族用簡易テント)を受領した',
'Family tarpaulins, source': 'タープ(家族用簡易テント)の送付元',
'Family': '家族',
'Family/friends': '家族/友人',
'Farmland/fishing material assistance, Rank': '農業 / 漁業用物資の補助、ランク',
'Fatalities': '死亡者',
'Fax': 'ファックス',
'Feature Layer Details': '機能レイヤの詳細',
'Feature Layer added': '機能レイヤを追加しました',
'Feature Layer deleted': '機能レイヤを削除しました',
'Feature Layer updated': '機能レイヤを更新しました',
'Feature Layers': '機能レイヤ',
'Feature Namespace': 'Feature 名前空間',
'Feature Request': '機能の要求',
'Feature Type': 'Feature タイプ',
'Feature': '機能',
'Features Include': '含まれる機能',
'Female headed households': '代表者が女性の世帯数',
'Female': '女性',
'Few': '少数',
'Field Hospital': '野外病院',
'File': 'ファイル',
'Fill in Latitude': '緯度を記入',
'Fill in Longitude': '経度を記入',
'Fill out Rapid Evaluation Forms': '迅速評価フォームに記入します',
'Fill out detailed Evaluation Forms': '詳細な評価フォームに入力する',
'Filter Field': 'フィールドをフィルタする',
'Filter Value': '値をフィルタ',
'Filter': 'フィルタ',
'Filtered search of aid pledges and requests': '援助申出と要請の検索されたもの',
'Find All Matches': '完全一致',
'Find Dead Body Report': '遺体レポートの発見',
'Find Hospital': '病院を探す',
'Find Person Record': '人物情報を検索',
'Find Recovery Report': '遺体発見レポート',
'Find Volunteers': 'ボランティアを探す',
'Find a Person Record': '人物情報を検索する',
'Find by Name': '名前で検索',
'Find': '検索',
'Finder': '発見者',
'Fingerprint': '指紋',
'Fingerprinting': '指紋',
'Fingerprints': '指紋',
'Finish': '完了',
'Finished Jobs': '完了したジョブ',
'Fire suppression and rescue': '消火・救出活動',
'Fire': '火災',
'First Name': '名',
'First name': '名',
'Fishing': '漁業',
'Flash Flood': '鉄砲水',
'Flash Freeze': '瞬間凍結',
'Fleet Management': '車両の管理',
'Flexible Impact Assessments': '災害影響範囲アセスメント',
'Flood Alerts show water levels in various parts of the country': '洪水警報では、国内各所の水位情報を確認することができます。',
'Flood Alerts': '洪水警報',
'Flood Report Details': '洪水レポートの詳細',
'Flood Report added': '洪水レポートを追加しました',
'Flood Report deleted': '洪水レポートを削除しました',
'Flood Report updated': '洪水レポートを更新しました',
'Flood Report': '洪水レポート',
'Flood Reports': '洪水レポート',
'Flood': '洪水',
'Flow Status': '流れの状況',
'Focal Point': '代表者',
'Fog': '濃霧',
'Food Supply': '食料の供給',
'Food assistance available/expected': '食糧援助が利用可能 / 期待できる',
'Food assistance': '食糧援助',
'Food': '食料',
'Footer file %s missing!': 'フッターファイル%sが見つかりません。',
'Footer': 'フッタ',
'For Eden instances enter the application base URL, e.g. http://sync.sahanfoundation.org/eden, for other peers the URL of the synchronization interface.': 'Eden の場合はベースURL(例えば http://sync.sahanfoundation.org/eden)、他のシステムの場合は同期インターフェースのURL。',
'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': 'POP-3では通常110 (SSLでは995)で、IMAPでは通常143 (IMAPSでは993)。',
'For Warehouse': '倉庫向け',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': '国の場合は ISO2 コード、町の場合は 空港コード(Airport Locode)',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'それぞれの同期パートナーについて、指定した間隔で実行する同期ジョブがデフォルトで存在します。必要に応じて、さらなる同期ジョブを設定し、カスタマイズすることができます。開始するには、リンクをクリックしてください。',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'セキュリティ向上のため、ユーザー名とパスワードを入力し、団体の他端末の管理者にユーザー名とパスワードを通知して「データ同期」 -> 「データ同期パートナー」であなたのUUIDに追加してもらうことを推奨します。',
'For live help from the Sahana community on using this application, go to': 'Sahanaの使い方について Sahanaコミュニティからライブヘルプを希望する際は、以下に進んでください。',
'For messages that support alert network internal functions': '警戒(alert)ネットワークの内部機能をサポートするメッセージの場合',
'For more details on the Sahana Eden system, see the': 'Sahana Edenに関する詳細は、以下をごらんください。',
'For more information, see ': '詳細は、以下を参照してください。',
'For other types, the next screen will allow you to enter the relevant details...': 'その他の種類については、次の画面で関連する詳細情報を入力できます…',
'For': ' ',
'For:': '対象:',
'Forest Fire': '森林火災',
'Formal camp': '指定避難所',
'Format': 'フォーマット',
'Forms': 'フォーム',
'Found': '発見された',
'Foundations': '構造基礎',
'Freezing Drizzle': '凍結霧雨',
'Freezing Rain': 'みぞれ',
'Freezing Spray': '冷却スプレー',
'French': 'フランス語',
'Friday': '金曜日',
'From Inventory': '送付元',
'From Location': '送付元ロケーション',
'From Organization': '送付元団体',
'From Person': '送付元の担当者',
'From Warehouse': '倉庫から',
'From': '輸送元',
'Frost': '凍結',
'Fulfil. Status': '確保量は十分か',
'Fulfillment Status': '充足状況',
'Full beard': 'もみあげまでのアゴヒゲ、口髭あり',
'Full': '満員',
'Fullscreen Map': 'フルスクリーン表示',
'Function Permissions': '機能に対する権限',
'Function': '機能',
'Functional Tests': '機能テスト',
'Functions available': '利用可能な機能',
'Funding Organization': '資金提供団体',
'Funeral': '葬儀',
'Further Action Recommended': '更なる対応が推奨されている',
'GIS Reports of Shelter': '避難所のGISレポート',
'GIS integration to view location details of the Shelter': '避難所のロケーション詳細を閲覧するGISインテグレーション',
'GPS Marker': 'GPSマーカー',
'GPS Track File': 'GPS Track ファイル',
'GPS Track': 'GPS トラック',
'GPX Layers': 'GPX レイヤ',
'GPX Track': 'GPX形式の追跡情報',
'GRN Status': 'GRNステータス',
'Gale Wind': '強風',
'Gantt Chart': 'ガントチャート',
'Gap Analysis Map': 'ギャップ解析マップ',
'Gap Analysis Report': 'ギャップ解析報告',
'Gap Analysis': 'ギャップ解析',
'Gap Map': '需給ギャップマップ',
'Gap Report': '需給ギャップの報告',
'Gateway Settings': 'ゲートウェイ設定',
'Gateway settings updated': 'ゲートウェイ設定を更新しました',
'Gender': '性別',
'General Comment': '包括コメント',
'General Medical/Surgical': '一般医学/外科',
'General emergency and public safety': '一般的緊急事態と公共の安全',
'General information on demographics': '人口統計の情報',
'Generator': '発電機',
'Geocoder Selection': 'Geocoder 選択',
'Geometry Name': 'Geometry名',
'Geonames.org search requires Internet connectivity!': 'Geonames.org の検索を行うには、インターネットに接続している必要があります。',
'Geophysical (inc. landslide)': '地球物理 (地滑りを含む)',
'Geotechnical Hazards': '地盤災害',
'Geotechnical': '地質工学',
'Geraldo module not available within the running Python - this needs installing for PDF output!': '実行中のPythonでGeraldoモジュールが利用できません。PDF出力に必要です。',
'Geraldo not installed': 'Geraldoがインストールされていません',
'Get incoming recovery requests as RSS feed': '遺体回収要請をRSSフィードとして取得する',
'Girls 13-18 yrs in affected area': '影響地域内の13-18歳の女子数',
'Girls 13-18 yrs not attending school': '学校に来ていなかった13-18歳の女子数',
'Girls 6-12 yrs in affected area': '影響地域内の6-12歳の女子数',
'Girls 6-12 yrs not attending school': '学校に来ていなかった6-12歳の女子数',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': '画像に関する説明。特に、写真のどの箇所に何が確認できるかを記載します (オプション)',
'Give information about where and when you have seen the person': '人物を見かけた場所や時間の情報を提供してください',
'Give information about where and when you have seen them': 'どこで、いつ、彼らを見かけたのか、情報をください',
'Global Messaging Settings': 'メッセージの全般設定',
'Glossary': '用語集',
'Go to Request': '支援要請に行く',
'Goatee': 'やぎヒゲ',
'Goods Received Note': '受諾した物資の注釈',
'Government UID': '政府UID',
'Government building': '政府所管の建物',
'Government': '政府・行政機関',
'Grade': '学年',
'Greek': 'ギリシャ語',
'Green': '緑',
'Ground movement, fissures': '地盤移動、亀裂',
'Ground movement, settlement, slips': '地盤移動、沈下、がけ崩れ',
'Group %(group_id)s created': 'グループ %(group_id)s を作成しました',
'Group Description': 'グループの説明',
'Group Details': 'グループの詳細',
'Group ID': 'グループID',
'Group Member added': 'グループメンバを追加しました',
'Group Members': 'グループメンバ',
'Group Memberships': 'グループメンバシップ',
'Group Name': 'グループ名',
'Group Title': 'グループのタイトル',
'Group Type': 'グループのタイプ',
'Group added': 'グループを追加しました',
'Group deleted': 'グループを削除しました',
'Group description': 'グループの説明',
'Group name': 'グループ名',
'Group type': 'グループタイプ',
'Group updated': 'グループを更新しました',
'Group': 'グループ',
'Groups removed': 'グループを削除しました',
'Groups': 'グループ',
'Guest': 'ゲスト',
'HR Data': '人的資源の情報',
'HR Manager': '人的資源マネージャー',
'Hail': 'あられ',
'Hair Color': '頭髪の色',
'Hair Length': '頭髪の長さ',
'Hair Style': 'ヘアスタイル',
'Has additional rights to modify records relating to this Organization or Site.': 'この団体やサイトに関連するレコードを変更するための権限を追加します',
'Has data from this Reference Document been entered into Sahana?': 'リファレンス文書の内容が Sahanaに登録してあるかどうかを記載してください。',
'Has only read-only access to records relating to this Organization or Site.': 'この団体やサイトに関連するレコードを閲覧のみに制限します',
'Has the safety and security of women and children in your community changed since the emergency?': '緊急事態以来、女性や未成年の生活の危険度が変化したかどうかを記載してください',
'Has your business been damaged in the course of the disaster?': '災害の過程で、ビジネス上の損害を受けているかどうかを記載してください',
'Have households received any shelter/NFI assistance or is assistance expected in the coming days?': '世帯に対して避難所用品や生活必需品が配布されている、あるいは数日以内に配布を実施できるかを記載してください',
'Have normal food sources been disrupted?': '平常時の食料調達源が利用不可能になったかどうかを記載してください',
'Have schools received or are expecting to receive any assistance?': '学校に対してなんらかの支援が行われた、あるいは行われる予定であるかどうかを記載してください',
'Have the people received or are you expecting any medical or food assistance in the coming days?': '医療品や食糧支援を、被災者、あるいはあなたが受領したかどうか、あるいは数日以内に受領できそうかどうかを記載してください。',
'Hazard Pay': '災害補償金',
'Hazardous Material': '危険物',
'Hazardous Road Conditions': '災害発生後の道路状況',
'Header Background': 'ヘッダー背景',
'Header background file %s missing!': 'ヘッダー背景ファイル%sが存在しません。',
'Headquarters': '本部・本社',
'Health care assistance, Rank': '医療 / 介護支援、ランク',
'Health center with beds': '保健所(ベッドあり)',
'Health center without beds': '保健所(ベッドなし)',
'Health center': '保健所',
'Health services functioning prior to disaster': '災害発生以前 ヘルスサービスの提供',
'Health services functioning since disaster': '災害発生後 ヘルスサービスの提供',
'Health services status': '医療サービス状況',
'Health': '保健・介護',
'Healthcare Worker': 'ヘルスケア要員',
'Heat Wave': '熱波',
'Heat and Humidity': '熱と湿度',
'Height (cm)': '身長 (cm)',
'Height': '身長',
'Help': ' ヘルプ ',
'Helps to monitor status of hospitals': '病院の現状把握に役立つ情報を管理します',
'Helps to report and search for Missing Persons': '行方不明者の報告と検索を支援します。',
'Here are the solution items related to the problem.': '問題に関連する解決案です。',
'Heritage Listed': '遺産登録',
'Hide Details': '詳細を隠す',
'Hierarchy Level 0 Name (e.g. Country)': '階層レベル0の名前(例: 国)',
'Hierarchy Level 1 Name (e.g. Province)': '階層レベル1の名前 (例: 都道府県)',
'Hierarchy Level 2 Name': 'ロケーション階層レベル2の名前',
'Hierarchy Level 3 Name': '階層レベル3の名前',
'Hierarchy Level 4 Name': '階層レベル4の名前',
'High Water': '最高水位',
'High': '高',
'Hindu': 'ヒンズー教徒',
'History': '履歴',
'Hit the back button on your browser to try again.': 'ブラウザの「戻る」ボタンを押して、やり直してください。',
'Holiday Address': '休日の住所',
'Home Address': '自宅住所',
'Home Country': '所属国',
'Home Crime': '住居犯罪',
'Home': 'ホーム',
'Hospital Details': '病院の詳細',
'Hospital Status Report': '病院ステータスレポート',
'Hospital information added': '病院情報を追加しました',
'Hospital information deleted': '病院情報を削除しました',
'Hospital information updated': '病院情報を更新しました',
'Hospital status assessment.': '病院ステータスアセスメント',
'Hospital': '病院',
'Hospitals': '病院情報',
'Hot Spot': 'ホットスポット',
'Hour': '時間',
'Hourly': '1時間毎',
'Household kits received': '家事用品を受領しました',
'Household kits, source': '家事用品の送付元',
'How did boys 13-17yrs spend most of their time prior to the disaster?': '災害発生前、13-17歳の男子がよく集まっていた場所と活動は?',
'How did boys <12yrs spend most of their time prior to the disaster?': '災害発生前、12歳以下の男子がよく集まっていた場所と活動は?',
'How did boys girls 13-17yrs spend most of their time prior to the disaster?': '災害発生前、13-17歳の女子がよく集まっていた場所と活動は?',
'How did girls <12yrs spend most of their time prior to the disaster?': '災害発生前、12歳以下の女子がよく集まっていた場所と活動は?',
'How do boys 13-17yrs spend most of their time now?': '現在、13-17歳の男子は普段何をして過ごしていますか?',
'How do boys <12yrs spend most of their time now?': '現在、12歳以下の男子は普段何をして過ごしていますか?',
'How do girls 13-17yrs spend most of their time now?': '現在、13-17歳の女子は普段何をして過ごしていますか?',
'How do girls <12yrs spend most of their time now?': '現在、12歳以下の女子は普段何をして過ごしていますか?',
'How does it work?': 'どのように動きますか?',
'How is this person affected by the disaster? (Select all that apply)': 'この人物の被災状況を記載してください(該当する項目を全て選択)',
'How long does it take you to reach the available water resources? Specify the time required to go there and back, including queuing time, by foot.': '水資源を確保できる地点までの距離を記載します。徒歩で往復し、待ち時間も含めた時間を記載してください。',
'How long does it take you to walk to the health service?': '医療サービスが提供されている場所まで、徒歩で必要な時間を記載します。',
'How long will the food last?': '食料の残存予測期間',
'How long will this water resource last?': '水の供給が枯渇する時期',
'How many Boys (0-17 yrs) are Dead due to the crisis': '災害で死亡した少年の数(0-17歳)',
'How many Boys (0-17 yrs) are Injured due to the crisis': '災害で負傷した少年の数(0-17歳)',
'How many Boys (0-17 yrs) are Missing due to the crisis': '災害で行方不明となった少年の数(0-17歳)',
'How many Girls (0-17 yrs) are Dead due to the crisis': '災害で死亡した少女の数(0-17歳)',
'How many Girls (0-17 yrs) are Injured due to the crisis': '災害で負傷した少女の数(0-17歳)',
'How many Girls (0-17 yrs) are Missing due to the crisis': '災害で行方不明になった少女の数(0-17歳)',
'How many Men (18 yrs+) are Dead due to the crisis': '災害で死亡した男性の数(18歳以上)',
'How many Men (18 yrs+) are Injured due to the crisis': '災害で負傷した男性の数(18歳以上)',
'How many Men (18 yrs+) are Missing due to the crisis': '災害で行方不明となった男性の数(18歳以上)',
'How many Women (18 yrs+) are Dead due to the crisis': '災害で死亡した女性の数(18歳以上)',
'How many Women (18 yrs+) are Injured due to the crisis': '災害で負傷した女性の数(18歳以上)',
'How many Women (18 yrs+) are Missing due to the crisis': '災害で行方不明となった女性の数(18歳以上)',
'How many days will the supplies last?': '支援物資がなくなるまでの日数',
'How many doctors in the health centers are still actively working?': 'ヘルスセンター内の医師の人数を記載してください',
'How many houses are uninhabitable (uninhabitable = foundation and structure destroyed)?': '居住不可になった家屋数を記載してください(居住不可 = 基礎構造や土台部分の破壊など)',
'How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?': '災害によって破損したが、まだ利用が可能である住居の数を記載してください(利用可能 = 窓の破壊、壁のヒビ、屋根の軽微な破損など)',
'How many latrines are available in the village/IDP centre/Camp?': '村落/IDPセンター/仮泊施設内で利用可能なトイレの数を記載してください',
'How many midwives in the health centers are still actively working?': '医療センター内の助産師の人数を記載してください',
'How many new cases have been admitted to this facility in the past 24h?': '過去24時間でこの施設で受け入れたケースの数は?',
'How many nurses in the health centers are still actively working?': '保健所で活動可能な看護師は何人居ますか?',
'How many of the patients with the disease died in the past 24h at this facility?': 'この施設で過去24時間で何人の患者がこの病気で亡くなりましたか?',
'How many of the primary school age boys (6-12) in the area are not attending school?': 'この地域の、登校していない学童期男児(6-12歳)の数を記載してください。',
'How many of the primary school age girls (6-12) in the area are not attending school?': 'この地域の、登校していない学童期女児(6-12歳)の数を記載してください。',
'How many of the primary/secondary schools are now open and running a regular schedule of class?': '平常通りの授業を実施できている小学校・中学校・高校の数を記入してください',
'How many of the secondary school age boys (13-18) in the area are not attending school?': 'この地域の、登校していない中高校生年齢男子(13-18歳)の数を記載してください。',
'How many of the secondary school age girls (13-18) in the area are not attending school?': 'この地域の、登校していない女子中高生(13-18歳)の数を記載してください。',
'How many patients with the disease are currently hospitalized at this facility?': 'この病気のためにこの施設に入院している患者は現在何人ですか?',
'How many primary school age boys (6-12) are in the affected area?': '被災地域内の学童期男児(6-12歳)の数を記載してください',
'How many primary school age girls (6-12) are in the affected area?': '被災地域内の学童期女児(6-12歳)の数を記載してください。',
'How many primary/secondary schools were opening prior to the disaster?': '災害発生前に授業が行われていた小学校・中学校・高校の数を記載してください',
'How many secondary school age boys (13-18) are in the affected area?': '被災地域内の男子中学生・男子高校生(13-18歳)の数を記載してください',
'How many secondary school age girls (13-18) are in the affected area?': '被災地域内の中高生年齢女子(13-18歳)の数を記載してください。',
'How many teachers have been affected by the disaster (affected = unable to work)?': '被災し、授業ができない状態の教師の人数を記載してください',
'How many teachers worked in the schools prior to the disaster?': '災害発生前の教師の人数を記載してください',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'どの程度詳細な情報が表示されるかを定義します。ズームすることで詳細が表示されるようになりますが、そのかわり、広域を見渡すことができなくなります。逆に、ズームしないことで広域を表示できますが、詳細情報の確認は行えなくなります。',
'Human Resource Management': '人的資源マネージメント',
'Human Resource': '人的資源',
'Human Resources Management': '人的資源管理',
'Human Resources': '人的資源',
'Humanitarian NGO': '人道支援NGO',
'Hurricane Force Wind': 'ハリケーンの風力',
'Hurricane': 'ハリケーン',
'Hygiene NFIs': '衛生用品',
'Hygiene kits received': '衛生用品を受領した',
'Hygiene kits, source': '衛生用品の送付元',
'Hygiene practice': '衛生習慣',
'Hygiene problems': '衛生上の問題',
'Hygiene': '衛生',
'I am available in the following area(s)': '以下の地域を担当できます',
'ID Label': 'IDラベル',
'ID Label: ': 'IDラベル: ',
'ID Tag Number': 'IDタグ番号',
'ID Tag': 'ID タグ',
'ID Type': 'IDタイプ',
'Ice Pressure': '氷結圧力',
'Iceberg': 'アイスバーグ',
'Ideally a full URL to the source file, otherwise just a note on where data came from.': 'できればソースファイルの完全なURLを記載します。難しい場合はデータ入手元のメモでも構いません。',
'Identification Report': 'IDレポート',
'Identification Reports': 'IDレポート',
'Identification Status': 'IDステータス',
'Identification label of the Storage bin.': '備蓄コンテナの区別用ラベル番号。',
'Identification': 'ID',
'Identified as': '判明した身元',
'Identified by': 'によって識別された',
'Identity Details': '身元確認の詳細',
'Identity added': '身元情報を追加しました',
'Identity deleted': '身元確認を削除しました',
'Identity updated': '身元確認を更新しました',
'Identity': '身元確認',
'If Staff have login accounts then they are given access to edit the details of the': 'スタッフがログイン用アカウントを有している場合、以下項目の詳細を編集することができます:',
'If Unit = m, Base Unit = Km, then multiplicator is 0.0001 since 1m = 0.001 km.': '「Unit = m, Base Unit = Km」の場合、「1m = 0.001 km」なので乗数は0.0001 です。',
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'このドメインの電子メールアドレスを所有するユーザーを認証する場合は、承認がさらに必要かどうか、必要なら誰が承認するか、を決めるのに承認者フィールドを使用します。',
'If enabled then a log is maintained of all records a user accesses. If disabled then it can still be enabled on a per-module basis.': '有効にすると、ユーザーがアクセスしたときに、全てのレコードがログに保存されます。無効にすると、モジュール毎に有効にすることができます。',
'If enabled then a log is maintained of all records a user edits. If disabled then it can still be enabled on a per-module basis.': '有効にすると、ユーザーが編集したすべてのレコードを記録します。無効にすると、モジュール毎に有効にできます。',
'If neither are defined, then the Default Marker is used.': 'もし両方共定義されていない場合、デフォルトマーカーが使われます。',
'If no marker defined then the system default marker is used': 'マーカーが定義されていない場合は、システムのデフォルトマーカーを使用します。',
'If no, specify why': 'いいえ、の場合はその理由を記載してください',
'If none are selected, then all are searched.': 'もしなにも選択しなければ、全てを検索します',
'If the location is a geographic area, then state at what level here.': '場所が地理的に確定できる場所ならば、その場所のレベルを記載してくだい。',
'If the request is for type "Other", you should enter a summary of the request here.': '支援要請が"その他"の場合、概要をここに入力する必要があります',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'この項目が設定されている場合、ユーザーは、登録の際、この団体のスタッフとして登録されるように指定することができます',
'If this is set to True then mails will be deleted from the server after downloading.': 'Trueに設定されている場合は、メールはダウンロード後にサーバーから削除されます。',
'If this record should be restricted then select which role is required to access the record here.': 'このレコードへのアクセスを制限する際には、アクセスに必要となる権限を選択してください',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'このレコードを制限したい場合、アクセスを許可する権限を指定してください。',
'If yes, specify what and by whom': '「はい」の場合、供給される食料と供給元',
'If yes, which and how': '「はい」の場合、混乱している場所や原因を記載',
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': '参照文書を入力しない場合は、データ検証のために入力者の電子メールが表示されます。',
'If you know what the Geonames ID of this location is then you can enter it here.': 'このロケーションの Geonames ID がある場合、ここに入力してください。',
'If you know what the OSM ID of this location is then you can enter it here.': 'このロケーションの OSM ID がある場合、ここに入力してください。',
'If you need to add a new document then you can click here to attach one.': '文書の添付はこのページから可能です。',
'If you want several values, then separate with': '複数の値を入力したい場合、この文字で分割してください : ',
'If you would like to help, then please': 'ご協力いただける方は登録をお願いします',
'Illegal Immigrant': '不法移民',
'Image Details': '画像の詳細',
'Image Tags': '画像のタグ',
'Image Type': '画像のタイプ',
'Image Upload': '画像のアップロード',
'Image added': '画像を追加しました',
'Image deleted': '画像を削除しました',
'Image updated': '画像を更新しました',
'Image': '画像',
'Image/Attachment': '画像/添付資料',
'Image/Other Attachment': '画像/その他の添付ファイル',
'Imagery': '画像',
'Images': '画像',
'Immediate reconstruction assistance, Rank': '建築物の緊急修理 / 再建築支援、ランク',
'Impact Assessment Summaries': '災害影響範囲アセスメントの概要',
'Impact Assessments': '災害影響範囲アセスメント',
'Impact Baselines': '影響範囲の基準値',
'Impact Details': '被害の詳細',
'Impact Type Details': '災害影響のタイプ詳細',
'Impact Type added': '災害の影響タイプを追加しました',
'Impact Type deleted': '影響範囲タイプを削除しました',
'Impact Type updated': '災害影響のタイプを更新しました',
'Impact Type': '災害影響タイプ',
'Impact Types': '災害影響のタイプ',
'Impact added': '被災影響を追加しました',
'Impact deleted': '影響範囲を削除しました',
'Impact updated': '被災状況を更新しました',
'Impacts': '影響',
'Import & Export Data': 'データのインポートとエクスポート',
'Import Data': 'データのインポート',
'Import Job': 'Jobのインポート',
'Import Jobs': 'Jobsのインポート',
'Import and Export': 'インポートとエクスポート',
'Import from Ushahidi Instance': 'Ushahidi インスタンスから設定をインポート',
'Import if Master': 'マスターなら取り込む',
'Import job created': 'Import jobを作成しました',
'Import multiple tables as CSV': '複数のテーブルをCSVとしてインポート',
'Import': 'インポート',
'Import/Export': 'インポート/エクスポート',
'Important': '重要',
'Importantly where there are no aid services being provided': '救護サービスが提供されていない地域において重要となります',
'Imported': 'インポートしました',
'Importing data from spreadsheets': 'スプレッドシートからデータをインポートしています',
'Improper decontamination': '不適切な汚染の除去',
'Improper handling of dead bodies': '誤った扱いをされている遺体',
'In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'In GeoServerでは、これはレイヤ名です。WFS getCapabilitiesでは、これはコロン(:)後のFeatureType名の部分です。',
'In GeoServer, this is the Workspace Name. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'GeoServer では、これはワークスペース名です。WFS getCapabilities では、これはコロン「:」の前の FeatureType の部分となります。',
'In Inventories': 'この物資の在処',
'In Process': '実行中',
'In Progress': '実行中',
'In Transit': '輸送中',
'In Window layout the map maximises to fill the window, so no need to set a large value here.': 'この地図のウィンドウレイアウトは、全体を覆い隠します。従って、ここで大きな値を入力する必要はありません',
'In general, what are the greatest needs of older people, people with disabilities, children, youth and women in your community?': '一般的に、コミュニティ内の高齢者、障がい者、子供、青年、女性たちが最も必要としている物資やサービスがなんであるかを記載してください',
'Inbound Mail Settings': '着信メール設定',
'Inbox': '受信箱',
'Incident Categories': 'インシデントカテゴリ',
'Incident Details': 'インシデントの詳細',
'Incident Report Details': 'インシデントレポートの詳細',
'Incident Report added': '災害影響範囲レポートを追加しました',
'Incident Report deleted': 'インシデントレポートを削除しました',
'Incident Report updated': 'インシデントレポートを更新しました',
'Incident Report': 'インシデントレポート',
'Incident Reporting System': 'インシデントの報告を行ないます',
'Incident Reporting': 'インシデントレポート',
'Incident Reports': 'インシデントレポート',
'Incident added': 'インシデントを追加しました',
'Incident deleted': 'インシデントを削除しました',
'Incident updated': 'インシデントを更新しました',
'Incident': 'インシデント',
'Incidents': 'インシデント',
'Incoming Shipment canceled': '到着する配送が取消しされました',
'Incoming Shipment updated': '入荷した物資が更新されました',
'Incoming': '入荷',
'Incomplete': '未完了',
'Individuals': '個人',
'Industrial Crime': '産業犯罪',
'Industrial': '産業',
'Industry Fire': '工場から出火',
'Industry close to village/camp': '村落/仮泊施設の周辺に工場が存在',
'Infant (0-1)': '乳児(0-1歳)',
'Infectious Disease': '感染症',
'Infectious Diseases': '感染症',
'Infestation': '感染',
'Informal Leader': '非公式なリーダー',
'Informal camp': '非指定避難所',
'Information gaps': '情報のギャップ',
'Infusion catheters available': '注入カテーテルが利用可能',
'Infusion catheters need per 24h': '24時間毎に必要な注入カテーテル数',
'Infusion catheters needed per 24h': '24時間ごとに、注入カテーテルが必要',
'Infusions available': '点滴が利用可能',
'Infusions needed per 24h': '24時間毎に必要な点滴の数',
'Input Job': 'Jobのインポート',
'Inspected': '調査済み',
'Inspection Date': '調査した日付',
'Inspection date and time': '調査日時',
'Inspection time': '調査した時刻',
'Inspector ID': '調査者ID',
'Instance Type': 'インスタンスタイプ',
'Instant Porridge': 'インスタント粥',
'Institution': 'その他の組織',
'Insufficient Privileges': '権限が足りません',
'Insufficient vars: Need module, resource, jresource, instance': '不十分な変数: module, resource, jresource, instance が必要です',
'Insufficient': '不足',
'Intake Items': 'アイテムの受け入れ',
'Intergovernmental Organization': '国際政府間組織',
'Interior walls, partitions': '室内の壁、仕切り',
'Internal Features': '内部機能',
'Internal State': '内部状態',
'International NGO': '国際NGO',
'International Organization': '国際機関',
'International Staff': '国外からのスタッフ',
'Intervention': '介入',
'Interview taking place at': 'インタビュー実施場所',
'Invalid Query': '無効なクエリ',
'Invalid email': '無効な電子メール',
'Invalid login': '無効なログイン',
'Invalid request!': 'リクエストは無効です。',
'Invalid ticket': '無効なチケット',
'Invalid': '無効な',
'Inventories with Item': '在庫アイテム',
'Inventories': '在庫管理',
'Inventory Item Details': '救援物資の在庫詳細',
'Inventory Item added': '救援物資の在庫を追加しました',
'Inventory Item deleted': '備蓄物資を削除しました',
'Inventory Item updated': '備蓄物資を更新しました',
'Inventory Item': '備蓄物資',
'Inventory Items Available for Request Item': '要求された物資に適合する、倉庫内の物資',
'Inventory Items': '備蓄物資',
'Inventory Management': '物資の管理',
'Inventory Store Details': '物資集積地点の詳細',
'Inventory Store added': '物資集積地点を追加しました',
'Inventory Store deleted': '物資集積地点を削除しました',
'Inventory Store updated': '物資集積地点を更新しました',
'Inventory Store': '物資集積地点',
'Inventory Stores': '物資集積地点',
'Inventory functionality is available for:': '備蓄機能を利用可能:',
'Inventory of Effects': '救援物資の影響',
'Inventory': '在庫',
'Inventory/Ledger': '在庫 / 元帳',
'Is adequate food and water available for these institutions?': '関係者に対して十分な水と食料が供給されていますか?',
'Is it safe to collect water?': '水の確保は安全に行えるか?',
'Is there any industrial or agro-chemical production close to the affected area/village?': '村落/集落の近くに、工場あるいは農業化学プラントなどが存在しますか?',
'Is this a strict hierarchy?': 'これは厳密な階層構造ですか?',
'Issuing Authority': '発行機関',
'It is built using the Template agreed by a group of NGOs working together as the': '聞き取り項目のテンプレートは、以下リンクのNGO組織と協同で作成されています。',
'Item Added to Shipment': '輸送情報に物資を追加する',
'Item Catalog Categories': '物資カタログカテゴリ',
'Item Catalog Category Details': '救援物資カタログのカテゴリ詳細',
'Item Catalog Category added': '救援物資カタログのカテゴリを追加しました',
'Item Catalog Category deleted': '救援物資カタログのカテゴリを削除しました',
'Item Catalog Category updated': '物資カタログカテゴリを更新しました',
'Item Catalog Category': '救援物資カタログのカテゴリ',
'Item Catalog Details': '物資カタログの詳細',
'Item Catalog added': '救援物資カタログを追加しました',
'Item Catalog deleted': '物資カタログを削除しました',
'Item Catalog updated': '物資カタログを更新しました',
'Item Catalogs': '救援物資カタログ',
'Item Categories': '物資カテゴリ',
'Item Category Details': '物資カテゴリの詳細',
'Item Category added': '救援物資カテゴリを追加しました',
'Item Category deleted': '救援物資カテゴリを削除しました',
'Item Category updated': '物資カテゴリを更新しました',
'Item Category': '物資カテゴリ',
'Item Details': '救援物資の詳細',
'Item Pack Details': '救援物資パックの詳細',
'Item Pack added': '物資パックを追加しました',
'Item Pack deleted': '救援物資のパックを削除しました',
'Item Pack updated': '救援物資パックを更新しました',
'Item Packs': '物資パック',
'Item Sub-Categories': '救援物資のサブカテゴリ',
'Item Sub-Category Details': '物資サブカテゴリの詳細',
'Item Sub-Category added': '救援物資のサブカテゴリを追加しました',
'Item Sub-Category deleted': '物資サブカテゴリを削除しました',
'Item Sub-Category updated': '救援物資サブカテゴリを更新しました',
'Item Sub-Category': '物資サブカテゴリ',
'Item added to shipment': '物資が輸送に回りました',
'Item added': '救援物資を追加しました',
'Item already in Bundle!': '物資がすでにバンドルに存在しています。',
'Item already in Kit!': '救援物資は既にキットに存在しています',
'Item already in budget!': '物資は既に予算に登録されています',
'Item deleted': '物資を削除しました',
'Item updated': '救援物資を更新しました',
'Item': '物資',
'Items': '救援物資',
'Japan': '日本',
'Japanese': '日本語',
'Jerry can': 'ジェリ缶',
'Jew': 'ユダヤ教徒',
'Job Market': '求人',
'Job Title': '肩書き',
'Jobs': '職業',
'Just Once': '一度だけ',
'KPIs': 'KPI',
'Key Details': 'Keyの詳細',
'Key added': 'キーを追加しました',
'Key deleted': 'キーを削除しました',
'Key updated': 'キーを更新しました',
'Key': 'キー',
'Keys': 'キー',
'Kit Contents': 'Kitの内容',
'Kit Details': 'Kitの詳細',
'Kit Updated': 'キットを更新しました',
'Kit added': 'キットを追加しました',
'Kit deleted': 'キットを削除しました',
'Kit updated': 'キットを更新しました',
'Kit': 'キット',
'Kits': 'キット',
'Known Identities': '既知のID',
'Known incidents of violence against women/girls': '女性に対する暴力行為が発生した',
'Known incidents of violence since disaster': '災害発生後に暴力行為が発生した',
'LICENSE': 'ライセンス',
'LMS Administration': 'LMSの管理',
'Label': 'ラベル',
'Lack of material': '資材不足',
'Lack of school uniform': '学校制服が不足',
'Lack of supplies at school': '学校用物資の不足',
'Lack of transport to school': '学校への輸送手段の不足',
'Lactating women': '授乳中の女性の数',
'Lahar': 'ラハール',
'Landslide': '地すべり',
'Language': 'Language 言語',
'Last Name': '姓',
'Last known location': '最後に目撃された場所',
'Last name': '姓',
'Last synchronization time': 'データ同期の最終実施時刻',
'Last updated': '最終更新日',
'Last updated by': '最終更新者',
'Last updated on': '直近のアップデート実施時刻',
'Latitude & Longitude': '緯度&経度',
'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': '緯度は南北方向(上下)を定義します。赤道ではゼロ、北半球ではプラス、南半球ではマイナスとなります。',
'Latitude is North-South (Up-Down).': '緯度は南北(上下)です',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': '緯度は赤道では0、北半球ではプラス、南半球ではマイナスになります',
'Latitude should be between': '緯度の値として有効な値は',
'Latitude': '緯度',
'Latrines': 'トイレ',
'Law enforcement, military, homeland and local/private security': '法執行機関、自衛隊、警察および警備会社',
'Layer Details': 'レイヤの詳細',
'Layer added': 'レイヤを追加しました',
'Layer deleted': 'レイヤを削除しました',
'Layer updated': 'レイヤを更新しました',
'Layer': 'レイヤ',
'Layers updated': 'レイヤを更新しました',
'Layers': 'レイヤ',
'Layout': 'レイアウト',
'Legend Format': '凡例形式',
'Length': '長さ',
'Level 1 Assessment Details': 'レベル1アセスメントの詳細',
'Level 1 Assessment added': 'レベル1アセスメントを追加しました',
'Level 1 Assessment deleted': 'レベル1のアセスメントを削除しました',
'Level 1 Assessment updated': 'レベル1アセスメントを更新しました',
'Level 1 Assessments': 'レベル1 アセスメント',
'Level 1': 'レベル1',
'Level 2 Assessment Details': 'レベル2アセスメントの詳細',
'Level 2 Assessment added': 'レベル2アセスメントを追加しました',
'Level 2 Assessment deleted': 'レベル2アセスメントを削除しました',
'Level 2 Assessment updated': 'レベル2アセスメントを更新しました',
'Level 2 Assessments': 'レベル2アセスメント',
'Level 2 or detailed engineering evaluation recommended': 'レベル2あるいは詳細な技術的評価を行うことを推奨します',
'Level 2': 'レベル2',
'Level': 'レベル',
'Library support not available for OpenID': 'OpenIDのライブラリサポートが利用できません',
'License Plate': 'ナンバープレート',
'Line': '行',
'LineString': '折れ線',
'Link Item & Shipment': 'アイテムと輸送を紐付ける',
'Link an Item & Shipment': 'アイテムと出荷を結び付ける',
'Linked Records': '参照しているレコード',
'Linked records': '関連しているレコード',
'List / Add Baseline Types': '基準値タイプの一覧 / 追加',
'List / Add Impact Types': '災害影響のタイプを表示 / 追加',
'List / Add Services': 'サービスの一覧表示 / 追加',
'List / Add Types': 'タイプの一覧表示 / 追加',
'List Activities': '支援活動一覧',
'List Aid Requests': '援助要請の一覧',
'List All Entries': '全てのエントリ一覧',
'List All Memberships': '全てのメンバシップ一覧',
'List All Reports': '報告すべての一覧',
'List All': '全項目一覧',
'List Alternative Items': '代わりの物資一覧',
'List Assessment Summaries': 'アセスメント要約の一覧',
'List Assessments': 'アセスメント一覧',
'List Asset Assignments': '資産割り当ての一覧',
'List Assets': '資産一覧',
'List Baseline Types': '基準値タイプ一覧',
'List Baselines': '基準値一覧',
'List Brands': '銘柄の一覧',
'List Budgets': '予算の一覧',
'List Bundles': 'Bundleの一覧',
'List Catalog Items': '物資カタログの一覧',
'List Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog 関係一覧',
'List Checklists': 'チェックリスト一覧',
'List Cluster Subsectors': 'クラスタのサブセクタ一覧',
'List Cluster': 'クラスタ一覧',
'List Clusters': 'クラスタ一覧',
'List Commitment Items': 'コミットされた救援物資の一覧',
'List Commitments': 'コミットメントの一覧',
'List Configs': '設定一覧',
'List Conflicts': 'データ競合一覧',
'List Contact Information': '連絡先情報の一覧',
'List Contacts': '連絡先一覧',
'List Credentials': '証明書一覧',
'List Current': '現在の一覧',
'List Distribution Items': '配給物資リスト',
'List Distributions': '配給所リスト',
'List Documents': '文書の一覧',
'List Donors': '資金提供組織一覧',
'List Feature Layers': 'Featureレイヤリスト',
'List Flood Reports': '洪水レポート一覧',
'List GPX Layers': 'GPXレイヤ一覧',
'List Groups': 'グループ一覧',
'List Groups/View Members': 'グループを一覧/メンバーを表示',
'List Hospitals': '病院の一覧',
'List Identities': 'ID一覧',
'List Images': '画像の一覧',
'List Impact Assessments': '災害影響範囲アセスメント一覧',
'List Impact Types': '災害影響のタイプ一覧',
'List Impacts': '被害一覧',
'List Incident Reports': 'インシデントレポート一覧',
'List Incidents': 'インシデント一覧',
'List Inventory Items': '備蓄物資リスト',
'List Inventory Stores': '物資集積地点リスト',
'List Item Catalog Categories': '救援物資カタログのカテゴリ一覧',
'List Item Catalogs': '救援物資カタログ一覧',
'List Item Categories': '物資カテゴリ一覧',
'List Item Packs': '物資パックの一覧',
'List Item Sub-Categories': '物資サブカテゴリ一覧',
'List Items': '救援物資一覧',
'List Keys': 'Keyの一覧',
'List Kits': 'Kit一覧',
'List Layers': 'レイヤ一覧',
'List Level 1 Assessments': 'レベル1アセスメントの一覧',
'List Level 1 assessments': 'レベル1アセスメント一覧',
'List Level 2 Assessments': 'レベル2のアセスメント一覧',
'List Level 2 assessments': 'レベル2アセスメント一覧',
'List Locations': 'ロケーション一覧',
'List Log Entries': 'ログエントリ一覧',
'List Map Profiles': '地図設定の一覧',
'List Markers': 'マーカー一覧',
'List Members': 'メンバ一覧',
'List Memberships': 'メンバシップ一覧',
'List Messages': 'メッセージ一覧',
'List Metadata': 'メタデータ一覧',
'List Missing Persons': '行方不明者リストを表示',
'List Need Types': '需要タイプ一覧',
'List Needs': 'ニーズ一覧',
'List Notes': '追加情報一覧',
'List Offices': 'オフィス一覧',
'List Organizations': '団体一覧',
'List Peers': 'データ同期先一覧',
'List Personal Effects': '携帯品のリスト',
'List Persons': '人物情報一覧',
'List Photos': '写真リスト',
'List Positions': '場所一覧',
'List Problems': '問題一覧',
'List Projections': '地図投影法リスト',
'List Projects': 'プロジェクト一覧',
'List Rapid Assessments': '被災地の現況アセスメント一覧',
'List Received Items': '受領された物資の一覧',
'List Received Shipments': '受領された輸送一覧',
'List Records': 'レコード一覧',
'List Registrations': '登録証明書の一覧',
'List Reports': 'レポート一覧',
'List Request Items': '物資要請リスト',
'List Requests': '支援要請の一覧',
'List Resources': 'リソース一覧',
'List Responses': '回答の一覧',
'List Rivers': '河川リスト',
'List Roles': '役割一覧',
'List Sections': 'Section一覧',
'List Sectors': '活動分野の一覧',
'List Sent Items': '送付した物資一覧',
'List Sent Shipments': '送付済み物資一覧',
'List Service Profiles': 'サービスプロファイル一覧',
'List Settings': '設定一覧',
'List Shelter Services': '避難所での提供サービス一覧',
'List Shelter Types': '避難所タイプ一覧',
'List Shelters': '避難所の一覧',
'List Shipment Transit Logs': '物資輸送履歴の一覧',
'List Shipment/Way Bills': '輸送費/渡航費の一覧',
'List Shipment<>Item Relation': '輸送と物資の関連性一覧',
'List Shipments': '配送の一覧',
'List Sites': 'Site一覧',
'List Skill Types': 'スキルタイプを一覧表示',
'List Skills': 'スキルを一覧表示',
'List Solutions': '解決案一覧',
'List Staff Types': 'スタッフタイプ一覧',
'List Staff': 'スタッフ一覧',
'List Status': '状況一覧',
'List Storage Bin Type(s)': 'Storage Binタイプ一覧',
'List Storage Bins': 'Storage Bin一覧',
'List Storage Location': '備蓄地点の一覧',
'List Subscriptions': '寄付申し込み一覧',
'List Support Requests': '支援要求のリスト',
'List Survey Answers': '調査の回答の一覧',
'List Survey Questions': 'Survey Question一覧',
'List Survey Sections': 'Survey Sectionsの一覧',
'List Survey Series': '一連の調査リスト',
'List Survey Templates': '調査テンプレートの一覧',
'List TMS Layers': 'TMS レイヤの一覧',
'List Tasks': 'タスク一覧',
'List Teams': 'チーム一覧',
'List Themes': 'テーマ一覧',
'List Tickets': 'チケット一覧',
'List Tracks': '追跡情報の一覧',
'List Units': '単位一覧',
'List Users': 'ユーザ一覧',
'List Volunteers': 'ボランティアの表示',
'List WMS Layers': 'WMSレイヤ一覧',
'List Warehouse Items': '倉庫に備蓄中の物資一覧',
'List Warehouses': '倉庫の一覧',
'List all': '全項目を表示',
'List of Items': '物資一覧',
'List of Missing Persons': '行方不明者リスト',
'List of Peers': 'データ同期先一覧',
'List of Reports': 'レポート一覧',
'List of Requests': '支援要請の一覧',
'List of Roles': '権限リスト',
'List of Spreadsheets uploaded': 'アップロード済スプレッドシート一覧',
'List of Spreadsheets': 'スプレッドシート一覧',
'List of Volunteers for this skill set': 'このスキルを所持するボランティアの一覧',
'List of addresses': '住所一覧',
'List unidentified': '身元不明者の一覧',
'List': '一覧',
'List/Add': '一覧/追加',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': '救援団体は自身の支援活動の内容と場所を登録し、公開することで、他の組織との活動を調整することが可能となります。',
'Live Help': 'ライブヘルプ',
'Livelihood': '生計',
'Load Cleaned Data into Database': '整形したデータをデータベースへロード',
'Load Details': '詳細情報の読み込み',
'Load Raw File into Grid': 'Rawファイルをグリッドにロードしてください',
'Load the details to help decide which is the best one to keep out of the 2.': '2つのうちどちらを残すほうがよいか判断するため、詳細情報を確認します。',
'Loading Locations': 'ロケーションデータロード中',
'Loading Locations...': '位置を読込みしています ...',
'Loading': '読み込み中',
'Local Name': 'ローカル名',
'Local Names': 'ローカル名',
'Location 1': 'ロケーション 1',
'Location 2': 'ロケーション 2',
'Location De-duplicated': 'ロケーションの重複解消',
'Location Details': 'ロケーションの詳細',
'Location Hierarchy Level 0 Name': 'ロケーション階層レベル0の名前',
'Location Hierarchy Level 1 Name': 'ロケーション階層レベル1の名前',
'Location Hierarchy Level 2 Name': 'ロケーション階層レベル2の名前',
'Location Hierarchy Level 3 Name': 'ロケーション階層レベル3の名前',
'Location Hierarchy Level 4 Name': 'ロケーション階層レベル4の名前',
'Location Hierarchy Level 5 Name': 'ロケーション階層レベル5の名前',
'Location added': 'ロケーションを追加しました',
'Location cannot be converted into a group.': 'ロケーションはグループに変換できません',
'Location deleted': 'ロケーションを削除しました',
'Location details': 'ロケーションの詳細',
'Location group cannot be a parent.': 'ロケーショングループは親にできません',
'Location group cannot have a parent.': 'ロケーショングループに親情報がありません。',
'Location updated': 'ロケーションを更新しました',
'Location': 'ロケーション',
'Location: ': 'ロケーション: ',
'Locations De-duplicator': 'ロケーションの重複解消',
'Locations of this level need to have a parent of level': 'このレベルのロケーションには、親属性となるレベルが必要です',
'Locations should be different!': '異なる位置を設定してください!',
'Locations': 'ロケーション',
'Lockdown': '厳重監禁',
'Log Entry Details': 'ログエントリの詳細',
'Log entry added': 'ログエントリを追加しました',
'Log entry deleted': 'ログエントリを削除しました',
'Log entry updated': 'ログエントリを更新しました',
'Log': 'ログ',
'Logged in': 'ログインしました',
'Logged out': 'ログアウトしました',
'Login': 'ログイン',
'Logistics Management System': '物流管理システム',
'Logistics Management': '物流管理',
'Logistics': '物流',
'Logo file %s missing!': 'ロゴファイル%sが見つかりません。',
'Logo': 'ロゴ',
'Logout': 'ログアウト',
'Long Text': '詳細テキスト',
'Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. These need to be added in Decimal Degrees.': '経度は東西方向(横)の座標軸です。緯度は南北方向(上下)の座標軸です。赤道ではゼロ、北半球ではプラス、南半球ではマイナスとなります。経度は、子午線(グリニッジ標準時)をゼロとして、東(ヨーロッパ、アジア)がプラスとなります。西(大西洋、アメリカ)がマイナスです。10進法で記入してください。',
'Longitude is West - East (sideways). Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '経度は東西(横)です。経度は子午線(グリニッジ標準時)でゼロ、東(ヨーロッパ、アジア)でプラスです。西(大西洋、アメリカ)でマイナスです。',
'Longitude is West - East (sideways).': '経度は東西です(横方向)',
'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '経度はグリニッジ子午線(グリニッジ標準時)上が0度です。東側に向かってヨーロッパやアジアの各地で正の値となります。西に向かって大西洋やアメリカの各地で負の値となります。',
'Longitude should be between': '経度の値の有効な範囲は',
'Longitude': '経度',
'Looking up Parents': '親を検索',
'Looting': '略奪',
'Lost Password': 'パスワードの紛失',
'Lost': '行方不明',
'Low': '低',
'Magnetic Storm': '磁気嵐',
'Main cash source': '主な現金収入源',
'Main income sources before disaster': '災害発生前の主な収入源',
'Major expenses': '主な費用',
'Major outward damage': '大きな損傷あり',
'Make Commitment': 'コミットの作成',
'Make Pledge': '寄付の作成',
'Make Request': '支援を要請する',
'Make a Request for Aid': '援助要請を登録',
'Make a Request': '支援要請を登録',
'Make preparations per the <instruction>': '<instruction>毎に準備作業を行う',
'Male': '男性',
'Malnutrition present prior to disaster': '災害前から栄養失調が発生していた',
'Manage Category': 'カテゴリ管理',
'Manage Item catalog': '物資カタログの管理',
'Manage Kits': 'Kitsの管理',
'Manage Relief Item Catalogue': '救援アイテムカタログの管理',
'Manage Sub-Category': 'サブカテゴリの管理',
'Manage Users & Roles': 'ユーザと役割の管理',
'Manage Warehouses/Sites': '倉庫/Sitesの管理',
'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': '支援物資、資産、人員、その他のリソースに対する要求を管理します。支援物資が要求された時に在庫と照合します。',
'Manage requests of hospitals for assistance.': '病院からの支援要請の管理',
'Manage volunteers by capturing their skills, availability and allocation': 'ボランティアのスキル、稼働状況、割り当て状況を管理します',
'Manage': '管理',
'Manager': 'マネージャ',
'Managing Office': 'オフィスの管理',
'Managing, Storing and Distributing Relief Items': '救援物資の保管、流通、配布状況を管理します',
'Managing, Storing and Distributing Relief Items.': '救援物資の管理、保存、配布状況を管理します。',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': '必須項目。GeoServerでのこの項目はレイヤー名となります。WFSの get Capabilitiesでは、コロン( : )の後に付与される FeatureTypeとして表示されます。',
'Mandatory. The URL to access the service.': '省略できません。サービスにアクセスするためのURLです。',
'Manual Synchronization': 'データ手動同期',
'Manual': 'マニュアル',
'Many': '多数',
'Map Profile added': '地図の設定を追加しました',
'Map Profile deleted': '地図設定を削除しました',
'Map Profile updated': '地図設定を更新しました',
'Map Profile': '地図の設定',
'Map Profiles': '地図の設定',
'Map Height': '地図の縦高',
'Map Service Catalog': '地図サービスカタログ',
'Map Settings': '地図の設定',
'Map Viewing Client': '地図閲覧クライアント',
'Map Width': '地図の横幅',
'Map of Hospitals': '病院の地図',
'Map': '地図',
'Mapping': 'マッピング',
'Marine Security': '海上保安',
'Marital Status': '婚姻状況',
'Marker Details': 'マーカーの詳細',
'Marker added': 'マーカーを追加しました',
'Marker deleted': 'マーカーを削除しました',
'Marker updated': 'マーカーを更新しました',
'Marker': 'マーカー',
'Markers': 'マーカー',
'Master Message Log to process incoming reports & requests': '受け取ったレポートと要求を処理するマスターメッセージログ',
'Master Message Log': 'マスターメッセージログ',
'Match Percentage': '一致率',
'Match Requests': '支援要請マッチ',
'Match percentage indicates the % match between these two records': 'マッチの割合は、2つのレコードの間のマッチ状況をあわらします',
'Matching Catalog Items': '適合する救援物資カタログ',
'Matching Records': '一致するレコード',
'Matrix of Choices (Multiple Answers)': '選択肢 (複数可)',
'Matrix of Choices (Only one answer)': '選択肢 (複数選択不可)',
'Matrix of Text Fields': 'テキストフィールドのマトリックス',
'Max Persons per Dwelling': '住居ごとの最大収容人数',
'Maximum Weight': '最大重量',
'Maximum weight capacity of the Storage Location followed by choosing the unit from the drop down list.': '最大重量| ドロップダウンリストで単位を選択してから、備蓄地点の最大重量を指定します。',
'Maximum weight capacity of the items the storage bin can contain. followed by choosing the unit from the drop down list.': 'storage binに収容することができるアイテムの最大重量を指定します。ドロップダウンリストから、単位を選択してください。',
'Measure Area: Click the points around the polygon & end with a double-click': '観測領域: 多角形の角をクリックし、ダブルクリックで終了',
'Measure Length: Click the points along the path & end with a double-click': '距離を計測: 経路上の中継点をクリックして、終点でダブルクリックしてください',
'Medical and public health': '医療、公衆衛生',
'Medicine': '薬品',
'Medium': '中',
'Megabytes per Month': '1月毎のメガバイト数',
'Member removed from Group': 'メンバシップを削除しました',
'Members': 'メンバ',
'Membership Details': 'メンバシップの詳細',
'Membership updated': 'メンバシップを更新しました',
'Membership': 'メンバシップ',
'Memberships': 'メンバシップ',
'Message Details': 'メッセージの詳細',
'Message Sent': 'メッセージが送信されました',
'Message Variable': 'メッセージ変数',
'Message added': 'メッセージを追加しました',
'Message deleted': 'メッセージを削除しました',
'Message field is required!': 'メッセージは必須です',
'Message sent to outbox': 'メッセージを送信箱に送りました',
'Message updated': 'メッセージを更新しました',
'Message variable': 'メッセージ変数',
'Message': 'メッセージ',
'Messages': 'メッセージ',
'Messaging settings updated': 'メッセージング設定を更新しました',
'Messaging': 'メッセージング',
'Metadata Details': 'メタデータの詳細',
'Metadata added': 'メタデータを追加しました',
'Metadata can be supplied here to be applied to all uploaded photos, if desired.': '必要に応じて、アップロードした全ての画像に適用されるメタデータをここで入力できます。',
'Metadata deleted': 'メタデータを削除しました',
'Metadata updated': 'メタデータを更新しました',
'Metadata': 'メタデータ',
'Meteorite': '隕石落下',
'Meteorological (inc. flood)': '気象 (洪水を含む)',
'Method used': '使用されるメソッド',
'Micronutrient malnutrition prior to disaster': '災害前から栄養失調傾向あり',
'Middle Name': 'ミドルネーム',
'Migrants or ethnic minorities': '移民、あるいは少数民族の数',
'Military': '軍隊',
'Minimum Bounding Box': '最小:領域を指定した枠組み',
'Minimum shift time is 6 hours': '最小シフト時間は6時間です。',
'Minor/None': '軽微 / なし',
'Minorities participating in coping activities': '少数民族が災害対応に従事',
'Minute': '分',
'Minutes must be a number between 0 and 60': '分には0-60の間の数字を記入してください',
'Minutes must be a number greater than 0 and less than 60': '分数は0から60の間で入力してください',
'Minutes per Month': '1月毎の分数',
'Minutes should be a number greater than 0 and less than 60': '分は0から60の間で入力してください',
'Miscellaneous': 'その他',
'Missing Person Details': '行方不明者の詳細',
'Missing Person Reports': '行方不明者レポート',
'Missing Person': '行方不明者',
'Missing Persons Registry': '行方不明者の登録',
'Missing Persons Report': '行方不明者のレポート',
'Missing Persons': '行方不明者',
'Missing Report': '行方不明レポート',
'Missing Senior Citizen': '高齢者の行方不明',
'Missing Vulnerable Person': '被介護者の行方不明',
'Missing': '行方不明',
'Mobile Assess.': '移動端末アクセス',
'Mobile Basic Assessment': 'モバイルの基本アセスメント',
'Mobile Basic': 'モバイルの基礎',
'Mobile Phone': '携帯番号',
'Mobile': 'モバイル',
'Mode': 'モード',
'Modem Settings': 'モバイル機器の設定',
'Modem settings updated': 'モバイル機器の設定を更新しました',
'Moderate': 'モデレート',
'Moderator': 'モデレータ',
'Modify Feature: Select the feature you wish to deform & then Drag one of the dots to deform the feature in your chosen manner': '地物の変更: 変形する地物を選択し、点の一つをドラッグすることで地物の形を修正可能です。',
'Modify Information on groups and individuals': 'グループと個人の情報更新',
'Modifying data in spreadsheet before importing it to the database': 'データベース登録前に、スプレッドシート内のデータ項目を修正',
'Module Administration': 'モジュール管理',
'Module disabled!': 'モジュールが無効です',
'Module provides access to information on current Flood Levels.': 'このモジュールにより、洪水の現在の水位情報にアクセス可能です',
'Module stores structured reports done by Professional Organizations - currently data includes WFP Assessments.': 'モジュールでは、専門団体によって作成された調査文書を管理します。データには、WFP(国連世界食糧計画)アセスメントも含まれます。',
'Monday': '月曜日',
'Monthly Cost': '月額費用',
'Monthly Salary': '給与(月額)',
'Months': '月',
'Morgue Status': '死体安置所のステータス',
'Morgue Units Available': '死体安置所の収容可能数',
'Mosque': 'モスク',
'Motorcycle': 'オートバイ',
'Moustache': '口ひげ',
'Move Feature: Drag feature to desired location': 'Featureの移動: Feature を希望するロケーションにドラッグしてください',
'Movements (Filter In/Out/Lost)': '活動 (フィルター イン/アウト/ロスト)',
'MultiPolygon': 'マルチポリゴン',
'Multiple Choice (Multiple Answers)': '複数選択(複数回答)',
'Multiple Choice (Only One Answer)': '複数選択(1つだけ回答)',
'Multiple Matches': '複数の結果が適合しました',
'Multiple Text Fields': '複数の入力項目',
'Multiple': '複数',
'Multiplicator': '乗数',
'Muslim': 'イスラム教徒',
'Must a location have a parent location?': 'ある場所にはその親の場所が無ければならないですか?',
'My Current function': '現在登録している機能',
'My Tasks': '自分のタスク',
'N/A': '該当なし',
'NZSEE Level 1': 'NZSEE レベル1',
'NZSEE Level 2': 'NZSEE レベル 2',
'Name and/or ID Label': '名前および/またはIDラベル',
'Name and/or ID': '名前および/またはID',
'Name of Storage Bin Type.': '物資保管タイプの名前です。',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'ヘッダーの背景に使用される、static にあるファイルの名前 (オプションでサブパス)。',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': '左上の画像で静的位置を表すファイル名(サブパス名はオプション)',
'Name of the file (& optional sub-path) located in views which should be used for footer.': 'フッターに使われるビューにあるファイル名 (オプションとしてサブパス)。',
'Name of the person in local language and script (optional).': '現地言語での名前と表記(オプション)',
'Name of the unit or department this report refers to. Leave empty if your hospital has no subdivisions.': 'このレポートに関連する組織や部署の名前。部署をもたない病院の場合は空欄にしてください。',
'Name or Job Title': '名前あるいは役職名',
'Name': '名前',
'Name, Org and/or ID': '名前、組織、IDなど',
'Name/Model/Type': '名前/ モデル/タイプ',
'Name: ': '名前: ',
'Names can be added in multiple languages': '名前は、複数の言語で記述することができます。',
'National ID Card': 'ナショナルIDカード',
'National NGO': '国内NPO',
'National Staff': '現地スタッフ',
'Nationality of the person.': 'この人物の国籍です。',
'Nationality': '国籍',
'Nautical Accident': '船舶事故',
'Nautical Hijacking': '船舶ハイジャック',
'Need Type Details': '需要タイプの詳細',
'Need Type added': '需要タイプを追加しました',
'Need Type deleted': '需要タイプを削除しました',
'Need Type updated': '需要タイプを更新しました',
'Need Type': '需要タイプ',
'Need Types': '需要タイプ',
'Need added': 'ニーズを追加しました',
'Need deleted': 'ニーズを削除しました',
'Need to be logged-in to be able to submit assessments': '評価を確定させるには、ログインが必要です',
'Need to configure Twitter Authentication': 'Twitterの認証を設定する必要があります',
'Need to select 2 Locations': 'ロケーションを2つ指定してください',
'Need to specify a Budget!': '予算を指定する必要があります。',
'Need to specify a Kit!': 'Kitを指定する必要があります。',
'Need to specify a Resource!': 'リソースを指定する必要があります。',
'Need to specify a bundle!': 'bundleを指定する必要があります。',
'Need to specify a group!': 'グループを指定する必要があります。',
'Need to specify a location to search for.': '検索対象となるロケーションを指定する必要があります。',
'Need to specify a role!': '役割を指定する必要があります。',
'Need to specify a service!': 'サービスを指定してください!',
'Need to specify a table!': 'テーブルを指定する必要があります。',
'Need to specify a user!': 'ユーザを指定する必要があります。',
'Need updated': 'ニーズを更新しました',
'Needs Details': '需要の詳細',
'Needs to reduce vulnerability to violence': '暴力行為の対策として必要な物資 / サービス',
'Needs': '要求',
'Negative Flow Isolation': '逆流の分離',
'Neighbourhood': '近隣',
'Neighbouring building hazard': '隣接ビルが危険な状態',
'Neonatal ICU': '新生児ICU',
'Neonatology': '新生児科',
'Network': 'ネットワーク',
'Neurology': '神経科',
'New Assessment reported from': '新規アセスメントの報告元',
'New Checklist': '新規チェックリスト',
'New Peer': '新しいデータ同期先',
'New Record': '新規レコード',
'New Report': '新規レポート',
'New Request': '新規の支援要請',
'New Solution Choice': '新しい解決案を選択',
'New Support Request': '新しい支援要請',
'New Synchronization Peer': '新しい同期先',
'New cases in the past 24h': '過去24時間の新規ケース数',
'New': '新規',
'News': 'ニュース',
'Next View': '次を表示',
'Next': '次へ',
'No Activities Found': '支援活動が見つかりませんでした',
'No Addresses currently registered': '住所は、まだ登録がありません。',
'No Aid Requests have been made yet': '援助要請がまだ作成されていません',
'No Alternative Items currently registered': '代替物資は現在登録されていません',
'No Assessment Summaries currently registered': 'アセスメントの要約が登録されていません',
'No Assessments currently registered': '登録済みのアセスメントがありません',
'No Asset Assignments currently registered': '現在のところ資産割り当ては登録されていません',
'No Assets currently registered': '登録されている資産は現在ありません。',
'No Baseline Types currently registered': '登録済みのBaseline Typesはありません',
'No Baselines currently registered': '登録されている基準値はありません',
'No Brands currently registered': '登録されている銘柄がありません',
'No Budgets currently registered': '予算は、まだ登録がありません。',
'No Bundles currently registered': 'Bundleは、まだ登録がありません。',
'No Catalog Items currently registered': '登録済みのカタログアイテムがありません',
'No Category<>Sub-Category<>Catalog Relation currently registered': 'Category<>Sub-Category<>Catalog間の関係は、まだ登録がありません。',
'No Checklist available': '利用可能なチェックリストがありません',
'No Cluster Subsectors currently registered': 'クラスタのサブセクタはまだ登録がありません',
'No Clusters currently registered': '登録済みのクラスタはありません',
'No Commitment Items currently registered': '現在のところコミット済み物資は登録されていません',
'No Commitments': 'コミットメントがありません',
'No Configs currently defined': '設定は、まだ定義されていません',
'No Credentials currently set': '現在のところ証明書が設定されていません',
'No Details currently registered': '詳細は、まだ登録されていません',
'No Distribution Items currently registered': '配給物資の登録がありません',
'No Distributions currently registered': '配給所の登録がありません',
'No Documents found': '文書が見つかりませんでした。',
'No Donors currently registered': '資金提供組織はまだ登録されていません',
'No Feature Layers currently defined': 'Feature Layersはまだ定義されていません',
'No Flood Reports currently registered': '登録済みの洪水情報はありません',
'No GPX Layers currently defined': 'GPXレイヤはまだ定義されていません',
'No Groups currently defined': 'グループはまだ定義されていません',
'No Groups currently registered': 'グループはまだ登録されていません',
'No Hospitals currently registered': '病院はまだ登録されていません',
'No Identification Report Available': '利用可能なIDレポートはありません',
'No Identities currently registered': '登録されているIDはありません',
'No Image': '画像なし',
'No Images currently registered': '画像の登録はありません',
'No Impact Types currently registered': '被害の種類は未登録です',
'No Impacts currently registered': 'これまでに登録されたImpactはありません',
'No Incident Reports currently registered': '登録されているインシデントレポートはありません',
'No Incidents currently registered': '登録済みのインシデントはありません。',
'No Incoming Shipments': '到着予定の輸送物資はありません',
'No Inventory Items currently registered': '備蓄物資の登録がありません',
'No Inventory Stores currently registered': '現在登録されている物資集積地点はありません',
'No Item Catalog Category currently registered': '救援物資カタログのカテゴリはまだ登録がありません',
'No Item Catalog currently registered': 'アイテムカタログはまだ登録されていません',
'No Item Categories currently registered': '救援物資カテゴリの登録がありません',
'No Item Packs currently registered': '救援物資のパックは、まだ登録がありません',
'No Item Sub-Category currently registered': '救援物資のサブカテゴリはまだ登録されていません',
'No Item currently registered': 'アイテムはまだ登録されていません',
'No Items currently registered': '物資はまだ登録されていません',
'No Items currently requested': '要求されている物資はありません',
'No Keys currently defined': 'Keyはまだ定義されていません',
'No Kits currently registered': 'Kitはまだ登録されていません',
'No Level 1 Assessments currently registered': '現在のところ、レベル1アセスメントは登録されていません',
'No Level 2 Assessments currently registered': '現在のところ、レベル2アセスメントは登録されていません',
'No Locations currently available': '現在利用可能なロケーションはありません',
'No Locations currently registered': 'ロケーションはまだ登録されていません',
'No Map Profiles currently defined': '地図の設定が定義されていません',
'No Markers currently available': '現在利用可能なマーカーはありません',
'No Match': '合致する結果がありません',
'No Matching Catalog Items': '適合する救援物資はありませんでした',
'No Matching Records': '適合する検索結果がありませんでした',
'No Members currently registered': 'メンバはまだ登録されていません',
'No Memberships currently defined': 'メンバシップはまだ登録されていません',
'No Messages currently in Outbox': '送信箱にメッセージがありません',
'No Metadata currently defined': 'メタデータはまだ定義されていません',
'No Need Types currently registered': '現在登録されている需要タイプはありません',
'No Needs currently registered': '現在要求は登録されていません',
'No Offices currently registered': 'オフィスはまだ登録されていません',
'No Offices found!': 'オフィスが見つかりませんでした',
'No Organizations currently registered': '団体はまだ登録されていません',
'No Packs for Item': 'この物資に対する救援物資パックはありません',
'No Peers currently registered': '登録済みのデータ同期先はありません',
'No People currently registered in this shelter': 'この避難所に登録されている人物情報はありません',
'No Persons currently registered': '人物情報はまだ登録されていません',
'No Persons currently reported missing': '現在、行方不明者の登録はありません',
'No Persons found': '該当する人物はいませんでした',
'No Photos found': '写真の登録がありません',
'No Picture': '写真がありません',
'No Presence Log Entries currently registered': '所在地履歴の登録がありません',
'No Problems currently defined': '定義済みの問題がありません',
'No Projections currently defined': '地図投影法は、まだ定義されていません。',
'No Projects currently registered': '定義済みのプロジェクトはありません',
'No Rapid Assessments currently registered': '被災地の現況アセスメントはまだ登録されていません',
'No Received Items currently registered': '受領された救援物資の登録はありません',
'No Received Shipments': '受け取った輸送はありません',
'No Records currently available': '利用可能なレコードはありません',
'No Records matching the query': '条件に当てはまるレコードが存在しません',
'No Request Items currently registered': '物資要請の登録がありません',
'No Requests have been made yet': '支援要請は、まだ行われていません',
'No Requests match this criteria': 'この条件に一致する支援要請はありません',
'No Requests': '支援要請がありません',
'No Responses currently registered': '返答はまだ登録されていません',
'No Rivers currently registered': '河川情報の登録がありません',
'No Roles currently defined': '役割はまだ定義されていません',
'No Sections currently registered': 'このセクションの登録情報がありません',
'No Sectors currently registered': '登録済みの活動分野がありません',
'No Sent Items currently registered': '送付した物資の登録がありません',
'No Sent Shipments': '送付が行われた輸送がありません',
'No Settings currently defined': '設定は、まだ定義されていません',
'No Shelter Services currently registered': '登録されている避難所サービスがありません',
'No Shelter Types currently registered': '登録済みの避難所タイプがありません',
'No Shelters currently registered': '避難所はまだ登録されていません',
'No Shipment Transit Logs currently registered': '物資輸送履歴の登録がありません',
'No Shipment/Way Bills currently registered': '輸送費/Way Billsはまだ登録されていません',
'No Shipment<>Item Relation currently registered': '輸送とアイテムの関連付けはまだ登録されていません',
'No Sites currently registered': '登録されているサイトはありません',
'No Skill Types currently set': '設定済みのスキルタイプはありません',
'No Solutions currently defined': '解決案はまだ定義されていません',
'No Staff Types currently registered': 'スタッフタイプはまだ登録されていません',
'No Staff currently registered': 'スタッフはまだ登録されていません',
'No Storage Bin Type currently registered': '登録済みのStorage Binタイプがありません',
'No Storage Bins currently registered': 'Storage Binはまだ登録されていません',
'No Storage Locations currently registered': '登録されている備蓄地点がありません',
'No Subscription available': '寄付の申し込みがありません',
'No Support Requests currently registered': '現在のところ、支援要請は登録されていません',
'No Survey Answers currently registered': 'これまでに登録されたフィードバックの回答はありません',
'No Survey Questions currently registered': '登録済みのSurvey Questionsはありません',
'No Survey Sections currently registered': '登録済みのSurvey Sectionはありません',
'No Survey Series currently registered': '現在、調査報告は登録されていません',
'No Survey Template currently registered': '登録されている調査テンプレートがありません',
'No TMS Layers currently defined': 'TMS レイヤーがまだ定義されていません',
'No Tasks with Location Data': 'ロケーション情報を持っているタスクがありません',
'No Themes currently defined': 'テーマはまだ定義されていません',
'No Tickets currently registered': 'チケットはまだ登録されていません',
'No Tracks currently available': '利用可能な追跡情報はありません',
'No Units currently registered': '単位はまだ登録されていません',
'No Users currently registered': '登録済みのユーザがありません',
'No Volunteers currently registered': 'ボランティアの登録がありません',
'No Warehouse Items currently registered': '現在登録済みの倉庫物資はありません',
'No Warehouses currently registered': '倉庫が登録されていません',
'No Warehouses match this criteria': '条件に合致する倉庫がありません',
'No access at all': '完全に孤立中',
'No access to this record!': 'このレコードにはアクセスできません',
'No action recommended': 'アクション無しを推奨',
'No calculations made': '見積が作成されていません',
'No conflicts logged': 'コンフリクトのログはありません。',
'No contact information available': '利用可能な連絡先情報はありません',
'No contacts currently registered': '連絡先が登録されていません',
'No data in this table - cannot create PDF!': 'テーブルにデータがありません。PDF を作成できません。',
'No databases in this application': 'このアプリケーションにデータベースはありません',
'No dead body reports available': '遺体情報のレポートはありません',
'No entries found': 'エントリが見つかりません',
'No entries matching the query': 'クエリに一致するエントリはありませんでした。',
'No import jobs': 'インポートされたJobがありません',
'No linked records': 'リンクされているレコードはありません',
'No location known for this person': 'この人物の消息が不明です',
'No locations found for members of this team': 'このチームのメンバーの場所が見つかりませんでした',
'No locations registered at this level': 'この階層に登録されているロケーションはありません',
'No log entries matching the query': '検索に合致するログエントリがありません',
'No matching items for this request': 'この支援要請に適合する物資はありません',
'No matching records found.': '一致するレコードがありませんでした。',
'No messages in the system': 'システム上にメッセージが存在しません',
'No notes available': '追加情報はありません',
'No peers currently registered': '現在登録されているデータ同期先はありません',
'No pending registrations found': '処理保留中の登録申請はありません',
'No pending registrations matching the query': '検索に合致する処理保留登録申請がありません。',
'No person record found for current user.': '現在のユーザの人物情報レコードが見つかりませんでした。',
'No positions currently registered': '登録されているpositionがありません',
'No problem group defined yet': '定義済みの問題グループがありません。',
'No records matching the query': '条件に当てはまるレコードが存在しません',
'No records to delete': '削除するレコードがありません',
'No recovery reports available': '利用可能な遺体回収レポートはありません',
'No report available.': '利用可能なレポートはありません。',
'No reports available.': '利用可能なレポートがありません。',
'No reports currently available': '利用可能なレポートはありません',
'No requests found': '支援要請は見つかりませんでした',
'No resources currently registered': 'リソースはまだ登録されていません',
'No resources currently reported': 'レポート済みのリソースはありません',
'No service profile available': '利用可能なサービスプロファイルはありません',
'No skills currently set': 'スキルが登録されていません',
'No status information available': '状況に関する情報はありません',
'No synchronization': '同期なし',
'No tasks currently registered': 'タスクはまだ登録されていません',
'No template found!': 'テンプレートが見つかりません。',
'No units currently registered': '単位はまだ登録されていません',
'No volunteer information registered': 'ボランティア情報はまだ登録されていません',
'No': 'いいえ',
'Non-structural Hazards': 'その他の災害',
'None (no such record)': 'なし(記録がありません)',
'None': 'なし',
'Noodles': '麺',
'Normal food sources disrupted': '普段の食料供給源が混乱している',
'Normal': '通常どおり',
'Not Applicable': '該当なし',
'Not Authorised!': '認証されていません',
'Not Possible': '対応不可',
'Not Set': '設定されていません',
'Not Authorized': '認証されていません',
'Not installed or incorrectly configured.': 'インストールされていないか、適切な設定がされていません',
'Not yet a Member of any Group': 'メンバシップはまだ登録されていません',
'Note Details': '追加情報の詳細',
'Note Status': '状態を記録',
'Note Type': '追加情報の種類',
'Note added': '追加情報を追加しました',
'Note deleted': '追加情報を削除しました',
'Note that this list only shows active volunteers. To see all people registered in the system, do a search from the home screen instead': '注意:このリストは、活動中のボランティアのみ表示しています。システムに登録しているすべての人をみるには、ホーム・スクリーンから検索してください。',
'Note updated': '追加情報を更新しました',
'Note': '追加情報',
'Notes': '追加情報',
'Notice to Airmen': 'NOTAM (航空従事者用)',
'Number of Columns': '列数',
'Number of Patients': '患者数',
'Number of Rows': '行数',
'Number of Vehicles': '車両数',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'この施設において、今後24時間以内に利用可能になると予測されている、このタイプの追加ベッド数。',
'Number of alternative places for studying': '授業用に確保できる場所の数',
'Number of available/vacant beds of that type in this unit at the time of reporting.': 'このタイプの利用可能/空きベッド数(報告時点)',
'Number of deaths during the past 24 hours.': '過去24時間以内の死亡者数',
'Number of discharged patients during the past 24 hours.': '退院患者数(過去24時間以内)',
'Number of doctors actively working': '現在活動中の医師の数',
'Number of doctors': '医者の人数',
'Number of houses damaged, but usable': '破損しているが利用可能な家屋の数',
'Number of houses destroyed/uninhabitable': '全壊/居住不可になった家屋数',
'Number of in-patients at the time of reporting.': 'レポート時の患者数です。',
'Number of latrines': 'トイレ総数',
'Number of midwives actively working': '現在活動中の助産師の数',
'Number of newly admitted patients during the past 24 hours.': '入院患者数(過去24時間以内)',
'Number of non-medical staff': '医療従事以外のスタッフ数',
'Number of nurses actively working': '現在活動中の看護師の数',
'Number of nurses': '看護師の人数',
'Number of private schools': '私立学校の数',
'Number of public schools': '公立学校の数',
'Number of religious schools': '宗教学校の数',
'Number of residential units not habitable': '住めなくなった住居の数',
'Number of residential units': '居住施設の数',
'Number of schools damaged but usable': '破損しているが利用可能な校舎の数',
'Number of schools destroyed/uninhabitable': '全壊 / 利用不可能な校舎の数',
'Number of schools open before disaster': '災害前に開校していた学校数',
'Number of schools open now': '現在開校している学校の数',
'Number of teachers affected by disaster': '被災した教師の数',
'Number of teachers before disaster': '災害発生前の教師の数',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': '病院に設置されている、現在利用可能なベッドの数。日時レポートにより、自動的に更新されます。',
'Number of vacant/available units to which victims can be transported immediately.': '現在利用可能なユニット数。犠牲者を即座に安置できる数。',
'Number or Label on the identification tag this person is wearing (if any).': 'この人物の衣服につけられているタグの番号、あるいはラベル名(ある場合のみ).',
'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': 'この場所をあとで検索するための番号かコード 例: フラグ番号、グリッドの位置、サイトの参照番号など',
'Number': '番号',
'Number/Percentage of affected population that is Female & Aged 0-5': '女性(0-5歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 13-17': '女性(13-17歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 18-25': '女性(18-25歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 26-60': '女性(26-60歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 6-12': '女性(6-12歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Female & Aged 61+': '女性(61歳以上)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 0-5': '男性(0-5歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 13-17': '男性(13-17歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 18-25': '男性(18-25歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 26-60': '男性(26-60歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 6-12': '男性(6-12歳)の被災者数 / 割合',
'Number/Percentage of affected population that is Male & Aged 61+': '男性(61歳以上)の被災者数 / 割合',
'Numbers Only': '数値のみ',
'Nursery Beds': '看護ベッド',
'Nutrition problems': '栄養問題',
'Nutrition': '食料・栄養',
'OR Reason': '手術室の詳細',
'OR Status Reason': '手術室の状態理由',
'OR Status': '手術室の状態',
'Observer': 'オブザーバ',
'Obsolete': '廃止済み',
'Obstetrics/Gynecology': '産婦人科',
'Office Address': 'オフィスの住所',
'Office Details': 'オフィスの詳細',
'Office added': 'オフィスを追加しました',
'Office deleted': 'オフィスを削除しました',
'Office updated': 'オフィスを更新しました',
'Office': 'オフィス',
'Offices': 'オフィス',
'Offline Sync (from USB/File Backup)': 'データのオフライン同期(USB/バックアップファイル利用)',
'Offline Sync': 'データのオフライン同期',
'Old': '古い',
'Older people as primary caregivers of children': '子供の介護を、高齢者が担当',
'Older people in care homes': '介護施設で生活する高齢者がいる',
'Older people participating in coping activities': '高齢者が災害対応に従事',
'Older people with chronical illnesses': '慢性疾患をもつ高齢者がいる',
'Older person (>60 yrs)': '高齢者(60歳以上)',
'On by default? (only applicable to Overlays)': 'デフォルトでオン(オーバーレイにのみ有効)',
'On by default?': 'デフォルトでON?',
'One Time Cost': '1回毎の費用',
'One time cost': '一回毎の費用',
'One-time costs': '一回毎の費用',
'One-time': '1回毎',
'Oops! Something went wrong...': '申し訳ありません、何か問題が発生しています。',
'Oops! something went wrong on our side.': '申し訳ありません、システム側に問題が発生しています。',
'Opacity (1 for opaque, 0 for fully-transparent)': '不透明度(1は不透明、0は完全に透明)',
'Open Assessment': '未解決のアセスメント',
'Open Map': '地図を開く',
'Open area': '空き地',
'Open recent': '最近使用したものを開く',
'Open': '開く',
'OpenStreetMap Editor': 'OpenStreetMap エディタ',
'Operating Rooms': '手術室',
'Optional link to an Incident which this Assessment was triggered by.': 'このアセスメントの端緒となった事故へのオプション・リンク',
'Optional': '任意',
'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'オプション。GeoServerでは、ワークスペース名前空間のURIです。WFS getCapabilitiesでは、FeatureType名のコロンの前の部分です。',
'Options': 'オプション',
'Organization Details': '団体の詳細',
'Organization Registry': '団体情報の登録',
'Organization added': '団体を追加しました',
'Organization deleted': '団体を削除しました',
'Organization updated': '団体を更新しました',
'Organization': '団体',
'Organizations': '団体',
'Origin of the separated children': '離別した子供たちの出身地',
'Origin': '出身地',
'Other (describe)': 'その他 (要記述)',
'Other (specify)': 'その他(具体的に)',
'Other Evidence': 'その他の証跡',
'Other Faucet/Piped Water': 'その他 蛇口/パイプによる水源',
'Other Isolation': 'その他の孤立',
'Other Name': 'その他の名前',
'Other activities of boys 13-17yrs before disaster': 'その他、災害発生前の13-17歳男子の活動状況',
'Other activities of boys 13-17yrs': 'その他、13-17歳男子の活動状況',
'Other activities of boys <12yrs before disaster': 'その他、災害発生前の12歳以下男子の活動状況',
'Other activities of boys <12yrs': 'その他、12歳以下男子の活動状況',
'Other activities of girls 13-17yrs before disaster': 'その他、災害発生前の13-17歳女子の活動状況',
'Other activities of girls 13-17yrs': 'その他、13-17歳女子の活動状況',
'Other activities of girls<12yrs before disaster': 'その他、災害発生前の12歳以下女子の活動状況',
'Other activities of girls<12yrs': 'その他、12歳以下女子の活動状況',
'Other alternative infant nutrition in use': 'その他、使用されている乳児用代替食',
'Other alternative places for study': 'その他、授業開設に利用可能な施設',
'Other assistance needed': 'その他に必要な援助活動',
'Other assistance, Rank': 'その他の援助、ランク',
'Other current health problems, adults': 'その他の健康問題(成人)',
'Other current health problems, children': 'その他の健康問題(小児)',
'Other events': '他のイベント',
'Other factors affecting school attendance': 'その他、生徒の就学に影響する要因',
'Other major expenses': 'その他の主な支出',
'Other non-food items': '食料以外の救援物資',
'Other recommendations': '他の推薦',
'Other residential': '住宅その他',
'Other school assistance received': 'その他の学校用品を受領した',
'Other school assistance, details': '受領した学校用品の内訳',
'Other school assistance, source': 'その他の学校用品の送付元',
'Other side dishes in stock': '在庫のあるその他食材',
'Other types of water storage containers': 'それ以外の水貯蔵容器タイプ',
'Other ways to obtain food': 'それ以外の食料調達方法',
'Other': 'その他',
'Outbound Mail settings are configured in models/000_config.py.': '送信メール設定は、models/000_config.py で定義されています。',
'Outbox': '送信箱',
'Outgoing SMS Handler': 'SMS 送信ハンドラ',
'Outgoing SMS handler': 'SMS送信ハンドラ',
'Overall Hazards': 'すべての危険',
'Overhead falling hazard': '頭上落下物の危険',
'Overland Flow Flood': '陸上の洪水流量',
'Overlays': 'オーバーレイ',
'Owned Records': '自身のレコード',
'Owned Resources': '保持しているリソース',
'PDAM': '水道会社(PDAM)',
'PIN number ': 'PIN 番号',
'PIN': '暗証番号',
'PL Women': 'PL 女性',
'Pack': 'パック',
'Packs': 'パック',
'Pan Map: keep the left mouse button pressed and drag the map': 'マップをパン: マウスの左ボタンを押したまま、地図をドラッグしてください',
'Parameters': 'パラメータ',
'Parapets, ornamentation': '欄干、オーナメント',
'Parent Office': '親組織のオフィス',
'Parent needs to be of the correct level': '適切なレベルの親属性を指定してください',
'Parent needs to be set for locations of level': 'ロケーションのレベルには親属性が必要です',
'Parent needs to be set': '親情報が設定される必要があります',
'Parent': '親',
'Parents/Caregivers missing children': '親/介護者とはぐれた子供たち',
'Partial': '一部 / 不足',
'Participant': '参加者',
'Pashto': 'パシュトー語',
'Passport': 'パスポート',
'Password for authentication at the peer. Note that only HTTP Basic authentication is supported.': 'Password for authentication at the peer. HTTPベーシック認証のみサポートしています。',
'Password': 'パスワード',
'Path': 'パス',
'Pathology': '病理学',
'Patients': '患者数',
'Pediatric ICU': '小児ICU',
'Pediatric Psychiatric': '小児精神科',
'Pediatrics': '小児科医',
'Peer Details': 'データ同期先の詳細',
'Peer Registration Details': 'データ同期先登録の詳細',
'Peer Registration Request': 'データ同期先の登録要求',
'Peer Registration': 'データ同期先登録',
'Peer Type': '同期先タイプ',
'Peer UID': '同期先UID',
'Peer added': 'データ同期先を追加しました',
'Peer deleted': 'データ同期先を削除しました',
'Peer not allowed to push': '同期先がデータのプッシュを許可していません',
'Peer registration request added': 'データ同期先の登録要求を追加しました',
'Peer registration request deleted': 'データ同期先の登録要求を削除しました',
'Peer registration request updated': 'データ同期先の登録要求を更新しました',
'Peer updated': '同期先を更新しました',
'Peer': 'データ同期先',
'Peers': '同期先',
'Pending Requests': '保留中の支援要請',
'Pending': '保留中',
'People Needing Food': '食料不足',
'People Needing Shelter': '避難所が必要',
'People Needing Water': '水が必要',
'People Trapped': '救難者',
'People with chronical illnesses': '慢性疾患をもつ成人がいる',
'People': '人物情報',
'Person 1': '人物 1',
'Person 1, Person 2 are the potentially duplicate records': '人物情報1と人物情報2は重複したレコードの可能性があります。',
'Person 2': '人物 2',
'Person Data': '人物データ',
'Person De-duplicator': '人物情報の重複削除',
'Person Details': '人物情報の詳細',
'Person Finder': '消息情報',
'Person Registry': '人物情報の登録',
'Person added to Group': 'グループメンバを追加しました',
'Person added to Team': 'グループメンバを追加しました',
'Person added': '人物情報を追加しました',
'Person deleted': '人物情報を削除しました',
'Person details updated': '人物情報を更新しました',
'Person interviewed': 'インタビュー担当者',
'Person missing': '行方不明中',
'Person must be specified!': '人物を指定してください',
'Person reporting': 'レポート報告者',
'Person who has actually seen the person/group.': '人物/グループで実際に目撃された人物情報',
'Person who is reporting about the presence.': 'この所在報告を行った人物です。',
'Person who observed the presence (if different from reporter).': '人物の所在を確認したひとの情報(報告者と異なる場合のみ記入)。',
'Person': '人物情報',
'Person/Group': '人物/グループ',
'Personal Data': '個人情報',
'Personal Effects Details': '個人の影響の詳細',
'Personal Effects': '所持品',
'Personal impact of disaster': 'この人物の被災状況',
'Personal': '個人',
'Persons in institutions': '施設居住中の住人',
'Persons with disability (mental)': '障がい者数(精神的障がい者を含む)',
'Persons with disability (physical)': '肉体的な障がい者の数',
'Persons': '人物情報',
'Phone 1': '電話番号',
'Phone 2': '電話番号(予備)',
'Phone': '電話番号',
'Phone/Business': '電話番号/仕事',
'Phone/Emergency': '電話番号/緊急連絡先',
'Phone/Exchange': '電話/とりつぎ',
'Photo Details': '写真の詳細',
'Photo Taken?': '写真撮影済み?',
'Photo added': '写真を追加しました',
'Photo deleted': '写真を削除しました',
'Photo updated': '写真を更新しました',
'Photo': '写真',
'Photograph': '写真',
'Photos': '写真',
'Physical Description': '身体外見の説明',
'Physical Safety': '身体的安全',
'Picture upload and finger print upload facility': '指紋や写真のアップロード機能',
'Picture': '写真',
'Place for solid waste disposal': '廃棄物の処理を行う場所を記載してください',
'Place of Recovery': '遺体回収場所',
'Place on Map': '地図上の場所',
'Places for defecation': 'トイレ',
'Places the children have been sent to': '子供たちの避難先',
'Planner': '立案者',
'Playing': '家庭内/外で遊ぶ',
'Please correct all errors.': 'すべてのエラーを修正してください。',
'Please enter a First Name': '苗字を入力してください',
'Please enter a valid email address': '有効な電子メールアドレスを入力してください。',
'Please enter the first few letters of the Person/Group for the autocomplete.': '自動入力するには人物あるいはグループの最初の数文字を入力してください',
'Please enter the recipient': '受取担当者を入力してください',
'Please fill this!': 'ここに入力してください',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened. If a ticket was issued then please provide the Ticket ID.': '言及先のURLを明示し、期待する結果と実際に発生した結果を記述してください。不具合チケットが発行された場合は、そのチケットIDも記載してください。',
'Please report here where you are:': 'いまあなたが居る場所を入力してください。',
'Please select another level': '別のレベルを選択してください',
'Please select': '選んでください',
'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': '携帯電話番号でサインアップし、Sahanaからのテキストメッセージを受け取れるようにします。国際電話コードまで含めた形式で入力してください',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': '病気の治療に当たって問題となる事象の詳細を記載します。状況を改善するための提案も、もしあれば記載してください。',
'Please use this field to record any additional information, including a history of the record if it is updated.': '追加情報はこの項目に記載してください。レコードの変更履歴などにも利用可能です。',
'Please use this field to record any additional information, including any Special Needs.': '特別な要求など、どんな追加情報でも構いませんので、この部分に記録してください',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'UshahidiのインスタンスIDなど、追加情報がある場合はこの項目に記載してください。レコードの変更履歴などにも利用可能です。',
'Pledge Aid to match these Requests': 'これらの要求に一致する支援に寄付する',
'Pledge Aid': '寄付する',
'Pledge Status': '寄付のステータス',
'Pledge Support': '寄付サポート',
'Pledge': '寄付',
'Pledged': '寄付済み',
'Pledges': '寄付',
'Point': 'ポイント',
'Poisoning': '中毒',
'Poisonous Gas': '有毒ガス',
'Police': '警察',
'Pollution and other environmental': '汚染、あるいはその他の環境要因',
'Polygon reference of the rating unit': 'その評価単位への参照ポリゴン',
'Polygon': 'ポリゴン',
'Population and number of households': '人口と世帯数',
'Population': '利用者数',
'Porridge': 'おかゆ',
'Port Closure': '港湾閉鎖',
'Port': 'ポート',
'Position Details': 'ポジションの詳細',
'Position added': 'Position を追加しました',
'Position deleted': 'ポジションを削除しました',
'Position type': '場所のタイプ',
'Position updated': 'ポジションを更新しました',
'Positions': 'ポジション',
'Postcode': '郵便番号',
'Poultry restocking, Rank': '家禽の補充、ランク',
'Poultry': '家禽(ニワトリ)',
'Pounds': 'ポンド',
'Power Failure': '停電',
'Pre-cast connections': 'プレキャスト連結',
'Preferred Name': '呼び名',
'Pregnant women': '妊婦の数',
'Preliminary': '予備',
'Presence Condition': '所在情報',
'Presence Log': '所在履歴',
'Presence': '所在',
'Previous View': '前を表示',
'Previous': '前へ',
'Primary Name': '基本名',
'Primary Occupancy': '主要な従事者',
'Priority Level': '優先度レベル',
'Priority': '優先度',
'Private': '企業',
'Problem Administration': '問題管理',
'Problem Details': '問題の詳細',
'Problem Group': '問題グループ',
'Problem Title': '問題の名称',
'Problem added': '問題を追加しました',
'Problem connecting to twitter.com - please refresh': 'twitter.comに接続できません。更新ボタンを押してください',
'Problem deleted': '問題を削除しました',
'Problem updated': '問題を更新しました',
'Problem': '問題',
'Problems': '問題',
'Procedure': '手続き',
'Procurements': '物資の調達',
'Product Description': '製品の説明',
'Product Name': '製品名',
'Profile': 'プロファイル',
'Project Activities': 'プロジェクト活動状況',
'Project Details': 'プロジェクトの詳細',
'Project Management': 'プロジェクト管理',
'Project Status': 'プロジェクトのステータス',
'Project Tracking': 'プロジェクト追跡',
'Project added': 'プロジェクトを追加しました',
'Project deleted': 'プロジェクトを削除しました',
'Project has no Lat/Lon': 'プロジェクトの緯度/経度情報はありません',
'Project updated': 'プロジェクトを更新しました',
'Project': 'プロジェクト',
'Projection Details': '地図投影法の詳細',
'Projection added': '地図投影法を追加しました',
'Projection deleted': '地図投影法を削除しました',
'Projection updated': '地図投影法を更新しました',
'Projection': '地図投影法',
'Projections': '地図投影法',
'Projects': 'プロジェクト',
'Property reference in the council system': '評議システムで使用されるプロパティリファレンス',
'Protected resource': '保護されたリソース',
'Protection': '被災者保護',
'Provide Metadata for your media files': 'メディアファイルにメタデータを提供',
'Provide a password': 'パスワードを入力',
'Provide an optional sketch of the entire building or damage points. Indicate damage points.': '建物全体か損傷箇所のスケッチを提供し、損傷箇所を明示してください。',
'Province': '都道府県',
'Proxy-server': 'プロキシサーバ',
'Psychiatrics/Adult': '精神病/成人',
'Psychiatrics/Pediatric': '精神病/小児',
'Public Event': '公開イベント',
'Public and private transportation': '公共および民営の交通機関',
'Public assembly': '公会堂',
'Public': '公開',
'Pull tickets from external feed': '外部フィードからのticketの取得',
'Punjabi': 'パンジャブ',
'Push tickets to external system': '外部システムにチケットの発信',
'Put a choice in the box': '箱の中から選んで取る',
'Pyroclastic Flow': '火砕流',
'Pyroclastic Surge': '火砕サージ',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'PythonでPython Serial moduleが利用できません。モデムの有効化に必要です。',
'Python needs the ReportLab module installed for PDF export': '実行中のPythonでReportLabモジュールが利用できません。PDF出力に必要です。',
'Quantity Committed': '引き受けた量',
'Quantity Fulfilled': '充足済みの数量',
'Quantity in Transit': '運送中の数量',
'Quantity': '数量',
'Quarantine': '隔離施設',
'Queries': 'クエリ',
'Query Feature': '問合せ機能',
'Query': 'クエリ',
'Queryable?': '検索可能?',
'RC frame with masonry infill': '鉄骨入りコンクリートブロック',
'RECORD A': 'レコード A',
'RECORD B': 'レコード B',
'RESPONSE': '対応',
'Race': '人種',
'Radiological Hazard': '放射能災害',
'Radiology': '放射線科',
'Railway Accident': '鉄道事故',
'Railway Hijacking': '鉄道ハイジャック',
'Rain Fall': '降雨',
'Rapid Assessment Details': '被災地の現況アセスメントの詳細',
'Rapid Assessment added': '被災地の現況アセスメントを追加しました',
'Rapid Assessment deleted': '被災地の現況アセスメントを削除しました',
'Rapid Assessment updated': '被災地の現況アセスメントを更新しました',
'Rapid Assessment': '被災地の現況アセスメント',
'Rapid Assessments & Flexible Impact Assessments': '被災地の現況アセスメントと、災害影響範囲アセスメント',
'Rapid Assessments': '被災地の現況アセスメント',
'Rapid Close Lead': '急いで閉め、先導してください。',
'Rapid Data Entry': 'データ入力簡易版',
'Rating Scale': '評価尺度',
'Raw Database access': 'データベースへの直接アクセス',
'Read-Only': '読み込み専用',
'Read-only': '登録内容の編集を禁止',
'Real World Arbitrary Units': '実在の任意単位',
'Receive Items': '物資を受領',
'Receive Shipment': '輸送を受け取る',
'Receive this shipment?': 'この物資送付を受領しますか?',
'Receive': '物資受領',
'Received By': '物資受領責任者',
'Received Item Details': '配送済み物資の詳細',
'Received Item deleted': '受領した物資を削除しました',
'Received Item updated': '受領された物資を更新しました',
'Received Shipment Details': '受け取った輸送の詳細',
'Received Shipment canceled and items removed from Inventory': '受領した輸送をキャンセルしました。物資は備蓄から削除されます',
'Received Shipment canceled': '受け取った輸送をキャンセルしました',
'Received Shipment updated': '受領済みの配送物の情報が更新されました',
'Received Shipments': '受諾した輸送物資',
'Received': '受領済み',
'Receiving and Sending Items': '送付 / 受領した救援物資',
'Recipient': '受け取り担当者',
'Recipients': '受信者',
'Recommendations for Repair and Reconstruction or Demolition': '再築や取り壊し、修繕を推奨',
'Record %(id)s created': 'レコード %(id)s が作成されました',
'Record Created': '作成されたレコード',
'Record Details': 'レコードの詳細',
'Record ID': 'レコードID',
'Record Saved': 'レコードが保存されました',
'Record added': 'レコードを追加しました',
'Record any restriction on use or entry': '利用や入力に当たっての制限事項を記載',
'Record deleted': 'レコードを削除しました',
'Record last updated': '最近更新されたレコード',
'Record not found!': 'レコードが見つかりませんでした',
'Record updated': 'レコードを更新しました',
'Record': 'レコード',
'Recording and Assigning Assets': '物資の割り当てと記録',
'Records': 'レコード',
'Recovery Request added': '遺体の回収要請を追加しました',
'Recovery Request deleted': '遺体回収要請を削除しました',
'Recovery Request updated': '遺体回収要請を更新しました',
'Recovery Request': '遺体回収の要請',
'Recovery Requests': '遺体回収要請',
'Recovery report added': '遺体回収レポートを追加しました',
'Recovery report deleted': '遺体回収レポートを削除しました',
'Recovery report updated': '遺体回収レポートを更新しました',
'Recovery': '遺体回収',
'Recruitment': '人材募集',
'Recurring Cost': '経常費用',
'Recurring cost': '経常費用',
'Recurring costs': '経常費用',
'Recurring': '経常',
'Red': '赤',
'Reference Document': '関連文書',
'Region Location': '地域のロケーション',
'Regional': '国際支部',
'Register Person into this Shelter': 'この避難所に人物情報を登録',
'Register Person': '人物情報を登録',
'Register them as a volunteer': 'ボランティアとして登録',
'Register': '登録',
'Registered People': '登録した人物情報',
'Registered users can': '登録済みのユーザは',
'Registering ad-hoc volunteers willing to contribute': '貢献を希望する臨時ボランティアを登録',
'Registration Details': '登録情報詳細',
'Registration Disabled!': '現在アカウント登録は受け付けていません。',
'Registration added': '登録を追加しました',
'Registration entry deleted': '登録を削除しました',
'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': '登録はまだ承認されていません (承認者:(%s)) -- 確認メールが届くまでもうしばらくお待ちください。',
'Registration key': '登録key',
'Registration successful': '登録に成功しました',
'Registration updated': '登録を更新しました',
'Registration': '登録',
'Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '地域内で活動する全ての支援団体を追跡し、情報を保持します。これにより、各団体が活動している地域の情報だけでなく、それぞれの地域でどのような活動が行われているかも掌握することができます。',
'Rehabilitation/Long Term Care': 'リハビリ/長期介護',
'Reinforced masonry': 'コンクリートブロック壁',
'Rejected': '拒否されました',
'Reliable access to sanitation/hygiene items': 'サニタリ / 衛生用品の安定供給がある',
'Relief Item Catalog': '救援物資カタログ',
'Relief Item': '救援物資',
'Relief Items': '救援物資',
'Relief Team': '救援チーム',
'Relief': '救援',
'Religion': '宗教',
'Religious Leader': '宗教指導者',
'Religious': '宗教',
'Relocate as instructed in the <instruction>': '<instruction>の内容に従って再配置',
'Remove Feature: Select the feature you wish to remove & press the delete key': 'Featureの削除: 削除したいfeatureを選択し、削除キーを押下してください',
'Remove Person from Group': 'メンバシップを削除',
'Remove Person from Team': 'メンバシップを削除',
'Remove': '削除',
'Removed from Group': 'メンバシップを削除しました',
'Removed from Team': 'メンバシップを削除しました',
'Repeat your password': 'パスワードをもう一度入力してください',
'Replace if Master': 'マスターなら置換',
'Replace if Newer': '新しいものがあれば置き換える',
'Replace': '置換',
'Report Another Assessment...': '別のアセスメントをレポートする',
'Report Details': 'レポートの詳細',
'Report Resource': 'レポートリソース',
'Report Type': 'レポートタイプ',
'Report Types Include': 'レポートタイプを含む',
'Report a Problem with the Software': 'ソフトウェアの不具合を報告',
'Report added': 'レポートを追加しました',
'Report deleted': 'レポートを削除しました',
'Report my location': '自分の現在地を報告',
'Report that person missing': '行方不明者の情報を報告',
'Report the contributing factors for the current EMS status.': '現在の緊急受け入れ状態に影響している事由を記載',
'Report the contributing factors for the current OR status.': '現在の手術室の状況報告',
'Report the person as found': '人物の所在情報を報告',
'Report them as found': '発見として報告',
'Report them missing': '行方不明として報告',
'Report updated': 'レポートを更新しました',
'Report': 'レポート',
'Reporter Name': 'レポーターの氏名',
'Reporter': 'レポーター',
'Reporting on the projects in the region': 'この地域で展開しているプロジェクトのレポート',
'Reports': 'レポート',
'Request Added': '支援要請を追加しました',
'Request Canceled': '支援要請をキャンセルしました',
'Request Details': '支援要請の詳細',
'Request Item Details': '救援物資要請の詳細',
'Request Item added': '救援物資の要請を追加しました',
'Request Item deleted': '救援物資の要請を削除しました',
'Request Item updated': '救援物資の要請を更新しました',
'Request Item': '物資を要請',
'Request Items': '物資の要請',
'Request Status': '支援要請の状況',
'Request Type': '支援要請のタイプ',
'Request Updated': '支援要請を更新しました',
'Request added': '支援要請を追加しました',
'Request deleted': '支援要請を削除しました',
'Request for Role Upgrade': '上位権限の取得要求',
'Request updated': '支援要請を更新しました',
'Request': '支援要請',
'Request, Response & Session': '要求、応答、およびセッション',
'Requested By Site': '支援要請を行ったサイト',
'Requested By Warehouse': '倉庫からの要請',
'Requested By': '支援要求元',
'Requested Items': '支援要請が行われた物資',
'Requested by': '要求元',
'Requested on': 'に関する要請',
'Requested': '要求済み',
'Requester': '要請の実施者',
'Requestor': '要請者',
'Requests From': '支援要請フォーム',
'Requests for Item': '物資に関する要請',
'Requests': '支援要請',
'Requires Login!': 'ログインしてください。',
'Requires login': 'ログインが必要です',
'Rescue and recovery': '救出、あるいは遺体回収作業',
'Reset Password': 'パスワードのリセット',
'Reset form': 'フォームをクリア',
'Reset': 'リセット',
'Resize Feature: Select the feature you wish to resize & then Drag the associated dot to your desired size': 'Featureのリサイズ: リサイズしたいfeatureを選択し、適切なサイズになるようドラッグしてください',
'Resolve Conflict': '競合の解決',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': '"解決"リンクでは、新しい画面を開き、重複している情報を解決してデータベースを更新します',
'Resolve': '解決済みか',
'Resource Details': 'リソースの詳細',
'Resource added': 'リソースを追加しました',
'Resource deleted': 'リソースを削除しました',
'Resource updated': 'リソースを更新しました',
'Resource': 'リソース',
'Resources': 'リソース',
'Respiratory Infections': '呼吸器感染症',
'Response Details': '応答の詳細',
'Response added': '返答を追加しました',
'Response deleted': 'Responseを削除しました',
'Response updated': '返答を更新しました',
'Response': '対応',
'Responses': '対応',
'Restricted Access': 'アクセス制限中',
'Restricted Use': '制限された目的での使用',
'Restrictions': '制限',
'Results': '結果',
'Retail Crime': '小売犯罪',
'Retrieve Password': 'パスワードの取得',
'Rice': '米穀',
'Riot': '暴動',
'River Details': '河川の詳細',
'River added': '河川を追加しました',
'River deleted': '河川を削除しました',
'River updated': '河川を更新しました',
'River': '河川',
'Rivers': '河川',
'Road Accident': '道路障害',
'Road Closed': '道路(通行止め)',
'Road Conditions': '路面の状況',
'Road Delay': '道路遅延',
'Road Hijacking': '道路ハイジャック',
'Road Usage Condition': '道路の路面状況',
'Role Details': '権限の詳細',
'Role Name': '権限の名称',
'Role Required': '権限が必要',
'Role Updated': '権限を更新しました',
'Role added': '権限を追加しました',
'Role deleted': '権限を削除しました',
'Role updated': '権限を更新しました',
'Role': '権限',
'Role-based': '権限に基づいた',
'Roles Permitted': '許可された権限',
'Roles': '権限',
'Roof tile': '屋根瓦',
'Roofs, floors (vertical load)': '屋根、床板 (vertical load)',
'Roster': '名簿',
'Rotate Feature: Select the feature you wish to rotate & then Drag the associated dot to rotate to your desired location': '地物の回転: 回転させたい地物を選択し、目的の位置に回転させるために関連付けられた点をドラッグします。',
'Row Choices (One Per Line)': '行の選択 (One Per Line)',
'Rows in table': 'テーブルの行',
'Rows selected': '行が選択されました',
'Run Functional Tests': '動作テストの実行',
'Run Interval': '実行間隔',
'Running Cost': 'ランニングコスト',
'SITUATION': '状況',
'Safe environment for vulnerable groups': '被災者にとって安全な環境である',
'Safety Assessment Form': '安全性アセスメントフォーム',
'Safety of children and women affected by disaster': '被災した女性と未成年が保護されている',
'Sahana Administrator': 'Sahana管理者',
'Sahana Blue': 'Sahana ブルー',
'Sahana Community Chat': 'Sahanaコミュニティチャット',
'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=> その他 (Sahana Agasti, Ushahidi 等.)',
'Sahana Eden <=> Other': 'Sahana Eden <=> 他のシステム',
'Sahana Eden Disaster Management Platform': 'Sahana Eden 被災地支援情報共有プラットフォーム',
'Sahana Eden Website': 'Sahana Eden公式ページ',
'Sahana Eden is a family of applications that provide solutions to coordination and collaboration for organizations working in disaster management.': 'Sahana Edenは、災害復旧に関わる様々な支援団体が、お互いに協力しあうために存在します。',
'Sahana FOSS Disaster Management System': 'Sahana オープンソース 被災地情報共有システム',
'Sahana Green': 'Sahana グリーン',
'Sahana Login Approval Pending': 'Sahana ログインは承認待ちです',
'Sahana access granted': 'Sahanaへのアクセス権を付与',
'Sahana: new request has been made. Please login to see if you can fulfil the request.': 'Sahana: 新しい支援要請が行われました。ログインして、支援要請を実現できるか確認してください。',
'Salted Fish': '塩漬けの魚',
'Salvage material usable from destroyed houses': '全壊した家屋から回収した物品(使用可能)',
'Salvage material usable from destroyed schools': '全壊した校舎から回収した物品(使用可能)',
'Sanitation problems': '衛生設備に問題',
'Satellite Office': '現地活動拠点',
'Satellite': '衛星',
'Saturday': '土曜日',
'Save any Changes in the one you wish to keep': '残す方の候補地へ行った変更を保存します。',
'Save': '保存',
'Save: Default Lat, Lon & Zoom for the Viewport': 'デフォルト表示範囲の緯度,経度,ズームレベルを保存',
'Saved.': '保存しました',
'Saving...': '保存しています...',
'Scale of Results': '結果の規模',
'Schedule': 'スケジュール',
'School Closure': '学校閉鎖',
'School Lockdown': '学校の厳重封鎖',
'School Reports': '学校のレポート',
'School Teacher': '学校教師',
'School activities': '学校の活動',
'School assistance received/expected': '学校用支援品を受領済み/受領予定',
'School assistance': '学校の援助',
'School attendance': '学校へ出席者',
'School destroyed': '校舎全壊',
'School heavily damaged': '校舎の深刻な損壊',
'School tents received': '仮校舎用テントを受領',
'School tents, source': '仮校舎用テント、送付元',
'School used for other purpose': '校舎を他目的で利用中',
'School': '学校',
'School/studying': '学校/勉強',
'Schools': '学校',
'Search & List Bin Types': 'Bin Typeを検索して一覧表示',
'Search & List Bins': 'Binsを検索して一覧表示',
'Search & List Catalog': 'カタログを検索して一覧表示',
'Search & List Category': 'カテゴリを検索して一覧表示',
'Search & List Items': '救援物資を検索して一覧表示',
'Search & List Locations': 'ロケーションを検索して一覧表示',
'Search & List Site': 'Siteを検索して一覧表示',
'Search & List Sub-Category': 'サブカテゴリを検索して一覧表示',
'Search & List Unit': '単位を検索して一覧表示',
'Search Activities': '支援活動の検索',
'Search Activity Report': '支援活動レポートの検索',
'Search Addresses': '住所を検索',
'Search Aid Requests': '援助要請を検索',
'Search Alternative Items': 'その他のアイテムを検索',
'Search Assessment Summaries': 'アセスメントの要約を検索',
'Search Assessments': 'アセスメントを検索',
'Search Asset Assignments': '資産割り当ての検索',
'Search Assets': '資産の検索',
'Search Baseline Type': 'Baseline Typeを検索',
'Search Baselines': '基準値の検索',
'Search Brands': '銘柄を検索',
'Search Budgets': '予算を検索',
'Search Bundles': 'Bundleを検索',
'Search Catalog Items': '救援物資カタログを検索',
'Search Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog関係の検索',
'Search Checklists': 'チェックリストを検索',
'Search Cluster Subsectors': 'クラスタのサブセクタを検索',
'Search Clusters': 'クラスタを検索',
'Search Commitment Items': 'コミットされた救援物資の検索',
'Search Commitments': 'コミットの検索',
'Search Configs': '設定を検索',
'Search Contact Information': '連絡先情報を検索',
'Search Contacts': '連絡先を検索',
'Search Credentials': '証明書の検索',
'Search Distribution Items': '配給物資を検索',
'Search Distributions': '配給所を検索',
'Search Documents': 'ドキュメントを検索',
'Search Donors': '資金提供組織の検索',
'Search Existing Locations': '既存のロケーションを検索する',
'Search Feature Layers': 'Feature Layersの検索',
'Search Flood Reports': '洪水レポートの検索',
'Search Geonames': 'Geonamesの検索',
'Search Groups': 'グループの検索',
'Search Hospitals': '病院情報の検索',
'Search Identity': 'ID情報の検索',
'Search Images': '画像の検索',
'Search Impact Type': '被害の種類を検索',
'Search Impacts': '影響の検索',
'Search Incident Reports': 'インシデントレポートを検索',
'Search Incidents': 'インシデントの検索',
'Search Inventory Items': '備蓄物資を検索',
'Search Inventory Stores': '物資集積地点の検索',
'Search Item Catalog Category(s)': 'アイテムカタログカテゴリの検索',
'Search Item Catalog(s)': '救援物資カタログの検索',
'Search Item Categories': '救援物資カテゴリを検索',
'Search Item Packs': '物資のパックを検索',
'Search Item Sub-Category(s)': 'アイテムサブカテゴリの検索',
'Search Items': 'アイテムの検索',
'Search Keys': 'Keyの検索',
'Search Kits': 'Kitsの検索',
'Search Layers': 'レイヤの検索',
'Search Level 1 Assessments': 'レベル1アセスメントの検索',
'Search Level 2 Assessments': 'レベル2のアセスメントを検索',
'Search Locations': 'ロケーションの検索',
'Search Log Entry': 'ログエントリの検索',
'Search Map Profiles': '地図設定の検索',
'Search Markers': 'マーカーの検索',
'Search Members': 'メンバーの検索',
'Search Membership': 'メンバシップの検索',
'Search Memberships': 'メンバシップの検索',
'Search Metadata': 'メタデータの検索',
'Search Need Type': '需要タイプの検索',
'Search Needs': '必要な物資を検索',
'Search Notes': '追加情報を検索',
'Search Offices': 'オフィスの検索',
'Search Organizations': '団体の検索',
'Search Peer': '同期先を検索',
'Search Peers': 'データ同期先を検索',
'Search Personal Effects': 'Personal Effectsの検索',
'Search Persons': '人物情報の検索',
'Search Photos': '写真の検索',
'Search Positions': 'Positionsの検索',
'Search Problems': '問題の検索',
'Search Projections': '地図投影法の検索',
'Search Projects': 'プロジェクトの検索',
'Search Rapid Assessments': '被災地の現況アセスメントを検索',
'Search Received Items': '受領済み救援物資の検索',
'Search Received Shipments': '受信済みの出荷の検索',
'Search Records': 'レコードの検索',
'Search Recovery Reports': '遺体回収レポートを検索',
'Search Registations': '登録情報の検索',
'Search Registration Request': '登録要請を検索',
'Search Report': 'レポートの検索',
'Search Reports': 'レポートの検索',
'Search Request Items': '物資の要請を検索',
'Search Request': '支援要請の検索',
'Search Requested Items': '支援要請されている物資を検索',
'Search Requests': '支援要請の検索',
'Search Resources': 'リソースの検索',
'Search Responses': '検索の応答',
'Search Rivers': '河川を検索',
'Search Roles': '役割の検索',
'Search Sections': 'セクションの検索',
'Search Sectors': '活動分野を検索',
'Search Sent Items': '送付した物資を検索',
'Search Sent Shipments': '送信した出荷の検索',
'Search Service Profiles': 'サービスプロファイルの検索',
'Search Settings': '設定の検索',
'Search Shelter Services': '避難所での提供サービスを検索',
'Search Shelter Types': '避難所タイプの検索',
'Search Shelters': '避難所の検索',
'Search Shipment Transit Logs': '輸送履歴の検索',
'Search Shipment/Way Bills': '輸送費/渡航費の検索',
'Search Shipment<>Item Relation': '輸送と救援物資の関係性の検索',
'Search Site(s)': 'Siteの検索',
'Search Skill Types': 'スキルタイプの検索',
'Search Skills': 'スキルを検索',
'Search Solutions': '解決案の検索',
'Search Staff Types': 'スタッフタイプの検索',
'Search Staff': 'スタッフの検索',
'Search Status': '状態の検索',
'Search Storage Bin Type(s)': 'Storage Bin Typeの検索',
'Search Storage Bin(s)': 'Storage Bin(s)の検索',
'Search Storage Location(s)': '備蓄地点の検索',
'Search Subscriptions': '寄付申し込みを検索',
'Search Support Requests': '支援要求の検索',
'Search Tasks': 'タスクの検索',
'Search Teams': 'チームの検索',
'Search Themes': 'テーマの検索',
'Search Tickets': 'チケットの検索',
'Search Tracks': '追跡情報の検索',
'Search Twitter Tags': 'Twitterのタグを検索',
'Search Units': '単位の検索',
'Search Users': 'ユーザの検索',
'Search Volunteer Registrations': 'ボランティア登録の検索',
'Search Volunteers': 'ボランティアの検索',
'Search Warehouse Items': '倉庫の物資を検索',
'Search Warehouses': 'Warehousesの検索',
'Search and Edit Group': 'グループを検索して編集',
'Search and Edit Individual': '人物情報を検索して個別に編集',
'Search by ID Tag': 'IDタグで検索',
'Search for Items': '物資の検索',
'Search for a Hospital': '病院を探す',
'Search for a Location': '検索地域を指定します',
'Search for a Person': '人物を探す',
'Search for a Project': 'プロジェクトを探す',
'Search for a Request': '支援要請の検索',
'Search for a shipment received between these dates': 'ある期間内に受け取られた輸送を検索する',
'Search for an item by category.': 'カテゴリで物資を検索',
'Search for an item by text.': 'テキストで項目を検索',
'Search here for a person record in order to:': '人物情報を検索することで、以下の事柄を行うことができます。',
'Search messages': 'メッセージの検索',
'Search': '検索',
'Searching for different groups and individuals': '他のグループと個人を探す',
'Secondary Server (Optional)': 'セカンダリサーバ(オプション)',
'Seconds must be a number between 0 and 60': '秒には0-60の間の数字を記入してください',
'Seconds must be a number greater than 0 and less than 60': '秒は0から60の間で入力してください',
'Section Details': 'Sectionの詳細',
'Section deleted': 'Sectionを削除しました',
'Section updated': 'セクションを更新しました',
'Sections': 'セクション',
'Sector Details': '活動分野の詳細',
'Sector added': '活動分野を追加しました',
'Sector deleted': '活動分野を削除しました',
'Sector updated': '活動分野を更新しました',
'Sector': '活動分野',
'Sectors': '活動分野',
'Security Policy': 'セキュリティポリシー',
'Security Status': 'セキュリティステータス',
'Security problems': 'セキュリティーの問題',
'See unassigned recovery requests': 'まだ割り当てられていない遺体回収要請を見る',
'Seen': '発見情報あり',
'Select 2 potential locations from the dropdowns.': '候補地を2つ、ドロップダウンから選択します。',
'Select Items from the Request': '支援要請を基にアイテムを選択する',
'Select Items from this Inventory': '備蓄中の物資から選択',
'Select Language': '言語選択',
'Select Organization': '団体の選択',
'Select Photos': '写真の選択',
'Select a location': 'ロケーションを選択',
'Select a question from the list': 'リストから質問を選択してください',
'Select a range for the number of total beds': 'ベッド総数の範囲を選択',
'Select all that apply': '該当する項目を全て選択',
'Select an Organization to see a list of offices': '団体を選択すると、所属するオフィスが表示されます',
'Select an existing Location': '既に登録してあるロケーションを選択してください',
'Select the Cluster Layers for Assessments and Activities to analyse the Gaps:': 'アセスメントと支援活動のギャップを解析するクラスタの層を選択:',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'オーバーレイを指定し、適切なアセスメントと支援活動を表示させてニーズを明確にします。',
'Select the person assigned to this role for this project.': 'この人物に、プロジェクト内の権限を担当させます。',
'Select the person associated with this scenario.': 'このタスクに関連する人物を選択してください。',
'Select to see a list of subdivisions.': '項目を選択すると、より細かい分類を選択できます。',
'Select to show this configuration in the Regions menu.': '範囲メニューで表示する構成を選択して下さい',
'Select': '選択',
'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'SMS送信時に、モデム、Tropoまたはゲートウェイのどちらを使用するかを選択',
'Selects whether to use the gateway or the Modem for sending out SMS': 'SMS送信時、モデムとゲートウェイのどちらを使用するか選択',
'Self Registration': '本人による登録',
'Self-registration': '本人による登録',
'Send Alerts using Email &/or SMS': '電子メールまたはSMSを使用してアラートを送信',
'Send Items': '物資を送付',
'Send Mail': 'メール送信',
'Send Message': 'メッセージを送る',
'Send Notification': '通知を送信',
'Send Shipment': '輸送を開始する',
'Send from %s': '依頼主( %s )',
'Send message': 'メッセージ送信',
'Send new message': '新規メッセージ送信',
'Send': '物資送付',
'Sends & Receives Alerts via Email & SMS': '電子メール/SMS 経由でアラート送信/受信',
'Senior (50+)': '高齢者 (50+)',
'Sensitivity': '感度',
'Sent Item Details': '送付した物資の詳細',
'Sent Item deleted': '輸送済み物資を削除しました',
'Sent Item updated': '送付した救援物資を更新しました',
'Sent Shipment Details': '送付物資の詳細',
'Sent Shipment canceled and items returned to Inventory': '送付処理した輸送がキャンセルされ、物資は倉庫に戻りました',
'Sent Shipment canceled': '輸送開始をキャンセルしました',
'Sent Shipment updated': '送信した物資が更新されました',
'Sent Shipments': '物資を送付しました',
'Sent': '送信',
'Separate latrines for women and men': 'トイレは男女別である',
'Separated children, caregiving arrangements': '親と離れた子供だちのための保育手配',
'Seraiki': 'セライキ',
'Serial Number': 'シリアルナンバー',
'Series': 'シリーズ',
'Server': 'サーバ',
'Service Catalog': 'サービスカタログ',
'Service or Facility': 'サービス、または施設',
'Service profile added': 'サービスプロファイルを追加しました',
'Service profile deleted': 'サービスプロファイルを削除しました',
'Service profile updated': 'サービスプロファイルを更新しました',
'Service': 'サービス',
'Services Available': '利用可能なサービス',
'Services': 'サービス',
'Setting Details': '設定の詳細',
'Setting added': '設定を追加しました',
'Setting deleted': '設定を削除しました',
'Setting updated': '設定を更新しました',
'Settings updated': '設定を更新しました',
'Settings were reset because authenticating with Twitter failed': 'Twitterの認証に失敗したため、設定をクリアします',
'Settings': '設定',
'Severe': '深刻',
'Severity': '深刻度',
'Severity:': '深刻度:',
'Share a common Marker (unless over-ridden at the Feature level)': 'マーカーの共有 (機能レイヤで上書きされない限り)',
'Shelter & Essential NFIs': '避難所/生活用品',
'Shelter Details': '避難所の詳細',
'Shelter Name': '避難所名称',
'Shelter Registry': '避難所登録',
'Shelter Service Details': '避難所サービスの詳細',
'Shelter Service added': '避難所サービスを追加しました',
'Shelter Service deleted': '避難所サービスを削除しました',
'Shelter Service updated': '避難所サービスを更新しました',
'Shelter Service': '避難所サービス',
'Shelter Services': '避難所サービス',
'Shelter Type Details': '避難所タイプの詳細',
'Shelter Type added': '避難所タイプを追加しました',
'Shelter Type deleted': '避難所タイプを削除しました',
'Shelter Type updated': '避難所サービスを更新しました',
'Shelter Type': '避難所タイプ',
'Shelter Types and Services': '避難所のタイプとサービス',
'Shelter Types': '避難所タイプ',
'Shelter added': '避難所を追加しました',
'Shelter deleted': '避難所を削除しました',
'Shelter updated': '避難所を更新しました',
'Shelter': '避難所',
'Shelter/NFI Assistance': '避難所 / 生活用品支援',
'Shelter/NFI assistance received/expected': '避難所 / 生活必需品の支援を受領済み、あるいは受領予定',
'Shelters': '避難所',
'Shipment Created': '輸送が作成されました',
'Shipment Details': '輸送の詳細',
'Shipment Items received by Inventory': '物資備蓄地点から送付された救援物資',
'Shipment Items sent from Inventory': '備蓄物資から輸送を行いました',
'Shipment Items': '救援物資の輸送',
'Shipment Transit Log Details': '輸送履歴の詳細',
'Shipment Transit Log added': '輸送履歴を追加しました',
'Shipment Transit Log deleted': '輸送履歴を削除しました',
'Shipment Transit Log updated': '輸送履歴を更新しました',
'Shipment Transit Logs': '輸送履歴',
'Shipment/Way Bill added': '輸送/移動費を追加しました',
'Shipment/Way Bills Details': '輸送/移動費の詳細',
'Shipment/Way Bills deleted': '輸送/移動費を削除しました',
'Shipment/Way Bills updated': '輸送/移動費を更新しました',
'Shipment/Way Bills': '輸送/移動費',
'Shipment<>Item Relation added': '輸送<>物資間の関係を追加しました',
'Shipment<>Item Relation deleted': '輸送<>アイテム間の関係を削除しました',
'Shipment<>Item Relation updated': '輸送<>物資間の関係を更新しました',
'Shipment<>Item Relations Details': '輸送<>物資間の関係詳細',
'Shipment<>Item Relations': '輸送<>物資間の関係',
'Shipments To': '輸送先',
'Shipments': '輸送',
'Shooting': '銃撃',
'Short Assessment': '簡易評価',
'Short Description': '概要',
'Show Checklist': 'チェックリストを表示',
'Show Details': '詳細を表示',
'Show Map': '地図の表示',
'Show Region in Menu?': '地域をメニューで表示しますか?',
'Show on map': '地図上に表示',
'Sign-up as a volunteer': 'ボランティアとして登録する',
'Sign-up for Account': 'アカウント登録',
'Sign-up succesful - you should hear from us soon!': '登録できました。すぐに連絡が送られます。',
'Sindhi': 'シンド語',
'Site Address': 'サイトの住所',
'Site Administration': 'このサイト自体の管理',
'Site Description': 'サイトの説明',
'Site Details': 'Siteの詳細',
'Site ID': 'サイトID',
'Site Location Description': 'サイト ロケーションの説明',
'Site Location Name': 'サイトロケーション名',
'Site Manager': 'Site 管理者',
'Site Name': 'Site の名前',
'Site added': 'サイトを追加しました',
'Site deleted': 'サイトを削除しました',
'Site updated': 'サイトを更新しました',
'Site': 'サイト',
'Site/Warehouse': 'サイト/倉庫',
'Sites': 'サイト',
'Situation Awareness & Geospatial Analysis': '広域情報の取得や、地理情報の分析を行ないます',
'Sketch': 'スケッチ',
'Skill Details': 'スキルの詳細',
'Skill Status': 'スキル状況',
'Skill Type Details': 'スキルタイプの詳細',
'Skill Type added': 'スキルタイプを追加しました',
'Skill Type deleted': 'スキルタイプを削除しました',
'Skill Type updated': 'スキルタイプを更新しました',
'Skill Types': 'スキルタイプ',
'Skill added': 'スキルを追加しました',
'Skill deleted': 'スキルを削除しました',
'Skill updated': 'スキルを更新しました',
'Skill': 'スキル',
'Skills': 'スキル',
'Slope failure, debris': '斜面崩壊・崩壊堆積物',
'Small Trade': '小規模取引',
'Smoke': '煙',
'Snapshot Report': 'スナップショットレポート',
'Snapshot': 'スナップショット',
'Snow Fall': '降雪',
'Snow Squall': '豪雪',
'Soil bulging, liquefaction': '土壌隆起・液状化',
'Solid waste': '固形廃棄物',
'Solution Details': '解決案の詳細',
'Solution Item': '解決案項目',
'Solution added': '解決案を追加しました',
'Solution deleted': '解決案を削除しました',
'Solution updated': '解決案を更新しました',
'Solution': '解決案',
'Solutions': '解決案',
'Some': '散見',
'Sorry - the server has a problem, please try again later.': 'すみません、サーバーに問題が発生しています。時間を置いてやり直してください。',
'Sorry that location appears to be outside the area of the Parent.': 'このロケーションは親属性のエリアの外に表示されます。',
'Sorry that location appears to be outside the area supported by this deployment.': 'すいません、この位置は、このデプロイメントでサポートされている領域の外です。',
'Sorry, I could not understand your request': '残念ながら、リクエストが理解できませんでした。',
'Sorry, only users with the MapAdmin role are allowed to create location groups.': '申し訳ありませんが、 MapAdmin 権限を持つユーザだけがロケーションのグループを作れます',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': '申し訳ありませんが、ロケーションの編集を行うにはMapAdmin権限を持ったユーザである必要があります。',
'Sorry, something went wrong.': 'すいません、何か問題が発生しています。',
'Sorry, that page is forbidden for some reason.': 'すいません、都合により、このページは閲覧禁止です。',
'Sorry, that service is temporary unavailable.': 'すいません、このサービスは一時的に利用不可となっています。',
'Sorry, there are no addresses to display': 'すいません、表示する住所がありません',
'Source ID': '情報元ID',
'Source Time': '情報ソース入手時刻',
'Source Type': '情報ソース種別',
'Source': '情報元',
'Sources of income': '収入源',
'Space Debris': '宇宙廃棄物',
'Spanish': 'スペイン語',
'Special Ice': '特別な氷',
'Special Marine': '特別海上',
'Special needs': '特別な要求',
'Specialized Hospital': '専門病院',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'ある人々やグループが見られるロケーションの中の特別な場所 (建物、部屋等)',
'Specific Location': '特定のロケーション',
'Specific locations need to have a parent of level': 'ロケーションを指定するには、そのロケーションの親属性指定が必要です',
'Specify a descriptive title for the image.': '画像の説明として一言タイトルをつけてください。',
'Specify the bed type of this unit.': 'この施設にある寝具の種別を指定してください',
'Specify the minimum sustainability in weeks or days.': '最短で何週間、あるいは何日以内に枯渇の可能性があるかを記載してください',
'Specify the number of available sets': '利用可能なセットの個数を入力してください',
'Specify the number of available units (adult doses)': '(成人が使用するとして)使用可能な個数を入力してください',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': '使用可能な乳酸リンゲル液あるいは同等品のリッター数を入力してください',
'Specify the number of sets needed per 24h': '24時間ごとに必要なセットの数を指定する',
'Specify the number of units (adult doses) needed per 24h': '(成人が使用するとして)24時間ごとに必要な個数を入力してください',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': '24時間ごとに必要な乳酸リンゲル液あるいは同等品のリッター数を入力してください',
'Spherical Mercator?': '球面メルカトル?',
'Spreadsheet Importer': 'スプレッドシートの取り込み',
'Spreadsheet uploaded': 'スプレッドシートがアップロードされました',
'Spring': '湧き水',
'Squall': 'スコール',
'Staff 2': 'スタッフ 2',
'Staff Details': 'スタッフの詳細',
'Staff Type Details': 'スタッフタイプの詳細',
'Staff Type added': 'スタッフタイプを追加しました',
'Staff Type deleted': 'スタッフタイプを削除しました',
'Staff Type updated': 'スタッフタイプを更新しました',
'Staff Types': 'スタッフ分類',
'Staff added': 'スタッフを追加しました',
'Staff deleted': 'スタッフを削除しました',
'Staff present and caring for residents': '上記施設にスタッフが配置され、ケアを行っている',
'Staff updated': 'スタッフを更新しました',
'Staff': 'スタッフ',
'Staffing': 'スタッフ配備',
'Stairs': '階段',
'Start date and end date should have valid date values': '開始日と終了日は正しい値である必要があります',
'Start date': '開始日',
'Start of Period': '開始期間',
'Stationery': '文房具',
'Status Report': 'ステータスレポート',
'Status added': '状況が追加されました',
'Status deleted': 'ステータスを削除しました',
'Status of clinical operation of the facility.': '施設で行われている診療の状況を記載してください。',
'Status of general operation of the facility.': '施設の運用状況情報を記載してください。',
'Status of morgue capacity.': '死体安置所の収容状況です。',
'Status of operations of the emergency department of this hospital.': 'この病院の緊急手術室の状態です。',
'Status of security procedures/access restrictions in the hospital.': '病院のアクセス制限/セキュリティ手順の状態。',
'Status of the operating rooms of this hospital.': 'この病院の手術室の状態。',
'Status updated': '状況を更新しました',
'Status': 'ステータス',
'Steel frame': '鉄骨',
'Storage Bin Details': '物資保管場所の詳細',
'Storage Bin Number': 'Storage Bin番号',
'Storage Bin Type Details': '物資保管タイプの詳細',
'Storage Bin Type added': '物資保管タイプを追加しました',
'Storage Bin Type deleted': 'Storage Binタイプを削除しました',
'Storage Bin Type updated': 'Storage Binタイプを更新しました',
'Storage Bin Type': 'Storage Binタイプ',
'Storage Bin Types': '収納箱のタイプ',
'Storage Bin added': 'Storage Binを追加しました',
'Storage Bin deleted': 'Storage Bin を削除しました',
'Storage Bin updated': 'Storage Bin を更新しました',
'Storage Bin': '物資貯蔵容器',
'Storage Bins': '物資保管場所',
'Storage Location Details': '備蓄地点の詳細',
'Storage Location ID': '備蓄地点ID',
'Storage Location Name': '備蓄地点名称',
'Storage Location added': '備蓄地点を追加しました',
'Storage Location deleted': '備蓄地点を削除しました',
'Storage Location updated': '備蓄地点を更新しました',
'Storage Location': '備蓄地点',
'Storage Locations': '備蓄地点',
'Store spreadsheets in the Eden database': 'Edenのデータベースにスプレッドシートを格納',
'Storeys at and above ground level': '階層、あるいは地面より上部',
'Storm Force Wind': '嵐の風の強さ',
'Storm Surge': '高潮',
'Stowaway': '密航者',
'Street (continued)': '住所 (続き)',
'Street Address': '住所',
'Street': 'ストリート',
'Strong Wind': '強風',
'Structural Hazards': '構造破壊',
'Structural': '構造的な',
'Sub Category': 'サブカテゴリ',
'Sub-type': 'サブタイプ',
'Subject': '件名',
'Submission successful - please wait': '送信に成功しました。しばらくお待ちください',
'Submission successful - please wait...': '送信に成功しました。しばらくお待ちください',
'Submit New (full form)': '(完全なフォームで)新しく投稿する',
'Submit New (triage)': '新しい (トリアージ) を追加',
'Submit New': '新規登録',
'Submit a request for recovery': '遺体回収要請を作成する',
'Submit new Level 1 assessment (full form)': 'レベル1のアセスメントを投稿する(完全なフォーム)',
'Submit new Level 1 assessment (triage)': '新しいレベル1アセスメント(トリアージ)を追加',
'Submit new Level 2 assessment': '新しいレベル2アセスメントの登録',
'Submit': '送信',
'Subscription Details': '寄付申し込みの詳細',
'Subscription added': '寄付申し込みを追加しました',
'Subscription deleted': '寄付申し込みを削除しました',
'Subscription updated': '寄付申し込みを更新しました',
'Subscriptions': '寄付申し込み',
'Subsistence Cost': '生存コスト',
'Suburb': '郊外',
'Sufficient care/assistance for chronically ill': '慢性疾患罹患者への十分なケア / 介護がある',
'Suggest not changing this field unless you know what you are doing.': 'よくわからない場合は、この項目を変更しないでください。',
'Summary by Administration Level': '管理レベルの概要',
'Summary': '要約',
'Sunday': '日曜',
'Supervisor': '管理権限を追加',
'Supplies': '支給品',
'Support Request': '支援要請',
'Support Requests': '支援の要請',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': '危機管理の専門グループの助言を取り入れることで、救援活動の優先順位を作成しやすくします。',
'Sure you want to delete this object?': 'このオブジェクトを削除してもよろしいですか?',
'Surgery': '外科',
'Survey Answer Details': '調査回答詳細',
'Survey Answer added': '調査の回答を追加しました',
'Survey Answer deleted': '調査の回答を削除しました',
'Survey Answer updated': '調査回答を更新しました',
'Survey Answer': '調査回答',
'Survey Module': '調査モジュール',
'Survey Name': 'Survey 名',
'Survey Question Details': '調査項目の詳細',
'Survey Question Display Name': 'フィードバックの質問の表示名',
'Survey Question added': '調査の質問を追加しました',
'Survey Question deleted': '調査の質問を削除しました',
'Survey Question updated': 'Survey Questionを更新しました',
'Survey Question': '調査の質問',
'Survey Section Details': 'フィードバック項目の詳細',
'Survey Section Display Name': '調査項目の表示名',
'Survey Section added': '調査項目を追加しました',
'Survey Section deleted': 'フィードバック項目を削除しました',
'Survey Section updated': 'サーベイセクションを更新しました',
'Survey Section': '調査項目',
'Survey Series Details': 'Survey Seriesの詳細',
'Survey Series Name': 'フィードバックシリーズ名',
'Survey Series added': '一連の調査を追加しました',
'Survey Series deleted': '一連の調査を削除しました',
'Survey Series updated': '連続調査を更新しました',
'Survey Series': '一連の調査',
'Survey Template Details': '調査テンプレートの詳細',
'Survey Template added': 'Surveyテンプレートを追加しました',
'Survey Template deleted': '調査テンプレートを削除しました',
'Survey Template updated': '調査のテンプレートを更新しました',
'Survey Template': '調査テンプレート',
'Survey Templates': '調査のテンプレート',
'Switch this on to use individual CSS/Javascript files for diagnostics during development.': '開発時にこのスイッチをONにすることで、CSS/Javascriptファイルの診断を行なえます。',
'Symbology': 'コード',
'Sync Conflicts': 'データ同期中に競合が発生しました',
'Sync History': 'データ同期履歴',
'Sync Now': 'データ同期中',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'データ同期先とは、情報の同期を行うインスタンスやピアのことを指します。(Sahana EdenやSahanaAgasti、Ushahidiなどと同期可能です) 同期先の登録や検索、登録情報の変更を行う際は、リンクをクリックしてページを表示してください。',
'Sync Partners': 'データ同期パートナー',
'Sync Pools': 'プールの同期',
'Sync Schedule': 'データ同期スケジュール',
'Sync Settings': 'データ同期設定',
'Sync process already started on ': 'データ同期プロセスは既に開始しています',
'Synchronisation History': 'データ同期履歴',
'Synchronisation': '同期',
'Synchronization Conflicts': '同期のコンフリクト',
'Synchronization Details': 'データ同期の詳細',
'Synchronization History': 'データ同期履歴',
'Synchronization Peers': 'データ同期先',
'Synchronization Settings': 'データ同期設定',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'データ同期を使用すると、他の端末とデータを共有し、自身のデータを最新の状態に更新することができます。このページには、SahanaEdenにおいてデータ同期を行う方法が記載されています。',
'Synchronization not configured.': 'データ同期が設定されていません',
'Synchronization settings updated': 'データ同期設定を更新しました',
'Synchronization': 'データ同期',
'Syncronisation History': 'データ同期履歴',
'Syncronisation Schedules': 'データ同期スケジュール',
'System allows the General Public to Report Incidents & have these Tracked.': 'システムを使うことで、一般市民によるインシデントの報告、および報告されたインシデントの追跡を行うことができます。',
'System allows the tracking & discovery of Items stored in Locations.': 'システムにより、物資がどこで保持されているかを追跡、明確化することができます。',
'System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': 'このシステムは、支援団体、個々の支援者、政府職員、そして避難所に移動した人々の間で、援助の需要と供給の調整を図るための、オンラインの中央データベースです。このシステムを用いて、利用可能な資源を、需要を満たすように、有効かつ効率的に割り当てることができます。',
'System keeps track of all Volunteers working in the disaster region. It captures not only the places where they are active, but also captures information on the range of services they are providing in each area.': 'この仕組みでは、災害地域の全てのボランティア情報を提供します。ボランティアの活動場所に加え、そこで提供する支援内容も提供します。',
'TMS Layers': 'TMSレイヤ',
'Table name': 'テーブル名',
'Tags': 'タグ',
'Take shelter in place or per <instruction>': '場所や<instruction>ごとに避難してください',
'Task Details': 'タスクの詳細',
'Task List': 'タスク一覧',
'Task Status': 'タスクの状況',
'Task added': 'タスクを追加しました',
'Task deleted': 'タスクを削除しました',
'Task status': 'タスク状況',
'Task updated': 'タスクを更新しました',
'Tasks': 'タスク',
'Team Description': 'チーム概要',
'Team Details': 'チームの詳細',
'Team Head': 'チーム代表者',
'Team Id': 'チームID',
'Team Leader': 'チームリーダー',
'Team Member added': 'チームメンバーを追加しました',
'Team Members': 'チームメンバー',
'Team Name': 'チーム名',
'Team Type': 'チームタイプ',
'Team added': 'チームを追加しました',
'Team deleted': 'チームを削除しました',
'Team updated': 'チームを更新しました',
'Team': 'チーム',
'Teams': 'チーム',
'Technical testing only, all recipients disregard': '技術検証のみで、すべての受取人は無視されます',
'Telecommunications': '通信・情報',
'Telephone': '電話',
'Telephony': '電話',
'Temp folder %s not writable - unable to apply theme!': '一時フォルダ%sが書き込み不可になっています。テーマを適用できません。',
'Template file %s not readable - unable to apply theme!': 'テンプレートファイル %s が読み込み不可になっています。テーマを適用できません。',
'Templates': 'テンプレート',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': '国内における第五段階管理部門を示す用語(例: 郵便番号の下位部分)。このレベルは通常使われません。',
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': '国内で第4の行政区域を示す用語 (例えば村、地区)',
'Term for the primary within-country administrative division (e.g. State or Province).': '国内で最大の行政区域を示す用語 (例えば州や都道府県)',
'Term for the secondary within-country administrative division (e.g. District).': '国内で二番目の管理部門の用語 (例: 区)',
'Term for the third-level within-country administrative division (e.g. City or Town).': '国内で三番目の管理部門を示す用語 (例: 市や町)',
'Term for the top-level administrative division (typically Country).': '最上位の統制区域を示す用語 (一般的には国)',
'Territorial Authority': '地方機関',
'Terrorism': 'テロリズム',
'Tertiary Server (Optional)': '三番目のサーバ(オプション)',
'Test Results': 'テスト結果',
'Text Color for Text blocks': 'テキストブロックのテキスト色',
'Text before each Text Field (One per line)': 'テキストフィールドの前のテキスト (一行に一つ)',
'Text in Message': 'メッセージのテキスト',
'Text in Message: ': 'メッセージのテキスト: ',
'Text': 'テキスト',
'Thanks for your assistance': 'ご協力ありがとうございます',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': '"query"は"db.table1.field1==\'value\'"のような条件です。SQL JOINの"db.table1.field1 == db.table2.field2"結果のようなものです。',
'The Area which this Site is located within.': 'このサイトが含まれる地域',
'The Assessments module allows field workers to send in assessments. 2 different options are provided here currently:': 'アセスメントモジュールは、被災現場で活動する人々による現状の査定報告を記録することができます。現在は、2種類のオプションが提供されています。',
'The Assessments module allows field workers to send in assessments.': 'アセスメントモジュールは、被災現場で活動する人々による現状の査定報告を記録することができます。',
'The Author of this Document (optional)': 'この文書の作成者氏名(オプション)',
'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': 'ビルアセスメントモジュールではビルの安全性評価を行います (例:地震の後など)',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'その人物/グループの現在地は報告用の概要レベルの情報あるいは地図上の表示のため正確な情報いずれの場合もあります。場所名の数文字を入力すると、登録済みの場所から検索できます。',
'The District for this Report.': 'このレポートが関連する地区。',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': '承認依頼が送信されるメールアドレス(通常は個人のメールアドレスではなく、グループのメールアドレス)。この欄が空白の場合、ドメインが一致すれば依頼は自動的に承認されます',
'The Group whose members can edit data in this record.': 'このグループのメンバーは、レコード上のデータを修正することができます。',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': '一般ユーザは、インシデント・レポートシステムからインシデントを報告し、その結果を表示させることができます。',
'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).': 'Siteのロケーション、(レポート用で)おおまかな場合と、(地図表示用で)正確な場合とがあります。',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'その人物がやって来たロケーションで、報告のためのだいたいの場所、あるいは地図で表示するための正確な緯度経度です。使用可能なロケーションを検索するには最初の数文字を入力してください',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'その人物が向かう場所は報告用の概要レベルの情報あるいは地図上の表示のため正確な情報いずれの場合もあります。場所名の数文字を入力すると、登録済みの場所から検索できます。',
'The Media Library provides a catalog of digital media.': 'メディア・ライブラリーは、デジタル・メディアの一覧を提供します。',
'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'メッセージング・モジュールは、SAHANAシステムのコミュニケーション中心となります。災害の前、災害中または災害の後に様々なグループや個人にSMSとeメールで警報やメッセージを送ります。',
'The Office this record is associated with.': 'このレコードに関連するオフィス',
'The Organization Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '団体情報を登録することで、被災地域で活動するすべての団体の活動を追跡します。また、それぞれの地域において、彼らがどこで活動しているかという情報だけでなく、彼らが各地で提供しているプロジェクトの範囲についての情報も提供します。',
'The Organization this record is associated with.': 'このレコードに関連する団体',
'The Organization which is funding this Activity.': 'この支援活動に資金を提供する団体',
'The Person currently filling this Role.': '現在この役割に属している人物',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'プロジェクト追跡モジュールでは、支援活動(アクティビティ)を作成し、必要な物資 / サービスのギャップを満たすことを目的とします。',
'The Rapid Assessments Module stores structured reports done by Professional Organizations.': '被災地の現況アセスメントには、専門団体によって行われたレポートの結果が格納されます。',
'The Request this record is associated with.': 'このレコードに関連する支援要請',
'The Requests Management System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': '支援要請管理システムは、全ての支援団体、救援者、政府職員、および避難所に暮らす避難者たち自身が、要求に応じて援助の供給を調整できる中央のオンラインデータベースです。支援要請管理システムは効果的かつ効率的に要求を満たすことができる利用可能な資源の割り当てを可能にします。',
'The Role this person plays within this Office/Project.': 'オフィス/プロジェクトにおける役割',
'The Role this person plays within this hospital.': '病院内における役割',
'The Role to which this Role reports.': 'この権限の報告先となる権限',
'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': '避難所登録は、避難所を追跡し、それらの詳細を蓄積します。避難所に関連付けられた人、利用可能なサービス等の他のモジュールと協業します。',
'The Shelter this Request is from (optional).': '要請を行った避難所(オプション)',
'The Shelter this person is checking into.': 'この人物がチェックインした避難所',
'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.': '地図を用いてレイヤを利用できる WMS サービスの GetCapabilities の URL。',
'The URL of your web gateway without the post parameters': 'ポストパラメータを指定しないWebゲートウェイのURL',
'The URL to access the service.': 'サービスにアクセスするためのURL',
'The Unique Identifier (UUID) as assigned to this facility by the government.': '政府UUID|政府がこの施設に割り当てている汎用一意識別子(UUID)。',
'The area is ': 'この地域は',
'The attribute within the KML which is used for the title of popups.': 'このKML属性はポップアップのタイトルに使われます。',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'KMLで定義されている属性はポップアップの本文に使用されます。(各属性ごとに半角スペースで分割して記載してください)',
'The body height (crown to heel) in cm.': '頭頂からかかとまでの身長(単位はcm)',
'The category of the Item.': 'この救援物資のカテゴリです',
'The contact person for this organization.': '団体の代表窓口',
'The country the person usually lives in.': 'この人物が普段の生活を営む国',
'The default policy for data import from this peer.': 'このデータ同期先からデータをインポートする際のデフォルト設定。',
'The descriptive name of the peer.': 'データ同期先のわかりやすい名称',
'The duplicate record will be deleted': '重複したレコードは削除されます',
'The entered unit links to this unit. For e.g. if you are entering m for meter then choose kilometer(if it exists) and enter the value 0.001 as multiplicator.': '入力した単位をこのユニットにリンクします。例えば、mをメートルとする場合、(存在するなら) kilometer を選択して、乗数に値 0.001 を入力します。',
'The first or only name of the person (mandatory).': '人物の苗字(必須)。 外国籍の方等については避難所等での管理上の主たる表記/順に従ってください。',
'The following modules are available': '利用可能なモジュールは以下のとおりです。',
'The hospital this record is associated with.': 'このレコードに関連のある病院。',
'The item is designated to be sent for specific project, population, village or other earmarking of the donation such as a Grant Code.': 'ある特定のプロジェクトや、人々、市町村への物資または、交付コード等のついた特定区域への寄付等のは物資は、送付されることになっています。',
'The language to use for notifications.': '通知に使用する言語',
'The language you wish the site to be displayed in.': 'このサイトを表示するための言語',
'The last known location of the missing person before disappearance.': '行方不明者が最後に目撃された場所',
'The length is ': '長さは',
'The list of Brands are maintained by the Administrators.': '銘柄一覧の整備は、管理者によって可能です',
'The list of Item categories are maintained by the Administrators.': '供給物資カテゴリの一覧は、管理者によってメンテナンスされています。',
'The name to be used when calling for or directly addressing the person (optional).': '電話をかける際など、直接連絡をとりたい場合に使われる名前(オプション)',
'The next screen will allow you to detail the number of people here & their needs.': '次の画面では、人数および必要な物資/サービスの詳細を確認できます。',
'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...': '次のスクリーンで、項目の詳細なリストと量を入力できる場合があります。',
'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': '元の物資一つと同じだけの、代替品の測定単位での数量',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': '表示している地図の周辺タイルをダウンロードする数。0は最初のページの読み込みがより早い事を意味し、数字を大きくすると視点をパンした際に表示がより早くなります。',
'The person at the location who is reporting this incident (optional)': '現地からこのインシデントを報告した人物(オプション)',
'The person reporting about the missing person.': '行方不明者情報の提供者。',
'The person reporting the missing person.': '行方不明者を報告した人',
'The post variable containing the phone number': '電話番号を含む post 変数',
'The post variable on the URL used for sending messages': 'メッセージ送信に使用するURLのPOST変数',
'The post variables other than the ones containing the message and the phone number': 'メッセージや電話番号以外を含むpost変数',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'モデムが接続されているシリアルポート - Linuxでは /dev/ttyUSB0 等、Windowsでは com1, com2 等',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': '要求を満たすためアクセスしていた別のサーバーからの応答がありませんでした。',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': '要求を満たすためアクセスしていた別のサーバーから不正な応答が返ってきました。',
'The simple policy allows anonymous users to Read & registered users to Edit. The full security policy allows the administrator to set permissions on individual tables or records - see models/zzz.py.': 'シンプルポリシーでは、匿名ユーザーによるデータの閲覧、および、登録ユーザーによる編集が許可されます。完全版ポリシーでは、個々のテーブルやレコードに対して管理権限を設定することができます。詳細はmodels/zzz.pyを参照してください。',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': '件名のイベントはこれ以上の脅威や懸案事項を引き起こすことはありません。よって、<instruction>には、今後実施すべきアクションが記述されていません。',
'The time difference between UTC and your timezone, specify as +HHMM for eastern or -HHMM for western timezones.': 'あなたのタイムゾーンとUTCとの差を、東では+HHMMで、西では-HHMMで指定してください',
'The title of the WMS Browser panel in the Tools panel.': '[ツール]パネルのWMS Browserパネルのタイトル',
'The token associated with this application on': 'このアプリケーションが関連づけられているトークン',
'The unique identifier of the peer. Leave blank if the peer is no Sahana Eden instance, it will be auto-assigned in that case.': '一意のデータ同期先識別子です。データ同期先がSahana Edenシステムではない場合は、空白にしておくことで自動的に割り当てが行われます。',
'The unique identifier which identifies this instance to other instances.': 'このインスタンスを他のインスタンスと区別するための固有識別子',
'The way in which an item is normally distributed': '物資が配給される際の通常経路',
'The weight in kg.': '重量(単位:kg)',
'The': ' ',
'Theme Details': 'テーマの詳細',
'Theme added': 'テーマを追加しました',
'Theme deleted': 'テーマを削除しました',
'Theme updated': 'テーマを更新しました',
'Theme': 'テーマ',
'Themes': 'テーマ',
'There are errors': 'エラーが発生しました',
'There are multiple records at this location': 'このロケーションに複数のレコードが存在します',
'There are not sufficient items in the Inventory to send this shipment': 'この輸送を開始するために十分な量の物資が備蓄されていません',
'There is no address for this person yet. Add new address.': 'この人物の住所がまだありません。新しい住所を入力してください',
'There was a problem, sorry, please try again later.': '問題が発生しています。すみませんが、時間を置いてからやり直してください。',
'These are settings for Inbound Mail.': '電子メール受信箱の設定です',
'These are the Incident Categories visible to normal End-Users': '普通のユーザーが見ることができるインシデント一覧です',
'These are the default settings for all users. To change settings just for you, click ': 'これらは、全てのユーザーのデフォルト設定です。個人用の設定を変更するには、以下をクリックしてください。',
'These need to be added in Decimal Degrees.': 'これらは、十進角で追加する必要があります。',
'They': 'それら',
'This Group has no Members yet': 'メンバはまだ登録されていません',
'This Team has no Members yet': 'メンバはまだ登録されていません',
'This appears to be a duplicate of ': 'これは、以下のものと重複しているようです。',
'This can either be the postal address or a simpler description (such as `Next to the Fuel Station`).': '住所か、あるいは簡単な記述(ガソリンスタンドの隣、など)を記載しています。',
'This email address is already in use': 'このメールアドレスは使用されています',
'This file already exists on the server as': 'このファイルは別の名前でサーバに既に存在しています : ',
'This form allows the administrator to remove a duplicate location.': '管理者はこのフォームを使うことで、重複したロケーションデータを削除できます。',
'This is the way to transfer data between machines as it maintains referential integrity.': '参照整合性を保ちつつ、端末間でデータを転送する方法が記載されています。',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': '参照整合性を保ちつつ、端末間でデータを転送する方法が記載されています。...重複したデータは最初に手動で削除する必要があります。',
'This might be due to a temporary overloading or maintenance of the server.': 'サーバーが一時的に過負荷状態になっているか、あるいはメンテナンスを行っています。',
'This page shows you logs of past syncs. Click on the link below to go to this page.': '過去に行ったデータ同期履歴を表示します。以下のリンクをクリックしてください。',
'This screen allows you to upload a collection of photos to the server.': 'この画面では、複数の画像をサーバーにアップロードすることができます。',
'This shipment has already been received.': '輸送が開始され、物資が受領されました',
'This shipment has already been sent.': '輸送が開始され、送付されました',
'This shipment has not been received - it has NOT been canceled because it can still be edited.': 'この輸送は受領されていません。 - まだ編集可能であり、キャンセルされてはいません',
'This shipment has not been sent - it has NOT been canceled because it can still be edited.': '輸送はまだ開始されていませんが、キャンセルされてはいません。編集可能です。',
'This shipment will be confirmed as received.': 'この輸送された物資は、受信済み扱いになります',
'This value adds a small mount of distance outside the points. Without this, the outermost points would be on the bounding box, and might not be visible.': 'この値はその地点の外側までの距離の小さなマウントを追加します。この値が無い場合は、一番外側の地点が境界ボックスになり、表示されない可能性があります。',
'This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. After the map is displayed, it can be zoomed as desired.': 'この値はこの地域を表示する時に使う最小の幅と高さを示します。この値がない場合、ある単一の地点を表示するときにその周辺の範囲は表示されません。地図が表示された後では、好きな大きさに拡大・縮小できます。',
'Thunderstorm': '雷雨',
'Thursday': '木曜日',
'Ticket Details': 'チケットの詳細',
'Ticket ID': 'チケットID',
'Ticket added': 'チケットを追加しました',
'Ticket deleted': 'チケットを削除しました',
'Ticket updated': 'チケットを更新しました',
'Ticket': 'チケット',
'Ticketing Module': 'チケット発行モジュール',
'Tickets': 'チケット',
'Tilt-up concrete': 'ティルトアップ式コンクリート',
'Timber frame': '木造',
'Time needed to collect water': '水の確保に必要な時間',
'Time of Request': '要求発生時刻',
'Timeline Report': 'タイムラインレポート',
'Timeline': 'タイムライン',
'Timestamp': 'タイムスタンプ',
'Title': 'タイトル',
'To Location': '送付先ロケーション',
'To Organization': '送付先団体',
'To Person': '送付先人物情報',
'To Site': '送付先サイト',
'To begin the sync process, click the button on the right => ': '右のボタンを押すと、データ同期が開始されます。',
'To begin the sync process, click this button => ': 'このボタンを押すと、データ同期が開始されます。=>',
'To create a personal map configuration, click ': '個人用の地図設定を作成するにはクリックしてください',
'To delete': '削除する側',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'OpenStreetMapを編集する際は、models/000_config.pyで定義されている設定を編集してください',
'To submit a new job, use the': 'jobを新規送信するには、以下を使用してください。',
'To variable': '変数に',
'To': ' ',
'Tools': 'ツール',
'Tornado': '竜巻',
'Total # of Beneficiaries Reached ': '支援が到達した受益者の合計数 ',
'Total # of Target Beneficiaries': '受益対象者の合計人数',
'Total # of households of site visited': '訪問した世帯数',
'Total Beds': '合計ベッド数',
'Total Beneficiaries': '受益者の総数',
'Total Cost per Megabyte': 'メガバイト毎の合計費用',
'Total Cost per Minute': '一分毎の合計費用',
'Total Households': '総世帯数',
'Total Monthly Cost': '月額総計',
'Total Monthly Cost: ': '月毎の費用の合計: ',
'Total Monthly': '月ごとの合計',
'Total One-time Costs': '1回毎の費用総計',
'Total Persons': '合計者数',
'Total Recurring Costs': '経常費用総計',
'Total Unit Cost': '単価合計',
'Total Unit Cost: ': '単価合計: ',
'Total Units': '総数',
'Total gross floor area (square meters)': '延面積(平方メートル)',
'Total number of beds in this hospital. Automatically updated from daily reports.': 'この病院のベッド数総計。日時レポートにより、自動的に更新されます。',
'Total number of houses in the area': 'この地域の家屋総数',
'Total number of schools in affected area': '被災地内の学校総数',
'Total population of site visited': '訪問地域の総人口数',
'Total': '合計数',
'Totals for Budget:': '予算の合計:',
'Totals for Bundle:': 'Bundleの合計:',
'Totals for Kit:': 'Kitの合計:',
'Tourist Group': '旅行者グループ',
'Town': '町',
'Traces internally displaced people (IDPs) and their needs': '国内の避難している人(IDP)と彼らの必要としている物資/サービスの追跡',
'Tracing': '履歴の追跡',
'Track Details': '追跡情報の詳細',
'Track deleted': '追跡情報を削除しました',
'Track updated': '追跡情報を更新しました',
'Track uploaded': '追跡情報をアップデートしました',
'Track': '追跡情報',
'Tracking of Projects, Activities and Tasks': 'プロジェクトや支援活動、タスクの追跡',
'Tracking of basic information on the location, facilities and size of the Shelters': '避難所の基本情報(場所、施設、規模等)を追跡',
'Tracks requests for aid and matches them against donors who have pledged aid': '支援要請を管理し、救援物資の提供者とマッチングします。',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': '避難所のロケーション、配置、収容能力と被災者の状態を追跡します。',
'Tracks': 'トラック',
'Traffic Report': 'トラフィックレポート',
'Transfer': '輸送',
'Transit Status': '輸送状態',
'Transit': '移動中の立ち寄り',
'Transit. Status': '輸送状態',
'Transition Effect': '推移への影響',
'Transparent?': '透明ですか?',
'Transportation assistance, Rank': '移動 / 輸送支援、ランク',
'Trauma Center': '心的外傷センター',
'Travel Cost': '移動費',
'Tree': '樹木',
'Tropical Storm': '熱帯低気圧',
'Tropo Messaging Token': 'Tropo メッセージのトークン',
'Tropo Settings': 'Tropo 設定',
'Tropo Voice Token': 'Tropo 音声トークン',
'Tropo settings updated': 'Tropo 設定を更新しました',
'Truck': 'トラック',
'Try checking the URL for errors, maybe it was mistyped.': '入力したURLに間違いがないか確認してください。',
'Try hitting refresh/reload button or trying the URL from the address bar again.': 'ページの再読み込みを行うか、あるいはアドレスバーに直接URLを入力してみてください。',
'Try refreshing the page or hitting the back button on your browser.': 'ページを再読込するか、ブラウザの[戻る]ボタンを押してください。',
'Tsunami': '津波',
'Tuesday': '火曜日',
'Twitter ID or #hashtag': 'Twitter ID あるいは #ハッシュタグ',
'Twitter Settings': 'Twitter設定',
'Type of Construction': '建物の種類',
'Type of cause': '原因のタイプ',
'Type of latrines': 'トイレの種類',
'Type of place for defecation': '排泄用地の種類',
'Type of water source before the disaster': '災害発生前の水の確保方法',
'Type': 'タイプ',
'Types of health services available': '利用可能な健康サービスの種別',
'Types of water storage containers available': '利用可能な水貯蔵容器の種別',
'UID': 'ユニークID',
'UN': '国連',
'UTC Offset': 'UTC(世界標準時刻)との差',
'Unable to parse CSV file!': 'CSVファイルをパースできません。',
'Understaffed': '人員不足',
'Unidentified': '詳細不明',
'Unit Bed Capacity': 'ベッド収容数',
'Unit Cost': '単価',
'Unit Details': '単位の詳細',
'Unit Name': '単位名',
'Unit Set': '単位の設定',
'Unit Short Code for e.g. m for meter.': '単位の略称、例えばメートルはmと表記。',
'Unit added': '単位を追加しました',
'Unit deleted': '単位を削除しました',
'Unit of Measure': '1個口の内訳',
'Unit updated': '単位を更新しました',
'Unit': '単位',
'Units of Measure': '測定単位',
'Units': '単位',
'Unknown Peer': '登録に無いデータ同期先',
'Unknown type of facility': '施設規模不明',
'Unknown': '不明',
'Unreinforced masonry': '補強されていない石造建築物',
'Unresolved Conflicts': '未解決のデータ競合',
'Unsafe': '危険な',
'Unselect to disable the modem': 'モデムを無効化するにはチェックを外す',
'Unsent': '未送信',
'Unsupported data format!': 'サポートされていないデータフォーマットです。',
'Unsupported method!': 'サポートされていないメソッドです。',
'Unsupported method': 'サポートされていないメソッドです',
'Update Activity Report': '支援活動レポートの更新',
'Update Cholera Treatment Capability Information': 'コレラ対策能力情報を更新',
'Update Import Job': 'Import Jobの更新',
'Update Request': '支援要請を更新',
'Update Service Profile': 'サービスプロファイルの更新',
'Update Task Status': 'タスク状況の更新',
'Update Unit': '単位の更新',
'Update if Master': 'マスターサイトなら更新する',
'Update if Newer': '新しいものがあれば更新する',
'Update your current ordered list': '現在の順序付きリストの更新',
'Update': '更新',
'Upload Photos': '写真のアップロード',
'Upload Spreadsheet': 'スプレッドシートのアップロード',
'Upload Track': '追跡情報のアップロード',
'Upload a Spreadsheet': 'スプレッドシートをアップロード',
'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': '画像ファイルをアップロード(bmp,gif,jpeg,png) 最大300x300ピクセル',
'Upload an image file here.': '画像ファイルをここにアップロードしてください',
'Upload an image, such as a photo': '写真などのイメージをアップロードしてください',
'Upload': 'アップロード',
'Urban Fire': '都市火災',
'Urban area': '市街地',
'Urdu': 'ウルドゥー語',
'Urgent': '緊急',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': '複雑なクエリを構築するには、ANDは (...)&(...) を、ORは (...)|(...) を、NOTは ~(...) を使用してください。',
'Use default': 'デフォルト値を使用',
'Use these links to download data that is currently in the database.': 'これらのリンクを使用して、現在データベースにあるデータをダウンロードします。',
'Use this space to add a description about the Bin Type.': 'Bin Typeに関する説明は、このスペースに記載してください。',
'Use this space to add a description about the site location.': 'このスペースを使って、サイトの位置の説明を追加してください。',
'Use this space to add a description about the warehouse/site.': '倉庫/Siteに関する説明は、このスペースに記載してください。',
'Use this space to add additional comments and notes about the Site/Warehouse.': 'Site/倉庫に関する追加情報を記載するには、このスペースを使用してください。',
'Used to import data from spreadsheets into the database': 'スプレッドシートからデータベースにデータをインポートするために使われます',
'User %(first_name)s %(last_name)s Approved': '%(first_name)s %(last_name)s のユーザー登録が承認されました',
'User %(id)s Logged-in': 'ユーザー %(id)s がログインしています',
'User %(id)s Logged-out': 'ユーザー %(id)s がログアウトしました',
'User %(id)s Profile updated': 'ユーザ %(id)s のプロファイルを更新しました',
'User %(id)s Registered': 'ユーザー%(id)sを登録しました',
'User Account has been Disabled': 'ユーザアカウントが無効になっています',
'User Details': 'ユーザーの詳細',
'User ID': 'ユーザーID',
'User Management': 'ユーザー管理',
'User Profile': 'ユーザープロファイル',
'User Requests': 'ユーザー要求',
'User Updated': 'ユーザーを更新しました',
'User added': 'ユーザーを追加しました',
'User already has this role': 'この権限のあるユーザー',
'User deleted': 'ユーザーを削除しました',
'User updated': 'ユーザーを更新しました',
'User': 'ユーザー',
'Username for authentication at the peer. Note that only HTTP Basic authentication is supported.': 'データ同期先との認証に使うユーザ名。HTTPベーシック認証のみサポートしています。',
'Username': 'ユーザー名',
'Users removed': 'ユーザーを削除しました',
'Users': 'ユーザー',
'Usual food sources in the area': 'この地域の普段の食料調達方法',
'Utilities': 'ユーティリティ',
'Utility, telecommunication, other non-transport infrastructure': 'ユーティリティ、通信、その他のインフラ設備(交通以外)',
'Vacancies': '欠員',
'Value': '値',
'Various Reporting functionalities': '多種多様な報告を行う機能',
'Vehicle Crime': '車両犯罪',
'Vehicle Types': '車両の種別',
'Vehicle': '車両',
'Vendor': 'ベンダー',
'Verification Email sent - please check your email to validate. If you do not receive this email please check you junk email or spam filters': 'メールアドレス確認用のメールを送信しました。メールに記載された確認用URLにアクセスしてください。もしメールが届かない場合迷惑メールフォルダに入ってしまっている可能性がありますのでご確認ください。',
'Verification Status': '認証ステータス',
'Verified': '認証済み',
'Verified?': '認証(ログイン)できません.メールアドレス・パスワードを確認してください.',
'Verify Password': 'パスワード再確認',
'Verify password': 'パスワードの確認',
'Version': 'バージョン',
'Very High': '非常に高い',
'View Alerts received using either Email or SMS': '電子メールまたはSMSで受信したアラートの閲覧',
'View Fullscreen Map': '地図をフルスクリーン表示',
'View Image': '画像の閲覧',
'View On Map': '地図上で閲覧',
'View Outbox': '送信箱の表示',
'View Picture': '写真の表示',
'View Requests for Aid': '援助要請を閲覧',
'View Settings': '設定の確認',
'View Tickets': 'チケットの閲覧',
'View and/or update their details': '詳細の閲覧および更新',
'View or update the status of a hospital.': '病院のステータスの閲覧と更新',
'View pending requests and pledge support.': '処理中の要求と寄付サポートの閲覧',
'View the hospitals on a map.': '病院の場所を地図上で表示します。',
'Village Leader': '村長',
'Village': '村落',
'Visible?': '表示しますか?',
'Visual Recognition': '画像認識',
'Volcanic Ash Cloud': '火山灰雲',
'Volcanic Event': '火山活動',
'Volume - Fluids': '流量 - 液状物',
'Volume - Solids': '流量 - 固形物',
'Volume Capacity': '容量',
'Volume/Dimensions': '容量/外形寸法',
'Volunteer Data': 'ボランティアデータ',
'Volunteer Details': 'ボランティアの詳細',
'Volunteer Management': 'ボランティアの管理',
'Volunteer Project': 'ボランティアプロジェクト',
'Volunteer Registration': 'ボランティア登録',
'Volunteer Registrations': 'ボランティア登録',
'Volunteer Request': 'ボランティア要請',
'Volunteer added': 'ボランティアを追加しました',
'Volunteer deleted': 'ボランティアを削除しました',
'Volunteer details updated': 'ボランティアの詳細を更新しました',
'Volunteer registration added': 'ボランティア登録を追加しました',
'Volunteer registration deleted': 'ボランティア登録を削除しました',
'Volunteer registration updated': 'ボランティア登録を更新しました',
'Volunteers were notified!': 'ボランティアに通知されました',
'Volunteers': 'ボランティア',
'Vote': '投票',
'Votes': '投票',
'WASH': '除染',
'WMS Browser Name': 'WMSブラウザ名',
'WMS Browser URL': 'WMSブラウザのURL',
'Walking Only': '徒歩のみ',
'Walking time to the health service': '医療サービス提供所までの徒歩時間',
'Wall or other structural damage': '壁やその他の構造の損傷',
'Warehouse Details': '倉庫の詳細',
'Warehouse Item Details': '倉庫物資の詳細',
'Warehouse Item added': '倉庫物資を追加しました',
'Warehouse Item deleted': '倉庫内物資を削除しました',
'Warehouse Item updated': '倉庫物資を更新しました',
'Warehouse Items': '倉庫に備蓄中の物資',
'Warehouse Management': '倉庫管理',
'Warehouse added': '倉庫を追加しました',
'Warehouse deleted': '倉庫を削除しました',
'Warehouse updated': '倉庫を更新しました',
'Warehouse': '倉庫',
'Warehouse/Sites Registry': '倉庫/Siteの登録',
'Warehouses': '倉庫',
'WatSan': '給水と衛生',
'Water Level still high?': '水位はまだ高いままですか?',
'Water Sanitation Hygiene': '水質衛生',
'Water collection': '給水',
'Water gallon': 'ガロン容器',
'Water storage containers available for HH': '世帯用の水貯蔵容器が利用可能である',
'Water storage containers in households': '世帯の水貯蔵容器',
'Water storage containers sufficient per HH': '世帯毎に1つ以上の水貯蔵容器が利用可能である',
'Water supply': '水の供給',
'Water': '水',
'Waterspout': '水上竜巻',
'Way Bill(s)': '移動費',
'We have tried': '私達は試行しました',
'Website': 'ウェブサイト',
'Wednesday': '水曜日',
'Weekly': '週次',
'Weight (kg)': '体重 (kg)',
'Weight': '体重',
'Welcome to the Sahana Eden Disaster Management Platform': 'Sahana Eden -災害情報管理プラットフォームへようこそ',
'Welcome to the Sahana Eden Disaster Management System': 'Sahana Eden -災害情報管理システムへようこそ',
'Welcome to the Sahana Portal at ': 'Sahana ポータルへようこそ: ',
'Welcome to the Sahana Portal at': 'Sahanaポータルにようこそ',
'Well-Known Text': '既知の文章',
'Were basic medical supplies available for health services prior to the disaster?': '災害前に、基本的な医療サービスが機能していたかどうかを記載してください',
'Were breast milk substitutes used prior to the disaster?': '災害前に利用していた母乳代用品の入手源を記載してください',
'Were there cases of malnutrition in this area prior to the disaster?': 'この地域で、災害前に栄養失調が発生していたかどうかを記載してください',
'Were there health services functioning for the community prior to the disaster?': '災害前、共同体でヘルスサービスが機能していたかどうかを記載してください',
'Were there reports or evidence of outbreaks of any micronutrient malnutrition disorders before the emergency?': '災害発生前から栄養失調の報告があった、あるいはその証跡があったかどうかを記載します',
'What are the factors affecting school attendance?': '生徒の就学状況に影響する要因を記載してください',
'What are your main sources of cash to restart your business?': 'ビジネス再開に必要な現金の、主な調達源を記載してください',
'What are your main sources of income now?': '現在の主な収入源を記載してください',
'What do you spend most of your income on now?': '現在の主な支出要因を記載してください',
'What food stocks exist? (main dishes)': '備蓄食料の種類(主皿)',
'What food stocks exist? (side dishes)': '備蓄食料の種類(副皿)',
'What is the estimated total number of people in all of these institutions?': '上記施設内の居住者を総計すると、おおよそどの程度になるかを記載してください',
'What is your major source of clean water for daily use (ex: washing, cooking, bathing)?': '洗濯、料理、入浴など、日常生活で必要となる清潔な水の、主な入手源を記載してください',
'What is your major source of drinking water?': '飲料水の主な入手源を記載してください',
'What type of latrines are available in the village/IDP centre/Camp?': '村落/IDPセンター/仮泊施設内で利用可能なトイレのタイプは?',
'What type of salvage material can be used from destroyed houses?': '全壊した家屋から回収した部材が流用可能な用途を記載します',
'What type of salvage material can be used from destroyed schools?': '倒壊した校舎において、再利用できる部材は何ですか?',
'What types of health problems do children currently have?': '小児が現在抱えている健康問題のタイプを記載してください',
'What types of health problems do people currently have?': '住人たちが現在抱えている健康問題のタイプを記載してください',
'What types of health services are still functioning in the affected area?': '現在、被災地で機能しているヘルスサービスの種類を選択してください',
'What types of household water storage containers are available?': '世帯で使っている水貯蔵容器のタイプを選択してください',
'What were your main sources of income before the disaster?': '災害発生前の主な収入源を選択してください',
'Wheat': '小麦',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value adds a small mount of distance outside the points. Without this, the outermost points would be on the bounding box, and might not be visible.': '地図上に複数のポイントが表示されている場合、それらポイント全てを表示できる縮尺で地図が表示されます。この値は、それらポイントの外に余白を付与します。指定しない場合、表示領域とポイントが重なり、表示範囲から外れてしまう可能性があります。',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. After the map is displayed, it can be zoomed as desired.': '地図上に複数のポイントが表示されている場合、それらポイント全てを表示できる縮尺で地図が表示されます。この値は、地域を表示する際の横幅と縦高の最小値となります。指定しない場合、対象の一点のみ表示され、その周辺は表示されません。一度表示された後であれば、縮尺の変更が可能です。',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points.': '地点の集合にフォーカスを合わせた地図を表示すると、この地図はそれら地点の集合を表示できる範囲に拡大・縮小します',
'When reports were entered': 'いつ報告が入力されたか',
'Where are the alternative places for studying?': '学校以外で、学習が可能な施設の種類を選択してください',
'Where are the separated children originally from?': '保護者が居ない児童の住居地はどこですか?',
'Where do the majority of people defecate?': 'トイレはどこで済ませますか?',
'Where have the children been sent?': '疎開先の情報がある場合は記載してください',
'Where is solid waste disposed in the village/camp?': '村落/仮泊施設内での、固形廃棄物処理場所を記載してください',
'Whether this is a Sahana Eden, Sahana Agasti, Ushahidi or Other instance.': 'Sahana Eden, Sahana Agasti, Ushahidi あるいは他のシステムの場合も',
'Whiskers': 'ほおひげ',
'Who is doing what and where': '誰がどこで何をしているか',
'Who usually collects water for the family?': '日頃、家族のために水を採取しているのは誰か?',
'Width': '横幅',
'Wild Fire': '野火',
'Wind Chill': '風速冷却',
'Window frame': 'ウィンドウ枠',
'Winter Storm': '吹雪',
'Without mentioning any names or indicating anyone, do you know of any incidents of violence against women or girls occuring since the disaster?': '災害発生後、女性や少女に対する暴力事件が発生したかどうかを記載してください。具体的な人名や場所を記載する必要はありません',
'Women of Child Bearing Age': '出産年齢の女性',
'Women participating in coping activities': '女性が災害対応に従事',
'Women who are Pregnant or in Labour': '妊娠中、あるいは労働中の女性',
'Womens Focus Groups': '女性のフォーカスグループ(Womens Focus Groups)',
'Wooden plank': '木製板',
'Wooden poles': '木製の柱',
'Working hours end': '作業終了時刻',
'Working hours start': '作業開始時刻',
'Working or other to provide money/food': '金銭/食料調達のため就労、あるいは活動を実施',
'Would you like to display the photos on the map?': '地図上に写真を表示しますか?',
'X-Ray': 'X線',
'Year built': '建築年',
'Year of Manufacture': '製造年',
'Yellow': '黄色',
'Yes': 'はい',
'You are a recovery team?': 'あなたが遺体回収チームの場合',
'You are attempting to delete your own account - are you sure you want to proceed?': '自分のアカウントを削除しようとしています。本当に削除しますか?',
'You are currently reported missing!': 'あなたが行方不明者として登録されています!',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': '同期に関する設定は、「設定」セクションで行うことができます。設定には、UUID(unique identification number)、同期スケジュール、ビーコンサービス等が含まれます。同期設定は以下のリンクから変更可能です。',
'You can click on the map below to select the Lat/Lon fields': '下の地図をクリックすることで、緯度経度情報を入力できます',
'You can click on the map below to select the Lat/Lon fields:': '緯度と経度の設定は、以下の地図をクリックすることでも可能です:',
'You can click on the map to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.': '経度/緯度の項目は、地図を選択することでも登録可能です。経度は東西方向(横)の座標軸です。緯度は南北方向(上下)の座標軸です。赤道ではゼロ、北半球ではプラス、南半球ではマイナスとなります。経度は、子午線(グリニッジ標準時)をゼロとして、東(ヨーロッパ、アジア)がプラスとなります。西(大西洋、アメリカ)がマイナスです。10進法で記入してください。',
'You can select the Draw tool (': '選択可能な描画ツール (',
'You can select the Draw tool': 'ドローツールを選択できます',
'You can set the modem settings for SMS here.': 'SMS用モデムの設定をすることができます。',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': '変換ツールを使うことで、GPS、あるいはDegrees/Minutes/Seconds形式からデータを変換できます。',
'You do no have permission to cancel this received shipment.': '輸送の受け取りをキャンセルする権限がありません',
'You do no have permission to cancel this sent shipment.': '輸送の送付をキャンセルする権限がありません',
'You do no have permission to make this commitment.': 'このコミットを作成する権限がありません',
'You do no have permission to receive this shipment.': 'この輸送を受け取る権限がありません',
'You do no have permission to send this shipment.': 'この輸送を開始する権限がありません',
'You do not have permission for any site to add an inventory item.': 'あなたには他の場所から在庫アイテムを追加する権限はありません',
'You do not have permission for any site to make a commitment.': 'どの場所にも受け入れを示す権限が有りません。',
'You do not have permission for any site to make a request.': '支援要請を作成する権限がありません',
'You do not have permission for any site to perform this action.': 'この操作をするための権限がありません',
'You do not have permission for any site to receive a shipment.': '物資の輸送を受け取る権限がありません',
'You do not have permission for any site to send a shipment.': '物資の輸送をする権限がありません',
'You do not have permission to send a shipment from this site.': 'あなたはこのサイトから物資を送る権限はありません',
'You have a personal map configuration. To change your personal configuration, click ': '個人用地図設定があります。あなたの個人用地図設定を編集するにはクリックしてください',
'You have found a dead body?': '遺体を発見しましたか?',
'You must be logged in to register volunteers.': 'ボランティアとして登録するには、ログインする必要があります',
'You must be logged in to report persons missing or found.': '行方不明者の発見状況を登録するには、ログインする必要があります。',
'You must provide a series id to proceed.': '処理を行うにはシリーズIDを指定する必要があります。',
'You should edit OpenStreetMap settings in models/000_config.py': 'OpenStreetMapの設定を変更するには、models/000_config.pyを編集してください',
'You should edit Twitter settings in models/000_config.py': 'Twitter設定を変更するには、models/000_config.pyを編集してください。',
'Your Account is Approved - you can now login\n %s%s/': '利用者登録が完了しました。リンク先のログインページで あなたが登録したユーザー名とパスワードを入力してログインしてください。\n %s%s/',
'Your Account is Approved': '利用者登録が完了しました',
'Your action is required. Please approve user %s asap: ': 'あなたの行動が要求されています。ただちにユーザー %s を承認してください。',
'Your action is required. Please approve user': 'ユーザーから承認の依頼が届いています。承諾お願いします',
'Your current ordered list of solution items is shown below. You can change it by voting again.': '解決項目の順番付きリストは以下です。再度投票することによって変更可能です。',
'Your post was added successfully.': '投稿が成功しました',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'あなたがお使いのシステムには、ユニークID (UUID) が割り当てられており、このIDを用いて他のコンピュータがあなたのシステムを同定します。あなたの UUID を閲覧するには、同期 -> 同期設定と進んでください。そのページでは、他の設定を閲覧することもできます。',
'ZIP/Postcode': '郵便番号',
'Zinc roof': 'トタン屋根',
'Zoom In: click in the map or use the left mouse button and drag to create a rectangle': 'ズームイン: マップをクリックするか、拡大したい場所をドラッグで選択してください',
'Zoom Levels': 'ズームレベル',
'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle': 'ズームアウト: マップをクリックするか、拡大したい地点をマウスの左ボタンでドラッグしてください',
'Zoom to Current Location': '現在の場所を拡大',
'Zoom to maximum map extent': 'マップの最大範囲までズーム',
'Zoom': 'ズーム',
'act': '活動',
'active': 'アクティブ',
'added': '追加しました',
'all records': '全てのレコード',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'では、スタッフや設備、それらの管理コストまで含めた予算編成を行ないます。',
'allows for creation and management of surveys to assess the damage following a natural disaster.': '自然災害による被災影響調査の作成、および管理を許可する',
'an individual/team to do in 1-2 days': '個人やチーム単位で、1-2日中に実施するべき事柄をさします。',
'approved': '承認された',
'assigned': '担当者・部門が確定',
'average': '平均的',
'black': '黒',
'blond': 'ブロンド',
'blue': '青',
'brown': '茶色',
'business_damaged': 'ビジネスへの損害',
'by': ' ',
'c/o Name': 'c/o 名前',
'can be used to extract data from spreadsheets and put them into database tables.': 'スプレッドシートからデータを抽出して、データベーステーブルに挿入できます。',
'can use this to identify the Location': 'ここからロケーションの特定が可能です',
'caucasoid': '白人',
'check all': '全てチェック',
'click for more details': '詳細はクリック',
'collateral event': '付帯イベント',
'completed': '完了',
'confirmed': '確認済',
'consider': '考慮',
'criminal intent': '犯罪目的',
'crud': '性病',
'curly': '縮れ毛',
'currently registered': '登録済み',
'daily': '日次',
'dark': '濃い',
'data uploaded': 'データがアップロードされました',
'database %s select': 'データベース%sの選択',
'database': 'データベース',
'db': 'データベース',
'delete all checked': 'チェックされた項目を全て削除',
'deleted': '削除されました',
'denied': '拒否されました',
'description': '説明',
'design': 'デザイン',
'diseased': '罹患中',
'displaced': '避難中',
'divorced': '離別',
'done!': '完了!',
'duplicate': '重複',
'edit': '編集',
'editor': '編集者',
'eg. gas, electricity, water': 'ガス、電気、水道など',
'embedded': '埋め込まれた',
'enclosed area': '専用地',
'export as csv file': 'csvファイルとしてエクスポート',
'fat': '肥満',
'feedback': '現地からの要望',
'female': '女性',
'final report': '最終報告書',
'flush latrine with septic tank': '浄化槽つき水洗トイレ',
'follow-up assessment': 'アセスメントのフォローアップ',
'food_sources': '食糧供給源',
'forehead': 'ひたい',
'form data': 'フォームデータ',
'from Twitter': 'Twitter経由',
'full': '完全',
'getting': '取得中',
'green': '緑',
'grey': '灰色',
'here': 'ここ',
'high': '高い',
'hourly': '1時間毎',
'households': '世帯情報',
'human error': 'ヒューマンエラー',
'identified': '身元確認済み',
'ignore': '無視する',
'immediately': '即応',
'in Deg Min Sec format': 'Deg Min Sec フォーマットで',
'in GPS format': 'GPS フォーマットで',
'in Inv.': '個',
'inactive': '休止中',
'initial assessment': '初期アセスメント',
'injured': '負傷中',
'insert new %s': '%sの新規挿入',
'insert new': '新規挿入',
'invalid request': '無効な要求',
'invalid': '無効',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'は、災害犠牲者とその家族、特に身元の判明した遺体、避難者、難民など、全ての情報を集約可能な中央オンラインレポジトリです。名前、年齢、連絡先番号、IDカード番号、避難した場所、その他の詳細が記録されます。人物の写真や指紋をアップロードすることができます。効率性と利便性のため、人物をグループ分けすることができます。',
'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': 'は、支援団体による救援活動や復興プロジェクトの作業を管理するために、複数のサブモジュールを組み合わせて高度な機能を実現しようと考えており、物資の受け入れ、貯蔵設備の管理、必要な物資の記録、サプライチェーン・マネジメント、輸送管理、調達、財務記録、その他様々な資産やリソースの管理といった機能を備えています',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': '全ての入荷伝票を追跡することで、カテゴリー分けや適切な実行場所への配分を行う',
'kilogram': 'キログラム',
'kit': 'キット',
'latrines': 'トイレ',
'leave empty to detach account': 'アカウントを取り外すには空欄のままにしてください',
'legend URL': '凡例の URL',
'light': '淡い',
'liter': 'リットル',
'locations': 'ロケーション',
'login': 'ログイン',
'long': '長い',
'long>12cm': '12cm以上',
'low': '低い',
'male': '男性',
'manual': 'マニュアル',
'married': '既婚',
'max': '最大',
'maxExtent': '最大範囲',
'maxResolution': '最高分解能',
'medium': '中',
'medium<12cm': '12cm未満',
'menu item': 'メニューアイテム',
'message_id': 'メッセージID',
'meter cubed': '立方メートル',
'meter': 'メートル',
'meters': 'メートル',
'min': '最小',
'module allows the an inspector to fill information for buildings.': 'モジュールでは、建築物の調査情報を記録できます。',
'module allows the site administrator to configure various options.': 'モジュールを使うことで、サイト管理者が様々な項目を設定する際の手間を省くことができます。',
'module helps monitoring the status of hospitals.': 'モジュールでは、病院の状態をモニタできます。',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'モジュールでは、オンラインマッピング(GIS)を使用して、現在の災害地域の状態を俯瞰することができます。',
'mongoloid': '黄色人種',
'more': 'その他の項目 ',
'n/a': 'データなし',
'natural hazard': '自然災害',
'negroid': '黒人',
'never': 'まだ',
'new ACL': '新規ACL',
'new record inserted': '新規レコードを挿入しました',
'new': '新規登録',
'next 100 rows': '次の100行',
'no': ' ',
'none': 'なし',
'normal': '通常',
'not accessible - no cached version available!': 'アクセスできません - キャッシュされたバージョンがありません!',
'not accessible - using cached version from': 'アクセス不可 - キャッシュ版を使用しています',
'not specified': '未指定',
'num Zoom Levels': 'ズーム倍率',
'obsolete': '孤立中',
'on': ' ',
'once': '一度',
'open defecation': '野外',
'operational intent': '運用目的',
'or import from csv file': 'またはcsvファイルからインポート',
'other': 'その他',
'over one hour': '1時間以上',
'pack of 10': '10のパック',
'people': '居住者情報',
'piece': 'ピース(単位)',
'pit latrine': '穴掘りトイレ',
'pit': '堀穴',
'postponed': '実施を延期',
'preliminary template or draft, not actionable in its current form': '現行フォーム内で実用的でない予備テンプレートまたはドラフト',
'previous 100 rows': '前の100行',
'primary incident': '優先すべきインシデント',
'problem connecting to twitter.com - please refresh': 'twitter.comへの接続に問題が発生しました。再読込を行ってください',
'provides a catalogue of digital media.': 'デジタルメディアのカタログを提供します',
'record does not exist': 'レコードが存在しません',
'record id': 'レコードID',
'records deleted': 'レコードを削除しました',
'red': '赤い',
'reported': '報告済み',
'reports successfully imported.': 'レポートは正しくインポートできました',
'representation of the Polygon/Line.': 'Polygon/Lineの表現',
'retired': '終了',
'retry': '再試行',
'river': '河川',
'sack 20kg': '袋 20kg',
'sack 50kg': '袋 50kg',
'secondary effect': '副次効果',
'see comment': 'コメント参照',
'selected': '選択された',
'separated from family': '家族とはぐれた',
'separated': '別居',
'shaved': '坊主',
'shift_start': 'シフト開始',
'short': '小柄',
'short<6cm': '6cm未満',
'sides': '側面',
'sign-up now': '今すぐ登録',
'simple': '単純な',
'single': '独身',
'slim': 'やせ型',
'specify': '明記してください',
'staff': 'スタッフ',
'state location': 'ステートロケーション',
'state': '状態',
'straight': '直毛',
'suffered financial losses': '経済的損失',
'table': 'テーブル',
'table_name': 'テーブル名',
'tall': '大柄',
'technical failure': '技術的な原因',
'this': 'この',
'times and it is still not working. We give in. Sorry.': '回繰り返しましたが、処理を完了できません。ご迷惑をおかけしますが、処理を中止します。',
'to access the system': 'してシステムにアクセスしてください',
'ton': 'トン',
'tonsure': '剃髪',
'total': '合計',
'tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': '避難所を追跡し、それらの詳細を蓄積します。避難所に関連付けられた人、利用可能なサービス等の他のモジュールと協業します。',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': '実行中のPythonで tweepyモジュールが利用できません。Tropo以外でのTwitter機能利用で必要です',
'unable to parse csv file': 'csvファイルをパースできません。',
'unapproved': '承認されていない',
'uncheck all': 'チェックをすべて外す',
'unidentified': '詳細不明',
'uninhabitable = foundation and structure destroyed': '利用不可能 = 基礎構造や土台部分の破壊など',
'unknown': '不明',
'unspecified': 'その他',
'unverified': '未検証',
'updated': '更新しました',
'updates only': '更新のみ',
'urgent': '緊急',
'using default': '標準値を使用',
'verified': '確認済み',
'volunteer': 'ボランティア',
'wavy': '波状',
'weekly': '週次',
'white': '白',
'wider area, longer term, usually contain multiple Activities': '活動範囲が広く、長期的目標を有しており、複数の支援活動を包括します。',
'widowed': '死別',
'window': '窓',
'windows broken, cracks in walls, roof slightly damaged': '窓破損、壁にひび割れ、屋根の一部損傷',
'within human habitat': '人間の居住地域内',
'xlwt module not available within the running Python - this needs installing for XLS output!': '実行中のPythonでxlwtモジュールが利用できません。XLS出力に必要です。',
'yes': 'はい',
}
| sahana/Turkey | languages/ja.py | Python | mit | 353,215 | [
"VisIt"
] | aa8597cc01ff719bfa02c341a8103c40dc73993f3b6c7b5049b89c5b4e95cfd8 |
from flask import Flask, render_template, session, request, redirect
import random
import datetime
# Flask application instance for the gold game.
app = Flask(__name__)
# Secret key Flask uses to sign the client-side session cookie.
app.secret_key = 'gold_game_gg'
@app.route('/')
def index():
    """Render the home page, seeding session defaults on the first visit."""
    if 'gold' not in session:
        session['gold'] = 0
    if 'activities' not in session:
        session['activities'] = []
    return render_template('home.html')
# This will process the function to add gold then log the activity
@app.route('/process_money', methods = ['POST'])
def money_bags():
    """Apply one work action: roll the chosen job's payout, update the
    player's gold, and append a color-coded entry to the activity log."""
    jobs = {
        'farm': random.randint(10, 20),
        'cave': random.randint(5, 10),
        'house': random.randint(2, 5),
        'casino': random.randint(-50, 50)
    }
    job = request.form['job']
    if job in jobs:
        income = jobs[job]
        # .get guards against a POST made before '/' seeded the session.
        session['gold'] = session.get('gold', 0) + income
        if income >= 0:
            income_string = ['gained', '!', 'green']
        else:
            income_string = ['lost', '... Ouch...', 'red']
        timestamp = datetime.datetime.now()
        activity = "You went to the {} and {} {} gold{} ({})".format(job, income_string[0], income, income_string[1], timestamp)
        activity_dict = {
            'activity': activity,
            'color': income_string[2]
        }
        # Reassign the list instead of mutating it in place: Flask's cookie
        # session only detects changes on key assignment, so a bare
        # .append() would silently fail to persist the activity log.
        activities = session.get('activities', [])
        activities.append(activity_dict)
        session['activities'] = activities
    return redirect('/')
# Clears all game state (gold and activity log) and starts over.
@app.route('/reset', methods = ['POST'])
def reset_game():
    session.clear()
    return redirect('/')
# Run the Flask development server when executed directly (not imported).
if __name__ == '__main__':
    app.run(debug = True)
| jiobert/python | Preston_Kellen/Assignments/Ninja Gold/server.py | Python | mit | 1,474 | [
"CASINO"
] | bb0482ee3ed741e0325d4a25ea4cf2403cfa48195f13aea5e7aa75e3036620ad |
#!/usr/bin/env python
from __future__ import print_function
import timeit
import argparse
import os
import subprocess
from subprocess import Popen, PIPE
import sys
import shutil
import multiprocessing
# Package version string, reported in the log banner and the argparse description.
__version__ = "1.5.1"
###
# CLASSES
###
class Logger(object):
    """A convenient logging object.

    Prints output to a given log file and/or stdout.
    """
    def __init__(self, logfp=None, use_std_out=True):
        """Set up the logger.

        logfp: path to the log file, or None to disable file logging.
               Parent directories are created if needed and any existing
               file at the path is truncated.
        use_std_out: when True, also echo every message to stdout.
        """
        # note: if logfp directories don't exist, make them.
        if logfp is not None:
            outdir = os.path.abspath(os.path.dirname(os.path.realpath(logfp)))
            # Checks for output directory. Makes it if necessary.
            if not os.path.exists(outdir):
                os.makedirs(outdir)
            # Truncate any existing log so each run starts fresh; the
            # context manager guarantees the handle is closed.
            with open(logfp, 'w'):
                pass
        self.logfp = logfp
        self.use_std_out = use_std_out

    def log(self, msg):
        """Append msg (newline-terminated) to the log file and/or stdout."""
        if not msg.endswith('\n'):
            msg += '\n'
        if self.logfp is not None:
            # Open in append mode per call so the file is never left open
            # between messages (the original used bare open/close pairs).
            with open(self.logfp, 'a') as logf:
                logf.write(msg)
        if self.use_std_out:
            sys.stdout.write(msg)
###
# UTILITY METHODS
###
# Gets arguments from command line using argparse, instead of the deprecated optparse
# Takes an argparse object as a parameter
def get_args(p):
    """Attach all NINJA-OPS command-line options to parser ``p`` and parse.

    Returns the argparse.Namespace of parsed arguments.
    """
    p.add_argument("-i", "--input",
        type = str,
        default = None,
        metavar = '',
        help="Input fasta file (use \"input1.fna,input2.fna\" for paired-end) [required].")
    p.add_argument("-o", "--output",
        type = str,
        default = '.',
        metavar = '',
        help = "Output folder (default current directory)")
    p.add_argument("-b", "--database",
        type = str,
        default = 'greengenes97',
        metavar = '',
        help = "Name of database folder in ninja top-level directory; folder must contain bowtie2 index with basename the same as the folder, a taxonomy file named basename.taxonomy, and a ninja db map (output from ninja_prep) named basename.db [default %(default)s]")
    p.add_argument("-t", "--trim",
        type = int,
        default = -1,
        metavar = '',
        help = "Trim sequences to a specified number of bp, cutting off from their ends (default no trim)")
    p.add_argument("-T", "--trim2",
        type = int,
        default = -1,
        metavar = '',
        help = "Trim reverse paired-end reads to a different length than forward reads. If trim2 is specified, trim must be too. (default no trim)")
    p.add_argument("-s", "--similarity",
        type = float,
        default = 0.97,
        metavar = '',
        help = "Minimum fractional similarity - id - between query sequence and reference sequence [default %(default)s]")
    p.add_argument("-I", "--insert",
        type = int,
        default = 1400,
        metavar = '',
        help = "Maximum total length for paired-end matches. Set this as small as possible (e.g. 400 for 515F-806R primers) [default %(default)s, for 16S]")
    p.add_argument("-p", "--threads",
        type = int,
        default = multiprocessing.cpu_count(),
        metavar = '',
        help = "Number of threads/cores to run bowtie2 on [default %(default)s]")
    p.add_argument("-m", "--mode",
        type = str,
        default = 'max',
        metavar = '',
        help = "NINJA sensitivity mode: 'normal' (medium sensitivity), 'fast' (less sensitive), or 'max' (default; more sensitive, slower)")
    p.add_argument("-d", "--denoising",
        type = float,
        default = 1,
        help = "Discards all reads that appear fewer than d times. No denoising/compaction = 0; " + \
            " Read compaction = 1 (maps shorter reads to longer neighbor), Moderate denoising = 2 (throws out all singleton reads);" + \
            " Aggressive denoising = 6 (nearly guaranteed to eliminate all sequencing error - although not PCR error - in most data sets) [default %(default)s]")
    p.add_argument("-K", "--suppress_OTU_compaction",
        action = 'store_true',
        help = "Keeps all reported OTUs with random breaking of ties. This will produce many more OTUs but will more accurately represent the distribution of \"potential\" OTUs in the data. [default: break ties deterministically]")
    p.add_argument("-F", "--full_output",
        action = 'store_true',
        help = "Output files listing failed sequences, filtered sequences, and sequence mappings [default %(default)s]")
    p.add_argument("-l", "--legacy",
        action = 'store_true',
        help = "Output legacy (tab-delimited) QIIME OTU table [default %(default)s]")
    p.add_argument("-P", "--print_only",
        action = 'store_true',
        help = "Print commands only - do not run [default %(default)s]")
    p.add_argument("-S", "--suppress_stdout",
        action = 'store_true',
        help = "Suppress standard output [default %(default)s]")
    p.add_argument("-R", "--retain_intermediates",
        action = 'store_true',
        help = "Retain intermediate files [default %(default)s]")
    p.add_argument("-C", "--check_fasta",
        action = 'store_true',
        help = "Check fasta for correct formatting; otherwise assumes fasta is in QIIME-ready format [default %(default)s]")
    p.add_argument("-r", "--reverse_complement",
        action = 'store_true',
        help = "Reverses all sequence orientations [default %(default)s]")
    p.add_argument("-z", "--both_strands",
        action = 'store_true',
        help = "Forces both read orientations to be considered (slower, not recommended) [default %(default)s]")
    args = p.parse_args()
    return args
# Checks if args work. Takes args and parser as input
def check_args(args, p):
    """Validate the parsed argument dict; exits the process on bad input."""
    if args['input'] is None:
        p.print_help()
        sys.exit('\nPlease include an input sequences file in fasta format.')
    # trim2 only makes sense when a forward trim length was also given.
    if args['trim2'] != -1 and args['trim'] == -1:
        sys.exit('\nIf trim2 is specified you must also specify trim1 (set to large number for no trimming).')
# Checks if an input fasta file is formatted correctly for QIIME. Returns boolean.
# Looks for data or titles with multiple lines and improper characters in seqs.
# Makes sure first line is a header. Doesn't allow spaces in seqs.
def check_fasta(f, logger):
    """Return True when ``f`` (an iterable of lines) alternates strictly
    between one title line and one sequence line of A/T/G/C/N characters."""
    allowed = frozenset(['A', 'T', 'G', 'C', 'N', '\n'])
    awaiting_seq = False  # True right after a title line has been consumed
    for line_no, line in enumerate(f, start=1):
        if line[0] == ">":
            if awaiting_seq:
                logger.log("Warning: Multiline title in line " + str(line_no) + " of input fasta.")
                return False
            awaiting_seq = True
        else:
            # Checks if line is actually a title
            for ch in line:
                c = ch.upper()
                if c not in allowed:
                    logger.log('Warning: Unsupported sequence character "' + c + '" in line ' + str(line_no) + ' of input fasta.')
                    return False
            if not awaiting_seq:
                logger.log("Warning: Multiline sequence in line " + str(line_no) + " of input fasta.")
                return False
            awaiting_seq = False
    return True
# Generator for fasta files - returns [(name, seq)]
# Call using 'with open(file) as f'
def read_fasta(f):
    """Yield (title, sequence) tuples from an iterable of FASTA lines.

    The leading '>' is stripped from titles (trailing newline is kept);
    multi-line sequences are concatenated with whitespace stripped.
    """
    current_title = None
    current_seq = None
    for line in f:
        if line.startswith(">"):
            # Emit the previous record before starting a new one.
            if current_title is not None:
                yield (current_title, current_seq)
            current_title = line[1:]
            current_seq = ''
        else:
            current_seq += line.strip()
    # Preserve historical behavior: an input with no records yields None.
    if current_title is None:
        yield None
    else:
        yield (current_title, current_seq)
    return
# Takes a list of [(title, seq), ...] tuples and prints as a FASTA file to the given filename
def write_fasta(listOfTuples, fileName):
    """Write (title, sequence) tuples to ``fileName`` in FASTA format.

    A ".fna" extension is appended when fileName contains no dot, and a
    '>' is prepended to any title that lacks one.
    """
    if "." not in fileName:
        fileName += ".fna"
    lines = []
    for title, seq in listOfTuples:
        title = title.strip()
        if not title.startswith(">"):
            title = ">" + title
        lines.append(title + "\n")
        lines.append(seq + "\n")
    # Text mode ('w'): the original opened in 'wb' but wrote str, which
    # raises TypeError on Python 3. Joining once also replaces the
    # quadratic `output += ...` accumulation.
    with open(fileName, 'w') as outFile:
        outFile.write("".join(lines))
# Takes a dict[OTU, IDs...] and prints as an OTU map to the given filename
def write_map(otuMap, fileName):
    """Write each OTU key and its tab-joined IDs string, one per line."""
    # Text mode ('w'): the original 'wb' + str write raises TypeError on
    # Python 3; the context manager guarantees the file is closed.
    with open(fileName, 'w') as outFile:
        for k in otuMap:
            outFile.write(k + "\t" + otuMap[k] + "\n")
# Returns reverse complement of a DNA seq
def reverse_complement(seq):
    """Return the complement of ``seq`` read in reverse order."""
    reversed_seq = seq[::-1]
    return complement(reversed_seq)
# Returns complement of a DNA seq
def complement(seq):
    """Return the complement of a DNA sequence.

    A/T and C/G are swapped. Any other character (notably the ambiguity
    code 'N', which check_fasta explicitly allows) is preserved as-is;
    the original implementation silently dropped such characters, which
    corrupted sequence lengths.
    """
    pairs = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    return ''.join(pairs.get(base, base) for base in seq)
###
# MAIN METHODS
###
# Runs ninja_filter
# INPUT inputSeqsFile: input sequences in fasta format
# inputSeqsFile2: paired-end input sequences in fasta format
# OUTPUT filteredSeqsFile: input sequences filtered using ninja algorithm
# seqsDB: utility file used in ninja_parse
# OPTIONAL trim: trims sequences to <= uniform X bp (e.g. AGGC, GCG with trim 2 returns AG, GC)
# RC: takes reverse complement of input sequences
# denoising: for argument x.y, discards all reads that appear less than x times and all kmers that
# appear less than y*1000 times (unless that read appears more than x times)
def ninja_filter(inputSeqsFile, inputSeqsFile2, file_prefix, trim, trim2, RC, denoising, logger, full_output=False,
        run_with_shell=True, print_only=False):
    """Assemble and run the platform-specific ninja_filter binary.

    Returns the command string that was (or would be) executed.
    Raises ValueError when the subprocess exits non-zero.
    """
    # Converts optional arguments to args readable by ninja_filter
    argTrim = ''
    argRC = ''
    argDenoising = ''
    if trim != -1:
        argTrim = str(trim)
        # A separate reverse-read trim length is comma-appended.
        if trim2 != -1:
            argTrim += "," + str(trim2)
        argTrim = '"' + argTrim + '"'
    if RC:
        argRC = "RC"
    # Sets the relevant binaries for mac and windows support.
    # Binaries live in <package root>/bin, one level above this script.
    ninjaDirectory = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
    ninjaDirectory = os.path.abspath(os.path.join(ninjaDirectory, os.pardir))
    if sys.platform.startswith("darwin") or sys.platform.startswith("os"): # Mac
        ninjaFilterFile = os.path.join(ninjaDirectory, os.path.join("bin", "ninja_filter_mac"))
    elif sys.platform.startswith("win32") or sys.platform.startswith("cygwin"): # Windows and cygwin
        ninjaFilterFile = os.path.join(ninjaDirectory, os.path.join("bin", "ninja_filter.exe"))
    else: # Linux
        ninjaFilterFile = os.path.join(ninjaDirectory, os.path.join("bin", "ninja_filter_linux"))
    argDenoising = 'D ' + str(denoising)
    # Runs ninja_filter. Run in shell only on Mac
    # Every path is double-quoted so paths with spaces survive the shell.
    cmd = ""
    cmd = '"' + ninjaFilterFile + '"'
    cmd += ' ' + '"' + inputSeqsFile + '"'
    if inputSeqsFile2 is not None:
        cmd += ' ' + 'PE "' + inputSeqsFile2 + '"'
    cmd += ' ' + '"' + file_prefix + '"'
    cmd += ' ' + argTrim
    cmd += ' ' + argRC
    cmd += ' ' + argDenoising
    if full_output:
        cmd += ' LOG'
    logger.log(cmd)
    if not print_only:
        proc = Popen(cmd,shell=run_with_shell,universal_newlines=True,stdout=PIPE,stderr=PIPE)
        stdout, stderr = proc.communicate()
        logger.log(stdout)
        if proc.returncode != 0:
            logger.log(stderr + '\n')
            raise ValueError("ERROR: Filtering failed. One possible explanation is a problem with input FASTA formatting. Please rerun with '--check_fasta'. Exiting.")
    return cmd
# Runs bowtie2. Uses two presets for ninja normal and max
# INPUT filteredSeqsFile: filtered sequences output from ninja_filter
# OUTPUT alignmentsFile: main output of bowtie
# OPTIONAL mode: 'ninja' or 'ninjaMax', for less and more sensitivity, respectively
# threads: number of threads/cores to run bowtie2 on
# similarity: minimum fractional similarity between query sequence and reference sequence
# both_strands: forces the aligner (bowtie2) to use both orientations (forward,reverse) for matching
def bowtie2(bowtie2_cmd,filteredSeqsFile, filteredSeqsFile2, alignmentsFile, bowtieDatabase, similarity, insert, threads, mode,
        logger, both_strands, run_with_shell=True, print_only=False):
    """Build and run the bowtie2 alignment command.

    Returns the command string. Raises ValueError when similarity is
    outside [0, 1] or when bowtie2 exits non-zero.
    """
    # TODO: Automatically convert fasta file if formatted incorrectly
    # Checks if similarity is a percentage
    if similarity > 1 or similarity < 0:
        raise ValueError("Similarity error. Enter similarity as a decimal between 0 and 1. Exiting.")
    similarity = 1 - similarity # Converts to similarity readable by bowtie2
    # Switches between ninja normal and max according to user input. Only runs in shell on Mac/linux
    cmd = [bowtie2_cmd,'--no-head']
    cmd.append('-x ' + bowtieDatabase)
    cmd.append('-S ' + '"' + alignmentsFile + '"')
    # Scoring setup: no penalty for N, unit mismatch/gap penalties, and a
    # minimum-score line derived from the requested identity fraction.
    cmd.append('--np 0')
    cmd.append('--mp "1,1"')
    cmd.append('--rdg "0,1"')
    cmd.append('--rfg "0,1"')
    cmd.append('--score-min "L,0,-' + str(similarity) + '"')
    if not both_strands:
        cmd.append('--norc')
    if filteredSeqsFile2 is None:
        cmd.append('-f ' + '"'+ filteredSeqsFile + '"')
    else:
        # Paired-end run: forward/reverse mates with insert-size limits.
        cmd.append('-f -1 ' + '"'+ filteredSeqsFile + '"')
        cmd.append('-2 ' + '"'+ filteredSeqsFile2 + '"')
        cmd.append('--maxins ' + str(insert))
        cmd.append('--no-mixed --no-discordant')
    cmd.append('-p ' + str(threads))
    if mode != 'max':
        # -k 1 reports a single alignment per read in the faster presets.
        cmd.append('-k 1')
        if mode == 'fast':
            cmd.append('--fast')
    elif mode == 'max':
        cmd.append('--very-sensitive')
    # run command
    cmd = ' '.join(cmd)
    logger.log(cmd)
    if not print_only:
        proc = Popen(cmd,shell=run_with_shell,universal_newlines=True,stdout=PIPE,stderr=PIPE)
        stdout, stderr = proc.communicate()
        logger.log(stdout)
        if proc.returncode != 0:
            logger.log(stderr + '\n')
            raise ValueError("ERROR: Bowtie2 failed. Exiting.")
    return cmd
# Runs ninja_compact.
# PARAMS
# alignmentsFile: alignment file output from bowtie2
# masterDBFile: master db file packaged with ninja
# alignmentsFileCompacted: compacted alignment file output path from
# ninja_compact. optional: default is to move
# alignmentsFile to alignmentsFile_uncompacted.txt
#
def ninja_compact(alignmentsFile, masterFastaFile, logger, alignmentsFileCompacted=None, run_with_shell=True, print_only=False):
    """Run the platform-specific ninja_compact binary on the alignments.

    Returns the command string. Raises ValueError when the subprocess
    exits non-zero.
    """
    # move alignmentsFile if explicit new alignmentsFileCompacted not provided
    # (the original alignments are preserved as *_uncompacted.txt and the
    # compacted output takes over the original path)
    if alignmentsFileCompacted is None:
        alignmentsFileCompacted = alignmentsFile
        alignmentsFile = os.path.splitext(alignmentsFile)[0]+'_uncompacted.txt'
        shutil.move(alignmentsFileCompacted, alignmentsFile)
    # Sets the relevant binaries for mac and windows support.
    ninjaDirectory = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
    ninjaDirectory = os.path.abspath(os.path.join(ninjaDirectory, os.pardir))
    if sys.platform.startswith("darwin") or sys.platform.startswith("os"): # Mac
        ninjaCompactFile = os.path.join(ninjaDirectory, os.path.join("bin", "ninja_compact_mac"))
    elif sys.platform.startswith("win32") or sys.platform.startswith("cygwin"): # Windows and cygwin
        ninjaCompactFile = os.path.join(ninjaDirectory, os.path.join("bin", "ninja_compact.exe"))
    else: # Linux
        ninjaCompactFile = os.path.join(ninjaDirectory, os.path.join("bin", "ninja_compact_linux"))
    cmd = ['"' + ninjaCompactFile + '"',
        '"' + alignmentsFile + '"',
        '"' + masterFastaFile + '"',
        '"' + alignmentsFileCompacted + '"']
    cmd = ' '.join(cmd)
    logger.log(cmd)
    if not print_only:
        proc = Popen(cmd,shell=run_with_shell,universal_newlines=True,stdout=PIPE,stderr=PIPE)
        stdout, stderr = proc.communicate()
        logger.log(stdout)
        if proc.returncode != 0:
            logger.log(stderr + '\n')
            raise ValueError("ERROR: Read compaction failed for unknown reason. Exiting.")
    return cmd
# Runs ninja_parse_filtered.
# INPUT seqsDBFile: db file output from ninja_filter
# alignmentsFile: alignment file output from bowtie2
# masterDBFile: master db file packaged with ninja
# taxMapFile: reference taxonomy map packaged with ninja
# OUTPUT otuTableFile: OTU table output that's really the point of all this
# AUTO legacyTable: OTU table in legacy format, output automatically
# parseLog: a utility file containing parsed sequences, used in post-processing and output automatically
def ninja_parse(file_prefix, alignmentsFile, masterDBFile, taxMapFile, full_output,
        logger, legacy_table=False, run_with_shell=True, print_only=False):
    """Run the platform-specific ninja_parse_filtered binary to produce
    the OTU table.

    taxMapFile may be None, in which case no taxonomy argument is passed.
    Returns the command string. Raises ValueError on non-zero exit.
    """
    # Sets the relevant binaries for mac and windows support.
    ninjaDirectory = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
    ninjaDirectory = os.path.abspath(os.path.join(ninjaDirectory, os.pardir))
    if sys.platform.startswith("darwin") or sys.platform.startswith("os"): # Mac
        ninjaParseFile = os.path.join(ninjaDirectory, os.path.join("bin", "ninja_parse_filtered_mac"))
    elif sys.platform.startswith("win32") or sys.platform.startswith("cygwin"): # Windows and cygwin
        ninjaParseFile = os.path.join(ninjaDirectory, os.path.join("bin", "ninja_parse_filtered.exe"))
    else: # Linux
        ninjaParseFile = os.path.join(ninjaDirectory, os.path.join("bin", "ninja_parse_filtered_linux"))
    cmd = ['"' + ninjaParseFile + '"', '"' + file_prefix + '"', '"' + alignmentsFile + '"', '"' + masterDBFile + '"']
    if taxMapFile is not None:
        cmd.append('"' + taxMapFile + '"')
    if legacy_table:
        cmd.append('--legacy')
    if full_output:
        cmd.append('LOG')
    cmd = ' '.join(cmd)
    logger.log(cmd)
    if not print_only:
        proc = Popen(cmd,shell=run_with_shell,universal_newlines=True,stdout=PIPE,stderr=PIPE)
        stdout, stderr = proc.communicate()
        logger.log(stdout)
        if proc.returncode != 0:
            logger.log(stderr + '\n')
            raise ValueError("ERROR: Parsing failed. Exiting.")
    return cmd
# Performs housekeeping on files, deleting the intermediate ones listed below
# INPUT inputSeqsFile: original file of sequences passed to ninja_filter (.fna, .fasta)
# filteredSeqsFile: seqs with duplicates removed output from ninja_filter
# filteredSeqsFile2: paired-end reverse reads, or None
# seqsDBFile: db file output from ninja_filter
# alignmentsFile: main output of bowtie2
# parseLogFile: parse log generated from ninja_parse_filter
def clean(files, logger=None):
    """Delete each intermediate file in ``files``, skipping None entries.

    files: iterable of paths; None entries (e.g. an absent paired-end
        file) are ignored.
    logger: optional Logger used to record failures. The original code
        referenced an undefined global ``logger`` in the except branch
        (it was local to main), raising NameError instead of the
        intended diagnostic.

    Raises ValueError when any listed path cannot be removed.
    """
    for f in files:
        if f is None:
            continue
        try:
            os.remove(f)
        except OSError:
            myError = "INTERNAL ERROR: Can't find all files marked for moving " \
                "and/or deletion. Check working directory and output folder."
            if logger is not None:
                logger.log(myError)
            raise ValueError(myError)
# Runs ninja, bowtie2 and then processes output. All files output in specified output folder.
# User must specify ninja's directory as an environment variable named 'NINJA_DIR'
def main(argparser):
    """Drive the full NINJA-OPS pipeline: filter -> bowtie2 -> compact -> parse.

    argparser: an argparse.ArgumentParser; all options are attached and
    parsed by get_args. All outputs (log, OTU table, intermediates) are
    written into the user-specified output folder.
    """
    args = get_args(argparser)
    args = vars(args)
    # Opens logger to write to log and/or stdout
    # First stores original console location as a variable for error handling
    ninjaLog = os.path.join(args['output'], "ninja_log.txt")
    logger = Logger(ninjaLog, not args['suppress_stdout'])
    logger.log("NINJA-OPS v" + __version__)
    check_args(args, argparser)
    # if paired end, store second file as a different parameter
    if("," in args['input']):
        files = args['input'].split(',')
        args['input'] = files[0]
        args['input2'] = files[1]
    else:
        args['input2'] = None
    # Checks if input sequences fasta is correctly formatted. Writes correct one if not
    if args['check_fasta']:
        fileName = args['input']
        if not check_fasta(open(fileName), logger):
            new_input_fasta = os.path.join(args['output'], "formatted_input_fasta.fna")
            logger.log("Warning: Input fasta formatted incorrectly for QIIME, e.g. sequences or title on multiple lines. Writing " + \
                "corrected file to " + new_input_fasta)
            with open(fileName) as f:
                write_fasta(read_fasta(f), new_input_fasta)
            args['input'] = new_input_fasta
        if args['input2'] is not None:
            fileName = args['input2']
            if not check_fasta(open(fileName), logger):
                new_input_fasta = os.path.join(args['output'], "formatted_input2_fasta.fna")
                logger.log("Warning: Reverse input fasta formatted incorrectly for QIIME, e.g. sequences or title on multiple lines. Writing " + \
                    "corrected file to " + new_input_fasta)
                with open(fileName) as f:
                    write_fasta(read_fasta(f), new_input_fasta)
                args['input2'] = new_input_fasta
    # Pull frequently used options into locals for readability.
    RC = args['reverse_complement']
    similarity = args['similarity']
    threads = args['threads']
    mode = args['mode']
    denoising = args['denoising']
    suppress_stdout = args['suppress_stdout']
    full_output = args['full_output']
    retain_intermediates = args['retain_intermediates']
    legacy_table = args['legacy']
    # Runs ninja pipeline
    # Gets ninja's directory relative to current working directory
    ninjaDirectory = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
    ninjaDirectory = os.path.abspath(os.path.join(ninjaDirectory, os.pardir))
    # Checks for output subdirectory of current working directory. Makes it if necessary.
    # Edits global output folder variable
    outdir = os.path.join(os.getcwd(), args['output'])
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    outdir = os.path.abspath(outdir)
    # do not run commands with shell=TRUE on win32 or cygwin
    run_with_shell = not (sys.platform.startswith("win32") or sys.platform.startswith("cygwin"))
    # open /dev/null equivalent to catch unwanted subprocess output
    FNULL = open(os.devnull, 'w')
    # Locate a usable bowtie2 binary: first on PATH, then in the NINJA folder.
    try:
        bowtie2_cmd = "bowtie2-align-s"
        subprocess.check_call(bowtie2_cmd + " --version", shell=run_with_shell, stdout=FNULL, stderr=FNULL)
    except subprocess.CalledProcessError as e:
        try:
            bowtie2_cmd = os.path.join(ninjaDirectory,"bowtie2-align-s")
            subprocess.check_call(bowtie2_cmd + " --version", shell=run_with_shell, stdout=FNULL, stderr=FNULL)
        except subprocess.CalledProcessError as e:
            myError = "ERROR: Bowtie2 executable not found in system path or top-level NINJA package folder. Please install bowtie2 and add its accompanying executables to the system path or place bowtie2-align-s in the top-level ninja package folder (not a subfolder). Check README.txt for additional instructions. Exiting."
            logger.log(myError)
            raise ValueError(myError)
    # NOTE(review): a missing executable typically raises OSError /
    # FileNotFoundError, which these handlers do not catch -- confirm
    # whether that is intended.
    # Sets variables used in ninja calls. First, ninja_filter files
    file_prefix = os.path.join(outdir, "ninja")
    # Set paired-end file to None if this is not a paired-end run
    if(args['input2']) is not None:
        pe_file = file_prefix + "2_filt.fa"
    else:
        pe_file = None
    # Bowtie2 files
    alignmentsFile = os.path.join(outdir, "alignments.txt")
    databasedir = os.path.join(ninjaDirectory, 'databases', args['database'])
    logger.log('NINJA-OPS database directory is ' + databasedir)
    masterDBFile = os.path.abspath(os.path.join(databasedir, args['database'] + ".db"))
    masterFastaFile = os.path.abspath(os.path.join(databasedir, args['database'] + ".tcf"))
    bowtieDatabase = os.path.abspath(os.path.join(databasedir, args['database']))
    # Ninja_parse files
    taxMapFile = os.path.abspath(os.path.join(databasedir, args['database'] + ".taxonomy"))
    if not os.path.exists(taxMapFile):
        # Taxonomy is optional; ninja_parse skips it when None.
        taxMapFile = None
    otuTableFile = os.path.join(outdir, "otutable.biom")
    # Post-processing files
    seqOutFile = os.path.join(outdir, "failed_sequences.fna")
    mapOutFile = os.path.join(outdir, "otu_map.txt")
    # Runs ninja_filter, bowtie2 and ninja_parse. Processes ninja results, generating OTU map and a list of failed seqs
    logger.log("Running NINJA-OPS filter...")
    t1 = timeit.Timer(lambda:
        ninja_filter(args['input'], args['input2'], file_prefix, args['trim'], args['trim2'], RC, denoising, logger, full_output,
            run_with_shell=run_with_shell, print_only=args['print_only'])
        )
    logger.log("NINJA-OPS filter time: " + str(t1.timeit(1)))
    logger.log("Running Bowtie2...")
    t2 = timeit.Timer(lambda:
        bowtie2(bowtie2_cmd,file_prefix + "_filt.fa", pe_file, alignmentsFile, bowtieDatabase, similarity, args['insert'], threads, mode,
            logger, both_strands=args['both_strands'], run_with_shell=run_with_shell, print_only=args['print_only'])
        )
    logger.log("Bowtie time: " + str(t2.timeit(1)))
    if not args['suppress_OTU_compaction']:
        logger.log("Running NINJA-OPS compact...")
        # NOTE: t2 is reused here for the compaction timer.
        t2 = timeit.Timer(lambda:
            ninja_compact(alignmentsFile, masterFastaFile, logger, run_with_shell=run_with_shell, print_only=args['print_only'])
            )
        logger.log("NINJA-OPS compact time: " + str(t2.timeit(1)))
    logger.log("Running NINJA-OPS parse...")
    t3 = timeit.Timer(lambda:
        ninja_parse(file_prefix, alignmentsFile, masterDBFile, taxMapFile, full_output,
            logger, legacy_table, run_with_shell=run_with_shell, print_only=args['print_only'])
        )
    logger.log("NINJA-OPS parse time: " + str(t3.timeit(1)) + "\n")
    # Remove intermediates unless the user asked to keep them.
    if not retain_intermediates:
        to_remove = [file_prefix + "_filt.fa", pe_file, file_prefix + ".db", alignmentsFile]
        if not args['suppress_OTU_compaction']:
            to_remove.append(os.path.splitext(alignmentsFile)[0]+'_uncompacted.txt')
        clean(to_remove)
# Wrapper for main function, called from command line
# Bare minimum args:
# -i "seqs.fna"
# Sample maximum args (note that reverse complementing changes ALL read orientation):
# -i "seqs.fna" -o "output" -r -t 200 -mo 'max' -s 0.98 -d 1.005 -q
if __name__=='__main__':
    # Parses command line arguments
    p = argparse.ArgumentParser(description = "NINJA-OPS: NINJA Is Not Just Another OTU Picking Solution (v" + __version__ +")\n" + \
        "Knights Lab (www.ninja-ops.ninja)\n" + \
        "This program outputs an otu table and map from sequence reads in fasta format.",
        add_help = True,
        epilog ='NOTE: If one or more output files are empty, trying reverse complementing your input ' + \
            'sequences with -r')
    main(p)
| GabeAl/NINJA-OPS | bin/ninja.py | Python | isc | 27,509 | [
"Bowtie"
] | d8f1cd0f36c9d2980445af5046982c44c539dbce9170c35003f94ae877596960 |
import numpy
import chainer
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import argument
from chainer.utils import type_check
class Gaussian(function_node.FunctionNode):
    """Gaussian sampling function.
    .. note::
        In forward calculation, this function takes a mean and the logarithm of
        a variance as inputs, and draws a sample from a Gaussian distribution
        accordingly.
    """
    def __init__(self, eps=None):
        # ``eps`` is the standard-normal noise of the reparameterization
        # trick.  When it is None, noise is generated once during the first
        # forward pass and then reused by subsequent calls on this instance.
        self.eps = eps
    def check_type_forward(self, in_types):
        # Both inputs must be floating point and share dtype and shape.
        type_check._argname(in_types, ('mean', 'ln_var'))
        m_type, v_type = in_types
        type_check.expect(
            m_type.dtype.kind == 'f',
            m_type.dtype == v_type.dtype,
            m_type.shape == v_type.shape,
        )
    def forward_cpu(self, inputs):
        """Computes ``mean + exp(ln_var / 2) * eps`` on NumPy arrays."""
        # Only ``ln_var`` (input index 1) is needed by backward().
        self.retain_inputs((1,))
        mean, ln_var = inputs
        if self.eps is None:
            self.eps = (
                numpy.random.standard_normal(ln_var.shape)
                .astype(mean.dtype, copy=False)
            )
        # exp(ln_var * 0.5) is the standard deviation sigma.
        self.noise = numpy.exp(ln_var * mean.dtype.type(0.5)) * self.eps
        return utils.force_array(mean + self.noise),
    def forward_gpu(self, inputs):
        """Computes ``mean + exp(ln_var / 2) * eps`` on CuPy arrays."""
        self.retain_inputs((1,))
        mean, ln_var = inputs
        if self.eps is None:
            if mean.dtype != numpy.float16:
                self.eps = cuda.cupy.random.standard_normal(
                    ln_var.shape, dtype=mean.dtype)
            else:
                # Draw samples in FP32 then cast them to FP16 because
                # cupy.random does not support FP16 currently.
                self.eps = cuda.cupy.random.standard_normal(
                    ln_var.shape, dtype=numpy.float32).astype(numpy.float16)
        self.noise = cuda.cupy.empty_like(mean)
        # NOTE(review): the buffer allocated just above is immediately
        # replaced by the kernel output, so the empty_like call looks
        # redundant -- confirm before removing.
        self.noise = cuda.elementwise(
            'T v, T e', 'T noise',
            'noise = exp(v / 2) * e',
            'gaussian_forward'
        )(ln_var, self.eps)
        return mean + self.noise,
    def backward(self, indexes, grad_outputs):
        # d(out)/d(mean) = 1, so the gradient w.r.t. mean is gy itself.
        # d(out)/d(ln_var) = 0.5 * exp(ln_var / 2) * eps = 0.5 * noise.
        ln_var, = self.get_retained_inputs()
        gy, = grad_outputs
        ret = []
        if 0 in indexes:
            ret.append(gy)
        if 1 in indexes:
            # Recompute the noise as a differentiable expression of ln_var.
            noise = chainer.functions.exp(ln_var * 0.5) * self.eps
            ret.append(gy * noise * 0.5)
        return ret
def gaussian(mean, ln_var, **kwargs):
    """gaussian(mean, ln_var, *, eps=None, return_eps=False)
    Gaussian sampling function.
    Given a mean :math:`\\mu` and the logarithm of a variance
    :math:`\\log(\\sigma^2)` (both of the same shape), this function returns
    one sample drawn from the Gaussian distribution :math:`N(\\mu, \\sigma)`.
    Args:
        mean (:class:`~chainer.Variable` or :ref:`ndarray`):
            Input variable holding the mean :math:`\\mu`.
        ln_var (:class:`~chainer.Variable` or :ref:`ndarray`):
            Input variable holding :math:`\\log(\\sigma^2)`.
        eps (:ref:`ndarray` or None):
            Noise to reuse for deterministic results.  When omitted or
            ``None`` the noise is drawn randomly.  Its shape and dtype must
            match ``ln_var`` and it should live on the same device.
        return_eps (bool):
            When ``True``, the noise actually used is returned alongside the
            output so it can be fed back in via ``eps`` later.
    Returns:
        ~chainer.Variable or tuple:
            The sampled variable, or, when ``return_eps`` is ``True``, the
            tuple of the sampled variable and the eps array (which resides on
            the same device as ``ln_var``).
    """
    eps, return_eps = None, False
    if kwargs:
        eps, return_eps = argument.parse_kwargs(
            kwargs, ('eps', None), ('return_eps', False))
    func = Gaussian(eps)
    out, = func.apply((mean, ln_var))
    if return_eps:
        return out, func.eps
    return out
| okuta/chainer | chainer/functions/noise/gaussian.py | Python | mit | 4,588 | [
"Gaussian"
] | 34636396c4eb3dbeda689882b62e9eab1d40cd40ace00ff618eb2c34c52c8892 |
## \file
## \ingroup tutorial_roofit
## \notebook
## Addition and convolution: setting up an extended maximum likelihood fit
##
## \macro_code
##
## \date February 2018
## \authors Clemens Lange, Wouter Verkerke (C++ version)
import ROOT
# Set up component pdfs
# ---------------------------------------
# Declare observable x
x = ROOT.RooRealVar("x", "x", 0, 10)
# Create two Gaussian PDFs g1(x,mean1,sigma) and g2(x,mean2,sigma) and
# their parameters (both Gaussians share the same mean)
mean = ROOT.RooRealVar("mean", "mean of gaussians", 5)
sigma1 = ROOT.RooRealVar("sigma1", "width of gaussians", 0.5)
sigma2 = ROOT.RooRealVar("sigma2", "width of gaussians", 1)
sig1 = ROOT.RooGaussian("sig1", "Signal component 1", x, mean, sigma1)
sig2 = ROOT.RooGaussian("sig2", "Signal component 2", x, mean, sigma2)
# Build Chebychev polynomial pdf
a0 = ROOT.RooRealVar("a0", "a0", 0.5, 0., 1.)
# NOTE(review): a1's initial value -0.2 lies outside its declared [0., 1.]
# limits -- confirm the intended range/value.
a1 = ROOT.RooRealVar("a1", "a1", -0.2, 0., 1.)
bkg = ROOT.RooChebychev("bkg", "Background", x, ROOT.RooArgList(a0, a1))
# Sum the signal components into a composite signal pdf
sig1frac = ROOT.RooRealVar(
    "sig1frac", "fraction of component 1 in signal", 0.8, 0., 1.)
sig = ROOT.RooAddPdf(
    "sig", "Signal", ROOT.RooArgList(sig1, sig2), ROOT.RooArgList(sig1frac))
# Method 1 - Construct extended composite model
# -------------------------------------------------------------------
# Sum the composite signal and background into an extended pdf
# nsig*sig+nbkg*bkg
nsig = ROOT.RooRealVar("nsig", "number of signal events", 500, 0., 10000)
nbkg = ROOT.RooRealVar(
    "nbkg", "number of background events", 500, 0, 10000)
model = ROOT.RooAddPdf(
    "model",
    "(g1+g2)+a",
    ROOT.RooArgList(
        bkg,
        sig),
    ROOT.RooArgList(
        nbkg,
        nsig))
# Sample, fit and plot extended model
# ---------------------------------------------------------------------
# Generate a data sample of expected number events in x from model
# = model.expectedEvents() = nsig+nbkg
data = model.generate(ROOT.RooArgSet(x))
# Fit model to data, ML term automatically included
model.fitTo(data)
# Plot data and PDF overlaid, expected number of events for pdf projection normalization
# rather than observed number of events (==data.numEntries())
xframe = x.frame(ROOT.RooFit.Title("extended ML fit example"))
data.plotOn(xframe)
model.plotOn(xframe, ROOT.RooFit.Normalization(
    1.0, ROOT.RooAbsReal.RelativeExpected))
# Overlay the background component of model with a dashed line
ras_bkg = ROOT.RooArgSet(bkg)
model.plotOn(
    xframe, ROOT.RooFit.Components(ras_bkg), ROOT.RooFit.LineStyle(
        ROOT.kDashed), ROOT.RooFit.Normalization(
            1.0, ROOT.RooAbsReal.RelativeExpected))
# Overlay the background+sig2 components of model with a dotted line
ras_bkg_sig2 = ROOT.RooArgSet(bkg, sig2)
model.plotOn(
    xframe, ROOT.RooFit.Components(ras_bkg_sig2), ROOT.RooFit.LineStyle(
        ROOT.kDotted), ROOT.RooFit.Normalization(
            1.0, ROOT.RooAbsReal.RelativeExpected))
# Print structure of composite pdf
model.Print("t")
# Method 2 - Construct extended components first
# ---------------------------------------------------------------------
# Associated nsig/nbkg as expected number of events with sig/bkg
esig = ROOT.RooExtendPdf("esig", "extended signal pdf", sig, nsig)
ebkg = ROOT.RooExtendPdf("ebkg", "extended background pdf", bkg, nbkg)
# Sum extended components without coefs
# -------------------------------------------------------------------------
# Construct sum of two extended pdf (no coefficients required)
model2 = ROOT.RooAddPdf("model2", "(g1+g2)+a", ROOT.RooArgList(ebkg, esig))
# Draw the frame on the canvas and save it as a PNG image
c = ROOT.TCanvas("rf202_extendedmlfit", "rf202_extendedmlfit", 600, 600)
ROOT.gPad.SetLeftMargin(0.15)
xframe.GetYaxis().SetTitleOffset(1.4)
xframe.Draw()
c.SaveAs("rf202_extendedmlfit.png")
| root-mirror/root | tutorials/roofit/rf202_extendedmlfit.py | Python | lgpl-2.1 | 3,835 | [
"Gaussian"
] | 65a6a0feff94443106a82320a4e447c76e3049c93b9fa74ada13770804e86617 |
import ovito
from ovito.io import *
from ovito.vis import *
import os
import glob
import shutil
import sys
import re
print("Hello, this is OVITO %i.%i.%i" % ovito.version)
# Shared render settings; makemovie() rewrites rs.filename for every frame.
rs = RenderSettings(
    filename = '/tmp/image.png',
    size = (320,240),
    background_color = (1.0, 1.0, 1.0)
)
# Disable antialiasing to speed up per-frame rendering.
rs.renderer.antialiasing = False
def makemovie(path):
    """Render every other surfacesN.xyz frame in *path* and build a GIF.

    Frames are rendered with the module-level ``rs`` RenderSettings, written
    as numbered PNGs inside *path*, stitched into /tmp/movies/<dirname>.gif
    via ImageMagick's ``convert``, and then deleted.
    """
    # Sort numerically by the N in "surfacesN.xyz"; [::2] keeps every 2nd frame.
    files = sorted(glob.glob(path + "/*.xyz"), key= lambda x: int(re.findall("surfaces(\d+)\.xyz", x)[0]))[::2]
    name = path.split("/")[-1] + ".gif"
    imgs = []
    # Reuse the currently selected node if one exists, so repeated calls
    # keep loading into the same scene object instead of re-importing.
    node = ovito.dataset.selected_node
    for frame, f in enumerate(files):
        if not node:
            node = import_file(f, columns=["Particle Type", "Position.X", "Position.Y", "Position.Z"])
        else:
            node.source.load(f)
        # Zero-padded frame index keeps the PNGs lexicographically ordered.
        rs.filename = "%s/image%s.png" % (path, str(frame).rjust(4, "0"))
        ovito.dataset.viewports.active_vp.render(rs)
        imgs.append(rs.filename)
        sys.stdout.flush()
        # NOTE(review): "\r" suggests an in-place progress line, but print()
        # appends a newline anyway -- confirm the intended output format.
        print("\rRendered img %4d/%d : %s from %s" % (frame+1, len(files),
                                                      rs.filename.split("/")[-1],
                                                      f.split("/")[-1]))
    os.system("mkdir -p /tmp/movies/")
    os.system("convert -delay 3 %s -loop 1 /tmp/movies/%s" % (" ".join(imgs), name))
    # Remove the intermediate PNG frames once the GIF has been assembled.
    for img in imgs:
        os.remove(img)
# Render a movie for every /tmp/felix_* directory found.
dirs = glob.glob("/tmp/felix_*")
for i, movie_dir in enumerate(dirs):
    # Fix: the original passed the format string and its values as separate
    # print() arguments, so the %s/%d placeholders were never substituted.
    # Apply the % formatting explicitly.  ("dir" also shadowed the builtin.)
    print("Entering %s : %d/%d" % (movie_dir, i + 1, len(dirs)))
    makemovie(movie_dir)
| jorgehog/Deux-kMC | scripts/felix_front/ovitoviz.py | Python | gpl-3.0 | 1,489 | [
"OVITO"
] | 9dbb638562063441fe6f6706ec8aa67df96ae85f347897dfd20479d714d11382 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PipelineRunner, an abstract base runner object."""
from __future__ import absolute_import
import logging
import os
import shelve
import shutil
import tempfile
__all__ = ['PipelineRunner', 'PipelineState', 'PipelineResult']
def _get_runner_map(runner_names, module_path):
"""Create a map of runner name in lower case to full import path to the
runner class.
"""
return {runner_name.lower(): module_path + runner_name
for runner_name in runner_names}
# Module paths of the packages hosting each family of runner classes.
_DIRECT_RUNNER_PATH = 'apache_beam.runners.direct.direct_runner.'
_DATAFLOW_RUNNER_PATH = (
    'apache_beam.runners.dataflow.dataflow_runner.')
_TEST_RUNNER_PATH = 'apache_beam.runners.test.'
_PYTHON_RPC_DIRECT_RUNNER = (
    'apache_beam.runners.experimental.python_rpc_direct.'
    'python_rpc_direct_runner.')
# Runner class names available inside each of the packages above.
_KNOWN_PYTHON_RPC_DIRECT_RUNNER = ('PythonRPCDirectRunner',)
_KNOWN_DIRECT_RUNNERS = ('DirectRunner', 'BundleBasedDirectRunner',
                         'SwitchingDirectRunner')
_KNOWN_DATAFLOW_RUNNERS = ('DataflowRunner',)
_KNOWN_TEST_RUNNERS = ('TestDataflowRunner',)
# Lowercased runner name -> fully qualified class path; consulted by
# create_runner() before falling back to the literal name given by the user.
_RUNNER_MAP = {}
_RUNNER_MAP.update(_get_runner_map(_KNOWN_DIRECT_RUNNERS,
                                   _DIRECT_RUNNER_PATH))
_RUNNER_MAP.update(_get_runner_map(_KNOWN_DATAFLOW_RUNNERS,
                                   _DATAFLOW_RUNNER_PATH))
_RUNNER_MAP.update(_get_runner_map(_KNOWN_PYTHON_RPC_DIRECT_RUNNER,
                                   _PYTHON_RPC_DIRECT_RUNNER))
_RUNNER_MAP.update(_get_runner_map(_KNOWN_TEST_RUNNERS,
                                   _TEST_RUNNER_PATH))
# Names advertised in create_runner()'s error message; the experimental RPC
# direct runner is deliberately left out of this list.
_ALL_KNOWN_RUNNERS = (
    _KNOWN_DIRECT_RUNNERS + _KNOWN_DATAFLOW_RUNNERS + _KNOWN_TEST_RUNNERS)
def create_runner(runner_name):
  """For internal use only; no backwards-compatibility guarantees.
  Creates a runner instance from a runner class name.
  Args:
    runner_name: Name of the pipeline runner. Possible values are:
      DirectRunner, DataflowRunner and TestDataflowRunner.
  Returns:
    A runner object.
  Raises:
    RuntimeError: if an invalid runner name is used.
  """
  # Get the qualified runner name by using the lower case runner name. If that
  # fails try appending the name with 'runner' and check if it matches.
  # If that also fails, use the given runner name as is.
  runner_name = _RUNNER_MAP.get(
      runner_name.lower(),
      _RUNNER_MAP.get(runner_name.lower() + 'runner', runner_name))
  if '.' in runner_name:
    module, runner = runner_name.rsplit('.', 1)
    try:
      # NOTE(review): level=-1 in __import__ (try relative, then absolute) is
      # Python-2 only; this call raises ValueError on Python 3 -- confirm the
      # supported interpreter versions before porting.
      return getattr(__import__(module, {}, {}, [runner], -1), runner)()
    except ImportError:
      if runner_name in _KNOWN_DATAFLOW_RUNNERS:
        # Give a targeted hint when the optional GCP extra is not installed.
        raise ImportError(
            'Google Cloud Dataflow runner not available, '
            'please install apache_beam[gcp]')
      else:
        raise
  else:
    # NOTE(review): the docstring advertises RuntimeError, but a ValueError
    # is raised here -- confirm which one callers expect.
    raise ValueError(
        'Unexpected pipeline runner: %s. Valid values are %s '
        'or the fully qualified name of a PipelineRunner subclass.' % (
            runner_name, ', '.join(_ALL_KNOWN_RUNNERS)))
class PipelineRunner(object):
  """A runner of a pipeline object.
  The base runner provides a run() method for visiting every node in the
  pipeline's DAG and executing the transforms computing the PValue in the node.
  A custom runner will typically provide implementations for some of the
  transform methods (ParDo, GroupByKey, Create, etc.). It may also
  provide a new implementation for clear_pvalue(), which is used to wipe out
  materialized values in order to reduce footprint.
  """
  def run(self, transform, options=None):
    """Run the given transform or callable with this runner.
    Blocks until the pipeline is complete. See also `PipelineRunner.run_async`.
    """
    result = self.run_async(transform, options)
    result.wait_until_finish()
    return result
  def run_async(self, transform, options=None):
    """Run the given transform or callable with this runner.
    May return immediately, executing the pipeline in the background.
    The returned result object can be queried for progress, and
    `wait_until_finish` may be called to block until completion.
    """
    # Imported here to avoid circular dependencies.
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam import PTransform
    from apache_beam.pvalue import PBegin
    from apache_beam.pipeline import Pipeline
    p = Pipeline(runner=self, options=options)
    if isinstance(transform, PTransform):
      # A PTransform is applied to the fresh pipeline via the | operator.
      p | transform
    else:
      # A callable receives the pipeline-begin marker as its sole argument.
      transform(PBegin(p))
    return p.run()
  def run_pipeline(self, pipeline):
    """Execute the entire pipeline or the sub-DAG reachable from a node.
    Runners should override this method.
    """
    # Imported here to avoid circular dependencies.
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam.pipeline import PipelineVisitor
    class RunVisitor(PipelineVisitor):
      # Visitor that executes each transform node through the owning runner.
      def __init__(self, runner):
        self.runner = runner
      def visit_transform(self, transform_node):
        try:
          self.runner.run_transform(transform_node)
        except:
          logging.error('Error while visiting %s', transform_node.full_label)
          raise
    pipeline.visit(RunVisitor(self))
  def apply(self, transform, input):
    """Runner callback for a pipeline.apply call.
    Args:
      transform: the transform to apply.
      input: transform's input (typically a PCollection).
    A concrete implementation of the Runner class may want to do custom
    pipeline construction for a given transform. To override the behavior
    for a transform class Xyz, implement an apply_Xyz method with this same
    signature.
    """
    # Walk the MRO so an apply_<Subclass> hook wins over apply_<Base>.
    for cls in transform.__class__.mro():
      m = getattr(self, 'apply_%s' % cls.__name__, None)
      if m:
        return m(transform, input)
    raise NotImplementedError(
        'Execution of [%s] not implemented in runner %s.' % (transform, self))
  def apply_PTransform(self, transform, input):
    # The base case of apply is to call the transform's expand.
    return transform.expand(input)
  def run_transform(self, transform_node):
    """Runner callback for a pipeline.run call.
    Args:
      transform_node: transform node for the transform to run.
    A concrete implementation of the Runner class must implement run_Abc for
    some class Abc in the method resolution order for every non-composite
    transform Xyz in the pipeline.
    """
    # Same MRO-based dispatch as apply(), but for execution (run_<Class>).
    for cls in transform_node.transform.__class__.mro():
      m = getattr(self, 'run_%s' % cls.__name__, None)
      if m:
        return m(transform_node)
    raise NotImplementedError(
        'Execution of [%s] not implemented in runner %s.' % (
            transform_node.transform, self))
class PValueCache(object):
  """For internal use only; no backwards-compatibility guarantees.
  Local cache for arbitrary information computed for PValue objects."""
  def __init__(self, use_disk_backed_cache=False):
    # Cache of values computed while a runner executes a pipeline. This is a
    # dictionary of PValues and their computed values. Note that in principle
    # the runner could contain PValues from several pipelines without clashes
    # since a PValue is associated with one and only one pipeline. The keys of
    # the dictionary are tuple of PValue instance addresses obtained using id()
    # and tag names converted to strings.
    self._use_disk_backed_cache = use_disk_backed_cache
    if use_disk_backed_cache:
      # Spill the cache to a shelve file under a private temp directory.
      self._tempdir = tempfile.mkdtemp()
      self._cache = shelve.open(os.path.join(self._tempdir, 'shelve'))
    else:
      self._cache = {}
  def __del__(self):
    # Best-effort cleanup of the on-disk shelve when disk backing is used.
    if self._use_disk_backed_cache:
      self._cache.close()
      shutil.rmtree(self._tempdir)
  def __len__(self):
    return len(self._cache)
  def to_cache_key(self, transform, tag):
    # A cache key pairs the producing transform's full label with the tag.
    return transform.full_label, tag
  def _ensure_pvalue_has_real_producer(self, pvalue):
    """Ensure the passed-in PValue has the real_producer attribute.
    Args:
      pvalue: A PValue instance whose cached value is requested.
    During the runner's execution only the results of the primitive transforms
    are cached. Whenever we are looking for a PValue that is the output of a
    composite transform we need to find the output of its rightmost transform
    part.
    """
    if not hasattr(pvalue, 'real_producer'):
      real_producer = pvalue.producer
      while real_producer.parts:
        # Descend into the last part until a primitive transform is reached.
        real_producer = real_producer.parts[-1]
      pvalue.real_producer = real_producer
  def is_cached(self, pobj):
    # Accepts either an AppliedPTransform or a PValue.
    from apache_beam.pipeline import AppliedPTransform
    if isinstance(pobj, AppliedPTransform):
      transform = pobj
      tag = None
    else:
      self._ensure_pvalue_has_real_producer(pobj)
      transform = pobj.real_producer
      tag = pobj.tag
    return self.to_cache_key(transform, tag) in self._cache
  def cache_output(self, transform, tag_or_value, value=None):
    # Two-argument form cache_output(transform, value) caches under tag None.
    if value is None:
      value = tag_or_value
      tag = None
    else:
      tag = tag_or_value
    self._cache[
        self.to_cache_key(transform, tag)] = value
  def get_pvalue(self, pvalue):
    """Gets the value associated with a PValue from the cache."""
    self._ensure_pvalue_has_real_producer(pvalue)
    try:
      return self._cache[self.key(pvalue)]
    except KeyError:
      if (pvalue.tag is not None
          and self.to_cache_key(pvalue.real_producer, None) in self._cache):
        # This is an undeclared, empty output of a DoFn executed
        # in the local runner before this output was referenced.
        return []
      else:
        raise
  def get_unwindowed_pvalue(self, pvalue):
    # Strip the windowing wrapper from each cached windowed value.
    return [v.value for v in self.get_pvalue(pvalue)]
  def clear_pvalue(self, pvalue):
    """Removes a PValue from the cache."""
    if self.is_cached(pvalue):
      del self._cache[self.key(pvalue)]
  def key(self, pobj):
    self._ensure_pvalue_has_real_producer(pobj)
    return self.to_cache_key(pobj.real_producer, pobj.tag)
class PipelineState(object):
  """State of the Pipeline, as returned by :attr:`PipelineResult.state`.
  This is meant to be the union of all the states any runner can put a
  pipeline in. Currently, it represents the values of the dataflow
  API JobState enum.
  """
  UNKNOWN = 'UNKNOWN'  # not specified
  STARTING = 'STARTING'  # not yet started
  STOPPED = 'STOPPED'  # paused or not yet started
  RUNNING = 'RUNNING'  # currently running
  DONE = 'DONE'  # successfully completed (terminal state)
  FAILED = 'FAILED'  # failed (terminal state)
  CANCELLED = 'CANCELLED'  # explicitly cancelled (terminal state)
  UPDATED = 'UPDATED'  # replaced by another job (terminal state)
  DRAINING = 'DRAINING'  # still processing, no longer reading data
  DRAINED = 'DRAINED'  # draining completed (terminal state)
  PENDING = 'PENDING'  # the job has been created but is not yet running.
  CANCELLING = 'CANCELLING'  # job has been explicitly cancelled and is
  # in the process of stopping
class PipelineResult(object):
  """A :class:`PipelineResult` provides access to info about a pipeline."""

  def __init__(self, state):
    # Current (possibly terminal) state; one of the PipelineState constants.
    self._state = state

  @property
  def state(self):
    """Return the current state of the pipeline execution."""
    return self._state

  def wait_until_finish(self, duration=None):
    """Waits until the pipeline finishes and returns the final status.

    Args:
      duration (int): The time to wait (in milliseconds) for job to finish.
        If it is set to :data:`None`, it will wait indefinitely until the job
        is finished.

    Raises:
      ~exceptions.IOError: If there is a persistent problem getting job
        information.
      ~exceptions.NotImplementedError: If the runner does not support this
        operation.

    Returns:
      The final state of the pipeline, or :data:`None` on timeout.
    """
    raise NotImplementedError

  def cancel(self):
    """Cancels the pipeline execution.

    Raises:
      ~exceptions.IOError: If there is a persistent problem getting job
        information.
      ~exceptions.NotImplementedError: If the runner does not support this
        operation.

    Returns:
      The final state of the pipeline.
    """
    raise NotImplementedError

  def metrics(self):
    """Returns :class:`~apache_beam.metrics.metric.MetricResults` object to
    query metrics from the runner.

    Raises:
      ~exceptions.NotImplementedError: If the runner does not support this
        operation.
    """
    raise NotImplementedError

  # pylint: disable=unused-argument
  def aggregated_values(self, aggregator_or_name):
    """Return a dict of step names to values of the Aggregator."""
    # Fix: logging.warn is a deprecated alias of logging.warning; call the
    # canonical API instead.
    logging.warning('%s does not implement aggregated_values',
                    self.__class__.__name__)
    return {}
| tgroh/beam | sdks/python/apache_beam/runners/runner.py | Python | apache-2.0 | 13,575 | [
"VisIt"
] | 7089d9545ab6bbb2724262e50999b2e9ae7fadbb604ee5e47302cd00cb7a4479 |
#!/usr/bin/env python
"""
dirac-rss-policy-manager
Script to manage the Policy section within a given CS setup of a given dirac cfg file.
It allows you to:
- view the policy current section (no option needed)
- test all the policies that apply for a given 'element', 'elementType' or element 'name'
(one of the aforementioned options is needed)
- update/add a policy to a given dirac cfg file (no option needed)
- remove a policy from a given dirac cfg file ('policy' option needed)
- restore the last backup of the diarc config file, to undo last changes (no option needed)
Usage:
dirac-rss-policy-manager [option] <command>
Commands:
[test|view|update|remove]
Options:
--name= ElementName (it admits a comma-separated list of element names); None by default
--element= Element family (either 'Site' or 'Resource')
--elementType= ElementType narrows the search (string, list); None by default
--setup= Setup where the policy section should be retrieved from; 'Defaults' by default
--file= Fullpath config file location other then the default one (but for testing use only the original)
--policy= Policy name to be removed
Verbosity:
-o LogLevel=LEVEL NOTICE by default, levels available: INFO, DEBUG, VERBOSE..
"""
# FIXME: this, I believe, is not complete
import datetime
import json
import shutil
from DIRAC import gLogger, exit as DIRACExit, S_OK, version
from DIRAC.Core.Base import Script
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Core.Utilities import Time
from DIRAC.ConfigurationSystem.private.ConfigurationData import CFG
from DIRAC.ResourceStatusSystem.Policy import Configurations
from DIRAC.ResourceStatusSystem.Utilities.InfoGetter import getPoliciesThatApply
from DIRAC.ResourceStatusSystem.Utilities import CSHelpers
__RCSID__ = '$Id:$'
subLogger = None
switchDict = {}
def registerSwitches():
  '''
  Declares on the Script object every command-line switch this script
  understands, together with its help text.
  '''
  for switchName, switchHelp in (
      ('elementType=', 'ElementType narrows the search; None if default'),
      ('element=', 'Element family ( Site, Resource )'),
      ('name=', 'ElementName; None if default'),
      ('setup=', "Setup where the policy section should be retrieved from; 'Defaults' by default"),
      ('file=', "Fullpath config file location other then the default one (but for testing use only the original)"),
      ('policy=', "Policy name to be removed")):
    Script.registerSwitch('', switchName, switchHelp)
def registerUsageMessage():
  '''
  Registers a usage message made of the running DIRAC version followed by
  this script's module docstring.
  '''
  Script.setUsageMessage('DIRAC version: %s \n' % version + __doc__)
def parseSwitches():
  '''
  Parses the arguments passed by the user
  '''
  Script.parseCommandLine(ignoreErrors=True)
  args = Script.getPositionalArgs()
  if not args:
    # error() exits the process, so cmd is always bound when execution
    # continues past this point.
    error("Argument is missing, you should enter either 'test', 'update', 'view', 'remove', 'restore'")
  else:
    cmd = args[0].lower()
  switches = dict(Script.getUnprocessedSwitches())
  diracConfigFile = CSHelpers.gConfig.diracConfigFilePath
  # Default values
  switches.setdefault('name', None)
  switches.setdefault('element', None)
  switches.setdefault('elementType', None)
  switches.setdefault('setup', "Defaults")
  switches.setdefault('file', diracConfigFile)
  #switches.setdefault( 'statusType', None )
  #switches.setdefault( 'status', None )
  # when it's a add/modify query and status/reason/statusType are not specified
  # then some specific defaults are set up
  if cmd == 'test':
    # 'test' needs at least one selector to know which policies to evaluate.
    if switches['elementType'] is None and switches['element'] is None and switches['name'] is None:
      error("to test, you should enter at least one switch: either element, elmentType, or name")
    else:
      if switches['element'] is not None:
        # Normalize capitalization ('site' -> 'Site') before validating.
        switches['element'] = switches['element'].title()
        if switches['element'] not in ('Resource', 'Site'):
          error("you should enter either 'Site' or 'Resource' for switch 'element'")
      if switches['elementType'] is not None:
        switches['elementType'] = switches['elementType'].title()
      if switches['file'] is None:
        error("Enter a fullpath dirac config file location when using 'file' option")
  elif cmd == 'remove':
    # 'remove' is the only command that requires the --policy switch.
    if 'policy' not in switches or switches['policy'] is None:
      error("to remove, you should enter a policy")
  elif cmd == 'update' or cmd == 'view' or cmd == 'restore':
    pass
  else:
    error("Incorrect argument: you should enter either 'test', 'update', 'view', 'remove', 'restore'")
  subLogger.debug("The switches used are:")
  # NOTE: iteritems() is Python-2 only, matching the rest of this script.
  map(subLogger.debug, switches.iteritems())
  return args, switches
#...............................................................................
# UTILS: to check and unpack
def error(msg):
  '''
  Logs the given message as an error, prints the script help text and
  terminates the process with exit code 1.
  '''
  for line in ("\nERROR:", "\t" + msg, "\tPlease, check documentation below"):
    subLogger.error(line)
  Script.showHelp()
  DIRACExit(1)
def getToken(key):
  '''
  Function that gets the userName from the proxy
  '''
  # NOTE(review): despite the docstring, this returns the proxy owner for
  # key 'owner' and the proxy expiration timestamp for key 'expiration';
  # any other key falls through and implicitly returns None -- confirm
  # callers only ever pass those two keys.
  proxyInfo = getProxyInfo()
  if not proxyInfo['OK']:
    error(str(proxyInfo))
  if key.lower() == 'owner':
    userName = proxyInfo['Value']['username']
    tokenOwner = S_OK(userName)
    if not tokenOwner['OK']:
      error(tokenOwner['Message'])
    return tokenOwner['Value']
  elif key.lower() == 'expiration':
    # secondsLeft is converted into an absolute UTC timestamp string.
    expiration = proxyInfo['Value']['secondsLeft']
    tokenExpiration = S_OK(expiration)
    if not tokenExpiration['OK']:
      error(tokenExpiration['Message'])
    now = Time.dateTime()
    # datetime.datetime.utcnow()
    expirationDate = now + datetime.timedelta(seconds=tokenExpiration['Value'])
    expirationDate = Time.toString(expirationDate)
    # Drop the fractional-seconds part of the timestamp.
    expirationDate = expirationDate.split('.')[0]
    return expirationDate
#...............................................................................
def listCSPolicies(setup="Defaults"):
  '''
  to get the list of the policies from the dirac config file
  '''
  # NOTE(review): getPolicySection's first parameter is the config dict and
  # its second is the setup name, but only the setup is passed here -- this
  # call looks broken; confirm where the cfg dict should come from.
  policies = getPolicySection(setup)
  for p in policies:
    print " " * 3, p, " || matchParams: ", policies[p]['matchParams'], " || policyType: ", policies[p]['policyType']
def listAvailablePolicies():
  '''
  to get the list of the policies available in the RSS.Policy.Configurations
  '''
  # Prints one line per known policy: name, expected args and description.
  # Note this function returns None; it only prints.
  policiesMeta = Configurations.POLICIESMETA
  for pm in policiesMeta:
    print " " * 3, pm, " || args: ", policiesMeta[pm]['args'], " || description: ", policiesMeta[pm]['description']
def getPolicySection(cfg, setup="Defaults"):
  '''
  Extracts the Policies dictionary found under
  /Operations/<setup>/ResourceStatus/Policies of the given config dict.
  '''
  resourceStatus = cfg['Operations'][setup]['ResourceStatus']
  return resourceStatus['Policies']
def getPolicies(params):
  '''
  to get all the policies that apply to the given list of params
  '''
  # Iterate over a shallow copy so entries can be deleted from params while
  # looping; switches the user did not set (None) must not constrain the
  # policy match.
  paramsClone = dict(params)
  for param in paramsClone:
    if params[param] is None:
      del params[param]
  result = getPoliciesThatApply(params)
  if result['OK']:
    return result['Value']
  else:
    # error() exits the process, so a failed lookup never returns.
    error("It wasn't possible to execute getPoliciesThatApply, check this: %s" % str(result))
def updatePolicy(policySection):
  '''
  to interactively update/add policies inside the dirac config file

  Prompts the user (via raw_input) for a policy name, its match params and a
  policyType, possibly several times, mutating *policySection* in place.
  Returns S_OK wrapping the updated section.
  '''
  headLine("3 steps to update/add a policy: enter a policy name, then its match params, then a policyType")
  while True:
    # setting policyName; an empty answer ends the whole session.
    name = raw_input("STEP1 - Enter a policy name (leave empty otherwise): ").strip()
    if name == "":
      break
    policySection[name] = {}
    policySection[name]['matchParams'] = {}
    # Each match param may only be used once; entered ones are removed below.
    params = ['element', 'name', 'elementType']
    while True:
      print ""
      print "\t WARNING:"
      print "\t if you enter 'element' as param then you should enter 'Site' or 'Resource' as a value"
      print "\t if you enter 'name' as param then you should enter either a name or a comma-separated list of names\n"
      # setting match params; an empty answer ends this policy's params.
      param = raw_input("STEP2 - Enter a match param (among %s), or leave empty otherwise: " % str(params)).strip()
      if param == "":
        break
      if param not in params:
        print "\t WARNING: you should enter a match param (among %s), or leave it empty otherwise" % str(params)
        continue
      value = raw_input("STEP2 - Enter a value for match param '" + param + "', leave it empty otherwise:").strip()
      if value == "":
        break
      if param == 'element':
        # Normalize capitalization before validating; error() exits on a
        # value other than 'Site' or 'Resource'.
        value = value.title()
        if value != 'Site' and value != 'Resource':
          error("You didn't provide either 'Site' or 'Resource' as a value for match param 'element'")
      policySection[name]['matchParams'][param] = value
      params.remove(param)
    # setting policy type
    headLine("LIST OF AVAILABLE POLICIES")
    # NOTE(review): listAvailablePolicies() prints its listing and returns
    # None, so this statement also prints a trailing "None" -- confirm.
    print listAvailablePolicies()
    policy = raw_input(
        "STEP3 - Enter a policyType (see one of the the policies listed above, leave empty otherwise): ").strip()
    if policy == "":
      break
    policySection[name]['policyType'] = policy
    headLine(" Enter another policy, if you like")
  return S_OK(policySection)
def removePolicy(policySection, policies):
  '''
  Deletes each policy named in the comma-separated *policies* string from
  *policySection* (in place), warning about unknown names, and returns the
  resulting dictionary.
  '''
  for policyName in policies.split(','):
    if not policyName:
      continue
    if policyName in policySection:
      policySection.pop(policyName)
    else:
      print("\n\t WARNING: No policy named %s was found in the Policy section!" % policyName)
  return policySection
def dumpPolicy(cfgDict, fileName):
  '''
  to copy updates and removals to the dirac config file (it creates a backup copy, if needed for restoring)

  Asks the user for confirmation; on 'yes'/'y' it backs up *fileName* to
  *fileName*.bkp and rewrites it from *cfgDict*.  Any other answer is a
  silent no-op.
  '''
  fileCFG = CFG()
  # update cfg policy section
  confirmation = raw_input("Do you want to dump your changes? (replay 'yes' or 'y' to confirm): ").strip()
  if confirmation == 'yes' or confirmation == 'y':
    fileCFG.loadFromDict(cfgDict)
    shutil.copyfile(fileName, fileName + ".bkp") # creates a backup copy of the dirac config file
    dumpedSucccessfully = fileCFG.writeToFile(fileName)
    if dumpedSucccessfully:
      print "Your update has been dumped successfully!"
    else:
      print "It was not possible to dump your update. Something went wrong!"
def viewPolicyDict(policyDict):
  '''
  Pretty-prints *policyDict* to stdout as indented, key-sorted JSON.
  '''
  rendered = json.dumps(policyDict, indent=2, sort_keys=True)
  print(rendered)
def restoreCfgFile(fileName):
  '''
  Undoes the latest update/removal by overwriting *fileName* with its
  ".bkp" backup copy.
  '''
  backupName = fileName + ".bkp"
  shutil.copyfile(backupName, fileName)
  print("\n\tWARNING: dirac config file was restored!")
def headLine(text):
  '''
  Prints *text* framed as a one-line "*** text ***" section header.
  '''
  print("\n\t*** %s ***\n" % text)
def run(cmd, params):
  '''
  to execute a command among view, test, update, remove, restore
  '''
  # cmd is the positional-args list returned by parseSwitches(); pop() takes
  # its last element as the command to execute.
  cmd = cmd.pop()
  fileCFG = CFG()
  fileName = params['file']
  setup = params['setup']
  fileCFG.loadFromFile(fileName)
  cfgDict = fileCFG.getAsDict()
  # NOTE(review): this read uses getPolicySection's default setup
  # ("Defaults") even when params['setup'] differs, while the writes below
  # honor the chosen setup -- confirm the intended behavior.
  policySection = getPolicySection(cfgDict)
  if cmd == 'view':
    viewPolicyDict(policySection)
  elif cmd == 'test':
    policiesThatApply = getPolicies(params)
    viewPolicyDict(policiesThatApply)
  elif cmd == 'update':
    result = updatePolicy(policySection)
    if result['OK']:
      policySection = result['Value']
      cfgDict['Operations'][setup]['ResourceStatus']['Policies'] = policySection
      headLine("A preview of your policy section after the update")
      viewPolicyDict(policySection)
      dumpPolicy(cfgDict, fileName)
  elif cmd == 'remove':
    policies = params['policy']
    policySection = removePolicy(policySection, policies)
    cfgDict['Operations'][setup]['ResourceStatus']['Policies'] = policySection
    headLine("A preview of your policy section after the removal")
    viewPolicyDict(policySection)
    dumpPolicy(cfgDict, fileName)
  elif cmd == 'restore':
    # Restore the backup, then reload and show the resulting policy section.
    restoreCfgFile(fileName)
    fileCFG.loadFromFile(fileName)
    cfgDict = fileCFG.getAsDict()
    policySection = getPolicySection(cfgDict)
    viewPolicyDict(policySection)
#...............................................................................
if __name__ == "__main__":
  subLogger = gLogger.getSubLogger(__file__)
  # Script initialization
  registerSwitches()
  registerUsageMessage()
  # cmd is the full positional-args list; run() pops the command from it.
  cmd, params = parseSwitches()
  # Unpack switchDict if 'name' or 'statusType' have multiple values
  #switchDictSet = unpack( switchDict )
  # Run script
  run(cmd, params)
  # Bye
  DIRACExit(0)
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| andresailer/DIRAC | ResourceStatusSystem/scripts/dirac-rss-policy-manager.py | Python | gpl-3.0 | 12,984 | [
"DIRAC"
] | 2d7c322c454ca10cdbe5a00a5312a9175e95731f77c395515368734d93e35788 |
import os, logging, threading, time
from Queue import Queue, Empty
from galaxy import model
from paste.deploy.converters import asbool
import pkg_resources
try:
    # The DRMAA bindings are optional at import time; SGEJobRunner checks
    # for None in its constructor and raises a clear error if missing.
    pkg_resources.require( "DRMAA_python" )
    DRMAA = __import__( "DRMAA" )
except:
    DRMAA = None
log = logging.getLogger( __name__ )
if DRMAA is not None:
    # Map DRMAA job-state constants to human-readable descriptions,
    # used when logging job state transitions.
    DRMAA_state = {
        DRMAA.Session.UNDETERMINED: 'process status cannot be determined',
        DRMAA.Session.QUEUED_ACTIVE: 'job is queued and waiting to be scheduled',
        DRMAA.Session.SYSTEM_ON_HOLD: 'job is queued and in system hold',
        DRMAA.Session.USER_ON_HOLD: 'job is queued and in user hold',
        DRMAA.Session.USER_SYSTEM_ON_HOLD: 'job is queued and in user and system hold',
        DRMAA.Session.RUNNING: 'job is running',
        DRMAA.Session.SYSTEM_SUSPENDED: 'job is system suspended',
        DRMAA.Session.USER_SUSPENDED: 'job is user suspended',
        DRMAA.Session.DONE: 'job finished normally',
        DRMAA.Session.FAILED: 'job finished, but failed',
    }
# Shell-script template for submitted jobs; the three %s placeholders are
# filled with (galaxy lib dir, working directory, command line).
sge_template = """#!/bin/sh
#$ -S /bin/sh
GALAXY_LIB="%s"
if [ "$GALAXY_LIB" != "None" ]; then
    if [ -n "$PYTHONPATH" ]; then
        PYTHONPATH="$GALAXY_LIB:$PYTHONPATH"
    else
        PYTHONPATH="$GALAXY_LIB"
    fi
    export PYTHONPATH
fi
cd %s
%s
"""
class SGEJobState( object ):
    def __init__( self ):
        """
        Container for the state of a single SGE-managed job that the
        monitor thread needs to track.
        """
        # Galaxy-side wrapper and the external SGE job identifier.
        self.job_wrapper = None
        self.job_id = None
        # Last DRMAA state observed, and whether we have seen it running.
        self.old_state = None
        self.running = False
        # Paths of the submit script and the captured stdout/stderr files.
        self.job_file = None
        self.ofile = None
        self.efile = None
        # Runner URL the job was submitted through.
        self.runner_url = None
class SGEJobRunner( object ):
    """
    Job runner backed by a finite pool of worker threads. FIFO scheduling
    """
    # Sentinel placed on the queue to tell the monitor thread to exit.
    STOP_SIGNAL = object()
    def __init__( self, app ):
        """Initialize this job runner and start the monitor thread"""
        # Check if SGE was importable, fail if not
        if DRMAA is None:
            raise Exception( "SGEJobRunner requires DRMAA_python which was not found" )
        self.app = app
        # 'watched' and 'queue' are both used to keep track of jobs to watch.
        # 'queue' is used to add new watched jobs, and can be called from
        # any thread (usually by the 'queue_job' method). 'watched' must only
        # be modified by the monitor thread, which will move items from 'queue'
        # to 'watched' and then manage the watched jobs.
        self.watched = []
        self.queue = Queue()
        # A single DRMAA session is shared for all submissions and queries.
        self.default_cell = self.determine_sge_cell( self.app.config.default_cluster_job_runner )
        self.ds = DRMAA.Session()
        self.ds.init( self.default_cell )
        self.monitor_thread = threading.Thread( target=self.monitor )
        self.monitor_thread.start()
        log.debug( "ready" )
    def determine_sge_cell( self, url ):
        """Determine what SGE cell we are using"""
        # Runner URLs look like "sge:///<cell>/<queue>"; element 2 is the cell.
        url_split = url.split("/")
        if url_split[0] == 'sge:':
            return url_split[2]
        # this could happen if sge is started, but is not the default runner
        else:
            return ''
    def determine_sge_queue( self, url ):
        """Determine what SGE queue we are submitting to"""
        url_split = url.split("/")
        queue = url_split[3]
        if queue == "":
            # None == server's default queue
            queue = None
        return queue
    def queue_job( self, job_wrapper ):
        """Create SGE script for a job and submit it to the SGE queue"""
        try:
            job_wrapper.prepare()
            command_line = job_wrapper.get_command_line()
        except:
            job_wrapper.fail( "failure preparing job", exception=True )
            log.exception("failure running job %d" % job_wrapper.job_id)
            return
        runner_url = job_wrapper.tool.job_runner
        # This is silly, why would we queue a job with no command line?
        if not command_line:
            job_wrapper.finish( '', '' )
            return
        # Check for deletion before we change state
        if job_wrapper.get_state() == model.Job.states.DELETED:
            log.debug( "Job %s deleted by user before it entered the SGE queue" % job_wrapper.job_id )
            job_wrapper.cleanup()
            return
        # Change to queued state immediately
        job_wrapper.change_state( model.Job.states.QUEUED )
        if self.determine_sge_cell( runner_url ) != self.default_cell:
            # TODO: support multiple cells
            log.warning( "(%s) Using multiple SGE cells is not supported. This job will be submitted to the default cell." % job_wrapper.job_id )
        sge_queue_name = self.determine_sge_queue( runner_url )
        # define job attributes
        # NOTE(review): output, error and script files live under database/pbs
        # relative to the current working directory -- confirm this matches
        # the application's configured job-files directory.
        ofile = "%s/database/pbs/%s.o" % (os.getcwd(), job_wrapper.job_id)
        efile = "%s/database/pbs/%s.e" % (os.getcwd(), job_wrapper.job_id)
        jt = self.ds.createJobTemplate()
        jt.remoteCommand = "%s/database/pbs/galaxy_%s.sh" % (os.getcwd(), job_wrapper.job_id)
        # Leading ':' is the DRMAA "no hostname" form for output paths.
        jt.outputPath = ":%s" % ofile
        jt.errorPath = ":%s" % efile
        if sge_queue_name is not None:
            jt.setNativeSpecification( "-q %s" % sge_queue_name )
        script = sge_template % (job_wrapper.galaxy_lib_dir, os.path.abspath( job_wrapper.working_directory ), command_line)
        fh = file( jt.remoteCommand, "w" )
        fh.write( script )
        fh.close()
        os.chmod( jt.remoteCommand, 0750 )
        # job was deleted while we were preparing it
        if job_wrapper.get_state() == model.Job.states.DELETED:
            log.debug( "Job %s deleted by user before it entered the SGE queue" % job_wrapper.job_id )
            self.cleanup( ( ofile, efile, jt.remoteCommand ) )
            job_wrapper.cleanup()
            return
        galaxy_job_id = job_wrapper.job_id
        log.debug("(%s) submitting file %s" % ( galaxy_job_id, jt.remoteCommand ) )
        log.debug("(%s) command is: %s" % ( galaxy_job_id, command_line ) )
        # runJob will raise if there's a submit problem
        job_id = self.ds.runJob(jt)
        if sge_queue_name is None:
            log.debug("(%s) queued in default queue as %s" % (galaxy_job_id, job_id) )
        else:
            log.debug("(%s) queued in %s queue as %s" % (galaxy_job_id, sge_queue_name, job_id) )
        # store runner information for tracking if Galaxy restarts
        job_wrapper.set_runner( runner_url, job_id )
        # Store SGE related state information for job
        sge_job_state = SGEJobState()
        sge_job_state.job_wrapper = job_wrapper
        sge_job_state.job_id = job_id
        sge_job_state.ofile = ofile
        sge_job_state.efile = efile
        sge_job_state.job_file = jt.remoteCommand
        sge_job_state.old_state = 'new'
        sge_job_state.running = False
        sge_job_state.runner_url = runner_url
        # delete the job template
        self.ds.deleteJobTemplate( jt )
        # Add to our 'queue' of jobs to monitor
        self.queue.put( sge_job_state )
    def monitor( self ):
        """
        Watches jobs currently in the PBS queue and deals with state changes
        (queued to running) and job completion
        """
        while 1:
            # Take any new watched jobs and put them on the monitor list
            try:
                while 1:
                    sge_job_state = self.queue.get_nowait()
                    if sge_job_state is self.STOP_SIGNAL:
                        # TODO: This is where any cleanup would occur
                        self.ds.exit()
                        return
                    self.watched.append( sge_job_state )
            except Empty:
                pass
            # Iterate over the list of watched jobs and check state
            self.check_watched_items()
            # Sleep a bit before the next state check
            time.sleep( 1 )
    def check_watched_items( self ):
        """
        Called by the monitor thread to look at each watched job and deal
        with state changes.
        """
        # Jobs that are still in-flight are collected into a fresh list;
        # finished/failed jobs simply drop out via 'continue'.
        new_watched = []
        for sge_job_state in self.watched:
            job_id = sge_job_state.job_id
            galaxy_job_id = sge_job_state.job_wrapper.job_id
            old_state = sge_job_state.old_state
            try:
                state = self.ds.getJobProgramStatus( job_id )
            except DRMAA.InvalidJobError:
                # we should only get here if an orphaned job was put into the queue at app startup
                log.debug("(%s/%s) job left SGE queue" % ( galaxy_job_id, job_id ) )
                self.finish_job( sge_job_state )
                continue
            except Exception, e:
                # so we don't kill the monitor thread
                log.exception("(%s/%s) Unable to check job status" % ( galaxy_job_id, job_id ) )
                log.warning("(%s/%s) job will now be errored" % ( galaxy_job_id, job_id ) )
                sge_job_state.job_wrapper.fail( "Cluster could not complete job" )
                continue
            if state != old_state:
                log.debug("(%s/%s) state change: %s" % ( galaxy_job_id, job_id, DRMAA_state[state] ) )
            if state == DRMAA.Session.RUNNING and not sge_job_state.running:
                sge_job_state.running = True
                sge_job_state.job_wrapper.change_state( model.Job.states.RUNNING )
            if state == DRMAA.Session.DONE:
                self.finish_job( sge_job_state )
                continue
            if state == DRMAA.Session.FAILED:
                sge_job_state.job_wrapper.fail( "Cluster could not complete job" )
                sge_job_state.job_wrapper.cleanup()
                continue
            sge_job_state.old_state = state
            new_watched.append( sge_job_state )
        # Replace the watch list with the updated version
        self.watched = new_watched
    def finish_job( self, sge_job_state ):
        """
        Get the output/error for a finished job, pass to `job_wrapper.finish`
        and cleanup all the SGE temporary files.
        """
        ofile = sge_job_state.ofile
        efile = sge_job_state.efile
        job_file = sge_job_state.job_file
        # collect the output
        try:
            ofh = file(ofile, "r")
            efh = file(efile, "r")
            stdout = ofh.read()
            stderr = efh.read()
        except:
            # Missing/unreadable output files become a generic error message
            # rather than killing the monitor thread.
            stdout = ''
            stderr = 'Job output not returned from cluster'
            log.debug(stderr)
        try:
            sge_job_state.job_wrapper.finish( stdout, stderr )
        except:
            log.exception("Job wrapper finish method failed")
        # clean up the sge files
        self.cleanup( ( ofile, efile, job_file ) )
    def cleanup( self, files ):
        # In debug mode the job files are kept around to aid troubleshooting.
        if not asbool( self.app.config.get( 'debug', False ) ):
            for file in files:
                if os.access( file, os.R_OK ):
                    os.unlink( file )
    def put( self, job_wrapper ):
        """Add a job to the queue (by job identifier)"""
        self.queue_job( job_wrapper )
    def shutdown( self ):
        """Attempts to gracefully shut down the monitor thread"""
        log.info( "sending stop signal to worker threads" )
        self.queue.put( self.STOP_SIGNAL )
        log.info( "sge job runner stopped" )
    def stop_job( self, job ):
        """Attempts to delete a job from the SGE queue"""
        try:
            self.ds.control( job.job_runner_external_id, DRMAA.Session.TERMINATE )
            log.debug( "(%s/%s) Removed from SGE queue at user's request" % ( job.id, job.job_runner_external_id ) )
        except DRMAA.InvalidJobError:
            # Job already left the queue on its own; nothing to do.
            log.debug( "(%s/%s) User killed running job, but it was already dead" % ( job.id, job.job_runner_external_id ) )
    def recover( self, job, job_wrapper ):
        """Recovers jobs stuck in the queued/running state when Galaxy started"""
        # Rebuild the SGEJobState from persisted job info so the monitor
        # thread can pick the job back up after a restart.
        sge_job_state = SGEJobState()
        sge_job_state.ofile = "%s/database/pbs/%s.o" % (os.getcwd(), job.id)
        sge_job_state.efile = "%s/database/pbs/%s.e" % (os.getcwd(), job.id)
        sge_job_state.job_file = "%s/database/pbs/galaxy_%s.sh" % (os.getcwd(), job.id)
        sge_job_state.job_id = str( job.job_runner_external_id )
        sge_job_state.runner_url = job_wrapper.tool.job_runner
        job_wrapper.command_line = job.command_line
        sge_job_state.job_wrapper = job_wrapper
        if job.state == model.Job.states.RUNNING:
            log.debug( "(%s/%s) is still in running state, adding to the SGE queue" % ( job.id, job.job_runner_external_id ) )
            sge_job_state.old_state = DRMAA.Session.RUNNING
            sge_job_state.running = True
            self.queue.put( sge_job_state )
        elif job.state == model.Job.states.QUEUED:
            log.debug( "(%s/%s) is still in SGE queued state, adding to the SGE queue" % ( job.id, job.job_runner_external_id ) )
            sge_job_state.old_state = DRMAA.Session.QUEUED
            sge_job_state.running = False
            self.queue.put( sge_job_state )
| volpino/Yeps-EURAC | lib/galaxy/jobs/runners/sge.py | Python | mit | 13,219 | [
"Galaxy"
] | 611922dbe7704d32150352d4f0fd0708fa5f158f1e2e370a3125cad3ab4fb815 |
"""
NetCDF reader/writer module.
This module is used to read and create NetCDF files. NetCDF files are
accessed through the `netcdf_file` object. Data written to and from NetCDF
files are contained in `netcdf_variable` objects. Attributes are given
as member variables of the `netcdf_file` and `netcdf_variable` objects.
This module implements the Scientific.IO.NetCDF API to read and create
NetCDF files. The same API is also used in the PyNIO and pynetcdf
modules, allowing these modules to be used interchangeably when working
with NetCDF files.
Only NetCDF3 is supported here; for NetCDF4 see
`netCDF4-python <http://unidata.github.io/netcdf4-python/>`__,
which has a similar API.
"""
# TODO:
# * properly implement ``_FillValue``.
# * fix character variables.
# * implement PAGESIZE for Python 2.6?
# The Scientific.IO.NetCDF API allows attributes to be added directly to
# instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate
# between user-set attributes and instance attributes, user-set attributes
# are automatically stored in the ``_attributes`` attribute by overloading
#``__setattr__``. This is the reason why the code sometimes uses
#``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``;
# otherwise the key would be inserted into userspace attributes.
__all__ = ['netcdf_file', 'netcdf_variable']
import warnings
import weakref
from operator import mul
from platform import python_implementation
import mmap as mm
import numpy as np
from numpy import frombuffer, dtype, empty, array, asarray
from numpy import little_endian as LITTLE_ENDIAN
from functools import reduce
IS_PYPY = python_implementation() == 'PyPy'
# Byte tags from the NetCDF classic file format specification.
ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00'
ZERO = b'\x00\x00\x00\x00'
NC_BYTE = b'\x00\x00\x00\x01'
NC_CHAR = b'\x00\x00\x00\x02'
NC_SHORT = b'\x00\x00\x00\x03'
NC_INT = b'\x00\x00\x00\x04'
NC_FLOAT = b'\x00\x00\x00\x05'
NC_DOUBLE = b'\x00\x00\x00\x06'
NC_DIMENSION = b'\x00\x00\x00\n'
NC_VARIABLE = b'\x00\x00\x00\x0b'
NC_ATTRIBUTE = b'\x00\x00\x00\x0c'
# Canonical fill values, one per NetCDF data type.
FILL_BYTE = b'\x81'
FILL_CHAR = b'\x00'
FILL_SHORT = b'\x80\x01'
FILL_INT = b'\x80\x00\x00\x01'
FILL_FLOAT = b'\x7C\xF0\x00\x00'
FILL_DOUBLE = b'\x47\x9E\x00\x00\x00\x00\x00\x00'
# nc_type tag -> (numpy typecode, size in bytes)
TYPEMAP = {NC_BYTE: ('b', 1),
           NC_CHAR: ('c', 1),
           NC_SHORT: ('h', 2),
           NC_INT: ('i', 4),
           NC_FLOAT: ('f', 4),
           NC_DOUBLE: ('d', 8)}
# nc_type tag -> fill value bytes
FILLMAP = {NC_BYTE: FILL_BYTE,
           NC_CHAR: FILL_CHAR,
           NC_SHORT: FILL_SHORT,
           NC_INT: FILL_INT,
           NC_FLOAT: FILL_FLOAT,
           NC_DOUBLE: FILL_DOUBLE}
# (typecode, size) -> nc_type tag
REVERSE = {('b', 1): NC_BYTE,
           ('B', 1): NC_CHAR,
           ('c', 1): NC_CHAR,
           ('h', 2): NC_SHORT,
           ('i', 4): NC_INT,
           ('f', 4): NC_FLOAT,
           ('d', 8): NC_DOUBLE,
           # these come from asarray(1).dtype.char and asarray('foo').dtype.char,
           # used when getting the types from generic attributes.
           ('l', 4): NC_INT,
           ('S', 1): NC_CHAR}
class netcdf_file:
"""
A file object for NetCDF data.
A `netcdf_file` object has two standard attributes: `dimensions` and
`variables`. The values of both are dictionaries, mapping dimension
names to their associated lengths and variable names to variables,
respectively. Application programs should never modify these
dictionaries.
All other attributes correspond to global attributes defined in the
NetCDF file. Global file attributes are created by assigning to an
attribute of the `netcdf_file` object.
Parameters
----------
filename : string or file-like
string -> filename
mode : {'r', 'w', 'a'}, optional
read-write-append mode, default is 'r'
mmap : None or bool, optional
Whether to mmap `filename` when reading. Default is True
when `filename` is a file name, False when `filename` is a
file-like object. Note that when mmap is in use, data arrays
returned refer directly to the mmapped data on disk, and the
file cannot be closed as long as references to it exist.
version : {1, 2}, optional
version of netcdf to read / write, where 1 means *Classic
format* and 2 means *64-bit offset format*. Default is 1. See
`here <https://www.unidata.ucar.edu/software/netcdf/docs/netcdf_introduction.html#select_format>`__
for more info.
maskandscale : bool, optional
Whether to automatically scale and/or mask data based on attributes.
Default is False.
Notes
-----
The major advantage of this module over other modules is that it doesn't
require the code to be linked to the NetCDF libraries. This module is
derived from `pupynere <https://bitbucket.org/robertodealmeida/pupynere/>`_.
NetCDF files are a self-describing binary data format. The file contains
metadata that describes the dimensions and variables in the file. More
details about NetCDF files can be found `here
<https://www.unidata.ucar.edu/software/netcdf/guide_toc.html>`__. There
are three main sections to a NetCDF data structure:
1. Dimensions
2. Variables
3. Attributes
The dimensions section records the name and length of each dimension used
by the variables. The variables would then indicate which dimensions it
uses and any attributes such as data units, along with containing the data
values for the variable. It is good practice to include a
variable that is the same name as a dimension to provide the values for
that axes. Lastly, the attributes section would contain additional
information such as the name of the file creator or the instrument used to
collect the data.
When writing data to a NetCDF file, there is often the need to indicate the
'record dimension'. A record dimension is the unbounded dimension for a
variable. For example, a temperature variable may have dimensions of
latitude, longitude and time. If one wants to add more temperature data to
the NetCDF file as time progresses, then the temperature variable should
have the time dimension flagged as the record dimension.
In addition, the NetCDF file header contains the position of the data in
the file, so access can be done in an efficient manner without loading
unnecessary data into memory. It uses the ``mmap`` module to create
Numpy arrays mapped to the data on disk, for the same purpose.
Note that when `netcdf_file` is used to open a file with mmap=True
(default for read-only), arrays returned by it refer to data
directly on the disk. The file should not be closed, and cannot be cleanly
closed when asked, if such arrays are alive. You may want to copy data arrays
obtained from mmapped Netcdf file if they are to be processed after the file
is closed, see the example below.
Examples
--------
To create a NetCDF file:
>>> from scipy.io import netcdf
>>> f = netcdf.netcdf_file('simple.nc', 'w')
>>> f.history = 'Created for a test'
>>> f.createDimension('time', 10)
>>> time = f.createVariable('time', 'i', ('time',))
>>> time[:] = np.arange(10)
>>> time.units = 'days since 2008-01-01'
>>> f.close()
Note the assignment of ``arange(10)`` to ``time[:]``. Exposing the slice
of the time variable allows for the data to be set in the object, rather
than letting ``arange(10)`` overwrite the ``time`` variable.
To read the NetCDF file we just created:
>>> from scipy.io import netcdf
>>> f = netcdf.netcdf_file('simple.nc', 'r')
>>> print(f.history)
b'Created for a test'
>>> time = f.variables['time']
>>> print(time.units)
b'days since 2008-01-01'
>>> print(time.shape)
(10,)
>>> print(time[-1])
9
NetCDF files, when opened read-only, return arrays that refer
directly to memory-mapped data on disk:
>>> data = time[:]
>>> data.base.base
<mmap.mmap object at 0x7fe753763180>
If the data is to be processed after the file is closed, it needs
to be copied to main memory:
>>> data = time[:].copy()
>>> f.close()
>>> data.mean()
4.5
A NetCDF file can also be used as context manager:
>>> from scipy.io import netcdf
>>> with netcdf.netcdf_file('simple.nc', 'r') as f:
... print(f.history)
b'Created for a test'
"""
def __init__(self, filename, mode='r', mmap=None, version=1,
maskandscale=False):
"""Initialize netcdf_file from fileobj (str or file-like)."""
if mode not in 'rwa':
raise ValueError("Mode must be either 'r', 'w' or 'a'.")
if hasattr(filename, 'seek'): # file-like
self.fp = filename
self.filename = 'None'
if mmap is None:
mmap = False
elif mmap and not hasattr(filename, 'fileno'):
raise ValueError('Cannot use file object for mmap')
else: # maybe it's a string
self.filename = filename
omode = 'r+' if mode == 'a' else mode
self.fp = open(self.filename, '%sb' % omode)
if mmap is None:
# Mmapped files on PyPy cannot be usually closed
# before the GC runs, so it's better to use mmap=False
# as the default.
mmap = (not IS_PYPY)
if mode != 'r':
# Cannot read write-only files
mmap = False
self.use_mmap = mmap
self.mode = mode
self.version_byte = version
self.maskandscale = maskandscale
self.dimensions = {}
self.variables = {}
self._dims = []
self._recs = 0
self._recsize = 0
self._mm = None
self._mm_buf = None
if self.use_mmap:
self._mm = mm.mmap(self.fp.fileno(), 0, access=mm.ACCESS_READ)
self._mm_buf = np.frombuffer(self._mm, dtype=np.int8)
self._attributes = {}
if mode in 'ra':
self._read()
def __setattr__(self, attr, value):
# Store user defined attributes in a separate dict,
# so we can save them to file later.
try:
self._attributes[attr] = value
except AttributeError:
pass
self.__dict__[attr] = value
def close(self):
"""Closes the NetCDF file."""
if hasattr(self, 'fp') and not self.fp.closed:
try:
self.flush()
finally:
self.variables = {}
if self._mm_buf is not None:
ref = weakref.ref(self._mm_buf)
self._mm_buf = None
if ref() is None:
# self._mm_buf is gc'd, and we can close the mmap
self._mm.close()
else:
# we cannot close self._mm, since self._mm_buf is
# alive and there may still be arrays referring to it
warnings.warn((
"Cannot close a netcdf_file opened with mmap=True, when "
"netcdf_variables or arrays referring to its data still exist. "
"All data arrays obtained from such files refer directly to "
"data on disk, and must be copied before the file can be cleanly "
"closed. (See netcdf_file docstring for more information on mmap.)"
), category=RuntimeWarning)
self._mm = None
self.fp.close()
__del__ = close
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def createDimension(self, name, length):
"""
Adds a dimension to the Dimension section of the NetCDF data structure.
Note that this function merely adds a new dimension that the variables can
reference. The values for the dimension, if desired, should be added as
a variable using `createVariable`, referring to this dimension.
Parameters
----------
name : str
Name of the dimension (Eg, 'lat' or 'time').
length : int
Length of the dimension.
See Also
--------
createVariable
"""
if length is None and self._dims:
raise ValueError("Only first dimension may be unlimited!")
self.dimensions[name] = length
self._dims.append(name)
def createVariable(self, name, type, dimensions):
"""
Create an empty variable for the `netcdf_file` object, specifying its data
type and the dimensions it uses.
Parameters
----------
name : str
Name of the new variable.
type : dtype or str
Data type of the variable.
dimensions : sequence of str
List of the dimension names used by the variable, in the desired order.
Returns
-------
variable : netcdf_variable
The newly created ``netcdf_variable`` object.
This object has also been added to the `netcdf_file` object as well.
See Also
--------
createDimension
Notes
-----
Any dimensions to be used by the variable should already exist in the
NetCDF data structure or should be created by `createDimension` prior to
creating the NetCDF variable.
"""
shape = tuple([self.dimensions[dim] for dim in dimensions])
shape_ = tuple([dim or 0 for dim in shape]) # replace None with 0 for NumPy
type = dtype(type)
typecode, size = type.char, type.itemsize
if (typecode, size) not in REVERSE:
raise ValueError("NetCDF 3 does not support type %s" % type)
data = empty(shape_, dtype=type.newbyteorder("B")) # convert to big endian always for NetCDF 3
self.variables[name] = netcdf_variable(
data, typecode, size, shape, dimensions,
maskandscale=self.maskandscale)
return self.variables[name]
def flush(self):
"""
Perform a sync-to-disk flush if the `netcdf_file` object is in write mode.
See Also
--------
sync : Identical function
"""
if hasattr(self, 'mode') and self.mode in 'wa':
self._write()
sync = flush
def _write(self):
self.fp.seek(0)
self.fp.write(b'CDF')
self.fp.write(array(self.version_byte, '>b').tobytes())
# Write headers and data.
self._write_numrecs()
self._write_dim_array()
self._write_gatt_array()
self._write_var_array()
def _write_numrecs(self):
# Get highest record count from all record variables.
for var in self.variables.values():
if var.isrec and len(var.data) > self._recs:
self.__dict__['_recs'] = len(var.data)
self._pack_int(self._recs)
def _write_dim_array(self):
if self.dimensions:
self.fp.write(NC_DIMENSION)
self._pack_int(len(self.dimensions))
for name in self._dims:
self._pack_string(name)
length = self.dimensions[name]
self._pack_int(length or 0) # replace None with 0 for record dimension
else:
self.fp.write(ABSENT)
def _write_gatt_array(self):
self._write_att_array(self._attributes)
def _write_att_array(self, attributes):
if attributes:
self.fp.write(NC_ATTRIBUTE)
self._pack_int(len(attributes))
for name, values in attributes.items():
self._pack_string(name)
self._write_att_values(values)
else:
self.fp.write(ABSENT)
def _write_var_array(self):
if self.variables:
self.fp.write(NC_VARIABLE)
self._pack_int(len(self.variables))
# Sort variable names non-recs first, then recs.
def sortkey(n):
v = self.variables[n]
if v.isrec:
return (-1,)
return v._shape
variables = sorted(self.variables, key=sortkey, reverse=True)
# Set the metadata for all variables.
for name in variables:
self._write_var_metadata(name)
# Now that we have the metadata, we know the vsize of
# each record variable, so we can calculate recsize.
self.__dict__['_recsize'] = sum([
var._vsize for var in self.variables.values()
if var.isrec])
# Set the data for all variables.
for name in variables:
self._write_var_data(name)
else:
self.fp.write(ABSENT)
def _write_var_metadata(self, name):
var = self.variables[name]
self._pack_string(name)
self._pack_int(len(var.dimensions))
for dimname in var.dimensions:
dimid = self._dims.index(dimname)
self._pack_int(dimid)
self._write_att_array(var._attributes)
nc_type = REVERSE[var.typecode(), var.itemsize()]
self.fp.write(nc_type)
if not var.isrec:
vsize = var.data.size * var.data.itemsize
vsize += -vsize % 4
else: # record variable
try:
vsize = var.data[0].size * var.data.itemsize
except IndexError:
vsize = 0
rec_vars = len([v for v in self.variables.values()
if v.isrec])
if rec_vars > 1:
vsize += -vsize % 4
self.variables[name].__dict__['_vsize'] = vsize
self._pack_int(vsize)
# Pack a bogus begin, and set the real value later.
self.variables[name].__dict__['_begin'] = self.fp.tell()
self._pack_begin(0)
def _write_var_data(self, name):
var = self.variables[name]
# Set begin in file header.
the_beguine = self.fp.tell()
self.fp.seek(var._begin)
self._pack_begin(the_beguine)
self.fp.seek(the_beguine)
# Write data.
if not var.isrec:
self.fp.write(var.data.tobytes())
count = var.data.size * var.data.itemsize
self._write_var_padding(var, var._vsize - count)
else: # record variable
# Handle rec vars with shape[0] < nrecs.
if self._recs > len(var.data):
shape = (self._recs,) + var.data.shape[1:]
# Resize in-place does not always work since
# the array might not be single-segment
try:
var.data.resize(shape)
except ValueError:
var.__dict__['data'] = np.resize(var.data, shape).astype(var.data.dtype)
pos0 = pos = self.fp.tell()
for rec in var.data:
# Apparently scalars cannot be converted to big endian. If we
# try to convert a ``=i4`` scalar to, say, '>i4' the dtype
# will remain as ``=i4``.
if not rec.shape and (rec.dtype.byteorder == '<' or
(rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
rec = rec.byteswap()
self.fp.write(rec.tobytes())
# Padding
count = rec.size * rec.itemsize
self._write_var_padding(var, var._vsize - count)
pos += self._recsize
self.fp.seek(pos)
self.fp.seek(pos0 + var._vsize)
def _write_var_padding(self, var, size):
encoded_fill_value = var._get_encoded_fill_value()
num_fills = size // len(encoded_fill_value)
self.fp.write(encoded_fill_value * num_fills)
def _write_att_values(self, values):
if hasattr(values, 'dtype'):
nc_type = REVERSE[values.dtype.char, values.dtype.itemsize]
else:
types = [(int, NC_INT), (float, NC_FLOAT), (str, NC_CHAR)]
# bytes index into scalars in py3k. Check for "string" types
if isinstance(values, (str, bytes)):
sample = values
else:
try:
sample = values[0] # subscriptable?
except TypeError:
sample = values # scalar
for class_, nc_type in types:
if isinstance(sample, class_):
break
typecode, size = TYPEMAP[nc_type]
dtype_ = '>%s' % typecode
# asarray() dies with bytes and '>c' in py3k. Change to 'S'
dtype_ = 'S' if dtype_ == '>c' else dtype_
values = asarray(values, dtype=dtype_)
self.fp.write(nc_type)
if values.dtype.char == 'S':
nelems = values.itemsize
else:
nelems = values.size
self._pack_int(nelems)
if not values.shape and (values.dtype.byteorder == '<' or
(values.dtype.byteorder == '=' and LITTLE_ENDIAN)):
values = values.byteswap()
self.fp.write(values.tobytes())
count = values.size * values.itemsize
self.fp.write(b'\x00' * (-count % 4)) # pad
def _read(self):
# Check magic bytes and version
magic = self.fp.read(3)
if not magic == b'CDF':
raise TypeError("Error: %s is not a valid NetCDF 3 file" %
self.filename)
self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0]
# Read file headers and set data.
self._read_numrecs()
self._read_dim_array()
self._read_gatt_array()
self._read_var_array()
def _read_numrecs(self):
self.__dict__['_recs'] = self._unpack_int()
def _read_dim_array(self):
header = self.fp.read(4)
if header not in [ZERO, NC_DIMENSION]:
raise ValueError("Unexpected header.")
count = self._unpack_int()
for dim in range(count):
name = self._unpack_string().decode('latin1')
length = self._unpack_int() or None # None for record dimension
self.dimensions[name] = length
self._dims.append(name) # preserve order
def _read_gatt_array(self):
for k, v in self._read_att_array().items():
self.__setattr__(k, v)
def _read_att_array(self):
header = self.fp.read(4)
if header not in [ZERO, NC_ATTRIBUTE]:
raise ValueError("Unexpected header.")
count = self._unpack_int()
attributes = {}
for attr in range(count):
name = self._unpack_string().decode('latin1')
attributes[name] = self._read_att_values()
return attributes
def _read_var_array(self):
header = self.fp.read(4)
if header not in [ZERO, NC_VARIABLE]:
raise ValueError("Unexpected header.")
begin = 0
dtypes = {'names': [], 'formats': []}
rec_vars = []
count = self._unpack_int()
for var in range(count):
(name, dimensions, shape, attributes,
typecode, size, dtype_, begin_, vsize) = self._read_var()
# https://www.unidata.ucar.edu/software/netcdf/guide_toc.html
# Note that vsize is the product of the dimension lengths
# (omitting the record dimension) and the number of bytes
# per value (determined from the type), increased to the
# next multiple of 4, for each variable. If a record
# variable, this is the amount of space per record. The
# netCDF "record size" is calculated as the sum of the
# vsize's of all the record variables.
#
# The vsize field is actually redundant, because its value
# may be computed from other information in the header. The
# 32-bit vsize field is not large enough to contain the size
# of variables that require more than 2^32 - 4 bytes, so
# 2^32 - 1 is used in the vsize field for such variables.
if shape and shape[0] is None: # record variable
rec_vars.append(name)
# The netCDF "record size" is calculated as the sum of
# the vsize's of all the record variables.
self.__dict__['_recsize'] += vsize
if begin == 0:
begin = begin_
dtypes['names'].append(name)
dtypes['formats'].append(str(shape[1:]) + dtype_)
# Handle padding with a virtual variable.
if typecode in 'bch':
actual_size = reduce(mul, (1,) + shape[1:]) * size
padding = -actual_size % 4
if padding:
dtypes['names'].append('_padding_%d' % var)
dtypes['formats'].append('(%d,)>b' % padding)
# Data will be set later.
data = None
else: # not a record variable
# Calculate size to avoid problems with vsize (above)
a_size = reduce(mul, shape, 1) * size
if self.use_mmap:
data = self._mm_buf[begin_:begin_+a_size].view(dtype=dtype_)
data.shape = shape
else:
pos = self.fp.tell()
self.fp.seek(begin_)
data = frombuffer(self.fp.read(a_size), dtype=dtype_
).copy()
data.shape = shape
self.fp.seek(pos)
# Add variable.
self.variables[name] = netcdf_variable(
data, typecode, size, shape, dimensions, attributes,
maskandscale=self.maskandscale)
if rec_vars:
# Remove padding when only one record variable.
if len(rec_vars) == 1:
dtypes['names'] = dtypes['names'][:1]
dtypes['formats'] = dtypes['formats'][:1]
# Build rec array.
if self.use_mmap:
rec_array = self._mm_buf[begin:begin+self._recs*self._recsize].view(dtype=dtypes)
rec_array.shape = (self._recs,)
else:
pos = self.fp.tell()
self.fp.seek(begin)
rec_array = frombuffer(self.fp.read(self._recs*self._recsize),
dtype=dtypes).copy()
rec_array.shape = (self._recs,)
self.fp.seek(pos)
for var in rec_vars:
self.variables[var].__dict__['data'] = rec_array[var]
def _read_var(self):
    """Read one variable's header record from the file.

    Returns
    -------
    tuple
        ``(name, dimensions, shape, attributes, typecode, size, dtype_,
        begin, vsize)`` — the variable's metadata plus the file offset
        (``begin``) and declared byte size (``vsize``) of its data.
    """
    name = self._unpack_string().decode('latin1')
    dimensions = []
    shape = []
    dims = self._unpack_int()  # number of dimensions of this variable
    for i in range(dims):
        dimid = self._unpack_int()  # index into the file's dimension list
        dimname = self._dims[dimid]
        dimensions.append(dimname)
        dim = self.dimensions[dimname]  # length; None marks the record dimension
        shape.append(dim)
    dimensions = tuple(dimensions)
    shape = tuple(shape)
    attributes = self._read_att_array()
    nc_type = self.fp.read(4)  # 4-byte external type tag (key into TYPEMAP)
    vsize = self._unpack_int()
    # Offset width depends on the format version: 32-bit (v1) or 64-bit (v2).
    begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]()
    typecode, size = TYPEMAP[nc_type]
    dtype_ = '>%s' % typecode  # netCDF data is always big-endian
    return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize
def _read_att_values(self):
    """Read a single attribute's values: type tag, count, payload, padding."""
    nc_type = self.fp.read(4)  # external type tag (key into TYPEMAP)
    n = self._unpack_int()  # number of elements in the attribute
    typecode, size = TYPEMAP[nc_type]
    count = n*size  # payload size in bytes
    values = self.fp.read(int(count))
    self.fp.read(-count % 4)  # read padding
    if typecode != 'c':
        # Numeric attribute: decode big-endian values; singletons become scalars.
        values = frombuffer(values, dtype='>%s' % typecode).copy()
        if values.shape == (1,):
            values = values[0]
    else:
        # Character attribute: strip trailing NULs, return raw bytes.
        values = values.rstrip(b'\x00')
    return values
def _pack_begin(self, begin):
if self.version_byte == 1:
self._pack_int(begin)
elif self.version_byte == 2:
self._pack_int64(begin)
def _pack_int(self, value):
self.fp.write(array(value, '>i').tobytes())
_pack_int32 = _pack_int
def _unpack_int(self):
return int(frombuffer(self.fp.read(4), '>i')[0])
_unpack_int32 = _unpack_int
def _pack_int64(self, value):
self.fp.write(array(value, '>q').tobytes())
def _unpack_int64(self):
return frombuffer(self.fp.read(8), '>q')[0]
def _pack_string(self, s):
count = len(s)
self._pack_int(count)
self.fp.write(s.encode('latin1'))
self.fp.write(b'\x00' * (-count % 4)) # pad
def _unpack_string(self):
count = self._unpack_int()
s = self.fp.read(count).rstrip(b'\x00')
self.fp.read(-count % 4) # read padding
return s
class netcdf_variable:
    """
    A data object for netcdf files.

    `netcdf_variable` objects are constructed by calling the method
    `netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable`
    objects behave much like array objects defined in numpy, except that their
    data resides in a file. Data is read by indexing and written by assigning
    to an indexed subset; the entire array can be accessed by the index ``[:]``
    or (for scalars) by using the methods `getValue` and `assignValue`.
    `netcdf_variable` objects also have attribute `shape` with the same meaning
    as for arrays, but the shape cannot be modified. There is another read-only
    attribute `dimensions`, whose value is the tuple of dimension names.

    All other attributes correspond to variable attributes defined in
    the NetCDF file. Variable attributes are created by assigning to an
    attribute of the `netcdf_variable` object.

    Parameters
    ----------
    data : array_like
        The data array that holds the values for the variable.
        Typically, this is initialized as empty, but with the proper shape.
    typecode : dtype character code
        Desired data-type for the data array.
    size : int
        Desired element size for the data array.
    shape : sequence of ints
        The shape of the array. This should match the lengths of the
        variable's dimensions.
    dimensions : sequence of strings
        The names of the dimensions used by the variable. Must be in the
        same order of the dimension lengths given by `shape`.
    attributes : dict, optional
        Attribute values (any type) keyed by string names. These attributes
        become attributes for the netcdf_variable object.
    maskandscale : bool, optional
        Whether to automatically scale and/or mask data based on attributes.
        Default is False.

    Attributes
    ----------
    dimensions : list of str
        List of names of dimensions used by the variable object.
    isrec, shape
        Properties

    See also
    --------
    isrec, shape
    """

    def __init__(self, data, typecode, size, shape, dimensions,
                 attributes=None,
                 maskandscale=False):
        self.data = data
        self._typecode = typecode
        self._size = size
        self._shape = shape
        self.dimensions = dimensions
        self.maskandscale = maskandscale
        self._attributes = attributes or {}
        # Mirror netCDF attributes as instance attributes for convenience;
        # bypass __setattr__ so they are not re-recorded in _attributes.
        for k, v in self._attributes.items():
            self.__dict__[k] = v

    def __setattr__(self, attr, value):
        # Store user defined attributes in a separate dict,
        # so we can save them to file later.
        try:
            self._attributes[attr] = value
        except AttributeError:
            # _attributes does not exist yet (still inside __init__).
            pass
        self.__dict__[attr] = value

    def isrec(self):
        """Returns whether the variable has a record dimension or not.

        A record dimension is a dimension along which additional data could be
        easily appended in the netcdf data structure without much rewriting of
        the data file. This attribute is a read-only property of the
        `netcdf_variable`.
        """
        # Record variables have a leading dimension of length None/0 in _shape.
        return bool(self.data.shape) and not self._shape[0]
    isrec = property(isrec)

    def shape(self):
        """Returns the shape tuple of the data variable.

        This is a read-only attribute and can not be modified in the
        same manner of other numpy arrays.
        """
        return self.data.shape
    shape = property(shape)

    def getValue(self):
        """
        Retrieve a scalar value from a `netcdf_variable` of length one.

        Raises
        ------
        ValueError
            If the netcdf variable is an array of length greater than one,
            this exception will be raised.
        """
        return self.data.item()

    def assignValue(self, value):
        """
        Assign a scalar value to a `netcdf_variable` of length one.

        Parameters
        ----------
        value : scalar
            Scalar value (of compatible type) to assign to a length-one netcdf
            variable. This value will be written to file.

        Raises
        ------
        ValueError
            If the input is not a scalar, or if the destination is not a length-one
            netcdf variable.
        """
        if not self.data.flags.writeable:
            # Work-around for a bug in NumPy. Calling itemset() on a read-only
            # memory-mapped array causes a seg. fault.
            # See NumPy ticket #1622, and SciPy ticket #1202.
            # This check for `writeable` can be removed when the oldest version
            # of NumPy still supported by scipy contains the fix for #1622.
            raise RuntimeError("variable is not writeable")
        self.data.itemset(value)

    def typecode(self):
        """
        Return the typecode of the variable.

        Returns
        -------
        typecode : char
            The character typecode of the variable (e.g., 'i' for int).
        """
        return self._typecode

    def itemsize(self):
        """
        Return the itemsize of the variable.

        Returns
        -------
        itemsize : int
            The element size of the variable (e.g., 8 for float64).
        """
        return self._size

    def __getitem__(self, index):
        if not self.maskandscale:
            return self.data[index]

        # Copy so that masking/scaling below never mutates the backing store.
        data = self.data[index].copy()
        missing_value = self._get_missing_value()
        data = self._apply_missing_value(data, missing_value)
        scale_factor = self._attributes.get('scale_factor')
        add_offset = self._attributes.get('add_offset')
        # CF-style unpacking: promote to float64 before scale/offset.
        if add_offset is not None or scale_factor is not None:
            data = data.astype(np.float64)
        if scale_factor is not None:
            data = data * scale_factor
        if add_offset is not None:
            data += add_offset
        return data

    def __setitem__(self, index, data):
        if self.maskandscale:
            # Choose a fill value: existing attribute, the input's own
            # fill_value, or the 999999 fallback; record it on the variable.
            missing_value = (
                    self._get_missing_value() or
                    getattr(data, 'fill_value', 999999))
            self._attributes.setdefault('missing_value', missing_value)
            self._attributes.setdefault('_FillValue', missing_value)
            # Inverse of the unpacking applied in __getitem__.
            data = ((data - self._attributes.get('add_offset', 0.0)) /
                    self._attributes.get('scale_factor', 1.0))
            data = np.ma.asarray(data).filled(missing_value)
            if self._typecode not in 'fd' and data.dtype.kind == 'f':
                data = np.round(data)
        # Expand data for record vars?
        if self.isrec:
            if isinstance(index, tuple):
                rec_index = index[0]
            else:
                rec_index = index
            # Number of records needed to hold this assignment.
            if isinstance(rec_index, slice):
                recs = (rec_index.start or 0) + len(data)
            else:
                recs = rec_index + 1
            if recs > len(self.data):
                shape = (recs,) + self._shape[1:]
                # Resize in-place does not always work since
                # the array might not be single-segment
                try:
                    self.data.resize(shape)
                except ValueError:
                    # Bypass __setattr__ so 'data' is not stored in _attributes.
                    self.__dict__['data'] = np.resize(self.data, shape).astype(self.data.dtype)
        self.data[index] = data

    def _default_encoded_fill_value(self):
        """
        The default encoded fill-value for this Variable's data type.
        """
        nc_type = REVERSE[self.typecode(), self.itemsize()]
        return FILLMAP[nc_type]

    def _get_encoded_fill_value(self):
        """
        Returns the encoded fill value for this variable as bytes.

        This is taken from either the _FillValue attribute, or the default fill
        value for this variable's data type.
        """
        if '_FillValue' in self._attributes:
            fill_value = np.array(self._attributes['_FillValue'],
                                  dtype=self.data.dtype).tobytes()
            # Fall back to the default if the attribute encodes to the wrong width.
            if len(fill_value) == self.itemsize():
                return fill_value
            else:
                return self._default_encoded_fill_value()
        else:
            return self._default_encoded_fill_value()

    def _get_missing_value(self):
        """
        Returns the value denoting "no data" for this variable.

        If this variable does not have a missing/fill value, returns None.

        If both _FillValue and missing_value are given, give precedence to
        _FillValue. The netCDF standard gives special meaning to _FillValue;
        missing_value is just used for compatibility with old datasets.
        """
        if '_FillValue' in self._attributes:
            missing_value = self._attributes['_FillValue']
        elif 'missing_value' in self._attributes:
            missing_value = self._attributes['missing_value']
        else:
            missing_value = None
        return missing_value

    @staticmethod
    def _apply_missing_value(data, missing_value):
        """
        Applies the given missing value to the data array.

        Returns a numpy.ma array, with any value equal to missing_value masked
        out (unless missing_value is None, in which case the original array is
        returned).
        """
        if missing_value is None:
            newdata = data
        else:
            try:
                missing_value_isnan = np.isnan(missing_value)
            except (TypeError, NotImplementedError):
                # some data types (e.g., characters) cannot be tested for NaN
                missing_value_isnan = False
            if missing_value_isnan:
                # NaN never compares equal, so mask via isnan instead.
                mymask = np.isnan(data)
            else:
                mymask = (data == missing_value)
            newdata = np.ma.masked_where(mymask, data)
        return newdata
# Backwards-compatible capitalised aliases for the old Scientific.IO-style names.
NetCDFFile = netcdf_file
NetCDFVariable = netcdf_variable
| WarrenWeckesser/scipy | scipy/io/netcdf.py | Python | bsd-3-clause | 39,128 | [
"NetCDF"
] | 79593acfe630b75c1a5d9e9d3183393bc87e3f1dceb9c2cfd253b5d468411ea4 |
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import unittest
import ghmm
import ghmmwrapper
import random
import time
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/rapid_categorization/taxel_based/')
from data_variable_length_force_sample import Fmat_original, temp_num_fol, temp_num_trunk
if __name__ == '__main__':
    # NOTE(review): Python 2 script (print statements, integer division) that
    # depends on the GHMM library and a ROS workspace — run it in the
    # environment it was written for; it will not run under Python 3.

    start_time = time.time()

    Fmat = Fmat_original
    Foliage_Trials = temp_num_fol
    Trunk_Trials = temp_num_trunk

    # Getting mean / covariance
    # Split each foliage trial's observation sequence into `number_states`
    # equal segments and pool the segments across trials.
    i = 0
    number_states = 20
    feature_1_final_data = [0.0]*number_states
    state_1 = [0.0]
    while (i < Foliage_Trials):
        data_length = len(Fmat[i])
        feature_length = data_length/1
        sample_length = feature_length/number_states  # integer division: samples per state
        Feature_1 = Fmat[i][0:feature_length]
        if i == 0:
            j = 0
            while (j < number_states):
                feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
                j=j+1
        else:
            j = 0
            while (j < number_states):
                state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
                #print np.shape(state_1)
                #print np.shape(feature_1_final_data[j])
                feature_1_final_data[j] = feature_1_final_data[j]+state_1
                j=j+1
        i = i+1

    # Per-state emission statistics (mean, std) for the foliage (FF) model.
    j = 0
    mu_ff_force = np.zeros((number_states,1))
    sigma_ff = np.zeros((number_states,1))
    while (j < number_states):
        mu_ff_force[j] = np.mean(feature_1_final_data[j])
        sigma_ff[j] = scp.std(feature_1_final_data[j])
        j = j+1

    # Same segment pooling for the trunk (TF) trials.
    i = Foliage_Trials
    feature_1_final_data = [0.0]*number_states
    state_1 = [0.0]
    while (i < (Foliage_Trials + Trunk_Trials)):
        data_length = len(Fmat[i])
        feature_length = data_length/1
        sample_length = feature_length/number_states
        Feature_1 = Fmat[i][0:feature_length]
        if i == Foliage_Trials:
            j = 0
            while (j < number_states):
                feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
                j=j+1
        else:
            j = 0
            while (j < number_states):
                state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
                feature_1_final_data[j] = feature_1_final_data[j]+state_1
                j=j+1
        i = i+1

    # Per-state emission statistics for the trunk (TF) model.
    j = 0
    mu_tf_force = np.zeros((number_states,1))
    sigma_tf = np.zeros((number_states,1))
    while (j < number_states):
        mu_tf_force[j] = np.mean(feature_1_final_data[j])
        sigma_tf[j] = scp.std(feature_1_final_data[j])
        j = j+1

    # HMM - Implementation:
    # 10 Hidden States
    # Max. Force(For now), Contact Area(Not now), and Contact Motion(Not Now) as Continuous Gaussian Observations from each hidden state
    # Four HMM-Models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable
    # Transition probabilities obtained as upper diagonal matrix (to be trained using Baum_Welch)
    # For new objects, it is classified according to which model it represenst the closest..

    F = ghmm.Float()  # emission domain of this model

    # A - Transition Matrix
    # Left-to-right (upper-triangular) prior; the branch taken must match
    # the `number_states` value chosen above.
    if number_states == 3:
        A = [[0.2, 0.5, 0.3],
             [0.0, 0.5, 0.5],
             [0.0, 0.0, 1.0]]
    elif number_states == 5:
        A = [[0.2, 0.35, 0.2, 0.15, 0.1],
             [0.0, 0.2, 0.45, 0.25, 0.1],
             [0.0, 0.0, 0.2, 0.55, 0.25],
             [0.0, 0.0, 0.0, 0.2, 0.8],
             [0.0, 0.0, 0.0, 0.0, 1.0]]
    elif number_states == 10:
        A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
             [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
             [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
             [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
             [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
    elif number_states == 15:
        A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.15, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.05],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.10, 0.05],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.15, 0.10],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.20, 0.10],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.20],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.50, 0.30],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.40, 0.60],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 1.00]]
    elif number_states == 20:
        A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60],
             [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]

    # B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
    B_ff = [0.0]*number_states
    B_tf = [0.0]*number_states
    for num_states in range(number_states):
        B_ff[num_states] = [mu_ff_force[num_states][0],sigma_ff[num_states][0]]
        B_tf[num_states] = [mu_tf_force[num_states][0],sigma_tf[num_states][0]]

    # pi - initial probabilities per state (uniform in every case)
    if number_states == 3:
        pi = [1./3.] * 3
    elif number_states == 5:
        pi = [0.2] * 5
    elif number_states == 10:
        pi = [0.1] * 10
    elif number_states == 15:
        pi = [1./15.] * 15
    elif number_states == 20:
        pi = [0.05] * 20

    # generate FF, TF models from parameters
    model_ff = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_ff, pi) # Will be Trained
    model_tf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_tf, pi) # Will be Trained

    # Two-fold cross-validation over the pooled foliage + trunk trials.
    folds = 2
    fold_idx = 1
    ff_final = np.matrix(np.zeros((((Foliage_Trials + Trunk_Trials)/folds),1)))
    tf_final = np.matrix(np.zeros((((Foliage_Trials + Trunk_Trials)/folds),1)))
    total_seq = Fmat
    # Flatten each trial's nested sample lists into one flat observation list
    # (note: this mutates Fmat in place, since total_seq aliases it).
    for i in range((Foliage_Trials + Trunk_Trials)):
        total_seq[i][:] = sum(total_seq[i][:],[])
    while (fold_idx <= folds):
        # For Training
        if (fold_idx == 1):
            total_seq_ff = total_seq[0:(Foliage_Trials/folds)]
            total_seq_tf = total_seq[Foliage_Trials:Foliage_Trials + (Trunk_Trials/folds)]
        else:
            total_seq_ff = total_seq[(Foliage_Trials/folds):Foliage_Trials]
            total_seq_tf = total_seq[Foliage_Trials + (Trunk_Trials/folds):(Foliage_Trials + Trunk_Trials)]
        print len(total_seq_ff)
        print len(total_seq_tf)
        train_seq_ff = total_seq_ff
        train_seq_tf = total_seq_tf
        final_ts_ff = ghmm.SequenceSet(F,train_seq_ff)
        final_ts_tf = ghmm.SequenceSet(F,train_seq_tf)
        # Baum-Welch re-estimation of each class model on its training half.
        model_ff.baumWelch(final_ts_ff)
        model_tf.baumWelch(final_ts_tf)

        # For Testing: the complementary halves of each class.
        if (fold_idx == 1):
            total_seq_ff = total_seq[(Foliage_Trials/folds):Foliage_Trials]
            total_seq_tf = total_seq[Foliage_Trials + (Trunk_Trials/folds):(Foliage_Trials + Trunk_Trials)]
        else:
            total_seq_ff = total_seq[0:(Foliage_Trials/folds)]
            total_seq_tf = total_seq[Foliage_Trials:Foliage_Trials + (Trunk_Trials/folds)]
        total_seq_obj = total_seq_ff + total_seq_tf
        #print len(total_seq_ff)
        #print len(total_seq_tf)
        #print len(total_seq_obj)
        # One-hot indicator rows: ff/tf mark which model won each test sequence.
        ff = np.matrix(np.zeros((1,((Foliage_Trials + Trunk_Trials)/folds))))
        tf = np.matrix(np.zeros((1,((Foliage_Trials + Trunk_Trials)/folds))))
        k = 0
        while (k < ((Foliage_Trials + Trunk_Trials)/folds)):
            test_seq_obj = total_seq_obj[k]
            #print test_seq_obj
            ts_obj = test_seq_obj
            #print ts_obj
            final_ts_obj = ghmm.EmissionSequence(F,ts_obj)
            # Find Viterbi Path
            path_ff_obj = model_ff.viterbi(final_ts_obj)
            path_tf_obj = model_tf.viterbi(final_ts_obj)
            # Classify by whichever model yields the larger Viterbi score.
            obj = max(path_ff_obj[1],path_tf_obj[1])
            if obj == path_ff_obj[1]:
                ff[0,k] = 1
            elif obj == path_tf_obj[1]:
                tf[0,k] = 1
            k = k+1
        #print ff.T
        #print '##############'
        ff_final = ff_final + ff.T
        tf_final = tf_final + tf.T
        #print np.sum(ff_final) + np.sum(tf_final)
        fold_idx = fold_idx + 1
    #print ff_final
    #print tf_final

    # Confusion Matrix: row = predicted model, column = true class.
    cmat = np.zeros((2,2))
    arrsum_ff = np.zeros((2,1))
    arrsum_tf = np.zeros((2,1))
    k_ff = Foliage_Trials/folds
    k = Foliage_Trials/folds
    i = 0
    while (k < ((Foliage_Trials + Trunk_Trials)/folds)+1):
        arrsum_ff[i] = np.sum(ff_final[k_ff-Foliage_Trials/folds:k,0])
        arrsum_tf[i] = np.sum(tf_final[k_ff-Foliage_Trials/folds:k,0])
        i = i+1
        k_ff = k_ff+Foliage_Trials/folds
        k = k+Trunk_Trials/folds
    i=0
    while (i < 2):
        j=0
        while (j < 2):
            if (i == 0):
                cmat[i][j] = arrsum_ff[j]
            elif (i == 1):
                cmat[i][j] = arrsum_tf[j]
            j = j+1
        i = i+1
    #print cmat

    time_taken = time.time()-start_time
    print time_taken

    # Plot Confusion Matrix
    Nlabels = 2
    fig = pp.figure()
    ax = fig.add_subplot(111)
    figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels])
    ax.set_title('Performance of HMM Models')
    pp.xlabel("Targets")
    pp.ylabel("Predictions")
    ax.set_xticks([0.5,1.5])
    ax.set_xticklabels(['Foliage', 'Trunk'])
    ax.set_yticks([1.5,0.5])
    ax.set_yticklabels(['Foliage', 'Trunk'])
    figbar = fig.colorbar(figplot)
    # Annotate each cell with its count.
    i = 0
    while (i < 2):
        j = 0
        while (j < 2):
            pp.text(j+0.5,1.5-i,cmat[i][j])
            j = j+1
        i = i+1
    pp.show()
| tapomayukh/projects_in_python | rapid_categorization/cross-validation check/sample_length_effect/hmm_force_variable_length.py | Python | mit | 13,992 | [
"Gaussian",
"Mayavi"
] | a42edf2a9a443dd5daf28fa8cff99669e68d453f307b61d6061b6d93cf68eb43 |
"""The basic dict based notebook format.
The Python representation of a notebook is a nested structure of
dictionary subclasses that support attribute access
(.ipstruct.Struct). The functions in this module are merely
helpers to build the structs in the right form.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import pprint
import uuid
from .ipstruct import Struct
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# Change this when incrementing the nbformat version
nbformat = 3        # major version of the notebook format produced by this module
nbformat_minor = 0  # minor version
class NotebookNode(Struct):
    """A dict with attribute access (via Struct); the node type for every
    element of the in-memory notebook tree."""
    pass
def from_dict(d):
    """Recursively convert plain dicts (and lists/tuples thereof) into
    NotebookNode trees; scalar leaves are returned unchanged."""
    if isinstance(d, dict):
        node = NotebookNode()
        for key, value in d.items():
            node[key] = from_dict(value)
        return node
    if isinstance(d, (tuple, list)):
        # Tuples are normalized to lists, matching JSON semantics.
        return [from_dict(item) for item in d]
    return d
def new_output(output_type=None, output_text=None, output_png=None,
               output_html=None, output_svg=None, output_latex=None, output_json=None,
               output_javascript=None, output_jpeg=None, prompt_number=None,
               ename=None, evalue=None, traceback=None, stream=None, metadata=None):
    """Create a new output node of the given type.

    Only the fields relevant to ``output_type`` are stored: mime payloads
    for every type except 'pyerr', ``prompt_number`` for 'pyout', the
    exception triple for 'pyerr', and the ``stream`` name for 'stream'.
    """
    output = NotebookNode()
    if output_type is not None:
        output.output_type = str(output_type)
    if metadata is None:
        metadata = {}
    if not isinstance(metadata, dict):
        raise TypeError("metadata must be dict")
    output.metadata = metadata
    # Mime-type payloads (not meaningful for error outputs).
    if output_type != 'pyerr':
        if output_text is not None:
            output.text = str(output_text)
        if output_png is not None:
            output.png = bytes(output_png)
        if output_jpeg is not None:
            output.jpeg = bytes(output_jpeg)
        if output_html is not None:
            output.html = str(output_html)
        if output_svg is not None:
            output.svg = str(output_svg)
        if output_latex is not None:
            output.latex = str(output_latex)
        if output_json is not None:
            output.json = str(output_json)
        if output_javascript is not None:
            output.javascript = str(output_javascript)
    if output_type == 'pyout':
        if prompt_number is not None:
            output.prompt_number = int(prompt_number)
    if output_type == 'pyerr':
        # Exception name, value, and stack frames, all coerced to strings.
        if ename is not None:
            output.ename = str(ename)
        if evalue is not None:
            output.evalue = str(evalue)
        if traceback is not None:
            output.traceback = [str(frame) for frame in list(traceback)]
    if output_type == 'stream':
        # Stream outputs default to stdout when no stream name is given.
        output.stream = 'stdout' if stream is None else str(stream)
    return output
def new_code_cell(input=None, prompt_number=None, outputs=None,
                  language='python', collapsed=False, metadata=None):
    """Create a code cell node; only explicitly provided fields are stored."""
    node = NotebookNode()
    node.cell_type = 'code'
    if language is not None:
        node.language = str(language)
    if input is not None:
        node.input = str(input)
    if prompt_number is not None:
        node.prompt_number = int(prompt_number)
    node.outputs = [] if outputs is None else outputs
    if collapsed is not None:
        node.collapsed = bool(collapsed)
    node.metadata = NotebookNode(metadata or {})
    return node
def new_text_cell(cell_type, source=None, rendered=None, metadata=None):
    """Create a text cell of the requested type ('markdown', 'raw', ...)."""
    node = NotebookNode()
    # VERSIONHACK: plaintext -> raw
    # handle never-released plaintext name for raw cells
    if cell_type == 'plaintext':
        cell_type = 'raw'
    for field, value in (('source', source), ('rendered', rendered)):
        if value is not None:
            node[field] = str(value)
    node.metadata = NotebookNode(metadata or {})
    node.cell_type = cell_type
    return node
def new_heading_cell(source=None, rendered=None, level=1, metadata=None):
    """Create a heading cell at the given integer heading level."""
    node = NotebookNode()
    node.cell_type = 'heading'
    for field, value in (('source', source), ('rendered', rendered)):
        if value is not None:
            node[field] = str(value)
    node.level = int(level)
    node.metadata = NotebookNode(metadata or {})
    return node
def new_worksheet(name=None, cells=None, metadata=None):
    """Create a worksheet node holding an (optionally named) list of cells."""
    sheet = NotebookNode()
    if name is not None:
        sheet.name = str(name)
    sheet.cells = [] if cells is None else list(cells)
    sheet.metadata = NotebookNode(metadata or {})
    return sheet
def new_notebook(name=None, metadata=None, worksheets=None):
    """Create a top-level notebook node stamped with the current nbformat version."""
    notebook = NotebookNode()
    notebook.nbformat = nbformat
    notebook.nbformat_minor = nbformat_minor
    notebook.worksheets = [] if worksheets is None else list(worksheets)
    notebook.metadata = new_metadata() if metadata is None else NotebookNode(metadata)
    if name is not None:
        # An explicit name overrides whatever the metadata carried.
        notebook.metadata.name = str(name)
    return notebook
def new_metadata(name=None, authors=None, license=None, created=None,
                 modified=None, gistid=None):
    """Create a notebook-level metadata node; only supplied fields are stored."""
    metadata = NotebookNode()
    if name is not None:
        metadata.name = str(name)
    if authors is not None:
        metadata.authors = list(authors)
    # Remaining fields are simple string coercions, stored in this order.
    for field, value in (('created', created), ('modified', modified),
                         ('license', license), ('gistid', gistid)):
        if value is not None:
            metadata[field] = str(value)
    return metadata
def new_author(name=None, email=None, affiliation=None, url=None):
    """Create an author node from any subset of name/email/affiliation/url."""
    author = NotebookNode()
    for field, value in (('name', name), ('email', email),
                         ('affiliation', affiliation), ('url', url)):
        if value is not None:
            author[field] = str(value)
    return author
| maximsch2/SublimeIPythonNotebook | external/nbformat3/nbbase.py | Python | gpl-3.0 | 6,710 | [
"Brian"
] | e4613267b00c823688fdfe240a7faacf3d3ef5cfda3921161da2540657a47635 |
# -*- coding: utf-8 -*-
import yaml
import urllib
from math import isnan
from flatten_dict import flatten
from boltons.iterutils import remap
from flask import current_app, render_template, url_for, request
from flask_mongoengine import Document
from marshmallow import ValidationError
from marshmallow.fields import String
from marshmallow.validate import Email as EmailValidator
from marshmallow_mongoengine.conversion import params
from marshmallow_mongoengine.conversion.fields import register_field
from mongoengine import EmbeddedDocument, signals
from mongoengine.queryset.manager import queryset_manager
from mongoengine.fields import (
StringField, BooleanField, DictField, URLField, EmailField,
FloatField, IntField, EmbeddedDocumentListField, EmbeddedDocumentField
)
from mpcontribs.api import send_email, sns_client, valid_key, valid_dict, delimiter, enter
# OAuth providers accepted in "<provider>:<email>" usernames (see ProviderEmailField).
PROVIDERS = {"github", "google", "facebook", "microsoft", "amazon"}
# Upper bound on the number of Column entries a project may define.
MAX_COLUMNS = 50
def visit(path, key, value):
    """remap() visitor: keep a quantity's unit, blank out other scalar leaves."""
    from mpcontribs.api.contributions.document import quantity_keys

    # pull out units: a dict carrying a "unit" entry represents a quantity.
    is_quantity = isinstance(value, dict) and "unit" in value
    if is_quantity:
        return key, value["unit"]
    # Plain string/bool leaves (outside quantity keys) carry no unit.
    if isinstance(value, (str, bool)) and key not in quantity_keys:
        return key, None
    return True
class ProviderEmailField(EmailField):
    """Field to validate usernames of format <provider>:<email>"""

    def validate(self, value):
        # Require exactly one ":" separating provider from e-mail address.
        if value.count(":") != 1:
            self.error(self.error_msg % value)
        provider, email = value.split(":", 1)
        if provider not in PROVIDERS:
            self.error(
                "{} {}".format(self.error_msg % value, "(invalid provider)")
            )
        # Delegate validation of the e-mail part to mongoengine's EmailField.
        super().validate(email)
class ProviderEmailValidator(EmailValidator):
    """Marshmallow counterpart of ProviderEmailField: validates "<provider>:<email>"."""

    def __call__(self, value):
        message = self._format_error(value)
        # Require exactly one ":" separating provider from e-mail address.
        if value.count(":") != 1:
            raise ValidationError(message)
        provider, email = value.split(":", 1)
        if provider not in PROVIDERS:
            raise ValidationError(message)
        # Delegate the e-mail part to marshmallow's Email validator.
        super().__call__(email)
        return value
class ProviderEmail(String):
    """Marshmallow string field for "<provider>:<email>" MP usernames."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        validator = ProviderEmailValidator(error="Not a valid MP username ({input}).")
        # Run the provider/email check before String's own validators.
        self.validators.insert(0, validator)
def dict_wo_nans(d):
    """Return *d* restricted to its "min"/"max" entries whose values are not NaN."""
    keys_of_interest = ("min", "max")
    filtered = {}
    for key, val in d.items():
        if key in keys_of_interest and not isnan(val):
            filtered[key] = val
    return filtered
class Column(EmbeddedDocument):
    """Column metadata (path, min/max range, unit) embedded in a project."""
    path = StringField(required=True, help_text="column path in dot-notation")
    min = FloatField(required=True, default=float("nan"), help_text="column minimum")
    max = FloatField(required=True, default=float("nan"), help_text="column maximum")
    unit = StringField(required=True, default="NaN", help_text="column unit")

    def __eq__(self, other):
        # Compare only the non-NaN min/max entries: NaN != NaN would
        # otherwise make two freshly-defaulted columns compare unequal.
        if isinstance(other, self.__class__):
            return dict_wo_nans(self._data) == dict_wo_nans(other._data)
        # Return NotImplemented (not False) so Python can try the reflected
        # comparison on `other` before falling back to identity.
        return NotImplemented
class Reference(EmbeddedDocument):
    """A labelled URL reference attached to a project."""
    label = StringField(
        required=True,
        min_length=3,
        max_length=20,
        help_text="label",
        validation=valid_key,
    )
    url = URLField(required=True, help_text="URL")
class Stats(EmbeddedDocument):
    """Aggregate counts of a project's columns, contributions, and components."""
    columns = IntField(required=True, default=0, help_text="#columns")
    contributions = IntField(required=True, default=0, help_text="#contributions")
    tables = IntField(required=True, default=0, help_text="#tables")
    structures = IntField(required=True, default=0, help_text="#structures")
    attachments = IntField(required=True, default=0, help_text="#attachments")
class Projects(Document):
__project_regex__ = "^[a-zA-Z0-9_]{3,31}$"
name = StringField(
min_length=3,
max_length=30,
regex=__project_regex__,
primary_key=True,
help_text=f"project name/slug (valid format: `{__project_regex__}`)",
)
is_public = BooleanField(
required=True, default=False, help_text="public/private project"
)
title = StringField(
min_length=5,
max_length=30,
required=True,
unique=True,
help_text="short title for the project/dataset",
)
long_title = StringField(
min_length=5,
max_length=55,
help_text="optional full title for the project/dataset",
)
authors = StringField(
required=True,
help_text="comma-separated list of authors"
# TODO change to EmbeddedDocumentListField
)
description = StringField(
min_length=5,
max_length=1500,
required=True,
help_text="brief description of the project",
)
references = EmbeddedDocumentListField(
Reference,
required=True,
min_length=1,
max_length=20,
help_text="list of references",
)
other = DictField(validation=valid_dict, null=True, help_text="other information")
owner = ProviderEmailField(
required=True, unique_with="name", help_text="owner / corresponding email"
)
is_approved = BooleanField(
required=True, default=False, help_text="project approved?"
)
unique_identifiers = BooleanField(
required=True, default=True, help_text="identifiers unique?"
)
columns = EmbeddedDocumentListField(Column, max_length=MAX_COLUMNS)
stats = EmbeddedDocumentField(Stats, required=True, default=Stats)
meta = {
"collection": "projects",
"indexes": ["is_public", "title", "owner", "is_approved", "unique_identifiers"],
}
@queryset_manager
def objects(doc_cls, queryset):
return queryset.only(
"name", "is_public", "title", "owner", "is_approved", "unique_identifiers"
)
@classmethod
def post_save(cls, sender, document, **kwargs):
admin_email = current_app.config["MAIL_DEFAULT_SENDER"]
admin_topic = current_app.config["MAIL_TOPIC"]
scheme = "http" if current_app.config["DEBUG"] else "https"
if kwargs.get("created"):
ts = current_app.config["USTS"]
email_project = [document.owner, document.name]
token = ts.dumps(email_project)
link = url_for(
"projects.applications", token=token, _scheme=scheme, _external=True
)
subject = f'New project "{document.name}"'
hours = int(current_app.config["USTS_MAX_AGE"] / 3600)
doc_yaml = yaml.dump(
document.to_mongo().to_dict(), indent=4, sort_keys=False
)
html = render_template(
"admin_email.html", doc=doc_yaml, link=link, hours=hours
)
send_email(admin_topic, subject, html)
resp = sns_client.create_topic(
Name=f"mpcontribs_{document.name}",
Attributes={"DisplayName": f"MPContribs {document.title}"},
)
endpoint = document.owner.split(":", 1)[1]
sns_client.subscribe(
TopicArn=resp["TopicArn"], Protocol="email", Endpoint=endpoint
)
else:
delta_set, delta_unset = document._delta()
if "is_approved" in delta_set and document.is_approved:
subject = f'Your project "{document.name}" has been approved'
netloc = urllib.parse.urlparse(request.url).netloc.replace("-api", "")
portal = f"{scheme}://{netloc}"
html = render_template(
"owner_email.html",
approved=True,
admin_email=admin_email,
host=portal,
project=document.name
)
topic_arn = ":".join(
admin_topic.split(":")[:-1] + ["mpcontribs_" + document.name]
)
send_email(topic_arn, subject, html)
if "columns" in delta_set or "columns" in delta_unset or (
not delta_set and not delta_unset
):
from mpcontribs.api.contributions.document import Contributions, COMPONENTS
columns = {}
ncontribs = Contributions.objects(project=document.id).count()
if "columns" in delta_set:
# document.columns updated by the user as intended
for col in document.columns:
columns[col.path] = col
elif "columns" in delta_unset or ncontribs:
# document.columns unset by user to reinit all columns from DB
# -> get paths and units across all contributions from DB
group = {"_id": "$project", "merged": {"$mergeObjects": "$data"}}
pipeline = [{"$match": {"project": document.id}}, {"$group": group}]
result = list(Contributions.objects.aggregate(pipeline))
merged = {} if not result else result[0]["merged"]
flat = flatten(remap(merged, visit=visit, enter=enter), reducer="dot")
for k, v in flat.items():
path = f"data.{k}"
columns[path] = Column(path=path)
if v is not None:
columns[path].unit = v
# set min/max for all number columns
min_max_paths = [path for path, col in columns.items() if col["unit"] != "NaN"]
group = {"_id": None}
for path in min_max_paths:
field = f"{path}{delimiter}value"
for k in ["min", "max"]:
clean_path = path.replace(delimiter, "__")
key = f"{clean_path}__{k}"
group[key] = {f"${k}": f"${field}"}
pipeline = [{"$match": {"project": document.id}}, {"$group": group}]
result = list(Contributions.objects.aggregate(pipeline))
min_max = {} if not result else result[0]
for clean_path in min_max_paths:
for k in ["min", "max"]:
path = clean_path.replace(delimiter, "__")
m = min_max.get(f"{path}__{k}")
if m is not None:
setattr(columns[clean_path], k, m)
# update stats
stats_kwargs = {"columns": len(columns), "contributions": ncontribs}
for component in COMPONENTS.keys():
pipeline = [
{"$match": {
"project": document.id,
component: {
"$exists": True,
"$not": {"$size": 0}
}
}},
{"$count": "count"}
]
result = list(Contributions.objects.aggregate(pipeline))
if result:
stats_kwargs[component] = result[0]["count"]
columns[component] = Column(path=component)
else:
stats_kwargs[component] = 0
stats = Stats(**stats_kwargs)
document.update(stats=stats, columns=columns.values())
@classmethod
def post_delete(cls, sender, document, **kwargs):
admin_email = current_app.config["MAIL_DEFAULT_SENDER"]
admin_topic = current_app.config["MAIL_TOPIC"]
subject = f'Your project "{document.name}" has been deleted'
html = render_template(
"owner_email.html", approved=False,
admin_email=admin_email, project=document.name
)
topic_arn = ":".join(
admin_topic.split(":")[:-1] + ["mpcontribs_" + document.name]
)
send_email(topic_arn, subject, html)
sns_client.delete_topic(TopicArn=topic_arn)
register_field(ProviderEmailField, ProviderEmail, available_params=(params.LengthParam,))
signals.post_save.connect(Projects.post_save, sender=Projects)
signals.post_delete.connect(Projects.post_delete, sender=Projects)
| materialsproject/MPContribs | mpcontribs-api/mpcontribs/api/projects/document.py | Python | mit | 12,238 | [
"VisIt"
] | 5998b6044584f96079797e0e2bf314cf26d8fa2c5616500df78a301bd9cdfd11 |
# _*_ coding:utf-8 _*_
# =======
# Imports
# =======
# Static Method Hook
import inspect
# And Now For Something Completely Different
import os
import sys
import re
from time import sleep
import pkg_resources
# Config File and Flags
if sys.version_info[0] == 2:
import config
CONFIG = config.CONFIG
else:
from irs.config import CONFIG
# ==================
# Static Method Hook
# ==================
def staticmethods(cls):
for name, method in inspect.getmembers(cls, inspect.ismethod):
setattr(cls, name, staticmethod(method.__func__))
return cls
# =========================
# Youtube-DL Logs and Hooks
# =========================
@staticmethods
class YdlUtils:
def clear_line():
sys.stdout.write("\x1b[2K\r")
class MyLogger(object):
def debug(self, msg):
pass
def warning(self, msg):
pass
def error(self, msg):
print(msg)
def my_hook(d):
if d['status'] == 'finished':
print(CONFIG["converting"])
# ================================
# Object Manipulation and Checking
# ================================
def set_encoding(ld, encoding): # ld => list or dictionary with strings in it
if type(ld) == dict:
for k in ld:
if type(ld[k]) == dict or type(ld[k]) == list:
ld[k] = set_encoding(ld[k], encoding)
elif type(ld[k]) == str:
ld[k] = encoding(ld[k])
elif type(ld) == list:
for index, datum in enumerate(ld):
if type(datum) == str:
ld[index] = encoding(datum)
elif type(ld[k]) == dict or type(ld[k]) == list:
ld[k] = set_encoding(ld[k], encoding)
return ld
@staticmethods
class ObjManip: # Object Manipulation
def limit_song_name(song):
bad_phrases = "remaster remastered master".split(" ")
# I have "master" here because Spotify actually sometimes mispells
# stuff and it is hella annoying, so this was my solution
for phrase in bad_phrases:
if ObjManip.blank_include(song.split(" - ")[-1], phrase):
return song.split(" - ")[0]
return song
def check_garbage_phrases(phrases, string, title):
for phrase in phrases:
if phrase in string.lower():
if phrase not in title.lower():
return True
return False
def blank(string, downcase=True, remove_and=True):
if downcase:
string = string.lower()
if remove_and:
string = string.replace("and", "")
import re
regex = re.compile('[^a-zA-Z0-9\ ]')
if sys.version_info == 2:
string = regex.sub('', string.decode("utf8"))
return ' '.join(string.decode().split())
else:
string = regex.sub('', string)
return ' '.join(string.split())
def blank_include(this, includes_this):
this = ObjManip.blank(this)
includes_this = ObjManip.blank(includes_this)
if includes_this in this:
return True
return False
def individual_word_match(match_against, match):
match_against = ObjManip.blank(match_against).split(" ")
match = ObjManip.blank(match).split(" ")
matched = []
for match_ag in match_against:
for word in match:
if match_ag == word:
matched.append(word)
return (float(len(set(matched))) / float(len(match_against)))
def flatten(l):
flattened_list = []
for x in l:
if type(x) != str:
for y in x:
flattened_list.append(y)
else:
flattened_list.append(x)
return flattened_list
def remove_none_values(d):
new_d = d
for x in list(d.keys()):
if type(new_d[x]) is list:
new_d[x] = ObjManip.remove_none_values(d[x])
elif new_d[x] is None:
del new_d[x]
return new_d
# ld => a list or dictionary with strings in it
def set_utf8_encoding(ld):
return set_encoding(ld, lambda x: x.encode('utf-8'))
def set_encoding(*args):
return set_encoding(*args)
# ========================================
# Download Log Reading/Updating/Formatting
# ========================================
@staticmethods
class DLog:
def format_download_log_line(t, download_status="not downloaded"):
return (" @@ ".join([t["name"], t["artist"], t["album"]["id"],
str(t["genre"]), t["track_number"], t["disc_number"],
t["compilation"], t["file_prefix"], download_status]))
def format_download_log_data(data):
lines = []
for track in data:
lines.append(DLog.format_download_log_line(track))
return "\n".join(lines)
def read_download_log(spotify):
data = []
with open(".irs-download-log", "r") as file:
for line in file:
line = line.split(" @@ ")
data.append({
"name": line[0],
"artist": line[1],
"album": spotify.album(line[2]),
"genre": eval(line[3]),
"track_number": line[4],
"disc_number": line[5],
"compilation": bool(line[6]),
"file_prefix": line[7],
})
return data
def update_download_log_line_status(track, status="downloaded"):
line_to_find = DLog.format_download_log_line(track)
with open(".irs-download-log", "r") as input_file:
with open(".irs-download-log", "w") as output_file:
for line in input_file:
if line == line_to_find:
output_file.write(
DLog.format_download_log_line(track, status))
else:
output_file.write(line)
# ===========================================
# And Now, For Something Completely Different
# ===========================================
# (It's for the CLI)
try:
COLS = int(os.popen('tput cols').read().strip("\n"))
except:
COLS = 80
if sys.version_info[0] == 2:
def input(string):
return raw_input(string)
def code(code1):
return "\x1b[%sm" % str(code1)
def no_colors(string):
return re.sub("\x1b\[\d+m", "", string)
def center_colors(string, cols):
return no_colors(string).center(cols).replace(no_colors(string), string)
def decode_utf8(string):
if sys.version_info[0] == 3:
return string.encode("utf8", "strict").decode()
elif sys.version_info[0] == 2:
return string.decode("utf8")
def center_unicode(string, cols):
tmp_chars = "X" * len(decode_utf8(string))
chars = center_colors(tmp_chars, cols)
return chars.replace(tmp_chars, string)
def center_lines(string, cols, end="\n"):
lines = []
for line in string.split("\n"):
lines.append(center_unicode(line, cols))
return end.join(lines)
def flush_puts(msg, time=0.01):
# For slow *burrrp* scroll text, Morty. They-They just love it, Morty.
# When they see this text. Just slowwwly extending across the page. Mmm,
# mmm. You just give the time for how *buurp* slow you wa-want it, Morty.
# It works with colors and escape characters too, Morty.
# Your grandpa's a genius *burrrp* Morty
def check_color(s):
if "\x1b" not in s:
new = list(s)
else:
new = s
return new
msg = re.split("(\x1b\[\d+m)", msg)
msg = list(filter(None, map(check_color, msg)))
msg = ObjManip.flatten(msg)
for char in msg:
if char not in (" ", "", "\n") and "\x1b" not in char:
sleep(time)
sys.stdout.write(char)
sys.stdout.flush()
print("")
BOLD = code(1)
END = code(0)
RED = code(31)
GREEN = code(32)
YELLOW = code(33)
BLUE = code(34)
PURPLE = code(35)
CYAN = code(36)
GRAY = code(37)
BRED = RED + BOLD
BGREEN = GREEN + BOLD
BYELLOW = YELLOW + BOLD
BBLUE = BLUE + BOLD
BPURPLE = PURPLE + BOLD
BCYAN = CYAN + BOLD
BGRAY = GRAY + BOLD
def banner():
title = (BCYAN + center_lines("""\
██╗██████╗ ███████╗
██║██╔══██╗██╔════╝
██║██████╔╝███████╗
██║██╔══██╗╚════██║
██║██║ ██║███████║
╚═╝╚═╝ ╚═╝╚══════╝\
""", COLS) + END)
for num in range(0, 6):
os.system("clear || cls")
if num % 2 == 1:
print(BRED + center_unicode("🚨 🚨 🚨 🚨 🚨 \r", COLS))
else:
print("")
print(title)
sleep(0.3)
flush_puts(center_colors("{0}Ironic Redistribution System ({1}IRS{2})"
.format(BYELLOW, BRED, BYELLOW), COLS))
flush_puts(center_colors("{0}Made with 😈 by: {1}Kepoor Hampond \
({2}kepoorhampond{3})".format(BBLUE, BYELLOW, BRED, BYELLOW) + END, COLS))
flush_puts(center_colors("{0}Version: {1}".format(BBLUE, BYELLOW) +
pkg_resources.get_distribution("irs").version, COLS))
def menu(unicode, time=0.01):
flush_puts("Choose option from menu:", time)
flush_puts("\t[{0}song{1}] Download Song".format(BGREEN, END), time)
flush_puts("\t[{0}album{1}] Download Album".format(BGREEN, END), time)
flush_puts("\t[{0}{1}{2}] Download Playlist"
.format(BGREEN, unicode[-1], END), time)
flush_puts("\t[{0}help{1}] Print This Menu".format(BGREEN, END), time)
flush_puts("\t[{0}exit{1}] Exit IRS".format(BGREEN, END), time)
print("")
def console(ripper):
banner()
print(END)
if ripper.authorized is True:
unicode = [BGREEN + "✔" + END, "list"]
elif ripper.authorized is False:
unicode = [BRED + "✘" + END]
flush_puts("[{0}] Authenticated with Spotify".format(unicode[0]))
print("")
menu(unicode)
while True:
try:
choice = input("{0}irs{1}>{2} ".format(BBLUE, BGRAY, END))
if choice in ("exit", "e"):
raise KeyboardInterrupt
try:
if choice in ("song", "s"):
song_name = input("Song name{0}:{1} ".format(BBLUE, END))
artist_name = input("Artist name{0}:{1} "
.format(BBLUE, END))
ripper.song(song_name, artist_name)
elif choice in ("album", "a"):
album_name = input("Album name{0}:{1} ".format(BBLUE, END))
ripper.spotify_list("album", album_name)
elif choice in ("list", "l") and ripper.authorized is True:
username = input("Spotify Username{0}:{1} "
.format(BBLUE, END))
list_name = input("Playlist Name{0}:{1} "
.format(BBLUE, END))
ripper.spotify_list("playlist", list_name, username)
elif choice in ("help", "h", "?"):
menu(unicode, 0)
except (KeyboardInterrupt, EOFError):
print("")
pass
except (KeyboardInterrupt, EOFError):
sys.exit(0)
"""
# =====================
# Config File and Flags
# =====================
def check_sources(ripper, key, default=None, environment=False, where=None):
if where is not None:
tmp_args = ripper.args.get(where)
else:
tmp_args = ripper.args
if tmp_args.get(key):
return tmp_args.get(key)
"""
# ===========
# CONFIG FILE
# ===========
def check_sources(ripper, key, default=None, environment=False, where=None):
# tmp_args = ripper.args
# if where is not None and ripper.args.get(where):
# tmp_args = ripper.args.get("where")
if ripper.args.get(key):
return ripper.args.get(key)
elif CONFIG.get(key):
return CONFIG.get(key)
elif os.environ.get(key) and environment is True:
return os.environ.get(key)
else:
return default
@staticmethods
class Config:
def parse_spotify_creds(ripper):
CLIENT_ID = check_sources(ripper, "SPOTIFY_CLIENT_ID",
environment=True)
CLIENT_SECRET = check_sources(ripper, "SPOTIFY_CLIENT_SECRET",
environment=True)
return CLIENT_ID, CLIENT_SECRET
def parse_search_terms(ripper):
search_terms = check_sources(ripper, "additional_search_terms",
"lyrics")
return search_terms
def parse_artist(ripper):
artist = check_sources(ripper, "artist")
return artist
def parse_directory(ripper):
directory = check_sources(ripper, "custom_directory",
where="post_processors")
if directory is None:
directory = check_sources(ripper, "custom_directory", "~/Music")
return directory.replace("~", os.path.expanduser("~"))
def parse_default_flags(default=""):
if CONFIG.get("default_flags"):
args = sys.argv[1:] + CONFIG.get("default_flags")
else:
args = default
return args
def parse_organize(ripper):
organize = check_sources(ripper, "organize")
if organize is None:
return check_sources(ripper, "organize", False,
where="post_processors")
else:
return True
def parse_exact(ripper):
exact = check_sources(ripper, "exact")
if exact in (True, False):
return exact
#==============
# Captcha Cheat
#==============
# I basically consider myself a genius for this snippet.
from splinter import Browser
from time import sleep
@staticmethods
class CaptchaCheat:
def cheat_it(url, t=1):
executable_path = {'executable_path': '/usr/local/bin/chromedriver'}
with Browser('chrome', **executable_path) as b:
b.visit(url)
sleep(t)
while CaptchaCheat.strip_it(b.evaluate_script("document.URL")) != CaptchaCheat.strip_it(url):
sleep(t)
return b.evaluate_script("document.getElementsByTagName('html')[0].innerHTML")
def strip_it(s):
s = s.encode("utf-8")
s = s.strip("http://")
s = s.strip("https://")
return s
| kepoorhampond/irs | irs/utils.py | Python | gpl-3.0 | 14,689 | [
"VisIt"
] | 6e005b538078687961de2119d215c0cf2e4276e0834d998a8dd73b448eb803f9 |
#
# This source file is part of appleseed.
# Visit https://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2016-2018 Esteban Tovagliari, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Maya imports.
import pymel.core as pm
# appleseedMaya imports.
from appleseedMaya.logger import logger
class AEappleseedSkyDomeLightTemplate(pm.ui.AETemplate):
def __init__(self, nodeName):
super(AEappleseedSkyDomeLightTemplate, self).__init__(nodeName)
self.beginScrollLayout()
self.buildBody(nodeName)
self.addExtraControls("ExtraControls")
self.endScrollLayout()
def addControl(self, control, label=None, **kwargs):
pm.ui.AETemplate.addControl(self, control, label=label, **kwargs)
def beginLayout(self, name, collapse=True):
pm.ui.AETemplate.beginLayout(self, name, collapse=collapse)
def buildBody(self, nodeName):
self.beginLayout('Map', collapse=0)
self.addControl("map")
self.addSeparator()
self.addControl("intensity")
self.addControl("exposure")
self.addSeparator()
self.addControl("hShift")
self.addControl("vShift")
self.endLayout()
self.beginLayout('Color Correct', collapse=0)
self.endLayout()
self.beginLayout('Display', collapse=1)
self.addControl('size', label='Size')
self.endLayout()
| appleseedhq/appleseed-maya | scripts/appleseedMaya/AETemplates/appleseedSkyDomeLightTemplate.py | Python | mit | 2,507 | [
"VisIt"
] | ee873d8c3b01468adf7023d2df735ba316f3815093c899e543f414ae93b85e4b |
# -*- coding: utf-8 -*-
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.html import *
from gluon.storage import Storage
from gluon.validators import IS_NOT_EMPTY
from s3.s3fields import S3Represent
from s3.s3resource import S3FieldSelector
from s3.s3utils import S3DateTime, s3_auth_user_represent_name, s3_avatar_represent
from s3.s3validators import IS_LOCATION_SELECTOR2, IS_ONE_OF
from s3.s3widgets import S3LocationSelectorWidget2
from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentMultiSelectWidget
T = current.T
s3 = current.response.s3
settings = current.deployment_settings
"""
Template settings for Requests Management
- for Philippines
"""
datetime_represent = lambda dt: S3DateTime.datetime_represent(dt, utc=True)
# =============================================================================
# System Settings
# -----------------------------------------------------------------------------
# Authorization Settings
# Users can self-register
#settings.security.self_registration = False
# Users need to verify their email
settings.auth.registration_requires_verification = True
# Users don't need to be approved
#settings.auth.registration_requires_approval = True
# Organisation links are either done automatically
# - by registering with official domain of Org
# or Manually by Call Center staff
#settings.auth.registration_requests_organisation = True
#settings.auth.registration_organisation_required = True
settings.auth.registration_requests_site = False
# Uncomment this to allow Admin to see Organisations in user Admin even if the Registration doesn't request this
settings.auth.admin_sees_organisation = True
# Approval emails get sent to all admins
settings.mail.approver = "ADMIN"
settings.auth.registration_link_user_to = {"staff": T("Staff")}
settings.auth.registration_link_user_to_default = ["staff"]
settings.auth.registration_roles = {"organisation_id": ["USER"],
}
# Terms of Service to be able to Register on the system
# uses <template>/views/tos.html
settings.auth.terms_of_service = True
settings.auth.show_utc_offset = False
settings.auth.show_link = False
# -----------------------------------------------------------------------------
# Security Policy
settings.security.policy = 5 # Apply Controller, Function and Table ACLs
settings.security.map = True
# Owner Entity
settings.auth.person_realm_human_resource_site_then_org = False
# -----------------------------------------------------------------------------
# Pre-Populate
settings.base.prepopulate = ["Philippines"]
settings.base.system_name = T("Sahana")
settings.base.system_name_short = T("Sahana")
# -----------------------------------------------------------------------------
# Theme (folder to use for views/layout.html)
settings.base.theme = "Philippines"
settings.ui.formstyle_row = "bootstrap"
settings.ui.formstyle = "bootstrap"
#settings.gis.map_height = 600
#settings.gis.map_width = 854
# -----------------------------------------------------------------------------
# L10n (Localization) settings
settings.L10n.languages = OrderedDict([
("en", "English"),
# ("tl", "Tagalog"),
])
# Default Language
settings.L10n.default_language = "en"
# Default timezone for users
settings.L10n.utc_offset = "UTC +0800"
# Unsortable 'pretty' date format
settings.L10n.date_format = "%d %b %Y"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Uncomment this to Translate CMS Series Names
# - we want this on when running s3translate but off in normal usage as we use the English names to lookup icons in render_posts
#settings.L10n.translate_cms_series = True
# Uncomment this to Translate Location Names
#settings.L10n.translate_gis_location = True
# Restrict the Location Selector to just certain countries
settings.gis.countries = ["PH"]
# Until we add support to LocationSelector2 to set dropdowns from LatLons
#settings.gis.check_within_parent_boundaries = False
# Uncomment to hide Layer Properties tool
#settings.gis.layer_properties = False
# Hide unnecessary Toolbar items
settings.gis.nav_controls = False
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# -----------------------------------------------------------------------------
# Finance settings
settings.fin.currencies = {
"PHP" : T("Philippine Pesos"),
#"EUR" : T("Euros"),
#"GBP" : T("Great British Pounds"),
#"CHF" : T("Swiss Francs"),
"USD" : T("United States Dollars"),
}
settings.fin.currency_default = "PHP"
# -----------------------------------------------------------------------------
# Enable this for a UN-style deployment
#settings.ui.cluster = True
# Enable this to use the label 'Camp' instead of 'Shelter'
#settings.ui.camp = True
# -----------------------------------------------------------------------------
# Uncomment to restrict the export formats available
#settings.ui.export_formats = ["xls"]
settings.ui.update_label = "Edit"
# -----------------------------------------------------------------------------
# Summary Pages
settings.ui.summary = [#{"common": True,
# "name": "cms",
# "widgets": [{"method": "cms"}]
# },
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map", "ajax_init": True}],
},
{"name": "charts",
"label": "Reports",
"widgets": [{"method": "report", "ajax_init": True}]
},
]
settings.search.filter_manager = False
# Filter forms - style for Summary pages
#def filter_formstyle(row_id, label, widget, comment, hidden=False):
# return DIV(label, widget, comment,
# _id=row_id,
# _class="horiz_filter_form")
# =============================================================================
# Module Settings
# -----------------------------------------------------------------------------
# Human Resource Management
settings.hrm.staff_label = "Contacts"
# Uncomment to allow Staff & Volunteers to be registered without an organisation
settings.hrm.org_required = False
# Uncomment to allow Staff & Volunteers to be registered without an email address
settings.hrm.email_required = False
# Uncomment to show the Organisation name in HR represents
settings.hrm.show_organisation = True
# Uncomment to disable Staff experience
settings.hrm.staff_experience = False
# Uncomment to disable the use of HR Credentials
settings.hrm.use_credentials = False
# Uncomment to disable the use of HR Skills
settings.hrm.use_skills = False
# Uncomment to disable the use of HR Teams
settings.hrm.teams = False
# Uncomment to hide fields in S3AddPersonWidget[2]
settings.pr.request_dob = False
settings.pr.request_gender = False
# -----------------------------------------------------------------------------
# Org
#settings.org.site_label = "Office/Shelter/Hospital"
settings.org.site_label = "Site"
settings.org.site_autocomplete = True
# Extra fields to show in Autocomplete Representations
settings.org.site_autocomplete_fields = ["location_id$L1",
"location_id$L2",
"location_id$L3",
"location_id$L4",
]
# -----------------------------------------------------------------------------
# Project
# Uncomment this to use multiple Organisations per project
settings.project.multiple_organisations = True
# Links to Filtered Components for Donors & Partners
#settings.project.organisation_roles = {
# 1: T("Host National Society"),
# 2: T("Partner"),
# 3: T("Donor"),
# #4: T("Customer"), # T("Beneficiary")?
# #5: T("Supplier"),
# 9: T("Partner National Society"),
#}
# -----------------------------------------------------------------------------
# Notifications
# Template for the subject line in update notifications
#settings.msg.notify_subject = "$S %s" % T("Notification")
settings.msg.notify_subject = "$S Notification"
# -----------------------------------------------------------------------------
def currency_represent(v):
"""
Custom Representation of Currencies
"""
if v == "USD":
return "$"
elif v == "EUR":
return "€"
elif v == "GBP":
return "£"
else:
# e.g. CHF
return v
# -----------------------------------------------------------------------------
def render_contacts(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Contacts on the Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["hrm_human_resource.id"]
item_class = "thumbnail"
raw = record._row
#author = record["hrm_human_resource.modified_by"]
date = record["hrm_human_resource.modified_on"]
fullname = record["hrm_human_resource.person_id"]
job_title = raw["hrm_human_resource.job_title_id"] or ""
if job_title:
job_title = "- %s" % record["hrm_human_resource.job_title_id"]
#organisation = record["hrm_human_resource.organisation_id"]
organisation_id = raw["hrm_human_resource.organisation_id"]
#org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
pe_id = raw["pr_person.pe_id"]
person_id = raw["hrm_human_resource.person_id"]
location = record["org_site.location_id"]
location_id = raw["org_site.location_id"]
location_url = URL(c="gis", f="location",
args=[location_id, "profile"])
address = raw["gis_location.addr_street"] or T("no office assigned")
email = raw["pr_email_contact.value"] or T("no email address")
if isinstance(email, list):
email = email[0]
phone = raw["pr_phone_contact.value"] or T("no phone number")
if isinstance(phone, list):
phone = phone[0]
db = current.db
s3db = current.s3db
ltable = s3db.pr_person_user
query = (ltable.pe_id == pe_id)
row = db(query).select(ltable.user_id,
limitby=(0, 1)
).first()
if row:
# Use Personal Avatar
# @ToDo: Optimise by not doing DB lookups (especially duplicate) within render, but doing these in the bulk query
avatar = s3_avatar_represent(row.user_id,
_class="media-object")
else:
avatar = IMG(_src=URL(c="static", f="img", args="blank-user.gif"),
_class="media-object")
# Edit Bar
permit = current.auth.s3_has_permission
table = db.pr_person
if permit("update", table, record_id=person_id):
vars = {"refresh": list_id,
"record": record_id,
}
f = current.request.function
if f == "organisation" and organisation_id:
vars["(organisation)"] = organisation_id
edit_url = URL(c="hrm", f="person",
args=[person_id, "update.popup"],
vars=vars)
title_update = current.response.s3.crud_strings.hrm_human_resource.title_update
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=edit_url,
_class="s3_modal",
_title=title_update,
)
else:
edit_btn = ""
edit_url = "#"
title_update = ""
# Deletions failing due to Integrity Errors
#if permit("delete", table, record_id=person_id):
# delete_btn = A(I(" ", _class="icon icon-trash"),
# _class="dl-item-delete",
# )
#else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
avatar = A(avatar,
_href=edit_url,
_class="pull-left s3_modal",
_title=title_update,
)
# Render the item
body = TAG[""](P(fullname,
" ",
SPAN(job_title),
_class="person_pos",
),
P(I(_class="icon-phone"),
" ",
SPAN(phone),
" ",
I(_class="icon-envelope-alt"),
" ",
SPAN(email),
_class="card_1_line",
),
P(I(_class="icon-home"),
" ",
address,
_class="card_manylines",
))
item = DIV(DIV(SPAN(" ", _class="card-title"),
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
# Organisation only needed if displaying elsewhere than org profile
# Author confusing with main contact record
#DIV(#author,
# #" - ",
# A(organisation,
# _href=org_url,
# _class="card-organisation",
# ),
# _class="card-person",
# ),
_class="media",
),
_class="media-body",
),
_class="media",
),
#docs,
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def quote_unicode(s):
"""
Quote unicode strings for URLs for Rocket
"""
chars = []
for char in s:
o = ord(char)
if o < 128:
chars.append(char)
else:
chars.append(hex(o).replace("0x", "%").upper())
return "".join(chars)
# -----------------------------------------------------------------------------
def render_locations(list_id, item_id, resource, rfields, record):
    """
        Custom dataList item renderer for Locations on the Selection Page

        Shows the location name (with parent Lx names for context), a
        drill-down link to the next level of the hierarchy, and tallies
        of Sites / Requests / Donations within the location.

        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict
    """

    record_id = record["gis_location.id"]
    item_class = "thumbnail"

    raw = record._row
    name = raw["gis_location.name"]
    level = raw["gis_location.level"]
    L1 = raw["gis_location.L1"]
    L2 = raw["gis_location.L2"]
    L3 = raw["gis_location.L3"]
    L4 = raw["gis_location.L4"]
    location_url = URL(c="gis", f="location",
                       args=[record_id, "profile"])

    # Compose the display name with its parent Lx names for context
    # (was an inconsistent if/if/elif chain; the L1 case fell through to
    #  the else branch which set the same value, so behaviour is unchanged)
    if level == "L1":
        represent = name
    elif level == "L2":
        represent = "%s (%s)" % (name, L1)
    elif level == "L3":
        represent = "%s (%s, %s)" % (name, L2, L1)
    elif level == "L4":
        represent = "%s (%s, %s, %s)" % (name, L3, L2, L1)
    else:
        # L0 or specific
        represent = name

    # Users don't edit locations
    # permit = current.auth.s3_has_permission
    # table = current.db.gis_location
    # if permit("update", table, record_id=record_id):
    #     edit_btn = A(I(" ", _class="icon icon-edit"),
    #                  _href=URL(c="gis", f="location",
    #                            args=[record_id, "update.popup"],
    #                            vars={"refresh": list_id,
    #                                  "record": record_id}),
    #                  _class="s3_modal",
    #                  _title=current.response.s3.crud_strings.gis_location.title_update,
    #                  )
    # else:
    #     edit_btn = ""
    # if permit("delete", table, record_id=record_id):
    #     delete_btn = A(I(" ", _class="icon icon-trash"),
    #                    _class="dl-item-delete",
    #                    )
    # else:
    #     delete_btn = ""
    # edit_bar = DIV(edit_btn,
    #                delete_btn,
    #                _class="edit-bar fright",
    #                )

    # Tallies
    # NB We assume that all records are readable here
    # Search all sub-locations
    locations = current.gis.get_children(record_id)
    locations = [l.id for l in locations]
    locations.append(record_id)
    db = current.db
    s3db = current.s3db

    # Sites within this location (or any sub-location)
    stable = s3db.org_site
    query = (stable.deleted == False) & \
            (stable.location_id.belongs(locations))
    count = stable.id.count()
    row = db(query).select(count).first()
    if row:
        tally_sites = row[count]
    else:
        tally_sites = 0

    # Requests raised by those sites
    table = s3db.req_req
    query = (table.deleted == False) & \
            (stable.site_id == table.site_id) & \
            (stable.location_id.belongs(locations))
    count = table.id.count()
    row = db(query).select(count).first()
    if row:
        tally_reqs = row[count]
    else:
        tally_reqs = 0

    # Donations (commitments) within the location
    table = s3db.req_commit
    query = (table.deleted == False) & \
            (table.location_id.belongs(locations))
    count = table.id.count()
    row = db(query).select(count).first()
    if row:
        tally_commits = row[count]
    else:
        tally_commits = 0

    # Work out the next hierarchy level for the drill-down link
    if level == "L4":
        # Bottom of the hierarchy: no drill-down
        next_Lx = ""
        next_Lx_label = ""
    else:
        if level == "L0":
            next_Lx = "L1"
            next_Lx_label = "Regions"
        elif level == "L1":
            # (was a separate `if` after the L0 branch; an elif chain
            #  states the intent - both can never match at once)
            next_Lx = "L2"
            next_Lx_label = "Provinces"
        elif level == "L2":
            next_Lx = "L3"
            next_Lx_label = "Municipalities / Cities"
        elif level == "L3":
            next_Lx = "L4"
            next_Lx_label = "Barangays"
        else:
            # Specific location or unexpected level:
            # previously next_Lx was left unbound here => NameError below
            next_Lx = ""
            next_Lx_label = ""
        if next_Lx:
            # Count the children at the next level down
            table = db.gis_location
            query = (table.deleted == False) & \
                    (table.level == next_Lx) & \
                    (table.parent == record_id)
            count = table.id.count()
            row = db(query).select(count).first()
            if row:
                tally_Lx = row[count]
            else:
                tally_Lx = 0
            next_url = URL(c="gis", f="location",
                           args=["datalist"],
                           vars={"~.level": next_Lx,
                                 "~.parent": record_id,
                                 })
            next_Lx_label = A(next_Lx_label,
                              _href=next_url,
                              )
            next_Lx = SPAN(tally_Lx,
                           _class="badge",
                           )

    # Build the icon, if it doesn't already exist
    filename = "%s.svg" % record_id
    import os
    filepath = os.path.join(current.request.folder, "static", "cache", "svg", filename)
    if not os.path.exists(filepath):
        gtable = db.gis_location
        loc = db(gtable.id == record_id).select(gtable.wkt,
                                                limitby=(0, 1)
                                                ).first()
        # Only render if we actually have a geometry
        # (consistent with the same code in customise_gis_location_controller)
        if loc and loc.wkt:
            from s3.s3codecs.svg import S3SVG
            S3SVG.write_file(filename, loc.wkt)

    # Render the item
    item = DIV(DIV(A(IMG(_class="media-object",
                         _src=URL(c="static",
                                  f="cache",
                                  args=["svg", filename],
                                  )
                         ),
                     _class="pull-left",
                     _href=location_url,
                     ),
                   DIV(SPAN(A(represent,
                              _href=location_url,
                              _class="media-heading"
                              ),
                            ),
                       #edit_bar,
                       _class="card-header-select",
                       ),
                   DIV(P(next_Lx_label,
                         next_Lx,
                         T("Sites"),
                         SPAN(tally_sites,
                              _class="badge",
                              ),
                         T("Requests"),
                         SPAN(tally_reqs,
                              _class="badge",
                              ),
                         T("Donations"),
                         SPAN(tally_commits,
                              _class="badge",
                              ),
                         _class="tally",
                         ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               _class=item_class,
               _id=item_id,
               )
    return item
# -----------------------------------------------------------------------------
def render_locations_profile(list_id, item_id, resource, rfields, record):
    """
        Custom dataList item renderer for Locations on the Profile Page
        - UNUSED

        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict
    """

    record_id = record["gis_location.id"]
    # (removed unused locals: raw = record._row, item_class - the item
    #  below never used either)

    name = record["gis_location.name"]
    location_url = URL(c="gis", f="location",
                       args=[record_id, "profile"])

    # Placeholder to maintain style
    #logo = DIV(IMG(_class="media-object"),
    #           _class="pull-left")

    # We don't Edit Locations
    # Edit Bar
    # permit = current.auth.s3_has_permission
    # table = current.db.gis_location
    # if permit("update", table, record_id=record_id):
    #     vars = {"refresh": list_id,
    #             "record": record_id,
    #             }
    #     f = current.request.function
    #     if f == "organisation" and organisation_id:
    #         vars["(organisation)"] = organisation_id
    #     edit_btn = A(I(" ", _class="icon icon-edit"),
    #                  _href=URL(c="gis", f="location",
    #                            args=[record_id, "update.popup"],
    #                            vars=vars),
    #                  _class="s3_modal",
    #                  _title=current.response.s3.crud_strings.gis_location.title_update,
    #                  )
    # else:
    #     edit_btn = ""
    # if permit("delete", table, record_id=record_id):
    #     delete_btn = A(I(" ", _class="icon icon-trash"),
    #                    _class="dl-item-delete",
    #                    )
    # else:
    #     delete_btn = ""
    # edit_bar = DIV(edit_btn,
    #                delete_btn,
    #                _class="edit-bar fright",
    #                )

    # Render the item: just a link to the location's profile
    item = DIV(DIV(DIV(#SPAN(A(name,
                       #       _href=location_url,
                       #       ),
                       #     _class="location-title"),
                       #" ",
                       #edit_bar,
                       P(A(name,
                           _href=location_url,
                           ),
                         _class="card_comments"),
                       _class="span5"), # card-details
                   _class="row",
                   ),
               )
    return item
# -----------------------------------------------------------------------------
def render_sites(list_id, item_id, resource, rfields, record):
    """
        Custom dataList item renderer for Facilities on the Profile pages

        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict
    """

    record_id = record["org_facility.id"]
    item_class = "thumbnail"

    raw = record._row
    name = record["org_facility.name"]
    site_id = raw["org_facility.id"]
    opening_times = raw["org_facility.opening_times"] or ""
    author = record["org_facility.modified_by"]
    date = record["org_facility.modified_on"]
    organisation = record["org_facility.organisation_id"]
    organisation_id = raw["org_facility.organisation_id"]
    location = record["org_facility.location_id"]
    level = raw["gis_location.level"]
    # Link to the Lx profile; for specific locations use the parent Lx
    if level:
        location_id = raw["org_facility.location_id"]
    else:
        location_id = raw["gis_location.parent"]
    location_url = URL(c="gis", f="location",
                       args=[location_id, "profile"])
    address = raw["gis_location.addr_street"] or ""
    phone = raw["org_facility.phone1"] or ""
    facility_type = record["org_site_facility_type.facility_type_id"]
    comments = record["org_facility.comments"] or ""
    logo = raw["org_organisation.logo"]

    site_url = URL(c="org", f="facility", args=[site_id, "profile"])
    org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
    if logo:
        logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
                     _class="media-object",
                     ),
                 _href=org_url,
                 _class="pull-left",
                 )
    else:
        # Placeholder to maintain layout
        logo = DIV(IMG(_class="media-object"),
                   _class="pull-left")

    facility_status = raw["org_site_status.facility_status"] or ""
    if facility_status:
        # Map the status code to a highlight colour
        if facility_status == 1:
            icon = "thumbs-up-alt"
            colour = "green"
        elif facility_status == 2:
            icon = "thumbs-down-alt"
            colour = "amber"
        elif facility_status == 3:
            icon = "reply-all"
            colour = "red"
        elif facility_status == 4:
            icon = "remove"
            colour = "red"
        elif facility_status == 99:
            icon = "question"
            colour = ""
        else:
            # Unknown code: render without colour highlighting
            # (previously icon/colour were left unbound => NameError)
            icon = "question"
            colour = ""
        facility_status = P(#I(_class="icon-%s" % icon),
                            #" ",
                            SPAN("%s: %s" % (T("Status"), record["org_site_status.facility_status"])),
                            " ",
                            _class="card_1_line %s" % colour,
                            )
    power_supply_type = raw["org_site_status.power_supply_type"] or ""
    if power_supply_type:
        # Map the power-supply code to a highlight colour
        if power_supply_type == 1:
            icon = "thumbs-up-alt"
            colour = "green"
        elif power_supply_type == 2:
            icon = "cogs"
            colour = "amber"
        elif power_supply_type == 98:
            icon = "question"
            colour = "amber"
        elif power_supply_type == 99:
            icon = "remove"
            colour = "red"
        else:
            # Unknown code: render without colour highlighting
            # (previously icon/colour were left unbound => NameError)
            icon = "question"
            colour = ""
        power_supply_type = P(#I(_class="icon-%s" % icon),
                              #" ",
                              SPAN("%s: %s" % (T("Power"), record["org_site_status.power_supply_type"])),
                              " ",
                              _class="card_1_line %s" % colour,
                              )

    # Edit Bar
    permit = current.auth.s3_has_permission
    table = current.db.org_facility
    if permit("update", table, record_id=record_id):
        # (renamed from `vars`, which shadowed the builtin)
        popup_vars = {"refresh": list_id,
                      "record": record_id,
                      }
        f = current.request.function
        if f == "organisation" and organisation_id:
            popup_vars["(organisation)"] = organisation_id
        edit_btn = A(I(" ", _class="icon icon-edit"),
                     _href=URL(c="org", f="facility",
                               args=[record_id, "update.popup"],
                               vars=popup_vars),
                     _class="s3_modal",
                     _title=current.response.s3.crud_strings.org_facility.title_update,
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(I(" ", _class="icon icon-trash"),
                       _class="dl-item-delete",
                       )
    else:
        delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )

    # Render the item
    body = TAG[""](P(I(_class="icon-flag"),
                     " ",
                     SPAN(facility_type),
                     " ",
                     _class="card_1_line",
                     ),
                   P(I(_class="icon-home"),
                     " ",
                     address,
                     _class="card_manylines",
                     ),
                   P(I(_class="icon-time"),
                     " ",
                     SPAN(opening_times),
                     " ",
                     _class="card_1_line",
                     ),
                   P(I(_class="icon-phone"),
                     " ",
                     SPAN(phone),
                     " ",
                     _class="card_1_line",
                     ),
                   facility_status,
                   power_supply_type,
                   P(comments,
                     _class="card_manylines s3-truncate",
                     ),
                   )

    item = DIV(DIV(SPAN(A(name,
                          _href=site_url,
                          ),
                        _class="card-title",
                        ),
                   SPAN(A(location,
                          _href=location_url,
                          ),
                        _class="location-title",
                        ),
                   SPAN(date,
                        _class="date-title",
                        ),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(logo,
                   DIV(DIV(body,
                           DIV(author,
                               " - ",
                               A(organisation,
                                 _href=org_url,
                                 _class="card-organisation",
                                 ),
                               _class="card-person",
                               ),
                           _class="media",
                           ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               #docs,
               _class=item_class,
               _id=item_id,
               )
    return item
# -----------------------------------------------------------------------------
def render_organisations(list_id, item_id, resource, rfields, record):
    """
        Custom dataList item renderer for Organisations on the
        Stakeholder Selection Page

        Shows logo, contact details, money needs and tallies of
        Sites / Requests / Donations for the organisation.

        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict
    """

    record_id = record["org_organisation.id"]
    item_class = "thumbnail span6" # span6 for 2 cols

    # raw values for logic, represented values for display
    raw = record._row
    name = record["org_organisation.name"]
    logo = raw["org_organisation.logo"]
    phone = raw["org_organisation.phone"] or ""
    website = raw["org_organisation.website"] or ""
    if website:
        website = A(website, _href=website)
    money = raw["req_organisation_needs.money"]
    if money:
        money_details = record["req_organisation_needs.money_details"]
        money_details = SPAN(XML(money_details),
                             _class="s3-truncate")
        money_details = P(I(_class="icon icon-dollar"),
                          " ",
                          money_details,
                          _class="card_manylines",
                          )
    else:
        # Include anyway to make cards align
        money_details = P(I(_class="icon icon-dollar"),
                          " ",
                          _class="card_1_line",
                          )
    #time = raw["req_organisation_needs.vol"]
    #if time:
    #    time_details = record["req_organisation_needs.vol_details"]
    #    time_details = P(I(_class="icon icon-time"),
    #                     " ",
    #                     XML(time_details),
    #                     _class="card_1_line",
    #                     )
    #else:
    #    time_details = ""

    org_url = URL(c="org", f="organisation", args=[record_id, "profile"])
    if logo:
        logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
                     _class="media-object",
                     ),
                 _href=org_url,
                 _class="pull-left",
                 )
    else:
        # Placeholder image to maintain the card layout
        logo = DIV(IMG(_class="media-object"),
                   _class="pull-left")

    # Edit Bar (shown only when permitted)
    db = current.db
    permit = current.auth.s3_has_permission
    table = db.org_organisation
    if permit("update", table, record_id=record_id):
        edit_btn = A(I(" ", _class="icon icon-edit"),
                     _href=URL(c="org", f="organisation",
                               args=[record_id, "update.popup"],
                               vars={"refresh": list_id,
                                     "record": record_id}),
                     _class="s3_modal",
                     _title=current.response.s3.crud_strings.org_organisation.title_update,
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(I(" ", _class="icon icon-trash"),
                       _class="dl-item-delete",
                       )
    else:
        delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )

    # Tallies
    # NB We assume that all records are readable here
    s3db = current.s3db
    # Active (non-obsolete) sites of this organisation
    stable = s3db.org_site
    query = (stable.deleted == False) & \
            (stable.obsolete == False) & \
            (stable.organisation_id == record_id)
    tally_sites = db(query).count()

    # Requests raised by those sites
    table = s3db.req_req
    query = (table.deleted == False) & \
            (stable.site_id == table.site_id) & \
            (stable.organisation_id == record_id)
    tally_reqs = db(query).count()

    # Donations (commitments) made by this organisation
    table = s3db.req_commit
    query = (table.deleted == False) & \
            (table.organisation_id == record_id)
    tally_commits = db(query).count()

    # Render the item
    item = DIV(DIV(logo,
                   DIV(SPAN(A(name,
                              _href=org_url,
                              _class="media-heading"
                              ),
                            ),
                       edit_bar,
                       _class="card-header-select",
                       ),
                   DIV(P(I(_class="icon icon-phone"),
                         " ",
                         phone,
                         _class="card_1_line",
                         ),
                       P(I(_class="icon icon-map"),
                         " ",
                         website,
                         _class="card_1_line",
                         ),
                       money_details,
                       #time_details,
                       P(T("Sites"),
                         SPAN(tally_sites,
                              _class="badge",
                              ),
                         T("Requests"),
                         SPAN(tally_reqs,
                              _class="badge",
                              ),
                         T("Donations"),
                         SPAN(tally_commits,
                              _class="badge",
                              ),
                         _class="tally",
                         ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               _class=item_class,
               _id=item_id,
               )
    return item
# -----------------------------------------------------------------------------
def render_org_needs(list_id, item_id, resource, rfields, record):
    """
        Custom dataList item renderer for Needs
        - UNUSED

        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict
    """

    record_id = record["req_organisation_needs.id"]
    item_class = "thumbnail"

    # raw values for logic, represented values for display
    raw = record._row
    logo = raw["org_organisation.logo"]
    phone = raw["org_organisation.phone"] or ""
    website = raw["org_organisation.website"] or ""
    if website:
        website = A(website, _href=website)
    author = record["req_organisation_needs.modified_by"]
    date = record["req_organisation_needs.modified_on"]
    money = raw["req_organisation_needs.money"]
    if money:
        money_details = record["req_organisation_needs.money_details"]
        money_details = P(I(_class="icon icon-dollar"),
                          " ",
                          XML(money_details),
                          _class="card_manylines",
                          )
    else:
        money_details = ""
    # NOTE(review): local name `time` shadows the stdlib module name
    # (the module is not imported here, so this is harmless)
    time = raw["req_organisation_needs.vol"]
    if time:
        time_details = record["req_organisation_needs.vol_details"]
        time_details = P(I(_class="icon icon-time"),
                         " ",
                         XML(time_details),
                         _class="card_manylines",
                         )
    else:
        time_details = ""

    org_id = raw["org_organisation.id"]
    org_url = URL(c="org", f="organisation", args=[org_id, "profile"])
    if logo:
        logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
                     _class="media-object",
                     ),
                 _href=org_url,
                 _class="pull-left",
                 )
    else:
        # Placeholder image to maintain the card layout
        logo = DIV(IMG(_class="media-object"),
                   _class="pull-left")

    # Edit Bar (shown only when permitted)
    permit = current.auth.s3_has_permission
    table = current.db.req_organisation_needs
    if permit("update", table, record_id=record_id):
        edit_btn = A(I(" ", _class="icon icon-edit"),
                     _href=URL(c="req", f="organisation_needs",
                               args=[record_id, "update.popup"],
                               vars={"refresh": list_id,
                                     "record": record_id}),
                     _class="s3_modal",
                     _title=current.response.s3.crud_strings.req_organisation_needs.title_update,
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(I(" ", _class="icon icon-trash"),
                       _class="dl-item-delete",
                       )
    else:
        delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )

    if current.request.controller == "org":
        # Org Profile page - no need to repeat Org Name
        title = " "
    else:
        title = raw["org_organisation.name"]

    # Render the item
    item = DIV(DIV(SPAN(title, _class="card-title"),
                   SPAN(author, _class="location-title"),
                   SPAN(date, _class="date-title"),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(logo,
                   DIV(P(I(_class="icon icon-phone"),
                         " ",
                         phone,
                         _class="card_1_line",
                         ),
                       P(I(_class="icon icon-map"),
                         " ",
                         website,
                         _class="card_1_line",
                         ),
                       money_details,
                       time_details,
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               _class=item_class,
               _id=item_id,
               )
    return item

# Expose the renderer for use by widget configurations
s3.render_org_needs = render_org_needs
# -----------------------------------------------------------------------------
def render_site_needs(list_id, item_id, resource, rfields, record):
    """
        Custom dataList item renderer for Needs
        - UNUSED

        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict
    """

    record_id = record["req_site_needs.id"]
    item_class = "thumbnail"

    raw = record._row
    logo = raw["org_organisation.logo"]
    addresses = raw["gis_location.addr_street"]
    if addresses:
        # Can be a list when the site joins multiple locations
        if isinstance(addresses, list):
            address = addresses[0]
        else:
            address = addresses
    else:
        address = ""
    #contact = raw["org_facility.contact"] or ""
    # The contact field is disabled above: keep an empty placeholder so
    # the card still renders
    # (previously `contact` was undefined here => NameError below)
    contact = ""
    opening_times = raw["org_facility.opening_times"] or ""
    phone = raw["org_facility.phone1"] or ""
    website = raw["org_organisation.website"] or ""
    if website:
        website = A(website, _href=website)
    author = record["req_site_needs.modified_by"]
    date = record["req_site_needs.modified_on"]
    #goods = raw["req_site_needs.goods"]
    #if goods:
    #    goods_details = record["req_site_needs.goods_details"]
    #    goods_details = P(I(_class="icon icon-truck"),
    #                      " ",
    #                      XML(goods_details),
    #                      _class="card_1_line",
    #                      )
    #else:
    #    goods_details = ""
    #time = raw["req_site_needs.vol"]
    #if time:
    #    time_details = record["req_site_needs.vol_details"]
    #    time_details = P(I(_class="icon icon-time"),
    #                     " ",
    #                     XML(time_details),
    #                     _class="card_1_line",
    #                     )
    #else:
    #    time_details = ""

    site_url = URL(c="org", f="facility", args=[record_id, "profile"])
    if logo:
        logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
                     _class="media-object",
                     ),
                 _href=site_url,
                 _class="pull-left",
                 )
    else:
        # Placeholder image to maintain the card layout
        logo = DIV(IMG(_class="media-object"),
                   _class="pull-left")

    # Edit Bar (shown only when permitted)
    permit = current.auth.s3_has_permission
    table = current.db.req_site_needs
    if permit("update", table, record_id=record_id):
        edit_btn = A(I(" ", _class="icon icon-edit"),
                     _href=URL(c="req", f="site_needs",
                               args=[record_id, "update.popup"],
                               vars={"refresh": list_id,
                                     "record": record_id}),
                     _class="s3_modal",
                     _title=current.response.s3.crud_strings.req_site_needs.title_update,
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(I(" ", _class="icon icon-trash"),
                       _class="dl-item-delete",
                       )
    else:
        delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )

    if current.request.controller == "org":
        # Site Profile page - no need to repeat Site Name
        title = " "
    else:
        title = raw["org_facility.name"]

    # Render the item
    item = DIV(DIV(SPAN(title, _class="card-title"),
                   SPAN(author, _class="location-title"),
                   SPAN(date, _class="date-title"),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(logo,
                   DIV(#goods_details,
                       #time_details,
                       P(I(_class="icon icon-home"),
                         " ",
                         address,
                         _class="card_manylines",
                         ),
                       P(I(_class="icon-time"),
                         " ",
                         SPAN(opening_times),
                         " ",
                         _class="card_1_line",
                         ),
                       P(I(_class="icon icon-phone"),
                         " ",
                         phone,
                         _class="card_1_line",
                         ),
                       P(I(_class="icon icon-map"),
                         " ",
                         website,
                         _class="card_1_line",
                         ),
                       P(I(_class="icon icon-user"),
                         " ",
                         contact,
                         _class="card_1_line",
                         ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               _class=item_class,
               _id=item_id,
               )
    return item

# Expose the renderer for use by widget configurations
s3.render_site_needs = render_site_needs
# -----------------------------------------------------------------------------
def customise_gis_location_controller(**attr):
    """
        Customise gis_location controller
        - Profile Page

        Installs a custom prep which configures both the Lx selection
        datalist and the location profile page (map + Requests /
        Donations / Sites widgets).

        @param attr: controller attributes dict (returned unchanged
                     apart from s3.prep being replaced)
    """

    db = current.db
    s3 = current.response.s3

    # Custom PreP
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False

        if r.interactive:
            s3db = current.s3db
            table = s3db.gis_location
            if r.method == "datalist":
                # Lx selection page
                # 2-column datalist, 6 rows per page
                s3.dl_pagelength = 12
                s3.dl_rowsize = 2
                # Default 5 triggers an AJAX call, we should load all by default
                # NOTE(review): this overrides the pagelength set just above
                s3.dl_pagelength = 17

                level = current.request.get_vars.get("~.level", None)
                if not level:
                    # Just show PH L1s
                    level = "L1"
                    s3.filter = (table.L0 == "Philippines") & (table.level == "L1")
                parent = current.request.get_vars.get("~.parent", None)
                # Page title reflects the level (and parent, if filtered)
                if level == "L1":
                    s3.crud_strings["gis_location"].title_list = T("Regions")
                elif level == "L2":
                    if parent:
                        # NOTE(review): assumes the parent id resolves to a
                        # record - .first().name raises if not found
                        parent = db(table.id == parent).select(table.name,
                                                               limitby=(0, 1)
                                                               ).first().name
                        s3.crud_strings["gis_location"].title_list = T("Provinces in %s") % parent
                    else:
                        s3.crud_strings["gis_location"].title_list = T("Provinces")
                elif level == "L3":
                    if parent:
                        parent = db(table.id == parent).select(table.name,
                                                               limitby=(0, 1)
                                                               ).first().name
                        s3.crud_strings["gis_location"].title_list = T("Municipalities and Cities in %s") % parent
                    else:
                        s3.crud_strings["gis_location"].title_list = T("Municipalities and Cities")
                elif level == "L4":
                    if parent:
                        parent = db(table.id == parent).select(table.name,
                                                               limitby=(0, 1)
                                                               ).first().name
                        s3.crud_strings["gis_location"].title_list = T("Barangays in %s") % parent
                    else:
                        s3.crud_strings["gis_location"].title_list = T("Barangays")

                list_fields = ["name",
                               "level",
                               "L1",
                               "L2",
                               "L3",
                               "L4",
                               ]
                s3db.configure("gis_location",
                               filter_widgets = None,
                               list_fields = list_fields,
                               list_layout = render_locations,
                               )
            elif r.method == "profile":
                # Customise tables used by widgets
                #customise_hrm_human_resource_fields()
                customise_org_facility_fields()
                s3db.req_customise_req_fields()
                s3db.req_customise_commit_fields()

                # gis_location table (Sub-Locations)
                table.parent.represent = s3db.gis_LocationRepresent(sep=" | ")

                list_fields = ["name",
                               "id",
                               ]

                location = r.record
                record_id = location.id
                # Override context as that's a Path
                default = "~.(location)=%s" % record_id
                map_widget = dict(label = "Map",
                                  type = "map",
                                  context = "location",
                                  icon = "icon-map",
                                  height = 383,
                                  width = 568,
                                  bbox = {"lat_max" : location.lat_max,
                                          "lon_max" : location.lon_max,
                                          "lat_min" : location.lat_min,
                                          "lon_min" : location.lon_min
                                          },
                                  )
                #locations_widget = dict(label = "Locations",
                #                        insert = False,
                #                        #label_create = "Create Location",
                #                        type = "datalist",
                #                        tablename = "gis_location",
                #                        context = "location",
                #                        icon = "icon-globe",
                #                        # @ToDo: Show as Polygons?
                #                        show_on_map = False,
                #                        list_layout = render_locations_profile,
                #                        )
                #needs_widget = dict(label = "Needs",
                #                    label_create = "Add New Need",
                #                    type = "datalist",
                #                    tablename = "req_site_needs",
                #                    context = "location",
                #                    icon = "icon-hand-up",
                #                    multiple = False,
                #                    # Would just show up on Sites
                #                    show_on_map = False,
                #                    list_layout = render_site_needs,
                #                    )
                reqs_widget = dict(label = "Requests",
                                   label_create = "Add New Request",
                                   type = "datalist",
                                   tablename = "req_req",
                                   context = "location",
                                   default = default,
                                   filter = S3FieldSelector("req_status").belongs([0, 1]),
                                   icon = "icon-flag",
                                   layer = "Requests",
                                   # provided by Catalogue Layer
                                   #marker = "request",
                                   list_layout = s3db.req_req_list_layout,
                                   )
                commits_widget = dict(label = "Donations",
                                      label_create = "Add New Donation",
                                      type = "datalist",
                                      tablename = "req_commit",
                                      context = "location",
                                      default = default,
                                      filter = S3FieldSelector("cancel") == False,
                                      icon = "icon-truck",
                                      show_on_map = False,
                                      #layer = "Donations",
                                      # provided by Catalogue Layer
                                      #marker = "donation",
                                      list_layout = s3db.req_commit_list_layout,
                                      )
                #resources_widget = dict(label = "Resources",
                #                        label_create = "Create Resource",
                #                        type = "datalist",
                #                        tablename = "org_resource",
                #                        context = "location",
                #                        default = default,
                #                        #filter = S3FieldSelector("req_status").belongs([0, 1]),
                #                        icon = "icon-wrench",
                #                        layer = "Resources",
                #                        # provided by Catalogue Layer
                #                        #marker = "resource",
                #                        list_layout = s3db.org_resource_list_layout,
                #                        )
                sites_widget = dict(label = "Sites",
                                    label_create = "Add New Site",
                                    type = "datalist",
                                    tablename = "org_facility",
                                    context = "location",
                                    default = default,
                                    filter = S3FieldSelector("obsolete") == False,
                                    icon = "icon-home",
                                    layer = "Facilities",
                                    # provided by Catalogue Layer
                                    #marker = "office",
                                    list_layout = render_sites,
                                    )
                # Build the icon, if it doesn't already exist
                filename = "%s.svg" % record_id
                import os
                filepath = os.path.join(current.request.folder, "static", "cache", "svg", filename)
                if not os.path.exists(filepath):
                    gtable = db.gis_location
                    loc = db(gtable.id == record_id).select(gtable.wkt,
                                                            limitby=(0, 1)
                                                            ).first()
                    if loc and loc.wkt:
                        from s3.s3codecs.svg import S3SVG
                        S3SVG.write_file(filename, loc.wkt)

                # Edit button for the profile header (if permitted)
                if current.auth.s3_has_permission("update", table, record_id=record_id):
                    edit_btn = A(I(_class="icon icon-edit"),
                                 _href=URL(c="gis", f="location",
                                           args=[record_id, "update.popup"],
                                           vars={"refresh": "datalist"}),
                                 _class="s3_modal",
                                 _title=s3.crud_strings["gis_location"].title_update,
                                 )
                else:
                    edit_btn = ""
                name = location.name
                s3db.configure("gis_location",
                               list_fields = list_fields,
                               profile_title = "%s : %s" % (s3.crud_strings["gis_location"].title_list,
                                                            name),
                               profile_header = DIV(edit_btn,
                                                    A(IMG(_class="media-object",
                                                          _src=URL(c="static",
                                                                   f="cache",
                                                                   args=["svg", filename],
                                                                   ),
                                                          ),
                                                      _class="pull-left",
                                                      #_href=location_url,
                                                      ),
                                                    H2(name),
                                                    _class="profile_header",
                                                    ),
                               profile_widgets = [reqs_widget,
                                                  map_widget,
                                                  commits_widget,
                                                  #resources_widget,
                                                  sites_widget,
                                                  #locations_widget,
                                                  ],
                               )
        return True
    s3.prep = custom_prep

    return attr

settings.customise_gis_location_controller = customise_gis_location_controller
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_fields():
    """
        Configure hrm_human_resource fields for Profile widgets
        and 'more' popups
    """

    s3db = current.s3db
    htable = s3db.hrm_human_resource

    # Friendlier representations for the list layouts
    htable.site_id.represent = S3Represent(lookup="org_site")
    s3db.org_site.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
    #htable.modified_by.represent = s3_auth_user_represent_name
    htable.modified_on.represent = datetime_represent

    s3db.configure("hrm_human_resource",
                   list_fields = ["person_id",
                                  "person_id$pe_id",
                                  "organisation_id",
                                  "site_id$location_id",
                                  "site_id$location_id$addr_street",
                                  "job_title_id",
                                  "email.value",
                                  "phone.value",
                                  #"modified_by",
                                  "modified_on",
                                  ],
                   )
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_controller(**attr):
    """
        Customise hrm_human_resource controller
        - used for 'more' popups
    """

    s3 = current.response.s3

    # Chain our prep onto any standard prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Run the standard prep first; bail out if it vetoes the request
        if callable(standard_prep) and not standard_prep(r):
            return False

        if r.method == "datalist":
            customise_hrm_human_resource_fields()
            # Don't include a Create form in 'More' popups
            current.s3db.configure("hrm_human_resource",
                                   listadd = False,
                                   list_layout = render_contacts,
                                   )
        return True
    s3.prep = custom_prep

    return attr

settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller
# -----------------------------------------------------------------------------
def customise_hrm_job_title_controller(**attr):
    """
        Customise hrm_job_title controller:
        hides the organisation field (job titles are global here) and
        attaches Open/Edit/Delete row actions which respect record
        ownership.

        @param attr: controller attributes dict (returned unchanged
                     apart from s3.postp being replaced)
    """

    s3 = current.response.s3
    table = current.s3db.hrm_job_title

    # Configure fields: job titles are not per-organisation here
    field = table.organisation_id
    field.readable = field.writable = False
    field.default = None

    def _accessible_ids(method):
        """
            IDs (as strings) of the records the user may apply
            <method> to - used to restrict action buttons
        """
        query = current.auth.s3_accessible_query(method, table)
        rows = current.db(query).select(table._id)
        ids = []
        for row in rows:
            row_id = row.get("id", None)
            if row_id:
                ids.append(str(row_id))
        return ids

    # Custom postp
    standard_postp = s3.postp
    def custom_postp(r, output):
        if r.interactive:
            actions = [dict(label=str(T("Open")),
                            _class="action-btn",
                            url=URL(c="hrm", f="job_title",
                                    args=["[id]", "read"]))
                       ]
            auth = current.auth
            has_permission = auth.s3_has_permission
            ownership_required = auth.permission.ownership_required
            # Edit & Delete buttons share the same restriction logic
            # (was two copy-pasted branches)
            for method, label in (("update", T("Edit")),
                                  ("delete", T("Delete")),
                                  ):
                if has_permission(method, table):
                    action = dict(label=str(label),
                                  _class="action-btn",
                                  url=URL(c="hrm", f="job_title",
                                          args=["[id]", method]),
                                  )
                    if ownership_required(method, table):
                        # Check which records this applies to
                        action["restrict"] = _accessible_ids(method)
                    actions.append(action)
            s3.actions = actions
            if isinstance(output, dict):
                if "form" in output:
                    output["form"].add_class("hrm_job_title")
                elif "item" in output and hasattr(output["item"], "add_class"):
                    output["item"].add_class("hrm_job_title")

        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)

        return output
    s3.postp = custom_postp

    return attr

settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller
# -----------------------------------------------------------------------------
def customise_org_facility_fields():
    """
        Customise org_facility for Profile widgets and 'more' popups:
        field representations, CRUD strings, list_fields and the
        custom CRUD form.
    """

    # Truncate comments fields
    from s3.s3utils import s3_trunk8
    s3_trunk8(lines=2)

    s3db = current.s3db
    tablename = "org_facility"
    table = s3db.org_facility
    # Friendlier representations
    table.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
    table.modified_by.represent = s3_auth_user_represent_name
    table.modified_on.represent = datetime_represent
    field = table.comments
    # Render URLs in comments as clickable links
    field.represent = lambda body: XML(s3_URLise(body))
    field.comment = None
    table.phone1.label = T("Phone")

    # CRUD strings
    ADD_FAC = T("Add Site")
    current.response.s3.crud_strings[tablename] = Storage(
        label_create = ADD_FAC,
        title_display = T("Site Details"),
        title_list = T("Sites"),
        title_update = T("Edit Site"),
        label_list_button = T("List Sites"),
        label_delete_button = T("Delete Site"),
        msg_record_created = T("Site Added"),
        msg_record_modified = T("Site Updated"),
        msg_record_deleted = T("Site Canceled"),
        msg_list_empty = T("No Sites registered"))

    list_fields = ["name",
                   "code",
                   "site_facility_type.facility_type_id",
                   "organisation_id",
                   "location_id",
                   "location_id$addr_street",
                   "location_id$level",
                   "location_id$parent",
                   "modified_by",
                   "modified_on",
                   "organisation_id$logo",
                   "opening_times",
                   "human_resource.person_id",
                   #"contact",
                   "phone1",
                   "status.facility_status",
                   "status.power_supply_type",
                   "comments",
                   ]

    # NB Field order here defines the form layout
    crud_form = S3SQLCustomForm("name",
                                "code",
                                S3SQLInlineComponentMultiSelectWidget(
                                    "facility_type",
                                    label = T("Facility Type"),
                                    field = "facility_type_id",
                                    widget = "multiselect",
                                ),
                                "organisation_id",
                                "location_id",
                                "opening_times",
                                # This is too Ugly right now!
                                #S3SQLInlineComponent(
                                #    "human_resource_site",
                                #    label = T("Focal Point"),
                                #    field = ["human_resource_id"],
                                #    multiple = False,
                                #),
                                #"contact",
                                "phone1",
                                # This is too Ugly right now!
                                #S3SQLInlineComponent(
                                #    "needs",
                                #    label = T("Needs"),
                                #    multiple = False,
                                #),
                                S3SQLInlineComponent(
                                    "status",
                                    label = T("Status"),
                                    multiple = False,
                                ),
                                "comments",
                                )

    s3db.configure(tablename,
                   crud_form = crud_form,
                   list_fields = list_fields,
                   )
# -----------------------------------------------------------------------------
def customise_org_facility_controller(**attr):
    """
        Customise org_facility controller
        - Site selection datalist
        - Profile page (map, contacts, requests, donations widgets)
        - Action buttons & CSS classes in the postp

        @param attr: controller attributes dict (returned, possibly modified)
    """
    s3 = current.response.s3
    s3db = current.s3db
    table = s3db.org_facility

    # Custom PreP
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False

        if r.interactive:
            customise_org_facility_fields()

            # Which levels of Hierarchy are we using?
            hierarchy = current.gis.get_location_hierarchy()
            levels = hierarchy.keys()
            if len(current.deployment_settings.gis.countries) == 1 or \
               s3.gis.config.region_location_id:
                # Single-country deployment: country level is implicit
                levels.remove("L0")

            # Filter from a Profile page?
            # If so, then default the fields we know
            get_vars = current.request.get_vars
            location_id = get_vars.get("~.(location)", None)
            organisation_id = get_vars.get("~.(organisation)", None)
            if organisation_id:
                org_field = table.organisation_id
                org_field.default = organisation_id
                org_field.readable = org_field.writable = False
            location_field = table.location_id
            if location_id:
                location_field.default = location_id
                location_field.readable = location_field.writable = False
            else:
                # Don't add new Locations here
                location_field.comment = None
                location_field.requires = IS_LOCATION_SELECTOR2(levels=levels)
                location_field.widget = S3LocationSelectorWidget2(levels=levels,
                                                                  show_address=True,
                                                                  show_map=True)
            # @ToDo: Proper button if we want this & amend functionality for Bootstrap)
            #s3.cancel = True

        if r.method == "datalist":
            # Site selection page
            # 2-column datalist, 6 rows per page
            #s3.dl_pagelength = 12
            #s3.dl_rowsize = 2
            from s3.s3filter import S3TextFilter, S3OptionsFilter, S3LocationFilter
            filter_widgets = [
                S3LocationFilter("location_id",
                                 levels = levels,
                                 hidden = True,
                                 ),
                S3OptionsFilter(name = "type",
                                label = T("Type"),
                                field = "site_facility_type.facility_type_id",
                                hidden = True,
                                ),
                S3OptionsFilter(name = "status",
                                label = T("Status"),
                                field = "status.facility_status",
                                hidden = True,
                                ),
                S3OptionsFilter(name = "power",
                                label = T("Power Supply"),
                                field = "status.power_supply_type",
                                hidden = True,
                                ),
                ]
            #get_vars = current.request.get_vars
            #goods = get_vars.get("needs.goods", None)
            #vol = get_vars.get("needs.vol", None)
            #if goods:
            #    needs_fields = ["needs.goods_details"]
            #    s3.crud_strings["org_facility"].title_list = T("Sites where you can Drop-off Goods")
            #elif vol:
            #    needs_fields = ["needs.vol_details"]
            #    s3.crud_strings["org_facility"].title_list = T("Sites where you can Volunteer your time")
            #else:
            #    yesno = {True: T("Yes"), False: T("No")}
            #    needs_fields = ["needs.goods_details", "needs.vol_details"]
            #    filter_widgets.insert(0, S3OptionsFilter("needs.goods",
            #                                             label = T("Drop-off Goods"),
            #                                             cols = 2,
            #                                             options = yesno,
            #                                             multiple = False,
            #                                             hidden = True,
            #                                             ))
            #    filter_widgets.insert(1, S3OptionsFilter("needs.vol",
            #                                             label = T("Volunteer Time"),
            #                                             cols = 2,
            #                                             options = yesno,
            #                                             multiple = False,
            #                                             hidden = True,
            #                                             ))
            filter_widgets.insert(0, S3TextFilter(["name",
                                                   "code",
                                                   "comments",
                                                   ], #+ needs_fields,
                                                  label = T("Search")))

            s3db.configure("org_facility",
                           # Don't include a Create form in 'More' popups
                           listadd = False,
                           list_layout = render_sites,
                           filter_widgets = filter_widgets,
                           )

        elif r.method == "profile":
            # Customise tables used by widgets
            customise_hrm_human_resource_fields()
            customise_site_needs_fields(profile=True)
            s3db.req_customise_req_fields()

            list_fields = ["name",
                           "id",
                           ]

            record = r.record
            record_id = record.id
            # @ToDo: Center on the Site
            map_widget = dict(label = "Map",
                              type = "map",
                              context = "site",
                              icon = "icon-map",
                              height = 383,
                              width = 568,
                              )
            contacts_widget = dict(label = "Contacts",
                                   label_create = "Create Contact",
                                   type = "datalist",
                                   tablename = "hrm_human_resource",
                                   context = "site",
                                   create_controller = "pr",
                                   create_function = "person",
                                   icon = "icon-contact",
                                   show_on_map = False, # Since they will show within Sites
                                   list_layout = render_contacts,
                                   )
            reqs_widget = dict(label = "Requests",
                               label_create = "Add New Request",
                               type = "datalist",
                               tablename = "req_req",
                               context = "site",
                               filter = S3FieldSelector("req_status").belongs([0, 1]),
                               icon = "icon-flag",
                               show_on_map = False, # Since they will show within Sites
                               list_layout = s3db.req_req_list_layout,
                               )
            commits_widget = dict(label = "Donations",
                                  #label_create = "Add New Donation",
                                  type = "datalist",
                                  tablename = "req_commit",
                                  context = "site",
                                  filter = S3FieldSelector("cancel") == False,
                                  icon = "icon-truck",
                                  show_on_map = False,
                                  #layer = "Donations",
                                  # provided by Catalogue Layer
                                  #marker = "donation",
                                  list_layout = s3db.req_commit_list_layout,
                                  )

            if current.auth.s3_has_permission("update", table, record_id=record_id):
                edit_btn = A(I(_class = "icon icon-edit"),
                             _href=URL(c="org", f="facility",
                                       args=[record_id, "update.popup"],
                                       vars={"refresh": "datalist"}),
                             _class="s3_modal",
                             _title=s3.crud_strings["org_facility"].title_update,
                             )
            else:
                edit_btn = ""

            name = record.name
            code = record.code
            if code:
                name_code = "%s - %s" % (name, code)
            else:
                # Fixed: fall back to the plain name when there is no code
                # (previously assigned the falsy code itself)
                # NOTE: name_code is currently unused below
                name_code = name
            location = table.location_id.represent(record.location_id)
            organisation_id = record.organisation_id
            db = current.db
            otable = db.org_organisation
            query = (otable.id == organisation_id)
            org = db(query).select(otable.name,
                                   otable.logo,
                                   limitby=(0, 1)).first()
            if org and org.logo:
                logo = URL(c="default", f="download", args=[org.logo])
            else:
                # @ToDo: Placeholder
                logo = "#"

            # Add primary resource to map
            # Lookup Marker (type-dependent)
            ftable = s3db.org_facility
            ltable = s3db.org_site_facility_type
            # Fixed: compare the facility's id field, not the table object itself
            query = (ftable.id == record_id) & \
                    (ftable.site_id == ltable.site_id)
            facility_type = db(query).select(ltable.facility_type_id,
                                             limitby = (0, 1)
                                             ).first()
            # Lookup Marker
            if facility_type:
                # Fixed: the Row only contains facility_type_id (no "id" attribute)
                layer_filter = "facility_type.facility_type_id=%s" % \
                                    facility_type.facility_type_id
            else:
                layer_filter = ""
            marker = current.gis.get_marker(controller = "org",
                                            function = "facility",
                                            filter = layer_filter)

            lat = None
            lon = None
            gtable = s3db.gis_location
            query = (r.id == ftable.id) & \
                    (ftable.location_id == gtable.id)
            lat_lon = db(query).select(gtable.lat,
                                       gtable.lon,
                                       limitby = (0,1)).first()
            if lat_lon:
                lat = lat_lon["gis_location.lat"]
                lon = lat_lon["gis_location.lon"]
            map_widget["lat"] = lat
            map_widget["lon"] = lon

            tablename = "org_facility"
            layer = dict(name = record.name,
                         id = "profile-header-%s-%s" % (tablename, record_id),
                         active = True,
                         tablename = r.tablename,
                         url = "/%s/org/facility.geojson?facility.id=%s" % \
                            (r.application, record_id),
                         marker = marker,
                         )
            s3db.configure(tablename,
                           list_fields = list_fields,
                           profile_title = "%s : %s" % (s3.crud_strings["org_facility"].title_list,
                                                        name),
                           profile_header = DIV(edit_btn,
                                                IMG(_class="media-object",
                                                    _src=logo,
                                                    ),
                                                H2(name),
                                                record.code and P(record.code) or "",
                                                P(I(_class="icon-sitemap"),
                                                  " ",
                                                  SPAN(org and org.name or current.messages.NONE),
                                                  " ",
                                                  _class="card_1_line",
                                                  ),
                                                P(I(_class="icon-globe"),
                                                  " ",
                                                  SPAN(location),
                                                  " ",
                                                  _class="card_1_line",
                                                  ),
                                                P(record.comments,
                                                  _class="s3-truncate"),
                                                _class="profile_header",
                                                ),
                           profile_layers = [layer],
                           profile_widgets = [reqs_widget,
                                              map_widget,
                                              commits_widget,
                                              contacts_widget,
                                              ],
                           )

        if r.interactive or r.representation == "aadata":
            # Configure fields
            #table.code.readable = table.code.writable = False
            #table.phone1.readable = table.phone1.writable = False
            table.phone2.readable = table.phone2.writable = False
            table.email.readable = table.email.writable = False
        elif r.representation == "geojson":
            # Don't represent facility_status, but just show integers
            s3db.org_site_status.facility_status.represent = None

        return True
    s3.prep = custom_prep

    # Custom postp
    standard_postp = s3.postp
    def custom_postp(r, output):
        if r.interactive:
            if isinstance(output, dict) and \
               current.auth.s3_has_permission("create", r.table):
                # Insert a Button to Create New in Modal
                output["showadd_btn"] = A(I(_class="icon icon-plus-sign big-add"),
                                          _href=URL(c="org", f="facility",
                                                    args=["create.popup"],
                                                    vars={"refresh": "datalist"}),
                                          _class="btn btn-primary s3_modal",
                                          _role="button",
                                          _title=T("Add New Site"),
                                          )
            actions = [dict(label=str(T("Open")),
                            _class="action-btn",
                            url=URL(c="org", f="facility",
                                    args=["[id]", "read"]))
                       ]
            db = current.db
            auth = current.auth
            has_permission = auth.s3_has_permission
            ownership_required = auth.permission.ownership_required
            s3_accessible_query = auth.s3_accessible_query
            if has_permission("update", table):
                action = dict(label=str(T("Edit")),
                              _class="action-btn",
                              url=URL(c="org", f="facility",
                                      args=["[id]", "update"]),
                              )
                if ownership_required("update", table):
                    # Check which records can be updated
                    query = s3_accessible_query("update", table)
                    rows = db(query).select(table._id)
                    restrict = []
                    rappend = restrict.append
                    for row in rows:
                        row_id = row.get("id", None)
                        if row_id:
                            rappend(str(row_id))
                    action["restrict"] = restrict
                actions.append(action)
            if has_permission("delete", table):
                action = dict(label=str(T("Delete")),
                              _class="action-btn",
                              url=URL(c="org", f="facility",
                                      args=["[id]", "delete"]),
                              )
                if ownership_required("delete", table):
                    # Check which records can be deleted
                    query = s3_accessible_query("delete", table)
                    rows = db(query).select(table._id)
                    restrict = []
                    rappend = restrict.append
                    for row in rows:
                        row_id = row.get("id", None)
                        if row_id:
                            rappend(str(row_id))
                    action["restrict"] = restrict
                actions.append(action)
            s3.actions = actions
        if isinstance(output, dict):
            if "form" in output:
                output["form"].add_class("org_facility")
            elif "item" in output and hasattr(output["item"], "add_class"):
                output["item"].add_class("org_facility")

        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)

        return output
    s3.postp = custom_postp

    # @ToDo: Don't just hide but prevent building
    #attr["rheader"] = None
    return attr

settings.customise_org_facility_controller = customise_org_facility_controller
# -----------------------------------------------------------------------------
def customise_org_needs_fields(profile=False):
    """
        Configure req_organisation_needs fields for datalist rendering

        @param profile: whether this is for a Profile page
                        (which omits the organisation name column)
    """
    # Truncate details field(s)
    from s3.s3utils import s3_trunk8
    s3_trunk8(lines=2)

    s3db = current.s3db
    table = s3db.req_organisation_needs
    table.modified_by.represent = s3_auth_user_represent_name
    table.modified_on.represent = datetime_represent
    table.vol.readable = table.vol.writable = False
    table.vol_details.readable = table.vol_details.writable = False

    # Hide money_details unless used
    # Fixed: "s3" was never bound in this scope (NameError) -
    # access the response s3 object explicitly
    current.response.s3.jquery_ready.append(
'''$('#req_organisation_needs_money_details__row').hide()
$('#req_organisation_needs_money').change(function(){
 $('#req_organisation_needs_money_details__row').toggle($(this).prop('checked'))
}).change()''')

    list_fields = ["id",
                   "organisation_id",
                   # @ToDo: Are these better displayed elsewhere in Profile view?
                   "organisation_id$logo",
                   "organisation_id$phone",
                   "organisation_id$website",
                   "money",
                   "money_details",
                   #"vol",
                   #"vol_details",
                   "modified_on",
                   "modified_by",
                   ]
    if not profile:
        list_fields += ["organisation_id$name",
                        ]
    s3db.configure("req_organisation_needs",
                   list_fields=list_fields,
                   )
    return
# -----------------------------------------------------------------------------
def customise_req_organisation_needs_controller(**attr):
    """
        Customise the req_organisation_needs controller:
        just applies the shared field configuration.

        @param attr: controller attributes (returned unchanged)
    """
    customise_org_needs_fields()
    return attr

settings.customise_req_organisation_needs_controller = customise_req_organisation_needs_controller
# -----------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):
    """
        Customise org_organisation controller
            - Profile Page
            - Requests

        @param attr: controller attributes dict (returned, possibly modified)
    """
    s3 = current.response.s3
    # Custom PreP
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False
        if r.interactive or r.representation == "aadata":
            # Load normal Model
            s3db = current.s3db
            table = s3db.org_organisation
            list_fields = ["id",
                           "name",
                           "logo",
                           "phone",
                           "website",
                           "needs.money",
                           "needs.money_details",
                           #"needs.vol",
                           #"needs.vol_details",
                           ]
            if r.method == "profile":
                # Customise tables used by widgets
                customise_hrm_human_resource_fields()
                customise_org_facility_fields()
                customise_org_needs_fields(profile=True)
                s3db.org_customise_org_resource_fields("profile")
                # Widget definitions for the Profile page
                contacts_widget = dict(label = "Contacts",
                                       label_create = "Create Contact",
                                       type = "datalist",
                                       tablename = "hrm_human_resource",
                                       context = "organisation",
                                       create_controller = "pr",
                                       create_function = "person",
                                       icon = "icon-contact",
                                       show_on_map = False, # Since they will show within Offices
                                       list_layout = render_contacts,
                                       )
                map_widget = dict(label = "Map",
                                  type = "map",
                                  context = "organisation",
                                  icon = "icon-map",
                                  height = 383,
                                  width = 568,
                                  )
                needs_widget = dict(label = "Needs",
                                    label_create = "Add New Need",
                                    type = "datalist",
                                    tablename = "req_organisation_needs",
                                    multiple = False,
                                    context = "organisation",
                                    icon = "icon-hand-up",
                                    show_on_map = False,
                                    list_layout = render_org_needs,
                                    )
                reqs_widget = dict(label = "Requests",
                                   label_create = "Add New Request",
                                   type = "datalist",
                                   tablename = "req_req",
                                   context = "organisation",
                                   filter = S3FieldSelector("req_status").belongs([0, 1]),
                                   icon = "icon-flag",
                                   layer = "Requests",
                                   # provided by Catalogue Layer
                                   #marker = "request",
                                   list_layout = s3db.req_req_list_layout,
                                   )
                #resources_widget = dict(label = "Resources",
                #                        label_create = "Create Resource",
                #                        type = "datalist",
                #                        tablename = "org_resource",
                #                        context = "organisation",
                #                        #filter = S3FieldSelector("req_status").belongs([0, 1]),
                #                        icon = "icon-wrench",
                #                        layer = "Resources",
                #                        # provided by Catalogue Layer
                #                        #marker = "resource",
                #                        list_layout = s3db.org_resource_list_layout,
                #                        )
                commits_widget = dict(label = "Donations",
                                      #label_create = "Add New Donation",
                                      type = "datalist",
                                      tablename = "req_commit",
                                      context = "organisation",
                                      filter = S3FieldSelector("cancel") == False,
                                      icon = "icon-truck",
                                      show_on_map = False,
                                      #layer = "Donations",
                                      # provided by Catalogue Layer
                                      #marker = "donation",
                                      list_layout = s3db.req_commit_list_layout,
                                      )
                sites_widget = dict(label = "Sites",
                                    label_create = "Add New Site",
                                    type = "datalist",
                                    tablename = "org_facility",
                                    context = "organisation",
                                    filter = S3FieldSelector("obsolete") == False,
                                    icon = "icon-home",
                                    layer = "Facilities",
                                    # provided by Catalogue Layer
                                    #marker = "office",
                                    list_layout = render_sites,
                                    )
                record = r.record
                record_id = record.id
                # Edit button only for users with update permission on this record
                if current.auth.s3_has_permission("update", table, record_id=record_id):
                    edit_btn = A(I(_class = "icon icon-edit"),
                                 _href=URL(c="org", f="organisation",
                                           args=[record_id, "update.popup"],
                                           vars={"refresh": "datalist"}),
                                 _class="s3_modal",
                                 _title=s3.crud_strings["org_organisation"].title_update,
                                 )
                else:
                    edit_btn = ""
                s3db.configure("org_organisation",
                               profile_title = "%s : %s" % (s3.crud_strings["org_organisation"].title_list,
                                                            record.name),
                               profile_header = DIV(edit_btn,
                                                    IMG(_class="media-object",
                                                        _src=URL(c="default", f="download",
                                                                 args=[record.logo]),
                                                        ),
                                                    H2(record.name),
                                                    _class="profile_header",
                                                    ),
                               profile_widgets = [reqs_widget,
                                                  map_widget,
                                                  # @ToDo: Move to profile_header
                                                  #needs_widget,
                                                  #resources_widget,
                                                  commits_widget,
                                                  needs_widget,
                                                  contacts_widget,
                                                  sites_widget,
                                                  ]
                               )
            elif r.method == "datalist":
                # Stakeholder selection page
                # 2-column datalist, 6 rows per page
                s3.dl_pagelength = 12
                s3.dl_rowsize = 2
                from s3.s3filter import S3TextFilter, S3OptionsFilter
                filter_widgets = [
                    # no other filter widgets here yet?
                    ]
                # Needs page
                # Truncate details field(s)
                from s3.s3utils import s3_trunk8
                s3_trunk8(lines=2)
                get_vars = current.request.get_vars
                money = get_vars.get("needs.money", None)
                #vol = get_vars.get("needs.vol", None)
                if money:
                    # Filtered view: only money-related needs searchable
                    needs_fields = ["needs.money_details"]
                    s3.crud_strings["org_organisation"].title_list = T("Organizations soliciting Money")
                #elif vol:
                #    needs_fields = ["needs.vol_details"]
                #    s3.crud_strings["org_organisation"].title_list = T("Organizations with remote Volunteer opportunities")
                else:
                    yesno = {True: T("Yes"), False: T("No")}
                    needs_fields = ["needs.money_details", "needs.vol_details"]
                    filter_widgets.insert(0, S3OptionsFilter("needs.money",
                                                             options = yesno,
                                                             multiple = False,
                                                             cols = 2,
                                                             hidden = True,
                                                             ))
                    #filter_widgets.insert(1, S3OptionsFilter("needs.vol",
                    #                                         options = yesno,
                    #                                         multiple = False,
                    #                                         cols = 2,
                    #                                         hidden = True,
                    #                                         ))
                filter_widgets.insert(0, S3TextFilter(["name",
                                                       "acronym",
                                                       "website",
                                                       "comments",
                                                       ] + needs_fields,
                                                      label = T("Search")))
                # NOTE(review): ntable is assigned but never used below
                ntable = s3db.req_organisation_needs
                s3db.configure("org_organisation",
                               filter_widgets=filter_widgets
                               )
            # Represent used in rendering
            current.auth.settings.table_user.organisation_id.represent = s3db.org_organisation_represent
            # Hide fields
            table.organisation_type_id.readable = table.organisation_type_id.writable = False
            table.region_id.readable = table.region_id.writable = False
            table.country.readable = table.country.writable = False
            table.year.readable = table.year.writable = False
            # Return to List view after create/update/delete (unless done via Modal)
            url_next = URL(c="org", f="organisation", args="datalist")
            s3db.configure("org_organisation",
                           create_next = url_next,
                           delete_next = url_next,
                           update_next = url_next,
                           # We want the Create form to be in a modal, not inline, for consistency
                           listadd = False,
                           list_fields = list_fields,
                           list_layout = render_organisations,
                           )
        return True
    s3.prep = custom_prep
    # Custom postp
    standard_postp = s3.postp
    def custom_postp(r, output):
        if r.interactive and \
           isinstance(output, dict) and \
           current.auth.s3_has_permission("create", r.table):
            # Insert a Button to Create New in Modal
            output["showadd_btn"] = A(I(_class="icon icon-plus-sign big-add"),
                                      _href=URL(c="org", f="organisation",
                                                args=["create.popup"],
                                                vars={"refresh": "datalist"}),
                                      _class="btn btn-primary s3_modal",
                                      _role="button",
                                      _title=T("Create Organization"),
                                      )
        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)
        return output
    s3.postp = custom_postp
    return attr
settings.customise_org_organisation_controller = customise_org_organisation_controller
# -----------------------------------------------------------------------------
def customise_site_needs_fields(profile=False):
    """
        Configure req_site_needs fields for datalist rendering

        @param profile: whether this is for a Profile page
                        (which omits the site name column)
    """
    needs_table = current.s3db.req_site_needs
    needs_table.modified_on.represent = datetime_represent
    needs_table.modified_by.represent = s3_auth_user_represent_name

    fields = ["id",
              "organisation_id$id",
              # @ToDo: Are these better displayed elsewhere in Profile view?
              "organisation_id$name",
              "organisation_id$logo",
              "organisation_id$website",
              "location_id$L1",
              "location_id$L2",
              "location_id$L3",
              "location_id$L4",
              "location_id$addr_street",
              "phone1",
              #"goods",
              #"goods_details",
              #"vol",
              #"vol_details",
              "modified_on",
              "modified_by",
              ]
    if not profile:
        # Outside the Profile view we also show which site the needs belong to
        fields.append("site_id$name")

    current.s3db.configure("req_site_needs",
                           list_fields = fields,
                           )

# NOTE(review): "s3" looks undefined at module level here (siblings bind it
# locally via current.response.s3) - confirm this assignment is ever reached
s3.customise_site_needs_fields = customise_site_needs_fields
# -----------------------------------------------------------------------------
def customise_pr_person_controller(**attr):
    """
        Customise pr_person controller
        - Contact Directory CRUD strings, inline HR/contact/photo form,
          list fields & action buttons

        @param attr: controller attributes dict (returned, possibly modified)
    """
    s3db = current.s3db
    request = current.request
    s3 = current.response.s3
    tablename = "pr_person"
    table = s3db.pr_person
    # Custom PreP
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False
        if r.method == "validate":
            # Can't validate image without the file
            image_field = s3db.pr_image.image
            image_field.requires = None
        if r.interactive or r.representation == "aadata":
            if request.controller != "default":
                # CRUD Strings
                # NOTE(review): ADD_CONTACT is assigned but never used
                ADD_CONTACT = T("Create Contact")
                s3.crud_strings[tablename] = Storage(
                    label_create = T("Create Contact"),
                    title_display = T("Contact Details"),
                    title_list = T("Contact Directory"),
                    title_update = T("Edit Contact Details"),
                    label_list_button = T("List Contacts"),
                    label_delete_button = T("Delete Contact"),
                    msg_record_created = T("Contact added"),
                    msg_record_modified = T("Contact details updated"),
                    msg_record_deleted = T("Contact deleted"),
                    msg_list_empty = T("No Contacts currently registered"))
            MOBILE = settings.get_ui_label_mobile_phone()
            EMAIL = T("Email")
            htable = s3db.hrm_human_resource
            htable.organisation_id.widget = None
            site_field = htable.site_id
            represent = S3Represent(lookup="org_site")
            site_field.represent = represent
            site_field.requires = IS_ONE_OF(current.db, "org_site.site_id",
                                            represent,
                                            orderby = "org_site.name")
            from s3layouts import S3AddResourceLink
            site_field.comment = S3AddResourceLink(c="org", f="facility",
                                                   vars={"child": "site_id"},
                                                   label=T("Add New Site"),
                                                   title=T("Site"),
                                                   tooltip=T("If you don't see the Site in the list, you can add a new one by clicking link 'Add New Site'."))
            # ImageCrop widget doesn't currently work within an Inline Form
            s3db.pr_image.image.widget = None
            hr_fields = ["organisation_id",
                         "job_title_id",
                         "site_id",
                         "site_contact",
                         ]
            if r.method in ("create", "update"):
                # Context from a Profile page?
                organisation_id = request.get_vars.get("(organisation)", None)
                if organisation_id:
                    # Default & hide the org; drop it from the inline HR form
                    field = s3db.hrm_human_resource.organisation_id
                    field.default = organisation_id
                    field.readable = field.writable = False
                    hr_fields.remove("organisation_id")
            s3_sql_custom_fields = [
                    "first_name",
                    #"middle_name",
                    "last_name",
                    S3SQLInlineComponent(
                        "human_resource",
                        name = "human_resource",
                        label = "",
                        multiple = False,
                        fields = hr_fields,
                    ),
                    S3SQLInlineComponent(
                        "image",
                        name = "image",
                        label = T("Photo"),
                        multiple = False,
                        fields = [("", "image")],
                        filterby = dict(field = "profile",
                                        options = [True]
                                        )
                    ),
                ]
            list_fields = [(current.messages.ORGANISATION, "human_resource.organisation_id"),
                           "first_name",
                           #"middle_name",
                           "last_name",
                           (T("Job Title"), "human_resource.job_title_id"),
                           (T("Site"), "human_resource.site_id"),
                           (T("Site Contact"), "human_resource.site_contact"),
                           ]
            # Don't include Email/Phone for unauthenticated users
            if current.auth.is_logged_in():
                list_fields += [(MOBILE, "phone.value"),
                                (EMAIL, "email.value"),
                                ]
                # Inline contact components, inserted after the name fields
                s3_sql_custom_fields.insert(3,
                                            S3SQLInlineComponent(
                                            "contact",
                                            name = "phone",
                                            label = MOBILE,
                                            multiple = False,
                                            fields = [("", "value")],
                                            filterby = dict(field = "contact_method",
                                                            options = "SMS")),
                                            )
                s3_sql_custom_fields.insert(3,
                                            S3SQLInlineComponent(
                                            "contact",
                                            name = "email",
                                            label = EMAIL,
                                            multiple = False,
                                            fields = [("", "value")],
                                            filterby = dict(field = "contact_method",
                                                            options = "EMAIL")),
                                            )
            crud_form = S3SQLCustomForm(*s3_sql_custom_fields)
            if r.id and request.controller == "default":
                # Own profile: return to the read view afterwards
                url_next = URL(c="default", f="person", args=[r.id, "read"])
            else:
                # Return to List view after create/update/delete (unless done via Modal)
                url_next = URL(c="pr", f="person")
            s3db.configure(tablename,
                           create_next = url_next,
                           crud_form = crud_form,
                           delete_next = url_next,
                           list_fields = list_fields,
                           # Don't include a Create form in 'More' popups
                           listadd = False if r.method=="datalist" else True,
                           list_layout = render_contacts,
                           update_next = url_next,
                           )
            # Move fields to their desired Locations
            # Disabled as breaks submission of inline_component
            #i18n = []
            #iappend = i18n.append
            #iappend('''i18n.office="%s"''' % T("Office"))
            #iappend('''i18n.organisation="%s"''' % T("Organization"))
            #iappend('''i18n.job_title="%s"''' % T("Job Title"))
            #i18n = '''\n'''.join(i18n)
            #s3.js_global.append(i18n)
            #s3.scripts.append('/%s/static/themes/DRMP/js/contacts.js' % request.application)
        return True
    s3.prep = custom_prep
    # Custom postp
    standard_postp = s3.postp
    def custom_postp(r, output):
        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)
        if r.interactive and isinstance(output, dict):
            output["rheader"] = ""
            actions = [dict(label=str(T("Open")),
                            _class="action-btn",
                            url=URL(c="pr", f="person",
                                    args=["[id]", "read"]))
                       ]
            # All users just get "Open"
            #db = current.db
            #auth = current.auth
            #has_permission = auth.s3_has_permission
            #ownership_required = auth.permission.ownership_required
            #s3_accessible_query = auth.s3_accessible_query
            #if has_permission("update", table):
            #    action = dict(label=str(T("Edit")),
            #                  _class="action-btn",
            #                  url=URL(c="pr", f="person",
            #                          args=["[id]", "update"]),
            #                  )
            #    if ownership_required("update", table):
            #        # Check which records can be updated
            #        query = s3_accessible_query("update", table)
            #        rows = db(query).select(table._id)
            #        restrict = []
            #        rappend = restrict.append
            #        for row in rows:
            #            row_id = row.get("id", None)
            #            if row_id:
            #                rappend(str(row_id))
            #        action["restrict"] = restrict
            #    actions.append(action)
            #if has_permission("delete", table):
            #    action = dict(label=str(T("Delete")),
            #                  _class="action-btn",
            #                  url=URL(c="pr", f="person",
            #                          args=["[id]", "delete"]),
            #                  )
            #    if ownership_required("delete", table):
            #        # Check which records can be deleted
            #        query = s3_accessible_query("delete", table)
            #        rows = db(query).select(table._id)
            #        restrict = []
            #        rappend = restrict.append
            #        for row in rows:
            #            row_id = row.get("id", None)
            #            if row_id:
            #                rappend(str(row_id))
            #        action["restrict"] = restrict
            #    actions.append(action)
            s3.actions = actions
            if "form" in output:
                output["form"].add_class("pr_person")
            elif "item" in output and hasattr(output["item"], "add_class"):
                output["item"].add_class("pr_person")
        return output
    s3.postp = custom_postp
    return attr
settings.customise_pr_person_controller = customise_pr_person_controller
# -----------------------------------------------------------------------------
def customise_doc_document_controller(**attr):
    """
        Customise doc_document controller
        - CRUD strings, mandatory name, simplified form/list fields

        @param attr: controller attributes dict (returned unchanged)
    """
    s3 = current.response.s3
    s3db = current.s3db
    tablename = "doc_document"
    table = s3db.doc_document

    # Custom PreP
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            # Fixed: honour a failed standard prep, consistent with the
            # other custom_prep implementations in this template
            if not result:
                return False

        # Filter Out Docs from Newsfeed
        current.response.s3.filter = (table.name != None)

        if r.interactive:
            s3.crud_strings[tablename] = Storage(
                label_create = T("Add Document"),
                title_display = T("Document"),
                title_list = T("Documents"),
                title_update = T("Edit Document"),
                label_list_button = T("List New Documents"),
                label_delete_button = T("Remove Documents"),
                msg_record_created = T("Documents added"),
                msg_record_modified = T("Documents updated"),
                msg_record_deleted = T("Documents removed"),
                msg_list_empty = T("No Documents currently recorded"))

            # Force added docs to have a name
            table.name.requires = IS_NOT_EMPTY()

            list_fields = ["name",
                           "file",
                           "url",
                           "organisation_id",
                           "comments",
                           ]
            crud_form = S3SQLCustomForm(*list_fields)
            s3db.configure(tablename,
                           list_fields = list_fields,
                           crud_form = crud_form,
                           )
        return True
    s3.prep = custom_prep

    return attr

settings.customise_doc_document_controller = customise_doc_document_controller
# -----------------------------------------------------------------------------
# Request-module settings
settings.req.req_type = ["Other"]
settings.req.requester_label = "Contact"
# Uncomment if the User Account logging the Request is NOT normally the Requester
settings.req.requester_is_author = False
# Uncomment to have Donations include a 'Value' field
settings.req.commit_value = True
# Uncomment if the User Account logging the Commitment is NOT normally the Committer
#settings.req.committer_is_author = False
# Uncomment to allow Donations to be made without a matching Request
#settings.req.commit_without_request = True
# Set the Requester as being an HR for the Site if no HR record yet & as Site contact if none yet exists
settings.req.requester_to_site = True
def customise_req_req_controller(**attr):
    """
        Customise req_req controller
        - Open-request datalist filter & Profile page with Donations/Sites widgets

        @param attr: controller attributes dict (returned, possibly modified)
    """
    s3 = current.response.s3
    # Custom PreP
    #standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        #if callable(standard_prep):
        #    result = standard_prep(r)
        s3db = current.s3db
        if r.component_name == "commit":
            s3db.req_customise_commit_fields()
        else:
            s3db.req_customise_req_fields()
        if r.method in ("datalist", "datalist.dl"):
            # Only show open requests (status 0 or 1)
            s3.filter = (r.table.req_status.belongs([0, 1]))
        elif r.method == "profile":
            # Customise tables used by widgets
            s3db.req_customise_commit_fields()
            customise_org_facility_fields()
            record = r.record
            record_id = record.id
            commits_widget = dict(label = "Donations",
                                  label_create = "Add New Donation",
                                  type = "datalist",
                                  tablename = "req_commit",
                                  context = "request",
                                  default = "req_id=%s" % record_id,
                                  filter = S3FieldSelector("cancel") == False,
                                  icon = "icon-truck",
                                  show_on_map = False,
                                  #layer = "Donations",
                                  # provided by Catalogue Layer
                                  #marker = "donation",
                                  list_layout = s3db.req_commit_list_layout,
                                  )
            # NOTE(review): "filter" shadows the builtin within this scope
            filter = (S3FieldSelector("obsolete") == False)
            sites_widget = dict(label = "Sites",
                                #label_create = "Add New Site",
                                type = "datalist",
                                tablename = "org_facility",
                                multiple = False,
                                context = "request",
                                filter = filter,
                                icon = "icon-home",
                                layer = "Facilities",
                                # provided by Catalogue Layer
                                #marker = "office",
                                list_layout = render_sites,
                                )
            # Edit button only for users with update permission on this record
            if current.auth.s3_has_permission("update", r.table, record_id=record_id):
                edit_btn = A(I(_class = "icon icon-edit"),
                             _href=URL(c="req", f="req",
                                       args=[record_id, "update.popup"],
                                       vars={"refresh": "datalist"}),
                             _class="s3_modal",
                             _title=s3.crud_strings["req_req"].title_update,
                             )
            else:
                edit_btn = ""
            # Lookup the requesting site & its organisation for the header
            # NOTE(review): assumes the request always has a resolvable site
            # (site is dereferenced without a None-check below)
            db = current.db
            stable = db.org_site
            query = (stable.site_id == record.site_id)
            site = db(query).select(stable.name,
                                    stable.location_id,
                                    stable.organisation_id,
                                    limitby=(0, 1)
                                    ).first()
            location = s3db.gis_LocationRepresent(sep=" | ")(site.location_id)
            otable = db.org_organisation
            org = db(otable.id == site.organisation_id).select(otable.name,
                                                               otable.logo,
                                                               limitby=(0, 1)
                                                               ).first()
            if org and org.logo:
                logo = URL(c="default", f="download", args=[org.logo])
            else:
                # @ToDo: Placeholder
                logo = "#"
            s3db.configure("req_req",
                           profile_title = s3.crud_strings["req_req"].title_list,
                           profile_header = DIV(edit_btn,
                                                A(IMG(_class="media-object",
                                                      _src=logo,
                                                      ),
                                                  _class="pull-left",
                                                  #_href=org_url,
                                                  ),
                                                H2(site.name),
                                                P(I(_class="icon-sitemap"),
                                                  " ",
                                                  SPAN(org and org.name or current.messages.NONE),
                                                  " ",
                                                  _class="card_1_line",
                                                  ),
                                                P(I(_class="icon-globe"),
                                                  " ",
                                                  SPAN(location),
                                                  " ",
                                                  _class="card_1_line",
                                                  ),
                                                P(record.purpose,
                                                  _class="s3-truncate"),
                                                _class="profile_header",
                                                ),
                           profile_widgets = [commits_widget,
                                              sites_widget,
                                              ],
                           )
        return True
    s3.prep = custom_prep
    # Disable postp
    s3.postp = None
    return attr
settings.customise_req_req_controller = customise_req_req_controller
# -----------------------------------------------------------------------------
def customise_req_commit_controller(**attr):
    """
        Customise req_commit controller
        - apply commit field config & hide cancelled donations from datalists

        @param attr: controller attributes dict (returned, possibly modified)
    """
    s3 = current.response.s3

    # Custom PreP (standard prep deliberately not chained here)
    def commit_prep(r):
        current.s3db.req_customise_commit_fields()
        if r.method in ("datalist", "datalist.dl"):
            # Exclude cancelled donations
            s3.filter = (r.table.cancel != True)
        return True
    s3.prep = commit_prep

    # Disable postp
    s3.postp = None

    return attr

settings.customise_req_commit_controller = customise_req_commit_controller
# =============================================================================
# Modules
# Comment/uncomment modules here to disable/enable them
# Ordered mapping of module prefix -> module metadata (order defines menu order);
# commented-out entries are disabled modules
settings.modules = OrderedDict([
    # Core modules which shouldn't be disabled
    ("default", Storage(
        name_nice = "Home",
        restricted = False, # Use ACLs to control access to this module
        access = None,      # All Users (inc Anonymous) can see this module in the default menu & access the controller
        module_type = None  # This item is not shown in the menu
    )),
    ("admin", Storage(
        name_nice = "Administration",
        #description = "Site Administration",
        restricted = True,
        access = "|1|",     # Only Administrators can see this module in the default menu & access the controller
        module_type = None  # This item is handled separately for the menu
    )),
    ("appadmin", Storage(
        name_nice = "Administration",
        #description = "Site Administration",
        restricted = True,
        module_type = None  # No Menu
    )),
    ("errors", Storage(
        name_nice = "Ticket Viewer",
        #description = "Needed for Breadcrumbs",
        restricted = False,
        module_type = None  # No Menu
    )),
    ("sync", Storage(
        name_nice = "Synchronization",
        #description = "Synchronization",
        restricted = True,
        access = "|1|",     # Only Administrators can see this module in the default menu & access the controller
        module_type = None  # This item is handled separately for the menu
    )),
    ("translate", Storage(
        name_nice = "Translation Functionality",
        #description = "Selective translation of strings based on module.",
        module_type = None,
    )),
    ("gis", Storage(
        name_nice = "Map",
        #description = "Situation Awareness & Geospatial Analysis",
        restricted = True,
        module_type = 1,    # 1st item in the menu
    )),
    ("pr", Storage(
        name_nice = "Persons",
        #description = "Central point to record details on People",
        restricted = True,
        access = "|1|",     # Only Administrators can see this module in the default menu (access to controller is possible to all still)
        module_type = None
    )),
    ("org", Storage(
        name_nice = "Organizations",
        #description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
        restricted = True,
        module_type = None
    )),
    # All modules below here should be possible to disable safely
    ("hrm", Storage(
        name_nice = "Contacts",
        #description = "Human Resources Management",
        restricted = True,
        module_type = None,
    )),
    ("cms", Storage(
        name_nice = "Content Management",
        restricted = True,
        module_type = None,
    )),
    ("doc", Storage(
        name_nice = "Documents",
        #description = "A library of digital resources, such as photos, documents and reports",
        restricted = True,
        module_type = None,
    )),
    ("msg", Storage(
        name_nice = "Messaging",
        #description = "Sends & Receives Alerts via Email & SMS",
        restricted = True,
        # The user-visible functionality of this module isn't normally required. Rather it's main purpose is to be accessed from other modules.
        module_type = None,
    )),
    #("event", Storage(
    #    name_nice = "Disasters",
    #    #description = "Events",
    #    restricted = True,
    #    module_type = None
    #)),
    ("req", Storage(
        name_nice = "Requests",
        #description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
        restricted = True,
        module_type = None,
    )),
    #("project", Storage(
    #    name_nice = "Projects",
    #    restricted = True,
    #    module_type = None
    #)),
    ("stats", Storage(
        name_nice = "Statistics",
        restricted = True,
        module_type = None
    )),
    #("vulnerability", Storage(
    #    name_nice = "Vulnerability",
    #    restricted = True,
    #    module_type = None
    #)),
    #("transport", Storage(
    #    name_nice = "Transport",
    #    restricted = True,
    #    module_type = None
    #)),
    #("hms", Storage(
    #    name_nice = "Hospitals",
    #    restricted = True,
    #    module_type = None
    #)),
    #("cr", Storage(
    #    name_nice = "Shelters",
    #    restricted = True,
    #    module_type = None
    #)),
    ("supply", Storage(
        name_nice = "Supply Chain Management",
        restricted = True,
        module_type = None
    )),
])
| code-for-india/sahana_shelter_worldbank | private/templates/Philippines/config.py | Python | mit | 128,438 | [
"Amber"
] | 85b7c76e9d9cc9c080dd7437ccf78ccc6c6d779533907fe14a47e9f57d074a01 |
"""Tests for user-friendly public interface to polynomial functions. """
from sympy.polys.polytools import (
Poly, PurePoly, poly,
parallel_poly_from_expr,
degree, degree_list,
LC, LM, LT,
pdiv, prem, pquo, pexquo,
div, rem, quo, exquo,
half_gcdex, gcdex, invert,
subresultants,
resultant, discriminant,
terms_gcd, cofactors,
gcd, gcd_list,
lcm, lcm_list,
trunc,
monic, content, primitive,
compose, decompose,
sturm,
gff_list, gff,
sqf_norm, sqf_part, sqf_list, sqf,
factor_list, factor,
intervals, refine_root, count_roots,
real_roots, nroots, ground_roots,
nth_power_roots_poly,
cancel, reduced, groebner,
GroebnerBasis, is_zero_dimensional)
from sympy.polys.polyerrors import (
MultivariatePolynomialError,
OperationNotSupported,
ExactQuotientFailed,
PolificationFailed,
ComputationFailed,
UnificationFailed,
RefinementFailed,
GeneratorsNeeded,
GeneratorsError,
PolynomialError,
CoercionFailed,
NotAlgebraic,
DomainError,
OptionError,
FlagError)
from sympy.polys.polyclasses import DMP, DMF
from sympy.polys.domains import FF, ZZ, QQ, RR, EX
from sympy.polys.monomialtools import lex, grlex, grevlex
from sympy import (
S, Integer, Rational, Float, Mul, Symbol, symbols, sqrt,
exp, sin, expand, oo, I, pi, re, im, RootOf, Eq, Tuple)
from sympy.core.compatibility import iterable
from sympy.core.mul import _keep_coeff
from sympy.utilities.pytest import raises, XFAIL
# Generator/coefficient symbols shared by every test in this module.
x,y,z,p,q,r,s,t,u,v,w,a,b,c,d,e = symbols('x,y,z,p,q,r,s,t,u,v,w,a,b,c,d,e')
def _epsilon_eq(a, b):
for x, y in zip(a, b):
if abs(x-y) > 1e-10:
return False
return True
def _strict_eq(a, b):
    """Recursive structural equality: matching types and lengths down to
    the leaves, where Polys are compared with ``eq(..., strict=True)``."""
    if type(a) != type(b):
        return False
    if not iterable(a):
        return isinstance(a, Poly) and a.eq(b, strict=True)
    return len(a) == len(b) and all(_strict_eq(u, v) for u, v in zip(a, b))
def test_Poly_from_dict():
    """Build polys from ``{monom: coeff}`` dicts over FF(3), ZZ and QQ,
    checking the internal DMP representation (coefficients reduce mod 3)."""
    K = FF(3)
    assert Poly.from_dict({0: 1, 1: 2}, gens=x, domain=K).rep == DMP([K(2),K(1)], K)
    assert Poly.from_dict({0: 1, 1: 5}, gens=x, domain=K).rep == DMP([K(2),K(1)], K)
    assert Poly.from_dict({(0,): 1, (1,): 2}, gens=x, domain=K).rep == DMP([K(2),K(1)], K)
    assert Poly.from_dict({(0,): 1, (1,): 5}, gens=x, domain=K).rep == DMP([K(2),K(1)], K)
    assert Poly.from_dict({(0, 0): 1, (1, 1): 2}, gens=(x,y), domain=K).rep == DMP([[K(2),K(0)],[K(1)]], K)
    assert Poly.from_dict({0: 1, 1: 2}, gens=x).rep == DMP([ZZ(2),ZZ(1)], ZZ)
    assert Poly.from_dict({0: 1, 1: 2}, gens=x, field=True).rep == DMP([QQ(2),QQ(1)], QQ)
    assert Poly.from_dict({0: 1, 1: 2}, gens=x, domain=ZZ).rep == DMP([ZZ(2),ZZ(1)], ZZ)
    assert Poly.from_dict({0: 1, 1: 2}, gens=x, domain=QQ).rep == DMP([QQ(2),QQ(1)], QQ)
    assert Poly.from_dict({(0,): 1, (1,): 2}, gens=x).rep == DMP([ZZ(2),ZZ(1)], ZZ)
    assert Poly.from_dict({(0,): 1, (1,): 2}, gens=x, field=True).rep == DMP([QQ(2),QQ(1)], QQ)
    assert Poly.from_dict({(0,): 1, (1,): 2}, gens=x, domain=ZZ).rep == DMP([ZZ(2),ZZ(1)], ZZ)
    assert Poly.from_dict({(0,): 1, (1,): 2}, gens=x, domain=QQ).rep == DMP([QQ(2),QQ(1)], QQ)
def test_Poly_from_list():
    """Build univariate polys from coefficient lists (highest degree first);
    a nested list (multivariate) input must fail."""
    K = FF(3)
    assert Poly.from_list([2,1], gens=x, domain=K).rep == DMP([K(2),K(1)], K)
    assert Poly.from_list([5,1], gens=x, domain=K).rep == DMP([K(2),K(1)], K)
    assert Poly.from_list([2,1], gens=x).rep == DMP([ZZ(2),ZZ(1)], ZZ)
    assert Poly.from_list([2,1], gens=x, field=True).rep == DMP([QQ(2),QQ(1)], QQ)
    assert Poly.from_list([2,1], gens=x, domain=ZZ).rep == DMP([ZZ(2),ZZ(1)], ZZ)
    assert Poly.from_list([2,1], gens=x, domain=QQ).rep == DMP([QQ(2),QQ(1)], QQ)
    assert Poly.from_list([0, 1.0], gens=x).rep == DMP([RR(1.0)], RR)
    assert Poly.from_list([1.0, 0], gens=x).rep == DMP([RR(1.0), RR(0.0)], RR)
    raises(MultivariatePolynomialError, "Poly.from_list([[]], gens=(x,y))")
def test_Poly_from_poly():
    """Convert Poly instances across domains, generator sets and moduli,
    checking the resulting internal DMP representations.

    Fix: the final assertion used to be an exact duplicate of the one
    before it (``gens=(x,y), field=True`` twice); by the symmetry of the
    preceding ``(y,x)`` domain checks it is corrected to ``gens=(y,x)``.
    """
    f = Poly(x+7, x, domain=ZZ)
    g = Poly(x+2, x, modulus=3)
    h = Poly(x+y, x, y, domain=ZZ)

    K = FF(3)

    # -- univariate f = x + 7 over ZZ --
    assert Poly.from_poly(f) == f
    assert Poly.from_poly(f, domain=K).rep == DMP([K(1),K(1)], K)
    assert Poly.from_poly(f, domain=ZZ).rep == DMP([1,7], ZZ)
    assert Poly.from_poly(f, domain=QQ).rep == DMP([1,7], QQ)

    assert Poly.from_poly(f, gens=x) == f
    assert Poly.from_poly(f, gens=x, domain=K).rep == DMP([K(1),K(1)], K)
    assert Poly.from_poly(f, gens=x, domain=ZZ).rep == DMP([1,7], ZZ)
    assert Poly.from_poly(f, gens=x, domain=QQ).rep == DMP([1,7], QQ)

    # over gens=y, x becomes a coefficient, so ground domains fail
    assert Poly.from_poly(f, gens=y) == Poly(x + 7, y, domain='ZZ[x]')
    raises(CoercionFailed, "Poly.from_poly(f, gens=y, domain=K)")
    raises(CoercionFailed, "Poly.from_poly(f, gens=y, domain=ZZ)")
    raises(CoercionFailed, "Poly.from_poly(f, gens=y, domain=QQ)")

    assert Poly.from_poly(f, gens=(x,y)) == Poly(x + 7, x, y, domain='ZZ')
    assert Poly.from_poly(f, gens=(x,y), domain=ZZ) == Poly(x + 7, x, y, domain='ZZ')
    assert Poly.from_poly(f, gens=(x,y), domain=QQ) == Poly(x + 7, x, y, domain='QQ')
    assert Poly.from_poly(f, gens=(x,y), modulus=3) == Poly(x + 7, x, y, domain='FF(3)')

    K = FF(2)

    # -- univariate g = x + 2 over GF(3) --
    assert Poly.from_poly(g) == g
    assert Poly.from_poly(g, domain=ZZ).rep == DMP([1,-1], ZZ)
    raises(CoercionFailed, "Poly.from_poly(g, domain=QQ)")
    assert Poly.from_poly(g, domain=K).rep == DMP([K(1),K(0)], K)

    assert Poly.from_poly(g, gens=x) == g
    assert Poly.from_poly(g, gens=x, domain=ZZ).rep == DMP([1,-1], ZZ)
    raises(CoercionFailed, "Poly.from_poly(g, gens=x, domain=QQ)")
    assert Poly.from_poly(g, gens=x, domain=K).rep == DMP([K(1),K(0)], K)

    K = FF(3)

    # -- bivariate h = x + y over ZZ --
    assert Poly.from_poly(h) == h
    assert Poly.from_poly(h, domain=ZZ).rep == DMP([[ZZ(1)],[ZZ(1),ZZ(0)]], ZZ)
    assert Poly.from_poly(h, domain=QQ).rep == DMP([[QQ(1)],[QQ(1),QQ(0)]], QQ)
    assert Poly.from_poly(h, domain=K).rep == DMP([[K(1)],[K(1),K(0)]], K)

    # projecting onto a single generator needs a polynomial coefficient ring
    assert Poly.from_poly(h, gens=x) == Poly(x+y, x, domain=ZZ[y])
    raises(CoercionFailed, "Poly.from_poly(h, gens=x, domain=ZZ)")
    assert Poly.from_poly(h, gens=x, domain=ZZ[y]) == Poly(x+y, x, domain=ZZ[y])
    raises(CoercionFailed, "Poly.from_poly(h, gens=x, domain=QQ)")
    assert Poly.from_poly(h, gens=x, domain=QQ[y]) == Poly(x+y, x, domain=QQ[y])
    raises(CoercionFailed, "Poly.from_poly(h, gens=x, modulus=3)")

    assert Poly.from_poly(h, gens=y) == Poly(x+y, y, domain=ZZ[x])
    raises(CoercionFailed, "Poly.from_poly(h, gens=y, domain=ZZ)")
    assert Poly.from_poly(h, gens=y, domain=ZZ[x]) == Poly(x+y, y, domain=ZZ[x])
    raises(CoercionFailed, "Poly.from_poly(h, gens=y, domain=QQ)")
    assert Poly.from_poly(h, gens=y, domain=QQ[x]) == Poly(x+y, y, domain=QQ[x])
    raises(CoercionFailed, "Poly.from_poly(h, gens=y, modulus=3)")

    assert Poly.from_poly(h, gens=(x,y)) == h
    assert Poly.from_poly(h, gens=(x,y), domain=ZZ).rep == DMP([[ZZ(1)],[ZZ(1),ZZ(0)]], ZZ)
    assert Poly.from_poly(h, gens=(x,y), domain=QQ).rep == DMP([[QQ(1)],[QQ(1),QQ(0)]], QQ)
    assert Poly.from_poly(h, gens=(x,y), domain=K).rep == DMP([[K(1)],[K(1),K(0)]], K)

    # x + y is symmetric, so swapping generators yields the same rep
    assert Poly.from_poly(h, gens=(y,x)).rep == DMP([[ZZ(1)],[ZZ(1),ZZ(0)]], ZZ)
    assert Poly.from_poly(h, gens=(y,x), domain=ZZ).rep == DMP([[ZZ(1)],[ZZ(1),ZZ(0)]], ZZ)
    assert Poly.from_poly(h, gens=(y,x), domain=QQ).rep == DMP([[QQ(1)],[QQ(1),QQ(0)]], QQ)
    assert Poly.from_poly(h, gens=(y,x), domain=K).rep == DMP([[K(1)],[K(1),K(0)]], K)

    assert Poly.from_poly(h, gens=(x,y), field=True).rep == DMP([[QQ(1)],[QQ(1),QQ(0)]], QQ)
    assert Poly.from_poly(h, gens=(y,x), field=True).rep == DMP([[QQ(1)],[QQ(1),QQ(0)]], QQ)
def test_Poly_from_expr():
    """Build polys from Expr objects; pure ground constants have no
    generators and must fail, others infer or accept explicit gens."""
    raises(GeneratorsNeeded, "Poly.from_expr(S(0))")
    raises(GeneratorsNeeded, "Poly.from_expr(S(7))")
    K = FF(3)
    assert Poly.from_expr(x + 5, domain=K).rep == DMP([K(1),K(2)], K)
    assert Poly.from_expr(y + 5, domain=K).rep == DMP([K(1),K(2)], K)
    assert Poly.from_expr(x + 5, x, domain=K).rep == DMP([K(1),K(2)], K)
    assert Poly.from_expr(y + 5, y, domain=K).rep == DMP([K(1),K(2)], K)
    assert Poly.from_expr(x + y, domain=K).rep == DMP([[K(1)],[K(1),K(0)]], K)
    assert Poly.from_expr(x + y, x, y, domain=K).rep == DMP([[K(1)],[K(1),K(0)]], K)
    assert Poly.from_expr(x + 5).rep == DMP([1,5], ZZ)
    assert Poly.from_expr(y + 5).rep == DMP([1,5], ZZ)
    assert Poly.from_expr(x + 5, x).rep == DMP([1,5], ZZ)
    assert Poly.from_expr(y + 5, y).rep == DMP([1,5], ZZ)
    assert Poly.from_expr(x + 5, domain=ZZ).rep == DMP([1,5], ZZ)
    assert Poly.from_expr(y + 5, domain=ZZ).rep == DMP([1,5], ZZ)
    assert Poly.from_expr(x + 5, x, domain=ZZ).rep == DMP([1,5], ZZ)
    assert Poly.from_expr(y + 5, y, domain=ZZ).rep == DMP([1,5], ZZ)
    assert Poly.from_expr(x + 5, x, y, domain=ZZ).rep == DMP([[1],[5]], ZZ)
    assert Poly.from_expr(y + 5, x, y, domain=ZZ).rep == DMP([[1,5]], ZZ)
def test_Poly__new__():
    """Exercise constructor option validation (conflicting/unsupported
    options raise) and coercions from dicts, sequences and floats."""
    raises(GeneratorsError, "Poly(x+1, x, x)")
    raises(GeneratorsError, "Poly(x+y, x, y, domain=ZZ[x])")
    raises(GeneratorsError, "Poly(x+y, x, y, domain=ZZ[y])")
    raises(OptionError, "Poly(x, x, symmetric=True)")
    raises(OptionError, "Poly(x+2, x, modulus=3, domain=QQ)")
    raises(OptionError, "Poly(x+2, x, domain=ZZ, gaussian=True)")
    raises(OptionError, "Poly(x+2, x, modulus=3, gaussian=True)")
    raises(OptionError, "Poly(x+2, x, domain=ZZ, extension=[sqrt(3)])")
    raises(OptionError, "Poly(x+2, x, modulus=3, extension=[sqrt(3)])")
    raises(OptionError, "Poly(x+2, x, domain=ZZ, extension=True)")
    raises(OptionError, "Poly(x+2, x, modulus=3, extension=True)")
    raises(OptionError, "Poly(x+2, x, domain=ZZ, greedy=True)")
    raises(OptionError, "Poly(x+2, x, domain=QQ, field=True)")
    raises(OptionError, "Poly(x+2, x, domain=ZZ, greedy=False)")
    raises(OptionError, "Poly(x+2, x, domain=QQ, field=False)")
    raises(NotImplementedError, "Poly(x+1, x, modulus=3, order='grlex')")
    raises(NotImplementedError, "Poly(x+1, x, order='grlex')")
    raises(GeneratorsNeeded, "Poly({1: 2, 0: 1})")
    raises(GeneratorsNeeded, "Poly([2, 1])")
    raises(GeneratorsNeeded, "Poly((2, 1))")
    raises(GeneratorsNeeded, "Poly(1)")
    # dicts, iterators, lists and tuples of coefficients are all accepted
    f = a*x**2 + b*x + c
    assert Poly({2: a, 1: b, 0: c}, x) == f
    assert Poly(iter([a, b, c]), x) == f
    assert Poly([a, b, c], x) == f
    assert Poly((a, b, c), x) == f
    assert Poly(Poly(a*x + b*y, x, y), x) == Poly(a*x + b*y, x)
    assert Poly(3*x**2 + 2*x + 1, domain='ZZ').all_coeffs() == [3, 2, 1]
    assert Poly(3*x**2 + 2*x + 1, domain='QQ').all_coeffs() == [3, 2, 1]
    assert Poly(3*x**2 + 2*x + 1, domain='RR').all_coeffs() == [3.0, 2.0, 1.0]
    raises(CoercionFailed, "Poly(3*x**2/5 + 2*x/5 + 1, domain='ZZ')")
    assert Poly(3*x**2/5 + 2*x/5 + 1, domain='QQ').all_coeffs() == [S(3)/5, S(2)/5, 1]
    assert _epsilon_eq(Poly(3*x**2/5 + 2*x/5 + 1, domain='RR').all_coeffs(), [0.6, 0.4, 1.0])
    assert Poly(3.0*x**2 + 2.0*x + 1, domain='ZZ').all_coeffs() == [3, 2, 1]
    assert Poly(3.0*x**2 + 2.0*x + 1, domain='QQ').all_coeffs() == [3, 2, 1]
    assert Poly(3.0*x**2 + 2.0*x + 1, domain='RR').all_coeffs() == [3.0, 2.0, 1.0]
    raises(CoercionFailed, "Poly(3.1*x**2 + 2.1*x + 1, domain='ZZ')")
    assert Poly(3.1*x**2 + 2.1*x + 1, domain='QQ').all_coeffs() == [S(31)/10, S(21)/10, 1]
    assert Poly(3.1*x**2 + 2.1*x + 1, domain='RR').all_coeffs() == [3.1, 2.1, 1.0]
    assert Poly({(2,1): 1, (1,2): 2, (1,1): 3}, x, y) == \
        Poly(x**2*y + 2*x*y**2 + 3*x*y, x, y)
    assert Poly(x**2 + 1, extension=I).get_domain() == QQ.algebraic_field(I)
    # symmetric=True maps residues to (-m/2, m/2]; symmetric=False to [0, m)
    f = 3*x**5 - x**4 + x**3 - x** 2 + 65538
    assert Poly(f, x, modulus=65537, symmetric=True) == \
        Poly(3*x**5 - x**4 + x**3 - x** 2 + 1, x, modulus=65537, symmetric=True)
    assert Poly(f, x, modulus=65537, symmetric=False) == \
        Poly(3*x**5 + 65536*x**4 + x**3 + 65536*x** 2 + 1, x, modulus=65537, symmetric=False)
    assert Poly(x**2 + x + 1.0).get_domain() == RR
def test_Poly__args():
    """`Poly.args` exposes the polynomial's expression form."""
    f = Poly(x**2 + 1)
    assert f.args == [x**2 + 1]
def test_Poly__gens():
    """Generator detection and ordering: explicit gens, inferred gens,
    and reordering via the ``sort=`` and ``wrt=`` options."""
    assert Poly((x-p)*(x-q), x).gens == (x,)
    assert Poly((x-p)*(x-q), p).gens == (p,)
    assert Poly((x-p)*(x-q), q).gens == (q,)
    assert Poly((x-p)*(x-q), x, p).gens == (x, p)
    assert Poly((x-p)*(x-q), x, q).gens == (x, q)
    assert Poly((x-p)*(x-q), x, p, q).gens == (x, p, q)
    assert Poly((x-p)*(x-q), p, x, q).gens == (p, x, q)
    assert Poly((x-p)*(x-q), p, q, x).gens == (p, q, x)
    assert Poly((x-p)*(x-q)).gens == (x, p, q)
    assert Poly((x-p)*(x-q), sort='x > p > q').gens == (x, p, q)
    assert Poly((x-p)*(x-q), sort='p > x > q').gens == (p, x, q)
    assert Poly((x-p)*(x-q), sort='p > q > x').gens == (p, q, x)
    # explicit gens win over sort=
    assert Poly((x-p)*(x-q), x, p, q, sort='p > q > x').gens == (x, p, q)
    assert Poly((x-p)*(x-q), wrt='x').gens == (x, p, q)
    assert Poly((x-p)*(x-q), wrt='p').gens == (p, x, q)
    assert Poly((x-p)*(x-q), wrt='q').gens == (q, x, p)
    assert Poly((x-p)*(x-q), wrt=x).gens == (x, p, q)
    assert Poly((x-p)*(x-q), wrt=p).gens == (p, x, q)
    assert Poly((x-p)*(x-q), wrt=q).gens == (q, x, p)
    # explicit gens win over wrt=; wrt= combines with sort= for the rest
    assert Poly((x-p)*(x-q), x, p, q, wrt='p').gens == (x, p, q)
    assert Poly((x-p)*(x-q), wrt='p', sort='q > x').gens == (p, q, x)
    assert Poly((x-p)*(x-q), wrt='q', sort='p > x').gens == (q, p, x)
def test_Poly_zero():
    """The ``zero`` attribute is the zero poly over the same domain."""
    for f, dom in [(Poly(x), ZZ), (Poly(x/2), QQ)]:
        assert f.zero == Poly(0, x, domain=dom)
def test_Poly_one():
    """The ``one`` attribute is the unit poly over the same domain."""
    for f, dom in [(Poly(x), ZZ), (Poly(x/2), QQ)]:
        assert f.one == Poly(1, x, domain=dom)
def test_Poly__unify():
    """``_unify`` brings two polys to a common generator set and domain;
    only the resulting DMP representations (slots [2:]) are compared."""
    raises(UnificationFailed, "Poly(x)._unify(y)")
    K = FF(3)
    # different moduli cannot be unified
    raises(UnificationFailed, "Poly(x, x, modulus=3)._unify(Poly(x, x, modulus=5))")
    assert Poly(x, x, modulus=3)._unify(Poly(y, y, modulus=3))[2:] == (DMP([[K(1)],[]], K), DMP([[K(1),K(0)]], K))
    assert Poly(y, x, y)._unify(Poly(x, x, modulus=3))[2:] == (DMP([[K(1),K(0)]], K), DMP([[K(1)],[]], K))
    assert Poly(x, x, modulus=3)._unify(Poly(y, x, y))[2:] == (DMP([[K(1)],[]], K), DMP([[K(1),K(0)]], K))
    # ZZ/QQ pairs unify to QQ; gens unify to the union in canonical order
    assert Poly(x+1, x)._unify(Poly(x+2, x))[2:] == (DMP([1, 1], ZZ), DMP([1, 2], ZZ))
    assert Poly(x+1, x, domain='QQ')._unify(Poly(x+2, x))[2:] == (DMP([1, 1], QQ), DMP([1, 2], QQ))
    assert Poly(x+1, x)._unify(Poly(x+2, x, domain='QQ'))[2:] == (DMP([1, 1], QQ), DMP([1, 2], QQ))
    assert Poly(x+1, x)._unify(Poly(x+2, x, y))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
    assert Poly(x+1, x, domain='QQ')._unify(Poly(x+2, x, y))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x+1, x)._unify(Poly(x+2, x, y, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x+1, x, y)._unify(Poly(x+2, x))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
    assert Poly(x+1, x, y, domain='QQ')._unify(Poly(x+2, x))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x+1, x, y)._unify(Poly(x+2, x, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x+1, x, y)._unify(Poly(x+2, x, y))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
    assert Poly(x+1, x, y, domain='QQ')._unify(Poly(x+2, x, y))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x+1, x, y)._unify(Poly(x+2, x, y, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x+1, x)._unify(Poly(x+2, y, x))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
    assert Poly(x+1, x, domain='QQ')._unify(Poly(x+2, y, x))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    assert Poly(x+1, x)._unify(Poly(x+2, y, x, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    assert Poly(x+1, y, x)._unify(Poly(x+2, x))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
    assert Poly(x+1, y, x, domain='QQ')._unify(Poly(x+2, x))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    assert Poly(x+1, y, x)._unify(Poly(x+2, x, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    assert Poly(x+1, x, y)._unify(Poly(x+2, y, x))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
    assert Poly(x+1, x, y, domain='QQ')._unify(Poly(x+2, y, x))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x+1, x, y)._unify(Poly(x+2, y, x, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x+1, y, x)._unify(Poly(x+2, x, y))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
    assert Poly(x+1, y, x, domain='QQ')._unify(Poly(x+2, x, y))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    assert Poly(x+1, y, x)._unify(Poly(x+2, x, y, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    # polynomial-ring vs fraction-field coefficient domains unify to the field
    assert Poly(a*x, x, domain='ZZ[a]')._unify(Poly(a*b*x, x, domain='ZZ(a,b)'))[2:] == \
        (DMP([DMF(([[1], []], [[1]]), ZZ), DMF(([[]], [[1]]), ZZ)], ZZ.frac_field(a,b)),
         DMP([DMF(([[1, 0], []], [[1]]), ZZ), DMF(([[]], [[1]]), ZZ)], ZZ.frac_field(a,b)))
    assert Poly(a*x, x, domain='ZZ(a)')._unify(Poly(a*b*x, x, domain='ZZ(a,b)'))[2:] == \
        (DMP([DMF(([[1], []], [[1]]), ZZ), DMF(([[]], [[1]]), ZZ)], ZZ.frac_field(a,b)),
         DMP([DMF(([[1, 0], []], [[1]]), ZZ), DMF(([[]], [[1]]), ZZ)], ZZ.frac_field(a,b)))
    raises(CoercionFailed, "Poly(Poly(x**2 + x**2*z, y, field=True), domain='ZZ(x)')")
def test_Poly_free_symbols():
    """``free_symbols`` collects symbols from both generators and
    coefficients (including inside unevaluated functions like sin)."""
    assert Poly(x**2 + 1).free_symbols == set([x])
    assert Poly(x**2 + y*z).free_symbols == set([x, y, z])
    assert Poly(x**2 + y*z, x).free_symbols == set([x, y, z])
    assert Poly(x**2 + sin(y*z)).free_symbols == set([x, y, z])
    assert Poly(x**2 + sin(y*z), x).free_symbols == set([x, y, z])
    assert Poly(x**2 + sin(y*z), x, domain=EX).free_symbols == set([x, y, z])
def test_PurePoly_free_symbols():
    """PurePoly generators are anonymous, so only symbols that end up in
    the coefficient domain remain free."""
    assert PurePoly(x**2 + 1).free_symbols == set([])
    assert PurePoly(x**2 + y*z).free_symbols == set([])
    assert PurePoly(x**2 + y*z, x).free_symbols == set([y, z])
    assert PurePoly(x**2 + sin(y*z)).free_symbols == set([])
    assert PurePoly(x**2 + sin(y*z), x).free_symbols == set([y, z])
    assert PurePoly(x**2 + sin(y*z), x, domain=EX).free_symbols == set([y, z])
def test_Poly__eq__():
    """``==`` ignores coefficient domains but not generators;
    ``eq(..., strict=True)`` also compares the domains."""
    assert (Poly(x, x) == Poly(x, x)) == True
    assert (Poly(x, x, domain=QQ) == Poly(x, x)) == True
    assert (Poly(x, x) == Poly(x, x, domain=QQ)) == True
    assert (Poly(x, x, domain=ZZ[a]) == Poly(x, x)) == True
    assert (Poly(x, x) == Poly(x, x, domain=ZZ[a])) == True
    assert (Poly(x*y, x, y) == Poly(x, x)) == False
    assert (Poly(x, x, y) == Poly(x, x)) == False
    assert (Poly(x, x) == Poly(x, x, y)) == False
    assert (Poly(x**2 + 1, x) == Poly(y**2 + 1, y)) == False
    assert (Poly(y**2 + 1, y) == Poly(x**2 + 1, x)) == False
    f = Poly(x, x, domain=ZZ)
    g = Poly(x, x, domain=QQ)
    assert f.eq(g) == True
    assert f.ne(g) == False
    assert f.eq(g, strict=True) == False
    assert f.ne(g, strict=True) == True
def test_PurePoly__eq__():
    """PurePoly equality is up to renaming of generators (x**2+1 over x
    equals y**2+1 over y), while strict eq still compares domains."""
    assert (PurePoly(x, x) == PurePoly(x, x)) == True
    assert (PurePoly(x, x, domain=QQ) == PurePoly(x, x)) == True
    assert (PurePoly(x, x) == PurePoly(x, x, domain=QQ)) == True
    assert (PurePoly(x, x, domain=ZZ[a]) == PurePoly(x, x)) == True
    assert (PurePoly(x, x) == PurePoly(x, x, domain=ZZ[a])) == True
    assert (PurePoly(x*y, x, y) == PurePoly(x, x)) == False
    assert (PurePoly(x, x, y) == PurePoly(x, x)) == False
    assert (PurePoly(x, x) == PurePoly(x, x, y)) == False
    assert (PurePoly(x**2 + 1, x) == PurePoly(y**2 + 1, y)) == True
    assert (PurePoly(y**2 + 1, y) == PurePoly(x**2 + 1, x)) == True
    f = PurePoly(x, x, domain=ZZ)
    g = PurePoly(x, x, domain=QQ)
    assert f.eq(g) == True
    assert f.ne(g) == False
    assert f.eq(g, strict=True) == False
    assert f.ne(g, strict=True) == True
    f = PurePoly(x, x, domain=ZZ)
    g = PurePoly(y, y, domain=QQ)
    assert f.eq(g) == True
    assert f.ne(g) == False
    assert f.eq(g, strict=True) == False
    assert f.ne(g, strict=True) == True
def test_PurePoly_Poly():
    """Poly and PurePoly can each be constructed from the other."""
    f = Poly(x**2 + 1)
    assert isinstance(PurePoly(f), PurePoly)
    g = PurePoly(x**2 + 1)
    assert isinstance(Poly(g), Poly)
def test_Poly_get_domain():
    """Domains are inferred from the coefficients (int -> ZZ, rational ->
    QQ, float -> RR) unless given explicitly; incompatible ones fail."""
    assert Poly(2*x).get_domain() == ZZ
    assert Poly(2*x, domain='ZZ').get_domain() == ZZ
    assert Poly(2*x, domain='QQ').get_domain() == QQ
    assert Poly(x/2).get_domain() == QQ
    raises(CoercionFailed, "Poly(x/2, domain='ZZ')")
    assert Poly(x/2, domain='QQ').get_domain() == QQ
    assert Poly(0.2*x).get_domain() == RR
def test_Poly_set_domain():
    """``set_domain`` converts coefficients when possible and raises
    CoercionFailed / GeneratorsError otherwise."""
    assert Poly(2*x + 1).set_domain(ZZ) == Poly(2*x + 1)
    assert Poly(2*x + 1).set_domain('ZZ') == Poly(2*x + 1)
    assert Poly(2*x + 1).set_domain(QQ) == Poly(2*x + 1, domain='QQ')
    assert Poly(2*x + 1).set_domain('QQ') == Poly(2*x + 1, domain='QQ')
    assert Poly(S(2)/10*x + S(1)/10).set_domain('RR') == Poly(0.2*x + 0.1)
    assert Poly(0.2*x + 0.1).set_domain('QQ') == Poly(S(2)/10*x + S(1)/10)
    raises(CoercionFailed, "Poly(x/2 + 1).set_domain(ZZ)")
    raises(CoercionFailed, "Poly(x + 1, modulus=2).set_domain(QQ)")
    # a generator may not appear in the coefficient domain
    raises(GeneratorsError, "Poly(x*y, x, y).set_domain(ZZ[y])")
def test_Poly_get_modulus():
    """``get_modulus`` works over GF(p) and fails over other domains."""
    f = Poly(x**2 + 1, modulus=2)
    assert f.get_modulus() == 2
    raises(PolynomialError, "Poly(x**2 + 1).get_modulus()")
def test_Poly_set_modulus():
    """``set_modulus`` re-reduces coefficients modulo the new prime;
    it fails when coefficients cannot be coerced to integers."""
    assert Poly(x**2 + 1, modulus=2).set_modulus(7) == Poly(x**2 + 1, modulus=7)
    assert Poly(x**2 + 5, modulus=7).set_modulus(2) == Poly(x**2 + 1, modulus=2)
    assert Poly(x**2 + 1).set_modulus(2) == Poly(x**2 + 1, modulus=2)
    raises(CoercionFailed, "Poly(x/2 + 1).set_modulus(2)")
def test_Poly_add_ground():
    """Adding a ground element shifts the constant coefficient."""
    f = Poly(x + 1)
    assert f.add_ground(2) == Poly(x + 3)
def test_Poly_sub_ground():
    """Subtracting a ground element shifts the constant coefficient."""
    f = Poly(x + 1)
    assert f.sub_ground(2) == Poly(x - 1)
def test_Poly_mul_ground():
    """Multiplying by a ground element scales every coefficient."""
    f = Poly(x + 1)
    assert f.mul_ground(2) == Poly(2*x + 2)
def test_Poly_quo_ground():
    """Ground quotient over ZZ floors inexact coefficient divisions."""
    for f, expected in [(Poly(2*x + 4), Poly(x + 2)),
                        (Poly(2*x + 3), Poly(x + 1))]:
        assert f.quo_ground(2) == expected
def test_Poly_exquo_ground():
    """Exact ground quotient succeeds only when every coefficient divides."""
    g = Poly(2*x + 4).exquo_ground(2)
    assert g == Poly(x + 2)
    raises(ExactQuotientFailed, "Poly(2*x + 3).exquo_ground(2)")
def test_Poly_abs():
    """Both the method and the builtin take coefficient absolute values."""
    f = Poly(-x + 1, x)
    expected = Poly(x + 1, x)
    assert f.abs() == expected
    assert abs(f) == expected
def test_Poly_neg():
    """Both the method and unary minus negate every coefficient."""
    f = Poly(-x + 1, x)
    expected = Poly(x - 1, x)
    assert f.neg() == expected
    assert -f == expected
def test_Poly_add():
    """Addition via the method and the operator, including mixed gens,
    Expr operands and ground constants; non-poly sums fall back to Expr."""
    assert Poly(0, x).add(Poly(0, x)) == Poly(0, x)
    assert Poly(0, x) + Poly(0, x) == Poly(0, x)
    assert Poly(1, x).add(Poly(0, x)) == Poly(1, x)
    assert Poly(1, x, y) + Poly(0, x) == Poly(1, x, y)
    assert Poly(0, x).add(Poly(1, x, y)) == Poly(1, x, y)
    assert Poly(0, x, y) + Poly(1, x, y) == Poly(1, x, y)
    assert Poly(1, x) + x == Poly(x+1, x)
    assert Poly(1, x) + sin(x) == 1+sin(x)
    assert Poly(x, x) + 1 == Poly(x+1, x)
    assert 1 + Poly(x, x) == Poly(x+1, x)
def test_Poly_sub():
    """Subtraction via the method and the operator, including mixed gens,
    Expr operands and ground constants; non-poly differences fall back."""
    assert Poly(0, x).sub(Poly(0, x)) == Poly(0, x)
    assert Poly(0, x) - Poly(0, x) == Poly(0, x)
    assert Poly(1, x).sub(Poly(0, x)) == Poly(1, x)
    assert Poly(1, x, y) - Poly(0, x) == Poly(1, x, y)
    assert Poly(0, x).sub(Poly(1, x, y)) == Poly(-1, x, y)
    assert Poly(0, x, y) - Poly(1, x, y) == Poly(-1, x, y)
    assert Poly(1, x) - x == Poly(1-x, x)
    assert Poly(1, x) - sin(x) == 1-sin(x)
    assert Poly(x, x) - 1 == Poly(x-1, x)
    assert 1 - Poly(x, x) == Poly(1-x, x)
def test_Poly_mul():
    """Multiplication via the method and the operator, including mixed
    gens, Expr operands and ground constants."""
    assert Poly(0, x).mul(Poly(0, x)) == Poly(0, x)
    assert Poly(0, x) * Poly(0, x) == Poly(0, x)
    assert Poly(2, x).mul(Poly(4, x)) == Poly(8, x)
    assert Poly(2, x, y) * Poly(4, x) == Poly(8, x, y)
    assert Poly(4, x).mul(Poly(2, x, y)) == Poly(8, x, y)
    assert Poly(4, x, y) * Poly(2, x, y) == Poly(8, x, y)
    assert Poly(1, x) * x == Poly(x, x)
    assert Poly(1, x) * sin(x) == sin(x)
    assert Poly(x, x) * 2 == Poly(2*x, x)
    assert 2 * Poly(x, x) == Poly(2*x, x)
def test_Poly_sqr():
    """``sqr`` squares the polynomial."""
    f = Poly(x*y, x, y)
    assert f.sqr() == Poly(x**2*y**2, x, y)
def test_Poly_pow():
    """Integer powers via the method and ``**``; negative or symbolic
    exponents fall back to a plain Expr result."""
    assert Poly(x, x).pow(10) == Poly(x**10, x)
    assert Poly(x, x).pow(Integer(10)) == Poly(x**10, x)
    assert Poly(2*y, x, y).pow(4) == Poly(16*y**4, x, y)
    assert Poly(2*y, x, y).pow(Integer(4)) == Poly(16*y**4, x, y)
    assert Poly(7*x*y, x, y)**3 == Poly(343*x**3*y**3, x, y)
    assert Poly(x*y+1, x, y)**(-1) == (x*y+1)**(-1)
    assert Poly(x*y+1, x, y)**x == (x*y+1)**x
def test_Poly_divmod():
    """divmod/floordiv/mod between polys (and mixed Expr/int operands);
    true division falls back to an Expr quotient."""
    f, g = Poly(x**2), Poly(x)
    q, r = g, Poly(0, x)
    assert divmod(f, g) == (q, r)
    assert f // g == q
    assert f % g == r
    assert divmod(f, x) == (q, r)
    assert f // x == q
    assert f % x == r
    q, r = Poly(0, x), Poly(2, x)
    assert divmod(2, g) == (q, r)
    assert 2 // g == q
    assert 2 % g == r
    assert Poly(x)/Poly(x) == 1
    assert Poly(x**2)/Poly(x) == x
    assert Poly(x)/Poly(x**2) == 1/x
def test_Poly_eq_ne():
    """``==``/``!=`` between polys, Expr objects and non-polynomials."""
    assert (Poly(x+y, x, y) == Poly(x+y, x, y)) == True
    assert (Poly(x+y, x) == Poly(x+y, x, y)) == False
    assert (Poly(x+y, x, y) == Poly(x+y, x)) == False
    assert (Poly(x+y, x) == Poly(x+y, x)) == True
    assert (Poly(x+y, y) == Poly(x+y, y)) == True
    # NOTE(review): the next two asserts repeat two of the following three
    # verbatim — possibly meant to vary the generators; confirm upstream.
    assert (Poly(x+y, x, y) == x+y) == True
    assert (Poly(x+y, x) == x+y) == True
    assert (Poly(x+y, x, y) == x+y) == True
    assert (Poly(x+y, x) == x+y) == True
    assert (Poly(x+y, y) == x+y) == True
    assert (Poly(x+y, x, y) != Poly(x+y, x, y)) == False
    assert (Poly(x+y, x) != Poly(x+y, x, y)) == True
    assert (Poly(x+y, x, y) != Poly(x+y, x)) == True
    assert (Poly(x+y, x) != Poly(x+y, x)) == False
    assert (Poly(x+y, y) != Poly(x+y, y)) == False
    assert (Poly(x+y, x, y) != x+y) == False
    assert (Poly(x+y, x) != x+y) == False
    assert (Poly(x+y, x, y) != x+y) == False
    assert (Poly(x+y, x) != x+y) == False
    assert (Poly(x+y, y) != x+y) == False
    assert (Poly(x, x) == sin(x)) == False
    assert (Poly(x, x) != sin(x)) == True
def test_Poly_nonzero():
    """Truthiness of a Poly follows the zero-polynomial test."""
    assert bool(Poly(0, x)) is False
    assert bool(Poly(1, x)) is True
def test_Poly_properties():
    """Boolean structural predicates on Poly: zero/one, square-free,
    monic, primitive, ground, degree classes, homogeneity, arity and
    cyclotomicity."""
    assert Poly(0, x).is_zero == True
    assert Poly(1, x).is_zero == False
    assert Poly(1, x).is_one == True
    assert Poly(2, x).is_one == False
    assert Poly(x-1, x).is_sqf == True
    assert Poly((x-1)**2, x).is_sqf == False
    assert Poly(x-1, x).is_monic == True
    assert Poly(2*x-1, x).is_monic == False
    assert Poly(3*x+2, x).is_primitive == True
    assert Poly(4*x+2, x).is_primitive == False
    assert Poly(1, x).is_ground == True
    assert Poly(x, x).is_ground == False
    assert Poly(x+y+z+1).is_linear == True
    assert Poly(x*y*z+1).is_linear == False
    assert Poly(x*y+z+1).is_quadratic == True
    assert Poly(x*y*z+1).is_quadratic == False
    assert Poly(x*y).is_monomial == True
    assert Poly(x*y+1).is_monomial == False
    assert Poly(x**2 + x*y).is_homogeneous == True
    assert Poly(x**3 + x*y).is_homogeneous == False
    assert Poly(x).is_univariate == True
    assert Poly(x*y).is_univariate == False
    assert Poly(x*y).is_multivariate == True
    assert Poly(x).is_multivariate == False
    assert Poly(x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1).is_cyclotomic == False
    assert Poly(x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1).is_cyclotomic == True
def test_Poly_is_irreducible():
    """Irreducibility over the rationals and over GF(11)."""
    assert Poly(x**2 + x + 1).is_irreducible == True
    assert Poly(x**2 + 2*x + 1).is_irreducible == False
    assert Poly(7*x + 3, modulus=11).is_irreducible == True
    assert Poly(7*x**2 + 3*x + 1, modulus=11).is_irreducible == False
def test_Poly_subs():
    """Substitution keeps a Poly when generators survive and collapses
    to a plain expression otherwise."""
    f = Poly(x + 1)
    assert f.subs(x, 0) == 1
    assert f.subs(x, x) == Poly(x + 1)
    assert f.subs(x, y) == Poly(y + 1)
    g = Poly(x*y, x)
    assert g.subs(y, x) == x**2
    assert g.subs(x, y) == y**2
def test_Poly_replace():
    """``replace`` renames a generator; the new symbol may not collide
    with an existing generator or coefficient symbol."""
    assert Poly(x + 1).replace(x) == Poly(x + 1)
    assert Poly(x + 1).replace(y) == Poly(y + 1)
    # ambiguous: cannot infer which generator to replace with z
    raises(PolynomialError, "Poly(x + y).replace(z)")
    assert Poly(x + 1).replace(x, x) == Poly(x + 1)
    assert Poly(x + 1).replace(x, y) == Poly(y + 1)
    assert Poly(x + y).replace(x, x) == Poly(x + y)
    assert Poly(x + y).replace(x, z) == Poly(z + y, z, y)
    assert Poly(x + y).replace(y, y) == Poly(x + y)
    assert Poly(x + y).replace(y, z) == Poly(x + z, x, z)
    raises(PolynomialError, "Poly(x + y).replace(x, y)")
    raises(PolynomialError, "Poly(x + y).replace(z, t)")
    assert Poly(x + y, x).replace(x, z) == Poly(z + y, z)
    assert Poly(x + y, y).replace(y, z) == Poly(x + z, z)
    raises(PolynomialError, "Poly(x + y, x).replace(x, y)")
    raises(PolynomialError, "Poly(x + y, y).replace(y, x)")
def test_Poly_reorder():
    """``reorder`` permutes generators, explicitly or via ``wrt=``;
    it fails on generators the poly does not have."""
    raises(PolynomialError, "Poly(x+y).reorder(x, z)")
    assert Poly(x + y, x, y).reorder(x, y) == Poly(x + y, x, y)
    assert Poly(x + y, x, y).reorder(y, x) == Poly(x + y, y, x)
    assert Poly(x + y, y, x).reorder(x, y) == Poly(x + y, x, y)
    assert Poly(x + y, y, x).reorder(y, x) == Poly(x + y, y, x)
    assert Poly(x + y, x, y).reorder(wrt=x) == Poly(x + y, x, y)
    assert Poly(x + y, x, y).reorder(wrt=y) == Poly(x + y, y, x)
def test_Poly_ltrim():
    """``ltrim`` drops leading generators that do not occur; it fails
    when a dropped generator actually appears."""
    f = Poly(y**2 + y*z**2, x, y, z).ltrim(y)
    assert f.as_expr() == y**2 + y*z**2 and f.gens == (y, z)
    raises(PolynomialError, "Poly(x*y**2 + y**2, x, y).ltrim(y)")
def test_Poly_has_only_gens():
    """``has_only_gens`` tells whether the poly involves only the given
    generators; unknown generators raise GeneratorsError."""
    assert Poly(x*y + 1, x, y, z).has_only_gens(x, y) == True
    assert Poly(x*y + z, x, y, z).has_only_gens(x, y) == False
    raises(GeneratorsError, "Poly(x*y**2 + y**2, x, y).has_only_gens(t)")
def test_Poly_to_ring():
    """``to_ring`` maps QQ back to ZZ; it fails for non-integer
    coefficients and for domains without an associated ring."""
    assert Poly(2*x+1, domain='ZZ').to_ring() == Poly(2*x+1, domain='ZZ')
    assert Poly(2*x+1, domain='QQ').to_ring() == Poly(2*x+1, domain='ZZ')
    raises(CoercionFailed, "Poly(x/2+1).to_ring()")
    raises(DomainError, "Poly(2*x+1, modulus=3).to_ring()")
def test_Poly_to_field():
    """``to_field`` maps ZZ to QQ, leaves fields (QQ, GF(p)) alone and
    fails for RR."""
    assert Poly(2*x+1, domain='ZZ').to_field() == Poly(2*x+1, domain='QQ')
    assert Poly(2*x+1, domain='QQ').to_field() == Poly(2*x+1, domain='QQ')
    assert Poly(x/2+1, domain='QQ').to_field() == Poly(x/2+1, domain='QQ')
    assert Poly(2*x+1, modulus=3).to_field() == Poly(2*x+1, modulus=3)
    raises(DomainError, "Poly(2.0*x + 1.0).to_field()")
def test_Poly_to_exact():
    """``to_exact`` replaces inexact (float) coefficients by rationals."""
    for f, expected in [(Poly(2*x), Poly(2*x)),
                        (Poly(x/2), Poly(x/2)),
                        (Poly(0.1*x), Poly(x/10))]:
        assert f.to_exact() == expected
def test_Poly_retract():
    """``retract`` shrinks the domain to the smallest one containing the
    coefficients (optionally forcing a field)."""
    f = Poly(x**2 + 1, x, domain=QQ[y])
    assert f.retract() == Poly(x**2 + 1, x, domain='ZZ')
    assert f.retract(field=True) == Poly(x**2 + 1, x, domain='QQ')
    assert Poly(0, x, y).retract() == Poly(0, x, y)
def test_Poly_slice():
    """``slice(m, n)`` keeps the terms of degree in [m, n); an optional
    leading generator argument selects the variable."""
    f = Poly(x**3 + 2*x**2 + 3*x + 4)
    assert f.slice(0, 0) == Poly(0, x)
    assert f.slice(0, 1) == Poly(4, x)
    assert f.slice(0, 2) == Poly(3*x + 4, x)
    assert f.slice(0, 3) == Poly(2*x**2 + 3*x + 4, x)
    assert f.slice(0, 4) == Poly(x**3 + 2*x**2 + 3*x + 4, x)
    assert f.slice(x, 0, 0) == Poly(0, x)
    assert f.slice(x, 0, 1) == Poly(4, x)
    assert f.slice(x, 0, 2) == Poly(3*x + 4, x)
    assert f.slice(x, 0, 3) == Poly(2*x**2 + 3*x + 4, x)
    assert f.slice(x, 0, 4) == Poly(x**3 + 2*x**2 + 3*x + 4, x)
def test_Poly_coeffs():
    """``coeffs`` lists non-zero coefficients in the given monomial
    order (lex by default)."""
    assert Poly(0, x).coeffs() == [0]
    assert Poly(1, x).coeffs() == [1]
    assert Poly(2*x+1, x).coeffs() == [2,1]
    assert Poly(7*x**2+2*x+1, x).coeffs() == [7,2,1]
    assert Poly(7*x**4+2*x+1, x).coeffs() == [7,2,1]
    assert Poly(x*y**7 + 2*x**2*y**3).coeffs('lex') == [2, 1]
    assert Poly(x*y**7 + 2*x**2*y**3).coeffs('grlex') == [1, 2]
def test_Poly_monoms():
    """``monoms`` lists exponent tuples of non-zero terms in the given
    monomial order (lex by default)."""
    assert Poly(0, x).monoms() == [(0,)]
    assert Poly(1, x).monoms() == [(0,)]
    assert Poly(2*x+1, x).monoms() == [(1,),(0,)]
    assert Poly(7*x**2+2*x+1, x).monoms() == [(2,),(1,),(0,)]
    assert Poly(7*x**4+2*x+1, x).monoms() == [(4,),(1,),(0,)]
    assert Poly(x*y**7 + 2*x**2*y**3).monoms('lex') == [(2, 3), (1, 7)]
    assert Poly(x*y**7 + 2*x**2*y**3).monoms('grlex') == [(1, 7), (2, 3)]
def test_Poly_terms():
    """``terms`` lists (monom, coeff) pairs of non-zero terms in the
    given monomial order (lex by default)."""
    assert Poly(0, x).terms() == [((0,), 0)]
    assert Poly(1, x).terms() == [((0,), 1)]
    assert Poly(2*x+1, x).terms() == [((1,), 2),((0,), 1)]
    assert Poly(7*x**2+2*x+1, x).terms() == [((2,), 7),((1,), 2),((0,), 1)]
    assert Poly(7*x**4+2*x+1, x).terms() == [((4,), 7),((1,), 2),((0,), 1)]
    assert Poly(x*y**7 + 2*x**2*y**3).terms('lex') == [((2, 3), 2), ((1, 7), 1)]
    assert Poly(x*y**7 + 2*x**2*y**3).terms('grlex') == [((1, 7), 1), ((2, 3), 2)]
def test_Poly_all_coeffs():
    """``all_coeffs`` lists every coefficient down from the leading term,
    including zeros."""
    assert Poly(0, x).all_coeffs() == [0]
    assert Poly(1, x).all_coeffs() == [1]
    assert Poly(2*x+1, x).all_coeffs() == [2,1]
    assert Poly(7*x**2+2*x+1, x).all_coeffs() == [7,2,1]
    assert Poly(7*x**4+2*x+1, x).all_coeffs() == [7,0,0,2,1]
def test_Poly_all_monoms():
    """``all_monoms`` lists every monomial down from the leading term,
    including those with zero coefficient."""
    assert Poly(0, x).all_monoms() == [(0,)]
    assert Poly(1, x).all_monoms() == [(0,)]
    assert Poly(2*x+1, x).all_monoms() == [(1,),(0,)]
    assert Poly(7*x**2+2*x+1, x).all_monoms() == [(2,),(1,),(0,)]
    assert Poly(7*x**4+2*x+1, x).all_monoms() == [(4,),(3,),(2,),(1,),(0,)]
def test_Poly_all_terms():
    """``all_terms`` lists every (monom, coeff) pair down from the
    leading term, including zero coefficients."""
    assert Poly(0, x).all_terms() == [((0,), 0)]
    assert Poly(1, x).all_terms() == [((0,), 1)]
    assert Poly(2*x+1, x).all_terms() == [((1,), 2),((0,), 1)]
    assert Poly(7*x**2+2*x+1, x).all_terms() == [((2,), 7),((1,), 2),((0,), 1)]
    assert Poly(7*x**4+2*x+1, x).all_terms() == [((4,), 7),((3,),0),((2,),0),((1,), 2),((0,), 1)]
def test_Poly_termwise():
f = Poly(x**2 + 20*x + 400)
g = Poly(x**2 + 2*x + 4)
def func(monom, coeff):
(k,) = monom
return coeff//10**(2-k)
assert f.termwise(func) == g
def func(monom, coeff):
(k,) = monom
return (k,), coeff//10**(2-k)
assert f.termwise(func) == g
def test_Poly_length():
assert Poly(0, x).length() == 0
assert Poly(1, x).length() == 1
assert Poly(x, x).length() == 1
assert Poly(x+1, x).length() == 2
assert Poly(x**2+1, x).length() == 2
assert Poly(x**2+x+1, x).length() == 3
def test_Poly_as_dict():
assert Poly(0, x).as_dict() == {}
assert Poly(0, x, y, z).as_dict() == {}
assert Poly(1, x).as_dict() == {(0,): 1}
assert Poly(1, x, y, z).as_dict() == {(0,0,0): 1}
assert Poly(x**2+3, x).as_dict() == {(2,): 1, (0,): 3}
assert Poly(x**2+3, x, y, z).as_dict() == {(2,0,0): 1, (0,0,0): 3}
assert Poly(3*x**2*y*z**3+4*x*y+5*x*z).as_dict() == {(2,1,3): 3, (1,1,0): 4, (1,0,1): 5}
def test_Poly_as_expr():
    """Poly.as_expr() converts back to an Expr; optional substitutions may be
    given as a dict or positionally, and unknown generators raise GeneratorsError."""
    assert Poly(0, x).as_expr() == 0
    assert Poly(0, x, y, z).as_expr() == 0
    assert Poly(1, x).as_expr() == 1
    assert Poly(1, x, y, z).as_expr() == 1
    assert Poly(x**2+3, x).as_expr() == x**2 + 3
    assert Poly(x**2+3, x, y, z).as_expr() == x**2 + 3
    assert Poly(3*x**2*y*z**3+4*x*y+5*x*z).as_expr() == 3*x**2*y*z**3 + 4*x*y + 5*x*z
    f = Poly(x**2 + 2*x*y**2 - y, x, y)
    assert f.as_expr() == -y + x**2 + 2*x*y**2
    assert f.as_expr({x: 5}) == 25 - y + 10*y**2
    assert f.as_expr({y: 6}) == -6 + 72*x + x**2
    assert f.as_expr({x: 5, y: 6}) == 379
    assert f.as_expr(5, 6) == 379
    raises(GeneratorsError, "f.as_expr({z: 7})")
def test_Poly_lift():
    """Poly.lift() maps a poly over an algebraic extension (here QQ(I)) to a
    poly over the base rationals."""
    assert Poly(x**4 - I*x + 17*I, x, gaussian=True).lift() == \
        Poly(x**16 + 2*x**10 + 578*x**8 + x**4 - 578*x**2 + 83521, x, domain='QQ')
def test_Poly_deflate():
    """Poly.deflate() factors out the gcd of exponents per generator,
    returning (exponents, deflated poly)."""
    assert Poly(0, x).deflate() == ((1,), Poly(0, x))
    assert Poly(1, x).deflate() == ((1,), Poly(1, x))
    assert Poly(x, x).deflate() == ((1,), Poly(x, x))
    assert Poly(x**2, x).deflate() == ((2,), Poly(x, x))
    assert Poly(x**17, x).deflate() == ((17,), Poly(x, x))
    assert Poly(x**2*y*z**11+x**4*z**11).deflate() == ((2,1,11), Poly(x*y*z+x**2*z))
def test_Poly_inject():
    """Poly.inject() moves ground-domain symbols into the generators;
    front=True puts them before the existing generators."""
    f = Poly(x**2*y + x*y**3 + x*y + 1, x)
    assert f.inject() == Poly(x**2*y + x*y**3 + x*y + 1, x, y)
    assert f.inject(front=True) == Poly(y**3*x + y*x**2 + y*x + 1, y, x)
def test_Poly_eject():
    """Poly.eject(gen) moves a generator into the ground domain; unsupported
    domains and >2 generators raise."""
    f = Poly(x**2*y + x*y**3 + x*y + 1, x, y)
    assert f.eject(x) == Poly(x*y**3 + (x**2 + x)*y + 1, y, domain='ZZ[x]')
    assert f.eject(y) == Poly(y*x**2 + (y**3 + y)*x + 1, x, domain='ZZ[y]')
    raises(DomainError, "Poly(x*y, x, y, domain=ZZ[z]).eject(y)")
    raises(NotImplementedError, "Poly(x*y, x, y, z).eject(y)")
def test_Poly_exclude():
    """Poly.exclude() drops generators the poly does not actually use,
    except when all would be dropped (constant poly keeps its gens)."""
    assert Poly(x, x, y).exclude() == Poly(x, x)
    assert Poly(x*y, x, y).exclude() == Poly(x*y, x, y)
    assert Poly(1, x, y).exclude() == Poly(1, x, y)
def test_Poly__gen_to_level():
    """Poly._gen_to_level() resolves a generator given as index (including
    negative), symbol, or name to its level; out-of-range/unknown raises."""
    assert Poly(1, x, y)._gen_to_level(-2) == 0
    assert Poly(1, x, y)._gen_to_level(-1) == 1
    assert Poly(1, x, y)._gen_to_level( 0) == 0
    assert Poly(1, x, y)._gen_to_level( 1) == 1
    raises(PolynomialError, "Poly(1, x, y)._gen_to_level(-3)")
    raises(PolynomialError, "Poly(1, x, y)._gen_to_level( 2)")
    assert Poly(1, x, y)._gen_to_level(x) == 0
    assert Poly(1, x, y)._gen_to_level(y) == 1
    assert Poly(1, x, y)._gen_to_level('x') == 0
    assert Poly(1, x, y)._gen_to_level('y') == 1
    raises(PolynomialError, "Poly(1, x, y)._gen_to_level(z)")
    raises(PolynomialError, "Poly(1, x, y)._gen_to_level('z')")
def test_Poly_degree():
    """Poly.degree() and the degree() function: degree in a given generator
    (by index, symbol, or name); the zero poly has degree -1."""
    assert Poly(0, x).degree() ==-1
    assert Poly(1, x).degree() == 0
    assert Poly(x, x).degree() == 1
    assert Poly(0, x).degree(gen=0) ==-1
    assert Poly(1, x).degree(gen=0) == 0
    assert Poly(x, x).degree(gen=0) == 1
    assert Poly(0, x).degree(gen=x) ==-1
    assert Poly(1, x).degree(gen=x) == 0
    assert Poly(x, x).degree(gen=x) == 1
    assert Poly(0, x).degree(gen='x') ==-1
    assert Poly(1, x).degree(gen='x') == 0
    assert Poly(x, x).degree(gen='x') == 1
    raises(PolynomialError, "Poly(1, x).degree(gen=1)")
    raises(PolynomialError, "Poly(1, x).degree(gen=y)")
    raises(PolynomialError, "Poly(1, x).degree(gen='y')")
    assert Poly(1, x, y).degree() == 0
    assert Poly(2*y, x, y).degree() == 0
    assert Poly(x*y, x, y).degree() == 1
    assert Poly(1, x, y).degree(gen=x) == 0
    assert Poly(2*y, x, y).degree(gen=x) == 0
    assert Poly(x*y, x, y).degree(gen=x) == 1
    assert Poly(1, x, y).degree(gen=y) == 0
    assert Poly(2*y, x, y).degree(gen=y) == 1
    assert Poly(x*y, x, y).degree(gen=y) == 1
    assert degree(1, x) == 0
    assert degree(x, x) == 1
    assert degree(x*y**2, gen=x) == 1
    assert degree(x*y**2, gen=y) == 2
    assert degree(x*y**2, x, y) == 1
    assert degree(x*y**2, y, x) == 2
    raises(ComputationFailed, "degree(1)")
def test_Poly_degree_list():
    """Poly.degree_list() gives the degree in each generator; -1 per
    generator for the zero poly."""
    assert Poly(0, x).degree_list() == (-1,)
    assert Poly(0, x, y).degree_list() == (-1,-1)
    assert Poly(0, x, y, z).degree_list() == (-1,-1,-1)
    assert Poly(1, x).degree_list() == (0,)
    assert Poly(1, x, y).degree_list() == (0,0)
    assert Poly(1, x, y, z).degree_list() == (0,0,0)
    assert Poly(x**2*y+x**3*z**2+1).degree_list() == (3,1,2)
    assert degree_list(1, x) == (0,)
    assert degree_list(x, x) == (1,)
    assert degree_list(x*y**2) == (1,2)
    raises(ComputationFailed, "degree_list(1)")
def test_Poly_total_degree():
    """Poly.total_degree() is the maximal sum of exponents over all terms."""
    assert Poly(x**2*y+x**3*z**2+1).total_degree() == 5
    assert Poly(x**2 + z**3).total_degree() == 3
    assert Poly(x*y*z + z**4).total_degree() == 4
    assert Poly(x**3 + x + 1).total_degree() == 3
def test_Poly_homogeneous_order():
    """Poly.homogeneous_order() returns the common total degree of all terms,
    or None when the poly is not homogeneous."""
    assert Poly(0, x, y).homogeneous_order() == -1
    assert Poly(1, x, y).homogeneous_order() == 0
    assert Poly(x, x, y).homogeneous_order() == 1
    assert Poly(x*y, x, y).homogeneous_order() == 2
    assert Poly(x + 1, x, y).homogeneous_order() is None
    assert Poly(x*y + x, x, y).homogeneous_order() is None
    assert Poly(x**5 + 2*x**3*y**2 + 9*x*y**4).homogeneous_order() == 5
    assert Poly(x**5 + 2*x**3*y**3 + 9*x*y**4).homogeneous_order() is None
def test_Poly_LC():
    """Poly.LC()/LC(): leading coefficient, with optional monomial order
    ('lex'/'grlex') selecting which term is leading."""
    assert Poly(0, x).LC() == 0
    assert Poly(1, x).LC() == 1
    assert Poly(2*x**2+x, x).LC() == 2
    assert Poly(x*y**7 + 2*x**2*y**3).LC('lex') == 2
    assert Poly(x*y**7 + 2*x**2*y**3).LC('grlex') == 1
    assert LC(x*y**7 + 2*x**2*y**3, order='lex') == 2
    assert LC(x*y**7 + 2*x**2*y**3, order='grlex') == 1
def test_Poly_TC():
    """Poly.TC(): trailing (constant-term) coefficient."""
    assert Poly(0, x).TC() == 0
    assert Poly(1, x).TC() == 1
    assert Poly(2*x**2+x, x).TC() == 0
def test_Poly_EC():
    """Poly.EC(): coefficient of the last (ending) nonzero term, with
    optional monomial order."""
    assert Poly(0, x).EC() == 0
    assert Poly(1, x).EC() == 1
    assert Poly(2*x**2+x, x).EC() == 1
    assert Poly(x*y**7 + 2*x**2*y**3).EC('lex') == 1
    assert Poly(x*y**7 + 2*x**2*y**3).EC('grlex') == 2
def test_Poly_nth():
    """Poly.nth(*exponents): coefficient of the monomial with the given
    exponents (0 when absent)."""
    assert Poly(0, x).nth(0) == 0
    assert Poly(0, x).nth(1) == 0
    assert Poly(1, x).nth(0) == 1
    assert Poly(1, x).nth(1) == 0
    assert Poly(x**8, x).nth(0) == 0
    assert Poly(x**8, x).nth(7) == 0
    assert Poly(x**8, x).nth(8) == 1
    assert Poly(x**8, x).nth(9) == 0
    assert Poly(3*x*y**2 + 1).nth(0, 0) == 1
    assert Poly(3*x*y**2 + 1).nth(1, 2) == 3
def test_Poly_LM():
    """Poly.LM(): leading monomial as exponent tuple; LM() the function
    returns it as an expression."""
    assert Poly(0, x).LM() == (0,)
    assert Poly(1, x).LM() == (0,)
    assert Poly(2*x**2+x, x).LM() == (2,)
    assert Poly(x*y**7 + 2*x**2*y**3).LM('lex') == (2, 3)
    assert Poly(x*y**7 + 2*x**2*y**3).LM('grlex') == (1, 7)
    assert LM(x*y**7 + 2*x**2*y**3, order='lex') == x**2*y**3
    assert LM(x*y**7 + 2*x**2*y**3, order='grlex') == x*y**7
def test_Poly_LM_custom_order():
    """Poly.LM() accepts a callable order key as well as a named order."""
    f = Poly(x**2*y**3*z + x**2*y*z**3 + x*y*z + 1)
    rev_lex = lambda monom: tuple(reversed(monom))
    assert f.LM(order='lex') == (2, 3, 1)
    assert f.LM(order=rev_lex) == (2, 1, 3)
def test_Poly_EM():
    """Poly.EM(): ending (last) monomial as exponent tuple under the
    chosen order."""
    assert Poly(0, x).EM() == (0,)
    assert Poly(1, x).EM() == (0,)
    assert Poly(2*x**2+x, x).EM() == (1,)
    assert Poly(x*y**7 + 2*x**2*y**3).EM('lex') == (1, 7)
    assert Poly(x*y**7 + 2*x**2*y**3).EM('grlex') == (2, 3)
def test_Poly_LT():
    """Poly.LT(): leading (monomial, coefficient) term; LT() the function
    returns the corresponding expression."""
    assert Poly(0, x).LT() == ((0,), 0)
    assert Poly(1, x).LT() == ((0,), 1)
    assert Poly(2*x**2+x, x).LT() == ((2,), 2)
    assert Poly(x*y**7 + 2*x**2*y**3).LT('lex') == ((2, 3), 2)
    assert Poly(x*y**7 + 2*x**2*y**3).LT('grlex') == ((1, 7), 1)
    assert LT(x*y**7 + 2*x**2*y**3, order='lex') == 2*x**2*y**3
    assert LT(x*y**7 + 2*x**2*y**3, order='grlex') == x*y**7
def test_Poly_ET():
    """Poly.ET(): ending (monomial, coefficient) term under the chosen order."""
    assert Poly(0, x).ET() == ((0,), 0)
    assert Poly(1, x).ET() == ((0,), 1)
    assert Poly(2*x**2+x, x).ET() == ((1,), 1)
    assert Poly(x*y**7 + 2*x**2*y**3).ET('lex') == ((1, 7), 1)
    assert Poly(x*y**7 + 2*x**2*y**3).ET('grlex') == ((2, 3), 2)
def test_Poly_max_norm():
    """Poly.max_norm(): maximum absolute value of the coefficients."""
    assert Poly(-1, x).max_norm() == 1
    assert Poly( 0, x).max_norm() == 0
    assert Poly( 1, x).max_norm() == 1
def test_Poly_l1_norm():
    """Poly.l1_norm(): sum of absolute values of the coefficients."""
    assert Poly(-1, x).l1_norm() == 1
    assert Poly( 0, x).l1_norm() == 0
    assert Poly( 1, x).l1_norm() == 1
def test_Poly_clear_denoms():
    """Poly.clear_denoms() returns (common denominator, scaled poly);
    convert=True also switches to the corresponding ring domain."""
    coeff, poly = Poly(x + 2, x).clear_denoms()
    assert coeff == 1 and poly == Poly(x + 2, x, domain='ZZ') and poly.get_domain() == ZZ
    coeff, poly = Poly(x/2 + 1, x).clear_denoms()
    assert coeff == 2 and poly == Poly(x + 2, x, domain='QQ') and poly.get_domain() == QQ
    coeff, poly = Poly(x/2 + 1, x).clear_denoms(convert=True)
    assert coeff == 2 and poly == Poly(x + 2, x, domain='ZZ') and poly.get_domain() == ZZ
    coeff, poly = Poly(x/y + 1, x).clear_denoms(convert=True)
    assert coeff == y and poly == Poly(x + y, x, domain='ZZ[y]') and poly.get_domain() == ZZ[y]
def test_Poly_rat_clear_denoms():
    """Poly.rat_clear_denoms(g) clears denominators of the f/g pair; over EX
    the polys are returned unchanged."""
    f = Poly(x**2/y + 1, x)
    g = Poly(x**3 + y, x)
    assert f.rat_clear_denoms(g) == \
        (Poly(x**2 + y, x), Poly(y*x**3 + y**2, x))
    f = f.set_domain(EX)
    g = g.set_domain(EX)
    assert f.rat_clear_denoms(g) == (f, g)
def test_Poly_integrate():
    """Poly.integrate(): antiderivative with respect to one or more
    generators, given as symbols or (gen, order) pairs."""
    assert Poly(x + 1).integrate() == Poly(x**2/2 + x)
    assert Poly(x + 1).integrate(x) == Poly(x**2/2 + x)
    assert Poly(x + 1).integrate((x, 1)) == Poly(x**2/2 + x)
    assert Poly(x*y + 1).integrate(x) == Poly(x**2*y/2 + x)
    assert Poly(x*y + 1).integrate(y) == Poly(x*y**2/2 + y)
    assert Poly(x*y + 1).integrate(x, x) == Poly(x**3*y/6 + x**2/2)
    assert Poly(x*y + 1).integrate(y, y) == Poly(x*y**3/6 + y**2/2)
    assert Poly(x*y + 1).integrate((x, 2)) == Poly(x**3*y/6 + x**2/2)
    assert Poly(x*y + 1).integrate((y, 2)) == Poly(x*y**3/6 + y**2/2)
    assert Poly(x*y + 1).integrate(x, y) == Poly(x**2*y**2/4 + x*y)
    assert Poly(x*y + 1).integrate(y, x) == Poly(x**2*y**2/4 + x*y)
def test_Poly_diff():
    """Poly.diff(): derivative with respect to one or more generators,
    given as symbols or (gen, order) pairs."""
    assert Poly(x**2 + x).diff() == Poly(2*x + 1)
    assert Poly(x**2 + x).diff(x) == Poly(2*x + 1)
    assert Poly(x**2 + x).diff((x, 1)) == Poly(2*x + 1)
    assert Poly(x**2*y**2 + x*y).diff(x) == Poly(2*x*y**2 + y)
    assert Poly(x**2*y**2 + x*y).diff(y) == Poly(2*x**2*y + x)
    assert Poly(x**2*y**2 + x*y).diff(x, x) == Poly(2*y**2, x, y)
    assert Poly(x**2*y**2 + x*y).diff(y, y) == Poly(2*x**2, x, y)
    assert Poly(x**2*y**2 + x*y).diff((x, 2)) == Poly(2*y**2, x, y)
    assert Poly(x**2*y**2 + x*y).diff((y, 2)) == Poly(2*x**2, x, y)
    assert Poly(x**2*y**2 + x*y).diff(x, y) == Poly(4*x*y + 1)
    assert Poly(x**2*y**2 + x*y).diff(y, x) == Poly(4*x*y + 1)
def test_Poly_eval():
    """Poly.eval(): evaluate at a point, selecting the generator by index,
    symbol, or name; multivariate polys evaluate partially (returning a Poly
    in the remaining generators) and also accept dict/sequence arguments.
    Also checks automatic domain coercion (rationals, radicals) and the
    auto=False DomainError path."""
    assert Poly(0, x).eval(7) == 0
    assert Poly(1, x).eval(7) == 1
    assert Poly(x, x).eval(7) == 7
    assert Poly(0, x).eval(0, 7) == 0
    assert Poly(1, x).eval(0, 7) == 1
    assert Poly(x, x).eval(0, 7) == 7
    assert Poly(0, x).eval(x, 7) == 0
    assert Poly(1, x).eval(x, 7) == 1
    assert Poly(x, x).eval(x, 7) == 7
    assert Poly(0, x).eval('x', 7) == 0
    assert Poly(1, x).eval('x', 7) == 1
    assert Poly(x, x).eval('x', 7) == 7
    raises(PolynomialError, "Poly(1, x).eval(1, 7)")
    raises(PolynomialError, "Poly(1, x).eval(y, 7)")
    raises(PolynomialError, "Poly(1, x).eval('y', 7)")
    assert Poly(123, x, y).eval(7) == Poly(123, y)
    assert Poly(2*y, x, y).eval(7) == Poly(2*y, y)
    assert Poly(x*y, x, y).eval(7) == Poly(7*y, y)
    assert Poly(123, x, y).eval(x, 7) == Poly(123, y)
    assert Poly(2*y, x, y).eval(x, 7) == Poly(2*y, y)
    assert Poly(x*y, x, y).eval(x, 7) == Poly(7*y, y)
    assert Poly(123, x, y).eval(y, 7) == Poly(123, x)
    assert Poly(2*y, x, y).eval(y, 7) == Poly(14, x)
    assert Poly(x*y, x, y).eval(y, 7) == Poly(7*x, x)
    assert Poly(x*y + y, x, y).eval({x: 7}) == Poly(8*y, y)
    assert Poly(x*y + y, x, y).eval({y: 7}) == Poly(7*x + 7, x)
    assert Poly(x*y + y, x, y).eval({x: 6, y: 7}) == 49
    assert Poly(x*y + y, x, y).eval({x: 7, y: 6}) == 48
    assert Poly(x*y + y, x, y).eval((6, 7)) == 49
    assert Poly(x*y + y, x, y).eval([6, 7]) == 49
    # These two comparisons were previously bare expressions (missing
    # ``assert``), so they evaluated and discarded their result without
    # testing anything.
    assert Poly(x + 1, domain='ZZ').eval(S(1)/2) == S(3)/2
    assert Poly(x + 1, domain='ZZ').eval(sqrt(2)) == sqrt(2) + 1
    raises(ValueError, "Poly(x*y + y, x, y).eval((6, 7, 8))")
    raises(DomainError, "Poly(x+1, domain='ZZ').eval(S(1)/2, auto=False)")
def test_Poly___call__():
    """Calling a Poly evaluates it positionally, partially or fully."""
    f = Poly(2*x*y + 3*x + y + 2*z)
    assert f(2) == Poly(5*y + 2*z + 6)
    assert f(2, 5) == Poly(2*z + 31)
    assert f(2, 5, 7) == 45
def test_parallel_poly_from_expr():
    """parallel_poly_from_expr() polifies several expressions/Polys over a
    common set of generators; all-constant input raises PolificationFailed."""
    assert parallel_poly_from_expr([x-1, x**2-1], x)[0] == [Poly(x-1, x), Poly(x**2-1, x)]
    assert parallel_poly_from_expr([Poly(x-1, x), x**2-1], x)[0] == [Poly(x-1, x), Poly(x**2-1, x)]
    assert parallel_poly_from_expr([x-1, Poly(x**2-1, x)], x)[0] == [Poly(x-1, x), Poly(x**2-1, x)]
    assert parallel_poly_from_expr([Poly(x-1, x), Poly(x**2-1, x)], x)[0] == [Poly(x-1, x), Poly(x**2-1, x)]
    assert parallel_poly_from_expr([x-1, x**2-1], x, y)[0] == [Poly(x-1, x, y), Poly(x**2-1, x, y)]
    assert parallel_poly_from_expr([Poly(x-1, x), x**2-1], x, y)[0] == [Poly(x-1, x, y), Poly(x**2-1, x, y)]
    assert parallel_poly_from_expr([x-1, Poly(x**2-1, x)], x, y)[0] == [Poly(x-1, x, y), Poly(x**2-1, x, y)]
    assert parallel_poly_from_expr([Poly(x-1, x), Poly(x**2-1, x)], x, y)[0] == [Poly(x-1, x, y), Poly(x**2-1, x, y)]
    assert parallel_poly_from_expr([x-1, x**2-1])[0] == [Poly(x-1, x), Poly(x**2-1, x)]
    assert parallel_poly_from_expr([Poly(x-1, x), x**2-1])[0] == [Poly(x-1, x), Poly(x**2-1, x)]
    assert parallel_poly_from_expr([x-1, Poly(x**2-1, x)])[0] == [Poly(x-1, x), Poly(x**2-1, x)]
    assert parallel_poly_from_expr([Poly(x-1, x), Poly(x**2-1, x)])[0] == [Poly(x-1, x), Poly(x**2-1, x)]
    # NOTE(review): several of the following asserts are exact duplicates of
    # the line above them — possibly intended to vary an argument; verify.
    assert parallel_poly_from_expr([1, x**2-1])[0] == [Poly(1, x), Poly(x**2-1, x)]
    assert parallel_poly_from_expr([1, x**2-1])[0] == [Poly(1, x), Poly(x**2-1, x)]
    assert parallel_poly_from_expr([1, Poly(x**2-1, x)])[0] == [Poly(1, x), Poly(x**2-1, x)]
    assert parallel_poly_from_expr([1, Poly(x**2-1, x)])[0] == [Poly(1, x), Poly(x**2-1, x)]
    assert parallel_poly_from_expr([x**2-1, 1])[0] == [Poly(x**2-1, x), Poly(1, x)]
    assert parallel_poly_from_expr([x**2-1, 1])[0] == [Poly(x**2-1, x), Poly(1, x)]
    assert parallel_poly_from_expr([Poly(x**2-1, x), 1])[0] == [Poly(x**2-1, x), Poly(1, x)]
    assert parallel_poly_from_expr([Poly(x**2-1, x), 1])[0] == [Poly(x**2-1, x), Poly(1, x)]
    assert parallel_poly_from_expr([Poly(x, x, y), Poly(y, x, y)], x, y, order='lex')[0] == \
        [Poly(x, x, y, domain='ZZ'), Poly(y, x, y, domain='ZZ')]
    raises(PolificationFailed, "parallel_poly_from_expr([0, 1])")
def test_pdiv():
    """Pseudo-division: pdiv/prem/pquo/pexquo as Poly methods and as
    functions, with gens given explicitly, as a tuple, or inferred, and the
    polys=True/False output switch; non-polynomial input raises."""
    f, g = x**2 - y**2, x - y
    q, r = x + y, 0
    F, G, Q, R = [ Poly(h, x, y) for h in (f, g, q, r) ]
    assert F.pdiv(G) == (Q, R)
    assert F.prem(G) == R
    assert F.pquo(G) == Q
    assert F.pexquo(G) == Q
    assert pdiv(f, g) == (q, r)
    assert prem(f, g) == r
    assert pquo(f, g) == q
    assert pexquo(f, g) == q
    assert pdiv(f, g, x, y) == (q, r)
    assert prem(f, g, x, y) == r
    assert pquo(f, g, x, y) == q
    assert pexquo(f, g, x, y) == q
    assert pdiv(f, g, (x,y)) == (q, r)
    assert prem(f, g, (x,y)) == r
    assert pquo(f, g, (x,y)) == q
    assert pexquo(f, g, (x,y)) == q
    assert pdiv(F, G) == (Q, R)
    assert prem(F, G) == R
    assert pquo(F, G) == Q
    assert pexquo(F, G) == Q
    assert pdiv(f, g, polys=True) == (Q, R)
    assert prem(f, g, polys=True) == R
    assert pquo(f, g, polys=True) == Q
    assert pexquo(f, g, polys=True) == Q
    assert pdiv(F, G, polys=False) == (q, r)
    assert prem(F, G, polys=False) == r
    assert pquo(F, G, polys=False) == q
    assert pexquo(F, G, polys=False) == q
    raises(ComputationFailed, "pdiv(4, 2)")
    raises(ComputationFailed, "prem(4, 2)")
    raises(ComputationFailed, "pquo(4, 2)")
    raises(ComputationFailed, "pexquo(4, 2)")
def test_div():
    """Polynomial division: div/rem/quo/exquo as Poly methods and functions,
    the polys= switch, domain/auto interaction (ZZ vs QQ coefficient
    handling), ExactQuotientFailed on inexact division, and that dividing
    ZZ polys keeps the ZZ domain on the results."""
    f, g = x**2 - y**2, x - y
    q, r = x + y, 0
    F, G, Q, R = [ Poly(h, x, y) for h in (f, g, q, r) ]
    assert F.div(G) == (Q, R)
    assert F.rem(G) == R
    assert F.quo(G) == Q
    assert F.exquo(G) == Q
    assert div(f, g) == (q, r)
    assert rem(f, g) == r
    assert quo(f, g) == q
    assert exquo(f, g) == q
    assert div(f, g, x, y) == (q, r)
    assert rem(f, g, x, y) == r
    assert quo(f, g, x, y) == q
    assert exquo(f, g, x, y) == q
    assert div(f, g, (x,y)) == (q, r)
    assert rem(f, g, (x,y)) == r
    assert quo(f, g, (x,y)) == q
    assert exquo(f, g, (x,y)) == q
    assert div(F, G) == (Q, R)
    assert rem(F, G) == R
    assert quo(F, G) == Q
    assert exquo(F, G) == Q
    assert div(f, g, polys=True) == (Q, R)
    assert rem(f, g, polys=True) == R
    assert quo(f, g, polys=True) == Q
    assert exquo(f, g, polys=True) == Q
    assert div(F, G, polys=False) == (q, r)
    assert rem(F, G, polys=False) == r
    assert quo(F, G, polys=False) == q
    assert exquo(F, G, polys=False) == q
    raises(ComputationFailed, "div(4, 2)")
    raises(ComputationFailed, "rem(4, 2)")
    raises(ComputationFailed, "quo(4, 2)")
    raises(ComputationFailed, "exquo(4, 2)")
    # Over ZZ the division is "zero quotient" (qz, rz); with auto coercion
    # to QQ it succeeds with a rational quotient (qq, rq).
    f, g = x**2 + 1, 2*x - 4
    qz, rz = 0, x**2 + 1
    qq, rq = x/2 + 1, 5
    assert div(f, g) == (qq, rq)
    assert div(f, g, auto=True) == (qq, rq)
    assert div(f, g, auto=False) == (qz, rz)
    assert div(f, g, domain=ZZ) == (qz, rz)
    assert div(f, g, domain=QQ) == (qq, rq)
    assert div(f, g, domain=ZZ, auto=True) == (qq, rq)
    assert div(f, g, domain=ZZ, auto=False) == (qz, rz)
    assert div(f, g, domain=QQ, auto=True) == (qq, rq)
    assert div(f, g, domain=QQ, auto=False) == (qq, rq)
    assert rem(f, g) == rq
    assert rem(f, g, auto=True) == rq
    assert rem(f, g, auto=False) == rz
    assert rem(f, g, domain=ZZ) == rz
    assert rem(f, g, domain=QQ) == rq
    assert rem(f, g, domain=ZZ, auto=True) == rq
    assert rem(f, g, domain=ZZ, auto=False) == rz
    assert rem(f, g, domain=QQ, auto=True) == rq
    assert rem(f, g, domain=QQ, auto=False) == rq
    assert quo(f, g) == qq
    assert quo(f, g, auto=True) == qq
    assert quo(f, g, auto=False) == qz
    assert quo(f, g, domain=ZZ) == qz
    assert quo(f, g, domain=QQ) == qq
    assert quo(f, g, domain=ZZ, auto=True) == qq
    assert quo(f, g, domain=ZZ, auto=False) == qz
    assert quo(f, g, domain=QQ, auto=True) == qq
    assert quo(f, g, domain=QQ, auto=False) == qq
    f, g, q = x**2, 2*x, x/2
    assert exquo(f, g) == q
    assert exquo(f, g, auto=True) == q
    raises(ExactQuotientFailed, "exquo(f, g, auto=False)")
    raises(ExactQuotientFailed, "exquo(f, g, domain=ZZ)")
    assert exquo(f, g, domain=QQ) == q
    assert exquo(f, g, domain=ZZ, auto=True) == q
    raises(ExactQuotientFailed, "exquo(f, g, domain=ZZ, auto=False)")
    assert exquo(f, g, domain=QQ, auto=True) == q
    assert exquo(f, g, domain=QQ, auto=False) == q
    # Exact division of ZZ polys should not silently widen the domain.
    f, g = Poly(x**2), Poly(x)
    q, r = f.div(g)
    assert q.get_domain().is_ZZ and r.get_domain().is_ZZ
    r = f.rem(g)
    assert r.get_domain().is_ZZ
    q = f.quo(g)
    assert q.get_domain().is_ZZ
    q = f.exquo(g)
    assert q.get_domain().is_ZZ
def test_gcdex():
    """Extended Euclidean algorithm: half_gcdex/gcdex/invert as methods and
    functions, integer arguments, and DomainError when auto coercion to a
    field is disabled."""
    f, g = 2*x, x**2 - 16
    s, t, h = x/32, -Rational(1,16), 1
    F, G, S, T, H = [ Poly(u, x, domain='QQ') for u in (f, g, s, t, h) ]
    assert F.half_gcdex(G) == (S, H)
    assert F.gcdex(G) == (S, T, H)
    assert F.invert(G) == S
    assert half_gcdex(f, g) == (s, h)
    assert gcdex(f, g) == (s, t, h)
    assert invert(f, g) == s
    assert half_gcdex(f, g, x) == (s, h)
    assert gcdex(f, g, x) == (s, t, h)
    assert invert(f, g, x) == s
    assert half_gcdex(f, g, (x,)) == (s, h)
    assert gcdex(f, g, (x,)) == (s, t, h)
    assert invert(f, g, (x,)) == s
    assert half_gcdex(F, G) == (S, H)
    assert gcdex(F, G) == (S, T, H)
    assert invert(F, G) == S
    assert half_gcdex(f, g, polys=True) == (S, H)
    assert gcdex(f, g, polys=True) == (S, T, H)
    assert invert(f, g, polys=True) == S
    assert half_gcdex(F, G, polys=False) == (s, h)
    assert gcdex(F, G, polys=False) == (s, t, h)
    assert invert(F, G, polys=False) == s
    assert half_gcdex(100, 2004) == (-20, 4)
    assert gcdex(100, 2004) == (-20, 1, 4)
    assert invert(3, 7) == 5
    raises(DomainError, "half_gcdex(x + 1, 2*x + 1, auto=False)")
    raises(DomainError, "gcdex(x + 1, 2*x + 1, auto=False)")
    raises(DomainError, "invert(x + 1, 2*x + 1, auto=False)")
def test_revert():
    """Poly.revert(n): series reversion/inverse modulo x**n."""
    f = Poly(1 - x**2/2 + x**4/24 - x**6/720)
    g = Poly(61*x**6/720 + 5*x**4/24 + x**2/2 + 1)
    assert f.revert(8) == g
def test_subresultants():
    """subresultants(): subresultant PRS sequence as method/function, with
    the polys= switch; constant input raises ComputationFailed."""
    f, g, h = x**2 - 2*x + 1, x**2 - 1, 2*x - 2
    F, G, H = Poly(f), Poly(g), Poly(h)
    assert F.subresultants(G) == [F, G, H]
    assert subresultants(f, g) == [f, g, h]
    assert subresultants(f, g, x) == [f, g, h]
    assert subresultants(f, g, (x,)) == [f, g, h]
    assert subresultants(F, G) == [F, G, H]
    assert subresultants(f, g, polys=True) == [F, G, H]
    assert subresultants(F, G, polys=False) == [f, g, h]
    raises(ComputationFailed, "subresultants(4, 2)")
def test_resultant():
    """resultant(): zero for polys with a common root, and symbolic
    coefficients (a, b) are supported."""
    f, g, h = x**2 - 2*x + 1, x**2 - 1, 0
    F, G = Poly(f), Poly(g)
    assert F.resultant(G) == h
    assert resultant(f, g) == h
    assert resultant(f, g, x) == h
    assert resultant(f, g, (x,)) == h
    assert resultant(F, G) == h
    assert resultant(f, g, polys=True) == h
    assert resultant(F, G, polys=False) == h
    f, g, h = x - a, x - b, a - b
    F, G, H = Poly(f), Poly(g), Poly(h)
    assert F.resultant(G) == H
    assert resultant(f, g) == h
    assert resultant(f, g, x) == h
    assert resultant(f, g, (x,)) == h
    assert resultant(F, G) == H
    assert resultant(f, g, polys=True) == H
    assert resultant(F, G, polys=False) == h
    raises(ComputationFailed, "resultant(4, 2)")
def test_discriminant():
    """discriminant(): numeric cubic case and the classic quadratic
    discriminant b**2 - 4*a*c with symbolic coefficients."""
    f, g = x**3 + 3*x**2 + 9*x - 13, -11664
    F = Poly(f)
    assert F.discriminant() == g
    assert discriminant(f) == g
    assert discriminant(f, x) == g
    assert discriminant(f, (x,)) == g
    assert discriminant(F) == g
    assert discriminant(f, polys=True) == g
    assert discriminant(F, polys=False) == g
    f, g = a*x**2 + b*x + c, b**2 - 4*a*c
    F, G = Poly(f), Poly(g)
    assert F.discriminant() == G
    assert discriminant(f) == g
    assert discriminant(f, x, a, b, c) == g
    assert discriminant(f, (x, a, b, c)) == g
    assert discriminant(F) == G
    assert discriminant(f, polys=True) == G
    assert discriminant(F, polys=False) == g
    raises(ComputationFailed, "discriminant(4)")
def test_gcd_list():
    """gcd_list(): GCD of a sequence of polys/integers; the empty list gives
    0 (S.Zero or a zero Poly when gens are supplied)."""
    F = [x**3 - 1, x**2 - 1, x**2 - 3*x + 2]
    assert gcd_list(F) == x - 1
    assert gcd_list(F, polys=True) == Poly(x - 1)
    assert gcd_list([]) == 0
    assert gcd_list([1, 2]) == 1
    assert gcd_list([4, 6, 8]) == 2
    gcd = gcd_list([], x)
    assert gcd.is_Number and gcd is S.Zero
    gcd = gcd_list([], x, polys=True)
    assert gcd.is_Poly and gcd.is_zero
    raises(ComputationFailed, "gcd_list([], polys=True)")
def test_lcm_list():
    """lcm_list(): LCM of a sequence of polys/integers; the empty list gives
    1 (S.One or a unit Poly when gens are supplied)."""
    F = [x**3 - 1, x**2 - 1, x**2 - 3*x + 2]
    assert lcm_list(F) == x**5 - x**4 - 2*x**3 - x**2 + x + 2
    assert lcm_list(F, polys=True) == Poly(x**5 - x**4 - 2*x**3 - x**2 + x + 2)
    assert lcm_list([]) == 1
    assert lcm_list([1, 2]) == 2
    assert lcm_list([4, 6, 8]) == 24
    lcm = lcm_list([], x)
    assert lcm.is_Number and lcm is S.One
    lcm = lcm_list([], x, polys=True)
    assert lcm.is_Poly and lcm.is_one
    raises(ComputationFailed, "lcm_list([], polys=True)")
def test_gcd():
    """cofactors/gcd/lcm: method and function forms, the polys= switch,
    float coefficients, plain integers, and modular (GF(p)) coefficients
    with symmetric and non-symmetric representation."""
    f, g = x**3 - 1, x**2 - 1
    s, t = x**2 + x + 1, x + 1
    h, r = x - 1, x**4 + x**3 - x - 1
    F, G, S, T, H, R = [ Poly(u) for u in (f, g, s, t, h, r) ]
    assert F.cofactors(G) == (H, S, T)
    assert F.gcd(G) == H
    assert F.lcm(G) == R
    assert cofactors(f, g) == (h, s, t)
    assert gcd(f, g) == h
    assert lcm(f, g) == r
    assert cofactors(f, g, x) == (h, s, t)
    assert gcd(f, g, x) == h
    assert lcm(f, g, x) == r
    assert cofactors(f, g, (x,)) == (h, s, t)
    assert gcd(f, g, (x,)) == h
    assert lcm(f, g, (x,)) == r
    assert cofactors(F, G) == (H, S, T)
    assert gcd(F, G) == H
    assert lcm(F, G) == R
    assert cofactors(f, g, polys=True) == (H, S, T)
    assert gcd(f, g, polys=True) == H
    assert lcm(f, g, polys=True) == R
    assert cofactors(F, G, polys=False) == (h, s, t)
    assert gcd(F, G, polys=False) == h
    assert lcm(F, G, polys=False) == r
    f, g = 1.0*x**2 - 1.0, 1.0*x - 1.0
    h, s, t = g, 1.0*x + 1.0, 1.0
    assert cofactors(f, g) == (h, s, t)
    assert gcd(f, g) == h
    assert lcm(f, g) == f
    # NOTE(review): this float block duplicates the previous five lines
    # exactly — possibly a leftover copy; verify intent.
    f, g = 1.0*x**2 - 1.0, 1.0*x - 1.0
    h, s, t = g, 1.0*x + 1.0, 1.0
    assert cofactors(f, g) == (h, s, t)
    assert gcd(f, g) == h
    assert lcm(f, g) == f
    assert cofactors(8, 6) == (2, 4, 3)
    assert gcd(8, 6) == 2
    assert lcm(8, 6) == 24
    f, g = x**2 - 3*x - 4, x**3 - 4*x**2 + x - 4
    l = x**4 - 3*x**3 - 3*x**2 - 3*x - 4
    h, s, t = x - 4, x + 1, x**2 + 1
    assert cofactors(f, g, modulus=11) == (h, s, t)
    assert gcd(f, g, modulus=11) == h
    assert lcm(f, g, modulus=11) == l
    f, g = x**2 + 8*x + 7, x**3 + 7*x**2 + x + 7
    l = x**4 + 8*x**3 + 8*x**2 + 8*x + 7
    h, s, t = x + 7, x + 1, x**2 + 1
    assert cofactors(f, g, modulus=11, symmetric=False) == (h, s, t)
    assert gcd(f, g, modulus=11, symmetric=False) == h
    assert lcm(f, g, modulus=11, symmetric=False) == l
def test_terms_gcd():
    """terms_gcd(): factors the GCD of all terms out of an expression,
    including rational and float coefficients, unevaluated Mul output for
    integer content, and the expand=False / deep=True options."""
    assert terms_gcd(1) == 1
    assert terms_gcd(1, x) == 1
    assert terms_gcd(x - 1) == x - 1
    assert terms_gcd(-x - 1) == -x - 1
    assert terms_gcd(2*x + 3) == 2*x + 3
    assert terms_gcd(6*x + 4) == Mul(2, 3*x + 2, evaluate=False)
    assert terms_gcd(x**3*y + x*y**3) == x*y*(x**2 + y**2)
    assert terms_gcd(2*x**3*y + 2*x*y**3) == 2*x*y*(x**2 + y**2)
    assert terms_gcd(x**3*y/2 + x*y**3/2) == x*y/2*(x**2 + y**2)
    assert terms_gcd(x**3*y + 2*x*y**3) == x*y*(x**2 + 2*y**2)
    assert terms_gcd(2*x**3*y + 4*x*y**3) == 2*x*y*(x**2 + 2*y**2)
    assert terms_gcd(2*x**3*y/3 + 4*x*y**3/5) == 2*x*y/15*(5*x**2 + 6*y**2)
    assert terms_gcd(2.0*x**3*y + 4.1*x*y**3) == x*y*(2.0*x**2 + 4.1*y**2)
    assert terms_gcd((3+3*x)*(x+x*y), expand=False) == \
        (3*x + 3)*(x*y + x)
    assert terms_gcd((3 + 3*x)*(x + x*sin(3 + 3*y)), expand=False, deep=True) == \
        3*x*(x + 1)*(sin(Mul(3, y + 1, evaluate=False)) + 1)
    assert terms_gcd(sin(x + x*y), deep=True) == \
        sin(x*(y + 1))
def test_trunc():
    """trunc(f, p): reduce coefficients modulo p (symmetric representation),
    as method and function, including a modular-domain Poly."""
    f, g = x**5 + 2*x**4 + 3*x**3 + 4*x**2 + 5*x + 6, x**5 - x**4 + x**2 - x
    F, G = Poly(f), Poly(g)
    assert F.trunc(3) == G
    assert trunc(f, 3) == g
    assert trunc(f, 3, x) == g
    assert trunc(f, 3, (x,)) == g
    assert trunc(F, 3) == G
    assert trunc(f, 3, polys=True) == G
    assert trunc(F, 3, polys=False) == g
    f, g = 6*x**5 + 5*x**4 + 4*x**3 + 3*x**2 + 2*x + 1, -x**4 + x**3 - x + 1
    F, G = Poly(f), Poly(g)
    assert F.trunc(3) == G
    assert trunc(f, 3) == g
    assert trunc(f, 3, x) == g
    assert trunc(f, 3, (x,)) == g
    assert trunc(F, 3) == G
    assert trunc(f, 3, polys=True) == G
    assert trunc(F, 3, polys=False) == g
    f = Poly(x**2 + 2*x + 3, modulus=5)
    assert f.trunc(2) == Poly(x**2 + 1, modulus=5)
def test_monic():
    """monic(): divide by the leading coefficient; auto=False requires exact
    division over the current domain, floats and modular domains work."""
    f, g = 2*x - 1, x - S(1)/2
    F, G = Poly(f, domain='QQ'), Poly(g)
    assert F.monic() == G
    assert monic(f) == g
    assert monic(f, x) == g
    assert monic(f, (x,)) == g
    assert monic(F) == G
    assert monic(f, polys=True) == G
    assert monic(F, polys=False) == g
    raises(ComputationFailed, "monic(4)")
    assert monic(2*x**2 + 6*x + 4, auto=False) == x**2 + 3*x + 2
    raises(ExactQuotientFailed, "monic(2*x + 6*x + 1, auto=False)")
    assert monic(2.0*x**2 + 6.0*x + 4.0) == 1.0*x**2 + 3.0*x + 2.0
    assert monic(2*x**2 + 3*x + 4, modulus=5) == x**2 - x + 2
def test_content():
    """content(): GCD of the coefficients; over GF(p) the content is 1."""
    f, F = 4*x + 2, Poly(4*x + 2)
    assert F.content() == 2
    assert content(f) == 2
    raises(ComputationFailed, "content(4)")
    f = Poly(2*x, modulus=3)
    assert f.content() == 1
def test_primitive():
    """primitive(): (content, primitive part) pair; over GF(p) and RR the
    content is the domain's unit, and rationals are handled."""
    f, g = 4*x + 2, 2*x + 1
    F, G = Poly(f), Poly(g)
    assert F.primitive() == (2, G)
    assert primitive(f) == (2, g)
    assert primitive(f, x) == (2, g)
    assert primitive(f, (x,)) == (2, g)
    assert primitive(F) == (2, G)
    assert primitive(f, polys=True) == (2, G)
    assert primitive(F, polys=False) == (2, g)
    raises(ComputationFailed, "primitive(4)")
    f = Poly(2*x, modulus=3)
    g = Poly(2.0*x, domain=RR)
    assert f.primitive() == (1, f)
    assert g.primitive() == (1.0, g)
    assert primitive(S('-3*x/4 + y + 11/8')) == \
        S('(1/8, -6*x + 8*y + 11)')
def test_compose():
    """compose()/decompose(): functional composition g(h) and its inverse
    decomposition, plus multivariate composition with explicit gen order."""
    f = x**12+20*x**10+150*x**8+500*x**6+625*x**4-2*x**3-10*x+9
    g = x**4 - 2*x + 9
    h = x**3 + 5*x
    F, G, H = map(Poly, (f, g, h))
    assert G.compose(H) == F
    assert compose(g, h) == f
    assert compose(g, h, x) == f
    assert compose(g, h, (x,)) == f
    assert compose(G, H) == F
    assert compose(g, h, polys=True) == F
    assert compose(G, H, polys=False) == f
    assert F.decompose() == [G, H]
    assert decompose(f) == [g, h]
    assert decompose(f, x) == [g, h]
    assert decompose(f, (x,)) == [g, h]
    assert decompose(F) == [G, H]
    assert decompose(f, polys=True) == [G, H]
    assert decompose(F, polys=False) == [g, h]
    raises(ComputationFailed, "compose(4, 2)")
    raises(ComputationFailed, "decompose(4)")
    assert compose(x**2 - y**2, x - y, x, y) == x**2 - 2*x*y
    assert compose(x**2 - y**2, x - y, y, x) == -y**2 + 2*x*y
def test_shift():
    """Poly.shift(a): substitute x -> x + a."""
    assert Poly(x**2 - 2*x + 1, x).shift(2) == Poly(x**2 + 2*x + 1, x)
def test_sturm():
    """sturm(): Sturm sequence as method/function; requires a field domain
    (auto=False over ZZ raises DomainError), including a ZZ(pi) example."""
    f, F = x, Poly(x, domain='QQ')
    g, G = 1, Poly(1, x, domain='QQ')
    assert F.sturm() == [F, G]
    assert sturm(f) == [f, g]
    assert sturm(f, x) == [f, g]
    assert sturm(f, (x,)) == [f, g]
    assert sturm(F) == [F, G]
    assert sturm(f, polys=True) == [F, G]
    assert sturm(F, polys=False) == [f, g]
    raises(ComputationFailed, "sturm(4)")
    raises(DomainError, "sturm(f, auto=False)")
    f = Poly(S(1024)/(15625*pi**8)*x**5 \
        - S(4096)/(625*pi**8)*x**4 \
        + S(32)/(15625*pi**4)*x**3 \
        - S(128)/(625*pi**4)*x**2 \
        + S(1)/62500*x \
        - S(1)/625, x, domain='ZZ(pi)')
    assert sturm(f) == \
        [Poly(x**3 - 100*x**2 + pi**4/64*x - 25*pi**4/16, x, domain='ZZ(pi)'),
         Poly(3*x**2 - 200*x + pi**4/64, x, domain='ZZ(pi)'),
         Poly((S(20000)/9 - pi**4/96)*x + 25*pi**4/18, x, domain='ZZ(pi)'),
         Poly((-3686400000000*pi**4 - 11520000*pi**8 - 9*pi**12)/(26214400000000 - 245760000*pi**4 + 576*pi**8), x, domain='ZZ(pi)')]
def test_gff():
    """gff_list(): greatest factorial factorization; plain gff() is not
    implemented and raises."""
    f = x**5 + 2*x**4 - x**3 - 2*x**2
    assert Poly(f).gff_list() == [(Poly(x), 1), (Poly(x + 2), 4)]
    assert gff_list(f) == [(x, 1), (x + 2, 4)]
    raises(NotImplementedError, "gff(f)")
    f = x*(x - 1)**3*(x - 2)**2*(x - 4)**2*(x - 5)
    assert Poly(f).gff_list() == [(Poly(x**2 - 5*x + 4), 1), (Poly(x**2 - 5*x + 4), 2), (Poly(x), 3)]
    assert gff_list(f) == [(x**2 - 5*x + 4, 1), (x**2 - 5*x + 4, 2), (x, 3)]
    raises(NotImplementedError, "gff(f)")
def test_sqf_norm():
    """sqf_norm(): returns (shift count s, shifted poly, square-free norm)
    over an algebraic extension."""
    assert sqf_norm(x**2-2, extension=sqrt(3)) == \
        (1, x**2 - 2*sqrt(3)*x + 1, x**4 - 10*x**2 + 1)
    assert sqf_norm(x**2-3, extension=sqrt(2)) == \
        (1, x**2 - 2*sqrt(2)*x - 1, x**4 - 10*x**2 + 1)
    assert Poly(x**2-2, extension=sqrt(3)).sqf_norm() == \
        (1, Poly(x**2 - 2*sqrt(3)*x + 1, x, extension=sqrt(3)),
            Poly(x**4 - 10*x**2 + 1, x, domain='QQ'))
    assert Poly(x**2-3, extension=sqrt(2)).sqf_norm() == \
        (1, Poly(x**2 - 2*sqrt(2)*x - 1, x, extension=sqrt(2)),
            Poly(x**4 - 10*x**2 + 1, x, domain='QQ'))
def test_sqf():
    """Square-free decomposition: sqf_part/sqf_list/sqf as methods and
    functions, rational-function input, unevaluated content output, and
    huge multiplicities handled symbolically."""
    f = x**5 - x**3 - x**2 + 1
    g = x**3 + 2*x**2 + 2*x + 1
    h = x - 1
    p = x**4 + x**3 - x - 1
    F, G, H, P = map(Poly, (f, g, h, p))
    assert F.sqf_part() == P
    assert sqf_part(f) == p
    assert sqf_part(f, x) == p
    assert sqf_part(f, (x,)) == p
    assert sqf_part(F) == P
    assert sqf_part(f, polys=True) == P
    assert sqf_part(F, polys=False) == p
    assert F.sqf_list() == (1, [(G, 1), (H, 2)])
    assert sqf_list(f) == (1, [(g, 1), (h, 2)])
    assert sqf_list(f, x) == (1, [(g, 1), (h, 2)])
    assert sqf_list(f, (x,)) == (1, [(g, 1), (h, 2)])
    assert sqf_list(F) == (1, [(G, 1), (H, 2)])
    assert sqf_list(f, polys=True) == (1, [(G, 1), (H, 2)])
    assert sqf_list(F, polys=False) == (1, [(g, 1), (h, 2)])
    assert F.sqf_list_include() == [(G, 1), (H, 2)]
    raises(ComputationFailed, "sqf_part(4)")
    assert sqf(1) == 1
    assert sqf_list(1) == (1, [])
    assert sqf((2*x**2 + 2)**7) == 128*(x**2 + 1)**7
    assert sqf(f) == g*h**2
    assert sqf(f, x) == g*h**2
    assert sqf(f, (x,)) == g*h**2
    d = x**2 + y**2
    assert sqf(f/d) == (g*h**2)/d
    assert sqf(f/d, x) == (g*h**2)/d
    assert sqf(f/d, (x,)) == (g*h**2)/d
    assert sqf(x - 1) == x - 1
    assert sqf(-x - 1) == -x - 1
    assert sqf(x - 1) == x - 1
    assert sqf(6*x - 10) == Mul(2, 3*x - 5, evaluate=False)
    assert sqf((6*x - 10)/(3*x - 6)) == S(2)/3*((3*x - 5)/(x - 2))
    assert sqf(Poly(x**2 - 2*x + 1)) == (x - 1)**2
    f = 3 + x - x*(1 + x) + x**2
    assert sqf(f) == 3
    f = (x**2 + 2*x + 1)**20000000000
    assert sqf(f) == (x + 1)**40000000000
    assert sqf_list(f) == (1, [(x + 1, 40000000000)])
def test_factor():
    """factor()/factor_list() over QQ, algebraic extensions, modular domains
    and assorted special cases (radicals, relational args, flags)."""
    f = x**5 - x**3 - x**2 + 1
    u = x + 1
    v = x - 1
    w = x**2 + x + 1
    F, U, V, W = map(Poly, (f, u, v, w))
    # f == u * v**2 * w over the rationals
    assert F.factor_list() == (1, [(U, 1), (V, 2), (W, 1)])
    assert factor_list(f) == (1, [(u, 1), (v, 2), (w, 1)])
    assert factor_list(f, x) == (1, [(u, 1), (v, 2), (w, 1)])
    assert factor_list(f, (x,)) == (1, [(u, 1), (v, 2), (w, 1)])
    assert factor_list(F) == (1, [(U, 1), (V, 2), (W, 1)])
    assert factor_list(f, polys=True) == (1, [(U, 1), (V, 2), (W, 1)])
    assert factor_list(F, polys=False) == (1, [(u, 1), (v, 2), (w, 1)])
    assert F.factor_list_include() == [(U, 1), (V, 2), (W, 1)]
    assert factor_list(1) == (1, [])
    assert factor_list(6) == (6, [])
    # non-polynomial powers factor into (base, exponent) pairs
    assert factor_list(sqrt(3), x) == (1, [(3, S.Half)])
    assert factor_list((-1)**x, x) == (1, [(-1, x)])
    assert factor_list((2*x)**y, x) == (1, [(2, y), (x, y)])
    assert factor_list(sqrt(x*y),x) == (1, [(x*y, S.Half)])
    assert factor(1) == 1
    assert factor(6) == 6
    assert factor_list(3*x) == (3, [(x, 1)])
    assert factor_list(3*x**2) == (3, [(x, 2)])
    assert factor(3*x) == 3*x
    assert factor(3*x**2) == 3*x**2
    assert factor((2*x**2 + 2)**7) == 128*(x**2 + 1)**7
    assert factor(f) == u*v**2*w
    assert factor(f, x) == u*v**2*w
    assert factor(f, (x,)) == u*v**2*w
    g, p, q, r = x**2 - y**2, x - y, x + y, x**2 + 1
    assert factor(f/g) == (u*v**2*w)/(p*q)
    assert factor(f/g, x) == (u*v**2*w)/(p*q)
    assert factor(f/g, (x,)) == (u*v**2*w)/(p*q)
    # symbols with assumptions influence factoring under radicals
    p = Symbol('p', positive=True)
    i = Symbol('i', integer=True)
    r = Symbol('r', real=True)
    assert factor(sqrt(x*y)).is_Pow == True
    assert factor(sqrt(3*x**2 - 3)) == sqrt(3)*sqrt((x - 1)*(x + 1))
    assert factor(sqrt(3*x**2 + 3)) == sqrt(3)*sqrt(x**2 + 1)
    # integer exponents distribute over the factors; symbolic ones do not
    assert factor((y*x**2 - y)**i) == y**i*(x - 1)**i*(x + 1)**i
    assert factor((y*x**2 + y)**i) == y**i*(x**2 + 1)**i
    assert factor((y*x**2 - y)**t) == (y*(x - 1)*(x + 1))**t
    assert factor((y*x**2 + y)**t) == (y*(x**2 + 1))**t
    f = sqrt(expand((r**2 + 1)*(p + 1)*(p - 1)*(p - 2)**3))
    g = sqrt((p - 2)**3*(p - 1))*sqrt(p + 1)*sqrt(r**2 + 1)
    assert factor(f) == g
    assert factor(g) == g
    f = sqrt(expand((x - 1)**5*(r**2 + 1)))
    g = sqrt(r**2 + 1)*(x - 1)**(S(5)/2)
    assert factor(f) == g
    assert factor(g) == g
    # EX domain: polynomial is irreducible as far as factor_list can tell
    f = Poly(sin(1)*x + 1, x, domain=EX)
    assert f.factor_list() == (1, [(f, 1)])
    # algebraic extensions: x**4 + 1 splits over QQ(I) and QQ(sqrt(2))
    f = x**4 + 1
    assert factor(f) == f
    assert factor(f, extension=I) == (x**2 - I)*(x**2 + I)
    assert factor(f, gaussian=True) == (x**2 - I)*(x**2 + I)
    assert factor(f, extension=sqrt(2)) == (x**2 + sqrt(2)*x + 1)*(x**2 - sqrt(2)*x + 1)
    f = x**2 + 2*sqrt(2)*x + 2
    assert factor(f, extension=sqrt(2)) == (x + sqrt(2))**2
    assert factor(f**3, extension=sqrt(2)) == (x + sqrt(2))**6
    assert factor(x**2 - 2*y**2, extension=sqrt(2)) == \
        (x + sqrt(2)*y)*(x - sqrt(2)*y)
    assert factor(2*x**2 - 4*y**2, extension=sqrt(2)) == \
        2*((x + sqrt(2)*y)*(x - sqrt(2)*y))
    assert factor(x - 1) == x - 1
    assert factor(-x - 1) == -x - 1
    # modular factorization, symmetric vs non-symmetric representation
    assert factor(x**11 + x + 1, modulus=65537, symmetric=True) == \
        (x**2 + x + 1)*(x**9 - x**8 + x**6 - x**5 + x**3 - x** 2 + 1)
    assert factor(x**11 + x + 1, modulus=65537, symmetric=False) == \
        (x**2 + x + 1)*(x**9 + 65536*x**8 + x**6 + 65536*x**5 + x**3 + 65536*x** 2 + 1)
    f = x/pi + x*sin(x)/pi
    g = y/(pi**2 + 2*pi + 1) + y*sin(x)/(pi**2 + 2*pi + 1)
    assert factor(f) == x*(sin(x) + 1)/pi
    assert factor(g) == y*(sin(x) + 1)/(pi + 1)**2
    # relational and container arguments are factored element-wise
    assert factor(Eq(x**2 + 2*x + 1, x**3 + 1)) == Eq((x + 1)**2, (x + 1)*(x**2 - x + 1))
    f = (x**2 - 1)/(x**2 + 4*x + 4)
    assert factor(f) == (x + 1)*(x - 1)/(x + 2)**2
    assert factor(f, x) == (x + 1)*(x - 1)/(x + 2)**2
    f = 3 + x - x*(1 + x) + x**2
    assert factor(f) == 3
    assert factor(f, x) == 3
    assert factor(1/(x**2 + 2*x + 1/x) - 1) == -((1 - x + 2*x**2 + x**3)/(1 + 2*x**2 + x**3))
    assert factor(f, expand=False) == f
    raises(PolynomialError, "factor(f, x, expand=False)")
    raises(FlagError, "factor(x**2 - 1, polys=True)")
    assert factor([x, Eq(x**2 - y**2, Tuple(x**2 - z**2, 1/x + 1/y))]) == \
        [x, Eq((x - y)*(x + y), Tuple((x - z)*(x + z), (x + y)/x/y))]
    # Poly vs PurePoly round-trips through factor_list
    assert not isinstance(Poly(x**3 + x + 1).factor_list()[1][0][0], PurePoly) == True
    assert isinstance(PurePoly(x**3 + x + 1).factor_list()[1][0][0], PurePoly) == True
    assert factor(sqrt(-x)) == sqrt(-x)
    # issue 2818
    e = (-2*x*(-x + 1)*(x - 1)*(-x*(-x + 1)*(x - 1) - x*(x - 1)**2)*(x**2*(x -
        1) - x*(x - 1) - x) - (-2*x**2*(x - 1)**2 - x*(-x + 1)*(-x*(-x + 1) +
        x*(x - 1)))*(x**2*(x - 1)**4 - x*(-x*(-x + 1)*(x - 1) - x*(x - 1)**2)))
    assert factor(e) == 0
def test_factor_large():
    """Factorization with huge exponents must stay symbolic (no expansion)."""
    f = (x**2 + 4*x + 4)**10000000*(x**2 + 1)*(x**2 + 2*x + 1)**1234567
    g = ((x**2 + 2*x + 1)**3000*y**2 + (x**2 + 2*x + 1)**3000*2*y + (x**2 + 2*x + 1)**3000)
    assert factor(f) == (x + 2)**20000000*(x**2 + 1)*(x + 1)**2469134
    assert factor(g) == (x + 1)**6000*(y + 1)**2
    assert factor_list(f) == (1, [(x + 1, 2469134), (x + 2, 20000000), (x**2 + 1, 1)])
    assert factor_list(g) == (1, [(y + 1, 2), (x + 1, 6000)])
    f = (x**2 - y**2)**200000*(x**7 + 1)
    g = (x**2 + y**2)**200000*(x**7 + 1)
    assert factor(f) == \
        (x + 1)*(x - y)**200000*(x + y)**200000*(x**6 - x**5 + x**4 - x**3 + x**2 - x + 1)
    # over the Gaussian rationals x**2 + y**2 also splits
    assert factor(g, gaussian=True) == \
        (x + 1)*(x - I*y)**200000*(x + I*y)**200000*(x**6 - x**5 + x**4 - x**3 + x**2 - x + 1)
    assert factor_list(f) == \
        (1, [(x + 1, 1), (x - y, 200000), (x + y, 200000), (x**6 - x**5 + x**4 - x**3 + x**2 - x + 1, 1)])
    assert factor_list(g, gaussian=True) == \
        (1, [(x + 1, 1), (x - I*y, 200000), (x + I*y, 200000), (x**6 - x**5 + x**4 - x**3 + x**2 - x + 1, 1)])
@XFAIL
def test_factor_noeval():
    """Expected failure: factor() should keep content unevaluated, like sqf()."""
    assert factor(6*x - 10) == 2*(3*x - 5)
    assert factor((6*x - 10)/(3*x - 6)) == S(2)/3*((3*x - 5)/(x - 2))
def test_intervals():
    """Real/complex root isolation intervals: sqf mode, eps refinement,
    inf/sup bounds, multi-polynomial input and error handling."""
    assert intervals(0) == []
    assert intervals(1) == []
    # sqf=True returns bare intervals; default also returns multiplicities
    assert intervals(x, sqf=True) == [(0, 0)]
    assert intervals(x) == [((0, 0), 1)]
    assert intervals(x**128) == [((0, 0), 128)]
    assert intervals([x**2, x**4]) == [((0, 0), {0: 2, 1: 4})]
    f = Poly((2*x/5 - S(17)/3)*(4*x + S(1)/257))
    assert f.intervals(sqf=True) == [(-1, 0), (14, 15)]
    assert f.intervals() == [((-1, 0), 1), ((14, 15), 1)]
    assert f.intervals(fast=True, sqf=True) == [(-1, 0), (14, 15)]
    assert f.intervals(fast=True) == [((-1, 0), 1), ((14, 15), 1)]
    # eps shrinks the isolating intervals; rational and float eps agree
    assert f.intervals(eps=S(1)/10) == f.intervals(eps=0.1) == \
        [((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert f.intervals(eps=S(1)/100) == f.intervals(eps=0.01) == \
        [((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert f.intervals(eps=S(1)/1000) == f.intervals(eps=0.001) == \
        [((-S(1)/1005, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert f.intervals(eps=S(1)/10000) == f.intervals(eps=0.0001) == \
        [((-S(1)/1028, -S(1)/1028), 1), ((S(85)/6, S(85)/6), 1)]
    # same checks through the expression-level API
    f = (2*x/5 - S(17)/3)*(4*x + S(1)/257)
    assert intervals(f, sqf=True) == [(-1, 0), (14, 15)]
    assert intervals(f) == [((-1, 0), 1), ((14, 15), 1)]
    assert intervals(f, eps=S(1)/10) == intervals(f, eps=0.1) == \
        [((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert intervals(f, eps=S(1)/100) == intervals(f, eps=0.01) == \
        [((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert intervals(f, eps=S(1)/1000) == intervals(f, eps=0.001) == \
        [((-S(1)/1005, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert intervals(f, eps=S(1)/10000) == intervals(f, eps=0.0001) == \
        [((-S(1)/1028, -S(1)/1028), 1), ((S(85)/6, S(85)/6), 1)]
    f = Poly((x**2 - 2)*(x**2-3)**7*(x+1)*(7*x+3)**3)
    assert f.intervals() == \
        [((-2, -S(3)/2), 7), ((-S(3)/2, -1), 1),
            ((-1, -1), 1), ((-1, 0), 3),
            ((1, S(3)/2), 1), ((S(3)/2, 2), 7)]
    # joint isolation for lists of polynomials: dict maps index -> multiplicity
    assert intervals([x**5 - 200, x**5 - 201]) == \
        [((S(75)/26, S(101)/35), {0: 1}), ((S(283)/98, S(26)/9), {1: 1})]
    assert intervals([x**5 - 200, x**5 - 201], fast=True) == \
        [((S(75)/26, S(101)/35), {0: 1}), ((S(283)/98, S(26)/9), {1: 1})]
    assert intervals([x**2 - 200, x**2 - 201]) == \
        [((-S(71)/5, -S(85)/6), {1: 1}), ((-S(85)/6, -14), {0: 1}), ((14, S(85)/6), {0: 1}), ((S(85)/6, S(71)/5), {1: 1})]
    assert intervals([x+1, x+2, x-1, x+1, 1, x-1, x-1, (x-2)**2]) == \
        [((-2, -2), {1: 1}), ((-1, -1), {0: 1, 3: 1}), ((1, 1), {2: 1, 5: 1, 6: 1}), ((2, 2), {7: 2})]
    # inf/sup restrict the search range
    f, g, h = x**2 - 2, x**4 - 4*x**2 + 4, x - 1
    assert intervals(f, inf=S(7)/4, sqf=True) == []
    assert intervals(f, inf=S(7)/5, sqf=True) == [(S(7)/5, S(3)/2)]
    assert intervals(f, sup=S(7)/4, sqf=True) == [(-2, -1), (1, S(3)/2)]
    assert intervals(f, sup=S(7)/5, sqf=True) == [(-2, -1)]
    assert intervals(g, inf=S(7)/4) == []
    assert intervals(g, inf=S(7)/5) == [((S(7)/5, S(3)/2), 2)]
    assert intervals(g, sup=S(7)/4) == [((-2, -1), 2), ((1, S(3)/2), 2)]
    assert intervals(g, sup=S(7)/5) == [((-2, -1), 2)]
    assert intervals([g, h], inf=S(7)/4) == []
    assert intervals([g, h], inf=S(7)/5) == [((S(7)/5, S(3)/2), {0: 2})]
    assert intervals([g, h], sup=S(7)/4) == [((-2, -1), {0: 2}), ((1, 1), {1: 1}), ((1, S(3)/2), {0: 2})]
    assert intervals([g, h], sup=S(7)/5) == [((-2, -1), {0: 2}), ((1, 1), {1: 1})]
    # strict=True forces disjoint open intervals between different polys
    assert intervals([x+2, x**2 - 2]) == \
        [((-2, -2), {0: 1}), ((-2, -1), {1: 1}), ((1, 2), {1: 1})]
    assert intervals([x+2, x**2 - 2], strict=True) == \
        [((-2, -2), {0: 1}), ((-S(3)/2, -1), {1: 1}), ((1, 2), {1: 1})]
    # all=True also isolates complex roots as rectangles
    f = 7*z**4 - 19*z**3 + 20*z**2 + 17*z + 20
    assert intervals(f) == []
    real_part, complex_part = intervals(f, all=True, sqf=True)
    assert real_part == []
    assert all(re(a) < re(r) < re(b) and im(a) < im(r) < im(b) for (a, b), r in zip(complex_part, nroots(f)))
    assert complex_part == [(-S(40)/7 - 40*I/7, 0), (-S(40)/7, 40*I/7),
                            (-40*I/7, S(40)/7), (0, S(40)/7 + 40*I/7)]
    real_part, complex_part = intervals(f, all=True, sqf=True, eps=S(1)/10)
    assert real_part == []
    assert all(re(a) < re(r) < re(b) and im(a) < im(r) < im(b) for (a, b), r in zip(complex_part, nroots(f)))
    # absurdly small eps is rejected in all APIs
    raises(ValueError, "intervals(x**2 - 2, eps=10**-100000)")
    raises(ValueError, "Poly(x**2 - 2).intervals(eps=10**-100000)")
    raises(ValueError, "intervals([x**2 - 2, x**2 - 3], eps=10**-100000)")
def test_refine_root():
    """Root interval refinement: step count, eps targets and error cases."""
    f = Poly(x**2 - 2)
    # steps=0 leaves the interval untouched; steps=None does one refinement
    assert f.refine_root(1, 2, steps=0) == (1, 2)
    assert f.refine_root(-2, -1, steps=0) == (-2, -1)
    assert f.refine_root(1, 2, steps=None) == (1, S(3)/2)
    assert f.refine_root(-2, -1, steps=None) == (-S(3)/2, -1)
    assert f.refine_root(1, 2, steps=1) == (1, S(3)/2)
    assert f.refine_root(-2, -1, steps=1) == (-S(3)/2, -1)
    assert f.refine_root(1, 2, steps=1, fast=True) == (1, S(3)/2)
    assert f.refine_root(-2, -1, steps=1, fast=True) == (-S(3)/2, -1)
    assert f.refine_root(1, 2, eps=S(1)/100) == (S(24)/17, S(17)/12)
    assert f.refine_root(1, 2, eps=1e-2) == (S(24)/17, S(17)/12)
    # non-square-free polynomials and root-free intervals are rejected
    raises(PolynomialError, "(f**2).refine_root(1, 2, check_sqf=True)")
    raises(RefinementFailed, "(f**2).refine_root(1, 2)")
    raises(RefinementFailed, "(f**2).refine_root(2, 3)")
    f = x**2 - 2
    assert refine_root(f, 1, 2, steps=1) == (1, S(3)/2)
    assert refine_root(f, -2, -1, steps=1) == (-S(3)/2, -1)
    assert refine_root(f, 1, 2, steps=1, fast=True) == (1, S(3)/2)
    assert refine_root(f, -2, -1, steps=1, fast=True) == (-S(3)/2, -1)
    assert refine_root(f, 1, 2, eps=S(1)/100) == (S(24)/17, S(17)/12)
    assert refine_root(f, 1, 2, eps=1e-2) == (S(24)/17, S(17)/12)
    raises(PolynomialError, "refine_root(1, 7, 8, eps=S(1)/100)")
    raises(ValueError, "Poly(f).refine_root(1, 2, eps=10**-100000)")
    raises(ValueError, "refine_root(f, 1, 2, eps=10**-100000)")
def test_count_roots():
    """count_roots() over real and complex bounds (inf/sup may be imaginary)."""
    assert count_roots(x**2 - 2) == 2
    assert count_roots(x**2 - 2, inf=-oo) == 2
    assert count_roots(x**2 - 2, sup=+oo) == 2
    assert count_roots(x**2 - 2, inf=-oo, sup=+oo) == 2
    assert count_roots(x**2 - 2, inf=-2) == 2
    assert count_roots(x**2 - 2, inf=-1) == 1
    assert count_roots(x**2 - 2, sup=1) == 1
    assert count_roots(x**2 - 2, sup=2) == 2
    assert count_roots(x**2 - 2, inf=-1, sup=1) == 0
    assert count_roots(x**2 - 2, inf=-2, sup=2) == 2
    assert count_roots(x**2 - 2, inf=-1, sup=1) == 0
    assert count_roots(x**2 - 2, inf=-2, sup=2) == 2
    # x**2 + 2 has no real roots; imaginary bounds count complex ones
    assert count_roots(x**2 + 2) == 0
    assert count_roots(x**2 + 2, inf=-2*I) == 2
    assert count_roots(x**2 + 2, sup=+2*I) == 2
    assert count_roots(x**2 + 2, inf=-2*I, sup=+2*I) == 2
    assert count_roots(x**2 + 2, inf=0) == 0
    assert count_roots(x**2 + 2, sup=0) == 0
    assert count_roots(x**2 + 2, inf=-I) == 1
    assert count_roots(x**2 + 2, sup=+I) == 1
    assert count_roots(x**2 + 2, inf=+I/2, sup=+I) == 0
    assert count_roots(x**2 + 2, inf=-I, sup=-I/2) == 0
    raises(PolynomialError, "count_roots(1)")
def test_Poly_root():
    """Poly.root(k): indexed roots, multiplicity repetition, RootOf fallback."""
    f = Poly(2*x**3 - 7*x**2 + 4*x + 4)
    assert f.root(0) == -S(1)/2
    # the double root 2 occupies two consecutive indices
    assert f.root(1) == 2
    assert f.root(2) == 2
    raises(IndexError, "f.root(3)")
    assert Poly(x**5 + x + 1).root(0) == RootOf(x**3 - x**2 + 1, 0)
def test_real_roots():
    """real_roots(): ordered real roots, multiple=False gives (root, mult)."""
    assert real_roots(x) == [0]
    assert real_roots(x, multiple=False) == [(0, 1)]
    assert real_roots(x**3) == [0, 0, 0]
    assert real_roots(x**3, multiple=False) == [(0, 3)]
    # irrational roots are expressed as RootOf instances
    assert real_roots(x*(x**3 + x + 3)) == [RootOf(x**3 + x + 3, 0), 0]
    assert real_roots(x*(x**3 + x + 3), multiple=False) == [(RootOf(x**3 + x + 3, 0), 1), (0, 1)]
    assert real_roots(x**3*(x**3 + x + 3)) == [RootOf(x**3 + x + 3, 0), 0, 0, 0]
    assert real_roots(x**3*(x**3 + x + 3), multiple=False) == [(RootOf(x**3 + x + 3, 0), 1), (0, 3)]
    f = 2*x**3 - 7*x**2 + 4*x + 4
    g = x**3 + x + 1
    assert Poly(f).real_roots() == [-S(1)/2, 2, 2]
    assert Poly(g).real_roots() == [RootOf(g, 0)]
def test_all_roots():
    """all_roots(): rational roots plus complex RootOf instances."""
    f = 2*x**3 - 7*x**2 + 4*x + 4
    g = x**3 + x + 1
    assert Poly(f).all_roots() == [-S(1)/2, 2, 2]
    assert Poly(g).all_roots() == [RootOf(g, 0), RootOf(g, 1), RootOf(g, 2)]
def test_nroots():
    """Numerical root finding: precision control, error estimates, and
    rejection of multivariate/constant input."""
    assert Poly(0, x).nroots() == []
    assert Poly(1, x).nroots() == []
    assert Poly(x**2 - 1, x).nroots() == [-1.0, 1.0]
    assert Poly(x**2 + 1, x).nroots() == [-1.0*I, 1.0*I]
    # error=True returns (roots, error bound)
    roots, error = Poly(x**2 - 1, x).nroots(error=True)
    assert roots == [-1.0, 1.0] and error < 1e25;
    roots, error = Poly(x**2 + 1, x).nroots(error=True)
    assert roots == [-1.0*I, 1.0*I] and error < 1e25;
    roots, error = Poly(x**2/3 - S(1)/3, x).nroots(error=True)
    assert roots == [-1.0, 1.0] and error < 1e25;
    roots, error = Poly(x**2/3 + S(1)/3, x).nroots(error=True)
    assert roots == [-1.0*I, 1.0*I] and error < 1e25;
    assert Poly(x**2 + 2*I, x).nroots() == [-1.0 + 1.0*I, 1.0 - 1.0*I]
    assert Poly(x**2 + 2*I, x, extension=I).nroots() == [-1.0 + 1.0*I, 1.0 - 1.0*I]
    assert Poly(0.2*x + 0.1).nroots() == [-0.5]
    # n=5 digits: values match to 1e-5 but not to 1e-6
    roots = nroots(x**5 + x + 1, n=5)
    eps = Float("1e-5")
    assert re(roots[0]).epsilon_eq(-0.75487, eps) is True
    assert im(roots[0]) == 0.0
    assert re(roots[1]) == -0.5
    assert im(roots[1]).epsilon_eq(-0.86602, eps) is True
    assert re(roots[2]) == -0.5
    assert im(roots[2]).epsilon_eq(+0.86602, eps) is True
    assert re(roots[3]).epsilon_eq(+0.87743, eps) is True
    assert im(roots[3]).epsilon_eq(-0.74486, eps) is True
    assert re(roots[4]).epsilon_eq(+0.87743, eps) is True
    assert im(roots[4]).epsilon_eq(+0.74486, eps) is True
    eps = Float("1e-6")
    assert re(roots[0]).epsilon_eq(-0.75487, eps) is False
    assert im(roots[0]) == 0.0
    assert re(roots[1]) == -0.5
    assert im(roots[1]).epsilon_eq(-0.86602, eps) is False
    assert re(roots[2]) == -0.5
    assert im(roots[2]).epsilon_eq(+0.86602, eps) is False
    assert re(roots[3]).epsilon_eq(+0.87743, eps) is False
    assert im(roots[3]).epsilon_eq(-0.74486, eps) is False
    assert re(roots[4]).epsilon_eq(+0.87743, eps) is False
    assert im(roots[4]).epsilon_eq(+0.74486, eps) is False
    raises(DomainError, "Poly(x + y, x).nroots()")
    raises(MultivariatePolynomialError, "Poly(x + y).nroots()")
    assert nroots(x**2 - 1) == [-1.0, 1.0]
    roots, error = nroots(x**2 - 1, error=True)
    assert roots == [-1.0, 1.0] and error < 1e25;
    assert nroots(x + I) == [-1.0*I]
    assert nroots(x + 2*I) == [-2.0*I]
    raises(PolynomialError, "nroots(0)")
def test_ground_roots():
    """ground_roots(): rational roots with multiplicities as a dict."""
    f = x**6 - 4*x**4 + 4*x**3 - x**2
    assert Poly(f).ground_roots() == {S(1): 2, S(0): 2}
    assert ground_roots(f) == {S(1): 2, S(0): 2}
def test_nth_power_roots_poly():
    """nth_power_roots_poly(): polynomial whose roots are n-th powers of f's."""
    f = x**4 - x**2 + 1
    f_2 = (x**2 - x + 1)**2
    f_3 = (x**2 + 1)**2
    f_4 = (x**2 + x + 1)**2
    f_12 = (x - 1)**4
    assert nth_power_roots_poly(f, 1) == f
    # exponent must be a positive integer
    raises(ValueError, "nth_power_roots_poly(f, 0)")
    raises(ValueError, "nth_power_roots_poly(f, x)")
    assert factor(nth_power_roots_poly(f, 2)) == f_2
    assert factor(nth_power_roots_poly(f, 3)) == f_3
    assert factor(nth_power_roots_poly(f, 4)) == f_4
    assert factor(nth_power_roots_poly(f, 12)) == f_12
    raises(MultivariatePolynomialError, "nth_power_roots_poly(x + y, 2, x, y)")
def test_cancel():
    """cancel(): common-factor cancellation for pairs, fractions, domains
    with parameters, and algebraic extensions."""
    assert cancel(0) == 0
    assert cancel(7) == 7
    assert cancel(x) == x
    assert cancel(oo) == oo
    assert cancel((2, 3)) == (1, 2, 3)
    assert cancel((1, 0), x) == (1, 1, 0)
    assert cancel((0, 1), x) == (1, 0, 1)
    f, g, p, q = 4*x**2-4, 2*x-2, 2*x+2, 1
    F, G, P, Q = [ Poly(u, x) for u in (f, g, p, q) ]
    assert F.cancel(G) == (1, P, Q)
    assert cancel((f, g)) == (1, p, q)
    assert cancel((f, g), x) == (1, p, q)
    assert cancel((f, g), (x,)) == (1, p, q)
    assert cancel((F, G)) == (1, P, Q)
    assert cancel((f, g), polys=True) == (1, P, Q)
    assert cancel((F, G), polys=False) == (1, p, q)
    # greedy (default) leaves radical denominators alone; greedy=False cancels
    f = (x**2 - 2)/(x + sqrt(2))
    assert cancel(f) == f
    assert cancel(f, greedy=False) == x - sqrt(2)
    f = (x**2 - 2)/(x - sqrt(2))
    assert cancel(f) == f
    assert cancel(f, greedy=False) == x + sqrt(2)
    assert cancel((x**2/4 - 1, x/2 - 1)) == (S(1)/2, x + 2, 1)
    assert cancel((x**2-y)/(x-y)) == 1/(x - y)*(x**2 - y)
    assert cancel((x**2-y**2)/(x-y), x) == x + y
    assert cancel((x**2-y**2)/(x-y), y) == x + y
    assert cancel((x**2-y**2)/(x-y)) == x + y
    assert cancel((x**3-1)/(x**2-1)) == (x**2+x+1)/(x+1)
    assert cancel((x**3/2-S(1)/2)/(x**2-1)) == (x**2+x+1)/(2*x+2)
    assert cancel((exp(2*x) + 2*exp(x) + 1)/(exp(x) + 1)) == exp(x) + 1
    f = Poly(x**2 - a**2, x)
    g = Poly(x - a, x)
    F = Poly(x + a, x)
    G = Poly(1, x)
    assert cancel((f, g)) == (1, F, G)
    f = x**3 + (sqrt(2) - 2)*x**2 - (2*sqrt(2) + 3)*x - 3*sqrt(2)
    g = x**2 - 2
    assert cancel((f, g), extension=True) == (1, x**2 - 2*x - 3, x - sqrt(2))
    f = Poly(-2*x + 3, x)
    g = Poly(-x**9 + x**8 + x**6 - x**5 + 2*x**2 - 3*x + 1, x)
    assert cancel((f, g)) == (1, -f, -g)
    # mixed domains get unified (ZZ[x] lifted into ZZ(x))
    f = Poly(y, y, domain='ZZ(x)')
    g = Poly(1, y, domain='ZZ[x]')
    assert f.cancel(g) == (1, Poly(y, y, domain='ZZ(x)'), Poly(1, y, domain='ZZ(x)'))
    assert f.cancel(g, include=True) == (Poly(y, y, domain='ZZ(x)'), Poly(1, y, domain='ZZ(x)'))
    f = Poly(5*x*y + x, y, domain='ZZ(x)')
    g = Poly(2*x**2*y, y, domain='ZZ(x)')
    assert f.cancel(g, include=True) == (Poly(5*y + 1, y, domain='ZZ(x)'), Poly(2*x*y, y, domain='ZZ(x)'))
def test_reduced():
    """reduced()/GroebnerBasis.reduce(): division with remainder modulo a
    polynomial system, ideal-membership via contains()."""
    f = 2*x**4 + y**2 - x**2 + y**3
    G = [x**3 - x, y**3 - y]
    Q = [2*x, 1]
    r = x**2 + y**2 + y
    assert reduced(f, G) == (Q, r)
    assert reduced(f, G, x, y) == (Q, r)
    H = groebner(G)
    assert H.reduce(f) == (Q, r)
    Q = [Poly(2*x, x, y), Poly(1, x, y)]
    r = Poly(x**2 + y**2 + y, x, y)
    assert _strict_eq(reduced(f, G, polys=True), (Q, r))
    assert _strict_eq(reduced(f, G, x, y, polys=True), (Q, r))
    H = groebner(G, polys=True)
    assert _strict_eq(H.reduce(f), (Q, r))
    f = 2*x**3 + y**3 + 3*y
    G = groebner([x**2 + y**2 - 1, x*y - 2])
    Q = [x**2 - x*y**3/2 + x*y/2 + y**6/4 - y**4/2 + y**2/4, -y**5/4 + y**3/2 + 3*y/4]
    r = 0
    assert reduced(f, G) == (Q, r)
    assert G.reduce(f) == (Q, r)
    # without auto conversion to a field the remainder is nonzero
    assert reduced(f, G, auto=False)[1] != 0
    assert G.reduce(f, auto=False)[1] != 0
    # f is in the ideal (remainder 0), f + 1 is not
    assert G.contains(f) == True
    assert G.contains(f + 1) == False
    assert reduced(1, [1], x) == ([1], 0)
    raises(ComputationFailed, "reduced(1, [1])")
def test_groebner():
    """groebner(): basis computation under different orders, domains
    (including modular), and both buchberger and f5b backends."""
    assert groebner([], x, y, z) == []
    assert groebner([x**2 + 1, y**4*x + x**3],
        x, y, order='lex') == [1 + x**2, -1 + y**4]
    assert groebner([x**2 + 1, y**4*x + x**3, x*y*z**3],
        x, y, z, order='grevlex') == [-1 + y**4, z**3, 1 + x**2]
    assert groebner([x**2 + 1, y**4*x + x**3], x, y, order='lex', polys=True) == \
        [Poly(1 + x**2, x, y), Poly(-1 + y**4, x, y)]
    assert groebner([x**2 + 1, y**4*x + x**3, x*y*z**3], x, y, z, order='grevlex', polys=True) == \
        [Poly(-1 + y**4, x, y, z), Poly(z**3, x, y, z), Poly(1 + x**2, x, y, z)]
    assert groebner([x**3 - 1, x**2 - 1]) == [x - 1]
    # equations are accepted and moved to lhs - rhs form
    assert groebner([Eq(x**3, 1), Eq(x**2, 1)]) == [x - 1]
    F = [3*x**2 + y*z - 5*x - 1, 2*x + 3*x*y + y**2, x - 3*y + x*z - 2*z**2]
    f = z**9 - x**2*y**3 - 3*x*y**2*z + 11*y*z**2 + x**2*z**2 - 5
    G = groebner(F, x, y, z, modulus=7, symmetric=False)
    assert G == [1 + x + y + 3*z + 2*z**2 + 2*z**3 + 6*z**4 + z**5,
        1 + 3*y + y**2 + 6*z**2 + 3*z**3 + 3*z**4 + 3*z**5 + 4*z**6,
        1 + 4*y + 4*z + y*z + 4*z**3 + z**4 + z**6,
        6 + 6*z + z**2 + 4*z**3 + 3*z**4 + 6*z**5 + 3*z**6 + z**7]
    # division identity: f == sum(q_i * g_i) + r over GF(7)
    Q, r = reduced(f, G, x, y, z, modulus=7, symmetric=False, polys=True)
    assert sum([ q*g for q, g in zip(Q, G)]) + r == Poly(f, modulus=7)
    F = [x*y - 2*y, 2*y**2 - x**2]
    assert groebner(F, x, y, order='grevlex') == \
        [y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
    assert groebner(F, y, x, order='grevlex') == \
        [x**3 - 2*x**2, -x**2 + 2*y**2, x*y - 2*y]
    assert groebner(F, order='grevlex', field=True) == \
        [y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
    assert groebner([1], x) == [1]
    # inexact coefficient domains are rejected
    raises(DomainError, "groebner([x**2 + 2.0*y], x, y)")
    raises(ComputationFailed, "groebner([1])")
    assert groebner([x**2 - 1, x**3 + 1], method='buchberger') == [x + 1]
    assert groebner([x**2 - 1, x**3 + 1], method='f5b') == [x + 1]
    raises(ValueError, "groebner([x, y], method='unknown')")
def test_fglm():
    """FGLM basis conversion between monomial orders (grlex <-> lex)."""
    F = [a + b + c + d, a*b + a*d + b*c + b*d, a*b*c + a*b*d + a*c*d + b*c*d, a*b*c*d - 1]
    G = groebner(F, a, b, c, d, order='grlex')
    assert G.fglm('lex') == [
        4*a + 3*d**9 - 4*d**5 - 3*d,
        4*b + 4*c - 3*d**9 + 4*d**5 + 7*d,
        4*c**2 + 3*d**10 - 4*d**6 - 3*d**2,
        4*c*d**4 + 4*c - d**9 + 4*d**5 + 5*d,
        d**12 - d**8 - d**4 + 1,
    ]
    F = [9*x**8 + 36*x**7 - 32*x**6 - 252*x**5 - 78*x**4 + 468*x**3 + 288*x**2 - 108*x + 9,
        -72*t*x**7 - 252*t*x**6 + 192*t*x**5 + 1260*t*x**4 + 312*t*x**3 - 404*t*x**2 - 576*t*x + \
        108*t - 72*x**7 - 256*x**6 + 192*x**5 + 1280*x**4 + 312*x**3 - 576*x + 96]
    G = groebner(F, t, x, order='grlex')
    assert G.fglm('lex') == [
        203577793572507451707*t + 627982239411707112*x**7 - 666924143779443762*x**6 - \
        10874593056632447619*x**5 + 5119998792707079562*x**4 + 72917161949456066376*x**3 + \
        20362663855832380362*x**2 - 142079311455258371571*x + 183756699868981873194,
        9*x**8 + 36*x**7 - 32*x**6 - 252*x**5 - 78*x**4 + 468*x**3 + 288*x**2 - 108*x + 9,
    ]
    F = [x**2 - x - 3*y + 1, -2*x + y**2 + y - 1]
    G = groebner(F, x, y, order='lex')
    assert G.fglm('grlex') == [
        x**2 - x - 3*y + 1,
        y**2 - 2*x + y - 1,
    ]
def test_is_zero_dimensional():
    """is_zero_dimensional(): finitely many solutions vs positive dimension."""
    assert is_zero_dimensional([x, y], x, y) == True
    assert is_zero_dimensional([x**3 + y**2], x, y) == False
    assert is_zero_dimensional([x, y, z], x, y, z) == True
    # adding an unconstrained generator makes the ideal positive-dimensional
    assert is_zero_dimensional([x, y, z], x, y, z, t) == False
    F = [x*y - z, y*z - x, x*y - y]
    assert is_zero_dimensional(F, x, y, z) == True
    F = [x**2 - 2*x*z + 5, x*y**2 + y*z**3, 3*y**2 - 8*z**2]
    assert is_zero_dimensional(F, x, y, z) == True
def test_GroebnerBasis():
    """GroebnerBasis container API: indexing, slicing, attributes, equality
    against both expression and Poly representations."""
    F = [x*y - 2*y, 2*y**2 - x**2]
    G = groebner(F, x, y, order='grevlex')
    H = [y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
    P = [ Poly(h, x, y) for h in H ]
    assert isinstance(G, GroebnerBasis) == True
    assert len(G) == 3
    # default (polys=False): elements come back as plain expressions
    assert G[0] == H[0] and not G[0].is_Poly
    assert G[1] == H[1] and not G[1].is_Poly
    assert G[2] == H[2] and not G[2].is_Poly
    assert G[1:] == H[1:] and not any(g.is_Poly for g in G[1:])
    assert G[:2] == H[:2] and not any(g.is_Poly for g in G[1:])
    assert G.exprs == H
    assert G.polys == P
    assert G.gens == (x, y)
    assert G.domain == ZZ
    assert G.order == grevlex
    # equality works against expr/Poly lists and tuples alike
    assert G == H
    assert G == tuple(H)
    assert G == P
    assert G == tuple(P)
    assert G != []
    # polys=True: elements come back as Poly instances
    G = groebner(F, x, y, order='grevlex', polys=True)
    assert G[0] == P[0] and G[0].is_Poly
    assert G[1] == P[1] and G[1].is_Poly
    assert G[2] == P[2] and G[2].is_Poly
    assert G[1:] == P[1:] and all(g.is_Poly for g in G[1:])
    assert G[:2] == P[:2] and all(g.is_Poly for g in G[1:])
def test_poly():
    """poly(): automatic generator detection, wrt ordering, partial expansion."""
    assert poly(x) == Poly(x, x)
    assert poly(y) == Poly(y, y)
    assert poly(x + y) == Poly(x + y, x, y)
    # non-polynomial subexpressions become generators themselves
    assert poly(x + sin(x)) == Poly(x + sin(x), x, sin(x))
    # wrt moves the requested generator to the front
    assert poly(x + y, wrt=y) == Poly(x + y, y, x)
    assert poly(x + sin(x), wrt=sin(x)) == Poly(x + sin(x), sin(x), x)
    assert poly(x*y + 2*x*z**2 + 17) == Poly(x*y + 2*x*z**2 + 17, x, y, z)
    # products/powers are expanded just enough to build the Poly
    assert poly(2*(y + z)**2 - 1) == Poly(2*y**2 + 4*y*z + 2*z**2 - 1, y, z)
    assert poly(x*(y + z)**2 - 1) == Poly(x*y**2 + 2*x*y*z + x*z**2 - 1, x, y, z)
    assert poly(2*x*(y + z)**2 - 1) == Poly(2*x*y**2 + 4*x*y*z + 2*x*z**2 - 1, x, y, z)
    assert poly(2*(y + z)**2 - x - 1) == Poly(2*y**2 + 4*y*z + 2*z**2 - x - 1, x, y, z)
    assert poly(x*(y + z)**2 - x - 1) == Poly(x*y**2 + 2*x*y*z + x*z**2 - x - 1, x, y, z)
    assert poly(2*x*(y + z)**2 - x - 1) == Poly(2*x*y**2 + 4*x*y*z + 2*x*z**2 - x - 1, x, y, z)
    assert poly(x*y + (x + y)**2 + (x + z)**2) == \
        Poly(2*x*z + 3*x*y + y**2 + z**2 + 2*x**2, x, y, z)
    assert poly(x*y*(x + y)*(x + z)**2) == \
        Poly(x**3*y**2 + x*y**2*z**2 + y*x**2*z**2 + 2*z*x**2*y**2 + 2*y*z*x**3 + y*x**4, x, y, z)
    # existing Polys pass through unchanged
    assert poly(Poly(x + y + z, y, x, z)) == Poly(x + y + z, y, x, z)
    # restricting generators pushes the rest into the coefficient domain
    assert poly((x + y)**2, x) == Poly(x**2 + 2*x*y + y**2, x, domain=ZZ[y])
    assert poly((x + y)**2, y) == Poly(x**2 + 2*x*y + y**2, y, domain=ZZ[x])
    assert poly(1, x) == Poly(1, x)
    raises(GeneratorsNeeded, "poly(1)")
    # issue 3085
    assert poly(x + y, x, y) == Poly(x + y, x, y)
    assert poly(x + y, y, x) == Poly(x + y, y, x)
def test_keep_coeff():
    """_keep_coeff(): keep a coefficient unevaluated in front of a sum."""
    u = Mul(2, x + 1, evaluate=False)
    assert _keep_coeff(S(1), x) == x
    assert _keep_coeff(S(-1), x) == -x
    assert _keep_coeff(S(1), 2*x) == 2*x
    assert _keep_coeff(S(2), x/2) == x
    assert _keep_coeff(S(2), sin(x)) == 2*sin(x)
    # only Number * Add pairs stay unevaluated
    assert _keep_coeff(S(2), x + 1) == u
    assert _keep_coeff(x, 1/x) == 1
    assert _keep_coeff(x + 1, S(2)) == u
@XFAIL
def test_poly_matching_consistency():
    # Test for this issue:
    # http://code.google.com/p/sympy/issues/detail?id=2415
    """Expected failure: multiplying Poly by I from either side should agree."""
    assert I * Poly(x, x) == Poly(I*x, x)
    assert Poly(x, x) * I == Poly(I*x, x)
@XFAIL
def test_issue_2687():
    """Expected failure: factor(..., extension=[I]) should round-trip expand."""
    assert expand(factor(expand((x - I*y)*(z - I*t)), extension=[I])) == -I*t*x - t*y + x*z - I*y*z
| ichuang/sympy | sympy/polys/tests/test_polytools.py | Python | bsd-3-clause | 96,007 | [
"Gaussian"
] | e31c9556ad9c060626e954abd8f175a8de67fc620d7d93550391ae5964f96474 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module is provides funcions for dict lists and functions processing
"""
import logging
logger = logging.getLogger(__name__)
import collections
import inspect
import copy
import numpy as np
def get_default_args(obj):
    """Return an OrderedDict mapping keyword-argument names to default values.

    For a class (or any object whose ``__init__`` is a plain function/method)
    the signature of ``__init__`` is inspected; otherwise ``obj`` itself is
    inspected.  The first positional argument (typically ``self``) is always
    skipped.

    :param obj: function, method or class to inspect
    :return: ``collections.OrderedDict`` of {argument name: default value};
        arguments without defaults are omitted
    """
    # getfullargspec supersedes getargspec, which was removed in Python 3.11
    getspec = getattr(inspect, "getfullargspec", None) or inspect.getargspec
    if ("__init__" in dir(obj)):
        if inspect.isfunction(obj.__init__) or inspect.ismethod(obj.__init__):
            argspec = getspec(obj.__init__)
        else:
            argspec = getspec(obj)
    else:
        argspec = getspec(obj)
    args = argspec.args[1:]
    # ``defaults`` may be None when no argument has a default
    defaults = argspec.defaults or ()
    # defaults belong to the *last* len(defaults) arguments; the old
    # ``zip(args, defaults)`` paired them with the first (wrong) names
    dc = collections.OrderedDict(zip(args[len(args) - len(defaults):], defaults))
    return dc
def subdict(dct, keys):
    """Return a copy of ``dct`` restricted to the given keys.

    The result is an ``OrderedDict`` when the input is one (exact type match),
    a plain ``dict`` otherwise; the iteration order of ``dct`` is preserved.

    :param dct: source mapping
    :param keys: iterable of keys to keep
    :return: new mapping with only the selected keys
    """
    out = collections.OrderedDict() if type(dct) == collections.OrderedDict else {}
    for k in dct:
        if k in keys:
            out[k] = dct[k]
    return out
def list_filter(lst, startswith=None, notstartswith=None,
                contain=None, notcontain=None):
    """ Keep in list items according to filter parameters.

    An item is kept if it satisfies *any* of the supplied conditions
    (logical OR); with no conditions given, the result is empty.

    :param lst: item list
    :param startswith: keep items starting with
    :param notstartswith: keep items NOT starting with
    :param contain: keep items containing this substring
    :param notcontain: keep items NOT containing this substring
    :return: filtered list
    """
    kept = []
    for entry in lst:
        matched = False
        if startswith is not None and entry.startswith(startswith):
            matched = True
        if notstartswith is not None and not entry.startswith(notstartswith):
            matched = True
        if contain is not None and contain in entry:
            matched = True
        if notcontain is not None and notcontain not in entry:
            matched = True
        if matched:
            kept.append(entry)
    return kept
def kick_from_dict(dct, keys):
    """Return a copy of ``dct`` with the given keys removed.

    The result is an ``OrderedDict`` when the input is one (exact type match),
    a plain ``dict`` otherwise; remaining entries keep their order.

    :param dct: source mapping
    :param keys: iterable of keys to drop
    :return: new mapping without the given keys
    """
    survivors = [(k, dct[k]) for k in dct if k not in keys]
    if type(dct) == collections.OrderedDict:
        return collections.OrderedDict(survivors)
    return dict(survivors)
def split_dict(dct, keys):
    """
    Split dict into two subdicts based on keys.

    Result mappings are ``OrderedDict`` when the input is one (exact type
    match), plain dicts otherwise.

    :param dct: source mapping
    :param keys: keys that go into the first returned dict
    :return: dict_in, dict_out
    """
    if type(dct) == collections.OrderedDict:
        dict_in = collections.OrderedDict()
        dict_out = collections.OrderedDict()
    else:
        dict_in = {}
        dict_out = {}
    # bug fix: the original iterated ``dct.items`` (the bound method object
    # itself, missing the call parentheses), which raised TypeError
    for key, value in dct.items():
        if key in keys:
            dict_in[key] = value
        else:
            dict_out[key] = value
    return dict_in, dict_out
def recursive_update(d, u):
"""
Dict recursive update.
Based on Alex Martelli code on stackoverflow
http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth?answertab=votes#tab-top
:param d: dict to update
:param u: dict with new data
:return:
"""
for k, v in u.items():
if isinstance(v, collections.Mapping):
r = recursive_update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
from collections import Mapping
from operator import add
_FLAG_FIRST = object()
def flatten_dict_join_keys(dct, join_symbol=" "):
    """ Flatten dict with defined key join symbol.

    Nested keys are concatenated into a single string key separated by
    ``join_symbol``.

    :param dct: dict to flatten
    :param join_symbol: separator between nested key parts, default " "
    :return: flat dict with joined string keys
    """
    concat = lambda a, b: a + join_symbol + b
    return dict(flatten_dict(dct, join=concat))
def flatten_dict(dct, separator=None, join=add, lift=lambda x: x):
    """
    Flatten a nested dict into a list of (key, value) pairs.

    Based on ninjagecko code on stackoveflow
    http://stackoverflow.com/questions/6027558/flatten-nested-python-dictionaries-compressing-keys

    :param dct: dict to flatten
    :param separator: use preset values for join and lift.
        Use empty list or tuple [], () for key hierarchy stored in tuples.
        If separator is a string it is used to join the key parts.
    :param join: join operation. To join keys with '_' use join=lambda a,b:a+'_'+b
    :param lift: to have all hierarchy keys in a tuple use lift=lambda x:(x,)
    :return: list of (flattened key, value) tuples

    For all keys from the hierarchy kept in a tuple use:

        dict( flatten_dict(data, lift=lambda x:(x,)) )

    For all keys from the hierarchy separated by '_' use:

        dict( flatten_dict(data, join=lambda a,b:a+'_'+b) )
    """
    # local import keeps this working on Python 3.10+, where the old
    # ``collections.Mapping`` alias no longer exists
    try:
        from collections.abc import Mapping
    except ImportError:  # Python 2
        from collections import Mapping
    if type(separator) is str:
        join = lambda a, b: a + separator + b
    elif type(separator) in (list, tuple):
        lift = lambda x: (x,)
    results = []
    _root = object()  # local sentinel marking "no parent key yet"

    def visit(subdict, partial_key):
        for k, v in subdict.items():
            new_key = lift(k) if partial_key is _root else join(partial_key, lift(k))
            if isinstance(v, Mapping):
                visit(v, new_key)
            else:
                results.append((new_key, v))

    visit(dct, _root)
    return results
def list_contains(list_of_strings, substring, return_true_false_array=False):
    """ Get strings in list which contains substring.

    :param list_of_strings: iterable of strings (plain list, numpy array or
        pandas Index)
    :param substring: substring to search for
    :param return_true_false_array: if True, return the boolean mask instead
        of the matching items
    :return: list of matching strings, or the boolean mask
    """
    key_tf = [keyi.find(substring) != -1 for keyi in list_of_strings]
    if return_true_false_array:
        return key_tf
    # bug fix / generalization: boolean-mask indexing only works on
    # numpy/pandas containers; build the filtered list explicitly so plain
    # Python lists work too
    keys_to_remove = [s for s, tf in zip(list_of_strings, key_tf) if tf]
    return keys_to_remove
def df_drop_duplicates(df, ignore_key_pattern="time"):
    """
    Drop duplicates from dataframe ignore columns with keys containing defined pattern.

    :param df: pandas DataFrame
    :param ignore_key_pattern: columns whose name contains this substring are
        excluded from the duplicate comparison
    :return: deduplicated DataFrame
    """
    ignored = list_contains(df.keys(), ignore_key_pattern)
    considered = list(df.keys())
    for column in ignored:
        considered.remove(column)
    return df.drop_duplicates(considered)
def ndarray_to_list_in_structure(item, squeeze=True):
    """ Change ndarray in structure of lists and dicts into lists.

    Lists and dicts are modified in place; arrays are converted via
    ``tolist()``.

    :param item: structure of nested dicts/lists possibly containing ndarrays
    :param squeeze: drop singleton dimensions before conversion
    :return: the same structure with every ndarray turned into (nested) lists
    """
    tp = type(item)
    if tp == np.ndarray:
        if squeeze:
            item = item.squeeze()
        item = item.tolist()
    elif tp == list:
        for i in range(len(item)):
            # bug fix: propagate ``squeeze`` — it was previously dropped on
            # every recursive call, silently reverting to the default
            item[i] = ndarray_to_list_in_structure(item[i], squeeze=squeeze)
    elif tp == dict:
        for lab in item:
            item[lab] = ndarray_to_list_in_structure(item[lab], squeeze=squeeze)
    return item
def dict_find_key(dd, value):
    """ Find first suitable key in dict.

    :param dd: mapping to search
    :param value: value to look for
    :return: first key whose value equals ``value``
    :raises StopIteration: when no key maps to ``value``
    """
    matches = (k for k, v in dd.items() if v == value)
    return next(matches)
def sort_list_of_dicts(lst_of_dct, keys, reverse=False, **sort_args):
    """
    Sort list of dicts by one or multiple keys.

    If the key is not available, sort these to the end.

    :param lst_of_dct: input structure. List of dicts. Sorted in place.
    :param keys: one key or a list of keys
    :param reverse: sort in descending order
    :param sort_args: extra keyword arguments forwarded to ``list.sort``
    :return: the sorted list (same object as the input)
    """
    if type(keys) != list:
        keys = [keys]

    def rank(record):
        # missing keys sort last: (True, 0) compares greater than (False, v)
        return [(key not in record, record.get(key, 0)) for key in keys]

    lst_of_dct.sort(key=rank, reverse=reverse, **sort_args)
    return lst_of_dct
def ordered_dict_to_dict(config):
    """Recursively replace every OrderedDict in a structure by a plain dict.

    Lists and dicts are traversed (lists are updated in place); any other
    value is returned unchanged.
    """
    if type(config) == collections.OrderedDict:
        config = dict(config)
    if type(config) == list:
        for idx, element in enumerate(config):
            config[idx] = ordered_dict_to_dict(element)
    elif type(config) == dict:
        for key in config:
            config[key] = ordered_dict_to_dict(config[key])
    return config
# def struct_to_yaml(cfg):
# """
# write complex struct with dicts and lists into yaml
# :param cfg:
# :return:
# """
# import yaml
# # convert values to json
# isconverted = {}
# for key, value in cfg.iteritems():
# if type(value) in (str, int, float, bool):
#
# isconverted[key] = False
# if type(value) is str:
# pass
#
# else:
# isconverted[key] = True
# cfg[key] = yaml.dump(value, default_flow_style=True)
# return cfg
| mjirik/imtools | imtools/dili.py | Python | mit | 8,160 | [
"VisIt"
] | cdae915d56662301e2ba4c2e0c2cbef0e3a51ee44fb6e97ec65e7be3822f50f4 |
#!/usr/bin/env python
# -*-python-*-
#
# Copyright (C) 1999-2013 The ViewCVS Group. All Rights Reserved.
#
# By using this file, you agree to the terms and conditions set forth in
# the LICENSE.html file which can be found at the top level of the ViewVC
# distribution or at http://viewvc.org/license-1.html.
#
# For more information, visit http://viewvc.org/
#
# -----------------------------------------------------------------------
#
# CGI script to process and display queries to CVSdb
#
# This script is part of the ViewVC package. More information can be
# found at http://viewvc.org
#
# -----------------------------------------------------------------------
import os
import sys
import string
import time
import cvsdb
import viewvc
import ezt
import debug
import urllib
import fnmatch
class FormData:
    """Parsed and normalized CGI form fields for a CVSdb query.

    ``valid`` is set to 1 as soon as at least one field that can actually
    narrow a query (repository, branch, directory, file, who or hours) is
    present; ``sortby`` and ``date`` alone do not make the form valid
    (matching the original behaviour).
    """

    def __init__(self, form):
        self.valid = 0

        self.repository = ""
        self.branch = ""
        self.directory = ""
        self.file = ""
        self.who = ""
        self.sortby = ""
        self.date = ""
        self.hours = 0

        self.decode_thyself(form)

    def _get_string(self, form, name):
        """Return the stripped string value of a form field, or None when
        the field is missing or its value is not a usable string."""
        try:
            return form[name].value.strip()
        except (KeyError, TypeError, AttributeError):
            return None

    def decode_thyself(self, form):
        # The original had six copy-pasted try/except blocks and used the
        # py2-only string.strip() function; this folds them into two loops.

        # Fields whose presence makes the query valid.
        for name in ("repository", "branch", "directory", "file", "who"):
            value = self._get_string(form, name)
            if value is not None:
                setattr(self, name, value)
                self.valid = 1

        # Informational fields: they do not make the form valid by themselves.
        for name in ("sortby", "date"):
            value = self._get_string(form, name)
            if value is not None:
                setattr(self, name, value)

        try:
            self.hours = int(form["hours"].value)
        except (KeyError, TypeError, ValueError):
            pass
        else:
            self.valid = 1
## returns a tuple-list (mod-str, string)
def listparse_string(str):
    """Parse a comma-separated query field into (command, value) tuples.

    Each item is either a bare value (command is "") or a quoted value
    preceded by a one-letter command, e.g. ``r"regex"`` or ``l"like"``.
    A backslash escapes the following character.  On a malformed item
    (garbage between a closing quote and the next comma) an error is
    printed and the process exits, matching the original CGI behaviour.
    """
    return_list = []
    cmd = ""
    temp = ""
    escaped = 0
    state = "eat leading whitespace"

    for c in str:
        ## handle escaped characters
        if not escaped and c == "\\":
            escaped = 1
            continue

        ## strip leading white space
        if state == "eat leading whitespace":
            if c in string.whitespace:
                continue
            else:
                state = "get command or data"

        ## parse to '"' or ","
        if state == "get command or data":
            ## just add escaped characters
            if escaped:
                escaped = 0
                temp = temp + c
                continue
            ## the data is in quotes after the command
            elif c == "\"":
                cmd = temp
                temp = ""
                state = "get quoted data"
                continue
            ## this tells us there was no quoted data, therefore no
            ## command; add the command and start over
            elif c == ",":
                ## strip ending whitespace on un-quoted data
                ## (temp.rstrip() replaces the py2-only string.rstrip(temp))
                temp = temp.rstrip()
                return_list.append( ("", temp) )
                temp = ""
                state = "eat leading whitespace"
                continue
            ## record the data
            else:
                temp = temp + c
                continue

        ## parse until ending '"'
        if state == "get quoted data":
            ## just add escaped characters
            if escaped:
                escaped = 0
                temp = temp + c
                continue
            ## look for ending '"'
            elif c == "\"":
                return_list.append( (cmd, temp) )
                cmd = ""
                temp = ""
                state = "eat comma after quotes"
                continue
            ## record the data
            else:
                temp = temp + c
                continue

        ## parse until ","
        if state == "eat comma after quotes":
            if c in string.whitespace:
                continue
            elif c == ",":
                state = "eat leading whitespace"
                continue
            else:
                # print() form works on both Python 2 and 3
                # (the original used a py2-only print statement).
                print("format error")
                sys.exit(1)

    if cmd or temp:
        return_list.append((cmd, temp))

    return return_list
def decode_command(cmd):
    """Map a one-letter match modifier to the CVSdb match type name.

    "r" means regex matching, "l" means SQL LIKE matching; anything else
    falls back to exact matching.
    """
    mapping = {"r": "regex", "l": "like"}
    return mapping.get(cmd, "exact")
def form_to_cvsdb_query(cfg, form_data):
    """Translate a validated FormData object into a cvsdb CheckinQuery.

    Each populated text field is parsed with listparse_string() and fed to
    the corresponding query setter with its match type ("exact", "like" or
    "regex").  The sort method and date range are applied afterwards.
    """
    query = cvsdb.CreateCheckinQuery()
    query.SetLimit(cfg.cvsdb.row_limit)

    # (field value, query setter) pairs; empty fields are skipped.  This
    # replaces five copy-pasted loops; the loop variable no longer shadows
    # the builtin ``str``.
    field_setters = [
        (form_data.repository, query.SetRepository),
        (form_data.branch, query.SetBranch),
        (form_data.directory, query.SetDirectory),
        (form_data.file, query.SetFile),
        (form_data.who, query.SetAuthor),
    ]
    for value, setter in field_setters:
        if not value:
            continue
        for cmd, pattern in listparse_string(value):
            setter(pattern, decode_command(cmd))

    if form_data.sortby == "author":
        query.SetSortMethod("author")
    elif form_data.sortby == "file":
        query.SetSortMethod("file")
    else:
        query.SetSortMethod("date")

    if form_data.date:
        if form_data.date == "hours" and form_data.hours:
            query.SetFromDateHoursAgo(form_data.hours)
        elif form_data.date == "day":
            query.SetFromDateDaysAgo(1)
        elif form_data.date == "week":
            query.SetFromDateDaysAgo(7)
        elif form_data.date == "month":
            query.SetFromDateDaysAgo(31)

    return query
def prev_rev(rev):
    '''Returns a string representing the previous revision of the argument.

    The final component is decremented; if that reaches 0 on a branch
    revision the two branch components are pruned (e.g. "1.2.4.1" -> "1.2").
    Uses str methods instead of the py2-only string.split/string.join
    functions (behaviour is identical on Python 2).
    '''
    r = rev.split('.')
    # decrement final revision component
    r[-1] = str(int(r[-1]) - 1)
    # prune if we pass the beginning of the branch
    if len(r) > 2 and r[-1] == '0':
        r = r[:-2]
    return '.'.join(r)
def is_forbidden(cfg, cvsroot_name, module):
    '''Return 1 if MODULE in CVSROOT_NAME is forbidden; return 0 otherwise.'''

    # CVSROOT_NAME might be None here if the data comes from an
    # unconfigured root.  This interface doesn't care that the root
    # isn't configured, but if that's the case, it will consult only
    # the base and per-vhost configuration for authorizer and
    # authorizer parameters.
    if cvsroot_name:
        authorizer, params = cfg.get_authorizer_and_params_hack(cvsroot_name)
    else:
        authorizer = cfg.options.authorizer
        params = cfg.get_authorizer_params()

    # If CVSROOT_NAME isn't configured to use an authorizer, nothing
    # is forbidden.  If it's configured to use something other than
    # the 'forbidden' authorizer, complain.  Otherwise, check for
    # forbiddenness per the PARAMS as expected.
    if not authorizer:
        return 0
    if authorizer != 'forbidden':
        raise Exception("The 'forbidden' authorizer is the only one supported "
                        "by this interface.  The '%s' root is configured to "
                        "use a different one." % (cvsroot_name))

    # Parse the comma-separated pattern list, dropping empty entries.
    # (Comprehension replaces the py2-only string.split/string.strip calls.)
    forbidden = params.get('forbidden', '')
    patterns = [pat.strip() for pat in forbidden.split(',') if pat]

    # A leading '!' inverts the meaning of a pattern: matching modules are
    # explicitly allowed, and the default for non-matching modules flips
    # to forbidden.
    default = 0
    for pat in patterns:
        if pat[0] == '!':
            default = 1
            if fnmatch.fnmatchcase(module, pat[1:]):
                return 0
        elif fnmatch.fnmatchcase(module, pat):
            return 1
    return default
def build_commit(server, cfg, desc, files, cvsroots, viewvc_link):
    """Build a template item describing one commit group.

    DESC is the shared log message and FILES the cvsdb commit rows that
    belong to it.  Files in forbidden modules (or in CVSROOT when
    hide_cvsroot is set) are skipped.  File and diff links are only
    generated when the repository maps to a configured root name and a
    VIEWVC_LINK is available.
    """
    ob = _item(num_files=len(files), files=[])
    # Escape the log message for HTML output and preserve its line breaks.
    ob.log = desc and server.escape(desc).replace('\n', '<br />') or ''

    for commit in files:
        repository = commit.GetRepository()
        directory = commit.GetDirectory()
        cvsroot_name = cvsroots.get(repository)

        ## find the module name (if any); the comprehension replaces the
        ## py2-only subscripted filter()/string.split() combination
        try:
            module = [part for part in directory.split('/') if part][0]
        except IndexError:
            module = None

        ## skip commits we aren't supposed to show
        if module and ((module == 'CVSROOT' and cfg.options.hide_cvsroot)
                       or is_forbidden(cfg, cvsroot_name, module)):
            continue

        ctime = commit.GetTime()
        if not ctime:
            ctime = " "
        else:
            if cfg.options.use_localtime:
                ctime = time.strftime("%y/%m/%d %H:%M %Z", time.localtime(ctime))
            else:
                ctime = time.strftime("%y/%m/%d %H:%M", time.gmtime(ctime)) \
                        + ' UTC'

        ## make the file link
        try:
            file = (directory and directory + "/") + commit.GetFile()
        except Exception:
            # Re-raise with enough context to locate the bad row; the
            # original used the py2-only 'raise Exception, ...' syntax
            # and a bare except clause.
            raise Exception(str([directory, commit.GetFile()]))

        ## If we couldn't find the cvsroot path configured in the
        ## viewvc.conf file, or we don't have a VIEWVC_LINK, then
        ## don't make the link.
        if cvsroot_name and viewvc_link:
            flink = '[%s] <a href="%s/%s?root=%s">%s</a>' % (
                cvsroot_name, viewvc_link, urllib.quote(file),
                cvsroot_name, file)
            if commit.GetType() == commit.CHANGE:
                dlink = '%s/%s?root=%s&view=diff&r1=%s&r2=%s' % (
                    viewvc_link, urllib.quote(file), cvsroot_name,
                    prev_rev(commit.GetRevision()), commit.GetRevision())
            else:
                dlink = None
        else:
            flink = '[%s] %s' % (repository, file)
            dlink = None

        ob.files.append(_item(date=ctime,
                              author=commit.GetAuthor(),
                              link=flink,
                              rev=commit.GetRevision(),
                              branch=commit.GetBranch(),
                              plus=int(commit.GetPlusCount()),
                              minus=int(commit.GetMinusCount()),
                              type=commit.GetTypeString(),
                              difflink=dlink,
                              ))

    return ob
def run_query(server, cfg, form_data, viewvc_link):
    """Run the CVSdb query described by FORM_DATA and group the rows.

    Returns a (commits, row_limit_reached) pair, where each commit is an
    _item grouping all files that share one log message.  Commits whose
    files were all filtered out (e.g. forbidden modules) are dropped.
    """
    query = form_to_cvsdb_query(cfg, form_data)
    db = cvsdb.ConnectDatabaseReadOnly(cfg)
    db.RunQuery(query)

    commit_list = query.GetCommitList()
    if not commit_list:
        return [ ], 0
    row_limit_reached = query.GetLimitReached()

    commits = [ ]
    files = [ ]

    # Map cleaned repository paths back to their configured root names so
    # build_commit() can generate links.
    cvsroots = {}
    viewvc.expand_root_parents(cfg)
    rootitems = cfg.general.svn_roots.items() + cfg.general.cvs_roots.items()
    for key, value in rootitems:
        cvsroots[cvsdb.CleanRepository(value)] = key

    # Rows sharing a log message are adjacent in the query result; group
    # consecutive rows with the same description into one commit.
    current_desc = commit_list[0].GetDescription()
    for commit in commit_list:
        desc = commit.GetDescription()
        if current_desc == desc:
            files.append(commit)
            continue

        commits.append(build_commit(server, cfg, current_desc, files,
                                    cvsroots, viewvc_link))
        files = [ commit ]
        current_desc = desc

    ## add the last file group to the commit list
    commits.append(build_commit(server, cfg, current_desc, files,
                                cvsroots, viewvc_link))

    # Strip out commits that don't have any files attached to them.  The
    # files probably aren't present because they've been blocked via
    # forbiddenness.
    def _only_with_files(commit):
        return len(commit.files) > 0
    commits = filter(_only_with_files, commits)

    return commits, row_limit_reached
def main(server, cfg, viewvc_link):
    """CGI entry point: parse the form, optionally run the query, and
    render the 'query' EZT template.

    SystemExit is swallowed so sys.exit() inside helpers just ends the
    request; any other exception is rendered through the debug module.
    """
    try:
        form = server.FieldStorage()
        form_data = FormData(form)

        if form_data.valid:
            commits, row_limit_reached = run_query(server, cfg,
                                                   form_data, viewvc_link)
            query = None
        else:
            commits = [ ]
            row_limit_reached = 0
            query = 'skipped'

        docroot = cfg.options.docroot
        if docroot is None and viewvc_link:
            docroot = viewvc_link + '/' + viewvc.docroot_magic_path

        data = ezt.TemplateData({
            'cfg' : cfg,
            'address' : cfg.general.address,
            'vsn' : viewvc.__version__,
            'repository' : server.escape(form_data.repository),
            'branch' : server.escape(form_data.branch),
            'directory' : server.escape(form_data.directory),
            'file' : server.escape(form_data.file),
            'who' : server.escape(form_data.who),
            'docroot' : docroot,
            'sortby' : form_data.sortby,
            'date' : form_data.date,
            'query' : query,
            'row_limit_reached' : ezt.boolean(row_limit_reached),
            'commits' : commits,
            'num_commits' : len(commits),
            'rss_href' : None,
            'hours' : form_data.hours and form_data.hours or 2,
        })

        # generate the page
        server.header()
        template = viewvc.get_view_template(cfg, "query")
        template.generate(server.file(), data)
    except SystemExit:
        # The original py2-only 'except SystemExit, e:' bound a name that
        # was never used; the binding is dropped.
        pass
    except:
        # Top-level CGI boundary: render whatever went wrong.
        exc_info = debug.GetExceptionData()
        server.header(status=exc_info['status'])
        debug.PrintException(server, exc_info)
class _item:
def __init__(self, **kw):
vars(self).update(kw)
| marcellodesales/svnedge-console | svn-server/lib/viewvc/query.py | Python | agpl-3.0 | 14,535 | [
"VisIt"
] | b021b9575618296e11ae6a82704eb02b0149d9a689f498d1c71e8bf8301e19dd |
"""
This program tests the correct addition and removal of components to the InstalledComponentsDB,
as well as of the components themselves.
CLI functions are used to ensure the test is as similar as possible to a real user-to-CLI interaction.
This test assumes that there is a DIRAC master server running on the local machine
This test assumes that the Notification service is not installed
This test assumes that the FTS3DB database is not installed and doesn't exist in MySQL
"""
# FIXME: to restore
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=invalid-name,wrong-import-position
import sys
import unittest
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC.FrameworkSystem.Client.ComponentInstaller import gComponentInstaller
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.FrameworkSystem.Client.ComponentMonitoringClient import ComponentMonitoringClient
from DIRAC.FrameworkSystem.Client.SystemAdministratorClientCLI import SystemAdministratorClientCLI
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getUsernameForDN
class TestComponentInstallation(unittest.TestCase):
    """
    Contains methods for testing of separate elements
    """

    def setUp(self):
        # Target host / port of the services exercised by the test.
        self.host = 'localhost'
        self.notificationPort = 9154
        self.rootPwd = ''
        self.csClient = CSAPI()
        self.monitoringClient = ComponentMonitoringClient()
        # CLI wrapper used to drive install/uninstall like a real admin would.
        self.client = SystemAdministratorClientCLI(self.host)

        # Fetch a fresh copy of the configuration and read the values the
        # tests need (setup name, database password).
        self.csClient.downloadCSData()
        result = self.csClient.getCurrentCFG()
        if not result['OK']:
            raise Exception(result['Message'])
        cfg = result['Value']

        setup = cfg.getOption('DIRAC/Setup', 'dirac-JenkinsSetup')
        self.frameworkSetup = cfg.getOption('DIRAC/Setups/' + setup + '/Framework')
        self.rootPwd = cfg.getOption('Systems/Databases/Password')
        self.diracPwd = self.rootPwd

        # Resolve the user name behind the current proxy; installations in
        # the monitoring DB are attributed to this user.
        result = getProxyInfo()
        if not result['OK']:
            raise Exception(result['Message'])
        chain = result['Value']['chain']
        result = chain.getCertInChain(-1)
        if not result['OK']:
            raise Exception(result['Message'])
        result = result['Value'].getSubjectDN()
        if not result['OK']:
            raise Exception(result['Message'])
        userDN = result['Value']
        result = getUsernameForDN(userDN)
        if not result['OK']:
            raise Exception(result['Message'])
        self.user = result['Value']
        if not self.user:
            self.user = 'unknown'

    def tearDown(self):
        # Nothing to clean up: each test restores the pre-existing state itself.
        pass
class ComponentInstallationChain(TestComponentInstallation):
    """Install/uninstall scenarios driven through the SystemAdministrator
    CLI and verified both in the CS and in the InstalledComponentsDB."""

    def testComponent(self):
        """Install the Notification service (and a second instance based on
        the same module), check CS and DB records, then uninstall and
        verify the cleanup.  Pre-existing instances are left in place."""
        service1Present = False
        service2Present = False

        # Check whether the service is already present or not
        cfg = self.csClient.getCurrentCFG()['Value']
        if cfg.isSection('Systems/Framework/' +
                         self.frameworkSetup +
                         '/Services/Notification/') and cfg.isOption('Systems/Framework/' +
                                                                     self.frameworkSetup +
                                                                     '/URLs/Notification'):
            service1Present = True

        if not service1Present:
            # Install component
            self.client.do_install('service Framework Notification')
            self.csClient.downloadCSData()

        # Check installation in CS
        cfg = self.csClient.getCurrentCFG()['Value']
        self.assertTrue(cfg.isSection('Systems/Framework/' + self.frameworkSetup + '/Services/Notification/') and
                        cfg.isOption('Systems/Framework/' + self.frameworkSetup + '/URLs/Notification'))
        self.assertTrue(cfg.getOption('Systems/Framework/' + self.frameworkSetup + '/URLs/Notification') ==
                        'dips://' + self.host + ':' + str(self.notificationPort) + '/Framework/Notification')

        # Check installation in database
        if not service1Present:
            result = self.monitoringClient.getInstallations({'Instance': 'Notification',
                                                             'UnInstallationTime': None, 'InstalledBy': self.user},
                                                            {'DIRACSystem': 'Framework', 'Type': 'service',
                                                             'DIRACModule': 'Notification'},
                                                            {}, False)
        else:
            # We dont know who made the previous installation
            result = self.monitoringClient.getInstallations({'Instance': 'Notification', 'UnInstallationTime': None},
                                                            {'DIRACSystem': 'Framework',
                                                             'Type': 'service',
                                                             'DIRACModule': 'Notification'},
                                                            {}, False)
        self.assertTrue(result['OK'] and len(result['Value']) == 1)

        # Check whether the second service is already present or not
        cfg = self.csClient.getCurrentCFG()['Value']
        if cfg.isSection('Systems/Framework/' +
                         self.frameworkSetup +
                         '/Services/Notification2/') and cfg.isOption('Systems/Framework/' +
                                                                      self.frameworkSetup +
                                                                      '/URLs/Notification2'):
            service2Present = True

        if not service2Present:
            # Install second component
            self.client.do_install('service Framework Notification2 -m Notification')

            # Check installation in CS
            self.csClient.downloadCSData()
            cfg = self.csClient.getCurrentCFG()['Value']
            self.assertTrue(cfg.isSection('Systems/Framework/' + self.frameworkSetup + '/Services/Notification2/') and
                            cfg.isOption('Systems/Framework/' + self.frameworkSetup + '/URLs/Notification2'))

        if not service1Present:
            # Uninstall component
            self.client.do_uninstall('-f Framework Notification')

            # Check CS is intact ( there should still be at least one instance of Notification )
            self.csClient.downloadCSData()
            cfg = self.csClient.getCurrentCFG()['Value']
            self.assertTrue(cfg.isSection('Systems/Framework/' +
                                          self.frameworkSetup +
                                          '/Services/Notification/') and cfg.isSection('Systems/Framework/' +
                                                                                       self.frameworkSetup +
                                                                                       '/Services/Notification/') and
                            cfg.isOption('Systems/Framework/' +
                                         self.frameworkSetup +
                                         '/URLs/Notification'))

        if not service2Present:
            # Uninstall second component
            self.client.do_uninstall('-f Framework Notification2')

        if not service1Present and not service2Present:
            # Check uninstallation in CS ( only if the services were not already present )
            self.csClient.downloadCSData()
            cfg = self.csClient.getCurrentCFG()['Value']
            self.assertTrue(not cfg.isSection('Systems/Framework/' +
                                              self.frameworkSetup +
                                              '/Services/Notification/') and not cfg.isSection(
                'Systems/Framework/' +
                self.frameworkSetup +
                '/Services/Notification2/') and not
                cfg.isOption('Systems/Framework/' +
                             self.frameworkSetup +
                             '/URLs/Notification'))

    def testDatabase(self):
        """Install the FTS3DB database, verify CS and monitoring-DB records,
        then uninstall it and verify the CS entry is gone.  Assumes FTS3DB
        does not exist beforehand (see module docstring)."""
        # Provide MySQL credentials for the DB installation.
        gComponentInstaller.setMySQLPasswords(self.rootPwd, self.diracPwd)

        # Install database
        self.client.do_install('db FTS3DB')

        # Check installation in CS
        self.csClient.downloadCSData()
        cfg = self.csClient.getCurrentCFG()['Value']
        self.assertTrue(cfg.isSection('Systems/DataManagement/' + self.frameworkSetup + '/Databases/FTS3DB/'))

        # Check in database
        result = self.monitoringClient.getInstallations(
            {'Instance': 'FTS3DB',
             'UnInstallationTime': None,
             'InstalledBy': self.user},
            {'DIRACSystem': 'DataManagement',
             'Type': 'DB',
             'DIRACModule': 'FTS3DB'},
            {},
            False)
        self.assertTrue(result['OK'] and len(result['Value']) == 1)

        # Uninstall database
        self.client.do_uninstall('db FTS3DB')

        # Check uninstallation in CS
        self.csClient.downloadCSData()
        cfg = self.csClient.getCurrentCFG()['Value']
        self.assertTrue(not cfg.isSection('Systems/DataManagement/' + self.frameworkSetup + '/Databases/FTS3DB/'))
if __name__ == '__main__':
    # Build a suite from both test cases and propagate the result as the
    # process exit status (0 on success, 1 on failure).
    loader = unittest.defaultTestLoader
    suite = loader.loadTestsFromTestCase(TestComponentInstallation)
    suite.addTest(loader.loadTestsFromTestCase(ComponentInstallationChain))
    testResult = unittest.TextTestRunner(verbosity=2).run(suite)
    sys.exit(not testResult.wasSuccessful())
| yujikato/DIRAC | tests/Integration/Framework/NotRun_Test_ComponentInstallUninstall.py | Python | gpl-3.0 | 9,118 | [
"DIRAC"
] | fd7340540ebc3458e2c17e8814d2e0c6106fbf4d3c5ae4544c764913595e8f07 |
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 1: Fundamental Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2013 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
============================================================================================================
This example uses linear regression to predict the number of rings from the abalone data set. Linear regression
requires a simple linear relationship, so it will not fit this data exactly; however, it is able to find some trends.
The output is shown here.
Solution coefficients: [[ -3.70992979e+12]
[ 3.70992979e+12]
[ 3.70992979e+12]
[ 3.70992979e+12]
[ -4.55322266e-01]
[ 1.10373535e+01]
[ 1.07808838e+01]
[ 8.97070312e+00]
[ -1.97780762e+01]
[ -1.05693359e+01]
[ 8.75000000e+00]]
-> Actual: [ 9.21191406], Ideal:15.0
-> Actual: [ 7.84033203], Ideal:7.0
-> Actual: [ 11.15673828], Ideal:9.0
-> Actual: [ 9.64404297], Ideal:10.0
-> Actual: [ 6.74414062], Ideal:7.0
-> Actual: [ 7.82226562], Ideal:8.0
...
-> Actual: [ 9.58837891], Ideal:8.0
-> Actual: [ 9.10546875], Ideal:10.0
-> Actual: [ 11.17578125], Ideal:10.0
-> Actual: [ 10.16894531], Ideal:8.0
-> Actual: [ 10.74511719], Ideal:11.0
-> Actual: [ 9.98828125], Ideal:10.0
-> Actual: [ 10.94140625], Ideal:9.0
-> Actual: [ 9.79785156], Ideal:10.0
-> Actual: [ 10.98486328], Ideal:12.0
Process finished with exit code 0
"""
__author__ = 'jheaton'
import os
import sys
import numpy as np
# Find the AIFH core files
aifh_dir = os.path.dirname(os.path.abspath(__file__))
aifh_dir = os.path.abspath(aifh_dir + os.sep + ".." + os.sep + "lib" + os.sep + "aifh")
sys.path.append(aifh_dir)
from normalize import Normalize
def multi_linear_regression(x, y):
    """Fit a multiple linear regression via least squares.

    :param x: 2-D sequence of observations (rows) by features (columns)
    :param y: target values, one per observation
    :return: coefficient vector [intercept, b1, ..., bn]
    """
    # Prepend a column of ones so the first coefficient is the intercept.
    x_matrix = np.ones((len(x), len(x[0]) + 1), dtype=float)
    x_matrix[:, 1:] = x
    # The stray debug print of ``y`` was removed.  rcond=None selects the
    # machine-precision cutoff and silences NumPy's FutureWarning.
    return np.linalg.lstsq(x_matrix, y, rcond=None)[0]
def calc_linear_regression(coeff, x):
    """Evaluate a linear model: coeff[0] + sum of coeff[i] * x[i-1].

    :param coeff: coefficient vector, intercept first
    :param x: feature values (one per non-intercept coefficient)
    :return: predicted value
    """
    total = 0
    # Accumulate the weighted terms first and add the intercept last,
    # matching the original summation order exactly.
    for idx, weight in enumerate(coeff[1:]):
        total += x[idx] * weight
    total += coeff[0]
    return total
# find the abalone data set relative to this script
# NOTE(review): the original comment said "Iris", but the file loaded is
# abalone.csv.  Also note the missing path separator before "../.." — the
# fused component (e.g. "examples..") is consumed by the following "..",
# so abspath still resolves to the intended datasets directory; consider
# os.path.join for clarity.
abaloneFile = os.path.dirname(os.path.realpath(__file__))
abaloneFile = os.path.abspath(abaloneFile + "../../datasets/abalone.csv")

# Normalize abalone file.
norm = Normalize()
abalone_work = norm.load_csv(abaloneFile)

# Make all columns beyond col #1 numeric.
for i in range(1, 9):
    norm.make_col_numeric(abalone_work, i)

# Discover all of the classes for column #1, the gender.
classes = norm.build_class_map(abalone_work, 0)

# Normalize gender one-of-n encoding (adds one column per class).
norm.norm_col_one_of_n(abalone_work, 0, classes, 0, 1)

# Separate into input and ideal.
training = np.array(abalone_work)
training_input = training[:, 0:10]    # gender one-of-n + measurements
training_ideal = training[:, 10:11]   # ring count

# Fit the model and report the coefficients.
coeff = multi_linear_regression(training_input, training_ideal)
print("Solution coefficients: " + str(coeff))
# Evaluate.
for i in range(0, len(training_input)):
row = training_input[i]
y = calc_linear_regression(coeff, row)
print( " -> Actual: " + str(y) + ", Ideal:" + str(training_ideal[i][0])) | trenton3983/Artificial_Intelligence_for_Humans | vol1/python-examples/examples/example_linear_regression.py | Python | apache-2.0 | 3,831 | [
"VisIt"
] | f53091e572796597153141b616ad1e4d32785872dff6bc1e5b4f698f5729821e |
"""
setup.py
~~~~~~~~~~~~~
:copyright: (c) 2016 Sander Bollen
:copyright: (c) 2016 Leiden University Medical Center
:license: MIT
"""
from os.path import abspath, dirname, join
from setuptools import setup
readme_file = join(abspath(dirname(__file__)), "README.rst")
with open(readme_file) as desc_handle:
long_desc = desc_handle.read()
setup(
name="afplot",
version="0.2.1",
description="Plot allele frequencies in VCF files",
long_description=long_desc,
author="Sander Bollen",
author_email="a.h.b.bollen@lumc.nl",
url="https://github.com/sndrtj/afplot",
license="MIT",
packages=["afplot"],
install_requires=[
"click",
"numpy",
"matplotlib",
"pandas",
"seaborn",
"progressbar2",
"pysam",
"pyvcf"
],
entry_points={
"console_scripts": [
"afplot = afplot.cli:main"
]
},
classifiers=[
"Topic :: Scientific/Engineering :: Bio-Informatics"
]
) | sndrtj/afplot | setup.py | Python | mit | 1,007 | [
"pysam"
] | 57c3efb990dc587df02311d8ef7d9e797038dd6b47d783181986f5048c4a935f |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import copy
import os
import os.path
import spack.util.environment
class Cp2k(MakefilePackage, CudaPackage):
"""CP2K is a quantum chemistry and solid state physics software package
that can perform atomistic simulations of solid state, liquid, molecular,
periodic, material, crystal, and biological systems
"""
homepage = 'https://www.cp2k.org'
url = 'https://github.com/cp2k/cp2k/releases/download/v3.0.0/cp2k-3.0.tar.bz2'
git = 'https://github.com/cp2k/cp2k.git'
list_url = 'https://github.com/cp2k/cp2k/releases'
maintainers = ['dev-zero']
version('8.2', sha256='2e24768720efed1a5a4a58e83e2aca502cd8b95544c21695eb0de71ed652f20a')
version('8.1', sha256='7f37aead120730234a60b2989d0547ae5e5498d93b1e9b5eb548c041ee8e7772')
version('7.1', sha256='ccd711a09a426145440e666310dd01cc5772ab103493c4ae6a3470898cd0addb')
version('6.1', sha256='af803558e0a6b9e9d9ce8a3ab955ba32bacd179922455424e061c82c9fefa34b')
version('5.1', sha256='e23613b593354fa82e0b8410e17d94c607a0b8c6d9b5d843528403ab09904412')
version('4.1', sha256='4a3e4a101d8a35ebd80a9e9ecb02697fb8256364f1eccdbe4e5a85d31fe21343')
version('3.0', sha256='1acfacef643141045b7cbade7006f9b7538476d861eeecd9658c9e468dc61151')
version('master', branch='master', submodules="True")
variant('mpi', default=True, description='Enable MPI support')
variant('openmp', default=True, description='Enable OpenMP support')
variant('smm', default='libxsmm', values=('libxsmm', 'libsmm', 'blas'),
description='Library for small matrix multiplications')
variant('plumed', default=False, description='Enable PLUMED support')
variant('libint', default=True,
description='Use libint, required for HFX (and possibly others)')
variant('libxc', default=True,
description='Support additional functionals via libxc')
variant('pexsi', default=False,
description=('Enable the alternative PEXSI method'
'for density matrix evaluation'))
variant('elpa', default=False,
description='Enable optimised diagonalisation routines from ELPA')
variant('sirius', default=False,
description=('Enable planewave electronic structure'
' calculations via SIRIUS'))
variant('cosma', default=False, description='Use COSMA for p?gemm')
variant('libvori', default=False,
description=('Enable support for Voronoi integration'
' and BQB compression'))
variant('spglib', default=False, description='Enable support for spglib')
with when('+cuda'):
variant('cuda_arch_35_k20x', default=False,
description=('CP2K (resp. DBCSR) has specific parameter sets for'
' different GPU models. Enable this when building'
' with cuda_arch=35 for a K20x instead of a K40'))
variant('cuda_fft', default=False,
description=('Use CUDA also for FFTs in the PW part of CP2K'))
variant('cuda_blas', default=False,
description=('Use CUBLAS for general matrix operations in DBCSR'))
HFX_LMAX_RANGE = range(4, 8)
variant('lmax',
description='Maximum supported angular momentum (HFX and others)',
default='5',
values=[str(x) for x in HFX_LMAX_RANGE],
multi=False)
depends_on('python', type='build')
depends_on('python@3:', when='@8:', type='build')
depends_on('blas')
depends_on('lapack')
depends_on('fftw-api@3')
# Force openmp propagation on some providers of blas / fftw-api
with when('+openmp'):
depends_on('fftw+openmp', when='^fftw')
depends_on('amdfftw+openmp', when='^amdfftw')
depends_on('openblas threads=openmp', when='^openblas')
with when('smm=libxsmm'):
# require libxsmm-1.11+ since 1.10 can leak file descriptors in Fortran
depends_on('libxsmm@1.11:~header-only')
# use pkg-config (support added in libxsmm-1.10) to link to libxsmm
depends_on('pkgconfig', type='build')
# please set variants: smm=blas by configuring packages.yaml or install
# cp2k with option smm=blas on aarch64
conflicts('target=aarch64:', msg='libxsmm is not available on arm')
with when('+libint'):
# ... and in CP2K 7.0+ for linking to libint2
depends_on('pkgconfig', type='build', when='@7.0:')
# libint & libxc are always statically linked
depends_on('libint@1.1.4:1.2', when='@3.0:6.9')
for lmax in HFX_LMAX_RANGE:
# libint2 can be linked dynamically again
depends_on('libint@2.6.0:+fortran tune=cp2k-lmax-{0}'.format(lmax),
when='@7.0: lmax={0}'.format(lmax))
with when('+libxc'):
depends_on('pkgconfig', type='build', when='@7.0:')
depends_on('libxc@2.2.2:3', when='@:5', type='build')
depends_on('libxc@4.0.3:4', when='@6.0:6.9', type='build')
depends_on('libxc@4.0.3:4', when='@7.0:8.1')
depends_on('libxc@5.1.3:5.1', when='@8.2:')
with when('+mpi'):
depends_on('mpi@2:')
depends_on('scalapack')
with when('+cosma'):
depends_on('cosma+scalapack')
depends_on('cosma+cuda', when='+cuda')
conflicts('~mpi')
# COSMA support was introduced in 8+
conflicts('@:7')
with when('+elpa'):
conflicts('~mpi', msg='elpa requires MPI')
depends_on('elpa+openmp', when='+openmp')
depends_on('elpa~openmp', when='~openmp')
depends_on('elpa@2011.12:2016.13', when='@:5')
depends_on('elpa@2011.12:2017.11', when='@6.0:6')
depends_on('elpa@2018.05:2020.11.001', when='@7.0:8.2')
depends_on('elpa@2021.05:', when='@8.3:')
with when('+plumed'):
depends_on('plumed+shared')
depends_on('plumed+mpi', when='+mpi')
depends_on('plumed~mpi', when='~mpi')
# while we link statically against PEXSI, its own deps may be linked in
# dynamically, therefore can't set this as pure build-type dependency.
with when('+pexsi'):
conflicts('~mpi', msg='pexsi requires MPI')
depends_on('pexsi+fortran@0.9.0:0.9', when='@:4')
depends_on('pexsi+fortran@0.10.0:', when='@5.0:')
# only OpenMP should be consistently used, all other common things
# like ELPA, SCALAPACK are independent and Spack will ensure that
# a consistent/compatible combination is pulled into the dependency graph.
with when('+sirius'):
depends_on('sirius+fortran+shared')
depends_on('sirius+openmp', when='+openmp')
depends_on('sirius~openmp', when='~openmp')
depends_on('sirius@:6', when='@:7')
depends_on('sirius@7.0.0:7.0', when='@8:8.2')
depends_on('sirius@7.2:', when='@8.3:')
conflicts('~mpi')
# sirius support was introduced in 7+
conflicts('@:6')
with when('+libvori'):
depends_on('libvori@201219:', when='@8.1', type='build')
depends_on('libvori@210412:', when='@8.2:', type='build')
# libvori support was introduced in 8+
conflicts('@:7')
# the bundled libcusmm uses numpy in the parameter prediction (v7+)
# which is written using Python 3
depends_on('py-numpy', when='@7:+cuda', type='build')
depends_on('python@3.6:', when='@7:+cuda', type='build')
depends_on('spglib', when='+spglib')
# Apparently cp2k@4.1 needs an "experimental" version of libwannier.a
# which is only available contacting the developer directly. See INSTALL
# in the stage of cp2k@4.1
depends_on('wannier90', when='@3.0+mpi', type='build')
# CP2K needs compiler specific compilation flags, e.g. optflags
conflicts('%apple-clang')
conflicts('%clang')
conflicts('%nag')
conflicts('~openmp', when='@8:', msg='Building without OpenMP is not supported in CP2K 8+')
# We only support specific cuda_archs for which we have parameter files
# for optimal kernels. Note that we don't override the cuda_archs property
# from the parent class, since the parent class defines constraints for all
# versions. Instead just mark all unsupported cuda archs as conflicting.
dbcsr_cuda_archs = ('35', '37', '60', '70')
cuda_msg = 'cp2k only supports cuda_arch {0}'.format(dbcsr_cuda_archs)
for arch in CudaPackage.cuda_arch_values:
if arch not in dbcsr_cuda_archs:
conflicts('+cuda', when='cuda_arch={0}'.format(arch), msg=cuda_msg)
conflicts('+cuda', when='cuda_arch=none', msg=cuda_msg)
# Fix 2- and 3-center integral calls to libint
patch("https://github.com/cp2k/cp2k/commit/5eaf864ed2bd21fb1b05a9173bb77a815ad4deda.patch",
sha256="18e58ba8fdde5c507bece48ec064f7f2b80e59d1b7cfe6b7a639e5f64f84d43f",
when="@8.2")
@property
def makefile_architecture(self):
return '{0.architecture}-{0.compiler.name}'.format(self.spec)
@property
def makefile_version(self):
return '{prefix}{suffix}'.format(
prefix='p' if '+mpi' in self.spec else 's',
suffix='smp' if '+openmp' in self.spec else 'opt'
)
@property
def makefile(self):
makefile_basename = '.'.join([
self.makefile_architecture, self.makefile_version
])
return join_path('arch', makefile_basename)
@property
def archive_files(self):
return [join_path(self.stage.source_path, self.makefile)]
    def edit(self, spec, prefix):
        """Generate the CP2K arch file (``self.makefile``).

        Accumulates preprocessor defines, per-compiler optimization and
        dialect flags, linker flags and library lists from the concretized
        spec, then writes them out in the key/value format expected by
        CP2K's Makefile-based build system.
        """
        pkgconf = which('pkg-config')

        # Pick the FFTW(-API) provider and its header directory.
        # NOTE(review): assumes exactly one of these branches matches —
        # otherwise `fftw`/`fftw_header_dir` stay undefined and the code
        # below raises NameError; presumably guaranteed by the package's
        # fftw-api dependency. TODO confirm.
        if '^fftw' in spec:
            fftw = spec['fftw:openmp' if '+openmp' in spec else 'fftw']
            fftw_header_dir = fftw.headers.directories[0]
        elif '^amdfftw' in spec:
            fftw = spec['amdfftw:openmp' if '+openmp' in spec else 'amdfftw']
            fftw_header_dir = fftw.headers.directories[0]
        elif '^intel-mkl' in spec:
            fftw = spec['intel-mkl']
            fftw_header_dir = fftw.headers.directories[0] + '/fftw'
        elif '^intel-oneapi-mkl' in spec:
            fftw = spec['intel-oneapi-mkl']
            fftw_header_dir = fftw.headers.directories[0] + '/fftw'
        elif '^intel-parallel-studio+mkl' in spec:
            fftw = spec['intel-parallel-studio']
            # probe the header directories for an fftw/ subdir
            fftw_header_dir = '<NOTFOUND>'
            for incdir in [join_path(f, 'fftw')
                           for f in fftw.headers.directories]:
                if os.path.exists(incdir):
                    fftw_header_dir = incdir
                    break
        elif '^cray-fftw' in spec:
            fftw = spec['cray-fftw']
            fftw_header_dir = fftw.headers.directories[0]

        # Base optimization flags, keyed by compiler name.
        optimization_flags = {
            'gcc': [
                '-O2',
                '-funroll-loops',
                '-ftree-vectorize',
            ],
            'intel': ['-O2', '-pc64', '-unroll', ],
            'pgi': ['-fast'],
            'nvhpc': ['-fast'],
            'cce': ['-O2'],
            'xl': ['-O3'],
            'aocc': ['-O1'],
        }

        dflags = ['-DNDEBUG']
        cppflags = [
            '-D__FFTW3',
            '-I{0}'.format(fftw_header_dir),
        ]

        # Tell CP2K which MPI standard the provider implements.
        if '^mpi@3:' in spec:
            cppflags.append('-D__MPI_VERSION=3')
        elif '^mpi@2:' in spec:
            cppflags.append('-D__MPI_VERSION=2')

        # Copies ([:]) so the per-language lists can diverge below.
        cflags = optimization_flags[self.spec.compiler.name][:]
        cxxflags = optimization_flags[self.spec.compiler.name][:]
        fcflags = optimization_flags[self.spec.compiler.name][:]
        nvflags = ['-O3']
        ldflags = []
        libs = []
        gpuver = ''

        # Compiler-specific (mostly Fortran dialect) flags.
        if '%intel' in spec:
            cflags.append('-fp-model precise')
            cxxflags.append('-fp-model precise')
            fcflags += [
                '-fp-model precise',
                '-heap-arrays 64',
                '-g',
                '-traceback',
            ]
        elif '%gcc' in spec:
            fcflags += [
                '-ffree-form',
                '-ffree-line-length-none',
                '-ggdb',  # make sure we get proper Fortran backtraces
            ]
        elif '%aocc' in spec:
            fcflags += [
                '-ffree-form',
                '-Mbackslash',
            ]
        elif '%pgi' in spec or '%nvhpc' in spec:
            fcflags += ['-Mfreeform', '-Mextend']
        elif '%cce' in spec:
            fcflags += ['-emf', '-ffree', '-hflex_mp=strict']
        elif '%xl' in spec:
            fcflags += ['-qpreprocess', '-qstrict', '-q64']
            ldflags += ['-Wl,--allow-multiple-definition']

        if '%gcc@10: +mpi' in spec and spec['mpi'].name in ['mpich', 'cray-mpich']:
            fcflags += ['-fallow-argument-mismatch']  # https://github.com/pmodels/mpich/issues/4300

        if '+openmp' in spec:
            cflags.append(self.compiler.openmp_flag)
            cxxflags.append(self.compiler.openmp_flag)
            fcflags.append(self.compiler.openmp_flag)
            ldflags.append(self.compiler.openmp_flag)
            nvflags.append('-Xcompiler="{0}"'.format(
                self.compiler.openmp_flag))
        elif '%cce' in spec:  # Cray enables OpenMP by default
            cflags += ['-hnoomp']
            cxxflags += ['-hnoomp']
            fcflags += ['-hnoomp']
            ldflags += ['-hnoomp']

        if '@7:' in spec:  # recent versions of CP2K use C++14 CUDA code
            cxxflags.append(self.compiler.cxx14_flag)
            nvflags.append(self.compiler.cxx14_flag)

        ldflags.append(fftw.libs.search_flags)

        if 'superlu-dist@4.3' in spec:
            ldflags.insert(0, '-Wl,--allow-multiple-definition')

        if '+plumed' in self.spec:
            dflags.extend(['-D__PLUMED2'])
            cppflags.extend(['-D__PLUMED2'])
            libs.extend([
                join_path(self.spec['plumed'].prefix.lib,
                          'libplumed.{0}'.format(dso_suffix))
            ])

        # Use the MPI wrappers unless the build is serial.
        cc = spack_cc if '~mpi' in spec else spec['mpi'].mpicc
        cxx = spack_cxx if '~mpi' in spec else spec['mpi'].mpicxx
        fc = spack_fc if '~mpi' in spec else spec['mpi'].mpifc

        # Intel
        if '%intel' in spec:
            cppflags.extend([
                '-D__INTEL',
                '-D__HAS_ISO_C_BINDING',
                '-D__USE_CP2K_TRACE',
            ])
            fcflags.extend([
                '-diag-disable 8290,8291,10010,10212,11060',
                '-free',
                '-fpp'
            ])

        # FFTW, LAPACK, BLAS
        lapack = spec['lapack'].libs
        blas = spec['blas'].libs
        ldflags.append((lapack + blas).search_flags)
        libs.extend([str(x) for x in (fftw.libs, lapack, blas)])

        if any(p in spec for p in ('^intel-mkl',
                                   '^intel-parallel-studio+mkl',
                                   '^intel-oneapi-mkl')):
            cppflags += ['-D__MKL']
        elif '^accelerate' in spec:
            cppflags += ['-D__ACCELERATE']

        if '+cosma' in spec:
            # add before ScaLAPACK to override the p?gemm symbols
            cosma = spec['cosma'].libs
            ldflags.append(cosma.search_flags)
            libs.extend(cosma)

        # MPI
        if '+mpi' in spec:
            cppflags.extend([
                '-D__parallel',
                '-D__SCALAPACK'
            ])

            if '^intel-oneapi-mpi' in spec:
                mpi = [join_path(
                    spec['intel-oneapi-mpi'].libs.directories[0],
                    'libmpi.so')]
            else:
                mpi = spec['mpi:cxx'].libs

            # while intel-mkl has a mpi variant and adds the scalapack
            # libs to its libs, intel-oneapi-mkl does not.
            if '^intel-oneapi-mkl' in spec:
                mpi_impl = 'openmpi' if '^openmpi' in spec else 'intelmpi'
                scalapack = [
                    join_path(
                        spec['intel-oneapi-mkl'].libs.directories[0],
                        'libmkl_scalapack_lp64.so'),
                    join_path(
                        spec['intel-oneapi-mkl'].libs.directories[0],
                        'libmkl_blacs_{0}_lp64.so'.format(mpi_impl)
                    )
                ]
            else:
                scalapack = spec['scalapack'].libs
                ldflags.append(scalapack.search_flags)

            libs.extend(scalapack)
            libs.extend(mpi)
            libs.extend(self.compiler.stdcxx_libs)

            if 'wannier90' in spec:
                cppflags.append('-D__WANNIER90')
                wannier = join_path(
                    spec['wannier90'].libs.directories[0], 'libwannier.a'
                )
                libs.append(wannier)

        if '+libint' in spec:
            cppflags += ['-D__LIBINT']

            # CP2K <= 6.9 uses libint 1.x with fixed max angular momenta.
            if '@:6.9' in spec:
                cppflags += [
                    '-D__LIBINT_MAX_AM=6',
                    '-D__LIBDERIV_MAX_AM1=5',
                ]

                # libint-1.x.y has to be linked statically to work around
                # inconsistencies in its Fortran interface definition
                # (short-int vs int) which otherwise causes segfaults at
                # runtime due to wrong offsets into the shared library
                # symbols.
                libs.extend([
                    join_path(
                        spec['libint'].libs.directories[0], 'libderiv.a'),
                    join_path(
                        spec['libint'].libs.directories[0], 'libint.a'),
                ])
            else:
                # libint 2.x ships a pkg-config file
                fcflags += pkgconf('--cflags', 'libint2', output=str).split()
                libs += pkgconf('--libs', 'libint2', output=str).split()

        if '+libxc' in spec:
            cppflags += ['-D__LIBXC']

            if '@:6.9' in spec:
                libxc = spec['libxc:fortran,static']
                cppflags += [libxc.headers.cpp_flags]
                ldflags.append(libxc.libs.search_flags)
                libs.append(str(libxc.libs))
            else:
                fcflags += pkgconf('--cflags', 'libxcf03', output=str).split()
                # some Fortran functions seem to be direct wrappers of the
                # C functions such that we get a direct dependency on them,
                # requiring `-lxc` to be present in addition to `-lxcf03`
                libs += pkgconf('--libs', 'libxcf03', 'libxc', output=str).split()

        if '+pexsi' in spec:
            cppflags.append('-D__LIBPEXSI')
            fcflags.append('-I' + join_path(
                spec['pexsi'].prefix, 'fortran'))
            libs.extend([
                join_path(spec['pexsi'].libs.directories[0], 'libpexsi.a'),
                join_path(spec['superlu-dist'].libs.directories[0],
                          'libsuperlu_dist.a'),
                join_path(
                    spec['parmetis'].libs.directories[0],
                    'libparmetis.{0}'.format(dso_suffix)
                ),
                join_path(
                    spec['metis'].libs.directories[0],
                    'libmetis.{0}'.format(dso_suffix)
                ),
            ])

        if '+elpa' in spec:
            elpa = spec['elpa']
            elpa_suffix = '_openmp' if '+openmp' in elpa else ''
            elpa_incdir = elpa.headers.directories[0]

            fcflags += ['-I{0}'.format(join_path(elpa_incdir, 'modules'))]

            # Currently AOCC support only static libraries of ELPA
            if '%aocc' in spec:
                libs.append(join_path(elpa.prefix.lib,
                                      ('libelpa{elpa_suffix}.a'
                                       .format(elpa_suffix=elpa_suffix))))
            else:
                libs.append(join_path(elpa.libs.directories[0],
                                      ('libelpa{elpa_suffix}.{dso_suffix}'
                                       .format(elpa_suffix=elpa_suffix,
                                               dso_suffix=dso_suffix))))

            # The __ELPA* define changed over CP2K/ELPA releases.
            if spec.satisfies('@:4'):
                if elpa.satisfies('@:2014.5'):
                    cppflags.append('-D__ELPA')
                elif elpa.satisfies('@2014.6:2015.10'):
                    cppflags.append('-D__ELPA2')
                else:
                    cppflags.append('-D__ELPA3')
            else:
                cppflags.append('-D__ELPA={0}{1:02d}'
                                .format(elpa.version[0],
                                        int(elpa.version[1])))
                fcflags += ['-I{0}'.format(join_path(elpa_incdir, 'elpa'))]

        if spec.satisfies('+sirius'):
            sirius = spec['sirius']
            cppflags.append('-D__SIRIUS')
            fcflags += ['-I{0}'.format(sirius.prefix.include.sirius)]
            libs += list(sirius.libs)

        if spec.satisfies('+cuda'):
            cppflags += ['-D__ACC']
            libs += ['-lcudart', '-lnvrtc', '-lcuda']

            if spec.satisfies('+cuda_blas'):
                cppflags += ['-D__DBCSR_ACC=2']
                libs += ['-lcublas']
            else:
                cppflags += ['-D__DBCSR_ACC']

            if spec.satisfies('+cuda_fft'):
                cppflags += ['-D__PW_CUDA']
                libs += ['-lcufft', '-lcublas']

            # Map the cuda_arch to the GPUVER name used by CP2K's makefiles.
            cuda_arch = spec.variants['cuda_arch'].value[0]
            if cuda_arch:
                gpuver = {
                    '35': 'K40',
                    '37': 'K80',
                    '60': 'P100',
                    '70': 'V100',
                }[cuda_arch]

                if (cuda_arch == '35'
                        and spec.satisfies('+cuda_arch_35_k20x')):
                    gpuver = 'K20X'

        if 'smm=libsmm' in spec:
            lib_dir = join_path(
                'lib', self.makefile_architecture, self.makefile_version
            )
            mkdirp(lib_dir)
            # libsmm is not a Spack package: the user must point
            # LIBSMM_PATH at a prebuilt static library.
            try:
                copy(env['LIBSMM_PATH'], join_path(lib_dir, 'libsmm.a'))
            except KeyError:
                raise KeyError('Point environment variable LIBSMM_PATH to '
                               'the absolute path of the libsmm.a file')
            except IOError:
                raise IOError('The file LIBSMM_PATH pointed to does not '
                              'exist. Note that it must be absolute path.')
            cppflags.extend([
                '-D__HAS_smm_dnn',
                '-D__HAS_smm_vec',
            ])
            libs.append('-lsmm')
        elif 'smm=libxsmm' in spec:
            cppflags += ['-D__LIBXSMM']
            cppflags += pkgconf('--cflags-only-other', 'libxsmmf',
                                output=str).split()
            fcflags += pkgconf('--cflags-only-I', 'libxsmmf',
                               output=str).split()
            libs += pkgconf('--libs', 'libxsmmf', output=str).split()

        if '+libvori' in spec:
            cppflags += ['-D__LIBVORI']
            libvori = spec['libvori'].libs
            ldflags += [libvori.search_flags]
            libs += libvori
            libs += ['-lstdc++']

        if '+spglib' in spec:
            cppflags += ['-D__SPGLIB']
            spglib = spec['spglib'].libs
            ldflags += [spglib.search_flags]
            libs += spglib

        # Propagate the collected preprocessor defines to every compile line.
        dflags.extend(cppflags)
        cflags.extend(cppflags)
        cxxflags.extend(cppflags)
        fcflags.extend(cppflags)
        nvflags.extend(cppflags)

        # Finally emit the arch file.
        with open(self.makefile, 'w') as mkf:
            if '+plumed' in spec:
                mkf.write('# include Plumed.inc as recommended by'
                          'PLUMED to include libraries and flags')
                mkf.write('include {0}\n'.format(
                    spec['plumed'].package.plumed_inc
                ))

            mkf.write('\n# COMPILER, LINKER, TOOLS\n\n')
            mkf.write('FC = {0}\n'
                      'CC = {1}\n'
                      'CXX = {2}\n'
                      'LD = {3}\n'
                      .format(fc, cc, cxx, fc))

            if '%intel' in spec:
                intel_bin_dir = ancestor(self.compiler.cc)
                # CPP is a commented command in Intel arch of CP2K
                # This is the hack through which cp2k developers avoid doing :
                #
                # ${CPP} <file>.F > <file>.f90
                #
                # and use `-fpp` instead
                mkf.write('CPP = # {0} -P\n'.format(spack_cc))
                mkf.write('AR = {0}/xiar -r\n'.format(intel_bin_dir))
            else:
                mkf.write('CPP = # {0} -E\n'.format(spack_cc))
                mkf.write('AR = ar -r\n')

            if spec.satisfies('+cuda'):
                mkf.write('NVCC = {0}\n'.format(
                    join_path(spec['cuda'].prefix, 'bin', 'nvcc')))

            # Write compiler flags to file
            def fflags(var, lst):
                # one flag per continuation line for readability of the arch file
                return '{0} = {1}\n\n'.format(
                    var,
                    ' \\\n\t'.join(lst))

            mkf.write('\n# FLAGS & LIBRARIES\n')
            mkf.write(fflags('DFLAGS', dflags))
            mkf.write(fflags('CPPFLAGS', cppflags))
            mkf.write(fflags('CFLAGS', cflags))
            mkf.write(fflags('CXXFLAGS', cxxflags))
            mkf.write(fflags('NVFLAGS', nvflags))
            mkf.write(fflags('FCFLAGS', fcflags))
            mkf.write(fflags('LDFLAGS', ldflags))
            mkf.write(fflags('LIBS', libs))

            if '%intel' in spec:
                mkf.write(fflags('LDFLAGS_C', ldflags + ['-nofor-main']))

            mkf.write('# CP2K-specific flags\n\n')
            mkf.write('GPUVER = {0}\n'.format(gpuver))
            mkf.write('DATA_DIR = {0}\n'.format(self.prefix.share.data))
@property
def build_directory(self):
build_dir = self.stage.source_path
if self.spec.satisfies('@:6'):
# prior to version 7.1 was the Makefile located in makefiles/
build_dir = join_path(build_dir, 'makefiles')
return build_dir
@property
def build_targets(self):
return [
'ARCH={0}'.format(self.makefile_architecture),
'VERSION={0}'.format(self.makefile_version)
]
def build(self, spec, prefix):
if '+cuda' in spec and len(spec.variants['cuda_arch'].value) > 1:
raise InstallError("cp2k supports only one cuda_arch at a time")
# Apparently the Makefile bases its paths on PWD
# so we need to set PWD = self.build_directory
with spack.util.environment.set_env(PWD=self.build_directory):
super(Cp2k, self).build(spec, prefix)
def install(self, spec, prefix):
exe_dir = join_path('exe', self.makefile_architecture)
install_tree(exe_dir, self.prefix.bin)
install_tree('data', self.prefix.share.data)
def check(self):
data_dir = join_path(self.stage.source_path, 'data')
# CP2K < 7 still uses $PWD to detect the current working dir
# and Makefile is in a subdir, account for both facts here:
with spack.util.environment.set_env(CP2K_DATA_DIR=data_dir,
PWD=self.build_directory):
with working_dir(self.build_directory):
make('test', *self.build_targets)
| LLNL/spack | var/spack/repos/builtin/packages/cp2k/package.py | Python | lgpl-2.1 | 27,459 | [
"CP2K",
"CRYSTAL",
"Wannier90"
] | c90a877169553a64099762d2d0d0f4aa98b0023b8d8ee07653b390d00c36652d |
# -*- coding: utf-8 -*-
{
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '\\ " update\\ " è un\' espressione facoltativa come " field1=\'newvalue\'". Non è possibile aggiornare o eliminare i risultati di una JOIN',
'# of International Staff': '# di Personale Internazionale',
'# of National Staff': '# di Personale Nazionale',
'# of Vehicles': '# di Veicoli',
'# Results per query': '# Risultati della query',
'# selected': '# selezionato',
'%(app)s not installed. Ask the Server Administrator to install on Server.': '%(app)s non è installata. Verificare con l\'Amministratore del server',
'%(count)s Recipients': '%(count)s Contenitori',
'%(count)s Roles of the user removed': '%(count)s Ruoli dell\'utente rimossi',
'%(count)s Users removed from Role': '%(count)s Utenti rimossi dal Ruolo',
'%(label)s contains %(values)s': '%(label)s contiene %(values)s',
'%(label)s contains any of %(values)s': '%(label)s contains any of %(values)s',
'%(label)s does not contain %(values)s': '%(label)s non contiene %(values)s',
'%(label)s is %(values)s': '%(label)s è %(values)s',
'%(label)s like %(values)s': '%(label)s come %(values)s',
'%(label)s not like %(values)s': '%(label)s not like %(values)s',
'%(msg)s\nIf the request type is "%(type)s", please enter the %(type)s on the next screen.': '%(msg)s\nIf il tipo di richiesta è "%(type)s", inserire il %(type)s nel pannello successivo.',
'%(number)s assigned': '%(number)s assegnato',
'%(pe)s in %(location)s': '%(pe)s in %(location)s',
'%(proj4js)s definition': '%(proj4js)s definizione',
'%(resource)s Filter': '%(resource)s Filtrare',
'%(site_label)s Status': '%(site_label)s Stato',
'%(site_label)s Status added': '%(site_label)s Stato aggiunto',
'%(site_label)s Status deleted': '%(site_label)s Stato cancellato',
'%(site_label)s Status updated': '%(site_label)s Stato aggiornato',
'%(system_name)s - New User Registered': '%(system_name)s - Registrato un nuovo utente',
'%(system_name)s - New User Registration Approval Pending': '%(system_name)s - Registrazione di un nuovo utente in attesa di approvazione',
'%(system_name)s - Verify Email': '%(system_name)s - Verificare E-Mail',
'%(system_name)s has sent an email to %(email)s to verify your email address.\\nPlease check your email to verify this address. If you do not receive this email please check you junk email or spam filters.': '%(system_name)s ha inviato una email a %(email)s per verificare il tuo indirizzo.\\nSi prega di verificare la propria casella di posta.Se non ha ricevuto la mail, si prega di verificare nel junk email o i filtri sullo Spam.',
'%d/%m/%Y': '%d/%m/%Y',
'%s (%s)': '%s (%s)',
'%s and %s': '%s e %s',
'%s AND %s': '%s E %s',
'%s or %s': '%s o %s',
'%s OR %s': '%s O %s',
'%s rows deleted': '% di righe cancellate',
'%s rows updated': '% di righe aggiornate',
'& then click on the map below to adjust the Lat/Lon fields': '& quindi fare clic sulla mappa di seguito per regolare i campi Lat/Lon',
'\'Cancel\' will indicate an asset log entry did not occur': '\'Annulla\' indicherà che una voce di registrazione cespite non si è verificata',
'(filtered from _MAX_ total entries)': '(filtrato da _MAX_ total entries)',
'(RFC822)': '(RFC822)',
'* Required Fields': '* Campi obbligatori',
'...or add a new bin': '... o aggiungi un nuovo bin ',
'0-15 minutes': '0-15 minuti',
'1 Assessment': '1 Valutazione',
'1 location, shorter time, can contain multiple Tasks': '1 L\'ubicazione, il più breve tempo, può contenere più Attività',
'1-3 days': '1-3 giorni',
'1. Fill the necessary fields in BLOCK CAPITAL letters.': '1.Riempire i campi necessari con lettere maiuscole',
'15-30 minutes': '15-30 minuti',
'2 different options are provided here currently:': '2 diverse opzioni sono fornite in questa sede oggi:',
'2. Always use one box per letter and leave one box space to separate words.': '2. Usare una lettera per cella e lasciare uno spazio come separatore delle parole.',
'2x4 Car': 'Auto 2x4',
'3. Fill in the circles completely.': '3. Riempire completamente i cerchi.',
'30-60 minutes': '30-60 minuti',
'3W': '3W',
'4-7 days': '4-7 giorni',
'4x4 Car': 'AUTO 4x4',
'8-14 days': '8-14 giorni',
'_NUM_ duplicates found': '_NUM_ trovati duplicati',
'A brief description of the group (optional)': 'Una breve descrizione del gruppo (facoltativo)',
'A file downloaded from a GPS containing a series of geographic points in XML format.': 'Un file scaricato da un GPS contenente una serie di punti geografici in formato XML.',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'Un file in formato GPX preso da un GPS i cui timestamp possono essere correlati con i timestamp sulle foto per individuarle sulla mappa.',
'A file in GPX format taken from a GPS.': 'Un file in formato GPX preso da un GPS.',
'A library of digital resources, such as photos, documents and reports': 'Una libreria di risorse digitali, quali foto, documenti e relazioni',
'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.': "Un gruppo ubicazione può essere utilizzato per definire l'estensione di un area interessato, se questa non rientra in una regione amministrativa.",
'A location group is a set of locations (often, a set of administrative regions representing a combined area).': 'Un gruppo ubicazione è una serie di ubicazioni (spesso, una serie di regioni amministrative che rappresenta una superficie complessiva).',
'A location group must have at least one member.': 'Un gruppo ubicazione deve avere almeno un Membro.',
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": "Una collocazione che specifica l' area geografica per questa regione. Questa può essere un'ubicazione dalla gerarchia delle ubicazioni, o una 'ubicazione di gruppo', o un'ubicazione che ha un confine per l'area.",
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': "Un Indicatore assegnato ad una singola ubicazione è impostato se c'è la necessità di sostituire l'Indicatore assegnato alla Classe Funzione.",
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'Un documento di riferimento come ad esempio un file, URL o persona di contatto per verificare questi dati. È possibile immettere i primi pochi caratteri del nome del documento per collegarsi ad un documento esistente.',
'A strict location hierarchy cannot have gaps.': 'Una gerarchia di locazioni stretta non può avere gaps.',
'Abbreviation': 'Abbreviazione',
'Ability to customize the list of details tracked at a Shelter': 'Possibilità di personalizzare l\'elenco dei dettagli tracciati nella Struttura di Accoglienza',
'Ability to customize the list of human resource tracked at a Shelter': 'Possibilità di personalizzare l\'elenco di risorse umane tracciate nella Struttura di Accoglienza',
'Ability to customize the list of important facilities needed at a Shelter': 'Possibilità di personalizzare l\'elenco di strutture importanti necessarie alle strutture di Accoglienza.',
'Ability to Fill Out Surveys': 'Possibilità di compilare Indagini',
'Ability to view Results of Completed and/or partially filled out Surveys': 'Possibilità di visualizzare I risultati di Indagini completate e/o parzialmente compilate',
'About': 'Info su',
'ABOUT THIS MODULE': 'SU QUESTO MODULO',
'Academic': 'Accademico',
'Accept Push': 'Accept Push',
'Accept unsolicited data transmissions from the repository.': 'Accept unsolicited data transmissions from the repository.',
'ACCESS DATA': 'ACCESSO AI DATI',
'Access denied': 'Accesso negato',
'Access level': 'Livello di accesso',
'Access to education services': 'Accesso a Servizi di istruzione',
'Access to Shelter': 'Accesso alla Struttura di Accoglienza',
'Accessibility of Affected Location': 'Accessibilità dell\'Ubicazione affetta',
'Accident': 'Incidente',
'Account added': 'Utente aggiunto',
'Account Name': 'Utente registrato',
'Account Registered - Please Check Your Email': 'Conto registrato-controllare la vostra posta elettronica',
'Acronym': 'Acronimo',
"Acronym of the organization's name, eg. IFRC.": "Acronimo del nome dell'organizzazione, ad es. IFRC.",
'Action': 'Azione',
'ACTION REQUIRED': 'AZIONE RICHIESTA',
'Actionable by all targeted recipients': 'Azionabili da tutti destinatari',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': 'Attivabili solo dai partecipanti designati all\'esercizio partecipanti; l\'identificativo dell\'esercizio dovrebbe essere visualizzato in',
'Actioned?': 'Azionate?',
'Actions': 'Azioni',
'Actions taken as a result of this request.': 'Le azioni intraprese come risultato di questa richiesta.',
'Activate': 'Attivare',
'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).': "Attivare eventi da modelli di Scenario per l' assegnazione di adeguate risorse (Umane, Risorse & infrastrutture).",
'activate to sort column ascending': 'Attivare ordinamamento ascendente',
'activate to sort column descending': 'Attivare ordinamamento discendente',
'active': 'attivo',
'Active': 'Attivo',
'Active Incidents': 'Active Incidents',
'Active Problems': 'I problemi attivi',
'Active?': 'Attivo?',
'Activities': 'Attività',
'Activities matching Assessments:': 'Attività corrispondenti alle Valutazioni:',
'Activities of boys 13-17yrs before disaster': 'Attività dei ragazzi 13-17 anni prima del disastro',
'Activities of boys 13-17yrs now': 'Attività dei ragazzi di 13-17 anni ora',
'Activities of boys <12yrs before disaster': 'Attività dei ragazzi <12 anni prima del disastro',
'Activities of boys <12yrs now': 'Attività dei ragazzi <12 anni ora',
'Activities of children': 'Attività di bambini',
'Activities of girls 13-17yrs before disaster': 'Attività delle ragazze dai 13 ai 17 anni prima della catastrofe',
'Activities of girls 13-17yrs now': 'Attività delle ragazze di età compresa tra i 13 ed i 17 anni ora',
'Activities of girls <12yrs before disaster': 'Attività delle ragazze di età inferiore ai 12 anni prima della catastrofe',
'Activities of girls <12yrs now': 'Attività delle ragazze di età inferiore ai 12 anni ora',
'Activity': 'Attività',
'Activity Added': 'Attività Aggiunta',
'Activity Coordinated Assessment': 'Attività di Valutazione coordinata',
'Activity Deleted': 'Attività eliminata',
'Activity Details': 'Dettagli attività',
'Activity Financing': 'Attività di Finanziamento',
'Activity Information Technology': 'Attività di Informatica',
'Activity Public Information': 'Attività di Informazione pubblica',
'Activity Report': "Report dell'Attività",
'Activity Reporting': 'Attività di Reportistica',
'Activity Reports': 'Report dell\'attività',
'Activity Training': 'Attività di Formazione',
'Activity Type': 'Tipo di attività',
'Activity Types': 'Tipi di attività',
'Activity Updated': 'Attività Aggiornata',
'Add': 'Aggiungi',
'Add %(site_label)s Status': 'Aggiungi %(site_label)s Stato',
'Add a new certificate to the catalog.': 'Aggiungi un nuovo certificato al catalogo.',
'Add a new competency rating to the catalog.': 'Aggiungi una nuova valutazione di competenza al catalogo.',
'Add a new course to the catalog.': 'Aggiungere un nuovo corso al catalogo.',
'Add a new job role to the catalog.': 'Aggiungere un nuovo ruolo di lavoro al catalogo.',
'Add a new program to the catalog.': 'Aggiungi un nuovo programma al catalogo.',
'Add a new skill provision to the catalog.': 'Aggiungi un nuovo approviggionamento di capacità al catalogo.',
'Add a new skill to the catalog.': 'Aggiungere una nuova capacità al catalogo.',
'Add a new skill type to the catalog.': 'Aggiungi un nuovo tipo di capacità al catalogo.',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'aggiugnere un documento di riferimento con un file, url o persona da contattare per verificare questi dati Se non si immette un documento di riferimento, sarà visualizzata invece la tua e-mail.',
'Add a Volunteer': 'Aggiungere un Volontario',
'Add Activity Type': 'Aggiungere tipo di attività',
'Add Address': 'Aggiungere indirizzo',
'Add Affiliation': 'Aggiungere Affiliazione',
'Add Alternative Item': 'Aggiungere Voce Alternativa',
'Add Appraisal': 'Aggiungi stima',
'Add Assessment': 'Aggiungere una Valutazione',
'Add Assessment Summary': 'Aggiungere Riepilogo valutazione',
'Add Asset Log Entry - Change Label': 'Aggiungere voce di log dei beni - Modifica Etichetta',
'Add Availability': 'Aumentare Disponibilità',
'Add Award': 'Aggiungi Premio',
'Add Baseline': 'Aggiungi Baseline',
'Add Baseline Type': 'Aggiungi Tipo Baseline',
'Add Branch Organization': 'Aggiunta l\'Unità Organizzativa',
'Add Bundle': 'Aggiungi Pacchetto',
'Add Camp': 'Aggiungi Campo',
'Add Camp Service': 'Aggiungi Servizio di Campo',
'Add Camp Status': 'Aggiungi Stato del Campo',
'Add Camp Type': 'Aggiungi Tipo di Campo',
'Add Certificate for Course': 'Aggiungi certificato del corso',
'Add Certification': 'Aggiungere certificazione',
'Add Competency': 'Aggiungi Competenza',
'Add Contact': 'Aggiungi contatto',
'Add Contact Information': 'Aggiungi Informazioni di Contatto',
'Add Credential': 'Aggiungi credenziale',
'Add Credentials': 'Aggiungi credenziali',
'Add Data to Theme Layer': 'Aggiungi dati al livello tema',
'Add Disaster Victims': 'Aggiungi Vittime del Disastro',
'Add Distribution.': 'Aggiungi distribuzione.',
'Add Donor': 'Aggiungi Donatore',
'Add Education Detail': 'Aggiungi Dettagli Istruzione',
'Add Education Level': 'Aggiungere Livello Istruzione',
'Add Email Account': "Aggiunta l'utenza email",
'Add Flood Report': "Aggiungi report sull'Alluvione",
'Add Group Member': 'Aggiungi Membro del gruppo',
'Add Hours': 'Aggiungi ore',
'Add Human Resource': 'Aggiungi risorsa umana',
'Add Identity': 'Aggiungi Identità',
'Add Image': 'Aggiungi immagine',
'Add Impact': 'Aggiungi Impatto',
'Add Impact Type': "Aggiungi il tipo d'Impatto",
'Add Item': 'Aggiungi voce',
'Add Item to Catalog': 'Aggiungi voce al Catalogo',
'Add Item to Commitment': "Aggiungi voce all'Impegno",
'Add Item to Inventory': "Aggiungi elemento all'Inventario",
'Add Item to Request': 'Aggiungi elemento alla Richiesta',
'Add Item to Shipment': 'Aggiungi elemento alla Spedizione',
'Add Job Role': 'Aggiungi professione',
'Add Key': 'Aggiungi chiave',
'Add Kit': 'Aggiungi Kit',
'Add Layer to this Profile': 'Aggiungi livello a questo profilo',
'Add Level 1 Assessment': 'Aggiungi Valutazione di livello 1',
'Add Level 2 Assessment': 'Aggiungi Valutazione di livello 2',
'Add Line': 'Aggiungi linea',
'Add Location': 'Aggiungi Ubicazione',
'Add Log Entry': 'Aggiungi Voce Registro',
'Add Member': 'Aggiungi Membro',
'Add Membership': 'Aggiungi appartenenza',
'Add Message': 'Aggiungi messaggio',
'Add Mission': 'Aggiungi Missione',
'Add Mobile Commons Settings': 'Aggiungi la configurazione tipo per il Mobile',
'Add Modem Channel': 'Aggiungi il canale Modem',
'Add Need': 'Aggiungi Necessità',
'Add Need Type': 'Aggiungi Tipo di necessità',
'Add New': 'Aggiungi nuovo',
'Add New Assessment Summary': 'Aggiungi un nuovo riepilogo della valutazione',
'Add New Baseline': 'Aggiungi nuovo riferimento',
'Add New Baseline Type': 'Aggiungi nuovo tipo di riferimento',
'Add New Budget': 'Aggiungi nuovo stanziamento',
'Add New Bundle': 'Aggiungi una nuova raccolta',
'Add New Camp': 'Aggiungi nuovo Campo',
'Add New Camp Service': 'Aggiungi nuovo Campo Servizio',
'Add New Camp Type': 'Aggiungi nuovo Campo Tipo',
'Add New Cluster': 'Aggiungi nuovo Cluster',
'Add New Cluster Subsector': 'Aggiungi nuovo Sottosettore del Cluster',
'Add New Commitment Item': "Aggiungi nuova voce d'impegno",
'Add New Document': 'Aggiungi nuovo documento',
'Add New Donor': 'Aggiungi Nuovo Donatore',
'Add New Entry': 'Aggiungi nuova voce',
'Add New Event': 'Aggiungere nuovo evento',
'Add New Flood Report': "Aggiungi un nuovo reportsull'alluvione",
'Add new Group': 'Aggiungi un nuovo gruppo',
'Add New Human Resource': 'Aggiungere nuove risorse umane',
'Add New Image': 'Aggiungi una nuova immagine',
'Add New Impact': 'Aggiungi nuovo Impatto',
'Add New Impact Type': 'Aggiungi nuovo Tipo di Impatto',
'Add new Individual': 'Aggiungere nuovo Individuo',
'Add New Item to Kit': 'Aggiungi nuovo elemento al Kit',
'Add New Key': 'Aggiungi nuova chiave',
'Add New Level 1 Assessment': 'Aggiungi nuova valutazione di livello 1',
'Add New Level 2 Assessment': 'Aggiungi nuova valutazione di livello 2',
'Add New Member': 'Aggiungi nuovo Membro',
'Add New Membership': 'Aggiungere nuova appartenenza',
'Add New Need': 'Aggiungi Nuove Necessità',
'Add New Need Type': "Aggiungi un nuovo tipo di necessita'",
'Add New Population Statistic': 'Aggiungi nuova Statistica di Popolazione',
'Add New Problem': 'Aggiungi nuovo problema',
'Add new project.': 'Aggiungi nuovo progetto.',
'Add New Rapid Assessment': 'Aggiungi nuovo Valutazione Rapida',
'Add New Received Item': 'Aggiungi nuovo Elemento Ricevuto',
'Add New Record': 'Aggiungi nuovo record',
'Add New Request': 'Aggiungi nuova richiesta',
'Add New Request Item': 'Aggiungi un nuovo elemento di richiesta',
'Add New Resource Type': 'Aggiungi un nuovo tipo di risorsa',
'Add New River': 'Aggiungi nuovo Fiume',
'Add New Role to User': 'Aggiungi nuovo ruolo a utente',
'Add New Scenario': 'Aggiungi nuovo Scenario',
'Add New Sent Item': 'Aggiungi nuovo Elemento Inviato',
'Add New Setting': 'Aggiungi nuova impostazione',
'Add New Solution': 'Aggiungi nuova soluzione',
'Add New Staff Type': 'Aggiungi nuovo tipo di personale',
'Add New Subsector': 'Aggiungi nuovo Sottosettore',
'Add New Survey Answer': 'Aggiungi nuova risposta a questionario',
'Add New Survey Question': 'Aggiungi nuova domanda a questionario',
'Add New Survey Series': 'Aggiungi nuova serie di questionari',
'Add New Survey Template': 'Aggiungi nuovo modello di sondaggio',
'Add New Team': 'Aggiungi nuova squadra',
'Add New Ticket': 'Aggiungi Nuovo Ticket',
'Add New Track': 'Aggiungi nuova Sessione',
'Add New User to Role': 'Aggiungi un nuovo ruolo utente',
'Add Participant': 'Aggiungi partecipante',
'Add Peer': 'Aggiungi collega',
'Add Person': 'Aggiungi persona',
"Add Person's Details": 'Aggiungi Dettagli della Persona',
'Add Photo': 'Aggiungi Foto',
'Add PoI': 'Aggiungi PoI',
'Add Point': 'Aggiungi punto',
'Add Polygon': 'Aggiungi poligono',
'Add Population Statistic': 'Aggiungi Statistica di Popolazione',
'Add Position': 'Aggiungi posizione',
'Add Problem': 'Aggiungi problema',
'Add Professional Experience': 'Aggiungi esperienza professionale',
'Add Profile Configuration for this Layer': 'Aggiungi configurazione di profilo per questo strato',
'Add Question': 'Aggiungi una domanda',
'Add Rapid Assessment': 'Aggiungi una Valutazione Rapida',
'Add Record': 'Aggiungi record',
'Add Reference Document': 'Aggiungi documento di riferimento',
'Add Region': 'Aggiunta la Regione',
'Add Report': 'Aggiungi Report',
'Add Request': 'Aggiungi richiesta',
'Add Resource': 'Aggiungi risorsa',
'Add Resource Type': 'Aggiunto il tipo di risorsa',
'Add RSS Channel': 'Aggiungi canale RSS',
'Add Section': 'Aggiungi sezione',
'Add Setting': 'Aggiungi impostazione',
'Add Shelter': 'Aggiungi Struttura di Accoglienza',
'Add Skill Equivalence': 'Aggiungere Capacità Equivalente',
'Add Skill Provision': 'Aggiungi approviggionamento di Capacità',
'Add Solution': 'Aggiungi soluzione',
'Add staff members': 'Aggiungi membri al personale',
'Add Staff Type': 'Aggiungi tipo di personale',
'Add strings manually': 'Aggiungi stringhe manualmente',
'Add strings manually through a text file': 'Aggiungi stringhe manualmente attraverso un file di testo',
'Add Subscription': 'Aggiungi sottoscrizione',
'Add Subsector': 'Aggiungi Sottosettore',
'Add Survey Answer': "Aggiungi la risposta all'indagine",
'Add Survey Question': "Aggiungi la domanda dell'indagine",
'Add Survey Series': "Aggiungi le serie dell'indagine",
'Add Survey Template': 'Aggiungi Modello Sondaggio',
'Add Symbology to Layer': 'Aggiungi simbologia allo strato',
'Add Team': 'Aggiungi squadra',
'Add Team Member': 'Aggiungi Membro alla squadra',
'Add this entry': 'Aggiungi questa voce',
'Add Ticket': 'Aggiungi il biglietto',
'Add to a Team': 'Aggiungi ad una squadra',
'Add to budget': 'Aggiungi al bilancio',
'Add to Bundle': 'Aggiungi un fardello',
'Add Training': 'Aggiungere Formazione',
'Add Twilio Channel': 'Aggiungi canale Twilio',
'Add Twitter Search Query': 'Aggiungi la ricerca Twitter',
'Add Unit': 'Aggiungi unità',
'Add Volunteer Availability': 'Aggiungi disponibilità del volontario',
'Add volunteers': 'Aggiungi Volontari',
'Add...': 'Aggiungi …',
'Add/Edit/Remove Layers': 'Aggiungi / Modifica / Rimuovi i Livelli',
'added': 'Aggiunto',
'Added to Group': 'Aggiunto al Gruppo',
'Added to Team': 'Appartenenza aggiunta',
'addetto mensa': 'addetto mensa',
'Additional Beds / 24hrs': 'Letti aggiuntivi/24hrs',
'Address': 'Indirizzo',
'Address added': 'Indirizzo aggiunto',
'Address deleted': 'Indirizzo cancellato',
'Address Details': "Dettagli dell'indirizzo",
'Address Found': 'Indirizzo trovato',
'Address Mapped': 'Indirizzo mappato',
'Address NOT Found': 'Indirizzo NON trovato',
'Address NOT Mapped': 'Indirizzo NON mappato',
"Address of an image to use for this Layer in the Legend. This allows use of a controlled static image rather than querying the server automatically for what it provides (which won't work through GeoWebCache anyway).": "Indirizzo di un'immagine da utilizzare per questo layer nella legenda. Questo permette l'uso di un'immagine statica controllata piuttosto che interrogare il server automaticamente per quello che offre (che non funzionerà attraverso GeoWebCache comunque).",
'Address Type': 'Tipo di indirizzo',
'Address updated': 'Indirizzo aggiornato',
'Addresses': 'Indirizzi',
'Adequate': 'Adeguato',
'Adequate food and water available': 'Cibo e acqua sufficienti disponibili',
'Admin': 'Amministrazione',
'Admin Assistant': 'Assistente amministrativo',
'Admin Email': 'E-mail del responsabile',
'Admin Name': "Nome dell'amministratore di sistema",
'Admin Tel': "Numero di telefono dell'amministratore",
'Administration': 'Gestione',
'Admissions/24hrs': 'Ammissione 24 ore su 24',
'ADR Class B / A o B+explosives / B+radioactive (Vehicles carrying dangerous goods)': 'ADR Tipo B / A o B+esplosivi / B+radioattivi (Veicoli che trasportano merci pericolose)',
'Adolescent (12-20)': 'Adolescenti (12-20)',
'Adolescent participating in coping activities': 'Adolescenti che partecipano ad attività di coping',
'Adult (21-50)': 'Adulti (21-50)',
'Adult female': 'Adulti femmina',
'Adult ICU': 'Adulti ICU',
'Adult male': 'Adulti maschio',
'Adult Psychiatric': 'Adulti psichiatrici',
'Adults in prisons': 'Adulti nelle carceri',
'Advanced:': 'Avanzato:',
'Advisory': 'Informativa',
'Affiliation added': 'Affiliazione aggiunta',
'Affiliation deleted': 'Affiliazione cancellata',
'Affiliation Details': 'Dettagli affiliazione',
'Affiliation updated': 'Affiliazione aggiornata',
'Affiliations': 'Affiliazioni',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': "Dopo aver fatto clic sul pulsante, una serie di coppie di elementi verranno mostrate una per una. Si prega di selezionare da ogni coppia la soluzione che si preferisce sull'altra.",
'Age': 'Età',
'Age group': 'Gruppo di età',
'Age Group': 'Gruppo di età',
'Age group does not match actual age.': "Età del gruppo non corrisponde all'età effettiva.",
'Aggiungere persona': 'Aggiungere persona',
'Aggiungi persona': 'Aggiungi persona',
'Aggravating factors': 'Fattori aggravanti',
'Aggregate': 'Aggregato',
'Agriculture': 'Agricoltura',
'Air Transport Service': 'Servizio di trasporto aereo',
'Aircraft Crash': 'Incidente Aereo',
'Aircraft Hijacking': 'Dirottamenti aerei',
'Airport': 'Aeroporto',
'Airport Closure': 'Chiusura Aeroporto',
'Airspace Closure': 'Chiusura dello spazio aereo',
'Albanian - Spoken': 'Albanese - Parlato',
'Albanian - Written': 'Albanese - Scritto',
'Alcohol': 'Alcol',
'Alert': 'Avviso',
'All': 'Tutti',
'ALL': 'Tutti',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': "Tutti i dati forniti dalla Fondazione Software Sahana provenienti da questo sito sono licenziati in base ad una licenza Creative Commons Attribution. Tuttavia, non tutti i dati originano da qui. La prego di consultare il campo d'origine di ogni voce.",
'All Entities': 'Tutte le entità',
'All Inbound & Outbound Messages are stored here': 'Tutti i messaggi in entrata & in uscita vengono archiviati qui',
'All Records': 'Tutti i records',
'all records': 'Tutti i record',
'All Resources': 'Tutte le risorse',
'All selected': 'Tutti i selezionati',
'Allocate Group': 'Assegna Gruppo',
'allocated': 'Assegnato ',
'Allocated Groups': 'Gruppi assegnati',
'Allocation Details': 'Allocazione Dettagli',
'Allowed to push': 'Consentito spingere',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'consente che un bilancio sia sviluppato in base ai costi del personale e delle attrezzature, incluse eventuali spese generali di amministrazione.',
'Allows a Budget to be drawn up': 'Consente che un bilancio sia sviluppato',
'Allows authorized users to control which layers are available to the situation map.': 'Consente agli utenti autorizzati di controllare quali livelli sono disponibili alla mappa della situazione.',
'allows for creation and management of surveys to assess the damage following a natural disaster.': 'consente la creazione e gestione delle indagini per valutare il danno a seguito di una catastrofe naturale.',
'Alternative Item': 'Voce alternativa',
'Alternative Item added': 'Voce aggiunta alternativa',
'Alternative Item deleted': 'Elemento alternativo eliminato',
'Alternative Item Details': 'Dettagli Item alternativi',
'Alternative Item updated': 'Elementi alternativi aggiornati',
'Alternative Items': 'Elementi alternativi',
'Alternative places for studying': 'Luoghi alternativi per studiare',
'Ambulance Service': 'Servizio ambulanza',
'An error occured, please %(reload)s the page.': 'Si è verificato un errore, %(reload)s la pagina.',
'An ESRI Shapefile (zipped)': 'Un Shape file ESRI (zipped)',
'an individual/team to do in 1-2 days': 'Un singolo/squadra da fare in 1-2 giorni',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': 'Un sistema di ingestione, un sistema di gestione di magazzino, tracking di prodotti, gestione della catena di fornitura, appalti e altri asset e funzioni di gestione delle risorse.',
'An item which can be used in place of another item': 'Un elemento che può essere utilizzato in sostituzione di un altro elemento',
'Analysis of Completed Surveys': 'Analisi delle Indagini Completate',
'Analyze with KeyGraph': 'Analizza con KeyGraph',
'and': 'E ',
'Animal Die Off': 'Animale Muore',
'Animal Feed': 'Cibo per Animali',
'Animal Presence': 'Presenza di animali',
'Animal presence': 'Presenza di animali',
'anonymous user': 'Utente anonimo',
'Anthropology': 'Antropolgia',
'Antibiotics available': 'Antibiotici disponibili',
'Antibiotics needed per 24h': 'Antibiotici necessari per 24 ore',
'Any': 'Qualsiasi',
'ANY': 'Qualsiasi',
'API is documented here': "L'API e' documentata qui",
'Apparent Age': "Eta' apparente",
'Apparent Gender': 'Genere Apparente',
'Application': 'Applicazione',
'Application Deadline': 'Termine applicazione',
'Application Permissions': "Permessi dell'applicazione",
'Apply': 'Applica ',
'Appraisal added': 'Valutazione aggiunta',
'Appraisal deleted': 'Valutazione cancellata',
'Appraisal Details': 'Dettagli valutazione ',
'Appraisal updated': 'Valutazione aggiornata',
'Appraisals': 'Valutazioni',
'Approve': 'Approvare',
'Approved': 'Approvato',
'Approver': 'Approvatore',
'Arabic - Spoken': 'Arabo - Parlato',
'Arabic - Written': 'Arabo - Scritto',
'ArcGIS REST Layer': 'Strato ArcGIS REST',
'Arctic Outflow': 'Deflusso Artico',
'Are you sure you want to delete this record?': 'Sei sicuro di voler cancellare questo record?',
'Are you susbscribed?': 'Sei sottoscritto?',
'Areas inspected': 'Aree ispezionate',
'Assessment': 'Valutazione',
'Assessment added': 'Valutazione aggiunta',
'Assessment admin level': 'Valutazione a livello admin',
'Assessment deleted': 'Valutazione eliminato',
'Assessment Details': 'Valutazione Dettagli',
'Assessment Reported': 'Valutazione riferita',
'Assessment Summaries': 'Riepiloghi di Valutazione',
'Assessment Summary added': 'Riepilogo di valutazione aggiunto',
'Assessment Summary deleted': 'Riepilogo valutazione eliminato',
'Assessment Summary Details': 'Dettagli riepilogo valutazione',
'Assessment Summary updated': 'Riepilogo valutazione aggiornata',
'Assessment timeline': 'Valutazione temporale',
'Assessment updated': 'Valutazione aggiornata',
'Assessments': 'Valutazioni',
'Assessments and Activities': 'Valutazioni e Attività',
'Assessments Needs vs. Activities': 'Valutazioni di Fabbisogno vs. Attività',
'Assessments:': 'Valutazioni:',
'Assessor': 'Valutatore',
'Asset': 'Bene',
'Asset added': 'Risorsa aggiunta',
'Asset deleted': 'Asset eliminato',
'Asset Details': 'Dettagli della risorsa',
'Asset Log': "Log dell'Asset",
'Asset Log Details': "Dettagli Log dell'Asset",
'Asset Log Empty': "Log dell'Asset vuoto",
'Asset Log Entry Added - Change Label': "Entrata di Log dell'Asset aggiunta - Modificare l'etichetta",
'Asset Log Entry deleted': 'Entrata del Log di Asset eliminata',
'Asset Log Entry updated': 'Entrata del Log di Asset aggiornata',
'Asset Management': 'Gestione Asset',
'Asset Number': "Numero dell'Asset",
'Asset removed': 'Risorsa eliminata',
'Asset updated': 'Bene aggiornato',
'Assets': 'Beni',
'Assets are resources which are not consumable but are expected back, so they need tracking.': 'I beni patrimoniali sono risorse che non sono di consumo, ma sono da restituire indietro, pertanto hanno bisogno di essere tracciati.',
'Assign': 'Assegnare',
'Assign %(staff)s': 'Assegna %(staff)s',
'Assign another Role': 'Assegna un altro ruolo',
'Assign Asset': 'Bene assegnato',
'Assign Facility': 'Assegna infrastruttura',
'Assign Group': 'Assegna gruppo',
'Assign Human Resource': 'Assegna risorsa umana',
'Assign Role to a User': 'Assegna ruolo ad un utente',
'Assign Roles': 'Assegna Ruoli',
'Assign Staff': 'Assegna Personale',
'Assign to Org.': 'Assegna a Org.',
'Assign to Organization': "Assegna all'Organizzazione",
'Assign to Person': 'Assegna alla Persona',
'Assign to Site': 'Assegnare al sito',
'assigned': 'Assegnato',
'Assigned': 'Assegnato',
'Assigned By': 'Assegnata da',
'Assigned Human Resources': 'Risorse umane assegnate',
'Assigned Roles': 'Ruoli assegnati',
'Assigned To': 'Assegnato a',
'Assigned to': 'Assegnato a',
'Assigned to Organization': 'Assegnato a Organizzazione',
'Assigned to Person': 'Assegnato a Persona',
'Assigned to Site': 'Assegnato al sito',
'Associate Event': 'Evento associato',
'Association': 'Associazione',
'At/Visited Location (not virtual)': 'A/Visitato Ubicazione (non virtuale)',
'ATC-20 Rapid Evaluation modified for New Zealand': 'Valutazione Rapida ATC-20 modificata per la Nuova Zelanda',
'Attachments': 'Allegati',
'Attend to information sources as described in <instruction>': 'Partecipare alla natura delle informazioni come descritto in <instruction>',
'Attributes': 'Attributi',
'Attribution': 'Attribuzione',
'AUTH TOKEN': 'Token di autorizzazione',
"Authenticate system's Twitter account": 'Autenticare account Twitter del sistema',
'Authentication Required': 'Autenticazione richiesta',
'Author': 'Autore',
'Automatic Message': 'Messaggio automatico',
'Availability': 'Disponibilità',
'Availability of bath handicap facilities': 'Disponibilità di bagni per disabili',
'Availability of shower handicap facilities': 'Disponibilità di docce per disabili',
'available': 'Disponibile ',
'Available': 'Disponibile',
'Available Alternative Inventories': 'Inventari alternativi disponibili',
'Available Bath': 'Disponibilità di Vasca da bagno',
'Available Beds': 'Letti disponibili',
'Available Capacity (Night)': 'Disponibilità di posti (per la notte)',
'Available databases and tables': 'Database e tabelle disponibili',
'Available for Location': 'Disponibile per Ubicazione',
'Available Forms': 'Moduli disponibili',
'Available from': 'Disponibile da',
'Available in Viewer?': 'Disponibili nel visualizzatore?',
'Available Inventories': 'Inventari disponibili',
'Available Messages': 'Messaggi disponibili',
'Available Records': 'Record disponibili',
'Available Shower': 'Disponibilità di Docce',
'Available until': 'Disponibile fino a',
'Avalanche': 'Valanga',
'Average': 'Nella media',
'average': 'Media',
'Avoid the subject event as per the <instruction>': "Evitare l'evento in oggetto come da <instruction>",
'Award': 'Premio',
'Award added': 'Premio aggiunto',
'Award deleted': 'Premio cancellato',
'Award updated': 'Premio aggiornato',
'Awards': 'Premi',
'Back to Roles List': 'Torna alla lista dei ruoli',
'Back to Top': 'Torna in alto',
'Back to Users List': 'Torna alla lista degli utenti',
'Background Color': 'Colore di sfondo',
'Background Color for Text blocks': 'Colore sfondo per i blocchi di testo',
'Bad': 'Cattivo',
'Bahai': 'Bahai',
'Baldness': 'Calvizie',
'Bank/micro finance': 'Banca / micro finanza',
'Barricades are needed': 'Sono necessarie delle barricate',
'Base Layer?': 'Strato di Base?',
'Base Layers': 'Strati base',
'Base Location': 'Ubicazione di base',
'Base Location Updated!': 'Locazione base aggiornata!',
'Base Site Set': 'Di Base impostati nel sito',
'Base Station added': 'Postazione di base aggiunta',
'Base Station deleted': 'Postazione di base cancellata',
'Base Station Details': 'Dettagli della postazione di base',
'Base Station updated': 'Postazione di base aggiornata',
'Base Stations': 'Postazioni di base',
'Base URL of the remote Sahana Eden instance including application path, e.g. http://www.example.org/eden': "URL di base dell'istanza remota di Sahana Eden, incluso il percorso dell'applicazione, es. http://www.example.org/eden",
'Base/ External Electric Power Generator': 'Base/ Generatore di corrente elettrica esterno',
'Base/ Imhoff Tank': 'Base/ Imhoff Serbatoio',
'Base/ Showers': 'Base/ Docce',
'Base/ Showers (Handicap facilities)': 'Base/ Docce(accessibile ai disabili)',
'Base/ Static baths': 'Base/ Bagni stabili',
'Base/ Static baths (Handicap facilities)': 'Base/ Bagni stabili accessibile ai disabili)',
'Base/ Temporary Illumination System': 'Base/ Sistema di illuminazione temporaneo',
'Base/ Temporary baths': 'Base/ Bagni temporanei',
'Base/ Temporary baths (Handicap facilities)': 'Base/ Bagni temporanei(accessibile ai disabili)',
'Base/ Temporary baths (Children)': 'Base/ Bagni temporanei(accessibile ai bambini)',
'Baseline added': 'Aggiunto baseline',
'Baseline Data': 'Dati della base di riferimento',
'Baseline deleted': 'Eliminato baseline',
'Baseline Number of Beds': 'Linea base del numero di letti',
'Baseline number of beds of that type in this unit.': 'Linea base del numero di letti di quel tipo in questa unità.',
'Baseline Type': 'Tipo base',
'Baseline Type added': 'Tipo Base aggiunto',
'Baseline Type deleted': 'Tipo base eliminato',
'Baseline Type Details': 'Dettagli tipo base',
'Baseline Type updated': 'Tipo base aggiornato',
'Baseline Types': 'Tipi base',
'Baseline updated': 'Base aggiornata',
'Baselines': 'Linee di base',
'Baselines Details': 'Dettagli della linea di base',
'Basic Assessment': 'Valutazione di base',
'Basic Assessment Reported': 'Valutazione di base riferita',
'Basic Details': 'Dettagli di base',
'Basic reports on the Shelter and drill-down by region': 'Reports di base sulla Struttura di Accoglienza e dettaglio per regione',
'Bath Availability': 'Bath Disponibilità',
'Bath Handicap Facilities': 'Struttura con Vasca da Bagno disabili',
'Bath with handicap facilities': 'Stanza da Bagno con strutture per disabili',
'Baud': 'Baud',
'Baud rate to use for your modem - The default is safe for most cases': 'Velocità in Baud da utilizzare per il proprio modem - il valore predefinito è sicuro per la maggior parte dei casi',
'Beam': 'trave',
'Bed Capacity': 'Posti letto',
'Bed Capacity per Unit': 'Posti letto per unità',
'Bed Type': 'Tipo di letto',
'Bed type already registered': 'Tipo di letto già registrato',
'Below ground level': 'Sotto il livello del suolo',
'Beneficiaries': 'Beneficiari',
'Beneficiary Type': 'Tipo di beneficiario',
'Beneficiary Types': 'Tipi di beneficiario',
'Bilateral': 'Bilaterale',
'Bing Layer': 'Strato bing',
'Biological Hazard': 'Rischio Biologico',
'Biscuits': 'Biscotti',
'black': 'Nero',
'Blizzard': 'Tempesta di neve',
'blond': 'Biondo',
'Blood Type (AB0)': 'Gruppo Sanguigno (AB0)',
'Blowing Snow': 'Soffiare la neve',
'blue': 'Blu',
'Boat': 'Barca',
'Bodies found': 'Corpi trovati',
'Bodies recovered': 'Corpi recuperati',
'Body': 'Corpo',
'Body Hair': 'Peli del corpo',
'Body Recovery Request': 'Richiesta di recupero corpo',
'Body Recovery Requests': 'Richieste di recupero corpi',
'Bomb': 'Bomba',
'Bomb Explosion': 'Esplosione di Bomba',
'Bomb Threat': 'Minaccia di Bomba',
'Border Color for Text blocks': 'Colore del bordo per i blocchi di testo',
'Both': 'Entrambi',
'Branch': 'Unità',
'Branch Coordinator': 'Coordinatore di dipartimento',
'Branch Organization added': 'Aggiunta Unità organizzativa',
'Branch Organization deleted': 'Cancellata Unità organizzativa',
'Branch Organization Details': 'Dettagli Unità organizzativa',
'Branch Organization updated': 'Aggiornata Unità organizzativa',
'Branch Organizations': 'Unità organizzativa',
'Branches': 'Unità',
'Brand': 'Marchio',
'Brand added': 'Marchio aggiunto',
'Brand deleted': 'Marchio Eliminato',
'Brand Details': 'Dettagli del marchio',
'Brand updated': 'Marchio aggiornato',
'Brands': 'Marchi',
'Breakdown': 'Guasto',
'Bricks': 'Mattoni',
'Bridge Closed': 'Ponte Chiuso',
'brown': 'Marrone',
'Bucket': 'Secchio',
'Buddhist': 'Buddista',
'Budget added': 'Bilancio aggiunto',
'Budget deleted': 'Bilancio cancellato',
'Budget Details': 'Dettagli di bilancio',
'Budget Updated': 'Bilancio Aggiornato',
'Budget updated': 'Bilancio aggiornato',
'Budgeting Module': 'Modulo Budgeting',
'Budgets': 'Budget',
'Buffer': 'Buffer',
'Bug': 'difetto',
'Building Assessments': 'Valutazione edifici',
'Building Collapsed': 'Edificio crollato',
'Building Name': 'Nome edificio',
'Building or storey leaning': "Pendenza dell'edificio o dei piani",
'Building Safety Assessments': "Valutazione della sicurezza dell'edificio",
'Building Short Name/Business Name': 'Alias Edificio/Nome aziendale',
'Built using the Template agreed by a group of NGOs working together as the': 'Costruito utilizzando il modello approvato da un gruppo di ONG che lavorano insieme come la',
'Bulk Uploader': 'Caricatore di carico',
'Bundle added': 'Bundle aggiunto',
'Bundle Contents': 'Contenuto della raccolta',
'Bundle deleted': 'Raccolta eliminata',
'Bundle Details': 'Dettagli della raccolta',
'Bundle Updated': 'Bundle aggiornato',
'Bundle updated': 'Raccolta aggiornata',
'Bundles': 'Raccolte',
'Burn': 'Ustione',
'Burn ICU': 'Unità di Terapia Intensiva per le Ustioni',
'Burned/charred': 'Bruciato/carbonizzato',
'by': 'per',
'by %(person)s': 'di %(person)s',
'By Facility': 'Per Struttura',
'By Inventory': 'Da Inventorio',
'By selecting this you agree that we may contact you.': 'Selezionando ciò, lei accetta che potrà essere contattato.',
'By using the Sahana System you agree to these Terms of Service.': "Usando il Sistema Sahana lei accetta a questi termini d'uso.",
'c/o Name': 'C / o nome',
'Calculate': 'Calcola',
'Camp': 'Campo',
'Camp added': 'Camp aggiunto',
'Camp Coordination/Management': 'Coordinamento/Gestione del campo',
'Camp deleted': 'Campo eliminato',
'Camp Details': 'Dettagli del campo',
'Camp IDP Refugee Camp': 'Campo IDP di Rifugiati',
'Camp Service': 'Servizio di accampamento',
'Camp Service added': 'Servizio di accampamento aggiunto',
'Camp Service deleted': 'Servizio di accampamento eliminato',
'Camp Service Details': 'Dettagli servizio Campo',
'Camp Service updated': 'Servizio del Campo aggiornato',
'Camp Services': 'Servizi Campo',
'Camp Status': 'Stato del campo',
'Camp Status added': 'Campo di Stato aggiunto ',
'Camp Status deleted': 'Campo stato cancellato ',
'Camp Status Details': 'Dettagli Campo di stato ',
'Camp Status updated': 'Campo stato aggiornato ',
'Camp Statuses': 'Stato dei campi',
'Camp Type': 'Tipo di campo',
'Camp Type added': 'Tipo di campo aggiunto',
'Camp Type deleted': 'Tipo di Campo eliminato',
'Camp Type Details': 'Dettagli Tipo di Campo',
'Camp Type updated': 'Tipo di campo aggiornato',
'Camp Types': 'Tipi di campo',
'Camp Types and Services': 'Tipi di campo e Servizi',
'Camp updated': 'Campo aggiornato',
'Campaign ID': 'Identificativo della promozione',
'Camps': 'Accampamenti',
'can be used to extract data from spreadsheets and put them into database tables.': 'Può essere utilizzato per estrarre dati da fogli elettronici ed inserirli nelle tabelle del database.',
'Can only disable 1 record at a time!': 'È possibile disattivare solo 1 record alla volta!',
'Can read PoIs either from an OpenStreetMap file (.osm) or mirror.': 'Può leggere PoIs sia da un file OpenStreetMap (.osm) o da un sito mirror.',
"Can't import tweepy": 'Impossibile importare tweepy',
'Cancel': 'Cancella',
'Cancel Crop': 'Annulla ritaglio.',
'Cancel editing': 'Annulla la revisione.',
'Cancel Log Entry': 'Elimina voce di registrazione',
'Cancel Shipment': 'Annullare spedizione',
'Canceled': 'Annullato',
'Candidate Matches for Body %s': 'Riscontro candidato per il corpo %s',
'Canned Fish': 'Pesce in scatola',
'cannot be deleted.': 'Impossibile cancellare',
'Cannot be empty': 'Non può essere vuoto',
'Cannot disable your own account!': 'Impossibile disabilitare il proprio account.',
'Cannot make an Organization a branch of itself!': "Non si può definire l'Organizzazione come Unità di se stessa!",
'Cannot open created OSM file!': 'Non può aprire il file OSM creato!',
'Cannot read from file: %(filename)s': 'Non può leggere dal file: %(filename)s',
'Cannot send messages if Messaging module disabled': 'Non può inviare i messaggi se il modulo messaging è disabilitato',
'Capacity (Day and Night)': 'Capienza (giorno e notte)',
'Capacity (Max Persons)': 'Capacità (Numero Massimo Di persone)',
'Capacity (Night only)': 'Capienza (solo pernottamento)',
'Capacity of the housing unit for people who need to stay both day and night': "Capacità dell'unità abitativa per persone che hanno bisogno di rimanere giorno e notte",
'Capacity of the shelter for people who need to stay both day and night': 'Capacità di Accoglienza per persone che hanno bisogno di rimanere giorno e notte',
'Capacity of the shelter for people who need to stay for night only': 'Capacità di Accoglienza per persone che hanno bisogno di rimanere per la notte solo',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'Acquisire informazioni sui gruppi di Vittime di Disastro (Turisti, Passeggeri, Famiglie, ecc. )',
'Capture Information on each disaster victim': 'Acquisire informazioni su ogni vittima di catastrofe',
'Capturing the projects each organization is providing and where': 'Cattura i progetti ciascuna organizzazione è fornire e dove',
'Car available': 'Auto disponibile',
'Card holder': 'Titolare della carta',
'Cardiology': 'Cardiologia',
'Cassava': 'Manioca',
'Casual Labor': 'Lavoro occasionale',
'Casualties': 'Vittime',
'Catalog': 'catalogo',
'Catalog added': 'Catalogo aggiunto',
'Catalog deleted': 'Catalogo eliminato',
'Catalog Details': 'Dettagli catalogo',
'Catalog Item added': 'Voce di catalogo aggiunta',
'Catalog Item deleted': 'Voce di catalogo eliminata',
'Catalog Item updated': 'Voce di catalogo aggiornata',
'Catalog Items': 'Elementi di catalogo',
'Catalog updated': 'Catalogo aggiornato',
'Catalogs': 'Cataloghi',
'Categories': 'Categorie',
'Category': 'Categoria',
'caucasoid': 'Caucasico',
"Caution: doesn't respect the framework rules!": 'Attenzione: non rispetta le regole di ambiente!',
'CBA Women': 'Donne CBA',
'Ceilings, light fixtures': 'Massimali, infissi di luce',
'Cell Tower': 'Torre della cella',
'Central point to record details on People': 'Punto centrale per registrare i dettagli sulle persone',
'Certificate': 'Certificato',
'Certificate added': 'Certificato aggiunto',
'Certificate Catalog': 'Catalogo del certificato',
'Certificate deleted': 'Certificato eliminato',
'Certificate Details': 'Dettagli del certificato',
'Certificate Status': 'Stato del certificato',
'Certificate updated': 'Certificato aggiornato',
'Certificates': 'Certificati',
'Certification': 'Certificazione',
'Certification added': 'Certificazione aggiunta',
'Certification deleted': 'Certificazione eliminata',
'Certification Details': 'Dettagli della Certificazione',
'Certification updated': 'Certificazione aggiornata',
'Certifications': 'Certificazioni',
'Certifying Organization': "Certificazione dell' Organizzazione",
'Change Password': 'Modificare password',
'Channel': 'Canale',
'Channel added': 'Aggiunto canale',
'Check': 'Verifica',
'check all': 'Seleziona tutto',
'Check all': 'Controllare tutti',
'Check for errors in the URL, maybe the address was mistyped.': "Verificare la presenza di errori nell'URL, forse l'indirizzo è stato digitato incorrettamente.",
'Check if the URL is pointing to a directory instead of a webpage.': 'Verificare se URL sta puntando a una directory invece che ad una pagina Web.',
'Check outbox for the message status': 'Controlla la posta in uscita per lo stato dei messaggi',
'Check Request': 'Richiesta di verifica',
'Check to delete': 'Selezionare per eliminare',
'Check-In': 'Check-In',
'Check-in date': 'Data di arrivo',
'Check-Out': 'Check-Out',
'Check-out date': 'Data di partenza',
'Checked': 'Verificato',
'Checked-in': 'Check-in effettuato ',
'Checked-In successfully!': 'Checked-In con successo!',
'Checked-out': 'Check-out effettuato ',
'Checked-Out successfully!': 'Checked-Out con successo!',
'Checklist': 'Elenco di controllo',
'Checklist created': 'Lista di controllo creata',
'Checklist deleted': 'Lista di Controllo eliminata',
'Checklist of Operations': 'Controllo delle operazioni',
'Checklist updated': 'Elenco aggiornato',
'Chemical Hazard': 'rischio chimico',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Minaccia o attacco chimico, biologico, radiologico, nucleare o ad alto rendimento esplosivo',
'Chicken': 'Pollo',
'Child': 'bambino',
'Child (2-11)': 'Bambino (da 2 a 11 anni)',
'Child (< 18 yrs)': "bambino (eta' inferiore ai 18 anni)",
'Child Abduction Emergency': 'emergenza per rapimento di bambino',
'Child headed households (<18 yrs)': 'Figlio capofamiglia (<18 anni)',
'Children (2-5 years)': 'BAMBINI (2-5 anni)',
'Children (5-15 years)': 'BAMBINI (5-15 anni)',
'Children (< 2 years)': 'BAMBINI (< 2 anni)',
'Children in adult prisons': 'Bambini in prigioni per adulti',
'Children in boarding schools': 'Bambini in collegio',
'Children in homes for disabled children': 'Bambini in casa per bambini disabili',
'Children in juvenile detention': 'Bambini in detenzione giovanile',
'Children in orphanages': 'Bambini in orfanotrofi',
'Children living on their own (without adults)': 'Bambini che vivono in proprio (senza adulti)',
'Children not enrolled in new school': 'Bambini non iscritti in una nuova scuola',
'Children orphaned by the disaster': 'Bambini resi orfani dal disastro',
'Children separated from their parents/caregivers': 'Bambini separati dai loro genitori / chi si prende cura di loro',
'Children that have been sent to safe places': 'Bambini che sono stati inviati nei luoghi di sicurezza',
'Children who have disappeared since the disaster': 'Bambini che sono scomparsi dopo la catastrofe',
'Chinese (Taiwan)': 'Cinese (Taiwan)',
'Chinese - Spoken': 'Cinese - Parlato',
'Chinese - Written': 'Cinese - Scritto',
'Cholera Treatment': 'COLERA Trattamento',
'Cholera Treatment Capability': 'COLERA Trattamento Capacità',
'Cholera Treatment Center': 'COLERA Trattamento Centro',
'Cholera-Treatment-Center': 'Centro di trattamento del colera',
'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': 'Scegliere una nuova assegnazione sulla base della nuova valutazione e giudizio della squadra. Gravi condizioni riguardanti l\'intero edificio costituiscono motivo per un messaggio UNSAFE. Condizioni localizzate GRAVI e in generale MODERATE possono richiedere una autorizzazione "Ad USO RISTRETTO". Porre la placca ISPEZIONATO all\'entrata principale. Attacca tutte le altre placche ad ogni accesso rilevante.',
'Choose Country': 'Scegli il paese',
'Christian': 'Cristiano',
'Church': 'Chiesa',
'City': 'Città',
'Civil Emergency': 'Emergenza civile',
'Civil Unrest': 'Disordine civile',
'Cladding, glazing': 'Rivestimenti, vetri',
'Cleaner': 'Addetto alle pulizie',
'clear': 'Chiaro',
'Clear All': 'Cancella tutto',
'Clear all Layers': 'Cancella tutti gli strati',
'Clear filter': 'Cancella i filtri',
'click for more details': 'Fare clic per maggiori dettagli',
'click here': 'Cliccare qui',
'Click on the link %(url)s to reset your password': 'Fare clic sul collegamento %(url)s per azzerare la password',
'Click on the link %(url)s to verify your email': 'Fare clic sul collegamento %(url)s per verificare la vostra e-mail',
'Click on the slider to choose a value': 'Fare click su un dispositivo di scorrimento per scegliere un valore',
'Click to edit': 'Fare clic per modificare',
'Click where you want to open Streetview': 'Fare click dove si desidera aprire Streetview',
'Client ID': 'Client ID',
'Client Registration': 'Registrazione di persone',
'Client Reservation': 'Prenotazione di persone',
'Client Secret': 'Client Secret',
'Clinical Laboratory': 'Laboratorio Clinico',
'Clinical Operations': 'Operazioni cliniche',
'Clinical Status': 'Stato clinico',
'Close': 'Chiudi',
'Close map': 'Chiudi la mappa',
'Closed': 'Chiuso',
'CLOSED': 'Chiuso',
'Closed? %s': 'Chiuso? %s',
'Clothing': 'Abbigliamento',
'Cluster': 'Cluster',
'Cluster added': 'Cluster aggiunto',
'Cluster Attribute': 'Attributo Cluster',
'Cluster CCCM': 'Raggruppamento CCCM',
'Cluster deleted': 'Cluster eliminato',
'Cluster Details': 'Dettagli del Cluster',
'Cluster Distance': 'Distanza del Cluster',
'Cluster Early Recovery': 'Raggruppamento di primo soccorso',
'Cluster Emergency Telecommunications': 'Raggruppamento per le Telecomunicazioni in Emergenza',
'Cluster Food Security': 'Raggruppamento per la sicurezza del cibo',
'Cluster Health': 'Punto Salute',
'Cluster Logistics': 'Punto servizi di Logistica',
'Cluster Shelter': 'Raggruppamento Struttura di Accoglienza',
'Cluster Subsector': 'Sottosettore del Cluster',
'Cluster Subsector added': 'Sottosettore del Cluster aggiunto',
'Cluster Subsector deleted': 'Sottosettore del Cluster eliminato',
'Cluster Subsector Details': 'Dettagli Sottosettore del Cluster',
'Cluster Subsector updated': 'Sottosettore Cluster aggiornato',
'Cluster Subsectors': 'Sottosettori cluster',
'Cluster Threshold': 'Soglia Cluster',
'Cluster updated': 'Cluster aggiornato',
'Cluster WASH': 'Punto Lavaggio',
'Cluster(s)': 'Cluster',
'Clusters': 'Clusters',
'CN': 'Nc',
'Coalition added': 'Aggiunta la coalizione',
'Coalition Details': 'Dettagli della coalizione',
'Coalition removed': 'Rimossa la coalizione',
'Coalition updated': 'Coalizione aggiornata',
'Coalitions': 'Coalizioni',
'Code': 'Codice',
'Code of Conduct': 'Codice di condotta',
'Cold Wave': 'Ondata di freddo',
'Collapse': 'Crollo',
'Collapse, partial collapse, off foundation': 'Collasso, collasso parziale, cedimento delle fondamenta',
'Collective center': 'centro collettivo',
'Color for Underline of Subheadings': 'Colore per la Sottolineatura delle SottoIntestazioni',
'Color of bottom of Buttons when not pressed': 'Colore di fondo dei pulsanti quando non premuto',
'Color of bottom of Buttons when pressed': 'Colore della parte inferiore dei Pulsanti quando vengono premuti',
'Color of Buttons when hovering': 'Colore dei Pulsanti quando il cursore vi passa sopra',
'Color of dropdown menus': 'Colore dei menu a discesa',
'Color of selected Input fields': 'Colore dei campi di Immissione selezionati',
'Color of selected menu items': 'Colore delle voci di menu selezionati',
'Columns, pilasters, corbels': 'Colonne, pilastri e mensole',
'Combined Method': 'Metodo combinato',
'Come back later.': 'Tornare più tardi.',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Tornare più tardi. Colui che visita questo sito sta probabilmente vivendo lo stesso suo problema.',
'Comments': 'Commenti',
'Commercial/Offices': 'Commerciale / Uffici',
'Commit': 'rendere definitivo',
'Commit Date': "Data dell'impegno",
'Commit from %s': 'Commit dal %s',
'Commit Status': "Stato dell'impegno",
'Commiting a changed spreadsheet to the database': 'Commettendo un foglio elettronico modificato nel database',
'Commitment': 'Impegno',
'Commitment Added': 'Impegno Aggiunto',
'Commitment Canceled': 'Impegno Annullato',
'Commitment Details': 'Dettagli impegno',
'Commitment Item added': "Aggiunto elemento dell'impegno",
'Commitment Item deleted': "Eliminato elemento dell'impegno",
'Commitment Item Details': "Dettagli dell'articolo dell'impegno",
'Commitment Item updated': 'Elemento di Impegno aggiornato',
'Commitment Items': 'Voci di Impegno',
'Commitment Status': "Stato dell'impegno",
'Commitment Updated': 'Impegno Aggiornato',
'Commitments': 'Impegni',
'Committed': 'Impegnato',
'Committed By': 'Impegnato Da',
'Committing Inventory': "Confermare l'Inventario",
'Communication Officer': 'Ufficio comunicazione',
'Communication problems': 'Problemi di comunicazione',
'Communities': 'Comunità',
'Community Centre': 'Centro della comunità',
'Community Contacts': 'Contatti della comunità',
'Community Health Center': 'Centro sanitario di comunità',
'Community Member': 'Membro della comunità',
'Company': 'Compagnia',
'Competencies': 'Competenze',
'Competency': 'Competenza',
'Competency added': 'Competenza aggiunto',
'Competency deleted': 'Competenza eliminata',
'Competency Details': 'Dettagli competenza',
'Competency Rating': 'Valutazione Competenza.',
'Competency Rating added': 'Aggiunta Valutazione Competenza',
'Competency Rating Catalog': 'Catalogo Valutazione Competenza',
'Competency Rating deleted': 'Eliminata Valutazione Competenza',
'Competency Rating Details': 'Dettagli Valutazione Competenza',
'Competency Rating updated': 'Aggiornamento Valutazione Competenza',
'Competency Ratings': 'Valutazioni Competenze',
'Competency updated': 'Aggiornamento Competenza',
'Complex Emergency': 'Emergenza complessa',
'Complete': 'Completo',
'Completed': 'Completo',
'completed': 'Completo',
'Complexion': 'Carnagione',
'Compose': 'Componi',
'Compromised': 'Compromesso',
'Comune': 'Comune',
'Concrete frame': 'Telaio in calcestruzzo',
'Concrete shear wall': 'Muro in cemento di taglio',
'Condition': 'Condizione',
'Config not found!': 'Configurazione non trovata!',
'Configuration': 'Configurazione ',
'Configurations': 'Configurazioni',
'Configure Layer for this Symbology': 'Configura strato per questa simbologia',
'Configure Run-time Settings': 'Configurare le impostazioni di run-time',
'Confirm Shipment Received': 'Confermare il ricevimento della spedizione',
'Confirmed': 'Confermato',
'Confirming Organization': 'Conferma Organizzazione',
'Conflict Details': 'Dettagli del conflitto',
'Conflict Policy': 'Conflict Policy',
'Conflict Resolution': 'Risoluzione dei conflitti',
'Connect Parser': 'Collega il decodificatore',
'consider': 'Considera',
'Consignment Note': 'nota di consegna',
'Constraints Only': 'Solamente vincoli',
'Consumable': 'Utilizzabile',
'Contact': 'Contatto',
'Contact added': 'Contatto aggiunto',
'Contact Data': 'Dati dei contatti',
'Contact deleted': 'Contatto cancellato',
'Contact Description': 'Descrizione del Contatto',
'Contact details': 'Dettagli Contatto',
'Contact Details': 'Dettagli Contatto',
'Contact Details updated': 'Dettagli di Contatto Aggiornato.',
'Contact Info': 'Informazioni sul Contatto',
'Contact Information': 'Informazioni sul Contatto',
'Contact Information Added': 'Informazioni sul Contatto Aggiunto',
'Contact information added': 'Informazioni sul Contatto aggiunto',
'Contact Information Deleted': 'Informazioni sul Contatto Cancellato',
'Contact information deleted': 'Informazioni sul Contatto cancellato',
'Contact Information Updated': 'Informazione sul Contatto Aggiornato',
'Contact information updated': 'Informazioni sul contatto aggiornato',
'Contact Method': 'Metodo di Contatto',
'Contact Name': 'Nome Contatto',
'Contact Person': 'Persona da contattare',
'Contact Person / Camp Owner': 'Persona da contattare / Responsabile del campo',
'Contact Phone': 'Telefono da contattare',
'Contact Us': 'Contattaci',
'Contact us': 'Contattaci',
'Contacts': 'Contatti',
'Content': 'Contenuto',
'Contents': 'Contenuti',
'Context': 'Contesto',
'Contract End Date': 'Data fine contratto',
'Contributor': 'Contributore',
'Controller': 'Controllore',
'Conversion Tool': 'Strumento di conversione',
'Cooking NFIs': 'Cottura NFI',
'Cooking Oil': 'olio da cucina',
'Coordinate Conversion': 'Conversione delle coordinate',
'Coordinate Layer': 'Coordinare strato',
'Coordinator': 'Coordinatore',
'Coping Activities': 'Affrontare le attività',
'Copy': 'copia',
'Corn': 'Mais',
'Corporate Entity': 'Entità aziendale',
'Cost per Megabyte': 'Costo per Megabyte',
'Cost per Minute': 'Costo al minuto',
'Cost Type': 'Tipo Di costo',
'Could not add person record': 'Impossibile aggiungere record di persona',
'Could not create record.': 'Impossibile creare record.',
'Could not merge records. (Internal Error: %s)': 'Impossibile unire i records. (Errore interno: %s)',
"couldn't be parsed so NetworkLinks not followed.": 'non è stato possibile analizzarlo quindi NetworkLinks non seguiti.',
"Couldn't open %s!": 'Impossibile aprire %s!',
'Count': 'Count',
'Country': 'Paese',
'Country Code': 'Codice del Paese',
'Country is required!': 'Il paese è richiesto!',
'Country of Residence': 'Paese di residenza',
'County': 'Contea',
'Course': 'Corso',
'Course added': 'Corso aggiunto',
'Course Catalog': 'Catalogo corsi',
'Course Certificate added': 'Aggiunto Certificato del Corso',
'Course Certificate deleted': 'Eliminato Certificato del Corso',
'Course Certificate Details': 'Dettagli del certificato del Corso',
'Course Certificate updated': 'Aggiornato Certificato del Corso',
'Course Certificates': 'Certificati del Corso',
'Course deleted': 'Corso eliminato',
'Course Details': 'Dettagli Corso',
'Course updated': 'Corso aggiornato',
'Courses': 'Corsi',
'Create': 'Crea',
'Create & manage Distribution groups to receive Alerts': 'Crea & Gestisci gruppi di distribuzione per ricevere Segnalazioni',
"Create 'More Info'": "Crea 'Maggiori Informazioni'",
'Create a group entry in the registry.': 'Creare una voce di gruppo nel registro.',
'Create a new facility or ensure that you have permissions for an existing facility.': 'Creare un nuovo impianto o assicurarsi di avere le autorizzazioni per un impianto esistente.',
'Create a new Group': 'Creare un nuovo Gruppo',
'Create a new organization or ensure that you have permissions for an existing organization.': "Creare una nuova organizzazione o assicurarsi di avere le autorizzazioni per un'organizzazione esistente.",
'Create a new Team': 'Creare una nuova Squadra',
'Create a Person': 'Creare una Persona',
'Create Activity': 'Crea Attività',
'Create Activity Report': 'Crea Report su Attività',
'Create Activity Type': 'Crea tipo di Attività',
'Create Assessment': 'Crea una nuova valutazione',
'Create Asset': 'Crea Cespite',
'Create Award': 'Creare Premio',
'Create Base Station': 'Crea la Postazione di Base',
'Create Bed Type': 'Crea Tipo Letto',
'Create Brand': 'Crea Marca',
'Create Budget': 'Crea Bilancio',
'Create Catalog': 'Crea catalogo',
'Create Catalog Item': 'Crea voce di Catalogo',
'Create Certificate': 'Aggiungere Certificato',
'Create Checklist': 'Crea lista di Controllo',
'Create Cholera Treatment Capability Information': 'Crea informazioni per il Trattamento del Colera',
'Create Cluster': 'Crea Cluster',
'Create Cluster Subsector': 'Crea Gruppo Sottosettore',
'Create Coalition': 'Crea una Coalizione',
'Create Competency Rating': 'Aggiungi Valutazione Competenza',
'Create Contact': 'Aggiungi Contatto',
'Create Course': 'Aggiungi Corso',
'Create Dead Body Report': 'Crea Report sul Cadavere',
'Create Department': 'Crea Dipartimento',
'Create Email Account': "Crea l'utenza email",
'Create Event': 'Creazione Evento',
'Create Event Type': 'Crea Tipo di Evento',
'Create Facility': 'Crea Struttura',
'Create Facility Type': 'Crea tipo di Struttura',
'Create Feature Layer': 'Aggiungi Livello di Funzione',
'Create Group': 'Creare Gruppo',
'Create Group Entry': 'Crea voce di Gruppo',
'Create Hospital': 'Crea Ospedale',
'Create Identification Report': 'Crea Report su Identificazione',
'Create Impact Assessment': "Crea valutazione dell'impatto",
'Create Incident': 'Crea Incidente',
'Create Incident Report': 'Crea Report su Incidente',
'Create Incident Type': 'Crea tipo di incidente',
'Create Item': 'Crea Elemento',
'Create Item Category': 'Crea Categoria Elementi',
'Create Item Pack': 'Crea Confezione Elementi',
'Create Job': 'Crea Job',
'Create Job Title': 'Crea Ruolo Professionale',
'Create Kit': 'Crea Kit',
'Create Layer': 'Aggiungere Strato',
'Create Location': 'Aggiungi Ubicazione',
'Create Location Hierarchy': 'Aggiungi Gerarchia di Locazioni',
'Create Mailing List': 'Crea Lista di Distribuzione email',
'Create Map Configuration': 'Aggiungere Configurazione della Mappa',
'Create Marker': 'Aggiungi Indicatore',
'Create Member': 'Crea Membro',
'Create Mobile Impact Assessment': 'Creare valutazione di impatto mobile',
'Create Network': 'Crea Rete',
'Create Office': 'Crea Ufficio',
'Create Office Type': 'Crea tipo di Ufficio',
'Create Organization': 'Crea Organizzazione',
'Create Organization Type': 'Crea tipo di Organizzazione',
'Create Personal Effects': 'Crea Effetti Personali',
'Create PoI Type': 'Aggiungi Tipo di PoI',
'Create Point of Interest': 'Aggiungi Punto di Interesse',
'Create Program': 'Crea Programma',
'Create Project': 'Crea Progetto',
'Create Projection': 'Aggiungi Proiezione',
'Create Rapid Assessment': 'Creazione Valutazione Rapida',
'Create Report': 'Crea Report',
'Create Request': 'Creazione Richiesta',
'Create Resource': 'Crea Risorsa',
'Create River': 'Crea Fiume',
'Create Role': 'Aggiungi Ruolo',
'Create Room': 'Crea Stanza',
'Create Scenario': 'Crea Scenario',
'Create Sector': 'Crea Settore',
'Create Service': 'Crea Servizio',
'Create Service Profile': 'Crea profilo del servizio',
'Create Shelter': 'Crea Struttura di Accoglienza',
'Create Shelter Service': 'Crea servizi per la Struttura di Accoglienza',
'Create Shelter Status': 'Crea lo stato per la Struttura di Accoglienza',
'Create Shelter Type': 'Crea un tipo per la Struttura di Accoglienza',
'Create Skill': 'Aggiungi Capacità',
'Create Skill Type': 'Aggiungere tipo di Capacità',
'Create SMS Outbound Gateway': 'Crea un gateway per gli SMS in uscita',
'Create SMTP to SMS Channel': 'Crea il canale SMTP per gli SMS',
'Create Staff Member': 'Aggiungere Membro al Personale',
'Create Status': 'Crea uno stato',
'Create Symbology': 'Crea Simbologia',
'Create Task': 'Aggiungi Incombenza',
'Create Team': 'Crea Squadra',
'Create Theme': 'Crea Tema',
'Create Training Event': 'Crea evento di formazione',
'Create Tropo Channel': 'Crea il canale Tropo',
'Create User': 'Aggiungi utente',
'Create Volunteer': 'Aggiungere Volontario',
'Create Volunteer Cluster': 'Creare Gruppo di Volontari',
'Create Volunteer Cluster Position': 'Creare Posizione del Gruppo di Volontari',
'Create Volunteer Cluster Type': 'Creare Tipo del Gruppo di Volontari',
'Create Volunteer Role': 'Creare ruolo volontario',
'Create Warehouse': 'Crea Magazzino',
'Create Web API Channel': 'Crea il Channel WEB Api',
'Create, enter, and manage surveys.': 'Creare, immettere e gestire le indagini.',
'created': 'Creato',
'Created By': 'Creato da',
'Created on %s': 'Creato su %s',
'Created on %s by %s': 'Creato su %s da %s',
'Creation of Surveys': 'Creazione Valutazioni',
'Credential': 'Credenziale',
'Credential added': 'Credenziale aggiunta',
'Credential deleted': 'Credenziale cancellata',
'Credential Details': 'Dettagli delle credenziali',
'Credential updated': 'Credenziale aggiornata',
'Credentialling Organization': 'Organizzazione Accreditata',
'Credentials': 'Credenziali',
'Credit Card': 'Carta di credito',
'Crime': 'Criminalità',
'Crisis Conflict': 'Conflitto per la crisi',
'Criteria': 'Criteri',
'Croatian - Spoken': 'Croato - Parlato',
'Croatian - Written': 'Croato - Scritto',
'Crop Image': 'Taglia Immagine',
'CSS file %s not writable - unable to apply theme!': 'File CSS %s non scrivibile - impossibile applicare tema!',
'curly': 'riccio',
'Currency': 'Valuta',
'current': 'corrente ',
'Current community priorities': 'Attuali priorità comunitarie',
'Current Entries': 'Voci Correnti',
'Current estimated population': 'Popolazione attuale stimata',
'Current estimated population in shelter. Staff, Volunteers and Evacuees.': 'Popolazione stimata presente nella Struttura di Accoglienza. Personale, Volontari e Evacuati.',
'Current general needs': 'Attuali esigenze generali',
'Current greatest needs of vulnerable groups': 'Attuali maggiori esigenze di gruppi vulnerabili',
'Current Group Members': 'Membri correnti del gruppo',
'Current health problems': 'Problemi sanitari attuali',
'Current Home Address': 'Attuale Indirizzo di Casa',
'Current Identities': 'Attuali Identità',
'Current Location': 'Attuale Ubicazione',
'Current Log Entries': 'Voci Log correnti',
'Current Memberships': 'Appartenenze attuali',
'Current number of patients': 'Attuale numero di pazienti',
'Current Population (Day and Night)': 'Popolazione attuale (giorno e notte)',
'Current Population (Night)': 'Popolazione attuale (Notte)',
'Current Population Availability (Day and Night)': 'Attuale disponibilità di popolazione (giorno e notte)',
'Current Population Availability (Night)': 'Attuale disponibilità di popolazione ( notte)',
'Current problems, categories': 'I problemi attuali, categorie',
'Current problems, details': 'I problemi attuali, dettagli',
'Current Records': 'Record correnti',
'Current Registrations': 'Le registrazioni correnti',
'Current request': 'Richiesta corrente',
'Current response': 'Risposta Corrente',
'Current session': 'sessione corrente',
'Current Status': 'Stato corrente',
'Current Team Members': 'Attuali membri della squadra',
'Current Twitter account': 'Attuale account Twitter',
'Currently no Appraisals entered': 'Attualmente non ci sono Valutazioni registrate.',
'Currently no Certifications registered': 'Attualmente non ci sono certificazioni registrate',
'Currently no Competencies registered': 'Attualmente non ci sono competenze registrate',
'Currently no Course Certificates registered': 'Attualmente non ci sono Certificati di Corso registrati',
'Currently no Credentials registered': 'Attualmente le credenziali non sono registrate',
'Currently no entries in the catalog': "Attualmente non c'è alcuna voce nel catalogo",
'Currently no hours recorded for this volunteer': 'Attualmente non ci sono ore registrate per questo volontario.',
'Currently no Missions registered': 'Attualmente non ci sono Missioni registrate',
'Currently no Participants registered': 'Attualmente non ci sono partecipanti registrati',
'Currently no Professional Experience entered': 'Attualmente non ci sono esperienze professionali registrate.',
'Currently no programs registered': 'Attualmente non ci sono programmi registrati.',
'Currently no Skill Equivalences registered': 'Attualmente non ci sono capacità equivalenti registrate',
'Currently no Skills registered': 'Attualmente non ci sono competenze registrate.',
'Currently no staff assigned': "Attualmente non c'è personale assegnato.",
'Currently no training events registered': 'Attualmente non ci sono eventi di formazione registrati.',
'Currently no Trainings registered': 'Attualmente nessun addestramento registrato',
'currently registered': 'Attualmente registrato',
'CV': 'CV',
'Daily': 'Giornalmente',
'daily': 'giornaliero',
'Daily Work': 'Lavoro giornaliero',
'Dam Overflow': 'Inondazione di diga',
'Damage': 'danno',
'Dangerous Person': 'Persona pericolosa',
'dark': 'Scuro',
'Dashboard': 'dashboard',
'Data': 'Dati',
'Data added to Theme Layer': 'Dati aggiunti allo strato tema',
'Data Type': 'Tipo di dato',
'data uploaded': 'Dati caricati',
'Data uploaded': 'Dati caricati',
'Database': 'Database',
'database %s select': 'Select del database %s',
'Database Administrator': 'Amministratore del database',
'Date': 'Data',
'Date & Time': 'Data & Ora',
'Date and Time': 'Data e Ora',
'Date and time this report relates to.': 'Data e Ora a cui questo report si riferisce.',
'Date Available': 'Data disponibile',
'Date must be %(max)s or earlier!': 'La data deve essere %(max)s o precedente!',
'Date must be %(min)s or later!': 'La data deve essere %(min)s o successiva!',
'Date must be between %(min)s and %(max)s!': 'La data deve essere compresa tra %(min)s e %(max)s!',
'Date of Birth': 'Data di nascita',
'Date of Latest Information on Beneficiaries Reached': 'data delle ultime informazioni sui beneficiari raggiunti',
'Date of Report': 'Data del Report',
'Date Printed': 'Data Stampata',
'Date Received': 'Data ricevuta',
'Date Requested': 'Data richiesta',
'Date Required': 'Data obbligatoria',
'Date Sent': 'Data inviata',
'Date Until': 'Data Fino A',
'Date/Time': 'Data/ora',
'Date/Time of Find': 'Data/Ora del ritrovamento',
'Date/Time when found': 'Data/Ora del ritrovamento',
'Date/Time when last seen': 'Data/Ora di ultimo avvistamento',
'Day and Night': 'Giorno e Notte',
'db': 'DB',
'DC': 'Corrente continua',
'De-duplicate': 'De-duplicare',
'De-duplicate Records': 'De-duplicare i record',
'De-duplicator': 'De-duplicatore',
'Dead Body': 'Corpo morto',
'Dead Body Details': 'Dettagli del corpo morto',
'Dead body report added': 'Report del cadavere aggiunto',
'Dead body report deleted': 'Report del cadavere eliminato',
'Dead body report updated': 'Report del cadavere aggiornato',
'Dead Body Reports': 'Report dei cadaveri',
'Deaths in the past 24h': 'Morti nelle 24 ore scorse',
'Deaths/24hrs': 'Deceduti / 24h',
'deceased': 'defunto',
'Deceased': 'Defunto',
'Decimal Degrees': 'Gradi decimali',
'Decision': 'Decisione',
'Decomposed': 'Decomposti',
'Default': 'Default',
'Default Base layer?': 'Strato base di default?',
'Default Height of the map window.': 'Altezza predefinita della finestra della mappa.',
'Default Location': 'Locazione di default',
'Default Map': 'Mappa predefinita',
'Default Marker': 'Indicatore predefinito',
'Default Realm': 'Default Realm',
'Default Realm = All Entities the User is a Staff Member of': "Default Realm = Tutte le Entità di cui l'utente è un membro del personale",
'Default synchronization policy': 'Politica di sincronizzazione predefinita',
'Default Width of the map window.': 'Larghezza predefinita della finestra.',
'Default?': 'Default?',
'Defecation area for animals': 'Area deiezioni per animali',
'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).': "Definire gli scenari per l'assegnazione di adeguate risorse (Umane, Beni & Servizi).",
'Defines the icon used for display of features on handheld GPS.': "Definisce l' icona utilizzata per la visualizzazione delle funzioni su palmari GPS.",
'Defines the icon used for display of features on interactive map & KML exports.': "Definisce l' icona utilizzata per la visualizzazione delle funzioni sulla mappa interattiva & le esportazioni KML.",
'Defines the marker used for display & the attributes visible in the popup.': "Definisce l'indicatore utilizzato per la visualizzazione e gli attributi visibili nel menu a comparsa.",
'Degrees in a latitude must be between -90 to 90.': 'Gradi in latitudine deve essere compresa fra -90 e 90.',
'Degrees in a longitude must be between -180 to 180.': 'Gradi in longitudine deve essere compresa fra -180 e 180.',
'Degrees must be a number between -180 and 180': 'I gradi devono essere un numero compreso tra -180 e 180',
'Degrees must be a number.': 'Gradi deve essere un numero',
'Dehydration': 'Disidratazione',
'Delete': 'Elimina',
'Delete Affiliation': 'Eliminare Affiliazione',
'delete all checked': 'Eliminare tutte le voci marcate',
'Delete all data of this type which the user has permission to before upload. This is designed for workflows where the data is maintained in an offline spreadsheet and uploaded just for Reads.': "Elimina tutti i dati di questo tipo, di cui l'utente era autorizzato prima del caricamento. Questo è progettato per i flussi di lavoro in cui i dati vengono mantenuti in un foglio di calcolo offline e caricato solo per lettura.",
'Delete Alternative Item': 'Eliminare Voce Alternativa',
'Delete Appraisal': 'Elimina valutazione.',
'Delete Assessment': 'Elimina Valutazione',
'Delete Assessment Summary': 'Elimina Riepilogo valutazione',
'Delete Asset': 'Elimina asset',
'Delete Asset Log Entry': "Elimina voce di Log dell'Asset",
'Delete Award': 'Eliminare Premio',
'Delete Base Station': 'Elimina la postazione di base',
'Delete Baseline': 'Cancella Baseline',
'Delete Baseline Type': 'Cancella tipo di baseline',
'Delete Branch': "Elimina l'Unità",
'Delete Brand': 'Cancella Marca',
'Delete Budget': 'Cancella Budget',
'Delete Bundle': 'Cancella raggruppamento',
'Delete Catalog': 'Eliminazione catalogo',
'Delete Catalog Item': 'Eliminare voce catalogo',
'Delete Certificate': 'Elimina certificato',
'Delete Certification': 'Elimina Certificazione',
'Delete Cluster': 'Elimina Cluster',
'Delete Cluster Subsector': 'Cancellare Sottosettore del Cluster',
'Delete Commitment': 'Cancellare Impegno',
'Delete Commitment Item': 'Eliminare Voce Impegno',
'Delete Competency': 'Elimina competenza',
'Delete Competency Rating': 'Elimina Rating della Competenza',
'Delete Contact': 'Elimina contratto.',
'Delete Contact Information': 'Eliminare Informazioni del Contatto',
'Delete Course': 'Elimina Corso',
'Delete Course Certificate': 'Elimina Certificato del Corso',
'Delete Credential': 'Eliminare credenziali',
'Delete Data from Theme layer': 'Eliminare dati dallo strato Theme',
'Delete Department': 'Elimina dipartimento',
'Delete Document': 'Elimina documento',
'Delete Donor': 'Elimina Donatore',
'Delete Email': "Elimina l'indirizzo email",
'Delete Entry': 'Elimina voce',
'Delete Event': 'Elimina evento',
'Delete Event Type': 'Cancella tipo di evento',
'Delete Facility': 'Elimina la Struttura',
'Delete Facility Type': 'Elimina il tipo di Struttura',
'Delete Feature Layer': 'Eliminare la funzione di strato',
'Delete from Server?': 'Eliminare dal Server?',
'Delete Group': 'Eliminare il Gruppo',
'Delete Hospital': 'Eliminare Ospedale',
'Delete Hours': 'Elimina ore',
'Delete Image': 'Eliminare Immagine',
'Delete Impact': 'Eliminare impatto',
'Delete Impact Type': 'Eliminare il tipo di impatto',
'Delete Incident Report': 'Eliminare Report incidente',
'Delete Item': 'Elimina elemento',
'Delete Item Category': 'Elimina Categoria elementi',
'Delete Item Pack': 'Elimina pacchetto elementi',
'Delete Job Role': 'Elimina il ruolo di lavoro',
'Delete Job Title': 'Elimina titolo di lavoro',
'Delete Key': 'Tasto Canc',
'Delete Kit': 'Eliminare Kit',
'Delete Layer': 'Elimina livello',
'Delete Level 1 Assessment': 'Elimina valutazione di Livello 1',
'Delete Level 2 Assessment': 'Elimina Valutazione di Livello 2',
'Delete Location': 'Elimina locazione',
'Delete Location Hierarchy': 'Eliminare la gerachia di locazioni',
'Delete Mailing List': 'Eliminare Lista di Posta',
'Delete Map Configuration': 'Elimina la configurazione di mappa',
'Delete Marker': 'Elimina indicatore',
'Delete Membership': 'Elimina Adesione',
'Delete Message': 'Elimina messaggio',
'Delete Mission': 'Eliminare la Missione',
'Delete Need': 'Eliminare Necessità',
'Delete Need Type': 'Elimina tipo di necessità',
'Delete Office': "Elimina l'Ufficio",
'Delete Office Type': 'Elimina il tipo di Ufficio',
'Delete Organization': 'Elimina Organizzazione',
'Delete Organization Type': 'Elimina il tipo di Organizzazione',
'Delete Participant': 'Eliminare Partecipante',
'Delete Peer': 'Eliminare Peer',
'Delete Person': 'Eliminare persona',
'Delete Photo': 'Cancellare Foto',
'Delete PoI Type': 'Eliminare il tipo PoI',
'Delete Point of Interest': 'Eliminare Punto di Interesse',
'Delete Population Statistic': 'Eliminare la statistica della Popolazione',
'Delete Position': 'Eliminare Posizione',
'Delete Post': 'Elimina il Post',
'Delete Professional Experience': 'Eliminare Esperienza Professionale',
'Delete Program': 'Eliminare Programma',
'Delete Project': 'Elimina progetto',
'Delete Projection': 'Eliminare Proiezione',
'Delete Rapid Assessment': 'Eliminare Rapida Valutazione',
'Delete Received Item': 'Eliminare Elemento Ricevuto',
'Delete Received Shipment': 'Cancellare la Spedizione ricevuta',
'Delete Record': 'Elimina record',
'Delete Region': 'Elimina la Regione',
'Delete Report': 'Elimina Report',
'Delete Request': 'Elimina richiesta',
'Delete Request Item': "Elimina l'item richiesto",
'Delete Resource': 'Elimina risorsa',
'Delete Resource Type': 'Elimina il tipo di Risorsa',
'Delete Role': 'Eliminare Ruolo',
'Delete Room': 'Eliminare la stanza',
'Delete saved filter': 'Elimina il filtro salvato',
'Delete Scenario': 'Elimina lo scenario',
'Delete Section': 'Elimina sezione',
'Delete Sector': 'Elimina Settore',
'Delete Sent Item': 'Cancellazione voce inviata',
'Delete Sent Shipment': 'Cancellazione Spedizione inviata',
'Delete Service': 'Elimina Servizio',
'Delete Service Profile': 'Elimina profilo servizio',
'Delete Setting': 'Elimina impostazione',
'Delete Skill': 'Eliminare Capacità',
'Delete Skill Equivalence': 'Eliminare Capacità equivalente',
'Delete Skill Provision': 'Elimina approviggionamento di capacità',
'Delete Skill Type': 'Elimina Tipo di Capacità',
'Delete SMS': 'Elimina SMS',
'Delete SMS Outbound Gateway': 'Elimina Gateway di SMS in uscita',
'Delete SMTP to SMS Channel': 'Elimina il Canale SMTP per gli SMS',
'Delete Staff Assignment': 'Elimina Assegnazione Personale',
'Delete Staff Member': 'Elimina Membro del Personale',
'Delete Staff Type': 'Elimina il Tipo di Personale',
'Delete Status': 'Elimina stato',
'Delete Subscription': 'Elimina sottoscrizione',
'Delete Subsector': 'Elimina Sottosettore',
'Delete Survey Answer': "Elimina la risposta all'indagine",
'Delete Survey Question': "Eliminare la domanda dell'indagine",
'Delete Survey Series': "Elimina le serie dell'indagine",
'Delete Survey Template': "Elimina il modello dell'indagine",
'Delete Symbology': 'Eliminare la simbologia',
'Delete this Filter': 'Elimina questo Filtro',
'Delete this filter?': 'Eliminare questo filtro?',
'Delete Training': 'Elimina Formazione',
'Delete Training Event': 'Elimina Evento Formazione',
'Delete Tropo Channel': 'Elimina il canale Tropo',
'Delete Tweet': 'Elimina il Tweet',
'Delete Unit': "Elimina l'unità",
'Delete User': "Elimina l'utente",
'Delete Volunteer': 'Elimina Volontario',
'Delete Volunteer Cluster': 'Elimina Gruppo di Volontari',
'Delete Volunteer Cluster Position': 'Elimina Posizione del Gruppo di Volontari',
'Delete Volunteer Cluster Type': 'Elimina il Tipo del Gruppo di Volontari',
'Delete Volunteer Role': 'Elimina Ruolo Volontario',
'Delete Warehouse': 'Elimina Magazzino',
'Delete Web API Channel': 'Elimina il canale Web API',
'deleted': 'Cancellato',
'Delphi Decision Maker': 'Decisore Delphi',
'Demographic': 'Demografico',
'Demographic Data': 'Dati demografici',
'Demographics': 'Demografici',
'Demonstrations': 'Manifestazioni',
'Dental Examination': 'Esame dentale',
'Dental Profile': 'Profilo dentale',
'departed': 'Partito ',
'Department / Unit': 'Dipartimento / Unità',
'Department added': 'Dipartimento aggiunto',
'Department Catalog': 'Catalogo del Dipartimento ',
'Department deleted': 'Dipartimento cancellato',
'Department Details': 'Dettagli del Dipartimento',
'Department updated': 'Dipartimento aggiornato',
'Deployed': 'Distribuito',
'Deployment': 'Distribuzione',
'Deployment Alert': 'Avviso Distribuzione',
'Deployment Request': 'Richiesta di distribuzione',
'Describe the condition of the roads to your hospital.': "Descrivere la condizione delle strade per l'ospedale.",
'Describe the procedure which this record relates to (e.g. "medical examination")': 'Descrivere la procedura a cui questo record si riferisce (ad es. "esame medico")',
'Description': 'Descrizione',
'Description of Contacts': 'Descrizione dei contatti',
'Description of defecation area': "Descrizione dell'area defecazione",
'Description of drinking water source': "Descrizione origine dell'acqua potabile",
'Description of sanitary water source': "Descrizione origine dell'acqua sanitaria",
'Description of water source before the disaster': "Descrizione della fonte d'acqua prima del disastro",
'Description/ ADSL Available': 'Descrizione/ ADSL disponibile',
'Description/ Bancomat Available': 'Descrizione/ Bancomat disponibile',
'Description/ Drainage System Available': 'Descrizione/ Sistema di drenaggio disponibile',
'Description/ Drinkable Water Available': 'Descrizione/ Acqua potabile disponibile',
'Description/ Electric Power Network Available': 'Descrizione/ Rete Elettrica disponibile',
'Description/ Local Municipal Office': 'Descrizione/ Municipio',
'Description/ Nearby Available Area': 'Descrizione/ Area disponibile nelle vicinanze',
'Description/ Nearby Parking Area': 'Descrizione/ Area parcheggio disponibile nelle vicinanze',
'Description/ Poste Italiane Office': 'Descrizione/ Ufficio Poste Italiane',
'Description/ Sewer System Available': 'Descrizione/ Sistema fognario disponibile',
'design': 'progettazione',
'Designed Organisation': 'Organizzazione disegnata',
'Desire to remain with family': 'Desiderio di restare con la famiglia',
'Destination': 'Destinazione',
'Destroyed': 'Eliminato',
'Details': 'Dettagli',
'Details field is required!': 'Il campo dettagli è obbligatorio!',
'Dialysis': 'Dialisi',
'Diaphragms, horizontal bracing': 'Diaframma, graffa orizzontale',
'Different Place of Birth': 'Different Place of Birth',
'Dignitary Visit': 'Visita Dignitari',
'Direction': 'Direzione',
'Disable': 'Disabilita',
'Disabled': 'Disabilitato',
'Disabled participating in coping activities': 'Disabile che partecipa',
'Disabled?': 'Disabilitata?',
'Disaster Cyclone': 'Ciclone disastroso',
'Disaster clean-up/repairs': 'Pulizia/riparazione del disastro',
'Disaster Earthquake': 'Terremoto disastroso',
'Disaster Epidemic': 'Epidemia disastrosa',
'Disaster Fire': 'Incendio disastroso',
'Disaster Flood': 'Alluvione disastrosa',
'Disaster Heavy Rain': 'Pioggia torrenziale disastrosa',
'Disaster Landslide': 'Frana disastrosa',
'Disaster Management System Officer': 'Ufficiale resposabile del sistema di gestione disastri',
'Disaster Management Unit Assistant': 'Assistente della unità di gestione disastri',
'Disaster Risk Reduction': 'Disaster Risk Reduction',
'Disaster Storm': 'Tempesta disastrosa',
'Disaster Tornado': 'Tornado disastroso',
'Disaster Tsunami': 'Tsunami disastroso',
'Disaster Victim Identification': 'Identificazione della vittima di disastro',
'Disaster Victim Registry': 'Registro delle vittime di disastro',
'Disaster Volcano': 'Vulcano disastroso',
'Discharge (cusecs)': 'Emissione (cusecs)',
'Discharges/24hrs': 'Emissioni/24h',
'Discussion Forum': 'Forum di discussione',
'Discussion Forum on item': "Forum di discussione sull'elemento",
'Disease vectors': 'Portatori di malattie',
'diseased': 'Malato',
'Dispensary': 'Dispensario',
'displaced': 'Sfollato',
'Displaced': 'Spostato',
'Displaced Populations': 'Le popolazioni sfollate',
'Display Polygons?': 'Visualizzare i Poligoni?',
'Display Routes?': 'Visualizzare gli instradamenti?',
'Display Tracks?': 'Visualizzare Tracciati?',
'Display Waypoints?': 'Visualizzare Punti di Passaggio?',
'Distance between defecation area and water source': "Distanza tra l'area di defecazione e la sorgente d'acqua",
'Distance from %s:': 'Distanza da %s:',
'Distance(Kms)': 'Distanza (Km)',
'Distribution': 'Distribuzione',
'Distribution groups': 'Gruppi di distribuzione',
'Distribution Hub': 'Centro distribuzione',
'District': 'Distretto',
'divorced': 'Divorziato',
'DNA Profile': 'Profilo del DNA',
'DNA Profiling': 'Profilazione del DNA',
'Do you really want to approve this record?': 'Si desidera davvero approvare questo record?',
'Do you really want to delete these records?': 'Si desidera davvero eliminare questi record?',
'Do you really want to delete this record? (This action can not be reversed)': 'Si desidera davvero eliminare questo record? ( Questa azione non può essere annullata)',
'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': "Si desidera annullare questa spedizione ricevuta? Gli elementi verranno rimossi dall' inventario. Questa azione non può essere annullata!",
'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': "Si desidera annullare questa spedizione inviata? Gli elementi verranno rimessi nell' inventario. Questa azione non può essere annullata!",
'Do you want to receive this shipment?': 'Si desidera ricevere questa spedizione?',
'Do you want to send these Committed items?': 'Si desidera inviare questi elementi impegnati?',
'Do you want to send this shipment?': 'Si desidera inviare questa spedizione?',
'Document added': 'Documento aggiunto',
'Document deleted': 'Documento eliminato',
'Document Details': 'Dettagli documento',
'Document Scan': 'Scansione del documento',
'Document updated': 'Documento aggiornato',
'Documents': 'Documenti',
'Documents and Photos': 'Documenti e Fotografie',
'Does this facility provide a cholera treatment center?': 'Questa Struttura fornisce un centro per il trattamento del colera?',
'Doing nothing (no structured activity)': 'Facendo Niente (nessuna attività Strutturata)',
'Dollars': 'Dollari',
'Domain': 'Dominio',
'Domestic chores': 'Faccende domestiche',
'dominic': 'dominic',
'Donated': 'Donato',
'Donation Certificate': 'Certificato di Donazione',
'Donation Phone #': 'Donazione Telefono #',
'done!': 'Fine',
'Donor': 'Donatore',
'Donor added': 'Donatore aggiunto',
'Donor deleted': 'Donatore eliminato',
'Donor Details': 'Dettagli donatore',
'Donor updated': 'Donatore aggiornato',
'Donors': 'Donatori',
'Donors Report': 'Report donatori',
'Door frame': 'Cornice della porta',
'Download OCR-able PDF Form': 'Scarica modulo PDF abilitato OCR.',
'Download PDF': 'Scarica PDF',
'Draft': 'BOZZA',
'Draft Features': 'Bozza di caratteristiche',
'Drag an image below to crop and scale it before uploading it:': "Trascina un'immagine sotto per tagliare e scalarla prima di caricarla:",
'Drainage': 'Prosciugamento',
'Draw on Map': 'Disegna sulla mappa',
'Drawing up a Budget for Staff & Equipment across various Locations.': "L'elaborazione di un bilancio per il personale e le attrezzature in varie locazioni.",
'Drill Down by Group': 'Ricerca per Gruppo',
'Drill Down by Incident': 'ricerca per Incidente',
'Drill Down by Shelter': 'Ricerca per Struttura di Accoglienza',
'Driver': 'Conducente',
'Driving - Car': 'Guida - Macchina',
'Driving - Class A (20 o 21 years)': 'Guida - Tipo A (20 o 21 Anni)',
'Driving - Class A1 (16 years)': 'Guida - Tipo A1 (16 anni)',
'Driving - Class A2 (18 years)': 'Guida - Tipo A2 (18 anni)',
'Driving - Class AM (14 years)': 'Guida - Tipo AM (14 anni)',
'Driving - Class B/B96 (18 years)': 'Guida - Tipo B/B96 (18 anni)',
'Driving - Class B1 (16 years)': 'Guida - Tipo B1 (16 anni)',
'Driving - Class BE (18 years)': 'Guida - Tipo BE (18 anni)',
'Driving - Class C (21 years)': 'Guida - Tipo C (21 anni)',
'Driving - Class C1 (18 years)': 'Guida - Tipo C1 (18 anni)',
'Driving - Class C1E (18 years)': 'Guida - Tipo C1E (18 anni)',
'Driving - Class CE (21 years)': 'Guida - Tipo CE (21 anni)',
'Driving - Class CFP': 'Guida - Tipo CFP',
'Driving - Class CQC Goods (21 years)': 'Guida - Tipo CQC Merci (21 anni)',
'Driving - Class CQC Person (21 years)': 'Guida - Tipo CQC Persone (21 anni)',
'Driving - Class D (24 years)': 'Guida - Tipo D (24 anni)',
'Driving - Class D1 (21 years)': 'Guida - Tipo D1 (21 anni)',
'Driving - Class D1E (21 years)': 'Guida - Tipo D1E (21 anni)',
'Driving - Class DE (24 years)': 'Guida - Tipo DE (24 anni)',
'Driving - Class KA (21 years)': 'Guida - Tipo KA (21 anni)',
'Driving - Class KB (21 years)': 'Guida - Tipo KB (21 anni)',
'Driving License': 'Patente di guida',
'Driving - Motorcycle': 'Guida - Motocicli',
'Drop-Off': 'Consegna',
'Drought': 'Siccità',
'Drugs': 'Droghe',
'Dug Well': 'Pozzo scavato',
'duplicate': 'Duplica',
'Duplicate': 'Duplica ',
'Duplicate?': 'Duplicato?',
'Duration': 'Durata prevista',
'Dust Storm': 'Tempesta di polvere',
'DVI Navigator': 'Navigatore DVI',
'Dwelling': 'Residenza',
'Early Recovery': 'Guarigione prematura',
'Earthquake': 'Terremoto',
'Edit': 'Modifica',
'edit': 'Modifica',
'Edit %(site_label)s Status': 'Edita %(site_label)s stato',
"Edit 'More Info'": "Edita 'Maggiori informazioni'",
'Edit Activity': 'Modifica attività',
'Edit Address': 'Modificare indirizzo',
'Edit Affiliation': 'Modificare Affiliazione',
'Edit Allocation': 'Modifica Allocation',
'Edit Alternative Item': 'Modifica Voce Alternativa',
'Edit Application': 'Modifica applicazione',
'Edit Appraisal': 'Modificare Valutazione',
'Edit Assessment': 'Modifica Valutazione',
'Edit Assessment Summary': 'Modifica riepilogo valutazione',
'Edit Asset': 'Modifica attività',
'Edit Asset Log Entry': 'Voce di Log relativa alla Modifica Asset',
'Edit Award': 'Modificare Premio',
'Edit Base Station': 'Editare la postazione di base',
'Edit Baseline': 'Modifica Baseline',
'Edit Baseline Type': 'Modifica tipo di Baseline',
'Edit Branch Organization': 'Edita Unità organizzativa',
'Edit Brand': 'Modifica Brand',
'Edit Budget': 'Modifica Bilancio',
'Edit Bundle': 'Modifica Bundle',
'Edit Camp': 'Modifica Campo',
'Edit Camp Service': 'Modifica Servizi del Campo',
'Edit Camp Status': 'Modifica Stato del Campo',
'Edit Camp Type': 'Modifica Tipo di Campo',
'Edit Catalog': 'Modifica catalogo',
'Edit Catalog Item': 'Modifica voce catalogo',
'Edit Certificate': 'Modificare Certificato',
'Edit Certification': 'Modificare Certificazione',
'Edit Cluster': 'Edita Cluster',
'Edit Cluster Subsector': 'Modifica Sottosettore Cluster',
'Edit Commitment': 'Modifica Impegno',
'Edit Commitment Item': "Modificare la Voce dell'impegno",
'Edit Competency': 'Modifica Competenza',
'Edit Competency Rating': 'Modificare Valutazione sulle Competenze',
'Edit Contact': 'Modificare Contatto',
'Edit Contact Details': 'Modificare Dettagli del Contatto',
'Edit Contact Information': 'Modificare Informazioni Contatto',
'Edit Contents': 'Modifica contenuti',
'Edit Course': 'Modificare Corso',
'Edit Course Certificate': 'Modificare Certificato del Corso',
'Edit Credential': 'Modificare Credenziali',
'Edit current record': 'Modifica record corrente',
'Edit Dead Body Details': 'Modifica i dettagli del cadavere',
'Edit Department': 'Modificare Dipartimento',
'Edit Description': 'Modifica descrizione',
'Edit Details': 'Edita dei dettagli',
'Edit Disaster Victims': 'Modifica delle Vittime del Disastro',
'Edit Document': 'Modifica documento',
'Edit Donor': 'Edita Donatore',
'Edit Education Details': 'Modifica Dettagli Istruzione',
'Edit Education Level': 'Modifica Livello Istruzione',
'Edit Email Settings': 'Modifica le impostazioni E-Mail',
'Edit Entry': 'Edita voce',
'Edit Event': 'Modifica evento',
'Edit Event Type': 'Modifica tipo di evento',
'Edit Experience': 'Modificare Esperienza',
'Edit Facility': 'Modifica Impianto',
'Edit Facility Type': 'Modifica tipo di Struttura',
'Edit Feature Layer': 'Edita Funzione Layer',
'Edit Flood Report': 'Modifica Report Alluvione',
'Edit Gateway Settings': 'Modifica Impostazioni Gateway',
'Edit Group': 'Modifica Gruppo',
'Edit Hospital': 'Modifica Ospedale',
'Edit Hours': 'Modificare Ore',
'Edit Human Resource': 'Modifica risorse umane',
'Edit Identification Report': 'Modifica Identificazione Report',
'Edit Identity': 'Modifica Identità',
'Edit Image Details': 'Modifica Dettagli Immagine',
'Edit Impact': 'Modifica Impatto',
'Edit Impact Type': 'Modifica Tipo Di Impatto',
'Edit Incident': 'Modifica incidente',
'Edit Incident Report': 'Modifica Report incidente',
'Edit Incident Type': 'Modifica tipo di incidente',
'Edit Inventory Item': 'Modifica voce di inventario',
'Edit Item': 'Modifica elemento',
'Edit Item Category': "Modifica la categoria dell'elemento",
'Edit Item Pack': 'Modifica confezione elementi',
'Edit Job': 'Edita Ruolo professionale',
'Edit Job Role': 'Modifica Ruolo',
'Edit Job Title': 'Modifica Titolo del Ruolo professionale',
'Edit Key': 'Modifica chiave',
'Edit Kit': 'Modifica Kit',
'Edit Layer': 'Edita Livello',
'Edit Level %d Locations?': 'Edita locazioni di livello %d ?',
'Edit Level 1 Assessment': 'Modifica valutazione del Livello 1',
'Edit Level 2 Assessment': 'Modifica valutazione di Livello 2',
'Edit Location': "Edita Ubicazione",
'Edit Location Details': 'Edita dettagli di locazione',
'Edit Location Hierarchy': 'Edita gerarchia di locazioni',
'Edit Log Entry': 'Modificare la Voce di Log',
'Edit Mailing List': 'Modificare la Lista di Posta',
'Edit Map Configuration': 'Edita Configurazione della Mappa',
'Edit Map Services': 'Modifica i servizi di tracciatura',
'Edit Marker': 'Edita Indicatore',
'Edit Membership': 'Modificare Appartenenza',
'Edit Message': 'Modificare messaggio',
'Edit message': 'Modifica messaggio',
'Edit Messaging Settings': 'Modifica impostazioni di messaggistica',
'Edit Mission': 'Modifica Missione',
'Edit Mobile Commons Settings': 'Editare le configurazioni di base del Mobile',
'Edit Modem Channel': 'Editare il canale Modem',
'Edit Modem Settings': 'Modifica Impostazioni Modem',
'Edit Need': 'Necessità di modifica',
'Edit Need Type': 'Necessità di Modifica del carattere',
'Edit Network': 'Edita Rete',
'Edit Office': 'Edita Ufficio',
'Edit Office Type': "Edita tipo d'Ufficio",
'Edit Options': 'Modifica opzioni',
'Edit Organization': 'Edita Organizzazione',
'Edit Organization Type': 'Edita tipo di Organizzazione',
'Edit Parameters': 'Modifica parametri',
'Edit Parser Connection': 'Modifica il collegamento al decodificatore',
'Edit Participant': 'Modificare Partecipante',
'Edit Peer Details': 'Modifica Dettagli Peer',
'Edit Permissions for %(role)s': 'Edita permessi per %(role)s',
'Edit Person Details': 'Modifica Dettagli Persona',
"Edit Person's Details": 'Modifica Dettagli della Persona',
'Edit Personal Effects Details': 'Modifica i dettagli degli effetti personali',
'Edit Photo': 'Modificare Foto',
'Edit PoI Type': 'Edita tipo di PoI',
'Edit Point of Interest': 'Edita Punto di Interesse',
'Edit Population Statistic': 'Modifica Statistica della Popolazione',
'Edit Position': 'Modifica posizione',
'Edit Problem': 'Modifica problema',
'Edit Professional Experience': 'Modifica Esperienza Professionale',
'Edit Profile Configuration': 'Edita configurazione di profilo',
'Edit Program': 'Modifica Programma',
'Edit Project': 'Modifica progetto',
'Edit Projection': 'Rivedere una proiezione',
'Edit Rapid Assessment': 'Rivedere una valutazione rapida',
'Edit Received Item': 'Rivedere un elemento ricevuto',
'Edit Received Shipment': 'Rivedere una spedizione ricevuta',
'Edit Record': 'Rivedere un record',
'Edit Region': 'Edita la Regione',
'Edit Registration': 'Modifica di registrazione',
'Edit Registration Details': 'Modifica i dettagli di registrazione',
'Edit Request': 'Modifica Richiesta',
'Edit Request Item': 'Modifica Richiesta Articolo',
'Edit Resource': 'Modifica Risorsa',
'Edit Resource Type': 'Edita il tipo di Risorsa',
'Edit River': 'Modifica Fiume',
'Edit Role': 'Edita ruolo',
'Edit Room': 'Edita la stanza',
'Edit RSS Channel': 'Editare il Canale RSS',
'Edit Scenario': 'Modifica scenario',
'Edit Sector': 'Editare il settore',
'Edit Sender Priority': 'Editare la Priorità del Sender',
'Edit Sent Item': "Editare l'articolo inviato",
'Edit Service': 'Edita il Servizio',
'Edit Setting': 'Modificare impostazione',
'Edit Settings': 'Modifica impostazioni',
'Edit Shelter': 'Modifica Struttura di Accoglienza',
'Edit Shelter Service': 'Modifica servizi della Struttura di Accoglienza',
'Edit Shelter Status': 'Modifica Stato della Struttura di Accoglienza',
'Edit Shelter Type': 'Modifica Tipo della Struttura di Accoglienza',
'Edit Skill': 'Modifica Capacità',
'Edit Skill Equivalence': 'Modifica Capacità equivalente',
'Edit Skill Provision': 'Modifica Disponibilità di Capacità',
'Edit Skill Type': 'Modifica tipo di Capacità',
'Edit SMS Outbound Gateway': 'Editare Gateway di SMS in uscita',
'Edit SMTP to SMS Channel': 'Editare il Canale SNMP per gli SMS',
'Edit Solution': 'Modifica Soluzione',
'Edit Staff Assignment': 'Modificare Assegnazione Personale',
'Edit Staff Member Details': 'Modificare Dettagli Membri del Personale',
'Edit Staff Type': 'Modifica Tipo di Personale',
'Edit Status': 'Edita lo stato',
'Edit Subscription': 'Modifica sottoscrizione',
'Edit Subsector': 'Modifica Sottosettore',
'Edit Survey Answer': 'Modifica Risposta del Sondaggio',
'Edit Survey Question': 'Modifica domanda del sondaggio',
'Edit Survey Series': 'Modifica le Serie di sondaggi',
'Edit Survey Template': 'Modifica il Modello di sondaggio',
'Edit Symbology': 'Edita simbologia',
'Edit Tag': 'Edita etichetta',
'Edit Task': 'Modifica attività',
'Edit Team': 'Modificare la squadra',
'Edit the OpenStreetMap data for this area': "Edita dati dell'OpenStreetMap per questa area ",
'Edit Theme': 'Modifica tema',
'Edit Theme Data': 'Edita dati di tema',
'Edit Themes': 'Modifica Dei Temi',
'Edit this entry': 'Edita questa voce',
'Edit Ticket': 'Modifica Ticket',
'Edit Track': 'Modifica traccia',
'Edit Training': 'Modificare Formazione',
'Edit Training Event': 'Modificare Evento Formazione',
'Edit Tropo Channel': 'Edita il Canale Tropo',
'Edit Tropo Settings': 'Modifica Impostazioni Tropo',
'Edit Twilio Channel': 'Edita il canale Twilio',
'Edit Twitter account': "Edita l'utenza Twitter",
'Edit Twitter Search Query': 'Edita la ricerca Twitter',
'Edit User': 'Modifica Utente',
'Edit Volunteer Availability': 'Modifica Disponibilità del volontario',
'Edit Volunteer Cluster': 'Modificare Gruppo di Volontari',
'Edit Volunteer Cluster Position': 'Modificare Posizione del Gruppo di Volontari',
'Edit Volunteer Cluster Type': 'Modificare Tipo del Gruppo di Volontari',
'Edit Volunteer Details': 'Modificare Dettagli Volontario',
'Edit Volunteer Role': 'Modificare Ruolo del Volontario',
'Edit Warehouse': 'Modifica warehouse',
'Edit Web API Channel': 'Editare il canale Web API',
'Editable?': 'Modificabile?',
'Education': 'Istruzione',
'Education Details': 'Dettagli Istruzione',
'Education details added': 'Aggiunti dettagli Istruzione',
'Education details deleted': 'Cancellati dettagli Istruzione',
'Education details updated': 'Aggiornati dettagli Istruzione',
'Education Level': 'Livello Istruzione',
'Education Level added': 'Aggiunto Livello Istruzione',
'Education Level deleted': 'Cancellato Livello Istruzione',
'Education Level updated': 'Aggiornato Livello Istruzione',
'Education Levels': 'Livelli Istruzione',
'Education materials received': 'Ricevuto materiale di istruzione',
'Education materials, source': 'Sorgente del materiale di istruzione',
'Effects Inventory': 'Inventario degli effetti',
'eg. gas, electricity, water': 'Es. Gas, elettricità, acqua',
'Eggs': 'Uova',
'Either a shelter or a location must be specified': 'Una Struttura di Accoglienza o ubicazione devono essere specificati',
'Either file upload or document URL required.': "Caricare il file o URL del documento richiesto.",
'Either file upload or image URL required.': "Caricamento file o URL dell'immagine richiesti.",
'Elderly person headed households (>60 yrs)': 'Persona anziana a guida della famiglia (>60 anni)',
'Electrical': 'elettrico',
'Electrical, gas, sewerage, water, hazmats': 'Elettrico, gas, rete fognaria, acqua, hazmats',
'Elevated': 'Elevato',
'Elevators': 'Ascensori',
'Email': 'EMAIL',
'Email (Inbound)': 'Email (in arrivo)',
'Email Account deleted': 'Utenza email cancellata',
'Email Accounts': 'Utenze email',
'Email Address': 'Indirizzo e-mail',
'Email Address to which to send SMS messages. Assumes sending to phonenumber@address': 'Indirizzo email al quale mandare messaggi SMS. Assumi di mandarli a numerodi telefono@indirizzo',
'Email Channels (Inbound)': 'Email Canali (In Arrivo)',
'Email deleted': 'Email cancellata',
'Email Details': 'Dettagli email',
'Email InBox': 'Casella di posta',
'Email Settings': 'Impostazioni email',
'Email Settings updated': 'Impostazioni email aggiornate',
'Email settings updated': 'Impostazioni e-mail aggiornate',
'Email: %s': 'Email: %s',
'Embalming': 'Imbalsamazione',
'Embassy': 'Ambasciata',
'Emergency Capacity Building project': "Progetto 'Emergency Capacity Building'",
'Emergency Contacts': 'Contatti Emergenza',
'Emergency Department': 'Reparto Emergenza',
'Emergency Shelter': 'Struttura di Accoglienza di Emergenza',
'Emergency Support Facility': "Struttura di supporto all'emergenza",
'Emergency Support Service': "Servizio di supporto all'emergenza",
'Emergency Telecommunications': 'Telecomunicazioni di emergenza',
'EMS Reason': 'Motivo EMS',
'EMS Status': 'Status EMS',
'Enable': 'Abilita',
'Enable in Default Config?': 'Abilita in configurazione di default?',
'Enable/Disable Layers': 'Abilitare/Disabilitare gli strati',
'Enabled': 'Abilitato',
'Enabled?': 'Abilitato?',
'enclosed area': 'Area chiusa',
'End date': 'Data di termine',
'End Date': 'Data di fine',
'End date should be after start date': 'la Data di fine deve essere successiva alla data di inizio',
'End of Period': 'Fine del periodo',
'English': 'Inglese',
'English - Spoken': 'Inglese - Parlato',
'English - Written': 'Inglese - Scritto',
'Enter a GPS Coord': 'immettere le coordinate GPS',
'Enter a name for the spreadsheet you are uploading (mandatory).': 'Inserire un nome per il foglio elettronico che si sta caricando (obbligatorio).',
'Enter a new support request.': 'Inserire una nuova richiesta di supporto.',
'Enter a number between %(min)g and %(max)g': 'Enter a number between %(min)g and %(max)g',
'Enter a title...': 'Enter a title...',
'Enter a unique label!': "Inserire un'etichetta univoca!",
'Enter a valid date before': 'Immettere prima una data valida',
'Enter a valid email': 'Inserire una valida email',
'Enter a valid future date': 'Inserire un valore valido di data futura',
'Enter a valid phone number': 'Immetti un numero telefonico valido',
'enter a value': 'Inserire un valore',
'Enter a value carefully without spelling mistakes, this field needs to match existing data.': 'Inserisci attantamente un valore senza errori di ortografia, questo campo deve corrispondere a dati esistenti.',
'Enter an integer between %(min)g and %(max)g': 'Enter an integer between %(min)g and %(max)g',
'Enter an integer greater than or equal to %(min)g': 'Enter an integer greater than or equal to %(min)g',
'Enter Coordinates:': 'Inserire le coordinate:',
'enter date and time': 'Inserire data e ora',
'enter date and time in range %(min)s %(max)s': 'Inserire data e ora in un intervallo %(min)s %(max)s',
'enter date and time on or after %(min)s': 'Inserire data e ora il o dopo %(min)s',
'enter date and time on or before %(max)s': 'Inserire data e ora entro %(max)s',
'Enter date on or before %(max)s': 'Enter date on or before %(max)s',
'Enter phone number in international format like +46783754957': 'Inserire il numero di telefono in formato internazionale come +46783754957',
'Enter some characters to bring up a list of possible matches': 'Immettere alcuni caratteri per aprire un elenco di possibili corrispondenze',
'Enter some characters to bring up a list of possible matches.': 'Immettere alcuni caratteri per aprire un elenco di possibili corrispondenze.',
'Enter tags separated by commas.': 'Immettere le tag separate da virgole.',
'Enter the same password as above': 'Immettere la stessa password come sopra',
'Enter your first name': 'Inserisci il tuo nome',
'Enter your organization': 'Inserisci la tua organizzazione',
'Entered': 'Immesso',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': "L'inserimento di un numero telefonico è facoltativo, ma così facendo consente di registrarsi per ricevere messaggi SMS.",
'Entity': 'Entità',
'Entity Information': "Informazione sull'entità",
'Entry deleted': 'Voce eliminata',
'Environment': 'Ambiente',
'Environmental Characteristics': 'Caratteristiche ambientali',
'Epidemic': 'Epidemia',
'Equipment': 'Apparecchiatura',
'ER Status': 'Stato ER',
'ER Status Reason': 'Motivo Stato ER',
'Error encountered while applying the theme.': "È stato rilevato un errore durante l'applicazione del tema.",
'Error in message': 'Errore nel messaggio',
'Error logs for "%(app)s"': 'Log degli errori per "%(app)s"',
'Error sending message': "Errore durante l'invio del messaggio",
'Error sending message!': "Errore durante l'invio del messaggio!",
'Error Tickets': 'Lista Errori',
'Errors': 'Errori',
'ESRI Shape File': 'Forma di file ESRI',
'Essential Staff?': 'Personale Essenziale?',
'Est. Delivery Date': 'Est. Data di consegna',
'Estimated # of households who are affected by the emergency': "N. stimato di famiglie che sono interessati dall' emergenza",
'Estimated # of people who are affected by the emergency': "N. stimato di persone che sono interessati dall' emergenza",
'Estimated Overall Building Damage': 'Stima Complessiva Edificio Danni',
'Estimated Population': 'Popolazione stimata',
'Estimated Reopening Date': 'Data presunta di riapertura',
'Estimated total number of people in institutions': 'Numero totale stimato di persone nelle Istituzioni',
'ETag': 'ETag',
'Ethnicity': 'Etnia',
'Euros': 'Euro',
'Evacuating': 'in Evacuazione',
'Evacuee requiring dedicated assistance at home': 'Evacuee requiring dedicated assistance at home',
'Evacuee subject to Social Welfare': 'Evacuee subject to Social Welfare',
'Evacuee subject to special or legal measures/penalities': 'Evacuee subject to special or legal measures/penalities',
'Evacuees': 'Evacuati',
'Evacuees Available Capacity (Day and Night)': 'Capienza disponibile per gli sfollati (giorno e notte)',
'Evacuees Available Capacity (Night only)': 'Capienza disponibile per gli sfollati (solo pernottamento)',
'Evacuees Capacity (Day and Night)': 'Capienza Totale Sfollati (giorno e notte)',
'Evacuees Capacity (Night only)': 'Capienza Totale Sfollati (solo pernottamento)',
'Evacuees Current Population (Day and Night)': 'Popolazione totale sfollati (giorno e notte)',
'Evacuues Current Population (Night only)': 'Popolazione totale sfollati (solo pernottamento)',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'Valutare le informazioni in questo messaggio. (Questo valore NON deve essere utilizzato in applicazioni di avviso pubblico )',
'EVASS': 'EVASS',
'EVASS - Sahana Eden for Italy': "EVASS – Sahana Eden per l'Italia",
'Event': 'Evento',
'Event added': 'Evento aggiunto',
'Event deleted': 'Evento eliminato',
'Event Details': "Dettagli relativi all'evento",
'Event ID: %s, ': 'Identificativo evento: %s, ',
'Event removed': 'Evento rimosso',
'Event Resource': "Risorsa d'evento",
'Event started: %s': 'Evento avviato: %s',
'Event Type': 'Tipo di evento',
'Event Type added': 'Tipo di evento aggiunto',
'Event Type Details': 'Dettagli sul tipo di evento',
'Event Type removed': 'Tipo di evento rimosso',
'Event Type updated': 'Tipo di evento aggiornato',
'Event Types': 'Tipi di eventi',
'Event updated': 'Evento aggiornato',
'Events': 'Eventi',
'Example': 'Esempio',
'Exceeded': 'Superato',
'Excellent': 'Eccellente',
'Exclude contents': 'Escludi contenuti',
'Excreta disposal': 'Escrementi di smaltimento',
'Execute a pre-planned activity identified in <instruction>': "Eseguire un'attività pre-pianificata identificata in",
'Exercise': 'Esercizio',
'EXERCISE': 'ESERCIZIO',
'Exercise= %s,': 'Esercizio= %s,',
'Exercise?': 'Esercizio?',
'Exercise? %s,': 'Esercizio? %s,',
'Exercises mean all screens have a watermark & all notifications have a prefix.': 'Esercizi significa che tutti gli schermi hanno una filigrana & tutte le notifiche hanno un prefisso.',
'Existing food stocks': 'Scorta alimentare esistente',
'Existing location cannot be converted into a group.': "L'ubicazione esistente non può essere convertita in un gruppo.",
'Existing Placard Type': 'Tipo di Affissi Esistente',
'Exits': 'Uscite',
'Experience': 'Esperienza',
'Expiry (months)': 'Scadenza (mesi)',
'Expiry Date': 'Data di scadenza',
'Explosion': 'Esplosione',
'Explosive Hazard': 'Pericolo Esplosivi',
'Export': 'Esporta',
'Export as': 'Esporta come',
'export as csv file': 'Esportare come file CSV',
'Export Data': 'Esportare i dati',
'Export Database as CSV': 'Esportare il Database come CSV',
'Export in %(format)s format': 'Esporta in %(format)s formato',
'Export in GPX format': 'Esportare in formato GPX',
'Export in KML format': 'Esportare in formato KML',
'Export in OSM format': 'Esportare in formato OSM',
'Export in PDF format': 'esporta in formato PDF',
'Export in RSS format': 'Esportazione in formato RSS',
'Export in XLS format': 'Esportare in formato XLS',
'Exterior and Interior': 'Esterno ed Interno',
'Exterior Only': 'Solo esterno',
'Extreme Winter Conditions': 'Condizioni invernali estreme',
'Eye Color': 'Colore degli occhi',
'Facebook': 'Facebook',
'Facebook Channels': 'Facebook Canali',
'Facial hair, color': 'Capelli del viso, colore',
'Facial hair, comment': 'Capelli del viso, commento',
'Facial hair, length': 'Capelli del viso, lunghezza',
'Facial hair, type': 'Capelli del viso, tipo',
'Facial hear, length': 'Peli sul viso, lunghezza',
'Facilities': 'Strutture',
'Facility': 'InfraStruttura',
'Facility added': 'InfraStruttura aggiunta',
'Facility Contact': 'Contatto Struttura',
'Facility deleted': 'Struttura cancellata',
'Facility Details': "Dettagli dell'infraStruttura",
'Facility Operations': 'Impianto Operazioni',
'Facility or Location': 'Struttura o Ubicazione',
'Facility removed': 'Funzione rimossa',
'Facility Status': 'Stato della Struttura',
'Facility Type': 'Tipo di Struttura',
'Facility Type added': 'Aggiunto tipo di Struttura',
'Facility Type deleted': 'Rimosso tipo di Struttura',
'Facility Type Details': 'Dettagli su tipo Struttura',
'Facility Type updated': 'Aggiornato tipo di Struttura',
'Facility Types': 'Tipi di Struttura',
'Facility updated': 'Impianto aggiornato',
'Facility/ Fenced-in Area': 'Struttura/ Area recintata',
'Facility/ Local Kitchen': 'Struttura/ Cucina locale',
'Facility/ Local Warehouse': 'Struttura/ Magazzino locale',
'Facility/ Local Pharmacy': 'Struttura/ Farmacia locale',
'Facility/ Local Canteen': 'Struttura/ Mensa locale',
'Facility/ Local Medical Area': 'Struttura/ Area medica locale',
'Facility/ Local Scholastic Facilities': 'Struttura/ Strutture scolastiche locali',
'Facility/ Local Conference Room': 'Struttura/ Sala conferenze locale',
'Facility/ Nursery School': 'Struttura/ Scuola materna',
'Fail': 'Fallire',
'Failed': 'Fallito',
'Failed!': 'Operazione non riuscita!',
'Fair': 'Discreto',
'Falling Object Hazard': 'Rischio di caduta oggetti',
'Families/HH': 'Famiglie/HH',
'Family': 'Famiglia',
'Family tarpaulins received': 'Copertoni per la famiglia ricevuti',
'Family tarpaulins, source': 'Copertoni per famiglia, origine',
'Family/friends': 'Famiglia/amici',
'Farmland/fishing material assistance, Rank': 'Assistenza su materiali agricoli/da pesca',
'fat': 'Grasso',
'Fatalities': 'Decessi',
'Fax': 'Numero fax',
'Feature Info': 'Informazione di caratteristica',
'Feature Layer': 'Strato di caratteristica',
'Feature Layer added': 'Strato di funzione aggiunto',
'Feature Layer deleted': 'Strato di funzione eliminato',
'Feature Layer Details': 'Dettagli dello strato delle funzioni',
'Feature Layer updated': 'Livello della funzione aggiornato',
'Feature Layers': 'Livelli funzione',
'Feature Namespace': 'Funzione Namespace',
'Feature Request': 'Richiesta funzione',
'Feature Type': 'Tipo funzione',
'Features Include': 'Le funzioni includono',
'feedback': 'Feedback',
'Female': 'Femminile',
'female': 'Femminile',
'Female headed households': 'Famiglie con capofamiglia femminile',
'Few': 'Pochi',
'Field': 'Campo',
'Field Hospital': 'Ospedale da campo',
'File': 'File',
'Files': 'File',
'Fill in Latitude': 'Inserire la latitudine',
'Fill in Longitude': 'Inserire la longitudine',
'fill in order: day(2) month(2) year(4)': 'Inserire: giorno (2) mese (2) anno (4)',
'fill in order: hour(2) min(2) day(2) month(2) year(4)': 'Inserire : ore (2) minuti (2) giorno (2) mese (2) anno (4)',
'fill in order: hour(2) min(2) month(2) day(2) year(4)': 'Inserire : ore (2) minuti (2) mese (2) giorno (2) anno (4)',
'fill in order: month(2) day(2) year(4)': 'Inserire : mese (2) giorno (2) anno (4)',
'Filter': 'Filtro',
'Filter by Location': 'Filtro per locazione',
'Filter Field': 'Campo Filtro',
'Filter Options': 'Opzioni di filtro',
'Filter Tweets by the date they were tweeted on': 'Filtra i tweets secondo la data',
'Filter Tweets by who tweeted them': 'Filtra i tweets per mittente',
'Filter type': 'Tipo di filtro',
'Filter Value': 'Valore del filtro',
'Finance Officer': 'Funzionario amministrativo',
'Find': 'Trova',
'Find a Person Record': 'Trovare un record persona',
'Find Dead Body Report': 'Trova report del cadavere',
'Find Hospital': 'Ricerca Ospedale',
'Find more': 'Trova altri',
'Find on Map': 'Trova sulla mappa',
'Find Person Record': 'Ricerca Record della Persona',
'Find Volunteers': 'Trovare Volontari',
'Finder': 'programma di ricerca',
'Fingerprint': 'Impronta digitale',
'Fingerprinting': 'Prendere le impronte digitali',
'Fingerprints': 'Impronte digitali',
'Finished Jobs': 'Lavori finiti',
'Fire': 'Incendio',
'Fire suppression and rescue': 'SOPPRESSIONE incendio e salvataggio',
'First': 'Primo',
'First name': 'Nome',
'First Name': 'Nome',
'Fiscal Code': 'Codice Fiscale',
'Fishing': 'Pesca',
'Flash Flood': 'Alluvione improvvisa',
'Flash Floods': 'Inondazioni improvvise',
'Flash Freeze': 'Congelamento improvviso',
'Fleet Manager': 'Gestore parco',
'Flexible Impact Assessments': 'Valutazioni su impatto flessibile',
'Flood': 'Alluvione',
'Flood Alerts': 'Avvisi di Alluvione',
'Flood Alerts show water levels in various parts of the country': "Gli avvisi sulla piene mostrano i livelli dell'acqua nelle diverse zone del paese",
'Flood Report': 'Report sulle piene',
'Flood Report added': 'Report sulle piene aggiunto',
'Flood Report deleted': 'Report sulle piene cancellato',
'Flood Report Details': 'Dettagli del Report sulle piene',
'Flood Report updated': 'Report sulle piene aggiornato',
'Flood Reports': 'Reports sulle piene',
'Floods': 'Inondazioni',
'Flow Status': 'Stato del flusso',
'Fluent': 'Fluente',
'flush latrine with septic tank': 'latrina a risciacquo con serbatoio settico',
'Fog': 'Nebbia',
'Folder': 'Cartella',
'Food': 'Alimentare',
'Food assistance': 'Assistenza alimentare',
'Food NFI Bucket': 'Cibo NFI Secchio',
'Food NFI Food': 'Cibo NFI Food',
'Food NFI Nonfood Item': 'Cibo NFI Nonfood Item',
'Food NFI Tent': 'Cibo NFI Tenda',
'Food Restrictions': 'Food Restrictions',
'Food Security': 'Sicurezza Alimentare',
'Food Service': 'Ristorazione',
'Food Supply': 'Approvvigionamento alimentare',
'Footer': 'piè di pagina',
'Footer file %s missing!': 'Il file piè di pagina %s è mancante!',
'For': 'per',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': "Per un paese questa sarebbe il codice ISO2, per una città, sarebbe il codice dell' aeroporto",
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': "Per ogni partner di sincronizzazione, c' è un lavoro predefinito di sync che viene eseguito dopo un determinato intervallo di tempo. È possibile anche impostare più lavori di sincronizzazione che possono essere personalizzati a seconda delle necessità. Fare clic sul collegamento sulla destra per iniziare.",
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'Per una migliore sicurezza, si consiglia di immettere un nome utente e la password e notificare agli amministratori di altre macchine nella propria organizzazione di aggiungere questo nome utente e password rispetto al proprio UUID nella sincronizzazione: Partner di Sincronizzazione',
'For Entity': 'Per entità',
'For live help from the Sahana community on using this application, go to': "Per una guida in tempo reale dalla comunità Sahana sull'utilizzo di questa applicazione, andare a",
'For messages that support alert network internal functions': 'Per i messaggi che supportano le funzioni interne di allarme per rete',
'For more details on the Sahana Eden system, see the': 'Per ulteriori dettagli sul sistema di Sahana Eden, consultare il',
'For more information, see': 'Per ulteriori informazioni, vedere',
'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': 'Per POP-3 questo è di solito 110 (995 per SSL), per IMAP questo è di solito 143 (993 per IMAP).',
'forehead': 'Fronte',
'Forest Fire': 'Incendi boschivi',
'form data': 'Dati del modulo',
'Form Settings': 'Impostazioni del modulo',
'Formal camp': 'Campo formale',
'Format': 'Formato',
"Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": "Formattare l'elenco dei valori di attributo e il valore RGB da usare per questi quale un oggetto JSON, per es. {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}",
'Forms': 'moduli',
'Found': 'Trovato',
'found': 'trovato',
'Foundations': 'Fondazioni',
'Free for domestic animals': 'Accesso libero agli animali domestici',
'Freezing Drizzle': 'Pioggerella ghiacciata',
'Freezing Rain': 'Pioggia gelata',
'Freezing Spray': 'Spray congelante',
'French': 'Francese',
'French - Spoken': 'Francese - Parlato',
'French - Written': 'Francese - Scritto',
'Friday': 'Venerdì',
'From': 'Da',
'From Inventory': "Dall'inventario",
'From Location': 'Ubicazione di partenza',
'From Organization': "Dall'organizzazione",
'from Twitter': 'Da Twitter',
'Frost': 'Gelo',
'Fulfil. Status': 'Soddisfare. stato',
'Fulfillment Status': 'Lo Stato della realizzazione',
'Full': 'Completo',
'Full beard': 'Barba folta',
'Fullscreen Map': 'Mappa a schermo intero',
'Function': 'Funzione ',
'Function Permissions': 'Autorizzazioni della Funzione',
'Functions available': 'Funzioni disponibili',
'Funding': 'Finanziamenti',
'Funding Organization': 'Organizzazione di finanziamento',
'Funeral': 'Funerale',
'Further Action Recommended': 'Ulteriore azione consigliata',
'Gale Wind': 'Tempesta di vento',
'Gap Analysis': 'Analisi gap',
'Gap Analysis Map': 'Analisi Mappa gap',
'Gap Analysis Report': 'Report su Analisi delle discrepanze',
'Gap Map': 'Mappa delle discrepanze',
'Gap Report': 'Report sulle discrepanze',
'Gateway Settings': 'Impostazioni del Gateway',
'Gateway settings updated': 'Impostazioni del Gateway aggiornate',
'Gender': 'Sesso',
'General': 'Generale',
'General Comment': 'Commento generico',
'General emergency and public safety': 'Emergenza generale e pubblica sicurezza',
'General information on demographics': 'Informazioni generali sui dati demografici',
'General Medical/Surgical': 'Medico Generico / Chirurgo',
'Generator': 'Generatore',
'Geocode': 'Geocode',
'Geocoder Selection': 'Selezione del Geocoder',
'GeoJSON Layer': 'Livello GeoJSON ',
'Geometry Name': 'Nome della Geometria',
'Geophysical (inc. landslide)': 'Geofisica (inc. frana)',
'GeoRSS Layer': 'Livello GeoRSS',
'Geotechnical': 'Geotecnico',
'Geotechnical Hazards': 'Rischi Geotecnici',
'Geraldo module not available within the running Python - this needs installing for PDF output!': "Modulo Geraldo non disponibile all'interno del Python in esecuzione - questo deve essere installato per l'emissione in formato PDF.",
'Get Feature Info': 'Ottenere informazioni sulla funzionalità',
'Get incoming recovery requests as RSS feed': 'Ottieni le richieste di ripristino in entrata come feed RSS',
'getting': 'Ottenere ',
'GIS integration to view location details of the Shelter': 'Integrazione GIS per visualizzare i dettagli della Struttura di Accoglienza',
'GIS Reports of Shelter': 'Reports GIS della Struttura di Accoglienza',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': "Fornire una breve descrizione dell'immagine, ad esempio cosa può essere visto nell'immagine (facoltativo).",
'Give information about where and when you have seen them': 'Fornire informazioni su dove e quando li avete visto',
'Global Messaging Settings': 'Impostazioni di messaggistica globale',
'Go': 'Vai',
"Go to %(url)s, sign up & then register your application. You can put any URL in & you only need to select the 'modify the map' permission.": "Vai a %(url)s, autenticati e poi registra la tua applicazione. È possibile inserire qualsiasi URL in & è sufficiente selezionare l'autorizzazione 'modifica la mappa'.",
'Go to Request': 'vai alla Richiesta',
'Goatee': 'Pizzetto',
'Good': 'Buono',
'Good Condition': 'Buona Condizione',
'Goods Received Note': 'Nota della Merci Ricevute',
'Google Layer': 'Livello Google',
'Government': 'Governo',
'Government building': 'Edificio governativo',
'Government UID': 'UID governativo',
'GPS Marker': 'Indicatore GPS',
'GPS Track': 'Traccia GPS',
'GPS Track File': 'File di traccia GPS',
'GPX Layer': 'Livello GPX',
'GPX Track': 'Traccia GPX',
'Grade': 'Grado',
'Great British Pounds': 'Sterline Britanniche',
'Greater than 10 matches. Please refine search further': 'Più di 10 occorrenze. Affinare ulteriormente la ricerca',
'Greek': 'greco',
'green': 'verde',
'Green': 'Verde',
'grey': 'Grigio',
'Grid': 'Griglia',
'Ground movement, fissures': 'Movimento della terra, crepe',
'Ground movement, settlement, slips': 'Movimento della terra, assestamento, frane',
'Ground/ Beaten Earth': 'Terreno/ Terra battuta',
'Ground/ Cemented': 'Terreno/ Cementato',
'Ground/ Other': 'Terreno/ Altro',
'Ground/ Paved': 'Terreno/ Asfaltato',
'Ground/ Sandy': 'Terreno/ Sabbioso',
'Ground/ Synthetic Material': 'Terreno/ Materiale sintetico',
'Ground/ Untreated Plot': 'Terreno/ Piano non curato',
'Group': 'Gruppo',
'Group added': 'Gruppo aggiunto',
'Group deleted': 'Gruppo cancellato',
'Group description': 'Descrizione del gruppo',
'Group Description': 'Descrizione del Gruppo',
'Group Details': 'Dettagli del gruppo',
'Group Head': 'Capogruppo',
'Group ID': 'Group ID',
'Group Leader': 'Capogruppo',
'Group Member added': 'Membro di gruppo aggiunto',
'Group Members': 'Membri del gruppo',
'Group Memberships': 'Appartenenze al gruppo',
'Group Name': 'Nome gruppo',
'Group Title': 'Titolo gruppo',
'Group Type': 'Tipo del gruppo',
'Group updated': 'Gruppo aggiornato',
'Grouped by': 'Raggruppato per',
'Groups': 'Gruppi',
'Groups removed': 'Gruppi rimossi',
'Guest': 'utente generico',
'Hail': 'Grandine',
'Hair Color': 'Colore dei capelli',
'Hair Comments': 'Commenti sui capelli',
'Hair Length': 'Lunghezza dei capelli',
'Hair Style': 'Pettinatura',
'Has data from this Reference Document been entered into Sahana?': 'I dati di questo documento di riferimento sono stati inseriti in Sahana?',
'Has the Certificate for receipt of the shipment been given to the sender?': "Il certificato per la ricevuta della spedizione e' stato dato al mittente?",
'Has the GRN (Goods Received Note) been completed?': "E' stata completata la GRN (Goods Received Note)?",
'Hazard Pay': 'Indennità di rischio',
'Hazardous Material': 'Materiale pericoloso',
'Hazardous Road Conditions': 'Condizioni stradali pericolose',
'Hazards': 'Rischi',
'Header Background': "Sfondo dell'intestazione",
'Header background file %s missing!': "File di sfondo dell'intestazione %s mancante!",
'Headquarters': 'Direzione centrale',
'Health': 'Salute',
'Health care assistance, Rank': 'Assistenza sanitaria, Classificazione',
'Health center': 'Centro di cura',
'Health center with beds': 'Centro di cura con letti',
'Health center without beds': 'Centro di cura senza letti',
'Health services status': 'Stato dei servizi sanitari',
'Healthcare Worker': 'Lavoratore della Sanità',
'Heat and Humidity': 'Calore e Umidità',
'Heat Wave': 'Onda di Caldo',
'Height': 'Altezza',
'Height (cm)': 'Altezza (cm)',
'Height (m)': 'Altezza (m)',
'Heliport': 'Eliporto',
'Help': 'Guida',
'Helps to monitor status of hospitals': 'Aiuta a monitorare lo stato degli ospedali',
'Helps to report and search for missing persons': 'Consente la notifica e la ricerca di persone scomparse',
'here': 'in questo punto',
'Here are the solution items related to the problem.': 'Qui ci sono gli elementi della soluzione correlati al problema.',
'Heritage Listed': 'Beni patrimoniali Elencati',
'Hide': 'Nascondi',
'Hide Table': 'Nascondi la tabella',
'Hierarchy': 'Gerarchia',
'Hierarchy Level 0 Name (i.e. Country)': 'Livello di Gerarchia 0 Nome (ad esempio paese)',
'Hierarchy Level 1 Name (e.g. State or Province)': 'Livello gerarchia 1 Nome (ad esempio Stato o Provincia)',
'Hierarchy Level 2 Name (e.g. District or County)': 'Livello gerarchia 2 Nome (ad esempio Distretto o Contea)',
'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'Livello gerarchia 3 Nome (ad esempio Città / Paese / Villaggio)',
'Hierarchy Level 4 Name (e.g. Neighbourhood)': 'Livello gerarchia 4 Nome (ad es. Vicinato)',
'Hierarchy Level 5 Name': 'Nome della gerarchia Livello 5',
'high': 'Elevato',
'High': 'Elevato',
'High Water': 'Acqua alta',
'Highest Priority Open Requests': 'Richieste aperte a più alta priorità',
'Hindu': 'Indù',
'History': 'Cronologia',
'Hit the back button on your browser to try again.': 'Premere il pulsante pagina precedente del browser per tentare nuovamente.',
'Holiday Address': 'Indirizzo durante le vacanze',
'Home': 'Casa',
'Home Address': 'Indirizzo di casa',
'Home Country': 'Paese di appartenenza',
'Home Crime': 'Crimine casalingo',
'Home Help': 'Aiuto a casa',
'Home Phone': 'Telefono di casa',
'Homeless Shelter Area': 'Area Struttura di Accoglienza per senzatetto',
'Homeless Shelter Centre': 'Centro Struttura di Accoglienza per senzatetto',
'Hospital': 'Ospedale',
'Hospital Details': "Dettagli dell'Ospedale",
'Hospital information added': "Informazione sull'Ospedale aggiunta",
'Hospital information deleted': "Informazione sull'Ospedale eliminata",
'Hospital information updated': "Informazione sull'Ospedale aggiornata",
'Hospital status assessment.': "Valutazione dello stato dell'Ospedale.",
'Hospital Status Report': "Report sullo stato dell'Ospedale",
'Hospitals': 'Ospedali',
'Host': 'Ospite',
'Hot Spot': 'area sensibile',
'Hotel Accommodation': 'Sistemazione in Albergo',
'Hour': 'Ora',
'Hourly': 'Ogni ora',
'hourly': 'Su base oraria',
'Hours': 'Ore',
'Hours added': 'Ore aggiunte',
'Hours deleted': 'Ore cancellate',
'Hours Details': 'Dettagli delle Ore',
'Hours updated': 'Ore aggiornate',
'Household kits received': 'Kit domestici ricevuti',
'Household kits, source': 'Kit domestici, origine',
'households': 'domestici',
'Housing Unit Capacity (Day and Night)': "Capienza dell'unità abitativa (giorno e notte)",
'Housing Unit Capacity (Night)': "Capienza dell'unità abitativa (Notte)",
'Housing Unit Current Population': 'Popolazione attuale delle Unità Abitative',
'Housing Unit Name': 'Nome delle Unità Abitative',
'Housing unit with currently animal presence': 'Unità abitativa con presenza di animale',
'Housing Units': 'Unità abitative',
'How data shall be transferred': 'How data shall be transferred',
'How does it work?': 'Funzionamento',
'How is this person affected by the disaster? (Select all that apply)': 'In che modo questa persona è coinvolta dalla calamità? (Seleziona tutte le opzioni valide)',
'How local records shall be updated': 'How local records shall be updated',
'How long will the food last?': 'Quanto tempo durerà il cibo?',
'How many Boys (0-17 yrs) are Dead due to the crisis': 'Quanti Ragazzi (0-17 anni) sono morti a causa della crisi',
'How many Boys (0-17 yrs) are Injured due to the crisis': 'Quanti Ragazzi (0-17 anni) vengono feriti a causa della crisi',
'How many Boys (0-17 yrs) are Missing due to the crisis': 'Quanti ragazzi (0-17 anni) risultano mancanti a causa della crisi',
'How many days will the supplies last?': 'Quanti giorni durerà la fornitura?',
'How many Girls (0-17 yrs) are Dead due to the crisis': 'Quante Ragazze (0-17 anni) sono morte a causa della crisi',
'How many Girls (0-17 yrs) are Injured due to the crisis': 'Quante Ragazze ( 0 - 17 anni ) vengono ferite a causa della crisi',
'How many Girls (0-17 yrs) are Missing due to the crisis': 'Quante Ragazze ( 0 - 17 anni ) risultano mancanti a causa della crisi',
'How many Men (18 yrs+) are Dead due to the crisis': 'Quanti uomini ( 18 anni o più ) sono morti a causa della crisi',
'How many Men (18 yrs+) are Injured due to the crisis': 'Quanti uomini (18 anni+) sono stati feriti a causa della crisi',
'How many Men (18 yrs+) are Missing due to the crisis': 'Quanti uomini (18 anni+) risultano dispersi a causa della crisi',
'How many new cases have been admitted to this facility in the past 24h?': 'Quanti nuovi casi sono stati ammessi a questa Struttura nelle ultime 24h?',
'How many of the patients with the disease died in the past 24h at this facility?': 'Quanti pazienti con la malattia sono morti nelle ultime 24h in questa Struttura?',
'How many patients with the disease are currently hospitalized at this facility?': 'Quanti pazienti con la malattia sono attualmente ricoverati in questa Struttura?',
'How many Women (18 yrs+) are Dead due to the crisis': "Quante donne (di eta' superiore ai 18 anni) sono morte a causa della crisi",
'How many Women (18 yrs+) are Injured due to the crisis': "quante donne (di eta' superiore ai 18 anni) sono rimaste ferite a causa della crisi",
'How many Women (18 yrs+) are Missing due to the crisis': 'Quante donne (18 o più anni) risultano mancanti a causa della crisi',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': "Quanti dettagli vengono visualizzati. Un alto livello di Zoom significa molti dettagli, ma non un'ampia area. Un basso livello di Zoom significa vedere un'ampia area, ma non un elevato livello di dettaglio.",
'HR Manager': 'Responsabile HR',
'HTML': 'HTML',
'Human Resource': 'Risorse umane',
'Human Resource added': 'Aggiunte Risorse Umane',
'Human Resource assigned': 'Risorse umane assegnate',
'Human Resource Assignment updated': 'Assegnazione risorse umane aggiornata',
'Human Resource Details': 'Dettagli risorse umane',
'Human Resource Management': 'Gestione risorse umane',
'Human Resource removed': 'Risorsa Umana rimossa',
'Human Resource unassigned': 'Risorse umane non assegnate',
'Human Resource updated': 'Risorsa Umana aggiornata',
'Human Resources': 'Risorse umane',
'Human Resources Management': 'Gestione risorse umane',
'Humanitarian NGO': 'ONG umanitarie',
'Hurricane': 'Uragano',
'Hurricane Force Wind': 'Venti da Uragano',
'Hygiene': 'Igiene',
'Hygiene kits received': 'Kit di igiene ricevuti',
'Hygiene kits, source': 'Origine, kit di igiene',
'Hygiene NFIs': 'Igiene, oggetti non commestibili',
'Hygiene practice': 'Pratica igienica',
'Hygiene problems': 'Problemi di igiene',
'I agree to the %(terms_of_service)s': "Sono d'accordo con i %(terms_of_service)s",
'I am available in the following area(s)': 'Sono disponibile nelle seguenti aree',
'Ice Pressure': 'Pressione del ghiaccio',
'ID': 'Identificativo',
'ID Tag': 'Tag ID',
'ID Tag Number': 'Numero Tag ID',
'ID Type': 'Tipo ID',
'Identification': 'Identificazione',
'Identification Report': 'Report di identificazione',
'Identification Reports': 'Reports di identificativo',
'Identification Status': "Stato d'Identificazione",
'identified': 'Identificato',
'Identified as': 'Identificato come',
'Identified by': 'Identificato da',
'Identifier Name for your Twilio Account.': "Nome identificatore per l'account Twilio",
'Identities': 'Identità',
'Identity': 'Identità',
'Identity added': 'Identità aggiunta',
'Identity deleted': 'Identità cancellata',
'Identity Details': 'Dettagli sulla identità',
'Identity Documents': 'Identity Documents',
'Identity updated': 'Identità aggiornata',
'If a ticket was issued then please provide the Ticket ID.': "Se è stato emesso un biglietto si prega di fornire l'ID del biglietto.",
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': "Se un utente verifica che essi possiedono un indirizzo e-mail con questo dominio, il campo Approvatore viene utilizzato per determinare se & da chi è richiesta un'ulteriore approvazione.",
'If it is a URL leading to HTML, then this will downloaded.': 'Se è un URL che porta ad un file HTML, allora questo sarà scaricato.',
'If neither are defined, then the Default Marker is used.': 'Se nessuno dei due è definito, allora viene usato il Marcatore Predefinito.',
'If no marker defined then the system default marker is used': "Se nessun indicatore è definito, allora è utilizzato l'indicatore predefinito di sistema",
'If no, specify why': 'Se no, specificare perché',
'If none are selected, then all are searched.': 'Se nessuno viene selezionato, allora tutti vengono ricercati.',
'If not found, you can have a new location created.': 'Se non si trova, è possibile disporre di una nuova ubicazione',
"If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": "Se selezionato, allora questa ubicazione dei beni sarà aggiornata ogni volta che l' ubicazione della persona viene aggiornata.",
'If the location is a geographic area, then state at what level here.': "Se l'ubicazione è un'area geografica, allora dichiarare a quale livello in questa sede.",
'If the person counts as essential staff when evacuating all non-essential staff.': "Se la persona è considerata personale essenziale durante l'evacuazione di tutto il personale non essenziale.",
'If the request type is "Other", please enter request details here.': 'Se il tipo di richiesta è "Altro", immettere qui i dettagli della richiesta.',
'If the service requries HTTP BASIC Auth (e.g. Mobile Commons)': 'Se il servizio richiede Autorizzazione HTTP BASIC (e.g. Mobile Commons)',
'If there are multiple configs for a person, which should be their default?': 'Se ci sono più configurazioni per una persona, quale dovrebbe essere la loro impostazione predefinita?',
"If this configuration is displayed on the GIS config menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": "Se questa configurazione è visualizzato sul menu di configurazione del GIS, dargli un nome da utilizzare nel menu. Il nome per la configurazione personale della mappa verrà impostato al nome dell'utente.",
"If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": "Se questa configurazione rappresenta una regione per il menu Regioni, assegnategli un nome da utilizzare nel menu. Il nome della configurazione personale della mappa sarà impostato con il nome dell'utente.",
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'Se questo campo viene compilato allora un utente, che specifica questa organizzazione quando si firma, verrà assegnato come elemento del personale di questa organizzazione a meno che il dominio non corrisponde al campo di dominio.',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'Se questo campo viene compilato ogni utente con questo valore nel dominio verrà automaticamente assegnato come Personale di questa organizzazione',
'If this is set to True then mails will be deleted from the server after downloading.': 'Se questo valore è impostato a True (vero) le mail verranno cancellate dal server dopo il download.',
"If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": "Se viene selezionato, questa diventa la sede base dell'utente e questa posizione verra' usata per visualizzare l'utente sulla mappa.",
'If this record should be restricted then select which role is required to access the record here.': "Se l'accesso al record deve essere limitatato selezionare il ruolo che può accedere al record",
'If this record should be restricted then select which role(s) are permitted to access the record here.': "Se l'accesso al record deve essere limitatato selezionare i ruoli che possono accedere al record",
'If yes, specify what and by whom': 'Se sì, specificare cosa e da chi',
'If yes, which and how': 'Se sì, quali e come',
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'Se non si immette un documento di riferimento, la tua e-mail verrà visualizzato per consentire la verifica di questi dati.',
"If you don't see the Cluster in the list, you can add a new one by clicking link 'Add New Cluster'.": "Se il cluster non è nella lista, può essere aggiunto selezionando 'Aggiungi un nuovo cluster'",
"If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": "Se non si vede l'ospedale nell'elenco, è possibile aggiungerne uno nuovo facendo clic sul collegamento 'Aggiungi Ospedale'.",
"If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'.": "Se non si vede l'Ufficio nell'elenco, è possibile aggiungerne uno nuovo facendo click sul collegamento 'Aggiungi Ufficio'.",
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": "Se l'Organizzazione non è nella lista, può essere aggiunta selezionando 'Crea Organizzazione'",
"If you don't see the Sector in the list, you can add a new one by clicking link 'Create Sector'.": "Se il Settore non è nella lista, può essere aggiunta selezionando 'Crea Settore'",
"If you don't see the Type in the list, you can add a new one by clicking link 'Add Region'.": "Se il Tipo non è nella lista, può essere aggiunto selezionando 'Aggiungi Regione'",
"If you don't see the Type in the list, you can add a new one by clicking link 'Create Facility Type'.": "Se il Tipo non è nella lista, può essere aggiunto selezionando 'Crea il Tipo Struttura'",
"If you don't see the Type in the list, you can add a new one by clicking link 'Create Office Type'.": "Se il Tipo non è nella lista, può essere aggiunto selezionando 'Crea il Tipo Ufficio'",
"If you don't see the Type in the list, you can add a new one by clicking link 'Create Organization Type'.": "Se il Tipo non è nella lista, può essere aggiunto selezionando 'Crea il Tipo Organizzazione'",
"If you enter a foldername then the layer will appear in this folder in the Map's layer switcher. A sub-folder can be created by separating names with a '/'": "Se si immette un nome di cartella allora il livello sarà visualizzato in questa cartella nello switcher del livello della mappa. Una sotto-cartella può essere creata separando i nomi con una ' /'",
'If you know what the Geonames ID of this location is then you can enter it here.': 'Se si conosce qual è lo Geonames ID di questa ubicazione allora è possibile immetterlo qui.',
'If you know what the OSM ID of this location is then you can enter it here.': "Se si conosce qual è l'ID OSM di questa ubicazione allora è possibile immetterlo qui.",
'If you need to add a new document then you can click here to attach one.': 'Se è necessario aggiungere un nuovo documento allora è possibile fare clic qui per allegarne uno.',
'If you want several values, then separate with': 'Se si desidera più valori, allora separarli con',
'If you would like to help, then please': "Se si desidera essere d'aiuto, si prega",
'If you would like to help, then please %(sign_up_now)s': 'Se hai bisogno di aiuto, allora per favore %(sign_up_now)s',
'ignore': 'Ignora',
'Ignore Errors?': 'Ignorare gli errori?',
'Illegal Immigrant': 'Immigrato illegale',
'Image': 'Immagine',
'Image added': 'Immagine aggiunta',
'Image deleted': 'Immagine cancellata',
'Image Details': "Dettagli dell'immagine",
'Image File(s), one image per page': "File dell'immagine, un'immagine per pagina",
'Image Tags': 'Tag immagine',
'Image Type': 'Tipo di immagine',
'Image updated': 'Immagine aggiornata',
'Image Upload': 'Caricamento immagine',
'Imagery': 'Immagini',
'Images': 'Immagini',
'Immediately': 'Immediatamente',
'Impact added': 'Impatto aggiunto',
'Impact Assessments': "Valutazioni dell'impatto",
'Impact deleted': 'Impatto eliminato',
'Impact Details': "Dettagli dell'impatto",
'Impact Type': 'Tipo di impatto',
'Impact Type added': 'Tipo di impatto aggiunto',
'Impact Type deleted': 'Tipo di impatto cancellato',
'Impact Type Details': 'Dettagli del tipo di impatto',
'Impact Type updated': 'Tipo di impatto aggiornato',
'Impact Types': 'Tipi di impatto',
'Impact updated': 'Impatto aggiornato',
'Impacts': 'Impatti',
'Import': 'Importa',
'Import & Export Data': 'Importazione & Esportazione Dati',
'Import and Export': 'Importazione ed esportazione',
'Import Awards': 'Importa i Premi',
'Import Base Stations': 'Importa stazioni base',
'Import Certificates': 'Importa Certificati',
'Import Contacts': 'Importa Contatti',
'Import Courses': 'Importa Corsi',
'Import Data': 'Importa i dati',
'Import Data for Theme Layer': 'Importa dati per il livello di tema',
'Import Departments': 'Importa Dipartimenti',
'Import Event Types': 'Importa tipi di evento',
'Import Facilities': 'Importa strutture',
'Import Facility Types': 'Importa il Tipo di Struttura',
'Import from CSV': 'Importa da CSV',
'Import from OpenStreetMap': 'Importa da OpenStreetMap',
'Import from Ushahidi Instance': "Importa da un'istanza di Ushahidi",
'Import Hours': 'Importa Ore',
'Import if Master': 'Importa se Master',
'Import Incident Types': 'Importa tipi di incidente',
'Import Jobs': 'Importa Lavori',
'Import Layers': 'Importa Livelli',
'Import Location data': 'Importa dati di locazione',
'Import Locations': 'Importa le Ubicazioni',
'Import multiple tables as CSV': 'Importa più tabelle come CSV',
'Import Offices': 'Importa gli uffici',
'Import Organizations': 'Importa Organizzazioni',
'Import Participants': 'Importa Partecipanti',
'Import PoI Types': 'Importa tipi di PoI',
'Import Points of Interest': 'Importa Punti di Interesse',
'Import Project Organizations': 'Importa organizzazioni di progetto',
'Import Projects': 'Importa progetti',
'Import Resource Types': 'Importa tipi di risorsa',
'Import Resources': 'Importa risorse',
'Import Sector data': 'Importa dati di settore',
'Import Service data': 'Importa dati di servizio',
'Import Services': 'Importa Servizi',
'Import Staff': 'Importa Personale',
'Import Tasks': 'Importa Attività',
'Import Training Events': 'Importa Eventi di Formazione',
'Import Training Participants': 'Importa Partecipanti alla Formazione',
'Import Users': 'Importa utenti',
'Import Volunteer Cluster Positions': 'Importa le posizioni dei Gruppi di Volontari',
'Import Volunteer Cluster Types': 'Importa i Tipi dei Gruppi di Volontari',
'Import Volunteer Clusters': 'Importa i Gruppi di Volontari',
'Import Volunteers': 'Importa Volontari',
'Import/Export': 'Importa/Esporta',
'Important': 'Importante',
'Importantly where there are no aid services being provided': 'Importante evidenziare dove non vengono forniti servizi di aiuto',
'Importing data from spreadsheets': 'Importazione dei dati da fogli elettronici',
'Improper decontamination': 'Decontaminazione impropria',
'Improper handling of dead bodies': 'Scorretta gestione dei cadaveri',
'In': 'Dentro',
'In Catalogs': 'Nei Cataloghi',
'in Deg Min Sec format': 'In formato Gradi Min Sec',
'In error': 'In errore',
'in GPS format': 'In formato GPS',
'In Inventories': 'Negli Inventari',
'In order to be able to edit OpenStreetMap data from within %(name_short)s, you need to register for an account on the OpenStreetMap server.': 'Al fine di essere in grado di modificare i dati di OpenStreetMap %(name_short)s, è necessario registrarsi con un account sul server OpenStreetMap.',
'In Process': 'In corso',
'In Progress': 'In corso',
'In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Nella finestra layout la mappa viene massimizzata per riempire la finestra, quindi non è necessario impostare qui un valore grande.',
'inactive': 'inattivo',
'Inbound Mail Settings': 'Impostazioni posta in entrata',
'InBox': 'In Arrivo',
'Incident': 'Incidente',
'Incident added': 'Incidenti aggiunti',
'Incident Categories': 'Categorie Incidente',
'Incident Details': 'Dettagli incidente',
'Incident ID: %s, ': 'ID incidente: %s, ',
'Incident removed': 'Incidenti rimossi',
'Incident Report': 'Report di incidente',
'Incident Report added': 'Report incidente aggiunto',
'Incident Report deleted': 'Report Incidente eliminato',
'Incident Report Details': "Dettagli Report d'Incidente",
'Incident Report removed': 'Report su incidente rimosso',
'Incident Report updated': 'Report su incidente aggiornato',
'Incident Reporting': 'Segnala Incidente',
'Incident Reporting System': 'Sistema di segnalazione Incidenti',
'Incident Reports': 'Reports su incidenti',
'Incident started: %s': 'Incidenti iniziato: %s',
'Incident Type': 'Tipo di incidente',
'Incident Type added': 'Tipo di incidente aggiunto',
'Incident Type Details': 'Dettagli tipo di incidente',
'Incident Type removed': 'Tipo di incidente rimosso',
'Incident Type updated': 'Tipo di incidente aggiornato',
'Incident Types': 'Tipi di incidente',
'Incident updated': 'Incidenti aggiornati',
'Incidents': 'Incidenti',
'Include Entity Information?': 'Includi informazioni identità?',
"includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": 'Include un GroundOverlay o ScreenOverlay che non sono ancora supportati in OpenLayers, quindi potrebbe non funzionare correttamente.',
'Incoming': 'Entrante',
'Incoming Shipment canceled': 'Spedizione in arrivo annullata',
'Incoming Shipment updated': 'Spedizione in arrivo aggiornata',
'Incomplete': 'Incompleto',
'Incorrect parameters': 'Parametri non corretti',
'Independent Accommodation': 'Sistemazione Indipendente',
'Individuals': 'Singoli',
'Industrial': 'Industriale',
'Industrial Crime': 'Crimine industriale',
'Industry Fire': 'Incendio industriale',
'Infant (0-1)': 'Infante (0-1)',
'Infectious Disease': 'Malattie infettive',
'Infectious Disease (Hazardous Material)': 'Malattie infettive (materiali pericolosi)',
'Infectious Diseases': 'Malattie infettive',
'Infestation': 'Infestazione',
'Informal camp': 'Campo informale',
'Informal Leader': 'Leader informale',
'Information gaps': 'Lacune nelle informazioni',
'Infrastructure Building': 'Palazzo',
'Infrastructure Church': 'Chiesa',
'Infrastructure Community Building': 'Palazzi della Comunità',
'Infrastructure Government Office': 'Uffici Governativi',
'Infrastructure Hospital': 'Ospedali',
'Infrastructure Hotel': 'Hotel',
'Infrastructure House': 'Case',
'Infrastructure NGO Office': 'Uffici NGO',
'Infrastructure Police Station': 'Stazione di Polizia',
'Infrastructure School': 'Scuola',
'Infrastructure University': 'Università',
'Infusion catheters available': 'Cateteri per trasfusione disponibili',
'Infusion catheters need per 24h': 'Necessità di cateteri per trasfusione nelle 24h',
'Infusion catheters needed per 24h': 'Cateteri per trasfusione necessari per 24h',
'Infusions available': 'Infusioni disponibili',
'Infusions needed per 24h': 'Infusioni necessarie per 24h',
'Inherited?': 'Ereditato?',
'Initials': 'Iniziali',
'injured': 'Feriti',
'input': 'Input',
'insert new': 'Inserisci nuovo',
'insert new %s': 'Inserisci nuovo %s',
'Insert the fiscal code with no spaces': 'Insert the fiscal code with no spaces',
'Inspected': 'Ispezionato',
'Inspection Date': 'Data ispezione',
'Inspection date and time': "Data e Ora dell'ispezione",
'Inspection time': "Tempo d'ispezione",
'Inspector ID': 'ID ispettore',
'Instant Porridge': 'Porridge istantaneo',
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": "Invece della sincronizzazione automatica dagli altri peers sulla rete, è possibile anche la sincronizzazione dai file, che è necessaria quando non c'è alcuna rete. È possibile utilizzare questa pagina per importare dati di sincronizzazione dai file ed anche esportare i dati ai file sincronizzati. Fare clic sul collegamento sulla destra per accedere a questa pagina.",
'Instead of showing individual features, aggregate by Location Hierarchy': 'Invece di mostrare le singole funzionalità, aggrega per gerarchia di ubicazione',
'Institution': 'Istituzione',
'Instructor': 'Istruttore',
'Insufficient': 'Insufficiente',
'insufficient number of pages provided': 'Numero di pagine fornito insufficiente',
'Insufficient Privileges': 'Privilegi insufficienti',
'Insufficient vars: Need module, resource, jresource, instance': 'Variabili insufficienti: è necessario modulo, risorsa, jresource, istanza',
'Intake Items': 'Ricevere voci',
'Integrated bath within housing unit': 'Unità abitativa con vasca da bagno',
'Integrated shower within housing unit': 'Unità abitativa con doccia',
'Intergovernmental': 'Intergovernativo',
'Intergovernmental Organization': 'Organizzazione intergovernativa',
'Interior walls, partitions': 'Pareti interne, partizioni',
'Internal State': 'Stato interno',
'International NGO': 'ONG internazionale',
'International Organization': 'Organizzazione internazionale',
'Interpreter / Cultural Mediator': 'Interpreter / Cultural Mediator',
'Interpreter / Cultural Mediator Required': 'Interpreter / Cultural Mediator Required',
'Interview taking place at': "L'intervista si svolge a",
'invalid': 'invalido',
'Invalid': 'Invalido',
'Invalid data: record %(id)s not accessible in table %(table)s': 'Dati non validi: record %(id)s non accessibile nella tabella %(table)s',
'Invalid email': 'Invalid email',
'Invalid form (re-opened in another window?)': "Forma non valida (riaperto in un'altra finestra?)",
'Invalid Location!': 'Ubicazione non valida!',
'Invalid phone number': 'Numero di telefono non valido',
'Invalid Query': 'Query non valida',
'invalid request': 'Richiesta non valida',
'Invalid request!': 'Richiesta non valida!',
'Invalid Site!': 'Sito non valido!',
'Invalid ticket': 'Ticket non valido',
'Inventories': 'Inventari',
'Inventory': 'Inventario',
'Inventory Item': 'Voce di inventario',
'Inventory Item Details': "Dettagli dell' elemento dell' inventario",
'Inventory Item updated': 'Voce di inventario aggiornata',
'Inventory Items': 'Voci di inventario',
'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': 'Elementi inventario includono sia forniture utilizzabili & quelli che saranno trasformati in Beni alla loro destinazione.',
'Inventory Management': 'Gestione inventario',
'Inventory of Effects': 'Inventario degli effetti',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': "È un repository centrale in linea in cui possono essere conservate le informazioni su tutte le vittime e famiglie del disastro, soprattutto vittime identificate, evacuati e persone sfollate. Informazioni quali nome, età, numero di contatto, numero di carta d'identità, ubicazione spostata, e altri dettagli vengono catturati. Dettagli di foto e impronta digitale delle persone possono essere caricate nel sistema. Le persone possono inoltre essere acquisite dal gruppo per efficienza e convenienza.",
'Is editing level L%d locations allowed?': 'È consentita la modifica delle ubicazioni del livello L%d?',
'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': "è previsto che sia composto di vari moduli secondari che lavorano insieme per fornire complesse funzionalità per la gestione degli aiuti e degli elementi di progetto da parte di un'organizzazione. questo include un sistema di approvvigionamento, di gestione magazzini, di tracciatura asset, di gestione fornitori, gestione parco veicoli, acquisti, gestione finanziaria e altre risorse.",
'Is it safe to collect water?': 'È sicuro raccogliere acqua?',
'Is this a strict hierarchy?': 'È questa una gerarchia rigida?',
'Issuing Authority': 'Autorità emittente',
'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'Essa cattura non solo i luoghi in cui sono attivi, ma anche le informazioni sulla gamma dei progetti che stanno fornendo in ogni settore.',
'Italian': 'Italiano',
'Item': 'Elemento',
'Item added': 'Elemento aggiunto',
'Item added to Inventory': "Voce aggiunta all'Inventario",
'Item Added to Shipment': 'Elemento aggiunto a Spedizione',
'Item added to shipment': 'Elemento aggiunto a spedizione',
'Item already in budget!': 'Voce già nel bilancio!',
'Item already in Bundle!': 'Elemento già nel Bundle!',
'Item already in Kit!': 'Elemento già nel Kit!',
'Item Catalog Details': 'Dettagli voce Catalogo',
'Item Categories': 'Categorie elementi',
'Item Category': 'Categoria elementi',
'Item Category added': 'Categoria elementi aggiunta',
'Item Category deleted': 'Categoria elementi eliminata',
'Item Category Details': 'Dettagli di categoria elementi',
'Item Category updated': 'Categoria elementi aggiornata',
'Item deleted': 'Elemento eliminato',
'Item Details': "Dettagli dell'articolo",
'Item Pack added': 'Aggiunto pacco articolo',
'Item Pack deleted': 'Eliminato pacco articolo',
'Item Pack Details': "Dettagli dell'articolo",
'Item Pack updated': 'Confezione elementi aggiornata',
'Item Packs': 'Confezioni elementi',
'Item removed from Inventory': "Elemento rimosso dall' inventario",
'Item updated': 'Voce aggiornata',
'Items': 'Elementi',
'Items in Category can be Assets': 'Voci nella categoria possono essere asset',
'Japanese': 'Giapponese',
'Jerry can': 'Tanica',
'Jew': 'Ebreo',
'Jewish': 'Ebreo',
'Job added': 'Processo aggiunto',
'Job deleted': 'Processo cancellato',
'Job lost by event': 'Job lost by event',
'Job Role': 'Ruolo professionale',
'Job Role added': 'Ruolo Professionale aggiunto',
'Job Role Catalog': 'Catalogo di Ruoli Professionali',
'Job Role deleted': 'Ruolo Professionale eliminato',
'Job Role Details': 'Dettagli professione',
'Job Role updated': 'Professione aggiornata',
'Job Roles': 'Ruoli Professionali',
'Job Schedule': 'Pianificazione del processo',
'Job Title': 'Ruolo professionale',
'Job Title added': 'Ruolo professionale aggiunto',
'Job Title Catalog': 'Catalogo dei Ruoli Professionali',
'Job Title deleted': 'Ruolo professionale cancellato',
'Job Title Details': 'Dettagli del Ruolo professionale',
'Job Title updated': 'Ruolo professionale aggiornato',
'Job updated': 'Processo aggiornato',
'Jobs': 'Processi/Lavori.',
'Journal': 'Diario',
'Journal entry added': 'Aggiunta Voce Diario',
'Journal entry deleted': 'Cancellata Voce Diario',
'Journal Entry Details': 'Dettagli voce diario',
'Journal entry updated': 'Aggiornata Voce Diario',
'JS Layer': 'Livello JS',
'Karts of total mass over 1,3 tons and taxi. Rental service with driver.': 'Motoveicoli di massa complessiva oltre 1,3 tonnellate e taxi. Servizio di noleggio con conducente.',
'Karts of total mass until ad 1,3 tons. Rental service with driver.': 'Motoveicoli di massa complessiva fino ad 1,3 tonnellate. Servizio di noleggio con conducente.',
'Keep Duplicate': 'Conservare il duplicato',
'Keep Original': "Conservare l'originale",
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'Tiene traccia di tutte le pratiche in entrata permettendo loro di essere classificate & instradate al luogo idoneo per essere evase.',
'Key': 'Chiave',
'Key added': 'Chiave aggiunta',
'Key deleted': 'Chiave eliminata',
'Key Details': 'Dettagli della chiave',
'Key updated': 'Chiave aggiornata',
'Key Value pairs': 'Coppie chiave-valore',
'Keys': 'keys',
'Keyword': 'Parola chiave',
'Keywords': 'Parole chiavi',
'Kit': 'kit',
'Kit added': 'Kit aggiunto',
'Kit Contents': "Contenuto dell'equipaggiamento",
'Kit deleted': 'Equipaggiamento eliminato',
'Kit Details': "Dettagli dell'equipaggiamento",
'Kit Updated': 'Equipaggiamento aggiornato',
'Kit updated': 'Equipaggiamento aggiornamento',
'Kitchen': 'Cucina',
'Kits': 'Equipaggiamenti',
'KML Layer': 'Livello KML',
'Known Identities': 'Identità conosciute',
'Known incidents of violence against women/girls': 'Episodi noti di violenza nei confronti delle donne/ragazze',
'Known incidents of violence since disaster': 'Episodi noti di violenza dal disastro',
'Lack of material': 'Mancanza di materiale',
'Lack of school uniform': 'Mancanza di uniformi scolastiche',
'Lack of supplies at school': 'Mancanza di forniture a scuola',
'Lack of transport to school': 'Mancanza di trasporti per la scuola',
'Lactating women': 'Donne in allattamento',
'Land Slide': 'Slavina',
'LandScape/ Coastal Area': 'Territorio/ Zona Costiera',
'LandScape/ Desert Area': 'Territorio/ Zona Desertica',
'LandScape/ Forest Area': 'Territorio/ Zona Forestale',
'LandScape/ Lake Area': 'Territorio/ Zona di Laghi',
'LandScape/ Mountain Area': 'Territorio/ Zona di Montagna',
'LandScape/ Other': 'Territorio/ Altro',
'LandScape/ River Area': 'Territorio/ Zona Fluviale',
'Landslide': 'Frana',
'Language': 'Linguaggio',
'Language Code': 'Language Code',
'Last': 'Ultimo',
'Last Contacted': 'Ultimo contattato',
'Last Downloaded': 'Ultimo scaricato',
'Last known location': 'Ultima ubicazione nota',
"Last Month's Work": 'Lavoro del mese scorso',
'Last Name': 'Cognome',
'Last Polled': 'Ultimo polling',
'Last run': 'Ultimo giro',
'Last status': 'Ultimo stato',
'Last synchronization time': 'Ora ultima sincronizzazione',
'Last updated': 'Ultimo aggiornato',
'Last updated ': 'Aggiornato per ultimo',
'Last updated by': 'Autore ultimo aggiornamento',
'Last updated on': 'Data ultimo aggiornamento',
"Last Week's Work": 'Lavoro della scorsa settimana',
'Latitude': 'Latitudine',
'Latitude & Longitude': 'Latitudine & Longitudine',
'Latitude and Longitude are required': 'Latitudine e Longitudine sono richiesti',
'Latitude is Invalid!': 'Latitudine non valida!',
'Latitude is North - South (Up-Down).': 'La latitudine è Nord - Sud (Sopra-Sotto).',
'Latitude is North-South (Up-Down).': 'La latitudine è Nord-Sud (Sopra-Sotto).',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': "La latitudine è zero all'equatore, positiva nell' emisfero settentrionale e negativa nell'emisfero meridionale",
'Latitude must be between -90 and 90.': 'La latitudine deve essere tra -90 e 90',
'Latitude of far northern end of the region of interest.': 'Latitudine della parte più a nord della regione di interesse.',
'Latitude of far southern end of the region of interest.': 'Latitudine della parte più a sud della zone di interesse.',
'Latitude of Map Center': 'Latitudine del centro mappa.',
'Latitude should be between': 'La latitudine dovrebbe essere compresa tra',
'latrines': 'Latrina',
'Latrines': 'Latrina',
'Law enforcement, military, homeland and local/private security': 'Applicazione delle leggi, sicurezza militare, interna e locale/privata',
'Layer': 'Livello',
'Layer added': 'Livello aggiunto',
'Layer deleted': 'Livello eliminato',
'Layer Details': 'Dettagli Livello',
'Layer has been Disabled': 'Il livello è stato Disabilitato',
'Layer has been Enabled': 'Il livello è stato Abilitato',
'Layer Name': 'Nome del Livello',
'Layer Properties': 'Proprietà del Livello',
'Layer removed from Symbology': 'Livello rimosso dalla Simbologia',
'Layer updated': 'Livelli aggiornati',
'Layers': 'Livelli',
'Layers updated': 'Livelli aggiornati',
'Lead Organization': 'Lead Organization',
'Leader': 'Capo',
'leave empty to detach account': "Lasciare vuoto per scollegare l'account",
'Left-side is fully transparent (0), right-side is opaque (1.0).': 'Il lato sinistro è completamente trasparente (0), il lato destro è opaco (1.0)',
'Legal measure / Home warrant': 'Legal measure / Home warrant',
'Legend': 'Legenda',
'Legend Format': 'Formato della legenda',
'legend URL': 'URL della legenda',
'Legend URL': 'URL della legenda',
'Length (m)': 'Lunghezza (m)',
'less': 'Meno',
'Less Options': 'Meno Opzioni',
'Level': 'Livello',
'Level 1': 'Livello 1',
'Level 1 Assessment added': 'Valutazione Livello 1 aggiunta',
'Level 1 Assessment deleted': 'Valutazione Livello 1 eliminata',
'Level 1 Assessment Details': 'Dettagli Valutazione Livello 1',
'Level 1 Assessment updated': 'Valutazione Livello 1 aggiornata',
'Level 1 Assessments': 'Valutazioni Livello 1',
'Level 2': 'Livello 2',
'Level 2 Assessment added': 'Livello 2 Aggiunta valutazione',
'Level 2 Assessment deleted': 'Livello 2 Eliminata valutazione',
'Level 2 Assessment Details': 'Livello 2 Dettagli valutazione',
'Level 2 Assessment updated': 'Livello 2 Aggiornata valutazione',
'Level 2 Assessments': 'Livello 2 Valutazioni',
'Level 2 or detailed engineering evaluation recommended': 'Livello 2 o raccomandata valutazione dettagliata di ingegneria',
'Level 3': 'Livello 3',
"Level is higher than parent's": 'Il livello è superiore a quello principale',
'Level of Award': 'Livello di Premio',
'Level of competency this person has with this skill.': 'Livello di competenza che questa persona ha con questo abilità',
'Library support not available for OpenID': 'Supporto alla libreria non disponibile per OpenID',
'LICENSE': 'LICENZA',
'License Number': 'Numero di Licenza',
'light': 'Luce',
'LineString': 'LINEARE',
'Link': 'Collega',
'Link for the RSS Feed.': 'Collegamento per RSS Feed',
'Link to this result': 'Collegare a questo risultato',
'List': 'Elenco',
'List %(site_label)s Status': 'Lista %(site_label)s Stato',
'List / Add Baseline Types': 'Elenca/Aggiungi Tipi di Baseline',
'List / Add Impact Types': 'Elenca/Aggiungi Tipi Di Impatto',
'List / Add Services': 'Elenca/Aggiungi Servizi',
'List / Add Types': 'Elenca/Aggiungi Tipi',
'List Activities': 'elenca attività',
'List Addresses': 'Elencare Indirizzi',
'List Affiliations': 'Elencare Affiliazioni',
'List all': 'Elenca tutti',
'List All': 'Elenca tutti',
'List All Assets': 'Elencare tutti i componenti del patrimonio',
'List All Catalog Items': 'Elencare tutti Gli Elementi Del Catalogo',
'List All Commitments': 'Elencare tutti gli impegni',
'List All Entries': 'Elencare tutte le voci',
'List All Item Categories': 'Elencare tutte le categorie degli elementi',
'List All Memberships': 'Elenco Tutti gli appartenenti',
'List All Organization Approvers & Whitelists': 'List All Organization Approvers & Whitelists',
'List All Received Shipments': 'Elencare tutte le spedizioni ricevute',
'List All Records': 'Elencare tutti gli elementi',
'List All Requested Items': 'Elencare tutti gli elementi richiesti',
'List All Requests': 'Elencare tutte le richieste',
'List All Roles': 'List All Roles',
'List All Sent Shipments': 'Elenca tutte le spedizioni inviate',
'List All Users': 'List All Users',
'List Allocations': 'Lista assegnazioni',
'List Alternative Items': 'Elenca elementi alternativi',
'List Assessment Summaries': 'Elenco Riepiloghi delle Valutazioni',
'List Assessments': 'Elenco Valutazioni',
'List Assets': 'Elenca Beni',
'List Assigned Human Resources': 'Elenca risorse umane assegnate',
'List Availability': 'Elenca disponibilità',
'List available Scenarios': 'Elenca scenari disponibili',
'List Awards': 'Elencare i Premi',
'List Base Stations': 'Elenca le stazioni di base',
'List Baseline Types': 'Elenca Tipi di Baseline',
'List Baselines': 'Elenca Baseline',
'List Branch Organizations': 'Elenca le Unità organizzative',
'List Brands': 'Elenca Marchi',
'List Budgets': 'Elenca Bilanci',
'List Bundles': 'Elenca Bundle',
'List Camp Services': 'Elenca servizi del campo',
'List Camp Statuses': 'Elenca stati del campo',
'List Camp Types': 'Elenca Tipi di Campo',
'List Camps': 'Elenca Campi',
'List Catalog Items': 'Elenca Voci Catalogo',
'List Catalogs': 'Elenco Cataloghi',
'List Certificates': 'Elencare i Certificati',
'List Certifications': 'Elencare le Certificazioni',
'List Checklists': 'Elenco Elenchi',
'List Cluster Subsectors': 'Elenco Cluster Sottosettori',
'List Clusters': 'Elenco Cluster',
'List Coalitions': 'Elenco coalizioni',
'List Commitment Items': "Elenco degli articoli dell'impegno",
'List Commitments': 'Elenco Impegni',
'List Competencies': 'Elenco Competenze',
'List Competency Ratings': 'Elencare le Valutazioni delle Competenze',
'List Conflicts': 'Elenco Conflitti',
'List Contact Information': 'Elencare Informazioni Contatto',
'List Contacts': 'Elencare i Contatti',
'List Course Certificates': 'Elencare i Certificati Del Corso',
'List Courses': 'Elencare i Corsi',
'List Credentials': 'Elencare le Credenziali',
'List Current': 'Elenco corrente',
'List Data in Theme Layer': 'Elencare i dati a livello di tema',
'List Departments': 'Elencare i Dipartimenti',
'List Documents': 'Elenco Documenti',
'List Donors': 'Elenco Donatori',
'List Education Details': 'Elencare Dettagli Istruzione',
'List Education Levels': 'Elencare Livelli Istruzione',
'List Event Types': 'Elenca tipi di eventi',
'List Events': 'Elenca Eventi',
'List Facilities': 'Elenca Strutture',
'List Facility Types': 'Elenco tipi di Struttura',
'List Feature Layers': 'Elencare i livelli delle funzioni',
'List Flood Reports': 'Elencare i reports sulle alluvioni',
'List Groups': 'Elencare Gruppi',
'List Groups/View Members': 'Elencare i gruppi, visualizzare i membri',
'List Hospitals': 'Elencare ospedali',
'List Hours': 'Elencare le Ore',
'List Human Resources': 'Elenca risorse umane',
'List Identities': 'Elencare Identità',
'List Images': 'Elencare Immagini',
'List Impact Assessments': "Elenca Valutazioni d'impatto",
'List Impact Types': 'Elenca Tipi Di Impatto',
'List Impacts': 'Elenca Impatti',
'List Incident Reports': 'Elenca Reports di incidente',
'List Incident Types': 'Elenca tipi di incidenti',
'List Incidents': 'Elenco incidenti',
'List Item Categories': 'Elenca categorie di elementi',
'List Item Packs': 'Elenca confezioni elementi',
'List Items': 'Elenca elementi',
'List Items in Inventory': 'Elenca elementi in inventario',
'List Job Roles': 'Elenco Lavoro Ruoli',
'List Job Titles': 'Elencare i Titoli dei Processi',
'List Jobs': 'Elencare i Processi',
'List Keys': 'Elenca chiavi',
'List Kits': 'Elenca Kit',
'List Layers': 'Elencare i Livelli',
'List Layers in Profile': 'Elencare i Livelli nel Profilo',
'List Layers in Symbology': 'Elencare i Livelli nella Simbologia',
'List Level 1 Assessments': 'Elencare le Valutazioni di primo livello',
'List Level 1 assessments': 'Elencare le Valutazioni di primo livello',
'List Level 2 Assessments': 'Elencare le Valutazioni di secondo livello',
'List Level 2 assessments': 'Elencare le Valutazioni di secondo livello',
'List Location Hierarchies': 'Elencare le Gerarchie di Ubicazioni',
'List Locations': 'Elencare le Sedi',
'List Log Entries': 'Elencare Voci Log',
'List Mailing Lists': 'Elencare Lista di Posta',
'List Map Configurations': 'Elencare le Configurazioni della Mappa',
'List Markers': 'Elencare le Etichette',
'List Members': 'Elencare i Membri',
'List Memberships': 'Elencare le Appartenenze',
'List Messages': 'Elenca i messaggi',
'List Missing Persons': 'Elenca i dispersi',
'List Missions': 'Elenco Missioni',
'List Need Types': 'Elenca i tipi di Esigenze',
'List Needs': 'Elenca le Esigenze',
'List Networks': 'Elenca Reti',
'List of addresses': 'Elenco di indirizzi',
'List of Appraisals': 'Elenco delle Valutazioni',
'List of Facilities': 'Elenco Strutture',
'List of Items': 'Elenco degli elementi',
'List of Missing Persons': 'Elenco di persone scomparse',
'List of Peers': 'Elenco di peer',
'List of Professional Experience': 'Elenco delle Esperienze Professionali',
'List of Reports': 'Elenco dei Reports',
'List of Requests': 'Elenco di richieste',
'List of Roles': 'Elenco dei Ruoli',
'List of Spreadsheets': 'Elenco di fogli elettronici',
'List of Spreadsheets uploaded': 'Elenco di fogli elettronici caricati',
'List of Volunteers': 'Elenco dei volontari',
'List of Volunteers for this skill set': 'Elenco di volontari per questo ambito di capacità',
'List Office Types': 'Elenca Tipi di Ufficio',
'List Offices': 'Elenco Uffici',
'List Organization Types': 'Elenca Tipi di organizzazione',
'List Organizations': 'Elenco delle Organizzazioni',
'List Participants': 'Elencare i Partecipanti',
'List Peers': 'Elenco Peer',
'List Personal Effects': 'Elenco effetti personali',
'List Persons': 'Elencare Persone',
"List Persons' Details": 'Elencare Dettagli delle Persone',
'List Photos': 'Elenco Foto',
'List PoI Types': 'Elencare i Tipi PoI',
'List Points of Interest': 'Elencare i Punti di Interesse',
'List Population Statistics': 'Elenco Statistiche Demografiche',
'List Positions': 'Elenco Posizioni',
'List Problems': 'Elenco Problemi',
'List Profiles configured for this Layer': 'Elencare i Profili configurati per questo Livello',
'List Programs': 'Elencare i Programmi',
'List Projections': 'Elenco Proiezioni',
'List Projects': 'Elenca progetti',
'List Rapid Assessments': 'Elenco Valutazioni rapide',
'List Received Items': 'Elenco oggetti ricevuti',
'List Received Shipments': 'Elenco Spedizioni ricevute',
'List Records': 'Elenco Records',
'List Recurring Requests': 'Elenca le richieste ripetitive',
'List Regions': 'Elenco Regioni',
'List Registrations': 'Elenco Registrazioni',
'List Reports': 'Lista i reports',
'List Request Items': 'Elenca voci di richiesta',
'List Requests': 'Elenca richieste',
'List Resources': 'Elenca risorse',
'List Rivers': 'Elenco Fiumi',
'List Roles': 'Elencare Ruoli',
'List Rooms': 'Elenco stanze',
'List Scenarios': 'Elenco Scenari',
'List Sections': 'Elenca Sezioni',
'List Sectors': 'Elenco Settori',
'List Sent Items': 'Elenca elementi inviati',
'List Sent Shipments': 'Elenco Spedizioni inviate',
'List Service Profiles': 'Elenca profili di servizio',
'List Services': 'Elenco Servizi',
'List Settings': 'Elenca Impostazioni',
'List Shelter Services': 'Elenca servizi della Struttura di Accoglienza',
'List Shelter Statuses': 'Elenca stato della Struttura di Accoglienza',
'List Shelter Types': 'Elenca i tipi della Struttura di Accoglienza',
'List Shelters': 'Elenca le Strutture di Accoglienza',
'List Skill Equivalences': 'Elenca le Capacità equivalenti',
'List Skill Provisions': 'Elenco Capacità a disposizione',
'List Skill Types': 'Elenca le tipologie di Capacità',
'List Skills': 'Elenca le Capacità',
'List SMS Outbound Gateways': 'Elenca i Gateways degli SMS in uscita',
'List SMTP to SMS Channels': 'Elenca SMTP per i Canali SMS',
'List Solutions': 'Elenco Soluzioni',
'List Staff & Volunteers': 'Elenca Personale & Volontari',
'List Staff Assignments': 'Elenca le assegnazioni del Personale',
'List Staff Members': 'Elenca i Membri del Personale',
'List Staff Types': 'Elenco Tipi di Personale',
'List Status': 'Elenco Status',
'List Statuses': 'Elenco Stati',
'List Subscriptions': 'Elenco sottoscrizioni',
'List Subsectors': 'Elenco Sottosettori',
'List Support Requests': 'Elenca le richieste di supporto',
'List Survey Answers': 'Elenca le Risposte al Sondaggio',
'List Survey Questions': 'Elenca le Domande del Sondaggio',
'List Survey Series': 'Elenca le Serie del Sondaggio',
'List Survey Templates': 'Elenca i Modelli del Sondaggio',
'List Symbologies': 'Elenco Simbologie',
'List Symbologies for Layer': 'Elenco Simbologie per Livello',
'List Tags': 'Elenca etichette',
'List Tasks': 'Elenca attività',
'List Teams': 'Elencare le Squadre',
'List Themes': 'Elenco Temi',
'List Tickets': 'Elenco Dei Biglietti',
'List Tracks': 'Elenco Delle Tracce',
'List Training Events': 'Elencare gli Eventi di Formazione',
'List Trainings': 'Elencare i Corsi',
'List Tropo Channels': 'Elenca Canali Tropo',
'List unidentified': 'Elenca non identificato',
'List Units': 'Elenca Unità',
'List Users': 'Elenco degli utenti',
'List Volunteer Cluster Positions': 'Elencare le Posizioni dei Gruppi di Volontari',
'List Volunteer Cluster Types': 'Elencare i Tipi di Gruppi di Volontari',
'List Volunteer Clusters': 'Elencare i Gruppi di Volontari',
'List Volunteer Roles': 'Elenca i Ruoli dei Volontari',
'List Volunteers': 'Elenca i Volontari',
'List Warehouses': 'Elenco dei Magazzini',
'List Web API Channels': 'Elenca canali Web API',
'List/Add': 'Elenca/Aggiungi',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Elenca "chi fa che cosa e dove". Consente alle agenzie di soccorso di coordinare le loro attività',
'Live Help': 'Aiuto dal vivo',
'Livelihood': 'Mezzi di sostentamento',
'Livelihood Manager': 'Capo dei mezzi di sostentamento',
'Livelihoods': 'Mezzi di sostentamento',
'Load Cleaned Data into Database': 'Caricamento nel Database dei Dati corretti',
'Load filter': 'Load filter',
'Load Raw File into Grid': 'Caricamento nella griglia dei File grezzi',
'Loading': 'Caricamento in corso',
'Local Name': 'Nome locale',
'Local Names': 'Nomi locali',
'Local Storm': 'Tempesta locale',
'Location': 'Ubicazione',
'Location 1': 'Ubicazione 1',
'Location 2': 'Ubicazione 2',
'Location added': 'Ubicazione aggiunta',
'Location added to Organization': "Ubicazione aggiunta all'Organizzazione",
'Location deleted': 'Ubicazione eliminata',
'Location Details': "Dettagli dell'ubicazione",
'Location Group': 'Gruppo di ubicazioni',
'Location group cannot be a parent.': 'Il gruppo di ubicazioni non può essere un elemento principale.',
'Location group cannot have a parent.': 'Il gruppo di ubicazioni non può avere un parent.',
'Location groups can be used in the Regions menu.': 'I gruppi di ubicazioni possono essere utilizzati nel menu Regions.',
'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': 'I gruppi di ubicazione possono essere utilizzati per filtrare quanto viene mostrato sulla mappa e nei risultati della ricerca solo a entità interessate dalle ubicazioni nel gruppo.',
'Location Hierarchies': 'Gerarchie di ubicazioni',
'Location Hierarchy': 'Gerarchia di ubicazioni',
'Location Hierarchy added': 'Gerarchia di ubicazioni aggiunta',
'Location Hierarchy deleted': 'Gerarchia di ubicazioni cancellata',
'Location Hierarchy Level 0 Name': 'Gerarchia di collocazione A Livello 0 Nome',
'Location Hierarchy Level 1 Name': 'Nome a Livello 1 della Gerarchia di ubicazioni',
'Location Hierarchy Level 2 Name': 'Nome a Livello 2 della Gerarchia di ubicazioni',
'Location Hierarchy Level 3 Name': 'Nome a Livello 3 della Gerarchia di ubicazioni',
'Location Hierarchy Level 4 Name': 'Nome a Livello 4 della Gerarchia di ubicazioni',
'Location Hierarchy Level 5 Name': 'Nome a Livello 5 della Gerarchia di ubicazioni',
'Location Hierarchy updated': 'Gerarchia di ubicazioni aggiornata',
'Location is of incorrect level!': "L'ubicazione è a livello sbagliato!",
'Location is Required!': "L'ubicazione è richiesta!",
'Location needs to have WKT!': "L'ubicazione necessita di WKT!",
'Location removed from Organization': "Ubicazione rimossa dall'Organizzazione",
'Location Required!': 'Ubicazione richiesta!',
'Location updated': 'Ubicazione aggiornata',
'Location:': 'Ubicazione:',
'Locations': 'Ubicazioni',
'Locations of this level need to have a parent of level': 'Le ubicazioni di questo livello devono avere un livello superiore',
'Lockdown': 'Blocco',
'Locust': 'Locusta',
'Log': 'log',
'Log entry added': 'Voce di Log aggiunta',
'Log entry deleted': 'Voce di Log eliminata',
'Log Entry Deleted': 'Voce di Log Cancellata',
'Log Entry Details': 'Dettagli Voce di Log',
'Log entry updated': 'Voce di Log aggiornata',
'Logged out': 'Logged out',
'Login': 'Connettersi',
'login': 'Login',
'Login using Facebook account': "Connettersi usando l'utenza di Facebook",
'Login using Google account': "Connettersi usando l'utenza di Google",
'Login with Facebook': 'Connettersi con Facebook',
'Login with Google': 'Connettersi con Google',
'Logistics': 'Logistica',
'Logistics Airport': 'Logistica aereoportuale',
'Logistics Airport Military': 'Logistica militare aereoportuale',
'Logistics Boat': 'Logistica per le barche',
'Logistics Bridge': 'Logistica per i ponti',
'Logistics Bus': 'Logistica per gli autobus',
'Logistics Car': 'Logistica per le macchine',
'Logistics Ferry': 'Logistica Traghetto',
'Logistics Gas Station': 'Logistica Stazioni di Servizio',
'Logistics Helipad': 'Logistica per Elisuperficie',
'Logistics Management System': 'Sistema di gestione della logistica',
'Logistics Port': 'Logistica Portuale',
'Logistics Ship': 'Logistics Navale',
'Logistics Train': 'Logistica Treni',
'Logistics Truck': 'Logistica Camion',
'Logistics Tunnel': 'Logistics Gallerie',
'Logo': 'Logo',
'Logo file %s missing!': 'File Logo %s mancanti!',
'Logo of the organization. This should be a png or jpeg file and it should be no larger than 400x400': "Logo dell'Organizzazione. Utilizzare un file di tipo png o jpeg e non maggiore di 400x400",
'Logout': 'Logout',
'long': 'Lungo',
'Long Name': 'Nome per esteso',
'Long Text': 'Testo esteso',
'long>12cm': 'Lungo > 12cm',
'Longitude': 'Longitudine',
'Longitude is Invalid!': 'La longitudine non è valida!',
'Longitude is West - East (sideways).': 'La Longitudine è Ovest-Est (lateralmente).',
'Longitude is West-East (sideways).': 'Longitudine è a ovest-est (da un lato).',
'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': "La Longitudine è zero sul primo meridiano (Greenwich Mean Time) ed è positiva verso est, attraverso l'Europa e l'Asia. Longitudine è negativa per l' Occidente, attraverso l' Atlantico e le Americhe.",
'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': "La longitudine è zero al primo meridiano (attraverso Greenwich, Regno Unito) ed è positivo verso est, attraverso Europa ed Asia. La longitudine è negativa per l' Occidente, attraverso l' Atlantico e le Americhe.",
'Longitude must be between -180 and 180.': 'La longitudine deve essere tra -180 e 180',
'Longitude of far eastern end of the region of interest.': "Longitudine dell'Estremo Oriente dell'area di interesse.",
'Longitude of far western end of the region of interest.': "Longitudine dell'Estremo Occidente dell'area di interesse.",
'Longitude of Map Center': 'Longitudine del centro della mappa',
'Longitude should be between': 'La Longitudine dovrebbe essere compresa tra',
'Looting': 'Saccheggi',
'Lost': 'Perso',
'Lost Password': 'Password smarrita',
'low': 'basso',
'Low': 'Basso',
'Magnetic Storm': 'Tempesta magnetica',
'Mailing list': 'Lista di posta',
'Mailing list added': 'Aggiunta Lista di Posta',
'Mailing list deleted': 'Cancellata Lista di Posta',
'Mailing List Details': 'Dettagli della lista mail',
'Mailing List Name': 'Nome della lista email',
'Mailing list updated': 'Aggiornata Lista di Posta',
'Mailing Lists': 'Liste di posta',
'Main?': 'Principale?',
'Major': 'Principale',
'Major Damage': 'Grandi Danni',
'Major expenses': 'Grandi spese',
'Major outward damage': "Grave danno verso l' esterno",
'Make Commitment': 'Prendere un Impegno',
'Make New Commitment': 'Prendere nuovo impegno',
'Make preparations per the <instruction>': 'Fare i preparativi per la',
'Make Request': 'Effettuare una richiesta',
'male': 'maschio',
'Male': 'Maschio',
'Manage Events': 'Gestisci eventi',
'Manage Incidents': 'Gestisci incidenti',
'Manage Layers in Catalog': 'Gestire i livelli nel Catalogo',
'Manage Relief Item Catalogue': 'Gestire il catalogo delle voci di assistenza',
'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': 'Gestire le richieste di forniture, Asset, personale o altre risorse. Corrispondenze negli Inventari in cui sono richieste le forniture.',
'Manage requests of hospitals for assistance.': "Gestire le richieste d'assistenza degli ospedali.",
'Manage Users & Roles': 'Gestire utenti & Ruoli',
'Manage volunteers by capturing their skills, availability and allocation': 'Gestire volontari acquisendo le loro capacità, disponibilità e assegnazione',
'Manage Warehouses/Sites': 'Gestire Depositi/Siti',
'Manage Your Facilities': 'Gestire le Strutture',
'Managing Office': 'Gestione Ufficio',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': "Obbligatorio. In GeoServer, questo è il nome del livello. All'interno del WFS nel getCapabilities, questa è la parte del nome FeatureType dopo i due punti (:).",
'Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wfs?': 'Obbligatorio. La URL per accedere al servizio. e.g. http://host.domain/geoserver/wfs?',
'Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wms?': 'Obbligatorio. La URL per accedere al servizio. e.g. http://host.domain/geoserver/wms?',
'Mandatory. The URL to access the service.': "Obbligatorio. L' URL per accedere al servizio.",
'manual': 'manuale',
'Manual': 'Manuale',
'Manual Synchronization': 'Sincronizzazione manuale',
'Many': 'Molti',
'Map': 'Mappa',
'Map cannot display without prepop data!': 'La mappa non può essere visualizzata senza dati prepop!',
'Map Center Latitude': 'Latitudine del centro della mappa',
'Map Center Longitude': 'Longitudine del centro della mappa',
'Map Configuration': 'Configurazione della mappa ',
'Map Configuration added': 'Configurazione della mappa aggiunta',
'Map Configuration deleted': 'Configurazione della mappa eliminata',
'Map Configuration Details': 'Dettagli di configurazione di mappa',
'Map Configuration removed': 'Configurazione di mappa eliminata',
'Map Configuration updated': 'Configurazione della mappa aggiornata',
'Map Configurations': 'Configurazioni della mappa',
'Map has been copied and set as Default': 'La mappa è stata copiata e impostata come impostazione predefinita',
'Map has been set as Default': 'La mappa è stata impostata come impostazione predefinita',
'Map Height': 'Altezza della mappa',
'Map is already your Default': 'La mappa è già la tua impostazione predefinita',
'Map not available: Cannot write projection file - %s': 'Mappa non disponibile: non può scrivere il file di proiezione - %s',
'Map not available: No Projection configured': 'Mappa non disponibile: nessuna proiezione configurata',
'Map not available: Projection %(projection)s not supported - please add definition to %(path)s': 'Mappa non disponibile: la proiezione %(projection)s non è supportata - si prega di aggiungere la definizione a %(path)s',
'Map of Base Stations': 'Mappa delle Stazioni base',
'Map of Facilities': 'Mappa delle Strutture',
'Map of Hospitals': 'Mappa degli ospedali',
'Map of Offices': 'Mappa degli Uffici',
'Map of Resources': 'Mappa delle risorse',
'Map Service Catalog': 'Servizio di Catalogo della Mappa',
'Map Settings': 'Impostazioni della mappa',
'Map Viewing Client': 'Client per la visualizzazione della mappa',
'Map Width': 'Larghezza mappa',
'Map Zoom': 'Zoom della mappa',
'Marine Security': 'Sicurezza marittima',
'Marital Status': 'Stato civile',
'Mark as duplicate': 'Indica come duplicato',
'Mark Sender': 'Segnala Mittente',
'Marker': 'Indicatore',
'Marker added': 'Indicatore aggiunto',
'Marker deleted': 'Indicatore eliminato',
'Marker Details': 'Dettagli Indicatore',
'Marker updated': 'Indicatore aggiornato',
'Markers': 'Indicatori',
'married': 'Coniugato',
'Master Message Log': 'Log dei messaggi principale',
'Master Message Log to process incoming reports & requests': 'Il log dei messaggi principale per processare i reports e le richieste in ingresso',
'Match Percentage': 'Corrispondenza Percentuale',
'Match percentage indicates the % match between these two records': 'La percentuale di corrispondenze indica la percentuale di corrispondenze tra questi due record',
'Match Requests': 'Corrispondenza di richieste',
'Match?': 'Corrispondenza?',
'Matching Catalog Items': 'Voci di catalogo corrispondenti',
'Matching Items': 'Elementi corrispondenti',
'Matching Records': 'Record corrispondenti',
'Maximum Extent': 'Misura massima',
'Maximum Location Latitude': 'Latitudine della posizione massima',
'Maximum Location Longitude': 'Longitudine della posizione massima',
'Maximum:': 'Massimo:',
'Measure Area: Click the points around the polygon & end with a double-click': 'Area di misurazione: fare click sui punti attorno al poligono e terminare con un doppio click',
'Measure Length: Click the points along the path & end with a double-click': 'Misurare la lunghezza: fare click sui punti lungo il percorso e terminare con un doppio click',
'Medical and public health': 'Medici e sanità pubblica',
'Medical Clinic': 'Clinica Medica',
'Medical Conditions': 'Condizioni Mediche',
'Medical Information': 'Informazioni Mediche',
'medium': 'medio',
'Medium': 'Medio',
'medium<12cm': 'medio < 12cm',
'Megabytes per Month': 'Megabyte al mese',
'Member Organizations': 'Organizzazioni Membri',
'Member removed from Group': 'Membro rimosso dal gruppo',
'Members': 'Membri',
'Membership': 'Appartenenza',
'Membership Details': "Dettagli sull'appartenenza",
'Membership updated': 'Appartenenza aggiornata',
'Memberships': 'Appartenenze',
'Menu': 'Menu',
'Merge': 'Unire',
'Merge records': 'Unire i record',
'Message': 'Messaggio',
'Message added': 'Messaggio aggiunto',
'Message deleted': 'Messaggio eliminato',
'Message Details': 'Dettagli messaggio',
'Message Log': 'Log dei Messaggi',
'Message Source': 'Origine dei Messaggi',
'Message updated': 'Messaggio aggiornato',
'Message variable': 'Variabile del messaggio',
'Message Variable': 'Variabile del messaggio',
'Messages': 'Messaggi',
'Messaging': 'Messaggistica',
'Messaging settings updated': 'impostazioni di messaggistica aggiornate',
'Meteorological (inc. flood)': 'Meteorologico (inc. Alluvione)',
'meters': 'metri',
'Method used': 'Metodo utilizzato',
'MGRS Layer': 'Livello MGRS',
'Middle Name': 'Secondo nome',
'Migrants or ethnic minorities': 'Migranti o minoranze etniche',
'Military': 'Militare',
'Minimum Location Latitude': 'Minima posizione della Latitudine',
'Minimum Location Longitude': 'Minima posizione della Longitudine',
'Minimum shift time is 6 hours': 'Il turno minimo è di 6 ore.',
'Minimum:': 'Minimo:',
'Minor Damage': 'Minor Danno',
'Minor/None': 'Poco / Nessuno',
'Minorities participating in coping activities': 'Le minoranze aiutano a far fronte alle circostanze',
'Minute': 'Minuto',
'Minutes must be a number between 0 and 60': 'I minuti devono essere un numero compreso tra 0 e 60',
'Minutes must be a number.': 'I minuti devono essere un numero',
'Minutes must be less than 60.': 'I minuti devono essere meno di 60',
'Minutes per Month': 'Minuti al mese',
'Minutes should be a number greater than 0 and less than 60': 'I minuti devono essere un numero maggiore di 0 e inferiore a 60',
'Miscellaneous': 'Varie',
'missing': 'Mancante',
'Missing': 'Mancante',
'Missing Person': 'Persona scomparsa',
'Missing Person Details': 'Dettagli della persona scomparsa',
'Missing Person Registry': 'Archivio della persona mancante',
'Missing Persons': 'Persone scomparse',
'Missing Persons Registry': 'Registro delle persone scomparse',
'Missing Persons Report': 'Report sulle persone scomparse',
'Missing Report': 'Report mancante',
'Missing Senior Citizen': 'Anziano Disperso',
'Missing Vulnerable Person': 'Persona Vulnerabile dispersa',
'Mission added': 'Missione aggiunta',
'Mission deleted': 'Missione eliminata',
'Mission Details': 'Dettagli Missione',
'Mission Record': 'Record Missione',
'Mission updated': 'Missione aggiornata',
'Missions': 'Missioni',
'Mobile': 'Cellulare',
'Mobile Basic Assessment': 'Mobile Base Valutazione',
'Mobile Commons (Inbound)': "Mobile Commons (In Arrivo)",
'Mobile Commons Channels': 'Canali Mobile Commons',
'Mobile Commons Setting added': 'Configurazione Mobile Commons aggiunta',
'Mobile Commons Setting deleted': 'Configurazione Mobile Commons rimossa',
'Mobile Commons Setting Details': 'Dettagli Configurazione Mobile Commons',
'Mobile Commons Settings': 'Configurazione Mobile Commons',
'Mobile Commons settings updated': 'Configurazione Mobile Commons aggiornata',
'Mobile Phone': 'Telefono cellulare',
'Mobile Phone Number': 'Numero di telefono del cellulare',
'Mode': 'Modalità',
'Model/Type': 'Modello/Tipo',
'Modem': 'Modem',
'Modem Channel added': 'Canale modem aggiunto',
'Modem Channel deleted': 'Canale modem cancellato',
'Modem Channel Details': 'Dettagli canale modem',
'Modem Channel updated': 'Canale modem aggiornato',
'Modem Channels': 'Canali modem',
'Modem Settings': 'Impostazioni modem',
'Modem settings updated': 'Aggiornate le impostazioni del Modem',
'Moderate': 'Moderato',
'Moderator': 'Moderatore',
'Modify Feature: Select the feature you wish to deform & then Drag one of the dots to deform the feature in your chosen manner': 'Funzionalità di modifica, selezionare la funzione che desiderate a deformare e quindi trascinare uno dei punti per deformare la funzione nel modo scelto',
'Modify Information on groups and individuals': 'Modificare le informazioni sui gruppi e persone',
'Modifying data in spreadsheet before importing it to the database': 'Modifica dei dati nel foglio elettronico prima di importarlo nel database',
'Module': 'Modulo',
'module allows the site administrator to configure various options.': 'il modulo consente al responsabile del sito di configurare varie opzioni.',
'Module Day and Night Capacity': 'Modula la capienza diurna e notturna',
'module helps monitoring the status of hospitals.': 'Modulo consente il monitoraggio dello stato degli ospedali.',
'Module Night Capacity': 'Modula la capienza notturna',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': "Modulo fornisce un meccanismo per fornire in modo collaborativo una panoramica dell'emergenza in corso, utilizzando cartografia in linea (GIS).",
'Module provides access to information on current Flood Levels.': "Il modulo fornisce accesso alle informazioni sugli attuali livelli dell'alluvione.",
'Monday': 'Lunedì',
'mongoloid': 'Mongoloide',
'Month': 'Mese',
'Monthly Cost': 'Costo mensile',
'Monthly Salary': 'Stipendio Mensile',
'Months': 'mesi',
'more': 'Più',
'More Info': 'Più informazioni',
'More Options': 'Più opzioni',
'more...': 'Più..',
'Morgue': 'Polizia Mortuaria',
'Morgue Status': "Stato dell' obitorio",
'Morgue Units Available': 'Obitori disponibili',
'Mosque': 'Moschea',
'Motorcycle': 'Motociclo',
'Motorcycles. Power not exceeding 35 kW': 'Motocicli. Potenza non superiore a 35 kW',
'Moustache': 'Baffi',
'Motorcycles, Tricycles, Agricultural Machinery. Maximum power 11 kW (Motorcycles) e 15 kW (Tricycles)': 'Motocicli, Tricicli, Macchine Agricole. Potenza massima di 11 kW (Motocicli) e 15 kW (Tricicli)',
'Move Feature: Drag feature to desired location': 'Funzionalità di movimento: trascinare la funzione nella posizione desiderata ',
'Multiple': 'Molteplice',
'Multiple Matches': 'Molteplici corrispondenze',
'MultiPolygon': 'Multipoligono',
'Muslim': 'Musulmano',
'Must a location have a parent location?': "Un'ubicazione deve avere un'ubicazione origine?",
'My Current function': 'La mia attuale funzione',
'My Logged Hours': 'Le mie ore registrate',
'My Maps': 'Le mie mappe',
'My Open Tasks': 'I miei tasks aperti',
'My Tasks': 'Compiti personali',
'n/a': 'N/D',
'N/A': 'N/D',
'Name': 'Nome',
'Name and/or ID': 'Nome e/o ID',
'Name field is required!': 'Il campo nome è richiesto!',
'Name for your Mobile Commons Account': 'Nome per il tuo account Mobile Commons',
'Name of Award': 'Nome del Premio',
'Name of Father': 'Nome del Padre',
'Name of Institute': "Nome dell'Istituto",
'Name of Map': 'Nome della mappa',
'Name of Mother': 'Nome della Madre',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': "Nome del file (& sotto-percorso facoltativo) ubicato in statico che dovrebbe essere utilizzato per lo sfondo dell'intestazione.",
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': "Nome del file (& FACOLTATIVO sub-percorso) si trova nella statico che deve essere utilizzato per la parte superiore a sinistra dell' immagine.",
'Name of the file (& optional sub-path) located in views which should be used for footer.': 'Nome del file (e il sotto-percorso facoltativo) si trova nelle viste che dovrebbe essere utilizzate per piè di pagina.',
'Name of the person in local language and script (optional).': 'Il nome della persona in lingua locale e script (facoltativo).',
'Name of the repository (for you own reference)': 'Name of the repository (for you own reference)',
'Name, Org and/or ID': "Nome, Organizzazione e / o l' ID",
'Names can be added in multiple languages': 'I nomi possono essere aggiunti in più lingue',
'National': 'Nazionale',
'National ID Card': "Carta d'identità nazionale",
'National NGO': 'ONG nazionali',
'Nationality': 'Nazionalità',
'Nationality of the person.': 'Nazionalità della persona.',
'Nautical Accident': 'incidente nautico',
'Nautical Hijacking': 'dirottamento nautico',
"Need a 'url' argument!": "È necessario un argomento 'url'!",
'Need added': 'Necessità aggiunta',
'Need deleted': 'Necessità eliminata',
'Need to be logged-in to be able to submit assessments': 'Necessità di essere registrati per essere in grado di sottomettere Valutazioni',
'Need to configure Twitter Authentication': "Necessità di configurare l'autenticazione di Twitter",
'Need to specify a Budget!': 'È necessario specificare uno Stanziamento!',
'Need to specify a bundle!': 'È necessario specificare un pacchetto!',
'Need to specify a group!': 'È necessario specificare un gruppo!',
'Need to specify a Kit!': 'È necessario specificare un Kit!',
'Need to specify a location to search for.': 'È necessario specificare un percorso da ricercare.',
'Need to specify a Resource!': 'È necessario specificare una risorsa.',
'Need to specify a role!': 'È necessario specificare un ruolo!',
'Need to specify a table!': 'È necessario specificare una tabella!',
'Need to specify a user!': 'È necessario specificare un utente!',
'Need Type': 'Tipo di necessità',
'Need Type added': 'Tipo di necessità aggiunto',
'Need Type deleted': 'Tipo di necessità eliminato',
'Need Type Details': 'Dettagli del tipo di necessità',
'Need Type updated': 'Tipo di necessità aggiornato',
'Need Types': 'Tipi di necessità',
'Need updated': 'Necessità aggiornata',
'Needs': 'Necessità',
'Needs Details': 'Dettagli delle esigenze',
'Needs Maintenance': 'Necessita di manutenzione',
'Needs to reduce vulnerability to violence': 'Necessità di ridurre la vulnerabilità rispetto a violenza',
'Negative Flow Isolation': 'Isolamento del flusso negativo',
'negroid': 'Negroide',
'Neighborhood': 'Quartiere',
'Neighbouring building hazard': 'rischio di costruzione nel vicinato',
'Neonatal ICU': 'ICU Neonatale',
'Neonatology': 'Neonatologia',
'Network': 'rete',
'Network added': 'Aggiunta la rete',
'Network Details': 'Dettagli di rete',
'Network removed': 'Rete rimossa',
'Network updated': 'Rete aggiornata',
'Networks': 'Reti',
'Neurology': 'Neurologia',
'Never': 'Mai',
'never': 'mai',
'new': 'Nuovo',
'New': 'Nuovo',
'new ACL': 'Nuova ACL',
'New Assessment reported from': 'Nuova valutazione riportata da',
'New cases in the past 24h': 'Nuovi casi nelle ultime 24 ore',
'New Certificate': 'Nuovo certificato',
'New Checklist': 'Nuova Lista di controllo',
'New Entry': 'Nuova Voce',
'New Event': 'Nuovo evento',
'New Item Category': 'Nuovo categoria di elementi',
'New Job Role': 'Nuovo Ruolo di lavoro',
'New Location': 'Nuova collocazione',
'New Location Group': 'Nuova ubicazione del Gruppo',
'New Peer': 'Nuovo Peer',
'New Record': 'Nuovo record',
'new record inserted': 'Nuovo record inserito',
'New Records': 'Nuovi Record',
'New Request': 'Nuova Richiesta',
'New Role': 'Nuovo Ruolo',
'New Scenario': 'Nuovo scenario',
'New Sector': 'Nuovo Settore',
'New Service': 'Nuovo Servizio',
'New Skill': 'Nuova Capacità',
'New Solution Choice': 'Nuova scelta di soluzione',
'New Staff Member': 'Nuovo Membro dello Staff',
'New Support Request': 'Nuova richiesta di supporto',
'New Synchronization Peer': 'Nuovo peer di sincronizzazione',
'New Team': 'Nuova squadra',
'New Training Course': 'Nuovo Corso di Formazione',
'New updates are available.': 'Sono disponibili nuovi aggiornamenti',
'New Volunteer': 'Nuovo Volontario',
'News': 'Notizie',
'Next': 'Prossimo',
'next 100 rows': 'Prossime 100 righe',
'Next run': 'Prossimo giro',
'Next View': 'Prossima Vista',
'NGO': 'NGO',
'Night only': 'Solo di notte',
'No': 'No',
'NO': 'No',
'no': 'No',
'No access at all': 'Nessun accesso affatto',
'No access to this record!': 'Nessun accesso a questo record!',
'No Accounts currently defined': 'Al momento, non ci sono account definiti',
'No action recommended': 'nessuna azione consigliata',
'No Activities Found': 'Nessuna attività trovata',
'No Affiliations defined': 'Nessuna Affiliazione definita',
'No Alternative Items currently registered': 'Nessuna voce alternativa è attualmente registrata',
'No Appraisals found': 'Valutazioni non trovate',
'No Assessment Summaries currently registered': 'Nessun Riepilogo di valutazione attualmente registrato',
'No Assessments currently registered': 'Valutazioni non attualmente registrati',
'No Assets currently registered': 'Nessun bene attualmente registrato',
'No Assets currently registered in this event': 'Nessun asset è attualmente registrati in questo evento',
'No Assets currently registered in this incident': 'Nessun bene attualmente registrato in questo incidente',
'No Assets currently registered in this scenario': 'Nessun asset è attualmente registrato in questo scenario',
'No Awards found': 'Nessun Premio trovato',
'No Base Layer': 'Nessun livello di base',
'No Base Stations currently registered': 'Al momento non vi sono stazioni base, registrate.',
'No Baseline Types currently registered': 'Nessuna tipo di linea base attualmente registrato',
'No Baselines currently registered': 'Nessuna baseline attualmente registrata',
'No Branch Organizations currently registered': 'Nessuna Unità Organizzativa registrata',
'No Brands currently registered': 'Nessun marchio attualmente registrato',
'No Budgets currently registered': 'Nessuno stanziamento attualmente registrato',
'No Bundles currently registered': 'Nessun pacchetto attualmente registrato',
'No Camp Services currently registered': 'Nessun Servizio di Campo attualmente registrato',
'No Camp Statuses currently registered': 'Nessuno Stato di Campo attualmente registrato',
'No Camp Types currently registered': 'Nessun Tipo di Campo attualmente registrato',
'No Camps currently registered': 'Nessun campo attualmente registrato',
'No Catalog Items currently registered': 'Nessuna voce del catalogo attualmente registrata',
'No Catalogs currently registered': 'Nessun catalogo attualmente registrato',
'No Checklist available': 'Lista di controllo non disponibile',
'No Cluster Subsectors currently registered': 'Nessun Sottosettore di Raggruppamento attualmente registrato',
'No Clusters currently registered': 'Nessun raggruppamento attualmente registrato',
'No Coalitions currently recorded': 'Nessuna coalizione attualmente registrata',
'No Commitment Items currently registered': 'Nessuna voce di impegno attualmente registrata',
'No Commitments': 'Nessun impegno',
'No conflicts logged': 'Nessun conflitto registrato',
'No contact information available': 'Nessuna informazione di contatto disponibile',
'No contact method found': 'Nessun metodo di contatto trovato',
'No Contacts currently registered': 'Nessun Contatto attualmente registrato',
'No contacts currently registered': 'Nessun contatto attualmente registrato',
'No Credentials currently set': 'Nessuna Credenziale attualmente impostata',
'No data available': 'Nessun dato disponibile',
'No Data currently defined for this Theme Layer': 'Nessun dato attualmente definito per questo livello tematico',
'No data in this table - cannot create PDF!': 'Nessun dato in questa tabella - impossibile creare PDF!',
'No databases in this application': 'Nessun database in questa applicazione',
'No dead body reports available': 'Nessun reports disponibile sui cadaveri',
'No Details currently registered': 'Nessun dettaglio attualmente registrato',
'No Documents found': 'Nessun documento trovato',
'No Donors currently registered': 'Nessun donatore attualmente registrato',
'No education details currently registered': 'Nessun dettaglio di istruzione attualmente registrato',
'No Education Levels currently registered': 'Nessun Livello di Istruzione attualmente registrato',
'No Emails currently in InBox': 'Al momento non vi sono emails nella InBox.',
'No Emails currently in Outbox': 'Al momento non vi sono emails in uscita.',
'No entries currently available': 'Nessuna entrata attualmente disponibile',
'No entries found': 'Nessuna voce trovata',
'No entries matching the query': 'nessuna voce corrisponde alla richiesta',
'No entry available': 'Nessuna voce disponibile',
'No Event Types currently registered': 'Nessun tipo di evento attualmente registrato',
'No Events currently registered': 'Nessun evento attualmente registrato',
'No Events currently tagged to this Shelter': 'Nessun evento è attualmente contrassegnato per questa Struttura di Accoglienza.',
'No Facilities currently registered': 'Nessuna Struttura attualmente registrata',
'No Facilities currently registered in this event': 'Nessuna Struttura attualmente registrata per questo evento',
'No Facilities currently registered in this incident': 'Nessun servizio è attualmente registrato in questo incidente',
'No Facilities currently registered in this scenario': 'Nessuna Struttura attualmente registrata in questo scenario',
'No Facility Types currently registered': 'Nessun tipo di Struttura attualmente registrata',
'No Feature Layers currently defined': 'Nessun livello di funzione attualmente definito',
'No Flood Reports currently registered': 'Nessun reports di alluvione attualmente registrato',
'No forms to the corresponding resource have been downloaded yet.': 'Nessun modulo per la risorsa corrispondente è stato scaricato',
'No further users can be assigned.': 'Nessun ulteriore utente può essere assegnato',
'No groups currently allocated for this shelter': 'Nessun gruppo attualmente assegnato a questa Struttura di Accoglienza',
'No Groups currently defined': 'nessun gruppo attualmente definito',
'No Groups currently registered': 'Nessun gruppo attualmente registrato',
'No Hospitals currently registered': 'Nessun ospedale attualmente registrato',
'No Human Resources currently assigned to this incident': 'Nessuna risorsa umana è attualmente assegnata a questo incidente',
'No Human Resources currently registered in this event': 'Nessuna Risorsa Umana attualmente registrata in questo caso',
'No Human Resources currently registered in this scenario': 'Nessuna risorsa umana attualmente registrata in questo scenario',
'No Identification Report Available': 'Nessun reports di identificazione disponibile',
'No Identities currently registered': 'Nessuna identità attualmente registrata',
'No Image': 'Nessuna immagine',
'No Images currently registered': 'Nessuna immagine attualmente registrata',
'No Impact Types currently registered': 'Nessun tipo di impatto attualmente registrato',
'No Impacts currently registered': 'Nessun impatto attualmente registrato',
'No Incident Reports currently registered': 'Nessun Reports di incidente attualmente registrato',
'No Incident Reports currently registered for this event': 'Nessun Reports incidente attualmente registrato per questo evento',
'No Incident Reports currently registered in this incident': 'Nessun Reports incidente attualmente registrato in questo evento',
'No Incident Types currently registered': 'Nessun tipo di incidente attualmente registrato',
'No Incidents currently registered in this event': 'Nessun incidente attualmente registrato in questo evento',
'No Incoming Shipments': 'Nessuna spedizione in ingresso',
'No Item Categories currently registered': 'Nessun elemento Categorie attualmente registrato',
'No Item Packs currently registered': 'Nessun pacco di articoli attualmente registrato',
'No Items currently registered': 'Non vi sono elementi registrati al momento',
'No Items currently registered in this Inventory': 'Nessun articolo attualmente registrato in questo inventario',
'No jobs configured': 'Nessun lavoro configurato',
'No jobs configured yet': 'Nessun lavoro è stato ancora configurato ',
'No Keys currently defined': 'Nessun tasto attualmente definito',
'No Kits currently registered': 'Non ci sono Kit registrati al momento',
'No Layers currently configured in this Profile': 'Nessun livello attualmente configurato in questo profilo',
'No Layers currently defined': 'Nessun livello attualmente definito',
'No Layers currently defined in this Symbology': 'Nessun livello attualmente definito in questa Simbologia',
'No Level 1 Assessments currently registered': 'Nessuna Valutazione di Livello 1 è attualmente registrata',
'No Level 2 Assessments currently registered': 'Nessuna valutazione di livello 2 attualmente registrata',
'No Location Hierarchies currently defined': 'Nessuna gerarchia di posizione attualmente definita',
'No location information defined!': 'Nessuna informazione sulla posizione definita!',
'No location known for this person': 'Alcuna ubicazione nota per questa persona',
'No Locations currently available': 'Nessuna ubicazione attualmente disponibile',
'No Locations currently registered': 'Nessuna ubicazione attualmente registrata',
'No locations found for members of this team': 'Nessun percorso trovato per i membri di questa squadra',
'No Locations found for this Organization': 'Nessuna ubicazione trovata per questa Organizzazione',
'No log entries matching the query': 'Nessuna Voce di registro corrispondente alla query',
'No Mailing List currently established': 'Nessuna Lista di Posta attualmente stabilita',
'No Map Configurations currently defined': 'Nessuna Configurazione della mappa attualmente definita',
'No Map Configurations currently registered in this event': 'Nessuna Configurazione di mappa attualmente registrata in questo caso',
'No Map Configurations currently registered in this incident': 'Nessuna configurazione di mappa attualmente registrata in questo incidente',
'No Map Configurations currently registered in this scenario': 'Nessuna configurazione di mappa attualmente registrata in questo scenario',
'No Markers currently available': 'Nessun Indicatore attualmente disponibile',
'No Match': 'Nessuna corrispondenza',
'No match': 'Nessuna corrispondenza',
'No Matching Catalog Items': 'Nessuna Voce di catalogo corrispondente',
'No Matching Items': 'Nessun elemento corrispondente',
'No Matching Records': 'Nessun record corrispondente',
'No matching records found': 'Nessun record corrispondente trovato',
'No Members currently registered': 'Nessun Membro attualmente registrato',
'No Memberships currently defined': 'Nessuna adesione attualmente definita',
'No Messages currently in InBox': 'Al momento non vi sono messaggi nella InBox',
'No Messages currently in Outbox': 'Nessun messaggio al momento in uscita',
'No Messages currently in the Message Log': 'Non ci sono messaggi attualmente nel messaggio del registro ',
'No messages in the system': 'Nessun messaggio nel sistema',
'No Mobile Commons Settings currently defined': 'Nessun mobile Commons impostazioni attualmente definite ',
'No Modem Channels currently defined': 'Nessun Canale Modem attualmente definiti',
'No Need Types currently registered': 'Nessun tipo di necessità attualmente registrato',
'No Needs currently registered': 'Nessuna necessità attualmente registrata',
'No Networks currently recorded': 'Nessuna rete attualmente memorizzata',
'No Office Types currently registered': 'Nessun tipo di ufficio attualmente registrato',
'No Offices currently registered': 'No Uffici attualmente registrati',
'No Offices found!': 'Nessun ufficio trovato!',
'No options available': 'Nessuna opzione disponibile',
'no options available': 'Nessuna opzione disponibile',
'No options currently available': 'Nessuna opzione attualmente disponibile',
'No Organization Types currently registered': 'Nessun tipo di Organizzazione attualmente registrato',
'No Organizations currently registered': 'Nessuna organizzazione attualmente registrati',
'No Parsers currently connected': 'Nessun decodificatore attualmente connesso ',
'No peers currently registered': 'Nessun peer attualmente registrato',
'No pending registrations found': 'Nessuna registrazione in sospeso trovata',
'No pending registrations matching the query': 'Nessuna registrazione in sospeso che corrisponde alla query',
'No People currently registered in this camp': 'Nessuna persona attualmente registrata in questo campo',
'No People currently registered in this shelter': 'Nessuno è attualmente registrato in questa Struttura di Accoglienza',
'No people currently registered in this shelter': 'Nessuno è attualmente registrato in questa Struttura di Accoglienza',
'No person record found for current user.': "Non è stato trovato alcun record persona per l'utente corrente.",
'No Persons currently registered': 'Nessuna persona attualmente registrata',
'No Persons currently reported missing': 'Nessuna persona attualmente riportata mancante',
'No Persons found': 'Nessuna Persona trovata',
'No Photos found': 'Nessuna foto trovata',
'No Picture': 'Nessuna fotografia',
'No PoI Types currently available': 'Nessun tipo PoI attualmente disponibile',
'No Points of Interest currently available': 'Nessun PoI disponibile',
'No PoIs available.': 'Nessun PoI disponibile',
'No Population Statistics currently registered': 'Nessuna statistica sulla popolazione attualmente registrata',
'No Posts available': 'Nessun Post disponibili',
'No Posts currently tagged to this event': 'Nessuna pubblicazione attualmente contrassegnata a questo evento',
'No Presence Log Entries currently registered': 'Nessuna voce di presenza attualmente registrata nel log',
'No problem group defined yet': 'Nessun gruppo di problema ancora definito',
'No Problems currently defined': 'Nessun problema attualmente definito',
'No Professional Experience found': 'Nessuna Esperienza Professionale trovata',
'No Profiles currently have Configurations for this Layer': 'Nessun Profilo ha attualmente configurazioni per questo livello',
'No Projections currently defined': 'Nessuna proiezione attualmente definita',
'No Projects currently registered': 'Nessun progetto attualmente registrato',
'No Query currently defined': 'No Query attualmente definito ',
'No Rapid Assessments currently registered': 'Nessuna Valutazioni rapida attualmente registrata',
'No Ratings for Skill Type': 'Nessuna Valutazione per il tipo di capacità',
'No Received Items currently registered': 'Nessuna voce ricevuta attualmente registrata',
'No Received Shipments': 'Nessuna Spedizione Ricevuta',
'No Records currently available': 'Nessun record attualmente disponibile',
'No records found': 'Nessun record trovato',
'No records in this resource. Add one more records manually and then retry.': 'Nessun record in questa risorsa. Aggiungere uno o più record manualmente e quindi riprovare.',
'No records matching the query': 'Nessun record corrispondente alla domanda',
'No records to review': 'Nessun record da rivedere',
'No Regions currently registered': 'Nessuna Regione attualmente registrata',
'No report specified.': 'Nessun report specificato',
'No reports available.': 'Nessun reports disponibile.',
'No reports currently available': 'Nessun reports attualmente disponibile',
'No Request Items currently registered': 'Nessuna richiesta attualmente registrata',
'No Requests': 'Nessuna Richiesta',
'No requests found': 'nessuna richiesta trovata',
'No Resource Types defined': 'Nessun tipo di Risorsa attualmente registrato',
'No Resources assigned to Incident': "Non vi sono risorse assegnate all'incidente ",
'No resources currently reported': 'Nessuna risorsa attualmente riportata',
'No Resources in Inventory': "Nessuna risorsa nell'Inventario",
'No Response': 'Nessuna risposta',
'No Restrictions': 'Nessuna restrizione',
'No Rivers currently registered': 'Nessun Fiume attualmente registrato',
'No role to delete': 'Nessun ruolo da cancellare',
'No roles currently assigned to this user.': 'Nessun ruolo attualmente assegnato a questo utente',
'No Roles currently defined': 'Ruoli correntemente non definiti',
'No Roles defined': 'Nessun Ruolo definito',
'No Rooms currently registered': 'Nessuna stanza attualmente registrata',
'No RSS Channels currently defined': 'Nessun Canale RSS attualmente definiti',
'No saved filters': 'Nessun filtro salvato',
'No Scenarios currently registered': 'Nessuno scenario attualmente registrato',
'No Sections currently registered': 'Nessuna sezione attualmente registrata',
'No Sectors currently registered': 'Nessun settore attualmente registrato',
'No Sectors found for this Organization': 'Nessun settore associato a questa Organizzazione',
'No Senders Whitelisted': 'No mittenti Whitelist ',
'No Sent Items currently registered': 'Nessun elemento inviato attualmente registrato',
'No Sent Shipments': 'Nessuna spedizione inviata',
'No service profile available': 'Nessun profilo di servizio disponibile',
'No Services currently registered': 'Nessun Servizio attualmente registrato',
'No Services found for this Organization': 'Nessun servizio associato a questa Organizzazione',
'No Settings currently defined': 'Nessuna impostazione attualmente definita',
'No Shelter Services currently registered': 'Nessun servizio della Struttura di Accoglienza attualmente registrato',
'No Shelter Statuses currently registered': 'Nessuno Stato della Struttura di Accoglienza attualmente registrato',
'No Shelter Types currently registered': 'Nessun tipo di Struttura di Accoglienza attualmente registrato',
'No Shelters currently registered': 'Nessuna Struttura di Accoglienza attualmente registrata',
'No Shelters currently tagged to this event': 'Nessuna Struttura di Accoglienza attualmente contrassegnata per questo evento',
'No skills currently set': 'Nessuna capacità correntemente impostata',
'No SMS currently in InBox': 'No SMS attualmente in entrata',
'No SMS currently in Outbox': 'No SMS attualmente in uscita ',
'No SMS Outbound Gateways currently registered': 'No SMS Gateway in uscita attualmente registrato ',
'No SMTP to SMS Channels currently registered': 'Nessun canale attualmente registrato per SNMP a SMS',
'No Solutions currently defined': 'Nessuna soluzione attualmente definita',
'No Staff currently registered': 'Nessun Personale attualmente registrato',
'No staff or volunteers currently registered': 'Nessun personale o volontari attualmente registrati',
'No Staff Types currently registered': 'Nessun tipo di personale attualmente registrato',
'No status information available': 'Non sono disponibili informazioni sullo stato',
'No Statuses currently defined': 'Nessuno Stato attualmente definito',
'No Subscription available': 'Nessuna sottoscrizione disponibile',
'No Subsectors currently registered': 'Non ci sono sottosettori attualmente registrati',
'No Support Requests currently registered': 'Nessuna richiesta di supporto attualmente registrata',
'No Survey Answers currently entered.': 'Non ci sono Risposte al Sondaggio attualmente immesse.',
'No Survey Questions currently registered': 'Nessuna domanda sul sondaggio attualmente registrata',
'No Survey Series currently registered': 'Nessuna serie del sondaggio attualmente registrata',
'No Survey Template currently registered': "Nessuna Modello d'Indagine attualmente registrato",
'No Symbologies currently defined': 'Nessuna simbologia attualmente definita',
'No Symbologies currently defined for this Layer': 'Nessuna simbologia attualmente definita per questo livello',
'No synchronization': 'Nessuna sincronizzazione',
'No tasks currently registered': 'Nessuna attività attualmente registrata',
'No Tasks currently registered in this incident': 'Non vi sono attività attualmente registrati in questo incidente',
'No Tasks with Location Data': 'Nessuna attività con dati di ubicazione',
'No Teams currently registered': 'Nessuna Squadra attualmente registrato',
'No template found!': 'Nessun modello trovato!',
'No Themes currently defined': 'Nessun tema attualmente definito',
'No Tickets currently registered': 'Nessun ticket attualmente registrato',
'No Tracks currently available': 'Nessuna traccia attualmente disponibile',
'No Tropo Channels currently registered': 'Nessun Canale Tropo attualmente registrati ',
'No Tweets Available.': 'Nessun Tweets disponibili. ',
'No Tweets currently in InBox': 'Nessuna Tweets attualmente in InBox ',
'No Tweets currently in Outbox': 'No Tweets attualmente in uscita ',
'No Twilio Channels currently defined': 'Nessun Canale Twilio attualmente definito ',
'No units currently registered': 'Nessuna unità attualmente registrati',
'No Users currently registered': 'Nessun utente attualmente registrato',
'No users with this role at the moment.': 'Nessun utente con questo ruolo al momento',
"No UTC offset found. Please set UTC offset in your 'User Profile' details. Example: UTC+0530": 'Nessuna differenza UTC trovata. Si prega di impostare la differenza UTC nei dettagli del vostro "Profilo Utente". Ad esempio: UTC+0530',
'No volunteer availability registered': 'Nessuna disponibilità di volontari registrata',
'No Volunteer Cluster Positions': 'Nessuna Posizione per Gruppi di Volontari',
'No Volunteer Cluster Types': 'Nessun Tipo per Gruppi di Volontari',
'No Volunteer Clusters': 'Nessun Gruppo di Volontari',
'No Volunteers currently registered': 'Nessun Volontario attualmente registrato',
'No Warehouses currently registered': 'Nessuna Warehouse attualmente registrata',
'No Web API Channels currently registered': 'Nessun Canali API Web attualmente registrato',
'Non-structural Hazards': 'Rischi non Strutturali',
'None': 'Nessuno',
'none': 'Nessuno',
'None (no such record)': 'Nessuno (no record)',
'None of the above': 'Nessuno dei precedenti',
'Noodles': 'Tagliatelle',
'Normal': 'Normale',
'normal': 'normale',
'Normal Job': 'Professione standard',
'NOT %s AND NOT %s': 'NOT %s AND NOT %s',
'NOT %s OR NOT %s': 'NOT %s OR NOT %s',
'not accessible - no cached version available!': 'Non accessibile - versione cache non disponibile!',
'not accessible - using cached version from': 'Non accessibile - in uso versione cache da',
'Not Applicable': 'Non applicabile',
'Not Authorised!': 'Non Autorizzato!',
'Not Authorized': 'Non autorizzato',
'Not Available': 'Non disponibile',
'Not Defined': 'Non definito',
'Not installed or incorrectly configured.': 'Non installato o configurato in modo non corretto.',
'Not Parsed': 'Non analizzato',
'Not Possible': 'Impossibile',
'Not Set': 'Non impostato',
'not specified': 'non specificato',
'Not yet a Member of any Group': 'Non ancora Membro di alcun Gruppo',
'Not yet a Member of any Team': 'Non ancora Membro di nessuna squadra',
'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'Notare che questo elenco mostra solo volontari attivi. Per visualizzare tutte le persone registrate nel sistema, ricercare invece da questo pannello',
'Note that when using geowebcache, this can be set in the GWC config.': 'Si noti che quando si utilizza geowebcache, questo può essere impostato in GWC config.',
'Note: Make sure that all the text cells are quoted in the csv file before uploading': 'Nota: assicurarsi che tutte le celle di testo siano tra virgolette nel file csv prima del caricamento',
'Notice to Airmen': 'Avviso per Aviatori',
'Number': 'Numero',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Numero di ulteriori letti di tale tipo previsto per diventare disponibili in questa unità entro le prossime 24 ore.',
'Number of alternative places for studying': 'Numero di luoghi di studio alternativi',
'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Numero di letti disponibili/vacanti di quel tipo in questa unità al momento della generazione reports.',
'Number of Columns': 'Numero di colonne',
'Number of deaths during the past 24 hours.': 'Numero di deceduti nelle precedenti 24 ore.',
'Number of Disasters': 'Numero di disastri',
'Number of discharged patients during the past 24 hours.': 'Numero di ricoverati nelle precedenti 24 ore',
'Number of doctors': 'Numero di medici',
'Number of evacuees registered in the shelter for day and night': 'Numero di sfollati registrati nella Struttura di Accoglienza per il giorno e la notte',
'Number of evacuees registered in this housing unit (Day and Night)': 'Numero di sfollati registrati in questa unità abitativa (giorno e notte)',
'Number of evacuees registered in this housing unit (Night)': 'Numero di sfollati registrati in questa unità abitativa (Notte)',
'Number of Facilities': 'Numero di strutture',
'Number of in-patients at the time of reporting.': 'Numero di ricoverati al momento della creazione del report.',
'Number of newly admitted patients during the past 24 hours.': 'Numero di pazienti appena ammessi durante le ultime 24 ore.',
'Number of non-medical staff': 'Quantità di personale non-medico',
'Number of nurses': 'Numero di infermieri',
'Number of Patients': 'Numero di pazienti',
'Number of people registered in the shelter for night only': 'Numero di persone registrate nella Struttura di Accoglienza solo per la notte',
'Number of private schools': 'Numero di scuole private',
'Number of public schools': 'Numero di scuole pubbliche',
'Number of religious schools': 'Numero di scuole religiose',
'Number of residential units': 'Numero di unità residenziali',
'Number of residential units not habitable': 'Numero di unità residenziali non abitabili',
'Number of Resources': 'Numero di risorse',
'Number of Rows': 'Numero di righe',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Numero di posti letto vacanti / disponibili in questo ospedale. Aggiornato automaticamente dai report giornalieri.',
'Number of vacant/available units to which victims can be transported immediately.': 'Numero di unità vacanti / disponibili presso cui le vittime possono essere trasportate immediatamente.',
'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': 'Numero o codice utilizzato per contrassegnare il luogo di ricerca, ad esempio il codice contrassegno, le coordinate di griglia, numero di riferimento del sito o simili (se disponibile)',
'Number or Label on the identification tag this person is wearing (if any).': 'Numero o etichetta sulla targhetta di identificazione che questa persona sta indossando (se presente).',
'Number/Percentage of affected population that is Female & Aged 0-5': 'Numero / percentuale di popolazione colpita che è femmina & di età compresa tra 0 - 5',
'Number/Percentage of affected population that is Female & Aged 13-17': 'Numero / percentuale di popolazione colpita che è femmina & di età compresa tra 13 - 17',
'Number/Percentage of affected population that is Female & Aged 18-25': 'Numero / percentuale della popolazione colpita che è di sesso femmina e di età compresa tra i 18 e i 25 anni',
'Number/Percentage of affected population that is Female & Aged 26-60': 'Numero / percentuale di popolazione colpita che è femmina e di età compresa tra i 26 ed i 60 anni',
'Number/Percentage of affected population that is Female & Aged 6-12': 'Numero / percentuale di popolazione colpita che è di sesso femminile e di età compresa tra 6 e 12 anni',
'Number/Percentage of affected population that is Female & Aged 61+': 'Numero / percentuale di popolazione colpita che è femmina e di età maggiore di 61 anni',
'Number/Percentage of affected population that is Male & Aged 0-5': "Numero / percentuale di popolazioni colpita che è di sesso maschile ed ha un'età compresa tra 0 e 5 anni",
'Number/Percentage of affected population that is Male & Aged 13-17': 'Numero / percentuale di popolazioni colpita che è di sesso maschile e di età tra i 13 ed i 17 anni',
'Number/Percentage of affected population that is Male & Aged 18-25': "Numero / percentuale di popolazione colpita di sesso maschile e ha un'età tra i 18 ed i 25 anni",
'Number/Percentage of affected population that is Male & Aged 26-60': 'Numero/percentuale della popolazione affetta che è Maschio & e nella fascia 26-60 anni',
'Number/Percentage of affected population that is Male & Aged 6-12': 'Numero/percentuale della popolazione affetta che è Maschio & e nella fascia 6-12 anni',
'Number/Percentage of affected population that is Male & Aged 61+': 'Numero/percentuale della popolazione affetta che è Maschio & più di 61 anni',
'Nursery Beds': 'Letti della Nursery',
'Nutrition': 'Nutrizione',
'Nutrition problems': 'Problemi di alimentazione',
'NZSEE Level 1': 'NZSEE Livello 1',
'NZSEE Level 2': 'NZSEE Livello 2',
'Object': 'Oggetto',
'Observer': 'Osservatore',
'Obsolete': 'Obsoleto',
'obsolete': 'Obsoleto ',
'Obstetrics/Gynecology': 'Ostetricia / Ginecologia',
'occupied': 'Occupato ',
'OCR Form Review': 'Revisione del modulo OCR',
'OCR module is disabled. Ask the Server Administrator to enable it.': "Il modulo OCR è disabilitato. Chiedere all'Amministratore del Server di abilitarlo",
'OCR review data has been stored into the database successfully.': 'I dati di revisione OCR sono stati memorizzati nel database con successo',
'OD Coordinator': 'Coordinatore OD',
'Office': 'Ufficio',
'Office added': 'Ufficio aggiunto',
'Office Address': 'Indirizzo Ufficio',
'Office deleted': 'Ufficio eliminato',
'Office Details': "Dettagli dell'ufficio",
'Office Phone': 'Telefono Ufficio',
'Office Type': 'Tipo di Ufficio',
'Office Type added': "Aggiunto Tipo d'Ufficio",
'Office Type deleted': "Cancellato Tipo d'Ufficio",
'Office Type Details': "Dettagli sul tipo d'Ufficio",
'Office Type updated': "Aggiornato Tipo d'Ufficio",
'Office Types': "Tipi d'Ufficio",
'Office updated': 'Ufficio aggiornato',
'Offices': 'Uffici',
'Offices & Warehouses': 'Uffici & Magazzini',
'Offline Sync': 'Sincronizzazione Offline',
'Offline Sync (from USB/File Backup)': 'Sincronizzazione Offline (da Backup su USB/File)',
'OK': 'Ok',
'Older people as primary caregivers of children': "Anziani come addetti primari all' assistenza dei bambini",
'Older people in care homes': 'Anziani nelle case di cura',
'Older people participating in coping activities': 'Anziani partecipanti ad attività di resistenza',
'Older person (>60 yrs)': 'anziani (età superiore ai 60 anni)',
'on %(date)s': 'il %(date)s',
'On by default?': 'Per impostazione predefinita?',
'On by default? (only applicable to Overlays)': 'Per impostazione predefinita? (applicabile solo alle coperture)',
'On Scene': 'Sul posto',
'once': 'una volta',
'One time cost': 'Costo una tantum',
'One Time Cost': 'Costo una tantum',
'One-time': 'Una volta',
'One-time costs': 'Costi una tantum',
'Only showing accessible records!': 'Mostra solo record accessibili!',
'Oops! something went wrong on our side.': 'Ohi! Qualcosa è andato storto dalla nostra parte.',
'Oops! Something went wrong...': 'Ohi! Qualcosa è andato storto......',
'Opacity': 'Opacità',
'Opacity (1 for opaque, 0 for fully-transparent)': 'Opacità (1 per opaco, 0 per completamente trasparente)',
'Open': 'Apri',
'Open area': 'Area aperta',
'Open Chart': 'Aprire Grafico',
'open defecation': "defecazione all'aperto",
'Open Map': 'Apri Mappa',
'Open recent': 'Aprire recente',
'Open Report': 'Apri Report',
'Open Table': 'Apri Tabella',
'Open Tasks for Project': 'Apri le attività relative al progetto',
'Opening Times': 'Tempi di apertura',
'OpenStreetMap Layer': 'Livello OpenStreetMap ',
'OpenStreetMap OAuth Consumer Key': 'Chiave OpenStreetMap OAuth Consumer ',
'OpenStreetMap OAuth Consumer Secret': 'Segreto OpenStreetMap OAuth Consumer',
'OpenWeatherMap Layer': 'Livello OpenWeatherMap',
'Operating Rooms': 'Sale operatorie',
'Operator': 'Operatore',
'optional': 'Opzionale ',
'Optional link to an Incident which this Assessment was triggered by.': "Collegamento facoltativo all'incidente che ha attivato questa valutazione.",
'Optional password for HTTP Basic Authentication.': "Password opzionale per l'autenticazione HTTP Basic",
'Optional selection of a background color.': 'Selezione opzionale di un colore di sfondo',
'Optional selection of a MapServer map.': 'Selezione opzionale di una mappa MapServer',
'Optional selection of an alternate style.': 'Selezione opzionale di uno stile alternativo',
'Optional Subject to put into Email - can be used as a Security Password by the service provider': 'Oggetto opzionale da inserire nella mail - può essere usato come password di sicurezza dal fornitore del servizio.',
'Optional username for HTTP Basic Authentication.': "Nome dell'utente opzionale per l'autenticazione HTTP Basic",
'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': "Facoltativo. Se si desidera dare uno stile alle funzioni in base ai valori di un attributo, selezionare qui l'attributo da utilizzare.",
'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, the workspace is the FeatureType Name part before the colon(:).': "Facoltativo. In GeoServer, questo è il Workspace Namespace URI (non il nome!). All'interno della WFS getCapabilities, il workspace è la parte del nome FeatureType prima dei due punti (:).",
'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': "Facoltativo. In GeoServer, questo è l'URI (non il nome!) del Workspace Namespace. Nel WFS getCapabilities, questo è la parte del FeatureType Name prima dei due punti (:).",
'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'Facoltativo. Il nome di un elemento i cui contenuti dovrebbero essere una URL di un file di immagine messo nei Popup.',
'Optional. The name of an element whose contents should be put into Popups.': 'Facoltativo. Il nome di un elemento i cui contenuti dovrebbero essere messi dentro dei popup.',
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "Facoltativo. Il nome della colonna geometria. In PostGIS questo valore predefinito è 'the_geom'.",
'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'Facoltativo. Il nome dello schema. In Geoserver questo ha il formato http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.',
'Options': 'opzioni',
'or': 'O ',
'or import from csv file': 'o importa da file csv',
'OR Reason': 'o Motivo',
'OR Status': 'o Stato',
'OR Status Reason': 'o motivo di stato',
'Organisation designed to take care of evacuee': 'Organizzazione dedicata a prendersi cura degli evacuati',
'Organization': 'Organizzazione',
'Organization added': 'Organizzazione aggiunta',
'Organization deleted': 'Organizzazione eliminata',
'Organization Details': 'Dettagli organizzazione',
'Organization group': 'Gruppo Organizzazione',
'Organization Registry': 'Registro organizzazione',
'Organization Type': 'Tipo di Organizzazione',
'Organization Type added': 'Aggiunto tipo di Organizzazione',
'Organization Type deleted': 'Rimosso tipo di Organizzazione',
'Organization Type Details': 'Dettagli del tipo di Organizzazione',
'Organization Type updated': 'Aggiornato tipo di Organizzazione',
'Organization Types': 'Tipi di Organizzazione',
'Organization Units': 'Unità Organizzative',
'Organization updated': 'Organizzazione aggiornata',
'Organizational Development': 'Sviluppo Organizzativo',
'Organizations': 'Organizzazioni',
'Organizations / Teams / Facilities': 'Organizzazioni / Squadre / Strutture',
'Organized By': 'Organizzato da',
'Origin': 'Origine',
'Origin of the separated children': 'Origine del bambini separati',
'Original': 'Originale',
'OSM file generation failed!': 'Generazione del file OSM fallita!',
'OSM file generation failed: %s': 'Generazione del file OSM fallita: %s',
'Other': 'Altro',
'other': 'altro',
'Other (describe)': 'Altro (descrivere)',
'Other (specify)': 'Altro (specificare)',
'Other activities of boys 13-17yrs': 'Altre attività dei ragazzi di 13-17 anni',
'Other activities of boys 13-17yrs before disaster': "Altre attività dei ragazzi di 13-17 anni prima dell'emergenza",
'Other activities of boys <12yrs': 'Altre attività di ragazzi <12 anni',
'Other activities of boys <12yrs before disaster': "Altre attività di ragazzi minori di 12 anni prima dell'emergenza",
'Other activities of girls 13-17yrs': 'Altre attività delle ragazze di 13-17 anni',
'Other activities of girls 13-17yrs before disaster': "Altre attività delle ragazze di 13-17 anni prima dell'emergenza",
'Other activities of girls<12yrs': 'Altre attività di ragazze minori di 12 anni',
'Other activities of girls<12yrs before disaster': "Altre attività di ragazze minori di 12 anni prima dell'emergenza",
'Other Address': 'Altro Indirizzo',
'Other alternative infant nutrition in use': "Altra alternativa per l'alimentazione infantile in uso",
'Other alternative places for study': 'Altra luoghi alternativi di studio',
'Other assistance needed': 'Ulteriore assistenza necessaria',
'Other assistance, Rank': 'Altri aiuti, Classificazione',
'Other Cluster Rule Of Law': 'Altri raggruppamenti di legge',
'Other Cluster Safety Security': 'Altri raggruppamenti sicuri per la salute',
'Other current health problems, adults': 'Stato corrente di altri problemi sanitari, adulti',
'Other current health problems, children': 'Stato corrente di altri problemi sanitari, bambini',
'Other Details': 'Altri Dettagli',
'Other events': 'Altri eventi',
'Other Evidence': 'Altre prove',
'Other factors affecting school attendance': 'Altri fattori che influenzano la presenza a scuola',
'Other Faucet/Piped Water': "Altri rubinetti/tubi dell'acqua",
'Other Isolation': 'Altro Isolamento',
'Other major expenses': 'Altri grandi spese',
'Other Name': 'Altro nome',
'Other non-food items': 'Altre voci non alimentari',
'Other recommendations': 'Altre raccomandazioni',
'Other residential': 'Altro edificabile',
'Other school assistance received': 'Ricevuta assistenza da altra scuola',
'Other school assistance, details': 'Altri scuola assistenza, dettagli',
'Other school assistance, source': 'Altri scuola assistenza, origine',
'Other settings can only be set by editing a file on the server': 'Altre impostazioni possono essere impostate solo modificando un file sul server',
'Other side dishes in stock': 'Altri contorni in riserva',
'Other types of water storage containers': 'Altri tipi di contenitori per la raccolta di acqua',
'Other Users': 'Altri utenti',
'Other ways to obtain food': 'Altri modi per ottenere alimenti',
'Others': 'Altri ',
'Out': 'Fuori',
'Outbound Mail settings are configured in models/000_config.py.': 'Le impostazioni di posta in uscita sono configurate in models/000_config.py.',
'Outbox': 'In Uscita',
'Outgoing SMS handler': 'Gestore degli SMS in uscita',
'Outgoing SMS Handler': 'Gestore SMS in uscita',
'over one hour': "Oltre un'ora",
'Overall Hazards': 'Rischi complessivi',
'Overhead falling hazard': 'Rischio di caduta in testa',
'Overland Flow Flood': "Flusso terrestre dell'inondazione",
'Overlays': 'Sovrapposizioni',
'Owned Records': 'Record posseduti',
'Owned Resources': 'Risorse possedute',
'Pack': 'Pacco',
'Packs': 'Pacchi',
'Page': 'Pagina',
'PAHO UID': 'PAHO UID',
'Pan Map: keep the left mouse button pressed and drag the map': 'Mappa panoramica: tenere premuto il pulsante sinistro del mouse e trascinare la mappa',
'Parameters': 'Parametri',
'Parapets, ornamentation': 'Parapetti, ornamenti',
'Parent': 'Origine',
"Parent level should be higher than this record's level. Parent level is": 'Il livello origine dovrebbe essere superiore a questo livello di record. Il livello origine è',
'Parent needs to be of the correct level': "L'origine deve essere del livello corretto",
'Parent needs to be set': "L'origine deve essere impostata",
'Parent needs to be set for locations of level': "L'origine deve essere impostata per le ubicazioni di livello",
'Parent Office': 'Ufficio controllante',
'Parents/Caregivers missing children': 'Bambini scomparsi di Genitori/Curatori',
'Parse': 'Decodifica',
'Parsed': 'Decodifciato',
'Parser': 'Decodificatore',
'Parser connected': 'Decodificatore connesso',
'Parser Connection Details': 'Dettagli di connessione del decodificatore',
'Parser connection removed': 'Connessione decodificatore rimossa',
'Parser connection updated': 'Connessione decodificatore aggiornata',
'Parser Connections': 'Connessioni decodificatore',
'Parsers': 'Decodificatori',
'Parsing Status': 'Stato della Decodifica',
'Part of the URL to call to access the Features': "Parte dell' URL da utilizzare per accedere alle funzioni",
'Partial': 'parziale',
'Participant': 'Partecipante',
'Participant added': 'Partecipante aggiunto',
'Participant deleted': 'Partecipante eliminato',
'Participant Details': 'Dettagli Partecipante',
'Participant updated': 'Partecipante aggiornato',
'Participants': 'Partecipanti',
'Partner Organizations': 'Organizzazioni del Partner',
'Pashto': 'Pashtun',
'Pass': 'Passare',
'Passport': 'Passaporto',
'Password': 'Password',
"Password fields don't match": 'I campi Password non corrispondono',
'Password to use for authentication at the remote site.': "Password da utilizzare per l'autenticazione al sito remoto.",
'Path': 'Percorso',
'Pathology': 'Patologia',
'Patients': 'Pazienti',
'PDF File': 'File PDF',
'Pediatric ICU': 'ICU Pediatrico',
'Pediatric Psychiatric': 'Psichiatria Pediatrica',
'Pediatrics': 'Pediatria',
'Peer added': 'Peer aggiunto',
'Peer deleted': 'Peer eliminato',
'Peer Details': 'Dettagli Peer',
'Peer not allowed to push': 'Peer non sono consentiti per spingere',
'Peer Registration': 'Registrazione Peer',
'Peer Registration Details': 'Dettagli Registrazione Peer',
'Peer Registration Request': 'Richiesta Registrazione Peer',
'Peer registration request added': 'Richiesta registrazione peer aggiunta',
'Peer registration request deleted': 'Richiesta registrazione peer cancellata',
'Peer registration request updated': 'Richiesta registrazione peer aggiornata',
'Peer Type': 'Tipo Peer',
'Peer UID': 'UID Peer',
'Peer updated': 'Peer aggiornato',
'Peers': 'Peer',
'Pending': 'In sospeso',
'Pending Requests': 'Richieste in sospeso',
'people': 'Persone',
'People': 'Persone',
'People Infant': 'Popolazione infantile',
'People Man': 'Popolazione maschile',
'People Needing Food': 'Persone che necessitano di alimenti',
'People Needing Shelter': 'Persone che necessitano di ricovero',
'People Needing Water': 'Persone che necessitano di acqua',
'People Physical Impairments': 'Popolazione con invalidità fisica',
'People Registration': 'Registrazione di persone',
'People Reservation': 'Prenotazione di persone',
'People Trapped': 'Persone Intrappolate',
'People Woman': 'Popolazione femminile',
'per': 'Per ',
'Performance Rating': 'Valutazione delle prestazioni',
'Permanent Home Address': 'Indirizzo di Casa Permanente',
'Person': 'Persona',
'Person 1': 'Persona 1',
'Person 1, Person 2 are the potentially duplicate records': 'Persona 1, Persona 2 sono i potenziali record duplicati',
'Person 2': 'Persona 2',
'Person added': 'Persona aggiunta',
'Person added to Group': 'Persona aggiunta al Gruppo',
'Person added to Team': 'Persona aggiunta alla squadra',
'Person De-duplicator': 'Persona De-duplicator',
'Person deleted': 'Persona cancellata',
'Person Details': 'Dettagli della persona',
'Person details updated': 'Dettagli persona aggiornati',
'Person interviewed': 'Intervistato persona',
'Person must be specified!': 'La persona deve essere specificata!',
'Person or OU': 'Persona o OU',
'Person Registry': 'Registro Persona',
'Person removed from Group': 'Persona rimossa dal Gruppo',
'Person removed from Team': 'Persona rimossa dalla Squadra',
'Person who has actually seen the person/group.': 'Persona che ha visto effettivamente la persona/gruppo.',
"Person's Details": 'Dettagli della Persona',
"Person's Details added": 'Dettagli della Persona aggiunti',
"Person's Details deleted": 'Dettagli della Persona cancellati',
"Person's Details updated": 'Dettagli della Persona aggiornati',
'Person/Group': 'Persona/Gruppo',
'Personal': 'Personale',
'Personal Data': 'Dati personali',
'Personal Effects': 'Effetti personali',
'Personal Effects Details': 'Dettagli degli effetti personali',
'Personal impact of disaster': 'Impatto personale del disastro',
'Personal Map': 'Mappa Personale',
'Personal Profile': 'Profilo personale',
'Persons': 'Persone',
'Persons in institutions': 'Persone nelle istituzioni',
'Persons with disability (mental)': 'Persone con disabilità (mentali)',
'Persons with disability (physical)': 'Persone con disabilità (fisiche)',
"Persons' Details": 'Dettagli delle Persone',
'Philippine - Spoken': 'Filippino - Parlato',
'Philippine - Written': 'Filippino - Scritto',
'Phone': 'Telefono',
'Phone #': 'Telefono #',
'Phone 1': 'Telefono 1',
'Phone 2': 'Telefono 2',
'Phone number is required': 'Il numero di telefono è richiesto',
"Phone number to donate to this organization's relief efforts.": 'Numero di telefono per effettuare donazioni alle attività di soccorso di questa organizzazione.',
'Phone/Business': 'Telefono / Business',
'Phone/Emergency': 'Telefono / Emergenza',
'Phone/Exchange (Switchboard)': 'Smistamento Chiamate (Centralino)',
'Photo': 'Foto',
'Photo added': 'Foto aggiunta',
'Photo deleted': 'Foto eliminata',
'Photo Details': 'Dettagli Foto',
'Photo Taken?': 'Foto presa?',
'Photo updated': 'Foto aggiornata',
'Photograph': 'Fotografia',
'Photos': 'Foto',
'Physical Description': 'Descrizione fisica',
'Physical Safety': 'Sicurezza fisica',
'Picture': 'Immagine',
'Picture upload and finger print upload facility': 'Funzione di caricamento di immagine e impronta digitale',
'piece': 'Parte',
'PIL (Python Image Library) not installed': 'PIL (Python Image Library) non installato',
'PIN number': 'Codice PIN',
'pit': 'fossa',
'pit latrine': 'fossa per latrina',
'PL Women': 'PL Donne',
'Place': 'posizione',
'Place of Birth': 'Luogo di nascita',
'Place of Recovery': 'Luogo di Ripristino',
'Place on Map': 'Posizione sulla mappa',
'Places for defecation': 'Luoghi per la defecazione',
'Places the children have been sent to': 'Posti dove i bambini sono stati inviati',
'Planned': 'Pianificato',
'Playing': 'In gioco',
'Please choose a type': 'Seleziona un Tipo',
"Please come back after sometime if that doesn't help.": 'Si prega di ritornare dopo qualche tempo se quello non aiuta.',
'Please correct all errors.': 'La prego di correggere tutti gli errori.',
'Please enter a date of birth': 'Si prega di inserire una data di nascita',
'Please enter a first name': 'Si prega di inserire un nome',
'Please enter a last name': 'Si prega di inserire un cognome',
'Please enter a number only': 'Si prega di inserire solo un numero',
'Please enter a place of birth': 'Si prega di inserire un luogo di nascita',
'Please enter a site OR a location': "Immettere una sede O un' ubicazione",
'Please enter a valid email address': 'Si prega di inserire un indirizzo email valido',
'Please enter the first few letters of the Person/Group for the autocomplete.': 'Immettere le prime poche lettere della persona / gruppo per il completamento automatico.',
'Please enter the recipient': 'Si prega di immettere il destinatario',
'Please enter the recipient(s)': 'Si prega di immettere i destinatari',
'Please fill this!': 'Immettere questo!',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': "Si prega di fornire l'URL della pagina a cui si fa riferimento, una descrizione di ciò che si aspettava accadere & cosa effettivamente è successo.",
'Please report here where you are:': 'Si prega di riportare qui dove si è:',
'Please select': 'Si prega di selezionare',
'Please Select a Facility': 'Si prega di selezionare una Struttura',
'Please select a valid image!': "Si prega di selezionare un'immagine valida!",
'Please select another level': 'Si prega di selezionare un altro livello',
'Please select exactly two records': 'Si prega di selezionare esattamente due record',
'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': "La prego di sign-up con il telefono cellulare come questo ci consente l' invio di messaggi di testo. Prego includere piena Area codice.",
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': 'Si prega di specificare tutti i problemi e gli ostacoli con la gestione appropriata della malattia, in maniera dettagliata (in numeri, dove appropriato). È anche possibile aggiungere suggerimenti su come la situazione potrebbe essere migliorata.',
'Please use this field to record any additional information, including a history of the record if it is updated.': 'Si prega di utilizzare questo campo per registrare eventuali informazioni supplementari, inclusa la cronologia del record se viene aggiornato.',
'Please use this field to record any additional information, including any Special Needs.': 'Si prega di utilizzare questo campo per registrare eventuali informazioni supplementari, comprese eventuali esigenze particolari.',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'Si prega di utilizzare questo campo per registrare tutte le informazioni aggiuntive, come le ID di istanza Ushahidi. Includere una cronologia del record se viene aggiornato.',
'Pledge Support': 'Supporto al pegno',
'POI': 'POI',
'PoI': 'PoI',
'PoI Type added': 'Tipo PoI aggiunto',
'PoI Type deleted': 'Tipo PoI cancellato',
'PoI Type Details': 'Dettagli del tipo PoI',
'PoI Type updated': 'Tipo PoI aggiornato',
'PoI Types': 'Tipo PoI',
'Point': 'Punto',
'Point of Interest added': 'Punti di interesse aggiunti',
'Point of Interest deleted': 'Punti di interesse cancellati',
'Point of Interest Details': 'Dettagli dei punti di interesse',
'Point of Interest updated': 'Punti di interesse aggiornati',
'Points of Interest': 'Punti di interesse',
'PoIs': 'PoIs',
'PoIs successfully imported.': 'PoIs importati con successo',
'Poisoning': 'Avvelenamento',
'Poisonous Gas': 'Gas Velenoso',
'Police': 'Polizia',
'Poll': 'Sondaggio',
'Pollution and other environmental': 'Inquinamento ambientale e altri',
'Polygon': 'Poligono',
'Polygon reference of the rating unit': "Poligono di riferimento dell'unità di rating",
'Poor': 'Povero',
'Population': 'Popolazione ',
'Population (Day)': 'Popolazione (Giorno)',
'Population (Night)': 'Popolazione (Notte)',
'Population and number of households': 'Popolazione e il numero di famiglie',
'Population Availability (Day and Night)': 'Disponibilità di Popolazione (giorno e notte)',
'Population Availability (Night)': 'Disponibilità di Popolazione (Notte)',
'Population movement': 'Spostamento della popolazione',
'Population Statistic added': 'Statistica della popolazione aggiunta',
'Population Statistic deleted': 'Statistica della popolazione eliminata',
'Population Statistic Details': 'Dettagli della statistica della popolazione',
'Population Statistic updated': 'Statistica della popolazione aggiornata',
'Population Statistics': 'Statistiche della popolazione',
'Popup Fields': 'Campi del Popup',
'Popup Format': 'Formato del Popup',
'Popup Label': 'Etichetta del Popup',
'Port': 'Porta',
'Port Closure': 'Chiusura del porto',
'Portable App': 'Portable App',
'Position': 'Posizione',
'Position added': 'Posizione aggiunta',
'Position Catalog': 'Catalogo delle posizioni',
'Position deleted': 'Posizione eliminata',
'Position Details': 'Dettagli della posizione',
'Position updated': 'Posizione aggiornata',
'Positions': 'Posizioni',
'Postcode': 'Codice di avviamento postale',
'Posted on': 'Postato su',
'postponed': 'Rinviato',
'Poultry': 'Pollame',
'Poultry restocking, Rank': 'Ripopolamento del pollame, Classificazione',
'Pounds': 'libbre',
'Power Failure': 'Malfunzionamento di alimentazione',
'Power Supply Type': 'Tipo di alimentazione elettrica',
'Powered by Sahana': 'Powered by Sahana',
'Pre-cast connections': 'Connessioni prefabbricate',
'Preferred Name': 'Nome preferito',
'Pregnant women': 'Donne incinte',
'Preliminary': 'Preliminare',
'preliminary template or draft, not actionable in its current form': 'Bozza o maschera preliminare, non attivabile nella sua forma attuale',
'Presence': 'Presenza',
'Presence Condition': 'Condizione presenza',
'Presence in the shelter': 'Presenza nella Struttura di Accoglienza',
'Presence Log': 'Registro Presenze',
'Previous': 'Precedente',
'previous 100 rows': 'Precedenti 100 righe',
'Previous View': 'Vista precedente',
'Primary Occupancy': 'Occupazione principale',
'Print': 'Stampa',
'Priority': 'priorità',
'Priority from 1 to 9. 1 is most preferred.': 'Priorità da 1 a 9. 1 è il preferito. ',
'Privacy': 'Privacy',
'Private': 'Privato',
'Private Residence': 'Residenza privata',
'Problem': 'Problema',
'Problem added': 'Problema aggiunto',
'Problem Administration': 'Gestione del problema',
'Problem connecting to twitter.com - please refresh': 'Problema durante la connessione a twitter.com - Prego aggiornare',
'Problem deleted': 'Problema eliminato',
'Problem Details': 'Dettagli del problema',
'Problem Group': 'Gruppo di problemi',
'Problem Title': 'Titolo del problema',
'Problem updated': 'Problema aggiornato',
'Problems': 'problemi',
'Procedure': 'Procedura',
'Process Received Shipment': 'Elaborare la spedizione ricevuta',
'Process Shipment to Send': 'Elaborare la spedizione da inviare',
'Processed with KeyGraph?': 'Processato con KeyGraph?',
'Processing': 'In elaborazione',
'Procurement Plans': 'Piani di approviggionamento',
'Product Type Report': 'Report tipo di prodotto',
'Profession': 'Professione',
'Professional Experience': 'Esperienze professionali',
'Professional Experience added': 'Esperienza Professionale aggiunta',
'Professional Experience deleted': 'Esperienza Professionale cancellata',
'Professional Experience Details': 'Esperienza professionale Dettagli',
'Professional Experience updated': 'Esperienze professionali aggiornate',
'Profile': 'Profilo',
'Profile Configuration': 'Configurazione del profilo',
'Profile Configuration removed': 'Configurazione Profilo rimossa ',
'Profile Configuration updated': 'Configurazione profilo aggiornata',
'Profile Configurations': 'Configurazioni Profilo ',
'Profile Configured': 'Profilo configurato ',
'Profile Details': 'Dettagli del Profilo ',
'Profile Page': 'Pagina del Profilo ',
'Profile Picture': 'Foto del Profilo ',
'Profile Picture?': 'Foto Profilo?',
'Profiles': 'Profili ',
'Program': 'Programma',
'Program added': 'Programma aggiunto',
'Program deleted': 'Programma cancellato',
'Program Details': 'Dettagli del programma',
'Program Hours (Month)': 'Ore di programma (mese)',
'Program Hours (Year)': 'Ore di programma (Anno)',
'Program updated': 'Programma aggiornato',
'Programs': 'Programmi',
'Project': 'Progetto',
'Project added': 'Progetto aggiunto',
'Project deleted': 'Progetto eliminato',
'Project Details': 'Dettagli del progetto',
'Project has no Lat/Lon': 'Progetto non ha Latitudine / Longitudine',
'Project Status': 'Stato del progetto',
'Project Time Report': 'Report sui tempi di progetto',
'Project updated': 'Progetto aggiornato',
'Projection': 'Proiezione ',
'Projection added': 'Proiezione aggiunta',
'Projection deleted': 'Proiezione eliminata',
'Projection Details': 'Dettagli della proiezione ',
'Projection Type': 'Tipo di proiezione ',
'Projection updated': 'Proiezione aggiornata',
'Projections': 'Proiezioni',
'Projects': 'Progetti',
'Property reference in the council system': 'Riferimento proprietà nel sistema del consiglio',
'Protected resource': 'Risorsa protetta',
'Protection': 'protezione',
'Protocol': 'Protocollo',
'Provide a password': 'Fornire una password',
'Provide an optional sketch of the entire building or damage points. Indicate damage points.': "Fornire uno schizzo facoltativo dell'intero edificio o dei punti danneggiati. Indicare i punti di danneggiamento.",
'Provide Metadata for your media files': 'Fornire dei metadati per i file multimediali',
'Provincia': 'Provincia',
'Proxy Server URL': 'Proxy Server URL',
'Psychiatrics/Adult': 'Psichiatri / Adulto',
'Psychiatrics/Pediatric': 'Psichiatri / Pediatrico',
'Public': 'Pubblico',
'Public and private transportation': 'Trasporto pubblico e privato',
'Public assembly': 'Pubblica assemblea',
'Public Event': 'Evento pubblico',
'Published on': 'Pubblicato su',
'Pull tickets from external feed': 'Estrarre i biglietti dal feed esterno',
'Punjabi': 'del Punjab',
'Purchase Date': 'Data di acquisto',
'Push tickets to external system': 'Spingere i biglietti nel sistema esterno',
'Pyroclastic Flow': 'flusso piroclastico',
'Pyroclastic Surge': 'Sovraccarico piroclastico',
'Python GDAL required for Shapefile support!': 'Python GDAL necessario per il supporto Shapefile! ',
'Python needs the ReportLab module installed for PDF export': "Modulo ReportLab non disponibile all' interno dell' esecuzione Python - ne è necessaria l'installazione per l' emissione di PDF.",
'Python Serial module not available within the running Python - this needs installing to activate the Modem': "Modulo Seriale Python non disponibile all' interno del Python in esecuzione - è necessario effettuare l'installazione per attivare il Modem",
'Quadricycles. Mass less or equal to 400 kg (550 kg for vehicles used for the transport of goods, maximum engine power less than or equal to 15 kW': 'Quadricicli. Massa inferiore o pari a 400 kg (550 kg per veicoli destinati al trasporto di merci, potenza massima del motore inferiore o uguale a 15 kW',
'Quantity': 'Quantità',
'Quantity Committed': 'Quantità Allocata',
'Quantity Fulfilled': 'Quantità Soddisfatte',
"Quantity in %s's Inventory": "Quantità nell'inventario di %s",
'Quantity in Transit': 'Quantità in transito',
'Quarantine': 'Quarantena',
'Queries': 'Quesiti',
'Query': 'Quesito',
'Query added': 'Interrogazione aggiunta',
'Query deleted': 'Interrogazione cancellata',
'Query Feature': 'Caratteristica della Query ',
'Query updated': 'Interrogazione aggiornata',
'Queryable?': 'Interrogabile?',
'Race': 'Corsa',
'Radio Callsign': 'Segnale di Chiamata Radio',
'Radiological Hazard': 'Rischio Radiologico',
'Radiology': 'Radiologia',
'Railway Accident': 'Incidente ferroviario',
'Railway Hijacking': 'Dirottamento Ferroviario',
'Rain Fall': 'Caduta di pioggia',
'Rapid Assessment': 'Valutazione rapida',
'Rapid Assessment added': 'Agginta valutazione rapida',
'Rapid Assessment deleted': 'Eliminata valutazione rapida',
'Rapid Assessment Details': 'Dettagli valutazione rapida',
'Rapid Assessment updated': 'Aggiornata valutazione rapida',
'Rapid Assessments': 'Valutazioni Rapide',
'Rapid Assessments & Flexible Impact Assessments': "Valutazioni Rapide & Valutazioni d'impatto flessibile",
'Rapid Close Lead': 'Rapida Stretta PIOMBO',
'Rapid Data Entry': 'Inserimento dati rapido ',
'Rating': 'Valutazione ',
'Raw Database access': 'Accesso diretto al Database',
'RC frame with masonry infill': 'Struttura in cemento armato con tamponamento in muratura',
'Receive': 'ricevere',
'Receive %(opt_in)s updates:': 'Ricevere aggiornamenti %(opt_in)s:',
'Receive New Shipment': 'Ricevere Nuova Spedizione',
'Receive Shipment': 'Ricevi spedizione',
'Receive this shipment?': 'Ricevere questa spedizione?',
'Receive updates': 'Ricevi gli aggiornamenti ',
'Received': 'ricevuto',
'Received By': 'ricevuto da',
'Received By Person': 'Ricevuto dalla Persona',
'Received Item deleted': 'Elemento ricevuto eliminato',
'Received Item Details': "Dettagli dell'elemento ricevuto",
'Received Item updated': 'Elemento ricevuto aggiornato',
'Received Shipment canceled': 'Spedizione ricevuta annullata',
'Received Shipment canceled and items removed from Inventory': "Spedizione ricevuta annullata ed elementi rimossi dall'inventario",
'Received Shipment Details': 'Dettagli della Spedizione ricevuta',
'Received Shipment updated': 'Spedizione ricevuta aggiornata',
'Received Shipments': 'Spedizioni ricevute ',
'Receiving and Sending Items': 'Ricezione e invio Elementi',
'Recipient': 'Destinatario',
'Recipient(s)': 'Destinatari',
'Recipients': 'Destinatari',
'Recommendations for Repair and Reconstruction or Demolition': 'Raccomandazioni per la riparazione e ricostruzione o demolizione',
'Record': 'Record',
'Record added': 'Record aggiunto ',
'Record any restriction on use or entry': "Record qualsiasi limitazione sull'utilizzo o entrata",
'Record approved': 'Record approvato ',
'Record could not be approved.': 'Il record non poteva essere approvato. ',
'Record could not be deleted.': 'Il record non può essere cancellato. ',
'Record deleted': 'Record eliminato',
'Record Details': 'Record Dettagli ',
'record does not exist': 'Il record non esiste',
'record id': 'Id record',
'Record last updated': 'Ultimo aggiornamento Record',
'Record not found': 'record non trovato',
'Record not found!': 'Record non trovato!',
'Record Saved': 'Record Salvato',
'Record updated': 'Record aggiornati',
'Record Updates': 'Aggiornamenti Record',
'Recording and Assigning Assets': 'Registrazione e Assegnazione Asset',
'Records': 'Record',
'records deleted': 'Record cancellati',
'Records merged successfully.': 'Record unificati con successo',
'Recovery': 'ripristino',
'Recovery Request': 'richiesta di ripristino',
'Recovery Request added': 'richiesta di ripristino aggiunta',
'Recovery Request deleted': 'richiesta di ripristino cancellata',
'Recovery Request updated': 'richiesta di ripristino aggiornata',
'Recovery Requests': 'richieste di ripristino',
'Recurring': 'Ricorrente',
'Recurring Cost': 'Costo ricorrente',
'Recurring cost': 'Costo ricorrente',
'Recurring costs': 'Costi ricorrenti',
'Red': 'rosso',
'red': 'Rosso',
'Red Cross / Red Crescent': 'Croce Rossa / Mezzaluna Rossa',
'Reference Document': 'Documento di riferimento',
'Refresh Rate (seconds)': 'Frequenza di aggiornamento (in secondi)',
'Region': 'Regione',
'Region added': 'Regione aggiunta',
'Region deleted': 'Regione cancellata',
'Region Details': 'Dettagli della Regione',
'Region Location': 'Ubicazione nella regione',
'Region updated': 'Regione aggiornata',
'Regional': 'Regionale',
'Regione': 'Regione',
'Regions': 'Regioni',
'Register': 'Registrare',
'Register As': 'Registrare come ',
'Register for Account': 'Registrare un profilo ',
'Register Person': 'Registra persona',
'Register Person into this Camp': 'Registra la Persona in questo Campo',
'Register Person into this Shelter': 'Registra la Persona in questa Struttura di Accoglienza',
'Register them as a volunteer': 'Registrare loro come volontario',
'Registered People': 'Persone registrate',
'Registered users can': 'Gli utenti registrati possono',
'Registered users can %(login)s to access the system': 'Gli utenti registrati possono fare %(login)s per accedere al sistema ',
'Registration': 'Registrazione',
'Registration added': 'Registrazione aggiunta',
'Registration Details': 'Dettagli di registrazione',
'Registration entry deleted': 'Voce di registrazione cancellata',
'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': 'La registrazione è ancora in attesa di approvazione (%s) - si prega di attendere fino a conferma ricevuta.',
'Registration key': 'Registration key',
'Registration not permitted': 'Registrazione non autorizzata ',
'Registration updated': 'Registrazione aggiornata',
'Rehabilitation/Long Term Care': 'Riabilitazione / Assistenza a lungo termine',
'Reinforced masonry': 'muratura rinforzata',
'Reject': 'Respingere',
'Rejected': 'Rifiutato',
'Related event: %s': 'Eventi relativi: %s',
'Relationship': 'Relazione',
'Relief': 'Sollievo',
'Relief Site': 'Luogo di soccorso',
'Relief Team': 'Squadra di soccorso',
'Religion': 'Religione',
'Religious': 'Religioso',
'Religious Leader': 'Leader religioso',
'Reload': 'Ricaricare ',
'reload': 'Ricarica',
'Relocate as instructed in the <instruction>': 'Riposizionare come indicato in <instruction>',
'Remove': 'Rimuovere ',
'Remove Asset from this event': 'Rimuovi Asset da questo evento',
'Remove Asset from this incident': 'Rimuovi bene da questo incidente',
'Remove Asset from this scenario': 'Rimuovi Asset da questo scenario',
'Remove Coalition': 'Rimuovi la coalizione',
'Remove Event for this Shelter': 'Rimuovi evento per questa Struttura di Accoglienza',
'Remove existing data before import': 'Rimuovere i dati esistenti prima di sottomettere',
'Remove Facility from this event': 'Rimuovere la Struttura da questo evento',
'Remove Facility from this incident': 'Rimuovi Struttura per questo incidente',
'Remove Facility from this scenario': 'Rimuovi la Struttura da questo scenario',
'Remove Feature: Select the feature you wish to remove & press the delete key': 'Rimuovere Caratteristica: Selezionare la caratteristica che si desidera rimuovere e premere il tasto di cancellazione ',
'Remove Human Resource from this event': 'Rimuovere Risorse umane da questo evento',
'Remove Human Resource from this incident': 'Rimuovi risorsa umana da questo incidente',
'Remove Human Resource from this scenario': 'Rimuovere Risorse umane da questo scenario',
'Remove Incident from this event': 'Rimuovi incidente da questo evento',
'Remove Incident Report from this event': 'Rimuovi Report incidente da questo evento',
'Remove Incident Report from this incident': 'Rimuovi Report incidente da questo incidente',
'Remove Incident Type from this event': 'Rimuovi tipo di incidente da questo evento',
'Remove Item from Inventory': "Rimuovere l'elemento dall'inventario",
'Remove Layer from Profile': 'Rimuovere livello dal profilo ',
'Remove Layer from Symbology': 'Rimuovere livello dal Simbologia ',
'Remove Map Configuration from this event': 'Rimuovere configurazione di associazione da questo evento',
'Remove Map Configuration from this incident': 'Rimuovi configurazione mappa da questo incidente',
'Remove Map Configuration from this scenario': 'Rimuovere configurazione di associazione da questo scenario',
'Remove Network': 'Rimuovi la rete',
'Remove Person from Group': 'Rimuovere Persona dal Gruppo',
'Remove Person from Team': 'Rimuovere Persona dalla Squadra',
'Remove Profile Configuration for Layer': 'Rimuovere la configurazione del profilo per livello ',
'Remove selection': 'Rimuovere la selezione ',
'Remove Shelter for this Event': 'Rimuovi la Struttura di Accoglienza per questo evento',
'Remove Skill': 'Rimuovere Capacità',
'Remove Symbology from Layer': 'Rimuovere Simbologia dal livello ',
'Remove Tag for this Event from this Post': 'Rimuovi etichetta per questo evento da questo annuncio',
'Remove Task from this incident': 'Rimuovi task da questo incidente',
'Remove this asset from this event': 'Eliminare questo asset da questo evento',
'Remove this asset from this scenario': 'Eliminare questo asset da questo scenario',
'Remove this entry': 'Rimuovi questa voce ',
'Removed from Group': 'Rimosso dal Gruppo',
'Removed from Team': 'Rimosso dalla Squadra',
'Repair': 'Riparazione',
'Repaired': 'Riparato',
'Repeat': 'Ripetere ',
'Repeat your password': 'Ripetere la password',
'Replace': 'Sostituire',
'Replace if Master': 'Sostituire se Master',
'Replace if Newer': 'Sostituisci se più recente',
'Reply': 'Replicare',
'Report': 'Report',
'Report added': 'Report aggiunto',
'Report Another Assessment...': "Registrare un'altra valutazione...",
'Report deleted': 'Report eliminato',
'Report Details': 'Dettagli Report',
'Report my location': 'Segnala la mia posizione',
'Report of': 'Report di',
'Report Options': 'Opzioni report',
'Report Resource': 'Report sulla Risorsa',
'Report the contributing factors for the current EMS status.': "Segnalare i fattori che hanno contribuito allo stato corrente dell'EMS.",
'Report the contributing factors for the current OR status.': "Segnalare i fattori che hanno contribuito allo stato corrente dell'OR.",
'Report them as found': 'Segnalare che sono stati ritrovati',
'Report them missing': 'Segnalare che sono dispersi',
'Report Types Include': 'Tipi di Report includono',
'Report updated': 'Report aggiornato',
'Reported By': 'Riportato da',
'Reporter': 'Reporter',
'Reporter Name': 'Nome Reporter',
'Reporting on the projects in the region': 'Relazione sui progetti nella regione',
'ReportLab not installed': 'ReportLab non installato',
'Reports': 'Reports',
'reports successfully imported.': 'reports importati correttamente.',
'Repositories': 'Repositories',
'Repository Base URL': 'Repository Base URL',
'Repository Name': 'Repository Name',
'representation of the Polygon/Line.': 'Rappresentazione del poligono / linea.',
'Request': 'Richiesta',
'Request Added': 'Richiesta Aggiunta',
'Request added': 'Richiesta aggiunta',
'Request Canceled': 'Richiesta annullata',
'Request deleted': 'Richiesta eliminata',
'Request Details': 'Dettagli richiesti',
'Request for Role Upgrade': 'Richiesta per Aggiornamento di ruolo',
'Request From': 'Richiesta da',
'Request Item': 'Richiesta Voce',
'Request Item added': 'Voce di richiesta aggiunta',
'Request Item deleted': 'Voce di Richiesta eliminata',
'Request Item Details': 'Dettagli Voce di Richiesta',
'Request Item from Available Inventory': "Richiesta voce dall'inventario disponibile",
'Request Item updated': 'Voce Richiesta aggiornata',
'Request Items': 'Voci di Richiesta',
'Request Status': 'Stato richiesta',
'Request Type': 'Tipo di richiesta',
'Request Updated': 'Richiesta aggiornata',
'Request updated': 'Richiesta aggiornata',
'Request, Response & Session': 'Richiesta, Risposta & Sessione',
'Requested': 'Richiesto',
'requested': 'Richiesto ',
'Requested By': 'Richiesto da',
'Requested by': 'Richiesto da',
'Requested By Facility': 'Richiesto da Struttura ',
'Requested From': 'Richiesto da',
'Requested Items': 'Articoli richiesti',
'Requested on': 'Su richiesta',
'Requester': 'Richiedente',
'Requests': 'Richieste',
'Requests Management': 'Gestione richieste',
'Requires Login': "È necessario effettuare l'accesso",
'Requires Login!': "Richiede l'accesso!",
'Rescue and recovery': 'Soccorso e recupero',
'Reservation done': 'Prenotazione effettuata',
'Reservation entry deleted': 'Prenotazione cancellata',
'Reservation updated': 'Prenotazione aggiornata',
'Reset': 'Azzerare',
'Reset Password': 'Reimposta password',
'Resize Feature: Select the feature you wish to resize & then Drag the associated dot to your desired size': 'Ridimensiona Caratteristica: Selezionare la caratteristica che si desidera ridimensionare e poi trascinare il punto associato alla dimensione desiderata',
'Residential Building': 'Palazzo residenziale',
'Resolve': 'Risolvi',
'Resolve Conflict': 'Risolvi conflitto',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': 'Risolvi collegamento apre una nuova schermata che consente di risolvere questi record duplicati e aggiornare il database.',
'Resource': 'risorsa',
'Resource added': 'Risorsa aggiunta',
'Resource deleted': 'Risorsa cancellata',
'Resource Details': 'Dettagli risorsa',
'Resource Inventory': 'Inventario Risorse',
'Resource Name': 'Nome Risorsa',
'Resource Type': 'Tipo Risorsa',
'Resource Type added': 'Aggiunto tipo di Risorsa',
'Resource Type deleted': 'Cancellato tipo di Risorsa',
'Resource Type Details': 'Dettagli su tipo di Risorse',
'Resource Type updated': 'Aggiornato il tipo di Risorsa',
'Resource Types': 'Tipi di Risorsa',
'Resource updated': 'Risorsa aggiornata',
'Resources': 'Risorse',
'Respiratory Infections': 'Infezioni respiratorie',
'Responded': 'Risposto ',
'Responding': 'Rispondendo',
'Response': 'Risposta',
'REST Filter': 'Filtro REST',
'Restricted Access': 'Accesso limitato',
'Restricted Use': 'Utilizzo limitato',
'Results': 'Risultati',
'Retail Crime': 'Crimine commerciale',
'retired': 'ritirato',
'Retrieve Password': 'Recupera password',
'retry': 'Riprova',
'Return': 'Ritorna',
'Return to Request': 'Tornare alla Richiesta',
'Returned': 'Reso',
'Returned From': 'Restituito da',
'Revert Entry': 'Ripristina Entry',
'Review': 'Revisione',
'Review Incoming Shipment to Receive': 'Revisione della spedizione in arrivo da ricevere',
'Rice': 'Riso',
'Riot': 'Sommossa',
'river': 'fiume',
'River': 'Fiume',
'River added': 'Fiume aggiunto',
'River deleted': 'Fiume eliminato',
'River Details': 'Dettagli del fiume',
'River updated': 'Fiume aggiornato',
'Rivers': 'Fiumi',
'Road Accident': 'Incidente stradale',
'Road Closed': 'Strada Chiusa',
'Road Conditions': 'Condizioni della strada',
'Road Delay': 'Ritardo del viaggio',
'Road Hijacking': 'Dirottamento della strada',
'Road Usage Condition': 'Condizione di utilizzo della strada',
'Rockfall': 'Caduta massi',
'Role': 'Ruolo ',
'Role added': 'Ruolo aggiunto',
'Role assigned to User': 'Ruolo assegnato a utente',
'Role deleted': 'Ruolo eliminato',
'Role Details': 'Dettagli ruolo',
'Role Name': 'Nome ruolo',
'Role Required': 'ruolo Richiesto',
'Role updated': 'Ruolo aggiornato',
'Role Updated': 'Ruolo Aggiornato',
'Role-based': 'Basato sul ruolo',
'Roles': 'Ruoli',
'Roles currently assigned': 'Ruoli attualmente assegnati',
'Roles of User': 'Ruoli di utente',
'Roles Permitted': 'Ruoli consentiti',
'Roles updated': 'Ruoli aggiornati',
'Roof tile': 'tegola',
'Roofs, floors (vertical load)': 'Tetti, pavimenti (Carico verticale)',
'Room': 'Sala',
'Room added': 'Sala aggiunta',
'Room deleted': 'Stanza eliminata',
'Room Details': 'Dettagli della stanza',
'Room updated': 'Stanza aggiornata',
'Rooms': 'Stanze',
'Rotate Feature: Select the feature you wish to rotate & then Drag the associated dot to rotate to your desired location': 'Ruotare Caratteristica: Selezionare la caratteristica che si desidera ruotare e quindi trascinare il punto associato a ruotare alla posizione desiderata',
'Rows in table': 'Righe nella tabella',
'Rows selected': 'Righe selezionate',
'RSS': 'RSS',
'RSS Channel deleted': 'Canale RSS cancellato',
'RSS Channel Details': 'Dettagli canale RSS',
'RSS Channel updated': 'Canale RSS aggiornato',
'RSS Channels': 'Canali RSS',
'RSS Feed': 'RSS Feed',
'RSS Post deleted': 'RSS Post cancellato',
'RSS Post Details': 'Dettagli RSS Post',
'RSS Posts': 'RSS Posts',
'Run every': 'Esegui ogni ',
'Run Interval': 'Gira a Intervallo',
'Running Cost': "Costo dell'esecuzione",
'Safe environment for vulnerable groups': 'Ambiente sicuro per gruppi vulnerabili',
'Safety Assessment Form': 'Modulo Valutazione della sicurezza',
'Safety of children and women affected by disaster?': 'Sicurezza dei bambini e delle donne colpite dalla catastrofe?',
'Sahana access granted': 'Accesso a Sahana concesso',
'Sahana Administrator': 'Amministratore di Sahana',
'Sahana Blue': 'Sahana Blu',
'Sahana Community Chat': 'Conversazione della Comunità Sahana',
'Sahana Eden': 'Sahana Eden',
'Sahana Eden <=> Other': 'Sahana Eden <=> Altri',
'Sahana Eden for Italy': "Sahana Eden per l'Italia",
'Sahana Eden Humanitarian Management Platform': 'Piattaforma di Gestione Umanitaria Sahana Eden',
'Sahana Eden Website': 'Sito di Sahana Eden',
'Sahana Facilitators may choose to moderate and either ban any user or edit any content which they believe has breached this Code of Conduct.': 'I facilitatori di Sahana possono scegliere di moderare e di bloccare qualsiasi utente o modificare qualsiasi contenuto che appaia una violazione del presente Codice di Condotta.',
'Sahana Green': 'Sahana Green',
'Sahana Steel': 'Sahana Steel',
'Salted Fish': 'Pesce Salato',
'Sanitation problems': 'Problemi igienici',
'Saturday': 'Sabato',
'Save': 'Salva',
'Save and Continue Editing': 'Salvare e continuare ',
'Save as New Map?': 'Salvare come Nuova Mappa?',
'Save current options as new filter': 'Salva le opzioni attuali come nuovo Filtro',
'Save Map': 'Salvare la mappa',
'Save: Default Lat, Lon & Zoom for the Viewport': 'Salvare: impostazione predefinita Latitudine, Longitudine e zoom per la finestra ',
'Saved': 'Salvato',
'Saved Filters': 'Filtri Salvati',
'Saved Maps': 'Mappe salvate',
'Saved.': 'Salvato.',
'Saving...': 'Salvataggio...',
'Scale of Results': 'Scala dei risultati',
'Scanned Copy': 'Copia digitalizzata',
'Scanned Forms Upload': 'Carica Form digitalizzati',
'Scenario added': 'Scenario aggiunto',
'Scenario deleted': 'Scenario eliminato',
'Scenario Details': 'Dettagli scenario',
'Scenario updated': 'Scenario aggiornato',
'Scenarios': 'Scenari',
'Schedule': 'Pianificazione',
'Scheduled Jobs': 'Lavori pianificati ',
'Schema': 'Schema ',
'School': 'Scuola',
'School activities': 'Scuola attività',
'School assistance': 'Assistenza scolastica',
'School attendance': 'Presenza a scuola',
'School Closure': 'Chiusura scuola',
'School destroyed': 'Scuola distrutta',
'School heavily damaged': 'Scuola fortemente danneggiata',
'School Holidays only': 'Vacanze scolastiche',
'School Lockdown': 'Chiusura forzata della Scuola',
'School Teacher': 'Insegnante di Scuola',
'School tents received': 'Tende scolastiche ricevute',
'School tents, source': 'Tende scolastiche, origine',
'School used for other purpose': 'Scuola utilizzata per altri scopi',
'School/studying': 'Scuola/Studio',
'Schools': 'Scuole',
'Scope': 'Ambito',
'Seaport': 'Porto ',
'Search': 'Ricerca',
'Search Activities': 'Attività di ricerca',
'Search Activity Report': 'Ricerca relazione di attività',
'Search Addresses': 'Cerca indirizzi',
'Search After Save?': 'Ricerca dopo il salvataggio?',
'Search All Requested Items': 'Ricerca tutti i pezzi richiesti',
'Search All Requested Skills': 'Ricerca tutte le capacità richieste',
'Search Alternative Items': 'Ricerca alternative',
'Search and Edit Group': 'Ricercare e modificare il gruppo',
'Search and Edit Individual': 'Ricercare e modificare il singolo',
'Search Assessment Summaries': 'Ricercare i riepiloghi delle Valutazioni',
'Search Assessments': 'Ricercare le Valutazioni',
'Search Asset Log': 'Ricerca nei Log degli Asset',
'Search Assets': 'Ricercare i beni',
'Search Baseline Type': 'Ricerca il tipo di linea base',
'Search Baselines': 'Ricerca delle Linee base',
'Search Brands': 'Ricerca i marchi',
'Search Budgets': 'Ricerca i Bilanci',
'Search Bundles': 'Ricerca i BUNDLE',
'Search by skills': 'Ricerca per Capacità',
'Search by Skills': 'Ricerca per Capacità',
'Search Camp Services': 'Ricerca Servizi del Campo',
'Search Camp Types': 'Ricercare Tipi di Campi',
'Search Camps': 'Ricerca Campi',
'Search Catalog Items': 'Ricerca Elementi a Catalogo',
'Search Catalogs': 'Cercare sui cataloghi',
'Search Certificates': 'Ricerca Certificati',
'Search Certifications': 'Ricercare Certificazioni',
'Search Checklists': 'Ricerca Elenchi',
'Search Cluster Subsectors': 'Ricerca Sottosettori di cluster',
'Search Clusters': 'Ricerca Cluster',
'Search Commitment Items': 'Ricerca Voci Impegno',
'Search Commitments': 'Ricerca Impegni',
'Search Competencies': 'Ricercare Competenze',
'Search Competency Ratings': 'Ricerca Valutazioni Competenze',
'Search Contact Information': 'Ricerca informazioni di contatto',
'Search Contacts': 'Cerca contatti',
'Search Course Certificates': 'Ricerca Dei Certificati Del Corso',
'Search Courses': 'Ricerca Corsi',
'Search Credentials': 'Ricerca Credenziali',
'Search Documents': 'Ricerca documenti',
'Search Donors': 'Ricerca Dei Donatori',
'Search Entries': 'Ricerca delle voci',
'Search Events': 'Ricerca Eventi',
'Search Facilities': 'Ricerca Degli Impianti',
'Search Feature Layers': 'Ricercare i livelli di funzione',
'Search Flood Reports': "Ricercare i reports dell'alluvione",
'Search for a Location by name, including local names.': "Ricercare un' ubicazione per nome, inclusi i nomi locali.",
'Search for a Person': 'Ricerca una persona',
'Search for a Project': 'Ricerca per progetto',
'Search for a shipment by looking for text in any field.': 'Ricerca una spedizione cercando del testo in ogni campo.',
'Search for a shipment received between these dates': 'Ricerca di una spedizione ricevuta tra queste date',
'Search for an asset by text.': 'Cerca un asset per testo.',
'Search for an item by category.': 'Ricercare un elemento in base alla categoria.',
'Search for an item by text.': 'Ricerca un elemento in base al testo.',
'Search for an Organization by name or acronym': "Ricerca un'Organizzazione in base al nome o sigla",
'Search for an Organization by name or acronym.': "Ricerca un'Organizzazione in base al nome o sigla",
'Search for asset by country.': 'Ricerca di asset per paese.',
'Search for office by country.': 'Ricerca di un ufficio per paese.',
'Search for office by organization or branch.': "Cerca l'ufficio per organizzazione o Unità",
'Search for office by organization.': "Cerca l'ufficio per organizzazione.",
'Search for office by text.': 'Ricerca di un ufficio per testo.',
'Search for Persons': 'Ricercare per Persona',
'Search for Staff or Volunteers': 'Ricerca di personale o Volontari',
'Search for warehouse by country.': 'Ricerca di magazzini per paese.',
'Search for warehouse by organization.': 'Ricerca di magazzini per organizzazione.',
'Search for warehouse by text.': 'Ricerca di magazzino per testo.',
'Search Groups': 'Ricerca Gruppi',
'Search here for a person record in order to:': 'Cerca qui un record di una persona al fine di:',
'Search Human Resources': 'Ricerca delle risorse umane',
'Search Identity': 'Ricerca Identità',
'Search Images': 'Ricerca immagini',
'Search Impact Type': 'Ricerca Tipo di Impatto',
'Search Impacts': 'Ricerca Impatti',
'Search Incident Reports': 'Ricerca Rapporti di incidente',
'Search Inventory Items': 'Ricerca voci in inventario',
'Search Inventory items': 'Ricerca voci in inventario',
'Search Item Categories': 'Ricerca delle voci di categoria',
'Search Item Packs': "Ricerca pacco dell'articolo",
'Search Items': 'Ricerca degli articoli',
'Search Job Roles': 'Ricerca ruolo professionale',
'Search Keys': 'Chiavi di ricerca',
'Search Kits': 'Corredi di Ricerca',
'Search Layers': 'Ricerca I Livelli',
'Search Level 1 Assessments': 'Ricerca Valutazioni di livello 1',
'Search Level 2 Assessments': 'Ricerca Valutazioni di livello 2',
'Search location in Geonames': 'Cerca località in Geonames ',
'Search Locations': 'Cerca collocazioni',
'Search Log Entry': 'Ricerca voce di Log',
'Search Map Configurations': 'Ricerca Configurazioni Mappa',
'Search Markers': 'Ricerca Indicatori',
'Search Members': 'Ricerca Membro',
'Search Membership': 'Ricerca Condizione di socio',
'Search Memberships': 'Ricerca Appartenenze',
'Search messages': 'Ricerca messaggi',
'Search Missions': 'Ricerca Missioni',
'Search Need Type': 'Ricerca tipo di esigenza',
'Search Needs': 'Ricerca Esigenze',
'Search Offices': 'Ricerca Uffici',
'Search Organizations': 'Ricerca Organizzazioni',
'Search Peer': 'Ricerca Pari',
'Search Personal Effects': 'Ricerca di effetti personali',
'Search Persons': 'Cerca persone',
'Search Photos': 'Cerca foto',
'Search Population Statistics': 'Ricerca Statistiche sulla Popolazione',
'Search Positions': 'Ricerca Posizioni',
'Search Problems': 'Ricerca Problemi',
'Search Projections': 'Ricerca Le Proiezioni',
'Search Projects': 'Ricerca Progetti',
'Search Queries': 'Ricerca le queries',
'Search Query': 'Ricerca la query',
'Search Rapid Assessments': 'Ricerca Rapide Valutazioni',
'Search Received Items': 'Ricerca gli item ricevuti',
'Search Received Shipments': 'Ricerca Spedizioni Ricevute',
'Search Records': 'Record di ricerca',
'Search Registations': 'Ricerca Registrazioni',
'Search Registration Request': 'Richiesta di registrazione di ricerca',
'Search Report': 'Relazione di Ricerca',
'Search Request': 'Richiesta di Ricerca',
'Search Request Items': 'Cerca Elementi della Richiesta',
'Search Requested Items': 'Cerca Elementi Richiesti',
'Search Requests': 'Ricerca Richieste',
'Search Resources': 'Ricerca Risorse',
'Search Results': 'Risultati della ricerca ',
'Search Rivers': 'Ricerca Fiumi',
'Search Roles': 'Cerca ruoli',
'Search Rooms': 'Ricerca Locali',
'Search Scenarios': 'Scenari di ricerca',
'Search Sections': 'Ricerca Sezioni',
'Search Sectors': 'Ricerca Settori',
'Search Sent Items': 'Ricerca elementi inviati',
'Search Sent Shipments': 'Ricerca Spedizioni Inviate',
'Search Service Profiles': 'Ricerca profili di servizio',
'Search Settings': 'Impostazioni di ricerca',
'Search Shelter Services': 'Ricerca i servizi della Struttura di Accoglienza',
'Search Shelter Types': 'Ricerca i tipi di Struttura di Accoglienza',
'Search Shelters': 'Ricerca Strutture di Accoglienza',
'Search Skill Equivalences': 'Ricerca Capacità Equivalenti',
'Search Skill Provisions': 'Ricerca disponibilità Capacità',
'Search Skill Types': 'Ricerca Tipi di Capacità',
'Search Skills': 'Ricerca Capacità',
'Search Solutions': 'Ricerca soluzioni',
'Search Staff & Volunteers': 'Ricerca Personale & Volontari ',
'Search Staff or Volunteer': 'Ricerca Personale o volontario',
'Search Staff Types': 'Ricerca Tipi di Personale',
'Search Status': 'Cerca stato',
'Search Subscriptions': 'Ricerca sottoscrizioni',
'Search Subsectors': 'Ricerca Sottosettori',
'Search Support Requests': 'Ricerca Richieste di supporto',
'Search Tasks': 'Ricerca attività',
'Search Teams': 'Ricerca squadre',
'Search Themes': 'Ricerca dei Temi',
'Search Tickets': 'Ricerca biglietti',
'Search Tracks': 'Ricerca tracce',
'Search Trainings': 'Ricerca corso formazione',
'Search Twitter Tags': 'Ricerca tag di Twitter',
'Search Units': 'Ricerca Unità',
'Search Users': 'Ricerca Utenti',
'Search Volunteer Availability': 'Ricerca disponibilità volontari',
'Search Volunteers': 'Ricerca Volontari',
'Search Warehouses': 'Ricerca Depositi',
'Searched?': 'Cercato ?',
'Searching for different groups and individuals': 'Ricerca per diversi gruppi e persone',
'Secondary Server (Optional)': 'Server secondario (opzionale) ',
'seconds': 'Secondi',
'Seconds must be a number between 0 and 60': 'Secondi deve essere un numero compreso tra 0 e 60',
'Seconds must be a number.': 'Secondi (deve essere un numero). ',
'Seconds must be less than 60.': 'Secondi (deve essere inferiore a 60)',
'Secretary General': 'Segreteria generale',
'Section deleted': 'Sezione eliminata',
'Section Details': 'Dettagli della sezione',
'Section updated': 'Sezione aggiornata',
'Sections': 'sezioni',
'Sector': 'Settore',
'Sector added': 'Settore aggiunto',
'Sector added to Organization': "Settore aggiunto all'organizzazione",
'Sector deleted': 'Settore cancellato',
'Sector Details': 'Dettagli del Settore',
'Sector removed from Organization': "Settore rimosso dall'Organizzazione",
'Sector updated': 'Settore aggiornato',
'Sector(s)': 'Settore(i)',
'Sectors': 'Settori',
'Security': 'Sicurezza',
'Security Arrest': 'Arresto di sicurezza',
'Security Dangerous Area': 'Area di Sicurezza pericolosa',
'Security Mine': 'Mine di sicurezza',
'Security Officer': 'Ufficiale di sicurezza',
'Security problems': 'Problemi di sicurezza',
'Security Status': 'Stato della sicurezza',
'See all': 'Visualizza tutto',
'See All Entries': 'Visualizza tutte le Voci',
'see comment': 'vedi commento',
'see more': 'Visualizza altro',
'See unassigned recovery requests': 'Vedere richieste di recupero non assegnate',
'Seen': 'Visto',
'Select': 'Selezionare ',
'Select %(location)s': 'Seleziona %(location)s',
"Select 2 records from this list, then click 'Merge'.": "Seleziona 2 record da questo elenco, quindi fare clic su 'unione'. ",
'Select a location': "Seleziona un'ubicazione",
"Select a person in charge for status 'assigned'": "Selezionare una persona responsabile per stato 'assegnato'",
'Select a question from the list': "Selezionare una domanda dall'elenco",
'Select a range for the number of total beds': 'Selezionare un intervallo per il numero totale di letti',
"Select a Room from the list or click 'Create Room'": "Selezionare una Stanza dall'elenco o fare clic su 'Crea stanza'",
'Select All': 'Seleziona Tutto ',
'Select all': 'Seleziona tutto ',
'Select all that apply': 'Seleziona tutto ciò che si applica',
'Select an existing bin': 'Seleziona un bin esistente',
'Select an image to upload. You can crop this later by opening this record.': "Selezionare un'immagine da caricare. È possibile ritagliare l'immagine successivamente aprendo questo record. ",
'Select an Organization to see a list of offices': "Selezionare un' organizzazione per visualizzare un elenco degli uffici",
'Select Existing Location': 'Selezionare Ubicazione esistente ',
'Select from Registry': 'Selezionare dal Registro di sistema ',
'Select Items from the Request': 'Selezionare le voci dalla Richiesta',
'Select Items from this Inventory': 'Selezionare gli oggetti da questo inventario',
'Select Modules for translation': 'Seleziona i moduli da tradurre',
'Select Modules which are to be translated': 'Seleziona i moduli che devono essere tradotti',
'Select one or more option(s) that apply': 'Selezionare una o più opzioni possibili ',
'Select resources to import': 'Selezionare le risorse da importare',
'Select the default site.': 'Selezionare il sito predefinito. ',
'Select the option that applies': "Selezionare l'opzione che si applica ",
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'Selezionare le sovrapposizioni di valutazione e le attività relative ad ogni necessità al fine di individuare il divario.',
'Select the person assigned to this role for this project.': 'Selezionare la persona assegnata a questo ruolo per questo progetto.',
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": 'Selezionare questo se tutte le ubicazioni specifiche necessitano di un parent al livello più profondo della gerarchia di ubicazione. Ad esempio, se \' distretto\' è la piu\' piccola divisione nella gerarchia, tutte le ubicazioni specifiche dovrebbero avere un distretto come "parent".',
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": "Selezionare questa opzione se tutte le ubicazioni specifiche necessitano di un' ubicazione parent nella gerarchia di collocazione. Questo può aiutare nella configurazione di una 'regione' che rappresenta un'area interessato.",
'Select this if you need this resource to be mapped from site_id instead of location_id.': 'Selezionare questa opzione se avete bisogno che questa risorsa sia mappata da site_id invece che da location_id.',
'Select This Location': 'Scegliere questa Ubicazione ',
'Select to show this configuration in the Regions menu.': 'Selezionare per visualizzare questa configurazione nel menu Regions.',
'selected': 'Selezionato',
'Selected OCR Form has no pages. Use another revision of create a new revision by downloading a new Form.': "Il modulo OCR selezionato non ha pagine. Utilizzare un'altra revisione o creare una nuova revisione scaricando un nuovo modulo.",
'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'Selezionare se utilizzare un Modem, Tropo o altri Gateway per inviare SMS',
'Send': 'Invia',
'Send a message to this person': 'Invia un messaggio a questa persona ',
'Send a message to this team': 'Invia un messaggio a questa squadra',
'Send Alerts using Email &/or SMS': 'Invia segnalazioni tramite e-mail e / o SMS',
'Send Commitment as Shipment': 'Inviare impegno come Spedizione',
'Send Event Update': 'Manda aggiornamento evento',
'Send from %s': 'Inviare da %s',
'Send Message': 'Invia un messaggio ',
'Send message': 'Invia messaggio',
'Send new message': 'Invia nuovo messaggio',
'Send New Shipment': 'Invia nuova Spedizione',
'Send Notification': 'Invia notifica',
'Send Shipment': 'Invia spedizione',
'Sender': 'Mittente',
'Sender deleted': 'Mittente cancellato',
'Sender Priority updated': 'Priorità del mittente aggiornata',
'Sender Whitelisted': 'Lista bianca del mittente',
'Sends & Receives Alerts via Email & SMS': 'Invia & ricevi gli avvisi via e-mail & SMS',
'Senior (50+)': 'Senior (50+)',
'Sent': 'Inviato',
'Sent By': 'Inviato Da',
'Sent By Person': 'Inviato Da Persona',
'Sent Emails': 'Mails inviate',
'Sent Item deleted': 'Inviato elemento eliminato',
'Sent Item Details': 'Dettagli Item inviato',
'Sent Item updated': 'Inviato elemento aggiornato',
'Sent Shipment canceled': 'Spedizione inviata annullata',
'Sent Shipment canceled and items returned to Inventory': "Spedizione inviata annullata e articoli restituiti all'inventario",
'Sent Shipment Details': 'Dettagli della spedizione inviata',
'Sent Shipment updated': 'Spedizione inviata aggiornata',
'Sent Shipments': 'Spedizioni inviate',
'Sent SMS': 'SMS inviato',
'Sent Tweets': 'Tweets inviati',
'separated': 'separato',
'Separated children, caregiving arrangements': 'Bambini separati, accordi di tutela',
'separated from family': 'separato dalla famiglia',
'Serial Number': 'Numero di serie',
'Series': 'Serie',
'Server': 'Server',
'Service': 'Servizio',
'Service added': 'Servizio aggiunto',
'Service added to Organization': "Servizio aggiunto all'organizzazione",
'Service Catalog': 'catalogo servizi',
'Service deleted': 'Servizio cancellato',
'Service Details': 'Dettagli del Servizio',
'Service or Facility': 'Servizio o funzione',
'Service profile added': 'Profilo del servizio aggiunto',
'Service profile deleted': 'Profilo del servizio eliminato',
'Service profile updated': 'Profilo del servizio aggiornato',
'Service Record': 'Record di servizio ',
'Service removed from Organization': "Servizio rimosso dall'Organizzazione",
'Service updated': 'Servizio aggiornato',
'Services': 'Servizi',
'Services Available': 'Servizi disponibili',
'Set as my Default': 'Imposta come My Default',
'Set Base Site': 'Imposta sito di base',
'Set By': 'Impostato da',
'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': 'Impostare True per consentire la modifica di questo livello della gerarchia di ubicazioni dagli utenti che non sono MapAdmins.',
'Setting added': 'Impostazione aggiunta',
'Setting deleted': 'Impostazione eliminata',
'Setting Details': 'Dettagli impostazione',
'Setting updated': 'Impostazione Aggiornata',
'Settings': 'Impostazioni',
'Settings updated': 'Impostazioni aggiornate',
'Settings were reset because authenticating with Twitter failed': "Le impostazioni sono state reimpostate perché l'autenticazione con Twitter non è riuscita",
'Settings which can be configured through the web interface are available here.': "Le impostazioni che possono essere configurate mediante l'interfaccia web sono disponibili qui",
'settore_monday': 'settore_monday',
'Severe': 'Grave',
'Severity': 'Severità',
'Sex': 'Sesso',
'Shapefile Layer': 'Livello shapefile ',
'Share': 'Condividere',
'Share a common Marker (unless over-ridden at the Feature level)': 'Condividere un Indicatore comune (a meno che non sia sovrascritto a livello funzione)',
'shaved': 'rasato',
'Shelter': 'Struttura di Accoglienza',
'Shelter & Essential NFIs': 'Struttura di Accoglienza & NFIs essenziali',
'Shelter added': 'Struttura di Accoglienza aggiunta',
'Shelter deleted': 'Struttura di Accoglienza eliminata',
'Shelter Details': 'Dettagli della Struttura di Accoglienza',
'Shelter ID': 'ID della Struttura di Accoglienza',
'Shelter Name': 'Nome della Struttura di Accoglienza',
'Shelter name': 'nome della Struttura di Accoglienza',
'Shelter Registration': 'Registrazione della Struttura di Accoglienza',
'Shelter Registry': 'Registro deila Struttura di Accoglienza',
'Shelter removed': 'Struttura di Accoglienza rimosso',
'Shelter Service': 'Servizio della Struttura di Accoglienza',
'Shelter Service added': 'Servizio della Struttura di Accoglienza aggiunto',
'Shelter Service deleted': 'Servizio della Struttura di Accoglienza eliminato',
'Shelter Service Details': 'Dettagli della Struttura di Accoglienza',
'Shelter Service updated': 'Servizio della Struttura di Accoglienza aggiornato',
'Shelter Services': 'Servizi della Struttura di Accoglienza ',
'Shelter Settings': 'Impostazioni della Struttura di Accoglienza',
'Shelter Status': 'Stato della Struttura di Accoglienza ',
'Shelter Status added': 'Stato della Struttura di Accoglienza aggiunto',
'Shelter Status deleted': 'Stato della Struttura di Accoglienza eliminato',
'Shelter Status Details': 'Dettagli sullo Stato della Struttura di Accoglienza',
'Shelter Status updated': 'Stato della Struttura di Accoglienza aggiornato',
'Shelter Statuses': 'Stati della Struttura di Accoglienza',
'Shelter Type': 'Tipo di Struttura di Accoglienza',
'Shelter Type added': 'Tipo di Struttura di Accoglienza aggiunto',
'Shelter Type deleted': 'Tipo di Struttura di Accoglienza eliminato',
'Shelter Type Details': 'Dettagli sul tipo di Struttura di Accoglienza',
'Shelter Type updated': 'Tipo di Struttura di Accoglienza aggiornato',
'Shelter Types': 'Tipi di Struttura di Accoglienza ',
'Shelter Types and Services': 'Tipi e servizi di una Struttura di Accoglienza ',
'Shelter updated': 'Struttura di Accoglienza aggiornata',
'Shelter/NFI Assistance': 'Struttura di Accoglienza/Assistenza NFI',
'Shelters': 'Strutture di Accoglienza',
'Shipment Created': 'Spedizione Creata',
'Shipment Items': 'Voci di Spedizione',
'Shipment Items received by Inventory': 'Voci di Spedizione ricevute da inventario',
'Shipment Items sent from Inventory': 'Voci di spedizione inviate da inventario',
'Shipment to Send': 'Spedizione da inviare',
'Shipments': 'Spedizioni',
'Shipments To': 'Spedizione per',
'Shooting': 'Sparatoria',
'short': 'corto',
'Short Assessment': 'Valutazione breve',
'Short Description': 'Descrizione breve',
'short<6cm': 'Corto<6 cm',
'Show': 'Mostra',
'Show %(number)s entries': 'Mostra le voci %(number)s',
'Show Checklist': 'Mostra Controllo',
'Show Details': 'Mostra dettagli',
'Show Map': 'Mostra mappa',
'Show on map': 'Mostra sulla mappa',
'Show on Map': 'Mostra sulla mappa',
'Show Region in Menu?': 'Mostrare la Regione nel menu?',
'Show Table': 'Mostra tabella',
'Show totals': 'Mostra totali',
'Shower Availability': 'Disponibilità di docce',
'Shower Handicap Facilities': 'Doccia Disabili ',
'Shower with handicap facilities': 'Doccia con accesso per disabili ',
'Showing 0 to 0 of 0 entries': 'Mostra 0 a 0 di 0 voci',
'Showing _START_ to _END_ of _TOTAL_ entries': 'Mostra _START_ a _END_ di voci _TOTAL_',
'sides': 'lati',
'Sign-up as a volunteer': 'Registrati come un volontario',
'Sign-up for Account': "Registrarsi per l'Account",
'sign-up now': 'Iscriviti ora',
'Sign-up succesful - you should hear from us soon!': 'Iscrizione completata con successo - avrai nostre notizie a breve!',
'Signature': 'Firma',
'single': 'singolo ',
'Single PDF File': 'Singolo File PDF',
'Site': 'Sito',
'Site Administration': 'Amministrazione del sito',
'Site Key': 'Chiave del sito',
'Site Key which this site uses to authenticate at the remote site (if required for this type of repository).': 'Chiave del sito in uso per autenticazione al sito remoto (se richiesto per questo tipo di magazzino).',
'Site Name': 'Nome sito',
'Situation': 'Situazione',
'Situation Awareness & Geospatial Analysis': 'Analisi Situazione Attuale & Geospaziale',
'Skeleton Example': 'Struttura di esempio',
'Sketch': 'Schizzo',
'Skill': 'Capacità',
'Skill added': 'Capacità aggiunta',
'Skill Catalog': 'Catalogo delle Capacità',
'Skill deleted': 'Capacità cancellata',
'Skill Details': 'Dettagli della Capacità',
'Skill Equivalence': 'Equivalenza delle Capacità',
'Skill Equivalence added': 'Equivalenza di Capacità aggiunta',
'Skill Equivalence deleted': 'Equivalenza di Capacità eliminata',
'Skill Equivalence Details': 'Dettagli di equivalenza di Capacità',
'Skill Equivalence updated': 'Equivalenza di Capacità aggiornata',
'Skill Equivalences': 'Equivalenze di Capacità',
'Skill Provision': 'Disponibilità delle Capacità',
'Skill Provision added': 'Disponibilità di Capacità aggiunta',
'Skill Provision Catalog': 'Catalogo della Disponibilità delle Capacità',
'Skill Provision deleted': 'Disponibilità delle Capacità cancellata',
'Skill Provision Details': 'Dettagli su disponibilità delle Capacità',
'Skill Provision updated': 'Disponibilità delle Capacità aggiornata',
'Skill Provisions': 'Disponibilità delle Capacità',
'Skill removed': 'Capacità rimosse',
'Skill Status': 'Stato della Capacità',
'Skill TYpe': 'Tipo di Capacità',
'Skill Type': 'Tipo di Capacità ',
'Skill Type added': 'Tipo di Capacità aggiunto',
'Skill Type Catalog': 'Catalogo del tipo di Capacità',
'Skill Type deleted': 'Tipo Capacità eliminato',
'Skill Type Details': 'Dettagli tipo di Capacità',
'Skill Type updated': 'Tipo di Capacità aggiornato',
'Skill Types': 'Tipi di Capacità',
'Skill updated': 'Capacità aggiornata',
'Skills': 'Capacità',
'Skills Catalog': 'Catalogo delle Capacità',
'Skills Management': 'Gestione delle Capacità',
'Skin Marks': 'Segni sulla cute',
'Skype': 'Skype',
'Skype ID': 'ID Skype',
'slim': 'Magro',
'Slope failure, debris': 'Frana, attenzione ai detriti',
'Small Trade': 'Piccolo commercio',
'Smoke': 'Fumo',
'SMS': 'SMS',
'SMS deleted': 'SMS cancellato',
'SMS Details': 'Dettagli SMS',
'SMS InBox': 'SMS in casella',
'SMS Modem': 'SMS modem',
'SMS Modem Channels': 'SMS Modem Canali',
'SMS Outbound Gateway added': 'SMS in uscita, Gateway aggiunto',
'SMS Outbound Gateway deleted': 'SMS in uscita, Gateway cancellato',
'SMS Outbound Gateway Details': 'SMS in uscita, dettagli del Gateway',
'SMS Outbound Gateway updated': 'SMS in uscita, Gateway aggiornato',
'SMS Outbound Gateways': 'Gateways per SMS in uscita',
'SMS SMTP Channels': 'SMS SMTP Canali',
'SMS via SMTP (Outbound)': 'SMS via SMTP (in uscita)',
'SMS WebAPI (Outbound)': 'SMS WebAPI (in uscita)',
'SMS WebAPI Channels': 'SMS WebAPI Canali',
'SMTP': 'SMTP',
'SMTP to SMS Channel added': 'Canale SMTP a SMS aggiunto',
'SMTP to SMS Channel deleted': 'Canale SMTP a SMS cancellato',
'SMTP to SMS Channel Details': 'Dettagli del Canale SMTP a SMS',
'SMTP to SMS Channel updated': 'Canale aggiornato SMTP a SMS',
'SMTP to SMS Channels': 'Canali SMTP a SMS',
'Snapshot': 'Istantanea',
'Snapshot Report': 'Report sulla situazione attuale',
'Snow Fall': 'Nevicata',
'Snow Squall': 'Turbine di Neve',
'Social Welfare': 'Social Welfare',
'Society': 'Società',
'Socio-Economic Background': 'Contesto socio-economico',
'Software Manager': 'Capo del Software',
'Soil bulging, liquefaction': 'Innalzamento del terreno, liquefazione',
'Solid waste': 'Rifiuti solidi',
'Solution': 'Soluzione',
'Solution added': 'Soluzione aggiunta',
'Solution deleted': 'Soluzione eliminata',
'Solution Details': 'Dettagli soluzione',
'Solution Item': 'Elemento soluzione',
'Solution updated': 'Soluzione aggiornata',
'Solutions': 'Soluzioni',
'Some': 'Alcuni',
'Sorry location %(location)s appears to be outside the area of parent %(parent)s.': "Spiacente, l'ubicazione %(location)s sembra essere al di fuori dell'area padre %(parent)s.",
'Sorry location %(location)s appears to be outside the area supported by this deployment.': "Spiacente, l'ubicazione %(location)s sembra essere al di fuori dell'area supportata da questa distribuzione.",
'Sorry location appears to be outside the area of parent %(parent)s.': "Spiacente, l'ubicazione sembra essere al di fuori dell'area padre %(parent)s.",
'Sorry location appears to be outside the area supported by this deployment.': "Spiacente, l'ubicazione sembra essere al di fuori dell'area supportata da questa distribuzione. ",
'Sorry that location appears to be outside the area of the Parent.': "Spiacente che l'ubicazione appare all'esterno dell'area dell'elemento Parent.",
'Sorry that location appears to be outside the area supported by this deployment.': "Spiacenti che l'ubicazione appare al di fuori dell'area supportata da questa distribuzione.",
'Sorry, I could not understand your request': 'Spiacente, non è stato possibile capire la richiesta',
'Sorry, only users with the MapAdmin role are allowed to create location groups.': 'Spiacente, solo gli utenti con ruolo MapAdmin sono abilitati alla creazione di gruppi di location',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': "Spiacente, solo gli utenti con l' MapAdmin ruolo sono autorizzati a modificare queste posizioni",
'Sorry, something went wrong.': 'Spiacente, qualcosa è andato storto.',
'Sorry, that page is forbidden for some reason.': 'Spiacenti, la pagina è proibita per qualche motivo.',
'Sorry, that service is temporary unavailable.': 'Spiacenti, quel servizio è temporaneamente non disponibile.',
'Sorry, there are no addresses to display': 'Spiacente, non ci sono indirizzi da visualizzare',
"Sorry, things didn't get done on time.": 'Spiacenti, le cose non sono state fatte in tempo.',
"Sorry, we couldn't find that page.": 'Spiacenti, non è stato possibile individuare tale pagina.',
'Source': 'Origine ',
'source': 'origine',
'Source ID': 'ID di origine',
'Source Name': 'Nome fonte',
'Source Time': 'Ora di origine',
'Source URL': 'URL di origine',
'Sources of income': 'Fonti di reddito',
'Space Debris': 'Detriti spaziali',
'Spanish': 'Spagnolo',
'Spanish - Spoken': 'Spagnolo - Parlato',
'Spanish - Written': 'Spagnolo - Scritto',
'Special Ice': 'Ghiaccio speciale',
'Special Marine': 'Speciale Marino',
'Specialized Hospital': 'Ospedale specializzato',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': "Area specifica (ad esempio edificio/stanza) all' interno del luogo in cui questa/o persona / gruppo è stata/o visto.",
'Specific language interpreter and/or cultural mediator required': 'Specific language interpreter and/or cultural mediator required',
'Specific locations need to have a parent of level': 'Luoghi specifici devono avere un parent di livello',
'specify': 'specificare',
'Specify a descriptive title for the image.': "Specifica un titolo descrittivo per l'immagine.",
'Specify a different place of birth (foreign country, village, hamlet)': 'Specify a different place of birth (foreign country, village, hamlet)',
'Specify the bed type of this unit.': 'Specificare il tipo di letto di questa unità.',
'Specify the number of available sets': 'Specificare il numero di set disponibili',
'Specify the number of available units (adult doses)': 'Specificare il numero di unità disponibili (adulte dosi)',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': 'Specificare il numero di unità disponibili (litri) di Ringer-Lactate o soluzioni equivalenti',
'Specify the number of sets needed per 24h': 'Specificare il numero di set necessari per 24h',
'Specify the number of units (adult doses) needed per 24h': 'Specificare il numero di unità (dosi per adulti) necessarie per 24h',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': 'Specificare il numero di unità (litri) di Ringer-Lactate o soluzioni equivalenti necessarie per 24h',
'Spherical Mercator (900913) is needed to use OpenStreetMap/Google/Bing base layers.': 'Spherical Mercator (900.913) è necessario per utilizzare i livelli di base di OpenStreetMap / Google / Bing.',
'Spreadsheet Importer': 'Importatore di foglio elettronico',
'Spreadsheet uploaded': 'Foglio elettronico caricato',
'Spring': 'Sorgente',
'Squall': 'Burrasca',
'Staff': 'Personale',
'staff': 'Personale',
'Staff & Volunteers': 'Personale e Volontari',
'Staff & Volunteers (Combined)': 'Personale e Volontari (insieme)',
'Staff and Volunteers': 'Personale e Volontari',
'Staff Assigned': 'Personale assegnato',
'Staff Assignment Details': 'Dettagli del personale assegnato',
'Staff Assignment removed': 'Il personale assegnato è stato rimosso',
'Staff Assignment updated': 'Il personale assegnato è stato aggiornato',
'Staff Assignments': 'Assegnamento personale',
'Staff ID': 'Personale ID',
'Staff Member added': 'Membro del Personale aggiunto',
'Staff member added': 'Membro del Personale aggiunto',
'Staff Member deleted': 'Membro del Personale cancellato',
'Staff Member Details': 'Dettagli su Membro del Personale',
'Staff Member Details updated': 'Dettagli su Membro del Personale aggiornati',
'Staff Members': 'Membri del personale',
'staff members': 'Membri del personale',
'Staff present and caring for residents': 'Personale presente e che cura i residenti',
'Staff Record': 'Archivio del personale',
'Staff Report': 'Report del personale',
'Staff Type added': 'Tipo di personale aggiunto',
'Staff Type deleted': 'Tipo di personale eliminato',
'Staff Type Details': 'Dettagli tipo di personale',
'Staff Type updated': 'Tipo di personale aggiornato',
'Staff Types': 'Tipi di personale',
'Staff with Contracts Expiring in the next Month': 'Personale con contratto in scadenza il prossimo mese',
'Staff/Volunteer Record': 'Record Personale/Volontario',
'Staffing': 'Personale',
'Stairs': 'Scale',
'Start Date': "Data d'inizio",
'Start date': "Data d'inizio",
'Start of Period': 'Inizio periodo',
'state': 'stato',
'State': 'Stato',
'state location': 'ubicazione stato',
'Stationery': 'Cartoleria',
'Status': 'Stato',
'Status added': 'Stato aggiunto',
'Status deleted': 'Stato eliminato',
'Status Details': 'Dettagli sullo stato',
'Status of clinical operation of the facility.': 'Stato operativo della Struttura clinica.',
'Status of general operation of the facility.': 'Stato di funzionamento generale della Struttura.',
'Status of morgue capacity.': "Stato di capacità dell'obitorio.",
'Status of operations of the emergency department of this hospital.': 'Stato delle operazioni del pronto soccorso di questo ospedale.',
'Status of security procedures/access restrictions in the hospital.': "Stato delle procedure di sicurezza/limitazioni d'accesso nell'ospedale.",
'Status of the operating rooms of this hospital.': 'Lo stato delle sale operatorie di questo ospedale.',
'Status open= %s': 'Stato aperto= %s',
'Status removed': 'Stato rimosso',
'Status Reports': 'Reports di stato ',
'Status Updated': 'Situazione attuale aggiornata',
'Status updated': 'Aggiornamento sullo stato',
'Statuses': 'Stati',
'Steel frame': 'Pannello in acciaio',
'Stolen': 'Rubato',
'Store spreadsheets in the Eden database': 'Memorizzare fogli elettronici nel database Eden',
'Storeys at and above ground level': 'Piani al livello del suolo e superiori',
'Storm Force Wind': 'Vento di tempesta',
'Storm Surge': 'Onda di tempesta',
'Stowaway': 'Clandestino',
'straight': 'dritto',
'Strategy': 'Strategy',
'Street Address': 'Indirizzo stradale',
'Street View': 'Street View (vista a livello stadale)',
'String used to configure Proj4js. Can be found from %(url)s': 'String utilizzato per configurare Proj4js. Può essere trovato da %(url)s ',
'Strong': 'Forte',
'Strong Wind': 'Forte Vento',
'Structural': 'Strutturale',
'Structural Hazards': 'Rischi Strutturali',
'Style': 'Stile',
'Style Field': 'Stile del Campo',
'Style invalid': 'Stile non valido',
'Style Values': 'Stile Valori',
'Sub-type': 'Sottotipo',
'Subject': 'Oggetto',
'Submission successful - please wait': 'Inoltro riuscito - si prega di attendere',
'Submission successful - please wait...': 'Inoltro riuscito - si prega di attendere...',
'Submit': 'Sottometti',
'Submit a request for recovery': 'Inoltrare una richiesta per il recupero',
'Submit New': 'Sottometti uno nuovo',
'Submit New (full form)': 'Inoltra nuovo (formato completo)',
'Submit New (triage)': 'Inoltra nuovo modulo (triage)',
'Submit new Level 1 assessment (full form)': 'Inoltra la nuova valutazione di livello 1 (modulo completo)',
'Submit new Level 1 assessment (triage)': 'Inoltra la nuova valutazione di livello 1 (triage)',
'Submit new Level 2 assessment': 'Inoltra la nuova valutazione di livello 2',
'Subscribe': 'Sottoscrivere',
'Subscription added': 'Sottoscrizione aggiunta',
'Subscription deleted': 'Sottoscrizione cancellata',
'Subscription Details': 'Dettagli della Sottoscrizione',
'Subscription updated': 'Sottoscrizione aggiornata',
'Subscriptions': 'Sottoscrizioni',
'Subscriptions Status': 'Stato Sottoscrizioni',
'Subsector': 'Sottosettore',
'Subsector added': 'Sottosettore aggiunto',
'Subsector deleted': 'Sottosettore eliminato',
'Subsector Details': 'Dettagli sottosettore',
'Subsector updated': 'Sottosettore Aggiornato',
'Subsectors': 'Sottosettori',
'Subsidence': 'Cedimento del terreno',
'Subsistence Cost': "Costo dell'iscrizione",
'SubType of': 'Sottotipo di',
'Suburb': 'Sobborgo',
'suffered financial losses': 'Subito perdite finanziarie',
'Suggest not changing this field unless you know what you are doing.': 'Propongo Non modificare questo campo se non si è certi che di quello che si sta facendo.',
'Suitable': 'Adatto ',
'Summary': 'Riepilogo',
'Summary by Administration Level': 'Riepilogo per livello di Amministrazione',
'Sunday': 'Domenica',
'Supervisor': 'Supervisore',
'Supplier': 'Fornitore',
'Suppliers': 'Fornitori',
'Supply Chain Management': 'Gestione della catena di fornitura',
'Support Request': 'Richiesta di Supporto',
'Support Requests': 'Richieste di supporto',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': 'Supporta il processo decisionale di grandi gruppi di Esperti in gestione delle crisi aiutando i gruppi a creare elenchi classificati.',
'Surgery': 'Chirurgia',
'Survey Answer': 'Risposta al sondaggio',
'Survey Answer added': 'Risposta al sondaggio aggiunta',
'Survey Answer deleted': 'Risposta al sondaggio eliminata',
'Survey Answer Details': 'Dettagli della risposta al sondaggio',
'Survey Answer updated': 'Risposta al sondaggio aggiornata',
'Survey Module': 'Questionario',
'Survey Name': 'Nome del sondaggio',
'Survey Question': 'Quesito di sondaggio',
'Survey Question added': 'Domanda del sondaggio aggiunta',
'Survey Question deleted': 'Domanda del sondaggio eliminata',
'Survey Question Details': 'Dettagli della domanda sondaggio',
'Survey Question Display Name': 'Nome di visualizzazione della domanda del sondaggio',
'Survey Question updated': 'Domanda di sondaggio aggiornata',
'Survey Series': 'Serie di sondaggi',
'Survey Series added': 'Serie di sondaggi aggiunte',
'Survey Series deleted': 'Serie di sondaggi eliminate',
'Survey Series Details': 'Dettagli delle serie di sondaggi',
'Survey Series Name': 'Nome delle serie di sondaggi',
'Survey Series updated': 'Aggiornate le serie di sondaggi',
'Survey Template': 'Modello di sondaggio',
'Survey Template added': 'Modello di sondaggio aggiunto',
'Survey Template deleted': 'Modello di sondaggio eliminato',
'Survey Template Details': 'Dettagli sul modello di indagine',
'Survey Template updated': 'Modello di analisi aggiornato',
'Survey Templates': 'Modelli di analisi',
'Switch to 3D': 'Passare al 3D',
'Symbologies': 'Simbologie',
'Symbology': 'Simbologia',
'Symbology added': 'Simbologia aggiunta',
'Symbology deleted': 'Simbologia cancellata',
'Symbology Details': 'Dettagli della simbologia',
'Symbology removed from Layer': 'Simbologia rimossa da (questo) livello',
'Symbology updated': 'Simbologia aggiornata',
'Sync Conflicts': 'Conflitti durante la Sincronizzazione',
'Sync History': 'Cronologia di sincronizzazione',
'Sync Now': 'Sincronizza ora',
'Sync Partners': 'Partner di sincronizzazione',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'I partner di sincronizzazione sono istanze o peer (SahanaEden, SahanaAgasti, Ushahidi, ecc. ) che si desidera per sincronizzare informazioni con. Fare clic sul collegamento sulla destra per andare alla pagina dove è possibile aggiungere i sync partner, ricerca per i sync partner e modificarli.',
'Sync Pools': 'Pool di sincronizzazione',
'Sync process already started on': 'Il processo di Sync è già stato avviato',
'Sync Schedule': 'Pianificazione delle sincronizzazioni',
'Sync Settings': 'Impostazioni del Sync',
'Synchronisation': 'Sincronizzazione',
'Synchronization': 'Sincronizzazione',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'la sincronizzazione consente di condividere dati che si hanno con altri e aggiornare il proprio database con i dati più recenti provenienti da altri peer. Questa pagina fornisce informazioni sulle modalità per utilizzare le funzioni di sincronizzazione di Sahana Eden',
'Synchronization Conflicts': 'Conflitti di sincronizzazione',
'Synchronization Details': 'Dettagli della sincronizzazione',
'Synchronization History': 'Cronologia della sincronizzazione',
'Synchronization mode': 'Synchronization mode',
'Synchronization not configured.': 'Sincronizzazione non configurata.',
'Synchronization Peers': 'Peer di Sincronizzazione',
'Synchronization Settings': 'Impostazioni di sincronizzazione',
'Synchronization settings updated': 'Impostazioni di sincronizzazione aggiornate',
'Syncronisation History': 'Storico delle sincronizzazioni',
"System's Twitter account updated": 'Utenza Twitter del Sistema aggiornata',
'Table': 'Tabella',
'table': 'tabella',
'Table name of the resource to synchronize': 'Nome della tabella della risorsa da sincronizzare',
'Table Permissions': 'Tabella delle autorizzazioni',
'Tablename': 'Tabella dei nomi',
'Tag added': 'Etichetta aggiunta',
'Tag Details': 'Dettagli etichetta',
'Tag Post': 'Pubblica etichetta',
'Tag removed': 'Etichetta rimossa',
'Tag updated': 'Etichetta aggiornata',
'Tags': 'Etichette',
'Take a screenshot of the map which can be printed': 'Fare uno screenshot della mappa per poterlo stampare ',
'Take shelter in place or per <instruction>': 'Prendere la Struttura di Accoglienza al posto di o per <instruction>',
'tall': 'altezza',
'Task added': 'Attività aggiunta',
'Task deleted': 'Attività eliminata',
'Task Details': "Dettagli sull'attività",
'Task List': 'Elenco di attività',
'Task removed': 'Attività rimossa',
'Task Status': 'Stato attività',
'Task updated': 'Attività aggiornata',
'Tasks': 'Attività',
'Team': 'Squadra',
'Team added': 'Squadra aggiunta',
'Team deleted': 'Squadra cancellata',
'Team Description': 'Descrizione squadra',
'Team Details': 'Dettagli squadra',
'Team Id': 'Squadra Id',
'Team ID': 'Squadra ID',
'Team Leader': 'Capo Squadra',
'Team Member added': 'Membro della squadra aggiunto',
'Team Members': 'Membri squadra',
'Team Name': 'Nome della Squadra',
'Team Type': 'Tipo di Squadra',
'Team updated': 'Squadra aggiornata',
'Teams': 'Squadre',
'Technical testing only, all recipients disregard': 'Solo prove tecniche, tutti i destinatari possono ignorare',
'Telecommunications': 'Telecomunicazioni',
'Telecommunications Mobile Phone': 'Telecomunicazioni via telefoni mobili',
'Telephone': 'Telefono',
'Telephony': 'Telefonia',
'Tells GeoServer to do MetaTiling which reduces the number of duplicate labels.': 'Dice GeoServer di fare MetaTiling per ridurre il numero di etichette duplicate.',
'Temp folder %s not writable - unable to apply theme!': 'Cartella Temp %s non scrivibile - impossibile applicare il tema!',
'Template file %s not readable - unable to apply theme!': 'File modello %s non leggibile - impossibile applicare il tema!',
'Templates': 'Modelli',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': "Termine per la suddivisione amministrativa di quinto livello all'interno del paese (ad esempio una suddivisione di voto o un Codice Postale). Questo livello non è utilizzato spesso.",
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': "Termine per la suddivisione amministrativa di quarto livello all'interno del paese (ad esempio Villaggio, Quartiere o Distretto).",
'Term for the primary within-country administrative division (e.g. State or Province).': "Termine per la suddivisione amministrativa primaria all'interno del paese (ad esempio Stato o Provincia).",
'Term for the secondary within-country administrative division (e.g. District or County).': "Termine per la divisione amministrativa secondaria all'interno del paese (ad esempio Distretto o Regione).",
'Term for the third-level within-country administrative division (e.g. City or Town).': "Termine per la divisione ammnistrativa di terzo livello all' interno del paese (ad es. Paese o Città).",
'Term for the top-level administrative division (i.e. Country).': 'Termine per la divisione amministrativa di livello superiore divisione (ad esempio paese).',
'Terms of Service': 'Termini di servizio',
'Territorial Authority': 'Autorità Territoriale',
'Terrorism': 'Terrorismo',
'Tertiary Server (Optional)': 'Server terziario (facoltativo)',
'Test Results': 'Risultati del test',
'Text': 'Testo',
'Text Color for Text blocks': 'Testo Colore per il testo blocchi',
'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': "Grazie per aver convalida l' e-mail. L' account utente è ancora in attesa di approvazione da parte del sistema amministratore (%s). si riceve una notifica tramite e-mail quando l' account è attivata.",
'Thanks for your assistance': "Grazie per l'assistenza",
'The': 'Il',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': 'La \\ " query\\ " è una condizione come \\ " db.table1.field1==\'value\'\\ ". Qualcosa come \\ " db.table1.field1 == db.table2.field2\\ " risulta in una JOIN SQL.',
'The area is': "Quest'area è",
'The Area which this Site is located within.': "La superficie all'interno della quale questo sito è ubicato.",
'The Assessments module allows field workers to send in assessments.': 'Il modulo Valutazioni permette ai lavoratori sul campo di inviare Valutazioni.',
'The asset must be assigned to a site OR location.': "L' asset deve essere assegnato a un sito o ubicazione.",
'The attribute used to determine which features to cluster together (optional).': 'Attributo utilizzato per determinare quali funzionalità si possono raggruppare (opzionale).',
'The attribute which is used for the title of popups.': 'Attributo utilizzato per il titolo dei popup',
'The attribute within the KML which is used for the title of popups.': "Attributo all' interno del KML che viene utilizzato per il titolo dei popup",
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': "L' attributo (i) all' interno del KML che vengono utilizzati per il corpo dei messaggi a comparsa. (Utilizzare uno spazio tra gli attributi)",
'The Author of this Document (optional)': "L' autore di questo documento (facoltativo)",
'The body height (crown to heel) in cm.': 'Altezza del corpo (dalla testa ai piedi) in cm.',
'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': 'Il modulo di valutazione degli edifici, permette di fornire la valutazione sulla sicurezza degli edifici, ad esempio dopo un terremoto.',
'The Camp this person is checking into.': 'Il campo nel quale questa persona si sta registrando.',
'The Camp this Request is from': 'Questa richiesta proviene dal campo ',
'The client ID to use for authentication at the remote site (if required for this type of repository).': 'The client ID to use for authentication at the remote site (if required for this type of repository).',
'The client secret to use for authentication at the remote site (if required for this type of repository).': 'The client secret to use for authentication at the remote site (if required for this type of repository).',
'The country the person usually lives in.': 'La nazione dove la persona vive',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': "La posizione corrente della persona / gruppo, può essere generale (per Prospetti) oppure dettagliata (per la visualizzazione su una mappa). immettere alcuni caratteri per cercare tra le localita' disponibili",
'The default Organization for whom this person is acting.': "L' Organizzazione predefinita per la quale questa persona sta operando.",
'The default Organization for whom you are acting.': "L' Organizzazione predefinita per la quale si sta operando.",
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "Il (I) donatore(i) per questo progetto Più valori possono essere selezionati tenendo premuto il tasto 'Control'.",
'The duplicate record will be deleted': 'La registrazione duplicata verrà eliminata',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'Indirizzo Email a cui vengono inviate le richieste di approvazione (di solito si tratta di un gruppo piuttosto che di una mail individuale). Se il campo è vuoto, allora le richieste vengono approvate automaticamente, se il dominio corrisponde.',
'The facility where this position is based.': 'La Struttura dove si trova questa posizione del personale',
'The first or only name of the person (mandatory).': "Il primo o l'unico nome della persona (obbligatorio)",
'The following %(new)s %(resource)s have been added': 'Le seguenti %(new)s %(resource)s sono state aggiunte',
'The following %(upd)s %(resource)s have been updated': 'Le seguenti %(upd)s %(resource)s sono state aggiornate',
'The following %s have been added': 'I seguenti %s sono stati aggiunti ',
'The following %s have been updated': 'I seguenti %s sono stati aggiornati',
'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': "La forma dell' URL è http://your/web/map/service?service=WMS&request=GetCapabilities dove your/web/map/service sta per il percorso URL per il WMS.",
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': "Il 'Sistema di Archiviazione degli Incidenti' consente di pubblico di riportare incidenti e di tenerli tracciati",
'The language you wish the site to be displayed in.': 'La lingua nella quale si desidera visualizzare il sito.',
'The length is': 'La lunghezza è',
'The list of Brands are maintained by the Administrators.': "L'elenco dei Marchi che sono gestiti dagli amministratori.",
'The list of Catalogs are maintained by the Administrators.': "L'elenco dei cataloghi è gestito dagli amministratori.",
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': "La ubicazione da cui proviene la persona, che può essere generale (per report) o esatta (per la visualizzazione su una mappa). Immettere alcuni caratteri per cercare tra le localita' disponibili",
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': "La ubicazione che la persona sta per avere, che può essere generale (per report) o precisa (per la visualizzazione su una mappa). Immettere alcuni caratteri per cercare tra le localita' disponibili",
'The map will be displayed initially with this latitude at the center.': 'La mappa verrà visualizzata inizialmente con questa latitudine al centro.',
'The map will be displayed initially with this longitude at the center.': 'La mappa verrà visualizzata inizialmente con questa Longitudine al centro.',
'The Maximum valid bounds, in projected coordinates': 'Numero massimo di limiti, nelle coordinate proiettate',
'The Media Library provides a catalog of digital media.': 'La libreria di supporto fornisce un catalogo di supporti digitali.',
'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'Il modulo di messaggistica è il principale perno di comunicazione del sistema Sahana. Esso viene utilizzato per inviare avvisi e/o messaggi tramite SMS & E-mail ai vari gruppi ed individui prima, durante e dopo un disastro.',
'The minimum number of features to form a cluster.': 'Il numero minimo di funzioni per formare un cluster.',
'The minimum number of features to form a cluster. 0 to disable.': 'Numero minimo di caratteristiche per formare un cluster. 0 per disabilitare.',
'The name to be used when calling for or directly addressing the person (optional).': 'Il nome da utilizzare quando si chiama o ci si rivolge direttamente alla persona (opzionale).',
'The next screen will allow you to detail the number of people here & their needs.': 'La schermata successiva vi permetterà specificare il numero di persone e le loro esigenze. ',
'The number of pixels apart that features need to be before they are clustered.': 'Il numero di pixel oltre che le funzioni devono essere prima che con cluster.',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'Il numero di tessere attorno alla mappa visibile da scaricare. Zero indica che la prima pagina viene caricata più rapidamente, numeri più alti che la visualizzazione successiva è più veloce.',
'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': "Il numero di unità di misura degli elementi alternativi che è uguale a una unità di misura dell'elemento",
'The organization managing this event': 'Organizzazione che gestisce questo evento',
'The Organization Registry keeps track of all the relief organizations working in the area.': "Il Registro dell'Organzzazione tiene traccia di tutti le organizzazioni di soccorso che operano nel settore.",
'The parse request has been submitted': 'La richiesta di analisi è stata inoltrata',
'The person at the location who is reporting this incident (optional)': 'La persona in loco che riporta questo incidente (facoltativo)',
'The poll request has been submitted, so new messages should appear shortly - refresh to see them': 'La richiesta di polling è stata inoltrata, in modo che i nuovi messaggi dovrebbero apparire tra poco - aggiornare per vederli',
'The POST variable containing the phone number': 'La variabile POST contenente il numero di telefono',
'The post variable containing the phone number': 'La variabile di posta che contiene il numero di telefono',
'The post variable on the URL used for sending messages': "La variabile di posta sull' URL utilizzato per l' invio di messaggi",
'The POST variable on the URL used for sending messages': 'La variabile POST su URL utilizzata per invio di messaggi',
'The post variables other than the ones containing the message and the phone number': 'Le variabili di posta diverse da quelle contenenti il messaggio e il numero di telefono',
'The POST variables other than the ones containing the message and the phone number': 'Le variabili POST diverse da quelle contenenti il messaggio e il numero di telefono',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'Il modulo di Project Tracking consente la creazione di attività per colmare le lacune nella valutazione delle necessità.',
"The provided 'formuuid' is invalid. You have selected a Form revision which does not exist on this server.": "Il 'formuuid' previsto non è valido. È stato selezionato un modulo revisione che non esiste su questo server.",
"The provided 'jobuuid' is invalid. The session of Form upload is invalid. You should retry uploading.": "Il 'jobuuid' previsto non è valido. La sessione del modulo di upload non è valida. Si consiglia di ripetere il caricamento.",
'The Role this person plays within this hospital.': "Il ruolo che questa persona svolge all' interno di questo ospedale.",
'The search request has been submitted, so new messages should appear shortly - refresh to see them': 'La richiesta di ricerca è stata presentata, in modo che i nuovi messaggi dovrebbero apparire tra poco - aggiornare per vederli',
'The search results are now being processed with KeyGraph': 'I risultati della ricerca sono in corso di elaborazione con KeyGraph',
'The search results should appear shortly - refresh to see them': 'I risultati della ricerca dovrebbe apparire tra poco - aggiornare per vederli',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': "La porta seriale a cui il modem e' connesso - /dev/ttyUSB0, ecc. su linux e com1, com2 ecc. su Windows",
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'il server non ha ricevuto in tempo la risposta da un altro server che stava accedendo per rispondere alla richiesta del browser',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'il server ha ricevuto una risposta non corretta da una altro server che stava accedendo per rispondere alla richiesta del browser',
'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'Il registro delle strutture di Accoglienza segue tutti i rifugi e memorizza i relativi dettagli di base. Questo collabora con altri moduli per tenere traccia delle persone associate con una Struttura di Accoglienza, i servizi disponibili. etc.',
'The Shelter this person is checking into.': 'La Struttura di Accoglienza in cui questa persona si registra.',
'The Shelter this Request is from': 'La Struttura di Accoglienza da cui questa richiesta proviene',
'The site where this position is based.': 'Il sito dove è basata questa posizione',
"The staff member's official job title": 'Titolo professionale ufficiale del Membro del personale',
'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': 'Il personale responsibile delle strutture può effettuare richieste di assistenza. Gli impegni possono essere presi nei confronti di queste richieste però le richieste restano aperte finché il richiedente conferma che la richiesta è stata completata.',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': "L'evento oggetto non costituisce più una minaccia o preoccupazione e ogni azione successiva è descritta in",
'The subject of the alert (optional)': "La causa dell'allarme (opzionale)",
'The system supports 2 projections by default:': 'Il sistema supporta due proiezioni di default:',
'The time at which the Event started.': "Ora di inizio dell'evento.",
'The time at which the Incident started.': "Il tempo in cui l'incidente è cominciato.",
'The time difference between UTC and your timezone, specify as +HHMM for eastern or -HHMM for western timezones.': 'The time difference between UTC and your timezone, specify as +HHMM for eastern or -HHMM for western timezones.',
'The token associated with this application on': 'Il token associato con questa applicazione su',
'The Unique Identifier (UUID) as assigned to this facility by the government.': "L' identificativo univoco (UUID) come assegnato a questa funzione da parte del governo.",
'The unique identifier which identifies this instance to other instances.': "L'identificativo univoco che identifica questa istanza ad altre istanze.",
'The uploaded Form is unreadable, please do manual data entry.': 'Il form caricato non è leggibile. Si prega di procedere con il caricamento manuale',
'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': "L' URL per la pagina GetCapabilities di un Web Map Service (WMS) i cui livelli desideri che siano disponibili tramite il pannello Browser sulla mappa.",
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": 'URL del file immagine. Se non si carica un file di immagine, allora è necessario specificare quì la relativa ubicazione.',
'The URL of your web gateway without the POST parameters': 'URL del vostro web gateway senza parametri POST',
'The URL of your web gateway without the post parameters': 'URL del gateway web senza i parametri attaccati',
'The URL to access the service.': "L' URL per accedere al servizio.",
"The volunteer's role": 'Il ruolo del volontario',
'The way in which an item is normally distributed': 'Il modo in cui un elemento viene normalmente distribuito',
'The weight in kg.': 'Il peso in kg.',
'Theme': 'Tema',
'Theme added': 'Tema aggiunto',
'Theme Data': 'Dati tema ',
'Theme Data deleted': 'Dati tema cancellati ',
'Theme Data updated': 'Dati tema aggiornati',
'Theme deleted': 'Tema eliminato',
'Theme Details': 'Dettagli del tema',
'Theme Layer': 'Strato Tema ',
'Theme updated': 'Tema aggiornato',
'Themes': 'Temi',
'There are errors': 'Sono presenti degli errori',
'There are insufficient items in the Inventory to send this shipment': "Ci sono elementi insufficienti nell'inventario per inviare la spedizione",
'There are more than %(max)s results, please input more characters.': 'Ci sono più di %(max)s risultati, inserire più caratteri. ',
'There are multiple records at this location': 'Ci sono record multipli in questa posizione ',
'There are no contacts available for this person!': 'Non ci sono contatti disponibili per questa persona!',
"There are no details for this person yet. Add Person's Details.": 'Non ci sono i dettagli di questa persona. Aggiungere i Dettagli della Persona',
'There are too many features, please Zoom In or Filter': "Ci sono troppe funzioni, fare 'Zoom In' o filtrare ",
'There is no address for this person yet. Add new address.': "Non c'è alcun indirizzo di questa persona ancora. Aggiungere un nuovo indirizzo",
'There is no status for this %(site_label)s yet. Add %(site_label)s Status.': "Non c'è ancora uno stato per %(site_label)s. Aggiungi %(site_label)s Stato.",
'There was a problem, sorry, please try again later.': 'Si è verificato un problema, si prega di riprovare più tardi. ',
'These are settings for Inbound Mail.': 'Queste sono le impostazioni per posta in entrata.',
'These are the Incident Categories visible to normal End-Users': "Queste sono le Categorie d'Incidente visibili agli utenti finali generici",
'These need to be added in Decimal Degrees.': 'Questi devono essere aggiunti in gradi decimali.',
'They': 'Essi',
'this': 'questo',
'This appears to be a duplicate of': 'Questo sembra essere un duplicato di',
'This email address is already in use': 'This email address is already in use',
'This email-address is already registered.': 'Questo indirizzo e-mail è già registrato. ',
'This file already exists on the server as': 'Questo file esiste già sul server come',
'This Group has no Members yet': 'Questo Gruppo non ha Membri ancora',
'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': 'Ciò è utile se questo livello è in costruzione. Per evitare modifiche accidentali dopo aver completato questo livello, impostare su False. ',
'This is normally edited using the Widget in the Style Tab in the Layer Properties on the Map.': 'Questo è normalmente modificato utilizzando il Widget in stile scheda nelle proprietà del livello sulla mappa. ',
'This is required if analyzing with KeyGraph.': 'Questo è necessario se si analizza con KeyGraph.',
'This is the way to transfer data between machines as it maintains referential integrity.': 'Questo è il modo per trasferire i dati fra macchine, come sostiene integrità referenziale.',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': 'Questo è il modo per trasferire i dati fra macchine, come sostiene integrità referenziale. ... dati duplicati 1ᵒ deve essere rimosso manualmente.',
"This isn't visible to the recipients": 'Questo non è visibile ai destinatari ',
'This job has already been finished successfully.': 'Questa attività è già stata completata con successo. ',
'This level is not open for editing.': 'Questo livello non è aperto per la modifica. ',
'This might be due to a temporary overloading or maintenance of the server.': 'Ciò potrebbe essere dovuto a un sovraccarico temporaneo o la manutenzione del server.',
'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.': "Questo modulo consente alle Voci d'Inventario di essere Richieste & Spedite tra gli inventari delle strutture.",
'This module allows you to manage Events - whether pre-planned (e.g. exercises) or Live Incidents. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'Questo modulo permette di gestire eventi - sia prepianificati (ad esempio, esercizi) o incidenti avvenuti al momento. È possibile allocare risorse adeguate (personale, attività e servizi) in modo che questi possono essere facilmente mobilitati.',
'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'Questo modulo consente di pianificare scenari per Esercitazioni & Eventi. È possibile assegnare le risorse appropriate (Umane, Beni & Servizi) in modo che possano essere mobilitate facilmente.',
'This page shows you logs of past syncs. Click on the link below to go to this page.': 'Questa pagina mostra registrazioni di precedenti sincronizzazioni. Fare clic sul collegamento sotto per andare a questa pagina.',
'This role can not be assigned to users.': 'Questo ruolo non può essere assegnato agli utenti. ',
'This screen allows you to upload a collection of photos to the server.': 'Questo pannello consente di caricare una raccolta di foto sul server.',
'This setting can only be controlled by the Administrator.': "Questa impostazione può essere controllato soltanto dall' amministratore.",
'This shipment has already been received.': 'Questa spedizione è già stata ricevuta.',
'This shipment has already been sent.': 'Questa spedizione è già stata inviata.',
'This shipment has not been received - it has NOT been canceled because can still be edited.': 'Questa spedizione non è stata ricevuta - essa NON è stata annullata poiché può essere ancora modificata.',
'This shipment has not been sent - it has NOT been canceled because can still be edited.': 'Questa spedizione non è stata spedita - essa NON è stata annullata perché può ancora essere modificata.',
'This shipment will be confirmed as received.': 'Questa spedizione sarà confermata come ricevuta.',
'This should be an export service URL, see': 'Questo dovrebbe essere un URL servizio di esportazione, verificare',
'This Team has no Members yet': 'Questa Squadra non ha ancora membri',
'Thunderstorm': 'Temporale',
'Thursday': 'Giovedì',
'Ticket': 'Biglietto',
'Ticket added': 'Biglietto aggiunto',
'Ticket deleted': 'biglietto cancellato',
'Ticket Details': 'dettagli del biglietto',
'Ticket ID': 'ID del Ticket',
'Ticket updated': 'Ticket aggiornato',
'Ticketing Module': 'Modulo di Etichettamento',
'Tickets': 'Biglietti',
'Tiled': 'Piastrella ',
'Tilt-up concrete': 'Inclinazione-up concreto',
'Timber frame': 'Pannello in legname',
'Time': 'Tempo ',
'Timeline': 'sequenza temporale',
'Timeline Report': 'Relazione temporale',
'times': 'Volte',
'times (0 = unlimited)': 'Numero di volte (0=illimitato)',
'times and it is still not working. We give in. Sorry.': '(....) non è ancora funzionante. Siamo spiacenti.',
'tipo alluvione genova': 'tipo alluvione genova',
'Title': 'Titolo',
'Title to show for the Web Map Service panel in the Tools panel.': 'Titolo da visualizzare per il pannello del servizio di Mappa Web nel pannello strumenti.',
'TMS Layer': 'Strato TMS',
'To': 'A',
'to access the system': 'Per accedere al sistema',
'To begin the sync process, click the button on the right =>': 'Per iniziare il processo di sincronizzazione, fare clic sul pulsante a destra =>',
'To begin the sync process, click this button =>': 'Per iniziare il processo di sincronizzazione, fare clic su questo pulsante =>',
'To create a personal map configuration, click': 'Per creare la configurazione di una mappa personale, fare clic su',
'to download a OCR Form.': 'Scaricare un form OCR',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'Per modificare OpenStreetMap, è necessario modificare le impostazioni di OpenStreetMap in models/000_config.py',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in your Map Config': 'Per modificare OpenStreetMap, è necessario modificare le impostazioni di OpenStreetMap nella mappa Config ',
'To Location': 'A ubicazione',
'To move the Timeline: use the mouse scroll wheel, the arrow keys or grab and drag the Timeline.': 'Per spostare la linea temporale: utilizzare la rotella di scorrimento del mouse, i tasti freccia o afferrare e trascinare la Timeline. ',
'To Person': 'per la persona',
'To Print or Share the Map you will have to take a screenshot. If you need help taking a screen shot, have a look at these instructions for %(windows)s or %(mac)s': 'Per stampare o condividere la mappa fare uno screenshot. Per sapere come fare uno screenshot, leggere le istruzioni per %(windows)s o per %(mac)s ',
'To search by job title, enter any portion of the title. You may use % as wildcard.': 'Per eseguire la ricerca per titolo del lavoro, immettere qualsiasi parte del titolo. È possibile utilizzare % come carattere jolly.',
"To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Per eseguire la ricerca per nome persona, immettere qualusiasi tra Nome, secondo Nome o Cognome, separati da spazi. È possibile utilizzare % come carattere jolly. Premere 'Cerca' senza input per elencare tutte le persone.",
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "Per ricercare un corpo, immettere il numero ID dell'etichetta del corpo. È possibile utilizzare % come carattere jolly. Premere ' Search ' senza input per elencare tutti i corpi.",
"To search for a hospital, enter any of the names or IDs of the hospital, or the organization name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Per ricercare un ospedale, immettere separati da spazi. uno dei nomi o sigla o ID dell' ospedale o dell'organizzazione È possibile utilizzare % come carattere jolly. Premere ' Search ' senza input per elencare tutti gli ospedali.",
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Per ricercare un ospedale, immettere uno dei nomi o ID dell' ospedale, separati da spazi. È possibile utilizzare % come carattere jolly. Premere ' Search ' senza input per elencare tutti gli ospedali.",
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "Per cercare una ubicazione, inserirne il nome. Potete usare% come carattere jolly. Premere il tasto 'Cerca' senza input per elencare tutte le località. ",
"To search for a member, enter any portion of the name of the person or group. You may use % as wildcard. Press 'Search' without input to list all members.": "Per la ricerca di un Membro, inserire qualsiasi parte del nome della persona o del gruppo. Si può usare % come carattere jolly. Premere il tasto 'Cerca' senza input per ottenere un elenco di tutti i membri.",
'To search for a person, enter any of the ""first, middle or last names and/or an ID ""number of a person, separated by spaces. ""You may use % as wildcard.': 'Per cercare una persona, inserire uno qualunque tra ""primo, medio o cognome e/o un ID ""numero di una persona, separati da spazi. ""Si può usare % come carattere jolly.',
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Per cercare una persona, immettere qualsiasi porzione di Nome Cognome o dell'ID, separati da spazi. È possibile utilizzare % come carattere jolly. Premere 'Cerca' senza input per elencare tutte le persone.",
"To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": "Per ricercare una valutazione, immettere qualsiasi parte del numero del ticket. È possibile utilizzare % come carattere jolly. Premere ' Search ' senza input per elencare tutte le Valutazioni.",
'To variable': 'Alla variabile',
'tonsure': 'Chierica',
'Tools': 'Attrezzi',
'total': 'totale',
'Total': 'Totale ',
'Total # of households of site visited': 'N. totale delle famiglie del sito visitate',
'Total # of Target Beneficiaries': 'N. Totale dei Beneficiari finali',
'Total Beds': 'Totale Letti',
'Total Beneficiaries': 'Totale Beneficiari',
'Total Capacity (Night)': 'Capacità totale (Notte) ',
'Total Cost per Megabyte': 'Costo totale per Megabyte',
'Total Cost per Minute': 'Costo totale al minuto',
'Total gross floor area (square meters)': 'Area lorda totale della superficie (metri quadrati)',
'Total Monthly': 'Totale Mensile',
'Total Monthly Cost': 'Costo totale mensile',
'Total Monthly Cost:': 'Costo totale mensile:',
'Total number of beds in this hospital. Automatically updated from daily reports.': 'Numero totale dei letti in questo ospedale. Aggiornato automaticamente dai resoconti giornalieri.',
'Total number of houses in the area': "Numero totale delle case nell'area",
'Total Number of Resources': 'Numero totale di risorse',
'Total number of schools in affected area': 'Numero totale delle scuole in area interessata',
'Total One-time Costs': 'Totale costi una tantum',
'Total Persons': 'Totale Persone',
'Total Population': 'Popolazione totale ',
'Total population of site visited': 'Totale della popolazione del sito visitato',
'Total Records: %(numrows)s': 'Totale record: %(numrows)s ',
'Total Recurring Costs': 'Totale Costi Ricorrenti',
'Total Unit Cost': 'Totale costo unitario',
'Total Unit Cost:': 'Totale costo unitario:',
'Total Units': 'Totale Unità',
'Totals for Budget:': 'Totali di Bilancio:',
'Totals for Bundle:': 'Totali per il Bundle:',
'Totals for Kit:': 'Totali per Kit:',
'Tourist Group': 'Gruppo turistico ',
'Town': 'Città',
'Traces internally displaced people (IDPs) and their needs': 'Tracce delle persone interne sfollate (IDP) e le loro esigenze',
'Tracing': 'Tracciamento',
'Track': 'Traccia',
'Track deleted': 'Traccia eliminata',
'Track Details': 'Dettagli della traccia',
'Track updated': 'Traccia aggiornata',
'Track uploaded': 'Traccia caricata',
'Track with this Person?': 'Traccia con questa persona?',
'Trackable': 'Tracciabile (rintracciabile?)',
'Tracking of basic information on the location, facilities and size of the Shelters': 'Tracciamento di informazioni di base sulla posizione, le funzioni e la dimensione delle Strutture di Accoglienza',
'Tracking of Projects, Activities and Tasks': 'Tracciamento dei progetti, attività e mansioni',
'Tracks': 'Tracce',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': "Tieni traccia dell'ubicazione, distribuzione, capacità e ripartizione delle vittime nelle strutture di Accoglienza",
'Traffic Report': 'Report sul traffico',
'Training': 'Addestramento',
'Training added': 'Formazione aggiunta',
'Training Course Catalog': 'Catalogo dei corsi di formazione',
'Training deleted': 'Formazione eliminata',
'Training Details': 'Dettagli della formazione',
'Training Event': 'Evento di formazione',
'Training Event added': 'Evento di formazione aggiunto',
'Training Event deleted': 'Evento di formazione cancellato',
'Training Event Details': 'Dettagli Evento di formazione',
'Training Event updated': 'Evento di formazione aggiornato',
'Training Events': 'Eventi di formazione',
'Training Facility': 'Struttura per la formazione',
'Training Hours (Month)': 'Ore di Formazione (Mese)',
'Training Hours (Year)': 'Ore di Formazione (Anno)',
'Training Report': 'Report sulla Formazione',
'Training updated': 'Formazione aggiornata',
'Trainings': 'Corsi di formazione',
'Transfer': 'Trasferimento',
'Transit': 'Transito',
'Transit Status': 'Stato del Transito',
'Transition Effect': 'Effetto di transizione',
'Translated File': 'Translated File',
'Translation': 'Traduzione',
'Transparent?': 'Trasparente? ',
'Transportation assistance, Rank': 'Assistenza per il trasporto, Valutazione',
'Trauma Center': 'Centro traumatologico',
'Travel Cost': 'costo del viaggio',
'Tricycles or Motorcycles. Power greater than 15 kW, top speed exceeding 45 km/h': 'Tricicli o Motocicli. Potenza superiore a 15 kW, velocità massima superiore a 45 km/h',
'Tropical Storm': 'Tempesta tropicale',
'Tropo': 'Tropo',
'Tropo Channel added': 'Canale Tropo aggiunto',
'Tropo Channel deleted': 'Canale Tropo cancellato',
'Tropo Channel Details': 'Dettagli Canale Tropo ',
'Tropo Channel updated': 'Canale Tropo aggiornato',
'Tropo Channels': 'Canali Tropo ',
'Tropical Cyclone': 'Ciclone Tropicale',
'Tropo Messaging Token': 'Token di Messaggistica Tropo ',
'Tropo Settings': 'Impostazioni Tropo',
'Tropo settings updated': 'impostazioni Tropo aggiornate',
'Tropo Voice Token': 'Tropo Voce Token',
'Truck': 'Autocarro',
'Try checking the URL for errors, maybe it was mistyped.': "Prova a verificare se ci sono errori nell' URL, forse non è stato digitato correttamente.",
'Try hitting refresh/reload button or trying the URL from the address bar again.': "Prova a premere sul pulsante aggiorna / ricarica o sottomettere di nuovo l' URL dalla barra degli indirizzi.",
'Try refreshing the page or hitting the back button on your browser.': 'Prova ad aggiornare la pagina o a premere il pulsante Indietro nel browser.',
'Tuesday': 'Martedì',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': "Il modulo tweepy non e' disponibile nell'inteprete di Python correntemente attivo - detto modulo deve essere installato per supportare Twitter di tipo non-Tropo.",
'Tweet': 'Tweet',
'Tweet deleted': 'Tweet cancellato',
'Tweet Details': 'Dettagli Tweet',
'Tweet ID': 'ID Tweet',
'Tweeted By': 'Tweettato Da',
'Tweeted by': 'Tweettato da',
'Tweeted on': 'Tweettato su',
'Tweeted On': 'Tweettato Su',
'Two-wheel mopeds, Three-wheeled vehicles, light quadricycles. Maximum speed 45 km/h and engine capacity of less than or equal to 50 cm cubic': 'Ciclomotori a due ruote, Veicoli a tre ruote, Quadricicli leggeri. Velocità massima 45 km/h e cilindrata inferiore o uguale a 50 cm cubici',
'Twilio (Inbound)': 'Twilio (In arrivo)',
'Twilio Channel added': 'Canale Twilio aggiunto',
'Twilio Channel deleted': 'Canale Twilio cancellato',
'Twilio Channel Details': 'Dettagli Canale Twilio ',
'Twilio Channel updated': 'Canale Twilio aggiornato',
'Twilio Channels': 'Canali Twilio',
'Twitter': 'Twitter',
'Twitter account updated': 'Account Twitter aggiornato',
'Twitter ID or #hashtag': 'Twitter ID o #hashtag',
'Twitter InBox': 'Twitter in arrivo',
'Twitter Search': 'Ricerca Twitter',
'Twitter Search Queries': 'Query di Ricerca Twitter',
'Twitter Search Results': 'Risultati di Ricerca Twitter',
'Twitter Settings': 'impostazioni di Twitter',
'Twitter Timeline': 'Linea del tempo Twitter',
'Type': 'Tipo ',
'Type of Construction': 'Tipo di Costruzione',
'Type of water source before the disaster': "Tipo di sorgente d'acqua prima del disastro",
"Type the first few characters of one of the Participant's names.": 'Inserisci i primi caratteri di uno dei nomi dei Partecipanti.',
"Type the first few characters of one of the Person's names.": 'Immettere le prime lettere di uno dei nomi della persona.',
'Type the name of a registered person or to add an unregistered person to this shelter click on Evacuees': 'Type the name of a registered person or to add an unregistered person to this shelter click on Evacuees',
'Type the name of a registered person \\ or to add an unregistered person to this \\\n shelter click on Evacuees': 'Digitare il nome di una persona registrata \\ o per aggiungere una persona non registrata a questa Struttura di Accoglienza \\ clicca su Evacuati',
'Types': 'Tipi',
'UID': 'UID',
'Ukrainian - Spoken': 'Ucraino - Parlato',
'Ukrainian - Written': 'Ucraino - Scritto',
'UN': 'ONU',
'UN agency': 'Agenzia delle Nazioni Unite ',
'Un-Repairable': 'Non riparabile',
'unable to parse csv file': 'Impossibile analizzare il file csv',
'Unable to parse CSV file or file contains invalid data': 'Impossibile analizzare il file CSV, o file contiene dati non validi',
'Unable to parse CSV file!': 'Impossibile analizzare il file CSV!',
'unavailable': 'Non disponibile',
'Uncheck all': 'Deseleziona tutto',
'uncheck all': 'deseleziona tutto',
'Under which condition a local record shall be updated if it also has been modified locally since the last synchronization': 'Under which condition a local record shall be updated if it also has been modified locally since the last synchronization',
'Under which conditions local records shall be updated': 'Under which conditions local records shall be updated',
'Understaffed': 'Sottodimensionato',
'Unidentified': 'Non identificato',
'unidentified': 'Non identificato',
'Unique identifier which THIS repository identifies itself with when sending synchronization requests.': 'Unique identifier which THIS repository identifies itself with when sending synchronization requests.',
'Unit added': 'Unità aggiunta',
'Unit Cost': 'Costo unitario',
'Unit deleted': 'Unità eliminata',
'Unit of Measure': 'Unità di misura',
'Unit updated': 'Unità aggiornata',
'United States Dollars': 'Dollari USA ',
'Units': 'Unità',
'Unknown': 'Sconosciuto',
'unknown': 'sconosciuto',
'Unknown Peer': 'Peer sconosciuto',
'Unknown type of facility': 'Tipo Struttura sconosciuto',
'unlimited': 'Illimitato',
'Unmark as duplicate': 'Deselezionare come duplicato ',
'Unreinforced masonry': 'Muratura non rinforzata',
'Unresolved Conflicts': 'Conflitti irrisolti',
'Unsafe': 'Insicuro',
'Unselect to disable the modem': 'Deselezionare per disabilitare il modem',
'Unselect to disable this API service': 'Deseleziona per disattivare questo servizio API',
'Unselect to disable this SMTP service': 'Deselezionare per disattivare questo servizio SMTP',
'Unsent': 'Non inviato',
'Unspecified': 'Non specificato ',
'unspecified': 'Non specificato',
'Unsubscribe': 'Rimuovere sottoscrizione',
'Unsupported data format!': 'Formato dati non supportato!',
'Unsupported method!': 'Metodo non supportato!',
'unverified': 'non verificato',
'Update': 'Aggiornamento',
'Update Activity Report': 'Relazione di aggiornamento attività',
'Update Base Location': 'Aggiornamento Ubicazione Base ',
'Update Cholera Treatment Capability Information': 'Aggiornare informazioni sulla capacità di trattamento del colera',
'Update Coalition': 'Aggiorna la coalizione',
'Update if Master': 'Aggiornare se Master',
'Update if Newer': 'Aggiornare se più recente',
'Update Location': 'Aggiorna Località ',
'Update Method': 'Aggiorna il metodo',
'Update Notification': 'Aggiornare Notifica',
'Update Policy': 'Update Policy',
'Update Report': 'Relazione di aggiornamento ',
'Update Request': 'Richiesta di aggiornamento',
'Update saved filter': 'Aggiorna Filtro salvato',
'Update Service Profile': 'Aggiorna il Profilo di servizio',
'Update Status': 'Aggiornare lo Stato',
'Update Task Status': "Aggiorna lo Stato dell'attività",
'Update this entry': 'Aggiornare questa voce ',
'Update this filter?': 'Update this filter?',
'Update Unit': 'Unità di aggiornamento',
'Update your current ordered list': 'Aggiornate la vostra lista ordinata attualmente',
'updated': 'aggiornato',
'Updated By': 'Aggiornato da',
'updates only': 'solo aggiornamenti',
'Upload': 'Carica',
'Upload a (completely or partially) translated CSV file': 'Carica un file CSV tradotto (completamente o parzialmente)',
'Upload a Spreadsheet': 'Carica un foglio elettronico',
'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': 'Carica un file immagine (bmp, gif, jpeg o png), max. 300x300 pixel!',
'Upload an image file (png or jpeg), max. 400x400 pixels!': 'Carica un file immagine (png o jpeg), grandezza massima in pixels 400x400 !',
'Upload an image file here.': 'Carica un file immagine da qui',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": "Carica un file immagine da qui Se non si carica un file di immagine, allora è necessario specificarne la posizione nel campo dell' URL.",
'Upload an image, such as a photo': "Carica un'immagine come una foto",
'Upload file': 'Carica file',
'Upload Format': 'Caricare il Format ',
'Upload Photos': 'Caricare foto',
'Upload Scanned OCR Form': 'Caricare OCR Form scansionati ',
'Upload Shapefile': 'Carica Shapefile ',
'Upload Spreadsheet': 'Carica foglio di calcolo',
'Upload Track': 'Carica Traccia',
'Upload translated files': 'Carica file tradotti',
'Uploaded file is not a PDF file. Provide a Form in valid PDF Format.': 'Il file caricato non è un file PDF. Fornire un form in formato PDF valido. ',
"Uploaded file(s) are not Image(s). Supported image formats are '.png', '.jpg', '.bmp', '.gif'.": "I file caricati non sono immagini. I formati di immagine supportati sono '. Png', '. Jpg', '. Bmp', '. Gif'. ",
'Uploaded PDF file has more/less number of page(s) than required. Check if you have provided appropriate revision for your Form as well as check the Form contains appropriate number of pages.': 'Il file PDF caricato ha più / meno il numero di pagine di quanto richiesto. Verificare di aver fornito una revisione formalmente corretta e controllare che il modulo contenga un numero adeguato di pagine. ',
'Urban area': 'Area Urbana',
'Urban Fire': 'Incendio Urbano',
'Urgent': 'Urgente',
'URL': 'URL',
'URL for the Mobile Commons API': 'URL per le Mobile Commons API',
'URL for the twilio API.': 'URL per le twilio API.',
'URL of the default proxy server to connect to remote repositories (if required). If only some of the repositories require the use of a proxy server, you can configure this in the respective repository configurations.': 'URL of the default proxy server to connect to remote repositories (if required). If only some of the repositories require the use of a proxy server, you can configure this in the respective repository configurations.',
'URL of the proxy server to connect to the repository (leave empty for default proxy)': 'URL of the proxy server to connect to the repository (leave empty for default proxy)',
'URL/Link': 'URL/Link',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Utilizzare (...)&(...) per AND, (...)|(...) per OR, e ~(...) per NOT per costruire query più complesse.',
'Use decimal': 'Utilizzare decimale ',
'Use default': 'Utilizzare default ',
'Use deg, min, sec': 'Utilizzare gradi, min, sec ',
'Use Geocoder for address lookups?': 'Utilizzare Geocoder per le ricerche di indirizzi? ',
'Use Site?': 'Usare Sito? ',
'Use these links to download data that is currently in the database.': 'Utilizzare questi collegamenti per scaricare i dati che sono attualmente nel database.',
'Use this to set the starting location for the Location Selector.': 'Utilizzare questa funzione per impostare la posizione di partenza per il selettore Ubicazione. ',
'Used by IRS & Assess': 'IRS utilizzato da & Valutare',
'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'Utilizzato nel Tooltip onHover & nei Popups del Cluster per differenziare tra i tipi.',
'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'Utilizzato per creare tooltip onHover & primo campo utilizzato anche nei Popups del Cluster per differenziare tra i record.',
'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Utilizzato per verificare che la latitudine delle ubicazioni inserite sia ragionevole. Può essere utilizzato per filtrare gli elenchi di risorse che hanno delle ubicazioni.',
'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Utilizzato per verificare che la longitudine delle locazioni inserite sia ragionevole. Può essere utilizzato per filtrare gli elenchi di risorse che hanno delle ubicazioni.',
'Used to import data from spreadsheets into the database': 'Utilizzato per importare i dati da fogli elettronici nel database',
'Used to populate feature attributes which can be used for Styling and Popups.': 'Used to populate feature attributes which can be used for Styling and Popups.',
'Used to populate feature attributes which can be used for Styling.': 'Utilizzato per compilare gli attributi funzionalità che possono essere utilizzati per lo styling. ',
'Used within Inventory Management, Request Management and Asset Management': "Utilizzato all'interno della gestione inventario, gestione della richiesta e gestione del patrimonio",
'User': 'Utente ',
'User Account': 'Account Utente ',
'User Account has been Disabled': "L'Account dell'utente è stato disabilitato",
'User added': 'Utente aggiunto',
'User added to Role': 'Utente aggiunto al ruolo ',
'User already has this role': "L'utente ha già questo ruolo",
'User deleted': 'Utente eliminato',
'User Details': 'Dettagli utente',
'User ID': 'User ID',
'User Management': 'Gestione utenti',
'User Profile': 'Profilo Utente ',
'User Requests': 'Richieste utente',
'User Roles': 'Ruoli utente',
'User Updated': 'Utente aggiornato',
'User updated': 'Utente aggiornato',
'User with Role': 'Utente con ruolo ',
'Username': 'Nome utente ',
'Username to use for authentication at the remote site.': 'Username to use for authentication at the remote site.',
'Users': 'Utenti ',
'Users in my Organizations': 'Utenti nelle mie Organizzazioni ',
'Users must ensure that the content they post is accurate to the best of their knowledge and has been checked': 'Gli utenti si devono assicurare che i contenuti inseriti siano accurati e che siano stati fatti tutti i controlli',
'Users must have the approval of their organisation to post content.': "Gli utenti devono avere l'approvazione della loro organizzazione a pubblicare contenuti. ",
'Users must not post content which is critical or offensive to other users or stakeholders': 'Gli utenti non devono inviare contenuti offensivi per altri utenti o stakeholder',
'Users removed': 'Utenti rimossi',
'Users with this Role': 'Utenti con questo ruolo ',
'Uses the REST Query Format defined in': 'Utilizza il formato della query REST definito in',
'using default': 'Default in uso',
'UTC Offset': 'UTC Offset',
'Utilities': 'Utilità',
'Utility, telecommunication, other non-transport infrastructure': 'Utilità, telecomunicazioni, altre infraStruttura non di trasporto',
'UUID': 'UUID',
'Valid From': 'Valido Da',
'Valid Until': 'Valido Fino ',
'Value': 'Valore',
'Various Reporting functionalities': 'Varie funzionalità di reporting',
'Vehicle': 'Veicolo',
'Vehicle Crime': 'Veicolo del reato',
'Vehicle Types': 'Tipi di Veicolo',
'Vehicles designed for the transport of more than eight persons in addition to the driver, can be coupled to a trailer. Maximum authorized mass of the trailer is 750 kg': 'Autoveicoli progettati per il trasporto di più di otto persone oltre al conducente, può essere agganciato un rimorchio. Massa massima autorizzata per il rimorchio è 750 kg',
'Vehicles designed for the transport of not more than 16 people, in addition to the driver. Maximum length of 8 meters, can be combined with a trailer having a maximum authorized mass does not exceed 750 kg.': 'Autoveicoli progettati per il trasporto di non più di 16 persone, oltre al conducente. Lunghezza massima di 8 metri, può essere agganciato un rimorchio la cui massa massima autorizzata non superi 750 kg.',
'Vehicles designed for the transport of not more than eight persons in addition to the driver. Maximum authorized mass exceeding 3500 kg, but not exceeding 7500 kg; can be combined with a trailer having a maximum authorized mass does not exceed 750 kg.': 'Autoveicoli progettati per il trasporto di non più di otto persone oltre al conducente. Massa massima autorizzata superiore a 3500 kg, ma non superiore a 7500 kg; può essere agganciato un rimorchio la cui massa massima autorizzata non sia superiore a 750 kg.',
'Vehicles for the transport of not more than eight persons in addition to the driver. It can be combined with a trailer having a maximum authorized mass not exceeding 750 kg': 'Autoveicoli per il trasporto di non più di otto persone oltre al conducente. Può essere agganciato un rimorchio la cui massa massima autorizzata non sia superiore a 750 kg',
'Vehicles or machines Agricole. Max mass authorized 3500 kg and designed for the transport of not more than eight persons in addition to the driver. It can be combined with a trailer.': 'Autoveicoli o macchine Agricole. Massa massima autorizzata 3500 kg e progettati per il trasporto di non più di otto persone oltre al conducente. Può essere agganciato un rimorchio.',
'Vehicles with a towing vehicle in category B and a trailer or semitrailer. Maximum mass of the trailer does not exceed 3500 kg.': 'Veicoli con motrice della categoria B e con un rimorchio o semirimorchio. Massa massima del rimorchio non superiore a 3500 kg.',
'Vehicles with engine category C1 or B. Maximum authorized mass of the trailer is 750 Kg (C1) o 3500 Kg (B), overall should not exceed 12000 Kg': 'Veicoli con motrice di categoria C1 o B. Massa massima autorizzata per il rimorchio è 750 Kg (C1) o 3500 Kg (B), nel complesso non devono superare i 12000 Kg',
'Vehicles with engine type C and with a trailer. Maximum authorized mass 750 kg': 'Veicoli con motrice di tipo C e con un rimorchio o semirimorchio. Massa massima autorizzata 750 kg',
'Vehicles with engine type D and a trailer. Maximum authorized mass of the trailer is 750 Kg': 'Veicoli con motrice di tipo D e da un rimorchio. Massa massima autorizzata per il rimorchio è 750 Kg',
'Vehicles with engine type D1 and its trailer. Maximum authorized mass of the trailer is 750 Kg': 'Veicoli con motrice di tipo D1 e da un rimorchio. Massa massima autorizzata per il rimorchio è 750 Kg',
'Vehicles of category C1, C e/o C+E for professional transport': 'Veicoli della categoria C1, C e/o C+E per trasporto professionale',
'Vehicles of category D1, D e/o D+E in public service or line rental with driver': 'Veicoli della categoria D1, D e/o D+E in servizio pubblico di linea o di noleggio con conducente',
'Venue': 'Posto',
'Verification Status': 'Stato della verifica',
'verified': 'Verificato',
'Verified?': 'Verificato?',
'Verify password': 'Verifica password',
'Version': 'Versione',
'Very Good': 'Molto Bene',
'Very High': 'Molto alto',
'Very Strong': 'Molto forte',
'Video Tutorials': 'Video Tutorials',
'View': 'Vista ',
'View Alerts received using either Email or SMS': 'Visualizzare avvisi ricevuti mediante e-mail o SMS',
'View All': 'visualizza tutto',
'View and/or update their details': 'Visualizza e/o aggiorna i loro dettagli',
'View Email Accounts': 'Visualizzare gli Account di posta elettronica',
'View Email InBox': 'Visualizzare Email nella Posta in Arrivo',
'View Error Tickets': 'Vedi i biglietti di errore',
'View full screen': 'Visualizza schermo intero',
'View Fullscreen Map': 'Vista Mappa a Schermo Intero',
'View Image': 'Visualizza immagine',
'View InBox': 'Visualizzare Posta in Arrivo',
'View Items': 'visualizza voci',
'View Location Details': "Visualizza Dettagli dell'ubicazione",
'View Message Log': 'Visualizzare Log dei Messaggi',
'View Mobile Commons Settings': 'Visualizzare Impostazioni Comuni del Cellulare',
'View Modem Channels': 'Visualizzare Canali Modem ',
'View On Map': 'Vista sulla mappa',
'View on Map': 'Visualizza sulla mappa',
'View or update the status of a hospital.': 'Visualizzare o aggiornare lo stato di un ospedale.',
'View Outbox': 'Visualizzare Posta in uscita',
'View Parser Connections': 'Visualizzare Connessioni Decodificatori',
'View pending requests and pledge support.': 'Visualizzare le richieste in sospeso e fornire supporto',
'View Picture': 'Guarda Immagine',
'View Queries': 'Visualizzare Queries',
'View RSS Channels': 'Visualizzare Canali RSS ',
'View RSS Posts': 'Visualizzare Post RSS ',
'View Sender Priority': 'Visualizzare Priorità Mittente',
'View Sent Emails': 'Visualizzare Email Spedite',
'View Sent SMS': 'Visualizzare SMS spediti',
'View Sent Tweets': 'Visualizzare Tweet Spediti',
'View Settings': 'Visualizzare le Impostazioni',
'View SMS InBox': 'Visualizzare SMS in arrivo',
'View Test Result Reports': 'View Test Result Reports',
'View the hospitals on a map.': 'Visualizzare gli ospedali su una mappa.',
'View the module-wise percentage of translated strings': 'View the module-wise percentage of translated strings',
'View Tickets': 'Visualizza i ticket',
'View Translation Percentage': 'View Translation Percentage',
'View Tweet': 'Visualizzare Tweet ',
'View Twilio Channels': 'Visualizzare Canali Twilio',
'View Twitter InBox': 'Visualizzare Twitter in arrivo',
'View/Edit the Database directly': 'Visualizzare / Modificare il Database direttamente',
'Village': 'Villaggio',
'Village Leader': 'Capo del Villaggio',
'Visible?': 'Visibile?',
'Visual Recognition': 'Riconoscimento visivo',
'Volcanic Ash Cloud': 'Nube di cenere vulcanica',
'Volcanic Event': 'Evento vulcanico',
'Volcano': 'Vulcano',
'Volunteer': 'Volontario',
'volunteer': 'Volontario',
'Volunteer added': 'Volontario aggiunto',
'Volunteer Availability': 'Disponibilità del volontario',
'Volunteer availability added': 'Disponibilità del volontario aggiunta',
'Volunteer availability deleted': 'Disponibilità del volontario eliminata',
'Volunteer availability updated': 'Disponibilità del volontario aggiornata',
'Volunteer Cluster': 'Gruppo di Volontari',
'Volunteer Cluster added': 'Aggiunto Gruppo Volontari',
'Volunteer Cluster deleted': 'Cancellato Gruppo Volontari',
'Volunteer Cluster Position': 'Posizione del Gruppo di Volontari',
'Volunteer Cluster Position added': 'Aggiunta la Posizione del Gruppo di Volontari',
'Volunteer Cluster Position deleted': 'Cancellata la Posizione del Gruppo di Volontari',
'Volunteer Cluster Position updated': 'Aggiornata la Posizione del Gruppo di Volontari',
'Volunteer Cluster Type': 'Tipologia del Gruppo Volontari',
'Volunteer Cluster Type added': 'Aggiunta la tipologia del Gruppo di Volontari',
'Volunteer Cluster Type deleted': 'Cancellata la tipologia del Gruppo di Volontari',
'Volunteer Cluster Type updated': 'Aggiornata la tipologia del Gruppo di Volontari',
'Volunteer Cluster updated': 'Aggiornato il Gruppo dei Volontari',
'Volunteer deleted': 'Volontario cancellato',
'Volunteer Details': 'Spontanee Dettagli',
'Volunteer details updated': 'Dettagli del volontario aggiornanati',
'Volunteer Details updated': 'Dettagli volontario aggiornati',
'Volunteer Hours': 'Ore Volontario',
'Volunteer ID': 'ID Volontario',
'Volunteer Information': 'Informazioni sul volontario',
'Volunteer Management': 'Gestione Volontario',
'Volunteer Project': 'Progetto Volontari',
'Volunteer Record': 'Spontanee Record',
'Volunteer Report': 'Report dei volontari',
'Volunteer Request': 'Richiesta volontario',
'Volunteer Role': 'Ruolo Volontario',
'Volunteer Role added': 'Ruolo Volontario aggiunto',
'Volunteer Role Catalog': 'Catalogo Ruolo Volontario',
'Volunteer Role deleted': 'Ruolo Volontario cancellato',
'Volunteer Role Details': 'Dettagli Ruolo Volontario',
'Volunteer Role updated': 'Ruolo volontario aggiornato',
'Volunteer Service Record': 'Stato di Servizio del Volontario',
'volunteers': 'Volontari',
'Volunteers': 'Volontari',
'Volunteers were notified!': 'I volontari sono stati informati!',
'Vote': 'Voto',
'Votes': 'Voti',
'Walking Only': 'Solo pedoni',
'Wall or other structural damage': 'Muro o altri danni strutturali',
'Warehouse': 'Magazzino',
'Warehouse added': 'Archivio aggiunto',
'Warehouse deleted': 'Archivio eliminato',
'Warehouse Details': "Dettagli dell'archivio",
'Warehouse Manager': 'Capo del Magazzino',
'Warehouse updated': 'Magazzino aggiornato',
'Warehouses': 'Magazzini',
'Warning: this housing unit is full for daytime': 'Avviso: questo unità abitativa è pieno per le ore diurne',
'Warning: this housing unit is full for the night': 'Avviso: questo unità abitativa è pieno per la notte',
'Warning: this shelter is full for daytime': 'Avviso: questo struttura è pieno per le ore diurne',
'Warning: this shelter is full for the night': 'Avviso: questo struttura è pieno per la notte',
'WARNING': 'ATTENZIONE',
'WASH': 'Lavaggio',
'WASH Borehole': 'Lavaggio del pozzo',
'WASH Communal Latrine': 'Lavaggio delle latrine mobili',
'WASH Latrine Cabin': 'Lavaggio capanno delle Latrine',
'WASH Potable Water': 'Lavaggio acqua potabile',
'WASH Potable Water Source': 'Lavaggio sorgente di acqua potabile',
'WASH Sanitation': 'Lavaggio servizi igienici',
'WASH Shower': 'Lavaggio doccie',
'WASH Solid Waste': 'Lavaggio rifiuti solidi',
'WASH Spring Water': 'Lavaggio della sorgente',
'WASH Toilet': 'Lavaggio bagno',
'WASH Water Source': 'Lavaggio della sorgente di acqua',
'WASH Water Trucking': 'Lavaggio del trasporto di acqua',
'WASH Well': 'Lavarsi bene',
'Water and Sanitation': 'Acqua e Igiene',
'Water collection': 'Raccolta acqua',
'Water gallon': "Gallone d'acqua",
'Water Sanitation Hygiene': 'Igiene e depurazione acqua',
'Water storage containers in households': "Contenitori d'acqua di riserva nelle case",
'Water supply': "Approviggionamento d'acqua",
'Waterspout': 'Tromba marina',
'wavy': 'Ondulato',
'We have tried': 'Abbiamo provato ',
'Weak': 'Debole',
'Web API': 'Web API',
'Web API Channel added': 'Canale Web API Aggiunto',
'Web API Channel deleted': 'Canale Web API cancellato',
'Web API Channel Details': 'Dettagli del canale Web API',
'Web API Channel updated': 'Canale Web API aggiornato',
'Web API Channels': 'Canali Web API ',
'Web Map Service Browser Name': 'Nome del Browser del Servizio di Mappa Web',
'Web Map Service Browser URL': 'URL del Browser del Servizio di Mappa Web',
'Website': 'Sito WEB',
'Wednesday': 'Mercoledì',
'Weekends only': 'Solo Fine Settimana',
'weekly': 'Settimanale',
'Weekly': 'Settimanale',
'Weight': 'Peso',
'Weight (kg)': 'Peso (kg)',
'Welcome to the Sahana Portal at': 'Benvenuti nel portale Sahana a',
'Well-Known Text': 'Testo ben noto',
'WFS Layer': 'Strato WFS',
'WGS84 (EPSG 4236) is required for many WMS servers.': 'WGS84 (EPSG 4236) necessario per molti server WMS.',
'What order to be contacted in.': 'In quale ordine essere contattati',
'Wheat': 'frumento',
'When reports were entered': 'Quando i rapporti sono stati immessi',
'When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can\'t. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.': 'Quando si sincronizzano dei dati con altri, i conflitti si verificano in caso due (o più) parti vogliono sincronizzare i dati che sono stati entrambi modificati, ad esempio conflitto di informazioni. Il modulo Sync cerca di risolvere questi conflitti automaticamente, ma in alcuni casi non è possibile. In questi casi, sta all\'utente risolvere quei conflitti manualmente, fare clic sul collegamento sulla destra per accedere a questa pagina.',
'Whether calls to this resource should use this configuration as the default one': 'Chiamate indirizzate a questa risorsa dovrebbero utilizzare questa configurazione come quella di default',
'Whether the Latitude & Longitude are inherited from a higher level in the location hierarchy rather than being a separately-entered figure.': 'Latitudine e longitudine vengono ereditate da un livello superiore nella gerarchia delle posizioni (piuttosto che essere dati inseriti separatamente)',
'Whether the resource should be tracked using S3Track rather than just using the Base Location': "La risorsa deve essere monitorato con S3Track piuttosto che utilizzare l'archivio Ubicazioni",
'Which methods to apply when importing data to the local repository': 'Which methods to apply when importing data to the local repository',
'Whiskers': 'Baffi',
'white': 'Bianco',
'Whitelist a Sender': 'Whitelist un mittente',
'Whitelisted Senders': 'Whitelisted Mittenti',
'Who is doing what and where': 'Chi sta facendo che cosa e dove',
'Who usually collects water for the family?': "Chi raccoglie generalmente l'acqua per la famiglia?",
'wider area, longer term, usually contain multiple Activities': 'area più ampia, termine più lungo, generalmente contiene più attività',
'widowed': 'Vedovo',
'Width (m)': 'Larghezza (m)',
'Wild Fire': 'Fuoco selvaggio',
'Will create and link your user account to the following records': ' Il vostro account utente sarà creato e collegato con i seguenti record',
'Wind Chill': 'Refrigerare con il vento',
'Window frame': 'Cornice della finestra',
'Winter Storm': "Tempesta d'inverno",
'With Domestic Animals': 'Con animali domestici',
'within human habitat': "all'interno dell'habitat umano",
'WKT is Invalid!': 'WKT non valido',
'WMS Layer': 'Strato WMS',
'Women of Child Bearing Age': 'Puerpera',
'Women participating in coping activities': 'Donne che partecipano ad attività di copiatura',
'Women who are Pregnant or in Labour': 'Donne Incinte o Lavoratrici',
'Womens Focus Groups': 'Gruppo Femminile',
'Wooden plank': 'Tavolone di legno',
'Wooden poles': 'Poli di legno',
'Work': 'Lavoro ',
'Work on Program': 'Lavora sul Programma',
'Work phone': 'Telefono di lavoro',
'Working Distance from Shelter (km)': 'Distanza del lavoro dalla Struttura di Accoglienza (km)',
'Working hours end': 'Fine orario di lavoro',
'Working hours start': 'Inizio orario di lavoro',
'Working or other to provide money/food': 'Lavoro o altro per fornire denaro/alimenti',
'Working Status: %s': 'Stato di funzionamento: %s ',
'X-Ray': 'Raggi-X',
'xlwt module not available within the running Python - this needs installing for XLS output!': "Modulo Xlwt non disponibile all'interno di esecuzione Python - è necessario installarlo per output XLS!",
'XYZ Layer': 'Strato XYZ',
'Year': 'Anno ',
'Year built': 'Anno di costruzione',
'Year Founded': 'Anno di fondazione',
'Year of Manufacture': 'Anno di fabbricazione',
'Year that the organization was founded': "L'anno in cui l'organizzazione è stata fondata",
'Yellow': 'Giallo',
'yes': 'yes',
'Yes': 'Sì',
'YES': 'SI',
'You are a recovery team?': 'Siete una squadra di aiuto ?',
'You are attempting to delete your own account - are you sure you want to proceed?': 'Si sta tentando di eliminare il proprio account-Si è sicuri di voler continuare?',
'You are currently reported missing!': 'Sei attualmente riportato mancante!',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': 'È possibile modificare la configurazione della sincronizzazione del modulo nella sezione Impostazioni. Questa configurazione include l\' UUID (unique identification number), pianificazioni sincronizzate, servizio di segnalazione e così via. Fare clic sul seguente collegamento per andare alla pagina delle impostazioni di Sincronizzazione.',
'You can click on the map below to select the Lat/Lon fields': 'Cliccare sulla mappa sottostante per selezionare i valori di Latitudine / Longitudine',
'You can search by by group name, description or comments and by organization name or acronym. You may use % as wildcard. Press \'Search\' without input to list all.': 'È possibile cercare per nome del gruppo, descrizione o commenti e per nome dell\'organizzazione o acronimo. Si può usare % come carattere jolly. Premere il tasto \'Cerca\' senza input per elencare tutto.',
'You can search by course name, venue name or event comments. You may use % as wildcard. Press \'Search\' without input to list all events.': 'È possibile cercare per nome del corso, nome luogo o commenti evento. Si può usare % come carattere jolly. Premere il tasto \'Cerca\' senza input per elencare tutti gli eventi.',
'You can search by job title or person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press \'Search\' without input to list all persons.': 'È possibile cercare per ruolo professionale o nome della persona - inserire Nome o Cognome, separati da spazi. Si può usare % come carattere jolly. Premere il tasto \'Cerca\' senza input per elencare tutte le persone. ',
'You can search by name, acronym or comments': 'Ricerca per nomi, acronimi o commenti',
'You can search by name, acronym, comments or parent name or acronym.': 'Ricerca per nomi, acronimi, commenti o nome del parente o acronimo.',
'You can search by trainee name, course name or comments. You may use % as wildcard. Press \'Search\' without input to list all trainees.': 'È possibile cercare per nome tirocinante, nome del corso o commenti. Potete usare% come carattere jolly. Premere il tasto \'Cerca\' senza input per elencare tutti i tirocinanti. ',
'You can select an area on the image and save to crop it.': "Selezionare un'area dell'immagine e poi salvarla per modificarla",
'You can select the Draw tool': 'È possibile selezionare lo strumento di disegno',
'You can set the modem settings for SMS here.': 'È possibile impostare il modem impostazioni per SMS in questa sede.',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'È possibile utilizzare lo strumento di conversione per convertire o le coordinate GPS o i Gradi / minuti / secondi.',
'You do not have permission for any facility to make a commitment.': "Non si dispone dell'autorizzazione per qualsiasi Struttura per prendere un impegno.",
'You do not have permission for any facility to make a request.': "Non si dispone dell'autorizzazione per alcuna Struttura per eseguire una richiesta.",
'You do not have permission for any facility to perform this action.': 'Non hai il permesso per alcuna Struttura per eseguire questa azione.',
'You do not have permission for any organization to perform this action.': 'Non hai il permesso di alcuna organizzazione per eseguire questa azione ',
'You do not have permission for any site to add an inventory item.': "Non si dispone dell'autorizzazione per alcuna sede per aggiungere una voce di inventario.",
'You do not have permission for any site to receive a shipment.': "Non si dispone dell'autorizzazione per alcun sito per ricevere una spedizione.",
'You do not have permission for any site to send a shipment.': "Non si dispone dell'autorizzazione per alcuna sede per inviare una spedizione.",
'You do not have permission to cancel this received shipment.': "Non si dispone dell'autorizzazione per annullare questa spedizione ricevuta.",
'You do not have permission to cancel this sent shipment.': "Non si dispone dell'autorizzazione per annullare questa spedizione inviata.",
'You do not have permission to make this commitment.': 'Non si dispone delle autorizzazioni per creare questo impegno.',
'You do not have permission to receive this shipment.': "Non si dispone dell'autorizzazione per ricevere questa spedizione.",
'You do not have permission to send a shipment from this site.': "Non si dispone dell'autorizzazione per inviare una spedizione da questo sito.",
'You do not have permission to send messages': "Non si dispone dell'autorizzazione per l'invio di messaggi",
'You do not have permission to send this shipment.': "Non si dispone dell' autorizzazione per inviare questa spedizione.",
'You have a personal map configuration. To change your personal configuration, click': 'Si dispone di una configurazione personale della mappa. Per modificare la configurazione personale, fare clic su',
'You have found a dead body?': 'Hai trovato un cadavere?',
'You have to select a housing unit related to this shelter': "Devi selezionare un'unità abitativa relative a questa Struttura di Accoglienza",
"You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": "Ci sono cambiamanti non salvati. Cliccare Cancella ora, e poi 'Salva' per salvarli Clicare OK per scartarli.",
'You have unsaved changes. You need to press the Save button to save them': 'Hai modifiche non salvate. Schiacciare il bottone Save per salvarle',
"You haven't made any calculations": 'Non hai fatto nessun calcolo',
'You must agree to the Terms of Service': 'È necessario accettare i Termini di Servizio',
'You must be logged in to register volunteers.': 'È necessario essere collegati per registrare volontari.',
'You must be logged in to report persons missing or found.': 'È necessario essere collegati per segnalare persone disperse o ritrovate.',
'You must enter a minimum of %d characters': 'Inserire un minimo di% caratteri',
'You must provide a series id to proceed.': 'È necessario fornire un identificativo di serie per continuare.',
'You need to have at least 2 records in this list in order to merge them.': 'Devi avere almeno 2 record in questa lista per unirli',
'You should edit Twitter settings in models/000_config.py': 'Si dovrebbero modificare le impostazioni di Twitter in models/000_config.py',
'Your current ordered list of solution items is shown below. You can change it by voting again.': 'Di seguito è mostrato l\' attuale elenco ordinato di voci di soluzione . È possibile modificarlo, votando nuovamente.',
'Your post was added successfully.': 'Il post è stato aggiunto con successo.',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'Al suo sistema è stato assegnato un identificativo univoco (UUID), che gli altri computer nelle vicinanze potranno utilizzare per identificarla. Per visualizzare il proprio UUID, è possibile passare a Sincronizzazione -> impostazioni di sincronizzazione È anche possibile visualizzare altre impostazioni su tale pagina.',
'Zero Hour': 'Ora Zero',
'Zinc roof': 'Tetto in zinco',
'Zone': 'Zona',
'Zoom': 'Zoom ',
'Zoom In': 'Zoom in ',
'Zoom in closer to Edit OpenStreetMap layer': 'Zoom in per modificare lo strato OpenStreetMap',
'Zoom In: click in the map or use the left mouse button and drag to create a rectangle': 'Zoom In: fare clic nella mappa o utilizzare il tasto sinistro del mouse e trascinare per creare un rettangolo',
'Zoom Levels': 'Livelli di ingrandimento',
'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle': 'Zoom Out: fare clic nella mappa o utilizzare il tasto sinistro del mouse e trascinare per creare un rettangolo',
'Zoom to Current Location': 'Zoom sulla posizione attuale',
'Zoom to maximum map extent': 'Zoom della mappa fino alla misura massima',
}
| flavour/ifrc_qa | languages/it.py | Python | mit | 392,499 | [
"VisIt"
] | 2c316e56d0fd838fd4ee8ba11e4292998ee2a199e6cd0b5776af7c954d031059 |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import glance_store
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
import six
from six.moves import http_client as http
import webob
from glance.api import policy
from glance.common import exception
from glance.common import timeutils
from glance.common import utils
from glance.common import wsgi
import glance.db
import glance.gateway
from glance.i18n import _
import glance.notifier
import glance.schema
# Module-level logger, named after this module per oslo.log convention.
LOG = logging.getLogger(__name__)
class ImageMembersController(object):
    """WSGI controller for the v2 image-members API.

    Manages image sharing between tenants: creating, updating, listing,
    showing and deleting image memberships.  Domain-layer exceptions are
    translated into the appropriate ``webob`` HTTP errors here.
    """
    def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
                 store_api=None):
        # Each collaborator falls back to the application-wide default so
        # the controller can be built with no arguments in production,
        # while tests may inject fakes.
        self.db_api = db_api or glance.db.get_api()
        self.policy = policy_enforcer or policy.Enforcer()
        self.notifier = notifier or glance.notifier.Notifier()
        self.store_api = store_api or glance_store
        self.gateway = glance.gateway.Gateway(self.db_api, self.store_api,
                                              self.notifier, self.policy)
    def _get_member_repo(self, req, image):
        """Return the member repository for *image*.

        :raises webob.exc.HTTPForbidden: when the domain layer refuses
            access to the image's members.
        """
        try:
            # For public images, a forbidden exception with message
            # "Public images do not have members" is thrown.
            return self.gateway.get_member_repo(image, req.context)
        except exception.Forbidden as e:
            msg = (_("Error fetching members of image %(image_id)s: "
                     "%(inner_msg)s") % {"image_id": image.image_id,
                                         "inner_msg": e.msg})
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
    def _lookup_image(self, req, image_id):
        """Fetch the image entity, mapping domain errors to HTTP errors.

        :raises webob.exc.HTTPNotFound: if the image does not exist.
        :raises webob.exc.HTTPForbidden: if the caller may not see it.
        """
        image_repo = self.gateway.get_repo(req.context)
        try:
            return image_repo.get(image_id)
        except (exception.NotFound):
            msg = _("Image %s not found.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.Forbidden:
            msg = _("You are not authorized to lookup image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
    def _lookup_member(self, req, image, member_id):
        """Fetch one membership of *image*, mapping errors to HTTP errors.

        :raises webob.exc.HTTPNotFound: if *member_id* is not a member.
        :raises webob.exc.HTTPForbidden: if members may not be read.
        """
        member_repo = self._get_member_repo(req, image)
        try:
            return member_repo.get(member_id)
        except (exception.NotFound):
            msg = (_("%(m_id)s not found in the member list of the image "
                     "%(i_id)s.") % {"m_id": member_id,
                                     "i_id": image.image_id})
            LOG.warning(msg)
            raise webob.exc.HTTPNotFound(explanation=msg)
        except exception.Forbidden:
            msg = (_("You are not authorized to lookup the members of the "
                     "image %s.") % image.image_id)
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
    @utils.mutating
    def create(self, req, image_id, member_id):
        """
        Adds a membership to the image.

        :param req: the Request object coming from the wsgi layer
        :param image_id: the image identifier
        :param member_id: the member identifier
        :returns: The response body is a mapping of the following form

        .. code-block:: json

            {'member_id': <MEMBER>,
             'image_id': <IMAGE>,
             'status': <MEMBER_STATUS>,
             'created_at': ..,
             'updated_at': ..}
        """
        image = self._lookup_image(req, image_id)
        member_repo = self._get_member_repo(req, image)
        image_member_factory = self.gateway.get_image_member_factory(
            req.context)
        try:
            new_member = image_member_factory.new_image_member(image,
                                                               member_id)
            member_repo.add(new_member)
            return new_member
        except exception.Forbidden:
            msg = _("Not allowed to create members for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
        except exception.Duplicate:
            # The tenant is already a member of this image.
            msg = _("Member %(member_id)s is duplicated for image "
                    "%(image_id)s") % {"member_id": member_id,
                                       "image_id": image_id}
            LOG.warning(msg)
            raise webob.exc.HTTPConflict(explanation=msg)
        except exception.ImageMemberLimitExceeded as e:
            # The domain layer enforces a per-image membership quota.
            msg = (_("Image member limit exceeded for image %(id)s: %(e)s:")
                   % {"id": image_id,
                      "e": encodeutils.exception_to_unicode(e)})
            LOG.warning(msg)
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
    @utils.mutating
    def update(self, req, image_id, member_id, status):
        """
        Update the status of an existing membership of the image.

        :param req: the Request object coming from the wsgi layer
        :param image_id: the image identifier
        :param member_id: the member identifier
        :param status: the new membership status
        :returns: The response body is a mapping of the following form

        .. code-block:: json

            {'member_id': <MEMBER>,
             'image_id': <IMAGE>,
             'status': <MEMBER_STATUS>,
             'created_at': ..,
             'updated_at': ..}
        """
        image = self._lookup_image(req, image_id)
        member_repo = self._get_member_repo(req, image)
        member = self._lookup_member(req, image, member_id)
        try:
            member.status = status
            member_repo.save(member)
            return member
        except exception.Forbidden:
            msg = _("Not allowed to update members for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
        except ValueError as e:
            # Raised by the domain entity when the status value is invalid.
            msg = (_("Incorrect request: %s")
                   % encodeutils.exception_to_unicode(e))
            LOG.warning(msg)
            raise webob.exc.HTTPBadRequest(explanation=msg)
    def index(self, req, image_id):
        """
        Return a list of dictionaries indicating the members of the
        image, i.e., those tenants the image is shared with.

        :param req: the Request object coming from the wsgi layer
        :param image_id: The image identifier
        :returns: The response body is a mapping of the following form

        .. code-block:: json

            {'members': [
                {'member_id': <MEMBER>,
                 'image_id': <IMAGE>,
                 'status': <MEMBER_STATUS>,
                 'created_at': ..,
                 'updated_at': ..}, ..
            ]}
        """
        image = self._lookup_image(req, image_id)
        member_repo = self._get_member_repo(req, image)
        members = []
        try:
            for member in member_repo.list():
                members.append(member)
        except exception.Forbidden:
            msg = _("Not allowed to list members for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
        return dict(members=members)
    def show(self, req, image_id, member_id):
        """
        Returns the membership of the tenant wrt to the image_id specified.

        :param req: the Request object coming from the wsgi layer
        :param image_id: The image identifier
        :param member_id: the member identifier
        :returns: The response body is a mapping of the following form

        .. code-block:: json

            {'member_id': <MEMBER>,
             'image_id': <IMAGE>,
             'status': <MEMBER_STATUS>,
             'created_at': ..,
             'updated_at': ..}
        """
        try:
            image = self._lookup_image(req, image_id)
            return self._lookup_member(req, image, member_id)
        except webob.exc.HTTPForbidden as e:
            # Convert Forbidden to NotFound to prevent information
            # leakage.
            raise webob.exc.HTTPNotFound(explanation=e.explanation)
    @utils.mutating
    def delete(self, req, image_id, member_id):
        """
        Removes a membership from the image.

        :raises webob.exc.HTTPForbidden: if the caller may not delete
            members of this image.
        """
        image = self._lookup_image(req, image_id)
        member_repo = self._get_member_repo(req, image)
        member = self._lookup_member(req, image, member_id)
        try:
            member_repo.remove(member)
            # Successful delete responds 204 with an empty body.
            return webob.Response(body='', status=http.NO_CONTENT)
        except exception.Forbidden:
            msg = _("Not allowed to delete members for image %s.") % image_id
            LOG.warning(msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
class RequestDeserializer(wsgi.JSONRequestDeserializer):
    """Parse incoming JSON request bodies for the image-members API."""
    def __init__(self):
        super(RequestDeserializer, self).__init__()
    def _get_request_body(self, request):
        """Return the decoded JSON body, or raise 400 if none was sent."""
        output = super(RequestDeserializer, self).default(request)
        try:
            return output['body']
        except KeyError:
            raise webob.exc.HTTPBadRequest(
                explanation=_('Body expected in request.'))
    def create(self, request):
        """Extract the member id from a create request.

        :returns: ``{'member_id': <id>}``
        :raises webob.exc.HTTPBadRequest: on a missing, empty or
            malformed member field.
        """
        body = self._get_request_body(request)
        try:
            member_id = body['member']
        except KeyError:
            raise webob.exc.HTTPBadRequest(
                explanation=_("Member to be added not specified"))
        except TypeError:
            # Body was not a mapping at all.
            raise webob.exc.HTTPBadRequest(
                explanation=_('Expected a member in the form: '
                              '{"member": "image_id"}'))
        if not member_id:
            raise webob.exc.HTTPBadRequest(
                explanation=_("Member can't be empty"))
        return {'member_id': member_id}
    def update(self, request):
        """Extract the new status from an update request.

        :returns: ``{'status': <status>}``
        :raises webob.exc.HTTPBadRequest: on a missing or malformed
            status field.
        """
        body = self._get_request_body(request)
        try:
            status = body['status']
        except KeyError:
            raise webob.exc.HTTPBadRequest(
                explanation=_("Status not specified"))
        except TypeError:
            # Body was not a mapping at all.
            raise webob.exc.HTTPBadRequest(
                explanation=_('Expected a status in the form: '
                              '{"status": "status"}'))
        return {'status': status}
class ResponseSerializer(wsgi.JSONResponseSerializer):
    """Serialize image-member domain objects into JSON HTTP responses.

    ``create``, ``update`` and ``show`` previously carried three
    byte-identical serialization bodies; they now share a single private
    helper so the wire format is defined in one place.
    """
    def __init__(self, schema=None):
        super(ResponseSerializer, self).__init__()
        # Default to the module-level member schema when none is injected.
        self.schema = schema or get_schema()
    def _format_image_member(self, member):
        """Return a schema-filtered dict view of a single member entity."""
        member_view = {}
        attributes = ['member_id', 'image_id', 'status']
        for key in attributes:
            member_view[key] = getattr(member, key)
        member_view['created_at'] = timeutils.isotime(member.created_at)
        member_view['updated_at'] = timeutils.isotime(member.updated_at)
        member_view['schema'] = '/v2/schemas/member'
        member_view = self.schema.filter(member_view)
        return member_view
    def _write_json(self, response, view):
        # Shared body-writing logic: one JSON encoding path for both
        # single-member and collection responses.
        body = jsonutils.dumps(view, ensure_ascii=False)
        response.unicode_body = six.text_type(body)
        response.content_type = 'application/json'
    def create(self, response, image_member):
        """Write the newly created member to *response*."""
        self._write_json(response, self._format_image_member(image_member))
    def update(self, response, image_member):
        """Write the updated member to *response*."""
        self._write_json(response, self._format_image_member(image_member))
    def index(self, response, image_members):
        """Write the member collection (plus collection schema link)."""
        members = image_members['members']
        members_view = [self._format_image_member(m) for m in members]
        totalview = dict(members=members_view)
        totalview['schema'] = '/v2/schemas/members'
        self._write_json(response, totalview)
    def show(self, response, image_member):
        """Write a single member to *response*."""
        self._write_json(response, self._format_image_member(image_member))
# JSON-schema property definitions for a single image member.  Consumed by
# get_schema() below; 'description' values are wrapped in _() so they can
# be translated.
_MEMBER_SCHEMA = {
    'member_id': {
        'type': 'string',
        'description': _('An identifier for the image member (tenantId)')
    },
    'image_id': {
        'type': 'string',
        'description': _('An identifier for the image'),
        # UUID pattern: 8-4-4-4-12 hexadecimal groups.
        'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
                    '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'),
    },
    'created_at': {
        'type': 'string',
        'description': _('Date and time of image member creation'),
        # TODO(brian-rosmaita): our jsonschema library doesn't seem to like the
        # format attribute, figure out why (and also fix in images.py)
        # 'format': 'date-time',
    },
    'updated_at': {
        'type': 'string',
        'description': _('Date and time of last modification of image member'),
        # 'format': 'date-time',
    },
    'status': {
        'type': 'string',
        'description': _('The status of this image member'),
        'enum': [
            'pending',
            'accepted',
            'rejected'
        ]
    },
    'schema': {
        'readOnly': True,
        'type': 'string'
    }
}
def get_schema():
    """Return the JSON schema describing a single image member.

    The module-level template is deep-copied so callers may mutate the
    returned schema without affecting other consumers.
    """
    return glance.schema.Schema('member', copy.deepcopy(_MEMBER_SCHEMA))
def get_collection_schema():
    """Return the JSON schema describing a list of image members."""
    return glance.schema.CollectionSchema('members', get_schema())
def create_resource():
    """Image Members resource factory method"""
    # Construction order is kept: deserializer, serializer, controller.
    body_parser = RequestDeserializer()
    body_writer = ResponseSerializer()
    handler = ImageMembersController()
    return wsgi.Resource(handler, body_parser, body_writer)
| stevelle/glance | glance/api/v2/image_members.py | Python | apache-2.0 | 14,639 | [
"Brian"
] | 8871a4c618aa9425317eb0185e7a2760a5821c48d2e3541efe051bec5e56edce |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.